author     Fangrui Song <i@maskray.me>  2024-05-28 00:04:56 -0700
committer  Fangrui Song <i@maskray.me>  2024-05-28 00:04:56 -0700
commit     269bd6dabdd6edb8b7133ed0ae16e46983c4621d (patch)
tree       f26f0fb1e9debc3cb3f72ed79be9d8ac5ba65672
parent     516e44e9001cbad455882ebdf36b555286a155f4 (diff)
parent     fcffea06fdf3876dc48170d5577d7454d7303792 (diff)
Created using spr 1.3.5-bogner
Diffstat
-rwxr-xr-x  .ci/generate-buildkite-pipeline-premerge  44
-rwxr-xr-x  .ci/monolithic-linux.sh  74
-rw-r--r--  .github/workflows/llvm-bugs.yml  2
-rw-r--r--  bolt/CMakeLists.txt  4
-rw-r--r--  bolt/cmake/modules/AddBOLT.cmake  1
-rw-r--r--  bolt/docs/BAT.md  5
-rw-r--r--  bolt/docs/CMakeLists.txt  1
-rw-r--r--  bolt/include/bolt/Core/BinaryContext.h  12
-rw-r--r--  bolt/include/bolt/Passes/BinaryPasses.h  29
-rw-r--r--  bolt/include/bolt/Passes/MCF.h  41
-rw-r--r--  bolt/include/bolt/Passes/StokeInfo.h  4
-rw-r--r--  bolt/include/bolt/Profile/BoltAddressTranslation.h  49
-rw-r--r--  bolt/include/bolt/Profile/DataAggregator.h  5
-rw-r--r--  bolt/lib/Core/BinaryContext.cpp  21
-rw-r--r--  bolt/lib/Core/BinaryEmitter.cpp  4
-rw-r--r--  bolt/lib/Core/BinaryFunction.cpp  26
-rw-r--r--  bolt/lib/Core/DebugNames.cpp  4
-rw-r--r--  bolt/lib/Core/DynoStats.cpp  5
-rw-r--r--  bolt/lib/Passes/BinaryFunctionCallGraph.cpp  4
-rw-r--r--  bolt/lib/Passes/BinaryPasses.cpp  65
-rw-r--r--  bolt/lib/Passes/CacheMetrics.cpp  43
-rw-r--r--  bolt/lib/Passes/Inliner.cpp  4
-rw-r--r--  bolt/lib/Passes/MCF.cpp  33
-rw-r--r--  bolt/lib/Profile/BoltAddressTranslation.cpp  50
-rw-r--r--  bolt/lib/Profile/CMakeLists.txt  1
-rw-r--r--  bolt/lib/Profile/DataAggregator.cpp  28
-rw-r--r--  bolt/lib/Profile/DataReader.cpp  2
-rw-r--r--  bolt/lib/Profile/StaleProfileMatching.cpp  12
-rw-r--r--  bolt/lib/Profile/YAMLProfileReader.cpp  20
-rw-r--r--  bolt/lib/Profile/YAMLProfileWriter.cpp  7
-rw-r--r--  bolt/lib/Rewrite/BinaryPassManager.cpp  19
-rw-r--r--  bolt/lib/Rewrite/DWARFRewriter.cpp  3
-rw-r--r--  bolt/lib/Rewrite/LinuxKernelRewriter.cpp  2
-rw-r--r--  bolt/lib/Rewrite/RewriteInstance.cpp  83
-rw-r--r--  bolt/lib/Target/X86/X86MCPlusBuilder.cpp  38
-rw-r--r--  bolt/lib/Utils/CommandLineOpts.cpp  4
-rw-r--r--  bolt/runtime/instr.cpp  4
-rw-r--r--  bolt/test/CMakeLists.txt  3
-rw-r--r--  bolt/test/X86/bb-with-two-tail-calls.s  18
-rw-r--r--  bolt/test/X86/bolt-address-translation-yaml.test  7
-rw-r--r--  bolt/test/X86/bolt-address-translation.test  2
-rw-r--r--  bolt/test/X86/dwarf5-debug-names-class-type-decl.s  670
-rw-r--r--  bolt/test/X86/dwarf5-debug-names-enumeration-type-decl.s  485
-rw-r--r--  bolt/test/X86/dwarf5-debug-names-structure-type-decl.s  671
-rw-r--r--  bolt/test/X86/ignored-interprocedural-reference.s  49
-rw-r--r--  bolt/test/X86/register-fragments-bolt-symbols.s  8
-rw-r--r--  bolt/test/X86/yaml-non-simple.test  71
-rwxr-xr-x  bolt/test/link_fdata.py  3
-rwxr-xr-x  bolt/test/runtime/X86/hot-end-symbol.s  3
-rw-r--r--  bolt/unittests/CMakeLists.txt  2
-rw-r--r--  clang-tools-extra/CMakeLists.txt  2
-rw-r--r--  clang-tools-extra/clang-tidy/CMakeLists.txt  2
-rw-r--r--  clang-tools-extra/clang-tidy/bugprone/ForwardingReferenceOverloadCheck.cpp  4
-rw-r--r--  clang-tools-extra/clang-tidy/bugprone/IncorrectEnableIfCheck.cpp  5
-rw-r--r--  clang-tools-extra/clang-tidy/bugprone/SizeofExpressionCheck.cpp  32
-rw-r--r--  clang-tools-extra/clang-tidy/misc/CMakeLists.txt  1
-rw-r--r--  clang-tools-extra/clang-tidy/modernize/UseConstraintsCheck.cpp  8
-rw-r--r--  clang-tools-extra/clang-tidy/readability/ImplicitBoolConversionCheck.cpp  16
-rw-r--r--  clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.cpp  3
-rw-r--r--  clang-tools-extra/clangd/Hover.cpp  11
-rw-r--r--  clang-tools-extra/clangd/test/infinite-instantiation.test  5
-rw-r--r--  clang-tools-extra/clangd/unittests/CMakeLists.txt  1
-rw-r--r--  clang-tools-extra/clangd/unittests/ClangdTests.cpp  2
-rw-r--r--  clang-tools-extra/clangd/unittests/FindTargetTests.cpp  4
-rw-r--r--  clang-tools-extra/docs/CMakeLists.txt  1
-rw-r--r--  clang-tools-extra/docs/ReleaseNotes.rst  7
-rw-r--r--  clang-tools-extra/docs/clang-tidy/checks/readability/implicit-bool-conversion.rst  4
-rw-r--r--  clang-tools-extra/include-cleaner/unittests/CMakeLists.txt  1
-rw-r--r--  clang-tools-extra/modularize/ModularizeUtilities.cpp  6
-rw-r--r--  clang-tools-extra/pseudo/include/CMakeLists.txt  1
-rw-r--r--  clang-tools-extra/pseudo/tool/CMakeLists.txt  1
-rw-r--r--  clang-tools-extra/pseudo/unittests/CMakeLists.txt  1
-rw-r--r--  clang-tools-extra/test/CMakeLists.txt  1
-rw-r--r--  clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init-no-crash.cpp  8
-rw-r--r--  clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init.cpp  6
-rw-r--r--  clang-tools-extra/test/clang-tidy/checkers/misc/new-delete-overloads.cpp  10
-rw-r--r--  clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming-outofline.cpp  30
-rw-r--r--  clang-tools-extra/test/clang-tidy/checkers/readability/implicit-bool-conversion.c  354
-rw-r--r--  clang-tools-extra/unittests/CMakeLists.txt  2
-rw-r--r--  clang/CMakeLists.txt  13
-rw-r--r--  clang/bindings/python/tests/CMakeLists.txt  2
-rw-r--r--  clang/cmake/caches/CrossWinToARMLinux.cmake  38
-rw-r--r--  clang/cmake/caches/Fuchsia-stage2.cmake  1
-rw-r--r--  clang/cmake/caches/Fuchsia.cmake  7
-rw-r--r--  clang/cmake/caches/HLSL.cmake  2
-rw-r--r--  clang/cmake/caches/VectorEngine.cmake  4
-rw-r--r--  clang/cmake/modules/AddClang.cmake  3
-rw-r--r--  clang/docs/CMakeLists.txt  1
-rw-r--r--  clang/docs/ClangFormatStyleOptions.rst  12
-rw-r--r--  clang/docs/HLSL/AvailabilityDiagnostics.rst  137
-rw-r--r--  clang/docs/HLSL/HLSLDocs.rst  1
-rw-r--r--  clang/docs/LanguageExtensions.rst  39
-rw-r--r--  clang/docs/ReleaseNotes.rst  58
-rw-r--r--  clang/docs/analyzer/checkers.rst  125
-rw-r--r--  clang/docs/tools/clang-formatted-files.txt  3
-rw-r--r--  clang/include/clang/AST/ASTContext.h  11
-rw-r--r--  clang/include/clang/AST/ASTNodeTraverser.h  8
-rw-r--r--  clang/include/clang/AST/Decl.h  2
-rw-r--r--  clang/include/clang/AST/DeclTemplate.h  28
-rw-r--r--  clang/include/clang/AST/ExprCXX.h  19
-rw-r--r--  clang/include/clang/AST/OpenACCClause.h  29
-rw-r--r--  clang/include/clang/AST/RecursiveASTVisitor.h  25
-rw-r--r--  clang/include/clang/AST/Type.h  5
-rw-r--r--  clang/include/clang/Analysis/FlowSensitive/CNFFormula.h  179
-rw-r--r--  clang/include/clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h  7
-rw-r--r--  clang/include/clang/Basic/Attr.td  38
-rw-r--r--  clang/include/clang/Basic/AttrDocs.td  12
-rw-r--r--  clang/include/clang/Basic/BuiltinsAArch64.def  2
-rw-r--r--  clang/include/clang/Basic/BuiltinsAMDGPU.def  3
-rw-r--r--  clang/include/clang/Basic/BuiltinsWebAssembly.def  2
-rw-r--r--  clang/include/clang/Basic/BuiltinsX86.def  21
-rw-r--r--  clang/include/clang/Basic/DiagnosticCommonKinds.td  3
-rw-r--r--  clang/include/clang/Basic/DiagnosticDriverKinds.td  61
-rw-r--r--  clang/include/clang/Basic/DiagnosticFrontendKinds.td  23
-rw-r--r--  clang/include/clang/Basic/DiagnosticGroups.td  2
-rw-r--r--  clang/include/clang/Basic/DiagnosticInstallAPIKinds.td  6
-rw-r--r--  clang/include/clang/Basic/DiagnosticLexKinds.td  2
-rw-r--r--  clang/include/clang/Basic/DiagnosticParseKinds.td  8
-rw-r--r--  clang/include/clang/Basic/DiagnosticSemaKinds.td  113
-rw-r--r--  clang/include/clang/Basic/FileManager.h  2
-rw-r--r--  clang/include/clang/Basic/LangOptions.def  1
-rw-r--r--  clang/include/clang/Basic/OpenACCClauses.def  1
-rw-r--r--  clang/include/clang/Basic/OpenACCKinds.h  36
-rw-r--r--  clang/include/clang/Basic/arm_sve.td  6
-rw-r--r--  clang/include/clang/Driver/Options.td  21
-rw-r--r--  clang/include/clang/ExtractAPI/Serialization/SymbolGraphSerializer.h  9
-rw-r--r--  clang/include/clang/Format/Format.h  25
-rw-r--r--  clang/include/clang/Parse/Parser.h  16
-rw-r--r--  clang/include/clang/Sema/ParsedAttr.h  42
-rw-r--r--  clang/include/clang/Sema/Sema.h  208
-rw-r--r--  clang/include/clang/Sema/SemaOpenACC.h  29
-rw-r--r--  clang/include/clang/Sema/SemaOpenMP.h  4
-rw-r--r--  clang/include/clang/Sema/SemaPseudoObject.h  40
-rw-r--r--  clang/include/clang/Sema/SemaRISCV.h  52
-rw-r--r--  clang/include/clang/Sema/SemaX86.h  38
-rw-r--r--  clang/include/clang/Serialization/ASTReader.h  6
-rw-r--r--  clang/include/clang/Serialization/ASTWriter.h  3
-rw-r--r--  clang/include/clang/Serialization/ModuleFile.h  4
-rw-r--r--  clang/include/clang/StaticAnalyzer/Checkers/Checkers.td  27
-rw-r--r--  clang/lib/ARCMigrate/ARCMT.cpp  3
-rw-r--r--  clang/lib/ARCMigrate/ObjCMT.cpp  3
-rw-r--r--  clang/lib/AST/ASTContext.cpp  41
-rw-r--r--  clang/lib/AST/ASTDiagnostic.cpp  111
-rw-r--r--  clang/lib/AST/ASTImporter.cpp  11
-rw-r--r--  clang/lib/AST/DeclBase.cpp  28
-rw-r--r--  clang/lib/AST/DeclPrinter.cpp  7
-rw-r--r--  clang/lib/AST/DeclTemplate.cpp  32
-rw-r--r--  clang/lib/AST/ExprCXX.cpp  15
-rw-r--r--  clang/lib/AST/Interp/ByteCodeExprGen.cpp  200
-rw-r--r--  clang/lib/AST/Interp/ByteCodeExprGen.h  3
-rw-r--r--  clang/lib/AST/Interp/Context.cpp  3
-rw-r--r--  clang/lib/AST/Interp/Descriptor.cpp  7
-rw-r--r--  clang/lib/AST/Interp/EvaluationResult.cpp  4
-rw-r--r--  clang/lib/AST/Interp/Interp.cpp  46
-rw-r--r--  clang/lib/AST/Interp/Interp.h  34
-rw-r--r--  clang/lib/AST/Interp/InterpBuiltin.cpp  2
-rw-r--r--  clang/lib/AST/Interp/Pointer.cpp  14
-rw-r--r--  clang/lib/AST/Interp/Pointer.h  15
-rw-r--r--  clang/lib/AST/Interp/PrimType.h  28
-rw-r--r--  clang/lib/AST/Interp/Record.cpp  2
-rw-r--r--  clang/lib/AST/Interp/Record.h  4
-rw-r--r--  clang/lib/AST/JSONNodeDumper.cpp  4
-rw-r--r--  clang/lib/AST/ODRDiagsEmitter.cpp  19
-rw-r--r--  clang/lib/AST/ODRHash.cpp  4
-rw-r--r--  clang/lib/AST/OpenACCClause.cpp  20
-rw-r--r--  clang/lib/AST/ParentMap.cpp  16
-rw-r--r--  clang/lib/AST/StmtProfile.cpp  6
-rw-r--r--  clang/lib/AST/TemplateBase.cpp  14
-rw-r--r--  clang/lib/AST/TextNodeDumper.cpp  4
-rw-r--r--  clang/lib/AST/Type.cpp  8
-rw-r--r--  clang/lib/AST/TypePrinter.cpp  9
-rw-r--r--  clang/lib/Analysis/CFG.cpp  50
-rw-r--r--  clang/lib/Analysis/FlowSensitive/CMakeLists.txt  2
-rw-r--r--  clang/lib/Analysis/FlowSensitive/CNFFormula.cpp  303
-rw-r--r--  clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp  482
-rw-r--r--  clang/lib/Basic/FileManager.cpp  48
-rw-r--r--  clang/lib/Basic/Targets/Mips.cpp  28
-rw-r--r--  clang/lib/Basic/Targets/Mips.h  28
-rw-r--r--  clang/lib/Basic/Targets/WebAssembly.h  3
-rw-r--r--  clang/lib/Basic/Targets/X86.cpp  21
-rw-r--r--  clang/lib/Basic/Targets/X86.h  3
-rw-r--r--  clang/lib/CodeGen/CGAtomic.cpp  14
-rw-r--r--  clang/lib/CodeGen/CGBlocks.cpp  2
-rw-r--r--  clang/lib/CodeGen/CGBuiltin.cpp  50
-rw-r--r--  clang/lib/CodeGen/CGCall.cpp  87
-rw-r--r--  clang/lib/CodeGen/CGClass.cpp  18
-rw-r--r--  clang/lib/CodeGen/CGDecl.cpp  34
-rw-r--r--  clang/lib/CodeGen/CGDeclCXX.cpp  2
-rw-r--r--  clang/lib/CodeGen/CGException.cpp  8
-rw-r--r--  clang/lib/CodeGen/CGExpr.cpp  113
-rw-r--r--  clang/lib/CodeGen/CGExprAgg.cpp  45
-rw-r--r--  clang/lib/CodeGen/CGExprCXX.cpp  23
-rw-r--r--  clang/lib/CodeGen/CGExprComplex.cpp  10
-rw-r--r--  clang/lib/CodeGen/CGExprScalar.cpp  20
-rw-r--r--  clang/lib/CodeGen/CGNonTrivialStruct.cpp  18
-rw-r--r--  clang/lib/CodeGen/CGObjC.cpp  20
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntime.cpp  91
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp  16
-rw-r--r--  clang/lib/CodeGen/CGStmt.cpp  13
-rw-r--r--  clang/lib/CodeGen/CGStmtOpenMP.cpp  185
-rw-r--r--  clang/lib/CodeGen/CGValue.h  13
-rw-r--r--  clang/lib/CodeGen/CodeGenFunction.cpp  4
-rw-r--r--  clang/lib/CodeGen/CodeGenFunction.h  3
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.cpp  26
-rw-r--r--  clang/lib/CodeGen/CodeGenTypeCache.h  2
-rw-r--r--  clang/lib/CodeGen/CodeGenTypes.cpp  3
-rw-r--r--  clang/lib/CodeGen/CoverageMappingGen.cpp  115
-rw-r--r--  clang/lib/CodeGen/ItaniumCXXABI.cpp  91
-rw-r--r--  clang/lib/CodeGen/Targets/AArch64.cpp  13
-rw-r--r--  clang/lib/CodeGen/Targets/NVPTX.cpp  2
-rw-r--r--  clang/lib/CodeGen/Targets/X86.cpp  2
-rw-r--r--  clang/lib/Driver/Driver.cpp  11
-rw-r--r--  clang/lib/Driver/ToolChains/AIX.cpp  8
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/LoongArch.cpp  4
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/Mips.cpp  10
-rw-r--r--  clang/lib/Driver/ToolChains/Clang.cpp  23
-rw-r--r--  clang/lib/Driver/ToolChains/Darwin.cpp  58
-rw-r--r--  clang/lib/Driver/ToolChains/Darwin.h  4
-rw-r--r--  clang/lib/Driver/ToolChains/Gnu.cpp  79
-rw-r--r--  clang/lib/Driver/ToolChains/HIPSPV.cpp  2
-rw-r--r--  clang/lib/Driver/ToolChains/PS4CPU.cpp  18
-rw-r--r--  clang/lib/Driver/ToolChains/ZOS.cpp  6
-rw-r--r--  clang/lib/ExtractAPI/DeclarationFragments.cpp  45
-rw-r--r--  clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp  11
-rw-r--r--  clang/lib/Format/Format.cpp  55
-rw-r--r--  clang/lib/Format/FormatTokenSource.h  2
-rw-r--r--  clang/lib/Format/MacroExpander.cpp  12
-rw-r--r--  clang/lib/Format/Macros.h  24
-rw-r--r--  clang/lib/Format/SortJavaScriptImports.cpp  18
-rw-r--r--  clang/lib/Format/UnwrappedLineParser.cpp  30
-rw-r--r--  clang/lib/Format/WhitespaceManager.cpp  29
-rw-r--r--  clang/lib/Frontend/InitPreprocessor.cpp  5
-rw-r--r--  clang/lib/Frontend/SerializedDiagnosticPrinter.cpp  2
-rw-r--r--  clang/lib/Headers/CMakeLists.txt  6
-rw-r--r--  clang/lib/Headers/avx512erintrin.h  271
-rw-r--r--  clang/lib/Headers/avx512pfintrin.h  92
-rw-r--r--  clang/lib/Headers/hlsl/hlsl_intrinsics.h  15
-rw-r--r--  clang/lib/Headers/immintrin.h  8
-rw-r--r--  clang/lib/Headers/intrin.h  2
-rw-r--r--  clang/lib/Headers/module.modulemap  1
-rw-r--r--  clang/lib/Headers/opencl-c-base.h  4
-rw-r--r--  clang/lib/Headers/opencl-c.h  15
-rw-r--r--  clang/lib/Index/CommentToXML.cpp  6
-rw-r--r--  clang/lib/Index/IndexDecl.cpp  6
-rw-r--r--  clang/lib/Parse/ParseDecl.cpp  115
-rw-r--r--  clang/lib/Parse/ParseDeclCXX.cpp  57
-rw-r--r--  clang/lib/Parse/ParseOpenACC.cpp  30
-rw-r--r--  clang/lib/Parse/ParsePragma.cpp  5
-rw-r--r--  clang/lib/Sema/CMakeLists.txt  3
-rw-r--r--  clang/lib/Sema/HLSLExternalSemaSource.cpp  58
-rw-r--r--  clang/lib/Sema/OpenCLBuiltins.td  14
-rw-r--r--  clang/lib/Sema/Sema.cpp  8
-rw-r--r--  clang/lib/Sema/SemaAPINotes.cpp  3
-rw-r--r--  clang/lib/Sema/SemaAvailability.cpp  133
-rw-r--r--  clang/lib/Sema/SemaCXXScopeSpec.cpp  8
-rw-r--r--  clang/lib/Sema/SemaCast.cpp  5
-rw-r--r--  clang/lib/Sema/SemaChecking.cpp  1939
-rw-r--r--  clang/lib/Sema/SemaCodeComplete.cpp  11
-rw-r--r--  clang/lib/Sema/SemaDecl.cpp  123
-rw-r--r--  clang/lib/Sema/SemaDeclAttr.cpp  48
-rw-r--r--  clang/lib/Sema/SemaDeclCXX.cpp  262
-rw-r--r--  clang/lib/Sema/SemaExpr.cpp  199
-rw-r--r--  clang/lib/Sema/SemaExprCXX.cpp  11
-rw-r--r--  clang/lib/Sema/SemaInit.cpp  19
-rw-r--r--  clang/lib/Sema/SemaLambda.cpp  68
-rw-r--r--  clang/lib/Sema/SemaLookup.cpp  20
-rw-r--r--  clang/lib/Sema/SemaOpenACC.cpp  154
-rw-r--r--  clang/lib/Sema/SemaOpenMP.cpp  197
-rw-r--r--  clang/lib/Sema/SemaOverload.cpp  32
-rw-r--r--  clang/lib/Sema/SemaPseudoObject.cpp  75
-rw-r--r--  clang/lib/Sema/SemaRISCV.cpp  1427
-rw-r--r--  clang/lib/Sema/SemaRISCVVectorLookup.cpp  504
-rw-r--r--  clang/lib/Sema/SemaStmtAttr.cpp  8
-rw-r--r--  clang/lib/Sema/SemaTemplate.cpp  348
-rw-r--r--  clang/lib/Sema/SemaTemplateDeduction.cpp  25
-rw-r--r--  clang/lib/Sema/SemaTemplateInstantiate.cpp  11
-rw-r--r--  clang/lib/Sema/SemaTemplateInstantiateDecl.cpp  17
-rw-r--r--  clang/lib/Sema/SemaTemplateVariadic.cpp  5
-rw-r--r--  clang/lib/Sema/SemaX86.cpp  878
-rw-r--r--  clang/lib/Sema/TreeTransform.h  62
-rw-r--r--  clang/lib/Serialization/ASTCommon.h  24
-rw-r--r--  clang/lib/Serialization/ASTReader.cpp  14
-rw-r--r--  clang/lib/Serialization/ASTReaderDecl.cpp  35
-rw-r--r--  clang/lib/Serialization/ASTReaderStmt.cpp  1
-rw-r--r--  clang/lib/Serialization/ASTWriter.cpp  59
-rw-r--r--  clang/lib/Serialization/ASTWriterDecl.cpp  4
-rw-r--r--  clang/lib/Serialization/ASTWriterStmt.cpp  2
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/CMakeLists.txt  3
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/PutenvStackArrayChecker.cpp (renamed from clang/lib/StaticAnalyzer/Checkers/cert/PutenvWithAutoChecker.cpp)  35
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/SetgidSetuidOrderChecker.cpp  196
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp  72
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp  292
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLocalVarsChecker.cpp  68
-rw-r--r--  clang/lib/StaticAnalyzer/Core/ExprEngine.cpp  56
-rw-r--r--  clang/test/AST/Interp/arrays.cpp  14
-rw-r--r--  clang/test/AST/Interp/builtin-functions.cpp  2
-rw-r--r--  clang/test/AST/Interp/c.c  12
-rw-r--r--  clang/test/AST/Interp/cxx03.cpp  5
-rw-r--r--  clang/test/AST/Interp/cxx11.cpp  16
-rw-r--r--  clang/test/AST/Interp/cxx98.cpp  4
-rw-r--r--  clang/test/AST/Interp/eval-order.cpp  30
-rw-r--r--  clang/test/AST/Interp/functions.cpp  12
-rw-r--r--  clang/test/AST/Interp/objc.mm  13
-rw-r--r--  clang/test/AST/Interp/records.cpp  29
-rw-r--r--  clang/test/AST/Interp/sycl.cpp  9
-rw-r--r--  clang/test/AST/Interp/unions.cpp  67
-rw-r--r--  clang/test/AST/ast-dump-ctad-alias.cpp  20
-rw-r--r--  clang/test/AST/ast-dump-decl.cpp  4
-rw-r--r--  clang/test/AST/ast-dump-default-init-json.cpp  6
-rw-r--r--  clang/test/AST/ast-dump-default-init.cpp  2
-rw-r--r--  clang/test/AST/ast-dump-expr-json.cpp  2
-rw-r--r--  clang/test/AST/ast-dump-expr.cpp  2
-rw-r--r--  clang/test/AST/ast-dump-stmt-json.cpp  244
-rw-r--r--  clang/test/AST/ast-print-openacc-compute-construct.cpp  28
-rw-r--r--  clang/test/Analysis/Checkers/WebKit/ref-cntbl-base-virtual-dtor-ref-deref-on-diff-classes.cpp  1
-rw-r--r--  clang/test/Analysis/Checkers/WebKit/ref-cntbl-base-virtual-dtor-templates.cpp  324
-rw-r--r--  clang/test/Analysis/Checkers/WebKit/uncounted-local-vars.cpp  73
-rw-r--r--  clang/test/Analysis/Checkers/WebKit/uncounted-obj-arg.cpp  30
-rw-r--r--  clang/test/Analysis/cert/pos34-c-fp-suppression.cpp  51
-rw-r--r--  clang/test/Analysis/cert/pos34-c.cpp  61
-rw-r--r--  clang/test/Analysis/cxx-uninitialized-object.cpp  12
-rw-r--r--  clang/test/Analysis/cxxnewexpr-callback.cpp  4
-rw-r--r--  clang/test/Analysis/lifetime-extended-regions.cpp  10
-rw-r--r--  clang/test/Analysis/putenv-stack-array.c  90
-rw-r--r--  clang/test/Analysis/setgid-setuid-order-notes.c  73
-rw-r--r--  clang/test/Analysis/setgid-setuid-order.c  257
-rw-r--r--  clang/test/CMakeLists.txt  5
-rw-r--r--  clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp  10
-rw-r--r--  clang/test/CXX/basic/basic.stc/basic.stc.dynamic/basic.stc.dynamic.deallocation/p2.cpp  2
-rw-r--r--  clang/test/CXX/class.derived/class.derived.general/p2.cpp  116
-rw-r--r--  clang/test/CXX/class/class.mfct/class.mfct.non-static/p3.cpp  91
-rw-r--r--  clang/test/CXX/dcl.dcl/dcl.spec/dcl.constexpr/p1.cpp  24
-rw-r--r--  clang/test/CXX/drs/cwg16xx.cpp  2
-rw-r--r--  clang/test/CXX/drs/cwg18xx.cpp  19
-rw-r--r--  clang/test/CXX/drs/cwg28xx.cpp  71
-rw-r--r--  clang/test/CXX/drs/cwg292.cpp  17
-rw-r--r--  clang/test/CXX/expr/expr.unary/expr.new/p14.cpp  2
-rw-r--r--  clang/test/CXX/expr/expr.unary/expr.sizeof/p5-0x.cpp  2
-rw-r--r--  clang/test/CXX/special/class.temporary/p6.cpp  34
-rw-r--r--  clang/test/CXX/temp/temp.spec/temp.expl.spec/p12.cpp  70
-rw-r--r--  clang/test/ClangScanDeps/response-file-clang-cl.c  56
-rw-r--r--  clang/test/CodeCompletion/member-access.cpp  16
-rw-r--r--  clang/test/CodeGen/RISCV/riscv-inline-asm.c  6
-rw-r--r--  clang/test/CodeGen/SystemZ/sync-builtins-i128-8Al.c  8
-rw-r--r--  clang/test/CodeGen/X86/avx512er-builtins.c  347
-rw-r--r--  clang/test/CodeGen/X86/avx512pf-builtins.c  100
-rw-r--r--  clang/test/CodeGen/aarch64-byval-temp.c  45
-rw-r--r--  clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_reinterpret_svcount_svbool.c  6
-rw-r--r--  clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c  2
-rw-r--r--  clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp  2
-rw-r--r--  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_reinterpret-bfloat.c  57
-rw-r--r--  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_reinterpret.c  253
-rw-r--r--  clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_reinterpret_from_streaming_mode.c  35
-rw-r--r--  clang/test/CodeGen/aarch64-sve-vector-subscript-ops.c  22
-rw-r--r--  clang/test/CodeGen/aarch64-varargs.c  30
-rw-r--r--  clang/test/CodeGen/assume_attr.c  58
-rw-r--r--  clang/test/CodeGen/attr-counted-by.c  12
-rw-r--r--  clang/test/CodeGen/attr-cpuspecific.c  10
-rw-r--r--  clang/test/CodeGen/attr-target-x86.c  4
-rw-r--r--  clang/test/CodeGen/builtins-wasm.c  12
-rw-r--r--  clang/test/CodeGen/darwin-target-variant.c  2
-rw-r--r--  clang/test/CodeGen/fat-lto-objects.c  2
-rw-r--r--  clang/test/CodeGen/function-target-features.c  4
-rw-r--r--  clang/test/CodeGen/functions.c  12
-rw-r--r--  clang/test/CodeGen/nofpclass.c  22
-rw-r--r--  clang/test/CodeGen/target-builtin-noerror.c  2
-rw-r--r--  clang/test/CodeGenCXX/assume_attr.cpp  48
-rw-r--r--  clang/test/CodeGenCXX/atomicinit.cpp  4
-rw-r--r--  clang/test/CodeGenCXX/auto-var-init.cpp  4
-rw-r--r--  clang/test/CodeGenCXX/builtin-amdgcn-fence.cpp  103
-rw-r--r--  clang/test/CodeGenCXX/cxx1y-sized-deallocation.cpp  10
-rw-r--r--  clang/test/CodeGenCXX/cxx1z-aligned-allocation.cpp  6
-rw-r--r--  clang/test/CodeGenCXX/cxx2a-destroying-delete.cpp  4
-rw-r--r--  clang/test/CodeGenCXX/cxx2b-deducing-this.cpp  63
-rw-r--r--  clang/test/CodeGenCXX/delete-two-arg.cpp  4
-rw-r--r--  clang/test/CodeGenCXX/delete.cpp  12
-rw-r--r--  clang/test/CodeGenCXX/dllimport.cpp  4
-rw-r--r--  clang/test/CodeGenCXX/dynamic-cast-address-space.cpp  123
-rw-r--r--  clang/test/CodeGenCXX/eh.cpp  6
-rw-r--r--  clang/test/CodeGenCXX/fmv-namespace.cpp  93
-rw-r--r--  clang/test/CodeGenCXX/new.cpp  6
-rw-r--r--  clang/test/CodeGenCXX/nrvo.cpp  4
-rw-r--r--  clang/test/CodeGenCXX/ps-dllstorage-vtable-rtti.cpp  114
-rw-r--r--  clang/test/CodeGenCXX/ps4-dllstorage-vtable-rtti.cpp  211
-rw-r--r--  clang/test/CodeGenCXX/template-param-objects-address-space.cpp  10
-rw-r--r--  clang/test/CodeGenCXX/throw-expression-typeinfo-in-address-space.cpp  2
-rw-r--r--  clang/test/CodeGenCXX/try-catch-with-address-space.cpp  7
-rw-r--r--  clang/test/CodeGenCXX/typeid-cxx11-with-address-space.cpp  4
-rw-r--r--  clang/test/CodeGenCXX/typeid-with-address-space.cpp  11
-rw-r--r--  clang/test/CodeGenCXX/typeinfo-with-address-space.cpp  7
-rw-r--r--  clang/test/CodeGenCXX/vtable-assume-load-address-space.cpp  110
-rw-r--r--  clang/test/CodeGenCXX/vtable-pointer-initialization-address-space.cpp  7
-rw-r--r--  clang/test/CodeGenCXX/vtt-address-space.cpp  7
-rw-r--r--  clang/test/CodeGenCXX/wasm-eh.cpp  8
-rw-r--r--  clang/test/CodeGenCXX/weak-external.cpp  2
-rw-r--r--  clang/test/CodeGenCXX/windows-implicit-dllexport-template-specialization.cpp  13
-rw-r--r--  clang/test/CodeGenCXX/windows-itanium-dllexport.cpp  18
-rw-r--r--  clang/test/CodeGenCoroutines/coro-aligned-alloc-2.cpp  2
-rw-r--r--  clang/test/CodeGenCoroutines/coro-aligned-alloc.cpp  6
-rw-r--r--  clang/test/CodeGenCoroutines/coro-alloc.cpp  6
-rw-r--r--  clang/test/CodeGenCoroutines/coro-cleanup.cpp  6
-rw-r--r--  clang/test/CodeGenCoroutines/coro-dealloc.cpp  2
-rw-r--r--  clang/test/CodeGenCoroutines/coro-gro.cpp  3
-rw-r--r--  clang/test/CodeGenCoroutines/pr56919.cpp  9
-rw-r--r--  clang/test/CodeGenOpenCL/builtins-amdgcn-gfx940.cl  52
-rw-r--r--  clang/test/CodeGenOpenCLCXX/array-type-infinite-loop.clcpp  25
-rw-r--r--  clang/test/CoverageMapping/builtinmacro.c  2
-rw-r--r--  clang/test/CoverageMapping/macros.c  8
-rw-r--r--  clang/test/CoverageMapping/mcdc-scratch-space.c  65
-rw-r--r--  clang/test/CoverageMapping/mcdc-system-headers.cpp  50
-rw-r--r--  clang/test/CoverageMapping/templates.cpp  3
-rw-r--r--  clang/test/Driver/Ofast.c  7
-rw-r--r--  clang/test/Driver/aarch64-v95a.c  8
-rw-r--r--  clang/test/Driver/android-unversioned-fallback-warning.cpp  8
-rw-r--r--  clang/test/Driver/cl-options.c  3
-rw-r--r--  clang/test/Driver/cl-x86-flags.c  10
-rw-r--r--  clang/test/Driver/clang_f_opts.c  6
-rw-r--r--  clang/test/Driver/cuda-cross-compiling.c  4
-rw-r--r--  clang/test/Driver/dxc_dxv_path.hlsl  2
-rw-r--r--  clang/test/Driver/fast-math.c  24
-rw-r--r--  clang/test/Driver/fat-archive-unbundle-ext.c  2
-rw-r--r--  clang/test/Driver/fatal-warnings.c  4
-rw-r--r--  clang/test/Driver/fbinutils-version.c  14
-rw-r--r--  clang/test/Driver/fdirect-access-external-data.c  14
-rw-r--r--  clang/test/Driver/fembed-bitcode.c  10
-rw-r--r--  clang/test/Driver/fexcess-precision.c  32
-rw-r--r--  clang/test/Driver/fextend-args.c  2
-rw-r--r--  clang/test/Driver/fforce-dwarf-frame.c  6
-rw-r--r--  clang/test/Driver/fgnuc-version.c  18
-rw-r--r--  clang/test/Driver/flags.c  6
-rw-r--r--  clang/test/Driver/flang/msvc-link.f90  2
-rw-r--r--  clang/test/Driver/fmemprof.cpp  10
-rw-r--r--  clang/test/Driver/fopenmp.c  204
-rw-r--r--  clang/test/Driver/fortran.f95  6
-rw-r--r--  clang/test/Driver/fpatchable-function-entry.c  26
-rw-r--r--  clang/test/Driver/frame-pointer-elim.c  88
-rw-r--r--  clang/test/Driver/freebsd-mips-as.c  34
-rw-r--r--  clang/test/Driver/freebsd.cpp  10
-rw-r--r--  clang/test/Driver/fsanitize-coverage.c  106
-rw-r--r--  clang/test/Driver/fsanitize-ignorelist.c  32
-rw-r--r--  clang/test/Driver/fsanitize-memory-param-retval.c  16
-rw-r--r--  clang/test/Driver/fsanitize-metadata-ignorelist.c  8
-rw-r--r--  clang/test/Driver/fsanitize-object-size.c  32
-rw-r--r--  clang/test/Driver/fsemantic-interposition.c  22
-rw-r--r--  clang/test/Driver/fsjlj-exceptions.c  4
-rw-r--r--  clang/test/Driver/fuse-ld-windows.c  8
-rw-r--r--  clang/test/Driver/fuse-ld.c  28
-rw-r--r--  clang/test/Driver/fuzzer.c  10
-rw-r--r--  clang/test/Driver/fveclib.c  34
-rw-r--r--  clang/test/Driver/loongarch-mlasx-error.c  4
-rw-r--r--  clang/test/Driver/loongarch-mlsx-error.c  2
-rw-r--r--  clang/test/Driver/m68k-features.cpp  42
-rw-r--r--  clang/test/Driver/m68k-macros.cpp  40
-rw-r--r--  clang/test/Driver/m68k-sub-archs.cpp  48
-rw-r--r--  clang/test/Driver/masm.c  6
-rw-r--r--  clang/test/Driver/masm.s  6
-rw-r--r--  clang/test/Driver/mbackchain.c  2
-rw-r--r--  clang/test/Driver/mcount.c  6
-rw-r--r--  clang/test/Driver/mdouble.c  2
-rw-r--r--  clang/test/Driver/memtag-stack.c  8
-rw-r--r--  clang/test/Driver/mfentry.c  18
-rw-r--r--  clang/test/Driver/mglobal-merge.c  18
-rw-r--r--  clang/test/Driver/mingw-implicit-extension-windows.c  6
-rw-r--r--  clang/test/Driver/mingw-libgcc.c  24
-rw-r--r--  clang/test/Driver/mingw-msvcrt.c  10
-rw-r--r--  clang/test/Driver/mingw-sanitizers.c  6
-rw-r--r--  clang/test/Driver/mingw-sysroot.cpp  6
-rw-r--r--  clang/test/Driver/mingw-windowsapp.c  4
-rw-r--r--  clang/test/Driver/mingw.cpp  34
-rw-r--r--  clang/test/Driver/mips-abi.c  70
-rw-r--r--  clang/test/Driver/mips-abicalls-error.c  2
-rw-r--r--  clang/test/Driver/mips-abicalls-warning.c  20
-rw-r--r--  clang/test/Driver/mips-as.c  272
-rw-r--r--  clang/test/Driver/mips-features.c  174
-rw-r--r--  clang/test/Driver/mips-float.c  28
-rw-r--r--  clang/test/Driver/mips-gpopt-warning.c  4
-rw-r--r--  clang/test/Driver/mips-ias-Wa.s  54
-rw-r--r--  clang/test/Driver/mips-integrated-as.s  122
-rw-r--r--  clang/test/Driver/mips-mabs-warning.c  4
-rw-r--r--  clang/test/Driver/mlong-double-128.c  16
-rw-r--r--  clang/test/Driver/mlong-double-64.c  10
-rw-r--r--  clang/test/Driver/module-output.cppm  4
-rw-r--r--  clang/test/Driver/ms-bitfields.c  4
-rw-r--r--  clang/test/Driver/ms-define-stdc.c  11
-rw-r--r--  clang/test/Driver/msan.c  40
-rw-r--r--  clang/test/Driver/msc-version.c  18
-rw-r--r--  clang/test/Driver/msp430-hwmult.c  30
-rw-r--r--  clang/test/Driver/msvc-compiler-rt.c  6
-rw-r--r--  clang/test/Driver/msvc-static-rtti.cpp  4
-rw-r--r--  clang/test/Driver/msvc-triple.c  8
-rw-r--r--  clang/test/Driver/msvc_forward.c  2
-rw-r--r--  clang/test/Driver/objc-encode-cxx-class-template-spec.m  4
-rw-r--r--  clang/test/Driver/openbsd.cpp  16
-rw-r--r--  clang/test/Driver/opencl.cl  2
-rw-r--r--  clang/test/Driver/openmp-offload-infer.c  2
-rw-r--r--  clang/test/Driver/openmp-system-arch.c  2
-rw-r--r--  clang/test/Driver/ps4-ps5-visibility-dllstorageclass.c  39
-rw-r--r--  clang/test/Driver/ps4-visibility.cl  32
-rw-r--r--  clang/test/Driver/ps5-visibility.cl  33
-rw-r--r--  clang/test/Driver/tocdata-cc1.c  17
-rw-r--r--  clang/test/Driver/x-args.c  4
-rw-r--r--  clang/test/Driver/x86-target-features.c  13
-rw-r--r--  clang/test/ExtractAPI/non_type_template.cpp  44
-rw-r--r--  clang/test/ExtractAPI/objc_external_category.m  18
-rw-r--r--  clang/test/Frontend/optimization-remark-options.c  4
-rw-r--r--  clang/test/Frontend/x86-target-cpu.c  10
-rw-r--r--  clang/test/InstallAPI/alias_list.test  2
-rw-r--r--  clang/test/InstallAPI/binary-attributes.test  4
-rw-r--r--  clang/test/InstallAPI/exclusive-passes-2.test  9
-rw-r--r--  clang/test/InstallAPI/exclusive-passes-3.test  86
-rw-r--r--  clang/test/InstallAPI/exclusive-passes.test  15
-rw-r--r--  clang/test/InstallAPI/invalid-exclusive-passes.test  33
-rw-r--r--  clang/test/Lexer/cxx-features.cpp  20
-rw-r--r--  clang/test/Misc/diag-template-diffing-cxx11.cpp (renamed from clang/test/Misc/diag-template-diffing.cpp)  0
-rw-r--r--  clang/test/Misc/diag-template-diffing-cxx26.cpp  49
-rw-r--r--  clang/test/Modules/implicit-module-remap.cpp  21
-rw-r--r--  clang/test/Modules/no-implicit-declarations.cppm  26
-rw-r--r--  clang/test/Modules/pr91418.cppm  65
-rw-r--r--  clang/test/OpenMP/assumes_codegen.cpp  80
-rw-r--r--  clang/test/OpenMP/assumes_print.cpp  6
-rw-r--r--  clang/test/OpenMP/assumes_template_print.cpp  20
-rw-r--r--  clang/test/OpenMP/atomic_messages.c  96
-rw-r--r--  clang/test/OpenMP/distribute_firstprivate_messages.cpp  6
-rw-r--r--  clang/test/OpenMP/distribute_parallel_for_firstprivate_messages.cpp  18
-rw-r--r--  clang/test/OpenMP/distribute_parallel_for_lastprivate_messages.cpp  18
-rw-r--r--  clang/test/OpenMP/distribute_parallel_for_private_messages.cpp  2
-rw-r--r--  clang/test/OpenMP/distribute_parallel_for_reduction_messages.cpp  20
-rw-r--r--  clang/test/OpenMP/distribute_parallel_for_simd_private_messages.cpp  2
-rw-r--r--  clang/test/OpenMP/distribute_parallel_for_simd_shared_messages.cpp  16
-rw-r--r--  clang/test/OpenMP/distribute_simd_firstprivate_messages.cpp  18
-rw-r--r--  clang/test/OpenMP/distribute_simd_lastprivate_messages.cpp  18
-rw-r--r--  clang/test/OpenMP/distribute_simd_loop_messages.cpp  30
-rw-r--r--  clang/test/OpenMP/distribute_simd_private_messages.cpp  2
-rw-r--r--  clang/test/OpenMP/distribute_simd_reduction_messages.cpp  20
-rw-r--r--  clang/test/OpenMP/nvptx_lambda_capturing.cpp  246
-rw-r--r--  clang/test/OpenMP/reduction_implicit_map.cpp  2
-rw-r--r--  clang/test/OpenMP/remarks_parallel_in_multiple_target_state_machines.c  8
-rw-r--r--  clang/test/OpenMP/remarks_parallel_in_target_state_machine.c  4
-rw-r--r--  clang/test/OpenMP/requires_default_atomic_mem_order_messages.cpp  4
-rw-r--r--  clang/test/OpenMP/requires_messages.cpp  26
-rw-r--r--  clang/test/OpenMP/target_device_ancestor_messages.cpp  2
-rw-r--r--  clang/test/OpenMP/target_firstprivate_messages.cpp  2
-rw-r--r--  clang/test/OpenMP/target_map_messages.cpp  20
-rw-r--r--  clang/test/OpenMP/target_parallel_for_private_messages.cpp  2
-rw-r--r--  clang/test/OpenMP/target_parallel_for_simd_private_messages.cpp  2
-rw-r--r--  clang/test/OpenMP/target_private_messages.cpp  2
-rw-r--r--  clang/test/OpenMP/target_simd_private_messages.cpp  2
-rw-r--r--  clang/test/OpenMP/target_teams_distribute_firstprivate_messages.cpp  2
-rw-r--r--  clang/test/OpenMP/target_update_messages.cpp  4
-rw-r--r--  clang/test/OpenMP/teams_distribute_loop_messages.cpp  28
-rw-r--r--  clang/test/OpenMP/teams_distribute_parallel_for_loop_messages.cpp  28
-rw-r--r--  clang/test/OpenMP/teams_distribute_parallel_for_simd_loop_messages.cpp  28
-rw-r--r--  clang/test/OpenMP/teams_distribute_simd_loop_messages.cpp  28
-rw-r--r--  clang/test/OpenMP/threadprivate_codegen.cpp  3686
-rw-r--r--  clang/test/OpenMP/tile_codegen.cpp  887
-rw-r--r--  clang/test/OpenMP/tile_codegen_for_dependent.cpp  130
-rw-r--r--  clang/test/OpenMP/tile_codegen_tile_for.cpp  218
-rw-r--r--  clang/test/PCH/cxx1z-aligned-alloc.cpp  10
-rw-r--r--  clang/test/PCH/pack_indexing.cpp  4
-rw-r--r--  clang/test/Parser/MicrosoftExtensions.cpp  2
-rw-r--r--  clang/test/Parser/altivec.c  24
-rw-r--r--  clang/test/Parser/attr-availability.c  2
-rw-r--r--  clang/test/Parser/cxx-altivec.cpp  24
-rw-r--r--  clang/test/Parser/lax-conv.cpp  52
-rw-r--r--  clang/test/Parser/objcbridge-related-attribute.m  4
-rw-r--r--  clang/test/Parser/pragma-attribute.cpp  2
-rw-r--r--  clang/test/ParserOpenACC/parse-clauses.c  26
-rw-r--r--  clang/test/Preprocessor/predefined-arch-macros.c  12
-rw-r--r--  clang/test/Preprocessor/riscv-target-features.c  36
-rw-r--r--  clang/test/Preprocessor/stdc-ms-extension.cpp  9
-rw-r--r--  clang/test/Preprocessor/x86_target_features.c  50
-rw-r--r--  clang/test/Profile/c-unreachable-after-switch.c  4
-rw-r--r--  clang/test/Profile/misexpect-branch.c  8
-rw-r--r--  clang/test/Profile/misexpect-switch-default.c  2
-rw-r--r--  clang/test/Profile/misexpect-switch.c  2
-rw-r--r--  clang/test/Sema/atomic-ops.c  32
-rw-r--r--  clang/test/Sema/attr-assume.c  14
-rw-r--r--  clang/test/Sema/attr-availability-ios.c  1
-rw-r--r--  clang/test/Sema/attr-objc-bridge-related.m  2
-rw-r--r--  clang/test/Sema/builtin-assume.c  12
-rw-r--r--  clang/test/Sema/builtins-x86.c  8
-rw-r--r--  clang/test/Sema/builtins.c  8
-rw-r--r--  clang/test/Sema/constant_builtins_vector.cpp  4
-rw-r--r--  clang/test/Sema/fmv-namespace.cpp  12
-rw-r--r--  clang/test/Sema/stmtexprs.c  2
-rw-r--r--  clang/test/Sema/x86-eval-method.c  4
-rw-r--r--  clang/test/Sema/x86_64-eval-method.c  2
-rw-r--r--  clang/test/SemaCUDA/device-var-init.cu  314
-rw-r--r--  clang/test/SemaCUDA/function-overload.cu  2
-rw-r--r--  clang/test/SemaCUDA/union-init.cu  8
-rw-r--r--  clang/test/SemaCXX/MicrosoftExtensions.cpp  8
-rw-r--r--  clang/test/SemaCXX/addr-label-in-coroutines.cpp  18
-rw-r--r--  clang/test/SemaCXX/attribute-pack-expansion.cpp  20
-rw-r--r--  clang/test/SemaCXX/builtin-operator-new-delete.cpp  2
-rw-r--r--  clang/test/SemaCXX/constexpr-default-arg.cpp  4
-rw-r--r--  clang/test/SemaCXX/cxx11-default-member-initializers.cpp  74
-rw-r--r--  clang/test/SemaCXX/cxx1y-sized-deallocation.cpp  2
-rw-r--r--  clang/test/SemaCXX/cxx20-ctad-type-alias.cpp  7
-rw-r--r--  clang/test/SemaCXX/cxx23-assume.cpp  35
-rw-r--r--  clang/test/SemaCXX/cxx2b-consteval-propagate.cpp  8
-rw-r--r--  clang/test/SemaCXX/cxx2c-pack-indexing.cpp  8
-rw-r--r--  clang/test/SemaCXX/eval-crashes.cpp  6
-rw-r--r--  clang/test/SemaCXX/overload-decl.cpp  17
-rw-r--r--  clang/test/SemaCXX/overload-template.cpp  10
-rw-r--r--  clang/test/SemaCXX/overloaded-operator.cpp  11
-rw-r--r--  clang/test/SemaCXX/recovery-expr-type.cpp  2
-rw-r--r--  clang/test/SemaCXX/source_location.cpp  17
-rw-r--r--  clang/test/SemaCXX/type-traits.cpp  4
-rw-r--r--  clang/test/SemaCXX/unavailable_aligned_allocation.cpp  15
-rw-r--r--  clang/test/SemaCXX/warn-thread-safety-analysis.cpp  10
-rw-r--r--  clang/test/SemaCXX/warn-unsafe-buffer-usage-pragma-misuse.cpp  4
-rw-r--r--  clang/test/SemaHLSL/Availability/attr-availability-compute.hlsl  73
-rw-r--r--  clang/test/SemaHLSL/Availability/attr-availability-errors.hlsl  11
-rw-r--r--  clang/test/SemaHLSL/Availability/attr-availability-mesh.hlsl  73
-rw-r--r--  clang/test/SemaHLSL/Availability/attr-availability-pixel.hlsl  63
-rw-r--r--  clang/test/SemaHLSL/AvailabilityMarkup.hlsl  25
-rw-r--r--  clang/test/SemaHLSL/WaveBuiltinAvailability.hlsl  4
-rw-r--r--  clang/test/SemaObjC/unguarded-availability.m  20
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-attach-clause.c  2
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-clause-ast.cpp  248
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-copy-clause.c  8
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-copy-clause.cpp  16
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-copyin-clause.c  10
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-copyin-clause.cpp  16
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-copyout-clause.c  10
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-copyout-clause.cpp  16
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-create-clause.c  10
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-create-clause.cpp  16
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-device_type-clause.c  2
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-deviceptr-clause.c  2
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-firstprivate-clause.c  8
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-firstprivate-clause.cpp  16
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-no_create-clause.c  8
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-no_create-clause.cpp  16
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-present-clause.c  8
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-present-clause.cpp  16
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-private-clause.c  10
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-private-clause.cpp  16
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-reduction-clause.c  107
-rw-r--r--  clang/test/SemaOpenACC/compute-construct-reduction-clause.cpp  175
-rw-r--r--  clang/test/SemaOpenCL/builtins-amdgcn-error.cl  4
-rw-r--r--  clang/test/SemaOpenCL/builtins-amdgcn-gfx940-err.cl  14
-rw-r--r--  clang/test/SemaOpenCL/vector_swizzle_length.cl  4
-rw-r--r--  clang/test/SemaTemplate/cwg2398.cpp  15
-rw-r--r--  clang/test/SemaTemplate/deduction-guide.cpp  47
-rw-r--r--  clang/test/SemaTemplate/dependent-names.cpp  14
-rw-r--r--  clang/test/SemaTemplate/destructor-template.cpp  14
-rw-r--r--  clang/test/SemaTemplate/make_integer_seq.cpp  4
-rw-r--r--  clang/test/SemaTemplate/ms-function-specialization-class-scope.cpp  52
-rw-r--r--  clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp  12
-rw-r--r--  clang/test/SemaTemplate/typo-dependent-name.cpp  7
-rw-r--r--  clang/tools/amdgpu-arch/AMDGPUArchByHIP.cpp  2
-rw-r--r--  clang/tools/amdgpu-arch/AMDGPUArchByHSA.cpp  2
-rw-r--r--  clang/tools/clang-format/ClangFormat.cpp  22
-rw-r--r--  clang/tools/clang-installapi/InstallAPIOpts.td  3
-rw-r--r--  clang/tools/clang-installapi/Options.cpp  74
-rw-r--r--  clang/tools/clang-installapi/Options.h  2
-rw-r--r--  clang/tools/clang-repl/CMakeLists.txt  43
-rw-r--r--  clang/tools/clang-scan-deps/ClangScanDeps.cpp  9
-rw-r--r--  clang/tools/driver/cc1as_main.cpp  3
-rw-r--r--  clang/tools/libclang/CIndex.cpp  16
-rw-r--r--  clang/tools/libclang/CMakeLists.txt  2
-rw-r--r--  clang/tools/scan-build-py/tests/functional/exec/CMakeLists.txt  6
-rw-r--r--  clang/unittests/AST/ASTImporterTest.cpp  4
-rw-r--r--  clang/unittests/AST/DeclTest.cpp  31
-rw-r--r--  clang/unittests/AST/Interp/Descriptor.cpp  24
-rw-r--r--  clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp  6
-rw-r--r--  clang/unittests/CMakeLists.txt  2
-rw-r--r--  clang/unittests/Driver/DXCModeTest.cpp  19
-rw-r--r--  clang/unittests/Format/CleanupTest.cpp  14
-rw-r--r--  clang/unittests/Format/ConfigParseTest.cpp  2
-rw-r--r--  clang/unittests/Format/DefinitionBlockSeparatorTest.cpp  29
-rw-r--r--  clang/unittests/Format/FormatTest.cpp  30
-rw-r--r--  clang/unittests/Format/FormatTestBase.h  27
-rw-r--r--  clang/unittests/Format/FormatTestCSharp.cpp  6
-rw-r--r--  clang/unittests/Format/FormatTestJS.cpp  12
-rw-r--r--  clang/unittests/Format/FormatTestJson.cpp  13
-rw-r--r--  clang/unittests/Format/FormatTestProto.cpp  10
-rw-r--r--  clang/unittests/Format/FormatTestRawStrings.cpp  5
-rw-r--r--  clang/unittests/Format/FormatTestSelective.cpp  4
-rw-r--r--  clang/unittests/Format/FormatTestTableGen.cpp  14
-rw-r--r--  clang/unittests/Format/FormatTestUtils.h  2
-rw-r--r--  clang/unittests/Format/FormatTestVerilog.cpp  2
-rw-r--r--  clang/unittests/Format/FormatTokenSourceTest.cpp  5
-rw-r--r--  clang/unittests/Format/MacroCallReconstructorTest.cpp  44
-rw-r--r--  clang/unittests/Format/MacroExpanderTest.cpp  19
-rw-r--r--  clang/unittests/Format/MatchFilePathTest.cpp  2
-rw-r--r--  clang/unittests/Format/NamespaceEndCommentsFixerTest.cpp  11
-rw-r--r--  clang/unittests/Format/ObjCPropertyAttributeOrderFixerTest.cpp  2
-rw-r--r--  clang/unittests/Format/QualifierFixerTest.cpp  2
-rw-r--r--  clang/unittests/Format/SortImportsTestJS.cpp  9
-rw-r--r--  clang/unittests/Format/SortImportsTestJava.cpp  2
-rw-r--r--  clang/unittests/Format/SortIncludesTest.cpp  1950
-rw-r--r--  clang/unittests/Format/TestLexer.h  14
-rw-r--r--  clang/unittests/Format/TokenAnnotatorTest.cpp  81
-rw-r--r--  clang/unittests/Format/UsingDeclarationsSorterTest.cpp  8
-rw-r--r--  clang/unittests/Interpreter/CMakeLists.txt  43
-rw-r--r--  clang/unittests/StaticAnalyzer/CallEventTest.cpp  2
-rw-r--r--  clang/unittests/StaticAnalyzer/MemRegionDescriptiveNameTest.cpp  2
-rw-r--r--  clang/utils/ClangVisualizers/CMakeLists.txt  2
-rw-r--r--  clang/utils/TableGen/CMakeLists.txt  2
-rw-r--r--  clang/utils/TableGen/ClangAttrEmitter.cpp  2
-rw-r--r--  clang/utils/TableGen/ClangDiagnosticsEmitter.cpp  2
-rw-r--r--  clang/utils/TableGen/SveEmitter.cpp  21
-rw-r--r--  clang/utils/analyzer/entrypoint.py  2
-rw-r--r--  clang/utils/ci/buildkite-pipeline.yml  105
-rwxr-xr-x  clang/utils/ci/run-buildbot  25
-rw-r--r--  clang/utils/hmaptool/CMakeLists.txt  2
-rwxr-xr-x  clang/www/cxx_dr_status.html  42
-rwxr-xr-x  clang/www/cxx_status.html  11
-rw-r--r--  compiler-rt/cmake/config-ix.cmake  16
-rw-r--r--  compiler-rt/lib/ctx_profile/CMakeLists.txt  5
-rw-r--r--  compiler-rt/lib/dfsan/dfsan_allocator.cpp  2
-rw-r--r--  compiler-rt/lib/dfsan/dfsan_custom.cpp  26
-rw-r--r--  compiler-rt/lib/lsan/lsan_allocator.cpp  2
-rw-r--r--  compiler-rt/lib/msan/msan_allocator.cpp  2
-rwxr-xr-x  compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh  1
-rw-r--r--  compiler-rt/lib/scudo/standalone/combined.h  12
-rw-r--r--  compiler-rt/lib/xray/tests/CMakeLists.txt  5
-rw-r--r--  compiler-rt/lib/xray/xray_buffer_queue.h  4
-rw-r--r--  compiler-rt/lib/xray/xray_trampoline_x86_64.S  15
-rw-r--r--  compiler-rt/test/asan/TestCases/Windows/bitfield_uaf.cpp  4
-rw-r--r--  compiler-rt/test/asan/TestCases/Windows/calloc_left_oob.cpp  2
-rw-r--r--  compiler-rt/test/asan/TestCases/Windows/calloc_right_oob.cpp  2
-rw-r--r--  compiler-rt/test/asan/TestCases/Windows/calloc_uaf.cpp  4
-rw-r--r--  compiler-rt/test/ctx_profile/TestCases/generate-context.cpp  2
-rw-r--r--  compiler-rt/test/ctx_profile/lit.cfg.py  7
-rw-r--r--  compiler-rt/test/dfsan/custom.cpp  67
-rw-r--r--  compiler-rt/test/profile/Linux/counter_promo_for.c  20
-rw-r--r--  compiler-rt/test/profile/Linux/counter_promo_while.c  16
-rw-r--r--  cross-project-tests/CMakeLists.txt  6
-rw-r--r--  flang/CMakeLists.txt  5
-rw-r--r--  flang/cmake/modules/AddFlang.cmake  3
-rw-r--r--  flang/docs/CMakeLists.txt  2
-rw-r--r--  flang/docs/Extensions.md  4
-rw-r--r--  flang/include/flang/Common/Fortran-features.h  3
-rw-r--r--  flang/include/flang/Common/api-attrs.h  22
-rw-r--r--  flang/include/flang/Common/visit.h  7
-rw-r--r--  flang/include/flang/Evaluate/characteristics.h  2
-rw-r--r--  flang/include/flang/Evaluate/constant.h  3
-rw-r--r--  flang/include/flang/Evaluate/expression.h  3
-rw-r--r--  flang/include/flang/Evaluate/type.h  3
-rw-r--r--  flang/include/flang/Optimizer/Builder/FIRBuilder.h  6
-rw-r--r--  flang/include/flang/Optimizer/Builder/IntrinsicCall.h  3
-rw-r--r--  flang/include/flang/Optimizer/Builder/Runtime/Numeric.h  8
-rw-r--r--  flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h  6
-rw-r--r--  flang/include/flang/Optimizer/Builder/Runtime/Support.h  31
-rw-r--r--  flang/include/flang/Optimizer/Dialect/CMakeLists.txt  2
-rw-r--r--  flang/include/flang/Optimizer/Dialect/CUF/CUFOps.td  10
-rw-r--r--  flang/include/flang/Optimizer/Dialect/FIRAttr.td  11
-rw-r--r--  flang/include/flang/Optimizer/Dialect/FIROps.td  37
-rw-r--r--  flang/include/flang/Optimizer/Dialect/FIRType.h  1
-rw-r--r--  flang/include/flang/Optimizer/HLFIR/Passes.h  5
-rw-r--r--  flang/include/flang/Optimizer/HLFIR/Passes.td  11
-rw-r--r--  flang/include/flang/Optimizer/Transforms/Passes.h  1
-rw-r--r--  flang/include/flang/Optimizer/Transforms/Passes.td  12
-rw-r--r--  flang/include/flang/Runtime/support.h  19
-rw-r--r--  flang/include/flang/Semantics/openmp-directive-sets.h  2
-rw-r--r--  flang/include/flang/Semantics/scope.h  2
-rw-r--r--  flang/include/flang/Semantics/semantics.h  4
-rw-r--r--  flang/include/flang/Semantics/symbol.h  1
-rw-r--r--  flang/include/flang/Tools/CLOptions.inc  13
-rw-r--r--  flang/lib/Evaluate/characteristics.cpp  17
-rw-r--r--  flang/lib/Evaluate/formatting.cpp  213
-rw-r--r--  flang/lib/Evaluate/shape.cpp  8
-rw-r--r--  flang/lib/Lower/Bridge.cpp  138
-rw-r--r--  flang/lib/Lower/OpenMP/ClauseProcessor.cpp  5
-rw-r--r--  flang/lib/Lower/OpenMP/DataSharingProcessor.cpp  66
-rw-r--r--  flang/lib/Lower/OpenMP/DataSharingProcessor.h  41
-rw-r--r--  flang/lib/Lower/OpenMP/OpenMP.cpp  10
-rw-r--r--  flang/lib/Optimizer/Builder/CMakeLists.txt  1
-rw-r--r--  flang/lib/Optimizer/Builder/IntrinsicCall.cpp  31
-rw-r--r--  flang/lib/Optimizer/Builder/Runtime/Numeric.cpp  41
-rw-r--r--  flang/lib/Optimizer/Builder/Runtime/Support.cpp  46
-rw-r--r--  flang/lib/Optimizer/CodeGen/CodeGen.cpp  40
-rw-r--r--  flang/lib/Optimizer/Dialect/CUF/Attributes/CMakeLists.txt  1
-rw-r--r--  flang/lib/Optimizer/Dialect/CUF/CUFOps.cpp  18
-rw-r--r--  flang/lib/Optimizer/Dialect/FIROps.cpp  46
-rw-r--r--  flang/lib/Optimizer/Dialect/FIRType.cpp  11
-rw-r--r--  flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp  2
-rw-r--r--  flang/lib/Optimizer/HLFIR/Transforms/InlineElementals.cpp  12
-rw-r--r--  flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIRIntrinsics.cpp  11
-rw-r--r--  flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp  7
-rw-r--r--  flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp  9
-rw-r--r--  flang/lib/Optimizer/HLFIR/Transforms/SimplifyHLFIRIntrinsics.cpp  11
-rw-r--r--  flang/lib/Optimizer/Transforms/AddDebugInfo.cpp  89
-rw-r--r--  flang/lib/Optimizer/Transforms/AssumedRankOpConversion.cpp  131
-rw-r--r--  flang/lib/Optimizer/Transforms/CMakeLists.txt  1
-rw-r--r--  flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp  53
-rw-r--r--  flang/lib/Optimizer/Transforms/DebugTypeGenerator.h  4
-rw-r--r--  flang/lib/Parser/openmp-parsers.cpp  11
-rw-r--r--  flang/lib/Parser/unparse.cpp  18
-rw-r--r--  flang/lib/Semantics/check-call.cpp  38
-rw-r--r--  flang/lib/Semantics/check-declarations.cpp  87
-rw-r--r--  flang/lib/Semantics/check-omp-structure.cpp  59
-rw-r--r--  flang/lib/Semantics/check-omp-structure.h  1
-rw-r--r--  flang/lib/Semantics/expression.cpp  74
-rw-r--r--  flang/lib/Semantics/mod-file.cpp  156
-rw-r--r--  flang/lib/Semantics/mod-file.h  1
-rw-r--r--  flang/lib/Semantics/resolve-directives.cpp  8
-rw-r--r--  flang/lib/Semantics/resolve-names-utils.cpp  44
-rw-r--r--  flang/lib/Semantics/resolve-names.cpp  47
-rw-r--r--  flang/lib/Semantics/symbol.cpp  12
-rw-r--r--  flang/runtime/CMakeLists.txt  6
-rw-r--r--  flang/runtime/edit-output.cpp  12
-rw-r--r--  flang/runtime/external-unit.cpp  8
-rw-r--r--  flang/runtime/numeric.cpp  8
-rw-r--r--  flang/runtime/support.cpp  22
-rw-r--r--  flang/runtime/terminator.h  2
-rw-r--r--  flang/runtime/unit.cpp  1
-rw-r--r--  flang/test/CMakeLists.txt  3
-rw-r--r--  flang/test/Driver/bbc-mlir-pass-pipeline.f90  1
-rw-r--r--  flang/test/Driver/fopenmp.f90  9
-rw-r--r--  flang/test/Driver/mlir-debug-pass-pipeline.f90  8
-rw-r--r--  flang/test/Driver/mlir-pass-pipeline.f90  26
-rw-r--r--  flang/test/Driver/w-arg-unsupported.f90  52
-rw-r--r--  flang/test/Driver/wextra-ok.f90  2
-rw-r--r--  flang/test/Evaluate/triplets01.f90  11
-rw-r--r--  flang/test/Fir/basic-program.fir  20
-rw-r--r--  flang/test/Fir/fir-ops.fir  12
-rw-r--r--  flang/test/Fir/invalid.fir  24
-rw-r--r--  flang/test/Fir/rebox_assumed_rank_codegen.fir  111
-rw-r--r--  flang/test/Integration/debug-complex-1.f90  26
-rw-r--r--  flang/test/Integration/debug-fixed-array-type-2.f90  43
-rw-r--r--  flang/test/Integration/debug-module-2.f90  39
-rw-r--r--  flang/test/Lower/CUDA/cuda-data-transfer.cuf  28
-rw-r--r--  flang/test/Lower/Intrinsics/selected_char_kind.f90  17
-rw-r--r--  flang/test/Lower/Intrinsics/selected_logical_kind.f90  71
-rw-r--r--  flang/test/Lower/OpenMP/Todo/masked-directive.f90  13
-rw-r--r--  flang/test/Lower/OpenMP/invalid-reduction-modifier.f90  4
-rw-r--r--  flang/test/Lower/OpenMP/lastprivate-iv.f90  19
-rw-r--r--  flang/test/Lower/branching-directive.f90  77
-rw-r--r--  flang/test/Lower/unstructured-control-flow.f90  31
-rw-r--r--  flang/test/Parser/OpenMP/masked-unparse.f90  92
-rw-r--r--  flang/test/Semantics/OpenMP/do02.f90  2
-rw-r--r--  flang/test/Semantics/OpenMP/masked.f90  13
-rw-r--r--  flang/test/Semantics/OpenMP/reduction-modifiers.f90  89
-rw-r--r--  flang/test/Semantics/OpenMP/sections03.f90  27
-rw-r--r--  flang/test/Semantics/OpenMP/simd03.f90  2
-rw-r--r--  flang/test/Semantics/OpenMP/taskgroup01.f90  2
-rw-r--r--  flang/test/Semantics/OpenMP/taskloop03.f90  2
-rw-r--r--  flang/test/Semantics/bind-c12.f90  4
-rw-r--r--  flang/test/Semantics/call05.f90  6
-rw-r--r--  flang/test/Semantics/call39.f90  23
-rw-r--r--  flang/test/Semantics/modfile03.f90  99
-rw-r--r--  flang/test/Semantics/procinterface05.f90  14
-rw-r--r--  flang/test/Semantics/shape.f90  10
-rw-r--r--  flang/test/Transforms/debug-complex-1.fir  39
-rw-r--r--  flang/test/Transforms/debug-fixed-array-type.fir  34
-rw-r--r--  flang/test/Transforms/debug-module-1.fir  40
-rw-r--r--  flang/test/Transforms/debug-module-2.fir  35
-rw-r--r--  flang/tools/f18/CMakeLists.txt  1
-rw-r--r--  flang/unittests/CMakeLists.txt  3
-rw-r--r--  flang/unittests/Evaluate/CMakeLists.txt  1
-rw-r--r--  flang/unittests/Runtime/CMakeLists.txt  1
-rw-r--r--  flang/unittests/Runtime/Support.cpp  58
-rw-r--r--  libc/CMakeLists.txt  1
-rw-r--r--  libc/cmake/modules/LLVMLibCObjectRules.cmake  3
-rw-r--r--  libc/config/baremetal/arm/entrypoints.txt  4
-rw-r--r--  libc/config/baremetal/riscv/entrypoints.txt  4
-rw-r--r--  libc/docs/ctype.rst  25
-rw-r--r--  libc/docs/fenv.rst  117
-rw-r--r--  libc/docs/signal.rst  170
-rw-r--r--  libc/docs/stdbit.rst  166
-rw-r--r--  libc/docs/threads.rst  57
-rw-r--r--  libc/include/llvm-libc-macros/linux/CMakeLists.txt  6
-rw-r--r--  libc/include/llvm-libc-macros/linux/error-number-macros.h  8
-rw-r--r--  libc/include/llvm-libc-macros/linux/mips/CMakeLists.txt  5
-rw-r--r--  libc/include/llvm-libc-macros/linux/mips/error-number-macros.h  24
-rw-r--r--  libc/include/llvm-libc-macros/linux/sparc/CMakeLists.txt  5
-rw-r--r--  libc/include/llvm-libc-macros/linux/sparc/error-number-macros.h  24
-rw-r--r--  libc/src/__support/threads/CMakeLists.txt  9
-rw-r--r--  libc/src/__support/threads/CndVar.h  52
-rw-r--r--  libc/src/__support/threads/linux/CMakeLists.txt  13
-rw-r--r--  libc/src/__support/threads/linux/CndVar.cpp  103
-rw-r--r--  libc/src/setjmp/x86_64/CMakeLists.txt  5
-rw-r--r--  libc/src/threads/linux/CMakeLists.txt  11
-rw-r--r--  libc/src/threads/linux/CndVar.h  148
-rw-r--r--  libc/src/threads/linux/cnd_broadcast.cpp  11
-rw-r--r--  libc/src/threads/linux/cnd_destroy.cpp  7
-rw-r--r--  libc/src/threads/linux/cnd_init.cpp  9
-rw-r--r--  libc/src/threads/linux/cnd_signal.cpp  10
-rw-r--r--  libc/src/threads/linux/cnd_wait.cpp  11
-rw-r--r--  libc/src/time/gpu/time_utils.cpp  3
-rw-r--r--  libc/src/time/gpu/time_utils.h  5
-rw-r--r--  libc/startup/baremetal/CMakeLists.txt  11
-rw-r--r--  libc/startup/baremetal/fini.cpp  27
-rw-r--r--  libc/startup/baremetal/init.cpp  32
-rw-r--r--  libc/test/integration/scudo/CMakeLists.txt  4
-rw-r--r--  libc/utils/docgen/ctype.json  28
-rwxr-xr-x  libc/utils/docgen/docgen.py  189
-rw-r--r--  libc/utils/docgen/fenv.json  72
-rw-r--r--  libc/utils/docgen/header.py  87
-rw-r--r--  libc/utils/docgen/signal.json  145
-rw-r--r--  libc/utils/docgen/stdbit.json  176
-rw-r--r--  libc/utils/docgen/threads.json  54
-rw-r--r--  libcxx/CMakeLists.txt  1
-rw-r--r--  libcxx/docs/ReleaseNotes/19.rst  1
-rw-r--r--  libcxx/docs/Status/Cxx20Issues.csv  2
-rw-r--r--  libcxx/docs/Status/Cxx20Papers.csv  4
-rw-r--r--  libcxx/docs/Status/Cxx23Issues.csv  2
-rw-r--r--  libcxx/docs/Status/ParallelismProjects.csv  2
-rw-r--r--  libcxx/include/CMakeLists.txt  22
-rw-r--r--  libcxx/include/__algorithm/copy_move_common.h  1
-rw-r--r--  libcxx/include/__algorithm/pstl.h  1366
-rw-r--r--  libcxx/include/__algorithm/pstl_any_all_none_of.h  152
-rw-r--r--  libcxx/include/__algorithm/pstl_copy.h  134
-rw-r--r--  libcxx/include/__algorithm/pstl_count.h  126
-rw-r--r--  libcxx/include/__algorithm/pstl_equal.h  184
-rw-r--r--  libcxx/include/__algorithm/pstl_fill.h  114
-rw-r--r--  libcxx/include/__algorithm/pstl_find.h  141
-rw-r--r--  libcxx/include/__algorithm/pstl_for_each.h  108
-rw-r--r--  libcxx/include/__algorithm/pstl_generate.h  113
-rw-r--r--  libcxx/include/__algorithm/pstl_is_partitioned.h  79
-rw-r--r--  libcxx/include/__algorithm/pstl_merge.h  97
-rw-r--r--  libcxx/include/__algorithm/pstl_move.h  89
-rw-r--r--  libcxx/include/__algorithm/pstl_replace.h  260
-rw-r--r--  libcxx/include/__algorithm/pstl_rotate_copy.h  90
-rw-r--r--  libcxx/include/__algorithm/pstl_sort.h  85
-rw-r--r--  libcxx/include/__algorithm/pstl_stable_sort.h  63
-rw-r--r--  libcxx/include/__algorithm/pstl_transform.h  122
-rw-r--r--  libcxx/include/__atomic/atomic_ref.h  360
-rw-r--r--  libcxx/include/__atomic/atomic_sync.h  1
-rw-r--r--  libcxx/include/__atomic/check_memory_order.h  4
-rw-r--r--  libcxx/include/__atomic/cxx_atomic_impl.h  27
-rw-r--r--  libcxx/include/__atomic/to_gcc_order.h  54
-rw-r--r--  libcxx/include/__exception/exception_ptr.h  17
-rw-r--r--  libcxx/include/__locale  4
-rw-r--r--  libcxx/include/__numeric/pstl.h (renamed from libcxx/include/__numeric/pstl_transform_reduce.h)  83
-rw-r--r--  libcxx/include/__numeric/pstl_reduce.h  112
-rw-r--r--  libcxx/include/__type_traits/has_unique_object_representation.h  6
-rw-r--r--  libcxx/include/algorithm  17
-rw-r--r--  libcxx/include/atomic  1
-rw-r--r--  libcxx/include/experimental/__simd/scalar.h  7
-rw-r--r--  libcxx/include/experimental/__simd/simd.h  11
-rw-r--r--  libcxx/include/experimental/__simd/simd_mask.h  11
-rw-r--r--  libcxx/include/experimental/__simd/vec_ext.h  11
-rw-r--r--  libcxx/include/forward_list  1
-rw-r--r--  libcxx/include/list  1
-rw-r--r--  libcxx/include/locale  53
-rw-r--r--  libcxx/include/module.modulemap  40
-rw-r--r--  libcxx/include/numeric  11
-rw-r--r--  libcxx/include/vector  64
-rw-r--r--  libcxx/modules/std/atomic.inc  2
-rw-r--r--  libcxx/src/chrono.cpp  4
-rw-r--r--  libcxx/src/locale.cpp  4
-rw-r--r--  libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_strong.pass.cpp  58
-rw-r--r--  libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_weak.pass.cpp  58
-rw-r--r--  libcxx/test/libcxx/atomics/atomics.ref/assert.ctor.pass.cpp  40
-rw-r--r--  libcxx/test/libcxx/atomics/atomics.ref/assert.load.pass.cpp  55
-rw-r--r--  libcxx/test/libcxx/atomics/atomics.ref/assert.store.pass.cpp  63
-rw-r--r--  libcxx/test/libcxx/atomics/atomics.ref/assert.wait.pass.cpp  55
-rw-r--r--  libcxx/test/libcxx/language.support/support.dynamic/libcpp_deallocate.sh.cpp  3
-rw-r--r--  libcxx/test/libcxx/lit.local.cfg  5
-rw-r--r--  libcxx/test/std/algorithms/alg.modifying.operations/alg.fill/pstl.exception_handling.pass.cpp  58
-rw-r--r--  libcxx/test/std/algorithms/alg.modifying.operations/alg.move/pstl.exception_handling.pass.cpp  40
-rw-r--r--  libcxx/test/std/algorithms/alg.modifying.operations/alg.replace/pstl.exception_handling.pass.cpp  118
-rw-r--r--  libcxx/test/std/algorithms/alg.modifying.operations/alg.rotate/pstl.exception_handling.pass.cpp  43
-rw-r--r--  libcxx/test/std/algorithms/alg.modifying.operations/alg.transform/pstl.exception_handling.pass.cpp  73
-rw-r--r--  libcxx/test/std/algorithms/alg.nonmodifying/alg.all_of/pstl.exception_handling.pass.cpp  44
-rw-r--r--  libcxx/test/std/algorithms/alg.nonmodifying/alg.any_of/pstl.exception_handling.pass.cpp  44
-rw-r--r--  libcxx/test/std/algorithms/alg.nonmodifying/alg.equal/pstl.exception_handling.pass.cpp  53
-rw-r--r--  libcxx/test/std/algorithms/alg.nonmodifying/alg.find/pstl.exception_handling.pass.cpp  87
-rw-r--r--  libcxx/test/std/algorithms/alg.nonmodifying/alg.foreach/pstl.exception_handling.pass.cpp  53
-rw-r--r--  libcxx/test/std/algorithms/alg.nonmodifying/alg.none_of/pstl.exception_handling.pass.cpp  44
-rw-r--r--  libcxx/test/std/algorithms/alg.sorting/alg.merge/pstl.exception_handling.pass.cpp  51
-rw-r--r--  libcxx/test/std/algorithms/alg.sorting/alg.sort/stable.sort/pstl.exception_handling.pass.cpp  41
-rw-r--r--  libcxx/test/std/algorithms/numeric.ops/reduce/pstl.exception_handling.pass.cpp  52
-rw-r--r--  libcxx/test/std/algorithms/numeric.ops/transform.reduce/pstl.exception_handling.pass.cpp  62
-rw-r--r--  libcxx/test/std/algorithms/pstl.exception_handling.pass.cpp  339
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/assign.pass.cpp  50
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/bitwise_and_assign.pass.cpp  60
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/bitwise_or_assign.pass.cpp  56
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/bitwise_xor_assign.pass.cpp  56
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/compare_exchange_strong.pass.cpp  221
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/compare_exchange_weak.pass.cpp  226
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/convert.pass.cpp  45
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/ctor.pass.cpp  37
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/deduction.pass.cpp  33
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/exchange.pass.cpp  45
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp  113
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/fetch_and.pass.cpp  69
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/fetch_or.pass.cpp  68
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp  113
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/fetch_xor.pass.cpp  68
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/increment_decrement.pass.cpp  97
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/is_always_lock_free.pass.cpp  71
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/load.pass.cpp  62
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/member_types.pass.cpp  132
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/notify_all.pass.cpp  78
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/notify_one.pass.cpp  46
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/operator_minus_equals.pass.cpp  79
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/operator_plus_equals.pass.cpp  79
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/required_alignment.pass.cpp  39
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/requires-trivially-copyable.verify.cpp  26
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/store.pass.cpp  61
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/test_helper.h  136
-rw-r--r--  libcxx/test/std/atomics/atomics.ref/wait.pass.cpp  88
-rw-r--r--  libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/types.compile.pass.cpp  3
-rw-r--r--  libcxx/test/std/containers/associative/map/map.value_compare/types.pass.cpp  2
-rw-r--r--  libcxx/test/std/containers/associative/multimap/multimap.value_compare/types.pass.cpp  2
-rw-r--r--  libcxx/test/std/experimental/simd/simd.class/simd_copy.pass.cpp  173
-rw-r--r--  libcxx/test/std/experimental/simd/simd.mask.class/simd_mask_copy.pass.cpp  127
-rw-r--r--  libcxx/test/std/iterators/predef.iterators/counted.iterator/implicit_ctad.pass.cpp  2
-rw-r--r--  libcxx/test/std/iterators/predef.iterators/insert.iterators/back.insert.iterator/implicit_ctad.pass.cpp  2
-rw-r--r--  libcxx/test/std/iterators/predef.iterators/insert.iterators/front.insert.iterator/implicit_ctad.pass.cpp  2
-rw-r--r--  libcxx/test/std/iterators/predef.iterators/move.iterators/move.iterator/implicit_ctad.pass.cpp  2
-rw-r--r--  libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/sized_delete_array14.pass.cpp  8
-rw-r--r--  libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/sized_delete14.pass.cpp  8
-rw-r--r--  libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_double.pass.cpp  161
-rw-r--r--  libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_float.pass.cpp  161
-rw-r--r--  libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_long_double.pass.cpp  161
-rw-r--r--  libcxx/test/std/localization/locale.categories/facet.numpunct/locale.numpunct.byname/thousands_sep.pass.cpp  17
-rw-r--r--  libcxx/test/std/numerics/numeric.ops/reduce/pstl.reduce.pass.cpp (renamed from libcxx/test/std/algorithms/numeric.ops/reduce/pstl.reduce.pass.cpp)  2
-rw-r--r--  libcxx/test/std/numerics/numeric.ops/transform.reduce/pstl.transform_reduce.binary.pass.cpp (renamed from libcxx/test/std/algorithms/numeric.ops/transform.reduce/pstl.transform_reduce.binary.pass.cpp)  2
-rw-r--r--  libcxx/test/std/numerics/numeric.ops/transform.reduce/pstl.transform_reduce.unary.pass.cpp (renamed from libcxx/test/std/algorithms/numeric.ops/transform.reduce/pstl.transform_reduce.unary.pass.cpp)  2
-rw-r--r--  libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/assign.pass.cpp  4
-rw-r--r--  libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/copy.pass.cpp  4
-rw-r--r--  libcxx/test/std/strings/string.view/string.view.deduct/implicit.pass.cpp  2
-rw-r--r--  libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for.pass.cpp  146
-rw-r--r--  libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for_pred.pass.cpp  200
-rw-r--r--  libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_pred.pass.cpp  128
-rw-r--r--  libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until.pass.cpp  175
-rw-r--r--  libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until_pred.pass.cpp  213
-rw-r--r--  libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for.pass.cpp  161
-rw-r--r--  libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for_pred.pass.cpp  212
-rw-r--r--  libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_pred.pass.cpp  133
-rw-r--r--  libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until.pass.cpp  179
-rw-r--r--  libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until_pred.pass.cpp  241
-rw-r--r--  libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.guard/implicit_ctad.pass.cpp  2
-rw-r--r--  libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.scoped/implicit_ctad.pass.cpp  2
-rw-r--r--  libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/implicit_ctad.pass.cpp  2
-rw-r--r--  libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.cons/mutex.pass.cpp  132
-rw-r--r--  libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/lock.pass.cpp  140
-rw-r--r--  libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/try_lock.pass.cpp  135
-rw-r--r--  libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.unique/implicit_ctad.pass.cpp  2
-rw-r--r--  libcxx/test/std/utilities/function.objects/func.search/func.search.bm/implicit_ctad.pass.cpp  2
-rw-r--r--  libcxx/test/std/utilities/function.objects/func.search/func.search.bmh/implicit_ctad.pass.cpp  2
-rw-r--r--  libcxx/test/std/utilities/function.objects/func.search/func.search.default/implicit_ctad.pass.cpp  2
-rw-r--r--  libcxx/test/std/utilities/function.objects/operations.implicit_ctad.pass.cpp  2
-rw-r--r--  libcxx/test/std/utilities/meta/meta.unary/meta.unary.prop/has_unique_object_representations.pass.cpp  2
-rw-r--r--  libcxx/test/std/utilities/utility/mem.res/mem.res.global/new_delete_resource.pass.cpp  2
-rw-r--r--  libcxx/utils/libcxx/test/dsl.py  4
-rw-r--r--  libcxx/utils/libcxx/test/features.py  35
-rw-r--r--  libcxxabi/CMakeLists.txt  1
-rw-r--r--  libcxxabi/include/cxxabi.h  8
-rw-r--r--  libcxxabi/src/cxa_exception.cpp  7
-rw-r--r--  libcxxabi/src/cxa_exception.h  2
-rw-r--r--libcxxabi/src/cxa_personality.cpp36
-rw-r--r--libunwind/CMakeLists.txt1
-rw-r--r--libunwind/include/__libunwind_config.h4
-rw-r--r--libunwind/src/Unwind-wasm.c4
-rw-r--r--libunwind/src/UnwindCursor.hpp2
-rw-r--r--libunwind/src/UnwindLevel1.c3
-rw-r--r--libunwind/src/UnwindRegistersRestore.S4
-rw-r--r--libunwind/src/UnwindRegistersSave.S4
-rw-r--r--libunwind/src/libunwind.cpp5
-rw-r--r--lld/COFF/DriverUtils.cpp40
-rw-r--r--lld/ELF/Arch/AVR.cpp3
-rw-r--r--lld/ELF/Config.h5
-rw-r--r--lld/ELF/Driver.cpp36
-rw-r--r--lld/ELF/Options.td1
-rw-r--r--lld/ELF/OutputSections.cpp19
-rw-r--r--lld/ELF/SyntheticSections.cpp2
-rw-r--r--lld/MachO/Config.h1
-rw-r--r--lld/MachO/Driver.cpp1
-rw-r--r--lld/MachO/Options.td3
-rw-r--r--lld/MachO/SyntheticSections.cpp15
-rw-r--r--lld/docs/ld.lld.16
-rw-r--r--lld/test/COFF/arm64ec-exports.s121
-rw-r--r--lld/test/ELF/aarch64-feature-gcs.s134
-rw-r--r--lld/test/ELF/arm-gotoff.s70
-rw-r--r--lld/test/ELF/avr-reloc-error.s5
-rw-r--r--lld/test/ELF/avr-reloc.s12
-rw-r--r--lld/test/ELF/compress-debug-sections-zstd.s29
-rw-r--r--lld/test/ELF/compress-sections-special.s4
-rw-r--r--lld/test/ELF/compress-sections.s24
-rw-r--r--lld/test/ELF/compressed-debug-level.test6
-rw-r--r--lld/test/ELF/linkerscript/compress-debug-sections.s2
-rw-r--r--lld/test/ELF/linkerscript/compress-sections.s8
-rw-r--r--lld/test/ELF/mips-eh_frame-pic.s9
-rw-r--r--lld/test/MachO/stabs-icf.s27
-rw-r--r--lld/test/wasm/shared64.s14
-rw-r--r--lld/wasm/Driver.cpp10
-rw-r--r--lld/wasm/Symbols.cpp2
-rw-r--r--lld/wasm/Symbols.h5
-rw-r--r--lld/wasm/SyntheticSections.cpp8
-rw-r--r--lld/wasm/Writer.cpp10
-rw-r--r--lldb/CMakeLists.txt1
-rw-r--r--lldb/cmake/modules/AddLLDB.cmake6
-rw-r--r--lldb/cmake/modules/LLDBConfig.cmake22
-rw-r--r--lldb/cmake/modules/LLDBFramework.cmake2
-rw-r--r--lldb/cmake/modules/LLDBStandalone.cmake4
-rw-r--r--lldb/docs/CMakeLists.txt1
-rw-r--r--lldb/docs/resources/build.rst1
-rw-r--r--lldb/include/lldb/API/SBCommandInterpreter.h8
-rw-r--r--lldb/include/lldb/API/SBDebugger.h13
-rw-r--r--lldb/include/lldb/Core/Debugger.h31
-rw-r--r--lldb/include/lldb/Interpreter/CommandInterpreter.h18
-rw-r--r--lldb/include/lldb/Symbol/CompilerType.h2
-rw-r--r--lldb/include/lldb/Symbol/TypeSystem.h2
-rw-r--r--lldb/include/lldb/Target/Process.h4
-rw-r--r--lldb/include/lldb/lldb-types.h2
-rw-r--r--lldb/packages/Python/lldbsuite/test/dotest.py25
-rw-r--r--lldb/source/API/CMakeLists.txt3
-rw-r--r--lldb/source/API/SBCommandInterpreter.cpp16
-rw-r--r--lldb/source/API/SBDebugger.cpp20
-rw-r--r--lldb/source/Breakpoint/BreakpointResolverFileLine.cpp10
-rw-r--r--lldb/source/Commands/CommandObjectThread.cpp4
-rw-r--r--lldb/source/Core/CMakeLists.txt3
-rw-r--r--lldb/source/Core/Debugger.cpp45
-rw-r--r--lldb/source/Core/ValueObject.cpp31
-rw-r--r--lldb/source/Core/ValueObjectConstResultImpl.cpp12
-rw-r--r--lldb/source/Host/common/Socket.cpp3
-rw-r--r--lldb/source/Interpreter/CommandInterpreter.cpp53
-rw-r--r--lldb/source/Interpreter/InterpreterProperties.td4
-rw-r--r--lldb/source/Interpreter/Options.cpp29
-rw-r--r--lldb/source/Plugins/ABI/PowerPC/ABISysV_ppc64.cpp13
-rw-r--r--lldb/source/Plugins/Instruction/ARM64/EmulateInstructionARM64.cpp2
-rw-r--r--lldb/source/Plugins/Language/CPlusPlus/BlockPointer.cpp18
-rw-r--r--lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp11
-rw-r--r--lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp14
-rw-r--r--lldb/source/Plugins/Platform/POSIX/PlatformPOSIX.cpp4
-rw-r--r--lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.cpp4
-rw-r--r--lldb/source/Plugins/Process/elf-core/ProcessElfCore.cpp71
-rw-r--r--lldb/source/Plugins/Process/elf-core/ProcessElfCore.h10
-rw-r--r--lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp9
-rw-r--r--lldb/source/Plugins/SymbolFile/Breakpad/SymbolFileBreakpad.h5
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.cpp6
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.h5
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfoEntry.cpp40
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfoEntry.h2
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DWARFUnit.cpp6
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp28
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h2
-rw-r--r--lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp23
-rw-r--r--lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h2
-rw-r--r--lldb/source/Plugins/UnwindAssembly/InstEmulation/UnwindAssemblyInstEmulation.cpp4
-rw-r--r--lldb/source/Symbol/CompilerType.cpp5
-rw-r--r--lldb/source/Symbol/Symbol.cpp18
-rw-r--r--lldb/source/Symbol/SymbolFileOnDemand.cpp5
-rw-r--r--lldb/source/Symbol/TypeSystem.cpp32
-rw-r--r--lldb/source/Target/RegisterContextUnwind.cpp6
-rw-r--r--lldb/source/Target/Target.cpp36
-rw-r--r--lldb/source/Utility/Status.cpp3
-rw-r--r--lldb/test/API/CMakeLists.txt1
-rw-r--r--lldb/test/API/commands/session/save/TestSessionSave.py12
-rw-r--r--lldb/test/API/functionalities/breakpoint/breakpoint_command/TestBreakpointCommand.py18
-rw-r--r--lldb/test/API/functionalities/bt-interrupt/main.c1
-rw-r--r--lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx/chrono/TestDataFormatterLibcxxChrono.py57
-rw-r--r--lldb/test/API/functionalities/thread/exit_during_expression/main.c2
-rw-r--r--lldb/test/API/lang/c/enum_types/TestEnumTypes.py6
-rw-r--r--lldb/test/API/lang/cpp/limit-debug-info/Makefile2
-rw-r--r--lldb/test/API/lang/cpp/limit-debug-info/TestWithLimitDebugInfo.py40
-rw-r--r--lldb/test/API/lang/cpp/limit-debug-info/base.cpp9
-rw-r--r--lldb/test/API/lang/cpp/limit-debug-info/base.h22
-rw-r--r--lldb/test/API/lang/cpp/limit-debug-info/derived.cpp11
-rw-r--r--lldb/test/API/lang/cpp/limit-debug-info/derived.h37
-rw-r--r--lldb/test/API/lang/cpp/limit-debug-info/main.cpp6
-rw-r--r--lldb/test/API/python_api/debugger/TestDebuggerAPI.py126
-rw-r--r--lldb/test/API/python_api/interpreter/TestCommandInterpreterAPI.py172
-rw-r--r--lldb/test/API/python_api/interpreter/main.c5
-rw-r--r--lldb/test/API/tools/lldb-dap/attach/TestDAP_attach.py2
-rw-r--r--lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_logpoints.py4
-rw-r--r--lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_setBreakpoints.py4
-rw-r--r--lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_setExceptionBreakpoints.py1
-rw-r--r--lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_setFunctionBreakpoints.py2
-rw-r--r--lldb/test/API/tools/lldb-dap/commands/TestDAP_commands.py2
-rw-r--r--lldb/test/API/tools/lldb-dap/completions/TestDAP_completions.py1
-rw-r--r--lldb/test/API/tools/lldb-dap/console/TestDAP_console.py5
-rw-r--r--lldb/test/API/tools/lldb-dap/console/TestDAP_redirection_to_console.py1
-rw-r--r--lldb/test/API/tools/lldb-dap/coreFile/TestDAP_coreFile.py2
-rw-r--r--lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py3
-rw-r--r--lldb/test/API/tools/lldb-dap/disassemble/TestDAP_disassemble.py1
-rw-r--r--lldb/test/API/tools/lldb-dap/disconnect/TestDAP_disconnect.py2
-rw-r--r--lldb/test/API/tools/lldb-dap/evaluate/TestDAP_evaluate.py7
-rw-r--r--lldb/test/API/tools/lldb-dap/exception/TestDAP_exception.py1
-rw-r--r--lldb/test/API/tools/lldb-dap/launch/TestDAP_launch.py14
-rw-r--r--lldb/test/API/tools/lldb-dap/module/TestDAP_module.py3
-rw-r--r--lldb/test/API/tools/lldb-dap/optimized/TestDAP_optimized.py2
-rw-r--r--lldb/test/API/tools/lldb-dap/restart/TestDAP_restart.py3
-rw-r--r--lldb/test/API/tools/lldb-dap/restart/TestDAP_restart_runInTerminal.py2
-rw-r--r--lldb/test/API/tools/lldb-dap/runInTerminal/TestDAP_runInTerminal.py7
-rw-r--r--lldb/test/API/tools/lldb-dap/stackTrace/TestDAP_stackTrace.py2
-rw-r--r--lldb/test/API/tools/lldb-dap/stackTraceMissingFunctionName/TestDAP_stackTraceMissingFunctionName.py1
-rw-r--r--lldb/test/API/tools/lldb-dap/startDebugging/TestDAP_startDebugging.py1
-rw-r--r--lldb/test/API/tools/lldb-dap/step/TestDAP_step.py1
-rw-r--r--lldb/test/API/tools/lldb-dap/stop-hooks/TestDAP_stop_hooks.py1
-rw-r--r--lldb/test/API/tools/lldb-dap/terminated-event/TestDAP_terminatedEvent.py1
-rw-r--r--lldb/test/API/tools/lldb-dap/threads/TestDAP_threads.py2
-rw-r--r--lldb/test/API/tools/lldb-dap/variables/TestDAP_variables.py37
-rw-r--r--lldb/test/CMakeLists.txt4
-rw-r--r--lldb/test/Shell/CMakeLists.txt1
-rw-r--r--lldb/test/Shell/SymbolFile/DWARF/x86/invalid_abbreviation.s47
-rw-r--r--lldb/test/Shell/Unwind/Inputs/signal-in-leaf-function-aarch64.c15
-rw-r--r--lldb/test/Shell/Unwind/signal-in-leaf-function-aarch64.test30
-rw-r--r--lldb/test/Unit/CMakeLists.txt1
-rw-r--r--lldb/tools/driver/CMakeLists.txt2
-rw-r--r--lldb/tools/lldb-dap/DAP.h2
-rw-r--r--lldb/tools/lldb-dap/JSONUtils.cpp6
-rw-r--r--lldb/tools/lldb-dap/lldb-dap.cpp13
-rw-r--r--lldb/tools/lldb-fuzzer/lldb-commandinterpreter-fuzzer/CMakeLists.txt1
-rw-r--r--lldb/tools/lldb-fuzzer/lldb-target-fuzzer/CMakeLists.txt1
-rw-r--r--lldb/tools/lldb-server/CMakeLists.txt1
-rw-r--r--lldb/unittests/CMakeLists.txt2
-rw-r--r--lldb/unittests/SymbolFile/DWARF/DWARFDIETest.cpp24
-rw-r--r--lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp24
-rw-r--r--lldb/unittests/tools/lldb-server/CMakeLists.txt2
-rw-r--r--lldb/utils/TableGen/CMakeLists.txt1
-rw-r--r--lldb/utils/lit-cpuid/CMakeLists.txt2
-rw-r--r--lldb/utils/lldb-dotest/CMakeLists.txt2
-rw-r--r--lldb/utils/lldb-repro/CMakeLists.txt2
-rw-r--r--llvm-libgcc/CMakeLists.txt1
-rw-r--r--llvm/CMakeLists.txt14
-rw-r--r--llvm/cmake/config-ix.cmake33
-rw-r--r--llvm/cmake/modules/AddLLVM.cmake97
-rw-r--r--llvm/cmake/modules/AddOCaml.cmake6
-rw-r--r--llvm/cmake/modules/AddSphinxTarget.cmake3
-rw-r--r--llvm/cmake/modules/CrossCompile.cmake4
-rw-r--r--llvm/cmake/modules/FindTerminfo.cmake55
-rw-r--r--llvm/cmake/modules/HandleLLVMOptions.cmake149
-rw-r--r--llvm/cmake/modules/LLVMConfig.cmake.in5
-rw-r--r--llvm/cmake/modules/LLVMDistributionSupport.cmake10
-rw-r--r--llvm/cmake/modules/LLVMExternalProjectUtils.cmake25
-rw-r--r--llvm/cmake/modules/TableGen.cmake5
-rw-r--r--llvm/docs/AMDGPUUsage.rst2
-rw-r--r--llvm/docs/CMakeLists.txt1
-rw-r--r--llvm/docs/DeveloperPolicy.rst34
-rw-r--r--llvm/docs/GettingInvolved.rst5
-rw-r--r--llvm/docs/LangRef.rst84
-rw-r--r--llvm/docs/MemorySSA.rst17
-rw-r--r--llvm/docs/ORCv2.rst2
-rw-r--r--llvm/docs/RISCVUsage.rst5
-rw-r--r--llvm/docs/ReleaseNotes.rst15
-rw-r--r--llvm/docs/SPIRVUsage.rst8
-rw-r--r--llvm/examples/ExceptionDemo/ExceptionDemo.cpp2
-rw-r--r--llvm/examples/Kaleidoscope/CMakeLists.txt2
-rw-r--r--llvm/include/llvm/ADT/GenericUniformityImpl.h5
-rw-r--r--llvm/include/llvm/Analysis/CFG.h12
-rw-r--r--llvm/include/llvm/Analysis/ConstantFolding.h15
-rw-r--r--llvm/include/llvm/Analysis/InstSimplifyFolder.h13
-rw-r--r--llvm/include/llvm/Analysis/TargetFolder.h11
-rw-r--r--llvm/include/llvm/Analysis/TargetTransformInfo.h2
-rw-r--r--llvm/include/llvm/Analysis/VecFuncs.def16
-rw-r--r--llvm/include/llvm/AsmParser/LLToken.h1
-rw-r--r--llvm/include/llvm/BinaryFormat/ELF.h6
-rw-r--r--llvm/include/llvm/Bitcode/BitcodeWriter.h9
-rw-r--r--llvm/include/llvm/Bitcode/LLVMBitCodes.h11
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h3
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h4
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h17
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h11
-rw-r--r--llvm/include/llvm/CodeGen/MachineInstr.h6
-rw-r--r--llvm/include/llvm/CodeGen/SelectionDAG.h6
-rw-r--r--llvm/include/llvm/CodeGen/ValueTypes.h6
-rw-r--r--llvm/include/llvm/CodeGen/ValueTypes.td5
-rw-r--r--llvm/include/llvm/CodeGenTypes/MachineValueType.h8
-rw-r--r--llvm/include/llvm/Config/config.h.cmake3
-rw-r--r--llvm/include/llvm/Frontend/OpenMP/ClauseT.h2
-rw-r--r--llvm/include/llvm/IR/ConstantFolder.h11
-rw-r--r--llvm/include/llvm/IR/ConstantRange.h9
-rw-r--r--llvm/include/llvm/IR/Constants.h20
-rw-r--r--llvm/include/llvm/IR/GEPNoWrapFlags.h93
-rw-r--r--llvm/include/llvm/IR/IRBuilder.h9
-rw-r--r--llvm/include/llvm/IR/IRBuilderFolder.h11
-rw-r--r--llvm/include/llvm/IR/Instructions.h14
-rw-r--r--llvm/include/llvm/IR/Intrinsics.td4
-rw-r--r--llvm/include/llvm/IR/IntrinsicsAMDGPU.td36
-rw-r--r--llvm/include/llvm/IR/IntrinsicsSPIRV.td1
-rw-r--r--llvm/include/llvm/IR/IntrinsicsWebAssembly.td8
-rw-r--r--llvm/include/llvm/IR/IntrinsicsX86.td84
-rw-r--r--llvm/include/llvm/IR/ModuleSummaryIndex.h7
-rw-r--r--llvm/include/llvm/IR/NoFolder.h11
-rw-r--r--llvm/include/llvm/IR/Operator.h29
-rw-r--r--llvm/include/llvm/IR/VPIntrinsics.def4
-rw-r--r--llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h5
-rw-r--r--llvm/include/llvm/MC/MCStreamer.h4
-rw-r--r--llvm/include/llvm/MCA/IncrementalSourceMgr.h2
-rw-r--r--llvm/include/llvm/MCA/InstrBuilder.h6
-rw-r--r--llvm/include/llvm/Object/ELF.h2
-rw-r--r--llvm/include/llvm/Object/ELFTypes.h1
-rw-r--r--llvm/include/llvm/Object/ObjectFile.h1
-rw-r--r--llvm/include/llvm/Option/ArgList.h8
-rw-r--r--llvm/include/llvm/ProfileData/InstrProf.h30
-rw-r--r--llvm/include/llvm/ProfileData/InstrProfReader.h2
-rw-r--r--llvm/include/llvm/ProfileData/InstrProfWriter.h9
-rw-r--r--llvm/include/llvm/ProfileData/SampleProfReader.h10
-rw-r--r--llvm/include/llvm/Support/CMakeLists.txt2
-rw-r--r--llvm/include/llvm/Support/Error.h24
-rw-r--r--llvm/include/llvm/Support/KnownBits.h12
-rw-r--r--llvm/include/llvm/Target/GlobalISel/Combine.td85
-rw-r--r--llvm/include/llvm/Target/TargetSelectionDAG.td3
-rw-r--r--llvm/include/llvm/TargetParser/X86TargetParser.def33
-rw-r--r--llvm/include/llvm/Transforms/IPO/FunctionImport.h21
-rw-r--r--llvm/include/llvm/Transforms/Utils/CallPromotionUtils.h33
-rw-r--r--llvm/lib/Analysis/CFG.cpp74
-rw-r--r--llvm/lib/Analysis/ConstantFolding.cpp100
-rw-r--r--llvm/lib/Analysis/InstructionSimplify.cpp13
-rw-r--r--llvm/lib/Analysis/LoopAccessAnalysis.cpp149
-rw-r--r--llvm/lib/Analysis/LoopCacheAnalysis.cpp7
-rw-r--r--llvm/lib/Analysis/ScalarEvolution.cpp31
-rw-r--r--llvm/lib/Analysis/TargetLibraryInfo.cpp13
-rw-r--r--llvm/lib/Analysis/TargetTransformInfo.cpp2
-rw-r--r--llvm/lib/Analysis/ValueTracking.cpp36
-rw-r--r--llvm/lib/AsmParser/LLLexer.cpp1
-rw-r--r--llvm/lib/AsmParser/LLParser.cpp36
-rw-r--r--llvm/lib/Bitcode/Reader/BitcodeReader.cpp55
-rw-r--r--llvm/lib/Bitcode/Writer/BitcodeWriter.cpp54
-rw-r--r--llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp4
-rw-r--r--llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp3
-rw-r--r--llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp4
-rw-r--r--llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h4
-rw-r--r--llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp73
-rw-r--r--llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h10
-rw-r--r--llvm/lib/CodeGen/AtomicExpandPass.cpp34
-rw-r--r--llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp64
-rw-r--r--llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp7
-rw-r--r--llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp8
-rw-r--r--llvm/lib/CodeGen/GlobalISel/Utils.cpp24
-rw-r--r--llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp12
-rw-r--r--llvm/lib/CodeGen/LiveRangeEdit.cpp2
-rw-r--r--llvm/lib/CodeGen/MachineScheduler.cpp21
-rw-r--r--llvm/lib/CodeGen/ParallelCG.cpp4
-rw-r--r--llvm/lib/CodeGen/RegisterPressure.cpp6
-rw-r--r--llvm/lib/CodeGen/ScheduleDAG.cpp4
-rw-r--r--llvm/lib/CodeGen/SelectOptimize.cpp82
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp92
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp10
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp7
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp11
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp27
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp44
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp72
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp3
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp120
-rw-r--r--llvm/lib/CodeGen/TargetLoweringBase.cpp28
-rw-r--r--llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp2
-rw-r--r--llvm/lib/CodeGen/ValueTypes.cpp17
-rw-r--r--llvm/lib/DWARFLinker/Parallel/OutputSections.h2
-rw-r--r--llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp4
-rw-r--r--llvm/lib/IR/AsmWriter.cpp4
-rw-r--r--llvm/lib/IR/ConstantFold.cpp5
-rw-r--r--llvm/lib/IR/ConstantRange.cpp22
-rw-r--r--llvm/lib/IR/Constants.cpp12
-rw-r--r--llvm/lib/IR/IRBuilder.cpp9
-rw-r--r--llvm/lib/IR/Instruction.cpp8
-rw-r--r--llvm/lib/IR/Instructions.cpp23
-rw-r--r--llvm/lib/IR/MDBuilder.cpp14
-rw-r--r--llvm/lib/IR/Mangler.cpp2
-rw-r--r--llvm/lib/IR/Module.cpp2
-rw-r--r--llvm/lib/IR/Operator.cpp3
-rw-r--r--llvm/lib/LTO/LTO.cpp40
-rw-r--r--llvm/lib/LTO/LTOBackend.cpp14
-rw-r--r--llvm/lib/LTO/ThinLTOCodeGenerator.cpp10
-rw-r--r--llvm/lib/MC/ELFObjectWriter.cpp22
-rw-r--r--llvm/lib/MC/MCDwarf.cpp6
-rw-r--r--llvm/lib/MC/MCObjectStreamer.cpp3
-rw-r--r--llvm/lib/MC/MCStreamer.cpp2
-rw-r--r--llvm/lib/MCA/InstrBuilder.cpp20
-rw-r--r--llvm/lib/Object/ELF.cpp21
-rw-r--r--llvm/lib/ObjectYAML/ELFEmitter.cpp3
-rw-r--r--llvm/lib/Option/OptTable.cpp2
-rw-r--r--llvm/lib/ProfileData/InstrProf.cpp98
-rw-r--r--llvm/lib/ProfileData/InstrProfCorrelator.cpp10
-rw-r--r--llvm/lib/ProfileData/InstrProfReader.cpp150
-rw-r--r--llvm/lib/ProfileData/InstrProfWriter.cpp141
-rw-r--r--llvm/lib/ProfileData/MemProf.cpp6
-rw-r--r--llvm/lib/ProfileData/MemProfReader.cpp18
-rw-r--r--llvm/lib/ProfileData/SampleProfReader.cpp8
-rw-r--r--llvm/lib/Support/BLAKE3/CMakeLists.txt1
-rw-r--r--llvm/lib/Support/CMakeLists.txt11
-rw-r--r--llvm/lib/Support/Error.cpp5
-rw-r--r--llvm/lib/Support/KnownBits.cpp31
-rw-r--r--llvm/lib/Support/LockFileManager.cpp2
-rw-r--r--llvm/lib/Support/Unix/Process.inc60
-rw-r--r--llvm/lib/Support/raw_socket_stream.cpp23
-rw-r--r--llvm/lib/Target/AArch64/AArch64CallingConvention.cpp29
-rw-r--r--llvm/lib/Target/AArch64/AArch64Combine.td3
-rw-r--r--llvm/lib/Target/AArch64/AArch64ExpandImm.cpp8
-rw-r--r--llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp13
-rw-r--r--llvm/lib/Target/AArch64/AArch64Features.td46
-rw-r--r--llvm/lib/Target/AArch64/AArch64ISelLowering.cpp29
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrInfo.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64PointerAuth.cpp41
-rw-r--r--llvm/lib/Target/AArch64/AArch64PointerAuth.h12
-rw-r--r--llvm/lib/Target/AArch64/AArch64Subtarget.cpp27
-rw-r--r--llvm/lib/Target/AArch64/AArch64Subtarget.h21
-rw-r--r--llvm/lib/Target/AArch64/AArch64SystemOperands.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64TargetMachine.cpp27
-rw-r--r--llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp3
-rw-r--r--llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp49
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp39
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h6
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUCodeGenPassBuilder.cpp38
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUCodeGenPassBuilder.h33
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUGISel.td5
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp11
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h1
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp27
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h5
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp17
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h1
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUInstructions.td22
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp61
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h2
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp91
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp2
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp44
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp12
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUSplitModule.cpp744
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUSplitModule.h30
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp17
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h10
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp65
-rw-r--r--llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp51
-rw-r--r--llvm/lib/Target/AMDGPU/BUFInstructions.td9
-rw-r--r--llvm/lib/Target/AMDGPU/CMakeLists.txt4
-rw-r--r--llvm/lib/Target/AMDGPU/GCNSubtarget.h3
-rw-r--r--llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp12
-rw-r--r--llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h9
-rw-r--r--llvm/lib/Target/AMDGPU/R600CodeGenPassBuilder.cpp33
-rw-r--r--llvm/lib/Target/AMDGPU/R600CodeGenPassBuilder.h32
-rw-r--r--llvm/lib/Target/AMDGPU/R600TargetMachine.cpp9
-rw-r--r--llvm/lib/Target/AMDGPU/R600TargetMachine.h6
-rw-r--r--llvm/lib/Target/AMDGPU/SIDefines.h2
-rw-r--r--llvm/lib/Target/AMDGPU/SIISelLowering.cpp112
-rw-r--r--llvm/lib/Target/AMDGPU/SIISelLowering.h5
-rw-r--r--llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp76
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstrInfo.cpp51
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstrInfo.td10
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstructions.td5
-rw-r--r--llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp64
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp45
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h3
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTInfo.h35
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp540
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.h79
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/CMakeLists.txt1
-rw-r--r--llvm/lib/Target/AMDGPU/VOP1Instructions.td40
-rw-r--r--llvm/lib/Target/ARM/ARMISelLowering.cpp14
-rw-r--r--llvm/lib/Target/BPF/BPFMIChecking.cpp9
-rw-r--r--llvm/lib/Target/DirectX/DXILOpLowering.cpp2
-rw-r--r--llvm/lib/Target/Hexagon/HexagonISelLowering.cpp15
-rw-r--r--llvm/lib/Target/Hexagon/HexagonISelLowering.h2
-rw-r--r--llvm/lib/Target/Hexagon/HexagonPatterns.td6
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp13
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchISelLowering.h4
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchInstrInfo.td6
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchSubtarget.h5
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp4
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.h1
-rw-r--r--llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h9
-rw-r--r--llvm/lib/Target/Mips/Mips32r6InstrInfo.td14
-rw-r--r--llvm/lib/Target/Mips/MipsAsmPrinter.cpp11
-rw-r--r--llvm/lib/Target/Mips/MipsISelLowering.cpp14
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp161
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXInstrInfo.td100
-rw-r--r--llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp22
-rw-r--r--llvm/lib/Target/PowerPC/PPCFastISel.cpp14
-rw-r--r--llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp29
-rw-r--r--llvm/lib/Target/PowerPC/PPCISelLowering.cpp2
-rw-r--r--llvm/lib/Target/PowerPC/PPCInstrInfo.cpp6
-rw-r--r--llvm/lib/Target/PowerPC/PPCInstrInfo.td2
-rw-r--r--llvm/lib/Target/PowerPC/PPCLowerMASSVEntries.cpp4
-rw-r--r--llvm/lib/Target/PowerPC/PPCMergeStringPool.cpp37
-rw-r--r--llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp1
-rw-r--r--llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp18
-rw-r--r--llvm/lib/Target/RISCV/RISCVFeatures.td11
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.cpp37
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.h5
-rw-r--r--llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp138
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfo.td4
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoV.td18
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td25
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td12
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td38
-rw-r--r--llvm/lib/Target/RISCV/RISCVProcessors.td15
-rw-r--r--llvm/lib/Target/RISCV/RISCVSchedSiFive7.td13
-rw-r--r--llvm/lib/Target/RISCV/RISCVSchedSiFiveP400.td1
-rw-r--r--llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td13
-rw-r--r--llvm/lib/Target/RISCV/RISCVScheduleV.td16
-rw-r--r--llvm/lib/Target/RISCV/RISCVSubtarget.h4
-rw-r--r--llvm/lib/Target/RISCV/RISCVTargetMachine.cpp19
-rw-r--r--llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp25
-rw-r--r--llvm/lib/Target/SPIRV/CMakeLists.txt1
-rw-r--r--llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVInstPrinter.cpp13
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp23
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp37
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVBuiltins.td9
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp6
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp4
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp40
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp22
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVISelLowering.h9
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVInlineAsmLowering.cpp46
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVInlineAsmLowering.h33
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVInstrInfo.cpp13
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVInstrInfo.h1
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVInstrInfo.td15
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp16
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVPostLegalizer.cpp3
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp150
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVRegisterBanks.td2
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVRegisterInfo.td8
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp1
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVSubtarget.h6
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td5
-rw-r--r--llvm/lib/Target/VE/VVPNodes.def4
-rw-r--r--llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h2
-rw-r--r--llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.cpp1
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp4
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp12
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp17
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td24
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td5
-rw-r--r--llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp2
-rw-r--r--llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp3
-rw-r--r--llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimizationForImmediate.def6
-rw-r--r--llvm/lib/Target/X86/X86.td12
-rw-r--r--llvm/lib/Target/X86/X86FixupBWInsts.cpp7
-rw-r--r--llvm/lib/Target/X86/X86FlagsCopyLowering.cpp500
-rw-r--r--llvm/lib/Target/X86/X86ISelDAGToDAG.cpp37
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp349
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.h19
-rw-r--r--llvm/lib/Target/X86/X86Instr3DNow.td3
-rw-r--r--llvm/lib/Target/X86/X86InstrAVX512.td91
-rw-r--r--llvm/lib/Target/X86/X86InstrConditionalCompare.td46
-rw-r--r--llvm/lib/Target/X86/X86InstrFragments.td27
-rw-r--r--llvm/lib/Target/X86/X86InstrFragmentsSIMD.td11
-rw-r--r--llvm/lib/Target/X86/X86InstrInfo.cpp57
-rw-r--r--llvm/lib/Target/X86/X86InstrInfo.h3
-rw-r--r--llvm/lib/Target/X86/X86InstrPredicates.td3
-rw-r--r--llvm/lib/Target/X86/X86IntrinsicsInfo.h27
-rw-r--r--llvm/lib/Target/X86/X86MCInstLower.cpp40
-rw-r--r--llvm/lib/Target/X86/X86SelectionDAGInfo.cpp60
-rw-r--r--llvm/lib/Target/X86/X86Subtarget.h8
-rw-r--r--llvm/lib/TargetParser/Host.cpp9
-rw-r--r--llvm/lib/TargetParser/RISCVISAInfo.cpp8
-rw-r--r--llvm/lib/TargetParser/X86TargetParser.cpp13
-rw-r--r--llvm/lib/TextAPI/Utils.cpp2
-rw-r--r--llvm/lib/Transforms/Coroutines/CoroElide.cpp6
-rw-r--r--llvm/lib/Transforms/Coroutines/CoroFrame.cpp82
-rw-r--r--llvm/lib/Transforms/Coroutines/CoroSplit.cpp2
-rw-r--r--llvm/lib/Transforms/IPO/Attributor.cpp2
-rw-r--r--llvm/lib/Transforms/IPO/AttributorAttributes.cpp10
-rw-r--r--llvm/lib/Transforms/IPO/FunctionImport.cpp273
-rw-r--r--llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp16
-rw-r--r--llvm/lib/Transforms/IPO/OpenMPOpt.cpp4
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp11
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp20
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp3
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp11
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp6
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp47
-rw-r--r--llvm/lib/Transforms/InstCombine/InstructionCombining.cpp33
-rw-r--r--llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp8
-rw-r--r--llvm/lib/Transforms/Scalar/ConstraintElimination.cpp6
-rw-r--r--llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp6
-rw-r--r--llvm/lib/Transforms/Scalar/DivRemPairs.cpp5
-rw-r--r--llvm/lib/Transforms/Scalar/GVNSink.cpp10
-rw-r--r--llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp10
-rw-r--r--llvm/lib/Transforms/Scalar/NaryReassociate.cpp1
-rw-r--r--llvm/lib/Transforms/Scalar/SROA.cpp4
-rw-r--r--llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp27
-rw-r--r--llvm/lib/Transforms/Utils/CallPromotionUtils.cpp32
-rw-r--r--llvm/lib/Transforms/Utils/CloneFunction.cpp7
-rw-r--r--llvm/lib/Transforms/Utils/FunctionComparator.cpp3
-rw-r--r--llvm/lib/Transforms/Utils/InlineFunction.cpp6
-rw-r--r--llvm/lib/Transforms/Utils/LowerSwitch.cpp2
-rw-r--r--llvm/lib/Transforms/Utils/SCCPSolver.cpp21
-rw-r--r--llvm/lib/Transforms/Utils/SimplifyCFG.cpp40
-rw-r--r--llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp7
-rw-r--r--llvm/lib/Transforms/Vectorize/LoopVectorize.cpp61
-rw-r--r--llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp311
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlan.cpp59
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlan.h22
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h49
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp26
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanTransforms.h3
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanValue.h5
-rw-r--r--llvm/lib/Transforms/Vectorize/VectorCombine.cpp234
-rw-r--r--llvm/lib/WindowsManifest/WindowsManifestMerger.cpp1
-rw-r--r--llvm/runtimes/CMakeLists.txt23
-rw-r--r--llvm/test/Analysis/CostModel/AArch64/cast.ll2
-rw-r--r--llvm/test/Analysis/CostModel/AArch64/cttz_elts.ll32
-rw-r--r--llvm/test/Analysis/CostModel/AMDGPU/shufflevector.ll1621
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/cmp-select.ll258
-rw-r--r--llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll64
-rw-r--r--llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll187
-rw-r--r--llvm/test/Analysis/LoopAccessAnalysis/forward-loop-independent.ll37
-rw-r--r--llvm/test/Analysis/LoopAccessAnalysis/invariant-dependence-before.ll114
-rw-r--r--llvm/test/Analysis/LoopAccessAnalysis/loop-invariant-dep-with-backedge-taken-count.ll14
-rw-r--r--llvm/test/Analysis/LoopAccessAnalysis/non-constant-strides-backward.ll7
-rw-r--r--llvm/test/Analysis/LoopAccessAnalysis/pr64637.ll30
-rw-r--r--llvm/test/Analysis/LoopAccessAnalysis/select-dependence.ll55
-rw-r--r--llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll346
-rw-r--r--llvm/test/Analysis/LoopAccessAnalysis/symbolic-stride.ll121
-rw-r--r--llvm/test/Analysis/LoopCacheAnalysis/PowerPC/LoopnestFixedSize.ll6
-rw-r--r--llvm/test/Analysis/LoopCacheAnalysis/PowerPC/compute-cost.ll8
-rw-r--r--llvm/test/Analysis/LoopCacheAnalysis/PowerPC/loads-store.ll4
-rw-r--r--llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matmul.ll4
-rw-r--r--llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matvecmul.ll4
-rw-r--r--llvm/test/Analysis/LoopCacheAnalysis/PowerPC/multi-store.ll2
-rw-r--r--llvm/test/Analysis/LoopCacheAnalysis/PowerPC/single-store.ll4
-rw-r--r--llvm/test/Analysis/LoopCacheAnalysis/PowerPC/stencil.ll4
-rw-r--r--llvm/test/Analysis/LoopCacheAnalysis/compute-cost.ll19
-rw-r--r--llvm/test/Analysis/LoopCacheAnalysis/interchange-cost-beneficial.ll62
-rw-r--r--llvm/test/Analysis/ScalarEvolution/exhaustive-trip-counts.ll152
-rw-r--r--llvm/test/Analysis/ScalarEvolution/exit-count-non-strict.ll50
-rw-r--r--llvm/test/Assembler/ConstantExprFold.ll32
-rw-r--r--llvm/test/Assembler/flags.ll102
-rw-r--r--llvm/test/CMakeLists.txt6
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir252
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/combine-select.mir42
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-vhadd.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/bitfield-insert.ll11
-rw-r--r--llvm/test/CodeGen/AArch64/exp10-libcall-names.ll39
-rw-r--r--llvm/test/CodeGen/AArch64/frem-power2.ll92
-rw-r--r--llvm/test/CodeGen/AArch64/hadd-combine.ll67
-rw-r--r--llvm/test/CodeGen/AArch64/intrinsic-cttz-elts-sve.ll146
-rw-r--r--llvm/test/CodeGen/AArch64/movimm-expand-ldst.ll95
-rw-r--r--llvm/test/CodeGen/AArch64/movimm-expand-ldst.mir34
-rw-r--r--llvm/test/CodeGen/AArch64/neon-dotreduce.ll536
-rw-r--r--llvm/test/CodeGen/AArch64/pr58431.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/selectopt-not.ll326
-rw-r--r--llvm/test/CodeGen/AArch64/sign-return-address-tailcall.ll32
-rw-r--r--llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll197
-rw-r--r--llvm/test/CodeGen/AArch64/sve-calling-convention.ll124
-rw-r--r--llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve-pr92779.ll36
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-compares.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce-fa64.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-select.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compares.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-immediates.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-log.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mla-neon-fa64.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-select.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-limit-duplane.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-log-reduce.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-rev.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/sve-streaming-mode-test-register-mov.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/trunc-to-tbl.ll118
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir47
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap-gfx11.mir89
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll1515
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll1577
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fdiv.ll130
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll152
-rw-r--r--llvm/test/CodeGen/AMDGPU/call-defs-mode-register.ll57
-rw-r--r--llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll258
-rw-r--r--llvm/test/CodeGen/AMDGPU/dpp_combine.ll15
-rw-r--r--llvm/test/CodeGen/AMDGPU/fmaximum3.ll3349
-rw-r--r--llvm/test/CodeGen/AMDGPU/fminimum3.ll3349
-rw-r--r--llvm/test/CodeGen/AMDGPU/fp_to_sint.ll395
-rw-r--r--llvm/test/CodeGen/AMDGPU/fp_to_uint.ll395
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll76
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll76
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll3872
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll3498
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll3498
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll3872
-rw-r--r--llvm/test/CodeGen/AMDGPU/kernel_code_t_recurse.ll24
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.pops.exiting.wave.id.ll34
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll820
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll820
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.exp.ll1592
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.exp10.ll1592
-rw-r--r--llvm/test/CodeGen/AMDGPU/lower-ctor-dtor-constexpr-alias.ll16
-rw-r--r--llvm/test/CodeGen/AMDGPU/lower-ctor-dtor.ll14
-rw-r--r--llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll109
-rw-r--r--llvm/test/CodeGen/AMDGPU/memory-legalizer-fence-mmra-global.ll1716
-rw-r--r--llvm/test/CodeGen/AMDGPU/memory-legalizer-fence-mmra-local.ll1296
-rw-r--r--llvm/test/CodeGen/AMDGPU/permute_i8.ll25
-rw-r--r--llvm/test/CodeGen/AMDGPU/preload-kernargs.ll2231
-rw-r--r--llvm/test/CodeGen/AMDGPU/sad.ll369
-rw-r--r--llvm/test/CodeGen/AMDGPU/shl.ll216
-rw-r--r--llvm/test/CodeGen/AMDGPU/simplify-libcalls.ll7
-rw-r--r--llvm/test/CodeGen/AMDGPU/trap-abis.ll150
-rw-r--r--llvm/test/CodeGen/ARM/exp10-libcall-names.ll39
-rw-r--r--llvm/test/CodeGen/ARM/frem-power2.ll24
-rw-r--r--llvm/test/CodeGen/BPF/xadd.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/readsteadycounter.ll11
-rw-r--r--llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll96
-rw-r--r--llvm/test/CodeGen/Mips/mipsr6-minmaxnum.ll32
-rw-r--r--llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll73
-rw-r--r--llvm/test/CodeGen/Mips/msa/inline-asm.ll16
-rw-r--r--llvm/test/CodeGen/NVPTX/param-overalign.ll2
-rw-r--r--llvm/test/CodeGen/NVPTX/st-param-imm.ll2002
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-tocdata-fastisel.ll15
-rw-r--r--llvm/test/CodeGen/PowerPC/ctrloop-le.ll15
-rw-r--r--llvm/test/CodeGen/PowerPC/mergeable-string-pool-pr92991.ll20
-rw-r--r--llvm/test/CodeGen/PowerPC/pr92233.ll19
-rw-r--r--llvm/test/CodeGen/PowerPC/toc-data-no-data-sections.ll18
-rw-r--r--llvm/test/CodeGen/PowerPC/toc-data.ll75
-rw-r--r--llvm/test/CodeGen/PowerPC/vec_shuffle.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/constbarrier-rv32.ll60
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/constbarrier-rv64.ll36
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/freeze.ll201
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv32.mir83
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv64.mir120
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv32.mir90
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv64.mir90
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-freeze-rv32.mir62
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-freeze-rv64.mir96
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-frem-rv32.mir130
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-frem-rv64.mir130
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-lshr-rv64.mir26
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv32.mir404
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv64.mir358
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/libcalls.ll51
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/shift.ll48
-rw-r--r--llvm/test/CodeGen/RISCV/O0-pipeline.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/O3-pipeline.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/attributes.ll20
-rw-r--r--llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll18
-rw-r--r--llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll26
-rw-r--r--llvm/test/CodeGen/RISCV/loop-strength-reduce-loop-invar.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/mul.ll149
-rw-r--r--llvm/test/CodeGen/RISCV/pr69586.ll210
-rw-r--r--llvm/test/CodeGen/RISCV/pr90730.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zba.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rv64zba.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/abs-vp.ll11
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll84
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll234
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll258
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll39
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll92
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/compressstore.ll53
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/concat-vector-insert-elt.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll18
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll129
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll182
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll35
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll36
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll410
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll234
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll7
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll94
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll1150
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll523
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll1066
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll17
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll23
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll94
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll31
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll31
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll114
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll (renamed from llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-costrained-sdnode.ll)0
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll19
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll29
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll181
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll69
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll192
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access-zve32x.ll76
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll903
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll105
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll127
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll1243
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll1938
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll41
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll71
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll140
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll20
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll52
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll58
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll104
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll94
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll94
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll94
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll55
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll42
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll3
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll18
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-transpose.ll25
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll156
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll5
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll11
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll29
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll70
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll29
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll29
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll70
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll80
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll18
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll87
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll86
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll15
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll15
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll15
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll15
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll15
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll21
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll21
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/floor-vp.ll92
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll26
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll99
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll26
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll99
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll34
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll132
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll (renamed from llvm/test/CodeGen/RISCV/rvv/fround-costrained-sdnode.ll)0
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll135
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/masked-tama.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll31
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll57
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll124
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/pr63596.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/rint-vp.ll137
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/round-vp.ll143
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll143
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll143
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll342
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll18
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll40
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll1
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll68
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/stepvector.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll26
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll11
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vcpop.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll50
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll46
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll192
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-splice.ll58
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfirst.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll38
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll156
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll144
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll149
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll38
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll165
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll141
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll26
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll26
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmfeq.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmfge.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmfgt.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmfle.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmflt.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmfne.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsbf.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmseq.ll178
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsge.ll178
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll176
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsgt.ll178
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll178
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsif.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsle.ll178
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsleu.ll178
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmslt.ll178
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsltu.ll178
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsne.ll178
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsof.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll80
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vpload.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll44
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vpstore.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll76
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll112
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll48
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll48
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/sextw-removal.ll19
-rw-r--r--llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll5
-rw-r--r--llvm/test/CodeGen/SPIRV/execution-mode-reqd_work_group_size.ll35
-rw-r--r--llvm/test/CodeGen/SPIRV/execution-mode-work_group_size_hint.ll34
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_inline_assembly/inline_asm.ll93
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_shader_clock/shader_clock.ll59
-rw-r--r--llvm/test/CodeGen/SPIRV/phi-ptrcast-dominate.ll94
-rw-r--r--llvm/test/CodeGen/Thumb/shift-and.ll5
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/add_reduce.mir100
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/begin-vpt-without-inst.mir55
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/biquad-cascade-default.mir192
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/biquad-cascade-optsize-strd-lr.mir167
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/biquad-cascade-optsize.mir183
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/cmplx_cong.mir49
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-mov.mir42
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/count_dominates_start.mir100
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/ctlz-non-zeros.mir212
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/disjoint-vcmp.mir96
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/dont-ignore-vctp.mir51
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/dont-remove-loop-update.mir74
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/end-positive-offset.mir109
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/extract-element.mir61
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/incorrect-sub-16.mir68
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/incorrect-sub-32.mir68
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/incorrect-sub-8.mir68
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-1.mir96
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-2.mir96
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-3.mir96
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-1.mir104
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-2.mir102
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/invariant-qreg.mir231
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-chain-store.mir124
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-chain.mir69
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-itercount.mir51
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-mov.mir161
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-random.mir67
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/iv-two-vcmp-reordered.mir99
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/iv-two-vcmp.mir99
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/iv-vcmp.mir72
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/livereg-no-loop-def.mir63
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-dec-copy-chain.mir258
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-dec-copy-prev-iteration.mir285
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-dec-liveout.mir283
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/lstp-insertion-position.mir187
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/massive.mir71
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/matrix-debug.mir150
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/matrix.mir317
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dls.mir42
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dlstp.mir114
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-lr-terminator.mir64
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/move-def-before-start.mir86
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/move-start-after-def.mir86
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/multi-block-cond-iter-count.mir244
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/multi-cond-iter-count.mir74
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiblock-massive.mir100
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiple-do-loops.mir426
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec-cbnz.mir261
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec-reorder.mir151
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec.mir262
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-vpsel-liveout.mir61
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-load.mir95
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-store.mir70
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/out-of-range-cbz.mir296
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/predicated-invariant.mir65
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/predicated-liveout.mir67
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions-vpt-liveout.mir392
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/remove-elem-moves.mir176
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-while.mir61
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/revertcallearly.mir76
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-def-no-mov.mir50
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-retaining.mir122
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/size-limit.mir67
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/skip-debug.mir121
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/skip-vpt-debug.mir139
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/subreg-liveness.mir87
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/unpredicated-max.mir78
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/unrolled-and-vector.mir284
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-def.mir59
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-use.mir61
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-retaining.mir140
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-use-after.mir50
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/vaddv.mir1524
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/vcmp-vpst-combination-across-blocks.mir234
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-add-operand-liveout.mir91
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-in-vpt-2.mir73
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-in-vpt.mir323
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-subi3.mir52
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-subri.mir52
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-subri12.mir52
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp16-reduce.mir102
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector_spill_in_loop.mir152
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/vmaxmin_vpred_r.mir74
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/vmldava_in_vpt.mir77
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-block-debug.mir1
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-blocks.mir658
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/while.mir48
-rw-r--r--llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll124
-rw-r--r--llvm/test/CodeGen/WebAssembly/fast-isel-call-indirect64.ll14
-rw-r--r--llvm/test/CodeGen/WebAssembly/function-pointer64.ll5
-rw-r--r--llvm/test/CodeGen/WebAssembly/half-precision.ll20
-rw-r--r--llvm/test/CodeGen/WebAssembly/lower-em-exceptions.ll6
-rw-r--r--llvm/test/CodeGen/X86/abds-vector-128.ll350
-rw-r--r--llvm/test/CodeGen/X86/abds-vector-256.ll72
-rw-r--r--llvm/test/CodeGen/X86/abdu-vector-128.ll280
-rw-r--r--llvm/test/CodeGen/X86/abdu-vector-256.ll72
-rw-r--r--llvm/test/CodeGen/X86/apx/ccmp-flags-copy-lowering.mir8
-rw-r--r--llvm/test/CodeGen/X86/apx/ccmp.ll1102
-rw-r--r--llvm/test/CodeGen/X86/apx/ctest.ll905
-rw-r--r--llvm/test/CodeGen/X86/avx512-cmp-kor-sequence.ll2
-rw-r--r--llvm/test/CodeGen/X86/avx512-gather-scatter-intrin-deprecated.ll24
-rw-r--r--llvm/test/CodeGen/X86/avx512-gather-scatter-intrin.ll24
-rw-r--r--llvm/test/CodeGen/X86/avx512er-intrinsics.ll306
-rw-r--r--llvm/test/CodeGen/X86/coalescer-add-implicit-def-subreg-to-reg-regression.ll45
-rw-r--r--llvm/test/CodeGen/X86/combine-srem.ll4
-rw-r--r--llvm/test/CodeGen/X86/crc32-target-feature.ll4
-rw-r--r--llvm/test/CodeGen/X86/exp10-libcall-names.ll40
-rw-r--r--llvm/test/CodeGen/X86/fat-lto-section.ll2
-rw-r--r--llvm/test/CodeGen/X86/freeze-binary.ll6
-rw-r--r--llvm/test/CodeGen/X86/funnel-shift.ll821
-rw-r--r--llvm/test/CodeGen/X86/insert-prefetch-invalid-instr.ll7
-rw-r--r--llvm/test/CodeGen/X86/issue76416.ll78
-rw-r--r--llvm/test/CodeGen/X86/midpoint-int-vec-128.ll669
-rw-r--r--llvm/test/CodeGen/X86/midpoint-int-vec-256.ll154
-rw-r--r--llvm/test/CodeGen/X86/misched-critical-path.ll35
-rw-r--r--llvm/test/CodeGen/X86/opt-pipeline.ll2
-rw-r--r--llvm/test/CodeGen/X86/pmul.ll11
-rw-r--r--llvm/test/CodeGen/X86/pr59305.ll69
-rw-r--r--llvm/test/CodeGen/X86/pr90703.ll21
-rw-r--r--llvm/test/CodeGen/X86/pr90844.ll17
-rw-r--r--llvm/test/CodeGen/X86/pr92569.ll29
-rw-r--r--llvm/test/CodeGen/X86/pr92720.ll15
-rw-r--r--llvm/test/CodeGen/X86/pr93000.ll44
-rw-r--r--llvm/test/CodeGen/X86/prefetch.ll17
-rw-r--r--llvm/test/CodeGen/X86/shrink_vmul.ll4
-rw-r--r--llvm/test/CodeGen/X86/speculative-load-hardening-gather.ll22
-rw-r--r--llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16.ll1
-rw-r--r--llvm/test/CodeGen/X86/stack-frame-layout-remarks.ll2
-rw-r--r--llvm/test/CodeGen/X86/unfoldMemoryOperand.mir2
-rw-r--r--llvm/test/CodeGen/X86/vec-strict-cmp-512-skx.ll40
-rw-r--r--llvm/test/CodeGen/X86/xray-custom-log.ll15
-rw-r--r--llvm/test/CodeGen/X86/xray-tail-call-sled.ll51
-rw-r--r--llvm/test/DebugInfo/X86/debug-names-types.ll24
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/mem-attr.ll15
-rw-r--r--llvm/test/Linker/darwin-target-variant.ll42
-rw-r--r--llvm/test/MC/AArch64/FP8/system-regs.s22
-rw-r--r--llvm/test/MC/AArch64/SVE/condition-codes.s (renamed from llvm/test/MC/AArch64/SVE/condtion-codes.s)0
-rw-r--r--llvm/test/MC/AArch64/SVE/sqdecd-diagnostics.s4
-rw-r--r--llvm/test/MC/AArch64/SVE/sqincp-diagnostics.s32
-rw-r--r--llvm/test/MC/AMDGPU/amd_kernel_code_t.s171
-rw-r--r--llvm/test/MC/AsmParser/assembler-expressions-inlineasm.ll16
-rw-r--r--llvm/test/MC/MachO/darwin-target-variant-reverse.ll2
-rw-r--r--llvm/test/MC/MachO/darwin-target-variant.ll2
-rw-r--r--llvm/test/MC/RISCV/attribute-arch.s2
-rw-r--r--llvm/test/MC/RISCV/rv32zaamo-invalid.s2
-rw-r--r--llvm/test/MC/RISCV/rv32zaamo-valid.s12
-rw-r--r--llvm/test/MC/RISCV/rv32zalrsc-invalid.s2
-rw-r--r--llvm/test/MC/RISCV/rv32zalrsc-valid.s12
-rw-r--r--llvm/test/MC/RISCV/rv64zaamo-invalid.s2
-rw-r--r--llvm/test/MC/RISCV/rv64zaamo-valid.s8
-rw-r--r--llvm/test/MC/RISCV/rv64zalrsc-invalid.s2
-rw-r--r--llvm/test/MC/RISCV/rv64zalrsc-valid.s8
-rw-r--r--llvm/test/MC/WebAssembly/simd-encodings.s6
-rw-r--r--llvm/test/MC/X86/apx/ccmp-reloc.s14
-rw-r--r--llvm/test/Other/constant-fold-gep.ll14
-rw-r--r--llvm/test/Other/optimize-inrange-gep.ll2
-rw-r--r--llvm/test/TableGen/predicate-patfags.td30
-rw-r--r--llvm/test/ThinLTO/X86/funcimport-stats.ll4
-rw-r--r--llvm/test/ThinLTO/X86/import_callee_declaration.ll221
-rw-r--r--llvm/test/ThinLTO/X86/memprof-tailcall-nonunique.ll41
-rw-r--r--llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll112
-rw-r--r--llvm/test/Transforms/Attributor/issue87856.ll61
-rw-r--r--llvm/test/Transforms/Attributor/nofpclass.ll2
-rw-r--r--llvm/test/Transforms/ConstraintElimination/sext-unsigned-predicates.ll6
-rw-r--r--llvm/test/Transforms/ConstraintElimination/transfer-signed-facts-to-unsigned.ll6
-rw-r--r--llvm/test/Transforms/Coroutines/coro-await-suspend-handle-in-ramp.ll59
-rw-r--r--llvm/test/Transforms/Coroutines/coro-debug-frame-variable-inlined.ll (renamed from llvm/test/Transforms/Coroutines/coro-debug-frame-variable-O1.ll)4
-rw-r--r--llvm/test/Transforms/Coroutines/coro-lifetime-end.ll142
-rw-r--r--llvm/test/Transforms/Coroutines/no-suspend.ll2
-rw-r--r--llvm/test/Transforms/CorrelatedValuePropagation/mul.ll6
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/simple.ll13
-rw-r--r--llvm/test/Transforms/DivRemPairs/AMDGPU/div-rem-pairs.ll129
-rw-r--r--llvm/test/Transforms/DivRemPairs/AMDGPU/lit.local.cfg2
-rw-r--r--llvm/test/Transforms/EntryExitInstrumenter/mcount-aix.ll12
-rw-r--r--llvm/test/Transforms/EntryExitInstrumenter/mcount.ll157
-rw-r--r--llvm/test/Transforms/FunctionAttrs/nocapture.ll8
-rw-r--r--llvm/test/Transforms/FunctionAttrs/nonnull.ll166
-rw-r--r--llvm/test/Transforms/FunctionAttrs/norecurse.ll31
-rw-r--r--llvm/test/Transforms/FunctionAttrs/read-write-scc.ll4
-rw-r--r--llvm/test/Transforms/FunctionAttrs/willreturn.ll10
-rw-r--r--llvm/test/Transforms/FunctionImport/funcimport.ll5
-rw-r--r--llvm/test/Transforms/FunctionSpecialization/function-specialization-constant-expression.ll6
-rw-r--r--llvm/test/Transforms/GVN/PRE/load-pre-licm.ll2
-rw-r--r--llvm/test/Transforms/GVN/PRE/phi-translate-2.ll4
-rw-r--r--llvm/test/Transforms/GVNHoist/infinite-loop-indirect.ll6
-rw-r--r--llvm/test/Transforms/GVNSink/sink-common-code-dbg.ll112
-rw-r--r--llvm/test/Transforms/GVNSink/sink-ignore-dbg-intrinsics.ll92
-rw-r--r--llvm/test/Transforms/IndVarSimplify/AArch64/widen-loop-comp.ll26
-rw-r--r--llvm/test/Transforms/IndVarSimplify/D108043.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/eliminate-exit-no-dl.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/floating-point-small-iv.ll4
-rw-r--r--llvm/test/Transforms/IndVarSimplify/lftr-dead-ivs.ll6
-rw-r--r--llvm/test/Transforms/IndVarSimplify/lftr.ll2
-rw-r--r--llvm/test/Transforms/Inline/access-attributes-prop.ll18
-rw-r--r--llvm/test/Transforms/Inline/inline_invoke.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/addrspacecast.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/align-addr.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/and-fcmp.ll27
-rw-r--r--llvm/test/Transforms/InstCombine/binop-select-cast-of-select-cond.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/cast_ptr.ll151
-rw-r--r--llvm/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/constant-fold-gep.ll40
-rw-r--r--llvm/test/Transforms/InstCombine/fma.ll31
-rw-r--r--llvm/test/Transforms/InstCombine/fmul.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/force-opaque-ptr.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/fortify-folding.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/freeze.ll22
-rw-r--r--llvm/test/Transforms/InstCombine/gep-custom-dl.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/getelementptr.ll32
-rw-r--r--llvm/test/Transforms/InstCombine/hoist-xor-by-constant-from-xor-by-value.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/known-bits.ll38
-rw-r--r--llvm/test/Transforms/InstCombine/load-cmp.ll17
-rw-r--r--llvm/test/Transforms/InstCombine/loadstore-alignment.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/memchr-2.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/memchr-4.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/memchr-6.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/memchr-7.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/memchr-8.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/memchr-9.ll36
-rw-r--r--llvm/test/Transforms/InstCombine/memchr.ll12
-rw-r--r--llvm/test/Transforms/InstCombine/memcmp-8.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/memcpy-from-global.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/memrchr-3.ll20
-rw-r--r--llvm/test/Transforms/InstCombine/memrchr-4.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/merging-multiple-stores-into-successor.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/objsize.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/or-fcmp.ll49
-rw-r--r--llvm/test/Transforms/InstCombine/pow-to-ldexp.ll69
-rw-r--r--llvm/test/Transforms/InstCombine/pr25342.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/pr33453.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/pr38984-inseltpoison.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/pr38984.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/pr83947.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/ptr-replace-alloca.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/rem.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/select-and-or.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/shl-bo.ll37
-rw-r--r--llvm/test/Transforms/InstCombine/simplify-libcalls-i16.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/simplify-libcalls.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/snprintf-2.ll48
-rw-r--r--llvm/test/Transforms/InstCombine/snprintf-3.ll48
-rw-r--r--llvm/test/Transforms/InstCombine/snprintf-4.ll30
-rw-r--r--llvm/test/Transforms/InstCombine/stpcpy-1.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/stpcpy_chk-1.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/stpncpy-1.ll35
-rw-r--r--llvm/test/Transforms/InstCombine/str-int-2.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/str-int-3.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/str-int-4.ll40
-rw-r--r--llvm/test/Transforms/InstCombine/str-int-5.ll50
-rw-r--r--llvm/test/Transforms/InstCombine/str-int.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/strcall-bad-sig.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/strcall-no-nul.ll20
-rw-r--r--llvm/test/Transforms/InstCombine/strchr-1.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/strchr-3.ll12
-rw-r--r--llvm/test/Transforms/InstCombine/strcmp-4.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/strlcpy-1.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/strlen-1.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/strlen-6.ll18
-rw-r--r--llvm/test/Transforms/InstCombine/strpbrk-1.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/strrchr-1.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/strrchr-3.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/strstr-1.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/vec_demanded_elts-inseltpoison.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/vec_demanded_elts.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll17
-rw-r--r--llvm/test/Transforms/InstCombine/vec_shuffle.ll51
-rw-r--r--llvm/test/Transforms/InstCombine/wcslen-1.ll4
-rw-r--r--llvm/test/Transforms/InstSimplify/ConstProp/gep-alias.ll2
-rw-r--r--llvm/test/Transforms/InstSimplify/ConstProp/gep-constanfolding-error.ll3
-rw-r--r--llvm/test/Transforms/InstSimplify/ConstProp/gep.ll6
-rw-r--r--llvm/test/Transforms/InstSimplify/ConstProp/icmp-global.ll10
-rw-r--r--llvm/test/Transforms/InstSimplify/compare.ll2
-rw-r--r--llvm/test/Transforms/InstSimplify/known-non-zero.ll183
-rw-r--r--llvm/test/Transforms/InstSimplify/past-the-end.ll4
-rw-r--r--llvm/test/Transforms/InstSimplify/shufflevector.ll65
-rw-r--r--llvm/test/Transforms/InterleavedAccess/AArch64/fixed-deinterleave-intrinsics.ll4
-rw-r--r--llvm/test/Transforms/LICM/scalar-promote-unwind.ll6
-rw-r--r--llvm/test/Transforms/LoopInterchange/pr43176-move-to-new-latch.ll93
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll2
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll20
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/X86/pr40514.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll532
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll18
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/streaming-compatible-sve-no-maximize-bandwidth.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/mve-known-trip-count.ll36
-rw-r--r--llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll22
-rw-r--r--llvm/test/Transforms/LoopVectorize/LoongArch/loongarch-interleaved.ll39
-rw-r--r--llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization-profitability.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll70
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll118
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll20
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-gather-scatter.ll66
-rw-r--r--llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll189
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/pr23997.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/pr42674.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/pr54634.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/outer_loop_test1.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll101
-rw-r--r--llvm/test/Transforms/LoopVectorize/uniform-blend.ll5
-rw-r--r--llvm/test/Transforms/LoopVectorize/unused-blend-mask-for-first-operand.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/vplan-printing.ll52
-rw-r--r--llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll19
-rw-r--r--llvm/test/Transforms/LoopVersioning/add-phi-update-users.ll6
-rw-r--r--llvm/test/Transforms/LoopVersioning/bound-check-partially-known.ll10
-rw-r--r--llvm/test/Transforms/LowerSwitch/93152.ll97
-rw-r--r--llvm/test/Transforms/LowerTypeTests/cfi-unwind-direct-call.ll6
-rw-r--r--llvm/test/Transforms/MemProfContextDisambiguation/tailcall-nonunique.ll35
-rw-r--r--llvm/test/Transforms/NaryReassociate/preserving-debugloc-add-mul.ll69
-rw-r--r--llvm/test/Transforms/NewGVN/2011-09-07-TypeIdFor.ll14
-rw-r--r--llvm/test/Transforms/NewGVN/loadforward.ll2
-rw-r--r--llvm/test/Transforms/OpenMP/custom_state_machines.ll2
-rw-r--r--llvm/test/Transforms/OpenMP/custom_state_machines_pre_lto.ll2
-rw-r--r--llvm/test/Transforms/OpenMP/custom_state_machines_remarks.ll10
-rw-r--r--llvm/test/Transforms/OpenMP/spmdization.ll4
-rw-r--r--llvm/test/Transforms/OpenMP/spmdization_guarding.ll4
-rw-r--r--llvm/test/Transforms/OpenMP/spmdization_remarks.ll14
-rw-r--r--llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll343
-rw-r--r--llvm/test/Transforms/PhaseOrdering/SystemZ/sub-xor.ll48
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/excessive-unrolling.ll180
-rw-r--r--llvm/test/Transforms/SCCP/2009-09-24-byval-ptr.ll2
-rw-r--r--llvm/test/Transforms/SCCP/apint-bigint2.ll6
-rw-r--r--llvm/test/Transforms/SCCP/ip-add-range-to-call.ll2
-rw-r--r--llvm/test/Transforms/SCCP/range-mul-nuw-nsw-flags.ll26
-rw-r--r--llvm/test/Transforms/SCCP/range-with-undef.ll118
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AArch64/gather-cost.ll8
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat-inseltpoison.ll64
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat.ll64
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AMDGPU/crash_extract_subvector_cost.ll13
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AMDGPU/phi-result-use-order.ll46
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AMDGPU/reduction.ll130
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/math-function.ll144
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/scatter-vectorize-reversed.ll30
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/alternate-calls-inseltpoison.ll28
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/alternate-calls.ll28
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/blending-shuffle.ll7
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/hadd-inseltpoison.ll74
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/hadd.ll74
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/pr47623.ll28
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/scalarazied-result.ll4
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/vector_gep.ll2
-rw-r--r--llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/streaming-compatible-expand-masked-gather-scatter.ll2
-rw-r--r--llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/reorder-gep.ll8
-rw-r--r--llvm/test/Transforms/SimplifyCFG/HoistCode.ll60
-rw-r--r--llvm/test/Transforms/SimplifyCFG/switch-dead-default-lookup-table.ll61
-rw-r--r--llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll89
-rw-r--r--llvm/test/Transforms/Util/add-TLI-mappings.ll8
-rw-r--r--llvm/test/Transforms/VectorCombine/AArch64/shuffletoidentity.ll390
-rw-r--r--llvm/test/Transforms/VectorCombine/X86/select-shuffle.ll9
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_function_alt.ll6
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_function_alt.ll.expected11
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_function_alt.s32
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_llc_test_checks/amdgpu_function_alt.test22
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/phi-labels.ll.expected36
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_test_checks/phi-labels.test2
-rw-r--r--llvm/test/tools/llvm-driver/symlink-call.test2
-rw-r--r--llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-clear-upper-regs.s791
-rw-r--r--llvm/test/tools/llvm-mca/AArch64/Neoverse/V2-clear-upper-regs.s812
-rw-r--r--llvm/test/tools/llvm-mca/X86/call-latency.s58
-rw-r--r--llvm/test/tools/llvm-objcopy/tool-options.test6
-rw-r--r--llvm/test/tools/llvm-profdata/show-order-error.proftext27
-rw-r--r--llvm/test/tools/llvm-profdata/show-order.proftext11
-rw-r--r--llvm/test/tools/llvm-profgen/profile-density.test16
-rw-r--r--llvm/test/tools/llvm-readobj/ELF/note-core-ntfile.test53
-rw-r--r--llvm/test/tools/llvm-reduce/reduce-flags.ll18
-rw-r--r--llvm/test/tools/llvm-split/AMDGPU/address-taken-externalize-with-call.ll46
-rw-r--r--llvm/test/tools/llvm-split/AMDGPU/address-taken-externalize.ll37
-rw-r--r--llvm/test/tools/llvm-split/AMDGPU/debug-name-hiding.ll20
-rw-r--r--llvm/test/tools/llvm-split/AMDGPU/kernels-alias-dependencies.ll45
-rw-r--r--llvm/test/tools/llvm-split/AMDGPU/kernels-cost-ranking.ll54
-rw-r--r--llvm/test/tools/llvm-split/AMDGPU/kernels-dependencies.ll50
-rw-r--r--llvm/test/tools/llvm-split/AMDGPU/kernels-dependency-duplication.ll41
-rw-r--r--llvm/test/tools/llvm-split/AMDGPU/kernels-dependency-external.ll64
-rw-r--r--llvm/test/tools/llvm-split/AMDGPU/kernels-dependency-indirect.ll76
-rw-r--r--llvm/test/tools/llvm-split/AMDGPU/kernels-dependency-overridable.ll40
-rw-r--r--llvm/test/tools/llvm-split/AMDGPU/kernels-global-variables-noexternal.ll42
-rw-r--r--llvm/test/tools/llvm-split/AMDGPU/kernels-global-variables.ll44
-rw-r--r--llvm/test/tools/llvm-split/AMDGPU/kernels-load-balancing.ll75
-rw-r--r--llvm/test/tools/llvm-split/AMDGPU/kernels-no-dependencies.ll39
-rw-r--r--llvm/test/tools/llvm-split/AMDGPU/large-kernels-merging.ll98
-rw-r--r--llvm/test/tools/llvm-split/AMDGPU/lit.local.cfg2
-rw-r--r--llvm/tools/dsymutil/MachODebugMapParser.cpp2
-rw-r--r--llvm/tools/llvm-cxxfilt/CMakeLists.txt4
-rw-r--r--llvm/tools/llvm-link/llvm-link.cpp6
-rw-r--r--llvm/tools/llvm-lto/llvm-lto.cpp5
-rw-r--r--llvm/tools/llvm-mc/llvm-mc.cpp3
-rw-r--r--llvm/tools/llvm-mca/llvm-mca.cpp7
-rw-r--r--llvm/tools/llvm-ml/llvm-ml.cpp3
-rw-r--r--llvm/tools/llvm-objcopy/ObjcopyOptions.cpp6
-rw-r--r--llvm/tools/llvm-profdata/llvm-profdata.cpp64
-rw-r--r--llvm/tools/llvm-profgen/PerfReader.cpp2
-rw-r--r--llvm/tools/llvm-profgen/ProfileGenerator.cpp147
-rw-r--r--llvm/tools/llvm-profgen/ProfileGenerator.h9
-rw-r--r--llvm/tools/llvm-readobj/ELFDumper.cpp19
-rw-r--r--llvm/tools/llvm-reduce/deltas/ReduceInstructionFlags.cpp10
-rw-r--r--llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp4
-rw-r--r--llvm/tools/opt-viewer/CMakeLists.txt1
-rw-r--r--llvm/unittests/Analysis/InlineAdvisorPlugin/CMakeLists.txt3
-rw-r--r--llvm/unittests/Analysis/InlineOrderPlugin/CMakeLists.txt1
-rw-r--r--llvm/unittests/Analysis/ValueTrackingTest.cpp2
-rw-r--r--llvm/unittests/CMakeLists.txt2
-rw-r--r--llvm/unittests/DebugInfo/BTF/CMakeLists.txt2
-rw-r--r--llvm/unittests/DebugInfo/CodeView/CMakeLists.txt2
-rw-r--r--llvm/unittests/DebugInfo/DWARF/CMakeLists.txt2
-rw-r--r--llvm/unittests/DebugInfo/GSYM/CMakeLists.txt2
-rw-r--r--llvm/unittests/DebugInfo/MSF/CMakeLists.txt2
-rw-r--r--llvm/unittests/DebugInfo/PDB/CMakeLists.txt2
-rw-r--r--llvm/unittests/ExecutionEngine/CMakeLists.txt2
-rw-r--r--llvm/unittests/ExecutionEngine/JITLink/CMakeLists.txt2
-rw-r--r--llvm/unittests/ExecutionEngine/MCJIT/CMakeLists.txt2
-rw-r--r--llvm/unittests/ExecutionEngine/Orc/CMakeLists.txt2
-rw-r--r--llvm/unittests/IR/ConstantRangeTest.cpp102
-rw-r--r--llvm/unittests/IR/MDBuilderTest.cpp39
-rw-r--r--llvm/unittests/ProfileData/BPFunctionNodeTest.cpp33
-rw-r--r--llvm/unittests/Support/CommandLineInit/CMakeLists.txt4
-rw-r--r--llvm/unittests/Support/DynamicLibrary/CMakeLists.txt4
-rw-r--r--llvm/unittests/Support/KnownBitsTest.cpp12
-rw-r--r--llvm/unittests/Support/LEB128Test.cpp20
-rw-r--r--llvm/unittests/Support/raw_socket_stream_test.cpp19
-rw-r--r--llvm/unittests/Target/AArch64/CMakeLists.txt2
-rw-r--r--llvm/unittests/Target/AMDGPU/CMakeLists.txt2
-rw-r--r--llvm/unittests/Target/ARM/CMakeLists.txt2
-rw-r--r--llvm/unittests/Target/CMakeLists.txt3
-rw-r--r--llvm/unittests/Target/LoongArch/CMakeLists.txt2
-rw-r--r--llvm/unittests/Target/PowerPC/CMakeLists.txt2
-rw-r--r--llvm/unittests/Target/RISCV/CMakeLists.txt2
-rw-r--r--llvm/unittests/Target/WebAssembly/CMakeLists.txt2
-rw-r--r--llvm/unittests/Target/X86/CMakeLists.txt2
-rw-r--r--llvm/unittests/TargetParser/RISCVISAInfoTest.cpp4
-rw-r--r--llvm/unittests/TargetParser/TargetParserTest.cpp6
-rw-r--r--llvm/unittests/Transforms/Coroutines/CMakeLists.txt2
-rw-r--r--llvm/unittests/Transforms/IPO/CMakeLists.txt2
-rw-r--r--llvm/unittests/Transforms/Scalar/CMakeLists.txt2
-rw-r--r--llvm/unittests/Transforms/Utils/CMakeLists.txt2
-rw-r--r--llvm/unittests/Transforms/Utils/CallPromotionUtilsTest.cpp88
-rw-r--r--llvm/unittests/Transforms/Vectorize/CMakeLists.txt2
-rw-r--r--llvm/unittests/tools/llvm-cfi-verify/CMakeLists.txt2
-rw-r--r--llvm/unittests/tools/llvm-exegesis/CMakeLists.txt2
-rw-r--r--llvm/unittests/tools/llvm-mca/CMakeLists.txt2
-rw-r--r--llvm/unittests/tools/llvm-mca/MCATestBase.cpp2
-rw-r--r--llvm/unittests/tools/llvm-mca/X86/TestIncrementalMCA.cpp4
-rw-r--r--llvm/unittests/tools/llvm-profdata/CMakeLists.txt2
-rw-r--r--llvm/unittests/tools/llvm-profgen/CMakeLists.txt2
-rw-r--r--llvm/utils/LLVMVisualizers/CMakeLists.txt2
-rw-r--r--llvm/utils/TableGen/ARMTargetDefEmitter.cpp2
-rw-r--r--llvm/utils/TableGen/Basic/CMakeLists.txt1
-rw-r--r--llvm/utils/TableGen/CMakeLists.txt2
-rw-r--r--llvm/utils/TableGen/Common/CMakeLists.txt2
-rw-r--r--llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp7
-rw-r--r--llvm/utils/TableGen/Common/CodeGenDAGPatterns.h2
-rw-r--r--llvm/utils/TableGen/Common/CodeGenTarget.cpp209
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h23
-rw-r--r--llvm/utils/TableGen/GlobalISelEmitter.cpp6
-rw-r--r--llvm/utils/UpdateTestChecks/asm.py8
-rw-r--r--llvm/utils/UpdateTestChecks/common.py925
-rw-r--r--llvm/utils/UpdateTestChecks/isel.py5
-rw-r--r--llvm/utils/gn/README.rst2
-rw-r--r--llvm/utils/gn/build/libs/terminfo/BUILD.gn12
-rw-r--r--llvm/utils/gn/build/libs/terminfo/enable.gni4
-rw-r--r--llvm/utils/gn/secondary/clang/lib/Analysis/FlowSensitive/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn2
-rw-r--r--llvm/utils/gn/secondary/clang/lib/Sema/BUILD.gn3
-rw-r--r--llvm/utils/gn/secondary/clang/lib/StaticAnalyzer/Checkers/BUILD.gn3
-rw-r--r--llvm/utils/gn/secondary/libcxx/include/BUILD.gn22
-rw-r--r--llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn7
-rw-r--r--llvm/utils/gn/secondary/llvm/lib/Support/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/BUILD.gn3
-rw-r--r--llvm/utils/gn/secondary/llvm/tools/llvm-config/BUILD.gn6
-rw-r--r--llvm/utils/lit/CMakeLists.txt4
-rw-r--r--llvm/utils/llvm-locstats/CMakeLists.txt2
-rw-r--r--llvm/utils/mlgo-utils/CMakeLists.txt2
-rwxr-xr-xllvm/utils/revert_checker.py15
-rwxr-xr-xllvm/utils/update_analyze_test_checks.py5
-rwxr-xr-xllvm/utils/update_cc_test_checks.py49
-rwxr-xr-xllvm/utils/update_llc_test_checks.py30
-rwxr-xr-xllvm/utils/update_test_checks.py15
-rw-r--r--mlir/CMakeLists.txt17
-rw-r--r--mlir/cmake/modules/AddMLIR.cmake5
-rw-r--r--mlir/docs/CMakeLists.txt1
-rw-r--r--mlir/docs/PassManagement.md39
-rw-r--r--mlir/examples/toy/CMakeLists.txt2
-rw-r--r--mlir/examples/transform/CMakeLists.txt1
-rw-r--r--mlir/include/mlir-c/Debug.h13
-rw-r--r--mlir/include/mlir/Analysis/SliceAnalysis.h5
-rw-r--r--mlir/include/mlir/Analysis/TopologicalSortUtils.h (renamed from mlir/include/mlir/Transforms/TopologicalSortUtils.h)14
-rw-r--r--mlir/include/mlir/Config/mlir-config.h.cmake4
-rw-r--r--mlir/include/mlir/Dialect/Arith/IR/ArithOps.td32
-rw-r--r--mlir/include/mlir/Dialect/CommonFolders.h5
-rw-r--r--mlir/include/mlir/Dialect/IRDL/IR/IRDLOps.td3
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td2
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td2
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/Transforms/Passes.h1
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/Transforms/Passes.td18
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/Transforms/TypeConsistency.h73
-rw-r--r--mlir/include/mlir/Dialect/Linalg/IR/CMakeLists.txt2
-rw-r--r--mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h16
-rw-r--r--mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td4
-rw-r--r--mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h4
-rw-r--r--mlir/include/mlir/Dialect/Math/Transforms/Passes.h4
-rw-r--r--mlir/include/mlir/Dialect/Mesh/IR/MeshBase.td2
-rw-r--r--mlir/include/mlir/Dialect/Mesh/IR/MeshOps.h28
-rw-r--r--mlir/include/mlir/Dialect/Mesh/Interfaces/ShardingInterface.h9
-rw-r--r--mlir/include/mlir/Dialect/Mesh/Interfaces/ShardingInterface.td25
-rw-r--r--mlir/include/mlir/Dialect/OpenMP/OpenMPAttrDefs.td79
-rw-r--r--mlir/include/mlir/Dialect/OpenMP/OpenMPDialect.td22
-rw-r--r--mlir/include/mlir/Dialect/OpenMP/OpenMPEnums.td211
-rw-r--r--mlir/include/mlir/Dialect/OpenMP/OpenMPOpBase.td48
-rw-r--r--mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td291
-rw-r--r--mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td6
-rw-r--r--mlir/include/mlir/Dialect/Polynomial/IR/Polynomial.td57
-rw-r--r--mlir/include/mlir/Dialect/Polynomial/IR/PolynomialAttributes.td70
-rw-r--r--mlir/include/mlir/Dialect/Polynomial/IR/PolynomialTypes.td32
-rw-r--r--mlir/include/mlir/Dialect/SCF/Transforms/TileUsingInterface.h4
-rw-r--r--mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h4
-rw-r--r--mlir/include/mlir/Dialect/Vector/IR/VectorOps.td119
-rw-r--r--mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h9
-rw-r--r--mlir/include/mlir/IR/OpBase.td12
-rw-r--r--mlir/include/mlir/InitAllPasses.h3
-rw-r--r--mlir/include/mlir/Interfaces/TilingInterface.td4
-rw-r--r--mlir/include/mlir/Interfaces/Utils/InferIntRangeCommon.h25
-rw-r--r--mlir/include/mlir/Pass/PassManager.h39
-rw-r--r--mlir/include/mlir/Transforms/RegionUtils.h4
-rw-r--r--mlir/lib/Analysis/CMakeLists.txt2
-rw-r--r--mlir/lib/Analysis/Liveness.cpp4
-rw-r--r--mlir/lib/Analysis/SliceAnalysis.cpp59
-rw-r--r--mlir/lib/Analysis/TopologicalSortUtils.cpp (renamed from mlir/lib/Transforms/Utils/TopologicalSortUtils.cpp)141
-rw-r--r--mlir/lib/Bindings/Python/IRAttributes.cpp77
-rw-r--r--mlir/lib/Bindings/Python/IRCore.cpp15
-rw-r--r--mlir/lib/CAPI/Debug/Debug.cpp18
-rw-r--r--mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp92
-rw-r--r--mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp64
-rw-r--r--mlir/lib/Conversion/OpenMPToLLVM/OpenMPToLLVM.cpp22
-rw-r--r--mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp16
-rw-r--r--mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp1
-rw-r--r--mlir/lib/Conversion/VectorToSPIRV/CMakeLists.txt1
-rw-r--r--mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp49
-rw-r--r--mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp1
-rw-r--r--mlir/lib/Dialect/Arith/IR/InferIntRangeInterfaceImpls.cpp22
-rw-r--r--mlir/lib/Dialect/ArmSME/Transforms/TileAllocation.cpp1
-rw-r--r--mlir/lib/Dialect/EmitC/IR/EmitC.cpp2
-rw-r--r--mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp5
-rw-r--r--mlir/lib/Dialect/GPU/Transforms/ModuleToBinary.cpp3
-rw-r--r--mlir/lib/Dialect/IRDL/IR/IRDL.cpp31
-rw-r--r--mlir/lib/Dialect/Index/IR/InferIntRangeInterfaceImpls.cpp22
-rw-r--r--mlir/lib/Dialect/LLVMIR/Transforms/CMakeLists.txt1
-rw-r--r--mlir/lib/Dialect/LLVMIR/Transforms/TypeConsistency.cpp575
-rw-r--r--mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp93
-rw-r--r--mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp6
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/BlockPackMatmul.cpp3
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/MeshShardingInterfaceImpl.cpp26
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/Specialize.cpp72
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp17
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp243
-rw-r--r--mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp22
-rw-r--r--mlir/lib/Dialect/Mesh/IR/MeshOps.cpp94
-rw-r--r--mlir/lib/Dialect/Mesh/Interfaces/ShardingInterface.cpp119
-rw-r--r--mlir/lib/Dialect/Mesh/Transforms/ShardingPropagation.cpp231
-rw-r--r--mlir/lib/Dialect/Mesh/Transforms/Spmdization.cpp3
-rw-r--r--mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp17
-rw-r--r--mlir/lib/Dialect/Polynomial/IR/PolynomialAttributes.cpp9
-rw-r--r--mlir/lib/Dialect/Polynomial/IR/PolynomialCanonicalization.td41
-rw-r--r--mlir/lib/Dialect/Polynomial/IR/PolynomialOps.cpp86
-rw-r--r--mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp42
-rw-r--r--mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp14
-rw-r--r--mlir/lib/Dialect/Tensor/IR/TensorOps.cpp3
-rw-r--r--mlir/lib/Dialect/Tensor/Transforms/EmptyOpPatterns.cpp45
-rw-r--r--mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp6
-rw-r--r--mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp32
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/LowerVectorInterleave.cpp15
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp70
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp6
-rw-r--r--mlir/lib/IR/Operation.cpp2
-rw-r--r--mlir/lib/Interfaces/Utils/InferIntRangeCommon.cpp99
-rw-r--r--mlir/lib/Pass/IRPrinting.cpp162
-rw-r--r--mlir/lib/Pass/PassManagerOptions.cpp11
-rw-r--r--mlir/lib/TableGen/CMakeLists.txt1
-rw-r--r--mlir/lib/Target/LLVM/CMakeLists.txt2
-rw-r--r--mlir/lib/Target/LLVM/NVVM/Target.cpp34
-rw-r--r--mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp2
-rw-r--r--mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp108
-rw-r--r--mlir/lib/Target/LLVMIR/ModuleTranslation.cpp2
-rw-r--r--mlir/lib/Tools/mlir-lsp-server/MLIRServer.cpp2
-rw-r--r--mlir/lib/Tools/mlir-opt/MlirOptMain.cpp2
-rw-r--r--mlir/lib/Transforms/Mem2Reg.cpp2
-rw-r--r--mlir/lib/Transforms/SROA.cpp1
-rw-r--r--mlir/lib/Transforms/TopologicalSort.cpp2
-rw-r--r--mlir/lib/Transforms/Utils/CMakeLists.txt1
-rw-r--r--mlir/lib/Transforms/Utils/RegionUtils.cpp19
-rw-r--r--mlir/lib/Transforms/ViewOpGraph.cpp2
-rw-r--r--mlir/python/mlir/dialects/linalg/__init__.py5
-rw-r--r--mlir/python/mlir/dialects/linalg/opdsl/lang/emitter.py10
-rw-r--r--mlir/test/Analysis/DataFlow/test-next-access.mlir4
-rw-r--r--mlir/test/Analysis/test-liveness.mlir24
-rw-r--r--mlir/test/Analysis/test-topoligical-sort.mlir53
-rw-r--r--mlir/test/Analysis/test-toposort.mlir (renamed from mlir/test/Transforms/test-toposort.mlir)0
-rw-r--r--mlir/test/CAPI/CMakeLists.txt2
-rw-r--r--mlir/test/CMakeLists.txt4
-rw-r--r--mlir/test/Conversion/ArithToEmitC/arith-to-emitc-unsupported.mlir7
-rw-r--r--mlir/test/Conversion/ArithToEmitC/arith-to-emitc.mlir63
-rw-r--r--mlir/test/Conversion/BufferizationToMemRef/bufferization-to-memref.mlir8
-rw-r--r--mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir4
-rw-r--r--mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir14
-rw-r--r--mlir/test/Conversion/PDLToPDLInterp/pdl-to-pdl-interp-matcher.mlir6
-rw-r--r--mlir/test/Conversion/SPIRVToLLVM/spirv-storage-class-mapping.mlir2
-rw-r--r--mlir/test/Conversion/VectorToArmSME/vector-to-arm-sme.mlir33
-rw-r--r--mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir22
-rw-r--r--mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir24
-rw-r--r--mlir/test/Dialect/Affine/slicing-utils.mlir160
-rw-r--r--mlir/test/Dialect/Arith/canonicalize.mlir8
-rw-r--r--mlir/test/Dialect/Arith/int-range-interface.mlir135
-rw-r--r--mlir/test/Dialect/Arith/int-range-opts.mlir4
-rw-r--r--mlir/test/Dialect/Arith/unsigned-when-equivalent.mlir4
-rw-r--r--mlir/test/Dialect/ArmSME/tile-allocation-liveness.mlir32
-rw-r--r--mlir/test/Dialect/Bufferization/Transforms/lower-deallocations-func.mlir8
-rw-r--r--mlir/test/Dialect/Bufferization/Transforms/lower-deallocations.mlir8
-rw-r--r--mlir/test/Dialect/GPU/barrier-elimination.mlir2
-rw-r--r--mlir/test/Dialect/GPU/ops.mlir2
-rw-r--r--mlir/test/Dialect/GPU/outlining.mlir2
-rw-r--r--mlir/test/Dialect/GPU/test-nvvm-pipeline.mlir2
-rw-r--r--mlir/test/Dialect/IRDL/invalid.irdl.mlir17
-rw-r--r--mlir/test/Dialect/LLVMIR/nvvm.mlir14
-rw-r--r--mlir/test/Dialect/LLVMIR/type-consistency.mlir533
-rw-r--r--mlir/test/Dialect/Linalg/block-pack-matmul.mlir29
-rw-r--r--mlir/test/Dialect/Linalg/data-layout-propagation.mlir2
-rw-r--r--mlir/test/Dialect/Linalg/mesh-sharding-propagation.mlir34
-rw-r--r--mlir/test/Dialect/Linalg/transform-op-specialize.mlir25
-rw-r--r--mlir/test/Dialect/Linalg/transform-op-specialize_elemwise_binary.mlir76
-rw-r--r--mlir/test/Dialect/Linalg/transform-op-specialize_elemwise_unary.mlir25
-rw-r--r--mlir/test/Dialect/Linalg/transform-tile-reduction.mlir50
-rw-r--r--mlir/test/Dialect/Math/expand-math.mlir2
-rw-r--r--mlir/test/Dialect/MemRef/canonicalize.mlir10
-rw-r--r--mlir/test/Dialect/Mesh/sharding-propagation.mlir38
-rw-r--r--mlir/test/Dialect/Mesh/spmdization.mlir15
-rw-r--r--mlir/test/Dialect/OpenMP/invalid.mlir3
-rw-r--r--mlir/test/Dialect/OpenMP/ops.mlir10
-rw-r--r--mlir/test/Dialect/Polynomial/canonicalization.mlir57
-rw-r--r--mlir/test/Dialect/Polynomial/ops.mlir12
-rw-r--r--mlir/test/Dialect/SCF/transform-ops.mlir6
-rw-r--r--mlir/test/Dialect/SPIRV/IR/logical-ops.mlir12
-rw-r--r--mlir/test/Dialect/SPIRV/IR/structure-ops.mlir4
-rw-r--r--mlir/test/Dialect/Tensor/canonicalize.mlir13
-rw-r--r--mlir/test/Dialect/Tensor/fold-empty-op.mlir73
-rw-r--r--mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir2
-rw-r--r--mlir/test/Dialect/Tensor/fold-reassociative-reshapes.mlir102
-rw-r--r--mlir/test/Dialect/Tensor/simplify-pack-unpack.mlir128
-rw-r--r--mlir/test/Dialect/Vector/canonicalize.mlir7
-rw-r--r--mlir/test/Dialect/Vector/invalid.mlir56
-rw-r--r--mlir/test/Dialect/Vector/ops.mlir54
-rw-r--r--mlir/test/Dialect/Vector/vector-interleave-lowering-transforms.mlir20
-rw-r--r--mlir/test/Dialect/Vector/vector-interleave-to-shuffle.mlir5
-rw-r--r--mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir76
-rw-r--r--mlir/test/IR/parser.mlir2
-rw-r--r--mlir/test/IR/properties.mlir11
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-scalable-interleave.mlir2
-rw-r--r--mlir/test/Integration/Dialect/Vector/CPU/test-interleave.mlir2
-rw-r--r--mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x8_8x128_noswizzle.mlir9
-rw-r--r--mlir/test/Pass/ir-printing-file-tree.mlir41
-rw-r--r--mlir/test/Target/LLVMIR/Import/global-variables.ll2
-rw-r--r--mlir/test/Target/LLVMIR/Import/intrinsic.ll4
-rw-r--r--mlir/test/Target/LLVMIR/Import/metadata-loop.ll2
-rw-r--r--mlir/test/Target/LLVMIR/llvmir-debug.mlir2
-rw-r--r--mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir2
-rw-r--r--mlir/test/Target/LLVMIR/omptarget-array-sectioning-host.mlir2
-rw-r--r--mlir/test/Transforms/test-convert-func-op.mlir12
-rw-r--r--mlir/test/lib/Analysis/CMakeLists.txt1
-rw-r--r--mlir/test/lib/Analysis/TestSlice.cpp33
-rw-r--r--mlir/test/lib/Analysis/TestTopologicalSort.cpp (renamed from mlir/test/lib/Transforms/TestTopologicalSort.cpp)2
-rw-r--r--mlir/test/lib/Conversion/FuncToLLVM/CMakeLists.txt1
-rw-r--r--mlir/test/lib/Conversion/FuncToLLVM/TestConvertFuncOp.cpp93
-rw-r--r--mlir/test/lib/Dialect/Test/TestOpDefs.cpp19
-rw-r--r--mlir/test/lib/Dialect/Test/TestOps.td11
-rw-r--r--mlir/test/lib/Transforms/CMakeLists.txt1
-rw-r--r--mlir/test/lit.cfg.py2
-rw-r--r--mlir/test/lit.site.cfg.py.in2
-rw-r--r--mlir/test/mlir-tblgen/op-decl-and-defs.td21
-rw-r--r--mlir/test/mlir-tblgen/op-operand.td3
-rw-r--r--mlir/test/mlir-tblgen/pattern.mlir8
-rw-r--r--mlir/test/mlir-vulkan-runner/addui_extended.mlir (renamed from mlir/test/mlir-vulkan-runner/iaddcarry_extended.mlir)0
-rw-r--r--mlir/test/python/dialects/transform_structured_ext.py2
-rw-r--r--mlir/test/python/ir/array_attributes.py82
-rw-r--r--mlir/tools/mlir-linalg-ods-gen/CMakeLists.txt1
-rw-r--r--mlir/tools/mlir-opt/mlir-opt.cpp2
-rw-r--r--mlir/tools/mlir-pdll/CMakeLists.txt1
-rw-r--r--mlir/tools/mlir-src-sharder/CMakeLists.txt2
-rw-r--r--mlir/tools/mlir-tblgen/CMakeLists.txt1
-rw-r--r--mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp29
-rw-r--r--mlir/tools/mlir-tblgen/RewriterGen.cpp27
-rw-r--r--mlir/unittests/CMakeLists.txt2
-rw-r--r--mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp2
-rw-r--r--offload/CMakeLists.txt23
-rw-r--r--offload/DeviceRTL/include/Utils.h2
-rw-r--r--offload/DeviceRTL/src/Mapping.cpp4
-rw-r--r--offload/DeviceRTL/src/Utils.cpp14
-rw-r--r--offload/cmake/Modules/LibomptargetGetDependencies.cmake8
-rw-r--r--offload/plugins-nextgen/amdgpu/CMakeLists.txt8
-rw-r--r--offload/plugins-nextgen/common/include/JIT.h4
-rw-r--r--offload/plugins-nextgen/common/include/PluginInterface.h12
-rw-r--r--offload/plugins-nextgen/common/src/JIT.cpp16
-rw-r--r--offload/plugins-nextgen/common/src/PluginInterface.cpp34
-rw-r--r--offload/plugins-nextgen/cuda/CMakeLists.txt11
-rw-r--r--offload/plugins-nextgen/cuda/dynamic_cuda/cuda.h9
-rw-r--r--offload/plugins-nextgen/exports6
-rw-r--r--offload/plugins-nextgen/host/CMakeLists.txt4
-rw-r--r--offload/src/PluginManager.cpp34
-rw-r--r--offload/test/offloading/dynamic_module.c2
-rw-r--r--offload/test/offloading/fortran/dump_map_tables.f9038
-rw-r--r--offload/test/offloading/ompx_bare_ballot_sync.c45
-rw-r--r--openmp/CMakeLists.txt1
-rw-r--r--openmp/cmake/OpenMPTesting.cmake2
-rw-r--r--openmp/docs/CMakeLists.txt1
-rw-r--r--openmp/docs/SupportAndFAQ.rst9
-rw-r--r--openmp/docs/remarks/OMP121.rst6
-rw-r--r--openmp/docs/remarks/OMP133.rst6
-rw-r--r--openmp/docs/remarks/OptimizationRemarks.rst4
-rw-r--r--openmp/runtime/cmake/LibompMicroTests.cmake5
-rw-r--r--openmp/runtime/src/CMakeLists.txt7
-rw-r--r--openmp/runtime/src/include/ompx.h.var12
-rw-r--r--openmp/runtime/test/lit.cfg4
-rw-r--r--openmp/runtime/test/transform/tile/foreach.cpp228
-rw-r--r--openmp/runtime/test/transform/tile/iterfor.cpp233
-rw-r--r--openmp/runtime/test/transform/tile/parallel-wsloop-collapse-foreach.cpp366
-rw-r--r--openmp/runtime/test/transform/unroll/factor_foreach.cpp162
-rw-r--r--openmp/runtime/test/transform/unroll/factor_intfor.c25
-rw-r--r--openmp/runtime/test/transform/unroll/factor_iterfor.cpp169
-rw-r--r--openmp/runtime/test/transform/unroll/factor_parallel-wsloop-collapse-foreach.cpp199
-rw-r--r--openmp/runtime/test/transform/unroll/factor_parallel-wsloop-collapse-intfor.cpp32
-rw-r--r--openmp/runtime/test/transform/unroll/full_intfor.c25
-rw-r--r--openmp/runtime/test/transform/unroll/heuristic_intfor.c25
-rw-r--r--openmp/runtime/test/transform/unroll/partial_intfor.c25
-rw-r--r--polly/CMakeLists.txt6
-rw-r--r--polly/cmake/polly_macros.cmake4
-rw-r--r--polly/docs/CMakeLists.txt1
-rw-r--r--polly/lib/CMakeLists.txt4
-rw-r--r--polly/lib/External/CMakeLists.txt2
-rw-r--r--polly/test/CMakeLists.txt7
-rw-r--r--polly/test/CodeGen/20100617.ll2
-rw-r--r--polly/test/CodeGen/20100622.ll4
-rw-r--r--polly/test/CodeGen/20100707.ll2
-rw-r--r--polly/test/CodeGen/20100707_2.ll2
-rw-r--r--polly/test/CodeGen/20100708.ll2
-rw-r--r--polly/test/CodeGen/20100708_2.ll2
-rw-r--r--polly/test/CodeGen/20100713.ll2
-rw-r--r--polly/test/CodeGen/20100713_2.ll2
-rw-r--r--polly/test/CodeGen/20100717.ll2
-rw-r--r--polly/test/CodeGen/20100718-DomInfo-2.ll2
-rw-r--r--polly/test/CodeGen/20100718-DomInfo.ll2
-rw-r--r--polly/test/CodeGen/20100720-MultipleConditions.ll2
-rw-r--r--polly/test/CodeGen/20100809-IndependentBlock.ll2
-rw-r--r--polly/test/CodeGen/20100811-ScalarDependencyBetweenBrAndCnd.ll2
-rw-r--r--polly/test/CodeGen/20101030-Overflow.ll2
-rw-r--r--polly/test/CodeGen/20101103-Overflow3.ll2
-rw-r--r--polly/test/CodeGen/20101103-signmissmatch.ll2
-rw-r--r--polly/test/CodeGen/20110226-Ignore-Dead-Code.ll2
-rw-r--r--polly/test/CodeGen/20110226-PHI-Node-removed.ll2
-rw-r--r--polly/test/CodeGen/20120316-InvalidCast.ll2
-rw-r--r--polly/test/CodeGen/20120403-RHS-type-mismatch.ll2
-rw-r--r--polly/test/CodeGen/20130221.ll2
-rw-r--r--polly/test/CodeGen/20150328-SCEVExpanderIntroducesNewIV.ll2
-rw-r--r--polly/test/CodeGen/Intrinsics/llvm-expect.ll2
-rw-r--r--polly/test/CodeGen/LoopParallelMD/do_not_mutate_debug_info.ll2
-rw-r--r--polly/test/CodeGen/LoopParallelMD/loop_nest_param_parallel.ll2
-rw-r--r--polly/test/CodeGen/LoopParallelMD/single_loop_param_parallel.ll4
-rw-r--r--polly/test/CodeGen/MemAccess/bad_alignment.ll2
-rw-r--r--polly/test/CodeGen/MemAccess/codegen_address_space.ll2
-rw-r--r--polly/test/CodeGen/MemAccess/codegen_constant_offset.ll2
-rw-r--r--polly/test/CodeGen/MemAccess/codegen_simple.ll2
-rw-r--r--polly/test/CodeGen/MemAccess/codegen_simple_float.ll2
-rw-r--r--polly/test/CodeGen/MemAccess/codegen_simple_md.ll4
-rw-r--r--polly/test/CodeGen/MemAccess/codegen_simple_md_float.ll4
-rw-r--r--polly/test/CodeGen/MemAccess/different_types.ll4
-rw-r--r--polly/test/CodeGen/MemAccess/generate-all.ll4
-rw-r--r--polly/test/CodeGen/MemAccess/invariant_base_ptr.ll4
-rw-r--r--polly/test/CodeGen/MemAccess/multiple_types.ll4
-rw-r--r--polly/test/CodeGen/MemAccess/simple.ll2
-rw-r--r--polly/test/CodeGen/MemAccess/update_access_functions.ll4
-rw-r--r--polly/test/CodeGen/OpenMP/alias-metadata.ll2
-rw-r--r--polly/test/CodeGen/OpenMP/floord-as-argument-to-subfunction.ll2
-rw-r--r--polly/test/CodeGen/OpenMP/inlineasm.ll2
-rw-r--r--polly/test/CodeGen/OpenMP/invariant_base_pointer_preloaded.ll2
-rw-r--r--polly/test/CodeGen/OpenMP/invariant_base_pointer_preloaded_different_bb.ll2
-rw-r--r--polly/test/CodeGen/OpenMP/invariant_base_pointer_preloaded_pass_only_needed.ll2
-rw-r--r--polly/test/CodeGen/OpenMP/invariant_base_pointers_preloaded.ll2
-rw-r--r--polly/test/CodeGen/OpenMP/loop-body-references-outer-iv.ll4
-rw-r--r--polly/test/CodeGen/OpenMP/loop-body-references-outer-values-2.ll4
-rw-r--r--polly/test/CodeGen/OpenMP/loop-body-references-outer-values-3.ll4
-rw-r--r--polly/test/CodeGen/OpenMP/loop-body-references-outer-values.ll4
-rw-r--r--polly/test/CodeGen/OpenMP/loop-bounds-reference-outer-ids.ll4
-rw-r--r--polly/test/CodeGen/OpenMP/mapped-phi-access.ll2
-rw-r--r--polly/test/CodeGen/OpenMP/matmul-parallel.ll4
-rw-r--r--polly/test/CodeGen/OpenMP/recomputed-srem.ll2
-rw-r--r--polly/test/CodeGen/OpenMP/reference-argument-from-non-affine-region.ll12
-rw-r--r--polly/test/CodeGen/OpenMP/reference-other-bb.ll2
-rw-r--r--polly/test/CodeGen/OpenMP/reference-preceeding-loop.ll4
-rw-r--r--polly/test/CodeGen/OpenMP/reference_latest.ll2
-rw-r--r--polly/test/CodeGen/OpenMP/scev-rewriting.ll2
-rw-r--r--polly/test/CodeGen/OpenMP/single_loop.ll18
-rw-r--r--polly/test/CodeGen/OpenMP/single_loop_with_loop_invariant_baseptr.ll4
-rw-r--r--polly/test/CodeGen/OpenMP/single_loop_with_param.ll12
-rw-r--r--polly/test/CodeGen/OpenMP/two-parallel-loops-reference-outer-indvar.ll4
-rw-r--r--polly/test/CodeGen/PHIInExit.ll2
-rw-r--r--polly/test/CodeGen/RuntimeDebugBuilder/combine_different_values.ll2
-rw-r--r--polly/test/CodeGen/RuntimeDebugBuilder/stmt_tracing.ll2
-rw-r--r--polly/test/CodeGen/alias-check-multi-dim.ll2
-rw-r--r--polly/test/CodeGen/alias_metadata_too_many_arrays.ll2
-rw-r--r--polly/test/CodeGen/aliasing_different_base_and_access_type.ll2
-rw-r--r--polly/test/CodeGen/aliasing_different_pointer_types.ll2
-rw-r--r--polly/test/CodeGen/aliasing_multidimensional_access.ll2
-rw-r--r--polly/test/CodeGen/aliasing_parametric_simple_1.ll2
-rw-r--r--polly/test/CodeGen/aliasing_parametric_simple_2.ll2
-rw-r--r--polly/test/CodeGen/aliasing_struct_element.ll2
-rw-r--r--polly/test/CodeGen/alignment.ll2
-rw-r--r--polly/test/CodeGen/annotated_alias_scopes.ll2
-rw-r--r--polly/test/CodeGen/blas_sscal_simplified.ll2
-rw-r--r--polly/test/CodeGen/conflict-between-loop-invariant-code-hosting-and-escape-map-computation.ll2
-rw-r--r--polly/test/CodeGen/constant_condition.ll2
-rw-r--r--polly/test/CodeGen/create-conditional-scop.ll2
-rw-r--r--polly/test/CodeGen/dead_invariant_load_instruction_referenced_by_parameter_1.ll2
-rw-r--r--polly/test/CodeGen/dead_invariant_load_instruction_referenced_by_parameter_2.ll2
-rw-r--r--polly/test/CodeGen/debug-intrinsics.ll8
-rw-r--r--polly/test/CodeGen/dominance_problem_after_early_codegen_bailout.ll2
-rw-r--r--polly/test/CodeGen/empty_domain_in_context.ll2
-rw-r--r--polly/test/CodeGen/entry_with_trivial_phi.ll2
-rw-r--r--polly/test/CodeGen/entry_with_trivial_phi_other_bb.ll2
-rw-r--r--polly/test/CodeGen/error-stmt-in-non-affine-region.ll2
-rw-r--r--polly/test/CodeGen/error_block_contains_invalid_memory_access.ll2
-rw-r--r--polly/test/CodeGen/exprModDiv.ll8
-rw-r--r--polly/test/CodeGen/hoisted_load_escapes_through_phi.ll4
-rw-r--r--polly/test/CodeGen/hoisting_1.ll2
-rw-r--r--polly/test/CodeGen/hoisting_2.ll2
-rw-r--r--polly/test/CodeGen/inner_scev_sdiv_1.ll2
-rw-r--r--polly/test/CodeGen/inner_scev_sdiv_2.ll2
-rw-r--r--polly/test/CodeGen/inner_scev_sdiv_3.ll2
-rw-r--r--polly/test/CodeGen/inner_scev_sdiv_in_lb.ll4
-rw-r--r--polly/test/CodeGen/inner_scev_sdiv_in_lb_invariant.ll2
-rw-r--r--polly/test/CodeGen/inner_scev_sdiv_in_rtc.ll2
-rw-r--r--polly/test/CodeGen/intrinsics_lifetime.ll2
-rw-r--r--polly/test/CodeGen/intrinsics_misc.ll2
-rw-r--r--polly/test/CodeGen/inv-load-lnt-crash-wrong-order-2.ll2
-rw-r--r--polly/test/CodeGen/inv-load-lnt-crash-wrong-order-3.ll2
-rw-r--r--polly/test/CodeGen/inv-load-lnt-crash-wrong-order.ll2
-rw-r--r--polly/test/CodeGen/invariant-load-dimension.ll4
-rw-r--r--polly/test/CodeGen/invariant-load-preload-base-pointer-origin-first.ll2
-rw-r--r--polly/test/CodeGen/invariant_cannot_handle_void.ll4
-rw-r--r--polly/test/CodeGen/invariant_load.ll2
-rw-r--r--polly/test/CodeGen/invariant_load_address_space.ll2
-rw-r--r--polly/test/CodeGen/invariant_load_alias_metadata.ll2
-rw-r--r--polly/test/CodeGen/invariant_load_base_pointer.ll2
-rw-r--r--polly/test/CodeGen/invariant_load_base_pointer_conditional.ll2
-rw-r--r--polly/test/CodeGen/invariant_load_base_pointer_conditional_2.ll6
-rw-r--r--polly/test/CodeGen/invariant_load_canonicalize_array_baseptrs.ll2
-rw-r--r--polly/test/CodeGen/invariant_load_condition.ll2
-rw-r--r--polly/test/CodeGen/invariant_load_different_sized_types.ll2
-rw-r--r--polly/test/CodeGen/invariant_load_escaping.ll2
-rw-r--r--polly/test/CodeGen/invariant_load_escaping_second_scop.ll2
-rw-r--r--polly/test/CodeGen/invariant_load_in_non_affine_subregion.ll2
-rw-r--r--polly/test/CodeGen/invariant_load_loop_ub.ll2
-rw-r--r--polly/test/CodeGen/invariant_load_not_executed_but_in_parameters.ll2
-rw-r--r--polly/test/CodeGen/invariant_load_outermost.ll2
-rw-r--r--polly/test/CodeGen/invariant_load_parameters_cyclic_dependence.ll4
-rw-r--r--polly/test/CodeGen/invariant_load_ptr_ptr_noalias.ll2
-rw-r--r--polly/test/CodeGen/invariant_load_scalar_dep.ll2
-rw-r--r--polly/test/CodeGen/invariant_load_scalar_escape_alloca_sharing.ll2
-rw-r--r--polly/test/CodeGen/invariant_loads_from_struct_with_different_types_1.ll2
-rw-r--r--polly/test/CodeGen/invariant_loads_from_struct_with_different_types_2.ll2
-rw-r--r--polly/test/CodeGen/invariant_loads_ignore_parameter_bounds.ll2
-rw-r--r--polly/test/CodeGen/invariant_verify_function_failed.ll2
-rw-r--r--polly/test/CodeGen/invariant_verify_function_failed_2.ll4
-rw-r--r--polly/test/CodeGen/issue56692.ll2
-rw-r--r--polly/test/CodeGen/large-numbers-in-boundary-context.ll2
-rw-r--r--polly/test/CodeGen/load_subset_with_context.ll2
-rw-r--r--polly/test/CodeGen/loop-invariant-load-type-mismatch.ll2
-rw-r--r--polly/test/CodeGen/loop_with_condition.ll2
-rw-r--r--polly/test/CodeGen/loop_with_condition_2.ll2
-rw-r--r--polly/test/CodeGen/loop_with_condition_ineq.ll2
-rw-r--r--polly/test/CodeGen/loop_with_condition_nested.ll4
-rw-r--r--polly/test/CodeGen/loop_with_conditional_entry_edge_split_hard_case.ll2
-rw-r--r--polly/test/CodeGen/memcpy_annotations.ll2
-rw-r--r--polly/test/CodeGen/multidim-non-matching-typesize-2.ll2
-rw-r--r--polly/test/CodeGen/multidim-non-matching-typesize.ll2
-rw-r--r--polly/test/CodeGen/multidim_2d_parametric_array_static_loop_bounds.ll2
-rw-r--r--polly/test/CodeGen/multidim_alias_check.ll2
-rw-r--r--polly/test/CodeGen/multiple-codegens.ll1
-rw-r--r--polly/test/CodeGen/multiple-scops-in-a-row.ll2
-rw-r--r--polly/test/CodeGen/multiple-types-invariant-load-2.ll2
-rw-r--r--polly/test/CodeGen/multiple-types-invariant-load.ll2
-rw-r--r--polly/test/CodeGen/multiple_sai_fro_same_base_address.ll4
-rw-r--r--polly/test/CodeGen/no-overflow-tracking.ll4
-rw-r--r--polly/test/CodeGen/no_guard_bb.ll2
-rw-r--r--polly/test/CodeGen/non-affine-dominance-generated-entering.ll2
-rw-r--r--polly/test/CodeGen/non-affine-exit-node-dominance.ll2
-rw-r--r--polly/test/CodeGen/non-affine-phi-node-expansion-2.ll2
-rw-r--r--polly/test/CodeGen/non-affine-phi-node-expansion-3.ll2
-rw-r--r--polly/test/CodeGen/non-affine-phi-node-expansion-4.ll2
-rw-r--r--polly/test/CodeGen/non-affine-phi-node-expansion.ll2
-rw-r--r--polly/test/CodeGen/non-affine-region-exit-phi-incoming-synthesize-2.ll2
-rw-r--r--polly/test/CodeGen/non-affine-region-exit-phi-incoming-synthesize.ll2
-rw-r--r--polly/test/CodeGen/non-affine-region-implicit-store.ll2
-rw-r--r--polly/test/CodeGen/non-affine-region-phi-references-in-scop-value.ll2
-rw-r--r--polly/test/CodeGen/non-affine-subregion-dominance-reuse.ll2
-rw-r--r--polly/test/CodeGen/non-affine-switch.ll4
-rw-r--r--polly/test/CodeGen/non-affine-synthesized-in-branch.ll2
-rw-r--r--polly/test/CodeGen/non-affine-update.ll4
-rw-r--r--polly/test/CodeGen/non-hoisted-load-needed-as-base-ptr.ll2
-rw-r--r--polly/test/CodeGen/non_affine_float_compare.ll2
-rw-r--r--polly/test/CodeGen/only_non_affine_error_region.ll2
-rw-r--r--polly/test/CodeGen/openmp_limit_threads.ll12
-rw-r--r--polly/test/CodeGen/out-of-scop-phi-node-use.ll2
-rw-r--r--polly/test/CodeGen/param_div_div_div_2.ll4
-rw-r--r--polly/test/CodeGen/partial_write_array.ll2
-rw-r--r--polly/test/CodeGen/partial_write_emptyset.ll2
-rw-r--r--polly/test/CodeGen/partial_write_full_write_that_appears_partial.ll2
-rw-r--r--polly/test/CodeGen/partial_write_impossible_restriction.ll2
-rw-r--r--polly/test/CodeGen/partial_write_in_region.ll4
-rw-r--r--polly/test/CodeGen/partial_write_in_region_with_loop.ll4
-rw-r--r--polly/test/CodeGen/partial_write_mapped_scalar.ll2
-rw-r--r--polly/test/CodeGen/partial_write_mapped_scalar_subregion.ll2
-rw-r--r--polly/test/CodeGen/perf_monitoring.ll2
-rw-r--r--polly/test/CodeGen/perf_monitoring_cycles_per_scop.ll2
-rw-r--r--polly/test/CodeGen/perf_monitoring_trip_counts_per_scop.ll2
-rw-r--r--polly/test/CodeGen/phi-defined-before-scop.ll2
-rw-r--r--polly/test/CodeGen/phi_after_error_block_outside_of_scop.ll2
-rw-r--r--polly/test/CodeGen/phi_condition_modeling_1.ll2
-rw-r--r--polly/test/CodeGen/phi_condition_modeling_2.ll2
-rw-r--r--polly/test/CodeGen/phi_conditional_simple_1.ll4
-rw-r--r--polly/test/CodeGen/phi_in_exit_early_lnt_failure_1.ll2
-rw-r--r--polly/test/CodeGen/phi_in_exit_early_lnt_failure_2.ll2
-rw-r--r--polly/test/CodeGen/phi_in_exit_early_lnt_failure_3.ll2
-rw-r--r--polly/test/CodeGen/phi_in_exit_early_lnt_failure_5.ll2
-rw-r--r--polly/test/CodeGen/phi_loop_carried_float.ll2
-rw-r--r--polly/test/CodeGen/phi_loop_carried_float_escape.ll8
-rw-r--r--polly/test/CodeGen/phi_scalar_simple_1.ll2
-rw-r--r--polly/test/CodeGen/phi_scalar_simple_2.ll2
-rw-r--r--polly/test/CodeGen/phi_with_multi_exiting_edges_2.ll2
-rw-r--r--polly/test/CodeGen/phi_with_one_exit_edge.ll2
-rw-r--r--polly/test/CodeGen/pointer-type-expressions-2.ll4
-rw-r--r--polly/test/CodeGen/pointer-type-expressions.ll4
-rw-r--r--polly/test/CodeGen/pointer-type-pointer-type-comparison.ll4
-rw-r--r--polly/test/CodeGen/pointer_rem.ll4
-rw-r--r--polly/test/CodeGen/pr25241.ll2
-rw-r--r--polly/test/CodeGen/ptrtoint_as_parameter.ll2
-rw-r--r--polly/test/CodeGen/read-only-scalars.ll4
-rw-r--r--polly/test/CodeGen/reduction.ll2
-rw-r--r--polly/test/CodeGen/reduction_2.ll2
-rw-r--r--polly/test/CodeGen/reduction_simple_binary.ll2
-rw-r--r--polly/test/CodeGen/region-with-instructions.ll2
-rw-r--r--polly/test/CodeGen/region_exiting-domtree.ll2
-rw-r--r--polly/test/CodeGen/region_multiexit_partialwrite.ll2
-rw-r--r--polly/test/CodeGen/run-time-condition-with-scev-parameters.ll4
-rw-r--r--polly/test/CodeGen/run-time-condition.ll2
-rw-r--r--polly/test/CodeGen/scalar-references-used-in-scop-compute.ll2
-rw-r--r--polly/test/CodeGen/scalar-store-from-same-bb.ll4
-rw-r--r--polly/test/CodeGen/scalar_codegen_crash.ll4
-rw-r--r--polly/test/CodeGen/scev-backedgetaken.ll2
-rw-r--r--polly/test/CodeGen/scev-division-invariant-load.ll2
-rw-r--r--polly/test/CodeGen/scev.ll2
-rw-r--r--polly/test/CodeGen/scev_expansion_in_nonaffine.ll2
-rw-r--r--polly/test/CodeGen/scev_looking_through_bitcasts.ll2
-rw-r--r--polly/test/CodeGen/scop_expander_insert_point.ll2
-rw-r--r--polly/test/CodeGen/scop_expander_segfault.ll2
-rw-r--r--polly/test/CodeGen/scop_never_executed_runtime_check_location.ll2
-rw-r--r--polly/test/CodeGen/select-base-pointer.ll2
-rw-r--r--polly/test/CodeGen/sequential_loops.ll2
-rw-r--r--polly/test/CodeGen/simple_loop_non_single_exit.ll2
-rw-r--r--polly/test/CodeGen/simple_loop_non_single_exit_2.ll2
-rw-r--r--polly/test/CodeGen/simple_non_single_entry.ll2
-rw-r--r--polly/test/CodeGen/simple_nonaffine_loop.ll2
-rw-r--r--polly/test/CodeGen/single_do_loop_int_max_iterations.ll2
-rw-r--r--polly/test/CodeGen/single_do_loop_int_param_iterations.ll2
-rw-r--r--polly/test/CodeGen/single_do_loop_ll_max_iterations.ll4
-rw-r--r--polly/test/CodeGen/single_do_loop_one_iteration.ll2
-rw-r--r--polly/test/CodeGen/single_do_loop_scev_replace.ll2
-rw-r--r--polly/test/CodeGen/single_loop.ll2
-rw-r--r--polly/test/CodeGen/single_loop_int_max_iterations.ll2
-rw-r--r--polly/test/CodeGen/single_loop_ll_max_iterations.ll2
-rw-r--r--polly/test/CodeGen/single_loop_one_iteration.ll2
-rw-r--r--polly/test/CodeGen/single_loop_param.ll2
-rw-r--r--polly/test/CodeGen/single_loop_param_less_equal.ll6
-rw-r--r--polly/test/CodeGen/single_loop_param_less_than.ll4
-rw-r--r--polly/test/CodeGen/single_loop_zero_iterations.ll2
-rw-r--r--polly/test/CodeGen/split_edge_of_exit.ll4
-rw-r--r--polly/test/CodeGen/split_edges.ll2
-rw-r--r--polly/test/CodeGen/split_edges_2.ll2
-rw-r--r--polly/test/CodeGen/srem-in-other-bb.ll2
-rw-r--r--polly/test/CodeGen/stack-overflow-in-load-hoisting.ll2
-rw-r--r--polly/test/CodeGen/stmt_split_no_dependence.ll2
-rw-r--r--polly/test/CodeGen/switch-in-non-affine-region.ll4
-rw-r--r--polly/test/CodeGen/synthesizable_phi_write_after_loop.ll2
-rw-r--r--polly/test/CodeGen/test-invalid-operands-for-select-2.ll2
-rw-r--r--polly/test/CodeGen/test-invalid-operands-for-select.ll2
-rw-r--r--polly/test/CodeGen/test.ll2
-rw-r--r--polly/test/CodeGen/two-loops-right-after-each-other-2.ll2
-rw-r--r--polly/test/CodeGen/two-scops-in-row-invalidate-scevs.ll2
-rw-r--r--polly/test/CodeGen/two-scops-in-row.ll4
-rw-r--r--polly/test/CodeGen/udiv_expansion_position.ll2
-rw-r--r--polly/test/CodeGen/uninitialized_scalar_memory.ll2
-rw-r--r--polly/test/CodeGen/unpredictable-loop-unsynthesizable.ll6
-rw-r--r--polly/test/CodeGen/variant_load_empty_domain.ll2
-rw-r--r--polly/test/CodeGen/whole-scop-non-affine-subregion.ll4
-rw-r--r--polly/test/DeLICM/confused_order.ll4
-rw-r--r--polly/test/DeLICM/contradicting_assumed_context_and_domain.ll2
-rw-r--r--polly/test/DeLICM/load-in-cond-inf-loop.ll2
-rw-r--r--polly/test/DeLICM/map_memset_zero.ll4
-rw-r--r--polly/test/DeLICM/nomap_alreadymapped.ll2
-rw-r--r--polly/test/DeLICM/nomap_escaping.ll2
-rw-r--r--polly/test/DeLICM/nomap_occupied.ll2
-rw-r--r--polly/test/DeLICM/nomap_readonly.ll2
-rw-r--r--polly/test/DeLICM/nomap_spuriouswrite.ll2
-rw-r--r--polly/test/DeLICM/nomap_storagesize.ll2
-rw-r--r--polly/test/DeLICM/nomap_writewrite.ll2
-rw-r--r--polly/test/DeLICM/outofquota-reverseDomain.ll2
-rw-r--r--polly/test/DeLICM/pass_existence.ll6
-rw-r--r--polly/test/DeLICM/pr41656.ll2
-rw-r--r--polly/test/DeLICM/pr48783.ll2
-rw-r--r--polly/test/DeLICM/reduction.ll2
-rw-r--r--polly/test/DeLICM/reduction_looprotate_gvnpre_cond1.ll2
-rw-r--r--polly/test/DeLICM/reduction_looprotate_gvnpre_cond2.ll2
-rw-r--r--polly/test/DeLICM/reduction_looprotate_gvnpre_nopreheader.ll2
-rw-r--r--polly/test/DeLICM/reduction_looprotate_licm_nopreheader.ll2
-rw-r--r--polly/test/DeLICM/reduction_looprotate_loopguard_gvnpre.ll2
-rw-r--r--polly/test/DeLICM/reduction_looprotate_loopguard_licm1.ll2
-rw-r--r--polly/test/DeLICM/reduction_looprotate_loopguard_licm2.ll2
-rw-r--r--polly/test/DeLICM/reduction_looprotate_loopguard_licm3.ll2
-rw-r--r--polly/test/DeLICM/reduction_unrelatedunusual.ll2
-rw-r--r--polly/test/DeLICM/reject_loadafterstore.ll2
-rw-r--r--polly/test/DeLICM/reject_outofquota.ll4
-rw-r--r--polly/test/DeLICM/reject_storeafterstore.ll2
-rw-r--r--polly/test/DeLICM/reject_storeinsubregion.ll2
-rw-r--r--polly/test/DeLICM/reject_unusualstore.ll4
-rw-r--r--polly/test/DeLICM/skip_maywrite.ll2
-rw-r--r--polly/test/DeLICM/skip_multiaccess.ll2
-rw-r--r--polly/test/DeLICM/skip_notinloop.ll2
-rw-r--r--polly/test/DeLICM/skip_scalaraccess.ll2
-rw-r--r--polly/test/DeadCodeElimination/chained_iterations.ll4
-rw-r--r--polly/test/DeadCodeElimination/chained_iterations_2.ll4
-rw-r--r--polly/test/DeadCodeElimination/computeout.ll3
-rw-r--r--polly/test/DeadCodeElimination/dead_iteration_elimination.ll1
-rw-r--r--polly/test/DeadCodeElimination/non-affine-affine-mix.ll2
-rw-r--r--polly/test/DeadCodeElimination/non-affine.ll2
-rw-r--r--polly/test/DeadCodeElimination/null_schedule.ll2
-rw-r--r--polly/test/DependenceInfo/computeout.ll6
-rw-r--r--polly/test/DependenceInfo/different_schedule_dimensions.ll4
-rw-r--r--polly/test/DependenceInfo/do_pluto_matmult.ll6
-rw-r--r--polly/test/DependenceInfo/fine_grain_dep_0.ll7
-rw-r--r--polly/test/DependenceInfo/generate_may_write_dependence_info.ll2
-rw-r--r--polly/test/DependenceInfo/infeasible_context.ll5
-rw-r--r--polly/test/DependenceInfo/may_writes_do_not_block_must_writes_for_war.ll2
-rw-r--r--polly/test/DependenceInfo/nonaffine-condition-buildMemoryAccess.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_complex_location.ll6
-rw-r--r--polly/test/DependenceInfo/reduction_dependences_equal_non_reduction_dependences.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_dependences_not_null.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_mixed_reduction_and_non_reduction_dependences.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_multiple_loops_array_sum.ll6
-rw-r--r--polly/test/DependenceInfo/reduction_multiple_loops_array_sum_2.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_multiple_loops_array_sum_3.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_multiple_reductions.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_multiple_reductions_2.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_only_reduction_like_access.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_partially_escaping_intermediate_in_other_stmt.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_privatization_deps.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_privatization_deps_2.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_privatization_deps_3.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_privatization_deps_4.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_privatization_deps_5.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_sequence.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_simple_iv.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_simple_iv_debug_wrapped_dependences.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_simple_privatization_deps_2.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_simple_privatization_deps_w_parameter.ll2
-rw-r--r--polly/test/DependenceInfo/reduction_two_reductions_different_rloops.ll2
-rw-r--r--polly/test/DependenceInfo/sequential_loops.ll79
-rw-r--r--polly/test/ForwardOpTree/atax.ll2
-rw-r--r--polly/test/ForwardOpTree/changed-kind.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_from_region.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_hoisted.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_instruction.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_into_region.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_into_region_redundant_use.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_load.ll1
-rw-r--r--polly/test/ForwardOpTree/forward_load_differentarray.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_load_double_write.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_load_fromloop.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_load_indirect.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_load_memset_after.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_load_memset_before.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_load_tripleuse.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_load_unrelatedunusual.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_phi_load.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_readonly.ll4
-rw-r--r--polly/test/ForwardOpTree/forward_reusue.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_store.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_synthesizable_definloop.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_synthesizable_indvar.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_synthesizable_useinloop.ll2
-rw-r--r--polly/test/ForwardOpTree/forward_transitive.ll2
-rw-r--r--polly/test/ForwardOpTree/jacobi-1d.ll2
-rw-r--r--polly/test/ForwardOpTree/noforward_from_region.ll2
-rw-r--r--polly/test/ForwardOpTree/noforward_load_conditional.ll2
-rw-r--r--polly/test/ForwardOpTree/noforward_load_writebetween.ll2
-rw-r--r--polly/test/ForwardOpTree/noforward_outofquota.ll4
-rw-r--r--polly/test/ForwardOpTree/noforward_partial.ll2
-rw-r--r--polly/test/ForwardOpTree/noforward_phi.ll2
-rw-r--r--polly/test/ForwardOpTree/noforward_selfrefphi.ll2
-rw-r--r--polly/test/ForwardOpTree/noforward_sideffects.ll2
-rw-r--r--polly/test/ForwardOpTree/noforward_synthesizable_unknownit.ll2
-rw-r--r--polly/test/ForwardOpTree/out-of-quota1.ll2
-rw-r--r--polly/test/IstAstInfo/alias_checks_with_empty_context.ll2
-rw-r--r--polly/test/IstAstInfo/alias_simple_1.ll10
-rw-r--r--polly/test/IstAstInfo/alias_simple_2.ll12
-rw-r--r--polly/test/IstAstInfo/alias_simple_3.ll10
-rw-r--r--polly/test/IstAstInfo/aliasing_arrays_with_identical_base.ll2
-rw-r--r--polly/test/IstAstInfo/aliasing_multiple_alias_groups.ll4
-rw-r--r--polly/test/IstAstInfo/aliasing_parametric_simple_1.ll2
-rw-r--r--polly/test/IstAstInfo/aliasing_parametric_simple_2.ll2
-rw-r--r--polly/test/IstAstInfo/dependence_distance_minimal.ll2
-rw-r--r--polly/test/IstAstInfo/domain_bounded_only_with_context.ll2
-rw-r--r--polly/test/IstAstInfo/non_affine_access.ll2
-rw-r--r--polly/test/IstAstInfo/reduction_clauses_onedimensional_access.ll2
-rw-r--r--polly/test/IstAstInfo/reduction_dependences_equal_non_reduction_dependences.ll2
-rw-r--r--polly/test/IstAstInfo/reduction_different_reduction_clauses.ll2
-rw-r--r--polly/test/IstAstInfo/reduction_modulo_and_loop_reversal_schedule.ll2
-rw-r--r--polly/test/IstAstInfo/reduction_modulo_and_loop_reversal_schedule_2.ll2
-rw-r--r--polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions.ll2
-rw-r--r--polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_2.ll2
-rw-r--r--polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_3.ll2
-rw-r--r--polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_4.ll2
-rw-r--r--polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_5.ll2
-rw-r--r--polly/test/IstAstInfo/reduction_multiple_dimensions.ll2
-rw-r--r--polly/test/IstAstInfo/reduction_multiple_dimensions_2.ll2
-rw-r--r--polly/test/IstAstInfo/reduction_multiple_dimensions_3.ll2
-rw-r--r--polly/test/IstAstInfo/reduction_multiple_dimensions_4.ll2
-rw-r--r--polly/test/IstAstInfo/run-time-condition.ll2
-rw-r--r--polly/test/IstAstInfo/runtime_context_with_error_blocks.ll2
-rw-r--r--polly/test/IstAstInfo/simple-run-time-condition.ll2
-rw-r--r--polly/test/IstAstInfo/single_loop_strip_mine.ll4
-rw-r--r--polly/test/IstAstInfo/single_loop_uint_max_iterations.ll2
-rw-r--r--polly/test/IstAstInfo/single_loop_ull_max_iterations.ll2
-rw-r--r--polly/test/JSONExporter/ImportAccesses/ImportAccesses-Bad-relation.ll2
-rw-r--r--polly/test/JSONExporter/ImportAccesses/ImportAccesses-No-accesses-key.ll2
-rw-r--r--polly/test/JSONExporter/ImportAccesses/ImportAccesses-Not-enough-MemAcc.ll2
-rw-r--r--polly/test/JSONExporter/ImportAccesses/ImportAccesses-Not-enough-statements.ll2
-rw-r--r--polly/test/JSONExporter/ImportAccesses/ImportAccesses-Relation-mispelled.ll2
-rw-r--r--polly/test/JSONExporter/ImportAccesses/ImportAccesses-Statements-mispelled.ll2
-rw-r--r--polly/test/JSONExporter/ImportAccesses/ImportAccesses-Undeclared-ScopArrayInfo.ll2
-rw-r--r--polly/test/JSONExporter/ImportAccesses/ImportAccesses-Wrong-number-dimensions.ll2
-rw-r--r--polly/test/JSONExporter/ImportArrays/ImportArrays-Mispelled-type.ll2
-rw-r--r--polly/test/JSONExporter/ImportArrays/ImportArrays-Negative-size.ll2
-rw-r--r--polly/test/JSONExporter/ImportArrays/ImportArrays-No-name.ll2
-rw-r--r--polly/test/JSONExporter/ImportArrays/ImportArrays-No-sizes-key.ll2
-rw-r--r--polly/test/JSONExporter/ImportArrays/ImportArrays-No-type-key.ll2
-rw-r--r--polly/test/JSONExporter/ImportContext/ImportContext-Context-mispelled.ll2
-rw-r--r--polly/test/JSONExporter/ImportContext/ImportContext-Not-parameter-set.ll2
-rw-r--r--polly/test/JSONExporter/ImportContext/ImportContext-Unvalid-Context.ll2
-rw-r--r--polly/test/JSONExporter/ImportContext/ImportContext-Wrong-dimension.ll2
-rw-r--r--polly/test/JSONExporter/ImportSchedule/ImportSchedule-No-schedule-key.ll2
-rw-r--r--polly/test/JSONExporter/ImportSchedule/ImportSchedule-Schedule-not-valid.ll2
-rw-r--r--polly/test/JSONExporter/ImportSchedule/ImportSchedule-Statements-mispelled.ll2
-rw-r--r--polly/test/JSONExporter/ImportSchedule/ImportSchedule-Wrong-number-statements.ll2
-rw-r--r--polly/test/MaximalStaticExpansion/load_after_store_same_statement.ll2
-rw-r--r--polly/test/MaximalStaticExpansion/read_from_original.ll2
-rw-r--r--polly/test/MaximalStaticExpansion/too_many_writes.ll2
-rw-r--r--polly/test/MaximalStaticExpansion/working_deps_between_inners.ll1
-rw-r--r--polly/test/MaximalStaticExpansion/working_deps_between_inners_phi.ll2
-rw-r--r--polly/test/MaximalStaticExpansion/working_expansion.ll1
-rw-r--r--polly/test/MaximalStaticExpansion/working_expansion_multiple_dependences_per_statement.ll1
-rw-r--r--polly/test/MaximalStaticExpansion/working_expansion_multiple_instruction_per_statement.ll1
-rw-r--r--polly/test/MaximalStaticExpansion/working_phi_expansion.ll2
-rw-r--r--polly/test/MaximalStaticExpansion/working_phi_two_scalars.ll2
-rw-r--r--polly/test/MaximalStaticExpansion/working_value_expansion.ll1
-rw-r--r--polly/test/PruneUnprofitable/prune_only_scalardeps.ll1
-rw-r--r--polly/test/ScheduleOptimizer/2012-03-16-Empty-Domain.ll2
-rw-r--r--polly/test/ScheduleOptimizer/2013-04-11-Empty-Domain-two.ll2
-rw-r--r--polly/test/ScheduleOptimizer/GreedyFuse/fuse-double.ll4
-rw-r--r--polly/test/ScheduleOptimizer/GreedyFuse/fuse-except-first.ll4
-rw-r--r--polly/test/ScheduleOptimizer/GreedyFuse/fuse-except-third.ll4
-rw-r--r--polly/test/ScheduleOptimizer/GreedyFuse/fuse-inner-carried.ll4
-rw-r--r--polly/test/ScheduleOptimizer/GreedyFuse/fuse-inner-third.ll4
-rw-r--r--polly/test/ScheduleOptimizer/GreedyFuse/fuse-inner.ll4
-rw-r--r--polly/test/ScheduleOptimizer/GreedyFuse/fuse-simple.ll4
-rw-r--r--polly/test/ScheduleOptimizer/GreedyFuse/nofuse-simple.ll4
-rw-r--r--polly/test/ScheduleOptimizer/GreedyFuse/nofuse-with-middle.ll4
-rw-r--r--polly/test/ScheduleOptimizer/ManualOptimization/disable_nonforced.ll2
-rw-r--r--polly/test/ScheduleOptimizer/ManualOptimization/distribute_heuristic.ll4
-rw-r--r--polly/test/ScheduleOptimizer/ManualOptimization/distribute_illegal_looploc.ll2
-rw-r--r--polly/test/ScheduleOptimizer/ManualOptimization/distribute_illegal_pragmaloc.ll2
-rw-r--r--polly/test/ScheduleOptimizer/ManualOptimization/unroll_disable.ll2
-rw-r--r--polly/test/ScheduleOptimizer/ManualOptimization/unroll_double.ll2
-rw-r--r--polly/test/ScheduleOptimizer/ManualOptimization/unroll_full.ll2
-rw-r--r--polly/test/ScheduleOptimizer/ManualOptimization/unroll_heuristic.ll4
-rw-r--r--polly/test/ScheduleOptimizer/ManualOptimization/unroll_partial.ll4
-rw-r--r--polly/test/ScheduleOptimizer/ManualOptimization/unroll_partial_followup.ll8
-rw-r--r--polly/test/ScheduleOptimizer/SIMDInParallelFor.ll2
-rw-r--r--polly/test/ScheduleOptimizer/computeout.ll2
-rw-r--r--polly/test/ScheduleOptimizer/ensure-correct-tile-sizes.ll6
-rw-r--r--polly/test/ScheduleOptimizer/focaltech_test_detail_threshold-7bc17e.ll1
-rw-r--r--polly/test/ScheduleOptimizer/full_partial_tile_separation.ll2
-rw-r--r--polly/test/ScheduleOptimizer/line-tiling-2.ll2
-rw-r--r--polly/test/ScheduleOptimizer/line-tiling.ll2
-rw-r--r--polly/test/ScheduleOptimizer/mat_mul_pattern_data_layout.ll2
-rw-r--r--polly/test/ScheduleOptimizer/mat_mul_pattern_data_layout_2.ll6
-rw-r--r--polly/test/ScheduleOptimizer/one-dimensional-band.ll2
-rw-r--r--polly/test/ScheduleOptimizer/outer_coincidence.ll4
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts-after-delicm.ll6
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts-after-delicm_2.ll2
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts.ll8
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_11.ll4
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_12.ll4
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_13.ll4
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_14.ll4
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_15.ll2
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_16.ll2
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_17.ll2
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_18.ll2
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_19.ll2
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_2.ll2
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_20.ll2
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_21.ll2
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_22.ll2
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_24.ll2
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_25.ll4
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_3.ll12
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_4.ll8
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_5.ll8
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_6.ll8
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_7.ll4
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_8.ll4
-rw-r--r--polly/test/ScheduleOptimizer/pattern-matching-based-opts_9.ll6
-rw-r--r--polly/test/ScheduleOptimizer/pattern_matching_based_opts_splitmap.ll2
-rw-r--r--polly/test/ScheduleOptimizer/prevectorization-without-tiling.ll2
-rw-r--r--polly/test/ScheduleOptimizer/prevectorization.ll4
-rw-r--r--polly/test/ScheduleOptimizer/rectangular-tiling.ll8
-rw-r--r--polly/test/ScheduleOptimizer/schedule_computeout.ll2
-rw-r--r--polly/test/ScheduleOptimizer/statistics.ll2
-rw-r--r--polly/test/ScheduleOptimizer/tile_after_fusion.ll4
-rw-r--r--polly/test/ScheduleOptimizer/vivid-vbi-gen-vivid_vbi_gen_sliced-before-llvmreduced.ll2
-rw-r--r--polly/test/ScopDetect/aliasing_parametric_simple_1.ll2
-rw-r--r--polly/test/ScopDetect/aliasing_parametric_simple_2.ll2
-rw-r--r--polly/test/ScopDetect/aliasing_simple_1.ll2
-rw-r--r--polly/test/ScopDetect/aliasing_simple_2.ll2
-rw-r--r--polly/test/ScopDetect/base_pointer_load_setNewAccessRelation.ll2
-rw-r--r--polly/test/ScopDetect/base_pointer_setNewAccessRelation.ll2
-rw-r--r--polly/test/ScopDetect/callbr.ll4
-rw-r--r--polly/test/ScopDetect/collective_invariant_loads.ll2
-rw-r--r--polly/test/ScopDetect/cross_loop_non_single_exit.ll2
-rw-r--r--polly/test/ScopDetect/cross_loop_non_single_exit_2.ll2
-rw-r--r--polly/test/ScopDetect/dependency_to_phi_node_outside_of_region.ll2
-rw-r--r--polly/test/ScopDetect/dot-scops-npm.ll2
-rw-r--r--polly/test/ScopDetect/dot-scops.ll2
-rw-r--r--polly/test/ScopDetect/error-block-always-executed.ll2
-rw-r--r--polly/test/ScopDetect/error-block-referenced-from-scop.ll2
-rw-r--r--polly/test/ScopDetect/error-block-unreachable.ll2
-rw-r--r--polly/test/ScopDetect/expand-region-correctly-2.ll2
-rw-r--r--polly/test/ScopDetect/expand-region-correctly.ll2
-rw-r--r--polly/test/ScopDetect/ignore_func_flag_regex.ll2
-rw-r--r--polly/test/ScopDetect/index_from_unpredictable_loop.ll4
-rw-r--r--polly/test/ScopDetect/index_from_unpredictable_loop2.ll4
-rw-r--r--polly/test/ScopDetect/indvars.ll2
-rw-r--r--polly/test/ScopDetect/intrinsics_1.ll2
-rw-r--r--polly/test/ScopDetect/intrinsics_2.ll2
-rw-r--r--polly/test/ScopDetect/intrinsics_3.ll2
-rw-r--r--polly/test/ScopDetect/invalid-latch-conditions.ll6
-rw-r--r--polly/test/ScopDetect/invalidate_scalar_evolution.ll2
-rw-r--r--polly/test/ScopDetect/invariant-load-before-scop.ll2
-rw-r--r--polly/test/ScopDetect/keep_going_expansion.ll2
-rw-r--r--polly/test/ScopDetect/mod_ref_read_pointer.ll4
-rw-r--r--polly/test/ScopDetect/more-than-one-loop.ll4
-rw-r--r--polly/test/ScopDetect/multidim-with-undef-size.ll2
-rw-r--r--polly/test/ScopDetect/multidim.ll2
-rw-r--r--polly/test/ScopDetect/multidim_indirect_access.ll2
-rw-r--r--polly/test/ScopDetect/multidim_two_accesses_different_delinearization.ll2
-rw-r--r--polly/test/ScopDetect/nested_loop_single_exit.ll4
-rw-r--r--polly/test/ScopDetect/non-affine-conditional.ll2
-rw-r--r--polly/test/ScopDetect/non-affine-float-compare.ll2
-rw-r--r--polly/test/ScopDetect/non-affine-loop-condition-dependent-access.ll8
-rw-r--r--polly/test/ScopDetect/non-affine-loop-condition-dependent-access_2.ll6
-rw-r--r--polly/test/ScopDetect/non-affine-loop-condition-dependent-access_3.ll6
-rw-r--r--polly/test/ScopDetect/non-affine-loop.ll10
-rw-r--r--polly/test/ScopDetect/non-beneficial-loops-small-trip-count.ll2
-rw-r--r--polly/test/ScopDetect/non-constant-add-rec-start-expr.ll2
-rw-r--r--polly/test/ScopDetect/non-simple-memory-accesses.ll2
-rw-r--r--polly/test/ScopDetect/non_affine_loop_condition.ll4
-rw-r--r--polly/test/ScopDetect/only-one-affine-loop.ll2
-rw-r--r--polly/test/ScopDetect/only_func_flag.ll2
-rw-r--r--polly/test/ScopDetect/only_func_flag_regex.ll2
-rw-r--r--polly/test/ScopDetect/parametric-multiply-in-scev-2.ll2
-rw-r--r--polly/test/ScopDetect/parametric-multiply-in-scev.ll2
-rw-r--r--polly/test/ScopDetect/phi_with_multi_exiting_edges.ll2
-rw-r--r--polly/test/ScopDetect/profitability-large-basic-blocks.ll12
-rw-r--r--polly/test/ScopDetect/profitability-two-nested-loops.ll2
-rw-r--r--polly/test/ScopDetect/remove_all_children.ll2
-rw-r--r--polly/test/ScopDetect/report-scop-location.ll2
-rw-r--r--polly/test/ScopDetect/restrict-undef-size-scopdetect.ll2
-rw-r--r--polly/test/ScopDetect/run_time_alias_check.ll2
-rw-r--r--polly/test/ScopDetect/scev_remove_max.ll2
-rw-r--r--polly/test/ScopDetect/sequential_loops.ll6
-rw-r--r--polly/test/ScopDetect/simple_loop.ll2
-rw-r--r--polly/test/ScopDetect/simple_loop_non_single_entry.ll2
-rw-r--r--polly/test/ScopDetect/simple_loop_non_single_exit.ll2
-rw-r--r--polly/test/ScopDetect/simple_loop_non_single_exit_2.ll2
-rw-r--r--polly/test/ScopDetect/simple_loop_two_phi_nodes.ll2
-rw-r--r--polly/test/ScopDetect/simple_loop_with_param.ll2
-rw-r--r--polly/test/ScopDetect/simple_loop_with_param_2.ll2
-rw-r--r--polly/test/ScopDetect/simple_non_single_entry.ll2
-rw-r--r--polly/test/ScopDetect/skip_function_attribute.ll2
-rw-r--r--polly/test/ScopDetect/srem_with_parametric_divisor.ll2
-rw-r--r--polly/test/ScopDetect/statistics.ll2
-rw-r--r--polly/test/ScopDetect/switch-in-loop-patch.ll2
-rw-r--r--polly/test/ScopDetectionDiagnostics/ReportAlias-01.ll2
-rw-r--r--polly/test/ScopDetectionDiagnostics/ReportEntry.ll2
-rw-r--r--polly/test/ScopDetectionDiagnostics/ReportFuncCall-01.ll2
-rw-r--r--polly/test/ScopDetectionDiagnostics/ReportIrreducibleRegion.ll2
-rw-r--r--polly/test/ScopDetectionDiagnostics/ReportIrreducibleRegionWithoutDebugLoc.ll2
-rw-r--r--polly/test/ScopDetectionDiagnostics/ReportLoopBound-01.ll12
-rw-r--r--polly/test/ScopDetectionDiagnostics/ReportLoopHasNoExit.ll4
-rw-r--r--polly/test/ScopDetectionDiagnostics/ReportMultipleNonAffineAccesses.ll12
-rw-r--r--polly/test/ScopDetectionDiagnostics/ReportNonAffineAccess-01.ll2
-rw-r--r--polly/test/ScopDetectionDiagnostics/ReportUnprofitable.ll8
-rw-r--r--polly/test/ScopDetectionDiagnostics/ReportUnreachableInExit.ll2
-rw-r--r--polly/test/ScopDetectionDiagnostics/ReportVariantBasePtr-01.ll2
-rw-r--r--polly/test/ScopDetectionDiagnostics/loop_has_multiple_exits.ll2
-rw-r--r--polly/test/ScopDetectionDiagnostics/loop_partially_in_scop-2.ll2
-rw-r--r--polly/test/ScopDetectionDiagnostics/loop_partially_in_scop.ll2
-rw-r--r--polly/test/ScopInfo/20110312-Fail-without-basicaa.ll2
-rw-r--r--polly/test/ScopInfo/20111108-Parameter-not-detected.ll2
-rw-r--r--polly/test/ScopInfo/2012-03-16-Crash-because-of-unsigned-in-scev.ll2
-rw-r--r--polly/test/ScopInfo/2015-10-04-Crash-in-domain-generation.ll2
-rw-r--r--polly/test/ScopInfo/Alias-0.ll4
-rw-r--r--polly/test/ScopInfo/Alias-1.ll4
-rw-r--r--polly/test/ScopInfo/Alias-2.ll4
-rw-r--r--polly/test/ScopInfo/Alias-3.ll4
-rw-r--r--polly/test/ScopInfo/Alias-4.ll4
-rw-r--r--polly/test/ScopInfo/BoundChecks/single-loop.ll4
-rw-r--r--polly/test/ScopInfo/BoundChecks/two-loops.ll4
-rw-r--r--polly/test/ScopInfo/NonAffine/div_backedge.ll2
-rw-r--r--polly/test/ScopInfo/NonAffine/div_domain.ll2
-rw-r--r--polly/test/ScopInfo/NonAffine/invariant_loads_dependent_in_non_affine_region.ll2
-rw-r--r--polly/test/ScopInfo/NonAffine/modulo_backedge.ll2
-rw-r--r--polly/test/ScopInfo/NonAffine/modulo_domain.ll2
-rw-r--r--polly/test/ScopInfo/NonAffine/non-affine-loop-condition-dependent-access_1.ll4
-rw-r--r--polly/test/ScopInfo/NonAffine/non-affine-loop-condition-dependent-access_2.ll6
-rw-r--r--polly/test/ScopInfo/NonAffine/non-affine-loop-condition-dependent-access_3.ll6
-rw-r--r--polly/test/ScopInfo/NonAffine/non_affine_access_with_range_2.ll2
-rw-r--r--polly/test/ScopInfo/NonAffine/non_affine_but_sdiv.ll2
-rw-r--r--polly/test/ScopInfo/NonAffine/non_affine_but_srem.ll2
-rw-r--r--polly/test/ScopInfo/NonAffine/non_affine_conditional_nested.ll2
-rw-r--r--polly/test/ScopInfo/NonAffine/non_affine_conditional_surrounding_affine_loop.ll8
-rw-r--r--polly/test/ScopInfo/NonAffine/non_affine_conditional_surrounding_non_affine_loop.ll12
-rw-r--r--polly/test/ScopInfo/NonAffine/non_affine_float_compare.ll2
-rw-r--r--polly/test/ScopInfo/NonAffine/non_affine_loop_condition.ll6
-rw-r--r--polly/test/ScopInfo/NonAffine/non_affine_loop_used_later.ll4
-rw-r--r--polly/test/ScopInfo/NonAffine/non_affine_parametric_loop.ll2
-rw-r--r--polly/test/ScopInfo/NonAffine/non_affine_region_guaranteed_non-entry.ll2
-rw-r--r--polly/test/ScopInfo/NonAffine/whole-scop-non-affine-subregion-in-loop.ll2
-rw-r--r--polly/test/ScopInfo/aliasing_conditional_alias_groups_1.ll2
-rw-r--r--polly/test/ScopInfo/aliasing_conditional_alias_groups_2.ll2
-rw-r--r--polly/test/ScopInfo/aliasing_dead_access.ll2
-rw-r--r--polly/test/ScopInfo/aliasing_many_arrays_to_compare.ll8
-rw-r--r--polly/test/ScopInfo/aliasing_many_read_only_acesses.ll2
-rw-r--r--polly/test/ScopInfo/aliasing_multiple_alias_groups.ll4
-rw-r--r--polly/test/ScopInfo/aliasing_with_non_affine_access.ll2
-rw-r--r--polly/test/ScopInfo/allow-all-parameters-dereferencable.ll12
-rw-r--r--polly/test/ScopInfo/assume_gep_bounds.ll4
-rw-r--r--polly/test/ScopInfo/assume_gep_bounds_2.ll2
-rw-r--r--polly/test/ScopInfo/assume_gep_bounds_many.ll4
-rw-r--r--polly/test/ScopInfo/avoid_new_parameters_from_geps.ll2
-rw-r--r--polly/test/ScopInfo/bool-addrec.ll2
-rw-r--r--polly/test/ScopInfo/bounded_loop_assumptions.ll2
-rw-r--r--polly/test/ScopInfo/branch-references-loop-scev-with-unknown-iterations-2.ll4
-rw-r--r--polly/test/ScopInfo/branch-references-loop-scev-with-unknown-iterations-3.ll6
-rw-r--r--polly/test/ScopInfo/branch-references-loop-scev-with-unknown-iterations.ll6
-rw-r--r--polly/test/ScopInfo/bug_2010_10_22.ll2
-rw-r--r--polly/test/ScopInfo/bug_2011_1_5.ll2
-rw-r--r--polly/test/ScopInfo/bug_scev_not_fully_eval.ll2
-rw-r--r--polly/test/ScopInfo/cfg_consequences.ll2
-rw-r--r--polly/test/ScopInfo/complex-branch-structure.ll2
-rw-r--r--polly/test/ScopInfo/complex-condition.ll2
-rw-r--r--polly/test/ScopInfo/complex-expression.ll2
-rw-r--r--polly/test/ScopInfo/complex-loop-nesting.ll2
-rw-r--r--polly/test/ScopInfo/complex-successor-structure-2.ll2
-rw-r--r--polly/test/ScopInfo/complex-successor-structure-3.ll4
-rw-r--r--polly/test/ScopInfo/complex-successor-structure.ll2
-rw-r--r--polly/test/ScopInfo/complex_domain_binary_condition.ll2
-rw-r--r--polly/test/ScopInfo/complex_execution_context.ll2
-rw-r--r--polly/test/ScopInfo/cond_constant_in_loop.ll2
-rw-r--r--polly/test/ScopInfo/cond_in_loop.ll2
-rw-r--r--polly/test/ScopInfo/condition-after-error-block-2.ll2
-rw-r--r--polly/test/ScopInfo/condition-after-error-block-before-scop.ll2
-rw-r--r--polly/test/ScopInfo/condtion-after-error-block.ll2
-rw-r--r--polly/test/ScopInfo/const_srem_sdiv.ll4
-rw-r--r--polly/test/ScopInfo/constant-non-integer-branch-condition.ll2
-rw-r--r--polly/test/ScopInfo/constant_factor_in_parameter.ll4
-rw-r--r--polly/test/ScopInfo/constant_functions_outside_scop_as_unknown.ll2
-rw-r--r--polly/test/ScopInfo/constant_start_integer.ll2
-rw-r--r--polly/test/ScopInfo/debug_call.ll2
-rw-r--r--polly/test/ScopInfo/delinearize-together-all-data-refs.ll2
-rw-r--r--polly/test/ScopInfo/div_by_zero.ll2
-rw-r--r--polly/test/ScopInfo/do-not-model-error-block-accesses.ll2
-rw-r--r--polly/test/ScopInfo/eager-binary-and-or-conditions.ll4
-rw-r--r--polly/test/ScopInfo/early_exit_for_complex_domains.ll2
-rw-r--r--polly/test/ScopInfo/error-blocks-1.ll2
-rw-r--r--polly/test/ScopInfo/error-blocks-2.ll4
-rw-r--r--polly/test/ScopInfo/escaping_empty_scop.ll2
-rw-r--r--polly/test/ScopInfo/exit-phi-1.ll4
-rw-r--r--polly/test/ScopInfo/exit-phi-2.ll2
-rw-r--r--polly/test/ScopInfo/exit_phi_accesses-2.ll2
-rw-r--r--polly/test/ScopInfo/exit_phi_accesses.ll2
-rw-r--r--polly/test/ScopInfo/expensive-boundary-context.ll4
-rw-r--r--polly/test/ScopInfo/extract_constant_factor_introduces_new_parameter.ll4
-rw-r--r--polly/test/ScopInfo/full-function.ll4
-rw-r--r--polly/test/ScopInfo/granularity_same_name.ll8
-rw-r--r--polly/test/ScopInfo/granularity_scalar-indep.ll2
-rw-r--r--polly/test/ScopInfo/granularity_scalar-indep_cross-referencing-phi1.ll2
-rw-r--r--polly/test/ScopInfo/granularity_scalar-indep_cross-referencing-phi2.ll2
-rw-r--r--polly/test/ScopInfo/granularity_scalar-indep_epilogue.ll2
-rw-r--r--polly/test/ScopInfo/granularity_scalar-indep_epilogue_last.ll2
-rw-r--r--polly/test/ScopInfo/granularity_scalar-indep_noepilogue.ll2
-rw-r--r--polly/test/ScopInfo/granularity_scalar-indep_ordered-2.ll2
-rw-r--r--polly/test/ScopInfo/granularity_scalar-indep_ordered.ll2
-rw-r--r--polly/test/ScopInfo/i1_params.ll2
-rw-r--r--polly/test/ScopInfo/infeasible-rtc.ll4
-rw-r--r--polly/test/ScopInfo/infeasible_invalid_context.ll4
-rw-r--r--polly/test/ScopInfo/int2ptr_ptr2int.ll4
-rw-r--r--polly/test/ScopInfo/int2ptr_ptr2int_2.ll8
-rw-r--r--polly/test/ScopInfo/integers.ll2
-rw-r--r--polly/test/ScopInfo/inter-error-bb-dependence.ll2
-rw-r--r--polly/test/ScopInfo/inter_bb_scalar_dep.ll4
-rw-r--r--polly/test/ScopInfo/intra-non-affine-stmt-phi-node.ll4
-rw-r--r--polly/test/ScopInfo/intra_and_inter_bb_scalar_dep.ll4
-rw-r--r--polly/test/ScopInfo/intra_bb_scalar_dep.ll4
-rw-r--r--polly/test/ScopInfo/intrinsics.ll2
-rw-r--r--polly/test/ScopInfo/invalid_add_rec_after_invariant_load_remapping.ll2
-rw-r--r--polly/test/ScopInfo/invalidate_iterator_during_MA_removal.ll2
-rw-r--r--polly/test/ScopInfo/invariant-load-instlist.ll2
-rw-r--r--polly/test/ScopInfo/invariant-loads-leave-read-only-statements.ll4
-rw-r--r--polly/test/ScopInfo/invariant_load.ll2
-rw-r--r--polly/test/ScopInfo/invariant_load_access_classes_different_base_type.ll4
-rw-r--r--polly/test/ScopInfo/invariant_load_access_classes_different_base_type_escaping.ll4
-rw-r--r--polly/test/ScopInfo/invariant_load_access_classes_different_base_type_same_pointer.ll4
-rw-r--r--polly/test/ScopInfo/invariant_load_access_classes_different_base_type_same_pointer_escaping.ll4
-rw-r--r--polly/test/ScopInfo/invariant_load_addrec_sum.ll2
-rw-r--r--polly/test/ScopInfo/invariant_load_base_pointer.ll2
-rw-r--r--polly/test/ScopInfo/invariant_load_base_pointer_conditional.ll2
-rw-r--r--polly/test/ScopInfo/invariant_load_base_pointer_in_conditional.ll2
-rw-r--r--polly/test/ScopInfo/invariant_load_branch_condition.ll4
-rw-r--r--polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs.ll2
-rw-r--r--polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_2.ll2
-rw-r--r--polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_3.ll2
-rw-r--r--polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_4.ll2
-rw-r--r--polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_4b.ll2
-rw-r--r--polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_4c.ll2
-rw-r--r--polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_5.ll2
-rw-r--r--polly/test/ScopInfo/invariant_load_complex_condition.ll4
-rw-r--r--polly/test/ScopInfo/invariant_load_condition.ll2
-rw-r--r--polly/test/ScopInfo/invariant_load_dereferenceable.ll4
-rw-r--r--polly/test/ScopInfo/invariant_load_distinct_parameter_valuations.ll2
-rw-r--r--polly/test/ScopInfo/invariant_load_in_non_affine.ll4
-rw-r--r--polly/test/ScopInfo/invariant_load_loop_ub.ll4
-rw-r--r--polly/test/ScopInfo/invariant_load_ptr_ptr_noalias.ll4
-rw-r--r--polly/test/ScopInfo/invariant_load_scalar_dep.ll2
-rw-r--r--polly/test/ScopInfo/invariant_load_stmt_domain.ll2
-rw-r--r--polly/test/ScopInfo/invariant_load_zext_parameter-2.ll4
-rw-r--r--polly/test/ScopInfo/invariant_load_zext_parameter.ll4
-rw-r--r--polly/test/ScopInfo/invariant_load_zextended_in_own_execution_context.ll4
-rw-r--r--polly/test/ScopInfo/invariant_loads_complicated_dependences.ll2
-rw-r--r--polly/test/ScopInfo/invariant_loads_cyclic_dependences.ll2
-rw-r--r--polly/test/ScopInfo/invariant_loop_bounds.ll2
-rw-r--r--polly/test/ScopInfo/invariant_same_loop_bound_multiple_times-1.ll2
-rw-r--r--polly/test/ScopInfo/invariant_same_loop_bound_multiple_times-2.ll2
-rw-r--r--polly/test/ScopInfo/isl_aff_out_of_bounds.ll2
-rw-r--r--polly/test/ScopInfo/isl_trip_count_01.ll2
-rw-r--r--polly/test/ScopInfo/isl_trip_count_02.ll2
-rw-r--r--polly/test/ScopInfo/isl_trip_count_03.ll2
-rw-r--r--polly/test/ScopInfo/isl_trip_count_multiple_exiting_blocks.ll2
-rw-r--r--polly/test/ScopInfo/licm_reduction_nested.ll4
-rw-r--r--polly/test/ScopInfo/long-compile-time-alias-analysis.ll2
-rw-r--r--polly/test/ScopInfo/long-sequence-of-error-blocks-2.ll2
-rw-r--r--polly/test/ScopInfo/long-sequence-of-error-blocks.ll4
-rw-r--r--polly/test/ScopInfo/loop-multiexit-succ-cond.ll4
-rw-r--r--polly/test/ScopInfo/loop_affine_bound_0.ll4
-rw-r--r--polly/test/ScopInfo/loop_affine_bound_1.ll4
-rw-r--r--polly/test/ScopInfo/loop_affine_bound_2.ll4
-rw-r--r--polly/test/ScopInfo/loop_carry.ll2
-rw-r--r--polly/test/ScopInfo/many-scalar-dependences.ll2
-rw-r--r--polly/test/ScopInfo/max-loop-depth.ll2
-rw-r--r--polly/test/ScopInfo/memcpy-raw-source.ll2
-rw-r--r--polly/test/ScopInfo/memcpy.ll4
-rw-r--r--polly/test/ScopInfo/memmove.ll4
-rw-r--r--polly/test/ScopInfo/memset.ll4
-rw-r--r--polly/test/ScopInfo/memset_null.ll4
-rw-r--r--polly/test/ScopInfo/mismatching-array-dimensions.ll2
-rw-r--r--polly/test/ScopInfo/mod_ref_access_pointee_arguments.ll6
-rw-r--r--polly/test/ScopInfo/mod_ref_read_pointee_arguments.ll6
-rw-r--r--polly/test/ScopInfo/mod_ref_read_pointer.ll4
-rw-r--r--polly/test/ScopInfo/mod_ref_read_pointers.ll6
-rw-r--r--polly/test/ScopInfo/modulo_zext_1.ll2
-rw-r--r--polly/test/ScopInfo/modulo_zext_2.ll2
-rw-r--r--polly/test/ScopInfo/modulo_zext_3.ll2
-rw-r--r--polly/test/ScopInfo/multi-scop.ll2
-rw-r--r--polly/test/ScopInfo/multidim_2d-diagonal-matrix.ll4
-rw-r--r--polly/test/ScopInfo/multidim_2d_outer_parametric_offset.ll2
-rw-r--r--polly/test/ScopInfo/multidim_2d_parametric_array_static_loop_bounds.ll2
-rw-r--r--polly/test/ScopInfo/multidim_2d_with_modref_call.ll8
-rw-r--r--polly/test/ScopInfo/multidim_2d_with_modref_call_2.ll8
-rw-r--r--polly/test/ScopInfo/multidim_3d_parametric_array_static_loop_bounds.ll2
-rw-r--r--polly/test/ScopInfo/multidim_fixedsize_different_dimensionality.ll2
-rw-r--r--polly/test/ScopInfo/multidim_fixedsize_multi_offset.ll2
-rw-r--r--polly/test/ScopInfo/multidim_fold_constant_dim.ll2
-rw-r--r--polly/test/ScopInfo/multidim_fold_constant_dim_zero.ll2
-rw-r--r--polly/test/ScopInfo/multidim_fortran_2d.ll4
-rw-r--r--polly/test/ScopInfo/multidim_fortran_2d_params.ll4
-rw-r--r--polly/test/ScopInfo/multidim_fortran_2d_with_modref_call.ll8
-rw-r--r--polly/test/ScopInfo/multidim_fortran_srem.ll2
-rw-r--r--polly/test/ScopInfo/multidim_gep_pointercast.ll2
-rw-r--r--polly/test/ScopInfo/multidim_gep_pointercast2.ll2
-rw-r--r--polly/test/ScopInfo/multidim_ivs_and_integer_offsets_3d.ll2
-rw-r--r--polly/test/ScopInfo/multidim_ivs_and_parameteric_offsets_3d.ll2
-rw-r--r--polly/test/ScopInfo/multidim_many_references.ll4
-rw-r--r--polly/test/ScopInfo/multidim_nested_start_integer.ll4
-rw-r--r--polly/test/ScopInfo/multidim_nested_start_share_parameter.ll2
-rw-r--r--polly/test/ScopInfo/multidim_only_ivs_2d.ll2
-rw-r--r--polly/test/ScopInfo/multidim_only_ivs_3d.ll2
-rw-r--r--polly/test/ScopInfo/multidim_only_ivs_3d_cast.ll2
-rw-r--r--polly/test/ScopInfo/multidim_only_ivs_3d_reverse.ll2
-rw-r--r--polly/test/ScopInfo/multidim_param_in_subscript-2.ll2
-rw-r--r--polly/test/ScopInfo/multidim_param_in_subscript.ll2
-rw-r--r--polly/test/ScopInfo/multidim_parameter_addrec_product.ll2
-rw-r--r--polly/test/ScopInfo/multidim_single_and_multidim_array.ll16
-rw-r--r--polly/test/ScopInfo/multidim_srem.ll2
-rw-r--r--polly/test/ScopInfo/multidim_with_bitcast.ll2
-rw-r--r--polly/test/ScopInfo/multiple-binary-or-conditions.ll4
-rw-r--r--polly/test/ScopInfo/multiple-types-access-offset-not-dividable-by-element-size.ll2
-rw-r--r--polly/test/ScopInfo/multiple-types-non-affine-2.ll4
-rw-r--r--polly/test/ScopInfo/multiple-types-non-affine.ll4
-rw-r--r--polly/test/ScopInfo/multiple-types-non-power-of-two-2.ll2
-rw-r--r--polly/test/ScopInfo/multiple-types-non-power-of-two.ll2
-rw-r--r--polly/test/ScopInfo/multiple-types-two-dimensional-2.ll2
-rw-r--r--polly/test/ScopInfo/multiple-types-two-dimensional.ll2
-rw-r--r--polly/test/ScopInfo/multiple-types.ll4
-rw-r--r--polly/test/ScopInfo/multiple_exiting_blocks.ll2
-rw-r--r--polly/test/ScopInfo/multiple_exiting_blocks_two_loop.ll2
-rw-r--r--polly/test/ScopInfo/multiple_latch_blocks.ll2
-rw-r--r--polly/test/ScopInfo/nested-loops.ll2
-rw-r--r--polly/test/ScopInfo/no-scalar-deps-in-non-affine-subregion.ll2
-rw-r--r--polly/test/ScopInfo/non-affine-region-phi.ll4
-rw-r--r--polly/test/ScopInfo/non-affine-region-with-loop-2.ll2
-rw-r--r--polly/test/ScopInfo/non-affine-region-with-loop.ll4
-rw-r--r--polly/test/ScopInfo/non-precise-inv-load-1.ll2
-rw-r--r--polly/test/ScopInfo/non-precise-inv-load-2.ll2
-rw-r--r--polly/test/ScopInfo/non-precise-inv-load-3.ll2
-rw-r--r--polly/test/ScopInfo/non-precise-inv-load-4.ll2
-rw-r--r--polly/test/ScopInfo/non-precise-inv-load-5.ll2
-rw-r--r--polly/test/ScopInfo/non-precise-inv-load-6.ll2
-rw-r--r--polly/test/ScopInfo/non-pure-function-call.ll2
-rw-r--r--polly/test/ScopInfo/non-pure-function-calls-causes-dead-blocks.ll2
-rw-r--r--polly/test/ScopInfo/non-pure-function-calls.ll2
-rw-r--r--polly/test/ScopInfo/non_affine_access.ll4
-rw-r--r--polly/test/ScopInfo/non_affine_region_1.ll2
-rw-r--r--polly/test/ScopInfo/non_affine_region_2.ll2
-rw-r--r--polly/test/ScopInfo/non_affine_region_3.ll4
-rw-r--r--polly/test/ScopInfo/non_affine_region_4.ll2
-rw-r--r--polly/test/ScopInfo/nonaffine-buildMemoryAccess.ll2
-rw-r--r--polly/test/ScopInfo/not-a-reduction.ll2
-rw-r--r--polly/test/ScopInfo/opaque-struct.ll2
-rw-r--r--polly/test/ScopInfo/out-of-scop-use-in-region-entry-phi-node-nonaffine-subregion.ll2
-rw-r--r--polly/test/ScopInfo/out-of-scop-use-in-region-entry-phi-node.ll2
-rw-r--r--polly/test/ScopInfo/parameter-constant-division.ll4
-rw-r--r--polly/test/ScopInfo/parameter_in_dead_statement.ll8
-rw-r--r--polly/test/ScopInfo/parameter_product.ll2
-rw-r--r--polly/test/ScopInfo/parameter_with_constant_factor_in_add.ll2
-rw-r--r--polly/test/ScopInfo/partially_invariant_load_1.ll4
-rw-r--r--polly/test/ScopInfo/partially_invariant_load_2.ll2
-rw-r--r--polly/test/ScopInfo/phi-in-non-affine-region.ll2
-rw-r--r--polly/test/ScopInfo/phi_after_error_block.ll2
-rw-r--r--polly/test/ScopInfo/phi_condition_modeling_1.ll2
-rw-r--r--polly/test/ScopInfo/phi_condition_modeling_2.ll2
-rw-r--r--polly/test/ScopInfo/phi_conditional_simple_1.ll2
-rw-r--r--polly/test/ScopInfo/phi_loop_carried_float.ll2
-rw-r--r--polly/test/ScopInfo/phi_not_grouped_at_top.ll2
-rw-r--r--polly/test/ScopInfo/phi_scalar_simple_1.ll2
-rw-r--r--polly/test/ScopInfo/phi_scalar_simple_2.ll2
-rw-r--r--polly/test/ScopInfo/phi_with_invoke_edge.ll2
-rw-r--r--polly/test/ScopInfo/pointer-comparison-no-nsw.ll2
-rw-r--r--polly/test/ScopInfo/pointer-comparison.ll2
-rw-r--r--polly/test/ScopInfo/pointer-type-expressions.ll2
-rw-r--r--polly/test/ScopInfo/pointer-used-as-base-pointer-and-scalar-read.ll2
-rw-r--r--polly/test/ScopInfo/polly-timeout-parameter-bounds.ll2
-rw-r--r--polly/test/ScopInfo/preserve-equiv-class-order-in-basic_block.ll2
-rw-r--r--polly/test/ScopInfo/process_added_dimensions.ll2
-rw-r--r--polly/test/ScopInfo/pwaff-complexity-bailout.ll2
-rw-r--r--polly/test/ScopInfo/ranged_parameter.ll2
-rw-r--r--polly/test/ScopInfo/ranged_parameter_2.ll2
-rw-r--r--polly/test/ScopInfo/ranged_parameter_wrap.ll2
-rw-r--r--polly/test/ScopInfo/ranged_parameter_wrap_2.ll2
-rw-r--r--polly/test/ScopInfo/read-only-scalar-used-in-phi-2.ll2
-rw-r--r--polly/test/ScopInfo/read-only-scalar-used-in-phi.ll2
-rw-r--r--polly/test/ScopInfo/read-only-scalars.ll4
-rw-r--r--polly/test/ScopInfo/read-only-statements.ll2
-rw-r--r--polly/test/ScopInfo/reduction_alternating_base.ll2
-rw-r--r--polly/test/ScopInfo/reduction_chain_partially_outside_the_scop.ll2
-rw-r--r--polly/test/ScopInfo/reduction_different_index.ll2
-rw-r--r--polly/test/ScopInfo/reduction_different_index1.ll2
-rw-r--r--polly/test/ScopInfo/reduction_disabled_multiplicative.ll2
-rw-r--r--polly/test/ScopInfo/reduction_escaping_intermediate.ll2
-rw-r--r--polly/test/ScopInfo/reduction_escaping_intermediate_2.ll2
-rw-r--r--polly/test/ScopInfo/reduction_invalid_different_operators.ll2
-rw-r--r--polly/test/ScopInfo/reduction_invalid_overlapping_accesses.ll2
-rw-r--r--polly/test/ScopInfo/reduction_multiple_loops_array_sum.ll2
-rw-r--r--polly/test/ScopInfo/reduction_multiple_loops_array_sum_1.ll2
-rw-r--r--polly/test/ScopInfo/reduction_multiple_simple_binary.ll2
-rw-r--r--polly/test/ScopInfo/reduction_non_overlapping_chains.ll2
-rw-r--r--polly/test/ScopInfo/reduction_only_reduction_like_access.ll2
-rw-r--r--polly/test/ScopInfo/reduction_simple_fp.ll2
-rw-r--r--polly/test/ScopInfo/reduction_simple_w_constant.ll2
-rw-r--r--polly/test/ScopInfo/reduction_simple_w_iv.ll2
-rw-r--r--polly/test/ScopInfo/reduction_two_identical_reads.ll4
-rw-r--r--polly/test/ScopInfo/redundant_parameter_constraint.ll2
-rw-r--r--polly/test/ScopInfo/region-with-instructions.ll2
-rw-r--r--polly/test/ScopInfo/remarks.ll2
-rw-r--r--polly/test/ScopInfo/required-invariant-loop-bounds.ll4
-rw-r--r--polly/test/ScopInfo/restriction_in_dead_block.ll2
-rw-r--r--polly/test/ScopInfo/run-time-check-many-array-disjuncts.ll4
-rw-r--r--polly/test/ScopInfo/run-time-check-many-parameters.ll2
-rw-r--r--polly/test/ScopInfo/run-time-check-many-piecewise-aliasing.ll4
-rw-r--r--polly/test/ScopInfo/run-time-check-read-only-arrays.ll2
-rw-r--r--polly/test/ScopInfo/same-base-address-scalar-and-array.ll2
-rw-r--r--polly/test/ScopInfo/scalar.ll2
-rw-r--r--polly/test/ScopInfo/scalar_dependence_cond_br.ll2
-rw-r--r--polly/test/ScopInfo/scalar_to_array.ll4
-rw-r--r--polly/test/ScopInfo/scev-div-with-evaluatable-divisor.ll2
-rw-r--r--polly/test/ScopInfo/scev-invalidated.ll2
-rw-r--r--polly/test/ScopInfo/schedule-const-post-dominator-walk-2.ll2
-rw-r--r--polly/test/ScopInfo/schedule-const-post-dominator-walk.ll2
-rw-r--r--polly/test/ScopInfo/schedule-constuction-endless-loop1.ll2
-rw-r--r--polly/test/ScopInfo/schedule-constuction-endless-loop2.ll2
-rw-r--r--polly/test/ScopInfo/schedule-incorrectly-contructed-in-case-of-infinite-loop.ll2
-rw-r--r--polly/test/ScopInfo/scop-affine-parameter-ordering.ll2
-rw-r--r--polly/test/ScopInfo/sign_wrapped_set.ll2
-rw-r--r--polly/test/ScopInfo/simple_loop_1.ll2
-rw-r--r--polly/test/ScopInfo/simple_loop_2.ll2
-rw-r--r--polly/test/ScopInfo/simple_loop_unsigned.ll2
-rw-r--r--polly/test/ScopInfo/simple_loop_unsigned_2.ll2
-rw-r--r--polly/test/ScopInfo/simple_loop_unsigned_3.ll2
-rw-r--r--polly/test/ScopInfo/simple_nonaffine_loop_not.ll2
-rw-r--r--polly/test/ScopInfo/smax.ll2
-rw-r--r--polly/test/ScopInfo/statistics.ll2
-rw-r--r--polly/test/ScopInfo/stmt_split_exit_of_region_stmt.ll2
-rw-r--r--polly/test/ScopInfo/stmt_split_no_after_split.ll2
-rw-r--r--polly/test/ScopInfo/stmt_split_no_dependence.ll2
-rw-r--r--polly/test/ScopInfo/stmt_split_on_store.ll2
-rw-r--r--polly/test/ScopInfo/stmt_split_on_synthesizable.ll2
-rw-r--r--polly/test/ScopInfo/stmt_split_phi_in_beginning_bb.ll2
-rw-r--r--polly/test/ScopInfo/stmt_split_phi_in_stmt.ll2
-rw-r--r--polly/test/ScopInfo/stmt_split_scalar_dependence.ll2
-rw-r--r--polly/test/ScopInfo/stmt_split_within_loop.ll2
-rw-r--r--polly/test/ScopInfo/stmt_with_read_but_without_sideffect.ll2
-rw-r--r--polly/test/ScopInfo/switch-1.ll4
-rw-r--r--polly/test/ScopInfo/switch-2.ll4
-rw-r--r--polly/test/ScopInfo/switch-3.ll4
-rw-r--r--polly/test/ScopInfo/switch-4.ll4
-rw-r--r--polly/test/ScopInfo/switch-5.ll4
-rw-r--r--polly/test/ScopInfo/switch-6.ll4
-rw-r--r--polly/test/ScopInfo/switch-7.ll5
-rw-r--r--polly/test/ScopInfo/tempscop-printing.ll2
-rw-r--r--polly/test/ScopInfo/test-wrapping-in-condition.ll4
-rw-r--r--polly/test/ScopInfo/truncate-1.ll2
-rw-r--r--polly/test/ScopInfo/truncate-2.ll2
-rw-r--r--polly/test/ScopInfo/truncate-3.ll2
-rw-r--r--polly/test/ScopInfo/two-loops-one-infinite.ll2
-rw-r--r--polly/test/ScopInfo/two-loops-right-after-each-other.ll2
-rw-r--r--polly/test/ScopInfo/undef_in_cond.ll2
-rw-r--r--polly/test/ScopInfo/unnamed_nonaffine.ll4
-rw-r--r--polly/test/ScopInfo/unnamed_stmts.ll2
-rw-r--r--polly/test/ScopInfo/unpredictable_nonscop_loop.ll2
-rw-r--r--polly/test/ScopInfo/unprofitable_scalar-accs.ll4
-rw-r--r--polly/test/ScopInfo/unsigned-condition.ll2
-rw-r--r--polly/test/ScopInfo/unsigned-division-1.ll2
-rw-r--r--polly/test/ScopInfo/unsigned-division-2.ll2
-rw-r--r--polly/test/ScopInfo/unsigned-division-3.ll2
-rw-r--r--polly/test/ScopInfo/unsigned-division-4.ll2
-rw-r--r--polly/test/ScopInfo/unsigned-division-5.ll2
-rw-r--r--polly/test/ScopInfo/unsigned_wrap_uge.ll2
-rw-r--r--polly/test/ScopInfo/unsigned_wrap_ugt.ll2
-rw-r--r--polly/test/ScopInfo/unsigned_wrap_ule.ll2
-rw-r--r--polly/test/ScopInfo/unsigned_wrap_ult.ll2
-rw-r--r--polly/test/ScopInfo/user_context.ll8
-rw-r--r--polly/test/ScopInfo/user_provided_assumptions-in-bb-signed-conditional.ll4
-rw-r--r--polly/test/ScopInfo/user_provided_assumptions-in-bb-signed.ll2
-rw-r--r--polly/test/ScopInfo/user_provided_assumptions-in-bb-unsigned.ll4
-rw-r--r--polly/test/ScopInfo/user_provided_assumptions.ll4
-rw-r--r--polly/test/ScopInfo/user_provided_assumptions_2.ll4
-rw-r--r--polly/test/ScopInfo/user_provided_assumptions_3.ll4
-rw-r--r--polly/test/ScopInfo/user_provided_non_dominating_assumptions.ll4
-rw-r--r--polly/test/ScopInfo/variant_base_pointer.ll4
-rw-r--r--polly/test/ScopInfo/variant_load_empty_domain.ll2
-rw-r--r--polly/test/ScopInfo/wraping_signed_expr_0.ll2
-rw-r--r--polly/test/ScopInfo/wraping_signed_expr_1.ll2
-rw-r--r--polly/test/ScopInfo/wraping_signed_expr_2.ll2
-rw-r--r--polly/test/ScopInfo/wraping_signed_expr_3.ll2
-rw-r--r--polly/test/ScopInfo/wraping_signed_expr_4.ll2
-rw-r--r--polly/test/ScopInfo/wraping_signed_expr_5.ll2
-rw-r--r--polly/test/ScopInfo/wraping_signed_expr_6.ll2
-rw-r--r--polly/test/ScopInfo/wraping_signed_expr_7.ll2
-rw-r--r--polly/test/ScopInfo/wraping_signed_expr_slow_1.ll2
-rw-r--r--polly/test/ScopInfo/wraping_signed_expr_slow_2.ll2
-rw-r--r--polly/test/ScopInfo/zero_ext_of_truncate.ll2
-rw-r--r--polly/test/ScopInfo/zero_ext_of_truncate_2.ll2
-rw-r--r--polly/test/ScopInfo/zero_ext_space_mismatch.ll2
-rw-r--r--polly/test/ScopInliner/invariant-load-func.ll4
-rw-r--r--polly/test/Simplify/coalesce_3partials.ll2
-rw-r--r--polly/test/Simplify/coalesce_disjointelements.ll2
-rw-r--r--polly/test/Simplify/coalesce_overlapping.ll2
-rw-r--r--polly/test/Simplify/coalesce_partial.ll2
-rw-r--r--polly/test/Simplify/dead_access_load.ll1
-rw-r--r--polly/test/Simplify/dead_access_phi.ll1
-rw-r--r--polly/test/Simplify/dead_access_value.ll1
-rw-r--r--polly/test/Simplify/dead_instruction.ll1
-rw-r--r--polly/test/Simplify/emptyaccessdomain.ll2
-rw-r--r--polly/test/Simplify/exit_phi_accesses-2.ll2
-rw-r--r--polly/test/Simplify/func-b320a7.ll2
-rw-r--r--polly/test/Simplify/gemm.ll2
-rw-r--r--polly/test/Simplify/nocoalesce_differentvalues.ll2
-rw-r--r--polly/test/Simplify/nocoalesce_elementmismatch.ll2
-rw-r--r--polly/test/Simplify/nocoalesce_readbetween.ll2
-rw-r--r--polly/test/Simplify/nocoalesce_writebetween.ll2
-rw-r--r--polly/test/Simplify/notdead_region_exitphi.ll1
-rw-r--r--polly/test/Simplify/notdead_region_innerphi.ll1
-rw-r--r--polly/test/Simplify/notredundant_region_loop.ll2
-rw-r--r--polly/test/Simplify/notredundant_region_middle.ll1
-rw-r--r--polly/test/Simplify/notredundant_synthesizable_unknownit.ll1
-rw-r--r--polly/test/Simplify/out-of-scop-use-in-region-entry-phi-node.ll2
-rw-r--r--polly/test/Simplify/overwritten.ll1
-rw-r--r--polly/test/Simplify/overwritten_3phi.ll2
-rw-r--r--polly/test/Simplify/overwritten_3store.ll1
-rw-r--r--polly/test/Simplify/overwritten_implicit_and_explicit.ll2
-rw-r--r--polly/test/Simplify/overwritten_loadbetween.ll1
-rw-r--r--polly/test/Simplify/overwritten_scalar.ll2
-rw-r--r--polly/test/Simplify/pass_existence.ll1
-rw-r--r--polly/test/Simplify/phi_in_regionstmt.ll1
-rw-r--r--polly/test/Simplify/pr33323.ll2
-rw-r--r--polly/test/Simplify/redundant.ll1
-rw-r--r--polly/test/Simplify/redundant_differentindex.ll1
-rw-r--r--polly/test/Simplify/redundant_region.ll2
-rw-r--r--polly/test/Simplify/redundant_region_scalar.ll2
-rw-r--r--polly/test/Simplify/redundant_scalarwrite.ll2
-rw-r--r--polly/test/Simplify/redundant_storebetween.ll1
-rw-r--r--polly/test/Simplify/scalability1.ll2
-rw-r--r--polly/test/Simplify/scalability2.ll2
-rw-r--r--polly/test/Simplify/sweep_mapped_phi.ll2
-rw-r--r--polly/test/Simplify/sweep_mapped_value.ll2
-rw-r--r--polly/test/Simplify/ununsed_read_in_region_entry.ll4
-rw-r--r--polly/test/Support/Plugins.ll2
-rw-r--r--polly/test/Support/isl-args.ll8
-rw-r--r--polly/test/lit.site.cfg.in1
-rw-r--r--polly/test/polly.ll2
-rw-r--r--polly/unittests/CMakeLists.txt5
-rw-r--r--pstl/CMakeLists.txt1
-rw-r--r--runtimes/CMakeLists.txt2
-rw-r--r--utils/bazel/.bazelrc3
-rw-r--r--utils/bazel/llvm-project-overlay/bolt/BUILD.bazel5
-rw-r--r--utils/bazel/llvm-project-overlay/llvm/BUILD.bazel2
-rw-r--r--utils/bazel/llvm-project-overlay/llvm/driver.bzl1
-rw-r--r--utils/bazel/llvm-project-overlay/llvm/include/llvm/Config/config.h3
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/BUILD.bazel19
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel7
-rw-r--r--utils/bazel/llvm_configs/config.h.cmake3
3790 files changed, 110672 insertions, 61656 deletions
diff --git a/.ci/generate-buildkite-pipeline-premerge b/.ci/generate-buildkite-pipeline-premerge
index 78a9cb77ff7d..3ed5eb96eceb 100755
--- a/.ci/generate-buildkite-pipeline-premerge
+++ b/.ci/generate-buildkite-pipeline-premerge
@@ -68,7 +68,7 @@ function compute-projects-to-test() {
done
;;
clang)
- for p in clang-tools-extra compiler-rt flang lldb cross-project-tests; do
+ for p in clang-tools-extra compiler-rt lldb cross-project-tests; do
echo $p
done
;;
@@ -85,6 +85,22 @@ function compute-projects-to-test() {
done
}
+function compute-runtimes-to-test() {
+ projects=${@}
+ for project in ${projects}; do
+ case ${project} in
+ clang)
+ for p in libcxx libcxxabi libunwind; do
+ echo $p
+ done
+ ;;
+ *)
+ # Nothing to do
+ ;;
+ esac
+ done
+}
+
function add-dependencies() {
projects=${@}
for project in ${projects}; do
@@ -178,6 +194,15 @@ function check-targets() {
cross-project-tests)
echo "check-cross-project"
;;
+ libcxx)
+ echo "check-cxx"
+ ;;
+ libcxxabi)
+ echo "check-cxxabi"
+ ;;
+ libunwind)
+ echo "check-unwind"
+ ;;
lldb)
echo "check-all" # TODO: check-lldb may not include all the LLDB tests?
;;
@@ -207,17 +232,6 @@ if echo "$modified_dirs" | grep -q -E "^(libcxx|libcxxabi|libunwind|runtimes|cma
EOF
fi
-# If clang changed.
-if echo "$modified_dirs" | grep -q -E "^(clang)$"; then
- cat <<EOF
-- trigger: "clang-ci"
- build:
- message: "${buildMessage}"
- commit: "${BUILDKITE_COMMIT}"
- branch: "${BUILDKITE_BRANCH}"
-EOF
-fi
-
# Generic pipeline for projects that have not defined custom steps.
#
# Individual projects should instead define the pre-commit CI tests that suit their
@@ -231,6 +245,10 @@ linux_projects_to_test=$(exclude-linux $(compute-projects-to-test ${modified_pro
linux_check_targets=$(check-targets ${linux_projects_to_test} | sort | uniq)
linux_projects=$(add-dependencies ${linux_projects_to_test} | sort | uniq)
+linux_runtimes_to_test=$(compute-runtimes-to-test ${linux_projects_to_test})
+linux_runtime_check_targets=$(check-targets ${linux_runtimes_to_test} | sort | uniq)
+linux_runtimes=$(echo ${linux_runtimes_to_test} | sort | uniq)
+
windows_projects_to_test=$(exclude-windows $(compute-projects-to-test ${modified_projects}))
windows_check_targets=$(check-targets ${windows_projects_to_test} | sort | uniq)
windows_projects=$(add-dependencies ${windows_projects_to_test} | sort | uniq)
@@ -255,7 +273,7 @@ if [[ "${linux_projects}" != "" ]]; then
CC: 'clang'
CXX: 'clang++'
commands:
- - './.ci/monolithic-linux.sh "$(echo ${linux_projects} | tr ' ' ';')" "$(echo ${linux_check_targets})"'
+ - './.ci/monolithic-linux.sh "$(echo ${linux_projects} | tr ' ' ';')" "$(echo ${linux_check_targets})" "$(echo ${linux_runtimes} | tr ' ' ';')" "$(echo ${linux_runtime_check_targets})"'
EOF
fi
diff --git a/.ci/monolithic-linux.sh b/.ci/monolithic-linux.sh
index b00a4b984a1d..38d7128f241b 100755
--- a/.ci/monolithic-linux.sh
+++ b/.ci/monolithic-linux.sh
@@ -18,6 +18,7 @@ set -o pipefail
MONOREPO_ROOT="${MONOREPO_ROOT:="$(git rev-parse --show-toplevel)"}"
BUILD_DIR="${BUILD_DIR:=${MONOREPO_ROOT}/build}"
+INSTALL_DIR="${BUILD_DIR}/install"
rm -rf "${BUILD_DIR}"
ccache --zero-stats
@@ -49,8 +50,79 @@ cmake -S "${MONOREPO_ROOT}"/llvm -B "${BUILD_DIR}" \
-D LLVM_ENABLE_LLD=ON \
-D CMAKE_CXX_FLAGS=-gmlt \
-D LLVM_CCACHE_BUILD=ON \
- -D MLIR_ENABLE_BINDINGS_PYTHON=ON
+ -D MLIR_ENABLE_BINDINGS_PYTHON=ON \
+ -D CMAKE_INSTALL_PREFIX="${INSTALL_DIR}"
echo "--- ninja"
# Targets are not escaped as they are passed as separate arguments.
ninja -C "${BUILD_DIR}" -k 0 ${targets}
+
+runtimes="${3}"
+runtime_targets="${4}"
+
+# Compiling runtimes with the just-built Clang and running their tests
+# as additional testing for Clang.
+if [[ "${runtimes}" != "" ]]; then
+ if [[ "${runtime_targets}" == "" ]]; then
+ echo "Runtimes to build are specified, but targets are not."
+ exit 1
+ fi
+
+ echo "--- ninja install-clang"
+
+ ninja -C ${BUILD_DIR} install-clang install-clang-resource-headers
+
+ RUNTIMES_BUILD_DIR="${MONOREPO_ROOT}/build-runtimes"
+ INSTALL_DIR="${BUILD_DIR}/install"
+ mkdir -p ${RUNTIMES_BUILD_DIR}
+
+ echo "--- cmake runtimes C++03"
+
+ cmake -S "${MONOREPO_ROOT}/runtimes" -B "${RUNTIMES_BUILD_DIR}" -GNinja \
+ -D CMAKE_C_COMPILER="${INSTALL_DIR}/bin/clang" \
+ -D CMAKE_CXX_COMPILER="${INSTALL_DIR}/bin/clang++" \
+ -D LLVM_ENABLE_RUNTIMES="${runtimes}" \
+ -D LIBCXX_CXX_ABI=libcxxabi \
+ -D CMAKE_BUILD_TYPE=RelWithDebInfo \
+ -D CMAKE_INSTALL_PREFIX="${INSTALL_DIR}" \
+ -D LIBCXX_TEST_PARAMS="std=c++03" \
+ -D LIBCXXABI_TEST_PARAMS="std=c++03"
+
+ echo "--- ninja runtimes C++03"
+
+ ninja -vC "${RUNTIMES_BUILD_DIR}" ${runtime_targets}
+
+ echo "--- cmake runtimes C++26"
+
+ rm -rf "${RUNTIMES_BUILD_DIR}"
+ cmake -S "${MONOREPO_ROOT}/runtimes" -B "${RUNTIMES_BUILD_DIR}" -GNinja \
+ -D CMAKE_C_COMPILER="${INSTALL_DIR}/bin/clang" \
+ -D CMAKE_CXX_COMPILER="${INSTALL_DIR}/bin/clang++" \
+ -D LLVM_ENABLE_RUNTIMES="${runtimes}" \
+ -D LIBCXX_CXX_ABI=libcxxabi \
+ -D CMAKE_BUILD_TYPE=RelWithDebInfo \
+ -D CMAKE_INSTALL_PREFIX="${INSTALL_DIR}" \
+ -D LIBCXX_TEST_PARAMS="std=c++26" \
+ -D LIBCXXABI_TEST_PARAMS="std=c++26"
+
+ echo "--- ninja runtimes C++26"
+
+ ninja -vC "${RUNTIMES_BUILD_DIR}" ${runtime_targets}
+
+ echo "--- cmake runtimes clang modules"
+
+ rm -rf "${RUNTIMES_BUILD_DIR}"
+ cmake -S "${MONOREPO_ROOT}/runtimes" -B "${RUNTIMES_BUILD_DIR}" -GNinja \
+ -D CMAKE_C_COMPILER="${INSTALL_DIR}/bin/clang" \
+ -D CMAKE_CXX_COMPILER="${INSTALL_DIR}/bin/clang++" \
+ -D LLVM_ENABLE_RUNTIMES="${runtimes}" \
+ -D LIBCXX_CXX_ABI=libcxxabi \
+ -D CMAKE_BUILD_TYPE=RelWithDebInfo \
+ -D CMAKE_INSTALL_PREFIX="${INSTALL_DIR}" \
+ -D LIBCXX_TEST_PARAMS="enable_modules=clang" \
+ -D LIBCXXABI_TEST_PARAMS="enable_modules=clang"
+
+ echo "--- ninja runtimes clang modules"
+
+ ninja -vC "${RUNTIMES_BUILD_DIR}" ${runtime_targets}
+fi
diff --git a/.github/workflows/llvm-bugs.yml b/.github/workflows/llvm-bugs.yml
index f592dd6ccd90..c392078fa452 100644
--- a/.github/workflows/llvm-bugs.yml
+++ b/.github/workflows/llvm-bugs.yml
@@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
if: github.repository == 'llvm/llvm-project'
steps:
- - uses: actions/setup-node@v3
+ - uses: actions/setup-node@v4
with:
node-version: 18
check-latest: true
diff --git a/bolt/CMakeLists.txt b/bolt/CMakeLists.txt
index cc3a70fa35e0..74907ad118d1 100644
--- a/bolt/CMakeLists.txt
+++ b/bolt/CMakeLists.txt
@@ -1,3 +1,5 @@
+set(LLVM_SUBPROJECT_TITLE "BOLT")
+
include(ExternalProject)
set(BOLT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
@@ -121,7 +123,7 @@ option(BOLT_BUILD_TOOLS
"Build the BOLT tools. If OFF, just generate build targets." ON)
add_custom_target(bolt)
-set_target_properties(bolt PROPERTIES FOLDER "BOLT")
+set_target_properties(bolt PROPERTIES FOLDER "BOLT/Metatargets")
add_llvm_install_targets(install-bolt DEPENDS bolt COMPONENT bolt)
include_directories(
diff --git a/bolt/cmake/modules/AddBOLT.cmake b/bolt/cmake/modules/AddBOLT.cmake
index 1f69b9046320..c7ac662c6b12 100644
--- a/bolt/cmake/modules/AddBOLT.cmake
+++ b/bolt/cmake/modules/AddBOLT.cmake
@@ -3,7 +3,6 @@ include(LLVMDistributionSupport)
macro(add_bolt_executable name)
add_llvm_executable(${name} ${ARGN})
- set_target_properties(${name} PROPERTIES FOLDER "BOLT")
endmacro()
macro(add_bolt_tool name)
diff --git a/bolt/docs/BAT.md b/bolt/docs/BAT.md
index 7ffb5d7c0081..817ad288aa34 100644
--- a/bolt/docs/BAT.md
+++ b/bolt/docs/BAT.md
@@ -106,9 +106,14 @@ equals output offset.
`BRANCHENTRY` bit denotes whether a given offset pair is a control flow source
(branch or call instruction). If not set, it signifies a control flow target
(basic block offset).
+
`InputAddr` is omitted for equal offsets in the input and output function. In this
case, `BRANCHENTRY` bits are encoded separately in a `BranchEntries` bitvector.
+Deleted basic blocks are emitted as having `OutputOffset` equal to the size of
+the function. They don't affect address translation and only participate in
+input basic block mapping.
+
### Secondary Entry Points table
The table is emitted for hot fragments only. It contains `NumSecEntryPoints`
offsets denoting secondary entry points, delta encoded, implicitly starting at zero.
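
The offset-pair encoding described above packs the `BRANCHENTRY` flag into the
low bit of the stored value, matching the `(InputOffset << 1) | BRANCHENTRY`
expressions later in this patch. A minimal standalone sketch of the encoding
(illustrative helper names, not BOLT's API):

    #include <cstdint>

    // Low bit: BRANCHENTRY flag; remaining bits: the input offset.
    constexpr uint32_t encode(uint32_t InputOffset, bool IsBranch) {
      return (InputOffset << 1) | (IsBranch ? 1u : 0u);
    }
    constexpr uint32_t inputOffset(uint32_t Val) { return Val >> 1; }
    constexpr bool isBranchEntry(uint32_t Val) { return (Val & 1) != 0; }

    static_assert(inputOffset(encode(0x40, true)) == 0x40, "offset round-trips");
    static_assert(isBranchEntry(encode(0x40, true)), "flag is preserved");
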
diff --git a/bolt/docs/CMakeLists.txt b/bolt/docs/CMakeLists.txt
index b230512fe571..12ae85256678 100644
--- a/bolt/docs/CMakeLists.txt
+++ b/bolt/docs/CMakeLists.txt
@@ -79,6 +79,7 @@ if (LLVM_ENABLE_DOXYGEN)
COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/doxygen.cfg
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating bolt doxygen documentation." VERBATIM)
+ set_target_properties(doxygen-bolt PROPERTIES FOLDER "BOLT/Docs")
if (LLVM_BUILD_DOCS)
add_dependencies(doxygen doxygen-bolt)
diff --git a/bolt/include/bolt/Core/BinaryContext.h b/bolt/include/bolt/Core/BinaryContext.h
index 75765819ac46..4ec3de3da1bf 100644
--- a/bolt/include/bolt/Core/BinaryContext.h
+++ b/bolt/include/bolt/Core/BinaryContext.h
@@ -17,6 +17,7 @@
#include "bolt/Core/BinaryData.h"
#include "bolt/Core/BinarySection.h"
#include "bolt/Core/DebugData.h"
+#include "bolt/Core/DynoStats.h"
#include "bolt/Core/JumpTable.h"
#include "bolt/Core/MCPlusBuilder.h"
#include "bolt/RuntimeLibs/RuntimeLibrary.h"
@@ -359,7 +360,7 @@ public:
void setFileBuildID(StringRef ID) { FileBuildID = std::string(ID); }
bool hasSymbolsWithFileName() const { return HasSymbolsWithFileName; }
- void setHasSymbolsWithFileName(bool Value) { HasSymbolsWithFileName = true; }
+ void setHasSymbolsWithFileName(bool Value) { HasSymbolsWithFileName = Value; }
/// Return true if relocations against symbol with a given name
/// must be created.
@@ -677,6 +678,9 @@ public:
/// have an origin file name available.
bool HasSymbolsWithFileName{false};
+ /// Does the binary have BAT section.
+ bool HasBATSection{false};
+
/// Sum of execution count of all functions
uint64_t SumExecutionCount{0};
@@ -714,6 +718,9 @@ public:
uint64_t NumStaleBlocksWithEqualIcount{0};
} Stats;
+ // Original binary execution count stats.
+ DynoStats InitialDynoStats;
+
// Address of the first allocated segment.
uint64_t FirstAllocAddress{std::numeric_limits<uint64_t>::max()};
@@ -1217,8 +1224,7 @@ public:
/// Return a signed value of \p Size stored at \p Address. The address has
/// to be a valid statically allocated address for the binary.
- ErrorOr<uint64_t> getSignedValueAtAddress(uint64_t Address,
- size_t Size) const;
+ ErrorOr<int64_t> getSignedValueAtAddress(uint64_t Address, size_t Size) const;
/// Special case of getUnsignedValueAtAddress() that uses a pointer size.
ErrorOr<uint64_t> getPointerAtAddress(uint64_t Address) const {
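
The `getSignedValueAtAddress` change above is a type fix: the routine performs
a signed read but previously advertised an unsigned result, leaving callers
(such as the ksymtab reader later in this patch) to reinterpret the value
themselves. A standalone sketch of the sign extension involved, under
illustrative names:

    #include <cstdint>

    // Sign-extend the low Size bytes of Raw to a full int64_t.
    // (Right shift of a negative value is arithmetic in C++20.)
    constexpr int64_t signExtend(uint64_t Raw, unsigned Size) {
      const unsigned Shift = 64 - Size * 8;
      return static_cast<int64_t>(Raw << Shift) >> Shift;
    }

    // A 4-byte PC-relative offset stored as 0xFFFFFFF0 reads back as -16,
    // not 4294967280.
    static_assert(signExtend(0xFFFFFFF0u, 4) == -16, "negative offsets survive");
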
diff --git a/bolt/include/bolt/Passes/BinaryPasses.h b/bolt/include/bolt/Passes/BinaryPasses.h
index 5d7692559eda..ad8473c4aae0 100644
--- a/bolt/include/bolt/Passes/BinaryPasses.h
+++ b/bolt/include/bolt/Passes/BinaryPasses.h
@@ -16,6 +16,7 @@
#include "bolt/Core/BinaryContext.h"
#include "bolt/Core/BinaryFunction.h"
#include "bolt/Core/DynoStats.h"
+#include "bolt/Profile/BoltAddressTranslation.h"
#include "llvm/Support/CommandLine.h"
#include <atomic>
#include <set>
@@ -52,15 +53,31 @@ public:
virtual Error runOnFunctions(BinaryContext &BC) = 0;
};
+/// A pass to set initial program-wide dynostats.
+class DynoStatsSetPass : public BinaryFunctionPass {
+public:
+ DynoStatsSetPass() : BinaryFunctionPass(false) {}
+
+ const char *getName() const override {
+ return "set dyno-stats before optimizations";
+ }
+
+ bool shouldPrint(const BinaryFunction &BF) const override { return false; }
+
+ Error runOnFunctions(BinaryContext &BC) override {
+ BC.InitialDynoStats = getDynoStats(BC.getBinaryFunctions(), BC.isAArch64());
+ return Error::success();
+ }
+};
+
/// A pass to print program-wide dynostats.
class DynoStatsPrintPass : public BinaryFunctionPass {
protected:
- DynoStats PrevDynoStats;
std::string Title;
public:
- DynoStatsPrintPass(const DynoStats &PrevDynoStats, const char *Title)
- : BinaryFunctionPass(false), PrevDynoStats(PrevDynoStats), Title(Title) {}
+ DynoStatsPrintPass(const char *Title)
+ : BinaryFunctionPass(false), Title(Title) {}
const char *getName() const override {
return "print dyno-stats after optimizations";
@@ -69,6 +86,7 @@ public:
bool shouldPrint(const BinaryFunction &BF) const override { return false; }
Error runOnFunctions(BinaryContext &BC) override {
+ const DynoStats PrevDynoStats = BC.InitialDynoStats;
const DynoStats NewDynoStats =
getDynoStats(BC.getBinaryFunctions(), BC.isAArch64());
const bool Changed = (NewDynoStats != PrevDynoStats);
@@ -399,8 +417,11 @@ public:
/// Prints a list of the top 100 functions sorted by a set of
/// dyno stats categories.
class PrintProgramStats : public BinaryFunctionPass {
+ BoltAddressTranslation *BAT = nullptr;
+
public:
- explicit PrintProgramStats() : BinaryFunctionPass(false) {}
+ explicit PrintProgramStats(BoltAddressTranslation *BAT = nullptr)
+ : BinaryFunctionPass(false), BAT(BAT) {}
const char *getName() const override { return "print-stats"; }
bool shouldPrint(const BinaryFunction &) const override { return false; }
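
The refactoring above moves the dyno-stats baseline out of
`DynoStatsPrintPass`'s constructor and into shared `BinaryContext` state, so
capturing the baseline becomes an ordinary pass that the manager can schedule
after profile-dependent passes. A standalone model of the snapshot-then-diff
pattern (types and names here are illustrative):

    #include <cstdio>

    struct Stats { long Instructions = 0; };
    struct Context { Stats Initial, Current; };

    // "Set" pass: snapshot the current stats as the baseline.
    void setPass(Context &C) { C.Initial = C.Current; }

    // "Print" pass: diff against the baseline captured earlier.
    void printPass(const Context &C, const char *Title) {
      std::printf("%s: %+ld dynamic instructions\n", Title,
                  C.Current.Instructions - C.Initial.Instructions);
    }
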
diff --git a/bolt/include/bolt/Passes/MCF.h b/bolt/include/bolt/Passes/MCF.h
index feac7f88ac11..3fe674463bf1 100644
--- a/bolt/include/bolt/Passes/MCF.h
+++ b/bolt/include/bolt/Passes/MCF.h
@@ -9,20 +9,14 @@
#ifndef BOLT_PASSES_MCF_H
#define BOLT_PASSES_MCF_H
+#include "bolt/Passes/BinaryPasses.h"
+#include "llvm/Support/CommandLine.h"
+
namespace llvm {
namespace bolt {
-class BinaryFunction;
class DataflowInfoManager;
-enum MCFCostFunction : char {
- MCF_DISABLE = 0,
- MCF_LINEAR,
- MCF_QUADRATIC,
- MCF_LOG,
- MCF_BLAMEFTS
-};
-
/// Implement the idea in "SamplePGO - The Power of Profile Guided Optimizations
/// without the Usability Burden" by Diego Novillo to make basic block counts
/// equal if we show that A dominates B, B post-dominates A and they are in the
@@ -31,23 +25,18 @@ void equalizeBBCounts(DataflowInfoManager &Info, BinaryFunction &BF);
/// Fill edge counts based on the basic block count. Used in nonLBR mode when
/// we only have bb count.
-void estimateEdgeCounts(BinaryFunction &BF);
-
-/// Entry point for computing a min-cost flow for the CFG with the goal
-/// of fixing the flow of the CFG edges, that is, making sure it obeys the
-/// flow-conservation equation SumInEdges = SumOutEdges.
-///
-/// To do this, we create an instance of the min-cost flow problem in a
-/// similar way as the one discussed in the work of Roy Levin "Completing
-/// Incomplete Edge Profile by Applying Minimum Cost Circulation Algorithms".
-/// We do a few things differently, though. We don't populate edge counts using
-/// weights coming from a static branch prediction technique and we don't
-/// use the same cost function.
-///
-/// If cost function BlameFTs is used, assign all remaining flow to
-/// fall-throughs. This is used when the sampling is based on taken branches
-/// that do not account for them.
-void solveMCF(BinaryFunction &BF, MCFCostFunction CostFunction);
+class EstimateEdgeCounts : public BinaryFunctionPass {
+ void runOnFunction(BinaryFunction &BF);
+
+public:
+ explicit EstimateEdgeCounts(const cl::opt<bool> &PrintPass)
+ : BinaryFunctionPass(PrintPass) {}
+
+ const char *getName() const override { return "estimate-edge-counts"; }
+
+ /// Pass entry point
+ Error runOnFunctions(BinaryContext &BC) override;
+};
} // end namespace bolt
} // end namespace llvm
diff --git a/bolt/include/bolt/Passes/StokeInfo.h b/bolt/include/bolt/Passes/StokeInfo.h
index 76417e6a2c3b..a18c2a05d015 100644
--- a/bolt/include/bolt/Passes/StokeInfo.h
+++ b/bolt/include/bolt/Passes/StokeInfo.h
@@ -87,10 +87,10 @@ struct StokeFuncInfo {
<< "," << NumBlocks << "," << IsLoopFree << "," << NumLoops << ","
<< MaxLoopDepth << "," << HotSize << "," << TotalSize << ","
<< Score << "," << HasCall << ",\"{ ";
- for (std::string S : DefIn)
+ for (const std::string &S : DefIn)
Outfile << "%" << S << " ";
Outfile << "}\",\"{ ";
- for (std::string S : LiveOut)
+ for (const std::string &S : LiveOut)
Outfile << "%" << S << " ";
Outfile << "}\"," << HeapOut << "," << StackOut << "," << HasRipAddr
<< "," << Omitted << "\n";
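
The loop change above avoids copying each `std::string` out of `DefIn` and
`LiveOut` on every iteration; binding by `const` reference reads the stored
strings in place. A standalone illustration:

    #include <string>
    #include <vector>

    void printAll(const std::vector<std::string> &Names) {
      // `for (std::string S : Names)` would allocate a copy per element;
      // a const reference binds to each stored string directly.
      for (const std::string &S : Names)
        (void)S; // read-only use
    }
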
diff --git a/bolt/include/bolt/Profile/BoltAddressTranslation.h b/bolt/include/bolt/Profile/BoltAddressTranslation.h
index 68b993ee363c..65b9ba874368 100644
--- a/bolt/include/bolt/Profile/BoltAddressTranslation.h
+++ b/bolt/include/bolt/Profile/BoltAddressTranslation.h
@@ -70,7 +70,7 @@ class BinaryFunction;
class BoltAddressTranslation {
public:
// In-memory representation of the address translation table
- using MapTy = std::map<uint32_t, uint32_t>;
+ using MapTy = std::multimap<uint32_t, uint32_t>;
// List of taken fall-throughs
using FallthroughListTy = SmallVector<std::pair<uint64_t, uint64_t>, 16>;
@@ -90,7 +90,7 @@ public:
std::error_code parse(raw_ostream &OS, StringRef Buf);
/// Dump the parsed address translation tables
- void dump(raw_ostream &OS);
+ void dump(raw_ostream &OS) const;
/// If the maps are loaded in memory, perform the lookup to translate LBR
/// addresses in function located at \p FuncAddress.
@@ -107,7 +107,12 @@ public:
/// If available, fetch the address of the hot part linked to the cold part
/// at \p Address. Return 0 otherwise.
- uint64_t fetchParentAddress(uint64_t Address) const;
+ uint64_t fetchParentAddress(uint64_t Address) const {
+ auto Iter = ColdPartSource.find(Address);
+ if (Iter == ColdPartSource.end())
+ return 0;
+ return Iter->second;
+ }
/// True if the input binary has a translation table we can use to convert
/// addresses when aggregating profile
@@ -132,7 +137,8 @@ private:
/// emitted for the start of the BB. More entries may be emitted to cover
/// the location of calls or any instruction that may change control flow.
void writeEntriesForBB(MapTy &Map, const BinaryBasicBlock &BB,
- uint64_t FuncInputAddress, uint64_t FuncOutputAddress);
+ uint64_t FuncInputAddress,
+ uint64_t FuncOutputAddress) const;
/// Write the serialized address translation table for a function.
template <bool Cold>
@@ -147,7 +153,7 @@ private:
/// Returns the bitmask with set bits corresponding to indices of BRANCHENTRY
/// entries in function address translation map.
- APInt calculateBranchEntriesBitMask(MapTy &Map, size_t EqualElems);
+ APInt calculateBranchEntriesBitMask(MapTy &Map, size_t EqualElems) const;
/// Calculate the number of equal offsets (output = input - skew) in the
/// beginning of the function.
@@ -178,14 +184,9 @@ private:
public:
/// Map basic block input offset to a basic block index and hash pair.
class BBHashMapTy {
- class EntryTy {
+ struct EntryTy {
unsigned Index;
size_t Hash;
-
- public:
- unsigned getBBIndex() const { return Index; }
- size_t getBBHash() const { return Hash; }
- EntryTy(unsigned Index, size_t Hash) : Index(Index), Hash(Hash) {}
};
std::map<uint32_t, EntryTy> Map;
@@ -201,15 +202,15 @@ public:
}
unsigned getBBIndex(uint32_t BBInputOffset) const {
- return getEntry(BBInputOffset).getBBIndex();
+ return getEntry(BBInputOffset).Index;
}
size_t getBBHash(uint32_t BBInputOffset) const {
- return getEntry(BBInputOffset).getBBHash();
+ return getEntry(BBInputOffset).Hash;
}
void addEntry(uint32_t BBInputOffset, unsigned BBIndex, size_t BBHash) {
- Map.emplace(BBInputOffset, EntryTy(BBIndex, BBHash));
+ Map.emplace(BBInputOffset, EntryTy{BBIndex, BBHash});
}
size_t getNumBasicBlocks() const { return Map.size(); }
@@ -217,18 +218,14 @@ public:
auto begin() const { return Map.begin(); }
auto end() const { return Map.end(); }
auto upper_bound(uint32_t Offset) const { return Map.upper_bound(Offset); }
+ auto size() const { return Map.size(); }
};
/// Map function output address to its hash and basic blocks hash map.
class FuncHashesTy {
- class EntryTy {
+ struct EntryTy {
size_t Hash;
BBHashMapTy BBHashMap;
-
- public:
- size_t getBFHash() const { return Hash; }
- const BBHashMapTy &getBBHashMap() const { return BBHashMap; }
- EntryTy(size_t Hash) : Hash(Hash) {}
};
std::unordered_map<uint64_t, EntryTy> Map;
@@ -240,15 +237,15 @@ public:
public:
size_t getBFHash(uint64_t FuncOutputAddress) const {
- return getEntry(FuncOutputAddress).getBFHash();
+ return getEntry(FuncOutputAddress).Hash;
}
const BBHashMapTy &getBBHashMap(uint64_t FuncOutputAddress) const {
- return getEntry(FuncOutputAddress).getBBHashMap();
+ return getEntry(FuncOutputAddress).BBHashMap;
}
void addEntry(uint64_t FuncOutputAddress, size_t BFHash) {
- Map.emplace(FuncOutputAddress, EntryTy(BFHash));
+ Map.emplace(FuncOutputAddress, EntryTy{BFHash, BBHashMapTy()});
}
size_t getNumFunctions() const { return Map.size(); };
@@ -256,7 +253,7 @@ public:
size_t getNumBasicBlocks() const {
size_t NumBasicBlocks{0};
for (auto &I : Map)
- NumBasicBlocks += I.second.getBBHashMap().getNumBasicBlocks();
+ NumBasicBlocks += I.second.BBHashMap.getNumBasicBlocks();
return NumBasicBlocks;
}
};
@@ -278,7 +275,9 @@ public:
/// Returns the number of basic blocks in a function.
size_t getNumBasicBlocks(uint64_t OutputAddress) const {
- return NumBasicBlocksMap.at(OutputAddress);
+ auto It = NumBasicBlocksMap.find(OutputAddress);
+ assert(It != NumBasicBlocksMap.end());
+ return It->second;
}
private:
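
Switching `MapTy` to a multimap matters because of the deleted-block
convention introduced later in this patch: every deleted basic block is parked
at the function-end output offset, so one output key must hold several input
offsets, which a `std::map` would silently collapse to a single entry. A
minimal standalone demonstration:

    #include <cstdint>
    #include <map>

    int main() {
      std::multimap<uint32_t, uint32_t> Map;
      const uint32_t EndOffset = 0x80;    // hypothetical function output size
      Map.emplace(EndOffset, 0x10u << 1); // deleted block, input offset 0x10
      Map.emplace(EndOffset, 0x20u << 1); // deleted block, input offset 0x20
      // Both entries survive; a std::map would have kept only one.
      return Map.count(EndOffset) == 2 ? 0 : 1;
    }
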
diff --git a/bolt/include/bolt/Profile/DataAggregator.h b/bolt/include/bolt/Profile/DataAggregator.h
index c158a9bb3e3f..6453b3070ceb 100644
--- a/bolt/include/bolt/Profile/DataAggregator.h
+++ b/bolt/include/bolt/Profile/DataAggregator.h
@@ -15,6 +15,7 @@
#define BOLT_PROFILE_DATA_AGGREGATOR_H
#include "bolt/Profile/DataReader.h"
+#include "bolt/Profile/YAMLProfileWriter.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Program.h"
@@ -248,7 +249,7 @@ private:
BinaryFunction *getBATParentFunction(const BinaryFunction &Func) const;
/// Retrieve the location name to be used for samples recorded in \p Func.
- StringRef getLocationName(const BinaryFunction &Func) const;
+ static StringRef getLocationName(const BinaryFunction &Func, bool BAT);
/// Semantic actions - parser hooks to interpret parsed perf samples
/// Register a sample (non-LBR mode), i.e. a new hit at \p Address
@@ -490,6 +491,8 @@ public:
/// Parse the output generated by "perf buildid-list" to extract build-ids
/// and return a file name matching a given \p FileBuildID.
std::optional<StringRef> getFileNameForBuildID(StringRef FileBuildID);
+
+ friend class YAMLProfileWriter;
};
} // namespace bolt
} // namespace llvm
diff --git a/bolt/lib/Core/BinaryContext.cpp b/bolt/lib/Core/BinaryContext.cpp
index ad2eb18caf10..db02dc0fae4e 100644
--- a/bolt/lib/Core/BinaryContext.cpp
+++ b/bolt/lib/Core/BinaryContext.cpp
@@ -142,7 +142,7 @@ BinaryContext::BinaryContext(std::unique_ptr<MCContext> Ctx,
AsmInfo(std::move(AsmInfo)), MII(std::move(MII)), STI(std::move(STI)),
InstPrinter(std::move(InstPrinter)), MIA(std::move(MIA)),
MIB(std::move(MIB)), MRI(std::move(MRI)), DisAsm(std::move(DisAsm)),
- Logger(Logger) {
+ Logger(Logger), InitialDynoStats(isAArch64()) {
Relocation::Arch = this->TheTriple->getArch();
RegularPageSize = isAArch64() ? RegularPageSizeAArch64 : RegularPageSizeX86;
PageAlign = opts::NoHugePages ? RegularPageSize : HugePageSize;
@@ -934,10 +934,13 @@ std::string BinaryContext::generateJumpTableName(const BinaryFunction &BF,
uint64_t Offset = 0;
if (const JumpTable *JT = BF.getJumpTableContainingAddress(Address)) {
Offset = Address - JT->getAddress();
- auto Itr = JT->Labels.find(Offset);
- if (Itr != JT->Labels.end())
- return std::string(Itr->second->getName());
- Id = JumpTableIds.at(JT->getAddress());
+ auto JTLabelsIt = JT->Labels.find(Offset);
+ if (JTLabelsIt != JT->Labels.end())
+ return std::string(JTLabelsIt->second->getName());
+
+ auto JTIdsIt = JumpTableIds.find(JT->getAddress());
+ assert(JTIdsIt != JumpTableIds.end());
+ Id = JTIdsIt->second;
} else {
Id = JumpTableIds[Address] = BF.JumpTables.size();
}
@@ -1322,7 +1325,9 @@ void BinaryContext::processInterproceduralReferences() {
InterproceduralReferences) {
BinaryFunction &Function = *It.first;
uint64_t Address = It.second;
- if (!Address || Function.isIgnored())
+ // Process interprocedural references from ignored functions in BAT mode
+ // (non-simple in non-relocation mode) to properly register entry points
+ if (!Address || (Function.isIgnored() && !HasBATSection))
continue;
BinaryFunction *TargetFunction =
@@ -2212,8 +2217,8 @@ ErrorOr<uint64_t> BinaryContext::getUnsignedValueAtAddress(uint64_t Address,
return DE.getUnsigned(&ValueOffset, Size);
}
-ErrorOr<uint64_t> BinaryContext::getSignedValueAtAddress(uint64_t Address,
- size_t Size) const {
+ErrorOr<int64_t> BinaryContext::getSignedValueAtAddress(uint64_t Address,
+ size_t Size) const {
const ErrorOr<const BinarySection &> Section = getSectionForAddress(Address);
if (!Section)
return std::make_error_code(std::errc::bad_address);
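
The `JumpTableIds` rewrite above is one instance of a pattern this commit
applies throughout (DynoStats.cpp, BinaryFunctionCallGraph.cpp,
CacheMetrics.cpp, Inliner.cpp): `Map.at(Key)` throws `std::out_of_range`,
which is unusable when LLVM is built without exceptions, so lookups whose
success is an invariant become `find` plus `assert`. A generic sketch of the
pattern:

    #include <cassert>

    // Generic find+assert lookup for any associative container where the
    // presence of Key is an invariant rather than a runtime condition.
    template <typename MapT, typename KeyT>
    const typename MapT::mapped_type &lookupOrDie(const MapT &Map,
                                                  const KeyT &Key) {
      auto It = Map.find(Key);
      assert(It != Map.end() && "key must be present");
      return It->second;
    }
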
diff --git a/bolt/lib/Core/BinaryEmitter.cpp b/bolt/lib/Core/BinaryEmitter.cpp
index 6f86ddc77454..0b44acb0816f 100644
--- a/bolt/lib/Core/BinaryEmitter.cpp
+++ b/bolt/lib/Core/BinaryEmitter.cpp
@@ -813,7 +813,9 @@ void BinaryEmitter::emitJumpTable(const JumpTable &JT, MCSection *HotSection,
// determining its destination.
std::map<MCSymbol *, uint64_t> LabelCounts;
if (opts::JumpTables > JTS_SPLIT && !JT.Counts.empty()) {
- MCSymbol *CurrentLabel = JT.Labels.at(0);
+ auto It = JT.Labels.find(0);
+ assert(It != JT.Labels.end());
+ MCSymbol *CurrentLabel = It->second;
uint64_t CurrentLabelCount = 0;
for (unsigned Index = 0; Index < JT.Entries.size(); ++Index) {
auto LI = JT.Labels.find(Index * JT.EntrySize);
diff --git a/bolt/lib/Core/BinaryFunction.cpp b/bolt/lib/Core/BinaryFunction.cpp
index 10b93e702984..c897392f2a57 100644
--- a/bolt/lib/Core/BinaryFunction.cpp
+++ b/bolt/lib/Core/BinaryFunction.cpp
@@ -851,15 +851,19 @@ BinaryFunction::processIndirectBranch(MCInst &Instruction, unsigned Size,
return IndirectBranchType::UNKNOWN;
}
- // RIP-relative addressing should be converted to symbol form by now
- // in processed instructions (but not in jump).
- if (DispExpr) {
+ auto getExprValue = [&](const MCExpr *Expr) {
const MCSymbol *TargetSym;
uint64_t TargetOffset;
- std::tie(TargetSym, TargetOffset) = BC.MIB->getTargetSymbolInfo(DispExpr);
+ std::tie(TargetSym, TargetOffset) = BC.MIB->getTargetSymbolInfo(Expr);
ErrorOr<uint64_t> SymValueOrError = BC.getSymbolValue(*TargetSym);
- assert(SymValueOrError && "global symbol needs a value");
- ArrayStart = *SymValueOrError + TargetOffset;
+ assert(SymValueOrError && "Global symbol needs a value");
+ return *SymValueOrError + TargetOffset;
+ };
+
+ // RIP-relative addressing should be converted to symbol form by now
+ // in processed instructions (but not in jump).
+ if (DispExpr) {
+ ArrayStart = getExprValue(DispExpr);
BaseRegNum = BC.MIB->getNoRegister();
if (BC.isAArch64()) {
ArrayStart &= ~0xFFFULL;
@@ -1666,7 +1670,8 @@ void BinaryFunction::postProcessEntryPoints() {
// In non-relocation mode there's potentially an external undetectable
// reference to the entry point and hence we cannot move this entry
// point. Optimizing without moving could be difficult.
- if (!BC.HasRelocations)
+ // In BAT mode, register any known entry points for CFG construction.
+ if (!BC.HasRelocations && !BC.HasBATSection)
setSimple(false);
const uint32_t Offset = KV.first;
@@ -3697,6 +3702,13 @@ BinaryFunction::BasicBlockListType BinaryFunction::dfs() const {
size_t BinaryFunction::computeHash(bool UseDFS, HashFunction HashFunction,
OperandHashFuncTy OperandHashFunc) const {
+ LLVM_DEBUG({
+ dbgs() << "BOLT-DEBUG: computeHash " << getPrintName() << ' '
+ << (UseDFS ? "dfs" : "bin") << " order "
+ << (HashFunction == HashFunction::StdHash ? "std::hash" : "xxh3")
+ << '\n';
+ });
+
if (size() == 0)
return 0;
diff --git a/bolt/lib/Core/DebugNames.cpp b/bolt/lib/Core/DebugNames.cpp
index 049244c4b515..791cbc6df082 100644
--- a/bolt/lib/Core/DebugNames.cpp
+++ b/bolt/lib/Core/DebugNames.cpp
@@ -112,8 +112,6 @@ void DWARF5AcceleratorTable::addUnit(DWARFUnit &Unit,
// Returns true if DW_TAG_variable should be included in .debug-names based on
// section 6.1.1.1 for DWARF5 spec.
static bool shouldIncludeVariable(const DWARFUnit &Unit, const DIE &Die) {
- if (Die.findAttribute(dwarf::Attribute::DW_AT_declaration))
- return false;
const DIEValue LocAttrInfo =
Die.findAttribute(dwarf::Attribute::DW_AT_location);
if (!LocAttrInfo)
@@ -148,6 +146,8 @@ static bool shouldIncludeVariable(const DWARFUnit &Unit, const DIE &Die) {
bool static canProcess(const DWARFUnit &Unit, const DIE &Die,
std::string &NameToUse, const bool TagsOnly) {
+ if (Die.findAttribute(dwarf::Attribute::DW_AT_declaration))
+ return false;
switch (Die.getTag()) {
case dwarf::DW_TAG_base_type:
case dwarf::DW_TAG_class_type:
diff --git a/bolt/lib/Core/DynoStats.cpp b/bolt/lib/Core/DynoStats.cpp
index 5de0f9e0d6b8..1d9818777596 100644
--- a/bolt/lib/Core/DynoStats.cpp
+++ b/bolt/lib/Core/DynoStats.cpp
@@ -114,8 +114,9 @@ void DynoStats::print(raw_ostream &OS, const DynoStats *Other,
for (auto &Stat : llvm::reverse(SortedHistogram)) {
OS << format("%20s,%'18lld", Printer->getOpcodeName(Stat.second).data(),
Stat.first * opts::DynoStatsScale);
-
- MaxOpcodeHistogramTy MaxMultiMap = OpcodeHistogram.at(Stat.second).second;
+ auto It = OpcodeHistogram.find(Stat.second);
+ assert(It != OpcodeHistogram.end());
+ MaxOpcodeHistogramTy MaxMultiMap = It->second.second;
// Start with function name:BB offset with highest execution count.
for (auto &Max : llvm::reverse(MaxMultiMap)) {
OS << format(", %'18lld, ", Max.first * opts::DynoStatsScale)
diff --git a/bolt/lib/Passes/BinaryFunctionCallGraph.cpp b/bolt/lib/Passes/BinaryFunctionCallGraph.cpp
index 2373710c9edd..bbcc9751c0cb 100644
--- a/bolt/lib/Passes/BinaryFunctionCallGraph.cpp
+++ b/bolt/lib/Passes/BinaryFunctionCallGraph.cpp
@@ -56,7 +56,9 @@ std::deque<BinaryFunction *> BinaryFunctionCallGraph::buildTraversalOrder() {
std::stack<NodeId> Worklist;
for (BinaryFunction *Func : Funcs) {
- const NodeId Id = FuncToNodeId.at(Func);
+ auto It = FuncToNodeId.find(Func);
+ assert(It != FuncToNodeId.end());
+ const NodeId Id = It->second;
Worklist.push(Id);
NodeStatus[Id] = NEW;
}
diff --git a/bolt/lib/Passes/BinaryPasses.cpp b/bolt/lib/Passes/BinaryPasses.cpp
index 867f977cebca..2810f723719d 100644
--- a/bolt/lib/Passes/BinaryPasses.cpp
+++ b/bolt/lib/Passes/BinaryPasses.cpp
@@ -674,7 +674,8 @@ static uint64_t fixDoubleJumps(BinaryFunction &Function, bool MarkInvalid) {
MCPlusBuilder *MIB = Function.getBinaryContext().MIB.get();
for (BinaryBasicBlock &BB : Function) {
auto checkAndPatch = [&](BinaryBasicBlock *Pred, BinaryBasicBlock *Succ,
- const MCSymbol *SuccSym) {
+ const MCSymbol *SuccSym,
+ std::optional<uint32_t> Offset) {
// Ignore infinite loop jumps or fallthrough tail jumps.
if (Pred == Succ || Succ == &BB)
return false;
@@ -715,9 +716,11 @@ static uint64_t fixDoubleJumps(BinaryFunction &Function, bool MarkInvalid) {
Pred->removeSuccessor(&BB);
Pred->eraseInstruction(Pred->findInstruction(Branch));
Pred->addTailCallInstruction(SuccSym);
- MCInst *TailCall = Pred->getLastNonPseudoInstr();
- assert(TailCall);
- MIB->setOffset(*TailCall, BB.getOffset());
+ if (Offset) {
+ MCInst *TailCall = Pred->getLastNonPseudoInstr();
+ assert(TailCall);
+ MIB->setOffset(*TailCall, *Offset);
+ }
} else {
return false;
}
@@ -760,7 +763,8 @@ static uint64_t fixDoubleJumps(BinaryFunction &Function, bool MarkInvalid) {
if (Pred->getSuccessor() == &BB ||
(Pred->getConditionalSuccessor(true) == &BB && !IsTailCall) ||
Pred->getConditionalSuccessor(false) == &BB)
- if (checkAndPatch(Pred, Succ, SuccSym) && MarkInvalid)
+ if (checkAndPatch(Pred, Succ, SuccSym, MIB->getOffset(*Inst)) &&
+ MarkInvalid)
BB.markValid(BB.pred_size() != 0 || BB.isLandingPad() ||
BB.isEntryPoint());
}
@@ -1386,9 +1390,19 @@ Error PrintProgramStats::runOnFunctions(BinaryContext &BC) {
if (Function.isPLTFunction())
continue;
+ // Adjustment for BAT mode: the profile for BOLT split fragments is combined
+ // so only count the hot fragment.
+ const uint64_t Address = Function.getAddress();
+ bool IsHotParentOfBOLTSplitFunction = !Function.getFragments().empty() &&
+ BAT && BAT->isBATFunction(Address) &&
+ !BAT->fetchParentAddress(Address);
+
++NumRegularFunctions;
- if (!Function.isSimple()) {
+ // In BOLTed binaries split functions are non-simple (due to non-relocation
+ // mode), but the original function is known to be simple and we have a
+ // valid profile for it.
+ if (!Function.isSimple() && !IsHotParentOfBOLTSplitFunction) {
if (Function.hasProfile())
++NumNonSimpleProfiledFunctions;
continue;
@@ -1549,23 +1563,28 @@ Error PrintProgramStats::runOnFunctions(BinaryContext &BC) {
const bool Ascending =
opts::DynoStatsSortOrderOpt == opts::DynoStatsSortOrder::Ascending;
- if (SortAll) {
- llvm::stable_sort(Functions,
- [Ascending, &Stats](const BinaryFunction *A,
- const BinaryFunction *B) {
- return Ascending ? Stats.at(A) < Stats.at(B)
- : Stats.at(B) < Stats.at(A);
- });
- } else {
- llvm::stable_sort(
- Functions, [Ascending, &Stats](const BinaryFunction *A,
- const BinaryFunction *B) {
- const DynoStats &StatsA = Stats.at(A);
- const DynoStats &StatsB = Stats.at(B);
- return Ascending ? StatsA.lessThan(StatsB, opts::PrintSortedBy)
- : StatsB.lessThan(StatsA, opts::PrintSortedBy);
- });
- }
+ std::function<bool(const DynoStats &, const DynoStats &)>
+ DynoStatsComparator =
+ SortAll ? [](const DynoStats &StatsA,
+ const DynoStats &StatsB) { return StatsA < StatsB; }
+ : [](const DynoStats &StatsA, const DynoStats &StatsB) {
+ return StatsA.lessThan(StatsB, opts::PrintSortedBy);
+ };
+
+ llvm::stable_sort(Functions,
+ [Ascending, &Stats, DynoStatsComparator](
+ const BinaryFunction *A, const BinaryFunction *B) {
+ auto StatsItr = Stats.find(A);
+ assert(StatsItr != Stats.end());
+ const DynoStats &StatsA = StatsItr->second;
+
+ StatsItr = Stats.find(B);
+ assert(StatsItr != Stats.end());
+ const DynoStats &StatsB = StatsItr->second;
+
+ return Ascending ? DynoStatsComparator(StatsA, StatsB)
+ : DynoStatsComparator(StatsB, StatsA);
+ });
BC.outs() << "BOLT-INFO: top functions sorted by ";
if (SortAll) {
diff --git a/bolt/lib/Passes/CacheMetrics.cpp b/bolt/lib/Passes/CacheMetrics.cpp
index b02d4303110b..21b420a5c2b0 100644
--- a/bolt/lib/Passes/CacheMetrics.cpp
+++ b/bolt/lib/Passes/CacheMetrics.cpp
@@ -67,7 +67,20 @@ calcTSPScore(const std::vector<BinaryFunction *> &BinaryFunctions,
for (BinaryBasicBlock *DstBB : SrcBB->successors()) {
if (SrcBB != DstBB && BI->Count != BinaryBasicBlock::COUNT_NO_PROFILE) {
JumpCount += BI->Count;
- if (BBAddr.at(SrcBB) + BBSize.at(SrcBB) == BBAddr.at(DstBB))
+
+ auto BBAddrIt = BBAddr.find(SrcBB);
+ assert(BBAddrIt != BBAddr.end());
+ uint64_t SrcBBAddr = BBAddrIt->second;
+
+ auto BBSizeIt = BBSize.find(SrcBB);
+ assert(BBSizeIt != BBSize.end());
+ uint64_t SrcBBSize = BBSizeIt->second;
+
+ BBAddrIt = BBAddr.find(DstBB);
+ assert(BBAddrIt != BBAddr.end());
+ uint64_t DstBBAddr = BBAddrIt->second;
+
+ if (SrcBBAddr + SrcBBSize == DstBBAddr)
Score += BI->Count;
}
++BI;
@@ -149,20 +162,28 @@ double expectedCacheHitRatio(
for (BinaryFunction *BF : BinaryFunctions) {
if (BF->getLayout().block_empty())
continue;
- const uint64_t Page =
- BBAddr.at(BF->getLayout().block_front()) / ITLBPageSize;
- PageSamples[Page] += FunctionSamples.at(BF);
+ auto BBAddrIt = BBAddr.find(BF->getLayout().block_front());
+ assert(BBAddrIt != BBAddr.end());
+ const uint64_t Page = BBAddrIt->second / ITLBPageSize;
+
+ auto FunctionSamplesIt = FunctionSamples.find(BF);
+ assert(FunctionSamplesIt != FunctionSamples.end());
+ PageSamples[Page] += FunctionSamplesIt->second;
}
// Computing the expected number of misses for every function
double Misses = 0;
for (BinaryFunction *BF : BinaryFunctions) {
// Skip the function if it has no samples
- if (BF->getLayout().block_empty() || FunctionSamples.at(BF) == 0.0)
+ auto FunctionSamplesIt = FunctionSamples.find(BF);
+ assert(FunctionSamplesIt != FunctionSamples.end());
+ double Samples = FunctionSamplesIt->second;
+ if (BF->getLayout().block_empty() || Samples == 0.0)
continue;
- double Samples = FunctionSamples.at(BF);
- const uint64_t Page =
- BBAddr.at(BF->getLayout().block_front()) / ITLBPageSize;
+
+ auto BBAddrIt = BBAddr.find(BF->getLayout().block_front());
+ assert(BBAddrIt != BBAddr.end());
+ const uint64_t Page = BBAddrIt->second / ITLBPageSize;
// The probability that the page is not present in the cache
const double MissProb =
pow(1.0 - PageSamples[Page] / TotalSamples, ITLBEntries);
@@ -170,8 +191,10 @@ double expectedCacheHitRatio(
// Processing all callers of the function
for (std::pair<BinaryFunction *, uint64_t> Pair : Calls[BF]) {
BinaryFunction *SrcFunction = Pair.first;
- const uint64_t SrcPage =
- BBAddr.at(SrcFunction->getLayout().block_front()) / ITLBPageSize;
+
+ BBAddrIt = BBAddr.find(SrcFunction->getLayout().block_front());
+ assert(BBAddrIt != BBAddr.end());
+ const uint64_t SrcPage = BBAddrIt->second / ITLBPageSize;
// Is this a 'long' or a 'short' call?
if (Page != SrcPage) {
// This is a miss
diff --git a/bolt/lib/Passes/Inliner.cpp b/bolt/lib/Passes/Inliner.cpp
index 84e7d97067b0..f004a8eeea18 100644
--- a/bolt/lib/Passes/Inliner.cpp
+++ b/bolt/lib/Passes/Inliner.cpp
@@ -355,7 +355,9 @@ Inliner::inlineCall(BinaryBasicBlock &CallerBB,
std::vector<BinaryBasicBlock *> Successors(BB.succ_size());
llvm::transform(BB.successors(), Successors.begin(),
[&InlinedBBMap](const BinaryBasicBlock *BB) {
- return InlinedBBMap.at(BB);
+ auto It = InlinedBBMap.find(BB);
+ assert(It != InlinedBBMap.end());
+ return It->second;
});
if (CallerFunction.hasValidProfile() && Callee.hasValidProfile())
diff --git a/bolt/lib/Passes/MCF.cpp b/bolt/lib/Passes/MCF.cpp
index c3898d2dce98..77dea7369140 100644
--- a/bolt/lib/Passes/MCF.cpp
+++ b/bolt/lib/Passes/MCF.cpp
@@ -12,9 +12,11 @@
#include "bolt/Passes/MCF.h"
#include "bolt/Core/BinaryFunction.h"
+#include "bolt/Core/ParallelUtilities.h"
#include "bolt/Passes/DataflowInfoManager.h"
#include "bolt/Utils/CommandLineOpts.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>
#include <vector>
@@ -29,19 +31,10 @@ namespace opts {
extern cl::OptionCategory BoltOptCategory;
-extern cl::opt<bool> TimeOpts;
-
static cl::opt<bool> IterativeGuess(
"iterative-guess",
cl::desc("in non-LBR mode, guess edge counts using iterative technique"),
cl::Hidden, cl::cat(BoltOptCategory));
-
-static cl::opt<bool> UseRArcs(
- "mcf-use-rarcs",
- cl::desc("in MCF, consider the possibility of cancelling flow to balance "
- "edges"),
- cl::Hidden, cl::cat(BoltOptCategory));
-
} // namespace opts
namespace llvm {
@@ -441,7 +434,7 @@ void equalizeBBCounts(DataflowInfoManager &Info, BinaryFunction &BF) {
}
}
-void estimateEdgeCounts(BinaryFunction &BF) {
+void EstimateEdgeCounts::runOnFunction(BinaryFunction &BF) {
EdgeWeightMap PredEdgeWeights;
EdgeWeightMap SuccEdgeWeights;
if (!opts::IterativeGuess) {
@@ -462,8 +455,24 @@ void estimateEdgeCounts(BinaryFunction &BF) {
recalculateBBCounts(BF, /*AllEdges=*/false);
}
-void solveMCF(BinaryFunction &BF, MCFCostFunction CostFunction) {
- llvm_unreachable("not implemented");
+Error EstimateEdgeCounts::runOnFunctions(BinaryContext &BC) {
+ if (llvm::none_of(llvm::make_second_range(BC.getBinaryFunctions()),
+ [](const BinaryFunction &BF) {
+ return BF.getProfileFlags() == BinaryFunction::PF_SAMPLE;
+ }))
+ return Error::success();
+
+ ParallelUtilities::WorkFuncTy WorkFun = [&](BinaryFunction &BF) {
+ runOnFunction(BF);
+ };
+ ParallelUtilities::PredicateTy SkipFunc = [&](const BinaryFunction &BF) {
+ return BF.getProfileFlags() != BinaryFunction::PF_SAMPLE;
+ };
+
+ ParallelUtilities::runOnEachFunction(
+ BC, ParallelUtilities::SchedulingPolicy::SP_BB_QUADRATIC, WorkFun,
+ SkipFunc, "EstimateEdgeCounts");
+ return Error::success();
}
} // namespace bolt
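
With `estimateEdgeCounts` now a pass, the per-function work runs through
`ParallelUtilities::runOnEachFunction`, which takes the work as one callback
and the skip filter as another. A standalone, single-threaded model of that
callback split (the real utility shards the iteration across threads):

    #include <functional>
    #include <vector>

    struct Function { bool HasSampleProfile = false; };

    void runOnEach(std::vector<Function> &Funcs,
                   const std::function<void(Function &)> &Work,
                   const std::function<bool(const Function &)> &Skip) {
      for (Function &F : Funcs)
        if (!Skip(F))
          Work(F);
    }
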
diff --git a/bolt/lib/Profile/BoltAddressTranslation.cpp b/bolt/lib/Profile/BoltAddressTranslation.cpp
index 7cfb9c132c2c..cdfca2b9871a 100644
--- a/bolt/lib/Profile/BoltAddressTranslation.cpp
+++ b/bolt/lib/Profile/BoltAddressTranslation.cpp
@@ -20,10 +20,9 @@ namespace bolt {
const char *BoltAddressTranslation::SECTION_NAME = ".note.bolt_bat";
-void BoltAddressTranslation::writeEntriesForBB(MapTy &Map,
- const BinaryBasicBlock &BB,
- uint64_t FuncInputAddress,
- uint64_t FuncOutputAddress) {
+void BoltAddressTranslation::writeEntriesForBB(
+ MapTy &Map, const BinaryBasicBlock &BB, uint64_t FuncInputAddress,
+ uint64_t FuncOutputAddress) const {
const uint64_t BBOutputOffset =
BB.getOutputAddressRange().first - FuncOutputAddress;
const uint32_t BBInputOffset = BB.getInputOffset();
@@ -55,7 +54,7 @@ void BoltAddressTranslation::writeEntriesForBB(MapTy &Map,
// and this deleted block will both share the same output address (the same
// key), and we need to map back. We choose here to privilege the successor by
// allowing it to overwrite the previously inserted key in the map.
- Map[BBOutputOffset] = BBInputOffset << 1;
+ Map.emplace(BBOutputOffset, BBInputOffset << 1);
const auto &IOAddressMap =
BB.getFunction()->getBinaryContext().getIOAddressMap();
@@ -72,8 +71,7 @@ void BoltAddressTranslation::writeEntriesForBB(MapTy &Map,
LLVM_DEBUG(dbgs() << " Key: " << Twine::utohexstr(OutputOffset) << " Val: "
<< Twine::utohexstr(InputOffset) << " (branch)\n");
- Map.insert(std::pair<uint32_t, uint32_t>(OutputOffset,
- (InputOffset << 1) | BRANCHENTRY));
+ Map.emplace(OutputOffset, (InputOffset << 1) | BRANCHENTRY);
}
}
@@ -108,6 +106,19 @@ void BoltAddressTranslation::write(const BinaryContext &BC, raw_ostream &OS) {
for (const BinaryBasicBlock *const BB :
Function.getLayout().getMainFragment())
writeEntriesForBB(Map, *BB, InputAddress, OutputAddress);
+ // Add entries for deleted blocks. They are still required for correct BB
+ // mapping of branches modified by SCTC. By convention, they would have the
+ // end of the function as output address.
+ const BBHashMapTy &BBHashMap = getBBHashMap(InputAddress);
+ if (BBHashMap.size() != Function.size()) {
+ const uint64_t EndOffset = Function.getOutputSize();
+ std::unordered_set<uint32_t> MappedInputOffsets;
+ for (const BinaryBasicBlock &BB : Function)
+ MappedInputOffsets.emplace(BB.getInputOffset());
+ for (const auto &[InputOffset, _] : BBHashMap)
+ if (!llvm::is_contained(MappedInputOffsets, InputOffset))
+ Map.emplace(EndOffset, InputOffset << 1);
+ }
Maps.emplace(Function.getOutputAddress(), std::move(Map));
ReverseMap.emplace(OutputAddress, InputAddress);
@@ -138,8 +149,8 @@ void BoltAddressTranslation::write(const BinaryContext &BC, raw_ostream &OS) {
<< " basic block hashes\n";
}
-APInt BoltAddressTranslation::calculateBranchEntriesBitMask(MapTy &Map,
- size_t EqualElems) {
+APInt BoltAddressTranslation::calculateBranchEntriesBitMask(
+ MapTy &Map, size_t EqualElems) const {
APInt BitMask(alignTo(EqualElems, 8), 0);
size_t Index = 0;
for (std::pair<const uint32_t, uint32_t> &KeyVal : Map) {
@@ -422,7 +433,7 @@ void BoltAddressTranslation::parseMaps(std::vector<uint64_t> &HotFuncs,
}
}
-void BoltAddressTranslation::dump(raw_ostream &OS) {
+void BoltAddressTranslation::dump(raw_ostream &OS) const {
const size_t NumTables = Maps.size();
OS << "BAT tables for " << NumTables << " functions:\n";
for (const auto &MapEntry : Maps) {
@@ -447,11 +458,15 @@ void BoltAddressTranslation::dump(raw_ostream &OS) {
OS << formatv(" hash: {0:x}", BBHashMap.getBBHash(Val));
OS << "\n";
}
- if (IsHotFunction)
- OS << "NumBlocks: " << NumBasicBlocksMap[Address] << '\n';
- if (SecondaryEntryPointsMap.count(Address)) {
+ if (IsHotFunction) {
+ auto NumBasicBlocksIt = NumBasicBlocksMap.find(Address);
+ assert(NumBasicBlocksIt != NumBasicBlocksMap.end());
+ OS << "NumBlocks: " << NumBasicBlocksIt->second << '\n';
+ }
+ auto SecondaryEntryPointsIt = SecondaryEntryPointsMap.find(Address);
+ if (SecondaryEntryPointsIt != SecondaryEntryPointsMap.end()) {
const std::vector<uint32_t> &SecondaryEntryPoints =
- SecondaryEntryPointsMap[Address];
+ SecondaryEntryPointsIt->second;
OS << SecondaryEntryPoints.size() << " secondary entry points:\n";
for (uint32_t EntryPointOffset : SecondaryEntryPoints)
OS << formatv("{0:x}\n", EntryPointOffset);
@@ -547,13 +562,6 @@ BoltAddressTranslation::getFallthroughsInTrace(uint64_t FuncAddress,
return Res;
}
-uint64_t BoltAddressTranslation::fetchParentAddress(uint64_t Address) const {
- auto Iter = ColdPartSource.find(Address);
- if (Iter == ColdPartSource.end())
- return 0;
- return Iter->second;
-}
-
bool BoltAddressTranslation::enabledFor(
llvm::object::ELFObjectFileBase *InputFile) const {
for (const SectionRef &Section : InputFile->sections()) {
diff --git a/bolt/lib/Profile/CMakeLists.txt b/bolt/lib/Profile/CMakeLists.txt
index 045ac47edb95..ca8b9c34e63b 100644
--- a/bolt/lib/Profile/CMakeLists.txt
+++ b/bolt/lib/Profile/CMakeLists.txt
@@ -17,6 +17,5 @@ add_llvm_library(LLVMBOLTProfile
target_link_libraries(LLVMBOLTProfile
PRIVATE
LLVMBOLTCore
- LLVMBOLTPasses
LLVMBOLTUtils
)
diff --git a/bolt/lib/Profile/DataAggregator.cpp b/bolt/lib/Profile/DataAggregator.cpp
index e06debcee741..ce6ec0a04ac1 100644
--- a/bolt/lib/Profile/DataAggregator.cpp
+++ b/bolt/lib/Profile/DataAggregator.cpp
@@ -613,7 +613,6 @@ Error DataAggregator::readProfile(BinaryContext &BC) {
if (std::error_code EC = writeBATYAML(BC, opts::SaveProfile))
report_error("cannot create output data file", EC);
}
- BC.logBOLTErrorsAndQuitOnFatal(PrintProgramStats().runOnFunctions(BC));
}
return Error::success();
@@ -673,7 +672,8 @@ DataAggregator::getBATParentFunction(const BinaryFunction &Func) const {
return nullptr;
}
-StringRef DataAggregator::getLocationName(const BinaryFunction &Func) const {
+StringRef DataAggregator::getLocationName(const BinaryFunction &Func,
+ bool BAT) {
if (!BAT)
return Func.getOneName();
@@ -702,7 +702,7 @@ bool DataAggregator::doSample(BinaryFunction &OrigFunc, uint64_t Address,
auto I = NamesToSamples.find(Func.getOneName());
if (I == NamesToSamples.end()) {
bool Success;
- StringRef LocName = getLocationName(Func);
+ StringRef LocName = getLocationName(Func, BAT);
std::tie(I, Success) = NamesToSamples.insert(
std::make_pair(Func.getOneName(),
FuncSampleData(LocName, FuncSampleData::ContainerTy())));
@@ -722,7 +722,7 @@ bool DataAggregator::doIntraBranch(BinaryFunction &Func, uint64_t From,
FuncBranchData *AggrData = getBranchData(Func);
if (!AggrData) {
AggrData = &NamesToBranches[Func.getOneName()];
- AggrData->Name = getLocationName(Func);
+ AggrData->Name = getLocationName(Func, BAT);
setBranchData(Func, AggrData);
}
@@ -741,7 +741,7 @@ bool DataAggregator::doInterBranch(BinaryFunction *FromFunc,
StringRef SrcFunc;
StringRef DstFunc;
if (FromFunc) {
- SrcFunc = getLocationName(*FromFunc);
+ SrcFunc = getLocationName(*FromFunc, BAT);
FromAggrData = getBranchData(*FromFunc);
if (!FromAggrData) {
FromAggrData = &NamesToBranches[FromFunc->getOneName()];
@@ -752,7 +752,7 @@ bool DataAggregator::doInterBranch(BinaryFunction *FromFunc,
recordExit(*FromFunc, From, Mispreds, Count);
}
if (ToFunc) {
- DstFunc = getLocationName(*ToFunc);
+ DstFunc = getLocationName(*ToFunc, BAT);
ToAggrData = getBranchData(*ToFunc);
if (!ToAggrData) {
ToAggrData = &NamesToBranches[ToFunc->getOneName()];
@@ -1227,7 +1227,7 @@ ErrorOr<Location> DataAggregator::parseLocationOrOffset() {
if (Sep == StringRef::npos)
return parseOffset();
StringRef LookAhead = ParsingBuf.substr(0, Sep);
- if (LookAhead.find_first_of(":") == StringRef::npos)
+ if (!LookAhead.contains(':'))
return parseOffset();
ErrorOr<StringRef> BuildID = parseString(':');
@@ -2340,7 +2340,7 @@ std::error_code DataAggregator::writeBATYAML(BinaryContext &BC,
continue;
BinaryFunction *BF = BC.getBinaryFunctionAtAddress(FuncAddress);
assert(BF);
- YamlBF.Name = getLocationName(*BF);
+ YamlBF.Name = getLocationName(*BF, BAT);
YamlBF.Id = BF->getFunctionNumber();
YamlBF.Hash = BAT->getBFHash(FuncAddress);
YamlBF.ExecCount = BF->getKnownExecutionCount();
@@ -2349,11 +2349,11 @@ std::error_code DataAggregator::writeBATYAML(BinaryContext &BC,
BAT->getBBHashMap(FuncAddress);
YamlBF.Blocks.resize(YamlBF.NumBasicBlocks);
- for (auto &&[Idx, YamlBB] : llvm::enumerate(YamlBF.Blocks))
- YamlBB.Index = Idx;
-
- for (auto BI = BlockMap.begin(), BE = BlockMap.end(); BI != BE; ++BI)
- YamlBF.Blocks[BI->second.getBBIndex()].Hash = BI->second.getBBHash();
+ for (auto &&[Entry, YamlBB] : llvm::zip(BlockMap, YamlBF.Blocks)) {
+ const auto &Block = Entry.second;
+ YamlBB.Hash = Block.Hash;
+ YamlBB.Index = Block.Index;
+ }
// Lookup containing basic block offset and index
auto getBlock = [&BlockMap](uint32_t Offset) {
@@ -2363,7 +2363,7 @@ std::error_code DataAggregator::writeBATYAML(BinaryContext &BC,
exit(1);
}
--BlockIt;
- return std::pair(BlockIt->first, BlockIt->second.getBBIndex());
+ return std::pair(BlockIt->first, BlockIt->second.Index);
};
for (const BranchInfo &BI : Branches.Data) {
diff --git a/bolt/lib/Profile/DataReader.cpp b/bolt/lib/Profile/DataReader.cpp
index 06c5e96b7806..f2e999bbfdc6 100644
--- a/bolt/lib/Profile/DataReader.cpp
+++ b/bolt/lib/Profile/DataReader.cpp
@@ -598,8 +598,6 @@ void DataReader::readSampleData(BinaryFunction &BF) {
}
BF.ExecutionCount = TotalEntryCount;
-
- estimateEdgeCounts(BF);
}
void DataReader::convertBranchData(BinaryFunction &BF) const {
diff --git a/bolt/lib/Profile/StaleProfileMatching.cpp b/bolt/lib/Profile/StaleProfileMatching.cpp
index 016962ff34d8..365bc5389266 100644
--- a/bolt/lib/Profile/StaleProfileMatching.cpp
+++ b/bolt/lib/Profile/StaleProfileMatching.cpp
@@ -30,6 +30,7 @@
#include "llvm/ADT/Bitfields.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Timer.h"
#include "llvm/Support/xxhash.h"
#include "llvm/Transforms/Utils/SampleProfileInference.h"
@@ -42,6 +43,7 @@ using namespace llvm;
namespace opts {
+extern cl::opt<bool> TimeRewrite;
extern cl::OptionCategory BoltOptCategory;
cl::opt<bool>
@@ -372,8 +374,10 @@ createFlowFunction(const BinaryFunction::BasicBlockOrderType &BlockOrder) {
// Create necessary metadata for the flow function
for (FlowJump &Jump : Func.Jumps) {
- Func.Blocks.at(Jump.Source).SuccJumps.push_back(&Jump);
- Func.Blocks.at(Jump.Target).PredJumps.push_back(&Jump);
+ assert(Jump.Source < Func.Blocks.size());
+ Func.Blocks[Jump.Source].SuccJumps.push_back(&Jump);
+ assert(Jump.Target < Func.Blocks.size());
+ Func.Blocks[Jump.Target].PredJumps.push_back(&Jump);
}
return Func;
}
@@ -705,6 +709,10 @@ void assignProfile(BinaryFunction &BF,
bool YAMLProfileReader::inferStaleProfile(
BinaryFunction &BF, const yaml::bolt::BinaryFunctionProfile &YamlBF) {
+
+ NamedRegionTimer T("inferStaleProfile", "stale profile inference", "rewrite",
+ "Rewrite passes", opts::TimeRewrite);
+
if (!BF.hasCFG())
return false;
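
`NamedRegionTimer` is an RAII timer: the region ends when the object goes out
of scope, so every early return in `inferStaleProfile` is accounted for
without extra bookkeeping. A standalone analogue of the idiom:

    #include <chrono>
    #include <cstdio>

    struct ScopedTimer {
      const char *Name;
      std::chrono::steady_clock::time_point Start =
          std::chrono::steady_clock::now();
      explicit ScopedTimer(const char *N) : Name(N) {}
      ~ScopedTimer() { // runs on every exit path, early returns included
        const long long Ns =
            std::chrono::duration_cast<std::chrono::nanoseconds>(
                std::chrono::steady_clock::now() - Start)
                .count();
        std::fprintf(stderr, "%s: %lld ns\n", Name, Ns);
      }
    };
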
diff --git a/bolt/lib/Profile/YAMLProfileReader.cpp b/bolt/lib/Profile/YAMLProfileReader.cpp
index 978a7cadfe79..f25f59201f1c 100644
--- a/bolt/lib/Profile/YAMLProfileReader.cpp
+++ b/bolt/lib/Profile/YAMLProfileReader.cpp
@@ -99,11 +99,17 @@ bool YAMLProfileReader::parseFunctionProfile(
FuncRawBranchCount += YamlSI.Count;
BF.setRawBranchCount(FuncRawBranchCount);
- if (!opts::IgnoreHash &&
- YamlBF.Hash != BF.computeHash(IsDFSOrder, HashFunction)) {
- if (opts::Verbosity >= 1)
- errs() << "BOLT-WARNING: function hash mismatch\n";
- ProfileMatched = false;
+ if (BF.empty())
+ return true;
+
+ if (!opts::IgnoreHash) {
+ if (!BF.getHash())
+ BF.computeHash(IsDFSOrder, HashFunction);
+ if (YamlBF.Hash != BF.getHash()) {
+ if (opts::Verbosity >= 1)
+ errs() << "BOLT-WARNING: function hash mismatch\n";
+ ProfileMatched = false;
+ }
}
if (YamlBF.NumBasicBlocks != BF.size()) {
@@ -250,10 +256,8 @@ bool YAMLProfileReader::parseFunctionProfile(
if (BB.getExecutionCount() == BinaryBasicBlock::COUNT_NO_PROFILE)
BB.setExecutionCount(0);
- if (YamlBP.Header.Flags & BinaryFunction::PF_SAMPLE) {
+ if (YamlBP.Header.Flags & BinaryFunction::PF_SAMPLE)
BF.setExecutionCount(FunctionExecutionCount);
- estimateEdgeCounts(BF);
- }
ProfileMatched &= !MismatchedBlocks && !MismatchedCalls && !MismatchedEdges;
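
The hash check above now computes a function's hash lazily and caches it via
`getHash()` instead of unconditionally recomputing it for every profiled
function. A standalone model of the memoization (the hash function is a
stand-in, not BOLT's hashing machinery):

    #include <cstddef>
    #include <functional>
    #include <optional>
    #include <string>

    struct Func {
      std::string Body;
      mutable std::optional<size_t> Hash; // cached after first computation

      size_t getHash() const {
        if (!Hash)
          Hash = std::hash<std::string>{}(Body); // stand-in for computeHash()
        return *Hash;
      }
    };
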
diff --git a/bolt/lib/Profile/YAMLProfileWriter.cpp b/bolt/lib/Profile/YAMLProfileWriter.cpp
index ef04ba0d21ad..cf6b61ddd603 100644
--- a/bolt/lib/Profile/YAMLProfileWriter.cpp
+++ b/bolt/lib/Profile/YAMLProfileWriter.cpp
@@ -10,6 +10,7 @@
#include "bolt/Core/BinaryBasicBlock.h"
#include "bolt/Core/BinaryFunction.h"
#include "bolt/Profile/BoltAddressTranslation.h"
+#include "bolt/Profile/DataAggregator.h"
#include "bolt/Profile/ProfileReaderBase.h"
#include "bolt/Rewrite/RewriteInstance.h"
#include "llvm/Support/CommandLine.h"
@@ -39,6 +40,10 @@ const BinaryFunction *YAMLProfileWriter::setCSIDestination(
BC.getFunctionForSymbol(Symbol, &EntryID)) {
if (BAT && BAT->isBATFunction(Callee->getAddress()))
std::tie(Callee, EntryID) = BAT->translateSymbol(BC, *Symbol, Offset);
+ else if (const BinaryBasicBlock *BB =
+ Callee->getBasicBlockContainingOffset(Offset))
+ BC.getFunctionForSymbol(Callee->getSecondaryEntryPointSymbol(*BB),
+ &EntryID);
CSI.DestId = Callee->getFunctionNumber();
CSI.EntryDiscriminator = EntryID;
return Callee;
@@ -59,7 +64,7 @@ YAMLProfileWriter::convert(const BinaryFunction &BF, bool UseDFS,
BF.computeHash(UseDFS);
BF.computeBlockHashes();
- YamlBF.Name = BF.getPrintName();
+ YamlBF.Name = DataAggregator::getLocationName(BF, BAT);
YamlBF.Id = BF.getFunctionNumber();
YamlBF.Hash = BF.getHash();
YamlBF.NumBasicBlocks = BF.size();
diff --git a/bolt/lib/Rewrite/BinaryPassManager.cpp b/bolt/lib/Rewrite/BinaryPassManager.cpp
index cbb7199a53dd..aaa0e1ff4d46 100644
--- a/bolt/lib/Rewrite/BinaryPassManager.cpp
+++ b/bolt/lib/Rewrite/BinaryPassManager.cpp
@@ -23,6 +23,7 @@
#include "bolt/Passes/JTFootprintReduction.h"
#include "bolt/Passes/LongJmp.h"
#include "bolt/Passes/LoopInversionPass.h"
+#include "bolt/Passes/MCF.h"
#include "bolt/Passes/PLTCall.h"
#include "bolt/Passes/PatchEntries.h"
#include "bolt/Passes/RegReAssign.h"
@@ -90,6 +91,11 @@ PrintAfterLowering("print-after-lowering",
cl::desc("print function after instruction lowering"),
cl::Hidden, cl::cat(BoltOptCategory));
+static cl::opt<bool> PrintEstimateEdgeCounts(
+ "print-estimate-edge-counts",
+ cl::desc("print function after edge counts are set for no-LBR profile"),
+ cl::Hidden, cl::cat(BoltOptCategory));
+
cl::opt<bool>
PrintFinalized("print-finalized",
cl::desc("print function after CFG is finalized"),
@@ -334,8 +340,10 @@ Error BinaryFunctionPassManager::runPasses() {
Error BinaryFunctionPassManager::runAllPasses(BinaryContext &BC) {
BinaryFunctionPassManager Manager(BC);
- const DynoStats InitialDynoStats =
- getDynoStats(BC.getBinaryFunctions(), BC.isAArch64());
+ Manager.registerPass(
+ std::make_unique<EstimateEdgeCounts>(PrintEstimateEdgeCounts));
+
+ Manager.registerPass(std::make_unique<DynoStatsSetPass>());
Manager.registerPass(std::make_unique<AsmDumpPass>(),
opts::AsmDump.getNumOccurrences());
@@ -447,10 +455,9 @@ Error BinaryFunctionPassManager::runAllPasses(BinaryContext &BC) {
Manager.registerPass(std::make_unique<SplitFunctions>(PrintSplit));
  // Print final dyno stats while CFG and instruction analysis are still intact.
- Manager.registerPass(
- std::make_unique<DynoStatsPrintPass>(
- InitialDynoStats, "after all optimizations before SCTC and FOP"),
- opts::PrintDynoStats || opts::DynoStatsAll);
+ Manager.registerPass(std::make_unique<DynoStatsPrintPass>(
+ "after all optimizations before SCTC and FOP"),
+ opts::PrintDynoStats || opts::DynoStatsAll);
  // Add the StokeInfo pass, which extracts functions for stoke optimization and
  // gets the liveness information for them
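
The manager changes above fold what used to be a direct estimateEdgeCounts call into a registered pass, and gate passes on command-line options at registration time. A generic sketch of that gated-registration shape (names hypothetical, not BOLT's):

#include <memory>
#include <utility>
#include <vector>

struct BinaryContext {};

struct Pass {
  virtual ~Pass() = default;
  virtual void run(BinaryContext &BC) = 0;
};

struct PassManager {
  std::vector<std::unique_ptr<Pass>> Passes;

  // Append unconditionally, or gate on an option value, mirroring
  // registerPass(..., opts::Flag) in the hunk above.
  void registerPass(std::unique_ptr<Pass> P, bool Enabled = true) {
    if (Enabled)
      Passes.push_back(std::move(P));
  }

  void runAll(BinaryContext &BC) {
    for (auto &P : Passes)
      P->run(BC); // registration order is execution order
  }
};
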
diff --git a/bolt/lib/Rewrite/DWARFRewriter.cpp b/bolt/lib/Rewrite/DWARFRewriter.cpp
index d582ce7b33a2..ab46503621e9 100644
--- a/bolt/lib/Rewrite/DWARFRewriter.cpp
+++ b/bolt/lib/Rewrite/DWARFRewriter.cpp
@@ -73,8 +73,7 @@ static void printDie(DWARFUnit &DU, uint64_t DIEOffset) {
DWARFDataExtractor DebugInfoData = DU.getDebugInfoExtractor();
DWARFDebugInfoEntry DIEEntry;
if (DIEEntry.extractFast(DU, &DIEOffset, DebugInfoData, NextCUOffset, 0)) {
- if (const DWARFAbbreviationDeclaration *AbbrDecl =
- DIEEntry.getAbbreviationDeclarationPtr()) {
+ if (DIEEntry.getAbbreviationDeclarationPtr()) {
DWARFDie DDie(&DU, &DIEEntry);
printDie(DDie);
} else {
diff --git a/bolt/lib/Rewrite/LinuxKernelRewriter.cpp b/bolt/lib/Rewrite/LinuxKernelRewriter.cpp
index 99775ccfe38d..b2c8b2446f7e 100644
--- a/bolt/lib/Rewrite/LinuxKernelRewriter.cpp
+++ b/bolt/lib/Rewrite/LinuxKernelRewriter.cpp
@@ -393,7 +393,7 @@ void LinuxKernelRewriter::processLKKSymtab(bool IsGPL) {
for (uint64_t I = 0; I < SectionSize; I += 4) {
const uint64_t EntryAddress = SectionAddress + I;
- ErrorOr<uint64_t> Offset = BC.getSignedValueAtAddress(EntryAddress, 4);
+ ErrorOr<int64_t> Offset = BC.getSignedValueAtAddress(EntryAddress, 4);
assert(Offset && "Reading valid PC-relative offset for a ksymtab entry");
const int32_t SignedOffset = *Offset;
const uint64_t RefAddress = EntryAddress + SignedOffset;
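
The one-line type fix above keeps the entry value signed end to end: a ksymtab entry is a 32-bit offset relative to its own address, so a negative offset must sign-extend before the addition. A self-contained sketch of the arithmetic (addresses invented for illustration):

#include <cstdint>
#include <cstdio>

// RefAddress = EntryAddress + SignedOffset, with the offset read as a
// signed 32-bit value; reading it as unsigned would turn -16 into
// 0xFFFFFFF0 and push the target roughly 4 GiB away.
uint64_t ksymtabTarget(uint64_t EntryAddress, int32_t SignedOffset) {
  return EntryAddress + static_cast<int64_t>(SignedOffset);
}

int main() {
  // An entry at 0x2000 referring 16 bytes backwards resolves to 0x1ff0.
  std::printf("%#llx\n", static_cast<unsigned long long>(
                             ksymtabTarget(0x2000, -16)));
}
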
diff --git a/bolt/lib/Rewrite/RewriteInstance.cpp b/bolt/lib/Rewrite/RewriteInstance.cpp
index 85b39176754b..4b4913dd7a16 100644
--- a/bolt/lib/Rewrite/RewriteInstance.cpp
+++ b/bolt/lib/Rewrite/RewriteInstance.cpp
@@ -17,6 +17,7 @@
#include "bolt/Core/MCPlusBuilder.h"
#include "bolt/Core/ParallelUtilities.h"
#include "bolt/Core/Relocation.h"
+#include "bolt/Passes/BinaryPasses.h"
#include "bolt/Passes/CacheMetrics.h"
#include "bolt/Passes/ReorderFunctions.h"
#include "bolt/Profile/BoltAddressTranslation.h"
@@ -86,6 +87,7 @@ extern cl::list<std::string> ReorderData;
extern cl::opt<bolt::ReorderFunctions::ReorderType> ReorderFunctions;
extern cl::opt<bool> TerminalTrap;
extern cl::opt<bool> TimeBuild;
+extern cl::opt<bool> TimeRewrite;
cl::opt<bool> AllowStripped("allow-stripped",
cl::desc("allow processing of stripped binaries"),
@@ -236,11 +238,6 @@ UseGnuStack("use-gnu-stack",
cl::cat(BoltCategory));
static cl::opt<bool>
- TimeRewrite("time-rewrite",
- cl::desc("print time spent in rewriting passes"), cl::Hidden,
- cl::cat(BoltCategory));
-
-static cl::opt<bool>
SequentialDisassembly("sequential-disassembly",
cl::desc("performs disassembly sequentially"),
cl::init(false),
@@ -1500,7 +1497,7 @@ void RewriteInstance::registerFragments() {
if (!BC->hasSymbolsWithFileName()) {
BC->errs() << "BOLT-ERROR: input file has split functions but does not "
"have FILE symbols. If the binary was stripped, preserve "
- "FILE symbols with --keep-file-symbols strip option";
+                 "FILE symbols with the --keep-file-symbols strip option\n";
exit(1);
}
@@ -1988,6 +1985,7 @@ Error RewriteInstance::readSpecialSections() {
if (ErrorOr<BinarySection &> BATSec =
BC->getUniqueSectionByName(BoltAddressTranslation::SECTION_NAME)) {
+ BC->HasBATSection = true;
// Do not read BAT when plotting a heatmap
if (!opts::HeatmapMode) {
if (std::error_code EC = BAT->parse(BC->outs(), BATSec->getContents())) {
@@ -3208,12 +3206,14 @@ void RewriteInstance::preprocessProfileData() {
if (Error E = ProfileReader->preprocessProfile(*BC.get()))
report_error("cannot pre-process profile", std::move(E));
- if (!BC->hasSymbolsWithFileName() && ProfileReader->hasLocalsWithFileName()) {
+ if (!BC->hasSymbolsWithFileName() && ProfileReader->hasLocalsWithFileName() &&
+ !opts::AllowStripped) {
BC->errs()
<< "BOLT-ERROR: input binary does not have local file symbols "
"but profile data includes function names with embedded file "
"names. It appears that the input binary was stripped while a "
- "profiled binary was not\n";
+ "profiled binary was not. If you know what you are doing and "
+                 "wish to proceed, use the -allow-stripped option.\n";
exit(1);
}
}
@@ -3284,8 +3284,11 @@ void RewriteInstance::processProfileData() {
// Release memory used by profile reader.
ProfileReader.reset();
- if (opts::AggregateOnly)
+ if (opts::AggregateOnly) {
+ PrintProgramStats PPS(&*BAT);
+ BC->logBOLTErrorsAndQuitOnFatal(PPS.runOnFunctions(*BC));
exit(0);
+ }
}
void RewriteInstance::disassembleFunctions() {
@@ -4808,6 +4811,40 @@ void RewriteInstance::updateELFSymbolTable(
// Create a new symbol based on the existing symbol.
ELFSymTy NewSymbol = Symbol;
+ // Handle special symbols based on their name.
+ Expected<StringRef> SymbolName = Symbol.getName(StringSection);
+ assert(SymbolName && "cannot get symbol name");
+
+ auto updateSymbolValue = [&](const StringRef Name,
+ std::optional<uint64_t> Value = std::nullopt) {
+ NewSymbol.st_value = Value ? *Value : getNewValueForSymbol(Name);
+ NewSymbol.st_shndx = ELF::SHN_ABS;
+ BC->outs() << "BOLT-INFO: setting " << Name << " to 0x"
+ << Twine::utohexstr(NewSymbol.st_value) << '\n';
+ };
+
+ if (*SymbolName == "__hot_start" || *SymbolName == "__hot_end") {
+ if (opts::HotText) {
+ updateSymbolValue(*SymbolName);
+ ++NumHotTextSymsUpdated;
+ }
+ goto registerSymbol;
+ }
+
+ if (*SymbolName == "__hot_data_start" || *SymbolName == "__hot_data_end") {
+ if (opts::HotData) {
+ updateSymbolValue(*SymbolName);
+ ++NumHotDataSymsUpdated;
+ }
+ goto registerSymbol;
+ }
+
+ if (*SymbolName == "_end") {
+ if (NextAvailableAddress > Symbol.st_value)
+ updateSymbolValue(*SymbolName, NextAvailableAddress);
+ goto registerSymbol;
+ }
+
if (Function) {
// If the symbol matched a function that was not emitted, update the
// corresponding section index but otherwise leave it unchanged.
@@ -4904,33 +4941,7 @@ void RewriteInstance::updateELFSymbolTable(
}
}
- // Handle special symbols based on their name.
- Expected<StringRef> SymbolName = Symbol.getName(StringSection);
- assert(SymbolName && "cannot get symbol name");
-
- auto updateSymbolValue = [&](const StringRef Name,
- std::optional<uint64_t> Value = std::nullopt) {
- NewSymbol.st_value = Value ? *Value : getNewValueForSymbol(Name);
- NewSymbol.st_shndx = ELF::SHN_ABS;
- BC->outs() << "BOLT-INFO: setting " << Name << " to 0x"
- << Twine::utohexstr(NewSymbol.st_value) << '\n';
- };
-
- if (opts::HotText &&
- (*SymbolName == "__hot_start" || *SymbolName == "__hot_end")) {
- updateSymbolValue(*SymbolName);
- ++NumHotTextSymsUpdated;
- }
-
- if (opts::HotData && (*SymbolName == "__hot_data_start" ||
- *SymbolName == "__hot_data_end")) {
- updateSymbolValue(*SymbolName);
- ++NumHotDataSymsUpdated;
- }
-
- if (*SymbolName == "_end" && NextAvailableAddress > Symbol.st_value)
- updateSymbolValue(*SymbolName, NextAvailableAddress);
-
+ registerSymbol:
if (IsDynSym)
Write((&Symbol - cantFail(Obj.symbols(&SymTabSection)).begin()) *
sizeof(ELFSymTy),
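
The symbol-table refactor above hoists the name-based special cases (__hot_start/__hot_end, __hot_data_start/__hot_data_end, _end) ahead of the generic function handling and funnels every path into a single registration point. A reduced sketch of that control flow, with a placeholder constant standing in for getNewValueForSymbol:

#include <cstdint>
#include <string>

struct Symbol {
  std::string Name;
  uint64_t Value = 0;
};

// HotText and NextAvailable stand in for opts::HotText and
// NextAvailableAddress in the hunk above.
void updateSpecialSymbol(Symbol &S, bool HotText, uint64_t NextAvailable) {
  if (S.Name == "__hot_start" || S.Name == "__hot_end") {
    if (HotText)
      S.Value = 0x400000; // placeholder for getNewValueForSymbol(S.Name)
  } else if (S.Name == "_end") {
    if (NextAvailable > S.Value)
      S.Value = NextAvailable;
  } else {
    // generic function/data symbol handling would run here
  }
  // Single exit: every branch converges on one registration site, which
  // is what the `goto registerSymbol` in the diff achieves.
}
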
diff --git a/bolt/lib/Target/X86/X86MCPlusBuilder.cpp b/bolt/lib/Target/X86/X86MCPlusBuilder.cpp
index 8fdacffcb147..a33a9dc8c013 100644
--- a/bolt/lib/Target/X86/X86MCPlusBuilder.cpp
+++ b/bolt/lib/Target/X86/X86MCPlusBuilder.cpp
@@ -1932,6 +1932,19 @@ public:
// = R_X86_64_PC32(Ln) + En - JT
// = R_X86_64_PC32(Ln + offsetof(En))
//
+ auto isRIPRel = [&](X86MemOperand &MO) {
+ // NB: DispExpr should be set
+ return MO.DispExpr != nullptr &&
+ MO.BaseRegNum == RegInfo->getProgramCounter() &&
+ MO.IndexRegNum == X86::NoRegister &&
+ MO.SegRegNum == X86::NoRegister;
+ };
+ auto isIndexed = [](X86MemOperand &MO, MCPhysReg R) {
+ // NB: IndexRegNum should be set.
+ return MO.IndexRegNum != X86::NoRegister && MO.BaseRegNum == R &&
+ MO.ScaleImm == 4 && MO.DispImm == 0 &&
+ MO.SegRegNum == X86::NoRegister;
+ };
LLVM_DEBUG(dbgs() << "Checking for PIC jump table\n");
MCInst *MemLocInstr = nullptr;
const MCInst *MovInstr = nullptr;
@@ -1965,9 +1978,8 @@ public:
std::optional<X86MemOperand> MO = evaluateX86MemoryOperand(Instr);
if (!MO)
break;
- if (MO->BaseRegNum != R1 || MO->ScaleImm != 4 ||
- MO->IndexRegNum == X86::NoRegister || MO->DispImm != 0 ||
- MO->SegRegNum != X86::NoRegister)
+ if (!isIndexed(*MO, R1))
+ // POSSIBLE_PIC_JUMP_TABLE
break;
MovInstr = &Instr;
} else {
@@ -1986,9 +1998,7 @@ public:
std::optional<X86MemOperand> MO = evaluateX86MemoryOperand(Instr);
if (!MO)
break;
- if (MO->BaseRegNum != RegInfo->getProgramCounter() ||
- MO->IndexRegNum != X86::NoRegister ||
- MO->SegRegNum != X86::NoRegister || MO->DispExpr == nullptr)
+ if (!isRIPRel(*MO))
break;
MemLocInstr = &Instr;
break;
@@ -2105,13 +2115,15 @@ public:
return IndirectBranchType::POSSIBLE_FIXED_BRANCH;
}
- if (Type == IndirectBranchType::POSSIBLE_PIC_JUMP_TABLE &&
- (MO->ScaleImm != 1 || MO->BaseRegNum != RIPRegister))
- return IndirectBranchType::UNKNOWN;
-
- if (Type != IndirectBranchType::POSSIBLE_PIC_JUMP_TABLE &&
- MO->ScaleImm != PtrSize)
- return IndirectBranchType::UNKNOWN;
+ switch (Type) {
+ case IndirectBranchType::POSSIBLE_PIC_JUMP_TABLE:
+ if (MO->ScaleImm != 1 || MO->BaseRegNum != RIPRegister)
+ return IndirectBranchType::UNKNOWN;
+ break;
+ default:
+ if (MO->ScaleImm != PtrSize)
+ return IndirectBranchType::UNKNOWN;
+ }
MemLocInstrOut = MemLocInstr;
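
The two predicates factored out above name the operand shapes the PIC jump-table matcher looks for: a RIP-relative load of a symbolic displacement, and a (base, index, 4) fetch of a 32-bit table entry. A standalone sketch with placeholder register numbers (the real values come from MCRegisterInfo):

#include <cstdint>

// Hypothetical mirror of X86MemOperand, reduced to the matched fields.
struct MemOp {
  unsigned BaseReg = 0, IndexReg = 0, SegReg = 0;
  int64_t ScaleImm = 1, DispImm = 0;
  const void *DispExpr = nullptr; // symbolic displacement, if any
};

constexpr unsigned NoRegister = 0, RIP = 16; // placeholder numbering

// disp(%rip) with no index or segment override.
bool isRIPRel(const MemOp &MO) {
  return MO.DispExpr && MO.BaseReg == RIP && MO.IndexReg == NoRegister &&
         MO.SegReg == NoRegister;
}

// (Base, Index, 4) with zero displacement: the shape of a 32-bit
// PIC jump-table entry load.
bool isIndexed(const MemOp &MO, unsigned Base) {
  return MO.IndexReg != NoRegister && MO.BaseReg == Base &&
         MO.ScaleImm == 4 && MO.DispImm == 0 && MO.SegRegNum == NoRegister;
}
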
diff --git a/bolt/lib/Utils/CommandLineOpts.cpp b/bolt/lib/Utils/CommandLineOpts.cpp
index ba296c10c00a..41c89bc8aeba 100644
--- a/bolt/lib/Utils/CommandLineOpts.cpp
+++ b/bolt/lib/Utils/CommandLineOpts.cpp
@@ -179,6 +179,10 @@ cl::opt<bool> TimeOpts("time-opts",
cl::desc("print time spent in each optimization"),
cl::cat(BoltOptCategory));
+cl::opt<bool> TimeRewrite("time-rewrite",
+ cl::desc("print time spent in rewriting passes"),
+ cl::Hidden, cl::cat(BoltCategory));
+
cl::opt<bool> UseOldText(
"use-old-text",
cl::desc("re-use space in old .text if possible (relocation mode)"),
diff --git a/bolt/runtime/instr.cpp b/bolt/runtime/instr.cpp
index 16e0bbd55f90..d1f8a216badc 100644
--- a/bolt/runtime/instr.cpp
+++ b/bolt/runtime/instr.cpp
@@ -1245,7 +1245,6 @@ void Graph::computeEdgeFrequencies(const uint64_t *Counters,
continue;
assert(SpanningTreeNodes[Cur].NumInEdges == 1, "must have 1 parent");
- const uint32_t Parent = SpanningTreeNodes[Cur].InEdges[0].Node;
const uint32_t ParentEdge = SpanningTreeNodes[Cur].InEdges[0].ID;
// Calculate parent edge freq.
@@ -1464,9 +1463,8 @@ void visitCallFlowEntry(CallFlowHashTable::MapEntry &Entry, int FD,
int openProfile() {
// Build the profile name string by appending our PID
char Buf[BufSize];
- char *Ptr = Buf;
uint64_t PID = __getpid();
- Ptr = strCopy(Buf, __bolt_instr_filename, BufSize);
+ char *Ptr = strCopy(Buf, __bolt_instr_filename, BufSize);
if (__bolt_instr_use_pid) {
Ptr = strCopy(Ptr, ".", BufSize - (Ptr - Buf + 1));
Ptr = intToStr(Ptr, PID, 10);
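
The cleanup above folds the dead initialization of Ptr into its declaration. For readers outside the runtime: openProfile builds "<filename>.<pid>" without libc, chaining strCopy and intToStr; a hosted analogue with snprintf shows the intended result (function name and signature are illustrative only):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Build "<base>" or "<base>.<pid>" into Buf, as the instrumented runtime
// does with its freestanding string helpers.
void buildProfileName(char *Buf, std::size_t BufSize, const char *Base,
                      bool UsePid, uint64_t Pid) {
  if (UsePid)
    std::snprintf(Buf, BufSize, "%s.%llu", Base,
                  static_cast<unsigned long long>(Pid));
  else
    std::snprintf(Buf, BufSize, "%s", Base);
}
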
diff --git a/bolt/test/CMakeLists.txt b/bolt/test/CMakeLists.txt
index 89862fd59eb8..d468ff984840 100644
--- a/bolt/test/CMakeLists.txt
+++ b/bolt/test/CMakeLists.txt
@@ -56,7 +56,7 @@ list(APPEND BOLT_TEST_DEPS
)
add_custom_target(bolt-test-depends DEPENDS ${BOLT_TEST_DEPS})
-set_target_properties(bolt-test-depends PROPERTIES FOLDER "BOLT")
+set_target_properties(bolt-test-depends PROPERTIES FOLDER "BOLT/Tests")
add_lit_testsuite(check-bolt "Running the BOLT regression tests"
${CMAKE_CURRENT_BINARY_DIR}
@@ -64,7 +64,6 @@ add_lit_testsuite(check-bolt "Running the BOLT regression tests"
DEPENDS ${BOLT_TEST_DEPS}
ARGS ${BOLT_TEST_EXTRA_ARGS}
)
-set_target_properties(check-bolt PROPERTIES FOLDER "BOLT")
add_lit_testsuites(BOLT ${CMAKE_CURRENT_SOURCE_DIR}
PARAMS ${BOLT_TEST_PARAMS}
diff --git a/bolt/test/X86/bb-with-two-tail-calls.s b/bolt/test/X86/bb-with-two-tail-calls.s
index bb2b0cd4cc23..8bbecc498ed7 100644
--- a/bolt/test/X86/bb-with-two-tail-calls.s
+++ b/bolt/test/X86/bb-with-two-tail-calls.s
@@ -1,8 +1,6 @@
# This reproduces a bug with dynostats when trying to compute branch stats
# at a block with two tail calls (one conditional and one unconditional).
-# REQUIRES: system-linux
-
# RUN: llvm-mc -filetype=obj -triple x86_64-unknown-unknown \
# RUN: %s -o %t.o
# RUN: link_fdata %s %t.o %t.fdata
@@ -10,10 +8,20 @@
# RUN: %clang %cflags %t.o -o %t.exe -Wl,-q -nostdlib
# RUN: llvm-bolt %t.exe -o %t.out --data %t.fdata --lite=0 --dyno-stats \
# RUN: --print-sctc --print-only=_start -enable-bat 2>&1 | FileCheck %s
+# RUN: llvm-objdump --syms %t.out > %t.log
+# RUN: llvm-bat-dump %t.out --dump-all >> %t.log
+# RUN: FileCheck %s --input-file %t.log --check-prefix=CHECK-BAT
+
# CHECK-NOT: Assertion `BranchInfo.size() == 2 && "could only be called for blocks with 2 successors"' failed.
# Two tail calls in the same basic block after SCTC:
# CHECK: {{.*}}: ja {{.*}} # TAILCALL # Offset: 7 # CTCTakenCount: 4
-# CHECK-NEXT: {{.*}}: jmp {{.*}} # TAILCALL # Offset: 12
+# CHECK-NEXT: {{.*}}: jmp {{.*}} # TAILCALL # Offset: 13
+
+# Confirm that a deleted basic block is emitted at function end offset (0xe)
+# CHECK-BAT: [[#%x,ADDR:]] g .text [[#%x,SIZE:]] _start
+# CHECK-BAT: Function Address: 0x[[#%x,ADDR]]
+# CHECK-BAT: 0x[[#%x,SIZE]]
+# CHECK-BAT: NumBlocks: 5
.globl _start
_start:
@@ -23,7 +31,9 @@ a: ja b
x: ret
# FDATA: 1 _start #a# 1 _start #b# 2 4
b: jmp e
-c: jmp f
+c:
+ .nops 1
+ jmp f
.globl e
e:
diff --git a/bolt/test/X86/bolt-address-translation-yaml.test b/bolt/test/X86/bolt-address-translation-yaml.test
index e21513b7dfe5..8f65eaba891e 100644
--- a/bolt/test/X86/bolt-address-translation-yaml.test
+++ b/bolt/test/X86/bolt-address-translation-yaml.test
@@ -31,7 +31,8 @@ RUN: perf2bolt %t.out --pa -p %p/Inputs/blarge_new_bat.preagg.txt -w %t.yaml -o
RUN: 2>&1 | FileCheck --check-prefix READ-BAT-CHECK %s
RUN: FileCheck --input-file %t.yaml --check-prefix YAML-BAT-CHECK %s
# Check that YAML converted from fdata matches YAML created directly with BAT.
-RUN: llvm-bolt %t.exe -data %t.fdata -w %t.yaml-fdata -o /dev/null
+RUN: llvm-bolt %t.exe -data %t.fdata -w %t.yaml-fdata -o /dev/null \
+RUN: 2>&1 | FileCheck --check-prefix READ-BAT-FDATA-CHECK %s
RUN: FileCheck --input-file %t.yaml-fdata --check-prefix YAML-BAT-CHECK %s
# Test resulting YAML profile with the original binary (no-stale mode)
@@ -40,11 +41,13 @@ RUN: | FileCheck --check-prefix CHECK-BOLT-YAML %s
WRITE-BAT-CHECK: BOLT-INFO: Wrote 5 BAT maps
WRITE-BAT-CHECK: BOLT-INFO: Wrote 4 function and 22 basic block hashes
-WRITE-BAT-CHECK: BOLT-INFO: BAT section size (bytes): 384
+WRITE-BAT-CHECK: BOLT-INFO: BAT section size (bytes): 404
READ-BAT-CHECK-NOT: BOLT-ERROR: unable to save profile in YAML format for input file processed by BOLT
READ-BAT-CHECK: BOLT-INFO: Parsed 5 BAT entries
READ-BAT-CHECK: PERF2BOLT: read 79 aggregated LBR entries
+READ-BAT-CHECK: BOLT-INFO: 5 out of 21 functions in the binary (23.8%) have non-empty execution profile
+READ-BAT-FDATA-CHECK: BOLT-INFO: 5 out of 16 functions in the binary (31.2%) have non-empty execution profile
YAML-BAT-CHECK: functions:
# Function not covered by BAT - has insns in basic block
diff --git a/bolt/test/X86/bolt-address-translation.test b/bolt/test/X86/bolt-address-translation.test
index e6b21c14077b..dfdd1eea3233 100644
--- a/bolt/test/X86/bolt-address-translation.test
+++ b/bolt/test/X86/bolt-address-translation.test
@@ -37,7 +37,7 @@
# CHECK: BOLT: 3 out of 7 functions were overwritten.
# CHECK: BOLT-INFO: Wrote 6 BAT maps
# CHECK: BOLT-INFO: Wrote 3 function and 58 basic block hashes
-# CHECK: BOLT-INFO: BAT section size (bytes): 928
+# CHECK: BOLT-INFO: BAT section size (bytes): 940
#
# usqrt mappings (hot part). We match against any key (left side containing
# the bolted binary offsets) because BOLT may change where it puts instructions
diff --git a/bolt/test/X86/dwarf5-debug-names-class-type-decl.s b/bolt/test/X86/dwarf5-debug-names-class-type-decl.s
new file mode 100644
index 000000000000..587eaaf6f4ff
--- /dev/null
+++ b/bolt/test/X86/dwarf5-debug-names-class-type-decl.s
@@ -0,0 +1,670 @@
+# REQUIRES: system-linux
+
+# RUN: llvm-mc -dwarf-version=5 -filetype=obj -triple x86_64-unknown-linux %s -o %t1.o
+# RUN: %clang %cflags -dwarf-5 %t1.o -o %t.exe -Wl,-q
+# RUN: llvm-bolt %t.exe -o %t.bolt --update-debug-sections
+# RUN: llvm-dwarfdump --show-form --verbose --debug-info %t.bolt > %t.txt
+# RUN: llvm-dwarfdump --show-form --verbose --debug-names %t.bolt >> %t.txt
+# RUN: cat %t.txt | FileCheck --check-prefix=POSTCHECK %s
+
+## This tests that BOLT doesn't generate an entry for a DW_TAG_class_type declaration with DW_AT_name.
+
+# POSTCHECK: DW_TAG_type_unit
+# POSTCHECK: DW_TAG_class_type [7]
+# POSTCHECK-NEXT: DW_AT_name [DW_FORM_strx1] (indexed (00000006) string = "InnerState")
+# POSTCHECK-NEXT: DW_AT_declaration [DW_FORM_flag_present] (true)
+# POSTCHECK: Name Index
+# POSTCHECK-NOT: "InnerState"
+
+## -g2 -O0 -fdebug-types-section -gpubnames
+## namespace A {
+## namespace B {
+## class State {
+## public:
+## class InnerState{
+## InnerState() {}
+## };
+## State(){}
+## State(InnerState S){}
+## };
+## }
+## }
+##
+## int main() {
+## A::B::State S;
+## return 0;
+## }
+
+ .text
+ .file "main.cpp"
+ .file 0 "/DW_TAG_class_type" "main.cpp" md5 0x80f261b124b76c481b8761c040ab4802
+ .section .debug_info,"G",@progbits,16664150534606561860,comdat
+.Ltu_begin0:
+ .long .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit
+.Ldebug_info_start0:
+ .short 5 # DWARF version number
+ .byte 2 # DWARF Unit Type
+ .byte 8 # Address Size (in bytes)
+ .long .debug_abbrev # Offset Into Abbrev. Section
+ .quad -1782593539102989756 # Type Signature
+ .long 39 # Type DIE Offset
+ .byte 1 # Abbrev [1] 0x18:0x3b DW_TAG_type_unit
+ .short 33 # DW_AT_language
+ .long .Lline_table_start0 # DW_AT_stmt_list
+ .long .Lstr_offsets_base0 # DW_AT_str_offsets_base
+ .byte 2 # Abbrev [2] 0x23:0x2a DW_TAG_namespace
+ .byte 3 # DW_AT_name
+ .byte 2 # Abbrev [2] 0x25:0x27 DW_TAG_namespace
+ .byte 4 # DW_AT_name
+ .byte 3 # Abbrev [3] 0x27:0x24 DW_TAG_class_type
+ .byte 5 # DW_AT_calling_convention
+ .byte 5 # DW_AT_name
+ .byte 1 # DW_AT_byte_size
+ .byte 0 # DW_AT_decl_file
+ .byte 3 # DW_AT_decl_line
+ .byte 4 # Abbrev [4] 0x2d:0xb DW_TAG_subprogram
+ .byte 5 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 8 # DW_AT_decl_line
+ # DW_AT_declaration
+ # DW_AT_external
+ .byte 1 # DW_AT_accessibility
+ # DW_ACCESS_public
+ .byte 5 # Abbrev [5] 0x32:0x5 DW_TAG_formal_parameter
+ .long 77 # DW_AT_type
+ # DW_AT_artificial
+ .byte 0 # End Of Children Mark
+ .byte 4 # Abbrev [4] 0x38:0x10 DW_TAG_subprogram
+ .byte 5 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 9 # DW_AT_decl_line
+ # DW_AT_declaration
+ # DW_AT_external
+ .byte 1 # DW_AT_accessibility
+ # DW_ACCESS_public
+ .byte 5 # Abbrev [5] 0x3d:0x5 DW_TAG_formal_parameter
+ .long 77 # DW_AT_type
+ # DW_AT_artificial
+ .byte 6 # Abbrev [6] 0x42:0x5 DW_TAG_formal_parameter
+ .long 72 # DW_AT_type
+ .byte 0 # End Of Children Mark
+ .byte 7 # Abbrev [7] 0x48:0x2 DW_TAG_class_type
+ .byte 6 # DW_AT_name
+ # DW_AT_declaration
+ .byte 0 # End Of Children Mark
+ .byte 0 # End Of Children Mark
+ .byte 0 # End Of Children Mark
+ .byte 8 # Abbrev [8] 0x4d:0x5 DW_TAG_pointer_type
+ .long 39 # DW_AT_type
+ .byte 0 # End Of Children Mark
+.Ldebug_info_end0:
+ .text
+ .globl main # -- Begin function main
+ .p2align 4, 0x90
+ .type main,@function
+main: # @main
+.Lfunc_begin0:
+ .loc 0 14 0 # main.cpp:14:0
+ .cfi_startproc
+# %bb.0: # %entry
+ pushq %rbp
+ .cfi_def_cfa_offset 16
+ .cfi_offset %rbp, -16
+ movq %rsp, %rbp
+ .cfi_def_cfa_register %rbp
+ subq $16, %rsp
+ movl $0, -4(%rbp)
+.Ltmp0:
+ .loc 0 15 15 prologue_end # main.cpp:15:15
+ leaq -5(%rbp), %rdi
+ callq _ZN1A1B5StateC2Ev
+ .loc 0 16 3 # main.cpp:16:3
+ xorl %eax, %eax
+ .loc 0 16 3 epilogue_begin is_stmt 0 # main.cpp:16:3
+ addq $16, %rsp
+ popq %rbp
+ .cfi_def_cfa %rsp, 8
+ retq
+.Ltmp1:
+.Lfunc_end0:
+ .size main, .Lfunc_end0-main
+ .cfi_endproc
+ # -- End function
+ .section .text._ZN1A1B5StateC2Ev,"axG",@progbits,_ZN1A1B5StateC2Ev,comdat
+ .weak _ZN1A1B5StateC2Ev # -- Begin function _ZN1A1B5StateC2Ev
+ .p2align 4, 0x90
+ .type _ZN1A1B5StateC2Ev,@function
+_ZN1A1B5StateC2Ev: # @_ZN1A1B5StateC2Ev
+.Lfunc_begin1:
+ .loc 0 8 0 is_stmt 1 # main.cpp:8:0
+ .cfi_startproc
+# %bb.0: # %entry
+ pushq %rbp
+ .cfi_def_cfa_offset 16
+ .cfi_offset %rbp, -16
+ movq %rsp, %rbp
+ .cfi_def_cfa_register %rbp
+ movq %rdi, -8(%rbp)
+.Ltmp2:
+ .loc 0 8 15 prologue_end epilogue_begin # main.cpp:8:15
+ popq %rbp
+ .cfi_def_cfa %rsp, 8
+ retq
+.Ltmp3:
+.Lfunc_end1:
+ .size _ZN1A1B5StateC2Ev, .Lfunc_end1-_ZN1A1B5StateC2Ev
+ .cfi_endproc
+ # -- End function
+ .section .debug_abbrev,"",@progbits
+ .byte 1 # Abbreviation Code
+ .byte 65 # DW_TAG_type_unit
+ .byte 1 # DW_CHILDREN_yes
+ .byte 19 # DW_AT_language
+ .byte 5 # DW_FORM_data2
+ .byte 16 # DW_AT_stmt_list
+ .byte 23 # DW_FORM_sec_offset
+ .byte 114 # DW_AT_str_offsets_base
+ .byte 23 # DW_FORM_sec_offset
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 2 # Abbreviation Code
+ .byte 57 # DW_TAG_namespace
+ .byte 1 # DW_CHILDREN_yes
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 3 # Abbreviation Code
+ .byte 2 # DW_TAG_class_type
+ .byte 1 # DW_CHILDREN_yes
+ .byte 54 # DW_AT_calling_convention
+ .byte 11 # DW_FORM_data1
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 11 # DW_AT_byte_size
+ .byte 11 # DW_FORM_data1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 4 # Abbreviation Code
+ .byte 46 # DW_TAG_subprogram
+ .byte 1 # DW_CHILDREN_yes
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 60 # DW_AT_declaration
+ .byte 25 # DW_FORM_flag_present
+ .byte 63 # DW_AT_external
+ .byte 25 # DW_FORM_flag_present
+ .byte 50 # DW_AT_accessibility
+ .byte 11 # DW_FORM_data1
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 5 # Abbreviation Code
+ .byte 5 # DW_TAG_formal_parameter
+ .byte 0 # DW_CHILDREN_no
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 52 # DW_AT_artificial
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 6 # Abbreviation Code
+ .byte 5 # DW_TAG_formal_parameter
+ .byte 0 # DW_CHILDREN_no
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 7 # Abbreviation Code
+ .byte 2 # DW_TAG_class_type
+ .byte 0 # DW_CHILDREN_no
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 60 # DW_AT_declaration
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 8 # Abbreviation Code
+ .byte 15 # DW_TAG_pointer_type
+ .byte 0 # DW_CHILDREN_no
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 9 # Abbreviation Code
+ .byte 17 # DW_TAG_compile_unit
+ .byte 1 # DW_CHILDREN_yes
+ .byte 37 # DW_AT_producer
+ .byte 37 # DW_FORM_strx1
+ .byte 19 # DW_AT_language
+ .byte 5 # DW_FORM_data2
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 114 # DW_AT_str_offsets_base
+ .byte 23 # DW_FORM_sec_offset
+ .byte 16 # DW_AT_stmt_list
+ .byte 23 # DW_FORM_sec_offset
+ .byte 27 # DW_AT_comp_dir
+ .byte 37 # DW_FORM_strx1
+ .byte 17 # DW_AT_low_pc
+ .byte 1 # DW_FORM_addr
+ .byte 85 # DW_AT_ranges
+ .byte 35 # DW_FORM_rnglistx
+ .byte 115 # DW_AT_addr_base
+ .byte 23 # DW_FORM_sec_offset
+ .byte 116 # DW_AT_rnglists_base
+ .byte 23 # DW_FORM_sec_offset
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 10 # Abbreviation Code
+ .byte 2 # DW_TAG_class_type
+ .byte 1 # DW_CHILDREN_yes
+ .byte 60 # DW_AT_declaration
+ .byte 25 # DW_FORM_flag_present
+ .byte 105 # DW_AT_signature
+ .byte 32 # DW_FORM_ref_sig8
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 11 # Abbreviation Code
+ .byte 46 # DW_TAG_subprogram
+ .byte 1 # DW_CHILDREN_yes
+ .byte 17 # DW_AT_low_pc
+ .byte 27 # DW_FORM_addrx
+ .byte 18 # DW_AT_high_pc
+ .byte 6 # DW_FORM_data4
+ .byte 64 # DW_AT_frame_base
+ .byte 24 # DW_FORM_exprloc
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 63 # DW_AT_external
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 12 # Abbreviation Code
+ .byte 52 # DW_TAG_variable
+ .byte 0 # DW_CHILDREN_no
+ .byte 2 # DW_AT_location
+ .byte 24 # DW_FORM_exprloc
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 13 # Abbreviation Code
+ .byte 46 # DW_TAG_subprogram
+ .byte 1 # DW_CHILDREN_yes
+ .byte 17 # DW_AT_low_pc
+ .byte 27 # DW_FORM_addrx
+ .byte 18 # DW_AT_high_pc
+ .byte 6 # DW_FORM_data4
+ .byte 64 # DW_AT_frame_base
+ .byte 24 # DW_FORM_exprloc
+ .byte 100 # DW_AT_object_pointer
+ .byte 19 # DW_FORM_ref4
+ .byte 110 # DW_AT_linkage_name
+ .byte 37 # DW_FORM_strx1
+ .byte 71 # DW_AT_specification
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 14 # Abbreviation Code
+ .byte 5 # DW_TAG_formal_parameter
+ .byte 0 # DW_CHILDREN_no
+ .byte 2 # DW_AT_location
+ .byte 24 # DW_FORM_exprloc
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 52 # DW_AT_artificial
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 15 # Abbreviation Code
+ .byte 36 # DW_TAG_base_type
+ .byte 0 # DW_CHILDREN_no
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 62 # DW_AT_encoding
+ .byte 11 # DW_FORM_data1
+ .byte 11 # DW_AT_byte_size
+ .byte 11 # DW_FORM_data1
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 0 # EOM(3)
+ .section .debug_info,"",@progbits
+.Lcu_begin0:
+ .long .Ldebug_info_end1-.Ldebug_info_start1 # Length of Unit
+.Ldebug_info_start1:
+ .short 5 # DWARF version number
+ .byte 1 # DWARF Unit Type
+ .byte 8 # Address Size (in bytes)
+ .long .debug_abbrev # Offset Into Abbrev. Section
+ .byte 9 # Abbrev [9] 0xc:0x7f DW_TAG_compile_unit
+ .byte 0 # DW_AT_producer
+ .short 33 # DW_AT_language
+ .byte 1 # DW_AT_name
+ .long .Lstr_offsets_base0 # DW_AT_str_offsets_base
+ .long .Lline_table_start0 # DW_AT_stmt_list
+ .byte 2 # DW_AT_comp_dir
+ .quad 0 # DW_AT_low_pc
+ .byte 0 # DW_AT_ranges
+ .long .Laddr_table_base0 # DW_AT_addr_base
+ .long .Lrnglists_table_base0 # DW_AT_rnglists_base
+ .byte 2 # Abbrev [2] 0x2b:0x1b DW_TAG_namespace
+ .byte 3 # DW_AT_name
+ .byte 2 # Abbrev [2] 0x2d:0x18 DW_TAG_namespace
+ .byte 4 # DW_AT_name
+ .byte 10 # Abbrev [10] 0x2f:0x15 DW_TAG_class_type
+ # DW_AT_declaration
+ .quad -1782593539102989756 # DW_AT_signature
+ .byte 4 # Abbrev [4] 0x38:0xb DW_TAG_subprogram
+ .byte 5 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 8 # DW_AT_decl_line
+ # DW_AT_declaration
+ # DW_AT_external
+ .byte 1 # DW_AT_accessibility
+ # DW_ACCESS_public
+ .byte 5 # Abbrev [5] 0x3d:0x5 DW_TAG_formal_parameter
+ .long 97 # DW_AT_type
+ # DW_AT_artificial
+ .byte 0 # End Of Children Mark
+ .byte 0 # End Of Children Mark
+ .byte 0 # End Of Children Mark
+ .byte 0 # End Of Children Mark
+ .byte 11 # Abbrev [11] 0x46:0x1b DW_TAG_subprogram
+ .byte 0 # DW_AT_low_pc
+ .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc
+ .byte 1 # DW_AT_frame_base
+ .byte 86
+ .byte 7 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 14 # DW_AT_decl_line
+ .long 129 # DW_AT_type
+ # DW_AT_external
+ .byte 12 # Abbrev [12] 0x55:0xb DW_TAG_variable
+ .byte 2 # DW_AT_location
+ .byte 145
+ .byte 123
+ .byte 10 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 15 # DW_AT_decl_line
+ .long 47 # DW_AT_type
+ .byte 0 # End Of Children Mark
+ .byte 8 # Abbrev [8] 0x61:0x5 DW_TAG_pointer_type
+ .long 47 # DW_AT_type
+ .byte 13 # Abbrev [13] 0x66:0x1b DW_TAG_subprogram
+ .byte 1 # DW_AT_low_pc
+ .long .Lfunc_end1-.Lfunc_begin1 # DW_AT_high_pc
+ .byte 1 # DW_AT_frame_base
+ .byte 86
+ .long 119 # DW_AT_object_pointer
+ .byte 9 # DW_AT_linkage_name
+ .long 56 # DW_AT_specification
+ .byte 14 # Abbrev [14] 0x77:0x9 DW_TAG_formal_parameter
+ .byte 2 # DW_AT_location
+ .byte 145
+ .byte 120
+ .byte 11 # DW_AT_name
+ .long 133 # DW_AT_type
+ # DW_AT_artificial
+ .byte 0 # End Of Children Mark
+ .byte 15 # Abbrev [15] 0x81:0x4 DW_TAG_base_type
+ .byte 8 # DW_AT_name
+ .byte 5 # DW_AT_encoding
+ .byte 4 # DW_AT_byte_size
+ .byte 8 # Abbrev [8] 0x85:0x5 DW_TAG_pointer_type
+ .long 47 # DW_AT_type
+ .byte 0 # End Of Children Mark
+.Ldebug_info_end1:
+ .section .debug_rnglists,"",@progbits
+ .long .Ldebug_list_header_end0-.Ldebug_list_header_start0 # Length
+.Ldebug_list_header_start0:
+ .short 5 # Version
+ .byte 8 # Address size
+ .byte 0 # Segment selector size
+ .long 1 # Offset entry count
+.Lrnglists_table_base0:
+ .long .Ldebug_ranges0-.Lrnglists_table_base0
+.Ldebug_ranges0:
+ .byte 3 # DW_RLE_startx_length
+ .byte 0 # start index
+ .uleb128 .Lfunc_end0-.Lfunc_begin0 # length
+ .byte 3 # DW_RLE_startx_length
+ .byte 1 # start index
+ .uleb128 .Lfunc_end1-.Lfunc_begin1 # length
+ .byte 0 # DW_RLE_end_of_list
+.Ldebug_list_header_end0:
+ .section .debug_str_offsets,"",@progbits
+ .long 52 # Length of String Offsets Set
+ .short 5
+ .short 0
+.Lstr_offsets_base0:
+ .section .debug_str,"MS",@progbits,1
+.Linfo_string0:
+ .asciz "clang version 19.0.0git" # string offset=0
+.Linfo_string1:
+ .asciz "main.cpp" # string offset=24
+.Linfo_string2:
+ .asciz "/home/ayermolo/local/tasks/T190087639/DW_TAG_class_type" # string offset=33
+.Linfo_string3:
+ .asciz "A" # string offset=89
+.Linfo_string4:
+ .asciz "B" # string offset=91
+.Linfo_string5:
+ .asciz "State" # string offset=93
+.Linfo_string6:
+ .asciz "InnerState" # string offset=99
+.Linfo_string7:
+ .asciz "main" # string offset=110
+.Linfo_string8:
+ .asciz "_ZN1A1B5StateC2Ev" # string offset=115
+.Linfo_string9:
+ .asciz "int" # string offset=133
+.Linfo_string10:
+ .asciz "S" # string offset=137
+.Linfo_string11:
+ .asciz "this" # string offset=139
+ .section .debug_str_offsets,"",@progbits
+ .long .Linfo_string0
+ .long .Linfo_string1
+ .long .Linfo_string2
+ .long .Linfo_string3
+ .long .Linfo_string4
+ .long .Linfo_string5
+ .long .Linfo_string6
+ .long .Linfo_string7
+ .long .Linfo_string9
+ .long .Linfo_string8
+ .long .Linfo_string10
+ .long .Linfo_string11
+ .section .debug_addr,"",@progbits
+ .long .Ldebug_addr_end0-.Ldebug_addr_start0 # Length of contribution
+.Ldebug_addr_start0:
+ .short 5 # DWARF version number
+ .byte 8 # Address size
+ .byte 0 # Segment selector size
+.Laddr_table_base0:
+ .quad .Lfunc_begin0
+ .quad .Lfunc_begin1
+.Ldebug_addr_end0:
+ .section .debug_names,"",@progbits
+ .long .Lnames_end0-.Lnames_start0 # Header: unit length
+.Lnames_start0:
+ .short 5 # Header: version
+ .short 0 # Header: padding
+ .long 1 # Header: compilation unit count
+ .long 1 # Header: local type unit count
+ .long 0 # Header: foreign type unit count
+ .long 6 # Header: bucket count
+ .long 6 # Header: name count
+ .long .Lnames_abbrev_end0-.Lnames_abbrev_start0 # Header: abbreviation table size
+ .long 8 # Header: augmentation string size
+ .ascii "LLVM0700" # Header: augmentation string
+ .long .Lcu_begin0 # Compilation unit 0
+ .long .Ltu_begin0 # Type unit 0
+ .long 0 # Bucket 0
+ .long 0 # Bucket 1
+ .long 1 # Bucket 2
+ .long 2 # Bucket 3
+ .long 3 # Bucket 4
+ .long 6 # Bucket 5
+ .long 193495088 # Hash in Bucket 2
+ .long 1059643959 # Hash in Bucket 3
+ .long 177670 # Hash in Bucket 4
+ .long 274811398 # Hash in Bucket 4
+ .long 2090499946 # Hash in Bucket 4
+ .long 177671 # Hash in Bucket 5
+ .long .Linfo_string9 # String in Bucket 2: int
+ .long .Linfo_string8 # String in Bucket 3: _ZN1A1B5StateC2Ev
+ .long .Linfo_string3 # String in Bucket 4: A
+ .long .Linfo_string5 # String in Bucket 4: State
+ .long .Linfo_string7 # String in Bucket 4: main
+ .long .Linfo_string4 # String in Bucket 5: B
+ .long .Lnames5-.Lnames_entries0 # Offset in Bucket 2
+ .long .Lnames4-.Lnames_entries0 # Offset in Bucket 3
+ .long .Lnames0-.Lnames_entries0 # Offset in Bucket 4
+ .long .Lnames2-.Lnames_entries0 # Offset in Bucket 4
+ .long .Lnames3-.Lnames_entries0 # Offset in Bucket 4
+ .long .Lnames1-.Lnames_entries0 # Offset in Bucket 5
+.Lnames_abbrev_start0:
+ .byte 1 # Abbrev code
+ .byte 36 # DW_TAG_base_type
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 2 # Abbrev code
+ .byte 46 # DW_TAG_subprogram
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 3 # Abbrev code
+ .byte 57 # DW_TAG_namespace
+ .byte 2 # DW_IDX_type_unit
+ .byte 11 # DW_FORM_data1
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 4 # Abbrev code
+ .byte 57 # DW_TAG_namespace
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 5 # Abbrev code
+ .byte 2 # DW_TAG_class_type
+ .byte 2 # DW_IDX_type_unit
+ .byte 11 # DW_FORM_data1
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 6 # Abbrev code
+ .byte 57 # DW_TAG_namespace
+ .byte 2 # DW_IDX_type_unit
+ .byte 11 # DW_FORM_data1
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 7 # Abbrev code
+ .byte 57 # DW_TAG_namespace
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev list
+.Lnames_abbrev_end0:
+.Lnames_entries0:
+.Lnames5:
+.L2:
+ .byte 1 # Abbreviation code
+ .long 129 # DW_IDX_die_offset
+ .byte 0 # DW_IDX_parent
+ # End of list: int
+.Lnames4:
+.L3:
+ .byte 2 # Abbreviation code
+ .long 102 # DW_IDX_die_offset
+ .byte 0 # DW_IDX_parent
+ # End of list: _ZN1A1B5StateC2Ev
+.Lnames0:
+.L4:
+ .byte 3 # Abbreviation code
+ .byte 0 # DW_IDX_type_unit
+ .long 35 # DW_IDX_die_offset
+.L7: # DW_IDX_parent
+ .byte 4 # Abbreviation code
+ .long 43 # DW_IDX_die_offset
+ .byte 0 # DW_IDX_parent
+ # End of list: A
+.Lnames2:
+.L1:
+ .byte 5 # Abbreviation code
+ .byte 0 # DW_IDX_type_unit
+ .long 39 # DW_IDX_die_offset
+ .long .L5-.Lnames_entries0 # DW_IDX_parent
+ .byte 2 # Abbreviation code
+ .long 102 # DW_IDX_die_offset
+ .byte 0 # DW_IDX_parent
+ # End of list: State
+.Lnames3:
+.L0:
+ .byte 2 # Abbreviation code
+ .long 70 # DW_IDX_die_offset
+ .byte 0 # DW_IDX_parent
+ # End of list: main
+.Lnames1:
+.L5:
+ .byte 6 # Abbreviation code
+ .byte 0 # DW_IDX_type_unit
+ .long 37 # DW_IDX_die_offset
+ .long .L4-.Lnames_entries0 # DW_IDX_parent
+.L6:
+ .byte 7 # Abbreviation code
+ .long 45 # DW_IDX_die_offset
+ .long .L7-.Lnames_entries0 # DW_IDX_parent
+ .byte 0 # End of list: B
+ .p2align 2, 0x0
+.Lnames_end0:
+ .ident "clang version 19.0.0git"
+ .section ".note.GNU-stack","",@progbits
+ .addrsig
+ .section .debug_line,"",@progbits
+.Lline_table_start0:
diff --git a/bolt/test/X86/dwarf5-debug-names-enumeration-type-decl.s b/bolt/test/X86/dwarf5-debug-names-enumeration-type-decl.s
new file mode 100644
index 000000000000..031175763d79
--- /dev/null
+++ b/bolt/test/X86/dwarf5-debug-names-enumeration-type-decl.s
@@ -0,0 +1,485 @@
+# REQUIRES: system-linux
+
+# RUN: llvm-mc -dwarf-version=5 -filetype=obj -triple x86_64-unknown-linux %s -o %t1.o
+# RUN: %clang %cflags -dwarf-5 %t1.o -o %t.exe -Wl,-q
+# RUN: llvm-bolt %t.exe -o %t.bolt --update-debug-sections
+# RUN: llvm-dwarfdump --show-form --verbose --debug-info %t.bolt > %t.txt
+# RUN: llvm-dwarfdump --show-form --verbose --debug-names %t.bolt >> %t.txt
+# RUN: cat %t.txt | FileCheck --check-prefix=POSTCHECK %s
+
+## This tests that BOLT doesn't generate an entry for a DW_TAG_enumeration_type declaration with DW_AT_name.
+
+# POSTCHECK: DW_TAG_type_unit
+# POSTCHECK: DW_TAG_enumeration_type [6]
+# POSTCHECK-NEXT: DW_AT_name [DW_FORM_strx1] (indexed (00000009) string = "InnerState")
+# POSTCHECK-NEXT: DW_AT_byte_size [DW_FORM_data1] (0x04)
+# POSTCHECK-NEXT: DW_AT_declaration [DW_FORM_flag_present] (true)
+# POSTCHECK: Name Index
+# POSTCHECK-NOT: "InnerState"
+
+## -g2 -O0 -fdebug-types-section -gpubnames
+## namespace B {
+## template <typename Task>
+## class State {
+## public:
+## enum class InnerState { STATE0 };
+## InnerState St;
+## };
+## }
+##
+## int main() {
+## B::State<int> S;
+## return 0;
+## }
+
+ .text
+ .file "main.cpp"
+ .globl main # -- Begin function main
+ .p2align 4, 0x90
+ .type main,@function
+main: # @main
+.Lfunc_begin0:
+ .file 0 "/DW_TAG_enumeration_type" "main.cpp" md5 0x2e8962f8ef4bf6eb6f8bd92966c0848b
+ .loc 0 10 0 # main.cpp:10:0
+ .cfi_startproc
+# %bb.0: # %entry
+ pushq %rbp
+ .cfi_def_cfa_offset 16
+ .cfi_offset %rbp, -16
+ movq %rsp, %rbp
+ .cfi_def_cfa_register %rbp
+ movl $0, -4(%rbp)
+.Ltmp0:
+ .loc 0 12 3 prologue_end # main.cpp:12:3
+ xorl %eax, %eax
+ .loc 0 12 3 epilogue_begin is_stmt 0 # main.cpp:12:3
+ popq %rbp
+ .cfi_def_cfa %rsp, 8
+ retq
+.Ltmp1:
+.Lfunc_end0:
+ .size main, .Lfunc_end0-main
+ .cfi_endproc
+ # -- End function
+ .section .debug_info,"G",@progbits,8822129917070965541,comdat
+.Ltu_begin0:
+ .long .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit
+.Ldebug_info_start0:
+ .short 5 # DWARF version number
+ .byte 2 # DWARF Unit Type
+ .byte 8 # Address Size (in bytes)
+ .long .debug_abbrev # Offset Into Abbrev. Section
+ .quad 8822129917070965541 # Type Signature
+ .long 37 # Type DIE Offset
+ .byte 1 # Abbrev [1] 0x18:0x2d DW_TAG_type_unit
+ .short 33 # DW_AT_language
+ .long .Lline_table_start0 # DW_AT_stmt_list
+ .long .Lstr_offsets_base0 # DW_AT_str_offsets_base
+ .byte 2 # Abbrev [2] 0x23:0x1d DW_TAG_namespace
+ .byte 6 # DW_AT_name
+ .byte 3 # Abbrev [3] 0x25:0x1a DW_TAG_class_type
+ .byte 5 # DW_AT_calling_convention
+ .byte 10 # DW_AT_name
+ .byte 4 # DW_AT_byte_size
+ .byte 0 # DW_AT_decl_file
+ .byte 3 # DW_AT_decl_line
+ .byte 4 # Abbrev [4] 0x2b:0x6 DW_TAG_template_type_parameter
+ .long 64 # DW_AT_type
+ .byte 7 # DW_AT_name
+ .byte 5 # Abbrev [5] 0x31:0xa DW_TAG_member
+ .byte 8 # DW_AT_name
+ .long 59 # DW_AT_type
+ .byte 0 # DW_AT_decl_file
+ .byte 6 # DW_AT_decl_line
+ .byte 0 # DW_AT_data_member_location
+ .byte 1 # DW_AT_accessibility
+ # DW_ACCESS_public
+ .byte 6 # Abbrev [6] 0x3b:0x3 DW_TAG_enumeration_type
+ .byte 9 # DW_AT_name
+ .byte 4 # DW_AT_byte_size
+ # DW_AT_declaration
+ .byte 0 # End Of Children Mark
+ .byte 0 # End Of Children Mark
+ .byte 7 # Abbrev [7] 0x40:0x4 DW_TAG_base_type
+ .byte 4 # DW_AT_name
+ .byte 5 # DW_AT_encoding
+ .byte 4 # DW_AT_byte_size
+ .byte 0 # End Of Children Mark
+.Ldebug_info_end0:
+ .section .debug_abbrev,"",@progbits
+ .byte 1 # Abbreviation Code
+ .byte 65 # DW_TAG_type_unit
+ .byte 1 # DW_CHILDREN_yes
+ .byte 19 # DW_AT_language
+ .byte 5 # DW_FORM_data2
+ .byte 16 # DW_AT_stmt_list
+ .byte 23 # DW_FORM_sec_offset
+ .byte 114 # DW_AT_str_offsets_base
+ .byte 23 # DW_FORM_sec_offset
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 2 # Abbreviation Code
+ .byte 57 # DW_TAG_namespace
+ .byte 1 # DW_CHILDREN_yes
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 3 # Abbreviation Code
+ .byte 2 # DW_TAG_class_type
+ .byte 1 # DW_CHILDREN_yes
+ .byte 54 # DW_AT_calling_convention
+ .byte 11 # DW_FORM_data1
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 11 # DW_AT_byte_size
+ .byte 11 # DW_FORM_data1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 4 # Abbreviation Code
+ .byte 47 # DW_TAG_template_type_parameter
+ .byte 0 # DW_CHILDREN_no
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 5 # Abbreviation Code
+ .byte 13 # DW_TAG_member
+ .byte 0 # DW_CHILDREN_no
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 56 # DW_AT_data_member_location
+ .byte 11 # DW_FORM_data1
+ .byte 50 # DW_AT_accessibility
+ .byte 11 # DW_FORM_data1
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 6 # Abbreviation Code
+ .byte 4 # DW_TAG_enumeration_type
+ .byte 0 # DW_CHILDREN_no
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 11 # DW_AT_byte_size
+ .byte 11 # DW_FORM_data1
+ .byte 60 # DW_AT_declaration
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 7 # Abbreviation Code
+ .byte 36 # DW_TAG_base_type
+ .byte 0 # DW_CHILDREN_no
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 62 # DW_AT_encoding
+ .byte 11 # DW_FORM_data1
+ .byte 11 # DW_AT_byte_size
+ .byte 11 # DW_FORM_data1
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 8 # Abbreviation Code
+ .byte 17 # DW_TAG_compile_unit
+ .byte 1 # DW_CHILDREN_yes
+ .byte 37 # DW_AT_producer
+ .byte 37 # DW_FORM_strx1
+ .byte 19 # DW_AT_language
+ .byte 5 # DW_FORM_data2
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 114 # DW_AT_str_offsets_base
+ .byte 23 # DW_FORM_sec_offset
+ .byte 16 # DW_AT_stmt_list
+ .byte 23 # DW_FORM_sec_offset
+ .byte 27 # DW_AT_comp_dir
+ .byte 37 # DW_FORM_strx1
+ .byte 17 # DW_AT_low_pc
+ .byte 27 # DW_FORM_addrx
+ .byte 18 # DW_AT_high_pc
+ .byte 6 # DW_FORM_data4
+ .byte 115 # DW_AT_addr_base
+ .byte 23 # DW_FORM_sec_offset
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 9 # Abbreviation Code
+ .byte 46 # DW_TAG_subprogram
+ .byte 1 # DW_CHILDREN_yes
+ .byte 17 # DW_AT_low_pc
+ .byte 27 # DW_FORM_addrx
+ .byte 18 # DW_AT_high_pc
+ .byte 6 # DW_FORM_data4
+ .byte 64 # DW_AT_frame_base
+ .byte 24 # DW_FORM_exprloc
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 63 # DW_AT_external
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 10 # Abbreviation Code
+ .byte 52 # DW_TAG_variable
+ .byte 0 # DW_CHILDREN_no
+ .byte 2 # DW_AT_location
+ .byte 24 # DW_FORM_exprloc
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 11 # Abbreviation Code
+ .byte 2 # DW_TAG_class_type
+ .byte 0 # DW_CHILDREN_no
+ .byte 60 # DW_AT_declaration
+ .byte 25 # DW_FORM_flag_present
+ .byte 105 # DW_AT_signature
+ .byte 32 # DW_FORM_ref_sig8
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 0 # EOM(3)
+ .section .debug_info,"",@progbits
+.Lcu_begin0:
+ .long .Ldebug_info_end1-.Ldebug_info_start1 # Length of Unit
+.Ldebug_info_start1:
+ .short 5 # DWARF version number
+ .byte 1 # DWARF Unit Type
+ .byte 8 # Address Size (in bytes)
+ .long .debug_abbrev # Offset Into Abbrev. Section
+ .byte 8 # Abbrev [8] 0xc:0x43 DW_TAG_compile_unit
+ .byte 0 # DW_AT_producer
+ .short 33 # DW_AT_language
+ .byte 1 # DW_AT_name
+ .long .Lstr_offsets_base0 # DW_AT_str_offsets_base
+ .long .Lline_table_start0 # DW_AT_stmt_list
+ .byte 2 # DW_AT_comp_dir
+ .byte 0 # DW_AT_low_pc
+ .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc
+ .long .Laddr_table_base0 # DW_AT_addr_base
+ .byte 9 # Abbrev [9] 0x23:0x1b DW_TAG_subprogram
+ .byte 0 # DW_AT_low_pc
+ .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc
+ .byte 1 # DW_AT_frame_base
+ .byte 86
+ .byte 3 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 10 # DW_AT_decl_line
+ .long 62 # DW_AT_type
+ # DW_AT_external
+ .byte 10 # Abbrev [10] 0x32:0xb DW_TAG_variable
+ .byte 2 # DW_AT_location
+ .byte 145
+ .byte 120
+ .byte 5 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 11 # DW_AT_decl_line
+ .long 68 # DW_AT_type
+ .byte 0 # End Of Children Mark
+ .byte 7 # Abbrev [7] 0x3e:0x4 DW_TAG_base_type
+ .byte 4 # DW_AT_name
+ .byte 5 # DW_AT_encoding
+ .byte 4 # DW_AT_byte_size
+ .byte 2 # Abbrev [2] 0x42:0xc DW_TAG_namespace
+ .byte 6 # DW_AT_name
+ .byte 11 # Abbrev [11] 0x44:0x9 DW_TAG_class_type
+ # DW_AT_declaration
+ .quad 8822129917070965541 # DW_AT_signature
+ .byte 0 # End Of Children Mark
+ .byte 0 # End Of Children Mark
+.Ldebug_info_end1:
+ .section .debug_str_offsets,"",@progbits
+ .long 48 # Length of String Offsets Set
+ .short 5
+ .short 0
+.Lstr_offsets_base0:
+ .section .debug_str,"MS",@progbits,1
+.Linfo_string0:
+ .asciz "clang version 19.0.0git" # string offset=0
+.Linfo_string1:
+ .asciz "main.cpp" # string offset=24
+.Linfo_string2:
+ .asciz "/home/ayermolo/local/tasks/T190087639/DW_TAG_enumeration_type" # string offset=33
+.Linfo_string3:
+ .asciz "main" # string offset=95
+.Linfo_string4:
+ .asciz "int" # string offset=100
+.Linfo_string5:
+ .asciz "S" # string offset=104
+.Linfo_string6:
+ .asciz "B" # string offset=106
+.Linfo_string7:
+ .asciz "Task" # string offset=108
+.Linfo_string8:
+ .asciz "St" # string offset=113
+.Linfo_string9:
+ .asciz "InnerState" # string offset=116
+.Linfo_string10:
+ .asciz "State<int>" # string offset=127
+ .section .debug_str_offsets,"",@progbits
+ .long .Linfo_string0
+ .long .Linfo_string1
+ .long .Linfo_string2
+ .long .Linfo_string3
+ .long .Linfo_string4
+ .long .Linfo_string5
+ .long .Linfo_string6
+ .long .Linfo_string7
+ .long .Linfo_string8
+ .long .Linfo_string9
+ .long .Linfo_string10
+ .section .debug_addr,"",@progbits
+ .long .Ldebug_addr_end0-.Ldebug_addr_start0 # Length of contribution
+.Ldebug_addr_start0:
+ .short 5 # DWARF version number
+ .byte 8 # Address size
+ .byte 0 # Segment selector size
+.Laddr_table_base0:
+ .quad .Lfunc_begin0
+.Ldebug_addr_end0:
+ .section .debug_names,"",@progbits
+ .long .Lnames_end0-.Lnames_start0 # Header: unit length
+.Lnames_start0:
+ .short 5 # Header: version
+ .short 0 # Header: padding
+ .long 1 # Header: compilation unit count
+ .long 1 # Header: local type unit count
+ .long 0 # Header: foreign type unit count
+ .long 4 # Header: bucket count
+ .long 4 # Header: name count
+ .long .Lnames_abbrev_end0-.Lnames_abbrev_start0 # Header: abbreviation table size
+ .long 8 # Header: augmentation string size
+ .ascii "LLVM0700" # Header: augmentation string
+ .long .Lcu_begin0 # Compilation unit 0
+ .long .Ltu_begin0 # Type unit 0
+ .long 1 # Bucket 0
+ .long 0 # Bucket 1
+ .long 2 # Bucket 2
+ .long 3 # Bucket 3
+ .long 193495088 # Hash in Bucket 0
+ .long 2090499946 # Hash in Bucket 2
+ .long 177671 # Hash in Bucket 3
+ .long 624407275 # Hash in Bucket 3
+ .long .Linfo_string4 # String in Bucket 0: int
+ .long .Linfo_string3 # String in Bucket 2: main
+ .long .Linfo_string6 # String in Bucket 3: B
+ .long .Linfo_string10 # String in Bucket 3: State<int>
+ .long .Lnames1-.Lnames_entries0 # Offset in Bucket 0
+ .long .Lnames0-.Lnames_entries0 # Offset in Bucket 2
+ .long .Lnames2-.Lnames_entries0 # Offset in Bucket 3
+ .long .Lnames3-.Lnames_entries0 # Offset in Bucket 3
+.Lnames_abbrev_start0:
+ .byte 1 # Abbrev code
+ .byte 36 # DW_TAG_base_type
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 2 # Abbrev code
+ .byte 36 # DW_TAG_base_type
+ .byte 2 # DW_IDX_type_unit
+ .byte 11 # DW_FORM_data1
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 3 # Abbrev code
+ .byte 46 # DW_TAG_subprogram
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 4 # Abbrev code
+ .byte 57 # DW_TAG_namespace
+ .byte 2 # DW_IDX_type_unit
+ .byte 11 # DW_FORM_data1
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 5 # Abbrev code
+ .byte 57 # DW_TAG_namespace
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 6 # Abbrev code
+ .byte 2 # DW_TAG_class_type
+ .byte 2 # DW_IDX_type_unit
+ .byte 11 # DW_FORM_data1
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev list
+.Lnames_abbrev_end0:
+.Lnames_entries0:
+.Lnames1:
+.L0:
+ .byte 1 # Abbreviation code
+ .long 62 # DW_IDX_die_offset
+.L2: # DW_IDX_parent
+ .byte 2 # Abbreviation code
+ .byte 0 # DW_IDX_type_unit
+ .long 64 # DW_IDX_die_offset
+ .byte 0 # DW_IDX_parent
+ # End of list: int
+.Lnames0:
+.L3:
+ .byte 3 # Abbreviation code
+ .long 35 # DW_IDX_die_offset
+ .byte 0 # DW_IDX_parent
+ # End of list: main
+.Lnames2:
+ .byte 4 # Abbreviation code
+ .byte 0 # DW_IDX_type_unit
+ .long 35 # DW_IDX_die_offset
+.L1: # DW_IDX_parent
+ .byte 5 # Abbreviation code
+ .long 66 # DW_IDX_die_offset
+ .byte 0 # DW_IDX_parent
+ # End of list: B
+.Lnames3:
+.L4:
+ .byte 6 # Abbreviation code
+ .byte 0 # DW_IDX_type_unit
+ .long 37 # DW_IDX_die_offset
+ .long .L3-.Lnames_entries0 # DW_IDX_parent
+ .byte 0 # End of list: State<int>
+ .p2align 2, 0x0
+.Lnames_end0:
+ .ident "clang version 19.0.0git"
+ .section ".note.GNU-stack","",@progbits
+ .addrsig
+ .section .debug_line,"",@progbits
+.Lline_table_start0:
diff --git a/bolt/test/X86/dwarf5-debug-names-structure-type-decl.s b/bolt/test/X86/dwarf5-debug-names-structure-type-decl.s
new file mode 100644
index 000000000000..6eb2852c26ba
--- /dev/null
+++ b/bolt/test/X86/dwarf5-debug-names-structure-type-decl.s
@@ -0,0 +1,671 @@
+# REQUIRES: system-linux
+
+# RUN: llvm-mc -dwarf-version=5 -filetype=obj -triple x86_64-unknown-linux %s -o %t1.o
+# RUN: %clang %cflags -dwarf-5 %t1.o -o %t.exe -Wl,-q
+# RUN: llvm-bolt %t.exe -o %t.bolt --update-debug-sections
+# RUN: llvm-dwarfdump --show-form --verbose --debug-info %t.bolt > %t.txt
+# RUN: llvm-dwarfdump --show-form --verbose --debug-names %t.bolt >> %t.txt
+# RUN: cat %t.txt | FileCheck --check-prefix=POSTCHECK %s
+
+## This tests that BOLT doesn't generate an entry for a DW_TAG_structure_type declaration with DW_AT_name.
+
+# POSTCHECK: DW_TAG_type_unit
+# POSTCHECK: DW_TAG_structure_type [7]
+# POSTCHECK-NEXT: DW_AT_name [DW_FORM_strx1] (indexed (00000006) string = "InnerState")
+# POSTCHECK-NEXT: DW_AT_declaration [DW_FORM_flag_present] (true)
+# POSTCHECK: Name Index
+# POSTCHECK-NOT: "InnerState"
+
+## -g2 -O0 -fdebug-types-section -gpubnames
+## namespace A {
+## namespace B {
+## class State {
+## public:
+## struct InnerState{
+## InnerState() {}
+## };
+## State(){}
+## State(InnerState S){}
+## };
+## }
+## }
+##
+## int main() {
+## A::B::State S;
+## return 0;
+## }
+
+
+ .text
+ .file "main.cpp"
+ .file 0 "/DW_TAG_structure_type" "main.cpp" md5 0xd43ba503b70d00353c195087e1fe16e2
+ .section .debug_info,"G",@progbits,16664150534606561860,comdat
+.Ltu_begin0:
+ .long .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit
+.Ldebug_info_start0:
+ .short 5 # DWARF version number
+ .byte 2 # DWARF Unit Type
+ .byte 8 # Address Size (in bytes)
+ .long .debug_abbrev # Offset Into Abbrev. Section
+ .quad -1782593539102989756 # Type Signature
+ .long 39 # Type DIE Offset
+ .byte 1 # Abbrev [1] 0x18:0x3b DW_TAG_type_unit
+ .short 33 # DW_AT_language
+ .long .Lline_table_start0 # DW_AT_stmt_list
+ .long .Lstr_offsets_base0 # DW_AT_str_offsets_base
+ .byte 2 # Abbrev [2] 0x23:0x2a DW_TAG_namespace
+ .byte 3 # DW_AT_name
+ .byte 2 # Abbrev [2] 0x25:0x27 DW_TAG_namespace
+ .byte 4 # DW_AT_name
+ .byte 3 # Abbrev [3] 0x27:0x24 DW_TAG_class_type
+ .byte 5 # DW_AT_calling_convention
+ .byte 5 # DW_AT_name
+ .byte 1 # DW_AT_byte_size
+ .byte 0 # DW_AT_decl_file
+ .byte 3 # DW_AT_decl_line
+ .byte 4 # Abbrev [4] 0x2d:0xb DW_TAG_subprogram
+ .byte 5 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 8 # DW_AT_decl_line
+ # DW_AT_declaration
+ # DW_AT_external
+ .byte 1 # DW_AT_accessibility
+ # DW_ACCESS_public
+ .byte 5 # Abbrev [5] 0x32:0x5 DW_TAG_formal_parameter
+ .long 77 # DW_AT_type
+ # DW_AT_artificial
+ .byte 0 # End Of Children Mark
+ .byte 4 # Abbrev [4] 0x38:0x10 DW_TAG_subprogram
+ .byte 5 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 9 # DW_AT_decl_line
+ # DW_AT_declaration
+ # DW_AT_external
+ .byte 1 # DW_AT_accessibility
+ # DW_ACCESS_public
+ .byte 5 # Abbrev [5] 0x3d:0x5 DW_TAG_formal_parameter
+ .long 77 # DW_AT_type
+ # DW_AT_artificial
+ .byte 6 # Abbrev [6] 0x42:0x5 DW_TAG_formal_parameter
+ .long 72 # DW_AT_type
+ .byte 0 # End Of Children Mark
+ .byte 7 # Abbrev [7] 0x48:0x2 DW_TAG_structure_type
+ .byte 6 # DW_AT_name
+ # DW_AT_declaration
+ .byte 0 # End Of Children Mark
+ .byte 0 # End Of Children Mark
+ .byte 0 # End Of Children Mark
+ .byte 8 # Abbrev [8] 0x4d:0x5 DW_TAG_pointer_type
+ .long 39 # DW_AT_type
+ .byte 0 # End Of Children Mark
+.Ldebug_info_end0:
+ .text
+ .globl main # -- Begin function main
+ .p2align 4, 0x90
+ .type main,@function
+main: # @main
+.Lfunc_begin0:
+ .loc 0 14 0 # main.cpp:14:0
+ .cfi_startproc
+# %bb.0: # %entry
+ pushq %rbp
+ .cfi_def_cfa_offset 16
+ .cfi_offset %rbp, -16
+ movq %rsp, %rbp
+ .cfi_def_cfa_register %rbp
+ subq $16, %rsp
+ movl $0, -4(%rbp)
+.Ltmp0:
+ .loc 0 15 15 prologue_end # main.cpp:15:15
+ leaq -5(%rbp), %rdi
+ callq _ZN1A1B5StateC2Ev
+ .loc 0 16 3 # main.cpp:16:3
+ xorl %eax, %eax
+ .loc 0 16 3 epilogue_begin is_stmt 0 # main.cpp:16:3
+ addq $16, %rsp
+ popq %rbp
+ .cfi_def_cfa %rsp, 8
+ retq
+.Ltmp1:
+.Lfunc_end0:
+ .size main, .Lfunc_end0-main
+ .cfi_endproc
+ # -- End function
+ .section .text._ZN1A1B5StateC2Ev,"axG",@progbits,_ZN1A1B5StateC2Ev,comdat
+ .weak _ZN1A1B5StateC2Ev # -- Begin function _ZN1A1B5StateC2Ev
+ .p2align 4, 0x90
+ .type _ZN1A1B5StateC2Ev,@function
+_ZN1A1B5StateC2Ev: # @_ZN1A1B5StateC2Ev
+.Lfunc_begin1:
+ .loc 0 8 0 is_stmt 1 # main.cpp:8:0
+ .cfi_startproc
+# %bb.0: # %entry
+ pushq %rbp
+ .cfi_def_cfa_offset 16
+ .cfi_offset %rbp, -16
+ movq %rsp, %rbp
+ .cfi_def_cfa_register %rbp
+ movq %rdi, -8(%rbp)
+.Ltmp2:
+ .loc 0 8 15 prologue_end epilogue_begin # main.cpp:8:15
+ popq %rbp
+ .cfi_def_cfa %rsp, 8
+ retq
+.Ltmp3:
+.Lfunc_end1:
+ .size _ZN1A1B5StateC2Ev, .Lfunc_end1-_ZN1A1B5StateC2Ev
+ .cfi_endproc
+ # -- End function
+ .section .debug_abbrev,"",@progbits
+ .byte 1 # Abbreviation Code
+ .byte 65 # DW_TAG_type_unit
+ .byte 1 # DW_CHILDREN_yes
+ .byte 19 # DW_AT_language
+ .byte 5 # DW_FORM_data2
+ .byte 16 # DW_AT_stmt_list
+ .byte 23 # DW_FORM_sec_offset
+ .byte 114 # DW_AT_str_offsets_base
+ .byte 23 # DW_FORM_sec_offset
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 2 # Abbreviation Code
+ .byte 57 # DW_TAG_namespace
+ .byte 1 # DW_CHILDREN_yes
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 3 # Abbreviation Code
+ .byte 2 # DW_TAG_class_type
+ .byte 1 # DW_CHILDREN_yes
+ .byte 54 # DW_AT_calling_convention
+ .byte 11 # DW_FORM_data1
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 11 # DW_AT_byte_size
+ .byte 11 # DW_FORM_data1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 4 # Abbreviation Code
+ .byte 46 # DW_TAG_subprogram
+ .byte 1 # DW_CHILDREN_yes
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 60 # DW_AT_declaration
+ .byte 25 # DW_FORM_flag_present
+ .byte 63 # DW_AT_external
+ .byte 25 # DW_FORM_flag_present
+ .byte 50 # DW_AT_accessibility
+ .byte 11 # DW_FORM_data1
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 5 # Abbreviation Code
+ .byte 5 # DW_TAG_formal_parameter
+ .byte 0 # DW_CHILDREN_no
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 52 # DW_AT_artificial
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 6 # Abbreviation Code
+ .byte 5 # DW_TAG_formal_parameter
+ .byte 0 # DW_CHILDREN_no
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 7 # Abbreviation Code
+ .byte 19 # DW_TAG_structure_type
+ .byte 0 # DW_CHILDREN_no
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 60 # DW_AT_declaration
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 8 # Abbreviation Code
+ .byte 15 # DW_TAG_pointer_type
+ .byte 0 # DW_CHILDREN_no
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 9 # Abbreviation Code
+ .byte 17 # DW_TAG_compile_unit
+ .byte 1 # DW_CHILDREN_yes
+ .byte 37 # DW_AT_producer
+ .byte 37 # DW_FORM_strx1
+ .byte 19 # DW_AT_language
+ .byte 5 # DW_FORM_data2
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 114 # DW_AT_str_offsets_base
+ .byte 23 # DW_FORM_sec_offset
+ .byte 16 # DW_AT_stmt_list
+ .byte 23 # DW_FORM_sec_offset
+ .byte 27 # DW_AT_comp_dir
+ .byte 37 # DW_FORM_strx1
+ .byte 17 # DW_AT_low_pc
+ .byte 1 # DW_FORM_addr
+ .byte 85 # DW_AT_ranges
+ .byte 35 # DW_FORM_rnglistx
+ .byte 115 # DW_AT_addr_base
+ .byte 23 # DW_FORM_sec_offset
+ .byte 116 # DW_AT_rnglists_base
+ .byte 23 # DW_FORM_sec_offset
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 10 # Abbreviation Code
+ .byte 2 # DW_TAG_class_type
+ .byte 1 # DW_CHILDREN_yes
+ .byte 60 # DW_AT_declaration
+ .byte 25 # DW_FORM_flag_present
+ .byte 105 # DW_AT_signature
+ .byte 32 # DW_FORM_ref_sig8
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 11 # Abbreviation Code
+ .byte 46 # DW_TAG_subprogram
+ .byte 1 # DW_CHILDREN_yes
+ .byte 17 # DW_AT_low_pc
+ .byte 27 # DW_FORM_addrx
+ .byte 18 # DW_AT_high_pc
+ .byte 6 # DW_FORM_data4
+ .byte 64 # DW_AT_frame_base
+ .byte 24 # DW_FORM_exprloc
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 63 # DW_AT_external
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 12 # Abbreviation Code
+ .byte 52 # DW_TAG_variable
+ .byte 0 # DW_CHILDREN_no
+ .byte 2 # DW_AT_location
+ .byte 24 # DW_FORM_exprloc
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 13 # Abbreviation Code
+ .byte 46 # DW_TAG_subprogram
+ .byte 1 # DW_CHILDREN_yes
+ .byte 17 # DW_AT_low_pc
+ .byte 27 # DW_FORM_addrx
+ .byte 18 # DW_AT_high_pc
+ .byte 6 # DW_FORM_data4
+ .byte 64 # DW_AT_frame_base
+ .byte 24 # DW_FORM_exprloc
+ .byte 100 # DW_AT_object_pointer
+ .byte 19 # DW_FORM_ref4
+ .byte 110 # DW_AT_linkage_name
+ .byte 37 # DW_FORM_strx1
+ .byte 71 # DW_AT_specification
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 14 # Abbreviation Code
+ .byte 5 # DW_TAG_formal_parameter
+ .byte 0 # DW_CHILDREN_no
+ .byte 2 # DW_AT_location
+ .byte 24 # DW_FORM_exprloc
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 52 # DW_AT_artificial
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 15 # Abbreviation Code
+ .byte 36 # DW_TAG_base_type
+ .byte 0 # DW_CHILDREN_no
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 62 # DW_AT_encoding
+ .byte 11 # DW_FORM_data1
+ .byte 11 # DW_AT_byte_size
+ .byte 11 # DW_FORM_data1
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 0 # EOM(3)
+ .section .debug_info,"",@progbits
+.Lcu_begin0:
+ .long .Ldebug_info_end1-.Ldebug_info_start1 # Length of Unit
+.Ldebug_info_start1:
+ .short 5 # DWARF version number
+ .byte 1 # DWARF Unit Type
+ .byte 8 # Address Size (in bytes)
+ .long .debug_abbrev # Offset Into Abbrev. Section
+ .byte 9 # Abbrev [9] 0xc:0x7f DW_TAG_compile_unit
+ .byte 0 # DW_AT_producer
+ .short 33 # DW_AT_language
+ .byte 1 # DW_AT_name
+ .long .Lstr_offsets_base0 # DW_AT_str_offsets_base
+ .long .Lline_table_start0 # DW_AT_stmt_list
+ .byte 2 # DW_AT_comp_dir
+ .quad 0 # DW_AT_low_pc
+ .byte 0 # DW_AT_ranges
+ .long .Laddr_table_base0 # DW_AT_addr_base
+ .long .Lrnglists_table_base0 # DW_AT_rnglists_base
+ .byte 2 # Abbrev [2] 0x2b:0x1b DW_TAG_namespace
+ .byte 3 # DW_AT_name
+ .byte 2 # Abbrev [2] 0x2d:0x18 DW_TAG_namespace
+ .byte 4 # DW_AT_name
+ .byte 10 # Abbrev [10] 0x2f:0x15 DW_TAG_class_type
+ # DW_AT_declaration
+ .quad -1782593539102989756 # DW_AT_signature
+ .byte 4 # Abbrev [4] 0x38:0xb DW_TAG_subprogram
+ .byte 5 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 8 # DW_AT_decl_line
+ # DW_AT_declaration
+ # DW_AT_external
+ .byte 1 # DW_AT_accessibility
+ # DW_ACCESS_public
+ .byte 5 # Abbrev [5] 0x3d:0x5 DW_TAG_formal_parameter
+ .long 97 # DW_AT_type
+ # DW_AT_artificial
+ .byte 0 # End Of Children Mark
+ .byte 0 # End Of Children Mark
+ .byte 0 # End Of Children Mark
+ .byte 0 # End Of Children Mark
+ .byte 11 # Abbrev [11] 0x46:0x1b DW_TAG_subprogram
+ .byte 0 # DW_AT_low_pc
+ .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc
+ .byte 1 # DW_AT_frame_base
+ .byte 86
+ .byte 7 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 14 # DW_AT_decl_line
+ .long 129 # DW_AT_type
+ # DW_AT_external
+ .byte 12 # Abbrev [12] 0x55:0xb DW_TAG_variable
+ .byte 2 # DW_AT_location
+ .byte 145
+ .byte 123
+ .byte 10 # DW_AT_name
+ .byte 0 # DW_AT_decl_file
+ .byte 15 # DW_AT_decl_line
+ .long 47 # DW_AT_type
+ .byte 0 # End Of Children Mark
+ .byte 8 # Abbrev [8] 0x61:0x5 DW_TAG_pointer_type
+ .long 47 # DW_AT_type
+ .byte 13 # Abbrev [13] 0x66:0x1b DW_TAG_subprogram
+ .byte 1 # DW_AT_low_pc
+ .long .Lfunc_end1-.Lfunc_begin1 # DW_AT_high_pc
+ .byte 1 # DW_AT_frame_base
+ .byte 86
+ .long 119 # DW_AT_object_pointer
+ .byte 9 # DW_AT_linkage_name
+ .long 56 # DW_AT_specification
+ .byte 14 # Abbrev [14] 0x77:0x9 DW_TAG_formal_parameter
+ .byte 2 # DW_AT_location
+ .byte 145
+ .byte 120
+ .byte 11 # DW_AT_name
+ .long 133 # DW_AT_type
+ # DW_AT_artificial
+ .byte 0 # End Of Children Mark
+ .byte 15 # Abbrev [15] 0x81:0x4 DW_TAG_base_type
+ .byte 8 # DW_AT_name
+ .byte 5 # DW_AT_encoding
+ .byte 4 # DW_AT_byte_size
+ .byte 8 # Abbrev [8] 0x85:0x5 DW_TAG_pointer_type
+ .long 47 # DW_AT_type
+ .byte 0 # End Of Children Mark
+.Ldebug_info_end1:
+ .section .debug_rnglists,"",@progbits
+ .long .Ldebug_list_header_end0-.Ldebug_list_header_start0 # Length
+.Ldebug_list_header_start0:
+ .short 5 # Version
+ .byte 8 # Address size
+ .byte 0 # Segment selector size
+ .long 1 # Offset entry count
+.Lrnglists_table_base0:
+ .long .Ldebug_ranges0-.Lrnglists_table_base0
+.Ldebug_ranges0:
+ .byte 3 # DW_RLE_startx_length
+ .byte 0 # start index
+ .uleb128 .Lfunc_end0-.Lfunc_begin0 # length
+ .byte 3 # DW_RLE_startx_length
+ .byte 1 # start index
+ .uleb128 .Lfunc_end1-.Lfunc_begin1 # length
+ .byte 0 # DW_RLE_end_of_list
+.Ldebug_list_header_end0:
+ .section .debug_str_offsets,"",@progbits
+ .long 52 # Length of String Offsets Set
+ .short 5
+ .short 0
+.Lstr_offsets_base0:
+ .section .debug_str,"MS",@progbits,1
+.Linfo_string0:
+ .asciz "clang version 19.0.0git" # string offset=0
+.Linfo_string1:
+ .asciz "main.cpp" # string offset=24
+.Linfo_string2:
+ .asciz "/home/ayermolo/local/tasks/T190087639/DW_TAG_structure_type" # string offset=33
+.Linfo_string3:
+ .asciz "A" # string offset=93
+.Linfo_string4:
+ .asciz "B" # string offset=95
+.Linfo_string5:
+ .asciz "State" # string offset=97
+.Linfo_string6:
+ .asciz "InnerState" # string offset=103
+.Linfo_string7:
+ .asciz "main" # string offset=114
+.Linfo_string8:
+ .asciz "_ZN1A1B5StateC2Ev" # string offset=119
+.Linfo_string9:
+ .asciz "int" # string offset=137
+.Linfo_string10:
+ .asciz "S" # string offset=141
+.Linfo_string11:
+ .asciz "this" # string offset=143
+ .section .debug_str_offsets,"",@progbits
+ .long .Linfo_string0
+ .long .Linfo_string1
+ .long .Linfo_string2
+ .long .Linfo_string3
+ .long .Linfo_string4
+ .long .Linfo_string5
+ .long .Linfo_string6
+ .long .Linfo_string7
+ .long .Linfo_string9
+ .long .Linfo_string8
+ .long .Linfo_string10
+ .long .Linfo_string11
+ .section .debug_addr,"",@progbits
+ .long .Ldebug_addr_end0-.Ldebug_addr_start0 # Length of contribution
+.Ldebug_addr_start0:
+ .short 5 # DWARF version number
+ .byte 8 # Address size
+ .byte 0 # Segment selector size
+.Laddr_table_base0:
+ .quad .Lfunc_begin0
+ .quad .Lfunc_begin1
+.Ldebug_addr_end0:
+ .section .debug_names,"",@progbits
+ .long .Lnames_end0-.Lnames_start0 # Header: unit length
+.Lnames_start0:
+ .short 5 # Header: version
+ .short 0 # Header: padding
+ .long 1 # Header: compilation unit count
+ .long 1 # Header: local type unit count
+ .long 0 # Header: foreign type unit count
+ .long 6 # Header: bucket count
+ .long 6 # Header: name count
+ .long .Lnames_abbrev_end0-.Lnames_abbrev_start0 # Header: abbreviation table size
+ .long 8 # Header: augmentation string size
+ .ascii "LLVM0700" # Header: augmentation string
+ .long .Lcu_begin0 # Compilation unit 0
+ .long .Ltu_begin0 # Type unit 0
+ .long 0 # Bucket 0
+ .long 0 # Bucket 1
+ .long 1 # Bucket 2
+ .long 2 # Bucket 3
+ .long 3 # Bucket 4
+ .long 6 # Bucket 5
+ .long 193495088 # Hash in Bucket 2
+ .long 1059643959 # Hash in Bucket 3
+ .long 177670 # Hash in Bucket 4
+ .long 274811398 # Hash in Bucket 4
+ .long 2090499946 # Hash in Bucket 4
+ .long 177671 # Hash in Bucket 5
+ .long .Linfo_string9 # String in Bucket 2: int
+ .long .Linfo_string8 # String in Bucket 3: _ZN1A1B5StateC2Ev
+ .long .Linfo_string3 # String in Bucket 4: A
+ .long .Linfo_string5 # String in Bucket 4: State
+ .long .Linfo_string7 # String in Bucket 4: main
+ .long .Linfo_string4 # String in Bucket 5: B
+ .long .Lnames5-.Lnames_entries0 # Offset in Bucket 2
+ .long .Lnames4-.Lnames_entries0 # Offset in Bucket 3
+ .long .Lnames0-.Lnames_entries0 # Offset in Bucket 4
+ .long .Lnames2-.Lnames_entries0 # Offset in Bucket 4
+ .long .Lnames3-.Lnames_entries0 # Offset in Bucket 4
+ .long .Lnames1-.Lnames_entries0 # Offset in Bucket 5
+.Lnames_abbrev_start0:
+ .byte 1 # Abbrev code
+ .byte 36 # DW_TAG_base_type
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 2 # Abbrev code
+ .byte 46 # DW_TAG_subprogram
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 3 # Abbrev code
+ .byte 57 # DW_TAG_namespace
+ .byte 2 # DW_IDX_type_unit
+ .byte 11 # DW_FORM_data1
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 4 # Abbrev code
+ .byte 57 # DW_TAG_namespace
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 5 # Abbrev code
+ .byte 2 # DW_TAG_class_type
+ .byte 2 # DW_IDX_type_unit
+ .byte 11 # DW_FORM_data1
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 6 # Abbrev code
+ .byte 57 # DW_TAG_namespace
+ .byte 2 # DW_IDX_type_unit
+ .byte 11 # DW_FORM_data1
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 7 # Abbrev code
+ .byte 57 # DW_TAG_namespace
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev list
+.Lnames_abbrev_end0:
+.Lnames_entries0:
+.Lnames5:
+.L2:
+ .byte 1 # Abbreviation code
+ .long 129 # DW_IDX_die_offset
+ .byte 0 # DW_IDX_parent
+ # End of list: int
+.Lnames4:
+.L3:
+ .byte 2 # Abbreviation code
+ .long 102 # DW_IDX_die_offset
+ .byte 0 # DW_IDX_parent
+ # End of list: _ZN1A1B5StateC2Ev
+.Lnames0:
+.L4:
+ .byte 3 # Abbreviation code
+ .byte 0 # DW_IDX_type_unit
+ .long 35 # DW_IDX_die_offset
+.L7: # DW_IDX_parent
+ .byte 4 # Abbreviation code
+ .long 43 # DW_IDX_die_offset
+ .byte 0 # DW_IDX_parent
+ # End of list: A
+.Lnames2:
+.L1:
+ .byte 5 # Abbreviation code
+ .byte 0 # DW_IDX_type_unit
+ .long 39 # DW_IDX_die_offset
+ .long .L5-.Lnames_entries0 # DW_IDX_parent
+ .byte 2 # Abbreviation code
+ .long 102 # DW_IDX_die_offset
+ .byte 0 # DW_IDX_parent
+ # End of list: State
+.Lnames3:
+.L0:
+ .byte 2 # Abbreviation code
+ .long 70 # DW_IDX_die_offset
+ .byte 0 # DW_IDX_parent
+ # End of list: main
+.Lnames1:
+.L5:
+ .byte 6 # Abbreviation code
+ .byte 0 # DW_IDX_type_unit
+ .long 37 # DW_IDX_die_offset
+ .long .L4-.Lnames_entries0 # DW_IDX_parent
+.L6:
+ .byte 7 # Abbreviation code
+ .long 45 # DW_IDX_die_offset
+ .long .L7-.Lnames_entries0 # DW_IDX_parent
+ .byte 0 # End of list: B
+ .p2align 2, 0x0
+.Lnames_end0:
+ .ident "clang version 19.0.0git"
+ .section ".note.GNU-stack","",@progbits
+ .addrsig
+ .section .debug_line,"",@progbits
+.Lline_table_start0:
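
The "Hash in Bucket N" values in the .debug_names section above follow the DWARF v5 accelerator-table scheme: each name is hashed with the DJB hash (the case-folding variant, judging by the values for "A" and "B"), and its bucket is hash % bucket_count, with a bucket count of 6 per the header. A minimal sketch of that computation, which reproduces the values emitted above:

    # Sketch: case-folding DJB hash and bucket assignment for DWARF v5
    # .debug_names, matching the "Hash in Bucket N" values in the section above.
    def djb_hash_folded(name: str) -> int:
        h = 5381
        for c in name.lower():  # case-folding variant of the classic DJB hash
            h = (h * 33 + ord(c)) & 0xFFFFFFFF
        return h

    BUCKET_COUNT = 6  # "Header: bucket count" above
    for name in ["int", "_ZN1A1B5StateC2Ev", "A", "State", "main", "B"]:
        h = djb_hash_folded(name)
        print(f"{name:>18}: hash={h:>10} bucket={h % BUCKET_COUNT}")
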
diff --git a/bolt/test/X86/ignored-interprocedural-reference.s b/bolt/test/X86/ignored-interprocedural-reference.s
new file mode 100644
index 000000000000..12e4fb92adcc
--- /dev/null
+++ b/bolt/test/X86/ignored-interprocedural-reference.s
@@ -0,0 +1,49 @@
+# This reproduces a bug where interprocedural references from ignored
+# functions were not processed.
+
+# REQUIRES: system-linux
+
+# RUN: llvm-mc -filetype=obj -triple x86_64-unknown-unknown %s -o %t.o
+# RUN: %clang %cflags %t.o -o %t.exe -nostdlib -Wl,-q
+# RUN: llvm-bolt %t.exe -o %t.out --enable-bat -funcs=main
+# RUN: link_fdata %s %t.out %t.preagg PREAGG
+# RUN: perf2bolt %t.out -p %t.preagg --pa -o %t.fdata -w %t.yaml
+# RUN: FileCheck %s --input-file=%t.fdata --check-prefix=CHECK-FDATA
+# RUN: FileCheck %s --input-file=%t.yaml --check-prefix=CHECK-YAML
+
+# CHECK-FDATA: 1 main 0 1 foo a 1 1
+# CHECK-YAML: name: main
+# CHECK-YAML: calls: {{.*}} disc: 1
+
+# PREAGG: B #main# #foo_secondary# 1 1
+# main calls foo at a valid instruction offset, past nops that are to be stripped.
+ .globl main
+main:
+ .cfi_startproc
+ call foo_secondary
+ ret
+ .cfi_endproc
+.size main,.-main
+
+# Placeholder cold fragment to force main to be ignored in non-relocation mode.
+ .globl main.cold
+main.cold:
+ .cfi_startproc
+ ud2
+ .cfi_endproc
+.size main.cold,.-main.cold
+
+# foo is set up to contain a valid instruction at the called offset, and
+# trapping instructions past that.
+ .globl foo
+foo:
+ .cfi_startproc
+ .nops 10
+ .globl foo_secondary
+foo_secondary:
+ ret
+ .rept 20
+ int3
+ .endr
+ .cfi_endproc
+.size foo,.-foo
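
The CHECK-FDATA line above ("1 main 0 1 foo a 1 1") is a branch record in BOLT's fdata format: an (is-symbol, name, hex offset) triple each for the branch source and destination, followed by misprediction and execution counts. A sketch of a parser under that reading (parse_fdata_branch is a hypothetical helper, not a BOLT API):

    # Sketch: decode one fdata branch record such as "1 main 0 1 foo a 1 1".
    def parse_fdata_branch(line: str) -> dict:
        f = line.split()
        return {
            "from": (f[0] == "1", f[1], int(f[2], 16)),  # is_symbol, name, offset
            "to": (f[3] == "1", f[4], int(f[5], 16)),
            "mispreds": int(f[6]),
            "count": int(f[7]),
        }

    print(parse_fdata_branch("1 main 0 1 foo a 1 1"))
    # {'from': (True, 'main', 0), 'to': (True, 'foo', 10), 'mispreds': 1, 'count': 1}
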
diff --git a/bolt/test/X86/register-fragments-bolt-symbols.s b/bolt/test/X86/register-fragments-bolt-symbols.s
index 6478adf19372..90c402b2234d 100644
--- a/bolt/test/X86/register-fragments-bolt-symbols.s
+++ b/bolt/test/X86/register-fragments-bolt-symbols.s
@@ -18,6 +18,11 @@
# RUN: FileCheck --input-file %t.bat.fdata --check-prefix=CHECK-FDATA %s
# RUN: FileCheck --input-file %t.bat.yaml --check-prefix=CHECK-YAML %s
+# RUN: link_fdata --no-redefine %s %t.bolt %t.preagg2 PREAGG2
+# PREAGG2: B X:0 #chain# 1 0
+# RUN: perf2bolt %t.bolt -p %t.preagg2 --pa -o %t.bat2.fdata -w %t.bat2.yaml
+# RUN: FileCheck %s --input-file %t.bat2.yaml --check-prefix=CHECK-YAML2
+
# CHECK-SYMS: l df *ABS* [[#]] chain.s
# CHECK-SYMS: l F .bolt.org.text [[#]] chain
# CHECK-SYMS: l F .text.cold [[#]] chain.cold.0
@@ -28,6 +33,9 @@
# CHECK-FDATA: 0 [unknown] 0 1 chain/chain.s/2 10 0 1
# CHECK-YAML: - name: 'chain/chain.s/2'
+# CHECK-YAML2: - name: 'chain/chain.s/1'
+## The non-BAT function has non-zero insns:
+# CHECK-YAML2: insns: 1
.file "chain.s"
.text
diff --git a/bolt/test/X86/yaml-non-simple.test b/bolt/test/X86/yaml-non-simple.test
new file mode 100644
index 000000000000..fef98f692a71
--- /dev/null
+++ b/bolt/test/X86/yaml-non-simple.test
@@ -0,0 +1,71 @@
+## Check that a YAML profile for a non-simple function is not reported as stale.
+
+# RUN: split-file %s %t
+# RUN: llvm-mc -filetype=obj -triple x86_64-unknown-unknown %t/main.s -o %t.o
+# RUN: %clang %cflags %t.o -o %t.exe -nostdlib
+# RUN: llvm-bolt %t.exe -o %t.out --data %t/yaml --profile-ignore-hash -v=1 \
+# RUN: --report-stale 2>&1 | FileCheck %s
+
+# CHECK: BOLT-INFO: could not disassemble function main. Will ignore.
+# CHECK: BOLT-INFO: could not disassemble function main.cold. Will ignore.
+# CHECK: BOLT-INFO: 0 out of 2 functions in the binary (0.0%) have non-empty execution profile
+# CHECK: BOLT-INFO: 1 function with profile could not be optimized
+
+#--- main.s
+.globl main
+.type main, @function
+main:
+ .cfi_startproc
+.LBB00:
+ pushq %rbp
+ movq %rsp, %rbp
+ subq $16, %rsp
+ testq %rax, %rax
+ js .LBB03
+.LBB01:
+ jne .LBB04
+.LBB02:
+ nop
+.LBB03:
+ xorl %eax, %eax
+ addq $16, %rsp
+ popq %rbp
+ retq
+.LBB04:
+ xorl %eax, %eax
+ addq $16, %rsp
+ popq %rbp
+ retq
+ .cfi_endproc
+ .size main, .-main
+
+.globl main.cold
+.type main.cold, @function
+main.cold:
+ .cfi_startproc
+ nop
+ .cfi_endproc
+ .size main.cold, .-main.cold
+
+#--- yaml
+---
+header:
+ profile-version: 1
+ binary-name: 'yaml-non-simple.s.tmp.exe'
+ binary-build-id: '<unknown>'
+ profile-flags: [ lbr ]
+ profile-origin: branch profile reader
+ profile-events: ''
+ dfs-order: false
+ hash-func: xxh3
+functions:
+ - name: main
+ fid: 0
+ hash: 0x0000000000000000
+ exec: 1
+ nblocks: 5
+ blocks:
+ - bid: 1
+ insns: 1
+ succ: [ { bid: 3, cnt: 1} ]
+...
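
The inline profile above is BOLT's YAML profile format: a header recording how the profile was collected, then one record per function with per-block (bid) instruction counts and successor edge counts. For reference, a sketch that inspects such a profile with PyYAML (the file path is hypothetical):

    # Sketch: dump functions and branch edges from a BOLT YAML profile.
    import yaml  # PyYAML, assumed available

    with open("profile.yaml") as f:  # hypothetical path to the 'yaml' split-file
        prof = yaml.safe_load(f)

    for func in prof["functions"]:
        print(f"{func['name']}: exec={func['exec']} nblocks={func['nblocks']}")
        for block in func.get("blocks", []):
            for edge in block.get("succ", []):
                print(f"  bid {block['bid']} -> bid {edge['bid']} cnt={edge['cnt']}")
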
diff --git a/bolt/test/link_fdata.py b/bolt/test/link_fdata.py
index 0232dd3211e9..3837e394ccc8 100755
--- a/bolt/test/link_fdata.py
+++ b/bolt/test/link_fdata.py
@@ -19,6 +19,7 @@ parser.add_argument("output")
parser.add_argument("prefix", nargs="?", default="FDATA", help="Custom FDATA prefix")
parser.add_argument("--nmtool", default="nm", help="Path to nm tool")
parser.add_argument("--no-lbr", action="store_true")
+parser.add_argument("--no-redefine", action="store_true")
args = parser.parse_args()
@@ -90,6 +91,8 @@ nm_output = subprocess.run(
symbols = {}
for symline in nm_output.splitlines():
symval, _, symname = symline.split(maxsplit=2)
+ if symname in symbols and args.no_redefine:
+ continue
symbols[symname] = symval
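
With --no-redefine, the loop above keeps the address of the first definition nm reports for a symbol instead of overwriting it with later ones. In isolation, the new logic behaves like this sketch (the nm output is made up for illustration):

    # Sketch: first-definition-wins symbol map, mirroring --no-redefine above.
    nm_output = "0000000000001000 T chain\n0000000000002000 T chain"
    no_redefine = True  # stands in for args.no_redefine

    symbols = {}
    for symline in nm_output.splitlines():
        symval, _, symname = symline.split(maxsplit=2)
        if symname in symbols and no_redefine:
            continue  # keep the first definition's address
        symbols[symname] = symval

    print(symbols)  # {'chain': '0000000000001000'}
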
diff --git a/bolt/test/runtime/X86/hot-end-symbol.s b/bolt/test/runtime/X86/hot-end-symbol.s
index e6d83d77167a..6ae771cead75 100755
--- a/bolt/test/runtime/X86/hot-end-symbol.s
+++ b/bolt/test/runtime/X86/hot-end-symbol.s
@@ -12,6 +12,7 @@
# RUN: %clang %cflags -no-pie %t.o -o %t.exe -Wl,-q
# RUN: llvm-bolt %t.exe --relocs=1 --hot-text --reorder-functions=hfsort \
+# RUN: --split-functions --split-strategy=all \
# RUN: --data %t.fdata -o %t.out | FileCheck %s
# RUN: %t.out 1
@@ -30,12 +31,12 @@
# CHECK-OUTPUT: __hot_start
# CHECK-OUTPUT-NEXT: main
# CHECK-OUTPUT-NEXT: __hot_end
+# CHECK-OUTPUT-NOT: __hot_start.cold
.text
.globl main
.type main, %function
.globl __hot_start
- .type __hot_start, %object
.p2align 4
main:
__hot_start:
diff --git a/bolt/unittests/CMakeLists.txt b/bolt/unittests/CMakeLists.txt
index 77159e92dec5..64414b83d39f 100644
--- a/bolt/unittests/CMakeLists.txt
+++ b/bolt/unittests/CMakeLists.txt
@@ -1,5 +1,5 @@
add_custom_target(BoltUnitTests)
-set_target_properties(BoltUnitTests PROPERTIES FOLDER "BOLT tests")
+set_target_properties(BoltUnitTests PROPERTIES FOLDER "BOLT/Tests")
function(add_bolt_unittest test_dirname)
add_unittest(BoltUnitTests ${test_dirname} ${ARGN})
diff --git a/clang-tools-extra/CMakeLists.txt b/clang-tools-extra/CMakeLists.txt
index 6a3f741721ee..f6a6b57b5ef0 100644
--- a/clang-tools-extra/CMakeLists.txt
+++ b/clang-tools-extra/CMakeLists.txt
@@ -1,3 +1,5 @@
+set(LLVM_SUBPROJECT_TITLE "Clang Tools Extra")
+
include(CMakeDependentOption)
include(GNUInstallDirs)
diff --git a/clang-tools-extra/clang-tidy/CMakeLists.txt b/clang-tools-extra/clang-tidy/CMakeLists.txt
index 7e1905aa897b..430ea4cdbb38 100644
--- a/clang-tools-extra/clang-tidy/CMakeLists.txt
+++ b/clang-tools-extra/clang-tidy/CMakeLists.txt
@@ -121,7 +121,7 @@ if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY)
PATTERN "*.h"
)
add_custom_target(clang-tidy-headers)
- set_target_properties(clang-tidy-headers PROPERTIES FOLDER "Misc")
+ set_target_properties(clang-tidy-headers PROPERTIES FOLDER "Clang Tools Extra/Resources")
if(NOT LLVM_ENABLE_IDE)
add_llvm_install_targets(install-clang-tidy-headers
DEPENDS clang-tidy-headers
diff --git a/clang-tools-extra/clang-tidy/bugprone/ForwardingReferenceOverloadCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/ForwardingReferenceOverloadCheck.cpp
index 36687a8e761e..c87b3ea7e261 100644
--- a/clang-tools-extra/clang-tidy/bugprone/ForwardingReferenceOverloadCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/ForwardingReferenceOverloadCheck.cpp
@@ -54,7 +54,9 @@ AST_MATCHER(QualType, isEnableIf) {
AST_MATCHER_P(TemplateTypeParmDecl, hasDefaultArgument,
clang::ast_matchers::internal::Matcher<QualType>, TypeMatcher) {
return Node.hasDefaultArgument() &&
- TypeMatcher.matches(Node.getDefaultArgument(), Finder, Builder);
+ TypeMatcher.matches(
+ Node.getDefaultArgument().getArgument().getAsType(), Finder,
+ Builder);
}
AST_MATCHER(TemplateDecl, hasAssociatedConstraints) {
return Node.hasAssociatedConstraints();
diff --git a/clang-tools-extra/clang-tidy/bugprone/IncorrectEnableIfCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/IncorrectEnableIfCheck.cpp
index 09aaf3e31d5d..75f1107904fc 100644
--- a/clang-tools-extra/clang-tidy/bugprone/IncorrectEnableIfCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/IncorrectEnableIfCheck.cpp
@@ -19,10 +19,11 @@ namespace {
AST_MATCHER_P(TemplateTypeParmDecl, hasUnnamedDefaultArgument,
ast_matchers::internal::Matcher<TypeLoc>, InnerMatcher) {
if (Node.getIdentifier() != nullptr || !Node.hasDefaultArgument() ||
- Node.getDefaultArgumentInfo() == nullptr)
+ Node.getDefaultArgument().getArgument().isNull())
return false;
- TypeLoc DefaultArgTypeLoc = Node.getDefaultArgumentInfo()->getTypeLoc();
+ TypeLoc DefaultArgTypeLoc =
+ Node.getDefaultArgument().getTypeSourceInfo()->getTypeLoc();
return InnerMatcher.matches(DefaultArgTypeLoc, Finder, Builder);
}
diff --git a/clang-tools-extra/clang-tidy/bugprone/SizeofExpressionCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/SizeofExpressionCheck.cpp
index a1cffbc66619..5e64d23874ec 100644
--- a/clang-tools-extra/clang-tidy/bugprone/SizeofExpressionCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/SizeofExpressionCheck.cpp
@@ -144,16 +144,13 @@ void SizeofExpressionCheck::registerMatchers(MatchFinder *Finder) {
unaryOperator(hasUnaryOperand(ArrayExpr), unless(hasOperatorName("*"))),
binaryOperator(hasEitherOperand(ArrayExpr)),
castExpr(hasSourceExpression(ArrayExpr))));
- const auto PointerToArrayExpr = ignoringParenImpCasts(
- hasType(hasCanonicalType(pointerType(pointee(arrayType())))));
+ const auto PointerToArrayExpr =
+ hasType(hasCanonicalType(pointerType(pointee(arrayType()))));
- const auto StructAddrOfExpr = unaryOperator(
- hasOperatorName("&"), hasUnaryOperand(ignoringParenImpCasts(
- hasType(hasCanonicalType(recordType())))));
const auto PointerToStructType =
hasUnqualifiedDesugaredType(pointerType(pointee(recordType())));
- const auto PointerToStructExpr = ignoringParenImpCasts(expr(
- hasType(hasCanonicalType(PointerToStructType)), unless(cxxThisExpr())));
+ const auto PointerToStructExpr = expr(
+ hasType(hasCanonicalType(PointerToStructType)), unless(cxxThisExpr()));
const auto ArrayOfPointersExpr = ignoringParenImpCasts(
hasType(hasCanonicalType(arrayType(hasElementType(pointerType()))
@@ -166,18 +163,19 @@ void SizeofExpressionCheck::registerMatchers(MatchFinder *Finder) {
ignoringParenImpCasts(arraySubscriptExpr(
hasBase(ArrayOfSamePointersExpr), hasIndex(ZeroLiteral)));
const auto ArrayLengthExprDenom =
- expr(hasParent(expr(ignoringParenImpCasts(binaryOperator(
- hasOperatorName("/"), hasLHS(ignoringParenImpCasts(sizeOfExpr(
- has(ArrayOfPointersExpr)))))))),
+ expr(hasParent(binaryOperator(hasOperatorName("/"),
+ hasLHS(ignoringParenImpCasts(sizeOfExpr(
+ has(ArrayOfPointersExpr)))))),
sizeOfExpr(has(ArrayOfSamePointersZeroSubscriptExpr)));
- Finder->addMatcher(expr(anyOf(sizeOfExpr(has(ignoringParenImpCasts(anyOf(
- ArrayCastExpr, PointerToArrayExpr,
- StructAddrOfExpr, PointerToStructExpr)))),
- sizeOfExpr(has(PointerToStructType))),
- unless(ArrayLengthExprDenom))
- .bind("sizeof-pointer-to-aggregate"),
- this);
+ Finder->addMatcher(
+ expr(sizeOfExpr(anyOf(
+ has(ignoringParenImpCasts(anyOf(
+ ArrayCastExpr, PointerToArrayExpr, PointerToStructExpr))),
+ has(PointerToStructType))),
+ unless(ArrayLengthExprDenom))
+ .bind("sizeof-pointer-to-aggregate"),
+ this);
}
// Detect expression like: sizeof(expr) <= k for a suspicious constant 'k'.
diff --git a/clang-tools-extra/clang-tidy/misc/CMakeLists.txt b/clang-tools-extra/clang-tidy/misc/CMakeLists.txt
index d9ec268650c0..35e29b9a7d13 100644
--- a/clang-tools-extra/clang-tidy/misc/CMakeLists.txt
+++ b/clang-tools-extra/clang-tidy/misc/CMakeLists.txt
@@ -15,6 +15,7 @@ add_custom_command(
DEPENDS ${clang_tidy_confusable_chars_gen_target} ConfusableTable/confusables.txt)
add_custom_target(genconfusable DEPENDS Confusables.inc)
+set_target_properties(genconfusable PROPERTIES FOLDER "Clang Tools Extra/Sourcegenning")
add_clang_library(clangTidyMiscModule
ConstCorrectnessCheck.cpp
diff --git a/clang-tools-extra/clang-tidy/modernize/UseConstraintsCheck.cpp b/clang-tools-extra/clang-tidy/modernize/UseConstraintsCheck.cpp
index 7a021fe14436..ea4d99586c71 100644
--- a/clang-tools-extra/clang-tidy/modernize/UseConstraintsCheck.cpp
+++ b/clang-tools-extra/clang-tidy/modernize/UseConstraintsCheck.cpp
@@ -177,9 +177,11 @@ matchTrailingTemplateParam(const FunctionTemplateDecl *FunctionTemplate) {
dyn_cast<TemplateTypeParmDecl>(LastParam)) {
if (LastTemplateParam->hasDefaultArgument() &&
LastTemplateParam->getIdentifier() == nullptr) {
- return {matchEnableIfSpecialization(
- LastTemplateParam->getDefaultArgumentInfo()->getTypeLoc()),
- LastTemplateParam};
+ return {
+ matchEnableIfSpecialization(LastTemplateParam->getDefaultArgument()
+ .getTypeSourceInfo()
+ ->getTypeLoc()),
+ LastTemplateParam};
}
}
return {};
diff --git a/clang-tools-extra/clang-tidy/readability/ImplicitBoolConversionCheck.cpp b/clang-tools-extra/clang-tidy/readability/ImplicitBoolConversionCheck.cpp
index 74152c603451..28f5eada6d82 100644
--- a/clang-tools-extra/clang-tidy/readability/ImplicitBoolConversionCheck.cpp
+++ b/clang-tools-extra/clang-tidy/readability/ImplicitBoolConversionCheck.cpp
@@ -50,7 +50,9 @@ StringRef getZeroLiteralToCompareWithForType(CastKind CastExprKind,
case CK_PointerToBoolean:
case CK_MemberPointerToBoolean: // Fall-through on purpose.
- return Context.getLangOpts().CPlusPlus11 ? "nullptr" : "0";
+ return (Context.getLangOpts().CPlusPlus11 || Context.getLangOpts().C23)
+ ? "nullptr"
+ : "0";
default:
llvm_unreachable("Unexpected cast kind");
@@ -165,6 +167,12 @@ bool needsSpacePrefix(SourceLocation Loc, ASTContext &Context) {
void fixGenericExprCastFromBool(DiagnosticBuilder &Diag,
const ImplicitCastExpr *Cast,
ASTContext &Context, StringRef OtherType) {
+ if (!Context.getLangOpts().CPlusPlus) {
+ Diag << FixItHint::CreateInsertion(Cast->getBeginLoc(),
+ (Twine("(") + OtherType + ")").str());
+ return;
+ }
+
const Expr *SubExpr = Cast->getSubExpr();
const bool NeedParens = !isa<ParenExpr>(SubExpr->IgnoreImplicit());
const bool NeedSpace = needsSpacePrefix(Cast->getBeginLoc(), Context);
@@ -267,6 +275,10 @@ void ImplicitBoolConversionCheck::registerMatchers(MatchFinder *Finder) {
auto BoolXor =
binaryOperator(hasOperatorName("^"), hasLHS(ImplicitCastFromBool),
hasRHS(ImplicitCastFromBool));
+ auto ComparisonInCall = allOf(
+ hasParent(callExpr()),
+ hasSourceExpression(binaryOperator(hasAnyOperatorName("==", "!="))));
+
Finder->addMatcher(
traverse(TK_AsIs,
implicitCastExpr(
@@ -281,6 +293,8 @@ void ImplicitBoolConversionCheck::registerMatchers(MatchFinder *Finder) {
stmt(anyOf(ifStmt(), whileStmt()), has(declStmt())))),
// Exclude cases common to implicit cast to and from bool.
unless(ExceptionCases), unless(has(BoolXor)),
+ // Exclude C23 cases common to implicit cast to bool.
+ unless(ComparisonInCall),
// Retrieve also parent statement, to check if we need
// additional parens in replacement.
optionally(hasParent(stmt().bind("parentStmt"))),
diff --git a/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.cpp b/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.cpp
index e811f5519de2..88e4886cd0df 100644
--- a/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.cpp
+++ b/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.cpp
@@ -123,6 +123,9 @@ static const NamedDecl *getFailureForNamedDecl(const NamedDecl *ND) {
if (const auto *Method = dyn_cast<CXXMethodDecl>(ND)) {
if (const CXXMethodDecl *Overridden = getOverrideMethod(Method))
Canonical = cast<NamedDecl>(Overridden->getCanonicalDecl());
+ else if (const FunctionTemplateDecl *Primary = Method->getPrimaryTemplate())
+ if (const FunctionDecl *TemplatedDecl = Primary->getTemplatedDecl())
+ Canonical = cast<NamedDecl>(TemplatedDecl->getCanonicalDecl());
if (Canonical != ND)
return Canonical;
diff --git a/clang-tools-extra/clangd/Hover.cpp b/clang-tools-extra/clangd/Hover.cpp
index 06b949bc4a2b..de103e011c70 100644
--- a/clang-tools-extra/clangd/Hover.cpp
+++ b/clang-tools-extra/clangd/Hover.cpp
@@ -247,8 +247,12 @@ fetchTemplateParameters(const TemplateParameterList *Params,
if (!TTP->getName().empty())
P.Name = TTP->getNameAsString();
- if (TTP->hasDefaultArgument())
- P.Default = TTP->getDefaultArgument().getAsString(PP);
+ if (TTP->hasDefaultArgument()) {
+ P.Default.emplace();
+ llvm::raw_string_ostream Out(*P.Default);
+ TTP->getDefaultArgument().getArgument().print(PP, Out,
+ /*IncludeType=*/false);
+ }
} else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
P.Type = printType(NTTP, PP);
@@ -258,7 +262,8 @@ fetchTemplateParameters(const TemplateParameterList *Params,
if (NTTP->hasDefaultArgument()) {
P.Default.emplace();
llvm::raw_string_ostream Out(*P.Default);
- NTTP->getDefaultArgument()->printPretty(Out, nullptr, PP);
+ NTTP->getDefaultArgument().getArgument().print(PP, Out,
+ /*IncludeType=*/false);
}
} else if (const auto *TTPD = dyn_cast<TemplateTemplateParmDecl>(Param)) {
P.Type = printType(TTPD, PP);
diff --git a/clang-tools-extra/clangd/test/infinite-instantiation.test b/clang-tools-extra/clangd/test/infinite-instantiation.test
index 85a1b656f490..a9c787c77027 100644
--- a/clang-tools-extra/clangd/test/infinite-instantiation.test
+++ b/clang-tools-extra/clangd/test/infinite-instantiation.test
@@ -1,5 +1,6 @@
-// RUN: cp %s %t.cpp
-// RUN: not clangd -check=%t.cpp 2>&1 | FileCheck -strict-whitespace %s
+// RUN: rm -rf %t.dir && mkdir -p %t.dir
+// RUN: echo '[{"directory": "%/t.dir", "command": "clang -ftemplate-depth=100 -x c++ %/s", "file": "%/s"}]' > %t.dir/compile_commands.json
+// RUN: not clangd --compile-commands-dir=%t.dir -check=%s 2>&1 | FileCheck -strict-whitespace %s
// CHECK: [template_recursion_depth_exceeded]
diff --git a/clang-tools-extra/clangd/unittests/CMakeLists.txt b/clang-tools-extra/clangd/unittests/CMakeLists.txt
index 7f1ae5c43d80..0d4628ccf25d 100644
--- a/clang-tools-extra/clangd/unittests/CMakeLists.txt
+++ b/clang-tools-extra/clangd/unittests/CMakeLists.txt
@@ -29,6 +29,7 @@ include(${CMAKE_CURRENT_SOURCE_DIR}/../quality/CompletionModel.cmake)
gen_decision_forest(${CMAKE_CURRENT_SOURCE_DIR}/decision_forest_model DecisionForestRuntimeTest ::ns1::ns2::test::Example)
add_custom_target(ClangdUnitTests)
+set_target_properties(ClangdUnitTests PROPERTIES FOLDER "Clang Tools Extra/Tests")
add_unittest(ClangdUnitTests ClangdTests
Annotations.cpp
ASTTests.cpp
diff --git a/clang-tools-extra/clangd/unittests/ClangdTests.cpp b/clang-tools-extra/clangd/unittests/ClangdTests.cpp
index 864337b98f44..c324643498d9 100644
--- a/clang-tools-extra/clangd/unittests/ClangdTests.cpp
+++ b/clang-tools-extra/clangd/unittests/ClangdTests.cpp
@@ -392,7 +392,7 @@ TEST(ClangdServerTest, SearchLibDir) {
ErrorCheckingCallbacks DiagConsumer;
MockCompilationDatabase CDB;
CDB.ExtraClangFlags.insert(CDB.ExtraClangFlags.end(),
- {"-xc++", "-target", "x86_64-linux-unknown",
+ {"-xc++", "--target=x86_64-unknown-linux-gnu",
"-m64", "--gcc-toolchain=/randomusr",
"-stdlib=libstdc++"});
ClangdServer Server(CDB, FS, ClangdServer::optsForTest(), &DiagConsumer);
diff --git a/clang-tools-extra/clangd/unittests/FindTargetTests.cpp b/clang-tools-extra/clangd/unittests/FindTargetTests.cpp
index 0b2273f0a9a6..3220a5a6a982 100644
--- a/clang-tools-extra/clangd/unittests/FindTargetTests.cpp
+++ b/clang-tools-extra/clangd/unittests/FindTargetTests.cpp
@@ -836,7 +836,9 @@ TEST_F(TargetDeclTest, OverloadExpr) {
[[delete]] x;
}
)cpp";
- EXPECT_DECLS("CXXDeleteExpr", "void operator delete(void *) noexcept");
+ // Sized deallocation is enabled by default from C++14 onwards.
+ EXPECT_DECLS("CXXDeleteExpr",
+ "void operator delete(void *, unsigned long) noexcept");
}
TEST_F(TargetDeclTest, DependentExprs) {
diff --git a/clang-tools-extra/docs/CMakeLists.txt b/clang-tools-extra/docs/CMakeLists.txt
index 8f442e1f661e..272db266b505 100644
--- a/clang-tools-extra/docs/CMakeLists.txt
+++ b/clang-tools-extra/docs/CMakeLists.txt
@@ -77,6 +77,7 @@ if (DOXYGEN_FOUND)
COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/doxygen.cfg
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating clang doxygen documentation." VERBATIM)
+ set_target_properties(doxygen-clang-tools PROPERTIES FOLDER "Clang Tools Extra/Docs")
if (LLVM_BUILD_DOCS)
add_dependencies(doxygen doxygen-clang-tools)
diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst
index 6a9892bada91..3e3195f6f681 100644
--- a/clang-tools-extra/docs/ReleaseNotes.rst
+++ b/clang-tools-extra/docs/ReleaseNotes.rst
@@ -375,12 +375,15 @@ Changes in existing checks
<clang-tidy/checks/readability/identifier-naming>` check in `GetConfigPerFile`
mode by resolving symbolic links to header files. Fixed handling of Hungarian
Prefix when configured to `LowerCase`. Added support for renaming designated
- initializers. Added support for renaming macro arguments.
+ initializers. Added support for renaming macro arguments. Fixed renaming
+ conflicts arising from out-of-line member function template definitions.
- Improved :doc:`readability-implicit-bool-conversion
<clang-tidy/checks/readability/implicit-bool-conversion>` check to provide
valid fix suggestions for ``static_cast`` without a preceding space and
- fixed problem with duplicate parentheses in double implicit casts.
+ fixed problem with duplicate parentheses in double implicit casts. Corrected
+ the fix suggestions for C23 and later by using C-style casts instead of
+ ``static_cast``.
- Improved :doc:`readability-redundant-inline-specifier
<clang-tidy/checks/readability/redundant-inline-specifier>` check to properly
diff --git a/clang-tools-extra/docs/clang-tidy/checks/readability/implicit-bool-conversion.rst b/clang-tools-extra/docs/clang-tidy/checks/readability/implicit-bool-conversion.rst
index 1ea67a0b55e9..1ab21ffeb422 100644
--- a/clang-tools-extra/docs/clang-tidy/checks/readability/implicit-bool-conversion.rst
+++ b/clang-tools-extra/docs/clang-tidy/checks/readability/implicit-bool-conversion.rst
@@ -96,8 +96,8 @@ The rules for generating fix-it hints are:
- ``if (!pointer)`` is changed to ``if (pointer == nullptr)``,
- in case of conversions from bool to other built-in types, an explicit
- ``static_cast`` is proposed to make it clear that a conversion is taking
- place:
+ ``static_cast`` (or a C-style cast since C23) is proposed to make it clear
+ that a conversion is taking place:
- ``int integer = boolean;`` is changed to
``int integer = static_cast<int>(boolean);``,
diff --git a/clang-tools-extra/include-cleaner/unittests/CMakeLists.txt b/clang-tools-extra/include-cleaner/unittests/CMakeLists.txt
index 1e89534b5111..416535649f62 100644
--- a/clang-tools-extra/include-cleaner/unittests/CMakeLists.txt
+++ b/clang-tools-extra/include-cleaner/unittests/CMakeLists.txt
@@ -4,6 +4,7 @@ set(LLVM_LINK_COMPONENTS
)
add_custom_target(ClangIncludeCleanerUnitTests)
+set_target_properties(ClangIncludeCleanerUnitTests PROPERTIES FOLDER "Clang Tools Extra/Tests")
add_unittest(ClangIncludeCleanerUnitTests ClangIncludeCleanerTests
AnalysisTest.cpp
FindHeadersTest.cpp
diff --git a/clang-tools-extra/modularize/ModularizeUtilities.cpp b/clang-tools-extra/modularize/ModularizeUtilities.cpp
index 53e8a49d1a54..b202b3aae8f8 100644
--- a/clang-tools-extra/modularize/ModularizeUtilities.cpp
+++ b/clang-tools-extra/modularize/ModularizeUtilities.cpp
@@ -435,11 +435,9 @@ static std::string replaceDotDot(StringRef Path) {
llvm::sys::path::const_iterator B = llvm::sys::path::begin(Path),
E = llvm::sys::path::end(Path);
while (B != E) {
- if (B->compare(".") == 0) {
- }
- else if (B->compare("..") == 0)
+ if (*B == "..")
llvm::sys::path::remove_filename(Buffer);
- else
+ else if (*B != ".")
llvm::sys::path::append(Buffer, *B);
++B;
}
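
The simplified loop above normalizes a path one component at a time: "." components are dropped, ".." removes the last component appended so far, and anything else is appended. A Python sketch of the same logic (string splitting stands in for llvm::sys::path iteration, and corner cases such as a leading ".." are glossed over):

    # Sketch: the "."/".." normalization performed by replaceDotDot above.
    def replace_dot_dot(path: str) -> str:
        buffer = []
        for comp in path.split("/"):
            if comp == "..":
                if buffer:
                    buffer.pop()  # ~ llvm::sys::path::remove_filename
            elif comp != ".":
                buffer.append(comp)  # ~ llvm::sys::path::append
        return "/".join(buffer)

    print(replace_dot_dot("a/b/../c/./d"))  # -> a/c/d
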
diff --git a/clang-tools-extra/pseudo/include/CMakeLists.txt b/clang-tools-extra/pseudo/include/CMakeLists.txt
index 2334cfa12e33..619b00f34a5c 100644
--- a/clang-tools-extra/pseudo/include/CMakeLists.txt
+++ b/clang-tools-extra/pseudo/include/CMakeLists.txt
@@ -29,3 +29,4 @@ add_custom_command(OUTPUT ${cxx_bnf_inc}
add_custom_target(cxx_gen
DEPENDS ${cxx_symbols_inc} ${cxx_bnf_inc}
VERBATIM)
+set_target_properties(cxx_gen PROPERTIES FOLDER "Clang Tools Extra/Sourcegenning")
diff --git a/clang-tools-extra/pseudo/tool/CMakeLists.txt b/clang-tools-extra/pseudo/tool/CMakeLists.txt
index 49e1dc29a5a4..bead38322839 100644
--- a/clang-tools-extra/pseudo/tool/CMakeLists.txt
+++ b/clang-tools-extra/pseudo/tool/CMakeLists.txt
@@ -26,4 +26,5 @@ add_custom_command(OUTPUT HTMLForestResources.inc
DEPENDS ${CLANG_SOURCE_DIR}/utils/bundle_resources.py HTMLForest.css HTMLForest.js HTMLForest.html
VERBATIM)
add_custom_target(clang-pseudo-resources DEPENDS HTMLForestResources.inc)
+set_target_properties(clang-pseudo-resources PROPERTIES FOLDER "Clang Tools Extra/Resources")
add_dependencies(clang-pseudo clang-pseudo-resources)
diff --git a/clang-tools-extra/pseudo/unittests/CMakeLists.txt b/clang-tools-extra/pseudo/unittests/CMakeLists.txt
index 821ca4d0652e..53583ceb6186 100644
--- a/clang-tools-extra/pseudo/unittests/CMakeLists.txt
+++ b/clang-tools-extra/pseudo/unittests/CMakeLists.txt
@@ -3,6 +3,7 @@ set(LLVM_LINK_COMPONENTS
)
add_custom_target(ClangPseudoUnitTests)
+set_target_properties(ClangPseudoUnitTests PROPERTIES FOLDER "Clang Tools Extra/Tests")
add_unittest(ClangPseudoUnitTests ClangPseudoTests
BracketTest.cpp
CXXTest.cpp
diff --git a/clang-tools-extra/test/CMakeLists.txt b/clang-tools-extra/test/CMakeLists.txt
index 7a1c168e22f9..50546f62259c 100644
--- a/clang-tools-extra/test/CMakeLists.txt
+++ b/clang-tools-extra/test/CMakeLists.txt
@@ -97,7 +97,6 @@ add_lit_testsuite(check-clang-extra "Running clang-tools-extra/test"
${CMAKE_CURRENT_BINARY_DIR}
DEPENDS ${CLANG_TOOLS_TEST_DEPS}
)
-set_target_properties(check-clang-extra PROPERTIES FOLDER "Clang extra tools' tests")
add_lit_testsuites(CLANG-EXTRA ${CMAKE_CURRENT_SOURCE_DIR}
DEPENDS ${CLANG_TOOLS_TEST_DEPS}
diff --git a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init-no-crash.cpp b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init-no-crash.cpp
index 300fff6cb179..2e2964dda1da 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init-no-crash.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init-no-crash.cpp
@@ -5,3 +5,11 @@ struct X {
// CHECK-MESSAGES: :[[@LINE-1]]:5: error: field has incomplete type 'X' [clang-diagnostic-error]
int a = 10;
};
+
+template <typename T> class NoCrash {
+ // CHECK-MESSAGES: :[[@LINE+2]]:20: error: base class has incomplete type
+ // CHECK-MESSAGES: :[[@LINE-2]]:29: note: definition of 'NoCrash<T>' is not complete until the closing '}'
+ class B : public NoCrash {
+ template <typename U> B(U u) {}
+ };
+};
diff --git a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init.cpp b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init.cpp
index 8d6992afef08..eaa73b906ce0 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init.cpp
@@ -463,12 +463,6 @@ struct NegativeIncompleteArrayMember {
char e[];
};
-template <typename T> class NoCrash {
- class B : public NoCrash {
- template <typename U> B(U u) {}
- };
-};
-
struct PositiveBitfieldMember {
PositiveBitfieldMember() {}
// CHECK-MESSAGES: :[[@LINE-1]]:3: warning: constructor does not initialize these fields: F
diff --git a/clang-tools-extra/test/clang-tidy/checkers/misc/new-delete-overloads.cpp b/clang-tools-extra/test/clang-tidy/checkers/misc/new-delete-overloads.cpp
index 78f021144b2e..f86fe8a4c5b1 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/misc/new-delete-overloads.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/misc/new-delete-overloads.cpp
@@ -12,16 +12,6 @@ struct S {
// CHECK-MESSAGES: :[[@LINE+1]]:7: warning: declaration of 'operator new' has no matching declaration of 'operator delete' at the same scope
void *operator new(size_t size) noexcept(false);
-struct T {
- // Sized deallocations are not enabled by default, and so this new/delete pair
- // does not match. However, we expect only one warning, for the new, because
- // the operator delete is a placement delete and we do not warn on mismatching
- // placement operations.
- // CHECK-MESSAGES: :[[@LINE+1]]:9: warning: declaration of 'operator new' has no matching declaration of 'operator delete' at the same scope
- void *operator new(size_t size) noexcept;
- void operator delete(void *ptr, size_t) noexcept; // ok only if sized deallocation is enabled
-};
-
struct U {
void *operator new(size_t size) noexcept;
void operator delete(void *ptr) noexcept;
diff --git a/clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming-outofline.cpp b/clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming-outofline.cpp
new file mode 100644
index 000000000000..f807875e2769
--- /dev/null
+++ b/clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming-outofline.cpp
@@ -0,0 +1,30 @@
+// RUN: %check_clang_tidy %s readability-identifier-naming %t -std=c++20 \
+// RUN: --config='{CheckOptions: { \
+// RUN: readability-identifier-naming.MethodCase: CamelCase, \
+// RUN: }}'
+
+namespace SomeNamespace {
+namespace Inner {
+
+class SomeClass {
+public:
+ template <typename T>
+ int someMethod();
+// CHECK-MESSAGES: :[[@LINE-1]]:9: warning: invalid case style for method 'someMethod' [readability-identifier-naming]
+// CHECK-FIXES: {{^}} int SomeMethod();
+};
+template <typename T>
+int SomeClass::someMethod() {
+// CHECK-FIXES: {{^}}int SomeClass::SomeMethod() {
+ return 5;
+}
+
+} // namespace Inner
+
+void someFunc() {
+ Inner::SomeClass S;
+ S.someMethod<int>();
+// CHECK-FIXES: {{^}} S.SomeMethod<int>();
+}
+
+} // namespace SomeNamespace
diff --git a/clang-tools-extra/test/clang-tidy/checkers/readability/implicit-bool-conversion.c b/clang-tools-extra/test/clang-tidy/checkers/readability/implicit-bool-conversion.c
new file mode 100644
index 000000000000..a8c69858f76b
--- /dev/null
+++ b/clang-tools-extra/test/clang-tidy/checkers/readability/implicit-bool-conversion.c
@@ -0,0 +1,354 @@
+// RUN: %check_clang_tidy %s readability-implicit-bool-conversion %t -- -- -std=c23
+
+#undef NULL
+#define NULL 0L
+
+void functionTakingBool(bool);
+void functionTakingInt(int);
+void functionTakingUnsignedLong(unsigned long);
+void functionTakingChar(char);
+void functionTakingFloat(float);
+void functionTakingDouble(double);
+void functionTakingSignedChar(signed char);
+
+
+////////// Implicit conversion from bool.
+
+void implicitConversionFromBoolSimpleCases() {
+ bool boolean = true;
+
+ functionTakingBool(boolean);
+
+ functionTakingInt(boolean);
+ // CHECK-MESSAGES: :[[@LINE-1]]:21: warning: implicit conversion 'bool' -> 'int' [readability-implicit-bool-conversion]
+ // CHECK-FIXES: functionTakingInt((int)boolean);
+
+ functionTakingUnsignedLong(boolean);
+ // CHECK-MESSAGES: :[[@LINE-1]]:30: warning: implicit conversion 'bool' -> 'unsigned long'
+ // CHECK-FIXES: functionTakingUnsignedLong((unsigned long)boolean);
+
+ functionTakingChar(boolean);
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'bool' -> 'char'
+ // CHECK-FIXES: functionTakingChar((char)boolean);
+
+ functionTakingFloat(boolean);
+ // CHECK-MESSAGES: :[[@LINE-1]]:23: warning: implicit conversion 'bool' -> 'float'
+ // CHECK-FIXES: functionTakingFloat((float)boolean);
+
+ functionTakingDouble(boolean);
+ // CHECK-MESSAGES: :[[@LINE-1]]:24: warning: implicit conversion 'bool' -> 'double'
+ // CHECK-FIXES: functionTakingDouble((double)boolean);
+}
+
+float implicitConversionFromBoolInReturnValue() {
+ bool boolean = false;
+ return boolean;
+ // CHECK-MESSAGES: :[[@LINE-1]]:10: warning: implicit conversion 'bool' -> 'float'
+ // CHECK-FIXES: return (float)boolean;
+}
+
+void implicitConversionFromBoolInSingleBoolExpressions(bool b1, bool b2) {
+ bool boolean = true;
+ boolean = b1 ^ b2;
+ boolean |= !b1 || !b2;
+ boolean &= b1;
+
+ int integer = boolean - 3;
+ // CHECK-MESSAGES: :[[@LINE-1]]:17: warning: implicit conversion 'bool' -> 'int'
+ // CHECK-FIXES: int integer = (int)boolean - 3;
+
+ float floating = boolean / 0.3f;
+ // CHECK-MESSAGES: :[[@LINE-1]]:20: warning: implicit conversion 'bool' -> 'float'
+ // CHECK-FIXES: float floating = (float)boolean / 0.3f;
+
+ char character = boolean;
+ // CHECK-MESSAGES: :[[@LINE-1]]:20: warning: implicit conversion 'bool' -> 'char'
+ // CHECK-FIXES: char character = (char)boolean;
+}
+
+void implicitConversionFromBoolInComplexBoolExpressions() {
+ bool boolean = true;
+ bool anotherBoolean = false;
+
+ int integer = boolean && anotherBoolean;
+ // CHECK-MESSAGES: :[[@LINE-1]]:17: warning: implicit conversion 'bool' -> 'int'
+ // CHECK-MESSAGES: :[[@LINE-2]]:28: warning: implicit conversion 'bool' -> 'int'
+ // CHECK-FIXES: int integer = (int)boolean && (int)anotherBoolean;
+
+ float floating = (boolean || anotherBoolean) * 0.3f;
+ // CHECK-MESSAGES: :[[@LINE-1]]:21: warning: implicit conversion 'bool' -> 'int'
+ // CHECK-MESSAGES: :[[@LINE-2]]:32: warning: implicit conversion 'bool' -> 'int'
+ // CHECK-FIXES: float floating = ((int)boolean || (int)anotherBoolean) * 0.3f;
+
+ double doubleFloating = (boolean && (anotherBoolean || boolean)) * 0.3;
+ // CHECK-MESSAGES: :[[@LINE-1]]:28: warning: implicit conversion 'bool' -> 'int'
+ // CHECK-MESSAGES: :[[@LINE-2]]:40: warning: implicit conversion 'bool' -> 'int'
+ // CHECK-MESSAGES: :[[@LINE-3]]:58: warning: implicit conversion 'bool' -> 'int'
+ // CHECK-FIXES: double doubleFloating = ((int)boolean && ((int)anotherBoolean || (int)boolean)) * 0.3;
+}
+
+void implicitConversionFromBoolLiterals() {
+ functionTakingInt(true);
+ // CHECK-MESSAGES: :[[@LINE-1]]:21: warning: implicit conversion 'bool' -> 'int'
+ // CHECK-FIXES: functionTakingInt(1);
+
+ functionTakingUnsignedLong(false);
+ // CHECK-MESSAGES: :[[@LINE-1]]:30: warning: implicit conversion 'bool' -> 'unsigned long'
+ // CHECK-FIXES: functionTakingUnsignedLong(0u);
+
+ functionTakingSignedChar(true);
+ // CHECK-MESSAGES: :[[@LINE-1]]:28: warning: implicit conversion 'bool' -> 'signed char'
+ // CHECK-FIXES: functionTakingSignedChar(1);
+
+ functionTakingFloat(false);
+ // CHECK-MESSAGES: :[[@LINE-1]]:23: warning: implicit conversion 'bool' -> 'float'
+ // CHECK-FIXES: functionTakingFloat(0.0f);
+
+ functionTakingDouble(true);
+ // CHECK-MESSAGES: :[[@LINE-1]]:24: warning: implicit conversion 'bool' -> 'double'
+ // CHECK-FIXES: functionTakingDouble(1.0);
+}
+
+void implicitConversionFromBoolInComparisons() {
+ bool boolean = true;
+ int integer = 0;
+
+ functionTakingBool(boolean == integer);
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'bool' -> 'int'
+ // CHECK-FIXES: functionTakingBool((int)boolean == integer);
+
+ functionTakingBool(integer != boolean);
+ // CHECK-MESSAGES: :[[@LINE-1]]:33: warning: implicit conversion 'bool' -> 'int'
+ // CHECK-FIXES: functionTakingBool(integer != (int)boolean);
+}
+
+void ignoreBoolComparisons() {
+ bool boolean = true;
+ bool anotherBoolean = false;
+
+ functionTakingBool(boolean == anotherBoolean);
+ functionTakingBool(boolean != anotherBoolean);
+}
+
+void ignoreExplicitCastsFromBool() {
+ bool boolean = true;
+
+ int integer = (int)boolean + 3;
+ float floating = (float)boolean * 0.3f;
+ char character = (char)boolean;
+}
+
+void ignoreImplicitConversionFromBoolInMacroExpansions() {
+ bool boolean = true;
+
+ #define CAST_FROM_BOOL_IN_MACRO_BODY boolean + 3
+ int integerFromMacroBody = CAST_FROM_BOOL_IN_MACRO_BODY;
+
+ #define CAST_FROM_BOOL_IN_MACRO_ARGUMENT(x) x + 3
+ int integerFromMacroArgument = CAST_FROM_BOOL_IN_MACRO_ARGUMENT(boolean);
+}
+
+////////// Implicit conversions to bool.
+
+void implicitConversionToBoolSimpleCases() {
+ int integer = 10;
+ functionTakingBool(integer);
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'int' -> 'bool'
+ // CHECK-FIXES: functionTakingBool(integer != 0);
+
+ unsigned long unsignedLong = 10;
+ functionTakingBool(unsignedLong);
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'unsigned long' -> 'bool'
+ // CHECK-FIXES: functionTakingBool(unsignedLong != 0u);
+
+ float floating = 0.0f;
+ functionTakingBool(floating);
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'float' -> 'bool'
+ // CHECK-FIXES: functionTakingBool(floating != 0.0f);
+
+ double doubleFloating = 1.0f;
+ functionTakingBool(doubleFloating);
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'double' -> 'bool'
+ // CHECK-FIXES: functionTakingBool(doubleFloating != 0.0);
+
+ signed char character = 'a';
+ functionTakingBool(character);
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'signed char' -> 'bool'
+ // CHECK-FIXES: functionTakingBool(character != 0);
+
+ int* pointer = nullptr;
+ functionTakingBool(pointer);
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'int *' -> 'bool'
+ // CHECK-FIXES: functionTakingBool(pointer != nullptr);
+}
+
+void implicitConversionToBoolInSingleExpressions() {
+ int integer = 10;
+ bool boolComingFromInt;
+ boolComingFromInt = integer;
+ // CHECK-MESSAGES: :[[@LINE-1]]:23: warning: implicit conversion 'int' -> 'bool'
+ // CHECK-FIXES: boolComingFromInt = (integer != 0);
+
+ float floating = 10.0f;
+ bool boolComingFromFloat;
+ boolComingFromFloat = floating;
+ // CHECK-MESSAGES: :[[@LINE-1]]:25: warning: implicit conversion 'float' -> 'bool'
+ // CHECK-FIXES: boolComingFromFloat = (floating != 0.0f);
+
+ signed char character = 'a';
+ bool boolComingFromChar;
+ boolComingFromChar = character;
+ // CHECK-MESSAGES: :[[@LINE-1]]:24: warning: implicit conversion 'signed char' -> 'bool'
+ // CHECK-FIXES: boolComingFromChar = (character != 0);
+
+ int* pointer = nullptr;
+ bool boolComingFromPointer;
+ boolComingFromPointer = pointer;
+ // CHECK-MESSAGES: :[[@LINE-1]]:27: warning: implicit conversion 'int *' -> 'bool'
+ // CHECK-FIXES: boolComingFromPointer = (pointer != nullptr);
+}
+
+void implicitConversionToBoolInComplexExpressions() {
+ bool boolean = true;
+
+ int integer = 10;
+ int anotherInteger = 20;
+ bool boolComingFromInteger;
+ boolComingFromInteger = integer + anotherInteger;
+ // CHECK-MESSAGES: :[[@LINE-1]]:27: warning: implicit conversion 'int' -> 'bool'
+ // CHECK-FIXES: boolComingFromInteger = ((integer + anotherInteger) != 0);
+}
+
+void implicitConversionInNegationExpressions() {
+ int integer = 10;
+ bool boolComingFromNegatedInt;
+ boolComingFromNegatedInt = !integer;
+ // CHECK-MESSAGES: :[[@LINE-1]]:30: warning: implicit conversion 'int' -> 'bool'
+ // CHECK-FIXES: boolComingFromNegatedInt = ((!integer) != 0);
+}
+
+bool implicitConversionToBoolInReturnValue() {
+ float floating = 1.0f;
+ return floating;
+ // CHECK-MESSAGES: :[[@LINE-1]]:10: warning: implicit conversion 'float' -> 'bool'
+ // CHECK-FIXES: return floating != 0.0f;
+}
+
+void implicitConversionToBoolFromLiterals() {
+ functionTakingBool(0);
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'int' -> 'bool'
+ // CHECK-FIXES: functionTakingBool(false);
+
+ functionTakingBool(1);
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'int' -> 'bool'
+ // CHECK-FIXES: functionTakingBool(true);
+
+ functionTakingBool(2ul);
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'unsigned long' -> 'bool'
+ // CHECK-FIXES: functionTakingBool(true);
+
+ functionTakingBool(0.0f);
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'float' -> 'bool'
+ // CHECK-FIXES: functionTakingBool(false);
+
+ functionTakingBool(1.0f);
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'float' -> 'bool'
+ // CHECK-FIXES: functionTakingBool(true);
+
+ functionTakingBool(2.0);
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'double' -> 'bool'
+ // CHECK-FIXES: functionTakingBool(true);
+
+ functionTakingBool('\0');
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'int' -> 'bool'
+ // CHECK-FIXES: functionTakingBool(false);
+
+ functionTakingBool('a');
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'int' -> 'bool'
+ // CHECK-FIXES: functionTakingBool(true);
+
+ functionTakingBool("");
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'char *' -> 'bool'
+ // CHECK-FIXES: functionTakingBool(true);
+
+ functionTakingBool("abc");
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'char *' -> 'bool'
+ // CHECK-FIXES: functionTakingBool(true);
+
+ functionTakingBool(NULL);
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'long' -> 'bool'
+ // CHECK-FIXES: functionTakingBool(false);
+}
+
+void implicitConversionToBoolFromUnaryMinusAndZeroLiterals() {
+ functionTakingBool(-0);
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'int' -> 'bool'
+ // CHECK-FIXES: functionTakingBool((-0) != 0);
+
+ functionTakingBool(-0.0f);
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'float' -> 'bool'
+ // CHECK-FIXES: functionTakingBool((-0.0f) != 0.0f);
+
+ functionTakingBool(-0.0);
+ // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: implicit conversion 'double' -> 'bool'
+ // CHECK-FIXES: functionTakingBool((-0.0) != 0.0);
+}
+
+void ignoreExplicitCastsToBool() {
+ int integer = 10;
+ bool boolComingFromInt = (bool)integer;
+
+ float floating = 10.0f;
+ bool boolComingFromFloat = (bool)floating;
+
+ char character = 'a';
+ bool boolComingFromChar = (bool)character;
+
+ int* pointer = nullptr;
+ bool booleanComingFromPointer = (bool)pointer;
+}
+
+void ignoreImplicitConversionToBoolInMacroExpansions() {
+ int integer = 3;
+
+ #define CAST_TO_BOOL_IN_MACRO_BODY integer && false
+ bool boolFromMacroBody = CAST_TO_BOOL_IN_MACRO_BODY;
+
+ #define CAST_TO_BOOL_IN_MACRO_ARGUMENT(x) x || true
+ bool boolFromMacroArgument = CAST_TO_BOOL_IN_MACRO_ARGUMENT(integer);
+}
+
+int implicitConversionReturnInt()
+{
+ return true;
+ // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: implicit conversion 'bool' -> 'int'
+ // CHECK-FIXES: return 1
+}
+
+int implicitConversionReturnIntWithParens()
+{
+ return (true);
+ // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: implicit conversion 'bool' -> 'int'
+ // CHECK-FIXES: return 1
+}
+
+bool implicitConversionReturnBool()
+{
+ return 1;
+ // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: implicit conversion 'int' -> 'bool'
+ // CHECK-FIXES: return true
+}
+
+bool implicitConversionReturnBoolWithParens()
+{
+ return (1);
+ // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: implicit conversion 'int' -> 'bool'
+ // CHECK-FIXES: return true
+}
+
+int keepCompactReturnInC_PR71848() {
+ bool foo = false;
+ return( foo );
+// CHECK-MESSAGES: :[[@LINE-1]]:9: warning: implicit conversion 'bool' -> 'int' [readability-implicit-bool-conversion]
+// CHECK-FIXES: return(int)( foo );
+}
diff --git a/clang-tools-extra/unittests/CMakeLists.txt b/clang-tools-extra/unittests/CMakeLists.txt
index 086a68e63830..77311540e719 100644
--- a/clang-tools-extra/unittests/CMakeLists.txt
+++ b/clang-tools-extra/unittests/CMakeLists.txt
@@ -1,5 +1,5 @@
add_custom_target(ExtraToolsUnitTests)
-set_target_properties(ExtraToolsUnitTests PROPERTIES FOLDER "Extra Tools Unit Tests")
+set_target_properties(ExtraToolsUnitTests PROPERTIES FOLDER "Clang Tools Extra/Tests")
function(add_extra_unittest test_dirname)
add_unittest(ExtraToolsUnitTests ${test_dirname} ${ARGN})
diff --git a/clang/CMakeLists.txt b/clang/CMakeLists.txt
index c20ce47a12ab..2ac0bccb42f5 100644
--- a/clang/CMakeLists.txt
+++ b/clang/CMakeLists.txt
@@ -1,4 +1,5 @@
cmake_minimum_required(VERSION 3.20.0)
+set(LLVM_SUBPROJECT_TITLE "Clang")
if(NOT DEFINED LLVM_COMMON_CMAKE_UTILS)
set(LLVM_COMMON_CMAKE_UTILS ${CMAKE_CURRENT_SOURCE_DIR}/../cmake)
@@ -349,10 +350,7 @@ if (LLVM_COMPILER_IS_GCC_COMPATIBLE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pedantic -Wno-long-long")
endif ()
- check_cxx_compiler_flag("-Werror -Wnested-anon-types" CXX_SUPPORTS_NO_NESTED_ANON_TYPES_FLAG)
- if( CXX_SUPPORTS_NO_NESTED_ANON_TYPES_FLAG )
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-nested-anon-types" )
- endif()
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-nested-anon-types" )
endif ()
# Determine HOST_LINK_VERSION on Darwin.
@@ -394,7 +392,7 @@ if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY)
# Installing the headers needs to depend on generating any public
# tablegen'd headers.
add_custom_target(clang-headers DEPENDS clang-tablegen-targets)
- set_target_properties(clang-headers PROPERTIES FOLDER "Misc")
+ set_target_properties(clang-headers PROPERTIES FOLDER "Clang/Resources")
if(NOT LLVM_ENABLE_IDE)
add_llvm_install_targets(install-clang-headers
DEPENDS clang-headers
@@ -402,6 +400,7 @@ if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY)
endif()
add_custom_target(bash-autocomplete DEPENDS utils/bash-autocomplete.sh)
+ set_target_properties(bash-autocomplete PROPERTIES FOLDER "Clang/Misc")
install(FILES utils/bash-autocomplete.sh
DESTINATION "${CMAKE_INSTALL_DATADIR}/clang"
COMPONENT bash-autocomplete)
@@ -482,7 +481,7 @@ add_custom_target(clang-tablegen-targets
omp_gen
ClangDriverOptions
${CLANG_TABLEGEN_TARGETS})
-set_target_properties(clang-tablegen-targets PROPERTIES FOLDER "Misc")
+set_target_properties(clang-tablegen-targets PROPERTIES FOLDER "Clang/Tablegenning/Targets")
list(APPEND LLVM_COMMON_DEPENDS clang-tablegen-targets)
# Force target to be built as soon as possible. Clang modules builds depend
@@ -547,7 +546,7 @@ endif()
# Custom target to install all clang libraries.
add_custom_target(clang-libraries)
-set_target_properties(clang-libraries PROPERTIES FOLDER "Misc")
+set_target_properties(clang-libraries PROPERTIES FOLDER "Clang/Install")
if(NOT LLVM_ENABLE_IDE)
add_llvm_install_targets(install-clang-libraries
diff --git a/clang/bindings/python/tests/CMakeLists.txt b/clang/bindings/python/tests/CMakeLists.txt
index c4cd2539e9d6..2543cf739463 100644
--- a/clang/bindings/python/tests/CMakeLists.txt
+++ b/clang/bindings/python/tests/CMakeLists.txt
@@ -11,7 +11,7 @@ add_custom_target(check-clang-python
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/..)
set(RUN_PYTHON_TESTS TRUE)
-set_target_properties(check-clang-python PROPERTIES FOLDER "Clang tests")
+set_target_properties(check-clang-python PROPERTIES FOLDER "Clang/Tests")
# Tests require libclang.so which is only built with LLVM_ENABLE_PIC=ON
if(NOT LLVM_ENABLE_PIC)
diff --git a/clang/cmake/caches/CrossWinToARMLinux.cmake b/clang/cmake/caches/CrossWinToARMLinux.cmake
index 736a54ece550..62e87c6c62f8 100644
--- a/clang/cmake/caches/CrossWinToARMLinux.cmake
+++ b/clang/cmake/caches/CrossWinToARMLinux.cmake
@@ -89,6 +89,13 @@ endif()
message(STATUS "Toolchain target to build: ${LLVM_TARGETS_TO_BUILD}")
+# Allow overriding the libc++ ABI version. Use 2 by default.
+if (NOT DEFINED LIBCXX_ABI_VERSION)
+ set(LIBCXX_ABI_VERSION 2)
+endif()
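+# An illustrative override (hypothetical invocation, not prescribed by this file):
+#   cmake -G Ninja -D LIBCXX_ABI_VERSION=1 -C <llvm-project>/clang/cmake/caches/CrossWinToARMLinux.cmake <llvm-project>/llvm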
+
+message(STATUS "Toolchain's Libc++ ABI version: ${LIBCXX_ABI_VERSION}")
+
if (NOT DEFINED CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE "Release" CACHE STRING "")
endif()
@@ -109,8 +116,15 @@ set(CLANG_DEFAULT_OBJCOPY "llvm-objcopy" CACHE STRING "")
set(CLANG_DEFAULT_RTLIB "compiler-rt" CACHE STRING "")
set(CLANG_DEFAULT_UNWINDLIB "libunwind" CACHE STRING "")
-if(WIN32)
- set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded" CACHE STRING "")
+if (NOT DEFINED CMAKE_MSVC_RUNTIME_LIBRARY AND WIN32)
+  # Note: Always specify the MT DLL runtime for the LLDB build configurations on a Windows host.
+ if (CMAKE_BUILD_TYPE STREQUAL "Debug")
+ set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreadedDebugDLL" CACHE STRING "")
+ else()
+ set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreadedDLL" CACHE STRING "")
+ endif()
+ # Grab all ucrt/vcruntime related DLLs into the binary installation folder.
+ set(CMAKE_INSTALL_UCRT_LIBRARIES ON CACHE BOOL "")
endif()
# Set up RPATH for the target runtime/builtin libraries.
@@ -127,6 +141,15 @@ set(BUILTINS_${TOOLCHAIN_TARGET_TRIPLE}_CMAKE_INSTALL_RPATH
set(BUILTINS_${TOOLCHAIN_TARGET_TRIPLE}_CMAKE_BUILD_WITH_INSTALL_RPATH ON CACHE BOOL "")
set(BUILTINS_${TOOLCHAIN_TARGET_TRIPLE}_LLVM_CMAKE_DIR "${LLVM_PROJECT_DIR}/llvm/cmake/modules" CACHE PATH "")
+if (DEFINED TOOLCHAIN_TARGET_COMPILER_FLAGS)
+ foreach(lang C;CXX;ASM)
+ set(BUILTINS_${TOOLCHAIN_TARGET_TRIPLE}_CMAKE_${lang}_FLAGS "${TOOLCHAIN_TARGET_COMPILER_FLAGS}" CACHE STRING "")
+ endforeach()
+endif()
+foreach(type SHARED;MODULE;EXE)
+ set(BUILTINS_${TOOLCHAIN_TARGET_TRIPLE}_CMAKE_${type}_LINKER_FLAGS "-fuse-ld=lld" CACHE STRING "")
+endforeach()
+
set(LLVM_RUNTIME_TARGETS "${TOOLCHAIN_TARGET_TRIPLE}" CACHE STRING "")
set(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR ON CACHE BOOL "")
@@ -137,6 +160,15 @@ set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_CMAKE_SYSROOT
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_CMAKE_INSTALL_RPATH "${RUNTIMES_INSTALL_RPATH}" CACHE STRING "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_CMAKE_BUILD_WITH_INSTALL_RPATH ON CACHE BOOL "")
+if (DEFINED TOOLCHAIN_TARGET_COMPILER_FLAGS)
+ foreach(lang C;CXX;ASM)
+ set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_CMAKE_${lang}_FLAGS "${TOOLCHAIN_TARGET_COMPILER_FLAGS}" CACHE STRING "")
+ endforeach()
+endif()
+foreach(type SHARED;MODULE;EXE)
+ set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_CMAKE_${type}_LINKER_FLAGS "-fuse-ld=lld" CACHE STRING "")
+endforeach()
+
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_BUILD_BUILTINS ON CACHE BOOL "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_BUILD_SANITIZERS OFF CACHE BOOL "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_BUILD_XRAY OFF CACHE BOOL "")
@@ -164,7 +196,7 @@ set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXXABI_ENABLE_SHARED
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXX_USE_COMPILER_RT ON CACHE BOOL "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXX_ENABLE_SHARED OFF CACHE BOOL "")
-set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXX_ABI_VERSION 2 CACHE STRING "")
+set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXX_ABI_VERSION ${LIBCXX_ABI_VERSION} CACHE STRING "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXX_CXX_ABI "libcxxabi" CACHE STRING "") #!!!
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXX_ENABLE_NEW_DELETE_DEFINITIONS ON CACHE BOOL "")
diff --git a/clang/cmake/caches/Fuchsia-stage2.cmake b/clang/cmake/caches/Fuchsia-stage2.cmake
index d5546e20873b..66e764968e85 100644
--- a/clang/cmake/caches/Fuchsia-stage2.cmake
+++ b/clang/cmake/caches/Fuchsia-stage2.cmake
@@ -19,7 +19,6 @@ set(LLVM_ENABLE_LLD ON CACHE BOOL "")
set(LLVM_ENABLE_LTO ON CACHE BOOL "")
set(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR ON CACHE BOOL "")
set(LLVM_ENABLE_PLUGINS OFF CACHE BOOL "")
-set(LLVM_ENABLE_TERMINFO OFF CACHE BOOL "")
set(LLVM_ENABLE_UNWIND_TABLES OFF CACHE BOOL "")
set(LLVM_ENABLE_Z3_SOLVER OFF CACHE BOOL "")
set(LLVM_ENABLE_ZLIB ON CACHE BOOL "")
diff --git a/clang/cmake/caches/Fuchsia.cmake b/clang/cmake/caches/Fuchsia.cmake
index 30a3b9116a46..4d3af3ad3f40 100644
--- a/clang/cmake/caches/Fuchsia.cmake
+++ b/clang/cmake/caches/Fuchsia.cmake
@@ -12,7 +12,6 @@ set(LLVM_ENABLE_DIA_SDK OFF CACHE BOOL "")
set(LLVM_ENABLE_LIBEDIT OFF CACHE BOOL "")
set(LLVM_ENABLE_LIBXML2 OFF CACHE BOOL "")
set(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR ON CACHE BOOL "")
-set(LLVM_ENABLE_TERMINFO OFF CACHE BOOL "")
set(LLVM_ENABLE_UNWIND_TABLES OFF CACHE BOOL "")
set(LLVM_ENABLE_Z3_SOLVER OFF CACHE BOOL "")
set(LLVM_ENABLE_ZLIB OFF CACHE BOOL "")
@@ -34,7 +33,6 @@ set(_FUCHSIA_BOOTSTRAP_PASSTHROUGH
LibXml2_ROOT
LLVM_ENABLE_CURL
LLVM_ENABLE_HTTPLIB
- LLVM_ENABLE_TERMINFO
LLVM_ENABLE_LIBEDIT
CURL_ROOT
OpenSSL_ROOT
@@ -48,11 +46,6 @@ set(_FUCHSIA_BOOTSTRAP_PASSTHROUGH
PANEL_LIBRARIES
# Deprecated
- Terminfo_ROOT
-
- Terminfo_LIBRARIES
-
- # Deprecated
LibEdit_ROOT
LibEdit_INCLUDE_DIRS
diff --git a/clang/cmake/caches/HLSL.cmake b/clang/cmake/caches/HLSL.cmake
index 27f848fdccf0..ed813f60c9c6 100644
--- a/clang/cmake/caches/HLSL.cmake
+++ b/clang/cmake/caches/HLSL.cmake
@@ -12,7 +12,7 @@ set(LLVM_ENABLE_PROJECTS "clang;clang-tools-extra" CACHE STRING "")
set(CLANG_ENABLE_HLSL On CACHE BOOL "")
-if (NOT CMAKE_CONFIGURATION_TYPES)
+if (HLSL_ENABLE_DISTRIBUTION)
set(LLVM_DISTRIBUTION_COMPONENTS
"clang;hlsl-resource-headers;clangd"
CACHE STRING "")
diff --git a/clang/cmake/caches/VectorEngine.cmake b/clang/cmake/caches/VectorEngine.cmake
index 2f968a21cc40..b429fb0997d7 100644
--- a/clang/cmake/caches/VectorEngine.cmake
+++ b/clang/cmake/caches/VectorEngine.cmake
@@ -13,9 +13,7 @@
# ninja
#
-# Disable TERMINFO, ZLIB, and ZSTD for VE since there is no pre-compiled
-# libraries.
-set(LLVM_ENABLE_TERMINFO OFF CACHE BOOL "")
+# Disable ZLIB and ZSTD for VE since there are no pre-compiled libraries.
set(LLVM_ENABLE_ZLIB OFF CACHE BOOL "")
set(LLVM_ENABLE_ZSTD OFF CACHE BOOL "")
diff --git a/clang/cmake/modules/AddClang.cmake b/clang/cmake/modules/AddClang.cmake
index 75b0080f6715..a5ef639187d9 100644
--- a/clang/cmake/modules/AddClang.cmake
+++ b/clang/cmake/modules/AddClang.cmake
@@ -26,7 +26,6 @@ function(clang_tablegen)
if(CTG_TARGET)
add_public_tablegen_target(${CTG_TARGET})
- set_target_properties( ${CTG_TARGET} PROPERTIES FOLDER "Clang tablegenning")
set_property(GLOBAL APPEND PROPERTY CLANG_TABLEGEN_TARGETS ${CTG_TARGET})
endif()
endfunction(clang_tablegen)
@@ -138,13 +137,11 @@ macro(add_clang_library name)
endif()
endforeach()
- set_target_properties(${name} PROPERTIES FOLDER "Clang libraries")
set_clang_windows_version_resource_properties(${name})
endmacro(add_clang_library)
macro(add_clang_executable name)
add_llvm_executable( ${name} ${ARGN} )
- set_target_properties(${name} PROPERTIES FOLDER "Clang executables")
set_clang_windows_version_resource_properties(${name})
endmacro(add_clang_executable)
diff --git a/clang/docs/CMakeLists.txt b/clang/docs/CMakeLists.txt
index 4163dd2d90ad..51e9db29f887 100644
--- a/clang/docs/CMakeLists.txt
+++ b/clang/docs/CMakeLists.txt
@@ -78,6 +78,7 @@ if (LLVM_ENABLE_DOXYGEN)
COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/doxygen.cfg
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating clang doxygen documentation." VERBATIM)
+ set_target_properties(doxygen-clang PROPERTIES FOLDER "Clang/Docs")
if (LLVM_BUILD_DOCS)
add_dependencies(doxygen doxygen-clang)
diff --git a/clang/docs/ClangFormatStyleOptions.rst b/clang/docs/ClangFormatStyleOptions.rst
index 6d092219877f..1a7d0e6a05e3 100644
--- a/clang/docs/ClangFormatStyleOptions.rst
+++ b/clang/docs/ClangFormatStyleOptions.rst
@@ -1421,13 +1421,21 @@ the configuration (without a prefix: ``Auto``).
.. code-block:: c++
- true:
#define A \
int aaaa; \
int b; \
int dddddddddd;
- false:
+ * ``ENAS_LeftWithLastLine`` (in configuration: ``LeftWithLastLine``)
+ Align escaped newlines as far left as possible, using the last line of
+ the preprocessor directive as the reference if it's the longest.
+
+ .. code-block:: c++
+
+ #define A \
+ int aaaa; \
+ int b; \
+ int dddddddddd;
* ``ENAS_Right`` (in configuration: ``Right``)
Align escaped newlines in the right-most column.
diff --git a/clang/docs/HLSL/AvailabilityDiagnostics.rst b/clang/docs/HLSL/AvailabilityDiagnostics.rst
new file mode 100644
index 000000000000..bb9d02f21dde
--- /dev/null
+++ b/clang/docs/HLSL/AvailabilityDiagnostics.rst
@@ -0,0 +1,137 @@
+=============================
+HLSL Availability Diagnostics
+=============================
+
+.. contents::
+ :local:
+
+Introduction
+============
+
+HLSL availability diagnostics emit errors or warnings when unavailable shader APIs are used. Unavailable shader APIs are APIs that are exposed in HLSL code but are not available in the target shader stage or shader model version.
+
+There are three modes of HLSL availability diagnostics:
+
+#. **Default mode** - the compiler emits an error when an unavailable API is found in code that is reachable from the shader entry point function or from an exported library function (when compiling a shader library).
+
+#. **Relaxed mode** - same as the default mode except that the compiler emits warnings instead of errors. This mode is enabled by ``-Wno-error=hlsl-availability``.
+
+#. **Strict mode** - the compiler emits an error when an unavailable API is found in any parsed code, regardless of whether it is reachable from a shader entry point or an exported library function. This mode is enabled by ``-fhlsl-strict-availability``.
+
+Implementation Details
+======================
+
+Environment Parameter
+---------------------
+
+In order to encode API availability based on the shader model version and shader stage, a new ``environment`` parameter was added to the existing Clang ``availability`` attribute.
+
+The values allowed for this parameter are a subset of the values allowed as the ``llvm::Triple`` environment component. If the environment parameter is present, the declared availability attribute applies only to targets with the same platform and environment.
+
+Default and Relaxed Diagnostic Modes
+------------------------------------
+
+This mode is implemented in the ``DiagnoseHLSLAvailability`` class in ``SemaHLSL.cpp`` and is invoked after the whole translation unit is parsed (from ``Sema::ActOnEndOfTranslationUnit``). The implementation iterates over all shader entry points and exported library functions in the translation unit and performs an AST traversal of each function body.
+
+When a reference to another function or member method is found (``DeclRefExpr`` or ``MemberExpr``) and the referenced function has a body, its AST is scanned as well. This chain of AST traversals reaches all of the code that is reachable from the initial shader entry point or exported library function and avoids the need to build a call graph.
+
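+As a standalone sketch of this idea (a simplified model using plain data structures, not the actual Clang AST API):
+
+.. code-block:: c++
+
+   #include <set>
+   #include <vector>
+
+   struct Fn {
+     std::vector<Fn *> Referenced; // functions referenced via DeclRefExpr/MemberExpr
+     bool HasBody = false;
+     int MinShaderModel = 0;       // from the availability attribute; 0 if none
+   };
+
+   // Scan everything reachable from an entry point. No call graph is built;
+   // the traversal itself discovers all reachable code.
+   void scan(Fn *F, int TargetSM, std::set<Fn *> &Scanned) {
+     if (!Scanned.insert(F).second)
+       return; // already scanned; avoid duplicate work and diagnostics
+     for (Fn *Callee : F->Referenced) {
+       if (Callee->HasBody)
+         scan(Callee, TargetSM, Scanned); // recurse into the definition
+       else if (Callee->MinShaderModel > TargetSM) {
+         // diagnose: unavailable API (error by default, warning in relaxed mode)
+       }
+     }
+   }
+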
+All shader APIs have an availability attribute that specifies the shader model version (and environment, if applicable) in which the API was first introduced. When a reference to a function without a definition is found and it has an availability attribute, the version of the attribute is checked against the target shader model version and shader stage (if the shader stage context is known), and an appropriate diagnostic is generated as needed.
+
+All shader entry functions have an ``HLSLShaderAttr`` attribute that specifies what type of shader the function represents. However, for exported library functions the target shader stage is unknown, so in this case HLSL API availability is checked only against the shader model version. This means that for exported library functions, diagnostics for APIs whose availability is specific to a shader stage are deferred until DXIL linking time.
+
+A list of functions that were already scanned is kept in order to avoid duplicate scans and diagnostics (see ``DiagnoseHLSLAvailability::ScannedDecls``). It might happen that a shader library has multiple shader entry points for different shader stages that all call into the same shared function. It is therefore important to record not just that a function has been scanned, but also in which shader stage context. This is done by using an ``llvm::DenseMap`` that maps a ``FunctionDecl *`` to an ``unsigned`` bitmap representing the set of shader stages (or environments) the function has been scanned for. The ``N``'th bit in the set is set if the function has been scanned in a shader environment whose ``HLSLShaderAttr::ShaderType`` integer value equals ``N``.
+
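+As an illustration, this bookkeeping amounts to the following standalone sketch (using the standard library instead of ``llvm::DenseMap``):
+
+.. code-block:: c++
+
+   #include <unordered_map>
+
+   using StageSet = unsigned; // bit N set <=> scanned in stage context N
+
+   // Returns true if FD was already scanned in stage StageN; otherwise
+   // records the scan and returns false.
+   bool alreadyScanned(std::unordered_map<const void *, StageSet> &Scanned,
+                       const void *FD, unsigned StageN) {
+     StageSet &Stages = Scanned[FD]; // value-initialized to 0 on first access
+     if (Stages & (1u << StageN))
+       return true;
+     Stages |= (1u << StageN);
+     return false;
+   }
+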
+The emitted diagnostic messages belong to the ``hlsl-availability`` diagnostic group and are reported as errors by default. With the ``-Wno-error=hlsl-availability`` flag they become warnings, which constitutes the relaxed HLSL diagnostics mode.
+
+Strict Diagnostic Mode
+----------------------
+
+When strict HLSL availability diagnostic mode is enabled, the compiler must report all HLSL API availability issues regardless of code reachability. The implementation of this mode takes advantage of an existing diagnostic scan in the ``DiagnoseUnguardedAvailability`` class, which already traverses the AST of each function as soon as the function body has been parsed. For HLSL, this pass was only slightly modified, for example by making sure the diagnostic messages are in the ``hlsl-availability`` group and that availability checks based on shader stage are not performed when the shader stage context is unknown.
+
+If the compilation target is a shader library, only availability based on the shader model version can be diagnosed during this scan. To diagnose availability based on shader stage, the compiler needs to run the AST traversals implemented in ``DiagnoseHLSLAvailability`` at the end of the translation unit as described above.
+
+As a result, availability based on a specific shader stage will only be diagnosed in code that is reachable from a shader entry point or library export function. It also means that function bodies might be scanned multiple times. When that happens, care should be taken not to produce duplicate diagnostics.
+
+Examples
+========
+
+**Note:** For the examples below, the ``WaveActiveCountBits`` API function became available in shader model 6.0 and ``WaveMultiPrefixSum`` in shader model 6.5.
+
+The availability of the ``ddx`` function depends on the shader stage. It is available for pixel shaders in shader model 2.1 and higher, and for compute, mesh, and amplification shaders in shader model 6.6 and higher. For any other shader stage it is not available.
+
+Compute shader example
+----------------------
+
+.. code-block:: c++
+
+ float unusedFunction(float f) {
+ return ddx(f);
+ }
+
+ [numthreads(4, 4, 1)]
+ void main(uint3 threadId : SV_DispatchThreadId) {
+ float f1 = ddx(threadId.x);
+ float f2 = WaveActiveCountBits(threadId.y == 1.0);
+ }
+
+When compiled as a compute shader for shader model version 5.0, Clang will emit the following errors by default:
+
+.. code-block:: console
+
+ <>:7:13: error: 'ddx' is only available in compute shader environment on Shader Model 6.6 or newer
+   <>:8:13: error: 'WaveActiveCountBits' is only available on Shader Model 6.0 or newer
+
+In relaxed diagnostic mode these errors become warnings.
+
+In strict diagnostic mode, in addition to the two errors above, Clang also emits an error for the ``ddx`` call in ``unusedFunction``:
+
+.. code-block:: console
+
+   <>:2:9: error: 'ddx' is only available in compute shader environment on Shader Model 6.6 or newer
+   <>:7:13: error: 'ddx' is only available in compute shader environment on Shader Model 6.6 or newer
+   <>:8:13: error: 'WaveActiveCountBits' is only available on Shader Model 6.0 or newer
+
+Shader library example
+----------------------
+
+.. code-block:: c++
+
+ float myFunction(float f) {
+ return ddx(f);
+ }
+
+ float unusedFunction(float f) {
+ return WaveMultiPrefixSum(f, 1.0);
+ }
+
+ [shader("compute")]
+ [numthreads(4, 4, 1)]
+ void main(uint3 threadId : SV_DispatchThreadId) {
+ float f = 3;
+ float e = myFunction(f);
+ }
+
+ [shader("pixel")]
+ void main() {
+ float f = 3;
+ float e = myFunction(f);
+ }
+
+When compiled as a shader library for shader model version 6.4, Clang will emit the following error by default:
+
+.. code-block:: console
+
+   <>:2:9: error: 'ddx' is only available in compute shader environment on Shader Model 6.6 or newer
+
+In relaxed diagnostic mode this error becomes a warning.
+
+In strict diagnostic mode Clang will also emit errors for availability issues in code that is not reachable from any of the entry points:
+
+.. code-block:: console
+
+   <>:2:9: error: 'ddx' is only available in compute shader environment on Shader Model 6.6 or newer
+   <>:6:9: error: 'WaveMultiPrefixSum' is only available on Shader Model 6.5 or newer
+
+Note that ``myFunction`` is reachable from both the pixel and compute shader entry points and is therefore scanned twice, once for each context. The diagnostic is emitted only for the compute shader context.
diff --git a/clang/docs/HLSL/HLSLDocs.rst b/clang/docs/HLSL/HLSLDocs.rst
index 97b2425f013b..1e50a66d984b 100644
--- a/clang/docs/HLSL/HLSLDocs.rst
+++ b/clang/docs/HLSL/HLSLDocs.rst
@@ -16,3 +16,4 @@ HLSL Design and Implementation
ResourceTypes
EntryFunctions
FunctionCalls
+ AvailabilityDiagnostics
diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst
index a09c409f8f91..46f99d0bbdd0 100644
--- a/clang/docs/LanguageExtensions.rst
+++ b/clang/docs/LanguageExtensions.rst
@@ -4403,6 +4403,7 @@ immediately after the name being declared.
For example, this applies the GNU ``unused`` attribute to ``a`` and ``f``, and
also applies the GNU ``noreturn`` attribute to ``f``.
+Examples:
+
.. code-block:: c++
[[gnu::unused]] int a, f [[gnu::noreturn]] ();
@@ -4412,6 +4413,42 @@ Target-Specific Extensions
Clang supports some language features conditionally on some targets.
+AMDGPU Language Extensions
+--------------------------
+
+__builtin_amdgcn_fence
+^^^^^^^^^^^^^^^^^^^^^^
+
+``__builtin_amdgcn_fence`` emits a fence. It takes the following arguments:
+
+* ``unsigned`` atomic ordering, e.g. ``__ATOMIC_ACQUIRE``
+* ``const char *`` synchronization scope, e.g. ``workgroup``
+* Zero or more ``const char *`` address space names.
+
+The address space arguments must be one of the following string literals:
+
+* ``"local"``
+* ``"global"``
+
+If one or more address space names are provided, the code generator will
+attempt to emit potentially faster instructions that order accesses to at
+least those address spaces. Emitting such instructions may not always be
+possible, and the compiler is free to fence more aggressively.
+
+If no address space names are provided, all address spaces are fenced.
+
+.. code-block:: c++
+
+ // Fence all address spaces.
+ __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");
+ __builtin_amdgcn_fence(__ATOMIC_ACQUIRE, "agent");
+
+ // Fence only requested address spaces.
+   __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup", "local");
+   __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup", "local", "global");
+
+
ARM/AArch64 Language Extensions
-------------------------------
@@ -5602,4 +5639,4 @@ Compiling different TUs depending on these flags (including use of
``std::hardware_constructive_interference`` or
``std::hardware_destructive_interference``) with different compilers, macro
definitions, or architecture flags will lead to ODR violations and should be
-avoided. \ No newline at end of file
+avoided.
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 2f83f5c6d54e..182f8b582425 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -59,6 +59,18 @@ C++ Specific Potentially Breaking Changes
- Clang now performs semantic analysis for unary operators with dependent operands
that are known to be of non-class non-enumeration type prior to instantiation.
+ This change uncovered a bug in libstdc++ 14.1.0 which may cause compile failures
+ on systems using that version of libstdc++ and Clang 19, with an error that looks
+ something like this:
+
+ .. code-block:: text
+
+ <source>:4:5: error: expression is not assignable
+ 4 | ++this;
+ | ^ ~~~~
+
+ To fix this, update libstdc++ to version 14.1.1 or greater.
+
ABI Changes in This Version
---------------------------
- Fixed Microsoft name mangling of implicitly defined variables used for thread
@@ -155,6 +167,11 @@ C++17 Feature Support
files because they may not be stable across multiple TUs (the values may vary
based on compiler version as well as CPU tuning). #GH60174
+C++14 Feature Support
+^^^^^^^^^^^^^^^^^^^^^
+- Sized deallocation is enabled by default in C++14 and later. Users may specify
+  ``-fno-sized-deallocation`` to disable it if regressions occur.
+
C++20 Feature Support
^^^^^^^^^^^^^^^^^^^^^
@@ -325,6 +342,10 @@ New Compiler Flags
``__attribute__((section(...)))``. This enables linker GC to collect unused
symbols without having to use a per-symbol section.
+- ``-fms-define-stdc`` and its clang-cl counterpart ``/Zc:__STDC__``.
+ Matches MSVC behaviour by defining ``__STDC__`` to ``1`` when
+ MSVC compatibility mode is used. It has no effect for C++ code.
+
Deprecated Compiler Flags
-------------------------
@@ -605,9 +626,14 @@ Bug Fixes in This Version
- Clang now correctly disallows VLA type compound literals, e.g. ``(int[size]){}``,
as the C standard mandates. (#GH89835)
+- ``__is_array`` and ``__is_bounded_array`` no longer return ``true`` for
+ zero-sized arrays. Fixes (#GH54705).
+
Bug Fixes to Compiler Builtins
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+- Fix crash when atomic builtins are called with pointer to zero-size struct (#GH90330)
+
Bug Fixes to Attribute Support
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -710,7 +736,6 @@ Bug Fixes to C++ Support
from being explicitly specialized for a given implicit instantiation of the class template.
- Fixed a crash when ``this`` is used in a dependent class scope function template specialization
that instantiates to a static member function.
-
- Fix crash when inheriting from a cv-qualified type. Fixes #GH35603
- Fix a crash when the using enum declaration uses an anonymous enumeration. Fixes (#GH86790).
- Handled an edge case in ``getFullyPackExpandedSize`` so that we now avoid a false-positive diagnostic. (#GH84220)
@@ -758,6 +783,25 @@ Bug Fixes to C++ Support
- Fix a bug with checking constrained non-type template parameters for equivalence. Fixes (#GH77377).
- Fix a bug where the last argument was not considered when considering the most viable function for
explicit object argument member functions. Fixes (#GH92188).
+- Fix a C++11 crash when a non-const non-static member function is defined out-of-line with
+ the ``constexpr`` specifier. Fixes (#GH61004).
+- Clang no longer transforms dependent qualified names into implicit class member access expressions
+ until it can be determined whether the name is that of a non-static member.
+- Clang now correctly diagnoses when the current instantiation is used as an incomplete base class.
+- Clang no longer treats ``constexpr`` class scope function template specializations of non-static members
+ as implicitly ``const`` in language modes after C++11.
+- Fixed a crash when trying to emit captures in a lambda call operator with an explicit object
+ parameter that is called on a derived type of the lambda.
+  Fixes (#GH87210), (#GH89541).
+- Clang no longer tries to check if an expression is immediate-escalating in an unevaluated context.
+ Fixes (#GH91308).
+- Fix a crash caused by a regression in the handling of ``source_location``
+ in dependent contexts. Fixes (#GH92680).
+- Fixed a crash when diagnosing failed conversions involving template parameter
+ packs. (#GH93076)
+- Fixed a regression introduced in Clang 18 causing a static function overloading a non-static function
+ with the same parameters not to be diagnosed. (Fixes #GH93456).
+- Clang now diagnoses unexpanded parameter packs in attributes. (Fixes #GH93269).
Bug Fixes to AST Handling
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -770,12 +814,15 @@ Miscellaneous Bug Fixes
- Fixed an infinite recursion in ASTImporter, on return type declared inside
body of C++11 lambda without trailing return (#GH68775).
+- Fixed declaration name source location of instantiated function definitions (#GH71161).
+- Improved diagnostic output to print an expression instead of ``no argument`` when comparing Values as template arguments.
Miscellaneous Clang Crashes Fixed
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Do not attempt to dump the layout of dependent types or invalid declarations
when ``-fdump-record-layouts-complete`` is passed. Fixes #GH83684.
+- Fixed a crash on unhandled StructuralValues in the template differ (#GH93068).
OpenACC Specific Changes
------------------------
@@ -789,6 +836,8 @@ AMDGPU Support
X86 Support
^^^^^^^^^^^
+- Removed knl/knm-specific ISA support: AVX512PF, AVX512ER, PREFETCHWT1.
+
Arm and AArch64 Support
^^^^^^^^^^^^^^^^^^^^^^^
@@ -841,6 +890,10 @@ Windows Support
including STL headers will no longer slow down compile times since ``intrin.h``
is not included from MSVC STL.
+- When the target triple is ``*-windows-msvc``, strict aliasing is now disabled by default
+  to ensure compatibility with MSVC. Previously, strict aliasing was only disabled if the
+  driver mode was ``cl``.
+
LoongArch Support
^^^^^^^^^^^^^^^^^
@@ -911,9 +964,10 @@ clang-format
``BreakTemplateDeclarations``.
- ``AlwaysBreakAfterReturnType`` is deprecated and renamed to
``BreakAfterReturnType``.
-- Handles Java ``switch`` expressions.
+- Handles Java switch expressions.
- Adds ``AllowShortCaseExpressionOnASingleLine`` option.
- Adds ``AlignCaseArrows`` suboption to ``AlignConsecutiveShortCaseStatements``.
+- Adds ``LeftWithLastLine`` suboption to ``AlignEscapedNewlines``.
libclang
--------
diff --git a/clang/docs/analyzer/checkers.rst b/clang/docs/analyzer/checkers.rst
index eb8b58323da4..3a31708a1e9d 100644
--- a/clang/docs/analyzer/checkers.rst
+++ b/clang/docs/analyzer/checkers.rst
@@ -1179,6 +1179,47 @@ security.insecureAPI.DeprecatedOrUnsafeBufferHandling (C)
strncpy(buf, "a", 1); // warn
}
+security.SetgidSetuidOrder (C)
+""""""""""""""""""""""""""""""
+When dropping user-level and group-level privileges in a program by using
+``setuid`` and ``setgid`` calls, it is important to reset the group-level
+privileges (with ``setgid``) first, because ``setgid`` will likely fail once
+the superuser privileges have been dropped.
+
+The checker checks for sequences of ``setuid(getuid())`` and
+``setgid(getgid())`` calls (in this order). If such a sequence is found and
+there is no other privilege-changing function call (``seteuid``, ``setreuid``,
+``setresuid`` and the GID versions of these) in between, a warning is
+generated. The checker matches only literal ``setuid(getuid())`` calls (and
+the GID versions); it does not recognize, for example, the case where the
+result of ``getuid()`` is stored in a variable.
+
+.. code-block:: c
+
+ void test1() {
+ // ...
+ // end of section with elevated privileges
+ // reset privileges (user and group) to normal user
+ if (setuid(getuid()) != 0) {
+ handle_error();
+ return;
+ }
+ if (setgid(getgid()) != 0) { // warning: A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail
+ handle_error();
+ return;
+ }
+ // user-ID and group-ID are reset to normal user now
+ // ...
+ }
+
+In the code above the problem is that ``setuid(getuid())`` removes the
+superuser privileges before ``setgid(getgid())`` is called. To fix the
+problem, ``setgid(getgid())`` should be called first. Further attention is
+needed to avoid code like ``setgid(getuid())`` (which this checker does not
+detect) and to always check the return value of these calls.
+
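+A sketch of the corrected order, reusing ``handle_error`` from the example
+above:
+
+.. code-block:: c
+
+  void test1_fixed() {
+    // reset group-level privileges first, while still privileged
+    if (setgid(getgid()) != 0) {
+      handle_error();
+      return;
+    }
+    // then drop user-level privileges
+    if (setuid(getuid()) != 0) {
+      handle_error();
+      return;
+    }
+  }
+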
+This check corresponds to SEI CERT Rule `POS36-C <https://wiki.sei.cmu.edu/confluence/display/c/POS36-C.+Observe+correct+revocation+order+while+relinquishing+privileges>`_.
+
.. _unix-checkers:
unix
@@ -2792,6 +2833,41 @@ Warn on mmap() calls that are both writable and executable.
// code
}
+.. _alpha-security-putenv-stack-array:
+
+alpha.security.PutenvStackArray (C)
+"""""""""""""""""""""""""""""""""""
+Finds calls to the ``putenv`` function which pass a pointer to a stack-allocated
+(automatic) array as the argument. ``putenv`` does not copy the passed string;
+only a pointer to the data is stored, and this data can be read even by other
+threads. The contents of a stack-allocated array are likely to be overwritten
+after the enclosing function returns.
+
+The problem can be solved by using a static array variable or dynamically
+allocated memory. Even better, avoid ``putenv`` altogether (it has other
+problems related to memory leaks) and use ``setenv`` instead, as sketched
+after the example below.
+
+The check corresponds to CERT rule
+`POS34-C. Do not call putenv() with a pointer to an automatic variable as the argument
+<https://wiki.sei.cmu.edu/confluence/display/c/POS34-C.+Do+not+call+putenv%28%29+with+a+pointer+to+an+automatic+variable+as+the+argument>`_.
+
+.. code-block:: c
+
+ int f() {
+ char env[] = "NAME=value";
+ return putenv(env); // putenv function should not be called with stack-allocated string
+ }
+
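+A safer alternative, shown here as a sketch, is ``setenv``, which copies the
+passed strings into environment storage:
+
+.. code-block:: c
+
+  int f_fixed() {
+    // setenv copies "NAME" and "value"; no pointer to the stack is retained
+    return setenv("NAME", "value", 1);
+  }
+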
+There is one case where the checker can report a false positive: when the
+stack-allocated array is passed to ``putenv`` in a function or code branch
+that does not return (one that calls ``fork`` or an ``exec``-like function).
+
+Another special case is when ``putenv`` is called from ``main``. Here the
+stack is deallocated only at the end of the program, so it should be no
+problem to use the stack-allocated string (though a multi-threaded program
+may require more attention). The checker does not warn when stack space of
+``main`` is used in the ``putenv`` call.
+
.. _alpha-security-ReturnPtrRange:
alpha.security.ReturnPtrRange (C)
@@ -2818,55 +2894,6 @@ alpha.security.cert
SEI CERT checkers which tries to find errors based on their `C coding rules <https://wiki.sei.cmu.edu/confluence/display/c/2+Rules>`_.
-.. _alpha-security-cert-pos-checkers:
-
-alpha.security.cert.pos
-^^^^^^^^^^^^^^^^^^^^^^^
-
-SEI CERT checkers of `POSIX C coding rules <https://wiki.sei.cmu.edu/confluence/pages/viewpage.action?pageId=87152405>`_.
-
-.. _alpha-security-cert-pos-34c:
-
-alpha.security.cert.pos.34c
-"""""""""""""""""""""""""""
-Finds calls to the ``putenv`` function which pass a pointer to an automatic variable as the argument.
-
-.. code-block:: c
-
- int func(const char *var) {
- char env[1024];
- int retval = snprintf(env, sizeof(env),"TEST=%s", var);
- if (retval < 0 || (size_t)retval >= sizeof(env)) {
- /* Handle error */
- }
-
- return putenv(env); // putenv function should not be called with auto variables
- }
-
-Limitations:
-
- - Technically, one can pass automatic variables to ``putenv``,
- but one needs to ensure that the given environment key stays
- alive until it's removed or overwritten.
- Since the analyzer cannot keep track of which envvars get overwritten
- and when, it needs to be slightly more aggressive and warn for such
- cases too, leading in some cases to false-positive reports like this:
-
- .. code-block:: c
-
- void baz() {
- char env[] = "NAME=value";
- putenv(env); // false-positive warning: putenv function should not be called...
- // More code...
- putenv((char *)"NAME=anothervalue");
- // This putenv call overwrites the previous entry, thus that can no longer dangle.
- } // 'env' array becomes dead only here.
-
-alpha.security.cert.env
-^^^^^^^^^^^^^^^^^^^^^^^
-
-SEI CERT checkers of `Environment C coding rules <https://wiki.sei.cmu.edu/confluence/x/JdcxBQ>`_.
-
alpha.security.taint
^^^^^^^^^^^^^^^^^^^^
diff --git a/clang/docs/tools/clang-formatted-files.txt b/clang/docs/tools/clang-formatted-files.txt
index b747adfb6992..dee51e402b68 100644
--- a/clang/docs/tools/clang-formatted-files.txt
+++ b/clang/docs/tools/clang-formatted-files.txt
@@ -124,6 +124,7 @@ clang/include/clang/Analysis/Analyses/CFGReachabilityAnalysis.h
clang/include/clang/Analysis/Analyses/ExprMutationAnalyzer.h
clang/include/clang/Analysis/FlowSensitive/AdornedCFG.h
clang/include/clang/Analysis/FlowSensitive/ASTOps.h
+clang/include/clang/Analysis/FlowSensitive/CNFFormula.h
clang/include/clang/Analysis/FlowSensitive/DataflowAnalysis.h
clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h
clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
@@ -621,6 +622,7 @@ clang/tools/libclang/CXCursor.h
clang/tools/scan-build-py/tests/functional/src/include/clean-one.h
clang/unittests/Analysis/CFGBuildResult.h
clang/unittests/Analysis/MacroExpansionContextTest.cpp
+clang/unittests/Analysis/FlowSensitive/CNFFormula.cpp
clang/unittests/Analysis/FlowSensitive/DataflowAnalysisContextTest.cpp
clang/unittests/Analysis/FlowSensitive/DataflowEnvironmentTest.cpp
clang/unittests/Analysis/FlowSensitive/MapLatticeTest.cpp
@@ -632,6 +634,7 @@ clang/unittests/Analysis/FlowSensitive/TestingSupport.cpp
clang/unittests/Analysis/FlowSensitive/TestingSupport.h
clang/unittests/Analysis/FlowSensitive/TestingSupportTest.cpp
clang/unittests/Analysis/FlowSensitive/TypeErasedDataflowAnalysisTest.cpp
+clang/unittests/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
clang/unittests/Analysis/FlowSensitive/WatchedLiteralsSolverTest.cpp
clang/unittests/AST/ASTImporterFixtures.cpp
clang/unittests/AST/ASTImporterFixtures.h
diff --git a/clang/include/clang/AST/ASTContext.h b/clang/include/clang/AST/ASTContext.h
index e03b11219478..a1d1d1c51cd4 100644
--- a/clang/include/clang/AST/ASTContext.h
+++ b/clang/include/clang/AST/ASTContext.h
@@ -110,6 +110,9 @@ class VarTemplateDecl;
class VTableContextBase;
class XRayFunctionFilter;
+/// A simple array of base specifiers.
+typedef SmallVector<CXXBaseSpecifier *, 4> CXXCastPath;
+
namespace Builtin {
class Context;
@@ -1170,6 +1173,12 @@ public:
/// in device compilation.
llvm::DenseSet<const FunctionDecl *> CUDAImplicitHostDeviceFunUsedByDevice;
+ /// For capturing lambdas with an explicit object parameter whose type is
+ /// derived from the lambda type, we need to perform derived-to-base
+ /// conversion so we can access the captures; the cast paths for that
+ /// are stored here.
+ llvm::DenseMap<const CXXMethodDecl *, CXXCastPath> LambdaCastPaths;
+
ASTContext(LangOptions &LOpts, SourceManager &SM, IdentifierTable &idents,
SelectorTable &sels, Builtin::Context &builtins,
TranslationUnitKind TUKind);
@@ -2611,7 +2620,7 @@ public:
///
/// \returns if this is an array type, the completely unqualified array type
/// that corresponds to it. Otherwise, returns T.getUnqualifiedType().
- QualType getUnqualifiedArrayType(QualType T, Qualifiers &Quals);
+ QualType getUnqualifiedArrayType(QualType T, Qualifiers &Quals) const;
/// Determine whether the given types are equivalent after
/// cvr-qualifiers have been removed.
diff --git a/clang/include/clang/AST/ASTNodeTraverser.h b/clang/include/clang/AST/ASTNodeTraverser.h
index bf7c204e4ad7..616f92691ec3 100644
--- a/clang/include/clang/AST/ASTNodeTraverser.h
+++ b/clang/include/clang/AST/ASTNodeTraverser.h
@@ -695,7 +695,7 @@ public:
if (const auto *TC = D->getTypeConstraint())
Visit(TC->getImmediatelyDeclaredConstraint());
if (D->hasDefaultArgument())
- Visit(D->getDefaultArgument(), SourceRange(),
+ Visit(D->getDefaultArgument().getArgument(), SourceRange(),
D->getDefaultArgStorage().getInheritedFrom(),
D->defaultArgumentWasInherited() ? "inherited from" : "previous");
}
@@ -704,9 +704,9 @@ public:
if (const auto *E = D->getPlaceholderTypeConstraint())
Visit(E);
if (D->hasDefaultArgument())
- Visit(D->getDefaultArgument(), SourceRange(),
- D->getDefaultArgStorage().getInheritedFrom(),
- D->defaultArgumentWasInherited() ? "inherited from" : "previous");
+ dumpTemplateArgumentLoc(
+ D->getDefaultArgument(), D->getDefaultArgStorage().getInheritedFrom(),
+ D->defaultArgumentWasInherited() ? "inherited from" : "previous");
}
void VisitTemplateTemplateParmDecl(const TemplateTemplateParmDecl *D) {
diff --git a/clang/include/clang/AST/Decl.h b/clang/include/clang/AST/Decl.h
index 5e485ccb85a1..7fd80b90d103 100644
--- a/clang/include/clang/AST/Decl.h
+++ b/clang/include/clang/AST/Decl.h
@@ -2188,6 +2188,8 @@ public:
void setRangeEnd(SourceLocation E) { EndRangeLoc = E; }
+ void setDeclarationNameLoc(DeclarationNameLoc L) { DNLoc = L; }
+
/// Returns the location of the ellipsis of a variadic function.
SourceLocation getEllipsisLoc() const {
const auto *FPT = getType()->getAs<FunctionProtoType>();
diff --git a/clang/include/clang/AST/DeclTemplate.h b/clang/include/clang/AST/DeclTemplate.h
index f3d6a321ecf1..5b6a6b40b28e 100644
--- a/clang/include/clang/AST/DeclTemplate.h
+++ b/clang/include/clang/AST/DeclTemplate.h
@@ -1185,7 +1185,7 @@ class TemplateTypeParmDecl final : public TypeDecl,
/// The default template argument, if any.
using DefArgStorage =
- DefaultArgStorage<TemplateTypeParmDecl, TypeSourceInfo *>;
+ DefaultArgStorage<TemplateTypeParmDecl, TemplateArgumentLoc *>;
DefArgStorage DefaultArgument;
TemplateTypeParmDecl(DeclContext *DC, SourceLocation KeyLoc,
@@ -1225,13 +1225,9 @@ public:
bool hasDefaultArgument() const { return DefaultArgument.isSet(); }
/// Retrieve the default argument, if any.
- QualType getDefaultArgument() const {
- return DefaultArgument.get()->getType();
- }
-
- /// Retrieves the default argument's source information, if any.
- TypeSourceInfo *getDefaultArgumentInfo() const {
- return DefaultArgument.get();
+ const TemplateArgumentLoc &getDefaultArgument() const {
+ static const TemplateArgumentLoc NoneLoc;
+ return DefaultArgument.isSet() ? *DefaultArgument.get() : NoneLoc;
}
/// Retrieves the location of the default argument declaration.
@@ -1244,9 +1240,8 @@ public:
}
/// Set the default argument for this template parameter.
- void setDefaultArgument(TypeSourceInfo *DefArg) {
- DefaultArgument.set(DefArg);
- }
+ void setDefaultArgument(const ASTContext &C,
+ const TemplateArgumentLoc &DefArg);
/// Set that this default argument was inherited from another
/// parameter.
@@ -1365,7 +1360,8 @@ class NonTypeTemplateParmDecl final
/// The default template argument, if any, and whether or not
/// it was inherited.
- using DefArgStorage = DefaultArgStorage<NonTypeTemplateParmDecl, Expr *>;
+ using DefArgStorage =
+ DefaultArgStorage<NonTypeTemplateParmDecl, TemplateArgumentLoc *>;
DefArgStorage DefaultArgument;
// FIXME: Collapse this into TemplateParamPosition; or, just move depth/index
@@ -1435,7 +1431,10 @@ public:
bool hasDefaultArgument() const { return DefaultArgument.isSet(); }
/// Retrieve the default argument, if any.
- Expr *getDefaultArgument() const { return DefaultArgument.get(); }
+ const TemplateArgumentLoc &getDefaultArgument() const {
+ static const TemplateArgumentLoc NoneLoc;
+ return DefaultArgument.isSet() ? *DefaultArgument.get() : NoneLoc;
+ }
/// Retrieve the location of the default argument, if any.
SourceLocation getDefaultArgumentLoc() const;
@@ -1449,7 +1448,8 @@ public:
/// Set the default argument for this template parameter, and
/// whether that default argument was inherited from another
/// declaration.
- void setDefaultArgument(Expr *DefArg) { DefaultArgument.set(DefArg); }
+ void setDefaultArgument(const ASTContext &C,
+ const TemplateArgumentLoc &DefArg);
void setInheritedDefaultArgument(const ASTContext &C,
NonTypeTemplateParmDecl *Parm) {
DefaultArgument.setInherited(C, Parm);
diff --git a/clang/include/clang/AST/ExprCXX.h b/clang/include/clang/AST/ExprCXX.h
index fac65628ffed..dbf693611a7f 100644
--- a/clang/include/clang/AST/ExprCXX.h
+++ b/clang/include/clang/AST/ExprCXX.h
@@ -4377,15 +4377,21 @@ class PackIndexingExpr final
// The pack being indexed, followed by the index
Stmt *SubExprs[2];
- size_t TransformedExpressions;
+  // The number of trailing expressions.
+ unsigned TransformedExpressions : 31;
+
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned ExpandedToEmptyPack : 1;
PackIndexingExpr(QualType Type, SourceLocation EllipsisLoc,
SourceLocation RSquareLoc, Expr *PackIdExpr, Expr *IndexExpr,
- ArrayRef<Expr *> SubstitutedExprs = {})
+ ArrayRef<Expr *> SubstitutedExprs = {},
+ bool ExpandedToEmptyPack = false)
: Expr(PackIndexingExprClass, Type, VK_LValue, OK_Ordinary),
EllipsisLoc(EllipsisLoc), RSquareLoc(RSquareLoc),
SubExprs{PackIdExpr, IndexExpr},
- TransformedExpressions(SubstitutedExprs.size()) {
+ TransformedExpressions(SubstitutedExprs.size()),
+ ExpandedToEmptyPack(ExpandedToEmptyPack) {
auto *Exprs = getTrailingObjects<Expr *>();
std::uninitialized_copy(SubstitutedExprs.begin(), SubstitutedExprs.end(),
@@ -4408,10 +4414,14 @@ public:
SourceLocation EllipsisLoc,
SourceLocation RSquareLoc, Expr *PackIdExpr,
Expr *IndexExpr, std::optional<int64_t> Index,
- ArrayRef<Expr *> SubstitutedExprs = {});
+ ArrayRef<Expr *> SubstitutedExprs = {},
+ bool ExpandedToEmptyPack = false);
static PackIndexingExpr *CreateDeserialized(ASTContext &Context,
unsigned NumTransformedExprs);
+ /// Determine if the expression was expanded to empty.
+ bool expandsToEmptyPack() const { return ExpandedToEmptyPack; }
+
/// Determine the location of the 'sizeof' keyword.
SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
@@ -4445,6 +4455,7 @@ public:
return getTrailingObjects<Expr *>()[*Index];
}
+ /// Return the trailing expressions, regardless of the expansion.
ArrayRef<Expr *> getExpressions() const {
return {getTrailingObjects<Expr *>(), TransformedExpressions};
}
diff --git a/clang/include/clang/AST/OpenACCClause.h b/clang/include/clang/AST/OpenACCClause.h
index 607a2b9d6536..28ff8c44bd25 100644
--- a/clang/include/clang/AST/OpenACCClause.h
+++ b/clang/include/clang/AST/OpenACCClause.h
@@ -677,6 +677,35 @@ public:
ArrayRef<Expr *> VarList, SourceLocation EndLoc);
};
+class OpenACCReductionClause final
+ : public OpenACCClauseWithVarList,
+ public llvm::TrailingObjects<OpenACCReductionClause, Expr *> {
+ OpenACCReductionOperator Op;
+
+ OpenACCReductionClause(SourceLocation BeginLoc, SourceLocation LParenLoc,
+ OpenACCReductionOperator Operator,
+ ArrayRef<Expr *> VarList, SourceLocation EndLoc)
+ : OpenACCClauseWithVarList(OpenACCClauseKind::Reduction, BeginLoc,
+ LParenLoc, EndLoc),
+ Op(Operator) {
+ std::uninitialized_copy(VarList.begin(), VarList.end(),
+ getTrailingObjects<Expr *>());
+ setExprs(MutableArrayRef(getTrailingObjects<Expr *>(), VarList.size()));
+ }
+
+public:
+ static bool classof(const OpenACCClause *C) {
+ return C->getClauseKind() == OpenACCClauseKind::Reduction;
+ }
+
+ static OpenACCReductionClause *
+ Create(const ASTContext &C, SourceLocation BeginLoc, SourceLocation LParenLoc,
+ OpenACCReductionOperator Operator, ArrayRef<Expr *> VarList,
+ SourceLocation EndLoc);
+
+ OpenACCReductionOperator getReductionOp() const { return Op; }
+};
+
template <class Impl> class OpenACCClauseVisitor {
Impl &getDerived() { return static_cast<Impl &>(*this); }
diff --git a/clang/include/clang/AST/RecursiveASTVisitor.h b/clang/include/clang/AST/RecursiveASTVisitor.h
index f5cefedb07e0..4bbb4380cdd7 100644
--- a/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -30,6 +30,7 @@
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/OpenACCClause.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
@@ -510,6 +511,7 @@ private:
bool
TraverseOpenACCAssociatedStmtConstruct(OpenACCAssociatedStmtConstruct *S);
bool VisitOpenACCClauseList(ArrayRef<const OpenACCClause *>);
+ bool VisitOpenACCClause(const OpenACCClause *);
};
template <typename Derived>
@@ -1960,7 +1962,7 @@ DEF_TRAVERSE_DECL(TemplateTypeParmDecl, {
TRY_TO(TraverseType(QualType(D->getTypeForDecl(), 0)));
TRY_TO(TraverseTemplateTypeParamDeclConstraints(D));
if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited())
- TRY_TO(TraverseTypeLoc(D->getDefaultArgumentInfo()->getTypeLoc()));
+ TRY_TO(TraverseTemplateArgumentLoc(D->getDefaultArgument()));
})
DEF_TRAVERSE_DECL(TypedefDecl, {
@@ -2320,7 +2322,7 @@ DEF_TRAVERSE_DECL(NonTypeTemplateParmDecl, {
// A non-type template parameter, e.g. "S" in template<int S> class Foo ...
TRY_TO(TraverseDeclaratorHelper(D));
if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited())
- TRY_TO(TraverseStmt(D->getDefaultArgument()));
+ TRY_TO(TraverseTemplateArgumentLoc(D->getDefaultArgument()));
})
DEF_TRAVERSE_DECL(ParmVarDecl, {
@@ -3968,8 +3970,25 @@ bool RecursiveASTVisitor<Derived>::TraverseOpenACCAssociatedStmtConstruct(
}
template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOpenACCClause(const OpenACCClause *C) {
+ for (const Stmt *Child : C->children())
+ TRY_TO(TraverseStmt(const_cast<Stmt *>(Child)));
+ return true;
+}
+
+template <typename Derived>
bool RecursiveASTVisitor<Derived>::VisitOpenACCClauseList(
- ArrayRef<const OpenACCClause *>) {
+ ArrayRef<const OpenACCClause *> Clauses) {
+
+ for (const auto *C : Clauses)
+ TRY_TO(VisitOpenACCClause(C));
// TODO OpenACC: When we have Clauses with expressions, we should visit them
// here.
return true;
diff --git a/clang/include/clang/AST/Type.h b/clang/include/clang/AST/Type.h
index c7a8e785913b..263b632df23c 100644
--- a/clang/include/clang/AST/Type.h
+++ b/clang/include/clang/AST/Type.h
@@ -2524,6 +2524,7 @@ public:
bool isVectorType() const; // GCC vector type.
bool isExtVectorType() const; // Extended vector type.
bool isExtVectorBoolType() const; // Extended vector type with bool element.
+ bool isSubscriptableVectorType() const;
bool isMatrixType() const; // Matrix type.
bool isConstantMatrixType() const; // Constant matrix type.
bool isDependentAddressSpaceType() const; // value-dependent address space qualifier
@@ -7730,6 +7731,10 @@ inline bool Type::isExtVectorBoolType() const {
return cast<ExtVectorType>(CanonicalType)->getElementType()->isBooleanType();
}
+inline bool Type::isSubscriptableVectorType() const {
+ return isVectorType() || isSveVLSBuiltinType();
+}
+
inline bool Type::isMatrixType() const {
return isa<MatrixType>(CanonicalType);
}
diff --git a/clang/include/clang/Analysis/FlowSensitive/CNFFormula.h b/clang/include/clang/Analysis/FlowSensitive/CNFFormula.h
new file mode 100644
index 000000000000..fb13e774c67f
--- /dev/null
+++ b/clang/include/clang/Analysis/FlowSensitive/CNFFormula.h
@@ -0,0 +1,179 @@
+//===- CNFFormula.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A representation of a boolean formula in 3-CNF.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_CNFFORMULA_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_CNFFORMULA_H
+
+#include <cassert>
+#include <cstdint>
+#include <vector>
+
+#include "clang/Analysis/FlowSensitive/Formula.h"
+
+namespace clang {
+namespace dataflow {
+
+/// Boolean variables are represented as positive integers.
+using Variable = uint32_t;
+
+/// A null boolean variable is used as a placeholder in various data structures
+/// and algorithms.
+constexpr Variable NullVar = 0;
+
+/// Literals are represented as positive integers. Specifically, for a boolean
+/// variable `V` that is represented as the positive integer `I`, the positive
+/// literal `V` is represented as the integer `2*I` and the negative literal
+/// `!V` is represented as the integer `2*I+1`.
+using Literal = uint32_t;
+
+/// A null literal is used as a placeholder in various data structures and
+/// algorithms.
+constexpr Literal NullLit = 0;
+
+/// Clause identifiers are represented as positive integers.
+using ClauseID = uint32_t;
+
+/// A null clause identifier is used as a placeholder in various data structures
+/// and algorithms.
+constexpr ClauseID NullClause = 0;
+
+/// Returns the positive literal `V`.
+inline constexpr Literal posLit(Variable V) { return 2 * V; }
+
+/// Returns the negative literal `!V`.
+inline constexpr Literal negLit(Variable V) { return 2 * V + 1; }
+
+/// Returns whether `L` is a positive literal.
+inline constexpr bool isPosLit(Literal L) { return 0 == (L & 1); }
+
+/// Returns whether `L` is a negative literal.
+inline constexpr bool isNegLit(Literal L) { return 1 == (L & 1); }
+
+/// Returns the negated literal `!L`.
+inline constexpr Literal notLit(Literal L) { return L ^ 1; }
+
+/// Returns the variable of `L`.
+inline constexpr Variable var(Literal L) { return L >> 1; }
+
+/// A boolean formula in 3-CNF (conjunctive normal form with at most 3 literals
+/// per clause).
+class CNFFormula {
+ /// `LargestVar` is equal to the largest positive integer that represents a
+ /// variable in the formula.
+ const Variable LargestVar;
+
+ /// Literals of all clauses in the formula.
+ ///
+ /// The element at index 0 stands for the literal in the null clause. It is
+ /// set to 0 and isn't used. Literals of clauses in the formula start from the
+ /// element at index 1.
+ ///
+ /// For example, for the formula `(L1 v L2) ^ (L2 v L3 v L4)` the elements of
+ /// `Clauses` will be `[0, L1, L2, L2, L3, L4]`.
+ std::vector<Literal> Clauses;
+
+ /// Start indices of clauses of the formula in `Clauses`.
+ ///
+ /// The element at index 0 stands for the start index of the null clause. It
+ /// is set to 0 and isn't used. Start indices of clauses in the formula start
+ /// from the element at index 1.
+ ///
+ /// For example, for the formula `(L1 v L2) ^ (L2 v L3 v L4)` the elements of
+ /// `ClauseStarts` will be `[0, 1, 3]`. Note that the literals of the first
+ /// clause always start at index 1. The start index for the literals of the
+ /// second clause depends on the size of the first clause and so on.
+ std::vector<size_t> ClauseStarts;
+
+ /// Indicates that we already know the formula is unsatisfiable.
+ /// During construction, we catch simple cases of conflicting unit-clauses.
+ bool KnownContradictory;
+
+public:
+ explicit CNFFormula(Variable LargestVar);
+
+ /// Adds the `L1 v ... v Ln` clause to the formula.
+ /// Requirements:
+ ///
+ /// `Li` must not be `NullLit`.
+ ///
+ /// All literals in the input that are not `NullLit` must be distinct.
+ void addClause(ArrayRef<Literal> lits);
+
+ /// Returns whether the formula is known to be contradictory.
+ /// This is the case if any of the clauses is empty.
+ bool knownContradictory() const { return KnownContradictory; }
+
+ /// Returns the largest variable in the formula.
+ Variable largestVar() const { return LargestVar; }
+
+ /// Returns the number of clauses in the formula.
+ /// Valid clause IDs are in the range [1, `numClauses()`].
+ ClauseID numClauses() const { return ClauseStarts.size() - 1; }
+
+ /// Returns the number of literals in clause `C`.
+ size_t clauseSize(ClauseID C) const {
+ return C == ClauseStarts.size() - 1 ? Clauses.size() - ClauseStarts[C]
+ : ClauseStarts[C + 1] - ClauseStarts[C];
+ }
+
+ /// Returns the literals of clause `C`.
+ /// If `knownContradictory()` is false, each clause has at least one literal.
+ llvm::ArrayRef<Literal> clauseLiterals(ClauseID C) const {
+ size_t S = clauseSize(C);
+ if (S == 0)
+ return llvm::ArrayRef<Literal>();
+ return llvm::ArrayRef<Literal>(&Clauses[ClauseStarts[C]], S);
+ }
+
+ /// An iterator over all literals of all clauses in the formula.
+ /// The iterator allows mutation of the literal through the `*` operator.
+ /// This is to support solvers that mutate the formula during solving.
+ class Iterator {
+ friend class CNFFormula;
+ CNFFormula *CNF;
+ size_t Idx;
+ Iterator(CNFFormula *CNF, size_t Idx) : CNF(CNF), Idx(Idx) {}
+
+ public:
+ Iterator(const Iterator &) = default;
+ Iterator &operator=(const Iterator &) = default;
+
+ Iterator &operator++() {
+ ++Idx;
+ assert(Idx < CNF->Clauses.size() && "Iterator out of bounds");
+ return *this;
+ }
+
+ Iterator next() const {
+ Iterator I = *this;
+ ++I;
+ return I;
+ }
+
+ Literal &operator*() const { return CNF->Clauses[Idx]; }
+ };
+ friend class Iterator;
+
+ /// Returns an iterator to the first literal of clause `C`.
+ Iterator startOfClause(ClauseID C) { return Iterator(this, ClauseStarts[C]); }
+};
+
+/// Converts the conjunction of `Vals` into a formula in conjunctive normal
+/// form where each clause has at least one and at most three literals.
+/// `Atomics` is populated with a mapping from `Variables` to the corresponding
+/// `Atom`s for atomic booleans in the input formulas.
+CNFFormula buildCNF(const llvm::ArrayRef<const Formula *> &Formulas,
+ llvm::DenseMap<Variable, Atom> &Atomics);
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_CNFFORMULA_H
diff --git a/clang/include/clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h b/clang/include/clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h
index b5cd7aa10fd7..d74380b78e93 100644
--- a/clang/include/clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h
+++ b/clang/include/clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h
@@ -17,16 +17,17 @@
#include "clang/Analysis/FlowSensitive/Formula.h"
#include "clang/Analysis/FlowSensitive/Solver.h"
#include "llvm/ADT/ArrayRef.h"
-#include <limits>
namespace clang {
namespace dataflow {
/// A SAT solver that is an implementation of Algorithm D from Knuth's The Art
/// of Computer Programming Volume 4: Satisfiability, Fascicle 6. It is based on
-/// the Davis-Putnam-Logemann-Loveland (DPLL) algorithm, keeps references to a
-/// single "watched" literal per clause, and uses a set of "active" variables
+/// the Davis-Putnam-Logemann-Loveland (DPLL) algorithm [1], keeps references to
+/// a single "watched" literal per clause, and uses a set of "active" variables
/// for unit propagation.
+//
+// [1] https://en.wikipedia.org/wiki/DPLL_algorithm
class WatchedLiteralsSolver : public Solver {
// Count of the iterations of the main loop of the solver. This spans *all*
// calls to the underlying solver across the life of this object. It is
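
For context on the watched-literal scheme the comment refers to: each clause keeps a reference to one of its literals, and a clause needs attention only when that particular literal becomes false. A deliberately simplified sketch of the resulting unit-propagation step follows; it is illustrative only and does not mirror the data layout of WatchedLiteralsSolver itself.

    #include <cstdlib>
    #include <vector>

    // Value[V] is -1/0/1 for unassigned/false/true; literals are nonzero
    // ints where -L is the negation of L.
    static int valueOf(const std::vector<int> &Value, int Lit) {
      int V = Value[std::abs(Lit)];
      return V < 0 ? -1 : (Lit > 0 ? V : 1 - V);
    }

    // Re-examines one clause after its watched literal became false.
    // Returns false on conflict; a forced literal is appended to Units.
    static bool checkClause(const std::vector<int> &Clause, size_t &WatchIdx,
                            const std::vector<int> &Value,
                            std::vector<int> &Units) {
      if (valueOf(Value, Clause[WatchIdx]) != 0)
        return true; // watched literal is true or unassigned: nothing to do
      int Forced = 0, Unassigned = 0;
      for (size_t I = 0; I < Clause.size(); ++I) {
        int Val = valueOf(Value, Clause[I]);
        if (Val == 1) { WatchIdx = I; return true; } // satisfied: re-watch here
        if (Val == -1) { Forced = Clause[I]; ++Unassigned; WatchIdx = I; }
      }
      if (Unassigned == 0)
        return false;            // every literal is false: conflict
      if (Unassigned == 1)
        Units.push_back(Forced); // unit clause: Forced must become true
      return true;
    }
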
diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td
index 7a7721239a28..e59cccccdd36 100644
--- a/clang/include/clang/Basic/Attr.td
+++ b/clang/include/clang/Basic/Attr.td
@@ -999,7 +999,7 @@ def Availability : InheritableAttr {
VersionArgument<"deprecated">, VersionArgument<"obsoleted">,
BoolArgument<"unavailable">, StringArgument<"message">,
BoolArgument<"strict">, StringArgument<"replacement">,
- IntArgument<"priority">];
+ IntArgument<"priority">, IdentifierArgument<"environment">];
let AdditionalMembers =
[{static llvm::StringRef getPrettyPlatformName(llvm::StringRef Platform) {
return llvm::StringSwitch<llvm::StringRef>(Platform)
@@ -1019,7 +1019,7 @@ def Availability : InheritableAttr {
.Case("xros", "visionOS")
.Case("xros_app_extension", "visionOS (App Extension)")
.Case("swift", "Swift")
- .Case("shadermodel", "HLSL ShaderModel")
+ .Case("shadermodel", "Shader Model")
.Case("ohos", "OpenHarmony OS")
.Default(llvm::StringRef());
}
@@ -1059,7 +1059,34 @@ static llvm::StringRef canonicalizePlatformName(llvm::StringRef Platform) {
.Case("visionos_app_extension", "xros_app_extension")
.Case("ShaderModel", "shadermodel")
.Default(Platform);
-} }];
+}
+static llvm::StringRef getPrettyEnviromentName(llvm::StringRef Environment) {
+ return llvm::StringSwitch<llvm::StringRef>(Environment)
+ .Case("pixel", "pixel shader")
+ .Case("vertex", "vertex shader")
+ .Case("geometry", "geometry shader")
+ .Case("hull", "hull shader")
+ .Case("domain", "domain shader")
+ .Case("compute", "compute shader")
+ .Case("mesh", "mesh shader")
+ .Case("amplification", "amplification shader")
+ .Case("library", "shader library")
+ .Default(Environment);
+}
+static llvm::Triple::EnvironmentType getEnvironmentType(llvm::StringRef Environment) {
+ return llvm::StringSwitch<llvm::Triple::EnvironmentType>(Environment)
+ .Case("pixel", llvm::Triple::Pixel)
+ .Case("vertex", llvm::Triple::Vertex)
+ .Case("geometry", llvm::Triple::Geometry)
+ .Case("hull", llvm::Triple::Hull)
+ .Case("domain", llvm::Triple::Domain)
+ .Case("compute", llvm::Triple::Compute)
+ .Case("mesh", llvm::Triple::Mesh)
+ .Case("amplification", llvm::Triple::Amplification)
+ .Case("library", llvm::Triple::Library)
+ .Default(llvm::Triple::UnknownEnvironment);
+}
+}];
let HasCustomParsing = 1;
let InheritEvenIfAlreadyPresent = 1;
let Subjects = SubjectList<[Named]>;
@@ -1613,10 +1640,11 @@ def Unlikely : StmtAttr {
def : MutualExclusions<[Likely, Unlikely]>;
def CXXAssume : StmtAttr {
- let Spellings = [CXX11<"", "assume", 202207>];
+ let Spellings = [CXX11<"", "assume", 202207>, Clang<"assume">];
let Subjects = SubjectList<[NullStmt], ErrorDiag, "empty statements">;
let Args = [ExprArgument<"Assumption">];
let Documentation = [CXXAssumeDocs];
+ let HasCustomParsing = 1;
}
def NoMerge : DeclOrStmtAttr {
@@ -4229,7 +4257,7 @@ def OMPDeclareVariant : InheritableAttr {
}
def OMPAssume : InheritableAttr {
- let Spellings = [Clang<"assume">, CXX11<"omp", "assume">];
+ let Spellings = [CXX11<"omp", "assume">];
let Subjects = SubjectList<[Function, ObjCMethod]>;
let InheritEvenIfAlreadyPresent = 1;
let Documentation = [OMPAssumeDocs];
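
The two helpers added to `AdditionalMembers` above give the availability attribute a string-to-`llvm::Triple` environment mapping. Assuming they are emitted as static members of the generated `AvailabilityAttr` class, as `getPrettyPlatformName` is, usage looks like:

    // Unknown names fall through to the StringSwitch defaults shown above.
    llvm::Triple::EnvironmentType ET =
        AvailabilityAttr::getEnvironmentType("compute");    // llvm::Triple::Compute
    llvm::StringRef Pretty =
        AvailabilityAttr::getPrettyEnviromentName("pixel"); // "pixel shader"
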
diff --git a/clang/include/clang/Basic/AttrDocs.td b/clang/include/clang/Basic/AttrDocs.td
index b48aaf65558a..a313e811c9d2 100644
--- a/clang/include/clang/Basic/AttrDocs.td
+++ b/clang/include/clang/Basic/AttrDocs.td
@@ -1593,6 +1593,11 @@ replacement=\ *string-literal*
a warning about use of a deprecated declaration. The Fix-It will replace
the deprecated declaration with the new declaration specified.
+environment=\ *identifier*
+ Target environment in which this declaration is available. If present,
+ the availability attribute applies only to targets with the same platform
+ and environment. The parameter is currently supported only in HLSL.
+
Multiple availability attributes can be placed on a declaration, which may
correspond to different platforms. For most platforms, the availability
attribute with the platform corresponding to the target platform will be used;
@@ -2022,9 +2027,6 @@ Different optimisers are likely to react differently to the presence of
this attribute; in some cases, adding ``assume`` may affect performance
negatively. It should be used with parsimony and care.
-Note that `clang::assume` is a different attribute. Always write ``assume``
-without a namespace if you intend to use the standard C++ attribute.
-
Example:
.. code-block:: c++
@@ -4735,7 +4737,7 @@ def OMPAssumeDocs : Documentation {
let Category = DocCatFunction;
let Heading = "assume";
let Content = [{
-Clang supports the ``__attribute__((assume("assumption")))`` attribute to
+Clang supports the ``[[omp::assume("assumption")]]`` attribute to
provide additional information to the optimizer. The string-literal, here
"assumption", will be attached to the function declaration such that later
analysis and optimization passes can assume the "assumption" to hold.
@@ -4747,7 +4749,7 @@ A function can have multiple assume attributes and they propagate from prior
declarations to later definitions. Multiple assumptions are aggregated into a
single comma separated string. Thus, one can provide multiple assumptions via
a comma separated string, i.a.,
-``__attribute__((assume("assumption1,assumption2")))``.
+``[[omp::assume("assumption1,assumption2")]]``.
While LLVM plugins might provide more assumption strings, the default LLVM
optimization passes are aware of the following assumptions:
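
Tying the documentation change for `environment` together: with the new parameter, an HLSL declaration can be gated on both shader model and shader stage. A hypothetical example (the function name is illustrative, not a real intrinsic):

    // Available from shader model 6.0, but only in the compute environment;
    // in any other stage the declaration is treated as unavailable.
    __attribute__((availability(shadermodel, introduced = 6.0, environment = compute)))
    float WaveHelper(float X);
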
diff --git a/clang/include/clang/Basic/BuiltinsAArch64.def b/clang/include/clang/Basic/BuiltinsAArch64.def
index cf8711c6eaee..5f53c98167df 100644
--- a/clang/include/clang/Basic/BuiltinsAArch64.def
+++ b/clang/include/clang/Basic/BuiltinsAArch64.def
@@ -290,7 +290,7 @@ TARGET_HEADER_BUILTIN(_CountLeadingZeros64, "UiULLi", "nh", INTRIN_H, ALL_MS_LAN
TARGET_HEADER_BUILTIN(_CountOneBits, "UiUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
TARGET_HEADER_BUILTIN(_CountOneBits64, "UiULLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__prefetch, "vv*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__prefetch, "vvC*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
#undef BUILTIN
#undef LANGBUILTIN
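
The `__prefetch` change rewrites its prototype string from `vv*` (`void(void *)`) to `vvC*` (`void(const void *)`). Assuming an AArch64 MSVC-compatible target where `<intrin.h>` declares the builtin, a pointer-to-const argument now type-checks without a cast:

    #include <intrin.h>
    // Previously required const_cast<int *>(Table); now accepted directly.
    void warm(const int *Table) { __prefetch(Table); }
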
diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def b/clang/include/clang/Basic/BuiltinsAMDGPU.def
index 3e21a2fe2ac6..433c7795325f 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.def
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def
@@ -68,7 +68,7 @@ BUILTIN(__builtin_amdgcn_sched_group_barrier, "vIiIiIi", "n")
BUILTIN(__builtin_amdgcn_iglp_opt, "vIi", "n")
BUILTIN(__builtin_amdgcn_s_dcache_inv, "v", "n")
BUILTIN(__builtin_amdgcn_buffer_wbinvl1, "v", "n")
-BUILTIN(__builtin_amdgcn_fence, "vUicC*", "n")
+BUILTIN(__builtin_amdgcn_fence, "vUicC*.", "n")
BUILTIN(__builtin_amdgcn_groupstaticsize, "Ui", "n")
BUILTIN(__builtin_amdgcn_wavefrontsize, "Ui", "nc")
@@ -240,6 +240,7 @@ TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fadd_v2bf16, "V2sV2s*0V2s", "t", "at
TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_v2bf16, "V2sV2s*1V2s", "t", "atomic-global-pk-add-bf16-inst")
TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_v2bf16, "V2sV2s*3V2s", "t", "atomic-ds-pk-add-16-insts")
TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_v2f16, "V2hV2h*3V2h", "t", "atomic-ds-pk-add-16-insts")
+TARGET_BUILTIN(__builtin_amdgcn_global_load_lds, "vv*1v*3UiiUi", "t", "gfx940-insts")
//===----------------------------------------------------------------------===//
// Deep learning builtins.
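
Decoding the new `__builtin_amdgcn_global_load_lds` prototype string `vv*1v*3UiiUi`: `void` return, a global (`addrspace(1)`) pointer, an LDS (`addrspace(3)`) pointer, an unsigned size, a signed offset, and an unsigned aux operand. Per the Sema diagnostics added later in this patch, the size must be 1, 2, or 4. A hedged sketch; the offset and aux semantics are target-defined and only assumed here:

    // Requires the gfx940-insts feature. Stages 4 bytes per lane from
    // global memory into LDS; size values other than 1, 2, or 4 are rejected.
    typedef __attribute__((address_space(1))) void GlobalVoid;
    typedef __attribute__((address_space(3))) void LdsVoid;
    void stage(GlobalVoid *Src, LdsVoid *Dst) {
      __builtin_amdgcn_global_load_lds(Src, Dst, /*size=*/4, /*offset=*/0, /*aux=*/0);
    }
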
diff --git a/clang/include/clang/Basic/BuiltinsWebAssembly.def b/clang/include/clang/Basic/BuiltinsWebAssembly.def
index 8645cff1e867..fd8c1b480d6d 100644
--- a/clang/include/clang/Basic/BuiltinsWebAssembly.def
+++ b/clang/include/clang/Basic/BuiltinsWebAssembly.def
@@ -193,6 +193,8 @@ TARGET_BUILTIN(__builtin_wasm_relaxed_dot_bf16x8_add_f32_f32x4, "V4fV8UsV8UsV4f"
// Half-Precision (fp16)
TARGET_BUILTIN(__builtin_wasm_loadf16_f32, "fh*", "nU", "half-precision")
TARGET_BUILTIN(__builtin_wasm_storef16_f32, "vfh*", "n", "half-precision")
+TARGET_BUILTIN(__builtin_wasm_splat_f16x8, "V8hf", "nc", "half-precision")
+TARGET_BUILTIN(__builtin_wasm_extract_lane_f16x8, "fV8hi", "nc", "half-precision")
// Reference Types builtins
// Some builtins are custom type-checked - see 't' as part of the third argument,
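
Following the prototype encoding used throughout this file, `V8hf` reads as `f16x8(float)` and `fV8hi` as `float(f16x8, int)`. A sketch, assuming the target's half-precision feature is enabled:

    // Splat a float into all eight f16 lanes, then read lane 0 back.
    typedef _Float16 f16x8 __attribute__((vector_size(16)));
    float roundtrip(float X) {
      f16x8 V = __builtin_wasm_splat_f16x8(X);
      return __builtin_wasm_extract_lane_f16x8(V, 0);
    }
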
diff --git a/clang/include/clang/Basic/BuiltinsX86.def b/clang/include/clang/Basic/BuiltinsX86.def
index eafcc219c109..7074479786b9 100644
--- a/clang/include/clang/Basic/BuiltinsX86.def
+++ b/clang/include/clang/Basic/BuiltinsX86.def
@@ -832,23 +832,11 @@ TARGET_BUILTIN(__builtin_ia32_rsqrt14ss_mask, "V4fV4fV4fV4fUc", "ncV:128:", "avx
TARGET_BUILTIN(__builtin_ia32_rsqrt14pd512_mask, "V8dV8dV8dUc", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_rsqrt14ps512_mask, "V16fV16fV16fUs", "ncV:512:", "avx512f,evex512")
-TARGET_BUILTIN(__builtin_ia32_rsqrt28sd_round_mask, "V2dV2dV2dV2dUcIi", "ncV:128:", "avx512er")
-TARGET_BUILTIN(__builtin_ia32_rsqrt28ss_round_mask, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512er")
-TARGET_BUILTIN(__builtin_ia32_rsqrt28pd_mask, "V8dV8dV8dUcIi", "ncV:512:", "avx512er,evex512")
-TARGET_BUILTIN(__builtin_ia32_rsqrt28ps_mask, "V16fV16fV16fUsIi", "ncV:512:", "avx512er,evex512")
-
TARGET_BUILTIN(__builtin_ia32_rcp14sd_mask, "V2dV2dV2dV2dUc", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_rcp14ss_mask, "V4fV4fV4fV4fUc", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_rcp14pd512_mask, "V8dV8dV8dUc", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_rcp14ps512_mask, "V16fV16fV16fUs", "ncV:512:", "avx512f,evex512")
-TARGET_BUILTIN(__builtin_ia32_rcp28sd_round_mask, "V2dV2dV2dV2dUcIi", "ncV:128:", "avx512er")
-TARGET_BUILTIN(__builtin_ia32_rcp28ss_round_mask, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512er")
-TARGET_BUILTIN(__builtin_ia32_rcp28pd_mask, "V8dV8dV8dUcIi", "ncV:512:", "avx512er,evex512")
-TARGET_BUILTIN(__builtin_ia32_rcp28ps_mask, "V16fV16fV16fUsIi", "ncV:512:", "avx512er,evex512")
-TARGET_BUILTIN(__builtin_ia32_exp2pd_mask, "V8dV8dV8dUcIi", "ncV:512:", "avx512er,evex512")
-TARGET_BUILTIN(__builtin_ia32_exp2ps_mask, "V16fV16fV16fUsIi", "ncV:512:", "avx512er,evex512")
-
TARGET_BUILTIN(__builtin_ia32_cvttps2dq512_mask, "V16iV16fV16iUsIi", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_cvttps2udq512_mask, "V16iV16fV16iUsIi", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_cvttpd2dq512_mask, "V8iV8dV8iUcIi", "ncV:512:", "avx512f,evex512")
@@ -960,15 +948,6 @@ TARGET_BUILTIN(__builtin_ia32_scattersiv16si, "vv*UsV16iV16iIi", "nV:512:", "avx
TARGET_BUILTIN(__builtin_ia32_scatterdiv8di, "vv*UcV8OiV8OiIi", "nV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_scatterdiv16si, "vv*UcV8OiV8iIi", "nV:512:", "avx512f,evex512")
-TARGET_BUILTIN(__builtin_ia32_gatherpfdpd, "vUcV8ivC*IiIi", "nV:512:", "avx512pf,evex512")
-TARGET_BUILTIN(__builtin_ia32_gatherpfdps, "vUsV16ivC*IiIi", "nV:512:", "avx512pf,evex512")
-TARGET_BUILTIN(__builtin_ia32_gatherpfqpd, "vUcV8OivC*IiIi", "nV:512:", "avx512pf,evex512")
-TARGET_BUILTIN(__builtin_ia32_gatherpfqps, "vUcV8OivC*IiIi", "nV:512:", "avx512pf,evex512")
-TARGET_BUILTIN(__builtin_ia32_scatterpfdpd, "vUcV8iv*IiIi", "nV:512:", "avx512pf,evex512")
-TARGET_BUILTIN(__builtin_ia32_scatterpfdps, "vUsV16iv*IiIi", "nV:512:", "avx512pf,evex512")
-TARGET_BUILTIN(__builtin_ia32_scatterpfqpd, "vUcV8Oiv*IiIi", "nV:512:", "avx512pf,evex512")
-TARGET_BUILTIN(__builtin_ia32_scatterpfqps, "vUcV8Oiv*IiIi", "nV:512:", "avx512pf,evex512")
-
TARGET_BUILTIN(__builtin_ia32_knotqi, "UcUc", "nc", "avx512dq")
TARGET_BUILTIN(__builtin_ia32_knothi, "UsUs", "nc", "avx512f")
TARGET_BUILTIN(__builtin_ia32_knotsi, "UiUi", "nc", "avx512bw")
diff --git a/clang/include/clang/Basic/DiagnosticCommonKinds.td b/clang/include/clang/Basic/DiagnosticCommonKinds.td
index 0738f43ca555..1e44bc4ad09b 100644
--- a/clang/include/clang/Basic/DiagnosticCommonKinds.td
+++ b/clang/include/clang/Basic/DiagnosticCommonKinds.td
@@ -361,9 +361,6 @@ def warn_invalid_feature_combination : Warning<
def warn_target_unrecognized_env : Warning<
"mismatch between architecture and environment in target triple '%0'; did you mean '%1'?">,
InGroup<InvalidCommandLineArgument>;
-def warn_knl_knm_isa_support_removed : Warning<
- "KNL, KNM related Intel Xeon Phi CPU's specific ISA's supports will be removed in LLVM 19.">,
- InGroup<DiagGroup<"knl-knm-isa-support-removed">>;
def err_target_unsupported_abi_with_fpu : Error<
"'%0' ABI is not supported with FPU">;
diff --git a/clang/include/clang/Basic/DiagnosticDriverKinds.td b/clang/include/clang/Basic/DiagnosticDriverKinds.td
index 9d97a75f696f..773b234cd68f 100644
--- a/clang/include/clang/Basic/DiagnosticDriverKinds.td
+++ b/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -58,7 +58,7 @@ def warn_drv_avr_stdlib_not_linked: Warning<
def err_drv_cuda_bad_gpu_arch : Error<"unsupported CUDA gpu architecture: %0">;
def err_drv_offload_bad_gpu_arch : Error<"unsupported %0 gpu architecture: %1">;
def err_drv_offload_missing_gpu_arch : Error<
- "Must pass in an explicit %0 gpu architecture to '%1'">;
+ "must pass in an explicit %0 gpu architecture to '%1'">;
def err_drv_no_cuda_installation : Error<
"cannot find CUDA installation; provide its path via '--cuda-path', or pass "
"'-nocudainc' to build without CUDA includes">;
@@ -90,8 +90,8 @@ def err_drv_no_hipspv_device_lib : Error<
"'--hip-path' or '--hip-device-lib-path', or pass '-nogpulib' to build "
"without HIP device library">;
def err_drv_hipspv_no_hip_path : Error<
- "'--hip-path' must be specified when offloading to "
- "SPIR-V%select{| unless %1 is given}0.">;
+ "'--hip-path' must be specified when offloading to SPIR-V unless '-nogpuinc' "
+ "is given">;
// TODO: Remove when COV6 is fully supported by ROCm.
def warn_drv_amdgpu_cov6: Warning<
@@ -137,13 +137,13 @@ def warn_drv_unsupported_option_for_flang : Warning<
"the argument '%0' is not supported for option '%1'. Mapping to '%1%2'">,
InGroup<OptionIgnored>;
def warn_drv_unsupported_diag_option_for_flang : Warning<
- "The warning option '-%0' is not supported">,
+ "the warning option '-%0' is not supported">,
InGroup<OptionIgnored>;
def warn_drv_unsupported_option_for_processor : Warning<
"ignoring '%0' option as it is not currently supported for processor '%1'">,
InGroup<OptionIgnored>;
def warn_drv_unsupported_openmp_library : Warning<
- "The library '%0=%1' is not supported, openmp is not be enabled">,
+ "the library '%0=%1' is not supported, OpenMP will not be enabled">,
InGroup<OptionIgnored>;
def err_drv_invalid_thread_model_for_target : Error<
@@ -356,7 +356,7 @@ def err_drv_expecting_fopenmp_with_fopenmp_targets : Error<
"compatible with offloading; e.g., '-fopenmp=libomp' or '-fopenmp=libiomp5'">;
def err_drv_failed_to_deduce_target_from_arch : Error<
"failed to deduce triple for target architecture '%0'; specify the triple "
- "using '-fopenmp-targets' and '-Xopenmp-target' instead.">;
+ "using '-fopenmp-targets' and '-Xopenmp-target' instead">;
def err_drv_omp_offload_target_missingbcruntime : Error<
"no library '%0' found in the default clang lib directory or in LIBRARY_PATH"
"; use '--libomptarget-%1-bc-path' to specify %1 bitcode library">;
@@ -515,14 +515,6 @@ def err_analyzer_checker_incompatible_analyzer_option : Error<
def err_analyzer_not_built_with_z3 : Error<
"analyzer constraint manager 'z3' is only available if LLVM was built with "
"-DLLVM_ENABLE_Z3_SOLVER=ON">;
-def warn_analyzer_deprecated_option : Warning<
- "analyzer option '%0' is deprecated. This flag will be removed in %1, and "
- "passing this option will be an error.">,
- InGroup<DeprecatedStaticAnalyzerFlag>;
-def warn_analyzer_deprecated_option_with_alternative : Warning<
- "analyzer option '%0' is deprecated. This flag will be removed in %1, and "
- "passing this option will be an error. Use '%2' instead.">,
- InGroup<DeprecatedStaticAnalyzerFlag>;
def warn_drv_needs_hvx : Warning<
"%0 requires HVX, use -mhvx/-mhvx= to enable it">,
@@ -555,10 +547,12 @@ def err_drv_extract_api_wrong_kind : Error<
"in api extraction; use '-x %2' to override">;
def err_drv_missing_symbol_graph_dir: Error<
- "Must provide a symbol graph output directory using --symbol-graph-dir=<directory>">;
+ "must provide a symbol graph output directory using "
+ "'--symbol-graph-dir=<directory>'">;
def err_drv_unexpected_symbol_graph_output : Error<
- "Unexpected output symbol graph '%1'; please provide --symbol-graph-dir=<directory> instead">;
+ "unexpected output symbol graph '%1'; please provide "
+ "'--symbol-graph-dir=<directory>' instead">;
def warn_slash_u_filename : Warning<"'/U%0' treated as the '/U' option">,
InGroup<DiagGroup<"slash-u-filename">>;
@@ -599,9 +593,6 @@ def warn_drv_unsupported_gpopt : Warning<
"ignoring '-mgpopt' option as it cannot be used with %select{|the implicit"
" usage of }0-mabicalls">,
InGroup<UnsupportedGPOpt>;
-def warn_drv_unsupported_tocdata: Warning<
- "ignoring '-mtocdata' as it is only supported for -mcmodel=small">,
- InGroup<OptionIgnored>;
def warn_drv_unsupported_sdata : Warning<
"ignoring '-msmall-data-limit=' with -mcmodel=large for -fpic or RV64">,
InGroup<OptionIgnored>;
@@ -770,19 +761,19 @@ def err_drv_hlsl_16bit_types_unsupported: Error<
"'%0' option requires target HLSL Version >= 2018%select{| and shader model >= 6.2}1, but HLSL Version is '%2'%select{| and shader model is '%3'}1">;
def err_drv_hlsl_bad_shader_unsupported : Error<
"%select{shader model|Vulkan environment|shader stage}0 '%1' in target '%2' is invalid for HLSL code generation">;
-def warn_drv_dxc_missing_dxv : Warning<"dxv not found. "
- "Resulting DXIL will not be validated or signed for use in release environments.">,
- InGroup<DXILValidation>;
+def warn_drv_dxc_missing_dxv : Warning<
+ "dxv not found; resulting DXIL will not be validated or signed for use in "
+ "release environment">, InGroup<DXILValidation>;
def err_drv_invalid_range_dxil_validator_version : Error<
- "invalid validator version : %0\n"
- "Validator version must be less than or equal to current internal version.">;
+ "invalid validator version : %0; validator version must be less than or "
+ "equal to current internal version">;
def err_drv_invalid_format_dxil_validator_version : Error<
- "invalid validator version : %0\n"
- "Format of validator version is \"<major>.<minor>\" (ex:\"1.4\").">;
+ "invalid validator version : %0; format of validator version is "
+ "\"<major>.<minor>\" (ex:\"1.4\")">;
def err_drv_invalid_empty_dxil_validator_version : Error<
- "invalid validator version : %0\n"
- "If validator major version is 0, minor version must also be 0.">;
+ "invalid validator version : %0; if validator major version is 0, minor "
+ "version must also be 0">;
def warn_drv_sarif_format_unstable : Warning<
"diagnostic formatting in SARIF mode is currently unstable">,
@@ -796,12 +787,10 @@ def warn_drv_loongarch_conflicting_implied_val : Warning<
InGroup<OptionIgnored>;
def err_drv_loongarch_invalid_mfpu_EQ : Error<
"invalid argument '%0' to -mfpu=; must be one of: 64, 32, none, 0 (alias for none)">;
-def err_drv_loongarch_wrong_fpu_width_for_lsx : Error<
- "wrong fpu width; LSX depends on 64-bit FPU.">;
-def err_drv_loongarch_wrong_fpu_width_for_lasx : Error<
- "wrong fpu width; LASX depends on 64-bit FPU.">;
+def err_drv_loongarch_wrong_fpu_width : Error<
+ "wrong fpu width; %select{LSX|LASX}0 depends on 64-bit FPU">;
def err_drv_loongarch_invalid_simd_option_combination : Error<
- "invalid option combination; LASX depends on LSX.">;
+ "invalid option combination; LASX depends on LSX">;
def err_drv_expand_response_file : Error<
"failed to expand response file: %0">;
@@ -813,9 +802,9 @@ def note_drv_available_multilibs : Note<
"available multilibs are:%0">;
def warn_android_unversioned_fallback : Warning<
- "Using unversioned Android target directory %0 for target %1. Unversioned"
- " directories will not be used in Clang 19. Provide a versioned directory"
- " for the target version or lower instead.">,
+ "using unversioned Android target directory %0 for target %1; unversioned "
+ "directories will not be used in Clang 19 -- provide a versioned directory "
+ "for the target version or lower instead">,
InGroup<DiagGroup<"android-unversioned-fallback">>;
def err_drv_triple_version_invalid : Error<
diff --git a/clang/include/clang/Basic/DiagnosticFrontendKinds.td b/clang/include/clang/Basic/DiagnosticFrontendKinds.td
index e456ec2cac46..85c32e55bdab 100644
--- a/clang/include/clang/Basic/DiagnosticFrontendKinds.td
+++ b/clang/include/clang/Basic/DiagnosticFrontendKinds.td
@@ -71,14 +71,14 @@ def remark_fe_backend_optimization_remark_analysis : Remark<"%0">, BackendInfo,
InGroup<BackendOptimizationRemarkAnalysis>;
def remark_fe_backend_optimization_remark_analysis_fpcommute : Remark<"%0; "
"allow reordering by specifying '#pragma clang loop vectorize(enable)' "
- "before the loop or by providing the compiler option '-ffast-math'.">,
+ "before the loop or by providing the compiler option '-ffast-math'">,
BackendInfo, InGroup<BackendOptimizationRemarkAnalysis>;
def remark_fe_backend_optimization_remark_analysis_aliasing : Remark<"%0; "
"allow reordering by specifying '#pragma clang loop vectorize(enable)' "
- "before the loop. If the arrays will always be independent specify "
+ "before the loop; if the arrays will always be independent, specify "
"'#pragma clang loop vectorize(assume_safety)' before the loop or provide "
- "the '__restrict__' qualifier with the independent array arguments. "
- "Erroneous results will occur if these options are incorrectly applied!">,
+ "the '__restrict__' qualifier with the independent array arguments -- "
+ "erroneous results will occur if these options are incorrectly applied">,
BackendInfo, InGroup<BackendOptimizationRemarkAnalysis>;
def warn_fe_backend_optimization_failure : Warning<"%0">, BackendInfo,
@@ -152,8 +152,8 @@ def warn_fe_serialized_diag_merge_failure : Warning<
def warn_fe_serialized_diag_failure : Warning<
"unable to open file %0 for serializing diagnostics (%1)">,
InGroup<SerializedDiagnostics>;
-def warn_fe_serialized_diag_failure_during_finalisation : Warning<
- "Received warning after diagnostic serialization teardown was underway: %0">,
+def warn_fe_serialized_diag_failure_during_finalization : Warning<
+ "received warning after diagnostic serialization teardown was underway: %0">,
InGroup<SerializedDiagnostics>;
def err_verify_missing_line : Error<
@@ -337,7 +337,7 @@ def warn_atomic_op_oversized : Warning<
InGroup<AtomicAlignment>;
def warn_sync_op_misaligned : Warning<
- "__sync builtin operation MUST have natural alignment (consider using __atomic).">,
+ "__sync builtin operation must have natural alignment (consider using __atomic)">,
InGroup<SyncAlignment>;
def warn_alias_with_section : Warning<
@@ -359,17 +359,16 @@ def warn_profile_data_unprofiled : Warning<
"no profile data available for file \"%0\"">,
InGroup<ProfileInstrUnprofiled>;
def warn_profile_data_misexpect : Warning<
- "Potential performance regression from use of __builtin_expect(): "
- "Annotation was correct on %0 of profiled executions.">,
- BackendInfo,
- InGroup<MisExpect>;
+ "potential performance regression from use of __builtin_expect(): "
+ "annotation was correct on %0 of profiled executions">,
+ BackendInfo, InGroup<MisExpect>;
} // end of instrumentation issue category
def err_extract_api_ignores_file_not_found :
Error<"file '%0' specified by '--extract-api-ignores=' not found">, DefaultFatal;
def warn_missing_symbol_graph_dir : Warning<
- "Missing symbol graph output directory, defaulting to working directory">,
+ "missing symbol graph output directory, defaulting to working directory">,
InGroup<ExtractAPIMisuse>;
def err_ast_action_on_llvm_ir : Error<
diff --git a/clang/include/clang/Basic/DiagnosticGroups.td b/clang/include/clang/Basic/DiagnosticGroups.td
index 4fad4d1a0eca..6b595a356793 100644
--- a/clang/include/clang/Basic/DiagnosticGroups.td
+++ b/clang/include/clang/Basic/DiagnosticGroups.td
@@ -15,8 +15,6 @@ def Implicit : DiagGroup<"implicit", [
ImplicitInt
]>;
-def DeprecatedStaticAnalyzerFlag : DiagGroup<"deprecated-static-analyzer-flag">;
-
// Empty DiagGroups are recognized by clang but ignored.
def ODR : DiagGroup<"odr">;
def : DiagGroup<"abi">;
diff --git a/clang/include/clang/Basic/DiagnosticInstallAPIKinds.td b/clang/include/clang/Basic/DiagnosticInstallAPIKinds.td
index 674742431dcb..cdf27247602f 100644
--- a/clang/include/clang/Basic/DiagnosticInstallAPIKinds.td
+++ b/clang/include/clang/Basic/DiagnosticInstallAPIKinds.td
@@ -24,7 +24,7 @@ def err_no_matching_target : Error<"no matching target found for target variant
def err_unsupported_vendor : Error<"vendor '%0' is not supported: '%1'">;
def err_unsupported_environment : Error<"environment '%0' is not supported: '%1'">;
def err_unsupported_os : Error<"os '%0' is not supported: '%1'">;
-def err_cannot_read_input_list : Error<"could not read %select{alias list|filelist}0 '%1': %2">;
+def err_cannot_read_input_list : Error<"could not read %0 input list '%1': %2">;
def err_invalid_label: Error<"label '%0' is reserved: use a different label name for -X<label>">;
} // end of command line category.
@@ -59,8 +59,8 @@ def err_platform_mismatch : Error<"platform does not match: '%0' (provided) vs '
def err_install_name_mismatch : Error<"install_name does not match: '%0' (provided) vs '%1' (found)">;
def err_current_version_mismatch : Error<"current_version does not match: '%0' (provided) vs '%1' (found)">;
def err_compatibility_version_mismatch : Error<"compatibility_version does not match: '%0' (provided) vs '%1' (found)">;
-def err_appextension_safe_mismatch : Error<"ApplicationExtensionSafe flag does not match: '%0' (provided) vs '%1' (found)">;
-def err_shared_cache_eligiblity_mismatch : Error<"NotForDyldSharedCache flag does not match: '%0' (provided) vs '%1' (found)">;
+def err_appextension_safe_mismatch : Error<"the ApplicationExtensionSafe flag does not match: '%0' (provided) vs '%1' (found)">;
+def err_shared_cache_eligiblity_mismatch : Error<"the NotForDyldSharedCache flag does not match: '%0' (provided) vs '%1' (found)">;
def err_no_twolevel_namespace : Error<"flat namespace libraries are not supported">;
def err_parent_umbrella_missing: Error<"parent umbrella missing from %0: '%1'">;
def err_parent_umbrella_mismatch : Error<"parent umbrella does not match: '%0' (provided) vs '%1' (found)">;
diff --git a/clang/include/clang/Basic/DiagnosticLexKinds.td b/clang/include/clang/Basic/DiagnosticLexKinds.td
index ad6bacfb118d..5a4551a96ca4 100644
--- a/clang/include/clang/Basic/DiagnosticLexKinds.td
+++ b/clang/include/clang/Basic/DiagnosticLexKinds.td
@@ -991,5 +991,5 @@ def err_pp_unclosed_pragma_unsafe_buffer_usage :
Error<"'#pragma unsafe_buffer_usage' was not ended">;
def err_pp_pragma_unsafe_buffer_usage_syntax :
-Error<"Expected 'begin' or 'end'">;
+Error<"expected 'begin' or 'end'">;
}
diff --git a/clang/include/clang/Basic/DiagnosticParseKinds.td b/clang/include/clang/Basic/DiagnosticParseKinds.td
index 8316845844cb..f8328be5890d 100644
--- a/clang/include/clang/Basic/DiagnosticParseKinds.td
+++ b/clang/include/clang/Basic/DiagnosticParseKinds.td
@@ -1112,10 +1112,12 @@ def err_zero_version : Error<
"version number must have non-zero major, minor, or sub-minor version">;
def err_availability_expected_platform : Error<
"expected a platform name, e.g., 'macos'">;
+def err_availability_expected_environment : Error<
+ "expected an environment name, e.g., 'compute'">;
// objc_bridge_related attribute
def err_objcbridge_related_expected_related_class : Error<
- "expected a related ObjectiveC class name, e.g., 'NSColor'">;
+ "expected a related Objective-C class name, e.g., 'NSColor'">;
def err_objcbridge_related_selector_name : Error<
"expected a class method selector with single argument, e.g., 'colorWithCGColor:'">;
@@ -1343,8 +1345,8 @@ def note_pragma_attribute_namespace_on_attribute : Note<
"omit the namespace to add attributes to the most-recently"
" pushed attribute group">;
def warn_no_support_for_eval_method_source_on_m32 : Warning<
- "Setting the floating point evaluation method to `source` on a target"
- " without SSE is not supported.">, InGroup<Pragmas>;
+ "setting the floating point evaluation method to `source` on a target "
+ "without SSE is not supported">, InGroup<Pragmas>;
// - #pragma __debug
def warn_pragma_debug_dependent_argument : Warning<
"%select{value|type}0-dependent expression passed as an argument to debug "
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 1efa3af121c1..f15cba63624e 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -310,7 +310,7 @@ def err_invalid_vector_long_double_decl_spec : Error<
def err_invalid_vector_complex_decl_spec : Error<
"cannot use '_Complex' with '__vector'">;
def warn_vector_long_decl_spec_combination : Warning<
- "Use of 'long' with '__vector' is deprecated">, InGroup<Deprecated>;
+ "use of 'long' with '__vector' is deprecated">, InGroup<Deprecated>;
def err_redeclaration_different_type : Error<
"redeclaration of %0 with a different type%diff{: $ vs $|}1,2">;
@@ -754,7 +754,7 @@ def note_include_header_or_declare : Note<
def note_previous_builtin_declaration : Note<"%0 is a builtin with type %1">;
def warn_implicit_decl_no_jmp_buf
: Warning<"declaration of built-in function '%0' requires the declaration"
- " of the 'jmp_buf' type, commonly provided in the header <setjmp.h>.">,
+ " of the 'jmp_buf' type, commonly provided in the header <setjmp.h>">,
InGroup<DiagGroup<"incomplete-setjmp-declaration">>;
def warn_implicit_decl_requires_sysheader : Warning<
"declaration of built-in function '%1' requires inclusion of the header <%0>">,
@@ -855,7 +855,7 @@ def note_strncat_wrong_size : Note<
"the terminating null byte">;
def warn_assume_side_effects : Warning<
- "the argument to %0 has side effects that will be discarded">,
+ "assumption is ignored because it contains (potential) side-effects">,
InGroup<DiagGroup<"assume">>;
def warn_omp_assume_attribute_string_unknown : Warning<
"unknown assumption string '%0'; attribute is potentially ignored">,
@@ -3197,7 +3197,7 @@ def err_attribute_bad_sve_vector_size : Error<
"'-msve-vector-bits' ('%1')">;
def err_attribute_arm_feature_sve_bits_unsupported : Error<
"%0 is only supported when '-msve-vector-bits=<bits>' is specified with a "
- "value of 128, 256, 512, 1024 or 2048.">;
+ "value of 128, 256, 512, 1024 or 2048">;
def warn_attribute_arm_sm_incompat_builtin : Warning<
"builtin call has undefined behaviour when called from a %0 function">,
InGroup<DiagGroup<"undefined-arm-streaming">>;
@@ -3837,6 +3837,9 @@ def note_cannot_use_trivial_abi_reason : Note<
// Availability attribute
def warn_availability_unknown_platform : Warning<
"unknown platform %0 in availability macro">, InGroup<Availability>;
+def warn_availability_unknown_environment : Warning<
+ "unknown environment %0 in availability macro">, InGroup<Availability>;
+
def warn_availability_version_ordering : Warning<
"feature cannot be %select{introduced|deprecated|obsoleted}0 in %1 version "
"%2 before it was %select{introduced|deprecated|obsoleted}3 in version %4; "
@@ -3867,13 +3870,21 @@ def note_protocol_method : Note<
def warn_availability_fuchsia_unavailable_minor : Warning<
"Fuchsia API Level prohibits specifying a minor or sub-minor version">,
InGroup<Availability>;
+def err_availability_unexpected_parameter: Error<
+ "unexpected parameter '%0' in availability attribute, not permitted in %select{HLSL|C/C++}1">;
def warn_unguarded_availability :
- Warning<"%0 is only available on %1 %2 or newer">,
+ Warning<"%0 is only available %select{|in %4 environment }3on %1 %2 or newer">,
+ InGroup<UnguardedAvailability>, DefaultIgnore;
+def warn_unguarded_availability_unavailable :
+ Warning<"%0 is unavailable">,
InGroup<UnguardedAvailability>, DefaultIgnore;
def warn_unguarded_availability_new :
Warning<warn_unguarded_availability.Summary>,
InGroup<UnguardedAvailabilityNew>;
+def warn_unguarded_availability_unavailable_new :
+ Warning<warn_unguarded_availability_unavailable.Summary>,
+ InGroup<UnguardedAvailabilityNew>;
def note_decl_unguarded_availability_silence : Note<
"annotate %select{%1|anonymous %1}0 with an availability attribute to silence this warning">;
def note_unguarded_available_silence : Note<
@@ -3964,7 +3975,7 @@ def warn_acquired_before : Warning<
"%0 '%1' must be acquired before '%2'">,
InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def warn_acquired_before_after_cycle : Warning<
- "Cycle in acquired_before/after dependencies, starting with '%0'">,
+ "cycle in acquired_before/after dependencies, starting with '%0'">,
InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
@@ -4515,7 +4526,7 @@ def err_objc_attr_typedef_not_void_pointer : Error<
def err_objc_cf_bridged_not_interface : Error<
"CF object of type %0 is bridged to %1, which is not an Objective-C class">;
def err_objc_ns_bridged_invalid_cfobject : Error<
- "ObjectiveC object of type %0 is bridged to %1, which is not valid CF object">;
+ "Objective-C object of type %0 is bridged to %1, which is not valid CF object">;
def warn_objc_invalid_bridge : Warning<
"%0 bridges to %1, not %2">, InGroup<ObjCBridge>;
def warn_objc_invalid_bridge_to_cf : Warning<
@@ -5870,8 +5881,8 @@ def note_availability_specified_here : Note<
"%0 has been explicitly marked "
"%select{unavailable|deleted|deprecated}1 here">;
def note_partial_availability_specified_here : Note<
- "%0 has been marked as being introduced in %1 %2 here, "
- "but the deployment target is %1 %3">;
+ "%0 has been marked as being introduced in %1 %2 %select{|in %5 environment }4here, "
+ "but the deployment target is %1 %3%select{| %6 environment }4">;
def note_implicitly_deleted : Note<
"explicitly defaulted function was implicitly deleted here">;
def warn_not_enough_argument : Warning<
@@ -7533,6 +7544,11 @@ def err_explicit_object_parameter_mutable: Error<
def err_invalid_explicit_object_type_in_lambda: Error<
"invalid explicit object parameter type %0 in lambda with capture; "
"the type must be the same as, or derived from, the lambda">;
+def err_explicit_object_lambda_ambiguous_base : Error<
+ "lambda %0 is inaccessible due to ambiguity:%1">;
+def err_explicit_object_lambda_inaccessible_base : Error<
+ "invalid explicit object parameter type %0 in lambda with capture; "
+ "the type must derive publicly from the lambda">;
def err_ref_qualifier_overload : Error<
"cannot overload a member function %select{without a ref-qualifier|with "
@@ -8005,15 +8021,15 @@ def warn_deprecated_volatile_structured_binding : Warning<
InGroup<DeprecatedVolatile>;
def warn_deprecated_altivec_src_compat : Warning<
- "Current handling of vector bool and vector pixel types in this context are "
- "deprecated. The default behaviour will soon change to that implied by the "
+ "current handling of vector bool and vector pixel types in this context are "
+ "deprecated; the default behaviour will soon change to that implied by the "
"'-altivec-compat=xl' option">,
InGroup<DiagGroup<"deprecated-altivec-src-compat">>;
def warn_deprecated_lax_vec_conv_all : Warning<
- "Implicit conversion between vector types ('%0' and '%1') is deprecated. "
- "In the future, the behavior implied by '-fno-lax-vector-conversions' "
- "will be the default.">,
+ "implicit conversion between vector types ('%0' and '%1') is deprecated; "
+ "in the future, the behavior implied by '-fno-lax-vector-conversions' "
+ "will be the default">,
InGroup<DiagGroup<"deprecate-lax-vec-conv-all">>;
def err_catch_incomplete_ptr : Error<
@@ -8844,8 +8860,10 @@ def err_builtin_fn_use : Error<"builtin functions must be directly called">;
def warn_call_wrong_number_of_arguments : Warning<
"too %select{few|many}0 arguments in call to %1">;
+
def err_atomic_builtin_must_be_pointer : Error<
- "address argument to atomic builtin must be a pointer (%0 invalid)">;
+ "address argument to atomic builtin must be a pointer %select{|to a non-zero-sized object }1(%0 invalid)">;
+
def err_atomic_builtin_must_be_pointer_intptr : Error<
"address argument to atomic builtin must be a pointer to integer or pointer"
" (%0 invalid)">;
@@ -8861,7 +8879,7 @@ def err_atomic_exclusive_builtin_pointer_size : Error<
"address argument to load or store exclusive builtin must be a pointer to"
" 1,2,4 or 8 byte type (%0 invalid)">;
def err_atomic_builtin_ext_int_size : Error<
- "Atomic memory operand must have a power-of-two size">;
+ "atomic memory operand must have a power-of-two size">;
def err_atomic_builtin_bit_int_prohibit : Error<
"argument to atomic builtin of type '_BitInt' is not supported">;
def err_atomic_op_needs_atomic : Error<
@@ -8969,8 +8987,8 @@ def err_va_arg_in_device : Error<
def err_alias_not_supported_on_nvptx : Error<"CUDA older than 10.0 does not support .alias">;
def err_cuda_unattributed_constexpr_cannot_overload_device : Error<
"constexpr function %0 without __host__ or __device__ attributes cannot "
- "overload __device__ function with same signature. Add a __host__ "
- "attribute, or build with -fno-cuda-host-device-constexpr.">;
+ "overload __device__ function with the same signature; add a __host__ "
+ "attribute, or build with -fno-cuda-host-device-constexpr">;
def note_cuda_conflicting_device_function_declared_here : Note<
"conflicting __device__ function declared here">;
def err_cuda_device_exceptions : Error<
@@ -8978,9 +8996,9 @@ def err_cuda_device_exceptions : Error<
"%select{__device__|__global__|__host__|__host__ __device__}1 function">;
def err_dynamic_var_init : Error<
"dynamic initialization is not supported for "
- "__device__, __constant__, __shared__, and __managed__ variables.">;
+ "__device__, __constant__, __shared__, and __managed__ variables">;
def err_shared_var_init : Error<
- "initialization is not supported for __shared__ variables.">;
+ "initialization is not supported for __shared__ variables">;
def err_cuda_vla : Error<
"cannot use variable-length arrays in "
"%select{__device__|__global__|__host__|__host__ __device__}0 functions">;
@@ -9472,8 +9490,6 @@ def err_static_data_member_not_allowed_in_local_class : Error<
def err_base_clause_on_union : Error<"unions cannot have base classes">;
def err_base_must_be_class : Error<"base specifier must name a class">;
def err_union_as_base_class : Error<"unions cannot be base classes">;
-def err_circular_inheritance : Error<
- "circular inheritance between %0 and %1">;
def err_base_class_has_flexible_array_member : Error<
"base class %0 has a flexible array member">;
def err_incomplete_base_class : Error<"base class has incomplete type">;
@@ -10066,12 +10082,6 @@ def warn_new_dangling_initializer_list : Warning<
"the allocated initializer list}0 "
"will be destroyed at the end of the full-expression">,
InGroup<DanglingInitializerList>;
-def warn_unsupported_lifetime_extension : Warning<
- "lifetime extension of "
- "%select{temporary|backing array of initializer list}0 created "
- "by aggregate initialization using a default member initializer "
- "is not yet supported; lifetime of %select{temporary|backing array}0 "
- "will end at the end of the full-expression">, InGroup<Dangling>;
// For non-floating point, expressions of the form x == x or x != x
// should result in a warning, since these always evaluate to a constant.
@@ -10247,9 +10257,6 @@ def err_fallthrough_attr_outside_switch : Error<
def err_fallthrough_attr_invalid_placement : Error<
"fallthrough annotation does not directly precede switch label">;
-def err_assume_attr_args : Error<
- "attribute '%0' requires a single expression argument">;
-
def warn_unreachable_default : Warning<
"default label in switch which covers all enumeration values">,
InGroup<CoveredSwitchDefault>, DefaultIgnore;
@@ -10375,12 +10382,12 @@ def err_shufflevector_argument_too_large : Error<
"index for __builtin_shufflevector must be less than the total number "
"of vector elements">;
def err_shufflevector_minus_one_is_undefined_behavior_constexpr : Error<
- "index for __builtin_shufflevector not within the bounds of the input vectors; index of -1 found at position %0 not permitted in a constexpr context.">;
+ "index for __builtin_shufflevector not within the bounds of the input vectors; index of -1 found at position %0 is not permitted in a constexpr context">;
def err_convertvector_non_vector : Error<
"first argument to __builtin_convertvector must be a vector">;
def err_convertvector_constexpr_unsupported_vector_cast : Error<
- "unsupported vector cast from %0 to %1 in a constant expression.">;
+ "unsupported vector cast from %0 to %1 in a constant expression">;
def err_builtin_non_vector_type : Error<
"%0 argument to %1 must be of vector type">;
def err_convertvector_incompatible_vector : Error<
@@ -10708,7 +10715,7 @@ def err_kernel_arg_address_space : Error<
"pointer arguments to kernel functions must reside in '__global', "
"'__constant' or '__local' address space">;
def err_opencl_ext_vector_component_invalid_length : Error<
- "vector component access has invalid length %0. Supported: 1,2,3,4,8,16.">;
+ "vector component access has invalid length %0; supported lengths are: 1,2,3,4,8,16">;
def err_opencl_function_variable : Error<
"%select{non-kernel function|function scope}0 variable cannot be declared in %1 address space">;
def err_opencl_addrspace_scope : Error<
@@ -11156,12 +11163,12 @@ def err_omp_atomic_compare : Error<
"the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}',"
" '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}',"
" 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type,"
- " and 'ordop' is one of '<' or '>'.">;
+ " and 'ordop' is one of '<' or '>'">;
def err_omp_atomic_compare_capture : Error<
"the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}',"
" '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}',"
" 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x', 'r', and 'v' are lvalue expressions with scalar type, 'expr', 'e', and 'd' are expressions with scalar type,"
- " and 'ordop' is one of '<' or '>'.">;
+ " and 'ordop' is one of '<' or '>'">;
def note_omp_atomic_compare: Note<
"%select{expected compound statement|expected exactly one expression statement|expected assignment statement|expected conditional operator|expect result value to be at false expression|"
"expect binary operator in conditional expression|expect '<', '>' or '==' as order operator|expect comparison in a form of 'x == e', 'e == x', 'x ordop expr', or 'expr ordop x'|"
@@ -11327,7 +11334,7 @@ def err_omp_expected_int_param : Error<
def err_omp_at_least_one_motion_clause_required : Error<
"expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'">;
def err_omp_cannot_update_with_internal_linkage : Error<
- "the host cannot update a declare target variable that is not externally visible.">;
+ "the host cannot update a declare target variable that is not externally visible">;
def err_omp_usedeviceptr_not_a_pointer : Error<
"expected pointer or reference to pointer in 'use_device_ptr' clause">;
def err_omp_argument_type_isdeviceptr : Error <
@@ -11348,10 +11355,10 @@ def err_omp_reduction_vla_unsupported : Error<
def err_omp_linear_distribute_var_non_loop_iteration : Error<
"only loop iteration variables are allowed in 'linear' clause in distribute directives">;
def warn_omp_non_trivial_type_mapped : Warning<
- "Type %0 is not trivially copyable and not guaranteed to be mapped correctly">,
+ "type %0 is not trivially copyable and not guaranteed to be mapped correctly">,
InGroup<OpenMPMapping>;
def err_omp_requires_clause_redeclaration : Error <
- "Only one %0 clause can appear on a requires directive in a single translation unit">;
+ "only one %0 clause can appear on a requires directive in a single translation unit">;
def note_omp_requires_previous_clause : Note <
"%0 clause previously used here">;
def err_omp_directive_before_requires : Error <
@@ -11359,7 +11366,7 @@ def err_omp_directive_before_requires : Error <
def note_omp_requires_encountered_directive : Note <
"'%0' previously encountered here">;
def err_omp_device_ancestor_without_requires_reverse_offload : Error <
- "Device clause with ancestor device-modifier used without specifying 'requires reverse_offload'">;
+ "device clause with ancestor device-modifier used without specifying 'requires reverse_offload'">;
def err_omp_invalid_scope : Error <
"'#pragma omp %0' directive must appear only in file scope">;
def note_omp_invalid_length_on_this_ptr_mapping : Note <
@@ -11771,7 +11778,7 @@ def note_await_ready_no_bool_conversion : Note<
"return type of 'await_ready' is required to be contextually convertible to 'bool'"
>;
def warn_coroutine_handle_address_invalid_return_type : Warning <
- "return type of 'coroutine_handle<>::address should be 'void*' (have %0) in order to get capability with existing async C API.">,
+ "return type of 'coroutine_handle<>::address should be 'void*' (have %0) in order to get capability with existing async C API">,
InGroup<Coroutine>;
def err_coroutine_promise_final_suspend_requires_nothrow : Error<
"the expression 'co_await __promise.final_suspend()' is required to be non-throwing"
@@ -11799,7 +11806,7 @@ def err_conflicting_aligned_options : Error <
"conflicting option '-fcoro-aligned-allocation' and '-fno-aligned-allocation'"
>;
def err_coro_invalid_addr_of_label : Error<
- "the GNU address of label extension is not allowed in coroutines."
+ "the GNU address of label extension is not allowed in coroutines"
>;
def err_coroutine_return_type : Error<
"function returns a type %0 marked with [[clang::coro_return_type]] but is neither a coroutine nor a coroutine wrapper; "
@@ -12353,7 +12360,8 @@ def err_acc_num_gangs_num_args
"provided}0">;
def err_acc_not_a_var_ref
: Error<"OpenACC variable is not a valid variable name, sub-array, array "
- "element, or composite variable member">;
+ "element,%select{| member of a composite variable,}0 or composite "
+ "variable member">;
def err_acc_typecheck_subarray_value
: Error<"OpenACC sub-array subscripted value is not an array or pointer">;
def err_acc_subarray_function_type
@@ -12384,5 +12392,22 @@ def note_acc_expected_pointer_var : Note<"expected variable of pointer type">;
def err_acc_clause_after_device_type
: Error<"OpenACC clause '%0' may not follow a '%1' clause in a "
"compute construct">;
-
+def err_acc_reduction_num_gangs_conflict
+ : Error<
+ "OpenACC 'reduction' clause may not appear on a 'parallel' construct "
+ "with a 'num_gangs' clause with more than 1 argument, have %0">;
+def err_acc_reduction_type
+ : Error<"OpenACC 'reduction' variable must be of scalar type, sub-array, or a "
+ "composite of scalar types;%select{| sub-array base}1 type is %0">;
+def err_acc_reduction_composite_type
+ : Error<"OpenACC 'reduction' variable must be a composite of scalar types; "
+ "%1 %select{is not a class or struct|is incomplete|is not an "
+ "aggregate}0">;
+def err_acc_reduction_composite_member_type :Error<
+ "OpenACC 'reduction' composite variable must not have non-scalar field">;
+def note_acc_reduction_composite_member_loc : Note<"invalid field is here">;
+
+// AMDGCN builtins diagnostics
+def err_amdgcn_global_load_lds_size_invalid_value : Error<"invalid size value">;
+def note_amdgcn_global_load_lds_size_valid_value : Note<"size must be 1, 2, or 4">;
} // end of sema component.
diff --git a/clang/include/clang/Basic/FileManager.h b/clang/include/clang/Basic/FileManager.h
index 8b4206e52cd4..e1f33d57a898 100644
--- a/clang/include/clang/Basic/FileManager.h
+++ b/clang/include/clang/Basic/FileManager.h
@@ -299,6 +299,8 @@ private:
getBufferForFileImpl(StringRef Filename, int64_t FileSize, bool isVolatile,
bool RequiresNullTerminator) const;
+ DirectoryEntry *&getRealDirEntry(const llvm::vfs::Status &Status);
+
public:
/// Get the 'stat' information for the given \p Path.
///
diff --git a/clang/include/clang/Basic/LangOptions.def b/clang/include/clang/Basic/LangOptions.def
index 09eb92d6f10d..4061451b2150 100644
--- a/clang/include/clang/Basic/LangOptions.def
+++ b/clang/include/clang/Basic/LangOptions.def
@@ -300,6 +300,7 @@ LANGOPT(HIPStdParInterposeAlloc, 1, 0, "Replace allocations / deallocations with
LANGOPT(OpenACC , 1, 0, "OpenACC Enabled")
+LANGOPT(MSVCEnableStdcMacro , 1, 0, "Define __STDC__ with '-fms-compatibility'")
LANGOPT(SizedDeallocation , 1, 0, "sized deallocation")
LANGOPT(AlignedAllocation , 1, 0, "aligned allocation")
LANGOPT(AlignedAllocationUnavailable, 1, 0, "aligned allocation functions are unavailable")
diff --git a/clang/include/clang/Basic/OpenACCClauses.def b/clang/include/clang/Basic/OpenACCClauses.def
index 7ecc51799468..3e464abaafd9 100644
--- a/clang/include/clang/Basic/OpenACCClauses.def
+++ b/clang/include/clang/Basic/OpenACCClauses.def
@@ -46,6 +46,7 @@ VISIT_CLAUSE(NumGangs)
VISIT_CLAUSE(NumWorkers)
VISIT_CLAUSE(Present)
VISIT_CLAUSE(Private)
+VISIT_CLAUSE(Reduction)
VISIT_CLAUSE(Self)
VISIT_CLAUSE(VectorLength)
VISIT_CLAUSE(Wait)
diff --git a/clang/include/clang/Basic/OpenACCKinds.h b/clang/include/clang/Basic/OpenACCKinds.h
index 0e38a04e7164..7b9d619a8aec 100644
--- a/clang/include/clang/Basic/OpenACCKinds.h
+++ b/clang/include/clang/Basic/OpenACCKinds.h
@@ -514,6 +514,42 @@ enum class OpenACCReductionOperator {
/// Invalid Reduction Clause Kind.
Invalid,
};
+
+template <typename StreamTy>
+inline StreamTy &printOpenACCReductionOperator(StreamTy &Out,
+ OpenACCReductionOperator Op) {
+ switch (Op) {
+ case OpenACCReductionOperator::Addition:
+ return Out << "+";
+ case OpenACCReductionOperator::Multiplication:
+ return Out << "*";
+ case OpenACCReductionOperator::Max:
+ return Out << "max";
+ case OpenACCReductionOperator::Min:
+ return Out << "min";
+ case OpenACCReductionOperator::BitwiseAnd:
+ return Out << "&";
+ case OpenACCReductionOperator::BitwiseOr:
+ return Out << "|";
+ case OpenACCReductionOperator::BitwiseXOr:
+ return Out << "^";
+ case OpenACCReductionOperator::And:
+ return Out << "&&";
+ case OpenACCReductionOperator::Or:
+ return Out << "||";
+ case OpenACCReductionOperator::Invalid:
+ return Out << "<invalid>";
+ }
+ llvm_unreachable("Unknown reduction operator kind");
+}
+inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &Out,
+ OpenACCReductionOperator Op) {
+ return printOpenACCReductionOperator(Out, Op);
+}
+inline llvm::raw_ostream &operator<<(llvm::raw_ostream &Out,
+ OpenACCReductionOperator Op) {
+ return printOpenACCReductionOperator(Out, Op);
+}
} // namespace clang
#endif // LLVM_CLANG_BASIC_OPENACCKINDS_H
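
Because both overloads above route through `printOpenACCReductionOperator`, reduction operators stream directly into diagnostics and `raw_ostream`s:

    // Prints "max && <invalid>"; unknown enum values hit llvm_unreachable.
    llvm::errs() << OpenACCReductionOperator::Max << " "
                 << OpenACCReductionOperator::And << " "
                 << OpenACCReductionOperator::Invalid << "\n";
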
diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index a9ea71cd0777..03570f94de66 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -2186,9 +2186,6 @@ let TargetGuard = "sme2" in {
def SVSQRSHRUN_X4 : SInst<"svqrshrun[_n]_{0}[_{d}_x4]", "b4i", "il", MergeNone, "aarch64_sve_sqrshrun_x4", [IsStreaming], [ImmCheck<1, ImmCheckShiftRight, 0>]>;
- def REINTERPRET_SVBOOL_TO_SVCOUNT : Inst<"svreinterpret[_c]", "}P", "Pc", MergeNone, "", [IsStreamingCompatible], []>;
- def REINTERPRET_SVCOUNT_TO_SVBOOL : Inst<"svreinterpret[_b]", "P}", "Pc", MergeNone, "", [IsStreamingCompatible], []>;
-
// SQDMULH
def SVSQDMULH_SINGLE_X2 : SInst<"svqdmulh[_single_{d}_x2]", "22d", "csil", MergeNone, "aarch64_sve_sqdmulh_single_vgx2", [IsStreaming], []>;
def SVSQDMULH_SINGLE_X4 : SInst<"svqdmulh[_single_{d}_x4]", "44d", "csil", MergeNone, "aarch64_sve_sqdmulh_single_vgx4", [IsStreaming], []>;
@@ -2197,6 +2194,9 @@ let TargetGuard = "sme2" in {
}
let TargetGuard = "sve2p1|sme2" in {
+ def REINTERPRET_SVBOOL_TO_SVCOUNT : Inst<"svreinterpret[_c]", "}P", "Pc", MergeNone, "", [IsStreamingCompatible], []>;
+ def REINTERPRET_SVCOUNT_TO_SVBOOL : Inst<"svreinterpret[_b]", "P}", "Pc", MergeNone, "", [IsStreamingCompatible], []>;
+
// SQRSHRN / UQRSHRN
def SVQRSHRN_X2 : SInst<"svqrshrn[_n]_{0}[_{d}_x2]", "h2i", "i", MergeNone, "aarch64_sve_sqrshrn_x2", [IsStreamingCompatible], [ImmCheck<1, ImmCheck1_16>]>;
def SVUQRSHRN_X2 : SInst<"svqrshrn[_n]_{0}[_{d}_x2]", "e2i", "Ui", MergeNone, "aarch64_sve_uqrshrn_x2", [IsStreamingCompatible], [ImmCheck<1, ImmCheck1_16>]>;
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index 7bb781667e92..de2f245fb29f 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -603,6 +603,7 @@ class MarshallingInfoVisibility<KeyPathAndMacro kpm, code default>
// Key paths that are constant during parsing of options with the same key path prefix.
defvar cplusplus = LangOpts<"CPlusPlus">;
defvar cpp11 = LangOpts<"CPlusPlus11">;
+defvar cpp14 = LangOpts<"CPlusPlus14">;
defvar cpp17 = LangOpts<"CPlusPlus17">;
defvar cpp20 = LangOpts<"CPlusPlus20">;
defvar c99 = LangOpts<"C99">;
@@ -2980,6 +2981,10 @@ def fms_compatibility : Flag<["-"], "fms-compatibility">, Group<f_Group>,
Visibility<[ClangOption, CC1Option, CLOption]>,
HelpText<"Enable full Microsoft Visual C++ compatibility">,
MarshallingInfoFlag<LangOpts<"MSVCCompat">>;
+def fms_define_stdc : Flag<["-"], "fms-define-stdc">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option, CLOption]>,
+ HelpText<"Define '__STDC__' to '1' in MSVC Compatibility mode">,
+ MarshallingInfoFlag<LangOpts<"MSVCEnableStdcMacro">>;
def fms_extensions : Flag<["-"], "fms-extensions">, Group<f_Group>,
Visibility<[ClangOption, CC1Option, CLOption]>,
HelpText<"Accept some non-standard constructs supported by the Microsoft compiler">,
@@ -3388,10 +3393,9 @@ defm relaxed_template_template_args : BoolFOption<"relaxed-template-template-arg
NegFlag<SetFalse, [], [CC1Option], "Disable">,
BothFlags<[], [ClangOption], " C++17 relaxed template template argument matching">>;
defm sized_deallocation : BoolFOption<"sized-deallocation",
- LangOpts<"SizedDeallocation">, DefaultFalse,
- PosFlag<SetTrue, [], [ClangOption, CC1Option],
- "Enable C++14 sized global deallocation functions">,
- NegFlag<SetFalse>>;
+ LangOpts<"SizedDeallocation">, Default<cpp14.KeyPath>,
+ PosFlag<SetTrue, [], [], "Enable C++14 sized global deallocation functions">,
+ NegFlag<SetFalse>, BothFlags<[], [ClangOption, CC1Option]>>;
defm aligned_allocation : BoolFOption<"aligned-allocation",
LangOpts<"AlignedAllocation">, Default<cpp17.KeyPath>,
PosFlag<SetTrue, [], [ClangOption], "Enable C++17 aligned allocation functions">,
@@ -6111,14 +6115,10 @@ def mavx512cd : Flag<["-"], "mavx512cd">, Group<m_x86_Features_Group>;
def mno_avx512cd : Flag<["-"], "mno-avx512cd">, Group<m_x86_Features_Group>;
def mavx512dq : Flag<["-"], "mavx512dq">, Group<m_x86_Features_Group>;
def mno_avx512dq : Flag<["-"], "mno-avx512dq">, Group<m_x86_Features_Group>;
-def mavx512er : Flag<["-"], "mavx512er">, Group<m_x86_Features_Group>;
-def mno_avx512er : Flag<["-"], "mno-avx512er">, Group<m_x86_Features_Group>;
def mavx512fp16 : Flag<["-"], "mavx512fp16">, Group<m_x86_Features_Group>;
def mno_avx512fp16 : Flag<["-"], "mno-avx512fp16">, Group<m_x86_Features_Group>;
def mavx512ifma : Flag<["-"], "mavx512ifma">, Group<m_x86_Features_Group>;
def mno_avx512ifma : Flag<["-"], "mno-avx512ifma">, Group<m_x86_Features_Group>;
-def mavx512pf : Flag<["-"], "mavx512pf">, Group<m_x86_Features_Group>;
-def mno_avx512pf : Flag<["-"], "mno-avx512pf">, Group<m_x86_Features_Group>;
def mavx512vbmi : Flag<["-"], "mavx512vbmi">, Group<m_x86_Features_Group>;
def mno_avx512vbmi : Flag<["-"], "mno-avx512vbmi">, Group<m_x86_Features_Group>;
def mavx512vbmi2 : Flag<["-"], "mavx512vbmi2">, Group<m_x86_Features_Group>;
@@ -6209,8 +6209,6 @@ def mpopcnt : Flag<["-"], "mpopcnt">, Group<m_x86_Features_Group>;
def mno_popcnt : Flag<["-"], "mno-popcnt">, Group<m_x86_Features_Group>;
def mprefetchi : Flag<["-"], "mprefetchi">, Group<m_x86_Features_Group>;
def mno_prefetchi : Flag<["-"], "mno-prefetchi">, Group<m_x86_Features_Group>;
-def mprefetchwt1 : Flag<["-"], "mprefetchwt1">, Group<m_x86_Features_Group>;
-def mno_prefetchwt1 : Flag<["-"], "mno-prefetchwt1">, Group<m_x86_Features_Group>;
def mprfchw : Flag<["-"], "mprfchw">, Group<m_x86_Features_Group>;
def mno_prfchw : Flag<["-"], "mno-prfchw">, Group<m_x86_Features_Group>;
def mptwrite : Flag<["-"], "mptwrite">, Group<m_x86_Features_Group>;
@@ -8312,6 +8310,9 @@ def _SLASH_vd : CLJoined<"vd">, HelpText<"Control vtordisp placement">,
Alias<vtordisp_mode_EQ>;
def _SLASH_X : CLFlag<"X">,
HelpText<"Do not add %INCLUDE% to include search path">, Alias<nostdlibinc>;
+def _SLASH_Zc___STDC__ : CLFlag<"Zc:__STDC__">,
+ HelpText<"Define __STDC__">,
+ Alias<fms_define_stdc>;
def _SLASH_Zc_sizedDealloc : CLFlag<"Zc:sizedDealloc">,
HelpText<"Enable C++14 sized global deallocation functions">,
Alias<fsized_deallocation>;
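The Options.td changes above add -fms-define-stdc (aliased by the CL-mode flag /Zc:__STDC__), tie the -fsized-deallocation default to the C++14 key path, and drop the retired AVX512ER/AVX512PF/PREFETCHWT1 feature flags. A minimal probe of the new flag, using only the spellings the hunks above define:

    /* probe.c -- checks whether __STDC__ is visible in MSVC-compat mode.
     * Hypothetical invocations, per the flag definitions above:
     *   clang-cl /Zc:__STDC__ /c probe.c
     *   clang -fms-compatibility -fms-define-stdc -c probe.c
     */
    #include <stdio.h>
    int main(void) {
    #if defined(__STDC__) && __STDC__
      puts("__STDC__ == 1");
    #else
      puts("__STDC__ not defined to 1");
    #endif
      return 0;
    }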
diff --git a/clang/include/clang/ExtractAPI/Serialization/SymbolGraphSerializer.h b/clang/include/clang/ExtractAPI/Serialization/SymbolGraphSerializer.h
index 724b087f7aea..27e9167ca1ad 100644
--- a/clang/include/clang/ExtractAPI/Serialization/SymbolGraphSerializer.h
+++ b/clang/include/clang/ExtractAPI/Serialization/SymbolGraphSerializer.h
@@ -102,6 +102,8 @@ private:
const bool EmitSymbolLabelsForTesting = false;
+ const bool SkipSymbolsInCategoriesToExternalTypes = false;
+
/// The object instantiated by the last call to serializeAPIRecord.
Object *CurrentSymbol = nullptr;
@@ -271,10 +273,13 @@ public:
SymbolGraphSerializer(const APISet &API, const APIIgnoresList &IgnoresList,
bool EmitSymbolLabelsForTesting = false,
- bool ForceEmitToMainModule = false)
+ bool ForceEmitToMainModule = false,
+ bool SkipSymbolsInCategoriesToExternalTypes = false)
: Base(API), ForceEmitToMainModule(ForceEmitToMainModule),
IgnoresList(IgnoresList),
- EmitSymbolLabelsForTesting(EmitSymbolLabelsForTesting) {}
+ EmitSymbolLabelsForTesting(EmitSymbolLabelsForTesting),
+ SkipSymbolsInCategoriesToExternalTypes(
+ SkipSymbolsInCategoriesToExternalTypes) {}
};
} // namespace extractapi
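The serializer gains a SkipSymbolsInCategoriesToExternalTypes knob alongside the existing testing and main-module flags. A minimal construction sketch, assuming API and Ignores objects already exist in the caller:

    // Sketch only; parameter order follows the constructor in the hunk above.
    extractapi::SymbolGraphSerializer Serializer(
        API, Ignores,
        /*EmitSymbolLabelsForTesting=*/false,
        /*ForceEmitToMainModule=*/false,
        /*SkipSymbolsInCategoriesToExternalTypes=*/true);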
diff --git a/clang/include/clang/Format/Format.h b/clang/include/clang/Format/Format.h
index 74893f23210c..eb6647038403 100644
--- a/clang/include/clang/Format/Format.h
+++ b/clang/include/clang/Format/Format.h
@@ -480,15 +480,21 @@ struct FormatStyle {
ENAS_DontAlign,
/// Align escaped newlines as far left as possible.
/// \code
- /// true:
/// #define A \
/// int aaaa; \
/// int b; \
/// int dddddddddd;
- ///
- /// false:
/// \endcode
ENAS_Left,
+ /// Align escaped newlines as far left as possible, using the last line of
+ /// the preprocessor directive as the reference if it's the longest.
+ /// \code
+ /// #define A \
+ /// int aaaa; \
+ /// int b; \
+ /// int dddddddddd;
+ /// \endcode
+ ENAS_LeftWithLastLine,
/// Align escaped newlines in the right-most column.
/// \code
/// #define A \
@@ -5239,7 +5245,7 @@ tooling::Replacements sortIncludes(const FormatStyle &Style, StringRef Code,
/// Returns the replacements corresponding to applying and formatting
/// \p Replaces on success; otherwise, return an llvm::Error carrying
/// llvm::StringError.
-llvm::Expected<tooling::Replacements>
+Expected<tooling::Replacements>
formatReplacements(StringRef Code, const tooling::Replacements &Replaces,
const FormatStyle &Style);
@@ -5256,7 +5262,7 @@ formatReplacements(StringRef Code, const tooling::Replacements &Replaces,
/// The include manipulation is done via ``tooling::HeaderInclude``, see its
/// documentation for more details on how include insertion points are found and
/// what edits are produced.
-llvm::Expected<tooling::Replacements>
+Expected<tooling::Replacements>
cleanupAroundReplacements(StringRef Code, const tooling::Replacements &Replaces,
const FormatStyle &Style);
@@ -5381,11 +5387,10 @@ extern const char *DefaultFallbackStyle;
/// \returns FormatStyle as specified by ``StyleName``. If ``StyleName`` is
/// "file" and no file is found, returns ``FallbackStyle``. If no style could be
/// determined, returns an Error.
-llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
- StringRef FallbackStyle,
- StringRef Code = "",
- llvm::vfs::FileSystem *FS = nullptr,
- bool AllowUnknownOptions = false);
+Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
+ StringRef FallbackStyle, StringRef Code = "",
+ llvm::vfs::FileSystem *FS = nullptr,
+ bool AllowUnknownOptions = false);
// Guesses the language from the ``FileName`` and ``Code`` to be formatted.
// Defaults to FormatStyle::LK_Cpp.
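Besides dropping redundant llvm:: qualifiers on Expected, the Format.h hunks introduce ENAS_LeftWithLastLine, which aligns escaped newlines to the left but lets the directive's last line set the column when it is the longest. A short sketch of opting in programmatically, assuming the usual libFormat entry points:

    clang::format::FormatStyle Style = clang::format::getLLVMStyle();
    Style.AlignEscapedNewlines =
        clang::format::FormatStyle::ENAS_LeftWithLastLine;
    // Style can then be passed to clang::format::reformat() as usual.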
diff --git a/clang/include/clang/Parse/Parser.h b/clang/include/clang/Parse/Parser.h
index af50164a8f93..8493026f5f7a 100644
--- a/clang/include/clang/Parse/Parser.h
+++ b/clang/include/clang/Parse/Parser.h
@@ -153,6 +153,9 @@ class Parser : public CodeCompletionHandler {
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
+ /// Identifier for "environment".
+ IdentifierInfo *Ident_environment;
+
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration, *Ident_USR;
@@ -1643,9 +1646,11 @@ private:
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
+ void ParseLexedCAttributeList(LateParsedAttrList &LA, bool EnterScope,
+ ParsedAttributes *OutAttrs = nullptr);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
- void ParseLexedCAttribute(LateParsedAttribute &LA,
+ void ParseLexedCAttribute(LateParsedAttribute &LA, bool EnterScope,
ParsedAttributes *OutAttrs = nullptr);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
@@ -2814,7 +2819,7 @@ private:
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributes &Attrs, DeclSpec &DS,
- Sema::TagUseKind TUK);
+ TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributes &Attrs,
@@ -2997,7 +3002,8 @@ private:
bool ParseCXXAssumeAttributeArg(ParsedAttributes &Attrs,
IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
- SourceLocation *EndLoc);
+ SourceLocation *EndLoc,
+ ParsedAttr::Form Form);
IdentifierInfo *TryParseCXX11AttributeIdentifier(
SourceLocation &Loc,
@@ -3688,9 +3694,9 @@ private:
using OpenACCVarParseResult = std::pair<ExprResult, OpenACCParseCanContinue>;
/// Parses a single variable in a variable list for OpenACC.
- OpenACCVarParseResult ParseOpenACCVar();
+ OpenACCVarParseResult ParseOpenACCVar(OpenACCClauseKind CK);
/// Parses the variable list for the variety of places that take a var-list.
- llvm::SmallVector<Expr *> ParseOpenACCVarList();
+ llvm::SmallVector<Expr *> ParseOpenACCVarList(OpenACCClauseKind CK);
/// Parses any parameters for an OpenACC Clause, including required/optional
/// parens.
OpenACCClauseParseResult
diff --git a/clang/include/clang/Sema/ParsedAttr.h b/clang/include/clang/Sema/ParsedAttr.h
index 8368d9ce6146..22cbd0d90ee4 100644
--- a/clang/include/clang/Sema/ParsedAttr.h
+++ b/clang/include/clang/Sema/ParsedAttr.h
@@ -40,6 +40,7 @@ class LangOptions;
class Sema;
class Stmt;
class TargetInfo;
+struct IdentifierLoc;
/// Represents information about a change in availability for
/// an entity, which is part of the encoding of the 'availability'
@@ -68,12 +69,14 @@ struct AvailabilityData {
AvailabilityChange Changes[NumAvailabilitySlots];
SourceLocation StrictLoc;
const Expr *Replacement;
+ const IdentifierLoc *EnvironmentLoc;
AvailabilityData(const AvailabilityChange &Introduced,
const AvailabilityChange &Deprecated,
- const AvailabilityChange &Obsoleted,
- SourceLocation Strict, const Expr *ReplaceExpr)
- : StrictLoc(Strict), Replacement(ReplaceExpr) {
+ const AvailabilityChange &Obsoleted, SourceLocation Strict,
+ const Expr *ReplaceExpr, const IdentifierLoc *EnvironmentLoc)
+ : StrictLoc(Strict), Replacement(ReplaceExpr),
+ EnvironmentLoc(EnvironmentLoc) {
Changes[IntroducedSlot] = Introduced;
Changes[DeprecatedSlot] = Deprecated;
Changes[ObsoletedSlot] = Obsoleted;
@@ -234,7 +237,7 @@ private:
const AvailabilityChange &deprecated,
const AvailabilityChange &obsoleted, SourceLocation unavailable,
const Expr *messageExpr, Form formUsed, SourceLocation strict,
- const Expr *replacementExpr)
+ const Expr *replacementExpr, const IdentifierLoc *environmentLoc)
: AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc, formUsed),
NumArgs(1), Invalid(false), UsedAsTypeAttr(false), IsAvailability(true),
IsTypeTagForDatatype(false), IsProperty(false), HasParsedType(false),
@@ -243,8 +246,9 @@ private:
Info(ParsedAttrInfo::get(*this)) {
ArgsUnion PVal(Parm);
memcpy(getArgsBuffer(), &PVal, sizeof(ArgsUnion));
- new (getAvailabilityData()) detail::AvailabilityData(
- introduced, deprecated, obsoleted, strict, replacementExpr);
+ new (getAvailabilityData())
+ detail::AvailabilityData(introduced, deprecated, obsoleted, strict,
+ replacementExpr, environmentLoc);
}
/// Constructor for objc_bridge_related attributes.
@@ -445,6 +449,12 @@ public:
return getAvailabilityData()->Replacement;
}
+ const IdentifierLoc *getEnvironment() const {
+ assert(getParsedKind() == AT_Availability &&
+ "Not an availability attribute");
+ return getAvailabilityData()->EnvironmentLoc;
+ }
+
const ParsedType &getMatchingCType() const {
assert(getParsedKind() == AT_TypeTagForDatatype &&
"Not a type_tag_for_datatype attribute");
@@ -759,11 +769,13 @@ public:
const AvailabilityChange &obsoleted,
SourceLocation unavailable, const Expr *MessageExpr,
ParsedAttr::Form form, SourceLocation strict,
- const Expr *ReplacementExpr) {
+ const Expr *ReplacementExpr,
+ IdentifierLoc *EnvironmentLoc) {
void *memory = allocate(AttributeFactory::AvailabilityAllocSize);
- return add(new (memory) ParsedAttr(
- attrName, attrRange, scopeName, scopeLoc, Param, introduced, deprecated,
- obsoleted, unavailable, MessageExpr, form, strict, ReplacementExpr));
+ return add(new (memory) ParsedAttr(attrName, attrRange, scopeName, scopeLoc,
+ Param, introduced, deprecated, obsoleted,
+ unavailable, MessageExpr, form, strict,
+ ReplacementExpr, EnvironmentLoc));
}
ParsedAttr *create(IdentifierInfo *attrName, SourceRange attrRange,
@@ -994,10 +1006,12 @@ public:
const AvailabilityChange &obsoleted,
SourceLocation unavailable, const Expr *MessageExpr,
ParsedAttr::Form form, SourceLocation strict,
- const Expr *ReplacementExpr) {
- ParsedAttr *attr = pool.create(
- attrName, attrRange, scopeName, scopeLoc, Param, introduced, deprecated,
- obsoleted, unavailable, MessageExpr, form, strict, ReplacementExpr);
+ const Expr *ReplacementExpr,
+ IdentifierLoc *EnvironmentLoc) {
+ ParsedAttr *attr =
+ pool.create(attrName, attrRange, scopeName, scopeLoc, Param, introduced,
+ deprecated, obsoleted, unavailable, MessageExpr, form,
+ strict, ReplacementExpr, EnvironmentLoc);
addAtEnd(attr);
return attr;
}
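The new EnvironmentLoc slot threads an optional 'environment' clause through availability attributes, from the parser down to AvailabilityData. A hedged source-level illustration of the syntax this plumbing is built for; the platform and environment names here are illustrative, not confirmed by this hunk:

    __attribute__((availability(shadermodel, introduced = 6.5,
                                environment = compute)))
    void ComputeOnlyHelper(void);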
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index d4d4a82525a0..ec083f7cc09b 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -39,6 +39,7 @@
#include "clang/Basic/Cuda.h"
#include "clang/Basic/DarwinSDKInfo.h"
#include "clang/Basic/ExpressionTraits.h"
+#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/PragmaKinds.h"
@@ -173,7 +174,10 @@ class SemaHLSL;
class SemaObjC;
class SemaOpenACC;
class SemaOpenMP;
+class SemaPseudoObject;
+class SemaRISCV;
class SemaSYCL;
+class SemaX86;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
@@ -445,6 +449,13 @@ enum class CheckedConversionKind {
ForBuiltinOverloadedOp
};
+enum class TagUseKind {
+ Reference, // Reference to a tag: 'struct foo *X;'
+ Declaration, // Fwd decl of a tag: 'struct foo;'
+ Definition, // Definition of a tag: 'struct foo { int X; } Y;'
+ Friend // Friend declaration: 'friend struct foo;'
+};
+
/// Sema - This implements semantic analysis and AST building for C.
/// \nosubgrouping
class Sema final : public SemaBase {
@@ -470,20 +481,18 @@ class Sema final : public SemaBase {
// 18. Name Lookup (SemaLookup.cpp)
// 19. Modules (SemaModule.cpp)
// 20. C++ Overloading (SemaOverload.cpp)
- // 21. Pseudo-Object (SemaPseudoObject.cpp)
- // 22. Statements (SemaStmt.cpp)
- // 23. `inline asm` Statement (SemaStmtAsm.cpp)
- // 24. Statement Attribute Handling (SemaStmtAttr.cpp)
- // 25. C++ Templates (SemaTemplate.cpp)
- // 26. C++ Template Argument Deduction (SemaTemplateDeduction.cpp)
- // 27. C++ Template Instantiation (SemaTemplateInstantiate.cpp)
- // 28. C++ Template Declaration Instantiation
+ // 21. Statements (SemaStmt.cpp)
+ // 22. `inline asm` Statement (SemaStmtAsm.cpp)
+ // 23. Statement Attribute Handling (SemaStmtAttr.cpp)
+ // 24. C++ Templates (SemaTemplate.cpp)
+ // 25. C++ Template Argument Deduction (SemaTemplateDeduction.cpp)
+ // 26. C++ Template Instantiation (SemaTemplateInstantiate.cpp)
+ // 27. C++ Template Declaration Instantiation
// (SemaTemplateInstantiateDecl.cpp)
- // 29. C++ Variadic Templates (SemaTemplateVariadic.cpp)
- // 30. Constraints and Concepts (SemaConcept.cpp)
- // 31. Types (SemaType.cpp)
- // 32. FixIt Helpers (SemaFixItUtils.cpp)
- // 33. Name Lookup for RISC-V Vector Intrinsic (SemaRISCVVectorLookup.cpp)
+ // 28. C++ Variadic Templates (SemaTemplateVariadic.cpp)
+ // 29. Constraints and Concepts (SemaConcept.cpp)
+ // 30. Types (SemaType.cpp)
+ // 31. FixIt Helpers (SemaFixItUtils.cpp)
/// \name Semantic Analysis
/// Implementations are in Sema.cpp
@@ -1014,11 +1023,26 @@ public:
return *OpenMPPtr;
}
+ SemaPseudoObject &PseudoObject() {
+ assert(PseudoObjectPtr);
+ return *PseudoObjectPtr;
+ }
+
+ SemaRISCV &RISCV() {
+ assert(RISCVPtr);
+ return *RISCVPtr;
+ }
+
SemaSYCL &SYCL() {
assert(SYCLPtr);
return *SYCLPtr;
}
+ SemaX86 &X86() {
+ assert(X86Ptr);
+ return *X86Ptr;
+ }
+
/// Source of additional semantic information.
IntrusiveRefCntPtr<ExternalSemaSource> ExternalSource;
@@ -1055,7 +1079,10 @@ private:
std::unique_ptr<SemaObjC> ObjCPtr;
std::unique_ptr<SemaOpenACC> OpenACCPtr;
std::unique_ptr<SemaOpenMP> OpenMPPtr;
+ std::unique_ptr<SemaPseudoObject> PseudoObjectPtr;
+ std::unique_ptr<SemaRISCV> RISCVPtr;
std::unique_ptr<SemaSYCL> SYCLPtr;
+ std::unique_ptr<SemaX86> X86Ptr;
///@}
@@ -2030,6 +2057,23 @@ public:
void CheckConstrainedAuto(const AutoType *AutoT, SourceLocation Loc);
+ bool BuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result);
+ bool BuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High,
+ bool RangeIsError = true);
+ bool BuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
+ unsigned Multiple);
+ bool BuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
+ bool BuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
+ unsigned ArgBits);
+ bool BuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
+ unsigned ArgBits);
+
+ bool checkArgCountAtLeast(CallExpr *Call, unsigned MinArgCount);
+ bool checkArgCountAtMost(CallExpr *Call, unsigned MaxArgCount);
+ bool checkArgCountRange(CallExpr *Call, unsigned MinArgCount,
+ unsigned MaxArgCount);
+ bool checkArgCount(CallExpr *Call, unsigned DesiredArgCount);
+
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE = nullptr,
@@ -2085,24 +2129,10 @@ private:
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
- ArrayRef<int> ArgNums);
- bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
- bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
- ArrayRef<int> ArgNums);
- bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum);
- bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall);
- void checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
- const llvm::StringMap<bool> &FeatureMap);
+
bool CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
@@ -2132,16 +2162,6 @@ private:
ExprResult BuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult AtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
- bool BuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result);
- bool BuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High,
- bool RangeIsError = true);
- bool BuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
- unsigned Multiple);
- bool BuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
- bool BuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
- unsigned ArgBits);
- bool BuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
- unsigned ArgBits);
bool BuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum,
unsigned ExpectedFieldNum, bool AllowName);
bool BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
@@ -3161,13 +3181,6 @@ public:
bool isDefinition, SourceLocation NewTagLoc,
const IdentifierInfo *Name);
- enum TagUseKind {
- TUK_Reference, // Reference to a tag: 'struct foo *X;'
- TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
- TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
- TUK_Friend // Friend declaration: 'friend struct foo;'
- };
-
enum OffsetOfKind {
// Not parsing a type within __builtin_offsetof.
OOK_Outside,
@@ -3580,13 +3593,13 @@ public:
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
- AvailabilityAttr *
- mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
- IdentifierInfo *Platform, bool Implicit,
- VersionTuple Introduced, VersionTuple Deprecated,
- VersionTuple Obsoleted, bool IsUnavailable,
- StringRef Message, bool IsStrict, StringRef Replacement,
- AvailabilityMergeKind AMK, int Priority);
+ AvailabilityAttr *mergeAvailabilityAttr(
+ NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform,
+ bool Implicit, VersionTuple Introduced, VersionTuple Deprecated,
+ VersionTuple Obsoleted, bool IsUnavailable, StringRef Message,
+ bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK,
+ int Priority, IdentifierInfo *IIEnvironment);
+
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
@@ -5099,6 +5112,13 @@ public:
Context == ExpressionEvaluationContext::UnevaluatedList;
}
+ bool isPotentiallyEvaluated() const {
+ return Context == ExpressionEvaluationContext::PotentiallyEvaluated ||
+ Context ==
+ ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed ||
+ Context == ExpressionEvaluationContext::ConstantEvaluated;
+ }
+
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated ||
Context == ExpressionEvaluationContext::ImmediateFunctionContext;
@@ -5133,6 +5153,16 @@ public:
return ExprEvalContexts.back();
};
+ ExpressionEvaluationContextRecord &parentEvaluationContext() {
+ assert(ExprEvalContexts.size() >= 2 &&
+ "Must be in an expression evaluation context");
+ return ExprEvalContexts[ExprEvalContexts.size() - 2];
+ };
+
+ const ExpressionEvaluationContextRecord &parentEvaluationContext() const {
+ return const_cast<Sema *>(this)->parentEvaluationContext();
+ };
+
bool isBoundsAttrContext() const {
return ExprEvalContexts.back().ExprContext ==
ExpressionEvaluationContextRecord::ExpressionKind::
@@ -5374,11 +5404,9 @@ public:
bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R,
bool HasTrailingLParen);
- ExprResult
- BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
- const DeclarationNameInfo &NameInfo,
- bool IsAddressOfOperand, const Scope *S,
- TypeSourceInfo **RecoveryTSI = nullptr);
+ ExprResult BuildQualifiedDeclarationNameExpr(
+ CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
+ bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R,
bool NeedsADL,
@@ -5885,7 +5913,6 @@ public:
SourceLocation Loc, bool IsCompAssign);
bool isValidSveBitcast(QualType srcType, QualType destType);
- bool isValidRVVBitcast(QualType srcType, QualType destType);
bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy);
@@ -6266,10 +6293,9 @@ public:
/// flag from previous context.
void keepInLifetimeExtendingContext() {
if (ExprEvalContexts.size() > 2 &&
- ExprEvalContexts[ExprEvalContexts.size() - 2]
- .InLifetimeExtendingContext) {
+ parentEvaluationContext().InLifetimeExtendingContext) {
auto &LastRecord = ExprEvalContexts.back();
- auto &PrevRecord = ExprEvalContexts[ExprEvalContexts.size() - 2];
+ auto &PrevRecord = parentEvaluationContext();
LastRecord.InLifetimeExtendingContext =
PrevRecord.InLifetimeExtendingContext;
}
@@ -6370,6 +6396,8 @@ public:
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
+ void maybeExtendBlockObject(ExprResult &E);
+
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
@@ -7056,7 +7084,9 @@ public:
StorageClass SC, ArrayRef<ParmVarDecl *> Params,
bool HasExplicitResultType);
- void DiagnoseInvalidExplicitObjectParameterInLambda(CXXMethodDecl *Method);
+ /// Returns true if the explicit object parameter was invalid.
+ bool DiagnoseInvalidExplicitObjectParameterInLambda(CXXMethodDecl *Method,
+ SourceLocation CallLoc);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
@@ -8368,29 +8398,6 @@ public:
//
//
- /// \name Pseudo-Object
- /// Implementations are in SemaPseudoObject.cpp
- ///@{
-
-public:
- void maybeExtendBlockObject(ExprResult &E);
-
- ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
- UnaryOperatorKind Opcode, Expr *Op);
- ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
- BinaryOperatorKind Opcode, Expr *LHS,
- Expr *RHS);
- ExprResult checkPseudoObjectRValue(Expr *E);
- Expr *recreateSyntacticForm(PseudoObjectExpr *E);
-
- ///@}
-
- //
- //
- // -------------------------------------------------------------------------
- //
- //
-
/// \name Statements
/// Implementations are in SemaStmt.cpp
///@{
@@ -8990,7 +8997,8 @@ public:
ExprResult
BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
- const TemplateArgumentListInfo *TemplateArgs);
+ const TemplateArgumentListInfo *TemplateArgs,
+ bool IsAddressOfOperand);
TemplateNameKind ActOnTemplateName(Scope *S, CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
@@ -10082,7 +10090,9 @@ public:
bool SubstTemplateArgument(const TemplateArgumentLoc &Input,
const MultiLevelTemplateArgumentList &TemplateArgs,
- TemplateArgumentLoc &Output);
+ TemplateArgumentLoc &Output,
+ SourceLocation Loc = {},
+ const DeclarationName &Entity = {});
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
@@ -11701,27 +11711,6 @@ public:
void ProcessAPINotes(Decl *D);
///@}
- //
- //
- // -------------------------------------------------------------------------
- //
- //
-
- /// \name Name Lookup for RISC-V Vector Intrinsic
- /// Implementations are in SemaRISCVVectorLookup.cpp
- ///@{
-
-public:
- /// Indicate RISC-V vector builtin functions enabled or not.
- bool DeclareRISCVVBuiltins = false;
-
- /// Indicate RISC-V SiFive vector builtin functions enabled or not.
- bool DeclareRISCVSiFiveVectorBuiltins = false;
-
-private:
- std::unique_ptr<sema::RISCVIntrinsicManager> RVIntrinsicManager;
-
- ///@}
};
DeductionFailureInfo
@@ -11743,9 +11732,6 @@ void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
AlignPackInfo Value);
-
-std::unique_ptr<sema::RISCVIntrinsicManager>
-CreateRISCVIntrinsicManager(Sema &S);
} // end namespace clang
#endif
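Among the Sema.h changes, TagUseKind is hoisted out of Sema into a scoped enum at namespace level, so call sites trade the TUK_ prefix for the enum-class spelling. A minimal sketch of the new call-site form:

    // Before: TUK == Sema::TUK_Definition
    // After, per the enum added above:
    void noteTagUse(clang::TagUseKind TUK) {
      if (TUK == clang::TagUseKind::Definition) {
        // 'struct foo { int X; } Y;' and friends
      }
    }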
diff --git a/clang/include/clang/Sema/SemaOpenACC.h b/clang/include/clang/Sema/SemaOpenACC.h
index f838fa97d33a..6f69fa08939b 100644
--- a/clang/include/clang/Sema/SemaOpenACC.h
+++ b/clang/include/clang/Sema/SemaOpenACC.h
@@ -66,9 +66,14 @@ public:
struct DeviceTypeDetails {
SmallVector<DeviceTypeArgument> Archs;
};
+ struct ReductionDetails {
+ OpenACCReductionOperator Op;
+ SmallVector<Expr *> VarList;
+ };
std::variant<std::monostate, DefaultDetails, ConditionDetails,
- IntExprDetails, VarListDetails, WaitDetails, DeviceTypeDetails>
+ IntExprDetails, VarListDetails, WaitDetails, DeviceTypeDetails,
+ ReductionDetails>
Details = std::monostate{};
public:
@@ -170,6 +175,10 @@ public:
return const_cast<OpenACCParsedClause *>(this)->getIntExprs();
}
+ OpenACCReductionOperator getReductionOp() const {
+ return std::get<ReductionDetails>(Details).Op;
+ }
+
ArrayRef<Expr *> getVarList() {
assert((ClauseKind == OpenACCClauseKind::Private ||
ClauseKind == OpenACCClauseKind::NoCreate ||
@@ -188,8 +197,13 @@ public:
ClauseKind == OpenACCClauseKind::PresentOrCreate ||
ClauseKind == OpenACCClauseKind::Attach ||
ClauseKind == OpenACCClauseKind::DevicePtr ||
+ ClauseKind == OpenACCClauseKind::Reduction ||
ClauseKind == OpenACCClauseKind::FirstPrivate) &&
"Parsed clause kind does not have a var-list");
+
+ if (ClauseKind == OpenACCClauseKind::Reduction)
+ return std::get<ReductionDetails>(Details).VarList;
+
return std::get<VarListDetails>(Details).VarList;
}
@@ -334,6 +348,13 @@ public:
Details = VarListDetails{std::move(VarList), IsReadOnly, IsZero};
}
+ void setReductionDetails(OpenACCReductionOperator Op,
+ llvm::SmallVector<Expr *> &&VarList) {
+ assert(ClauseKind == OpenACCClauseKind::Reduction &&
+ "reduction details only valid on reduction");
+ Details = ReductionDetails{Op, std::move(VarList)};
+ }
+
void setWaitDetails(Expr *DevNum, SourceLocation QueuesLoc,
llvm::SmallVector<Expr *> &&IntExprs) {
assert(ClauseKind == OpenACCClauseKind::Wait &&
@@ -394,7 +415,11 @@ public:
/// Called when encountering a 'var' for OpenACC, ensures it is actually a
/// declaration reference to a variable of the correct type.
- ExprResult ActOnVar(Expr *VarExpr);
+ ExprResult ActOnVar(OpenACCClauseKind CK, Expr *VarExpr);
+
+ /// Called while semantically analyzing the reduction clause, ensuring the var
+ /// is the correct kind of reference.
+ ExprResult CheckReductionVar(Expr *VarExpr);
/// Called to check the 'var' type is a variable of pointer type, necessary
/// for 'deviceptr' and 'attach' clauses. Returns true on success.
diff --git a/clang/include/clang/Sema/SemaOpenMP.h b/clang/include/clang/Sema/SemaOpenMP.h
index 9927459bbc59..51981e1c9a8b 100644
--- a/clang/include/clang/Sema/SemaOpenMP.h
+++ b/clang/include/clang/Sema/SemaOpenMP.h
@@ -1390,9 +1390,7 @@ private:
bool checkTransformableLoopNest(
OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops,
SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers,
- Stmt *&Body,
- SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>>
- &OriginalInits);
+ Stmt *&Body, SmallVectorImpl<SmallVector<Stmt *, 0>> &OriginalInits);
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
diff --git a/clang/include/clang/Sema/SemaPseudoObject.h b/clang/include/clang/Sema/SemaPseudoObject.h
new file mode 100644
index 000000000000..22d8be2b3726
--- /dev/null
+++ b/clang/include/clang/Sema/SemaPseudoObject.h
@@ -0,0 +1,40 @@
+//===----- SemaPseudoObject.h --- Semantic Analysis for Pseudo-Objects ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares semantic analysis for expressions involving
+/// pseudo-object references.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_SEMAPSEUDOOBJECT_H
+#define LLVM_CLANG_SEMA_SEMAPSEUDOOBJECT_H
+
+#include "clang/AST/Expr.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Sema/Ownership.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaBase.h"
+
+namespace clang {
+
+class SemaPseudoObject : public SemaBase {
+public:
+ SemaPseudoObject(Sema &S);
+
+ ExprResult checkIncDec(Scope *S, SourceLocation OpLoc,
+ UnaryOperatorKind Opcode, Expr *Op);
+ ExprResult checkAssignment(Scope *S, SourceLocation OpLoc,
+ BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS);
+ ExprResult checkRValue(Expr *E);
+ Expr *recreateSyntacticForm(PseudoObjectExpr *E);
+};
+
+} // namespace clang
+
+#endif // LLVM_CLANG_SEMA_SEMAPSEUDOOBJECT_H
\ No newline at end of file
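With the pseudo-object helpers moved out of Sema, call sites go through the new subobject and drop the PseudoObject infix from the method names. A hedged sketch of the migration, assuming SemaRef names the enclosing Sema:

    // Before: SemaRef.checkPseudoObjectRValue(E)
    // After:
    ExprResult R = SemaRef.PseudoObject().checkRValue(E);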
diff --git a/clang/include/clang/Sema/SemaRISCV.h b/clang/include/clang/Sema/SemaRISCV.h
new file mode 100644
index 000000000000..b6dd81f8d4d8
--- /dev/null
+++ b/clang/include/clang/Sema/SemaRISCV.h
@@ -0,0 +1,52 @@
+//===----- SemaRISCV.h ---- RISC-V target-specific routines ---*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares semantic analysis functions specific to RISC-V.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_SEMARISCV_H
+#define LLVM_CLANG_SEMA_SEMARISCV_H
+
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/RISCVIntrinsicManager.h"
+#include "clang/Sema/SemaBase.h"
+#include "llvm/ADT/StringMap.h"
+#include <memory>
+
+namespace clang {
+class SemaRISCV : public SemaBase {
+public:
+ SemaRISCV(Sema &S);
+
+ bool CheckLMUL(CallExpr *TheCall, unsigned ArgNum);
+ bool CheckBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
+ void checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
+ const llvm::StringMap<bool> &FeatureMap);
+
+ bool isValidRVVBitcast(QualType srcType, QualType destType);
+
+ /// Indicate RISC-V vector builtin functions enabled or not.
+ bool DeclareRVVBuiltins = false;
+
+ /// Indicate RISC-V SiFive vector builtin functions enabled or not.
+ bool DeclareSiFiveVectorBuiltins = false;
+
+ std::unique_ptr<sema::RISCVIntrinsicManager> IntrinsicManager;
+};
+
+std::unique_ptr<sema::RISCVIntrinsicManager>
+CreateRISCVIntrinsicManager(Sema &S);
+} // namespace clang
+
+#endif // LLVM_CLANG_SEMA_SEMARISCV_H
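SemaRISCV absorbs the RISC-V builtin checks plus the DeclareRISCVVBuiltins/DeclareRISCVSiFiveVectorBuiltins flags (shortened to drop the redundant prefix) and takes ownership of the intrinsic manager. A sketch of the gating as callers would now spell it, assuming S is a Sema&:

    // Lazy RVV intrinsic declaration keyed off the relocated flags.
    if (S.RISCV().DeclareRVVBuiltins ||
        S.RISCV().DeclareSiFiveVectorBuiltins) {
      if (!S.RISCV().IntrinsicManager)
        S.RISCV().IntrinsicManager = CreateRISCVIntrinsicManager(S);
    }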
diff --git a/clang/include/clang/Sema/SemaX86.h b/clang/include/clang/Sema/SemaX86.h
new file mode 100644
index 000000000000..e322483294ec
--- /dev/null
+++ b/clang/include/clang/Sema/SemaX86.h
@@ -0,0 +1,38 @@
+//===----- SemaX86.h ------- X86 target-specific routines -----*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares semantic analysis functions specific to X86.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_SEMAX86_H
+#define LLVM_CLANG_SEMA_SEMAX86_H
+
+#include "clang/AST/Expr.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/SemaBase.h"
+
+namespace clang {
+class SemaX86 : public SemaBase {
+public:
+ SemaX86(Sema &S);
+
+ bool CheckBuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
+ bool CheckBuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
+ bool CheckBuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
+ bool CheckBuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef<int> ArgNums);
+ bool CheckBuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
+ bool CheckBuiltinTileRangeAndDuplicate(CallExpr *TheCall,
+ ArrayRef<int> ArgNums);
+ bool CheckBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
+};
+} // namespace clang
+
+#endif // LLVM_CLANG_SEMA_SEMAX86_H
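SemaX86 collects the X86 builtin checks that the Sema.h hunk removed, with the X86Builtin infix dropped. A hedged sketch of how target dispatch would now route through the subobject, assuming it sits in the usual arch switch in SemaChecking:

    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
      return X86().CheckBuiltinFunctionCall(TI, BuiltinID, TheCall);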
diff --git a/clang/include/clang/Serialization/ASTReader.h b/clang/include/clang/Serialization/ASTReader.h
index 1bb5fa27a241..4ece4593f073 100644
--- a/clang/include/clang/Serialization/ASTReader.h
+++ b/clang/include/clang/Serialization/ASTReader.h
@@ -601,11 +601,11 @@ private:
/// An array of lexical contents of a declaration context, as a sequence of
/// Decl::Kind, DeclID pairs.
- using unalighed_decl_id_t =
+ using unaligned_decl_id_t =
llvm::support::detail::packed_endian_specific_integral<
serialization::DeclID, llvm::endianness::native,
llvm::support::unaligned>;
- using LexicalContents = ArrayRef<unalighed_decl_id_t>;
+ using LexicalContents = ArrayRef<unaligned_decl_id_t>;
/// Map from a DeclContext to its lexical contents.
llvm::DenseMap<const DeclContext*, std::pair<ModuleFile*, LexicalContents>>
@@ -2246,7 +2246,7 @@ public:
auto [Loc, ModuleFileIndex] = ReadUntranslatedSourceLocation(Raw, Seq);
ModuleFile *OwningModuleFile =
- ModuleFileIndex == 0 ? &MF : MF.DependentModules[ModuleFileIndex - 1];
+ ModuleFileIndex == 0 ? &MF : MF.TransitiveImports[ModuleFileIndex - 1];
assert(!SourceMgr.isLoadedSourceLocation(Loc) &&
"Run out source location space");
diff --git a/clang/include/clang/Serialization/ASTWriter.h b/clang/include/clang/Serialization/ASTWriter.h
index 6aa2796a41e0..88192e439a3f 100644
--- a/clang/include/clang/Serialization/ASTWriter.h
+++ b/clang/include/clang/Serialization/ASTWriter.h
@@ -715,9 +715,6 @@ public:
/// Force a type to be emitted and get its ID.
serialization::TypeID GetOrCreateTypeID(QualType T);
- /// Determine the type ID of an already-emitted type.
- serialization::TypeID getTypeID(QualType T) const;
-
/// Find the first local declaration of a given local redeclarable
/// decl.
const Decl *getFirstLocalDecl(const Decl *D);
diff --git a/clang/include/clang/Serialization/ModuleFile.h b/clang/include/clang/Serialization/ModuleFile.h
index 7d8cbe3d40f5..992d26a8b88c 100644
--- a/clang/include/clang/Serialization/ModuleFile.h
+++ b/clang/include/clang/Serialization/ModuleFile.h
@@ -513,11 +513,11 @@ public:
/// List of modules which this module depends on. Different
/// from `Imports`, this includes indirectly imported modules too.
- /// The order of DependentModules is significant. It should keep
+ /// The order of TransitiveImports is significant. It should keep
/// the same order used by the module file manager when the current
/// module file was written. The value of the member will be initialized
/// in `ASTReader::ReadModuleOffsetMap`.
- llvm::SmallVector<ModuleFile *, 16> DependentModules;
+ llvm::SmallVector<ModuleFile *, 16> TransitiveImports;
/// Determine whether this module was directly imported at
/// any point during translation.
diff --git a/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td b/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td
index 64414e3d37f7..40f443047bd4 100644
--- a/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td
+++ b/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td
@@ -1011,6 +1011,11 @@ def FloatLoopCounter : Checker<"FloatLoopCounter">,
Dependencies<[SecuritySyntaxChecker]>,
Documentation<HasDocumentation>;
+def SetgidSetuidOrderChecker : Checker<"SetgidSetuidOrder">,
+ HelpText<"Warn on possible reversed order of 'setgid(getgid()))' and "
+ "'setuid(getuid())' (CERT: POS36-C)">,
+ Documentation<HasDocumentation>;
+
} // end "security"
let ParentPackage = ENV in {
@@ -1030,15 +1035,6 @@ let ParentPackage = ENV in {
} // end "security.cert.env"
-let ParentPackage = POSAlpha in {
-
- def PutenvWithAuto : Checker<"34c">,
- HelpText<"Finds calls to the 'putenv' function which pass a pointer to "
- "an automatic variable as the argument.">,
- Documentation<HasDocumentation>;
-
-} // end "alpha.cert.pos"
-
let ParentPackage = SecurityAlpha in {
def ArrayBoundChecker : Checker<"ArrayBound">,
@@ -1049,10 +1045,6 @@ def ArrayBoundCheckerV2 : Checker<"ArrayBoundV2">,
HelpText<"Warn about buffer overflows (newer checker)">,
Documentation<HasDocumentation>;
-def ReturnPointerRangeChecker : Checker<"ReturnPtrRange">,
- HelpText<"Check for an out-of-bound pointer being returned to callers">,
- Documentation<HasDocumentation>;
-
def MallocOverflowSecurityChecker : Checker<"MallocOverflow">,
HelpText<"Check for overflows in the arguments to malloc()">,
Documentation<HasDocumentation>;
@@ -1073,6 +1065,15 @@ def MmapWriteExecChecker : Checker<"MmapWriteExec">,
]>,
Documentation<HasDocumentation>;
+def PutenvStackArray : Checker<"PutenvStackArray">,
+ HelpText<"Finds calls to the function 'putenv' which pass a pointer to "
+ "an automatic (stack-allocated) array as the argument.">,
+ Documentation<HasDocumentation>;
+
+def ReturnPointerRangeChecker : Checker<"ReturnPtrRange">,
+ HelpText<"Check for an out-of-bound pointer being returned to callers">,
+ Documentation<HasDocumentation>;
+
} // end "alpha.security"
//===----------------------------------------------------------------------===//
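The Checkers.td hunks promote the putenv check out of alpha.cert.pos (as alpha.security.PutenvStackArray), move ReturnPtrRange within alpha.security, and add security.SetgidSetuidOrder. Hedged examples of code each new entry is meant to flag, with the diagnostic text paraphrased in comments:

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int drop_privileges(void) {
      if (setuid(getuid()) == -1)  /* SetgidSetuidOrder: setuid() before  */
        return -1;                 /* setgid(); group IDs can no longer   */
      if (setgid(getgid()) == -1)  /* be dropped afterwards.              */
        return -1;
      return 0;
    }

    void export_name(const char *v) {
      char env[64];                          /* automatic (stack) array */
      snprintf(env, sizeof(env), "NAME=%s", v);
      putenv(env);  /* PutenvStackArray: array dies when this returns */
    }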
diff --git a/clang/lib/ARCMigrate/ARCMT.cpp b/clang/lib/ARCMigrate/ARCMT.cpp
index b410d5f3b42a..5835559bff6b 100644
--- a/clang/lib/ARCMigrate/ARCMT.cpp
+++ b/clang/lib/ARCMigrate/ARCMT.cpp
@@ -606,8 +606,7 @@ bool MigrationProcess::applyTransform(TransformFn trans,
llvm::raw_svector_ostream vecOS(newText);
buf.write(vecOS);
std::unique_ptr<llvm::MemoryBuffer> memBuf(
- llvm::MemoryBuffer::getMemBufferCopy(
- StringRef(newText.data(), newText.size()), newFname));
+ llvm::MemoryBuffer::getMemBufferCopy(newText.str(), newFname));
SmallString<64> filePath(file->getName());
Unit->getFileManager().FixupRelativePath(filePath);
Remapper.remap(filePath.str(), std::move(memBuf));
diff --git a/clang/lib/ARCMigrate/ObjCMT.cpp b/clang/lib/ARCMigrate/ObjCMT.cpp
index aaf41dc4039c..4357c8e3f09a 100644
--- a/clang/lib/ARCMigrate/ObjCMT.cpp
+++ b/clang/lib/ARCMigrate/ObjCMT.cpp
@@ -1963,8 +1963,7 @@ void ObjCMigrateASTConsumer::HandleTranslationUnit(ASTContext &Ctx) {
llvm::raw_svector_ostream vecOS(newText);
buf.write(vecOS);
std::unique_ptr<llvm::MemoryBuffer> memBuf(
- llvm::MemoryBuffer::getMemBufferCopy(
- StringRef(newText.data(), newText.size()), file->getName()));
+ llvm::MemoryBuffer::getMemBufferCopy(newText.str(), file->getName()));
SmallString<64> filePath(file->getName());
FileMgr.FixupRelativePath(filePath);
Remapper.remap(filePath.str(), std::move(memBuf));
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index 8fc2bb8c401c..a2398fef623e 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -3054,21 +3054,27 @@ QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
if (!T.hasAddressSpace())
return T;
- // If we are composing extended qualifiers together, merge together
- // into one ExtQuals node.
QualifierCollector Quals;
const Type *TypeNode;
+ // For arrays, strip the qualifier off the element type, then reconstruct the
+ // array type
+ if (T.getTypePtr()->isArrayType()) {
+ T = getUnqualifiedArrayType(T, Quals);
+ TypeNode = T.getTypePtr();
+ } else {
+ // If we are composing extended qualifiers together, merge together
+ // into one ExtQuals node.
+ while (T.hasAddressSpace()) {
+ TypeNode = Quals.strip(T);
+
+ // If the type no longer has an address space after stripping qualifiers,
+ // jump out.
+ if (!QualType(TypeNode, 0).hasAddressSpace())
+ break;
- while (T.hasAddressSpace()) {
- TypeNode = Quals.strip(T);
-
- // If the type no longer has an address space after stripping qualifiers,
- // jump out.
- if (!QualType(TypeNode, 0).hasAddressSpace())
- break;
-
- // There might be sugar in the way. Strip it and try again.
- T = T.getSingleStepDesugaredType(*this);
+ // There might be sugar in the way. Strip it and try again.
+ T = T.getSingleStepDesugaredType(*this);
+ }
}
Quals.removeAddressSpace();
@@ -6093,7 +6099,7 @@ CanQualType ASTContext::getCanonicalParamType(QualType T) const {
}
QualType ASTContext::getUnqualifiedArrayType(QualType type,
- Qualifiers &quals) {
+ Qualifiers &quals) const {
SplitQualType splitType = type.getSplitUnqualifiedType();
// FIXME: getSplitUnqualifiedType() actually walks all the way to
@@ -6488,7 +6494,8 @@ bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X,
if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
return false;
- return hasSameType(TTPX->getDefaultArgument(), TTPY->getDefaultArgument());
+ return hasSameType(TTPX->getDefaultArgument().getArgument().getAsType(),
+ TTPY->getDefaultArgument().getArgument().getAsType());
}
if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
@@ -6496,8 +6503,10 @@ bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X,
if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument())
return false;
- Expr *DefaultArgumentX = NTTPX->getDefaultArgument()->IgnoreImpCasts();
- Expr *DefaultArgumentY = NTTPY->getDefaultArgument()->IgnoreImpCasts();
+ Expr *DefaultArgumentX =
+ NTTPX->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
+ Expr *DefaultArgumentY =
+ NTTPY->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
llvm::FoldingSetNodeID XID, YID;
DefaultArgumentX->Profile(XID, *this, /*Canonical=*/true);
DefaultArgumentY->Profile(YID, *this, /*Canonical=*/true);
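The removeAddrSpaceQualType rewrite covers the array case: an address-space qualifier on an array lives on the element type, so the old strip-and-desugar loop never removed it. The function now detours through the (newly const) getUnqualifiedArrayType and rebuilds the array. A sketch of the observable effect, with Ctx and ArrTy assumed (e.g. an OpenCL '__local int[10]'):

    QualType Stripped = Ctx.removeAddrSpaceQualType(ArrTy);
    assert(!Stripped.hasAddressSpace() &&
           "element address space is now stripped through arrays");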
diff --git a/clang/lib/AST/ASTDiagnostic.cpp b/clang/lib/AST/ASTDiagnostic.cpp
index 7b0d5f9cc1a9..0680ff5e3a38 100644
--- a/clang/lib/AST/ASTDiagnostic.cpp
+++ b/clang/lib/AST/ASTDiagnostic.cpp
@@ -1215,46 +1215,19 @@ class TemplateDiff {
bool &NeedAddressOf) {
if (!Iter.isEnd()) {
switch (Iter->getKind()) {
- default:
- llvm_unreachable("unknown ArgumentKind");
- case TemplateArgument::Integral:
- Value = Iter->getAsIntegral();
- HasInt = true;
- IntType = Iter->getIntegralType();
- return;
- case TemplateArgument::Declaration: {
- VD = Iter->getAsDecl();
- QualType ArgType = Iter->getParamTypeForDecl();
- QualType VDType = VD->getType();
- if (ArgType->isPointerType() &&
- Context.hasSameType(ArgType->getPointeeType(), VDType))
- NeedAddressOf = true;
- return;
- }
- case TemplateArgument::NullPtr:
- IsNullPtr = true;
- return;
- case TemplateArgument::Expression:
- E = Iter->getAsExpr();
- }
- } else if (!Default->isParameterPack()) {
- E = Default->getDefaultArgument();
- }
-
- if (!Iter.hasDesugaredTA()) return;
-
- const TemplateArgument& TA = Iter.getDesugaredTA();
- switch (TA.getKind()) {
- default:
- llvm_unreachable("unknown ArgumentKind");
+ case TemplateArgument::StructuralValue:
+ // FIXME: Diffing of structural values is not implemented.
+ // There is no possible fallback in this case, this will show up
+ // as '(no argument)'.
+ return;
case TemplateArgument::Integral:
- Value = TA.getAsIntegral();
+ Value = Iter->getAsIntegral();
HasInt = true;
- IntType = TA.getIntegralType();
+ IntType = Iter->getIntegralType();
return;
case TemplateArgument::Declaration: {
- VD = TA.getAsDecl();
- QualType ArgType = TA.getParamTypeForDecl();
+ VD = Iter->getAsDecl();
+ QualType ArgType = Iter->getParamTypeForDecl();
QualType VDType = VD->getType();
if (ArgType->isPointerType() &&
Context.hasSameType(ArgType->getPointeeType(), VDType))
@@ -1265,13 +1238,62 @@ class TemplateDiff {
IsNullPtr = true;
return;
case TemplateArgument::Expression:
- // TODO: Sometimes, the desugared template argument Expr differs from
- // the sugared template argument Expr. It may be useful in the future
- // but for now, it is just discarded.
- if (!E)
- E = TA.getAsExpr();
- return;
+ E = Iter->getAsExpr();
+ break;
+ case TemplateArgument::Null:
+ case TemplateArgument::Type:
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ llvm_unreachable("TemplateArgument kind is not expected for NTTP");
+ case TemplateArgument::Pack:
+ llvm_unreachable("TemplateArgument kind should be handled elsewhere");
+ }
+ } else if (!Default->isParameterPack()) {
+ E = Default->getDefaultArgument().getArgument().getAsExpr();
}
+
+ if (!Iter.hasDesugaredTA())
+ return;
+
+ const TemplateArgument &TA = Iter.getDesugaredTA();
+ switch (TA.getKind()) {
+ case TemplateArgument::StructuralValue:
+ // FIXME: Diffing of structural values is not implemented.
+ // Just fall back to the expression.
+ return;
+ case TemplateArgument::Integral:
+ Value = TA.getAsIntegral();
+ HasInt = true;
+ IntType = TA.getIntegralType();
+ return;
+ case TemplateArgument::Declaration: {
+ VD = TA.getAsDecl();
+ QualType ArgType = TA.getParamTypeForDecl();
+ QualType VDType = VD->getType();
+ if (ArgType->isPointerType() &&
+ Context.hasSameType(ArgType->getPointeeType(), VDType))
+ NeedAddressOf = true;
+ return;
+ }
+ case TemplateArgument::NullPtr:
+ IsNullPtr = true;
+ return;
+ case TemplateArgument::Expression:
+ // TODO: Sometimes, the desugared template argument Expr differs from
+ // the sugared template argument Expr. It may be useful in the future
+ // but for now, it is just discarded.
+ if (!E)
+ E = TA.getAsExpr();
+ return;
+ case TemplateArgument::Null:
+ case TemplateArgument::Type:
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ llvm_unreachable("TemplateArgument kind is not expected for NTTP");
+ case TemplateArgument::Pack:
+ llvm_unreachable("TemplateArgument kind should be handled elsewhere");
+ }
+ llvm_unreachable("Unexpected TemplateArgument kind");
}
/// DiffNonTypes - Handles any template parameters not handled by DiffTypes
@@ -1914,6 +1936,11 @@ class TemplateDiff {
return;
}
+ if (E) {
+ PrintExpr(E);
+ return;
+ }
+
OS << "(no argument)";
}
diff --git a/clang/lib/AST/ASTImporter.cpp b/clang/lib/AST/ASTImporter.cpp
index 9ff8e1ea78d8..cab5ee604795 100644
--- a/clang/lib/AST/ASTImporter.cpp
+++ b/clang/lib/AST/ASTImporter.cpp
@@ -5917,11 +5917,11 @@ ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
}
if (D->hasDefaultArgument()) {
- Expected<TypeSourceInfo *> ToDefaultArgOrErr =
- import(D->getDefaultArgumentInfo());
+ Expected<TemplateArgumentLoc> ToDefaultArgOrErr =
+ import(D->getDefaultArgument());
if (!ToDefaultArgOrErr)
return ToDefaultArgOrErr.takeError();
- ToD->setDefaultArgument(*ToDefaultArgOrErr);
+ ToD->setDefaultArgument(ToD->getASTContext(), *ToDefaultArgOrErr);
}
return ToD;
@@ -5949,10 +5949,11 @@ ASTNodeImporter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
return ToD;
if (D->hasDefaultArgument()) {
- ExpectedExpr ToDefaultArgOrErr = import(D->getDefaultArgument());
+ Expected<TemplateArgumentLoc> ToDefaultArgOrErr =
+ import(D->getDefaultArgument());
if (!ToDefaultArgOrErr)
return ToDefaultArgOrErr.takeError();
- ToD->setDefaultArgument(*ToDefaultArgOrErr);
+ ToD->setDefaultArgument(Importer.getToContext(), *ToDefaultArgOrErr);
}
return ToD;
diff --git a/clang/lib/AST/DeclBase.cpp b/clang/lib/AST/DeclBase.cpp
index 03e1055251c2..65d5eeb6354e 100644
--- a/clang/lib/AST/DeclBase.cpp
+++ b/clang/lib/AST/DeclBase.cpp
@@ -666,12 +666,28 @@ static AvailabilityResult CheckAvailability(ASTContext &Context,
// Make sure that this declaration has already been introduced.
if (!A->getIntroduced().empty() &&
EnclosingVersion < A->getIntroduced()) {
- if (Message) {
- Message->clear();
- llvm::raw_string_ostream Out(*Message);
- VersionTuple VTI(A->getIntroduced());
- Out << "introduced in " << PrettyPlatformName << ' '
- << VTI << HintMessage;
+ IdentifierInfo *IIEnv = A->getEnvironment();
+ StringRef TargetEnv =
+ Context.getTargetInfo().getTriple().getEnvironmentName();
+ StringRef EnvName = AvailabilityAttr::getPrettyEnviromentName(TargetEnv);
+ // Matching environment or no environment on attribute
+ if (!IIEnv || (!TargetEnv.empty() && IIEnv->getName() == TargetEnv)) {
+ if (Message) {
+ Message->clear();
+ llvm::raw_string_ostream Out(*Message);
+ VersionTuple VTI(A->getIntroduced());
+ Out << "introduced in " << PrettyPlatformName << " " << VTI << " "
+ << EnvName << HintMessage;
+ }
+ }
+ // Non-matching environment or no environment on target
+ else {
+ if (Message) {
+ Message->clear();
+ llvm::raw_string_ostream Out(*Message);
+ Out << "not available on " << PrettyPlatformName << " " << EnvName
+ << HintMessage;
+ }
}
return A->getStrict() ? AR_Unavailable : AR_NotYetIntroduced;
diff --git a/clang/lib/AST/DeclPrinter.cpp b/clang/lib/AST/DeclPrinter.cpp
index c5868256b440..0cf4e64f83b8 100644
--- a/clang/lib/AST/DeclPrinter.cpp
+++ b/clang/lib/AST/DeclPrinter.cpp
@@ -1883,7 +1883,8 @@ void DeclPrinter::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *TTP) {
if (TTP->hasDefaultArgument()) {
Out << " = ";
- Out << TTP->getDefaultArgument().getAsString(Policy);
+ TTP->getDefaultArgument().getArgument().print(Policy, Out,
+ /*IncludeType=*/false);
}
}
@@ -1897,7 +1898,7 @@ void DeclPrinter::VisitNonTypeTemplateParmDecl(
if (NTTP->hasDefaultArgument()) {
Out << " = ";
- NTTP->getDefaultArgument()->printPretty(Out, nullptr, Policy, Indentation,
- "\n", &Context);
+ NTTP->getDefaultArgument().getArgument().print(Policy, Out,
+ /*IncludeType=*/false);
}
}
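These printer fixes follow from template parameter default arguments now being stored as TemplateArgumentLoc rather than a bare QualType/Expr* (see the ASTContext and ASTImporter hunks above). Reaching the underlying value now goes through the argument; a minimal sketch for a type parameter, assuming TTP is a TemplateTypeParmDecl* with a default:

    if (TTP->hasDefaultArgument()) {
      QualType Def = TTP->getDefaultArgument().getArgument().getAsType();
      (void)Def; // previously: TTP->getDefaultArgument() was the QualType
    }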
diff --git a/clang/lib/AST/DeclTemplate.cpp b/clang/lib/AST/DeclTemplate.cpp
index 26765a5da1dc..95ffd4784641 100644
--- a/clang/lib/AST/DeclTemplate.cpp
+++ b/clang/lib/AST/DeclTemplate.cpp
@@ -669,23 +669,30 @@ TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, GlobalDeclID ID,
}
SourceLocation TemplateTypeParmDecl::getDefaultArgumentLoc() const {
- return hasDefaultArgument()
- ? getDefaultArgumentInfo()->getTypeLoc().getBeginLoc()
- : SourceLocation();
+ return hasDefaultArgument() ? getDefaultArgument().getLocation()
+ : SourceLocation();
}
SourceRange TemplateTypeParmDecl::getSourceRange() const {
if (hasDefaultArgument() && !defaultArgumentWasInherited())
return SourceRange(getBeginLoc(),
- getDefaultArgumentInfo()->getTypeLoc().getEndLoc());
+ getDefaultArgument().getSourceRange().getEnd());
// TypeDecl::getSourceRange returns a range containing name location, which is
// wrong for unnamed template parameters. e.g:
// it will return <[[typename>]] instead of <[[typename]]>
- else if (getDeclName().isEmpty())
+ if (getDeclName().isEmpty())
return SourceRange(getBeginLoc());
return TypeDecl::getSourceRange();
}
+void TemplateTypeParmDecl::setDefaultArgument(
+ const ASTContext &C, const TemplateArgumentLoc &DefArg) {
+ if (DefArg.getArgument().isNull())
+ DefaultArgument.set(nullptr);
+ else
+ DefaultArgument.set(new (C) TemplateArgumentLoc(DefArg));
+}
+
unsigned TemplateTypeParmDecl::getDepth() const {
return getTypeForDecl()->castAs<TemplateTypeParmType>()->getDepth();
}
@@ -788,14 +795,21 @@ NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID,
SourceRange NonTypeTemplateParmDecl::getSourceRange() const {
if (hasDefaultArgument() && !defaultArgumentWasInherited())
return SourceRange(getOuterLocStart(),
- getDefaultArgument()->getSourceRange().getEnd());
+ getDefaultArgument().getSourceRange().getEnd());
return DeclaratorDecl::getSourceRange();
}
SourceLocation NonTypeTemplateParmDecl::getDefaultArgumentLoc() const {
- return hasDefaultArgument()
- ? getDefaultArgument()->getSourceRange().getBegin()
- : SourceLocation();
+ return hasDefaultArgument() ? getDefaultArgument().getSourceRange().getBegin()
+ : SourceLocation();
+}
+
+void NonTypeTemplateParmDecl::setDefaultArgument(
+ const ASTContext &C, const TemplateArgumentLoc &DefArg) {
+ if (DefArg.getArgument().isNull())
+ DefaultArgument.set(nullptr);
+ else
+ DefaultArgument.set(new (C) TemplateArgumentLoc(DefArg));
}
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/AST/ExprCXX.cpp b/clang/lib/AST/ExprCXX.cpp
index 7e9343271ac3..2abc0acbfde3 100644
--- a/clang/lib/AST/ExprCXX.cpp
+++ b/clang/lib/AST/ExprCXX.cpp
@@ -1665,12 +1665,10 @@ NonTypeTemplateParmDecl *SubstNonTypeTemplateParmExpr::getParameter() const {
getReplacedTemplateParameterList(getAssociatedDecl())->asArray()[Index]);
}
-PackIndexingExpr *PackIndexingExpr::Create(ASTContext &Context,
- SourceLocation EllipsisLoc,
- SourceLocation RSquareLoc,
- Expr *PackIdExpr, Expr *IndexExpr,
- std::optional<int64_t> Index,
- ArrayRef<Expr *> SubstitutedExprs) {
+PackIndexingExpr *PackIndexingExpr::Create(
+ ASTContext &Context, SourceLocation EllipsisLoc, SourceLocation RSquareLoc,
+ Expr *PackIdExpr, Expr *IndexExpr, std::optional<int64_t> Index,
+ ArrayRef<Expr *> SubstitutedExprs, bool ExpandedToEmptyPack) {
QualType Type;
if (Index && !SubstitutedExprs.empty())
Type = SubstitutedExprs[*Index]->getType();
@@ -1679,8 +1677,9 @@ PackIndexingExpr *PackIndexingExpr::Create(ASTContext &Context,
void *Storage =
Context.Allocate(totalSizeToAlloc<Expr *>(SubstitutedExprs.size()));
- return new (Storage) PackIndexingExpr(
- Type, EllipsisLoc, RSquareLoc, PackIdExpr, IndexExpr, SubstitutedExprs);
+ return new (Storage)
+ PackIndexingExpr(Type, EllipsisLoc, RSquareLoc, PackIdExpr, IndexExpr,
+ SubstitutedExprs, ExpandedToEmptyPack);
}
NamedDecl *PackIndexingExpr::getPackDecl() const {
diff --git a/clang/lib/AST/Interp/ByteCodeExprGen.cpp b/clang/lib/AST/Interp/ByteCodeExprGen.cpp
index 70328c1f52af..3eb7e7544df7 100644
--- a/clang/lib/AST/Interp/ByteCodeExprGen.cpp
+++ b/clang/lib/AST/Interp/ByteCodeExprGen.cpp
@@ -1050,34 +1050,85 @@ bool ByteCodeExprGen<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
if (T->isRecordType()) {
const Record *R = getRecord(E->getType());
- if (Inits.size() == 1 && E->getType() == Inits[0]->getType()) {
+ if (Inits.size() == 1 && E->getType() == Inits[0]->getType())
return this->visitInitializer(Inits[0]);
+
+ auto initPrimitiveField = [=](const Record::Field *FieldToInit,
+ const Expr *Init, PrimType T) -> bool {
+ if (!this->visit(Init))
+ return false;
+
+ if (FieldToInit->isBitField()) {
+ if (!this->emitInitBitField(T, FieldToInit, E))
+ return false;
+ } else {
+ if (!this->emitInitField(T, FieldToInit->Offset, E))
+ return false;
+ }
+ return this->emitPopPtr(E);
+ };
+
+ auto initCompositeField = [=](const Record::Field *FieldToInit,
+ const Expr *Init) -> bool {
+ // Non-primitive case. Get a pointer to the field-to-initialize
+ // on the stack and recurse into visitInitializer().
+ if (!this->emitGetPtrField(FieldToInit->Offset, Init))
+ return false;
+ if (!this->visitInitializer(Init))
+ return false;
+ return this->emitPopPtr(E);
+ };
+
+ if (R->isUnion()) {
+ if (Inits.size() == 0) {
+ // Zero-initialize the first union field.
+ if (R->getNumFields() == 0)
+ return this->emitFinishInit(E);
+ const Record::Field *FieldToInit = R->getField(0u);
+ QualType FieldType = FieldToInit->Desc->getType();
+ if (std::optional<PrimType> T = classify(FieldType)) {
+ if (!this->visitZeroInitializer(*T, FieldType, E))
+ return false;
+ if (!this->emitInitField(*T, FieldToInit->Offset, E))
+ return false;
+ }
+ // FIXME: Non-primitive case?
+ } else {
+ const Expr *Init = Inits[0];
+ const FieldDecl *FToInit = nullptr;
+ if (const auto *ILE = dyn_cast<InitListExpr>(E))
+ FToInit = ILE->getInitializedFieldInUnion();
+ else
+ FToInit = cast<CXXParenListInitExpr>(E)->getInitializedFieldInUnion();
+
+ if (!this->emitDupPtr(E))
+ return false;
+
+ const Record::Field *FieldToInit = R->getField(FToInit);
+ if (std::optional<PrimType> T = classify(Init)) {
+ if (!initPrimitiveField(FieldToInit, Init, *T))
+ return false;
+ } else {
+ if (!initCompositeField(FieldToInit, Init))
+ return false;
+ }
+ }
+ return this->emitFinishInit(E);
}
+ assert(!R->isUnion());
unsigned InitIndex = 0;
for (const Expr *Init : Inits) {
// Skip unnamed bitfields.
while (InitIndex < R->getNumFields() &&
R->getField(InitIndex)->Decl->isUnnamedBitField())
++InitIndex;
-
if (!this->emitDupPtr(E))
return false;
if (std::optional<PrimType> T = classify(Init)) {
const Record::Field *FieldToInit = R->getField(InitIndex);
- if (!this->visit(Init))
- return false;
-
- if (FieldToInit->isBitField()) {
- if (!this->emitInitBitField(*T, FieldToInit, E))
- return false;
- } else {
- if (!this->emitInitField(*T, FieldToInit->Offset, E))
- return false;
- }
-
- if (!this->emitPopPtr(E))
+ if (!initPrimitiveField(FieldToInit, Init, *T))
return false;
++InitIndex;
} else {
@@ -1095,21 +1146,13 @@ bool ByteCodeExprGen<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
// into the Record's fields.
} else {
const Record::Field *FieldToInit = R->getField(InitIndex);
- // Non-primitive case. Get a pointer to the field-to-initialize
- // on the stack and recurse into visitInitializer().
- if (!this->emitGetPtrField(FieldToInit->Offset, Init))
- return false;
-
- if (!this->visitInitializer(Init))
- return false;
-
- if (!this->emitPopPtr(E))
+ if (!initCompositeField(FieldToInit, Init))
return false;
++InitIndex;
}
}
}
- return true;
+ return this->emitFinishInit(E);
}
if (T->isArrayType()) {
@@ -1133,7 +1176,7 @@ bool ByteCodeExprGen<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
}
}
- return true;
+ return this->emitFinishInit(E);
}
if (const auto *ComplexTy = E->getType()->getAs<ComplexType>()) {
@@ -1595,6 +1638,36 @@ bool ByteCodeExprGen<Emitter>::VisitStringLiteral(const StringLiteral *E) {
}
template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitObjCStringLiteral(
+ const ObjCStringLiteral *E) {
+ return this->delegate(E->getString());
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitSYCLUniqueStableNameExpr(
+ const SYCLUniqueStableNameExpr *E) {
+ if (DiscardResult)
+ return true;
+
+ assert(!Initializing);
+
+ auto &A = Ctx.getASTContext();
+ std::string ResultStr = E->ComputeName(A);
+
+ QualType CharTy = A.CharTy.withConst();
+ APInt Size(A.getTypeSize(A.getSizeType()), ResultStr.size() + 1);
+ QualType ArrayTy = A.getConstantArrayType(CharTy, Size, nullptr,
+ ArraySizeModifier::Normal, 0);
+
+ StringLiteral *SL =
+ StringLiteral::Create(A, ResultStr, StringLiteralKind::Ordinary,
+ /*Pascal=*/false, ArrayTy, E->getLocation());
+
+ unsigned StringIndex = P.createGlobalString(SL);
+ return this->emitGetPtrGlobal(StringIndex, E);
+}
+
+template <class Emitter>
bool ByteCodeExprGen<Emitter>::VisitCharacterLiteral(
const CharacterLiteral *E) {
if (DiscardResult)
@@ -2088,6 +2161,21 @@ bool ByteCodeExprGen<Emitter>::VisitCXXConstructExpr(
if (T->isRecordType()) {
const CXXConstructorDecl *Ctor = E->getConstructor();
+ // If we're discarding a construct expression, we still need
+ // to allocate a variable and call the constructor and destructor.
+ if (DiscardResult) {
+ if (Ctor->isTrivial())
+ return true;
+ assert(!Initializing);
+ std::optional<unsigned> LocalIndex = allocateLocal(E);
+
+ if (!LocalIndex)
+ return false;
+
+ if (!this->emitGetPtrLocal(*LocalIndex, E))
+ return false;
+ }
+
// Zero initialization.
if (E->requiresZeroInitialization()) {
const Record *R = getRecord(E->getType());
@@ -2108,19 +2196,6 @@ bool ByteCodeExprGen<Emitter>::VisitCXXConstructExpr(
assert(Func->hasThisPointer());
assert(!Func->hasRVO());
- // If we're discarding a construct expression, we still need
- // to allocate a variable and call the constructor and destructor.
- if (DiscardResult) {
- assert(!Initializing);
- std::optional<unsigned> LocalIndex = allocateLocal(E);
-
- if (!LocalIndex)
- return false;
-
- if (!this->emitGetPtrLocal(*LocalIndex, E))
- return false;
- }
-
// The This pointer is already on the stack because this is an initializer,
// but we need to dup() so the call() below has its own copy.
if (!this->emitDupPtr(E))
@@ -2538,8 +2613,6 @@ bool ByteCodeExprGen<Emitter>::VisitShuffleVectorExpr(
assert(E->getNumSubExprs() > 2);
const Expr *Vecs[] = {E->getExpr(0), E->getExpr(1)};
- assert(Vecs[0]->getType() == Vecs[1]->getType());
-
const VectorType *VT = Vecs[0]->getType()->castAs<VectorType>();
PrimType ElemT = classifyPrim(VT->getElementType());
unsigned NumInputElems = VT->getNumElements();
@@ -2576,6 +2649,14 @@ bool ByteCodeExprGen<Emitter>::VisitShuffleVectorExpr(
return true;
}
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitObjCBoxedExpr(const ObjCBoxedExpr *E) {
+ if (!E->isExpressibleAsConstantInitializer())
+ return this->emitInvalid(E);
+
+ return this->delegate(E->getSubExpr());
+}
+
template <class Emitter> bool ByteCodeExprGen<Emitter>::discard(const Expr *E) {
OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/true,
/*NewInitializing=*/false);
@@ -2591,6 +2672,9 @@ bool ByteCodeExprGen<Emitter>::delegate(const Expr *E) {
}
template <class Emitter> bool ByteCodeExprGen<Emitter>::visit(const Expr *E) {
+ if (E->getType().isNull())
+ return false;
+
if (E->getType()->isVoidType())
return this->discard(E);
@@ -3257,7 +3341,8 @@ bool ByteCodeExprGen<Emitter>::VisitCallExpr(const CallExpr *E) {
// write the result into.
if (IsVirtual && !HasQualifier) {
uint32_t VarArgSize = 0;
- unsigned NumParams = Func->getNumWrittenParams();
+ unsigned NumParams =
+ Func->getNumWrittenParams() + isa<CXXOperatorCallExpr>(E);
for (unsigned I = NumParams, N = E->getNumArgs(); I != N; ++I)
VarArgSize += align(primSize(classify(E->getArg(I)).value_or(PT_Ptr)));
@@ -3371,6 +3456,9 @@ bool ByteCodeExprGen<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
switch (E->getOpcode()) {
case UO_PostInc: { // x++
+ if (!Ctx.getLangOpts().CPlusPlus14)
+ return this->emitInvalid(E);
+
if (!this->visit(SubExpr))
return false;
@@ -3389,6 +3477,9 @@ bool ByteCodeExprGen<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
return DiscardResult ? this->emitIncPop(*T, E) : this->emitInc(*T, E);
}
case UO_PostDec: { // x--
+ if (!Ctx.getLangOpts().CPlusPlus14)
+ return this->emitInvalid(E);
+
if (!this->visit(SubExpr))
return false;
@@ -3407,6 +3498,9 @@ bool ByteCodeExprGen<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
return DiscardResult ? this->emitDecPop(*T, E) : this->emitDec(*T, E);
}
case UO_PreInc: { // ++x
+ if (!Ctx.getLangOpts().CPlusPlus14)
+ return this->emitInvalid(E);
+
if (!this->visit(SubExpr))
return false;
@@ -3451,6 +3545,9 @@ bool ByteCodeExprGen<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
return E->isGLValue() || this->emitLoadPop(*T, E);
}
case UO_PreDec: { // --x
+ if (!Ctx.getLangOpts().CPlusPlus14)
+ return this->emitInvalid(E);
+
if (!this->visit(SubExpr))
return false;
@@ -3679,13 +3776,13 @@ bool ByteCodeExprGen<Emitter>::VisitDeclRefExpr(const DeclRefExpr *E) {
return this->emitGetPtrLocal(Offset, E);
} else if (auto GlobalIndex = P.getGlobal(D)) {
if (IsReference)
- return this->emitGetGlobalPtr(*GlobalIndex, E);
+ return this->emitGetGlobal(classifyPrim(E), *GlobalIndex, E);
return this->emitGetPtrGlobal(*GlobalIndex, E);
} else if (const auto *PVD = dyn_cast<ParmVarDecl>(D)) {
if (auto It = this->Params.find(PVD); It != this->Params.end()) {
if (IsReference || !It->second.IsPtr)
- return this->emitGetParamPtr(It->second.Offset, E);
+ return this->emitGetParam(classifyPrim(E), It->second.Offset, E);
return this->emitGetPtrParam(It->second.Offset, E);
}
@@ -3716,7 +3813,8 @@ bool ByteCodeExprGen<Emitter>::VisitDeclRefExpr(const DeclRefExpr *E) {
}
} else {
if (const auto *VD = dyn_cast<VarDecl>(D);
- VD && VD->getAnyInitializer() && VD->getType().isConstQualified()) {
+ VD && VD->getAnyInitializer() && VD->getType().isConstQualified() &&
+ !VD->isWeak()) {
if (!this->visitVarDecl(VD))
return false;
// Retry.
@@ -3724,8 +3822,18 @@ bool ByteCodeExprGen<Emitter>::VisitDeclRefExpr(const DeclRefExpr *E) {
}
}
- if (std::optional<unsigned> I = P.getOrCreateDummy(D))
- return this->emitGetPtrGlobal(*I, E);
+ if (std::optional<unsigned> I = P.getOrCreateDummy(D)) {
+ if (!this->emitGetPtrGlobal(*I, E))
+ return false;
+ if (E->getType()->isVoidType())
+ return true;
+ // Convert the dummy pointer to another pointer type if we have to.
+ if (PrimType PT = classifyPrim(E); PT != PT_Ptr) {
+ if (!this->emitDecayPtr(PT_Ptr, PT, E))
+ return false;
+ }
+ return true;
+ }
return this->emitInvalidDeclRef(E, E);
}
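The CPlusPlus14 gating added above mirrors the language rule that mutation inside a constant expression requires C++14 relaxed constexpr. A minimal sketch:

    constexpr int next(int x) { return ++x; } // OK since C++14; in C++11
                                              // mode the bytecode interpreter
                                              // now emits an invalid op
    static_assert(next(1) == 2);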
diff --git a/clang/lib/AST/Interp/ByteCodeExprGen.h b/clang/lib/AST/Interp/ByteCodeExprGen.h
index e73a2f0334cf..44c495240289 100644
--- a/clang/lib/AST/Interp/ByteCodeExprGen.h
+++ b/clang/lib/AST/Interp/ByteCodeExprGen.h
@@ -90,6 +90,8 @@ public:
bool VisitOpaqueValueExpr(const OpaqueValueExpr *E);
bool VisitAbstractConditionalOperator(const AbstractConditionalOperator *E);
bool VisitStringLiteral(const StringLiteral *E);
+ bool VisitObjCStringLiteral(const ObjCStringLiteral *E);
+ bool VisitSYCLUniqueStableNameExpr(const SYCLUniqueStableNameExpr *E);
bool VisitCharacterLiteral(const CharacterLiteral *E);
bool VisitCompoundAssignOperator(const CompoundAssignOperator *E);
bool VisitFloatCompoundAssignOperator(const CompoundAssignOperator *E);
@@ -125,6 +127,7 @@ public:
bool VisitAddrLabelExpr(const AddrLabelExpr *E);
bool VisitConvertVectorExpr(const ConvertVectorExpr *E);
bool VisitShuffleVectorExpr(const ShuffleVectorExpr *E);
+ bool VisitObjCBoxedExpr(const ObjCBoxedExpr *E);
protected:
bool visitExpr(const Expr *E) override;
diff --git a/clang/lib/AST/Interp/Context.cpp b/clang/lib/AST/Interp/Context.cpp
index d51a57e5e92e..4ecfa0f9bfd7 100644
--- a/clang/lib/AST/Interp/Context.cpp
+++ b/clang/lib/AST/Interp/Context.cpp
@@ -164,7 +164,8 @@ std::optional<PrimType> Context::classify(QualType T) const {
T->isFunctionType() || T->isSpecificBuiltinType(BuiltinType::BoundMember))
return PT_FnPtr;
- if (T->isReferenceType() || T->isPointerType())
+ if (T->isReferenceType() || T->isPointerType() ||
+ T->isObjCObjectPointerType())
return PT_Ptr;
if (const auto *AT = T->getAs<AtomicType>())
diff --git a/clang/lib/AST/Interp/Descriptor.cpp b/clang/lib/AST/Interp/Descriptor.cpp
index d0466902247b..746b765ca421 100644
--- a/clang/lib/AST/Interp/Descriptor.cpp
+++ b/clang/lib/AST/Interp/Descriptor.cpp
@@ -137,9 +137,8 @@ static void moveArrayDesc(Block *B, const std::byte *Src, std::byte *Dst,
}
static void initField(Block *B, std::byte *Ptr, bool IsConst, bool IsMutable,
- bool IsActive, const Descriptor *D,
+ bool IsActive, bool IsUnion, const Descriptor *D,
unsigned FieldOffset) {
- bool IsUnion = false; // FIXME
auto *Desc = reinterpret_cast<InlineDescriptor *>(Ptr + FieldOffset) - 1;
Desc->Offset = FieldOffset;
Desc->Desc = D;
@@ -174,7 +173,7 @@ static void initBase(Block *B, std::byte *Ptr, bool IsConst, bool IsMutable,
initBase(B, Ptr + FieldOffset, IsConst, IsMutable, IsActive, V.Desc,
V.Offset, false);
for (const auto &F : D->ElemRecord->fields())
- initField(B, Ptr + FieldOffset, IsConst, IsMutable, IsActive, F.Desc,
+ initField(B, Ptr + FieldOffset, IsConst, IsMutable, IsActive, IsUnion, F.Desc,
F.Offset);
// If this is initializing a virtual base, we do NOT want to consider its
@@ -193,7 +192,7 @@ static void ctorRecord(Block *B, std::byte *Ptr, bool IsConst, bool IsMutable,
for (const auto &V : D->ElemRecord->bases())
initBase(B, Ptr, IsConst, IsMutable, IsActive, V.Desc, V.Offset, false);
for (const auto &F : D->ElemRecord->fields())
- initField(B, Ptr, IsConst, IsMutable, IsActive, F.Desc, F.Offset);
+    initField(B, Ptr, IsConst, IsMutable, IsActive, D->ElemRecord->isUnion(),
+              F.Desc, F.Offset);
for (const auto &V : D->ElemRecord->virtual_bases())
initBase(B, Ptr, IsConst, IsMutable, IsActive, V.Desc, V.Offset, true);
}
diff --git a/clang/lib/AST/Interp/EvaluationResult.cpp b/clang/lib/AST/Interp/EvaluationResult.cpp
index e92d686c724c..150a793da881 100644
--- a/clang/lib/AST/Interp/EvaluationResult.cpp
+++ b/clang/lib/AST/Interp/EvaluationResult.cpp
@@ -101,6 +101,10 @@ static bool CheckFieldsInitialized(InterpState &S, SourceLocation Loc,
Pointer FieldPtr = BasePtr.atField(F.Offset);
QualType FieldType = F.Decl->getType();
+ // Don't check inactive union members.
+ if (R->isUnion() && !FieldPtr.isActive())
+ continue;
+
if (FieldType->isRecordType()) {
Result &= CheckFieldsInitialized(S, Loc, FieldPtr, FieldPtr.getRecord());
} else if (FieldType->isIncompleteArrayType()) {
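This matches union semantics during constant evaluation: only the active member needs to be initialized. A minimal sketch:

    union U { int a; float b; };
    constexpr U u{.a = 1}; // 'a' is active; 'b' stays uninitialized and is
                           // no longer flagged by CheckFieldsInitialized
    static_assert(u.a == 1);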
diff --git a/clang/lib/AST/Interp/Interp.cpp b/clang/lib/AST/Interp/Interp.cpp
index 3e4da487e43c..145fa65791da 100644
--- a/clang/lib/AST/Interp/Interp.cpp
+++ b/clang/lib/AST/Interp/Interp.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "llvm/ADT/APSInt.h"
@@ -76,18 +77,15 @@ static bool diagnoseUnknownDecl(InterpState &S, CodePtr OpPC,
} else {
S.FFDiag(E);
}
- } else if (const auto *VD = dyn_cast<VarDecl>(D)) {
- if (!VD->getType().isConstQualified()) {
- diagnoseNonConstVariable(S, OpPC, VD);
- return false;
- }
-
- // const, but no initializer.
- if (!VD->getAnyInitializer()) {
- diagnoseMissingInitializer(S, OpPC, VD);
- return false;
- }
+ return false;
}
+
+ if (!D->getType().isConstQualified())
+ diagnoseNonConstVariable(S, OpPC, D);
+ else if (const auto *VD = dyn_cast<VarDecl>(D);
+ VD && !VD->getAnyInitializer())
+ diagnoseMissingInitializer(S, OpPC, VD);
+
return false;
}
@@ -104,6 +102,11 @@ static void diagnoseNonConstVariable(InterpState &S, CodePtr OpPC,
return;
}
+ // Rather random, but this is to match the diagnostic output of the current
+ // interpreter.
+ if (isa<ObjCIvarDecl>(VD))
+ return;
+
if (VD->getType()->isIntegralOrEnumerationType()) {
S.FFDiag(Loc, diag::note_constexpr_ltor_non_const_int, 1) << VD;
S.Note(VD->getLocation(), diag::note_declared_at);
@@ -454,16 +457,16 @@ bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
if (!CheckConstant(S, OpPC, Ptr))
return false;
- if (!CheckDummy(S, OpPC, Ptr))
+ if (!CheckDummy(S, OpPC, Ptr, AK_Read))
return false;
if (!CheckExtern(S, OpPC, Ptr))
return false;
if (!CheckRange(S, OpPC, Ptr, AK_Read))
return false;
- if (!CheckInitialized(S, OpPC, Ptr, AK_Read))
- return false;
if (!CheckActive(S, OpPC, Ptr, AK_Read))
return false;
+ if (!CheckInitialized(S, OpPC, Ptr, AK_Read))
+ return false;
if (!CheckTemporary(S, OpPC, Ptr, AK_Read))
return false;
if (!CheckMutable(S, OpPC, Ptr))
@@ -474,7 +477,7 @@ bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
if (!CheckLive(S, OpPC, Ptr, AK_Assign))
return false;
- if (!CheckDummy(S, OpPC, Ptr))
+ if (!CheckDummy(S, OpPC, Ptr, AK_Assign))
return false;
if (!CheckExtern(S, OpPC, Ptr))
return false;
@@ -657,7 +660,8 @@ bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR) {
return diagnoseUnknownDecl(S, OpPC, D);
}
-bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
+bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK) {
if (!Ptr.isDummy())
return true;
@@ -666,7 +670,15 @@ bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
if (!D)
return false;
- return diagnoseUnknownDecl(S, OpPC, D);
+ if (AK == AK_Read || AK == AK_Increment || AK == AK_Decrement)
+ return diagnoseUnknownDecl(S, OpPC, D);
+
+ assert(AK == AK_Assign);
+ if (S.getLangOpts().CPlusPlus11) {
+ const SourceInfo &E = S.Current->getSource(OpPC);
+ S.FFDiag(E, diag::note_constexpr_modify_global);
+ }
+ return false;
}
bool CheckNonNullArgs(InterpState &S, CodePtr OpPC, const Function *F,
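The new AK_Assign path distinguishes writes from reads through dummy pointers: a store to an object whose value the evaluator cannot track gets a dedicated note in C++11 and later. A hedged sketch of the source pattern:

    extern int g;                  // declared, but value unknown here
    constexpr bool set() { g = 1; return true; }
    // static_assert(set());      // error: modification of 'g' is not
                                  // permitted in a constant expression
                                  // (note_constexpr_modify_global)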
diff --git a/clang/lib/AST/Interp/Interp.h b/clang/lib/AST/Interp/Interp.h
index d9f23a4b8c96..eca1792e6471 100644
--- a/clang/lib/AST/Interp/Interp.h
+++ b/clang/lib/AST/Interp/Interp.h
@@ -56,7 +56,8 @@ bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
AccessKinds AK);
/// Checks if a pointer is a dummy pointer.
-bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
+bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK);
/// Checks if a pointer is null.
bool CheckNull(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
@@ -588,7 +589,7 @@ bool IncDecHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool Inc(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
- if (!CheckDummy(S, OpPC, Ptr))
+ if (!CheckDummy(S, OpPC, Ptr, AK_Increment))
return false;
if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
return false;
@@ -602,7 +603,7 @@ bool Inc(InterpState &S, CodePtr OpPC) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool IncPop(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
- if (!CheckDummy(S, OpPC, Ptr))
+ if (!CheckDummy(S, OpPC, Ptr, AK_Increment))
return false;
if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
return false;
@@ -617,7 +618,7 @@ bool IncPop(InterpState &S, CodePtr OpPC) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool Dec(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
- if (!CheckDummy(S, OpPC, Ptr))
+ if (!CheckDummy(S, OpPC, Ptr, AK_Decrement))
return false;
if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
return false;
@@ -631,7 +632,7 @@ bool Dec(InterpState &S, CodePtr OpPC) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool DecPop(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
- if (!CheckDummy(S, OpPC, Ptr))
+ if (!CheckDummy(S, OpPC, Ptr, AK_Decrement))
return false;
if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
return false;
@@ -1245,6 +1246,8 @@ inline bool GetPtrField(InterpState &S, CodePtr OpPC, uint32_t Off) {
return false;
if (!CheckRange(S, OpPC, Ptr, CSK_Field))
return false;
+ if (!CheckArray(S, OpPC, Ptr))
+ return false;
if (!CheckSubobject(S, OpPC, Ptr, CSK_Field))
return false;
@@ -1333,16 +1336,19 @@ inline bool GetPtrThisBase(InterpState &S, CodePtr OpPC, uint32_t Off) {
inline bool FinishInitPop(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
- if (Ptr.canBeInitialized())
+ if (Ptr.canBeInitialized()) {
Ptr.initialize();
+ Ptr.activate();
+ }
return true;
}
inline bool FinishInit(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.peek<Pointer>();
-
- if (Ptr.canBeInitialized())
+ if (Ptr.canBeInitialized()) {
Ptr.initialize();
+ Ptr.activate();
+ }
return true;
}
@@ -1368,9 +1374,6 @@ inline bool GetPtrVirtBasePop(InterpState &S, CodePtr OpPC,
const Pointer &Ptr = S.Stk.pop<Pointer>();
if (!CheckNull(S, OpPC, Ptr, CSK_Base))
return false;
- if (Ptr.isDummy()) // FIXME: Once we have type info for dummy pointers, this
- // needs to go.
- return false;
return VirtBaseHelper(S, OpPC, D, Ptr);
}
@@ -1536,9 +1539,6 @@ inline bool Memcpy(InterpState &S, CodePtr OpPC) {
template <class T, ArithOp Op>
bool OffsetHelper(InterpState &S, CodePtr OpPC, const T &Offset,
const Pointer &Ptr) {
- if (!CheckRange(S, OpPC, Ptr, CSK_ArrayToPointer))
- return false;
-
// A zero offset does not change the pointer.
if (Offset.isZero()) {
S.Stk.push<Pointer>(Ptr);
@@ -1556,8 +1556,12 @@ bool OffsetHelper(InterpState &S, CodePtr OpPC, const T &Offset,
if (!CheckArray(S, OpPC, Ptr))
return false;
- uint64_t Index = Ptr.getIndex();
uint64_t MaxIndex = static_cast<uint64_t>(Ptr.getNumElems());
+ uint64_t Index;
+ if (Ptr.isOnePastEnd())
+ Index = MaxIndex;
+ else
+ Index = Ptr.getIndex();
bool Invalid = false;
// Helper to report an invalid offset, computed as APSInt.
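The one-past-the-end special case matters because getIndex() is not meaningful for such pointers; treating their index as MaxIndex lets offsets round-trip. Sketch:

    constexpr int arr[3] = {1, 2, 3};
    constexpr const int *end = arr + 3;  // one-past-end: Index == MaxIndex
    constexpr const int *back = end - 1; // offsetting back is valid
    static_assert(*back == 3);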
diff --git a/clang/lib/AST/Interp/InterpBuiltin.cpp b/clang/lib/AST/Interp/InterpBuiltin.cpp
index 565c85bc2e0c..00206d09c113 100644
--- a/clang/lib/AST/Interp/InterpBuiltin.cpp
+++ b/clang/lib/AST/Interp/InterpBuiltin.cpp
@@ -214,7 +214,7 @@ static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
if (!CheckLive(S, OpPC, StrPtr, AK_Read))
return false;
- if (!CheckDummy(S, OpPC, StrPtr))
+ if (!CheckDummy(S, OpPC, StrPtr, AK_Read))
return false;
assert(StrPtr.getFieldDesc()->isPrimitiveArray());
diff --git a/clang/lib/AST/Interp/Pointer.cpp b/clang/lib/AST/Interp/Pointer.cpp
index ee8cedccb8d4..252f7ea46086 100644
--- a/clang/lib/AST/Interp/Pointer.cpp
+++ b/clang/lib/AST/Interp/Pointer.cpp
@@ -144,13 +144,18 @@ APValue Pointer::toAPValue() const {
// TODO: compute the offset into the object.
CharUnits Offset = CharUnits::Zero();
- bool IsOnePastEnd = isOnePastEnd();
// Build the path into the object.
Pointer Ptr = *this;
while (Ptr.isField() || Ptr.isArrayElement()) {
- if (Ptr.isArrayElement()) {
- Path.push_back(APValue::LValuePathEntry::ArrayIndex(Ptr.getIndex()));
+ if (Ptr.isArrayRoot()) {
+ Path.push_back(APValue::LValuePathEntry::ArrayIndex(0));
+ Ptr = Ptr.getBase();
+ } else if (Ptr.isArrayElement()) {
+ if (Ptr.isOnePastEnd())
+        Path.push_back(APValue::LValuePathEntry::ArrayIndex(
+            Ptr.getArray().getNumElems()));
+ else
+ Path.push_back(APValue::LValuePathEntry::ArrayIndex(Ptr.getIndex()));
Ptr = Ptr.getArray();
} else {
// TODO: figure out if base is virtual
@@ -173,7 +178,7 @@ APValue Pointer::toAPValue() const {
// Just invert the order of the elements.
std::reverse(Path.begin(), Path.end());
- return APValue(Base, Offset, Path, IsOnePastEnd, /*IsNullPtr=*/false);
+ return APValue(Base, Offset, Path, /*IsOnePastEnd=*/false, /*IsNullPtr=*/false);
}
void Pointer::print(llvm::raw_ostream &OS) const {
@@ -346,6 +351,7 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx) const {
} else {
Ok &= Composite(FieldTy, FP, Value);
}
+ ActiveField = FP.getFieldDesc()->asFieldDecl();
break;
}
}
diff --git a/clang/lib/AST/Interp/Pointer.h b/clang/lib/AST/Interp/Pointer.h
index 9900f37e60d4..93ca754d04a6 100644
--- a/clang/lib/AST/Interp/Pointer.h
+++ b/clang/lib/AST/Interp/Pointer.h
@@ -314,12 +314,14 @@ public:
/// Returns the type of the innermost field.
QualType getType() const {
if (inPrimitiveArray() && Offset != asBlockPointer().Base) {
- // Unfortunately, complex types are not array types in clang, but they are
- // for us.
+ // Unfortunately, complex and vector types are not array types in clang,
+ // but they are for us.
if (const auto *AT = getFieldDesc()->getType()->getAsArrayTypeUnsafe())
return AT->getElementType();
if (const auto *CT = getFieldDesc()->getType()->getAs<ComplexType>())
return CT->getElementType();
+ if (const auto *CT = getFieldDesc()->getType()->getAs<VectorType>())
+ return CT->getElementType();
}
return getFieldDesc()->getType();
}
@@ -535,9 +537,6 @@ public:
if (isZero())
return 0;
- if (isElementPastEnd())
- return 1;
-
// narrow()ed element in a composite array.
if (asBlockPointer().Base > sizeof(InlineDescriptor) &&
asBlockPointer().Base == Offset)
@@ -556,12 +555,16 @@ public:
if (!asBlockPointer().Pointee)
return false;
- return isElementPastEnd() || getSize() == getOffset();
+ return isElementPastEnd() ||
+ (getSize() == getOffset() && !isZeroSizeArray());
}
/// Checks if the pointer is an out-of-bounds element pointer.
bool isElementPastEnd() const { return Offset == PastEndMark; }
+ /// Checks if the pointer is pointing to a zero-size array.
+ bool isZeroSizeArray() const { return getFieldDesc()->isZeroSizeArray(); }
+
/// Dereferences the pointer, if it's live.
template <typename T> T &deref() const {
assert(isLive() && "Invalid pointer");
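The zero-size-array exclusion exists because such an array (a GNU extension) has getSize() == getOffset() from the start, so without it every pointer into one would report itself as one past the end. Sketch, assuming a GNU language mode:

    struct S { int len; int tail[0]; }; // GNU zero-length array
    // A pointer to s.tail has size == offset yet is not past-the-end,
    // which isZeroSizeArray() now lets isOnePastEnd() distinguish.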
diff --git a/clang/lib/AST/Interp/PrimType.h b/clang/lib/AST/Interp/PrimType.h
index 05a094d0c5b1..604fb5dfde1e 100644
--- a/clang/lib/AST/Interp/PrimType.h
+++ b/clang/lib/AST/Interp/PrimType.h
@@ -30,20 +30,20 @@ template <unsigned Bits, bool Signed> class Integral;
/// Enumeration of the primitive types of the VM.
enum PrimType : unsigned {
- PT_Sint8,
- PT_Uint8,
- PT_Sint16,
- PT_Uint16,
- PT_Sint32,
- PT_Uint32,
- PT_Sint64,
- PT_Uint64,
- PT_IntAP,
- PT_IntAPS,
- PT_Bool,
- PT_Float,
- PT_Ptr,
- PT_FnPtr,
+ PT_Sint8 = 0,
+ PT_Uint8 = 1,
+ PT_Sint16 = 2,
+ PT_Uint16 = 3,
+ PT_Sint32 = 4,
+ PT_Uint32 = 5,
+ PT_Sint64 = 6,
+ PT_Uint64 = 7,
+ PT_IntAP = 8,
+ PT_IntAPS = 9,
+ PT_Bool = 10,
+ PT_Float = 11,
+ PT_Ptr = 12,
+ PT_FnPtr = 13,
};
inline constexpr bool isPtrType(PrimType T) {
diff --git a/clang/lib/AST/Interp/Record.cpp b/clang/lib/AST/Interp/Record.cpp
index 6a0a28bc9124..8ded765fc1c4 100644
--- a/clang/lib/AST/Interp/Record.cpp
+++ b/clang/lib/AST/Interp/Record.cpp
@@ -16,7 +16,7 @@ Record::Record(const RecordDecl *Decl, BaseList &&SrcBases,
FieldList &&SrcFields, VirtualBaseList &&SrcVirtualBases,
unsigned VirtualSize, unsigned BaseSize)
: Decl(Decl), Bases(std::move(SrcBases)), Fields(std::move(SrcFields)),
- BaseSize(BaseSize), VirtualSize(VirtualSize) {
+ BaseSize(BaseSize), VirtualSize(VirtualSize), IsUnion(Decl->isUnion()) {
for (Base &V : SrcVirtualBases)
VirtualBases.push_back({ V.Decl, V.Offset + BaseSize, V.Desc, V.R });
diff --git a/clang/lib/AST/Interp/Record.h b/clang/lib/AST/Interp/Record.h
index cf0480b3f62f..83e15b125f77 100644
--- a/clang/lib/AST/Interp/Record.h
+++ b/clang/lib/AST/Interp/Record.h
@@ -53,7 +53,7 @@ public:
/// Returns the name of the underlying declaration.
const std::string getName() const;
/// Checks if the record is a union.
- bool isUnion() const { return getDecl()->isUnion(); }
+ bool isUnion() const { return IsUnion; }
/// Returns the size of the record.
unsigned getSize() const { return BaseSize; }
/// Returns the full size of the record, including records.
@@ -132,6 +132,8 @@ private:
unsigned BaseSize;
/// Size of all virtual bases.
unsigned VirtualSize;
+ /// If this record is a union.
+ bool IsUnion;
};
} // namespace interp
diff --git a/clang/lib/AST/JSONNodeDumper.cpp b/clang/lib/AST/JSONNodeDumper.cpp
index 42608476b1c1..3bbb3a905e9b 100644
--- a/clang/lib/AST/JSONNodeDumper.cpp
+++ b/clang/lib/AST/JSONNodeDumper.cpp
@@ -1028,7 +1028,7 @@ void JSONNodeDumper::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) {
if (D->hasDefaultArgument())
JOS.attributeObject("defaultArg", [=] {
- Visit(D->getDefaultArgument(), SourceRange(),
+ Visit(D->getDefaultArgument().getArgument(), SourceRange(),
D->getDefaultArgStorage().getInheritedFrom(),
D->defaultArgumentWasInherited() ? "inherited from" : "previous");
});
@@ -1044,7 +1044,7 @@ void JSONNodeDumper::VisitNonTypeTemplateParmDecl(
if (D->hasDefaultArgument())
JOS.attributeObject("defaultArg", [=] {
- Visit(D->getDefaultArgument(), SourceRange(),
+ Visit(D->getDefaultArgument().getArgument(), SourceRange(),
D->getDefaultArgStorage().getInheritedFrom(),
D->defaultArgumentWasInherited() ? "inherited from" : "previous");
});
diff --git a/clang/lib/AST/ODRDiagsEmitter.cpp b/clang/lib/AST/ODRDiagsEmitter.cpp
index 5b1cdc16e2ea..37f0f68c9235 100644
--- a/clang/lib/AST/ODRDiagsEmitter.cpp
+++ b/clang/lib/AST/ODRDiagsEmitter.cpp
@@ -1409,13 +1409,15 @@ bool ODRDiagsEmitter::diagnoseMismatch(
}
if (HasFirstDefaultArgument && HasSecondDefaultArgument) {
- QualType FirstType = FirstTTPD->getDefaultArgument();
- QualType SecondType = SecondTTPD->getDefaultArgument();
- if (computeODRHash(FirstType) != computeODRHash(SecondType)) {
+ TemplateArgument FirstTA =
+ FirstTTPD->getDefaultArgument().getArgument();
+ TemplateArgument SecondTA =
+ SecondTTPD->getDefaultArgument().getArgument();
+ if (computeODRHash(FirstTA) != computeODRHash(SecondTA)) {
DiagTemplateError(FunctionTemplateParameterDifferentDefaultArgument)
- << (i + 1) << FirstType;
+ << (i + 1) << FirstTA;
DiagTemplateNote(FunctionTemplateParameterDifferentDefaultArgument)
- << (i + 1) << SecondType;
+ << (i + 1) << SecondTA;
return true;
}
}
@@ -1521,8 +1523,11 @@ bool ODRDiagsEmitter::diagnoseMismatch(
}
if (HasFirstDefaultArgument && HasSecondDefaultArgument) {
- Expr *FirstDefaultArgument = FirstNTTPD->getDefaultArgument();
- Expr *SecondDefaultArgument = SecondNTTPD->getDefaultArgument();
+ TemplateArgument FirstDefaultArgument =
+ FirstNTTPD->getDefaultArgument().getArgument();
+ TemplateArgument SecondDefaultArgument =
+ SecondNTTPD->getDefaultArgument().getArgument();
+
if (computeODRHash(FirstDefaultArgument) !=
computeODRHash(SecondDefaultArgument)) {
DiagTemplateError(FunctionTemplateParameterDifferentDefaultArgument)
diff --git a/clang/lib/AST/ODRHash.cpp b/clang/lib/AST/ODRHash.cpp
index 6f04739cf669..246e56231539 100644
--- a/clang/lib/AST/ODRHash.cpp
+++ b/clang/lib/AST/ODRHash.cpp
@@ -462,7 +462,7 @@ public:
D->hasDefaultArgument() && !D->defaultArgumentWasInherited();
Hash.AddBoolean(hasDefaultArgument);
if (hasDefaultArgument) {
- AddTemplateArgument(D->getDefaultArgument());
+ AddTemplateArgument(D->getDefaultArgument().getArgument());
}
Hash.AddBoolean(D->isParameterPack());
@@ -480,7 +480,7 @@ public:
D->hasDefaultArgument() && !D->defaultArgumentWasInherited();
Hash.AddBoolean(hasDefaultArgument);
if (hasDefaultArgument) {
- AddStmt(D->getDefaultArgument());
+ AddTemplateArgument(D->getDefaultArgument().getArgument());
}
Hash.AddBoolean(D->isParameterPack());
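These call sites all follow the same API migration: the default-argument accessors on template parameter declarations now return a TemplateArgumentLoc instead of a bare QualType/Expr*, so callers unwrap the semantic argument explicitly. Sketch:

    const TemplateArgumentLoc &DefLoc = TTPD->getDefaultArgument();
    const TemplateArgument &Def = DefLoc.getArgument(); // semantic argument
    SourceLocation Loc = DefLoc.getLocation();          // location preserved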
diff --git a/clang/lib/AST/OpenACCClause.cpp b/clang/lib/AST/OpenACCClause.cpp
index 8ff6dabcbc48..cb2c7f98be75 100644
--- a/clang/lib/AST/OpenACCClause.cpp
+++ b/clang/lib/AST/OpenACCClause.cpp
@@ -35,7 +35,7 @@ bool OpenACCClauseWithVarList::classof(const OpenACCClause *C) {
OpenACCAttachClause::classof(C) || OpenACCNoCreateClause::classof(C) ||
OpenACCPresentClause::classof(C) || OpenACCCopyClause::classof(C) ||
OpenACCCopyInClause::classof(C) || OpenACCCopyOutClause::classof(C) ||
- OpenACCCreateClause::classof(C);
+ OpenACCReductionClause::classof(C) || OpenACCCreateClause::classof(C);
}
bool OpenACCClauseWithCondition::classof(const OpenACCClause *C) {
return OpenACCIfClause::classof(C) || OpenACCSelfClause::classof(C);
@@ -310,6 +310,16 @@ OpenACCDeviceTypeClause *OpenACCDeviceTypeClause::Create(
OpenACCDeviceTypeClause(K, BeginLoc, LParenLoc, Archs, EndLoc);
}
+OpenACCReductionClause *OpenACCReductionClause::Create(
+ const ASTContext &C, SourceLocation BeginLoc, SourceLocation LParenLoc,
+ OpenACCReductionOperator Operator, ArrayRef<Expr *> VarList,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(
+ OpenACCReductionClause::totalSizeToAlloc<Expr *>(VarList.size()));
+ return new (Mem)
+ OpenACCReductionClause(BeginLoc, LParenLoc, Operator, VarList, EndLoc);
+}
+
//===----------------------------------------------------------------------===//
// OpenACC clauses printing methods
//===----------------------------------------------------------------------===//
@@ -445,6 +455,14 @@ void OpenACCClausePrinter::VisitCreateClause(const OpenACCCreateClause &C) {
OS << ")";
}
+void OpenACCClausePrinter::VisitReductionClause(
+ const OpenACCReductionClause &C) {
+ OS << "reduction(" << C.getReductionOp() << ": ";
+ llvm::interleaveComma(C.getVarList(), OS,
+ [&](const Expr *E) { printExpr(E); });
+ OS << ")";
+}
+
void OpenACCClausePrinter::VisitWaitClause(const OpenACCWaitClause &C) {
OS << "wait";
if (!C.getLParenLoc().isInvalid()) {
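For reference, the source-level form the new printer emits, as a hedged sketch (clang's OpenACC support is still being built out):

    void sum(int *a, int n) {
      int s = 0;
    #pragma acc parallel loop reduction(+:s)
      for (int i = 0; i < n; ++i)
        s += a[i];
    }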
diff --git a/clang/lib/AST/ParentMap.cpp b/clang/lib/AST/ParentMap.cpp
index 3d6a1cc84c7b..534793b837bb 100644
--- a/clang/lib/AST/ParentMap.cpp
+++ b/clang/lib/AST/ParentMap.cpp
@@ -97,6 +97,22 @@ static void BuildParentMap(MapTy& M, Stmt* S,
BuildParentMap(M, SubStmt, OVMode);
}
break;
+ case Stmt::CXXDefaultArgExprClass:
+ if (auto *Arg = dyn_cast<CXXDefaultArgExpr>(S)) {
+ if (Arg->hasRewrittenInit()) {
+ M[Arg->getExpr()] = S;
+ BuildParentMap(M, Arg->getExpr(), OVMode);
+ }
+ }
+ break;
+ case Stmt::CXXDefaultInitExprClass:
+ if (auto *Init = dyn_cast<CXXDefaultInitExpr>(S)) {
+ if (Init->hasRewrittenInit()) {
+ M[Init->getExpr()] = S;
+ BuildParentMap(M, Init->getExpr(), OVMode);
+ }
+ }
+ break;
default:
for (Stmt *SubStmt : S->children()) {
if (SubStmt) {
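hasRewrittenInit() is set when the initializer must be re-evaluated per call site; a common trigger is a consteval default argument such as std::source_location::current(). Sketch:

    #include <source_location>
    void log(std::source_location loc = std::source_location::current());
    // Each call gets its own rewritten initializer, so the parent map now
    // records that per-call expression under its CXXDefaultArgExpr.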
diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
index caab4ab0ef16..00b8c43af035 100644
--- a/clang/lib/AST/StmtProfile.cpp
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -2588,6 +2588,12 @@ void OpenACCClauseProfiler::VisitWaitClause(const OpenACCWaitClause &Clause) {
/// Nothing to do here, there are no sub-statements.
void OpenACCClauseProfiler::VisitDeviceTypeClause(
const OpenACCDeviceTypeClause &Clause) {}
+
+void OpenACCClauseProfiler::VisitReductionClause(
+ const OpenACCReductionClause &Clause) {
+ for (auto *E : Clause.getVarList())
+ Profiler.VisitStmt(E);
+}
} // namespace
void StmtProfiler::VisitOpenACCComputeConstruct(
diff --git a/clang/lib/AST/TemplateBase.cpp b/clang/lib/AST/TemplateBase.cpp
index 3310d7dc24c5..a7ee973b7f7d 100644
--- a/clang/lib/AST/TemplateBase.cpp
+++ b/clang/lib/AST/TemplateBase.cpp
@@ -538,9 +538,19 @@ void TemplateArgument::print(const PrintingPolicy &Policy, raw_ostream &Out,
Out << "nullptr";
break;
- case Template:
- getAsTemplate().print(Out, Policy, TemplateName::Qualified::Fully);
+ case Template: {
+ TemplateName TN = getAsTemplate();
+ if (const auto *TD = TN.getAsTemplateDecl();
+ TD && TD->getDeclName().isEmpty()) {
+ assert(isa<TemplateTemplateParmDecl>(TD) &&
+ "Unexpected anonymous template");
+ const auto *TTP = cast<TemplateTemplateParmDecl>(TD);
+ Out << "template-parameter-" << TTP->getDepth() << "-" << TTP->getIndex();
+ } else {
+ TN.print(Out, Policy, TemplateName::Qualified::Fully);
+ }
break;
+ }
case TemplateExpansion:
getAsTemplateOrTemplatePattern().print(Out, Policy);
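With this change, an unnamed template template parameter prints as a stable placeholder rather than an empty name. Sketch:

    template <template <typename> class>  // unnamed TTP: depth 0, index 0
    struct S;
    // An argument referring to it now prints as "template-parameter-0-0".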
diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp
index efcd74717a4e..4a1e94ffe283 100644
--- a/clang/lib/AST/TextNodeDumper.cpp
+++ b/clang/lib/AST/TextNodeDumper.cpp
@@ -457,6 +457,10 @@ void TextNodeDumper::Visit(const OpenACCClause *C) {
});
OS << ")";
break;
+ case OpenACCClauseKind::Reduction:
+ OS << " clause Operator: "
+ << cast<OpenACCReductionClause>(C)->getReductionOp();
+ break;
default:
// Nothing to do here.
break;
diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp
index f69a8f80a639..04f105c12887 100644
--- a/clang/lib/AST/Type.cpp
+++ b/clang/lib/AST/Type.cpp
@@ -2382,6 +2382,14 @@ bool Type::isIncompleteType(NamedDecl **Def) const {
*Def = Rec;
return !Rec->isCompleteDefinition();
}
+ case InjectedClassName: {
+ CXXRecordDecl *Rec = cast<InjectedClassNameType>(CanonicalType)->getDecl();
+ if (!Rec->isBeingDefined())
+ return false;
+ if (Def)
+ *Def = Rec;
+ return true;
+ }
case ConstantArray:
case VariableArray:
// An array is incomplete if its element type is incomplete
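The new InjectedClassName case treats the injected-class-name as incomplete only while its class is still being defined, matching the usual rule:

    template <typename T> struct Node {
      Node *next;   // OK: pointer to the still-incomplete Node<T>
      // Node item; // ill-formed: Node<T> is incomplete until the brace
    };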
diff --git a/clang/lib/AST/TypePrinter.cpp b/clang/lib/AST/TypePrinter.cpp
index 87f0a8728d85..5ed56b367a46 100644
--- a/clang/lib/AST/TypePrinter.cpp
+++ b/clang/lib/AST/TypePrinter.cpp
@@ -2273,16 +2273,17 @@ bool clang::isSubstitutedDefaultArgument(ASTContext &Ctx, TemplateArgument Arg,
if (auto *TTPD = dyn_cast<TemplateTypeParmDecl>(Param)) {
return TTPD->hasDefaultArgument() &&
- isSubstitutedTemplateArgument(Ctx, Arg, TTPD->getDefaultArgument(),
- Args, Depth);
+ isSubstitutedTemplateArgument(
+ Ctx, Arg, TTPD->getDefaultArgument().getArgument(), Args, Depth);
} else if (auto *TTPD = dyn_cast<TemplateTemplateParmDecl>(Param)) {
return TTPD->hasDefaultArgument() &&
isSubstitutedTemplateArgument(
Ctx, Arg, TTPD->getDefaultArgument().getArgument(), Args, Depth);
} else if (auto *NTTPD = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
return NTTPD->hasDefaultArgument() &&
- isSubstitutedTemplateArgument(Ctx, Arg, NTTPD->getDefaultArgument(),
- Args, Depth);
+ isSubstitutedTemplateArgument(
+ Ctx, Arg, NTTPD->getDefaultArgument().getArgument(), Args,
+ Depth);
}
return false;
}
diff --git a/clang/lib/Analysis/CFG.cpp b/clang/lib/Analysis/CFG.cpp
index 64e6155de090..02317257c274 100644
--- a/clang/lib/Analysis/CFG.cpp
+++ b/clang/lib/Analysis/CFG.cpp
@@ -556,6 +556,10 @@ public:
private:
// Visitors to walk an AST and construct the CFG.
+ CFGBlock *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *Default,
+ AddStmtChoice asc);
+ CFGBlock *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *Default,
+ AddStmtChoice asc);
CFGBlock *VisitInitListExpr(InitListExpr *ILE, AddStmtChoice asc);
CFGBlock *VisitAddrLabelExpr(AddrLabelExpr *A, AddStmtChoice asc);
CFGBlock *VisitAttributedStmt(AttributedStmt *A, AddStmtChoice asc);
@@ -2254,16 +2258,10 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc,
asc, ExternallyDestructed);
case Stmt::CXXDefaultArgExprClass:
+ return VisitCXXDefaultArgExpr(cast<CXXDefaultArgExpr>(S), asc);
+
case Stmt::CXXDefaultInitExprClass:
- // FIXME: The expression inside a CXXDefaultArgExpr is owned by the
- // called function's declaration, not by the caller. If we simply add
- // this expression to the CFG, we could end up with the same Expr
- // appearing multiple times (PR13385).
- //
- // It's likewise possible for multiple CXXDefaultInitExprs for the same
- // expression to be used in the same function (through aggregate
- // initialization).
- return VisitStmt(S, asc);
+ return VisitCXXDefaultInitExpr(cast<CXXDefaultInitExpr>(S), asc);
case Stmt::CXXBindTemporaryExprClass:
return VisitCXXBindTemporaryExpr(cast<CXXBindTemporaryExpr>(S), asc);
@@ -2433,6 +2431,40 @@ CFGBlock *CFGBuilder::VisitChildren(Stmt *S) {
return B;
}
+CFGBlock *CFGBuilder::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *Arg,
+ AddStmtChoice asc) {
+ if (Arg->hasRewrittenInit()) {
+ if (asc.alwaysAdd(*this, Arg)) {
+ autoCreateBlock();
+ appendStmt(Block, Arg);
+ }
+ return VisitStmt(Arg->getExpr(), asc);
+ }
+
+  // We can't add the default argument if it's not rewritten because the
+  // expression inside a CXXDefaultArgExpr is owned by the called function's
+  // declaration, not by the caller; if we added it to the CFG directly, we
+  // could end up with the same expression appearing multiple times.
+ return VisitStmt(Arg, asc);
+}
+
+CFGBlock *CFGBuilder::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *Init,
+ AddStmtChoice asc) {
+ if (Init->hasRewrittenInit()) {
+ if (asc.alwaysAdd(*this, Init)) {
+ autoCreateBlock();
+ appendStmt(Block, Init);
+ }
+ return VisitStmt(Init->getExpr(), asc);
+ }
+
+  // We can't add the default initializer if it's not rewritten because
+  // multiple CXXDefaultInitExprs for the same sub-expression can be used in
+  // the same function (through aggregate initialization), so we could end up
+  // with the same expression appearing multiple times.
+ return VisitStmt(Init, asc);
+}
+
CFGBlock *CFGBuilder::VisitInitListExpr(InitListExpr *ILE, AddStmtChoice asc) {
if (asc.alwaysAdd(*this, ILE)) {
autoCreateBlock();
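The split exists because an unrewritten default argument is a single Expr owned by the callee's declaration and shared by every call site (PR13385), so adding it to the CFG directly would duplicate nodes. Sketch:

    int f(int x = 42);
    void g() { f(); f(); } // both calls reference the same '42' Expr;
                           // only rewritten initializers are per-call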
diff --git a/clang/lib/Analysis/FlowSensitive/CMakeLists.txt b/clang/lib/Analysis/FlowSensitive/CMakeLists.txt
index 6631fe27f3d9..05cdaa7e2782 100644
--- a/clang/lib/Analysis/FlowSensitive/CMakeLists.txt
+++ b/clang/lib/Analysis/FlowSensitive/CMakeLists.txt
@@ -2,6 +2,7 @@ add_clang_library(clangAnalysisFlowSensitive
AdornedCFG.cpp
Arena.cpp
ASTOps.cpp
+ CNFFormula.cpp
DataflowAnalysisContext.cpp
DataflowEnvironment.cpp
Formula.cpp
@@ -36,3 +37,4 @@ add_custom_command(OUTPUT HTMLLogger.inc
DEPENDS ${CLANG_SOURCE_DIR}/utils/bundle_resources.py HTMLLogger.html HTMLLogger.css HTMLLogger.js
VERBATIM)
add_custom_target(clangAnalysisFlowSensitiveResources DEPENDS HTMLLogger.inc)
+set_target_properties(clangAnalysisFlowSensitiveResources PROPERTIES FOLDER "Clang/Misc")
diff --git a/clang/lib/Analysis/FlowSensitive/CNFFormula.cpp b/clang/lib/Analysis/FlowSensitive/CNFFormula.cpp
new file mode 100644
index 000000000000..2410ce1e7bd6
--- /dev/null
+++ b/clang/lib/Analysis/FlowSensitive/CNFFormula.cpp
@@ -0,0 +1,303 @@
+//===- CNFFormula.cpp -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A representation of a boolean formula in 3-CNF.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/CNFFormula.h"
+#include "llvm/ADT/DenseSet.h"
+
+#include <queue>
+
+namespace clang {
+namespace dataflow {
+
+namespace {
+
+/// Applies simplifications while building up a CNFFormula.
+/// We keep track of unit clauses, which tell us variables that must be
+/// true/false in any model that satisfies the overall formula.
+/// Such variables can be dropped from subsequently-added clauses, which
+/// may in turn yield more unit clauses or even a contradiction.
+/// The total added complexity of this preprocessing is O(N): for every
+/// clause we do a lookup against the known unit clauses, and each lookup
+/// is O(1) on average. This method won't catch all contradictory
+/// formulas; more passes could in principle catch more cases, but we
+/// leave those and the general case to the proper SAT solver.
+struct CNFFormulaBuilder {
+ // Formula should outlive CNFFormulaBuilder.
+ explicit CNFFormulaBuilder(CNFFormula &CNF) : Formula(CNF) {}
+
+ /// Adds the `L1 v ... v Ln` clause to the formula. Applies
+ /// simplifications, based on single-literal clauses.
+ ///
+ /// Requirements:
+ ///
+ /// `Li` must not be `NullLit`.
+ ///
+ /// All literals must be distinct.
+ void addClause(ArrayRef<Literal> Literals) {
+ // We generate clauses with up to 3 literals in this file.
+ assert(!Literals.empty() && Literals.size() <= 3);
+ // Contains literals of the simplified clause.
+ llvm::SmallVector<Literal> Simplified;
+ for (auto L : Literals) {
+ assert(L != NullLit &&
+ llvm::all_of(Simplified, [L](Literal S) { return S != L; }));
+ auto X = var(L);
+ if (trueVars.contains(X)) { // X must be true
+ if (isPosLit(L))
+ return; // Omit clause `(... v X v ...)`, it is `true`.
+ else
+ continue; // Omit `!X` from `(... v !X v ...)`.
+ }
+ if (falseVars.contains(X)) { // X must be false
+ if (isNegLit(L))
+ return; // Omit clause `(... v !X v ...)`, it is `true`.
+ else
+ continue; // Omit `X` from `(... v X v ...)`.
+ }
+ Simplified.push_back(L);
+ }
+ if (Simplified.empty()) {
+ // Simplification made the clause empty, which is equivalent to `false`.
+ // We already know that this formula is unsatisfiable.
+ Formula.addClause(Simplified);
+ return;
+ }
+ if (Simplified.size() == 1) {
+      // We have a new unit clause.
+ const Literal lit = Simplified.front();
+ const Variable v = var(lit);
+ if (isPosLit(lit))
+ trueVars.insert(v);
+ else
+ falseVars.insert(v);
+ }
+ Formula.addClause(Simplified);
+ }
+
+ /// Returns true if we observed a contradiction while adding clauses.
+  /// In that case, the formula is already known to be unsatisfiable.
+ bool isKnownContradictory() { return Formula.knownContradictory(); }
+
+private:
+ CNFFormula &Formula;
+ llvm::DenseSet<Variable> trueVars;
+ llvm::DenseSet<Variable> falseVars;
+};
+
+} // namespace
+
+CNFFormula::CNFFormula(Variable LargestVar)
+ : LargestVar(LargestVar), KnownContradictory(false) {
+ Clauses.push_back(0);
+ ClauseStarts.push_back(0);
+}
+
+void CNFFormula::addClause(ArrayRef<Literal> lits) {
+ assert(llvm::all_of(lits, [](Literal L) { return L != NullLit; }));
+
+ if (lits.empty())
+ KnownContradictory = true;
+
+ const size_t S = Clauses.size();
+ ClauseStarts.push_back(S);
+ Clauses.insert(Clauses.end(), lits.begin(), lits.end());
+}
+
+CNFFormula buildCNF(const llvm::ArrayRef<const Formula *> &Formulas,
+ llvm::DenseMap<Variable, Atom> &Atomics) {
+  // The general strategy of the algorithm implemented below is to map each
+  // of the sub-formulas in `Formulas` to a unique variable and use these
+  // variables in the resulting CNF expression to avoid exponential blow-up.
+  // The number of literals in the resulting formula is guaranteed to be
+  // linear in the number of sub-formulas in `Formulas`.
+
+  // Map each sub-formula in `Formulas` to a unique variable.
+ llvm::DenseMap<const Formula *, Variable> FormulaToVar;
+ // Store variable identifiers and Atom of atomic booleans.
+ Variable NextVar = 1;
+ {
+ std::queue<const Formula *> UnprocessedFormulas;
+ for (const Formula *F : Formulas)
+ UnprocessedFormulas.push(F);
+ while (!UnprocessedFormulas.empty()) {
+ Variable Var = NextVar;
+ const Formula *F = UnprocessedFormulas.front();
+ UnprocessedFormulas.pop();
+
+ if (!FormulaToVar.try_emplace(F, Var).second)
+ continue;
+ ++NextVar;
+
+ for (const Formula *Op : F->operands())
+ UnprocessedFormulas.push(Op);
+ if (F->kind() == Formula::AtomRef)
+ Atomics[Var] = F->getAtom();
+ }
+ }
+
+ auto GetVar = [&FormulaToVar](const Formula *F) {
+ auto ValIt = FormulaToVar.find(F);
+ assert(ValIt != FormulaToVar.end());
+ return ValIt->second;
+ };
+
+ CNFFormula CNF(NextVar - 1);
+ std::vector<bool> ProcessedSubVals(NextVar, false);
+ CNFFormulaBuilder builder(CNF);
+
+  // Add a conjunct for each variable that represents a top-level conjunct
+  // in `Formulas`.
+ for (const Formula *F : Formulas)
+ builder.addClause(posLit(GetVar(F)));
+
+ // Add conjuncts that represent the mapping between newly-created variables
+ // and their corresponding sub-formulas.
+ std::queue<const Formula *> UnprocessedFormulas;
+ for (const Formula *F : Formulas)
+ UnprocessedFormulas.push(F);
+ while (!UnprocessedFormulas.empty()) {
+ const Formula *F = UnprocessedFormulas.front();
+ UnprocessedFormulas.pop();
+ const Variable Var = GetVar(F);
+
+ if (ProcessedSubVals[Var])
+ continue;
+ ProcessedSubVals[Var] = true;
+
+ switch (F->kind()) {
+ case Formula::AtomRef:
+ break;
+ case Formula::Literal:
+ CNF.addClause(F->literal() ? posLit(Var) : negLit(Var));
+ break;
+ case Formula::And: {
+ const Variable LHS = GetVar(F->operands()[0]);
+ const Variable RHS = GetVar(F->operands()[1]);
+
+ if (LHS == RHS) {
+ // `X <=> (A ^ A)` is equivalent to `(!X v A) ^ (X v !A)` which is
+ // already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), posLit(LHS)});
+ builder.addClause({posLit(Var), negLit(LHS)});
+ } else {
+ // `X <=> (A ^ B)` is equivalent to `(!X v A) ^ (!X v B) ^ (X v !A v
+ // !B)` which is already in conjunctive normal form. Below we add each
+ // of the conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), posLit(LHS)});
+ builder.addClause({negLit(Var), posLit(RHS)});
+ builder.addClause({posLit(Var), negLit(LHS), negLit(RHS)});
+ }
+ break;
+ }
+ case Formula::Or: {
+ const Variable LHS = GetVar(F->operands()[0]);
+ const Variable RHS = GetVar(F->operands()[1]);
+
+ if (LHS == RHS) {
+ // `X <=> (A v A)` is equivalent to `(!X v A) ^ (X v !A)` which is
+ // already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), posLit(LHS)});
+ builder.addClause({posLit(Var), negLit(LHS)});
+ } else {
+ // `X <=> (A v B)` is equivalent to `(!X v A v B) ^ (X v !A) ^ (X v
+ // !B)` which is already in conjunctive normal form. Below we add each
+ // of the conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), posLit(LHS), posLit(RHS)});
+ builder.addClause({posLit(Var), negLit(LHS)});
+ builder.addClause({posLit(Var), negLit(RHS)});
+ }
+ break;
+ }
+ case Formula::Not: {
+ const Variable Operand = GetVar(F->operands()[0]);
+
+ // `X <=> !Y` is equivalent to `(!X v !Y) ^ (X v Y)` which is
+ // already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), negLit(Operand)});
+ builder.addClause({posLit(Var), posLit(Operand)});
+ break;
+ }
+ case Formula::Implies: {
+ const Variable LHS = GetVar(F->operands()[0]);
+ const Variable RHS = GetVar(F->operands()[1]);
+
+ // `X <=> (A => B)` is equivalent to
+ // `(X v A) ^ (X v !B) ^ (!X v !A v B)` which is already in
+ // conjunctive normal form. Below we add each of the conjuncts of
+ // the latter expression to the result.
+ builder.addClause({posLit(Var), posLit(LHS)});
+ builder.addClause({posLit(Var), negLit(RHS)});
+ builder.addClause({negLit(Var), negLit(LHS), posLit(RHS)});
+ break;
+ }
+ case Formula::Equal: {
+ const Variable LHS = GetVar(F->operands()[0]);
+ const Variable RHS = GetVar(F->operands()[1]);
+
+ if (LHS == RHS) {
+ // `X <=> (A <=> A)` is equivalent to `X` which is already in
+ // conjunctive normal form. Below we add each of the conjuncts of the
+ // latter expression to the result.
+ builder.addClause(posLit(Var));
+
+        // No need to visit the operands of `F`.
+ continue;
+ }
+ // `X <=> (A <=> B)` is equivalent to
+ // `(X v A v B) ^ (X v !A v !B) ^ (!X v A v !B) ^ (!X v !A v B)` which
+ // is already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ builder.addClause({posLit(Var), posLit(LHS), posLit(RHS)});
+ builder.addClause({posLit(Var), negLit(LHS), negLit(RHS)});
+ builder.addClause({negLit(Var), posLit(LHS), negLit(RHS)});
+ builder.addClause({negLit(Var), negLit(LHS), posLit(RHS)});
+ break;
+ }
+ }
+ if (builder.isKnownContradictory()) {
+ return CNF;
+ }
+ for (const Formula *Child : F->operands())
+ UnprocessedFormulas.push(Child);
+ }
+
+ // Unit clauses that were added later were not
+ // considered for the simplification of earlier clauses. Do a final
+ // pass to find more opportunities for simplification.
+ CNFFormula FinalCNF(NextVar - 1);
+ CNFFormulaBuilder FinalBuilder(FinalCNF);
+
+ // Collect unit clauses.
+ for (ClauseID C = 1; C <= CNF.numClauses(); ++C) {
+ if (CNF.clauseSize(C) == 1) {
+ FinalBuilder.addClause(CNF.clauseLiterals(C)[0]);
+ }
+ }
+
+ // Add all clauses that were added previously, preserving the order.
+ for (ClauseID C = 1; C <= CNF.numClauses(); ++C) {
+ FinalBuilder.addClause(CNF.clauseLiterals(C));
+ if (FinalBuilder.isKnownContradictory()) {
+ break;
+ }
+ }
+  // The final pass may have produced new unit clauses again, but we stop
+  // here and leave the rest to the solver algorithm.
+ return FinalCNF;
+}
+
+} // namespace dataflow
+} // namespace clang
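A hedged usage sketch of the new entry point (Formulas is assumed to be an llvm::ArrayRef<const Formula *> supplied by the caller; CNFFormulaBuilder itself stays file-local):

    llvm::DenseMap<Variable, Atom> Atomics;
    CNFFormula CNF = buildCNF(Formulas, Atomics); // Tseitin-style encoding
    if (CNF.knownContradictory()) {
      // Already known unsatisfiable; callers can skip the solver loop.
    }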
diff --git a/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp b/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
index 3ef363753532..a39f0e0b29ad 100644
--- a/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
+++ b/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
@@ -12,105 +12,31 @@
//===----------------------------------------------------------------------===//
#include <cassert>
-#include <cstddef>
-#include <cstdint>
-#include <queue>
#include <vector>
+#include "clang/Analysis/FlowSensitive/CNFFormula.h"
#include "clang/Analysis/FlowSensitive/Formula.h"
#include "clang/Analysis/FlowSensitive/Solver.h"
#include "clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
namespace clang {
namespace dataflow {
-// `WatchedLiteralsSolver` is an implementation of Algorithm D from Knuth's
-// The Art of Computer Programming Volume 4: Satisfiability, Fascicle 6. It is
-// based on the backtracking DPLL algorithm [1], keeps references to a single
-// "watched" literal per clause, and uses a set of "active" variables to perform
-// unit propagation.
-//
-// The solver expects that its input is a boolean formula in conjunctive normal
-// form that consists of clauses of at least one literal. A literal is either a
-// boolean variable or its negation. Below we define types, data structures, and
-// utilities that are used to represent boolean formulas in conjunctive normal
-// form.
-//
-// [1] https://en.wikipedia.org/wiki/DPLL_algorithm
-
-/// Boolean variables are represented as positive integers.
-using Variable = uint32_t;
-
-/// A null boolean variable is used as a placeholder in various data structures
-/// and algorithms.
-static constexpr Variable NullVar = 0;
-
-/// Literals are represented as positive integers. Specifically, for a boolean
-/// variable `V` that is represented as the positive integer `I`, the positive
-/// literal `V` is represented as the integer `2*I` and the negative literal
-/// `!V` is represented as the integer `2*I+1`.
-using Literal = uint32_t;
-
-/// A null literal is used as a placeholder in various data structures and
-/// algorithms.
-[[maybe_unused]] static constexpr Literal NullLit = 0;
-
-/// Returns the positive literal `V`.
-static constexpr Literal posLit(Variable V) { return 2 * V; }
-
-static constexpr bool isPosLit(Literal L) { return 0 == (L & 1); }
-
-static constexpr bool isNegLit(Literal L) { return 1 == (L & 1); }
-
-/// Returns the negative literal `!V`.
-static constexpr Literal negLit(Variable V) { return 2 * V + 1; }
-
-/// Returns the negated literal `!L`.
-static constexpr Literal notLit(Literal L) { return L ^ 1; }
-
-/// Returns the variable of `L`.
-static constexpr Variable var(Literal L) { return L >> 1; }
-
-/// Clause identifiers are represented as positive integers.
-using ClauseID = uint32_t;
-
-/// A null clause identifier is used as a placeholder in various data structures
-/// and algorithms.
-static constexpr ClauseID NullClause = 0;
+namespace {
-/// A boolean formula in conjunctive normal form.
-struct CNFFormula {
- /// `LargestVar` is equal to the largest positive integer that represents a
- /// variable in the formula.
- const Variable LargestVar;
-
- /// Literals of all clauses in the formula.
- ///
- /// The element at index 0 stands for the literal in the null clause. It is
- /// set to 0 and isn't used. Literals of clauses in the formula start from the
- /// element at index 1.
- ///
- /// For example, for the formula `(L1 v L2) ^ (L2 v L3 v L4)` the elements of
- /// `Clauses` will be `[0, L1, L2, L2, L3, L4]`.
- std::vector<Literal> Clauses;
+class WatchedLiteralsSolverImpl {
+ /// Stores the variable identifier and Atom for atomic booleans in the
+ /// formula.
+ llvm::DenseMap<Variable, Atom> Atomics;
- /// Start indices of clauses of the formula in `Clauses`.
- ///
- /// The element at index 0 stands for the start index of the null clause. It
- /// is set to 0 and isn't used. Start indices of clauses in the formula start
- /// from the element at index 1.
- ///
- /// For example, for the formula `(L1 v L2) ^ (L2 v L3 v L4)` the elements of
- /// `ClauseStarts` will be `[0, 1, 3]`. Note that the literals of the first
- /// clause always start at index 1. The start index for the literals of the
- /// second clause depends on the size of the first clause and so on.
- std::vector<size_t> ClauseStarts;
+ /// A boolean formula in conjunctive normal form that the solver will attempt
+ /// to prove satisfiable. The formula will be modified in the process.
+ CNFFormula CNF;
/// Maps literals (indices of the vector) to clause identifiers (elements of
/// the vector) that watch the respective literals.
@@ -127,328 +53,6 @@ struct CNFFormula {
/// clauses in the formula start from the element at index 1.
std::vector<ClauseID> NextWatched;
- /// Stores the variable identifier and Atom for atomic booleans in the
- /// formula.
- llvm::DenseMap<Variable, Atom> Atomics;
-
- /// Indicates that we already know the formula is unsatisfiable.
- /// During construction, we catch simple cases of conflicting unit-clauses.
- bool KnownContradictory;
-
- explicit CNFFormula(Variable LargestVar,
- llvm::DenseMap<Variable, Atom> Atomics)
- : LargestVar(LargestVar), Atomics(std::move(Atomics)),
- KnownContradictory(false) {
- Clauses.push_back(0);
- ClauseStarts.push_back(0);
- NextWatched.push_back(0);
- const size_t NumLiterals = 2 * LargestVar + 1;
- WatchedHead.resize(NumLiterals + 1, 0);
- }
-
- /// Adds the `L1 v ... v Ln` clause to the formula.
- /// Requirements:
- ///
- /// `Li` must not be `NullLit`.
- ///
- /// All literals in the input that are not `NullLit` must be distinct.
- void addClause(ArrayRef<Literal> lits) {
- assert(!lits.empty());
- assert(llvm::all_of(lits, [](Literal L) { return L != NullLit; }));
-
- const ClauseID C = ClauseStarts.size();
- const size_t S = Clauses.size();
- ClauseStarts.push_back(S);
- Clauses.insert(Clauses.end(), lits.begin(), lits.end());
-
- // Designate the first literal as the "watched" literal of the clause.
- NextWatched.push_back(WatchedHead[lits.front()]);
- WatchedHead[lits.front()] = C;
- }
-
- /// Returns the number of literals in clause `C`.
- size_t clauseSize(ClauseID C) const {
- return C == ClauseStarts.size() - 1 ? Clauses.size() - ClauseStarts[C]
- : ClauseStarts[C + 1] - ClauseStarts[C];
- }
-
- /// Returns the literals of clause `C`.
- llvm::ArrayRef<Literal> clauseLiterals(ClauseID C) const {
- return llvm::ArrayRef<Literal>(&Clauses[ClauseStarts[C]], clauseSize(C));
- }
-};
-
-/// Applies simplifications while building up a BooleanFormula.
-/// We keep track of unit clauses, which tell us variables that must be
-/// true/false in any model that satisfies the overall formula.
-/// Such variables can be dropped from subsequently-added clauses, which
-/// may in turn yield more unit clauses or even a contradiction.
- /// The total added complexity of this preprocessing is O(N): for every
- /// clause, we do a lookup for each known unit clause, and the lookup is
- /// O(1) on average. This method won't catch all contradictory formulas;
- /// more passes could in principle catch more cases, but we leave those
- /// and the general case to the proper SAT solver.
-struct CNFFormulaBuilder {
- // Formula should outlive CNFFormulaBuilder.
- explicit CNFFormulaBuilder(CNFFormula &CNF)
- : Formula(CNF) {}
-
- /// Adds the `L1 v ... v Ln` clause to the formula. Applies
- /// simplifications, based on single-literal clauses.
- ///
- /// Requirements:
- ///
- /// `Li` must not be `NullLit`.
- ///
- /// All literals must be distinct.
- void addClause(ArrayRef<Literal> Literals) {
- // We generate clauses with up to 3 literals in this file.
- assert(!Literals.empty() && Literals.size() <= 3);
- // Contains literals of the simplified clause.
- llvm::SmallVector<Literal> Simplified;
- for (auto L : Literals) {
- assert(L != NullLit &&
- llvm::all_of(Simplified,
- [L](Literal S) { return S != L; }));
- auto X = var(L);
- if (trueVars.contains(X)) { // X must be true
- if (isPosLit(L))
- return; // Omit clause `(... v X v ...)`, it is `true`.
- else
- continue; // Omit `!X` from `(... v !X v ...)`.
- }
- if (falseVars.contains(X)) { // X must be false
- if (isNegLit(L))
- return; // Omit clause `(... v !X v ...)`, it is `true`.
- else
- continue; // Omit `X` from `(... v X v ...)`.
- }
- Simplified.push_back(L);
- }
- if (Simplified.empty()) {
- // Simplification made the clause empty, which is equivalent to `false`.
- // We already know that this formula is unsatisfiable.
- Formula.KnownContradictory = true;
- // We can add any of the input literals to get an unsatisfiable formula.
- Formula.addClause(Literals[0]);
- return;
- }
- if (Simplified.size() == 1) {
- // We have new unit clause.
- const Literal lit = Simplified.front();
- const Variable v = var(lit);
- if (isPosLit(lit))
- trueVars.insert(v);
- else
- falseVars.insert(v);
- }
- Formula.addClause(Simplified);
- }
-
- /// Returns true if we observed a contradiction while adding clauses.
- /// In that case the formula is already known to be unsatisfiable.
- bool isKnownContradictory() { return Formula.KnownContradictory; }
-
-private:
- CNFFormula &Formula;
- llvm::DenseSet<Variable> trueVars;
- llvm::DenseSet<Variable> falseVars;
-};
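// A minimal, self-contained sketch of the unit-clause simplification that
// `CNFFormulaBuilder` performs above: known unit clauses let us drop
// satisfied clauses and falsified literals as new clauses arrive. The
// literal encoding (2*Var for the positive literal, 2*Var+1 for the
// negative one) matches the solver's; the container choices and names here
// are illustrative only and are not part of this patch.
#include <cstdint>
#include <unordered_set>
#include <utility>
#include <vector>

using Variable = uint32_t;
using Literal = uint32_t;

static Variable var(Literal L) { return L >> 1; }
static bool isPosLit(Literal L) { return (L & 1) == 0; }

struct SimplifyingBuilder {
  std::unordered_set<Variable> TrueVars, FalseVars;
  std::vector<std::vector<Literal>> Clauses;
  bool KnownContradictory = false;

  void addClause(const std::vector<Literal> &Lits) {
    std::vector<Literal> Simplified;
    for (Literal L : Lits) {
      Variable X = var(L);
      if (TrueVars.count(X)) {
        if (isPosLit(L))
          return; // `(... v X v ...)` is already true; drop the clause.
        continue;  // `!X` is false; drop the literal.
      }
      if (FalseVars.count(X)) {
        if (!isPosLit(L))
          return; // `(... v !X v ...)` is already true; drop the clause.
        continue;  // `X` is false; drop the literal.
      }
      Simplified.push_back(L);
    }
    if (Simplified.empty()) {
      KnownContradictory = true; // The clause simplified to `false`.
      return;
    }
    if (Simplified.size() == 1) // New unit clause: record the forced value.
      (isPosLit(Simplified[0]) ? TrueVars : FalseVars)
          .insert(var(Simplified[0]));
    Clauses.push_back(std::move(Simplified));
  }
};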
-
-/// Converts the conjunction of `Vals` into a formula in conjunctive normal
-/// form where each clause has at least one and at most three literals.
-CNFFormula buildCNF(const llvm::ArrayRef<const Formula *> &Vals) {
- // The general strategy of the algorithm implemented below is to map each
- // of the sub-values in `Vals` to a unique variable and use these variables in
- // the resulting CNF expression to avoid exponential blow up. The number of
- // literals in the resulting formula is guaranteed to be linear in the number
- // of sub-formulas in `Vals`.
-
- // Map each sub-formula in `Vals` to a unique variable.
- llvm::DenseMap<const Formula *, Variable> SubValsToVar;
- // Store variable identifiers and Atom of atomic booleans.
- llvm::DenseMap<Variable, Atom> Atomics;
- Variable NextVar = 1;
- {
- std::queue<const Formula *> UnprocessedSubVals;
- for (const Formula *Val : Vals)
- UnprocessedSubVals.push(Val);
- while (!UnprocessedSubVals.empty()) {
- Variable Var = NextVar;
- const Formula *Val = UnprocessedSubVals.front();
- UnprocessedSubVals.pop();
-
- if (!SubValsToVar.try_emplace(Val, Var).second)
- continue;
- ++NextVar;
-
- for (const Formula *F : Val->operands())
- UnprocessedSubVals.push(F);
- if (Val->kind() == Formula::AtomRef)
- Atomics[Var] = Val->getAtom();
- }
- }
-
- auto GetVar = [&SubValsToVar](const Formula *Val) {
- auto ValIt = SubValsToVar.find(Val);
- assert(ValIt != SubValsToVar.end());
- return ValIt->second;
- };
-
- CNFFormula CNF(NextVar - 1, std::move(Atomics));
- std::vector<bool> ProcessedSubVals(NextVar, false);
- CNFFormulaBuilder builder(CNF);
-
- // Add a conjunct for each variable that represents a top-level conjunction
- // value in `Vals`.
- for (const Formula *Val : Vals)
- builder.addClause(posLit(GetVar(Val)));
-
- // Add conjuncts that represent the mapping between newly-created variables
- // and their corresponding sub-formulas.
- std::queue<const Formula *> UnprocessedSubVals;
- for (const Formula *Val : Vals)
- UnprocessedSubVals.push(Val);
- while (!UnprocessedSubVals.empty()) {
- const Formula *Val = UnprocessedSubVals.front();
- UnprocessedSubVals.pop();
- const Variable Var = GetVar(Val);
-
- if (ProcessedSubVals[Var])
- continue;
- ProcessedSubVals[Var] = true;
-
- switch (Val->kind()) {
- case Formula::AtomRef:
- break;
- case Formula::Literal:
- CNF.addClause(Val->literal() ? posLit(Var) : negLit(Var));
- break;
- case Formula::And: {
- const Variable LHS = GetVar(Val->operands()[0]);
- const Variable RHS = GetVar(Val->operands()[1]);
-
- if (LHS == RHS) {
- // `X <=> (A ^ A)` is equivalent to `(!X v A) ^ (X v !A)` which is
- // already in conjunctive normal form. Below we add each of the
- // conjuncts of the latter expression to the result.
- builder.addClause({negLit(Var), posLit(LHS)});
- builder.addClause({posLit(Var), negLit(LHS)});
- } else {
- // `X <=> (A ^ B)` is equivalent to `(!X v A) ^ (!X v B) ^ (X v !A v
- // !B)` which is already in conjunctive normal form. Below we add each
- // of the conjuncts of the latter expression to the result.
- builder.addClause({negLit(Var), posLit(LHS)});
- builder.addClause({negLit(Var), posLit(RHS)});
- builder.addClause({posLit(Var), negLit(LHS), negLit(RHS)});
- }
- break;
- }
- case Formula::Or: {
- const Variable LHS = GetVar(Val->operands()[0]);
- const Variable RHS = GetVar(Val->operands()[1]);
-
- if (LHS == RHS) {
- // `X <=> (A v A)` is equivalent to `(!X v A) ^ (X v !A)` which is
- // already in conjunctive normal form. Below we add each of the
- // conjuncts of the latter expression to the result.
- builder.addClause({negLit(Var), posLit(LHS)});
- builder.addClause({posLit(Var), negLit(LHS)});
- } else {
- // `X <=> (A v B)` is equivalent to `(!X v A v B) ^ (X v !A) ^ (X v
- // !B)` which is already in conjunctive normal form. Below we add each
- // of the conjuncts of the latter expression to the result.
- builder.addClause({negLit(Var), posLit(LHS), posLit(RHS)});
- builder.addClause({posLit(Var), negLit(LHS)});
- builder.addClause({posLit(Var), negLit(RHS)});
- }
- break;
- }
- case Formula::Not: {
- const Variable Operand = GetVar(Val->operands()[0]);
-
- // `X <=> !Y` is equivalent to `(!X v !Y) ^ (X v Y)` which is
- // already in conjunctive normal form. Below we add each of the
- // conjuncts of the latter expression to the result.
- builder.addClause({negLit(Var), negLit(Operand)});
- builder.addClause({posLit(Var), posLit(Operand)});
- break;
- }
- case Formula::Implies: {
- const Variable LHS = GetVar(Val->operands()[0]);
- const Variable RHS = GetVar(Val->operands()[1]);
-
- // `X <=> (A => B)` is equivalent to
- // `(X v A) ^ (X v !B) ^ (!X v !A v B)` which is already in
- // conjunctive normal form. Below we add each of the conjuncts of
- // the latter expression to the result.
- builder.addClause({posLit(Var), posLit(LHS)});
- builder.addClause({posLit(Var), negLit(RHS)});
- builder.addClause({negLit(Var), negLit(LHS), posLit(RHS)});
- break;
- }
- case Formula::Equal: {
- const Variable LHS = GetVar(Val->operands()[0]);
- const Variable RHS = GetVar(Val->operands()[1]);
-
- if (LHS == RHS) {
- // `X <=> (A <=> A)` is equivalent to `X` which is already in
- // conjunctive normal form. Below we add each of the conjuncts of the
- // latter expression to the result.
- builder.addClause(posLit(Var));
-
- // No need to visit the sub-values of `Val`.
- continue;
- }
- // `X <=> (A <=> B)` is equivalent to
- // `(X v A v B) ^ (X v !A v !B) ^ (!X v A v !B) ^ (!X v !A v B)` which
- // is already in conjunctive normal form. Below we add each of the
- // conjuncts of the latter expression to the result.
- builder.addClause({posLit(Var), posLit(LHS), posLit(RHS)});
- builder.addClause({posLit(Var), negLit(LHS), negLit(RHS)});
- builder.addClause({negLit(Var), posLit(LHS), negLit(RHS)});
- builder.addClause({negLit(Var), negLit(LHS), posLit(RHS)});
- break;
- }
- }
- if (builder.isKnownContradictory()) {
- return CNF;
- }
- for (const Formula *Child : Val->operands())
- UnprocessedSubVals.push(Child);
- }
-
- // Unit clauses that were added later were not
- // considered for the simplification of earlier clauses. Do a final
- // pass to find more opportunities for simplification.
- CNFFormula FinalCNF(NextVar - 1, std::move(CNF.Atomics));
- CNFFormulaBuilder FinalBuilder(FinalCNF);
-
- // Collect unit clauses.
- for (ClauseID C = 1; C < CNF.ClauseStarts.size(); ++C) {
- if (CNF.clauseSize(C) == 1) {
- FinalBuilder.addClause(CNF.clauseLiterals(C)[0]);
- }
- }
-
- // Add all clauses that were added previously, preserving the order.
- for (ClauseID C = 1; C < CNF.ClauseStarts.size(); ++C) {
- FinalBuilder.addClause(CNF.clauseLiterals(C));
- if (FinalBuilder.isKnownContradictory()) {
- break;
- }
- }
- // It is possible there were new unit clauses again, but
- // we stop here and leave the rest to the solver algorithm.
- return FinalCNF;
-}
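// A worked instance of the Tseitin-style encoding implemented by `buildCNF`
// above. For the input formula `A v B`, fresh variables are introduced —
// say X1 for `A v B`, X2 for `A`, X3 for `B` — and the clauses emitted are:
//
//   (X1)                 the top-level formula must hold
//   (!X1 v X2 v X3)      X1 => (X2 v X3)
//   (X1 v !X2)           X2 => X1
//   (X1 v !X3)           X3 => X1
//
// This is satisfiable exactly when `A v B` is, and the clause count stays
// linear in the number of sub-formulas instead of blowing up the way naive
// distribution into CNF can. The builder's simplification then kicks in:
// the unit clause (X1) lets it reduce the rest to just (X2 v X3).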
-
-class WatchedLiteralsSolverImpl {
- /// A boolean formula in conjunctive normal form that the solver will attempt
- /// to prove satisfiable. The formula will be modified in the process.
- CNFFormula CNF;
-
/// The search for a satisfying assignment of the variables in `Formula` will
/// proceed in levels, starting from 1 and going up to `Formula.LargestVar`
/// (inclusive). The current level is stored in `Level`. At each level the
@@ -501,20 +105,37 @@ class WatchedLiteralsSolverImpl {
public:
explicit WatchedLiteralsSolverImpl(
const llvm::ArrayRef<const Formula *> &Vals)
- : CNF(buildCNF(Vals)), LevelVars(CNF.LargestVar + 1),
- LevelStates(CNF.LargestVar + 1) {
+ // `Atomics` needs to be initialized first so that we can use it as an
+ // output argument of `buildCNF()`.
+ : Atomics(), CNF(buildCNF(Vals, Atomics)),
+ LevelVars(CNF.largestVar() + 1), LevelStates(CNF.largestVar() + 1) {
assert(!Vals.empty());
+ // Skip initialization if the formula is known to be contradictory.
+ if (CNF.knownContradictory())
+ return;
+
+ // Initialize `NextWatched` and `WatchedHead`.
+ NextWatched.push_back(0);
+ const size_t NumLiterals = 2 * CNF.largestVar() + 1;
+ WatchedHead.resize(NumLiterals + 1, 0);
+ for (ClauseID C = 1; C <= CNF.numClauses(); ++C) {
+ // Designate the first literal as the "watched" literal of the clause.
+ Literal FirstLit = CNF.clauseLiterals(C).front();
+ NextWatched.push_back(WatchedHead[FirstLit]);
+ WatchedHead[FirstLit] = C;
+ }
+
// Initialize the state at the root level to a decision so that in
// `reverseForcedMoves` we don't have to check that `Level >= 0` on each
// iteration.
LevelStates[0] = State::Decision;
// Initialize all variables as unassigned.
- VarAssignments.resize(CNF.LargestVar + 1, Assignment::Unassigned);
+ VarAssignments.resize(CNF.largestVar() + 1, Assignment::Unassigned);
// Initialize the active variables.
- for (Variable Var = CNF.LargestVar; Var != NullVar; --Var) {
+ for (Variable Var = CNF.largestVar(); Var != NullVar; --Var) {
if (isWatched(posLit(Var)) || isWatched(negLit(Var)))
ActiveVars.push_back(Var);
}
@@ -523,7 +144,7 @@ public:
// Returns the `Result` and the number of iterations "remaining" from
// `MaxIterations` (that is, `MaxIterations` - iterations in this call).
std::pair<Solver::Result, std::int64_t> solve(std::int64_t MaxIterations) && {
- if (CNF.KnownContradictory) {
+ if (CNF.knownContradictory()) {
// Short-cut the solving process. We already found out at CNF
// construction time that the formula is unsatisfiable.
return std::make_pair(Solver::Result::Unsatisfiable(), MaxIterations);
@@ -625,7 +246,7 @@ private:
/// Returns a satisfying truth assignment to the atoms in the boolean formula.
llvm::DenseMap<Atom, Solver::Result::Assignment> buildSolution() {
llvm::DenseMap<Atom, Solver::Result::Assignment> Solution;
- for (auto &Atomic : CNF.Atomics) {
+ for (auto &Atomic : Atomics) {
// A variable may have a definite true/false assignment, or it may be
// unassigned indicating its truth value does not affect the result of
// the formula. Unassigned variables are assigned to true as a default.
@@ -661,24 +282,25 @@ private:
const Literal FalseLit = VarAssignments[Var] == Assignment::AssignedTrue
? negLit(Var)
: posLit(Var);
- ClauseID FalseLitWatcher = CNF.WatchedHead[FalseLit];
- CNF.WatchedHead[FalseLit] = NullClause;
+ ClauseID FalseLitWatcher = WatchedHead[FalseLit];
+ WatchedHead[FalseLit] = NullClause;
while (FalseLitWatcher != NullClause) {
- const ClauseID NextFalseLitWatcher = CNF.NextWatched[FalseLitWatcher];
+ const ClauseID NextFalseLitWatcher = NextWatched[FalseLitWatcher];
// Pick the first non-false literal as the new watched literal.
- const size_t FalseLitWatcherStart = CNF.ClauseStarts[FalseLitWatcher];
- size_t NewWatchedLitIdx = FalseLitWatcherStart + 1;
- while (isCurrentlyFalse(CNF.Clauses[NewWatchedLitIdx]))
- ++NewWatchedLitIdx;
- const Literal NewWatchedLit = CNF.Clauses[NewWatchedLitIdx];
+ const CNFFormula::Iterator FalseLitWatcherStart =
+ CNF.startOfClause(FalseLitWatcher);
+ CNFFormula::Iterator NewWatchedLitIter = FalseLitWatcherStart.next();
+ while (isCurrentlyFalse(*NewWatchedLitIter))
+ ++NewWatchedLitIter;
+ const Literal NewWatchedLit = *NewWatchedLitIter;
const Variable NewWatchedLitVar = var(NewWatchedLit);
// Swap the old watched literal for the new one in `FalseLitWatcher` to
// maintain the invariant that the watched literal is at the beginning of
// the clause.
- CNF.Clauses[NewWatchedLitIdx] = FalseLit;
- CNF.Clauses[FalseLitWatcherStart] = NewWatchedLit;
+ *NewWatchedLitIter = FalseLit;
+ *FalseLitWatcherStart = NewWatchedLit;
// If the new watched literal isn't watched by any other clause and its
// variable isn't assigned we need to add it to the active variables.
@@ -686,8 +308,8 @@ private:
VarAssignments[NewWatchedLitVar] == Assignment::Unassigned)
ActiveVars.push_back(NewWatchedLitVar);
- CNF.NextWatched[FalseLitWatcher] = CNF.WatchedHead[NewWatchedLit];
- CNF.WatchedHead[NewWatchedLit] = FalseLitWatcher;
+ NextWatched[FalseLitWatcher] = WatchedHead[NewWatchedLit];
+ WatchedHead[NewWatchedLit] = FalseLitWatcher;
// Go to the next clause that watches `FalseLit`.
FalseLitWatcher = NextFalseLitWatcher;
@@ -697,8 +319,8 @@ private:
/// Returns true if and only if one of the clauses that watch `Lit` is a unit
/// clause.
bool watchedByUnitClause(Literal Lit) const {
- for (ClauseID LitWatcher = CNF.WatchedHead[Lit]; LitWatcher != NullClause;
- LitWatcher = CNF.NextWatched[LitWatcher]) {
+ for (ClauseID LitWatcher = WatchedHead[Lit]; LitWatcher != NullClause;
+ LitWatcher = NextWatched[LitWatcher]) {
llvm::ArrayRef<Literal> Clause = CNF.clauseLiterals(LitWatcher);
// Assert the invariant that the watched literal is always the first one
@@ -728,9 +350,7 @@ private:
}
/// Returns true if and only if `Lit` is watched by a clause in `Formula`.
- bool isWatched(Literal Lit) const {
- return CNF.WatchedHead[Lit] != NullClause;
- }
+ bool isWatched(Literal Lit) const { return WatchedHead[Lit] != NullClause; }
/// Returns an assignment for an unassigned variable.
Assignment decideAssignment(Variable Var) const {
@@ -742,8 +362,8 @@ private:
/// Returns a set of all watched literals.
llvm::DenseSet<Literal> watchedLiterals() const {
llvm::DenseSet<Literal> WatchedLiterals;
- for (Literal Lit = 2; Lit < CNF.WatchedHead.size(); Lit++) {
- if (CNF.WatchedHead[Lit] == NullClause)
+ for (Literal Lit = 2; Lit < WatchedHead.size(); Lit++) {
+ if (WatchedHead[Lit] == NullClause)
continue;
WatchedLiterals.insert(Lit);
}
@@ -783,6 +403,8 @@ private:
}
};
+} // namespace
+
Solver::Result
WatchedLiteralsSolver::solve(llvm::ArrayRef<const Formula *> Vals) {
if (Vals.empty())
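Throughout the solver hunks above, `WatchedHead[Lit]` names the first clause
watching `Lit` and `NextWatched[C]` the next clause watching the same
literal — an intrusive singly-linked list per literal. A minimal sketch of
that scheme, with illustrative names that are not taken from this patch:

#include <cstddef>
#include <vector>

using Literal = size_t;
using ClauseID = size_t; // 0 serves as the null clause, as in the patch.

struct WatchLists {
  std::vector<ClauseID> WatchedHead; // Literal -> first clause watching it.
  std::vector<ClauseID> NextWatched; // Clause -> next watcher of the literal.

  WatchLists(size_t NumLiterals, size_t NumClauses)
      : WatchedHead(NumLiterals + 1, 0), NextWatched(NumClauses + 1, 0) {}

  // Prepend clause C to the watch list of Lit, exactly as the constructor
  // of WatchedLiteralsSolverImpl does for each clause's first literal.
  void watch(ClauseID C, Literal Lit) {
    NextWatched[C] = WatchedHead[Lit];
    WatchedHead[Lit] = C;
  }

  // Visit every clause currently watching Lit.
  template <typename Fn> void forEachWatcher(Literal Lit, Fn Visit) const {
    for (ClauseID C = WatchedHead[Lit]; C != 0; C = NextWatched[C])
      Visit(C);
  }
};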
diff --git a/clang/lib/Basic/FileManager.cpp b/clang/lib/Basic/FileManager.cpp
index 143c04309d07..1dc51deb8298 100644
--- a/clang/lib/Basic/FileManager.cpp
+++ b/clang/lib/Basic/FileManager.cpp
@@ -82,6 +82,22 @@ getDirectoryFromFile(FileManager &FileMgr, StringRef Filename,
return FileMgr.getDirectoryRef(DirName, CacheFailure);
}
+DirectoryEntry *&FileManager::getRealDirEntry(const llvm::vfs::Status &Status) {
+ assert(Status.isDirectory() && "The directory should exist!");
+ // See if we have already opened a directory with the
+ // same inode (this occurs on Unix-like systems when one dir is
+ // symlinked to another, for example) or the same path (on
+ // Windows).
+ DirectoryEntry *&UDE = UniqueRealDirs[Status.getUniqueID()];
+
+ if (!UDE) {
+ // We don't have this directory yet; add it. We use the string
+ // key from the SeenDirEntries map as the string.
+ UDE = new (DirsAlloc.Allocate()) DirectoryEntry();
+ }
+ return UDE;
+}
+
/// Add all ancestors of the given path (pointing to either a file or
/// a directory) as virtual directories.
void FileManager::addAncestorsAsVirtualDirs(StringRef Path) {
@@ -99,10 +115,21 @@ void FileManager::addAncestorsAsVirtualDirs(StringRef Path) {
if (NamedDirEnt.second)
return;
- // Add the virtual directory to the cache.
- auto *UDE = new (DirsAlloc.Allocate()) DirectoryEntry();
- NamedDirEnt.second = *UDE;
- VirtualDirectoryEntries.push_back(UDE);
+ // Check to see if the directory exists.
+ llvm::vfs::Status Status;
+ auto statError =
+ getStatValue(DirName, Status, false, nullptr /*directory lookup*/);
+ if (statError) {
+ // There's no real directory at the given path.
+ // Add the virtual directory to the cache.
+ auto *UDE = new (DirsAlloc.Allocate()) DirectoryEntry();
+ NamedDirEnt.second = *UDE;
+ VirtualDirectoryEntries.push_back(UDE);
+ } else {
+ // There is a real directory at the given path; use its unique entry.
+ DirectoryEntry *&UDE = getRealDirEntry(Status);
+ NamedDirEnt.second = *UDE;
+ }
// Recursively add the other ancestors.
addAncestorsAsVirtualDirs(DirName);
@@ -162,17 +189,8 @@ FileManager::getDirectoryRef(StringRef DirName, bool CacheFailure) {
return llvm::errorCodeToError(statError);
}
- // It exists. See if we have already opened a directory with the
- // same inode (this occurs on Unix-like systems when one dir is
- // symlinked to another, for example) or the same path (on
- // Windows).
- DirectoryEntry *&UDE = UniqueRealDirs[Status.getUniqueID()];
-
- if (!UDE) {
- // We don't have this directory yet, add it. We use the string
- // key from the SeenDirEntries map as the string.
- UDE = new (DirsAlloc.Allocate()) DirectoryEntry();
- }
+ // It exists.
+ DirectoryEntry *&UDE = getRealDirEntry(Status);
NamedDirEnt.second = *UDE;
return DirectoryEntryRef(NamedDirEnt);
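The refactoring above extracts the inode-keyed deduplication into
`getRealDirEntry` so that both `getDirectoryRef` and
`addAncestorsAsVirtualDirs` share one `DirectoryEntry` per on-disk
directory, even when several lookups reach it (e.g. through symlinks). A
reduced sketch of the idea; the key and entry types are stand-ins, not
clang's:

#include <cstdint>
#include <map>
#include <memory>
#include <utility>

// Stand-in for llvm::sys::fs::UniqueID: a (device, inode) pair identifies
// a filesystem object regardless of which path reached it.
using UniqueID = std::pair<uint64_t, uint64_t>;

struct DirEntry {};

class DirCache {
  std::map<UniqueID, std::unique_ptr<DirEntry>> UniqueRealDirs;

public:
  // One canonical entry per on-disk directory, created on first use — the
  // analogue of FileManager::getRealDirEntry above.
  DirEntry &getRealDirEntry(UniqueID ID) {
    std::unique_ptr<DirEntry> &E = UniqueRealDirs[ID];
    if (!E)
      E = std::make_unique<DirEntry>();
    return *E;
  }
};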
diff --git a/clang/lib/Basic/Targets/Mips.cpp b/clang/lib/Basic/Targets/Mips.cpp
index 3a65f53c5248..174bc9d2ab99 100644
--- a/clang/lib/Basic/Targets/Mips.cpp
+++ b/clang/lib/Basic/Targets/Mips.cpp
@@ -273,6 +273,34 @@ bool MipsTargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
Diags.Report(diag::err_mips_fp64_req) << "-mfp64";
return false;
}
+ // FPXX requires mips2+
+ if (FPMode == FPXX && CPU == "mips1") {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-mfpxx" << CPU;
+ return false;
+ }
+ // -mmsa with -msoft-float does not make sense
+ if (FloatABI == SoftFloat && HasMSA) {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-msoft-float"
+ << "-mmsa";
+ return false;
+ }
+ // Option -mmsa is only permitted on MIPS32 if revision 2 or higher is present
+ if (HasMSA && (CPU == "mips1" || CPU == "mips2" || getISARev() < 2) &&
+ ABI == "o32") {
+ Diags.Report(diag::err_mips_fp64_req) << "-mmsa";
+ return false;
+ }
+ // MSA requires FP64
+ if (FPMode == FPXX && HasMSA) {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-mfpxx"
+ << "-mmsa";
+ return false;
+ }
+ if (FPMode == FP32 && HasMSA) {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-mfp32"
+ << "-mmsa";
+ return false;
+ }
return true;
}
diff --git a/clang/lib/Basic/Targets/Mips.h b/clang/lib/Basic/Targets/Mips.h
index 730deb674aa5..b6f110249fa7 100644
--- a/clang/lib/Basic/Targets/Mips.h
+++ b/clang/lib/Basic/Targets/Mips.h
@@ -85,8 +85,13 @@ public:
return CPU == "mips32r6" || CPU == "mips64r6";
}
- bool isFP64Default() const {
- return CPU == "mips32r6" || ABI == "n32" || ABI == "n64" || ABI == "64";
+ enum FPModeEnum getDefaultFPMode() const {
+ if (CPU == "mips32r6" || ABI == "n32" || ABI == "n64" || ABI == "64")
+ return FP64;
+ else if (CPU == "mips1")
+ return FP32;
+ else
+ return FPXX;
}
bool isNan2008() const override { return IsNan2008; }
@@ -315,10 +320,11 @@ public:
IsSingleFloat = false;
FloatABI = HardFloat;
DspRev = NoDSP;
- FPMode = isFP64Default() ? FP64 : FPXX;
NoOddSpreg = false;
+ FPMode = getDefaultFPMode();
bool OddSpregGiven = false;
bool StrictAlign = false;
+ bool FpGiven = false;
for (const auto &Feature : Features) {
if (Feature == "+single-float")
@@ -343,13 +349,16 @@ public:
HasMSA = true;
else if (Feature == "+nomadd4")
DisableMadd4 = true;
- else if (Feature == "+fp64")
+ else if (Feature == "+fp64") {
FPMode = FP64;
- else if (Feature == "-fp64")
+ FpGiven = true;
+ } else if (Feature == "-fp64") {
FPMode = FP32;
- else if (Feature == "+fpxx")
+ FpGiven = true;
+ } else if (Feature == "+fpxx") {
FPMode = FPXX;
- else if (Feature == "+nan2008")
+ FpGiven = true;
+ } else if (Feature == "+nan2008")
IsNan2008 = true;
else if (Feature == "-nan2008")
IsNan2008 = false;
@@ -376,6 +385,11 @@ public:
if (StrictAlign)
HasUnalignedAccess = false;
+ if (HasMSA && !FpGiven) {
+ FPMode = FP64;
+ Features.push_back("+fp64");
+ }
+
setDataLayout();
return true;
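As a reference while reading the MIPS hunks above, here is the new
FP-register defaulting of `getDefaultFPMode()` restated as a stand-alone
function; the enum values mirror the patch, everything else is illustrative:

#include <string>

enum FPModeEnum { FP32, FP64, FPXX };

// 64-bit FP registers on mips32r6 and the 64-bit ABIs; 32-bit registers on
// mips1, which the Mips.cpp checks above exclude from FPXX (FPXX requires
// mips2+); the ABI-neutral FPXX otherwise. An explicit +fp64/-fp64/+fpxx
// feature (tracked via FpGiven) still overrides this default, and +msa
// without an explicit choice forces FP64.
FPModeEnum defaultFPMode(const std::string &CPU, const std::string &ABI) {
  if (CPU == "mips32r6" || ABI == "n32" || ABI == "n64" || ABI == "64")
    return FP64;
  if (CPU == "mips1")
    return FP32;
  return FPXX;
}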
diff --git a/clang/lib/Basic/Targets/WebAssembly.h b/clang/lib/Basic/Targets/WebAssembly.h
index 4db97867df60..e4a449d1ff30 100644
--- a/clang/lib/Basic/Targets/WebAssembly.h
+++ b/clang/lib/Basic/Targets/WebAssembly.h
@@ -90,6 +90,9 @@ public:
StringRef getABI() const override;
bool setABI(const std::string &Name) override;
+ bool useFP16ConversionIntrinsics() const override {
+ return !HasHalfPrecision;
+ }
protected:
void getTargetDefines(const LangOptions &Opts,
diff --git a/clang/lib/Basic/Targets/X86.cpp b/clang/lib/Basic/Targets/X86.cpp
index b823eaf6ce33..3a30cff917bb 100644
--- a/clang/lib/Basic/Targets/X86.cpp
+++ b/clang/lib/Basic/Targets/X86.cpp
@@ -310,15 +310,9 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasAVX512VNNI = true;
} else if (Feature == "+avx512bf16") {
HasAVX512BF16 = true;
- } else if (Feature == "+avx512er") {
- HasAVX512ER = true;
- Diags.Report(diag::warn_knl_knm_isa_support_removed);
} else if (Feature == "+avx512fp16") {
HasAVX512FP16 = true;
HasLegalHalfType = true;
- } else if (Feature == "+avx512pf") {
- HasAVX512PF = true;
- Diags.Report(diag::warn_knl_knm_isa_support_removed);
} else if (Feature == "+avx512dq") {
HasAVX512DQ = true;
} else if (Feature == "+avx512bitalg") {
@@ -375,9 +369,6 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasWBNOINVD = true;
} else if (Feature == "+prefetchi") {
HasPREFETCHI = true;
- } else if (Feature == "+prefetchwt1") {
- HasPREFETCHWT1 = true;
- Diags.Report(diag::warn_knl_knm_isa_support_removed);
} else if (Feature == "+clzero") {
HasCLZERO = true;
} else if (Feature == "+cldemote") {
@@ -840,12 +831,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__AVX512VNNI__");
if (HasAVX512BF16)
Builder.defineMacro("__AVX512BF16__");
- if (HasAVX512ER)
- Builder.defineMacro("__AVX512ER__");
if (HasAVX512FP16)
Builder.defineMacro("__AVX512FP16__");
- if (HasAVX512PF)
- Builder.defineMacro("__AVX512PF__");
if (HasAVX512DQ)
Builder.defineMacro("__AVX512DQ__");
if (HasAVX512BITALG)
@@ -897,8 +884,6 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__SM4__");
if (HasPREFETCHI)
Builder.defineMacro("__PREFETCHI__");
- if (HasPREFETCHWT1)
- Builder.defineMacro("__PREFETCHWT1__");
if (HasCLZERO)
Builder.defineMacro("__CLZERO__");
if (HasKL)
@@ -1084,9 +1069,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("avx512vpopcntdq", true)
.Case("avx512vnni", true)
.Case("avx512bf16", true)
- .Case("avx512er", true)
.Case("avx512fp16", true)
- .Case("avx512pf", true)
.Case("avx512dq", true)
.Case("avx512bitalg", true)
.Case("avx512bw", true)
@@ -1134,7 +1117,6 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("pku", true)
.Case("popcnt", true)
.Case("prefetchi", true)
- .Case("prefetchwt1", true)
.Case("prfchw", true)
.Case("ptwrite", true)
.Case("raoint", true)
@@ -1201,9 +1183,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("avx512vpopcntdq", HasAVX512VPOPCNTDQ)
.Case("avx512vnni", HasAVX512VNNI)
.Case("avx512bf16", HasAVX512BF16)
- .Case("avx512er", HasAVX512ER)
.Case("avx512fp16", HasAVX512FP16)
- .Case("avx512pf", HasAVX512PF)
.Case("avx512dq", HasAVX512DQ)
.Case("avx512bitalg", HasAVX512BITALG)
.Case("avx512bw", HasAVX512BW)
@@ -1253,7 +1233,6 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("pku", HasPKU)
.Case("popcnt", HasPOPCNT)
.Case("prefetchi", HasPREFETCHI)
- .Case("prefetchwt1", HasPREFETCHWT1)
.Case("prfchw", HasPRFCHW)
.Case("ptwrite", HasPTWRITE)
.Case("raoint", HasRAOINT)
diff --git a/clang/lib/Basic/Targets/X86.h b/clang/lib/Basic/Targets/X86.h
index 6a0a6cb84203..0633b7e0da96 100644
--- a/clang/lib/Basic/Targets/X86.h
+++ b/clang/lib/Basic/Targets/X86.h
@@ -103,8 +103,6 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasAVX512VNNI = false;
bool HasAVX512FP16 = false;
bool HasAVX512BF16 = false;
- bool HasAVX512ER = false;
- bool HasAVX512PF = false;
bool HasAVX512DQ = false;
bool HasAVX512BITALG = false;
bool HasAVX512BW = false;
@@ -136,7 +134,6 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasCLWB = false;
bool HasMOVBE = false;
bool HasPREFETCHI = false;
- bool HasPREFETCHWT1 = false;
bool HasRDPID = false;
bool HasRDPRU = false;
bool HasRetpolineExternalThunk = false;
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index 07452b18a85e..fbf942d06ca6 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -150,7 +150,7 @@ namespace {
Address getAtomicAddress() const {
llvm::Type *ElTy;
if (LVal.isSimple())
- ElTy = LVal.getAddress(CGF).getElementType();
+ ElTy = LVal.getAddress().getElementType();
else if (LVal.isBitField())
ElTy = LVal.getBitFieldAddress().getElementType();
else if (LVal.isVectorElt())
@@ -363,7 +363,7 @@ bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
assert(LVal.isSimple());
- Address addr = LVal.getAddress(CGF);
+ Address addr = LVal.getAddress();
if (!requiresMemSetZero(addr.getElementType()))
return false;
@@ -1603,7 +1603,7 @@ Address AtomicInfo::materializeRValue(RValue rvalue) const {
LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
AtomicInfo Atomics(CGF, TempLV);
Atomics.emitCopyIntoMemory(rvalue);
- return TempLV.getAddress(CGF);
+ return TempLV.getAddress();
}
llvm::Value *AtomicInfo::getScalarRValValueOrNull(RValue RVal) const {
@@ -1951,7 +1951,7 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
// maybe for address-space qualification.
assert(!rvalue.isAggregate() ||
rvalue.getAggregateAddress().getElementType() ==
- dest.getAddress(*this).getElementType());
+ dest.getAddress().getElementType());
AtomicInfo atomics(*this, dest);
LValue LVal = atomics.getAtomicLValue();
@@ -2024,10 +2024,10 @@ std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
// maybe for address-space qualification.
assert(!Expected.isAggregate() ||
Expected.getAggregateAddress().getElementType() ==
- Obj.getAddress(*this).getElementType());
+ Obj.getAddress().getElementType());
assert(!Desired.isAggregate() ||
Desired.getAggregateAddress().getElementType() ==
- Obj.getAddress(*this).getElementType());
+ Obj.getAddress().getElementType());
AtomicInfo Atomics(*this, Obj);
return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
@@ -2068,7 +2068,7 @@ void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
// Evaluate the expression directly into the destination.
AggValueSlot slot = AggValueSlot::forLValue(
- dest, *this, AggValueSlot::IsNotDestructed,
+ dest, AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
AggValueSlot::DoesNotOverlap,
Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index 2742c39965b2..bf50f2025de5 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -927,7 +927,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
/*RefersToEnclosingVariableOrCapture*/ CI.isNested(),
type.getNonReferenceType(), VK_LValue,
SourceLocation());
- src = EmitDeclRefLValue(&declRef).getAddress(*this);
+ src = EmitDeclRefLValue(&declRef).getAddress();
};
// For byrefs, we just write the pointer to the byref struct into
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index e251091c6ce3..5edf8c797091 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -57,6 +57,7 @@
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
+#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ScopedPrinter.h"
@@ -5609,8 +5610,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
- llvm::Value *Range = NDRangeL.getAddress(*this).emitRawPointer(*this);
- llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType();
+ llvm::Value *Range = NDRangeL.getAddress().emitRawPointer(*this);
+ llvm::Type *RangeTy = NDRangeL.getAddress().getType();
if (NumArgs == 4) {
// The most basic form of the call with parameters:
@@ -5629,7 +5630,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
AttrBuilder B(Builder.getContext());
- B.addByValAttr(NDRangeL.getAddress(*this).getElementType());
+ B.addByValAttr(NDRangeL.getAddress().getElementType());
llvm::AttributeList ByValAttrSet =
llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
@@ -5817,7 +5818,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
getContext().getTargetAddressSpace(LangAS::opencl_generic));
LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
- llvm::Value *NDRange = NDRangeL.getAddress(*this).emitRawPointer(*this);
+ llvm::Value *NDRange = NDRangeL.getAddress().emitRawPointer(*this);
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
Value *Kernel =
@@ -18327,6 +18328,29 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
return nullptr;
}
+void CodeGenFunction::AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
+ const CallExpr *E) {
+ constexpr const char *Tag = "amdgpu-as";
+
+ LLVMContext &Ctx = Inst->getContext();
+ SmallVector<MMRAMetadata::TagT, 3> MMRAs;
+ for (unsigned K = 2; K < E->getNumArgs(); ++K) {
+ llvm::Value *V = EmitScalarExpr(E->getArg(K));
+ StringRef AS;
+ if (llvm::getConstantStringInfo(V, AS)) {
+ MMRAs.push_back({Tag, AS});
+ // TODO: Delete the resulting unused constant?
+ continue;
+ }
+ CGM.Error(E->getExprLoc(),
+ "expected an address space name as a string literal");
+ }
+
+ llvm::sort(MMRAs);
+ MMRAs.erase(llvm::unique(MMRAs), MMRAs.end());
+ Inst->setMetadata(LLVMContext::MD_mmra, MMRAMetadata::getMD(Ctx, MMRAs));
+}
+
Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent;
@@ -18997,7 +19021,10 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_fence: {
ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
EmitScalarExpr(E->getArg(1)), AO, SSID);
- return Builder.CreateFence(AO, SSID);
+ FenceInst *Fence = Builder.CreateFence(AO, SSID);
+ if (E->getNumArgs() > 2)
+ AddAMDGPUFenceAddressSpaceMMRA(Fence, E);
+ return Fence;
}
case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
@@ -21230,6 +21257,17 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_storef16_f32);
return Builder.CreateCall(Callee, {Val, Addr});
}
+ case WebAssembly::BI__builtin_wasm_splat_f16x8: {
+ Value *Val = EmitScalarExpr(E->getArg(0));
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_splat_f16x8);
+ return Builder.CreateCall(Callee, {Val});
+ }
+ case WebAssembly::BI__builtin_wasm_extract_lane_f16x8: {
+ Value *Vector = EmitScalarExpr(E->getArg(0));
+ Value *Index = EmitScalarExpr(E->getArg(1));
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_extract_lane_f16x8);
+ return Builder.CreateCall(Callee, {Vector, Index});
+ }
case WebAssembly::BI__builtin_wasm_table_get: {
assert(E->getArg(0)->getType()->isArrayType());
Value *Table = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this);
@@ -21592,7 +21630,7 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
// Handle aggregate argument, namely RVV tuple types in segment load/store
if (hasAggregateEvaluationKind(E->getArg(i)->getType())) {
LValue L = EmitAggExprToLValue(E->getArg(i));
- llvm::Value *AggValue = Builder.CreateLoad(L.getAddress(*this));
+ llvm::Value *AggValue = Builder.CreateLoad(L.getAddress());
Ops.push_back(AggValue);
continue;
}
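The new `AddAMDGPUFenceAddressSpaceMMRA` above collects the builtin's
trailing string arguments (address-space names), then sorts and
deduplicates the `{"amdgpu-as", AS}` tags before attaching them as MMRA
metadata to the fence. A stand-alone sketch of just that collection step
(LLVM's MMRA types are not reproduced here):

#include <algorithm>
#include <string>
#include <utility>
#include <vector>

using Tag = std::pair<std::string, std::string>; // {"amdgpu-as", <name>}

// Sort and deduplicate the per-address-space tags, mirroring the
// llvm::sort + llvm::unique sequence in the hunk above.
std::vector<Tag> collectFenceTags(const std::vector<std::string> &ASNames) {
  std::vector<Tag> Tags;
  Tags.reserve(ASNames.size());
  for (const std::string &AS : ASNames)
    Tags.emplace_back("amdgpu-as", AS);
  std::sort(Tags.begin(), Tags.end());
  Tags.erase(std::unique(Tags.begin(), Tags.end()), Tags.end());
  return Tags;
}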
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index 1b4ca2a8b2fe..97449a5e51e7 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -1051,12 +1051,12 @@ void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
auto Exp = getTypeExpansion(Ty, getContext());
if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
forConstantArrayExpansion(
- *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
+ *this, CAExp, LV.getAddress(), [&](Address EltAddr) {
LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
});
} else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
- Address This = LV.getAddress(*this);
+ Address This = LV.getAddress();
for (const CXXBaseSpecifier *BS : RExp->Bases) {
// Perform a single step derived-to-base conversion.
Address Base =
@@ -1088,7 +1088,7 @@ void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
// pointer type they use (see D118744). Once clang uses opaque pointers
// all LLVM pointer types will be the same and we can remove this check.
if (Arg->getType()->isPointerTy()) {
- Address Addr = LV.getAddress(*this);
+ Address Addr = LV.getAddress();
Arg = Builder.CreateBitCast(Arg, Addr.getElementType());
}
EmitStoreOfScalar(Arg, LV);
@@ -1101,7 +1101,7 @@ void CodeGenFunction::ExpandTypeToArgs(
SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
auto Exp = getTypeExpansion(Ty, getContext());
if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
- Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
+ Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
: Arg.getKnownRValue().getAggregateAddress();
forConstantArrayExpansion(
*this, CAExp, Addr, [&](Address EltAddr) {
@@ -1112,7 +1112,7 @@ void CodeGenFunction::ExpandTypeToArgs(
IRCallArgPos);
});
} else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
- Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
+ Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
: Arg.getKnownRValue().getAggregateAddress();
for (const CXXBaseSpecifier *BS : RExp->Bases) {
// Perform a single step derived-to-base conversion.
@@ -4136,7 +4136,7 @@ static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF) {
static void emitWriteback(CodeGenFunction &CGF,
const CallArgList::Writeback &writeback) {
const LValue &srcLV = writeback.Source;
- Address srcAddr = srcLV.getAddress(CGF);
+ Address srcAddr = srcLV.getAddress();
assert(!isProvablyNull(srcAddr.getBasePointer()) &&
"shouldn't have writeback for provably null argument");
@@ -4243,7 +4243,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
}
- Address srcAddr = srcLV.getAddress(CGF);
+ Address srcAddr = srcLV.getAddress();
// The dest and src types don't necessarily match in LLVM terms
// because of the crazy ObjC compatibility rules.
@@ -4649,7 +4649,7 @@ RValue CallArg::getRValue(CodeGenFunction &CGF) const {
CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
LV.isVolatile());
IsUsed = true;
- return RValue::getAggregate(Copy.getAddress(CGF));
+ return RValue::getAggregate(Copy.getAddress());
}
void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
@@ -4659,7 +4659,7 @@ void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
else if (!HasLV && RV.isComplex())
CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
else {
- auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
+ auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
// We assume that call args are never copied into subobjects.
CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
@@ -5147,7 +5147,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
if (I->isAggregate()) {
RawAddress Addr = I->hasLValue()
- ? I->getKnownLValue().getAddress(*this)
+ ? I->getKnownLValue().getAddress()
: I->getKnownRValue().getAggregateAddress();
llvm::Instruction *Placeholder =
cast<llvm::Instruction>(Addr.getPointer());
@@ -5192,18 +5192,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::Indirect:
case ABIArgInfo::IndirectAliased: {
assert(NumIRArgs == 1);
- if (!I->isAggregate()) {
- // Make a temporary alloca to pass the argument.
- RawAddress Addr = CreateMemTempWithoutCast(
- I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
-
- llvm::Value *Val = getAsNaturalPointerTo(Addr, I->Ty);
- if (ArgHasMaybeUndefAttr)
- Val = Builder.CreateFreeze(Val);
- IRCallArgs[FirstIRArg] = Val;
-
- I->copyInto(*this, Addr);
- } else {
+ if (I->isAggregate()) {
// We want to avoid creating an unnecessary temporary+copy here;
// however, we need one in three cases:
// 1. If the argument is not byval, and we are required to copy the
@@ -5213,7 +5202,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// 3. If the argument is byval, but RV is not located in default
// or alloca address space.
Address Addr = I->hasLValue()
- ? I->getKnownLValue().getAddress(*this)
+ ? I->getKnownLValue().getAddress()
: I->getKnownRValue().getAggregateAddress();
CharUnits Align = ArgInfo.getIndirectAlign();
const llvm::DataLayout *TD = &CGM.getDataLayout();
@@ -5256,28 +5245,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
}
- if (NeedCopy) {
- // Create an aligned temporary, and copy to it.
- RawAddress AI = CreateMemTempWithoutCast(
- I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
- llvm::Value *Val = getAsNaturalPointerTo(AI, I->Ty);
- if (ArgHasMaybeUndefAttr)
- Val = Builder.CreateFreeze(Val);
- IRCallArgs[FirstIRArg] = Val;
-
- // Emit lifetime markers for the temporary alloca.
- llvm::TypeSize ByvalTempElementSize =
- CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
- llvm::Value *LifetimeSize =
- EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
-
- // Add cleanup code to emit the end lifetime marker after the call.
- if (LifetimeSize) // In case we disabled lifetime markers.
- CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
-
- // Generate the copy.
- I->copyInto(*this, AI);
- } else {
+ if (!NeedCopy) {
// Skip the extra memcpy call.
llvm::Value *V = getAsNaturalPointerTo(Addr, I->Ty);
auto *T = llvm::PointerType::get(
@@ -5289,8 +5257,31 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (ArgHasMaybeUndefAttr)
Val = Builder.CreateFreeze(Val);
IRCallArgs[FirstIRArg] = Val;
+ break;
}
}
+
+ // For non-aggregate args, and for aggregate args that meet the conditions
+ // above, we need to create an aligned temporary and copy into it.
+ RawAddress AI = CreateMemTempWithoutCast(
+ I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
+ llvm::Value *Val = getAsNaturalPointerTo(AI, I->Ty);
+ if (ArgHasMaybeUndefAttr)
+ Val = Builder.CreateFreeze(Val);
+ IRCallArgs[FirstIRArg] = Val;
+
+ // Emit lifetime markers for the temporary alloca.
+ llvm::TypeSize ByvalTempElementSize =
+ CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
+ llvm::Value *LifetimeSize =
+ EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
+
+ // Add cleanup code to emit the end lifetime marker after the call.
+ if (LifetimeSize) // In case we disabled lifetime markers.
+ CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
+
+ // Generate the copy.
+ I->copyInto(*this, AI);
break;
}
@@ -5309,7 +5300,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
V = I->getKnownRValue().getScalarVal();
else
V = Builder.CreateLoad(
- I->hasLValue() ? I->getKnownLValue().getAddress(*this)
+ I->hasLValue() ? I->getKnownLValue().getAddress()
: I->getKnownRValue().getAggregateAddress());
// Implement swifterror by copying into a new swifterror argument.
@@ -5372,7 +5363,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Src = CreateMemTemp(I->Ty, "coerce");
I->copyInto(*this, Src);
} else {
- Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
+ Src = I->hasLValue() ? I->getKnownLValue().getAddress()
: I->getKnownRValue().getAggregateAddress();
}
@@ -5459,7 +5450,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Address addr = Address::invalid();
RawAddress AllocaAddr = RawAddress::invalid();
if (I->isAggregate()) {
- addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
+ addr = I->hasLValue() ? I->getKnownLValue().getAddress()
: I->getKnownRValue().getAggregateAddress();
} else {
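The indirect-argument hunks in CGCall.cpp above fold what used to be two
copies of the temporary-creation logic into a single shared tail. A
shape-only sketch of the new control flow, with placeholder helpers that
are not part of the patch:

// Placeholders standing in for the real CodeGen helpers (declared only to
// show the shape of the logic).
bool mustCopyAggregate();        // the three cases listed in the comment above
void passAddressDirectly();      // the "skip the extra memcpy call" path
void createAlignedTempAndCopy(); // byval-temp + lifetime markers + copyInto

void emitIndirectArg(bool IsAggregate) {
  if (IsAggregate && !mustCopyAggregate()) {
    passAddressDirectly(); // aggregate is already usable in place
    return;                // `break` in the original switch
  }
  // Shared tail: non-aggregate args, and aggregates that need a copy, both
  // get an aligned temporary with lifetime markers and a copy into it.
  createAlignedTempAndCopy();
}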
diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp
index b3077292f4a2..b8cb78266130 100644
--- a/clang/lib/CodeGen/CGClass.cpp
+++ b/clang/lib/CodeGen/CGClass.cpp
@@ -680,7 +680,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
// the constructor.
QualType::DestructionKind dtorKind = FieldType.isDestructedType();
if (CGF.needsEHCleanup(dtorKind))
- CGF.pushEHDestroy(dtorKind, LHS.getAddress(CGF), FieldType);
+ CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
return;
}
}
@@ -705,9 +705,9 @@ void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS,
break;
case TEK_Aggregate: {
AggValueSlot Slot = AggValueSlot::forLValue(
- LHS, *this, AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
- getOverlapForFieldInit(Field), AggValueSlot::IsNotZeroed,
+ LHS, AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased, getOverlapForFieldInit(Field),
+ AggValueSlot::IsNotZeroed,
// Checks are made by the code that calls constructor.
AggValueSlot::IsSanitizerChecked);
EmitAggExpr(Init, Slot);
@@ -719,7 +719,7 @@ void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS,
// later in the constructor.
QualType::DestructionKind dtorKind = FieldType.isDestructedType();
if (needsEHCleanup(dtorKind))
- pushEHDestroy(dtorKind, LHS.getAddress(*this), FieldType);
+ pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
}
/// Checks whether the given constructor is a valid subject for the
@@ -983,8 +983,8 @@ namespace {
LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);
emitMemcpyIR(
- Dest.isBitField() ? Dest.getBitFieldAddress() : Dest.getAddress(CGF),
- Src.isBitField() ? Src.getBitFieldAddress() : Src.getAddress(CGF),
+ Dest.isBitField() ? Dest.getBitFieldAddress() : Dest.getAddress(),
+ Src.isBitField() ? Src.getBitFieldAddress() : Src.getAddress(),
MemcpySize);
reset();
}
@@ -1131,7 +1131,7 @@ namespace {
continue;
LValue FieldLHS = LHS;
EmitLValueForAnyFieldInitialization(CGF, MemberInit, FieldLHS);
- CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(CGF), FieldType);
+ CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(), FieldType);
}
}
@@ -1647,7 +1647,7 @@ namespace {
LValue LV = CGF.EmitLValueForField(ThisLV, field);
assert(LV.isSimple());
- CGF.emitDestroy(LV.getAddress(CGF), field->getType(), destroyer,
+ CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
flags.isForNormalCleanup() && useEHCleanupForArray);
}
};
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
index 9cc67cdbe424..4a213990d1e3 100644
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -738,18 +738,17 @@ static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
LValue srcLV = CGF.EmitLValue(srcExpr);
// Handle a formal type change to avoid asserting.
- auto srcAddr = srcLV.getAddress(CGF);
+ auto srcAddr = srcLV.getAddress();
if (needsCast) {
- srcAddr =
- srcAddr.withElementType(destLV.getAddress(CGF).getElementType());
+ srcAddr = srcAddr.withElementType(destLV.getAddress().getElementType());
}
// If it was an l-value, use objc_copyWeak.
if (srcExpr->isLValue()) {
- CGF.EmitARCCopyWeak(destLV.getAddress(CGF), srcAddr);
+ CGF.EmitARCCopyWeak(destLV.getAddress(), srcAddr);
} else {
assert(srcExpr->isXValue());
- CGF.EmitARCMoveWeak(destLV.getAddress(CGF), srcAddr);
+ CGF.EmitARCMoveWeak(destLV.getAddress(), srcAddr);
}
return true;
}
@@ -767,7 +766,7 @@ static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
static void drillIntoBlockVariable(CodeGenFunction &CGF,
LValue &lvalue,
const VarDecl *var) {
- lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(CGF), var));
+ lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(), var));
}
void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
@@ -826,18 +825,17 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
if (capturedByInit) {
// We can use a simple GEP for this because it can't have been
// moved yet.
- tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(*this),
+ tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(),
cast<VarDecl>(D),
/*follow*/ false));
}
- auto ty =
- cast<llvm::PointerType>(tempLV.getAddress(*this).getElementType());
+ auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());
// If __weak, we want to use a barrier under certain conditions.
if (lifetime == Qualifiers::OCL_Weak)
- EmitARCInitWeak(tempLV.getAddress(*this), zero);
+ EmitARCInitWeak(tempLV.getAddress(), zero);
// Otherwise just do a simple store.
else
@@ -880,9 +878,9 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
if (accessedByInit)
- EmitARCStoreWeak(lvalue.getAddress(*this), value, /*ignored*/ true);
+ EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
else
- EmitARCInitWeak(lvalue.getAddress(*this), value);
+ EmitARCInitWeak(lvalue.getAddress(), value);
return;
}
@@ -1620,7 +1618,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
LValue Base = MakeAddrLValue(AddrSizePair.first, D.getType(),
CGM.getContext().getDeclAlign(&D),
AlignmentSource::Decl);
- address = Base.getAddress(*this);
+ address = Base.getAddress();
// Push a cleanup block to emit the call to __kmpc_free_shared in the
// appropriate location at the end of the scope of the
@@ -2034,10 +2032,10 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
else if (auto *FD = dyn_cast<FieldDecl>(D))
Overlap = getOverlapForFieldInit(FD);
// TODO: how can we delay here if D is captured by its initializer?
- EmitAggExpr(init, AggValueSlot::forLValue(
- lvalue, *this, AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased, Overlap));
+ EmitAggExpr(init,
+ AggValueSlot::forLValue(lvalue, AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased, Overlap));
}
return;
}
@@ -2683,7 +2681,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
// objc_storeStrong attempts to release its old value.
llvm::Value *Null = CGM.EmitNullConstant(D.getType());
EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
- EmitARCStoreStrongCall(lv.getAddress(*this), ArgVal, true);
+ EmitARCStoreStrongCall(lv.getAddress(), ArgVal, true);
DoStore = false;
}
else
diff --git a/clang/lib/CodeGen/CGDeclCXX.cpp b/clang/lib/CodeGen/CGDeclCXX.cpp
index e08a1e5f42df..b047279912f6 100644
--- a/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -57,7 +57,7 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
return;
case TEK_Aggregate:
CGF.EmitAggExpr(Init,
- AggValueSlot::forLValue(lv, CGF, AggValueSlot::IsDestructed,
+ AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased,
AggValueSlot::DoesNotOverlap));
diff --git a/clang/lib/CodeGen/CGException.cpp b/clang/lib/CodeGen/CGException.cpp
index 34f289334a7d..bb2ed237ee9f 100644
--- a/clang/lib/CodeGen/CGException.cpp
+++ b/clang/lib/CodeGen/CGException.cpp
@@ -1052,7 +1052,8 @@ static void emitWasmCatchPadBlock(CodeGenFunction &CGF,
CGF.Builder.CreateStore(Exn, CGF.getExceptionSlot());
llvm::CallInst *Selector = CGF.Builder.CreateCall(GetSelectorFn, CPI);
- llvm::Function *TypeIDFn = CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
+ llvm::Function *TypeIDFn =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for, {CGF.VoidPtrTy});
// If there's only a single catch-all, branch directly to its handler.
if (CatchScope.getNumHandlers() == 1 &&
@@ -1137,7 +1138,7 @@ static void emitCatchDispatchBlock(CodeGenFunction &CGF,
// Select the right handler.
llvm::Function *llvm_eh_typeid_for =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for, {CGF.VoidPtrTy});
llvm::Type *argTy = llvm_eh_typeid_for->getArg(0)->getType();
LangAS globAS = CGF.CGM.GetGlobalVarAddressSpace(nullptr);
@@ -1988,8 +1989,7 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
LValue ThisFieldLValue =
EmitLValueForLambdaField(LambdaThisCaptureField);
if (!LambdaThisCaptureField->getType()->isPointerType()) {
- CXXThisValue =
- ThisFieldLValue.getAddress(*this).emitRawPointer(*this);
+ CXXThisValue = ThisFieldLValue.getAddress().emitRawPointer(*this);
} else {
CXXThisValue = EmitLoadOfLValue(ThisFieldLValue, SourceLocation())
.getScalarVal();
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index d96c7bb1e568..d6478cc6835d 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -317,8 +317,8 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
CleanupKind CleanupKind;
if (Lifetime == Qualifiers::OCL_Strong) {
const ValueDecl *VD = M->getExtendingDecl();
- bool Precise =
- VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
+ bool Precise = isa_and_nonnull<VarDecl>(VD) &&
+ VD->hasAttr<ObjCPreciseLifetimeAttr>();
CleanupKind = CGF.getARCCleanupKind();
Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
: &CodeGenFunction::destroyARCStrongImprecise;
@@ -605,7 +605,7 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
LV = EmitLValueForField(LV, Adjustment.Field);
assert(LV.isSimple() &&
"materialized temporary field is not a simple lvalue");
- Object = LV.getAddress(*this);
+ Object = LV.getAddress();
break;
}
@@ -1123,7 +1123,7 @@ llvm::Value *CodeGenFunction::EmitCountedByFieldExpr(
getPointerAlign(), "dre.load");
} else if (const MemberExpr *ME = dyn_cast<MemberExpr>(StructBase)) {
LValue LV = EmitMemberExpr(ME);
- Address Addr = LV.getAddress(*this);
+ Address Addr = LV.getAddress();
Res = Addr.emitRawPointer(*this);
} else if (StructBase->getType()->isPointerType()) {
LValueBaseInfo BaseInfo;
@@ -1353,7 +1353,7 @@ static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
if (BaseInfo) *BaseInfo = LV.getBaseInfo();
if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
- return LV.getAddress(CGF);
+ return LV.getAddress();
}
}
@@ -1368,7 +1368,7 @@ static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
if (BaseInfo) *BaseInfo = LV.getBaseInfo();
if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
- return LV.getAddress(CGF);
+ return LV.getAddress();
}
}
}
@@ -1590,7 +1590,7 @@ LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
if (LV.isSimple()) {
// Defend against branches out of gnu statement expressions surrounded by
// cleanups.
- Address Addr = LV.getAddress(*this);
+ Address Addr = LV.getAddress();
llvm::Value *V = Addr.getBasePointer();
Scope.ForceCleanup({&V});
Addr.replaceBasePointer(V);
@@ -1839,7 +1839,7 @@ llvm::Value *CodeGenFunction::emitScalarConstant(
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
SourceLocation Loc) {
- return EmitLoadOfScalar(lvalue.getAddress(*this), lvalue.isVolatile(),
+ return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
lvalue.getType(), Loc, lvalue.getBaseInfo(),
lvalue.getTBAAInfo(), lvalue.isNontemporal());
}
@@ -2076,7 +2076,7 @@ static RawAddress MaybeConvertMatrixAddress(RawAddress Addr,
// (VectorType).
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
bool isInit, CodeGenFunction &CGF) {
- Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF,
+ Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
value->getType()->isVectorTy());
CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
@@ -2146,7 +2146,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
return;
}
- EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(),
+ EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
lvalue.getType(), lvalue.getBaseInfo(),
lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
}
@@ -2156,7 +2156,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
CodeGenFunction &CGF) {
assert(LV.getType()->isConstantMatrixType());
- Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF);
+ Address Addr = MaybeConvertMatrixAddress(LV.getAddress(), CGF);
LV.setAddress(Addr);
return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
}
@@ -2167,18 +2167,18 @@ static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
if (LV.isObjCWeak()) {
// load of a __weak object.
- Address AddrWeakObj = LV.getAddress(*this);
+ Address AddrWeakObj = LV.getAddress();
return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
AddrWeakObj));
}
if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
// In MRC mode, we do a load+autorelease.
if (!getLangOpts().ObjCAutoRefCount) {
- return RValue::get(EmitARCLoadWeak(LV.getAddress(*this)));
+ return RValue::get(EmitARCLoadWeak(LV.getAddress()));
}
// In ARC mode, we load retained and then consume the value.
- llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress(*this));
+ llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
Object = EmitObjCConsumeObject(LV.getType(), Object);
return RValue::get(Object);
}
@@ -2413,9 +2413,9 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
case Qualifiers::OCL_Weak:
if (isInit)
// Initialize and then skip the primitive store.
- EmitARCInitWeak(Dst.getAddress(*this), Src.getScalarVal());
+ EmitARCInitWeak(Dst.getAddress(), Src.getScalarVal());
else
- EmitARCStoreWeak(Dst.getAddress(*this), Src.getScalarVal(),
+ EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(),
/*ignore*/ true);
return;
@@ -2429,7 +2429,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
if (Dst.isObjCWeak() && !Dst.isNonGC()) {
// store into a __weak object.
- Address LvalueDst = Dst.getAddress(*this);
+ Address LvalueDst = Dst.getAddress();
llvm::Value *src = Src.getScalarVal();
CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
return;
@@ -2437,7 +2437,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
if (Dst.isObjCStrong() && !Dst.isNonGC()) {
// store into a __strong object.
- Address LvalueDst = Dst.getAddress(*this);
+ Address LvalueDst = Dst.getAddress();
llvm::Value *src = Src.getScalarVal();
if (Dst.isObjCIvar()) {
assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
@@ -2777,7 +2777,7 @@ CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
LValueBaseInfo *PointeeBaseInfo,
TBAAAccessInfo *PointeeTBAAInfo) {
llvm::LoadInst *Load =
- Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile());
+ Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
return makeNaturalAddressForPointer(Load, RefLVal.getType()->getPointeeType(),
CharUnits(), /*ForPointeeType=*/true,
@@ -3027,7 +3027,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
LValue CapLVal =
EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
CapturedStmtInfo->getContextValue());
- Address LValueAddress = CapLVal.getAddress(*this);
+ Address LValueAddress = CapLVal.getAddress();
CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
LValueAddress.getElementType(),
getContext().getDeclAlign(VD)),
@@ -3217,7 +3217,7 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
// __real is valid on scalars. This is a faster way of testing that.
// __imag can only produce an rvalue on scalars.
if (E->getOpcode() == UO_Real &&
- !LV.getAddress(*this).getElementType()->isStructTy()) {
+ !LV.getAddress().getElementType()->isStructTy()) {
assert(E->getSubExpr()->getType()->isArithmeticType());
return LV;
}
@@ -3226,8 +3226,8 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
Address Component =
(E->getOpcode() == UO_Real
- ? emitAddrOfRealComponent(LV.getAddress(*this), LV.getType())
- : emitAddrOfImagComponent(LV.getAddress(*this), LV.getType()));
+ ? emitAddrOfRealComponent(LV.getAddress(), LV.getType())
+ : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
CGM.getTBAAInfoForSubobject(LV, T));
ElemLV.getQuals().addQualifiers(LV.getQuals());
@@ -3882,7 +3882,7 @@ Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
// Expressions of array type can't be bitfields or vector elements.
LValue LV = EmitLValue(E);
- Address Addr = LV.getAddress(*this);
+ Address Addr = LV.getAddress();
// If the array type was an incomplete type, we need to make sure
// the decay ends up being the right type.
@@ -4180,15 +4180,14 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// If the base is a vector type, then we are forming a vector element lvalue
// with this subscript.
- if (E->getBase()->getType()->isVectorType() &&
+ if (E->getBase()->getType()->isSubscriptableVectorType() &&
!isa<ExtVectorElementExpr>(E->getBase())) {
// Emit the vector as an lvalue to get its address.
LValue LHS = EmitLValue(E->getBase());
auto *Idx = EmitIdxAfterBase(/*Promote*/false);
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
- return LValue::MakeVectorElt(LHS.getAddress(*this), Idx,
- E->getBase()->getType(), LHS.getBaseInfo(),
- TBAAAccessInfo());
+ return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
+ LHS.getBaseInfo(), TBAAAccessInfo());
}
// All the other cases basically behave like simple offsetting.
@@ -4300,7 +4299,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// Create a GEP with a byte offset between the FAM and count and
// use that to load the count value.
Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(
- ArrayLV.getAddress(*this), Int8PtrTy, Int8Ty);
+ ArrayLV.getAddress(), Int8PtrTy, Int8Ty);
llvm::Type *CountTy = ConvertType(CountFD->getType());
llvm::Value *Res = Builder.CreateInBoundsGEP(
@@ -4320,7 +4319,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// Propagate the alignment from the array itself to the result.
QualType arrayType = Array->getType();
Addr = emitArraySubscriptGEP(
- *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
+ *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
E->getExprLoc(), &arrayType, E->getBase());
EltBaseInfo = ArrayLV.getBaseInfo();
@@ -4359,7 +4358,7 @@ LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
llvm::Value *FinalIdx =
Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
return LValue::MakeMatrixElt(
- MaybeConvertMatrixAddress(Base.getAddress(*this), *this), FinalIdx,
+ MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
}
@@ -4372,7 +4371,7 @@ static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
if (BaseTy->isArrayType()) {
- Address Addr = BaseLVal.getAddress(CGF);
+ Address Addr = BaseLVal.getAddress();
BaseInfo = BaseLVal.getBaseInfo();
// If the array type was an incomplete type, we need to make sure
@@ -4396,7 +4395,7 @@ static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
BaseInfo.mergeForCast(TypeBaseInfo);
TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
- return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)),
+ return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
CGF.ConvertTypeForMem(ElTy), Align);
}
return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
@@ -4548,7 +4547,7 @@ LValue CodeGenFunction::EmitArraySectionExpr(const ArraySectionExpr *E,
// Propagate the alignment from the array itself to the result.
EltPtr = emitArraySubscriptGEP(
- *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
+ *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
/*signedIndices=*/false, E->getExprLoc());
BaseInfo = ArrayLV.getBaseInfo();
@@ -4608,7 +4607,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
if (Base.isSimple()) {
llvm::Constant *CV =
llvm::ConstantDataVector::get(getLLVMContext(), Indices);
- return LValue::MakeExtVectorElt(Base.getAddress(*this), CV, type,
+ return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
Base.getBaseInfo(), TBAAAccessInfo());
}
assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
@@ -4677,7 +4676,8 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
llvm::Value *ThisValue) {
bool HasExplicitObjectParameter = false;
- if (const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl)) {
+ const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl);
+ if (MD) {
HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
assert(MD->getParent()->isLambda());
assert(MD->getParent() == Field->getParent());
@@ -4694,6 +4694,17 @@ LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
else
LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
D->getType().getNonReferenceType());
+
+ // Make sure we have an lvalue to the lambda itself and not a derived class.
+ auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
+ auto *LambdaTy = cast<CXXRecordDecl>(Field->getParent());
+ if (ThisTy != LambdaTy) {
+ const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(MD);
+ Address Base = GetAddressOfBaseClass(
+ LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
+ BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
+ LambdaLV = MakeAddrLValue(Base, QualType{LambdaTy->getTypeForDecl(), 0});
+ }
} else {
QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
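The hunk above extends EmitLValueForLambdaField for C++23 explicit object parameters ("deducing this"): the object parameter of a lambda's call operator may bind to a class derived from the closure type, so a captured-field access must first convert it back to the closure type via the recorded cast path. An illustrative C++23 snippet of the situation this guards (names hypothetical):

    auto l = [x = 42](this auto &&self) { return x; }; // 'self' is deduced
    struct D : decltype(l) {};                         // derives from the closure type
    int n = D{l}(); // 'self' is a D; 'x' lives in the closure-type base subobject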
@@ -4797,7 +4808,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
field->getType()
.withCVRQualifiers(base.getVRQualifiers())
.isVolatileQualified();
- Address Addr = base.getAddress(*this);
+ Address Addr = base.getAddress();
unsigned Idx = RL.getLLVMFieldNo(field);
const RecordDecl *rec = field->getParent();
if (hasBPFPreserveStaticOffset(rec))
@@ -4873,7 +4884,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
getContext().getTypeSizeInChars(FieldType).getQuantity();
}
- Address addr = base.getAddress(*this);
+ Address addr = base.getAddress();
if (hasBPFPreserveStaticOffset(rec))
addr = wrapWithBPFPreserveStaticOffset(*this, addr);
if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
@@ -4960,7 +4971,7 @@ CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
if (!FieldType->isReferenceType())
return EmitLValueForField(Base, Field);
- Address V = emitAddrOfFieldStorage(*this, Base.getAddress(*this), Field);
+ Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);
// Make sure that the address is pointing to the right type.
llvm::Type *llvmType = ConvertTypeForMem(FieldType);
@@ -5142,8 +5153,8 @@ LValue CodeGenFunction::EmitConditionalOperatorLValue(
return EmitUnsupportedLValue(expr, "conditional operator");
if (Info.LHS && Info.RHS) {
- Address lhsAddr = Info.LHS->getAddress(*this);
- Address rhsAddr = Info.RHS->getAddress(*this);
+ Address lhsAddr = Info.LHS->getAddress();
+ Address rhsAddr = Info.RHS->getAddress();
Address result = mergeAddressesInConditionalExpr(
lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
Builder.GetInsertBlock(), expr->getType());
@@ -5232,7 +5243,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_Dynamic: {
LValue LV = EmitLValue(E->getSubExpr());
- Address V = LV.getAddress(*this);
+ Address V = LV.getAddress();
const auto *DCE = cast<CXXDynamicCastExpr>(E);
return MakeNaturalAlignRawAddrLValue(EmitDynamicCast(V, DCE), E->getType());
}
@@ -5253,7 +5264,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
if (E->changesVolatileQualification())
LV.getQuals() = E->getType().getQualifiers();
if (LV.isSimple()) {
- Address V = LV.getAddress(*this);
+ Address V = LV.getAddress();
if (V.isValid()) {
llvm::Type *T = ConvertTypeForMem(E->getType());
if (V.getElementType() != T)
@@ -5270,7 +5281,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
LValue LV = EmitLValue(E->getSubExpr());
- Address This = LV.getAddress(*this);
+ Address This = LV.getAddress();
// Perform the derived-to-base conversion
Address Base = GetAddressOfBaseClass(
@@ -5293,7 +5304,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
// Perform the base-to-derived conversion
Address Derived = GetAddressOfDerivedClass(
- LV.getAddress(*this), DerivedClassDecl, E->path_begin(), E->path_end(),
+ LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
/*NullCheckValue=*/false);
// C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
@@ -5316,7 +5327,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
CGM.EmitExplicitCastExprType(CE, this);
LValue LV = EmitLValue(E->getSubExpr());
- Address V = LV.getAddress(*this).withElementType(
+ Address V = LV.getAddress().withElementType(
ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
@@ -5335,12 +5346,12 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
E->getSubExpr()->getType().getAddressSpace(),
E->getType().getAddressSpace(), ConvertType(DestTy));
return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
- LV.getAddress(*this).getAlignment()),
+ LV.getAddress().getAlignment()),
E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
}
case CK_ObjCObjectLValueCast: {
LValue LV = EmitLValue(E->getSubExpr());
- Address V = LV.getAddress(*this).withElementType(ConvertType(E->getType()));
+ Address V = LV.getAddress().withElementType(ConvertType(E->getType()));
return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
CGM.getTBAAInfoForSubobject(LV, E->getType()));
}
@@ -5400,7 +5411,7 @@ RValue CodeGenFunction::EmitRValueForField(LValue LV,
case TEK_Complex:
return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
case TEK_Aggregate:
- return FieldLV.asAggregateRValue(*this);
+ return FieldLV.asAggregateRValue();
case TEK_Scalar:
// This routine is used to load fields one-by-one to perform a copy, so
// don't load reference fields.
@@ -6022,7 +6033,7 @@ EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
if (E->getOpcode() == BO_PtrMemI) {
BaseAddr = EmitPointerWithAlignment(E->getLHS());
} else {
- BaseAddr = EmitLValue(E->getLHS()).getAddress(*this);
+ BaseAddr = EmitLValue(E->getLHS()).getAddress();
}
llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
@@ -6047,7 +6058,7 @@ RValue CodeGenFunction::convertTempToRValue(Address addr,
case TEK_Complex:
return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
case TEK_Aggregate:
- return lvalue.asAggregateRValue(*this);
+ return lvalue.asAggregateRValue();
case TEK_Scalar:
return RValue::get(EmitLoadOfScalar(lvalue, loc));
}
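Most of the churn in this file (and the files that follow) is one mechanical API migration: LValue::getAddress() no longer takes the active CodeGenFunction. A minimal before/after sketch of a call site, assuming clang's internal CodeGen types:

    Address OldAddr = LV.getAddress(*this); // pre-patch: CodeGenFunction threaded in
    Address NewAddr = LV.getAddress();      // post-patch: zero-argument form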
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index 6172eb9cdc1b..bba00257fd4f 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -384,8 +384,8 @@ void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
}
AggValueSlot srcAgg = AggValueSlot::forLValue(
- src, CGF, AggValueSlot::IsDestructed, needsGC(type),
- AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
+ src, AggValueSlot::IsDestructed, needsGC(type), AggValueSlot::IsAliased,
+ AggValueSlot::MayOverlap);
EmitCopy(type, Dest, srcAgg);
}
@@ -423,7 +423,7 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
ASTContext &Ctx = CGF.getContext();
LValue Array = CGF.EmitLValue(E->getSubExpr());
assert(Array.isSimple() && "initializer_list array not a simple lvalue");
- Address ArrayPtr = Array.getAddress(CGF);
+ Address ArrayPtr = Array.getAddress();
const ConstantArrayType *ArrayType =
Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
@@ -747,7 +747,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
CodeGenFunction::TCK_Load);
// FIXME: Do we also need to handle property references here?
if (LV.isSimple())
- CGF.EmitDynamicCast(LV.getAddress(CGF), cast<CXXDynamicCastExpr>(E));
+ CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
else
CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
@@ -780,8 +780,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
}
LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
- Address SourceAddress =
- SourceLV.getAddress(CGF).withElementType(CGF.Int8Ty);
+ Address SourceAddress = SourceLV.getAddress().withElementType(CGF.Int8Ty);
Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);
llvm::Value *SizeVal = llvm::ConstantInt::get(
CGF.SizeTy,
@@ -1231,7 +1230,7 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
}
EmitCopy(E->getLHS()->getType(),
- AggValueSlot::forLValue(LHS, CGF, AggValueSlot::IsDestructed,
+ AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
needsGC(E->getLHS()->getType()),
AggValueSlot::IsAliased,
AggValueSlot::MayOverlap),
@@ -1253,7 +1252,7 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// Codegen the RHS so that it stores directly into the LHS.
AggValueSlot LHSSlot = AggValueSlot::forLValue(
- LHS, CGF, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
+ LHS, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
// A non-volatile aggregate destination might have volatile member.
if (!LHSSlot.isVolatile() &&
@@ -1400,9 +1399,9 @@ AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
CurField->getType().isDestructedType()) {
assert(LV.isSimple());
if (DtorKind)
- CGF.pushDestroyAndDeferDeactivation(
- NormalAndEHCleanup, LV.getAddress(CGF), CurField->getType(),
- CGF.getDestroyer(DtorKind), false);
+ CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
+ CurField->getType(),
+ CGF.getDestroyer(DtorKind), false);
}
}
}
@@ -1580,7 +1579,7 @@ AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
return;
case TEK_Aggregate:
CGF.EmitAggExpr(
- E, AggValueSlot::forLValue(LV, CGF, AggValueSlot::IsDestructed,
+ E, AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased,
AggValueSlot::MayOverlap, Dest.isZeroed()));
@@ -1619,7 +1618,7 @@ void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
// There's a potential optimization opportunity in combining
// memsets; that would be easy for arrays, but relatively
// difficult for structures with the current code.
- CGF.EmitNullInitialization(lv.getAddress(CGF), lv.getType());
+ CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
}
}
@@ -1795,9 +1794,9 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
= field->getType().isDestructedType()) {
assert(LV.isSimple());
if (dtorKind) {
- CGF.pushDestroyAndDeferDeactivation(
- NormalAndEHCleanup, LV.getAddress(CGF), field->getType(),
- CGF.getDestroyer(dtorKind), false);
+ CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
+ field->getType(),
+ CGF.getDestroyer(dtorKind), false);
pushedCleanup = true;
}
}
@@ -1880,7 +1879,7 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
if (InnerLoop) {
// If the subexpression is an ArrayInitLoopExpr, share its cleanup.
auto elementSlot = AggValueSlot::forLValue(
- elementLV, CGF, AggValueSlot::IsDestructed,
+ elementLV, AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
AggValueSlot::DoesNotOverlap);
AggExprEmitter(CGF, elementSlot, false)
@@ -2045,10 +2044,10 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
Address Temp = CreateMemTemp(E->getType());
LValue LV = MakeAddrLValue(Temp, E->getType());
- EmitAggExpr(E, AggValueSlot::forLValue(
- LV, *this, AggValueSlot::IsNotDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap));
+ EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased,
+ AggValueSlot::DoesNotOverlap));
return LV;
}
@@ -2097,8 +2096,8 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
bool isVolatile) {
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
- Address DestPtr = Dest.getAddress(*this);
- Address SrcPtr = Src.getAddress(*this);
+ Address DestPtr = Dest.getAddress();
+ Address SrcPtr = Src.getAddress();
if (getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
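A companion change in this file: AggValueSlot::forLValue drops its CodeGenFunction parameter for the same reason. Sketch of the new call shape, taken directly from the hunks above:

    AggValueSlot Slot = AggValueSlot::forLValue(
        LV, AggValueSlot::IsDestructed, needsGC(T),         // CGF argument removed
        AggValueSlot::IsAliased, AggValueSlot::MayOverlap); // other flags unchanged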
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index c18c36d3f3f3..3c4f59fc765f 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -142,7 +142,7 @@ RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
BaseQuals = PTy->getPointeeType().getQualifiers();
} else {
LValue BaseLV = EmitLValue(BaseExpr);
- BaseValue = BaseLV.getAddress(*this);
+ BaseValue = BaseLV.getAddress();
QualType BaseTy = BaseExpr->getType();
BaseQuals = BaseTy.getQualifiers();
}
@@ -298,7 +298,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
/*ImplicitParamTy=*/QualType(), CE, Args, nullptr);
EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
- /*Delegating=*/false, This.getAddress(*this), Args,
+ /*Delegating=*/false, This.getAddress(), Args,
AggValueSlot::DoesNotOverlap, CE->getExprLoc(),
/*NewPointerIsChecked=*/false);
return RValue::get(nullptr);
@@ -375,7 +375,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
if (UseVirtualCall) {
CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
- This.getAddress(*this),
+ This.getAddress(),
cast<CXXMemberCallExpr>(CE));
} else {
GlobalDecl GD(Dtor, Dtor_Complete);
@@ -403,14 +403,14 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
CGCallee Callee;
if (UseVirtualCall) {
- Callee = CGCallee::forVirtual(CE, MD, This.getAddress(*this), Ty);
+ Callee = CGCallee::forVirtual(CE, MD, This.getAddress(), Ty);
} else {
if (SanOpts.has(SanitizerKind::CFINVCall) &&
MD->getParent()->isDynamicClass()) {
llvm::Value *VTable;
const CXXRecordDecl *RD;
std::tie(VTable, RD) = CGM.getCXXABI().LoadVTablePtr(
- *this, This.getAddress(*this), CalleeDecl->getParent());
+ *this, This.getAddress(), CalleeDecl->getParent());
EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getBeginLoc());
}
@@ -429,7 +429,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
if (MD->isVirtual()) {
Address NewThisAddr =
CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
- *this, CalleeDecl, This.getAddress(*this), UseVirtualCall);
+ *this, CalleeDecl, This.getAddress(), UseVirtualCall);
This.setAddress(NewThisAddr);
}
@@ -456,7 +456,7 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
if (BO->getOpcode() == BO_PtrMemI)
This = EmitPointerWithAlignment(BaseExpr, nullptr, nullptr, KnownNonNull);
else
- This = EmitLValue(BaseExpr, KnownNonNull).getAddress(*this);
+ This = EmitLValue(BaseExpr, KnownNonNull).getAddress();
EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(*this),
QualType(MPT->getClass(), 0));
@@ -2178,7 +2178,7 @@ static bool isGLValueFromPointerDeref(const Expr *E) {
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
llvm::Type *StdTypeInfoPtrTy) {
// Get the vtable pointer.
- Address ThisPtr = CGF.EmitLValue(E).getAddress(CGF);
+ Address ThisPtr = CGF.EmitLValue(E).getAddress();
QualType SrcRecordTy = E->getType();
@@ -2216,7 +2216,12 @@ static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
}
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
- llvm::Type *PtrTy = llvm::PointerType::getUnqual(getLLVMContext());
+ // Ideally, we would like to use GlobalsInt8PtrTy here; however, we cannot,
+ // primarily because the result of applying typeid is a value of type
+ // type_info, which is declared & defined by the standard library
+ // implementation and expects to operate on the generic (default) AS.
+ // https://reviews.llvm.org/D157452 has more context, and a possible solution.
+ llvm::Type *PtrTy = Int8PtrTy;
LangAS GlobAS = CGM.GetGlobalVarAddressSpace(nullptr);
auto MaybeASCast = [=](auto &&TypeInfo) {
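The EmitCXXTypeidExpr change pins the result pointer to the generic address space because std::type_info is defined by the standard library against default-AS pointers; when globals live in another AS (as on some GPU targets), the emitted constant must be cast back. A rough sketch of that cast step, a hypothetical simplification of the MaybeASCast lambda shown above (the real body may differ):

    llvm::Type *PtrTy = Int8PtrTy;                         // generic (default) AS
    LangAS GlobAS = CGM.GetGlobalVarAddressSpace(nullptr);
    auto MaybeASCast = [=](llvm::Constant *TypeInfo) {
      if (GlobAS == LangAS::Default)
        return TypeInfo;                                   // already generic
      return llvm::ConstantExpr::getAddrSpaceCast(TypeInfo, PtrTy);
    };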
diff --git a/clang/lib/CodeGen/CGExprComplex.cpp b/clang/lib/CodeGen/CGExprComplex.cpp
index 1facadd82f17..9ef73e36f66f 100644
--- a/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/clang/lib/CodeGen/CGExprComplex.cpp
@@ -434,7 +434,7 @@ ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue,
if (lvalue.getType()->isAtomicType())
return CGF.EmitAtomicLoad(lvalue, loc).getComplexVal();
- Address SrcPtr = lvalue.getAddress(CGF);
+ Address SrcPtr = lvalue.getAddress();
bool isVolatile = lvalue.isVolatileQualified();
llvm::Value *Real = nullptr, *Imag = nullptr;
@@ -460,7 +460,7 @@ void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue,
(!isInit && CGF.LValueIsSuitableForInlineAtomic(lvalue)))
return CGF.EmitAtomicStore(RValue::getComplex(Val), lvalue, isInit);
- Address Ptr = lvalue.getAddress(CGF);
+ Address Ptr = lvalue.getAddress();
Address RealPtr = CGF.emitAddrOfRealComponent(Ptr, lvalue.getType());
Address ImagPtr = CGF.emitAddrOfImagComponent(Ptr, lvalue.getType());
@@ -551,14 +551,14 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
case CK_LValueBitCast: {
LValue origLV = CGF.EmitLValue(Op);
- Address V = origLV.getAddress(CGF).withElementType(CGF.ConvertType(DestTy));
+ Address V = origLV.getAddress().withElementType(CGF.ConvertType(DestTy));
return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy), Op->getExprLoc());
}
case CK_LValueToRValueBitCast: {
LValue SourceLVal = CGF.EmitLValue(Op);
- Address Addr = SourceLVal.getAddress(CGF).withElementType(
- CGF.ConvertTypeForMem(DestTy));
+ Address Addr =
+ SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy));
LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
return EmitLoadOfLValue(DestLV, Op->getExprLoc());
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index d84531959b50..1b144c178ce9 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -2212,7 +2212,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_LValueBitCast:
case CK_ObjCObjectLValueCast: {
- Address Addr = EmitLValue(E).getAddress(CGF);
+ Address Addr = EmitLValue(E).getAddress();
Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
return EmitLoadOfLValue(LV, CE->getExprLoc());
@@ -2220,8 +2220,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_LValueToRValueBitCast: {
LValue SourceLVal = CGF.EmitLValue(E);
- Address Addr = SourceLVal.getAddress(CGF).withElementType(
- CGF.ConvertTypeForMem(DestTy));
+ Address Addr =
+ SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy));
LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
return EmitLoadOfLValue(DestLV, CE->getExprLoc());
@@ -2772,14 +2772,14 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (isInc && type->isBooleanType()) {
llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
if (isPre) {
- Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified())
+ Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
return Builder.getTrue();
}
// For atomic bool increment, we just store true and return it for
// preincrement, and do an atomic swap with true for postincrement
return Builder.CreateAtomicRMW(
- llvm::AtomicRMWInst::Xchg, LV.getAddress(CGF), True,
+ llvm::AtomicRMWInst::Xchg, LV.getAddress(), True,
llvm::AtomicOrdering::SequentiallyConsistent);
}
// Special case for atomic increment / decrement on integers, emit
@@ -2797,7 +2797,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *amt = CGF.EmitToMemory(
llvm::ConstantInt::get(ConvertType(type), 1, true), type);
llvm::Value *old =
- Builder.CreateAtomicRMW(aop, LV.getAddress(CGF), amt,
+ Builder.CreateAtomicRMW(aop, LV.getAddress(), amt,
llvm::AtomicOrdering::SequentiallyConsistent);
return isPre ? Builder.CreateBinOp(op, old, amt) : old;
}
@@ -2810,7 +2810,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *amt = llvm::ConstantFP::get(
VMContext, llvm::APFloat(static_cast<float>(1.0)));
llvm::Value *old =
- Builder.CreateAtomicRMW(aop, LV.getAddress(CGF), amt,
+ Builder.CreateAtomicRMW(aop, LV.getAddress(), amt,
llvm::AtomicOrdering::SequentiallyConsistent);
return isPre ? Builder.CreateBinOp(op, old, amt) : old;
}
@@ -3552,7 +3552,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
E->getExprLoc()),
LHSTy);
Value *OldVal = Builder.CreateAtomicRMW(
- AtomicOp, LHSLV.getAddress(CGF), Amt,
+ AtomicOp, LHSLV.getAddress(), Amt,
llvm::AtomicOrdering::SequentiallyConsistent);
// Since operation is atomic, the result type is guaranteed to be the
@@ -4782,7 +4782,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
case Qualifiers::OCL_Weak:
RHS = Visit(E->getRHS());
LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
- RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore);
+ RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
break;
case Qualifiers::OCL_None:
@@ -5534,7 +5534,7 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
ConvertTypeForMem(BaseExpr->getType()->getPointeeType());
Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
} else {
- Addr = EmitLValue(BaseExpr).getAddress(*this);
+ Addr = EmitLValue(BaseExpr).getAddress();
}
// Cast the address to Class*.
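The atomic-boolean branch above has simple semantics: pre-increment stores true and yields true; post-increment swaps true in and yields the old value. A runnable std::atomic model of what the emitted IR does (a semantic sketch, not the codegen itself):

    #include <atomic>
    bool preInc(std::atomic<bool> &b) {
      b.store(true, std::memory_order_seq_cst); // ++b: store true ...
      return true;                              // ... and the result is true
    }
    bool postInc(std::atomic<bool> &b) {
      return b.exchange(true, std::memory_order_seq_cst); // b++: swap, old value
    }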
diff --git a/clang/lib/CodeGen/CGNonTrivialStruct.cpp b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
index 8fade0fac21e..6a02e4dbf84d 100644
--- a/clang/lib/CodeGen/CGNonTrivialStruct.cpp
+++ b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
@@ -711,7 +711,7 @@ struct GenMoveConstructor : GenBinaryFunc<GenMoveConstructor, true> {
LValue SrcLV = CGF->MakeAddrLValue(Addrs[SrcIdx], QT);
llvm::Value *SrcVal =
CGF->EmitLoadOfLValue(SrcLV, SourceLocation()).getScalarVal();
- CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress(*CGF)), SrcLV);
+ CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress()), SrcLV);
CGF->EmitStoreOfScalar(SrcVal, CGF->MakeAddrLValue(Addrs[DstIdx], QT),
/* isInitialization */ true);
}
@@ -774,7 +774,7 @@ struct GenMoveAssignment : GenBinaryFunc<GenMoveAssignment, true> {
LValue SrcLV = CGF->MakeAddrLValue(Addrs[SrcIdx], QT);
llvm::Value *SrcVal =
CGF->EmitLoadOfLValue(SrcLV, SourceLocation()).getScalarVal();
- CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress(*CGF)), SrcLV);
+ CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress()), SrcLV);
LValue DstLV = CGF->MakeAddrLValue(Addrs[DstIdx], QT);
llvm::Value *DstVal =
CGF->EmitLoadOfLValue(DstLV, SourceLocation()).getScalarVal();
@@ -810,7 +810,7 @@ void CodeGenFunction::destroyNonTrivialCStruct(CodeGenFunction &CGF,
// such structure.
void CodeGenFunction::defaultInitNonTrivialCStructVar(LValue Dst) {
GenDefaultInitialize Gen(getContext());
- Address DstPtr = Dst.getAddress(*this).withElementType(CGM.Int8PtrTy);
+ Address DstPtr = Dst.getAddress().withElementType(CGM.Int8PtrTy);
Gen.setCGF(this);
QualType QT = Dst.getType();
QT = Dst.isVolatile() ? QT.withVolatile() : QT;
@@ -842,7 +842,7 @@ getSpecialFunction(G &&Gen, StringRef FuncName, QualType QT, bool IsVolatile,
// Functions to emit calls to the special functions of a non-trivial C struct.
void CodeGenFunction::callCStructDefaultConstructor(LValue Dst) {
bool IsVolatile = Dst.isVolatile();
- Address DstPtr = Dst.getAddress(*this);
+ Address DstPtr = Dst.getAddress();
QualType QT = Dst.getType();
GenDefaultInitializeFuncName GenName(DstPtr.getAlignment(), getContext());
std::string FuncName = GenName.getName(QT, IsVolatile);
@@ -866,7 +866,7 @@ std::string CodeGenFunction::getNonTrivialDestructorStr(QualType QT,
void CodeGenFunction::callCStructDestructor(LValue Dst) {
bool IsVolatile = Dst.isVolatile();
- Address DstPtr = Dst.getAddress(*this);
+ Address DstPtr = Dst.getAddress();
QualType QT = Dst.getType();
GenDestructorFuncName GenName("__destructor_", DstPtr.getAlignment(),
getContext());
@@ -877,7 +877,7 @@ void CodeGenFunction::callCStructDestructor(LValue Dst) {
void CodeGenFunction::callCStructCopyConstructor(LValue Dst, LValue Src) {
bool IsVolatile = Dst.isVolatile() || Src.isVolatile();
- Address DstPtr = Dst.getAddress(*this), SrcPtr = Src.getAddress(*this);
+ Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress();
QualType QT = Dst.getType();
GenBinaryFuncName<false> GenName("__copy_constructor_", DstPtr.getAlignment(),
SrcPtr.getAlignment(), getContext());
@@ -891,7 +891,7 @@ void CodeGenFunction::callCStructCopyAssignmentOperator(LValue Dst, LValue Src
) {
bool IsVolatile = Dst.isVolatile() || Src.isVolatile();
- Address DstPtr = Dst.getAddress(*this), SrcPtr = Src.getAddress(*this);
+ Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress();
QualType QT = Dst.getType();
GenBinaryFuncName<false> GenName("__copy_assignment_", DstPtr.getAlignment(),
SrcPtr.getAlignment(), getContext());
@@ -902,7 +902,7 @@ void CodeGenFunction::callCStructCopyAssignmentOperator(LValue Dst, LValue Src
void CodeGenFunction::callCStructMoveConstructor(LValue Dst, LValue Src) {
bool IsVolatile = Dst.isVolatile() || Src.isVolatile();
- Address DstPtr = Dst.getAddress(*this), SrcPtr = Src.getAddress(*this);
+ Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress();
QualType QT = Dst.getType();
GenBinaryFuncName<true> GenName("__move_constructor_", DstPtr.getAlignment(),
SrcPtr.getAlignment(), getContext());
@@ -916,7 +916,7 @@ void CodeGenFunction::callCStructMoveAssignmentOperator(LValue Dst, LValue Src
) {
bool IsVolatile = Dst.isVolatile() || Src.isVolatile();
- Address DstPtr = Dst.getAddress(*this), SrcPtr = Src.getAddress(*this);
+ Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress();
QualType QT = Dst.getType();
GenBinaryFuncName<true> GenName("__move_assignment_", DstPtr.getAlignment(),
SrcPtr.getAlignment(), getContext());
diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp
index ee571995ce4c..281b2d9795f6 100644
--- a/clang/lib/CodeGen/CGObjC.cpp
+++ b/clang/lib/CodeGen/CGObjC.cpp
@@ -586,7 +586,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
method->getMethodFamily() == OMF_retain) {
if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) {
LValue lvalue = EmitLValue(lvalueExpr);
- llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress(*this));
+ llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress());
return AdjustObjCObjectType(*this, E->getType(), RValue::get(result));
}
}
@@ -1189,7 +1189,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize);
// Perform an atomic load. This does not impose ordering constraints.
- Address ivarAddr = LV.getAddress(*this);
+ Address ivarAddr = LV.getAddress();
ivarAddr = ivarAddr.withElementType(bitcastType);
llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
load->setAtomic(llvm::AtomicOrdering::Unordered);
@@ -1287,14 +1287,14 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
case TEK_Scalar: {
llvm::Value *value;
if (propType->isReferenceType()) {
- value = LV.getAddress(*this).emitRawPointer(*this);
+ value = LV.getAddress().emitRawPointer(*this);
} else {
// We want to load and autoreleaseReturnValue ARC __weak ivars.
if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
if (getLangOpts().ObjCAutoRefCount) {
value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
} else {
- value = EmitARCLoadWeak(LV.getAddress(*this));
+ value = EmitARCLoadWeak(LV.getAddress());
}
// Otherwise we want to do a simple load, suppressing the
@@ -1477,7 +1477,7 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
LValue ivarLValue =
EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
- Address ivarAddr = ivarLValue.getAddress(*this);
+ Address ivarAddr = ivarLValue.getAddress();
// Currently, all atomic accesses have to be through integer
// types, so there's no point in trying to pick a prettier type.
@@ -1655,7 +1655,7 @@ namespace {
void Emit(CodeGenFunction &CGF, Flags flags) override {
LValue lvalue
= CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
- CGF.emitDestroy(lvalue.getAddress(CGF), ivar->getType(), destroyer,
+ CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
flags.isForNormalCleanup() && useEHCleanupForArray);
}
};
@@ -1722,7 +1722,7 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
LoadObjCSelf(), Ivar, 0);
EmitAggExpr(IvarInit->getInit(),
- AggValueSlot::forLValue(LV, *this, AggValueSlot::IsDestructed,
+ AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased,
AggValueSlot::DoesNotOverlap));
@@ -2508,7 +2508,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
!isBlock &&
(dst.getAlignment().isZero() ||
dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) {
- return EmitARCStoreStrongCall(dst.getAddress(*this), newValue, ignored);
+ return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
}
// Otherwise, split it out.
@@ -2898,7 +2898,7 @@ static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
result = CGF.EmitLoadOfLValue(lvalue, SourceLocation()).getScalarVal();
} else {
assert(type.getObjCLifetime() == Qualifiers::OCL_Weak);
- result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress(CGF));
+ result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress());
}
return TryEmitResult(result, !shouldRetain);
}
@@ -2922,7 +2922,7 @@ static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
SourceLocation()).getScalarVal();
// Set the source pointer to NULL.
- CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress(CGF)), lv);
+ CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv);
return TryEmitResult(result, true);
}
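The "set the source pointer to NULL" step above is the ARC move optimization: the scalar is taken at +1 and the source is cleared so the usual retain/release pair can be skipped. A conceptual equivalent with a raw pointer standing in for the ObjC object (names hypothetical):

    void *takeAndClear(void **src) {
      void *result = *src; // take the retained value
      *src = nullptr;      // source is now empty; its cleanup becomes a no-op
      return result;       // caller owns the +1 reference
    }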
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index f56af318ff6a..f6d12d46cfc0 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -373,7 +373,7 @@ public:
/*RefersToEnclosingVariableOrCapture=*/false,
VD->getType().getNonReferenceType(), VK_LValue,
C.getLocation());
- PrivScope.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
+ PrivScope.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress());
}
(void)PrivScope.Privatize();
}
@@ -809,7 +809,7 @@ void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
}
llvm::Value *Size;
llvm::Value *SizeInChars;
- auto *ElemType = OrigAddresses[N].first.getAddress(CGF).getElementType();
+ auto *ElemType = OrigAddresses[N].first.getAddress().getElementType();
auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
if (AsArraySection) {
Size = CGF.Builder.CreatePtrDiff(ElemType,
@@ -897,15 +897,15 @@ static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
!CGF.getContext().hasSameType(BaseTy, ElTy)) {
if (const auto *PtrTy = BaseTy->getAs<PointerType>()) {
- BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(CGF), PtrTy);
+ BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
} else {
- LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(CGF), BaseTy);
+ LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(), BaseTy);
BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
}
BaseTy = BaseTy->getPointeeType();
}
return CGF.MakeAddrLValue(
- BaseLV.getAddress(CGF).withElementType(CGF.ConvertTypeForMem(ElTy)),
+ BaseLV.getAddress().withElementType(CGF.ConvertTypeForMem(ElTy)),
BaseLV.getType(), BaseLV.getBaseInfo(),
CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
}
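loadToBegin, above, peels pointer and reference layers with one load per level until the base type reaches the element type. With plain pointers the loop collapses to the following sketch:

    int loadToBeginLike(int **pp) {
      int *p = *pp; // BaseTy = int**: one load
      return *p;    // BaseTy = int*: final load reaches ElTy = int
    }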
@@ -968,7 +968,7 @@ Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
LValue BaseLValue =
loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
OriginalBaseLValue);
- Address SharedAddr = SharedAddresses[N].first.getAddress(CGF);
+ Address SharedAddr = SharedAddresses[N].first.getAddress();
llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
SharedAddr.getElementType(), BaseLValue.getPointer(CGF),
SharedAddr.emitRawPointer(CGF));
@@ -979,7 +979,7 @@ Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
SharedAddr.getElementType(), PrivatePointer, Adjustment);
return castToBase(CGF, OrigVD->getType(),
SharedAddresses[N].first.getType(),
- OriginalBaseLValue.getAddress(CGF), Ptr);
+ OriginalBaseLValue.getAddress(), Ptr);
}
BaseDecls.emplace_back(
cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
@@ -1108,11 +1108,11 @@ emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
Scope.addPrivate(
In, CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
- .getAddress(CGF));
+ .getAddress());
Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
Scope.addPrivate(
Out, CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
- .getAddress(CGF));
+ .getAddress());
(void)Scope.Privatize();
if (!IsCombiner && Out->hasInit() &&
!CGF.isTrivialInitializer(Out->getInit())) {
@@ -1946,7 +1946,7 @@ Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
if (OMPRegionInfo->getThreadIDVariable())
- return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress(CGF);
+ return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();
llvm::Value *ThreadID = getThreadID(CGF, Loc);
QualType Int32Ty =
@@ -3046,7 +3046,7 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
llvm::Value *CommonArgs[] = {
GtidParam, PartidParam, PrivatesParam, TaskPrivatesMap,
CGF.Builder
- .CreatePointerBitCastOrAddrSpaceCast(TDBase.getAddress(CGF),
+ .CreatePointerBitCastOrAddrSpaceCast(TDBase.getAddress(),
CGF.VoidPtrTy, CGF.Int8Ty)
.emitRawPointer(CGF)};
SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
@@ -3125,7 +3125,7 @@ static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
if (QualType::DestructionKind DtorKind =
Field->getType().isDestructedType()) {
LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
- CGF.pushDestroy(DtorKind, FieldLValue.getAddress(CGF), Field->getType());
+ CGF.pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType());
}
}
CGF.FinishFunction();
@@ -3233,7 +3233,7 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
LValue RefLVal =
CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
LValue RefLoadLVal = CGF.EmitLoadOfPointerLValue(
- RefLVal.getAddress(CGF), RefLVal.getType()->castAs<PointerType>());
+ RefLVal.getAddress(), RefLVal.getType()->castAs<PointerType>());
CGF.EmitStoreOfScalar(FieldLVal.getPointer(CGF), RefLoadLVal);
++Counter;
}
@@ -3305,7 +3305,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
} else if (ForDup) {
SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
SharedRefLValue = CGF.MakeAddrLValue(
- SharedRefLValue.getAddress(CGF).withAlignment(
+ SharedRefLValue.getAddress().withAlignment(
C.getDeclAlign(OriginalVD)),
SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
SharedRefLValue.getTBAAInfo());
@@ -3329,8 +3329,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
// Initialize firstprivate array using element-by-element
// initialization.
CGF.EmitOMPAggregateAssign(
- PrivateLValue.getAddress(CGF), SharedRefLValue.getAddress(CGF),
- Type,
+ PrivateLValue.getAddress(), SharedRefLValue.getAddress(), Type,
[&CGF, Elem, Init, &CapturesInfo](Address DestElement,
Address SrcElement) {
// Clean up any temporaries needed by the initialization.
@@ -3347,7 +3346,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
}
} else {
CodeGenFunction::OMPPrivateScope InitScope(CGF);
- InitScope.addPrivate(Elem, SharedRefLValue.getAddress(CGF));
+ InitScope.addPrivate(Elem, SharedRefLValue.getAddress());
(void)InitScope.Privatize();
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
CGF.EmitExprAsInit(Init, VD, PrivateLValue,
@@ -3508,7 +3507,7 @@ public:
HelperData.CounterVD->getType());
// Counter = 0;
CGF.EmitStoreOfScalar(
- llvm::ConstantInt::get(CLVal.getAddress(CGF).getElementType(), 0),
+ llvm::ConstantInt::get(CLVal.getAddress().getElementType(), 0),
CLVal);
CodeGenFunction::JumpDest &ContDest =
ContDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.cont"));
@@ -3572,7 +3571,7 @@ getPointerAndSize(CodeGenFunction &CGF, const Expr *E) {
} else if (const auto *ASE =
dyn_cast<ArraySectionExpr>(E->IgnoreParenImpCasts())) {
LValue UpAddrLVal = CGF.EmitArraySectionExpr(ASE, /*IsLowerBound=*/false);
- Address UpAddrAddress = UpAddrLVal.getAddress(CGF);
+ Address UpAddrAddress = UpAddrLVal.getAddress();
llvm::Value *UpAddr = CGF.Builder.CreateConstGEP1_32(
UpAddrAddress.getElementType(), UpAddrAddress.emitRawPointer(CGF),
/*Idx0=*/1);
@@ -4045,11 +4044,11 @@ CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
LValue Base = CGF.EmitLoadOfPointerLValue(
- DepobjLVal.getAddress(CGF).withElementType(
+ DepobjLVal.getAddress().withElementType(
CGF.ConvertTypeForMem(KmpDependInfoPtrTy)),
KmpDependInfoPtrTy->castAs<PointerType>());
Address DepObjAddr = CGF.Builder.CreateGEP(
- CGF, Base.getAddress(CGF),
+ CGF, Base.getAddress(),
llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
LValue NumDepsBase = CGF.MakeAddrLValue(
DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
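getDepobjElements relies on the depobj layout placing a bookkeeping element in front of the dependency array, which is why the code above GEPs the base address by -1. A sketch under the assumption that the record mirrors libomp's kmp_depend_info (field list abbreviated, and which field carries the count is assumed here):

    #include <cstddef>
    #include <cstdint>
    struct DependInfo { std::uintptr_t base_addr; std::size_t len; unsigned flags; };
    std::size_t numDeps(DependInfo *deps) {
      // element -1 is the header; one of its fields records the count
      return static_cast<std::size_t>(deps[-1].base_addr); // assumed field
    }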
@@ -4156,7 +4155,7 @@ SmallVector<llvm::Value *, 4> CGOpenMPRuntime::emitDepobjElementsSizes(
CGF.CreateMemTemp(C.getUIntPtrType(), "depobj.size.addr"),
C.getUIntPtrType());
CGF.Builder.CreateStore(llvm::ConstantInt::get(CGF.IntPtrTy, 0),
- NumLVal.getAddress(CGF));
+ NumLVal.getAddress());
llvm::Value *PrevVal = CGF.EmitLoadOfScalar(NumLVal, E->getExprLoc());
llvm::Value *Add = CGF.Builder.CreateNUWAdd(PrevVal, NumDeps);
CGF.EmitStoreOfScalar(Add, NumLVal);
@@ -4198,7 +4197,7 @@ void CGOpenMPRuntime::emitDepobjElements(CodeGenFunction &CGF,
CGF.Builder.CreateIntCast(NumDeps, CGF.SizeTy, /*isSigned=*/false));
llvm::Value *Pos = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
Address DepAddr = CGF.Builder.CreateGEP(CGF, DependenciesArray, Pos);
- CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(CGF), Size);
+ CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(), Size);
// Increase pos.
// pos += size;
@@ -4425,11 +4424,11 @@ void CGOpenMPRuntime::emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
ASTContext &C = CGM.getContext();
QualType FlagsTy;
getDependTypes(C, KmpDependInfoTy, FlagsTy);
- LValue Base = CGF.EmitLoadOfPointerLValue(
- DepobjLVal.getAddress(CGF), C.VoidPtrTy.castAs<PointerType>());
+ LValue Base = CGF.EmitLoadOfPointerLValue(DepobjLVal.getAddress(),
+ C.VoidPtrTy.castAs<PointerType>());
QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy),
+ Base.getAddress(), CGF.ConvertTypeForMem(KmpDependInfoPtrTy),
CGF.ConvertTypeForMem(KmpDependInfoTy));
llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
Addr.getElementType(), Addr.emitRawPointer(CGF),
@@ -4460,7 +4459,7 @@ void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
LValue Base;
std::tie(NumDeps, Base) = getDepobjElements(CGF, DepobjLVal, Loc);
- Address Begin = Base.getAddress(CGF);
+ Address Begin = Base.getAddress();
// Cast from pointer to array type to pointer to single element.
llvm::Value *End = CGF.Builder.CreateGEP(Begin.getElementType(),
Begin.emitRawPointer(CGF), NumDeps);
@@ -4646,24 +4645,21 @@ void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
*std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
const auto *LBVar =
cast<VarDecl>(cast<DeclRefExpr>(D.getLowerBoundVariable())->getDecl());
- CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(CGF),
- LBLVal.getQuals(),
+ CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(), LBLVal.getQuals(),
/*IsInitializer=*/true);
LValue UBLVal = CGF.EmitLValueForField(
Result.TDBase,
*std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
const auto *UBVar =
cast<VarDecl>(cast<DeclRefExpr>(D.getUpperBoundVariable())->getDecl());
- CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(CGF),
- UBLVal.getQuals(),
+ CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(), UBLVal.getQuals(),
/*IsInitializer=*/true);
LValue StLVal = CGF.EmitLValueForField(
Result.TDBase,
*std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
const auto *StVar =
cast<VarDecl>(cast<DeclRefExpr>(D.getStrideVariable())->getDecl());
- CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(CGF),
- StLVal.getQuals(),
+ CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(), StLVal.getQuals(),
/*IsInitializer=*/true);
// Store reductions address.
LValue RedLVal = CGF.EmitLValueForField(
@@ -4672,7 +4668,7 @@ void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
if (Data.Reductions) {
CGF.EmitStoreOfScalar(Data.Reductions, RedLVal);
} else {
- CGF.EmitNullInitialization(RedLVal.getAddress(CGF),
+ CGF.EmitNullInitialization(RedLVal.getAddress(),
CGF.getContext().VoidPtrTy);
}
enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 };
@@ -5522,8 +5518,7 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1, /*isSigned=*/true),
FlagsLVal);
} else
- CGF.EmitNullInitialization(FlagsLVal.getAddress(CGF),
- FlagsLVal.getType());
+ CGF.EmitNullInitialization(FlagsLVal.getAddress(), FlagsLVal.getType());
}
if (Data.IsReductionWithTaskMod) {
// Build call void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int
@@ -5850,7 +5845,7 @@ void CGOpenMPRuntime::emitUsesAllocatorsInit(CodeGenFunction &CGF,
.getLimitedValue());
LValue AllocatorTraitsLVal = CGF.EmitLValue(AllocatorTraits);
Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- AllocatorTraitsLVal.getAddress(CGF), CGF.VoidPtrPtrTy, CGF.VoidPtrTy);
+ AllocatorTraitsLVal.getAddress(), CGF.VoidPtrPtrTy, CGF.VoidPtrTy);
AllocatorTraitsLVal = CGF.MakeAddrLValue(Addr, CGF.getContext().VoidPtrTy,
AllocatorTraitsLVal.getBaseInfo(),
AllocatorTraitsLVal.getTBAAInfo());
@@ -7043,7 +7038,7 @@ private:
} else if ((AE && isa<CXXThisExpr>(AE->getBase()->IgnoreParenImpCasts())) ||
(OASE &&
isa<CXXThisExpr>(OASE->getBase()->IgnoreParenImpCasts()))) {
- BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
+ BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress();
} else if (OAShE &&
isa<CXXThisExpr>(OAShE->getBase()->IgnoreParenCasts())) {
BP = Address(
@@ -7053,7 +7048,7 @@ private:
} else {
// The base is the reference to the variable.
// BP = &Var.
- BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
+ BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress();
if (const auto *VD =
dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
if (std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
@@ -7252,13 +7247,13 @@ private:
LValue BaseLVal = EmitMemberExprBase(CGF, ME);
LowestElem = CGF.EmitLValueForFieldInitialization(
BaseLVal, cast<FieldDecl>(MapDecl))
- .getAddress(CGF);
+ .getAddress();
LB = CGF.EmitLoadOfReferenceLValue(LowestElem, MapDecl->getType())
- .getAddress(CGF);
+ .getAddress();
} else {
LowestElem = LB =
CGF.EmitOMPSharedLValue(I->getAssociatedExpression())
- .getAddress(CGF);
+ .getAddress();
}
// If this component is a pointer inside the base struct then we don't
@@ -7316,11 +7311,11 @@ private:
LValue BaseLVal = EmitMemberExprBase(CGF, ME);
ComponentLB =
CGF.EmitLValueForFieldInitialization(BaseLVal, FD)
- .getAddress(CGF);
+ .getAddress();
} else {
ComponentLB =
CGF.EmitOMPSharedLValue(MC.getAssociatedExpression())
- .getAddress(CGF);
+ .getAddress();
}
llvm::Value *ComponentLBPtr = ComponentLB.emitRawPointer(CGF);
llvm::Value *LBPtr = LB.emitRawPointer(CGF);
@@ -7449,7 +7444,7 @@ private:
if (IsFinalArraySection) {
Address HB =
CGF.EmitArraySectionExpr(OASE, /*IsLowerBound=*/false)
- .getAddress(CGF);
+ .getAddress();
PartialStruct.HighestElem = {FieldIndex, HB};
} else {
PartialStruct.HighestElem = {FieldIndex, LowestElem};
@@ -7462,7 +7457,7 @@ private:
if (IsFinalArraySection) {
Address HB =
CGF.EmitArraySectionExpr(OASE, /*IsLowerBound=*/false)
- .getAddress(CGF);
+ .getAddress();
PartialStruct.HighestElem = {FieldIndex, HB};
} else {
PartialStruct.HighestElem = {FieldIndex, LowestElem};
@@ -11634,7 +11629,7 @@ Address CGOpenMPRuntime::emitLastprivateConditionalInit(CodeGenFunction &CGF,
CGF.EmitStoreOfScalar(
llvm::ConstantInt::getNullValue(CGF.ConvertTypeForMem(C.CharTy)),
FiredLVal);
- return CGF.EmitLValueForField(BaseLVal, VDField).getAddress(CGF);
+ return CGF.EmitLValueForField(BaseLVal, VDField).getAddress();
}
namespace {
@@ -11820,7 +11815,7 @@ void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
const FieldDecl* FiredDecl = std::get<2>(It->getSecond());
LValue PrivLVal = CGF.EmitLValue(FoundE);
Address StructAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- PrivLVal.getAddress(CGF),
+ PrivLVal.getAddress(),
CGF.ConvertTypeForMem(CGF.getContext().getPointerType(StructTy)),
CGF.ConvertTypeForMem(StructTy));
LValue BaseLVal =
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index 87496c8e488c..28da8662f5f6 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -1103,13 +1103,13 @@ void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
VoidPtr, VarPtrTy, VD->getName() + "_on_stack");
LValue VarAddr =
CGF.MakeNaturalAlignPointeeRawAddrLValue(CastedVoidPtr, VarTy);
- Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
+ Rec.second.PrivateAddr = VarAddr.getAddress();
Rec.second.GlobalizedVal = VoidPtr;
// Assign the local allocation to the newly globalized location.
if (EscapedParam) {
CGF.EmitStoreOfScalar(ParValue, VarAddr);
- I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress(CGF));
+ I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress());
}
if (auto *DI = CGF.getDebugInfo())
VoidPtr->setDebugLoc(DI->SourceLocToDebugLoc(VD->getLocation()));
@@ -1123,7 +1123,7 @@ void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
LValue Base = CGF.MakeAddrLValue(AddrSizePair.first, VD->getType(),
CGM.getContext().getDeclAlign(VD),
AlignmentSource::Decl);
- I->getSecond().MappedParams->setVarAddr(CGF, VD, Base.getAddress(CGF));
+ I->getSecond().MappedParams->setVarAddr(CGF, VD, Base.getAddress());
}
I->getSecond().MappedParams->apply(CGF);
}
@@ -2226,7 +2226,7 @@ static llvm::Value *emitListToGlobalCopyFunction(
Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
LValue GlobLVal = CGF.EmitLValueForField(
CGF.MakeNaturalAlignRawAddrLValue(BufferPtr, StaticTy), FD);
- Address GlobAddr = GlobLVal.getAddress(CGF);
+ Address GlobAddr = GlobLVal.getAddress();
GlobLVal.setAddress(Address(GlobAddr.emitRawPointer(CGF),
CGF.ConvertTypeForMem(Private->getType()),
GlobAddr.getAlignment()));
@@ -2327,7 +2327,7 @@ static llvm::Value *emitListToGlobalReduceFunction(
Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
LValue GlobLVal = CGF.EmitLValueForField(
CGF.MakeNaturalAlignRawAddrLValue(BufferPtr, StaticTy), FD);
- Address GlobAddr = GlobLVal.getAddress(CGF);
+ Address GlobAddr = GlobLVal.getAddress();
CGF.EmitStoreOfScalar(GlobAddr.emitRawPointer(CGF), Elem,
/*Volatile=*/false, C.VoidPtrTy);
if ((*IPriv)->getType()->isVariablyModifiedType()) {
@@ -2433,7 +2433,7 @@ static llvm::Value *emitGlobalToListCopyFunction(
Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
LValue GlobLVal = CGF.EmitLValueForField(
CGF.MakeNaturalAlignRawAddrLValue(BufferPtr, StaticTy), FD);
- Address GlobAddr = GlobLVal.getAddress(CGF);
+ Address GlobAddr = GlobLVal.getAddress();
GlobLVal.setAddress(Address(GlobAddr.emitRawPointer(CGF),
CGF.ConvertTypeForMem(Private->getType()),
GlobAddr.getAlignment()));
@@ -2534,7 +2534,7 @@ static llvm::Value *emitGlobalToListReduceFunction(
Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
LValue GlobLVal = CGF.EmitLValueForField(
CGF.MakeNaturalAlignRawAddrLValue(BufferPtr, StaticTy), FD);
- Address GlobAddr = GlobLVal.getAddress(CGF);
+ Address GlobAddr = GlobLVal.getAddress();
CGF.EmitStoreOfScalar(GlobAddr.emitRawPointer(CGF), Elem,
/*Volatile=*/false, C.VoidPtrTy);
if ((*IPriv)->getType()->isVariablyModifiedType()) {
@@ -3406,7 +3406,7 @@ void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas(
if (VD->getType().getCanonicalType()->isReferenceType())
VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
VD->getType().getCanonicalType())
- .getAddress(CGF);
+ .getAddress();
CGF.EmitStoreOfScalar(VDAddr.emitRawPointer(CGF), VarLVal);
}
}
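emitGenericVarsProlog moves escaped locals into runtime-managed "globalized" storage so sibling threads can reach them; the "_on_stack" value above is the typed view of that allocation. A rough sketch of the shape, assuming the device runtime's __kmpc_alloc_shared entry point:

    #include <cstddef>
    extern "C" void *__kmpc_alloc_shared(std::size_t); // assumed runtime entry point
    void prolog(int x_param) {
      void *raw = __kmpc_alloc_shared(sizeof(int)); // runtime-managed storage
      int *x_on_stack = static_cast<int *>(raw);    // "<name>_on_stack" typed view
      *x_on_stack = x_param;                        // seed with the incoming value
    }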
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 36776846cd44..99daaa14cf3f 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -2372,13 +2372,12 @@ std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
Ty = llvm::IntegerType::get(getLLVMContext(), Size);
- return {
- Builder.CreateLoad(InputValue.getAddress(*this).withElementType(Ty)),
- nullptr};
+ return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
+ nullptr};
}
}
- Address Addr = InputValue.getAddress(*this);
+ Address Addr = InputValue.getAddress();
ConstraintStr += '*';
return {InputValue.getPointer(*this), Addr.getElementType()};
}
@@ -2574,7 +2573,7 @@ EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
// ResultTypeRequiresCast.size() elements of RegResults.
if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
- Address A = Dest.getAddress(CGF).withElementType(ResultRegTypes[i]);
+ Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
Builder.CreateStore(Tmp, A);
continue;
@@ -2776,7 +2775,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
std::max((uint64_t)LargestVectorWidth,
VT->getPrimitiveSizeInBits().getKnownMinValue());
} else {
- Address DestAddr = Dest.getAddress(*this);
+ Address DestAddr = Dest.getAddress();
// Matrix types in memory are represented by arrays, but accessed through
// vector pointers, with the alignment specified on the access operation.
// For inline assembly, update pointer arguments to use vector pointers.
@@ -3124,7 +3123,7 @@ CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
LValue CapStruct = InitCapturedStruct(S);
- return CapStruct.getAddress(*this);
+ return CapStruct.getAddress();
}
/// Creates the outlined function for a CapturedStmt.
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index ef3aa3a8e0dc..6410f9e102c9 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -100,7 +100,7 @@ public:
isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
InlinedShareds.isGlobalVarCaptured(VD)),
VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
- InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
+ InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress());
}
}
(void)InlinedShareds.Privatize();
@@ -142,7 +142,7 @@ public:
/// of used expression from loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopBasedDirective &S) {
- const DeclStmt *PreInits;
+ const Stmt *PreInits;
CodeGenFunction::OMPMapVars PreCondVars;
if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
@@ -182,17 +182,34 @@ class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
}
return false;
});
- PreInits = cast_or_null<DeclStmt>(LD->getPreInits());
+ PreInits = LD->getPreInits();
} else if (const auto *Tile = dyn_cast<OMPTileDirective>(&S)) {
- PreInits = cast_or_null<DeclStmt>(Tile->getPreInits());
+ PreInits = Tile->getPreInits();
} else if (const auto *Unroll = dyn_cast<OMPUnrollDirective>(&S)) {
- PreInits = cast_or_null<DeclStmt>(Unroll->getPreInits());
+ PreInits = Unroll->getPreInits();
} else {
llvm_unreachable("Unknown loop-based directive kind.");
}
if (PreInits) {
- for (const auto *I : PreInits->decls())
- CGF.EmitVarDecl(cast<VarDecl>(*I));
+ // CompoundStmts and DeclStmts are used as lists of PreInit statements and
+    // declarations. Since declarations must be visible in the statements
+    // that follow them, unpack the CompoundStmt they are nested in.
+ SmallVector<const Stmt *> PreInitStmts;
+ if (auto *PreInitCompound = dyn_cast<CompoundStmt>(PreInits))
+ llvm::append_range(PreInitStmts, PreInitCompound->body());
+ else
+ PreInitStmts.push_back(PreInits);
+
+ for (const Stmt *S : PreInitStmts) {
+      // EmitStmt skips any OMPCapturedExprDecls, but they need to be emitted
+      // here.
+ if (auto *PreInitDecl = dyn_cast<DeclStmt>(S)) {
+ for (Decl *I : PreInitDecl->decls())
+ CGF.EmitVarDecl(cast<VarDecl>(*I));
+ continue;
+ }
+ CGF.EmitStmt(S);
+ }
}
PreCondVars.restore(CGF);
}
@@ -276,7 +293,7 @@ public:
InlinedShareds.isGlobalVarCaptured(VD)),
VD->getType().getNonReferenceType(), VK_LValue,
C.getLocation());
- InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
+ InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress());
}
}
CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
@@ -369,8 +386,7 @@ void CodeGenFunction::GenerateOpenMPCapturedVars(
CapturedVars.push_back(CV);
} else {
assert(CurCap->capturesVariable() && "Expected capture by reference.");
- CapturedVars.push_back(
- EmitLValue(*I).getAddress(*this).emitRawPointer(*this));
+ CapturedVars.push_back(EmitLValue(*I).getAddress().emitRawPointer(*this));
}
}
}
@@ -381,11 +397,11 @@ static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
ASTContext &Ctx = CGF.getContext();
llvm::Value *CastedPtr = CGF.EmitScalarConversion(
- AddrLV.getAddress(CGF).emitRawPointer(CGF), Ctx.getUIntPtrType(),
+ AddrLV.getAddress().emitRawPointer(CGF), Ctx.getUIntPtrType(),
Ctx.getPointerType(DstType), Loc);
// FIXME: should the pointee type (DstType) be passed?
Address TmpAddr =
- CGF.MakeNaturalAlignAddrLValue(CastedPtr, DstType).getAddress(CGF);
+ CGF.MakeNaturalAlignAddrLValue(CastedPtr, DstType).getAddress();
return TmpAddr;
}
@@ -578,7 +594,7 @@ static llvm::Function *emitOutlinedFunctionPrologue(
} else if (I->capturesVariable()) {
const VarDecl *Var = I->getCapturedVar();
QualType VarTy = Var->getType();
- Address ArgAddr = ArgLVal.getAddress(CGF);
+ Address ArgAddr = ArgLVal.getAddress();
if (ArgLVal.getType()->isLValueReferenceType()) {
ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
} else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
@@ -599,12 +615,12 @@ static llvm::Function *emitOutlinedFunctionPrologue(
? castValueFromUintptr(
CGF, I->getLocation(), FD->getType(),
Args[Cnt]->getName(), ArgLVal)
- : ArgLVal.getAddress(CGF)}});
+ : ArgLVal.getAddress()}});
} else {
// If 'this' is captured, load it into CXXThisValue.
assert(I->capturesThis());
CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
- LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}});
+ LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress()}});
}
++Cnt;
++I;
@@ -674,7 +690,7 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
I->second.first ? I->second.first->getType() : Arg->getType(),
AlignmentSource::Decl);
if (LV.getType()->isAnyComplexType())
- LV.setAddress(LV.getAddress(WrapperCGF).withElementType(PI->getType()));
+ LV.setAddress(LV.getAddress().withElementType(PI->getType()));
CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
} else {
auto EI = VLASizes.find(Arg);
@@ -890,8 +906,7 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
EmitAggregateAssign(Dest, OriginalLVal, Type);
} else {
EmitOMPAggregateAssign(
- Emission.getAllocatedAddress(), OriginalLVal.getAddress(*this),
- Type,
+ Emission.getAllocatedAddress(), OriginalLVal.getAddress(), Type,
[this, VDInit, Init](Address DestElement, Address SrcElement) {
// Clean up any temporaries needed by the
// initialization.
@@ -908,7 +923,7 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
IsRegistered =
PrivateScope.addPrivate(OrigVD, Emission.getAllocatedAddress());
} else {
- Address OriginalAddr = OriginalLVal.getAddress(*this);
+ Address OriginalAddr = OriginalLVal.getAddress();
// Emit private VarDecl with copy init.
// Remap temp VDInit variable to the address of the original
// variable (for proper handling of captured global variables).
@@ -997,7 +1012,7 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
"Copyin threadprivates should have been captured!");
DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
(*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
- MasterAddr = EmitLValue(&DRE).getAddress(*this);
+ MasterAddr = EmitLValue(&DRE).getAddress();
LocalDeclMap.erase(VD);
} else {
MasterAddr =
@@ -1007,7 +1022,7 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
getContext().getDeclAlign(VD));
}
// Get the address of the threadprivate variable.
- Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
+ Address PrivateAddr = EmitLValue(*IRef).getAddress();
if (CopiedVars.size() == 1) {
// At first check if current thread is a master thread. If it is, no
// need to copy data.
@@ -1076,7 +1091,7 @@ bool CodeGenFunction::EmitOMPLastprivateClauseInit(
/*RefersToEnclosingVariableOrCapture=*/
CapturedStmtInfo->lookup(OrigVD) != nullptr,
(*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
- PrivateScope.addPrivate(DestVD, EmitLValue(&DRE).getAddress(*this));
+ PrivateScope.addPrivate(DestVD, EmitLValue(&DRE).getAddress());
// Check if the variable is also a firstprivate: in this case IInit is
// not generated. Initialization of this variable will happen in codegen
// for 'firstprivate' clause.
@@ -1239,7 +1254,7 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
RedCG.emitAggregateType(*this, Count);
AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
- RedCG.getSharedLValue(Count).getAddress(*this),
+ RedCG.getSharedLValue(Count).getAddress(),
[&Emission](CodeGenFunction &CGF) {
CGF.EmitAutoVarInit(Emission);
return true;
@@ -1260,22 +1275,20 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
// Store the address of the original variable associated with the LHS
// implicit variable.
- PrivateScope.addPrivate(LHSVD,
- RedCG.getSharedLValue(Count).getAddress(*this));
+ PrivateScope.addPrivate(LHSVD, RedCG.getSharedLValue(Count).getAddress());
PrivateScope.addPrivate(RHSVD, GetAddrOfLocalVar(PrivateVD));
} else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
isa<ArraySubscriptExpr>(IRef)) {
// Store the address of the original variable associated with the LHS
// implicit variable.
- PrivateScope.addPrivate(LHSVD,
- RedCG.getSharedLValue(Count).getAddress(*this));
+ PrivateScope.addPrivate(LHSVD, RedCG.getSharedLValue(Count).getAddress());
PrivateScope.addPrivate(RHSVD,
GetAddrOfLocalVar(PrivateVD).withElementType(
ConvertTypeForMem(RHSVD->getType())));
} else {
QualType Type = PrivateVD->getType();
bool IsArray = getContext().getAsArrayType(Type) != nullptr;
- Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this);
+ Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress();
// Store the address of the original variable associated with the LHS
// implicit variable.
if (IsArray) {
@@ -2069,7 +2082,7 @@ void CodeGenFunction::EmitOMPCanonicalLoop(const OMPCanonicalLoop *S) {
// variable and emit the body.
const DeclRefExpr *LoopVarRef = S->getLoopVarRef();
LValue LCVal = EmitLValue(LoopVarRef);
- Address LoopVarAddress = LCVal.getAddress(*this);
+ Address LoopVarAddress = LCVal.getAddress();
emitCapturedStmtCall(*this, LoopVarClosure,
{LoopVarAddress.emitRawPointer(*this), IndVar});
@@ -2210,7 +2223,7 @@ void CodeGenFunction::EmitOMPLinearClauseFinal(
DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
CapturedStmtInfo->lookup(OrigVD) != nullptr,
(*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
- Address OrigAddr = EmitLValue(&DRE).getAddress(*this);
+ Address OrigAddr = EmitLValue(&DRE).getAddress();
CodeGenFunction::OMPPrivateScope VarScope(*this);
VarScope.addPrivate(OrigVD, OrigAddr);
(void)VarScope.Privatize();
@@ -2277,7 +2290,7 @@ void CodeGenFunction::EmitOMPPrivateLoopCounters(
DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD),
LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
E->getType(), VK_LValue, E->getExprLoc());
- (void)LoopScope.addPrivate(PrivateVD, EmitLValue(&DRE).getAddress(*this));
+ (void)LoopScope.addPrivate(PrivateVD, EmitLValue(&DRE).getAddress());
} else {
(void)LoopScope.addPrivate(PrivateVD, VarEmission.getAllocatedAddress());
}
@@ -2443,13 +2456,12 @@ void CodeGenFunction::EmitOMPSimdFinal(
}
Address OrigAddr = Address::invalid();
if (CED) {
- OrigAddr =
- EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this);
+ OrigAddr = EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress();
} else {
DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD),
/*RefersToEnclosingVariableOrCapture=*/false,
(*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
- OrigAddr = EmitLValue(&DRE).getAddress(*this);
+ OrigAddr = EmitLValue(&DRE).getAddress();
}
OMPPrivateScope VarScope(*this);
VarScope.addPrivate(OrigVD, OrigAddr);
@@ -3165,16 +3177,14 @@ static void emitDistributeParallelForDistributeInnerBoundParams(
const auto &Dir = cast<OMPLoopDirective>(S);
LValue LB =
CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
- llvm::Value *LBCast =
- CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)),
- CGF.SizeTy, /*isSigned=*/false);
+ llvm::Value *LBCast = CGF.Builder.CreateIntCast(
+ CGF.Builder.CreateLoad(LB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
CapturedVars.push_back(LBCast);
LValue UB =
CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
- llvm::Value *UBCast =
- CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)),
- CGF.SizeTy, /*isSigned=*/false);
+ llvm::Value *UBCast = CGF.Builder.CreateIntCast(
+ CGF.Builder.CreateLoad(UB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
CapturedVars.push_back(UBCast);
}
@@ -3426,8 +3436,8 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
// one chunk is distributed to each thread. Note that the size of
// the chunks is unspecified in this case.
CGOpenMPRuntime::StaticRTInput StaticInit(
- IVSize, IVSigned, Ordered, IL.getAddress(CGF),
- LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF),
+ IVSize, IVSigned, Ordered, IL.getAddress(), LB.getAddress(),
+ UB.getAddress(), ST.getAddress(),
StaticChunkedOne ? Chunk : nullptr);
CGF.CGM.getOpenMPRuntime().emitForStaticInit(
CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind,
@@ -3470,9 +3480,9 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
} else {
// Emit the outer loop, which requests its work chunk [LB..UB] from
// runtime and runs the inner loop to process it.
- OMPLoopArguments LoopArguments(
- LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
- IL.getAddress(*this), Chunk, EUB);
+ OMPLoopArguments LoopArguments(LB.getAddress(), UB.getAddress(),
+ ST.getAddress(), IL.getAddress(), Chunk,
+ EUB);
LoopArguments.DKind = OMPD_for;
EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
LoopArguments, CGDispatchBounds);
@@ -3639,11 +3649,10 @@ static void emitScanBasedDirectiveFinals(
RValue::get(OMPLast));
LValue DestLVal = CGF.EmitLValue(OrigExpr);
LValue SrcLVal = CGF.EmitLValue(CopyArrayElem);
- CGF.EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(CGF),
- SrcLVal.getAddress(CGF),
- cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
- cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
- CopyOps[I]);
+ CGF.EmitOMPCopy(
+ PrivateExpr->getType(), DestLVal.getAddress(), SrcLVal.getAddress(),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
}
}
@@ -3753,7 +3762,7 @@ static void emitScanBasedDirective(
cast<OpaqueValueExpr>(
cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
RValue::get(IVal));
- LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
+ LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress();
}
PrivScope.addPrivate(LHSVD, LHSAddr);
Address RHSAddr = Address::invalid();
@@ -3764,7 +3773,7 @@ static void emitScanBasedDirective(
cast<OpaqueValueExpr>(
cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
RValue::get(OffsetIVal));
- RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
+ RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress();
}
PrivScope.addPrivate(RHSVD, RHSAddr);
++ILHS;
@@ -4078,8 +4087,8 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
OpenMPScheduleTy ScheduleKind;
ScheduleKind.Schedule = OMPC_SCHEDULE_static;
CGOpenMPRuntime::StaticRTInput StaticInit(
- /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(CGF),
- LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF));
+ /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
+ LB.getAddress(), UB.getAddress(), ST.getAddress());
CGF.CGM.getOpenMPRuntime().emitForStaticInit(
CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit);
// UB = min(UB, GlobalUB);
@@ -4858,7 +4867,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
Pair.second->getType(), VK_LValue,
Pair.second->getExprLoc());
- Scope.addPrivate(Pair.first, CGF.EmitLValue(&DRE).getAddress(CGF));
+ Scope.addPrivate(Pair.first, CGF.EmitLValue(&DRE).getAddress());
}
for (const auto &Pair : PrivatePtrs) {
Address Replacement = Address(
@@ -5505,8 +5514,8 @@ void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
*cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl()));
LValue DestLVal = EmitLValue(TempExpr);
LValue SrcLVal = EmitLValue(LHSs[I]);
- EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
- SrcLVal.getAddress(*this),
+ EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(),
+ SrcLVal.getAddress(),
cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
CopyOps[I]);
@@ -5527,11 +5536,10 @@ void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
DestLVal = EmitLValue(RHSs[I]);
SrcLVal = EmitLValue(TempExpr);
}
- EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
- SrcLVal.getAddress(*this),
- cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
- cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
- CopyOps[I]);
+ EmitOMPCopy(
+ PrivateExpr->getType(), DestLVal.getAddress(), SrcLVal.getAddress(),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
}
}
EmitBranch(IsInclusive ? OMPAfterScanBlock : OMPBeforeScanBlock);
@@ -5564,11 +5572,10 @@ void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
RValue::get(IdxVal));
LValue DestLVal = EmitLValue(CopyArrayElem);
LValue SrcLVal = EmitLValue(OrigExpr);
- EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
- SrcLVal.getAddress(*this),
- cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
- cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
- CopyOps[I]);
+ EmitOMPCopy(
+ PrivateExpr->getType(), DestLVal.getAddress(), SrcLVal.getAddress(),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
}
}
EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
@@ -5606,11 +5613,10 @@ void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
RValue::get(IdxVal));
LValue SrcLVal = EmitLValue(CopyArrayElem);
LValue DestLVal = EmitLValue(OrigExpr);
- EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
- SrcLVal.getAddress(*this),
- cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
- cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
- CopyOps[I]);
+ EmitOMPCopy(
+ PrivateExpr->getType(), DestLVal.getAddress(), SrcLVal.getAddress(),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
}
if (!IsInclusive) {
EmitBlock(ExclusiveExitBB);
@@ -5735,8 +5741,8 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
/* Chunked */ Chunk != nullptr) ||
StaticChunked) {
CGOpenMPRuntime::StaticRTInput StaticInit(
- IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(*this),
- LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
+ IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(),
+ LB.getAddress(), UB.getAddress(), ST.getAddress(),
StaticChunked ? Chunk : nullptr);
RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind,
StaticInit);
@@ -5812,8 +5818,8 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
// Emit the outer loop, which requests its work chunk [LB..UB] from
// runtime and runs the inner loop to process it.
const OMPLoopArguments LoopArguments = {
- LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
- IL.getAddress(*this), Chunk};
+ LB.getAddress(), UB.getAddress(), ST.getAddress(), IL.getAddress(),
+ Chunk};
EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments,
CodeGenLoop);
}
@@ -6127,8 +6133,7 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
// target platform.
if (BO == BO_Comma || !Update.isScalar() || !X.isSimple() ||
(!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
- (Update.getScalarVal()->getType() !=
- X.getAddress(CGF).getElementType())) ||
+ (Update.getScalarVal()->getType() != X.getAddress().getElementType())) ||
!Context.getTargetInfo().hasBuiltinAtomic(
Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
return std::make_pair(false, RValue::get(nullptr));
@@ -6144,10 +6149,10 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
};
if (!CheckAtomicSupport(Update.getScalarVal()->getType(), BO) ||
- !CheckAtomicSupport(X.getAddress(CGF).getElementType(), BO))
+ !CheckAtomicSupport(X.getAddress().getElementType(), BO))
return std::make_pair(false, RValue::get(nullptr));
- bool IsInteger = X.getAddress(CGF).getElementType()->isIntegerTy();
+ bool IsInteger = X.getAddress().getElementType()->isIntegerTy();
llvm::AtomicRMWInst::BinOp RMWOp;
switch (BO) {
case BO_Add:
@@ -6224,14 +6229,14 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
if (IsInteger)
UpdateVal = CGF.Builder.CreateIntCast(
- IC, X.getAddress(CGF).getElementType(),
+ IC, X.getAddress().getElementType(),
X.getType()->hasSignedIntegerRepresentation());
else
UpdateVal = CGF.Builder.CreateCast(llvm::Instruction::CastOps::UIToFP, IC,
- X.getAddress(CGF).getElementType());
+ X.getAddress().getElementType());
}
llvm::Value *Res =
- CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(CGF), UpdateVal, AO);
+ CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(), UpdateVal, AO);
return std::make_pair(true, RValue::get(Res));
}
@@ -6456,7 +6461,7 @@ static void emitOMPAtomicCompareExpr(
}
LValue XLVal = CGF.EmitLValue(X);
- Address XAddr = XLVal.getAddress(CGF);
+ Address XAddr = XLVal.getAddress();
auto EmitRValueWithCastIfNeeded = [&CGF, Loc](const Expr *X, const Expr *E) {
if (X->getType() == E->getType())
@@ -6472,12 +6477,12 @@ static void emitOMPAtomicCompareExpr(
llvm::Value *DVal = D ? EmitRValueWithCastIfNeeded(X, D) : nullptr;
if (auto *CI = dyn_cast<llvm::ConstantInt>(EVal))
EVal = CGF.Builder.CreateIntCast(
- CI, XLVal.getAddress(CGF).getElementType(),
+ CI, XLVal.getAddress().getElementType(),
E->getType()->hasSignedIntegerRepresentation());
if (DVal)
if (auto *CI = dyn_cast<llvm::ConstantInt>(DVal))
DVal = CGF.Builder.CreateIntCast(
- CI, XLVal.getAddress(CGF).getElementType(),
+ CI, XLVal.getAddress().getElementType(),
D->getType()->hasSignedIntegerRepresentation());
llvm::OpenMPIRBuilder::AtomicOpValue XOpVal{
@@ -6487,14 +6492,14 @@ static void emitOMPAtomicCompareExpr(
llvm::OpenMPIRBuilder::AtomicOpValue VOpVal, ROpVal;
if (V) {
LValue LV = CGF.EmitLValue(V);
- Address Addr = LV.getAddress(CGF);
+ Address Addr = LV.getAddress();
VOpVal = {Addr.emitRawPointer(CGF), Addr.getElementType(),
V->getType()->hasSignedIntegerRepresentation(),
V->getType().isVolatileQualified()};
}
if (R) {
LValue LV = CGF.EmitLValue(R);
- Address Addr = LV.getAddress(CGF);
+ Address Addr = LV.getAddress();
ROpVal = {Addr.emitRawPointer(CGF), Addr.getElementType(),
R->getType()->hasSignedIntegerRepresentation(),
R->getType().isVolatileQualified()};
@@ -8127,7 +8132,7 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
continue;
if (!CGF.LocalDeclMap.count(VD)) {
LValue GlobLVal = CGF.EmitLValue(Ref);
- GlobalsScope.addPrivate(VD, GlobLVal.getAddress(CGF));
+ GlobalsScope.addPrivate(VD, GlobLVal.getAddress());
}
}
}
@@ -8142,7 +8147,7 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
LValue GlobLVal = CGF.EmitLValue(E);
- GlobalsScope.addPrivate(VD, GlobLVal.getAddress(CGF));
+ GlobalsScope.addPrivate(VD, GlobLVal.getAddress());
}
if (isa<OMPCapturedExprDecl>(VD)) {
// Emit only those that were not explicitly referenced in clauses.
diff --git a/clang/lib/CodeGen/CGValue.h b/clang/lib/CodeGen/CGValue.h
index cc9ad10ae596..f1ba3cf95ae5 100644
--- a/clang/lib/CodeGen/CGValue.h
+++ b/clang/lib/CodeGen/CGValue.h
@@ -367,10 +367,7 @@ public:
return Addr.isValid() ? Addr.emitRawPointer(CGF) : nullptr;
}
- Address getAddress(CodeGenFunction &CGF) const {
- // FIXME: remove parameter.
- return Addr;
- }
+ Address getAddress() const { return Addr; }
void setAddress(Address address) { Addr = address; }
@@ -503,8 +500,8 @@ public:
return R;
}
- RValue asAggregateRValue(CodeGenFunction &CGF) const {
- return RValue::getAggregate(getAddress(CGF), isVolatileQualified());
+ RValue asAggregateRValue() const {
+ return RValue::getAggregate(getAddress(), isVolatileQualified());
}
};
@@ -607,11 +604,11 @@ public:
}
static AggValueSlot
- forLValue(const LValue &LV, CodeGenFunction &CGF, IsDestructed_t isDestructed,
+ forLValue(const LValue &LV, IsDestructed_t isDestructed,
NeedsGCBarriers_t needsGC, IsAliased_t isAliased,
Overlap_t mayOverlap, IsZeroed_t isZeroed = IsNotZeroed,
IsSanitizerChecked_t isChecked = IsNotSanitizerChecked) {
- return forAddr(LV.getAddress(CGF), LV.getQuals(), isDestructed, needsGC,
+ return forAddr(LV.getAddress(), LV.getQuals(), isDestructed, needsGC,
isAliased, mayOverlap, isZeroed, isChecked);
}
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
index 04abdadd9537..f0345f3b191b 100644
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -2478,11 +2478,11 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
Address CodeGenFunction::EmitVAListRef(const Expr* E) {
if (getContext().getBuiltinVaListType()->isArrayType())
return EmitPointerWithAlignment(E);
- return EmitLValue(E).getAddress(*this);
+ return EmitLValue(E).getAddress();
}
Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
- return EmitLValue(E).getAddress(*this);
+ return EmitLValue(E).getAddress();
}
void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index 5f3ee7eb943f..45585361a4fc 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -4635,6 +4635,9 @@ public:
llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
ReturnValueSlot ReturnValue);
+
+ void AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
+ const CallExpr *E);
void ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
llvm::AtomicOrdering &AO,
llvm::SyncScope::ID &SSID);
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 489c08a4d481..e4774a587707 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -368,7 +368,8 @@ CodeGenModule::CodeGenModule(ASTContext &C,
IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
IntPtrTy = llvm::IntegerType::get(LLVMContext,
C.getTargetInfo().getMaxPointerWidth());
- Int8PtrTy = llvm::PointerType::get(LLVMContext, 0);
+ Int8PtrTy = llvm::PointerType::get(LLVMContext,
+ C.getTargetAddressSpace(LangAS::Default));
const llvm::DataLayout &DL = M.getDataLayout();
AllocaInt8PtrTy =
llvm::PointerType::get(LLVMContext, DL.getAllocaAddrSpace());
@@ -4149,7 +4150,7 @@ llvm::GlobalValue::LinkageTypes getMultiversionLinkage(CodeGenModule &CGM,
}
static FunctionDecl *createDefaultTargetVersionFrom(const FunctionDecl *FD) {
- DeclContext *DeclCtx = FD->getASTContext().getTranslationUnitDecl();
+ auto *DeclCtx = const_cast<DeclContext *>(FD->getDeclContext());
TypeSourceInfo *TInfo = FD->getTypeSourceInfo();
StorageClass SC = FD->getStorageClass();
DeclarationName Name = FD->getNameInfo().getName();
@@ -5739,15 +5740,17 @@ CodeGenModule::getLLVMLinkageVarDefinition(const VarDecl *VD) {
static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
llvm::Function *newFn) {
// Fast path.
- if (old->use_empty()) return;
+ if (old->use_empty())
+ return;
llvm::Type *newRetTy = newFn->getReturnType();
- SmallVector<llvm::Value*, 4> newArgs;
+ SmallVector<llvm::Value *, 4> newArgs;
+
+ SmallVector<llvm::CallBase *> callSitesToBeRemovedFromParent;
for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end();
- ui != ue; ) {
- llvm::Value::use_iterator use = ui++; // Increment before the use is erased.
- llvm::User *user = use->getUser();
+ ui != ue; ui++) {
+ llvm::User *user = ui->getUser();
// Recognize and replace uses of bitcasts. Most calls to
// unprototyped functions will use bitcasts.
@@ -5759,8 +5762,9 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
// Recognize calls to the function.
llvm::CallBase *callSite = dyn_cast<llvm::CallBase>(user);
- if (!callSite) continue;
- if (!callSite->isCallee(&*use))
+ if (!callSite)
+ continue;
+ if (!callSite->isCallee(&*ui))
continue;
// If the return types don't match exactly, then we can't
@@ -5829,6 +5833,10 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
if (callSite->getDebugLoc())
newCall->setDebugLoc(callSite->getDebugLoc());
+ callSitesToBeRemovedFromParent.push_back(callSite);
+ }
+
+ for (auto *callSite : callSitesToBeRemovedFromParent) {
callSite->eraseFromParent();
}
}
diff --git a/clang/lib/CodeGen/CodeGenTypeCache.h b/clang/lib/CodeGen/CodeGenTypeCache.h
index 083d69214fb3..e273ebe3b060 100644
--- a/clang/lib/CodeGen/CodeGenTypeCache.h
+++ b/clang/lib/CodeGen/CodeGenTypeCache.h
@@ -51,7 +51,7 @@ struct CodeGenTypeCache {
llvm::IntegerType *PtrDiffTy;
};
- /// void*, void** in address space 0
+ /// void*, void** in the target's default address space (often 0)
union {
llvm::PointerType *UnqualPtrTy;
llvm::PointerType *VoidPtrTy;
diff --git a/clang/lib/CodeGen/CodeGenTypes.cpp b/clang/lib/CodeGen/CodeGenTypes.cpp
index e8d75eda029e..0a926e4ac27f 100644
--- a/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -523,8 +523,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
return llvm::StructType::get(getLLVMContext(), EltTys);
}
return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
- Info.EC.getKnownMinValue() *
- Info.NumVectors);
+ Info.EC.getKnownMinValue());
}
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
case BuiltinType::Id: { \
diff --git a/clang/lib/CodeGen/CoverageMappingGen.cpp b/clang/lib/CodeGen/CoverageMappingGen.cpp
index cc8ab7a5b436..6ce2d32dd292 100644
--- a/clang/lib/CodeGen/CoverageMappingGen.cpp
+++ b/clang/lib/CodeGen/CoverageMappingGen.cpp
@@ -17,6 +17,7 @@
#include "clang/Basic/FileManager.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ProfileData/Coverage/CoverageMapping.h"
@@ -190,6 +191,10 @@ public:
bool isBranch() const { return FalseCount.has_value(); }
+ bool isMCDCBranch() const {
+ return std::holds_alternative<mcdc::BranchParameters>(MCDCParams);
+ }
+
bool isMCDCDecision() const {
return std::holds_alternative<mcdc::DecisionParameters>(MCDCParams);
}
@@ -289,10 +294,36 @@ public:
return SM.getLocForEndOfFile(SM.getFileID(Loc));
}
- /// Find out where the current file is included or macro is expanded.
- SourceLocation getIncludeOrExpansionLoc(SourceLocation Loc) {
- return Loc.isMacroID() ? SM.getImmediateExpansionRange(Loc).getBegin()
- : SM.getIncludeLoc(SM.getFileID(Loc));
+  /// Find out where a macro is expanded. If the immediate result is a
+  /// <scratch space>, keep looking until the result isn't. Return a pair of
+  /// \c SourceLocation. The first element is always the begin sloc of the
+  /// found result. The second must be checked by the caller: if it has a
+  /// value, it is the end sloc of the found result; otherwise the while loop
+  /// was never entered, meaning the location was unchanged and the caller
+  /// has to learn the end sloc from somewhere else.
+ std::pair<SourceLocation, std::optional<SourceLocation>>
+ getNonScratchExpansionLoc(SourceLocation Loc) {
+ std::optional<SourceLocation> EndLoc = std::nullopt;
+ while (Loc.isMacroID() &&
+ SM.isWrittenInScratchSpace(SM.getSpellingLoc(Loc))) {
+ auto ExpansionRange = SM.getImmediateExpansionRange(Loc);
+ Loc = ExpansionRange.getBegin();
+ EndLoc = ExpansionRange.getEnd();
+ }
+ return std::make_pair(Loc, EndLoc);
+ }
+
+  /// Find out where the current file is included or a macro is expanded. If
+ /// \c AcceptScratch is set to false, keep looking for expansions until the
+ /// found sloc is not a <scratch space>.
+ SourceLocation getIncludeOrExpansionLoc(SourceLocation Loc,
+ bool AcceptScratch = true) {
+ if (!Loc.isMacroID())
+ return SM.getIncludeLoc(SM.getFileID(Loc));
+ Loc = SM.getImmediateExpansionRange(Loc).getBegin();
+ if (AcceptScratch)
+ return Loc;
+ return getNonScratchExpansionLoc(Loc).first;
}
/// Return true if \c Loc is a location in a built-in macro.
@@ -336,16 +367,35 @@ public:
llvm::SmallSet<FileID, 8> Visited;
SmallVector<std::pair<SourceLocation, unsigned>, 8> FileLocs;
- for (const auto &Region : SourceRegions) {
+ for (auto &Region : SourceRegions) {
SourceLocation Loc = Region.getBeginLoc();
+
+ // Replace Region with its definition if it is in <scratch space>.
+ auto NonScratchExpansionLoc = getNonScratchExpansionLoc(Loc);
+ auto EndLoc = NonScratchExpansionLoc.second;
+ if (EndLoc.has_value()) {
+ Loc = NonScratchExpansionLoc.first;
+ Region.setStartLoc(Loc);
+ Region.setEndLoc(EndLoc.value());
+ }
+
+ // Replace Loc with FileLoc if it is expanded with system headers.
+ if (!SystemHeadersCoverage && SM.isInSystemMacro(Loc)) {
+ auto BeginLoc = SM.getSpellingLoc(Loc);
+ auto EndLoc = SM.getSpellingLoc(Region.getEndLoc());
+ if (SM.isWrittenInSameFile(BeginLoc, EndLoc)) {
+ Loc = SM.getFileLoc(Loc);
+ Region.setStartLoc(Loc);
+ Region.setEndLoc(SM.getFileLoc(Region.getEndLoc()));
+ }
+ }
+
FileID File = SM.getFileID(Loc);
if (!Visited.insert(File).second)
continue;
- // Do not map FileID's associated with system headers unless collecting
- // coverage from system headers is explicitly enabled.
- if (!SystemHeadersCoverage && SM.isInSystemHeader(SM.getSpellingLoc(Loc)))
- continue;
+ assert(SystemHeadersCoverage ||
+ !SM.isInSystemHeader(SM.getSpellingLoc(Loc)));
unsigned Depth = 0;
for (SourceLocation Parent = getIncludeOrExpansionLoc(Loc);
@@ -461,13 +511,19 @@ public:
// Ignore regions from system headers unless collecting coverage from
// system headers is explicitly enabled.
if (!SystemHeadersCoverage &&
- SM.isInSystemHeader(SM.getSpellingLoc(LocStart)))
+ SM.isInSystemHeader(SM.getSpellingLoc(LocStart))) {
+ assert(!Region.isMCDCBranch() && !Region.isMCDCDecision() &&
+ "Don't suppress the condition in system headers");
continue;
+ }
auto CovFileID = getCoverageFileID(LocStart);
// Ignore regions that don't have a file, such as builtin macros.
- if (!CovFileID)
+ if (!CovFileID) {
+ assert(!Region.isMCDCBranch() && !Region.isMCDCDecision() &&
+ "Don't suppress the condition in non-file regions");
continue;
+ }
SourceLocation LocEnd = Region.getEndLoc();
assert(SM.isWrittenInSameFile(LocStart, LocEnd) &&
@@ -477,8 +533,11 @@ public:
// This not only suppresses redundant regions, but sometimes prevents
// creating regions with wrong counters if, for example, a statement's
// body ends at the end of a nested macro.
- if (Filter.count(std::make_pair(LocStart, LocEnd)))
+ if (Filter.count(std::make_pair(LocStart, LocEnd))) {
+ assert(!Region.isMCDCBranch() && !Region.isMCDCDecision() &&
+ "Don't suppress the condition");
continue;
+ }
// Find the spelling locations for the mapping region.
SpellingRegion SR{SM, LocStart, LocEnd};
@@ -514,7 +573,7 @@ public:
SourceRegionFilter Filter;
for (const auto &FM : FileIDMapping) {
SourceLocation ExpandedLoc = FM.second.second;
- SourceLocation ParentLoc = getIncludeOrExpansionLoc(ExpandedLoc);
+ SourceLocation ParentLoc = getIncludeOrExpansionLoc(ExpandedLoc, false);
if (ParentLoc.isInvalid())
continue;
@@ -818,6 +877,10 @@ struct CounterCoverageMappingBuilder
/// A stack of currently live regions.
llvm::SmallVector<SourceMappingRegion> RegionStack;
+  /// Set of Exprs that should be handled as leaves even though they are
+  /// binary logical operators (&&, ||).
+ llvm::DenseSet<const Stmt *> LeafExprSet;
+
/// An object to manage MCDC regions.
MCDCCoverageBuilder MCDCBuilder;
@@ -1040,7 +1103,10 @@ struct CounterCoverageMappingBuilder
// region onto RegionStack but immediately pop it (which adds it to the
// function's SourceRegions) because it doesn't apply to any other source
// code other than the Condition.
- if (CodeGenFunction::isInstrumentedCondition(C)) {
+ // With !SystemHeadersCoverage, binary logical ops in system headers may be
+ // treated as instrumentable conditions.
+ if (CodeGenFunction::isInstrumentedCondition(C) ||
+ LeafExprSet.count(CodeGenFunction::stripCond(C))) {
mcdc::Parameters BranchParams;
mcdc::ConditionID ID = MCDCBuilder.getCondID(C);
if (ID >= 0)
@@ -2070,7 +2136,20 @@ struct CounterCoverageMappingBuilder
createDecisionRegion(E, DecisionParams);
}
+ /// Check if E belongs to system headers.
+ bool isExprInSystemHeader(const BinaryOperator *E) const {
+ return (!SystemHeadersCoverage &&
+ SM.isInSystemHeader(SM.getSpellingLoc(E->getOperatorLoc())) &&
+ SM.isInSystemHeader(SM.getSpellingLoc(E->getBeginLoc())) &&
+ SM.isInSystemHeader(SM.getSpellingLoc(E->getEndLoc())));
+ }
+
void VisitBinLAnd(const BinaryOperator *E) {
+ if (isExprInSystemHeader(E)) {
+ LeafExprSet.insert(E);
+ return;
+ }
+
bool IsRootNode = MCDCBuilder.isIdle();
// Keep track of Binary Operator and assign MCDC condition IDs.
@@ -2125,6 +2204,11 @@ struct CounterCoverageMappingBuilder
}
void VisitBinLOr(const BinaryOperator *E) {
+ if (isExprInSystemHeader(E)) {
+ LeafExprSet.insert(E);
+ return;
+ }
+
bool IsRootNode = MCDCBuilder.isIdle();
// Keep track of Binary Operator and assign MCDC condition IDs.
@@ -2187,7 +2271,8 @@ struct CounterCoverageMappingBuilder
}
void VisitOpaqueValueExpr(const OpaqueValueExpr* OVE) {
- Visit(OVE->getSourceExpr());
+ if (OVE->isUnique())
+ Visit(OVE->getSourceExpr());
}
};
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 18acf7784f71..8427286dee88 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -1793,6 +1793,37 @@ void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
ThisTy, VTT, VTTTy, nullptr);
}
+// Check if any non-inline method has the specified attribute.
+template <typename T>
+static bool CXXRecordNonInlineHasAttr(const CXXRecordDecl *RD) {
+ for (const auto *D : RD->noload_decls()) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isInlined() || FD->doesThisDeclarationHaveABody() ||
+ FD->isPureVirtual())
+ continue;
+ if (D->hasAttr<T>())
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void setVTableSelectiveDLLImportExport(CodeGenModule &CGM,
+ llvm::GlobalVariable *VTable,
+ const CXXRecordDecl *RD) {
+ if (VTable->getDLLStorageClass() !=
+ llvm::GlobalVariable::DefaultStorageClass ||
+ RD->hasAttr<DLLImportAttr>() || RD->hasAttr<DLLExportAttr>())
+ return;
+
+ if (CGM.getVTables().isVTableExternal(RD)) {
+ if (CXXRecordNonInlineHasAttr<DLLImportAttr>(RD))
+ VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ } else if (CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
+ VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+}
+
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
const CXXRecordDecl *RD) {
llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
@@ -1818,6 +1849,9 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
+ if (CGM.getTarget().hasPS4DLLImportExport())
+ setVTableSelectiveDLLImportExport(CGM, VTable, RD);
+
// Set the right visibility.
CGM.setGVProperties(VTable, RD);
@@ -1905,29 +1939,6 @@ ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
VTable->getValueType(), VTable, Indices, /*InBounds=*/true, InRange);
}
-// Check whether all the non-inline virtual methods for the class have the
-// specified attribute.
-template <typename T>
-static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
- bool FoundNonInlineVirtualMethodWithAttr = false;
- for (const auto *D : RD->noload_decls()) {
- if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
- if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
- FD->doesThisDeclarationHaveABody())
- continue;
- if (!D->hasAttr<T>())
- return false;
- FoundNonInlineVirtualMethodWithAttr = true;
- }
- }
-
- // We didn't find any non-inline virtual methods missing the attribute. We
- // will return true when we found at least one non-inline virtual with the
- // attribute. (This lets our caller know that the attribute needs to be
- // propagated up to the vtable.)
- return FoundNonInlineVirtualMethodWithAttr;
-}
-
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
const CXXRecordDecl *NearestVBase) {
@@ -1981,26 +1992,10 @@ llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
getContext().toCharUnitsFromBits(PAlign).getAsAlign());
VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- // In MS C++ if you have a class with virtual functions in which you are using
- // selective member import/export, then all virtual functions must be exported
- // unless they are inline, otherwise a link error will result. To match this
- // behavior, for such classes, we dllimport the vtable if it is defined
- // externally and all the non-inline virtual methods are marked dllimport, and
- // we dllexport the vtable if it is defined in this TU and all the non-inline
- // virtual methods are marked dllexport.
- if (CGM.getTarget().hasPS4DLLImportExport()) {
- if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
- if (CGM.getVTables().isVTableExternal(RD)) {
- if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
- VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
- } else {
- if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
- VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
- }
- }
- }
- CGM.setGVProperties(VTable, RD);
+ if (CGM.getTarget().hasPS4DLLImportExport())
+ setVTableSelectiveDLLImportExport(CGM, VTable, RD);
+ CGM.setGVProperties(VTable, RD);
return VTable;
}
@@ -3285,7 +3280,7 @@ ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
// Import the typeinfo symbol when all non-inline virtual methods are
// imported.
if (CGM.getTarget().hasPS4DLLImportExport()) {
- if (RD && CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD)) {
+ if (RD && CXXRecordNonInlineHasAttr<DLLImportAttr>(RD)) {
GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
CGM.setDSOLocal(GV);
}
@@ -3938,13 +3933,13 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
// Export the typeinfo in the same circumstances as the vtable is exported.
auto GVDLLStorageClass = DLLStorageClass;
- if (CGM.getTarget().hasPS4DLLImportExport()) {
+ if (CGM.getTarget().hasPS4DLLImportExport() &&
+ GVDLLStorageClass != llvm::GlobalVariable::DLLExportStorageClass) {
if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
if (RD->hasAttr<DLLExportAttr>() ||
- CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD)) {
+ CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
- }
}
}
@@ -3984,9 +3979,7 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
CGM.setDSOLocal(GV);
TypeName->setDLLStorageClass(DLLStorageClass);
- GV->setDLLStorageClass(CGM.getTarget().hasPS4DLLImportExport()
- ? GVDLLStorageClass
- : DLLStorageClass);
+ GV->setDLLStorageClass(GVDLLStorageClass);
TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
diff --git a/clang/lib/CodeGen/Targets/AArch64.cpp b/clang/lib/CodeGen/Targets/AArch64.cpp
index 0a4711fb2170..9aa3ea75681b 100644
--- a/clang/lib/CodeGen/Targets/AArch64.cpp
+++ b/clang/lib/CodeGen/Targets/AArch64.cpp
@@ -750,18 +750,7 @@ Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
// Again, stack arguments may need realignment. In this case both integer and
// floating-point ones might be affected.
if (!IsIndirect && TyAlign.getQuantity() > 8) {
- int Align = TyAlign.getQuantity();
-
- OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
-
- OnStackPtr = CGF.Builder.CreateAdd(
- OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
- "align_stack");
- OnStackPtr = CGF.Builder.CreateAnd(
- OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
- "align_stack");
-
- OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
+ OnStackPtr = emitRoundPointerUpToAlignment(CGF, OnStackPtr, TyAlign);
}
Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
std::max(CharUnits::fromQuantity(8), TyAlign));
diff --git a/clang/lib/CodeGen/Targets/NVPTX.cpp b/clang/lib/CodeGen/Targets/NVPTX.cpp
index 7dce5042c3dc..df798ce0ca67 100644
--- a/clang/lib/CodeGen/Targets/NVPTX.cpp
+++ b/clang/lib/CodeGen/Targets/NVPTX.cpp
@@ -85,7 +85,7 @@ private:
LValue Src) {
llvm::Value *Handle = nullptr;
llvm::Constant *C =
- llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).emitRawPointer(CGF));
+ llvm::dyn_cast<llvm::Constant>(Src.getAddress().emitRawPointer(CGF));
// Lookup `addrspacecast` through the constant pointer if any.
if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C))
C = llvm::cast<llvm::Constant>(ASC->getPointerOperand());
diff --git a/clang/lib/CodeGen/Targets/X86.cpp b/clang/lib/CodeGen/Targets/X86.cpp
index 29d98aad8fcb..43dadf5e724a 100644
--- a/clang/lib/CodeGen/Targets/X86.cpp
+++ b/clang/lib/CodeGen/Targets/X86.cpp
@@ -327,7 +327,7 @@ void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
ResultTruncRegTypes.push_back(CoerceTy);
// Coerce the integer by bitcasting the return slot pointer.
- ReturnSlot.setAddress(ReturnSlot.getAddress(CGF).withElementType(CoerceTy));
+ ReturnSlot.setAddress(ReturnSlot.getAddress().withElementType(CoerceTy));
ResultRegDests.push_back(ReturnSlot);
rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp
index 2868b4f2b02e..f5ea73a04ae5 100644
--- a/clang/lib/Driver/Driver.cpp
+++ b/clang/lib/Driver/Driver.cpp
@@ -2653,22 +2653,13 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
Diag(clang::diag::note_drv_t_option_is_global);
}
- // CUDA/HIP and their preprocessor expansions can be accepted by CL mode.
// Warn -x after last input file has no effect
- auto LastXArg = Args.getLastArgValue(options::OPT_x);
- const llvm::StringSet<> ValidXArgs = {"cuda", "hip", "cui", "hipi"};
- if (!IsCLMode() || ValidXArgs.contains(LastXArg)) {
+ {
Arg *LastXArg = Args.getLastArgNoClaim(options::OPT_x);
Arg *LastInputArg = Args.getLastArgNoClaim(options::OPT_INPUT);
if (LastXArg && LastInputArg &&
LastInputArg->getIndex() < LastXArg->getIndex())
Diag(clang::diag::warn_drv_unused_x) << LastXArg->getValue();
- } else {
- // In CL mode suggest /TC or /TP since -x doesn't make sense if passed via
- // /clang:.
- if (auto *A = Args.getLastArg(options::OPT_x))
- Diag(diag::err_drv_unsupported_opt_with_suggestion)
- << A->getAsString(Args) << "/TC' or '/TP";
}
for (Arg *A : Args) {
diff --git a/clang/lib/Driver/ToolChains/AIX.cpp b/clang/lib/Driver/ToolChains/AIX.cpp
index 85825e1ea65b..381d72e045b9 100644
--- a/clang/lib/Driver/ToolChains/AIX.cpp
+++ b/clang/lib/Driver/ToolChains/AIX.cpp
@@ -479,14 +479,6 @@ static void addTocDataOptions(const llvm::opt::ArgList &Args,
return false;
}();
- // Currently only supported for small code model.
- if (TOCDataGloballyinEffect &&
- (Args.getLastArgValue(options::OPT_mcmodel_EQ) == "large" ||
- Args.getLastArgValue(options::OPT_mcmodel_EQ) == "medium")) {
- D.Diag(clang::diag::warn_drv_unsupported_tocdata);
- return;
- }
-
enum TOCDataSetting {
AddressInTOC = 0, // Address of the symbol stored in the TOC.
DataInTOC = 1 // Symbol defined in the TOC.
diff --git a/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp b/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp
index d23f9b36efb9..9ea4cc3f7cb9 100644
--- a/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp
@@ -181,7 +181,7 @@ void loongarch::getLoongArchTargetFeatures(const Driver &D,
// -m*-float and -mfpu=none/0/32 conflict with -mlsx.
if (A->getOption().matches(options::OPT_mlsx)) {
if (llvm::find(Features, "-d") != Features.end())
- D.Diag(diag::err_drv_loongarch_wrong_fpu_width_for_lsx);
+ D.Diag(diag::err_drv_loongarch_wrong_fpu_width) << /*LSX*/ 0;
else /*-mlsx*/
Features.push_back("+lsx");
} else /*-mno-lsx*/ {
@@ -196,7 +196,7 @@ void loongarch::getLoongArchTargetFeatures(const Driver &D,
// -mno-lsx conflicts with -mlasx.
if (A->getOption().matches(options::OPT_mlasx)) {
if (llvm::find(Features, "-d") != Features.end())
- D.Diag(diag::err_drv_loongarch_wrong_fpu_width_for_lasx);
+ D.Diag(diag::err_drv_loongarch_wrong_fpu_width) << /*LASX*/ 1;
else if (llvm::find(Features, "-lsx") != Features.end())
D.Diag(diag::err_drv_loongarch_invalid_simd_option_combination);
else { /*-mlasx*/
diff --git a/clang/lib/Driver/ToolChains/Arch/Mips.cpp b/clang/lib/Driver/ToolChains/Arch/Mips.cpp
index 74a8874a3ea2..79a00711e6f5 100644
--- a/clang/lib/Driver/ToolChains/Arch/Mips.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/Mips.cpp
@@ -369,6 +369,9 @@ void mips::getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple,
} else if (mips::isFP64ADefault(Triple, CPUName)) {
Features.push_back("+fp64");
Features.push_back("+nooddspreg");
+ } else if (Arg *A = Args.getLastArg(options::OPT_mmsa)) {
+ if (A->getOption().matches(options::OPT_mmsa))
+ Features.push_back("+fp64");
}
AddTargetFeature(Args, Features, options::OPT_mno_odd_spreg,
@@ -499,6 +502,13 @@ bool mips::shouldUseFPXX(const ArgList &Args, const llvm::Triple &Triple,
options::OPT_mdouble_float))
if (A->getOption().matches(options::OPT_msingle_float))
UseFPXX = false;
+ // FP64 should be used for MSA.
+ if (Arg *A = Args.getLastArg(options::OPT_mmsa))
+ if (A->getOption().matches(options::OPT_mmsa))
+ UseFPXX = llvm::StringSwitch<bool>(CPUName)
+ .Cases("mips32r2", "mips32r3", "mips32r5", false)
+ .Cases("mips64r2", "mips64r3", "mips64r5", false)
+ .Default(UseFPXX);
return UseFPXX;
}
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index c3e6d563f3bd..97e451cfe2ac 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -1030,7 +1030,7 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
// If user provided -o, that is the dependency target, except
// when we are only generating a dependency file.
- Arg *OutputOpt = Args.getLastArg(options::OPT_o);
+ Arg *OutputOpt = Args.getLastArg(options::OPT_o, options::OPT__SLASH_Fo);
if (OutputOpt && Output.getType() != types::TY_Dependencies) {
DepTarget = OutputOpt->getValue();
} else {
@@ -1522,7 +1522,7 @@ static void CollectARMPACBTIOptions(const ToolChain &TC, const ArgList &Args,
auto isPAuthLR = [](const char *member) {
llvm::AArch64::ExtensionInfo pauthlr_extension =
llvm::AArch64::getExtensionByID(llvm::AArch64::AEK_PAUTHLR);
- return (pauthlr_extension.Feature.compare(member) == 0);
+ return pauthlr_extension.Feature == member;
};
if (std::any_of(CmdArgs.begin(), CmdArgs.end(), isPAuthLR))
@@ -5681,11 +5681,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// enabled. This alias option is being used to simplify the hasFlag logic.
OptSpecifier StrictAliasingAliasOption =
OFastEnabled ? options::OPT_Ofast : options::OPT_fstrict_aliasing;
- // We turn strict aliasing off by default if we're in CL mode, since MSVC
+  // We turn strict aliasing off by default for Windows MSVC, since MSVC
// doesn't do any TBAA.
- bool TBAAOnByDefault = !D.IsCLMode();
if (!Args.hasFlag(options::OPT_fstrict_aliasing, StrictAliasingAliasOption,
- options::OPT_fno_strict_aliasing, TBAAOnByDefault))
+ options::OPT_fno_strict_aliasing, !IsWindowsMSVC))
CmdArgs.push_back("-relaxed-aliasing");
if (!Args.hasFlag(options::OPT_fstruct_path_tbaa,
options::OPT_fno_struct_path_tbaa, true))
@@ -7027,8 +7026,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fms_compatibility, options::OPT_fno_ms_compatibility,
(IsWindowsMSVC && Args.hasFlag(options::OPT_fms_extensions,
options::OPT_fno_ms_extensions, true)));
- if (IsMSVCCompat)
+ if (IsMSVCCompat) {
CmdArgs.push_back("-fms-compatibility");
+ if (!types::isCXX(Input.getType()) &&
+ Args.hasArg(options::OPT_fms_define_stdc))
+ CmdArgs.push_back("-fms-define-stdc");
+ }
if (Triple.isWindowsMSVCEnvironment() && !D.IsCLMode() &&
Args.hasArg(options::OPT_fms_runtime_lib_EQ))
@@ -7263,10 +7266,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- // -fsized-deallocation is off by default, as it is an ABI-breaking change for
- // most platforms.
- Args.addOptInFlag(CmdArgs, options::OPT_fsized_deallocation,
- options::OPT_fno_sized_deallocation);
+ // -fsized-deallocation is on by default in C++14 onwards and otherwise off
+ // by default.
+ Args.addLastArg(CmdArgs, options::OPT_fsized_deallocation,
+ options::OPT_fno_sized_deallocation);
// -faligned-allocation is on by default in C++17 onwards and otherwise off
// by default.
diff --git a/clang/lib/Driver/ToolChains/Darwin.cpp b/clang/lib/Driver/ToolChains/Darwin.cpp
index caf6c4a444fd..593b403a1e3f 100644
--- a/clang/lib/Driver/ToolChains/Darwin.cpp
+++ b/clang/lib/Driver/ToolChains/Darwin.cpp
@@ -2912,9 +2912,54 @@ static bool sdkSupportsBuiltinModules(const Darwin::DarwinPlatformKind &TargetPl
}
}
-void Darwin::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
- Action::OffloadKind DeviceOffloadKind) const {
+static inline llvm::VersionTuple
+sizedDeallocMinVersion(llvm::Triple::OSType OS) {
+ switch (OS) {
+ default:
+ break;
+ case llvm::Triple::Darwin:
+ case llvm::Triple::MacOSX: // Earliest supporting version is 10.12.
+ return llvm::VersionTuple(10U, 12U);
+ case llvm::Triple::IOS:
+ case llvm::Triple::TvOS: // Earliest supporting version is 10.0.0.
+ return llvm::VersionTuple(10U);
+ case llvm::Triple::WatchOS: // Earliest supporting version is 3.0.0.
+ return llvm::VersionTuple(3U);
+ }
+
+ llvm_unreachable("Unexpected OS");
+}
+
+bool Darwin::isSizedDeallocationUnavailable() const {
+ llvm::Triple::OSType OS;
+
+ if (isTargetMacCatalyst())
+ return TargetVersion < sizedDeallocMinVersion(llvm::Triple::MacOSX);
+ switch (TargetPlatform) {
+ case MacOS: // Earlier than 10.12.
+ OS = llvm::Triple::MacOSX;
+ break;
+ case IPhoneOS:
+ OS = llvm::Triple::IOS;
+ break;
+ case TvOS: // Earlier than 10.0.
+ OS = llvm::Triple::TvOS;
+ break;
+ case WatchOS: // Earlier than 3.0.
+ OS = llvm::Triple::WatchOS;
+ break;
+ case DriverKit:
+ case XROS:
+ // Always available.
+ return false;
+ }
+
+ return TargetVersion < sizedDeallocMinVersion(OS);
+}
+
+void Darwin::addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const {
// Pass "-faligned-alloc-unavailable" only when the user hasn't manually
// enabled or disabled aligned allocations.
if (!DriverArgs.hasArgNoClaim(options::OPT_faligned_allocation,
@@ -2922,6 +2967,13 @@ void Darwin::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
isAlignedAllocationUnavailable())
CC1Args.push_back("-faligned-alloc-unavailable");
+ // Pass "-fno-sized-deallocation" only when the user hasn't manually enabled
+ // or disabled sized deallocations.
+ if (!DriverArgs.hasArgNoClaim(options::OPT_fsized_deallocation,
+ options::OPT_fno_sized_deallocation) &&
+ isSizedDeallocationUnavailable())
+ CC1Args.push_back("-fno-sized-deallocation");
+
addClangCC1ASTargetOptions(DriverArgs, CC1Args);
// Enable compatibility mode for NSItemProviderCompletionHandler in
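Read together, the hunks above gate the new default on whether the deployment target's C++ runtime ships the sized operator delete symbols. A standalone sketch of the same predicate (version numbers taken from the patch; the helper name is ours):

```cpp
#include "llvm/Support/VersionTuple.h"
#include "llvm/TargetParser/Triple.h"

// Inverse of isSizedDeallocationUnavailable() above: true when the target's
// C++ standard library implements C++14 sized deallocation.
static bool sizedDeallocAvailable(llvm::Triple::OSType OS,
                                  llvm::VersionTuple Target) {
  switch (OS) {
  case llvm::Triple::Darwin:
  case llvm::Triple::MacOSX:
    return Target >= llvm::VersionTuple(10, 12);
  case llvm::Triple::IOS:
  case llvm::Triple::TvOS:
    return Target >= llvm::VersionTuple(10);
  case llvm::Triple::WatchOS:
    return Target >= llvm::VersionTuple(3);
  default:
    return true; // DriverKit and XROS: always available, per the patch
  }
}
```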
diff --git a/clang/lib/Driver/ToolChains/Darwin.h b/clang/lib/Driver/ToolChains/Darwin.h
index 10d4b69e5d5f..b45279ecedeb 100644
--- a/clang/lib/Driver/ToolChains/Darwin.h
+++ b/clang/lib/Driver/ToolChains/Darwin.h
@@ -511,6 +511,10 @@ protected:
/// targeting.
bool isAlignedAllocationUnavailable() const;
+ /// Return true if C++14 sized deallocation functions are not implemented in
+ /// the C++ standard library of the deployment target we are targeting.
+ bool isSizedDeallocationUnavailable() const;
+
void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadKind) const override;
diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp
index 9849c59685cc..c0100fed1513 100644
--- a/clang/lib/Driver/ToolChains/Gnu.cpp
+++ b/clang/lib/Driver/ToolChains/Gnu.cpp
@@ -2227,10 +2227,19 @@ void Generic_GCC::GCCInstallationDetector::init(
SmallVector<StringRef, 16> CandidateBiarchTripleAliases;
// Add some triples that we want to check first.
CandidateTripleAliases.push_back(TargetTriple.str());
- std::string TripleNoVendor = TargetTriple.getArchName().str() + "-" +
- TargetTriple.getOSAndEnvironmentName().str();
- if (TargetTriple.getVendor() == llvm::Triple::UnknownVendor)
+ std::string TripleNoVendor, BiarchTripleNoVendor;
+ if (TargetTriple.getVendor() == llvm::Triple::UnknownVendor) {
+ StringRef OSEnv = TargetTriple.getOSAndEnvironmentName();
+ if (TargetTriple.getEnvironment() == llvm::Triple::GNUX32)
+ OSEnv = "linux-gnu";
+ TripleNoVendor = (TargetTriple.getArchName().str() + '-' + OSEnv).str();
CandidateTripleAliases.push_back(TripleNoVendor);
+ if (BiarchVariantTriple.getArch() != llvm::Triple::UnknownArch) {
+ BiarchTripleNoVendor =
+ (BiarchVariantTriple.getArchName().str() + '-' + OSEnv).str();
+ CandidateBiarchTripleAliases.push_back(BiarchTripleNoVendor);
+ }
+ }
CollectLibDirsAndTriples(TargetTriple, BiarchVariantTriple, CandidateLibDirs,
CandidateTripleAliases, CandidateBiarchLibDirs,
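One consequence of the block above: for x32 targets the vendorless alias is now derived from "linux-gnu" rather than the literal "linux-gnux32" environment name when probing GCC installation directories. A self-contained sketch of that normalization (the helper name is ours):

```cpp
#include "llvm/TargetParser/Triple.h"
#include <string>

// Mirrors the TripleNoVendor construction above.
static std::string vendorlessAlias(const llvm::Triple &T) {
  llvm::StringRef OSEnv = T.getOSAndEnvironmentName();
  if (T.getEnvironment() == llvm::Triple::GNUX32)
    OSEnv = "linux-gnu";
  return (T.getArchName().str() + '-' + OSEnv).str();
}
// vendorlessAlias(llvm::Triple("x86_64-unknown-linux-gnux32"))
//   returns "x86_64-linux-gnu"
```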
@@ -2453,11 +2462,9 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
// lists should shrink over time. Please don't add more elements to *Triples.
static const char *const AArch64LibDirs[] = {"/lib64", "/lib"};
static const char *const AArch64Triples[] = {
- "aarch64-none-linux-gnu", "aarch64-linux-gnu", "aarch64-redhat-linux",
- "aarch64-suse-linux"};
+ "aarch64-none-linux-gnu", "aarch64-redhat-linux", "aarch64-suse-linux"};
static const char *const AArch64beLibDirs[] = {"/lib"};
- static const char *const AArch64beTriples[] = {"aarch64_be-none-linux-gnu",
- "aarch64_be-linux-gnu"};
+ static const char *const AArch64beTriples[] = {"aarch64_be-none-linux-gnu"};
static const char *const ARMLibDirs[] = {"/lib"};
static const char *const ARMTriples[] = {"arm-linux-gnueabi"};
@@ -2479,20 +2486,19 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
static const char *const X86_64LibDirs[] = {"/lib64", "/lib"};
static const char *const X86_64Triples[] = {
- "x86_64-linux-gnu", "x86_64-unknown-linux-gnu",
- "x86_64-pc-linux-gnu", "x86_64-redhat-linux6E",
- "x86_64-redhat-linux", "x86_64-suse-linux",
- "x86_64-manbo-linux-gnu", "x86_64-linux-gnu",
- "x86_64-slackware-linux", "x86_64-unknown-linux",
+ "x86_64-unknown-linux-gnu", "x86_64-pc-linux-gnu",
+ "x86_64-redhat-linux6E", "x86_64-redhat-linux",
+ "x86_64-suse-linux", "x86_64-manbo-linux-gnu",
+ "x86_64-slackware-linux", "x86_64-unknown-linux",
"x86_64-amazon-linux"};
static const char *const X32Triples[] = {"x86_64-linux-gnux32",
"x86_64-pc-linux-gnux32"};
static const char *const X32LibDirs[] = {"/libx32", "/lib"};
static const char *const X86LibDirs[] = {"/lib32", "/lib"};
static const char *const X86Triples[] = {
- "i586-linux-gnu", "i686-linux-gnu", "i686-pc-linux-gnu",
- "i386-redhat-linux6E", "i686-redhat-linux", "i386-redhat-linux",
- "i586-suse-linux", "i686-montavista-linux",
+ "i686-linux-gnu", "i686-pc-linux-gnu", "i386-redhat-linux6E",
+ "i686-redhat-linux", "i386-redhat-linux", "i586-suse-linux",
+ "i686-montavista-linux",
};
static const char *const LoongArch64LibDirs[] = {"/lib64", "/lib"};
@@ -2500,26 +2506,24 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
"loongarch64-linux-gnu", "loongarch64-unknown-linux-gnu"};
static const char *const M68kLibDirs[] = {"/lib"};
- static const char *const M68kTriples[] = {
- "m68k-linux-gnu", "m68k-unknown-linux-gnu", "m68k-suse-linux"};
+ static const char *const M68kTriples[] = {"m68k-unknown-linux-gnu",
+ "m68k-suse-linux"};
static const char *const MIPSLibDirs[] = {"/libo32", "/lib"};
static const char *const MIPSTriples[] = {
"mips-linux-gnu", "mips-mti-linux", "mips-mti-linux-gnu",
"mips-img-linux-gnu", "mipsisa32r6-linux-gnu"};
static const char *const MIPSELLibDirs[] = {"/libo32", "/lib"};
- static const char *const MIPSELTriples[] = {
- "mipsel-linux-gnu", "mips-img-linux-gnu", "mipsisa32r6el-linux-gnu"};
+ static const char *const MIPSELTriples[] = {"mipsel-linux-gnu",
+ "mips-img-linux-gnu"};
static const char *const MIPS64LibDirs[] = {"/lib64", "/lib"};
static const char *const MIPS64Triples[] = {
- "mips64-linux-gnu", "mips-mti-linux-gnu",
- "mips-img-linux-gnu", "mips64-linux-gnuabi64",
+ "mips-mti-linux-gnu", "mips-img-linux-gnu", "mips64-linux-gnuabi64",
"mipsisa64r6-linux-gnu", "mipsisa64r6-linux-gnuabi64"};
static const char *const MIPS64ELLibDirs[] = {"/lib64", "/lib"};
static const char *const MIPS64ELTriples[] = {
- "mips64el-linux-gnu", "mips-mti-linux-gnu",
- "mips-img-linux-gnu", "mips64el-linux-gnuabi64",
+ "mips-mti-linux-gnu", "mips-img-linux-gnu", "mips64el-linux-gnuabi64",
"mipsisa64r6el-linux-gnu", "mipsisa64r6el-linux-gnuabi64"};
static const char *const MIPSN32LibDirs[] = {"/lib32"};
@@ -2534,46 +2538,39 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
static const char *const PPCLibDirs[] = {"/lib32", "/lib"};
static const char *const PPCTriples[] = {
- "powerpc-linux-gnu", "powerpc-unknown-linux-gnu", "powerpc-linux-gnuspe",
+ "powerpc-unknown-linux-gnu",
// On 32-bit PowerPC systems running SUSE Linux, gcc is configured as a
// 64-bit compiler which defaults to "-m32", hence "powerpc64-suse-linux".
"powerpc64-suse-linux", "powerpc-montavista-linuxspe"};
static const char *const PPCLELibDirs[] = {"/lib32", "/lib"};
- static const char *const PPCLETriples[] = {"powerpcle-linux-gnu",
- "powerpcle-unknown-linux-gnu",
+ static const char *const PPCLETriples[] = {"powerpcle-unknown-linux-gnu",
"powerpcle-linux-musl"};
static const char *const PPC64LibDirs[] = {"/lib64", "/lib"};
- static const char *const PPC64Triples[] = {
- "powerpc64-linux-gnu", "powerpc64-unknown-linux-gnu",
- "powerpc64-suse-linux", "ppc64-redhat-linux"};
+ static const char *const PPC64Triples[] = {"powerpc64-unknown-linux-gnu",
+ "powerpc64-suse-linux",
+ "ppc64-redhat-linux"};
static const char *const PPC64LELibDirs[] = {"/lib64", "/lib"};
static const char *const PPC64LETriples[] = {
- "powerpc64le-linux-gnu", "powerpc64le-unknown-linux-gnu",
- "powerpc64le-none-linux-gnu", "powerpc64le-suse-linux",
- "ppc64le-redhat-linux"};
+ "powerpc64le-unknown-linux-gnu", "powerpc64le-none-linux-gnu",
+ "powerpc64le-suse-linux", "ppc64le-redhat-linux"};
static const char *const RISCV32LibDirs[] = {"/lib32", "/lib"};
static const char *const RISCV32Triples[] = {"riscv32-unknown-linux-gnu",
- "riscv32-linux-gnu",
"riscv32-unknown-elf"};
static const char *const RISCV64LibDirs[] = {"/lib64", "/lib"};
static const char *const RISCV64Triples[] = {"riscv64-unknown-linux-gnu",
- "riscv64-linux-gnu",
"riscv64-unknown-elf"};
static const char *const SPARCv8LibDirs[] = {"/lib32", "/lib"};
- static const char *const SPARCv8Triples[] = {"sparc-linux-gnu",
- "sparcv8-linux-gnu"};
+ static const char *const SPARCv8Triples[] = {"sparcv8-linux-gnu"};
static const char *const SPARCv9LibDirs[] = {"/lib64", "/lib"};
- static const char *const SPARCv9Triples[] = {"sparc64-linux-gnu",
- "sparcv9-linux-gnu"};
+ static const char *const SPARCv9Triples[] = {"sparcv9-linux-gnu"};
static const char *const SystemZLibDirs[] = {"/lib64", "/lib"};
static const char *const SystemZTriples[] = {
- "s390x-linux-gnu", "s390x-unknown-linux-gnu", "s390x-ibm-linux-gnu",
- "s390x-suse-linux", "s390x-redhat-linux"};
-
+ "s390x-unknown-linux-gnu", "s390x-ibm-linux-gnu", "s390x-suse-linux",
+ "s390x-redhat-linux"};
using std::begin;
using std::end;
diff --git a/clang/lib/Driver/ToolChains/HIPSPV.cpp b/clang/lib/Driver/ToolChains/HIPSPV.cpp
index a144b28057f4..bdbcf9109129 100644
--- a/clang/lib/Driver/ToolChains/HIPSPV.cpp
+++ b/clang/lib/Driver/ToolChains/HIPSPV.cpp
@@ -193,7 +193,7 @@ void HIPSPVToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs,
StringRef hipPath = DriverArgs.getLastArgValue(options::OPT_hip_path_EQ);
if (hipPath.empty()) {
- getDriver().Diag(diag::err_drv_hipspv_no_hip_path) << 1 << "'-nogpuinc'";
+ getDriver().Diag(diag::err_drv_hipspv_no_hip_path);
return;
}
SmallString<128> P(hipPath);
diff --git a/clang/lib/Driver/ToolChains/PS4CPU.cpp b/clang/lib/Driver/ToolChains/PS4CPU.cpp
index 7bf9aa79384c..3fd62d979309 100644
--- a/clang/lib/Driver/ToolChains/PS4CPU.cpp
+++ b/clang/lib/Driver/ToolChains/PS4CPU.cpp
@@ -358,6 +358,12 @@ void toolchains::PS4PS5Base::addClangTargetOptions(
CC1Args.push_back("-fno-use-init-array");
+ // Default to `hidden` visibility for PS5.
+ if (getTriple().isPS5() &&
+ !DriverArgs.hasArg(options::OPT_fvisibility_EQ,
+ options::OPT_fvisibility_ms_compat))
+ CC1Args.push_back("-fvisibility=hidden");
+
// Default to -fvisibility-global-new-delete=source for PS5.
if (getTriple().isPS5() &&
!DriverArgs.hasArg(options::OPT_fvisibility_global_new_delete_EQ,
@@ -376,11 +382,15 @@ void toolchains::PS4PS5Base::addClangTargetOptions(
else
CC1Args.push_back("-fvisibility-dllexport=protected");
+ // For PS4 we override the visibility of global definitions without
+ // dllimport or dllexport annotations.
if (DriverArgs.hasArg(options::OPT_fvisibility_nodllstorageclass_EQ))
DriverArgs.AddLastArg(CC1Args,
options::OPT_fvisibility_nodllstorageclass_EQ);
- else
+ else if (getTriple().isPS4())
CC1Args.push_back("-fvisibility-nodllstorageclass=hidden");
+ else
+ CC1Args.push_back("-fvisibility-nodllstorageclass=keep");
if (DriverArgs.hasArg(options::OPT_fvisibility_externs_dllimport_EQ))
DriverArgs.AddLastArg(CC1Args,
@@ -388,12 +398,16 @@ void toolchains::PS4PS5Base::addClangTargetOptions(
else
CC1Args.push_back("-fvisibility-externs-dllimport=default");
+ // For PS4 we override the visibility of external globals without
+ // dllimport or dllexport annotations.
if (DriverArgs.hasArg(
options::OPT_fvisibility_externs_nodllstorageclass_EQ))
DriverArgs.AddLastArg(
CC1Args, options::OPT_fvisibility_externs_nodllstorageclass_EQ);
- else
+ else if (getTriple().isPS4())
CC1Args.push_back("-fvisibility-externs-nodllstorageclass=default");
+ else
+ CC1Args.push_back("-fvisibility-externs-nodllstorageclass=keep");
}
}
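A sketch of what the new PS5 defaults mean for user code (the symbol names are ours, and it assumes no explicit -fvisibility= on the command line):

```cpp
// With -fvisibility=hidden now implied on PS5, unannotated definitions are no
// longer exported from a shared object, while dllexport-annotated ones keep
// default/protected visibility via the -fvisibility-dllexport handling above;
// "keep" means the dllstorageclass pass no longer overrides them separately.
__declspec(dllexport) int exported_counter = 0; // still visible to importers
int internal_counter = 0;                       // hidden by default now
```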
diff --git a/clang/lib/Driver/ToolChains/ZOS.cpp b/clang/lib/Driver/ToolChains/ZOS.cpp
index d5fc7b8ef562..074e0556ecd2 100644
--- a/clang/lib/Driver/ToolChains/ZOS.cpp
+++ b/clang/lib/Driver/ToolChains/ZOS.cpp
@@ -36,6 +36,12 @@ void ZOS::addClangTargetOptions(const ArgList &DriverArgs,
if (!DriverArgs.hasArgNoClaim(options::OPT_faligned_allocation,
options::OPT_fno_aligned_allocation))
CC1Args.push_back("-faligned-alloc-unavailable");
+
+ // Pass "-fno-sized-deallocation" only when the user hasn't manually enabled
+ // or disabled sized deallocations.
+ if (!DriverArgs.hasArgNoClaim(options::OPT_fsized_deallocation,
+ options::OPT_fno_sized_deallocation))
+ CC1Args.push_back("-fno-sized-deallocation");
}
void zos::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
diff --git a/clang/lib/ExtractAPI/DeclarationFragments.cpp b/clang/lib/ExtractAPI/DeclarationFragments.cpp
index 98b9343924a8..8c7c0f8a1472 100644
--- a/clang/lib/ExtractAPI/DeclarationFragments.cpp
+++ b/clang/lib/ExtractAPI/DeclarationFragments.cpp
@@ -999,11 +999,11 @@ DeclarationFragmentsBuilder::getFragmentsForTemplateParameters(
DeclarationFragments::FragmentKind::GenericParameter);
if (TemplateParam->hasDefaultArgument()) {
- DeclarationFragments After;
+ const auto Default = TemplateParam->getDefaultArgument();
Fragments.append(" = ", DeclarationFragments::FragmentKind::Text)
- .append(getFragmentsForType(TemplateParam->getDefaultArgument(),
- TemplateParam->getASTContext(), After));
- Fragments.append(std::move(After));
+ .append(getFragmentsForTemplateArguments(
+ {Default.getArgument()}, TemplateParam->getASTContext(),
+ {Default}));
}
} else if (const auto *NTP =
dyn_cast<NonTypeTemplateParmDecl>(ParameterArray[i])) {
@@ -1023,8 +1023,9 @@ DeclarationFragmentsBuilder::getFragmentsForTemplateParameters(
if (NTP->hasDefaultArgument()) {
SmallString<8> ExprStr;
raw_svector_ostream Output(ExprStr);
- NTP->getDefaultArgument()->printPretty(
- Output, nullptr, NTP->getASTContext().getPrintingPolicy());
+ NTP->getDefaultArgument().getArgument().print(
+ NTP->getASTContext().getPrintingPolicy(), Output,
+ /*IncludeType=*/false);
Fragments.append(" = ", DeclarationFragments::FragmentKind::Text)
.append(ExprStr, DeclarationFragments::FragmentKind::Text);
}
@@ -1083,12 +1084,22 @@ DeclarationFragmentsBuilder::getFragmentsForTemplateArguments(
if (StringRef(ArgumentFragment.begin()->Spelling)
.starts_with("type-parameter")) {
- std::string ProperArgName = TemplateArgumentLocs.value()[i]
- .getTypeSourceInfo()
- ->getType()
- .getAsString();
- ArgumentFragment.begin()->Spelling.swap(ProperArgName);
+ if (TemplateArgumentLocs.has_value() &&
+ TemplateArgumentLocs->size() > i) {
+ std::string ProperArgName = TemplateArgumentLocs.value()[i]
+ .getTypeSourceInfo()
+ ->getType()
+ .getAsString();
+ ArgumentFragment.begin()->Spelling.swap(ProperArgName);
+ } else {
+ auto &Spelling = ArgumentFragment.begin()->Spelling;
+ Spelling.clear();
+ raw_string_ostream OutStream(Spelling);
+ CTA.print(Context.getPrintingPolicy(), OutStream, false);
+ OutStream.flush();
+ }
}
+
Fragments.append(std::move(ArgumentFragment));
break;
}
@@ -1211,9 +1222,9 @@ DeclarationFragmentsBuilder::getFragmentsForClassTemplateSpecialization(
cast<CXXRecordDecl>(Decl)))
.pop_back() // there is an extra semicolon now
.append("<", DeclarationFragments::FragmentKind::Text)
- .append(
- getFragmentsForTemplateArguments(Decl->getTemplateArgs().asArray(),
- Decl->getASTContext(), std::nullopt))
+ .append(getFragmentsForTemplateArguments(
+ Decl->getTemplateArgs().asArray(), Decl->getASTContext(),
+ Decl->getTemplateArgsAsWritten()->arguments()))
.append(">", DeclarationFragments::FragmentKind::Text)
.appendSemicolon();
}
@@ -1254,9 +1265,9 @@ DeclarationFragmentsBuilder::getFragmentsForVarTemplateSpecialization(
.append(DeclarationFragmentsBuilder::getFragmentsForVarTemplate(Decl))
.pop_back() // there is an extra semicolon now
.append("<", DeclarationFragments::FragmentKind::Text)
- .append(
- getFragmentsForTemplateArguments(Decl->getTemplateArgs().asArray(),
- Decl->getASTContext(), std::nullopt))
+ .append(getFragmentsForTemplateArguments(
+ Decl->getTemplateArgs().asArray(), Decl->getASTContext(),
+ Decl->getTemplateArgsAsWritten()->arguments()))
.append(">", DeclarationFragments::FragmentKind::Text)
.appendSemicolon();
}
diff --git a/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp b/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp
index c16d4623f115..08e711cafae2 100644
--- a/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp
+++ b/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp
@@ -925,6 +925,10 @@ bool SymbolGraphSerializer::visitObjCInterfaceRecord(
bool SymbolGraphSerializer::traverseObjCCategoryRecord(
const ObjCCategoryRecord *Record) {
+ if (SkipSymbolsInCategoriesToExternalTypes &&
+ !API.findRecordForUSR(Record->Interface.USR))
+ return true;
+
auto *CurrentModule = ModuleForCurrentSymbol;
if (Record->isExtendingExternalModule())
ModuleForCurrentSymbol = &ExtendedModules[Record->Interface.Source];
@@ -1040,8 +1044,11 @@ void SymbolGraphSerializer::serializeGraphToStream(
void SymbolGraphSerializer::serializeMainSymbolGraph(
raw_ostream &OS, const APISet &API, const APIIgnoresList &IgnoresList,
SymbolGraphSerializerOption Options) {
- SymbolGraphSerializer Serializer(API, IgnoresList,
- Options.EmitSymbolLabelsForTesting);
+ SymbolGraphSerializer Serializer(
+ API, IgnoresList, Options.EmitSymbolLabelsForTesting,
+ /*ForceEmitToMainModule=*/true,
+ /*SkipSymbolsInCategoriesToExternalTypes=*/true);
+
Serializer.traverseAPISet();
Serializer.serializeGraphToStream(OS, Options, API.ProductName,
std::move(Serializer.MainModule));
diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp
index 52005a6c881f..c015e03fa15e 100644
--- a/clang/lib/Format/Format.cpp
+++ b/clang/lib/Format/Format.cpp
@@ -28,7 +28,7 @@
using clang::format::FormatStyle;
-LLVM_YAML_IS_SEQUENCE_VECTOR(clang::format::FormatStyle::RawStringFormat)
+LLVM_YAML_IS_SEQUENCE_VECTOR(FormatStyle::RawStringFormat)
namespace llvm {
namespace yaml {
@@ -308,6 +308,7 @@ struct ScalarEnumerationTraits<FormatStyle::EscapedNewlineAlignmentStyle> {
FormatStyle::EscapedNewlineAlignmentStyle &Value) {
IO.enumCase(Value, "DontAlign", FormatStyle::ENAS_DontAlign);
IO.enumCase(Value, "Left", FormatStyle::ENAS_Left);
+ IO.enumCase(Value, "LeftWithLastLine", FormatStyle::ENAS_LeftWithLastLine);
IO.enumCase(Value, "Right", FormatStyle::ENAS_Right);
// For backward compatibility.
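The new enumerator is reachable from YAML as `AlignEscapedNewlines: LeftWithLastLine` and programmatically through libFormat. A minimal sketch of the latter; per the WhitespaceManager change later in this diff, the mode aligns backslashes to the left while also letting the macro's final, backslash-free line widen the alignment column:

```cpp
#include "clang/Format/Format.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  using namespace clang;
  format::FormatStyle Style = format::getLLVMStyle();
  Style.AlignEscapedNewlines = format::FormatStyle::ENAS_LeftWithLastLine;

  llvm::StringRef Code =
      "#define LOG(x) \\\n  record(x); \\\n  flush_long_tail(x)\n";
  tooling::Replacements R = format::reformat(
      Style, Code, {tooling::Range(0, Code.size())}, "example.h");
  auto Formatted = tooling::applyAllReplacements(Code, R);
  if (Formatted)
    llvm::outs() << *Formatted;
  else
    llvm::consumeError(Formatted.takeError());
}
```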
@@ -1235,7 +1236,7 @@ std::error_code make_error_code(ParseError e) {
return std::error_code(static_cast<int>(e), getParseCategory());
}
-inline llvm::Error make_string_error(const llvm::Twine &Message) {
+inline llvm::Error make_string_error(const Twine &Message) {
return llvm::make_error<llvm::StringError>(Message,
llvm::inconvertibleErrorCode());
}
@@ -2361,7 +2362,7 @@ private:
// FIXME: handle error. For now, print error message and skip the
// replacement for release version.
if (Err) {
- llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ llvm::errs() << toString(std::move(Err)) << "\n";
assert(false);
}
};
@@ -2802,7 +2803,7 @@ private:
// FIXME: better error handling. for now just print error message and skip
// for the release version.
if (Err) {
- llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ llvm::errs() << toString(std::move(Err)) << "\n";
assert(false && "Fixes must not conflict!");
}
Idx = End + 1;
@@ -3074,7 +3075,7 @@ static void sortCppIncludes(const FormatStyle &Style,
llvm::to_vector<16>(llvm::seq<unsigned>(0, Includes.size()));
if (Style.SortIncludes == FormatStyle::SI_CaseInsensitive) {
- llvm::stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
+ stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
const auto LHSFilenameLower = Includes[LHSI].Filename.lower();
const auto RHSFilenameLower = Includes[RHSI].Filename.lower();
return std::tie(Includes[LHSI].Priority, LHSFilenameLower,
@@ -3083,7 +3084,7 @@ static void sortCppIncludes(const FormatStyle &Style,
Includes[RHSI].Filename);
});
} else {
- llvm::stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
+ stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
return std::tie(Includes[LHSI].Priority, Includes[LHSI].Filename) <
std::tie(Includes[RHSI].Priority, Includes[RHSI].Filename);
});
@@ -3115,7 +3116,7 @@ static void sortCppIncludes(const FormatStyle &Style,
// enough as additional newlines might be added or removed across #include
// blocks. This we handle below by generating the updated #include blocks and
// comparing it to the original.
- if (Indices.size() == Includes.size() && llvm::is_sorted(Indices) &&
+ if (Indices.size() == Includes.size() && is_sorted(Indices) &&
Style.IncludeStyle.IncludeBlocks == tooling::IncludeStyle::IBS_Preserve) {
return;
}
@@ -3154,7 +3155,7 @@ static void sortCppIncludes(const FormatStyle &Style,
// FIXME: better error handling. For now, just skip the replacement for the
// release version.
if (Err) {
- llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ llvm::errs() << toString(std::move(Err)) << "\n";
assert(false);
}
}
@@ -3307,7 +3308,7 @@ static void sortJavaImports(const FormatStyle &Style,
bool StaticImportAfterNormalImport =
Style.SortJavaStaticImport == FormatStyle::SJSIO_After;
- llvm::sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
+ sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
// Negating IsStatic to push static imports above non-static imports.
return std::make_tuple(!Imports[LHSI].IsStatic ^
StaticImportAfterNormalImport,
@@ -3357,7 +3358,7 @@ static void sortJavaImports(const FormatStyle &Style,
// FIXME: better error handling. For now, just skip the replacement for the
// release version.
if (Err) {
- llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ llvm::errs() << toString(std::move(Err)) << "\n";
assert(false);
}
}
@@ -3451,7 +3452,7 @@ tooling::Replacements sortIncludes(const FormatStyle &Style, StringRef Code,
}
template <typename T>
-static llvm::Expected<tooling::Replacements>
+static Expected<tooling::Replacements>
processReplacements(T ProcessFunc, StringRef Code,
const tooling::Replacements &Replaces,
const FormatStyle &Style) {
@@ -3470,7 +3471,7 @@ processReplacements(T ProcessFunc, StringRef Code,
return Replaces.merge(FormatReplaces);
}
-llvm::Expected<tooling::Replacements>
+Expected<tooling::Replacements>
formatReplacements(StringRef Code, const tooling::Replacements &Replaces,
const FormatStyle &Style) {
// We need to use lambda function here since there are two versions of
@@ -3515,13 +3516,13 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
return Replaces;
tooling::Replacements HeaderInsertions;
- std::set<llvm::StringRef> HeadersToDelete;
+ std::set<StringRef> HeadersToDelete;
tooling::Replacements Result;
for (const auto &R : Replaces) {
if (isHeaderInsertion(R)) {
// Replacements from \p Replaces must be conflict-free already, so we can
// simply consume the error.
- llvm::consumeError(HeaderInsertions.add(R));
+ consumeError(HeaderInsertions.add(R));
} else if (isHeaderDeletion(R)) {
HeadersToDelete.insert(R.getReplacementText());
} else if (R.getOffset() == UINT_MAX) {
@@ -3529,7 +3530,7 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
"not supported! "
<< R.getReplacementText() << "\n";
} else {
- llvm::consumeError(Result.add(R));
+ consumeError(Result.add(R));
}
}
if (HeaderInsertions.empty() && HeadersToDelete.empty())
@@ -3546,13 +3547,12 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
if (Err) {
// Ignore the deletion on conflict.
llvm::errs() << "Failed to add header deletion replacement for "
- << Header << ": " << llvm::toString(std::move(Err))
- << "\n";
+ << Header << ": " << toString(std::move(Err)) << "\n";
}
}
}
- llvm::SmallVector<StringRef, 4> Matches;
+ SmallVector<StringRef, 4> Matches;
for (const auto &R : HeaderInsertions) {
auto IncludeDirective = R.getReplacementText();
bool Matched =
@@ -3567,7 +3567,7 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
if (Replace) {
auto Err = Result.add(*Replace);
if (Err) {
- llvm::consumeError(std::move(Err));
+ consumeError(std::move(Err));
unsigned NewOffset =
Result.getShiftedCodePosition(Replace->getOffset());
auto Shifted = tooling::Replacement(FileName, NewOffset, 0,
@@ -3581,7 +3581,7 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
} // anonymous namespace
-llvm::Expected<tooling::Replacements>
+Expected<tooling::Replacements>
cleanupAroundReplacements(StringRef Code, const tooling::Replacements &Replaces,
const FormatStyle &Style) {
// We need to use lambda function here since there are two versions of
@@ -3774,7 +3774,7 @@ reformat(const FormatStyle &Style, StringRef Code,
auto Err = NonNoOpFixes.add(Fix);
if (Err) {
llvm::errs() << "Error adding replacements : "
- << llvm::toString(std::move(Err)) << "\n";
+ << toString(std::move(Err)) << "\n";
}
}
}
@@ -3956,17 +3956,16 @@ loadAndParseConfigFile(StringRef ConfigFile, llvm::vfs::FileSystem *FS,
return Text;
}
-llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
- StringRef FallbackStyleName,
- StringRef Code, llvm::vfs::FileSystem *FS,
- bool AllowUnknownOptions) {
+Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
+ StringRef FallbackStyleName, StringRef Code,
+ llvm::vfs::FileSystem *FS,
+ bool AllowUnknownOptions) {
FormatStyle Style = getLLVMStyle(guessLanguage(FileName, Code));
FormatStyle FallbackStyle = getNoStyle();
if (!getPredefinedStyle(FallbackStyleName, Style.Language, &FallbackStyle))
return make_string_error("Invalid fallback style: " + FallbackStyleName);
- llvm::SmallVector<std::unique_ptr<llvm::MemoryBuffer>, 1>
- ChildFormatTextToApply;
+ SmallVector<std::unique_ptr<llvm::MemoryBuffer>, 1> ChildFormatTextToApply;
if (StyleName.starts_with("{")) {
// Parse YAML/JSON style from the command line.
@@ -4041,7 +4040,7 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
};
// Look for .clang-format/_clang-format file in the file's parent directories.
- llvm::SmallVector<std::string, 2> FilesToLookFor;
+ SmallVector<std::string, 2> FilesToLookFor;
FilesToLookFor.push_back(".clang-format");
FilesToLookFor.push_back("_clang-format");
diff --git a/clang/lib/Format/FormatTokenSource.h b/clang/lib/Format/FormatTokenSource.h
index 2b93f302d360..8f00e5f4582c 100644
--- a/clang/lib/Format/FormatTokenSource.h
+++ b/clang/lib/Format/FormatTokenSource.h
@@ -173,7 +173,7 @@ private:
return Next;
}
- void dbgToken(int Position, llvm::StringRef Indent = "") {
+ void dbgToken(int Position, StringRef Indent = "") {
FormatToken *Tok = Tokens[Position];
llvm::dbgs() << Indent << "[" << Position
<< "] Token: " << Tok->Tok.getName() << " / " << Tok->TokenText
diff --git a/clang/lib/Format/MacroExpander.cpp b/clang/lib/Format/MacroExpander.cpp
index 5a1cdd884c5e..5768ff37fefc 100644
--- a/clang/lib/Format/MacroExpander.cpp
+++ b/clang/lib/Format/MacroExpander.cpp
@@ -119,7 +119,7 @@ private:
};
MacroExpander::MacroExpander(
- const std::vector<std::string> &Macros, clang::SourceManager &SourceMgr,
+ const std::vector<std::string> &Macros, SourceManager &SourceMgr,
const FormatStyle &Style,
llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
IdentifierTable &IdentTable)
@@ -134,7 +134,7 @@ MacroExpander::~MacroExpander() = default;
void MacroExpander::parseDefinition(const std::string &Macro) {
Buffers.push_back(
llvm::MemoryBuffer::getMemBufferCopy(Macro, "<scratch space>"));
- clang::FileID FID = SourceMgr.createFileID(Buffers.back()->getMemBufferRef());
+ FileID FID = SourceMgr.createFileID(Buffers.back()->getMemBufferRef());
FormatTokenLexer Lex(SourceMgr, FID, 0, Style, encoding::Encoding_UTF8,
Allocator, IdentTable);
const auto Tokens = Lex.lex();
@@ -150,20 +150,20 @@ void MacroExpander::parseDefinition(const std::string &Macro) {
}
}
-bool MacroExpander::defined(llvm::StringRef Name) const {
+bool MacroExpander::defined(StringRef Name) const {
return FunctionLike.contains(Name) || ObjectLike.contains(Name);
}
-bool MacroExpander::objectLike(llvm::StringRef Name) const {
+bool MacroExpander::objectLike(StringRef Name) const {
return ObjectLike.contains(Name);
}
-bool MacroExpander::hasArity(llvm::StringRef Name, unsigned Arity) const {
+bool MacroExpander::hasArity(StringRef Name, unsigned Arity) const {
auto it = FunctionLike.find(Name);
return it != FunctionLike.end() && it->second.contains(Arity);
}
-llvm::SmallVector<FormatToken *, 8>
+SmallVector<FormatToken *, 8>
MacroExpander::expand(FormatToken *ID,
std::optional<ArgsList> OptionalArgs) const {
if (OptionalArgs)
diff --git a/clang/lib/Format/Macros.h b/clang/lib/Format/Macros.h
index fb12d22299de..e05f734b0db8 100644
--- a/clang/lib/Format/Macros.h
+++ b/clang/lib/Format/Macros.h
@@ -79,7 +79,7 @@ struct UnwrappedLineNode;
///
class MacroExpander {
public:
- using ArgsList = llvm::ArrayRef<llvm::SmallVector<FormatToken *, 8>>;
+ using ArgsList = ArrayRef<SmallVector<FormatToken *, 8>>;
/// Construct a macro expander from a set of macro definitions.
/// Macro definitions must be encoded as UTF-8.
@@ -95,27 +95,27 @@ public:
/// Macros that cannot be parsed will be silently discarded.
///
MacroExpander(const std::vector<std::string> &Macros,
- clang::SourceManager &SourceMgr, const FormatStyle &Style,
+ SourceManager &SourceMgr, const FormatStyle &Style,
llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
IdentifierTable &IdentTable);
~MacroExpander();
/// Returns whether any macro \p Name is defined, regardless of overloads.
- bool defined(llvm::StringRef Name) const;
+ bool defined(StringRef Name) const;
/// Returns whether there is an object-like overload, i.e. where the macro
/// has no arguments and should not consume subsequent parentheses.
- bool objectLike(llvm::StringRef Name) const;
+ bool objectLike(StringRef Name) const;
/// Returns whether macro \p Name provides an overload with the given arity.
- bool hasArity(llvm::StringRef Name, unsigned Arity) const;
+ bool hasArity(StringRef Name, unsigned Arity) const;
/// Returns the expanded stream of format tokens for \p ID, where
/// each element in \p Args is a positional argument to the macro call.
/// If \p Args is not set, the object-like overload is used.
/// If \p Args is set, the overload with the arity equal to \c Args.size() is
/// used.
- llvm::SmallVector<FormatToken *, 8>
+ SmallVector<FormatToken *, 8>
expand(FormatToken *ID, std::optional<ArgsList> OptionalArgs) const;
private:
@@ -124,7 +124,7 @@ private:
void parseDefinition(const std::string &Macro);
- clang::SourceManager &SourceMgr;
+ SourceManager &SourceMgr;
const FormatStyle &Style;
llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator;
IdentifierTable &IdentTable;
@@ -260,7 +260,7 @@ private:
LineNode() = default;
LineNode(FormatToken *Tok) : Tok(Tok) {}
FormatToken *Tok = nullptr;
- llvm::SmallVector<std::unique_ptr<ReconstructedLine>> Children;
+ SmallVector<std::unique_ptr<ReconstructedLine>> Children;
};
// Line in which we build up the resulting unwrapped line.
@@ -269,7 +269,7 @@ private:
struct ReconstructedLine {
explicit ReconstructedLine(unsigned Level) : Level(Level) {}
unsigned Level;
- llvm::SmallVector<std::unique_ptr<LineNode>> Tokens;
+ SmallVector<std::unique_ptr<LineNode>> Tokens;
};
// The line in which we collect the resulting reconstructed output.
@@ -285,7 +285,7 @@ private:
// Stack of currently "open" lines, where each line's predecessor's last
// token is the parent token for that line.
- llvm::SmallVector<ReconstructedLine *> ActiveReconstructedLines;
+ SmallVector<ReconstructedLine *> ActiveReconstructedLines;
// Maps from the expanded token to the token that takes its place in the
// reconstructed token stream in terms of parent-child relationships.
@@ -325,7 +325,7 @@ private:
};
// Stack of macro calls for which we're in the middle of an expansion.
- llvm::SmallVector<Expansion> ActiveExpansions;
+ SmallVector<Expansion> ActiveExpansions;
struct MacroCallState {
MacroCallState(ReconstructedLine *Line, FormatToken *ParentLastToken,
@@ -368,7 +368,7 @@ private:
// |- ,
// | \- <argument>
// \- )
- llvm::SmallVector<MacroCallState> MacroCallStructure;
+ SmallVector<MacroCallState> MacroCallStructure;
// Maps from identifier of the macro call to an unwrapped line containing
// all tokens of the macro call.
diff --git a/clang/lib/Format/SortJavaScriptImports.cpp b/clang/lib/Format/SortJavaScriptImports.cpp
index 1a6a1b19e702..1acce26ff279 100644
--- a/clang/lib/Format/SortJavaScriptImports.cpp
+++ b/clang/lib/Format/SortJavaScriptImports.cpp
@@ -34,8 +34,6 @@ namespace format {
class FormatTokenLexer;
-using clang::format::FormatStyle;
-
// An imported symbol in a JavaScript ES6 import/export, possibly aliased.
struct JsImportedSymbol {
StringRef Symbol;
@@ -178,7 +176,7 @@ public:
}
}
}
- llvm::StringRef PreviousText = getSourceText(InsertionPoint);
+ StringRef PreviousText = getSourceText(InsertionPoint);
if (ReferencesText == PreviousText)
return {Result, 0};
@@ -209,7 +207,7 @@ public:
// FIXME: better error handling. For now, just print error message and skip
// the replacement for the release version.
if (Err) {
- llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ llvm::errs() << toString(std::move(Err)) << "\n";
assert(false);
}
@@ -276,7 +274,7 @@ private:
SortChunk.push_back(*Start);
++Start;
}
- llvm::stable_sort(SortChunk);
+ stable_sort(SortChunk);
mergeModuleReferences(SortChunk);
ReferencesSorted.insert(ReferencesSorted.end(), SortChunk.begin(),
SortChunk.end());
@@ -334,10 +332,10 @@ private:
// Sort the individual symbols within the import.
// E.g. `import {b, a} from 'x';` -> `import {a, b} from 'x';`
SmallVector<JsImportedSymbol, 1> Symbols = Reference.Symbols;
- llvm::stable_sort(
- Symbols, [&](const JsImportedSymbol &LHS, const JsImportedSymbol &RHS) {
- return LHS.Symbol.compare_insensitive(RHS.Symbol) < 0;
- });
+ stable_sort(Symbols,
+ [&](const JsImportedSymbol &LHS, const JsImportedSymbol &RHS) {
+ return LHS.Symbol.compare_insensitive(RHS.Symbol) < 0;
+ });
if (!Reference.SymbolsMerged && Symbols == Reference.Symbols) {
// Symbols didn't change, just emit the entire module reference.
StringRef ReferenceStmt = getSourceText(Reference.Range);
@@ -349,7 +347,7 @@ private:
// ... then the references in order ...
if (!Symbols.empty()) {
Buffer += getSourceText(Symbols.front().Range);
- for (const JsImportedSymbol &Symbol : llvm::drop_begin(Symbols)) {
+ for (const JsImportedSymbol &Symbol : drop_begin(Symbols)) {
Buffer += ",";
Buffer += getSourceText(Symbol.Range);
}
diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp
index b15a87327240..b6f7567adc14 100644
--- a/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/clang/lib/Format/UnwrappedLineParser.cpp
@@ -1189,12 +1189,6 @@ void UnwrappedLineParser::parsePPDefine() {
return;
}
- if (FormatTok->is(tok::identifier) &&
- Tokens->peekNextToken()->is(tok::colon)) {
- nextToken();
- nextToken();
- }
-
// Errors during a preprocessor directive can only affect the layout of the
// preprocessor directive, and thus we ignore them. An alternative approach
// would be to use the same approach we use on the file level (no
@@ -1416,6 +1410,13 @@ void UnwrappedLineParser::readTokenWithJavaScriptASI() {
}
}
+static bool isAltOperator(const FormatToken &Tok) {
+ return isalpha(Tok.TokenText[0]) &&
+ Tok.isOneOf(tok::ampamp, tok::ampequal, tok::amp, tok::pipe,
+ tok::tilde, tok::exclaim, tok::exclaimequal, tok::pipepipe,
+ tok::pipeequal, tok::caret, tok::caretequal);
+}
+
void UnwrappedLineParser::parseStructuralElement(
const FormatToken *OpeningBrace, IfStmtKind *IfKind,
FormatToken **IfLeftBrace, bool *HasDoWhile, bool *HasLabel) {
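The helper above recognizes ISO 646 alternative operator spellings ("and", "xor", "bitor", ...). The re-typing in the loop that follows applies when such a token is itself followed by a binary operator, in which case an identifier reading is the plausible one. A runnable sketch feeding both cases through libFormat:

```cpp
#include "clang/Format/Format.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  // In C, ISO 646 spellings are plain identifiers unless <iso646.h> is
  // included, so "and" on the first line names a variable; the check above
  // re-types it because the next token ('=') is a binary operator, while
  // "a and b" keeps its operator reading.
  llvm::StringRef Code = "int and = 1;\nint x = a and b;\n";
  auto R = clang::format::reformat(clang::format::getLLVMStyle(), Code,
                                   {clang::tooling::Range(0, Code.size())},
                                   "input.c");
  auto Out = clang::tooling::applyAllReplacements(Code, R);
  if (Out)
    llvm::outs() << *Out;
  else
    llvm::consumeError(Out.takeError());
}
```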
@@ -1681,7 +1682,8 @@ void UnwrappedLineParser::parseStructuralElement(
if (!Style.isJavaScript() && !Style.isVerilog() && !Style.isTableGen() &&
Tokens->peekNextToken()->is(tok::colon) && !Line->MustBeDeclaration) {
nextToken();
- Line->Tokens.begin()->Tok->MustBreakBefore = true;
+ if (!Line->InMacroBody || CurrentLines->size() > 1)
+ Line->Tokens.begin()->Tok->MustBreakBefore = true;
FormatTok->setFinalizedType(TT_GotoLabelColon);
parseLabel(!Style.IndentGotoLabels);
if (HasLabel)
@@ -1694,9 +1696,15 @@ void UnwrappedLineParser::parseStructuralElement(
break;
}
- const bool InRequiresExpression =
- OpeningBrace && OpeningBrace->is(TT_RequiresExpressionLBrace);
- do {
+ for (const bool InRequiresExpression =
+ OpeningBrace && OpeningBrace->is(TT_RequiresExpressionLBrace);
+ !eof();) {
+ if (IsCpp && isAltOperator(*FormatTok)) {
+ if (auto *Next = Tokens->peekNextToken(/*SkipComment=*/true);
+ Next && Next->isBinaryOperator()) {
+ FormatTok->Tok.setKind(tok::identifier);
+ }
+ }
const FormatToken *Previous = FormatTok->Previous;
switch (FormatTok->Tok.getKind()) {
case tok::at:
@@ -2127,7 +2135,7 @@ void UnwrappedLineParser::parseStructuralElement(
nextToken();
break;
}
- } while (!eof());
+ }
}
bool UnwrappedLineParser::tryToParsePropertyAccessor() {
diff --git a/clang/lib/Format/WhitespaceManager.cpp b/clang/lib/Format/WhitespaceManager.cpp
index ed06d6098a9f..50531aee9d59 100644
--- a/clang/lib/Format/WhitespaceManager.cpp
+++ b/clang/lib/Format/WhitespaceManager.cpp
@@ -1245,22 +1245,29 @@ void WhitespaceManager::alignTrailingComments(unsigned Start, unsigned End,
}
void WhitespaceManager::alignEscapedNewlines() {
- if (Style.AlignEscapedNewlines == FormatStyle::ENAS_DontAlign)
+ const auto Align = Style.AlignEscapedNewlines;
+ if (Align == FormatStyle::ENAS_DontAlign)
return;
- bool AlignLeft = Style.AlignEscapedNewlines == FormatStyle::ENAS_Left;
- unsigned MaxEndOfLine = AlignLeft ? 0 : Style.ColumnLimit;
+ const bool WithLastLine = Align == FormatStyle::ENAS_LeftWithLastLine;
+ const bool AlignLeft = Align == FormatStyle::ENAS_Left || WithLastLine;
+ const auto MaxColumn = Style.ColumnLimit;
+ unsigned MaxEndOfLine = AlignLeft ? 0 : MaxColumn;
unsigned StartOfMacro = 0;
for (unsigned i = 1, e = Changes.size(); i < e; ++i) {
Change &C = Changes[i];
- if (C.NewlinesBefore > 0) {
- if (C.ContinuesPPDirective) {
- MaxEndOfLine = std::max(C.PreviousEndOfTokenColumn + 2, MaxEndOfLine);
- } else {
- alignEscapedNewlines(StartOfMacro + 1, i, MaxEndOfLine);
- MaxEndOfLine = AlignLeft ? 0 : Style.ColumnLimit;
- StartOfMacro = i;
- }
+ if (C.NewlinesBefore == 0 && (!WithLastLine || C.Tok->isNot(tok::eof)))
+ continue;
+ const bool InPPDirective = C.ContinuesPPDirective;
+ const auto BackslashColumn = C.PreviousEndOfTokenColumn + 2;
+ if (InPPDirective ||
+ (WithLastLine && (MaxColumn == 0 || BackslashColumn <= MaxColumn))) {
+ MaxEndOfLine = std::max(BackslashColumn, MaxEndOfLine);
+ }
+ if (!InPPDirective) {
+ alignEscapedNewlines(StartOfMacro + 1, i, MaxEndOfLine);
+ MaxEndOfLine = AlignLeft ? 0 : MaxColumn;
+ StartOfMacro = i;
}
}
alignEscapedNewlines(StartOfMacro + 1, Changes.size(), MaxEndOfLine);
diff --git a/clang/lib/Frontend/InitPreprocessor.cpp b/clang/lib/Frontend/InitPreprocessor.cpp
index c1d209466ffe..e8c8a5175f8f 100644
--- a/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/clang/lib/Frontend/InitPreprocessor.cpp
@@ -432,7 +432,8 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
// [C++] Whether __STDC__ is predefined and if so, what its value is,
// are implementation-defined.
// (Removed in C++20.)
- if (!LangOpts.MSVCCompat && !LangOpts.TraditionalCPP)
+ if ((!LangOpts.MSVCCompat || LangOpts.MSVCEnableStdcMacro) &&
+ !LangOpts.TraditionalCPP)
Builder.defineMacro("__STDC__");
// -- __STDC_HOSTED__
// The integer literal 1 if the implementation is a hosted
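From the preprocessor's side the interaction looks like this: compiled as C with -fms-compatibility, __STDC__ is suppressed to match MSVC, and the new -fms-define-stdc opt-in (wired up in the driver change earlier in this patch, and analogous to MSVC's /Zc:__STDC__) restores it. A sketch:

```cpp
// clang --target=x86_64-pc-windows-msvc -fms-compatibility x.c
//   -> __STDC__ undefined
// clang --target=x86_64-pc-windows-msvc -fms-compatibility -fms-define-stdc x.c
//   -> __STDC__ defined
#if defined(__STDC__)
#define HAVE_STANDARD_C 1
#else
#define HAVE_STANDARD_C 0
#endif
```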
@@ -1006,6 +1007,8 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
else if (LangOpts.hasDWARFExceptions() &&
(TI.getTriple().isThumb() || TI.getTriple().isARM()))
Builder.defineMacro("__ARM_DWARF_EH__");
+ else if (LangOpts.hasWasmExceptions() && TI.getTriple().isWasm())
+ Builder.defineMacro("__WASM_EXCEPTIONS__");
if (LangOpts.Deprecated)
Builder.defineMacro("__DEPRECATED");
diff --git a/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp b/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
index b76728acb907..0887b5a504f0 100644
--- a/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
+++ b/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
@@ -574,7 +574,7 @@ void SDiagsWriter::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
SmallString<256> diagnostic;
Info.FormatDiagnostic(diagnostic);
getMetaDiags()->Report(
- diag::warn_fe_serialized_diag_failure_during_finalisation)
+ diag::warn_fe_serialized_diag_failure_during_finalization)
<< diagnostic;
return;
}
diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt
index 5f02c71f6ca5..d3090e488306 100644
--- a/clang/lib/Headers/CMakeLists.txt
+++ b/clang/lib/Headers/CMakeLists.txt
@@ -153,12 +153,10 @@ set(x86_files
avx512bwintrin.h
avx512cdintrin.h
avx512dqintrin.h
- avx512erintrin.h
avx512fintrin.h
avx512fp16intrin.h
avx512ifmaintrin.h
avx512ifmavlintrin.h
- avx512pfintrin.h
avx512vbmi2intrin.h
avx512vbmiintrin.h
avx512vbmivlintrin.h
@@ -445,14 +443,14 @@ endforeach( f )
function(add_header_target target_name file_list)
add_library(${target_name} INTERFACE ${file_list})
set_target_properties(${target_name} PROPERTIES
- FOLDER "Misc"
+ FOLDER "Clang/Resources"
RUNTIME_OUTPUT_DIRECTORY "${output_dir}")
endfunction()
# The catch-all clang-resource-headers target
add_library(clang-resource-headers INTERFACE ${out_files})
set_target_properties("clang-resource-headers" PROPERTIES
- FOLDER "Misc"
+ FOLDER "Clang/Resources"
RUNTIME_OUTPUT_DIRECTORY "${output_dir}")
add_dependencies("clang-resource-headers"
"core-resource-headers"
diff --git a/clang/lib/Headers/avx512erintrin.h b/clang/lib/Headers/avx512erintrin.h
deleted file mode 100644
index 1c5a2d2d208f..000000000000
--- a/clang/lib/Headers/avx512erintrin.h
+++ /dev/null
@@ -1,271 +0,0 @@
-/*===---- avx512erintrin.h - AVX512ER intrinsics ---------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __IMMINTRIN_H
-#error "Never use <avx512erintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX512ERINTRIN_H
-#define __AVX512ERINTRIN_H
-
-/* exp2a23 */
-#define _mm512_exp2a23_round_pd(A, R) \
- ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_exp2a23_round_pd(S, M, A, R) \
- ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(S), (__mmask8)(M), \
- (int)(R)))
-
-#define _mm512_maskz_exp2a23_round_pd(M, A, R) \
- ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm512_exp2a23_pd(A) \
- _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_exp2a23_pd(S, M, A) \
- _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_exp2a23_pd(M, A) \
- _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_exp2a23_round_ps(A, R) \
- ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_exp2a23_round_ps(S, M, A, R) \
- ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(S), (__mmask16)(M), \
- (int)(R)))
-
-#define _mm512_maskz_exp2a23_round_ps(M, A, R) \
- ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(M), (int)(R)))
-
-#define _mm512_exp2a23_ps(A) \
- _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_exp2a23_ps(S, M, A) \
- _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_exp2a23_ps(M, A) \
- _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
-
-/* rsqrt28 */
-#define _mm512_rsqrt28_round_pd(A, R) \
- ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_rsqrt28_round_pd(S, M, A, R) \
- ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(S), (__mmask8)(M), \
- (int)(R)))
-
-#define _mm512_maskz_rsqrt28_round_pd(M, A, R) \
- ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm512_rsqrt28_pd(A) \
- _mm512_rsqrt28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_rsqrt28_pd(S, M, A) \
- _mm512_mask_rsqrt28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_rsqrt28_pd(M, A) \
- _mm512_maskz_rsqrt28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_rsqrt28_round_ps(A, R) \
- ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_rsqrt28_round_ps(S, M, A, R) \
- ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(S), (__mmask16)(M), \
- (int)(R)))
-
-#define _mm512_maskz_rsqrt28_round_ps(M, A, R) \
- ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(M), (int)(R)))
-
-#define _mm512_rsqrt28_ps(A) \
- _mm512_rsqrt28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_rsqrt28_ps(S, M, A) \
- _mm512_mask_rsqrt28_round_ps((S), (M), A, _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_rsqrt28_ps(M, A) \
- _mm512_maskz_rsqrt28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_rsqrt28_round_ss(A, B, R) \
- ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) \
- ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(S), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm_maskz_rsqrt28_round_ss(M, A, B, R) \
- ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm_rsqrt28_ss(A, B) \
- _mm_rsqrt28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_rsqrt28_ss(S, M, A, B) \
- _mm_mask_rsqrt28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_rsqrt28_ss(M, A, B) \
- _mm_maskz_rsqrt28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_rsqrt28_round_sd(A, B, R) \
- ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) \
- ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(S), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm_maskz_rsqrt28_round_sd(M, A, B, R) \
- ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm_rsqrt28_sd(A, B) \
- _mm_rsqrt28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_rsqrt28_sd(S, M, A, B) \
- _mm_mask_rsqrt28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_rsqrt28_sd(M, A, B) \
- _mm_maskz_rsqrt28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-/* rcp28 */
-#define _mm512_rcp28_round_pd(A, R) \
- ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R)))
-
-#define _mm512_mask_rcp28_round_pd(S, M, A, R) \
- ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(S), (__mmask8)(M), \
- (int)(R)))
-
-#define _mm512_maskz_rcp28_round_pd(M, A, R) \
- ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm512_rcp28_pd(A) \
- _mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_rcp28_pd(S, M, A) \
- _mm512_mask_rcp28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_rcp28_pd(M, A) \
- _mm512_maskz_rcp28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_rcp28_round_ps(A, R) \
- ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R)))
-
-#define _mm512_mask_rcp28_round_ps(S, M, A, R) \
- ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(S), (__mmask16)(M), \
- (int)(R)))
-
-#define _mm512_maskz_rcp28_round_ps(M, A, R) \
- ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(M), (int)(R)))
-
-#define _mm512_rcp28_ps(A) \
- _mm512_rcp28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_rcp28_ps(S, M, A) \
- _mm512_mask_rcp28_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_rcp28_ps(M, A) \
- _mm512_maskz_rcp28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_rcp28_round_ss(A, B, R) \
- ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_rcp28_round_ss(S, M, A, B, R) \
- ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(S), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm_maskz_rcp28_round_ss(M, A, B, R) \
- ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm_rcp28_ss(A, B) \
- _mm_rcp28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_rcp28_ss(S, M, A, B) \
- _mm_mask_rcp28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_rcp28_ss(M, A, B) \
- _mm_maskz_rcp28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_rcp28_round_sd(A, B, R) \
- ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R)))
-
-#define _mm_mask_rcp28_round_sd(S, M, A, B, R) \
- ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(S), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm_maskz_rcp28_round_sd(M, A, B, R) \
- ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(M), (int)(R)))
-
-#define _mm_rcp28_sd(A, B) \
- _mm_rcp28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_rcp28_sd(S, M, A, B) \
- _mm_mask_rcp28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_rcp28_sd(M, A, B) \
- _mm_maskz_rcp28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-
-#endif /* __AVX512ERINTRIN_H */
diff --git a/clang/lib/Headers/avx512pfintrin.h b/clang/lib/Headers/avx512pfintrin.h
deleted file mode 100644
index f853be021a2d..000000000000
--- a/clang/lib/Headers/avx512pfintrin.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*===------------- avx512pfintrin.h - PF intrinsics ------------------------===
- *
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __IMMINTRIN_H
-#error "Never use <avx512pfintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX512PFINTRIN_H
-#define __AVX512PFINTRIN_H
-
-#define _mm512_mask_prefetch_i32gather_pd(index, mask, addr, scale, hint) \
- __builtin_ia32_gatherpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \
- (void const *)(addr), (int)(scale), \
- (int)(hint))
-
-#define _mm512_prefetch_i32gather_pd(index, addr, scale, hint) \
- __builtin_ia32_gatherpfdpd((__mmask8) -1, (__v8si)(__m256i)(index), \
- (void const *)(addr), (int)(scale), \
- (int)(hint))
-
-#define _mm512_mask_prefetch_i32gather_ps(index, mask, addr, scale, hint) \
- __builtin_ia32_gatherpfdps((__mmask16)(mask), \
- (__v16si)(__m512i)(index), (void const *)(addr), \
- (int)(scale), (int)(hint))
-
-#define _mm512_prefetch_i32gather_ps(index, addr, scale, hint) \
- __builtin_ia32_gatherpfdps((__mmask16) -1, \
- (__v16si)(__m512i)(index), (void const *)(addr), \
- (int)(scale), (int)(hint))
-
-#define _mm512_mask_prefetch_i64gather_pd(index, mask, addr, scale, hint) \
- __builtin_ia32_gatherpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \
- (void const *)(addr), (int)(scale), \
- (int)(hint))
-
-#define _mm512_prefetch_i64gather_pd(index, addr, scale, hint) \
- __builtin_ia32_gatherpfqpd((__mmask8) -1, (__v8di)(__m512i)(index), \
- (void const *)(addr), (int)(scale), \
- (int)(hint))
-
-#define _mm512_mask_prefetch_i64gather_ps(index, mask, addr, scale, hint) \
- __builtin_ia32_gatherpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \
- (void const *)(addr), (int)(scale), (int)(hint))
-
-#define _mm512_prefetch_i64gather_ps(index, addr, scale, hint) \
- __builtin_ia32_gatherpfqps((__mmask8) -1, (__v8di)(__m512i)(index), \
- (void const *)(addr), (int)(scale), (int)(hint))
-
-#define _mm512_prefetch_i32scatter_pd(addr, index, scale, hint) \
- __builtin_ia32_scatterpfdpd((__mmask8)-1, (__v8si)(__m256i)(index), \
- (void *)(addr), (int)(scale), \
- (int)(hint))
-
-#define _mm512_mask_prefetch_i32scatter_pd(addr, mask, index, scale, hint) \
- __builtin_ia32_scatterpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \
- (void *)(addr), (int)(scale), \
- (int)(hint))
-
-#define _mm512_prefetch_i32scatter_ps(addr, index, scale, hint) \
- __builtin_ia32_scatterpfdps((__mmask16)-1, (__v16si)(__m512i)(index), \
- (void *)(addr), (int)(scale), (int)(hint))
-
-#define _mm512_mask_prefetch_i32scatter_ps(addr, mask, index, scale, hint) \
- __builtin_ia32_scatterpfdps((__mmask16)(mask), \
- (__v16si)(__m512i)(index), (void *)(addr), \
- (int)(scale), (int)(hint))
-
-#define _mm512_prefetch_i64scatter_pd(addr, index, scale, hint) \
- __builtin_ia32_scatterpfqpd((__mmask8)-1, (__v8di)(__m512i)(index), \
- (void *)(addr), (int)(scale), \
- (int)(hint))
-
-#define _mm512_mask_prefetch_i64scatter_pd(addr, mask, index, scale, hint) \
- __builtin_ia32_scatterpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \
- (void *)(addr), (int)(scale), \
- (int)(hint))
-
-#define _mm512_prefetch_i64scatter_ps(addr, index, scale, hint) \
- __builtin_ia32_scatterpfqps((__mmask8)-1, (__v8di)(__m512i)(index), \
- (void *)(addr), (int)(scale), (int)(hint))
-
-#define _mm512_mask_prefetch_i64scatter_ps(addr, mask, index, scale, hint) \
- __builtin_ia32_scatterpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \
- (void *)(addr), (int)(scale), (int)(hint))
-
-#endif
diff --git a/clang/lib/Headers/hlsl/hlsl_intrinsics.h b/clang/lib/Headers/hlsl/hlsl_intrinsics.h
index 3390f0962f67..bc72e8a00e0d 100644
--- a/clang/lib/Headers/hlsl/hlsl_intrinsics.h
+++ b/clang/lib/Headers/hlsl/hlsl_intrinsics.h
@@ -18,14 +18,21 @@ namespace hlsl {
#define _HLSL_BUILTIN_ALIAS(builtin) \
__attribute__((clang_builtin_alias(builtin)))
-#define _HLSL_AVAILABILITY(environment, version) \
- __attribute__((availability(environment, introduced = version)))
+#define _HLSL_AVAILABILITY(platform, version) \
+ __attribute__((availability(platform, introduced = version)))
+#define _HLSL_AVAILABILITY_STAGE(platform, version, stage) \
+ __attribute__(( \
+ availability(platform, introduced = version, environment = stage)))
#ifdef __HLSL_ENABLE_16_BIT
-#define _HLSL_16BIT_AVAILABILITY(environment, version) \
- __attribute__((availability(environment, introduced = version)))
+#define _HLSL_16BIT_AVAILABILITY(platform, version) \
+ __attribute__((availability(platform, introduced = version)))
+#define _HLSL_16BIT_AVAILABILITY_STAGE(platform, version, stage) \
+ __attribute__(( \
+ availability(platform, introduced = version, environment = stage)))
#else
#define _HLSL_16BIT_AVAILABILITY(environment, version)
+#define _HLSL_16BIT_AVAILABILITY_STAGE(environment, version, stage)
#endif
//===----------------------------------------------------------------------===//
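A sketch of how a declaration might use the new stage-specific macro; the `shadermodel` platform name and the `compute` stage here are illustrative assumptions, not taken from the patch:

```cpp
// Expands to __attribute__((availability(shadermodel, introduced = 6.6,
// environment = compute))): the declaration is only considered available
// when compiling a compute-stage shader for that version or newer.
_HLSL_AVAILABILITY_STAGE(shadermodel, 6.6, compute)
float WaveMadeUpOp(float v); // hypothetical intrinsic, for illustration only
```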
diff --git a/clang/lib/Headers/immintrin.h b/clang/lib/Headers/immintrin.h
index 508696d3725b..cd6cf09b90ca 100644
--- a/clang/lib/Headers/immintrin.h
+++ b/clang/lib/Headers/immintrin.h
@@ -151,10 +151,6 @@
#include <avx512vldqintrin.h>
#endif
-#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512ER__)
-#include <avx512erintrin.h>
-#endif
-
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512IFMA__)
#include <avx512ifmaintrin.h>
#endif
@@ -186,10 +182,6 @@
#include <avx512vlvbmi2intrin.h>
#endif
-#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512PF__)
-#include <avx512pfintrin.h>
-#endif
-
#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512FP16__)
#include <avx512fp16intrin.h>
#endif
diff --git a/clang/lib/Headers/intrin.h b/clang/lib/Headers/intrin.h
index 7eb6dceaabfa..5ceb986a1f65 100644
--- a/clang/lib/Headers/intrin.h
+++ b/clang/lib/Headers/intrin.h
@@ -378,7 +378,7 @@ unsigned int _CountLeadingSigns64(__int64);
unsigned int _CountOneBits(unsigned long);
unsigned int _CountOneBits64(unsigned __int64);
-void __cdecl __prefetch(void *);
+void __cdecl __prefetch(const void *);
#endif
/*----------------------------------------------------------------------------*\
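
Widening __prefetch to const void * lets callers prefetch through pointers-to-const without a cast. A minimal sketch, assuming an ARM64 MSVC-compatible target where this intrinsic block is declared:

    const int *p = table;   /* 'table' is some const data (illustrative) */
    __prefetch(p);          /* now well-formed; previously needed (void *)p */
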
diff --git a/clang/lib/Headers/module.modulemap b/clang/lib/Headers/module.modulemap
index 4abfd1d98a63..9ffc249c8d1a 100644
--- a/clang/lib/Headers/module.modulemap
+++ b/clang/lib/Headers/module.modulemap
@@ -44,7 +44,6 @@ module _Builtin_intrinsics [system] [extern_c] {
textual header "avxintrin.h"
textual header "avx2intrin.h"
textual header "avx512fintrin.h"
- textual header "avx512erintrin.h"
textual header "fmaintrin.h"
header "x86intrin.h"
diff --git a/clang/lib/Headers/opencl-c-base.h b/clang/lib/Headers/opencl-c-base.h
index 2494f6213fc5..786678b9d8a7 100644
--- a/clang/lib/Headers/opencl-c-base.h
+++ b/clang/lib/Headers/opencl-c-base.h
@@ -46,6 +46,10 @@
#define __opencl_c_ext_fp32_global_atomic_min_max 1
#define __opencl_c_ext_fp32_local_atomic_min_max 1
#define __opencl_c_ext_image_raw10_raw12 1
+#define cl_khr_kernel_clock 1
+#define __opencl_c_kernel_clock_scope_device 1
+#define __opencl_c_kernel_clock_scope_work_group 1
+#define __opencl_c_kernel_clock_scope_sub_group 1
#endif // defined(__SPIR__) || defined(__SPIRV__)
#endif // (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ >= 200)
diff --git a/clang/lib/Headers/opencl-c.h b/clang/lib/Headers/opencl-c.h
index 288bb18bc654..20719b74b6b8 100644
--- a/clang/lib/Headers/opencl-c.h
+++ b/clang/lib/Headers/opencl-c.h
@@ -17314,6 +17314,21 @@ half __ovld __conv sub_group_clustered_rotate(half, int, uint);
#endif // cl_khr_fp16
#endif // cl_khr_subgroup_rotate
+#if defined(cl_khr_kernel_clock)
+#if defined(__opencl_c_kernel_clock_scope_device)
+ulong __ovld clock_read_device();
+uint2 __ovld clock_read_hilo_device();
+#endif // __opencl_c_kernel_clock_scope_device
+#if defined(__opencl_c_kernel_clock_scope_work_group)
+ulong __ovld clock_read_work_group();
+uint2 __ovld clock_read_hilo_work_group();
+#endif // __opencl_c_kernel_clock_scope_work_group
+#if defined(__opencl_c_kernel_clock_scope_sub_group)
+ulong __ovld clock_read_sub_group();
+uint2 __ovld clock_read_hilo_sub_group();
+#endif // __opencl_c_kernel_clock_scope_sub_group
+#endif // cl_khr_kernel_clock
+
#if defined(cl_intel_subgroups)
// Intel-Specific Sub Group Functions
float __ovld __conv intel_sub_group_shuffle( float , uint );
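
Together with the feature macros added to opencl-c-base.h above, these declarations expose the cl_khr_kernel_clock sampling builtins. A minimal OpenCL C sketch, assuming a device that actually reports the extension:

    __kernel void time_block(__global ulong *out) {
      ulong t0 = clock_read_device();
      /* ... region under measurement ... */
      ulong t1 = clock_read_device();
      out[get_global_id(0)] = t1 - t0;   /* device-scope tick delta */
    }
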
diff --git a/clang/lib/Index/CommentToXML.cpp b/clang/lib/Index/CommentToXML.cpp
index 295f3f228ff7..3372fbba4383 100644
--- a/clang/lib/Index/CommentToXML.cpp
+++ b/clang/lib/Index/CommentToXML.cpp
@@ -12,6 +12,7 @@
#include "clang/AST/Comment.h"
#include "clang/AST/CommentVisitor.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include "clang/Index/USRGeneration.h"
@@ -1052,6 +1053,11 @@ void CommentASTToXMLConverter::visitFullComment(const FullComment *C) {
}
if (AA->getUnavailable())
Result << "<Unavailable/>";
+
+ IdentifierInfo *Environment = AA->getEnvironment();
+ if (Environment) {
+ Result << "<Environment>" << Environment->getName() << "</Environment>";
+ }
Result << "</Availability>";
}
}
diff --git a/clang/lib/Index/IndexDecl.cpp b/clang/lib/Index/IndexDecl.cpp
index 8eb88f5a1e94..a7fa6c5e6898 100644
--- a/clang/lib/Index/IndexDecl.cpp
+++ b/clang/lib/Index/IndexDecl.cpp
@@ -703,14 +703,16 @@ public:
IndexCtx.handleDecl(TP);
if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(TP)) {
if (TTP->hasDefaultArgument())
- IndexCtx.indexTypeSourceInfo(TTP->getDefaultArgumentInfo(), Parent);
+ handleTemplateArgumentLoc(TTP->getDefaultArgument(), Parent,
+ TP->getLexicalDeclContext());
if (auto *C = TTP->getTypeConstraint())
IndexCtx.handleReference(C->getNamedConcept(), C->getConceptNameLoc(),
Parent, TTP->getLexicalDeclContext());
} else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(TP)) {
IndexCtx.indexTypeSourceInfo(NTTP->getTypeSourceInfo(), Parent);
if (NTTP->hasDefaultArgument())
- IndexCtx.indexBody(NTTP->getDefaultArgument(), Parent);
+ handleTemplateArgumentLoc(NTTP->getDefaultArgument(), Parent,
+ TP->getLexicalDeclContext());
} else if (const auto *TTPD = dyn_cast<TemplateTemplateParmDecl>(TP)) {
if (TTPD->hasDefaultArgument())
handleTemplateArgumentLoc(TTPD->getDefaultArgument(), Parent,
diff --git a/clang/lib/Parse/ParseDecl.cpp b/clang/lib/Parse/ParseDecl.cpp
index 8405b44685ae..c52891743733 100644
--- a/clang/lib/Parse/ParseDecl.cpp
+++ b/clang/lib/Parse/ParseDecl.cpp
@@ -467,6 +467,11 @@ bool Parser::ParseAttributeArgumentList(
break;
}
+ if (Actions.DiagnoseUnexpandedParameterPack(Expr.get())) {
+ SawError = true;
+ break;
+ }
+
Exprs.push_back(Expr.get());
if (Tok.isNot(tok::comma))
@@ -666,6 +671,9 @@ void Parser::ParseGNUAttributeArgs(
ParseBoundsAttribute(*AttrName, AttrNameLoc, Attrs, ScopeName, ScopeLoc,
Form);
return;
+ } else if (AttrKind == ParsedAttr::AT_CXXAssume) {
+ ParseCXXAssumeAttributeArg(Attrs, AttrName, AttrNameLoc, EndLoc, Form);
+ return;
}
// These may refer to the function arguments, but need to be parsed early to
@@ -720,6 +728,10 @@ unsigned Parser::ParseClangAttributeArgs(
ParseTypeTagForDatatypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
ScopeName, ScopeLoc, Form);
break;
+
+ case ParsedAttr::AT_CXXAssume:
+ ParseCXXAssumeAttributeArg(Attrs, AttrName, AttrNameLoc, EndLoc, Form);
+ break;
}
return !Attrs.empty() ? Attrs.begin()->getNumArgs() : 0;
}
@@ -1256,6 +1268,7 @@ void Parser::ParseAvailabilityAttribute(
enum { Introduced, Deprecated, Obsoleted, Unknown };
AvailabilityChange Changes[Unknown];
ExprResult MessageExpr, ReplacementExpr;
+ IdentifierLoc *EnvironmentLoc = nullptr;
// Opening '('.
BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -1303,6 +1316,7 @@ void Parser::ParseAvailabilityAttribute(
Ident_message = PP.getIdentifierInfo("message");
Ident_strict = PP.getIdentifierInfo("strict");
Ident_replacement = PP.getIdentifierInfo("replacement");
+ Ident_environment = PP.getIdentifierInfo("environment");
}
// Parse the optional "strict", the optional "replacement" and the set of
@@ -1350,6 +1364,13 @@ void Parser::ParseAvailabilityAttribute(
continue;
}
+ if (Keyword == Ident_environment) {
+ if (EnvironmentLoc != nullptr) {
+ Diag(KeywordLoc, diag::err_availability_redundant)
+ << Keyword << SourceRange(EnvironmentLoc->Loc);
+ }
+ }
+
if (Tok.isNot(tok::equal)) {
Diag(Tok, diag::err_expected_after) << Keyword << tok::equal;
SkipUntil(tok::r_paren, StopAtSemi);
@@ -1371,6 +1392,15 @@ void Parser::ParseAvailabilityAttribute(
continue;
}
}
+ if (Keyword == Ident_environment) {
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_availability_expected_environment);
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+ EnvironmentLoc = ParseIdentifierLoc();
+ continue;
+ }
// Special handling of 'NA' only when applied to introduced or
// deprecated.
@@ -1452,7 +1482,7 @@ void Parser::ParseAvailabilityAttribute(
SourceRange(AvailabilityLoc, T.getCloseLocation()), ScopeName,
ScopeLoc, Platform, Changes[Introduced], Changes[Deprecated],
Changes[Obsoleted], UnavailableLoc, MessageExpr.get(), Form,
- StrictLoc, ReplacementExpr.get());
+ StrictLoc, ReplacementExpr.get(), EnvironmentLoc);
}
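
The parsing added above accepts one environment=<identifier> clause per availability attribute, diagnosing duplicates (err_availability_redundant) and non-identifier arguments (err_availability_expected_environment). A minimal sketch of the accepted surface syntax; the platform and stage names are illustrative:

    __attribute__((availability(shadermodel, introduced = 6.0,
                                environment = compute)))
    void f();
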
/// Parse the contents of the "external_source_symbol" attribute.
@@ -1905,9 +1935,8 @@ void Parser::DiagnoseCXX11AttributeExtension(ParsedAttributes &Attrs) {
// variable.
// This function moves attributes that should apply to the type off DS to Attrs.
void Parser::stripTypeAttributesOffDeclSpec(ParsedAttributes &Attrs,
- DeclSpec &DS,
- Sema::TagUseKind TUK) {
- if (TUK == Sema::TUK_Reference)
+ DeclSpec &DS, TagUseKind TUK) {
+ if (TUK == TagUseKind::Reference)
return;
llvm::SmallVector<ParsedAttr *, 1> ToBeMoved;
@@ -3290,14 +3319,14 @@ void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs,
void Parser::DistributeCLateParsedAttrs(Decl *Dcl,
LateParsedAttrList *LateAttrs) {
- assert(Dcl && "Dcl cannot be null");
-
if (!LateAttrs)
return;
- for (auto *LateAttr : *LateAttrs) {
- if (LateAttr->Decls.empty())
- LateAttr->addDecl(Dcl);
+ if (Dcl) {
+ for (auto *LateAttr : *LateAttrs) {
+ if (LateAttr->Decls.empty())
+ LateAttr->addDecl(Dcl);
+ }
}
}
@@ -4926,12 +4955,25 @@ void Parser::ParseStructDeclaration(
}
}
+// TODO: All callers of this function should be moved to
+// `Parser::ParseLexedAttributeList`.
+void Parser::ParseLexedCAttributeList(LateParsedAttrList &LAs, bool EnterScope,
+ ParsedAttributes *OutAttrs) {
+ assert(LAs.parseSoon() &&
+ "Attribute list should be marked for immediate parsing.");
+ for (auto *LA : LAs) {
+ ParseLexedCAttribute(*LA, EnterScope, OutAttrs);
+ delete LA;
+ }
+ LAs.clear();
+}
+
/// Finish parsing an attribute for which parsing was delayed.
/// This will be called at the end of parsing a class declaration
/// for each LateParsedAttribute. We consume the saved tokens and
/// create an attribute with the arguments filled in. We add this
/// to the Attribute list for the decl.
-void Parser::ParseLexedCAttribute(LateParsedAttribute &LA,
+void Parser::ParseLexedCAttribute(LateParsedAttribute &LA, bool EnterScope,
ParsedAttributes *OutAttrs) {
// Create a fake EOF so that attribute parsing won't go off the end of the
// attribute.
@@ -4951,26 +4993,17 @@ void Parser::ParseLexedCAttribute(LateParsedAttribute &LA,
// as when we entered this function.
ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
+ // TODO: Use `EnterScope`
+ (void)EnterScope;
+
ParsedAttributes Attrs(AttrFactory);
assert(LA.Decls.size() <= 1 &&
"late field attribute expects to have at most one declaration.");
// Dispatch based on the attribute and parse it
- const AttributeCommonInfo::Form ParsedForm = ParsedAttr::Form::GNU();
- IdentifierInfo *ScopeName = nullptr;
- const ParsedAttr::Kind AttrKind =
- ParsedAttr::getParsedKind(&LA.AttrName, /*ScopeName=*/ScopeName,
- /*SyntaxUsed=*/ParsedForm.getSyntax());
- switch (AttrKind) {
- case ParsedAttr::Kind::AT_CountedBy:
- ParseBoundsAttribute(LA.AttrName, LA.AttrNameLoc, Attrs,
- /*ScopeName=*/ScopeName, SourceLocation(),
- /*Form=*/ParsedForm);
- break;
- default:
- llvm_unreachable("Unhandled late parsed attribute");
- }
+ ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, nullptr, nullptr,
+ SourceLocation(), ParsedAttr::Form::GNU(), nullptr);
for (auto *D : LA.Decls)
Actions.ActOnFinishDelayedAttribute(getCurScope(), D, Attrs);
@@ -5014,7 +5047,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
// `LateAttrParseExperimentalExtOnly=true` requests that only attributes
// marked with `LateAttrParseExperimentalExt` are late parsed.
- LateParsedAttrList LateFieldAttrs(/*PSoon=*/false,
+ LateParsedAttrList LateFieldAttrs(/*PSoon=*/true,
/*LateAttrParseExperimentalExtOnly=*/true);
// While we still have something to read, read the declarations in the struct.
@@ -5123,9 +5156,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
MaybeParseGNUAttributes(attrs, &LateFieldAttrs);
// Late parse field attributes if necessary.
- assert(!getLangOpts().CPlusPlus);
- for (auto *LateAttr : LateFieldAttrs)
- ParseLexedCAttribute(*LateAttr);
+ ParseLexedCAttributeList(LateFieldAttrs, /*EnterScope=*/false);
SmallVector<Decl *, 32> FieldDecls(TagDecl->fields());
@@ -5359,9 +5390,9 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// enum foo {..}; void bar() { enum foo; } <- new foo in bar.
// enum foo {..}; void bar() { enum foo x; } <- use of old foo.
//
- Sema::TagUseKind TUK;
+ TagUseKind TUK;
if (AllowEnumSpecifier == AllowDefiningTypeSpec::No)
- TUK = Sema::TUK_Reference;
+ TUK = TagUseKind::Reference;
else if (Tok.is(tok::l_brace)) {
if (DS.isFriendSpecified()) {
Diag(Tok.getLocation(), diag::err_friend_decl_defines_type)
@@ -5373,9 +5404,9 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
ScopedEnumKWLoc = SourceLocation();
IsScopedUsingClassTag = false;
BaseType = TypeResult();
- TUK = Sema::TUK_Friend;
+ TUK = TagUseKind::Friend;
} else {
- TUK = Sema::TUK_Definition;
+ TUK = TagUseKind::Definition;
}
} else if (!isTypeSpecifier(DSC) &&
(Tok.is(tok::semi) ||
@@ -5384,7 +5415,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// An opaque-enum-declaration is required to be standalone (no preceding or
// following tokens in the declaration). Sema enforces this separately by
// diagnosing anything else in the DeclSpec.
- TUK = DS.isFriendSpecified() ? Sema::TUK_Friend : Sema::TUK_Declaration;
+ TUK = DS.isFriendSpecified() ? TagUseKind::Friend : TagUseKind::Declaration;
if (Tok.isNot(tok::semi)) {
// A semicolon was missing after this declaration. Diagnose and recover.
ExpectAndConsume(tok::semi, diag::err_expected_after, "enum");
@@ -5392,21 +5423,21 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
Tok.setKind(tok::semi);
}
} else {
- TUK = Sema::TUK_Reference;
+ TUK = TagUseKind::Reference;
}
bool IsElaboratedTypeSpecifier =
- TUK == Sema::TUK_Reference || TUK == Sema::TUK_Friend;
+ TUK == TagUseKind::Reference || TUK == TagUseKind::Friend;
// If this is an elaborated type specifier nested in a larger declaration,
// and we delayed diagnostics before, just merge them into the current pool.
- if (TUK == Sema::TUK_Reference && shouldDelayDiagsInTag) {
+ if (TUK == TagUseKind::Reference && shouldDelayDiagsInTag) {
diagsFromTag.redelay();
}
MultiTemplateParamsArg TParams;
if (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate &&
- TUK != Sema::TUK_Reference) {
+ TUK != TagUseKind::Reference) {
if (!getLangOpts().CPlusPlus11 || !SS.isSet()) {
// Skip the rest of this declarator, up until the comma or semicolon.
Diag(Tok, diag::err_enum_template);
@@ -5427,7 +5458,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
SS.setTemplateParamLists(TParams);
}
- if (!Name && TUK != Sema::TUK_Definition) {
+ if (!Name && TUK != TagUseKind::Definition) {
Diag(Tok, diag::err_enumerator_unnamed_no_def);
DS.SetTypeSpecError();
@@ -5460,7 +5491,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
stripTypeAttributesOffDeclSpec(attrs, DS, TUK);
SkipBodyInfo SkipBody;
- if (!Name && TUK == Sema::TUK_Definition && Tok.is(tok::l_brace) &&
+ if (!Name && TUK == TagUseKind::Definition && Tok.is(tok::l_brace) &&
NextToken().is(tok::identifier))
SkipBody = Actions.shouldSkipAnonEnumBody(getCurScope(),
NextToken().getIdentifierInfo(),
@@ -5481,7 +5512,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
OffsetOfState, &SkipBody).get();
if (SkipBody.ShouldSkip) {
- assert(TUK == Sema::TUK_Definition && "can only skip a definition");
+ assert(TUK == TagUseKind::Definition && "can only skip a definition");
BalancedDelimiterTracker T(*this, tok::l_brace);
T.consumeOpen();
@@ -5523,7 +5554,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
if (!TagDecl) {
// The action failed to produce an enumeration tag. If this is a
// definition, consume the entire definition.
- if (Tok.is(tok::l_brace) && TUK != Sema::TUK_Reference) {
+ if (Tok.is(tok::l_brace) && TUK != TagUseKind::Reference) {
ConsumeBrace();
SkipUntil(tok::r_brace, StopAtSemi);
}
@@ -5532,7 +5563,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
return;
}
- if (Tok.is(tok::l_brace) && TUK == Sema::TUK_Definition) {
+ if (Tok.is(tok::l_brace) && TUK == TagUseKind::Definition) {
Decl *D = SkipBody.CheckSameAsPrevious ? SkipBody.New : TagDecl;
ParseEnumBody(StartLoc, D);
if (SkipBody.CheckSameAsPrevious &&
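
The late-parsed-attribute rework above replaces the hard-coded AT_CountedBy dispatch with the generic ParseGNUAttributeArgs path and switches struct bodies to immediate late parsing (PSoon=true). The motivating shape is a bounds attribute whose argument names a sibling field, which can only be resolved once the whole struct has been seen. A minimal C sketch:

    struct packet {
      int len;
      /* 'len' is a sibling field, so the attribute argument is
         late-parsed after the struct body completes. */
      int payload[] __attribute__((counted_by(len)));
    };
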
diff --git a/clang/lib/Parse/ParseDeclCXX.cpp b/clang/lib/Parse/ParseDeclCXX.cpp
index 5eaec2b621e6..9a4a777f575b 100644
--- a/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/clang/lib/Parse/ParseDeclCXX.cpp
@@ -1961,11 +1961,11 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
MaybeParseCXX11Attributes(Attributes);
const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
- Sema::TagUseKind TUK;
+ TagUseKind TUK;
if (isDefiningTypeSpecifierContext(DSC, getLangOpts().CPlusPlus) ==
AllowDefiningTypeSpec::No ||
(getLangOpts().OpenMP && OpenMPDirectiveParsing))
- TUK = Sema::TUK_Reference;
+ TUK = TagUseKind::Reference;
else if (Tok.is(tok::l_brace) ||
(DSC != DeclSpecContext::DSC_association &&
getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
@@ -1980,10 +1980,10 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// Skip everything up to the semicolon, so that this looks like a proper
// friend class (or template thereof) declaration.
SkipUntil(tok::semi, StopBeforeMatch);
- TUK = Sema::TUK_Friend;
+ TUK = TagUseKind::Friend;
} else {
// Okay, this is a class definition.
- TUK = Sema::TUK_Definition;
+ TUK = TagUseKind::Definition;
}
} else if (isClassCompatibleKeyword() &&
(NextToken().is(tok::l_square) ||
@@ -2024,15 +2024,15 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
}
if (Tok.isOneOf(tok::l_brace, tok::colon))
- TUK = Sema::TUK_Definition;
+ TUK = TagUseKind::Definition;
else
- TUK = Sema::TUK_Reference;
+ TUK = TagUseKind::Reference;
PA.Revert();
} else if (!isTypeSpecifier(DSC) &&
(Tok.is(tok::semi) ||
(Tok.isAtStartOfLine() && !isValidAfterTypeSpecifier(false)))) {
- TUK = DS.isFriendSpecified() ? Sema::TUK_Friend : Sema::TUK_Declaration;
+ TUK = DS.isFriendSpecified() ? TagUseKind::Friend : TagUseKind::Declaration;
if (Tok.isNot(tok::semi)) {
const PrintingPolicy &PPol = Actions.getASTContext().getPrintingPolicy();
// A semicolon was missing after this declaration. Diagnose and recover.
@@ -2042,11 +2042,11 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
Tok.setKind(tok::semi);
}
} else
- TUK = Sema::TUK_Reference;
+ TUK = TagUseKind::Reference;
// Forbid misplaced attributes. In cases of a reference, we pass attributes
// to caller to handle.
- if (TUK != Sema::TUK_Reference) {
+ if (TUK != TagUseKind::Reference) {
// If this is not a reference, then the only possible
// valid place for C++11 attributes to appear here
// is between class-key and class-name. If there are
@@ -2072,7 +2072,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
if (!Name && !TemplateId &&
(DS.getTypeSpecType() == DeclSpec::TST_error ||
- TUK != Sema::TUK_Definition)) {
+ TUK != TagUseKind::Definition)) {
if (DS.getTypeSpecType() != DeclSpec::TST_error) {
// We have a declaration or reference to an anonymous class.
Diag(StartLoc, diag::err_anon_type_definition)
@@ -2082,7 +2082,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// If we are parsing a definition and stop at a base-clause, continue on
// until the semicolon. Continuing from the comma will just trick us into
// thinking we are seeing a variable declaration.
- if (TUK == Sema::TUK_Definition && Tok.is(tok::colon))
+ if (TUK == TagUseKind::Definition && Tok.is(tok::colon))
SkipUntil(tok::semi, StopBeforeMatch);
else
SkipUntil(tok::comma, StopAtSemi);
@@ -2103,7 +2103,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
if (TemplateId->isInvalid()) {
// Can't build the declaration.
} else if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
- TUK == Sema::TUK_Declaration) {
+ TUK == TagUseKind::Declaration) {
// This is an explicit instantiation of a class template.
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
diag::err_keyword_not_allowed,
@@ -2119,8 +2119,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// they have template headers, in which case they're ill-formed
// (FIXME: "template <class T> friend class A<T>::B<int>;").
// We diagnose this error in ActOnClassTemplateSpecialization.
- } else if (TUK == Sema::TUK_Reference ||
- (TUK == Sema::TUK_Friend &&
+ } else if (TUK == TagUseKind::Reference ||
+ (TUK == TagUseKind::Friend &&
TemplateInfo.Kind == ParsedTemplateInfo::NonTemplate)) {
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
diag::err_keyword_not_allowed,
@@ -2145,10 +2145,10 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// If this is a friend declaration, however, since it cannot have a
// template header, it is most likely that the user meant to
// remove the 'template' keyword.
- assert((TUK == Sema::TUK_Definition || TUK == Sema::TUK_Friend) &&
+ assert((TUK == TagUseKind::Definition || TUK == TagUseKind::Friend) &&
"Expected a definition here");
- if (TUK == Sema::TUK_Friend) {
+ if (TUK == TagUseKind::Friend) {
Diag(DS.getFriendSpecLoc(), diag::err_friend_explicit_instantiation);
TemplateParams = nullptr;
} else {
@@ -2179,7 +2179,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
&SkipBody);
}
} else if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
- TUK == Sema::TUK_Declaration) {
+ TUK == TagUseKind::Declaration) {
// Explicit instantiation of a member of a class template
// specialization, e.g.,
//
@@ -2190,7 +2190,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TagOrTempResult = Actions.ActOnExplicitInstantiation(
getCurScope(), TemplateInfo.ExternLoc, TemplateInfo.TemplateLoc,
TagType, StartLoc, SS, Name, NameLoc, attrs);
- } else if (TUK == Sema::TUK_Friend &&
+ } else if (TUK == TagUseKind::Friend &&
TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate) {
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
diag::err_keyword_not_allowed,
@@ -2202,12 +2202,12 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
MultiTemplateParamsArg(TemplateParams ? &(*TemplateParams)[0] : nullptr,
TemplateParams ? TemplateParams->size() : 0));
} else {
- if (TUK != Sema::TUK_Declaration && TUK != Sema::TUK_Definition)
+ if (TUK != TagUseKind::Declaration && TUK != TagUseKind::Definition)
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
diag::err_keyword_not_allowed,
/* DiagnoseEmptyAttrs=*/true);
- if (TUK == Sema::TUK_Definition &&
+ if (TUK == TagUseKind::Definition &&
TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation) {
// If the declarator-id is not a template-id, issue a diagnostic and
// recover by ignoring the 'template' keyword.
@@ -2222,7 +2222,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// reference. For example, we don't need the template parameters here:
// template <class T> class A *makeA(T t);
MultiTemplateParamsArg TParams;
- if (TUK != Sema::TUK_Reference && TemplateParams)
+ if (TUK != TagUseKind::Reference && TemplateParams)
TParams =
MultiTemplateParamsArg(&(*TemplateParams)[0], TemplateParams->size());
@@ -2241,7 +2241,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// If ActOnTag said the type was dependent, try again with the
// less common call.
if (IsDependent) {
- assert(TUK == Sema::TUK_Reference || TUK == Sema::TUK_Friend);
+ assert(TUK == TagUseKind::Reference || TUK == TagUseKind::Friend);
TypeResult = Actions.ActOnDependentTag(getCurScope(), TagType, TUK, SS,
Name, StartLoc, NameLoc);
}
@@ -2252,13 +2252,13 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// just merge them into the current pool.
if (shouldDelayDiagsInTag) {
diagsFromTag.done();
- if (TUK == Sema::TUK_Reference &&
+ if (TUK == TagUseKind::Reference &&
TemplateInfo.Kind == ParsedTemplateInfo::Template)
diagsFromTag.redelay();
}
// If there is a body, parse it and inform the actions module.
- if (TUK == Sema::TUK_Definition) {
+ if (TUK == TagUseKind::Definition) {
assert(Tok.is(tok::l_brace) ||
(getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
isClassCompatibleKeyword());
@@ -2316,7 +2316,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
//
// After a type-specifier, we don't expect a semicolon. This only happens in
// C, since definitions are not permitted in this context in C++.
- if (TUK == Sema::TUK_Definition &&
+ if (TUK == TagUseKind::Definition &&
(getLangOpts().CPlusPlus || !isTypeSpecifier(DSC)) &&
(TemplateInfo.Kind || !isValidAfterTypeSpecifier(false))) {
if (Tok.isNot(tok::semi)) {
@@ -4560,7 +4560,8 @@ static bool IsBuiltInOrStandardCXX11Attribute(IdentifierInfo *AttrName,
bool Parser::ParseCXXAssumeAttributeArg(ParsedAttributes &Attrs,
IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
- SourceLocation *EndLoc) {
+ SourceLocation *EndLoc,
+ ParsedAttr::Form Form) {
assert(Tok.is(tok::l_paren) && "Not a C++11 attribute argument list");
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
@@ -4603,7 +4604,7 @@ bool Parser::ParseCXXAssumeAttributeArg(ParsedAttributes &Attrs,
auto RParen = Tok.getLocation();
T.consumeClose();
Attrs.addNew(AttrName, SourceRange(AttrNameLoc, RParen), nullptr,
- SourceLocation(), &Assumption, 1, ParsedAttr::Form::CXX11());
+ SourceLocation(), &Assumption, 1, Form);
if (EndLoc)
*EndLoc = RParen;
@@ -4683,7 +4684,7 @@ bool Parser::ParseCXX11AttributeArgs(
ScopeName, ScopeLoc, Form);
// So does C++23's assume() attribute.
else if (!ScopeName && AttrName->isStr("assume")) {
- if (ParseCXXAssumeAttributeArg(Attrs, AttrName, AttrNameLoc, EndLoc))
+ if (ParseCXXAssumeAttributeArg(Attrs, AttrName, AttrNameLoc, EndLoc, Form))
return true;
NumArgs = 1;
} else
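
Threading ParsedAttr::Form through ParseCXXAssumeAttributeArg lets one argument parser serve several spellings; the AT_CXXAssume cases added to ParseGNUAttributeArgs and ParseClangAttributeArgs in ParseDecl.cpp suggest the GNU and clang spellings now reach it too. A sketch, assuming those spellings are wired up as the dispatch implies:

    void f(int n) {
      [[assume(n > 0)]];               // C++23 spelling
      [[clang::assume(n > 0)]];        // clang spelling (assumed)
      __attribute__((assume(n > 0)));  // GNU spelling (assumed)
    }
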
diff --git a/clang/lib/Parse/ParseOpenACC.cpp b/clang/lib/Parse/ParseOpenACC.cpp
index 5db3036b0003..e9c60f76165b 100644
--- a/clang/lib/Parse/ParseOpenACC.cpp
+++ b/clang/lib/Parse/ParseOpenACC.cpp
@@ -920,7 +920,8 @@ Parser::OpenACCClauseParseResult Parser::ParseOpenACCClauseParams(
case OpenACCClauseKind::PresentOrCopyIn: {
bool IsReadOnly = tryParseAndConsumeSpecialTokenKind(
*this, OpenACCSpecialTokenKind::ReadOnly, ClauseKind);
- ParsedClause.setVarListDetails(ParseOpenACCVarList(), IsReadOnly,
+ ParsedClause.setVarListDetails(ParseOpenACCVarList(ClauseKind),
+ IsReadOnly,
/*IsZero=*/false);
break;
}
@@ -932,16 +933,17 @@ Parser::OpenACCClauseParseResult Parser::ParseOpenACCClauseParams(
case OpenACCClauseKind::PresentOrCopyOut: {
bool IsZero = tryParseAndConsumeSpecialTokenKind(
*this, OpenACCSpecialTokenKind::Zero, ClauseKind);
- ParsedClause.setVarListDetails(ParseOpenACCVarList(),
+ ParsedClause.setVarListDetails(ParseOpenACCVarList(ClauseKind),
/*IsReadOnly=*/false, IsZero);
break;
}
- case OpenACCClauseKind::Reduction:
+ case OpenACCClauseKind::Reduction: {
// If we're missing a clause-kind (or it is invalid), see if we can parse
// the var-list anyway.
- ParseReductionOperator(*this);
- ParseOpenACCVarList();
+ OpenACCReductionOperator Op = ParseReductionOperator(*this);
+ ParsedClause.setReductionDetails(Op, ParseOpenACCVarList(ClauseKind));
break;
+ }
case OpenACCClauseKind::Self:
// The 'self' clause is a var-list instead of a 'condition' in the case of
// the 'update' clause, so we have to handle it here. Use an assert to
@@ -955,11 +957,11 @@ Parser::OpenACCClauseParseResult Parser::ParseOpenACCClauseParams(
case OpenACCClauseKind::Host:
case OpenACCClauseKind::Link:
case OpenACCClauseKind::UseDevice:
- ParseOpenACCVarList();
+ ParseOpenACCVarList(ClauseKind);
break;
case OpenACCClauseKind::Attach:
case OpenACCClauseKind::DevicePtr:
- ParsedClause.setVarListDetails(ParseOpenACCVarList(),
+ ParsedClause.setVarListDetails(ParseOpenACCVarList(ClauseKind),
/*IsReadOnly=*/false, /*IsZero=*/false);
break;
case OpenACCClauseKind::Copy:
@@ -969,7 +971,7 @@ Parser::OpenACCClauseParseResult Parser::ParseOpenACCClauseParams(
case OpenACCClauseKind::NoCreate:
case OpenACCClauseKind::Present:
case OpenACCClauseKind::Private:
- ParsedClause.setVarListDetails(ParseOpenACCVarList(),
+ ParsedClause.setVarListDetails(ParseOpenACCVarList(ClauseKind),
/*IsReadOnly=*/false, /*IsZero=*/false);
break;
case OpenACCClauseKind::Collapse: {
@@ -1278,7 +1280,7 @@ ExprResult Parser::ParseOpenACCBindClauseArgument() {
/// - an array element
/// - a member of a composite variable
/// - a common block name between slashes (fortran only)
-Parser::OpenACCVarParseResult Parser::ParseOpenACCVar() {
+Parser::OpenACCVarParseResult Parser::ParseOpenACCVar(OpenACCClauseKind CK) {
OpenACCArraySectionRAII ArraySections(*this);
ExprResult Res = ParseAssignmentExpression();
@@ -1289,15 +1291,15 @@ Parser::OpenACCVarParseResult Parser::ParseOpenACCVar() {
if (!Res.isUsable())
return {Res, OpenACCParseCanContinue::Can};
- Res = getActions().OpenACC().ActOnVar(Res.get());
+ Res = getActions().OpenACC().ActOnVar(CK, Res.get());
return {Res, OpenACCParseCanContinue::Can};
}
-llvm::SmallVector<Expr *> Parser::ParseOpenACCVarList() {
+llvm::SmallVector<Expr *> Parser::ParseOpenACCVarList(OpenACCClauseKind CK) {
llvm::SmallVector<Expr *> Vars;
- auto [Res, CanContinue] = ParseOpenACCVar();
+ auto [Res, CanContinue] = ParseOpenACCVar(CK);
if (Res.isUsable()) {
Vars.push_back(Res.get());
} else if (CanContinue == OpenACCParseCanContinue::Cannot) {
@@ -1308,7 +1310,7 @@ llvm::SmallVector<Expr *> Parser::ParseOpenACCVarList() {
while (!getCurToken().isOneOf(tok::r_paren, tok::annot_pragma_openacc_end)) {
ExpectAndConsume(tok::comma);
- auto [Res, CanContinue] = ParseOpenACCVar();
+ auto [Res, CanContinue] = ParseOpenACCVar(CK);
if (Res.isUsable()) {
Vars.push_back(Res.get());
@@ -1342,7 +1344,7 @@ void Parser::ParseOpenACCCacheVarList() {
// ParseOpenACCVarList should leave us before a r-paren, so no need to skip
// anything here.
- ParseOpenACCVarList();
+ ParseOpenACCVarList(OpenACCClauseKind::Invalid);
}
Parser::OpenACCDirectiveParseInfo Parser::ParseOpenACCDirective() {
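
ParseOpenACCVarList now carries the clause kind down to ActOnVar so Sema can enforce per-clause restrictions, and reduction clauses record their operator via setReductionDetails instead of discarding it. A minimal C sketch of the construct this parses (illustrative):

    double sum = 0.0;
    #pragma acc parallel loop reduction(+: sum)
    for (int i = 0; i < n; ++i)
      sum += a[i];
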
diff --git a/clang/lib/Parse/ParsePragma.cpp b/clang/lib/Parse/ParsePragma.cpp
index 643fdac287d1..cc6f18b5b319 100644
--- a/clang/lib/Parse/ParsePragma.cpp
+++ b/clang/lib/Parse/ParsePragma.cpp
@@ -23,6 +23,7 @@
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaCUDA.h"
#include "clang/Sema/SemaCodeCompletion.h"
+#include "clang/Sema/SemaRISCV.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSwitch.h"
#include <optional>
@@ -4154,7 +4155,7 @@ void PragmaRISCVHandler::HandlePragma(Preprocessor &PP,
}
if (II->isStr("vector"))
- Actions.DeclareRISCVVBuiltins = true;
+ Actions.RISCV().DeclareRVVBuiltins = true;
else if (II->isStr("sifive_vector"))
- Actions.DeclareRISCVSiFiveVectorBuiltins = true;
+ Actions.RISCV().DeclareSiFiveVectorBuiltins = true;
}
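
The handler now sets the flag on the new SemaRISCV part class rather than on Sema itself; the user-visible pragma is unchanged:

    #pragma clang riscv intrinsic vector   /* now sets Actions.RISCV().DeclareRVVBuiltins */
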
diff --git a/clang/lib/Sema/CMakeLists.txt b/clang/lib/Sema/CMakeLists.txt
index 58e0a3b9679b..fe6471c81ff0 100644
--- a/clang/lib/Sema/CMakeLists.txt
+++ b/clang/lib/Sema/CMakeLists.txt
@@ -60,7 +60,7 @@ add_clang_library(clangSema
SemaOpenMP.cpp
SemaOverload.cpp
SemaPseudoObject.cpp
- SemaRISCVVectorLookup.cpp
+ SemaRISCV.cpp
SemaStmt.cpp
SemaStmtAsm.cpp
SemaStmtAttr.cpp
@@ -71,6 +71,7 @@ add_clang_library(clangSema
SemaTemplateInstantiateDecl.cpp
SemaTemplateVariadic.cpp
SemaType.cpp
+ SemaX86.cpp
TypeLocBuilder.cpp
DEPENDS
diff --git a/clang/lib/Sema/HLSLExternalSemaSource.cpp b/clang/lib/Sema/HLSLExternalSemaSource.cpp
index bb283c54b3d2..a2b29a7bdf50 100644
--- a/clang/lib/Sema/HLSLExternalSemaSource.cpp
+++ b/clang/lib/Sema/HLSLExternalSemaSource.cpp
@@ -308,17 +308,18 @@ struct BuiltinTypeDeclBuilder {
return *this;
}
- TemplateParameterListBuilder addTemplateArgumentList();
- BuiltinTypeDeclBuilder &addSimpleTemplateParams(ArrayRef<StringRef> Names);
+ TemplateParameterListBuilder addTemplateArgumentList(Sema &S);
+ BuiltinTypeDeclBuilder &addSimpleTemplateParams(Sema &S,
+ ArrayRef<StringRef> Names);
};
struct TemplateParameterListBuilder {
BuiltinTypeDeclBuilder &Builder;
- ASTContext &AST;
+ Sema &S;
llvm::SmallVector<NamedDecl *> Params;
- TemplateParameterListBuilder(BuiltinTypeDeclBuilder &RB)
- : Builder(RB), AST(RB.Record->getASTContext()) {}
+ TemplateParameterListBuilder(Sema &S, BuiltinTypeDeclBuilder &RB)
+ : Builder(RB), S(S) {}
~TemplateParameterListBuilder() { finalizeTemplateArgs(); }
@@ -328,12 +329,15 @@ struct TemplateParameterListBuilder {
return *this;
unsigned Position = static_cast<unsigned>(Params.size());
auto *Decl = TemplateTypeParmDecl::Create(
- AST, Builder.Record->getDeclContext(), SourceLocation(),
+ S.Context, Builder.Record->getDeclContext(), SourceLocation(),
SourceLocation(), /* TemplateDepth */ 0, Position,
- &AST.Idents.get(Name, tok::TokenKind::identifier), /* Typename */ false,
+ &S.Context.Idents.get(Name, tok::TokenKind::identifier),
+ /* Typename */ false,
/* ParameterPack */ false);
if (!DefaultValue.isNull())
- Decl->setDefaultArgument(AST.getTrivialTypeSourceInfo(DefaultValue));
+ Decl->setDefaultArgument(
+ S.Context, S.getTrivialTemplateArgumentLoc(DefaultValue, QualType(),
+ SourceLocation()));
Params.emplace_back(Decl);
return *this;
@@ -342,11 +346,11 @@ struct TemplateParameterListBuilder {
BuiltinTypeDeclBuilder &finalizeTemplateArgs() {
if (Params.empty())
return Builder;
- auto *ParamList =
- TemplateParameterList::Create(AST, SourceLocation(), SourceLocation(),
- Params, SourceLocation(), nullptr);
+ auto *ParamList = TemplateParameterList::Create(S.Context, SourceLocation(),
+ SourceLocation(), Params,
+ SourceLocation(), nullptr);
Builder.Template = ClassTemplateDecl::Create(
- AST, Builder.Record->getDeclContext(), SourceLocation(),
+ S.Context, Builder.Record->getDeclContext(), SourceLocation(),
DeclarationName(Builder.Record->getIdentifier()), ParamList,
Builder.Record);
Builder.Record->setDescribedClassTemplate(Builder.Template);
@@ -359,20 +363,22 @@ struct TemplateParameterListBuilder {
Params.clear();
QualType T = Builder.Template->getInjectedClassNameSpecialization();
- T = AST.getInjectedClassNameType(Builder.Record, T);
+ T = S.Context.getInjectedClassNameType(Builder.Record, T);
return Builder;
}
};
} // namespace
-TemplateParameterListBuilder BuiltinTypeDeclBuilder::addTemplateArgumentList() {
- return TemplateParameterListBuilder(*this);
+TemplateParameterListBuilder
+BuiltinTypeDeclBuilder::addTemplateArgumentList(Sema &S) {
+ return TemplateParameterListBuilder(S, *this);
}
BuiltinTypeDeclBuilder &
-BuiltinTypeDeclBuilder::addSimpleTemplateParams(ArrayRef<StringRef> Names) {
- TemplateParameterListBuilder Builder = this->addTemplateArgumentList();
+BuiltinTypeDeclBuilder::addSimpleTemplateParams(Sema &S,
+ ArrayRef<StringRef> Names) {
+ TemplateParameterListBuilder Builder = this->addTemplateArgumentList(S);
for (StringRef Name : Names)
Builder.addTypeParameter(Name);
return Builder.finalizeTemplateArgs();
@@ -426,7 +432,9 @@ void HLSLExternalSemaSource::defineHLSLVectorAlias() {
auto *TypeParam = TemplateTypeParmDecl::Create(
AST, HLSLNamespace, SourceLocation(), SourceLocation(), 0, 0,
&AST.Idents.get("element", tok::TokenKind::identifier), false, false);
- TypeParam->setDefaultArgument(AST.getTrivialTypeSourceInfo(AST.FloatTy));
+ TypeParam->setDefaultArgument(
+ AST, SemaPtr->getTrivialTemplateArgumentLoc(
+ TemplateArgument(AST.FloatTy), QualType(), SourceLocation()));
TemplateParams.emplace_back(TypeParam);
@@ -434,10 +442,12 @@ void HLSLExternalSemaSource::defineHLSLVectorAlias() {
AST, HLSLNamespace, SourceLocation(), SourceLocation(), 0, 1,
&AST.Idents.get("element_count", tok::TokenKind::identifier), AST.IntTy,
false, AST.getTrivialTypeSourceInfo(AST.IntTy));
- Expr *LiteralExpr =
- IntegerLiteral::Create(AST, llvm::APInt(AST.getIntWidth(AST.IntTy), 4),
- AST.IntTy, SourceLocation());
- SizeParam->setDefaultArgument(LiteralExpr);
+ llvm::APInt Val(AST.getIntWidth(AST.IntTy), 4);
+ TemplateArgument Default(AST, llvm::APSInt(std::move(Val)), AST.IntTy,
+ /*IsDefaulted=*/true);
+ SizeParam->setDefaultArgument(
+ AST, SemaPtr->getTrivialTemplateArgumentLoc(Default, AST.IntTy,
+ SourceLocation(), SizeParam));
TemplateParams.emplace_back(SizeParam);
auto *ParamList =
@@ -492,7 +502,7 @@ static BuiltinTypeDeclBuilder setupBufferType(CXXRecordDecl *Decl, Sema &S,
void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
CXXRecordDecl *Decl;
Decl = BuiltinTypeDeclBuilder(*SemaPtr, HLSLNamespace, "RWBuffer")
- .addSimpleTemplateParams({"element_type"})
+ .addSimpleTemplateParams(*SemaPtr, {"element_type"})
.Record;
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV,
@@ -503,7 +513,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
Decl =
BuiltinTypeDeclBuilder(*SemaPtr, HLSLNamespace, "RasterizerOrderedBuffer")
- .addSimpleTemplateParams({"element_type"})
+ .addSimpleTemplateParams(*SemaPtr, {"element_type"})
.Record;
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV,
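
With defaults now expressed as TemplateArgumentLoc, the vector alias keeps its defaulted parameters: element defaults to float and element_count to 4, per the FloatTy and APInt values wired above. A minimal HLSL-flavored sketch of what those defaults mean (illustrative):

    vector v0;          // vector<float, 4>: both defaults apply
    vector<half> v1;    // element_count still defaults to 4
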
diff --git a/clang/lib/Sema/OpenCLBuiltins.td b/clang/lib/Sema/OpenCLBuiltins.td
index a7bdfe20b982..4da61429fcce 100644
--- a/clang/lib/Sema/OpenCLBuiltins.td
+++ b/clang/lib/Sema/OpenCLBuiltins.td
@@ -1852,6 +1852,20 @@ let Extension = FunctionExtension<"cl_khr_subgroup_rotate"> in {
def : Builtin<"sub_group_clustered_rotate", [AGenType1, AGenType1, Int, UInt], Attr.Convergent>;
}
+// cl_khr_kernel_clock
+let Extension = FunctionExtension<"cl_khr_kernel_clock __opencl_c_kernel_clock_scope_device"> in {
+ def : Builtin<"clock_read_device", [ULong]>;
+ def : Builtin<"clock_read_hilo_device", [VectorType<UInt, 2>]>;
+}
+let Extension = FunctionExtension<"cl_khr_kernel_clock __opencl_c_kernel_clock_scope_work_group"> in {
+ def : Builtin<"clock_read_work_group", [ULong]>;
+ def : Builtin<"clock_read_hilo_work_group", [VectorType<UInt, 2>]>;
+}
+let Extension = FunctionExtension<"cl_khr_kernel_clock __opencl_c_kernel_clock_scope_sub_group"> in {
+ def : Builtin<"clock_read_sub_group", [ULong]>;
+ def : Builtin<"clock_read_hilo_sub_group", [VectorType<UInt, 2>]>;
+}
+
//--------------------------------------------------------------------
// Arm extensions.
let Extension = ArmIntegerDotProductInt8 in {
diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp
index f847c49920cf..d1fb21bb1ae1 100644
--- a/clang/lib/Sema/Sema.cpp
+++ b/clang/lib/Sema/Sema.cpp
@@ -49,7 +49,10 @@
#include "clang/Sema/SemaObjC.h"
#include "clang/Sema/SemaOpenACC.h"
#include "clang/Sema/SemaOpenMP.h"
+#include "clang/Sema/SemaPseudoObject.h"
+#include "clang/Sema/SemaRISCV.h"
#include "clang/Sema/SemaSYCL.h"
+#include "clang/Sema/SemaX86.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TemplateInstCallback.h"
#include "clang/Sema/TypoCorrection.h"
@@ -210,7 +213,10 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
ObjCPtr(std::make_unique<SemaObjC>(*this)),
OpenACCPtr(std::make_unique<SemaOpenACC>(*this)),
OpenMPPtr(std::make_unique<SemaOpenMP>(*this)),
+ PseudoObjectPtr(std::make_unique<SemaPseudoObject>(*this)),
+ RISCVPtr(std::make_unique<SemaRISCV>(*this)),
SYCLPtr(std::make_unique<SemaSYCL>(*this)),
+ X86Ptr(std::make_unique<SemaX86>(*this)),
MSPointerToMemberRepresentationMethod(
LangOpts.getMSPointerToMemberRepresentationMethod()),
MSStructPragmaOn(false), VtorDispStack(LangOpts.getVtorDispMode()),
@@ -2049,7 +2055,7 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
if (TI.hasRISCVVTypes() && Ty->isRVVSizelessBuiltinType() && FD) {
llvm::StringMap<bool> CallerFeatureMap;
Context.getFunctionFeatureMap(CallerFeatureMap, FD);
- checkRVVTypeSupport(Ty, Loc, D, CallerFeatureMap);
+ RISCV().checkRVVTypeSupport(Ty, Loc, D, CallerFeatureMap);
}
// Don't allow SVE types in functions without a SVE target.
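
Sema gains dedicated part objects for pseudo-object, RISC-V, and X86 checking, constructed alongside the existing ones, and call sites reach them through accessors as in the RVV check above. A sketch of the accessor shape this implies; the definitions live in Sema.h and are assumed, not quoted from this patch:

    SemaRISCV &RISCV() { return *RISCVPtr; }   // assumed accessor
    SemaX86 &X86() { return *X86Ptr; }         // assumed accessor
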
diff --git a/clang/lib/Sema/SemaAPINotes.cpp b/clang/lib/Sema/SemaAPINotes.cpp
index 443bf162044f..c80b08e361cf 100644
--- a/clang/lib/Sema/SemaAPINotes.cpp
+++ b/clang/lib/Sema/SemaAPINotes.cpp
@@ -269,7 +269,8 @@ static void ProcessAPINotes(Sema &S, Decl *D,
ASTAllocateString(S.Context, Info.UnavailableMsg),
/*Strict=*/false,
/*Replacement=*/StringRef(),
- /*Priority=*/Sema::AP_Explicit);
+ /*Priority=*/Sema::AP_Explicit,
+ /*Environment=*/nullptr);
},
[](const Decl *D) {
return llvm::find_if(D->attrs(), [](const Attr *next) -> bool {
diff --git a/clang/lib/Sema/SemaAvailability.cpp b/clang/lib/Sema/SemaAvailability.cpp
index 5ebc25317bf3..22f5a2f66347 100644
--- a/clang/lib/Sema/SemaAvailability.cpp
+++ b/clang/lib/Sema/SemaAvailability.cpp
@@ -14,20 +14,37 @@
#include "clang/AST/Decl.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaObjC.h"
+#include "llvm/ADT/StringRef.h"
#include <optional>
using namespace clang;
using namespace sema;
+static bool hasMatchingEnvironmentOrNone(const ASTContext &Context,
+ const AvailabilityAttr *AA) {
+ IdentifierInfo *IIEnvironment = AA->getEnvironment();
+ auto Environment = Context.getTargetInfo().getTriple().getEnvironment();
+ if (!IIEnvironment || Environment == llvm::Triple::UnknownEnvironment)
+ return true;
+
+ llvm::Triple::EnvironmentType ET =
+ AvailabilityAttr::getEnvironmentType(IIEnvironment->getName());
+ return Environment == ET;
+}
+
static const AvailabilityAttr *getAttrForPlatform(ASTContext &Context,
const Decl *D) {
+ AvailabilityAttr const *PartialMatch = nullptr;
// Check each AvailabilityAttr to find the one for this platform.
+ // For multiple attributes with the same platform, try to find one for this
+ // environment.
for (const auto *A : D->attrs()) {
if (const auto *Avail = dyn_cast<AvailabilityAttr>(A)) {
// FIXME: this is copied from CheckAvailability. We should try to
@@ -46,11 +63,15 @@ static const AvailabilityAttr *getAttrForPlatform(ASTContext &Context,
StringRef TargetPlatform = Context.getTargetInfo().getPlatformName();
// Match the platform name.
- if (RealizedPlatform == TargetPlatform)
- return Avail;
+ if (RealizedPlatform == TargetPlatform) {
+ // Find the best matching attribute for this environment
+ if (hasMatchingEnvironmentOrNone(Context, Avail))
+ return Avail;
+ PartialMatch = Avail;
+ }
}
}
- return nullptr;
+ return PartialMatch;
}
/// The diagnostic we should emit for \c D, and the declaration that
@@ -118,10 +139,9 @@ ShouldDiagnoseAvailabilityOfDecl(Sema &S, const NamedDecl *D,
/// whether we should emit a diagnostic for \c K and \c DeclVersion in
/// the context of \c Ctx. For example, we should emit an unavailable diagnostic
/// in a deprecated context, but not the other way around.
-static bool
-ShouldDiagnoseAvailabilityInContext(Sema &S, AvailabilityResult K,
- VersionTuple DeclVersion, Decl *Ctx,
- const NamedDecl *OffendingDecl) {
+static bool ShouldDiagnoseAvailabilityInContext(
+ Sema &S, AvailabilityResult K, VersionTuple DeclVersion,
+ const IdentifierInfo *DeclEnv, Decl *Ctx, const NamedDecl *OffendingDecl) {
assert(K != AR_Available && "Expected an unavailable declaration here!");
// If this was defined using CF_OPTIONS, etc. then ignore the diagnostic.
@@ -140,7 +160,8 @@ ShouldDiagnoseAvailabilityInContext(Sema &S, AvailabilityResult K,
auto CheckContext = [&](const Decl *C) {
if (K == AR_NotYetIntroduced) {
if (const AvailabilityAttr *AA = getAttrForPlatform(S.Context, C))
- if (AA->getIntroduced() >= DeclVersion)
+ if (AA->getIntroduced() >= DeclVersion &&
+ AA->getEnvironment() == DeclEnv)
return true;
} else if (K == AR_Deprecated) {
if (C->isDeprecated())
@@ -344,10 +365,14 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
unsigned available_here_select_kind;
VersionTuple DeclVersion;
- if (const AvailabilityAttr *AA = getAttrForPlatform(S.Context, OffendingDecl))
+ const AvailabilityAttr *AA = getAttrForPlatform(S.Context, OffendingDecl);
+ const IdentifierInfo *IIEnv = nullptr;
+ if (AA) {
DeclVersion = AA->getIntroduced();
+ IIEnv = AA->getEnvironment();
+ }
- if (!ShouldDiagnoseAvailabilityInContext(S, K, DeclVersion, Ctx,
+ if (!ShouldDiagnoseAvailabilityInContext(S, K, DeclVersion, IIEnv, Ctx,
OffendingDecl))
return;
@@ -355,8 +380,7 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
// The declaration can have multiple availability attributes, we are looking
// at one of them.
- const AvailabilityAttr *A = getAttrForPlatform(S.Context, OffendingDecl);
- if (A && A->isInherited()) {
+ if (AA && AA->isInherited()) {
for (const Decl *Redecl = OffendingDecl->getMostRecentDecl(); Redecl;
Redecl = Redecl->getPreviousDecl()) {
const AvailabilityAttr *AForRedecl =
@@ -376,26 +400,43 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
// not specified for deployment targets >= to iOS 11 or equivalent or
// for declarations that were introduced in iOS 11 (macOS 10.13, ...) or
// later.
- const AvailabilityAttr *AA =
- getAttrForPlatform(S.getASTContext(), OffendingDecl);
+ assert(AA != nullptr && "expecting valid availability attribute");
VersionTuple Introduced = AA->getIntroduced();
+ bool EnvironmentMatchesOrNone =
+ hasMatchingEnvironmentOrNone(S.getASTContext(), AA);
+
+ const TargetInfo &TI = S.getASTContext().getTargetInfo();
+ std::string PlatformName(
+ AvailabilityAttr::getPrettyPlatformName(TI.getPlatformName()));
+ llvm::StringRef TargetEnvironment(AvailabilityAttr::getPrettyEnviromentName(
+ TI.getTriple().getEnvironmentName()));
+ llvm::StringRef AttrEnvironment =
+ AA->getEnvironment() ? AvailabilityAttr::getPrettyEnviromentName(
+ AA->getEnvironment()->getName())
+ : "";
+ bool UseEnvironment =
+ (!AttrEnvironment.empty() && !TargetEnvironment.empty());
bool UseNewWarning = shouldDiagnoseAvailabilityByDefault(
S.Context, S.Context.getTargetInfo().getPlatformMinVersion(),
Introduced);
- unsigned Warning = UseNewWarning ? diag::warn_unguarded_availability_new
- : diag::warn_unguarded_availability;
- std::string PlatformName(AvailabilityAttr::getPrettyPlatformName(
- S.getASTContext().getTargetInfo().getPlatformName()));
+ unsigned DiagKind =
+ EnvironmentMatchesOrNone
+ ? (UseNewWarning ? diag::warn_unguarded_availability_new
+ : diag::warn_unguarded_availability)
+ : (UseNewWarning ? diag::warn_unguarded_availability_unavailable_new
+ : diag::warn_unguarded_availability_unavailable);
- S.Diag(Loc, Warning) << OffendingDecl << PlatformName
- << Introduced.getAsString();
+ S.Diag(Loc, DiagKind) << OffendingDecl << PlatformName
+ << Introduced.getAsString() << UseEnvironment
+ << TargetEnvironment;
S.Diag(OffendingDecl->getLocation(),
diag::note_partial_availability_specified_here)
<< OffendingDecl << PlatformName << Introduced.getAsString()
- << S.Context.getTargetInfo().getPlatformMinVersion().getAsString();
+ << S.Context.getTargetInfo().getPlatformMinVersion().getAsString()
+ << UseEnvironment << AttrEnvironment << TargetEnvironment;
if (const auto *Enclosing = findEnclosingDeclToAnnotate(Ctx)) {
if (const auto *TD = dyn_cast<TagDecl>(Enclosing))
@@ -772,14 +813,17 @@ void DiagnoseUnguardedAvailability::DiagnoseDeclAvailability(
const AvailabilityAttr *AA =
getAttrForPlatform(SemaRef.getASTContext(), OffendingDecl);
+ bool EnvironmentMatchesOrNone =
+ hasMatchingEnvironmentOrNone(SemaRef.getASTContext(), AA);
VersionTuple Introduced = AA->getIntroduced();
- if (AvailabilityStack.back() >= Introduced)
+ if (EnvironmentMatchesOrNone && AvailabilityStack.back() >= Introduced)
return;
// If the context of this function is less available than D, we should not
// emit a diagnostic.
- if (!ShouldDiagnoseAvailabilityInContext(SemaRef, Result, Introduced, Ctx,
+ if (!ShouldDiagnoseAvailabilityInContext(SemaRef, Result, Introduced,
+ AA->getEnvironment(), Ctx,
OffendingDecl))
return;
@@ -787,25 +831,39 @@ void DiagnoseUnguardedAvailability::DiagnoseDeclAvailability(
// not specified for deployment targets >= to iOS 11 or equivalent or
// for declarations that were introduced in iOS 11 (macOS 10.13, ...) or
// later.
- unsigned DiagKind =
- shouldDiagnoseAvailabilityByDefault(
- SemaRef.Context,
- SemaRef.Context.getTargetInfo().getPlatformMinVersion(), Introduced)
- ? diag::warn_unguarded_availability_new
- : diag::warn_unguarded_availability;
+ bool UseNewDiagKind = shouldDiagnoseAvailabilityByDefault(
+ SemaRef.Context,
+ SemaRef.Context.getTargetInfo().getPlatformMinVersion(), Introduced);
+
+ const TargetInfo &TI = SemaRef.getASTContext().getTargetInfo();
+ std::string PlatformName(
+ AvailabilityAttr::getPrettyPlatformName(TI.getPlatformName()));
+ llvm::StringRef TargetEnvironment(AvailabilityAttr::getPrettyEnviromentName(
+ TI.getTriple().getEnvironmentName()));
+ llvm::StringRef AttrEnvironment =
+ AA->getEnvironment() ? AvailabilityAttr::getPrettyEnviromentName(
+ AA->getEnvironment()->getName())
+ : "";
+ bool UseEnvironment =
+ (!AttrEnvironment.empty() && !TargetEnvironment.empty());
- std::string PlatformName(AvailabilityAttr::getPrettyPlatformName(
- SemaRef.getASTContext().getTargetInfo().getPlatformName()));
+ unsigned DiagKind =
+ EnvironmentMatchesOrNone
+ ? (UseNewDiagKind ? diag::warn_unguarded_availability_new
+ : diag::warn_unguarded_availability)
+ : (UseNewDiagKind
+ ? diag::warn_unguarded_availability_unavailable_new
+ : diag::warn_unguarded_availability_unavailable);
SemaRef.Diag(Range.getBegin(), DiagKind)
- << Range << D << PlatformName << Introduced.getAsString();
+ << Range << D << PlatformName << Introduced.getAsString()
+ << UseEnvironment << TargetEnvironment;
SemaRef.Diag(OffendingDecl->getLocation(),
diag::note_partial_availability_specified_here)
<< OffendingDecl << PlatformName << Introduced.getAsString()
- << SemaRef.Context.getTargetInfo()
- .getPlatformMinVersion()
- .getAsString();
+ << SemaRef.Context.getTargetInfo().getPlatformMinVersion().getAsString()
+ << UseEnvironment << AttrEnvironment << TargetEnvironment;
auto FixitDiag =
SemaRef.Diag(Range.getBegin(), diag::note_unguarded_available_silence)
@@ -929,11 +987,6 @@ void Sema::DiagnoseUnguardedAvailabilityViolations(Decl *D) {
Stmt *Body = nullptr;
if (auto *FD = D->getAsFunction()) {
- // FIXME: We only examine the pattern decl for availability violations now,
- // but we should also examine instantiated templates.
- if (FD->isTemplateInstantiation())
- return;
-
Body = FD->getBody();
if (auto *CD = dyn_cast<CXXConstructorDecl>(FD))
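
Net effect of this file's changes: an availability attribute can now be constrained to an environment, getAttrForPlatform prefers an exact environment match but falls back to a platform-only match, and uses from a non-matching environment get the new "unavailable" flavor of the unguarded-availability warnings; dropping the isTemplateInstantiation early-out also means template instantiations are now checked. A minimal sketch of the scenario (names illustrative):

    __attribute__((availability(shadermodel, introduced = 6.0,
                                environment = compute)))
    void g();

    /* When compiling for a pixel environment, a call to g() now draws
       warn_unguarded_availability_unavailable instead of the plain
       not-yet-introduced warning. */
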
diff --git a/clang/lib/Sema/SemaCXXScopeSpec.cpp b/clang/lib/Sema/SemaCXXScopeSpec.cpp
index fca5bd131bbc..c405fbc0aa42 100644
--- a/clang/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/clang/lib/Sema/SemaCXXScopeSpec.cpp
@@ -796,6 +796,14 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
Diag(IdInfo.IdentifierLoc,
diag::ext_undeclared_unqual_id_with_dependent_base)
<< IdInfo.Identifier << ContainingClass;
+ // Fake up a nested-name-specifier that starts with the
+ // injected-class-name of the enclosing class.
+ QualType T = Context.getTypeDeclType(ContainingClass);
+ TypeLocBuilder TLB;
+ TLB.pushTrivial(Context, T, IdInfo.IdentifierLoc);
+ SS.Extend(Context, /*TemplateKWLoc=*/SourceLocation(),
+ TLB.getTypeLocInContext(Context, T), IdInfo.IdentifierLoc);
+ // Add the identifier to form a dependent name.
SS.Extend(Context, IdInfo.Identifier, IdInfo.IdentifierLoc,
IdInfo.CCLoc);
return false;
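
In this Microsoft-extension path, the scope specifier is now prefixed with the enclosing class's injected-class-name before the undeclared identifier is appended, so later lookup sees a properly dependent prefix. A minimal C++ sketch of code that reaches this path (MSVC compatibility mode assumed):

    template <typename T> struct Base { struct Inner { static constexpr int value = 0; }; };
    template <typename T> struct Derived : Base<T> {
      int f() { return Inner::value; }  // 'Inner' found via the dependent base
    };
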
diff --git a/clang/lib/Sema/SemaCast.cpp b/clang/lib/Sema/SemaCast.cpp
index 483ec7e36eae..7db6b1dfe923 100644
--- a/clang/lib/Sema/SemaCast.cpp
+++ b/clang/lib/Sema/SemaCast.cpp
@@ -25,6 +25,7 @@
#include "clang/Sema/Initialization.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaRISCV.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include <set>
@@ -2391,7 +2392,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
}
// Allow bitcasting between SVE VLATs and VLSTs, and vice-versa.
- if (Self.isValidRVVBitcast(SrcType, DestType)) {
+ if (Self.RISCV().isValidRVVBitcast(SrcType, DestType)) {
Kind = CK_BitCast;
return TC_Success;
}
@@ -3002,7 +3003,7 @@ void CastOperation::CheckCStyleCast() {
// Allow bitcasting between compatible RVV vector types.
if ((SrcType->isVectorType() || DestType->isVectorType()) &&
- Self.isValidRVVBitcast(SrcType, DestType)) {
+ Self.RISCV().isValidRVVBitcast(SrcType, DestType)) {
Kind = CK_BitCast;
return;
}
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index f2dc8e9dd005..c3251f3cc9d8 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -63,6 +63,8 @@
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaRISCV.h"
+#include "clang/Sema/SemaX86.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
@@ -120,13 +122,12 @@ static constexpr unsigned short combineFAPK(Sema::FormatArgumentPassingKind A,
/// Checks that a call expression's argument count is at least the desired
/// number. This is useful when doing custom type-checking on a variadic
/// function. Returns true on error.
-static bool checkArgCountAtLeast(Sema &S, CallExpr *Call,
- unsigned MinArgCount) {
+bool Sema::checkArgCountAtLeast(CallExpr *Call, unsigned MinArgCount) {
unsigned ArgCount = Call->getNumArgs();
if (ArgCount >= MinArgCount)
return false;
- return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
+ return Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
<< 0 /*function call*/ << MinArgCount << ArgCount
<< /*is non object*/ 0 << Call->getSourceRange();
}
@@ -134,12 +135,11 @@ static bool checkArgCountAtLeast(Sema &S, CallExpr *Call,
/// Checks that a call expression's argument count is at most the desired
/// number. This is useful when doing custom type-checking on a variadic
/// function. Returns true on error.
-static bool checkArgCountAtMost(Sema &S, CallExpr *Call, unsigned MaxArgCount) {
+bool Sema::checkArgCountAtMost(CallExpr *Call, unsigned MaxArgCount) {
unsigned ArgCount = Call->getNumArgs();
if (ArgCount <= MaxArgCount)
return false;
- return S.Diag(Call->getEndLoc(),
- diag::err_typecheck_call_too_many_args_at_most)
+ return Diag(Call->getEndLoc(), diag::err_typecheck_call_too_many_args_at_most)
<< 0 /*function call*/ << MaxArgCount << ArgCount
<< /*is non object*/ 0 << Call->getSourceRange();
}
@@ -147,20 +147,20 @@ static bool checkArgCountAtMost(Sema &S, CallExpr *Call, unsigned MaxArgCount) {
/// Checks that a call expression's argument count is in the desired range. This
/// is useful when doing custom type-checking on a variadic function. Returns
/// true on error.
-static bool checkArgCountRange(Sema &S, CallExpr *Call, unsigned MinArgCount,
- unsigned MaxArgCount) {
- return checkArgCountAtLeast(S, Call, MinArgCount) ||
- checkArgCountAtMost(S, Call, MaxArgCount);
+bool Sema::checkArgCountRange(CallExpr *Call, unsigned MinArgCount,
+ unsigned MaxArgCount) {
+ return checkArgCountAtLeast(Call, MinArgCount) ||
+ checkArgCountAtMost(Call, MaxArgCount);
}
/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
-static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) {
+bool Sema::checkArgCount(CallExpr *Call, unsigned DesiredArgCount) {
unsigned ArgCount = Call->getNumArgs();
if (ArgCount == DesiredArgCount)
return false;
- if (checkArgCountAtLeast(S, Call, DesiredArgCount))
+ if (checkArgCountAtLeast(Call, DesiredArgCount))
return true;
assert(ArgCount > DesiredArgCount && "should have diagnosed this");
@@ -168,7 +168,7 @@ static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) {
SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(),
Call->getArg(ArgCount - 1)->getEndLoc());
- return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args)
+ return Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args)
<< 0 /*function call*/ << DesiredArgCount << ArgCount
<< /*is non object*/ 0 << Call->getArg(1)->getSourceRange();
}
@@ -190,7 +190,7 @@ static bool convertArgumentToType(Sema &S, Expr *&Value, QualType Ty) {
/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
static bool BuiltinAnnotation(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 2))
+ if (S.checkArgCount(TheCall, 2))
return true;
// First argument should be an integer.
@@ -240,7 +240,7 @@ static bool BuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool BuiltinAddressof(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 1))
+ if (S.checkArgCount(TheCall, 1))
return true;
ExprResult Arg(TheCall->getArg(0));
@@ -255,7 +255,7 @@ static bool BuiltinAddressof(Sema &S, CallExpr *TheCall) {
/// Check that the argument to __builtin_function_start is a function.
static bool BuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 1))
+ if (S.checkArgCount(TheCall, 1))
return true;
ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
@@ -279,7 +279,7 @@ static bool BuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
/// Check the number of arguments and set the result type to
/// the argument type.
static bool BuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 1))
+ if (S.checkArgCount(TheCall, 1))
return true;
TheCall->setType(TheCall->getArg(0)->getType());
@@ -290,7 +290,7 @@ static bool BuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
/// __builtin_aligned_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool BuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
- if (checkArgCount(S, TheCall, 2))
+ if (S.checkArgCount(TheCall, 2))
return true;
clang::Expr *Source = TheCall->getArg(0);
@@ -368,7 +368,7 @@ static bool BuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
}
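// Annotation (not part of the patch): the power-of-two property that
// BuiltinAlignment enforces reduces to the usual bit trick; a minimal sketch,
// with the APSInt handling of the real check elided.
static bool sketchIsPowerOfTwoAlignment(unsigned long long A) {
  return A != 0 && (A & (A - 1)) == 0; // e.g. 16 -> true, 24 -> false
}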
static bool BuiltinOverflow(Sema &S, CallExpr *TheCall, unsigned BuiltinID) {
- if (checkArgCount(S, TheCall, 3))
+ if (S.checkArgCount(TheCall, 3))
return true;
std::pair<unsigned, const char *> Builtins[] = {
@@ -696,7 +696,7 @@ struct BuiltinDumpStructGenerator {
} // namespace
static ExprResult BuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
- if (checkArgCountAtLeast(S, TheCall, 2))
+ if (S.checkArgCountAtLeast(TheCall, 2))
return ExprError();
ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0));
@@ -762,7 +762,7 @@ static ExprResult BuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
}
static bool BuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
- if (checkArgCount(S, BuiltinCall, 2))
+ if (S.checkArgCount(BuiltinCall, 2))
return true;
SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
@@ -1504,7 +1504,7 @@ static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
}
static bool OpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 2))
+ if (S.checkArgCount(TheCall, 2))
return true;
if (checkOpenCLSubgroupExt(S, TheCall))
@@ -1531,7 +1531,7 @@ static bool OpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
static bool OpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 1))
+ if (S.checkArgCount(TheCall, 1))
return true;
Expr *BlockArg = TheCall->getArg(0);
@@ -1861,7 +1861,7 @@ static bool BuiltinRWPipe(Sema &S, CallExpr *Call) {
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool BuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
- if (checkArgCount(S, Call, 2))
+ if (S.checkArgCount(Call, 2))
return true;
if (checkOpenCLPipeArg(S, Call))
@@ -1890,7 +1890,7 @@ static bool BuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool BuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
- if (checkArgCount(S, Call, 2))
+ if (S.checkArgCount(Call, 2))
return true;
if (checkOpenCLPipeArg(S, Call))
@@ -1913,7 +1913,7 @@ static bool BuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool BuiltinPipePackets(Sema &S, CallExpr *Call) {
- if (checkArgCount(S, Call, 1))
+ if (S.checkArgCount(Call, 1))
return true;
if (!Call->getArg(0)->getType()->isPipeType()) {
@@ -1932,7 +1932,7 @@ static bool BuiltinPipePackets(Sema &S, CallExpr *Call) {
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool OpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID, CallExpr *Call) {
- if (checkArgCount(S, Call, 1))
+ if (S.checkArgCount(Call, 1))
return true;
auto RT = Call->getArg(0)->getType();
@@ -2087,7 +2087,7 @@ static bool checkPointerAuthValue(Sema &S, Expr *&Arg,
}
static ExprResult PointerAuthStrip(Sema &S, CallExpr *Call) {
- if (checkArgCount(S, Call, 2))
+ if (S.checkArgCount(Call, 2))
return ExprError();
if (checkPointerAuthEnabled(S, Call))
return ExprError();
@@ -2100,7 +2100,7 @@ static ExprResult PointerAuthStrip(Sema &S, CallExpr *Call) {
}
static ExprResult PointerAuthBlendDiscriminator(Sema &S, CallExpr *Call) {
- if (checkArgCount(S, Call, 2))
+ if (S.checkArgCount(Call, 2))
return ExprError();
if (checkPointerAuthEnabled(S, Call))
return ExprError();
@@ -2113,7 +2113,7 @@ static ExprResult PointerAuthBlendDiscriminator(Sema &S, CallExpr *Call) {
}
static ExprResult PointerAuthSignGenericData(Sema &S, CallExpr *Call) {
- if (checkArgCount(S, Call, 2))
+ if (S.checkArgCount(Call, 2))
return ExprError();
if (checkPointerAuthEnabled(S, Call))
return ExprError();
@@ -2127,7 +2127,7 @@ static ExprResult PointerAuthSignGenericData(Sema &S, CallExpr *Call) {
static ExprResult PointerAuthSignOrAuth(Sema &S, CallExpr *Call,
PointerAuthOpKind OpKind) {
- if (checkArgCount(S, Call, 3))
+ if (S.checkArgCount(Call, 3))
return ExprError();
if (checkPointerAuthEnabled(S, Call))
return ExprError();
@@ -2141,7 +2141,7 @@ static ExprResult PointerAuthSignOrAuth(Sema &S, CallExpr *Call,
}
static ExprResult PointerAuthAuthAndResign(Sema &S, CallExpr *Call) {
- if (checkArgCount(S, Call, 5))
+ if (S.checkArgCount(Call, 5))
return ExprError();
if (checkPointerAuthEnabled(S, Call))
return ExprError();
@@ -2157,7 +2157,7 @@ static ExprResult PointerAuthAuthAndResign(Sema &S, CallExpr *Call) {
}
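// Annotation (not part of the patch): the arity checks above correspond to
// the call shapes of the pointer-auth builtins; a hedged summary, with
// operand meanings inferred from the checkers' parameters:
//   __builtin_ptrauth_strip(value, key)                         // 2 args
//   __builtin_ptrauth_blend_discriminator(addr, int_disc)       // 2 args
//   __builtin_ptrauth_sign_generic_data(value, disc)            // 2 args
//   sign/auth forms (PointerAuthSignOrAuth)                     // 3 args
//   __builtin_ptrauth_auth_and_resign(value, key, disc,
//                                     new_key, new_disc)        // 5 args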
static ExprResult BuiltinLaunder(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 1))
+ if (S.checkArgCount(TheCall, 1))
return ExprError();
// Compute __builtin_launder's parameter type from the argument.
@@ -2278,7 +2278,7 @@ bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
case llvm::Triple::x86:
case llvm::Triple::x86_64:
- return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
+ return X86().CheckBuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::ppc:
case llvm::Triple::ppcle:
case llvm::Triple::ppc64:
@@ -2288,7 +2288,7 @@ bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
- return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ return RISCV().CheckBuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::loongarch32:
case llvm::Triple::loongarch64:
return CheckLoongArchBuiltinFunctionCall(TI, BuiltinID, TheCall);
@@ -2377,7 +2377,7 @@ static bool BuiltinCpu(Sema &S, const TargetInfo &TI, CallExpr *TheCall,
/// Checks that __builtin_popcountg was called with a single argument, which is
/// an unsigned integer.
static bool BuiltinPopcountg(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 1))
+ if (S.checkArgCount(TheCall, 1))
return true;
ExprResult ArgRes = S.DefaultLvalueConversion(TheCall->getArg(0));
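// Annotation (not part of the patch): a hedged usage sketch for the check
// above; __builtin_popcountg takes one argument of any unsigned integer
// type, e.g.
//   unsigned long long v = 0xF0;
//   int ones = __builtin_popcountg(v); // == 4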
@@ -2401,7 +2401,7 @@ static bool BuiltinPopcountg(Sema &S, CallExpr *TheCall) {
/// an unsigned integer, and an optional second argument, which is promoted to
/// an 'int'.
static bool BuiltinCountZeroBitsGeneric(Sema &S, CallExpr *TheCall) {
- if (checkArgCountRange(S, TheCall, 1, 2))
+ if (S.checkArgCountRange(TheCall, 1, 2))
return true;
ExprResult Arg0Res = S.DefaultLvalueConversion(TheCall->getArg(0));
@@ -2625,7 +2625,8 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
break;
case Builtin::BI__builtin_classify_type:
- if (checkArgCount(*this, TheCall, 1)) return true;
+ if (checkArgCount(TheCall, 1))
+ return true;
TheCall->setType(Context.IntTy);
break;
case Builtin::BI__builtin_complex:
@@ -2633,7 +2634,8 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
break;
case Builtin::BI__builtin_constant_p: {
- if (checkArgCount(*this, TheCall, 1)) return true;
+ if (checkArgCount(TheCall, 1))
+ return true;
ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
if (Arg.isInvalid()) return true;
TheCall->setArg(0, Arg.get());
@@ -2822,7 +2824,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return BuiltinDumpStruct(*this, TheCall);
case Builtin::BI__builtin_expect_with_probability: {
// We first want to ensure we are called with 3 arguments
- if (checkArgCount(*this, TheCall, 3))
+ if (checkArgCount(TheCall, 3))
return ExprError();
// then check probability is constant float in range [0.0, 1.0]
const Expr *ProbArg = TheCall->getArg(2);
@@ -2870,7 +2872,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
break;
case Builtin::BI__GetExceptionInfo:
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return ExprError();
if (CheckCXXThrowOperand(
@@ -2891,7 +2893,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
// These are all expected to be of the form
// T &/&&/* f(U &/&&)
// where T and U only differ in qualification.
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return ExprError();
QualType Param = FDecl->getParamDecl(0)->getType();
QualType Result = FDecl->getReturnType();
@@ -3129,7 +3131,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
}
case Builtin::BI__builtin_elementwise_copysign: {
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return ExprError();
ExprResult Magnitude = UsualUnaryConversions(TheCall->getArg(0));
@@ -3806,7 +3808,7 @@ bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
DeclRefExpr *DRE =
cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
// Ensure that we have the proper number of arguments.
- if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
+ if (checkArgCount(TheCall, IsLdrex ? 1 : 2))
return true;
// Inspect the pointer argument of the atomic builtin. This should always be
@@ -3822,7 +3824,7 @@ bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
if (!pointerType) {
Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
- << PointerArg->getType() << PointerArg->getSourceRange();
+ << PointerArg->getType() << 0 << PointerArg->getSourceRange();
return true;
}
@@ -3856,7 +3858,7 @@ bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
!ValType->isBlockPointerType() && !ValType->isFloatingType()) {
Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
- << PointerArg->getType() << PointerArg->getSourceRange();
+ << PointerArg->getType() << 0 << PointerArg->getSourceRange();
return true;
}
@@ -4145,7 +4147,7 @@ bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
"unexpected BPF builtin");
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
// The second argument needs to be a constant int
@@ -5589,12 +5591,12 @@ bool Sema::CheckHLSLBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
switch (BuiltinID) {
case Builtin::BI__builtin_hlsl_elementwise_all:
case Builtin::BI__builtin_hlsl_elementwise_any: {
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return true;
break;
}
case Builtin::BI__builtin_hlsl_elementwise_clamp: {
- if (checkArgCount(*this, TheCall, 3))
+ if (checkArgCount(TheCall, 3))
return true;
if (CheckVectorElementCallArgs(this, TheCall))
return true;
@@ -5605,7 +5607,7 @@ bool Sema::CheckHLSLBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
break;
}
case Builtin::BI__builtin_hlsl_dot: {
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
if (CheckVectorElementCallArgs(this, TheCall))
return true;
@@ -5639,7 +5641,7 @@ bool Sema::CheckHLSLBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
break;
}
case Builtin::BI__builtin_hlsl_lerp: {
- if (checkArgCount(*this, TheCall, 3))
+ if (checkArgCount(TheCall, 3))
return true;
if (CheckVectorElementCallArgs(this, TheCall))
return true;
@@ -5650,7 +5652,7 @@ bool Sema::CheckHLSLBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
break;
}
case Builtin::BI__builtin_hlsl_mad: {
- if (checkArgCount(*this, TheCall, 3))
+ if (checkArgCount(TheCall, 3))
return true;
if (CheckVectorElementCallArgs(this, TheCall))
return true;
@@ -5694,6 +5696,28 @@ bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
// position of memory order and scope arguments in the builtin
unsigned OrderIndex, ScopeIndex;
switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgcn_global_load_lds: {
+ constexpr int SizeIdx = 2;
+ llvm::APSInt Size;
+ Expr *ArgExpr = TheCall->getArg(SizeIdx);
+ ExprResult R = VerifyIntegerConstantExpression(ArgExpr, &Size);
+ if (R.isInvalid())
+ return true;
+ switch (Size.getSExtValue()) {
+ case 1:
+ case 2:
+ case 4:
+ return false;
+ default:
+ Diag(ArgExpr->getExprLoc(),
+ diag::err_amdgcn_global_load_lds_size_invalid_value)
+ << ArgExpr->getSourceRange();
+ Diag(ArgExpr->getExprLoc(),
+ diag::note_amdgcn_global_load_lds_size_valid_value)
+ << ArgExpr->getSourceRange();
+ return true;
+ }
+ }
case AMDGPU::BI__builtin_amdgcn_get_fpenv:
case AMDGPU::BI__builtin_amdgcn_set_fpenv:
return false;
@@ -5753,866 +5777,6 @@ bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
return false;
}
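// Annotation (not part of the patch): a standalone sketch of the size rule
// the new __builtin_amdgcn_global_load_lds case enforces; the constant size
// operand (argument index 2) must be 1, 2, or 4, and anything else is
// diagnosed. Clang plumbing elided; the "sketch" name is hypothetical.
static bool sketchIsValidGlobalLoadLdsSize(long long Size) {
  return Size == 1 || Size == 2 || Size == 4;
}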
-bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
- llvm::APSInt Result;
-
- // We can't check the value of a dependent argument.
- Expr *Arg = TheCall->getArg(ArgNum);
- if (Arg->isTypeDependent() || Arg->isValueDependent())
- return false;
-
- // Check constant-ness first.
- if (BuiltinConstantArg(TheCall, ArgNum, Result))
- return true;
-
- int64_t Val = Result.getSExtValue();
- if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
- return false;
-
- return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
- << Arg->getSourceRange();
-}
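// Annotation (not part of the patch): a standalone sketch of the LMUL rule
// being moved above; encodings 0..3 select LMUL m1/m2/m4/m8 and 5..7 select
// the fractional LMULs mf8/mf4/mf2, while 4 is reserved. The mapping is the
// standard RVV vtype encoding, stated here as background.
static bool sketchIsValidRVVLMUL(long long Val) {
  return (Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7);
}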
-
-static bool CheckInvalidVLENandLMUL(const TargetInfo &TI, CallExpr *TheCall,
- Sema &S, QualType Type, int EGW) {
- assert((EGW == 128 || EGW == 256) && "EGW can only be 128 or 256 bits");
-
- // LMUL * VLEN >= EGW
- ASTContext::BuiltinVectorTypeInfo Info =
- S.Context.getBuiltinVectorTypeInfo(Type->castAs<BuiltinType>());
- unsigned ElemSize = S.Context.getTypeSize(Info.ElementType);
- unsigned MinElemCount = Info.EC.getKnownMinValue();
-
- unsigned EGS = EGW / ElemSize;
- // If EGS is less than or equal to the minimum number of elements, then the
- // type is valid.
- if (EGS <= MinElemCount)
- return false;
-
- // Otherwise, we need vscale to be at least EGS / MinElemCount.
- assert(EGS % MinElemCount == 0);
- unsigned VScaleFactor = EGS / MinElemCount;
- // Vscale is VLEN/RVVBitsPerBlock.
- unsigned MinRequiredVLEN = VScaleFactor * llvm::RISCV::RVVBitsPerBlock;
- std::string RequiredExt = "zvl" + std::to_string(MinRequiredVLEN) + "b";
- if (!TI.hasFeature(RequiredExt))
- return S.Diag(TheCall->getBeginLoc(),
- diag::err_riscv_type_requires_extension) << Type << RequiredExt;
-
- return false;
-}
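// Annotation (not part of the patch): a worked instance of the check above,
// for an assumed EGW = 128 operand with 32-bit elements and a minimum element
// count of 2 (a vint32m1_t-style type, chosen for illustration):
//   EGS = 128 / 32 = 4;  4 > 2, so vscale must be at least 4 / 2 = 2,
//   hence MinRequiredVLEN = 2 * RVVBitsPerBlock = 2 * 64 = 128, and the
//   "zvl128b" feature is required for the type to be accepted.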
-
-bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID,
- CallExpr *TheCall) {
- // vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx,
- // vsmul.vv, vsmul.vx are not included for EEW=64 in Zve64*.
- switch (BuiltinID) {
- default:
- break;
- case RISCVVector::BI__builtin_rvv_vmulhsu_vv:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vx:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tu:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tu:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vv_m:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vx_m:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vv_mu:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vx_mu:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tum:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tum:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vmulhu_vv:
- case RISCVVector::BI__builtin_rvv_vmulhu_vx:
- case RISCVVector::BI__builtin_rvv_vmulhu_vv_tu:
- case RISCVVector::BI__builtin_rvv_vmulhu_vx_tu:
- case RISCVVector::BI__builtin_rvv_vmulhu_vv_m:
- case RISCVVector::BI__builtin_rvv_vmulhu_vx_m:
- case RISCVVector::BI__builtin_rvv_vmulhu_vv_mu:
- case RISCVVector::BI__builtin_rvv_vmulhu_vx_mu:
- case RISCVVector::BI__builtin_rvv_vmulhu_vv_tum:
- case RISCVVector::BI__builtin_rvv_vmulhu_vx_tum:
- case RISCVVector::BI__builtin_rvv_vmulhu_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vmulhu_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vmulh_vv:
- case RISCVVector::BI__builtin_rvv_vmulh_vx:
- case RISCVVector::BI__builtin_rvv_vmulh_vv_tu:
- case RISCVVector::BI__builtin_rvv_vmulh_vx_tu:
- case RISCVVector::BI__builtin_rvv_vmulh_vv_m:
- case RISCVVector::BI__builtin_rvv_vmulh_vx_m:
- case RISCVVector::BI__builtin_rvv_vmulh_vv_mu:
- case RISCVVector::BI__builtin_rvv_vmulh_vx_mu:
- case RISCVVector::BI__builtin_rvv_vmulh_vv_tum:
- case RISCVVector::BI__builtin_rvv_vmulh_vx_tum:
- case RISCVVector::BI__builtin_rvv_vmulh_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vmulh_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vsmul_vv:
- case RISCVVector::BI__builtin_rvv_vsmul_vx:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: {
- ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(
- TheCall->getType()->castAs<BuiltinType>());
-
- if (Context.getTypeSize(Info.ElementType) == 64 && !TI.hasFeature("v"))
- return Diag(TheCall->getBeginLoc(),
- diag::err_riscv_builtin_requires_extension)
- << /* IsExtension */ true << TheCall->getSourceRange() << "v";
-
- break;
- }
- }
-
- switch (BuiltinID) {
- case RISCVVector::BI__builtin_rvv_vsetvli:
- return BuiltinConstantArgRange(TheCall, 1, 0, 3) ||
- CheckRISCVLMUL(TheCall, 2);
- case RISCVVector::BI__builtin_rvv_vsetvlimax:
- return BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- CheckRISCVLMUL(TheCall, 1);
- case RISCVVector::BI__builtin_rvv_vget_v: {
- ASTContext::BuiltinVectorTypeInfo ResVecInfo =
- Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
- TheCall->getType().getCanonicalType().getTypePtr()));
- ASTContext::BuiltinVectorTypeInfo VecInfo =
- Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
- TheCall->getArg(0)->getType().getCanonicalType().getTypePtr()));
- unsigned MaxIndex;
- if (VecInfo.NumVectors != 1) // vget for tuple type
- MaxIndex = VecInfo.NumVectors;
- else // vget for non-tuple type
- MaxIndex = (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) /
- (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors);
- return BuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
- }
- case RISCVVector::BI__builtin_rvv_vset_v: {
- ASTContext::BuiltinVectorTypeInfo ResVecInfo =
- Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
- TheCall->getType().getCanonicalType().getTypePtr()));
- ASTContext::BuiltinVectorTypeInfo VecInfo =
- Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
- TheCall->getArg(2)->getType().getCanonicalType().getTypePtr()));
- unsigned MaxIndex;
- if (ResVecInfo.NumVectors != 1) // vset for tuple type
- MaxIndex = ResVecInfo.NumVectors;
- else // vset for non-tuple type
- MaxIndex = (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) /
- (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors);
- return BuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
- }
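// Annotation (not part of the patch): a worked instance of the MaxIndex
// computation above, for an assumed non-tuple vget from a vint32m4_t-style
// source (min EC 8) to a vint32m1_t-style result (min EC 2), both with
// NumVectors == 1: MaxIndex = (8 * 1) / (2 * 1) = 4, so the index operand
// is checked against the range [0, 3].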
- // Vector Crypto
- case RISCVVector::BI__builtin_rvv_vaeskf1_vi_tu:
- case RISCVVector::BI__builtin_rvv_vaeskf2_vi_tu:
- case RISCVVector::BI__builtin_rvv_vaeskf2_vi:
- case RISCVVector::BI__builtin_rvv_vsm4k_vi_tu: {
- QualType Op1Type = TheCall->getArg(0)->getType();
- QualType Op2Type = TheCall->getArg(1)->getType();
- return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) ||
- CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, 128) ||
- BuiltinConstantArgRange(TheCall, 2, 0, 31);
- }
- case RISCVVector::BI__builtin_rvv_vsm3c_vi_tu:
- case RISCVVector::BI__builtin_rvv_vsm3c_vi: {
- QualType Op1Type = TheCall->getArg(0)->getType();
- return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 256) ||
- BuiltinConstantArgRange(TheCall, 2, 0, 31);
- }
- case RISCVVector::BI__builtin_rvv_vaeskf1_vi:
- case RISCVVector::BI__builtin_rvv_vsm4k_vi: {
- QualType Op1Type = TheCall->getArg(0)->getType();
- return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) ||
- BuiltinConstantArgRange(TheCall, 1, 0, 31);
- }
- case RISCVVector::BI__builtin_rvv_vaesdf_vv:
- case RISCVVector::BI__builtin_rvv_vaesdf_vs:
- case RISCVVector::BI__builtin_rvv_vaesdm_vv:
- case RISCVVector::BI__builtin_rvv_vaesdm_vs:
- case RISCVVector::BI__builtin_rvv_vaesef_vv:
- case RISCVVector::BI__builtin_rvv_vaesef_vs:
- case RISCVVector::BI__builtin_rvv_vaesem_vv:
- case RISCVVector::BI__builtin_rvv_vaesem_vs:
- case RISCVVector::BI__builtin_rvv_vaesz_vs:
- case RISCVVector::BI__builtin_rvv_vsm4r_vv:
- case RISCVVector::BI__builtin_rvv_vsm4r_vs:
- case RISCVVector::BI__builtin_rvv_vaesdf_vv_tu:
- case RISCVVector::BI__builtin_rvv_vaesdf_vs_tu:
- case RISCVVector::BI__builtin_rvv_vaesdm_vv_tu:
- case RISCVVector::BI__builtin_rvv_vaesdm_vs_tu:
- case RISCVVector::BI__builtin_rvv_vaesef_vv_tu:
- case RISCVVector::BI__builtin_rvv_vaesef_vs_tu:
- case RISCVVector::BI__builtin_rvv_vaesem_vv_tu:
- case RISCVVector::BI__builtin_rvv_vaesem_vs_tu:
- case RISCVVector::BI__builtin_rvv_vaesz_vs_tu:
- case RISCVVector::BI__builtin_rvv_vsm4r_vv_tu:
- case RISCVVector::BI__builtin_rvv_vsm4r_vs_tu: {
- QualType Op1Type = TheCall->getArg(0)->getType();
- QualType Op2Type = TheCall->getArg(1)->getType();
- return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) ||
- CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, 128);
- }
- case RISCVVector::BI__builtin_rvv_vsha2ch_vv:
- case RISCVVector::BI__builtin_rvv_vsha2cl_vv:
- case RISCVVector::BI__builtin_rvv_vsha2ms_vv:
- case RISCVVector::BI__builtin_rvv_vsha2ch_vv_tu:
- case RISCVVector::BI__builtin_rvv_vsha2cl_vv_tu:
- case RISCVVector::BI__builtin_rvv_vsha2ms_vv_tu: {
- QualType Op1Type = TheCall->getArg(0)->getType();
- QualType Op2Type = TheCall->getArg(1)->getType();
- QualType Op3Type = TheCall->getArg(2)->getType();
- ASTContext::BuiltinVectorTypeInfo Info =
- Context.getBuiltinVectorTypeInfo(Op1Type->castAs<BuiltinType>());
- uint64_t ElemSize = Context.getTypeSize(Info.ElementType);
- if (ElemSize == 64 && !TI.hasFeature("zvknhb"))
- return Diag(TheCall->getBeginLoc(),
- diag::err_riscv_builtin_requires_extension)
- << /* IsExtension */ true << TheCall->getSourceRange() << "zvknhb";
-
- return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, ElemSize * 4) ||
- CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, ElemSize * 4) ||
- CheckInvalidVLENandLMUL(TI, TheCall, *this, Op3Type, ElemSize * 4);
- }
-
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se:
- // bit_27_26, bit_24_20, bit_11_7, simm5, sew, log2lmul
- return BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
- BuiltinConstantArgRange(TheCall, 2, 0, 31) ||
- BuiltinConstantArgRange(TheCall, 3, -16, 15) ||
- CheckRISCVLMUL(TheCall, 5);
- case RISCVVector::BI__builtin_rvv_sf_vc_iv_se:
- // bit_27_26, bit_11_7, vs2, simm5
- return BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
- BuiltinConstantArgRange(TheCall, 3, -16, 15);
- case RISCVVector::BI__builtin_rvv_sf_vc_v_i:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_i_se:
- // bit_27_26, bit_24_20, simm5
- return BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
- BuiltinConstantArgRange(TheCall, 2, -16, 15);
- case RISCVVector::BI__builtin_rvv_sf_vc_v_iv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_iv_se:
- // bit_27_26, vs2, simm5
- return BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- BuiltinConstantArgRange(TheCall, 2, -16, 15);
- case RISCVVector::BI__builtin_rvv_sf_vc_ivv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_ivw_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw_se:
- // bit_27_26, vd, vs2, simm5
- return BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- BuiltinConstantArgRange(TheCall, 3, -16, 15);
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se:
- // bit_27_26, bit_24_20, bit_11_7, xs1, sew, log2lmul
- return BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
- BuiltinConstantArgRange(TheCall, 2, 0, 31) ||
- CheckRISCVLMUL(TheCall, 5);
- case RISCVVector::BI__builtin_rvv_sf_vc_xv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_vv_se:
- // bit_27_26, bit_11_7, vs2, xs1/vs1
- case RISCVVector::BI__builtin_rvv_sf_vc_v_x:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_x_se:
- // bit_27_26, bit_24_20, xs1
- return BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- BuiltinConstantArgRange(TheCall, 1, 0, 31);
- case RISCVVector::BI__builtin_rvv_sf_vc_vvv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_xvv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_vvw_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_xvw_se:
- // bit_27_26, vd, vs2, xs1
- case RISCVVector::BI__builtin_rvv_sf_vc_v_xv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_vv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_xv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_vv_se:
- // bit_27_26, vs2, xs1/vs1
- case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw_se:
- // bit_27_26, vd, vs2, xs1/vs1
- return BuiltinConstantArgRange(TheCall, 0, 0, 3);
- case RISCVVector::BI__builtin_rvv_sf_vc_fv_se:
- // bit_26, bit_11_7, vs2, fs1
- return BuiltinConstantArgRange(TheCall, 0, 0, 1) ||
- BuiltinConstantArgRange(TheCall, 1, 0, 31);
- case RISCVVector::BI__builtin_rvv_sf_vc_fvv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_fvw_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw_se:
- // bit_26, vd, vs2, fs1
- case RISCVVector::BI__builtin_rvv_sf_vc_v_fv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_fv_se:
- // bit_26, vs2, fs1
- return BuiltinConstantArgRange(TheCall, 0, 0, 1);
- // Check if byteselect is in [0, 3]
- case RISCV::BI__builtin_riscv_aes32dsi:
- case RISCV::BI__builtin_riscv_aes32dsmi:
- case RISCV::BI__builtin_riscv_aes32esi:
- case RISCV::BI__builtin_riscv_aes32esmi:
- case RISCV::BI__builtin_riscv_sm4ks:
- case RISCV::BI__builtin_riscv_sm4ed:
- return BuiltinConstantArgRange(TheCall, 2, 0, 3);
- // Check if rnum is in [0, 10]
- case RISCV::BI__builtin_riscv_aes64ks1i:
- return BuiltinConstantArgRange(TheCall, 1, 0, 10);
- // Check if value range for vxrm is in [0, 3]
- case RISCVVector::BI__builtin_rvv_vaaddu_vv:
- case RISCVVector::BI__builtin_rvv_vaaddu_vx:
- case RISCVVector::BI__builtin_rvv_vaadd_vv:
- case RISCVVector::BI__builtin_rvv_vaadd_vx:
- case RISCVVector::BI__builtin_rvv_vasubu_vv:
- case RISCVVector::BI__builtin_rvv_vasubu_vx:
- case RISCVVector::BI__builtin_rvv_vasub_vv:
- case RISCVVector::BI__builtin_rvv_vasub_vx:
- case RISCVVector::BI__builtin_rvv_vsmul_vv:
- case RISCVVector::BI__builtin_rvv_vsmul_vx:
- case RISCVVector::BI__builtin_rvv_vssra_vv:
- case RISCVVector::BI__builtin_rvv_vssra_vx:
- case RISCVVector::BI__builtin_rvv_vssrl_vv:
- case RISCVVector::BI__builtin_rvv_vssrl_vx:
- case RISCVVector::BI__builtin_rvv_vnclip_wv:
- case RISCVVector::BI__builtin_rvv_vnclip_wx:
- case RISCVVector::BI__builtin_rvv_vnclipu_wv:
- case RISCVVector::BI__builtin_rvv_vnclipu_wx:
- return BuiltinConstantArgRange(TheCall, 2, 0, 3);
- case RISCVVector::BI__builtin_rvv_vaaddu_vv_tu:
- case RISCVVector::BI__builtin_rvv_vaaddu_vx_tu:
- case RISCVVector::BI__builtin_rvv_vaadd_vv_tu:
- case RISCVVector::BI__builtin_rvv_vaadd_vx_tu:
- case RISCVVector::BI__builtin_rvv_vasubu_vv_tu:
- case RISCVVector::BI__builtin_rvv_vasubu_vx_tu:
- case RISCVVector::BI__builtin_rvv_vasub_vv_tu:
- case RISCVVector::BI__builtin_rvv_vasub_vx_tu:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
- case RISCVVector::BI__builtin_rvv_vssra_vv_tu:
- case RISCVVector::BI__builtin_rvv_vssra_vx_tu:
- case RISCVVector::BI__builtin_rvv_vssrl_vv_tu:
- case RISCVVector::BI__builtin_rvv_vssrl_vx_tu:
- case RISCVVector::BI__builtin_rvv_vnclip_wv_tu:
- case RISCVVector::BI__builtin_rvv_vnclip_wx_tu:
- case RISCVVector::BI__builtin_rvv_vnclipu_wv_tu:
- case RISCVVector::BI__builtin_rvv_vnclipu_wx_tu:
- case RISCVVector::BI__builtin_rvv_vaaddu_vv_m:
- case RISCVVector::BI__builtin_rvv_vaaddu_vx_m:
- case RISCVVector::BI__builtin_rvv_vaadd_vv_m:
- case RISCVVector::BI__builtin_rvv_vaadd_vx_m:
- case RISCVVector::BI__builtin_rvv_vasubu_vv_m:
- case RISCVVector::BI__builtin_rvv_vasubu_vx_m:
- case RISCVVector::BI__builtin_rvv_vasub_vv_m:
- case RISCVVector::BI__builtin_rvv_vasub_vx_m:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
- case RISCVVector::BI__builtin_rvv_vssra_vv_m:
- case RISCVVector::BI__builtin_rvv_vssra_vx_m:
- case RISCVVector::BI__builtin_rvv_vssrl_vv_m:
- case RISCVVector::BI__builtin_rvv_vssrl_vx_m:
- case RISCVVector::BI__builtin_rvv_vnclip_wv_m:
- case RISCVVector::BI__builtin_rvv_vnclip_wx_m:
- case RISCVVector::BI__builtin_rvv_vnclipu_wv_m:
- case RISCVVector::BI__builtin_rvv_vnclipu_wx_m:
- return BuiltinConstantArgRange(TheCall, 3, 0, 3);
- case RISCVVector::BI__builtin_rvv_vaaddu_vv_tum:
- case RISCVVector::BI__builtin_rvv_vaaddu_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vaaddu_vv_mu:
- case RISCVVector::BI__builtin_rvv_vaaddu_vx_tum:
- case RISCVVector::BI__builtin_rvv_vaaddu_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vaaddu_vx_mu:
- case RISCVVector::BI__builtin_rvv_vaadd_vv_tum:
- case RISCVVector::BI__builtin_rvv_vaadd_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vaadd_vv_mu:
- case RISCVVector::BI__builtin_rvv_vaadd_vx_tum:
- case RISCVVector::BI__builtin_rvv_vaadd_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vaadd_vx_mu:
- case RISCVVector::BI__builtin_rvv_vasubu_vv_tum:
- case RISCVVector::BI__builtin_rvv_vasubu_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vasubu_vv_mu:
- case RISCVVector::BI__builtin_rvv_vasubu_vx_tum:
- case RISCVVector::BI__builtin_rvv_vasubu_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vasubu_vx_mu:
- case RISCVVector::BI__builtin_rvv_vasub_vv_tum:
- case RISCVVector::BI__builtin_rvv_vasub_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vasub_vv_mu:
- case RISCVVector::BI__builtin_rvv_vasub_vx_tum:
- case RISCVVector::BI__builtin_rvv_vasub_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vasub_vx_mu:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
- case RISCVVector::BI__builtin_rvv_vssra_vv_mu:
- case RISCVVector::BI__builtin_rvv_vssra_vx_mu:
- case RISCVVector::BI__builtin_rvv_vssrl_vv_mu:
- case RISCVVector::BI__builtin_rvv_vssrl_vx_mu:
- case RISCVVector::BI__builtin_rvv_vnclip_wv_mu:
- case RISCVVector::BI__builtin_rvv_vnclip_wx_mu:
- case RISCVVector::BI__builtin_rvv_vnclipu_wv_mu:
- case RISCVVector::BI__builtin_rvv_vnclipu_wx_mu:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
- case RISCVVector::BI__builtin_rvv_vssra_vv_tum:
- case RISCVVector::BI__builtin_rvv_vssra_vx_tum:
- case RISCVVector::BI__builtin_rvv_vssrl_vv_tum:
- case RISCVVector::BI__builtin_rvv_vssrl_vx_tum:
- case RISCVVector::BI__builtin_rvv_vnclip_wv_tum:
- case RISCVVector::BI__builtin_rvv_vnclip_wx_tum:
- case RISCVVector::BI__builtin_rvv_vnclipu_wv_tum:
- case RISCVVector::BI__builtin_rvv_vnclipu_wx_tum:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vssra_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vssra_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vssrl_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vssrl_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vnclip_wv_tumu:
- case RISCVVector::BI__builtin_rvv_vnclip_wx_tumu:
- case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu:
- case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu:
- return BuiltinConstantArgRange(TheCall, 4, 0, 3);
- case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm:
- case RISCVVector::BI__builtin_rvv_vfrec7_v_rm:
- case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm:
- case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm:
- case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm:
- case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm:
- case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm:
- case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm:
- return BuiltinConstantArgRange(TheCall, 1, 0, 4);
- case RISCVVector::BI__builtin_rvv_vfadd_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfadd_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfsub_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfsub_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm:
- case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm:
- case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm:
- case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm:
- case RISCVVector::BI__builtin_rvv_vfmul_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfmul_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm:
- case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm:
- case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm:
- case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm:
- case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_m:
- case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_m:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_m:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_m:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_m:
- return BuiltinConstantArgRange(TheCall, 2, 0, 4);
- case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_m:
- case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_m:
- case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_mu:
- return BuiltinConstantArgRange(TheCall, 3, 0, 4);
- case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_mu:
- return BuiltinConstantArgRange(TheCall, 4, 0, 4);
- case RISCV::BI__builtin_riscv_ntl_load:
- case RISCV::BI__builtin_riscv_ntl_store:
- DeclRefExpr *DRE =
- cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
- assert((BuiltinID == RISCV::BI__builtin_riscv_ntl_store ||
- BuiltinID == RISCV::BI__builtin_riscv_ntl_load) &&
- "Unexpected RISC-V nontemporal load/store builtin!");
- bool IsStore = BuiltinID == RISCV::BI__builtin_riscv_ntl_store;
- unsigned NumArgs = IsStore ? 3 : 2;
-
- if (checkArgCountAtLeast(*this, TheCall, NumArgs - 1))
- return true;
-
- if (checkArgCountAtMost(*this, TheCall, NumArgs))
- return true;
-
- // Domain value should be compile-time constant.
- // 2 <= domain <= 5
- if (TheCall->getNumArgs() == NumArgs &&
- BuiltinConstantArgRange(TheCall, NumArgs - 1, 2, 5))
- return true;
-
- Expr *PointerArg = TheCall->getArg(0);
- ExprResult PointerArgResult =
- DefaultFunctionArrayLvalueConversion(PointerArg);
-
- if (PointerArgResult.isInvalid())
- return true;
- PointerArg = PointerArgResult.get();
-
- const PointerType *PtrType = PointerArg->getType()->getAs<PointerType>();
- if (!PtrType) {
- Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
- << PointerArg->getType() << PointerArg->getSourceRange();
- return true;
- }
-
- QualType ValType = PtrType->getPointeeType();
- ValType = ValType.getUnqualifiedType();
- if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
- !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
- !ValType->isVectorType() && !ValType->isRVVSizelessBuiltinType()) {
- Diag(DRE->getBeginLoc(),
- diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
- << PointerArg->getType() << PointerArg->getSourceRange();
- return true;
- }
-
- if (!IsStore) {
- TheCall->setType(ValType);
- return false;
- }
-
- ExprResult ValArg = TheCall->getArg(1);
- InitializedEntity Entity = InitializedEntity::InitializeParameter(
- Context, ValType, /*consume*/ false);
- ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
- if (ValArg.isInvalid())
- return true;
-
- TheCall->setArg(1, ValArg.get());
- TheCall->setType(Context.VoidTy);
- return false;
- }
-
- return false;
-}
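// Annotation (not part of the patch): a standalone sketch of the
// nontemporal-builtin rules being moved above; ntl_load takes
// (ptr[, domain]) and ntl_store takes (ptr, value[, domain]), and a trailing
// domain, when present, must be a constant in [2, 5]. Clang plumbing elided;
// the "sketch" names are hypothetical.
static bool sketchNtlArity(bool IsStore, unsigned ArgCount) {
  unsigned Max = IsStore ? 3u : 2u; // the domain operand is optional
  return ArgCount == Max || ArgCount + 1 == Max;
}
static bool sketchNtlDomain(long long Domain) {
  return Domain >= 2 && Domain <= 5;
}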
-
bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
CallExpr *TheCall) {
if (BuiltinID == SystemZ::BI__builtin_tabort) {
@@ -6708,38 +5872,6 @@ bool Sema::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
return false;
}
-void Sema::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
- const llvm::StringMap<bool> &FeatureMap) {
- ASTContext::BuiltinVectorTypeInfo Info =
- Context.getBuiltinVectorTypeInfo(Ty->castAs<BuiltinType>());
- unsigned EltSize = Context.getTypeSize(Info.ElementType);
- unsigned MinElts = Info.EC.getKnownMinValue();
-
- if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Double) &&
- !FeatureMap.lookup("zve64d"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64d";
- // (ELEN, LMUL) pairs of (8, mf8), (16, mf4), (32, mf2), (64, m1) require at
- // least zve64x
- else if (((EltSize == 64 && Info.ElementType->isIntegerType()) ||
- MinElts == 1) &&
- !FeatureMap.lookup("zve64x"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64x";
- else if (Info.ElementType->isFloat16Type() && !FeatureMap.lookup("zvfh") &&
- !FeatureMap.lookup("zvfhmin"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D)
- << Ty << "zvfh or zvfhmin";
- else if (Info.ElementType->isBFloat16Type() &&
- !FeatureMap.lookup("experimental-zvfbfmin"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zvfbfmin";
- else if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Float) &&
- !FeatureMap.lookup("zve32f"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32f";
- // Given that the caller already checked isRVVType() before calling this
- // function, if we don't have at least zve32x supported, we need to emit an
- // error.
- else if (!FeatureMap.lookup("zve32x"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32x";
-}
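// Annotation (not part of the patch): the extension requirements enforced by
// the relocated checkRVVTypeSupport, summarized as a reading aid:
//   double element type                    -> zve64d
//   64-bit integer elements, or the (8, mf8) / (16, mf4) / (32, mf2) /
//   (64, m1) boundary cases                -> zve64x
//   _Float16 elements                      -> zvfh or zvfhmin
//   __bf16 elements                        -> experimental-zvfbfmin
//   float elements                         -> zve32f
//   any RVV builtin type at all            -> at least zve32x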
-
bool Sema::CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI,
unsigned BuiltinID,
CallExpr *TheCall) {
@@ -6748,862 +5880,12 @@ bool Sema::CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI,
case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
- return checkArgCountAtMost(*this, TheCall, 3);
+ return checkArgCountAtMost(TheCall, 3);
}
return false;
}
-// Check if the rounding mode is legal.
-bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
- // Indicates if this instruction has rounding control or just SAE.
- bool HasRC = false;
-
- unsigned ArgNum = 0;
- switch (BuiltinID) {
- default:
- return false;
- case X86::BI__builtin_ia32_vcvttsd2si32:
- case X86::BI__builtin_ia32_vcvttsd2si64:
- case X86::BI__builtin_ia32_vcvttsd2usi32:
- case X86::BI__builtin_ia32_vcvttsd2usi64:
- case X86::BI__builtin_ia32_vcvttss2si32:
- case X86::BI__builtin_ia32_vcvttss2si64:
- case X86::BI__builtin_ia32_vcvttss2usi32:
- case X86::BI__builtin_ia32_vcvttss2usi64:
- case X86::BI__builtin_ia32_vcvttsh2si32:
- case X86::BI__builtin_ia32_vcvttsh2si64:
- case X86::BI__builtin_ia32_vcvttsh2usi32:
- case X86::BI__builtin_ia32_vcvttsh2usi64:
- ArgNum = 1;
- break;
- case X86::BI__builtin_ia32_maxpd512:
- case X86::BI__builtin_ia32_maxps512:
- case X86::BI__builtin_ia32_minpd512:
- case X86::BI__builtin_ia32_minps512:
- case X86::BI__builtin_ia32_maxph512:
- case X86::BI__builtin_ia32_minph512:
- ArgNum = 2;
- break;
- case X86::BI__builtin_ia32_vcvtph2pd512_mask:
- case X86::BI__builtin_ia32_vcvtph2psx512_mask:
- case X86::BI__builtin_ia32_cvtps2pd512_mask:
- case X86::BI__builtin_ia32_cvttpd2dq512_mask:
- case X86::BI__builtin_ia32_cvttpd2qq512_mask:
- case X86::BI__builtin_ia32_cvttpd2udq512_mask:
- case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
- case X86::BI__builtin_ia32_cvttps2dq512_mask:
- case X86::BI__builtin_ia32_cvttps2qq512_mask:
- case X86::BI__builtin_ia32_cvttps2udq512_mask:
- case X86::BI__builtin_ia32_cvttps2uqq512_mask:
- case X86::BI__builtin_ia32_vcvttph2w512_mask:
- case X86::BI__builtin_ia32_vcvttph2uw512_mask:
- case X86::BI__builtin_ia32_vcvttph2dq512_mask:
- case X86::BI__builtin_ia32_vcvttph2udq512_mask:
- case X86::BI__builtin_ia32_vcvttph2qq512_mask:
- case X86::BI__builtin_ia32_vcvttph2uqq512_mask:
- case X86::BI__builtin_ia32_exp2pd_mask:
- case X86::BI__builtin_ia32_exp2ps_mask:
- case X86::BI__builtin_ia32_getexppd512_mask:
- case X86::BI__builtin_ia32_getexpps512_mask:
- case X86::BI__builtin_ia32_getexpph512_mask:
- case X86::BI__builtin_ia32_rcp28pd_mask:
- case X86::BI__builtin_ia32_rcp28ps_mask:
- case X86::BI__builtin_ia32_rsqrt28pd_mask:
- case X86::BI__builtin_ia32_rsqrt28ps_mask:
- case X86::BI__builtin_ia32_vcomisd:
- case X86::BI__builtin_ia32_vcomiss:
- case X86::BI__builtin_ia32_vcomish:
- case X86::BI__builtin_ia32_vcvtph2ps512_mask:
- ArgNum = 3;
- break;
- case X86::BI__builtin_ia32_cmppd512_mask:
- case X86::BI__builtin_ia32_cmpps512_mask:
- case X86::BI__builtin_ia32_cmpsd_mask:
- case X86::BI__builtin_ia32_cmpss_mask:
- case X86::BI__builtin_ia32_cmpsh_mask:
- case X86::BI__builtin_ia32_vcvtsh2sd_round_mask:
- case X86::BI__builtin_ia32_vcvtsh2ss_round_mask:
- case X86::BI__builtin_ia32_cvtss2sd_round_mask:
- case X86::BI__builtin_ia32_getexpsd128_round_mask:
- case X86::BI__builtin_ia32_getexpss128_round_mask:
- case X86::BI__builtin_ia32_getexpsh128_round_mask:
- case X86::BI__builtin_ia32_getmantpd512_mask:
- case X86::BI__builtin_ia32_getmantps512_mask:
- case X86::BI__builtin_ia32_getmantph512_mask:
- case X86::BI__builtin_ia32_maxsd_round_mask:
- case X86::BI__builtin_ia32_maxss_round_mask:
- case X86::BI__builtin_ia32_maxsh_round_mask:
- case X86::BI__builtin_ia32_minsd_round_mask:
- case X86::BI__builtin_ia32_minss_round_mask:
- case X86::BI__builtin_ia32_minsh_round_mask:
- case X86::BI__builtin_ia32_rcp28sd_round_mask:
- case X86::BI__builtin_ia32_rcp28ss_round_mask:
- case X86::BI__builtin_ia32_reducepd512_mask:
- case X86::BI__builtin_ia32_reduceps512_mask:
- case X86::BI__builtin_ia32_reduceph512_mask:
- case X86::BI__builtin_ia32_rndscalepd_mask:
- case X86::BI__builtin_ia32_rndscaleps_mask:
- case X86::BI__builtin_ia32_rndscaleph_mask:
- case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
- case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
- ArgNum = 4;
- break;
- case X86::BI__builtin_ia32_fixupimmpd512_mask:
- case X86::BI__builtin_ia32_fixupimmpd512_maskz:
- case X86::BI__builtin_ia32_fixupimmps512_mask:
- case X86::BI__builtin_ia32_fixupimmps512_maskz:
- case X86::BI__builtin_ia32_fixupimmsd_mask:
- case X86::BI__builtin_ia32_fixupimmsd_maskz:
- case X86::BI__builtin_ia32_fixupimmss_mask:
- case X86::BI__builtin_ia32_fixupimmss_maskz:
- case X86::BI__builtin_ia32_getmantsd_round_mask:
- case X86::BI__builtin_ia32_getmantss_round_mask:
- case X86::BI__builtin_ia32_getmantsh_round_mask:
- case X86::BI__builtin_ia32_rangepd512_mask:
- case X86::BI__builtin_ia32_rangeps512_mask:
- case X86::BI__builtin_ia32_rangesd128_round_mask:
- case X86::BI__builtin_ia32_rangess128_round_mask:
- case X86::BI__builtin_ia32_reducesd_mask:
- case X86::BI__builtin_ia32_reducess_mask:
- case X86::BI__builtin_ia32_reducesh_mask:
- case X86::BI__builtin_ia32_rndscalesd_round_mask:
- case X86::BI__builtin_ia32_rndscaless_round_mask:
- case X86::BI__builtin_ia32_rndscalesh_round_mask:
- ArgNum = 5;
- break;
- case X86::BI__builtin_ia32_vcvtsd2si64:
- case X86::BI__builtin_ia32_vcvtsd2si32:
- case X86::BI__builtin_ia32_vcvtsd2usi32:
- case X86::BI__builtin_ia32_vcvtsd2usi64:
- case X86::BI__builtin_ia32_vcvtss2si32:
- case X86::BI__builtin_ia32_vcvtss2si64:
- case X86::BI__builtin_ia32_vcvtss2usi32:
- case X86::BI__builtin_ia32_vcvtss2usi64:
- case X86::BI__builtin_ia32_vcvtsh2si32:
- case X86::BI__builtin_ia32_vcvtsh2si64:
- case X86::BI__builtin_ia32_vcvtsh2usi32:
- case X86::BI__builtin_ia32_vcvtsh2usi64:
- case X86::BI__builtin_ia32_sqrtpd512:
- case X86::BI__builtin_ia32_sqrtps512:
- case X86::BI__builtin_ia32_sqrtph512:
- ArgNum = 1;
- HasRC = true;
- break;
- case X86::BI__builtin_ia32_addph512:
- case X86::BI__builtin_ia32_divph512:
- case X86::BI__builtin_ia32_mulph512:
- case X86::BI__builtin_ia32_subph512:
- case X86::BI__builtin_ia32_addpd512:
- case X86::BI__builtin_ia32_addps512:
- case X86::BI__builtin_ia32_divpd512:
- case X86::BI__builtin_ia32_divps512:
- case X86::BI__builtin_ia32_mulpd512:
- case X86::BI__builtin_ia32_mulps512:
- case X86::BI__builtin_ia32_subpd512:
- case X86::BI__builtin_ia32_subps512:
- case X86::BI__builtin_ia32_cvtsi2sd64:
- case X86::BI__builtin_ia32_cvtsi2ss32:
- case X86::BI__builtin_ia32_cvtsi2ss64:
- case X86::BI__builtin_ia32_cvtusi2sd64:
- case X86::BI__builtin_ia32_cvtusi2ss32:
- case X86::BI__builtin_ia32_cvtusi2ss64:
- case X86::BI__builtin_ia32_vcvtusi2sh:
- case X86::BI__builtin_ia32_vcvtusi642sh:
- case X86::BI__builtin_ia32_vcvtsi2sh:
- case X86::BI__builtin_ia32_vcvtsi642sh:
- ArgNum = 2;
- HasRC = true;
- break;
- case X86::BI__builtin_ia32_cvtdq2ps512_mask:
- case X86::BI__builtin_ia32_cvtudq2ps512_mask:
- case X86::BI__builtin_ia32_vcvtpd2ph512_mask:
- case X86::BI__builtin_ia32_vcvtps2phx512_mask:
- case X86::BI__builtin_ia32_cvtpd2ps512_mask:
- case X86::BI__builtin_ia32_cvtpd2dq512_mask:
- case X86::BI__builtin_ia32_cvtpd2qq512_mask:
- case X86::BI__builtin_ia32_cvtpd2udq512_mask:
- case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
- case X86::BI__builtin_ia32_cvtps2dq512_mask:
- case X86::BI__builtin_ia32_cvtps2qq512_mask:
- case X86::BI__builtin_ia32_cvtps2udq512_mask:
- case X86::BI__builtin_ia32_cvtps2uqq512_mask:
- case X86::BI__builtin_ia32_cvtqq2pd512_mask:
- case X86::BI__builtin_ia32_cvtqq2ps512_mask:
- case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
- case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
- case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
- case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
- case X86::BI__builtin_ia32_vcvtw2ph512_mask:
- case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
- case X86::BI__builtin_ia32_vcvtph2w512_mask:
- case X86::BI__builtin_ia32_vcvtph2uw512_mask:
- case X86::BI__builtin_ia32_vcvtph2dq512_mask:
- case X86::BI__builtin_ia32_vcvtph2udq512_mask:
- case X86::BI__builtin_ia32_vcvtph2qq512_mask:
- case X86::BI__builtin_ia32_vcvtph2uqq512_mask:
- case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
- case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
- ArgNum = 3;
- HasRC = true;
- break;
- case X86::BI__builtin_ia32_addsh_round_mask:
- case X86::BI__builtin_ia32_addss_round_mask:
- case X86::BI__builtin_ia32_addsd_round_mask:
- case X86::BI__builtin_ia32_divsh_round_mask:
- case X86::BI__builtin_ia32_divss_round_mask:
- case X86::BI__builtin_ia32_divsd_round_mask:
- case X86::BI__builtin_ia32_mulsh_round_mask:
- case X86::BI__builtin_ia32_mulss_round_mask:
- case X86::BI__builtin_ia32_mulsd_round_mask:
- case X86::BI__builtin_ia32_subsh_round_mask:
- case X86::BI__builtin_ia32_subss_round_mask:
- case X86::BI__builtin_ia32_subsd_round_mask:
- case X86::BI__builtin_ia32_scalefph512_mask:
- case X86::BI__builtin_ia32_scalefpd512_mask:
- case X86::BI__builtin_ia32_scalefps512_mask:
- case X86::BI__builtin_ia32_scalefsd_round_mask:
- case X86::BI__builtin_ia32_scalefss_round_mask:
- case X86::BI__builtin_ia32_scalefsh_round_mask:
- case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
- case X86::BI__builtin_ia32_vcvtss2sh_round_mask:
- case X86::BI__builtin_ia32_vcvtsd2sh_round_mask:
- case X86::BI__builtin_ia32_sqrtsd_round_mask:
- case X86::BI__builtin_ia32_sqrtss_round_mask:
- case X86::BI__builtin_ia32_sqrtsh_round_mask:
- case X86::BI__builtin_ia32_vfmaddsd3_mask:
- case X86::BI__builtin_ia32_vfmaddsd3_maskz:
- case X86::BI__builtin_ia32_vfmaddsd3_mask3:
- case X86::BI__builtin_ia32_vfmaddss3_mask:
- case X86::BI__builtin_ia32_vfmaddss3_maskz:
- case X86::BI__builtin_ia32_vfmaddss3_mask3:
- case X86::BI__builtin_ia32_vfmaddsh3_mask:
- case X86::BI__builtin_ia32_vfmaddsh3_maskz:
- case X86::BI__builtin_ia32_vfmaddsh3_mask3:
- case X86::BI__builtin_ia32_vfmaddpd512_mask:
- case X86::BI__builtin_ia32_vfmaddpd512_maskz:
- case X86::BI__builtin_ia32_vfmaddpd512_mask3:
- case X86::BI__builtin_ia32_vfmsubpd512_mask3:
- case X86::BI__builtin_ia32_vfmaddps512_mask:
- case X86::BI__builtin_ia32_vfmaddps512_maskz:
- case X86::BI__builtin_ia32_vfmaddps512_mask3:
- case X86::BI__builtin_ia32_vfmsubps512_mask3:
- case X86::BI__builtin_ia32_vfmaddph512_mask:
- case X86::BI__builtin_ia32_vfmaddph512_maskz:
- case X86::BI__builtin_ia32_vfmaddph512_mask3:
- case X86::BI__builtin_ia32_vfmsubph512_mask3:
- case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
- case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
- case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
- case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
- case X86::BI__builtin_ia32_vfmaddsubps512_mask:
- case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
- case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
- case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
- case X86::BI__builtin_ia32_vfmaddsubph512_mask:
- case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
- case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
- case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
- case X86::BI__builtin_ia32_vfmaddcsh_mask:
- case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
- case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
- case X86::BI__builtin_ia32_vfmaddcph512_mask:
- case X86::BI__builtin_ia32_vfmaddcph512_maskz:
- case X86::BI__builtin_ia32_vfmaddcph512_mask3:
- case X86::BI__builtin_ia32_vfcmaddcsh_mask:
- case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
- case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
- case X86::BI__builtin_ia32_vfcmaddcph512_mask:
- case X86::BI__builtin_ia32_vfcmaddcph512_maskz:
- case X86::BI__builtin_ia32_vfcmaddcph512_mask3:
- case X86::BI__builtin_ia32_vfmulcsh_mask:
- case X86::BI__builtin_ia32_vfmulcph512_mask:
- case X86::BI__builtin_ia32_vfcmulcsh_mask:
- case X86::BI__builtin_ia32_vfcmulcph512_mask:
- ArgNum = 4;
- HasRC = true;
- break;
- }
-
- llvm::APSInt Result;
-
- // We can't check the value of a dependent argument.
- Expr *Arg = TheCall->getArg(ArgNum);
- if (Arg->isTypeDependent() || Arg->isValueDependent())
- return false;
-
- // Check constant-ness first.
- if (BuiltinConstantArg(TheCall, ArgNum, Result))
- return true;
-
-  // Make sure the rounding mode is either ROUND_CUR_DIRECTION or the
-  // ROUND_NO_EXC bit is set. If the intrinsic has rounding control (bits 1:0),
-  // make sure it is only combined with ROUND_NO_EXC. If the intrinsic does not
-  // have rounding control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
- if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
- Result == 8/*ROUND_NO_EXC*/ ||
- (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
- (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
- return false;
-
- return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
- << Arg->getSourceRange();
-}
-
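The accepted immediates line up with the _MM_FROUND_* macros from <immintrin.h>: 4 is ROUND_CUR_DIRECTION, 8 is ROUND_NO_EXC, and 8 through 11 encode a rounding mode in bits 1:0 combined with ROUND_NO_EXC. A minimal sketch of the predicate above, not the moved implementation:

    bool isValidRoundingImm(unsigned Imm, bool HasRC) {
      if (Imm == 4 /*ROUND_CUR_DIRECTION*/ || Imm == 8 /*ROUND_NO_EXC*/)
        return true;
      if (!HasRC)
        return Imm == 12;             // ROUND_CUR_DIRECTION | ROUND_NO_EXC
      return Imm >= 8 && Imm <= 11;   // rounding mode (bits 1:0) | ROUND_NO_EXC
    }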
-// Check if the gather/scatter scale is legal.
-bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
- CallExpr *TheCall) {
- unsigned ArgNum = 0;
- switch (BuiltinID) {
- default:
- return false;
- case X86::BI__builtin_ia32_gatherpfdpd:
- case X86::BI__builtin_ia32_gatherpfdps:
- case X86::BI__builtin_ia32_gatherpfqpd:
- case X86::BI__builtin_ia32_gatherpfqps:
- case X86::BI__builtin_ia32_scatterpfdpd:
- case X86::BI__builtin_ia32_scatterpfdps:
- case X86::BI__builtin_ia32_scatterpfqpd:
- case X86::BI__builtin_ia32_scatterpfqps:
- ArgNum = 3;
- break;
- case X86::BI__builtin_ia32_gatherd_pd:
- case X86::BI__builtin_ia32_gatherd_pd256:
- case X86::BI__builtin_ia32_gatherq_pd:
- case X86::BI__builtin_ia32_gatherq_pd256:
- case X86::BI__builtin_ia32_gatherd_ps:
- case X86::BI__builtin_ia32_gatherd_ps256:
- case X86::BI__builtin_ia32_gatherq_ps:
- case X86::BI__builtin_ia32_gatherq_ps256:
- case X86::BI__builtin_ia32_gatherd_q:
- case X86::BI__builtin_ia32_gatherd_q256:
- case X86::BI__builtin_ia32_gatherq_q:
- case X86::BI__builtin_ia32_gatherq_q256:
- case X86::BI__builtin_ia32_gatherd_d:
- case X86::BI__builtin_ia32_gatherd_d256:
- case X86::BI__builtin_ia32_gatherq_d:
- case X86::BI__builtin_ia32_gatherq_d256:
- case X86::BI__builtin_ia32_gather3div2df:
- case X86::BI__builtin_ia32_gather3div2di:
- case X86::BI__builtin_ia32_gather3div4df:
- case X86::BI__builtin_ia32_gather3div4di:
- case X86::BI__builtin_ia32_gather3div4sf:
- case X86::BI__builtin_ia32_gather3div4si:
- case X86::BI__builtin_ia32_gather3div8sf:
- case X86::BI__builtin_ia32_gather3div8si:
- case X86::BI__builtin_ia32_gather3siv2df:
- case X86::BI__builtin_ia32_gather3siv2di:
- case X86::BI__builtin_ia32_gather3siv4df:
- case X86::BI__builtin_ia32_gather3siv4di:
- case X86::BI__builtin_ia32_gather3siv4sf:
- case X86::BI__builtin_ia32_gather3siv4si:
- case X86::BI__builtin_ia32_gather3siv8sf:
- case X86::BI__builtin_ia32_gather3siv8si:
- case X86::BI__builtin_ia32_gathersiv8df:
- case X86::BI__builtin_ia32_gathersiv16sf:
- case X86::BI__builtin_ia32_gatherdiv8df:
- case X86::BI__builtin_ia32_gatherdiv16sf:
- case X86::BI__builtin_ia32_gathersiv8di:
- case X86::BI__builtin_ia32_gathersiv16si:
- case X86::BI__builtin_ia32_gatherdiv8di:
- case X86::BI__builtin_ia32_gatherdiv16si:
- case X86::BI__builtin_ia32_scatterdiv2df:
- case X86::BI__builtin_ia32_scatterdiv2di:
- case X86::BI__builtin_ia32_scatterdiv4df:
- case X86::BI__builtin_ia32_scatterdiv4di:
- case X86::BI__builtin_ia32_scatterdiv4sf:
- case X86::BI__builtin_ia32_scatterdiv4si:
- case X86::BI__builtin_ia32_scatterdiv8sf:
- case X86::BI__builtin_ia32_scatterdiv8si:
- case X86::BI__builtin_ia32_scattersiv2df:
- case X86::BI__builtin_ia32_scattersiv2di:
- case X86::BI__builtin_ia32_scattersiv4df:
- case X86::BI__builtin_ia32_scattersiv4di:
- case X86::BI__builtin_ia32_scattersiv4sf:
- case X86::BI__builtin_ia32_scattersiv4si:
- case X86::BI__builtin_ia32_scattersiv8sf:
- case X86::BI__builtin_ia32_scattersiv8si:
- case X86::BI__builtin_ia32_scattersiv8df:
- case X86::BI__builtin_ia32_scattersiv16sf:
- case X86::BI__builtin_ia32_scatterdiv8df:
- case X86::BI__builtin_ia32_scatterdiv16sf:
- case X86::BI__builtin_ia32_scattersiv8di:
- case X86::BI__builtin_ia32_scattersiv16si:
- case X86::BI__builtin_ia32_scatterdiv8di:
- case X86::BI__builtin_ia32_scatterdiv16si:
- ArgNum = 4;
- break;
- }
-
- llvm::APSInt Result;
-
- // We can't check the value of a dependent argument.
- Expr *Arg = TheCall->getArg(ArgNum);
- if (Arg->isTypeDependent() || Arg->isValueDependent())
- return false;
-
- // Check constant-ness first.
- if (BuiltinConstantArg(TheCall, ArgNum, Result))
- return true;
-
- if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
- return false;
-
- return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
- << Arg->getSourceRange();
-}
-
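The scale immediate is the x86 SIB addressing-mode scale, so only the four encodable values pass; for example, the last argument of _mm256_i32gather_epi32 must be 1, 2, 4, or 8. As a one-line sketch of the check above:

    bool isValidGatherScatterScale(long long Scale) {
      return Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8;
    }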
-enum { TileRegLow = 0, TileRegHigh = 7 };
-
-bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
- ArrayRef<int> ArgNums) {
- for (int ArgNum : ArgNums) {
- if (BuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
- return true;
- }
- return false;
-}
-
-bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
- ArrayRef<int> ArgNums) {
-  // Because the maximum number of tile registers is TileRegHigh + 1, we use
-  // one bit of the bitset per register to track their usage.
- std::bitset<TileRegHigh + 1> ArgValues;
- for (int ArgNum : ArgNums) {
- Expr *Arg = TheCall->getArg(ArgNum);
- if (Arg->isTypeDependent() || Arg->isValueDependent())
- continue;
-
- llvm::APSInt Result;
- if (BuiltinConstantArg(TheCall, ArgNum, Result))
- return true;
- int ArgExtValue = Result.getExtValue();
- assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
- "Incorrect tile register num.");
- if (ArgValues.test(ArgExtValue))
- return Diag(TheCall->getBeginLoc(),
- diag::err_x86_builtin_tile_arg_duplicate)
- << TheCall->getArg(ArgNum)->getSourceRange();
- ArgValues.set(ArgExtValue);
- }
- return false;
-}
-
-bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
- ArrayRef<int> ArgNums) {
- return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
- CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
-}
-
-bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
- switch (BuiltinID) {
- default:
- return false;
- case X86::BI__builtin_ia32_tileloadd64:
- case X86::BI__builtin_ia32_tileloaddt164:
- case X86::BI__builtin_ia32_tilestored64:
- case X86::BI__builtin_ia32_tilezero:
- return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
- case X86::BI__builtin_ia32_tdpbssd:
- case X86::BI__builtin_ia32_tdpbsud:
- case X86::BI__builtin_ia32_tdpbusd:
- case X86::BI__builtin_ia32_tdpbuud:
- case X86::BI__builtin_ia32_tdpbf16ps:
- case X86::BI__builtin_ia32_tdpfp16ps:
- case X86::BI__builtin_ia32_tcmmimfp16ps:
- case X86::BI__builtin_ia32_tcmmrlfp16ps:
- return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
- }
-}
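The AMX tile builtins take tile-register numbers (tmm0 through tmm7) as immediates, and the dot-product forms additionally require all three tiles to be distinct. A sketch of the duplicate detection, using one bit per register as the bitset above does:

    #include <bitset>

    // Sketch only; e.g. __builtin_ia32_tdpbssd(t, t, u) would be rejected.
    bool hasDuplicateTile(const int *Regs, int N) {
      std::bitset<8> Seen; // TileRegHigh + 1 == 8 tile registers
      for (int I = 0; I < N; ++I) {
        if (Seen.test(Regs[I]))
          return true;
        Seen.set(Regs[I]);
      }
      return false;
    }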
-static bool isX86_32Builtin(unsigned BuiltinID) {
- // These builtins only work on x86-32 targets.
- switch (BuiltinID) {
- case X86::BI__builtin_ia32_readeflags_u32:
- case X86::BI__builtin_ia32_writeeflags_u32:
- return true;
- }
-
- return false;
-}
-
-bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall) {
- // Check for 32-bit only builtins on a 64-bit target.
- const llvm::Triple &TT = TI.getTriple();
- if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
- return Diag(TheCall->getCallee()->getBeginLoc(),
- diag::err_32_bit_builtin_64_bit_tgt);
-
-  // If the intrinsic has rounding or SAE, make sure it's valid.
- if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
- return true;
-
-  // If the intrinsic has a gather/scatter scale immediate, make sure it's valid.
- if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
- return true;
-
-  // If the intrinsic has tile arguments, make sure they are valid.
- if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
- return true;
-
- // For intrinsics which take an immediate value as part of the instruction,
- // range check them here.
- int i = 0, l = 0, u = 0;
- switch (BuiltinID) {
- default:
- return false;
- case X86::BI__builtin_ia32_vec_ext_v2si:
- case X86::BI__builtin_ia32_vec_ext_v2di:
- case X86::BI__builtin_ia32_vextractf128_pd256:
- case X86::BI__builtin_ia32_vextractf128_ps256:
- case X86::BI__builtin_ia32_vextractf128_si256:
- case X86::BI__builtin_ia32_extract128i256:
- case X86::BI__builtin_ia32_extractf64x4_mask:
- case X86::BI__builtin_ia32_extracti64x4_mask:
- case X86::BI__builtin_ia32_extractf32x8_mask:
- case X86::BI__builtin_ia32_extracti32x8_mask:
- case X86::BI__builtin_ia32_extractf64x2_256_mask:
- case X86::BI__builtin_ia32_extracti64x2_256_mask:
- case X86::BI__builtin_ia32_extractf32x4_256_mask:
- case X86::BI__builtin_ia32_extracti32x4_256_mask:
- i = 1; l = 0; u = 1;
- break;
- case X86::BI__builtin_ia32_vec_set_v2di:
- case X86::BI__builtin_ia32_vinsertf128_pd256:
- case X86::BI__builtin_ia32_vinsertf128_ps256:
- case X86::BI__builtin_ia32_vinsertf128_si256:
- case X86::BI__builtin_ia32_insert128i256:
- case X86::BI__builtin_ia32_insertf32x8:
- case X86::BI__builtin_ia32_inserti32x8:
- case X86::BI__builtin_ia32_insertf64x4:
- case X86::BI__builtin_ia32_inserti64x4:
- case X86::BI__builtin_ia32_insertf64x2_256:
- case X86::BI__builtin_ia32_inserti64x2_256:
- case X86::BI__builtin_ia32_insertf32x4_256:
- case X86::BI__builtin_ia32_inserti32x4_256:
- i = 2; l = 0; u = 1;
- break;
- case X86::BI__builtin_ia32_vpermilpd:
- case X86::BI__builtin_ia32_vec_ext_v4hi:
- case X86::BI__builtin_ia32_vec_ext_v4si:
- case X86::BI__builtin_ia32_vec_ext_v4sf:
- case X86::BI__builtin_ia32_vec_ext_v4di:
- case X86::BI__builtin_ia32_extractf32x4_mask:
- case X86::BI__builtin_ia32_extracti32x4_mask:
- case X86::BI__builtin_ia32_extractf64x2_512_mask:
- case X86::BI__builtin_ia32_extracti64x2_512_mask:
- i = 1; l = 0; u = 3;
- break;
- case X86::BI_mm_prefetch:
- case X86::BI__builtin_ia32_vec_ext_v8hi:
- case X86::BI__builtin_ia32_vec_ext_v8si:
- i = 1; l = 0; u = 7;
- break;
- case X86::BI__builtin_ia32_sha1rnds4:
- case X86::BI__builtin_ia32_blendpd:
- case X86::BI__builtin_ia32_shufpd:
- case X86::BI__builtin_ia32_vec_set_v4hi:
- case X86::BI__builtin_ia32_vec_set_v4si:
- case X86::BI__builtin_ia32_vec_set_v4di:
- case X86::BI__builtin_ia32_shuf_f32x4_256:
- case X86::BI__builtin_ia32_shuf_f64x2_256:
- case X86::BI__builtin_ia32_shuf_i32x4_256:
- case X86::BI__builtin_ia32_shuf_i64x2_256:
- case X86::BI__builtin_ia32_insertf64x2_512:
- case X86::BI__builtin_ia32_inserti64x2_512:
- case X86::BI__builtin_ia32_insertf32x4:
- case X86::BI__builtin_ia32_inserti32x4:
- i = 2; l = 0; u = 3;
- break;
- case X86::BI__builtin_ia32_vpermil2pd:
- case X86::BI__builtin_ia32_vpermil2pd256:
- case X86::BI__builtin_ia32_vpermil2ps:
- case X86::BI__builtin_ia32_vpermil2ps256:
- i = 3; l = 0; u = 3;
- break;
- case X86::BI__builtin_ia32_cmpb128_mask:
- case X86::BI__builtin_ia32_cmpw128_mask:
- case X86::BI__builtin_ia32_cmpd128_mask:
- case X86::BI__builtin_ia32_cmpq128_mask:
- case X86::BI__builtin_ia32_cmpb256_mask:
- case X86::BI__builtin_ia32_cmpw256_mask:
- case X86::BI__builtin_ia32_cmpd256_mask:
- case X86::BI__builtin_ia32_cmpq256_mask:
- case X86::BI__builtin_ia32_cmpb512_mask:
- case X86::BI__builtin_ia32_cmpw512_mask:
- case X86::BI__builtin_ia32_cmpd512_mask:
- case X86::BI__builtin_ia32_cmpq512_mask:
- case X86::BI__builtin_ia32_ucmpb128_mask:
- case X86::BI__builtin_ia32_ucmpw128_mask:
- case X86::BI__builtin_ia32_ucmpd128_mask:
- case X86::BI__builtin_ia32_ucmpq128_mask:
- case X86::BI__builtin_ia32_ucmpb256_mask:
- case X86::BI__builtin_ia32_ucmpw256_mask:
- case X86::BI__builtin_ia32_ucmpd256_mask:
- case X86::BI__builtin_ia32_ucmpq256_mask:
- case X86::BI__builtin_ia32_ucmpb512_mask:
- case X86::BI__builtin_ia32_ucmpw512_mask:
- case X86::BI__builtin_ia32_ucmpd512_mask:
- case X86::BI__builtin_ia32_ucmpq512_mask:
- case X86::BI__builtin_ia32_vpcomub:
- case X86::BI__builtin_ia32_vpcomuw:
- case X86::BI__builtin_ia32_vpcomud:
- case X86::BI__builtin_ia32_vpcomuq:
- case X86::BI__builtin_ia32_vpcomb:
- case X86::BI__builtin_ia32_vpcomw:
- case X86::BI__builtin_ia32_vpcomd:
- case X86::BI__builtin_ia32_vpcomq:
- case X86::BI__builtin_ia32_vec_set_v8hi:
- case X86::BI__builtin_ia32_vec_set_v8si:
- i = 2; l = 0; u = 7;
- break;
- case X86::BI__builtin_ia32_vpermilpd256:
- case X86::BI__builtin_ia32_roundps:
- case X86::BI__builtin_ia32_roundpd:
- case X86::BI__builtin_ia32_roundps256:
- case X86::BI__builtin_ia32_roundpd256:
- case X86::BI__builtin_ia32_getmantpd128_mask:
- case X86::BI__builtin_ia32_getmantpd256_mask:
- case X86::BI__builtin_ia32_getmantps128_mask:
- case X86::BI__builtin_ia32_getmantps256_mask:
- case X86::BI__builtin_ia32_getmantpd512_mask:
- case X86::BI__builtin_ia32_getmantps512_mask:
- case X86::BI__builtin_ia32_getmantph128_mask:
- case X86::BI__builtin_ia32_getmantph256_mask:
- case X86::BI__builtin_ia32_getmantph512_mask:
- case X86::BI__builtin_ia32_vec_ext_v16qi:
- case X86::BI__builtin_ia32_vec_ext_v16hi:
- i = 1; l = 0; u = 15;
- break;
- case X86::BI__builtin_ia32_pblendd128:
- case X86::BI__builtin_ia32_blendps:
- case X86::BI__builtin_ia32_blendpd256:
- case X86::BI__builtin_ia32_shufpd256:
- case X86::BI__builtin_ia32_roundss:
- case X86::BI__builtin_ia32_roundsd:
- case X86::BI__builtin_ia32_rangepd128_mask:
- case X86::BI__builtin_ia32_rangepd256_mask:
- case X86::BI__builtin_ia32_rangepd512_mask:
- case X86::BI__builtin_ia32_rangeps128_mask:
- case X86::BI__builtin_ia32_rangeps256_mask:
- case X86::BI__builtin_ia32_rangeps512_mask:
- case X86::BI__builtin_ia32_getmantsd_round_mask:
- case X86::BI__builtin_ia32_getmantss_round_mask:
- case X86::BI__builtin_ia32_getmantsh_round_mask:
- case X86::BI__builtin_ia32_vec_set_v16qi:
- case X86::BI__builtin_ia32_vec_set_v16hi:
- i = 2; l = 0; u = 15;
- break;
- case X86::BI__builtin_ia32_vec_ext_v32qi:
- i = 1; l = 0; u = 31;
- break;
- case X86::BI__builtin_ia32_cmpps:
- case X86::BI__builtin_ia32_cmpss:
- case X86::BI__builtin_ia32_cmppd:
- case X86::BI__builtin_ia32_cmpsd:
- case X86::BI__builtin_ia32_cmpps256:
- case X86::BI__builtin_ia32_cmppd256:
- case X86::BI__builtin_ia32_cmpps128_mask:
- case X86::BI__builtin_ia32_cmppd128_mask:
- case X86::BI__builtin_ia32_cmpps256_mask:
- case X86::BI__builtin_ia32_cmppd256_mask:
- case X86::BI__builtin_ia32_cmpps512_mask:
- case X86::BI__builtin_ia32_cmppd512_mask:
- case X86::BI__builtin_ia32_cmpsd_mask:
- case X86::BI__builtin_ia32_cmpss_mask:
- case X86::BI__builtin_ia32_vec_set_v32qi:
- i = 2; l = 0; u = 31;
- break;
- case X86::BI__builtin_ia32_permdf256:
- case X86::BI__builtin_ia32_permdi256:
- case X86::BI__builtin_ia32_permdf512:
- case X86::BI__builtin_ia32_permdi512:
- case X86::BI__builtin_ia32_vpermilps:
- case X86::BI__builtin_ia32_vpermilps256:
- case X86::BI__builtin_ia32_vpermilpd512:
- case X86::BI__builtin_ia32_vpermilps512:
- case X86::BI__builtin_ia32_pshufd:
- case X86::BI__builtin_ia32_pshufd256:
- case X86::BI__builtin_ia32_pshufd512:
- case X86::BI__builtin_ia32_pshufhw:
- case X86::BI__builtin_ia32_pshufhw256:
- case X86::BI__builtin_ia32_pshufhw512:
- case X86::BI__builtin_ia32_pshuflw:
- case X86::BI__builtin_ia32_pshuflw256:
- case X86::BI__builtin_ia32_pshuflw512:
- case X86::BI__builtin_ia32_vcvtps2ph:
- case X86::BI__builtin_ia32_vcvtps2ph_mask:
- case X86::BI__builtin_ia32_vcvtps2ph256:
- case X86::BI__builtin_ia32_vcvtps2ph256_mask:
- case X86::BI__builtin_ia32_vcvtps2ph512_mask:
- case X86::BI__builtin_ia32_rndscaleps_128_mask:
- case X86::BI__builtin_ia32_rndscalepd_128_mask:
- case X86::BI__builtin_ia32_rndscaleps_256_mask:
- case X86::BI__builtin_ia32_rndscalepd_256_mask:
- case X86::BI__builtin_ia32_rndscaleps_mask:
- case X86::BI__builtin_ia32_rndscalepd_mask:
- case X86::BI__builtin_ia32_rndscaleph_mask:
- case X86::BI__builtin_ia32_reducepd128_mask:
- case X86::BI__builtin_ia32_reducepd256_mask:
- case X86::BI__builtin_ia32_reducepd512_mask:
- case X86::BI__builtin_ia32_reduceps128_mask:
- case X86::BI__builtin_ia32_reduceps256_mask:
- case X86::BI__builtin_ia32_reduceps512_mask:
- case X86::BI__builtin_ia32_reduceph128_mask:
- case X86::BI__builtin_ia32_reduceph256_mask:
- case X86::BI__builtin_ia32_reduceph512_mask:
- case X86::BI__builtin_ia32_prold512:
- case X86::BI__builtin_ia32_prolq512:
- case X86::BI__builtin_ia32_prold128:
- case X86::BI__builtin_ia32_prold256:
- case X86::BI__builtin_ia32_prolq128:
- case X86::BI__builtin_ia32_prolq256:
- case X86::BI__builtin_ia32_prord512:
- case X86::BI__builtin_ia32_prorq512:
- case X86::BI__builtin_ia32_prord128:
- case X86::BI__builtin_ia32_prord256:
- case X86::BI__builtin_ia32_prorq128:
- case X86::BI__builtin_ia32_prorq256:
- case X86::BI__builtin_ia32_fpclasspd128_mask:
- case X86::BI__builtin_ia32_fpclasspd256_mask:
- case X86::BI__builtin_ia32_fpclassps128_mask:
- case X86::BI__builtin_ia32_fpclassps256_mask:
- case X86::BI__builtin_ia32_fpclassps512_mask:
- case X86::BI__builtin_ia32_fpclasspd512_mask:
- case X86::BI__builtin_ia32_fpclassph128_mask:
- case X86::BI__builtin_ia32_fpclassph256_mask:
- case X86::BI__builtin_ia32_fpclassph512_mask:
- case X86::BI__builtin_ia32_fpclasssd_mask:
- case X86::BI__builtin_ia32_fpclassss_mask:
- case X86::BI__builtin_ia32_fpclasssh_mask:
- case X86::BI__builtin_ia32_pslldqi128_byteshift:
- case X86::BI__builtin_ia32_pslldqi256_byteshift:
- case X86::BI__builtin_ia32_pslldqi512_byteshift:
- case X86::BI__builtin_ia32_psrldqi128_byteshift:
- case X86::BI__builtin_ia32_psrldqi256_byteshift:
- case X86::BI__builtin_ia32_psrldqi512_byteshift:
- case X86::BI__builtin_ia32_kshiftliqi:
- case X86::BI__builtin_ia32_kshiftlihi:
- case X86::BI__builtin_ia32_kshiftlisi:
- case X86::BI__builtin_ia32_kshiftlidi:
- case X86::BI__builtin_ia32_kshiftriqi:
- case X86::BI__builtin_ia32_kshiftrihi:
- case X86::BI__builtin_ia32_kshiftrisi:
- case X86::BI__builtin_ia32_kshiftridi:
- i = 1; l = 0; u = 255;
- break;
- case X86::BI__builtin_ia32_vperm2f128_pd256:
- case X86::BI__builtin_ia32_vperm2f128_ps256:
- case X86::BI__builtin_ia32_vperm2f128_si256:
- case X86::BI__builtin_ia32_permti256:
- case X86::BI__builtin_ia32_pblendw128:
- case X86::BI__builtin_ia32_pblendw256:
- case X86::BI__builtin_ia32_blendps256:
- case X86::BI__builtin_ia32_pblendd256:
- case X86::BI__builtin_ia32_palignr128:
- case X86::BI__builtin_ia32_palignr256:
- case X86::BI__builtin_ia32_palignr512:
- case X86::BI__builtin_ia32_alignq512:
- case X86::BI__builtin_ia32_alignd512:
- case X86::BI__builtin_ia32_alignd128:
- case X86::BI__builtin_ia32_alignd256:
- case X86::BI__builtin_ia32_alignq128:
- case X86::BI__builtin_ia32_alignq256:
- case X86::BI__builtin_ia32_vcomisd:
- case X86::BI__builtin_ia32_vcomiss:
- case X86::BI__builtin_ia32_shuf_f32x4:
- case X86::BI__builtin_ia32_shuf_f64x2:
- case X86::BI__builtin_ia32_shuf_i32x4:
- case X86::BI__builtin_ia32_shuf_i64x2:
- case X86::BI__builtin_ia32_shufpd512:
- case X86::BI__builtin_ia32_shufps:
- case X86::BI__builtin_ia32_shufps256:
- case X86::BI__builtin_ia32_shufps512:
- case X86::BI__builtin_ia32_dbpsadbw128:
- case X86::BI__builtin_ia32_dbpsadbw256:
- case X86::BI__builtin_ia32_dbpsadbw512:
- case X86::BI__builtin_ia32_vpshldd128:
- case X86::BI__builtin_ia32_vpshldd256:
- case X86::BI__builtin_ia32_vpshldd512:
- case X86::BI__builtin_ia32_vpshldq128:
- case X86::BI__builtin_ia32_vpshldq256:
- case X86::BI__builtin_ia32_vpshldq512:
- case X86::BI__builtin_ia32_vpshldw128:
- case X86::BI__builtin_ia32_vpshldw256:
- case X86::BI__builtin_ia32_vpshldw512:
- case X86::BI__builtin_ia32_vpshrdd128:
- case X86::BI__builtin_ia32_vpshrdd256:
- case X86::BI__builtin_ia32_vpshrdd512:
- case X86::BI__builtin_ia32_vpshrdq128:
- case X86::BI__builtin_ia32_vpshrdq256:
- case X86::BI__builtin_ia32_vpshrdq512:
- case X86::BI__builtin_ia32_vpshrdw128:
- case X86::BI__builtin_ia32_vpshrdw256:
- case X86::BI__builtin_ia32_vpshrdw512:
- i = 2; l = 0; u = 255;
- break;
- case X86::BI__builtin_ia32_fixupimmpd512_mask:
- case X86::BI__builtin_ia32_fixupimmpd512_maskz:
- case X86::BI__builtin_ia32_fixupimmps512_mask:
- case X86::BI__builtin_ia32_fixupimmps512_maskz:
- case X86::BI__builtin_ia32_fixupimmsd_mask:
- case X86::BI__builtin_ia32_fixupimmsd_maskz:
- case X86::BI__builtin_ia32_fixupimmss_mask:
- case X86::BI__builtin_ia32_fixupimmss_maskz:
- case X86::BI__builtin_ia32_fixupimmpd128_mask:
- case X86::BI__builtin_ia32_fixupimmpd128_maskz:
- case X86::BI__builtin_ia32_fixupimmpd256_mask:
- case X86::BI__builtin_ia32_fixupimmpd256_maskz:
- case X86::BI__builtin_ia32_fixupimmps128_mask:
- case X86::BI__builtin_ia32_fixupimmps128_maskz:
- case X86::BI__builtin_ia32_fixupimmps256_mask:
- case X86::BI__builtin_ia32_fixupimmps256_maskz:
- case X86::BI__builtin_ia32_pternlogd512_mask:
- case X86::BI__builtin_ia32_pternlogd512_maskz:
- case X86::BI__builtin_ia32_pternlogq512_mask:
- case X86::BI__builtin_ia32_pternlogq512_maskz:
- case X86::BI__builtin_ia32_pternlogd128_mask:
- case X86::BI__builtin_ia32_pternlogd128_maskz:
- case X86::BI__builtin_ia32_pternlogd256_mask:
- case X86::BI__builtin_ia32_pternlogd256_maskz:
- case X86::BI__builtin_ia32_pternlogq128_mask:
- case X86::BI__builtin_ia32_pternlogq128_maskz:
- case X86::BI__builtin_ia32_pternlogq256_mask:
- case X86::BI__builtin_ia32_pternlogq256_maskz:
- case X86::BI__builtin_ia32_vsm3rnds2:
- i = 3; l = 0; u = 255;
- break;
- case X86::BI__builtin_ia32_gatherpfdpd:
- case X86::BI__builtin_ia32_gatherpfdps:
- case X86::BI__builtin_ia32_gatherpfqpd:
- case X86::BI__builtin_ia32_gatherpfqps:
- case X86::BI__builtin_ia32_scatterpfdpd:
- case X86::BI__builtin_ia32_scatterpfdps:
- case X86::BI__builtin_ia32_scatterpfqpd:
- case X86::BI__builtin_ia32_scatterpfqps:
- i = 4; l = 2; u = 3;
- break;
- case X86::BI__builtin_ia32_reducesd_mask:
- case X86::BI__builtin_ia32_reducess_mask:
- case X86::BI__builtin_ia32_rndscalesd_round_mask:
- case X86::BI__builtin_ia32_rndscaless_round_mask:
- case X86::BI__builtin_ia32_rndscalesh_round_mask:
- case X86::BI__builtin_ia32_reducesh_mask:
- i = 4; l = 0; u = 255;
- break;
- case X86::BI__builtin_ia32_cmpccxadd32:
- case X86::BI__builtin_ia32_cmpccxadd64:
- i = 3; l = 0; u = 15;
- break;
- }
-
-  // Note that we don't force a hard error on the range check here, allowing
-  // template-generated or macro-generated dead code to potentially have out-of-
-  // range values. Such code still needs to code-generate, but doesn't need to
-  // make any sense. We use a warning that defaults to an error.
- return BuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
-}
-
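Each case in the switch above boils down to a triple (argument index i, lower bound l, upper bound u) that feeds the final BuiltinConstantArgRange call. For instance, the BI_mm_prefetch entry (i = 1, l = 0, u = 7) covers uses like the sketch below, assuming an SSE-enabled x86 target; the hint macros expand to in-range constants:

    #include <xmmintrin.h>

    void warm(const char *P) {
      _mm_prefetch(P, _MM_HINT_T0); // the hint expands to 3, within [0, 7]
    }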
/// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
/// parameter with the FormatAttr's correct format_idx and firstDataArg.
/// Returns true when the format fits the function and the FormatStringInfo has
@@ -8521,7 +6803,7 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
if (!pointerType) {
Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
- << Ptr->getType() << Ptr->getSourceRange();
+ << Ptr->getType() << 0 << Ptr->getSourceRange();
return ExprError();
}
@@ -8550,6 +6832,13 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
}
}
+ // Pointer to object of size zero is not allowed.
+ if (Context.getTypeInfoInChars(AtomTy).Width.isZero()) {
+ Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
+ << Ptr->getType() << 1 << Ptr->getSourceRange();
+ return ExprError();
+ }
+
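The added check rejects atomic operations on pointers to zero-sized objects, which previously slipped past the plain pointer check. Zero-sized types arise, for example, from GNU C's zero-length-array extension; a hypothetical example that would now be diagnosed:

    struct ZeroSized { int A[0]; };            // sizeof == 0, GNU extension

    void f(struct ZeroSized *P, struct ZeroSized *Out) {
      __atomic_load(P, Out, __ATOMIC_RELAXED); // err_atomic_builtin_must_be_pointer
    }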
// For an arithmetic operation, the implied arithmetic must be well-formed.
if (Form == Arithmetic) {
// GCC does not enforce these rules for GNU atomics, but we do to help catch
@@ -8941,7 +7230,7 @@ ExprResult Sema::BuiltinAtomicOverloaded(ExprResult TheCallResult) {
const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
if (!pointerType) {
Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
- << FirstArg->getType() << FirstArg->getSourceRange();
+ << FirstArg->getType() << 0 << FirstArg->getSourceRange();
return ExprError();
}
@@ -8949,7 +7238,7 @@ ExprResult Sema::BuiltinAtomicOverloaded(ExprResult TheCallResult) {
if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
!ValType->isBlockPointerType()) {
Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
- << FirstArg->getType() << FirstArg->getSourceRange();
+ << FirstArg->getType() << 0 << FirstArg->getSourceRange();
return ExprError();
}
@@ -9302,7 +7591,7 @@ ExprResult Sema::BuiltinNontemporalOverloaded(ExprResult TheCallResult) {
unsigned numArgs = isStore ? 2 : 1;
// Ensure that we have the proper number of arguments.
- if (checkArgCount(*this, TheCall, numArgs))
+ if (checkArgCount(TheCall, numArgs))
return ExprError();
// Inspect the last argument of the nontemporal builtin. This should always
@@ -9467,7 +7756,7 @@ bool Sema::BuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
   // In C23 mode, va_start only needs one argument. However, the builtin still
   // requires two arguments (which matches the behavior of the GCC builtin), so
   // <stdarg.h> passes `0` as the second argument in C23 mode.
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
// Type-check the first argument normally.
@@ -9598,7 +7887,7 @@ bool Sema::BuiltinVAStartARMMicrosoft(CallExpr *Call) {
/// BuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
/// friends. This is declared to take (...), so we have to check everything.
bool Sema::BuiltinUnorderedCompare(CallExpr *TheCall, unsigned BuiltinID) {
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
if (BuiltinID == Builtin::BI__builtin_isunordered &&
@@ -9642,7 +7931,7 @@ bool Sema::BuiltinUnorderedCompare(CallExpr *TheCall, unsigned BuiltinID) {
/// to check everything.
bool Sema::BuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs,
unsigned BuiltinID) {
- if (checkArgCount(*this, TheCall, NumArgs))
+ if (checkArgCount(TheCall, NumArgs))
return true;
FPOptions FPO = TheCall->getFPFeaturesInEffect(getLangOpts());
@@ -9727,7 +8016,7 @@ bool Sema::BuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs,
/// Perform semantic analysis for a call to __builtin_complex.
bool Sema::BuiltinComplex(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
bool Dependent = false;
@@ -9789,7 +8078,7 @@ bool Sema::BuiltinComplex(CallExpr *TheCall) {
// vector short vec_xxsldwi(vector short, vector short, int);
bool Sema::BuiltinVSX(CallExpr *TheCall) {
unsigned ExpectedNumArgs = 3;
- if (checkArgCount(*this, TheCall, ExpectedNumArgs))
+ if (checkArgCount(TheCall, ExpectedNumArgs))
return true;
   // Check that the third argument is a compile-time constant.
@@ -9976,7 +8265,7 @@ bool Sema::BuiltinArithmeticFence(CallExpr *TheCall) {
if (!Context.getTargetInfo().checkArithmeticFenceSupported())
return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
<< SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return true;
Expr *Arg = TheCall->getArg(0);
if (Arg->isInstantiationDependent())
@@ -10046,7 +8335,7 @@ bool Sema::BuiltinAllocaWithAlign(CallExpr *TheCall) {
/// Handle __builtin_assume_aligned. This is declared
/// as (const void*, size_t, ...) and can take one optional constant int arg.
bool Sema::BuiltinAssumeAligned(CallExpr *TheCall) {
- if (checkArgCountRange(*this, TheCall, 2, 3))
+ if (checkArgCountRange(TheCall, 2, 3))
return true;
unsigned NumArgs = TheCall->getNumArgs();
@@ -10349,7 +8638,7 @@ bool Sema::BuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
/// BuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
bool Sema::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
if (BuiltinID == AArch64::BI__builtin_arm_irg) {
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
Expr *Arg0 = TheCall->getArg(0);
Expr *Arg1 = TheCall->getArg(1);
@@ -10377,7 +8666,7 @@ bool Sema::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
}
if (BuiltinID == AArch64::BI__builtin_arm_addg) {
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
Expr *Arg0 = TheCall->getArg(0);
@@ -10398,7 +8687,7 @@ bool Sema::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
}
if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
Expr *Arg0 = TheCall->getArg(0);
Expr *Arg1 = TheCall->getArg(1);
@@ -10421,7 +8710,7 @@ bool Sema::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
BuiltinID == AArch64::BI__builtin_arm_stg) {
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return true;
Expr *Arg0 = TheCall->getArg(0);
ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
@@ -10694,7 +8983,7 @@ bool Sema::BuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
(void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
ArgNum++;
}
- if (checkArgCount(*this, TheCall, ArgNum))
+ if (checkArgCount(TheCall, ArgNum))
return true;
return false;
@@ -19706,7 +17995,7 @@ void Sema::CheckAddressOfPackedMember(Expr *rhs) {
}
bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return true;
ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
@@ -19745,7 +18034,7 @@ bool Sema::BuiltinVectorToScalarMath(CallExpr *TheCall) {
}
bool Sema::BuiltinVectorMath(CallExpr *TheCall, QualType &Res) {
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
ExprResult A = TheCall->getArg(0);
@@ -19774,7 +18063,7 @@ bool Sema::BuiltinVectorMath(CallExpr *TheCall, QualType &Res) {
bool Sema::BuiltinElementwiseTernaryMath(CallExpr *TheCall,
bool CheckForFloatArgs) {
- if (checkArgCount(*this, TheCall, 3))
+ if (checkArgCount(TheCall, 3))
return true;
Expr *Args[3];
@@ -19817,7 +18106,7 @@ bool Sema::BuiltinElementwiseTernaryMath(CallExpr *TheCall,
}
bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return true;
ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
@@ -19829,7 +18118,7 @@ bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) {
}
bool Sema::BuiltinNonDeterministicValue(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return true;
ExprResult Arg = TheCall->getArg(0);
@@ -19845,7 +18134,7 @@ bool Sema::BuiltinNonDeterministicValue(CallExpr *TheCall) {
ExprResult Sema::BuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult) {
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return ExprError();
ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0));
@@ -19900,7 +18189,7 @@ ExprResult Sema::BuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
return ExprError();
}
- if (checkArgCount(*this, TheCall, 4))
+ if (checkArgCount(TheCall, 4))
return ExprError();
unsigned PtrArgIdx = 0;
@@ -20011,7 +18300,7 @@ ExprResult Sema::BuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
ExprResult Sema::BuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult) {
- if (checkArgCount(*this, TheCall, 3))
+ if (checkArgCount(TheCall, 3))
return ExprError();
unsigned PtrArgIdx = 1;
@@ -20137,7 +18426,7 @@ static bool CheckWasmBuiltinArgIsInteger(Sema &S, CallExpr *E,
/// Check that the first argument is a WebAssembly table, and the second
/// is an index to use as index into the table.
bool Sema::BuiltinWasmTableGet(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
QualType ElTy;
@@ -20160,7 +18449,7 @@ bool Sema::BuiltinWasmTableGet(CallExpr *TheCall) {
/// an index to use as index into the table and the third is the reference
/// type to set into the table.
bool Sema::BuiltinWasmTableSet(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 3))
+ if (checkArgCount(TheCall, 3))
return true;
QualType ElTy;
@@ -20178,7 +18467,7 @@ bool Sema::BuiltinWasmTableSet(CallExpr *TheCall) {
/// Check that the argument is a WebAssembly table.
bool Sema::BuiltinWasmTableSize(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return true;
QualType ElTy;
@@ -20192,7 +18481,7 @@ bool Sema::BuiltinWasmTableSize(CallExpr *TheCall) {
/// value to use for new elements (of a type matching the table type), the
/// third value is an integer.
bool Sema::BuiltinWasmTableGrow(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 3))
+ if (checkArgCount(TheCall, 3))
return true;
QualType ElTy;
@@ -20216,7 +18505,7 @@ bool Sema::BuiltinWasmTableGrow(CallExpr *TheCall) {
/// integer, the third is the value to use to fill the table (of a type
/// matching the table type), and the fourth is an integer.
bool Sema::BuiltinWasmTableFill(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 4))
+ if (checkArgCount(TheCall, 4))
return true;
QualType ElTy;
@@ -20243,7 +18532,7 @@ bool Sema::BuiltinWasmTableFill(CallExpr *TheCall) {
/// WebAssembly table (of the same element type), and the third to fifth
/// arguments are integers.
bool Sema::BuiltinWasmTableCopy(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 5))
+ if (checkArgCount(TheCall, 5))
return true;
QualType XElTy;
diff --git a/clang/lib/Sema/SemaCodeComplete.cpp b/clang/lib/Sema/SemaCodeComplete.cpp
index ad3ca4cc94ca..cd1c5f9391cc 100644
--- a/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/clang/lib/Sema/SemaCodeComplete.cpp
@@ -5692,8 +5692,15 @@ QualType getApproximateType(const Expr *E) {
}
}
if (const auto *UO = llvm::dyn_cast<UnaryOperator>(E)) {
- if (UO->getOpcode() == UnaryOperatorKind::UO_Deref)
- return UO->getSubExpr()->getType()->getPointeeType();
+ if (UO->getOpcode() == UnaryOperatorKind::UO_Deref) {
+ // We recurse into the subexpression because it could be of dependent
+ // type.
+ if (auto Pointee = getApproximateType(UO->getSubExpr())->getPointeeType();
+ !Pointee.isNull())
+ return Pointee;
+      // Our caller expects a non-null result even though the subexpression was
+      // supposed to have a pointee type. Fall through to Unresolved anyway.
+ }
}
return Unresolved;
}
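The recursion pays off when the dereferenced subexpression's type is itself only recovered approximately, e.g. for dependent member accesses or recovery expressions. A hypothetical completion scenario (not compilable as-is; the trailing '.' marks the completion point):

    template <typename T> struct Holder { T *Ptr; };

    template <typename T> void use(Holder<T> H) {
      (*H.Ptr).   // member completion: getApproximateType now resolves the
                  // subexpression before taking its pointee, instead of
                  // returning Unresolved when getType() alone falls short
    }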
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index f2b9202255cd..2a87b26f17a2 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -50,6 +50,7 @@
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/SemaObjC.h"
#include "clang/Sema/SemaOpenMP.h"
+#include "clang/Sema/SemaRISCV.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/SmallString.h"
@@ -2879,7 +2880,7 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
D, *AA, AA->getPlatform(), AA->isImplicit(), AA->getIntroduced(),
AA->getDeprecated(), AA->getObsoleted(), AA->getUnavailable(),
AA->getMessage(), AA->getStrict(), AA->getReplacement(), AMK,
- AA->getPriority());
+ AA->getPriority(), AA->getEnvironment());
else if (const auto *VA = dyn_cast<VisibilityAttr>(Attr))
NewAttr = S.mergeVisibilityAttr(D, *VA, VA->getVisibility());
else if (const auto *VA = dyn_cast<TypeVisibilityAttr>(Attr))
@@ -4985,7 +4986,7 @@ void Sema::setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
if (TagFromDeclSpec->hasNameForLinkage())
return;
- // A well-formed anonymous tag must always be a TUK_Definition.
+ // A well-formed anonymous tag must always be a TagUseKind::Definition.
assert(TagFromDeclSpec->isThisDeclarationADefinition());
// The type must match the tag exactly; no qualifiers allowed.
@@ -8926,8 +8927,8 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
const FunctionDecl *FD = cast<FunctionDecl>(CurContext);
llvm::StringMap<bool> CallerFeatureMap;
Context.getFunctionFeatureMap(CallerFeatureMap, FD);
- checkRVVTypeSupport(T, NewVD->getLocation(), cast<Decl>(CurContext),
- CallerFeatureMap);
+ RISCV().checkRVVTypeSupport(T, NewVD->getLocation(), cast<Decl>(CurContext),
+ CallerFeatureMap);
}
}
@@ -9217,19 +9218,20 @@ static NamedDecl *DiagnoseInvalidRedeclaration(
<< Idx << FDParam->getType()
<< NewFD->getParamDecl(Idx - 1)->getType();
} else if (FDisConst != NewFDisConst) {
- SemaRef.Diag(FD->getLocation(), diag::note_member_def_close_const_match)
- << NewFDisConst << FD->getSourceRange().getEnd()
- << (NewFDisConst
- ? FixItHint::CreateRemoval(ExtraArgs.D.getFunctionTypeInfo()
- .getConstQualifierLoc())
- : FixItHint::CreateInsertion(ExtraArgs.D.getFunctionTypeInfo()
- .getRParenLoc()
- .getLocWithOffset(1),
- " const"));
- } else
+ auto DB = SemaRef.Diag(FD->getLocation(),
+ diag::note_member_def_close_const_match)
+ << NewFDisConst << FD->getSourceRange().getEnd();
+ if (const auto &FTI = ExtraArgs.D.getFunctionTypeInfo(); !NewFDisConst)
+ DB << FixItHint::CreateInsertion(FTI.getRParenLoc().getLocWithOffset(1),
+ " const");
+ else if (FTI.hasMethodTypeQualifiers() &&
+ FTI.getConstQualifierLoc().isValid())
+ DB << FixItHint::CreateRemoval(FTI.getConstQualifierLoc());
+ } else {
SemaRef.Diag(FD->getLocation(),
IsMember ? diag::note_member_def_close_match
: diag::note_local_decl_close_match);
+ }
}
return nullptr;
}
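The restructured branch above still emits note_member_def_close_const_match in both directions, but now attaches the removal fix-it only when the written 'const' qualifier location is actually valid. The insertion direction, for illustration:

    struct S {
      void f() const;  // declaration is const-qualified
    };
    void S::f() {}     // definition is not: the note suggests inserting
                       // " const" after the ')' via FixItHint::CreateInsertion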
@@ -11866,8 +11868,8 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
return false;
if (!OldDecl || !OldDecl->getAsFunction() ||
- OldDecl->getDeclContext()->getRedeclContext() !=
- NewFD->getDeclContext()->getRedeclContext()) {
+ !OldDecl->getDeclContext()->getRedeclContext()->Equals(
+ NewFD->getDeclContext()->getRedeclContext())) {
// If there's no previous declaration, AND this isn't attempting to cause
// multiversioning, this isn't an error condition.
if (MVKind == MultiVersionKind::None)
@@ -17237,9 +17239,9 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
OffsetOfKind OOK, SkipBodyInfo *SkipBody) {
// If this is not a definition, it must have a name.
IdentifierInfo *OrigName = Name;
- assert((Name != nullptr || TUK == TUK_Definition) &&
+ assert((Name != nullptr || TUK == TagUseKind::Definition) &&
"Nameless record must be a definition!");
- assert(TemplateParameterLists.size() == 0 || TUK != TUK_Reference);
+ assert(TemplateParameterLists.size() == 0 || TUK != TagUseKind::Reference);
OwnedDecl = false;
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
@@ -17253,11 +17255,11 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// or a scope specifier, which also conveniently avoids this work
// for non-C++ cases.
if (TemplateParameterLists.size() > 0 ||
- (SS.isNotEmpty() && TUK != TUK_Reference)) {
+ (SS.isNotEmpty() && TUK != TagUseKind::Reference)) {
TemplateParameterList *TemplateParams =
MatchTemplateParametersToScopeSpecifier(
KWLoc, NameLoc, SS, nullptr, TemplateParameterLists,
- TUK == TUK_Friend, isMemberSpecialization, Invalid);
+ TUK == TagUseKind::Friend, isMemberSpecialization, Invalid);
// C++23 [dcl.type.elab] p2:
// If an elaborated-type-specifier is the sole constituent of a
@@ -17272,7 +17274,8 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// FIXME: Class template partial specializations can be forward declared
// per CWG2213, but the resolution failed to allow qualified forward
// declarations. This is almost certainly unintentional, so we allow them.
- if (TUK == TUK_Declaration && SS.isNotEmpty() && !isMemberSpecialization)
+ if (TUK == TagUseKind::Declaration && SS.isNotEmpty() &&
+ !isMemberSpecialization)
Diag(SS.getBeginLoc(), diag::err_standalone_class_nested_name_specifier)
<< TypeWithKeyword::getTagTypeKindName(Kind) << SS.getRange();
@@ -17309,7 +17312,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
return true;
}
- if (TUK == TUK_Friend && Kind == TagTypeKind::Enum) {
+ if (TUK == TagUseKind::Friend && Kind == TagTypeKind::Enum) {
// C++23 [dcl.type.elab]p4:
// If an elaborated-type-specifier appears with the friend specifier as
// an entire member-declaration, the member-declaration shall have one
@@ -17360,7 +17363,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// of 'int'. However, if this is an unfixed forward declaration, don't set
// the underlying type unless the user enables -fms-compatibility. This
// makes unfixed forward declared enums incomplete and is more conforming.
- if (TUK == TUK_Definition || getLangOpts().MSVCCompat)
+ if (TUK == TagUseKind::Definition || getLangOpts().MSVCCompat)
EnumUnderlying = Context.IntTy.getTypePtr();
}
}
@@ -17371,7 +17374,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
bool isStdAlignValT = false;
RedeclarationKind Redecl = forRedeclarationInCurContext();
- if (TUK == TUK_Friend || TUK == TUK_Reference)
+ if (TUK == TagUseKind::Friend || TUK == TagUseKind::Reference)
Redecl = RedeclarationKind::NotForRedeclaration;
/// Create a new tag decl in C/ObjC. Since the ODR-like semantics for ObjC/C
@@ -17389,7 +17392,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
New = EnumDecl::Create(Context, SearchDC, KWLoc, Loc, Name, nullptr,
ScopedEnum, ScopedEnumUsesClassTag, IsFixed);
// If this is an undefined enum, bail.
- if (TUK != TUK_Definition && !Invalid)
+ if (TUK != TagUseKind::Definition && !Invalid)
return nullptr;
if (EnumUnderlying) {
EnumDecl *ED = cast<EnumDecl>(New);
@@ -17417,7 +17420,8 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// many points during the parsing of a struct declaration (because
// the #pragma tokens are effectively skipped over during the
// parsing of the struct).
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
+ if (TUK == TagUseKind::Definition &&
+ (!SkipBody || !SkipBody->ShouldSkip)) {
AddAlignmentAttributesForRecord(RD);
AddMsStructLayoutForRecord(RD);
}
@@ -17438,7 +17442,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// If this is a friend or a reference to a class in a dependent
// context, don't try to make a decl for it.
- if (TUK == TUK_Friend || TUK == TUK_Reference) {
+ if (TUK == TagUseKind::Friend || TUK == TagUseKind::Reference) {
DC = computeDeclContext(SS, false);
if (!DC) {
IsDependent = true;
@@ -17471,7 +17475,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// this as a dependent elaborated-type-specifier.
// But this only makes any sense for reference-like lookups.
if (Previous.wasNotFoundInCurrentInstantiation() &&
- (TUK == TUK_Reference || TUK == TUK_Friend)) {
+ (TUK == TagUseKind::Reference || TUK == TagUseKind::Friend)) {
IsDependent = true;
return true;
}
@@ -17488,7 +17492,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// If T is the name of a class, then each of the following shall have a
// name different from T:
// -- every member of class T that is itself a type
- if (TUK != TUK_Reference && TUK != TUK_Friend &&
+ if (TUK != TagUseKind::Reference && TUK != TagUseKind::Friend &&
DiagnoseClassNameShadow(SearchDC, DeclarationNameInfo(Name, NameLoc)))
return true;
@@ -17502,7 +17506,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// When declaring or defining a tag, ignore ambiguities introduced
// by types using'ed into this scope.
if (Previous.isAmbiguous() &&
- (TUK == TUK_Definition || TUK == TUK_Declaration)) {
+ (TUK == TagUseKind::Definition || TUK == TagUseKind::Declaration)) {
LookupResult::Filter F = Previous.makeFilter();
while (F.hasNext()) {
NamedDecl *ND = F.next();
@@ -17526,7 +17530,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
//
// Does it matter that this should be by scope instead of by
// semantic context?
- if (!Previous.empty() && TUK == TUK_Friend) {
+ if (!Previous.empty() && TUK == TagUseKind::Friend) {
DeclContext *EnclosingNS = SearchDC->getEnclosingNamespaceContext();
LookupResult::Filter F = Previous.makeFilter();
bool FriendSawTagOutsideEnclosingNamespace = false;
@@ -17556,7 +17560,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
if (Previous.isAmbiguous())
return true;
- if (!getLangOpts().CPlusPlus && TUK != TUK_Reference) {
+ if (!getLangOpts().CPlusPlus && TUK != TagUseKind::Reference) {
// FIXME: This makes sure that we ignore the contexts associated
// with C structs, unions, and enums when looking for a matching
// tag declaration or definition. See the similar lookup tweak
@@ -17608,11 +17612,12 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// also need to do a redeclaration lookup there, just in case
// there's a shadow friend decl.
if (Name && Previous.empty() &&
- (TUK == TUK_Reference || TUK == TUK_Friend || IsTemplateParamOrArg)) {
+ (TUK == TagUseKind::Reference || TUK == TagUseKind::Friend ||
+ IsTemplateParamOrArg)) {
if (Invalid) goto CreateNewDecl;
assert(SS.isEmpty());
- if (TUK == TUK_Reference || IsTemplateParamOrArg) {
+ if (TUK == TagUseKind::Reference || IsTemplateParamOrArg) {
// C++ [basic.scope.pdecl]p5:
// -- for an elaborated-type-specifier of the form
//
@@ -17646,7 +17651,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// Find the scope where we'll be declaring the tag.
S = getTagInjectionScope(S, getLangOpts());
} else {
- assert(TUK == TUK_Friend);
+ assert(TUK == TagUseKind::Friend);
CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(SearchDC);
// C++ [namespace.memdef]p3:
@@ -17711,7 +17716,8 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// redefinition if either context is within the other.
if (auto *Shadow = dyn_cast<UsingShadowDecl>(DirectPrevDecl)) {
auto *OldTag = dyn_cast<TagDecl>(PrevDecl);
- if (SS.isEmpty() && TUK != TUK_Reference && TUK != TUK_Friend &&
+ if (SS.isEmpty() && TUK != TagUseKind::Reference &&
+ TUK != TagUseKind::Friend &&
isDeclInScope(Shadow, SearchDC, S, isMemberSpecialization) &&
!(OldTag && isAcceptableTagRedeclContext(
*this, OldTag->getDeclContext(), SearchDC))) {
@@ -17730,13 +17736,13 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// If this is a use of a previous tag, or if the tag is already declared
// in the same scope (so that the definition/declaration completes or
// rementions the tag), reuse the decl.
- if (TUK == TUK_Reference || TUK == TUK_Friend ||
+ if (TUK == TagUseKind::Reference || TUK == TagUseKind::Friend ||
isDeclInScope(DirectPrevDecl, SearchDC, S,
SS.isNotEmpty() || isMemberSpecialization)) {
// Make sure that this wasn't declared as an enum and now used as a
// struct or something similar.
if (!isAcceptableTagRedeclaration(PrevTagDecl, Kind,
- TUK == TUK_Definition, KWLoc,
+ TUK == TagUseKind::Definition, KWLoc,
Name)) {
bool SafeToContinue =
(PrevTagDecl->getTagKind() != TagTypeKind::Enum &&
@@ -17763,7 +17769,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
if (Kind == TagTypeKind::Enum &&
PrevTagDecl->getTagKind() == TagTypeKind::Enum) {
const EnumDecl *PrevEnum = cast<EnumDecl>(PrevTagDecl);
- if (TUK == TUK_Reference || TUK == TUK_Friend)
+ if (TUK == TagUseKind::Reference || TUK == TagUseKind::Friend)
return PrevTagDecl;
QualType EnumUnderlyingTy;
@@ -17778,14 +17784,14 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
if (CheckEnumRedeclaration(NameLoc.isValid() ? NameLoc : KWLoc,
ScopedEnum, EnumUnderlyingTy,
IsFixed, PrevEnum))
- return TUK == TUK_Declaration ? PrevTagDecl : nullptr;
+ return TUK == TagUseKind::Declaration ? PrevTagDecl : nullptr;
}
// C++11 [class.mem]p1:
// A member shall not be declared twice in the member-specification,
// except that a nested class or member class template can be declared
// and then later defined.
- if (TUK == TUK_Declaration && PrevDecl->isCXXClassMember() &&
+ if (TUK == TagUseKind::Declaration && PrevDecl->isCXXClassMember() &&
S->isDeclScope(PrevDecl)) {
Diag(NameLoc, diag::ext_member_redeclared);
Diag(PrevTagDecl->getLocation(), diag::note_previous_declaration);
@@ -17794,11 +17800,11 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
if (!Invalid) {
// If this is a use, just return the declaration we found, unless
// we have attributes.
- if (TUK == TUK_Reference || TUK == TUK_Friend) {
+ if (TUK == TagUseKind::Reference || TUK == TagUseKind::Friend) {
if (!Attrs.empty()) {
// FIXME: Diagnose these attributes. For now, we create a new
// declaration to hold them.
- } else if (TUK == TUK_Reference &&
+ } else if (TUK == TagUseKind::Reference &&
(PrevTagDecl->getFriendObjectKind() ==
Decl::FOK_Undeclared ||
PrevDecl->getOwningModule() != getCurrentModule()) &&
@@ -17822,7 +17828,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
}
// Diagnose attempts to redefine a tag.
- if (TUK == TUK_Definition) {
+ if (TUK == TagUseKind::Definition) {
if (NamedDecl *Def = PrevTagDecl->getDefinition()) {
// If we're defining a specialization and the previous definition
// is from an implicit instantiation, don't emit an error
@@ -17902,7 +17908,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// Okay, we're going to make a redeclaration. If this is some kind
// of reference, make sure we build the redeclaration in the same DC
// as the original, and ignore the current access specifier.
- if (TUK == TUK_Friend || TUK == TUK_Reference) {
+ if (TUK == TagUseKind::Friend || TUK == TagUseKind::Reference) {
SearchDC = PrevTagDecl->getDeclContext();
AS = AS_none;
}
@@ -17928,7 +17934,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// Use a better diagnostic if an elaborated-type-specifier
// found the wrong kind of type on the first
// (non-redeclaration) lookup.
- if ((TUK == TUK_Reference || TUK == TUK_Friend) &&
+ if ((TUK == TagUseKind::Reference || TUK == TagUseKind::Friend) &&
!Previous.isForRedeclaration()) {
NonTagKind NTK = getNonTagTypeDeclKind(PrevDecl, Kind);
Diag(NameLoc, diag::err_tag_reference_non_tag)
@@ -17942,7 +17948,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// do nothing
// Diagnose implicit declarations introduced by elaborated types.
- } else if (TUK == TUK_Reference || TUK == TUK_Friend) {
+ } else if (TUK == TagUseKind::Reference || TUK == TagUseKind::Friend) {
NonTagKind NTK = getNonTagTypeDeclKind(PrevDecl, Kind);
Diag(NameLoc, diag::err_tag_reference_conflict) << NTK;
Diag(PrevDecl->getLocation(), diag::note_previous_decl) << PrevDecl;
@@ -18001,7 +18007,7 @@ CreateNewDecl:
StdAlignValT = cast<EnumDecl>(New);
// If this is an undefined enum, warn.
- if (TUK != TUK_Definition && !Invalid) {
+ if (TUK != TagUseKind::Definition && !Invalid) {
TagDecl *Def;
if (IsFixed && cast<EnumDecl>(New)->isFixed()) {
// C++0x: 7.2p2: opaque-enum-declaration.
@@ -18051,21 +18057,22 @@ CreateNewDecl:
}
// Only C23 and later allow defining new types in 'offsetof()'.
- if (OOK != OOK_Outside && TUK == TUK_Definition && !getLangOpts().CPlusPlus &&
- !getLangOpts().C23)
+ if (OOK != OOK_Outside && TUK == TagUseKind::Definition &&
+ !getLangOpts().CPlusPlus && !getLangOpts().C23)
Diag(New->getLocation(), diag::ext_type_defined_in_offsetof)
<< (OOK == OOK_Macro) << New->getSourceRange();
// C++11 [dcl.type]p3:
// A type-specifier-seq shall not define a class or enumeration [...].
if (!Invalid && getLangOpts().CPlusPlus &&
- (IsTypeSpecifier || IsTemplateParamOrArg) && TUK == TUK_Definition) {
+ (IsTypeSpecifier || IsTemplateParamOrArg) &&
+ TUK == TagUseKind::Definition) {
Diag(New->getLocation(), diag::err_type_defined_in_type_specifier)
<< Context.getTagDeclType(New);
Invalid = true;
}
- if (!Invalid && getLangOpts().CPlusPlus && TUK == TUK_Definition &&
+ if (!Invalid && getLangOpts().CPlusPlus && TUK == TagUseKind::Definition &&
DC->getDeclKind() == Decl::Enum) {
Diag(New->getLocation(), diag::err_type_defined_in_enum)
<< Context.getTagDeclType(New);
@@ -18077,7 +18084,7 @@ CreateNewDecl:
if (SS.isSet()) {
// If this is either a declaration or a definition, check the
// nested-name-specifier against the current context.
- if ((TUK == TUK_Definition || TUK == TUK_Declaration) &&
+ if ((TUK == TagUseKind::Definition || TUK == TagUseKind::Declaration) &&
diagnoseQualifiedDeclaration(SS, DC, OrigName, Loc,
/*TemplateId=*/nullptr,
isMemberSpecialization))
@@ -18102,7 +18109,7 @@ CreateNewDecl:
// many points during the parsing of a struct declaration (because
// the #pragma tokens are effectively skipped over during the
// parsing of the struct).
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
+ if (TUK == TagUseKind::Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
AddAlignmentAttributesForRecord(RD);
AddMsStructLayoutForRecord(RD);
}
@@ -18133,7 +18140,7 @@ CreateNewDecl:
if (getLangOpts().CPlusPlus) {
// C++ [dcl.fct]p6:
// Types shall not be defined in return or parameter types.
- if (TUK == TUK_Definition && !IsTypeSpecifier) {
+ if (TUK == TagUseKind::Definition && !IsTypeSpecifier) {
Diag(Loc, diag::err_type_defined_in_param_type)
<< Name;
Invalid = true;
@@ -18154,7 +18161,7 @@ CreateNewDecl:
// In Microsoft mode, a friend declaration also acts as a forward
// declaration so we always pass true to setObjectOfFriendDecl to make
// the tag name visible.
- if (TUK == TUK_Friend)
+ if (TUK == TagUseKind::Friend)
New->setObjectOfFriendDecl(getLangOpts().MSVCCompat);
// Set the access specifier.
@@ -18164,14 +18171,14 @@ CreateNewDecl:
if (PrevDecl)
CheckRedeclarationInModule(New, PrevDecl);
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip))
+ if (TUK == TagUseKind::Definition && (!SkipBody || !SkipBody->ShouldSkip))
New->startDefinition();
ProcessDeclAttributeList(S, New, Attrs);
AddPragmaAttributes(S, New);
// If this has an identifier, add it to the scope stack.
- if (TUK == TUK_Friend) {
+ if (TUK == TagUseKind::Friend) {
// We might be replacing an existing declaration in the lookup tables;
// if so, borrow its access specifier.
if (PrevDecl)
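
Note on the TagUseKind migration above: the unscoped TUK_* enumerators are
replaced throughout with a scoped enumeration. A minimal sketch of the assumed
declaration (the four enumerator names are taken from the replacement lines;
the real definition lives in the Sema headers):

    // Hypothetical sketch; mirrors the names used in this diff.
    enum class TagUseKind {
      Reference,   // elaborated-type-specifier used as a reference: 'struct S *p;'
      Declaration, // forward declaration: 'struct S;'
      Definition,  // definition: 'struct S { ... };'
      Friend       // friend declaration: 'friend struct S;'
    };

Because scoped enumerators must be qualified, every use site now spells
TagUseKind::, which accounts for the re-wrapped lines above.
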
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
index e816ea3647a7..5041fd65286f 100644
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -26,6 +26,7 @@
#include "clang/Basic/Cuda.h"
#include "clang/Basic/DarwinSDKInfo.h"
#include "clang/Basic/HLSLRuntime.h"
+#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
@@ -52,6 +53,7 @@
#include "llvm/Support/Error.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Triple.h"
#include <optional>
using namespace clang;
@@ -2495,7 +2497,7 @@ AvailabilityAttr *Sema::mergeAvailabilityAttr(
bool Implicit, VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable, StringRef Message,
bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK,
- int Priority) {
+ int Priority, IdentifierInfo *Environment) {
VersionTuple MergedIntroduced = Introduced;
VersionTuple MergedDeprecated = Deprecated;
VersionTuple MergedObsoleted = Obsoleted;
@@ -2529,6 +2531,12 @@ AvailabilityAttr *Sema::mergeAvailabilityAttr(
continue;
}
+ IdentifierInfo *OldEnvironment = OldAA->getEnvironment();
+ if (OldEnvironment != Environment) {
+ ++i;
+ continue;
+ }
+
// If there is an existing availability attribute for this platform that
// has a lower priority use the existing one and discard the new
// attribute.
@@ -2647,7 +2655,7 @@ AvailabilityAttr *Sema::mergeAvailabilityAttr(
!OverrideOrImpl) {
auto *Avail = ::new (Context) AvailabilityAttr(
Context, CI, Platform, Introduced, Deprecated, Obsoleted, IsUnavailable,
- Message, IsStrict, Replacement, Priority);
+ Message, IsStrict, Replacement, Priority, Environment);
Avail->setImplicit(Implicit);
return Avail;
}
@@ -2706,13 +2714,34 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
}
+ if (S.getLangOpts().HLSL && IsStrict)
+ S.Diag(AL.getStrictLoc(), diag::err_availability_unexpected_parameter)
+ << "strict" << /* HLSL */ 0;
+
int PriorityModifier = AL.isPragmaClangAttribute()
? Sema::AP_PragmaClangAttribute
: Sema::AP_Explicit;
+
+ const IdentifierLoc *EnvironmentLoc = AL.getEnvironment();
+ IdentifierInfo *IIEnvironment = nullptr;
+ if (EnvironmentLoc) {
+ if (S.getLangOpts().HLSL) {
+ IIEnvironment = EnvironmentLoc->Ident;
+ if (AvailabilityAttr::getEnvironmentType(
+ EnvironmentLoc->Ident->getName()) ==
+ llvm::Triple::EnvironmentType::UnknownEnvironment)
+ S.Diag(EnvironmentLoc->Loc, diag::warn_availability_unknown_environment)
+ << EnvironmentLoc->Ident;
+ } else {
+ S.Diag(EnvironmentLoc->Loc, diag::err_availability_unexpected_parameter)
+ << "environment" << /* C/C++ */ 1;
+ }
+ }
+
AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
ND, AL, II, false /*Implicit*/, Introduced.Version, Deprecated.Version,
Obsoleted.Version, IsUnavailable, Str, IsStrict, Replacement,
- Sema::AMK_None, PriorityModifier);
+ Sema::AMK_None, PriorityModifier, IIEnvironment);
if (NewAttr)
D->addAttr(NewAttr);
@@ -2768,8 +2797,8 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
ND, AL, NewII, true /*Implicit*/, NewIntroduced, NewDeprecated,
NewObsoleted, IsUnavailable, Str, IsStrict, Replacement,
- Sema::AMK_None,
- PriorityModifier + Sema::AP_InferredFromOtherPlatform);
+ Sema::AMK_None, PriorityModifier + Sema::AP_InferredFromOtherPlatform,
+ IIEnvironment);
if (NewAttr)
D->addAttr(NewAttr);
}
@@ -2810,8 +2839,8 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
ND, AL, NewII, true /*Implicit*/, NewIntroduced, NewDeprecated,
NewObsoleted, IsUnavailable, Str, IsStrict, Replacement,
- Sema::AMK_None,
- PriorityModifier + Sema::AP_InferredFromOtherPlatform);
+ Sema::AMK_None, PriorityModifier + Sema::AP_InferredFromOtherPlatform,
+ IIEnvironment);
if (NewAttr)
D->addAttr(NewAttr);
}
@@ -2844,7 +2873,7 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
MinMacCatalystVersion(Deprecated.Version),
MinMacCatalystVersion(Obsoleted.Version), IsUnavailable, Str,
IsStrict, Replacement, Sema::AMK_None,
- PriorityModifier + Sema::AP_InferredFromOtherPlatform);
+ PriorityModifier + Sema::AP_InferredFromOtherPlatform, IIEnvironment);
if (NewAttr)
D->addAttr(NewAttr);
} else if (II->getName() == "macos" && GetSDKInfo() &&
@@ -2887,7 +2916,8 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
VersionOrEmptyVersion(NewObsoleted), /*IsUnavailable=*/false, Str,
IsStrict, Replacement, Sema::AMK_None,
PriorityModifier + Sema::AP_InferredFromOtherPlatform +
- Sema::AP_InferredFromOtherPlatform);
+ Sema::AP_InferredFromOtherPlatform,
+ IIEnvironment);
if (NewAttr)
D->addAttr(NewAttr);
}
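
The Environment parameter threaded through mergeAvailabilityAttr above is only
accepted for HLSL; in C/C++ an explicit environment is rejected with
err_availability_unexpected_parameter. A hedged usage sketch (the platform and
environment spellings are assumptions, not confirmed by this diff):

    // Hypothetical HLSL declaration. 'shadermodel' and 'compute' are assumed
    // names that AvailabilityAttr::getEnvironmentType would map onto a
    // llvm::Triple::EnvironmentType; an unrecognized name only warns
    // (warn_availability_unknown_environment).
    __attribute__((availability(shadermodel, introduced = 6.5,
                                environment = compute)))
    void Fn();

Availability attributes for the same platform but different environments no
longer merge, per the OldEnvironment != Environment check above.
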
diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp
index 822538198505..8ab429e2a136 100644
--- a/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/clang/lib/Sema/SemaDeclCXX.cpp
@@ -2656,188 +2656,122 @@ bool Sema::isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS) {
return false;
}
-/// Determine whether the given class is a base class of the given
-/// class, including looking at dependent bases.
-static bool findCircularInheritance(const CXXRecordDecl *Class,
- const CXXRecordDecl *Current) {
- SmallVector<const CXXRecordDecl*, 8> Queue;
-
- Class = Class->getCanonicalDecl();
- while (true) {
- for (const auto &I : Current->bases()) {
- CXXRecordDecl *Base = I.getType()->getAsCXXRecordDecl();
- if (!Base)
- continue;
-
- Base = Base->getDefinition();
- if (!Base)
- continue;
-
- if (Base->getCanonicalDecl() == Class)
- return true;
-
- Queue.push_back(Base);
- }
-
- if (Queue.empty())
- return false;
-
- Current = Queue.pop_back_val();
- }
-
- return false;
-}
-
/// Check the validity of a C++ base class specifier.
///
/// \returns a new CXXBaseSpecifier if well-formed, emits diagnostics
/// and returns NULL otherwise.
-CXXBaseSpecifier *
-Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
- SourceRange SpecifierRange,
- bool Virtual, AccessSpecifier Access,
- TypeSourceInfo *TInfo,
- SourceLocation EllipsisLoc) {
- // In HLSL, unspecified class access is public rather than private.
- if (getLangOpts().HLSL && Class->getTagKind() == TagTypeKind::Class &&
- Access == AS_none)
- Access = AS_public;
-
+CXXBaseSpecifier *Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
+ SourceRange SpecifierRange,
+ bool Virtual, AccessSpecifier Access,
+ TypeSourceInfo *TInfo,
+ SourceLocation EllipsisLoc) {
QualType BaseType = TInfo->getType();
+ SourceLocation BaseLoc = TInfo->getTypeLoc().getBeginLoc();
if (BaseType->containsErrors()) {
// Already emitted a diagnostic when parsing the error type.
return nullptr;
}
- // C++ [class.union]p1:
- // A union shall not have base classes.
- if (Class->isUnion()) {
- Diag(Class->getLocation(), diag::err_base_clause_on_union)
- << SpecifierRange;
- return nullptr;
- }
- if (EllipsisLoc.isValid() &&
- !TInfo->getType()->containsUnexpandedParameterPack()) {
+ if (EllipsisLoc.isValid() && !BaseType->containsUnexpandedParameterPack()) {
Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
<< TInfo->getTypeLoc().getSourceRange();
EllipsisLoc = SourceLocation();
}
- SourceLocation BaseLoc = TInfo->getTypeLoc().getBeginLoc();
-
- if (BaseType->isDependentType()) {
- // Make sure that we don't have circular inheritance among our dependent
- // bases. For non-dependent bases, the check for completeness below handles
- // this.
- if (CXXRecordDecl *BaseDecl = BaseType->getAsCXXRecordDecl()) {
- if (BaseDecl->getCanonicalDecl() == Class->getCanonicalDecl() ||
- ((BaseDecl = BaseDecl->getDefinition()) &&
- findCircularInheritance(Class, BaseDecl))) {
- Diag(BaseLoc, diag::err_circular_inheritance)
- << BaseType << Context.getTypeDeclType(Class);
-
- if (BaseDecl->getCanonicalDecl() != Class->getCanonicalDecl())
- Diag(BaseDecl->getLocation(), diag::note_previous_decl)
- << BaseType;
+ auto *BaseDecl =
+ dyn_cast_if_present<CXXRecordDecl>(computeDeclContext(BaseType));
+ // C++ [class.derived.general]p2:
+ // A class-or-decltype shall denote a (possibly cv-qualified) class type
+ // that is not an incompletely defined class; any cv-qualifiers are
+ // ignored.
+ if (BaseDecl) {
+ // C++ [class.union.general]p4:
+ // [...] A union shall not be used as a base class.
+ if (BaseDecl->isUnion()) {
+ Diag(BaseLoc, diag::err_union_as_base_class) << SpecifierRange;
+ return nullptr;
+ }
- return nullptr;
+ // For the MS ABI, propagate DLL attributes to base class templates.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft() ||
+ Context.getTargetInfo().getTriple().isPS()) {
+ if (Attr *ClassAttr = getDLLAttr(Class)) {
+ if (auto *BaseSpec =
+ dyn_cast<ClassTemplateSpecializationDecl>(BaseDecl)) {
+ propagateDLLAttrToBaseClassTemplate(Class, ClassAttr, BaseSpec,
+ BaseLoc);
+ }
}
}
+ if (RequireCompleteType(BaseLoc, BaseType, diag::err_incomplete_base_class,
+ SpecifierRange)) {
+ Class->setInvalidDecl();
+ return nullptr;
+ }
+
+ BaseDecl = BaseDecl->getDefinition();
+ assert(BaseDecl && "Base type is not incomplete, but has no definition");
+
+ // Microsoft docs say:
+ // "If a base-class has a code_seg attribute, derived classes must have the
+ // same attribute."
+ const auto *BaseCSA = BaseDecl->getAttr<CodeSegAttr>();
+ const auto *DerivedCSA = Class->getAttr<CodeSegAttr>();
+ if ((DerivedCSA || BaseCSA) &&
+ (!BaseCSA || !DerivedCSA ||
+ BaseCSA->getName() != DerivedCSA->getName())) {
+ Diag(Class->getLocation(), diag::err_mismatched_code_seg_base);
+ Diag(BaseDecl->getLocation(), diag::note_base_class_specified_here)
+ << BaseDecl;
+ return nullptr;
+ }
+
+ // A class which contains a flexible array member is not suitable for use as
+ // a base class:
+ // - If the layout determines that a base comes before another base,
+ // the flexible array member would index into the subsequent base.
+ // - If the layout determines that base comes before the derived class,
+ // the flexible array member would index into the derived class.
+ if (BaseDecl->hasFlexibleArrayMember()) {
+ Diag(BaseLoc, diag::err_base_class_has_flexible_array_member)
+ << BaseDecl->getDeclName();
+ return nullptr;
+ }
+
+ // C++ [class]p3:
+ // If a class is marked final and it appears as a base-type-specifier in
+ // base-clause, the program is ill-formed.
+ if (FinalAttr *FA = BaseDecl->getAttr<FinalAttr>()) {
+ Diag(BaseLoc, diag::err_class_marked_final_used_as_base)
+ << BaseDecl->getDeclName() << FA->isSpelledAsSealed();
+ Diag(BaseDecl->getLocation(), diag::note_entity_declared_at)
+ << BaseDecl->getDeclName() << FA->getRange();
+ return nullptr;
+ }
+
+ // If the base class is invalid the derived class is as well.
+ if (BaseDecl->isInvalidDecl())
+ Class->setInvalidDecl();
+ } else if (BaseType->isDependentType()) {
// Make sure that we don't make an ill-formed AST where the type of the
// Class is non-dependent and its attached base class specifier is a
// dependent type, which violates invariants in many clang code paths (e.g.
// constexpr evaluator). If this case happens (in error-recovery mode), we
// explicitly mark the Class decl invalid. The diagnostic was already
// emitted.
- if (!Class->getTypeForDecl()->isDependentType())
+ if (!Class->isDependentContext())
Class->setInvalidDecl();
- return new (Context) CXXBaseSpecifier(
- SpecifierRange, Virtual, Class->getTagKind() == TagTypeKind::Class,
- Access, TInfo, EllipsisLoc);
- }
-
- // Base specifiers must be record types.
- if (!BaseType->isRecordType()) {
+ } else {
+ // The base class is some non-dependent non-class type.
Diag(BaseLoc, diag::err_base_must_be_class) << SpecifierRange;
return nullptr;
}
- // C++ [class.union]p1:
- // A union shall not be used as a base class.
- if (BaseType->isUnionType()) {
- Diag(BaseLoc, diag::err_union_as_base_class) << SpecifierRange;
- return nullptr;
- }
-
- // For the MS ABI, propagate DLL attributes to base class templates.
- if (Context.getTargetInfo().getCXXABI().isMicrosoft() ||
- Context.getTargetInfo().getTriple().isPS()) {
- if (Attr *ClassAttr = getDLLAttr(Class)) {
- if (auto *BaseTemplate = dyn_cast_or_null<ClassTemplateSpecializationDecl>(
- BaseType->getAsCXXRecordDecl())) {
- propagateDLLAttrToBaseClassTemplate(Class, ClassAttr, BaseTemplate,
- BaseLoc);
- }
- }
- }
-
- // C++ [class.derived]p2:
- // The class-name in a base-specifier shall not be an incompletely
- // defined class.
- if (RequireCompleteType(BaseLoc, BaseType,
- diag::err_incomplete_base_class, SpecifierRange)) {
- Class->setInvalidDecl();
- return nullptr;
- }
-
- // If the base class is polymorphic or isn't empty, the new one is/isn't, too.
- RecordDecl *BaseDecl = BaseType->castAs<RecordType>()->getDecl();
- assert(BaseDecl && "Record type has no declaration");
- BaseDecl = BaseDecl->getDefinition();
- assert(BaseDecl && "Base type is not incomplete, but has no definition");
- CXXRecordDecl *CXXBaseDecl = cast<CXXRecordDecl>(BaseDecl);
- assert(CXXBaseDecl && "Base type is not a C++ type");
-
- // Microsoft docs say:
- // "If a base-class has a code_seg attribute, derived classes must have the
- // same attribute."
- const auto *BaseCSA = CXXBaseDecl->getAttr<CodeSegAttr>();
- const auto *DerivedCSA = Class->getAttr<CodeSegAttr>();
- if ((DerivedCSA || BaseCSA) &&
- (!BaseCSA || !DerivedCSA || BaseCSA->getName() != DerivedCSA->getName())) {
- Diag(Class->getLocation(), diag::err_mismatched_code_seg_base);
- Diag(CXXBaseDecl->getLocation(), diag::note_base_class_specified_here)
- << CXXBaseDecl;
- return nullptr;
- }
-
- // A class which contains a flexible array member is not suitable for use as a
- // base class:
- // - If the layout determines that a base comes before another base,
- // the flexible array member would index into the subsequent base.
- // - If the layout determines that base comes before the derived class,
- // the flexible array member would index into the derived class.
- if (CXXBaseDecl->hasFlexibleArrayMember()) {
- Diag(BaseLoc, diag::err_base_class_has_flexible_array_member)
- << CXXBaseDecl->getDeclName();
- return nullptr;
- }
-
- // C++ [class]p3:
- // If a class is marked final and it appears as a base-type-specifier in
- // base-clause, the program is ill-formed.
- if (FinalAttr *FA = CXXBaseDecl->getAttr<FinalAttr>()) {
- Diag(BaseLoc, diag::err_class_marked_final_used_as_base)
- << CXXBaseDecl->getDeclName()
- << FA->isSpelledAsSealed();
- Diag(CXXBaseDecl->getLocation(), diag::note_entity_declared_at)
- << CXXBaseDecl->getDeclName() << FA->getRange();
- return nullptr;
- }
-
- if (BaseDecl->isInvalidDecl())
- Class->setInvalidDecl();
+ // In HLSL, unspecified class access is public rather than private.
+ if (getLangOpts().HLSL && Class->getTagKind() == TagTypeKind::Class &&
+ Access == AS_none)
+ Access = AS_public;
// Create the base specifier.
return new (Context) CXXBaseSpecifier(
@@ -2887,13 +2821,20 @@ BaseResult Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
UPPC_BaseType))
return true;
+ // C++ [class.union.general]p4:
+ // [...] A union shall not have base classes.
+ if (Class->isUnion()) {
+ Diag(Class->getLocation(), diag::err_base_clause_on_union)
+ << SpecifierRange;
+ return true;
+ }
+
if (CXXBaseSpecifier *BaseSpec = CheckBaseSpecifier(Class, SpecifierRange,
Virtual, Access, TInfo,
EllipsisLoc))
return BaseSpec;
- else
- Class->setInvalidDecl();
+ Class->setInvalidDecl();
return true;
}
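
The consolidated BaseDecl path above covers the same diagnostics the removed
code issued piecemeal. Illustrative C++ inputs for each check (examples are
mine, not from the patch):

    union U { int x; };
    struct D1 : U {};                    // err_union_as_base_class

    struct F final {};
    struct D2 : F {};                    // err_class_marked_final_used_as_base

    struct Flex { int n; int tail[]; };  // GNU flexible array member
    struct D3 : Flex {};                 // err_base_class_has_flexible_array_member

The union-as-derived-class check (err_base_clause_on_union) moves out to
ActOnBaseSpecifier, so CheckBaseSpecifier no longer needs it.
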
@@ -17639,11 +17580,12 @@ DeclResult Sema::ActOnTemplatedFriendTag(
if (Invalid)
return true;
- return CheckClassTemplate(S, TagSpec, TUK_Friend, TagLoc, SS, Name,
- NameLoc, Attr, TemplateParams, AS_public,
+ return CheckClassTemplate(S, TagSpec, TagUseKind::Friend, TagLoc, SS,
+ Name, NameLoc, Attr, TemplateParams, AS_public,
/*ModulePrivateLoc=*/SourceLocation(),
FriendLoc, TempParamLists.size() - 1,
- TempParamLists.data()).get();
+ TempParamLists.data())
+ .get();
} else {
// The "template<>" header is extraneous.
Diag(TemplateParams->getTemplateLoc(), diag::err_template_tag_noparams)
@@ -17671,8 +17613,8 @@ DeclResult Sema::ActOnTemplatedFriendTag(
if (SS.isEmpty()) {
bool Owned = false;
bool IsDependent = false;
- return ActOnTag(S, TagSpec, TUK_Friend, TagLoc, SS, Name, NameLoc, Attr,
- AS_public,
+ return ActOnTag(S, TagSpec, TagUseKind::Friend, TagLoc, SS, Name, NameLoc,
+ Attr, AS_public,
/*ModulePrivateLoc=*/SourceLocation(),
MultiTemplateParamsArg(), Owned, IsDependent,
/*ScopedEnumKWLoc=*/SourceLocation(),
@@ -17787,7 +17729,7 @@ Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
// Try to convert the decl specifier to a type. This works for
// friend templates because ActOnTag never produces a ClassTemplateDecl
- // for a TUK_Friend.
+ // for a TagUseKind::Friend.
Declarator TheDeclarator(DS, ParsedAttributesView::none(),
DeclaratorContext::Member);
TypeSourceInfo *TSI = GetTypeForDeclarator(TheDeclarator);
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index 5ecfdee21f09..ded4f59833ac 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -54,6 +54,7 @@
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/SemaObjC.h"
#include "clang/Sema/SemaOpenMP.h"
+#include "clang/Sema/SemaPseudoObject.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/STLForwardCompat.h"
@@ -2718,34 +2719,6 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
return ExprError();
}
- // C++ [temp.dep.expr]p3:
- // An id-expression is type-dependent if it contains:
- // -- an identifier that was declared with a dependent type,
- // (note: handled after lookup)
- // -- a template-id that is dependent,
- // (note: handled in BuildTemplateIdExpr)
- // -- a conversion-function-id that specifies a dependent type,
- // -- a nested-name-specifier that contains a class-name that
- // names a dependent type.
- // Determine whether this is a member of an unknown specialization;
- // we need to handle these differently.
- bool DependentID = false;
- if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName &&
- Name.getCXXNameType()->isDependentType()) {
- DependentID = true;
- } else if (SS.isSet()) {
- if (DeclContext *DC = computeDeclContext(SS, false)) {
- if (RequireCompleteDeclContext(SS, DC))
- return ExprError();
- } else {
- DependentID = true;
- }
- }
-
- if (DependentID)
- return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
- IsAddressOfOperand, TemplateArgs);
-
// BoundsSafety: This specially handles arguments of bounds attributes
// appertains to a type of C struct field such that the name lookup
// within a struct finds the member name, which is not the case for other
@@ -2781,7 +2754,7 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
&AssumedTemplate))
return ExprError();
- if (R.wasNotFoundInCurrentInstantiation())
+ if (R.wasNotFoundInCurrentInstantiation() || SS.isInvalid())
return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
IsAddressOfOperand, TemplateArgs);
} else {
@@ -2791,7 +2764,7 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
// If the result might be in a dependent base class, this is a dependent
// id-expression.
- if (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation)
+ if (R.wasNotFoundInCurrentInstantiation() || SS.isInvalid())
return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
IsAddressOfOperand, TemplateArgs);
@@ -2946,26 +2919,14 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
/// this path.
ExprResult Sema::BuildQualifiedDeclarationNameExpr(
CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
- bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI) {
- if (NameInfo.getName().isDependentName())
- return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
- NameInfo, /*TemplateArgs=*/nullptr);
-
- DeclContext *DC = computeDeclContext(SS, false);
- if (!DC)
- return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
- NameInfo, /*TemplateArgs=*/nullptr);
-
- if (RequireCompleteDeclContext(SS, DC))
- return ExprError();
-
+ bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI) {
LookupResult R(*this, NameInfo, LookupOrdinaryName);
- LookupQualifiedName(R, DC);
+ LookupParsedName(R, /*S=*/nullptr, &SS, /*ObjectType=*/QualType());
if (R.isAmbiguous())
return ExprError();
- if (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation)
+ if (R.wasNotFoundInCurrentInstantiation() || SS.isInvalid())
return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
NameInfo, /*TemplateArgs=*/nullptr);
@@ -2974,6 +2935,7 @@ ExprResult Sema::BuildQualifiedDeclarationNameExpr(
// diagnostic during template instantiation is likely bogus, e.g. if a class
// is invalid because it's derived from an invalid base class, then missing
// members were likely supposed to be inherited.
+ DeclContext *DC = computeDeclContext(SS);
if (const auto *CD = dyn_cast<CXXRecordDecl>(DC))
if (CD->isInvalidDecl())
return ExprError();
@@ -3017,16 +2979,14 @@ ExprResult Sema::BuildQualifiedDeclarationNameExpr(
return ExprEmpty();
}
- // Defend against this resolving to an implicit member access. We usually
- // won't get here if this might be a legitimate a class member (we end up in
- // BuildMemberReferenceExpr instead), but this can be valid if we're forming
- // a pointer-to-member or in an unevaluated context in C++11.
- if (!R.empty() && (*R.begin())->isCXXClassMember() && !IsAddressOfOperand)
+ // If necessary, build an implicit class member access.
+ if (isPotentialImplicitMemberAccess(SS, R, IsAddressOfOperand))
return BuildPossibleImplicitMemberExpr(SS,
/*TemplateKWLoc=*/SourceLocation(),
- R, /*TemplateArgs=*/nullptr, S);
+ R, /*TemplateArgs=*/nullptr,
+ /*S=*/nullptr);
- return BuildDeclarationNameExpr(SS, R, /* ADL */ false);
+ return BuildDeclarationNameExpr(SS, R, /*ADL=*/false);
}
/// Cast a base object to a member's actual type.
@@ -3190,7 +3150,7 @@ bool Sema::UseArgumentDependentLookup(const CXXScopeSpec &SS,
return false;
// Never if a scope specifier was provided.
- if (SS.isSet())
+ if (SS.isNotEmpty())
return false;
// Only in C++ or ObjC++.
@@ -3718,9 +3678,7 @@ static Expr *BuildFloatingLiteral(Sema &S, NumericLiteralParser &Literal,
APFloat::getSmallest(Format).toString(buffer);
}
- S.Diag(Loc, diagnostic)
- << Ty
- << StringRef(buffer.data(), buffer.size());
+ S.Diag(Loc, diagnostic) << Ty << buffer.str();
}
bool isExact = (result == APFloat::opOK);
@@ -5227,7 +5185,7 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
}
// Perform default conversions.
- if (!LHSExp->getType()->getAs<VectorType>()) {
+ if (!LHSExp->getType()->isSubscriptableVectorType()) {
ExprResult Result = DefaultFunctionArrayLvalueConversion(LHSExp);
if (Result.isInvalid())
return ExprError();
@@ -5283,36 +5241,22 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
<< ResultType << BaseExpr->getSourceRange();
return ExprError();
}
- } else if (const VectorType *VTy = LHSTy->getAs<VectorType>()) {
- BaseExpr = LHSExp; // vectors: V[123]
- IndexExpr = RHSExp;
- // We apply C++ DR1213 to vector subscripting too.
- if (getLangOpts().CPlusPlus11 && LHSExp->isPRValue()) {
- ExprResult Materialized = TemporaryMaterializationConversion(LHSExp);
- if (Materialized.isInvalid())
- return ExprError();
- LHSExp = Materialized.get();
+ } else if (LHSTy->isSubscriptableVectorType()) {
+ if (LHSTy->isBuiltinType() &&
+ LHSTy->getAs<BuiltinType>()->isSveVLSBuiltinType()) {
+ const BuiltinType *BTy = LHSTy->getAs<BuiltinType>();
+ if (BTy->isSVEBool())
+ return ExprError(Diag(LLoc, diag::err_subscript_svbool_t)
+ << LHSExp->getSourceRange()
+ << RHSExp->getSourceRange());
+ ResultType = BTy->getSveEltType(Context);
+ } else {
+ const VectorType *VTy = LHSTy->getAs<VectorType>();
+ ResultType = VTy->getElementType();
}
- VK = LHSExp->getValueKind();
- if (VK != VK_PRValue)
- OK = OK_VectorComponent;
-
- ResultType = VTy->getElementType();
- QualType BaseType = BaseExpr->getType();
- Qualifiers BaseQuals = BaseType.getQualifiers();
- Qualifiers MemberQuals = ResultType.getQualifiers();
- Qualifiers Combined = BaseQuals + MemberQuals;
- if (Combined != MemberQuals)
- ResultType = Context.getQualifiedType(ResultType, Combined);
- } else if (LHSTy->isBuiltinType() &&
- LHSTy->getAs<BuiltinType>()->isSveVLSBuiltinType()) {
- const BuiltinType *BTy = LHSTy->getAs<BuiltinType>();
- if (BTy->isSVEBool())
- return ExprError(Diag(LLoc, diag::err_subscript_svbool_t)
- << LHSExp->getSourceRange() << RHSExp->getSourceRange());
-
- BaseExpr = LHSExp;
+ BaseExpr = LHSExp; // vectors: V[123]
IndexExpr = RHSExp;
+ // We apply C++ DR1213 to vector subscripting too.
if (getLangOpts().CPlusPlus11 && LHSExp->isPRValue()) {
ExprResult Materialized = TemporaryMaterializationConversion(LHSExp);
if (Materialized.isInvalid())
@@ -5323,8 +5267,6 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
if (VK != VK_PRValue)
OK = OK_VectorComponent;
- ResultType = BTy->getSveEltType(Context);
-
QualType BaseType = BaseExpr->getType();
Qualifiers BaseQuals = BaseType.getQualifiers();
Qualifiers MemberQuals = ResultType.getQualifiers();
@@ -5564,6 +5506,15 @@ struct EnsureImmediateInvocationInDefaultArgs
// cause it to incorrectly point to the outermost class
// in the case of nested struct initialization.
ExprResult TransformCXXThisExpr(CXXThisExpr *E) { return E; }
+
+ // Rewrite source-location expressions to refer to the context in which they are used.
+ ExprResult TransformSourceLocExpr(SourceLocExpr *E) {
+ if (E->getParentContext() == SemaRef.CurContext)
+ return E;
+ return getDerived().RebuildSourceLocExpr(E->getIdentKind(), E->getType(),
+ E->getBeginLoc(), E->getEndLoc(),
+ SemaRef.CurContext);
+ }
};
ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
@@ -5621,10 +5572,9 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
Res = Immediate.TransformInitializer(Param->getInit(),
/*NotCopy=*/false);
});
- if (Res.isInvalid())
- return ExprError();
- Res = ConvertParamDefaultArgument(Param, Res.get(),
- Res.get()->getBeginLoc());
+ if (Res.isUsable())
+ Res = ConvertParamDefaultArgument(Param, Res.get(),
+ Res.get()->getBeginLoc());
if (Res.isInvalid())
return ExprError();
Init = Res.get();
@@ -5658,9 +5608,10 @@ ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
InitializationContext.emplace(Loc, Field, CurContext);
Expr *Init = nullptr;
+ bool HasRewrittenInit = false;
bool NestedDefaultChecking = isCheckingDefaultArgumentOrInitializer();
-
+ bool InLifetimeExtendingContext = isInLifetimeExtendingContext();
EnterExpressionEvaluationContext EvalContext(
*this, ExpressionEvaluationContext::PotentiallyEvaluated, Field);
@@ -5695,19 +5646,36 @@ ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
ImmediateCallVisitor V(getASTContext());
if (!NestedDefaultChecking)
V.TraverseDecl(Field);
- if (V.HasImmediateCalls) {
+
+ // CWG1815
+ // Support lifetime extension of a temporary created by aggregate
+ // initialization using a default member initializer. We should always rebuild
+ // the initializer if it contains any temporaries (if the initializer
+ // expression is an ExprWithCleanups). Then make sure the normal lifetime
+ // extension code recurses into the default initializer and does lifetime
+ // extension when warranted.
+ bool ContainsAnyTemporaries =
+ isa_and_present<ExprWithCleanups>(Field->getInClassInitializer());
+ if (V.HasImmediateCalls || InLifetimeExtendingContext ||
+ ContainsAnyTemporaries) {
+ HasRewrittenInit = true;
ExprEvalContexts.back().DelayedDefaultInitializationContext = {Loc, Field,
CurContext};
ExprEvalContexts.back().IsCurrentlyCheckingDefaultArgumentOrInitializer =
NestedDefaultChecking;
-
+ // Pass down lifetime extending flag, and collect temporaries in
+ // CreateMaterializeTemporaryExpr when we rewrite the call argument.
+ keepInLifetimeExtendingContext();
EnsureImmediateInvocationInDefaultArgs Immediate(*this);
ExprResult Res;
+
+ // Rebuild CXXDefaultInitExpr might cause diagnostics.
+ SFINAETrap Trap(*this);
runWithSufficientStackSpace(Loc, [&] {
Res = Immediate.TransformInitializer(Field->getInClassInitializer(),
/*CXXDirectInit=*/false);
});
- if (!Res.isInvalid())
+ if (Res.isUsable())
Res = ConvertMemberDefaultInitExpression(Field, Res.get(), Loc);
if (Res.isInvalid()) {
Field->setInvalidDecl();
@@ -5734,7 +5702,7 @@ ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
return CXXDefaultInitExpr::Create(Context, InitializationContext->Loc,
Field, InitializationContext->Context,
- Init);
+ HasRewrittenInit ? Init : nullptr);
}
// DR1351:
@@ -7585,27 +7553,6 @@ bool Sema::isValidSveBitcast(QualType srcTy, QualType destTy) {
ValidScalableConversion(destTy, srcTy);
}
-/// Are the two types RVV-bitcast-compatible types? I.e. is bitcasting from the
-/// first RVV type (e.g. an RVV scalable type) to the second type (e.g. an RVV
-/// VLS type) allowed?
-///
-/// This will also return false if the two given types do not make sense from
-/// the perspective of RVV bitcasts.
-bool Sema::isValidRVVBitcast(QualType srcTy, QualType destTy) {
- assert(srcTy->isVectorType() || destTy->isVectorType());
-
- auto ValidScalableConversion = [](QualType FirstType, QualType SecondType) {
- if (!FirstType->isRVVSizelessBuiltinType())
- return false;
-
- const auto *VecTy = SecondType->getAs<VectorType>();
- return VecTy && VecTy->getVectorKind() == VectorKind::RVVFixedLengthData;
- };
-
- return ValidScalableConversion(srcTy, destTy) ||
- ValidScalableConversion(destTy, srcTy);
-}
-
/// Are the two types matrix types and do they have the same dimensions i.e.
/// do they have the same number of rows and the same number of columns?
bool Sema::areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy) {
@@ -15282,7 +15229,7 @@ ExprResult Sema::BuildBinOp(Scope *S, SourceLocation OpLoc,
LHSExpr = LHS.get();
RHSExpr = RHS.get();
- // We want to end up calling one of checkPseudoObjectAssignment
+ // We want to end up calling one of SemaPseudoObject::checkAssignment
// (if the LHS is a pseudo-object), BuildOverloadedBinOp (if
// both expressions are overloadable or either is type-dependent),
// or CreateBuiltinBinOp (in any other case). We also want to get
@@ -15293,7 +15240,7 @@ ExprResult Sema::BuildBinOp(Scope *S, SourceLocation OpLoc,
// Assignments with a pseudo-object l-value need special analysis.
if (pty->getKind() == BuiltinType::PseudoObject &&
BinaryOperator::isAssignmentOp(Opc))
- return checkPseudoObjectAssignment(S, OpLoc, Opc, LHSExpr, RHSExpr);
+ return PseudoObject().checkAssignment(S, OpLoc, Opc, LHSExpr, RHSExpr);
// Don't resolve overloads if the other type is overloadable.
if (getLangOpts().CPlusPlus && pty->getKind() == BuiltinType::Overload) {
@@ -15716,7 +15663,7 @@ ExprResult Sema::BuildUnaryOp(Scope *S, SourceLocation OpLoc,
// Increment and decrement of pseudo-object references.
if (pty->getKind() == BuiltinType::PseudoObject &&
UnaryOperator::isIncrementDecrementOp(Opc))
- return checkPseudoObjectIncDec(S, OpLoc, Opc, Input);
+ return PseudoObject().checkIncDec(S, OpLoc, Opc, Input);
// extension is always a builtin operator.
if (Opc == UO_Extension)
@@ -17285,8 +17232,7 @@ ExprResult Sema::TransformToPotentiallyEvaluated(Expr *E) {
TypeSourceInfo *Sema::TransformToPotentiallyEvaluated(TypeSourceInfo *TInfo) {
assert(isUnevaluatedContext() &&
"Should only transform unevaluated expressions");
- ExprEvalContexts.back().Context =
- ExprEvalContexts[ExprEvalContexts.size() - 2].Context;
+ ExprEvalContexts.back().Context = parentEvaluationContext().Context;
if (isUnevaluatedContext())
return TInfo;
return TransformToPE(*this).TransformType(TInfo);
@@ -17303,14 +17249,13 @@ Sema::PushExpressionEvaluationContext(
// discarded statements or immediate context are themselves
// a discarded statement or an immediate context, respectively.
ExprEvalContexts.back().InDiscardedStatement =
- ExprEvalContexts[ExprEvalContexts.size() - 2]
- .isDiscardedStatementContext();
+ parentEvaluationContext().isDiscardedStatementContext();
// C++23 [expr.const]/p15
// An expression or conversion is in an immediate function context if [...]
// it is a subexpression of a manifestly constant-evaluated expression or
// conversion.
- const auto &Prev = ExprEvalContexts[ExprEvalContexts.size() - 2];
+ const auto &Prev = parentEvaluationContext();
ExprEvalContexts.back().InImmediateFunctionContext =
Prev.isImmediateFunctionContext() || Prev.isConstantEvaluated();
@@ -17755,7 +17700,7 @@ void Sema::PopExpressionEvaluationContext() {
// Append the collected materialized temporaries into previous context before
// exit if the previous also is a lifetime extending context.
- auto &PrevRecord = ExprEvalContexts[ExprEvalContexts.size() - 2];
+ auto &PrevRecord = parentEvaluationContext();
if (getLangOpts().CPlusPlus23 && Rec.InLifetimeExtendingContext &&
PrevRecord.InLifetimeExtendingContext &&
!Rec.ForRangeLifetimeExtendTemps.empty()) {
@@ -20933,7 +20878,7 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
// Pseudo-objects.
case BuiltinType::PseudoObject:
- return checkPseudoObjectRValue(E);
+ return PseudoObject().checkRValue(E);
case BuiltinType::BuiltinFn: {
// Accept __noop without parens by implicitly converting it to a call expr.
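
Among the SemaExpr changes above, the new TransformSourceLocExpr override in
EnsureImmediateInvocationInDefaultArgs rebuilds source-location expressions
found in default arguments so they refer to the context in which the argument
is used. A hedged illustration (assumes C++20 <source_location>):

    #include <source_location>

    void log(std::source_location loc = std::source_location::current());

    void caller() {
      // With the rewrite, 'loc' describes this use of the default argument
      // rather than the context in which the default argument was parsed.
      log();
    }
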
diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp
index e4601f7d6c47..d3e9dcb4f439 100644
--- a/clang/lib/Sema/SemaExprCXX.cpp
+++ b/clang/lib/Sema/SemaExprCXX.cpp
@@ -1554,9 +1554,6 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
bool ListInitialization) {
QualType Ty = TInfo->getType();
SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc();
-
- assert((!ListInitialization || Exprs.size() == 1) &&
- "List initialization must have exactly one expression.");
SourceRange FullRange = SourceRange(TyBeginLoc, RParenOrBraceLoc);
InitializedEntity Entity =
@@ -5217,10 +5214,18 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
case UTT_IsFloatingPoint:
return T->isFloatingType();
case UTT_IsArray:
+ // Zero-sized arrays aren't considered arrays in partial specializations,
+ // so __is_array shouldn't consider them arrays either.
+ if (const auto *CAT = C.getAsConstantArrayType(T))
+ return CAT->getSize() != 0;
return T->isArrayType();
case UTT_IsBoundedArray:
if (DiagnoseVLAInCXXTypeTrait(Self, TInfo, tok::kw___is_bounded_array))
return false;
+ // Zero-sized arrays aren't considered arrays in partial specializations,
+ // so __is_bounded_array shouldn't consider them arrays either.
+ if (const auto *CAT = C.getAsConstantArrayType(T))
+ return CAT->getSize() != 0;
return T->isArrayType() && !T->isIncompleteArrayType();
case UTT_IsUnboundedArray:
if (DiagnoseVLAInCXXTypeTrait(Self, TInfo, tok::kw___is_unbounded_array))
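
With the two additions above, zero-sized (GNU extension) arrays stop
satisfying __is_array and __is_bounded_array, matching how array partial
specializations treat them. A minimal sketch (assumes the zero-length-array
extension is accepted):

    static_assert(__is_array(int[1]), "");
    static_assert(!__is_array(int[0]), "");          // previously true
    static_assert(!__is_bounded_array(int[0]), "");  // likewise
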
diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp
index 353e911c5cc3..79bdc8e9f878 100644
--- a/clang/lib/Sema/SemaInit.cpp
+++ b/clang/lib/Sema/SemaInit.cpp
@@ -8066,11 +8066,6 @@ static void visitLocalsRetainedByInitializer(IndirectLocalPath &Path,
enum PathLifetimeKind {
/// Lifetime-extend along this path.
Extend,
- /// We should lifetime-extend, but we don't because (due to technical
- /// limitations) we can't. This happens for default member initializers,
- /// which we don't clone for every use, so we don't have a unique
- /// MaterializeTemporaryExpr to update.
- ShouldExtend,
/// Do not lifetime extend along this path.
NoExtend
};
@@ -8082,7 +8077,7 @@ shouldLifetimeExtendThroughPath(const IndirectLocalPath &Path) {
PathLifetimeKind Kind = PathLifetimeKind::Extend;
for (auto Elem : Path) {
if (Elem.Kind == IndirectLocalPathEntry::DefaultInit)
- Kind = PathLifetimeKind::ShouldExtend;
+ Kind = PathLifetimeKind::Extend;
else if (Elem.Kind != IndirectLocalPathEntry::LambdaCaptureInit)
return PathLifetimeKind::NoExtend;
}
@@ -8202,18 +8197,6 @@ void Sema::checkInitializerLifetime(const InitializedEntity &Entity,
ExtendingEntity->allocateManglingNumber());
// Also visit the temporaries lifetime-extended by this initializer.
return true;
-
- case PathLifetimeKind::ShouldExtend:
- // We're supposed to lifetime-extend the temporary along this path (per
- // the resolution of DR1815), but we don't support that yet.
- //
- // FIXME: Properly handle this situation. Perhaps the easiest approach
- // would be to clone the initializer expression on each use that would
- // lifetime extend its temporaries.
- Diag(DiagLoc, diag::warn_unsupported_lifetime_extension)
- << RK << DiagRange;
- break;
-
case PathLifetimeKind::NoExtend:
// If the path goes through the initialization of a variable or field,
// it can't possibly reach a temporary created in this full-expression.
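
Dropping PathLifetimeKind::ShouldExtend, together with the
BuildCXXDefaultInitExpr rewrite in SemaExpr.cpp above, implements CWG1815:
temporaries created by a default member initializer during aggregate
initialization are now lifetime-extended instead of merely triggering
warn_unsupported_lifetime_extension. A sketch of the affected pattern:

    struct A {
      const int &r = 42;  // default member initializer binds a temporary
    };

    // Aggregate initialization uses the default member initializer; the
    // temporary bound to a.r now lives as long as 'a'.
    A a{};
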
diff --git a/clang/lib/Sema/SemaLambda.cpp b/clang/lib/Sema/SemaLambda.cpp
index 1743afaf1528..276a43ad79b9 100644
--- a/clang/lib/Sema/SemaLambda.cpp
+++ b/clang/lib/Sema/SemaLambda.cpp
@@ -12,6 +12,7 @@
#include "clang/Sema/SemaLambda.h"
#include "TypeLocBuilder.h"
#include "clang/AST/ASTLambda.h"
+#include "clang/AST/CXXInheritance.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/DeclSpec.h"
@@ -386,30 +387,69 @@ buildTypeForLambdaCallOperator(Sema &S, clang::CXXRecordDecl *Class,
// parameter, if any, of the lambda's function call operator (possibly
// instantiated from a function call operator template) shall be either:
// - the closure type,
-// - class type derived from the closure type, or
+// - class type publicly and unambiguously derived from the closure type, or
// - a reference to a possibly cv-qualified such type.
-void Sema::DiagnoseInvalidExplicitObjectParameterInLambda(
- CXXMethodDecl *Method) {
+bool Sema::DiagnoseInvalidExplicitObjectParameterInLambda(
+ CXXMethodDecl *Method, SourceLocation CallLoc) {
if (!isLambdaCallWithExplicitObjectParameter(Method))
- return;
+ return false;
CXXRecordDecl *RD = Method->getParent();
if (Method->getType()->isDependentType())
- return;
+ return false;
if (RD->isCapturelessLambda())
- return;
- QualType ExplicitObjectParameterType = Method->getParamDecl(0)
- ->getType()
+ return false;
+
+ ParmVarDecl *Param = Method->getParamDecl(0);
+ QualType ExplicitObjectParameterType = Param->getType()
.getNonReferenceType()
.getUnqualifiedType()
.getDesugaredType(getASTContext());
QualType LambdaType = getASTContext().getRecordType(RD);
if (LambdaType == ExplicitObjectParameterType)
- return;
- if (IsDerivedFrom(RD->getLocation(), ExplicitObjectParameterType, LambdaType))
- return;
- Diag(Method->getParamDecl(0)->getLocation(),
- diag::err_invalid_explicit_object_type_in_lambda)
- << ExplicitObjectParameterType;
+ return false;
+
+ // Don't check the same instantiation twice.
+ //
+ // If this call operator is ill-formed, there is no point in issuing
+ // a diagnostic every time it is called because the problem is in the
+ // definition of the derived type, not at the call site.
+ //
+ // FIXME: Move this check to where we instantiate the method? This should
+ // be possible, but the naive approach of just marking the method as invalid
+ // leads to us emitting more diagnostics than we should have to for this case
+ // (1 error here *and* 1 error about there being no matching overload at the
+ // call site). It might be possible to avoid that by also checking if there
+ // is an empty cast path for the method stored in the context (signalling that
+ // we've already diagnosed it) and then just not building the call, but that
+ // doesn't really seem any simpler than diagnosing it at the call site...
+ if (auto It = Context.LambdaCastPaths.find(Method);
+ It != Context.LambdaCastPaths.end())
+ return It->second.empty();
+
+ CXXCastPath &Path = Context.LambdaCastPaths[Method];
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ if (!IsDerivedFrom(RD->getLocation(), ExplicitObjectParameterType, LambdaType,
+ Paths)) {
+ Diag(Param->getLocation(), diag::err_invalid_explicit_object_type_in_lambda)
+ << ExplicitObjectParameterType;
+ return true;
+ }
+
+ if (Paths.isAmbiguous(LambdaType->getCanonicalTypeUnqualified())) {
+ std::string PathsDisplay = getAmbiguousPathsDisplayString(Paths);
+ Diag(CallLoc, diag::err_explicit_object_lambda_ambiguous_base)
+ << LambdaType << PathsDisplay;
+ return true;
+ }
+
+ if (CheckBaseClassAccess(CallLoc, LambdaType, ExplicitObjectParameterType,
+ Paths.front(),
+ diag::err_explicit_object_lambda_inaccessible_base))
+ return true;
+
+ BuildBasePathArray(Paths, Path);
+ return false;
}
void Sema::handleLambdaNumbering(
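
The strengthened lambda check above enforces that an explicit object parameter
type be the closure type or publicly and unambiguously derived from it, and it
now diagnoses at the call site. An illustrative C++23 sketch (example mine):

    auto l = [i = 0](this auto self) { return i; };
    using Closure = decltype(l);

    struct Mid : Closure {};
    struct Amb : Closure, Mid {};  // two paths to the closure type

    Amb a{l, Mid{l}};
    // a();  // err_explicit_object_lambda_ambiguous_base: converting 'Amb'
    //       // to the closure type is ambiguous

An inaccessible (e.g. private) base path is likewise rejected via
CheckBaseClassAccess with err_explicit_object_lambda_inaccessible_base.
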
diff --git a/clang/lib/Sema/SemaLookup.cpp b/clang/lib/Sema/SemaLookup.cpp
index 0834db95d42a..ef0a655b631a 100644
--- a/clang/lib/Sema/SemaLookup.cpp
+++ b/clang/lib/Sema/SemaLookup.cpp
@@ -34,6 +34,7 @@
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaRISCV.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TypoCorrection.h"
#include "llvm/ADT/STLExtras.h"
@@ -945,13 +946,13 @@ bool Sema::LookupBuiltin(LookupResult &R) {
}
}
- if (DeclareRISCVVBuiltins || DeclareRISCVSiFiveVectorBuiltins) {
- if (!RVIntrinsicManager)
- RVIntrinsicManager = CreateRISCVIntrinsicManager(*this);
+ if (RISCV().DeclareRVVBuiltins || RISCV().DeclareSiFiveVectorBuiltins) {
+ if (!RISCV().IntrinsicManager)
+ RISCV().IntrinsicManager = CreateRISCVIntrinsicManager(*this);
- RVIntrinsicManager->InitIntrinsicList();
+ RISCV().IntrinsicManager->InitIntrinsicList();
- if (RVIntrinsicManager->CreateIntrinsicIfFound(R, II, PP))
+ if (RISCV().IntrinsicManager->CreateIntrinsicIfFound(R, II, PP))
return true;
}
@@ -2771,9 +2772,6 @@ bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
ObjectType->castAs<TagType>()->isBeingDefined()) &&
"Caller should have completed object type");
} else if (SS && SS->isNotEmpty()) {
- if (NestedNameSpecifier *NNS = SS->getScopeRep();
- NNS->getKind() == NestedNameSpecifier::Super)
- return LookupInSuper(R, NNS->getAsRecordDecl());
// This nested-name-specifier occurs after another nested-name-specifier,
// so look into the context associated with the prior nested-name-specifier.
if ((DC = computeDeclContext(*SS, EnteringContext))) {
@@ -2781,6 +2779,12 @@ bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
if (!DC->isDependentContext() && RequireCompleteDeclContext(*SS, DC))
return false;
R.setContextRange(SS->getRange());
+ // FIXME: '__super' lookup semantics could be implemented by a
+ // LookupResult::isSuperLookup flag which skips the initial search of
+ // the lookup context in LookupQualified.
+ if (NestedNameSpecifier *NNS = SS->getScopeRep();
+ NNS->getKind() == NestedNameSpecifier::Super)
+ return LookupInSuper(R, NNS->getAsRecordDecl());
}
IsDependent = !DC && isDependentScopeSpecifier(*SS);
} else {
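
The relocated __super handling above now runs only after the enclosing
nested-name-specifier context has been computed and completed. Sketch of the
MSVC extension involved (example mine; requires -fms-extensions):

    struct Base { void f(); };
    struct Derived : Base {
      void f() {
        __super::f();  // lookup is redirected to the base classes of
                       // 'Derived' via LookupInSuper
      }
    };
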
diff --git a/clang/lib/Sema/SemaOpenACC.cpp b/clang/lib/Sema/SemaOpenACC.cpp
index f174b2fa63c6..09d91b31cfe5 100644
--- a/clang/lib/Sema/SemaOpenACC.cpp
+++ b/clang/lib/Sema/SemaOpenACC.cpp
@@ -233,6 +233,19 @@ bool doesClauseApplyToDirective(OpenACCDirectiveKind DirectiveKind,
return false;
}
+ case OpenACCClauseKind::Reduction:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Loop:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+
default:
// Do nothing so we can go to the 'unimplemented' diagnostic instead.
return true;
@@ -281,7 +294,6 @@ bool checkValidAfterDeviceType(
return true;
}
}
-
} // namespace
SemaOpenACC::SemaOpenACC(Sema &S) : SemaBase(S) {}
@@ -426,6 +438,22 @@ SemaOpenACC::ActOnClause(ArrayRef<const OpenACCClause *> ExistingClauses,
<< /*NoArgs=*/1 << Clause.getDirectiveKind() << MaxArgs
<< Clause.getIntExprs().size();
+ // OpenACC 3.3 Section 2.5.4:
+ // A reduction clause may not appear on a parallel construct with a
+ // num_gangs clause that has more than one argument.
+ if (Clause.getDirectiveKind() == OpenACCDirectiveKind::Parallel &&
+ Clause.getIntExprs().size() > 1) {
+ auto *Parallel =
+ llvm::find_if(ExistingClauses, llvm::IsaPred<OpenACCReductionClause>);
+
+ if (Parallel != ExistingClauses.end()) {
+ Diag(Clause.getBeginLoc(), diag::err_acc_reduction_num_gangs_conflict)
+ << Clause.getIntExprs().size();
+ Diag((*Parallel)->getBeginLoc(), diag::note_acc_previous_clause_here);
+ return nullptr;
+ }
+ }
+
// Create the AST node for the clause even if the number of expressions is
// incorrect.
return OpenACCNumGangsClause::Create(
@@ -706,6 +734,46 @@ SemaOpenACC::ActOnClause(ArrayRef<const OpenACCClause *> ExistingClauses,
Clause.getLParenLoc(), Clause.getDeviceTypeArchitectures(),
Clause.getEndLoc());
}
+ case OpenACCClauseKind::Reduction: {
+ // These restrictions are only properly implemented on 'compute' constructs,
+ // and 'compute' constructs are the only ones that can do anything with this
+ // yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ break;
+
+ // OpenACC 3.3 Section 2.5.4:
+ // A reduction clause may not appear on a parallel construct with a
+ // num_gangs clause that has more than one argument.
+ if (Clause.getDirectiveKind() == OpenACCDirectiveKind::Parallel) {
+ auto NumGangsClauses = llvm::make_filter_range(
+ ExistingClauses, llvm::IsaPred<OpenACCNumGangsClause>);
+
+ for (auto *NGC : NumGangsClauses) {
+ unsigned NumExprs =
+ cast<OpenACCNumGangsClause>(NGC)->getIntExprs().size();
+
+ if (NumExprs > 1) {
+ Diag(Clause.getBeginLoc(), diag::err_acc_reduction_num_gangs_conflict)
+ << NumExprs;
+ Diag(NGC->getBeginLoc(), diag::note_acc_previous_clause_here);
+ return nullptr;
+ }
+ }
+ }
+
+ SmallVector<Expr *> ValidVars;
+
+ for (Expr *Var : Clause.getVarList()) {
+ ExprResult Res = CheckReductionVar(Var);
+
+ if (Res.isUsable())
+ ValidVars.push_back(Res.get());
+ }
+
+ return OpenACCReductionClause::Create(
+ getASTContext(), Clause.getBeginLoc(), Clause.getLParenLoc(),
+ Clause.getReductionOp(), ValidVars, Clause.getEndLoc());
+ }
default:
break;
}
@@ -715,6 +783,66 @@ SemaOpenACC::ActOnClause(ArrayRef<const OpenACCClause *> ExistingClauses,
return nullptr;
}
+/// OpenACC 3.3 section 2.5.15:
+/// At a minimum, the supported data types include ... the numerical data types
+/// in C, C++, and Fortran.
+///
+/// If the reduction var is a composite variable, each
+/// member of the composite variable must be a supported datatype for the
+/// reduction operation.
+ExprResult SemaOpenACC::CheckReductionVar(Expr *VarExpr) {
+ VarExpr = VarExpr->IgnoreParenCasts();
+
+ auto TypeIsValid = [](QualType Ty) {
+ return Ty->isDependentType() || Ty->isScalarType();
+ };
+
+ if (isa<ArraySectionExpr>(VarExpr)) {
+ Expr *ASExpr = VarExpr;
+ QualType BaseTy = ArraySectionExpr::getBaseOriginalType(ASExpr);
+ QualType EltTy = getASTContext().getBaseElementType(BaseTy);
+
+ if (!TypeIsValid(EltTy)) {
+ Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_type)
+ << EltTy << /*Sub array base type*/ 1;
+ return ExprError();
+ }
+ } else if (auto *RD = VarExpr->getType()->getAsRecordDecl()) {
+ if (!RD->isStruct() && !RD->isClass()) {
+ Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_composite_type)
+ << /*not class or struct*/ 0 << VarExpr->getType();
+ return ExprError();
+ }
+
+ if (!RD->isCompleteDefinition()) {
+ Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_composite_type)
+ << /*incomplete*/ 1 << VarExpr->getType();
+ return ExprError();
+ }
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
+ CXXRD && !CXXRD->isAggregate()) {
+ Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_composite_type)
+ << /*aggregate*/ 2 << VarExpr->getType();
+ return ExprError();
+ }
+
+ for (FieldDecl *FD : RD->fields()) {
+ if (!TypeIsValid(FD->getType())) {
+ Diag(VarExpr->getExprLoc(),
+ diag::err_acc_reduction_composite_member_type);
+ Diag(FD->getLocation(), diag::note_acc_reduction_composite_member_loc);
+ return ExprError();
+ }
+ }
+ } else if (!TypeIsValid(VarExpr->getType())) {
+ Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_type)
+ << VarExpr->getType() << /*Sub array base type*/ 0;
+ return ExprError();
+ }
+
+ return VarExpr;
+}
+
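A minimal sketch of what CheckReductionVar accepts and rejects (hypothetical
types and names, not from the patch): scalars and aggregates whose members are
all scalar pass; composites with non-scalar members are diagnosed.

    struct Ok  { int a; float b; };    // all members scalar: accepted
    struct Bad { int a; int arr[4]; }; // array member: composite-member error

    int x;                             // scalar: accepted
    Ok o; Bad b;
    // #pragma acc parallel reduction(+:x, o)  // OK
    // #pragma acc parallel reduction(+:b)     // error, note at 'arr'
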
void SemaOpenACC::ActOnConstruct(OpenACCDirectiveKind K,
SourceLocation StartLoc) {
switch (K) {
@@ -864,9 +992,7 @@ bool SemaOpenACC::CheckVarIsPointerType(OpenACCClauseKind ClauseKind,
return false;
}
-ExprResult SemaOpenACC::ActOnVar(Expr *VarExpr) {
- // We still need to retain the array subscript/subarray exprs, so work on a
- // copy.
+ExprResult SemaOpenACC::ActOnVar(OpenACCClauseKind CK, Expr *VarExpr) {
Expr *CurVarExpr = VarExpr->IgnoreParenImpCasts();
// Sub-arrays/subscript-exprs are fine as long as the base is a
@@ -882,14 +1008,19 @@ ExprResult SemaOpenACC::ActOnVar(Expr *VarExpr) {
// References to a VarDecl are fine.
if (const auto *DRE = dyn_cast<DeclRefExpr>(CurVarExpr)) {
if (isa<VarDecl, NonTypeTemplateParmDecl>(
- DRE->getDecl()->getCanonicalDecl()))
+ DRE->getFoundDecl()->getCanonicalDecl()))
return VarExpr;
}
+  // If CK is a Reduction, special-case OpenACC 3.3 2.5.15: "A var in a
+  // reduction clause must be a scalar variable name, an aggregate variable
+  // name, an array element, or a subarray."
// A MemberExpr that references a Field is valid.
- if (const auto *ME = dyn_cast<MemberExpr>(CurVarExpr)) {
- if (isa<FieldDecl>(ME->getMemberDecl()->getCanonicalDecl()))
- return VarExpr;
+ if (CK != OpenACCClauseKind::Reduction) {
+ if (const auto *ME = dyn_cast<MemberExpr>(CurVarExpr)) {
+ if (isa<FieldDecl>(ME->getMemberDecl()->getCanonicalDecl()))
+ return VarExpr;
+ }
}
// Referring to 'this' is always OK.
@@ -898,7 +1029,9 @@ ExprResult SemaOpenACC::ActOnVar(Expr *VarExpr) {
  // There is nothing we can really do here, as these are dependent, so just
  // treat them as valid.
- if (isa<DependentScopeDeclRefExpr, CXXDependentScopeMemberExpr>(CurVarExpr))
+ if (isa<DependentScopeDeclRefExpr>(CurVarExpr) ||
+ (CK != OpenACCClauseKind::Reduction &&
+ isa<CXXDependentScopeMemberExpr>(CurVarExpr)))
return VarExpr;
// There isn't really anything we can do in the case of a recovery expr, so
@@ -906,7 +1039,8 @@ ExprResult SemaOpenACC::ActOnVar(Expr *VarExpr) {
if (isa<RecoveryExpr>(CurVarExpr))
return ExprError();
- Diag(VarExpr->getExprLoc(), diag::err_acc_not_a_var_ref);
+ Diag(VarExpr->getExprLoc(), diag::err_acc_not_a_var_ref)
+ << (CK != OpenACCClauseKind::Reduction);
return ExprError();
}
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 6110e5229b07..bab61e8fd54e 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -9815,6 +9815,25 @@ static Stmt *buildPreInits(ASTContext &Context,
return nullptr;
}
+/// Append the \p Item or the content of a CompoundStmt to the list \p
+/// TargetList.
+///
+/// A CompoundStmt is used as a container in case multiple statements need to
+/// be stored in lieu of using an explicit list. Flattening is necessary
+/// because contained DeclStmts need to be visible after the execution of the
+/// list. Used for OpenMP pre-init declarations/statements.
+static void appendFlattendedStmtList(SmallVectorImpl<Stmt *> &TargetList,
+ Stmt *Item) {
+ // nullptr represents an empty list.
+ if (!Item)
+ return;
+
+ if (auto *CS = dyn_cast<CompoundStmt>(Item))
+ llvm::append_range(TargetList, CS->body());
+ else
+ TargetList.push_back(Item);
+}
+
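Behaviorally, a sketch (DS, A, B, and CS are hypothetical statements): nullptr
is a no-op, a CompoundStmt is spliced in element-wise, and anything else is
appended as-is.

    SmallVector<Stmt *> List;
    appendFlattendedStmtList(List, nullptr); // List: {}
    appendFlattendedStmtList(List, DS);      // List: {DS}
    appendFlattendedStmtList(List, CS);      // CS = {A; B;} -> List: {DS, A, B}
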
/// Build preinits statement for the given declarations.
static Stmt *
buildPreInits(ASTContext &Context,
@@ -9828,6 +9847,17 @@ buildPreInits(ASTContext &Context,
return nullptr;
}
+/// Build pre-init statement for the given statements.
+static Stmt *buildPreInits(ASTContext &Context, ArrayRef<Stmt *> PreInits) {
+ if (PreInits.empty())
+ return nullptr;
+
+ SmallVector<Stmt *> Stmts;
+ for (Stmt *S : PreInits)
+ appendFlattendedStmtList(Stmts, S);
+  return CompoundStmt::Create(Context, Stmts, FPOptionsOverride(), {}, {});
+}
+
/// Build postupdate expression for the given list of postupdates expressions.
static Expr *buildPostUpdate(Sema &S, ArrayRef<Expr *> PostUpdates) {
Expr *PostUpdate = nullptr;
@@ -9924,11 +9954,21 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
Stmt *DependentPreInits = Transform->getPreInits();
if (!DependentPreInits)
return;
- for (Decl *C : cast<DeclStmt>(DependentPreInits)->getDeclGroup()) {
- auto *D = cast<VarDecl>(C);
- DeclRefExpr *Ref = buildDeclRefExpr(SemaRef, D, D->getType(),
- Transform->getBeginLoc());
- Captures[Ref] = Ref;
+
+ // Search for pre-init declared variables that need to be captured
+ // to be referenceable inside the directive.
+ SmallVector<Stmt *> Constituents;
+ appendFlattendedStmtList(Constituents, DependentPreInits);
+ for (Stmt *S : Constituents) {
+ if (auto *DC = dyn_cast<DeclStmt>(S)) {
+ for (Decl *C : DC->decls()) {
+ auto *D = cast<VarDecl>(C);
+ DeclRefExpr *Ref = buildDeclRefExpr(
+ SemaRef, D, D->getType().getNonReferenceType(),
+ Transform->getBeginLoc());
+ Captures[Ref] = Ref;
+ }
+ }
}
}))
return 0;
@@ -15059,9 +15099,7 @@ StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsDistributeSimdDirective(
bool SemaOpenMP::checkTransformableLoopNest(
OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops,
SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers,
- Stmt *&Body,
- SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>>
- &OriginalInits) {
+ Stmt *&Body, SmallVectorImpl<SmallVector<Stmt *, 0>> &OriginalInits) {
OriginalInits.emplace_back();
bool Result = OMPLoopBasedDirective::doForAllLoops(
AStmt->IgnoreContainers(), /*TryImperfectlyNestedLoops=*/false, NumLoops,
@@ -15095,16 +15133,70 @@ bool SemaOpenMP::checkTransformableLoopNest(
DependentPreInits = Dir->getPreInits();
else
llvm_unreachable("Unhandled loop transformation");
- if (!DependentPreInits)
- return;
- llvm::append_range(OriginalInits.back(),
- cast<DeclStmt>(DependentPreInits)->getDeclGroup());
+
+ appendFlattendedStmtList(OriginalInits.back(), DependentPreInits);
});
assert(OriginalInits.back().empty() && "No preinit after innermost loop");
OriginalInits.pop_back();
return Result;
}
+/// Add preinit statements that need to be propagated from the selected loop.
+static void addLoopPreInits(ASTContext &Context,
+ OMPLoopBasedDirective::HelperExprs &LoopHelper,
+ Stmt *LoopStmt, ArrayRef<Stmt *> OriginalInit,
+ SmallVectorImpl<Stmt *> &PreInits) {
+
+ // For range-based for-statements, ensure that their syntactic sugar is
+ // executed by adding them as pre-init statements.
+ if (auto *CXXRangeFor = dyn_cast<CXXForRangeStmt>(LoopStmt)) {
+ Stmt *RangeInit = CXXRangeFor->getInit();
+ if (RangeInit)
+ PreInits.push_back(RangeInit);
+
+ DeclStmt *RangeStmt = CXXRangeFor->getRangeStmt();
+ PreInits.push_back(new (Context) DeclStmt(RangeStmt->getDeclGroup(),
+ RangeStmt->getBeginLoc(),
+ RangeStmt->getEndLoc()));
+
+ DeclStmt *RangeEnd = CXXRangeFor->getEndStmt();
+ PreInits.push_back(new (Context) DeclStmt(RangeEnd->getDeclGroup(),
+ RangeEnd->getBeginLoc(),
+ RangeEnd->getEndLoc()));
+ }
+
+ llvm::append_range(PreInits, OriginalInit);
+
+ // List of OMPCapturedExprDecl, for __begin, __end, and NumIterations
+ if (auto *PI = cast_or_null<DeclStmt>(LoopHelper.PreInits)) {
+ PreInits.push_back(new (Context) DeclStmt(
+ PI->getDeclGroup(), PI->getBeginLoc(), PI->getEndLoc()));
+ }
+
+ // Gather declarations for the data members used as counters.
+ for (Expr *CounterRef : LoopHelper.Counters) {
+ auto *CounterDecl = cast<DeclRefExpr>(CounterRef)->getDecl();
+ if (isa<OMPCapturedExprDecl>(CounterDecl))
+ PreInits.push_back(new (Context) DeclStmt(
+ DeclGroupRef(CounterDecl), SourceLocation(), SourceLocation()));
+ }
+}
+
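For orientation, a sketch of the range-based case (hypothetical source; the
desugared names follow the usual range-for lowering): the range and end
declarations are hoisted so they remain live for the generated loops.

    // #pragma omp tile sizes(4)
    // for (int v : vec) use(v);
    //
    // Conceptual pre-init order produced above:
    //   auto &&__range = vec;   // from CXXForRangeStmt::getRangeStmt()
    //   auto __end = ...;       // from CXXForRangeStmt::getEndStmt()
    //   ...OriginalInit...
    //   ...LoopHelper.PreInits (__begin/__end/NumIterations captures)...
    //   ...OMPCapturedExprDecl counters...
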
+/// Collect the loop statements (ForStmt or CXXForRangeStmt) of the affected
+/// loops of a construct.
+static void collectLoopStmts(Stmt *AStmt, MutableArrayRef<Stmt *> LoopStmts) {
+ size_t NumLoops = LoopStmts.size();
+ OMPLoopBasedDirective::doForAllLoops(
+ AStmt, /*TryImperfectlyNestedLoops=*/false, NumLoops,
+ [LoopStmts](unsigned Cnt, Stmt *CurStmt) {
+ assert(!LoopStmts[Cnt] && "Loop statement must not yet be assigned");
+ LoopStmts[Cnt] = CurStmt;
+ return false;
+ });
+ assert(!is_contained(LoopStmts, nullptr) &&
+ "Expecting a loop statement for each affected loop");
+}
+
StmtResult SemaOpenMP::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
@@ -15126,8 +15218,7 @@ StmtResult SemaOpenMP::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
// Verify and diagnose loop nest.
SmallVector<OMPLoopBasedDirective::HelperExprs, 4> LoopHelpers(NumLoops);
Stmt *Body = nullptr;
- SmallVector<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>, 4>
- OriginalInits;
+ SmallVector<SmallVector<Stmt *, 0>, 4> OriginalInits;
if (!checkTransformableLoopNest(OMPD_tile, AStmt, NumLoops, LoopHelpers, Body,
OriginalInits))
return StmtError();
@@ -15144,7 +15235,11 @@ StmtResult SemaOpenMP::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
"Expecting loop iteration space dimensionality to match number of "
"affected loops");
- SmallVector<Decl *, 4> PreInits;
+ // Collect all affected loop statements.
+ SmallVector<Stmt *> LoopStmts(NumLoops, nullptr);
+ collectLoopStmts(AStmt, LoopStmts);
+
+ SmallVector<Stmt *, 4> PreInits;
CaptureVars CopyTransformer(SemaRef);
// Create iteration variables for the generated loops.
@@ -15184,20 +15279,9 @@ StmtResult SemaOpenMP::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
&SemaRef.PP.getIdentifierTable().get(TileCntName));
TileIndVars[I] = TileCntDecl;
}
- for (auto &P : OriginalInits[I]) {
- if (auto *D = P.dyn_cast<Decl *>())
- PreInits.push_back(D);
- else if (auto *PI = dyn_cast_or_null<DeclStmt>(P.dyn_cast<Stmt *>()))
- PreInits.append(PI->decl_begin(), PI->decl_end());
- }
- if (auto *PI = cast_or_null<DeclStmt>(LoopHelper.PreInits))
- PreInits.append(PI->decl_begin(), PI->decl_end());
- // Gather declarations for the data members used as counters.
- for (Expr *CounterRef : LoopHelper.Counters) {
- auto *CounterDecl = cast<DeclRefExpr>(CounterRef)->getDecl();
- if (isa<OMPCapturedExprDecl>(CounterDecl))
- PreInits.push_back(CounterDecl);
- }
+
+ addLoopPreInits(Context, LoopHelper, LoopStmts[I], OriginalInits[I],
+ PreInits);
}
// Once the original iteration values are set, append the innermost body.
@@ -15246,19 +15330,20 @@ StmtResult SemaOpenMP::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
OMPLoopBasedDirective::HelperExprs &LoopHelper = LoopHelpers[I];
Expr *NumIterations = LoopHelper.NumIterations;
auto *OrigCntVar = cast<DeclRefExpr>(LoopHelper.Counters[0]);
- QualType CntTy = OrigCntVar->getType();
+ QualType IVTy = NumIterations->getType();
+ Stmt *LoopStmt = LoopStmts[I];
// Commonly used variables. One of the constraints of an AST is that every
    // node object must appear at most once, hence we define lambdas that create
// a new AST node at every use.
- auto MakeTileIVRef = [&SemaRef = this->SemaRef, &TileIndVars, I, CntTy,
+ auto MakeTileIVRef = [&SemaRef = this->SemaRef, &TileIndVars, I, IVTy,
OrigCntVar]() {
- return buildDeclRefExpr(SemaRef, TileIndVars[I], CntTy,
+ return buildDeclRefExpr(SemaRef, TileIndVars[I], IVTy,
OrigCntVar->getExprLoc());
};
- auto MakeFloorIVRef = [&SemaRef = this->SemaRef, &FloorIndVars, I, CntTy,
+ auto MakeFloorIVRef = [&SemaRef = this->SemaRef, &FloorIndVars, I, IVTy,
OrigCntVar]() {
- return buildDeclRefExpr(SemaRef, FloorIndVars[I], CntTy,
+ return buildDeclRefExpr(SemaRef, FloorIndVars[I], IVTy,
OrigCntVar->getExprLoc());
};
@@ -15320,6 +15405,8 @@ StmtResult SemaOpenMP::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
// further into the inner loop.
SmallVector<Stmt *, 4> BodyParts;
BodyParts.append(LoopHelper.Updates.begin(), LoopHelper.Updates.end());
+ if (auto *SourceCXXFor = dyn_cast<CXXForRangeStmt>(LoopStmt))
+ BodyParts.push_back(SourceCXXFor->getLoopVarStmt());
BodyParts.push_back(Inner);
Inner = CompoundStmt::Create(Context, BodyParts, FPOptionsOverride(),
Inner->getBeginLoc(), Inner->getEndLoc());
@@ -15334,12 +15421,14 @@ StmtResult SemaOpenMP::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
auto &LoopHelper = LoopHelpers[I];
Expr *NumIterations = LoopHelper.NumIterations;
DeclRefExpr *OrigCntVar = cast<DeclRefExpr>(LoopHelper.Counters[0]);
- QualType CntTy = OrigCntVar->getType();
+ QualType IVTy = NumIterations->getType();
- // Commonly used variables.
- auto MakeFloorIVRef = [&SemaRef = this->SemaRef, &FloorIndVars, I, CntTy,
+ // Commonly used variables. One of the constraints of an AST is that every
+  // node object must appear at most once, hence we define lambdas that create
+ // a new AST node at every use.
+ auto MakeFloorIVRef = [&SemaRef = this->SemaRef, &FloorIndVars, I, IVTy,
OrigCntVar]() {
- return buildDeclRefExpr(SemaRef, FloorIndVars[I], CntTy,
+ return buildDeclRefExpr(SemaRef, FloorIndVars[I], IVTy,
OrigCntVar->getExprLoc());
};
@@ -15405,8 +15494,7 @@ StmtResult SemaOpenMP::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
Stmt *Body = nullptr;
SmallVector<OMPLoopBasedDirective::HelperExprs, NumLoops> LoopHelpers(
NumLoops);
- SmallVector<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>, NumLoops + 1>
- OriginalInits;
+ SmallVector<SmallVector<Stmt *, 0>, NumLoops + 1> OriginalInits;
if (!checkTransformableLoopNest(OMPD_unroll, AStmt, NumLoops, LoopHelpers,
Body, OriginalInits))
return StmtError();
@@ -15418,6 +15506,10 @@ StmtResult SemaOpenMP::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
return OMPUnrollDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
NumGeneratedLoops, nullptr, nullptr);
+ assert(LoopHelpers.size() == NumLoops &&
+ "Expecting a single-dimensional loop iteration space");
+ assert(OriginalInits.size() == NumLoops &&
+ "Expecting a single-dimensional loop iteration space");
OMPLoopBasedDirective::HelperExprs &LoopHelper = LoopHelpers.front();
if (FullClause) {
@@ -15481,24 +15573,13 @@ StmtResult SemaOpenMP::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
// of a canonical loop nest where these PreInits are emitted before the
// outermost directive.
+ // Find the loop statement.
+ Stmt *LoopStmt = nullptr;
+ collectLoopStmts(AStmt, {LoopStmt});
+
// Determine the PreInit declarations.
- SmallVector<Decl *, 4> PreInits;
- assert(OriginalInits.size() == 1 &&
- "Expecting a single-dimensional loop iteration space");
- for (auto &P : OriginalInits[0]) {
- if (auto *D = P.dyn_cast<Decl *>())
- PreInits.push_back(D);
- else if (auto *PI = dyn_cast_or_null<DeclStmt>(P.dyn_cast<Stmt *>()))
- PreInits.append(PI->decl_begin(), PI->decl_end());
- }
- if (auto *PI = cast_or_null<DeclStmt>(LoopHelper.PreInits))
- PreInits.append(PI->decl_begin(), PI->decl_end());
- // Gather declarations for the data members used as counters.
- for (Expr *CounterRef : LoopHelper.Counters) {
- auto *CounterDecl = cast<DeclRefExpr>(CounterRef)->getDecl();
- if (isa<OMPCapturedExprDecl>(CounterDecl))
- PreInits.push_back(CounterDecl);
- }
+ SmallVector<Stmt *, 4> PreInits;
+ addLoopPreInits(Context, LoopHelper, LoopStmt, OriginalInits[0], PreInits);
auto *IterationVarRef = cast<DeclRefExpr>(LoopHelper.IterationVarRef);
QualType IVTy = IterationVarRef->getType();
@@ -15604,6 +15685,8 @@ StmtResult SemaOpenMP::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
// Inner For statement.
SmallVector<Stmt *> InnerBodyStmts;
InnerBodyStmts.append(LoopHelper.Updates.begin(), LoopHelper.Updates.end());
+ if (auto *CXXRangeFor = dyn_cast<CXXForRangeStmt>(LoopStmt))
+ InnerBodyStmts.push_back(CXXRangeFor->getLoopVarStmt());
InnerBodyStmts.push_back(Body);
CompoundStmt *InnerBody =
CompoundStmt::Create(getASTContext(), InnerBodyStmts, FPOptionsOverride(),
diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp
index 2eb25237a0de..6c5e8afbcfb6 100644
--- a/clang/lib/Sema/SemaOverload.cpp
+++ b/clang/lib/Sema/SemaOverload.cpp
@@ -13,6 +13,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DependenceFlags.h"
@@ -1481,7 +1482,7 @@ static bool IsOverloadOrOverrideImpl(Sema &SemaRef, FunctionDecl *New,
}
if (OldMethod && NewMethod && !OldMethod->isStatic() &&
- !OldMethod->isStatic()) {
+ !NewMethod->isStatic()) {
bool HaveCorrespondingObjectParameters = [&](const CXXMethodDecl *Old,
const CXXMethodDecl *New) {
auto NewObjectType = New->getFunctionObjectParameterReferenceType();
@@ -6472,17 +6473,20 @@ ExprResult Sema::InitializeExplicitObjectArgument(Sema &S, Expr *Obj,
Obj->getExprLoc(), Obj);
}
-static void PrepareExplicitObjectArgument(Sema &S, CXXMethodDecl *Method,
+static bool PrepareExplicitObjectArgument(Sema &S, CXXMethodDecl *Method,
Expr *Object, MultiExprArg &Args,
SmallVectorImpl<Expr *> &NewArgs) {
assert(Method->isExplicitObjectMemberFunction() &&
"Method is not an explicit member function");
assert(NewArgs.empty() && "NewArgs should be empty");
+
NewArgs.reserve(Args.size() + 1);
Expr *This = GetExplicitObjectExpr(S, Object, Method);
NewArgs.push_back(This);
NewArgs.append(Args.begin(), Args.end());
Args = NewArgs;
+ return S.DiagnoseInvalidExplicitObjectParameterInLambda(
+ Method, Object->getBeginLoc());
}
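
For context, a sketch (not from the patch) of the kind of call this now guards:
with an explicit object parameter ("deducing this"), the object expression is
prepended to the argument list, and the lambda-specific validity check runs at
the same point so callers can bail out on error.

    auto l = [](this auto &&self, int i) { return i; };
    int r = l(42); // 'l' is prepended as the explicit object argument
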
/// Determine whether the provided type is an integral type, or an enumeration
@@ -11298,8 +11302,16 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
Expr *FromExpr = Conv.Bad.FromExpr;
QualType FromTy = Conv.Bad.getFromType();
QualType ToTy = Conv.Bad.getToType();
- SourceRange ToParamRange =
- !isObjectArgument ? Fn->getParamDecl(I)->getSourceRange() : SourceRange();
+ SourceRange ToParamRange;
+
+  // FIXME: In the presence of parameter packs, we can't determine the
+  // parameter range reliably, as we don't have access to the instantiation.
+ bool HasParamPack =
+ llvm::any_of(Fn->parameters().take_front(I), [](const ParmVarDecl *Parm) {
+ return Parm->isParameterPack();
+ });
+ if (!isObjectArgument && !HasParamPack)
+ ToParamRange = Fn->getParamDecl(I)->getSourceRange();
if (FromTy == S.Context.OverloadTy) {
assert(FromExpr && "overload set argument came from implicit argument?");
@@ -14351,7 +14363,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
if (Fn.isInvalid())
return ExprError();
return CXXOperatorCallExpr::Create(Context, Op, Fn.get(), ArgsArray,
- Context.DependentTy, VK, OpLoc,
+ Context.DependentTy, VK_PRValue, OpLoc,
CurFPFeatureOverrides());
}
@@ -15612,8 +15624,10 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
CallExpr *TheCall = nullptr;
llvm::SmallVector<Expr *, 8> NewArgs;
if (Method->isExplicitObjectMemberFunction()) {
- PrepareExplicitObjectArgument(*this, Method, MemExpr->getBase(), Args,
- NewArgs);
+ if (PrepareExplicitObjectArgument(*this, Method, MemExpr->getBase(), Args,
+ NewArgs))
+ return ExprError();
+
// Build the actual expression node.
ExprResult FnExpr =
CreateFunctionRefExpr(*this, Method, FoundDecl, MemExpr,
@@ -15927,9 +15941,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
// Initialize the object parameter.
llvm::SmallVector<Expr *, 8> NewArgs;
if (Method->isExplicitObjectMemberFunction()) {
- // FIXME: we should do that during the definition of the lambda when we can.
- DiagnoseInvalidExplicitObjectParameterInLambda(Method);
- PrepareExplicitObjectArgument(*this, Method, Obj, Args, NewArgs);
+ IsError |= PrepareExplicitObjectArgument(*this, Method, Obj, Args, NewArgs);
} else {
ExprResult ObjRes = PerformImplicitObjectArgumentInitialization(
Object.get(), /*Qualifier=*/nullptr, Best->FoundDecl, Method);
diff --git a/clang/lib/Sema/SemaPseudoObject.cpp b/clang/lib/Sema/SemaPseudoObject.cpp
index 14ed9590afc6..fdb584ceb810 100644
--- a/clang/lib/Sema/SemaPseudoObject.cpp
+++ b/clang/lib/Sema/SemaPseudoObject.cpp
@@ -29,6 +29,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Sema/SemaPseudoObject.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/Basic/CharInfo.h"
@@ -1446,24 +1447,24 @@ ExprResult MSPropertyOpBuilder::buildSet(Expr *op, SourceLocation sl,
// General Sema routines.
//===----------------------------------------------------------------------===//
-ExprResult Sema::checkPseudoObjectRValue(Expr *E) {
+ExprResult SemaPseudoObject::checkRValue(Expr *E) {
Expr *opaqueRef = E->IgnoreParens();
if (ObjCPropertyRefExpr *refExpr
= dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
- ObjCPropertyOpBuilder builder(*this, refExpr, true);
+ ObjCPropertyOpBuilder builder(SemaRef, refExpr, true);
return builder.buildRValueOperation(E);
}
else if (ObjCSubscriptRefExpr *refExpr
= dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) {
- ObjCSubscriptOpBuilder builder(*this, refExpr, true);
+ ObjCSubscriptOpBuilder builder(SemaRef, refExpr, true);
return builder.buildRValueOperation(E);
} else if (MSPropertyRefExpr *refExpr
= dyn_cast<MSPropertyRefExpr>(opaqueRef)) {
- MSPropertyOpBuilder builder(*this, refExpr, true);
+ MSPropertyOpBuilder builder(SemaRef, refExpr, true);
return builder.buildRValueOperation(E);
} else if (MSPropertySubscriptExpr *RefExpr =
dyn_cast<MSPropertySubscriptExpr>(opaqueRef)) {
- MSPropertyOpBuilder Builder(*this, RefExpr, true);
+ MSPropertyOpBuilder Builder(SemaRef, RefExpr, true);
return Builder.buildRValueOperation(E);
} else {
llvm_unreachable("unknown pseudo-object kind!");
@@ -1471,48 +1472,48 @@ ExprResult Sema::checkPseudoObjectRValue(Expr *E) {
}
/// Check an increment or decrement of a pseudo-object expression.
-ExprResult Sema::checkPseudoObjectIncDec(Scope *Sc, SourceLocation opcLoc,
+ExprResult SemaPseudoObject::checkIncDec(Scope *Sc, SourceLocation opcLoc,
UnaryOperatorKind opcode, Expr *op) {
// Do nothing if the operand is dependent.
if (op->isTypeDependent())
- return UnaryOperator::Create(Context, op, opcode, Context.DependentTy,
- VK_PRValue, OK_Ordinary, opcLoc, false,
- CurFPFeatureOverrides());
+ return UnaryOperator::Create(
+ SemaRef.Context, op, opcode, SemaRef.Context.DependentTy, VK_PRValue,
+ OK_Ordinary, opcLoc, false, SemaRef.CurFPFeatureOverrides());
assert(UnaryOperator::isIncrementDecrementOp(opcode));
Expr *opaqueRef = op->IgnoreParens();
if (ObjCPropertyRefExpr *refExpr
= dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
- ObjCPropertyOpBuilder builder(*this, refExpr, false);
+ ObjCPropertyOpBuilder builder(SemaRef, refExpr, false);
return builder.buildIncDecOperation(Sc, opcLoc, opcode, op);
} else if (isa<ObjCSubscriptRefExpr>(opaqueRef)) {
Diag(opcLoc, diag::err_illegal_container_subscripting_op);
return ExprError();
} else if (MSPropertyRefExpr *refExpr
= dyn_cast<MSPropertyRefExpr>(opaqueRef)) {
- MSPropertyOpBuilder builder(*this, refExpr, false);
+ MSPropertyOpBuilder builder(SemaRef, refExpr, false);
return builder.buildIncDecOperation(Sc, opcLoc, opcode, op);
} else if (MSPropertySubscriptExpr *RefExpr
= dyn_cast<MSPropertySubscriptExpr>(opaqueRef)) {
- MSPropertyOpBuilder Builder(*this, RefExpr, false);
+ MSPropertyOpBuilder Builder(SemaRef, RefExpr, false);
return Builder.buildIncDecOperation(Sc, opcLoc, opcode, op);
} else {
llvm_unreachable("unknown pseudo-object kind!");
}
}
-ExprResult Sema::checkPseudoObjectAssignment(Scope *S, SourceLocation opcLoc,
+ExprResult SemaPseudoObject::checkAssignment(Scope *S, SourceLocation opcLoc,
BinaryOperatorKind opcode,
Expr *LHS, Expr *RHS) {
// Do nothing if either argument is dependent.
if (LHS->isTypeDependent() || RHS->isTypeDependent())
- return BinaryOperator::Create(Context, LHS, RHS, opcode,
- Context.DependentTy, VK_PRValue, OK_Ordinary,
- opcLoc, CurFPFeatureOverrides());
+ return BinaryOperator::Create(
+ SemaRef.Context, LHS, RHS, opcode, SemaRef.Context.DependentTy,
+ VK_PRValue, OK_Ordinary, opcLoc, SemaRef.CurFPFeatureOverrides());
// Filter out non-overload placeholder types in the RHS.
if (RHS->getType()->isNonOverloadPlaceholderType()) {
- ExprResult result = CheckPlaceholderExpr(RHS);
+ ExprResult result = SemaRef.CheckPlaceholderExpr(RHS);
if (result.isInvalid()) return ExprError();
RHS = result.get();
}
@@ -1521,20 +1522,20 @@ ExprResult Sema::checkPseudoObjectAssignment(Scope *S, SourceLocation opcLoc,
Expr *opaqueRef = LHS->IgnoreParens();
if (ObjCPropertyRefExpr *refExpr
= dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
- ObjCPropertyOpBuilder builder(*this, refExpr, IsSimpleAssign);
+ ObjCPropertyOpBuilder builder(SemaRef, refExpr, IsSimpleAssign);
return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
} else if (ObjCSubscriptRefExpr *refExpr
= dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) {
- ObjCSubscriptOpBuilder builder(*this, refExpr, IsSimpleAssign);
+ ObjCSubscriptOpBuilder builder(SemaRef, refExpr, IsSimpleAssign);
return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
} else if (MSPropertyRefExpr *refExpr
= dyn_cast<MSPropertyRefExpr>(opaqueRef)) {
- MSPropertyOpBuilder builder(*this, refExpr, IsSimpleAssign);
- return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
+ MSPropertyOpBuilder builder(SemaRef, refExpr, IsSimpleAssign);
+ return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
} else if (MSPropertySubscriptExpr *RefExpr
= dyn_cast<MSPropertySubscriptExpr>(opaqueRef)) {
- MSPropertyOpBuilder Builder(*this, RefExpr, IsSimpleAssign);
- return Builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
+ MSPropertyOpBuilder Builder(SemaRef, RefExpr, IsSimpleAssign);
+ return Builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
} else {
llvm_unreachable("unknown pseudo-object kind!");
}
@@ -1557,36 +1558,38 @@ static Expr *stripOpaqueValuesFromPseudoObjectRef(Sema &S, Expr *E) {
/// This is a hack which should be removed when TreeTransform is
/// capable of rebuilding a tree without stripping implicit
/// operations.
-Expr *Sema::recreateSyntacticForm(PseudoObjectExpr *E) {
+Expr *SemaPseudoObject::recreateSyntacticForm(PseudoObjectExpr *E) {
Expr *syntax = E->getSyntacticForm();
if (UnaryOperator *uop = dyn_cast<UnaryOperator>(syntax)) {
- Expr *op = stripOpaqueValuesFromPseudoObjectRef(*this, uop->getSubExpr());
- return UnaryOperator::Create(Context, op, uop->getOpcode(), uop->getType(),
- uop->getValueKind(), uop->getObjectKind(),
- uop->getOperatorLoc(), uop->canOverflow(),
- CurFPFeatureOverrides());
+ Expr *op = stripOpaqueValuesFromPseudoObjectRef(SemaRef, uop->getSubExpr());
+ return UnaryOperator::Create(
+ SemaRef.Context, op, uop->getOpcode(), uop->getType(),
+ uop->getValueKind(), uop->getObjectKind(), uop->getOperatorLoc(),
+ uop->canOverflow(), SemaRef.CurFPFeatureOverrides());
} else if (CompoundAssignOperator *cop
= dyn_cast<CompoundAssignOperator>(syntax)) {
- Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, cop->getLHS());
+ Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(SemaRef, cop->getLHS());
Expr *rhs = cast<OpaqueValueExpr>(cop->getRHS())->getSourceExpr();
return CompoundAssignOperator::Create(
- Context, lhs, rhs, cop->getOpcode(), cop->getType(),
+ SemaRef.Context, lhs, rhs, cop->getOpcode(), cop->getType(),
cop->getValueKind(), cop->getObjectKind(), cop->getOperatorLoc(),
- CurFPFeatureOverrides(), cop->getComputationLHSType(),
+ SemaRef.CurFPFeatureOverrides(), cop->getComputationLHSType(),
cop->getComputationResultType());
} else if (BinaryOperator *bop = dyn_cast<BinaryOperator>(syntax)) {
- Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, bop->getLHS());
+ Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(SemaRef, bop->getLHS());
Expr *rhs = cast<OpaqueValueExpr>(bop->getRHS())->getSourceExpr();
- return BinaryOperator::Create(Context, lhs, rhs, bop->getOpcode(),
+ return BinaryOperator::Create(SemaRef.Context, lhs, rhs, bop->getOpcode(),
bop->getType(), bop->getValueKind(),
bop->getObjectKind(), bop->getOperatorLoc(),
- CurFPFeatureOverrides());
+ SemaRef.CurFPFeatureOverrides());
} else if (isa<CallExpr>(syntax)) {
return syntax;
} else {
assert(syntax->hasPlaceholderType(BuiltinType::PseudoObject));
- return stripOpaqueValuesFromPseudoObjectRef(*this, syntax);
+ return stripOpaqueValuesFromPseudoObjectRef(SemaRef, syntax);
}
}
+
+SemaPseudoObject::SemaPseudoObject(Sema &S) : SemaBase(S) {}
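
Assuming the Sema-splitting pattern visible elsewhere in this change (compare
S.RISCV() in SemaRISCV.cpp below), call sites would reach these entry points
through a SemaPseudoObject accessor on Sema; the accessor name here is an
assumption, not confirmed by this hunk:

    // Hypothetical call site, mirroring the S.RISCV() accessor pattern:
    ExprResult R = SemaRef.PseudoObject().checkRValue(E);
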
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
new file mode 100644
index 000000000000..ea6e3f75490b
--- /dev/null
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -0,0 +1,1427 @@
+//===------ SemaRISCV.cpp ------- RISC-V target-specific routines ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to RISC-V.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaRISCV.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/RISCVIntrinsicManager.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Support/RISCVVIntrinsicUtils.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
+#include <optional>
+#include <string>
+#include <vector>
+
+using namespace llvm;
+using namespace clang;
+using namespace clang::RISCV;
+
+using IntrinsicKind = sema::RISCVIntrinsicManager::IntrinsicKind;
+
+namespace {
+
+// Function definition of a RVV intrinsic.
+struct RVVIntrinsicDef {
+ /// Mapping to which clang built-in function, e.g. __builtin_rvv_vadd.
+ std::string BuiltinName;
+
+ /// Function signature, first element is return type.
+ RVVTypes Signature;
+};
+
+struct RVVOverloadIntrinsicDef {
+ // Indexes of RISCVIntrinsicManagerImpl::IntrinsicList.
+ SmallVector<uint16_t, 8> Indexes;
+};
+
+} // namespace
+
+static const PrototypeDescriptor RVVSignatureTable[] = {
+#define DECL_SIGNATURE_TABLE
+#include "clang/Basic/riscv_vector_builtin_sema.inc"
+#undef DECL_SIGNATURE_TABLE
+};
+
+static const PrototypeDescriptor RVSiFiveVectorSignatureTable[] = {
+#define DECL_SIGNATURE_TABLE
+#include "clang/Basic/riscv_sifive_vector_builtin_sema.inc"
+#undef DECL_SIGNATURE_TABLE
+};
+
+static const RVVIntrinsicRecord RVVIntrinsicRecords[] = {
+#define DECL_INTRINSIC_RECORDS
+#include "clang/Basic/riscv_vector_builtin_sema.inc"
+#undef DECL_INTRINSIC_RECORDS
+};
+
+static const RVVIntrinsicRecord RVSiFiveVectorIntrinsicRecords[] = {
+#define DECL_INTRINSIC_RECORDS
+#include "clang/Basic/riscv_sifive_vector_builtin_sema.inc"
+#undef DECL_INTRINSIC_RECORDS
+};
+
+// Get a subsequence of the signature table.
+static ArrayRef<PrototypeDescriptor>
+ProtoSeq2ArrayRef(IntrinsicKind K, uint16_t Index, uint8_t Length) {
+ switch (K) {
+ case IntrinsicKind::RVV:
+ return ArrayRef(&RVVSignatureTable[Index], Length);
+ case IntrinsicKind::SIFIVE_VECTOR:
+ return ArrayRef(&RVSiFiveVectorSignatureTable[Index], Length);
+ }
+ llvm_unreachable("Unhandled IntrinsicKind");
+}
+
+static QualType RVVType2Qual(ASTContext &Context, const RVVType *Type) {
+ QualType QT;
+ switch (Type->getScalarType()) {
+ case ScalarTypeKind::Void:
+ QT = Context.VoidTy;
+ break;
+ case ScalarTypeKind::Size_t:
+ QT = Context.getSizeType();
+ break;
+ case ScalarTypeKind::Ptrdiff_t:
+ QT = Context.getPointerDiffType();
+ break;
+ case ScalarTypeKind::UnsignedLong:
+ QT = Context.UnsignedLongTy;
+ break;
+ case ScalarTypeKind::SignedLong:
+ QT = Context.LongTy;
+ break;
+ case ScalarTypeKind::Boolean:
+ QT = Context.BoolTy;
+ break;
+ case ScalarTypeKind::SignedInteger:
+ QT = Context.getIntTypeForBitwidth(Type->getElementBitwidth(), true);
+ break;
+ case ScalarTypeKind::UnsignedInteger:
+ QT = Context.getIntTypeForBitwidth(Type->getElementBitwidth(), false);
+ break;
+ case ScalarTypeKind::BFloat:
+ QT = Context.BFloat16Ty;
+ break;
+ case ScalarTypeKind::Float:
+ switch (Type->getElementBitwidth()) {
+ case 64:
+ QT = Context.DoubleTy;
+ break;
+ case 32:
+ QT = Context.FloatTy;
+ break;
+ case 16:
+ QT = Context.Float16Ty;
+ break;
+ default:
+ llvm_unreachable("Unsupported floating point width.");
+ }
+ break;
+ case Invalid:
+ case Undefined:
+ llvm_unreachable("Unhandled type.");
+ }
+ if (Type->isVector()) {
+ if (Type->isTuple())
+ QT = Context.getScalableVectorType(QT, *Type->getScale(), Type->getNF());
+ else
+ QT = Context.getScalableVectorType(QT, *Type->getScale());
+ }
+
+ if (Type->isConstant())
+ QT = Context.getConstType(QT);
+
+ // Transform the type to a pointer as the last step, if necessary.
+ if (Type->isPointer())
+ QT = Context.getPointerType(QT);
+
+ return QT;
+}
+
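A rough worked instance (illustrative values, not from the patch): a vector
RVVType with ScalarTypeKind::SignedInteger, an element bitwidth of 32, and
*getScale() == 2 would map as follows.

    // QT = Context.getIntTypeForBitwidth(32, true);  // 'int'
    // QT = Context.getScalableVectorType(QT, 2);     // 'vscale x 2 x i32',
    // i.e. the LMUL=1 machine type commonly spelled vint32m1_t.
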
+namespace {
+class RISCVIntrinsicManagerImpl : public sema::RISCVIntrinsicManager {
+private:
+ Sema &S;
+ ASTContext &Context;
+ RVVTypeCache TypeCache;
+ bool ConstructedRISCVVBuiltins;
+ bool ConstructedRISCVSiFiveVectorBuiltins;
+
+  // List of all RVV intrinsics.
+ std::vector<RVVIntrinsicDef> IntrinsicList;
+ // Mapping function name to index of IntrinsicList.
+ StringMap<uint16_t> Intrinsics;
+ // Mapping function name to RVVOverloadIntrinsicDef.
+ StringMap<RVVOverloadIntrinsicDef> OverloadIntrinsics;
+
+ // Create RVVIntrinsicDef.
+ void InitRVVIntrinsic(const RVVIntrinsicRecord &Record, StringRef SuffixStr,
+ StringRef OverloadedSuffixStr, bool IsMask,
+ RVVTypes &Types, bool HasPolicy, Policy PolicyAttrs);
+
+ // Create FunctionDecl for a vector intrinsic.
+ void CreateRVVIntrinsicDecl(LookupResult &LR, IdentifierInfo *II,
+ Preprocessor &PP, uint32_t Index,
+ bool IsOverload);
+
+ void ConstructRVVIntrinsics(ArrayRef<RVVIntrinsicRecord> Recs,
+ IntrinsicKind K);
+
+public:
+ RISCVIntrinsicManagerImpl(clang::Sema &S) : S(S), Context(S.Context) {
+ ConstructedRISCVVBuiltins = false;
+ ConstructedRISCVSiFiveVectorBuiltins = false;
+ }
+
+ // Initialize IntrinsicList
+ void InitIntrinsicList() override;
+
+  // If the name is a RISC-V vector intrinsic, create its declaration, insert
+  // it into the symbol table, and return true; otherwise return false.
+ bool CreateIntrinsicIfFound(LookupResult &LR, IdentifierInfo *II,
+ Preprocessor &PP) override;
+};
+} // namespace
+
+void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
+ ArrayRef<RVVIntrinsicRecord> Recs, IntrinsicKind K) {
+ const TargetInfo &TI = Context.getTargetInfo();
+ static const std::pair<const char *, RVVRequire> FeatureCheckList[] = {
+ {"64bit", RVV_REQ_RV64},
+ {"xsfvcp", RVV_REQ_Xsfvcp},
+ {"xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf},
+ {"xsfvfwmaccqqq", RVV_REQ_Xsfvfwmaccqqq},
+ {"xsfvqmaccdod", RVV_REQ_Xsfvqmaccdod},
+ {"xsfvqmaccqoq", RVV_REQ_Xsfvqmaccqoq},
+ {"zvbb", RVV_REQ_Zvbb},
+ {"zvbc", RVV_REQ_Zvbc},
+ {"zvkb", RVV_REQ_Zvkb},
+ {"zvkg", RVV_REQ_Zvkg},
+ {"zvkned", RVV_REQ_Zvkned},
+ {"zvknha", RVV_REQ_Zvknha},
+ {"zvknhb", RVV_REQ_Zvknhb},
+ {"zvksed", RVV_REQ_Zvksed},
+ {"zvksh", RVV_REQ_Zvksh},
+ {"zvfbfwma", RVV_REQ_Zvfbfwma},
+ {"zvfbfmin", RVV_REQ_Zvfbfmin},
+ {"experimental", RVV_REQ_Experimental}};
+
+  // Construction of RVVIntrinsicRecords needs to stay in sync with
+  // createRVVIntrinsics in RISCVVEmitter.cpp.
+ for (auto &Record : Recs) {
+ // Check requirements.
+ if (llvm::any_of(FeatureCheckList, [&](const auto &Item) {
+ return (Record.RequiredExtensions & Item.second) == Item.second &&
+ !TI.hasFeature(Item.first);
+ }))
+ continue;
+
+ // Create Intrinsics for each type and LMUL.
+ BasicType BaseType = BasicType::Unknown;
+ ArrayRef<PrototypeDescriptor> BasicProtoSeq =
+ ProtoSeq2ArrayRef(K, Record.PrototypeIndex, Record.PrototypeLength);
+ ArrayRef<PrototypeDescriptor> SuffixProto =
+ ProtoSeq2ArrayRef(K, Record.SuffixIndex, Record.SuffixLength);
+ ArrayRef<PrototypeDescriptor> OverloadedSuffixProto = ProtoSeq2ArrayRef(
+ K, Record.OverloadedSuffixIndex, Record.OverloadedSuffixSize);
+
+ PolicyScheme UnMaskedPolicyScheme =
+ static_cast<PolicyScheme>(Record.UnMaskedPolicyScheme);
+ PolicyScheme MaskedPolicyScheme =
+ static_cast<PolicyScheme>(Record.MaskedPolicyScheme);
+
+ const Policy DefaultPolicy;
+
+ llvm::SmallVector<PrototypeDescriptor> ProtoSeq =
+ RVVIntrinsic::computeBuiltinTypes(
+ BasicProtoSeq, /*IsMasked=*/false,
+ /*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
+ UnMaskedPolicyScheme, DefaultPolicy, Record.IsTuple);
+
+ llvm::SmallVector<PrototypeDescriptor> ProtoMaskSeq;
+ if (Record.HasMasked)
+ ProtoMaskSeq = RVVIntrinsic::computeBuiltinTypes(
+ BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
+ Record.HasVL, Record.NF, MaskedPolicyScheme, DefaultPolicy,
+ Record.IsTuple);
+
+ bool UnMaskedHasPolicy = UnMaskedPolicyScheme != PolicyScheme::SchemeNone;
+ bool MaskedHasPolicy = MaskedPolicyScheme != PolicyScheme::SchemeNone;
+ SmallVector<Policy> SupportedUnMaskedPolicies =
+ RVVIntrinsic::getSupportedUnMaskedPolicies();
+ SmallVector<Policy> SupportedMaskedPolicies =
+ RVVIntrinsic::getSupportedMaskedPolicies(Record.HasTailPolicy,
+ Record.HasMaskPolicy);
+
+ for (unsigned int TypeRangeMaskShift = 0;
+ TypeRangeMaskShift <= static_cast<unsigned int>(BasicType::MaxOffset);
+ ++TypeRangeMaskShift) {
+ unsigned int BaseTypeI = 1 << TypeRangeMaskShift;
+ BaseType = static_cast<BasicType>(BaseTypeI);
+
+ if ((BaseTypeI & Record.TypeRangeMask) != BaseTypeI)
+ continue;
+
+ if (BaseType == BasicType::Float16) {
+ if ((Record.RequiredExtensions & RVV_REQ_Zvfhmin) == RVV_REQ_Zvfhmin) {
+ if (!TI.hasFeature("zvfhmin"))
+ continue;
+ } else if (!TI.hasFeature("zvfh")) {
+ continue;
+ }
+ }
+
+ // Expanded with different LMUL.
+ for (int Log2LMUL = -3; Log2LMUL <= 3; Log2LMUL++) {
+ if (!(Record.Log2LMULMask & (1 << (Log2LMUL + 3))))
+ continue;
+
+ std::optional<RVVTypes> Types =
+ TypeCache.computeTypes(BaseType, Log2LMUL, Record.NF, ProtoSeq);
+
+        // Skip creating the intrinsic if any of its types are illegal.
+ if (!Types.has_value())
+ continue;
+
+ std::string SuffixStr = RVVIntrinsic::getSuffixStr(
+ TypeCache, BaseType, Log2LMUL, SuffixProto);
+ std::string OverloadedSuffixStr = RVVIntrinsic::getSuffixStr(
+ TypeCache, BaseType, Log2LMUL, OverloadedSuffixProto);
+
+ // Create non-masked intrinsic.
+ InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, false, *Types,
+ UnMaskedHasPolicy, DefaultPolicy);
+
+ // Create non-masked policy intrinsic.
+ if (Record.UnMaskedPolicyScheme != PolicyScheme::SchemeNone) {
+ for (auto P : SupportedUnMaskedPolicies) {
+ llvm::SmallVector<PrototypeDescriptor> PolicyPrototype =
+ RVVIntrinsic::computeBuiltinTypes(
+ BasicProtoSeq, /*IsMasked=*/false,
+ /*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
+ UnMaskedPolicyScheme, P, Record.IsTuple);
+ std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
+ BaseType, Log2LMUL, Record.NF, PolicyPrototype);
+ InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
+ /*IsMask=*/false, *PolicyTypes, UnMaskedHasPolicy,
+ P);
+ }
+ }
+ if (!Record.HasMasked)
+ continue;
+ // Create masked intrinsic.
+ std::optional<RVVTypes> MaskTypes =
+ TypeCache.computeTypes(BaseType, Log2LMUL, Record.NF, ProtoMaskSeq);
+ InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, true,
+ *MaskTypes, MaskedHasPolicy, DefaultPolicy);
+ if (Record.MaskedPolicyScheme == PolicyScheme::SchemeNone)
+ continue;
+ // Create masked policy intrinsic.
+ for (auto P : SupportedMaskedPolicies) {
+ llvm::SmallVector<PrototypeDescriptor> PolicyPrototype =
+ RVVIntrinsic::computeBuiltinTypes(
+ BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
+ Record.HasVL, Record.NF, MaskedPolicyScheme, P,
+ Record.IsTuple);
+ std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
+ BaseType, Log2LMUL, Record.NF, PolicyPrototype);
+ InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
+ /*IsMask=*/true, *PolicyTypes, MaskedHasPolicy, P);
+ }
+ } // End for different LMUL
+ } // End for different TypeRange
+ }
+}
+
+void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
+
+ if (S.RISCV().DeclareRVVBuiltins && !ConstructedRISCVVBuiltins) {
+ ConstructedRISCVVBuiltins = true;
+ ConstructRVVIntrinsics(RVVIntrinsicRecords, IntrinsicKind::RVV);
+ }
+ if (S.RISCV().DeclareSiFiveVectorBuiltins &&
+ !ConstructedRISCVSiFiveVectorBuiltins) {
+ ConstructedRISCVSiFiveVectorBuiltins = true;
+ ConstructRVVIntrinsics(RVSiFiveVectorIntrinsicRecords,
+ IntrinsicKind::SIFIVE_VECTOR);
+ }
+}
+
+// Compute name and signatures for intrinsic with practical types.
+void RISCVIntrinsicManagerImpl::InitRVVIntrinsic(
+ const RVVIntrinsicRecord &Record, StringRef SuffixStr,
+ StringRef OverloadedSuffixStr, bool IsMasked, RVVTypes &Signature,
+ bool HasPolicy, Policy PolicyAttrs) {
+ // Function name, e.g. vadd_vv_i32m1.
+ std::string Name = Record.Name;
+ if (!SuffixStr.empty())
+ Name += "_" + SuffixStr.str();
+
+ // Overloaded function name, e.g. vadd.
+ std::string OverloadedName;
+ if (!Record.OverloadedName)
+ OverloadedName = StringRef(Record.Name).split("_").first.str();
+ else
+ OverloadedName = Record.OverloadedName;
+ if (!OverloadedSuffixStr.empty())
+ OverloadedName += "_" + OverloadedSuffixStr.str();
+
+ // clang built-in function name, e.g. __builtin_rvv_vadd.
+ std::string BuiltinName = std::string(Record.Name);
+
+ RVVIntrinsic::updateNamesAndPolicy(IsMasked, HasPolicy, Name, BuiltinName,
+ OverloadedName, PolicyAttrs,
+ Record.HasFRMRoundModeOp);
+
+ // Put into IntrinsicList.
+ uint16_t Index = IntrinsicList.size();
+ assert(IntrinsicList.size() == (size_t)Index &&
+ "Intrinsics indices overflow.");
+ IntrinsicList.push_back({BuiltinName, Signature});
+
+  // Create the mapping from the function name to its index.
+ Intrinsics.insert({Name, Index});
+
+ // Get the RVVOverloadIntrinsicDef.
+ RVVOverloadIntrinsicDef &OverloadIntrinsicDef =
+ OverloadIntrinsics[OverloadedName];
+
+  // Add the index to the overload set.
+ OverloadIntrinsicDef.Indexes.push_back(Index);
+}
+
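Concretely, following the examples in the comments above (and before any
masked/policy renaming applied by updateNamesAndPolicy): for Record.Name =
"vadd", SuffixStr = "vv_i32m1", and an empty OverloadedSuffixStr,

    // Name           -> "vadd_vv_i32m1"  (key in Intrinsics)
    // OverloadedName -> "vadd"           (key in OverloadIntrinsics)
    // BuiltinName    -> "vadd"           (later aliased to __builtin_rvv_vadd)
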
+void RISCVIntrinsicManagerImpl::CreateRVVIntrinsicDecl(LookupResult &LR,
+ IdentifierInfo *II,
+ Preprocessor &PP,
+ uint32_t Index,
+ bool IsOverload) {
+ ASTContext &Context = S.Context;
+ RVVIntrinsicDef &IDef = IntrinsicList[Index];
+ RVVTypes Sigs = IDef.Signature;
+ size_t SigLength = Sigs.size();
+ RVVType *ReturnType = Sigs[0];
+ QualType RetType = RVVType2Qual(Context, ReturnType);
+ SmallVector<QualType, 8> ArgTypes;
+ QualType BuiltinFuncType;
+
+ // Skip return type, and convert RVVType to QualType for arguments.
+ for (size_t i = 1; i < SigLength; ++i)
+ ArgTypes.push_back(RVVType2Qual(Context, Sigs[i]));
+
+ FunctionProtoType::ExtProtoInfo PI(
+ Context.getDefaultCallingConvention(false, false, true));
+
+ PI.Variadic = false;
+
+ SourceLocation Loc = LR.getNameLoc();
+ BuiltinFuncType = Context.getFunctionType(RetType, ArgTypes, PI);
+ DeclContext *Parent = Context.getTranslationUnitDecl();
+
+ FunctionDecl *RVVIntrinsicDecl = FunctionDecl::Create(
+ Context, Parent, Loc, Loc, II, BuiltinFuncType, /*TInfo=*/nullptr,
+ SC_Extern, S.getCurFPFeatures().isFPConstrained(),
+ /*isInlineSpecified*/ false,
+ /*hasWrittenPrototype*/ true);
+
+ // Create Decl objects for each parameter, adding them to the
+ // FunctionDecl.
+ const auto *FP = cast<FunctionProtoType>(BuiltinFuncType);
+ SmallVector<ParmVarDecl *, 8> ParmList;
+ for (unsigned IParm = 0, E = FP->getNumParams(); IParm != E; ++IParm) {
+ ParmVarDecl *Parm =
+ ParmVarDecl::Create(Context, RVVIntrinsicDecl, Loc, Loc, nullptr,
+ FP->getParamType(IParm), nullptr, SC_None, nullptr);
+ Parm->setScopeInfo(0, IParm);
+ ParmList.push_back(Parm);
+ }
+ RVVIntrinsicDecl->setParams(ParmList);
+
+ // Add function attributes.
+ if (IsOverload)
+ RVVIntrinsicDecl->addAttr(OverloadableAttr::CreateImplicit(Context));
+
+  // Set up the alias to __builtin_rvv_*
+ IdentifierInfo &IntrinsicII =
+ PP.getIdentifierTable().get("__builtin_rvv_" + IDef.BuiltinName);
+ RVVIntrinsicDecl->addAttr(
+ BuiltinAliasAttr::CreateImplicit(S.Context, &IntrinsicII));
+
+ // Add to symbol table.
+ LR.addDecl(RVVIntrinsicDecl);
+}
+
+bool RISCVIntrinsicManagerImpl::CreateIntrinsicIfFound(LookupResult &LR,
+ IdentifierInfo *II,
+ Preprocessor &PP) {
+ StringRef Name = II->getName();
+ if (!Name.consume_front("__riscv_"))
+ return false;
+
+ // Lookup the function name from the overload intrinsics first.
+ auto OvIItr = OverloadIntrinsics.find(Name);
+ if (OvIItr != OverloadIntrinsics.end()) {
+ const RVVOverloadIntrinsicDef &OvIntrinsicDef = OvIItr->second;
+ for (auto Index : OvIntrinsicDef.Indexes)
+ CreateRVVIntrinsicDecl(LR, II, PP, Index,
+ /*IsOverload*/ true);
+
+ // If we added overloads, need to resolve the lookup result.
+ LR.resolveKind();
+ return true;
+ }
+
+ // Lookup the function name from the intrinsics.
+ auto Itr = Intrinsics.find(Name);
+ if (Itr != Intrinsics.end()) {
+ CreateRVVIntrinsicDecl(LR, II, PP, Itr->second,
+ /*IsOverload*/ false);
+ return true;
+ }
+
+  // It's not an RVV intrinsic.
+ return false;
+}
+
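Usage-wise, a sketch: the declarations are materialized lazily at name lookup,
so both the overloaded and the explicitly-typed spellings resolve on first use
(hypothetical operands a, b, vl).

    // vint32m1_t r = __riscv_vadd(a, b, vl);          // overload set "vadd"
    // vint32m1_t s = __riscv_vadd_vv_i32m1(a, b, vl); // exact "vadd_vv_i32m1"
    // CreateIntrinsicIfFound strips "__riscv_" and consults
    // OverloadIntrinsics first, then Intrinsics.
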
+namespace clang {
+std::unique_ptr<clang::sema::RISCVIntrinsicManager>
+CreateRISCVIntrinsicManager(Sema &S) {
+ return std::make_unique<RISCVIntrinsicManagerImpl>(S);
+}
+
+bool SemaRISCV::CheckLMUL(CallExpr *TheCall, unsigned ArgNum) {
+ llvm::APSInt Result;
+
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ if (SemaRef.BuiltinConstantArg(TheCall, ArgNum, Result))
+ return true;
+
+ int64_t Val = Result.getSExtValue();
+ if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
+ return false;
+
+ return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
+ << Arg->getSourceRange();
+}
+
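The accepted immediates mirror the RISC-V vtype LMUL encoding, which is why the
value 4 is rejected:

    // 0 -> m1, 1 -> m2, 2 -> m4, 3 -> m8
    // 5 -> mf8, 6 -> mf4, 7 -> mf2   (4 is reserved)
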
+static bool CheckInvalidVLENandLMUL(const TargetInfo &TI, CallExpr *TheCall,
+ Sema &S, QualType Type, int EGW) {
+ assert((EGW == 128 || EGW == 256) && "EGW can only be 128 or 256 bits");
+
+ // LMUL * VLEN >= EGW
+ ASTContext::BuiltinVectorTypeInfo Info =
+ S.Context.getBuiltinVectorTypeInfo(Type->castAs<BuiltinType>());
+ unsigned ElemSize = S.Context.getTypeSize(Info.ElementType);
+ unsigned MinElemCount = Info.EC.getKnownMinValue();
+
+ unsigned EGS = EGW / ElemSize;
+ // If EGS is less than or equal to the minimum number of elements, then the
+ // type is valid.
+ if (EGS <= MinElemCount)
+ return false;
+
+  // Otherwise, we need vscale to be at least EGS / MinElemCount.
+ assert(EGS % MinElemCount == 0);
+ unsigned VScaleFactor = EGS / MinElemCount;
+ // Vscale is VLEN/RVVBitsPerBlock.
+ unsigned MinRequiredVLEN = VScaleFactor * llvm::RISCV::RVVBitsPerBlock;
+ std::string RequiredExt = "zvl" + std::to_string(MinRequiredVLEN) + "b";
+ if (!TI.hasFeature(RequiredExt))
+ return S.Diag(TheCall->getBeginLoc(),
+ diag::err_riscv_type_requires_extension)
+ << Type << RequiredExt;
+
+ return false;
+}
+
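A worked instance of the arithmetic above (hypothetical type): for EGW = 128
with 32-bit elements and a known-minimum element count of 2,

    // EGS = 128 / 32 = 4; EGS > MinElemCount (2), so VScaleFactor = 4 / 2 = 2
    // MinRequiredVLEN = 2 * RVVBitsPerBlock(64) = 128 -> requires "zvl128b"
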
+bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ ASTContext &Context = getASTContext();
+ // vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx,
+ // vsmul.vv, vsmul.vx are not included for EEW=64 in Zve64*.
+ switch (BuiltinID) {
+ default:
+ break;
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_m:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_m:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: {
+ ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(
+ TheCall->getType()->castAs<BuiltinType>());
+
+ if (Context.getTypeSize(Info.ElementType) == 64 && !TI.hasFeature("v"))
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_riscv_builtin_requires_extension)
+ << /* IsExtension */ true << TheCall->getSourceRange() << "v";
+
+ break;
+ }
+ }
+
+ switch (BuiltinID) {
+ case RISCVVector::BI__builtin_rvv_vsetvli:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 3) ||
+ CheckLMUL(TheCall, 2);
+ case RISCVVector::BI__builtin_rvv_vsetvlimax:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ CheckLMUL(TheCall, 1);
+ case RISCVVector::BI__builtin_rvv_vget_v: {
+ ASTContext::BuiltinVectorTypeInfo ResVecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getType().getCanonicalType().getTypePtr()));
+ ASTContext::BuiltinVectorTypeInfo VecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getArg(0)->getType().getCanonicalType().getTypePtr()));
+ unsigned MaxIndex;
+ if (VecInfo.NumVectors != 1) // vget for tuple type
+ MaxIndex = VecInfo.NumVectors;
+ else // vget for non-tuple type
+ MaxIndex = (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) /
+ (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors);
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
+ }
+ case RISCVVector::BI__builtin_rvv_vset_v: {
+ ASTContext::BuiltinVectorTypeInfo ResVecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getType().getCanonicalType().getTypePtr()));
+ ASTContext::BuiltinVectorTypeInfo VecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getArg(2)->getType().getCanonicalType().getTypePtr()));
+ unsigned MaxIndex;
+ if (ResVecInfo.NumVectors != 1) // vset for tuple type
+ MaxIndex = ResVecInfo.NumVectors;
+    else // vset for non-tuple type
+ MaxIndex = (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) /
+ (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors);
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
+ }
+ // Vector Crypto
+ case RISCVVector::BI__builtin_rvv_vaeskf1_vi_tu:
+ case RISCVVector::BI__builtin_rvv_vaeskf2_vi_tu:
+ case RISCVVector::BI__builtin_rvv_vaeskf2_vi:
+ case RISCVVector::BI__builtin_rvv_vsm4k_vi_tu: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ QualType Op2Type = TheCall->getArg(1)->getType();
+ return CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op1Type, 128) ||
+ CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op2Type, 128) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31);
+ }
+ case RISCVVector::BI__builtin_rvv_vsm3c_vi_tu:
+ case RISCVVector::BI__builtin_rvv_vsm3c_vi: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ return CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op1Type, 256) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31);
+ }
+ case RISCVVector::BI__builtin_rvv_vaeskf1_vi:
+ case RISCVVector::BI__builtin_rvv_vsm4k_vi: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ return CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op1Type, 128) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31);
+ }
+ case RISCVVector::BI__builtin_rvv_vaesdf_vv:
+ case RISCVVector::BI__builtin_rvv_vaesdf_vs:
+ case RISCVVector::BI__builtin_rvv_vaesdm_vv:
+ case RISCVVector::BI__builtin_rvv_vaesdm_vs:
+ case RISCVVector::BI__builtin_rvv_vaesef_vv:
+ case RISCVVector::BI__builtin_rvv_vaesef_vs:
+ case RISCVVector::BI__builtin_rvv_vaesem_vv:
+ case RISCVVector::BI__builtin_rvv_vaesem_vs:
+ case RISCVVector::BI__builtin_rvv_vaesz_vs:
+ case RISCVVector::BI__builtin_rvv_vsm4r_vv:
+ case RISCVVector::BI__builtin_rvv_vsm4r_vs:
+ case RISCVVector::BI__builtin_rvv_vaesdf_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaesdf_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vaesdm_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaesdm_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vaesef_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaesef_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vaesem_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaesem_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vaesz_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vsm4r_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsm4r_vs_tu: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ QualType Op2Type = TheCall->getArg(1)->getType();
+ return CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op1Type, 128) ||
+ CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op2Type, 128);
+ }
+ case RISCVVector::BI__builtin_rvv_vsha2ch_vv:
+ case RISCVVector::BI__builtin_rvv_vsha2cl_vv:
+ case RISCVVector::BI__builtin_rvv_vsha2ms_vv:
+ case RISCVVector::BI__builtin_rvv_vsha2ch_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsha2cl_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsha2ms_vv_tu: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ QualType Op2Type = TheCall->getArg(1)->getType();
+ QualType Op3Type = TheCall->getArg(2)->getType();
+ ASTContext::BuiltinVectorTypeInfo Info =
+ Context.getBuiltinVectorTypeInfo(Op1Type->castAs<BuiltinType>());
+ uint64_t ElemSize = Context.getTypeSize(Info.ElementType);
+ if (ElemSize == 64 && !TI.hasFeature("zvknhb"))
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_riscv_builtin_requires_extension)
+             << /* IsExtension */ true << TheCall->getSourceRange() << "zvknhb";
+
+ return CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op1Type,
+ ElemSize * 4) ||
+ CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op2Type,
+ ElemSize * 4) ||
+ CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op3Type, ElemSize * 4);
+ }
+
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se:
+ // bit_27_26, bit_24_20, bit_11_7, simm5, sew, log2lmul
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, -16, 15) ||
+ CheckLMUL(TheCall, 5);
+ case RISCVVector::BI__builtin_rvv_sf_vc_iv_se:
+ // bit_27_26, bit_11_7, vs2, simm5
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_i:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_i_se:
+ // bit_27_26, bit_24_20, simm5
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_iv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_iv_se:
+ // bit_27_26, vs2, simm5
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_ivv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_ivw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw_se:
+ // bit_27_26, vd, vs2, simm5
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se:
+ // bit_27_26, bit_24_20, bit_11_7, xs1, sew, log2lmul
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31) ||
+ CheckLMUL(TheCall, 5);
+ case RISCVVector::BI__builtin_rvv_sf_vc_xv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_vv_se:
+ // bit_27_26, bit_11_7, vs2, xs1/vs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_x:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_x_se:
+    // bit_27_26, bit_24_20, xs1
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case RISCVVector::BI__builtin_rvv_sf_vc_vvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_xvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_vvw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_xvw_se:
+ // bit_27_26, vd, vs2, xs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vv_se:
+ // bit_27_26, vs2, xs1/vs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw_se:
+ // bit_27_26, vd, vs2, xs1/vs1
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3);
+ case RISCVVector::BI__builtin_rvv_sf_vc_fv_se:
+ // bit_26, bit_11_7, vs2, fs1
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 1) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case RISCVVector::BI__builtin_rvv_sf_vc_fvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_fvw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw_se:
+ // bit_26, vd, vs2, fs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fv_se:
+ // bit_26, vs2, fs1
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 1);
+ // Check if byteselect is in [0, 3]
+ case RISCV::BI__builtin_riscv_aes32dsi:
+ case RISCV::BI__builtin_riscv_aes32dsmi:
+ case RISCV::BI__builtin_riscv_aes32esi:
+ case RISCV::BI__builtin_riscv_aes32esmi:
+ case RISCV::BI__builtin_riscv_sm4ks:
+ case RISCV::BI__builtin_riscv_sm4ed:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3);
+ // Check if rnum is in [0, 10]
+ case RISCV::BI__builtin_riscv_aes64ks1i:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 10);
+ // Check if value range for vxrm is in [0, 3]
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx:
+ case RISCVVector::BI__builtin_rvv_vasub_vv:
+ case RISCVVector::BI__builtin_rvv_vasub_vx:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx:
+ case RISCVVector::BI__builtin_rvv_vssra_vv:
+ case RISCVVector::BI__builtin_rvv_vssra_vx:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3);
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_tu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_tu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_tu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_tu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_m:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_m:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_m:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_m:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_m:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_m:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_m:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_m:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_m:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_m:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_m:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 3);
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_mu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_mu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_mu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_tum:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_tum:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_tum:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 4, 0, 3);
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 4);
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_m:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 4);
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_mu:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 4);
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_mu:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 4, 0, 4);
+ case RISCV::BI__builtin_riscv_ntl_load:
+ case RISCV::BI__builtin_riscv_ntl_store:
+ DeclRefExpr *DRE =
+ cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+ assert((BuiltinID == RISCV::BI__builtin_riscv_ntl_store ||
+ BuiltinID == RISCV::BI__builtin_riscv_ntl_load) &&
+ "Unexpected RISC-V nontemporal load/store builtin!");
+ bool IsStore = BuiltinID == RISCV::BI__builtin_riscv_ntl_store;
+ unsigned NumArgs = IsStore ? 3 : 2;
+
+ if (SemaRef.checkArgCountAtLeast(TheCall, NumArgs - 1))
+ return true;
+
+ if (SemaRef.checkArgCountAtMost(TheCall, NumArgs))
+ return true;
+
+ // Domain value should be compile-time constant.
+ // 2 <= domain <= 5
+ if (TheCall->getNumArgs() == NumArgs &&
+ SemaRef.BuiltinConstantArgRange(TheCall, NumArgs - 1, 2, 5))
+ return true;
+
+ Expr *PointerArg = TheCall->getArg(0);
+ ExprResult PointerArgResult =
+ SemaRef.DefaultFunctionArrayLvalueConversion(PointerArg);
+
+ if (PointerArgResult.isInvalid())
+ return true;
+ PointerArg = PointerArgResult.get();
+
+ const PointerType *PtrType = PointerArg->getType()->getAs<PointerType>();
+ if (!PtrType) {
+ Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return true;
+ }
+
+ QualType ValType = PtrType->getPointeeType();
+ ValType = ValType.getUnqualifiedType();
+ if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
+ !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
+ !ValType->isVectorType() && !ValType->isRVVSizelessBuiltinType()) {
+ Diag(DRE->getBeginLoc(),
+ diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return true;
+ }
+
+ if (!IsStore) {
+ TheCall->setType(ValType);
+ return false;
+ }
+
+ ExprResult ValArg = TheCall->getArg(1);
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(
+ Context, ValType, /*consume*/ false);
+ ValArg =
+ SemaRef.PerformCopyInitialization(Entity, SourceLocation(), ValArg);
+ if (ValArg.isInvalid())
+ return true;
+
+ TheCall->setArg(1, ValArg.get());
+ TheCall->setType(Context.VoidTy);
+ return false;
+ }
+
+ return false;
+}
+
+void SemaRISCV::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
+ const llvm::StringMap<bool> &FeatureMap) {
+ ASTContext::BuiltinVectorTypeInfo Info =
+ SemaRef.Context.getBuiltinVectorTypeInfo(Ty->castAs<BuiltinType>());
+ unsigned EltSize = SemaRef.Context.getTypeSize(Info.ElementType);
+ unsigned MinElts = Info.EC.getKnownMinValue();
+
+ if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Double) &&
+ !FeatureMap.lookup("zve64d"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64d";
+  // (ELEN, LMUL) pairs of (8, mf8), (16, mf4), (32, mf2), (64, m1) require at
+  // least zve64x.
+ else if (((EltSize == 64 && Info.ElementType->isIntegerType()) ||
+ MinElts == 1) &&
+ !FeatureMap.lookup("zve64x"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64x";
+ else if (Info.ElementType->isFloat16Type() && !FeatureMap.lookup("zvfh") &&
+ !FeatureMap.lookup("zvfhmin"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D)
+ << Ty << "zvfh or zvfhmin";
+ else if (Info.ElementType->isBFloat16Type() &&
+ !FeatureMap.lookup("experimental-zvfbfmin"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zvfbfmin";
+ else if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Float) &&
+ !FeatureMap.lookup("zve32f"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32f";
+  // Given that the caller already checked isRVVType() before calling this
+  // function, if we don't have at least zve32x supported, emit an error.
+ else if (!FeatureMap.lookup("zve32x"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32x";
+}
+
+/// Are the two types RVV-bitcast-compatible types? I.e. is bitcasting from the
+/// first RVV type (e.g. an RVV scalable type) to the second type (e.g. an RVV
+/// VLS type) allowed?
+///
+/// This will also return false if the two given types do not make sense from
+/// the perspective of RVV bitcasts.
+bool SemaRISCV::isValidRVVBitcast(QualType srcTy, QualType destTy) {
+ assert(srcTy->isVectorType() || destTy->isVectorType());
+
+ auto ValidScalableConversion = [](QualType FirstType, QualType SecondType) {
+ if (!FirstType->isRVVSizelessBuiltinType())
+ return false;
+
+ const auto *VecTy = SecondType->getAs<VectorType>();
+ return VecTy && VecTy->getVectorKind() == VectorKind::RVVFixedLengthData;
+ };
+
+ return ValidScalableConversion(srcTy, destTy) ||
+ ValidScalableConversion(destTy, srcTy);
+}
+
+SemaRISCV::SemaRISCV(Sema &S) : SemaBase(S) {}
+
+} // namespace clang
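
Note: the constant-argument checks added above surface to users as compile-time
errors on out-of-range immediates. A minimal sketch of the effect, assuming the
standard <riscv_vector.h> spellings (illustrative only, not part of this change):

    #include <riscv_vector.h>

    vint32m1_t avg(vint32m1_t a, vint32m1_t b, size_t vl) {
      // vxrm (argument index 2) must be a constant in [0, 3];
      // __RISCV_VXRM_RNU == 0 is accepted.
      return __riscv_vaadd(a, b, __RISCV_VXRM_RNU, vl);
      // Passing 4 instead would fail BuiltinConstantArgRange(TheCall, 2, 0, 3):
      // "argument value 4 is outside the valid range [0, 3]".
    }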
diff --git a/clang/lib/Sema/SemaRISCVVectorLookup.cpp b/clang/lib/Sema/SemaRISCVVectorLookup.cpp
deleted file mode 100644
index 26e13e87b1d6..000000000000
--- a/clang/lib/Sema/SemaRISCVVectorLookup.cpp
+++ /dev/null
@@ -1,504 +0,0 @@
-//==- SemaRISCVVectorLookup.cpp - Name Lookup for RISC-V Vector Intrinsic -==//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements name lookup for RISC-V vector intrinsics.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/AST/ASTContext.h"
-#include "clang/AST/Decl.h"
-#include "clang/Basic/Builtins.h"
-#include "clang/Basic/TargetInfo.h"
-#include "clang/Lex/Preprocessor.h"
-#include "clang/Sema/Lookup.h"
-#include "clang/Sema/RISCVIntrinsicManager.h"
-#include "clang/Sema/Sema.h"
-#include "clang/Support/RISCVVIntrinsicUtils.h"
-#include "llvm/ADT/SmallVector.h"
-#include <optional>
-#include <string>
-#include <vector>
-
-using namespace llvm;
-using namespace clang;
-using namespace clang::RISCV;
-
-using IntrinsicKind = sema::RISCVIntrinsicManager::IntrinsicKind;
-
-namespace {
-
-// Function definition of an RVV intrinsic.
-struct RVVIntrinsicDef {
- /// Mapping to which clang built-in function, e.g. __builtin_rvv_vadd.
- std::string BuiltinName;
-
- /// Function signature, first element is return type.
- RVVTypes Signature;
-};
-
-struct RVVOverloadIntrinsicDef {
- // Indexes of RISCVIntrinsicManagerImpl::IntrinsicList.
- SmallVector<uint16_t, 8> Indexes;
-};
-
-} // namespace
-
-static const PrototypeDescriptor RVVSignatureTable[] = {
-#define DECL_SIGNATURE_TABLE
-#include "clang/Basic/riscv_vector_builtin_sema.inc"
-#undef DECL_SIGNATURE_TABLE
-};
-
-static const PrototypeDescriptor RVSiFiveVectorSignatureTable[] = {
-#define DECL_SIGNATURE_TABLE
-#include "clang/Basic/riscv_sifive_vector_builtin_sema.inc"
-#undef DECL_SIGNATURE_TABLE
-};
-
-static const RVVIntrinsicRecord RVVIntrinsicRecords[] = {
-#define DECL_INTRINSIC_RECORDS
-#include "clang/Basic/riscv_vector_builtin_sema.inc"
-#undef DECL_INTRINSIC_RECORDS
-};
-
-static const RVVIntrinsicRecord RVSiFiveVectorIntrinsicRecords[] = {
-#define DECL_INTRINSIC_RECORDS
-#include "clang/Basic/riscv_sifive_vector_builtin_sema.inc"
-#undef DECL_INTRINSIC_RECORDS
-};
-
-// Get a subsequence of the signature table.
-static ArrayRef<PrototypeDescriptor>
-ProtoSeq2ArrayRef(IntrinsicKind K, uint16_t Index, uint8_t Length) {
- switch (K) {
- case IntrinsicKind::RVV:
- return ArrayRef(&RVVSignatureTable[Index], Length);
- case IntrinsicKind::SIFIVE_VECTOR:
- return ArrayRef(&RVSiFiveVectorSignatureTable[Index], Length);
- }
- llvm_unreachable("Unhandled IntrinsicKind");
-}
-
-static QualType RVVType2Qual(ASTContext &Context, const RVVType *Type) {
- QualType QT;
- switch (Type->getScalarType()) {
- case ScalarTypeKind::Void:
- QT = Context.VoidTy;
- break;
- case ScalarTypeKind::Size_t:
- QT = Context.getSizeType();
- break;
- case ScalarTypeKind::Ptrdiff_t:
- QT = Context.getPointerDiffType();
- break;
- case ScalarTypeKind::UnsignedLong:
- QT = Context.UnsignedLongTy;
- break;
- case ScalarTypeKind::SignedLong:
- QT = Context.LongTy;
- break;
- case ScalarTypeKind::Boolean:
- QT = Context.BoolTy;
- break;
- case ScalarTypeKind::SignedInteger:
- QT = Context.getIntTypeForBitwidth(Type->getElementBitwidth(), true);
- break;
- case ScalarTypeKind::UnsignedInteger:
- QT = Context.getIntTypeForBitwidth(Type->getElementBitwidth(), false);
- break;
- case ScalarTypeKind::BFloat:
- QT = Context.BFloat16Ty;
- break;
- case ScalarTypeKind::Float:
- switch (Type->getElementBitwidth()) {
- case 64:
- QT = Context.DoubleTy;
- break;
- case 32:
- QT = Context.FloatTy;
- break;
- case 16:
- QT = Context.Float16Ty;
- break;
- default:
- llvm_unreachable("Unsupported floating point width.");
- }
- break;
- case Invalid:
- case Undefined:
- llvm_unreachable("Unhandled type.");
- }
- if (Type->isVector()) {
- if (Type->isTuple())
- QT = Context.getScalableVectorType(QT, *Type->getScale(), Type->getNF());
- else
- QT = Context.getScalableVectorType(QT, *Type->getScale());
- }
-
- if (Type->isConstant())
- QT = Context.getConstType(QT);
-
- // Transform the type to a pointer as the last step, if necessary.
- if (Type->isPointer())
- QT = Context.getPointerType(QT);
-
- return QT;
-}
-
-namespace {
-class RISCVIntrinsicManagerImpl : public sema::RISCVIntrinsicManager {
-private:
- Sema &S;
- ASTContext &Context;
- RVVTypeCache TypeCache;
- bool ConstructedRISCVVBuiltins;
- bool ConstructedRISCVSiFiveVectorBuiltins;
-
-  // List of all RVV intrinsics.
- std::vector<RVVIntrinsicDef> IntrinsicList;
- // Mapping function name to index of IntrinsicList.
- StringMap<uint16_t> Intrinsics;
- // Mapping function name to RVVOverloadIntrinsicDef.
- StringMap<RVVOverloadIntrinsicDef> OverloadIntrinsics;
-
-
- // Create RVVIntrinsicDef.
- void InitRVVIntrinsic(const RVVIntrinsicRecord &Record, StringRef SuffixStr,
- StringRef OverloadedSuffixStr, bool IsMask,
- RVVTypes &Types, bool HasPolicy, Policy PolicyAttrs);
-
- // Create FunctionDecl for a vector intrinsic.
- void CreateRVVIntrinsicDecl(LookupResult &LR, IdentifierInfo *II,
- Preprocessor &PP, uint32_t Index,
- bool IsOverload);
-
- void ConstructRVVIntrinsics(ArrayRef<RVVIntrinsicRecord> Recs,
- IntrinsicKind K);
-
-public:
- RISCVIntrinsicManagerImpl(clang::Sema &S) : S(S), Context(S.Context) {
- ConstructedRISCVVBuiltins = false;
- ConstructedRISCVSiFiveVectorBuiltins = false;
- }
-
- // Initialize IntrinsicList
- void InitIntrinsicList() override;
-
-  // If found, create the RISC-V vector intrinsic, insert it into the symbol
-  // table, and return true; otherwise return false.
- bool CreateIntrinsicIfFound(LookupResult &LR, IdentifierInfo *II,
- Preprocessor &PP) override;
-};
-} // namespace
-
-void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
- ArrayRef<RVVIntrinsicRecord> Recs, IntrinsicKind K) {
- const TargetInfo &TI = Context.getTargetInfo();
- static const std::pair<const char *, RVVRequire> FeatureCheckList[] = {
- {"64bit", RVV_REQ_RV64},
- {"xsfvcp", RVV_REQ_Xsfvcp},
- {"xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf},
- {"xsfvfwmaccqqq", RVV_REQ_Xsfvfwmaccqqq},
- {"xsfvqmaccdod", RVV_REQ_Xsfvqmaccdod},
- {"xsfvqmaccqoq", RVV_REQ_Xsfvqmaccqoq},
- {"zvbb", RVV_REQ_Zvbb},
- {"zvbc", RVV_REQ_Zvbc},
- {"zvkb", RVV_REQ_Zvkb},
- {"zvkg", RVV_REQ_Zvkg},
- {"zvkned", RVV_REQ_Zvkned},
- {"zvknha", RVV_REQ_Zvknha},
- {"zvknhb", RVV_REQ_Zvknhb},
- {"zvksed", RVV_REQ_Zvksed},
- {"zvksh", RVV_REQ_Zvksh},
- {"zvfbfwma", RVV_REQ_Zvfbfwma},
- {"zvfbfmin", RVV_REQ_Zvfbfmin},
- {"experimental", RVV_REQ_Experimental}};
-
-  // Construction of RVVIntrinsicRecords needs to stay in sync with
-  // createRVVIntrinsics in RISCVVEmitter.cpp.
- for (auto &Record : Recs) {
- // Check requirements.
- if (llvm::any_of(FeatureCheckList, [&](const auto &Item) {
- return (Record.RequiredExtensions & Item.second) == Item.second &&
- !TI.hasFeature(Item.first);
- }))
- continue;
-
- // Create Intrinsics for each type and LMUL.
- BasicType BaseType = BasicType::Unknown;
- ArrayRef<PrototypeDescriptor> BasicProtoSeq =
- ProtoSeq2ArrayRef(K, Record.PrototypeIndex, Record.PrototypeLength);
- ArrayRef<PrototypeDescriptor> SuffixProto =
- ProtoSeq2ArrayRef(K, Record.SuffixIndex, Record.SuffixLength);
- ArrayRef<PrototypeDescriptor> OverloadedSuffixProto = ProtoSeq2ArrayRef(
- K, Record.OverloadedSuffixIndex, Record.OverloadedSuffixSize);
-
- PolicyScheme UnMaskedPolicyScheme =
- static_cast<PolicyScheme>(Record.UnMaskedPolicyScheme);
- PolicyScheme MaskedPolicyScheme =
- static_cast<PolicyScheme>(Record.MaskedPolicyScheme);
-
- const Policy DefaultPolicy;
-
- llvm::SmallVector<PrototypeDescriptor> ProtoSeq =
- RVVIntrinsic::computeBuiltinTypes(
- BasicProtoSeq, /*IsMasked=*/false,
- /*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
- UnMaskedPolicyScheme, DefaultPolicy, Record.IsTuple);
-
- llvm::SmallVector<PrototypeDescriptor> ProtoMaskSeq;
- if (Record.HasMasked)
- ProtoMaskSeq = RVVIntrinsic::computeBuiltinTypes(
- BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
- Record.HasVL, Record.NF, MaskedPolicyScheme, DefaultPolicy,
- Record.IsTuple);
-
- bool UnMaskedHasPolicy = UnMaskedPolicyScheme != PolicyScheme::SchemeNone;
- bool MaskedHasPolicy = MaskedPolicyScheme != PolicyScheme::SchemeNone;
- SmallVector<Policy> SupportedUnMaskedPolicies =
- RVVIntrinsic::getSupportedUnMaskedPolicies();
- SmallVector<Policy> SupportedMaskedPolicies =
- RVVIntrinsic::getSupportedMaskedPolicies(Record.HasTailPolicy,
- Record.HasMaskPolicy);
-
- for (unsigned int TypeRangeMaskShift = 0;
- TypeRangeMaskShift <= static_cast<unsigned int>(BasicType::MaxOffset);
- ++TypeRangeMaskShift) {
- unsigned int BaseTypeI = 1 << TypeRangeMaskShift;
- BaseType = static_cast<BasicType>(BaseTypeI);
-
- if ((BaseTypeI & Record.TypeRangeMask) != BaseTypeI)
- continue;
-
- if (BaseType == BasicType::Float16) {
- if ((Record.RequiredExtensions & RVV_REQ_Zvfhmin) == RVV_REQ_Zvfhmin) {
- if (!TI.hasFeature("zvfhmin"))
- continue;
- } else if (!TI.hasFeature("zvfh")) {
- continue;
- }
- }
-
- // Expanded with different LMUL.
- for (int Log2LMUL = -3; Log2LMUL <= 3; Log2LMUL++) {
- if (!(Record.Log2LMULMask & (1 << (Log2LMUL + 3))))
- continue;
-
- std::optional<RVVTypes> Types =
- TypeCache.computeTypes(BaseType, Log2LMUL, Record.NF, ProtoSeq);
-
-        // Skip creating a new intrinsic if any of the types are illegal.
- if (!Types.has_value())
- continue;
-
- std::string SuffixStr = RVVIntrinsic::getSuffixStr(
- TypeCache, BaseType, Log2LMUL, SuffixProto);
- std::string OverloadedSuffixStr = RVVIntrinsic::getSuffixStr(
- TypeCache, BaseType, Log2LMUL, OverloadedSuffixProto);
-
- // Create non-masked intrinsic.
- InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, false, *Types,
- UnMaskedHasPolicy, DefaultPolicy);
-
- // Create non-masked policy intrinsic.
- if (Record.UnMaskedPolicyScheme != PolicyScheme::SchemeNone) {
- for (auto P : SupportedUnMaskedPolicies) {
- llvm::SmallVector<PrototypeDescriptor> PolicyPrototype =
- RVVIntrinsic::computeBuiltinTypes(
- BasicProtoSeq, /*IsMasked=*/false,
- /*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
- UnMaskedPolicyScheme, P, Record.IsTuple);
- std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
- BaseType, Log2LMUL, Record.NF, PolicyPrototype);
- InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
- /*IsMask=*/false, *PolicyTypes, UnMaskedHasPolicy,
- P);
- }
- }
- if (!Record.HasMasked)
- continue;
- // Create masked intrinsic.
- std::optional<RVVTypes> MaskTypes =
- TypeCache.computeTypes(BaseType, Log2LMUL, Record.NF, ProtoMaskSeq);
- InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, true,
- *MaskTypes, MaskedHasPolicy, DefaultPolicy);
- if (Record.MaskedPolicyScheme == PolicyScheme::SchemeNone)
- continue;
- // Create masked policy intrinsic.
- for (auto P : SupportedMaskedPolicies) {
- llvm::SmallVector<PrototypeDescriptor> PolicyPrototype =
- RVVIntrinsic::computeBuiltinTypes(
- BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
- Record.HasVL, Record.NF, MaskedPolicyScheme, P,
- Record.IsTuple);
- std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
- BaseType, Log2LMUL, Record.NF, PolicyPrototype);
- InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
- /*IsMask=*/true, *PolicyTypes, MaskedHasPolicy, P);
- }
- } // End for different LMUL
- } // End for different TypeRange
- }
-}
-
-void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
-
- if (S.DeclareRISCVVBuiltins && !ConstructedRISCVVBuiltins) {
- ConstructedRISCVVBuiltins = true;
- ConstructRVVIntrinsics(RVVIntrinsicRecords,
- IntrinsicKind::RVV);
- }
- if (S.DeclareRISCVSiFiveVectorBuiltins &&
- !ConstructedRISCVSiFiveVectorBuiltins) {
- ConstructedRISCVSiFiveVectorBuiltins = true;
- ConstructRVVIntrinsics(RVSiFiveVectorIntrinsicRecords,
- IntrinsicKind::SIFIVE_VECTOR);
- }
-}
-
-// Compute the name and signature for an intrinsic with concrete types.
-void RISCVIntrinsicManagerImpl::InitRVVIntrinsic(
- const RVVIntrinsicRecord &Record, StringRef SuffixStr,
- StringRef OverloadedSuffixStr, bool IsMasked, RVVTypes &Signature,
- bool HasPolicy, Policy PolicyAttrs) {
- // Function name, e.g. vadd_vv_i32m1.
- std::string Name = Record.Name;
- if (!SuffixStr.empty())
- Name += "_" + SuffixStr.str();
-
- // Overloaded function name, e.g. vadd.
- std::string OverloadedName;
- if (!Record.OverloadedName)
- OverloadedName = StringRef(Record.Name).split("_").first.str();
- else
- OverloadedName = Record.OverloadedName;
- if (!OverloadedSuffixStr.empty())
- OverloadedName += "_" + OverloadedSuffixStr.str();
-
- // clang built-in function name, e.g. __builtin_rvv_vadd.
- std::string BuiltinName = std::string(Record.Name);
-
- RVVIntrinsic::updateNamesAndPolicy(IsMasked, HasPolicy, Name, BuiltinName,
- OverloadedName, PolicyAttrs,
- Record.HasFRMRoundModeOp);
-
- // Put into IntrinsicList.
- uint16_t Index = IntrinsicList.size();
- assert(IntrinsicList.size() == (size_t)Index &&
- "Intrinsics indices overflow.");
- IntrinsicList.push_back({BuiltinName, Signature});
-
-  // Create the mapping into Intrinsics.
- Intrinsics.insert({Name, Index});
-
- // Get the RVVOverloadIntrinsicDef.
- RVVOverloadIntrinsicDef &OverloadIntrinsicDef =
- OverloadIntrinsics[OverloadedName];
-
-  // And record the index.
- OverloadIntrinsicDef.Indexes.push_back(Index);
-}
-
-void RISCVIntrinsicManagerImpl::CreateRVVIntrinsicDecl(LookupResult &LR,
- IdentifierInfo *II,
- Preprocessor &PP,
- uint32_t Index,
- bool IsOverload) {
- ASTContext &Context = S.Context;
- RVVIntrinsicDef &IDef = IntrinsicList[Index];
- RVVTypes Sigs = IDef.Signature;
- size_t SigLength = Sigs.size();
- RVVType *ReturnType = Sigs[0];
- QualType RetType = RVVType2Qual(Context, ReturnType);
- SmallVector<QualType, 8> ArgTypes;
- QualType BuiltinFuncType;
-
- // Skip return type, and convert RVVType to QualType for arguments.
- for (size_t i = 1; i < SigLength; ++i)
- ArgTypes.push_back(RVVType2Qual(Context, Sigs[i]));
-
- FunctionProtoType::ExtProtoInfo PI(
- Context.getDefaultCallingConvention(false, false, true));
-
- PI.Variadic = false;
-
- SourceLocation Loc = LR.getNameLoc();
- BuiltinFuncType = Context.getFunctionType(RetType, ArgTypes, PI);
- DeclContext *Parent = Context.getTranslationUnitDecl();
-
- FunctionDecl *RVVIntrinsicDecl = FunctionDecl::Create(
- Context, Parent, Loc, Loc, II, BuiltinFuncType, /*TInfo=*/nullptr,
- SC_Extern, S.getCurFPFeatures().isFPConstrained(),
- /*isInlineSpecified*/ false,
- /*hasWrittenPrototype*/ true);
-
- // Create Decl objects for each parameter, adding them to the
- // FunctionDecl.
- const auto *FP = cast<FunctionProtoType>(BuiltinFuncType);
- SmallVector<ParmVarDecl *, 8> ParmList;
- for (unsigned IParm = 0, E = FP->getNumParams(); IParm != E; ++IParm) {
- ParmVarDecl *Parm =
- ParmVarDecl::Create(Context, RVVIntrinsicDecl, Loc, Loc, nullptr,
- FP->getParamType(IParm), nullptr, SC_None, nullptr);
- Parm->setScopeInfo(0, IParm);
- ParmList.push_back(Parm);
- }
- RVVIntrinsicDecl->setParams(ParmList);
-
- // Add function attributes.
- if (IsOverload)
- RVVIntrinsicDecl->addAttr(OverloadableAttr::CreateImplicit(Context));
-
- // Setup alias to __builtin_rvv_*
- IdentifierInfo &IntrinsicII =
- PP.getIdentifierTable().get("__builtin_rvv_" + IDef.BuiltinName);
- RVVIntrinsicDecl->addAttr(
- BuiltinAliasAttr::CreateImplicit(S.Context, &IntrinsicII));
-
- // Add to symbol table.
- LR.addDecl(RVVIntrinsicDecl);
-}
-
-bool RISCVIntrinsicManagerImpl::CreateIntrinsicIfFound(LookupResult &LR,
- IdentifierInfo *II,
- Preprocessor &PP) {
- StringRef Name = II->getName();
- if (!Name.consume_front("__riscv_"))
- return false;
-
- // Lookup the function name from the overload intrinsics first.
- auto OvIItr = OverloadIntrinsics.find(Name);
- if (OvIItr != OverloadIntrinsics.end()) {
- const RVVOverloadIntrinsicDef &OvIntrinsicDef = OvIItr->second;
- for (auto Index : OvIntrinsicDef.Indexes)
- CreateRVVIntrinsicDecl(LR, II, PP, Index,
- /*IsOverload*/ true);
-
- // If we added overloads, need to resolve the lookup result.
- LR.resolveKind();
- return true;
- }
-
- // Lookup the function name from the intrinsics.
- auto Itr = Intrinsics.find(Name);
- if (Itr != Intrinsics.end()) {
- CreateRVVIntrinsicDecl(LR, II, PP, Itr->second,
- /*IsOverload*/ false);
- return true;
- }
-
-  // It's not an RVV intrinsic.
- return false;
-}
-
-namespace clang {
-std::unique_ptr<clang::sema::RISCVIntrinsicManager>
-CreateRISCVIntrinsicManager(Sema &S) {
- return std::make_unique<RISCVIntrinsicManagerImpl>(S);
-}
-} // namespace clang
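
Note: the lookup scheme in the deleted file (which now lives in SemaRISCV.cpp)
creates declarations lazily for both intrinsic spellings on first use. A short
sketch of what that supports (illustrative only):

    #include <riscv_vector.h>

    vint32m1_t add(vint32m1_t a, vint32m1_t b, size_t vl) {
      // Exact name: resolved to a single lazily created declaration.
      vint32m1_t x = __riscv_vadd_vv_i32m1(a, b, vl);
      // Overloaded name: a set of declarations, picked by overload resolution.
      return __riscv_vadd(x, b, vl);
    }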
diff --git a/clang/lib/Sema/SemaStmtAttr.cpp b/clang/lib/Sema/SemaStmtAttr.cpp
index 36f8ecadcfab..8735d96c8407 100644
--- a/clang/lib/Sema/SemaStmtAttr.cpp
+++ b/clang/lib/Sema/SemaStmtAttr.cpp
@@ -665,7 +665,8 @@ bool Sema::CheckRebuiltStmtAttributes(ArrayRef<const Attr *> Attrs) {
ExprResult Sema::ActOnCXXAssumeAttr(Stmt *St, const ParsedAttr &A,
SourceRange Range) {
if (A.getNumArgs() != 1 || !A.getArgAsExpr(0)) {
- Diag(A.getLoc(), diag::err_assume_attr_args) << A.getAttrName() << Range;
+ Diag(A.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << A.getAttrName() << 1 << Range;
return ExprError();
}
@@ -682,8 +683,11 @@ ExprResult Sema::ActOnCXXAssumeAttr(Stmt *St, const ParsedAttr &A,
Assumption = Res.get();
}
- if (!getLangOpts().CPlusPlus23)
+ if (!getLangOpts().CPlusPlus23 &&
+ A.getSyntax() == AttributeCommonInfo::AS_CXX11) {
Diag(A.getLoc(), diag::ext_cxx23_attr) << A << Range;
+ }
return Assumption;
}
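
Note: a sketch of the intended behavior of the syntax check above (illustrative
only; the exact diagnostic text may differ):

    // clang -std=c++20 -fsyntax-only t.cpp
    void f(int x) {
      [[assume(x > 0)]]; // warning: use of the 'assume' attribute is a
    }                    // C++23 extension; silent under -std=c++23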
diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp
index 4937cce4621f..39e9dbed0c3e 100644
--- a/clang/lib/Sema/SemaTemplate.cpp
+++ b/clang/lib/Sema/SemaTemplate.cpp
@@ -726,44 +726,22 @@ Sema::ActOnDependentIdExpression(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs) {
- DeclContext *DC = getFunctionLevelDeclContext();
-
- // C++11 [expr.prim.general]p12:
- // An id-expression that denotes a non-static data member or non-static
- // member function of a class can only be used:
- // (...)
- // - if that id-expression denotes a non-static data member and it
- // appears in an unevaluated operand.
- //
- // If this might be the case, form a DependentScopeDeclRefExpr instead of a
- // CXXDependentScopeMemberExpr. The former can instantiate to either
- // DeclRefExpr or MemberExpr depending on lookup results, while the latter is
- // always a MemberExpr.
- bool MightBeCxx11UnevalField =
- getLangOpts().CPlusPlus11 && isUnevaluatedContext();
-
- // Check if the nested name specifier is an enum type.
- bool IsEnum = false;
- if (NestedNameSpecifier *NNS = SS.getScopeRep())
- IsEnum = isa_and_nonnull<EnumType>(NNS->getAsType());
-
- if (!MightBeCxx11UnevalField && !isAddressOfOperand && !IsEnum &&
- isa<CXXMethodDecl>(DC) &&
- cast<CXXMethodDecl>(DC)->isImplicitObjectMemberFunction()) {
- QualType ThisType =
- cast<CXXMethodDecl>(DC)->getThisType().getNonReferenceType();
-
- // Since the 'this' expression is synthesized, we don't need to
- // perform the double-lookup check.
- NamedDecl *FirstQualifierInScope = nullptr;
+ if (SS.isEmpty()) {
+ // FIXME: This codepath is only used by dependent unqualified names
+ // (e.g. a dependent conversion-function-id, or operator= once we support
+ // it). It doesn't quite do the right thing, and it will silently fail if
+ // getCurrentThisType() returns null.
+ QualType ThisType = getCurrentThisType();
+ if (ThisType.isNull())
+ return ExprError();
return CXXDependentScopeMemberExpr::Create(
- Context, /*This=*/nullptr, ThisType,
+ Context, /*Base=*/nullptr, ThisType,
/*IsArrow=*/!Context.getLangOpts().HLSL,
- /*Op=*/SourceLocation(), SS.getWithLocInContext(Context), TemplateKWLoc,
- FirstQualifierInScope, NameInfo, TemplateArgs);
+ /*OperatorLoc=*/SourceLocation(),
+ /*QualifierLoc=*/NestedNameSpecifierLoc(), TemplateKWLoc,
+ /*FirstQualifierFoundInScope=*/nullptr, NameInfo, TemplateArgs);
}
-
return BuildDependentDeclRefExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs);
}
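
Note: the FIXME above refers to dependent unqualified names of roughly this
shape (a sketch, not taken from the patch):

    template <typename T> struct S {
      operator T();
      void f() { operator T(); } // dependent conversion-function-id with an
    };                           // empty scope specifier; built as a member
                                 // access on the implicit 'this'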
@@ -772,13 +750,15 @@ Sema::BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs) {
- // DependentScopeDeclRefExpr::Create requires a valid QualifierLoc
- NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
- if (!QualifierLoc)
- return ExprError();
+ // DependentScopeDeclRefExpr::Create requires a valid NestedNameSpecifierLoc
+ if (!SS.isValid())
+ return CreateRecoveryExpr(
+ SS.getBeginLoc(),
+ TemplateArgs ? TemplateArgs->getRAngleLoc() : NameInfo.getEndLoc(), {});
return DependentScopeDeclRefExpr::Create(
- Context, QualifierLoc, TemplateKWLoc, NameInfo, TemplateArgs);
+ Context, SS.getWithLocInContext(Context), TemplateKWLoc, NameInfo,
+ TemplateArgs);
}
@@ -1091,7 +1071,8 @@ NamedDecl *Sema::ActOnTypeParameter(Scope *S, bool Typename,
return Param;
}
- Param->setDefaultArgument(DefaultTInfo);
+ Param->setDefaultArgument(
+ Context, TemplateArgumentLoc(DefaultTInfo->getType(), DefaultTInfo));
}
return Param;
@@ -1618,7 +1599,9 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
if (DiagnoseUnexpandedParameterPack(Default, UPPC_DefaultArgument))
return Param;
- Param->setDefaultArgument(Default);
+ Param->setDefaultArgument(
+ Context, getTrivialTemplateArgumentLoc(TemplateArgument(Default),
+ QualType(), SourceLocation()));
}
return Param;
@@ -1859,7 +1842,8 @@ DeclResult Sema::CheckClassTemplate(
TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody) {
assert(TemplateParams && TemplateParams->size() > 0 &&
"No template parameters");
- assert(TUK != TUK_Reference && "Can only declare or define class templates");
+ assert(TUK != TagUseKind::Reference &&
+ "Can only declare or define class templates");
bool Invalid = false;
// Check that we can declare a template here.
@@ -1881,8 +1865,9 @@ DeclResult Sema::CheckClassTemplate(
// C++11 [basic.lookup.elab]p2).
DeclContext *SemanticContext;
LookupResult Previous(*this, Name, NameLoc,
- (SS.isEmpty() && TUK == TUK_Friend)
- ? LookupTagName : LookupOrdinaryName,
+ (SS.isEmpty() && TUK == TagUseKind::Friend)
+ ? LookupTagName
+ : LookupOrdinaryName,
forRedeclarationInCurContext());
if (SS.isNotEmpty() && !SS.isInvalid()) {
SemanticContext = computeDeclContext(SS, true);
@@ -1890,11 +1875,11 @@ DeclResult Sema::CheckClassTemplate(
// FIXME: Horrible, horrible hack! We can't currently represent this
// in the AST, and historically we have just ignored such friend
// class templates, so don't complain here.
- Diag(NameLoc, TUK == TUK_Friend
+ Diag(NameLoc, TUK == TagUseKind::Friend
? diag::warn_template_qualified_friend_ignored
: diag::err_template_qualified_declarator_no_match)
<< SS.getScopeRep() << SS.getRange();
- return TUK != TUK_Friend;
+ return TUK != TagUseKind::Friend;
}
if (RequireCompleteDeclContext(SS, SemanticContext))
@@ -1909,7 +1894,7 @@ DeclResult Sema::CheckClassTemplate(
Invalid = true;
}
- if (TUK != TUK_Friend && TUK != TUK_Reference)
+ if (TUK != TagUseKind::Friend && TUK != TagUseKind::Reference)
diagnoseQualifiedDeclaration(SS, SemanticContext, Name, NameLoc,
                                  /*TemplateId=*/nullptr,
/*IsMemberSpecialization*/ false);
@@ -1922,7 +1907,7 @@ DeclResult Sema::CheckClassTemplate(
// If T is the name of a class, then each of the following shall have a
// name different from T:
// -- every member template of class T
- if (TUK != TUK_Friend &&
+ if (TUK != TagUseKind::Friend &&
DiagnoseClassNameShadow(SemanticContext,
DeclarationNameInfo(Name, NameLoc)))
return true;
@@ -1964,7 +1949,7 @@ DeclResult Sema::CheckClassTemplate(
}
}
- if (TUK == TUK_Friend) {
+ if (TUK == TagUseKind::Friend) {
// C++ [namespace.memdef]p3:
// [...] When looking for a prior declaration of a class or a function
// declared as a friend, and when the name of the friend class or
@@ -2001,9 +1986,8 @@ DeclResult Sema::CheckClassTemplate(
PrevDecl = (*Previous.begin())->getUnderlyingDecl();
}
}
- } else if (PrevDecl &&
- !isDeclInScope(Previous.getRepresentativeDecl(), SemanticContext,
- S, SS.isValid()))
+ } else if (PrevDecl && !isDeclInScope(Previous.getRepresentativeDecl(),
+ SemanticContext, S, SS.isValid()))
PrevDecl = PrevClassTemplate = nullptr;
if (auto *Shadow = dyn_cast_or_null<UsingShadowDecl>(
@@ -2025,7 +2009,7 @@ DeclResult Sema::CheckClassTemplate(
// Ensure that the template parameter lists are compatible. Skip this check
// for a friend in a dependent context: the template parameter list itself
// could be dependent.
- if (!(TUK == TUK_Friend && CurContext->isDependentContext()) &&
+ if (!(TUK == TagUseKind::Friend && CurContext->isDependentContext()) &&
!TemplateParameterListsAreEqual(
TemplateCompareNewDeclInfo(SemanticContext ? SemanticContext
: CurContext,
@@ -2041,8 +2025,8 @@ DeclResult Sema::CheckClassTemplate(
// the class-key shall agree in kind with the original class
// template declaration (7.1.5.3).
RecordDecl *PrevRecordDecl = PrevClassTemplate->getTemplatedDecl();
- if (!isAcceptableTagRedeclaration(PrevRecordDecl, Kind,
- TUK == TUK_Definition, KWLoc, Name)) {
+ if (!isAcceptableTagRedeclaration(
+ PrevRecordDecl, Kind, TUK == TagUseKind::Definition, KWLoc, Name)) {
Diag(KWLoc, diag::err_use_with_wrong_tag)
<< Name
<< FixItHint::CreateReplacement(KWLoc, PrevRecordDecl->getKindName());
@@ -2051,7 +2035,7 @@ DeclResult Sema::CheckClassTemplate(
}
// Check for redefinition of this class template.
- if (TUK == TUK_Definition) {
+ if (TUK == TagUseKind::Definition) {
if (TagDecl *Def = PrevRecordDecl->getDefinition()) {
// If we have a prior definition that is not visible, treat this as
// simply making that previous definition visible.
@@ -2088,7 +2072,7 @@ DeclResult Sema::CheckClassTemplate(
// merging in the template parameter list from the previous class
// template declaration. Skip this check for a friend in a dependent
// context, because the template parameter list might be dependent.
- if (!(TUK == TUK_Friend && CurContext->isDependentContext()) &&
+ if (!(TUK == TagUseKind::Friend && CurContext->isDependentContext()) &&
CheckTemplateParameterList(
TemplateParams,
PrevClassTemplate ? GetTemplateParameterList(PrevClassTemplate)
@@ -2096,8 +2080,8 @@ DeclResult Sema::CheckClassTemplate(
(SS.isSet() && SemanticContext && SemanticContext->isRecord() &&
SemanticContext->isDependentContext())
? TPC_ClassTemplateMember
- : TUK == TUK_Friend ? TPC_FriendClassTemplate
- : TPC_ClassTemplate,
+ : TUK == TagUseKind::Friend ? TPC_FriendClassTemplate
+ : TPC_ClassTemplate,
SkipBody))
Invalid = true;
@@ -2105,9 +2089,10 @@ DeclResult Sema::CheckClassTemplate(
// If the name of the template was qualified, we must be defining the
// template out-of-line.
if (!SS.isInvalid() && !Invalid && !PrevClassTemplate) {
- Diag(NameLoc, TUK == TUK_Friend ? diag::err_friend_decl_does_not_match
- : diag::err_member_decl_does_not_match)
- << Name << SemanticContext << /*IsDefinition*/true << SS.getRange();
+ Diag(NameLoc, TUK == TagUseKind::Friend
+ ? diag::err_friend_decl_does_not_match
+ : diag::err_member_decl_does_not_match)
+ << Name << SemanticContext << /*IsDefinition*/ true << SS.getRange();
Invalid = true;
}
}
@@ -2117,8 +2102,8 @@ DeclResult Sema::CheckClassTemplate(
// recent declaration tricking the template instantiator to make substitutions
// there.
// FIXME: Figure out how to combine with shouldLinkDependentDeclWithPrevious
- bool ShouldAddRedecl
- = !(TUK == TUK_Friend && CurContext->isDependentContext());
+ bool ShouldAddRedecl =
+ !(TUK == TagUseKind::Friend && CurContext->isDependentContext());
CXXRecordDecl *NewClass =
CXXRecordDecl::Create(Context, Kind, SemanticContext, KWLoc, NameLoc, Name,
@@ -2133,7 +2118,7 @@ DeclResult Sema::CheckClassTemplate(
// Add alignment attributes if necessary; these attributes are checked when
// the ASTContext lays out the structure.
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
+ if (TUK == TagUseKind::Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
AddAlignmentAttributesForRecord(NewClass);
AddMsStructLayoutForRecord(NewClass);
}
@@ -2164,14 +2149,15 @@ DeclResult Sema::CheckClassTemplate(
PrevClassTemplate->setMemberSpecialization();
// Set the access specifier.
- if (!Invalid && TUK != TUK_Friend && NewTemplate->getDeclContext()->isRecord())
+ if (!Invalid && TUK != TagUseKind::Friend &&
+ NewTemplate->getDeclContext()->isRecord())
SetMemberAccessSpecifier(NewTemplate, PrevClassTemplate, AS);
// Set the lexical context of these templates
NewClass->setLexicalDeclContext(CurContext);
NewTemplate->setLexicalDeclContext(CurContext);
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip))
+ if (TUK == TagUseKind::Definition && (!SkipBody || !SkipBody->ShouldSkip))
NewClass->startDefinition();
ProcessDeclAttributeList(S, NewClass, Attr);
@@ -2184,7 +2170,7 @@ DeclResult Sema::CheckClassTemplate(
inferGslOwnerPointerAttribute(NewClass);
inferNullableClassAttribute(NewClass);
- if (TUK != TUK_Friend) {
+ if (TUK != TagUseKind::Friend) {
// Per C++ [basic.scope.temp]p2, skip the template parameter scopes.
Scope *Outer = S;
while ((Outer->getFlags() & Scope::TemplateParamScope) != 0)
@@ -2338,11 +2324,11 @@ transformTemplateTypeParam(Sema &SemaRef, DeclContext *DC,
SemaRef.SubstTypeConstraint(NewTTP, TC, Args,
/*EvaluateConstraint=*/true);
if (TTP->hasDefaultArgument()) {
- TypeSourceInfo *InstantiatedDefaultArg =
- SemaRef.SubstType(TTP->getDefaultArgumentInfo(), Args,
- TTP->getDefaultArgumentLoc(), TTP->getDeclName());
- if (InstantiatedDefaultArg)
- NewTTP->setDefaultArgument(InstantiatedDefaultArg);
+ TemplateArgumentLoc InstantiatedDefaultArg;
+ if (!SemaRef.SubstTemplateArgument(
+ TTP->getDefaultArgument(), Args, InstantiatedDefaultArg,
+ TTP->getDefaultArgumentLoc(), TTP->getDeclName()))
+ NewTTP->setDefaultArgument(SemaRef.Context, InstantiatedDefaultArg);
}
SemaRef.CurrentInstantiationScope->InstantiatedLocal(TTP, NewTTP);
return NewTTP;
@@ -3595,10 +3581,9 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
= dyn_cast<TemplateTypeParmDecl>(*NewParam)) {
// Check the presence of a default argument here.
if (NewTypeParm->hasDefaultArgument() &&
- DiagnoseDefaultTemplateArgument(*this, TPC,
- NewTypeParm->getLocation(),
- NewTypeParm->getDefaultArgumentInfo()->getTypeLoc()
- .getSourceRange()))
+ DiagnoseDefaultTemplateArgument(
+ *this, TPC, NewTypeParm->getLocation(),
+ NewTypeParm->getDefaultArgument().getSourceRange()))
NewTypeParm->removeDefaultArgument();
// Merge default arguments for template type parameters.
@@ -3647,9 +3632,9 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
// Check the presence of a default argument here.
if (NewNonTypeParm->hasDefaultArgument() &&
- DiagnoseDefaultTemplateArgument(*this, TPC,
- NewNonTypeParm->getLocation(),
- NewNonTypeParm->getDefaultArgument()->getSourceRange())) {
+ DiagnoseDefaultTemplateArgument(
+ *this, TPC, NewNonTypeParm->getLocation(),
+ NewNonTypeParm->getDefaultArgument().getSourceRange())) {
NewNonTypeParm->removeDefaultArgument();
}
@@ -5035,7 +5020,7 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
IdentifierInfo *Id = D->getIdentifier();
assert(Id && "templated class must have an identifier");
- if (!isAcceptableTagRedeclaration(D, TagKind, TUK == TUK_Definition,
+ if (!isAcceptableTagRedeclaration(D, TagKind, TUK == TagUseKind::Definition,
TagLoc, Id)) {
Diag(TagLoc, diag::err_use_with_wrong_tag)
<< Result
@@ -5747,50 +5732,36 @@ ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
}
// We actually only call this from template instantiation.
-ExprResult
-Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
- SourceLocation TemplateKWLoc,
- const DeclarationNameInfo &NameInfo,
- const TemplateArgumentListInfo *TemplateArgs) {
-
+ExprResult Sema::BuildQualifiedTemplateIdExpr(
+ CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs, bool IsAddressOfOperand) {
assert(TemplateArgs || TemplateKWLoc.isValid());
- DeclContext *DC;
- if (!(DC = computeDeclContext(SS, false)) ||
- DC->isDependentContext() ||
- RequireCompleteDeclContext(SS, DC))
- return BuildDependentDeclRefExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs);
LookupResult R(*this, NameInfo, LookupOrdinaryName);
- if (LookupTemplateName(R, (Scope *)nullptr, SS, QualType(),
- /*Entering*/ false, TemplateKWLoc))
+ if (LookupTemplateName(R, /*S=*/nullptr, SS, /*ObjectType=*/QualType(),
+ /*EnteringContext=*/false, TemplateKWLoc))
return ExprError();
if (R.isAmbiguous())
return ExprError();
+ if (R.wasNotFoundInCurrentInstantiation() || SS.isInvalid())
+ return BuildDependentDeclRefExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs);
+
if (R.empty()) {
+ DeclContext *DC = computeDeclContext(SS);
Diag(NameInfo.getLoc(), diag::err_no_member)
<< NameInfo.getName() << DC << SS.getRange();
return ExprError();
}
- auto DiagnoseTypeTemplateDecl = [&](TemplateDecl *Temp,
- bool isTypeAliasTemplateDecl) {
- Diag(NameInfo.getLoc(), diag::err_template_kw_refers_to_type_template)
- << SS.getScopeRep() << NameInfo.getName().getAsString() << SS.getRange()
- << isTypeAliasTemplateDecl;
- Diag(Temp->getLocation(), diag::note_referenced_type_template)
- << isTypeAliasTemplateDecl;
- return CreateRecoveryExpr(NameInfo.getBeginLoc(), NameInfo.getEndLoc(), {});
- };
-
- if (ClassTemplateDecl *Temp = R.getAsSingle<ClassTemplateDecl>())
- return DiagnoseTypeTemplateDecl(Temp, false);
-
- if (TypeAliasTemplateDecl *Temp = R.getAsSingle<TypeAliasTemplateDecl>())
- return DiagnoseTypeTemplateDecl(Temp, true);
+ // If necessary, build an implicit class member access.
+ if (isPotentialImplicitMemberAccess(SS, R, IsAddressOfOperand))
+ return BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs,
+ /*S=*/nullptr);
- return BuildTemplateIdExpr(SS, TemplateKWLoc, R, /*ADL*/ false, TemplateArgs);
+ return BuildTemplateIdExpr(SS, TemplateKWLoc, R, /*ADL=*/false, TemplateArgs);
}
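A hedged sketch of the case the new isPotentialImplicitMemberAccess path covers: a qualified template-id that, during instantiation, turns out to name a member and is rebuilt as an implicit member access (the names here are illustrative only):

    struct Base { template <typename C> int get() { return 0; } };
    template <typename T> struct Derived : T {
      // Rebuilt as an implicit access on 'this' when instantiated.
      int use() { return T::template get<int>(); }
    };
    int n = Derived<Base>().use();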
/// Form a template name from a name that is syntactically required to name a
@@ -5982,8 +5953,7 @@ bool Sema::CheckTemplateTypeArgument(
LookupParsedName(Result, CurScope, &SS, /*ObjectType=*/QualType());
if (Result.getAsSingle<TypeDecl>() ||
- Result.getResultKind() ==
- LookupResult::NotFoundInCurrentInstantiation) {
+ Result.wasNotFoundInCurrentInstantiation()) {
assert(SS.getScopeRep() && "dependent scope expr must has a scope!");
// Suggest that the user add 'typename' before the NNS.
SourceLocation Loc = AL.getSourceRange().getBegin();
@@ -6075,22 +6045,26 @@ bool Sema::CheckTemplateTypeArgument(
///
/// \param Converted the list of template arguments provided for template
/// parameters that precede \p Param in the template parameter list.
-/// \returns the substituted template argument, or NULL if an error occurred.
-static TypeSourceInfo *SubstDefaultTemplateArgument(
+///
+/// \param Output the resulting substituted template argument.
+///
+/// \returns true if an error occurred.
+static bool SubstDefaultTemplateArgument(
Sema &SemaRef, TemplateDecl *Template, SourceLocation TemplateLoc,
SourceLocation RAngleLoc, TemplateTypeParmDecl *Param,
ArrayRef<TemplateArgument> SugaredConverted,
- ArrayRef<TemplateArgument> CanonicalConverted) {
- TypeSourceInfo *ArgType = Param->getDefaultArgumentInfo();
+ ArrayRef<TemplateArgument> CanonicalConverted,
+ TemplateArgumentLoc &Output) {
+ Output = Param->getDefaultArgument();
// If the argument type is dependent, instantiate it now based
// on the previously-computed template arguments.
- if (ArgType->getType()->isInstantiationDependentType()) {
+ if (Output.getArgument().isInstantiationDependent()) {
Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc, Param, Template,
SugaredConverted,
SourceRange(TemplateLoc, RAngleLoc));
if (Inst.isInvalid())
- return nullptr;
+ return true;
// Only substitute for the innermost template argument list.
MultiLevelTemplateArgumentList TemplateArgLists(Template, SugaredConverted,
@@ -6103,12 +6077,14 @@ static TypeSourceInfo *SubstDefaultTemplateArgument(
ForLambdaCallOperator = Rec->isLambda();
Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext(),
!ForLambdaCallOperator);
- ArgType =
- SemaRef.SubstType(ArgType, TemplateArgLists,
- Param->getDefaultArgumentLoc(), Param->getDeclName());
+
+ if (SemaRef.SubstTemplateArgument(Output, TemplateArgLists, Output,
+ Param->getDefaultArgumentLoc(),
+ Param->getDeclName()))
+ return true;
}
- return ArgType;
+ return false;
}
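For context, a minimal sketch of a dependent default argument that takes this path, assuming nothing beyond standard C++ ('Pair' is a made-up name):

    // 'U = T*' is instantiation-dependent, so it is substituted against
    // the converted arguments; with T = int, U becomes int*.
    template <typename T, typename U = T *> struct Pair {};
    Pair<int> p;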
/// Substitute template arguments into the default template argument for
@@ -6133,16 +6109,17 @@ static TypeSourceInfo *SubstDefaultTemplateArgument(
/// parameters that precede \p Param in the template parameter list.
///
/// \returns the substituted template argument, or NULL if an error occurred.
-static ExprResult SubstDefaultTemplateArgument(
+static bool SubstDefaultTemplateArgument(
Sema &SemaRef, TemplateDecl *Template, SourceLocation TemplateLoc,
SourceLocation RAngleLoc, NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> SugaredConverted,
- ArrayRef<TemplateArgument> CanonicalConverted) {
+ ArrayRef<TemplateArgument> CanonicalConverted,
+ TemplateArgumentLoc &Output) {
Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc, Param, Template,
SugaredConverted,
SourceRange(TemplateLoc, RAngleLoc));
if (Inst.isInvalid())
- return ExprError();
+ return true;
// Only substitute for the innermost template argument list.
MultiLevelTemplateArgumentList TemplateArgLists(Template, SugaredConverted,
@@ -6153,7 +6130,8 @@ static ExprResult SubstDefaultTemplateArgument(
Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext());
EnterExpressionEvaluationContext ConstantEvaluated(
SemaRef, Sema::ExpressionEvaluationContext::ConstantEvaluated);
- return SemaRef.SubstExpr(Param->getDefaultArgument(), TemplateArgLists);
+ return SemaRef.SubstTemplateArgument(Param->getDefaultArgument(),
+ TemplateArgLists, Output);
}
/// Substitute template arguments into the default template argument for
@@ -6231,13 +6209,12 @@ TemplateArgumentLoc Sema::SubstDefaultTemplateArgumentIfAvailable(
return TemplateArgumentLoc();
HasDefaultArg = true;
- TypeSourceInfo *DI = SubstDefaultTemplateArgument(
- *this, Template, TemplateLoc, RAngleLoc, TypeParm, SugaredConverted,
- CanonicalConverted);
- if (DI)
- return TemplateArgumentLoc(TemplateArgument(DI->getType()), DI);
-
- return TemplateArgumentLoc();
+ TemplateArgumentLoc Output;
+ if (SubstDefaultTemplateArgument(*this, Template, TemplateLoc, RAngleLoc,
+ TypeParm, SugaredConverted,
+ CanonicalConverted, Output))
+ return TemplateArgumentLoc();
+ return Output;
}
if (NonTypeTemplateParmDecl *NonTypeParm
@@ -6246,14 +6223,12 @@ TemplateArgumentLoc Sema::SubstDefaultTemplateArgumentIfAvailable(
return TemplateArgumentLoc();
HasDefaultArg = true;
- ExprResult Arg = SubstDefaultTemplateArgument(
- *this, Template, TemplateLoc, RAngleLoc, NonTypeParm, SugaredConverted,
- CanonicalConverted);
- if (Arg.isInvalid())
+ TemplateArgumentLoc Output;
+ if (SubstDefaultTemplateArgument(*this, Template, TemplateLoc, RAngleLoc,
+ NonTypeParm, SugaredConverted,
+ CanonicalConverted, Output))
return TemplateArgumentLoc();
-
- Expr *ArgE = Arg.getAs<Expr>();
- return TemplateArgumentLoc(TemplateArgument(ArgE), ArgE);
+ return Output;
}
TemplateTemplateParmDecl *TempTempParm
@@ -6820,28 +6795,20 @@ bool Sema::CheckTemplateArgumentList(
return diagnoseMissingArgument(*this, TemplateLoc, Template, TTP,
NewArgs);
- TypeSourceInfo *ArgType = SubstDefaultTemplateArgument(
- *this, Template, TemplateLoc, RAngleLoc, TTP, SugaredConverted,
- CanonicalConverted);
- if (!ArgType)
+ if (SubstDefaultTemplateArgument(*this, Template, TemplateLoc, RAngleLoc,
+ TTP, SugaredConverted,
+ CanonicalConverted, Arg))
return true;
-
- Arg = TemplateArgumentLoc(TemplateArgument(ArgType->getType()),
- ArgType);
} else if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
if (!hasReachableDefaultArgument(NTTP))
return diagnoseMissingArgument(*this, TemplateLoc, Template, NTTP,
NewArgs);
- ExprResult E = SubstDefaultTemplateArgument(
- *this, Template, TemplateLoc, RAngleLoc, NTTP, SugaredConverted,
- CanonicalConverted);
- if (E.isInvalid())
+ if (SubstDefaultTemplateArgument(*this, Template, TemplateLoc, RAngleLoc,
+ NTTP, SugaredConverted,
+ CanonicalConverted, Arg))
return true;
-
- Expr *Ex = E.getAs<Expr>();
- Arg = TemplateArgumentLoc(TemplateArgument(Ex), Ex);
} else {
TemplateTemplateParmDecl *TempParm
= cast<TemplateTemplateParmDecl>(*Param);
@@ -9486,7 +9453,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody) {
- assert(TUK != TUK_Reference && "References are not specializations");
+ assert(TUK != TagUseKind::Reference && "References are not specializations");
SourceLocation TemplateNameLoc = TemplateId.TemplateNameLoc;
SourceLocation LAngleLoc = TemplateId.LAngleLoc;
@@ -9508,7 +9475,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
bool isPartialSpecialization = false;
if (SS.isSet()) {
- if (TUK != TUK_Reference && TUK != TUK_Friend &&
+ if (TUK != TagUseKind::Reference && TUK != TagUseKind::Friend &&
diagnoseQualifiedDeclaration(SS, ClassTemplate->getDeclContext(),
ClassTemplate->getDeclName(),
TemplateNameLoc, &TemplateId,
@@ -9523,9 +9490,8 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
bool Invalid = false;
TemplateParameterList *TemplateParams =
MatchTemplateParametersToScopeSpecifier(
- KWLoc, TemplateNameLoc, SS, &TemplateId,
- TemplateParameterLists, TUK == TUK_Friend, isMemberSpecialization,
- Invalid);
+ KWLoc, TemplateNameLoc, SS, &TemplateId, TemplateParameterLists,
+ TUK == TagUseKind::Friend, isMemberSpecialization, Invalid);
if (Invalid)
return true;
@@ -9536,7 +9502,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
if (TemplateParams && TemplateParams->size() > 0) {
isPartialSpecialization = true;
- if (TUK == TUK_Friend) {
+ if (TUK == TagUseKind::Friend) {
Diag(KWLoc, diag::err_partial_specialization_friend)
<< SourceRange(LAngleLoc, RAngleLoc);
return true;
@@ -9555,10 +9521,10 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
}
} else if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(Param)) {
- if (Expr *DefArg = NTTP->getDefaultArgument()) {
+ if (NTTP->hasDefaultArgument()) {
Diag(NTTP->getDefaultArgumentLoc(),
diag::err_default_arg_in_partial_spec)
- << DefArg->getSourceRange();
+ << NTTP->getDefaultArgument().getSourceRange();
NTTP->removeDefaultArgument();
}
} else {
@@ -9572,14 +9538,15 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
}
}
} else if (TemplateParams) {
- if (TUK == TUK_Friend)
+ if (TUK == TagUseKind::Friend)
Diag(KWLoc, diag::err_template_spec_friend)
<< FixItHint::CreateRemoval(
SourceRange(TemplateParams->getTemplateLoc(),
TemplateParams->getRAngleLoc()))
<< SourceRange(LAngleLoc, RAngleLoc);
} else {
- assert(TUK == TUK_Friend && "should have a 'template<>' for this decl");
+ assert(TUK == TagUseKind::Friend &&
+ "should have a 'template<>' for this decl");
}
// Check that the specialization uses the same tag kind as the
@@ -9587,8 +9554,8 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
assert(Kind != TagTypeKind::Enum &&
"Invalid enum tag in class template spec!");
- if (!isAcceptableTagRedeclaration(ClassTemplate->getTemplatedDecl(),
- Kind, TUK == TUK_Definition, KWLoc,
+ if (!isAcceptableTagRedeclaration(ClassTemplate->getTemplatedDecl(), Kind,
+ TUK == TagUseKind::Definition, KWLoc,
ClassTemplate->getIdentifier())) {
Diag(KWLoc, diag::err_use_with_wrong_tag)
<< ClassTemplate
@@ -9652,7 +9619,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
// Check whether we can declare a class template specialization in
// the current scope.
- if (TUK != TUK_Friend &&
+ if (TUK != TagUseKind::Friend &&
CheckTemplateSpecializationScope(*this, ClassTemplate, PrevDecl,
TemplateNameLoc,
isPartialSpecialization))
@@ -9679,8 +9646,8 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
// This rule has since been removed, because it's redundant given DR1495,
// but we keep it because it produces better diagnostics and recovery.
Diag(TemplateNameLoc, diag::err_partial_spec_args_match_primary_template)
- << /*class template*/0 << (TUK == TUK_Definition)
- << FixItHint::CreateRemoval(SourceRange(LAngleLoc, RAngleLoc));
+ << /*class template*/ 0 << (TUK == TagUseKind::Definition)
+ << FixItHint::CreateRemoval(SourceRange(LAngleLoc, RAngleLoc));
return CheckClassTemplate(S, TagSpec, TUK, KWLoc, SS,
ClassTemplate->getIdentifier(),
TemplateNameLoc,
@@ -9772,11 +9739,11 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
}
// If this is not a friend, note that this is an explicit specialization.
- if (TUK != TUK_Friend)
+ if (TUK != TagUseKind::Friend)
Specialization->setSpecializationKind(TSK_ExplicitSpecialization);
// Check that this isn't a redefinition of this specialization.
- if (TUK == TUK_Definition) {
+ if (TUK == TagUseKind::Definition) {
RecordDecl *Def = Specialization->getDefinition();
NamedDecl *Hidden = nullptr;
if (Def && SkipBody && !hasVisibleDefinition(Def, &Hidden)) {
@@ -9797,7 +9764,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
// Add alignment attributes if necessary; these attributes are checked when
// the ASTContext lays out the structure.
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
+ if (TUK == TagUseKind::Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
AddAlignmentAttributesForRecord(Specialization);
AddMsStructLayoutForRecord(Specialization);
}
@@ -9818,10 +9785,10 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
Specialization->setLexicalDeclContext(CurContext);
// We may be starting the definition of this specialization.
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip))
+ if (TUK == TagUseKind::Definition && (!SkipBody || !SkipBody->ShouldSkip))
Specialization->startDefinition();
- if (TUK == TUK_Friend) {
+ if (TUK == TagUseKind::Friend) {
// Build the fully-sugared type for this class template
// specialization as the user wrote in the specialization
// itself. This means that we'll pretty-print the type retrieved
@@ -10290,15 +10257,20 @@ bool Sema::CheckFunctionTemplateSpecialization(
Ovl->getDeclContext()->getRedeclContext()))
continue;
+ QualType FT = FD->getType();
+ // C++11 [dcl.constexpr]p8:
+ // A constexpr specifier for a non-static member function that is not
+ // a constructor declares that member function to be const.
+ //
// When matching a constexpr member function template specialization
// against the primary template, we don't yet know whether the
// specialization has an implicit 'const' (because we don't know whether
// it will be a static member function until we know which template it
- // specializes), so adjust it now assuming it specializes this template.
- QualType FT = FD->getType();
- if (FD->isConstexpr()) {
- CXXMethodDecl *OldMD =
- dyn_cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl());
+ // specializes). This rule was removed in C++14.
+ if (auto *NewMD = dyn_cast<CXXMethodDecl>(FD);
+ !getLangOpts().CPlusPlus14 && NewMD && NewMD->isConstexpr() &&
+ !isa<CXXConstructorDecl, CXXDestructorDecl>(NewMD)) {
+ auto *OldMD = dyn_cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl());
if (OldMD && OldMD->isConst()) {
const FunctionProtoType *FPT = FT->castAs<FunctionProtoType>();
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
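A sketch of the language rule in play, assuming -std=c++11 (names are illustrative): in C++11 the constexpr specifier on a non-static member function implies const, so a specialization written without const can still match a const-qualified member template; C++14 dropped that rule, which is why the adjustment is now gated on !CPlusPlus14.

    struct S {
      template <typename T> constexpr int get() const { return 0; }
    };
    // C++11: 'constexpr' implies 'const', so this matches the template above.
    template <> constexpr int S::get<int>() { return 1; }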
@@ -11124,8 +11096,7 @@ DeclResult Sema::ActOnExplicitInstantiation(
Def->setTemplateSpecializationKind(TSK);
if (!getDLLAttr(Def) && getDLLAttr(Specialization) &&
- (Context.getTargetInfo().shouldDLLImportComdatSymbols() &&
- !Context.getTargetInfo().getTriple().isPS())) {
+ Context.getTargetInfo().shouldDLLImportComdatSymbols()) {
// An explicit instantiation definition can add a dll attribute to a
// template with a previous instantiation declaration. MinGW doesn't
// allow this.
@@ -11142,8 +11113,7 @@ DeclResult Sema::ActOnExplicitInstantiation(
bool NewlyDLLExported =
!PreviouslyDLLExported && Specialization->hasAttr<DLLExportAttr>();
if (Old_TSK == TSK_ImplicitInstantiation && NewlyDLLExported &&
- (Context.getTargetInfo().shouldDLLImportComdatSymbols() &&
- !Context.getTargetInfo().getTriple().isPS())) {
+ Context.getTargetInfo().shouldDLLImportComdatSymbols()) {
// An explicit instantiation definition can add a dll attribute to a
// template with a previous implicit instantiation. MinGW doesn't allow
// this. We limit clang to only adding dllexport, to avoid potentially
@@ -11192,11 +11162,13 @@ Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
bool Owned = false;
bool IsDependent = false;
- Decl *TagD = ActOnTag(S, TagSpec, Sema::TUK_Reference, KWLoc, SS, Name,
- NameLoc, Attr, AS_none, /*ModulePrivateLoc=*/SourceLocation(),
+ Decl *TagD =
+ ActOnTag(S, TagSpec, TagUseKind::Reference, KWLoc, SS, Name, NameLoc,
+ Attr, AS_none, /*ModulePrivateLoc=*/SourceLocation(),
MultiTemplateParamsArg(), Owned, IsDependent, SourceLocation(),
false, TypeResult(), /*IsTypeSpecifier*/ false,
- /*IsTemplateParamOrArg*/ false, /*OOK=*/OOK_Outside).get();
+ /*IsTemplateParamOrArg*/ false, /*OOK=*/OOK_Outside)
+ .get();
assert(!IsDependent && "explicit instantiation of dependent name not yet handled");
if (!TagD)
@@ -11727,9 +11699,9 @@ TypeResult Sema::ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
- if (TUK == TUK_Declaration || TUK == TUK_Definition) {
+ if (TUK == TagUseKind::Declaration || TUK == TagUseKind::Definition) {
Diag(NameLoc, diag::err_dependent_tag_decl)
- << (TUK == TUK_Definition) << llvm::to_underlying(Kind)
+ << (TUK == TagUseKind::Definition) << llvm::to_underlying(Kind)
<< SS.getRange();
return true;
}
diff --git a/clang/lib/Sema/SemaTemplateDeduction.cpp b/clang/lib/Sema/SemaTemplateDeduction.cpp
index 41fd210f29d0..f9ec34163e65 100644
--- a/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -519,18 +519,14 @@ static NamedDecl *getTemplateParameterWithDefault(Sema &S, NamedDecl *A,
switch (A->getKind()) {
case Decl::TemplateTypeParm: {
auto *T = cast<TemplateTypeParmDecl>(A);
- // FIXME: A TemplateTypeParmDecl's DefaultArgument can't hold a full
- // TemplateArgument, so there is currently no way to specify a pack as a
- // default argument for these.
- if (T->isParameterPack())
- return A;
auto *R = TemplateTypeParmDecl::Create(
S.Context, A->getDeclContext(), SourceLocation(), SourceLocation(),
T->getDepth(), T->getIndex(), T->getIdentifier(),
- T->wasDeclaredWithTypename(), /*ParameterPack=*/false,
+ T->wasDeclaredWithTypename(), T->isParameterPack(),
T->hasTypeConstraint());
R->setDefaultArgument(
- S.Context.getTrivialTypeSourceInfo(Default.getAsType()));
+ S.Context,
+ S.getTrivialTemplateArgumentLoc(Default, QualType(), SourceLocation()));
if (R->hasTypeConstraint()) {
auto *C = R->getTypeConstraint();
R->setTypeConstraint(C->getConceptReference(),
@@ -540,14 +536,14 @@ static NamedDecl *getTemplateParameterWithDefault(Sema &S, NamedDecl *A,
}
case Decl::NonTypeTemplateParm: {
auto *T = cast<NonTypeTemplateParmDecl>(A);
- // FIXME: Ditto, as above for TemplateTypeParm case.
- if (T->isParameterPack())
- return A;
auto *R = NonTypeTemplateParmDecl::Create(
S.Context, A->getDeclContext(), SourceLocation(), SourceLocation(),
T->getDepth(), T->getIndex(), T->getIdentifier(), T->getType(),
- /*ParameterPack=*/false, T->getTypeSourceInfo());
- R->setDefaultArgument(Default.getAsExpr());
+ T->isParameterPack(), T->getTypeSourceInfo());
+ R->setDefaultArgument(S.Context,
+ S.getTrivialTemplateArgumentLoc(
+ Default, Default.getNonTypeTemplateArgumentType(),
+ SourceLocation()));
if (auto *PTC = T->getPlaceholderTypeConstraint())
R->setPlaceholderTypeConstraint(PTC);
return R;
@@ -4776,8 +4772,13 @@ TemplateDeductionResult Sema::DeduceTemplateArguments(
DeduceReturnType(Specialization, Info.getLocation(), false))
return TemplateDeductionResult::MiscellaneousDeductionFailure;
+ // [C++26][expr.const]/p17
+ // An expression or conversion is immediate-escalating if it is not initially
+ // in an immediate function context and it is [...]
+ // a potentially-evaluated id-expression that denotes an immediate function.
if (IsAddressOfFunction && getLangOpts().CPlusPlus20 &&
Specialization->isImmediateEscalating() &&
+ parentEvaluationContext().isPotentiallyEvaluated() &&
CheckIfFunctionSpecializationIsImmediate(Specialization,
Info.getLocation()))
return TemplateDeductionResult::MiscellaneousDeductionFailure;
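A small sketch of the distinction the added isPotentiallyEvaluated() check draws ('twice' is a hypothetical name): naming an immediate function in an unevaluated operand is not immediate-escalating, so no escalation check is needed there.

    consteval int twice(int n) { return 2 * n; } // an immediate function
    using Fn = decltype(&twice); // unevaluated operand: no escalation needed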
diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp
index 07626058c797..abb8a260faab 100644
--- a/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -1619,11 +1619,6 @@ namespace {
case TemplateArgument::Pack:
// Literally rewrite the template argument pack, instead of unpacking
// it.
- assert(
- SemaRef.CodeSynthesisContexts.back().Kind ==
- Sema::CodeSynthesisContext::BuildingDeductionGuides &&
- "Transforming a template argument pack is only allowed in building "
- "deduction guide");
for (auto &pack : Arg.getPackAsArray()) {
TemplateArgumentLoc Input = SemaRef.getTrivialTemplateArgumentLoc(
pack, QualType(), SourceLocation{});
@@ -4375,9 +4370,9 @@ Sema::SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs) {
bool Sema::SubstTemplateArgument(
const TemplateArgumentLoc &Input,
const MultiLevelTemplateArgumentList &TemplateArgs,
- TemplateArgumentLoc &Output) {
- TemplateInstantiator Instantiator(*this, TemplateArgs, SourceLocation(),
- DeclarationName());
+ TemplateArgumentLoc &Output, SourceLocation Loc,
+ const DeclarationName &Entity) {
+ TemplateInstantiator Instantiator(*this, TemplateArgs, Loc, Entity);
return Instantiator.TransformTemplateArgument(Input, Output);
}
diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 381d79b2fcd4..bb49aae2cb66 100644
--- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -2956,11 +2956,10 @@ Decl *TemplateDeclInstantiator::VisitTemplateTypeParmDecl(
}
}
if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited()) {
- TypeSourceInfo *InstantiatedDefaultArg =
- SemaRef.SubstType(D->getDefaultArgumentInfo(), TemplateArgs,
- D->getDefaultArgumentLoc(), D->getDeclName());
- if (InstantiatedDefaultArg)
- Inst->setDefaultArgument(InstantiatedDefaultArg);
+ TemplateArgumentLoc Output;
+ if (!SemaRef.SubstTemplateArgument(D->getDefaultArgument(), TemplateArgs,
+ Output))
+ Inst->setDefaultArgument(SemaRef.getASTContext(), Output);
}
// Introduce this template parameter's instantiation into the instantiation
@@ -3124,9 +3123,10 @@ Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl(
if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited()) {
EnterExpressionEvaluationContext ConstantEvaluated(
SemaRef, Sema::ExpressionEvaluationContext::ConstantEvaluated);
- ExprResult Value = SemaRef.SubstExpr(D->getDefaultArgument(), TemplateArgs);
- if (!Value.isInvalid())
- Param->setDefaultArgument(Value.get());
+ TemplateArgumentLoc Result;
+ if (!SemaRef.SubstTemplateArgument(D->getDefaultArgument(), TemplateArgs,
+ Result))
+ Param->setDefaultArgument(SemaRef.Context, Result);
}
// Introduce this template parameter's instantiation into the instantiation
@@ -5055,6 +5055,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
Function->setLocation(PatternDecl->getLocation());
Function->setInnerLocStart(PatternDecl->getInnerLocStart());
Function->setRangeEnd(PatternDecl->getEndLoc());
+ Function->setDeclarationNameLoc(PatternDecl->getNameInfo().getInfo());
EnterExpressionEvaluationContext EvalContext(
*this, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
diff --git a/clang/lib/Sema/SemaTemplateVariadic.cpp b/clang/lib/Sema/SemaTemplateVariadic.cpp
index a4b681ae4f00..7a44b978aacd 100644
--- a/clang/lib/Sema/SemaTemplateVariadic.cpp
+++ b/clang/lib/Sema/SemaTemplateVariadic.cpp
@@ -66,6 +66,9 @@ namespace {
bool shouldWalkTypesOfTypeLocs() const { return false; }
+ // We need this so we can find e.g. attributes on lambdas.
+ bool shouldVisitImplicitCode() const { return true; }
+
//------------------------------------------------------------------------
// Recording occurrences of (unexpanded) parameter packs.
//------------------------------------------------------------------------
@@ -1128,7 +1131,7 @@ Sema::BuildPackIndexingExpr(Expr *PackExpression, SourceLocation EllipsisLoc,
return PackIndexingExpr::Create(getASTContext(), EllipsisLoc, RSquareLoc,
PackExpression, IndexExpr, Index,
- ExpandedExprs);
+ ExpandedExprs, EmptyPack);
}
TemplateArgumentLoc Sema::getTemplateArgumentPackExpansionPattern(
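For context, a hedged sketch of the C++26 pack-indexing expression this function builds (requires -std=c++26; names are illustrative); the new EmptyPack flag lets the node record whether the pack expanded to zero elements:

    template <int... Ns> constexpr int first() { return Ns...[0]; }
    static_assert(first<7, 8, 9>() == 7, "indexes the expression pack");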
diff --git a/clang/lib/Sema/SemaX86.cpp b/clang/lib/Sema/SemaX86.cpp
new file mode 100644
index 000000000000..ffac1afc5d78
--- /dev/null
+++ b/clang/lib/Sema/SemaX86.cpp
@@ -0,0 +1,878 @@
+//===------ SemaX86.cpp ---------- X86 target-specific routines -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to X86.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaX86.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/TargetParser/Triple.h"
+#include <bitset>
+
+namespace clang {
+
+SemaX86::SemaX86(Sema &S) : SemaBase(S) {}
+
+// Check if the rounding mode is legal.
+bool SemaX86::CheckBuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
+ // Indicates if this instruction has rounding control or just SAE.
+ bool HasRC = false;
+
+ unsigned ArgNum = 0;
+ switch (BuiltinID) {
+ default:
+ return false;
+ case X86::BI__builtin_ia32_vcvttsd2si32:
+ case X86::BI__builtin_ia32_vcvttsd2si64:
+ case X86::BI__builtin_ia32_vcvttsd2usi32:
+ case X86::BI__builtin_ia32_vcvttsd2usi64:
+ case X86::BI__builtin_ia32_vcvttss2si32:
+ case X86::BI__builtin_ia32_vcvttss2si64:
+ case X86::BI__builtin_ia32_vcvttss2usi32:
+ case X86::BI__builtin_ia32_vcvttss2usi64:
+ case X86::BI__builtin_ia32_vcvttsh2si32:
+ case X86::BI__builtin_ia32_vcvttsh2si64:
+ case X86::BI__builtin_ia32_vcvttsh2usi32:
+ case X86::BI__builtin_ia32_vcvttsh2usi64:
+ ArgNum = 1;
+ break;
+ case X86::BI__builtin_ia32_maxpd512:
+ case X86::BI__builtin_ia32_maxps512:
+ case X86::BI__builtin_ia32_minpd512:
+ case X86::BI__builtin_ia32_minps512:
+ case X86::BI__builtin_ia32_maxph512:
+ case X86::BI__builtin_ia32_minph512:
+ ArgNum = 2;
+ break;
+ case X86::BI__builtin_ia32_vcvtph2pd512_mask:
+ case X86::BI__builtin_ia32_vcvtph2psx512_mask:
+ case X86::BI__builtin_ia32_cvtps2pd512_mask:
+ case X86::BI__builtin_ia32_cvttpd2dq512_mask:
+ case X86::BI__builtin_ia32_cvttpd2qq512_mask:
+ case X86::BI__builtin_ia32_cvttpd2udq512_mask:
+ case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
+ case X86::BI__builtin_ia32_cvttps2dq512_mask:
+ case X86::BI__builtin_ia32_cvttps2qq512_mask:
+ case X86::BI__builtin_ia32_cvttps2udq512_mask:
+ case X86::BI__builtin_ia32_cvttps2uqq512_mask:
+ case X86::BI__builtin_ia32_vcvttph2w512_mask:
+ case X86::BI__builtin_ia32_vcvttph2uw512_mask:
+ case X86::BI__builtin_ia32_vcvttph2dq512_mask:
+ case X86::BI__builtin_ia32_vcvttph2udq512_mask:
+ case X86::BI__builtin_ia32_vcvttph2qq512_mask:
+ case X86::BI__builtin_ia32_vcvttph2uqq512_mask:
+ case X86::BI__builtin_ia32_getexppd512_mask:
+ case X86::BI__builtin_ia32_getexpps512_mask:
+ case X86::BI__builtin_ia32_getexpph512_mask:
+ case X86::BI__builtin_ia32_vcomisd:
+ case X86::BI__builtin_ia32_vcomiss:
+ case X86::BI__builtin_ia32_vcomish:
+ case X86::BI__builtin_ia32_vcvtph2ps512_mask:
+ ArgNum = 3;
+ break;
+ case X86::BI__builtin_ia32_cmppd512_mask:
+ case X86::BI__builtin_ia32_cmpps512_mask:
+ case X86::BI__builtin_ia32_cmpsd_mask:
+ case X86::BI__builtin_ia32_cmpss_mask:
+ case X86::BI__builtin_ia32_cmpsh_mask:
+ case X86::BI__builtin_ia32_vcvtsh2sd_round_mask:
+ case X86::BI__builtin_ia32_vcvtsh2ss_round_mask:
+ case X86::BI__builtin_ia32_cvtss2sd_round_mask:
+ case X86::BI__builtin_ia32_getexpsd128_round_mask:
+ case X86::BI__builtin_ia32_getexpss128_round_mask:
+ case X86::BI__builtin_ia32_getexpsh128_round_mask:
+ case X86::BI__builtin_ia32_getmantpd512_mask:
+ case X86::BI__builtin_ia32_getmantps512_mask:
+ case X86::BI__builtin_ia32_getmantph512_mask:
+ case X86::BI__builtin_ia32_maxsd_round_mask:
+ case X86::BI__builtin_ia32_maxss_round_mask:
+ case X86::BI__builtin_ia32_maxsh_round_mask:
+ case X86::BI__builtin_ia32_minsd_round_mask:
+ case X86::BI__builtin_ia32_minss_round_mask:
+ case X86::BI__builtin_ia32_minsh_round_mask:
+ case X86::BI__builtin_ia32_reducepd512_mask:
+ case X86::BI__builtin_ia32_reduceps512_mask:
+ case X86::BI__builtin_ia32_reduceph512_mask:
+ case X86::BI__builtin_ia32_rndscalepd_mask:
+ case X86::BI__builtin_ia32_rndscaleps_mask:
+ case X86::BI__builtin_ia32_rndscaleph_mask:
+ ArgNum = 4;
+ break;
+ case X86::BI__builtin_ia32_fixupimmpd512_mask:
+ case X86::BI__builtin_ia32_fixupimmpd512_maskz:
+ case X86::BI__builtin_ia32_fixupimmps512_mask:
+ case X86::BI__builtin_ia32_fixupimmps512_maskz:
+ case X86::BI__builtin_ia32_fixupimmsd_mask:
+ case X86::BI__builtin_ia32_fixupimmsd_maskz:
+ case X86::BI__builtin_ia32_fixupimmss_mask:
+ case X86::BI__builtin_ia32_fixupimmss_maskz:
+ case X86::BI__builtin_ia32_getmantsd_round_mask:
+ case X86::BI__builtin_ia32_getmantss_round_mask:
+ case X86::BI__builtin_ia32_getmantsh_round_mask:
+ case X86::BI__builtin_ia32_rangepd512_mask:
+ case X86::BI__builtin_ia32_rangeps512_mask:
+ case X86::BI__builtin_ia32_rangesd128_round_mask:
+ case X86::BI__builtin_ia32_rangess128_round_mask:
+ case X86::BI__builtin_ia32_reducesd_mask:
+ case X86::BI__builtin_ia32_reducess_mask:
+ case X86::BI__builtin_ia32_reducesh_mask:
+ case X86::BI__builtin_ia32_rndscalesd_round_mask:
+ case X86::BI__builtin_ia32_rndscaless_round_mask:
+ case X86::BI__builtin_ia32_rndscalesh_round_mask:
+ ArgNum = 5;
+ break;
+ case X86::BI__builtin_ia32_vcvtsd2si64:
+ case X86::BI__builtin_ia32_vcvtsd2si32:
+ case X86::BI__builtin_ia32_vcvtsd2usi32:
+ case X86::BI__builtin_ia32_vcvtsd2usi64:
+ case X86::BI__builtin_ia32_vcvtss2si32:
+ case X86::BI__builtin_ia32_vcvtss2si64:
+ case X86::BI__builtin_ia32_vcvtss2usi32:
+ case X86::BI__builtin_ia32_vcvtss2usi64:
+ case X86::BI__builtin_ia32_vcvtsh2si32:
+ case X86::BI__builtin_ia32_vcvtsh2si64:
+ case X86::BI__builtin_ia32_vcvtsh2usi32:
+ case X86::BI__builtin_ia32_vcvtsh2usi64:
+ case X86::BI__builtin_ia32_sqrtpd512:
+ case X86::BI__builtin_ia32_sqrtps512:
+ case X86::BI__builtin_ia32_sqrtph512:
+ ArgNum = 1;
+ HasRC = true;
+ break;
+ case X86::BI__builtin_ia32_addph512:
+ case X86::BI__builtin_ia32_divph512:
+ case X86::BI__builtin_ia32_mulph512:
+ case X86::BI__builtin_ia32_subph512:
+ case X86::BI__builtin_ia32_addpd512:
+ case X86::BI__builtin_ia32_addps512:
+ case X86::BI__builtin_ia32_divpd512:
+ case X86::BI__builtin_ia32_divps512:
+ case X86::BI__builtin_ia32_mulpd512:
+ case X86::BI__builtin_ia32_mulps512:
+ case X86::BI__builtin_ia32_subpd512:
+ case X86::BI__builtin_ia32_subps512:
+ case X86::BI__builtin_ia32_cvtsi2sd64:
+ case X86::BI__builtin_ia32_cvtsi2ss32:
+ case X86::BI__builtin_ia32_cvtsi2ss64:
+ case X86::BI__builtin_ia32_cvtusi2sd64:
+ case X86::BI__builtin_ia32_cvtusi2ss32:
+ case X86::BI__builtin_ia32_cvtusi2ss64:
+ case X86::BI__builtin_ia32_vcvtusi2sh:
+ case X86::BI__builtin_ia32_vcvtusi642sh:
+ case X86::BI__builtin_ia32_vcvtsi2sh:
+ case X86::BI__builtin_ia32_vcvtsi642sh:
+ ArgNum = 2;
+ HasRC = true;
+ break;
+ case X86::BI__builtin_ia32_cvtdq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtudq2ps512_mask:
+ case X86::BI__builtin_ia32_vcvtpd2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtps2phx512_mask:
+ case X86::BI__builtin_ia32_cvtpd2ps512_mask:
+ case X86::BI__builtin_ia32_cvtpd2dq512_mask:
+ case X86::BI__builtin_ia32_cvtpd2qq512_mask:
+ case X86::BI__builtin_ia32_cvtpd2udq512_mask:
+ case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
+ case X86::BI__builtin_ia32_cvtps2dq512_mask:
+ case X86::BI__builtin_ia32_cvtps2qq512_mask:
+ case X86::BI__builtin_ia32_cvtps2udq512_mask:
+ case X86::BI__builtin_ia32_cvtps2uqq512_mask:
+ case X86::BI__builtin_ia32_cvtqq2pd512_mask:
+ case X86::BI__builtin_ia32_cvtqq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
+ case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
+ case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtw2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtph2w512_mask:
+ case X86::BI__builtin_ia32_vcvtph2uw512_mask:
+ case X86::BI__builtin_ia32_vcvtph2dq512_mask:
+ case X86::BI__builtin_ia32_vcvtph2udq512_mask:
+ case X86::BI__builtin_ia32_vcvtph2qq512_mask:
+ case X86::BI__builtin_ia32_vcvtph2uqq512_mask:
+ case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
+ ArgNum = 3;
+ HasRC = true;
+ break;
+ case X86::BI__builtin_ia32_addsh_round_mask:
+ case X86::BI__builtin_ia32_addss_round_mask:
+ case X86::BI__builtin_ia32_addsd_round_mask:
+ case X86::BI__builtin_ia32_divsh_round_mask:
+ case X86::BI__builtin_ia32_divss_round_mask:
+ case X86::BI__builtin_ia32_divsd_round_mask:
+ case X86::BI__builtin_ia32_mulsh_round_mask:
+ case X86::BI__builtin_ia32_mulss_round_mask:
+ case X86::BI__builtin_ia32_mulsd_round_mask:
+ case X86::BI__builtin_ia32_subsh_round_mask:
+ case X86::BI__builtin_ia32_subss_round_mask:
+ case X86::BI__builtin_ia32_subsd_round_mask:
+ case X86::BI__builtin_ia32_scalefph512_mask:
+ case X86::BI__builtin_ia32_scalefpd512_mask:
+ case X86::BI__builtin_ia32_scalefps512_mask:
+ case X86::BI__builtin_ia32_scalefsd_round_mask:
+ case X86::BI__builtin_ia32_scalefss_round_mask:
+ case X86::BI__builtin_ia32_scalefsh_round_mask:
+ case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
+ case X86::BI__builtin_ia32_vcvtss2sh_round_mask:
+ case X86::BI__builtin_ia32_vcvtsd2sh_round_mask:
+ case X86::BI__builtin_ia32_sqrtsd_round_mask:
+ case X86::BI__builtin_ia32_sqrtss_round_mask:
+ case X86::BI__builtin_ia32_sqrtsh_round_mask:
+ case X86::BI__builtin_ia32_vfmaddsd3_mask:
+ case X86::BI__builtin_ia32_vfmaddsd3_maskz:
+ case X86::BI__builtin_ia32_vfmaddsd3_mask3:
+ case X86::BI__builtin_ia32_vfmaddss3_mask:
+ case X86::BI__builtin_ia32_vfmaddss3_maskz:
+ case X86::BI__builtin_ia32_vfmaddss3_mask3:
+ case X86::BI__builtin_ia32_vfmaddsh3_mask:
+ case X86::BI__builtin_ia32_vfmaddsh3_maskz:
+ case X86::BI__builtin_ia32_vfmaddsh3_mask3:
+ case X86::BI__builtin_ia32_vfmaddpd512_mask:
+ case X86::BI__builtin_ia32_vfmaddpd512_maskz:
+ case X86::BI__builtin_ia32_vfmaddpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubpd512_mask3:
+ case X86::BI__builtin_ia32_vfmaddps512_mask:
+ case X86::BI__builtin_ia32_vfmaddps512_maskz:
+ case X86::BI__builtin_ia32_vfmaddps512_mask3:
+ case X86::BI__builtin_ia32_vfmsubps512_mask3:
+ case X86::BI__builtin_ia32_vfmaddph512_mask:
+ case X86::BI__builtin_ia32_vfmaddph512_maskz:
+ case X86::BI__builtin_ia32_vfmaddph512_mask3:
+ case X86::BI__builtin_ia32_vfmsubph512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubps512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubph512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
+ case X86::BI__builtin_ia32_vfmaddcsh_mask:
+ case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
+ case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
+ case X86::BI__builtin_ia32_vfmaddcph512_mask:
+ case X86::BI__builtin_ia32_vfmaddcph512_maskz:
+ case X86::BI__builtin_ia32_vfmaddcph512_mask3:
+ case X86::BI__builtin_ia32_vfcmaddcsh_mask:
+ case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
+ case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
+ case X86::BI__builtin_ia32_vfcmaddcph512_mask:
+ case X86::BI__builtin_ia32_vfcmaddcph512_maskz:
+ case X86::BI__builtin_ia32_vfcmaddcph512_mask3:
+ case X86::BI__builtin_ia32_vfmulcsh_mask:
+ case X86::BI__builtin_ia32_vfmulcph512_mask:
+ case X86::BI__builtin_ia32_vfcmulcsh_mask:
+ case X86::BI__builtin_ia32_vfcmulcph512_mask:
+ ArgNum = 4;
+ HasRC = true;
+ break;
+ }
+
+ llvm::APSInt Result;
+
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ if (SemaRef.BuiltinConstantArg(TheCall, ArgNum, Result))
+ return true;
+
+  // Make sure the rounding mode is either ROUND_CUR_DIRECTION or that the
+  // ROUND_NO_EXC bit is set. If the intrinsic has rounding control (bits 1:0),
+  // make sure it is only combined with ROUND_NO_EXC. If the intrinsic does not
+  // have rounding control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
+ if (Result == 4 /*ROUND_CUR_DIRECTION*/ || Result == 8 /*ROUND_NO_EXC*/ ||
+ (!HasRC && Result == 12 /*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
+ (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
+ return false;
+
+ return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
+ << Arg->getSourceRange();
+}
+
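As a usage sketch (assumes an AVX-512F target, e.g. -mavx512f; the intrinsic is the standard <immintrin.h> wrapper over the checked builtin):

    #include <immintrin.h>
    __m512d add_nearest(__m512d a, __m512d b) {
      // _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC evaluates to 8, a
      // rounding-control value in the accepted [8, 11] range.
      return _mm512_add_round_pd(
          a, b, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
      // A rounding-control immediate without _MM_FROUND_NO_EXC (e.g. 1)
      // would be rejected with err_x86_builtin_invalid_rounding.
    }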
+// Check if the gather/scatter scale is legal.
+bool SemaX86::CheckBuiltinGatherScatterScale(unsigned BuiltinID,
+ CallExpr *TheCall) {
+ unsigned ArgNum = 0;
+ switch (BuiltinID) {
+ default:
+ return false;
+ case X86::BI__builtin_ia32_gatherd_pd:
+ case X86::BI__builtin_ia32_gatherd_pd256:
+ case X86::BI__builtin_ia32_gatherq_pd:
+ case X86::BI__builtin_ia32_gatherq_pd256:
+ case X86::BI__builtin_ia32_gatherd_ps:
+ case X86::BI__builtin_ia32_gatherd_ps256:
+ case X86::BI__builtin_ia32_gatherq_ps:
+ case X86::BI__builtin_ia32_gatherq_ps256:
+ case X86::BI__builtin_ia32_gatherd_q:
+ case X86::BI__builtin_ia32_gatherd_q256:
+ case X86::BI__builtin_ia32_gatherq_q:
+ case X86::BI__builtin_ia32_gatherq_q256:
+ case X86::BI__builtin_ia32_gatherd_d:
+ case X86::BI__builtin_ia32_gatherd_d256:
+ case X86::BI__builtin_ia32_gatherq_d:
+ case X86::BI__builtin_ia32_gatherq_d256:
+ case X86::BI__builtin_ia32_gather3div2df:
+ case X86::BI__builtin_ia32_gather3div2di:
+ case X86::BI__builtin_ia32_gather3div4df:
+ case X86::BI__builtin_ia32_gather3div4di:
+ case X86::BI__builtin_ia32_gather3div4sf:
+ case X86::BI__builtin_ia32_gather3div4si:
+ case X86::BI__builtin_ia32_gather3div8sf:
+ case X86::BI__builtin_ia32_gather3div8si:
+ case X86::BI__builtin_ia32_gather3siv2df:
+ case X86::BI__builtin_ia32_gather3siv2di:
+ case X86::BI__builtin_ia32_gather3siv4df:
+ case X86::BI__builtin_ia32_gather3siv4di:
+ case X86::BI__builtin_ia32_gather3siv4sf:
+ case X86::BI__builtin_ia32_gather3siv4si:
+ case X86::BI__builtin_ia32_gather3siv8sf:
+ case X86::BI__builtin_ia32_gather3siv8si:
+ case X86::BI__builtin_ia32_gathersiv8df:
+ case X86::BI__builtin_ia32_gathersiv16sf:
+ case X86::BI__builtin_ia32_gatherdiv8df:
+ case X86::BI__builtin_ia32_gatherdiv16sf:
+ case X86::BI__builtin_ia32_gathersiv8di:
+ case X86::BI__builtin_ia32_gathersiv16si:
+ case X86::BI__builtin_ia32_gatherdiv8di:
+ case X86::BI__builtin_ia32_gatherdiv16si:
+ case X86::BI__builtin_ia32_scatterdiv2df:
+ case X86::BI__builtin_ia32_scatterdiv2di:
+ case X86::BI__builtin_ia32_scatterdiv4df:
+ case X86::BI__builtin_ia32_scatterdiv4di:
+ case X86::BI__builtin_ia32_scatterdiv4sf:
+ case X86::BI__builtin_ia32_scatterdiv4si:
+ case X86::BI__builtin_ia32_scatterdiv8sf:
+ case X86::BI__builtin_ia32_scatterdiv8si:
+ case X86::BI__builtin_ia32_scattersiv2df:
+ case X86::BI__builtin_ia32_scattersiv2di:
+ case X86::BI__builtin_ia32_scattersiv4df:
+ case X86::BI__builtin_ia32_scattersiv4di:
+ case X86::BI__builtin_ia32_scattersiv4sf:
+ case X86::BI__builtin_ia32_scattersiv4si:
+ case X86::BI__builtin_ia32_scattersiv8sf:
+ case X86::BI__builtin_ia32_scattersiv8si:
+ case X86::BI__builtin_ia32_scattersiv8df:
+ case X86::BI__builtin_ia32_scattersiv16sf:
+ case X86::BI__builtin_ia32_scatterdiv8df:
+ case X86::BI__builtin_ia32_scatterdiv16sf:
+ case X86::BI__builtin_ia32_scattersiv8di:
+ case X86::BI__builtin_ia32_scattersiv16si:
+ case X86::BI__builtin_ia32_scatterdiv8di:
+ case X86::BI__builtin_ia32_scatterdiv16si:
+ ArgNum = 4;
+ break;
+ }
+
+ llvm::APSInt Result;
+
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ if (SemaRef.BuiltinConstantArg(TheCall, ArgNum, Result))
+ return true;
+
+ if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
+ return false;
+
+ return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
+ << Arg->getSourceRange();
+}
+
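A usage sketch for the scale check (again via the <immintrin.h> wrapper, assuming AVX-512F):

    #include <immintrin.h>
    __m512d load_indexed(const double *base, __m256i idx) {
      // The scale immediate must be 1, 2, 4, or 8; 8 matches sizeof(double).
      return _mm512_i32gather_pd(idx, base, 8);
      // A scale of 3 would be diagnosed with err_x86_builtin_invalid_scale.
    }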
+enum { TileRegLow = 0, TileRegHigh = 7 };
+
+bool SemaX86::CheckBuiltinTileArgumentsRange(CallExpr *TheCall,
+ ArrayRef<int> ArgNums) {
+ for (int ArgNum : ArgNums) {
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, TileRegLow,
+ TileRegHigh))
+ return true;
+ }
+ return false;
+}
+
+bool SemaX86::CheckBuiltinTileDuplicate(CallExpr *TheCall,
+ ArrayRef<int> ArgNums) {
+  // The maximum number of tile registers is TileRegHigh + 1, so we use one
+  // bit in the bitset to track the usage of each of them.
+ std::bitset<TileRegHigh + 1> ArgValues;
+ for (int ArgNum : ArgNums) {
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ continue;
+
+ llvm::APSInt Result;
+ if (SemaRef.BuiltinConstantArg(TheCall, ArgNum, Result))
+ return true;
+ int ArgExtValue = Result.getExtValue();
+ assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
+ "Incorrect tile register num.");
+ if (ArgValues.test(ArgExtValue))
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_x86_builtin_tile_arg_duplicate)
+ << TheCall->getArg(ArgNum)->getSourceRange();
+ ArgValues.set(ArgExtValue);
+ }
+ return false;
+}
+
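A usage sketch for the tile checks (assumes an AMX-enabled target, e.g. -mamx-tile -mamx-int8; _tile_zero and _tile_dpbssd are the <immintrin.h> wrappers over the checked builtins):

    #include <immintrin.h>
    void tile_sketch() {
      _tile_zero(0);         // tile number must be an immediate in [0, 7]
      _tile_dpbssd(0, 1, 2); // accepted: three distinct tile registers
      // _tile_dpbssd(0, 1, 1) would hit err_x86_builtin_tile_arg_duplicate.
    }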
+bool SemaX86::CheckBuiltinTileRangeAndDuplicate(CallExpr *TheCall,
+ ArrayRef<int> ArgNums) {
+ return CheckBuiltinTileArgumentsRange(TheCall, ArgNums) ||
+ CheckBuiltinTileDuplicate(TheCall, ArgNums);
+}
+
+bool SemaX86::CheckBuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
+ switch (BuiltinID) {
+ default:
+ return false;
+ case X86::BI__builtin_ia32_tileloadd64:
+ case X86::BI__builtin_ia32_tileloaddt164:
+ case X86::BI__builtin_ia32_tilestored64:
+ case X86::BI__builtin_ia32_tilezero:
+ return CheckBuiltinTileArgumentsRange(TheCall, 0);
+ case X86::BI__builtin_ia32_tdpbssd:
+ case X86::BI__builtin_ia32_tdpbsud:
+ case X86::BI__builtin_ia32_tdpbusd:
+ case X86::BI__builtin_ia32_tdpbuud:
+ case X86::BI__builtin_ia32_tdpbf16ps:
+ case X86::BI__builtin_ia32_tdpfp16ps:
+ case X86::BI__builtin_ia32_tcmmimfp16ps:
+ case X86::BI__builtin_ia32_tcmmrlfp16ps:
+ return CheckBuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
+ }
+}
+
+static bool isX86_32Builtin(unsigned BuiltinID) {
+ // These builtins only work on x86-32 targets.
+ switch (BuiltinID) {
+ case X86::BI__builtin_ia32_readeflags_u32:
+ case X86::BI__builtin_ia32_writeeflags_u32:
+ return true;
+ }
+
+ return false;
+}
+
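A usage sketch (this builtin is accepted only when targeting x86-32):

    unsigned read_flags() {
      // On any non-x86-32 target this call is rejected with
      // err_32_bit_builtin_64_bit_tgt.
      return __builtin_ia32_readeflags_u32();
    }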
+bool SemaX86::CheckBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall) {
+ // Check for 32-bit only builtins on a 64-bit target.
+ const llvm::Triple &TT = TI.getTriple();
+ if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
+ return Diag(TheCall->getCallee()->getBeginLoc(),
+ diag::err_32_bit_builtin_64_bit_tgt);
+
+  // If the intrinsic has rounding or SAE, make sure it's valid.
+ if (CheckBuiltinRoundingOrSAE(BuiltinID, TheCall))
+ return true;
+
+  // If the intrinsic has a gather/scatter scale immediate, make sure it's valid.
+ if (CheckBuiltinGatherScatterScale(BuiltinID, TheCall))
+ return true;
+
+  // If the intrinsic has tile arguments, make sure they are valid.
+ if (CheckBuiltinTileArguments(BuiltinID, TheCall))
+ return true;
+
+  // For intrinsics that take an immediate value as part of the instruction,
+  // range-check them here.
+ int i = 0, l = 0, u = 0;
+ switch (BuiltinID) {
+ default:
+ return false;
+ case X86::BI__builtin_ia32_vec_ext_v2si:
+ case X86::BI__builtin_ia32_vec_ext_v2di:
+ case X86::BI__builtin_ia32_vextractf128_pd256:
+ case X86::BI__builtin_ia32_vextractf128_ps256:
+ case X86::BI__builtin_ia32_vextractf128_si256:
+ case X86::BI__builtin_ia32_extract128i256:
+ case X86::BI__builtin_ia32_extractf64x4_mask:
+ case X86::BI__builtin_ia32_extracti64x4_mask:
+ case X86::BI__builtin_ia32_extractf32x8_mask:
+ case X86::BI__builtin_ia32_extracti32x8_mask:
+ case X86::BI__builtin_ia32_extractf64x2_256_mask:
+ case X86::BI__builtin_ia32_extracti64x2_256_mask:
+ case X86::BI__builtin_ia32_extractf32x4_256_mask:
+ case X86::BI__builtin_ia32_extracti32x4_256_mask:
+ i = 1;
+ l = 0;
+ u = 1;
+ break;
+ case X86::BI__builtin_ia32_vec_set_v2di:
+ case X86::BI__builtin_ia32_vinsertf128_pd256:
+ case X86::BI__builtin_ia32_vinsertf128_ps256:
+ case X86::BI__builtin_ia32_vinsertf128_si256:
+ case X86::BI__builtin_ia32_insert128i256:
+ case X86::BI__builtin_ia32_insertf32x8:
+ case X86::BI__builtin_ia32_inserti32x8:
+ case X86::BI__builtin_ia32_insertf64x4:
+ case X86::BI__builtin_ia32_inserti64x4:
+ case X86::BI__builtin_ia32_insertf64x2_256:
+ case X86::BI__builtin_ia32_inserti64x2_256:
+ case X86::BI__builtin_ia32_insertf32x4_256:
+ case X86::BI__builtin_ia32_inserti32x4_256:
+ i = 2;
+ l = 0;
+ u = 1;
+ break;
+ case X86::BI__builtin_ia32_vpermilpd:
+ case X86::BI__builtin_ia32_vec_ext_v4hi:
+ case X86::BI__builtin_ia32_vec_ext_v4si:
+ case X86::BI__builtin_ia32_vec_ext_v4sf:
+ case X86::BI__builtin_ia32_vec_ext_v4di:
+ case X86::BI__builtin_ia32_extractf32x4_mask:
+ case X86::BI__builtin_ia32_extracti32x4_mask:
+ case X86::BI__builtin_ia32_extractf64x2_512_mask:
+ case X86::BI__builtin_ia32_extracti64x2_512_mask:
+ i = 1;
+ l = 0;
+ u = 3;
+ break;
+ case X86::BI_mm_prefetch:
+ case X86::BI__builtin_ia32_vec_ext_v8hi:
+ case X86::BI__builtin_ia32_vec_ext_v8si:
+ i = 1;
+ l = 0;
+ u = 7;
+ break;
+ case X86::BI__builtin_ia32_sha1rnds4:
+ case X86::BI__builtin_ia32_blendpd:
+ case X86::BI__builtin_ia32_shufpd:
+ case X86::BI__builtin_ia32_vec_set_v4hi:
+ case X86::BI__builtin_ia32_vec_set_v4si:
+ case X86::BI__builtin_ia32_vec_set_v4di:
+ case X86::BI__builtin_ia32_shuf_f32x4_256:
+ case X86::BI__builtin_ia32_shuf_f64x2_256:
+ case X86::BI__builtin_ia32_shuf_i32x4_256:
+ case X86::BI__builtin_ia32_shuf_i64x2_256:
+ case X86::BI__builtin_ia32_insertf64x2_512:
+ case X86::BI__builtin_ia32_inserti64x2_512:
+ case X86::BI__builtin_ia32_insertf32x4:
+ case X86::BI__builtin_ia32_inserti32x4:
+ i = 2;
+ l = 0;
+ u = 3;
+ break;
+ case X86::BI__builtin_ia32_vpermil2pd:
+ case X86::BI__builtin_ia32_vpermil2pd256:
+ case X86::BI__builtin_ia32_vpermil2ps:
+ case X86::BI__builtin_ia32_vpermil2ps256:
+ i = 3;
+ l = 0;
+ u = 3;
+ break;
+ case X86::BI__builtin_ia32_cmpb128_mask:
+ case X86::BI__builtin_ia32_cmpw128_mask:
+ case X86::BI__builtin_ia32_cmpd128_mask:
+ case X86::BI__builtin_ia32_cmpq128_mask:
+ case X86::BI__builtin_ia32_cmpb256_mask:
+ case X86::BI__builtin_ia32_cmpw256_mask:
+ case X86::BI__builtin_ia32_cmpd256_mask:
+ case X86::BI__builtin_ia32_cmpq256_mask:
+ case X86::BI__builtin_ia32_cmpb512_mask:
+ case X86::BI__builtin_ia32_cmpw512_mask:
+ case X86::BI__builtin_ia32_cmpd512_mask:
+ case X86::BI__builtin_ia32_cmpq512_mask:
+ case X86::BI__builtin_ia32_ucmpb128_mask:
+ case X86::BI__builtin_ia32_ucmpw128_mask:
+ case X86::BI__builtin_ia32_ucmpd128_mask:
+ case X86::BI__builtin_ia32_ucmpq128_mask:
+ case X86::BI__builtin_ia32_ucmpb256_mask:
+ case X86::BI__builtin_ia32_ucmpw256_mask:
+ case X86::BI__builtin_ia32_ucmpd256_mask:
+ case X86::BI__builtin_ia32_ucmpq256_mask:
+ case X86::BI__builtin_ia32_ucmpb512_mask:
+ case X86::BI__builtin_ia32_ucmpw512_mask:
+ case X86::BI__builtin_ia32_ucmpd512_mask:
+ case X86::BI__builtin_ia32_ucmpq512_mask:
+ case X86::BI__builtin_ia32_vpcomub:
+ case X86::BI__builtin_ia32_vpcomuw:
+ case X86::BI__builtin_ia32_vpcomud:
+ case X86::BI__builtin_ia32_vpcomuq:
+ case X86::BI__builtin_ia32_vpcomb:
+ case X86::BI__builtin_ia32_vpcomw:
+ case X86::BI__builtin_ia32_vpcomd:
+ case X86::BI__builtin_ia32_vpcomq:
+ case X86::BI__builtin_ia32_vec_set_v8hi:
+ case X86::BI__builtin_ia32_vec_set_v8si:
+ i = 2;
+ l = 0;
+ u = 7;
+ break;
+ case X86::BI__builtin_ia32_vpermilpd256:
+ case X86::BI__builtin_ia32_roundps:
+ case X86::BI__builtin_ia32_roundpd:
+ case X86::BI__builtin_ia32_roundps256:
+ case X86::BI__builtin_ia32_roundpd256:
+ case X86::BI__builtin_ia32_getmantpd128_mask:
+ case X86::BI__builtin_ia32_getmantpd256_mask:
+ case X86::BI__builtin_ia32_getmantps128_mask:
+ case X86::BI__builtin_ia32_getmantps256_mask:
+ case X86::BI__builtin_ia32_getmantpd512_mask:
+ case X86::BI__builtin_ia32_getmantps512_mask:
+ case X86::BI__builtin_ia32_getmantph128_mask:
+ case X86::BI__builtin_ia32_getmantph256_mask:
+ case X86::BI__builtin_ia32_getmantph512_mask:
+ case X86::BI__builtin_ia32_vec_ext_v16qi:
+ case X86::BI__builtin_ia32_vec_ext_v16hi:
+ i = 1;
+ l = 0;
+ u = 15;
+ break;
+ case X86::BI__builtin_ia32_pblendd128:
+ case X86::BI__builtin_ia32_blendps:
+ case X86::BI__builtin_ia32_blendpd256:
+ case X86::BI__builtin_ia32_shufpd256:
+ case X86::BI__builtin_ia32_roundss:
+ case X86::BI__builtin_ia32_roundsd:
+ case X86::BI__builtin_ia32_rangepd128_mask:
+ case X86::BI__builtin_ia32_rangepd256_mask:
+ case X86::BI__builtin_ia32_rangepd512_mask:
+ case X86::BI__builtin_ia32_rangeps128_mask:
+ case X86::BI__builtin_ia32_rangeps256_mask:
+ case X86::BI__builtin_ia32_rangeps512_mask:
+ case X86::BI__builtin_ia32_getmantsd_round_mask:
+ case X86::BI__builtin_ia32_getmantss_round_mask:
+ case X86::BI__builtin_ia32_getmantsh_round_mask:
+ case X86::BI__builtin_ia32_vec_set_v16qi:
+ case X86::BI__builtin_ia32_vec_set_v16hi:
+ i = 2;
+ l = 0;
+ u = 15;
+ break;
+ case X86::BI__builtin_ia32_vec_ext_v32qi:
+ i = 1;
+ l = 0;
+ u = 31;
+ break;
+ case X86::BI__builtin_ia32_cmpps:
+ case X86::BI__builtin_ia32_cmpss:
+ case X86::BI__builtin_ia32_cmppd:
+ case X86::BI__builtin_ia32_cmpsd:
+ case X86::BI__builtin_ia32_cmpps256:
+ case X86::BI__builtin_ia32_cmppd256:
+ case X86::BI__builtin_ia32_cmpps128_mask:
+ case X86::BI__builtin_ia32_cmppd128_mask:
+ case X86::BI__builtin_ia32_cmpps256_mask:
+ case X86::BI__builtin_ia32_cmppd256_mask:
+ case X86::BI__builtin_ia32_cmpps512_mask:
+ case X86::BI__builtin_ia32_cmppd512_mask:
+ case X86::BI__builtin_ia32_cmpsd_mask:
+ case X86::BI__builtin_ia32_cmpss_mask:
+ case X86::BI__builtin_ia32_vec_set_v32qi:
+ i = 2;
+ l = 0;
+ u = 31;
+ break;
+ case X86::BI__builtin_ia32_permdf256:
+ case X86::BI__builtin_ia32_permdi256:
+ case X86::BI__builtin_ia32_permdf512:
+ case X86::BI__builtin_ia32_permdi512:
+ case X86::BI__builtin_ia32_vpermilps:
+ case X86::BI__builtin_ia32_vpermilps256:
+ case X86::BI__builtin_ia32_vpermilpd512:
+ case X86::BI__builtin_ia32_vpermilps512:
+ case X86::BI__builtin_ia32_pshufd:
+ case X86::BI__builtin_ia32_pshufd256:
+ case X86::BI__builtin_ia32_pshufd512:
+ case X86::BI__builtin_ia32_pshufhw:
+ case X86::BI__builtin_ia32_pshufhw256:
+ case X86::BI__builtin_ia32_pshufhw512:
+ case X86::BI__builtin_ia32_pshuflw:
+ case X86::BI__builtin_ia32_pshuflw256:
+ case X86::BI__builtin_ia32_pshuflw512:
+ case X86::BI__builtin_ia32_vcvtps2ph:
+ case X86::BI__builtin_ia32_vcvtps2ph_mask:
+ case X86::BI__builtin_ia32_vcvtps2ph256:
+ case X86::BI__builtin_ia32_vcvtps2ph256_mask:
+ case X86::BI__builtin_ia32_vcvtps2ph512_mask:
+ case X86::BI__builtin_ia32_rndscaleps_128_mask:
+ case X86::BI__builtin_ia32_rndscalepd_128_mask:
+ case X86::BI__builtin_ia32_rndscaleps_256_mask:
+ case X86::BI__builtin_ia32_rndscalepd_256_mask:
+ case X86::BI__builtin_ia32_rndscaleps_mask:
+ case X86::BI__builtin_ia32_rndscalepd_mask:
+ case X86::BI__builtin_ia32_rndscaleph_mask:
+ case X86::BI__builtin_ia32_reducepd128_mask:
+ case X86::BI__builtin_ia32_reducepd256_mask:
+ case X86::BI__builtin_ia32_reducepd512_mask:
+ case X86::BI__builtin_ia32_reduceps128_mask:
+ case X86::BI__builtin_ia32_reduceps256_mask:
+ case X86::BI__builtin_ia32_reduceps512_mask:
+ case X86::BI__builtin_ia32_reduceph128_mask:
+ case X86::BI__builtin_ia32_reduceph256_mask:
+ case X86::BI__builtin_ia32_reduceph512_mask:
+ case X86::BI__builtin_ia32_prold512:
+ case X86::BI__builtin_ia32_prolq512:
+ case X86::BI__builtin_ia32_prold128:
+ case X86::BI__builtin_ia32_prold256:
+ case X86::BI__builtin_ia32_prolq128:
+ case X86::BI__builtin_ia32_prolq256:
+ case X86::BI__builtin_ia32_prord512:
+ case X86::BI__builtin_ia32_prorq512:
+ case X86::BI__builtin_ia32_prord128:
+ case X86::BI__builtin_ia32_prord256:
+ case X86::BI__builtin_ia32_prorq128:
+ case X86::BI__builtin_ia32_prorq256:
+ case X86::BI__builtin_ia32_fpclasspd128_mask:
+ case X86::BI__builtin_ia32_fpclasspd256_mask:
+ case X86::BI__builtin_ia32_fpclassps128_mask:
+ case X86::BI__builtin_ia32_fpclassps256_mask:
+ case X86::BI__builtin_ia32_fpclassps512_mask:
+ case X86::BI__builtin_ia32_fpclasspd512_mask:
+ case X86::BI__builtin_ia32_fpclassph128_mask:
+ case X86::BI__builtin_ia32_fpclassph256_mask:
+ case X86::BI__builtin_ia32_fpclassph512_mask:
+ case X86::BI__builtin_ia32_fpclasssd_mask:
+ case X86::BI__builtin_ia32_fpclassss_mask:
+ case X86::BI__builtin_ia32_fpclasssh_mask:
+ case X86::BI__builtin_ia32_pslldqi128_byteshift:
+ case X86::BI__builtin_ia32_pslldqi256_byteshift:
+ case X86::BI__builtin_ia32_pslldqi512_byteshift:
+ case X86::BI__builtin_ia32_psrldqi128_byteshift:
+ case X86::BI__builtin_ia32_psrldqi256_byteshift:
+ case X86::BI__builtin_ia32_psrldqi512_byteshift:
+ case X86::BI__builtin_ia32_kshiftliqi:
+ case X86::BI__builtin_ia32_kshiftlihi:
+ case X86::BI__builtin_ia32_kshiftlisi:
+ case X86::BI__builtin_ia32_kshiftlidi:
+ case X86::BI__builtin_ia32_kshiftriqi:
+ case X86::BI__builtin_ia32_kshiftrihi:
+ case X86::BI__builtin_ia32_kshiftrisi:
+ case X86::BI__builtin_ia32_kshiftridi:
+ i = 1;
+ l = 0;
+ u = 255;
+ break;
+ case X86::BI__builtin_ia32_vperm2f128_pd256:
+ case X86::BI__builtin_ia32_vperm2f128_ps256:
+ case X86::BI__builtin_ia32_vperm2f128_si256:
+ case X86::BI__builtin_ia32_permti256:
+ case X86::BI__builtin_ia32_pblendw128:
+ case X86::BI__builtin_ia32_pblendw256:
+ case X86::BI__builtin_ia32_blendps256:
+ case X86::BI__builtin_ia32_pblendd256:
+ case X86::BI__builtin_ia32_palignr128:
+ case X86::BI__builtin_ia32_palignr256:
+ case X86::BI__builtin_ia32_palignr512:
+ case X86::BI__builtin_ia32_alignq512:
+ case X86::BI__builtin_ia32_alignd512:
+ case X86::BI__builtin_ia32_alignd128:
+ case X86::BI__builtin_ia32_alignd256:
+ case X86::BI__builtin_ia32_alignq128:
+ case X86::BI__builtin_ia32_alignq256:
+ case X86::BI__builtin_ia32_vcomisd:
+ case X86::BI__builtin_ia32_vcomiss:
+ case X86::BI__builtin_ia32_shuf_f32x4:
+ case X86::BI__builtin_ia32_shuf_f64x2:
+ case X86::BI__builtin_ia32_shuf_i32x4:
+ case X86::BI__builtin_ia32_shuf_i64x2:
+ case X86::BI__builtin_ia32_shufpd512:
+ case X86::BI__builtin_ia32_shufps:
+ case X86::BI__builtin_ia32_shufps256:
+ case X86::BI__builtin_ia32_shufps512:
+ case X86::BI__builtin_ia32_dbpsadbw128:
+ case X86::BI__builtin_ia32_dbpsadbw256:
+ case X86::BI__builtin_ia32_dbpsadbw512:
+ case X86::BI__builtin_ia32_vpshldd128:
+ case X86::BI__builtin_ia32_vpshldd256:
+ case X86::BI__builtin_ia32_vpshldd512:
+ case X86::BI__builtin_ia32_vpshldq128:
+ case X86::BI__builtin_ia32_vpshldq256:
+ case X86::BI__builtin_ia32_vpshldq512:
+ case X86::BI__builtin_ia32_vpshldw128:
+ case X86::BI__builtin_ia32_vpshldw256:
+ case X86::BI__builtin_ia32_vpshldw512:
+ case X86::BI__builtin_ia32_vpshrdd128:
+ case X86::BI__builtin_ia32_vpshrdd256:
+ case X86::BI__builtin_ia32_vpshrdd512:
+ case X86::BI__builtin_ia32_vpshrdq128:
+ case X86::BI__builtin_ia32_vpshrdq256:
+ case X86::BI__builtin_ia32_vpshrdq512:
+ case X86::BI__builtin_ia32_vpshrdw128:
+ case X86::BI__builtin_ia32_vpshrdw256:
+ case X86::BI__builtin_ia32_vpshrdw512:
+ i = 2;
+ l = 0;
+ u = 255;
+ break;
+ case X86::BI__builtin_ia32_fixupimmpd512_mask:
+ case X86::BI__builtin_ia32_fixupimmpd512_maskz:
+ case X86::BI__builtin_ia32_fixupimmps512_mask:
+ case X86::BI__builtin_ia32_fixupimmps512_maskz:
+ case X86::BI__builtin_ia32_fixupimmsd_mask:
+ case X86::BI__builtin_ia32_fixupimmsd_maskz:
+ case X86::BI__builtin_ia32_fixupimmss_mask:
+ case X86::BI__builtin_ia32_fixupimmss_maskz:
+ case X86::BI__builtin_ia32_fixupimmpd128_mask:
+ case X86::BI__builtin_ia32_fixupimmpd128_maskz:
+ case X86::BI__builtin_ia32_fixupimmpd256_mask:
+ case X86::BI__builtin_ia32_fixupimmpd256_maskz:
+ case X86::BI__builtin_ia32_fixupimmps128_mask:
+ case X86::BI__builtin_ia32_fixupimmps128_maskz:
+ case X86::BI__builtin_ia32_fixupimmps256_mask:
+ case X86::BI__builtin_ia32_fixupimmps256_maskz:
+ case X86::BI__builtin_ia32_pternlogd512_mask:
+ case X86::BI__builtin_ia32_pternlogd512_maskz:
+ case X86::BI__builtin_ia32_pternlogq512_mask:
+ case X86::BI__builtin_ia32_pternlogq512_maskz:
+ case X86::BI__builtin_ia32_pternlogd128_mask:
+ case X86::BI__builtin_ia32_pternlogd128_maskz:
+ case X86::BI__builtin_ia32_pternlogd256_mask:
+ case X86::BI__builtin_ia32_pternlogd256_maskz:
+ case X86::BI__builtin_ia32_pternlogq128_mask:
+ case X86::BI__builtin_ia32_pternlogq128_maskz:
+ case X86::BI__builtin_ia32_pternlogq256_mask:
+ case X86::BI__builtin_ia32_pternlogq256_maskz:
+ case X86::BI__builtin_ia32_vsm3rnds2:
+ i = 3;
+ l = 0;
+ u = 255;
+ break;
+ case X86::BI__builtin_ia32_reducesd_mask:
+ case X86::BI__builtin_ia32_reducess_mask:
+ case X86::BI__builtin_ia32_rndscalesd_round_mask:
+ case X86::BI__builtin_ia32_rndscaless_round_mask:
+ case X86::BI__builtin_ia32_rndscalesh_round_mask:
+ case X86::BI__builtin_ia32_reducesh_mask:
+ i = 4;
+ l = 0;
+ u = 255;
+ break;
+ case X86::BI__builtin_ia32_cmpccxadd32:
+ case X86::BI__builtin_ia32_cmpccxadd64:
+ i = 3;
+ l = 0;
+ u = 15;
+ break;
+ }
+
+ // Note that we don't force a hard error on the range check here, allowing
+ // template-generated or macro-generated dead code to have out-of-range
+ // values. Such code still needs to code-generate, but doesn't necessarily
+ // need to make any sense. We use a warning that defaults to an error.
+ return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u,
+ /*RangeIsError*/ false);
+}
+
+} // namespace clang
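
For a sense of what the range check above enforces, here is a minimal illustration (not part of the patch; the builtin and vector type are chosen for the example). `__builtin_ia32_vec_ext_v4si` falls into the `i = 1, l = 0, u = 3` bucket, so an immediate outside [0, 3] is diagnosed with the warning-that-defaults-to-error described in the comment:

typedef int v4si __attribute__((vector_size(16)));

int extract_ok(v4si V) {
  return __builtin_ia32_vec_ext_v4si(V, 3); // immediate in [0, 3]: accepted
}

int extract_bad(v4si V) {
  return __builtin_ia32_vec_ext_v4si(V, 4); // out of range: diagnosed, but
                                            // only as an error-by-default
                                            // warning, per the comment above
}
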
diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h
index 29444f0edc2a..dee335b52699 100644
--- a/clang/lib/Sema/TreeTransform.h
+++ b/clang/lib/Sema/TreeTransform.h
@@ -42,6 +42,7 @@
#include "clang/Sema/SemaObjC.h"
#include "clang/Sema/SemaOpenACC.h"
#include "clang/Sema/SemaOpenMP.h"
+#include "clang/Sema/SemaPseudoObject.h"
#include "clang/Sema/SemaSYCL.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/ErrorHandling.h"
@@ -3478,11 +3479,11 @@ public:
SS.Adopt(QualifierLoc);
if (TemplateArgs || TemplateKWLoc.isValid())
- return getSema().BuildQualifiedTemplateIdExpr(SS, TemplateKWLoc, NameInfo,
- TemplateArgs);
+ return getSema().BuildQualifiedTemplateIdExpr(
+ SS, TemplateKWLoc, NameInfo, TemplateArgs, IsAddressOfOperand);
return getSema().BuildQualifiedDeclarationNameExpr(
- SS, NameInfo, IsAddressOfOperand, /*S*/nullptr, RecoveryTSI);
+ SS, NameInfo, IsAddressOfOperand, RecoveryTSI);
}
/// Build a new template-id expression.
@@ -4818,14 +4819,6 @@ bool TreeTransform<Derived>::TransformTemplateArguments(
TemplateArgumentLoc In = *First;
if (In.getArgument().getKind() == TemplateArgument::Pack) {
- // When building the deduction guides, we rewrite the argument packs
- // instead of unpacking.
- if (getSema().CodeSynthesisContexts.back().Kind ==
- Sema::CodeSynthesisContext::BuildingDeductionGuides) {
- if (getDerived().TransformTemplateArgument(In, Out, Uneval))
- return true;
- continue;
- }
+ // Unpack argument packs, which we translate into separate
+ // arguments.
// FIXME: We could do much better if we could guarantee that the
@@ -11133,7 +11126,8 @@ class OpenACCClauseTransform final
if (!Res.isUsable())
continue;
- Res = Self.getSema().OpenACC().ActOnVar(Res.get());
+ Res = Self.getSema().OpenACC().ActOnVar(ParsedClause.getClauseKind(),
+ Res.get());
if (Res.isUsable())
InstantiatedVarList.push_back(Res.get());
@@ -11493,6 +11487,24 @@ void OpenACCClauseTransform<Derived>::VisitDeviceTypeClause(
ParsedClause.getBeginLoc(), ParsedClause.getLParenLoc(),
C.getArchitectures(), ParsedClause.getEndLoc());
}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitReductionClause(
+ const OpenACCReductionClause &C) {
+ SmallVector<Expr *> TransformedVars = VisitVarList(C.getVarList());
+ SmallVector<Expr *> ValidVars;
+
+ for (Expr *Var : TransformedVars) {
+ ExprResult Res = Self.getSema().OpenACC().CheckReductionVar(Var);
+ if (Res.isUsable())
+ ValidVars.push_back(Res.get());
+ }
+
+ NewClause = OpenACCReductionClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getBeginLoc(),
+ ParsedClause.getLParenLoc(), C.getReductionOp(), ValidVars,
+ ParsedClause.getEndLoc());
+}
} // namespace
template <typename Derived>
OpenACCClause *TreeTransform<Derived>::TransformOpenACCClause(
@@ -11890,7 +11902,7 @@ TreeTransform<Derived>::TransformPseudoObjectExpr(PseudoObjectExpr *E) {
// better solution (rebuilding the semantic expressions and
// rebinding OVEs as necessary) doesn't work; we'd need
// TreeTransform to not strip away implicit conversions.
- Expr *newSyntacticForm = SemaRef.recreateSyntacticForm(E);
+ Expr *newSyntacticForm = SemaRef.PseudoObject().recreateSyntacticForm(E);
ExprResult result = getDerived().TransformExpr(newSyntacticForm);
if (result.isInvalid()) return ExprError();
@@ -11898,7 +11910,7 @@ TreeTransform<Derived>::TransformPseudoObjectExpr(PseudoObjectExpr *E) {
// expression must have been an lvalue-to-rvalue conversion which we
// should reapply.
if (result.get()->hasPlaceholderType(BuiltinType::PseudoObject))
- result = SemaRef.checkPseudoObjectRValue(result.get());
+ result = SemaRef.PseudoObject().checkRValue(result.get());
return result;
}
@@ -14102,6 +14114,13 @@ TreeTransform<Derived>::TransformCXXTemporaryObjectExpr(
if (TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
&ArgumentChanged))
return ExprError();
+
+ if (E->isListInitialization() && !E->isStdInitListInitialization()) {
+ ExprResult Res = RebuildInitList(E->getBeginLoc(), Args, E->getEndLoc());
+ if (Res.isInvalid())
+ return ExprError();
+ Args = {Res.get()};
+ }
}
if (!getDerived().AlwaysRebuild() &&
@@ -14113,12 +14132,9 @@ TreeTransform<Derived>::TransformCXXTemporaryObjectExpr(
return SemaRef.MaybeBindToTemporary(E);
}
- // FIXME: We should just pass E->isListInitialization(), but we're not
- // prepared to handle list-initialization without a child InitListExpr.
SourceLocation LParenLoc = T->getTypeLoc().getEndLoc();
return getDerived().RebuildCXXTemporaryObjectExpr(
- T, LParenLoc, Args, E->getEndLoc(),
- /*ListInitialization=*/LParenLoc.isInvalid());
+ T, LParenLoc, Args, E->getEndLoc(), E->isListInitialization());
}
template<typename Derived>
@@ -14975,7 +14991,7 @@ TreeTransform<Derived>::TransformPackIndexingExpr(PackIndexingExpr *E) {
return ExprError();
SmallVector<Expr *, 5> ExpandedExprs;
- if (E->getExpressions().empty()) {
+ if (!E->expandsToEmptyPack() && E->getExpressions().empty()) {
Expr *Pattern = E->getPackIdExpression();
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
getSema().collectUnexpandedParameterPacks(E->getPackIdExpression(),
@@ -15029,9 +15045,7 @@ TreeTransform<Derived>::TransformPackIndexingExpr(PackIndexingExpr *E) {
return true;
ExpandedExprs.push_back(Out.get());
}
- }
-
- else {
+ } else if (!E->expandsToEmptyPack()) {
if (getDerived().TransformExprs(E->getExpressions().data(),
E->getExpressions().size(), false,
ExpandedExprs))
@@ -16196,8 +16210,8 @@ ExprResult TreeTransform<Derived>::RebuildCXXOperatorCallExpr(
if (First->getObjectKind() == OK_ObjCProperty) {
BinaryOperatorKind Opc = BinaryOperator::getOverloadedOpcode(Op);
if (BinaryOperator::isAssignmentOp(Opc))
- return SemaRef.checkPseudoObjectAssignment(/*Scope=*/nullptr, OpLoc, Opc,
- First, Second);
+ return SemaRef.PseudoObject().checkAssignment(/*Scope=*/nullptr, OpLoc,
+ Opc, First, Second);
ExprResult Result = SemaRef.CheckPlaceholderExpr(First);
if (Result.isInvalid())
return ExprError();
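
For context on the new VisitReductionClause (illustrative source, not from the patch): re-running CheckReductionVar on each transformed variable matters when the clause appears in a template, where the reduction variable is dependent and can only be validated per instantiation:

template <typename T>
T sum(const T *Data, int N) {
  T Total = T();
  // 'Total' is type-dependent here; the transformed reduction clause
  // re-checks it once T is known.
#pragma acc parallel loop reduction(+ : Total)
  for (int I = 0; I < N; ++I)
    Total += Data[I];
  return Total;
}
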
diff --git a/clang/lib/Serialization/ASTCommon.h b/clang/lib/Serialization/ASTCommon.h
index 296642e3674a..0230908d3e05 100644
--- a/clang/lib/Serialization/ASTCommon.h
+++ b/clang/lib/Serialization/ASTCommon.h
@@ -46,30 +46,6 @@ enum DeclUpdateKind {
TypeIdx TypeIdxFromBuiltin(const BuiltinType *BT);
-template <typename IdxForTypeTy>
-TypeID MakeTypeID(ASTContext &Context, QualType T, IdxForTypeTy IdxForType) {
- if (T.isNull())
- return PREDEF_TYPE_NULL_ID;
-
- unsigned FastQuals = T.getLocalFastQualifiers();
- T.removeLocalFastQualifiers();
-
- if (T.hasLocalNonFastQualifiers())
- return IdxForType(T).asTypeID(FastQuals);
-
- assert(!T.hasLocalQualifiers());
-
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(T.getTypePtr()))
- return TypeIdxFromBuiltin(BT).asTypeID(FastQuals);
-
- if (T == Context.AutoDeductTy)
- return TypeIdx(PREDEF_TYPE_AUTO_DEDUCT).asTypeID(FastQuals);
- if (T == Context.AutoRRefDeductTy)
- return TypeIdx(PREDEF_TYPE_AUTO_RREF_DEDUCT).asTypeID(FastQuals);
-
- return IdxForType(T).asTypeID(FastQuals);
-}
-
unsigned ComputeHash(Selector Sel);
/// Retrieve the "definitive" declaration that provides all of the
diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp
index f50f9569c0a5..4a6e1d23161b 100644
--- a/clang/lib/Serialization/ASTReader.cpp
+++ b/clang/lib/Serialization/ASTReader.cpp
@@ -1264,7 +1264,7 @@ bool ASTReader::ReadLexicalDeclContextStorage(ModuleFile &M,
if (!Lex.first) {
Lex = std::make_pair(
&M, llvm::ArrayRef(
- reinterpret_cast<const unalighed_decl_id_t *>(Blob.data()),
+ reinterpret_cast<const unaligned_decl_id_t *>(Blob.data()),
Blob.size() / sizeof(DeclID)));
}
DC->setHasExternalLexicalStorage(true);
@@ -3401,7 +3401,7 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
case TU_UPDATE_LEXICAL: {
DeclContext *TU = ContextObj->getTranslationUnitDecl();
LexicalContents Contents(
- reinterpret_cast<const unalighed_decl_id_t *>(Blob.data()),
+ reinterpret_cast<const unaligned_decl_id_t *>(Blob.data()),
static_cast<unsigned int>(Blob.size() / sizeof(DeclID)));
TULexicalDecls.push_back(std::make_pair(&F, Contents));
TU->setHasExternalLexicalStorage(true);
@@ -4059,7 +4059,7 @@ void ASTReader::ReadModuleOffsetMap(ModuleFile &F) const {
RemapBuilder DeclRemap(F.DeclRemap);
RemapBuilder TypeRemap(F.TypeRemap);
- auto &ImportedModuleVector = F.DependentModules;
+ auto &ImportedModuleVector = F.TransitiveImports;
assert(ImportedModuleVector.empty());
while (Data < DataEnd) {
@@ -11921,6 +11921,13 @@ OpenACCClause *ASTRecordReader::readOpenACCClause() {
return OpenACCDeviceTypeClause::Create(getContext(), ClauseKind, BeginLoc,
LParenLoc, Archs, EndLoc);
}
+ case OpenACCClauseKind::Reduction: {
+ SourceLocation LParenLoc = readSourceLocation();
+ OpenACCReductionOperator Op = readEnum<OpenACCReductionOperator>();
+ llvm::SmallVector<Expr *> VarList = readOpenACCVarList();
+ return OpenACCReductionClause::Create(getContext(), BeginLoc, LParenLoc, Op,
+ VarList, EndLoc);
+ }
case OpenACCClauseKind::Finalize:
case OpenACCClauseKind::IfPresent:
@@ -11937,7 +11944,6 @@ OpenACCClause *ASTRecordReader::readOpenACCClause() {
case OpenACCClauseKind::DeviceResident:
case OpenACCClauseKind::Host:
case OpenACCClauseKind::Link:
- case OpenACCClauseKind::Reduction:
case OpenACCClauseKind::Collapse:
case OpenACCClauseKind::Bind:
case OpenACCClauseKind::DeviceNum:
diff --git a/clang/lib/Serialization/ASTReaderDecl.cpp b/clang/lib/Serialization/ASTReaderDecl.cpp
index 0c647086e304..61cc99d4df68 100644
--- a/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -2695,7 +2695,8 @@ void ASTDeclReader::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
}
if (Record.readInt())
- D->setDefaultArgument(readTypeSourceInfo());
+ D->setDefaultArgument(Reader.getContext(),
+ Record.readTemplateArgumentLoc());
}
void ASTDeclReader::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
@@ -2716,7 +2717,8 @@ void ASTDeclReader::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
// Rest of NonTypeTemplateParmDecl.
D->ParameterPack = Record.readInt();
if (Record.readInt())
- D->setDefaultArgument(Record.readExpr());
+ D->setDefaultArgument(Reader.getContext(),
+ Record.readTemplateArgumentLoc());
}
}
@@ -4186,12 +4188,35 @@ void ASTReader::PassInterestingDeclsToConsumer() {
GetDecl(ID);
EagerlyDeserializedDecls.clear();
- while (!PotentiallyInterestingDecls.empty()) {
- Decl *D = PotentiallyInterestingDecls.front();
- PotentiallyInterestingDecls.pop_front();
+ auto ConsumingPotentialInterestingDecls = [this]() {
+ while (!PotentiallyInterestingDecls.empty()) {
+ Decl *D = PotentiallyInterestingDecls.front();
+ PotentiallyInterestingDecls.pop_front();
+ if (isConsumerInterestedIn(D))
+ PassInterestingDeclToConsumer(D);
+ }
+ };
+ std::deque<Decl *> MaybeInterestingDecls =
+ std::move(PotentiallyInterestingDecls);
+ assert(PotentiallyInterestingDecls.empty());
+ while (!MaybeInterestingDecls.empty()) {
+ Decl *D = MaybeInterestingDecls.front();
+ MaybeInterestingDecls.pop_front();
+ // Since we load the variable's initializers lazily, it'd be problematic
+ // if the initializers depend on each other. So here we try to load the
+ // initializers of static variables to make sure they are passed to the
+ // code generator in order. If we read anything interesting, we consume it
+ // before emitting the current declaration.
+ if (auto *VD = dyn_cast<VarDecl>(D);
+ VD && VD->isFileVarDecl() && !VD->isExternallyVisible())
+ VD->getInit();
+ ConsumingPotentialInterestingDecls();
if (isConsumerInterestedIn(D))
PassInterestingDeclToConsumer(D);
}
+
+ // If the last iteration added any new potentially interesting decls,
+ // consume them.
+ ConsumingPotentialInterestingDecls();
}
void ASTReader::loadDeclUpdateRecords(PendingUpdateRecord &Record) {
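
A sketch of the ordering problem the loop above addresses (hypothetical named module, not from the patch): with lazy deserialization, loading one variable's initializer can make another declaration interesting, and that declaration must reach the consumer first.

export module m;      // assumed module for illustration
static int A = 42;    // deserialized lazily
static int B = A + 1; // calling VD->getInit() on B pulls in A; the pending
                      // queue is drained so A is emitted before B
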
diff --git a/clang/lib/Serialization/ASTReaderStmt.cpp b/clang/lib/Serialization/ASTReaderStmt.cpp
index 7d3930022a69..eac4faff2854 100644
--- a/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -2177,6 +2177,7 @@ void ASTStmtReader::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
void ASTStmtReader::VisitPackIndexingExpr(PackIndexingExpr *E) {
VisitExpr(E);
E->TransformedExpressions = Record.readInt();
+ E->ExpandedToEmptyPack = Record.readInt();
E->EllipsisLoc = readSourceLocation();
E->RSquareLoc = readSourceLocation();
E->SubExprs[0] = Record.readStmt();
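
The new ExpandedToEmptyPack bit distinguishes 'expanded to an empty pack' from 'not transformed yet'. An illustration of the construct involved (C++2c pack indexing; the example is not from the patch):

template <auto... Vals>
constexpr auto firstOrZero() {
  if constexpr (sizeof...(Vals) == 0)
    return 0;          // the pack expands to an empty pack
  else
    return Vals...[0]; // a PackIndexingExpr with transformed expressions
}
static_assert(firstOrZero<7, 8>() == 7);
static_assert(firstOrZero<>() == 0);
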
diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp
index 2a107e4c56a3..a85cd94fd5b5 100644
--- a/clang/lib/Serialization/ASTWriter.cpp
+++ b/clang/lib/Serialization/ASTWriter.cpp
@@ -5037,6 +5037,14 @@ void ASTWriter::PrepareWritingSpecialDecls(Sema &SemaRef) {
continue;
}
+ // If we're writing C++ named modules, don't emit declarations which are
+ // not from modules by default. They may be builtin declarations (handled
+ // above) or implicit declarations (see the implementation of
+ // `Sema::Initialize()` for example).
+ if (isWritingStdCXXNamedModules() && !D->getOwningModule() &&
+ D->isImplicit())
+ continue;
+
GetDeclRef(D);
}
@@ -6074,6 +6082,31 @@ void ASTWriter::AddTypeRef(QualType T, RecordDataImpl &Record) {
Record.push_back(GetOrCreateTypeID(T));
}
+template <typename IdxForTypeTy>
+static TypeID MakeTypeID(ASTContext &Context, QualType T,
+ IdxForTypeTy IdxForType) {
+ if (T.isNull())
+ return PREDEF_TYPE_NULL_ID;
+
+ unsigned FastQuals = T.getLocalFastQualifiers();
+ T.removeLocalFastQualifiers();
+
+ if (T.hasLocalNonFastQualifiers())
+ return IdxForType(T).asTypeID(FastQuals);
+
+ assert(!T.hasLocalQualifiers());
+
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(T.getTypePtr()))
+ return TypeIdxFromBuiltin(BT).asTypeID(FastQuals);
+
+ if (T == Context.AutoDeductTy)
+ return TypeIdx(PREDEF_TYPE_AUTO_DEDUCT).asTypeID(FastQuals);
+ if (T == Context.AutoRRefDeductTy)
+ return TypeIdx(PREDEF_TYPE_AUTO_RREF_DEDUCT).asTypeID(FastQuals);
+
+ return IdxForType(T).asTypeID(FastQuals);
+}
+
TypeID ASTWriter::GetOrCreateTypeID(QualType T) {
assert(Context);
return MakeTypeID(*Context, T, [&](QualType T) -> TypeIdx {
@@ -6097,19 +6130,6 @@ TypeID ASTWriter::GetOrCreateTypeID(QualType T) {
});
}
-TypeID ASTWriter::getTypeID(QualType T) const {
- assert(Context);
- return MakeTypeID(*Context, T, [&](QualType T) -> TypeIdx {
- if (T.isNull())
- return TypeIdx();
- assert(!T.getLocalFastQualifiers());
-
- TypeIdxMap::const_iterator I = TypeIdxs.find(T);
- assert(I != TypeIdxs.end() && "Type not emitted!");
- return I->second;
- });
-}
-
void ASTWriter::AddEmittedDeclRef(const Decl *D, RecordDataImpl &Record) {
if (!wasDeclEmitted(D))
return;
@@ -6185,8 +6205,9 @@ bool ASTWriter::wasDeclEmitted(const Decl *D) const {
return true;
bool Emitted = DeclIDs.contains(D);
- assert((Emitted || GeneratingReducedBMI) &&
- "The declaration can only be omitted in reduced BMI.");
+ assert((Emitted || (!D->getOwningModule() && isWritingStdCXXNamedModules()) ||
+ GeneratingReducedBMI) &&
+ "The declaration within modules can only be omitted in reduced BMI.");
return Emitted;
}
@@ -7947,6 +7968,13 @@ void ASTRecordWriter::writeOpenACCClause(const OpenACCClause *C) {
}
return;
}
+ case OpenACCClauseKind::Reduction: {
+ const auto *RC = cast<OpenACCReductionClause>(C);
+ writeSourceLocation(RC->getLParenLoc());
+ writeEnum(RC->getReductionOp());
+ writeOpenACCVarList(RC);
+ return;
+ }
case OpenACCClauseKind::Finalize:
case OpenACCClauseKind::IfPresent:
@@ -7963,7 +7991,6 @@ void ASTRecordWriter::writeOpenACCClause(const OpenACCClause *C) {
case OpenACCClauseKind::DeviceResident:
case OpenACCClauseKind::Host:
case OpenACCClauseKind::Link:
- case OpenACCClauseKind::Reduction:
case OpenACCClauseKind::Collapse:
case OpenACCClauseKind::Bind:
case OpenACCClauseKind::DeviceNum:
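
The matching reader added in ASTReader.cpp consumes these fields in exactly the order they are written here. A minimal sketch of this order-sensitive pattern (simplified stand-in types, not the real ASTWriter/ASTReader API):

#include <cassert>
#include <cstdint>
#include <vector>

struct Record {
  std::vector<uint64_t> Data;
  size_t Pos = 0;
  void write(uint64_t V) { Data.push_back(V); }
  uint64_t read() { assert(Pos < Data.size()); return Data[Pos++]; }
};

enum class ReductionOp : uint64_t { Add, Mul };

// Write side: location, operator, then the variable count.
void writeReduction(Record &R, uint64_t LParenLoc, ReductionOp Op,
                    uint64_t NumVars) {
  R.write(LParenLoc);
  R.write(static_cast<uint64_t>(Op));
  R.write(NumVars);
}

// Read side: must consume the same fields in the same order.
void readReduction(Record &R, uint64_t &LParenLoc, ReductionOp &Op,
                   uint64_t &NumVars) {
  LParenLoc = R.read();
  Op = static_cast<ReductionOp>(R.read());
  NumVars = R.read();
}
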
diff --git a/clang/lib/Serialization/ASTWriterDecl.cpp b/clang/lib/Serialization/ASTWriterDecl.cpp
index c2f1d1b44241..bbd16dbdb8ff 100644
--- a/clang/lib/Serialization/ASTWriterDecl.cpp
+++ b/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -1899,7 +1899,7 @@ void ASTDeclWriter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
!D->defaultArgumentWasInherited();
Record.push_back(OwnsDefaultArg);
if (OwnsDefaultArg)
- Record.AddTypeSourceInfo(D->getDefaultArgumentInfo());
+ Record.AddTemplateArgumentLoc(D->getDefaultArgument());
if (!TC && !OwnsDefaultArg &&
D->getDeclContext() == D->getLexicalDeclContext() &&
@@ -1941,7 +1941,7 @@ void ASTDeclWriter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
!D->defaultArgumentWasInherited();
Record.push_back(OwnsDefaultArg);
if (OwnsDefaultArg)
- Record.AddStmt(D->getDefaultArgument());
+ Record.AddTemplateArgumentLoc(D->getDefaultArgument());
Code = serialization::DECL_NON_TYPE_TEMPLATE_PARM;
}
}
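
Both kinds of template parameter defaults are now serialized uniformly as a TemplateArgumentLoc instead of a TypeSourceInfo or a bare expression. Illustrative declarations only:

template <typename T = int, // type default: previously AddTypeSourceInfo
          int N = 4>        // non-type default: previously AddStmt
struct SmallBuffer {
  T Data[N];
};
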
diff --git a/clang/lib/Serialization/ASTWriterStmt.cpp b/clang/lib/Serialization/ASTWriterStmt.cpp
index 6f7c368ce9ca..a44852af97be 100644
--- a/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -2157,11 +2157,11 @@ void ASTStmtWriter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
void ASTStmtWriter::VisitPackIndexingExpr(PackIndexingExpr *E) {
VisitExpr(E);
Record.push_back(E->TransformedExpressions);
+ Record.push_back(E->ExpandedToEmptyPack);
Record.AddSourceLocation(E->getEllipsisLoc());
Record.AddSourceLocation(E->getRSquareLoc());
Record.AddStmt(E->getPackIdExpression());
Record.AddStmt(E->getIndexExpr());
- Record.push_back(E->TransformedExpressions);
for (Expr *Sub : E->getExpressions())
Record.AddStmt(Sub);
Code = serialization::EXPR_PACK_INDEXING;
diff --git a/clang/lib/StaticAnalyzer/Checkers/CMakeLists.txt b/clang/lib/StaticAnalyzer/Checkers/CMakeLists.txt
index 4443ffd09293..cd5a3bdd02e4 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CMakeLists.txt
+++ b/clang/lib/StaticAnalyzer/Checkers/CMakeLists.txt
@@ -96,13 +96,14 @@ add_clang_library(clangStaticAnalyzerCheckers
PointerSortingChecker.cpp
PointerSubChecker.cpp
PthreadLockChecker.cpp
- cert/PutenvWithAutoChecker.cpp
+ PutenvStackArrayChecker.cpp
RetainCountChecker/RetainCountChecker.cpp
RetainCountChecker/RetainCountDiagnostics.cpp
ReturnPointerRangeChecker.cpp
ReturnUndefChecker.cpp
ReturnValueChecker.cpp
RunLoopAutoreleaseLeakChecker.cpp
+ SetgidSetuidOrderChecker.cpp
SimpleStreamChecker.cpp
SmartPtrChecker.cpp
SmartPtrModeling.cpp
diff --git a/clang/lib/StaticAnalyzer/Checkers/cert/PutenvWithAutoChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/PutenvStackArrayChecker.cpp
index a82f7caf16b2..bf81d57bf82f 100644
--- a/clang/lib/StaticAnalyzer/Checkers/cert/PutenvWithAutoChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/PutenvStackArrayChecker.cpp
@@ -1,4 +1,4 @@
-//== PutenvWithAutoChecker.cpp --------------------------------- -*- C++ -*--=//
+//== PutenvStackArrayChecker.cpp ------------------------------- -*- C++ -*--=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,13 +6,13 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines PutenvWithAutoChecker which finds calls of ``putenv``
-// function with automatic variable as the argument.
+// This file defines PutenvStackArrayChecker, which finds calls to the
+// ``putenv`` function where the argument is an automatic (stack) array.
// https://wiki.sei.cmu.edu/confluence/x/6NYxBQ
//
//===----------------------------------------------------------------------===//
-#include "../AllocationState.h"
+#include "AllocationState.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -26,9 +26,9 @@ using namespace clang;
using namespace ento;
namespace {
-class PutenvWithAutoChecker : public Checker<check::PostCall> {
+class PutenvStackArrayChecker : public Checker<check::PostCall> {
private:
- BugType BT{this, "'putenv' function should not be called with auto variables",
+ BugType BT{this, "'putenv' called with stack-allocated string",
categories::SecurityError};
const CallDescription Putenv{CDM::CLibrary, {"putenv"}, 1};
@@ -37,20 +37,25 @@ public:
};
} // namespace
-void PutenvWithAutoChecker::checkPostCall(const CallEvent &Call,
- CheckerContext &C) const {
+void PutenvStackArrayChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
if (!Putenv.matches(Call))
return;
SVal ArgV = Call.getArgSVal(0);
const Expr *ArgExpr = Call.getArgExpr(0);
- const MemSpaceRegion *MSR = ArgV.getAsRegion()->getMemorySpace();
- if (!isa<StackSpaceRegion>(MSR))
+ const auto *SSR =
+ dyn_cast<StackSpaceRegion>(ArgV.getAsRegion()->getMemorySpace());
+ if (!SSR)
+ return;
+ const auto *StackFrameFuncD =
+ dyn_cast_or_null<FunctionDecl>(SSR->getStackFrame()->getDecl());
+ if (StackFrameFuncD && StackFrameFuncD->isMain())
return;
StringRef ErrorMsg = "The 'putenv' function should not be called with "
- "arguments that have automatic storage";
+ "arrays that have automatic storage";
ExplodedNode *N = C.generateErrorNode();
auto Report = std::make_unique<PathSensitiveBugReport>(BT, ErrorMsg, N);
@@ -60,8 +65,10 @@ void PutenvWithAutoChecker::checkPostCall(const CallEvent &Call,
C.emitReport(std::move(Report));
}
-void ento::registerPutenvWithAuto(CheckerManager &Mgr) {
- Mgr.registerChecker<PutenvWithAutoChecker>();
+void ento::registerPutenvStackArray(CheckerManager &Mgr) {
+ Mgr.registerChecker<PutenvStackArrayChecker>();
}
-bool ento::shouldRegisterPutenvWithAuto(const CheckerManager &) { return true; }
+bool ento::shouldRegisterPutenvStackArray(const CheckerManager &) {
+ return true;
+}
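
Example of the code this renamed checker targets (illustrative; note that the new code above exempts main(), whose locals live for the rest of the program):

#include <stdio.h>
#include <stdlib.h>

void set_var(const char *Value) {
  char Env[1024];
  snprintf(Env, sizeof(Env), "MY_VAR=%s", Value);
  putenv(Env); // warning: Env is a stack array that dangles after return
}

int main(void) {
  char Env[] = "MY_VAR=1";
  putenv(Env); // no warning: main()'s frame effectively lives forever
  return 0;
}
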
diff --git a/clang/lib/StaticAnalyzer/Checkers/SetgidSetuidOrderChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/SetgidSetuidOrderChecker.cpp
new file mode 100644
index 000000000000..dbe3fd33a6b4
--- /dev/null
+++ b/clang/lib/StaticAnalyzer/Checkers/SetgidSetuidOrderChecker.cpp
@@ -0,0 +1,196 @@
+//===-- SetgidSetuidOrderChecker.cpp - check privilege revocation calls ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a checker to detect a possibly reversed order of
+// privilege revocations when 'setgid' and 'setuid' are used.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+enum SetPrivilegeFunctionKind { Irrelevant, Setuid, Setgid };
+
+class SetgidSetuidOrderChecker : public Checker<check::PostCall, eval::Assume> {
+ const BugType BT{this, "Possible wrong order of privilege revocation"};
+
+ const CallDescription SetuidDesc{CDM::CLibrary, {"setuid"}, 1};
+ const CallDescription SetgidDesc{CDM::CLibrary, {"setgid"}, 1};
+
+ const CallDescription GetuidDesc{CDM::CLibrary, {"getuid"}, 0};
+ const CallDescription GetgidDesc{CDM::CLibrary, {"getgid"}, 0};
+
+ const CallDescriptionSet OtherSetPrivilegeDesc{
+ {CDM::CLibrary, {"seteuid"}, 1}, {CDM::CLibrary, {"setegid"}, 1},
+ {CDM::CLibrary, {"setreuid"}, 2}, {CDM::CLibrary, {"setregid"}, 2},
+ {CDM::CLibrary, {"setresuid"}, 3}, {CDM::CLibrary, {"setresgid"}, 3}};
+
+public:
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+ ProgramStateRef evalAssume(ProgramStateRef State, SVal Cond,
+ bool Assumption) const;
+
+private:
+ void processSetuid(ProgramStateRef State, const CallEvent &Call,
+ CheckerContext &C) const;
+ void processSetgid(ProgramStateRef State, const CallEvent &Call,
+ CheckerContext &C) const;
+ void processOther(ProgramStateRef State, const CallEvent &Call,
+ CheckerContext &C) const;
+ /// Check if a function like \c getuid or \c getgid is called directly in
+ /// the first argument of the function call \a Call.
+ bool isFunctionCalledInArg(const CallDescription &Desc,
+ const CallEvent &Call) const;
+ void emitReport(ProgramStateRef State, CheckerContext &C) const;
+};
+
+} // end anonymous namespace
+
+/// Store whether there was a call to 'setuid(getuid())' or 'setgid(getgid())'
+/// that is not followed by a different privilege-changing function.
+/// If the value \c Setuid is stored and a 'setgid(getgid())' call is found, we
+/// have found the bug to report. The value \c Setgid is used as well, to
+/// prevent warnings on a setgid-setuid-setgid sequence.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(LastSetPrivilegeCall, SetPrivilegeFunctionKind)
+/// Store the symbol value of the last 'setuid(getuid())' call. This is used to
+/// detect if the result is compared to -1 and avoid warnings on that branch
+/// (which is the failure branch of the call), and for identification of note
+/// tags.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(LastSetuidCallSVal, SymbolRef)
+
+void SetgidSetuidOrderChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (SetuidDesc.matches(Call)) {
+ processSetuid(State, Call, C);
+ } else if (SetgidDesc.matches(Call)) {
+ processSetgid(State, Call, C);
+ } else if (OtherSetPrivilegeDesc.contains(Call)) {
+ processOther(State, Call, C);
+ }
+}
+
+ProgramStateRef SetgidSetuidOrderChecker::evalAssume(ProgramStateRef State,
+ SVal Cond,
+ bool Assumption) const {
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ SymbolRef LastSetuidSym = State->get<LastSetuidCallSVal>();
+ if (!LastSetuidSym)
+ return State;
+
+ // Check if the most recent call to 'setuid(getuid())' is assumed to be != 0.
+ // It should only be -1 on failure, but we want to accept a "!= 0" check too.
+ // (This means an invalid failure check like "!= 1" is recognized as correct
+ // too; such an "invalid failure check" is a different bug that is outside
+ // the scope of this checker.)
+ auto FailComparison =
+ SVB.evalBinOpNN(State, BO_NE, nonloc::SymbolVal(LastSetuidSym),
+ SVB.makeIntVal(0, /*isUnsigned=*/false),
+ SVB.getConditionType())
+ .getAs<DefinedOrUnknownSVal>();
+ if (!FailComparison)
+ return State;
+ if (auto IsFailBranch = State->assume(*FailComparison);
+ IsFailBranch.first && !IsFailBranch.second) {
+ // This is the 'setuid(getuid())' != 0 case.
+ // On this branch we do not want to emit warning.
+ State = State->set<LastSetPrivilegeCall>(Irrelevant);
+ State = State->set<LastSetuidCallSVal>(SymbolRef{});
+ }
+ return State;
+}
+
+void SetgidSetuidOrderChecker::processSetuid(ProgramStateRef State,
+ const CallEvent &Call,
+ CheckerContext &C) const {
+ bool IsSetuidWithGetuid = isFunctionCalledInArg(GetuidDesc, Call);
+ if (State->get<LastSetPrivilegeCall>() != Setgid && IsSetuidWithGetuid) {
+ SymbolRef RetSym = Call.getReturnValue().getAsSymbol();
+ State = State->set<LastSetPrivilegeCall>(Setuid);
+ State = State->set<LastSetuidCallSVal>(RetSym);
+ const NoteTag *Note = C.getNoteTag([this,
+ RetSym](PathSensitiveBugReport &BR) {
+ if (!BR.isInteresting(RetSym) || &BR.getBugType() != &this->BT)
+ return "";
+ return "Call to 'setuid' found here that removes superuser privileges";
+ });
+ C.addTransition(State, Note);
+ return;
+ }
+ State = State->set<LastSetPrivilegeCall>(Irrelevant);
+ State = State->set<LastSetuidCallSVal>(SymbolRef{});
+ C.addTransition(State);
+}
+
+void SetgidSetuidOrderChecker::processSetgid(ProgramStateRef State,
+ const CallEvent &Call,
+ CheckerContext &C) const {
+ bool IsSetgidWithGetgid = isFunctionCalledInArg(GetgidDesc, Call);
+ if (State->get<LastSetPrivilegeCall>() == Setuid) {
+ if (IsSetgidWithGetgid) {
+ State = State->set<LastSetPrivilegeCall>(Irrelevant);
+ emitReport(State, C);
+ return;
+ }
+ State = State->set<LastSetPrivilegeCall>(Irrelevant);
+ } else {
+ State = State->set<LastSetPrivilegeCall>(IsSetgidWithGetgid ? Setgid
+ : Irrelevant);
+ }
+ State = State->set<LastSetuidCallSVal>(SymbolRef{});
+ C.addTransition(State);
+}
+
+void SetgidSetuidOrderChecker::processOther(ProgramStateRef State,
+ const CallEvent &Call,
+ CheckerContext &C) const {
+ State = State->set<LastSetuidCallSVal>(SymbolRef{});
+ State = State->set<LastSetPrivilegeCall>(Irrelevant);
+ C.addTransition(State);
+}
+
+bool SetgidSetuidOrderChecker::isFunctionCalledInArg(
+ const CallDescription &Desc, const CallEvent &Call) const {
+ if (const auto *CallInArg0 =
+ dyn_cast<CallExpr>(Call.getArgExpr(0)->IgnoreParenImpCasts()))
+ return Desc.matchesAsWritten(*CallInArg0);
+ return false;
+}
+
+void SetgidSetuidOrderChecker::emitReport(ProgramStateRef State,
+ CheckerContext &C) const {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(State)) {
+ llvm::StringLiteral Msg =
+ "A 'setgid(getgid())' call following a 'setuid(getuid())' "
+ "call is likely to fail; probably the order of these "
+ "statements is wrong";
+ auto Report = std::make_unique<PathSensitiveBugReport>(BT, Msg, N);
+ Report->markInteresting(State->get<LastSetuidCallSVal>());
+ C.emitReport(std::move(Report));
+ }
+}
+
+void ento::registerSetgidSetuidOrderChecker(CheckerManager &mgr) {
+ mgr.registerChecker<SetgidSetuidOrderChecker>();
+}
+
+bool ento::shouldRegisterSetgidSetuidOrderChecker(const CheckerManager &mgr) {
+ return true;
+}
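
The pattern the checker warns about, and the corrected order (illustrative):

#include <unistd.h>

void drop_privileges_wrong(void) {
  if (setuid(getuid()) == -1) // superuser privileges are dropped here...
    return;
  if (setgid(getgid()) == -1) // ...so this setgid call is likely to fail
    return;
}

void drop_privileges_right(void) {
  if (setgid(getgid()) == -1) // set the group first, while still privileged
    return;
  if (setuid(getuid()) == -1)
    return;
}
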
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
index 5c797d523308..49bbff194216 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
@@ -271,6 +271,43 @@ public:
TrivialFunctionAnalysisVisitor(CacheTy &Cache) : Cache(Cache) {}
+ bool IsFunctionTrivial(const Decl *D) {
+ auto CacheIt = Cache.find(D);
+ if (CacheIt != Cache.end())
+ return CacheIt->second;
+
+ // Treat a recursive function call as trivial until proven otherwise.
+ auto [RecursiveIt, IsNew] = RecursiveFn.insert(std::make_pair(D, true));
+ if (!IsNew)
+ return RecursiveIt->second;
+
+ bool Result = [&]() {
+ if (auto *CtorDecl = dyn_cast<CXXConstructorDecl>(D)) {
+ for (auto *CtorInit : CtorDecl->inits()) {
+ if (!Visit(CtorInit->getInit()))
+ return false;
+ }
+ }
+ const Stmt *Body = D->getBody();
+ if (!Body)
+ return false;
+ return Visit(Body);
+ }();
+
+ if (!Result) {
+ // D and its mutually recursive callers are all non-trivial.
+ for (auto &It : RecursiveFn)
+ It.second = false;
+ }
+ RecursiveIt = RecursiveFn.find(D);
+ assert(RecursiveIt != RecursiveFn.end());
+ Result = RecursiveIt->second;
+ RecursiveFn.erase(RecursiveIt);
+ Cache[D] = Result;
+
+ return Result;
+ }
+
bool VisitStmt(const Stmt *S) {
// All statements are non-trivial unless overridden later.
// Don't even recurse into children by default.
@@ -368,7 +405,7 @@ public:
Name == "bitwise_cast" || Name.find("__builtin") == 0)
return true;
- return TrivialFunctionAnalysis::isTrivialImpl(Callee, Cache);
+ return IsFunctionTrivial(Callee);
}
bool
@@ -403,7 +440,7 @@ public:
return true;
// Recursively descend into the callee to confirm that it's trivial as well.
- return TrivialFunctionAnalysis::isTrivialImpl(Callee, Cache);
+ return IsFunctionTrivial(Callee);
}
bool VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *OCE) {
@@ -413,7 +450,7 @@ public:
if (!Callee)
return false;
// Recursively descend into the callee to confirm that it's trivial as well.
- return TrivialFunctionAnalysis::isTrivialImpl(Callee, Cache);
+ return IsFunctionTrivial(Callee);
}
bool VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E) {
@@ -439,7 +476,7 @@ public:
}
// Recursively descend into the callee to confirm that it's trivial.
- return TrivialFunctionAnalysis::isTrivialImpl(CE->getConstructor(), Cache);
+ return IsFunctionTrivial(CE->getConstructor());
}
bool VisitCXXNewExpr(const CXXNewExpr *NE) { return VisitChildren(NE); }
@@ -513,36 +550,13 @@ public:
private:
CacheTy &Cache;
+ CacheTy RecursiveFn;
};
bool TrivialFunctionAnalysis::isTrivialImpl(
const Decl *D, TrivialFunctionAnalysis::CacheTy &Cache) {
- // If the function isn't in the cache, conservatively assume that
- // it's not trivial until analysis completes. This makes every recursive
- // function non-trivial. This also guarantees that each function
- // will be scanned at most once.
- auto [It, IsNew] = Cache.insert(std::make_pair(D, false));
- if (!IsNew)
- return It->second;
-
TrivialFunctionAnalysisVisitor V(Cache);
-
- if (auto *CtorDecl = dyn_cast<CXXConstructorDecl>(D)) {
- for (auto *CtorInit : CtorDecl->inits()) {
- if (!V.Visit(CtorInit->getInit()))
- return false;
- }
- }
-
- const Stmt *Body = D->getBody();
- if (!Body)
- return false;
-
- bool Result = V.Visit(Body);
- if (Result)
- Cache[D] = true;
-
- return Result;
+ return V.IsFunctionTrivial(D);
}
bool TrivialFunctionAnalysis::isTrivialImpl(
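
Why the per-visitor RecursiveFn map matters (hypothetical input, not from the patch): mutually recursive functions used to be marked non-trivial on first sight; treating an in-progress callee as provisionally trivial lets the analysis converge to the right answer for bodies that are otherwise trivial.

int pong(int N);
int ping(int N) { return N <= 0 ? 0 : pong(N - 1); } // trivial iff pong is
int pong(int N) { return N <= 0 ? 1 : ping(N - 1); } // trivial iff ping is
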
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
index 7f4c3a7b787e..9df108e28ecd 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
@@ -11,16 +11,116 @@
#include "PtrTypesSemantics.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/StmtVisitor.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
#include <optional>
using namespace clang;
using namespace ento;
namespace {
+
+class DerefFuncDeleteExprVisitor
+ : public ConstStmtVisitor<DerefFuncDeleteExprVisitor, bool> {
+ // Returns true if any of the child statements returns true.
+ bool VisitChildren(const Stmt *S) {
+ for (const Stmt *Child : S->children()) {
+ if (Child && Visit(Child))
+ return true;
+ }
+ return false;
+ }
+
+ bool VisitBody(const Stmt *Body) {
+ if (!Body)
+ return false;
+
+ auto [It, IsNew] = VisitedBody.insert(Body);
+ if (!IsNew) // This body is recursive
+ return false;
+
+ return Visit(Body);
+ }
+
+public:
+ DerefFuncDeleteExprVisitor(const TemplateArgumentList &ArgList,
+ const CXXRecordDecl *ClassDecl)
+ : ArgList(&ArgList), ClassDecl(ClassDecl) {}
+
+ DerefFuncDeleteExprVisitor(const CXXRecordDecl *ClassDecl)
+ : ClassDecl(ClassDecl) {}
+
+ std::optional<bool> HasSpecializedDelete(CXXMethodDecl *Decl) {
+ if (auto *Body = Decl->getBody())
+ return VisitBody(Body);
+ if (Decl->getTemplateInstantiationPattern())
+ return std::nullopt; // Indeterminate. There was no concrete instance.
+ return false;
+ }
+
+ bool VisitCallExpr(const CallExpr *CE) {
+ const Decl *D = CE->getCalleeDecl();
+ if (D && D->hasBody())
+ return VisitBody(D->getBody());
+ return false;
+ }
+
+ bool VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
+ auto *Arg = E->getArgument();
+ while (Arg) {
+ if (auto *Paren = dyn_cast<ParenExpr>(Arg))
+ Arg = Paren->getSubExpr();
+ else if (auto *Cast = dyn_cast<CastExpr>(Arg)) {
+ Arg = Cast->getSubExpr();
+ auto CastType = Cast->getType();
+ if (auto *PtrType = dyn_cast<PointerType>(CastType)) {
+ auto PointeeType = PtrType->getPointeeType();
+ while (auto *ET = dyn_cast<ElaboratedType>(PointeeType)) {
+ if (ET->isSugared())
+ PointeeType = ET->desugar();
+ }
+ if (auto *ParmType = dyn_cast<TemplateTypeParmType>(PointeeType)) {
+ if (ArgList) {
+ auto ParmIndex = ParmType->getIndex();
+ auto Type = ArgList->get(ParmIndex).getAsType();
+ if (Type->getAsCXXRecordDecl() == ClassDecl)
+ return true;
+ }
+ } else if (auto *RD = dyn_cast<RecordType>(PointeeType)) {
+ if (RD->getDecl() == ClassDecl)
+ return true;
+ } else if (auto *ST =
+ dyn_cast<SubstTemplateTypeParmType>(PointeeType)) {
+ auto Type = ST->getReplacementType();
+ if (auto *RD = dyn_cast<RecordType>(Type)) {
+ if (RD->getDecl() == ClassDecl)
+ return true;
+ }
+ }
+ }
+ } else
+ break;
+ }
+ return false;
+ }
+
+ bool VisitStmt(const Stmt *S) { return VisitChildren(S); }
+
+ // Return false since the contents of a lambda aren't necessarily executed.
+ // If they are executed, VisitCallExpr above will visit the lambda's body.
+ bool VisitLambdaExpr(const LambdaExpr *) { return false; }
+
+private:
+ const TemplateArgumentList *ArgList{nullptr};
+ const CXXRecordDecl *ClassDecl;
+ llvm::DenseSet<const Stmt *> VisitedBody;
+};
+
class RefCntblBaseVirtualDtorChecker
: public Checker<check::ASTDecl<TranslationUnitDecl>> {
private:
@@ -51,63 +151,93 @@ public:
bool shouldVisitImplicitCode() const { return false; }
bool VisitCXXRecordDecl(const CXXRecordDecl *RD) {
- Checker->visitCXXRecordDecl(RD);
+ if (!RD->hasDefinition())
+ return true;
+
+ Decls.insert(RD);
+
+ for (auto &Base : RD->bases()) {
+ const auto AccSpec = Base.getAccessSpecifier();
+ if (AccSpec == AS_protected || AccSpec == AS_private ||
+ (AccSpec == AS_none && RD->isClass()))
+ continue;
+
+ QualType T = Base.getType();
+ if (T.isNull())
+ continue;
+
+ const CXXRecordDecl *C = T->getAsCXXRecordDecl();
+ if (!C)
+ continue;
+
+ if (auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(C)) {
+ for (auto &Arg : CTSD->getTemplateArgs().asArray()) {
+ if (Arg.getKind() != TemplateArgument::Type)
+ continue;
+ auto TemplT = Arg.getAsType();
+ if (TemplT.isNull())
+ continue;
+
+ bool IsCRTP = TemplT->getAsCXXRecordDecl() == RD;
+ if (!IsCRTP)
+ continue;
+ CRTPs.insert(C);
+ }
+ }
+ }
+
return true;
}
+
+ llvm::SetVector<const CXXRecordDecl *> Decls;
+ llvm::DenseSet<const CXXRecordDecl *> CRTPs;
};
LocalVisitor visitor(this);
visitor.TraverseDecl(const_cast<TranslationUnitDecl *>(TUD));
+ for (auto *RD : visitor.Decls) {
+ if (visitor.CRTPs.contains(RD))
+ continue;
+ visitCXXRecordDecl(RD);
+ }
}
void visitCXXRecordDecl(const CXXRecordDecl *RD) const {
if (shouldSkipDecl(RD))
return;
- CXXBasePaths Paths;
- Paths.setOrigin(RD);
+ for (auto &Base : RD->bases()) {
+ const auto AccSpec = Base.getAccessSpecifier();
+ if (AccSpec == AS_protected || AccSpec == AS_private ||
+ (AccSpec == AS_none && RD->isClass()))
+ continue;
- const CXXBaseSpecifier *ProblematicBaseSpecifier = nullptr;
- const CXXRecordDecl *ProblematicBaseClass = nullptr;
+ auto hasRefInBase = clang::hasPublicMethodInBase(&Base, "ref");
+ auto hasDerefInBase = clang::hasPublicMethodInBase(&Base, "deref");
- const auto IsPublicBaseRefCntblWOVirtualDtor =
- [RD, &ProblematicBaseSpecifier,
- &ProblematicBaseClass](const CXXBaseSpecifier *Base, CXXBasePath &) {
- const auto AccSpec = Base->getAccessSpecifier();
- if (AccSpec == AS_protected || AccSpec == AS_private ||
- (AccSpec == AS_none && RD->isClass()))
- return false;
+ bool hasRef = hasRefInBase && *hasRefInBase != nullptr;
+ bool hasDeref = hasDerefInBase && *hasDerefInBase != nullptr;
- auto hasRefInBase = clang::hasPublicMethodInBase(Base, "ref");
- auto hasDerefInBase = clang::hasPublicMethodInBase(Base, "deref");
+ QualType T = Base.getType();
+ if (T.isNull())
+ continue;
- bool hasRef = hasRefInBase && *hasRefInBase != nullptr;
- bool hasDeref = hasDerefInBase && *hasDerefInBase != nullptr;
+ const CXXRecordDecl *C = T->getAsCXXRecordDecl();
+ if (!C)
+ continue;
- QualType T = Base->getType();
- if (T.isNull())
- return false;
-
- const CXXRecordDecl *C = T->getAsCXXRecordDecl();
- if (!C)
- return false;
- if (isRefCountedClass(C))
- return false;
-
- bool AnyInconclusiveBase = false;
- const auto hasPublicRefInBase =
- [&AnyInconclusiveBase](const CXXBaseSpecifier *Base,
- CXXBasePath &) {
- auto hasRefInBase = clang::hasPublicMethodInBase(Base, "ref");
- if (!hasRefInBase) {
- AnyInconclusiveBase = true;
- return false;
- }
- return (*hasRefInBase) != nullptr;
- };
- const auto hasPublicDerefInBase = [&AnyInconclusiveBase](
- const CXXBaseSpecifier *Base,
- CXXBasePath &) {
+ bool AnyInconclusiveBase = false;
+ const auto hasPublicRefInBase =
+ [&AnyInconclusiveBase](const CXXBaseSpecifier *Base, CXXBasePath &) {
+ auto hasRefInBase = clang::hasPublicMethodInBase(Base, "ref");
+ if (!hasRefInBase) {
+ AnyInconclusiveBase = true;
+ return false;
+ }
+ return (*hasRefInBase) != nullptr;
+ };
+ const auto hasPublicDerefInBase =
+ [&AnyInconclusiveBase](const CXXBaseSpecifier *Base, CXXBasePath &) {
auto hasDerefInBase = clang::hasPublicMethodInBase(Base, "deref");
if (!hasDerefInBase) {
AnyInconclusiveBase = true;
@@ -115,28 +245,42 @@ public:
}
return (*hasDerefInBase) != nullptr;
};
- CXXBasePaths Paths;
- Paths.setOrigin(C);
- hasRef = hasRef || C->lookupInBases(hasPublicRefInBase, Paths,
+ CXXBasePaths Paths;
+ Paths.setOrigin(C);
+ hasRef = hasRef || C->lookupInBases(hasPublicRefInBase, Paths,
+ /*LookupInDependent =*/true);
+ hasDeref = hasDeref || C->lookupInBases(hasPublicDerefInBase, Paths,
/*LookupInDependent =*/true);
- hasDeref = hasDeref || C->lookupInBases(hasPublicDerefInBase, Paths,
- /*LookupInDependent =*/true);
- if (AnyInconclusiveBase || !hasRef || !hasDeref)
- return false;
-
- const auto *Dtor = C->getDestructor();
- if (!Dtor || !Dtor->isVirtual()) {
- ProblematicBaseSpecifier = Base;
- ProblematicBaseClass = C;
- return true;
- }
-
- return false;
- };
-
- if (RD->lookupInBases(IsPublicBaseRefCntblWOVirtualDtor, Paths,
- /*LookupInDependent =*/true)) {
- reportBug(RD, ProblematicBaseSpecifier, ProblematicBaseClass);
+ if (AnyInconclusiveBase || !hasRef || !hasDeref)
+ continue;
+
+ auto HasSpecializedDelete = isClassWithSpecializedDelete(C, RD);
+ if (!HasSpecializedDelete || *HasSpecializedDelete)
+ continue;
+ if (C->lookupInBases(
+ [&](const CXXBaseSpecifier *Base, CXXBasePath &) {
+ auto *T = Base->getType().getTypePtrOrNull();
+ if (!T)
+ return false;
+ auto *R = T->getAsCXXRecordDecl();
+ if (!R)
+ return false;
+ auto Result = isClassWithSpecializedDelete(R, RD);
+ if (!Result)
+ AnyInconclusiveBase = true;
+ return Result && *Result;
+ },
+ Paths, /*LookupInDependent =*/true))
+ continue;
+ if (AnyInconclusiveBase)
+ continue;
+
+ const auto *Dtor = C->getDestructor();
+ if (!Dtor || !Dtor->isVirtual()) {
+ auto *ProblematicBaseSpecifier = &Base;
+ auto *ProblematicBaseClass = C;
+ reportBug(RD, ProblematicBaseSpecifier, ProblematicBaseClass);
+ }
}
}
@@ -182,6 +326,32 @@ public:
ClsName == "ThreadSafeRefCountedAndCanMakeThreadSafeWeakPtr");
}
+ static std::optional<bool>
+ isClassWithSpecializedDelete(const CXXRecordDecl *C,
+ const CXXRecordDecl *DerivedClass) {
+ if (auto *ClsTmplSpDecl = dyn_cast<ClassTemplateSpecializationDecl>(C)) {
+ for (auto *MethodDecl : C->methods()) {
+ if (safeGetName(MethodDecl) == "deref") {
+ DerefFuncDeleteExprVisitor Visitor(ClsTmplSpDecl->getTemplateArgs(),
+ DerivedClass);
+ auto Result = Visitor.HasSpecializedDelete(MethodDecl);
+ if (!Result || *Result)
+ return Result;
+ }
+ }
+ return false;
+ }
+ for (auto *MethodDecl : C->methods()) {
+ if (safeGetName(MethodDecl) == "deref") {
+ DerefFuncDeleteExprVisitor Visitor(DerivedClass);
+ auto Result = Visitor.HasSpecializedDelete(MethodDecl);
+ if (!Result || *Result)
+ return Result;
+ }
+ }
+ return false;
+ }
+
void reportBug(const CXXRecordDecl *DerivedClass,
const CXXBaseSpecifier *BaseSpec,
const CXXRecordDecl *ProblematicBaseClass) const {
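
A sketch of the pattern the new isClassWithSpecializedDelete logic exempts (illustrative, not from the patch): a CRTP ref-counted base whose deref() deletes the most-derived object directly, so destruction is safe without a virtual destructor.

template <typename T>
struct RefCountedCRTP {
  int RefCount = 1;
  void ref() { ++RefCount; }
  void deref() {
    if (!--RefCount)
      delete static_cast<T *>(this); // deletes the most-derived type
  }
};

struct Widget : RefCountedCRTP<Widget> { // no virtual dtor required
  ~Widget() {}
};
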
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLocalVarsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLocalVarsChecker.cpp
index 0d9710a5e2d8..274da0baf2ce 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLocalVarsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLocalVarsChecker.cpp
@@ -135,7 +135,19 @@ public:
bool shouldVisitImplicitCode() const { return false; }
bool VisitVarDecl(VarDecl *V) {
- Checker->visitVarDecl(V);
+ auto *Init = V->getInit();
+ if (Init && V->isLocalVarDecl())
+ Checker->visitVarDecl(V, Init);
+ return true;
+ }
+
+ bool VisitBinaryOperator(const BinaryOperator *BO) {
+ if (BO->isAssignmentOp()) {
+ if (auto *VarRef = dyn_cast<DeclRefExpr>(BO->getLHS())) {
+ if (auto *V = dyn_cast<VarDecl>(VarRef->getDecl()))
+ Checker->visitVarDecl(V, BO->getRHS());
+ }
+ }
return true;
}
@@ -174,7 +186,7 @@ public:
visitor.TraverseDecl(const_cast<TranslationUnitDecl *>(TUD));
}
- void visitVarDecl(const VarDecl *V) const {
+ void visitVarDecl(const VarDecl *V, const Expr *Value) const {
if (shouldSkipVarDecl(V))
return;
@@ -184,12 +196,8 @@ public:
std::optional<bool> IsUncountedPtr = isUncountedPtr(ArgType);
if (IsUncountedPtr && *IsUncountedPtr) {
- const Expr *const InitExpr = V->getInit();
- if (!InitExpr)
- return; // FIXME: later on we might warn on uninitialized vars too
-
if (tryToFindPtrOrigin(
- InitExpr, /*StopAtFirstRefCountedObj=*/false,
+ Value, /*StopAtFirstRefCountedObj=*/false,
[&](const clang::Expr *InitArgOrigin, bool IsSafe) {
if (!InitArgOrigin)
return true;
@@ -232,34 +240,46 @@ public:
}))
return;
- reportBug(V);
+ reportBug(V, Value);
}
}
bool shouldSkipVarDecl(const VarDecl *V) const {
assert(V);
- if (!V->isLocalVarDecl())
- return true;
-
- if (BR->getSourceManager().isInSystemHeader(V->getLocation()))
- return true;
-
- return false;
+ return BR->getSourceManager().isInSystemHeader(V->getLocation());
}
- void reportBug(const VarDecl *V) const {
+ void reportBug(const VarDecl *V, const Expr *Value) const {
assert(V);
SmallString<100> Buf;
llvm::raw_svector_ostream Os(Buf);
- Os << "Local variable ";
- printQuotedQualifiedName(Os, V);
- Os << " is uncounted and unsafe.";
-
- PathDiagnosticLocation BSLoc(V->getLocation(), BR->getSourceManager());
- auto Report = std::make_unique<BasicBugReport>(Bug, Os.str(), BSLoc);
- Report->addRange(V->getSourceRange());
- BR->emitReport(std::move(Report));
+ if (isa<ParmVarDecl>(V)) {
+ Os << "Assignment to an uncounted parameter ";
+ printQuotedQualifiedName(Os, V);
+ Os << " is unsafe.";
+
+ PathDiagnosticLocation BSLoc(Value->getExprLoc(), BR->getSourceManager());
+ auto Report = std::make_unique<BasicBugReport>(Bug, Os.str(), BSLoc);
+ Report->addRange(Value->getSourceRange());
+ BR->emitReport(std::move(Report));
+ } else {
+ if (V->hasLocalStorage())
+ Os << "Local variable ";
+ else if (V->isStaticLocal())
+ Os << "Static local variable ";
+ else if (V->hasGlobalStorage())
+ Os << "Global variable ";
+ else
+ Os << "Variable ";
+ printQuotedQualifiedName(Os, V);
+ Os << " is uncounted and unsafe.";
+
+ PathDiagnosticLocation BSLoc(V->getLocation(), BR->getSourceManager());
+ auto Report = std::make_unique<BasicBugReport>(Bug, Os.str(), BSLoc);
+ Report->addRange(V->getSourceRange());
+ BR->emitReport(std::move(Report));
+ }
}
};
} // namespace
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index 0b1edf3e5c96..793f3a63ea29 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -1970,33 +1970,45 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
ExplodedNodeSet Tmp;
StmtNodeBuilder Bldr2(PreVisit, Tmp, *currBldrCtx);
- const Expr *ArgE;
- if (const auto *DefE = dyn_cast<CXXDefaultArgExpr>(S))
+ bool HasRewrittenInit = false;
+ const Expr *ArgE = nullptr;
+ if (const auto *DefE = dyn_cast<CXXDefaultArgExpr>(S)) {
ArgE = DefE->getExpr();
- else if (const auto *DefE = dyn_cast<CXXDefaultInitExpr>(S))
+ HasRewrittenInit = DefE->hasRewrittenInit();
+ } else if (const auto *DefE = dyn_cast<CXXDefaultInitExpr>(S)) {
ArgE = DefE->getExpr();
- else
+ HasRewrittenInit = DefE->hasRewrittenInit();
+ } else
llvm_unreachable("unknown constant wrapper kind");
- bool IsTemporary = false;
- if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(ArgE)) {
- ArgE = MTE->getSubExpr();
- IsTemporary = true;
- }
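+ // A rewritten init is part of the current function, so ArgE has already
+ // been evaluated; simply re-bind its value to the wrapper expression.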
+ if (HasRewrittenInit) {
+ for (auto *N : PreVisit) {
+ ProgramStateRef state = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
+ state = state->BindExpr(S, LCtx, state->getSVal(ArgE, LCtx));
+ Bldr2.generateNode(S, N, state);
+ }
+ } else {
+ // If it's not rewritten, the contents of these expressions are not
+ // actually part of the current function, so we fall back to constant
+ // evaluation.
+ bool IsTemporary = false;
+ if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(ArgE)) {
+ ArgE = MTE->getSubExpr();
+ IsTemporary = true;
+ }
+
+ std::optional<SVal> ConstantVal = svalBuilder.getConstantVal(ArgE);
+ const LocationContext *LCtx = Pred->getLocationContext();
+ for (auto *I : PreVisit) {
+ ProgramStateRef State = I->getState();
+ State = State->BindExpr(S, LCtx, ConstantVal.value_or(UnknownVal()));
+ if (IsTemporary)
+ State = createTemporaryRegionIfNeeded(State, LCtx, cast<Expr>(S),
+ cast<Expr>(S));
- std::optional<SVal> ConstantVal = svalBuilder.getConstantVal(ArgE);
- if (!ConstantVal)
- ConstantVal = UnknownVal();
-
- const LocationContext *LCtx = Pred->getLocationContext();
- for (const auto I : PreVisit) {
- ProgramStateRef State = I->getState();
- State = State->BindExpr(S, LCtx, *ConstantVal);
- if (IsTemporary)
- State = createTemporaryRegionIfNeeded(State, LCtx,
- cast<Expr>(S),
- cast<Expr>(S));
- Bldr2.generateNode(S, I, State);
+ Bldr2.generateNode(S, I, State);
+ }
}
getCheckerManager().runCheckersForPostStmt(Dst, Tmp, S, *this);
diff --git a/clang/test/AST/Interp/arrays.cpp b/clang/test/AST/Interp/arrays.cpp
index 929f25b95fa1..dd5064d993e6 100644
--- a/clang/test/AST/Interp/arrays.cpp
+++ b/clang/test/AST/Interp/arrays.cpp
@@ -26,6 +26,7 @@ static_assert(foo[2][2] == nullptr, "");
static_assert(foo[2][3] == &m, "");
static_assert(foo[2][4] == nullptr, "");
+constexpr int ZeroSizeArray[] = {};
constexpr int SomeInt[] = {1};
constexpr int getSomeInt() { return *SomeInt; }
@@ -53,6 +54,10 @@ constexpr int derefPtr(const int *d) {
}
static_assert(derefPtr(data) == 5, "");
+/// Make sure we can refer to the one-past-the-end element
+/// and then return back to the end of the array.
+static_assert((&data[5])[-1] == 1, "");
+
constexpr int storePtr() {
int b[] = {1,2,3,4};
int *c = b;
@@ -595,3 +600,12 @@ int test_multiarray22() {
}
#endif
+
+namespace ArrayMemberAccess {
+ struct A {
+ int x;
+ };
+ void f(const A (&a)[]) {
+ bool cond = a->x;
+ }
+}
diff --git a/clang/test/AST/Interp/builtin-functions.cpp b/clang/test/AST/Interp/builtin-functions.cpp
index fbe76aba73c9..0a17106449fa 100644
--- a/clang/test/AST/Interp/builtin-functions.cpp
+++ b/clang/test/AST/Interp/builtin-functions.cpp
@@ -900,7 +900,7 @@ namespace shufflevector {
static_assert(vectorShuffle6[7] == 7, "");// ref-error {{not an integral constant expression}}
constexpr vector4char vectorShuffleFail1 = __builtin_shufflevector( // both-error {{must be initialized by a constant expression}}\
- // ref-error {{index for __builtin_shufflevector not within the bounds of the input vectors; index of -1 found at position 0 not permitted in a constexpr context.}}
+ // ref-error {{index for __builtin_shufflevector not within the bounds of the input vectors; index of -1 found at position 0 is not permitted in a constexpr context}}
vector4charConst1,
vector4charConst2, -1, -1, -1, -1);
}
diff --git a/clang/test/AST/Interp/c.c b/clang/test/AST/Interp/c.c
index 2a75457a4693..f4c7bf16f2f9 100644
--- a/clang/test/AST/Interp/c.c
+++ b/clang/test/AST/Interp/c.c
@@ -278,3 +278,15 @@ void addrlabelexpr(void) {
a0: ;
static void *ps[] = { &&a0 }; // pedantic-warning {{use of GNU address-of-label extension}}
}
+
+extern void cv2;
+void *foo5 (void)
+{
+ return &cv2; // pedantic-warning{{address of an expression of type 'void'}}
+}
+
+__attribute__((weak)) const unsigned int test10_bound = 10;
+char test10_global[test10_bound]; // all-error {{variable length array declaration not allowed at file scope}}
+void test10(void) {
+ char test10_local[test10_bound] = "help"; // all-error {{variable-sized object may not be initialized}}
+}
diff --git a/clang/test/AST/Interp/cxx03.cpp b/clang/test/AST/Interp/cxx03.cpp
index b6aaf0840cfb..70ae4134842b 100644
--- a/clang/test/AST/Interp/cxx03.cpp
+++ b/clang/test/AST/Interp/cxx03.cpp
@@ -24,3 +24,8 @@ namespace NonLValueMemberExpr {
const int &TT1::subobj_init = PODType().value;
}
+
+void LambdaAccessingADummy() {
+ int d;
+ int a9[1] = {[d = 0] = 1}; // both-error {{is not an integral constant expression}}
+}
diff --git a/clang/test/AST/Interp/cxx11.cpp b/clang/test/AST/Interp/cxx11.cpp
index 993e3618a378..f06a5dd173cb 100644
--- a/clang/test/AST/Interp/cxx11.cpp
+++ b/clang/test/AST/Interp/cxx11.cpp
@@ -30,3 +30,19 @@ constexpr S s = { 5 };
constexpr const int *p = &s.m + 1;
constexpr const int *np2 = &(*(int(*)[4])nullptr)[0]; // ok
+
+constexpr int preDec(int x) { // both-error {{never produces a constant expression}}
+ return --x; // both-note {{subexpression}}
+}
+
+constexpr int postDec(int x) { // both-error {{never produces a constant expression}}
+ return x--; // both-note {{subexpression}}
+}
+
+constexpr int preInc(int x) { // both-error {{never produces a constant expression}}
+ return ++x; // both-note {{subexpression}}
+}
+
+constexpr int postInc(int x) { // both-error {{never produces a constant expression}}
+ return x++; // both-note {{subexpression}}
+}
diff --git a/clang/test/AST/Interp/cxx98.cpp b/clang/test/AST/Interp/cxx98.cpp
index be81735329db..e68e4dbc8d74 100644
--- a/clang/test/AST/Interp/cxx98.cpp
+++ b/clang/test/AST/Interp/cxx98.cpp
@@ -50,3 +50,7 @@ _Static_assert(c0_test == 0, "");
int a = 0; // both-note {{declared here}}
_Static_assert(a == 0, ""); // both-error {{static assertion expression is not an integral constant expression}} \
// both-note {{read of non-const variable 'a' is not allowed in a constant expression}}
+
+struct SelfReference { SelfReference &r; };
+extern SelfReference self_reference_1;
+SelfReference self_reference_2 = {self_reference_1};
diff --git a/clang/test/AST/Interp/eval-order.cpp b/clang/test/AST/Interp/eval-order.cpp
index 695a43c9d235..aaf2b74510bb 100644
--- a/clang/test/AST/Interp/eval-order.cpp
+++ b/clang/test/AST/Interp/eval-order.cpp
@@ -1,8 +1,7 @@
-// RUN: %clang_cc1 -std=c++1z -verify %s -fcxx-exceptions -triple=x86_64-linux-gnu
-// RUN: %clang_cc1 -std=c++1z -verify %s -fcxx-exceptions -triple=x86_64-linux-gnu -fexperimental-new-constant-interpreter
+// RUN: %clang_cc1 -std=c++1z -verify=ref,both %s -fcxx-exceptions -triple=x86_64-linux-gnu
+// RUN: %clang_cc1 -std=c++1z -verify=expected,both %s -fcxx-exceptions -triple=x86_64-linux-gnu -fexperimental-new-constant-interpreter
// ref-no-diagnostics
-// expected-no-diagnostics
/// Check that assignment operators evaluate their operands right-to-left.
/// Copied from test/SemaCXX/constant-expression-cxx1z.cpp
@@ -46,7 +45,7 @@ namespace EvalOrder {
}
template <typename T> constexpr T &&b(T &&v) {
if (!done_a)
- throw "wrong";
+ throw "wrong"; // expected-note 7{{not valid}}
done_b = true;
return (T &&)v;
}
@@ -76,21 +75,30 @@ namespace EvalOrder {
// SEQ(A(&ud)->*B(&UserDefined::n)); FIXME
// Rule 4: a(b1, b2, b3)
- // SEQ(A(f)(B(1), B(2), B(3))); FIXME
+ SEQ(A(f)(B(1), B(2), B(3))); // expected-error {{not an integral constant expression}} FIXME \
+ // expected-note 2{{in call to}}
// Rule 5: b = a, b @= a
- // SEQ(B(lvalue<int>().get()) = A(0)); FIXME
- // SEQ(B(lvalue<UserDefined>().get()) = A(ud)); FIXME
+ SEQ(B(lvalue<int>().get()) = A(0)); // expected-error {{not an integral constant expression}} FIXME \
+ // expected-note 2{{in call to}}
+ SEQ(B(lvalue<UserDefined>().get()) = A(ud)); // expected-error {{not an integral constant expression}} FIXME \
+ // expected-note 2{{in call to}}
SEQ(B(lvalue<int>().get()) += A(0));
- // SEQ(B(lvalue<UserDefined>().get()) += A(ud)); FIXME
- // SEQ(B(lvalue<NonMember>().get()) += A(nm)); FIXME
+ SEQ(B(lvalue<UserDefined>().get()) += A(ud)); // expected-error {{not an integral constant expression}} FIXME \
+ // expected-note 2{{in call to}}
+
+ SEQ(B(lvalue<NonMember>().get()) += A(nm)); // expected-error {{not an integral constant expression}} FIXME \
+ // expected-note 2{{in call to}}
+
// Rule 6: a[b]
constexpr int arr[3] = {};
SEQ(A(arr)[B(0)]);
SEQ(A(+arr)[B(0)]);
- // SEQ(A(0)[B(arr)]); FIXME
- // SEQ(A(0)[B(+arr)]); FIXME
+ SEQ(A(0)[B(arr)]); // expected-error {{not an integral constant expression}} FIXME \
+ // expected-note 2{{in call to}}
+ SEQ(A(0)[B(+arr)]); // expected-error {{not an integral constant expression}} FIXME \
+ // expected-note 2{{in call to}}
SEQ(A(ud)[B(0)]);
// Rule 7: a << b
diff --git a/clang/test/AST/Interp/functions.cpp b/clang/test/AST/Interp/functions.cpp
index a5bb9f1a19aa..10c62a43ef33 100644
--- a/clang/test/AST/Interp/functions.cpp
+++ b/clang/test/AST/Interp/functions.cpp
@@ -617,3 +617,15 @@ namespace {
void bir [[clang::annotate("B", {1, 2, 3, 4})]] (); // both-error {{'annotate' attribute requires parameter 1 to be a constant expression}} \
// both-note {{subexpression not valid in a constant expression}}
}
+
+namespace FuncPtrParam {
+ void foo(int(&a)()) {
+ *a; // both-warning {{expression result unused}}
+ }
+}
+
+namespace {
+ void f() noexcept;
+ void (&r)() = f;
+ void (&cond3)() = r;
+}
diff --git a/clang/test/AST/Interp/objc.mm b/clang/test/AST/Interp/objc.mm
new file mode 100644
index 000000000000..6402c8ae098f
--- /dev/null
+++ b/clang/test/AST/Interp/objc.mm
@@ -0,0 +1,13 @@
+// RUN: %clang_cc1 -fexperimental-new-constant-interpreter -verify=expected,both %s
+// RUN: %clang_cc1 -verify=ref,both %s
+
+@interface A {
+ int a;
+ static_assert(a, ""); // both-error {{static assertion expression is not an integral constant expression}}
+}
+@end
+
+@interface NSString
+@end
+constexpr NSString *t0 = @"abc";
+constexpr NSString *t1 = @("abc");
diff --git a/clang/test/AST/Interp/records.cpp b/clang/test/AST/Interp/records.cpp
index 41be9b71a27f..0a89c81bafd5 100644
--- a/clang/test/AST/Interp/records.cpp
+++ b/clang/test/AST/Interp/records.cpp
@@ -1335,8 +1335,6 @@ namespace UnnamedBitFields {
static_assert(a.c == 'a', "");
}
-/// FIXME: This still doesn't work in the new interpreter because
-/// we lack type information for dummy pointers.
namespace VirtualBases {
/// This used to crash.
namespace One {
@@ -1346,7 +1344,7 @@ namespace VirtualBases {
};
class B : public virtual A {
public:
- int getX() { return x; } // ref-note {{declared here}}
+ int getX() { return x; } // both-note {{declared here}}
};
class DV : virtual public B{};
@@ -1354,7 +1352,7 @@ namespace VirtualBases {
void foo() {
DV b;
int a[b.getX()]; // both-warning {{variable length arrays}} \
- // ref-note {{non-constexpr function 'getX' cannot be used}}
+ // both-note {{non-constexpr function 'getX' cannot be used}}
}
}
@@ -1459,3 +1457,26 @@ namespace TemporaryWithInvalidDestructor {
// both-note {{in call to}}
#endif
}
+
+namespace IgnoredCtorWithZeroInit {
+ struct S {
+ int a;
+ };
+
+ bool get_status() {
+ return (S(), true);
+ }
+}
+
+#if __cplusplus >= 202002L
+namespace VirtOperator {
+ /// This used to crash because it's a virtual CXXOperatorCallExpr.
+ struct B {
+ virtual constexpr bool operator==(const B&) const { return true; }
+ };
+ struct D : B {
+ constexpr bool operator==(const B&) const override { return false; } // both-note {{operator}}
+ };
+ constexpr bool cmp_base_derived = D() == D(); // both-warning {{ambiguous}}
+}
+#endif
diff --git a/clang/test/AST/Interp/sycl.cpp b/clang/test/AST/Interp/sycl.cpp
new file mode 100644
index 000000000000..5c922eca5809
--- /dev/null
+++ b/clang/test/AST/Interp/sycl.cpp
@@ -0,0 +1,9 @@
+// RUN: %clang_cc1 %s -std=c++17 -triple x86_64-linux-gnu -fsycl-is-device -verify=both,ref -fsyntax-only -Wno-unused
+// RUN: %clang_cc1 %s -std=c++17 -triple x86_64-linux-gnu -fsycl-is-device -verify=both,expected -fsyntax-only -Wno-unused -fexperimental-new-constant-interpreter
+
+// both-no-diagnostics
+
+constexpr int a = 0;
+constexpr const char *a_name = __builtin_sycl_unique_stable_name(decltype(a));
+static_assert(__builtin_strcmp(a_name, "_ZTSKi") == 0);
+
diff --git a/clang/test/AST/Interp/unions.cpp b/clang/test/AST/Interp/unions.cpp
new file mode 100644
index 000000000000..293a1981a52f
--- /dev/null
+++ b/clang/test/AST/Interp/unions.cpp
@@ -0,0 +1,67 @@
+// RUN: %clang_cc1 -fexperimental-new-constant-interpreter -verify=expected,both %s
+// RUN: %clang_cc1 -verify=ref,both %s
+
+union U {
+ int a;
+ int b;
+};
+
+constexpr U a = {12};
+static_assert(a.a == 12, "");
+static_assert(a.b == 0, ""); // both-error {{not an integral constant expression}} \
+ // both-note {{read of member 'b' of union with active member 'a'}}
+union U1 {
+ int i;
+ float f = 3.0f;
+};
+constexpr U1 u1{};
+static_assert(u1.f == 3.0, "");
+static_assert(u1.i == 1, ""); // both-error {{not an integral constant expression}} \
+ // both-note {{read of member 'i' of union with active member 'f'}}
+
+
+
+union A {
+ int a;
+ double d;
+};
+constexpr A aa = {1, 2.0}; // both-error {{excess elements in union initializer}}
+constexpr A ab = {.d = 1.0};
+static_assert(ab.d == 1.0, "");
+static_assert(ab.a == 1, ""); // both-error {{not an integral constant expression}} \
+ // both-note {{read of member 'a' of union with active member 'd'}}
+
+
+namespace Empty {
+ union E {};
+ constexpr E e{};
+}
+
+namespace SimpleStore {
+ union A {
+ int a;
+ int b;
+ };
+ constexpr int foo() {
+ A a{.b = 4};
+ a.b = 10;
+ return a.b;
+ }
+ static_assert(foo() == 10, "");
+
+ constexpr int empty() {
+ A a{}; /// Just test that this works.
+ return 10;
+ }
+ static_assert(empty() == 10, "");
+}
+
+namespace ZeroInit {
+ struct S { int m; };
+ union Z {
+ float f;
+ };
+
+ constexpr Z z{};
+ static_assert(z.f == 0.0, "");
+}
diff --git a/clang/test/AST/ast-dump-ctad-alias.cpp b/clang/test/AST/ast-dump-ctad-alias.cpp
index 7fe6c05621ee..9382558393e4 100644
--- a/clang/test/AST/ast-dump-ctad-alias.cpp
+++ b/clang/test/AST/ast-dump-ctad-alias.cpp
@@ -48,3 +48,23 @@ Out2<double>::AInner t(1.0);
// CHECK-NEXT: | |-TemplateArgument type 'double'
// CHECK-NEXT: | | `-BuiltinType {{.*}} 'double'
// CHECK-NEXT: | `-ParmVarDecl {{.*}} 'double'
+
+template <typename... T1>
+struct Foo {
+ Foo(T1...);
+};
+
+template <typename...T2>
+using AFoo = Foo<T2...>;
+AFoo a(1, 2);
+// CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for AFoo> 'auto (type-parameter-0-0...) -> Foo<type-parameter-0-0...>'
+// CHECK-NEXT: | | `-ParmVarDecl {{.*}} 'type-parameter-0-0...' pack
+// CHECK-NEXT: | `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for AFoo> 'auto (int, int) -> Foo<int, int>' implicit_instantiation
+
+template <typename T>
+using BFoo = Foo<T, T>;
+BFoo b2(1.0, 2.0);
+// CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for BFoo> 'auto (type-parameter-0-0, type-parameter-0-0) -> Foo<type-parameter-0-0, type-parameter-0-0>'
+// CHECK-NEXT: | | |-ParmVarDecl {{.*}} 'type-parameter-0-0'
+// CHECK-NEXT: | | `-ParmVarDecl {{.*}} 'type-parameter-0-0'
+// CHECK-NEXT: | `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for BFoo> 'auto (double, double) -> Foo<double, double>' implicit_instantiation
diff --git a/clang/test/AST/ast-dump-decl.cpp b/clang/test/AST/ast-dump-decl.cpp
index 554cdcf83fcd..e062d4f068a4 100644
--- a/clang/test/AST/ast-dump-decl.cpp
+++ b/clang/test/AST/ast-dump-decl.cpp
@@ -459,7 +459,7 @@ namespace testClassTemplateDecl {
// CHECK: ClassTemplateDecl 0x{{.+}} <{{.+}}:[[@LINE-148]]:3, col:31> col:31 TestTemplateDefaultNonType{{$}}
// CHECK-NEXT: |-NonTypeTemplateParmDecl 0x{{.+}} <col:12, col:20> col:16 'int' depth 0 index 0 I{{$}}
-// CHECK-NEXT: | `-TemplateArgument expr{{$}}
+// CHECK-NEXT: | `-TemplateArgument <col:20> expr{{$}}
// CHECK-NEXT: | `-IntegerLiteral 0x{{.+}} <col:20> 'int' 42{{$}}
// CHECK-NEXT: `-CXXRecordDecl 0x{{.+}} <col:24, col:31> col:31 struct TestTemplateDefaultNonType{{$}}
@@ -671,7 +671,7 @@ namespace TestNonTypeTemplateParmDecl {
// CHECK: NamespaceDecl{{.*}} TestNonTypeTemplateParmDecl
// CHECK-NEXT: FunctionTemplateDecl
// CHECK-NEXT: NonTypeTemplateParmDecl{{.*}} 'int' depth 0 index 0 I
-// CHECK-NEXT: TemplateArgument expr
+// CHECK-NEXT: TemplateArgument {{.*}} expr
// CHECK-NEXT: IntegerLiteral{{.*}} 'int' 1
// CHECK-NEXT: NonTypeTemplateParmDecl{{.*}} 'int' depth 0 index 1 ... J
diff --git a/clang/test/AST/ast-dump-default-init-json.cpp b/clang/test/AST/ast-dump-default-init-json.cpp
index 1058b4e3ea4d..f4949a9c9eed 100644
--- a/clang/test/AST/ast-dump-default-init-json.cpp
+++ b/clang/test/AST/ast-dump-default-init-json.cpp
@@ -789,10 +789,10 @@ void test() {
// CHECK-NEXT: "valueCategory": "lvalue",
// CHECK-NEXT: "extendingDecl": {
// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "FieldDecl",
-// CHECK-NEXT: "name": "a",
+// CHECK-NEXT: "kind": "VarDecl",
+// CHECK-NEXT: "name": "b",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "qualType": "const A &"
+// CHECK-NEXT: "qualType": "B"
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "storageDuration": "automatic",
diff --git a/clang/test/AST/ast-dump-default-init.cpp b/clang/test/AST/ast-dump-default-init.cpp
index 15b29f04bf21..26864fbf1542 100644
--- a/clang/test/AST/ast-dump-default-init.cpp
+++ b/clang/test/AST/ast-dump-default-init.cpp
@@ -13,7 +13,7 @@ void test() {
}
// CHECK: -CXXDefaultInitExpr 0x{{[^ ]*}} <{{.*}}> 'const A' lvalue has rewritten init
// CHECK-NEXT: `-ExprWithCleanups 0x{{[^ ]*}} <{{.*}}> 'const A' lvalue
-// CHECK-NEXT: `-MaterializeTemporaryExpr 0x{{[^ ]*}} <{{.*}}> 'const A' lvalue extended by Field 0x{{[^ ]*}} 'a' 'const A &'
+// CHECK-NEXT: `-MaterializeTemporaryExpr 0x{{[^ ]*}} <{{.*}}> 'const A' lvalue extended by Var 0x{{[^ ]*}} 'b' 'B'
// CHECK-NEXT: `-ImplicitCastExpr 0x{{[^ ]*}} <{{.*}}> 'const A' <NoOp>
// CHECK-NEXT: `-CXXFunctionalCastExpr 0x{{[^ ]*}} <{{.*}}> 'A' functional cast to A <NoOp>
// CHECK-NEXT: `-InitListExpr 0x{{[^ ]*}} <{{.*}}> 'A'
diff --git a/clang/test/AST/ast-dump-expr-json.cpp b/clang/test/AST/ast-dump-expr-json.cpp
index 4b7365e554cb..dd2fe1fcf60c 100644
--- a/clang/test/AST/ast-dump-expr-json.cpp
+++ b/clang/test/AST/ast-dump-expr-json.cpp
@@ -2333,7 +2333,7 @@ void TestNonADLCall3() {
// CHECK-NEXT: "kind": "FunctionDecl",
// CHECK-NEXT: "name": "operator delete",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "qualType": "void (void *) noexcept"
+// CHECK-NEXT: "qualType": "void (void *, unsigned long) noexcept"
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "inner": [
diff --git a/clang/test/AST/ast-dump-expr.cpp b/clang/test/AST/ast-dump-expr.cpp
index 604868103dab..f9e9ee9d35dd 100644
--- a/clang/test/AST/ast-dump-expr.cpp
+++ b/clang/test/AST/ast-dump-expr.cpp
@@ -164,7 +164,7 @@ void UnaryExpressions(int *p) {
// CHECK-NEXT: DeclRefExpr 0x{{[^ ]*}} <col:8> 'int *' lvalue ParmVar 0x{{[^ ]*}} 'p' 'int *'
::delete p;
- // CHECK: CXXDeleteExpr 0x{{[^ ]*}} <line:[[@LINE-1]]:3, col:12> 'void' global Function 0x{{[^ ]*}} 'operator delete' 'void (void *) noexcept'
+ // CHECK: CXXDeleteExpr 0x{{[^ ]*}} <line:[[@LINE-1]]:3, col:12> 'void' global Function 0x{{[^ ]*}} 'operator delete' 'void (void *, unsigned long) noexcept'
// CHECK-NEXT: ImplicitCastExpr
// CHECK-NEXT: DeclRefExpr 0x{{[^ ]*}} <col:12> 'int *' lvalue ParmVar 0x{{[^ ]*}} 'p' 'int *'
diff --git a/clang/test/AST/ast-dump-stmt-json.cpp b/clang/test/AST/ast-dump-stmt-json.cpp
index 667a12a01202..a473d17da942 100644
--- a/clang/test/AST/ast-dump-stmt-json.cpp
+++ b/clang/test/AST/ast-dump-stmt-json.cpp
@@ -994,7 +994,7 @@ void TestDependentGenericSelectionExpr(Ty T) {
// CHECK-NEXT: "kind": "FunctionDecl",
// CHECK-NEXT: "name": "operator delete",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "qualType": "void (void *) noexcept"
+// CHECK-NEXT: "qualType": "void (void *, unsigned long) noexcept"
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "inner": [
@@ -1369,7 +1369,7 @@ void TestDependentGenericSelectionExpr(Ty T) {
// CHECK-NEXT: "kind": "FunctionDecl",
// CHECK-NEXT: "name": "operator delete",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "qualType": "void (void *) noexcept"
+// CHECK-NEXT: "qualType": "void (void *, unsigned long) noexcept"
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "inner": [
@@ -1722,7 +1722,6 @@ void TestDependentGenericSelectionExpr(Ty T) {
// CHECK-NEXT: "end": {}
// CHECK-NEXT: },
// CHECK-NEXT: "isImplicit": true,
-// CHECK-NEXT: "isUsed": true,
// CHECK-NEXT: "name": "operator delete",
// CHECK-NEXT: "mangledName": "_ZdlPv",
// CHECK-NEXT: "type": {
@@ -1819,6 +1818,126 @@ void TestDependentGenericSelectionExpr(Ty T) {
// CHECK-NEXT: },
// CHECK-NEXT: "isImplicit": true,
// CHECK-NEXT: "isUsed": true,
+// CHECK-NEXT: "name": "operator delete",
+// CHECK-NEXT: "mangledName": "_ZdlPvm",
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "void (void *, unsigned long) noexcept"
+// CHECK-NEXT: },
+// CHECK-NEXT: "inner": [
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "ParmVarDecl",
+// CHECK-NEXT: "loc": {},
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {},
+// CHECK-NEXT: "end": {}
+// CHECK-NEXT: },
+// CHECK-NEXT: "isImplicit": true,
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "void *"
+// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "ParmVarDecl",
+// CHECK-NEXT: "loc": {},
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {},
+// CHECK-NEXT: "end": {}
+// CHECK-NEXT: },
+// CHECK-NEXT: "isImplicit": true,
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "unsigned long"
+// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "VisibilityAttr",
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {},
+// CHECK-NEXT: "end": {}
+// CHECK-NEXT: },
+// CHECK-NEXT: "implicit": true,
+// CHECK-NEXT: "visibility": "default"
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
+// CHECK-NEXT: }
+
+// CHECK-NOT: {{^}}Dumping
+// CHECK: "kind": "FunctionDecl",
+// CHECK-NEXT: "loc": {},
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {},
+// CHECK-NEXT: "end": {}
+// CHECK-NEXT: },
+// CHECK-NEXT: "isImplicit": true,
+// CHECK-NEXT: "name": "operator delete",
+// CHECK-NEXT: "mangledName": "_ZdlPvmSt11align_val_t",
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "void (void *, unsigned long, std::align_val_t) noexcept"
+// CHECK-NEXT: },
+// CHECK-NEXT: "inner": [
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "ParmVarDecl",
+// CHECK-NEXT: "loc": {},
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {},
+// CHECK-NEXT: "end": {}
+// CHECK-NEXT: },
+// CHECK-NEXT: "isImplicit": true,
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "void *"
+// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "ParmVarDecl",
+// CHECK-NEXT: "loc": {},
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {},
+// CHECK-NEXT: "end": {}
+// CHECK-NEXT: },
+// CHECK-NEXT: "isImplicit": true,
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "unsigned long"
+// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "ParmVarDecl",
+// CHECK-NEXT: "loc": {},
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {},
+// CHECK-NEXT: "end": {}
+// CHECK-NEXT: },
+// CHECK-NEXT: "isImplicit": true,
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "std::align_val_t"
+// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "VisibilityAttr",
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {},
+// CHECK-NEXT: "end": {}
+// CHECK-NEXT: },
+// CHECK-NEXT: "implicit": true,
+// CHECK-NEXT: "visibility": "default"
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
+// CHECK-NEXT: }
+
+// CHECK-NOT: {{^}}Dumping
+// CHECK: "kind": "FunctionDecl",
+// CHECK-NEXT: "loc": {},
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {},
+// CHECK-NEXT: "end": {}
+// CHECK-NEXT: },
+// CHECK-NEXT: "isImplicit": true,
+// CHECK-NEXT: "isUsed": true,
// CHECK-NEXT: "name": "operator delete[]",
// CHECK-NEXT: "mangledName": "_ZdaPv",
// CHECK-NEXT: "type": {
@@ -1907,6 +2026,125 @@ void TestDependentGenericSelectionExpr(Ty T) {
// CHECK-NOT: {{^}}Dumping
+// CHECK: "kind": "FunctionDecl",
+// CHECK-NEXT: "loc": {},
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {},
+// CHECK-NEXT: "end": {}
+// CHECK-NEXT: },
+// CHECK-NEXT: "isImplicit": true,
+// CHECK-NEXT: "name": "operator delete[]",
+// CHECK-NEXT: "mangledName": "_ZdaPvm",
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "void (void *, unsigned long) noexcept"
+// CHECK-NEXT: },
+// CHECK-NEXT: "inner": [
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "ParmVarDecl",
+// CHECK-NEXT: "loc": {},
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {},
+// CHECK-NEXT: "end": {}
+// CHECK-NEXT: },
+// CHECK-NEXT: "isImplicit": true,
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "void *"
+// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "ParmVarDecl",
+// CHECK-NEXT: "loc": {},
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {},
+// CHECK-NEXT: "end": {}
+// CHECK-NEXT: },
+// CHECK-NEXT: "isImplicit": true,
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "unsigned long"
+// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "VisibilityAttr",
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {},
+// CHECK-NEXT: "end": {}
+// CHECK-NEXT: },
+// CHECK-NEXT: "implicit": true,
+// CHECK-NEXT: "visibility": "default"
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
+// CHECK-NEXT: }
+
+// CHECK-NOT: {{^}}Dumping
+// CHECK: "kind": "FunctionDecl",
+// CHECK-NEXT: "loc": {},
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {},
+// CHECK-NEXT: "end": {}
+// CHECK-NEXT: },
+// CHECK-NEXT: "isImplicit": true,
+// CHECK-NEXT: "name": "operator delete[]",
+// CHECK-NEXT: "mangledName": "_ZdaPvmSt11align_val_t",
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "void (void *, unsigned long, std::align_val_t) noexcept"
+// CHECK-NEXT: },
+// CHECK-NEXT: "inner": [
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "ParmVarDecl",
+// CHECK-NEXT: "loc": {},
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {},
+// CHECK-NEXT: "end": {}
+// CHECK-NEXT: },
+// CHECK-NEXT: "isImplicit": true,
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "void *"
+// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "ParmVarDecl",
+// CHECK-NEXT: "loc": {},
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {},
+// CHECK-NEXT: "end": {}
+// CHECK-NEXT: },
+// CHECK-NEXT: "isImplicit": true,
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "unsigned long"
+// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "ParmVarDecl",
+// CHECK-NEXT: "loc": {},
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {},
+// CHECK-NEXT: "end": {}
+// CHECK-NEXT: },
+// CHECK-NEXT: "isImplicit": true,
+// CHECK-NEXT: "type": {
+// CHECK-NEXT: "qualType": "std::align_val_t"
+// CHECK-NEXT: }
+// CHECK-NEXT: },
+// CHECK-NEXT: {
+// CHECK-NEXT: "id": "0x{{.*}}",
+// CHECK-NEXT: "kind": "VisibilityAttr",
+// CHECK-NEXT: "range": {
+// CHECK-NEXT: "begin": {},
+// CHECK-NEXT: "end": {}
+// CHECK-NEXT: },
+// CHECK-NEXT: "implicit": true,
+// CHECK-NEXT: "visibility": "default"
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
+// CHECK-NEXT: }
+
+// CHECK-NOT: {{^}}Dumping
// CHECK: "kind": "FunctionTemplateDecl",
// CHECK-NEXT: "loc": {
// CHECK-NEXT: "offset": 598,
diff --git a/clang/test/AST/ast-print-openacc-compute-construct.cpp b/clang/test/AST/ast-print-openacc-compute-construct.cpp
index 19965e749141..fe580c86ac8e 100644
--- a/clang/test/AST/ast-print-openacc-compute-construct.cpp
+++ b/clang/test/AST/ast-print-openacc-compute-construct.cpp
@@ -130,5 +130,33 @@ void foo() {
//CHECK: #pragma acc parallel device_type(SomeStructImpl)
#pragma acc parallel device_type (SomeStructImpl)
while(true);
+
+//CHECK: #pragma acc parallel reduction(+: iPtr)
+#pragma acc parallel reduction(+: iPtr)
+ while(true);
+//CHECK: #pragma acc parallel reduction(*: i)
+#pragma acc parallel reduction(*: i)
+ while(true);
+//CHECK: #pragma acc parallel reduction(max: SomeB)
+#pragma acc parallel reduction(max: SomeB)
+ while(true);
+//CHECK: #pragma acc parallel reduction(min: iPtr)
+#pragma acc parallel reduction(min: iPtr)
+ while(true);
+//CHECK: #pragma acc parallel reduction(&: i)
+#pragma acc parallel reduction(&: i)
+ while(true);
+//CHECK: #pragma acc parallel reduction(|: SomeB)
+#pragma acc parallel reduction(|: SomeB)
+ while(true);
+//CHECK: #pragma acc parallel reduction(^: iPtr)
+#pragma acc parallel reduction(^: iPtr)
+ while(true);
+//CHECK: #pragma acc parallel reduction(&&: i)
+#pragma acc parallel reduction(&&: i)
+ while(true);
+//CHECK: #pragma acc parallel reduction(||: SomeB)
+#pragma acc parallel reduction(||: SomeB)
+ while(true);
}
diff --git a/clang/test/Analysis/Checkers/WebKit/ref-cntbl-base-virtual-dtor-ref-deref-on-diff-classes.cpp b/clang/test/Analysis/Checkers/WebKit/ref-cntbl-base-virtual-dtor-ref-deref-on-diff-classes.cpp
index aac58c0c1dda..85108bccfee7 100644
--- a/clang/test/Analysis/Checkers/WebKit/ref-cntbl-base-virtual-dtor-ref-deref-on-diff-classes.cpp
+++ b/clang/test/Analysis/Checkers/WebKit/ref-cntbl-base-virtual-dtor-ref-deref-on-diff-classes.cpp
@@ -19,4 +19,5 @@ struct Derived : Base { };
void foo () {
Derived d;
+ d.deref();
}
diff --git a/clang/test/Analysis/Checkers/WebKit/ref-cntbl-base-virtual-dtor-templates.cpp b/clang/test/Analysis/Checkers/WebKit/ref-cntbl-base-virtual-dtor-templates.cpp
index eeb62d5d89ec..4fc1624d7a15 100644
--- a/clang/test/Analysis/Checkers/WebKit/ref-cntbl-base-virtual-dtor-templates.cpp
+++ b/clang/test/Analysis/Checkers/WebKit/ref-cntbl-base-virtual-dtor-templates.cpp
@@ -10,8 +10,7 @@ struct DerivedClassTmpl1 : T { };
// expected-warning@-1{{Struct 'RefCntblBase' is used as a base of struct 'DerivedClassTmpl1<RefCntblBase>' but doesn't have virtual destructor}}
DerivedClassTmpl1<RefCntblBase> a;
-
-
+void foo(DerivedClassTmpl1<RefCntblBase>& obj) { obj.deref(); }
template<class T>
struct DerivedClassTmpl2 : T { };
@@ -21,7 +20,6 @@ template<class T> int foo(T) { DerivedClassTmpl2<T> f; return 42; }
int b = foo(RefCntblBase{});
-
template<class T>
struct DerivedClassTmpl3 : T { };
// expected-warning@-1{{Struct 'RefCntblBase' is used as a base of struct 'DerivedClassTmpl3<RefCntblBase>' but doesn't have virtual destructor}}
@@ -29,7 +27,6 @@ struct DerivedClassTmpl3 : T { };
typedef DerivedClassTmpl3<RefCntblBase> Foo;
Foo c;
-
namespace WTF {
class RefCountedBase {
@@ -58,33 +55,344 @@ protected:
RefCounted() { }
};
+template <typename X, typename T>
+class ExoticRefCounted : public RefCountedBase {
+public:
+ void deref() const {
+ if (derefBase())
+ delete (const_cast<T*>(static_cast<const T*>(this)));
+ }
+};
+
+template <typename X, typename T>
+class BadBase : RefCountedBase {
+public:
+ void deref() const {
+ if (derefBase())
+ delete (const_cast<X*>(static_cast<const X*>(this)));
+ }
+};
+
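+// deref() below deletes the derived object through a lambda; the checker is
+// expected to see through the indirection and treat subclasses as safe.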
+template <typename T>
+class FancyDeref {
+public:
+ void ref() const
+ {
+ ++refCount;
+ }
+
+ void deref() const
+ {
+ --refCount;
+ if (refCount)
+ return;
+ auto deleteThis = [this] {
+ delete static_cast<const T*>(this);
+ };
+ deleteThis();
+ }
+private:
+ mutable unsigned refCount { 0 };
+};
+
+namespace Detail {
+
+ template<typename Out, typename... In>
+ class CallableWrapperBase {
+ public:
+ virtual ~CallableWrapperBase() { }
+ virtual Out call(In...) = 0;
+ };
+
+ template<typename, typename, typename...> class CallableWrapper;
+
+ template<typename CallableType, typename Out, typename... In>
+ class CallableWrapper : public CallableWrapperBase<Out, In...> {
+ public:
+ explicit CallableWrapper(CallableType&& callable)
+ : m_callable(WTFMove(callable)) { }
+ CallableWrapper(const CallableWrapper&) = delete;
+ CallableWrapper& operator=(const CallableWrapper&) = delete;
+ Out call(In... in) final { return m_callable(in...); }
+ private:
+ CallableType m_callable;
+ };
+
+} // namespace Detail
+
+template<typename> class Function;
+
+template <typename Out, typename... In>
+class Function<Out(In...)> {
+public:
+ using Impl = Detail::CallableWrapperBase<Out, In...>;
+
+ Function() = default;
+
+ template<typename CallableType>
+ Function(CallableType&& callable)
+ : m_callableWrapper(new Detail::CallableWrapper<CallableType, Out, In...>(callable)) { }
+
+ template<typename FunctionType>
+ Function(FunctionType f)
+ : m_callableWrapper(new Detail::CallableWrapper<FunctionType, Out, In...>(f)) { }
+
+ ~Function() {
+ }
+
+ Out operator()(In... in) const {
+ ASSERT(m_callableWrapper);
+ return m_callableWrapper->call(in...);
+ }
+
+ explicit operator bool() const { return !!m_callableWrapper; }
+
+private:
+ Impl* m_callableWrapper;
+};
+
+void ensureOnMainThread(const Function<void()>&& function);
+
+enum class DestructionThread { Any, MainThread };
+
+template <typename T, DestructionThread destructionThread = DestructionThread::Any>
+class FancyDeref2 {
+public:
+ void ref() const
+ {
+ ++refCount;
+ }
+
+ void deref() const
+ {
+ --refCount;
+ if (refCount)
+ return;
+ const_cast<FancyDeref2<T, destructionThread>*>(this)->destroy();
+ }
+
+private:
+ void destroy() {
+ delete static_cast<T*>(this);
+ }
+ mutable unsigned refCount { 0 };
+};
+
+template <typename S>
+class DerivedFancyDeref2 : public FancyDeref2<S> {
+};
+
+template <typename T>
+class BadFancyDeref {
+public:
+ void ref() const
+ {
+ ++refCount;
+ }
+
+ void deref() const
+ {
+ --refCount;
+ if (refCount)
+ return;
+ auto deleteThis = [this] {
+ delete static_cast<const T*>(this);
+ };
+ delete this;
+ }
+private:
+ mutable unsigned refCount { 0 };
+};
+
template <typename T>
class ThreadSafeRefCounted {
public:
- void ref() const;
- bool deref() const;
+ void ref() const { ++refCount; }
+ void deref() const {
+ if (!--refCount)
+ delete const_cast<T*>(static_cast<const T*>(this));
+ }
+private:
+ mutable unsigned refCount { 0 };
};
template <typename T>
class ThreadSafeRefCountedAndCanMakeThreadSafeWeakPtr {
public:
- void ref() const;
- bool deref() const;
+ void ref() const { ++refCount; }
+ void deref() const {
+ if (!--refCount)
+ delete const_cast<T*>(static_cast<const T*>(this));
+ }
+private:
+ mutable unsigned refCount { 0 };
};
} // namespace WTF
class DerivedClass4 : public WTF::RefCounted<DerivedClass4> { };
+class DerivedClass4b : public WTF::ExoticRefCounted<int, DerivedClass4b> { };
+
+class DerivedClass4cSub;
+class DerivedClass4c : public WTF::BadBase<DerivedClass4cSub, DerivedClass4c> { };
+// expected-warning@-1{{Class 'WTF::BadBase<DerivedClass4cSub, DerivedClass4c>' is used as a base of class 'DerivedClass4c' but doesn't have virtual destructor}}
+class DerivedClass4cSub : public DerivedClass4c { };
+void UseDerivedClass4c(DerivedClass4c &obj) { obj.deref(); }
+
+class DerivedClass4d : public WTF::RefCounted<DerivedClass4d> {
+public:
+ virtual ~DerivedClass4d() { }
+};
+class DerivedClass4dSub : public DerivedClass4d { };
+
class DerivedClass5 : public DerivedClass4 { };
// expected-warning@-1{{Class 'DerivedClass4' is used as a base of class 'DerivedClass5' but doesn't have virtual destructor}}
+void UseDerivedClass5(DerivedClass5 &obj) { obj.deref(); }
class DerivedClass6 : public WTF::ThreadSafeRefCounted<DerivedClass6> { };
+void UseDerivedClass6(DerivedClass6 &obj) { obj.deref(); }
class DerivedClass7 : public DerivedClass6 { };
// expected-warning@-1{{Class 'DerivedClass6' is used as a base of class 'DerivedClass7' but doesn't have virtual destructor}}
+void UseDerivedClass7(DerivedClass7 &obj) { obj.deref(); }
class DerivedClass8 : public WTF::ThreadSafeRefCountedAndCanMakeThreadSafeWeakPtr<DerivedClass8> { };
+void UseDerivedClass8(DerivedClass8 &obj) { obj.deref(); }
class DerivedClass9 : public DerivedClass8 { };
// expected-warning@-1{{Class 'DerivedClass8' is used as a base of class 'DerivedClass9' but doesn't have virtual destructor}}
+void UseDerivedClass9(DerivedClass9 &obj) { obj.deref(); }
+
+class DerivedClass10 : public WTF::FancyDeref<DerivedClass10> { };
+void UseDerivedClass10(DerivedClass10 &obj) { obj.deref(); }
+
+class DerivedClass10b : public WTF::DerivedFancyDeref2<DerivedClass10b> { };
+void UseDerivedClass10b(DerivedClass10b &obj) { obj.deref(); }
+
+class DerivedClass10c : public WTF::BadFancyDeref<DerivedClass10c> { };
+// expected-warning@-1{{Class 'WTF::BadFancyDeref<DerivedClass10c>' is used as a base of class 'DerivedClass10c' but doesn't have virtual destructor}}
+void UseDerivedClass10c(DerivedClass10c &obj) { obj.deref(); }
+
+class BaseClass1 {
+public:
+ void ref() const { ++refCount; }
+ void deref() const;
+private:
+ enum class Type { Base, Derived } type { Type::Base };
+ mutable unsigned refCount { 0 };
+};
+
+class DerivedClass11 : public BaseClass1 { };
+
+void BaseClass1::deref() const
+{
+ --refCount;
+ if (refCount)
+ return;
+ switch (type) {
+ case Type::Base:
+ delete const_cast<BaseClass1*>(this);
+ break;
+ case Type::Derived:
+ delete const_cast<DerivedClass11*>(static_cast<const DerivedClass11*>(this));
+ break;
+ }
+}
+
+void UseDerivedClass11(DerivedClass11& obj) { obj.deref(); }
+
+class BaseClass2;
+static void deleteBase2(BaseClass2*);
+
+class BaseClass2 {
+public:
+ void ref() const { ++refCount; }
+ void deref() const
+ {
+ if (!--refCount)
+ deleteBase2(const_cast<BaseClass2*>(this));
+ }
+ virtual bool isDerived() { return false; }
+private:
+ mutable unsigned refCount { 0 };
+};
+
+class DerivedClass12 : public BaseClass2 {
+ bool isDerived() final { return true; }
+};
+
+void UseDerivedClass11(DerivedClass12& obj) { obj.deref(); }
+
+void deleteBase2(BaseClass2* obj) {
+ if (obj->isDerived())
+ delete static_cast<DerivedClass12*>(obj);
+ else
+ delete obj;
+}
+
+class BaseClass3 {
+public:
+ void ref() const { ++refCount; }
+ void deref() const
+ {
+ if (!--refCount)
+ const_cast<BaseClass3*>(this)->destroy();
+ }
+ virtual bool isDerived() { return false; }
+
+private:
+ void destroy();
+
+ mutable unsigned refCount { 0 };
+};
+
+class DerivedClass13 : public BaseClass3 {
+ bool isDerived() final { return true; }
+};
+
+void UseDerivedClass11(DerivedClass13& obj) { obj.deref(); }
+
+void BaseClass3::destroy() {
+ if (isDerived())
+ delete static_cast<DerivedClass13*>(this);
+ else
+ delete this;
+}
+
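+// ref()/deref() may recurse into another object; the analysis must still
+// terminate, and the missing virtual destructor is reported below.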
+class RecursiveBaseClass {
+public:
+ void ref() const {
+ if (otherObject)
+ otherObject->ref();
+ else
+ ++refCount;
+ }
+ void deref() const {
+ if (otherObject)
+ otherObject->deref();
+ else {
+ --refCount;
+ if (refCount)
+ return;
+ delete this;
+ }
+ }
+private:
+ RecursiveBaseClass* otherObject { nullptr };
+ mutable unsigned refCount { 0 };
+};
+
+class RecursiveDerivedClass : public RecursiveBaseClass { };
+// expected-warning@-1{{Class 'RecursiveBaseClass' is used as a base of class 'RecursiveDerivedClass' but doesn't have virtual destructor}}
+
+class DerivedClass14 : public WTF::RefCounted<DerivedClass14> {
+public:
+ virtual ~DerivedClass14() { }
+};
+
+void UseDerivedClass14(DerivedClass14& obj) { obj.deref(); }
+
+class DerivedClass15 : public DerivedClass14 { };
+
+void UseDerivedClass15(DerivedClass15& obj) { obj.deref(); }
diff --git a/clang/test/Analysis/Checkers/WebKit/uncounted-local-vars.cpp b/clang/test/Analysis/Checkers/WebKit/uncounted-local-vars.cpp
index 632a82eb0d8d..25776870dd3a 100644
--- a/clang/test/Analysis/Checkers/WebKit/uncounted-local-vars.cpp
+++ b/clang/test/Analysis/Checkers/WebKit/uncounted-local-vars.cpp
@@ -216,3 +216,76 @@ void foo() {
}
} // namespace conditional_op
+
+namespace local_assignment_basic {
+
+RefCountable *provide_ref_cntbl();
+
+void foo(RefCountable* a) {
+ RefCountable* b = a;
+ // expected-warning@-1{{Local variable 'b' is uncounted and unsafe [alpha.webkit.UncountedLocalVarsChecker]}}
+ if (b->trivial())
+ b = provide_ref_cntbl();
+}
+
+void bar(RefCountable* a) {
+ RefCountable* b;
+ // expected-warning@-1{{Local variable 'b' is uncounted and unsafe [alpha.webkit.UncountedLocalVarsChecker]}}
+ b = provide_ref_cntbl();
+}
+
+void baz() {
+ RefPtr a = provide_ref_cntbl();
+ {
+ RefCountable* b = a.get();
+ // expected-warning@-1{{Local variable 'b' is uncounted and unsafe [alpha.webkit.UncountedLocalVarsChecker]}}
+ b = provide_ref_cntbl();
+ }
+}
+
+} // namespace local_assignment_basic
+
+namespace local_assignment_to_parameter {
+
+RefCountable *provide_ref_cntbl();
+void someFunction();
+
+void foo(RefCountable* a) {
+ a = provide_ref_cntbl();
+ // expected-warning@-1{{Assignment to an uncounted parameter 'a' is unsafe [alpha.webkit.UncountedLocalVarsChecker]}}
+ someFunction();
+ a->method();
+}
+
+} // namespace local_assignment_to_parameter
+
+namespace local_assignment_to_static_local {
+
+RefCountable *provide_ref_cntbl();
+void someFunction();
+
+void foo() {
+ static RefCountable* a = nullptr;
+ // expected-warning@-1{{Static local variable 'a' is uncounted and unsafe [alpha.webkit.UncountedLocalVarsChecker]}}
+ a = provide_ref_cntbl();
+ someFunction();
+ a->method();
+}
+
+} // namespace local_assignment_to_static_local
+
+namespace local_assignment_to_global {
+
+RefCountable *provide_ref_cntbl();
+void someFunction();
+
+RefCountable* g_a = nullptr;
+// expected-warning@-1{{Global variable 'local_assignment_to_global::g_a' is uncounted and unsafe [alpha.webkit.UncountedLocalVarsChecker]}}
+
+void foo() {
+ g_a = provide_ref_cntbl();
+ someFunction();
+ g_a->method();
+}
+
+} // namespace local_assignment_to_global
diff --git a/clang/test/Analysis/Checkers/WebKit/uncounted-obj-arg.cpp b/clang/test/Analysis/Checkers/WebKit/uncounted-obj-arg.cpp
index 96986631726f..a98c6eb9c84d 100644
--- a/clang/test/Analysis/Checkers/WebKit/uncounted-obj-arg.cpp
+++ b/clang/test/Analysis/Checkers/WebKit/uncounted-obj-arg.cpp
@@ -231,6 +231,18 @@ public:
void method();
void someFunction();
int otherFunction();
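+ // Recursive and mutually recursive methods: the trivial-function analysis
+ // must terminate, classifying each by whether it can reach a non-trivial
+ // call such as otherFunction().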
+ unsigned recursiveTrivialFunction(int n) { return !n ? 1 : recursiveTrivialFunction(n - 1); }
+ unsigned recursiveComplexFunction(int n) { return !n ? otherFunction() : recursiveComplexFunction(n - 1); }
+ unsigned mutuallyRecursiveFunction1(int n) { return n < 0 ? 1 : (n % 2 ? mutuallyRecursiveFunction2(n - 2) : mutuallyRecursiveFunction1(n - 1)); }
+ unsigned mutuallyRecursiveFunction2(int n) { return n < 0 ? 1 : (n % 3 ? mutuallyRecursiveFunction2(n - 3) : mutuallyRecursiveFunction1(n - 2)); }
+ unsigned mutuallyRecursiveFunction3(int n) { return n < 0 ? 1 : (n % 5 ? mutuallyRecursiveFunction3(n - 5) : mutuallyRecursiveFunction4(n - 3)); }
+ unsigned mutuallyRecursiveFunction4(int n) { return n < 0 ? 1 : (n % 7 ? otherFunction() : mutuallyRecursiveFunction3(n - 3)); }
+ unsigned recursiveFunction5(unsigned n) { return n > 100 ? 2 : (n % 2 ? recursiveFunction5(n + 1) : recursiveFunction6(n + 2)); }
+ unsigned recursiveFunction6(unsigned n) { return n > 100 ? 3 : (n % 2 ? recursiveFunction6(n % 7) : recursiveFunction7(n % 5)); }
+ unsigned recursiveFunction7(unsigned n) { return n > 100 ? 5 : recursiveFunction7(n * 5); }
+
+ void mutuallyRecursive8() { mutuallyRecursive9(); someFunction(); }
+ void mutuallyRecursive9() { mutuallyRecursive8(); }
int trivial1() { return 123; }
float trivial2() { return 0.3; }
@@ -498,6 +510,24 @@ public:
RefCounted::singleton().trivial18(); // no-warning
RefCounted::singleton().someFunction(); // no-warning
+ getFieldTrivial().recursiveTrivialFunction(7); // no-warning
+ getFieldTrivial().recursiveComplexFunction(9);
+ // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
+ getFieldTrivial().mutuallyRecursiveFunction1(11); // no-warning
+ getFieldTrivial().mutuallyRecursiveFunction2(13); // no-warning
+ getFieldTrivial().mutuallyRecursiveFunction3(17);
+ // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
+ getFieldTrivial().mutuallyRecursiveFunction4(19);
+ // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
+ getFieldTrivial().recursiveFunction5(23); // no-warning
+ getFieldTrivial().recursiveFunction6(29); // no-warning
+ getFieldTrivial().recursiveFunction7(31); // no-warning
+
+ getFieldTrivial().mutuallyRecursive8();
+ // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
+ getFieldTrivial().mutuallyRecursive9();
+ // expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
+
getFieldTrivial().someFunction();
// expected-warning@-1{{Call argument for 'this' parameter is uncounted and unsafe}}
getFieldTrivial().nonTrivial1();
diff --git a/clang/test/Analysis/cert/pos34-c-fp-suppression.cpp b/clang/test/Analysis/cert/pos34-c-fp-suppression.cpp
deleted file mode 100644
index d982fcb8a1ba..000000000000
--- a/clang/test/Analysis/cert/pos34-c-fp-suppression.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-// RUN: %clang_analyze_cc1 \
-// RUN: -analyzer-checker=alpha.security.cert.pos.34c\
-// RUN: -verify %s
-
-#include "../Inputs/system-header-simulator.h"
-void free(void *memblock);
-void *malloc(size_t size);
-int putenv(char *);
-int rand();
-
-namespace test_auto_var_used_good {
-
-extern char *ex;
-int test_extern() {
- return putenv(ex); // no-warning: extern storage class.
-}
-
-void foo(void) {
- char *buffer = (char *)"huttah!";
- if (rand() % 2 == 0) {
- buffer = (char *)malloc(5);
- strcpy(buffer, "woot");
- }
- putenv(buffer);
-}
-
-void bar(void) {
- char *buffer = (char *)malloc(5);
- strcpy(buffer, "woot");
-
- if (rand() % 2 == 0) {
- free(buffer);
- buffer = (char *)"blah blah blah";
- }
- putenv(buffer);
-}
-
-void baz() {
- char env[] = "NAME=value";
- // TODO: False Positive
- putenv(env);
- // expected-warning@-1 {{The 'putenv' function should not be called with arguments that have automatic storage}}
-
- /*
- DO SOMETHING
- */
-
- putenv((char *)"NAME=anothervalue");
-}
-
-} // namespace test_auto_var_used_good
diff --git a/clang/test/Analysis/cert/pos34-c.cpp b/clang/test/Analysis/cert/pos34-c.cpp
deleted file mode 100644
index f2bd7b393d88..000000000000
--- a/clang/test/Analysis/cert/pos34-c.cpp
+++ /dev/null
@@ -1,61 +0,0 @@
-// RUN: %clang_analyze_cc1 \
-// RUN: -analyzer-checker=alpha.security.cert.pos.34c\
-// RUN: -verify %s
-
-// Examples from the CERT rule's page.
-// https://wiki.sei.cmu.edu/confluence/x/6NYxBQ
-
-#include "../Inputs/system-header-simulator.h"
-void free(void *memblock);
-void *malloc(size_t size);
-int putenv(char *);
-int snprintf(char *str, size_t size, const char *format, ...);
-
-namespace test_auto_var_used_bad {
-
-int volatile_memory1(const char *var) {
- char env[1024];
- int retval = snprintf(env, sizeof(env), "TEST=%s", var);
- if (retval < 0 || (size_t)retval >= sizeof(env)) {
- /* Handle error */
- }
-
- return putenv(env);
- // expected-warning@-1 {{The 'putenv' function should not be called with arguments that have automatic storage}}
-}
-
-} // namespace test_auto_var_used_bad
-
-namespace test_auto_var_used_good {
-
-int test_static(const char *var) {
- static char env[1024];
-
- int retval = snprintf(env, sizeof(env), "TEST=%s", var);
- if (retval < 0 || (size_t)retval >= sizeof(env)) {
- /* Handle error */
- }
-
- return putenv(env);
-}
-
-int test_heap_memory(const char *var) {
- static char *oldenv;
- const char *env_format = "TEST=%s";
- const size_t len = strlen(var) + strlen(env_format);
- char *env = (char *)malloc(len);
- if (env == NULL) {
- return -1;
- }
- if (putenv(env) != 0) { // no-warning: env was dynamically allocated.
- free(env);
- return -1;
- }
- if (oldenv != NULL) {
- free(oldenv); /* avoid memory leak */
- }
- oldenv = env;
- return 0;
-}
-
-} // namespace test_auto_var_used_good
diff --git a/clang/test/Analysis/cxx-uninitialized-object.cpp b/clang/test/Analysis/cxx-uninitialized-object.cpp
index e3fa8ae8d7f2..aee0dae15fbf 100644
--- a/clang/test/Analysis/cxx-uninitialized-object.cpp
+++ b/clang/test/Analysis/cxx-uninitialized-object.cpp
@@ -1114,27 +1114,27 @@ void fCXX11MemberInitTest1() {
CXX11MemberInitTest1();
}
+#ifdef PEDANTIC
struct CXX11MemberInitTest2 {
struct RecordType {
- // TODO: we'd expect the note: {{uninitialized field 'this->rec.a'}}
- int a; // no-note
- // TODO: we'd expect the note: {{uninitialized field 'this->rec.b'}}
- int b; // no-note
+ int a; // expected-note {{uninitialized field 'this->a'}}
+ int b; // expected-note {{uninitialized field 'this->b'}}
RecordType(int) {}
};
- RecordType rec = RecordType(int());
+ RecordType rec = RecordType(int()); // expected-warning {{2 uninitialized fields}}
int dontGetFilteredByNonPedanticMode = 0;
CXX11MemberInitTest2() {}
};
void fCXX11MemberInitTest2() {
- // TODO: we'd expect the warning: {{2 uninitializeds field}}
CXX11MemberInitTest2(); // no-warning
}
+#endif // PEDANTIC
+
//===----------------------------------------------------------------------===//
// "Esoteric" primitive type tests.
//===----------------------------------------------------------------------===//
diff --git a/clang/test/Analysis/cxxnewexpr-callback.cpp b/clang/test/Analysis/cxxnewexpr-callback.cpp
index fe7a9fffad93..7df58cfa9ca2 100644
--- a/clang/test/Analysis/cxxnewexpr-callback.cpp
+++ b/clang/test/Analysis/cxxnewexpr-callback.cpp
@@ -9,7 +9,7 @@ void free(void *);
} // namespace std
void *operator new(size_t size) { return std::malloc(size); }
-void operator delete(void *ptr) { std::free(ptr); }
+void operator delete(void *ptr, size_t size) { std::free(ptr); }
struct S {
S() {}
@@ -49,7 +49,7 @@ void test() {
// CHECK-NEXT: PostCall (operator delete)
}
-void operator delete(void *ptr) {
+void operator delete(void *ptr, size_t size) {
std::free(ptr);
// CHECK-NO-INLINE-NEXT: PreCall (std::free)
// CHECK-NO-INLINE-NEXT: PostCall (std::free)
diff --git a/clang/test/Analysis/lifetime-extended-regions.cpp b/clang/test/Analysis/lifetime-extended-regions.cpp
index 4e98bd4b0403..524f4e0c400d 100644
--- a/clang/test/Analysis/lifetime-extended-regions.cpp
+++ b/clang/test/Analysis/lifetime-extended-regions.cpp
@@ -120,11 +120,11 @@ void aggregateWithReferences() {
clang_analyzer_dump(viaReference); // expected-warning-re {{&lifetime_extended_object{RefAggregate, viaReference, S{{[0-9]+}}} }}
clang_analyzer_dump(viaReference.rx); // expected-warning-re {{&lifetime_extended_object{int, viaReference, S{{[0-9]+}}} }}
clang_analyzer_dump(viaReference.ry); // expected-warning-re {{&lifetime_extended_object{Composite, viaReference, S{{[0-9]+}}} }}
-
- // clang does not currently implement extending lifetime of object bound to reference members of aggregates,
- // that are created from default member initializer (see `warn_unsupported_lifetime_extension` from `-Wdangling`)
- RefAggregate defaultInitExtended{i}; // clang-bug does not extend `Composite`
- clang_analyzer_dump(defaultInitExtended.ry); // expected-warning {{Unknown }}
+
+ // The lifetime of objects bound to reference members of aggregates that
+ // are created from a default member initializer is now extended.
+ RefAggregate defaultInitExtended{i};
+ clang_analyzer_dump(defaultInitExtended.ry); // expected-warning-re {{&lifetime_extended_object{Composite, defaultInitExtended, S{{[0-9]+}}} }}
}
void lambda() {
diff --git a/clang/test/Analysis/putenv-stack-array.c b/clang/test/Analysis/putenv-stack-array.c
new file mode 100644
index 000000000000..f28aed73031d
--- /dev/null
+++ b/clang/test/Analysis/putenv-stack-array.c
@@ -0,0 +1,90 @@
+// RUN: %clang_analyze_cc1 \
+// RUN: -analyzer-checker=alpha.security.PutenvStackArray \
+// RUN: -verify %s
+
+#include "Inputs/system-header-simulator.h"
+void free(void *);
+void *malloc(size_t);
+int putenv(char *);
+int snprintf(char *, size_t, const char *, ...);
+
+int test_auto_var(const char *var) {
+ char env[1024];
+ (void)snprintf(env, sizeof(env), "TEST=%s", var);
+ return putenv(env); // expected-warning{{The 'putenv' function should not be called with arrays that have automatic storage}}
+}
+
+int test_static_var(const char *var) {
+ static char env[1024];
+ (void)snprintf(env, sizeof(env), "TEST=%s", var);
+ return putenv(env); // no-warning: static array is used
+}
+
+void test_heap_memory(const char *var) {
+ const char *env_format = "TEST=%s";
+ const size_t len = strlen(var) + strlen(env_format);
+ char *env = (char *)malloc(len);
+ if (env == NULL)
+ return;
+ if (putenv(env) != 0) // no-warning: env was dynamically allocated.
+ free(env);
+}
+
+typedef struct {
+ int A;
+ char Env[1024];
+} Mem;
+
+int test_auto_var_struct() {
+ Mem mem;
+ return putenv(mem.Env); // expected-warning{{The 'putenv' function should not be called with}}
+}
+
+int test_auto_var_subarray() {
+ char env[1024];
+ return putenv(env + 100); // expected-warning{{The 'putenv' function should not be called with}}
+}
+
+int f_test_auto_var_call(char *env) {
+ return putenv(env); // expected-warning{{The 'putenv' function should not be called with}}
+}
+
+int test_auto_var_call() {
+ char env[1024];
+ return f_test_auto_var_call(env);
+}
+
+int test_constant() {
+ char *env = "TEST";
+ return putenv(env); // no-warning: data is not on the stack
+}
+
+extern char *ext_env;
+int test_extern() {
+ return putenv(ext_env); // no-warning: extern storage class.
+}
+
+void test_auto_var_reset() {
+ char env[] = "NAME=value";
+ putenv(env); // expected-warning{{The 'putenv' function should not be called with}}
+ // ... (do something)
+ // Even cases like this are likely a bug:
+ // It looks like a string passed to putenv should not be deallocated at
+ // all, because reading the environment variable later returns a pointer
+ // into that string.
+ // In this case, if another (or the same) thread reads variable "NAME"
+ // at this point and does not copy the returned string, the data may
+ // become invalid.
+ putenv((char *)"NAME=anothervalue");
+}
+
+void f_main(char *env) {
+ putenv(env); // no-warning: string allocated on the stack of 'main'
+}
+
+int main(int argc, char **argv) {
+ char env[] = "NAME=value";
+ putenv(env); // no-warning: string allocated on the stack of 'main'
+ f_main(env);
+ return 0;
+}
diff --git a/clang/test/Analysis/setgid-setuid-order-notes.c b/clang/test/Analysis/setgid-setuid-order-notes.c
new file mode 100644
index 000000000000..03402413581c
--- /dev/null
+++ b/clang/test/Analysis/setgid-setuid-order-notes.c
@@ -0,0 +1,73 @@
+// RUN: %clang_analyze_cc1 -analyzer-checker=core,security.SetgidSetuidOrder -analyzer-output=text -verify %s
+
+typedef int uid_t;
+typedef int gid_t;
+
+int setuid(uid_t);
+int setgid(gid_t);
+
+uid_t getuid();
+gid_t getgid();
+
+
+
+void test_note_1() {
+ if (setuid(getuid()) == -1) // expected-note{{Assuming the condition is false}} \
+ // expected-note{{Taking false branch}}
+ return;
+ if (setuid(getuid()) == -1) // expected-note{{Call to 'setuid' found here that removes superuser privileges}} \
+ // expected-note{{Assuming the condition is false}} \
+ // expected-note{{Taking false branch}}
+ return;
+ if (setgid(getgid()) == -1) // expected-warning{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}} \
+ // expected-note{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}}
+ return;
+}
+
+void test_note_2() {
+ if (setuid(getuid()) == -1) // expected-note{{Call to 'setuid' found here that removes superuser privileges}} \
+ // expected-note 2 {{Assuming the condition is false}} \
+ // expected-note 2 {{Taking false branch}}
+ return;
+ if (setgid(getgid()) == -1) // expected-warning{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}} \
+ // expected-note{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}} \
+ // expected-note{{Assuming the condition is false}} \
+ // expected-note{{Taking false branch}}
+ return;
+ if (setuid(getuid()) == -1) // expected-note{{Call to 'setuid' found here that removes superuser privileges}} \
+ // expected-note{{Assuming the condition is false}} \
+ // expected-note{{Taking false branch}}
+ return;
+ if (setgid(getgid()) == -1) // expected-warning{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}} \
+ // expected-note{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}}
+ return;
+}
+
+int f_setuid() {
+ return setuid(getuid()); // expected-note{{Call to 'setuid' found here that removes superuser privileges}}
+}
+
+int f_setgid() {
+ return setgid(getgid()); // expected-warning{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}} \
+ // expected-note{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}}
+}
+
+void test_note_3() {
+ if (f_setuid() == -1) // expected-note{{Assuming the condition is false}} \
+ // expected-note{{Calling 'f_setuid'}} \
+ // expected-note{{Returning from 'f_setuid'}} \
+ // expected-note{{Taking false branch}}
+ return;
+ if (f_setgid() == -1) // expected-note{{Calling 'f_setgid'}}
+ return;
+}
+
+void test_note_4() {
+ if (setuid(getuid()) == 0) { // expected-note{{Assuming the condition is true}} \
+ // expected-note{{Call to 'setuid' found here that removes superuser privileges}} \
+ // expected-note{{Taking true branch}}
+ if (setgid(getgid()) == 0) { // expected-warning{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}} \
+ // expected-note{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}}
+ }
+ }
+}
diff --git a/clang/test/Analysis/setgid-setuid-order.c b/clang/test/Analysis/setgid-setuid-order.c
new file mode 100644
index 000000000000..1c411aa6a27b
--- /dev/null
+++ b/clang/test/Analysis/setgid-setuid-order.c
@@ -0,0 +1,257 @@
+// RUN: %clang_analyze_cc1 -analyzer-checker=core,security.SetgidSetuidOrder -verify %s
+
+typedef int uid_t;
+typedef int gid_t;
+
+int setuid(uid_t);
+int setgid(gid_t);
+int seteuid(uid_t);
+int setegid(gid_t);
+int setreuid(uid_t, uid_t);
+int setregid(gid_t, gid_t);
+int setresuid(uid_t, uid_t, uid_t);
+int setresgid(gid_t, gid_t, gid_t);
+
+uid_t getuid();
+gid_t getgid();
+
+
+
+void correct_order() {
+ // A correct revocation sequence starts here.
+ if (setgid(getgid()) == -1)
+ return;
+ if (setuid(getuid()) == -1)
+ return;
+ // No warning for the following setgid statement.
+ // The previous setgid and setuid calls are a correct privilege revocation
+ // sequence. The checker does not care about the following statements (except
+ // if a wrong setuid-setgid sequence follows again).
+ if (setgid(getgid()) == -1)
+ return;
+}
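+
+// A hedged illustration (hypothetical helper, not part of the checker's
+// upstream tests): the order the checker encourages drops the group ID
+// first, while the user ID still grants permission to do so, then drops
+// the user ID, checking both results.
+int drop_privileges_sketch(void) {
+  if (setgid(getgid()) == -1)
+    return -1; // group drop failed; abort while state is still consistent
+  if (setuid(getuid()) == -1)
+    return -1; // user drop failed
+  return 0; // both IDs dropped in the safe order; no warning expected
+}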
+
+void incorrect_after_correct() {
+ if (setgid(getgid()) == -1)
+ return;
+ if (setuid(getuid()) == -1)
+ return;
+ // Incorrect sequence starts here.
+ if (setuid(getuid()) == -1)
+ return;
+ if (setgid(getgid()) == -1) // expected-warning{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}}
+ return;
+}
+
+void incorrect_order() {
+ if (setuid(getuid()) == -1)
+ return;
+ if (setgid(getgid()) == -1) // expected-warning{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}}
+ return;
+ if (setgid(getgid()) == -1)
+ return;
+}
+
+void warn_at_second_time() {
+ if (setuid(getuid()) == -1)
+ return;
+ if (setgid(getgid()) == -1) // expected-warning{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}}
+ return;
+ if (setuid(getuid()) == -1)
+ return;
+ if (setgid(getgid()) == -1) // expected-warning{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}}
+ return;
+}
+
+uid_t f_uid();
+gid_t f_gid();
+
+void setuid_other() {
+ if (setuid(f_uid()) == -1)
+ return;
+ if (setgid(getgid()) == -1)
+ return;
+}
+
+void setgid_other() {
+ if (setuid(getuid()) == -1)
+ return;
+ if (setgid(f_gid()) == -1)
+ return;
+ if (setgid(getgid()) == -1)
+ return;
+}
+
+void setuid_other_between() {
+ if (setuid(getuid()) == -1)
+ return;
+ if (setuid(f_uid()) == -1)
+ return;
+ if (setgid(getgid()) == -1)
+ return;
+}
+
+void setgid_with_getuid() {
+ if (setuid(getuid()) == -1)
+ return;
+ // add a clang-tidy check for this case?
+ if (setgid(getuid()) == -1)
+ return;
+}
+
+void setuid_with_getgid() {
+ // add a clang-tidy check for this case?
+ if (setuid(getgid()) == -1)
+ return;
+ if (setgid(getgid()) == -1)
+ return;
+}
+
+int f_setuid() {
+ return setuid(getuid());
+}
+
+int f_setgid() {
+ return setgid(getgid()); // expected-warning{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}}
+}
+
+void function_calls() {
+ if (f_setuid() == -1)
+ return;
+ if (f_setgid() == -1)
+ return;
+}
+
+void seteuid_between() {
+ if (setuid(getuid()) == -1)
+ return;
+ if (seteuid(getuid()) == -1)
+ return;
+ if (setgid(getgid()) == -1)
+ return;
+}
+
+void setegid_between() {
+ if (setuid(getuid()) == -1)
+ return;
+ if (setegid(getgid()) == -1)
+ return;
+ if (setgid(getgid()) == -1)
+ return;
+}
+
+void setreuid_between() {
+ if (setuid(getuid()) == -1)
+ return;
+ if (setreuid(getuid(), getuid()) == -1)
+ return;
+ if (setgid(getgid()) == -1)
+ return;
+}
+
+void setregid_between() {
+ if (setuid(getuid()) == -1)
+ return;
+ if (setregid(getgid(), getgid()) == -1)
+ return;
+ if (setgid(getgid()) == -1)
+ return;
+}
+
+void setresuid_between() {
+ if (setuid(getuid()) == -1)
+ return;
+ if (setresuid(getuid(), getuid(), getuid()) == -1)
+ return;
+ if (setgid(getgid()) == -1)
+ return;
+}
+
+void setresgid_between() {
+ if (setuid(getuid()) == -1)
+ return;
+ if (setresgid(getgid(), getgid(), getgid()) == -1)
+ return;
+ if (setgid(getgid()) == -1)
+ return;
+}
+
+void getgid_getuid_between() {
+ if (setuid(getuid()) == -1)
+ return;
+ (void)getgid();
+ (void)getuid();
+ if (setgid(getgid()) == -1) // expected-warning{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}}
+ return;
+}
+
+void stored_getgid_getuid() {
+ // possible future improvement: detect this case
+ uid_t u = getuid();
+ gid_t g = getgid();
+ if (setuid(u) == -1)
+ return;
+ if (setgid(g) == -1) // no warning
+ return;
+}
+
+void f_extern();
+
+void other_unknown_function_between() {
+ if (setuid(getuid()) == -1)
+ return;
+ f_extern();
+ if (setgid(getgid()) == -1) // expected-warning{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}}
+ return;
+}
+
+void setuid_error_case() {
+ if (setuid(getuid()) == -1) {
+ // No warning if we know that the first setuid call has failed.
+ (void)setgid(getgid());
+ return;
+ }
+ (void)setgid(getgid()); // expected-warning{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}}
+}
+
+void setuid_success_case() {
+ if (setuid(getuid()) == 0) {
+ if (setgid(getgid()) == 0) { // expected-warning{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}}
+ }
+ }
+}
+
+void incorrect_order_compare_zero() {
+ if (setuid(getuid()) != 0)
+ return;
+ (void)setgid(getgid()); // expected-warning{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}}
+}
+
+void setuid_error_case_compare_zero() {
+ if (setuid(getuid()) != 0) {
+ // No warning if we know that the first setuid call has failed.
+ (void)setgid(getgid());
+ return;
+ }
+}
+
+void incorrect_order_compare_other() {
+ if (setuid(getuid()) == -2) {
+ // This is a case for improvement:
+ // The checker does not recognize that this is an invalid error check,
+ // but this is really another type of bug not related to this checker.
+ (void)setgid(getgid()); // warning should appear here
+ return;
+ }
+ if (setgid(getgid()) == -2) { // expected-warning{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}}
+ return;
+ }
+}
+
+const int FAIL = -1;
+
+void incorrect_order_compare_var() {
+ if (setuid(getuid()) == FAIL)
+ return;
+ (void)setgid(getgid()); // expected-warning{{A 'setgid(getgid())' call following a 'setuid(getuid())' call is likely to fail}}
+}
diff --git a/clang/test/CMakeLists.txt b/clang/test/CMakeLists.txt
index df34a5707da3..5fceb1d71033 100644
--- a/clang/test/CMakeLists.txt
+++ b/clang/test/CMakeLists.txt
@@ -170,7 +170,7 @@ configure_file(AST/gen_ast_dump_json_test.py
${CLANG_BINARY_DIR}/bin/gen_ast_dump_json_test.py COPYONLY)
add_custom_target(clang-test-depends DEPENDS ${CLANG_TEST_DEPS})
-set_target_properties(clang-test-depends PROPERTIES FOLDER "Clang tests")
+set_target_properties(clang-test-depends PROPERTIES FOLDER "Clang/Tests")
add_lit_testsuite(check-clang "Running the Clang regression tests"
${CMAKE_CURRENT_BINARY_DIR}
@@ -179,7 +179,6 @@ add_lit_testsuite(check-clang "Running the Clang regression tests"
DEPENDS ${CLANG_TEST_DEPS}
ARGS ${CLANG_TEST_EXTRA_ARGS}
)
-set_target_properties(check-clang PROPERTIES FOLDER "Clang tests")
add_lit_testsuites(CLANG ${CMAKE_CURRENT_SOURCE_DIR}
PARAMS ${CLANG_TEST_PARAMS}
@@ -190,7 +189,7 @@ add_lit_testsuites(CLANG ${CMAKE_CURRENT_SOURCE_DIR}
# Add a legacy target spelling: clang-test
add_custom_target(clang-test)
add_dependencies(clang-test check-clang)
-set_target_properties(clang-test PROPERTIES FOLDER "Clang tests")
+set_target_properties(clang-test PROPERTIES FOLDER "Clang/Tests")
# FIXME: This logic can be removed once all buildbots have moved
# debuginfo-test from clang/test to llvm/projects or monorepo.
diff --git a/clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp b/clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp
index be07ab0a48b3..0fa98ad101f6 100644
--- a/clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp
+++ b/clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp
@@ -141,11 +141,15 @@ namespace InhCtor {
// ill-formed.
template<typename T>
struct S : T {
- struct U : S { // expected-note 6{{candidate}}
- using S::S;
- };
+ struct U; // expected-note 6{{candidate}}
using T::T;
};
+
+ template<typename T>
+ struct S<T>::U : S {
+ using S::S;
+ };
+
S<A>::U ua(0); // expected-error {{no match}}
S<B>::U ub(0); // expected-error {{no match}}
diff --git a/clang/test/CXX/basic/basic.stc/basic.stc.dynamic/basic.stc.dynamic.deallocation/p2.cpp b/clang/test/CXX/basic/basic.stc/basic.stc.dynamic/basic.stc.dynamic.deallocation/p2.cpp
index 9e3210c6650f..706549f56c52 100644
--- a/clang/test/CXX/basic/basic.stc/basic.stc.dynamic/basic.stc.dynamic.deallocation/p2.cpp
+++ b/clang/test/CXX/basic/basic.stc/basic.stc.dynamic/basic.stc.dynamic.deallocation/p2.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -std=c++1z -fsized-deallocation -fexceptions -verify %s
+// RUN: %clang_cc1 -std=c++1z -fexceptions -verify %s
using size_t = decltype(sizeof(0));
diff --git a/clang/test/CXX/class.derived/class.derived.general/p2.cpp b/clang/test/CXX/class.derived/class.derived.general/p2.cpp
new file mode 100644
index 000000000000..888d9cd7a939
--- /dev/null
+++ b/clang/test/CXX/class.derived/class.derived.general/p2.cpp
@@ -0,0 +1,116 @@
+// RUN: %clang_cc1 %s -fsyntax-only -verify
+
+namespace CurrentInstantiation {
+ template<typename T>
+ struct A0 { // expected-note 6{{definition of 'A0<T>' is not complete until the closing '}'}}
+ struct B0 : A0 { }; // expected-error {{base class has incomplete type}}
+
+ template<typename U>
+ struct B1 : A0 { }; // expected-error {{base class has incomplete type}}
+
+ struct B2;
+
+ template<typename U>
+ struct B3;
+
+ struct B4 { // expected-note 2{{definition of 'CurrentInstantiation::A0::B4' is not complete until the closing '}'}}
+ struct C0 : A0, B4 { }; // expected-error 2{{base class has incomplete type}}
+
+ template<typename V>
+ struct C1 : A0, B4 { }; // expected-error 2{{base class has incomplete type}}
+
+ struct C2;
+
+ template<typename V>
+ struct C3;
+ };
+
+ template<typename U>
+ struct B5 { // expected-note 2{{definition of 'B5<U>' is not complete until the closing '}'}}
+ struct C0 : A0, B5 { }; // expected-error 2{{base class has incomplete type}}
+
+ template<typename V>
+ struct C1 : A0, B5 { }; // expected-error 2{{base class has incomplete type}}
+
+ struct C2;
+
+ template<typename V>
+ struct C3;
+ };
+ };
+
+ template<typename T>
+ struct A0<T>::B2 : A0 { };
+
+ template<typename T>
+ template<typename U>
+ struct A0<T>::B3 : A0 { };
+
+ template<typename T>
+ struct A0<T>::B4::C2 : A0, B4 { };
+
+ template<typename T>
+ template<typename V>
+ struct A0<T>::B4::C3 : A0, B4 { };
+
+ template<typename T>
+ template<typename U>
+ struct A0<T>::B5<U>::C2 : A0, B5 { };
+
+ template<typename T>
+ template<typename U>
+ template<typename V>
+ struct A0<T>::B5<U>::C3 : A0, B5 { };
+
+ template<typename T>
+ struct A0<T*> { // expected-note 2{{definition of 'A0<type-parameter-0-0 *>' is not complete until the closing '}'}}
+ struct B0 : A0 { }; // expected-error {{base class has incomplete type}}
+
+ template<typename U>
+ struct B1 : A0 { }; // expected-error {{base class has incomplete type}}
+
+ struct B2;
+
+ template<typename U>
+ struct B3;
+ };
+
+ template<typename T>
+ struct A0<T*>::B2 : A0 { };
+
+ template<typename T>
+ template<typename U>
+ struct A0<T*>::B3 : A0 { };
+} // namespace CurrentInstantiation
+
+namespace MemberOfCurrentInstantiation {
+ template<typename T>
+ struct A0 {
+ struct B : B { }; // expected-error {{base class has incomplete type}}
+ // expected-note@-1 {{definition of 'MemberOfCurrentInstantiation::A0::B' is not complete until the closing '}'}}
+
+ template<typename U>
+ struct C : C<U> { }; // expected-error {{base class has incomplete type}}
+ // expected-note@-1 {{definition of 'C<U>' is not complete until the closing '}'}}
+ };
+
+ template<typename T>
+ struct A1 {
+ struct B; // expected-note {{definition of 'MemberOfCurrentInstantiation::A1<long>::B' is not complete until the closing '}'}}
+
+ struct C : B { }; // expected-error {{base class has incomplete type}}
+
+ struct B : C { }; // expected-note {{in instantiation of member class 'MemberOfCurrentInstantiation::A1<long>::C' requested here}}
+ };
+
+ template struct A1<long>; // expected-note {{in instantiation of member class 'MemberOfCurrentInstantiation::A1<long>::B' requested here}}
+
+ template<>
+ struct A1<short>::B {
+ static constexpr bool f() {
+ return true;
+ }
+ };
+
+ static_assert(A1<short>::C::f());
+} // namespace MemberOfCurrentInstantiation
diff --git a/clang/test/CXX/class/class.mfct/class.mfct.non-static/p3.cpp b/clang/test/CXX/class/class.mfct/class.mfct.non-static/p3.cpp
index 9116e7146f81..01fa923dd171 100644
--- a/clang/test/CXX/class/class.mfct/class.mfct.non-static/p3.cpp
+++ b/clang/test/CXX/class/class.mfct/class.mfct.non-static/p3.cpp
@@ -70,7 +70,7 @@ namespace test2 {
}
void test1() {
- B<T>::foo();
+ B<T>::foo(); // expected-error {{call to non-static member function without an object argument}}
}
static void test2() {
@@ -91,8 +91,95 @@ namespace test2 {
int test() {
A<int> a;
a.test0(); // no instantiation note here, decl is ill-formed
- a.test1();
+ a.test1(); // expected-note {{in instantiation}}
a.test2(); // expected-note {{in instantiation}}
a.test3(); // expected-note {{in instantiation}}
}
}
+
+namespace test3 {
+ struct A {
+ void f0();
+
+ template<typename T>
+ void f1();
+
+ static void f2();
+
+ template<typename T>
+ static void f3();
+
+ int x0;
+
+ static constexpr int x1 = 0;
+
+ template<typename T>
+ static constexpr int x2 = 0;
+ };
+
+ template<typename T>
+ struct B : T {
+ auto g0() -> decltype(T::f0());
+
+ auto g1() -> decltype(T::template f1<int>());
+
+ auto g2() -> decltype(T::f2());
+
+ auto g3() -> decltype(T::template f3<int>());
+
+ auto g4() -> decltype(T::x0);
+
+ auto g5() -> decltype(T::x1);
+
+ auto g6() -> decltype(T::template x2<int>);
+
+ decltype(T::f0()) g7(); // expected-error {{call to non-static member function without an object argument}}
+
+ decltype(T::template f1<int>()) g8(); // expected-error {{call to non-static member function without an object argument}}
+
+ decltype(T::f2()) g9();
+
+ decltype(T::template f3<int>()) g10();
+
+ decltype(T::x0) g11();
+
+ decltype(T::x1) g12();
+
+ decltype(T::template x2<int>) g13();
+ };
+
+ template struct B<A>; // expected-note {{in instantiation of}}
+
+ template<typename T>
+ struct C : T {
+ static auto g0() -> decltype(T::f0()); // expected-error {{'this' cannot be implicitly used in a static member function declaration}}
+
+ static auto g1() -> decltype(T::template f1<int>()); // expected-error {{'this' cannot be implicitly used in a static member function declaration}}
+
+ static auto g2() -> decltype(T::f2());
+
+ static auto g3() -> decltype(T::template f3<int>());
+
+ static auto g4() -> decltype(T::x0); // expected-error {{'this' cannot be implicitly used in a static member function declaration}}
+
+ static auto g5() -> decltype(T::x1);
+
+ static auto g6() -> decltype(T::template x2<int>);
+
+ static decltype(T::f0()) g7(); // expected-error {{call to non-static member function without an object argument}}
+
+ static decltype(T::template f1<int>()) g8(); // expected-error {{call to non-static member function without an object argument}}
+
+ static decltype(T::f2()) g9();
+
+ static decltype(T::template f3<int>()) g10();
+
+ static decltype(T::x0) g11();
+
+ static decltype(T::x1) g12();
+
+ static decltype(T::template x2<int>) g13();
+ };
+
+ template struct C<A>; // expected-note {{in instantiation of}}
+}
diff --git a/clang/test/CXX/dcl.dcl/dcl.spec/dcl.constexpr/p1.cpp b/clang/test/CXX/dcl.dcl/dcl.spec/dcl.constexpr/p1.cpp
index a28a5f91c477..9e890204c78b 100644
--- a/clang/test/CXX/dcl.dcl/dcl.spec/dcl.constexpr/p1.cpp
+++ b/clang/test/CXX/dcl.dcl/dcl.spec/dcl.constexpr/p1.cpp
@@ -89,6 +89,9 @@ struct S {
template<typename T> constexpr T f(); // expected-warning 0-1{{C++14}} expected-note 0-1{{candidate}}
template <typename T>
T g() const; // expected-note-re {{candidate template ignored: could not match 'T (){{( __attribute__\(\(thiscall\)\))?}} const' against 'char (){{( __attribute__\(\(thiscall\)\))?}}'}}
+#if __cplusplus >= 201402L
+ // expected-note@-2 {{candidate template ignored: could not match 'T () const' against 'int ()'}}
+#endif
};
// explicit specialization can differ in constexpr
@@ -100,13 +103,17 @@ template <> notlit S::f() const { return notlit(); }
#if __cplusplus >= 201402L
// expected-error@-2 {{no function template matches}}
#endif
-template <> constexpr int S::g() { return 0; } // expected-note {{previous}}
+template <> constexpr int S::g() { return 0; }
#if __cplusplus < 201402L
// expected-warning@-2 {{C++14}}
+// expected-note@-3 {{previous}}
#else
-// expected-error@-4 {{does not match any declaration in 'S'}}
+// expected-error@-5 {{no function template matches function template specialization 'g'}}
+#endif
+template <> int S::g() const;
+#if __cplusplus < 201402L
+// expected-error@-2 {{non-constexpr declaration of 'g<int>' follows constexpr declaration}}
#endif
-template <> int S::g() const; // expected-error {{non-constexpr declaration of 'g<int>' follows constexpr declaration}}
// specializations can drop the 'constexpr' but not the implied 'const'.
template <> char S::g() { return 0; } // expected-error {{no function template matches}}
template <> double S::g() const { return 0; } // ok
@@ -154,3 +161,14 @@ namespace {
// FIXME: We should diagnose this prior to C++17.
const int &r = A::n;
}
+
+#if __cplusplus < 201402L
+namespace ImplicitConstexprDef {
+ struct A {
+ void f(); // expected-note {{member declaration does not match because it is not const qualified}}
+ };
+
+ constexpr void A::f() { } // expected-warning {{'constexpr' non-static member function will not be implicitly 'const' in C++14; add 'const' to avoid a change in behavior}}
+ // expected-error@-1 {{out-of-line definition of 'f' does not match any declaration in 'ImplicitConstexprDef::A'}}
+}
+#endif
diff --git a/clang/test/CXX/drs/cwg16xx.cpp b/clang/test/CXX/drs/cwg16xx.cpp
index cf6b45ceabf2..82ef871939d2 100644
--- a/clang/test/CXX/drs/cwg16xx.cpp
+++ b/clang/test/CXX/drs/cwg16xx.cpp
@@ -483,8 +483,6 @@ namespace cwg1696 { // cwg1696: 7
const A &a = A(); // #cwg1696-D1-a
};
D1 d1 = {}; // #cwg1696-d1
- // since-cxx14-warning@-1 {{lifetime extension of temporary created by aggregate initialization using a default member initializer is not yet supported; lifetime of temporary will end at the end of the full-expression}}
- // since-cxx14-note@#cwg1696-D1-a {{initializing field 'a' with default member initializer}}
struct D2 {
const A &a = A(); // #cwg1696-D2-a
diff --git a/clang/test/CXX/drs/cwg18xx.cpp b/clang/test/CXX/drs/cwg18xx.cpp
index 35615076a628..b71a81b62f81 100644
--- a/clang/test/CXX/drs/cwg18xx.cpp
+++ b/clang/test/CXX/drs/cwg18xx.cpp
@@ -206,19 +206,28 @@ namespace cwg1814 { // cwg1814: yes
#endif
}
-namespace cwg1815 { // cwg1815: no
+namespace cwg1815 { // cwg1815: 19
#if __cplusplus >= 201402L
- // FIXME: needs codegen test
- struct A { int &&r = 0; }; // #cwg1815-A
+ struct A { int &&r = 0; };
A a = {};
- // since-cxx14-warning@-1 {{lifetime extension of temporary created by aggregate initialization using a default member initializer is not yet supported; lifetime of temporary will end at the end of the full-expression}} FIXME
- // since-cxx14-note@#cwg1815-A {{initializing field 'r' with default member initializer}}
struct B { int &&r = 0; }; // #cwg1815-B
// since-cxx14-error@-1 {{reference member 'r' binds to a temporary object whose lifetime would be shorter than the lifetime of the constructed object}}
// since-cxx14-note@#cwg1815-B {{initializing field 'r' with default member initializer}}
// since-cxx14-note@#cwg1815-b {{in implicit default constructor for 'cwg1815::B' first required here}}
B b; // #cwg1815-b
+
+#if __cplusplus >= 201703L
+ struct C { const int &r = 0; };
+ constexpr C c = {}; // OK, since cwg1815
+ static_assert(c.r == 0);
+
+ constexpr int f() {
+ A a = {}; // OK, since cwg1815
+ return a.r;
+ }
+ static_assert(f() == 0);
+#endif
#endif
}
diff --git a/clang/test/CXX/drs/cwg28xx.cpp b/clang/test/CXX/drs/cwg28xx.cpp
index 696cd1b9c84e..8469a065ccaa 100644
--- a/clang/test/CXX/drs/cwg28xx.cpp
+++ b/clang/test/CXX/drs/cwg28xx.cpp
@@ -109,3 +109,74 @@ struct A {
#endif
} // namespace cwg2858
+
+namespace cwg2881 { // cwg2881: 19 tentatively ready 2024-04-19
+
+#if __cplusplus >= 202302L
+
+template <typename T> struct A : T {};
+template <typename T> struct B : T {};
+template <typename T> struct C : virtual T { C(T t) : T(t) {} };
+template <typename T> struct D : virtual T { D(T t) : T(t) {} };
+
+template <typename Ts>
+struct O1 : A<Ts>, B<Ts> {
+ using A<Ts>::operator();
+ using B<Ts>::operator();
+};
+
+template <typename Ts> struct O2 : protected Ts { // expected-note {{declared protected here}}
+ using Ts::operator();
+ O2(Ts ts) : Ts(ts) {}
+};
+
+template <typename Ts> struct O3 : private Ts { // expected-note {{declared private here}}
+ using Ts::operator();
+ O3(Ts ts) : Ts(ts) {}
+};
+
+// Not ambiguous because of virtual inheritance.
+template <typename Ts>
+struct O4 : C<Ts>, D<Ts> {
+ using C<Ts>::operator();
+ using D<Ts>::operator();
+ O4(Ts t) : Ts(t), C<Ts>(t), D<Ts>(t) {}
+};
+
+// This still has a public path to the lambda, and it's also not
+// ambiguous because of virtual inheritance.
+template <typename Ts>
+struct O5 : private C<Ts>, D<Ts> {
+ using C<Ts>::operator();
+ using D<Ts>::operator();
+ O5(Ts t) : Ts(t), C<Ts>(t), D<Ts>(t) {}
+};
+
+// This is only invalid if we call T's call operator.
+template <typename T, typename U>
+struct O6 : private T, U { // expected-note {{declared private here}}
+ using T::operator();
+ using U::operator();
+ O6(T t, U u) : T(t), U(u) {}
+};
+
+void f() {
+ int x;
+ auto L1 = [=](this auto&& self) { (void) &x; };
+ auto L2 = [&](this auto&& self) { (void) &x; };
+ O1<decltype(L1)>{L1, L1}(); // expected-error {{inaccessible due to ambiguity}}
+ O1<decltype(L2)>{L2, L2}(); // expected-error {{inaccessible due to ambiguity}}
+ O2{L1}(); // expected-error {{must derive publicly from the lambda}}
+ O3{L1}(); // expected-error {{must derive publicly from the lambda}}
+ O4{L1}();
+ O5{L1}();
+ O6 o{L1, L2};
+ o.decltype(L1)::operator()(); // expected-error {{must derive publicly from the lambda}}
+ o.decltype(L1)::operator()(); // No error here because we've already diagnosed this method.
+ o.decltype(L2)::operator()();
+}
+
+#endif
+
+} // namespace cwg2881
+
diff --git a/clang/test/CXX/drs/cwg292.cpp b/clang/test/CXX/drs/cwg292.cpp
index b05d3b92d627..a7bcbe6f5051 100644
--- a/clang/test/CXX/drs/cwg292.cpp
+++ b/clang/test/CXX/drs/cwg292.cpp
@@ -1,10 +1,10 @@
-// RUN: %clang_cc1 -std=c++98 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
-// RUN: %clang_cc1 -std=c++11 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
-// RUN: %clang_cc1 -std=c++14 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
-// RUN: %clang_cc1 -std=c++17 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
-// RUN: %clang_cc1 -std=c++20 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
-// RUN: %clang_cc1 -std=c++23 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
-// RUN: %clang_cc1 -std=c++2c %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK
+// RUN: %clang_cc1 -std=c++98 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK,CXX98-11
+// RUN: %clang_cc1 -std=c++11 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK,CXX98-11
+// RUN: %clang_cc1 -std=c++14 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK,SINCE-CXX14
+// RUN: %clang_cc1 -std=c++17 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK,SINCE-CXX14
+// RUN: %clang_cc1 -std=c++20 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK,SINCE-CXX14
+// RUN: %clang_cc1 -std=c++23 %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK,SINCE-CXX14
+// RUN: %clang_cc1 -std=c++2c %s -triple x86_64-linux-gnu -emit-llvm -disable-llvm-passes -o - -fexceptions -fcxx-exceptions -pedantic-errors | llvm-cxxfilt -n | FileCheck %s --check-prefixes CHECK,SINCE-CXX14
namespace cwg292 { // cwg292: 2.9
@@ -23,7 +23,8 @@ void f() {
// CHECK: invoke {{.*}} i32 @cwg292::g()()
// CHECK-NEXT: to {{.*}} unwind label %lpad
// CHECK-LABEL: lpad:
-// CHECK: call void @operator delete(void*)(ptr {{.*}} %[[CALL]])
+// CXX98-11: call void @operator delete(void*)(ptr {{.*}} %[[CALL]])
+// SINCE-CXX14: call void @operator delete(void*, unsigned long)(ptr {{.*}} %[[CALL]], i64 noundef 1)
// CHECK-LABEL: eh.resume:
// CHECK-LABEL: }
diff --git a/clang/test/CXX/expr/expr.unary/expr.new/p14.cpp b/clang/test/CXX/expr/expr.unary/expr.new/p14.cpp
index 6537cdcfeafa..d0b24c8fe47b 100644
--- a/clang/test/CXX/expr/expr.unary/expr.new/p14.cpp
+++ b/clang/test/CXX/expr/expr.unary/expr.new/p14.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -std=c++1z -fsized-deallocation -fexceptions %s -verify
+// RUN: %clang_cc1 -std=c++1z -fexceptions %s -verify
using size_t = decltype(sizeof(0));
namespace std { enum class align_val_t : size_t {}; }
diff --git a/clang/test/CXX/expr/expr.unary/expr.sizeof/p5-0x.cpp b/clang/test/CXX/expr/expr.unary/expr.sizeof/p5-0x.cpp
index afd8ef05302f..19f90801df31 100644
--- a/clang/test/CXX/expr/expr.unary/expr.sizeof/p5-0x.cpp
+++ b/clang/test/CXX/expr/expr.unary/expr.sizeof/p5-0x.cpp
@@ -33,6 +33,6 @@ template<int Value> struct count_ints_2 {
template<typename ...Types> // expected-note{{parameter pack 'Types' declared here}}
struct count_types_2 {
static const unsigned value = sizeof... Type; // expected-error{{missing parentheses around the size of parameter pack 'Type'}} \
- // expected-error{{Type' does not refer to the name of a parameter pack; did you mean 'Types'?}}
+ // expected-error{{'Type' does not refer to the name of a parameter pack; did you mean 'Types'?}}
};
diff --git a/clang/test/CXX/special/class.temporary/p6.cpp b/clang/test/CXX/special/class.temporary/p6.cpp
index 5554363cc69a..a6d2adfd1fd2 100644
--- a/clang/test/CXX/special/class.temporary/p6.cpp
+++ b/clang/test/CXX/special/class.temporary/p6.cpp
@@ -269,6 +269,40 @@ void init_capture_init_list() {
// CHECK: }
}
+void check_dr1815() { // dr1815: yes
+#if __cplusplus >= 201402L
+
+ struct A {
+ int &&r = 0;
+ ~A() {}
+ };
+
+ struct B {
+ A &&a = A{};
+ ~B() {}
+ };
+ B a = {};
+
+ // CHECK: call {{.*}}block_scope_begin_function
+ extern void block_scope_begin_function();
+ extern void block_scope_end_function();
+ block_scope_begin_function();
+ {
+ // CHECK: call void @_ZZ12check_dr1815vEN1BD1Ev
+ // CHECK: call void @_ZZ12check_dr1815vEN1AD1Ev
+ B b = {};
+ }
+ // CHECK: call {{.*}}block_scope_end_function
+ block_scope_end_function();
+
+ // CHECK: call {{.*}}some_other_function
+ extern void some_other_function();
+ some_other_function();
+ // CHECK: call void @_ZZ12check_dr1815vEN1BD1Ev
+ // CHECK: call void @_ZZ12check_dr1815vEN1AD1Ev
+#endif
+}
+
namespace P2718R0 {
namespace basic {
template <typename E> using T2 = std::list<E>;
diff --git a/clang/test/CXX/temp/temp.spec/temp.expl.spec/p12.cpp b/clang/test/CXX/temp/temp.spec/temp.expl.spec/p12.cpp
new file mode 100644
index 000000000000..2a5748908369
--- /dev/null
+++ b/clang/test/CXX/temp/temp.spec/temp.expl.spec/p12.cpp
@@ -0,0 +1,70 @@
+// RUN: %clang_cc1 -fsyntax-only -std=c++11 -verify=expected,cxx11 %s
+// RUN: %clang_cc1 -fsyntax-only -std=c++14 -verify=expected,since-cxx14 %s
+
+struct A {
+ template<typename T>
+ void f0();
+
+ template<>
+ constexpr void f0<short>(); // cxx11-error {{conflicting types for 'f0'}}
+ // cxx11-note@-1 {{previous declaration is here}}
+ // cxx11-warning@-2 {{'constexpr' non-static member function will not be implicitly 'const' in C++14; add 'const'}}
+
+ template<typename T>
+ void f1() const; // since-cxx14-note 2{{candidate template ignored: could not match 'void () const' against 'void ()'}}
+
+ template<>
+ constexpr void f1<short>(); // since-cxx14-error {{no function template matches function template specialization 'f1'}}
+ // cxx11-warning@-1 {{'constexpr' non-static member function will not be implicitly 'const' in C++14; add 'const'}}
+};
+
+template<>
+constexpr void A::f0<long>(); // cxx11-error {{conflicting types for 'f0'}}
+ // cxx11-note@-1 {{previous declaration is here}}
+ // cxx11-warning@-2 {{'constexpr' non-static member function will not be implicitly 'const' in C++14; add 'const'}}
+
+template<>
+constexpr void A::f1<long>(); // since-cxx14-error {{no function template matches function template specialization 'f1'}}
+ // cxx11-warning@-1 {{'constexpr' non-static member function will not be implicitly 'const' in C++14; add 'const'}}
+
+// FIXME: It's unclear whether [temp.expl.spec]p12 is intended to apply to
+// members of a class template explicitly specialized for an implicitly
+// instantiated specialization of that template.
+template<typename T>
+struct B {
+ void g0(); // since-cxx14-note {{previous declaration is here}}
+ // cxx11-note@-1 {{member declaration does not match because it is not const qualified}}
+
+ void g1() const; // since-cxx14-note {{member declaration does not match because it is const qualified}}
+ // cxx11-note@-1 {{previous declaration is here}}
+
+ template<typename U>
+ void h0(); // since-cxx14-note {{previous declaration is here}}
+
+ template<typename U>
+ void h1() const; // cxx11-note {{previous declaration is here}}
+};
+
+template<>
+constexpr void B<short>::g0(); // since-cxx14-error {{constexpr declaration of 'g0' follows non-constexpr declaration}}
+ // cxx11-error@-1 {{out-of-line declaration of 'g0' does not match any declaration in 'B<short>'}}
+ // cxx11-warning@-2 {{'constexpr' non-static member function will not be implicitly 'const' in C++14; add 'const'}}
+
+template<>
+constexpr void B<short>::g1(); // since-cxx14-error {{out-of-line declaration of 'g1' does not match any declaration in 'B<short>'}}
+ // cxx11-error@-1 {{constexpr declaration of 'g1' follows non-constexpr declaration}}
+ // cxx11-warning@-2 {{'constexpr' non-static member function will not be implicitly 'const' in C++14; add 'const'}}
+
+template<>
+template<typename U>
+constexpr void B<long>::h0(); // since-cxx14-error {{constexpr declaration of 'h0' follows non-constexpr declaration}}
+ // cxx11-error@-1 {{out-of-line declaration of 'h0' does not match any declaration in 'B<long>'}}
+ // cxx11-warning@-2 {{'constexpr' non-static member function will not be implicitly 'const' in C++14; add 'const'}}
+
+template<>
+template<typename U>
+constexpr void B<long>::h1(); // since-cxx14-error {{out-of-line declaration of 'h1' does not match any declaration in 'B<long>'}}
+ // cxx11-error@-1 {{constexpr declaration of 'h1' follows non-constexpr declaration}}
+ // cxx11-warning@-2 {{'constexpr' non-static member function will not be implicitly 'const' in C++14; add 'const'}}
+
+
diff --git a/clang/test/ClangScanDeps/response-file-clang-cl.c b/clang/test/ClangScanDeps/response-file-clang-cl.c
new file mode 100644
index 000000000000..b543231f4bb1
--- /dev/null
+++ b/clang/test/ClangScanDeps/response-file-clang-cl.c
@@ -0,0 +1,56 @@
+// Check that the scanner can adjust arguments by reading .rsp files in advance.
+
+// RUN: rm -rf %t
+// RUN: split-file %s %t
+
+// First run the tests with a .cdb
+// RUN: sed -e "s|DIR|%/t|g" %t/cdb.json.template > %t/cdb.json
+// RUN: sed -e "s|DIR|%/t|g" %t/args_nested.template > %t/args_nested.rsp
+
+// RUN: cp %t/args_compilation.rsp %t/args.rsp
+// RUN: clang-scan-deps --compilation-database %t/cdb.json > %t/deps.json
+// RUN: cat %t/deps.json | sed 's:\\\\\?:/:g' | FileCheck -DPREFIX=%/t %s
+
+// RUN: cp %t/args_preprocess.rsp %t/args.rsp
+// RUN: clang-scan-deps --compilation-database %t/cdb.json > %t/deps.json
+// RUN: cat %t/deps.json | sed 's:\\\\\?:/:g' | FileCheck -DPREFIX=%/t %s
+
+
+// Now run the tests again with an in-place compilation database
+// RUN: cd %t
+
+// RUN: cp args_compilation.rsp args.rsp
+// RUN: clang-scan-deps -o deps.json -- %clang_cl @args.rsp
+// RUN: cat deps.json | sed 's:\\\\\?:/:g' | FileCheck -DPREFIX=%/t %s
+
+// RUN: cp args_preprocess.rsp args.rsp
+// RUN: clang-scan-deps -o deps.json -- %clang_cl @args.rsp
+// RUN: cat deps.json | sed 's:\\\\\?:/:g' | FileCheck -DPREFIX=%/t %s
+
+// Ensure that the reported .obj is qualified with its full path, since that's what we pass with /Fo
+// CHECK: [[PREFIX]]/tu.obj:
+
+//--- cdb.json.template
+[{
+ "file": "DIR/tu.cpp",
+ "directory": "DIR",
+ "command": "clang-cl @DIR/args.rsp"
+}]
+
+//--- args_compilation.rsp
+@args_nested.rsp
+/c
+
+//--- args_preprocess.rsp
+@args_nested.rsp
+/E
+
+//--- args_nested.template
+/I include
+tu.cpp
+/FoDIR/tu.obj
+
+//--- include/header.h
+
+//--- tu.cpp
+#include "header.h"
diff --git a/clang/test/CodeCompletion/member-access.cpp b/clang/test/CodeCompletion/member-access.cpp
index 9f8c21c0bca6..912f269db6c1 100644
--- a/clang/test/CodeCompletion/member-access.cpp
+++ b/clang/test/CodeCompletion/member-access.cpp
@@ -367,4 +367,20 @@ class A {
// CHECK-DEREF-THIS: [#void#]function()
}
};
+
+template <typename Element>
+struct RepeatedField {
+ void Add();
+};
+
+template <typename T>
+RepeatedField<T>* MutableRepeatedField() {}
+
+template <class T>
+void Foo() {
+ auto& C = *MutableRepeatedField<T>();
+ C.
+}
+// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:382:5 %s -o - | FileCheck -check-prefix=CHECK-DEREF-DEPENDENT %s
+// CHECK-DEREF-DEPENDENT: [#void#]Add()
}
diff --git a/clang/test/CodeGen/RISCV/riscv-inline-asm.c b/clang/test/CodeGen/RISCV/riscv-inline-asm.c
index 3565705dea71..ed97add95e71 100644
--- a/clang/test/CodeGen/RISCV/riscv-inline-asm.c
+++ b/clang/test/CodeGen/RISCV/riscv-inline-asm.c
@@ -49,9 +49,9 @@ extern int var, arr[2][2];
struct Pair { int a, b; } pair;
// CHECK-LABEL: test_s(
-// CHECK: call void asm sideeffect "// $0 $1 $2", "s,s,s"(ptr nonnull @var, ptr nonnull getelementptr inbounds ([2 x [2 x i32]], ptr @arr, {{.*}}), ptr nonnull @test_s)
-// CHECK: call void asm sideeffect "// $0", "s"(ptr nonnull getelementptr inbounds (%struct.Pair, ptr @pair, {{.*}}))
-// CHECK: call void asm sideeffect "// $0 $1 $2", "S,S,S"(ptr nonnull @var, ptr nonnull getelementptr inbounds ([2 x [2 x i32]], ptr @arr, {{.*}}), ptr nonnull @test_s)
+// CHECK: call void asm sideeffect "// $0 $1 $2", "s,s,s"(ptr nonnull @var, ptr nonnull getelementptr inbounds (i8, ptr @arr, {{.*}}), ptr nonnull @test_s)
+// CHECK: call void asm sideeffect "// $0", "s"(ptr nonnull getelementptr inbounds (i8, ptr @pair, {{.*}}))
+// CHECK: call void asm sideeffect "// $0 $1 $2", "S,S,S"(ptr nonnull @var, ptr nonnull getelementptr inbounds (i8, ptr @arr, {{.*}}), ptr nonnull @test_s)
void test_s(void) {
asm("// %0 %1 %2" :: "s"(&var), "s"(&arr[1][1]), "s"(test_s));
asm("// %0" :: "s"(&pair.b));
diff --git a/clang/test/CodeGen/SystemZ/sync-builtins-i128-8Al.c b/clang/test/CodeGen/SystemZ/sync-builtins-i128-8Al.c
index 76c9c0ebed2b..c678e9a9882f 100644
--- a/clang/test/CodeGen/SystemZ/sync-builtins-i128-8Al.c
+++ b/clang/test/CodeGen/SystemZ/sync-builtins-i128-8Al.c
@@ -7,21 +7,21 @@
__int128 Ptr __attribute__((aligned(8)));
__int128 f1() {
-// CHECK: warning: __sync builtin operation MUST have natural alignment (consider using __atomic). [-Wsync-alignment]
+// CHECK: warning: __sync builtin operation must have natural alignment (consider using __atomic)
return __sync_fetch_and_add(&Ptr, 1);
}
__int128 f2() {
-// CHECK: warning: __sync builtin operation MUST have natural alignment (consider using __atomic). [-Wsync-alignment]
+// CHECK: warning: __sync builtin operation must have natural alignment (consider using __atomic)
return __sync_sub_and_fetch(&Ptr, 1);
}
__int128 f3() {
-// CHECK: warning: __sync builtin operation MUST have natural alignment (consider using __atomic). [-Wsync-alignment]
+// CHECK: warning: __sync builtin operation must have natural alignment (consider using __atomic)
return __sync_val_compare_and_swap(&Ptr, 0, 1);
}
void f4() {
-// CHECK: warning: __sync builtin operation MUST have natural alignment (consider using __atomic). [-Wsync-alignment]
+// CHECK: warning: __sync builtin operation must have natural alignment (consider using __atomic)
__sync_lock_release(&Ptr);
}
diff --git a/clang/test/CodeGen/X86/avx512er-builtins.c b/clang/test/CodeGen/X86/avx512er-builtins.c
deleted file mode 100644
index 11ec6aabec1e..000000000000
--- a/clang/test/CodeGen/X86/avx512er-builtins.c
+++ /dev/null
@@ -1,347 +0,0 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -target-feature +avx512er -emit-llvm -o - -Wall | FileCheck %s
-
-
-#include <immintrin.h>
-
-__m512d test_mm512_rsqrt28_round_pd(__m512d a) {
- // CHECK-LABEL: @test_mm512_rsqrt28_round_pd
- // CHECK: @llvm.x86.avx512.rsqrt28.pd
- return _mm512_rsqrt28_round_pd(a, _MM_FROUND_NO_EXC);
-}
-
-__m512d test_mm512_mask_rsqrt28_round_pd(__m512d s, __mmask8 m, __m512d a) {
- // CHECK-LABEL: @test_mm512_mask_rsqrt28_round_pd
- // CHECK: @llvm.x86.avx512.rsqrt28.pd
- return _mm512_mask_rsqrt28_round_pd(s, m, a, _MM_FROUND_NO_EXC);
-}
-
-__m512d test_mm512_maskz_rsqrt28_round_pd(__mmask8 m, __m512d a) {
- // CHECK-LABEL: @test_mm512_maskz_rsqrt28_round_pd
- // CHECK: @llvm.x86.avx512.rsqrt28.pd
- return _mm512_maskz_rsqrt28_round_pd(m, a, _MM_FROUND_NO_EXC);
-}
-
-__m512d test_mm512_rsqrt28_pd(__m512d a) {
- // CHECK-LABEL: @test_mm512_rsqrt28_pd
- // CHECK: @llvm.x86.avx512.rsqrt28.pd
- return _mm512_rsqrt28_pd(a);
-}
-
-__m512d test_mm512_mask_rsqrt28_pd(__m512d s, __mmask8 m, __m512d a) {
- // CHECK-LABEL: @test_mm512_mask_rsqrt28_pd
- // CHECK: @llvm.x86.avx512.rsqrt28.pd
- return _mm512_mask_rsqrt28_pd(s, m, a);
-}
-
-__m512d test_mm512_maskz_rsqrt28_pd(__mmask8 m, __m512d a) {
- // CHECK-LABEL: @test_mm512_maskz_rsqrt28_pd
- // CHECK: @llvm.x86.avx512.rsqrt28.pd
- return _mm512_maskz_rsqrt28_pd(m, a);
-}
-
-__m512 test_mm512_rsqrt28_round_ps(__m512 a) {
- // CHECK-LABEL: @test_mm512_rsqrt28_round_ps
- // CHECK: @llvm.x86.avx512.rsqrt28.ps
- return _mm512_rsqrt28_round_ps(a, _MM_FROUND_NO_EXC);
-}
-
-__m512 test_mm512_mask_rsqrt28_round_ps(__m512 s, __mmask16 m, __m512 a) {
- // CHECK-LABEL: @test_mm512_mask_rsqrt28_round_ps
- // CHECK: @llvm.x86.avx512.rsqrt28.ps
- return _mm512_mask_rsqrt28_round_ps(s, m, a, _MM_FROUND_NO_EXC);
-}
-
-__m512 test_mm512_maskz_rsqrt28_round_ps(__mmask16 m, __m512 a) {
- // CHECK-LABEL: @test_mm512_maskz_rsqrt28_round_ps
- // CHECK: @llvm.x86.avx512.rsqrt28.ps
- return _mm512_maskz_rsqrt28_round_ps(m, a, _MM_FROUND_NO_EXC);
-}
-
-__m512 test_mm512_rsqrt28_ps(__m512 a) {
- // CHECK-LABEL: @test_mm512_rsqrt28_ps
- // CHECK: @llvm.x86.avx512.rsqrt28.ps
- return _mm512_rsqrt28_ps(a);
-}
-
-__m512 test_mm512_mask_rsqrt28_ps(__m512 s, __mmask16 m, __m512 a) {
- // CHECK-LABEL: @test_mm512_mask_rsqrt28_ps
- // CHECK: @llvm.x86.avx512.rsqrt28.ps
- return _mm512_mask_rsqrt28_ps(s, m, a);
-}
-
-__m512 test_mm512_maskz_rsqrt28_ps(__mmask16 m, __m512 a) {
- // CHECK-LABEL: @test_mm512_maskz_rsqrt28_ps
- // CHECK: @llvm.x86.avx512.rsqrt28.ps
- return _mm512_maskz_rsqrt28_ps(m, a);
-}
-
-__m128 test_mm_rsqrt28_round_ss(__m128 a, __m128 b) {
- // CHECK-LABEL: @test_mm_rsqrt28_round_ss
- // CHECK: @llvm.x86.avx512.rsqrt28.ss
- return _mm_rsqrt28_round_ss(a, b, _MM_FROUND_NO_EXC);
-}
-
-__m128 test_mm_mask_rsqrt28_round_ss(__m128 s, __mmask16 m, __m128 a, __m128 b) {
- // CHECK-LABEL: @test_mm_mask_rsqrt28_round_ss
- // CHECK: @llvm.x86.avx512.rsqrt28.ss
- return _mm_mask_rsqrt28_round_ss(s, m, a, b, _MM_FROUND_NO_EXC);
-}
-
-__m128 test_mm_maskz_rsqrt28_round_ss(__mmask16 m, __m128 a, __m128 b) {
- // CHECK-LABEL: @test_mm_maskz_rsqrt28_round_ss
- // CHECK: @llvm.x86.avx512.rsqrt28.ss
- return _mm_maskz_rsqrt28_round_ss(m, a, b, _MM_FROUND_NO_EXC);
-}
-
-__m128 test_mm_rsqrt28_ss(__m128 a, __m128 b) {
- // CHECK-LABEL: @test_mm_rsqrt28_ss
- // CHECK: @llvm.x86.avx512.rsqrt28.ss
- return _mm_rsqrt28_ss(a, b);
-}
-
-__m128 test_mm_mask_rsqrt28_ss(__m128 s, __mmask16 m, __m128 a, __m128 b) {
- // CHECK-LABEL: @test_mm_mask_rsqrt28_ss
- // CHECK: @llvm.x86.avx512.rsqrt28.ss
- return _mm_mask_rsqrt28_ss(s, m, a, b);
-}
-
-__m128 test_mm_maskz_rsqrt28_ss(__mmask16 m, __m128 a, __m128 b) {
- // CHECK-LABEL: @test_mm_maskz_rsqrt28_ss
- // CHECK: @llvm.x86.avx512.rsqrt28.ss
- return _mm_maskz_rsqrt28_ss(m, a, b);
-}
-
-__m128d test_mm_rsqrt28_round_sd(__m128d a, __m128d b) {
- // CHECK-LABEL: @test_mm_rsqrt28_round_sd
- // CHECK: @llvm.x86.avx512.rsqrt28.sd
- return _mm_rsqrt28_round_sd(a, b, _MM_FROUND_NO_EXC);
-}
-
-__m128d test_mm_mask_rsqrt28_round_sd(__m128d s, __mmask8 m, __m128d a, __m128d b) {
- // CHECK-LABEL: @test_mm_mask_rsqrt28_round_sd
- // CHECK: @llvm.x86.avx512.rsqrt28.sd
- return _mm_mask_rsqrt28_round_sd(s, m, a, b, _MM_FROUND_NO_EXC);
-}
-
-__m128d test_mm_maskz_rsqrt28_round_sd(__mmask8 m, __m128d a, __m128d b) {
- // CHECK-LABEL: @test_mm_maskz_rsqrt28_round_sd
- // CHECK: @llvm.x86.avx512.rsqrt28.sd
- return _mm_maskz_rsqrt28_round_sd(m, a, b, _MM_FROUND_NO_EXC);
-}
-
-__m512d test_mm512_rcp28_round_pd(__m512d a) {
- // CHECK-LABEL: @test_mm512_rcp28_round_pd
- // CHECK: @llvm.x86.avx512.rcp28.pd
- return _mm512_rcp28_round_pd(a, _MM_FROUND_NO_EXC);
-}
-
-__m512d test_mm512_mask_rcp28_round_pd(__m512d s, __mmask8 m, __m512d a) {
- // CHECK-LABEL: @test_mm512_mask_rcp28_round_pd
- // CHECK: @llvm.x86.avx512.rcp28.pd
- return _mm512_mask_rcp28_round_pd(s, m, a, _MM_FROUND_NO_EXC);
-}
-
-__m512d test_mm512_maskz_rcp28_round_pd(__mmask8 m, __m512d a) {
- // CHECK-LABEL: @test_mm512_maskz_rcp28_round_pd
- // CHECK: @llvm.x86.avx512.rcp28.pd
- return _mm512_maskz_rcp28_round_pd(m, a, _MM_FROUND_NO_EXC);
-}
-
-__m512d test_mm512_rcp28_pd(__m512d a) {
- // CHECK-LABEL: @test_mm512_rcp28_pd
- // CHECK: @llvm.x86.avx512.rcp28.pd
- return _mm512_rcp28_pd(a);
-}
-
-__m512d test_mm512_mask_rcp28_pd(__m512d s, __mmask8 m, __m512d a) {
- // CHECK-LABEL: @test_mm512_mask_rcp28_pd
- // CHECK: @llvm.x86.avx512.rcp28.pd
- return _mm512_mask_rcp28_pd(s, m, a);
-}
-
-__m512d test_mm512_maskz_rcp28_pd(__mmask8 m, __m512d a) {
- // CHECK-LABEL: @test_mm512_maskz_rcp28_pd
- // CHECK: @llvm.x86.avx512.rcp28.pd
- return _mm512_maskz_rcp28_pd(m, a);
-}
-
-__m512 test_mm512_rcp28_round_ps(__m512 a) {
- // CHECK-LABEL: @test_mm512_rcp28_round_ps
- // CHECK: @llvm.x86.avx512.rcp28.ps
- return _mm512_rcp28_round_ps(a, _MM_FROUND_NO_EXC);
-}
-
-__m512 test_mm512_mask_rcp28_round_ps(__m512 s, __mmask16 m, __m512 a) {
- // CHECK-LABEL: @test_mm512_mask_rcp28_round_ps
- // CHECK: @llvm.x86.avx512.rcp28.ps
- return _mm512_mask_rcp28_round_ps(s, m, a, _MM_FROUND_NO_EXC);
-}
-
-__m512 test_mm512_maskz_rcp28_round_ps(__mmask16 m, __m512 a) {
- // CHECK-LABEL: @test_mm512_maskz_rcp28_round_ps
- // CHECK: @llvm.x86.avx512.rcp28.ps
- return _mm512_maskz_rcp28_round_ps(m, a, _MM_FROUND_NO_EXC);
-}
-
-__m512 test_mm512_rcp28_ps(__m512 a) {
- // CHECK-LABEL: @test_mm512_rcp28_ps
- // CHECK: @llvm.x86.avx512.rcp28.ps
- return _mm512_rcp28_ps(a);
-}
-
-__m512 test_mm512_mask_rcp28_ps(__m512 s, __mmask16 m, __m512 a) {
- // CHECK-LABEL: @test_mm512_mask_rcp28_ps
- // CHECK: @llvm.x86.avx512.rcp28.ps
- return _mm512_mask_rcp28_ps(s, m, a);
-}
-
-__m512 test_mm512_maskz_rcp28_ps(__mmask16 m, __m512 a) {
- // CHECK-LABEL: @test_mm512_maskz_rcp28_ps
- // CHECK: @llvm.x86.avx512.rcp28.ps
- return _mm512_maskz_rcp28_ps(m, a);
-}
-
-__m128 test_mm_rcp28_round_ss(__m128 a, __m128 b) {
- // CHECK-LABEL: @test_mm_rcp28_round_ss
- // CHECK: @llvm.x86.avx512.rcp28.ss
- return _mm_rcp28_round_ss(a, b, _MM_FROUND_NO_EXC);
-}
-
-__m128 test_mm_mask_rcp28_round_ss(__m128 s, __mmask16 m, __m128 a, __m128 b) {
- // CHECK-LABEL: @test_mm_mask_rcp28_round_ss
- // CHECK: @llvm.x86.avx512.rcp28.ss
- return _mm_mask_rcp28_round_ss(s, m, a, b, _MM_FROUND_NO_EXC);
-}
-
-__m128 test_mm_maskz_rcp28_round_ss(__mmask16 m, __m128 a, __m128 b) {
- // CHECK-LABEL: @test_mm_maskz_rcp28_round_ss
- // CHECK: @llvm.x86.avx512.rcp28.ss
- return _mm_maskz_rcp28_round_ss(m, a, b, _MM_FROUND_NO_EXC);
-}
-
-__m128 test_mm_rcp28_ss(__m128 a, __m128 b) {
- // CHECK-LABEL: @test_mm_rcp28_ss
- // CHECK: @llvm.x86.avx512.rcp28.ss
- return _mm_rcp28_ss(a, b);
-}
-
-__m128 test_mm_mask_rcp28_ss(__m128 s, __mmask16 m, __m128 a, __m128 b) {
- // CHECK-LABEL: @test_mm_mask_rcp28_ss
- // CHECK: @llvm.x86.avx512.rcp28.ss
- return _mm_mask_rcp28_ss(s, m, a, b);
-}
-
-__m128 test_mm_maskz_rcp28_ss(__mmask16 m, __m128 a, __m128 b) {
- // CHECK-LABEL: @test_mm_maskz_rcp28_ss
- // CHECK: @llvm.x86.avx512.rcp28.ss
- return _mm_maskz_rcp28_ss(m, a, b);
-}
-
-__m128d test_mm_rcp28_round_sd(__m128d a, __m128d b) {
- // CHECK-LABEL: @test_mm_rcp28_round_sd
- // CHECK: @llvm.x86.avx512.rcp28.sd
- return _mm_rcp28_round_sd(a, b, _MM_FROUND_NO_EXC);
-}
-
-__m128d test_mm_mask_rcp28_round_sd(__m128d s, __mmask8 m, __m128d a, __m128d b) {
- // CHECK-LABEL: @test_mm_mask_rcp28_round_sd
- // CHECK: @llvm.x86.avx512.rcp28.sd
- return _mm_mask_rcp28_round_sd(s, m, a, b, _MM_FROUND_NO_EXC);
-}
-
-__m128d test_mm_maskz_rcp28_round_sd(__mmask8 m, __m128d a, __m128d b) {
- // CHECK-LABEL: @test_mm_maskz_rcp28_round_sd
- // CHECK: @llvm.x86.avx512.rcp28.sd
- return _mm_maskz_rcp28_round_sd(m, a, b, _MM_FROUND_NO_EXC);
-}
-
-__m128d test_mm_rcp28_sd(__m128d a, __m128d b) {
- // CHECK-LABEL: @test_mm_rcp28_sd
- // CHECK: @llvm.x86.avx512.rcp28.sd
- return _mm_rcp28_sd(a, b);
-}
-
-__m128d test_mm_mask_rcp28_sd(__m128d s, __mmask8 m, __m128d a, __m128d b) {
- // CHECK-LABEL: @test_mm_mask_rcp28_sd
- // CHECK: @llvm.x86.avx512.rcp28.sd
- return _mm_mask_rcp28_sd(s, m, a, b);
-}
-
-__m128d test_mm_maskz_rcp28_sd(__mmask8 m, __m128d a, __m128d b) {
- // CHECK-LABEL: @test_mm_maskz_rcp28_sd
- // CHECK: @llvm.x86.avx512.rcp28.sd
- return _mm_maskz_rcp28_sd(m, a, b);
-}
-
-__m512d test_mm512_exp2a23_round_pd(__m512d a) {
- // CHECK-LABEL: @test_mm512_exp2a23_round_pd
- // CHECK: @llvm.x86.avx512.exp2.pd
- return _mm512_exp2a23_round_pd(a, _MM_FROUND_NO_EXC);
-}
-
-__m512d test_mm512_mask_exp2a23_round_pd(__m512d s, __mmask8 m, __m512d a) {
- // CHECK-LABEL: @test_mm512_mask_exp2a23_round_pd
- // CHECK: @llvm.x86.avx512.exp2.pd
- return _mm512_mask_exp2a23_round_pd(s, m, a, _MM_FROUND_NO_EXC);
-}
-
-__m512d test_mm512_maskz_exp2a23_round_pd(__mmask8 m, __m512d a) {
- // CHECK-LABEL: @test_mm512_maskz_exp2a23_round_pd
- // CHECK: @llvm.x86.avx512.exp2.pd
- return _mm512_maskz_exp2a23_round_pd(m, a, _MM_FROUND_NO_EXC);
-}
-
-__m512d test_mm512_exp2a23_pd(__m512d a) {
- // CHECK-LABEL: @test_mm512_exp2a23_pd
- // CHECK: @llvm.x86.avx512.exp2.pd
- return _mm512_exp2a23_pd(a);
-}
-
-__m512d test_mm512_mask_exp2a23_pd(__m512d s, __mmask8 m, __m512d a) {
- // CHECK-LABEL: @test_mm512_mask_exp2a23_pd
- // CHECK: @llvm.x86.avx512.exp2.pd
- return _mm512_mask_exp2a23_pd(s, m, a);
-}
-
-__m512d test_mm512_maskz_exp2a23_pd(__mmask8 m, __m512d a) {
- // CHECK-LABEL: @test_mm512_maskz_exp2a23_pd
- // CHECK: @llvm.x86.avx512.exp2.pd
- return _mm512_maskz_exp2a23_pd(m, a);
-}
-
-__m512 test_mm512_exp2a23_round_ps(__m512 a) {
- // CHECK-LABEL: @test_mm512_exp2a23_round_ps
- // CHECK: @llvm.x86.avx512.exp2.ps
- return _mm512_exp2a23_round_ps(a, _MM_FROUND_NO_EXC);
-}
-
-__m512 test_mm512_mask_exp2a23_round_ps(__m512 s, __mmask16 m, __m512 a) {
- // CHECK-LABEL: @test_mm512_mask_exp2a23_round_ps
- // CHECK: @llvm.x86.avx512.exp2.ps
- return _mm512_mask_exp2a23_round_ps(s, m, a, _MM_FROUND_NO_EXC);
-}
-
-__m512 test_mm512_maskz_exp2a23_round_ps(__mmask16 m, __m512 a) {
- // CHECK-LABEL: @test_mm512_maskz_exp2a23_round_ps
- // CHECK: @llvm.x86.avx512.exp2.ps
- return _mm512_maskz_exp2a23_round_ps(m, a, _MM_FROUND_NO_EXC);
-}
-
-__m512 test_mm512_exp2a23_ps(__m512 a) {
- // CHECK-LABEL: @test_mm512_exp2a23_ps
- // CHECK: @llvm.x86.avx512.exp2.ps
- return _mm512_exp2a23_ps(a);
-}
-
-__m512 test_mm512_mask_exp2a23_ps(__m512 s, __mmask16 m, __m512 a) {
- // CHECK-LABEL: @test_mm512_mask_exp2a23_ps
- // CHECK: @llvm.x86.avx512.exp2.ps
- return _mm512_mask_exp2a23_ps(s, m, a);
-}
-
-__m512 test_mm512_maskz_exp2a23_ps(__mmask16 m, __m512 a) {
- // CHECK-LABEL: @test_mm512_maskz_exp2a23_ps
- // CHECK: @llvm.x86.avx512.exp2.ps
- return _mm512_maskz_exp2a23_ps(m, a);
-}
-
diff --git a/clang/test/CodeGen/X86/avx512pf-builtins.c b/clang/test/CodeGen/X86/avx512pf-builtins.c
deleted file mode 100644
index 3a117ed6a946..000000000000
--- a/clang/test/CodeGen/X86/avx512pf-builtins.c
+++ /dev/null
@@ -1,100 +0,0 @@
-// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512pf -emit-llvm -o - -Wall | FileCheck %s
-
-
-#include <immintrin.h>
-
-void test_mm512_mask_prefetch_i32gather_pd(__m256i index, __mmask8 mask, void const *addr) {
- // CHECK-LABEL: @test_mm512_mask_prefetch_i32gather_pd
- // CHECK: @llvm.x86.avx512.gatherpf.dpd
- return _mm512_mask_prefetch_i32gather_pd(index, mask, addr, 2, _MM_HINT_T0);
-}
-
-void test_mm512_prefetch_i32gather_pd(__m256i index, void const *addr) {
- // CHECK-LABEL: @test_mm512_prefetch_i32gather_pd
- // CHECK: @llvm.x86.avx512.gatherpf.dpd
- return _mm512_prefetch_i32gather_pd(index, addr, 2, _MM_HINT_T0);
-}
-
-void test_mm512_mask_prefetch_i32gather_ps(__m512i index, __mmask16 mask, void const *addr) {
- // CHECK-LABEL: @test_mm512_mask_prefetch_i32gather_ps
- // CHECK: @llvm.x86.avx512.gatherpf.dps
- return _mm512_mask_prefetch_i32gather_ps(index, mask, addr, 2, _MM_HINT_T0);
-}
-
-void test_mm512_prefetch_i32gather_ps(__m512i index, void const *addr) {
- // CHECK-LABEL: @test_mm512_prefetch_i32gather_ps
- // CHECK: @llvm.x86.avx512.gatherpf.dps
- return _mm512_prefetch_i32gather_ps(index, addr, 2, _MM_HINT_T0);
-}
-
-void test_mm512_mask_prefetch_i64gather_pd(__m512i index, __mmask8 mask, void const *addr) {
- // CHECK-LABEL: @test_mm512_mask_prefetch_i64gather_pd
- // CHECK: @llvm.x86.avx512.gatherpf.qpd
- return _mm512_mask_prefetch_i64gather_pd(index, mask, addr, 2, _MM_HINT_T0);
-}
-
-void test_mm512_prefetch_i64gather_pd(__m512i index, void const *addr) {
- // CHECK-LABEL: @test_mm512_prefetch_i64gather_pd
- // CHECK: @llvm.x86.avx512.gatherpf.qpd
- return _mm512_prefetch_i64gather_pd(index, addr, 2, _MM_HINT_T0);
-}
-
-void test_mm512_mask_prefetch_i64gather_ps(__m512i index, __mmask8 mask, void const *addr) {
- // CHECK-LABEL: @test_mm512_mask_prefetch_i64gather_ps
- // CHECK: @llvm.x86.avx512.gatherpf.qps
- return _mm512_mask_prefetch_i64gather_ps(index, mask, addr, 2, _MM_HINT_T0);
-}
-
-void test_mm512_prefetch_i64gather_ps(__m512i index, void const *addr) {
- // CHECK-LABEL: @test_mm512_prefetch_i64gather_ps
- // CHECK: @llvm.x86.avx512.gatherpf.qps
- return _mm512_prefetch_i64gather_ps(index, addr, 2, _MM_HINT_T0);
-}
-
-void test_mm512_prefetch_i32scatter_pd(void *addr, __m256i index) {
- // CHECK-LABEL: @test_mm512_prefetch_i32scatter_pd
- // CHECK: @llvm.x86.avx512.scatterpf.dpd.512
- return _mm512_prefetch_i32scatter_pd(addr, index, 1, _MM_HINT_T1);
-}
-
-void test_mm512_mask_prefetch_i32scatter_pd(void *addr, __mmask8 mask, __m256i index) {
- // CHECK-LABEL: @test_mm512_mask_prefetch_i32scatter_pd
- // CHECK: @llvm.x86.avx512.scatterpf.dpd.512
- return _mm512_mask_prefetch_i32scatter_pd(addr, mask, index, 1, _MM_HINT_T1);
-}
-
-void test_mm512_prefetch_i32scatter_ps(void *addr, __m512i index) {
- // CHECK-LABEL: @test_mm512_prefetch_i32scatter_ps
- // CHECK: @llvm.x86.avx512.scatterpf.dps.512
- return _mm512_prefetch_i32scatter_ps(addr, index, 1, _MM_HINT_T1);
-}
-
-void test_mm512_mask_prefetch_i32scatter_ps(void *addr, __mmask16 mask, __m512i index) {
- // CHECK-LABEL: @test_mm512_mask_prefetch_i32scatter_ps
- // CHECK: @llvm.x86.avx512.scatterpf.dps.512
- return _mm512_mask_prefetch_i32scatter_ps(addr, mask, index, 1, _MM_HINT_T1);
-}
-
-void test_mm512_prefetch_i64scatter_pd(void *addr, __m512i index) {
- // CHECK-LABEL: @test_mm512_prefetch_i64scatter_pd
- // CHECK: @llvm.x86.avx512.scatterpf.qpd.512
- return _mm512_prefetch_i64scatter_pd(addr, index, 1, _MM_HINT_T1);
-}
-
-void test_mm512_mask_prefetch_i64scatter_pd(void *addr, __mmask16 mask, __m512i index) {
- // CHECK-LABEL: @test_mm512_mask_prefetch_i64scatter_pd
- // CHECK: @llvm.x86.avx512.scatterpf.qpd.512
- return _mm512_mask_prefetch_i64scatter_pd(addr, mask, index, 1, _MM_HINT_T1);
-}
-
-void test_mm512_prefetch_i64scatter_ps(void *addr, __m512i index) {
- // CHECK-LABEL: @test_mm512_prefetch_i64scatter_ps
- // CHECK: @llvm.x86.avx512.scatterpf.qps.512
- return _mm512_prefetch_i64scatter_ps(addr, index, 1, _MM_HINT_T1);
-}
-
-void test_mm512_mask_prefetch_i64scatter_ps(void *addr, __mmask16 mask, __m512i index) {
- // CHECK-LABEL: @test_mm512_mask_prefetch_i64scatter_ps
- // CHECK: @llvm.x86.avx512.scatterpf.qps.512
- return _mm512_mask_prefetch_i64scatter_ps(addr, mask, index, 1, _MM_HINT_T1);
-}
diff --git a/clang/test/CodeGen/aarch64-byval-temp.c b/clang/test/CodeGen/aarch64-byval-temp.c
index e9e2586406e5..0384830c69a4 100644
--- a/clang/test/CodeGen/aarch64-byval-temp.c
+++ b/clang/test/CodeGen/aarch64-byval-temp.c
@@ -1,13 +1,14 @@
-// RUN: %clang_cc1 -emit-llvm -triple arm64-- -o - %s -O0 | FileCheck %s --check-prefix=CHECK-O0
-// RUN: %clang_cc1 -emit-llvm -disable-llvm-optzns -triple arm64-- -o - %s -O3 | FileCheck %s --check-prefix=CHECK-O3
+// RUN: %clang_cc1 -emit-llvm -triple arm64-- -fexperimental-max-bitint-width=1024 -o - %s -O0 | FileCheck %s --check-prefix=CHECK-O0
+// RUN: %clang_cc1 -emit-llvm -disable-llvm-optzns -fexperimental-max-bitint-width=1024 -triple arm64-- -o - %s -O3 | FileCheck %s --check-prefix=CHECK-O3
struct large {
void* pointers[8];
};
void pass_large(struct large);
+void pass_large_BitInt(_BitInt(129));
-// For arm64, we don't use byval to pass structs but instead we create
+// For arm64, we don't use byval to pass structs or _BitInt(>128) types; instead we create
// temporary allocas.
//
// Make sure we generate the appropriate lifetime markers for the temporary
@@ -71,3 +72,41 @@ void example(void) {
// Mark the end of the lifetime of `l`.
// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr %l)
// CHECK-O3-NEXT: ret void
+
+void example_BitInt(void) {
+ _BitInt(129) l = {0};
+ pass_large_BitInt(l);
+ pass_large_BitInt(l);
+}
+// CHECK-O0-LABEL: define dso_local void @example_BitInt(
+// CHECK-O0-NEXT: entry:
+// CHECK-O0-NEXT: [[L:%.*]] = alloca i129, align 16
+// CHECK-O0-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca i129, align 16
+// CHECK-O0-NEXT: [[INDIRECT_ARG_TEMP1:%.*]] = alloca i129, align 16
+// CHECK-O0-NEXT: store i129 0, ptr [[L]], align 16
+// CHECK-O0-NEXT: [[TMP0:%.*]] = load i129, ptr [[L]], align 16
+// CHECK-O0-NEXT: store i129 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 16
+// CHECK-O0-NEXT: call void @pass_large_BitInt(ptr noundef [[INDIRECT_ARG_TEMP]])
+// CHECK-O0-NEXT: [[TMP1:%.*]] = load i129, ptr [[L]], align 16
+// CHECK-O0-NEXT: store i129 [[TMP1]], ptr [[INDIRECT_ARG_TEMP1]], align 16
+// CHECK-O0-NEXT: call void @pass_large_BitInt(ptr noundef [[INDIRECT_ARG_TEMP1]])
+// CHECK-O0-NEXT: ret void
+//
+// CHECK-O3-LABEL: define dso_local void @example_BitInt(
+// CHECK-O3-NEXT: entry:
+// CHECK-O3-NEXT: [[L:%.*]] = alloca i129, align 16
+// CHECK-O3-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca i129, align 16
+// CHECK-O3-NEXT: [[INDIRECT_ARG_TEMP1:%.*]] = alloca i129, align 16
+// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[L]])
+// CHECK-O3-NEXT: store i129 0, ptr [[L]], align 16, !tbaa [[TBAA6:![0-9]+]]
+// CHECK-O3-NEXT: [[TMP0:%.*]] = load i129, ptr [[L]], align 16, !tbaa [[TBAA6]]
+// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[INDIRECT_ARG_TEMP]])
+// CHECK-O3-NEXT: store i129 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 16, !tbaa [[TBAA6]]
+// CHECK-O3-NEXT: call void @pass_large_BitInt(ptr noundef [[INDIRECT_ARG_TEMP]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[INDIRECT_ARG_TEMP]])
+// CHECK-O3-NEXT: [[TMP1:%.*]] = load i129, ptr [[L]], align 16, !tbaa [[TBAA6]]
+// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[INDIRECT_ARG_TEMP1]])
+// CHECK-O3-NEXT: store i129 [[TMP1]], ptr [[INDIRECT_ARG_TEMP1]], align 16, !tbaa [[TBAA6]]
+// CHECK-O3-NEXT: call void @pass_large_BitInt(ptr noundef [[INDIRECT_ARG_TEMP1]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[INDIRECT_ARG_TEMP1]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[L]])
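
A minimal standalone reproduction of the behavior the new checks exercise (a sketch, not part of the patch; file and function names are illustrative). On AArch64, a _BitInt wider than 128 bits is passed indirectly, just like a large struct: clang copies the value into a temporary alloca, passes its address, and (with optimizations enabled) brackets the temporary with llvm.lifetime.start/end markers.

    /* bitint_indirect.c -- build with the same cc1 line as the test above:
       clang -cc1 -emit-llvm -triple arm64-- -fexperimental-max-bitint-width=1024 -O0 -o - bitint_indirect.c */
    void sink(_BitInt(129)); /* wider than 128 bits: passed indirectly on AArch64 */

    void demo(void) {
      _BitInt(129) v = 1;
      sink(v); /* lowers to: store i129 into a temp alloca, then call @sink(ptr noundef %temp) */
    }
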
diff --git a/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_reinterpret_svcount_svbool.c b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_reinterpret_svcount_svbool.c
index c442d2c0c475..d894e98451b4 100644
--- a/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_reinterpret_svcount_svbool.c
+++ b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_reinterpret_svcount_svbool.c
@@ -2,12 +2,14 @@
// REQUIRES: aarch64-registered-target
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve2p1 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sve2p1 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64 -target-feature +sme2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -triple aarch64 -target-feature +sme2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-#include <arm_sme.h>
+#include <arm_sve.h>
#if defined __ARM_FEATURE_SME
#define MODE_ATTR __arm_streaming
@@ -16,7 +18,7 @@
#endif
#ifdef SVE_OVERLOADED_FORMS
-// A simple used,unused... macro, long enough to represent any SVE builtin.§
+// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
diff --git a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
index a4abe96cc08a..55e1ed393d84 100644
--- a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
+++ b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
@@ -88,8 +88,10 @@ typedef svint8_t vec2 __attribute__((arm_sve_vector_bits(N)));
// CHECK-NEXT: entry:
// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS,8)]] x i8>, align 16
// CHECK-NEXT: [[X:%.*]] = tail call <[[#div(VBITS,8)]] x i8> @llvm.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8(<vscale x 16 x i8> [[X_COERCE:%.*]], i64 0)
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 [[SIZE:[0-9]+]], ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[X]], ptr [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6]]
// CHECK-NEXT: call void @f3(ptr noundef nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 [[SIZE]], ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6]]
// CHECK-NEXT: ret void
// CHECK128-LABEL: declare void @f3(<16 x i8> noundef)
diff --git a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
index 05587fd9e7fe..30ea73b63bce 100644
--- a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
+++ b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
@@ -73,8 +73,10 @@ typedef svint16_t vec2 __attribute__((arm_sve_vector_bits(N)));
// CHECK128-NEXT: ret void
// CHECKWIDE-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS, 16)]] x i16>, align 16
// CHECKWIDE-NEXT: [[X:%.*]] = tail call <[[#div(VBITS, 16)]] x i16> @llvm.vector.extract.v[[#div(VBITS, 16)]]i16.nxv8i16(<vscale x 8 x i16> [[X_COERCE:%.*]], i64 0)
+// CHECKWIDE-NEXT: call void @llvm.lifetime.start.p0(i64 [[SIZE:[0-9]+]], ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
// CHECKWIDE-NEXT: store <[[#div(VBITS, 16)]] x i16> [[X]], ptr [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6:!tbaa !.*]]
// CHECKWIDE-NEXT: call void @_Z1fDv[[#div(VBITS, 16)]]_s(ptr noundef nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]]
+// CHECKWIDE-NEXT: call void @llvm.lifetime.end.p0(i64 [[SIZE]], ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6]]
// CHECKWIDE-NEXT: ret void
void g(vec2 x) { f(x); } // OK
#endif
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_reinterpret-bfloat.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_reinterpret-bfloat.c
index bf2cd23e4080..41208bfb1f43 100644
--- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_reinterpret-bfloat.c
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_reinterpret-bfloat.c
@@ -4,6 +4,10 @@
// RUN: %clang_cc1 -fclang-abi-compat=latest -DTUPLE=x2 -triple aarch64 -target-feature +sve -target-feature +bf16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=TUPLE2
// RUN: %clang_cc1 -fclang-abi-compat=latest -DTUPLE=x3 -triple aarch64 -target-feature +sve -target-feature +bf16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=TUPLE3
// RUN: %clang_cc1 -fclang-abi-compat=latest -DTUPLE=x4 -triple aarch64 -target-feature +sve -target-feature +bf16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=TUPLE4
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DTUPLE=x2 -triple aarch64 -target-feature +sme -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=TUPLE2
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DTUPLE=x3 -triple aarch64 -target-feature +sme -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=TUPLE3
+// RUN: %clang_cc1 -fclang-abi-compat=latest -DTUPLE=x4 -triple aarch64 -target-feature +sme -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=TUPLE4
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -target-feature +bf16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -fclang-abi-compat=latest -DTUPLE=x2 -triple aarch64 -target-feature +sve -target-feature +bf16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-TUPLE2
// RUN: %clang_cc1 -fclang-abi-compat=latest -DTUPLE=x3 -triple aarch64 -target-feature +sve -target-feature +bf16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-TUPLE3
@@ -18,9 +22,16 @@
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -DTUPLE=x4 -triple aarch64 -target-feature +sve -target-feature +bf16 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-TUPLE4
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -target-feature +bf16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sve.h>
+#if defined __ARM_FEATURE_SME
+#define MODE_ATTR __arm_streaming
+#else
+#define MODE_ATTR
+#endif
+
#ifdef TUPLE
#define TYPE_1(base,tuple) base ## tuple ## _t
#define TYPE_0(base,tuple) TYPE_1(base,tuple)
@@ -81,7 +92,7 @@
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x bfloat> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svint8) test_svreinterpret_s8_bf16(TYPE(svbfloat16) op) {
+TYPE(svint8) test_svreinterpret_s8_bf16(TYPE(svbfloat16) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_s8, _bf16)(op);
}
@@ -125,7 +136,7 @@ TYPE(svint8) test_svreinterpret_s8_bf16(TYPE(svbfloat16) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x bfloat> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svint16) test_svreinterpret_s16_bf16(TYPE(svbfloat16) op) {
+TYPE(svint16) test_svreinterpret_s16_bf16(TYPE(svbfloat16) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_s16, _bf16)(op);
}
@@ -169,7 +180,7 @@ TYPE(svint16) test_svreinterpret_s16_bf16(TYPE(svbfloat16) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x bfloat> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svint32) test_svreinterpret_s32_bf16(TYPE(svbfloat16) op) {
+TYPE(svint32) test_svreinterpret_s32_bf16(TYPE(svbfloat16) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_s32, _bf16)(op);
}
// CHECK-LABEL: @test_svreinterpret_s64_bf16(
@@ -212,7 +223,7 @@ TYPE(svint32) test_svreinterpret_s32_bf16(TYPE(svbfloat16) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x bfloat> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svint64) test_svreinterpret_s64_bf16(TYPE(svbfloat16) op) {
+TYPE(svint64) test_svreinterpret_s64_bf16(TYPE(svbfloat16) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_s64, _bf16)(op);
}
@@ -256,7 +267,7 @@ TYPE(svint64) test_svreinterpret_s64_bf16(TYPE(svbfloat16) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x bfloat> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svuint8) test_svreinterpret_u8_bf16(TYPE(svbfloat16) op) {
+TYPE(svuint8) test_svreinterpret_u8_bf16(TYPE(svbfloat16) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_u8, _bf16)(op);
}
@@ -300,7 +311,7 @@ TYPE(svuint8) test_svreinterpret_u8_bf16(TYPE(svbfloat16) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x bfloat> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svuint16) test_svreinterpret_u16_bf16(TYPE(svbfloat16) op) {
+TYPE(svuint16) test_svreinterpret_u16_bf16(TYPE(svbfloat16) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_u16, _bf16)(op);
}
@@ -344,7 +355,7 @@ TYPE(svuint16) test_svreinterpret_u16_bf16(TYPE(svbfloat16) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x bfloat> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svuint32) test_svreinterpret_u32_bf16(TYPE(svbfloat16) op) {
+TYPE(svuint32) test_svreinterpret_u32_bf16(TYPE(svbfloat16) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_u32, _bf16)(op);
}
@@ -388,7 +399,7 @@ TYPE(svuint32) test_svreinterpret_u32_bf16(TYPE(svbfloat16) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x bfloat> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svuint64) test_svreinterpret_u64_bf16(TYPE(svbfloat16) op) {
+TYPE(svuint64) test_svreinterpret_u64_bf16(TYPE(svbfloat16) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_u64, _bf16)(op);
}
@@ -432,7 +443,7 @@ TYPE(svuint64) test_svreinterpret_u64_bf16(TYPE(svbfloat16) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 32 x bfloat>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
-TYPE(svbfloat16) test_svreinterpret_bf16_s8(TYPE(svint8) op) {
+TYPE(svbfloat16) test_svreinterpret_bf16_s8(TYPE(svint8) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_bf16, _s8)(op);
}
@@ -476,7 +487,7 @@ TYPE(svbfloat16) test_svreinterpret_bf16_s8(TYPE(svint8) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 32 x bfloat>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
-TYPE(svbfloat16) test_svreinterpret_bf16_s16(TYPE(svint16) op) {
+TYPE(svbfloat16) test_svreinterpret_bf16_s16(TYPE(svint16) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_bf16, _s16)(op);
}
@@ -520,7 +531,7 @@ TYPE(svbfloat16) test_svreinterpret_bf16_s16(TYPE(svint16) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 32 x bfloat>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
-TYPE(svbfloat16) test_svreinterpret_bf16_s32(TYPE(svint32) op) {
+TYPE(svbfloat16) test_svreinterpret_bf16_s32(TYPE(svint32) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_bf16, _s32)(op);
}
@@ -564,7 +575,7 @@ TYPE(svbfloat16) test_svreinterpret_bf16_s32(TYPE(svint32) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 32 x bfloat>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
-TYPE(svbfloat16) test_svreinterpret_bf16_s64(TYPE(svint64) op) {
+TYPE(svbfloat16) test_svreinterpret_bf16_s64(TYPE(svint64) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_bf16, _s64)(op);
}
@@ -608,7 +619,7 @@ TYPE(svbfloat16) test_svreinterpret_bf16_s64(TYPE(svint64) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 32 x bfloat>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
-TYPE(svbfloat16) test_svreinterpret_bf16_u8(TYPE(svuint8) op) {
+TYPE(svbfloat16) test_svreinterpret_bf16_u8(TYPE(svuint8) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_bf16, _u8)(op);
}
@@ -652,7 +663,7 @@ TYPE(svbfloat16) test_svreinterpret_bf16_u8(TYPE(svuint8) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 32 x bfloat>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
-TYPE(svbfloat16) test_svreinterpret_bf16_u16(TYPE(svuint16) op) {
+TYPE(svbfloat16) test_svreinterpret_bf16_u16(TYPE(svuint16) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_bf16, _u16)(op);
}
@@ -696,7 +707,7 @@ TYPE(svbfloat16) test_svreinterpret_bf16_u16(TYPE(svuint16) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 32 x bfloat>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
-TYPE(svbfloat16) test_svreinterpret_bf16_u32(TYPE(svuint32) op) {
+TYPE(svbfloat16) test_svreinterpret_bf16_u32(TYPE(svuint32) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_bf16, _u32)(op);
}
@@ -740,7 +751,7 @@ TYPE(svbfloat16) test_svreinterpret_bf16_u32(TYPE(svuint32) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 32 x bfloat>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
-TYPE(svbfloat16) test_svreinterpret_bf16_u64(TYPE(svuint64) op) {
+TYPE(svbfloat16) test_svreinterpret_bf16_u64(TYPE(svuint64) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_bf16, _u64)(op);
}
@@ -776,7 +787,7 @@ TYPE(svbfloat16) test_svreinterpret_bf16_u64(TYPE(svuint64) op) {
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 32 x bfloat> [[OP:%.*]]
//
-TYPE(svbfloat16) test_svreinterpret_bf16_bf16(TYPE(svbfloat16) op) {
+TYPE(svbfloat16) test_svreinterpret_bf16_bf16(TYPE(svbfloat16) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_bf16, _bf16)(op);
}
@@ -820,7 +831,7 @@ TYPE(svbfloat16) test_svreinterpret_bf16_bf16(TYPE(svbfloat16) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x half> [[OP:%.*]] to <vscale x 32 x bfloat>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
-TYPE(svbfloat16) test_svreinterpret_bf16_f16(TYPE(svfloat16) op) {
+TYPE(svbfloat16) test_svreinterpret_bf16_f16(TYPE(svfloat16) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_bf16, _f16)(op);
}
@@ -864,7 +875,7 @@ TYPE(svbfloat16) test_svreinterpret_bf16_f16(TYPE(svfloat16) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x float> [[OP:%.*]] to <vscale x 32 x bfloat>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
-TYPE(svbfloat16) test_svreinterpret_bf16_f32(TYPE(svfloat32) op) {
+TYPE(svbfloat16) test_svreinterpret_bf16_f32(TYPE(svfloat32) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_bf16, _f32)(op);
}
@@ -908,7 +919,7 @@ TYPE(svbfloat16) test_svreinterpret_bf16_f32(TYPE(svfloat32) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x double> [[OP:%.*]] to <vscale x 32 x bfloat>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
//
-TYPE(svbfloat16) test_svreinterpret_bf16_f64(TYPE(svfloat64) op) {
+TYPE(svbfloat16) test_svreinterpret_bf16_f64(TYPE(svfloat64) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_bf16, _f64)(op);
}
@@ -952,7 +963,7 @@ TYPE(svbfloat16) test_svreinterpret_bf16_f64(TYPE(svfloat64) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x bfloat> [[OP:%.*]] to <vscale x 16 x float>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
-TYPE(svfloat32) test_svreinterpret_f32_bf16(TYPE(svbfloat16) op) {
+TYPE(svfloat32) test_svreinterpret_f32_bf16(TYPE(svbfloat16) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_f32, _bf16)(op);
}
@@ -996,7 +1007,7 @@ TYPE(svfloat32) test_svreinterpret_f32_bf16(TYPE(svbfloat16) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x bfloat> [[OP:%.*]] to <vscale x 32 x half>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
-TYPE(svfloat16) test_svreinterpret_f16_bf16(TYPE(svbfloat16) op) {
+TYPE(svfloat16) test_svreinterpret_f16_bf16(TYPE(svbfloat16) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_f16, _bf16)(op);
}
@@ -1040,6 +1051,6 @@ TYPE(svfloat16) test_svreinterpret_f16_bf16(TYPE(svbfloat16) op) {
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x bfloat> [[OP:%.*]] to <vscale x 8 x double>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
-TYPE(svfloat64) test_svreinterpret_f64_bf16(TYPE(svbfloat16) op) {
+TYPE(svfloat64) test_svreinterpret_f64_bf16(TYPE(svbfloat16) op) MODE_ATTR {
return SVE_ACLE_FUNC(svreinterpret_f64, _bf16)(op);
}
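
The MODE_ATTR pattern added above is what lets one source file cover both configurations: under -target-feature +sme the __ARM_FEATURE_SME macro is defined, so every test function gains the __arm_streaming attribute and the builtins are checked in streaming mode, while under +sve the macro expands to nothing. A reduced sketch of the pattern (the function name is illustrative, not from the test):

    #include <arm_sve.h>

    #if defined __ARM_FEATURE_SME
    #define MODE_ATTR __arm_streaming  /* SME build: run the body in streaming mode */
    #else
    #define MODE_ATTR                  /* SVE build: attribute expands to nothing */
    #endif

    /* svreinterpret is a pure bitcast, so it is legal in both modes. */
    svint8_t demo_reinterpret(svuint8_t op) MODE_ATTR {
      return svreinterpret_s8_u8(op);
    }
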
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_reinterpret.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_reinterpret.c
index 3d9d5c3ce45a..e61bbf3e03d7 100644
--- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_reinterpret.c
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_reinterpret.c
@@ -4,6 +4,10 @@
// RUN: %clang_cc1 -DTUPLE=x2 -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=TUPLE2
// RUN: %clang_cc1 -DTUPLE=x3 -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=TUPLE3
// RUN: %clang_cc1 -DTUPLE=x4 -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=TUPLE4
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DTUPLE=x2 -triple aarch64 -target-feature +sme -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=TUPLE2
+// RUN: %clang_cc1 -DTUPLE=x3 -triple aarch64 -target-feature +sme -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=TUPLE3
+// RUN: %clang_cc1 -DTUPLE=x4 -triple aarch64 -target-feature +sme -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=TUPLE4
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DTUPLE=x2 -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-TUPLE2
// RUN: %clang_cc1 -DTUPLE=x3 -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-TUPLE3
@@ -17,9 +21,16 @@
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -DTUPLE=x3 -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-TUPLE3
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -DTUPLE=x4 -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-TUPLE4
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sve.h>
+#if defined __ARM_FEATURE_SME
+#define MODE_ATTR __arm_streaming
+#else
+#define MODE_ATTR
+#endif
+
#ifdef TUPLE
#define TYPE_1(base,tuple) base ## tuple ## _t
#define TYPE_0(base,tuple) TYPE_1(base,tuple)
@@ -72,7 +83,7 @@
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[OP:%.*]]
//
-TYPE(svint8) test_svreinterpret_s8_s8(TYPE(svint8) op)
+TYPE(svint8) test_svreinterpret_s8_s8(TYPE(svint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s8,_s8)(op);
}
@@ -117,7 +128,7 @@ TYPE(svint8) test_svreinterpret_s8_s8(TYPE(svint8) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svint8) test_svreinterpret_s8_s16(TYPE(svint16) op)
+TYPE(svint8) test_svreinterpret_s8_s16(TYPE(svint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s8,_s16)(op);
}
@@ -162,7 +173,7 @@ TYPE(svint8) test_svreinterpret_s8_s16(TYPE(svint16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svint8) test_svreinterpret_s8_s32(TYPE(svint32) op)
+TYPE(svint8) test_svreinterpret_s8_s32(TYPE(svint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s8,_s32)(op);
}
@@ -207,7 +218,7 @@ TYPE(svint8) test_svreinterpret_s8_s32(TYPE(svint32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svint8) test_svreinterpret_s8_s64(TYPE(svint64) op)
+TYPE(svint8) test_svreinterpret_s8_s64(TYPE(svint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s8,_s64)(op);
}
@@ -244,7 +255,7 @@ TYPE(svint8) test_svreinterpret_s8_s64(TYPE(svint64) op)
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[OP:%.*]]
//
-TYPE(svint8) test_svreinterpret_s8_u8(TYPE(svuint8) op)
+TYPE(svint8) test_svreinterpret_s8_u8(TYPE(svuint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s8,_u8)(op);
}
@@ -289,7 +300,7 @@ TYPE(svint8) test_svreinterpret_s8_u8(TYPE(svuint8) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svint8) test_svreinterpret_s8_u16(TYPE(svuint16) op)
+TYPE(svint8) test_svreinterpret_s8_u16(TYPE(svuint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s8,_u16)(op);
}
@@ -335,7 +346,7 @@ TYPE(svint8) test_svreinterpret_s8_u16(TYPE(svuint16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svint8) test_svreinterpret_s8_u32(TYPE(svuint32) op)
+TYPE(svint8) test_svreinterpret_s8_u32(TYPE(svuint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s8,_u32)(op);
}
@@ -381,7 +392,7 @@ TYPE(svint8) test_svreinterpret_s8_u32(TYPE(svuint32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svint8) test_svreinterpret_s8_u64(TYPE(svuint64) op)
+TYPE(svint8) test_svreinterpret_s8_u64(TYPE(svuint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s8,_u64)(op);
}
@@ -426,7 +437,7 @@ TYPE(svint8) test_svreinterpret_s8_u64(TYPE(svuint64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x half> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svint8) test_svreinterpret_s8_f16(TYPE(svfloat16) op)
+TYPE(svint8) test_svreinterpret_s8_f16(TYPE(svfloat16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s8,_f16)(op);
}
@@ -471,7 +482,7 @@ TYPE(svint8) test_svreinterpret_s8_f16(TYPE(svfloat16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x float> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svint8) test_svreinterpret_s8_f32(TYPE(svfloat32) op)
+TYPE(svint8) test_svreinterpret_s8_f32(TYPE(svfloat32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s8,_f32)(op);
}
@@ -516,7 +527,7 @@ TYPE(svint8) test_svreinterpret_s8_f32(TYPE(svfloat32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x double> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svint8) test_svreinterpret_s8_f64(TYPE(svfloat64) op)
+TYPE(svint8) test_svreinterpret_s8_f64(TYPE(svfloat64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s8,_f64)(op);
}
@@ -561,7 +572,7 @@ TYPE(svint8) test_svreinterpret_s8_f64(TYPE(svfloat64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svint16) test_svreinterpret_s16_s8(TYPE(svint8) op)
+TYPE(svint16) test_svreinterpret_s16_s8(TYPE(svint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s16,_s8)(op);
}
@@ -598,7 +609,7 @@ TYPE(svint16) test_svreinterpret_s16_s8(TYPE(svint8) op)
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[OP:%.*]]
//
-TYPE(svint16) test_svreinterpret_s16_s16(TYPE(svint16) op)
+TYPE(svint16) test_svreinterpret_s16_s16(TYPE(svint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s16,_s16)(op);
}
@@ -643,7 +654,7 @@ TYPE(svint16) test_svreinterpret_s16_s16(TYPE(svint16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svint16) test_svreinterpret_s16_s32(TYPE(svint32) op)
+TYPE(svint16) test_svreinterpret_s16_s32(TYPE(svint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s16,_s32)(op);
}
@@ -688,7 +699,7 @@ TYPE(svint16) test_svreinterpret_s16_s32(TYPE(svint32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svint16) test_svreinterpret_s16_s64(TYPE(svint64) op)
+TYPE(svint16) test_svreinterpret_s16_s64(TYPE(svint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s16,_s64)(op);
}
@@ -733,7 +744,7 @@ TYPE(svint16) test_svreinterpret_s16_s64(TYPE(svint64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svint16) test_svreinterpret_s16_u8(TYPE(svuint8) op)
+TYPE(svint16) test_svreinterpret_s16_u8(TYPE(svuint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s16,_u8)(op);
}
@@ -770,7 +781,7 @@ TYPE(svint16) test_svreinterpret_s16_u8(TYPE(svuint8) op)
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[OP:%.*]]
//
-TYPE(svint16) test_svreinterpret_s16_u16(TYPE(svuint16) op)
+TYPE(svint16) test_svreinterpret_s16_u16(TYPE(svuint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s16,_u16)(op);
}
@@ -815,7 +826,7 @@ TYPE(svint16) test_svreinterpret_s16_u16(TYPE(svuint16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svint16) test_svreinterpret_s16_u32(TYPE(svuint32) op)
+TYPE(svint16) test_svreinterpret_s16_u32(TYPE(svuint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s16,_u32)(op);
}
@@ -860,7 +871,7 @@ TYPE(svint16) test_svreinterpret_s16_u32(TYPE(svuint32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svint16) test_svreinterpret_s16_u64(TYPE(svuint64) op)
+TYPE(svint16) test_svreinterpret_s16_u64(TYPE(svuint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s16,_u64)(op);
}
@@ -905,7 +916,7 @@ TYPE(svint16) test_svreinterpret_s16_u64(TYPE(svuint64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x half> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svint16) test_svreinterpret_s16_f16(TYPE(svfloat16) op)
+TYPE(svint16) test_svreinterpret_s16_f16(TYPE(svfloat16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s16,_f16)(op);
}
@@ -950,7 +961,7 @@ TYPE(svint16) test_svreinterpret_s16_f16(TYPE(svfloat16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x float> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svint16) test_svreinterpret_s16_f32(TYPE(svfloat32) op)
+TYPE(svint16) test_svreinterpret_s16_f32(TYPE(svfloat32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s16,_f32)(op);
}
@@ -995,7 +1006,7 @@ TYPE(svint16) test_svreinterpret_s16_f32(TYPE(svfloat32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x double> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svint16) test_svreinterpret_s16_f64(TYPE(svfloat64) op)
+TYPE(svint16) test_svreinterpret_s16_f64(TYPE(svfloat64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s16,_f64)(op);
}
@@ -1040,7 +1051,7 @@ TYPE(svint16) test_svreinterpret_s16_f64(TYPE(svfloat64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svint32) test_svreinterpret_s32_s8(TYPE(svint8) op)
+TYPE(svint32) test_svreinterpret_s32_s8(TYPE(svint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s32,_s8)(op);
}
@@ -1085,7 +1096,7 @@ TYPE(svint32) test_svreinterpret_s32_s8(TYPE(svint8) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svint32) test_svreinterpret_s32_s16(TYPE(svint16) op)
+TYPE(svint32) test_svreinterpret_s32_s16(TYPE(svint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s32,_s16)(op);
}
@@ -1122,7 +1133,7 @@ TYPE(svint32) test_svreinterpret_s32_s16(TYPE(svint16) op)
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[OP:%.*]]
//
-TYPE(svint32) test_svreinterpret_s32_s32(TYPE(svint32) op)
+TYPE(svint32) test_svreinterpret_s32_s32(TYPE(svint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s32,_s32)(op);
}
@@ -1167,7 +1178,7 @@ TYPE(svint32) test_svreinterpret_s32_s32(TYPE(svint32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svint32) test_svreinterpret_s32_s64(TYPE(svint64) op)
+TYPE(svint32) test_svreinterpret_s32_s64(TYPE(svint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s32,_s64)(op);
}
@@ -1212,7 +1223,7 @@ TYPE(svint32) test_svreinterpret_s32_s64(TYPE(svint64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svint32) test_svreinterpret_s32_u8(TYPE(svuint8) op)
+TYPE(svint32) test_svreinterpret_s32_u8(TYPE(svuint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s32,_u8)(op);
}
@@ -1257,7 +1268,7 @@ TYPE(svint32) test_svreinterpret_s32_u8(TYPE(svuint8) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svint32) test_svreinterpret_s32_u16(TYPE(svuint16) op)
+TYPE(svint32) test_svreinterpret_s32_u16(TYPE(svuint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s32,_u16)(op);
}
@@ -1294,7 +1305,7 @@ TYPE(svint32) test_svreinterpret_s32_u16(TYPE(svuint16) op)
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[OP:%.*]]
//
-TYPE(svint32) test_svreinterpret_s32_u32(TYPE(svuint32) op)
+TYPE(svint32) test_svreinterpret_s32_u32(TYPE(svuint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s32,_u32)(op);
}
@@ -1339,7 +1350,7 @@ TYPE(svint32) test_svreinterpret_s32_u32(TYPE(svuint32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svint32) test_svreinterpret_s32_u64(TYPE(svuint64) op)
+TYPE(svint32) test_svreinterpret_s32_u64(TYPE(svuint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s32,_u64)(op);
}
@@ -1384,7 +1395,7 @@ TYPE(svint32) test_svreinterpret_s32_u64(TYPE(svuint64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x half> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svint32) test_svreinterpret_s32_f16(TYPE(svfloat16) op)
+TYPE(svint32) test_svreinterpret_s32_f16(TYPE(svfloat16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s32,_f16)(op);
}
@@ -1429,7 +1440,7 @@ TYPE(svint32) test_svreinterpret_s32_f16(TYPE(svfloat16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x float> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svint32) test_svreinterpret_s32_f32(TYPE(svfloat32) op)
+TYPE(svint32) test_svreinterpret_s32_f32(TYPE(svfloat32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s32,_f32)(op);
}
@@ -1475,7 +1486,7 @@ TYPE(svint32) test_svreinterpret_s32_f32(TYPE(svfloat32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x double> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svint32) test_svreinterpret_s32_f64(TYPE(svfloat64) op)
+TYPE(svint32) test_svreinterpret_s32_f64(TYPE(svfloat64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s32,_f64)(op);
}
@@ -1520,7 +1531,7 @@ TYPE(svint32) test_svreinterpret_s32_f64(TYPE(svfloat64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svint64) test_svreinterpret_s64_s8(TYPE(svint8) op)
+TYPE(svint64) test_svreinterpret_s64_s8(TYPE(svint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s64,_s8)(op);
}
@@ -1565,7 +1576,7 @@ TYPE(svint64) test_svreinterpret_s64_s8(TYPE(svint8) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svint64) test_svreinterpret_s64_s16(TYPE(svint16) op)
+TYPE(svint64) test_svreinterpret_s64_s16(TYPE(svint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s64,_s16)(op);
}
@@ -1610,7 +1621,7 @@ TYPE(svint64) test_svreinterpret_s64_s16(TYPE(svint16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svint64) test_svreinterpret_s64_s32(TYPE(svint32) op)
+TYPE(svint64) test_svreinterpret_s64_s32(TYPE(svint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s64,_s32)(op);
}
@@ -1647,7 +1658,7 @@ TYPE(svint64) test_svreinterpret_s64_s32(TYPE(svint32) op)
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[OP:%.*]]
//
-TYPE(svint64) test_svreinterpret_s64_s64(TYPE(svint64) op)
+TYPE(svint64) test_svreinterpret_s64_s64(TYPE(svint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s64,_s64)(op);
}
@@ -1692,7 +1703,7 @@ TYPE(svint64) test_svreinterpret_s64_s64(TYPE(svint64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svint64) test_svreinterpret_s64_u8(TYPE(svuint8) op)
+TYPE(svint64) test_svreinterpret_s64_u8(TYPE(svuint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s64,_u8)(op);
}
@@ -1737,7 +1748,7 @@ TYPE(svint64) test_svreinterpret_s64_u8(TYPE(svuint8) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svint64) test_svreinterpret_s64_u16(TYPE(svuint16) op)
+TYPE(svint64) test_svreinterpret_s64_u16(TYPE(svuint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s64,_u16)(op);
}
@@ -1782,7 +1793,7 @@ TYPE(svint64) test_svreinterpret_s64_u16(TYPE(svuint16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svint64) test_svreinterpret_s64_u32(TYPE(svuint32) op)
+TYPE(svint64) test_svreinterpret_s64_u32(TYPE(svuint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s64,_u32)(op);
}
@@ -1819,7 +1830,7 @@ TYPE(svint64) test_svreinterpret_s64_u32(TYPE(svuint32) op)
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[OP:%.*]]
//
-TYPE(svint64) test_svreinterpret_s64_u64(TYPE(svuint64) op)
+TYPE(svint64) test_svreinterpret_s64_u64(TYPE(svuint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s64,_u64)(op);
}
@@ -1864,7 +1875,7 @@ TYPE(svint64) test_svreinterpret_s64_u64(TYPE(svuint64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x half> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svint64) test_svreinterpret_s64_f16(TYPE(svfloat16) op)
+TYPE(svint64) test_svreinterpret_s64_f16(TYPE(svfloat16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s64,_f16)(op);
}
@@ -1909,7 +1920,7 @@ TYPE(svint64) test_svreinterpret_s64_f16(TYPE(svfloat16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x float> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svint64) test_svreinterpret_s64_f32(TYPE(svfloat32) op)
+TYPE(svint64) test_svreinterpret_s64_f32(TYPE(svfloat32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s64,_f32)(op);
}
@@ -1954,7 +1965,7 @@ TYPE(svint64) test_svreinterpret_s64_f32(TYPE(svfloat32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x double> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svint64) test_svreinterpret_s64_f64(TYPE(svfloat64) op)
+TYPE(svint64) test_svreinterpret_s64_f64(TYPE(svfloat64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_s64,_f64)(op);
}
@@ -1991,7 +2002,7 @@ TYPE(svint64) test_svreinterpret_s64_f64(TYPE(svfloat64) op)
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[OP:%.*]]
//
-TYPE(svuint8) test_svreinterpret_u8_s8(TYPE(svint8) op)
+TYPE(svuint8) test_svreinterpret_u8_s8(TYPE(svint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u8,_s8)(op);
}
@@ -2036,7 +2047,7 @@ TYPE(svuint8) test_svreinterpret_u8_s8(TYPE(svint8) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svuint8) test_svreinterpret_u8_s16(TYPE(svint16) op)
+TYPE(svuint8) test_svreinterpret_u8_s16(TYPE(svint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u8,_s16)(op);
}
@@ -2081,7 +2092,7 @@ TYPE(svuint8) test_svreinterpret_u8_s16(TYPE(svint16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svuint8) test_svreinterpret_u8_s32(TYPE(svint32) op)
+TYPE(svuint8) test_svreinterpret_u8_s32(TYPE(svint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u8,_s32)(op);
}
@@ -2126,7 +2137,7 @@ TYPE(svuint8) test_svreinterpret_u8_s32(TYPE(svint32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svuint8) test_svreinterpret_u8_s64(TYPE(svint64) op)
+TYPE(svuint8) test_svreinterpret_u8_s64(TYPE(svint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u8,_s64)(op);
}
@@ -2163,7 +2174,7 @@ TYPE(svuint8) test_svreinterpret_u8_s64(TYPE(svint64) op)
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[OP:%.*]]
//
-TYPE(svuint8) test_svreinterpret_u8_u8(TYPE(svuint8) op)
+TYPE(svuint8) test_svreinterpret_u8_u8(TYPE(svuint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u8,_u8)(op);
}
@@ -2208,7 +2219,7 @@ TYPE(svuint8) test_svreinterpret_u8_u8(TYPE(svuint8) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svuint8) test_svreinterpret_u8_u16(TYPE(svuint16) op)
+TYPE(svuint8) test_svreinterpret_u8_u16(TYPE(svuint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u8,_u16)(op);
}
@@ -2253,7 +2264,7 @@ TYPE(svuint8) test_svreinterpret_u8_u16(TYPE(svuint16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svuint8) test_svreinterpret_u8_u32(TYPE(svuint32) op)
+TYPE(svuint8) test_svreinterpret_u8_u32(TYPE(svuint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u8,_u32)(op);
}
@@ -2298,7 +2309,7 @@ TYPE(svuint8) test_svreinterpret_u8_u32(TYPE(svuint32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svuint8) test_svreinterpret_u8_u64(TYPE(svuint64) op)
+TYPE(svuint8) test_svreinterpret_u8_u64(TYPE(svuint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u8,_u64)(op);
}
@@ -2343,7 +2354,7 @@ TYPE(svuint8) test_svreinterpret_u8_u64(TYPE(svuint64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x half> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svuint8) test_svreinterpret_u8_f16(TYPE(svfloat16) op)
+TYPE(svuint8) test_svreinterpret_u8_f16(TYPE(svfloat16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u8,_f16)(op);
}
@@ -2388,7 +2399,7 @@ TYPE(svuint8) test_svreinterpret_u8_f16(TYPE(svfloat16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x float> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svuint8) test_svreinterpret_u8_f32(TYPE(svfloat32) op)
+TYPE(svuint8) test_svreinterpret_u8_f32(TYPE(svfloat32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u8,_f32)(op);
}
@@ -2433,7 +2444,7 @@ TYPE(svuint8) test_svreinterpret_u8_f32(TYPE(svfloat32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x double> [[OP:%.*]] to <vscale x 64 x i8>
// CPP-TUPLE4-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-TYPE(svuint8) test_svreinterpret_u8_f64(TYPE(svfloat64) op)
+TYPE(svuint8) test_svreinterpret_u8_f64(TYPE(svfloat64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u8,_f64)(op);
}
@@ -2478,7 +2489,7 @@ TYPE(svuint8) test_svreinterpret_u8_f64(TYPE(svfloat64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svuint16) test_svreinterpret_u16_s8(TYPE(svint8) op)
+TYPE(svuint16) test_svreinterpret_u16_s8(TYPE(svint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u16,_s8)(op);
}
@@ -2515,7 +2526,7 @@ TYPE(svuint16) test_svreinterpret_u16_s8(TYPE(svint8) op)
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[OP:%.*]]
//
-TYPE(svuint16) test_svreinterpret_u16_s16(TYPE(svint16) op)
+TYPE(svuint16) test_svreinterpret_u16_s16(TYPE(svint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u16,_s16)(op);
}
@@ -2560,7 +2571,7 @@ TYPE(svuint16) test_svreinterpret_u16_s16(TYPE(svint16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svuint16) test_svreinterpret_u16_s32(TYPE(svint32) op)
+TYPE(svuint16) test_svreinterpret_u16_s32(TYPE(svint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u16,_s32)(op);
}
@@ -2605,7 +2616,7 @@ TYPE(svuint16) test_svreinterpret_u16_s32(TYPE(svint32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svuint16) test_svreinterpret_u16_s64(TYPE(svint64) op)
+TYPE(svuint16) test_svreinterpret_u16_s64(TYPE(svint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u16,_s64)(op);
}
@@ -2650,7 +2661,7 @@ TYPE(svuint16) test_svreinterpret_u16_s64(TYPE(svint64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svuint16) test_svreinterpret_u16_u8(TYPE(svuint8) op)
+TYPE(svuint16) test_svreinterpret_u16_u8(TYPE(svuint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u16,_u8)(op);
}
@@ -2687,7 +2698,7 @@ TYPE(svuint16) test_svreinterpret_u16_u8(TYPE(svuint8) op)
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[OP:%.*]]
//
-TYPE(svuint16) test_svreinterpret_u16_u16(TYPE(svuint16) op)
+TYPE(svuint16) test_svreinterpret_u16_u16(TYPE(svuint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u16,_u16)(op);
}
@@ -2732,7 +2743,7 @@ TYPE(svuint16) test_svreinterpret_u16_u16(TYPE(svuint16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svuint16) test_svreinterpret_u16_u32(TYPE(svuint32) op)
+TYPE(svuint16) test_svreinterpret_u16_u32(TYPE(svuint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u16,_u32)(op);
}
@@ -2777,7 +2788,7 @@ TYPE(svuint16) test_svreinterpret_u16_u32(TYPE(svuint32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svuint16) test_svreinterpret_u16_u64(TYPE(svuint64) op)
+TYPE(svuint16) test_svreinterpret_u16_u64(TYPE(svuint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u16,_u64)(op);
}
@@ -2822,7 +2833,7 @@ TYPE(svuint16) test_svreinterpret_u16_u64(TYPE(svuint64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x half> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svuint16) test_svreinterpret_u16_f16(TYPE(svfloat16) op)
+TYPE(svuint16) test_svreinterpret_u16_f16(TYPE(svfloat16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u16,_f16)(op);
}
@@ -2867,7 +2878,7 @@ TYPE(svuint16) test_svreinterpret_u16_f16(TYPE(svfloat16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x float> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svuint16) test_svreinterpret_u16_f32(TYPE(svfloat32) op)
+TYPE(svuint16) test_svreinterpret_u16_f32(TYPE(svfloat32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u16,_f32)(op);
}
@@ -2912,7 +2923,7 @@ TYPE(svuint16) test_svreinterpret_u16_f32(TYPE(svfloat32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x double> [[OP:%.*]] to <vscale x 32 x i16>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-TYPE(svuint16) test_svreinterpret_u16_f64(TYPE(svfloat64) op)
+TYPE(svuint16) test_svreinterpret_u16_f64(TYPE(svfloat64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u16,_f64)(op);
}
@@ -2957,7 +2968,7 @@ TYPE(svuint16) test_svreinterpret_u16_f64(TYPE(svfloat64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svuint32) test_svreinterpret_u32_s8(TYPE(svint8) op)
+TYPE(svuint32) test_svreinterpret_u32_s8(TYPE(svint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u32,_s8)(op);
}
@@ -3002,7 +3013,7 @@ TYPE(svuint32) test_svreinterpret_u32_s8(TYPE(svint8) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svuint32) test_svreinterpret_u32_s16(TYPE(svint16) op)
+TYPE(svuint32) test_svreinterpret_u32_s16(TYPE(svint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u32,_s16)(op);
}
@@ -3039,7 +3050,7 @@ TYPE(svuint32) test_svreinterpret_u32_s16(TYPE(svint16) op)
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[OP:%.*]]
//
-TYPE(svuint32) test_svreinterpret_u32_s32(TYPE(svint32) op)
+TYPE(svuint32) test_svreinterpret_u32_s32(TYPE(svint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u32,_s32)(op);
}
@@ -3084,7 +3095,7 @@ TYPE(svuint32) test_svreinterpret_u32_s32(TYPE(svint32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svuint32) test_svreinterpret_u32_s64(TYPE(svint64) op)
+TYPE(svuint32) test_svreinterpret_u32_s64(TYPE(svint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u32,_s64)(op);
}
@@ -3129,7 +3140,7 @@ TYPE(svuint32) test_svreinterpret_u32_s64(TYPE(svint64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svuint32) test_svreinterpret_u32_u8(TYPE(svuint8) op)
+TYPE(svuint32) test_svreinterpret_u32_u8(TYPE(svuint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u32,_u8)(op);
}
@@ -3174,7 +3185,7 @@ TYPE(svuint32) test_svreinterpret_u32_u8(TYPE(svuint8) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svuint32) test_svreinterpret_u32_u16(TYPE(svuint16) op)
+TYPE(svuint32) test_svreinterpret_u32_u16(TYPE(svuint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u32,_u16)(op);
}
@@ -3211,7 +3222,7 @@ TYPE(svuint32) test_svreinterpret_u32_u16(TYPE(svuint16) op)
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[OP:%.*]]
//
-TYPE(svuint32) test_svreinterpret_u32_u32(TYPE(svuint32) op)
+TYPE(svuint32) test_svreinterpret_u32_u32(TYPE(svuint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u32,_u32)(op);
}
@@ -3256,7 +3267,7 @@ TYPE(svuint32) test_svreinterpret_u32_u32(TYPE(svuint32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svuint32) test_svreinterpret_u32_u64(TYPE(svuint64) op)
+TYPE(svuint32) test_svreinterpret_u32_u64(TYPE(svuint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u32,_u64)(op);
}
@@ -3301,7 +3312,7 @@ TYPE(svuint32) test_svreinterpret_u32_u64(TYPE(svuint64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x half> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svuint32) test_svreinterpret_u32_f16(TYPE(svfloat16) op)
+TYPE(svuint32) test_svreinterpret_u32_f16(TYPE(svfloat16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u32,_f16)(op);
}
@@ -3346,7 +3357,7 @@ TYPE(svuint32) test_svreinterpret_u32_f16(TYPE(svfloat16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x float> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svuint32) test_svreinterpret_u32_f32(TYPE(svfloat32) op)
+TYPE(svuint32) test_svreinterpret_u32_f32(TYPE(svfloat32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u32,_f32)(op);
}
@@ -3391,7 +3402,7 @@ TYPE(svuint32) test_svreinterpret_u32_f32(TYPE(svfloat32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x double> [[OP:%.*]] to <vscale x 16 x i32>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-TYPE(svuint32) test_svreinterpret_u32_f64(TYPE(svfloat64) op)
+TYPE(svuint32) test_svreinterpret_u32_f64(TYPE(svfloat64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u32,_f64)(op);
}
@@ -3436,7 +3447,7 @@ TYPE(svuint32) test_svreinterpret_u32_f64(TYPE(svfloat64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svuint64) test_svreinterpret_u64_s8(TYPE(svint8) op)
+TYPE(svuint64) test_svreinterpret_u64_s8(TYPE(svint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u64,_s8)(op);
}
@@ -3481,7 +3492,7 @@ TYPE(svuint64) test_svreinterpret_u64_s8(TYPE(svint8) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svuint64) test_svreinterpret_u64_s16(TYPE(svint16) op)
+TYPE(svuint64) test_svreinterpret_u64_s16(TYPE(svint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u64,_s16)(op);
}
@@ -3526,7 +3537,7 @@ TYPE(svuint64) test_svreinterpret_u64_s16(TYPE(svint16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svuint64) test_svreinterpret_u64_s32(TYPE(svint32) op)
+TYPE(svuint64) test_svreinterpret_u64_s32(TYPE(svint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u64,_s32)(op);
}
@@ -3563,7 +3574,7 @@ TYPE(svuint64) test_svreinterpret_u64_s32(TYPE(svint32) op)
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[OP:%.*]]
//
-TYPE(svuint64) test_svreinterpret_u64_s64(TYPE(svint64) op)
+TYPE(svuint64) test_svreinterpret_u64_s64(TYPE(svint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u64,_s64)(op);
}
@@ -3608,7 +3619,7 @@ TYPE(svuint64) test_svreinterpret_u64_s64(TYPE(svint64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svuint64) test_svreinterpret_u64_u8(TYPE(svuint8) op)
+TYPE(svuint64) test_svreinterpret_u64_u8(TYPE(svuint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u64,_u8)(op);
}
@@ -3653,7 +3664,7 @@ TYPE(svuint64) test_svreinterpret_u64_u8(TYPE(svuint8) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svuint64) test_svreinterpret_u64_u16(TYPE(svuint16) op)
+TYPE(svuint64) test_svreinterpret_u64_u16(TYPE(svuint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u64,_u16)(op);
}
@@ -3698,7 +3709,7 @@ TYPE(svuint64) test_svreinterpret_u64_u16(TYPE(svuint16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svuint64) test_svreinterpret_u64_u32(TYPE(svuint32) op)
+TYPE(svuint64) test_svreinterpret_u64_u32(TYPE(svuint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u64,_u32)(op);
}
@@ -3735,7 +3746,7 @@ TYPE(svuint64) test_svreinterpret_u64_u32(TYPE(svuint32) op)
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[OP:%.*]]
//
-TYPE(svuint64) test_svreinterpret_u64_u64(TYPE(svuint64) op)
+TYPE(svuint64) test_svreinterpret_u64_u64(TYPE(svuint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u64,_u64)(op);
}
@@ -3780,7 +3791,7 @@ TYPE(svuint64) test_svreinterpret_u64_u64(TYPE(svuint64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x half> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svuint64) test_svreinterpret_u64_f16(TYPE(svfloat16) op)
+TYPE(svuint64) test_svreinterpret_u64_f16(TYPE(svfloat16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u64,_f16)(op);
}
@@ -3825,7 +3836,7 @@ TYPE(svuint64) test_svreinterpret_u64_f16(TYPE(svfloat16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x float> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svuint64) test_svreinterpret_u64_f32(TYPE(svfloat32) op)
+TYPE(svuint64) test_svreinterpret_u64_f32(TYPE(svfloat32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u64,_f32)(op);
}
@@ -3870,7 +3881,7 @@ TYPE(svuint64) test_svreinterpret_u64_f32(TYPE(svfloat32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x double> [[OP:%.*]] to <vscale x 8 x i64>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-TYPE(svuint64) test_svreinterpret_u64_f64(TYPE(svfloat64) op)
+TYPE(svuint64) test_svreinterpret_u64_f64(TYPE(svfloat64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_u64,_f64)(op);
}
@@ -3915,7 +3926,7 @@ TYPE(svuint64) test_svreinterpret_u64_f64(TYPE(svfloat64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 32 x half>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
-TYPE(svfloat16) test_svreinterpret_f16_s8(TYPE(svint8) op)
+TYPE(svfloat16) test_svreinterpret_f16_s8(TYPE(svint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f16,_s8)(op);
}
@@ -3960,7 +3971,7 @@ TYPE(svfloat16) test_svreinterpret_f16_s8(TYPE(svint8) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 32 x half>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
-TYPE(svfloat16) test_svreinterpret_f16_s16(TYPE(svint16) op)
+TYPE(svfloat16) test_svreinterpret_f16_s16(TYPE(svint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f16,_s16)(op);
}
@@ -4005,7 +4016,7 @@ TYPE(svfloat16) test_svreinterpret_f16_s16(TYPE(svint16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 32 x half>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
-TYPE(svfloat16) test_svreinterpret_f16_s32(TYPE(svint32) op)
+TYPE(svfloat16) test_svreinterpret_f16_s32(TYPE(svint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f16,_s32)(op);
}
@@ -4050,7 +4061,7 @@ TYPE(svfloat16) test_svreinterpret_f16_s32(TYPE(svint32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 32 x half>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
-TYPE(svfloat16) test_svreinterpret_f16_s64(TYPE(svint64) op)
+TYPE(svfloat16) test_svreinterpret_f16_s64(TYPE(svint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f16,_s64)(op);
}
@@ -4095,7 +4106,7 @@ TYPE(svfloat16) test_svreinterpret_f16_s64(TYPE(svint64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 32 x half>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
-TYPE(svfloat16) test_svreinterpret_f16_u8(TYPE(svuint8) op)
+TYPE(svfloat16) test_svreinterpret_f16_u8(TYPE(svuint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f16,_u8)(op);
}
@@ -4140,7 +4151,7 @@ TYPE(svfloat16) test_svreinterpret_f16_u8(TYPE(svuint8) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 32 x half>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
-TYPE(svfloat16) test_svreinterpret_f16_u16(TYPE(svuint16) op)
+TYPE(svfloat16) test_svreinterpret_f16_u16(TYPE(svuint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f16,_u16)(op);
}
@@ -4185,7 +4196,7 @@ TYPE(svfloat16) test_svreinterpret_f16_u16(TYPE(svuint16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 32 x half>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
-TYPE(svfloat16) test_svreinterpret_f16_u32(TYPE(svuint32) op)
+TYPE(svfloat16) test_svreinterpret_f16_u32(TYPE(svuint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f16,_u32)(op);
}
@@ -4230,7 +4241,7 @@ TYPE(svfloat16) test_svreinterpret_f16_u32(TYPE(svuint32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 32 x half>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
-TYPE(svfloat16) test_svreinterpret_f16_u64(TYPE(svuint64) op)
+TYPE(svfloat16) test_svreinterpret_f16_u64(TYPE(svuint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f16,_u64)(op);
}
@@ -4267,7 +4278,7 @@ TYPE(svfloat16) test_svreinterpret_f16_u64(TYPE(svuint64) op)
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 32 x half> [[OP:%.*]]
//
-TYPE(svfloat16) test_svreinterpret_f16_f16(TYPE(svfloat16) op)
+TYPE(svfloat16) test_svreinterpret_f16_f16(TYPE(svfloat16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f16,_f16)(op);
}
@@ -4312,7 +4323,7 @@ TYPE(svfloat16) test_svreinterpret_f16_f16(TYPE(svfloat16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x float> [[OP:%.*]] to <vscale x 32 x half>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
-TYPE(svfloat16) test_svreinterpret_f16_f32(TYPE(svfloat32) op)
+TYPE(svfloat16) test_svreinterpret_f16_f32(TYPE(svfloat32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f16,_f32)(op);
}
@@ -4357,7 +4368,7 @@ TYPE(svfloat16) test_svreinterpret_f16_f32(TYPE(svfloat32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x double> [[OP:%.*]] to <vscale x 32 x half>
// CPP-TUPLE4-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
-TYPE(svfloat16) test_svreinterpret_f16_f64(TYPE(svfloat64) op)
+TYPE(svfloat16) test_svreinterpret_f16_f64(TYPE(svfloat64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f16,_f64)(op);
}
@@ -4402,7 +4413,7 @@ TYPE(svfloat16) test_svreinterpret_f16_f64(TYPE(svfloat64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 16 x float>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
-TYPE(svfloat32) test_svreinterpret_f32_s8(TYPE(svint8) op)
+TYPE(svfloat32) test_svreinterpret_f32_s8(TYPE(svint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f32,_s8)(op);
}
@@ -4447,7 +4458,7 @@ TYPE(svfloat32) test_svreinterpret_f32_s8(TYPE(svint8) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 16 x float>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
-TYPE(svfloat32) test_svreinterpret_f32_s16(TYPE(svint16) op)
+TYPE(svfloat32) test_svreinterpret_f32_s16(TYPE(svint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f32,_s16)(op);
}
@@ -4492,7 +4503,7 @@ TYPE(svfloat32) test_svreinterpret_f32_s16(TYPE(svint16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 16 x float>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
-TYPE(svfloat32) test_svreinterpret_f32_s32(TYPE(svint32) op)
+TYPE(svfloat32) test_svreinterpret_f32_s32(TYPE(svint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f32,_s32)(op);
}
@@ -4537,7 +4548,7 @@ TYPE(svfloat32) test_svreinterpret_f32_s32(TYPE(svint32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 16 x float>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
-TYPE(svfloat32) test_svreinterpret_f32_s64(TYPE(svint64) op)
+TYPE(svfloat32) test_svreinterpret_f32_s64(TYPE(svint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f32,_s64)(op);
}
@@ -4582,7 +4593,7 @@ TYPE(svfloat32) test_svreinterpret_f32_s64(TYPE(svint64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 16 x float>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
-TYPE(svfloat32) test_svreinterpret_f32_u8(TYPE(svuint8) op)
+TYPE(svfloat32) test_svreinterpret_f32_u8(TYPE(svuint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f32,_u8)(op);
}
@@ -4627,7 +4638,7 @@ TYPE(svfloat32) test_svreinterpret_f32_u8(TYPE(svuint8) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 16 x float>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
-TYPE(svfloat32) test_svreinterpret_f32_u16(TYPE(svuint16) op)
+TYPE(svfloat32) test_svreinterpret_f32_u16(TYPE(svuint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f32,_u16)(op);
}
@@ -4672,7 +4683,7 @@ TYPE(svfloat32) test_svreinterpret_f32_u16(TYPE(svuint16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 16 x float>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
-TYPE(svfloat32) test_svreinterpret_f32_u32(TYPE(svuint32) op)
+TYPE(svfloat32) test_svreinterpret_f32_u32(TYPE(svuint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f32,_u32)(op);
}
@@ -4717,7 +4728,7 @@ TYPE(svfloat32) test_svreinterpret_f32_u32(TYPE(svuint32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 16 x float>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
-TYPE(svfloat32) test_svreinterpret_f32_u64(TYPE(svuint64) op)
+TYPE(svfloat32) test_svreinterpret_f32_u64(TYPE(svuint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f32,_u64)(op);
}
@@ -4762,7 +4773,7 @@ TYPE(svfloat32) test_svreinterpret_f32_u64(TYPE(svuint64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x half> [[OP:%.*]] to <vscale x 16 x float>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
-TYPE(svfloat32) test_svreinterpret_f32_f16(TYPE(svfloat16) op)
+TYPE(svfloat32) test_svreinterpret_f32_f16(TYPE(svfloat16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f32,_f16)(op);
}
@@ -4799,7 +4810,7 @@ TYPE(svfloat32) test_svreinterpret_f32_f16(TYPE(svfloat16) op)
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 16 x float> [[OP:%.*]]
//
-TYPE(svfloat32) test_svreinterpret_f32_f32(TYPE(svfloat32) op)
+TYPE(svfloat32) test_svreinterpret_f32_f32(TYPE(svfloat32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f32,_f32)(op);
}
@@ -4844,7 +4855,7 @@ TYPE(svfloat32) test_svreinterpret_f32_f32(TYPE(svfloat32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x double> [[OP:%.*]] to <vscale x 16 x float>
// CPP-TUPLE4-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
-TYPE(svfloat32) test_svreinterpret_f32_f64(TYPE(svfloat64) op)
+TYPE(svfloat32) test_svreinterpret_f32_f64(TYPE(svfloat64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f32,_f64)(op);
}
@@ -4889,7 +4900,7 @@ TYPE(svfloat32) test_svreinterpret_f32_f64(TYPE(svfloat64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 8 x double>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
-TYPE(svfloat64) test_svreinterpret_f64_s8(TYPE(svint8) op)
+TYPE(svfloat64) test_svreinterpret_f64_s8(TYPE(svint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f64,_s8)(op);
}
@@ -4934,7 +4945,7 @@ TYPE(svfloat64) test_svreinterpret_f64_s8(TYPE(svint8) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 8 x double>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
-TYPE(svfloat64) test_svreinterpret_f64_s16(TYPE(svint16) op)
+TYPE(svfloat64) test_svreinterpret_f64_s16(TYPE(svint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f64,_s16)(op);
}
@@ -4979,7 +4990,7 @@ TYPE(svfloat64) test_svreinterpret_f64_s16(TYPE(svint16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 8 x double>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
-TYPE(svfloat64) test_svreinterpret_f64_s32(TYPE(svint32) op)
+TYPE(svfloat64) test_svreinterpret_f64_s32(TYPE(svint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f64,_s32)(op);
}
@@ -5024,7 +5035,7 @@ TYPE(svfloat64) test_svreinterpret_f64_s32(TYPE(svint32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 8 x double>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
-TYPE(svfloat64) test_svreinterpret_f64_s64(TYPE(svint64) op)
+TYPE(svfloat64) test_svreinterpret_f64_s64(TYPE(svint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f64,_s64)(op);
}
@@ -5069,7 +5080,7 @@ TYPE(svfloat64) test_svreinterpret_f64_s64(TYPE(svint64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[OP:%.*]] to <vscale x 8 x double>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
-TYPE(svfloat64) test_svreinterpret_f64_u8(TYPE(svuint8) op)
+TYPE(svfloat64) test_svreinterpret_f64_u8(TYPE(svuint8) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f64,_u8)(op);
}
@@ -5114,7 +5125,7 @@ TYPE(svfloat64) test_svreinterpret_f64_u8(TYPE(svuint8) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[OP:%.*]] to <vscale x 8 x double>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
-TYPE(svfloat64) test_svreinterpret_f64_u16(TYPE(svuint16) op)
+TYPE(svfloat64) test_svreinterpret_f64_u16(TYPE(svuint16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f64,_u16)(op);
}
@@ -5159,7 +5170,7 @@ TYPE(svfloat64) test_svreinterpret_f64_u16(TYPE(svuint16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[OP:%.*]] to <vscale x 8 x double>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
-TYPE(svfloat64) test_svreinterpret_f64_u32(TYPE(svuint32) op)
+TYPE(svfloat64) test_svreinterpret_f64_u32(TYPE(svuint32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f64,_u32)(op);
}
@@ -5204,7 +5215,7 @@ TYPE(svfloat64) test_svreinterpret_f64_u32(TYPE(svuint32) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[OP:%.*]] to <vscale x 8 x double>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
-TYPE(svfloat64) test_svreinterpret_f64_u64(TYPE(svuint64) op)
+TYPE(svfloat64) test_svreinterpret_f64_u64(TYPE(svuint64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f64,_u64)(op);
}
@@ -5249,7 +5260,7 @@ TYPE(svfloat64) test_svreinterpret_f64_u64(TYPE(svuint64) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 32 x half> [[OP:%.*]] to <vscale x 8 x double>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
-TYPE(svfloat64) test_svreinterpret_f64_f16(TYPE(svfloat16) op)
+TYPE(svfloat64) test_svreinterpret_f64_f16(TYPE(svfloat16) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f64,_f16)(op);
}
@@ -5294,7 +5305,7 @@ TYPE(svfloat64) test_svreinterpret_f64_f16(TYPE(svfloat16) op)
// CPP-TUPLE4-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x float> [[OP:%.*]] to <vscale x 8 x double>
// CPP-TUPLE4-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
-TYPE(svfloat64) test_svreinterpret_f64_f32(TYPE(svfloat32) op)
+TYPE(svfloat64) test_svreinterpret_f64_f32(TYPE(svfloat32) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f64,_f32)(op);
}
@@ -5331,7 +5342,7 @@ TYPE(svfloat64) test_svreinterpret_f64_f32(TYPE(svfloat32) op)
// CPP-TUPLE4-NEXT: entry:
// CPP-TUPLE4-NEXT: ret <vscale x 8 x double> [[OP:%.*]]
//
-TYPE(svfloat64) test_svreinterpret_f64_f64(TYPE(svfloat64) op)
+TYPE(svfloat64) test_svreinterpret_f64_f64(TYPE(svfloat64) op) MODE_ATTR
{
return SVE_ACLE_FUNC(svreinterpret_f64,_f64)(op);
}
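
Every hunk above makes the same mechanical change: MODE_ATTR is appended to each test function so a single source file can be compiled both as a plain SVE test and as a streaming-compatible SME test. The macro definition sits near the top of the file, outside this excerpt; a minimal sketch of the likely convention (the guard name TEST_SME is an assumption — only MODE_ATTR itself appears in the diff):

    // Hypothetical definition; the real test would select the mode via a -D flag.
    #ifdef TEST_SME
    #define MODE_ATTR __arm_streaming_compatible
    #else
    #define MODE_ATTR
    #endif

Parameterizing the attribute this way lets one file cover both lowerings, which would explain why the dedicated from-streaming-mode test below is deleted in the same commit.
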
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_reinterpret_from_streaming_mode.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_reinterpret_from_streaming_mode.c
deleted file mode 100644
index f27875836193..000000000000
--- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_reinterpret_from_streaming_mode.c
+++ /dev/null
@@ -1,35 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// REQUIRES: aarch64-registered-target
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -S -O1 -Werror -Wall -o /dev/null %s
-
-// Note: We need to run this test with '-O1' because oddly enough the svreinterpret is always inlined at -O0.
-
-#include <arm_sve.h>
-
-#ifdef SVE_OVERLOADED_FORMS
-// A simple used,unused... macro, long enough to represent any SVE builtin.
-#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
-#else
-#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
-#endif
-
-// Test that svreinterpret is inlined (because it should be streaming-compatible)
-__attribute__((target("sme")))
-// CHECK-LABEL: @test_svreinterpret_s16_s8_from_streaming_mode(
-// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[OP:%.*]] to <vscale x 8 x i16>
-// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
-//
-// CPP-CHECK-LABEL: @_Z45test_svreinterpret_s16_s8_from_streaming_modeu10__SVInt8_t(
-// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[OP:%.*]] to <vscale x 8 x i16>
-// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
-//
-svint16_t test_svreinterpret_s16_s8_from_streaming_mode(svint8_t op) __arm_streaming {
- return SVE_ACLE_FUNC(svreinterpret_s16,_s8,,)(op);
-}
-
diff --git a/clang/test/CodeGen/aarch64-sve-vector-subscript-ops.c b/clang/test/CodeGen/aarch64-sve-vector-subscript-ops.c
index fb60c6d100ce..52a05d010de9 100644
--- a/clang/test/CodeGen/aarch64-sve-vector-subscript-ops.c
+++ b/clang/test/CodeGen/aarch64-sve-vector-subscript-ops.c
@@ -88,3 +88,25 @@ float subscript_float32(svfloat32_t a, size_t b) {
double subscript_float64(svfloat64_t a, size_t b) {
return a[b];
}
+
+// CHECK-LABEL: @subscript_write_float32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[VECINS:%.*]] = insertelement <vscale x 4 x float> [[A:%.*]], float 1.000000e+00, i64 [[B:%.*]]
+// CHECK-NEXT: ret <vscale x 4 x float> [[VECINS]]
+//
+svfloat32_t subscript_write_float32(svfloat32_t a, size_t b) {
+ a[b] = 1.0f;
+ return a;
+}
+
+// CHECK-LABEL: @subscript_read_write_float32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[VECEXT:%.*]] = extractelement <vscale x 4 x float> [[A:%.*]], i64 [[B:%.*]]
+// CHECK-NEXT: [[ADD:%.*]] = fadd float [[VECEXT]], 1.000000e+00
+// CHECK-NEXT: [[VECINS:%.*]] = insertelement <vscale x 4 x float> [[A]], float [[ADD]], i64 [[B]]
+// CHECK-NEXT: ret <vscale x 4 x float> [[VECINS]]
+//
+svfloat32_t subscript_read_write_float32(svfloat32_t a, size_t b) {
+ a[b] += 1.0f;
+ return a;
+}
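
The added checks pin down that subscript stores on scalable vectors stay in registers: a[b] = 1.0f becomes a single insertelement, and a[b] += 1.0f becomes extractelement/fadd/insertelement, with no stack round-trip. A standalone sketch of the same pattern, assuming an SVE-enabled clang (the function name here is illustrative, not from the test):

    #include <arm_sve.h>
    #include <stddef.h>

    // v[i] = x lowers to: insertelement <vscale x 4 x float> %v, float %x, i64 %i
    svfloat32_t set_lane(svfloat32_t v, size_t i, float x) {
      v[i] = x;
      return v;
    }
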
diff --git a/clang/test/CodeGen/aarch64-varargs.c b/clang/test/CodeGen/aarch64-varargs.c
index ee4e88eda4ef..8952d6980a8d 100644
--- a/clang/test/CodeGen/aarch64-varargs.c
+++ b/clang/test/CodeGen/aarch64-varargs.c
@@ -63,10 +63,8 @@ __int128 aligned_int(void) {
// CHECK: [[VAARG_ON_STACK]]
// CHECK: [[STACK:%[a-z_0-9]+]] = load ptr, ptr @the_list
-// CHECK: [[STACKINT:%[a-z_0-9]+]] = ptrtoint ptr [[STACK]] to i64
-// CHECK: [[ALIGN_STACK:%[a-z_0-9]+]] = add i64 [[STACKINT]], 15
-// CHECK: [[ALIGNED_STACK_INT:%[a-z_0-9]+]] = and i64 [[ALIGN_STACK]], -16
-// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9]+]] = inttoptr i64 [[ALIGNED_STACK_INT]] to ptr
+// CHECK: [[STACKINC:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[STACK]], i32 15
+// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9.]+]] = call ptr @llvm.ptrmask.p0.i64(ptr [[STACKINC]], i64 -16)
// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[ALIGNED_STACK_PTR]], i64 16
// CHECK: store ptr [[NEW_STACK]], ptr @the_list
// CHECK: br label %[[VAARG_END]]
@@ -377,10 +375,8 @@ underaligned_int128 underaligned_int128_test(void) {
// CHECK: [[VAARG_ON_STACK]]
// CHECK: [[STACK:%[a-z_0-9]+]] = load ptr, ptr @the_list
-// CHECK: [[STACKINT:%[a-z_0-9]+]] = ptrtoint ptr [[STACK]] to i64
-// CHECK: [[ALIGN_STACK:%[a-z_0-9]+]] = add i64 [[STACKINT]], 15
-// CHECK: [[ALIGNED_STACK_INT:%[a-z_0-9]+]] = and i64 [[ALIGN_STACK]], -16
-// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9]+]] = inttoptr i64 [[ALIGNED_STACK_INT]] to ptr
+// CHECK: [[STACKINC:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[STACK]], i32 15
+// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9.]+]] = call ptr @llvm.ptrmask.p0.i64(ptr [[STACKINC]], i64 -16)
// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[ALIGNED_STACK_PTR]], i64 16
// CHECK: store ptr [[NEW_STACK]], ptr @the_list
// CHECK: br label %[[VAARG_END]]
@@ -414,10 +410,8 @@ overaligned_int128 overaligned_int128_test(void) {
// CHECK: [[VAARG_ON_STACK]]
// CHECK: [[STACK:%[a-z_0-9]+]] = load ptr, ptr @the_list
-// CHECK: [[STACKINT:%[a-z_0-9]+]] = ptrtoint ptr [[STACK]] to i64
-// CHECK: [[ALIGN_STACK:%[a-z_0-9]+]] = add i64 [[STACKINT]], 15
-// CHECK: [[ALIGNED_STACK_INT:%[a-z_0-9]+]] = and i64 [[ALIGN_STACK]], -16
-// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9]+]] = inttoptr i64 [[ALIGNED_STACK_INT]] to ptr
+// CHECK: [[STACKINC:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[STACK]], i32 15
+// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9.]+]] = call ptr @llvm.ptrmask.p0.i64(ptr [[STACKINC]], i64 -16)
// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[ALIGNED_STACK_PTR]], i64 16
// CHECK: store ptr [[NEW_STACK]], ptr @the_list
// CHECK: br label %[[VAARG_END]]
@@ -688,10 +682,8 @@ overaligned_int_struct_member overaligned_int_struct_member_test(void) {
// CHECK: [[VAARG_ON_STACK]]
// CHECK: [[STACK:%[a-z_0-9]+]] = load ptr, ptr @the_list
-// CHECK: [[STACKINT:%[a-z_0-9]+]] = ptrtoint ptr [[STACK]] to i64
-// CHECK: [[ALIGN_STACK:%[a-z_0-9]+]] = add i64 [[STACKINT]], 15
-// CHECK: [[ALIGNED_STACK_INT:%[a-z_0-9]+]] = and i64 [[ALIGN_STACK]], -16
-// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9]+]] = inttoptr i64 [[ALIGNED_STACK_INT]] to ptr
+// CHECK: [[STACKINC:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[STACK]], i32 15
+// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9.]+]] = call ptr @llvm.ptrmask.p0.i64(ptr [[STACKINC]], i64 -16)
// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[ALIGNED_STACK_PTR]], i64 16
// CHECK: store ptr [[NEW_STACK]], ptr @the_list
// CHECK: br label %[[VAARG_END]]
@@ -756,10 +748,8 @@ overaligned_long_long_struct_member overaligned_long_long_struct_member_test(voi
// CHECK: [[VAARG_ON_STACK]]
// CHECK: [[STACK:%[a-z_0-9]+]] = load ptr, ptr @the_list
-// CHECK: [[STACKINT:%[a-z_0-9]+]] = ptrtoint ptr [[STACK]] to i64
-// CHECK: [[ALIGN_STACK:%[a-z_0-9]+]] = add i64 [[STACKINT]], 15
-// CHECK: [[ALIGNED_STACK_INT:%[a-z_0-9]+]] = and i64 [[ALIGN_STACK]], -16
-// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9]+]] = inttoptr i64 [[ALIGNED_STACK_INT]] to ptr
+// CHECK: [[STACKINC:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[STACK]], i32 15
+// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9.]+]] = call ptr @llvm.ptrmask.p0.i64(ptr [[STACKINC]], i64 -16)
// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[ALIGNED_STACK_PTR]], i64 16
// CHECK: store ptr [[NEW_STACK]], ptr @the_list
// CHECK: br label %[[VAARG_END]]
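
Each hunk in this file is the same substitution: the ptrtoint/add/and/inttoptr sequence that rounded the va_arg stack pointer up to a 16-byte boundary is replaced by a byte GEP of 15 followed by llvm.ptrmask with -16, which yields the identical address while keeping the value in pointer form. The arithmetic both lowerings compute, sketched in C (helper name is illustrative):

    #include <stdint.h>

    // Round p up to the next 16-byte boundary: (p + 15) & ~15.
    void *align16(void *p) {
      return (void *)(((uintptr_t)p + 15) & (uintptr_t)-16);
    }
    // The new IR does the +15 as a GEP and the mask via llvm.ptrmask,
    // so the value never passes through an integer.
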
diff --git a/clang/test/CodeGen/assume_attr.c b/clang/test/CodeGen/assume_attr.c
deleted file mode 100644
index 338a625188af..000000000000
--- a/clang/test/CodeGen/assume_attr.c
+++ /dev/null
@@ -1,58 +0,0 @@
-// RUN: %clang_cc1 -emit-llvm -triple i386-linux-gnu %s -o - | FileCheck %s
-// RUN: %clang_cc1 -x c -emit-pch -o %t %s
-// RUN: %clang_cc1 -include-pch %t %s -emit-llvm -o - | FileCheck %s
-
-// TODO: for "foo" and "bar", "after" is not added as it appears "after" the first use or definition respectively. There might be a way to allow that.
-
-// CHECK: define{{.*}} void @bar() #0
-// CHECK: define{{.*}} void @baz() #1
-// CHECK: declare{{.*}} void @foo() #2
-// CHECK: attributes #0
-// CHECK-SAME: "llvm.assume"="bar:before1,bar:before2,bar:before3,bar:def1,bar:def2"
-// CHECK: attributes #1
-// CHECK-SAME: "llvm.assume"="baz:before1,baz:before2,baz:before3,baz:def1,baz:def2,baz:after"
-// CHECK: attributes #2
-// CHECK-SAME: "llvm.assume"="foo:before1,foo:before2,foo:before3"
-
-#ifndef HEADER
-#define HEADER
-
-/// foo: declarations only
-
-__attribute__((assume("foo:before1"))) void foo(void);
-
-__attribute__((assume("foo:before2")))
-__attribute__((assume("foo:before3"))) void
-foo(void);
-
-/// baz: static function declarations and a definition
-
-__attribute__((assume("baz:before1"))) static void baz(void);
-
-__attribute__((assume("baz:before2")))
-__attribute__((assume("baz:before3"))) static void
-baz(void);
-
-// Definition
-__attribute__((assume("baz:def1,baz:def2"))) static void baz(void) { foo(); }
-
-__attribute__((assume("baz:after"))) static void baz(void);
-
-/// bar: external function declarations and a definition
-
-__attribute__((assume("bar:before1"))) void bar(void);
-
-__attribute__((assume("bar:before2")))
-__attribute__((assume("bar:before3"))) void
-bar(void);
-
-// Definition
-__attribute__((assume("bar:def1,bar:def2"))) void bar(void) { baz(); }
-
-__attribute__((assume("bar:after"))) void bar(void);
-
-/// back to foo
-
-__attribute__((assume("foo:after"))) void foo(void);
-
-#endif
diff --git a/clang/test/CodeGen/attr-counted-by.c b/clang/test/CodeGen/attr-counted-by.c
index de30a00138ac..79922eb4159f 100644
--- a/clang/test/CodeGen/attr-counted-by.c
+++ b/clang/test/CodeGen/attr-counted-by.c
@@ -1098,7 +1098,7 @@ int test12_a, test12_b;
// SANITIZE-WITH-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB20:[0-9]+]], i64 0) #[[ATTR10]], !nosanitize [[META2]]
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
// SANITIZE-WITH-ATTR: handler.type_mismatch6:
-// SANITIZE-WITH-ATTR-NEXT: tail call void @__ubsan_handle_type_mismatch_v1_abort(ptr nonnull @[[GLOB21:[0-9]+]], i64 ptrtoint (ptr getelementptr inbounds ([[STRUCT_ANON_5:%.*]], ptr @test12_foo, i64 1, i32 0, i32 0, i32 0) to i64)) #[[ATTR10]], !nosanitize [[META2]]
+// SANITIZE-WITH-ATTR-NEXT: tail call void @__ubsan_handle_type_mismatch_v1_abort(ptr nonnull @[[GLOB21:[0-9]+]], i64 ptrtoint (ptr getelementptr inbounds (i8, ptr @test12_foo, i64 4) to i64)) #[[ATTR10]], !nosanitize [[META2]]
// SANITIZE-WITH-ATTR-NEXT: unreachable, !nosanitize [[META2]]
//
// NO-SANITIZE-WITH-ATTR-LABEL: define dso_local noundef i32 @test12(
@@ -1111,7 +1111,7 @@ int test12_a, test12_b;
// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [6 x i32], ptr [[BAZ]], i64 0, i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 [[TMP0]], ptr @test12_b, align 4, !tbaa [[TBAA2]]
-// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr inbounds ([[STRUCT_ANON_5:%.*]], ptr @test12_foo, i64 1, i32 0, i32 0, i32 0), align 4, !tbaa [[TBAA2]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr inbounds (i8, ptr @test12_foo, i64 4), align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: store i32 [[TMP1]], ptr @test12_a, align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: br label [[FOR_COND:%.*]]
// NO-SANITIZE-WITH-ATTR: for.cond:
@@ -1140,7 +1140,7 @@ int test12_a, test12_b;
// SANITIZE-WITHOUT-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB4:[0-9]+]], i64 0) #[[ATTR8]], !nosanitize [[META9]]
// SANITIZE-WITHOUT-ATTR-NEXT: unreachable, !nosanitize [[META9]]
// SANITIZE-WITHOUT-ATTR: handler.type_mismatch6:
-// SANITIZE-WITHOUT-ATTR-NEXT: tail call void @__ubsan_handle_type_mismatch_v1_abort(ptr nonnull @[[GLOB5:[0-9]+]], i64 ptrtoint (ptr getelementptr inbounds ([[STRUCT_ANON_5:%.*]], ptr @test12_foo, i64 1, i32 0, i32 0, i32 0) to i64)) #[[ATTR8]], !nosanitize [[META9]]
+// SANITIZE-WITHOUT-ATTR-NEXT: tail call void @__ubsan_handle_type_mismatch_v1_abort(ptr nonnull @[[GLOB5:[0-9]+]], i64 ptrtoint (ptr getelementptr inbounds (i8, ptr @test12_foo, i64 4) to i64)) #[[ATTR8]], !nosanitize [[META9]]
// SANITIZE-WITHOUT-ATTR-NEXT: unreachable, !nosanitize [[META9]]
//
// NO-SANITIZE-WITHOUT-ATTR-LABEL: define dso_local noundef i32 @test12(
@@ -1153,7 +1153,7 @@ int test12_a, test12_b;
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [6 x i32], ptr [[BAZ]], i64 0, i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 [[TMP0]], ptr @test12_b, align 4, !tbaa [[TBAA2]]
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr inbounds ([[STRUCT_ANON_5:%.*]], ptr @test12_foo, i64 1, i32 0, i32 0, i32 0), align 4, !tbaa [[TBAA2]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr inbounds (i8, ptr @test12_foo, i64 4), align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 [[TMP1]], ptr @test12_a, align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: br label [[FOR_COND:%.*]]
// NO-SANITIZE-WITHOUT-ATTR: for.cond:
@@ -1315,7 +1315,7 @@ int test14(int idx) {
// NO-SANITIZE-WITH-ATTR-SAME: i32 noundef [[IDX:%.*]]) local_unnamed_addr #[[ATTR4]] {
// NO-SANITIZE-WITH-ATTR-NEXT: entry:
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[IDX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr getelementptr inbounds ([[STRUCT_ANON_8:%.*]], ptr @__const.test15.foo, i64 1, i32 0), i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr getelementptr inbounds (i8, ptr @__const.test15.foo, i64 8), i64 0, i64 [[IDXPROM]]
// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITH-ATTR-NEXT: ret i32 [[TMP0]]
//
@@ -1336,7 +1336,7 @@ int test14(int idx) {
// NO-SANITIZE-WITHOUT-ATTR-SAME: i32 noundef [[IDX:%.*]]) local_unnamed_addr #[[ATTR1]] {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[IDX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr getelementptr inbounds ([[STRUCT_ANON_8:%.*]], ptr @__const.test15.foo, i64 1, i32 0), i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr getelementptr inbounds (i8, ptr @__const.test15.foo, i64 8), i64 0, i64 [[IDXPROM]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret i32 [[TMP0]]
//
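
These CHECK updates follow a canonicalization of constant GEP expressions: struct-typed forms such as getelementptr inbounds (%struct.anon.5, ptr @test12_foo, i64 1, i32 0, i32 0, i32 0) now print as plain byte offsets, getelementptr inbounds (i8, ptr @test12_foo, i64 4). The byte offset must equal whatever the typed indices addressed; a quick offset sanity check with a stand-in layout (the real structs are anonymous, and this assumes 4-byte int):

    #include <stddef.h>

    struct pair { int a; int b; };  // stand-in; b sits at byte offset 4
    _Static_assert(offsetof(struct pair, b) == 4,
                   "i8-based GEP offset matches the old struct-indexed GEP");
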
diff --git a/clang/test/CodeGen/attr-cpuspecific.c b/clang/test/CodeGen/attr-cpuspecific.c
index 2c3e6931800c..628892d5809b 100644
--- a/clang/test/CodeGen/attr-cpuspecific.c
+++ b/clang/test/CodeGen/attr-cpuspecific.c
@@ -75,8 +75,8 @@ void TwoVersions(void);
// LINUX: define weak_odr ptr @TwoVersions.resolver()
// LINUX: call void @__cpu_indicator_init
// LINUX: %[[FEAT_INIT:.+]] = load i32, ptr getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, ptr @__cpu_model, i32 0, i32 3, i32 0), align 4
-// LINUX: %[[FEAT_JOIN:.+]] = and i32 %[[FEAT_INIT]], 59754495
-// LINUX: %[[FEAT_CHECK:.+]] = icmp eq i32 %[[FEAT_JOIN]], 59754495
+// LINUX: %[[FEAT_JOIN:.+]] = and i32 %[[FEAT_INIT]], 9422847
+// LINUX: %[[FEAT_CHECK:.+]] = icmp eq i32 %[[FEAT_JOIN]], 9422847
// LINUX: ret ptr @TwoVersions.Z
// LINUX: ret ptr @TwoVersions.S
// LINUX: call void @llvm.trap
@@ -85,8 +85,8 @@ void TwoVersions(void);
// WINDOWS: define weak_odr dso_local void @TwoVersions() comdat
// WINDOWS: call void @__cpu_indicator_init()
// WINDOWS: %[[FEAT_INIT:.+]] = load i32, ptr getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, ptr @__cpu_model, i32 0, i32 3, i32 0), align 4
-// WINDOWS: %[[FEAT_JOIN:.+]] = and i32 %[[FEAT_INIT]], 59754495
-// WINDOWS: %[[FEAT_CHECK:.+]] = icmp eq i32 %[[FEAT_JOIN]], 59754495
+// WINDOWS: %[[FEAT_JOIN:.+]] = and i32 %[[FEAT_INIT]], 9422847
+// WINDOWS: %[[FEAT_CHECK:.+]] = icmp eq i32 %[[FEAT_JOIN]], 9422847
// WINDOWS: call void @TwoVersions.Z()
// WINDOWS-NEXT: ret void
// WINDOWS: call void @TwoVersions.S()
@@ -354,7 +354,7 @@ void OrderDispatchUsageSpecific(void) {}
// CHECK: attributes #[[S]] = {{.*}}"target-features"="+avx,+cmov,+crc32,+cx16,+cx8,+f16c,+fsgsbase,+fxsr,+mmx,+pclmul,+popcnt,+rdrnd,+sahf,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt"
// CHECK-SAME: "tune-cpu"="ivybridge"
-// CHECK: attributes #[[K]] = {{.*}}"target-features"="+adx,+aes,+avx,+avx2,+avx512cd,+avx512er,+avx512f,+avx512pf,+bmi,+bmi2,+cmov,+crc32,+cx16,+cx8,+evex512,+f16c,+fma,+fsgsbase,+fxsr,+invpcid,+lzcnt,+mmx,+movbe,+pclmul,+popcnt,+prefetchwt1,+prfchw,+rdrnd,+rdseed,+sahf,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt"
+// CHECK: attributes #[[K]] = {{.*}}"target-features"="+adx,+aes,+avx,+avx2,+avx512cd,+avx512f,+bmi,+bmi2,+cmov,+crc32,+cx16,+cx8,+evex512,+f16c,+fma,+fsgsbase,+fxsr,+invpcid,+lzcnt,+mmx,+movbe,+pclmul,+popcnt,+prfchw,+rdrnd,+rdseed,+sahf,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt"
// CHECK-SAME: "tune-cpu"="knl"
// CHECK: attributes #[[O]] = {{.*}}"target-features"="+cmov,+cx16,+cx8,+fxsr,+mmx,+movbe,+sahf,+sse,+sse2,+sse3,+ssse3,+x87"
// CHECK-SAME: "tune-cpu"="atom"
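
The resolver's required-feature mask drops from 59754495 to 9422847 in both hunks. The two constants differ in exactly two bits, which lines up with avx512er and avx512pf disappearing from the KNL feature string at the end of this file; treating those as __cpu_model bits 24 and 25 is an inference from the arithmetic, not something the diff states:

    // 59754495 ^ 9422847 == 0x3000000: exactly bits 24 and 25 removed.
    _Static_assert((59754495u ^ 9422847u) == ((1u << 24) | (1u << 25)),
                   "mask change removes exactly two feature bits");
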
diff --git a/clang/test/CodeGen/attr-target-x86.c b/clang/test/CodeGen/attr-target-x86.c
index 304398678216..3c2b511157f9 100644
--- a/clang/test/CodeGen/attr-target-x86.c
+++ b/clang/test/CodeGen/attr-target-x86.c
@@ -59,9 +59,9 @@ void __attribute__((target("avx10.1-512"))) avx10_1_512(void) {}
// CHECK: #0 = {{.*}}"target-cpu"="i686" "target-features"="+cmov,+cx8,+x87" "tune-cpu"="i686"
// CHECK: #1 = {{.*}}"target-cpu"="ivybridge" "target-features"="+avx,+cmov,+crc32,+cx16,+cx8,+f16c,+fsgsbase,+fxsr,+mmx,+pclmul,+popcnt,+rdrnd,+sahf,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt"
// CHECK-NOT: tune-cpu
-// CHECK: #2 = {{.*}}"target-cpu"="i686" "target-features"="+cmov,+cx8,+x87,-aes,-avx,-avx10.1-256,-avx10.1-512,-avx2,-avx512bf16,-avx512bitalg,-avx512bw,-avx512cd,-avx512dq,-avx512er,-avx512f,-avx512fp16,-avx512ifma,-avx512pf,-avx512vbmi,-avx512vbmi2,-avx512vl,-avx512vnni,-avx512vp2intersect,-avx512vpopcntdq,-avxifma,-avxneconvert,-avxvnni,-avxvnniint16,-avxvnniint8,-f16c,-fma,-fma4,-gfni,-kl,-pclmul,-sha,-sha512,-sm3,-sm4,-sse2,-sse3,-sse4.1,-sse4.2,-sse4a,-ssse3,-vaes,-vpclmulqdq,-widekl,-xop" "tune-cpu"="i686"
+// CHECK: #2 = {{.*}}"target-cpu"="i686" "target-features"="+cmov,+cx8,+x87,-aes,-avx,-avx10.1-256,-avx10.1-512,-avx2,-avx512bf16,-avx512bitalg,-avx512bw,-avx512cd,-avx512dq,-avx512f,-avx512fp16,-avx512ifma,-avx512vbmi,-avx512vbmi2,-avx512vl,-avx512vnni,-avx512vp2intersect,-avx512vpopcntdq,-avxifma,-avxneconvert,-avxvnni,-avxvnniint16,-avxvnniint8,-f16c,-fma,-fma4,-gfni,-kl,-pclmul,-sha,-sha512,-sm3,-sm4,-sse2,-sse3,-sse4.1,-sse4.2,-sse4a,-ssse3,-vaes,-vpclmulqdq,-widekl,-xop" "tune-cpu"="i686"
// CHECK: #3 = {{.*}}"target-cpu"="i686" "target-features"="+cmov,+crc32,+cx8,+mmx,+popcnt,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87" "tune-cpu"="i686"
-// CHECK: #4 = {{.*}}"target-cpu"="i686" "target-features"="+cmov,+cx8,+x87,-avx,-avx10.1-256,-avx10.1-512,-avx2,-avx512bf16,-avx512bitalg,-avx512bw,-avx512cd,-avx512dq,-avx512er,-avx512f,-avx512fp16,-avx512ifma,-avx512pf,-avx512vbmi,-avx512vbmi2,-avx512vl,-avx512vnni,-avx512vp2intersect,-avx512vpopcntdq,-avxifma,-avxneconvert,-avxvnni,-avxvnniint16,-avxvnniint8,-f16c,-fma,-fma4,-sha512,-sm3,-sm4,-sse4.1,-sse4.2,-vaes,-vpclmulqdq,-xop" "tune-cpu"="i686"
+// CHECK: #4 = {{.*}}"target-cpu"="i686" "target-features"="+cmov,+cx8,+x87,-avx,-avx10.1-256,-avx10.1-512,-avx2,-avx512bf16,-avx512bitalg,-avx512bw,-avx512cd,-avx512dq,-avx512f,-avx512fp16,-avx512ifma,-avx512vbmi,-avx512vbmi2,-avx512vl,-avx512vnni,-avx512vp2intersect,-avx512vpopcntdq,-avxifma,-avxneconvert,-avxvnni,-avxvnniint16,-avxvnniint8,-f16c,-fma,-fma4,-sha512,-sm3,-sm4,-sse4.1,-sse4.2,-vaes,-vpclmulqdq,-xop" "tune-cpu"="i686"
// CHECK: #5 = {{.*}}"target-cpu"="ivybridge" "target-features"="+avx,+cmov,+crc32,+cx16,+cx8,+f16c,+fsgsbase,+fxsr,+mmx,+pclmul,+popcnt,+rdrnd,+sahf,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt,-aes,-avx10.1-256,-avx10.1-512,-vaes"
// CHECK-NOT: tune-cpu
// CHECK: #6 = {{.*}}"target-cpu"="i686" "target-features"="+cmov,+cx8,+x87,-3dnow,-3dnowa,-mmx"
diff --git a/clang/test/CodeGen/builtins-wasm.c b/clang/test/CodeGen/builtins-wasm.c
index bcb15969de1c..93a6ab06081c 100644
--- a/clang/test/CodeGen/builtins-wasm.c
+++ b/clang/test/CodeGen/builtins-wasm.c
@@ -11,6 +11,7 @@ typedef unsigned char u8x16 __attribute((vector_size(16)));
typedef unsigned short u16x8 __attribute((vector_size(16)));
typedef unsigned int u32x4 __attribute((vector_size(16)));
typedef unsigned long long u64x2 __attribute((vector_size(16)));
+typedef __fp16 f16x8 __attribute((vector_size(16)));
typedef float f32x4 __attribute((vector_size(16)));
typedef double f64x2 __attribute((vector_size(16)));
@@ -813,6 +814,17 @@ void store_f16_f32(float val, __fp16 *addr) {
// WEBASSEMBLY-NEXT: ret
}
+f16x8 splat_f16x8(float a) {
+ // WEBASSEMBLY: %0 = tail call <8 x half> @llvm.wasm.splat.f16x8(float %a)
+ // WEBASSEMBLY-NEXT: ret <8 x half> %0
+ return __builtin_wasm_splat_f16x8(a);
+}
+
+float extract_lane_f16x8(f16x8 a, int i) {
+ // WEBASSEMBLY: %0 = tail call float @llvm.wasm.extract.lane.f16x8(<8 x half> %a, i32 %i)
+ // WEBASSEMBLY-NEXT: ret float %0
+ return __builtin_wasm_extract_lane_f16x8(a, i);
+}
__externref_t externref_null() {
return __builtin_wasm_ref_null_extern();
// WEBASSEMBLY: tail call ptr addrspace(10) @llvm.wasm.ref.null.extern()
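
The new hunk adds coverage for two half-precision lane builtins. A usage sketch combining them (typedef copied from the test; assumes a target where the corresponding WebAssembly half-precision proposal is enabled, and the function name is illustrative):

    typedef __fp16 f16x8 __attribute__((vector_size(16)));

    // Broadcast x into all eight f16 lanes, then read lane 0 back as float.
    float roundtrip_lane0(float x) {
      f16x8 v = __builtin_wasm_splat_f16x8(x);
      return __builtin_wasm_extract_lane_f16x8(v, 0);
    }
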
diff --git a/clang/test/CodeGen/darwin-target-variant.c b/clang/test/CodeGen/darwin-target-variant.c
index 36caaaec1bdb..9f4b36a790db 100644
--- a/clang/test/CodeGen/darwin-target-variant.c
+++ b/clang/test/CodeGen/darwin-target-variant.c
@@ -2,5 +2,5 @@
// CHECK: !llvm.module.flags = !{!0, !1, !2
// CHECK: !0 = !{i32 2, !"SDK Version", [2 x i32] [i32 11, i32 1]}
-// CHECK: !1 = !{i32 4, !"darwin.target_variant.triple", !"x86_64-apple-ios14-macabi"}
+// CHECK: !1 = !{i32 2, !"darwin.target_variant.triple", !"x86_64-apple-ios14-macabi"}
// CHECK: !2 = !{i32 2, !"darwin.target_variant.SDK Version", [2 x i32] [i32 14, i32 1]}
diff --git a/clang/test/CodeGen/fat-lto-objects.c b/clang/test/CodeGen/fat-lto-objects.c
index b50567c024fc..36a73684e7bf 100644
--- a/clang/test/CodeGen/fat-lto-objects.c
+++ b/clang/test/CodeGen/fat-lto-objects.c
@@ -62,7 +62,7 @@
// ELF: .llvm.lto
-// ASM: .section .llvm.lto,"e",@progbits
+// ASM: .section .llvm.lto,"e",@llvm_lto
// ASM-NEXT: .Lllvm.embedded.object:
// ASM-NEXT: .asciz "BC
// ASM-NEXT: .size .Lllvm.embedded.object
diff --git a/clang/test/CodeGen/function-target-features.c b/clang/test/CodeGen/function-target-features.c
index 0d8bfc7e4e44..d6a73ff8224b 100644
--- a/clang/test/CodeGen/function-target-features.c
+++ b/clang/test/CodeGen/function-target-features.c
@@ -4,7 +4,7 @@
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s -target-feature +avx | FileCheck %s -check-prefix=AVX-FEATURE
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s -target-feature +avx | FileCheck %s -check-prefix=AVX-NO-CPU
-// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s -target-feature +avx512f -target-feature +avx512er | FileCheck %s -check-prefix=TWO-AVX
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s -target-feature +avx512f -target-feature +avx512bw | FileCheck %s -check-prefix=TWO-AVX
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s -target-cpu corei7 | FileCheck %s -check-prefix=CORE-CPU
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s -target-cpu corei7 -target-feature +avx | FileCheck %s -check-prefix=CORE-CPU-AND-FEATURES
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s -target-cpu x86-64 | FileCheck %s -check-prefix=X86-64-CPU
@@ -17,7 +17,7 @@ void foo(void) {}
// AVX-FEATURE: "target-features"{{.*}}+avx
// AVX-NO-CPU-NOT: target-cpu
-// TWO-AVX: "target-features"={{.*}}+avx512er{{.*}}+avx512f
+// TWO-AVX: "target-features"={{.*}}+avx512bw{{.*}}+avx512f
// CORE-CPU: "target-cpu"="corei7"
// CORE-CPU-AND-FEATURES: "target-cpu"="corei7" "target-features"={{.*}}+avx
// X86-64-CPU: "target-cpu"="x86-64"
diff --git a/clang/test/CodeGen/functions.c b/clang/test/CodeGen/functions.c
index 1bbaa80d653c..0cc999aa4916 100644
--- a/clang/test/CodeGen/functions.c
+++ b/clang/test/CodeGen/functions.c
@@ -61,3 +61,15 @@ static void test9_helper(void) {}
void test9(void) {
(void) test9_helper;
}
+
+// PR88917: don't crash
+int b();
+
+int main() {
+ return b(b);
+ // CHECK: call i32 @b(ptr noundef @b)
+}
+int b(int (*f)()){
+ return 0;
+}
+// CHECK-LABEL: define{{.*}} i32 @b(ptr noundef %f)
diff --git a/clang/test/CodeGen/nofpclass.c b/clang/test/CodeGen/nofpclass.c
index dd90d02f7759..fc4c64f9b921 100644
--- a/clang/test/CodeGen/nofpclass.c
+++ b/clang/test/CodeGen/nofpclass.c
@@ -172,7 +172,7 @@ double2 defined_func_v2f64(double2 a, double2 b, double2 c) {
// CLFINITEONLY-LABEL: define dso_local nofpclass(nan inf) float @call_extern_func
// CLFINITEONLY-SAME: (float noundef nofpclass(nan inf) [[A:%.*]], double noundef nofpclass(nan inf) [[B:%.*]], half noundef nofpclass(nan inf) [[C:%.*]]) local_unnamed_addr #[[ATTR3:[0-9]+]] {
// CLFINITEONLY-NEXT: entry:
-// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) float @extern_func(float noundef nofpclass(nan inf) [[A]], double noundef nofpclass(nan inf) [[B]], half noundef nofpclass(nan inf) [[C]]) #[[ATTR10:[0-9]+]]
+// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) float @extern_func(float noundef nofpclass(nan inf) [[A]], double noundef nofpclass(nan inf) [[B]], half noundef nofpclass(nan inf) [[C]]) #[[ATTR11:[0-9]+]]
// CLFINITEONLY-NEXT: ret float [[CALL]]
//
// NONANS: Function Attrs: noinline nounwind optnone
@@ -249,7 +249,7 @@ float call_extern_func(float a, double b, _Float16 c) {
// CLFINITEONLY-LABEL: define dso_local nofpclass(nan inf) double @call_extern_func_vec
// CLFINITEONLY-SAME: (double noundef nofpclass(nan inf) [[A_COERCE:%.*]], <2 x double> noundef nofpclass(nan inf) [[B:%.*]], i32 noundef [[C_COERCE:%.*]]) local_unnamed_addr #[[ATTR5:[0-9]+]] {
// CLFINITEONLY-NEXT: entry:
-// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) double @extern_func_vec(double noundef nofpclass(nan inf) [[A_COERCE]], <2 x double> noundef nofpclass(nan inf) [[B]], i32 noundef [[C_COERCE]]) #[[ATTR10]]
+// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) double @extern_func_vec(double noundef nofpclass(nan inf) [[A_COERCE]], <2 x double> noundef nofpclass(nan inf) [[B]], i32 noundef [[C_COERCE]]) #[[ATTR11]]
// CLFINITEONLY-NEXT: ret double [[CALL]]
//
// NONANS: Function Attrs: noinline nounwind optnone
@@ -389,7 +389,7 @@ float2 call_extern_func_vec(float2 a, double2 b, half2 c) {
// CLFINITEONLY-LABEL: define dso_local nofpclass(nan inf) <2 x float> @defined_complex_func
// CLFINITEONLY-SAME: (<2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], double noundef nofpclass(nan inf) [[B_COERCE0:%.*]], double noundef nofpclass(nan inf) [[B_COERCE1:%.*]], <2 x half> noundef nofpclass(nan inf) [[C_COERCE:%.*]]) local_unnamed_addr #[[ATTR6:[0-9]+]] {
// CLFINITEONLY-NEXT: entry:
-// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) <2 x float> @extern_complex(<2 x float> noundef nofpclass(nan inf) [[A_COERCE]], double noundef nofpclass(nan inf) [[B_COERCE0]], double noundef nofpclass(nan inf) [[B_COERCE1]], <2 x half> noundef nofpclass(nan inf) [[C_COERCE]]) #[[ATTR10]]
+// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) <2 x float> @extern_complex(<2 x float> noundef nofpclass(nan inf) [[A_COERCE]], double noundef nofpclass(nan inf) [[B_COERCE0]], double noundef nofpclass(nan inf) [[B_COERCE1]], <2 x half> noundef nofpclass(nan inf) [[C_COERCE]]) #[[ATTR11]]
// CLFINITEONLY-NEXT: ret <2 x float> [[CALL]]
//
// NONANS: Function Attrs: noinline nounwind optnone
@@ -927,12 +927,14 @@ _Complex _Float16 defined_complex_func_f16_ret(_Complex _Float16 c) {
// CLFINITEONLY-NEXT: [[CF16_REAL:%.*]] = load half, ptr [[CF16]], align 8
// CLFINITEONLY-NEXT: [[CF16_IMAGP:%.*]] = getelementptr inbounds i8, ptr [[CF16]], i64 2
// CLFINITEONLY-NEXT: [[CF16_IMAG:%.*]] = load half, ptr [[CF16_IMAGP]], align 2
+// CLFINITEONLY-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR12:[0-9]+]]
// CLFINITEONLY-NEXT: [[INDIRECT_ARG_TEMP_IMAGP:%.*]] = getelementptr inbounds i8, ptr [[INDIRECT_ARG_TEMP]], i64 8
// CLFINITEONLY-NEXT: store double [[CF64_COERCE0]], ptr [[INDIRECT_ARG_TEMP]], align 8
// CLFINITEONLY-NEXT: store double [[CF64_COERCE1]], ptr [[INDIRECT_ARG_TEMP_IMAGP]], align 8
// CLFINITEONLY-NEXT: [[COERCE5_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x half> poison, half [[CF16_REAL]], i64 0
// CLFINITEONLY-NEXT: [[COERCE5_SROA_0_2_VEC_INSERT:%.*]] = insertelement <2 x half> [[COERCE5_SROA_0_0_VEC_INSERT]], half [[CF16_IMAG]], i64 1
-// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) float (float, ...) @variadic(float noundef nofpclass(nan inf) [[F32]], double noundef nofpclass(nan inf) [[CONV]], double noundef nofpclass(nan inf) [[F64]], half noundef nofpclass(nan inf) [[F16]], double noundef nofpclass(nan inf) [[V2F32_COERCE]], <2 x double> noundef nofpclass(nan inf) [[V2F64]], i32 noundef [[V2F16_COERCE]], <2 x float> noundef nofpclass(nan inf) [[CF32_COERCE]], ptr noundef nonnull byval({ double, double }) align 8 [[INDIRECT_ARG_TEMP]], <2 x half> noundef nofpclass(nan inf) [[COERCE5_SROA_0_2_VEC_INSERT]]) #[[ATTR10]]
+// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) float (float, ...) @variadic(float noundef nofpclass(nan inf) [[F32]], double noundef nofpclass(nan inf) [[CONV]], double noundef nofpclass(nan inf) [[F64]], half noundef nofpclass(nan inf) [[F16]], double noundef nofpclass(nan inf) [[V2F32_COERCE]], <2 x double> noundef nofpclass(nan inf) [[V2F64]], i32 noundef [[V2F16_COERCE]], <2 x float> noundef nofpclass(nan inf) [[CF32_COERCE]], ptr noundef nonnull byval({ double, double }) align 8 [[INDIRECT_ARG_TEMP]], <2 x half> noundef nofpclass(nan inf) [[COERCE5_SROA_0_2_VEC_INSERT]]) #[[ATTR11]]
+// CLFINITEONLY-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR12]]
// CLFINITEONLY-NEXT: ret float [[CALL]]
//
// NONANS: Function Attrs: noinline nounwind optnone
@@ -1178,12 +1180,14 @@ float call_variadic(float f32, double f64, _Float16 f16,
// CLFINITEONLY-NEXT: [[CF16_REAL:%.*]] = load half, ptr [[CF16]], align 8
// CLFINITEONLY-NEXT: [[CF16_IMAGP:%.*]] = getelementptr inbounds i8, ptr [[CF16]], i64 2
// CLFINITEONLY-NEXT: [[CF16_IMAG:%.*]] = load half, ptr [[CF16_IMAGP]], align 2
+// CLFINITEONLY-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR12]]
// CLFINITEONLY-NEXT: [[INDIRECT_ARG_TEMP_IMAGP:%.*]] = getelementptr inbounds i8, ptr [[INDIRECT_ARG_TEMP]], i64 8
// CLFINITEONLY-NEXT: store double [[CF64_COERCE0]], ptr [[INDIRECT_ARG_TEMP]], align 8
// CLFINITEONLY-NEXT: store double [[CF64_COERCE1]], ptr [[INDIRECT_ARG_TEMP_IMAGP]], align 8
// CLFINITEONLY-NEXT: [[COERCE5_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x half> poison, half [[CF16_REAL]], i64 0
// CLFINITEONLY-NEXT: [[COERCE5_SROA_0_2_VEC_INSERT:%.*]] = insertelement <2 x half> [[COERCE5_SROA_0_0_VEC_INSERT]], half [[CF16_IMAG]], i64 1
-// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) float (float, ...) [[FPTR]](float noundef nofpclass(nan inf) [[F32]], double noundef nofpclass(nan inf) [[CONV]], double noundef nofpclass(nan inf) [[F64]], half noundef nofpclass(nan inf) [[F16]], double noundef nofpclass(nan inf) [[V2F32_COERCE]], <2 x double> noundef nofpclass(nan inf) [[V2F64]], i32 noundef [[V2F16_COERCE]], <2 x float> noundef nofpclass(nan inf) [[CF32_COERCE]], ptr noundef nonnull byval({ double, double }) align 8 [[INDIRECT_ARG_TEMP]], <2 x half> noundef nofpclass(nan inf) [[COERCE5_SROA_0_2_VEC_INSERT]]) #[[ATTR10]]
+// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) float (float, ...) [[FPTR]](float noundef nofpclass(nan inf) [[F32]], double noundef nofpclass(nan inf) [[CONV]], double noundef nofpclass(nan inf) [[F64]], half noundef nofpclass(nan inf) [[F16]], double noundef nofpclass(nan inf) [[V2F32_COERCE]], <2 x double> noundef nofpclass(nan inf) [[V2F64]], i32 noundef [[V2F16_COERCE]], <2 x float> noundef nofpclass(nan inf) [[CF32_COERCE]], ptr noundef nonnull byval({ double, double }) align 8 [[INDIRECT_ARG_TEMP]], <2 x half> noundef nofpclass(nan inf) [[COERCE5_SROA_0_2_VEC_INSERT]]) #[[ATTR11]]
+// CLFINITEONLY-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR12]]
// CLFINITEONLY-NEXT: ret float [[CALL]]
//
// NONANS: Function Attrs: noinline nounwind optnone
@@ -1364,9 +1368,9 @@ extern __m256d extern_m256d(__m256d, ...);
//
// CLFINITEONLY: Function Attrs: convergent norecurse nounwind
// CLFINITEONLY-LABEL: define dso_local nofpclass(nan inf) <4 x double> @call_m256d
-// CLFINITEONLY-SAME: (<4 x double> noundef nofpclass(nan inf) [[X:%.*]]) local_unnamed_addr #[[ATTR8:[0-9]+]] {
+// CLFINITEONLY-SAME: (<4 x double> noundef nofpclass(nan inf) [[X:%.*]]) local_unnamed_addr #[[ATTR9:[0-9]+]] {
// CLFINITEONLY-NEXT: entry:
-// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) <4 x double> (<4 x double>, ...) @extern_m256d(<4 x double> noundef nofpclass(nan inf) [[X]], <4 x double> noundef nofpclass(nan inf) [[X]]) #[[ATTR10]]
+// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) <4 x double> (<4 x double>, ...) @extern_m256d(<4 x double> noundef nofpclass(nan inf) [[X]], <4 x double> noundef nofpclass(nan inf) [[X]]) #[[ATTR11]]
// CLFINITEONLY-NEXT: ret <4 x double> [[CALL]]
//
// NONANS: Function Attrs: noinline nounwind optnone
@@ -1407,9 +1411,9 @@ __m256d call_m256d(__m256d x) {
//
// CLFINITEONLY: Function Attrs: convergent norecurse nounwind
// CLFINITEONLY-LABEL: define dso_local nofpclass(nan inf) <25 x double> @call_matrix
-// CLFINITEONLY-SAME: (<25 x double> noundef nofpclass(nan inf) [[X:%.*]]) local_unnamed_addr #[[ATTR9:[0-9]+]] {
+// CLFINITEONLY-SAME: (<25 x double> noundef nofpclass(nan inf) [[X:%.*]]) local_unnamed_addr #[[ATTR10:[0-9]+]] {
// CLFINITEONLY-NEXT: entry:
-// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) <25 x double> @extern_matrix(<25 x double> noundef nofpclass(nan inf) [[X]]) #[[ATTR10]]
+// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) <25 x double> @extern_matrix(<25 x double> noundef nofpclass(nan inf) [[X]]) #[[ATTR11]]
// CLFINITEONLY-NEXT: ret <25 x double> [[CALL]]
//
// NONANS: Function Attrs: noinline nounwind optnone
diff --git a/clang/test/CodeGen/target-builtin-noerror.c b/clang/test/CodeGen/target-builtin-noerror.c
index b438e50848a4..2e16fd8b9fe4 100644
--- a/clang/test/CodeGen/target-builtin-noerror.c
+++ b/clang/test/CodeGen/target-builtin-noerror.c
@@ -68,8 +68,6 @@ void verifyfeaturestrings(void) {
(void)__builtin_cpu_supports("avx512bw");
(void)__builtin_cpu_supports("avx512dq");
(void)__builtin_cpu_supports("avx512cd");
- (void)__builtin_cpu_supports("avx512er");
- (void)__builtin_cpu_supports("avx512pf");
(void)__builtin_cpu_supports("avx512vbmi");
(void)__builtin_cpu_supports("avx512ifma");
(void)__builtin_cpu_supports("avx5124vnniw");
diff --git a/clang/test/CodeGenCXX/assume_attr.cpp b/clang/test/CodeGenCXX/assume_attr.cpp
index dbe76501377c..962dcc470f67 100644
--- a/clang/test/CodeGenCXX/assume_attr.cpp
+++ b/clang/test/CodeGenCXX/assume_attr.cpp
@@ -8,77 +8,77 @@
/// foo: declarations only
-__attribute__((assume("foo:before1"))) void foo();
+[[omp::assume("foo:before1")]] void foo();
-__attribute__((assume("foo:before2")))
-__attribute__((assume("foo:before3"))) void
+[[omp::assume("foo:before2")]]
+[[omp::assume("foo:before3")]] void
foo();
/// baz: static function declarations and a definition
-__attribute__((assume("baz:before1"))) static void baz();
+[[omp::assume("baz:before1")]] static void baz();
-__attribute__((assume("baz:before2")))
-__attribute__((assume("baz:before3"))) static void
+[[omp::assume("baz:before2")]]
+[[omp::assume("baz:before3")]] static void
baz();
// Definition
-__attribute__((assume("baz:def1,baz:def2"))) static void baz() { foo(); }
+[[omp::assume("baz:def1,baz:def2")]] static void baz() { foo(); }
-__attribute__((assume("baz:after"))) static void baz();
+[[omp::assume("baz:after")]] static void baz();
/// bar: external function declarations and a definition
-__attribute__((assume("bar:before1"))) void bar();
+[[omp::assume("bar:before1")]] void bar();
-__attribute__((assume("bar:before2")))
-__attribute__((assume("bar:before3"))) void
+[[omp::assume("bar:before2")]]
+[[omp::assume("bar:before3")]] void
bar();
// Definition
-__attribute__((assume("bar:def1,bar:def2"))) void bar() { baz(); }
+[[omp::assume("bar:def1,bar:def2")]] void bar() { baz(); }
-__attribute__((assume("bar:after"))) void bar();
+[[omp::assume("bar:after")]] void bar();
/// back to foo
-__attribute__((assume("foo:after"))) void foo();
+[[omp::assume("foo:after")]] void foo();
/// class tests
class C {
- __attribute__((assume("C:private_method"))) void private_method();
- __attribute__((assume("C:private_static"))) static void private_static();
+ [[omp::assume("C:private_method")]] void private_method();
+ [[omp::assume("C:private_static")]] static void private_static();
public:
- __attribute__((assume("C:public_method1"))) void public_method();
- __attribute__((assume("C:public_static1"))) static void public_static();
+ [[omp::assume("C:public_method1")]] void public_method();
+ [[omp::assume("C:public_static1")]] static void public_static();
};
-__attribute__((assume("C:public_method2"))) void C::public_method() {
+[[omp::assume("C:public_method2")]] void C::public_method() {
private_method();
}
-__attribute__((assume("C:public_static2"))) void C::public_static() {
+[[omp::assume("C:public_static2")]] void C::public_static() {
private_static();
}
/// template tests
template <typename T>
-__attribute__((assume("template_func<T>"))) void template_func() {}
+[[omp::assume("template_func<T>")]] void template_func() {}
template <>
-__attribute__((assume("template_func<float>"))) void template_func<float>() {}
+[[omp::assume("template_func<float>")]] void template_func<float>() {}
template <>
void template_func<int>() {}
template <typename T>
struct S {
- __attribute__((assume("S<T>::method"))) void method();
+ [[omp::assume("S<T>::method")]] void method();
};
template <>
-__attribute__((assume("S<float>::method"))) void S<float>::method() {}
+[[omp::assume("S<float>::method")]] void S<float>::method() {}
template <>
void S<int>::method() {}
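
The rewrite above moves the test from the GNU spelling __attribute__((assume("..."))) to the C++11-attribute spelling [[omp::assume("...")]], which attaches a free-form OpenMP assumption string to a function declaration. A minimal sketch contrasting that with the unrelated C++23 statement attribute that now owns the bare name, assuming a compiler with C++23 [[assume]] support (the assumption string, as in the test, is arbitrary):

[[omp::assume("lib:pure")]] void compute();  // function-level OpenMP assumption

int twice(int x) {
  [[assume(x >= 0)]];  // C++23 statement-level assumption on an expression
  return 2 * x;
}
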
diff --git a/clang/test/CodeGenCXX/atomicinit.cpp b/clang/test/CodeGenCXX/atomicinit.cpp
index f2398b020621..a568f17b90d0 100644
--- a/clang/test/CodeGenCXX/atomicinit.cpp
+++ b/clang/test/CodeGenCXX/atomicinit.cpp
@@ -86,7 +86,7 @@ namespace PR18097 {
};
// CHECK-LABEL: define {{.*}} @__cxx_global_var_init
// CHECK: call void @_ZN7PR180977dynamic1XC1Ei(ptr {{[^,]*}} @_ZN7PR180977dynamic1yE, i32 noundef 4)
- // CHECK: store i32 5, ptr getelementptr inbounds ({{.*}}, ptr @_ZN7PR180977dynamic1yE, i32 0, i32 1)
+ // CHECK: store i32 5, ptr getelementptr inbounds (i8, ptr @_ZN7PR180977dynamic1yE, i32 4)
Y y = { X(4), 5 };
}
@@ -110,7 +110,7 @@ namespace PR18097 {
// CHECK-LABEL: define {{.*}} @__cxx_global_var_init
// CHECK: tail call void @llvm.memcpy.p0.p0.i32(ptr{{.*}} @_ZN7PR180978constant2y2E, ptr{{.*}} @_ZN7PR180978constantL1xE, i32 3, i1 false)
// CHECK: %0 = load i32, ptr @_ZN7PR180978constant1zE
- // CHECK: store i32 %0, ptr getelementptr inbounds (%"struct.PR18097::constant::Y", ptr @_ZN7PR180978constant2y2E, i32 0, i32 1)
+ // CHECK: store i32 %0, ptr getelementptr inbounds (i8, ptr @_ZN7PR180978constant2y2E, i32 4)
int z;
constexpr X x{1};
Y y2 = { x, z };
diff --git a/clang/test/CodeGenCXX/auto-var-init.cpp b/clang/test/CodeGenCXX/auto-var-init.cpp
index 7803ed5b633f..e1568bee136e 100644
--- a/clang/test/CodeGenCXX/auto-var-init.cpp
+++ b/clang/test/CodeGenCXX/auto-var-init.cpp
@@ -1346,7 +1346,7 @@ TEST_UNINIT(base, base);
// PATTERN-O0: call void @llvm.memcpy{{.*}} @__const.test_base_uninit.uninit{{.+}}), !annotation [[AUTO_INIT]]
// ZERO-LABEL: @test_base_uninit()
// ZERO-O0: call void @llvm.memset{{.*}}, i8 0,{{.+}}), !annotation [[AUTO_INIT]]
-// ZERO-O1: store ptr getelementptr inbounds inrange(-16, 16) ({ [4 x ptr] }, ptr @_ZTV4base, i64 0, i32 0, i64 2), {{.*}}, align 8
+// ZERO-O1: store ptr getelementptr inbounds inrange(-16, 16) (i8, ptr @_ZTV4base, i64 16), {{.*}}, align 8
// ZERO-O1-NOT: !annotation
TEST_BRACES(base, base);
@@ -1367,7 +1367,7 @@ TEST_UNINIT(derived, derived);
// ZERO-LABEL: @test_derived_uninit()
// ZERO-O0: call void @llvm.memset{{.*}}, i8 0, {{.+}}), !annotation [[AUTO_INIT]]
// ZERO-O1: store i64 0, {{.*}} align 8, !annotation [[AUTO_INIT]]
-// ZERO-O1: store ptr getelementptr inbounds inrange(-16, 16) ({ [4 x ptr] }, ptr @_ZTV7derived, i64 0, i32 0, i64 2), {{.*}} align 8
+// ZERO-O1: store ptr getelementptr inbounds inrange(-16, 16) (i8, ptr @_ZTV7derived, i64 16), {{.*}} align 8
TEST_BRACES(derived, derived);
// CHECK-LABEL: @test_derived_braces()
diff --git a/clang/test/CodeGenCXX/builtin-amdgcn-fence.cpp b/clang/test/CodeGenCXX/builtin-amdgcn-fence.cpp
index 630e416b893f..3af5a21ba0cd 100644
--- a/clang/test/CodeGenCXX/builtin-amdgcn-fence.cpp
+++ b/clang/test/CodeGenCXX/builtin-amdgcn-fence.cpp
@@ -1,22 +1,111 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
// REQUIRES: amdgpu-registered-target
// RUN: %clang_cc1 %s -emit-llvm -O0 -o - \
-// RUN: -triple=amdgcn-amd-amdhsa | opt -S | FileCheck %s
+// RUN: -triple=amdgcn-amd-amdhsa | FileCheck %s
+// CHECK-LABEL: define dso_local void @_Z25test_memory_fence_successv(
+// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: fence syncscope("workgroup") seq_cst
+// CHECK-NEXT: fence syncscope("agent") acquire
+// CHECK-NEXT: fence seq_cst
+// CHECK-NEXT: fence syncscope("agent") acq_rel
+// CHECK-NEXT: fence syncscope("workgroup") release
+// CHECK-NEXT: ret void
+//
void test_memory_fence_success() {
- // CHECK-LABEL: test_memory_fence_success
- // CHECK: fence syncscope("workgroup") seq_cst
__builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");
- // CHECK: fence syncscope("agent") acquire
__builtin_amdgcn_fence(__ATOMIC_ACQUIRE, "agent");
- // CHECK: fence seq_cst
__builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "");
- // CHECK: fence syncscope("agent") acq_rel
__builtin_amdgcn_fence(4, "agent");
- // CHECK: fence syncscope("workgroup") release
__builtin_amdgcn_fence(3, "workgroup");
}
+
+// CHECK-LABEL: define dso_local void @_Z10test_localv(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: fence syncscope("workgroup") seq_cst, !mmra [[META3:![0-9]+]]
+// CHECK-NEXT: fence syncscope("agent") acquire, !mmra [[META3]]
+// CHECK-NEXT: fence seq_cst, !mmra [[META3]]
+// CHECK-NEXT: fence syncscope("agent") acq_rel, !mmra [[META3]]
+// CHECK-NEXT: fence syncscope("workgroup") release, !mmra [[META3]]
+// CHECK-NEXT: ret void
+//
+void test_local() {
+ __builtin_amdgcn_fence( __ATOMIC_SEQ_CST, "workgroup", "local");
+
+ __builtin_amdgcn_fence(__ATOMIC_ACQUIRE, "agent", "local");
+
+ __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "", "local");
+
+ __builtin_amdgcn_fence(4, "agent", "local");
+
+ __builtin_amdgcn_fence(3, "workgroup", "local");
+}
+
+
+// CHECK-LABEL: define dso_local void @_Z11test_globalv(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: fence syncscope("workgroup") seq_cst, !mmra [[META4:![0-9]+]]
+// CHECK-NEXT: fence syncscope("agent") acquire, !mmra [[META4]]
+// CHECK-NEXT: fence seq_cst, !mmra [[META4]]
+// CHECK-NEXT: fence syncscope("agent") acq_rel, !mmra [[META4]]
+// CHECK-NEXT: fence syncscope("workgroup") release, !mmra [[META4]]
+// CHECK-NEXT: ret void
+//
+void test_global() {
+ __builtin_amdgcn_fence( __ATOMIC_SEQ_CST, "workgroup", "global");
+
+ __builtin_amdgcn_fence(__ATOMIC_ACQUIRE, "agent", "global");
+
+ __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "", "global");
+
+ __builtin_amdgcn_fence(4, "agent", "global");
+
+ __builtin_amdgcn_fence(3, "workgroup", "global");
+}
+
+// CHECK-LABEL: define dso_local void @_Z10test_imagev(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: fence syncscope("workgroup") seq_cst, !mmra [[META3]]
+// CHECK-NEXT: fence syncscope("agent") acquire, !mmra [[META3]]
+// CHECK-NEXT: fence seq_cst, !mmra [[META3]]
+// CHECK-NEXT: fence syncscope("agent") acq_rel, !mmra [[META3]]
+// CHECK-NEXT: fence syncscope("workgroup") release, !mmra [[META3]]
+// CHECK-NEXT: ret void
+//
+void test_image() {
+ __builtin_amdgcn_fence( __ATOMIC_SEQ_CST, "workgroup", "local");
+
+ __builtin_amdgcn_fence(__ATOMIC_ACQUIRE, "agent", "local");
+
+ __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "", "local");
+
+ __builtin_amdgcn_fence(4, "agent", "local");
+
+ __builtin_amdgcn_fence(3, "workgroup", "local");
+}
+
+// CHECK-LABEL: define dso_local void @_Z10test_mixedv(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: fence syncscope("workgroup") seq_cst, !mmra [[META5:![0-9]+]]
+// CHECK-NEXT: fence syncscope("workgroup") seq_cst, !mmra [[META5]]
+// CHECK-NEXT: ret void
+//
+void test_mixed() {
+ __builtin_amdgcn_fence( __ATOMIC_SEQ_CST, "workgroup", "local", "global");
+ __builtin_amdgcn_fence( __ATOMIC_SEQ_CST, "workgroup", "local", "local", "global", "local", "local");
+}
+//.
+// CHECK: [[META3]] = !{!"amdgpu-as", !"local"}
+// CHECK: [[META4]] = !{!"amdgpu-as", !"global"}
+// CHECK: [[META5]] = !{[[META4]], [[META3]]}
+//.
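
The new functions above exercise the optional address-space operands of __builtin_amdgcn_fence, which the CHECK lines show being lowered to !mmra ("amdgpu-as") metadata on the fence. A minimal sketch of the intended use, assuming an AMDGPU target ('flag' is an illustrative LDS-backed pointer):

void publish(volatile int *flag) {
  // Order only "local" (LDS) accesses; per the CHECKs this becomes
  //   fence syncscope("workgroup") release, !mmra !{!"amdgpu-as", !"local"}
  __builtin_amdgcn_fence(__ATOMIC_RELEASE, "workgroup", "local");
  *flag = 1;
}
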
diff --git a/clang/test/CodeGenCXX/cxx1y-sized-deallocation.cpp b/clang/test/CodeGenCXX/cxx1y-sized-deallocation.cpp
index 4e1565725152..55913aff9c19 100644
--- a/clang/test/CodeGenCXX/cxx1y-sized-deallocation.cpp
+++ b/clang/test/CodeGenCXX/cxx1y-sized-deallocation.cpp
@@ -1,12 +1,12 @@
// Check that delete exprs call the sized deallocation function if
-// -fsized-deallocation is passed in both C++11 and C++14.
+// -fsized-deallocation is passed in C++11, or by default when std >= C++14.
// RUN: %clang_cc1 -std=c++11 -fsized-deallocation %s -emit-llvm -triple x86_64-linux-gnu -o - | FileCheck %s
-// RUN: %clang_cc1 -std=c++14 -fsized-deallocation %s -emit-llvm -triple x86_64-linux-gnu -o - | FileCheck %s
+// RUN: %clang_cc1 -std=c++14 %s -emit-llvm -triple x86_64-linux-gnu -o - | FileCheck %s
-// Check that we don't used sized deallocation without -fsized-deallocation and
-// C++14.
+// Check that we don't use sized deallocation with -fno-sized-deallocation or without C++14.
// RUN: %clang_cc1 -std=c++11 %s -emit-llvm -triple x86_64-linux-gnu -o - | FileCheck %s --check-prefix=CHECK-UNSIZED
-// RUN: %clang_cc1 -std=c++14 %s -emit-llvm -triple x86_64-linux-gnu -o - | FileCheck %s --check-prefix=CHECK-UNSIZED
+// RUN: %clang_cc1 -std=c++14 %s -emit-llvm -triple x86_64-linux-gnu -fno-sized-deallocation -o - \
+// RUN: | FileCheck %s --check-prefix=CHECK-UNSIZED
// CHECK-UNSIZED-NOT: _ZdlPvm
// CHECK-UNSIZED-NOT: _ZdaPvm
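
The RUN-line changes above reflect that sized deallocation is now the default from C++14 on, so plain delete reaches _ZdlPvm with no extra flag. A minimal sketch of the signature that therefore gets selected; a replacement that simply forwards to the unsized global form is shown:

#include <cstddef>
#include <new>

// Matches _ZdlPvm: the compiler passes the size it proved for the object.
void operator delete(void *p, std::size_t sz) noexcept {
  ::operator delete(p);  // forward to the unsized form, _ZdlPv
}
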
diff --git a/clang/test/CodeGenCXX/cxx1z-aligned-allocation.cpp b/clang/test/CodeGenCXX/cxx1z-aligned-allocation.cpp
index ab2e4b3cdbbf..8823bc64a436 100644
--- a/clang/test/CodeGenCXX/cxx1z-aligned-allocation.cpp
+++ b/clang/test/CodeGenCXX/cxx1z-aligned-allocation.cpp
@@ -1,10 +1,10 @@
// Check that delete exprs call aligned (de)allocation functions if
// -faligned-allocation is passed in both C++11 and C++14.
// RUN: %clang_cc1 -std=c++11 -fexceptions -fsized-deallocation -faligned-allocation %s -emit-llvm -triple x86_64-linux-gnu -o - | FileCheck %s
-// RUN: %clang_cc1 -std=c++14 -fexceptions -fsized-deallocation -faligned-allocation %s -emit-llvm -triple x86_64-linux-gnu -o - | FileCheck %s
-// RUN: %clang_cc1 -std=c++1z -fexceptions -fsized-deallocation %s -emit-llvm -triple x86_64-linux-gnu -o - | FileCheck %s
+// RUN: %clang_cc1 -std=c++14 -fexceptions -faligned-allocation %s -emit-llvm -triple x86_64-linux-gnu -o - | FileCheck %s
+// RUN: %clang_cc1 -std=c++1z -fexceptions %s -emit-llvm -triple x86_64-linux-gnu -o - | FileCheck %s
-// RUN: %clang_cc1 -std=c++1z -fexceptions -fsized-deallocation %s -emit-llvm -triple x86_64-windows-msvc -o - | FileCheck %s --check-prefix=CHECK-MS
+// RUN: %clang_cc1 -std=c++1z -fexceptions %s -emit-llvm -triple x86_64-windows-msvc -o - | FileCheck %s --check-prefix=CHECK-MS
// Check that we don't used aligned (de)allocation without -faligned-allocation or C++1z.
// RUN: %clang_cc1 -std=c++14 -DUNALIGNED -fexceptions %s -emit-llvm -triple x86_64-linux-gnu -o - | FileCheck %s --check-prefix=CHECK-UNALIGNED
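
Likewise, these RUN lines drop -fsized-deallocation now that it is implied, keeping -faligned-allocation only where a pre-C++17 standard needs it. A minimal sketch of what selects the aligned overloads, assuming C++17 or -faligned-allocation ('Wide' is illustrative):

struct alignas(64) Wide { char buf[64]; };

Wide *make()       { return new Wide; }  // operator new(size_t, align_val_t)
void drop(Wide *w) { delete w; }         // aligned (and now also sized) operator delete
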
diff --git a/clang/test/CodeGenCXX/cxx2a-destroying-delete.cpp b/clang/test/CodeGenCXX/cxx2a-destroying-delete.cpp
index 20264b67353a..f6f4a2ff735c 100644
--- a/clang/test/CodeGenCXX/cxx2a-destroying-delete.cpp
+++ b/clang/test/CodeGenCXX/cxx2a-destroying-delete.cpp
@@ -108,10 +108,10 @@ struct J {
// CHECK-MSABI-LABEL: define {{.*}}@"?j@@
J *j() {
// CHECK-ITANIUM: invoke {{.*}}@_ZN1JC1Ev(
- // CHECK-ITANIUM: call {{.*}}@_ZdlPv(
+ // CHECK-ITANIUM: call {{.*}}@_ZdlPvm(
// CHECK-NOT: }
// CHECK-MSABI: invoke {{.*}}@"??0J@@Q{{AE|EAA}}@XZ"(
- // CHECK-MSABI: call {{.*}}@"??3@YAXP{{E?}}AX@Z"(
+ // CHECK-MSABI: call {{.*}}@"??3@YAXP{{E?}}AX{{I|_K}}@Z"(
return new J;
// CHECK: }
}
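
The mangling updates above (_ZdlPvm, "??3@YAXP{{E?}}AX{{I|_K}}@Z") show the size parameter reaching the plain deallocation call even in this destroying-delete test. For reference, a minimal sketch of the C++20 destroying-delete form the file is about, where the operator itself runs the destructor:

#include <new>

struct K {
  ~K();
  void operator delete(K *p, std::destroying_delete_t) {
    p->~K();               // destroying delete: the destructor runs here, not at the call site
    ::operator delete(p);
  }
};
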
diff --git a/clang/test/CodeGenCXX/cxx2b-deducing-this.cpp b/clang/test/CodeGenCXX/cxx2b-deducing-this.cpp
index b755e80db35a..649fe2afbf4e 100644
--- a/clang/test/CodeGenCXX/cxx2b-deducing-this.cpp
+++ b/clang/test/CodeGenCXX/cxx2b-deducing-this.cpp
@@ -182,3 +182,66 @@ auto dothing(int num)
fun();
}
}
+
+namespace GH87210 {
+template <typename... Ts>
+struct Overloaded : Ts... {
+ using Ts::operator()...;
+};
+
+template <typename... Ts>
+Overloaded(Ts...) -> Overloaded<Ts...>;
+
+// CHECK-LABEL: define dso_local void @_ZN7GH872101fEv()
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[X:%.*]] = alloca i32
+// CHECK-NEXT: [[Over:%.*]] = alloca %"{{.*}}Overloaded"
+// CHECK: call noundef ptr @"_ZZN7GH872101fEvENH3$_0clINS_10OverloadedIJS0_EEEEEDaRT_"(ptr {{.*}} [[Over]])
+void f() {
+ int x;
+ Overloaded o {
+ // CHECK: define internal noundef ptr @"_ZZN7GH872101fEvENH3$_0clINS_10OverloadedIJS0_EEEEEDaRT_"(ptr {{.*}} [[Self:%.*]])
+ // CHECK-NEXT: entry:
+ // CHECK-NEXT: [[SelfAddr:%.*]] = alloca ptr
+ // CHECK-NEXT: store ptr [[Self]], ptr [[SelfAddr]]
+ // CHECK-NEXT: [[SelfPtr:%.*]] = load ptr, ptr [[SelfAddr]]
+ // CHECK-NEXT: [[XRef:%.*]] = getelementptr inbounds %{{.*}}, ptr [[SelfPtr]], i32 0, i32 0
+ // CHECK-NEXT: [[X:%.*]] = load ptr, ptr [[XRef]]
+ // CHECK-NEXT: ret ptr [[X]]
+ [&](this auto& self) {
+ return &x;
+ }
+ };
+ o();
+}
+
+void g() {
+ int x;
+ Overloaded o {
+ [=](this auto& self) {
+ return x;
+ }
+ };
+ o();
+}
+}
+
+namespace GH89541 {
+// Same as above; just check that this doesn't crash.
+int one = 1;
+auto factory(int& x = one) {
+ return [&](this auto self) {
+ x;
+ };
+};
+
+using Base = decltype(factory());
+struct Derived : Base {
+ Derived() : Base(factory()) {}
+};
+
+void f() {
+ Derived d;
+ d();
+}
+}
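
GH87210 and GH89541 above both stress C++23 explicit object parameters ("deducing this") in lambdas. A minimal sketch of the feature in isolation, assuming a C++23 compiler:

// The explicit object parameter lets the closure refer to itself, so the
// lambda can recurse without std::function.
auto fib = [](this auto self, int n) -> int {
  return n < 2 ? n : self(n - 1) + self(n - 2);
};
int f10() { return fib(10); }  // 55
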
diff --git a/clang/test/CodeGenCXX/delete-two-arg.cpp b/clang/test/CodeGenCXX/delete-two-arg.cpp
index 552634f430a8..a0dcd03bc5a9 100644
--- a/clang/test/CodeGenCXX/delete-two-arg.cpp
+++ b/clang/test/CodeGenCXX/delete-two-arg.cpp
@@ -43,7 +43,9 @@ namespace test2 {
// CHECK-NEXT: br i1 [[T1]],
// CHECK: [[T3:%.*]] = getelementptr inbounds i8, ptr [[T0]], i32 -4
// CHECK-NEXT: [[T5:%.*]] = load i32, ptr [[T3]]
- // CHECK-NEXT: call void @_ZdaPv(ptr noundef [[T3]])
+ // CHECK-NEXT: [[T6:%.*]] = mul i32 4, [[T5]]
+ // CHECK-NEXT: [[T7:%.*]] = add i32 [[T6]], 4
+ // CHECK-NEXT: call void @_ZdaPvj(ptr noundef [[T3]], i32 noundef [[T7]])
// CHECK-NEXT: br label
::delete[] p;
}
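
The three new CHECK lines encode sized array delete on this ILP32 target: the element count is loaded from the 4-byte array cookie just below the allocation, and the size passed to _ZdaPvj is elementsize * count + cookie. A minimal sketch of a source shape that produces this, assuming an element type whose non-trivial destructor forces an array cookie ('A' is illustrative; arrays of trivial types such as bool still take the unsized _ZdaPv path, as the new.cpp hunk below confirms):

struct A { ~A(); int x; };  // 4-byte elements, non-trivial dtor => array cookie

void destroy(A *p) {
  ::delete[] p;  // per the CHECKs: count read from the cookie at p-4,
                 // then _ZdaPvj(p - 4, 4 * count + 4)
}
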
diff --git a/clang/test/CodeGenCXX/delete.cpp b/clang/test/CodeGenCXX/delete.cpp
index 1a418f48b659..d5b0dc671291 100644
--- a/clang/test/CodeGenCXX/delete.cpp
+++ b/clang/test/CodeGenCXX/delete.cpp
@@ -16,7 +16,7 @@ void t3(S *s) {
// CHECK: icmp {{.*}} null
// CHECK: br i1
- // CHECK: call void @_ZdlPv
+ // CHECK: call void @_ZdlPvm
// Check the delete is inside the 'if !null' check unless we're optimizing
// for size. FIXME: We could omit the branch entirely in this case.
@@ -35,7 +35,7 @@ struct T {
void t4(T *t) {
// CHECK: call void @_ZN1TD1Ev
// CHECK-SIZE-NEXT: br
- // CHECK: call void @_ZdlPv
+ // CHECK: call void @_ZdlPvm
delete t;
}
@@ -93,14 +93,16 @@ namespace test1 {
// CHECK-NEXT: call void @_ZN5test11AD1Ev(ptr {{[^,]*}} [[CUR]])
// CHECK-NEXT: [[ISDONE:%.*]] = icmp eq ptr [[CUR]], [[BEGIN]]
// CHECK-NEXT: br i1 [[ISDONE]]
- // CHECK: call void @_ZdaPv(ptr noundef [[ALLOC]])
+ // CHECK: [[MUL:%.*]] = mul i64 4, [[COUNT]]
+ // CHECK-NEXT: [[SIZE:%.*]] = add i64 [[MUL]], 8
+ // CHECK-NEXT: call void @_ZdaPvm(ptr noundef [[ALLOC]], i64 noundef [[SIZE]])
}
}
namespace test2 {
// CHECK-LABEL: define{{.*}} void @_ZN5test21fEPb
void f(bool *b) {
- // CHECK: call void @_ZdlPv(ptr
+ // CHECK: call void @_ZdlPvm(ptr{{.*}}i64
delete b;
// CHECK: call void @_ZdaPv(ptr
delete [] b;
@@ -137,7 +139,7 @@ namespace test4 {
// CHECK-NEXT: [[DTOR:%.*]] = load ptr, ptr [[T0]]
// CHECK-NEXT: call void [[DTOR]](ptr {{[^,]*}} [[OBJ:%.*]])
// Call the global operator delete.
- // CHECK-NEXT: call void @_ZdlPv(ptr noundef [[ALLOCATED]]) [[NUW:#[0-9]+]]
+ // CHECK-NEXT: call void @_ZdlPvm(ptr noundef [[ALLOCATED]], i64 noundef 8) [[NUW:#[0-9]+]]
::delete xp;
}
}
diff --git a/clang/test/CodeGenCXX/dllimport.cpp b/clang/test/CodeGenCXX/dllimport.cpp
index 6fec2f2982d4..484866b45389 100644
--- a/clang/test/CodeGenCXX/dllimport.cpp
+++ b/clang/test/CodeGenCXX/dllimport.cpp
@@ -205,7 +205,7 @@ USEVAR(VarTmpl<ExplicitSpec_Imported>)
// Functions
//===----------------------------------------------------------------------===//
-// GNU-DAG: declare dso_local void @_ZdlPv(ptr)
+// GNU-DAG: declare dso_local void @_ZdlPv{{j|y}}(ptr, i{{32|64}})
// Import function declaration.
// MSC-DAG: declare dllimport void @"?decl@@YAXXZ"()
@@ -358,7 +358,7 @@ __declspec(dllimport) void operator delete(void*);
__declspec(dllimport) inline int *ReferencingImportedNew() { return new int[2]; }
// MO1-DAG: define available_externally dllimport ptr @"?ReferencingImportedNew@@YAPAHXZ"
__declspec(dllimport) inline int *ReferencingImportedDelete() { delete (int*)nullptr; }
-// MO1-DAG: define available_externally dllimport ptr @"?ReferencingImportedDelete@@YAPAHXZ"
+// MO1-DAG: declare dllimport ptr @"?ReferencingImportedDelete@@YAPAHXZ"
USE(ReferencingImportedNew)
USE(ReferencingImportedDelete)
struct ClassWithDtor { ~ClassWithDtor() {} };
diff --git a/clang/test/CodeGenCXX/dynamic-cast-address-space.cpp b/clang/test/CodeGenCXX/dynamic-cast-address-space.cpp
index 83a408984b76..3d5e32516c7a 100644
--- a/clang/test/CodeGenCXX/dynamic-cast-address-space.cpp
+++ b/clang/test/CodeGenCXX/dynamic-cast-address-space.cpp
@@ -1,24 +1,127 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --check-globals all --no-generate-body-for-unused-prefixes --version 4
// RUN: %clang_cc1 -I%S %s -triple amdgcn-amd-amdhsa -emit-llvm -fcxx-exceptions -fexceptions -o - | FileCheck %s
+// RUN: %clang_cc1 -I%S %s -triple spirv64-unknown-unknown -fsycl-is-device -emit-llvm -fcxx-exceptions -fexceptions -o - | FileCheck %s --check-prefix=WITH-NONZERO-DEFAULT-AS
+
struct A { virtual void f(); };
struct B : A { };
-// CHECK: {{define.*@_Z1fP1A}}
-// CHECK-SAME: personality ptr @__gxx_personality_v0
B fail;
+//.
+// CHECK: @_ZTV1B = linkonce_odr unnamed_addr addrspace(1) constant { [3 x ptr addrspace(1)] } { [3 x ptr addrspace(1)] [ptr addrspace(1) null, ptr addrspace(1) @_ZTI1B, ptr addrspace(1) addrspacecast (ptr @_ZN1A1fEv to ptr addrspace(1))] }, comdat, align 8
+// CHECK: @fail = addrspace(1) global { ptr addrspace(1) } { ptr addrspace(1) getelementptr inbounds inrange(-16, 8) ({ [3 x ptr addrspace(1)] }, ptr addrspace(1) @_ZTV1B, i32 0, i32 0, i32 2) }, align 8
+// CHECK: @_ZTI1A = external addrspace(1) constant ptr addrspace(1)
+// CHECK: @_ZTVN10__cxxabiv120__si_class_type_infoE = external addrspace(1) global [0 x ptr addrspace(1)]
+// CHECK: @_ZTS1B = linkonce_odr addrspace(1) constant [3 x i8] c"1B\00", comdat, align 1
+// CHECK: @_ZTI1B = linkonce_odr addrspace(1) constant { ptr addrspace(1), ptr addrspace(1), ptr addrspace(1) } { ptr addrspace(1) getelementptr inbounds (ptr addrspace(1), ptr addrspace(1) @_ZTVN10__cxxabiv120__si_class_type_infoE, i64 2), ptr addrspace(1) @_ZTS1B, ptr addrspace(1) @_ZTI1A }, comdat, align 8
+// CHECK: @__oclc_ABI_version = weak_odr hidden local_unnamed_addr addrspace(4) constant i32 500
+//.
+// WITH-NONZERO-DEFAULT-AS: @_ZTV1B = linkonce_odr unnamed_addr addrspace(1) constant { [3 x ptr addrspace(1)] } { [3 x ptr addrspace(1)] [ptr addrspace(1) null, ptr addrspace(1) @_ZTI1B, ptr addrspace(1) addrspacecast (ptr @_ZN1A1fEv to ptr addrspace(1))] }, comdat, align 8
+// WITH-NONZERO-DEFAULT-AS: @fail = addrspace(1) global { ptr addrspace(1) } { ptr addrspace(1) getelementptr inbounds inrange(-16, 8) ({ [3 x ptr addrspace(1)] }, ptr addrspace(1) @_ZTV1B, i32 0, i32 0, i32 2) }, align 8
+// WITH-NONZERO-DEFAULT-AS: @_ZTI1A = external addrspace(1) constant ptr addrspace(1)
+// WITH-NONZERO-DEFAULT-AS: @_ZTVN10__cxxabiv120__si_class_type_infoE = external addrspace(1) global [0 x ptr addrspace(1)]
+// WITH-NONZERO-DEFAULT-AS: @_ZTS1B = linkonce_odr addrspace(1) constant [3 x i8] c"1B\00", comdat, align 1
+// WITH-NONZERO-DEFAULT-AS: @_ZTI1B = linkonce_odr addrspace(1) constant { ptr addrspace(1), ptr addrspace(1), ptr addrspace(1) } { ptr addrspace(1) getelementptr inbounds (ptr addrspace(1), ptr addrspace(1) @_ZTVN10__cxxabiv120__si_class_type_infoE, i64 2), ptr addrspace(1) @_ZTS1B, ptr addrspace(1) @_ZTI1A }, comdat, align 8
+//.
+// CHECK-LABEL: define dso_local noundef nonnull align 8 dereferenceable(8) ptr @_Z1fP1A(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = call ptr @__dynamic_cast(ptr [[TMP0]], ptr addrspace(1) @_ZTI1A, ptr addrspace(1) @_ZTI1B, i64 0) #[[ATTR3:[0-9]+]]
+// CHECK-NEXT: [[TMP2:%.*]] = icmp eq ptr [[TMP1]], null
+// CHECK-NEXT: br i1 [[TMP2]], label [[DYNAMIC_CAST_BAD_CAST:%.*]], label [[DYNAMIC_CAST_END:%.*]]
+// CHECK: dynamic_cast.bad_cast:
+// CHECK-NEXT: invoke void @__cxa_bad_cast() #[[ATTR4:[0-9]+]]
+// CHECK-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK: invoke.cont:
+// CHECK-NEXT: unreachable
+// CHECK: dynamic_cast.end:
+// CHECK-NEXT: br label [[TRY_CONT:%.*]]
+// CHECK: lpad:
+// CHECK-NEXT: [[TMP3:%.*]] = landingpad { ptr, i32 }
+// CHECK-NEXT: catch ptr null
+// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 0
+// CHECK-NEXT: store ptr [[TMP4]], ptr addrspace(5) [[EXN_SLOT]], align 8
+// CHECK-NEXT: [[TMP5:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 1
+// CHECK-NEXT: store i32 [[TMP5]], ptr addrspace(5) [[EHSELECTOR_SLOT]], align 4
+// CHECK-NEXT: br label [[CATCH:%.*]]
+// CHECK: catch:
+// CHECK-NEXT: [[EXN:%.*]] = load ptr, ptr addrspace(5) [[EXN_SLOT]], align 8
+// CHECK-NEXT: [[TMP6:%.*]] = call ptr @__cxa_begin_catch(ptr [[EXN]]) #[[ATTR3]]
+// CHECK-NEXT: call void @__cxa_end_catch()
+// CHECK-NEXT: br label [[TRY_CONT]]
+// CHECK: try.cont:
+// CHECK-NEXT: ret ptr addrspacecast (ptr addrspace(1) @fail to ptr)
+//
+// WITH-NONZERO-DEFAULT-AS-LABEL: define spir_func noundef align 8 dereferenceable(8) ptr addrspace(4) @_Z1fP1A(
+// WITH-NONZERO-DEFAULT-AS-SAME: ptr addrspace(4) noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
+// WITH-NONZERO-DEFAULT-AS-NEXT: entry:
+// WITH-NONZERO-DEFAULT-AS-NEXT: [[RETVAL:%.*]] = alloca ptr addrspace(4), align 8
+// WITH-NONZERO-DEFAULT-AS-NEXT: [[A_ADDR:%.*]] = alloca ptr addrspace(4), align 8
+// WITH-NONZERO-DEFAULT-AS-NEXT: [[EXN_SLOT:%.*]] = alloca ptr addrspace(4), align 8
+// WITH-NONZERO-DEFAULT-AS-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
+// WITH-NONZERO-DEFAULT-AS-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr [[RETVAL]] to ptr addrspace(4)
+// WITH-NONZERO-DEFAULT-AS-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr [[A_ADDR]] to ptr addrspace(4)
+// WITH-NONZERO-DEFAULT-AS-NEXT: store ptr addrspace(4) [[A]], ptr addrspace(4) [[A_ADDR_ASCAST]], align 8
+// WITH-NONZERO-DEFAULT-AS-NEXT: [[TMP0:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[A_ADDR_ASCAST]], align 8
+// WITH-NONZERO-DEFAULT-AS-NEXT: [[TMP1:%.*]] = call spir_func ptr addrspace(4) @__dynamic_cast(ptr addrspace(4) [[TMP0]], ptr addrspace(1) @_ZTI1A, ptr addrspace(1) @_ZTI1B, i64 0) #[[ATTR3:[0-9]+]]
+// WITH-NONZERO-DEFAULT-AS-NEXT: [[TMP2:%.*]] = icmp eq ptr addrspace(4) [[TMP1]], null
+// WITH-NONZERO-DEFAULT-AS-NEXT: br i1 [[TMP2]], label [[DYNAMIC_CAST_BAD_CAST:%.*]], label [[DYNAMIC_CAST_END:%.*]]
+// WITH-NONZERO-DEFAULT-AS: dynamic_cast.bad_cast:
+// WITH-NONZERO-DEFAULT-AS-NEXT: invoke spir_func void @__cxa_bad_cast() #[[ATTR4:[0-9]+]]
+// WITH-NONZERO-DEFAULT-AS-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// WITH-NONZERO-DEFAULT-AS: invoke.cont:
+// WITH-NONZERO-DEFAULT-AS-NEXT: unreachable
+// WITH-NONZERO-DEFAULT-AS: dynamic_cast.end:
+// WITH-NONZERO-DEFAULT-AS-NEXT: br label [[TRY_CONT:%.*]]
+// WITH-NONZERO-DEFAULT-AS: lpad:
+// WITH-NONZERO-DEFAULT-AS-NEXT: [[TMP3:%.*]] = landingpad { ptr addrspace(4), i32 }
+// WITH-NONZERO-DEFAULT-AS-NEXT: catch ptr addrspace(4) null
+// WITH-NONZERO-DEFAULT-AS-NEXT: [[TMP4:%.*]] = extractvalue { ptr addrspace(4), i32 } [[TMP3]], 0
+// WITH-NONZERO-DEFAULT-AS-NEXT: store ptr addrspace(4) [[TMP4]], ptr [[EXN_SLOT]], align 8
+// WITH-NONZERO-DEFAULT-AS-NEXT: [[TMP5:%.*]] = extractvalue { ptr addrspace(4), i32 } [[TMP3]], 1
+// WITH-NONZERO-DEFAULT-AS-NEXT: store i32 [[TMP5]], ptr [[EHSELECTOR_SLOT]], align 4
+// WITH-NONZERO-DEFAULT-AS-NEXT: br label [[CATCH:%.*]]
+// WITH-NONZERO-DEFAULT-AS: catch:
+// WITH-NONZERO-DEFAULT-AS-NEXT: [[EXN:%.*]] = load ptr addrspace(4), ptr [[EXN_SLOT]], align 8
+// WITH-NONZERO-DEFAULT-AS-NEXT: [[TMP6:%.*]] = call spir_func ptr addrspace(4) @__cxa_begin_catch(ptr addrspace(4) [[EXN]]) #[[ATTR3]]
+// WITH-NONZERO-DEFAULT-AS-NEXT: call spir_func void @__cxa_end_catch()
+// WITH-NONZERO-DEFAULT-AS-NEXT: br label [[TRY_CONT]]
+// WITH-NONZERO-DEFAULT-AS: try.cont:
+// WITH-NONZERO-DEFAULT-AS-NEXT: ret ptr addrspace(4) addrspacecast (ptr addrspace(1) @fail to ptr addrspace(4))
+//
const B& f(A *a) {
try {
- // CHECK: call ptr @__dynamic_cast
- // CHECK: br i1
- // CHECK: invoke void @__cxa_bad_cast() [[NR:#[0-9]+]]
dynamic_cast<const B&>(*a);
} catch (...) {
- // CHECK: landingpad { ptr, i32 }
- // CHECK-NEXT: catch ptr null
}
return fail;
}
-// CHECK: declare ptr @__dynamic_cast(ptr, ptr addrspace(1), ptr addrspace(1), i64) [[NUW_RO:#[0-9]+]]
-// CHECK: attributes [[NUW_RO]] = { nounwind willreturn memory(read) }
-// CHECK: attributes [[NR]] = { noreturn }
+//.
+// CHECK: attributes #[[ATTR0]] = { mustprogress noinline optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
+// CHECK: attributes #[[ATTR1:[0-9]+]] = { nounwind willreturn memory(read) }
+// CHECK: attributes #[[ATTR2:[0-9]+]] = { "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
+// CHECK: attributes #[[ATTR3]] = { nounwind }
+// CHECK: attributes #[[ATTR4]] = { noreturn }
+//.
+// WITH-NONZERO-DEFAULT-AS: attributes #[[ATTR0]] = { convergent mustprogress noinline norecurse nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
+// WITH-NONZERO-DEFAULT-AS: attributes #[[ATTR1:[0-9]+]] = { nounwind willreturn memory(read) }
+// WITH-NONZERO-DEFAULT-AS: attributes #[[ATTR2:[0-9]+]] = { convergent nounwind "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
+// WITH-NONZERO-DEFAULT-AS: attributes #[[ATTR3]] = { nounwind }
+// WITH-NONZERO-DEFAULT-AS: attributes #[[ATTR4]] = { noreturn }
+//.
+// CHECK: [[META0:![0-9]+]] = !{i32 1, !"amdhsa_code_object_version", i32 500}
+// CHECK: [[META1:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
+// CHECK: [[META2:![0-9]+]] = !{!"{{.*}}clang version {{.*}}"}
+//.
+// WITH-NONZERO-DEFAULT-AS: [[META0:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
+// WITH-NONZERO-DEFAULT-AS: [[META1:![0-9]+]] = !{!"{{.*}}clang version {{.*}}"}
+//.
diff --git a/clang/test/CodeGenCXX/eh.cpp b/clang/test/CodeGenCXX/eh.cpp
index 5c592a96e27b..f174b5d84fdf 100644
--- a/clang/test/CodeGenCXX/eh.cpp
+++ b/clang/test/CodeGenCXX/eh.cpp
@@ -81,7 +81,7 @@ namespace test5 {
// CHECK: invoke void @__cxa_throw(ptr [[EXNOBJ]], ptr @_ZTIN5test51AE, ptr @_ZN5test51AD1Ev) [[NR]]
// CHECK-NEXT: to label {{%.*}} unwind label %[[HANDLER:[^ ]*]]
// : [[HANDLER]]: (can't check this in Release-Asserts builds)
-// CHECK: {{%.*}} = call i32 @llvm.eh.typeid.for(ptr @_ZTIN5test51AE)
+// CHECK: {{%.*}} = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIN5test51AE)
}
namespace test6 {
@@ -96,7 +96,7 @@ namespace test6 {
// PR7127
namespace test7 {
-// CHECK-LABEL: define{{.*}} i32 @_ZN5test73fooEv()
+// CHECK-LABEL: define{{.*}} i32 @_ZN5test73fooEv()
// CHECK-SAME: personality ptr @__gxx_personality_v0
int foo() {
// CHECK: [[CAUGHTEXNVAR:%.*]] = alloca ptr
@@ -119,7 +119,7 @@ namespace test7 {
// CHECK-NEXT: store i32 [[SELECTOR]], ptr [[SELECTORVAR]]
// CHECK-NEXT: br label
// CHECK: [[SELECTOR:%.*]] = load i32, ptr [[SELECTORVAR]]
-// CHECK-NEXT: [[T0:%.*]] = call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
+// CHECK-NEXT: [[T0:%.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi)
// CHECK-NEXT: icmp eq i32 [[SELECTOR]], [[T0]]
// CHECK-NEXT: br i1
// CHECK: [[T0:%.*]] = load ptr, ptr [[CAUGHTEXNVAR]]
diff --git a/clang/test/CodeGenCXX/fmv-namespace.cpp b/clang/test/CodeGenCXX/fmv-namespace.cpp
new file mode 100644
index 000000000000..5bcd0da06eeb
--- /dev/null
+++ b/clang/test/CodeGenCXX/fmv-namespace.cpp
@@ -0,0 +1,93 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --check-globals all --include-generated-funcs --version 5
+// RUN: %clang_cc1 -triple aarch64-linux-gnu -emit-llvm %s -o - | FileCheck %s
+
+namespace Name {
+int __attribute((target_version("default"))) foo() { return 0; }
+}
+
+namespace Name {
+int __attribute((target_version("sve"))) foo() { return 1; }
+}
+
+int bar() { return Name::foo(); }
+
+namespace OtherName {
+int __attribute((target_version("sve"))) foo() { return 2; }
+}
+
+int baz() { return OtherName::foo(); }
+
+//.
+// CHECK: @__aarch64_cpu_features = external dso_local global { i64 }
+// CHECK: @_ZN4Name3fooEv.ifunc = weak_odr alias i32 (), ptr @_ZN4Name3fooEv
+// CHECK: @_ZN9OtherName3fooEv.ifunc = weak_odr alias i32 (), ptr @_ZN9OtherName3fooEv
+// CHECK: @_ZN4Name3fooEv = weak_odr ifunc i32 (), ptr @_ZN4Name3fooEv.resolver
+// CHECK: @_ZN9OtherName3fooEv = weak_odr ifunc i32 (), ptr @_ZN9OtherName3fooEv.resolver
+//.
+// CHECK-LABEL: define dso_local noundef i32 @_ZN4Name3fooEv.default(
+// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: ret i32 0
+//
+//
+// CHECK-LABEL: define dso_local noundef i32 @_ZN4Name3fooEv._Msve(
+// CHECK-SAME: ) #[[ATTR1:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: ret i32 1
+//
+//
+// CHECK-LABEL: define dso_local noundef i32 @_Z3barv(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[CALL:%.*]] = call noundef i32 @_ZN4Name3fooEv()
+// CHECK-NEXT: ret i32 [[CALL]]
+//
+//
+// CHECK-LABEL: define weak_odr ptr @_ZN4Name3fooEv.resolver() comdat {
+// CHECK-NEXT: [[RESOLVER_ENTRY:.*:]]
+// CHECK-NEXT: call void @__init_cpu_features_resolver()
+// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 1073741824
+// CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 1073741824
+// CHECK-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
+// CHECK-NEXT: br i1 [[TMP3]], label %[[RESOLVER_RETURN:.*]], label %[[RESOLVER_ELSE:.*]]
+// CHECK: [[RESOLVER_RETURN]]:
+// CHECK-NEXT: ret ptr @_ZN4Name3fooEv._Msve
+// CHECK: [[RESOLVER_ELSE]]:
+// CHECK-NEXT: ret ptr @_ZN4Name3fooEv.default
+//
+//
+// CHECK-LABEL: define dso_local noundef i32 @_ZN9OtherName3fooEv._Msve(
+// CHECK-SAME: ) #[[ATTR1]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: ret i32 2
+//
+//
+// CHECK-LABEL: define dso_local noundef i32 @_Z3bazv(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[CALL:%.*]] = call noundef i32 @_ZN9OtherName3fooEv()
+// CHECK-NEXT: ret i32 [[CALL]]
+//
+//
+// CHECK-LABEL: define weak_odr ptr @_ZN9OtherName3fooEv.resolver() comdat {
+// CHECK-NEXT: [[RESOLVER_ENTRY:.*:]]
+// CHECK-NEXT: call void @__init_cpu_features_resolver()
+// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 1073741824
+// CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 1073741824
+// CHECK-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
+// CHECK-NEXT: br i1 [[TMP3]], label %[[RESOLVER_RETURN:.*]], label %[[RESOLVER_ELSE:.*]]
+// CHECK: [[RESOLVER_RETURN]]:
+// CHECK-NEXT: ret ptr @_ZN9OtherName3fooEv._Msve
+// CHECK: [[RESOLVER_ELSE]]:
+// CHECK-NEXT: ret ptr @_ZN9OtherName3fooEv.default
+//
+//.
+// CHECK: attributes #[[ATTR0]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
+// CHECK: attributes #[[ATTR1]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+fullfp16,+neon,+sve" }
+// CHECK: attributes #[[ATTR2:[0-9]+]] = { "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
+//.
+// CHECK: [[META0:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
+// CHECK: [[META1:![0-9]+]] = !{!"{{.*}}clang version {{.*}}"}
+//.
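
The new fmv-namespace.cpp test pins down AArch64 function multi-versioning for functions declared inside namespaces: each versioned function still gets one resolver and one ifunc, keyed off __aarch64_cpu_features. A minimal sketch of the source pattern involved, assuming an aarch64 target:

namespace math {
int __attribute__((target_version("sve"))) dot() { return 1; }      // chosen when SVE is present
int __attribute__((target_version("default"))) dot() { return 0; }  // fallback
}

int use() { return math::dot(); }  // dispatches through _ZN4math3dotEv.resolver
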
diff --git a/clang/test/CodeGenCXX/new.cpp b/clang/test/CodeGenCXX/new.cpp
index e278d9acfe9e..af225529c494 100644
--- a/clang/test/CodeGenCXX/new.cpp
+++ b/clang/test/CodeGenCXX/new.cpp
@@ -15,7 +15,7 @@ void t1() {
}
// CHECK: declare noundef nonnull ptr @_Znwm(i64 noundef) [[ATTR_NOBUILTIN:#[^ ]*]]
-// CHECK: declare void @_ZdlPv(ptr noundef) [[ATTR_NOBUILTIN_NOUNWIND:#[^ ]*]]
+// CHECK: declare void @_ZdlPvm(ptr noundef, i64 noundef) [[ATTR_NOBUILTIN_NOUNWIND:#[^ ]*]]
// CHECK: declare noundef nonnull ptr @_Znam(i64 noundef) [[ATTR_NOBUILTIN]]
// CHECK: declare void @_ZdaPv(ptr noundef) [[ATTR_NOBUILTIN_NOUNWIND]]
@@ -192,7 +192,7 @@ void f() {
// CHECK: store i64 200
delete[] new (nothrow) Alloc[10][20];
// CHECK: call noalias noundef nonnull ptr @_Znwm
- // CHECK: call void @_ZdlPv(ptr
+ // CHECK: call void @_ZdlPvm(ptr noundef {{%.*}}, i64 noundef 1)
delete new bool;
// CHECK: ret void
}
@@ -317,7 +317,7 @@ namespace N3664 {
void f() {
// CHECK: call noalias noundef nonnull ptr @_Znwm(i64 noundef 4) [[ATTR_BUILTIN_NEW:#[^ ]*]]
int *p = new int; // expected-note {{allocated with 'new' here}}
- // CHECK: call void @_ZdlPv({{.*}}) [[ATTR_BUILTIN_DELETE:#[^ ]*]]
+ // CHECK: call void @_ZdlPvm({{.*}}) [[ATTR_BUILTIN_DELETE:#[^ ]*]]
delete p;
// CHECK: call noalias noundef nonnull ptr @_Znam(i64 noundef 12) [[ATTR_BUILTIN_NEW]]
diff --git a/clang/test/CodeGenCXX/nrvo.cpp b/clang/test/CodeGenCXX/nrvo.cpp
index 33dc4cf9dbc8..23ac04511514 100644
--- a/clang/test/CodeGenCXX/nrvo.cpp
+++ b/clang/test/CodeGenCXX/nrvo.cpp
@@ -628,7 +628,7 @@ void may_throw();
// CHECK-EH-03-NEXT: br label [[CATCH_DISPATCH:%.*]]
// CHECK-EH-03: catch.dispatch:
// CHECK-EH-03-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4
-// CHECK-EH-03-NEXT: [[TMP3:%.*]] = call i32 @llvm.eh.typeid.for(ptr @_ZTI1X) #[[ATTR7]]
+// CHECK-EH-03-NEXT: [[TMP3:%.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTI1X) #[[ATTR7]]
// CHECK-EH-03-NEXT: [[MATCHES:%.*]] = icmp eq i32 [[SEL]], [[TMP3]]
// CHECK-EH-03-NEXT: br i1 [[MATCHES]], label [[CATCH:%.*]], label [[EH_RESUME:%.*]]
// CHECK-EH-03: catch:
@@ -707,7 +707,7 @@ void may_throw();
// CHECK-EH-11-NEXT: br label [[CATCH_DISPATCH:%.*]]
// CHECK-EH-11: catch.dispatch:
// CHECK-EH-11-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4
-// CHECK-EH-11-NEXT: [[TMP3:%.*]] = call i32 @llvm.eh.typeid.for(ptr @_ZTI1X) #[[ATTR6]]
+// CHECK-EH-11-NEXT: [[TMP3:%.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTI1X) #[[ATTR6]]
// CHECK-EH-11-NEXT: [[MATCHES:%.*]] = icmp eq i32 [[SEL]], [[TMP3]]
// CHECK-EH-11-NEXT: br i1 [[MATCHES]], label [[CATCH:%.*]], label [[EH_RESUME:%.*]]
// CHECK-EH-11: catch:
diff --git a/clang/test/CodeGenCXX/ps-dllstorage-vtable-rtti.cpp b/clang/test/CodeGenCXX/ps-dllstorage-vtable-rtti.cpp
new file mode 100644
index 000000000000..377e579058ac
--- /dev/null
+++ b/clang/test/CodeGenCXX/ps-dllstorage-vtable-rtti.cpp
@@ -0,0 +1,114 @@
+/// For a class that has a vtable and typeinfo symbol for RTTI, if a user marks
+/// either:
+///
+/// (a) The entire class as dllexport (dllimport)
+/// (b) Any non-inline method of the class as dllexport (dllimport)
+///
+/// then Clang must export the vtable and typeinfo symbol from the TU where they
+/// are defined (the TU containing the definition of the Itanium C++ ABI "key
+/// function") and must import them in other modules where they are referenced.
+
+// RUN: %clang_cc1 -I%S -fdeclspec -triple x86_64-unknown-windows-itanium -emit-llvm -o - %s -fhalf-no-semantic-interposition \
+// RUN: | FileCheck %s -check-prefix=WI
+// RUN: %clang_cc1 -I%S -fdeclspec -triple x86_64-scei-windows-itanium -emit-llvm -o - %s -fhalf-no-semantic-interposition \
+// RUN: | FileCheck %s --check-prefixes=PS
+// RUN: %clang_cc1 -I%S -fdeclspec -triple x86_64-scei-ps4 -emit-llvm -o - %s -fhalf-no-semantic-interposition \
+// RUN: | FileCheck %s --check-prefixes=PS
+// RUN: %clang_cc1 -I%S -fdeclspec -triple x86_64-sie-ps5 -emit-llvm -o - %s -fhalf-no-semantic-interposition \
+// RUN: | FileCheck %s --check-prefixes=PS
+
+#include <typeinfo>
+
+/// Case (a) -- Import Aspect
+/// The entire class is imported. The typeinfo symbol must also be imported, but
+/// the vtable will not be referenced, and so does not need to be imported.
+
+// PS-DAG: @_ZTI10FullImport = {{.*}}dllimport
+// WI-DAG: @_ZTI10FullImport = external dllimport constant ptr
+struct __declspec(dllimport) FullImport {
+ virtual void inlineFunc() const {}
+ virtual void key();
+ virtual void func();
+};
+
+/// 'FullImport::key()' is the key function, so the vtable and typeinfo symbol
+/// of 'FullImport' will be defined in the TU that contains the definition of
+/// 'key()' (and they must be exported from there).
+void FullImportTest() { typeid(FullImport).name(); }
+
+/// Case (a) -- Export Aspect
+/// The entire class is exported. The vtable and typeinfo symbols must also be
+/// exported.
+
+// PS-DAG: @_ZTV10FullExport = {{.*}}dllexport
+// WI-DAG: @_ZTV10FullExport = {{.*}}dllexport
+// PS-DAG: @_ZTI10FullExport = {{.*}}dllexport
+// WI-DAG: @_ZTI10FullExport = dso_local dllexport constant {
+struct __declspec(dllexport) FullExport {
+ virtual void inlineFunc() const {}
+ virtual void key();
+ virtual void func();
+};
+
+/// This is the key function of the class 'FullExport', so the vtable and
+/// typeinfo symbols of 'FullExport' will be defined in this TU, and so they
+/// must be exported from this TU.
+void FullExport::key() { typeid(FullExport).name(); }
+
+/// Case (b) -- Import Aspect
+/// The class as a whole is not imported, but a non-inline method of the class
+/// is, so the vtable and typeinfo symbol must be imported.
+
+// PS-DAG: @_ZTV10PartImport = {{.*}}dllimport
+// WI-DAG: @_ZTV10PartImport = external dso_local unnamed_addr constant {
+// PS-DAG: @_ZTI10PartImport = {{.*}}dllimport
+// WI-DAG: @_ZTI10PartImport = external dso_local constant ptr
+struct PartImport {
+ virtual void inlineFunc() const {}
+ virtual void key();
+ __declspec(dllimport) virtual void func();
+};
+
+/// 'PartImport::key()' is the key function, so the vtable and typeinfo symbol
+/// of 'PartImport' will be defined in the TU that contains the definition of
+/// 'key()' (and they must be exported from there). Here, we will reference the
+/// vtable and typeinfo symbol, so we must also import them.
+void PartImportTest() {
+ PartImport f;
+ typeid(PartImport).name();
+}
+
+/// Case (b) -- Export Aspect
+/// The class as a whole is not exported, but a non-inline method of the class
+/// is, so the vtable and typeinfo symbol must be exported.
+
+// PS-DAG: @_ZTV10PartExport = {{.*}}dllexport
+// WI-DAG: @_ZTV10PartExport = dso_local unnamed_addr constant {
+// PS-DAG: @_ZTI10PartExport = {{.*}}dllexport
+// WI-DAG: @_ZTI10PartExport = dso_local constant {
+struct PartExport {
+ virtual void inlineFunc() const {}
+ virtual void key();
+ __declspec(dllexport) virtual void func();
+};
+
+/// This is the key function of the class 'PartExport', so the vtable and
+/// typeinfo symbol of 'PartExport' will be defined in this TU, and so they must
+/// be exported from this TU.
+void PartExport::key() { typeid(PartExport).name(); }
+
+/// Case (b) -- Export Aspect
+/// The class as a whole is not exported, but the constructor of the class
+/// is, so the vtable and typeinfo symbol must be exported.
+
+// PS-DAG: @_ZTV10ConsExport = {{.*}}dllexport
+// WI-DAG: @_ZTV10ConsExport = dso_local unnamed_addr constant {
+// PS-DAG: @_ZTI10ConsExport = {{.*}}dllexport
+// WI-DAG: @_ZTI10ConsExport = dso_local constant {
+struct ConsExport {
+ __declspec(dllexport) ConsExport();
+ virtual void key();
+};
+
+ConsExport::ConsExport() {}
+void ConsExport::key() { typeid(ConsExport).name(); }
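
All four positive cases above lean on the Itanium key-function rule that the comments describe. A compact restatement in code ('Widget' is illustrative):

struct Widget {
  virtual void key();       // first non-inline virtual member: the key function
  virtual void helper() {}  // inline at class scope, so not eligible
};

// The TU holding this definition emits and owns _ZTV6Widget and _ZTI6Widget.
void Widget::key() {}
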
diff --git a/clang/test/CodeGenCXX/ps4-dllstorage-vtable-rtti.cpp b/clang/test/CodeGenCXX/ps4-dllstorage-vtable-rtti.cpp
deleted file mode 100644
index 5724e78617df..000000000000
--- a/clang/test/CodeGenCXX/ps4-dllstorage-vtable-rtti.cpp
+++ /dev/null
@@ -1,211 +0,0 @@
-// For a class that has a vtable (and hence, also has a typeinfo symbol for
-// RTTI), if a user marks either:
-//
-// (a) the entire class as dllexport (dllimport), or
-// (b) all non-inline virtual methods of the class as dllexport (dllimport)
-//
-// then Clang must export the vtable and typeinfo symbol from the TU where they
-// are defined (the TU containing the definition of the Itanium C++ ABI "key
-// function"), and must import them in other modules where they are referenced.
-//
-// Conversely to point (b), if some (but not all) of the non-inline virtual
-// methods of a class are marked as dllexport (dllimport), then the vtable and
-// typeinfo symbols must not be exported (imported). This will result in a
-// link-time failure when linking the importing module. This link-time failure
-// is the desired behavior, because the Microsoft toolchain also gets a
-// link-time failure in these cases (and since __declspec(dllexport)
-// (__declspec(dllimport)) is a Microsoft extension, our intention is to mimic
-// that Microsoft behavior).
-//
-// Side note: It is within the bodies of constructors (and in some cases,
-// destructors) that the vtable is explicitly referenced. In case (a) above,
-// where the entire class is exported (imported), then all constructors (among
-// other things) are exported (imported). So for that situation, an importing
-// module for a well-formed program will not actually reference the vtable,
-// since constructor calls will all be to functions external to that module
-// (and imported into it, from the exporting module). I.e., all vtable
-// references will be in that module where the constructor and destructor
-// bodies are, therefore, there will not be a need to import the vtable in
-// that case.
-//
-// This test contains 6 test classes:
-// 2 for point (a),
-// 2 for point (b),
-// and 2 negative tests for the converse of point (b).
-//
-// The two tests for each of these points are one for importing, and one for
-// exporting.
-
-// RUN: %clang_cc1 -I%S -fdeclspec -triple x86_64-unknown-windows-itanium -emit-llvm -o - %s -fhalf-no-semantic-interposition | FileCheck %s -check-prefix=WI
-// RUN: %clang_cc1 -I%S -fdeclspec -triple x86_64-scei-windows-itanium -emit-llvm -o - %s -fhalf-no-semantic-interposition | FileCheck %s --check-prefixes=PS4,SCEI_WI
-// RUN: %clang_cc1 -I%S -fdeclspec -triple x86_64-scei-ps4 -emit-llvm -o - %s -fhalf-no-semantic-interposition | FileCheck %s --check-prefixes=PS4,SCEI_PS4
-// RUN: %clang_cc1 -I%S -fdeclspec -triple x86_64-sie-ps5 -emit-llvm -o - %s -fhalf-no-semantic-interposition | FileCheck %s --check-prefixes=PS4,SCEI_PS4
-
-#include <typeinfo>
-
-// Case (a) -- Import Aspect
-// The entire class is imported. The typeinfo symbol must also be imported,
-// but the vtable will not be referenced, and so does not need to be imported
-// (as described in the "Side note", above).
-//
-// PS4-DAG: @_ZTI10FullImport = {{.*}}dllimport
-// WI-DAG: @_ZTI10FullImport = external dllimport constant ptr
-struct __declspec(dllimport) FullImport
-{
- virtual void getId() {}
- virtual void Bump();
- virtual void Decrement();
-};
-
-// 'FullImport::Bump()' is the key function, so the vtable and typeinfo symbol
-// of 'FullImport' will be defined in the TU that contains the definition of
-// 'Bump()' (and they must be exported from there).
-void FullImportTest()
-{
- typeid(FullImport).name();
-}
-
-///////////////////////////////////////////////////////////////////
-
-// Case (a) -- Export Aspect
-// The entire class is exported. The vtable and typeinfo symbols must also be
-// exported,
-//
-// PS4-DAG: @_ZTV10FullExport ={{.*}}dllexport
-// WI-DAG: @_ZTV10FullExport ={{.*}}dllexport
-// PS4-DAG: @_ZTI10FullExport ={{.*}}dllexport
-// WI-DAG: @_ZTI10FullExport = dso_local dllexport constant {
-struct __declspec(dllexport) FullExport // Easy case: Entire class is exported.
-{
- virtual void getId() {}
- virtual void Bump();
- virtual void Decrement();
-};
-
-// This is the key function of the class 'FullExport', so the vtable and
-// typeinfo symbols of 'FullExport' will be defined in this TU, and so they
-// must be exported from this TU.
-void FullExport::Bump()
-{
- typeid(FullExport).name();
-}
-
-///////////////////////////////////////////////////////////////////
-
-// Case (b) -- Import Aspect
-// The class as a whole is not imported, but all non-inline virtual methods of
-// the class are, so the vtable and typeinfo symbol must be imported.
-//
-// PS4-DAG: @_ZTV9FooImport ={{.*}}dllimport
-// WI-DAG: @_ZTV9FooImport = linkonce_odr dso_local unnamed_addr constant {
-// PS4-DAG: @_ZTI9FooImport ={{.*}}dllimport
-// WI-DAG: @_ZTI9FooImport = linkonce_odr dso_local constant {
-
-
-struct FooImport
-{
- virtual void getId() const {}
- __declspec(dllimport) virtual void Bump();
- __declspec(dllimport) virtual void Decrement();
-};
-
-// 'FooImport::Bump()' is the key function, so the vtable and typeinfo symbol
-// of 'FooImport' will be defined in the TU that contains the definition of
-// 'Bump()' (and they must be exported from there). Here, we will reference
-// the vtable and typeinfo symbol, so we must also import them.
-void importTest()
-{
- typeid(FooImport).name();
-}
-
-///////////////////////////////////////////////////////////////////
-
-// Case (b) -- Export Aspect
-// The class as a whole is not exported, but all non-inline virtual methods of
-// the class are, so the vtable and typeinfo symbol must be exported.
-//
-// PS4-DAG: @_ZTV9FooExport ={{.*}}dllexport
-// WI-DAG: @_ZTV9FooExport = dso_local unnamed_addr constant {
-// PS4-DAG: @_ZTI9FooExport ={{.*}}dllexport
-// WI-DAG: @_ZTI9FooExport = dso_local constant {
-struct FooExport
-{
- virtual void getId() const {}
- __declspec(dllexport) virtual void Bump();
- __declspec(dllexport) virtual void Decrement();
-};
-
-// This is the key function of the class 'FooExport', so the vtable and
-// typeinfo symbol of 'FooExport' will be defined in this TU, and so they must
-// be exported from this TU.
-void FooExport::Bump()
-{
- FooImport f;
- typeid(FooExport).name();
-}
-
-///////////////////////////////////////////////////////////////////
-
-// The tests below verify that the associated vtable and typeinfo symbols are
-// not imported/exported. These are the converse of case (b).
-//
-// Note that ultimately, if the module doing the importing calls a constructor
-// of the class with the vtable, or makes a reference to the typeinfo symbol of
-// the class, then this will result in an unresolved reference (to the vtable
-// or typeinfo symbol) when linking the importing module, and thus a link-time
-// failure.
-//
-// Note that with the Microsoft toolchain there will also be a link-time
-// failure when linking the module doing the importing. With the Microsoft
-// toolchain, it will be an unresolved reference to the method 'Decrement()'
-// of the approriate class, rather than to the vtable or typeinfo symbol of
-// the class, because Microsoft defines the vtable and typeinfo symbol (weakly)
-// everywhere they are used.
-
-// Converse of case (b) -- Import Aspect
-// The class as a whole is not imported, and not all non-inline virtual methods
-// are imported, so the vtable and typeinfo symbol are not to be imported.
-//
-// CHECK-PS4: @_ZTV11FooNoImport = external dso_local unnamed_addr constant {
-// CHECK-WI: @_ZTV11FooNoImport = linkonce_odr dso_local unnamed_addr constant {
-// CHECK-PS4: @_ZTI11FooNoImport = external dso_local constant ptr{{$}}
-// CHECK-WI: @_ZTI11FooNoImport = linkonce_odr dso_local constant {
-struct FooNoImport
-{
- virtual void getId() const {}
- __declspec(dllimport) virtual void Bump();
- virtual void Decrement(); // Not imported.
- int mCounter;
-};
-
-void importNegativeTest()
-{
- FooNoImport f;
- typeid(FooNoImport).name();
-}
-
-///////////////////////////////////////////////////////////////////
-
-// Converse of case (b) -- Export Aspect
-// The class as a whole is not exported, and not all non-inline virtual methods
-// are exported, so the vtable and typeinfo symbol are not to be exported.
-//
-// SCEI_PS4-DAG: @_ZTV11FooNoImport = external unnamed_addr constant {
-// SCEI_WI-DAG: @_ZTV11FooNoExport = dso_local unnamed_addr constant {
-
-// WI-DAG: @_ZTV11FooNoExport = dso_local unnamed_addr constant {
-// SCEI_PS4-DAG: @_ZTI11FooNoExport = constant {
-// SCEI_WI-DAG: @_ZTI11FooNoExport = dso_local constant {
-// WI-DAG: @_ZTI11FooNoExport = dso_local constant {
-struct FooNoExport
-{
- virtual void getId() const {}
- __declspec(dllexport) virtual void Bump();
- virtual void Decrement(); // Not exported.
- int mCounter;
-};
-
-void FooNoExport::Bump()
-{
- typeid(FooNoExport).name();
-}
diff --git a/clang/test/CodeGenCXX/template-param-objects-address-space.cpp b/clang/test/CodeGenCXX/template-param-objects-address-space.cpp
index b54dcfe77934..b3733decdb55 100644
--- a/clang/test/CodeGenCXX/template-param-objects-address-space.cpp
+++ b/clang/test/CodeGenCXX/template-param-objects-address-space.cpp
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -std=c++20 %s -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -triple spirv64-unknown-unknown -fsycl-is-device -std=c++20 %s -emit-llvm -o - | FileCheck %s --check-prefix=WITH-NONZERO-DEFAULT-AS
struct S { char buf[32]; };
template<S s> constexpr const char *begin() { return s.buf; }
@@ -8,25 +9,34 @@ extern const void *callee(const S*);
template<S s> constexpr const void* observable_addr() { return callee(&s); }
// CHECK: [[HELLO:@_ZTAXtl1StlA32_cLc104ELc101ELc108ELc108ELc111ELc32ELc119ELc111ELc114ELc108ELc100EEEE]]
+// WITH-NONZERO-DEFAULT-AS: [[HELLO:@_ZTAXtl1StlA32_cLc104ELc101ELc108ELc108ELc111ELc32ELc119ELc111ELc114ELc108ELc100EEEE]]
// CHECK-SAME: = linkonce_odr addrspace(1) constant { <{ [11 x i8], [21 x i8] }> } { <{ [11 x i8], [21 x i8] }> <{ [11 x i8] c"hello world", [21 x i8] zeroinitializer }> }, comdat
// CHECK: @p
// CHECK-SAME: addrspace(1) global ptr addrspacecast (ptr addrspace(1) [[HELLO]] to ptr)
+// WITH-NONZERO-DEFAULT-AS: addrspace(1) global ptr addrspace(4) addrspacecast (ptr addrspace(1) [[HELLO]] to ptr addrspace(4))
const char *p = begin<S{"hello world"}>();
// CHECK: @q
// CHECK-SAME: addrspace(1) global ptr addrspacecast (ptr addrspace(1) getelementptr (i8, ptr addrspace(1) [[HELLO]], i64 11) to ptr)
+// WITH-NONZERO-DEFAULT-AS: addrspace(1) global ptr addrspace(4) addrspacecast (ptr addrspace(1) getelementptr (i8, ptr addrspace(1) [[HELLO]], i64 11) to ptr addrspace(4))
const char *q = end<S{"hello world"}>();
const void *(*r)() = &retval<S{"hello world"}>;
// CHECK: @s
// CHECK-SAME: addrspace(1) global ptr null
+// WITH-NONZERO-DEFAULT-AS: addrspace(1) global ptr addrspace(4) null
const void *s = observable_addr<S{"hello world"}>();
// CHECK: define linkonce_odr noundef ptr @_Z6retvalIXtl1StlA32_cLc104ELc101ELc108ELc108ELc111ELc32ELc119ELc111ELc114ELc108ELc100EEEEEPKvv()
+// WITH-NONZERO-DEFAULT-AS: define linkonce_odr {{.*}} noundef ptr addrspace(4) @_Z6retvalIXtl1StlA32_cLc104ELc101ELc108ELc108ELc111ELc32ELc119ELc111ELc114ELc108ELc100EEEEEPKvv()
// CHECK: ret ptr addrspacecast (ptr addrspace(1) [[HELLO]] to ptr)
+// WITH-NONZERO-DEFAULT-AS: ret ptr addrspace(4) addrspacecast (ptr addrspace(1) [[HELLO]] to ptr addrspace(4))
// CHECK: define linkonce_odr noundef ptr @_Z15observable_addrIXtl1StlA32_cLc104ELc101ELc108ELc108ELc111ELc32ELc119ELc111ELc114ELc108ELc100EEEEEPKvv()
+// WITH-NONZERO-DEFAULT-AS: define linkonce_odr {{.*}} noundef ptr addrspace(4) @_Z15observable_addrIXtl1StlA32_cLc104ELc101ELc108ELc108ELc111ELc32ELc119ELc111ELc114ELc108ELc100EEEEEPKvv()
// CHECK: %call = call noundef ptr @_Z6calleePK1S(ptr noundef addrspacecast (ptr addrspace(1) [[HELLO]] to ptr))
+// WITH-NONZERO-DEFAULT-AS: %call = call {{.*}} noundef ptr addrspace(4) @_Z6calleePK1S(ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) [[HELLO]] to ptr addrspace(4)))
// CHECK: declare noundef ptr @_Z6calleePK1S(ptr noundef)
+// WITH-NONZERO-DEFAULT-AS: declare {{.*}} noundef ptr addrspace(4) @_Z6calleePK1S(ptr addrspace(4) noundef)
diff --git a/clang/test/CodeGenCXX/throw-expression-typeinfo-in-address-space.cpp b/clang/test/CodeGenCXX/throw-expression-typeinfo-in-address-space.cpp
index d8c23d427e67..3acbdd8fd97e 100644
--- a/clang/test/CodeGenCXX/throw-expression-typeinfo-in-address-space.cpp
+++ b/clang/test/CodeGenCXX/throw-expression-typeinfo-in-address-space.cpp
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 %s -triple amdgcn-amd-amdhsa -emit-llvm -fcxx-exceptions -fexceptions -std=c++11 -o - | FileCheck %s
+// RUN: %clang_cc1 %s -triple spirv64-unknown-unknown -fsycl-is-device -emit-llvm -fcxx-exceptions -fexceptions -std=c++11 -o - | FileCheck %s --check-prefix=WITH-NONZERO-DEFAULT-AS
struct X {
~X();
@@ -15,3 +16,4 @@ void f() {
}
// CHECK: declare void @__cxa_throw(ptr, ptr addrspace(1), ptr)
+// WITH-NONZERO-DEFAULT-AS: declare{{.*}} void @__cxa_throw(ptr addrspace(4), ptr addrspace(1), ptr addrspace(4))
diff --git a/clang/test/CodeGenCXX/try-catch-with-address-space.cpp b/clang/test/CodeGenCXX/try-catch-with-address-space.cpp
index 279d29f50fd4..412ac6c28725 100644
--- a/clang/test/CodeGenCXX/try-catch-with-address-space.cpp
+++ b/clang/test/CodeGenCXX/try-catch-with-address-space.cpp
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 %s -triple=amdgcn-amd-amdhsa -emit-llvm -o - -fcxx-exceptions -fexceptions | FileCheck %s
+// RUN: %clang_cc1 %s -triple=spirv64-unknown-unknown -fsycl-is-device -emit-llvm -o - -fcxx-exceptions -fexceptions | FileCheck %s --check-prefix=WITH-NONZERO-DEFAULT-AS
struct X { };
@@ -10,7 +11,8 @@ void f() {
// CHECK: ptr addrspace(1) @_ZTI1X
} catch (const X x) {
// CHECK: catch ptr addrspace(1) @_ZTI1X
- // CHECK: call i32 @llvm.eh.typeid.for(ptr addrspacecast (ptr addrspace(1) @_ZTI1X to ptr))
+ // CHECK: call i32 @llvm.eh.typeid.for.p0(ptr addrspacecast (ptr addrspace(1) @_ZTI1X to ptr))
+ // WITH-NONZERO-DEFAULT-AS: call i32 @llvm.eh.typeid.for.p4(ptr addrspace(4) addrspacecast (ptr addrspace(1) @_ZTI1X to ptr addrspace(4)))
}
}
@@ -20,6 +22,7 @@ void h() {
// CHECK: ptr addrspace(1) @_ZTIPKc
} catch (char const(&)[4]) {
// CHECK: catch ptr addrspace(1) @_ZTIA4_c
- // CHECK: call i32 @llvm.eh.typeid.for(ptr addrspacecast (ptr addrspace(1) @_ZTIA4_c to ptr))
+ // CHECK: call i32 @llvm.eh.typeid.for.p0(ptr addrspacecast (ptr addrspace(1) @_ZTIA4_c to ptr))
+ // WITH-NONZERO-DEFAULT-AS: call i32 @llvm.eh.typeid.for.p4(ptr addrspace(4) addrspacecast (ptr addrspace(1) @_ZTIA4_c to ptr addrspace(4)))
}
}
diff --git a/clang/test/CodeGenCXX/typeid-cxx11-with-address-space.cpp b/clang/test/CodeGenCXX/typeid-cxx11-with-address-space.cpp
index c4e7d36acff1..f6dc38ec9f29 100644
--- a/clang/test/CodeGenCXX/typeid-cxx11-with-address-space.cpp
+++ b/clang/test/CodeGenCXX/typeid-cxx11-with-address-space.cpp
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -I%S %s -triple amdgcn-amd-amdhsa -emit-llvm -std=c++11 -o - | FileCheck %s
+// RUN: %clang_cc1 -I%S %s -triple spirv64-unknown-unknown -fsycl-is-device -emit-llvm -std=c++11 -o - | FileCheck %s --check-prefix=WITH-NONZERO-DEFAULT-AS
#include <typeinfo>
namespace Test1 {
@@ -19,14 +20,17 @@ struct B : virtual A {};
struct C { int n; };
// CHECK: @_ZN5Test15itemsE ={{.*}} constant [4 x {{.*}}] [{{.*}} ptr addrspacecast (ptr addrspace(1) @_ZTIN5Test11AE to ptr), {{.*}} @_ZN5Test19make_implINS_1AEEEPvv {{.*}} ptr addrspacecast (ptr addrspace(1) @_ZTIN5Test11BE to ptr), {{.*}} @_ZN5Test19make_implINS_1BEEEPvv {{.*}} ptr addrspacecast (ptr addrspace(1) @_ZTIN5Test11CE to ptr), {{.*}} @_ZN5Test19make_implINS_1CEEEPvv {{.*}} ptr addrspacecast (ptr addrspace(1) @_ZTIi to ptr), {{.*}} @_ZN5Test19make_implIiEEPvv }]
+// WITH-NONZERO-DEFAULT-AS: @_ZN5Test15itemsE ={{.*}} addrspace(1) constant [4 x {{.*}}] [{{.*}} ptr addrspace(4) addrspacecast (ptr addrspace(1) @_ZTIN5Test11AE to ptr addrspace(4)), {{.*}} @_ZN5Test19make_implINS_1AEEEPvv {{.*}} ptr addrspace(4) addrspacecast (ptr addrspace(1) @_ZTIN5Test11BE to ptr addrspace(4)), {{.*}} @_ZN5Test19make_implINS_1BEEEPvv {{.*}} ptr addrspace(4) addrspacecast (ptr addrspace(1) @_ZTIN5Test11CE to ptr addrspace(4)), {{.*}} @_ZN5Test19make_implINS_1CEEEPvv {{.*}} ptr addrspace(4) addrspacecast (ptr addrspace(1) @_ZTIi to ptr addrspace(4)), {{.*}} @_ZN5Test19make_implIiEEPvv }]
extern constexpr Item items[] = {
item<A>("A"), item<B>("B"), item<C>("C"), item<int>("int")
};
// CHECK: @_ZN5Test11xE ={{.*}} constant ptr addrspacecast (ptr addrspace(1) @_ZTIN5Test11AE to ptr), align 8
+// WITH-NONZERO-DEFAULT-AS: @_ZN5Test11xE ={{.*}} addrspace(1) constant ptr addrspace(4) addrspacecast (ptr addrspace(1) @_ZTIN5Test11AE to ptr addrspace(4)), align 8
constexpr auto &x = items[0].ti;
// CHECK: @_ZN5Test11yE ={{.*}} constant ptr addrspacecast (ptr addrspace(1) @_ZTIN5Test11BE to ptr), align 8
+// WITH-NONZERO-DEFAULT-AS: @_ZN5Test11yE ={{.*}} addrspace(1) constant ptr addrspace(4) addrspacecast (ptr addrspace(1) @_ZTIN5Test11BE to ptr addrspace(4)), align 8
constexpr auto &y = typeid(B{});
}
diff --git a/clang/test/CodeGenCXX/typeid-with-address-space.cpp b/clang/test/CodeGenCXX/typeid-with-address-space.cpp
index b439770a8b63..98af17f4fc88 100644
--- a/clang/test/CodeGenCXX/typeid-with-address-space.cpp
+++ b/clang/test/CodeGenCXX/typeid-with-address-space.cpp
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -I%S %s -triple amdgcn-amd-amdhsa -emit-llvm -fcxx-exceptions -fexceptions -o - | FileCheck %s
+// RUN: %clang_cc1 -I%S %s -triple spirv64-unknown-unknown -fsycl-is-device -emit-llvm -fcxx-exceptions -fexceptions -o - | FileCheck %s --check-prefix=WITH-NONZERO-DEFAULT-AS
#include <typeinfo>
namespace Test1 {
@@ -7,19 +8,23 @@ namespace Test1 {
struct A { virtual void f(); };
// CHECK: @_ZN5Test16int_tiE ={{.*}} constant ptr addrspacecast (ptr addrspace(1) @_ZTIi to ptr), align 8
+// WITH-NONZERO-DEFAULT-AS: @_ZN5Test16int_tiE ={{.*}} constant ptr addrspace(4) addrspacecast (ptr addrspace(1) @_ZTIi to ptr addrspace(4)), align 8
const std::type_info &int_ti = typeid(int);
// CHECK: @_ZN5Test14A_tiE ={{.*}} constant ptr addrspacecast (ptr addrspace(1) @_ZTIN5Test11AE to ptr), align 8
+// WITH-NONZERO-DEFAULT-AS: @_ZN5Test14A_tiE ={{.*}} constant ptr addrspace(4) addrspacecast (ptr addrspace(1) @_ZTIN5Test11AE to ptr addrspace(4)), align 8
const std::type_info &A_ti = typeid(const volatile A &);
volatile char c;
// CHECK: @_ZN5Test14c_tiE ={{.*}} constant ptr addrspacecast (ptr addrspace(1) @_ZTIc to ptr), align 8
+// WITH-NONZERO-DEFAULT-AS: @_ZN5Test14c_tiE ={{.*}} constant ptr addrspace(4) addrspacecast (ptr addrspace(1) @_ZTIc to ptr addrspace(4)), align 8
const std::type_info &c_ti = typeid(c);
extern const double &d;
// CHECK: @_ZN5Test14d_tiE ={{.*}} constant ptr addrspacecast (ptr addrspace(1) @_ZTId to ptr), align 8
+// WITH-NONZERO-DEFAULT-AS: @_ZN5Test14d_tiE ={{.*}} constant ptr addrspace(4) addrspacecast (ptr addrspace(1) @_ZTId to ptr addrspace(4)), align 8
const std::type_info &d_ti = typeid(d);
extern A &a;
@@ -28,18 +33,24 @@ extern A &a;
const std::type_info &a_ti = typeid(a);
// CHECK: @_ZN5Test18A10_c_tiE ={{.*}} constant ptr addrspacecast (ptr addrspace(1) @_ZTIA10_c to ptr), align 8
+// WITH-NONZERO-DEFAULT-AS: @_ZN5Test18A10_c_tiE ={{.*}} constant ptr addrspace(4) addrspacecast (ptr addrspace(1) @_ZTIA10_c to ptr addrspace(4)), align 8
const std::type_info &A10_c_ti = typeid(char const[10]);
// CHECK-LABEL: define{{.*}} ptr @_ZN5Test11fEv
// CHECK-SAME: personality ptr @__gxx_personality_v0
+// WITH-NONZERO-DEFAULT-AS-LABEL: define{{.*}} ptr addrspace(4) @_ZN5Test11fEv
+// WITH-NONZERO-DEFAULT-AS-SAME: personality ptr @__gxx_personality_v0
const char *f() {
try {
// CHECK: br i1
// CHECK: invoke void @__cxa_bad_typeid() [[NR:#[0-9]+]]
+ // WITH-NONZERO-DEFAULT-AS: invoke{{.*}} void @__cxa_bad_typeid() [[NR:#[0-9]+]]
return typeid(*static_cast<A *>(0)).name();
} catch (...) {
// CHECK: landingpad { ptr, i32 }
// CHECK-NEXT: catch ptr null
+ // WITH-NONZERO-DEFAULT-AS: landingpad { ptr addrspace(4), i32 }
+ // WITH-NONZERO-DEFAULT-AS-NEXT: catch ptr addrspace(4) null
}
return 0;
diff --git a/clang/test/CodeGenCXX/typeinfo-with-address-space.cpp b/clang/test/CodeGenCXX/typeinfo-with-address-space.cpp
index 80f6ab0903e5..350303cc6e9b 100644
--- a/clang/test/CodeGenCXX/typeinfo-with-address-space.cpp
+++ b/clang/test/CodeGenCXX/typeinfo-with-address-space.cpp
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -I%S %s -triple amdgcn-amd-amdhsa -emit-llvm -o - | FileCheck %s -check-prefix=AS
+// RUN: %clang_cc1 -I%S %s -triple spirv64-unknown-unknown -fsycl-is-device -emit-llvm -o - | FileCheck %s -check-prefix=NONZERO-DEFAULT-AS
// RUN: %clang_cc1 -I%S %s -triple x86_64-linux-gnu -emit-llvm -o - | FileCheck %s -check-prefix=NO-AS
#include <typeinfo>
@@ -25,24 +26,30 @@ class B : A {
unsigned long Fn(B& b) {
// AS: %call = call noundef zeroext i1 @_ZNKSt9type_infoeqERKS_(ptr {{.*}} addrspacecast (ptr addrspace(1) @_ZTISt9type_info to ptr), ptr {{.*}} %2)
+// NONZERO-DEFAULT-AS: %call = call{{.*}} noundef zeroext i1 @_ZNKSt9type_infoeqERKS_(ptr addrspace(4) {{.*}} addrspacecast (ptr addrspace(1) @_ZTISt9type_info to ptr addrspace(4)), ptr addrspace(4) {{.*}} %2)
// NO-AS: %call = call noundef zeroext i1 @_ZNKSt9type_infoeqERKS_(ptr {{.*}} @_ZTISt9type_info, ptr {{.*}} %2)
if (typeid(std::type_info) == typeid(b))
return 42;
// AS: %call2 = call noundef zeroext i1 @_ZNKSt9type_infoneERKS_(ptr {{.*}} addrspacecast (ptr addrspace(1) @_ZTIi to ptr), ptr {{.*}} %5)
+// NONZERO-DEFAULT-AS: %call2 = call{{.*}} noundef zeroext i1 @_ZNKSt9type_infoneERKS_(ptr addrspace(4) {{.*}} addrspacecast (ptr addrspace(1) @_ZTIi to ptr addrspace(4)), ptr addrspace(4) {{.*}} %5)
// NO-AS: %call2 = call noundef zeroext i1 @_ZNKSt9type_infoneERKS_(ptr {{.*}} @_ZTIi, ptr {{.*}} %5)
if (typeid(int) != typeid(b))
return 1712;
// AS: %call5 = call noundef ptr @_ZNKSt9type_info4nameEv(ptr {{.*}} addrspacecast (ptr addrspace(1) @_ZTI1A to ptr))
+// NONZERO-DEFAULT-AS: %call5 = call{{.*}} noundef ptr addrspace(4) @_ZNKSt9type_info4nameEv(ptr addrspace(4) {{.*}} addrspacecast (ptr addrspace(1) @_ZTI1A to ptr addrspace(4)))
// NO-AS: %call5 = call noundef ptr @_ZNKSt9type_info4nameEv(ptr {{.*}} @_ZTI1A)
// AS: %call7 = call noundef ptr @_ZNKSt9type_info4nameEv(ptr {{.*}} %8)
+// NONZERO-DEFAULT-AS: %call7 = call{{.*}} noundef ptr addrspace(4) @_ZNKSt9type_info4nameEv(ptr addrspace(4) {{.*}} %8)
// NO-AS: %call7 = call noundef ptr @_ZNKSt9type_info4nameEv(ptr {{.*}} %8)
if (typeid(A).name() == typeid(b).name())
return 0;
// AS: %call11 = call noundef zeroext i1 @_ZNKSt9type_info6beforeERKS_(ptr {{.*}} %11, ptr {{.*}} addrspacecast (ptr addrspace(1) @_ZTIf to ptr))
+// NONZERO-DEFAULT-AS: %call11 = call{{.*}} noundef zeroext i1 @_ZNKSt9type_info6beforeERKS_(ptr addrspace(4) {{.*}} %11, ptr addrspace(4) {{.*}} addrspacecast (ptr addrspace(1) @_ZTIf to ptr addrspace(4)))
// NO-AS: %call11 = call noundef zeroext i1 @_ZNKSt9type_info6beforeERKS_(ptr {{.*}} %11, ptr {{.*}} @_ZTIf)
if (typeid(b).before(typeid(float)))
return 1;
// AS: %call15 = call noundef i64 @_ZNKSt9type_info9hash_codeEv(ptr {{.*}} %14)
+// NONZERO-DEFAULT-AS: %call15 = call{{.*}} noundef i64 @_ZNKSt9type_info9hash_codeEv(ptr addrspace(4) {{.*}} %14)
// NO-AS: %call15 = call noundef i64 @_ZNKSt9type_info9hash_codeEv(ptr {{.*}} %14)
return typeid(b).hash_code();
}
diff --git a/clang/test/CodeGenCXX/vtable-assume-load-address-space.cpp b/clang/test/CodeGenCXX/vtable-assume-load-address-space.cpp
index d765fe94d9b0..ecafa99d8be0 100644
--- a/clang/test/CodeGenCXX/vtable-assume-load-address-space.cpp
+++ b/clang/test/CodeGenCXX/vtable-assume-load-address-space.cpp
@@ -1,14 +1,17 @@
// RUN: %clang_cc1 %s -triple=amdgcn-amd-amdhsa -std=c++11 -emit-llvm -o %t.ll -O1 -disable-llvm-passes -fms-extensions -fstrict-vtable-pointers
+// RUN: %clang_cc1 %s -triple i686-pc-win32 -emit-llvm -o %t.ms.ll -O1 -disable-llvm-passes -fms-extensions -fstrict-vtable-pointers
+// RUN: %clang_cc1 %s -triple=spirv64-unknown-unknown -fsycl-is-device -std=c++11 -emit-llvm -o %t.ll -O1 -disable-llvm-passes -fms-extensions -fstrict-vtable-pointers
// FIXME: Assume load should not require -fstrict-vtable-pointers
// RUN: FileCheck --check-prefix=CHECK1 --input-file=%t.ll %s
// RUN: FileCheck --check-prefix=CHECK2 --input-file=%t.ll %s
// RUN: FileCheck --check-prefix=CHECK3 --input-file=%t.ll %s
// RUN: FileCheck --check-prefix=CHECK4 --input-file=%t.ll %s
-// RUN: FileCheck --check-prefix=CHECK5 --input-file=%t.ll %s
+// RUN: FileCheck --check-prefix=CHECK-MS --input-file=%t.ms.ll %s
// RUN: FileCheck --check-prefix=CHECK6 --input-file=%t.ll %s
// RUN: FileCheck --check-prefix=CHECK7 --input-file=%t.ll %s
// RUN: FileCheck --check-prefix=CHECK8 --input-file=%t.ll %s
+// RUN: FileCheck --check-prefix=CHECK9 --input-file=%t.ll %s
namespace test1 {
struct A {
@@ -23,8 +26,8 @@ struct B : A {
void g(A *a) { a->foo(); }
// CHECK1-LABEL: define{{.*}} void @_ZN5test14fooAEv()
-// CHECK1: call void @_ZN5test11AC1Ev(ptr
-// CHECK1: %[[VTABLE:.*]] = load ptr addrspace(1), ptr %{{.*}}
+// CHECK1: call{{.*}} void @_ZN5test11AC1Ev(ptr {{((addrspace(4)){0,1})}}
+// CHECK1: %[[VTABLE:.*]] = load ptr addrspace(1), ptr {{((addrspace(4)){0,1})}}{{.*}}%{{.*}}
// CHECK1: %[[CMP:.*]] = icmp eq ptr addrspace(1) %[[VTABLE]], getelementptr inbounds inrange(-16, 8) ({ [3 x ptr addrspace(1)] }, ptr addrspace(1) @_ZTVN5test11AE, i32 0, i32 0, i32 2)
// CHECK1: call void @llvm.assume(i1 %[[CMP]])
// CHECK1-LABEL: {{^}}}
@@ -35,8 +38,8 @@ void fooA() {
}
// CHECK1-LABEL: define{{.*}} void @_ZN5test14fooBEv()
-// CHECK1: call void @_ZN5test11BC1Ev(ptr {{[^,]*}} %{{.*}})
-// CHECK1: %[[VTABLE:.*]] = load ptr addrspace(1), ptr %{{.*}}
+// CHECK1: call{{.*}} void @_ZN5test11BC1Ev(ptr {{[^,]*}} %{{.*}})
+// CHECK1: %[[VTABLE:.*]] = load ptr addrspace(1), ptr {{((addrspace(4)){0,1})}}{{.*}}%{{.*}}
// CHECK1: %[[CMP:.*]] = icmp eq ptr addrspace(1) %[[VTABLE]], getelementptr inbounds inrange(-16, 8) ({ [3 x ptr addrspace(1)] }, ptr addrspace(1) @_ZTVN5test11BE, i32 0, i32 0, i32 2)
// CHECK1: call void @llvm.assume(i1 %[[CMP]])
// CHECK1-LABEL: {{^}}}
@@ -46,7 +49,7 @@ void fooB() {
g(&b);
}
// there should not be any assumes in the ctor that calls base ctor
-// CHECK1-LABEL: define linkonce_odr void @_ZN5test11BC2Ev(ptr
+// CHECK1-LABEL: define linkonce_odr{{.*}} void @_ZN5test11BC2Ev(ptr
// CHECK1-NOT: @llvm.assume(
// CHECK1-LABEL: {{^}}}
}
@@ -69,17 +72,17 @@ void g(A *a) { a->foo(); }
void h(B *b) { b->bar(); }
// CHECK2-LABEL: define{{.*}} void @_ZN5test24testEv()
-// CHECK2: call void @_ZN5test21CC1Ev(ptr
+// CHECK2: call{{.*}} void @_ZN5test21CC1Ev(ptr
// CHECK2: %[[VTABLE:.*]] = load ptr addrspace(1), ptr {{.*}}
// CHECK2: %[[CMP:.*]] = icmp eq ptr addrspace(1) %[[VTABLE]], getelementptr inbounds inrange(-16, 8) ({ [3 x ptr addrspace(1)], [3 x ptr addrspace(1)] }, ptr addrspace(1) @_ZTVN5test21CE, i32 0, i32 0, i32 2)
// CHECK2: call void @llvm.assume(i1 %[[CMP]])
-// CHECK2: %[[ADD_PTR:.*]] = getelementptr inbounds i8, ptr %{{.*}}, i64 8
-// CHECK2: %[[VTABLE2:.*]] = load ptr addrspace(1), ptr %[[ADD_PTR]]
+// CHECK2: %[[ADD_PTR:.*]] = getelementptr inbounds i8, ptr {{((addrspace(4)){0,1})}}{{.*}}%{{.*}}, i64 8
+// CHECK2: %[[VTABLE2:.*]] = load ptr addrspace(1), ptr {{((addrspace(4)){0,1})}}{{.*}}%[[ADD_PTR]]
// CHECK2: %[[CMP2:.*]] = icmp eq ptr addrspace(1) %[[VTABLE2]], getelementptr inbounds inrange(-16, 8) ({ [3 x ptr addrspace(1)], [3 x ptr addrspace(1)] }, ptr addrspace(1) @_ZTVN5test21CE, i32 0, i32 1, i32 2)
// CHECK2: call void @llvm.assume(i1 %[[CMP2]])
-// CHECK2: call void @_ZN5test21gEPNS_1AE(
+// CHECK2: call{{.*}} void @_ZN5test21gEPNS_1AE(
// CHECK2-LABEL: {{^}}}
void test() {
@@ -106,7 +109,7 @@ struct C : virtual A, B {
void g(B *a) { a->foo(); }
// CHECK3-LABEL: define{{.*}} void @_ZN5test34testEv()
-// CHECK3: call void @_ZN5test31CC1Ev(ptr
+// CHECK3: call{{.*}} void @_ZN5test31CC1Ev(ptr
// CHECK3: %[[CMP:.*]] = icmp eq ptr addrspace(1) %{{.*}}, getelementptr inbounds inrange(-24, 8) ({ [4 x ptr addrspace(1)] }, ptr addrspace(1) @_ZTVN5test31CE, i32 0, i32 0, i32 3)
// CHECK3: call void @llvm.assume(i1 %[[CMP]])
// CHECK3-LABEL: }
@@ -134,12 +137,12 @@ struct C : B {
void g(C *c) { c->foo(); }
// CHECK4-LABEL: define{{.*}} void @_ZN5test44testEv()
-// CHECK4: call void @_ZN5test41CC1Ev(ptr
-// CHECK4: %[[VTABLE:.*]] = load ptr addrspace(1), ptr %{{.*}}
+// CHECK4: call{{.*}} void @_ZN5test41CC1Ev(ptr
+// CHECK4: %[[VTABLE:.*]] = load ptr addrspace(1), ptr {{((addrspace(4)){0,1})}}{{.*}}%{{.*}}
// CHECK4: %[[CMP:.*]] = icmp eq ptr addrspace(1) %[[VTABLE]], getelementptr inbounds inrange(-32, 8) ({ [5 x ptr addrspace(1)] }, ptr addrspace(1) @_ZTVN5test41CE, i32 0, i32 0, i32 4)
// CHECK4: call void @llvm.assume(i1 %[[CMP]]
-// CHECK4: %[[VTABLE2:.*]] = load ptr addrspace(1), ptr %{{.*}}
+// CHECK4: %[[VTABLE2:.*]] = load ptr addrspace(1), ptr {{((addrspace(4)){0,1})}}{{.*}}%{{.*}}
// CHECK4: %[[CMP2:.*]] = icmp eq ptr addrspace(1) %[[VTABLE2]], getelementptr inbounds inrange(-32, 8) ({ [5 x ptr addrspace(1)] }, ptr addrspace(1) @_ZTVN5test41CE, i32 0, i32 0, i32 4)
// CHECK4: call void @llvm.assume(i1 %[[CMP2]])
// CHECK4-LABEL: {{^}}}
@@ -150,6 +153,27 @@ void test() {
}
} // test4
+namespace testMS {
+
+struct __declspec(novtable) S {
+ virtual void foo();
+};
+
+void g(S &s) { s.foo(); }
+
+// If a struct has the novtable specifier, we can't generate assumes.
+// CHECK-MS-LABEL: define dso_local void @"?test@testMS@@YAXXZ"()
+// CHECK-MS: call x86_thiscallcc noundef ptr @"??0S@testMS@@QAE@XZ"(
+// CHECK-MS-NOT: @llvm.assume
+// CHECK-MS-LABEL: {{^}}}
+
+void test() {
+ S s;
+ g(s);
+}
+
+} // testMS
+
namespace test6 {
struct A {
A();
@@ -161,17 +185,17 @@ struct B : A {
};
// FIXME: Because A's vtable is external, and no virtual functions are hidden,
// it's safe to generate assumption loads.
-// CHECK5-LABEL: define{{.*}} void @_ZN5test61gEv()
-// CHECK5: call void @_ZN5test61AC1Ev(
-// CHECK5-NOT: call void @llvm.assume(
+// CHECK6-LABEL: define{{.*}} void @_ZN5test61gEv()
+// CHECK6: call{{.*}} void @_ZN5test61AC1Ev(
+// CHECK6-NOT: call void @llvm.assume(
// We can't emit assumption loads for B, because if we referred to the vtable
// it would refer to functions that cannot be found (like the implicit
// inline destructor).
-// CHECK5-LABEL: call void @_ZN5test61BC1Ev(
-// CHECK5-NOT: call void @llvm.assume(
-// CHECK5-LABEL: {{^}}}
+// CHECK6-LABEL: call{{.*}} void @_ZN5test61BC1Ev(
+// CHECK6-NOT: call void @llvm.assume(
+// CHECK6-LABEL: {{^}}}
void g() {
A *a = new A;
B *b = new B;
@@ -180,7 +204,7 @@ void g() {
namespace test7 {
// Because A's key function is defined here, the vtable is generated in this TU
-// CHECK6: @_ZTVN5test71AE ={{.*}} unnamed_addr addrspace(1) constant
+// CHECK7: @_ZTVN5test71AE ={{.*}} unnamed_addr addrspace(1) constant
struct A {
A();
virtual void foo();
@@ -188,10 +212,10 @@ struct A {
};
void A::foo() {}
-// CHECK6-LABEL: define{{.*}} void @_ZN5test71gEv()
-// CHECK6: call void @_ZN5test71AC1Ev(
-// CHECK6: call void @llvm.assume(
-// CHECK6-LABEL: {{^}}}
+// CHECK7-LABEL: define{{.*}} void @_ZN5test71gEv()
+// CHECK7: call{{.*}} void @_ZN5test71AC1Ev(
+// CHECK7: call void @llvm.assume(
+// CHECK7-LABEL: {{^}}}
void g() {
A *a = new A();
a->bar();
@@ -205,14 +229,14 @@ struct A {
virtual void bar();
};
-// CHECK7-DAG: @_ZTVN5test81BE = available_externally unnamed_addr addrspace(1) constant
+// CHECK8-DAG: @_ZTVN5test81BE = available_externally unnamed_addr addrspace(1) constant
struct B : A {
B();
void foo();
void bar();
};
-// CHECK7-DAG: @_ZTVN5test81CE = linkonce_odr unnamed_addr addrspace(1) constant
+// CHECK8-DAG: @_ZTVN5test81CE = linkonce_odr unnamed_addr addrspace(1) constant
struct C : A {
C();
void bar();
@@ -227,14 +251,14 @@ struct D : A {
};
void D::bar() {}
-// CHECK7-DAG: @_ZTVN5test81EE = linkonce_odr unnamed_addr addrspace(1) constant
+// CHECK8-DAG: @_ZTVN5test81EE = linkonce_odr unnamed_addr addrspace(1) constant
struct E : A {
E();
};
-// CHECK7-LABEL: define{{.*}} void @_ZN5test81bEv()
-// CHECK7: call void @llvm.assume(
-// CHECK7-LABEL: {{^}}}
+// CHECK8-LABEL: define{{.*}} void @_ZN5test81bEv()
+// CHECK8: call void @llvm.assume(
+// CHECK8-LABEL: {{^}}}
void b() {
B b;
b.bar();
@@ -243,26 +267,26 @@ void b() {
// FIXME: C has inline virtual functions, which prohibits us from generating
// assumption loads, but because the vtable is generated in this TU (key function
// defined here) it would be correct to refer to it.
-// CHECK7-LABEL: define{{.*}} void @_ZN5test81cEv()
-// CHECK7-NOT: call void @llvm.assume(
-// CHECK7-LABEL: {{^}}}
+// CHECK8-LABEL: define{{.*}} void @_ZN5test81cEv()
+// CHECK8-NOT: call void @llvm.assume(
+// CHECK8-LABEL: {{^}}}
void c() {
C c;
c.bar();
}
// FIXME: We could generate assumption loads here.
-// CHECK7-LABEL: define{{.*}} void @_ZN5test81dEv()
-// CHECK7-NOT: call void @llvm.assume(
-// CHECK7-LABEL: {{^}}}
+// CHECK8-LABEL: define{{.*}} void @_ZN5test81dEv()
+// CHECK8-NOT: call void @llvm.assume(
+// CHECK8-LABEL: {{^}}}
void d() {
D d;
d.bar();
}
-// CHECK7-LABEL: define{{.*}} void @_ZN5test81eEv()
-// CHECK7: call void @llvm.assume(
-// CHECK7-LABEL: {{^}}}
+// CHECK8-LABEL: define{{.*}} void @_ZN5test81eEv()
+// CHECK8: call void @llvm.assume(
+// CHECK8-LABEL: {{^}}}
void e() {
E e;
e.bar();
@@ -276,9 +300,9 @@ struct S {
__attribute__((visibility("hidden"))) virtual void doStuff();
};
-// CHECK8-LABEL: define{{.*}} void @_ZN5test94testEv()
-// CHECK8-NOT: @llvm.assume(
-// CHECK8: }
+// CHECK9-LABEL: define{{.*}} void @_ZN5test94testEv()
+// CHECK9-NOT: @llvm.assume(
+// CHECK9: }
void test() {
S *s = new S();
s->doStuff();
diff --git a/clang/test/CodeGenCXX/vtable-pointer-initialization-address-space.cpp b/clang/test/CodeGenCXX/vtable-pointer-initialization-address-space.cpp
index a3f12f0ebfc8..876d0845cc51 100644
--- a/clang/test/CodeGenCXX/vtable-pointer-initialization-address-space.cpp
+++ b/clang/test/CodeGenCXX/vtable-pointer-initialization-address-space.cpp
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 %s -triple=amdgcn-amd-amdhsa -std=c++11 -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 %s -triple=spirv64-unknown-unknown -fsycl-is-device -std=c++11 -emit-llvm -o - | FileCheck %s --check-prefix=WITH-NONZERO-DEFAULT-AS
struct Field {
Field();
@@ -24,6 +25,7 @@ struct A : Base {
// CHECK: store ptr addrspace(1) getelementptr inbounds inrange(-16, 8) ({ [3 x ptr addrspace(1)] }, ptr addrspace(1) @_ZTV1A, i32 0, i32 0, i32 2)
// CHECK: call void @_ZN5FieldC1Ev(
// CHECK: ret void
+// WITH-NONZERO-DEFAULT-AS-LABEL: define{{.*}} void @_ZN1AC2Ev(ptr addrspace(4) {{[^,]*}} %this) unnamed_addr
A::A() { }
// CHECK-LABEL: define{{.*}} void @_ZN1AD2Ev(ptr {{[^,]*}} %this) unnamed_addr
@@ -31,6 +33,7 @@ A::A() { }
// CHECK: call void @_ZN5FieldD1Ev(
// CHECK: call void @_ZN4BaseD2Ev(
// CHECK: ret void
+// WITH-NONZERO-DEFAULT-AS-LABEL: define{{.*}} void @_ZN1AD2Ev(ptr addrspace(4) {{[^,]*}} %this) unnamed_addr
A::~A() { }
struct B : Base {
@@ -43,18 +46,22 @@ void f() { B b; }
// CHECK-LABEL: define linkonce_odr void @_ZN1BC1Ev(ptr {{[^,]*}} %this) unnamed_addr
// CHECK: call void @_ZN1BC2Ev(
+// WITH-NONZERO-DEFAULT-AS-LABEL: define linkonce_odr{{.*}} void @_ZN1BC1Ev(ptr addrspace(4) {{[^,]*}} %this) unnamed_addr
// CHECK-LABEL: define linkonce_odr void @_ZN1BD1Ev(ptr {{[^,]*}} %this) unnamed_addr
// CHECK: call void @_ZN1BD2Ev(
+// WITH-NONZERO-DEFAULT-AS-LABEL: define linkonce_odr{{.*}} void @_ZN1BD1Ev(ptr addrspace(4) {{[^,]*}} %this) unnamed_addr
// CHECK-LABEL: define linkonce_odr void @_ZN1BC2Ev(ptr {{[^,]*}} %this) unnamed_addr
// CHECK: call void @_ZN4BaseC2Ev(
// CHECK: store ptr addrspace(1) getelementptr inbounds inrange(-16, 8) ({ [3 x ptr addrspace(1)] }, ptr addrspace(1) @_ZTV1B, i32 0, i32 0, i32 2)
// CHECK: call void @_ZN5FieldC1Ev
// CHECK: ret void
+// WITH-NONZERO-DEFAULT-AS-LABEL: define linkonce_odr{{.*}} void @_ZN1BC2Ev(ptr addrspace(4) {{[^,]*}} %this) unnamed_addr
// CHECK-LABEL: define linkonce_odr void @_ZN1BD2Ev(ptr {{[^,]*}} %this) unnamed_addr
// CHECK: store ptr addrspace(1) getelementptr inbounds inrange(-16, 8) ({ [3 x ptr addrspace(1)] }, ptr addrspace(1) @_ZTV1B, i32 0, i32 0, i32 2)
// CHECK: call void @_ZN5FieldD1Ev(
// CHECK: call void @_ZN4BaseD2Ev(
// CHECK: ret void
+// WITH-NONZERO-DEFAULT-AS-LABEL: define linkonce_odr{{.*}} void @_ZN1BD2Ev(ptr addrspace(4) {{[^,]*}} %this) unnamed_addr
diff --git a/clang/test/CodeGenCXX/vtt-address-space.cpp b/clang/test/CodeGenCXX/vtt-address-space.cpp
index 24f4e2a755da..4c3d0a534611 100644
--- a/clang/test/CodeGenCXX/vtt-address-space.cpp
+++ b/clang/test/CodeGenCXX/vtt-address-space.cpp
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 %s -triple=amdgcn-amd-amdhsa -std=c++11 -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 %s -triple=spirv64-unknown-unknown -fsycl-is-device -std=c++11 -emit-llvm -o - | FileCheck %s --check-prefix=WITH-NONZERO-DEFAULT-AS
// This is the sample from the C++ Itanium ABI, p2.6.2.
namespace Test {
@@ -25,3 +26,9 @@ namespace Test {
// CHECK: define linkonce_odr void @_ZN4Test2V2C2Ev(ptr noundef nonnull align 8 dereferenceable(20) %this, ptr addrspace(1) noundef %vtt)
// CHECK: define linkonce_odr void @_ZN4Test2C1C2Ev(ptr noundef nonnull align 8 dereferenceable(12) %this, ptr addrspace(1) noundef %vtt)
// CHECK: define linkonce_odr void @_ZN4Test2C2C2Ev(ptr noundef nonnull align 8 dereferenceable(12) %this, ptr addrspace(1) noundef %vtt)
+// WITH-NONZERO-DEFAULT-AS: call {{.*}} void @_ZN4Test2V2C2Ev(ptr addrspace(4) noundef align 8 dereferenceable_or_null(20) %2, ptr addrspace(1) noundef getelementptr inbounds ([13 x ptr addrspace(1)], ptr addrspace(1) @_ZTTN4Test1DE, i64 0, i64 11))
+// WITH-NONZERO-DEFAULT-AS: call {{.*}} void @_ZN4Test2C1C2Ev(ptr addrspace(4) noundef align 8 dereferenceable_or_null(12) %this1, ptr addrspace(1) noundef getelementptr inbounds ([13 x ptr addrspace(1)], ptr addrspace(1) @_ZTTN4Test1DE, i64 0, i64 1))
+// WITH-NONZERO-DEFAULT-AS: call {{.*}} void @_ZN4Test2C2C2Ev(ptr addrspace(4) noundef align 8 dereferenceable_or_null(12) %3, ptr addrspace(1) noundef getelementptr inbounds ([13 x ptr addrspace(1)], ptr addrspace(1) @_ZTTN4Test1DE, i64 0, i64 3))
+// WITH-NONZERO-DEFAULT-AS: define linkonce_odr {{.*}} void @_ZN4Test2V2C2Ev(ptr addrspace(4) noundef align 8 dereferenceable_or_null(20) %this, ptr addrspace(1) noundef %vtt)
+// WITH-NONZERO-DEFAULT-AS: define linkonce_odr {{.*}} void @_ZN4Test2C1C2Ev(ptr addrspace(4) noundef align 8 dereferenceable_or_null(12) %this, ptr addrspace(1) noundef %vtt)
+// WITH-NONZERO-DEFAULT-AS: define linkonce_odr {{.*}} void @_ZN4Test2C2C2Ev(ptr addrspace(4) noundef align 8 dereferenceable_or_null(12) %this, ptr addrspace(1) noundef %vtt)
diff --git a/clang/test/CodeGenCXX/wasm-eh.cpp b/clang/test/CodeGenCXX/wasm-eh.cpp
index af023f52191b..9dc15633bfed 100644
--- a/clang/test/CodeGenCXX/wasm-eh.cpp
+++ b/clang/test/CodeGenCXX/wasm-eh.cpp
@@ -1,4 +1,8 @@
// REQUIRES: webassembly-registered-target
+
+// RUN: %clang -E -dM %s -target wasm32-unknown-unknown -fwasm-exceptions | FileCheck %s -check-prefix PREPROCESSOR
+// PREPROCESSOR: #define __WASM_EXCEPTIONS__ 1
+
// RUN: %clang_cc1 %s -triple wasm32-unknown-unknown -fms-extensions -fexceptions -fcxx-exceptions -mllvm -wasm-enable-eh -exception-model=wasm -target-feature +exception-handling -emit-llvm -o - -std=c++11 | FileCheck %s
// RUN: %clang_cc1 %s -triple wasm64-unknown-unknown -fms-extensions -fexceptions -fcxx-exceptions -mllvm -wasm-enable-eh -exception-model=wasm -target-feature +exception-handling -emit-llvm -o - -std=c++11 | FileCheck %s
@@ -34,7 +38,7 @@ void test0() {
// CHECK-NEXT: %[[EXN:.*]] = call ptr @llvm.wasm.get.exception(token %[[CATCHPAD]])
// CHECK-NEXT: store ptr %[[EXN]], ptr %exn.slot
// CHECK-NEXT: %[[SELECTOR:.*]] = call i32 @llvm.wasm.get.ehselector(token %[[CATCHPAD]])
-// CHECK-NEXT: %[[TYPEID:.*]] = call i32 @llvm.eh.typeid.for(ptr @_ZTIi) #7
+// CHECK-NEXT: %[[TYPEID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi) #7
// CHECK-NEXT: %[[MATCHES:.*]] = icmp eq i32 %[[SELECTOR]], %[[TYPEID]]
// CHECK-NEXT: br i1 %[[MATCHES]], label %[[CATCH_INT_BB:.*]], label %[[CATCH_FALLTHROUGH_BB:.*]]
@@ -51,7 +55,7 @@ void test0() {
// CHECK-NEXT: br label %[[TRY_CONT_BB:.*]]
// CHECK: [[CATCH_FALLTHROUGH_BB]]
-// CHECK-NEXT: %[[TYPEID:.*]] = call i32 @llvm.eh.typeid.for(ptr @_ZTId) #7
+// CHECK-NEXT: %[[TYPEID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTId) #7
// CHECK-NEXT: %[[MATCHES:.*]] = icmp eq i32 %[[SELECTOR]], %[[TYPEID]]
// CHECK-NEXT: br i1 %[[MATCHES]], label %[[CATCH_FLOAT_BB:.*]], label %[[RETHROW_BB:.*]]
diff --git a/clang/test/CodeGenCXX/weak-external.cpp b/clang/test/CodeGenCXX/weak-external.cpp
index 5eb262cdbead..e30d4defd455 100644
--- a/clang/test/CodeGenCXX/weak-external.cpp
+++ b/clang/test/CodeGenCXX/weak-external.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -fcxx-exceptions -fexceptions -triple %itanium_abi_triple %s -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -fcxx-exceptions -fexceptions -triple x86_64-unknown-linux-gnu %s -emit-llvm -o - | FileCheck %s
// PR4262
// CHECK-NOT: _ZNSs12_S_constructIPKcEEPcT_S3_RKSaIcESt20forward_iterator_tag
diff --git a/clang/test/CodeGenCXX/windows-implicit-dllexport-template-specialization.cpp b/clang/test/CodeGenCXX/windows-implicit-dllexport-template-specialization.cpp
index 3a5693275824..d281826ee70f 100644
--- a/clang/test/CodeGenCXX/windows-implicit-dllexport-template-specialization.cpp
+++ b/clang/test/CodeGenCXX/windows-implicit-dllexport-template-specialization.cpp
@@ -1,7 +1,7 @@
// RUN: %clang_cc1 -std=c++11 -triple i686-windows -fdeclspec -emit-llvm %s -o - | FileCheck %s -check-prefix CHECK-MS
-// RUN: %clang_cc1 -std=c++11 -triple i686-windows-itanium -fdeclspec -emit-llvm %s -o - | FileCheck %s -check-prefix CHECK-IA
-// RUN: %clang_cc1 -std=c++11 -triple x86_64-scei-ps4 -fdeclspec -emit-llvm %s -o - | FileCheck %s -check-prefix CHECK-PS4
-// RUN: %clang_cc1 -std=c++11 -triple x86_64-sie-ps5 -fdeclspec -emit-llvm %s -o - | FileCheck %s -check-prefix CHECK-PS4
+// RUN: %clang_cc1 -std=c++11 -triple i686-windows-itanium -fdeclspec -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -std=c++11 -triple x86_64-scei-ps4 -fdeclspec -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -std=c++11 -triple x86_64-sie-ps5 -fdeclspec -emit-llvm %s -o - | FileCheck %s
template <typename>
struct s {};
@@ -15,8 +15,5 @@ template class __declspec(dllexport) t<char>;
// CHECK-MS: dllexport {{.*}} @"??4?$t@D@@QAEAAV0@ABV0@@Z"
// CHECK-MS: dllexport {{.*}} @"??4?$s@D@@QAEAAU0@ABU0@@Z"
-// CHECK-IA: dllexport {{.*}} @_ZN1tIcEaSERKS0_
-// CHECK-IA: dllexport {{.*}} @_ZN1sIcEaSERKS0_
-
-// CHECK-PS4-NOT: @_ZN1tIcEaSERKS0_
-// CHECK-PS4-NOT: @_ZN1sIcEaSERKS0_
+// CHECK: dllexport {{.*}} @_ZN1tIcEaSERKS0_
+// CHECK: dllexport {{.*}} @_ZN1sIcEaSERKS0_
diff --git a/clang/test/CodeGenCXX/windows-itanium-dllexport.cpp b/clang/test/CodeGenCXX/windows-itanium-dllexport.cpp
index c09fa30d761a..334cebff99da 100644
--- a/clang/test/CodeGenCXX/windows-itanium-dllexport.cpp
+++ b/clang/test/CodeGenCXX/windows-itanium-dllexport.cpp
@@ -1,6 +1,6 @@
// RUN: %clang_cc1 -emit-llvm -triple i686-windows-itanium -fdeclspec %s -o - | FileCheck %s --check-prefixes=CHECK,WI
-// RUN: %clang_cc1 -emit-llvm -triple x86_64-scei-ps4 -fdeclspec %s -o - | FileCheck %s --check-prefixes=CHECK,PS4
-// RUN: %clang_cc1 -emit-llvm -triple x86_64-sie-ps5 -fdeclspec %s -o - | FileCheck %s --check-prefixes=CHECK,PS4
+// RUN: %clang_cc1 -emit-llvm -triple x86_64-scei-ps4 -fdeclspec %s -o - | FileCheck %s --check-prefixes=CHECK,PS
+// RUN: %clang_cc1 -emit-llvm -triple x86_64-sie-ps5 -fdeclspec %s -o - | FileCheck %s --check-prefixes=CHECK,PS
#define JOIN2(x, y) x##y
#define JOIN(x, y) JOIN2(x, y)
@@ -27,18 +27,14 @@ template class __declspec(dllexport) c<int>;
extern template class c<char>;
template class __declspec(dllexport) c<char>;
-// WI: define {{.*}} dllexport {{.*}} @_ZN1cIcEaSERKS0_
-// WI: define {{.*}} dllexport {{.*}} @_ZN1cIcE1fEv
-// PS4-NOT: @_ZN1cIcEaSERKS0_
-// PS4: define weak_odr void @_ZN1cIcE1fEv
+// CHECK: define {{.*}} dllexport {{.*}} @_ZN1cIcEaSERKS0_
+// CHECK: define {{.*}} dllexport {{.*}} @_ZN1cIcE1fEv
c<double> g;
template class __declspec(dllexport) c<double>;
-// WI: define {{.*}} dllexport {{.*}} @_ZN1cIdEaSERKS0_
-// WI: define {{.*}} dllexport {{.*}} @_ZN1cIdE1fEv
-// PS4-NOT: @_ZN1cIdEaSERKS0_
-// PS4: define weak_odr void @_ZN1cIdE1fEv
+// CHECK: define {{.*}} dllexport {{.*}} @_ZN1cIdEaSERKS0_
+// CHECK: define {{.*}} dllexport {{.*}} @_ZN1cIdE1fEv
template <class T>
struct outer {
@@ -59,4 +55,4 @@ USEMEMFUNC(outer<char>::inner, f)
// CHECK-DAG: declare dllimport {{.*}} @_ZN5outerIcE1fEv
// WI-DAG: define {{.*}} @_ZN5outerIcE5inner1fEv
-// PS4-DAG: declare {{.*}} @_ZN5outerIcE5inner1fEv
+// PS-DAG: declare {{.*}} @_ZN5outerIcE5inner1fEv
diff --git a/clang/test/CodeGenCoroutines/coro-aligned-alloc-2.cpp b/clang/test/CodeGenCoroutines/coro-aligned-alloc-2.cpp
index 21c2e45b890f..bfa124bb4dc4 100644
--- a/clang/test/CodeGenCoroutines/coro-aligned-alloc-2.cpp
+++ b/clang/test/CodeGenCoroutines/coro-aligned-alloc-2.cpp
@@ -1,9 +1,7 @@
// Tests that the combination of -fcoro-aligned-allocation and -fsized-deallocation works well.
// Test that the compiler will choose sized deallocation correctly.
-// This is only enabled with `-fsized-deallocation` which is off by default.
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 \
// RUN: -fcoro-aligned-allocation -emit-llvm %s -o - -disable-llvm-passes \
-// RUN: -fsized-deallocation \
// RUN: | FileCheck %s
#include "Inputs/coroutine.h"
diff --git a/clang/test/CodeGenCoroutines/coro-aligned-alloc.cpp b/clang/test/CodeGenCoroutines/coro-aligned-alloc.cpp
index 8019926b730c..156fa64f454c 100644
--- a/clang/test/CodeGenCoroutines/coro-aligned-alloc.cpp
+++ b/clang/test/CodeGenCoroutines/coro-aligned-alloc.cpp
@@ -26,8 +26,9 @@ struct task {
// CHECK: %[[aligned_new:.+]] = call{{.*}}@_ZnwmSt11align_val_t({{.*}}%[[coro_size]],{{.*}}%[[coro_align]])
// CHECK: coro.free:
+// CHECK: %[[coro_size_for_free:.+]] = call{{.*}}@llvm.coro.size
// CHECK: %[[coro_align_for_free:.+]] = call{{.*}}@llvm.coro.align
-// CHECK: call void @_ZdlPvSt11align_val_t({{.*}}[[coro_align_for_free]]
+// CHECK: call void @_ZdlPvmSt11align_val_t({{.*}}%[[coro_size_for_free]],{{.*}}%[[coro_align_for_free]])
task f() {
co_return 43;
@@ -58,8 +59,9 @@ void *operator new(std::size_t, std::align_val_t, std::nothrow_t) noexcept;
// CHECK: %[[aligned_new:.+]] = call{{.*}}@_ZnwmSt11align_val_tSt9nothrow_t({{.*}}%[[coro_size]],{{.*}}%[[coro_align]])
// CHECK: coro.free:
+// CHECK: %[[coro_size_for_free:.+]] = call{{.*}}@llvm.coro.size
// CHECK: %[[coro_align_for_free:.+]] = call{{.*}}@llvm.coro.align
-// CHECK: call void @_ZdlPvSt11align_val_t({{.*}}[[coro_align_for_free]]
+// CHECK: call void @_ZdlPvmSt11align_val_t({{.*}}%[[coro_size_for_free]],{{.*}}%[[coro_align_for_free]])
task2 f2() {
co_return 43;
diff --git a/clang/test/CodeGenCoroutines/coro-alloc.cpp b/clang/test/CodeGenCoroutines/coro-alloc.cpp
index d026a0d7df22..7b3be7e0b7f9 100644
--- a/clang/test/CodeGenCoroutines/coro-alloc.cpp
+++ b/clang/test/CodeGenCoroutines/coro-alloc.cpp
@@ -70,7 +70,8 @@ extern "C" void f0(global_new_delete_tag) {
// CHECK: br i1 %[[NeedDealloc]], label %[[FreeBB:.+]], label %[[Afterwards:.+]]
// CHECK: [[FreeBB]]:
- // CHECK: call void @_ZdlPv(ptr noundef %[[MEM]])
+ // CHECK: %[[SIZE:.+]] = call i64 @llvm.coro.size.i64()
+ // CHECK: call void @_ZdlPvm(ptr noundef %[[MEM]], i64 noundef %[[SIZE]])
// CHECK: br label %[[Afterwards]]
// CHECK: [[Afterwards]]:
@@ -99,7 +100,8 @@ extern "C" void f1(promise_new_tag ) {
// CHECK: %[[FRAME:.+]] = call ptr @llvm.coro.begin(
// CHECK: %[[MEM:.+]] = call ptr @llvm.coro.free(token %[[ID]], ptr %[[FRAME]])
- // CHECK: call void @_ZdlPv(ptr noundef %[[MEM]])
+ // CHECK: %[[SIZE:.+]] = call i64 @llvm.coro.size.i64()
+ // CHECK: call void @_ZdlPvm(ptr noundef %[[MEM]], i64 noundef %[[SIZE]])
co_return;
}
diff --git a/clang/test/CodeGenCoroutines/coro-cleanup.cpp b/clang/test/CodeGenCoroutines/coro-cleanup.cpp
index 98f150758e2d..4e77ac25af1b 100644
--- a/clang/test/CodeGenCoroutines/coro-cleanup.cpp
+++ b/clang/test/CodeGenCoroutines/coro-cleanup.cpp
@@ -84,11 +84,13 @@ void f() {
// CHECK: [[Cleanup]]:
// CHECK: call void @_ZNSt16coroutine_traitsIJvEE12promise_typeD1Ev(
// CHECK: %[[Mem0:.+]] = call ptr @llvm.coro.free(
- // CHECK: call void @_ZdlPv(ptr noundef %[[Mem0]]
+ // CHECK: %[[SIZE:.+]] = call i64 @llvm.coro.size.i64()
+ // CHECK: call void @_ZdlPvm(ptr noundef %[[Mem0]], i64 noundef %[[SIZE]])
// CHECK: [[Dealloc]]:
// THROWEND: %[[Mem:.+]] = call ptr @llvm.coro.free(
- // THROWEND: call void @_ZdlPv(ptr noundef %[[Mem]])
+ // THROWEND: %[[SIZE:.+]] = call i64 @llvm.coro.size.i64()
+ // THROWEND: call void @_ZdlPvm(ptr noundef %[[Mem]], i64 noundef %[[SIZE]])
co_return;
}
diff --git a/clang/test/CodeGenCoroutines/coro-dealloc.cpp b/clang/test/CodeGenCoroutines/coro-dealloc.cpp
index 3cdba6cafdc0..5a699ac9b585 100644
--- a/clang/test/CodeGenCoroutines/coro-dealloc.cpp
+++ b/clang/test/CodeGenCoroutines/coro-dealloc.cpp
@@ -1,6 +1,5 @@
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 \
// RUN: -emit-llvm %s -o - -disable-llvm-passes \
-// RUN: -fsized-deallocation \
// RUN: | FileCheck %s
#include "Inputs/coroutine.h"
@@ -21,7 +20,6 @@ struct task {
};
// Test that the compiler will choose sized deallocation correctly.
-// This is only enabled with `-fsized-deallocation` which is off by default.
void operator delete(void *ptr, std::size_t size) noexcept;
// CHECK: define{{.*}}@_Z1fv
diff --git a/clang/test/CodeGenCoroutines/coro-gro.cpp b/clang/test/CodeGenCoroutines/coro-gro.cpp
index d4c3ff589e34..b62134317cef 100644
--- a/clang/test/CodeGenCoroutines/coro-gro.cpp
+++ b/clang/test/CodeGenCoroutines/coro-gro.cpp
@@ -51,7 +51,8 @@ int f() {
// CHECK: call void @_ZNSt16coroutine_traitsIiJEE12promise_typeD1Ev(
// CHECK: %[[Mem:.+]] = call ptr @llvm.coro.free(
- // CHECK: call void @_ZdlPv(ptr noundef %[[Mem]])
+ // CHECK: %[[SIZE:.+]] = call i64 @llvm.coro.size.i64()
+ // CHECK: call void @_ZdlPvm(ptr noundef %[[Mem]], i64 noundef %[[SIZE]])
// Initialize retval from Gro and destroy Gro
// Note this also tests delaying initialization when Gro and function return
diff --git a/clang/test/CodeGenCoroutines/pr56919.cpp b/clang/test/CodeGenCoroutines/pr56919.cpp
index c7de08ef72d7..baa8c27ce664 100644
--- a/clang/test/CodeGenCoroutines/pr56919.cpp
+++ b/clang/test/CodeGenCoroutines/pr56919.cpp
@@ -111,12 +111,15 @@ Task<void> Bar() { co_await Baz(); }
// CHECK: _Z3Quxv.destroy:{{.*}}
// CHECK-NEXT: #
-// CHECK-NEXT: jmp _ZdlPv
+// CHECK-NEXT: movl $40, %esi
+// CHECK-NEXT: jmp _ZdlPvm@PLT
// CHECK: _Z3Bazv.destroy:{{.*}}
// CHECK-NEXT: #
-// CHECK-NEXT: jmp _ZdlPv
+// CHECK-NEXT: movl $80, %esi
+// CHECK-NEXT: jmp _ZdlPvm
// CHECK: _Z3Barv.destroy:{{.*}}
// CHECK-NEXT: #
-// CHECK-NEXT: jmp _ZdlPv
+// CHECK-NEXT: movl $120, %esi
+// CHECK-NEXT: jmp _ZdlPvm
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx940.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx940.cl
new file mode 100644
index 000000000000..fc5649d8a41f
--- /dev/null
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx940.cl
@@ -0,0 +1,52 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -cl-std=CL2.0 -O0 -triple amdgcn-unknown-unknown -target-cpu gfx940 -emit-llvm -o - %s | FileCheck %s
+// REQUIRES: amdgpu-registered-target
+
+typedef unsigned int u32;
+typedef unsigned short u16;
+typedef unsigned char u8;
+
+// CHECK-LABEL: @test_global_load_lds_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[DST_ADDR:%.*]] = alloca ptr addrspace(3), align 4, addrspace(5)
+// CHECK-NEXT: store ptr addrspace(1) [[SRC:%.*]], ptr addrspace(5) [[SRC_ADDR]], align 8
+// CHECK-NEXT: store ptr addrspace(3) [[DST:%.*]], ptr addrspace(5) [[DST_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr addrspace(1), ptr addrspace(5) [[SRC_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load ptr addrspace(3), ptr addrspace(5) [[DST_ADDR]], align 4
+// CHECK-NEXT: call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) [[TMP0]], ptr addrspace(3) [[TMP1]], i32 4, i32 0, i32 0)
+// CHECK-NEXT: ret void
+//
+void test_global_load_lds_u32(global u32* src, local u32 *dst) {
+ __builtin_amdgcn_global_load_lds(src, dst, /*size=*/4, /*offset=*/0, /*aux=*/0);
+}
+
+// CHECK-LABEL: @test_global_load_lds_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[DST_ADDR:%.*]] = alloca ptr addrspace(3), align 4, addrspace(5)
+// CHECK-NEXT: store ptr addrspace(1) [[SRC:%.*]], ptr addrspace(5) [[SRC_ADDR]], align 8
+// CHECK-NEXT: store ptr addrspace(3) [[DST:%.*]], ptr addrspace(5) [[DST_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr addrspace(1), ptr addrspace(5) [[SRC_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load ptr addrspace(3), ptr addrspace(5) [[DST_ADDR]], align 4
+// CHECK-NEXT: call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) [[TMP0]], ptr addrspace(3) [[TMP1]], i32 2, i32 0, i32 0)
+// CHECK-NEXT: ret void
+//
+void test_global_load_lds_u16(global u16* src, local u16 *dst) {
+ __builtin_amdgcn_global_load_lds(src, dst, /*size=*/2, /*offset=*/0, /*aux=*/0);
+}
+
+// CHECK-LABEL: @test_global_load_lds_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[DST_ADDR:%.*]] = alloca ptr addrspace(3), align 4, addrspace(5)
+// CHECK-NEXT: store ptr addrspace(1) [[SRC:%.*]], ptr addrspace(5) [[SRC_ADDR]], align 8
+// CHECK-NEXT: store ptr addrspace(3) [[DST:%.*]], ptr addrspace(5) [[DST_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr addrspace(1), ptr addrspace(5) [[SRC_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load ptr addrspace(3), ptr addrspace(5) [[DST_ADDR]], align 4
+// CHECK-NEXT: call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) [[TMP0]], ptr addrspace(3) [[TMP1]], i32 1, i32 0, i32 0)
+// CHECK-NEXT: ret void
+//
+void test_global_load_lds_u8(global u8* src, local u8 *dst) {
+ __builtin_amdgcn_global_load_lds(src, dst, /*size=*/1, /*offset=*/0, /*aux=*/0);
+}
diff --git a/clang/test/CodeGenOpenCLCXX/array-type-infinite-loop.clcpp b/clang/test/CodeGenOpenCLCXX/array-type-infinite-loop.clcpp
new file mode 100644
index 000000000000..db9d7eb3281f
--- /dev/null
+++ b/clang/test/CodeGenOpenCLCXX/array-type-infinite-loop.clcpp
@@ -0,0 +1,25 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+//RUN: %clang_cc1 %s -triple spir -emit-llvm -O1 -o - | FileCheck %s
+
+// CHECK-LABEL: define dso_local spir_kernel void @test(
+// CHECK-SAME: ptr addrspace(1) nocapture noundef readonly align 8 [[IN:%.*]], ptr addrspace(1) nocapture noundef writeonly align 8 [[OUT:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] !kernel_arg_addr_space [[META3:![0-9]+]] !kernel_arg_access_qual [[META4:![0-9]+]] !kernel_arg_type [[META5:![0-9]+]] !kernel_arg_base_type [[META5]] !kernel_arg_type_qual [[META6:![0-9]+]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[IN]], i32 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr addrspace(1) [[ARRAYIDX1]], align 8, !tbaa [[TBAA7:![0-9]+]]
+// CHECK-NEXT: store i64 [[TMP0]], ptr addrspace(1) [[OUT]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT: ret void
+//
+__kernel void test(__global long *In, __global long *Out) {
+ long m[4] = { In[0], In[1], 0, 0 };
+ *Out = m[1];
+}
+//.
+// CHECK: [[META3]] = !{i32 1, i32 1}
+// CHECK: [[META4]] = !{!"none", !"none"}
+// CHECK: [[META5]] = !{!"long*", !"long*"}
+// CHECK: [[META6]] = !{!"", !""}
+// CHECK: [[TBAA7]] = !{[[META8:![0-9]+]], [[META8]], i64 0}
+// CHECK: [[META8]] = !{!"long", [[META9:![0-9]+]], i64 0}
+// CHECK: [[META9]] = !{!"omnipotent char", [[META10:![0-9]+]], i64 0}
+// CHECK: [[META10]] = !{!"Simple C++ TBAA"}
+//.
diff --git a/clang/test/CoverageMapping/builtinmacro.c b/clang/test/CoverageMapping/builtinmacro.c
index abcdc191523a..5d5a176aa7d8 100644
--- a/clang/test/CoverageMapping/builtinmacro.c
+++ b/clang/test/CoverageMapping/builtinmacro.c
@@ -4,7 +4,7 @@
// CHECK: filename
const char *filename (const char *name) { // CHECK-NEXT: File 0, [[@LINE]]:41 -> [[@LINE+3]]:2 = #0
- static const char this_file[] = __FILE__;
+ static const char this_file[] = __FILE__; // CHECK-NEXT: File 0, [[@LINE]]:35 -> [[@LINE]]:35 = #0
return this_file;
}
diff --git a/clang/test/CoverageMapping/macros.c b/clang/test/CoverageMapping/macros.c
index 6bd3be434139..fcf21170ef13 100644
--- a/clang/test/CoverageMapping/macros.c
+++ b/clang/test/CoverageMapping/macros.c
@@ -80,12 +80,14 @@ void func7(void) { // CHECK-NEXT: File 0, [[@LINE]]:18 -> [[@LINE+6]]:2 = #0
int kk,ll; // CHECK-NEXT: File 0, [[@LINE+1]]:7 -> [[@LINE+1]]:8 = #0
if (k) // CHECK-NEXT: Branch,File 0, [[@LINE]]:7 -> [[@LINE]]:8 = #1
m(k); // CHECK-NEXT: Gap,File 0, [[@LINE-1]]:9 -> [[@LINE]]:5 = #1
- else // CHECK-NEXT: Expansion,File 0, [[@LINE-1]]:5 -> [[@LINE-1]]:6 = #0
+ else // CHECK-NEXT: Expansion,File 0, [[@LINE-1]]:5 -> [[@LINE-1]]:6 = #1
l = m(l); // CHECK-NEXT: Gap,File 0, [[@LINE-2]]:7 -> [[@LINE]]:5 = (#0 - #1)
} // CHECK-NEXT: File 0, [[@LINE-1]]:5 -> [[@LINE-1]]:10 = (#0 - #1)
// CHECK-NEXT: Expansion,File 0, [[@LINE-2]]:9 -> [[@LINE-2]]:10 = (#0 - #1)
- // CHECK-NEXT: File 1, [[@LINE-9]]:14 -> [[@LINE-9]]:18 = #0
- // CHECK-NEXT: File 2, [[@LINE-10]]:14 -> [[@LINE-10]]:15 = (#0 - #1)
+ // CHECK-NEXT: File 1, [[@LINE-9]]:14 -> [[@LINE-9]]:17 = #1
+ // CHECK-NEXT: File 1, [[@LINE-10]]:14 -> [[@LINE-10]]:18 = #0
+ // CHECK-NEXT: File 2, [[@LINE-11]]:14 -> [[@LINE-11]]:17 = (#0 - #1)
+ // CHECK-NEXT: File 2, [[@LINE-12]]:14 -> [[@LINE-12]]:15 = (#0 - #1)
int main(int argc, const char *argv[]) {
func();
diff --git a/clang/test/CoverageMapping/mcdc-scratch-space.c b/clang/test/CoverageMapping/mcdc-scratch-space.c
new file mode 100644
index 000000000000..2b5b12d9dcad
--- /dev/null
+++ b/clang/test/CoverageMapping/mcdc-scratch-space.c
@@ -0,0 +1,65 @@
+// RUN: %clang_cc1 -triple %itanium_abi_triple -std=c99 -fcoverage-mcdc -fprofile-instrument=clang -fcoverage-mapping -dump-coverage-mapping -emit-llvm-only %s | FileCheck %s
+
+// CHECK: builtin_macro0:
+int builtin_macro0(int a) {
+ // CHECK: Decision,File 0, [[@LINE+1]]:11 -> [[@LINE+2]]:15 = M:0, C:2
+ return (__LINE__ // CHECK: Branch,File 0, [[@LINE]]:11 -> [[@LINE]]:11 = 0, 0 [1,2,0]
+ && a); // CHECK: Branch,File 0, [[@LINE]]:14 -> [[@LINE]]:15 = #2, (#1 - #2) [2,0,0]
+}
+
+// CHECK: builtin_macro1:
+int builtin_macro1(int a) {
+ // CHECK: Decision,File 0, [[@LINE+1]]:11 -> [[@LINE+2]]:22 = M:0, C:2
+ return (a // CHECK: Branch,File 0, [[@LINE]]:11 -> [[@LINE]]:12 = (#0 - #1), #1 [1,0,2]
+ || __LINE__); // CHECK: Branch,File 0, [[@LINE]]:14 -> [[@LINE]]:14 = 0, 0 [2,0,0]
+}
+
+#define PRE(x) pre_##x
+
+// CHECK: pre0:
+int pre0(int pre_a, int b_post) {
+ // CHECK: Decision,File 0, [[@LINE+2]]:11 -> [[@LINE+3]]:20 = M:0, C:2
+ // CHECK: Expansion,File 0, [[@LINE+1]]:11 -> [[@LINE+1]]:14 = #0 (Expanded file = 1)
+ return (PRE(a)
+ && b_post);
+ // CHECK: Branch,File 0, [[@LINE-1]]:14 -> [[@LINE-1]]:20 = #2, (#1 - #2) [2,0,0]
+ // CHECK: Branch,File 1, [[@LINE-9]]:16 -> [[@LINE-9]]:22 = #1, (#0 - #1) [1,2,0]
+}
+
+#define pre_foo pre_a
+
+// CHECK: pre1:
+int pre1(int pre_a, int b_post) {
+ // CHECK: Decision,File 0, [[@LINE+3]]:11 -> [[@LINE+4]]:20 = M:0, C:2
+ // CHECK: Expansion,File 0, [[@LINE+2]]:11 -> [[@LINE+2]]:14 = #0 (Expanded file = 1)
+ // CHECK: Branch,File 0, [[@LINE+2]]:14 -> [[@LINE+2]]:20 = #2, (#1 - #2) [2,0,0]
+ return (PRE(foo)
+ && b_post);
+ // CHECK: Expansion,File 1, 17:16 -> 17:20 = #0 (Expanded file = 2)
+ // CHECK: Branch,File 2, 29:17 -> 29:22 = #1, (#0 - #1) [1,2,0]
+}
+
+#define POST(x) x##_post
+
+// CHECK: post0:
+int post0(int pre_a, int b_post) {
+ // CHECK: Decision,File 0, [[@LINE+2]]:11 -> [[@LINE+3]]:18 = M:0, C:2
+ // CHECK: Branch,File 0, [[@LINE+1]]:11 -> [[@LINE+1]]:16 = (#0 - #1), #1 [1,0,2]
+ return (pre_a
+ || POST(b));
+ // CHECK: Expansion,File 0, [[@LINE-1]]:14 -> [[@LINE-1]]:18 = #1 (Expanded file = 1)
+ // CHECK: Branch,File 1, [[@LINE-9]]:17 -> [[@LINE-9]]:20 = (#1 - #2), #2 [2,0,0]
+}
+
+#define bar_post b_post
+
+// CHECK: post1:
+int post1(int pre_a, int b_post) {
+ // CHECK: Decision,File 0, [[@LINE+3]]:11 -> [[@LINE+4]]:18 = M:0, C:2
+ // CHECK: Branch,File 0, [[@LINE+2]]:11 -> [[@LINE+2]]:16 = (#0 - #1), #1 [1,0,2]
+ // CHECK: Expansion,File 0, [[@LINE+2]]:14 -> [[@LINE+2]]:18 = 0 (Expanded file = 1)
+ return (pre_a
+ || POST(bar));
+ // CHECK: Expansion,File 1, 42:17 -> 42:18 = #1 (Expanded file = 2)
+ // CHECK: Branch,File 2, 54:18 -> 54:24 = (#1 - #2), #2 [2,0,0]
+}
diff --git a/clang/test/CoverageMapping/mcdc-system-headers.cpp b/clang/test/CoverageMapping/mcdc-system-headers.cpp
new file mode 100644
index 000000000000..4dfbb17c2bba
--- /dev/null
+++ b/clang/test/CoverageMapping/mcdc-system-headers.cpp
@@ -0,0 +1,50 @@
+// RUN: %clang_cc1 -std=c++11 -triple %itanium_abi_triple -fprofile-instrument=clang -fcoverage-mapping -dump-coverage-mapping -fcoverage-mcdc -mllvm -system-headers-coverage -emit-llvm-only -o - %s | FileCheck %s --check-prefixes=CHECK,W_SYS
+// RUN: %clang_cc1 -std=c++11 -triple %itanium_abi_triple -fprofile-instrument=clang -fcoverage-mapping -dump-coverage-mapping -fcoverage-mcdc -emit-llvm-only -o - %s | FileCheck %s --check-prefixes=CHECK,X_SYS
+
+#ifdef IS_SYSHEADER
+
+#pragma clang system_header
+#define CONST 42
+#define EXPR1(x) (x)
+#define EXPR2(x) ((x) && (x))
+
+#else
+
+#define IS_SYSHEADER
+#include __FILE__
+
+// CHECK: _Z5func0i:
+int func0(int a) {
+ // CHECK: Decision,File 0, [[@LINE+3]]:11 -> [[@LINE+3]]:21 = M:0, C:2
+ // W_SYS: Expansion,File 0, [[@LINE+2]]:11 -> [[@LINE+2]]:16 = #0 (Expanded file = 1)
+ // X_SYS: Branch,File 0, [[@LINE+1]]:11 -> [[@LINE+1]]:11 = 0, 0 [1,2,0]
+ return (CONST && a);
+ // CHECK: Branch,File 0, [[@LINE-1]]:20 -> [[@LINE-1]]:21 = #2, (#1 - #2) [2,0,0]
+ // W_SYS: Branch,File 1, [[@LINE-16]]:15 -> [[@LINE-16]]:17 = 0, 0 [1,2,0]
+}
+
+// CHECK: _Z5func1ii:
+int func1(int a, int b) {
+ // CHECK: Decision,File 0, [[@LINE+2]]:11 -> [[@LINE+2]]:21 = M:0, C:2
+ // CHECK: Branch,File 0, [[@LINE+1]]:11 -> [[@LINE+1]]:12 = (#0 - #1), #1 [1,0,2]
+ return (a || EXPR1(b));
+ // W_SYS: Expansion,File 0, [[@LINE-1]]:16 -> [[@LINE-1]]:21 = #1 (Expanded file = 1)
+ // W_SYS: Branch,File 1, [[@LINE-24]]:18 -> [[@LINE-24]]:21 = (#1 - #2), #2 [2,0,0]
+ // X_SYS: Branch,File 0, [[@LINE-3]]:16 -> [[@LINE-3]]:16 = (#1 - #2), #2 [2,0,0]
+}
+
+// CHECK: _Z5func2ii:
+int func2(int a, int b) {
+ // W_SYS: Decision,File 0, [[@LINE+5]]:11 -> [[@LINE+5]]:28 = M:0, C:3
+ // X_SYS: Decision,File 0, [[@LINE+4]]:11 -> [[@LINE+4]]:28 = M:0, C:2
+ // W_SYS: Expansion,File 0, [[@LINE+3]]:11 -> [[@LINE+3]]:16 = #0 (Expanded file = 1)
+ // W_SYS: Expansion,File 0, [[@LINE+2]]:23 -> [[@LINE+2]]:28 = #1 (Expanded file = 2)
+ // X_SYS: Branch,File 0, [[@LINE+1]]:11 -> [[@LINE+1]]:11 = #1, (#0 - #1) [1,2,0]
+ return (EXPR2(a) && EXPR1(a));
+ // W_SYS: Branch,File 1, [[@LINE-35]]:19 -> [[@LINE-35]]:22 = #3, (#0 - #3) [1,3,0]
+ // W_SYS: Branch,File 1, [[@LINE-36]]:26 -> [[@LINE-36]]:29 = #4, (#3 - #4) [3,2,0]
+ // W_SYS: Branch,File 2, [[@LINE-38]]:18 -> [[@LINE-38]]:21 = #2, (#1 - #2) [2,0,0]
+ // X_SYS: Branch,File 0, [[@LINE-4]]:23 -> [[@LINE-4]]:23 = #2, (#1 - #2) [2,0,0]
+}
+
+#endif
diff --git a/clang/test/CoverageMapping/templates.cpp b/clang/test/CoverageMapping/templates.cpp
index 143e566a33cb..7e7f2208f114 100644
--- a/clang/test/CoverageMapping/templates.cpp
+++ b/clang/test/CoverageMapping/templates.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -mllvm -emptyline-comment-coverage=false -fprofile-instrument=clang -fcoverage-mapping -dump-coverage-mapping -emit-llvm-only -main-file-name templates.cpp %s | FileCheck %s
+// RUN: %clang_cc1 -std=c++20 -mllvm -emptyline-comment-coverage=false -fprofile-instrument=clang -fcoverage-mapping -dump-coverage-mapping -emit-llvm-only -main-file-name templates.cpp %s | FileCheck %s
template<typename T>
void unused(T x) {
@@ -30,5 +30,6 @@ namespace structural_value_crash {
void test() {
tpl_fn<arr>();
+ tpl_fn<&arr[1]>();
}
}
diff --git a/clang/test/Driver/Ofast.c b/clang/test/Driver/Ofast.c
index 1f9fc78ec1ef..8b7f2217eca2 100644
--- a/clang/test/Driver/Ofast.c
+++ b/clang/test/Driver/Ofast.c
@@ -3,7 +3,9 @@
// RUN: %clang -fno-fast-math -Ofast -### %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST %s
// RUN: %clang -fno-strict-aliasing -Ofast -### %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST %s
// RUN: %clang -fno-vectorize -Ofast -### %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST %s
-// RUN: %clang -Ofast -O2 -### %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST-O2 %s
+// RUN: %clang -Ofast -O2 -### %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST-O2 \
+// RUN: %if target={{.*-windows-msvc.*}} %{ --check-prefix=CHECK-OFAST-O2-ALIASING-MSVC %} \
+// RUN: %else %{ --check-prefix=CHECK-OFAST-O2-ALIASING %} %s
// RUN: %clang -Ofast -fno-fast-math -### %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST-NO-FAST-MATH %s
// RUN: %clang -Ofast -fno-strict-aliasing -### %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST-NO-STRICT-ALIASING %s
// RUN: %clang -Ofast -fno-vectorize -### %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST-NO-VECTORIZE %s
@@ -15,7 +17,8 @@
// CHECK-OFAST: -vectorize-loops
// CHECK-OFAST-O2: -cc1
-// CHECK-OFAST-O2-NOT: -relaxed-aliasing
+// CHECK-OFAST-O2-ALIASING-NOT: -relaxed-aliasing
+// CHECK-OFAST-O2-ALIASING-MSVC: -relaxed-aliasing
// CHECK-OFAST-O2-NOT: -ffast-math
// CHECK-OFAST-O2-NOT: -Ofast
// CHECK-OFAST-O2: -vectorize-loops
diff --git a/clang/test/Driver/aarch64-v95a.c b/clang/test/Driver/aarch64-v95a.c
index 1037da65c8cb..62878f212762 100644
--- a/clang/test/Driver/aarch64-v95a.c
+++ b/clang/test/Driver/aarch64-v95a.c
@@ -6,7 +6,7 @@
// RUN: %clang -target aarch64 -mlittle-endian -march=armv9.5-a -### -c %s 2>&1 | FileCheck -check-prefix=GENERICV95A %s
// RUN: %clang -target aarch64_be -mlittle-endian -march=armv9.5a -### -c %s 2>&1 | FileCheck -check-prefix=GENERICV95A %s
// RUN: %clang -target aarch64_be -mlittle-endian -march=armv9.5-a -### -c %s 2>&1 | FileCheck -check-prefix=GENERICV95A %s
-// GENERICV95A: "-cc1"{{.*}} "-triple" "aarch64{{.*}}" "-target-cpu" "generic" "-target-feature" "+v9.5a"
+// GENERICV95A: "-cc1"{{.*}} "-triple" "aarch64{{.*}}" "-target-cpu" "generic" "-target-feature" "+v9.5a"{{.*}} "-target-feature" "+cpa"{{.*}} "-target-feature" "+faminmax"{{.*}} "-target-feature" "+lut"
// RUN: %clang -target aarch64_be -march=armv9.5a -### -c %s 2>&1 | FileCheck -check-prefix=GENERICV95A-BE %s
// RUN: %clang -target aarch64_be -march=armv9.5-a -### -c %s 2>&1 | FileCheck -check-prefix=GENERICV95A-BE %s
@@ -14,14 +14,10 @@
// RUN: %clang -target aarch64 -mbig-endian -march=armv9.5-a -### -c %s 2>&1 | FileCheck -check-prefix=GENERICV95A-BE %s
// RUN: %clang -target aarch64_be -mbig-endian -march=armv9.5a -### -c %s 2>&1 | FileCheck -check-prefix=GENERICV95A-BE %s
// RUN: %clang -target aarch64_be -mbig-endian -march=armv9.5-a -### -c %s 2>&1 | FileCheck -check-prefix=GENERICV95A-BE %s
-// GENERICV95A-BE: "-cc1"{{.*}} "-triple" "aarch64_be{{.*}}" "-target-cpu" "generic" "-target-feature" "+v9.5a"
+// GENERICV95A-BE: "-cc1"{{.*}} "-triple" "aarch64_be{{.*}}" "-target-cpu" "generic" "-target-feature" "+v9.5a"{{.*}} "-target-feature" "+cpa"{{.*}} "-target-feature" "+faminmax"{{.*}} "-target-feature" "+lut"
// ===== Features supported on aarch64 =====
-// RUN: %clang -target aarch64 -march=armv9.5a+cpa -### -c %s 2>&1 | FileCheck -check-prefix=V95A-CPA %s
-// RUN: %clang -target aarch64 -march=armv9.5-a+cpa -### -c %s 2>&1 | FileCheck -check-prefix=V95A-CPA %s
-// V95A-CPA: "-cc1"{{.*}} "-triple" "aarch64{{.*}}" "-target-cpu" "generic" "-target-feature" "+v9.5a"{{.*}} "-target-feature" "+cpa"
-
// RUN: %clang -target aarch64 -march=armv9.5a+pauth-lr -### -c %s 2>&1 | FileCheck -check-prefix=V95A-PAUTHLR %s
// RUN: %clang -target aarch64 -march=armv9.5-a+pauth-lr -### -c %s 2>&1 | FileCheck -check-prefix=V95A-PAUTHLR %s
// V95A-PAUTHLR: "-cc1"{{.*}} "-triple" "aarch64{{.*}}" "-target-cpu" "generic" "-target-feature" "+v9.5a"{{.*}} "-target-feature" "+pauth-lr"
diff --git a/clang/test/Driver/android-unversioned-fallback-warning.cpp b/clang/test/Driver/android-unversioned-fallback-warning.cpp
index 62a951d14eff..da666cc4d9fa 100644
--- a/clang/test/Driver/android-unversioned-fallback-warning.cpp
+++ b/clang/test/Driver/android-unversioned-fallback-warning.cpp
@@ -14,14 +14,14 @@
// RUN: %clang --target=aarch64-none-linux-android -ccc-install-dir %t/bin \
// RUN: -resource-dir %t/resource -### -c %s 2>&1 | \
// RUN: FileCheck --check-prefix=NO-WARNING %s
-// NO-WARNING-NOT: Using unversioned Android target directory
+// NO-WARNING-NOT: using unversioned Android target directory
// RUN: %clang --target=aarch64-none-linux-android21 -ccc-install-dir %t/bin \
// RUN: -resource-dir %t/resource -### -c %s 2>&1 | \
// RUN: FileCheck --check-prefix=ANDROID21 -DDIR=%t -DSEP=%{fs-sep} %s
-// ANDROID21-DAG: Using unversioned Android target directory [[DIR]]/bin[[SEP]]..[[SEP]]include[[SEP]]aarch64-none-linux-android
-// ANDROID21-DAG: Using unversioned Android target directory [[DIR]]/bin[[SEP]]..[[SEP]]lib[[SEP]]aarch64-none-linux-android
-// ANDROID21-DAG: Using unversioned Android target directory [[DIR]]/resource[[SEP]]lib[[SEP]]aarch64-none-linux-android
+// ANDROID21-DAG: using unversioned Android target directory [[DIR]]/bin[[SEP]]..[[SEP]]include[[SEP]]aarch64-none-linux-android
+// ANDROID21-DAG: using unversioned Android target directory [[DIR]]/bin[[SEP]]..[[SEP]]lib[[SEP]]aarch64-none-linux-android
+// ANDROID21-DAG: using unversioned Android target directory [[DIR]]/resource[[SEP]]lib[[SEP]]aarch64-none-linux-android
// 23 or newer should use the versioned directory
// RUN: %clang --target=aarch64-none-linux-android23 -ccc-install-dir %t/bin \
diff --git a/clang/test/Driver/cl-options.c b/clang/test/Driver/cl-options.c
index 75f49deca065..733f243d3c69 100644
--- a/clang/test/Driver/cl-options.c
+++ b/clang/test/Driver/cl-options.c
@@ -740,9 +740,10 @@
// NOCLANG-SAME: "-vectorize-slp"
// NOCLANG-NOT: "--dependent-lib=msvcrt"
-// RUN: %clang_cl -O2 -MD /clang:-fno-slp-vectorize /clang:-MD /clang:-MF /clang:my_dependency_file.dep -### -- %s 2>&1 | FileCheck -check-prefix=CLANG %s
+// RUN: %clang_cl -O2 -MD /clang:-fno-slp-vectorize /clang:-MD /clang:-MF /clang:my_dependency_file.dep /c /Fo%/t/cl-options.obj -### -- %s 2>&1 | FileCheck -DPREFIX=%/t -check-prefix=CLANG %s
// CLANG: "--dependent-lib=msvcrt"
// CLANG-SAME: "-dependency-file" "my_dependency_file.dep"
+// CLANG-SAME: "-MT" "[[PREFIX]]/cl-options.obj"
// CLANG-NOT: "--dependent-lib=libcmt"
// CLANG-NOT: "-vectorize-slp"
diff --git a/clang/test/Driver/cl-x86-flags.c b/clang/test/Driver/cl-x86-flags.c
index 716b02f02a15..51b16f0ce354 100644
--- a/clang/test/Driver/cl-x86-flags.c
+++ b/clang/test/Driver/cl-x86-flags.c
@@ -69,10 +69,7 @@
// RUN: %clang_cl -m32 -arch:avx2 --target=i386-pc-windows -### -- 2>&1 %s | FileCheck -check-prefix=avx2 %s
// avx2: invalid /arch: argument
-// RUN: %clang_cl -m32 -arch:AVX512F --target=i386-pc-windows /c /Fo%t.obj -Xclang -verify=KNL1 -DTEST_32_ARCH_AVX512F -- %s
-// KNL1-warning@*:* {{KNL, KNM related Intel Xeon Phi CPU's specific ISA's supports will be removed in LLVM 19.}}
-// KNL1-warning@*:* {{KNL, KNM related Intel Xeon Phi CPU's specific ISA's supports will be removed in LLVM 19.}}
-// KNL1-warning@*:* {{KNL, KNM related Intel Xeon Phi CPU's specific ISA's supports will be removed in LLVM 19.}}
+// RUN: %clang_cl -m32 -arch:AVX512F --target=i386-pc-windows /c /Fo%t.obj -Xclang -verify -DTEST_32_ARCH_AVX512F -- %s
#if defined(TEST_32_ARCH_AVX512F)
#if _M_IX86_FP != 2 || !__AVX__ || !__AVX2__ || !__AVX512F__ || __AVX512BW__
#error fail
@@ -112,10 +109,7 @@
// RUN: %clang_cl -m64 -arch:avx2 --target=x86_64-pc-windows -### -- 2>&1 %s | FileCheck -check-prefix=avx264 %s
// avx264: invalid /arch: argument
-// RUN: %clang_cl -m64 -arch:AVX512F --target=i386-pc-windows /c /Fo%t.obj -Xclang -verify=KNL2 -DTEST_64_ARCH_AVX512F -- %s
-// KNL2-warning@*:* {{KNL, KNM related Intel Xeon Phi CPU's specific ISA's supports will be removed in LLVM 19.}}
-// KNL2-warning@*:* {{KNL, KNM related Intel Xeon Phi CPU's specific ISA's supports will be removed in LLVM 19.}}
-// KNL2-warning@*:* {{KNL, KNM related Intel Xeon Phi CPU's specific ISA's supports will be removed in LLVM 19.}}
+// RUN: %clang_cl -m64 -arch:AVX512F --target=i386-pc-windows /c /Fo%t.obj -Xclang -verify -DTEST_64_ARCH_AVX512F -- %s
#if defined(TEST_64_ARCH_AVX512F)
#if _M_IX86_FP || !__AVX__ || !__AVX2__ || !__AVX512F__ || __AVX512BW__
#error fail
diff --git a/clang/test/Driver/clang_f_opts.c b/clang/test/Driver/clang_f_opts.c
index 472d0725a793..d69cd199ac61 100644
--- a/clang/test/Driver/clang_f_opts.c
+++ b/clang/test/Driver/clang_f_opts.c
@@ -623,3 +623,9 @@
// RUN: %clang -### --target=aarch64-windows-msvc -fno-ms-volatile %s 2>&1 | FileCheck -check-prefix=CHECK-NO-MS-VOLATILE %s
// CHECK-MS-VOLATILE: -fms-volatile
// CHECK-NO-MS-VOLATILE-NOT: -fms-volatile
+
+// RUN: %clang -### --target=x86_64-pc-windows-msvc %s 2>&1 | FileCheck -check-prefix=CHECK-NO-STRICT-ALIASING %s
+// RUN: %clang -### --target=x86_64-pc-windows-msvc -fstrict-aliasing %s 2>&1 | FileCheck -check-prefix=CHECK-STRICT-ALIASING %s
+// RUN: %clang -### --target=x86_64-pc-windows-msvc -fno-strict-aliasing %s 2>&1 | FileCheck -check-prefix=CHECK-NO-STRICT-ALIASING %s
+// CHECK-STRICT-ALIASING-NOT: -relaxed-aliasing
+// CHECK-NO-STRICT-ALIASING: -relaxed-aliasing
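
The new clang_f_opts.c checks record that MSVC targets default to no strict aliasing: with no flag, or with -fno-strict-aliasing, cc1 receives -relaxed-aliasing, and only an explicit -fstrict-aliasing suppresses it. A minimal C sketch of the kind of type punning this default tolerates, which strict aliasing rules would make undefined behaviour:

    #include <stdio.h>

    int main(void) {
      float f = 1.0f;
      unsigned *p = (unsigned *)&f; /* aliases a float through unsigned* */
      printf("%x\n", *p);           /* well-defined only without strict aliasing */
      return 0;
    }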
diff --git a/clang/test/Driver/cuda-cross-compiling.c b/clang/test/Driver/cuda-cross-compiling.c
index a1719a6fbe04..203bc063a010 100644
--- a/clang/test/Driver/cuda-cross-compiling.c
+++ b/clang/test/Driver/cuda-cross-compiling.c
@@ -83,8 +83,8 @@
// RUN: not %clang -target nvptx64-nvidia-cuda -march=generic %s -### 2>&1 \
// RUN: | FileCheck -check-prefix=MISSING %s
-// MISSING: error: Must pass in an explicit nvptx64 gpu architecture to 'ptxas'
-// MISSING: error: Must pass in an explicit nvptx64 gpu architecture to 'nvlink'
+// MISSING: error: must pass in an explicit nvptx64 gpu architecture to 'ptxas'
+// MISSING: error: must pass in an explicit nvptx64 gpu architecture to 'nvlink'
// RUN: %clang -target nvptx64-nvidia-cuda -flto -c %s -### 2>&1 \
// RUN: | FileCheck -check-prefix=GENERIC %s
diff --git a/clang/test/Driver/dxc_dxv_path.hlsl b/clang/test/Driver/dxc_dxv_path.hlsl
index 4845de11d5b0..db2c87063ac3 100644
--- a/clang/test/Driver/dxc_dxv_path.hlsl
+++ b/clang/test/Driver/dxc_dxv_path.hlsl
@@ -1,7 +1,7 @@
// RUN: %clang_dxc -I test -Tlib_6_3 -### %s 2>&1 | FileCheck %s
// Make sure the warning is reported.
-// CHECK:dxv not found.
+// CHECK:dxv not found
// RUN: echo "dxv" > %T/dxv && chmod 754 %T/dxv && %clang_dxc --dxv-path=%T %s -Tlib_6_3 -### 2>&1 | FileCheck %s --check-prefix=DXV_PATH
// DXV_PATH:dxv{{(.exe)?}}" "-" "-o" "-"
diff --git a/clang/test/Driver/fast-math.c b/clang/test/Driver/fast-math.c
index 274f1f22ea5e..ffd081948914 100644
--- a/clang/test/Driver/fast-math.c
+++ b/clang/test/Driver/fast-math.c
@@ -67,31 +67,31 @@
// RUN: | FileCheck --check-prefixes=CHECK,NO-ERRNO %s
//
// Target defaults for -fmath-errno (reusing the above checks).
-// RUN: %clang -### -target i686-unknown-linux -c %s 2>&1 \
+// RUN: %clang -### --target=i686-unknown-linux -c %s 2>&1 \
// RUN: | FileCheck --check-prefixes=CHECK,ERRNO %s
// RUN: %clang -### -target i686-apple-darwin -c %s 2>&1 \
// RUN: | FileCheck --check-prefixes=CHECK,NO-ERRNO %s
-// RUN: %clang -### -target x86_64-unknown-freebsd -c %s 2>&1 \
+// RUN: %clang -### --target=x86_64-unknown-freebsd -c %s 2>&1 \
// RUN: | FileCheck --check-prefixes=CHECK,NO-ERRNO %s
-// RUN: %clang -### -target x86_64-unknown-netbsd -c %s 2>&1 \
+// RUN: %clang -### --target=x86_64-unknown-netbsd -c %s 2>&1 \
// RUN: | FileCheck --check-prefixes=CHECK,NO-ERRNO %s
-// RUN: %clang -### -target x86_64-unknown-openbsd -c %s 2>&1 \
+// RUN: %clang -### --target=x86_64-unknown-openbsd -c %s 2>&1 \
// RUN: | FileCheck --check-prefixes=CHECK,NO-ERRNO %s
// RUN: %clang -### --target=x86_64-unknown-haiku -c %s 2>&1 \
// RUN: | FileCheck --check-prefixes=CHECK,NO-ERRNO %s
-// RUN: %clang -### -target x86_64-unknown-dragonfly -c %s 2>&1 \
+// RUN: %clang -### --target=x86_64-unknown-dragonfly -c %s 2>&1 \
// RUN: | FileCheck --check-prefixes=CHECK,NO-ERRNO %s
-// RUN: %clang -### -target x86_64-fuchsia -c %s 2>&1 \
+// RUN: %clang -### --target=x86_64-fuchsia -c %s 2>&1 \
// RUN: | FileCheck --check-prefixes=CHECK,NO-ERRNO %s
-// RUN: %clang -### -target x86_64-linux-android -c %s 2>&1 \
+// RUN: %clang -### --target=x86_64-linux-android -c %s 2>&1 \
// RUN: | FileCheck --check-prefixes=CHECK,NO-ERRNO %s
-// RUN: %clang -### -target x86_64-linux-musl -c %s 2>&1 \
+// RUN: %clang -### --target=x86_64-linux-musl -c %s 2>&1 \
// RUN: | FileCheck --check-prefixes=CHECK,NO-ERRNO %s
// RUN: %clang -### --target=amdgcn-amd-amdhsa -nogpuinc -nogpulib -c %s 2>&1 \
// RUN: | FileCheck --check-prefixes=CHECK,NO-ERRNO %s
-// RUN: %clang -### -target amdgcn-amd-amdpal -c %s 2>&1 \
+// RUN: %clang -### --target=amdgcn-amd-amdpal -c %s 2>&1 \
// RUN: | FileCheck --check-prefixes=CHECK,NO-ERRNO %s
-// RUN: %clang -### -target amdgcn-mesa-mesa3d -c %s 2>&1 \
+// RUN: %clang -### --target=amdgcn-mesa-mesa3d -c %s 2>&1 \
// RUN: | FileCheck --check-prefixes=CHECK,NO-ERRNO %s
//
// Check that -ffast-math disables -fmath-errno, and -fno-fast-math merely
@@ -103,9 +103,9 @@
// RUN: | FileCheck --check-prefixes=CHECK,NO-ERRNO %s
// RUN: %clang -### -ffast-math -fmath-errno -c %s 2>&1 \
// RUN: | FileCheck --check-prefixes=CHECK,ERRNO %s
-// RUN: %clang -### -target i686-unknown-linux -fno-fast-math -c %s 2>&1 \
+// RUN: %clang -### --target=i686-unknown-linux -fno-fast-math -c %s 2>&1 \
// RUN: | FileCheck --check-prefixes=CHECK,ERRNO %s
-// RUN: %clang -### -target i686-unknown-linux -fno-math-errno -fno-fast-math -c %s 2>&1 \
+// RUN: %clang -### --target=i686-unknown-linux -fno-math-errno -fno-fast-math -c %s 2>&1 \
// RUN: | FileCheck --check-prefixes=CHECK,ERRNO %s
// RUN: %clang -### -target i686-apple-darwin -fno-fast-math -c %s 2>&1 \
// RUN: | FileCheck --check-prefixes=CHECK,NO-ERRNO %s
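
The fast-math.c hunks only migrate the deprecated -target <triple> spelling to the preferred --target=<triple>; the ERRNO/NO-ERRNO expectations themselves are unchanged and encode the per-target default for -fmath-errno (on for i686-linux, off for Darwin, the BSDs, Fuchsia, Android, musl, and the AMD GPU targets). A small program where that default is observable, assuming a C library whose sqrt reports domain errors; build at -O0 with -lm so the call is not folded away:

    #include <errno.h>
    #include <math.h>
    #include <stdio.h>

    int main(void) {
      errno = 0;
      double r = sqrt(-1.0); /* with -fmath-errno this must set errno to EDOM */
      printf("r=%f errno=%d\n", r, errno);
      return 0;
    }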
diff --git a/clang/test/Driver/fat-archive-unbundle-ext.c b/clang/test/Driver/fat-archive-unbundle-ext.c
index e98b872f0c0c..e797acccf02b 100644
--- a/clang/test/Driver/fat-archive-unbundle-ext.c
+++ b/clang/test/Driver/fat-archive-unbundle-ext.c
@@ -2,7 +2,7 @@
// UNSUPPORTED: target={{.*-windows.*}}, target={{.*}}-macosx{{.*}}, target={{.*-darwin.*}}, target={{.*}}-aix{{.*}}
// Generate dummy fat object
-// RUN: %clang -O0 -target %itanium_abi_triple %s -c -o %t.host.o
+// RUN: %clang -O0 --target=%itanium_abi_triple %s -c -o %t.host.o
// RUN: echo 'Content of device file' > %t.tgt.o
// RUN: clang-offload-bundler -type=o -targets=host-%itanium_abi_triple,openmp-%itanium_abi_triple -input=%t.host.o -input=%t.tgt.o -output=%t.fat.obj
diff --git a/clang/test/Driver/fatal-warnings.c b/clang/test/Driver/fatal-warnings.c
index 6239b25e8917..12c239cf1208 100644
--- a/clang/test/Driver/fatal-warnings.c
+++ b/clang/test/Driver/fatal-warnings.c
@@ -1,5 +1,5 @@
-// RUN: %clang -### %s -c -o tmp.o -target i686-pc-linux-gnu -integrated-as -Wa,--fatal-warnings 2>&1 | FileCheck %s
-// RUN: not %clang %s -c -o %t.o -target i686-pc-linux-gnu -integrated-as -Wa,--fatal-warnings 2>&1 %t.log
+// RUN: %clang -### %s -c -o tmp.o --target=i686-pc-linux-gnu -integrated-as -Wa,--fatal-warnings 2>&1 | FileCheck %s
+// RUN: not %clang %s -c -o %t.o --target=i686-pc-linux-gnu -integrated-as -Wa,--fatal-warnings 2>&1 %t.log
// FileCheck --check-prefix=CHECK-AS %s -input-file %t.log
// CHECK: "-cc1" {{.*}} "-massembler-fatal-warnings"
diff --git a/clang/test/Driver/fbinutils-version.c b/clang/test/Driver/fbinutils-version.c
index 56a49ed2540f..14b44b4d9dd0 100644
--- a/clang/test/Driver/fbinutils-version.c
+++ b/clang/test/Driver/fbinutils-version.c
@@ -1,29 +1,29 @@
-// RUN: %clang -### -c -target x86_64-linux %s -fbinutils-version=none 2>&1 | FileCheck %s --check-prefix=NONE
+// RUN: %clang -### -c --target=x86_64-linux %s -fbinutils-version=none 2>&1 | FileCheck %s --check-prefix=NONE
// NONE: "-fbinutils-version=none"
-// RUN: %clang -### -c -target aarch64-linux %s -fbinutils-version=2 2>&1 | FileCheck %s --check-prefix=CHECK2
+// RUN: %clang -### -c --target=aarch64-linux %s -fbinutils-version=2 2>&1 | FileCheck %s --check-prefix=CHECK2
// CHECK2: "-fbinutils-version=2"
-// RUN: %clang -### -c -target aarch64-linux %s -fbinutils-version=2.35 2>&1 | FileCheck %s --check-prefix=CHECK2_35
+// RUN: %clang -### -c --target=aarch64-linux %s -fbinutils-version=2.35 2>&1 | FileCheck %s --check-prefix=CHECK2_35
// CHECK2_35: "-fbinutils-version=2.35"
/// Disallow -fbinutils-version=0 because we use $major==0 to indicate the MC
/// default in the backend.
-// RUN: not %clang -c -target x86_64-linux %s -fbinutils-version=0 2>&1 | FileCheck %s --check-prefix=ERR0
+// RUN: not %clang -c --target=x86_64-linux %s -fbinutils-version=0 2>&1 | FileCheck %s --check-prefix=ERR0
// ERR0: error: invalid argument '0' to -fbinutils-version=
-// RUN: not %clang -c -target x86_64-linux %s -fbinutils-version=nan 2>&1 | FileCheck %s --check-prefix=ERR1
+// RUN: not %clang -c --target=x86_64-linux %s -fbinutils-version=nan 2>&1 | FileCheck %s --check-prefix=ERR1
// ERR1: error: invalid argument 'nan' to -fbinutils-version=
-// RUN: not %clang -c -target x86_64-linux %s -fbinutils-version=2. 2>&1 | FileCheck %s --check-prefix=ERR2
+// RUN: not %clang -c --target=x86_64-linux %s -fbinutils-version=2. 2>&1 | FileCheck %s --check-prefix=ERR2
// ERR2: error: invalid argument '2.' to -fbinutils-version=
-// RUN: not %clang -c -target x86_64-linux %s -fbinutils-version=3.-14 2>&1 | FileCheck %s --check-prefix=ERR3
+// RUN: not %clang -c --target=x86_64-linux %s -fbinutils-version=3.-14 2>&1 | FileCheck %s --check-prefix=ERR3
// ERR3: error: invalid argument '3.-14' to -fbinutils-version=
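
As the comment in the hunk notes, a major version of 0 is reserved as the backend's sentinel for the plain MC default, so every 0-major spelling is rejected along with malformed values; 'none' explicitly opts out of binutils compatibility gating. An illustrative pair of invocations, written in the test's own RUN style:

    // Accepted: gate features on binutils >= 2.35 (value is illustrative).
    // RUN: %clang -### -c --target=x86_64-linux -fbinutils-version=2.35 %s
    // Rejected: 0 would collide with the MC-default sentinel.
    // RUN: not %clang -c --target=x86_64-linux -fbinutils-version=0 %s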
diff --git a/clang/test/Driver/fdirect-access-external-data.c b/clang/test/Driver/fdirect-access-external-data.c
index a6da776e6977..4dfb700d6c45 100644
--- a/clang/test/Driver/fdirect-access-external-data.c
+++ b/clang/test/Driver/fdirect-access-external-data.c
@@ -1,13 +1,13 @@
/// -fno-pic code defaults to -fdirect-access-external-data.
-// RUN: %clang -### -c -target x86_64 %s 2>&1 | FileCheck %s --check-prefix=DEFAULT
-// RUN: %clang -### -c -target x86_64 %s -fdirect-access-external-data 2>&1 | FileCheck %s --check-prefix=DEFAULT
-// RUN: %clang -### -c -target x86_64 %s -fdirect-access-external-data -fno-direct-access-external-data 2>&1 | FileCheck %s --check-prefix=INDIRECT
+// RUN: %clang -### -c --target=x86_64 %s 2>&1 | FileCheck %s --check-prefix=DEFAULT
+// RUN: %clang -### -c --target=x86_64 %s -fdirect-access-external-data 2>&1 | FileCheck %s --check-prefix=DEFAULT
+// RUN: %clang -### -c --target=x86_64 %s -fdirect-access-external-data -fno-direct-access-external-data 2>&1 | FileCheck %s --check-prefix=INDIRECT
/// -fpie/-fpic code defaults to -fdirect-access-external-data.
-// RUN: %clang -### -c -target x86_64 %s -fpie 2>&1 | FileCheck %s --check-prefix=DEFAULT
-// RUN: %clang -### -c -target x86_64 %s -fpie -fno-direct-access-external-data -fdirect-access-external-data 2>&1 | FileCheck %s --check-prefix=DIRECT
-// RUN: %clang -### -c -target aarch64 %s -fpic 2>&1 | FileCheck %s --check-prefix=DEFAULT
-// RUN: %clang -### -c -target aarch64 %s -fpic -fdirect-access-external-data 2>&1 | FileCheck %s --check-prefix=DIRECT
+// RUN: %clang -### -c --target=x86_64 %s -fpie 2>&1 | FileCheck %s --check-prefix=DEFAULT
+// RUN: %clang -### -c --target=x86_64 %s -fpie -fno-direct-access-external-data -fdirect-access-external-data 2>&1 | FileCheck %s --check-prefix=DIRECT
+// RUN: %clang -### -c --target=aarch64 %s -fpic 2>&1 | FileCheck %s --check-prefix=DEFAULT
+// RUN: %clang -### -c --target=aarch64 %s -fpic -fdirect-access-external-data 2>&1 | FileCheck %s --check-prefix=DIRECT
/// loongarch* targets default to -fno-direct-access-external-data even for -fno-pic.
// RUN: %clang -### -c --target=loongarch64 -fno-pic %s 2>&1 | FileCheck %s --check-prefix=INDIRECT
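
What these defaults mean in practice: direct access compiles a reference to an external global as a plain PC-relative load (relying on copy relocations if the symbol ends up in a shared library), while indirect access routes it through the GOT. A two-line sketch to inspect with -S under the different flag and -fpic combinations; the symbol name is illustrative:

    extern int x;               /* defined elsewhere, possibly in a DSO */
    int ref(void) { return x; } /* direct: x(%rip); indirect: via the GOT */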
diff --git a/clang/test/Driver/fembed-bitcode.c b/clang/test/Driver/fembed-bitcode.c
index 970500525a50..9081314d121c 100644
--- a/clang/test/Driver/fembed-bitcode.c
+++ b/clang/test/Driver/fembed-bitcode.c
@@ -1,5 +1,5 @@
// RUN: %clang -target x86_64-apple-macosx -fembed-bitcode=all -c %s -o /dev/null -### 2>&1 \
-// RUN: | FileCheck -check-prefix CHECK-X64 %s
+// RUN: | FileCheck --check-prefix=CHECK-X64 %s
// CHECK-X64: "-cc1"
@@ -7,7 +7,7 @@
// CHECK-X64-NOT: "-fdebug-compilation-dir
// RUN: %clang -target armv7-apple-ios -fembed-bitcode=all -c %s -o /dev/null -### 2>&1 \
-// RUN: | FileCheck -check-prefix CHECK-ARM %s
+// RUN: | FileCheck --check-prefix=CHECK-ARM %s
// CHECK-ARM: "-cc1"
@@ -17,7 +17,7 @@
// CHECK-ARM-NOT: "-fdebug-compilation-dir
// RUN: %clang -target arm64-apple-ios -fembed-bitcode=all -c %s -o /dev/null -### 2>&1 \
-// RUN: | FileCheck -check-prefix CHECK-AARCH64 %s
+// RUN: | FileCheck --check-prefix=CHECK-AARCH64 %s
// CHECK-AARCH64: "-cc1"
@@ -26,12 +26,12 @@
// CHECK-AARCH64: "darwinpcs"
// CHECK-AARCH64-NOT: "-fdebug-compilation-dir
-// RUN: %clang -target hexagon-unknown-elf -ffixed-r19 -fembed-bitcode=all -c %s -### 2>&1 \
+// RUN: %clang --target=hexagon-unknown-elf -ffixed-r19 -fembed-bitcode=all -c %s -### 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-HEXAGON %s
// CHECK-HEXAGON: "-target-feature"
// CHECK-HEXAGON: "+reserved-r19"
//
-// RUN: %clang -target wasm32-unknown-unknown -fembed-bitcode=all -pthread -c %s -o /dev/null -### 2>&1 \
+// RUN: %clang --target=wasm32-unknown-unknown -fembed-bitcode=all -pthread -c %s -o /dev/null -### 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-WASM %s
// CHECK-WASM: "-cc1"
diff --git a/clang/test/Driver/fexcess-precision.c b/clang/test/Driver/fexcess-precision.c
index 68579b606c9b..0aa1022f17fd 100644
--- a/clang/test/Driver/fexcess-precision.c
+++ b/clang/test/Driver/fexcess-precision.c
@@ -1,19 +1,19 @@
// Note: %s must be preceded by --, otherwise it may be interpreted as a
// command-line option, e.g. on Mac where %s is commonly under /Users.
-// RUN: %clang -### -target i386 -fexcess-precision=fast -c %s 2>&1 \
+// RUN: %clang -### --target=i386 -fexcess-precision=fast -c %s 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-FAST %s
-// RUN: %clang_cl -### -target i386 -fexcess-precision=fast -c -- %s 2>&1 \
+// RUN: %clang_cl -### --target=i386 -fexcess-precision=fast -c -- %s 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-FAST %s
-// RUN: %clang -### -target i386 -fexcess-precision=standard -c %s 2>&1 \
+// RUN: %clang -### --target=i386 -fexcess-precision=standard -c %s 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-STD %s
-// RUN: %clang_cl -### -target i386 -fexcess-precision=standard -c -- %s 2>&1 \
+// RUN: %clang_cl -### --target=i386 -fexcess-precision=standard -c -- %s 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-STD %s
-// RUN: %clang -### -target i386 -fexcess-precision=16 -c %s 2>&1 \
+// RUN: %clang -### --target=i386 -fexcess-precision=16 -c %s 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NONE %s
-// RUN: %clang_cl -### -target i386 -fexcess-precision=16 -c -- %s 2>&1 \
+// RUN: %clang_cl -### --target=i386 -fexcess-precision=16 -c -- %s 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NONE %s
// RUN: not %clang -### --target=i386 -fexcess-precision=none -c %s 2>&1 \
@@ -21,19 +21,19 @@
// RUN: not %clang_cl -### --target=i386 -fexcess-precision=none -c -- %s 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-ERR-NONE %s
-// RUN: %clang -### -target x86_64 -fexcess-precision=fast -c %s 2>&1 \
+// RUN: %clang -### --target=x86_64 -fexcess-precision=fast -c %s 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-FAST %s
-// RUN: %clang_cl -### -target x86_64 -fexcess-precision=fast -c -- %s 2>&1 \
+// RUN: %clang_cl -### --target=x86_64 -fexcess-precision=fast -c -- %s 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-FAST %s
-// RUN: %clang -### -target x86_64 -fexcess-precision=standard -c %s 2>&1 \
+// RUN: %clang -### --target=x86_64 -fexcess-precision=standard -c %s 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-STD %s
-// RUN: %clang_cl -### -target x86_64 -fexcess-precision=standard -c \
+// RUN: %clang_cl -### --target=x86_64 -fexcess-precision=standard -c \
// RUN: -- %s 2>&1 | FileCheck --check-prefix=CHECK-STD %s
-// RUN: %clang -### -target x86_64 -fexcess-precision=16 -c %s 2>&1 \
+// RUN: %clang -### --target=x86_64 -fexcess-precision=16 -c %s 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NONE %s
-// RUN: %clang_cl -### -target x86_64 -fexcess-precision=16 -c -- %s 2>&1 \
+// RUN: %clang_cl -### --target=x86_64 -fexcess-precision=16 -c -- %s 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NONE %s
// RUN: not %clang -### --target=x86_64 -fexcess-precision=none -c %s 2>&1 \
@@ -41,14 +41,14 @@
// RUN: not %clang_cl -### --target=x86_64 -fexcess-precision=none -c -- %s 2>&1 \
// RUN: | FileCheck --check-prefixes=CHECK-ERR-NONE %s
-// RUN: %clang -### -target aarch64 -fexcess-precision=fast -c %s 2>&1 \
+// RUN: %clang -### --target=aarch64 -fexcess-precision=fast -c %s 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK %s
-// RUN: %clang_cl -### -target aarch64 -fexcess-precision=fast -c -- %s 2>&1 \
+// RUN: %clang_cl -### --target=aarch64 -fexcess-precision=fast -c -- %s 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK %s
-// RUN: %clang -### -target aarch64 -fexcess-precision=standard -c %s 2>&1 \
+// RUN: %clang -### --target=aarch64 -fexcess-precision=standard -c %s 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK %s
-// RUN: %clang_cl -### -target aarch64 -fexcess-precision=standard -c \
+// RUN: %clang_cl -### --target=aarch64 -fexcess-precision=standard -c \
// RUN: -- %s 2>&1 | FileCheck --check-prefix=CHECK %s
// RUN: not %clang -### --target=aarch64 -fexcess-precision=16 -c %s 2>&1 \
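
Beyond the --target= migration, the note at the top of this file explains the trailing --: it ends option parsing, so a source path beginning with a slash (common on macOS under /Users) cannot be mistaken for a slash-style clang-cl option. The flag under test matters on x86 because _Float16 arithmetic may be evaluated in float; a sketch to compare at -S, assuming a target without native _Float16 arithmetic:

    /* Under -fexcess-precision=standard the multiply may be done in float and
       rounded back at the return; =16 forces rounding after every operation. */
    _Float16 mul16(_Float16 a, _Float16 b) { return a * b; }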
diff --git a/clang/test/Driver/fextend-args.c b/clang/test/Driver/fextend-args.c
index 7f19f8c5ec48..0b721202a000 100644
--- a/clang/test/Driver/fextend-args.c
+++ b/clang/test/Driver/fextend-args.c
@@ -5,7 +5,7 @@
// RUN: | FileCheck -check-prefix=CHECK-64 %s
// Unsupported target
-// RUN: not %clang -target aarch64-unknown-windows-msvc -fextend-arguments=32 %s 2>&1 \
+// RUN: not %clang --target=aarch64-unknown-windows-msvc -fextend-arguments=32 %s 2>&1 \
// RUN: | FileCheck -check-prefix=UNSUPPORTED-TARGET %s
// Invalid option value
diff --git a/clang/test/Driver/fforce-dwarf-frame.c b/clang/test/Driver/fforce-dwarf-frame.c
index fb5442c56a40..c4bc2619e0ef 100644
--- a/clang/test/Driver/fforce-dwarf-frame.c
+++ b/clang/test/Driver/fforce-dwarf-frame.c
@@ -1,6 +1,6 @@
-// RUN: %clang -target arm -c -### %s -fforce-dwarf-frame 2>&1 | FileCheck --check-prefix=CHECK-ALWAYS %s
-// RUN: %clang -target arm -c -### %s -fno-force-dwarf-frame 2>&1 | FileCheck --check-prefix=CHECK-NO-ALWAYS %s
-// RUN: %clang -target arm -c -### %s 2>&1 | FileCheck --check-prefix=CHECK-NO-ALWAYS %s
+// RUN: %clang --target=arm -c -### %s -fforce-dwarf-frame 2>&1 | FileCheck --check-prefix=CHECK-ALWAYS %s
+// RUN: %clang --target=arm -c -### %s -fno-force-dwarf-frame 2>&1 | FileCheck --check-prefix=CHECK-NO-ALWAYS %s
+// RUN: %clang --target=arm -c -### %s 2>&1 | FileCheck --check-prefix=CHECK-NO-ALWAYS %s
// CHECK-ALWAYS: -fforce-dwarf-frame
// CHECK-NO-ALWAYS-NOT: -fforce-dwarf-frame
diff --git a/clang/test/Driver/fgnuc-version.c b/clang/test/Driver/fgnuc-version.c
index dea82bbaae0a..c5c8ca1c159a 100644
--- a/clang/test/Driver/fgnuc-version.c
+++ b/clang/test/Driver/fgnuc-version.c
@@ -2,25 +2,25 @@
// Verify -fgnuc-version parsing
//
-// RUN: %clang -c %s -target i686-linux -### 2>&1 | FileCheck %s -check-prefix GNUC-DEFAULT
+// RUN: %clang -c %s --target=i686-linux -### 2>&1 | FileCheck %s --check-prefix=GNUC-DEFAULT
// GNUC-DEFAULT: "-fgnuc-version=4.2.1"
-// RUN: %clang -c %s -target i686-linux -fgnuc-version=100.99.99 -### 2>&1 | FileCheck %s -check-prefix GNUC-OVERRIDE
+// RUN: %clang -c %s --target=i686-linux -fgnuc-version=100.99.99 -### 2>&1 | FileCheck %s --check-prefix=GNUC-OVERRIDE
// GNUC-OVERRIDE: "-fgnuc-version=100.99.99"
-// RUN: %clang -c %s -target i686-linux -fgnuc-version=0 -### 2>&1 | FileCheck %s -check-prefix GNUC-DISABLE
-// RUN: %clang -c %s -target i686-linux -fgnuc-version= -### 2>&1 | FileCheck %s -check-prefix GNUC-DISABLE
+// RUN: %clang -c %s --target=i686-linux -fgnuc-version=0 -### 2>&1 | FileCheck %s --check-prefix=GNUC-DISABLE
+// RUN: %clang -c %s --target=i686-linux -fgnuc-version= -### 2>&1 | FileCheck %s --check-prefix=GNUC-DISABLE
// GNUC-DISABLE-NOT: "-fgnuc-version=
-// RUN: not %clang -c %s -target i686-linux -fgnuc-version=100.100.10 2>&1 | FileCheck %s -check-prefix GNUC-INVALID
-// RUN: not %clang -c %s -target i686-linux -fgnuc-version=100.10.100 2>&1 | FileCheck %s -check-prefix GNUC-INVALID
-// RUN: not %clang -c %s -target i686-linux -fgnuc-version=-1.0.0 2>&1 | FileCheck %s -check-prefix GNUC-INVALID
+// RUN: not %clang -c %s --target=i686-linux -fgnuc-version=100.100.10 2>&1 | FileCheck %s --check-prefix=GNUC-INVALID
+// RUN: not %clang -c %s --target=i686-linux -fgnuc-version=100.10.100 2>&1 | FileCheck %s --check-prefix=GNUC-INVALID
+// RUN: not %clang -c %s --target=i686-linux -fgnuc-version=-1.0.0 2>&1 | FileCheck %s --check-prefix=GNUC-INVALID
// GNUC-INVALID: error: invalid value {{.*}} in '-fgnuc-version={{.*}}'
-// RUN: %clang -fgnuc-version=100.99.99 %s -dM -E -o - | FileCheck %s -check-prefix GNUC-LARGE
+// RUN: %clang -fgnuc-version=100.99.99 %s -dM -E -o - | FileCheck %s --check-prefix=GNUC-LARGE
// GNUC-LARGE: #define __GNUC_MINOR__ 99
// GNUC-LARGE: #define __GNUC_PATCHLEVEL__ 99
// GNUC-LARGE: #define __GNUC__ 100
-// RUN: %clang -fgnuc-version=100.99.99 -x c++ %s -dM -E -o - | FileCheck %s -check-prefix GXX-LARGE
+// RUN: %clang -fgnuc-version=100.99.99 -x c++ %s -dM -E -o - | FileCheck %s --check-prefix=GXX-LARGE
// GXX-LARGE: #define __GNUG__ 100
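
-fgnuc-version controls the GNU compatibility macros clang advertises: the default claims 4.2.1, a value of 0 or an empty value disables the macros entirely, and the minor and patch components must stay below 100 (the tests reject 100.100.10 and 100.10.100), presumably because of how the version is packed into __GNUC_MINOR__ and __GNUC_PATCHLEVEL__. A self-contained probe for whatever a given invocation advertises:

    #include <stdio.h>

    int main(void) {
    #ifdef __GNUC__
      printf("%d.%d.%d\n", __GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__);
    #else
      puts("GNU compatibility macros disabled");
    #endif
      return 0;
    }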
diff --git a/clang/test/Driver/flags.c b/clang/test/Driver/flags.c
index da25a5cd3335..16b760609c36 100644
--- a/clang/test/Driver/flags.c
+++ b/clang/test/Driver/flags.c
@@ -25,11 +25,11 @@
// RUN: %clang -target armv7-apple-darwin10 -### -S -mno-implicit-float -mimplicit-float %s 2>&1 | FileCheck -check-prefix=TEST8 %s
// TEST8-NOT: "-no-implicit-float"
-// RUN: %clang -target x86_64-linux-gnu -### -c -fclang-abi-compat=3.2 %s 2>&1 | FileCheck -check-prefix=TEST9 %s
+// RUN: %clang --target=x86_64-linux-gnu -### -c -fclang-abi-compat=3.2 %s 2>&1 | FileCheck -check-prefix=TEST9 %s
// TEST9: "-fclang-abi-compat=3.2"
//
-// RUN: %clang -target riscv32 -### -S -mno-implicit-float %s 2>&1 | FileCheck -check-prefix=TEST10 %s
+// RUN: %clang --target=riscv32 -### -S -mno-implicit-float %s 2>&1 | FileCheck -check-prefix=TEST10 %s
// TEST10: "-no-implicit-float"
//
-// RUN: %clang -target riscv64 -### -S -mno-implicit-float %s 2>&1 | FileCheck -check-prefix=TEST11 %s
+// RUN: %clang --target=riscv64 -### -S -mno-implicit-float %s 2>&1 | FileCheck -check-prefix=TEST11 %s
// TEST11: "-no-implicit-float"
diff --git a/clang/test/Driver/flang/msvc-link.f90 b/clang/test/Driver/flang/msvc-link.f90
index 536da2599431..463749510eb5 100644
--- a/clang/test/Driver/flang/msvc-link.f90
+++ b/clang/test/Driver/flang/msvc-link.f90
@@ -1,4 +1,4 @@
-! RUN: %clang --driver-mode=flang -target x86_64-pc-windows-msvc -### %s -Ltest 2>&1 | FileCheck %s
+! RUN: %clang --driver-mode=flang --target=x86_64-pc-windows-msvc -### %s -Ltest 2>&1 | FileCheck %s
!
! Test that user provided paths come before the Flang runtimes
! CHECK: "-libpath:test"
diff --git a/clang/test/Driver/fmemprof.cpp b/clang/test/Driver/fmemprof.cpp
index b00d9f2c81e2..5165c4452fd5 100644
--- a/clang/test/Driver/fmemprof.cpp
+++ b/clang/test/Driver/fmemprof.cpp
@@ -1,7 +1,7 @@
-// RUN: %clangxx -target x86_64-linux-gnu -fmemory-profile %s -### 2>&1 | FileCheck %s
-// RUN: %clangxx -target x86_64-linux-gnu -fmemory-profile=foo %s -### 2>&1 | FileCheck %s --check-prefix=DIR
-// RUN: %clangxx -target x86_64-linux-gnu -fmemory-profile -fno-memory-profile %s -### 2>&1 | FileCheck %s --check-prefix=OFF
-// RUN: %clangxx -target x86_64-linux-gnu -fmemory-profile=foo -fno-memory-profile %s -### 2>&1 | FileCheck %s --check-prefix=OFF
+// RUN: %clangxx --target=x86_64-linux-gnu -fmemory-profile %s -### 2>&1 | FileCheck %s
+// RUN: %clangxx --target=x86_64-linux-gnu -fmemory-profile=foo %s -### 2>&1 | FileCheck %s --check-prefix=DIR
+// RUN: %clangxx --target=x86_64-linux-gnu -fmemory-profile -fno-memory-profile %s -### 2>&1 | FileCheck %s --check-prefix=OFF
+// RUN: %clangxx --target=x86_64-linux-gnu -fmemory-profile=foo -fno-memory-profile %s -### 2>&1 | FileCheck %s --check-prefix=OFF
// CHECK: "-cc1" {{.*}} "-fmemory-profile"
// CHECK: ld{{.*}}libclang_rt.memprof{{.*}}libclang_rt.memprof_cxx
// DIR: "-cc1" {{.*}} "-fmemory-profile=foo"
@@ -9,7 +9,7 @@
// OFF-NOT: "-fmemory-profile"
// OFF-NOT: libclang_rt.memprof
-// RUN: %clangxx -target x86_64-linux-gnu -fmemory-profile-use=foo %s -### 2>&1 | FileCheck %s --check-prefix=USE
+// RUN: %clangxx --target=x86_64-linux-gnu -fmemory-profile-use=foo %s -### 2>&1 | FileCheck %s --check-prefix=USE
// USE: "-cc1" {{.*}} "-fmemory-profile-use=foo"
// RUN: not %clangxx --target=x86_64-linux-gnu -fmemory-profile -fmemory-profile-use=foo %s -### 2>&1 | FileCheck %s --check-prefix=CONFLICTWITHMEMPROFINSTR
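
Per the checks, -fmemory-profile both instruments the compile (the cc1 flag) and links the memprof runtimes, the =foo form is forwarded through as given (the directory the profile is written to, per the DIR check), and instrumenting while consuming a profile via -fmemory-profile-use is rejected as a conflict. A toy workload whose heap traffic such a build would attribute, assuming a memprof-enabled toolchain:

    #include <stdlib.h>
    #include <string.h>

    int main(void) {
      for (int i = 0; i < 1000; ++i) {
        char *p = malloc(4096); /* allocation site memprof would record */
        memset(p, i & 0xff, 4096);
        free(p);
      }
      return 0;
    }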
diff --git a/clang/test/Driver/fopenmp.c b/clang/test/Driver/fopenmp.c
index 291946923b3e..7d343eeee0f3 100644
--- a/clang/test/Driver/fopenmp.c
+++ b/clang/test/Driver/fopenmp.c
@@ -1,27 +1,27 @@
-// RUN: %clang -target x86_64-linux-gnu -fopenmp=libomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
-// RUN: %clang -target x86_64-linux-gnu -fopenmp=libgomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP
-// RUN: %clang -target x86_64-linux-gnu -fopenmp=libiomp5 -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
+// RUN: %clang --target=x86_64-linux-gnu -fopenmp=libomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
+// RUN: %clang --target=x86_64-linux-gnu -fopenmp=libgomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP
+// RUN: %clang --target=x86_64-linux-gnu -fopenmp=libiomp5 -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
// RUN: %clang -target x86_64-apple-darwin -fopenmp=libomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
// RUN: %clang -target x86_64-apple-darwin -fopenmp=libgomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP
// RUN: %clang -target x86_64-apple-darwin -fopenmp=libiomp5 -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
-// RUN: %clang -target x86_64-freebsd -fopenmp=libomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
-// RUN: %clang -target x86_64-freebsd -fopenmp=libgomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP
-// RUN: %clang -target x86_64-freebsd -fopenmp=libiomp5 -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
-// RUN: %clang -target x86_64-netbsd -fopenmp=libomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
-// RUN: %clang -target x86_64-netbsd -fopenmp=libgomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP
-// RUN: %clang -target x86_64-netbsd -fopenmp=libiomp5 -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
-// RUN: %clang -target x86_64-openbsd -fopenmp=libomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
-// RUN: %clang -target x86_64-openbsd -fopenmp=libgomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP
-// RUN: %clang -target x86_64-openbsd -fopenmp=libiomp5 -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
-// RUN: %clang -target x86_64-dragonfly -fopenmp=libomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
-// RUN: %clang -target x86_64-dragonfly -fopenmp=libgomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP
-// RUN: %clang -target x86_64-dragonfly -fopenmp=libiomp5 -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
-// RUN: %clang -target i386-pc-solaris2.11 -fopenmp=libomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
-// RUN: %clang -target i386-pc-solaris2.11 -fopenmp=libgomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP
-// RUN: %clang -target i386-pc-solaris2.11 -fopenmp=libiomp5 -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
-// RUN: %clang -target x86_64-windows-gnu -fopenmp=libomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
-// RUN: %clang -target x86_64-windows-gnu -fopenmp=libgomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP
-// RUN: %clang -target x86_64-windows-gnu -fopenmp=libiomp5 -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
+// RUN: %clang --target=x86_64-freebsd -fopenmp=libomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
+// RUN: %clang --target=x86_64-freebsd -fopenmp=libgomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP
+// RUN: %clang --target=x86_64-freebsd -fopenmp=libiomp5 -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
+// RUN: %clang --target=x86_64-netbsd -fopenmp=libomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
+// RUN: %clang --target=x86_64-netbsd -fopenmp=libgomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP
+// RUN: %clang --target=x86_64-netbsd -fopenmp=libiomp5 -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
+// RUN: %clang --target=x86_64-openbsd -fopenmp=libomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
+// RUN: %clang --target=x86_64-openbsd -fopenmp=libgomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP
+// RUN: %clang --target=x86_64-openbsd -fopenmp=libiomp5 -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
+// RUN: %clang --target=x86_64-dragonfly -fopenmp=libomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
+// RUN: %clang --target=x86_64-dragonfly -fopenmp=libgomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP
+// RUN: %clang --target=x86_64-dragonfly -fopenmp=libiomp5 -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
+// RUN: %clang --target=i386-pc-solaris2.11 -fopenmp=libomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
+// RUN: %clang --target=i386-pc-solaris2.11 -fopenmp=libgomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP
+// RUN: %clang --target=i386-pc-solaris2.11 -fopenmp=libiomp5 -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
+// RUN: %clang --target=x86_64-windows-gnu -fopenmp=libomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
+// RUN: %clang --target=x86_64-windows-gnu -fopenmp=libgomp -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP
+// RUN: %clang --target=x86_64-windows-gnu -fopenmp=libiomp5 -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
// RUN: %clang_cl --target=x86_64-windows-msvc /clang:-fopenmp=libomp /openmp -### -- %s 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
// RUN: %clang_cl --target=x86_64-windows-msvc /clang:-fopenmp=libgomp /openmp -### -- %s 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-NO-OPENMP
// RUN: %clang_cl --target=x86_64-windows-msvc /clang:-fopenmp=libiomp5 /openmp -### -- %s 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMP
@@ -36,99 +36,99 @@
// CHECK-CC1-NO-OPENMP: "-cc1"
// CHECK-CC1-NO-OPENMP-NOT: "-fopenmp"
//
-// RUN: %clang -target x86_64-linux-gnu -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-OMP
-// RUN: %clang -target x86_64-linux-gnu -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-GOMP --check-prefix=CHECK-LD-GOMP-RT
-// RUN: %clang -target x86_64-linux-gnu -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-IOMP5
+// RUN: %clang --target=x86_64-linux-gnu -fopenmp=libomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-OMP
+// RUN: %clang --target=x86_64-linux-gnu -fopenmp=libgomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-GOMP --check-prefix=CHECK-LD-GOMP-RT
+// RUN: %clang --target=x86_64-linux-gnu -fopenmp=libiomp5 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-IOMP5
//
-// RUN: %clang -target x86_64-linux-gnu -fopenmp=libomp -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-OMP
-// RUN: %clang -target x86_64-linux-gnu -fopenmp=libgomp -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-GOMP --check-prefix=CHECK-LD-STATIC-GOMP-RT
-// RUN: %clang -target x86_64-linux-gnu -fopenmp=libiomp5 -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5
-// RUN: %clang -target x86_64-linux-gnu -fopenmp=libiomp5 -static -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5-NO-BDYNAMIC
+// RUN: %clang --target=x86_64-linux-gnu -fopenmp=libomp -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-OMP
+// RUN: %clang --target=x86_64-linux-gnu -fopenmp=libgomp -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-GOMP --check-prefix=CHECK-LD-STATIC-GOMP-RT
+// RUN: %clang --target=x86_64-linux-gnu -fopenmp=libiomp5 -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5
+// RUN: %clang --target=x86_64-linux-gnu -fopenmp=libiomp5 -static -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5-NO-BDYNAMIC
//
-// RUN: %clang -nostdlib -target x86_64-linux-gnu -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP
-// RUN: %clang -nostdlib -target x86_64-linux-gnu -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP
-// RUN: %clang -nostdlib -target x86_64-linux-gnu -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5
+// RUN: %clang -nostdlib --target=x86_64-linux-gnu -fopenmp=libomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP
+// RUN: %clang -nostdlib --target=x86_64-linux-gnu -fopenmp=libgomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP
+// RUN: %clang -nostdlib --target=x86_64-linux-gnu -fopenmp=libiomp5 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5
//
-// RUN: %clang -target x86_64-darwin -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-OMP
-// RUN: %clang -target x86_64-darwin -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-GOMP --check-prefix=CHECK-LD-GOMP-NO-RT
-// RUN: %clang -target x86_64-darwin -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-IOMP5
+// RUN: %clang --target=x86_64-darwin -fopenmp=libomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-OMP
+// RUN: %clang --target=x86_64-darwin -fopenmp=libgomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-GOMP --check-prefix=CHECK-LD-GOMP-NO-RT
+// RUN: %clang --target=x86_64-darwin -fopenmp=libiomp5 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-IOMP5
//
-// RUN: %clang -nostdlib -target x86_64-darwin -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP
-// RUN: %clang -nostdlib -target x86_64-darwin -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP
-// RUN: %clang -nostdlib -target x86_64-darwin -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5
+// RUN: %clang -nostdlib --target=x86_64-darwin -fopenmp=libomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP
+// RUN: %clang -nostdlib --target=x86_64-darwin -fopenmp=libgomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP
+// RUN: %clang -nostdlib --target=x86_64-darwin -fopenmp=libiomp5 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5
//
-// RUN: %clang -target x86_64-freebsd -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-OMP
-// RUN: %clang -target x86_64-freebsd -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-GOMP --check-prefix=CHECK-LD-GOMP-NO-RT
-// RUN: %clang -target x86_64-freebsd -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-IOMP5
+// RUN: %clang --target=x86_64-freebsd -fopenmp=libomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-OMP
+// RUN: %clang --target=x86_64-freebsd -fopenmp=libgomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-GOMP --check-prefix=CHECK-LD-GOMP-NO-RT
+// RUN: %clang --target=x86_64-freebsd -fopenmp=libiomp5 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-IOMP5
//
-// RUN: %clang -target x86_64-freebsd -fopenmp=libomp -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-OMP
-// RUN: %clang -target x86_64-freebsd -fopenmp=libgomp -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-GOMP --check-prefix=CHECK-LD-STATIC-GOMP-NO-RT
-// RUN: %clang -target x86_64-freebsd -fopenmp=libiomp5 -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5
-// RUN: %clang -target x86_64-freebsd -fopenmp=libiomp5 -static -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5-NO-BDYNAMIC
+// RUN: %clang --target=x86_64-freebsd -fopenmp=libomp -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-OMP
+// RUN: %clang --target=x86_64-freebsd -fopenmp=libgomp -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-GOMP --check-prefix=CHECK-LD-STATIC-GOMP-NO-RT
+// RUN: %clang --target=x86_64-freebsd -fopenmp=libiomp5 -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5
+// RUN: %clang --target=x86_64-freebsd -fopenmp=libiomp5 -static -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5-NO-BDYNAMIC
//
-// RUN: %clang -nostdlib -target x86_64-freebsd -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP
-// RUN: %clang -nostdlib -target x86_64-freebsd -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP
-// RUN: %clang -nostdlib -target x86_64-freebsd -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5
+// RUN: %clang -nostdlib --target=x86_64-freebsd -fopenmp=libomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP
+// RUN: %clang -nostdlib --target=x86_64-freebsd -fopenmp=libgomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP
+// RUN: %clang -nostdlib --target=x86_64-freebsd -fopenmp=libiomp5 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5
//
-// RUN: %clang -target x86_64-netbsd -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-OMP
-// RUN: %clang -target x86_64-netbsd -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-GOMP --check-prefix=CHECK-LD-GOMP-NO-RT
-// RUN: %clang -target x86_64-netbsd -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-IOMP5
+// RUN: %clang --target=x86_64-netbsd -fopenmp=libomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-OMP
+// RUN: %clang --target=x86_64-netbsd -fopenmp=libgomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-GOMP --check-prefix=CHECK-LD-GOMP-NO-RT
+// RUN: %clang --target=x86_64-netbsd -fopenmp=libiomp5 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-IOMP5
//
-// RUN: %clang -target x86_64-netbsd -fopenmp=libomp -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-OMP
-// RUN: %clang -target x86_64-netbsd -fopenmp=libgomp -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-GOMP --check-prefix=CHECK-LD-STATIC-GOMP-NO-RT
-// RUN: %clang -target x86_64-netbsd -fopenmp=libiomp5 -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5
-// RUN: %clang -target x86_64-netbsd -fopenmp=libiomp5 -static -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5-NO-BDYNAMIC
+// RUN: %clang --target=x86_64-netbsd -fopenmp=libomp -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-OMP
+// RUN: %clang --target=x86_64-netbsd -fopenmp=libgomp -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-GOMP --check-prefix=CHECK-LD-STATIC-GOMP-NO-RT
+// RUN: %clang --target=x86_64-netbsd -fopenmp=libiomp5 -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5
+// RUN: %clang --target=x86_64-netbsd -fopenmp=libiomp5 -static -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5-NO-BDYNAMIC
//
-// RUN: %clang -nostdlib -target x86_64-netbsd -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP
-// RUN: %clang -nostdlib -target x86_64-netbsd -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP
-// RUN: %clang -nostdlib -target x86_64-netbsd -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5
+// RUN: %clang -nostdlib --target=x86_64-netbsd -fopenmp=libomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP
+// RUN: %clang -nostdlib --target=x86_64-netbsd -fopenmp=libgomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP
+// RUN: %clang -nostdlib --target=x86_64-netbsd -fopenmp=libiomp5 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5
//
-// RUN: %clang -target x86_64-openbsd -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-OMP
-// RUN: %clang -target x86_64-openbsd -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-GOMP --check-prefix=CHECK-LD-GOMP-NO-RT
-// RUN: %clang -target x86_64-openbsd -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-IOMP5
+// RUN: %clang --target=x86_64-openbsd -fopenmp=libomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-OMP
+// RUN: %clang --target=x86_64-openbsd -fopenmp=libgomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-GOMP --check-prefix=CHECK-LD-GOMP-NO-RT
+// RUN: %clang --target=x86_64-openbsd -fopenmp=libiomp5 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-IOMP5
//
-// RUN: %clang -target x86_64-openbsd -fopenmp=libomp -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-OMP
-// RUN: %clang -target x86_64-openbsd -fopenmp=libgomp -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-GOMP --check-prefix=CHECK-LD-STATIC-GOMP-NO-RT
-// RUN: %clang -target x86_64-openbsd -fopenmp=libiomp5 -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5
-// RUN: %clang -target x86_64-openbsd -fopenmp=libiomp5 -static -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5-NO-BDYNAMIC
+// RUN: %clang --target=x86_64-openbsd -fopenmp=libomp -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-OMP
+// RUN: %clang --target=x86_64-openbsd -fopenmp=libgomp -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-GOMP --check-prefix=CHECK-LD-STATIC-GOMP-NO-RT
+// RUN: %clang --target=x86_64-openbsd -fopenmp=libiomp5 -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5
+// RUN: %clang --target=x86_64-openbsd -fopenmp=libiomp5 -static -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5-NO-BDYNAMIC
//
-// RUN: %clang -nostdlib -target x86_64-openbsd -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP
-// RUN: %clang -nostdlib -target x86_64-openbsd -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP
-// RUN: %clang -nostdlib -target x86_64-openbsd -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5
+// RUN: %clang -nostdlib --target=x86_64-openbsd -fopenmp=libomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP
+// RUN: %clang -nostdlib --target=x86_64-openbsd -fopenmp=libgomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP
+// RUN: %clang -nostdlib --target=x86_64-openbsd -fopenmp=libiomp5 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5
//
-// RUN: %clang -target x86_64-dragonfly -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-OMP
-// RUN: %clang -target x86_64-dragonfly -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-GOMP --check-prefix=CHECK-LD-GOMP-NO-RT
-// RUN: %clang -target x86_64-dragonfly -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-IOMP5
+// RUN: %clang --target=x86_64-dragonfly -fopenmp=libomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-OMP
+// RUN: %clang --target=x86_64-dragonfly -fopenmp=libgomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-GOMP --check-prefix=CHECK-LD-GOMP-NO-RT
+// RUN: %clang --target=x86_64-dragonfly -fopenmp=libiomp5 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-IOMP5
//
-// RUN: %clang -target x86_64-dragonfly -fopenmp=libomp -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-OMP
-// RUN: %clang -target x86_64-dragonfly -fopenmp=libgomp -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-GOMP --check-prefix=CHECK-LD-STATIC-GOMP-NO-RT
-// RUN: %clang -target x86_64-dragonfly -fopenmp=libiomp5 -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5
-// RUN: %clang -target x86_64-dragonfly -fopenmp=libiomp5 -static -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5-NO-BDYNAMIC
+// RUN: %clang --target=x86_64-dragonfly -fopenmp=libomp -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-OMP
+// RUN: %clang --target=x86_64-dragonfly -fopenmp=libgomp -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-GOMP --check-prefix=CHECK-LD-STATIC-GOMP-NO-RT
+// RUN: %clang --target=x86_64-dragonfly -fopenmp=libiomp5 -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5
+// RUN: %clang --target=x86_64-dragonfly -fopenmp=libiomp5 -static -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5-NO-BDYNAMIC
//
-// RUN: %clang -nostdlib -target x86_64-dragonfly -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP
-// RUN: %clang -nostdlib -target x86_64-dragonfly -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP
-// RUN: %clang -nostdlib -target x86_64-dragonfly -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5
+// RUN: %clang -nostdlib --target=x86_64-dragonfly -fopenmp=libomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP
+// RUN: %clang -nostdlib --target=x86_64-dragonfly -fopenmp=libgomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP
+// RUN: %clang -nostdlib --target=x86_64-dragonfly -fopenmp=libiomp5 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5
//
-// RUN: %clang -target i386-pc-solaris2.11 -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-OMP
-// RUN: %clang -target i386-pc-solaris2.11 -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-GOMP --check-prefix=CHECK-LD-GOMP-NO-RT
-// RUN: %clang -target i386-pc-solaris2.11 -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-IOMP5
+// RUN: %clang --target=i386-pc-solaris2.11 -fopenmp=libomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-OMP
+// RUN: %clang --target=i386-pc-solaris2.11 -fopenmp=libgomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-GOMP --check-prefix=CHECK-LD-GOMP-NO-RT
+// RUN: %clang --target=i386-pc-solaris2.11 -fopenmp=libiomp5 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-IOMP5
//
-// RUN: %clang -target i386-pc-solaris2.11 -fopenmp=libomp -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-OMP
-// RUN: %clang -target i386-pc-solaris2.11 -fopenmp=libgomp -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-GOMP --check-prefix=CHECK-LD-STATIC-GOMP-NO-RT
-// RUN: %clang -target i386-pc-solaris2.11 -fopenmp=libiomp5 -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5
-// RUN: %clang -target i386-pc-solaris2.11 -fopenmp=libiomp5 -static -static-openmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5-NO-BDYNAMIC
+// RUN: %clang --target=i386-pc-solaris2.11 -fopenmp=libomp -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-OMP
+// RUN: %clang --target=i386-pc-solaris2.11 -fopenmp=libgomp -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-GOMP --check-prefix=CHECK-LD-STATIC-GOMP-NO-RT
+// RUN: %clang --target=i386-pc-solaris2.11 -fopenmp=libiomp5 -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5
+// RUN: %clang --target=i386-pc-solaris2.11 -fopenmp=libiomp5 -static -static-openmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-STATIC-IOMP5-NO-BDYNAMIC
//
-// RUN: %clang -nostdlib -target i386-pc-solaris2.11 -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP
-// RUN: %clang -nostdlib -target i386-pc-solaris2.11 -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP
-// RUN: %clang -nostdlib -target i386-pc-solaris2.11 -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5
+// RUN: %clang -nostdlib --target=i386-pc-solaris2.11 -fopenmp=libomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP
+// RUN: %clang -nostdlib --target=i386-pc-solaris2.11 -fopenmp=libgomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP
+// RUN: %clang -nostdlib --target=i386-pc-solaris2.11 -fopenmp=libiomp5 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5
//
-// RUN: %clang -target x86_64-windows-gnu -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-OMP
-// RUN: %clang -target x86_64-windows-gnu -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-GOMP --check-prefix=CHECK-LD-GOMP-NO-RT
-// RUN: %clang -target x86_64-windows-gnu -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-IOMP5MD
+// RUN: %clang --target=x86_64-windows-gnu -fopenmp=libomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-OMP
+// RUN: %clang --target=x86_64-windows-gnu -fopenmp=libgomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-GOMP --check-prefix=CHECK-LD-GOMP-NO-RT
+// RUN: %clang --target=x86_64-windows-gnu -fopenmp=libiomp5 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-IOMP5MD
//
-// RUN: %clang -nostdlib -target x86_64-windows-gnu -fopenmp=libomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP
-// RUN: %clang -nostdlib -target x86_64-windows-gnu -fopenmp=libgomp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP
-// RUN: %clang -nostdlib -target x86_64-windows-gnu -fopenmp=libiomp5 %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5MD
+// RUN: %clang -nostdlib --target=x86_64-windows-gnu -fopenmp=libomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OMP
+// RUN: %clang -nostdlib --target=x86_64-windows-gnu -fopenmp=libgomp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-GOMP
+// RUN: %clang -nostdlib --target=x86_64-windows-gnu -fopenmp=libiomp5 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IOMP5MD
//
// CHECK-LD-OMP: "{{.*}}ld{{(.exe)?}}"
// CHECK-LD-OMP: "-lomp"
@@ -172,7 +172,7 @@
// CHECK-LD-STATIC-IOMP5-NO-BDYNAMIC: "-{{B?}}static" {{.*}} "-liomp5"
// CHECK-LD-STATIC-IOMP5-NO-BDYNAMIC-NOT: "-Bdynamic"
//
-// RUN: %clang -target x86_64-linux-gnu -fopenmp=libomp -fopenmp-enable-irbuilder -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMPIRBUILDER
+// RUN: %clang --target=x86_64-linux-gnu -fopenmp=libomp -fopenmp-enable-irbuilder -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-CC1-OPENMPIRBUILDER
//
// CHECK-CC1-OPENMPIRBUILDER: "-cc1"
// CHECK-CC1-OPENMPIRBUILDER-SAME: "-fopenmp"
@@ -184,14 +184,14 @@
// test the CC1 invocation. Instead, just ensure we do eventually link *some*
// OpenMP runtime.
//
-// RUN: %clang -target x86_64-linux-gnu -fopenmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY
-// RUN: %clang -target x86_64-darwin -fopenmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY
-// RUN: %clang -target x86_64-freebsd -fopenmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY
-// RUN: %clang -target x86_64-netbsd -fopenmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY
-// RUN: %clang -target x86_64-openbsd -fopenmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY
-// RUN: %clang -target x86_64-dragonfly -fopenmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY
-// RUN: %clang -target i386-pc-solaris2.11 -fopenmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY
-// RUN: %clang -target x86_64-windows-gnu -fopenmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANYMD
+// RUN: %clang --target=x86_64-linux-gnu -fopenmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY
+// RUN: %clang --target=x86_64-darwin -fopenmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY
+// RUN: %clang --target=x86_64-freebsd -fopenmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY
+// RUN: %clang --target=x86_64-netbsd -fopenmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY
+// RUN: %clang --target=x86_64-openbsd -fopenmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY
+// RUN: %clang --target=x86_64-dragonfly -fopenmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY
+// RUN: %clang --target=i386-pc-solaris2.11 -fopenmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY
+// RUN: %clang --target=x86_64-windows-gnu -fopenmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANYMD
//
// CHECK-LD-ANY: "{{.*}}ld{{(.exe)?}}"
// CHECK-LD-ANY: "-l{{(omp|gomp|iomp5)}}"
diff --git a/clang/test/Driver/fortran.f95 b/clang/test/Driver/fortran.f95
index db3ff2da17e8..275b1886b2fd 100644
--- a/clang/test/Driver/fortran.f95
+++ b/clang/test/Driver/fortran.f95
@@ -1,21 +1,21 @@
! Check that the clang driver can invoke gcc to compile Fortran when in
! --driver-mode=clang. This is legacy behaviour - see also --driver-mode=flang.
-! RUN: %clang -target x86_64-unknown-linux-gnu -integrated-as -c %s -### 2>&1 \
+! RUN: %clang --target=x86_64-unknown-linux-gnu -integrated-as -c %s -### 2>&1 \
! RUN: | FileCheck --check-prefix=CHECK-OBJECT %s
! CHECK-OBJECT: gcc
! CHECK-OBJECT: "-c"
! CHECK-OBJECT: "-x" "f95"
! CHECK-OBJECT-NOT: "-cc1as"
-! RUN: %clang -target x86_64-unknown-linux-gnu -integrated-as -S %s -### 2>&1 \
+! RUN: %clang --target=x86_64-unknown-linux-gnu -integrated-as -S %s -### 2>&1 \
! RUN: | FileCheck --check-prefix=CHECK-ASM %s
! CHECK-ASM: gcc
! CHECK-ASM: "-S"
! CHECK-ASM: "-x" "f95"
! CHECK-ASM-NOT: "-cc1"
-! RUN: %clang -Wall -target x86_64-unknown-linux-gnu -integrated-as %s -o %t -### 2>&1 | FileCheck --check-prefix=CHECK-WARN %s
+! RUN: %clang -Wall --target=x86_64-unknown-linux-gnu -integrated-as %s -### 2>&1 | FileCheck --check-prefix=CHECK-WARN %s
! CHECK-WARN: gcc
! CHECK-WARN-NOT: "-Wall"
! CHECK-WARN: ld
diff --git a/clang/test/Driver/fpatchable-function-entry.c b/clang/test/Driver/fpatchable-function-entry.c
index 4d0d609584c8..ab04fd39ffa1 100644
--- a/clang/test/Driver/fpatchable-function-entry.c
+++ b/clang/test/Driver/fpatchable-function-entry.c
@@ -1,23 +1,23 @@
-// RUN: %clang -target i386 %s -fpatchable-function-entry=1 -c -### 2>&1 | FileCheck %s
-// RUN: %clang -target x86_64 %s -fpatchable-function-entry=1 -c -### 2>&1 | FileCheck %s
-// RUN: %clang -target aarch64 %s -fpatchable-function-entry=1 -c -### 2>&1 | FileCheck %s
-// RUN: %clang -target aarch64 %s -fpatchable-function-entry=1,0 -c -### 2>&1 | FileCheck %s
-// RUN: %clang -target loongarch32 %s -fpatchable-function-entry=1,0 -c -### 2>&1 | FileCheck %s
-// RUN: %clang -target loongarch64 %s -fpatchable-function-entry=1,0 -c -### 2>&1 | FileCheck %s
-// RUN: %clang -target riscv32 %s -fpatchable-function-entry=1,0 -c -### 2>&1 | FileCheck %s
-// RUN: %clang -target riscv64 %s -fpatchable-function-entry=1,0 -c -### 2>&1 | FileCheck %s
+// RUN: %clang --target=i386 %s -fpatchable-function-entry=1 -c -### 2>&1 | FileCheck %s
+// RUN: %clang --target=x86_64 %s -fpatchable-function-entry=1 -c -### 2>&1 | FileCheck %s
+// RUN: %clang --target=aarch64 %s -fpatchable-function-entry=1 -c -### 2>&1 | FileCheck %s
+// RUN: %clang --target=aarch64 %s -fpatchable-function-entry=1,0 -c -### 2>&1 | FileCheck %s
+// RUN: %clang --target=loongarch32 %s -fpatchable-function-entry=1,0 -c -### 2>&1 | FileCheck %s
+// RUN: %clang --target=loongarch64 %s -fpatchable-function-entry=1,0 -c -### 2>&1 | FileCheck %s
+// RUN: %clang --target=riscv32 %s -fpatchable-function-entry=1,0 -c -### 2>&1 | FileCheck %s
+// RUN: %clang --target=riscv64 %s -fpatchable-function-entry=1,0 -c -### 2>&1 | FileCheck %s
// CHECK: "-fpatchable-function-entry=1"
-// RUN: %clang -target aarch64 -fsyntax-only %s -fpatchable-function-entry=1,1 -c -### 2>&1 | FileCheck --check-prefix=11 %s
+// RUN: %clang --target=aarch64 -fsyntax-only %s -fpatchable-function-entry=1,1 -c -### 2>&1 | FileCheck --check-prefix=11 %s
// 11: "-fpatchable-function-entry=1" "-fpatchable-function-entry-offset=1"
-// RUN: %clang -target aarch64 -fsyntax-only %s -fpatchable-function-entry=2,1 -c -### 2>&1 | FileCheck --check-prefix=21 %s
+// RUN: %clang --target=aarch64 -fsyntax-only %s -fpatchable-function-entry=2,1 -c -### 2>&1 | FileCheck --check-prefix=21 %s
// 21: "-fpatchable-function-entry=2" "-fpatchable-function-entry-offset=1"
-// RUN: not %clang -target ppc64 -fsyntax-only %s -fpatchable-function-entry=1 2>&1 | FileCheck --check-prefix=TARGET %s
+// RUN: not %clang --target=ppc64 -fsyntax-only %s -fpatchable-function-entry=1 2>&1 | FileCheck --check-prefix=TARGET %s
// TARGET: error: unsupported option '-fpatchable-function-entry=1' for target 'ppc64'
-// RUN: not %clang -target x86_64 -fsyntax-only %s -fpatchable-function-entry=1,0, 2>&1 | FileCheck --check-prefix=EXCESS %s
+// RUN: not %clang --target=x86_64 -fsyntax-only %s -fpatchable-function-entry=1,0, 2>&1 | FileCheck --check-prefix=EXCESS %s
// EXCESS: error: invalid argument '1,0,' to -fpatchable-function-entry=
-// RUN: not %clang -target aarch64-linux -fsyntax-only %s -fxray-instrument -fpatchable-function-entry=1 2>&1 | FileCheck --check-prefix=XRAY %s
+// RUN: not %clang --target=aarch64-linux -fsyntax-only %s -fxray-instrument -fpatchable-function-entry=1 2>&1 | FileCheck --check-prefix=XRAY %s
// XRAY: error: invalid argument '-fxray-instrument' not allowed with '-fpatchable-function-entry='
diff --git a/clang/test/Driver/frame-pointer-elim.c b/clang/test/Driver/frame-pointer-elim.c
index e1b0a468ab82..cdedcc7ae4c8 100644
--- a/clang/test/Driver/frame-pointer-elim.c
+++ b/clang/test/Driver/frame-pointer-elim.c
@@ -6,39 +6,39 @@
// KEEP-NONE: "-mframe-pointer=none"
// On Linux x86, omit frame pointer when optimization is enabled.
-// RUN: %clang -### -target i386-linux -S -fomit-frame-pointer %s 2>&1 | \
+// RUN: %clang -### --target=i386-linux -S -fomit-frame-pointer %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NONE %s
-// RUN: %clang -### -target i386-linux -S -O1 %s 2>&1 | \
+// RUN: %clang -### --target=i386-linux -S -O1 %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NONE %s
// -fno-omit-frame-pointer or -pg disables frame pointer omission.
-// RUN: %clang -### -target i386-linux -S %s 2>&1 | \
+// RUN: %clang -### --target=i386-linux -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-ALL %s
-// RUN: %clang -### -target i386-linux -S -O1 -fno-omit-frame-pointer %s 2>&1 | \
+// RUN: %clang -### --target=i386-linux -S -O1 -fno-omit-frame-pointer %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-ALL %s
-// RUN: %clang -### -target i386-linux -S -O1 -pg %s 2>&1 | \
+// RUN: %clang -### --target=i386-linux -S -O1 -pg %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-ALL %s
// -momit-leaf-frame-pointer omits leaf frame pointer.
// -fno-omit-frame-pointer loses out to -momit-leaf-frame-pointer.
-// RUN: %clang -### -target i386 -S -momit-leaf-frame-pointer %s 2>&1 | \
+// RUN: %clang -### --target=i386 -S -momit-leaf-frame-pointer %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NON-LEAF %s
-// RUN: %clang -### -target i386-linux -S -O1 -fno-omit-frame-pointer -momit-leaf-frame-pointer %s 2>&1 | \
+// RUN: %clang -### --target=i386-linux -S -O1 -fno-omit-frame-pointer -momit-leaf-frame-pointer %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NON-LEAF %s
-// RUN: %clang -### -target i386-linux -S -O1 -momit-leaf-frame-pointer %s 2>&1 | \
+// RUN: %clang -### --target=i386-linux -S -O1 -momit-leaf-frame-pointer %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NONE %s
// -fno-omit-frame-pointer -momit-leaf-frame-pointer can be overridden by
// -fomit-frame-pointer later on the command line without a warning.
-// RUN: %clang -### -target i386-linux -S -O1 -fno-omit-frame-pointer -momit-leaf-frame-pointer -fomit-frame-pointer %s 2>&1 | \
+// RUN: %clang -### --target=i386-linux -S -O1 -fno-omit-frame-pointer -momit-leaf-frame-pointer -fomit-frame-pointer %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NONE %s
-// RUN: %clang -### -target i386-linux -S -O1 -fno-omit-frame-pointer -momit-leaf-frame-pointer %s 2>&1 | \
+// RUN: %clang -### --target=i386-linux -S -O1 -fno-omit-frame-pointer -momit-leaf-frame-pointer %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NON-LEAF %s
// Explicit or default -fomit-frame-pointer wins over -mno-omit-leaf-frame-pointer.
-// RUN: %clang -### -target i386 -S %s -fomit-frame-pointer -mno-omit-leaf-frame-pointer 2>&1 | \
+// RUN: %clang -### --target=i386 -S %s -fomit-frame-pointer -mno-omit-leaf-frame-pointer 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NONE %s
-// RUN: %clang -### -target i386-linux -S %s -O1 -mno-omit-leaf-frame-pointer 2>&1 | \
+// RUN: %clang -### --target=i386-linux -S %s -O1 -mno-omit-leaf-frame-pointer 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NONE %s
// -pg -fomit-frame-pointer => error.
@@ -48,10 +48,10 @@
// CHECK-MIX-NO-OMIT-FP-PG-NOT: '-fomit-frame-pointer' not allowed with '-pg'
// NetBSD follows the same rules as Linux.
-// RUN: %clang -### -target x86_64-unknown-netbsd -S -O1 %s 2>&1 | \
+// RUN: %clang -### --target=x86_64-unknown-netbsd -S -O1 %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NONE %s
-// RUN: %clang -### -target x86_64-unknown-netbsd -S %s 2>&1 | \
+// RUN: %clang -### --target=x86_64-unknown-netbsd -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-ALL %s
// Darwin disables omitting the leaf frame pointer even under optimization
@@ -62,10 +62,10 @@
// RUN: %clang -### -target i386-apple-darwin -S -O1 %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-ALL %s
-// RUN: %clang -### -target i386-darwin -S -fomit-frame-pointer %s 2>&1 | \
+// RUN: %clang -### --target=i386-darwin -S -fomit-frame-pointer %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NONE %s
-// RUN: %clang -### -target i386-darwin -S -momit-leaf-frame-pointer %s 2>&1 | \
+// RUN: %clang -### --target=i386-darwin -S -momit-leaf-frame-pointer %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NON-LEAF %s
// RUN: %clang -### -target armv7s-apple-ios -fomit-frame-pointer %s 2>&1 | \
@@ -85,19 +85,19 @@
// On AArch64, PS4, PS5, and VE, default to omitting the frame pointer on leaf
// functions
-// RUN: %clang -### -target aarch64 -S %s 2>&1 | \
+// RUN: %clang -### --target=aarch64 -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NON-LEAF %s
-// RUN: %clang -### -target x86_64-scei-ps4 -S %s 2>&1 | \
+// RUN: %clang -### --target=x86_64-scei-ps4 -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NON-LEAF %s
-// RUN: %clang -### -target x86_64-scei-ps4 -S -O2 %s 2>&1 | \
+// RUN: %clang -### --target=x86_64-scei-ps4 -S -O2 %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NON-LEAF %s
-// RUN: %clang -### -target x86_64-sie-ps5 -S %s 2>&1 | \
+// RUN: %clang -### --target=x86_64-sie-ps5 -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NON-LEAF %s
-// RUN: %clang -### -target x86_64-sie-ps5 -S -O2 %s 2>&1 | \
+// RUN: %clang -### --target=x86_64-sie-ps5 -S -O2 %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NON-LEAF %s
// RUN: %clang -### -target aarch64-apple-darwin -arch arm64_32 -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NON-LEAF %s
-// RUN: %clang -### -target ve-unknown-linux-gnu -S %s 2>&1 | \
+// RUN: %clang -### --target=ve-unknown-linux-gnu -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NON-LEAF %s
// RUN: %clang -### --target=aarch64-linux-android -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NON-LEAF %s
@@ -106,57 +106,57 @@
// RUN: %clang -### --target=aarch64-linux-android -S -Os %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NON-LEAF %s
-// RUN: %clang -### -target powerpc64 -S %s 2>&1 | \
+// RUN: %clang -### --target=powerpc64 -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-ALL %s
-// RUN: %clang -### -target powerpc64 -S -O1 %s 2>&1 | \
+// RUN: %clang -### --target=powerpc64 -S -O1 %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NONE %s
// SPARC targets omit the frame pointer when optimizations are enabled.
-// RUN: %clang -### -target sparc -S %s 2>&1 | \
+// RUN: %clang -### --target=sparc -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-ALL %s
-// RUN: %clang -### -target sparc -S -O1 %s 2>&1 | \
+// RUN: %clang -### --target=sparc -S -O1 %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NONE %s
-// RUN: %clang -### -target sparcel -S %s 2>&1 | \
+// RUN: %clang -### --target=sparcel -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-ALL %s
-// RUN: %clang -### -target sparcel -S -O1 %s 2>&1 | \
+// RUN: %clang -### --target=sparcel -S -O1 %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NONE %s
-// RUN: %clang -### -target sparc64 -S %s 2>&1 | \
+// RUN: %clang -### --target=sparc64 -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-ALL %s
-// RUN: %clang -### -target sparc64 -S -O1 %s 2>&1 | \
+// RUN: %clang -### --target=sparc64 -S -O1 %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NONE %s
// M68k targets omit the frame pointer when optimizations are enabled.
-// RUN: %clang -### -target m68k -S %s 2>&1 | \
+// RUN: %clang -### --target=m68k -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-ALL %s
-// RUN: %clang -### -target m68k -S -O1 %s 2>&1 | \
+// RUN: %clang -### --target=m68k -S -O1 %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NONE %s
// For AArch32 (A32, T32) Linux targets, omit the frame pointer by default when
// optimizations are enabled.
-// RUN: %clang -### -target arm-linux-gnueabihf- -marm -S %s 2>&1 | \
+// RUN: %clang -### --target=arm-linux-gnueabihf- -marm -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-ALL %s
-// RUN: %clang -### -target arm-linux-gnueabihf- -mthumb -S %s 2>&1 | \
+// RUN: %clang -### --target=arm-linux-gnueabihf- -mthumb -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-ALL %s
-// RUN: %clang -### -target arm-linux-gnueabihf- -marm -mbig-endian -S %s 2>&1 | \
+// RUN: %clang -### --target=arm-linux-gnueabihf- -marm -mbig-endian -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-ALL %s
-// RUN: %clang -### -target arm-linux-gnueabihf- -mthumb -mbig-endian -S %s 2>&1 | \
+// RUN: %clang -### --target=arm-linux-gnueabihf- -mthumb -mbig-endian -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-ALL %s
-// RUN: %clang -### -target arm-linux-gnueabihf- -marm -O1 -S %s 2>&1 | \
+// RUN: %clang -### --target=arm-linux-gnueabihf- -marm -O1 -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NONE %s
-// RUN: %clang -### -target arm-linux-gnueabihf- -mthumb -O1 -S %s 2>&1 | \
+// RUN: %clang -### --target=arm-linux-gnueabihf- -mthumb -O1 -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NONE %s
-// RUN: %clang -### -target arm-linux-gnueabihf- -marm -mbig-endian -O1 -S %s 2>&1 | \
+// RUN: %clang -### --target=arm-linux-gnueabihf- -marm -mbig-endian -O1 -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NONE %s
-// RUN: %clang -### -target arm-linux-gnueabihf- -mthumb -mbig-endian -O1 -S %s 2>&1 | \
+// RUN: %clang -### --target=arm-linux-gnueabihf- -mthumb -mbig-endian -O1 -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NONE %s
// For Android, always keep the frame pointer.
-// RUN: %clang -### -target armv7a-linux-androideabi- -marm -O1 -S %s 2>&1 | \
+// RUN: %clang -### --target=armv7a-linux-androideabi- -marm -O1 -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-ALL %s
-// RUN: %clang -### -target armv7a-linux-androideabi- -mthumb -O1 -S %s 2>&1 | \
+// RUN: %clang -### --target=armv7a-linux-androideabi- -mthumb -O1 -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-ALL %s
-// RUN: %clang -### -target armv7a-linux-androideabi- -marm -mbig-endian -O1 -S %s 2>&1 | \
+// RUN: %clang -### --target=armv7a-linux-androideabi- -marm -mbig-endian -O1 -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-ALL %s
-// RUN: %clang -### -target armv7a-linux-androideabi- -mthumb -mbig-endian -O1 -S %s 2>&1 | \
+// RUN: %clang -### --target=armv7a-linux-androideabi- -mthumb -mbig-endian -O1 -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-ALL %s
// RUN: %clang -### --target=riscv64-linux-android -O1 -S %s 2>&1 | \
// RUN: FileCheck --check-prefix=KEEP-NON-LEAF %s
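The KEEP-* prefixes in this test match the -mframe-pointer value on the cc1 line. One way to observe that value directly is to grep the -### output; a sketch under the same assumptions (hello.c is hypothetical, expected output shown after each command, matching the KEEP-NONE and KEEP-NON-LEAF assertions above):

$ clang --target=i386-linux -S -O1 -### hello.c 2>&1 | grep -o '"-mframe-pointer=[a-z-]*"'
"-mframe-pointer=none"
$ clang --target=aarch64 -S -### hello.c 2>&1 | grep -o '"-mframe-pointer=[a-z-]*"'
"-mframe-pointer=non-leaf"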
diff --git a/clang/test/Driver/freebsd-mips-as.c b/clang/test/Driver/freebsd-mips-as.c
index a053c2180e52..428644ab78a9 100644
--- a/clang/test/Driver/freebsd-mips-as.c
+++ b/clang/test/Driver/freebsd-mips-as.c
@@ -1,91 +1,91 @@
// Check passing options to the assembler for MIPS targets.
//
-// RUN: %clang -target mips-unknown-freebsd -### \
+// RUN: %clang --target=mips-unknown-freebsd -### \
// RUN: -no-integrated-as -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS32-EB-AS %s
// MIPS32-EB-AS: as{{(.exe)?}}" "-march" "mips2" "-mabi" "32" "-EB"
// MIPS32-EB-AS-NOT: "-KPIC"
//
-// RUN: %clang -target mips-unknown-freebsd -### \
+// RUN: %clang --target=mips-unknown-freebsd -### \
// RUN: -no-integrated-as -fPIC -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS32-EB-PIC %s
// MIPS32-EB-PIC: as{{(.exe)?}}" "-march" "mips2" "-mabi" "32" "-EB"
// MIPS32-EB-PIC: "-KPIC"
//
-// RUN: %clang -target mips-unknown-freebsd -### \
+// RUN: %clang --target=mips-unknown-freebsd -### \
// RUN: -no-integrated-as -fpic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS32-EB-PIC-SMALL %s
// MIPS32-EB-PIC-SMALL: as{{(.exe)?}}" "-march" "mips2" "-mabi" "32" "-EB"
// MIPS32-EB-PIC-SMALL: "-KPIC"
//
-// RUN: %clang -target mips-unknown-freebsd -### \
+// RUN: %clang --target=mips-unknown-freebsd -### \
// RUN: -no-integrated-as -fPIE -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS32-EB-PIE %s
// MIPS32-EB-PIE: as{{(.exe)?}}" "-march" "mips2" "-mabi" "32" "-EB"
// MIPS32-EB-PIE: "-KPIC"
//
-// RUN: %clang -target mips-unknown-freebsd -### \
+// RUN: %clang --target=mips-unknown-freebsd -### \
// RUN: -no-integrated-as -fpie -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS32-EB-PIE-SMALL %s
// MIPS32-EB-PIE-SMALL: as{{(.exe)?}}" "-march" "mips2" "-mabi" "32" "-EB"
// MIPS32-EB-PIE-SMALL: "-KPIC"
//
-// RUN: %clang -target mipsel-unknown-freebsd -### \
+// RUN: %clang --target=mipsel-unknown-freebsd -### \
// RUN: -no-integrated-as -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS32-DEF-EL-AS %s
// MIPS32-DEF-EL-AS: as{{(.exe)?}}" "-march" "mips2" "-mabi" "32" "-EL"
//
-// RUN: %clang -target mips64-unknown-freebsd -### \
+// RUN: %clang --target=mips64-unknown-freebsd -### \
// RUN: -no-integrated-as -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS64-EB-AS %s
// MIPS64-EB-AS: as{{(.exe)?}}" "-march" "mips3" "-mabi" "64" "-EB"
//
-// RUN: %clang -target mips64el-unknown-freebsd -### \
+// RUN: %clang --target=mips64el-unknown-freebsd -### \
// RUN: -no-integrated-as -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS64-DEF-EL-AS %s
// MIPS64-DEF-EL-AS: as{{(.exe)?}}" "-march" "mips3" "-mabi" "64" "-EL"
//
-// RUN: %clang -target mips64-unknown-freebsd -mabi=n32 -### \
+// RUN: %clang --target=mips64-unknown-freebsd -mabi=n32 -### \
// RUN: -no-integrated-as -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-N32 %s
// MIPS-N32: as{{(.exe)?}}" "-march" "mips3" "-mabi" "n32" "-EB"
//
-// RUN: %clang -target mipsel-unknown-freebsd -mabi=32 -### \
+// RUN: %clang --target=mipsel-unknown-freebsd -mabi=32 -### \
// RUN: -no-integrated-as -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS32-EL-AS %s
// MIPS32-EL-AS: as{{(.exe)?}}" "-march" "mips2" "-mabi" "32" "-EL"
//
-// RUN: %clang -target mips64el-unknown-freebsd -mabi=64 -### \
+// RUN: %clang --target=mips64el-unknown-freebsd -mabi=64 -### \
// RUN: -no-integrated-as -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS64-EL-AS %s
// MIPS64-EL-AS: as{{(.exe)?}}" "-march" "mips3" "-mabi" "64" "-EL"
//
-// RUN: %clang -target mips-linux-freebsd -march=mips32r2 -### \
+// RUN: %clang --target=mips-linux-freebsd -march=mips32r2 -### \
// RUN: -no-integrated-as -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-32R2 %s
// MIPS-32R2: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-EB"
//
-// RUN: %clang -target mips-unknown-freebsd -mips32 -### \
+// RUN: %clang --target=mips-unknown-freebsd -mips32 -### \
// RUN: -no-integrated-as -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-32 %s
// MIPS-ALIAS-32: as{{(.exe)?}}" "-march" "mips32" "-mabi" "32" "-EB"
//
-// RUN: %clang -target mips-unknown-freebsd -mips32r2 -### \
+// RUN: %clang --target=mips-unknown-freebsd -mips32r2 -### \
// RUN: -no-integrated-as -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-32R2 %s
// MIPS-ALIAS-32R2: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-EB"
//
-// RUN: %clang -target mips64-unknown-freebsd -mips64 -### \
+// RUN: %clang --target=mips64-unknown-freebsd -mips64 -### \
// RUN: -no-integrated-as -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-64 %s
// MIPS-ALIAS-64: as{{(.exe)?}}" "-march" "mips64" "-mabi" "64" "-EB"
//
-// RUN: %clang -target mips64-unknown-freebsd -mips64r2 -### \
+// RUN: %clang --target=mips64-unknown-freebsd -mips64r2 -### \
// RUN: -no-integrated-as -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-64R2 %s
// MIPS-ALIAS-64R2: as{{(.exe)?}}" "-march" "mips64r2" "-mabi" "64" "-EB"
//
-// RUN: %clang -target mips-unknown-freebsd -### \
+// RUN: %clang --target=mips-unknown-freebsd -### \
// RUN: -no-integrated-as -G0 -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS32-EB-AS-G0 %s
// MIPS32-EB-AS-G0: as{{(.exe)?}}" "-march" "mips2" "-mabi" "32" "-EB" "-G0"
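These checks rely on -no-integrated-as, which makes the driver construct a separate "as" job whose quoted arguments the MIPS* prefixes match. A quick way to inspect one such argument pair (hello.c is a hypothetical input; the expected line mirrors the MIPS-N32 check above):

$ clang --target=mips64-unknown-freebsd -mabi=n32 -no-integrated-as -c hello.c -### 2>&1 \
    | grep -o '"-mabi" "[^"]*"'
"-mabi" "n32"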
diff --git a/clang/test/Driver/freebsd.cpp b/clang/test/Driver/freebsd.cpp
index 6ddab9199905..dc8c98d3c3cb 100644
--- a/clang/test/Driver/freebsd.cpp
+++ b/clang/test/Driver/freebsd.cpp
@@ -1,15 +1,15 @@
-// RUN: %clangxx %s -### -o %t.o -target amd64-unknown-freebsd -stdlib=platform 2>&1 \
+// RUN: %clangxx %s -### -o %t.o --target=amd64-unknown-freebsd -stdlib=platform 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-DEFAULT %s
-// RUN: %clangxx %s -### -o %t.o -target amd64-unknown-freebsd10.0 -stdlib=platform 2>&1 \
+// RUN: %clangxx %s -### -o %t.o --target=amd64-unknown-freebsd10.0 -stdlib=platform 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-TEN %s
// CHECK-DEFAULT: "-lc++" "-lm"
// CHECK-TEN: "-lc++" "-lm"
-// RUN: %clangxx %s -### -pg -o %t.o -target amd64-unknown-freebsd -stdlib=platform 2>&1 \
+// RUN: %clangxx %s -### -pg -o %t.o --target=amd64-unknown-freebsd -stdlib=platform 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-PG-DEFAULT %s
-// RUN: %clangxx %s -### -pg -o %t.o -target amd64-unknown-freebsd14.0 -stdlib=platform 2>&1 \
+// RUN: %clangxx %s -### -pg -o %t.o --target=amd64-unknown-freebsd14.0 -stdlib=platform 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-PG-FOURTEEN %s
-// RUN: %clangxx %s -### -pg -o %t.o -target amd64-unknown-freebsd10.0 -stdlib=platform 2>&1 \
+// RUN: %clangxx %s -### -pg -o %t.o --target=amd64-unknown-freebsd10.0 -stdlib=platform 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-PG-TEN %s
// CHECK-PG-DEFAULT: "-lc++" "-lm"
// CHECK-PG-FOURTEEN: "-lc++" "-lm"
diff --git a/clang/test/Driver/fsanitize-coverage.c b/clang/test/Driver/fsanitize-coverage.c
index d34ad5f6698f..c2de897f80ee 100644
--- a/clang/test/Driver/fsanitize-coverage.c
+++ b/clang/test/Driver/fsanitize-coverage.c
@@ -1,45 +1,45 @@
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=0 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-0
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=edge -fsanitize-coverage=0 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-0
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-0
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=0 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-0
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=edge -fsanitize-coverage=0 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-0
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-0
// CHECK-SANITIZE-COVERAGE-0-NOT: fsanitize-coverage-type
// CHECK-SANITIZE-COVERAGE-0: -fsanitize=address
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=kernel-address -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=hwaddress -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=kernel-hwaddress -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=memory -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=kernel-memory -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=leak -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=undefined -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=bounds -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=bool -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=dataflow -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=thread -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=kcfi -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
-// RUN: %clang -target %itanium_abi_triple -fsanitize=float-divide-by-zero -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=kernel-address -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=hwaddress -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=kernel-hwaddress -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=memory -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=kernel-memory -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=leak -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=undefined -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=bounds -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=bool -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=dataflow -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=thread -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=kcfi -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
+// RUN: %clang --target=%itanium_abi_triple -fsanitize=float-divide-by-zero -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC
// CHECK-SANITIZE-COVERAGE-FUNC: fsanitize-coverage-type=1
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=bb %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-BB
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=bb %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-BB
// CHECK-SANITIZE-COVERAGE-BB: fsanitize-coverage-type=2
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=edge %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-EDGE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=edge %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-EDGE
// CHECK-SANITIZE-COVERAGE-EDGE: fsanitize-coverage-type=3
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=edge,indirect-calls %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC_INDIR
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=edge,indirect-calls %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FUNC_INDIR
// CHECK-SANITIZE-COVERAGE-FUNC_INDIR: fsanitize-coverage-type=3
// CHECK-SANITIZE-COVERAGE-FUNC_INDIR: fsanitize-coverage-indirect-calls
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=1 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-1
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=1 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-1
// CHECK-SANITIZE-COVERAGE-1: warning: argument '-fsanitize-coverage=1' is deprecated, use '-fsanitize-coverage=trace-pc-guard' instead
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=func %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_FUNC_BB_EDGE_DEPRECATED
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=bb %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_FUNC_BB_EDGE_DEPRECATED
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=edge %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_FUNC_BB_EDGE_DEPRECATED
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=func %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_FUNC_BB_EDGE_DEPRECATED
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=bb %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_FUNC_BB_EDGE_DEPRECATED
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=edge %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_FUNC_BB_EDGE_DEPRECATED
// CHECK_FUNC_BB_EDGE_DEPRECATED: warning: argument '-fsanitize-coverage=[func|bb|edge]' is deprecated, use '-fsanitize-coverage=[func|bb|edge],[trace-pc-guard|trace-pc],[control-flow]' instead
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=edge,indirect-calls,trace-pc,trace-cmp,trace-loads,trace-stores,trace-div,trace-gep %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FEATURES
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=edge,indirect-calls,trace-pc,trace-cmp,trace-loads,trace-stores,trace-div,trace-gep %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-SANITIZE-COVERAGE-FEATURES
// CHECK-SANITIZE-COVERAGE-FEATURES: -fsanitize-coverage-type=3
// CHECK-SANITIZE-COVERAGE-FEATURES: -fsanitize-coverage-indirect-calls
// CHECK-SANITIZE-COVERAGE-FEATURES: -fsanitize-coverage-trace-cmp
@@ -49,7 +49,7 @@
// CHECK-SANITIZE-COVERAGE-FEATURES: -fsanitize-coverage-trace-loads
// CHECK-SANITIZE-COVERAGE-FEATURES: -fsanitize-coverage-trace-stores
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=func,edge,indirect-calls,trace-cmp -fno-sanitize-coverage=edge,indirect-calls %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-MASK
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=func,edge,indirect-calls,trace-cmp -fno-sanitize-coverage=edge,indirect-calls %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-MASK
// CHECK-MASK: -fsanitize-coverage-type=1
// CHECK-MASK: -fsanitize-coverage-trace-cmp
// CHECK-MASK-NOT: -fsanitize-coverage-
@@ -60,30 +60,30 @@
// RUN: not %clang --target=x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=func -fsanitize-coverage=edge %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-INCOMPATIBLE
// CHECK-INCOMPATIBLE: error: invalid argument '-fsanitize-coverage=func' not allowed with '-fsanitize-coverage=edge'
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=8bit-counters %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-8BIT
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=8bit-counters %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-8BIT
// CHECK-8BIT: warning: argument '-fsanitize-coverage=8bit-counters' is deprecated, use '-fsanitize-coverage=trace-pc-guard' instead
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=trace-bb %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-TRACE-BB
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=trace-bb %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-TRACE-BB
// CHECK-TRACE-BB: warning: argument '-fsanitize-coverage=trace-bb' is deprecated, use '-fsanitize-coverage=trace-pc-guard' instead
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-TRACE_PC_EDGE
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=edge,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-TRACE_PC_EDGE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-TRACE_PC_EDGE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=edge,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-TRACE_PC_EDGE
// CHECK-TRACE_PC_EDGE: -fsanitize-coverage-type=3
// CHECK-TRACE_PC_EDGE: -fsanitize-coverage-trace-pc
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-TRACE_PC_FUNC
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=func,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-TRACE_PC_FUNC
// CHECK-TRACE_PC_FUNC: -fsanitize-coverage-type=1
// CHECK-TRACE_PC_FUNC: -fsanitize-coverage-trace-pc
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=trace-pc-guard %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-TRACE_PC_GUARD_EDGE
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=edge,trace-pc-guard %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-TRACE_PC_GUARD_EDGE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=trace-pc-guard %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-TRACE_PC_GUARD_EDGE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=edge,trace-pc-guard %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-TRACE_PC_GUARD_EDGE
// CHECK-TRACE_PC_GUARD_EDGE: -fsanitize-coverage-type=3
// CHECK-TRACE_PC_GUARD_EDGE: -fsanitize-coverage-trace-pc-guard
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=func,trace-pc-guard %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-TRACE_PC_GUARD_FUNC
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=func,trace-pc-guard %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-TRACE_PC_GUARD_FUNC
// CHECK-TRACE_PC_GUARD_FUNC: -fsanitize-coverage-type=1
// CHECK-TRACE_PC_GUARD_FUNC: -fsanitize-coverage-trace-pc-guard
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=stack-depth %s \
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=stack-depth %s \
// RUN: -### 2>&1 | FileCheck %s --check-prefix=CHECK-STACK-DEPTH
-// RUN: %clang -target x86_64-linux-gnu \
+// RUN: %clang --target=x86_64-linux-gnu \
// RUN: -fsanitize-coverage=trace-pc-guard,stack-depth %s -### 2>&1 | \
// RUN: FileCheck %s --check-prefix=CHECK-STACK-DEPTH-PC-GUARD
// CHECK-STACK-DEPTH: -fsanitize-coverage-type=1
@@ -92,35 +92,35 @@
// CHECK-STACK-DEPTH-PC-GUARD: -fsanitize-coverage-trace-pc-guard
// CHECK-STACK-DEPTH-PC-GUARD: -fsanitize-coverage-stack-depth
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=trace-cmp,indirect-calls %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-TYPE-NECESSARY
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=trace-cmp,indirect-calls %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-TYPE-NECESSARY
// CHECK-NO-TYPE-NECESSARY-NOT: error:
// CHECK-NO-TYPE-NECESSARY: -fsanitize-coverage-indirect-calls
// CHECK-NO-TYPE-NECESSARY: -fsanitize-coverage-trace-cmp
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=func -fsanitize-coverage=trace-cmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-EXTEND-LEGACY
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address -fsanitize-coverage=func -fsanitize-coverage=trace-cmp %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-EXTEND-LEGACY
// CHECK-EXTEND-LEGACY: -fsanitize-coverage-type=1
// CHECK-EXTEND-LEGACY: -fsanitize-coverage-trace-cmp
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=no-prune,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_NOPRUNE
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=no-prune,func,trace-pc-guard %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_NOPRUNE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=no-prune,trace-pc %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_NOPRUNE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=no-prune,func,trace-pc-guard %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_NOPRUNE
// CHECK_NOPRUNE: -fsanitize-coverage-no-prune
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=inline-8bit-counters %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_INLINE8BIT
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=bb,inline-8bit-counters %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_INLINE8BIT
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=inline-8bit-counters %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_INLINE8BIT
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=bb,inline-8bit-counters %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_INLINE8BIT
// CHECK_INLINE8BIT-NOT: warning:
// CHECK_INLINE8BIT: -fsanitize-coverage-inline-8bit-counters
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=inline-8bit-counters,pc-table %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_PC_TABLE_FOR_INLINE8BIT
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=trace-pc-guard,pc-table %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_PC_TABLE_FOR_INLINE8BIT
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=inline-8bit-counters,pc-table %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_PC_TABLE_FOR_INLINE8BIT
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=trace-pc-guard,pc-table %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_PC_TABLE_FOR_INLINE8BIT
// CHECK_PC_TABLE_FOR_INLINE8BIT: -fsanitize-coverage-pc-table
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=inline-bool-flag %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_INLINE_BOOL_FLAG
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=bb,inline-bool-flag %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_INLINE_BOOL_FLAG
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=inline-bool-flag %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_INLINE_BOOL_FLAG
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=bb,inline-bool-flag %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_INLINE_BOOL_FLAG
// CHECK_INLINE_BOOL_FLAG-NOT: warning:
// CHECK_INLINE_BOOL_FLAG: -fsanitize-coverage-inline-bool-flag
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=inline-bool-flag,pc-table %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_PC_TABLE_FOR_INLINEBOOL
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-coverage=trace-pc-guard,pc-table %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_PC_TABLE_FOR_INLINEBOOL
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=inline-bool-flag,pc-table %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_PC_TABLE_FOR_INLINEBOOL
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-coverage=trace-pc-guard,pc-table %s -### 2>&1 | FileCheck %s --check-prefix=CHECK_PC_TABLE_FOR_INLINEBOOL
// CHECK_PC_TABLE_FOR_INLINEBOOL: -fsanitize-coverage-pc-table
// RUN: %clang_cl --target=i386-pc-win32 -fsanitize=address -fsanitize-coverage=func,trace-pc-guard -c -### -- %s 2>&1 | FileCheck %s -check-prefix=CLANG-CL-COVERAGE
@@ -131,11 +131,11 @@
// CLANG-CL-COVERAGE: -fsanitize-coverage-type=1
// CLANG-CL-COVERAGE: -fsanitize=address
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=safe-stack -fsanitize-coverage=trace-pc-guard %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-VS-SAFESTACK
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=safe-stack -fsanitize-coverage=trace-pc-guard %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-VS-SAFESTACK
// CHECK-VS-SAFESTACK: -fsanitize-coverage-trace-pc-guard
// CHECK-VS-SAFESTACK: -fsanitize=safe-stack
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=safe-stack -fsanitize-coverage=trace-pc-guard -fno-sanitize=safe-stack %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-SAFESTACK
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=safe-stack -fsanitize-coverage=trace-pc-guard -fno-sanitize=safe-stack %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-SAFESTACK
// CHECK-NO-SAFESTACK-NOT: error:
// CHECK-NO-SAFESTACK-NOT: warning:
// CHECK-NO-SAFESTACK-NOT: argument unused
@@ -143,11 +143,11 @@
// CHECK-NO-SAFESTACK-NOT: -fsanitize=safe-stack
// CHECK-NO-SAFESTACK: -fsanitize-coverage-trace-pc-guard
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=shadow-call-stack -fsanitize-coverage=trace-pc-guard %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-VS-SHADOWCALLSTACK
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=shadow-call-stack -fsanitize-coverage=trace-pc-guard %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-VS-SHADOWCALLSTACK
// CHECK-VS-SHADOWCALLSTACK: -fsanitize-coverage-trace-pc-guard
// CHECK-VS-SHADOWCALLSTACK: -fsanitize=shadow-call-stack
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=shadow-call-stack -fsanitize-coverage=trace-pc-guard -fno-sanitize=shadow-call-stack %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-SAFESTACK
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=shadow-call-stack -fsanitize-coverage=trace-pc-guard -fno-sanitize=shadow-call-stack %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-SAFESTACK
// CHECK-NO-SHADOWCALLSTACK-NOT: error:
// CHECK-NO-SHADOWCALLSTACK-NOT: warning:
// CHECK-NO-SHADOWCALLSTACK-NOT: argument unused
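A common thread in the RUN lines above: the driver folds the comma-separated -fsanitize-coverage= list into discrete cc1 flags, a -fsanitize-coverage-type= level plus one flag per feature. A sketch of inspecting that lowering, mirroring the CHECK-TRACE_PC_EDGE expectations (hello.c is hypothetical):

$ clang --target=x86_64-linux-gnu -fsanitize=address \
    -fsanitize-coverage=edge,trace-pc -c hello.c -### 2>&1 \
    | tr '" ' '\n\n' | grep '^-fsanitize-coverage'
-fsanitize-coverage-type=3
-fsanitize-coverage-trace-pc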
diff --git a/clang/test/Driver/fsanitize-ignorelist.c b/clang/test/Driver/fsanitize-ignorelist.c
index c4669e50bb09..7dd666a45319 100644
--- a/clang/test/Driver/fsanitize-ignorelist.c
+++ b/clang/test/Driver/fsanitize-ignorelist.c
@@ -11,37 +11,37 @@
// RUN: echo "fun:bar" > %t.second
// RUN: echo "badline" > %t.bad
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-ignorelist=%t.good -fsanitize-ignorelist=%t.second %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-IGNORELIST
-// RUN: %clang -target aarch64-linux-gnu -fsanitize=hwaddress -fsanitize-ignorelist=%t.good -fsanitize-ignorelist=%t.second %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-IGNORELIST
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address -fsanitize-ignorelist=%t.good -fsanitize-ignorelist=%t.second %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-IGNORELIST
+// RUN: %clang --target=aarch64-linux-gnu -fsanitize=hwaddress -fsanitize-ignorelist=%t.good -fsanitize-ignorelist=%t.second %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-IGNORELIST
// CHECK-IGNORELIST: -fsanitize-ignorelist={{.*}}.good" "-fsanitize-ignorelist={{.*}}.second
// Check that the default ignorelist is not added as an extra dependency.
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-DEFAULT-IGNORELIST-ASAN --implicit-check-not=fdepfile-entry --implicit-check-not=-fsanitize-ignorelist=
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-DEFAULT-IGNORELIST-ASAN --implicit-check-not=fdepfile-entry --implicit-check-not=-fsanitize-ignorelist=
// CHECK-DEFAULT-IGNORELIST-ASAN: -fsanitize-system-ignorelist={{.*[^w]}}asan_ignorelist.txt
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=hwaddress -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-DEFAULT-IGNORELIST-HWASAN --implicit-check-not=fdepfile-entry --implicit-check-not=-fsanitize-ignorelist=
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=hwaddress -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-DEFAULT-IGNORELIST-HWASAN --implicit-check-not=fdepfile-entry --implicit-check-not=-fsanitize-ignorelist=
// CHECK-DEFAULT-IGNORELIST-HWASAN: -fsanitize-system-ignorelist={{.*}}hwasan_ignorelist.txt
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=integer -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-DEFAULT-UBSAN-IGNORELIST --implicit-check-not=fdepfile-entry --implicit-check-not=-fsanitize-ignorelist=
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=nullability -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-DEFAULT-UBSAN-IGNORELIST --implicit-check-not=fdepfile-entry --implicit-check-not=-fsanitize-ignorelist=
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=undefined -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-DEFAULT-UBSAN-IGNORELIST --implicit-check-not=fdepfile-entry --implicit-check-not=-fsanitize-ignorelist=
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=alignment -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-DEFAULT-UBSAN-IGNORELIST --implicit-check-not=fdepfile-entry --implicit-check-not=-fsanitize-ignorelist=
-// RUN: %clang -target %itanium_abi_triple -fsanitize=float-divide-by-zero -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-DEFAULT-UBSAN-IGNORELIST --implicit-check-not=fdepfile-entry --implicit-check-not=-fsanitize-ignorelist=
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=integer -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-DEFAULT-UBSAN-IGNORELIST --implicit-check-not=fdepfile-entry --implicit-check-not=-fsanitize-ignorelist=
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=nullability -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-DEFAULT-UBSAN-IGNORELIST --implicit-check-not=fdepfile-entry --implicit-check-not=-fsanitize-ignorelist=
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=undefined -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-DEFAULT-UBSAN-IGNORELIST --implicit-check-not=fdepfile-entry --implicit-check-not=-fsanitize-ignorelist=
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=alignment -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-DEFAULT-UBSAN-IGNORELIST --implicit-check-not=fdepfile-entry --implicit-check-not=-fsanitize-ignorelist=
+// RUN: %clang --target=%itanium_abi_triple -fsanitize=float-divide-by-zero -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-DEFAULT-UBSAN-IGNORELIST --implicit-check-not=fdepfile-entry --implicit-check-not=-fsanitize-ignorelist=
// CHECK-DEFAULT-UBSAN-IGNORELIST: -fsanitize-system-ignorelist={{.*}}ubsan_ignorelist.txt
// Check that combining ubsan and another sanitizer results in both ignorelists being used.
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=undefined,address -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-DEFAULT-UBSAN-IGNORELIST --check-prefix=CHECK-DEFAULT-IGNORELIST-ASAN --implicit-check-not=fdepfile-entry --implicit-check-not=-fsanitize-ignorelist=
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=undefined,address -resource-dir=%S/Inputs/resource_dir %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-DEFAULT-UBSAN-IGNORELIST --check-prefix=CHECK-DEFAULT-IGNORELIST-ASAN --implicit-check-not=fdepfile-entry --implicit-check-not=-fsanitize-ignorelist=
// Ignore -fsanitize-ignorelist flag if there is no -fsanitize flag.
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-ignorelist=%t.good %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-SANITIZE --check-prefix=DELIMITERS
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-ignorelist=%t.good %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-SANITIZE --check-prefix=DELIMITERS
// CHECK-NO-SANITIZE-NOT: -fsanitize-ignorelist
// Ignore -fsanitize-ignorelist flag if there is no -fsanitize flag.
// Now, check for the absence of -fdepfile-entry flags.
-// RUN: %clang -target x86_64-linux-gnu -fsanitize-ignorelist=%t.good %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-SANITIZE2 --check-prefix=DELIMITERS
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize-ignorelist=%t.good %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-SANITIZE2 --check-prefix=DELIMITERS
// CHECK-NO-SANITIZE2-NOT: -fdepfile-entry
// Flag -fno-sanitize-ignorelist wins if it is specified later.
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-ignorelist=%t.good -fno-sanitize-ignorelist %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IGNORELIST --check-prefix=DELIMITERS
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address -fsanitize-ignorelist=%t.good -fno-sanitize-ignorelist %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-IGNORELIST --check-prefix=DELIMITERS
// CHECK-NO-IGNORELIST-NOT: -fsanitize-ignorelist
// Driver barks at nonexistent ignorelist files.
@@ -53,13 +53,13 @@
// CHECK-BAD-IGNORELIST: error: malformed sanitizer ignorelist: 'error parsing file '{{.*}}.bad': malformed line 1: 'badline''
// -fno-sanitize-ignorelist disables all ignorelists specified earlier.
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fsanitize-ignorelist=%t.good -fno-sanitize-ignorelist -fsanitize-ignorelist=%t.second %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-ONLY-FIRST-DISABLED --implicit-check-not=-fsanitize-ignorelist=
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address -fsanitize-ignorelist=%t.good -fno-sanitize-ignorelist -fsanitize-ignorelist=%t.second %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-ONLY-FIRST-DISABLED --implicit-check-not=-fsanitize-ignorelist=
// CHECK-ONLY_FIRST-DISABLED-NOT: good
// CHECK-ONLY-FIRST-DISABLED: -fsanitize-ignorelist={{.*}}.second
// CHECK-ONLY_FIRST-DISABLED-NOT: good
// -fno-sanitize-ignorelist disables the system ignorelists.
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=address -fno-sanitize-ignorelist %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-DISABLED-SYSTEM --check-prefix=DELIMITERS
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=address -fno-sanitize-ignorelist %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-DISABLED-SYSTEM --check-prefix=DELIMITERS
// CHECK-DISABLED-SYSTEM-NOT: -fsanitize-system-ignorelist
// If cfi_ignorelist.txt cannot be found in the resource dir, the driver should fail.
@@ -67,7 +67,7 @@
// CHECK-MISSING-CFI-IGNORELIST: error: missing sanitizer ignorelist: '{{.*}}cfi_ignorelist.txt'
// -fno-sanitize-ignorelist disables checking for cfi_ignorelist.txt in the resource dir.
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=cfi -flto -fvisibility=default -fno-sanitize-ignorelist -resource-dir=/dev/null %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-MISSING-CFI-NO-IGNORELIST
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=cfi -flto -fvisibility=default -fno-sanitize-ignorelist -resource-dir=/dev/null %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-MISSING-CFI-NO-IGNORELIST
// CHECK-MISSING-CFI-NO-IGNORELIST-NOT: error: no such file or directory: '{{.*}}cfi_ignorelist.txt'
// DELIMITERS: {{^ *"}}
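The %t.good and %t.second files generated at the top of this test use the sanitizer special-case-list syntax: one "type:pattern" entry per line (fun: for functions, src: for source paths). A standalone sketch (file and source names hypothetical):

$ printf 'fun:foo\nsrc:third_party/*\n' > ignore.txt
$ clang --target=x86_64-linux-gnu -fsanitize=address -fsanitize-ignorelist=ignore.txt -c hello.c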
diff --git a/clang/test/Driver/fsanitize-memory-param-retval.c b/clang/test/Driver/fsanitize-memory-param-retval.c
index 79ade32178b6..99d8cb7f55e5 100644
--- a/clang/test/Driver/fsanitize-memory-param-retval.c
+++ b/clang/test/Driver/fsanitize-memory-param-retval.c
@@ -1,14 +1,14 @@
-// RUN: %clang -target i386-gnu-linux %s -fsanitize=memory -fno-sanitize-memory-param-retval -c -### 2>&1 | FileCheck %s
-// RUN: %clang -target x86_64-linux-gnu %s -fsanitize=memory -fno-sanitize-memory-param-retval -c -### 2>&1 | FileCheck %s
-// RUN: %clang -target aarch64-linux-gnu %s -fsanitize=memory -fno-sanitize-memory-param-retval -c -### 2>&1 | FileCheck %s
-// RUN: %clang -target riscv32-linux-gnu %s -fsanitize=memory -fno-sanitize-memory-param-retval -c -### 2>&1 | FileCheck %s
-// RUN: %clang -target riscv64-linux-gnu %s -fsanitize=memory -fno-sanitize-memory-param-retval -c -### 2>&1 | FileCheck %s
-// RUN: %clang -target x86_64-linux-gnu %s -fsanitize=kernel-memory -fno-sanitize-memory-param-retval -c -### 2>&1 | FileCheck %s
+// RUN: %clang --target=i386-gnu-linux %s -fsanitize=memory -fno-sanitize-memory-param-retval -c -### 2>&1 | FileCheck %s
+// RUN: %clang --target=x86_64-linux-gnu %s -fsanitize=memory -fno-sanitize-memory-param-retval -c -### 2>&1 | FileCheck %s
+// RUN: %clang --target=aarch64-linux-gnu %s -fsanitize=memory -fno-sanitize-memory-param-retval -c -### 2>&1 | FileCheck %s
+// RUN: %clang --target=riscv32-linux-gnu %s -fsanitize=memory -fno-sanitize-memory-param-retval -c -### 2>&1 | FileCheck %s
+// RUN: %clang --target=riscv64-linux-gnu %s -fsanitize=memory -fno-sanitize-memory-param-retval -c -### 2>&1 | FileCheck %s
+// RUN: %clang --target=x86_64-linux-gnu %s -fsanitize=kernel-memory -fno-sanitize-memory-param-retval -c -### 2>&1 | FileCheck %s
// CHECK: "-fno-sanitize-memory-param-retval"
-// RUN: %clang -target aarch64-linux-gnu -fsyntax-only %s -fsanitize=memory -fno-sanitize-memory-param-retval -c -### 2>&1 | FileCheck --check-prefix=11 %s
+// RUN: %clang --target=aarch64-linux-gnu -fsyntax-only %s -fsanitize=memory -fno-sanitize-memory-param-retval -c -### 2>&1 | FileCheck --check-prefix=11 %s
// 11: "-fno-sanitize-memory-param-retval"
-// RUN: not %clang -target x86_64-linux-gnu -fsyntax-only %s -fsanitize=memory -fno-sanitize-memory-param-retval=1 2>&1 | FileCheck --check-prefix=EXCESS %s
+// RUN: not %clang --target=x86_64-linux-gnu -fsyntax-only %s -fsanitize=memory -fno-sanitize-memory-param-retval=1 2>&1 | FileCheck --check-prefix=EXCESS %s
// EXCESS: error: unknown argument: '-fno-sanitize-memory-param-retval=
diff --git a/clang/test/Driver/fsanitize-metadata-ignorelist.c b/clang/test/Driver/fsanitize-metadata-ignorelist.c
index 65a45ccb1404..ad5f4be16768 100644
--- a/clang/test/Driver/fsanitize-metadata-ignorelist.c
+++ b/clang/test/Driver/fsanitize-metadata-ignorelist.c
@@ -3,12 +3,12 @@
// RUN: echo "fun:foo" > %t.1
// RUN: echo "fun:bar" > %t.2
-// RUN: %clang -target x86_64-linux-gnu -fexperimental-sanitize-metadata=all -fexperimental-sanitize-metadata-ignorelist=%t.1 -fexperimental-sanitize-metadata-ignorelist=%t.2 %s -### 2>&1 | FileCheck %s
-// RUN: %clang -target aarch64-linux-gnu -fexperimental-sanitize-metadata=atomics -fexperimental-sanitize-metadata-ignorelist=%t.1 -fexperimental-sanitize-metadata-ignorelist=%t.2 %s -### 2>&1 | FileCheck %s
+// RUN: %clang --target=x86_64-linux-gnu -fexperimental-sanitize-metadata=all -fexperimental-sanitize-metadata-ignorelist=%t.1 -fexperimental-sanitize-metadata-ignorelist=%t.2 %s -### 2>&1 | FileCheck %s
+// RUN: %clang --target=aarch64-linux-gnu -fexperimental-sanitize-metadata=atomics -fexperimental-sanitize-metadata-ignorelist=%t.1 -fexperimental-sanitize-metadata-ignorelist=%t.2 %s -### 2>&1 | FileCheck %s
// CHECK: "-fexperimental-sanitize-metadata-ignorelist={{.*}}.1" "-fexperimental-sanitize-metadata-ignorelist={{.*}}.2"
// Verify that the -fsanitize-metadata-ignorelist flag is not passed if there is no -fsanitize-metadata flag.
-// RUN: %clang -target x86_64-linux-gnu -fexperimental-sanitize-metadata-ignorelist=%t.1 -fexperimental-sanitize-metadata-ignorelist=%t.2 %s -### 2>&1 | FileCheck %s --check-prefix=NOSANMD
-// RUN: %clang -target aarch64-linux-gnu -fexperimental-sanitize-metadata-ignorelist=%t.1 -fexperimental-sanitize-metadata-ignorelist=%t.2 %s -### 2>&1 | FileCheck %s --check-prefix=NOSANMD
+// RUN: %clang --target=x86_64-linux-gnu -fexperimental-sanitize-metadata-ignorelist=%t.1 -fexperimental-sanitize-metadata-ignorelist=%t.2 %s -### 2>&1 | FileCheck %s --check-prefix=NOSANMD
+// RUN: %clang --target=aarch64-linux-gnu -fexperimental-sanitize-metadata-ignorelist=%t.1 -fexperimental-sanitize-metadata-ignorelist=%t.2 %s -### 2>&1 | FileCheck %s --check-prefix=NOSANMD
// NOSANMD: warning: argument unused during compilation: '-fexperimental-sanitize-metadata-ignorelist
// NOSANMD-NOT: "-fexperimental-sanitize-metadata-ignorelist
diff --git a/clang/test/Driver/fsanitize-object-size.c b/clang/test/Driver/fsanitize-object-size.c
index 50c67838df39..78c720288641 100644
--- a/clang/test/Driver/fsanitize-object-size.c
+++ b/clang/test/Driver/fsanitize-object-size.c
@@ -1,27 +1,27 @@
// Check that the object size check is disabled at -O0.
//
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=object-size %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OSIZE
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=object-size %s -O0 -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OSIZE
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=null,object-size %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OSIZE
-// RUN: %clang -target x86_64-linux-gnu -Werror -fsanitize=null,object-size %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OSIZE
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=undefined %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OSIZE-NO-WARNING
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=object-size %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OSIZE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=object-size %s -O0 -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OSIZE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=null,object-size %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OSIZE
+// RUN: %clang --target=x86_64-linux-gnu -Werror -fsanitize=null,object-size %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OSIZE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=undefined %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-OSIZE-NO-WARNING
// Check that the object size check is enabled at other optimization levels.
//
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=undefined -O1 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=object-size -O2 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=object-size -O3 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=object-size -O4 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=object-size -Ofast %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=object-size -Os %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=object-size -Oz %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=object-size -Og %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=undefined -O1 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=object-size -O2 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=object-size -O3 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=object-size -O4 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=object-size -Ofast %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=object-size -Os %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=object-size -Oz %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=object-size -Og %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
// Use of trap mode shouldn't affect the object size check.
//
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=undefined -fsanitize-trap=undefined -O1 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=undefined-trap -O1 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
-// RUN: %clang -target x86_64-linux-gnu -fsanitize=undefined-trap -fsanitize-undefined-trap-on-error -O1 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=undefined -fsanitize-trap=undefined -O1 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=undefined-trap -O1 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
+// RUN: %clang --target=x86_64-linux-gnu -fsanitize=undefined-trap -fsanitize-undefined-trap-on-error -O1 %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-HAS-OSIZE
// CHECK-HAS-OSIZE-NOT: warning: the object size sanitizer
// CHECK-HAS-OSIZE: -fsanitize={{[^ ]*}}object-size
diff --git a/clang/test/Driver/fsemantic-interposition.c b/clang/test/Driver/fsemantic-interposition.c
index 0ee0dbb3be34..aaa44878483c 100644
--- a/clang/test/Driver/fsemantic-interposition.c
+++ b/clang/test/Driver/fsemantic-interposition.c
@@ -1,20 +1,20 @@
-// RUN: %clang --sysroot=%S/Inputs -target x86_64 %s -Werror -fpic -fsemantic-interposition -c -### 2>&1 | FileCheck %s
-// RUN: %clang --sysroot=%S/Inputs -target x86_64 %s -Werror -fPIC -fsemantic-interposition -c -### 2>&1 | FileCheck %s
+// RUN: %clang --sysroot=%S/Inputs --target=x86_64 %s -Werror -fpic -fsemantic-interposition -c -### 2>&1 | FileCheck %s
+// RUN: %clang --sysroot=%S/Inputs --target=x86_64 %s -Werror -fPIC -fsemantic-interposition -c -### 2>&1 | FileCheck %s
// CHECK: "-fsemantic-interposition"
/// No-op for -fno-pic/-fpie.
-// RUN: %clang --sysroot=%S/Inputs -target x86_64 %s -Werror -fsemantic-interposition -c -### 2>&1 | FileCheck --check-prefix=NOOP %s
-// RUN: %clang --sysroot=%S/Inputs -target x86_64 %s -Werror -fPIE -fsemantic-interposition -c -### 2>&1 | FileCheck --check-prefix=NOOP %s
+// RUN: %clang --sysroot=%S/Inputs --target=x86_64 %s -Werror -fsemantic-interposition -c -### 2>&1 | FileCheck --check-prefix=NOOP %s
+// RUN: %clang --sysroot=%S/Inputs --target=x86_64 %s -Werror -fPIE -fsemantic-interposition -c -### 2>&1 | FileCheck --check-prefix=NOOP %s
// NOOP-NOT: "-fsemantic-interposition"
// NOOP-NOT: "-fno-semantic-interposition"
/// If -fno-semantic-interposition is specified and the target supports local
/// aliases, neither CC1 option is set.
-// RUN: %clang --sysroot=%S/Inputs -target aarch64 %s -Werror -fPIC -fno-semantic-interposition -c -### 2>&1 | FileCheck --check-prefix=NO %s
-// RUN: %clang --sysroot=%S/Inputs -target riscv32 %s -Werror -fPIC -fno-semantic-interposition -c -### 2>&1 | FileCheck --check-prefix=NO %s
-// RUN: %clang --sysroot=%S/Inputs -target riscv64 %s -Werror -fPIC -fno-semantic-interposition -c -### 2>&1 | FileCheck --check-prefix=NO %s
-// RUN: %clang --sysroot=%S/Inputs -target i386 %s -Werror -fPIC -fno-semantic-interposition -c -### 2>&1 | FileCheck --check-prefix=NO %s
-// RUN: %clang --sysroot=%S/Inputs -target x86_64 %s -Werror -fPIC -fno-semantic-interposition -c -### 2>&1 | FileCheck --check-prefix=NO %s
+// RUN: %clang --sysroot=%S/Inputs --target=aarch64 %s -Werror -fPIC -fno-semantic-interposition -c -### 2>&1 | FileCheck --check-prefix=NO %s
+// RUN: %clang --sysroot=%S/Inputs --target=riscv32 %s -Werror -fPIC -fno-semantic-interposition -c -### 2>&1 | FileCheck --check-prefix=NO %s
+// RUN: %clang --sysroot=%S/Inputs --target=riscv64 %s -Werror -fPIC -fno-semantic-interposition -c -### 2>&1 | FileCheck --check-prefix=NO %s
+// RUN: %clang --sysroot=%S/Inputs --target=i386 %s -Werror -fPIC -fno-semantic-interposition -c -### 2>&1 | FileCheck --check-prefix=NO %s
+// RUN: %clang --sysroot=%S/Inputs --target=x86_64 %s -Werror -fPIC -fno-semantic-interposition -c -### 2>&1 | FileCheck --check-prefix=NO %s
// NO-NOT: "-fsemantic-interposition"
// NO-NOT: "-fhalf-no-semantic-interposition"
@@ -23,8 +23,8 @@
/// local aliases, use the traditional half-baked behavior: interprocedural
/// optimizations are allowed but local aliases are not used. If references are
/// not optimized out, semantic interposition at runtime is possible.
-// RUN: %clang --sysroot=%S/Inputs -target ppc64le %s -Werror -fPIC -fno-semantic-interposition -c -### 2>&1 | FileCheck --check-prefix=HALF %s
+// RUN: %clang --sysroot=%S/Inputs --target=ppc64le %s -Werror -fPIC -fno-semantic-interposition -c -### 2>&1 | FileCheck --check-prefix=HALF %s
-// RUN: %clang --sysroot=%S/Inputs -target x86_64 %s -Werror -fPIC -c -### 2>&1 | FileCheck --check-prefix=HALF %s
+// RUN: %clang --sysroot=%S/Inputs --target=x86_64 %s -Werror -fPIC -c -### 2>&1 | FileCheck --check-prefix=HALF %s
//
// HALF: "-fhalf-no-semantic-interposition"
diff --git a/clang/test/Driver/fsjlj-exceptions.c b/clang/test/Driver/fsjlj-exceptions.c
index fd16a51b1f69..122513f6b611 100644
--- a/clang/test/Driver/fsjlj-exceptions.c
+++ b/clang/test/Driver/fsjlj-exceptions.c
@@ -1,6 +1,6 @@
// RUN: %clang -target armv7-apple-ios -fexceptions -c %s -o /dev/null -### 2>&1 | FileCheck -check-prefix CHECK-IOS %s
-// RUN: %clang -target i686-windows-gnu -fexceptions -c %s -o /dev/null -### 2>&1 | FileCheck -check-prefix CHECK-MINGW-DEFAULT %s
-// RUN: %clang -target i686-windows-gnu -fexceptions -fsjlj-exceptions -c %s -o /dev/null -### 2>&1 | FileCheck -check-prefix CHECK-MINGW-SJLJ %s
+// RUN: %clang --target=i686-windows-gnu -fexceptions -c %s -o /dev/null -### 2>&1 | FileCheck --check-prefix=CHECK-MINGW-DEFAULT %s
+// RUN: %clang --target=i686-windows-gnu -fexceptions -fsjlj-exceptions -c %s -o /dev/null -### 2>&1 | FileCheck --check-prefix=CHECK-MINGW-SJLJ %s
// CHECK-IOS: -exception-model=sjlj
// CHECK-MINGW-DEFAULT-NOT: -exception-model=sjlj
diff --git a/clang/test/Driver/fuse-ld-windows.c b/clang/test/Driver/fuse-ld-windows.c
index 089f2961b75d..8a5af61c6e09 100644
--- a/clang/test/Driver/fuse-ld-windows.c
+++ b/clang/test/Driver/fuse-ld-windows.c
@@ -1,23 +1,23 @@
// REQUIRES: system-windows
// We used to require adding ".exe" suffix when cross-compiling on Windows.
-// RUN: %clang %s -### -o %t.o -target i386-unknown-linux \
+// RUN: %clang %s -### -o %t.o --target=i386-unknown-linux \
// RUN: -B %S/Inputs/fuse_ld_windows -fuse-ld=foo 2>&1 \
// RUN: | FileCheck %s
// Check that the old variant still works.
-// RUN: %clang %s -### -o %t.o -target i386-unknown-linux \
+// RUN: %clang %s -### -o %t.o --target=i386-unknown-linux \
// RUN: -B %S/Inputs/fuse_ld_windows -fuse-ld=foo.exe 2>&1 \
// RUN: | FileCheck %s
// With the full path, the extension can be omitted, too,
// because Windows allows that.
-// RUN: %clang %s -### -o %t.o -target i386-unknown-linux \
+// RUN: %clang %s -### -o %t.o --target=i386-unknown-linux \
// RUN: -fuse-ld=%S/Inputs/fuse_ld_windows/ld.foo 2>&1 \
// RUN: | FileCheck %s
// Check that the full path with the extension works too.
-// RUN: %clang %s -### -o %t.o -target i386-unknown-linux \
+// RUN: %clang %s -### -o %t.o --target=i386-unknown-linux \
// RUN: -fuse-ld=%S/Inputs/fuse_ld_windows/ld.foo.exe 2>&1 \
// RUN: | FileCheck %s
diff --git a/clang/test/Driver/fuse-ld.c b/clang/test/Driver/fuse-ld.c
index ef2f8c92a370..f807434dad10 100644
--- a/clang/test/Driver/fuse-ld.c
+++ b/clang/test/Driver/fuse-ld.c
@@ -15,88 +15,88 @@
// CHECK-NO-WARN-NOT: warning:
// RUN: %clang %s -### \
-// RUN: -target x86_64-unknown-freebsd 2>&1 \
+// RUN: --target=x86_64-unknown-freebsd 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-FREEBSD-LD
// CHECK-FREEBSD-LD: ld
// RUN: %clang %s -### -fuse-ld=bfd \
// RUN: --sysroot=%S/Inputs/basic_freebsd_tree \
-// RUN: -target x86_64-unknown-freebsd \
+// RUN: --target=x86_64-unknown-freebsd \
// RUN: -B%S/Inputs/basic_freebsd_tree/usr/bin 2>&1 \
// RUN: | FileCheck %s -check-prefix=CHECK-FREEBSD-BFD
// CHECK-FREEBSD-BFD: Inputs/basic_freebsd_tree/usr/bin{{/|\\+}}ld.bfd
// RUN: %clang %s -### -fuse-ld=gold \
// RUN: --sysroot=%S/Inputs/basic_freebsd_tree \
-// RUN: -target x86_64-unknown-freebsd \
+// RUN: --target=x86_64-unknown-freebsd \
// RUN: -B%S/Inputs/basic_freebsd_tree/usr/bin 2>&1 \
// RUN: | FileCheck %s -check-prefix=CHECK-FREEBSD-GOLD
// CHECK-FREEBSD-GOLD: Inputs/basic_freebsd_tree/usr/bin{{/|\\+}}ld.gold
// RUN: not %clang %s -### -fuse-ld=plib \
// RUN: --sysroot=%S/Inputs/basic_freebsd_tree \
-// RUN: -target x86_64-unknown-freebsd \
+// RUN: --target=x86_64-unknown-freebsd \
// RUN: -B%S/Inputs/basic_freebsd_tree/usr/bin 2>&1 \
// RUN: | FileCheck %s -check-prefix=CHECK-FREEBSD-PLIB
// CHECK-FREEBSD-PLIB: error: invalid linker name
// RUN: %clang %s -### -fuse-ld=ld \
-// RUN: -target arm-linux-androideabi \
+// RUN: --target=arm-linux-androideabi \
// RUN: -B%S/Inputs/basic_android_tree/bin/arm-linux-androideabi- 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-ANDROID-ARM-LD
// CHECK-ANDROID-ARM-LD: ld.lld
// RUN: %clang %s -### -fuse-ld=bfd \
-// RUN: -target arm-linux-androideabi \
+// RUN: --target=arm-linux-androideabi \
// RUN: -B%S/Inputs/basic_android_tree/bin/arm-linux-androideabi- 2>&1 \
// RUN: | FileCheck %s -check-prefix=CHECK-ANDROID-ARM-BFD
// CHECK-ANDROID-ARM-BFD: Inputs/basic_android_tree/bin{{/|\\+}}arm-linux-androideabi-ld.bfd
// RUN: %clang %s -### -fuse-ld=gold \
-// RUN: -target arm-linux-androideabi \
+// RUN: --target=arm-linux-androideabi \
// RUN: -B%S/Inputs/basic_android_tree/bin/arm-linux-androideabi- 2>&1 \
// RUN: | FileCheck %s -check-prefix=CHECK-ANDROID-ARM-GOLD
// CHECK-ANDROID-ARM-GOLD: Inputs/basic_android_tree/bin{{/|\\+}}arm-linux-androideabi-ld.gold
// RUN: %clang %s -### -fuse-ld=ld \
-// RUN: -target arm-linux-androideabi \
+// RUN: --target=arm-linux-androideabi \
// RUN: --gcc-toolchain=%S/Inputs/basic_android_tree 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-ANDROID-ARM-LD-TC
// CHECK-ANDROID-ARM-LD-TC: ld.lld
// RUN: %clang %s -### -fuse-ld=bfd \
-// RUN: -target arm-linux-androideabi \
+// RUN: --target=arm-linux-androideabi \
// RUN: --gcc-toolchain=%S/Inputs/basic_android_tree 2>&1 \
// RUN: | FileCheck %s -check-prefix=CHECK-ANDROID-ARM-BFD-TC
// CHECK-ANDROID-ARM-BFD-TC: Inputs/basic_android_tree/lib/gcc/arm-linux-androideabi/4.4.3/../../../../arm-linux-androideabi/bin{{/|\\+}}ld.bfd
// RUN: %clang %s -### -fuse-ld=gold \
-// RUN: -target arm-linux-androideabi \
+// RUN: --target=arm-linux-androideabi \
// RUN: --gcc-toolchain=%S/Inputs/basic_android_tree 2>&1 \
// RUN: | FileCheck %s -check-prefix=CHECK-ANDROID-ARM-GOLD-TC
// CHECK-ANDROID-ARM-GOLD-TC: Inputs/basic_android_tree/lib/gcc/arm-linux-androideabi/4.4.3/../../../../arm-linux-androideabi/bin{{/|\\+}}ld.gold
// RUN: %clang %s -### -fuse-ld=link \
-// RUN: -target i686-unknown-windows-msvc 2>&1 \
+// RUN: --target=i686-unknown-windows-msvc 2>&1 \
// RUN: | FileCheck %s --check-prefix CHECK-WINDOWS-MSVC-LINK
// CHECK-WINDOWS-MSVC-LINK: "{{.*}}link.exe"
// CHECK-WINDOWS-MSVC-LINK-SAME: "-out:{{.*}}"
// RUN: %clang %s -### -fuse-ld=lld \
-// RUN: -target i686-unknown-windows-msvc 2>&1 \
+// RUN: --target=i686-unknown-windows-msvc 2>&1 \
// RUN: | FileCheck %s --check-prefix CHECK-WINDOWS-MSVC-LLD
// CHECK-WINDOWS-MSVC-LLD: "{{.*}}lld-link{{\.exe"|"}}
// CHECK-WINDOWS-MSVC-LLD-SAME: "-out:{{.*}}"
// RUN: %clang %s -### -fuse-ld=lld-link \
-// RUN: -target i686-unknown-windows-msvc 2>&1 \
+// RUN: --target=i686-unknown-windows-msvc 2>&1 \
// RUN: | FileCheck %s --check-prefix CHECK-WINDOWS-MSVC-LLD-LINK
// CHECK-WINDOWS-MSVC-LLD-LINK: "{{.*}}lld-link{{\.exe"|"}}
// CHECK-WINDOWS-MSVC-LLD-LINK-SAME: "-out:{{.*}}"
// RUN: %clang %s -### -fuse-ld=bfd \
-// RUN: -target i686-unknown-windows-msvc \
+// RUN: --target=i686-unknown-windows-msvc \
// RUN: -B %S/Inputs/Windows/usr/bin 2>&1 \
// RUN: | FileCheck %s --check-prefix CHECK-WINDOWS-MSVC-BFD
// CHECK-WINDOWS-MSVC-BFD: "{{.*}}ld.bfd"
diff --git a/clang/test/Driver/fuzzer.c b/clang/test/Driver/fuzzer.c
index 14caf7690057..409fbfac8ce1 100644
--- a/clang/test/Driver/fuzzer.c
+++ b/clang/test/Driver/fuzzer.c
@@ -8,7 +8,7 @@
// CHECK-COVERAGE-SAME: -fsanitize-coverage-pc-table
// CHECK-FUZZER-LIB: libclang_rt.fuzzer
-// RUN: %clang -fsanitize=fuzzer -target i386-unknown-linux -stdlib=platform %s -### 2>&1 | FileCheck --check-prefixes=CHECK-LIBCXX-LINUX %s
+// RUN: %clang -fsanitize=fuzzer --target=i386-unknown-linux -stdlib=platform %s -### 2>&1 | FileCheck --check-prefixes=CHECK-LIBCXX-LINUX %s
//
// CHECK-LIBCXX-LINUX: -lstdc++
@@ -29,18 +29,18 @@
// Check that we respect whether the standard library should be linked
// statically.
//
-// RUN: %clang -fsanitize=fuzzer -target i386-unknown-linux -stdlib=libstdc++ %s -### 2>&1 | FileCheck --check-prefixes=CHECK-LIBSTDCXX-DYNAMIC %s
+// RUN: %clang -fsanitize=fuzzer --target=i386-unknown-linux -stdlib=libstdc++ %s -### 2>&1 | FileCheck --check-prefixes=CHECK-LIBSTDCXX-DYNAMIC %s
// CHECK-LIBSTDCXX-DYNAMIC-NOT: -Bstatic
// CHECK-LIBSTDCXX-DYNAMIC: -lstdc++
//
-// RUN: %clang -fsanitize=fuzzer -target i386-unknown-linux -stdlib=libstdc++ -static-libstdc++ %s -### 2>&1 | FileCheck --check-prefixes=CHECK-LIBSTDCXX-STATIC %s
+// RUN: %clang -fsanitize=fuzzer --target=i386-unknown-linux -stdlib=libstdc++ -static-libstdc++ %s -### 2>&1 | FileCheck --check-prefixes=CHECK-LIBSTDCXX-STATIC %s
// CHECK-LIBSTDCXX-STATIC: "-Bstatic" "-lstdc++"
//
-// RUN: %clang -fsanitize=fuzzer -target i386-unknown-linux -stdlib=libc++ %s -### 2>&1 | FileCheck --check-prefixes=CHECK-LIBCXX-DYNAMIC %s
+// RUN: %clang -fsanitize=fuzzer --target=i386-unknown-linux -stdlib=libc++ %s -### 2>&1 | FileCheck --check-prefixes=CHECK-LIBCXX-DYNAMIC %s
// CHECK-LIBCXX-DYNAMIC-NOT: -Bstatic
// CHECK-LIBCXX-DYNAMIC: -lc++
//
-// RUN: %clang -fsanitize=fuzzer -target i386-unknown-linux -stdlib=libc++ -static-libstdc++ %s -### 2>&1 | FileCheck --check-prefixes=CHECK-LIBCXX-STATIC %s
+// RUN: %clang -fsanitize=fuzzer --target=i386-unknown-linux -stdlib=libc++ -static-libstdc++ %s -### 2>&1 | FileCheck --check-prefixes=CHECK-LIBCXX-STATIC %s
// CHECK-LIBCXX-STATIC: "-Bstatic" "-lc++"
int LLVMFuzzerTestOneInput(const char *Data, long Size) {
diff --git a/clang/test/Driver/fveclib.c b/clang/test/Driver/fveclib.c
index 8a230284bcdf..9b0f1ce13aa2 100644
--- a/clang/test/Driver/fveclib.c
+++ b/clang/test/Driver/fveclib.c
@@ -1,11 +1,11 @@
-// RUN: %clang -### -c -fveclib=none %s 2>&1 | FileCheck -check-prefix CHECK-NOLIB %s
-// RUN: %clang -### -c -fveclib=Accelerate %s 2>&1 | FileCheck -check-prefix CHECK-ACCELERATE %s
-// RUN: %clang -### -c -fveclib=libmvec %s 2>&1 | FileCheck -check-prefix CHECK-libmvec %s
-// RUN: %clang -### -c -fveclib=MASSV %s 2>&1 | FileCheck -check-prefix CHECK-MASSV %s
-// RUN: %clang -### -c -fveclib=Darwin_libsystem_m %s 2>&1 | FileCheck -check-prefix CHECK-DARWIN_LIBSYSTEM_M %s
-// RUN: %clang -### -c --target=aarch64-none-none -fveclib=SLEEF %s 2>&1 | FileCheck -check-prefix CHECK-SLEEF %s
-// RUN: %clang -### -c --target=aarch64-none-none -fveclib=ArmPL %s 2>&1 | FileCheck -check-prefix CHECK-ARMPL %s
-// RUN: not %clang -c -fveclib=something %s 2>&1 | FileCheck -check-prefix CHECK-INVALID %s
+// RUN: %clang -### -c -fveclib=none %s 2>&1 | FileCheck --check-prefix=CHECK-NOLIB %s
+// RUN: %clang -### -c -fveclib=Accelerate %s 2>&1 | FileCheck --check-prefix=CHECK-ACCELERATE %s
+// RUN: %clang -### -c -fveclib=libmvec %s 2>&1 | FileCheck --check-prefix=CHECK-libmvec %s
+// RUN: %clang -### -c -fveclib=MASSV %s 2>&1 | FileCheck --check-prefix=CHECK-MASSV %s
+// RUN: %clang -### -c -fveclib=Darwin_libsystem_m %s 2>&1 | FileCheck --check-prefix=CHECK-DARWIN_LIBSYSTEM_M %s
+// RUN: %clang -### -c --target=aarch64 -fveclib=SLEEF %s 2>&1 | FileCheck --check-prefix=CHECK-SLEEF %s
+// RUN: %clang -### -c --target=aarch64 -fveclib=ArmPL %s 2>&1 | FileCheck --check-prefix=CHECK-ARMPL %s
+// RUN: not %clang -c -fveclib=something %s 2>&1 | FileCheck --check-prefix=CHECK-INVALID %s
// CHECK-NOLIB: "-fveclib=none"
// CHECK-ACCELERATE: "-fveclib=Accelerate"
@@ -17,10 +17,10 @@
// CHECK-INVALID: error: invalid value 'something' in '-fveclib=something'
-// RUN: not %clang --target=x86-none-none -c -fveclib=SLEEF %s 2>&1 | FileCheck -check-prefix CHECK-ERROR %s
-// RUN: not %clang --target=x86-none-none -c -fveclib=ArmPL %s 2>&1 | FileCheck -check-prefix CHECK-ERROR %s
-// RUN: not %clang --target=aarch64-none-none -c -fveclib=LIBMVEC-X86 %s 2>&1 | FileCheck -check-prefix CHECK-ERROR %s
-// RUN: not %clang --target=aarch64-none-none -c -fveclib=SVML %s 2>&1 | FileCheck -check-prefix CHECK-ERROR %s
+// RUN: not %clang --target=x86 -c -fveclib=SLEEF %s 2>&1 | FileCheck --check-prefix=CHECK-ERROR %s
+// RUN: not %clang --target=x86 -c -fveclib=ArmPL %s 2>&1 | FileCheck --check-prefix=CHECK-ERROR %s
+// RUN: not %clang --target=aarch64 -c -fveclib=LIBMVEC-X86 %s 2>&1 | FileCheck --check-prefix=CHECK-ERROR %s
+// RUN: not %clang --target=aarch64 -c -fveclib=SVML %s 2>&1 | FileCheck --check-prefix=CHECK-ERROR %s
// CHECK-ERROR: unsupported option {{.*}} for target
// RUN: %clang -fveclib=Accelerate %s -target arm64-apple-ios8.0.0 -### 2>&1 | FileCheck --check-prefix=CHECK-LINK %s
@@ -35,17 +35,17 @@
/* Verify that the correct vector library is passed to LTO flags. */
-// RUN: %clang -### --target=x86_64-unknown-linux-gnu -fveclib=LIBMVEC -flto %s 2>&1 | FileCheck -check-prefix CHECK-LTO-LIBMVEC %s
+// RUN: %clang -### --target=x86_64-unknown-linux-gnu -fveclib=LIBMVEC -flto %s 2>&1 | FileCheck --check-prefix=CHECK-LTO-LIBMVEC %s
// CHECK-LTO-LIBMVEC: "-plugin-opt=-vector-library=LIBMVEC-X86"
-// RUN: %clang -### --target=powerpc64-unknown-linux-gnu -fveclib=MASSV -flto %s 2>&1 | FileCheck -check-prefix CHECK-LTO-MASSV %s
+// RUN: %clang -### --target=powerpc64-unknown-linux-gnu -fveclib=MASSV -flto %s 2>&1 | FileCheck --check-prefix=CHECK-LTO-MASSV %s
// CHECK-LTO-MASSV: "-plugin-opt=-vector-library=MASSV"
-// RUN: %clang -### --target=x86_64-unknown-linux-gnu -fveclib=SVML -flto %s 2>&1 | FileCheck -check-prefix CHECK-LTO-SVML %s
+// RUN: %clang -### --target=x86_64-unknown-linux-gnu -fveclib=SVML -flto %s 2>&1 | FileCheck --check-prefix=CHECK-LTO-SVML %s
// CHECK-LTO-SVML: "-plugin-opt=-vector-library=SVML"
-// RUN: %clang -### --target=aarch64-linux-gnu -fveclib=SLEEF -flto %s 2>&1 | FileCheck -check-prefix CHECK-LTO-SLEEF %s
+// RUN: %clang -### --target=aarch64-linux-gnu -fveclib=SLEEF -flto %s 2>&1 | FileCheck --check-prefix=CHECK-LTO-SLEEF %s
// CHECK-LTO-SLEEF: "-plugin-opt=-vector-library=sleefgnuabi"
-// RUN: %clang -### --target=aarch64-linux-gnu -fveclib=ArmPL -flto %s 2>&1 | FileCheck -check-prefix CHECK-LTO-ARMPL %s
+// RUN: %clang -### --target=aarch64-linux-gnu -fveclib=ArmPL -flto %s 2>&1 | FileCheck --check-prefix=CHECK-LTO-ARMPL %s
// CHECK-LTO-ARMPL: "-plugin-opt=-vector-library=ArmPL"
diff --git a/clang/test/Driver/loongarch-mlasx-error.c b/clang/test/Driver/loongarch-mlasx-error.c
index e66f277f7c29..1d88f0f1a7c6 100644
--- a/clang/test/Driver/loongarch-mlasx-error.c
+++ b/clang/test/Driver/loongarch-mlasx-error.c
@@ -11,5 +11,5 @@
// RUN: not %clang --target=loongarch64 %s -fsyntax-only -mlasx -mno-lsx 2>&1 \
// RUN: FileCheck --check-prefix=ERROR_LASX_FPU128 %s
-// ERROR_LASX_FPU64: error: wrong fpu width; LASX depends on 64-bit FPU.
-// ERROR_LASX_FPU128: error: invalid option combination; LASX depends on LSX.
+// ERROR_LASX_FPU64: error: wrong fpu width; LASX depends on 64-bit FPU
+// ERROR_LASX_FPU128: error: invalid option combination; LASX depends on LSX
diff --git a/clang/test/Driver/loongarch-mlsx-error.c b/clang/test/Driver/loongarch-mlsx-error.c
index bd6b8e2718bf..db1f6fb2e5a0 100644
--- a/clang/test/Driver/loongarch-mlsx-error.c
+++ b/clang/test/Driver/loongarch-mlsx-error.c
@@ -9,4 +9,4 @@
// RUN: not %clang --target=loongarch64 %s -fsyntax-only -mlsx -mfpu=none 2>&1 \
// RUN: FileCheck --check-prefix=ERROR_LSX_FPU64 %s
-// ERROR_LSX_FPU64: error: wrong fpu width; LSX depends on 64-bit FPU.
+// ERROR_LSX_FPU64: error: wrong fpu width; LSX depends on 64-bit FPU
diff --git a/clang/test/Driver/m68k-features.cpp b/clang/test/Driver/m68k-features.cpp
index a5222a72a57f..67cdc0fe1b4f 100644
--- a/clang/test/Driver/m68k-features.cpp
+++ b/clang/test/Driver/m68k-features.cpp
@@ -1,79 +1,79 @@
// REQUIRES: m68k-registered-target
-// RUN: %clang -target m68k -ffixed-a0 -### %s 2> %t
+// RUN: %clang --target=m68k -ffixed-a0 -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-FIXED-A0 < %t %s
// CHECK-FIXED-A0: "-target-feature" "+reserve-a0"
-// RUN: %clang -target m68k -ffixed-a1 -### %s 2> %t
+// RUN: %clang --target=m68k -ffixed-a1 -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-FIXED-A1 < %t %s
// CHECK-FIXED-A1: "-target-feature" "+reserve-a1"
-// RUN: %clang -target m68k -ffixed-a2 -### %s 2> %t
+// RUN: %clang --target=m68k -ffixed-a2 -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-FIXED-A2 < %t %s
// CHECK-FIXED-A2: "-target-feature" "+reserve-a2"
-// RUN: %clang -target m68k -ffixed-a3 -### %s 2> %t
+// RUN: %clang --target=m68k -ffixed-a3 -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-FIXED-A3 < %t %s
// CHECK-FIXED-A3: "-target-feature" "+reserve-a3"
-// RUN: %clang -target m68k -ffixed-a4 -### %s 2> %t
+// RUN: %clang --target=m68k -ffixed-a4 -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-FIXED-A4 < %t %s
// CHECK-FIXED-A4: "-target-feature" "+reserve-a4"
-// RUN: %clang -target m68k -ffixed-a5 -### %s 2> %t
+// RUN: %clang --target=m68k -ffixed-a5 -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-FIXED-A5 < %t %s
// CHECK-FIXED-A5: "-target-feature" "+reserve-a5"
-// RUN: %clang -target m68k -ffixed-a6 -### %s 2> %t
+// RUN: %clang --target=m68k -ffixed-a6 -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-FIXED-A6 < %t %s
// CHECK-FIXED-A6: "-target-feature" "+reserve-a6"
-// RUN: %clang -target m68k -ffixed-d0 -### %s 2> %t
+// RUN: %clang --target=m68k -ffixed-d0 -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-FIXED-D0 < %t %s
// CHECK-FIXED-D0: "-target-feature" "+reserve-d0"
-// RUN: %clang -target m68k -ffixed-d1 -### %s 2> %t
+// RUN: %clang --target=m68k -ffixed-d1 -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-FIXED-D1 < %t %s
// CHECK-FIXED-D1: "-target-feature" "+reserve-d1"
-// RUN: %clang -target m68k -ffixed-d2 -### %s 2> %t
+// RUN: %clang --target=m68k -ffixed-d2 -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-FIXED-D2 < %t %s
// CHECK-FIXED-D2: "-target-feature" "+reserve-d2"
-// RUN: %clang -target m68k -ffixed-d3 -### %s 2> %t
+// RUN: %clang --target=m68k -ffixed-d3 -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-FIXED-D3 < %t %s
// CHECK-FIXED-D3: "-target-feature" "+reserve-d3"
-// RUN: %clang -target m68k -ffixed-d4 -### %s 2> %t
+// RUN: %clang --target=m68k -ffixed-d4 -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-FIXED-D4 < %t %s
// CHECK-FIXED-D4: "-target-feature" "+reserve-d4"
-// RUN: %clang -target m68k -ffixed-d5 -### %s 2> %t
+// RUN: %clang --target=m68k -ffixed-d5 -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-FIXED-D5 < %t %s
// CHECK-FIXED-D5: "-target-feature" "+reserve-d5"
-// RUN: %clang -target m68k -ffixed-d6 -### %s 2> %t
+// RUN: %clang --target=m68k -ffixed-d6 -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-FIXED-D6 < %t %s
// CHECK-FIXED-D6: "-target-feature" "+reserve-d6"
-// RUN: %clang -target m68k -ffixed-d7 -### %s 2> %t
+// RUN: %clang --target=m68k -ffixed-d7 -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-FIXED-D7 < %t %s
// CHECK-FIXED-D7: "-target-feature" "+reserve-d7"
// ==== Floating point ====
-// RUN: %clang -target m68k -m68000 -mhard-float -### %s 2> %t
+// RUN: %clang --target=m68k -m68000 -mhard-float -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-MX881 < %t %s
-// RUN: %clang -target m68k -m68000 -m68881 -### %s 2> %t
+// RUN: %clang --target=m68k -m68000 -m68881 -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-MX881 < %t %s
-// RUN: %clang -target m68k -m68010 -mhard-float -### %s 2> %t
+// RUN: %clang --target=m68k -m68010 -mhard-float -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-MX881 < %t %s
-// RUN: %clang -target m68k -m68010 -m68881 -### %s 2> %t
+// RUN: %clang --target=m68k -m68010 -m68881 -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-MX881 < %t %s
-// RUN: %clang -target m68k -m68020 -### %s 2> %t
+// RUN: %clang --target=m68k -m68020 -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-MX881 < %t %s
-// RUN: %clang -target m68k -m68030 -### %s 2> %t
+// RUN: %clang --target=m68k -m68030 -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-MX882 < %t %s
// CHECK-MX881: "-target-feature" "+isa-68881"
diff --git a/clang/test/Driver/m68k-macros.cpp b/clang/test/Driver/m68k-macros.cpp
index c61248ee0232..ae5b3e0ddd37 100644
--- a/clang/test/Driver/m68k-macros.cpp
+++ b/clang/test/Driver/m68k-macros.cpp
@@ -4,18 +4,18 @@
// CHECK-MX881: #define __HAVE_68881__ 1
// CHECK-NOMX881-NOT: #define __HAVE_68881__ 1
-// RUN: %clang -target m68k-unknown-linux -m68000 -std=c++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX,CHECK-NOMX881 %s
-// RUN: %clang -target m68k-unknown-linux -m68000 -std=gnu++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX,CHECK-MX-GNU,CHECK-NOMX881 %s
-// RUN: %clang -target m68k-unknown-linux -m68000 -mhard-float -dM -E %s | FileCheck --check-prefix=CHECK-MX881 %s
-// RUN: %clang -target m68k-unknown-linux -m68000 -m68881 -dM -E %s | FileCheck --check-prefix=CHECK-MX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68000 -std=c++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX,CHECK-NOMX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68000 -std=gnu++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX,CHECK-MX-GNU,CHECK-NOMX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68000 -mhard-float -dM -E %s | FileCheck --check-prefix=CHECK-MX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68000 -m68881 -dM -E %s | FileCheck --check-prefix=CHECK-MX881 %s
// CHECK-MX: #define __mc68000 1
// CHECK-MX: #define __mc68000__ 1
// CHECK-MX-GNU: #define mc68000 1
-// RUN: %clang -target m68k-unknown-linux -m68010 -std=c++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX10,CHECK-NOMX881 %s
-// RUN: %clang -target m68k-unknown-linux -m68010 -std=gnu++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX10,CHECK-MX10-GNU,CHECK-NOMX881 %s
-// RUN: %clang -target m68k-unknown-linux -m68010 -mhard-float -dM -E %s | FileCheck --check-prefix=CHECK-MX881 %s
-// RUN: %clang -target m68k-unknown-linux -m68010 -m68881 -dM -E %s | FileCheck --check-prefix=CHECK-MX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68010 -std=c++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX10,CHECK-NOMX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68010 -std=gnu++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX10,CHECK-MX10-GNU,CHECK-NOMX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68010 -mhard-float -dM -E %s | FileCheck --check-prefix=CHECK-MX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68010 -m68881 -dM -E %s | FileCheck --check-prefix=CHECK-MX881 %s
// CHECK-MX10: #define __mc68000 1
// CHECK-MX10: #define __mc68000__ 1
// CHECK-MX10: #define __mc68010 1
@@ -23,9 +23,9 @@
// CHECK-MX10-GNU: #define mc68000 1
// CHECK-MX10-GNU: #define mc68010 1
-// RUN: %clang -target m68k-unknown-linux -m68020 -std=c++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX20,CHECK-MX881 %s
-// RUN: %clang -target m68k-unknown-linux -m68020 -std=gnu++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX20,CHECK-MX20-GNU,CHECK-MX881 %s
-// RUN: %clang -target m68k-unknown-linux -m68020 -msoft-float -dM -E %s | FileCheck --check-prefix=CHECK-NOMX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68020 -std=c++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX20,CHECK-MX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68020 -std=gnu++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX20,CHECK-MX20-GNU,CHECK-MX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68020 -msoft-float -dM -E %s | FileCheck --check-prefix=CHECK-NOMX881 %s
// CHECK-MX20: #define __mc68000 1
// CHECK-MX20: #define __mc68000__ 1
// CHECK-MX20: #define __mc68020 1
@@ -33,9 +33,9 @@
// CHECK-MX20-GNU: #define mc68000 1
// CHECK-MX20-GNU: #define mc68020 1
-// RUN: %clang -target m68k-unknown-linux -m68030 -std=c++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX30,CHECK-MX881 %s
-// RUN: %clang -target m68k-unknown-linux -m68030 -std=gnu++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX30,CHECK-MX30-GNU,CHECK-MX881 %s
-// RUN: %clang -target m68k-unknown-linux -m68030 -msoft-float -dM -E %s | FileCheck --check-prefix=CHECK-NOMX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68030 -std=c++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX30,CHECK-MX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68030 -std=gnu++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX30,CHECK-MX30-GNU,CHECK-MX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68030 -msoft-float -dM -E %s | FileCheck --check-prefix=CHECK-NOMX881 %s
// CHECK-MX30: #define __mc68000 1
// CHECK-MX30: #define __mc68000__ 1
// CHECK-MX30: #define __mc68030 1
@@ -43,9 +43,9 @@
// CHECK-MX30-GNU: #define mc68000 1
// CHECK-MX30-GNU: #define mc68030 1
-// RUN: %clang -target m68k-unknown-linux -m68040 -std=c++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX40,CHECK-MX881 %s
-// RUN: %clang -target m68k-unknown-linux -m68040 -std=gnu++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX40,CHECK-MX40-GNU,CHECK-MX881 %s
-// RUN: %clang -target m68k-unknown-linux -m68040 -msoft-float -dM -E %s | FileCheck --check-prefix=CHECK-NOMX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68040 -std=c++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX40,CHECK-MX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68040 -std=gnu++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX40,CHECK-MX40-GNU,CHECK-MX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68040 -msoft-float -dM -E %s | FileCheck --check-prefix=CHECK-NOMX881 %s
// CHECK-MX40: #define __mc68000 1
// CHECK-MX40: #define __mc68000__ 1
// CHECK-MX40: #define __mc68040 1
@@ -53,9 +53,9 @@
// CHECK-MX40-GNU: #define mc68000 1
// CHECK-MX40-GNU: #define mc68040 1
-// RUN: %clang -target m68k-unknown-linux -m68060 -std=c++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX60,CHECK-MX881 %s
-// RUN: %clang -target m68k-unknown-linux -m68060 -std=gnu++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX60,CHECK-MX60-GNU,CHECK-MX881 %s
-// RUN: %clang -target m68k-unknown-linux -m68060 -msoft-float -dM -E %s | FileCheck --check-prefix=CHECK-NOMX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68060 -std=c++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX60,CHECK-MX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68060 -std=gnu++11 -dM -E %s | FileCheck --check-prefixes=CHECK-MX60,CHECK-MX60-GNU,CHECK-MX881 %s
+// RUN: %clang --target=m68k-unknown-linux -m68060 -msoft-float -dM -E %s | FileCheck --check-prefix=CHECK-NOMX881 %s
// CHECK-MX60: #define __mc68000 1
// CHECK-MX60: #define __mc68000__ 1
// CHECK-MX60: #define __mc68060 1
diff --git a/clang/test/Driver/m68k-sub-archs.cpp b/clang/test/Driver/m68k-sub-archs.cpp
index e5517d237cdd..e21ed404ca00 100644
--- a/clang/test/Driver/m68k-sub-archs.cpp
+++ b/clang/test/Driver/m68k-sub-archs.cpp
@@ -1,35 +1,35 @@
-// RUN: %clang -### -target m68k-unknown-linux -mcpu=68000 %s 2>&1 | FileCheck --check-prefix=CHECK-M00 %s
-// RUN: %clang -### -target m68k-unknown-linux -mcpu=m68000 %s 2>&1 | FileCheck --check-prefix=CHECK-M00 %s
-// RUN: %clang -### -target m68k-unknown-linux -mcpu=M68000 %s 2>&1 | FileCheck --check-prefix=CHECK-M00 %s
-// RUN: %clang -### -target m68k-unknown-linux -m68000 %s 2>&1 | FileCheck --check-prefix=CHECK-M00 %s
+// RUN: %clang -### --target=m68k-unknown-linux -mcpu=68000 %s 2>&1 | FileCheck --check-prefix=CHECK-M00 %s
+// RUN: %clang -### --target=m68k-unknown-linux -mcpu=m68000 %s 2>&1 | FileCheck --check-prefix=CHECK-M00 %s
+// RUN: %clang -### --target=m68k-unknown-linux -mcpu=M68000 %s 2>&1 | FileCheck --check-prefix=CHECK-M00 %s
+// RUN: %clang -### --target=m68k-unknown-linux -m68000 %s 2>&1 | FileCheck --check-prefix=CHECK-M00 %s
// CHECK-M00: "-target-cpu" "M68000"
-// RUN: %clang -### -target m68k-unknown-linux -mcpu=68010 %s 2>&1 | FileCheck --check-prefix=CHECK-M10 %s
-// RUN: %clang -### -target m68k-unknown-linux -mcpu=m68010 %s 2>&1 | FileCheck --check-prefix=CHECK-M10 %s
-// RUN: %clang -### -target m68k-unknown-linux -mcpu=M68010 %s 2>&1 | FileCheck --check-prefix=CHECK-M10 %s
-// RUN: %clang -### -target m68k-unknown-linux -m68010 %s 2>&1 | FileCheck --check-prefix=CHECK-M10 %s
+// RUN: %clang -### --target=m68k-unknown-linux -mcpu=68010 %s 2>&1 | FileCheck --check-prefix=CHECK-M10 %s
+// RUN: %clang -### --target=m68k-unknown-linux -mcpu=m68010 %s 2>&1 | FileCheck --check-prefix=CHECK-M10 %s
+// RUN: %clang -### --target=m68k-unknown-linux -mcpu=M68010 %s 2>&1 | FileCheck --check-prefix=CHECK-M10 %s
+// RUN: %clang -### --target=m68k-unknown-linux -m68010 %s 2>&1 | FileCheck --check-prefix=CHECK-M10 %s
// CHECK-M10: "-target-cpu" "M68010"
-// RUN: %clang -### -target m68k-unknown-linux -mcpu=68020 %s 2>&1 | FileCheck --check-prefix=CHECK-M20 %s
-// RUN: %clang -### -target m68k-unknown-linux -mcpu=m68020 %s 2>&1 | FileCheck --check-prefix=CHECK-M20 %s
-// RUN: %clang -### -target m68k-unknown-linux -mcpu=M68020 %s 2>&1 | FileCheck --check-prefix=CHECK-M20 %s
-// RUN: %clang -### -target m68k-unknown-linux -m68020 %s 2>&1 | FileCheck --check-prefix=CHECK-M20 %s
+// RUN: %clang -### --target=m68k-unknown-linux -mcpu=68020 %s 2>&1 | FileCheck --check-prefix=CHECK-M20 %s
+// RUN: %clang -### --target=m68k-unknown-linux -mcpu=m68020 %s 2>&1 | FileCheck --check-prefix=CHECK-M20 %s
+// RUN: %clang -### --target=m68k-unknown-linux -mcpu=M68020 %s 2>&1 | FileCheck --check-prefix=CHECK-M20 %s
+// RUN: %clang -### --target=m68k-unknown-linux -m68020 %s 2>&1 | FileCheck --check-prefix=CHECK-M20 %s
// CHECK-M20: "-target-cpu" "M68020"
-// RUN: %clang -### -target m68k-unknown-linux -mcpu=68030 %s 2>&1 | FileCheck --check-prefix=CHECK-M30 %s
-// RUN: %clang -### -target m68k-unknown-linux -mcpu=m68030 %s 2>&1 | FileCheck --check-prefix=CHECK-M30 %s
-// RUN: %clang -### -target m68k-unknown-linux -mcpu=M68030 %s 2>&1 | FileCheck --check-prefix=CHECK-M30 %s
-// RUN: %clang -### -target m68k-unknown-linux -m68030 %s 2>&1 | FileCheck --check-prefix=CHECK-M30 %s
+// RUN: %clang -### --target=m68k-unknown-linux -mcpu=68030 %s 2>&1 | FileCheck --check-prefix=CHECK-M30 %s
+// RUN: %clang -### --target=m68k-unknown-linux -mcpu=m68030 %s 2>&1 | FileCheck --check-prefix=CHECK-M30 %s
+// RUN: %clang -### --target=m68k-unknown-linux -mcpu=M68030 %s 2>&1 | FileCheck --check-prefix=CHECK-M30 %s
+// RUN: %clang -### --target=m68k-unknown-linux -m68030 %s 2>&1 | FileCheck --check-prefix=CHECK-M30 %s
// CHECK-M30: "-target-cpu" "M68030"
-// RUN: %clang -### -target m68k-unknown-linux -mcpu=68040 %s 2>&1 | FileCheck --check-prefix=CHECK-M40 %s
-// RUN: %clang -### -target m68k-unknown-linux -mcpu=m68040 %s 2>&1 | FileCheck --check-prefix=CHECK-M40 %s
-// RUN: %clang -### -target m68k-unknown-linux -mcpu=M68040 %s 2>&1 | FileCheck --check-prefix=CHECK-M40 %s
-// RUN: %clang -### -target m68k-unknown-linux -m68040 %s 2>&1 | FileCheck --check-prefix=CHECK-M40 %s
+// RUN: %clang -### --target=m68k-unknown-linux -mcpu=68040 %s 2>&1 | FileCheck --check-prefix=CHECK-M40 %s
+// RUN: %clang -### --target=m68k-unknown-linux -mcpu=m68040 %s 2>&1 | FileCheck --check-prefix=CHECK-M40 %s
+// RUN: %clang -### --target=m68k-unknown-linux -mcpu=M68040 %s 2>&1 | FileCheck --check-prefix=CHECK-M40 %s
+// RUN: %clang -### --target=m68k-unknown-linux -m68040 %s 2>&1 | FileCheck --check-prefix=CHECK-M40 %s
// CHECK-M40: "-target-cpu" "M68040"
-// RUN: %clang -### -target m68k-unknown-linux -mcpu=68060 %s 2>&1 | FileCheck --check-prefix=CHECK-M60 %s
-// RUN: %clang -### -target m68k-unknown-linux -mcpu=m68060 %s 2>&1 | FileCheck --check-prefix=CHECK-M60 %s
-// RUN: %clang -### -target m68k-unknown-linux -mcpu=M68060 %s 2>&1 | FileCheck --check-prefix=CHECK-M60 %s
-// RUN: %clang -### -target m68k-unknown-linux -m68060 %s 2>&1 | FileCheck --check-prefix=CHECK-M60 %s
+// RUN: %clang -### --target=m68k-unknown-linux -mcpu=68060 %s 2>&1 | FileCheck --check-prefix=CHECK-M60 %s
+// RUN: %clang -### --target=m68k-unknown-linux -mcpu=m68060 %s 2>&1 | FileCheck --check-prefix=CHECK-M60 %s
+// RUN: %clang -### --target=m68k-unknown-linux -mcpu=M68060 %s 2>&1 | FileCheck --check-prefix=CHECK-M60 %s
+// RUN: %clang -### --target=m68k-unknown-linux -m68060 %s 2>&1 | FileCheck --check-prefix=CHECK-M60 %s
// CHECK-M60: "-target-cpu" "M68060"
diff --git a/clang/test/Driver/masm.c b/clang/test/Driver/masm.c
index 90ecaa2fe796..92cacc4cc95e 100644
--- a/clang/test/Driver/masm.c
+++ b/clang/test/Driver/masm.c
@@ -1,7 +1,7 @@
-// RUN: %clang -target i386-unknown-linux -masm=intel -S %s -### 2>&1 | FileCheck --check-prefix=CHECK-INTEL %s
-// RUN: %clang -target i386-unknown-linux -masm=att -S %s -### 2>&1 | FileCheck --check-prefix=CHECK-ATT %s
+// RUN: %clang --target=i386-unknown-linux -masm=intel -S %s -### 2>&1 | FileCheck --check-prefix=CHECK-INTEL %s
+// RUN: %clang --target=i386-unknown-linux -masm=att -S %s -### 2>&1 | FileCheck --check-prefix=CHECK-ATT %s
// RUN: not %clang --target=i386-unknown-linux -S -masm=somerequired %s -### 2>&1 | FileCheck --check-prefix=CHECK-SOMEREQUIRED %s
-// RUN: %clang -target arm-unknown-eabi -S -masm=intel %s -### 2>&1 | FileCheck --check-prefix=CHECK-ARM %s
+// RUN: %clang --target=arm-unknown-eabi -S -masm=intel %s -### 2>&1 | FileCheck --check-prefix=CHECK-ARM %s
// RUN: %clang_cl --target=x86_64 /FA -### -- %s 2>&1 | FileCheck --check-prefix=CHECK-CL %s
int f() {
diff --git a/clang/test/Driver/masm.s b/clang/test/Driver/masm.s
index d08c63ac24ca..c1c34747bcf6 100644
--- a/clang/test/Driver/masm.s
+++ b/clang/test/Driver/masm.s
@@ -1,7 +1,7 @@
-// RUN: %clang -target i386-unknown-linux -masm=intel -c %s -### 2>&1 | FileCheck --check-prefix=CHECK-INTEL %s
-// RUN: %clang -target i386-unknown-linux -masm=att -c %s -### 2>&1 | FileCheck --check-prefix=CHECK-ATT %s
+// RUN: %clang --target=i386-unknown-linux -masm=intel -c %s -### 2>&1 | FileCheck --check-prefix=CHECK-INTEL %s
+// RUN: %clang --target=i386-unknown-linux -masm=att -c %s -### 2>&1 | FileCheck --check-prefix=CHECK-ATT %s
// RUN: not %clang --target=i386-unknown-linux -c -masm=somerequired %s -### 2>&1 | FileCheck --check-prefix=CHECK-SOMEREQUIRED %s
-// RUN: %clang -target arm-unknown-eabi -c -masm=intel %s -### 2>&1 | FileCheck --check-prefix=CHECK-ARM %s
+// RUN: %clang --target=arm-unknown-eabi -c -masm=intel %s -### 2>&1 | FileCheck --check-prefix=CHECK-ARM %s
// CHECK-INTEL: -x86-asm-syntax=intel
// CHECK-ATT: -x86-asm-syntax=att
diff --git a/clang/test/Driver/mbackchain.c b/clang/test/Driver/mbackchain.c
index 7aa020741c97..bab555685620 100644
--- a/clang/test/Driver/mbackchain.c
+++ b/clang/test/Driver/mbackchain.c
@@ -1,5 +1,5 @@
// RUN: not %clang --target=s390x -c -### %s -mpacked-stack -mbackchain 2>&1 | FileCheck %s
-// RUN: %clang -target s390x -c -### %s -mpacked-stack -mbackchain -msoft-float \
+// RUN: %clang --target=s390x -c -### %s -mpacked-stack -mbackchain -msoft-float \
// RUN: 2>&1 | FileCheck %s --check-prefix=KERNEL-BUILD
// REQUIRES: systemz-registered-target
diff --git a/clang/test/Driver/mcount.c b/clang/test/Driver/mcount.c
index b9139ab8cb13..70f554745b9c 100644
--- a/clang/test/Driver/mcount.c
+++ b/clang/test/Driver/mcount.c
@@ -1,10 +1,10 @@
-// RUN: %clang -target s390x -c -### %s -mnop-mcount -mrecord-mcount 2>&1 | FileCheck %s
+// RUN: %clang --target=s390x -c -### %s -mnop-mcount -mrecord-mcount 2>&1 | FileCheck %s
// CHECK: "-mnop-mcount"
// CHECK: "-mrecord-mcount"
-// RUN: not %clang -target x86_64 -c -### %s -mnop-mcount -mrecord-mcount 2>&1 | FileCheck --check-prefix=ERR1 %s
-// RUN: not %clang -target aarch64 -c -### %s -mnop-mcount -mrecord-mcount 2>&1 | FileCheck --check-prefix=ERR2 %s
+// RUN: not %clang --target=x86_64 -c -### %s -mnop-mcount -mrecord-mcount 2>&1 | FileCheck --check-prefix=ERR1 %s
+// RUN: not %clang --target=aarch64 -c -### %s -mnop-mcount -mrecord-mcount 2>&1 | FileCheck --check-prefix=ERR2 %s
// ERR1: error: unsupported option '-mnop-mcount' for target 'x86_64'
// ERR1: error: unsupported option '-mrecord-mcount' for target 'x86_64'
diff --git a/clang/test/Driver/mdouble.c b/clang/test/Driver/mdouble.c
index 0d4881edea43..71e953ec4853 100644
--- a/clang/test/Driver/mdouble.c
+++ b/clang/test/Driver/mdouble.c
@@ -1,4 +1,4 @@
-// RUN: %clang -target avr -c -### %s -mdouble=64 2>&1 | FileCheck %s
+// RUN: %clang --target=avr -c -### %s -mdouble=64 2>&1 | FileCheck %s
// CHECK: "-mdouble=64"
diff --git a/clang/test/Driver/memtag-stack.c b/clang/test/Driver/memtag-stack.c
index 58003fd1b02b..8ee49bec3e36 100644
--- a/clang/test/Driver/memtag-stack.c
+++ b/clang/test/Driver/memtag-stack.c
@@ -1,7 +1,7 @@
-// RUN: %clang -target aarch64-unknown-linux -march=armv8+memtag -fsanitize=memtag-stack -mllvm -stack-safety-print=1 %s -S -o - 2>&1 | FileCheck %s --check-prefix=CHECK-NO-SAFETY
-// RUN: %clang -O1 -target aarch64-unknown-linux -march=armv8+memtag -fsanitize=memtag-stack -mllvm -stack-safety-print=1 %s -S -o - 2>&1 | FileCheck %s --check-prefix=CHECK-SAFETY
-// RUN: %clang -O2 -target aarch64-unknown-linux -march=armv8+memtag -fsanitize=memtag-stack -mllvm -stack-safety-print=1 %s -S -o - 2>&1 | FileCheck %s --check-prefix=CHECK-SAFETY
-// RUN: %clang -O3 -target aarch64-unknown-linux -march=armv8+memtag -fsanitize=memtag-stack -mllvm -stack-safety-print=1 %s -S -o - 2>&1 | FileCheck %s --check-prefix=CHECK-SAFETY
+// RUN: %clang --target=aarch64 -march=armv8+memtag -fsanitize=memtag-stack -mllvm -stack-safety-print=1 %s -S -o - 2>&1 | FileCheck %s --check-prefix=CHECK-NO-SAFETY
+// RUN: %clang -O1 --target=aarch64 -march=armv8+memtag -fsanitize=memtag-stack -mllvm -stack-safety-print=1 %s -S -o - 2>&1 | FileCheck %s --check-prefix=CHECK-SAFETY
+// RUN: %clang -O2 --target=aarch64 -march=armv8+memtag -fsanitize=memtag-stack -mllvm -stack-safety-print=1 %s -S -o - 2>&1 | FileCheck %s --check-prefix=CHECK-SAFETY
+// RUN: %clang -O3 --target=aarch64 -march=armv8+memtag -fsanitize=memtag-stack -mllvm -stack-safety-print=1 %s -S -o - 2>&1 | FileCheck %s --check-prefix=CHECK-SAFETY
// REQUIRES: aarch64-registered-target
diff --git a/clang/test/Driver/mfentry.c b/clang/test/Driver/mfentry.c
index 934f5c71c26b..9251ce54ee7b 100644
--- a/clang/test/Driver/mfentry.c
+++ b/clang/test/Driver/mfentry.c
@@ -1,12 +1,12 @@
-// RUN: %clang -target s390x -c -### %s -mfentry 2>&1 | FileCheck %s
-// RUN: %clang -target i386 -c -### %s -mfentry 2>&1 | FileCheck %s
-// RUN: %clang -target x86_64 -c -### %s -mfentry 2>&1 | FileCheck %s
-// RUN: %clang -target x86_64-linux-gnu -pg -mfentry -O0 -### -E %s 2>&1 | FileCheck -check-prefix=FP %s
-// RUN: %clang -target x86_64-linux-gnu -pg -mfentry -O2 -fno-omit-frame-pointer -### -E %s 2>&1 | FileCheck -check-prefix=FP %s
-// RUN: %clang -target x86_64-linux-gnu -pg -mfentry -O2 -### -E %s 2>&1 | FileCheck -check-prefix=NOFP %s
-// RUN: %clang -target x86_64 -pg -mfentry -O0 -### -E %s 2>&1 | FileCheck -check-prefix=FP %s
-// RUN: %clang -target x86_64 -pg -mfentry -O2 -fno-omit-frame-pointer -### -E %s 2>&1 | FileCheck -check-prefix=FP %s
-// RUN: %clang -target x86_64 -pg -mfentry -O2 -### -E %s 2>&1 | FileCheck -check-prefix=FP %s
+// RUN: %clang --target=s390x -c -### %s -mfentry 2>&1 | FileCheck %s
+// RUN: %clang --target=i386 -c -### %s -mfentry 2>&1 | FileCheck %s
+// RUN: %clang --target=x86_64 -c -### %s -mfentry 2>&1 | FileCheck %s
+// RUN: %clang --target=x86_64-linux-gnu -pg -mfentry -O0 -### -E %s 2>&1 | FileCheck -check-prefix=FP %s
+// RUN: %clang --target=x86_64-linux-gnu -pg -mfentry -O2 -fno-omit-frame-pointer -### -E %s 2>&1 | FileCheck -check-prefix=FP %s
+// RUN: %clang --target=x86_64-linux-gnu -pg -mfentry -O2 -### -E %s 2>&1 | FileCheck -check-prefix=NOFP %s
+// RUN: %clang --target=x86_64 -pg -mfentry -O0 -### -E %s 2>&1 | FileCheck -check-prefix=FP %s
+// RUN: %clang --target=x86_64 -pg -mfentry -O2 -fno-omit-frame-pointer -### -E %s 2>&1 | FileCheck -check-prefix=FP %s
+// RUN: %clang --target=x86_64 -pg -mfentry -O2 -### -E %s 2>&1 | FileCheck -check-prefix=FP %s
// CHECK: "-mfentry"
diff --git a/clang/test/Driver/mglobal-merge.c b/clang/test/Driver/mglobal-merge.c
index 4ea7ae03e78f..42019e1cae00 100644
--- a/clang/test/Driver/mglobal-merge.c
+++ b/clang/test/Driver/mglobal-merge.c
@@ -1,40 +1,40 @@
-// RUN: %clang -target armv7-unknown-unknown -### -fsyntax-only %s 2> %t \
+// RUN: %clang --target=armv7 -### -fsyntax-only %s 2> %t \
// RUN: -mno-global-merge
// RUN: FileCheck --check-prefix=CHECK-NGM-ARM < %t %s
-// RUN: %clang -target aarch64-unknown-unknown -### -fsyntax-only %s 2> %t \
+// RUN: %clang --target=aarch64 -### -fsyntax-only %s 2> %t \
// RUN: -mno-global-merge
// RUN: FileCheck --check-prefix=CHECK-NGM-AARCH64 < %t %s
-// RUN: %clang -target x86_64-unknown-unknown -### -fsyntax-only %s 2> %t \
+// RUN: %clang --target=x86_64 -### -fsyntax-only %s 2> %t \
// RUN: -mno-global-merge
// RUN: FileCheck --check-prefix=CHECK-NONE < %t %s
// CHECK-NGM-ARM: "-mllvm" "-arm-global-merge=false"
// CHECK-NGM-AARCH64: "-mllvm" "-aarch64-enable-global-merge=false"
-// RUN: %clang -target armv7-unknown-unknown -### -fsyntax-only %s 2> %t \
+// RUN: %clang --target=armv7 -### -fsyntax-only %s 2> %t \
// RUN: -mglobal-merge
// RUN: FileCheck --check-prefix=CHECK-GM-ARM < %t %s
-// RUN: %clang -target aarch64-unknown-unknown -### -fsyntax-only %s 2> %t \
+// RUN: %clang --target=aarch64 -### -fsyntax-only %s 2> %t \
// RUN: -mglobal-merge
// RUN: FileCheck --check-prefix=CHECK-GM-AARCH64 < %t %s
-// RUN: %clang -target x86_64-unknown-unknown -### -fsyntax-only %s 2> %t \
+// RUN: %clang --target=x86_64 -### -fsyntax-only %s 2> %t \
// RUN: -mglobal-merge
// RUN: FileCheck --check-prefix=CHECK-NONE < %t %s
// CHECK-GM-ARM: "-mllvm" "-arm-global-merge=true"
// CHECK-GM-AARCH64: "-mllvm" "-aarch64-enable-global-merge=true"
-// RUN: %clang -target armv7-unknown-unknown -### -fsyntax-only %s 2> %t
+// RUN: %clang --target=armv7 -### -fsyntax-only %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-NONE < %t %s
-// RUN: %clang -target aarch64-unknown-unknown -### -fsyntax-only %s 2> %t
+// RUN: %clang --target=aarch64 -### -fsyntax-only %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-NONE < %t %s
-// RUN: %clang -target x86_64-unknown-unknown -### -fsyntax-only %s 2> %t
+// RUN: %clang --target=x86_64 -### -fsyntax-only %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-NONE < %t %s
// CHECK-NONE-NOT: -global-merge=
diff --git a/clang/test/Driver/mingw-implicit-extension-windows.c b/clang/test/Driver/mingw-implicit-extension-windows.c
index bc15f6abb266..6320a4a93672 100644
--- a/clang/test/Driver/mingw-implicit-extension-windows.c
+++ b/clang/test/Driver/mingw-implicit-extension-windows.c
@@ -1,10 +1,10 @@
// Test how an implicit .exe extension is added.
-// RUN: %clang -target i686-windows-gnu -### --sysroot=%S/Inputs/mingw_clang_tree/mingw32 %s -o outputname 2>&1 | FileCheck %s --check-prefix=CHECK-OUTPUTNAME-EXE
+// RUN: %clang --target=i686-windows-gnu -### --sysroot=%S/Inputs/mingw_clang_tree/mingw32 %s -o outputname 2>&1 | FileCheck %s --check-prefix=CHECK-OUTPUTNAME-EXE
-// RUN: %clang -target i686-windows-gnu -### --sysroot=%S/Inputs/mingw_clang_tree/mingw32 %s -o outputname.exe 2>&1 | FileCheck %s --check-prefix=CHECK-OUTPUTNAME-EXE
+// RUN: %clang --target=i686-windows-gnu -### --sysroot=%S/Inputs/mingw_clang_tree/mingw32 %s -o outputname.exe 2>&1 | FileCheck %s --check-prefix=CHECK-OUTPUTNAME-EXE
-// RUN: %clang -target i686-windows-gnu -### --sysroot=%S/Inputs/mingw_clang_tree/mingw32 %s -o outputname.q 2>&1 | FileCheck %s --check-prefix=CHECK-OUTPUTNAME-Q
+// RUN: %clang --target=i686-windows-gnu -### --sysroot=%S/Inputs/mingw_clang_tree/mingw32 %s -o outputname.q 2>&1 | FileCheck %s --check-prefix=CHECK-OUTPUTNAME-Q
// CHECK-OUTPUTNAME-EXE: "-o" "outputname.exe"
// CHECK-OUTPUTNAME-Q: "-o" "outputname.q"
diff --git a/clang/test/Driver/mingw-libgcc.c b/clang/test/Driver/mingw-libgcc.c
index bfe2360ed4e6..f9635a803609 100644
--- a/clang/test/Driver/mingw-libgcc.c
+++ b/clang/test/Driver/mingw-libgcc.c
@@ -2,24 +2,24 @@
// Verified with gcc version 5.1.0 (i686-posix-dwarf-rev0, Built by MinGW-W64 project).
// gcc, static
-// RUN: %clang -v -target i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefixes=CHECK_STATIC,CHECK_BDYNAMIC %s
-// RUN: %clang -static -v -target i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefixes=CHECK_STATIC,CHECK_BSTATIC %s
-// RUN: %clang -static-libgcc -v -target i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefixes=CHECK_STATIC,CHECK_BDYNAMIC %s
-// RUN: %clang -static -shared -v -target i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefixes=CHECK_STATIC,CHECK_SHARED,CHECK_BSTATIC %s
-// RUN: %clang -static-libgcc -shared -v -target i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefixes=CHECK_STATIC,CHECK_SHARED,CHECK_BDYNAMIC %s
+// RUN: %clang -v --target=i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefixes=CHECK_STATIC,CHECK_BDYNAMIC %s
+// RUN: %clang -static -v --target=i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefixes=CHECK_STATIC,CHECK_BSTATIC %s
+// RUN: %clang -static-libgcc -v --target=i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefixes=CHECK_STATIC,CHECK_BDYNAMIC %s
+// RUN: %clang -static -shared -v --target=i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefixes=CHECK_STATIC,CHECK_SHARED,CHECK_BSTATIC %s
+// RUN: %clang -static-libgcc -shared -v --target=i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefixes=CHECK_STATIC,CHECK_SHARED,CHECK_BDYNAMIC %s
// gcc, dynamic
-// RUN: %clang -shared -v -target i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefix=CHECK_DYNAMIC %s
+// RUN: %clang -shared -v --target=i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefix=CHECK_DYNAMIC %s
// g++, static
-// RUN: %clang -static --driver-mode=g++ -v -target i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefix=CHECK_STATIC %s
-// RUN: %clang -static-libgcc --driver-mode=g++ -v -target i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefix=CHECK_STATIC %s
-// RUN: %clang -static -shared --driver-mode=g++ -v -target i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefix=CHECK_STATIC %s
-// RUN: %clang -static-libgcc -shared --driver-mode=g++ -v -target i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefix=CHECK_STATIC %s
+// RUN: %clang -static --driver-mode=g++ -v --target=i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefix=CHECK_STATIC %s
+// RUN: %clang -static-libgcc --driver-mode=g++ -v --target=i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefix=CHECK_STATIC %s
+// RUN: %clang -static -shared --driver-mode=g++ -v --target=i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefix=CHECK_STATIC %s
+// RUN: %clang -static-libgcc -shared --driver-mode=g++ -v --target=i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefix=CHECK_STATIC %s
// g++, dynamic
-// RUN: %clang --driver-mode=g++ -v -target i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefix=CHECK_DYNAMIC %s
-// RUN: %clang -shared --driver-mode=g++ -v -target i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefix=CHECK_DYNAMIC %s
+// RUN: %clang --driver-mode=g++ -v --target=i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefix=CHECK_DYNAMIC %s
+// RUN: %clang -shared --driver-mode=g++ -v --target=i686-pc-windows-gnu -rtlib=platform -### %s 2>&1 | FileCheck -check-prefix=CHECK_DYNAMIC %s
// CHECK_SHARED: "--shared"
// CHECK_BSTATIC: "-Bstatic"
diff --git a/clang/test/Driver/mingw-msvcrt.c b/clang/test/Driver/mingw-msvcrt.c
index 48a30d8469cb..340ce1f57b0f 100644
--- a/clang/test/Driver/mingw-msvcrt.c
+++ b/clang/test/Driver/mingw-msvcrt.c
@@ -1,8 +1,8 @@
-// RUN: %clang -v -target i686-pc-windows-gnu -### %s 2>&1 | FileCheck -check-prefix=CHECK_DEFAULT %s
-// RUN: %clang -v -target i686-pc-windows-gnu -lmsvcr120 -### %s 2>&1 | FileCheck -check-prefix=CHECK_MSVCR120 %s
-// RUN: %clang -v -target i686-pc-windows-gnu -lucrtbase -### %s 2>&1 | FileCheck -check-prefix=CHECK_UCRTBASE %s
-// RUN: %clang -v -target i686-pc-windows-gnu -lucrt -### %s 2>&1 | FileCheck -check-prefix=CHECK_UCRT %s
-// RUN: %clang -v -target i686-pc-windows-gnu -lcrtdll -### %s 2>&1 | FileCheck -check-prefix=CHECK_CRTDLL %s
+// RUN: %clang -v --target=i686-pc-windows-gnu -### %s 2>&1 | FileCheck -check-prefix=CHECK_DEFAULT %s
+// RUN: %clang -v --target=i686-pc-windows-gnu -lmsvcr120 -### %s 2>&1 | FileCheck -check-prefix=CHECK_MSVCR120 %s
+// RUN: %clang -v --target=i686-pc-windows-gnu -lucrtbase -### %s 2>&1 | FileCheck -check-prefix=CHECK_UCRTBASE %s
+// RUN: %clang -v --target=i686-pc-windows-gnu -lucrt -### %s 2>&1 | FileCheck -check-prefix=CHECK_UCRT %s
+// RUN: %clang -v --target=i686-pc-windows-gnu -lcrtdll -### %s 2>&1 | FileCheck -check-prefix=CHECK_CRTDLL %s
// CHECK_DEFAULT: "-lmingwex" "-lmsvcrt" "-ladvapi32"
// CHECK_DEFAULT-SAME: "-lmsvcrt" "-lkernel32" "{{.*}}crtend.o"
diff --git a/clang/test/Driver/mingw-sanitizers.c b/clang/test/Driver/mingw-sanitizers.c
index 2325f8f0f1f2..618c1b7ba332 100644
--- a/clang/test/Driver/mingw-sanitizers.c
+++ b/clang/test/Driver/mingw-sanitizers.c
@@ -1,6 +1,6 @@
// RUN: touch %t.a
-// RUN: %clang -target i686-windows-gnu %s -### -fsanitize=address -lcomponent %/t.a 2>&1 | FileCheck --check-prefixes=ASAN-ALL,ASAN-I686 -DINPUT=%/t.a %s
-// RUN: %clang -target x86_64-windows-gnu %s -### -fsanitize=address -lcomponent %/t.a 2>&1 | FileCheck --check-prefixes=ASAN-ALL,ASAN-X86_64 -DINPUT=%/t.a %s
+// RUN: %clang --target=i686-windows-gnu %s -### -fsanitize=address -lcomponent %/t.a 2>&1 | FileCheck --check-prefixes=ASAN-ALL,ASAN-I686 -DINPUT=%/t.a %s
+// RUN: %clang --target=x86_64-windows-gnu %s -### -fsanitize=address -lcomponent %/t.a 2>&1 | FileCheck --check-prefixes=ASAN-ALL,ASAN-X86_64 -DINPUT=%/t.a %s
//
// ASAN-ALL-NOT:"-l{{[^"]+"]}}"
// ASAN-ALL-NOT:"[[INPUT]]"
@@ -17,4 +17,4 @@
// ASAN-X86_64: "--require-defined" "__asan_seh_interceptor"
// ASAN-X86_64: "--whole-archive" "{{[^"]*}}libclang_rt.asan_dynamic_runtime_thunk.a" "--no-whole-archive"
-// RUN: %clang -target x86_64-windows-gnu %s -### -fsanitize=vptr
+// RUN: %clang --target=x86_64-windows-gnu %s -### -fsanitize=vptr
diff --git a/clang/test/Driver/mingw-sysroot.cpp b/clang/test/Driver/mingw-sysroot.cpp
index 5d512e666970..de5cdedcbff1 100644
--- a/clang/test/Driver/mingw-sysroot.cpp
+++ b/clang/test/Driver/mingw-sysroot.cpp
@@ -33,7 +33,7 @@
// directory to the path - this would end up including /usr/include for
// cross toolchains installed in /usr.
-// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %clang -target x86_64-w64-mingw32 -rtlib=platform -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_GCC %s --implicit-check-not="\"{{.*}}/testroot-gcc{{/|\\\\}}include\""
+// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %clang --target=x86_64-w64-mingw32 -rtlib=platform -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_GCC %s --implicit-check-not="\"{{.*}}/testroot-gcc{{/|\\\\}}include\""
// CHECK_TESTROOT_GCC: "-internal-isystem" "[[BASE:[^"]+]]/testroot-gcc{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}10.2-posix{{/|\\\\}}include{{/|\\\\}}c++"
// CHECK_TESTROOT_GCC-SAME: {{^}} "-internal-isystem" "[[BASE]]/testroot-gcc{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}10.2-posix{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}x86_64-w64-mingw32"
// CHECK_TESTROOT_GCC-SAME: {{^}} "-internal-isystem" "[[BASE]]/testroot-gcc{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}10.2-posix{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}backward"
@@ -45,7 +45,7 @@
// If we pass --sysroot explicitly, then we do include <sysroot>/include
// even when cross compiling.
-// RUN: %clang -target x86_64-w64-mingw32 -rtlib=platform -stdlib=libstdc++ --sysroot="%T/testroot-gcc" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_GCC_EXPLICIT %s
+// RUN: %clang --target=x86_64-w64-mingw32 -rtlib=platform -stdlib=libstdc++ --sysroot="%T/testroot-gcc" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_GCC_EXPLICIT %s
// CHECK_TESTROOT_GCC_EXPLICIT: "-internal-isystem" "{{[^"]+}}/testroot-gcc{{/|\\\\}}include"
@@ -63,7 +63,7 @@
// happens to be in the same directory as gcc, make sure we still can pick up
// the libgcc directory:
-// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %T/testroot-gcc/bin/x86_64-w64-mingw32-clang -target x86_64-w64-mingw32 -rtlib=platform -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_GCC %s
+// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %T/testroot-gcc/bin/x86_64-w64-mingw32-clang --target=x86_64-w64-mingw32 -rtlib=platform -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_GCC %s
// If we're executing clang from a directory with what looks like a mingw sysroot,
diff --git a/clang/test/Driver/mingw-windowsapp.c b/clang/test/Driver/mingw-windowsapp.c
index bf6f2ec3fa3d..d0a44952b30a 100644
--- a/clang/test/Driver/mingw-windowsapp.c
+++ b/clang/test/Driver/mingw-windowsapp.c
@@ -1,5 +1,5 @@
-// RUN: %clang -v -target i686-pc-windows-gnu -### %s 2>&1 | FileCheck -check-prefix=CHECK_DEFAULT %s
-// RUN: %clang -v -target i686-pc-windows-gnu -### %s -lwindowsapp 2>&1 | FileCheck -check-prefix=CHECK_WINDOWSAPP %s
+// RUN: %clang -v --target=i686-pc-windows-gnu -### %s 2>&1 | FileCheck -check-prefix=CHECK_DEFAULT %s
+// RUN: %clang -v --target=i686-pc-windows-gnu -### %s -lwindowsapp 2>&1 | FileCheck -check-prefix=CHECK_WINDOWSAPP %s
// CHECK_DEFAULT: "-lmsvcrt" "-ladvapi32" "-lshell32" "-luser32" "-lkernel32" "-lmingw32"
// CHECK_WINDOWSAPP: "-lwindowsapp" "-lmingw32"
diff --git a/clang/test/Driver/mingw.cpp b/clang/test/Driver/mingw.cpp
index e42ff4554e45..4a9ba4d259b4 100644
--- a/clang/test/Driver/mingw.cpp
+++ b/clang/test/Driver/mingw.cpp
@@ -1,15 +1,15 @@
-// RUN: %clang -target i686-windows-gnu -rtlib=platform -c -### --sysroot=%S/Inputs/mingw_clang_tree/mingw32 %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_CLANG_TREE %s
+// RUN: %clang --target=i686-windows-gnu -rtlib=platform -c -### --sysroot=%S/Inputs/mingw_clang_tree/mingw32 %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_CLANG_TREE %s
// CHECK_MINGW_CLANG_TREE: "[[BASE:[^"]+]]/Inputs/mingw_clang_tree/mingw32{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include"
// CHECK_MINGW_CLANG_TREE: "[[BASE:[^"]+]]/Inputs/mingw_clang_tree/mingw32{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}usr{{/|\\\\}}include"
// CHECK_MINGW_CLANG_TREE: "[[BASE]]/Inputs/mingw_clang_tree/mingw32{{/|\\\\}}include"
-// RUN: %clang -target i686-windows-gnu -rtlib=platform -stdlib=libc++ -c -### --sysroot=%S/Inputs/mingw_clang_tree/mingw32 %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_CLANG_TREE_LIBCXX %s
+// RUN: %clang --target=i686-windows-gnu -rtlib=platform -stdlib=libc++ -c -### --sysroot=%S/Inputs/mingw_clang_tree/mingw32 %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_CLANG_TREE_LIBCXX %s
// CHECK_MINGW_CLANG_TREE_LIBCXX: "[[BASE:[^"]+]]/Inputs/mingw_clang_tree/mingw32{{/|\\\\}}include{{/|\\\\}}i686-unknown-windows-gnu{{/|\\\\}}c++{{/|\\\\}}v1"
// CHECK_MINGW_CLANG_TREE_LIBCXX: "[[BASE:[^"]+]]/Inputs/mingw_clang_tree/mingw32{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}v1"
-// RUN: %clang -target i686-pc-windows-gnu -rtlib=platform -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_mingw_org_tree/mingw %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_ORG_TREE %s
+// RUN: %clang --target=i686-pc-windows-gnu -rtlib=platform -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_mingw_org_tree/mingw %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_ORG_TREE %s
// CHECK_MINGW_ORG_TREE: "[[BASE:[^"]+]]/Inputs/mingw_mingw_org_tree/mingw{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}mingw32{{/|\\\\}}4.8.1{{/|\\\\}}include{{/|\\\\}}c++"
// CHECK_MINGW_ORG_TREE: "[[BASE]]/Inputs/mingw_mingw_org_tree/mingw{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}mingw32{{/|\\\\}}4.8.1{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}mingw32"
// CHECK_MINGW_ORG_TREE: "[[BASE]]/Inputs/mingw_mingw_org_tree/mingw{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}mingw32{{/|\\\\}}4.8.1{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}backward"
@@ -17,14 +17,14 @@
// CHECK_MINGW_ORG_TREE: "[[BASE]]/Inputs/mingw_mingw_org_tree/mingw{{/|\\\\}}include"
-// RUN: %clang -target i686-pc-windows-gnu -rtlib=platform -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_mingw_builds_tree/mingw32 %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_BUILDS_TREE %s
+// RUN: %clang --target=i686-pc-windows-gnu -rtlib=platform -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_mingw_builds_tree/mingw32 %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_BUILDS_TREE %s
// CHECK_MINGW_BUILDS_TREE: "[[BASE:[^"]+]]/Inputs/mingw_mingw_builds_tree/mingw32{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include{{/|\\\\}}c++"
// CHECK_MINGW_BUILDS_TREE: "[[BASE]]/Inputs/mingw_mingw_builds_tree/mingw32{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}i686-w64-mingw32"
// CHECK_MINGW_BUILDS_TREE: "[[BASE]]/Inputs/mingw_mingw_builds_tree/mingw32{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}backward"
// CHECK_MINGW_BUILDS_TREE: "[[BASE]]/Inputs/mingw_mingw_builds_tree/mingw32{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include"
-// RUN: %clang -target i686-pc-windows-gnu -rtlib=platform -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_msys2_tree/msys64/mingw32 %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_MSYS_TREE %s
+// RUN: %clang --target=i686-pc-windows-gnu -rtlib=platform -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_msys2_tree/msys64/mingw32 %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_MSYS_TREE %s
// CHECK_MINGW_MSYS_TREE: "[[BASE:[^"]+]]/Inputs/mingw_msys2_tree/msys64{{/|\\\\}}mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}4.9.2"
// CHECK_MINGW_MSYS_TREE: "[[BASE]]/Inputs/mingw_msys2_tree/msys64/mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}4.9.2{{/|\\\\}}i686-w64-mingw32"
// CHECK_MINGW_MSYS_TREE: "[[BASE]]/Inputs/mingw_msys2_tree/msys64/mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}4.9.2{{/|\\\\}}backward"
@@ -32,55 +32,55 @@
// CHECK_MINGW_MSYS_TREE: "[[BASE]]/Inputs/mingw_msys2_tree/msys64/mingw32{{/|\\\\}}include"
-// RUN: %clang -target x86_64-pc-windows-gnu -rtlib=platform -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_opensuse_tree/usr %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_OPENSUSE_TREE %s
+// RUN: %clang --target=x86_64-pc-windows-gnu -rtlib=platform -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_opensuse_tree/usr %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_OPENSUSE_TREE %s
// CHECK_MINGW_OPENSUSE_TREE: "[[BASE:[^"]+]]/Inputs/mingw_opensuse_tree/usr{{/|\\\\}}lib64{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}5.1.0{{/|\\\\}}include{{/|\\\\}}c++"
// CHECK_MINGW_OPENSUSE_TREE: "[[BASE]]/Inputs/mingw_opensuse_tree/usr{{/|\\\\}}lib64{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}5.1.0{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}x86_64-w64-mingw32"
// CHECK_MINGW_OPENSUSE_TREE: "[[BASE]]/Inputs/mingw_opensuse_tree/usr{{/|\\\\}}lib64{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}5.1.0{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}backward"
// CHECK_MINGW_OPENSUSE_TREE: "[[BASE]]/Inputs/mingw_opensuse_tree/usr{{/|\\\\}}x86_64-w64-mingw32/sys-root/mingw{{/|\\\\}}include"
-// RUN: %clang -target x86_64-pc-windows-gnu -rtlib=platform -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_fedora_tree/usr %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_FEDORA_TREE %s
+// RUN: %clang --target=x86_64-pc-windows-gnu -rtlib=platform -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_fedora_tree/usr %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_FEDORA_TREE %s
// CHECK_MINGW_FEDORA_TREE: "[[BASE:[^"]+]]/Inputs/mingw_fedora_tree/usr{{/|\\\\}}x86_64-w64-mingw32ucrt/sys-root/mingw{{/|\\\\}}include{{/|\\\\}}c++"
// CHECK_MINGW_FEDORA_TREE: "[[BASE]]/Inputs/mingw_fedora_tree/usr{{/|\\\\}}x86_64-w64-mingw32ucrt/sys-root/mingw{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}x86_64-w64-mingw32ucrt"
// CHECK_MINGW_FEDORA_TREE: "[[BASE]]/Inputs/mingw_fedora_tree/usr{{/|\\\\}}x86_64-w64-mingw32ucrt/sys-root/mingw{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}backward"
// CHECK_MINGW_FEDORA_TREE: "[[BASE]]/Inputs/mingw_fedora_tree/usr{{/|\\\\}}x86_64-w64-mingw32ucrt/sys-root/mingw{{/|\\\\}}include"
-// RUN: %clang -target i686-pc-windows-gnu -rtlib=platform -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_arch_tree/usr %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_ARCH_TREE %s
+// RUN: %clang --target=i686-pc-windows-gnu -rtlib=platform -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_arch_tree/usr %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_ARCH_TREE %s
// CHECK_MINGW_ARCH_TREE: "[[BASE:[^"]+]]/Inputs/mingw_arch_tree/usr{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}5.1.0"
// CHECK_MINGW_ARCH_TREE: "[[BASE]]/Inputs/mingw_arch_tree/usr{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}5.1.0{{/|\\\\}}i686-w64-mingw32"
// CHECK_MINGW_ARCH_TREE: "[[BASE]]/Inputs/mingw_arch_tree/usr{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}5.1.0{{/|\\\\}}backward"
// CHECK_MINGW_ARCH_TREE: "[[BASE]]/Inputs/mingw_arch_tree/usr{{/|\\\\}}i686-w64-mingw32{{/|\\\\}}include"
-// RUN: %clang -target x86_64-pc-windows-gnu -rtlib=platform -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_ubuntu_tree/usr %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_UBUNTU_TREE %s
+// RUN: %clang --target=x86_64-pc-windows-gnu -rtlib=platform -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_ubuntu_tree/usr %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_UBUNTU_TREE %s
// CHECK_MINGW_UBUNTU_TREE: "[[BASE:[^"]+]]/Inputs/mingw_ubuntu_tree/usr{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}4.8"
// CHECK_MINGW_UBUNTU_TREE: "[[BASE]]/Inputs/mingw_ubuntu_tree/usr{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}4.8{{/|\\\\}}x86_64-w64-mingw32"
// CHECK_MINGW_UBUNTU_TREE: "[[BASE]]/Inputs/mingw_ubuntu_tree/usr{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}4.8{{/|\\\\}}backward"
// CHECK_MINGW_UBUNTU_TREE: "[[BASE]]/Inputs/mingw_ubuntu_tree/usr{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}include"
-// RUN: %clang -target x86_64-pc-windows-gnu -rtlib=platform -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_ubuntu_posix_tree/usr %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_UBUNTU_POSIX_TREE %s
+// RUN: %clang --target=x86_64-pc-windows-gnu -rtlib=platform -stdlib=libstdc++ -c -### --sysroot=%S/Inputs/mingw_ubuntu_posix_tree/usr %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_UBUNTU_POSIX_TREE %s
// CHECK_MINGW_UBUNTU_POSIX_TREE: "[[BASE:[^"]+]]/Inputs/mingw_ubuntu_posix_tree/usr{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}10.2-posix{{/|\\\\}}include{{/|\\\\}}c++"
// CHECK_MINGW_UBUNTU_POSIX_TREE: "[[BASE]]/Inputs/mingw_ubuntu_posix_tree/usr{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}10.2-posix{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}x86_64-w64-mingw32"
// CHECK_MINGW_UBUNTU_POSIX_TREE: "[[BASE]]/Inputs/mingw_ubuntu_posix_tree/usr{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}10.2-posix{{/|\\\\}}include{{/|\\\\}}c++{{/|\\\\}}backward"
// CHECK_MINGW_UBUNTU_POSIX_TREE: "[[BASE]]/Inputs/mingw_ubuntu_posix_tree/usr{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}include"
-// RUN: %clang -target i686-windows-gnu -E -### %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_NO_UNICODE %s
-// RUN: %clang -target i686-windows-gnu -E -### %s -municode 2>&1 | FileCheck -check-prefix=CHECK_MINGW_UNICODE %s
+// RUN: %clang --target=i686-windows-gnu -E -### %s 2>&1 | FileCheck -check-prefix=CHECK_MINGW_NO_UNICODE %s
+// RUN: %clang --target=i686-windows-gnu -E -### %s -municode 2>&1 | FileCheck -check-prefix=CHECK_MINGW_UNICODE %s
// CHECK_MINGW_NO_UNICODE-NOT: "-DUNICODE"
// CHECK_MINGW_UNICODE: "-DUNICODE"
-// RUN: %clang -target i686-windows-gnu -### %s 2>&1 | FileCheck -check-prefix=CHECK_NO_SUBSYS %s
-// RUN: %clang -target i686-windows-gnu -### %s -mwindows -mconsole 2>&1 | FileCheck -check-prefix=CHECK_SUBSYS_CONSOLE %s
-// RUN: %clang -target i686-windows-gnu -### %s -mconsole -mwindows 2>&1 | FileCheck -check-prefix=CHECK_SUBSYS_WINDOWS %s
+// RUN: %clang --target=i686-windows-gnu -### %s 2>&1 | FileCheck -check-prefix=CHECK_NO_SUBSYS %s
+// RUN: %clang --target=i686-windows-gnu -### %s -mwindows -mconsole 2>&1 | FileCheck -check-prefix=CHECK_SUBSYS_CONSOLE %s
+// RUN: %clang --target=i686-windows-gnu -### %s -mconsole -mwindows 2>&1 | FileCheck -check-prefix=CHECK_SUBSYS_WINDOWS %s
// CHECK_NO_SUBSYS-NOT: "--subsystem"
// CHECK_SUBSYS_CONSOLE: "--subsystem" "console"
// CHECK_SUBSYS_WINDOWS: "--subsystem" "windows"
-// RUN: %clang -target i686-windows-gnu -### %s 2>&1 | FileCheck -check-prefix=CHECK_NO_INIT_ARRAY %s
+// RUN: %clang --target=i686-windows-gnu -### %s 2>&1 | FileCheck -check-prefix=CHECK_NO_INIT_ARRAY %s
// CHECK_NO_INIT_ARRAY: "-fno-use-init-array"
-// RUN: %clang -target arm64ec-windows-gnu -### -o /dev/null %s 2>&1 \
+// RUN: %clang --target=arm64ec-windows-gnu -### -o /dev/null %s 2>&1 \
// RUN: | FileCheck %s --check-prefix CHECK_MINGW_EC_LINK
// CHECK_MINGW_EC_LINK: "-m" "arm64ecpe"
diff --git a/clang/test/Driver/mips-abi.c b/clang/test/Driver/mips-abi.c
index 98384ce8b315..05277520a94b 100644
--- a/clang/test/Driver/mips-abi.c
+++ b/clang/test/Driver/mips-abi.c
@@ -2,168 +2,168 @@
//
// REQUIRES: mips-registered-target
//
-// RUN: %clang -target mips-linux-gnu -### -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS32R2-O32 %s
-// RUN: %clang -target mips64-linux-gnu -mips32r2 -mabi=32 -### -c %s 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -mips32r2 -mabi=32 -### -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS32R2-O32 %s
// MIPS32R2-O32: "-target-cpu" "mips32r2"
// MIPS32R2-O32: "-target-abi" "o32"
//
-// RUN: %clang -target mips64-linux-gnu -### -c %s 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -### -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS64R2-N64 %s
-// RUN: %clang -target mips-img-linux-gnu -mips64r2 -### -c %s 2>&1 \
+// RUN: %clang --target=mips-img-linux-gnu -mips64r2 -### -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS64R2-N64 %s
-// RUN: %clang -target mips-mti-linux-gnu -mips64r2 -### -c %s 2>&1 \
+// RUN: %clang --target=mips-mti-linux-gnu -mips64r2 -### -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS64R2-N64 %s
-// RUN: %clang -target mips-linux-gnu -mips64r2 -mabi=64 -### -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mips64r2 -mabi=64 -### -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS64R2-N64 %s
// MIPS64R2-N64: "-target-cpu" "mips64r2"
// MIPS64R2-N64: "-target-abi" "n64"
//
-// RUN: %clang -target mips64-linux-gnu -### -mips64r3 -c %s 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -### -mips64r3 -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS64R3-N64 %s
-// RUN: %clang -target mips-img-linux-gnu -mips64r3 -### -c %s 2>&1 \
+// RUN: %clang --target=mips-img-linux-gnu -mips64r3 -### -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS64R3-N64 %s
-// RUN: %clang -target mips-mti-linux-gnu -mips64r3 -### -c %s 2>&1 \
+// RUN: %clang --target=mips-mti-linux-gnu -mips64r3 -### -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS64R3-N64 %s
// MIPS64R3-N64: "-target-cpu" "mips64r3"
// MIPS64R3-N64: "-target-abi" "n64"
//
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mabi=32 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ABI-32 %s
// MIPS-ABI-32: "-target-cpu" "mips32r2"
// MIPS-ABI-32: "-target-abi" "o32"
//
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mabi=o32 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ABI-O32 %s
// MIPS-ABI-O32: "-target-cpu" "mips32r2"
// MIPS-ABI-O32: "-target-abi" "o32"
//
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mabi=n32 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ABI-N32 %s
// MIPS-ABI-N32: "-target-cpu" "mips64r2"
// MIPS-ABI-N32: "-target-abi" "n32"
//
-// RUN: %clang -target mips64-linux-gnu -### -c %s \
+// RUN: %clang --target=mips64-linux-gnu -### -c %s \
// RUN: -mabi=64 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ABI-64 %s
// MIPS-ABI-64: "-target-cpu" "mips64r2"
// MIPS-ABI-64: "-target-abi" "n64"
//
-// RUN: %clang -target mips64-linux-gnu -### -c %s \
+// RUN: %clang --target=mips64-linux-gnu -### -c %s \
// RUN: -mabi=n64 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ABI-N64 %s
// MIPS-ABI-N64: "-target-cpu" "mips64r2"
// MIPS-ABI-N64: "-target-abi" "n64"
//
-// RUN: not %clang -target mips64-linux-gnu -c %s \
+// RUN: not %clang --target=mips64-linux-gnu -c %s \
// RUN: -mabi=o64 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ABI-O64 %s
// MIPS-ABI-O64: error: unknown target ABI 'o64'
//
-// RUN: not %clang -target mips-linux-gnu -c %s \
+// RUN: not %clang --target=mips-linux-gnu -c %s \
// RUN: -mabi=unknown 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ABI-UNKNOWN %s
// MIPS-ABI-UNKNOWN: error: unknown target ABI 'unknown'
//
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -march=mips1 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ARCH-1 %s
// MIPS-ARCH-1: "-target-cpu" "mips1"
// MIPS-ARCH-1: "-target-abi" "o32"
//
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -march=mips2 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ARCH-2 %s
// MIPS-ARCH-2: "-target-cpu" "mips2"
// MIPS-ARCH-2: "-target-abi" "o32"
//
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -march=mips3 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ARCH-3 %s
// MIPS-ARCH-3: "-target-cpu" "mips3"
// MIPS-ARCH-3: "-target-abi" "o32"
//
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -march=mips4 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ARCH-4 %s
// MIPS-ARCH-4: "-target-cpu" "mips4"
// MIPS-ARCH-4: "-target-abi" "o32"
//
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -march=mips5 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ARCH-5 %s
// MIPS-ARCH-5: "-target-cpu" "mips5"
// MIPS-ARCH-5: "-target-abi" "o32"
//
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -march=mips32 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ARCH-32 %s
// MIPS-ARCH-32: "-target-cpu" "mips32"
// MIPS-ARCH-32: "-target-abi" "o32"
//
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -march=mips32r2 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ARCH-32R2 %s
// MIPS-ARCH-32R2: "-target-cpu" "mips32r2"
// MIPS-ARCH-32R2: "-target-abi" "o32"
//
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -march=p5600 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ARCH-P5600 %s
// MIPS-ARCH-P5600: "-target-cpu" "p5600"
// MIPS-ARCH-P5600: "-target-abi" "o32"
//
-// RUN: not %clang -target mips-linux-gnu -c %s \
+// RUN: not %clang --target=mips-linux-gnu -c %s \
// RUN: -march=p5600 -mabi=64 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ARCH-P5600-N64 %s
// MIPS-ARCH-P5600-N64: error: ABI 'n64' is not supported on CPU 'p5600'
//
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -march=mips64 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ARCH-3264 %s
// MIPS-ARCH-3264: "-target-cpu" "mips64"
// MIPS-ARCH-3264: "-target-abi" "o32"
//
-// RUN: %clang -target mips64-linux-gnu -### -c %s \
+// RUN: %clang --target=mips64-linux-gnu -### -c %s \
// RUN: -march=mips64 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ARCH-64 %s
// MIPS-ARCH-64: "-target-cpu" "mips64"
// MIPS-ARCH-64: "-target-abi" "n64"
//
-// RUN: %clang -target mips64-linux-gnu -### -c %s \
+// RUN: %clang --target=mips64-linux-gnu -### -c %s \
// RUN: -march=mips64r2 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ARCH-64R2 %s
// MIPS-ARCH-64R2: "-target-cpu" "mips64r2"
// MIPS-ARCH-64R2: "-target-abi" "n64"
//
-// RUN: %clang -target mips64-linux-gnu -### -c %s \
+// RUN: %clang --target=mips64-linux-gnu -### -c %s \
// RUN: -march=octeon 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ARCH-OCTEON %s
// MIPS-ARCH-OCTEON: "-target-cpu" "octeon"
// MIPS-ARCH-OCTEON: "-target-abi" "n64"
//
-// RUN: %clang -target mips64-linux-gnu -### -c %s \
+// RUN: %clang --target=mips64-linux-gnu -### -c %s \
// RUN: -march=octeon+ 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ARCH-OCTEONP %s
// MIPS-ARCH-OCTEONP: "-target-cpu" "octeon+"
// MIPS-ARCH-OCTEONP: "-target-abi" "n64"
//
-// RUN: not %clang -target mips64-linux-gnu -c %s \
+// RUN: not %clang --target=mips64-linux-gnu -c %s \
// RUN: -march=mips32 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ARCH-6432 %s
// MIPS-ARCH-6432: error: ABI 'n64' is not supported on CPU 'mips32'
//
-// RUN: not %clang -target mips-linux-gnu -c %s \
+// RUN: not %clang --target=mips-linux-gnu -c %s \
// RUN: -march=unknown 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ARCH-UNKNOWN %s
// MIPS-ARCH-UNKNOWN: error: unknown target CPU 'unknown'
// Check adjusting of the target triple according to the `-mabi` option.
-// RUN: %clang -target mips64-linux-gnuabi64 -mabi=32 -### %s 2>&1 \
+// RUN: %clang --target=mips64-linux-gnuabi64 -mabi=32 -### %s 2>&1 \
// RUN: | FileCheck -check-prefix=TARGET-O32 %s
// TARGET-O32: "-triple" "mips-unknown-linux-gnu"
// TARGET-O32: "-target-cpu" "mips32r2"
@@ -171,7 +171,7 @@
// TARGET-O32: ld{{(.exe)?}}"
// TARGET-O32: "-m" "elf32btsmip"
-// RUN: %clang -target mips-linux-gnu -mabi=n32 -### %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mabi=n32 -### %s 2>&1 \
// RUN: | FileCheck -check-prefix=TARGET-N32 %s
// TARGET-N32: "-triple" "mips64-unknown-linux-gnuabin32"
// TARGET-N32: "-target-cpu" "mips64r2"
@@ -179,7 +179,7 @@
// TARGET-N32: ld{{(.exe)?}}"
// TARGET-N32: "-m" "elf32btsmipn32"
-// RUN: %clang -target mips-linux-gnu -mabi=64 -### %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mabi=64 -### %s 2>&1 \
// RUN: | FileCheck -check-prefix=TARGET-N64 %s
// TARGET-N64: "-triple" "mips64-unknown-linux-gnuabi64"
// TARGET-N64: "-target-cpu" "mips64r2"
diff --git a/clang/test/Driver/mips-abicalls-error.c b/clang/test/Driver/mips-abicalls-error.c
index 03ef68b02de6..a576208a16ed 100644
--- a/clang/test/Driver/mips-abicalls-error.c
+++ b/clang/test/Driver/mips-abicalls-error.c
@@ -1,2 +1,2 @@
-// RUN: not %clang -c -target mips64-linux-gnu -fPIC -mno-abicalls %s 2>&1 | FileCheck %s
+// RUN: not %clang -c --target=mips64-linux-gnu -fPIC -mno-abicalls %s 2>&1 | FileCheck %s
// CHECK: error: position-independent code requires '-mabicalls'
diff --git a/clang/test/Driver/mips-abicalls-warning.c b/clang/test/Driver/mips-abicalls-warning.c
index 09f341eb9a33..f65d83112707 100644
--- a/clang/test/Driver/mips-abicalls-warning.c
+++ b/clang/test/Driver/mips-abicalls-warning.c
@@ -1,30 +1,30 @@
// REQUIRES: mips-registered-target
-// RUN: %clang -### -c -target mips64-mti-elf -fno-pic %s 2>&1 | FileCheck -check-prefix=CHECK-PIC1-IMPLICIT %s
+// RUN: %clang -### -c --target=mips64-mti-elf -fno-pic %s 2>&1 | FileCheck -check-prefix=CHECK-PIC1-IMPLICIT %s
// CHECK-PIC1-IMPLICIT: warning: ignoring '-fno-pic' option as it cannot be used with implicit usage of -mabicalls and the N64 ABI
-// RUN: %clang -### -c -target mips64-mti-elf -fno-pic -mabicalls %s 2>&1 | FileCheck -check-prefix=CHECK-PIC1-EXPLICIT %s
+// RUN: %clang -### -c --target=mips64-mti-elf -fno-pic -mabicalls %s 2>&1 | FileCheck -check-prefix=CHECK-PIC1-EXPLICIT %s
// CHECK-PIC1-EXPLICIT: warning: ignoring '-fno-pic' option as it cannot be used with -mabicalls and the N64 ABI
-// RUN: %clang -### -c -target mips64-mti-elf -fno-PIC %s 2>&1 | FileCheck -check-prefix=CHECK-PIC2-IMPLICIT %s
+// RUN: %clang -### -c --target=mips64-mti-elf -fno-PIC %s 2>&1 | FileCheck -check-prefix=CHECK-PIC2-IMPLICIT %s
// CHECK-PIC2-IMPLICIT: warning: ignoring '-fno-PIC' option as it cannot be used with implicit usage of -mabicalls and the N64 ABI
-// RUN: %clang -### -c -target mips64-mti-elf -fno-PIC -mabicalls %s 2>&1 | FileCheck -check-prefix=CHECK-PIC2-EXPLICIT %s
+// RUN: %clang -### -c --target=mips64-mti-elf -fno-PIC -mabicalls %s 2>&1 | FileCheck -check-prefix=CHECK-PIC2-EXPLICIT %s
// CHECK-PIC2-EXPLICIT: warning: ignoring '-fno-PIC' option as it cannot be used with -mabicalls and the N64 ABI
-// RUN: %clang -### -c -target mips64-mti-elf -fno-pie %s 2>&1 | FileCheck -check-prefix=CHECK-PIE1-IMPLICIT %s
+// RUN: %clang -### -c --target=mips64-mti-elf -fno-pie %s 2>&1 | FileCheck -check-prefix=CHECK-PIE1-IMPLICIT %s
// CHECK-PIE1-IMPLICIT: warning: ignoring '-fno-pie' option as it cannot be used with implicit usage of -mabicalls and the N64 ABI
-// RUN: %clang -### -c -target mips64-mti-elf -fno-pie -mabicalls %s 2>&1 | FileCheck -check-prefix=CHECK-PIE1-EXPLICIT %s
+// RUN: %clang -### -c --target=mips64-mti-elf -fno-pie -mabicalls %s 2>&1 | FileCheck -check-prefix=CHECK-PIE1-EXPLICIT %s
// CHECK-PIE1-EXPLICIT: warning: ignoring '-fno-pie' option as it cannot be used with -mabicalls and the N64 ABI
-// RUN: %clang -### -c -target mips64-mti-elf -fno-PIE %s 2>&1 | FileCheck -check-prefix=CHECK-PIE2-IMPLICIT %s
+// RUN: %clang -### -c --target=mips64-mti-elf -fno-PIE %s 2>&1 | FileCheck -check-prefix=CHECK-PIE2-IMPLICIT %s
// CHECK-PIE2-IMPLICIT: warning: ignoring '-fno-PIE' option as it cannot be used with implicit usage of -mabicalls and the N64 ABI
-// RUN: %clang -### -c -target mips64-mti-elf -fno-PIE -mabicalls %s 2>&1 | FileCheck -check-prefix=CHECK-PIE2-EXPLICIT %s
+// RUN: %clang -### -c --target=mips64-mti-elf -fno-PIE -mabicalls %s 2>&1 | FileCheck -check-prefix=CHECK-PIE2-EXPLICIT %s
// CHECK-PIE2-EXPLICIT: warning: ignoring '-fno-PIE' option as it cannot be used with -mabicalls and the N64 ABI
-// RUN: %clang -### -c -target mips-mti-elf -mlong-calls %s 2>&1 | FileCheck -check-prefix=LONGCALL-IMP %s
+// RUN: %clang -### -c --target=mips-mti-elf -mlong-calls %s 2>&1 | FileCheck -check-prefix=LONGCALL-IMP %s
// LONGCALL-IMP: warning: ignoring '-mlong-calls' option as it is not currently supported with the implicit usage of -mabicalls
-// RUN: %clang -### -c -target mips-mti-elf -mlong-calls -mabicalls %s 2>&1 | FileCheck -check-prefix=LONGCALL-EXP %s
+// RUN: %clang -### -c --target=mips-mti-elf -mlong-calls -mabicalls %s 2>&1 | FileCheck -check-prefix=LONGCALL-EXP %s
// LONGCALL-EXP: warning: ignoring '-mlong-calls' option as it is not currently supported with -mabicalls
diff --git a/clang/test/Driver/mips-as.c b/clang/test/Driver/mips-as.c
index 14fbb18c9350..fc366f529ffb 100644
--- a/clang/test/Driver/mips-as.c
+++ b/clang/test/Driver/mips-as.c
@@ -1,275 +1,275 @@
// Check passing options to the assembler for MIPS targets.
//
-// RUN: %clang -target mips-linux-gnu -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS32R2-EB-AS %s
-// RUN: %clang -target mipsel-linux-gnu -### \
-// RUN: -no-integrated-as -fno-pic -c -EB %s 2>&1 \
+// RUN: %clang --target=mipsel-linux-gnu -### \
+// RUN: -fno-integrated-as -fno-pic -c -EB %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS32R2-EB-AS %s
// MIPS32R2-EB-AS: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB"
// MIPS32R2-EB-AS-NOT: "{{[ A-Za-z\\\/]*}}as{{(.exe)?}}{{.*}}"-KPIC"
//
-// RUN: %clang -target mips-linux-gnu -### \
-// RUN: -no-integrated-as -fPIC -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### \
+// RUN: -fno-integrated-as -fPIC -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS32R2-EB-PIC %s
// MIPS32R2-EB-PIC: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-call_nonpic" "-EB"
// MIPS32R2-EB-PIC: "-KPIC"
//
-// RUN: %clang -target mipsel-linux-gnu -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mipsel-linux-gnu -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS32R2-DEF-EL-AS %s
// MIPS32R2-DEF-EL-AS: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EL"
//
-// RUN: %clang -target mips64-linux-gnu -### \
-// RUN: -no-integrated-as -fno-pic -mno-abicalls -c %s 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -### \
+// RUN: -fno-integrated-as -fno-pic -mno-abicalls -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS64R2-EB-AS %s
// MIPS64R2-EB-AS: as{{(.exe)?}}" "-march" "mips64r2" "-mabi" "64" "-mno-shared" "-EB"
//
-// RUN: %clang -target mips64-linux-gnu -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS64R2-EB-AS-PIC %s
// MIPS64R2-EB-AS-PIC: as{{(.exe)?}}" "-march" "mips64r2" "-mabi" "64" "-EB" "-KPIC"
//
-// RUN: %clang -target mips64el-linux-gnu -### \
-// RUN: -no-integrated-as -fno-pic -c -fno-pic -mno-abicalls %s 2>&1 \
+// RUN: %clang --target=mips64el-linux-gnu -### \
+// RUN: -fno-integrated-as -fno-pic -c -fno-pic -mno-abicalls %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS64R2-DEF-EL-AS %s
// MIPS64R2-DEF-EL-AS: as{{(.exe)?}}" "-march" "mips64r2" "-mabi" "64" "-mno-shared" "-EL"
//
-// RUN: %clang -target mips64el-linux-gnu -### \
-// RUN: -no-integrated-as -c %s 2>&1 \
+// RUN: %clang --target=mips64el-linux-gnu -### \
+// RUN: -fno-integrated-as -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS64R2-DEF-EL-AS-PIC %s
// MIPS64R2-DEF-EL-AS-PIC: as{{(.exe)?}}" "-march" "mips64r2" "-mabi" "64" "-EL" "-KPIC"
//
-// RUN: %clang -target mips64-linux-gnu -mabi=n32 -### \
-// RUN: -no-integrated-as -c %s 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -mabi=n32 -### \
+// RUN: -fno-integrated-as -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-N32-PIC %s
// MIPS-N32-PIC: as{{(.exe)?}}" "-march" "mips64r2" "-mabi" "n32" "-call_nonpic" "-EB" "-KPIC"
//
-// RUN: %clang -target mips64-linux-gnu -mabi=n32 -### \
-// RUN: -no-integrated-as -fno-pic -c %s -fno-pic 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -mabi=n32 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s -fno-pic 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-N32 %s
// MIPS-N32: as{{(.exe)?}}" "-march" "mips64r2" "-mabi" "n32" "-mno-shared" "-call_nonpic" "-EB"
//
-// RUN: %clang -target mipsel-linux-gnu -mabi=32 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mipsel-linux-gnu -mabi=32 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS32R2-EL-AS %s
-// RUN: %clang -target mips-linux-gnu -mabi=32 -### \
-// RUN: -no-integrated-as -fno-pic -c %s -EL 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mabi=32 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s -EL 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS32R2-EL-AS %s
// MIPS32R2-EL-AS: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EL"
//
-// RUN: %clang -target mips64el-linux-gnu -mabi=64 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips64el-linux-gnu -mabi=64 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS64R2-EL-AS-PIC %s
// MIPS64R2-EL-AS-PIC: as{{(.exe)?}}" "-march" "mips64r2" "-mabi" "64" "-EL" "-KPIC"
//
-// RUN: %clang -target mips64el-linux-gnu -mabi=64 -### \
-// RUN: -no-integrated-as -fno-pic -c %s -fno-pic -mno-abicalls 2>&1 \
+// RUN: %clang --target=mips64el-linux-gnu -mabi=64 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s -fno-pic -mno-abicalls 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS64R2-EL-AS %s
// MIPS64R2-EL-AS: as{{(.exe)?}}" "-march" "mips64r2" "-mabi" "64" "-mno-shared" "-EL"
//
-// RUN: %clang -target mips-linux-gnu -march=mips32r2 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -march=mips32r2 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-32R2 %s
// MIPS-32R2: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB"
//
-// RUN: %clang -target mips-linux-gnu -march=p5600 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -march=p5600 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-P5600 %s
// MIPS-P5600: as{{(.exe)?}}" "-march" "p5600" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB"
//
-// RUN: %clang -target mips64-linux-gnu -march=octeon -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -march=octeon -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-OCTEON-PIC %s
// MIPS-OCTEON-PIC: as{{(.exe)?}}" "-march" "octeon" "-mabi" "64" "-EB" "-KPIC"
//
-// RUN: %clang -target mips64-linux-gnu -march=octeon -### \
-// RUN: -no-integrated-as -fno-pic -c %s -fno-pic -mno-abicalls 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -march=octeon -### \
+// RUN: -fno-integrated-as -fno-pic -c %s -fno-pic -mno-abicalls 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-OCTEON %s
// MIPS-OCTEON: as{{(.exe)?}}" "-march" "octeon" "-mabi" "64" "-mno-shared" "-EB"
//
-// RUN: %clang -target mips64-linux-gnu -march=octeon+ -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -march=octeon+ -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-OCTEONP-PIC %s
// MIPS-OCTEONP-PIC: as{{(.exe)?}}" "-march" "octeon+" "-mabi" "64" "-EB" "-KPIC"
//
-// RUN: %clang -target mips64-linux-gnu -march=octeon+ -### \
-// RUN: -no-integrated-as -fno-pic -c %s -fno-pic -mno-abicalls 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -march=octeon+ -### \
+// RUN: -fno-integrated-as -fno-pic -c %s -fno-pic -mno-abicalls 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-OCTEONP %s
// MIPS-OCTEONP: as{{(.exe)?}}" "-march" "octeon+" "-mabi" "64" "-mno-shared" "-EB"
//
-// RUN: %clang -target mips-linux-gnu -mips1 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mips1 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-1 %s
// MIPS-ALIAS-1: as{{(.exe)?}}" "-march" "mips1" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB"
//
-// RUN: %clang -target mips-linux-gnu -mips2 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mips2 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-2 %s
// MIPS-ALIAS-2: as{{(.exe)?}}" "-march" "mips2" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB"
//
-// RUN: %clang -target mips-linux-gnu -mips3 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mips3 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-3 %s
// MIPS-ALIAS-3: as{{(.exe)?}}" "-march" "mips3" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB"
//
-// RUN: %clang -target mips-linux-gnu -mips4 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mips4 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-4 %s
// MIPS-ALIAS-4: as{{(.exe)?}}" "-march" "mips4" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB"
//
-// RUN: %clang -target mips-linux-gnu -mips5 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mips5 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-5 %s
// MIPS-ALIAS-5: as{{(.exe)?}}" "-march" "mips5" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB"
//
-// RUN: %clang -target mips-linux-gnu -mips32 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mips32 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-32 %s
// MIPS-ALIAS-32: as{{(.exe)?}}" "-march" "mips32" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB"
//
-// RUN: %clang -target mips-linux-gnu -mips32r2 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mips32r2 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-32R2 %s
// MIPS-ALIAS-32R2: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB"
//
-// RUN: %clang -target mips-linux-gnu -mips32r3 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mips32r3 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-32R3 %s
// MIPS-ALIAS-32R3: as{{(.exe)?}}" "-march" "mips32r3" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB"
//
-// RUN: %clang -target mips-linux-gnu -mips32r5 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mips32r5 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-32R5 %s
// MIPS-ALIAS-32R5: as{{(.exe)?}}" "-march" "mips32r5" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB"
//
-// RUN: %clang -target mips-linux-gnu -mips32r6 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mips32r6 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-32R6 %s
// MIPS-ALIAS-32R6: as{{(.exe)?}}" "-march" "mips32r6" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB"
//
-// RUN: %clang -target mips64-linux-gnu -mips64 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -mips64 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-64-PIC %s
// MIPS-ALIAS-64-PIC: as{{(.exe)?}}" "-march" "mips64" "-mabi" "64" "-EB" "-KPIC"
//
-// RUN: %clang -target mips64-linux-gnu -mips64 -### \
-// RUN: -no-integrated-as -fno-pic -c -fno-pic -mno-abicalls %s 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -mips64 -### \
+// RUN: -fno-integrated-as -fno-pic -c -fno-pic -mno-abicalls %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-64 %s
// MIPS-ALIAS-64: as{{(.exe)?}}" "-march" "mips64" "-mabi" "64" "-mno-shared" "-EB"
//
-// RUN: %clang -target mips64-linux-gnu -mips64r2 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -mips64r2 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-64R2-PIC %s
// MIPS-ALIAS-64R2-PIC: as{{(.exe)?}}" "-march" "mips64r2" "-mabi" "64" "-EB" "-KPIC"
//
-// RUN: %clang -target mips64-linux-gnu -mips64r3 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -mips64r3 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-64R3-PIC %s
// MIPS-ALIAS-64R3-PIC: as{{(.exe)?}}" "-march" "mips64r3" "-mabi" "64" "-EB" "-KPIC"
//
-// RUN: %clang -target mips64-linux-gnu -mips64r3 -### \
-// RUN: -no-integrated-as -fno-pic -c %s -fno-pic -mno-abicalls 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -mips64r3 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s -fno-pic -mno-abicalls 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-64R3 %s
// MIPS-ALIAS-64R3: as{{(.exe)?}}" "-march" "mips64r3" "-mabi" "64" "-mno-shared" "-EB"
//
-// RUN: %clang -target mips64-linux-gnu -mips64r5 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -mips64r5 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-64R5-PIC %s
// MIPS-ALIAS-64R5-PIC: as{{(.exe)?}}" "-march" "mips64r5" "-mabi" "64" "-EB" "-KPIC"
//
-// RUN: %clang -target mips64-linux-gnu -mips64r5 -### \
-// RUN: -no-integrated-as -fno-pic -c %s -fno-pic -mno-abicalls 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -mips64r5 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s -fno-pic -mno-abicalls 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-64R5 %s
// MIPS-ALIAS-64R5: as{{(.exe)?}}" "-march" "mips64r5" "-mabi" "64" "-mno-shared" "-EB"
//
-// RUN: %clang -target mips64-linux-gnu -mips64r6 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -mips64r6 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-64R6-PIC %s
// MIPS-ALIAS-64R6-PIC: as{{(.exe)?}}" "-march" "mips64r6" "-mabi" "64" "-EB" "-KPIC"
//
-// RUN: %clang -target mips64-linux-gnu -mips64r6 -### \
-// RUN: -no-integrated-as -fno-pic -c %s -fno-pic -mno-abicalls 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -mips64r6 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s -fno-pic -mno-abicalls 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-ALIAS-64R6 %s
// MIPS-ALIAS-64R6: as{{(.exe)?}}" "-march" "mips64r6" "-mabi" "64" "-mno-shared" "-EB"
//
-// RUN: %clang -target mips-linux-gnu -mno-mips16 -mips16 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mno-mips16 -mips16 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-16 %s
// MIPS-16: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB" "-mfpxx" "-mips16"
//
-// RUN: %clang -target mips-linux-gnu -mips16 -mno-mips16 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mips16 -mno-mips16 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-N16 %s
// MIPS-N16: as{{(.exe)?}}"
// MIPS-N16: -no-mips16
//
-// RUN: %clang -target mips-linux-gnu -mno-micromips -mmicromips -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mno-micromips -mmicromips -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-MICRO %s
// MIPS-MICRO: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB" "-mfpxx" "-mmicromips"
//
-// RUN: %clang -target mips-linux-gnu -mmicromips -mno-micromips -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mmicromips -mno-micromips -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-NMICRO %s
// MIPS-NMICRO: as{{(.exe)?}}"
// MIPS-NMICRO-NOT: {{[A-Za-z\\\/]*}}as{{(.exe)?}}{{.*}}"-mmicromips"
//
-// RUN: %clang -target mips-linux-gnu -mno-dsp -mdsp -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mno-dsp -mdsp -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-DSP %s
// MIPS-DSP: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB" "-mfpxx" "-mdsp"
//
-// RUN: %clang -target mips-linux-gnu -mdsp -mno-dsp -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mdsp -mno-dsp -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-NDSP %s
// MIPS-NDSP: as{{(.exe)?}}"
// MIPS-NDSP-NOT: "{{[ A-Za-z\\\/]*}}as{{(.exe)?}}{{.*}}"-mdsp"
//
-// RUN: %clang -target mips-linux-gnu -mno-dspr2 -mdspr2 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mno-dspr2 -mdspr2 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-DSPR2 %s
// MIPS-DSPR2: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB" "-mfpxx" "-mdspr2"
//
-// RUN: %clang -target mips-linux-gnu -mdspr2 -mno-dspr2 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mdspr2 -mno-dspr2 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-NDSPR2 %s
// MIPS-NDSPR2: as{{(.exe)?}}"
// MIPS-NDSPR2-NOT: "{{[ A-Za-z\\\/]*}}as{{(.exe)?}}{{.*}}"-mdspr2"
//
-// RUN: %clang -target mips-linux-gnu -mnan=legacy -mnan=2008 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mnan=legacy -mnan=2008 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-NAN2008 %s
// MIPS-NAN2008: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB" "-mnan=2008"
//
-// RUN: %clang -target mips-linux-gnu -mnan=2008 -mnan=legacy -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mnan=2008 -mnan=legacy -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-NAN-LEGACY %s
// MIPS-NAN-LEGACY: as{{(.exe)?}}"
// MIPS-NAN-LEGACY-NOT: "{{[ A-Za-z\\\/]*}}as{{(.exe)?}}{{.*}}"-mnan={{.*}}"
//
-// RUN: %clang -target mips-linux-gnu -mfp64 -mfpxx -mfp32 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mfp64 -mfpxx -mfp32 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-MFP32 %s
// MIPS-MFP32: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB" "-mfp32"
//
-// RUN: %clang -target mips-linux-gnu -mfp32 -mfp64 -mfpxx -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mfp32 -mfp64 -mfpxx -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-MFPXX %s
// MIPS-MFPXX: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB" "-mfpxx"
//
-// RUN: %clang -target mips-linux-gnu -mfpxx -mfp32 -mfp64 -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mfpxx -mfp32 -mfp64 -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-MFP64 %s
// MIPS-MFP64: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB" "-mfp64"
//
-// RUN: %clang -target mips-linux-gnu -mno-msa -mmsa -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mno-msa -mmsa -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-MSA %s
-// MIPS-MSA: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB" "-mfpxx" "-mmsa"
+// MIPS-MSA: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB" "-mmsa"
//
-// RUN: %clang -target mips-linux-gnu -mmsa -mno-msa -### \
-// RUN: -no-integrated-as -fno-pic -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -mmsa -mno-msa -### \
+// RUN: -fno-integrated-as -fno-pic -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MIPS-NMSA %s
// MIPS-NMSA: as{{(.exe)?}}"
// MIPS-NMSA-NOT: "{{[ A-Za-z\\\/]*}}as{{(.exe)?}}{{.*}}"-mmsa"
@@ -277,137 +277,137 @@
// We've already tested MIPS32r2 and MIPS64r2 thoroughly. Do minimal tests on
// the remaining CPUs, since it was possible to pass on a -mabi with no value
// when the CPU name is absent from a StringSwitch in getMipsCPUAndABI().
-// RUN: %clang -target mips-linux-gnu -### -no-integrated-as -fno-pic -c %s -mcpu=mips1 \
+// RUN: %clang --target=mips-linux-gnu -### -fno-integrated-as -fno-pic -c %s -mcpu=mips1 \
// RUN: 2>&1 | FileCheck -check-prefix=MIPS1-EB-AS %s
// MIPS1-EB-AS: as{{(.exe)?}}" "-march" "mips1" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB"
// MIPS1-EB-AS-NOT: "{{[ A-Za-z\\\/]*}}as{{(.exe)?}}{{.*}}"-KPIC"
//
-// RUN: %clang -target mips-linux-gnu -### -no-integrated-as -fno-pic -c %s -mcpu=mips2 \
+// RUN: %clang --target=mips-linux-gnu -### -fno-integrated-as -fno-pic -c %s -mcpu=mips2 \
// RUN: 2>&1 | FileCheck -check-prefix=MIPS2-EB-AS %s
// MIPS2-EB-AS: as{{(.exe)?}}" "-march" "mips2" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB"
// MIPS2-EB-AS-NOT: "{{[ A-Za-z\\\/]*}}as{{(.exe)?}}{{.*}}"-KPIC"
//
-// RUN: %clang -target mips64-linux-gnu -### -no-integrated-as -fno-pic -c %s -mcpu=mips3 \
+// RUN: %clang --target=mips64-linux-gnu -### -fno-integrated-as -fno-pic -c %s -mcpu=mips3 \
// RUN: 2>&1 | FileCheck -check-prefix=MIPS3-EB-AS %s
// MIPS3-EB-AS: as{{(.exe)?}}" "-march" "mips3" "-mabi" "64" "-EB" "-KPIC"
//
-// RUN: %clang -target mips64-linux-gnu -### -no-integrated-as -fno-pic -c %s -mcpu=mips4 \
+// RUN: %clang --target=mips64-linux-gnu -### -fno-integrated-as -fno-pic -c %s -mcpu=mips4 \
// RUN: 2>&1 | FileCheck -check-prefix=MIPS4-EB-AS %s
// MIPS4-EB-AS: as{{(.exe)?}}" "-march" "mips4" "-mabi" "64" "-EB" "-KPIC"
//
-// RUN: %clang -target mips64-linux-gnu -### -no-integrated-as -fno-pic -c %s -mcpu=mips5 \
+// RUN: %clang --target=mips64-linux-gnu -### -fno-integrated-as -fno-pic -c %s -mcpu=mips5 \
// RUN: 2>&1 | FileCheck -check-prefix=MIPS5-EB-AS %s
// MIPS5-EB-AS: as{{(.exe)?}}" "-march" "mips5" "-mabi" "64" "-EB" "-KPIC"
//
-// RUN: %clang -target mips-linux-gnu -### -no-integrated-as -fno-pic -c %s -mcpu=mips32 \
+// RUN: %clang --target=mips-linux-gnu -### -fno-integrated-as -fno-pic -c %s -mcpu=mips32 \
// RUN: 2>&1 | FileCheck -check-prefix=MIPS32-EB-AS %s
// MIPS32-EB-AS: as{{(.exe)?}}" "-march" "mips32" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB"
// MIPS32-EB-AS-NOT: "{{[ A-Za-z\\\/]*}}as{{(.exe)?}}{{.*}}"-KPIC"
//
-// RUN: %clang -target mips-linux-gnu -### -no-integrated-as -fno-pic -c %s -mcpu=mips32r6 \
+// RUN: %clang --target=mips-linux-gnu -### -fno-integrated-as -fno-pic -c %s -mcpu=mips32r6 \
// RUN: 2>&1 | FileCheck -check-prefix=MIPS32R6-EB-AS %s
// MIPS32R6-EB-AS: as{{(.exe)?}}" "-march" "mips32r6" "-mabi" "32" "-mno-shared" "-call_nonpic" "-EB"
// MIPS32R6-EB-AS-NOT: "{{[ A-Za-z\\\/]*}}as{{(.exe)?}}{{.*}}"-KPIC"
//
-// RUN: %clang -target mips64-linux-gnu -### -no-integrated-as -fno-pic -c %s -mcpu=mips64 \
+// RUN: %clang --target=mips64-linux-gnu -### -fno-integrated-as -fno-pic -c %s -mcpu=mips64 \
// RUN: 2>&1 | FileCheck -check-prefix=MIPS64-EB-AS %s
// MIPS64-EB-AS: as{{(.exe)?}}" "-march" "mips64" "-mabi" "64" "-EB" "-KPIC"
//
-// RUN: %clang -target mips64-linux-gnu -### -no-integrated-as -fno-pic -c %s -mcpu=mips64r6 \
+// RUN: %clang --target=mips64-linux-gnu -### -fno-integrated-as -fno-pic -c %s -mcpu=mips64r6 \
// RUN: 2>&1 | FileCheck -check-prefix=MIPS64R6-EB-AS %s
// MIPS64R6-EB-AS: as{{(.exe)?}}" "-march" "mips64r6" "-mabi" "64" "-EB" "-KPIC"
//
-// RUN: %clang -target mips-linux-gnu -### -no-integrated-as -msoft-float -mhard-float -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -fno-integrated-as -msoft-float -mhard-float -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=HARDFLOAT --implicit-check-not=-msoft-float %s
// HARDFLOAT: as{{(.exe)?}}"
// HARDFLOAT: -mhard-float
//
-// RUN: %clang -target mips-linux-gnu -### -no-integrated-as -mhard-float -msoft-float -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -fno-integrated-as -mhard-float -msoft-float -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=SOFTFLOAT --implicit-check-not=-mhard-float %s
// SOFTFLOAT: as{{(.exe)?}}"
// SOFTFLOAT: -msoft-float
//
-// RUN: %clang -target mips-linux-gnu -### -no-integrated-as -mno-odd-spreg -modd-spreg -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -fno-integrated-as -mno-odd-spreg -modd-spreg -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=ODDSPREG --implicit-check-not=-mno-odd-spreg %s
// ODDSPREG: as{{(.exe)?}}"
// ODDSPREG: -modd-spreg
//
-// RUN: %clang -target mips-linux-gnu -### -no-integrated-as -modd-spreg -mno-odd-spreg -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -fno-integrated-as -modd-spreg -mno-odd-spreg -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=NOODDSPREG --implicit-check-not=-modd-spreg %s
// NOODDSPREG: as{{(.exe)?}}"
// NOODDSPREG: -mno-odd-spreg
//
-// RUN: %clang -target mips-linux-gnu -### -no-integrated-as -mdouble-float -msingle-float -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -fno-integrated-as -mdouble-float -msingle-float -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=SINGLEFLOAT --implicit-check-not=-mdouble-float %s
// SINGLEFLOAT: as{{(.exe)?}}"
// SINGLEFLOAT: -msingle-float
//
-// RUN: %clang -target mips-linux-gnu -### -no-integrated-as -msingle-float -mdouble-float -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -fno-integrated-as -msingle-float -mdouble-float -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=DOUBLEFLOAT --implicit-check-not=-msingle-float %s
// DOUBLEFLOAT: as{{(.exe)?}}"
// DOUBLEFLOAT: -mdouble-float
//
-// RUN: %clang -target mips-linux-gnu -### -no-integrated-as -msoft-float -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -fno-integrated-as -msoft-float -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=SOFTFLOAT-IMPLICIT-FPXX --implicit-check-not=-mfpxx %s
// SOFTFLOAT-IMPLICIT-FPXX: as{{(.exe)?}}"
// SOFTFLOAT-IMPLICIT-FPXX: -msoft-float
//
-// RUN: %clang -target mips-linux-gnu -### -no-integrated-as -msoft-float -mfpxx -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -fno-integrated-as -msoft-float -mfpxx -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=SOFTFLOAT-EXPLICIT-FPXX %s
// SOFTFLOAT-EXPLICIT-FPXX: as{{(.exe)?}}"
// SOFTFLOAT-EXPLICIT-FPXX: -mfpxx
// SOFTFLOAT-EXPLICIT-FPXX: -msoft-float
//
-// RUN: %clang -target mips-mti-linux-gnu -### -no-integrated-as -msoft-float -c %s 2>&1 \
+// RUN: %clang --target=mips-mti-linux-gnu -### -fno-integrated-as -msoft-float -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MTI-SOFTFLOAT-IMPLICIT-FPXX --implicit-check-not=-mfpxx %s
// MTI-SOFTFLOAT-IMPLICIT-FPXX: as{{(.exe)?}}"
// MTI-SOFTFLOAT-IMPLICIT-FPXX: -msoft-float
//
-// RUN: %clang -target mips-mti-linux-gnu -### -no-integrated-as -msoft-float -mfpxx -c %s 2>&1 \
+// RUN: %clang --target=mips-mti-linux-gnu -### -fno-integrated-as -msoft-float -mfpxx -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MTI-SOFTFLOAT-EXPLICIT-FPXX %s
// MTI-SOFTFLOAT-EXPLICIT-FPXX: as{{(.exe)?}}"
// MTI-SOFTFLOAT-EXPLICIT-FPXX: -mfpxx
// MTI-SOFTFLOAT-EXPLICIT-FPXX: -msoft-float
//
-// RUN: %clang -target mips-img-linux-gnu -### -no-integrated-as -msoft-float -c %s 2>&1 \
+// RUN: %clang --target=mips-img-linux-gnu -### -fno-integrated-as -msoft-float -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=IMG-SOFTFLOAT-IMPLICIT-FPXX --implicit-check-not=-mfpxx %s
// IMG-SOFTFLOAT-IMPLICIT-FPXX: as{{(.exe)?}}"
// IMG-SOFTFLOAT-IMPLICIT-FPXX: -msoft-float
//
-// RUN: %clang -target mips-img-linux-gnu -### -no-integrated-as -msoft-float -mfpxx -c %s 2>&1 \
+// RUN: %clang --target=mips-img-linux-gnu -### -fno-integrated-as -msoft-float -mfpxx -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=IMG-SOFTFLOAT-EXPLICIT-FPXX %s
// IMG-SOFTFLOAT-EXPLICIT-FPXX: as{{(.exe)?}}"
// IMG-SOFTFLOAT-EXPLICIT-FPXX: -mfpxx
// IMG-SOFTFLOAT-EXPLICIT-FPXX: -msoft-float
//
-// RUN: %clang -target mips-linux-gnu -### -no-integrated-as -msingle-float -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -fno-integrated-as -msingle-float -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=SINGLEFLOAT-IMPLICIT-FPXX --implicit-check-not=-mfpxx %s
// SINGLEFLOAT-IMPLICIT-FPXX: as{{(.exe)?}}"
// SINGLEFLOAT-IMPLICIT-FPXX: -msingle-float
//
-// RUN: %clang -target mips-linux-gnu -### -no-integrated-as -msingle-float -mfpxx -c %s 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -fno-integrated-as -msingle-float -mfpxx -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=SINGLEFLOAT-EXPLICIT-FPXX %s
// SINGLEFLOAT-EXPLICIT-FPXX: as{{(.exe)?}}"
// SINGLEFLOAT-EXPLICIT-FPXX: -mfpxx
// SINGLEFLOAT-EXPLICIT-FPXX: -msingle-float
//
-// RUN: %clang -target mips-mti-linux-gnu -### -no-integrated-as -msingle-float -c %s 2>&1 \
+// RUN: %clang --target=mips-mti-linux-gnu -### -fno-integrated-as -msingle-float -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MTI-SINGLEFLOAT-IMPLICIT-FPXX --implicit-check-not=-mfpxx %s
// MTI-SINGLEFLOAT-IMPLICIT-FPXX: as{{(.exe)?}}"
// MTI-SINGLEFLOAT-IMPLICIT-FPXX: -msingle-float
//
-// RUN: %clang -target mips-mti-linux-gnu -### -no-integrated-as -msingle-float -mfpxx -c %s 2>&1 \
+// RUN: %clang --target=mips-mti-linux-gnu -### -fno-integrated-as -msingle-float -mfpxx -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=MTI-SINGLEFLOAT-EXPLICIT-FPXX %s
// MTI-SINGLEFLOAT-EXPLICIT-FPXX: as{{(.exe)?}}"
// MTI-SINGLEFLOAT-EXPLICIT-FPXX: -mfpxx
// MTI-SINGLEFLOAT-EXPLICIT-FPXX: -msingle-float
//
-// RUN: %clang -target mips-img-linux-gnu -### -no-integrated-as -msingle-float -c %s 2>&1 \
+// RUN: %clang --target=mips-img-linux-gnu -### -fno-integrated-as -msingle-float -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=IMG-SINGLEFLOAT-IMPLICIT-FPXX --implicit-check-not=-mfpxx %s
// IMG-SINGLEFLOAT-IMPLICIT-FPXX: as{{(.exe)?}}"
// IMG-SINGLEFLOAT-IMPLICIT-FPXX: -msingle-float
//
-// RUN: %clang -target mips-img-linux-gnu -### -no-integrated-as -msingle-float -mfpxx -c %s 2>&1 \
+// RUN: %clang --target=mips-img-linux-gnu -### -fno-integrated-as -msingle-float -mfpxx -c %s 2>&1 \
// RUN: | FileCheck -check-prefix=IMG-SINGLEFLOAT-EXPLICIT-FPXX %s
// IMG-SINGLEFLOAT-EXPLICIT-FPXX: as{{(.exe)?}}"
// IMG-SINGLEFLOAT-EXPLICIT-FPXX: -mfpxx
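The external-assembler behavior these checks pin down can be observed directly; a minimal sketch, assuming a clang build with the MIPS target registered and GNU as on PATH (foo.c is a placeholder and the as path and output are abridged):

    $ clang --target=mips-linux-gnu -fno-integrated-as -msoft-float -mfpxx -### -c foo.c
    ".../as" ... "-mfpxx" ... "-msoft-float" ...

With -fno-integrated-as the driver schedules a separate as job, which is why each prefix first matches the quoted as{{(.exe)?}}" tool name and only then the forwarded float options.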
diff --git a/clang/test/Driver/mips-features.c b/clang/test/Driver/mips-features.c
index 5e92dccaa02a..ee370051d1eb 100644
--- a/clang/test/Driver/mips-features.c
+++ b/clang/test/Driver/mips-features.c
@@ -1,249 +1,255 @@
// Check handling MIPS specific features options.
//
// -mabicalls
-// RUN: %clang -target mips-linux-gnu -### -c %s -mno-abicalls -mabicalls 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -mno-abicalls -mabicalls 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MABICALLS %s
// CHECK-MABICALLS: "-target-feature" "-noabicalls"
//
// -mno-abicalls
-// RUN: %clang -target mips-linux-gnu -### -c %s -mabicalls -mno-abicalls 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -mabicalls -mno-abicalls 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MNOABICALLS %s
// CHECK-MNOABICALLS: "-target-feature" "+noabicalls"
//
// -mno-abicalls non-PIC N64
-// RUN: %clang -target mips64-linux-gnu -### -c -fno-PIC -mno-abicalls %s 2>&1 \
+// RUN: %clang --target=mips64-linux-gnu -### -c -fno-PIC -mno-abicalls %s 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MNOABICALLS-N64NPIC %s
// CHECK-MNOABICALLS-N64NPIC: "-target-feature" "+noabicalls"
//
// -mgpopt
-// RUN: %clang -target mips-linux-gnu -### -c %s -mno-gpopt -mgpopt -Wno-unsupported-gpopt 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -mno-gpopt -mgpopt -Wno-unsupported-gpopt 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MGPOPT-DEF-ABICALLS %s
// CHECK-MGPOPT-DEF-ABICALLS-NOT: "-mllvm" "-mgpopt"
//
// -mabicalls -mgpopt
-// RUN: %clang -target mips-linux-gnu -### -c %s -mabicalls -mno-gpopt -mgpopt -Wno-unsupported-gpopt 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -mabicalls -mno-gpopt -mgpopt -Wno-unsupported-gpopt 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MGPOPT-EXPLICIT-ABICALLS %s
// CHECK-MGPOPT-EXPLICIT-ABICALLS-NOT: "-mllvm" "-mgpopt"
//
// -mno-abicalls -mgpopt
-// RUN: %clang -target mips-linux-gnu -### -c %s -mno-abicalls -mno-gpopt -mgpopt 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -mno-abicalls -mno-gpopt -mgpopt 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MGPOPT %s
// CHECK-MGPOPT: "-mllvm" "-mgpopt"
//
// -mno-abicalls -mno-gpopt
-// RUN: %clang -target mips-linux-gnu -### -c %s -mno-abicalls -mgpopt -mno-gpopt 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -mno-abicalls -mgpopt -mno-gpopt 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MNOGPOPT %s
// CHECK-MNOGPOPT-NOT: "-mllvm" "-mgpopt"
//
// -mno-abicalls
-// RUN: %clang -target mips-linux-gnu -### -c %s -mno-abicalls 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -mno-abicalls 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MGPOPTDEF %s
// CHECK-MGPOPTDEF: "-mllvm" "-mgpopt"
//
// -mgpopt -mno-abicalls -mlocal-sdata
-// RUN: %clang -target mips-linux-gnu -### -c %s -mno-abicalls -mno-gpopt -mgpopt -mno-local-sdata -mlocal-sdata 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -mno-abicalls -mno-gpopt -mgpopt -mno-local-sdata -mlocal-sdata 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MLOCALSDATA %s
// CHECK-MLOCALSDATA: "-mllvm" "-mlocal-sdata=1"
//
// -mgpopt -mno-abicalls -mno-local-sdata
-// RUN: %clang -target mips-linux-gnu -### -c %s -mno-abicalls -mno-gpopt -mgpopt -mlocal-sdata -mno-local-sdata 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -mno-abicalls -mno-gpopt -mgpopt -mlocal-sdata -mno-local-sdata 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MNOLOCALSDATA %s
// CHECK-MNOLOCALSDATA: "-mllvm" "-mlocal-sdata=0"
//
// -mgpopt -mno-abicalls
-// RUN: %clang -target mips-linux-gnu -### -c %s -mno-abicalls -mgpopt 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -mno-abicalls -mgpopt 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MLOCALSDATADEF %s
// CHECK-MLOCALSDATADEF-NOT: "-mllvm" "-mlocal-sdata"
//
// -mno-abicalls -mgpopt -mextern-sdata
-// RUN: %clang -target mips-linux-gnu -### -c %s -mno-abicalls -mgpopt -mno-extern-sdata -mextern-sdata 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -mno-abicalls -mgpopt -mno-extern-sdata -mextern-sdata 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MEXTERNSDATA %s
// CHECK-MEXTERNSDATA: "-mllvm" "-mextern-sdata=1"
//
// -mno-abicalls -mgpopt -mno-extern-sdata
-// RUN: %clang -target mips-linux-gnu -### -c %s -mno-abicalls -mgpopt -mextern-sdata -mno-extern-sdata 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -mno-abicalls -mgpopt -mextern-sdata -mno-extern-sdata 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MNOEXTERNSDATA %s
// CHECK-MNOEXTERNSDATA: "-mllvm" "-mextern-sdata=0"
//
// -mno-abicalls -mgpopt
-// RUN: %clang -target mips-linux-gnu -### -c %s -mno-abicalls -mgpopt 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -mno-abicalls -mgpopt 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MEXTERNSDATADEF %s
// CHECK-MEXTERNSDATADEF-NOT: "-mllvm" "-mextern-sdata"
//
// -mno-abicalls -mgpopt -membedded-data
-// RUN: %clang -target mips-linux-gnu -### -c %s -mno-abicalls -mgpopt -mno-embedded-data -membedded-data 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -mno-abicalls -mgpopt -mno-embedded-data -membedded-data 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MEMBEDDEDDATA %s
// CHECK-MEMBEDDEDDATA: "-mllvm" "-membedded-data=1"
//
// -mno-abicalls -mgpopt -mno-embedded-data
-// RUN: %clang -target mips-linux-gnu -### -c %s -mno-abicalls -mgpopt -membedded-data -mno-embedded-data 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -mno-abicalls -mgpopt -membedded-data -mno-embedded-data 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MNOEMBEDDEDDATA %s
// CHECK-MNOEMBEDDEDDATA: "-mllvm" "-membedded-data=0"
//
// -mno-abicalls -mgpopt
-// RUN: %clang -target mips-linux-gnu -### -c %s -mno-abicalls -mgpopt 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -mno-abicalls -mgpopt 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MEMBEDDEDDATADEF %s
// CHECK-MEMBEDDEDDATADEF-NOT: "-mllvm" "-membedded-data"
//
// MIPS64 + N64: -fno-pic -> -mno-abicalls -mgpopt
-// RUN: %clang -target mips64-mti-elf -mabi=64 -### -c %s -fno-pic -mno-abicalls 2>&1 \
+// RUN: %clang --target=mips64-mti-elf -mabi=64 -### -c %s -fno-pic -mno-abicalls 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-N64-GPOPT %s
// CHECK-N64-GPOPT: "-target-feature" "+noabicalls"
// CHECK-N64-GPOPT: "-mllvm" "-mgpopt"
//
// MIPS64 + N64: -fno-pic -mno-gpopt
-// RUN: %clang -target mips64-mti-elf -mabi=64 -### -c %s -fno-pic -mno-abicalls -mno-gpopt 2>&1 \
+// RUN: %clang --target=mips64-mti-elf -mabi=64 -### -c %s -fno-pic -mno-abicalls -mno-gpopt 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-N64-MNO-GPOPT %s
// CHECK-N64-MNO-GPOPT: "-target-feature" "+noabicalls"
// CHECK-N64-MNO-GPOPT-NOT: "-mllvm" "-mgpopt"
//
// MIPS64 + N64: -mgpopt (-fpic is implicit)
-// RUN: %clang -target mips64-mti-linux-gnu -mabi=64 -### -c %s -mgpopt 2>&1 \
+// RUN: %clang --target=mips64-mti-linux-gnu -mabi=64 -### -c %s -mgpopt 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-N64-PIC-GPOPT %s
// CHECK-N64-PIC-GPOPT-NOT: "-mllvm" "-mgpopt"
// CHECK-N64-PIC-GPOPT: ignoring '-mgpopt' option as it cannot be used with the implicit usage of -mabicalls
//
// -mips16
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mno-mips16 -mips16 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MIPS16 %s
// CHECK-MIPS16: "-target-feature" "+mips16"
//
// -mno-mips16
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mips16 -mno-mips16 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NOMIPS16 %s
// CHECK-NOMIPS16: "-target-feature" "-mips16"
//
// -mmicromips
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mno-micromips -mmicromips 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MICROMIPS %s
// CHECK-MICROMIPS: "-target-feature" "+micromips"
//
// -mno-micromips
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mmicromips -mno-micromips 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NOMICROMIPS %s
// CHECK-NOMICROMIPS: "-target-feature" "-micromips"
//
// -mdsp
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mno-dsp -mdsp 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MDSP %s
// CHECK-MDSP: "-target-feature" "+dsp"
//
// -mno-dsp
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mdsp -mno-dsp 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NOMDSP %s
// CHECK-NOMDSP: "-target-feature" "-dsp"
//
// -mdspr2
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mno-dspr2 -mdspr2 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MDSPR2 %s
// CHECK-MDSPR2: "-target-feature" "+dspr2"
//
// -mno-dspr2
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mdspr2 -mno-dspr2 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NOMDSPR2 %s
// CHECK-NOMDSPR2: "-target-feature" "-dspr2"
//
// -mmsa
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mno-msa -mmsa 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MMSA %s
// CHECK-MMSA: "-target-feature" "+msa"
//
// -mno-msa
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mmsa -mno-msa 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NOMMSA %s
// CHECK-NOMMSA: "-target-feature" "-msa"
//
+// -mmsa (bare -mmsa also enables +fp64)
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
+// RUN: -mmsa 2>&1 \
+// RUN: | FileCheck --check-prefix=CHECK-MMSA-MFP64 %s
+// CHECK-MMSA-MFP64: "-target-feature" "+msa" "-target-feature" "+fp64"
+//
// -mmt
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mno-mt -mmt 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MMT %s
// CHECK-MMT: "-target-feature" "+mt"
//
// -mno-mt
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mmt -mno-mt 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NOMMT %s
// CHECK-NOMMT: "-target-feature" "-mt"
//
// -modd-spreg
-// RUN: %clang -target mips-linux-gnu -### -c %s -mno-odd-spreg -modd-spreg 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -mno-odd-spreg -modd-spreg 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MODDSPREG %s
// CHECK-MODDSPREG: "-target-feature" "-nooddspreg"
//
// -mno-odd-spreg
-// RUN: %clang -target mips-linux-gnu -### -c %s -modd-spreg -mno-odd-spreg 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -modd-spreg -mno-odd-spreg 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NOMODDSPREG %s
// CHECK-NOMODDSPREG: "-target-feature" "+nooddspreg"
//
// -mfpxx
-// RUN: %clang -target mips-linux-gnu -### -c %s -mfpxx 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -mfpxx 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MFPXX %s
// CHECK-MFPXX: "-target-feature" "+fpxx"
// CHECK-MFPXX: "-target-feature" "+nooddspreg"
//
// -mfpxx -modd-spreg
-// RUN: %clang -target mips-linux-gnu -### -c %s -mfpxx -modd-spreg 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -mfpxx -modd-spreg 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MFPXX-ODDSPREG %s
// CHECK-MFPXX-ODDSPREG: "-target-feature" "+fpxx"
// CHECK-MFPXX-ODDSPREG: "-target-feature" "-nooddspreg"
//
// -mfp64
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mfp32 -mfp64 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MFP64 %s
// CHECK-MFP64: "-target-feature" "+fp64"
//
// -mfp32
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mfp64 -mfp32 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NOMFP64 %s
// CHECK-NOMFP64: "-target-feature" "-fp64"
//
// -mnan=2008
-// RUN: %clang -target mips-linux-gnu -march=mips32r3 -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -march=mips32r3 -### -c %s \
// RUN: -mnan=legacy -mnan=2008 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NAN2008 %s
// CHECK-NAN2008: "-target-feature" "+nan2008" "-target-feature" "+abs2008"
//
// -mnan=2008 -mabs=legacy
-// RUN: %clang -target mips-linux-gnu -march=mips32r3 -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -march=mips32r3 -### -c %s \
// RUN: -mabs=legacy -mnan=2008 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-ABSLEGACYNAN2008 %s
// CHECK-ABSLEGACYNAN2008: "-target-feature" "+nan2008" "-target-feature" "-abs2008"
//
// -mnan=legacy
-// RUN: %clang -target mips-linux-gnu -march=mips32r3 -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -march=mips32r3 -### -c %s \
// RUN: -mnan=2008 -mnan=legacy 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NANLEGACY %s
// CHECK-NANLEGACY: "-target-feature" "-nan2008"
//
// -mabs=2008 on pre R2
-// RUN: %clang -target mips-linux-gnu -march=mips32 -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -march=mips32 -### -c %s \
// RUN: -mabs=2008 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-ABSLEGACY %s
//
// -mabs=2008
-// RUN: %clang -target mips-linux-gnu -march=mips32r3 -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -march=mips32r3 -### -c %s \
// RUN: -mabs=2008 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-ABS2008 %s
//
// -mabs=legacy
-// RUN: %clang -target mips-linux-gnu -march=mips32r3 -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -march=mips32r3 -### -c %s \
// RUN: -mabs=legacy 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-ABSLEGACY %s
//
// -mabs=legacy on R6
-// RUN: %clang -target mips-linux-gnu -march=mips32r6 -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -march=mips32r6 -### -c %s \
// RUN: -mabs=legacy 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-ABS2008 %s
//
@@ -253,147 +259,147 @@
// CHECK-ABS2008-NOT: "-target-feature" "-abs2008"
//
// -mcompact-branches=never
-// RUN: %clang -target mips-linux-gnu -march=mips32r6 -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -march=mips32r6 -### -c %s \
// RUN: -mcompact-branches=never 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-CBNEVER %s
// CHECK-CBNEVER: "-mllvm" "-mips-compact-branches=never"
//
// -mcompact-branches=optimal
-// RUN: %clang -target mips-linux-gnu -march=mips32r6 -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -march=mips32r6 -### -c %s \
// RUN: -mcompact-branches=optimal 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-CBOPTIMAL %s
// CHECK-CBOPTIMAL: "-mllvm" "-mips-compact-branches=optimal"
//
// -mcompact-branches=always
-// RUN: %clang -target mips-linux-gnu -march=mips32r6 -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -march=mips32r6 -### -c %s \
// RUN: -mcompact-branches=always 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-CBALWAYS %s
// CHECK-CBALWAYS: "-mllvm" "-mips-compact-branches=always"
//
// -mxgot
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mno-xgot -mxgot 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-XGOT %s
// CHECK-XGOT: "-target-feature" "+xgot"
//
// -mno-xgot
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mxgot -mno-xgot 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NOXGOT %s
// CHECK-NOXGOT: "-target-feature" "-xgot"
//
// -mldc1-sdc1
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mno-ldc1-sdc1 -mldc1-sdc1 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-LDC1SDC1 %s
// CHECK-LDC1SDC1-NOT: "-mllvm" "-mno-ldc1-sdc1"
//
// -mno-ldc1-sdc1
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mldc1-sdc1 -mno-ldc1-sdc1 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NOLDC1SDC1 %s
// CHECK-NOLDC1SDC1: "-mllvm" "-mno-ldc1-sdc1"
//
// -mcheck-zero-division
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mno-check-zero-division -mcheck-zero-division 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-ZERODIV %s
// CHECK-ZERODIV-NOT: "-mllvm" "-mno-check-zero-division"
//
// -mno-check-zero-division
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -mcheck-zero-division -mno-check-zero-division 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NOZERODIV %s
// CHECK-NOZERODIV: "-mllvm" "-mno-check-zero-division"
//
// -G
-// RUN: %clang -target mips-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-linux-gnu -### -c %s \
// RUN: -G 16 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MIPS-G %s
// CHECK-MIPS-G: "-mllvm" "-mips-ssection-threshold=16"
//
// -msoft-float (unknown vendor)
-// RUN: %clang -target mips-linux-gnu -### -c %s -msoft-float 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -msoft-float 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-SOFTFLOAT %s
// CHECK-SOFTFLOAT: "-target-feature" "+soft-float"
// CHECK-SOFTFLOAT-NOT: "-target-feature" "+fpxx"
//
// -msoft-float -mfpxx (unknown vendor)
-// RUN: %clang -target mips-linux-gnu -### -c %s -msoft-float -mfpxx 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -msoft-float -mfpxx 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-SOFTFLOAT-FPXX %s
// CHECK-SOFTFLOAT-FPXX: "-target-feature" "+soft-float"
// CHECK-SOFTFLOAT-FPXX: "-target-feature" "+fpxx"
//
// -msoft-float (MTI)
-// RUN: %clang -target mips-mti-linux-gnu -### -c %s -msoft-float 2>&1 \
+// RUN: %clang --target=mips-mti-linux-gnu -### -c %s -msoft-float 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MTI-SOFTFLOAT %s
// CHECK-MTI-SOFTFLOAT: "-target-feature" "+soft-float"
// CHECK-MTI-SOFTFLOAT-NOT: "-target-feature" "+fpxx"
//
// -msoft-float -mfpxx (MTI)
-// RUN: %clang -target mips-mti-linux-gnu -### -c %s -msoft-float -mfpxx 2>&1 \
+// RUN: %clang --target=mips-mti-linux-gnu -### -c %s -msoft-float -mfpxx 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MTI-SOFTFLOAT-FPXX %s
// CHECK-MTI-SOFTFLOAT-FPXX: "-target-feature" "+soft-float"
// CHECK-MTI-SOFTFLOAT-FPXX: "-target-feature" "+fpxx"
//
// -msoft-float (IMG)
-// RUN: %clang -target mips-img-linux-gnu -### -c %s -msoft-float 2>&1 \
+// RUN: %clang --target=mips-img-linux-gnu -### -c %s -msoft-float 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-IMG-SOFTFLOAT %s
// CHECK-IMG-SOFTFLOAT: "-target-feature" "+soft-float"
// CHECK-IMG-SOFTFLOAT-NOT: "-target-feature" "+fpxx"
//
// -msoft-float -mfpxx (IMG)
-// RUN: %clang -target mips-img-linux-gnu -### -c %s -msoft-float -mfpxx 2>&1 \
+// RUN: %clang --target=mips-img-linux-gnu -### -c %s -msoft-float -mfpxx 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-IMG-SOFTFLOAT-FPXX %s
// CHECK-IMG-SOFTFLOAT-FPXX: "-target-feature" "+soft-float"
// CHECK-IMG-SOFTFLOAT-FPXX: "-target-feature" "+fpxx"
//
// -msingle-float (unknown vendor)
-// RUN: %clang -target mips-linux-gnu -### -c %s -msingle-float 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -msingle-float 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-SINGLEFLOAT %s
// CHECK-SINGLEFLOAT: "-target-feature" "+single-float"
// CHECK-SINGLEFLOAT-NOT: "-target-feature" "+fpxx"
//
// -msingle-float -mfpxx (unknown vendor)
-// RUN: %clang -target mips-linux-gnu -### -c %s -msingle-float -mfpxx 2>&1 \
+// RUN: %clang --target=mips-linux-gnu -### -c %s -msingle-float -mfpxx 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-SINGLEFLOAT-FPXX %s
// CHECK-SINGLEFLOAT-FPXX: "-target-feature" "+single-float"
// CHECK-SINGLEFLOAT-FPXX: "-target-feature" "+fpxx"
//
// -msingle-float (MTI)
-// RUN: %clang -target mips-mti-linux-gnu -### -c %s -msingle-float 2>&1 \
+// RUN: %clang --target=mips-mti-linux-gnu -### -c %s -msingle-float 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MTI-SINGLEFLOAT %s
// CHECK-MTI-SINGLEFLOAT: "-target-feature" "+single-float"
// CHECK-MTI-SINGLEFLOAT-NOT: "-target-feature" "+fpxx"
//
// -msingle-float -mfpxx (MTI)
-// RUN: %clang -target mips-mti-linux-gnu -### -c %s -msingle-float -mfpxx 2>&1 \
+// RUN: %clang --target=mips-mti-linux-gnu -### -c %s -msingle-float -mfpxx 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-MTI-SINGLEFLOAT-FPXX %s
// CHECK-MTI-SINGLEFLOAT-FPXX: "-target-feature" "+single-float"
// CHECK-MTI-SINGLEFLOAT-FPXX: "-target-feature" "+fpxx"
//
// -msingle-float (IMG)
-// RUN: %clang -target mips-img-linux-gnu -### -c %s -msingle-float 2>&1 \
+// RUN: %clang --target=mips-img-linux-gnu -### -c %s -msingle-float 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-IMG-SINGLEFLOAT %s
// CHECK-IMG-SINGLEFLOAT: "-target-feature" "+single-float"
// CHECK-IMG-SINGLEFLOAT-NOT: "-target-feature" "+fpxx"
//
// -msingle-float -mfpxx (IMG)
-// RUN: %clang -target mips-img-linux-gnu -### -c %s -msingle-float -mfpxx 2>&1 \
+// RUN: %clang --target=mips-img-linux-gnu -### -c %s -msingle-float -mfpxx 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-IMG-SINGLEFLOAT-FPXX %s
// CHECK-IMG-SINGLEFLOAT-FPXX: "-target-feature" "+single-float"
// CHECK-IMG-SINGLEFLOAT-FPXX: "-target-feature" "+fpxx"
// -mlong-calls
-// RUN: %clang -target mips-img-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-img-linux-gnu -### -c %s \
// RUN: -mno-abicalls -mlong-calls 2>&1 \
// RUN: | FileCheck --check-prefix=LONG-CALLS-ON %s
-// RUN: %clang -target mips-img-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-img-linux-gnu -### -c %s \
// RUN: -mno-abicalls -mno-long-calls 2>&1 \
// RUN: | FileCheck --check-prefix=LONG-CALLS-OFF %s
-// RUN: %clang -target mips-img-linux-gnu -### -c %s 2>&1 \
+// RUN: %clang --target=mips-img-linux-gnu -### -c %s 2>&1 \
// RUN: | FileCheck --check-prefix=LONG-CALLS-DEF %s
-// RUN: %clang -target mips-img-linux-gnu -### -c %s -mlong-calls 2>&1 \
+// RUN: %clang --target=mips-img-linux-gnu -### -c %s -mlong-calls 2>&1 \
// RUN: | FileCheck --check-prefix=LONG-CALLS-DEF %s
// LONG-CALLS-ON: "-target-feature" "+long-calls"
// LONG-CALLS-OFF: "-target-feature" "-long-calls"
@@ -410,81 +416,81 @@
// NO-BRANCH-LIKELY: argument unused during compilation: '-mno-branch-likely'
// -mindirect-jump=hazard
-// RUN: %clang -target mips-unknown-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-unknown-linux-gnu -### -c %s \
// RUN: -mindirect-jump=hazard 2>&1 \
// RUN: | FileCheck --check-prefix=INDIRECT-BH %s
// INDIRECT-BH: "-target-feature" "+use-indirect-jump-hazard"
//
// -mcrc
-// RUN: %clang -target mips-unknown-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-unknown-linux-gnu -### -c %s \
// RUN: -mno-crc -mcrc 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-CRC %s
// CHECK-CRC: "-target-feature" "+crc"
//
// -mno-crc
-// RUN: %clang -target mips-unknown-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-unknown-linux-gnu -### -c %s \
// RUN: -mcrc -mno-crc 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NO-CRC %s
// CHECK-NO-CRC: "-target-feature" "-crc"
//
// -mvirt
-// RUN: %clang -target mips-unknown-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-unknown-linux-gnu -### -c %s \
// RUN: -mno-virt -mvirt 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-VIRT %s
// CHECK-VIRT: "-target-feature" "+virt"
//
// -mno-virt
-// RUN: %clang -target mips-unknown-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-unknown-linux-gnu -### -c %s \
// RUN: -mvirt -mno-virt 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NO-VIRT %s
// CHECK-NO-VIRT: "-target-feature" "-virt"
//
// -mginv
-// RUN: %clang -target mips-unknown-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-unknown-linux-gnu -### -c %s \
// RUN: -mno-ginv -mginv 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-GINV %s
// CHECK-GINV: "-target-feature" "+ginv"
//
// -mno-ginv
-// RUN: %clang -target mips-unknown-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-unknown-linux-gnu -### -c %s \
// RUN: -mginv -mno-ginv 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NO-GINV %s
// CHECK-NO-GINV: "-target-feature" "-ginv"
//
// -mrelax-pic-calls
-// RUN: %clang -target mips-unknown-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-unknown-linux-gnu -### -c %s \
// RUN: -mno-relax-pic-calls -mrelax-pic-calls 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-RELAX-PIC-CALLS %s
// CHECK-RELAX-PIC-CALLS-NOT: "-mllvm" "-mips-jalr-reloc=0"
//
// -mno-relax-pic-calls
-// RUN: %clang -target mips-unknown-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-unknown-linux-gnu -### -c %s \
// RUN: -mrelax-pic-calls -mno-relax-pic-calls 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NO-RELAX-PIC-CALLS %s
// CHECK-NO-RELAX-PIC-CALLS: "-mllvm" "-mips-jalr-reloc=0"
//
// -mno-unaligned-access
-// RUN: %clang -target mips-unknown-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-unknown-linux-gnu -### -c %s \
// RUN: -munaligned-access -mno-strict-align \
// RUN: -mno-unaligned-access 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-STRICT-ALIGN %s
// CHECK-STRICT-ALIGN: "-target-feature" "+strict-align"
//
// -munaligned-access
-// RUN: %clang -target mips-unknown-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-unknown-linux-gnu -### -c %s \
// RUN: -mno-unaligned-access -mstrict-align \
// RUN: -munaligned-access 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NO-STRICT-ALIGN %s
// CHECK-NO-STRICT-ALIGN: "-target-feature" "-strict-align"
//
// -mstrict-align
-// RUN: %clang -target mips-unknown-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-unknown-linux-gnu -### -c %s \
// RUN: -munaligned-access -mno-strict-align \
// RUN: -mstrict-align 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-STRICT-ALIGN %s
//
// -mno-strict-align
-// RUN: %clang -target mips-unknown-linux-gnu -### -c %s \
+// RUN: %clang --target=mips-unknown-linux-gnu -### -c %s \
// RUN: -mno-unaligned-access -mstrict-align \
// RUN: -mno-strict-align 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-NO-STRICT-ALIGN %s
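Most pairs in mips-features.c exercise the driver's last-option-wins rule: the opposite flag is passed first so the final flag demonstrably overrides it. A sketch of one such pair, assuming a MIPS-capable clang (foo.c is a placeholder; only the relevant cc1 fragment is shown):

    $ clang --target=mips-linux-gnu -mabicalls -mno-abicalls -### -c foo.c
    "-cc1" ... "-target-feature" "+noabicalls" ...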
diff --git a/clang/test/Driver/mips-float.c b/clang/test/Driver/mips-float.c
index 2f1b813a1532..e33400845b1d 100644
--- a/clang/test/Driver/mips-float.c
+++ b/clang/test/Driver/mips-float.c
@@ -3,13 +3,13 @@
//
// Default
// RUN: %clang -c %s -### -o %t.o 2>&1 \
-// RUN: -target mips-linux-gnu \
+// RUN: --target=mips-linux-gnu \
// RUN: | FileCheck --check-prefix=CHECK-DEF %s
// CHECK-DEF: "-mfloat-abi" "hard"
//
// Default on FreeBSD
// RUN: %clang -c %s -### -o %t.o 2>&1 \
-// RUN: -target mips-freebsd12 \
+// RUN: --target=mips-freebsd12 \
// RUN: | FileCheck --check-prefix=DEF-FREEBSD %s
// DEF-FREEBSD: "-target-feature" "+soft-float"
// DEF-FREEBSD: "-msoft-float"
@@ -17,13 +17,13 @@
//
// -mhard-float
// RUN: %clang -c %s -### -o %t.o 2>&1 \
-// RUN: -target mips-linux-gnu -mhard-float \
+// RUN: --target=mips-linux-gnu -mhard-float \
// RUN: | FileCheck --check-prefix=CHECK-HARD %s
// CHECK-HARD: "-mfloat-abi" "hard"
//
// -msoft-float
// RUN: %clang -c %s -### -o %t.o 2>&1 \
-// RUN: -target mips-linux-gnu -msoft-float \
+// RUN: --target=mips-linux-gnu -msoft-float \
// RUN: | FileCheck --check-prefix=CHECK-SOFT %s
// CHECK-SOFT: "-target-feature" "+soft-float"
// CHECK-SOFT: "-msoft-float"
@@ -31,13 +31,13 @@
//
// -mfloat-abi=hard
// RUN: %clang -c %s -### -o %t.o 2>&1 \
-// RUN: -target mips-linux-gnu -mfloat-abi=hard \
+// RUN: --target=mips-linux-gnu -mfloat-abi=hard \
// RUN: | FileCheck --check-prefix=CHECK-ABI-HARD %s
// CHECK-ABI-HARD: "-mfloat-abi" "hard"
//
// -mfloat-abi=soft
// RUN: %clang -c %s -### -o %t.o 2>&1 \
-// RUN: -target mips-linux-gnu -mfloat-abi=soft \
+// RUN: --target=mips-linux-gnu -mfloat-abi=soft \
// RUN: | FileCheck --check-prefix=CHECK-ABI-SOFT %s
// CHECK-ABI-SOFT: "-target-feature" "+soft-float"
// CHECK-ABI-SOFT: "-msoft-float"
@@ -45,42 +45,42 @@
//
// -mdouble-float
// RUN: %clang -c %s -### -o %t.o 2>&1 \
-// RUN: -target mips-linux-gnu -msingle-float -mdouble-float \
+// RUN: --target=mips-linux-gnu -msingle-float -mdouble-float \
// RUN: | FileCheck --check-prefix=CHECK-ABI-DOUBLE %s
// CHECK-ABI-DOUBLE: "-mfloat-abi" "hard"
// CHECK-ABI-DOUBLE-NOT: "+single-float"
//
// -msingle-float
// RUN: %clang -c %s -### -o %t.o 2>&1 \
-// RUN: -target mips-linux-gnu -mdouble-float -msingle-float \
+// RUN: --target=mips-linux-gnu -mdouble-float -msingle-float \
// RUN: | FileCheck --check-prefix=CHECK-ABI-SINGLE %s
// CHECK-ABI-SINGLE: "-target-feature" "+single-float"
// CHECK-ABI-SINGLE: "-mfloat-abi" "hard"
//
// -msoft-float -msingle-float
// RUN: %clang -c %s -### -o %t.o 2>&1 \
-// RUN: -target mips-linux-gnu -msoft-float -msingle-float \
+// RUN: --target=mips-linux-gnu -msoft-float -msingle-float \
// RUN: | FileCheck --check-prefix=CHECK-ABI-SOFT-SINGLE %s
// CHECK-ABI-SOFT-SINGLE: "-target-feature" "+single-float"
// CHECK-ABI-SOFT-SINGLE: "-mfloat-abi" "soft"
//
// Default -mips16
// RUN: %clang -c %s -### -o %t.o 2>&1 \
-// RUN: -target mips-linux-gnu -mips16 \
+// RUN: --target=mips-linux-gnu -mips16 \
// RUN: | FileCheck --check-prefix=CHECK-DEF-MIPS16 %s
// CHECK-DEF-MIPS16: "-target-feature" "+mips16"
// CHECK-DEF-MIPS16: "-mfloat-abi" "hard"
//
// -mhard-float -mips16
// RUN: %clang -c %s -### -o %t.o 2>&1 \
-// RUN: -target mips-linux-gnu -mhard-float -mips16 \
+// RUN: --target=mips-linux-gnu -mhard-float -mips16 \
// RUN: | FileCheck --check-prefix=CHECK-HARD-MIPS16 %s
// CHECK-HARD-MIPS16: "-target-feature" "+mips16"
// CHECK-HARD-MIPS16: "-mfloat-abi" "hard"
//
// -msoft-float -mips16
// RUN: %clang -c %s -### -o %t.o 2>&1 \
-// RUN: -target mips-linux-gnu -msoft-float -mips16 \
+// RUN: --target=mips-linux-gnu -msoft-float -mips16 \
// RUN: | FileCheck --check-prefix=CHECK-SOFT-MIPS16 %s
// CHECK-SOFT-MIPS16: "-target-feature" "+soft-float"
// CHECK-SOFT-MIPS16: "-target-feature" "+mips16"
@@ -89,14 +89,14 @@
//
// -mfloat-abi=hard -mips16
// RUN: %clang -c %s -### -o %t.o 2>&1 \
-// RUN: -target mips-linux-gnu -mfloat-abi=hard -mips16 \
+// RUN: --target=mips-linux-gnu -mfloat-abi=hard -mips16 \
// RUN: | FileCheck --check-prefix=CHECK-ABI-HARD-MIPS16 %s
// CHECK-ABI-HARD-MIPS16: "-target-feature" "+mips16"
// CHECK-ABI-HARD-MIPS16: "-mfloat-abi" "hard"
//
// -mfloat-abi=soft -mips16
// RUN: %clang -c %s -### -o %t.o 2>&1 \
-// RUN: -target mips-linux-gnu -mfloat-abi=soft -mips16 \
+// RUN: --target=mips-linux-gnu -mfloat-abi=soft -mips16 \
// RUN: | FileCheck --check-prefix=CHECK-ABI-SOFT-MIPS16 %s
// CHECK-ABI-SOFT-MIPS16: "-target-feature" "+soft-float"
// CHECK-ABI-SOFT-MIPS16: "-target-feature" "+mips16"
diff --git a/clang/test/Driver/mips-gpopt-warning.c b/clang/test/Driver/mips-gpopt-warning.c
index b6677413729f..2bd63b4d6518 100644
--- a/clang/test/Driver/mips-gpopt-warning.c
+++ b/clang/test/Driver/mips-gpopt-warning.c
@@ -1,6 +1,6 @@
// REQUIRES: mips-registered-target
-// RUN: %clang -### -c -target mips-mti-elf %s -mgpopt 2>&1 | FileCheck -check-prefix=IMPLICIT %s
+// RUN: %clang -### -c --target=mips-mti-elf %s -mgpopt 2>&1 | FileCheck -check-prefix=IMPLICIT %s
// IMPLICIT: warning: ignoring '-mgpopt' option as it cannot be used with the implicit usage of -mabicalls
-// RUN: %clang -### -c -target mips-mti-elf %s -mgpopt -mabicalls 2>&1 | FileCheck -check-prefix=EXPLICIT %s
+// RUN: %clang -### -c --target=mips-mti-elf %s -mgpopt -mabicalls 2>&1 | FileCheck -check-prefix=EXPLICIT %s
// EXPLICIT: warning: ignoring '-mgpopt' option as it cannot be used with -mabicalls
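The warning text depends on how -mabicalls got enabled: when it is merely implied by the triple, the diagnostic adds "the implicit usage of"; when it is spelled out, the qualifier is dropped. The implicit case can be reproduced as (requires a MIPS-registered clang; foo.c is a placeholder):

    $ clang --target=mips-mti-elf -mgpopt -### -c foo.c
    warning: ignoring '-mgpopt' option as it cannot be used with the implicit usage of -mabicalls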
diff --git a/clang/test/Driver/mips-ias-Wa.s b/clang/test/Driver/mips-ias-Wa.s
index bc65872d99c7..88846af60690 100644
--- a/clang/test/Driver/mips-ias-Wa.s
+++ b/clang/test/Driver/mips-ias-Wa.s
@@ -1,136 +1,136 @@
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=TRAP-DEFAULT %s
// TRAP-DEFAULT: -cc1as
// TRAP-DEFAULT-NOT: "-target-feature" "-use-tcc-in-div"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,--trap 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,--trap 2>&1 | \
// RUN: FileCheck -check-prefix=TRAP-ON %s
// TRAP-ON: -cc1as
// TRAP-ON: "-target-feature" "+use-tcc-in-div"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,--break 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,--break 2>&1 | \
// RUN: FileCheck -check-prefix=TRAP-OFF %s
// TRAP-OFF: -cc1as
// TRAP-OFF: "-target-feature" "-use-tcc-in-div"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,--trap,--break 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,--trap,--break 2>&1 | \
// RUN: FileCheck -check-prefix=TRAP-BOTH-TRAP-FIRST %s
// TRAP-BOTH-TRAP-FIRST: -cc1as
// TRAP-BOTH-TRAP-FIRST: "-target-feature" "+use-tcc-in-div" "-target-feature" "-use-tcc-in-div"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,--break,--trap 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,--break,--trap 2>&1 | \
// RUN: FileCheck -check-prefix=TRAP-BOTH-BREAK-FIRST %s
// TRAP-BOTH-BREAK-FIRST: -cc1as
// TRAP-BOTH-BREAK-FIRST: "-target-feature" "-use-tcc-in-div" "-target-feature" "+use-tcc-in-div"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=MSOFT-FLOAT-DEFAULT %s
// MSOFT-FLOAT-DEFAULT: -cc1as
// MSOFT-FLOAT-DEFAULT-NOT: "-target-feature" "-soft-float"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-msoft-float 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-msoft-float 2>&1 | \
// RUN: FileCheck -check-prefix=MSOFT-FLOAT-ON %s
// MSOFT-FLOAT-ON: -cc1as
// MSOFT-FLOAT-ON: "-target-feature" "+soft-float"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mhard-float 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mhard-float 2>&1 | \
// RUN: FileCheck -check-prefix=MSOFT-FLOAT-OFF %s
// MSOFT-FLOAT-OFF: -cc1as
// MSOFT-FLOAT-OFF: "-target-feature" "-soft-float"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-msoft-float,-mhard-float 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-msoft-float,-mhard-float 2>&1 | \
// RUN: FileCheck -check-prefix=MSOFT-FLOAT-BOTH-MSOFT-FLOAT-FIRST %s
// MSOFT-FLOAT-BOTH-MSOFT-FLOAT-FIRST: -cc1as
// MSOFT-FLOAT-BOTH-MSOFT-FLOAT-FIRST: "-target-feature" "+soft-float" "-target-feature" "-soft-float"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mhard-float,-msoft-float 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mhard-float,-msoft-float 2>&1 | \
// RUN: FileCheck -check-prefix=MSOFT-FLOAT-BOTH-MHARD-FLOAT-FIRST %s
// MSOFT-FLOAT-BOTH-MHARD-FLOAT-FIRST: -cc1as
// MSOFT-FLOAT-BOTH-MHARD-FLOAT-FIRST: "-target-feature" "-soft-float" "-target-feature" "+soft-float"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips1 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips1 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS1 %s
// MIPS1: -cc1as
// MIPS1: "-target-feature" "+mips1"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips2 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips2 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS2 %s
// MIPS2: -cc1as
// MIPS2: "-target-feature" "+mips2"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips3 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips3 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS3 %s
// MIPS3: -cc1as
// MIPS3: "-target-feature" "+mips3"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips4 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips4 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS4 %s
// MIPS4: -cc1as
// MIPS4: "-target-feature" "+mips4"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips5 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips5 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS5 %s
// MIPS5: -cc1as
// MIPS5: "-target-feature" "+mips5"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips32 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips32 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS32 %s
// MIPS32: -cc1as
// MIPS32: "-target-feature" "+mips32"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips32r2 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips32r2 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS32R2 %s
// MIPS32R2: -cc1as
// MIPS32R2: "-target-feature" "+mips32r2"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips32r3 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips32r3 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS32R3 %s
// MIPS32R3: -cc1as
// MIPS32R3: "-target-feature" "+mips32r3"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips32r5 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips32r5 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS32R5 %s
// MIPS32R5: -cc1as
// MIPS32R5: "-target-feature" "+mips32r5"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips32r6 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips32r6 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS32R6 %s
// MIPS32R6: -cc1as
// MIPS32R6: "-target-feature" "+mips32r6"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips64 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips64 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS64 %s
// MIPS64: -cc1as
// MIPS64: "-target-feature" "+mips64"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips64r2 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips64r2 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS64R2 %s
// MIPS64R2: -cc1as
// MIPS64R2: "-target-feature" "+mips64r2"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips64r3 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips64r3 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS64R3 %s
// MIPS64R3: -cc1as
// MIPS64R3: "-target-feature" "+mips64r3"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips64r5 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips64r5 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS64R5 %s
// MIPS64R5: -cc1as
// MIPS64R5: "-target-feature" "+mips64r5"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips64r6 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips64r6 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS64R6 %s
// MIPS64R6: -cc1as
// MIPS64R6: "-target-feature" "+mips64r6"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips64r2,-mips4 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips64r2,-mips4 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS64R2-MIPS4 %s
// MIPS64R2-MIPS4: -cc1as
// MIPS64R2-MIPS4-NOT: "-target-feature" "+mips64r2"
// MIPS64R2-MIPS4: "-target-feature" "+mips4"
// MIPS64R2-MIPS4-NOT: "-target-feature" "+mips64r2"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips64,-mips32,-mips32r2 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -Wa,-mips64,-mips32,-mips32r2 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS64-MIPS32-MIPS32R2 %s
// MIPS64-MIPS32-MIPS32R2: -cc1as
// MIPS64-MIPS32-MIPS32R2-NOT: "-target-feature" "+mips64"
diff --git a/clang/test/Driver/mips-integrated-as.s b/clang/test/Driver/mips-integrated-as.s
index e248ba7f77e9..1714596acca9 100644
--- a/clang/test/Driver/mips-integrated-as.s
+++ b/clang/test/Driver/mips-integrated-as.s
@@ -1,20 +1,20 @@
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=ABI-O32 %s
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mabi=32 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mabi=32 2>&1 | \
// RUN: FileCheck -check-prefix=ABI-O32 %s
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mabi=o32 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mabi=o32 2>&1 | \
// RUN: FileCheck -check-prefix=ABI-O32 %s
// ABI-O32: -cc1as
// ABI-O32: "-target-abi" "o32"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mabi=eabi 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mabi=eabi 2>&1 | \
// RUN: FileCheck -check-prefix=ABI-EABI32 %s
// ABI-EABI32: -cc1as
// ABI-EABI32: "-target-abi" "eabi"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mips64 -mabi=n32 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mips64 -mabi=n32 2>&1 | \
// RUN: FileCheck -check-prefix=ABI-N32 %s
-// RUN: %clang -target mips64-linux-gnu -### -fintegrated-as -c %s -mabi=n32 2>&1 | \
+// RUN: %clang --target=mips64-linux-gnu -### -fintegrated-as -c %s -mabi=n32 2>&1 | \
// RUN: FileCheck -check-prefix=ABI-N32 %s
// ABI-N32: -cc1as
// ABI-N32: "-target-abi" "n32"
@@ -22,284 +22,284 @@
// FIXME: We should also test '-target mips-linux-gnu -mips64' defaults to the
// default 64-bit ABI (N64 but GCC uses N32). It currently selects O32
// because of the triple.
-// RUN: %clang -target mips64-linux-gnu -### -fintegrated-as -c %s -mips64 2>&1 | \
+// RUN: %clang --target=mips64-linux-gnu -### -fintegrated-as -c %s -mips64 2>&1 | \
// RUN: FileCheck -check-prefix=ABI-N64 %s
//
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mips64 -mabi=64 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mips64 -mabi=64 2>&1 | \
// RUN: FileCheck -check-prefix=ABI-N64 %s
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mips64 -mabi=n64 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mips64 -mabi=n64 2>&1 | \
// RUN: FileCheck -check-prefix=ABI-N64 %s
-// RUN: %clang -target mips64-linux-gnu -### -fintegrated-as -c %s -mips64 -mabi=64 2>&1 | \
+// RUN: %clang --target=mips64-linux-gnu -### -fintegrated-as -c %s -mips64 -mabi=64 2>&1 | \
// RUN: FileCheck -check-prefix=ABI-N64 %s
-// RUN: %clang -target mips64-linux-gnu -### -fintegrated-as -c %s -mips64 -mabi=n64 2>&1 | \
+// RUN: %clang --target=mips64-linux-gnu -### -fintegrated-as -c %s -mips64 -mabi=n64 2>&1 | \
// RUN: FileCheck -check-prefix=ABI-N64 %s
// ABI-N64: -cc1as
// ABI-N64: "-target-abi" "n64"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -msoft-float 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -msoft-float 2>&1 | \
// RUN: FileCheck -check-prefix=SOFTFLOAT %s
// SOFTFLOAT: -cc1as
// SOFTFLOAT: "-target-feature" "+soft-float"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=HARDFLOAT %s
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mhard-float 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mhard-float 2>&1 | \
// RUN: FileCheck -check-prefix=HARDFLOAT %s
// HARDFLOAT: -cc1as
// HARDFLOAT-NOT: "-target-feature" "+soft-float"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=NAN-DEFAULT %s
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mips32r6 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mips32r6 2>&1 | \
// RUN: FileCheck -check-prefix=NAN-DEFAULT %s
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mips64r6 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mips64r6 2>&1 | \
// RUN: FileCheck -check-prefix=NAN-DEFAULT %s
// NAN-DEFAULT: -cc1as
// NAN-DEFAULT-NOT: "-target-feature" "{{[-+]}}nan2008"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mnan=legacy 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mnan=legacy 2>&1 | \
// RUN: FileCheck -check-prefix=NAN-LEGACY %s
// NAN-LEGACY: -cc1as
// NAN-LEGACY: "-target-feature" "-nan2008"
-// RUN: %clang -target mips-linux-gnu -march=mips32r6 -### -fintegrated-as -c %s -mnan=2008 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -march=mips32r6 -### -fintegrated-as -c %s -mnan=2008 2>&1 | \
// RUN: FileCheck -check-prefix=NAN-2008 %s
// NAN-2008: -cc1as
// NAN-2008: "-target-feature" "+nan2008"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=DEFAULT-FLOAT %s
// DEFAULT-FLOAT: -cc1as
// DEFAULT-FLOAT-NOT: "-target-feature" "{{[+-]}}single-float"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -msingle-float 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -msingle-float 2>&1 | \
// RUN: FileCheck -check-prefix=SINGLE-FLOAT %s
// SINGLE-FLOAT: -cc1as
// SINGLE-FLOAT: "-target-feature" "+single-float"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mdouble-float 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mdouble-float 2>&1 | \
// RUN: FileCheck -check-prefix=DOUBLE-FLOAT %s
// DOUBLE-FLOAT: -cc1as
// DOUBLE-FLOAT: "-target-feature" "-single-float"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS16-DEFAULT %s
// MIPS16-DEFAULT: -cc1as
// MIPS16-DEFAULT-NOT: "-target-feature" "{{[+-]}}mips16"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mips16 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mips16 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS16-ON %s
// MIPS16-ON: -cc1as
// MIPS16-ON: "-target-feature" "+mips16"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mno-mips16 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mno-mips16 2>&1 | \
// RUN: FileCheck -check-prefix=MIPS16-OFF %s
// MIPS16-OFF: -cc1as
// MIPS16-OFF: "-target-feature" "-mips16"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=MICROMIPS-DEFAULT %s
// MICROMIPS-DEFAULT: -cc1as
// MICROMIPS-DEFAULT-NOT: "-target-feature" "{{[+-]}}micromips"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mmicromips 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mmicromips 2>&1 | \
// RUN: FileCheck -check-prefix=MICROMIPS-ON %s
// MICROMIPS-ON: -cc1as
// MICROMIPS-ON: "-target-feature" "+micromips"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mno-micromips 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mno-micromips 2>&1 | \
// RUN: FileCheck -check-prefix=MICROMIPS-OFF %s
// MICROMIPS-OFF: -cc1as
// MICROMIPS-OFF: "-target-feature" "-micromips"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=DSP-DEFAULT %s
// DSP-DEFAULT: -cc1as
// DSP-DEFAULT-NOT: "-target-feature" "{{[+-]}}dsp"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mdsp 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mdsp 2>&1 | \
// RUN: FileCheck -check-prefix=DSP-ON %s
// DSP-ON: -cc1as
// DSP-ON: "-target-feature" "+dsp"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mno-dsp 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mno-dsp 2>&1 | \
// RUN: FileCheck -check-prefix=DSP-OFF %s
// DSP-OFF: -cc1as
// DSP-OFF: "-target-feature" "-dsp"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=DSPR2-DEFAULT %s
// DSPR2-DEFAULT: -cc1as
// DSPR2-DEFAULT-NOT: "-target-feature" "{{[+-]}}dspr2"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mdspr2 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mdspr2 2>&1 | \
// RUN: FileCheck -check-prefix=DSPR2-ON %s
// DSPR2-ON: -cc1as
// DSPR2-ON: "-target-feature" "+dspr2"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mno-dspr2 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mno-dspr2 2>&1 | \
// RUN: FileCheck -check-prefix=DSPR2-OFF %s
// DSPR2-OFF: -cc1as
// DSPR2-OFF: "-target-feature" "-dspr2"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=MSA-DEFAULT %s
// MSA-DEFAULT: -cc1as
// MSA-DEFAULT-NOT: "-target-feature" "{{[+-]}}msa"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mmsa 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mmsa 2>&1 | \
// RUN: FileCheck -check-prefix=MSA-ON %s
// MSA-ON: -cc1as
// MSA-ON: "-target-feature" "+msa"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mno-msa 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mno-msa 2>&1 | \
// RUN: FileCheck -check-prefix=MSA-OFF %s
// MSA-OFF: -cc1as
// MSA-OFF: "-target-feature" "-msa"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=FPXX-DEFAULT %s
// FPXX-DEFAULT: -cc1as
// FPXX-DEFAULT: "-target-feature" "+fpxx"
// FPXX-DEFAULT: "-target-feature" "+nooddspreg"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mfp32 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mfp32 2>&1 | \
// RUN: FileCheck -check-prefix=FP32 %s
// FP32: -cc1as
// FP32: "-target-feature" "-fp64"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mfpxx 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mfpxx 2>&1 | \
// RUN: FileCheck -check-prefix=FPXX %s
// FPXX: -cc1as
// FPXX: "-target-feature" "+fpxx"
// FPXX: "-target-feature" "+nooddspreg"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mfp64 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mfp64 2>&1 | \
// RUN: FileCheck -check-prefix=FP64 %s
// FP64: -cc1as
// FP64: "-target-feature" "+fp64"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=ODDSPREG-DEFAULT %s
// ODDSPREG-DEFAULT: -cc1as
// ODDSPREG-DEFAULT: "-target-feature" "+nooddspreg"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -modd-spreg 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -modd-spreg 2>&1 | \
// RUN: FileCheck -check-prefix=ODDSPREG-ON %s
// ODDSPREG-ON: -cc1as
// ODDSPREG-ON: "-target-feature" "-nooddspreg"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mno-odd-spreg 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mno-odd-spreg 2>&1 | \
// RUN: FileCheck -check-prefix=ODDSPREG-OFF %s
// ODDSPREG-OFF: -cc1as
// ODDSPREG-OFF: "-target-feature" "+nooddspreg"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mfpxx -modd-spreg 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mfpxx -modd-spreg 2>&1 | \
// RUN: FileCheck -check-prefix=FPXX-ODDSPREG %s
// FPXX-ODDSPREG: -cc1as
// FPXX-ODDSPREG: "-target-feature" "+fpxx"
// FPXX-ODDSPREG: "-target-feature" "-nooddspreg"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mabicalls 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mabicalls 2>&1 | \
// RUN: FileCheck -check-prefix=ABICALLS-ON %s
// ABICALLS-ON: -cc1as
// ABICALLS-ON: "-target-feature" "-noabicalls"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -c %s -mno-abicalls 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -c %s -mno-abicalls 2>&1 | \
// RUN: FileCheck -check-prefix=ABICALLS-OFF %s
// ABICALLS-OFF: -cc1as
// ABICALLS-OFF: "-target-feature" "+noabicalls"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -msoft-float -c %s 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -msoft-float -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=SOFTFLOAT-IMPLICIT-FPXX --implicit-check-not=-mfpxx %s
// SOFTFLOAT-IMPLICIT-FPXX: -cc1as
// SOFTFLOAT-IMPLICIT-FPXX: "-target-feature" "+soft-float"
// SOFTFLOAT-IMPLICIT-FPXX-NOT: "-target-feature" "+fpxx"
// SOFTFLOAT-IMPLICIT-FPXX-NOT: "-target-feature" "+nooddspreg"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -msoft-float -mfpxx -c %s 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -msoft-float -mfpxx -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=SOFTFLOAT-EXPLICIT-FPXX %s
// SOFTFLOAT-EXPLICIT-FPXX: -cc1as
// SOFTFLOAT-EXPLICIT-FPXX: "-target-feature" "+soft-float"
// SOFTFLOAT-EXPLICIT-FPXX: "-target-feature" "+fpxx"
// SOFTFLOAT-EXPLICIT-FPXX: "-target-feature" "+nooddspreg"
-// RUN: %clang -target mips-mti-linux-gnu -### -fintegrated-as -msoft-float -c %s 2>&1 | \
+// RUN: %clang --target=mips-mti-linux-gnu -### -fintegrated-as -msoft-float -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=MTI-SOFTFLOAT-IMPLICIT-FPXX --implicit-check-not=-mfpxx %s
// MTI-SOFTFLOAT-IMPLICIT-FPXX: -cc1as
// MTI-SOFTFLOAT-IMPLICIT-FPXX: "-target-feature" "+soft-float"
// MTI-SOFTFLOAT-IMPLICIT-FPXX-NOT: "-target-feature" "+fpxx"
// MTI-SOFTFLOAT-IMPLICIT-FPXX-NOT: "-target-feature" "+nooddspreg"
-// RUN: %clang -target mips-mti-linux-gnu -### -fintegrated-as -msoft-float -mfpxx -c %s 2>&1 | \
+// RUN: %clang --target=mips-mti-linux-gnu -### -fintegrated-as -msoft-float -mfpxx -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=MTI-SOFTFLOAT-EXPLICIT-FPXX %s
// MTI-SOFTFLOAT-EXPLICIT-FPXX: -cc1as
// MTI-SOFTFLOAT-EXPLICIT-FPXX: "-target-feature" "+soft-float"
// MTI-SOFTFLOAT-EXPLICIT-FPXX: "-target-feature" "+fpxx"
// MTI-SOFTFLOAT-EXPLICIT-FPXX: "-target-feature" "+nooddspreg"
-// RUN: %clang -target mips-img-linux-gnu -### -fintegrated-as -msoft-float -c %s 2>&1 | \
+// RUN: %clang --target=mips-img-linux-gnu -### -fintegrated-as -msoft-float -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=IMG-SOFTFLOAT-IMPLICIT-FPXX --implicit-check-not=-mfpxx %s
// IMG-SOFTFLOAT-IMPLICIT-FPXX: -cc1as
// IMG-SOFTFLOAT-IMPLICIT-FPXX: "-target-feature" "+soft-float"
// IMG-SOFTFLOAT-IMPLICIT-FPXX-NOT: "-target-feature" "+fpxx"
// IMG-SOFTFLOAT-IMPLICIT-FPXX-NOT: "-target-feature" "+nooddspreg"
-// RUN: %clang -target mips-img-linux-gnu -### -fintegrated-as -msoft-float -mfpxx -c %s 2>&1 | \
+// RUN: %clang --target=mips-img-linux-gnu -### -fintegrated-as -msoft-float -mfpxx -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=IMG-SOFTFLOAT-EXPLICIT-FPXX %s
// IMG-SOFTFLOAT-EXPLICIT-FPXX: -cc1as
// IMG-SOFTFLOAT-EXPLICIT-FPXX: "-target-feature" "+soft-float"
// IMG-SOFTFLOAT-EXPLICIT-FPXX: "-target-feature" "+fpxx"
// IMG-SOFTFLOAT-EXPLICIT-FPXX: "-target-feature" "+nooddspreg"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -msingle-float -c %s 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -msingle-float -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=SINGLEFLOAT-IMPLICIT-FPXX --implicit-check-not=-mfpxx %s
// SINGLEFLOAT-IMPLICIT-FPXX: -cc1as
// SINGLEFLOAT-IMPLICIT-FPXX: "-target-feature" "+single-float"
// SINGLEFLOAT-IMPLICIT-FPXX-NOT: "-target-feature" "+fpxx"
// SINGLEFLOAT-IMPLICIT-FPXX-NOT: "-target-feature" "+nooddspreg"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -msingle-float -mfpxx -c %s 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -msingle-float -mfpxx -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=SINGLEFLOAT-EXPLICIT-FPXX %s
// SINGLEFLOAT-EXPLICIT-FPXX: -cc1as
// SINGLEFLOAT-EXPLICIT-FPXX: "-target-feature" "+single-float"
// SINGLEFLOAT-EXPLICIT-FPXX: "-target-feature" "+fpxx"
// SINGLEFLOAT-EXPLICIT-FPXX: "-target-feature" "+nooddspreg"
-// RUN: %clang -target mips-mti-linux-gnu -### -fintegrated-as -msingle-float -c %s 2>&1 | \
+// RUN: %clang --target=mips-mti-linux-gnu -### -fintegrated-as -msingle-float -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=MTI-SINGLEFLOAT-IMPLICIT-FPXX --implicit-check-not=-mfpxx %s
// MTI-SINGLEFLOAT-IMPLICIT-FPXX: -cc1as
// MTI-SINGLEFLOAT-IMPLICIT-FPXX: "-target-feature" "+single-float"
// MTI-SINGLEFLOAT-IMPLICIT-FPXX-NOT: "-target-feature" "+fpxx"
// MTI-SINGLEFLOAT-IMPLICIT-FPXX-NOT: "-target-feature" "+nooddspreg"
-// RUN: %clang -target mips-mti-linux-gnu -### -fintegrated-as -msingle-float -mfpxx -c %s 2>&1 | \
+// RUN: %clang --target=mips-mti-linux-gnu -### -fintegrated-as -msingle-float -mfpxx -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=MTI-SINGLEFLOAT-EXPLICIT-FPXX %s
// MTI-SINGLEFLOAT-EXPLICIT-FPXX: -cc1as
// MTI-SINGLEFLOAT-EXPLICIT-FPXX: "-target-feature" "+single-float"
// MTI-SINGLEFLOAT-EXPLICIT-FPXX: "-target-feature" "+fpxx"
// MTI-SINGLEFLOAT-EXPLICIT-FPXX: "-target-feature" "+nooddspreg"
-// RUN: %clang -target mips-img-linux-gnu -### -fintegrated-as -msingle-float -c %s 2>&1 | \
+// RUN: %clang --target=mips-img-linux-gnu -### -fintegrated-as -msingle-float -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=IMG-SINGLEFLOAT-IMPLICIT-FPXX --implicit-check-not=-mfpxx %s
// IMG-SINGLEFLOAT-IMPLICIT-FPXX: -cc1as
// IMG-SINGLEFLOAT-IMPLICIT-FPXX: "-target-feature" "+single-float"
// IMG-SINGLEFLOAT-IMPLICIT-FPXX-NOT: "-target-feature" "+fpxx"
// IMG-SINGLEFLOAT-IMPLICIT-FPXX-NOT: "-target-feature" "+nooddspreg"
-// RUN: %clang -target mips-img-linux-gnu -### -fintegrated-as -msingle-float -mfpxx -c %s 2>&1 | \
+// RUN: %clang --target=mips-img-linux-gnu -### -fintegrated-as -msingle-float -mfpxx -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=IMG-SINGLEFLOAT-EXPLICIT-FPXX %s
// IMG-SINGLEFLOAT-EXPLICIT-FPXX: -cc1as
// IMG-SINGLEFLOAT-EXPLICIT-FPXX: "-target-feature" "+single-float"
// IMG-SINGLEFLOAT-EXPLICIT-FPXX: "-target-feature" "+fpxx"
// IMG-SINGLEFLOAT-EXPLICIT-FPXX: "-target-feature" "+nooddspreg"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -mxgot -c %s 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -mxgot -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=XGOT %s
// XGOT: -cc1as
// XGOT: "-target-feature" "+xgot"
-// RUN: %clang -target mips-linux-gnu -### -fintegrated-as -mno-xgot -c %s 2>&1 | \
+// RUN: %clang --target=mips-linux-gnu -### -fintegrated-as -mno-xgot -c %s 2>&1 | \
// RUN: FileCheck -check-prefix=NOXGOT %s
// NOXGOT: -cc1as
// NOXGOT: "-target-feature" "-xgot"
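Background for these RUN-line updates: `-target <triple>` is the legacy, separate-argument spelling of the driver's target option, while `--target=<triple>` is the canonical form; both are expected to select the same `-cc1` triple. A minimal sketch of the equivalence (hypothetical invocation, any MIPS-capable clang assumed):

  clang -target mips-linux-gnu  -### -c t.c   # legacy spelling
  clang --target=mips-linux-gnu -### -c t.c   # canonical spelling; same "-triple" in the -cc1 line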
diff --git a/clang/test/Driver/mips-mabs-warning.c b/clang/test/Driver/mips-mabs-warning.c
index 93175be4ccb8..866393fbc485 100644
--- a/clang/test/Driver/mips-mabs-warning.c
+++ b/clang/test/Driver/mips-mabs-warning.c
@@ -1,6 +1,6 @@
// REQUIRES: mips-registered-target
-// RUN: %clang -c -target mips-unknown-gnu -mcpu=mips32 -mabs=2008 %s 2>&1 | FileCheck -check-prefix=NO2008 %s
+// RUN: %clang -c --target=mips-unknown-gnu -mcpu=mips32 -mabs=2008 %s 2>&1 | FileCheck -check-prefix=NO2008 %s
// NO2008: warning: ignoring '-mabs=2008' option because the 'mips32' architecture does not support it [-Wunsupported-abs]
-// RUN: %clang -c -target mips-unknown-gnu -mcpu=mips32r6 -mabs=legacy %s 2>&1 | FileCheck -check-prefix=NOLEGACY %s
+// RUN: %clang -c --target=mips-unknown-gnu -mcpu=mips32r6 -mabs=legacy %s 2>&1 | FileCheck -check-prefix=NOLEGACY %s
// NOLEGACY: warning: ignoring '-mabs=legacy' option because the 'mips32r6' architecture does not support it [-Wunsupported-abs]
diff --git a/clang/test/Driver/mlong-double-128.c b/clang/test/Driver/mlong-double-128.c
index d2f57c6a0157..a6eea7cb8d4f 100644
--- a/clang/test/Driver/mlong-double-128.c
+++ b/clang/test/Driver/mlong-double-128.c
@@ -1,15 +1,15 @@
-// RUN: %clang -target powerpc-linux-musl -c -### %s -mlong-double-128 2>&1 | FileCheck %s
-// RUN: %clang -target powerpc64-pc-freebsd12 -c -### %s -mlong-double-128 2>&1 | FileCheck %s
-// RUN: %clang -target powerpc64le-linux-musl -c -### %s -mlong-double-128 2>&1 | FileCheck %s
-// RUN: %clang -target i686-linux-gnu -c -### %s -mlong-double-128 2>&1 | FileCheck %s
+// RUN: %clang --target=powerpc-linux-musl -c -### %s -mlong-double-128 2>&1 | FileCheck %s
+// RUN: %clang --target=powerpc64-pc-freebsd12 -c -### %s -mlong-double-128 2>&1 | FileCheck %s
+// RUN: %clang --target=powerpc64le-linux-musl -c -### %s -mlong-double-128 2>&1 | FileCheck %s
+// RUN: %clang --target=i686-linux-gnu -c -### %s -mlong-double-128 2>&1 | FileCheck %s
-// RUN: %clang -target x86_64-linux-musl -c -### %s -mlong-double-128 -mlong-double-80 2>&1 | FileCheck --implicit-check-not=-mlong-double-128 /dev/null
-// RUN: %clang -target x86_64-linux-musl -c -### %s -mlong-double-80 -mlong-double-128 2>&1 | FileCheck %s
+// RUN: %clang --target=x86_64-linux-musl -c -### %s -mlong-double-128 -mlong-double-80 2>&1 | FileCheck --implicit-check-not=-mlong-double-128 /dev/null
+// RUN: %clang --target=x86_64-linux-musl -c -### %s -mlong-double-80 -mlong-double-128 2>&1 | FileCheck %s
// CHECK: "-mlong-double-128"
-// RUN: not %clang -target aarch64 -c -### %s -mlong-double-128 2>&1 | FileCheck --check-prefix=ERR %s
-// RUN: not %clang -target powerpc -c -### %s -mlong-double-80 2>&1 | FileCheck --check-prefix=ERR2 %s
+// RUN: not %clang --target=aarch64 -c -### %s -mlong-double-128 2>&1 | FileCheck --check-prefix=ERR %s
+// RUN: not %clang --target=powerpc -c -### %s -mlong-double-80 2>&1 | FileCheck --check-prefix=ERR2 %s
// ERR: error: unsupported option '-mlong-double-128' for target 'aarch64'
// ERR2: error: unsupported option '-mlong-double-80' for target 'powerpc'
diff --git a/clang/test/Driver/mlong-double-64.c b/clang/test/Driver/mlong-double-64.c
index f9a441ab926a..df09eccf8c6a 100644
--- a/clang/test/Driver/mlong-double-64.c
+++ b/clang/test/Driver/mlong-double-64.c
@@ -1,8 +1,8 @@
-// RUN: %clang -target powerpc-linux-musl -c -### %s -mlong-double-64 2>&1 | FileCheck %s
-// RUN: %clang -target powerpc64-pc-freebsd12 -c -### %s -mlong-double-64 2>&1 | FileCheck %s
-// RUN: %clang -target powerpc64le-linux-musl -c -### %s -mlong-double-64 2>&1 | FileCheck %s
-// RUN: %clang -target i686-linux-gnu -c -### %s -mlong-double-64 2>&1 | FileCheck %s
-// RUN: %clang -target x86_64-linux-musl -c -### %s -mlong-double-64 2>&1 | FileCheck %s
+// RUN: %clang --target=powerpc-linux-musl -c -### %s -mlong-double-64 2>&1 | FileCheck %s
+// RUN: %clang --target=powerpc64-pc-freebsd12 -c -### %s -mlong-double-64 2>&1 | FileCheck %s
+// RUN: %clang --target=powerpc64le-linux-musl -c -### %s -mlong-double-64 2>&1 | FileCheck %s
+// RUN: %clang --target=i686-linux-gnu -c -### %s -mlong-double-64 2>&1 | FileCheck %s
+// RUN: %clang --target=x86_64-linux-musl -c -### %s -mlong-double-64 2>&1 | FileCheck %s
// CHECK: "-mlong-double-64"
diff --git a/clang/test/Driver/module-output.cppm b/clang/test/Driver/module-output.cppm
index dea9cf998a54..bf7bfbf3cb57 100644
--- a/clang/test/Driver/module-output.cppm
+++ b/clang/test/Driver/module-output.cppm
@@ -22,8 +22,8 @@
//
// Tests that clang will reject the command line if it specifies -fmodule-output with
// multiple archs.
-// RUN: not %clang %t/Hello.cppm -fmodule-output -arch i386 -arch x86_64 -### -target \
-// RUN: x86_64-apple-darwin 2>&1 | FileCheck %t/Hello.cppm -check-prefix=MULTIPLE-ARCH
+// RUN: not %clang %t/Hello.cppm -fmodule-output -arch i386 -arch x86_64 -### \
+// RUN: --target=x86_64-apple-darwin 2>&1 | FileCheck %t/Hello.cppm -check-prefix=MULTIPLE-ARCH
// Tests that the .pcm file will be generated in the same path as the one specified
// on the command line.
diff --git a/clang/test/Driver/ms-bitfields.c b/clang/test/Driver/ms-bitfields.c
index 031ed41e2aad..d5a3656b3d11 100644
--- a/clang/test/Driver/ms-bitfields.c
+++ b/clang/test/Driver/ms-bitfields.c
@@ -1,5 +1,5 @@
-// RUN: %clang -### -target x86_64-linux-gnu %s 2>&1 | FileCheck %s -check-prefix=NO-MSBITFIELDS
-// RUN: %clang -### -target x86_64-windows-gnu %s 2>&1 | FileCheck %s -check-prefix=MSBITFIELDS
+// RUN: %clang -### --target=x86_64-linux-gnu %s 2>&1 | FileCheck %s -check-prefix=NO-MSBITFIELDS
+// RUN: %clang -### --target=x86_64-windows-gnu %s 2>&1 | FileCheck %s -check-prefix=MSBITFIELDS
// RUN: %clang -### -mno-ms-bitfields -mms-bitfields %s 2>&1 | FileCheck %s -check-prefix=MSBITFIELDS
// RUN: %clang -### -mms-bitfields -mno-ms-bitfields %s 2>&1 | FileCheck %s -check-prefix=NO-MSBITFIELDS
diff --git a/clang/test/Driver/ms-define-stdc.c b/clang/test/Driver/ms-define-stdc.c
new file mode 100644
index 000000000000..d5e873d21a76
--- /dev/null
+++ b/clang/test/Driver/ms-define-stdc.c
@@ -0,0 +1,11 @@
+// Note: %s must be preceded by --, otherwise it may be interpreted as a
+// command-line option, e.g. on Mac where %s is commonly under /Users.
+//
+// Note: see also cl-zc.cpp
+
+// RUN: %clang_cl /TC /dev/null /E -Xclang -dM /Zc:__STDC__- 2>&1 | FileCheck %s --check-prefix=ZCSTDCIGNORED
+// ZCSTDCIGNORED-NOT: #define __STDC__ 1
+// ZCSTDCIGNORED: argument unused during compilation
+
+// RUN: not %clang -Xclang -fno-ms-define-stdc %s 2>&1 | FileCheck %s --check-prefix="NOARG"
+// NOARG: error: unknown argument: '-fno-ms-define-stdc'
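The `--` guard mentioned in the note above matters because clang-cl treats any leading-slash argument as a candidate option, so an absolute input path can be swallowed by option parsing. A sketch of the failure mode (paths illustrative; `/U` is cl.exe's undefine-macro option):

  clang-cl /Users/me/test.c      # may be parsed as the joined /U option rather than an input file
  clang-cl -- /Users/me/test.c   # "--" ends option parsing, so the path is taken as an input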
diff --git a/clang/test/Driver/msan.c b/clang/test/Driver/msan.c
index 339840a7a960..7ee196cd969c 100644
--- a/clang/test/Driver/msan.c
+++ b/clang/test/Driver/msan.c
@@ -1,29 +1,29 @@
// REQUIRES: x86-registered-target
-// RUN: %clang -target mips64-linux-gnu -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK0
-// RUN: %clang -target mips64el-unknown-linux-gnu -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK0
-// RUN: %clang -target powerpc64-unknown-linux-gnu -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK0
-// RUN: %clang -target powerpc64le-unknown-linux-gnu -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK0
+// RUN: %clang --target=mips64-linux-gnu -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK0
+// RUN: %clang --target=mips64el-unknown-linux-gnu -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK0
+// RUN: %clang --target=powerpc64-unknown-linux-gnu -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK0
+// RUN: %clang --target=powerpc64le-unknown-linux-gnu -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK0
// Verify that -fsanitize=memory and -fsanitize=kernel-memory invoke MSan/KMSAN instrumentation.
-// RUN: %clang -target x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK0
-// RUN: %clang -O1 -target x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s
-// RUN: %clang -O2 -target x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s
-// RUN: %clang -O3 -target x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s
-// RUN: %clang -target x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -flto=thin -o - | FileCheck %s --check-prefixes=CHECK0
-// RUN: %clang -O2 -target x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -flto=thin -o - | FileCheck %s
-// RUN: %clang -target x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -flto -o - | FileCheck %s --check-prefixes=CHECK0
-// RUN: %clang -O2 -target x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -flto -o - | FileCheck %s
+// RUN: %clang --target=x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK0
+// RUN: %clang -O1 --target=x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s
+// RUN: %clang -O2 --target=x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s
+// RUN: %clang -O3 --target=x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -o - | FileCheck %s
+// RUN: %clang --target=x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -flto=thin -o - | FileCheck %s --check-prefixes=CHECK0
+// RUN: %clang -O2 --target=x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -flto=thin -o - | FileCheck %s
+// RUN: %clang --target=x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -flto -o - | FileCheck %s --check-prefixes=CHECK0
+// RUN: %clang -O2 --target=x86_64-unknown-linux -fsanitize=memory %s -S -emit-llvm -flto -o - | FileCheck %s
-// RUN: %clang -target x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK0
-// RUN: %clang -O1 -target x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -o - | FileCheck %s
-// RUN: %clang -O2 -target x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -o - | FileCheck %s
-// RUN: %clang -O3 -target x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -o - | FileCheck %s
-// RUN: %clang -target x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -flto=thin -o - | FileCheck %s --check-prefixes=CHECK0
-// RUN: %clang -O2 -target x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -flto=thin -o - | FileCheck %s
-// RUN: %clang -target x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -flto -o - | FileCheck %s --check-prefixes=CHECK0
-// RUN: %clang -O2 -target x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -flto -o - | FileCheck %s
+// RUN: %clang --target=x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK0
+// RUN: %clang -O1 --target=x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -o - | FileCheck %s
+// RUN: %clang -O2 --target=x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -o - | FileCheck %s
+// RUN: %clang -O3 --target=x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -o - | FileCheck %s
+// RUN: %clang --target=x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -flto=thin -o - | FileCheck %s --check-prefixes=CHECK0
+// RUN: %clang -O2 --target=x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -flto=thin -o - | FileCheck %s
+// RUN: %clang --target=x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -flto -o - | FileCheck %s --check-prefixes=CHECK0
+// RUN: %clang -O2 --target=x86_64-unknown-linux -fsanitize=kernel-memory %s -S -emit-llvm -flto -o - | FileCheck %s
int foo(int *a) { return *a; }
// CHECK0: = alloca
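For orientation: the CHECK0 runs only assert that the parameter's `alloca` is still present, while the plain CHECK runs at -O1 and above match a stricter pattern defined elsewhere in this file. A quick manual way to see MSan's inserted runtime references (illustrative command; exact symbols vary by version):

  clang -O2 --target=x86_64-unknown-linux -fsanitize=memory -S -emit-llvm t.c -o - | grep __msan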
diff --git a/clang/test/Driver/msc-version.c b/clang/test/Driver/msc-version.c
index ec87e4d41eb4..80dd5b0eafd4 100644
--- a/clang/test/Driver/msc-version.c
+++ b/clang/test/Driver/msc-version.c
@@ -2,25 +2,25 @@
// Verify -fms-compatibility-version parsing
//
-// RUN: %clang -target i686-windows -fms-compatibility -fms-compatibility-version=14 -dM -E - </dev/null -o - | FileCheck %s -check-prefix CHECK-MSC-VERSION-MAJOR
+// RUN: %clang --target=i686-windows -fms-compatibility -fms-compatibility-version=14 -dM -E - </dev/null -o - | FileCheck %s --check-prefix=CHECK-MSC-VERSION-MAJOR
// CHECK-MSC-VERSION-MAJOR: _MSC_BUILD 1
// CHECK-MSC-VERSION-MAJOR: _MSC_FULL_VER 140000000
// CHECK-MSC-VERSION-MAJOR: _MSC_VER 1400
-// RUN: %clang -target i686-windows -fms-compatibility -fms-compatibility-version=15.00 -dM -E - </dev/null -o - | FileCheck %s -check-prefix CHECK-MSC-VERSION-MAJOR-MINOR
+// RUN: %clang --target=i686-windows -fms-compatibility -fms-compatibility-version=15.00 -dM -E - </dev/null -o - | FileCheck %s --check-prefix=CHECK-MSC-VERSION-MAJOR-MINOR
// CHECK-MSC-VERSION-MAJOR-MINOR: _MSC_BUILD 1
// CHECK-MSC-VERSION-MAJOR-MINOR: _MSC_FULL_VER 150000000
// CHECK-MSC-VERSION-MAJOR-MINOR: _MSC_VER 1500
-// RUN: %clang -target i686-windows -fms-compatibility -fms-compatibility-version=15.00.20706 -dM -E - </dev/null -o - | FileCheck %s -check-prefix CHECK-MSC-VERSION-MAJOR-MINOR-BUILD
+// RUN: %clang --target=i686-windows -fms-compatibility -fms-compatibility-version=15.00.20706 -dM -E - </dev/null -o - | FileCheck %s --check-prefix=CHECK-MSC-VERSION-MAJOR-MINOR-BUILD
// CHECK-MSC-VERSION-MAJOR-MINOR-BUILD: _MSC_BUILD 1
// CHECK-MSC-VERSION-MAJOR-MINOR-BUILD: _MSC_FULL_VER 150020706
// CHECK-MSC-VERSION-MAJOR-MINOR-BUILD: _MSC_VER 1500
-// RUN: %clang -target i686-windows -fms-compatibility -fms-compatibility-version=15.00.20706.01 -dM -E - </dev/null -o - | FileCheck %s -check-prefix CHECK-MSC-VERSION-MAJOR-MINOR-BUILD-PATCH
+// RUN: %clang --target=i686-windows -fms-compatibility -fms-compatibility-version=15.00.20706.01 -dM -E - </dev/null -o - | FileCheck %s --check-prefix=CHECK-MSC-VERSION-MAJOR-MINOR-BUILD-PATCH
// CHECK-MSC-VERSION-MAJOR-MINOR-BUILD-PATCH: _MSC_BUILD 1
// CHECK-MSC-VERSION-MAJOR-MINOR-BUILD-PATCH: _MSC_FULL_VER 150020706
@@ -31,7 +31,7 @@
// Verify -fmsc-version and -fms-compatibility-version diagnostic
//
-// RUN: not %clang -target i686-windows -fms-compatibility -fmsc-version=1700 -fms-compatibility-version=17.00.50727.1 -E - </dev/null 2>&1 | FileCheck %s -check-prefix CHECK-BASIC-EXTENDED-DIAGNOSTIC
+// RUN: not %clang --target=i686-windows -fms-compatibility -fmsc-version=1700 -fms-compatibility-version=17.00.50727.1 -E - </dev/null 2>&1 | FileCheck %s --check-prefix=CHECK-BASIC-EXTENDED-DIAGNOSTIC
// CHECK-BASIC-EXTENDED-DIAGNOSTIC: invalid argument '-fmsc-version={{.*}}' not allowed with '-fms-compatibility-version={{.*}}'
@@ -40,17 +40,17 @@
// Verify -fmsc-version to -fms-compatibility-version conversion
//
-// RUN: %clang -### -target i686-windows -fms-compatibility -fmsc-version=17 -E - </dev/null -o /dev/null 2>&1 | FileCheck %s -check-prefix CHECK-MSC-17
+// RUN: %clang -### --target=i686-windows -fms-compatibility -fmsc-version=17 -E - </dev/null -o /dev/null 2>&1 | FileCheck %s --check-prefix=CHECK-MSC-17
// CHECK-MSC-17-NOT: "-fmsc-version=1700"
// CHECK-MSC-17: "-fms-compatibility-version=17"
-// RUN: %clang -### -target i686-windows -fms-compatibility -fmsc-version=1600 -E - </dev/null -o /dev/null 2>&1 | FileCheck %s -check-prefix CHECK-MSC-16
+// RUN: %clang -### --target=i686-windows -fms-compatibility -fmsc-version=1600 -E - </dev/null -o /dev/null 2>&1 | FileCheck %s --check-prefix=CHECK-MSC-16
// CHECK-MSC-16-NOT: "-fmsc-version=1600"
// CHECK-MSC-16: "-fms-compatibility-version=16.0"
-// RUN: %clang -### -target i686-windows -fms-compatibility -fmsc-version=150020706 -E - </dev/null -o /dev/null 2>&1 | FileCheck %s -check-prefix CHECK-MSC-15
+// RUN: %clang -### --target=i686-windows -fms-compatibility -fmsc-version=150020706 -E - </dev/null -o /dev/null 2>&1 | FileCheck %s --check-prefix=CHECK-MSC-15
// CHECK-MSC-15-NOT: "-fmsc-version=150020706"
// CHECK-MSC-15: "-fms-compatibility-version=15.0.20706"
@@ -59,7 +59,7 @@
// Verify default version with -fms-extensions
//
-// RUN: %clang -target i686-windows -fms-extensions -dM -E - </dev/null -o - | FileCheck %s -check-prefix CHECK-MS-EXTENSIONS
+// RUN: %clang --target=i686-windows -fms-extensions -dM -E - </dev/null -o - | FileCheck %s --check-prefix=CHECK-MS-EXTENSIONS
// CHECK-MS-EXTENSIONS: _MSC_BUILD 1
// CHECK-MS-EXTENSIONS: _MSC_FULL_VER {{.+}}
diff --git a/clang/test/Driver/msp430-hwmult.c b/clang/test/Driver/msp430-hwmult.c
index 5e6035d93775..3ba6bfcdce5f 100644
--- a/clang/test/Driver/msp430-hwmult.c
+++ b/clang/test/Driver/msp430-hwmult.c
@@ -1,42 +1,42 @@
// Test that different values of -mhwmult pick correct
// MSP430 hwmult target-feature(s).
-// RUN: %clang -### -target msp430 %s 2>&1 | FileCheck %s
-// RUN: %clang -### -target msp430 %s -mhwmult=auto 2>&1 | FileCheck %s
+// RUN: %clang -### --target=msp430 %s 2>&1 | FileCheck %s
+// RUN: %clang -### --target=msp430 %s -mhwmult=auto 2>&1 | FileCheck %s
// CHECK-NOT: "-target-feature" "+hwmult16"
// CHECK-NOT: "-target-feature" "+hwmult32"
// CHECK-NOT: "-target-feature" "+hwmultf5"
-// RUN: %clang -### -target msp430 %s -mhwmult=none 2>&1 | FileCheck --check-prefix=CHECK-NONE %s
-// RUN: %clang -### -target msp430 %s -mhwmult=none -mmcu=msp430f147 2>&1 | FileCheck --check-prefix=CHECK-NONE %s
-// RUN: %clang -### -target msp430 %s -mhwmult=none -mmcu=msp430f4783 2>&1 | FileCheck --check-prefix=CHECK-NONE %s
+// RUN: %clang -### --target=msp430 %s -mhwmult=none 2>&1 | FileCheck --check-prefix=CHECK-NONE %s
+// RUN: %clang -### --target=msp430 %s -mhwmult=none -mmcu=msp430f147 2>&1 | FileCheck --check-prefix=CHECK-NONE %s
+// RUN: %clang -### --target=msp430 %s -mhwmult=none -mmcu=msp430f4783 2>&1 | FileCheck --check-prefix=CHECK-NONE %s
// CHECK-NONE: "-target-feature" "-hwmult16"
// CHECK-NONE: "-target-feature" "-hwmult32"
// CHECK-NONE: "-target-feature" "-hwmultf5"
-// RUN: %clang -### -target msp430 %s -mhwmult=16bit 2>&1 | FileCheck --check-prefix=CHECK-16 %s
+// RUN: %clang -### --target=msp430 %s -mhwmult=16bit 2>&1 | FileCheck --check-prefix=CHECK-16 %s
// CHECK-16: "-target-feature" "+hwmult16"
-// RUN: %clang -### -target msp430 %s -mhwmult=32bit 2>&1 | FileCheck --check-prefix=CHECK-32 %s
+// RUN: %clang -### --target=msp430 %s -mhwmult=32bit 2>&1 | FileCheck --check-prefix=CHECK-32 %s
// CHECK-32: "-target-feature" "+hwmult32"
-// RUN: %clang -### -target msp430 %s -mhwmult=f5series 2>&1 | FileCheck --check-prefix=CHECK-F5 %s
+// RUN: %clang -### --target=msp430 %s -mhwmult=f5series 2>&1 | FileCheck --check-prefix=CHECK-F5 %s
// CHECK-F5: "-target-feature" "+hwmultf5"
// RUN: not %clang -### --target=msp430 %s -mhwmult=rrr 2>&1 | FileCheck --check-prefix=INVL-ARG %s
// INVL-ARG: error: unsupported argument 'rrr' to option '-mhwmult='
-// RUN: %clang -### -target msp430 %s -mhwmult=auto 2>&1 | FileCheck --check-prefix=WRN-NODEV %s
+// RUN: %clang -### --target=msp430 %s -mhwmult=auto 2>&1 | FileCheck --check-prefix=WRN-NODEV %s
// WRN-NODEV: warning: no MCU device specified, but '-mhwmult' is set to 'auto',
// assuming no hardware multiply; use '-mmcu' to specify a MSP430 device,
// or '-mhwmult' to set hardware multiply type explicitly.
-// RUN: %clang -### -target msp430 %s -mhwmult=16bit -mmcu=msp430c111 2>&1 | FileCheck --check-prefix=WRN-UNSUP %s
-// RUN: %clang -### -target msp430 %s -mhwmult=32bit -mmcu=msp430c111 2>&1 | FileCheck --check-prefix=WRN-UNSUP %s
-// RUN: %clang -### -target msp430 %s -mhwmult=f5series -mmcu=msp430c111 2>&1 | FileCheck --check-prefix=WRN-UNSUP %s
+// RUN: %clang -### --target=msp430 %s -mhwmult=16bit -mmcu=msp430c111 2>&1 | FileCheck --check-prefix=WRN-UNSUP %s
+// RUN: %clang -### --target=msp430 %s -mhwmult=32bit -mmcu=msp430c111 2>&1 | FileCheck --check-prefix=WRN-UNSUP %s
+// RUN: %clang -### --target=msp430 %s -mhwmult=f5series -mmcu=msp430c111 2>&1 | FileCheck --check-prefix=WRN-UNSUP %s
// WRN-UNSUP: warning: the given MCU does not support hardware multiply, but '-mhwmult' is set to
-// RUN: %clang -### -target msp430 %s -mhwmult=16bit -mmcu=msp430f4783 2>&1 | FileCheck --check-prefix=WRN-MISMCH %s
-// RUN: %clang -### -target msp430 %s -mhwmult=32bit -mmcu=msp430f147 2>&1 | FileCheck --check-prefix=WRN-MISMCH %s
-// RUN: %clang -### -target msp430 %s -mhwmult=f5series -mmcu=msp430f4783 2>&1 | FileCheck --check-prefix=WRN-MISMCH %s
+// RUN: %clang -### --target=msp430 %s -mhwmult=16bit -mmcu=msp430f4783 2>&1 | FileCheck --check-prefix=WRN-MISMCH %s
+// RUN: %clang -### --target=msp430 %s -mhwmult=32bit -mmcu=msp430f147 2>&1 | FileCheck --check-prefix=WRN-MISMCH %s
+// RUN: %clang -### --target=msp430 %s -mhwmult=f5series -mmcu=msp430f4783 2>&1 | FileCheck --check-prefix=WRN-MISMCH %s
// WRN-MISMCH: warning: the given MCU supports {{.*}} hardware multiply, but '-mhwmult' is set to {{.*}}
diff --git a/clang/test/Driver/msvc-compiler-rt.c b/clang/test/Driver/msvc-compiler-rt.c
index 9651662aa703..33fa8c829579 100644
--- a/clang/test/Driver/msvc-compiler-rt.c
+++ b/clang/test/Driver/msvc-compiler-rt.c
@@ -1,6 +1,6 @@
-// RUN: %clang -target x86_64-pc-windows-msvc --rtlib=compiler-rt -### %s 2>&1 | FileCheck %s -check-prefix MSVC-COMPILER-RT
-// RUN: %clang -target x86_64-pc-windows-msvc --rtlib=compiler-rt --rtlib=platform -### %s 2>&1 | FileCheck %s -check-prefix MSVC-DEFAULT
-// RUN: not %clang %s -target x86_64-pc-windows-msvc --rtlib=libgcc 2>&1 | FileCheck %s -check-prefix CHECK-ERROR
+// RUN: %clang --target=x86_64-pc-windows-msvc --rtlib=compiler-rt -### %s 2>&1 | FileCheck %s --check-prefix=MSVC-COMPILER-RT
+// RUN: %clang --target=x86_64-pc-windows-msvc --rtlib=compiler-rt --rtlib=platform -### %s 2>&1 | FileCheck %s --check-prefix=MSVC-DEFAULT
+// RUN: not %clang %s --target=x86_64-pc-windows-msvc --rtlib=libgcc 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
// MSVC-COMPILER-RT: "{{.*}}clang_rt.builtins{{.*}}"
// MSVC-DEFAULT-NOT: "{{.*}}clang_rt.builtins{{.*}}"
diff --git a/clang/test/Driver/msvc-static-rtti.cpp b/clang/test/Driver/msvc-static-rtti.cpp
index 680c5f8518b2..c29c7f4c40ca 100644
--- a/clang/test/Driver/msvc-static-rtti.cpp
+++ b/clang/test/Driver/msvc-static-rtti.cpp
@@ -1,5 +1,5 @@
-// RUN: %clang -target x86_64-pc-windows-msvc -fno-rtti -### %s 2>&1 | FileCheck %s -check-prefix NO-RTTI
-// RUN: %clang -target x86_64-pc-windows-msvc -frtti -### %s 2>&1 | FileCheck %s -check-prefix RTTI
+// RUN: %clang --target=x86_64-pc-windows-msvc -fno-rtti -### %s 2>&1 | FileCheck %s --check-prefix=NO-RTTI
+// RUN: %clang --target=x86_64-pc-windows-msvc -frtti -### %s 2>&1 | FileCheck %s --check-prefix=RTTI
// RTTI-NOT: -D_HAS_STATIC_RTTI=0
// NO-RTTI: -D_HAS_STATIC_RTTI=0
diff --git a/clang/test/Driver/msvc-triple.c b/clang/test/Driver/msvc-triple.c
index 42bd02a158ea..c546c1a405d4 100644
--- a/clang/test/Driver/msvc-triple.c
+++ b/clang/test/Driver/msvc-triple.c
@@ -1,7 +1,7 @@
-// RUN: %clang -target i686-pc-windows-msvc19 -S -emit-llvm %s -o - | FileCheck %s --check-prefix=TARGET-19
-// RUN: %clang -target i686-pc-windows-msvc -S -emit-llvm %s -o - -fms-compatibility-version=19 | FileCheck %s --check-prefix=OVERRIDE-19
-// RUN: %clang -target i686-pc-windows-msvc-elf -S -emit-llvm %s -o - | FileCheck %s --check-prefix=ELF-DEFAULT
-// RUN: %clang -target i686-pc-windows-msvc -S -emit-llvm %s -o - | FileCheck %s --check-prefix=DEFAULT
+// RUN: %clang --target=i686-pc-windows-msvc19 -S -emit-llvm %s -o - | FileCheck %s --check-prefix=TARGET-19
+// RUN: %clang --target=i686-pc-windows-msvc -S -emit-llvm %s -o - -fms-compatibility-version=19 | FileCheck %s --check-prefix=OVERRIDE-19
+// RUN: %clang --target=i686-pc-windows-msvc-elf -S -emit-llvm %s -o - | FileCheck %s --check-prefix=ELF-DEFAULT
+// RUN: %clang --target=i686-pc-windows-msvc -S -emit-llvm %s -o - | FileCheck %s --check-prefix=DEFAULT
// TARGET-19: target triple = "i686-pc-windows-msvc19.0.0"
// OVERRIDE-19: target triple = "i686-pc-windows-msvc19.0.0"
diff --git a/clang/test/Driver/msvc_forward.c b/clang/test/Driver/msvc_forward.c
index 15f941ef95de..5e6bdca2491f 100644
--- a/clang/test/Driver/msvc_forward.c
+++ b/clang/test/Driver/msvc_forward.c
@@ -1,4 +1,4 @@
-// RUN: %clang -target i686-pc-win32 -loldnames -lkernel32.lib -luser32.lib -### %s 2>&1 | FileCheck %s
+// RUN: %clang --target=i686-pc-win32 -loldnames -lkernel32.lib -luser32.lib -### %s 2>&1 | FileCheck %s
// CHECK-NOT: "-loldnames.lib"
// CHECK-NOT: "-lkernel32.lib"
// CHECK-NOT: "-luser32.lib"
diff --git a/clang/test/Driver/objc-encode-cxx-class-template-spec.m b/clang/test/Driver/objc-encode-cxx-class-template-spec.m
index 23d114f1a4fa..174bbe0fe6b2 100644
--- a/clang/test/Driver/objc-encode-cxx-class-template-spec.m
+++ b/clang/test/Driver/objc-encode-cxx-class-template-spec.m
@@ -1,7 +1,7 @@
// RUN: %clang -target arm64-apple-ios11 -### %s -o - 2>&1 | FileCheck -check-prefix=DISABLE-ENC %s
// RUN: %clang -target arm64-apple-ios11 -fobjc-encode-cxx-class-template-spec -### %s -o - 2>&1 | FileCheck -check-prefix=ENABLE-ENC %s
-// RUN: %clang -target x86_64-linux-gnu -fobjc-runtime=gnustep -### %s -o - 2>&1 | FileCheck -check-prefix=ENABLE-ENC %s
-// RUN: %clang -target x86_64-linux-gnu -fobjc-runtime=gnustep -fno-objc-encode-cxx-class-template-spec -### %s -o - 2>&1 | FileCheck -check-prefix=DISABLE-ENC %s
+// RUN: %clang --target=x86_64-linux-gnu -fobjc-runtime=gnustep -### %s -o - 2>&1 | FileCheck -check-prefix=ENABLE-ENC %s
+// RUN: %clang --target=x86_64-linux-gnu -fobjc-runtime=gnustep -fno-objc-encode-cxx-class-template-spec -### %s -o - 2>&1 | FileCheck -check-prefix=DISABLE-ENC %s
// DISABLE-ENC-NOT: -fobjc-encode-cxx-class-template-spec
// ENABLE-ENC: -fobjc-encode-cxx-class-template-spec
diff --git a/clang/test/Driver/openbsd.cpp b/clang/test/Driver/openbsd.cpp
index 906b0d22242d..01aa09b75f27 100644
--- a/clang/test/Driver/openbsd.cpp
+++ b/clang/test/Driver/openbsd.cpp
@@ -1,22 +1,22 @@
// Check libraries used when linking C++
-// RUN: %clangxx %s -### -o %t.o -target amd64-pc-openbsd 2>&1 \
+// RUN: %clangxx %s -### -o %t.o --target=amd64-pc-openbsd 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-CXX %s
-// RUN: %clangxx %s -### -o %t.o -target i686-pc-openbsd 2>&1 \
+// RUN: %clangxx %s -### -o %t.o --target=i686-pc-openbsd 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-CXX %s
-// RUN: %clangxx %s -### -o %t.o -target aarch64-unknown-openbsd 2>&1 \
+// RUN: %clangxx %s -### -o %t.o --target=aarch64-unknown-openbsd 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-CXX %s
-// RUN: %clangxx %s -### -o %t.o -target arm-unknown-openbsd 2>&1 \
+// RUN: %clangxx %s -### -o %t.o --target=arm-unknown-openbsd 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-CXX %s
// CHECK-CXX: "-lc++" "-lc++abi" "-lpthread" "-lm"
// Check for profiling variants of libraries when linking C++
-// RUN: %clangxx %s -### -pg -o %t.o -target amd64-pc-openbsd 2>&1 \
+// RUN: %clangxx %s -### -pg -o %t.o --target=amd64-pc-openbsd 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-PG-CXX %s
-// RUN: %clangxx %s -### -pg -o %t.o -target i686-pc-openbsd 2>&1 \
+// RUN: %clangxx %s -### -pg -o %t.o --target=i686-pc-openbsd 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-PG-CXX %s
-// RUN: %clangxx %s -### -pg -o %t.o -target aarch64-unknown-openbsd 2>&1 \
+// RUN: %clangxx %s -### -pg -o %t.o --target=aarch64-unknown-openbsd 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-PG-CXX %s
-// RUN: %clangxx %s -### -pg -o %t.o -target arm-unknown-openbsd 2>&1 \
+// RUN: %clangxx %s -### -pg -o %t.o --target=arm-unknown-openbsd 2>&1 \
// RUN: | FileCheck --check-prefix=CHECK-PG-CXX %s
// CHECK-PG-CXX: "-lc++_p" "-lc++abi_p" "-lpthread_p" "-lm_p"
diff --git a/clang/test/Driver/opencl.cl b/clang/test/Driver/opencl.cl
index b9f52e07f3b1..aba37fc328fb 100644
--- a/clang/test/Driver/opencl.cl
+++ b/clang/test/Driver/opencl.cl
@@ -21,7 +21,7 @@
// RUN: %clang -S -### -fno-offload-uniform-block -cl-uniform-work-group-size %s 2>&1 | FileCheck --check-prefix=CHECK-UNIFORM-WG %s
// RUN: not %clang -cl-std=c99 -DOPENCL %s 2>&1 | FileCheck --check-prefix=CHECK-C99 %s
// RUN: not %clang -cl-std=invalid -DOPENCL %s 2>&1 | FileCheck --check-prefix=CHECK-INVALID %s
-// RUN: %clang -S -### -target spir-unknown-unknown %s 2>&1 | FileCheck --check-prefix=CHECK-W-SPIR-COMPAT %s
+// RUN: %clang -S -### --target=spir %s 2>&1 | FileCheck --check-prefix=CHECK-W-SPIR-COMPAT %s
// RUN: %clang -S -### --target=amdgcn-amd-amdhsa-opencl -nogpuinc -nogpulib %s 2>&1 | FileCheck --check-prefix=CHECK-NO-W-SPIR-COMPAT %s
// RUN: %clang -S -### -cl-ext="+test_ext" %s 2>&1 | FileCheck --check-prefix=CHECK-EXT %s
diff --git a/clang/test/Driver/openmp-offload-infer.c b/clang/test/Driver/openmp-offload-infer.c
index 50333293eb7d..388860abc01a 100644
--- a/clang/test/Driver/openmp-offload-infer.c
+++ b/clang/test/Driver/openmp-offload-infer.c
@@ -43,7 +43,7 @@
// RUN: --offload-arch=sm_70 --offload-arch=gfx908 --offload-arch=skylake \
// RUN: -nogpulib %s 2>&1 | FileCheck %s --check-prefix=CHECK-FAILED
-// CHECK-FAILED: error: failed to deduce triple for target architecture 'skylake'; specify the triple using '-fopenmp-targets' and '-Xopenmp-target' instead.
+// CHECK-FAILED: error: failed to deduce triple for target architecture 'skylake'; specify the triple using '-fopenmp-targets' and '-Xopenmp-target' instead
// RUN: %clang -### --target=x86_64-unknown-linux-gnu -ccc-print-bindings -fopenmp=libomp \
// RUN: --offload-arch=sm_70 --offload-arch=gfx908 -fno-openmp \
diff --git a/clang/test/Driver/openmp-system-arch.c b/clang/test/Driver/openmp-system-arch.c
index 4e024e6b11d1..a48c1e76fa75 100644
--- a/clang/test/Driver/openmp-system-arch.c
+++ b/clang/test/Driver/openmp-system-arch.c
@@ -31,7 +31,7 @@
// RUN: not %clang -### --target=x86_64-unknown-linux-gnu -nogpulib -fopenmp=libomp --offload-arch= \
// RUN: --nvptx-arch-tool=%t/nvptx_arch_empty --amdgpu-arch-tool=%t/amdgpu_arch_empty %s 2>&1 \
// RUN: | FileCheck %s --check-prefix=NO-OUTPUT-ERROR
-// NO-OUTPUT-ERROR: error: failed to deduce triple for target architecture 'native'; specify the triple using '-fopenmp-targets' and '-Xopenmp-target' instead.
+// NO-OUTPUT-ERROR: error: failed to deduce triple for target architecture 'native'; specify the triple using '-fopenmp-targets' and '-Xopenmp-target' instead
// case when amdgpu-arch succeeds.
// RUN: %clang -### --target=x86_64-unknown-linux-gnu -nogpulib -fopenmp=libomp --offload-arch=native \
diff --git a/clang/test/Driver/ps4-ps5-visibility-dllstorageclass.c b/clang/test/Driver/ps4-ps5-visibility-dllstorageclass.c
index 430827805a8f..71f8661679eb 100644
--- a/clang/test/Driver/ps4-ps5-visibility-dllstorageclass.c
+++ b/clang/test/Driver/ps4-ps5-visibility-dllstorageclass.c
@@ -1,16 +1,19 @@
// Check behaviour of -fvisibility-from-dllstorageclass options for PS4/PS5.
// DEFINE: %{triple} =
+// DEFINE: %{prefix} =
// DEFINE: %{run} = \
// DEFINE: %clang -### -target %{triple} %s -Werror -o - 2>&1 | \
-// DEFINE: FileCheck %s --check-prefix=DEFAULTS \
+// DEFINE: FileCheck %s --check-prefixes=DEFAULTS,%{prefix} \
// DEFINE: --implicit-check-not=-fvisibility-from-dllstorageclass \
// DEFINE: --implicit-check-not=-fvisibility-dllexport \
// DEFINE: --implicit-check-not=-fvisibility-nodllstorageclass \
// DEFINE: --implicit-check-not=-fvisibility-externs-dllimport \
// DEFINE: --implicit-check-not=-fvisibility-externs-nodllstorageclass
+// REDEFINE: %{prefix} = DEFAULTS-PS4
// REDEFINE: %{triple} = x86_64-scei-ps4
// RUN: %{run}
+// REDEFINE: %{prefix} = DEFAULTS-PS5
// REDEFINE: %{triple} = x86_64-sie-ps5
// RUN: %{run}
//
@@ -20,25 +23,29 @@
// REDEFINE: -fvisibility-from-dllstorageclass \
// REDEFINE: -Werror \
// REDEFINE: %s -o - 2>&1 | \
-// REDEFINE: FileCheck %s --check-prefix=DEFAULTS \
+// REDEFINE: FileCheck %s --check-prefixes=DEFAULTS,%{prefix} \
// REDEFINE: --implicit-check-not=-fvisibility-from-dllstorageclass \
// REDEFINE: --implicit-check-not=-fvisibility-dllexport \
// REDEFINE: --implicit-check-not=-fvisibility-nodllstorageclass \
// REDEFINE: --implicit-check-not=-fvisibility-externs-dllimport \
// REDEFINE: --implicit-check-not=-fvisibility-externs-nodllstorageclass
+// REDEFINE: %{prefix} = DEFAULTS-PS4
// REDEFINE: %{triple} = x86_64-scei-ps4
// RUN: %{run}
+// REDEFINE: %{prefix} = DEFAULTS-PS5
// REDEFINE: %{triple} = x86_64-sie-ps5
// RUN: %{run}
// DEFAULTS: "-fvisibility-from-dllstorageclass"
// DEFAULTS-SAME: "-fvisibility-dllexport=protected"
-// DEFAULTS-SAME: "-fvisibility-nodllstorageclass=hidden"
+// DEFAULTS-PS4-SAME: "-fvisibility-nodllstorageclass=hidden"
+// DEFAULTS-PS5-SAME: "-fvisibility-nodllstorageclass=keep"
// DEFAULTS-SAME: "-fvisibility-externs-dllimport=default"
-// DEFAULTS-SAME: "-fvisibility-externs-nodllstorageclass=default"
+// DEFAULTS-PS4-SAME: "-fvisibility-externs-nodllstorageclass=default"
+// DEFAULTS-PS5-SAME: "-fvisibility-externs-nodllstorageclass=keep"
// REDEFINE: %{run} = \
-// REDEFINE: %clang -### -target x86_64-scei-ps4 \
+// REDEFINE: %clang -### -target %{triple} \
// REDEFINE: -fvisibility-from-dllstorageclass \
// REDEFINE: -fvisibility-dllexport=hidden \
// REDEFINE: -fvisibility-nodllstorageclass=protected \
@@ -64,37 +71,41 @@
// UNUSED-NEXT: warning: argument unused during compilation: '-fvisibility-externs-nodllstorageclass=protected'
// REDEFINE: %{run} = \
-// REDEFINE: %clang -### -target x86_64-scei-ps4 \
+// REDEFINE: %clang -### -target %{triple} \
// REDEFINE: -fvisibility-nodllstorageclass=protected \
// REDEFINE: -fvisibility-externs-dllimport=hidden \
// REDEFINE: -Werror \
// REDEFINE: %s -o - 2>&1 | \
-// REDEFINE: FileCheck %s -check-prefix=SOME \
+// REDEFINE: FileCheck %s -check-prefixes=SOME,%{prefix} \
// REDEFINE: --implicit-check-not=-fvisibility-from-dllstorageclass \
// REDEFINE: --implicit-check-not=-fvisibility-dllexport \
// REDEFINE: --implicit-check-not=-fvisibility-nodllstorageclass \
// REDEFINE: --implicit-check-not=-fvisibility-externs-dllimport \
// REDEFINE: --implicit-check-not=-fvisibility-externs-nodllstorageclass
+// REDEFINE: %{prefix} = SOME-PS4
// REDEFINE: %{triple} = x86_64-scei-ps4
// RUN: %{run}
+// REDEFINE: %{prefix} = SOME-PS5
// REDEFINE: %{triple} = x86_64-sie-ps5
// RUN: %{run}
// REDEFINE: %{run} = \
-// REDEFINE: %clang -### -target x86_64-scei-ps4 \
+// REDEFINE: %clang -### -target %{triple} \
// REDEFINE: -fvisibility-from-dllstorageclass \
// REDEFINE: -fvisibility-nodllstorageclass=protected \
// REDEFINE: -fvisibility-externs-dllimport=hidden \
// REDEFINE: -Werror \
// REDEFINE: %s -o - 2>&1 | \
-// REDEFINE: FileCheck %s -check-prefix=SOME \
+// REDEFINE: FileCheck %s -check-prefixes=SOME,%{prefix} \
// REDEFINE: --implicit-check-not=-fvisibility-from-dllstorageclass \
// REDEFINE: --implicit-check-not=-fvisibility-dllexport \
// REDEFINE: --implicit-check-not=-fvisibility-nodllstorageclass \
// REDEFINE: --implicit-check-not=-fvisibility-externs-dllimport \
// REDEFINE: --implicit-check-not=-fvisibility-externs-nodllstorageclass
+// REDEFINE: %{prefix} = SOME-PS4
// REDEFINE: %{triple} = x86_64-scei-ps4
// RUN: %{run}
+// REDEFINE: %{prefix} = SOME-PS5
// REDEFINE: %{triple} = x86_64-sie-ps5
// RUN: %{run}
@@ -102,10 +113,11 @@
// SOME-SAME: "-fvisibility-dllexport=protected"
// SOME-SAME: "-fvisibility-nodllstorageclass=protected"
// SOME-SAME: "-fvisibility-externs-dllimport=hidden"
-// SOME-SAME: "-fvisibility-externs-nodllstorageclass=default"
+// SOME-PS4-SAME: "-fvisibility-externs-nodllstorageclass=default"
+// SOME-PS5-SAME: "-fvisibility-externs-nodllstorageclass=keep"
// REDEFINE: %{run} = \
-// REDEFINE: %clang -### -target x86_64-scei-ps4 \
+// REDEFINE: %clang -### -target %{triple} \
// REDEFINE: -fvisibility-dllexport=default \
// REDEFINE: -fvisibility-dllexport=hidden \
// REDEFINE: -fvisibility-nodllstorageclass=default \
@@ -121,14 +133,15 @@
// REDEFINE: --implicit-check-not=-fvisibility-dllexport \
// REDEFINE: --implicit-check-not=-fvisibility-nodllstorageclass \
// REDEFINE: --implicit-check-not=-fvisibility-externs-dllimport \
-// REDEFINE: --implicit-check-not=-fvisibility-externs-nodllstorageclass
+// REDEFINE: --implicit-check-not=-fvisibility-externs-nodllstorageclass \
+// REDEFINE: --implicit-check-not="warning: argument unused"
// REDEFINE: %{triple} = x86_64-scei-ps4
// RUN: %{run}
// REDEFINE: %{triple} = x86_64-sie-ps5
// RUN: %{run}
// REDEFINE: %{run} = \
-// REDEFINE: %clang -### -target x86_64-scei-ps4 \
+// REDEFINE: %clang -### -target %{triple} \
// REDEFINE: -fvisibility-from-dllstorageclass \
// REDEFINE: -fvisibility-dllexport=default \
// REDEFINE: -fvisibility-dllexport=hidden \
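The %{triple} and %{prefix} machinery used throughout this test is lit's parameterized-substitution feature: DEFINE introduces a test-local substitution, REDEFINE rebinds it, and each RUN line is expanded with the bindings in effect at that point, which is what lets a single %{run} template be replayed per triple. A minimal sketch of the pattern (names hypothetical):

  // DEFINE: %{opt} = -O1
  // RUN: %clang %{opt} -c %s
  // REDEFINE: %{opt} = -O2
  // RUN: %clang %{opt} -c %s

Here the first RUN expands to `%clang -O1 -c %s` and the second to `%clang -O2 -c %s`.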
diff --git a/clang/test/Driver/ps4-visibility.cl b/clang/test/Driver/ps4-visibility.cl
new file mode 100644
index 000000000000..a0ed7c71f1f0
--- /dev/null
+++ b/clang/test/Driver/ps4-visibility.cl
@@ -0,0 +1,32 @@
+/// Check PS4 specific interactions between visibility options.
+/// Detailed testing of -fvisibility-from-dllstorageclass is covered elsewhere.
+
+/// Check defaults.
+// RUN: %clang -### -target x86_64-scei-ps4 -x cl -c -emit-llvm %s 2>&1 | \
+// RUN: FileCheck -check-prefix=DEFAULT %s --implicit-check-not=fvisibility --implicit-check-not=ftype-visibility --implicit-check-not=dllstorageclass
+// DEFAULT-DAG: "-fvisibility-from-dllstorageclass"
+// DEFAULT-DAG: "-fvisibility-dllexport=protected"
+// DEFAULT-DAG: "-fvisibility-nodllstorageclass=hidden"
+// DEFAULT-DAG: "-fvisibility-externs-dllimport=default"
+// DEFAULT-DAG: "-fvisibility-externs-nodllstorageclass=default"
+
+/// Check that -fvisibility-from-dllstorageclass is added in the presence of -fvisibility=.
+// RUN: %clang -### -target x86_64-scei-ps4 -x cl -c -emit-llvm -fvisibility=default %s 2>&1 | \
+// RUN: FileCheck -check-prefixes=DEFAULT,VISEQUALS %s --implicit-check-not=fvisibility --implicit-check-not=ftype-visibility --implicit-check-not=dllstorageclass
+// VISEQUALS-DAG: "-fvisibility=default"
+
+/// Check that -fvisibility-from-dllstorageclass is added in the presence of -fvisibility-ms-compat.
+// RUN: %clang -### -target x86_64-scei-ps4 -x cl -c -emit-llvm -fvisibility-ms-compat %s 2>&1 | \
+// RUN: FileCheck -check-prefixes=DEFAULT,MSCOMPT %s --implicit-check-not=fvisibility --implicit-check-not=ftype-visibility --implicit-check-not=dllstorageclass
+// MSCOMPT-DAG: "-fvisibility=hidden"
+// MSCOMPT-DAG: "-ftype-visibility=default"
+
+/// -fvisibility-from-dllstorageclass added explicitly.
+// RUN: %clang -### -target x86_64-scei-ps4 -x cl -c -emit-llvm -fvisibility-from-dllstorageclass %s 2>&1 | \
+// RUN: FileCheck -check-prefixes=DEFAULT %s --implicit-check-not=fvisibility --implicit-check-not=ftype-visibility --implicit-check-not=dllstorageclass
+
+/// -fvisibility-from-dllstorageclass disabled explicitly.
+// RUN: %clang -### -target x86_64-scei-ps4 -x cl -c -emit-llvm -fno-visibility-from-dllstorageclass %s 2>&1 | \
+// RUN: FileCheck -check-prefixes=NOVISFROM %s --implicit-check-not=fvisibility --implicit-check-not=ftype-visibility --implicit-check-not=dllstorageclass
+// NOVISFROM-NOT: "-fvisibility-from-dllstorageclass"
+
diff --git a/clang/test/Driver/ps5-visibility.cl b/clang/test/Driver/ps5-visibility.cl
new file mode 100644
index 000000000000..ad144057be63
--- /dev/null
+++ b/clang/test/Driver/ps5-visibility.cl
@@ -0,0 +1,33 @@
+/// Check PS5 specific interactions between visibility options.
+/// Detailed testing of -fvisibility-from-dllstorageclass is covered elsewhere.
+
+/// Check defaults.
+// RUN: %clang -### -target x86_64-sie-ps5 -x cl -c -emit-llvm %s 2>&1 | \
+// RUN: FileCheck -check-prefixes=VDEFAULT,VGND_DEFAULT,DEFAULT %s --implicit-check-not=fvisibility --implicit-check-not=ftype-visibility --implicit-check-not=dllstorageclass
+// VDEFAULT-DAG: "-fvisibility=hidden"
+// VGND_DEFAULT-DAG: "-fvisibility-global-new-delete=source"
+// DEFAULT-DAG: "-fvisibility-from-dllstorageclass"
+// DEFAULT-DAG: "-fvisibility-dllexport=protected"
+// DEFAULT-DAG: "-fvisibility-nodllstorageclass=keep"
+// DEFAULT-DAG: "-fvisibility-externs-dllimport=default"
+// DEFAULT-DAG: "-fvisibility-externs-nodllstorageclass=keep"
+
+/// -fvisibility= specified explicitly.
+// RUN: %clang -### -target x86_64-sie-ps5 -x cl -c -emit-llvm -fvisibility=protected %s 2>&1 | \
+// RUN: FileCheck -check-prefixes=VPROTECTED,VGND_DEFAULT,DEFAULT %s --implicit-check-not=fvisibility --implicit-check-not=ftype-visibility --implicit-check-not=dllstorageclass
+// VPROTECTED-DAG: "-fvisibility=protected"
+
+/// -fvisibility-ms-compat added explicitly.
+// RUN: %clang -### -target x86_64-sie-ps5 -x cl -c -emit-llvm -fvisibility-ms-compat %s 2>&1 | \
+// RUN: FileCheck -check-prefixes=MSCOMPT,VGND_DEFAULT,DEFAULT %s --implicit-check-not=fvisibility --implicit-check-not=ftype-visibility --implicit-check-not=dllstorageclass
+// MSCOMPT-DAG: "-fvisibility=hidden"
+// MSCOMPT-DAG: "-ftype-visibility=default"
+
+/// -fvisibility-from-dllstorageclass added explicitly.
+// RUN: %clang -### -target x86_64-sie-ps5 -x cl -c -emit-llvm -fvisibility-from-dllstorageclass %s 2>&1 | \
+// RUN: FileCheck -check-prefixes=VDEFAULT,VGND_DEFAULT,DEFAULT %s --implicit-check-not=fvisibility --implicit-check-not=ftype-visibility --implicit-check-not=dllstorageclass
+
+/// -fvisibility-from-dllstorageclass disabled explicitly.
+// RUN: %clang -### -target x86_64-sie-ps5 -x cl -c -emit-llvm -fno-visibility-from-dllstorageclass %s 2>&1 | \
+// RUN: FileCheck -check-prefixes=VDEFAULT,VGND_DEFAULT,NOVISFROM %s --implicit-check-not=fvisibility --implicit-check-not=ftype-visibility --implicit-check-not=dllstorageclass
+// NOVISFROM-NOT: "-fvisibility-from-dllstorageclass"
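Reading the defaults asserted above: -fvisibility-from-dllstorageclass derives a symbol's IR visibility from its dll storage class, with one sub-option per case, and the value `keep` leaves the otherwise-computed visibility untouched. A rough sketch of how the four cases map onto declarations, per the PS4/PS5 CHECK lines above (C source, illustrative only):

  __declspec(dllexport) int exported;         // -fvisibility-dllexport:          protected on both
  int plain_def;                              // -fvisibility-nodllstorageclass:  hidden on PS4, keep on PS5
  __declspec(dllimport) extern int imported;  // -fvisibility-externs-dllimport:  default on both
  extern int plain_extern;                    // -fvisibility-externs-nodllstorageclass: default on PS4, keep on PS5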
diff --git a/clang/test/Driver/tocdata-cc1.c b/clang/test/Driver/tocdata-cc1.c
index fe0d97ea02db..e00383deecef 100644
--- a/clang/test/Driver/tocdata-cc1.c
+++ b/clang/test/Driver/tocdata-cc1.c
@@ -1,16 +1,13 @@
// RUN: %clang -### --target=powerpc-ibm-aix-xcoff -mcmodel=medium -mtocdata %s 2>&1 \
-// RUN: | FileCheck -check-prefix=CHECK-NOTOC %s
+// RUN: | FileCheck %s
// RUN: %clang -### --target=powerpc-ibm-aix-xcoff -mcmodel=large -mtocdata %s 2>&1 \
-// RUN: | FileCheck -check-prefix=CHECK-NOTOC %s
+// RUN: | FileCheck %s
// RUN: %clang -### --target=powerpc-ibm-aix-xcoff -mtocdata %s 2>&1 \
-// RUN: | FileCheck -check-prefix=CHECK-TOC %s
+// RUN: | FileCheck %s
// RUN: %clang -### --target=powerpc64-ibm-aix-xcoff -mcmodel=medium -mtocdata %s 2>&1 \
-// RUN: | FileCheck -check-prefix=CHECK-NOTOC %s
+// RUN: | FileCheck %s
// RUN: %clang -### --target=powerpc64-ibm-aix-xcoff -mcmodel=large -mtocdata %s 2>&1 \
-// RUN: | FileCheck -check-prefix=CHECK-NOTOC %s
+// RUN: | FileCheck %s
// RUN: %clang -### --target=powerpc64-ibm-aix-xcoff -mtocdata %s 2>&1 \
-// RUN: | FileCheck -check-prefix=CHECK-TOC %s
-// CHECK-NOTOC: warning: ignoring '-mtocdata' as it is only supported for -mcmodel=small
-// CHECK-NOTOC-NOT: "-cc1"{{.*}}" "-mtocdata"
-// CHECK-TOC: "-cc1"{{.*}}" "-mtocdata"
-// CHECK-TOC-NOT: warning: ignoring '-mtocdata' as it is only supported for -mcmodel=small
+// RUN: | FileCheck %s
+// CHECK: "-cc1"{{.*}}" "-mtocdata"
diff --git a/clang/test/Driver/x-args.c b/clang/test/Driver/x-args.c
index 17bb5d99404d..06c9c7a46156 100644
--- a/clang/test/Driver/x-args.c
+++ b/clang/test/Driver/x-args.c
@@ -6,6 +6,4 @@
// RUN: %clang -fsyntax-only %s -xc %s -xc++ -fsyntax-only 2>&1 | FileCheck %s
// CHECK: '-x c++' after last input file has no effect
-// RUN: not %clang_cl /WX /clang:-xc /clang:-E /clang:-dM -- %s 2>&1 | FileCheck --implicit-check-not="error:" -check-prefix=CL %s
-// RUN: not %clang_cl /TC /WX /clang:-xc /clang:-E /clang:-dM -- %s 2>&1 | FileCheck --implicit-check-not="error:" -check-prefix=CL %s
-// CL: error: unsupported option '-x c'; did you mean '/TC' or '/TP'?
+// RUN: %clang_cl -fsyntax-only /WX -xc++ -- %s
diff --git a/clang/test/Driver/x86-target-features.c b/clang/test/Driver/x86-target-features.c
index 25f8f66bc321..1d5f001c23fc 100644
--- a/clang/test/Driver/x86-target-features.c
+++ b/clang/test/Driver/x86-target-features.c
@@ -21,10 +21,10 @@
// SSE4-AES: "-target-feature" "+sse4.2" "-target-feature" "+aes"
// NO-SSE4-AES: "-target-feature" "-sse4.1" "-target-feature" "-aes"
-// RUN: %clang --target=i386 -march=i386 -mavx -mavx2 -mavx512f -mavx512cd -mavx512er -mavx512pf -mavx512dq -mavx512bw -mavx512vl -mavx512vbmi -mavx512vbmi2 -mavx512ifma %s -### 2>&1 | FileCheck -check-prefix=AVX %s
-// RUN: %clang --target=i386 -march=i386 -mno-avx -mno-avx2 -mno-avx512f -mno-avx512cd -mno-avx512er -mno-avx512pf -mno-avx512dq -mno-avx512bw -mno-avx512vl -mno-avx512vbmi -mno-avx512vbmi2 -mno-avx512ifma %s -### 2>&1 | FileCheck -check-prefix=NO-AVX %s
-// AVX: "-target-feature" "+avx" "-target-feature" "+avx2" "-target-feature" "+avx512f" "-target-feature" "+avx512cd" "-target-feature" "+avx512er" "-target-feature" "+avx512pf" "-target-feature" "+avx512dq" "-target-feature" "+avx512bw" "-target-feature" "+avx512vl" "-target-feature" "+avx512vbmi" "-target-feature" "+avx512vbmi2" "-target-feature" "+avx512ifma"
-// NO-AVX: "-target-feature" "-avx" "-target-feature" "-avx2" "-target-feature" "-avx512f" "-target-feature" "-avx512cd" "-target-feature" "-avx512er" "-target-feature" "-avx512pf" "-target-feature" "-avx512dq" "-target-feature" "-avx512bw" "-target-feature" "-avx512vl" "-target-feature" "-avx512vbmi" "-target-feature" "-avx512vbmi2" "-target-feature" "-avx512ifma"
+// RUN: %clang --target=i386 -march=i386 -mavx -mavx2 -mavx512f -mavx512cd -mavx512dq -mavx512bw -mavx512vl -mavx512vbmi -mavx512vbmi2 -mavx512ifma %s -### 2>&1 | FileCheck -check-prefix=AVX %s
+// RUN: %clang --target=i386 -march=i386 -mno-avx -mno-avx2 -mno-avx512f -mno-avx512cd -mno-avx512dq -mno-avx512bw -mno-avx512vl -mno-avx512vbmi -mno-avx512vbmi2 -mno-avx512ifma %s -### 2>&1 | FileCheck -check-prefix=NO-AVX %s
+// AVX: "-target-feature" "+avx" "-target-feature" "+avx2" "-target-feature" "+avx512f" "-target-feature" "+avx512cd" "-target-feature" "+avx512dq" "-target-feature" "+avx512bw" "-target-feature" "+avx512vl" "-target-feature" "+avx512vbmi" "-target-feature" "+avx512vbmi2" "-target-feature" "+avx512ifma"
+// NO-AVX: "-target-feature" "-avx" "-target-feature" "-avx2" "-target-feature" "-avx512f" "-target-feature" "-avx512cd" "-target-feature" "-avx512dq" "-target-feature" "-avx512bw" "-target-feature" "-avx512vl" "-target-feature" "-avx512vbmi" "-target-feature" "-avx512vbmi2" "-target-feature" "-avx512ifma"
// RUN: %clang --target=i386 -march=i386 -mpclmul -mrdrnd -mfsgsbase -mbmi -mbmi2 %s -### 2>&1 | FileCheck -check-prefix=BMI %s
// RUN: %clang --target=i386 -march=i386 -mno-pclmul -mno-rdrnd -mno-fsgsbase -mno-bmi -mno-bmi2 %s -### 2>&1 | FileCheck -check-prefix=NO-BMI %s
@@ -86,11 +86,6 @@
// SGX: "-target-feature" "+sgx"
// NO-SGX: "-target-feature" "-sgx"
-// RUN: %clang --target=i386 -march=i386 -mprefetchwt1 %s -### 2>&1 | FileCheck -check-prefix=PREFETCHWT1 %s
-// RUN: %clang --target=i386 -march=i386 -mno-prefetchwt1 %s -### 2>&1 | FileCheck -check-prefix=NO-PREFETCHWT1 %s
-// PREFETCHWT1: "-target-feature" "+prefetchwt1"
-// NO-PREFETCHWT1: "-target-feature" "-prefetchwt1"
-
// RUN: %clang --target=i386 -march=i386 -mprefetchi %s -### -o %t.o 2>&1 | FileCheck -check-prefix=PREFETCHI %s
// RUN: %clang --target=i386 -march=i386 -mno-prefetchi %s -### -o %t.o 2>&1 | FileCheck -check-prefix=NO-PREFETCHI %s
// PREFETCHI: "-target-feature" "+prefetchi"
diff --git a/clang/test/ExtractAPI/non_type_template.cpp b/clang/test/ExtractAPI/non_type_template.cpp
index 4e65eb790ca1..85f38e39c82b 100644
--- a/clang/test/ExtractAPI/non_type_template.cpp
+++ b/clang/test/ExtractAPI/non_type_template.cpp
@@ -310,4 +310,48 @@ NestedTemplateTemplateParamPack<Bar, Bar> var;
// VAR-NEXT: }
// VAR-NEXT: ]
+template <typename T>
+class TypeContainer {
+ public:
+ // RUN: FileCheck %s --input-file %t/output.symbols.json --check-prefix TYPE
+ typedef Foo<T> Type;
+// TYPE-LABEL: "!testLabel": "c:non_type_template.cpp@ST>1#T@TypeContainer@T@Type",
+// TYPE: "declarationFragments": [
+// TYPE-NEXT: {
+// TYPE-NEXT: "kind": "keyword",
+// TYPE-NEXT: "spelling": "typedef"
+// TYPE-NEXT: },
+// TYPE-NEXT: {
+// TYPE-NEXT: "kind": "text",
+// TYPE-NEXT: "spelling": " "
+// TYPE-NEXT: },
+// TYPE-NEXT: {
+// TYPE-NEXT: "kind": "typeIdentifier",
+// TYPE-NEXT: "preciseIdentifier": "c:@ST>2#T#NI@Foo",
+// TYPE-NEXT: "spelling": "Foo"
+// TYPE-NEXT: },
+// TYPE-NEXT: {
+// TYPE-NEXT: "kind": "text",
+// TYPE-NEXT: "spelling": "<"
+// TYPE-NEXT: },
+// TYPE-NEXT: {
+// TYPE-NEXT: "kind": "typeIdentifier",
+// TYPE-NEXT: "preciseIdentifier": "c:t0.0",
+// TYPE-NEXT: "spelling": "T"
+// TYPE-NEXT: },
+// TYPE-NEXT: {
+// TYPE-NEXT: "kind": "text",
+// TYPE-NEXT: "spelling": "> "
+// TYPE-NEXT: },
+// TYPE-NEXT: {
+// TYPE-NEXT: "kind": "identifier",
+// TYPE-NEXT: "spelling": "Type"
+// TYPE-NEXT: },
+// TYPE-NEXT: {
+// TYPE-NEXT: "kind": "text",
+// TYPE-NEXT: "spelling": ";"
+// TYPE-NEXT: }
+// TYPE-NEXT: ]
+};
+
// expected-no-diagnostics
diff --git a/clang/test/ExtractAPI/objc_external_category.m b/clang/test/ExtractAPI/objc_external_category.m
index 47e699cb91c0..8afc92489f28 100644
--- a/clang/test/ExtractAPI/objc_external_category.m
+++ b/clang/test/ExtractAPI/objc_external_category.m
@@ -4,6 +4,9 @@
// RUN: --emit-extension-symbol-graphs --symbol-graph-dir=%t/symbols \
// RUN: --product-name=Module -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/modules-cache \
// RUN: -triple arm64-apple-macosx -x objective-c-header %t/input.h -verify
+// RUN: %clang_cc1 -extract-api --pretty-sgf --emit-sgf-symbol-labels-for-testing \
+// RUN: --product-name=Module -o %t/ModuleNoExt.symbols.json -triple arm64-apple-macosx \
+// RUN: -x objective-c-header %t/input.h
//--- input.h
#include "ExternalModule.h"
@@ -28,15 +31,20 @@ module ExternalModule {
header "ExternalModule.h"
}
+// Main symbol graph from the build with extension SGFs
// RUN: FileCheck %s --input-file %t/symbols/Module.symbols.json --check-prefix MOD
+
// MOD-NOT: "!testRelLabel": "memberOf $ c:objc(cs)ExtInterface(py)Property $ c:objc(cs)ExtInterface"
// MOD-NOT: "!testRelLabel": "memberOf $ c:objc(cs)ExtInterface(im)InstanceMethod $ c:objc(cs)ExtInterface"
// MOD-NOT: "!testRelLabel": "memberOf $ c:objc(cs)ExtInterface(cm)ClassMethod $ c:objc(cs)ExtInterface"
-// MOD-NOT: "!testLabel": "c:objc(cs)ExtInterface(py)Property"
-// MOD-NOT: "!testLabel": "c:objc(cs)ExtInterface(im)InstanceMethod"
-// MOD-NOT: "!testLabel": "c:objc(cs)ExtInterface(cm)ClassMethod"
-// MOD-NOT: "!testLabel": "c:objc(cs)ExtInterface"
-// MOD-DAG: "!testLabel": "c:objc(cs)ModInterface"
+// MOD-NOT: "c:objc(cs)ExtInterface(py)Property"
+// MOD-NOT: "c:objc(cs)ExtInterface(im)InstanceMethod"
+// MOD-NOT: "c:objc(cs)ExtInterface(cm)ClassMethod"
+// MOD-NOT: "c:objc(cs)ExtInterface"
+// MOD-DAG: "c:objc(cs)ModInterface"
+
+// The symbol graph from the build without extension SGFs should be identical to the main symbol graph produced with extension SGFs enabled
+// RUN: diff %t/symbols/Module.symbols.json %t/ModuleNoExt.symbols.json
// RUN: FileCheck %s --input-file %t/symbols/ExternalModule@Module.symbols.json --check-prefix EXT
// EXT-DAG: "!testRelLabel": "memberOf $ c:objc(cs)ExtInterface(py)Property $ c:objc(cs)ExtInterface"
diff --git a/clang/test/Frontend/optimization-remark-options.c b/clang/test/Frontend/optimization-remark-options.c
index 96e480d140be..357273a65063 100644
--- a/clang/test/Frontend/optimization-remark-options.c
+++ b/clang/test/Frontend/optimization-remark-options.c
@@ -1,7 +1,7 @@
// REQUIRES: x86-registered-target
// RUN: %clang -O1 -fvectorize -target x86_64-unknown-unknown -mllvm -vectorize-memory-check-threshold=8 -Rpass-analysis=loop-vectorize -emit-llvm -S %s -o - 2>&1 | FileCheck %s
-// CHECK: {{.*}}:10:11: remark: loop not vectorized: cannot prove it is safe to reorder floating-point operations; allow reordering by specifying '#pragma clang loop vectorize(enable)' before the loop or by providing the compiler option '-ffast-math'.
+// CHECK: {{.*}}:10:11: remark: loop not vectorized: cannot prove it is safe to reorder floating-point operations; allow reordering by specifying '#pragma clang loop vectorize(enable)' before the loop or by providing the compiler option '-ffast-math'
double foo(int N) {
double v = 0.0;
@@ -12,7 +12,7 @@ double foo(int N) {
return v;
}
-// CHECK: {{.*}}:18:3: remark: loop not vectorized: cannot prove it is safe to reorder memory operations; allow reordering by specifying '#pragma clang loop vectorize(enable)' before the loop. If the arrays will always be independent specify '#pragma clang loop vectorize(assume_safety)' before the loop or provide the '__restrict__' qualifier with the independent array arguments. Erroneous results will occur if these options are incorrectly applied!
+// CHECK: {{.*}}:18:3: remark: loop not vectorized: cannot prove it is safe to reorder memory operations; allow reordering by specifying '#pragma clang loop vectorize(enable)' before the loop; if the arrays will always be independent, specify '#pragma clang loop vectorize(assume_safety)' before the loop or provide the '__restrict__' qualifier with the independent array arguments -- erroneous results will occur if these options are incorrectly applied
void foo2(int *dw, int *uw, int *A, int *B, int *C, int *D, int N) {
for (long i = 0; i < N; i++) {
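The remark rewording above preserves the two suggested remedies. As a minimal sketch, using a hypothetical function saxpy that is not part of this patch, either annotation below would permit the reordering the vectorizer asks about:

// Hypothetical: assert memory independence via the suggested pragma.
void saxpy(int *A, int *B, int N) {
#pragma clang loop vectorize(assume_safety)
  for (long i = 0; i < N; i++)
    A[i] += B[i];
}
// Or promise non-aliasing arrays with the __restrict__ qualifier.
void saxpy_r(int *__restrict__ A, int *__restrict__ B, int N) {
  for (long i = 0; i < N; i++)
    A[i] += B[i];
}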
diff --git a/clang/test/Frontend/x86-target-cpu.c b/clang/test/Frontend/x86-target-cpu.c
index 6b99b2c8574a..6c8502ac2c21 100644
--- a/clang/test/Frontend/x86-target-cpu.c
+++ b/clang/test/Frontend/x86-target-cpu.c
@@ -15,14 +15,8 @@
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -target-cpu cannonlake -verify %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -target-cpu icelake-client -verify %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -target-cpu icelake-server -verify %s
-// RUN: %clang_cc1 -triple x86_64-unknown-unknown -target-cpu knl -verify=knl %s
-// knl-warning@*:* {{KNL, KNM related Intel Xeon Phi CPU's specific ISA's supports will be removed in LLVM 19.}}
-// knl-warning@*:* {{KNL, KNM related Intel Xeon Phi CPU's specific ISA's supports will be removed in LLVM 19.}}
-// knl-warning@*:* {{KNL, KNM related Intel Xeon Phi CPU's specific ISA's supports will be removed in LLVM 19.}}
-// RUN: %clang_cc1 -triple x86_64-unknown-unknown -target-cpu knm -verify=knm %s
-// knm-warning@*:* {{KNL, KNM related Intel Xeon Phi CPU's specific ISA's supports will be removed in LLVM 19.}}
-// knm-warning@*:* {{KNL, KNM related Intel Xeon Phi CPU's specific ISA's supports will be removed in LLVM 19.}}
-// knm-warning@*:* {{KNL, KNM related Intel Xeon Phi CPU's specific ISA's supports will be removed in LLVM 19.}}
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -target-cpu knl -verify %s
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -target-cpu knm -verify %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -target-cpu bonnell -verify %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -target-cpu silvermont -verify %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -target-cpu k8 -verify %s
diff --git a/clang/test/InstallAPI/alias_list.test b/clang/test/InstallAPI/alias_list.test
index 3e12221e088c..aba7e395cca9 100644
--- a/clang/test/InstallAPI/alias_list.test
+++ b/clang/test/InstallAPI/alias_list.test
@@ -23,7 +23,7 @@
; RUN: -o %t/AliasList.tbd 2>&1 | FileCheck -allow-empty %s \
; RUN: --check-prefix=INVALID
-; INVALID: error: could not read alias list {{.*}} missing alias for: _hidden
+; INVALID: error: could not read symbol alias input list {{.*}}invalid.txt': invalid input format: missing alias for: _hidden
;--- Frameworks/AliasList.framework/Headers/AliasList.h
// simple alias from one symbol to another.
diff --git a/clang/test/InstallAPI/binary-attributes.test b/clang/test/InstallAPI/binary-attributes.test
index b28e99f64454..fd9ff12998a3 100644
--- a/clang/test/InstallAPI/binary-attributes.test
+++ b/clang/test/InstallAPI/binary-attributes.test
@@ -30,13 +30,13 @@
; RUN: -install_name /System/Library/Frameworks/Simple.framework/Versions/A/Simple \
; RUN: -current_version 1.2.3 -compatibility_version 1 -fapplication-extension \
; RUN: -o tmp.tbd --verify-against=%t/Simple 2>&1 | FileCheck -check-prefix=APPEXTSAFE %s
-; APPEXTSAFE: error: ApplicationExtensionSafe flag does not match: 'true' (provided) vs 'false' (found)
+; APPEXTSAFE: error: the ApplicationExtensionSafe flag does not match: 'true' (provided) vs 'false' (found)
; RUN: not clang-installapi -target x86_64-apple-macos10.12 \
; RUN: -install_name /System/Library/Frameworks/Simple.framework/Versions/A/Simple \
; RUN: -current_version 1.2.3 -compatibility_version 1 -not_for_dyld_shared_cache \
; RUN: -o tmp.tbd --verify-against=%t/Simple 2>&1 | FileCheck -check-prefix=SHARED_CACHE %s
-; SHARED_CACHE: error: NotForDyldSharedCache flag does not match: 'true' (provided) vs 'false' (found)
+; SHARED_CACHE: error: the NotForDyldSharedCache flag does not match: 'true' (provided) vs 'false' (found)
; RUN: not clang-installapi -target x86_64-apple-macos10.12 \
; RUN: -install_name /System/Library/Frameworks/Simple.framework/Versions/A/Simple \
diff --git a/clang/test/InstallAPI/exclusive-passes-2.test b/clang/test/InstallAPI/exclusive-passes-2.test
index 3e7a6d777d5a..132b27df383c 100644
--- a/clang/test/InstallAPI/exclusive-passes-2.test
+++ b/clang/test/InstallAPI/exclusive-passes-2.test
@@ -11,6 +11,15 @@
; RUN: -DFoo -XApple -DDarwin=1 -XElf -DNONDarwin=1 2>&1 | FileCheck -allow-empty %s
; RUN: llvm-readtapi --compare %t/output.tbd %t/expected.tbd 2>&1 | FileCheck -allow-empty %s
+; RUN: clang-installapi -target arm64-apple-macos12 \
+; RUN: -install_name @rpath/libfoo.dylib \
+; RUN: -current_version 1 -compatibility_version 1 \
+; RUN: -I%S/Inputs/LibFoo/usr/include -dynamiclib \
+; RUN: -extra-public-header %S/Inputs/LibFoo/usr/include/foo.h \
+; RUN: -o %t/output2.tbd \
+; RUN: -DFoo -optionlist %t/options.json 2>&1 | FileCheck -allow-empty %s
+; RUN: llvm-readtapi --compare %t/output.tbd %t/expected.tbd 2>&1 | FileCheck -allow-empty %s
+
; CHECK-NOT: error
; CHECK-NOT: warning
diff --git a/clang/test/InstallAPI/exclusive-passes-3.test b/clang/test/InstallAPI/exclusive-passes-3.test
new file mode 100644
index 000000000000..3a9b64c9f7b8
--- /dev/null
+++ b/clang/test/InstallAPI/exclusive-passes-3.test
@@ -0,0 +1,86 @@
+; RUN: rm -rf %t
+; RUN: split-file %s %t
+
+// "Apple" label has split options between the optionlist & command line.
+; RUN: clang-installapi -target arm64-apple-macos12 \
+; RUN: -install_name @rpath/libfoo.dylib -current_version 1 \
+; RUN: -compatibility_version 1 \
+; RUN: -extra-public-header %t/usr/include/opts.h \
+; RUN: -optionlist %t/options.json -XApple -DCLI_OPT=1 \
+; RUN: -I%S/Inputs/LibFoo/usr/include \
+; RUN: -I%t/usr/include -dynamiclib -o %t/output.tbd 2>&1 | FileCheck %s -allow-empty
+; RUN: llvm-readtapi --compare %t/output.tbd %t/expected.tbd 2>&1 | FileCheck -allow-empty %s
+
+// Validate that duplicated options give the same result.
+; RUN: clang-installapi -target arm64-apple-macos12 \
+; RUN: -install_name @rpath/libfoo.dylib -current_version 1 \
+; RUN: -compatibility_version 1 \
+; RUN: -extra-public-header %t/usr/include/opts.h \
+; RUN: -optionlist %t/options.json -XApple -DCLI_OPT=1 \
+; RUN: -I%S/Inputs/LibFoo/usr/include \
+; RUN: -XApple -DDarwin -XElf -DNONDarwin \
+; RUN: -I%t/usr/include -dynamiclib -o %t/output2.tbd 2>&1 | FileCheck %s -allow-empty
+; RUN: llvm-readtapi --compare %t/output2.tbd %t/expected.tbd 2>&1 | FileCheck -allow-empty %s
+
+; CHECK-NOT: error
+; CHECK-NOT: warning
+
+;--- usr/include/opts.h
+#ifndef OPTS_H
+#define OPTS_H
+#include <macro_defs.h>
+
+#if defined(CLI_OPT) && CLI_OPT
+ #define SUFFIX "$final"
+#else
+ #define SUFFIX
+#endif
+
+
+#define __STRING(x) #x
+#define PLATFORM_ALIAS(sym) __asm("_" __STRING(sym) DARWIN LINUX SUFFIX)
+extern int foo() PLATFORM_ALIAS(foo);
+
+#endif
+
+;--- expected.tbd
+{
+ "main_library": {
+ "exported_symbols": [
+ {
+ "text": {
+ "global": [
+ "_foo$darwin$final",
+ "_foo$linux",
+ "_foo"
+ ]
+ }
+ }
+ ],
+ "flags": [
+ {
+ "attributes": [
+ "not_app_extension_safe"
+ ]
+ }
+ ],
+ "install_names": [
+ {
+ "name": "@rpath/libfoo.dylib"
+ }
+ ],
+ "target_info": [
+ {
+ "min_deployment": "12",
+ "target": "arm64-macos"
+ }
+ ]
+ },
+ "tapi_tbd_version": 5
+}
+
+//--- options.json
+{
+ "Apple" : ["-DDarwin=1"],
+ "Elf" : ["-DNONDarwin=1"]
+}
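The "$darwin$final"-style symbols in expected.tbd come from the asm-label pattern in opts.h: an __asm label on a declaration overrides the symbol name the compiler emits and references. A minimal standalone sketch, with illustrative macro and suffix names not taken from this test:

// Illustrative only: this declaration is emitted and referenced as
// _foo$darwin$final rather than the default _foo.
#define STR(x) #x
#define ALIAS(sym, suffix) __asm("_" STR(sym) suffix)
extern int foo(void) ALIAS(foo, "$darwin$final");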
diff --git a/clang/test/InstallAPI/exclusive-passes.test b/clang/test/InstallAPI/exclusive-passes.test
index 29b0fc3d7a2a..8e2d01ebaab1 100644
--- a/clang/test/InstallAPI/exclusive-passes.test
+++ b/clang/test/InstallAPI/exclusive-passes.test
@@ -10,6 +10,15 @@
; RUN: -o %t/output.tbd -v 2>&1 | FileCheck %s --check-prefix=INSTALLAPI
; RUN: llvm-readtapi --compare %t/output.tbd %t/expected.tbd 2>&1 | FileCheck -allow-empty %s
+// Try with -optionlist.
+; RUN: clang-installapi \
+; RUN: -target arm64-apple-macos12 -install_name @rpath/libfoo.dylib \
+; RUN: -current_version 1 -compatibility_version 1 \
+; RUN: -I%S/Inputs/LibFoo/usr/include -dynamiclib \
+; RUN: -extra-public-header %S/Inputs/LibFoo/usr/include/public.h \
+; RUN: -optionlist %t/options.json -o %t/output2.tbd 2>&1 | FileCheck %s -allow-empty
+; RUN: llvm-readtapi --compare %t/output2.tbd %t/expected.tbd 2>&1 | FileCheck -allow-empty %s
+
; CHECK-NOT: error
; CHECK-NOT: warning
@@ -17,6 +26,12 @@
; INSTALLAPI: Apple Public Headers:
; INSTALLAPI: Elf Public Headers:
+;--- options.json
+{
+ "Apple" : ["-DDarwin=1"],
+ "Elf" : ["-DNONDarwin=1"]
+}
+
;--- expected.tbd
{
"main_library": {
diff --git a/clang/test/InstallAPI/invalid-exclusive-passes.test b/clang/test/InstallAPI/invalid-exclusive-passes.test
index c23c918f0bfb..4b0b64efba08 100644
--- a/clang/test/InstallAPI/invalid-exclusive-passes.test
+++ b/clang/test/InstallAPI/invalid-exclusive-passes.test
@@ -30,6 +30,39 @@
; RUN: -o %t/output.tbd 2>&1 | FileCheck %s --check-prefix=INVALID_PROJECT_OPT
; INVALID_PROJECT_OPT: error: invalid argument '-Xproject' not allowed with '-fprofile-instr-generate'
+// Validate that arguments not allowed with -X are also rejected when passed via JSON
+; RUN: not clang-installapi -target arm64-apple-macos12 \
+; RUN: -install_name @rpath/libfoo.dylib -current_version 1 -compatibility_version 1 \
+; RUN: -optionlist %t/options.json -I/fake/path \
+; RUN: -I%t -dynamiclib -o %t/output.tbd 2>&1 | FileCheck %s --check-prefix=INVALID_JSON_OPT
+; INVALID_JSON_OPT: error: invalid argument '-XApple' not allowed with '-I/fake/path'
+
+// Validate handling of an invalid JSON path
+; RUN: not clang-installapi -target arm64-apple-macos12 \
+; RUN: -install_name @rpath/libfoo.dylib -current_version 1 \
+; RUN: -compatibility_version 1 -optionlist %t/invalid_loc.json \
+; RUN: -I/fake/path -I%t -dynamiclib \
+; RUN: -o %t/output.tbd %t 2>&1 | FileCheck %s --check-prefix=INVALID_JSON_LOC -DMSG=%errc_ENOENT
+; INVALID_JSON_LOC: error: cannot open file {{.*}}invalid_loc.json': [[MSG]]
+
+// Validate handling of an invalid JSON format
+; RUN: not clang-installapi -target arm64-apple-macos12 \
+; RUN: -install_name @rpath/libfoo.dylib -current_version 1 \
+; RUN: -compatibility_version 1 -optionlist %t/invalid_format.json \
+; RUN: -I/fake/path -isysroot %sysroot -I%t -dynamiclib \
+; RUN: -o %t/output.tbd %t 2>&1 | FileCheck %s --check-prefix=INVALID_JSON_FORMAT
+; INVALID_JSON_FORMAT: error: could not read option input list {{.*}}invalid_format.json': invalid input format
+
+;--- options.json
+{
+ "Apple" : ["-I/fake/path"]
+}
+
+;--- invalid_format.json
+{
+ "Apple" : {"opt" : "-I/fake/path"}
+}
+
;--- inputs.json
{
"headers": [ ],
diff --git a/clang/test/Lexer/cxx-features.cpp b/clang/test/Lexer/cxx-features.cpp
index 41550cf02aa3..4c2aa3ae2c54 100644
--- a/clang/test/Lexer/cxx-features.cpp
+++ b/clang/test/Lexer/cxx-features.cpp
@@ -1,17 +1,17 @@
// RUN: %clang_cc1 -std=c++98 -fcxx-exceptions -verify %s
// RUN: %clang_cc1 -std=c++11 -fcxx-exceptions -verify %s
-// RUN: %clang_cc1 -std=c++14 -fcxx-exceptions -fsized-deallocation -verify %s
-// RUN: %clang_cc1 -std=c++17 -fcxx-exceptions -fsized-deallocation -verify %s
-// RUN: %clang_cc1 -std=c++20 -fcxx-exceptions -fsized-deallocation -verify %s
-// RUN: %clang_cc1 -std=c++23 -fcxx-exceptions -fsized-deallocation -verify %s
-// RUN: %clang_cc1 -std=c++2c -fcxx-exceptions -fsized-deallocation -verify %s
+// RUN: %clang_cc1 -std=c++14 -fcxx-exceptions -verify %s
+// RUN: %clang_cc1 -std=c++17 -fcxx-exceptions -verify %s
+// RUN: %clang_cc1 -std=c++20 -fcxx-exceptions -verify %s
+// RUN: %clang_cc1 -std=c++23 -fcxx-exceptions -verify %s
+// RUN: %clang_cc1 -std=c++2c -fcxx-exceptions -verify %s
//
-// RUN: %clang_cc1 -std=c++17 -fcxx-exceptions -fsized-deallocation -fno-relaxed-template-template-args -DNO_RELAXED_TEMPLATE_TEMPLATE_ARGS=1 -verify %s
-// RUN: %clang_cc1 -std=c++17 -fcxx-exceptions -fsized-deallocation -DCONCEPTS_TS=1 -verify %s
-// RUN: %clang_cc1 -std=c++14 -fno-rtti -fno-threadsafe-statics -verify %s -DNO_EXCEPTIONS -DNO_RTTI -DNO_THREADSAFE_STATICS -fsized-deallocation
-// RUN: %clang_cc1 -std=c++14 -fchar8_t -DNO_EXCEPTIONS -DCHAR8_T -verify -fsized-deallocation %s
-// RUN: %clang_cc1 -std=c++2a -fno-char8_t -DNO_EXCEPTIONS -DNO_CHAR8_T -verify -fsized-deallocation %s
+// RUN: %clang_cc1 -std=c++17 -fcxx-exceptions -fno-relaxed-template-template-args -DNO_RELAXED_TEMPLATE_TEMPLATE_ARGS=1 -verify %s
+// RUN: %clang_cc1 -std=c++17 -fcxx-exceptions -DCONCEPTS_TS=1 -verify %s
+// RUN: %clang_cc1 -std=c++14 -fno-rtti -fno-threadsafe-statics -verify %s -DNO_EXCEPTIONS -DNO_RTTI -DNO_THREADSAFE_STATICS
+// RUN: %clang_cc1 -std=c++14 -fchar8_t -DNO_EXCEPTIONS -DCHAR8_T -verify %s
+// RUN: %clang_cc1 -std=c++2a -fno-char8_t -DNO_EXCEPTIONS -DNO_CHAR8_T -verify %s
// expected-no-diagnostics
diff --git a/clang/test/Misc/diag-template-diffing.cpp b/clang/test/Misc/diag-template-diffing-cxx11.cpp
index eefeb0b1117c..eefeb0b1117c 100644
--- a/clang/test/Misc/diag-template-diffing.cpp
+++ b/clang/test/Misc/diag-template-diffing-cxx11.cpp
diff --git a/clang/test/Misc/diag-template-diffing-cxx26.cpp b/clang/test/Misc/diag-template-diffing-cxx26.cpp
new file mode 100644
index 000000000000..2b6dd86a9885
--- /dev/null
+++ b/clang/test/Misc/diag-template-diffing-cxx26.cpp
@@ -0,0 +1,49 @@
+// RUN: %clang_cc1 -fsyntax-only %s -std=c++26 -verify=expected,notree
+// RUN: %clang_cc1 -fsyntax-only %s -std=c++26 -fno-elide-type -verify=expected,notree
+// RUN: %clang_cc1 -fsyntax-only %s -std=c++26 -fdiagnostics-show-template-tree -verify=expected,tree
+// RUN: %clang_cc1 -fsyntax-only %s -std=c++26 -fno-elide-type -fdiagnostics-show-template-tree -verify=expected,tree
+
+namespace GH93068 {
+ int n[2];
+
+ template <auto> struct A {}; // #A
+
+ namespace t1 {
+ // notree-error@#1 {{no viable conversion from 'A<0>' to 'A<n + 1>'}}
+
+ /* tree-error@#1 {{no viable conversion
+ A<
+ [0 != n + 1]>}}*/
+
+ A<n + 1> v1 = A<0>(); // #1
+ // expected-note@#A {{no known conversion from 'A<0>' to 'const A<&n[1]> &' for 1st argument}}
+ // expected-note@#A {{no known conversion from 'A<0>' to 'A<&n[1]> &&' for 1st argument}}
+
+ // notree-error@#2 {{no viable conversion from 'A<n>' to 'A<n + 1>'}}
+ /* tree-error@#2 {{no viable conversion
+ A<
+ [n != n + 1]>}}*/
+
+ A<n + 1> v2 = A<n>(); // #2
+ // expected-note@#A {{no known conversion from 'A<n>' to 'const A<&n[1]> &' for 1st argument}}
+ // expected-note@#A {{no known conversion from 'A<n>' to 'A<&n[1]> &&' for 1st argument}}
+ } // namespace t1
+
+ namespace t2 {
+ A<n> v1;
+ A<n + 1> v2;
+
+ // notree-note@#A {{no known conversion from 'A<n>' to 'const A<(no argument)>' for 1st argument}}
+ // notree-note@#A {{no known conversion from 'A<n>' to 'A<(no argument)>' for 1st argument}}
+
+ /* tree-note@#A {{no known conversion from argument type to parameter type for 1st argument
+ [(no qualifiers) != const] A<
+ [n != (no argument)]>}}*/
+
+ /* tree-note@#A {{no known conversion from argument type to parameter type for 1st argument
+ A<
+ [n != (no argument)]>}}*/
+
+ void f() { v2 = v1; } // expected-error {{no viable overloaded '='}}
+ } // namespace t2
+} // namespace GH93068
diff --git a/clang/test/Modules/implicit-module-remap.cpp b/clang/test/Modules/implicit-module-remap.cpp
new file mode 100644
index 000000000000..47927b969401
--- /dev/null
+++ b/clang/test/Modules/implicit-module-remap.cpp
@@ -0,0 +1,21 @@
+// RUN: rm -rf %t
+// RUN: split-file %s %t
+// RUN: cd %t
+//
+// RUN: %clang_cc1 -fmodules -fmodule-map-file=module.modulemap -fmodules-cache-path=%t -remap-file "test.cpp;%t/test.cpp" %t/test.cpp
+
+//--- a.h
+#define FOO
+
+//--- module.modulemap
+module a {
+ header "a.h"
+}
+
+//--- test.cpp
+#include "a.h"
+
+#ifndef FOO
+#error foo
+#endif
+
diff --git a/clang/test/Modules/no-implicit-declarations.cppm b/clang/test/Modules/no-implicit-declarations.cppm
new file mode 100644
index 000000000000..319d3a432ea2
--- /dev/null
+++ b/clang/test/Modules/no-implicit-declarations.cppm
@@ -0,0 +1,26 @@
+// RUN: rm -rf %t
+// RUN: mkdir %t
+//
+// RUN: %clang_cc1 -std=c++20 %s -emit-module-interface -o %t/a.pcm
+// RUN: llvm-bcanalyzer --dump --disable-histogram --show-binary-blobs %t/a.pcm > %t/a.dump
+// RUN: cat %t/a.dump | FileCheck %s
+//
+// RUN: %clang_cc1 -std=c++20 %s -emit-reduced-module-interface -o %t/a.pcm
+// RUN: llvm-bcanalyzer --dump --disable-histogram --show-binary-blobs %t/a.pcm > %t/a.dump
+// RUN: cat %t/a.dump | FileCheck %s
+
+export module a;
+// Contain at least one declaration to make sure the compiler
+// won't optimize this out.
+export int a = 43;
+
+// CHECK: <DECLTYPES_BLOCK
+// CHECK-NOT: <DECL_TYPEDEF
+// CHECK: <DECL_CONTEXT_LEXICAL
+// CHECK: <UnknownCode
+// CHECK: <TYPE_TYPEDEF
+// CHECK: <DECL_VAR
+// CHECK: <EXPR_INTEGER_LITERAL
+// CHECK: <STMT_STOP
+// CHECK: <TYPE_RECORD
+// CHECK: </DECLTYPES_BLOCK>
diff --git a/clang/test/Modules/pr91418.cppm b/clang/test/Modules/pr91418.cppm
new file mode 100644
index 000000000000..b507df162643
--- /dev/null
+++ b/clang/test/Modules/pr91418.cppm
@@ -0,0 +1,65 @@
+// RUN: rm -rf %t
+// RUN: mkdir -p %t
+// RUN: split-file %s %t
+//
+// RUN: %clang_cc1 -triple %itanium_abi_triple -std=c++20 -x c++-header %t/foo.h \
+// RUN: -emit-pch -o %t/foo.pch
+// RUN: %clang_cc1 -triple %itanium_abi_triple -std=c++20 %t/use.cpp -include-pch \
+// RUN: %t/foo.pch -emit-llvm -o - | FileCheck %t/use.cpp
+
+//--- foo.h
+#ifndef FOO_H
+#define FOO_H
+typedef float __m128 __attribute__((__vector_size__(16), __aligned__(16)));
+
+static __inline__ __m128 __attribute__((__always_inline__, __min_vector_width__(128)))
+_mm_setr_ps(float __z, float __y, float __x, float __w)
+{
+ return __extension__ (__m128){ __z, __y, __x, __w };
+}
+
+typedef __m128 VR;
+
+inline VR MakeVR( float X, float Y, float Z, float W )
+{
+ return _mm_setr_ps( X, Y, Z, W );
+}
+
+extern "C" float sqrtf(float);
+
+namespace VectorSinConstantsSSE
+{
+ float a = (16 * sqrtf(0.225f));
+ VR A = MakeVR(a, a, a, a);
+ static const float b = (16 * sqrtf(0.225f));
+ static const VR B = MakeVR(b, b, b, b);
+}
+
+#endif // FOO_H
+
+//--- use.cpp
+#include "foo.h"
+float use() {
+ return VectorSinConstantsSSE::A[0] + VectorSinConstantsSSE::A[1] +
+ VectorSinConstantsSSE::A[2] + VectorSinConstantsSSE::A[3] +
+ VectorSinConstantsSSE::B[0] + VectorSinConstantsSSE::B[1] +
+ VectorSinConstantsSSE::B[2] + VectorSinConstantsSSE::B[3];
+}
+
+// CHECK: define{{.*}}@__cxx_global_var_init(
+// CHECK: store{{.*}}, ptr @_ZN21VectorSinConstantsSSE1aE
+
+// CHECK: define{{.*}}@__cxx_global_var_init.1(
+// CHECK: store{{.*}}, ptr @_ZN21VectorSinConstantsSSE1AE
+
+// CHECK: define{{.*}}@__cxx_global_var_init.2(
+// CHECK: store{{.*}}, ptr @_ZN21VectorSinConstantsSSEL1BE
+
+// CHECK: define{{.*}}@__cxx_global_var_init.3(
+// CHECK: store{{.*}}, ptr @_ZN21VectorSinConstantsSSEL1bE
+
+// CHECK: @_GLOBAL__sub_I_use.cpp
+// CHECK: call{{.*}}@__cxx_global_var_init(
+// CHECK: call{{.*}}@__cxx_global_var_init.1(
+// CHECK: call{{.*}}@__cxx_global_var_init.3(
+// CHECK: call{{.*}}@__cxx_global_var_init.2(
diff --git a/clang/test/OpenMP/assumes_codegen.cpp b/clang/test/OpenMP/assumes_codegen.cpp
index 4a2518a51ec3..4206e5a9caab 100644
--- a/clang/test/OpenMP/assumes_codegen.cpp
+++ b/clang/test/OpenMP/assumes_codegen.cpp
@@ -67,46 +67,46 @@ int lambda_outer() {
}
#pragma omp end assumes
-// AST: void foo() __attribute__((assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses"))) __attribute__((assume("omp_no_openmp"))) {
-// AST-NEXT: }
-// AST-NEXT: class BAR {
-// AST-NEXT: public:
-// AST-NEXT: __attribute__((assume("ompx_range_bar_only"))) __attribute__((assume("ompx_range_bar_only_2"))) __attribute__((assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses"))) __attribute__((assume("omp_no_openmp"))) BAR() {
-// AST-NEXT: }
-// AST-NEXT: __attribute__((assume("ompx_range_bar_only"))) __attribute__((assume("ompx_range_bar_only_2"))) __attribute__((assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses"))) __attribute__((assume("omp_no_openmp"))) void bar1() {
-// AST-NEXT: }
-// AST-NEXT: __attribute__((assume("ompx_range_bar_only"))) __attribute__((assume("ompx_range_bar_only_2"))) __attribute__((assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses"))) __attribute__((assume("omp_no_openmp"))) static void bar2() {
-// AST-NEXT: }
-// AST-NEXT: };
-// AST-NEXT: __attribute__((assume("ompx_range_bar_only"))) __attribute__((assume("ompx_range_bar_only_2"))) __attribute__((assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses"))) __attribute__((assume("omp_no_openmp"))) void bar() {
-// AST-NEXT: BAR b;
-// AST-NEXT: }
-// AST-NEXT: __attribute__((assume("ompx_1234"))) __attribute__((assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses"))) __attribute__((assume("omp_no_openmp"))) void baz();
-// AST-NEXT: template <typename T> class BAZ {
-// AST-NEXT: public:
-// AST-NEXT: __attribute__((assume("ompx_1234"))) __attribute__((assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses"))) __attribute__((assume("omp_no_openmp"))) BAZ<T>() {
-// AST-NEXT: }
-// AST-NEXT: __attribute__((assume("ompx_1234"))) __attribute__((assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses"))) __attribute__((assume("omp_no_openmp"))) void baz1() {
-// AST-NEXT: }
-// AST-NEXT: __attribute__((assume("ompx_1234"))) __attribute__((assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses"))) __attribute__((assume("omp_no_openmp"))) static void baz2() {
-// AST-NEXT: }
-// AST-NEXT: };
-// AST-NEXT: template<> class BAZ<float> {
-// AST-NEXT: public:
-// AST-NEXT: __attribute__((assume("ompx_1234"))) __attribute__((assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses"))) __attribute__((assume("omp_no_openmp"))) __attribute__((assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses"))) __attribute__((assume("omp_no_openmp"))) BAZ() {
-// AST-NEXT: }
-// AST-NEXT: __attribute__((assume("ompx_1234"))) __attribute__((assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses"))) __attribute__((assume("omp_no_openmp"))) __attribute__((assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses"))) __attribute__((assume("omp_no_openmp"))) void baz1();
-// AST-NEXT: __attribute__((assume("ompx_1234"))) __attribute__((assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses"))) __attribute__((assume("omp_no_openmp"))) __attribute__((assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses"))) __attribute__((assume("omp_no_openmp"))) static void baz2();
-// AST-NEXT: };
-// AST-NEXT: __attribute__((assume("ompx_1234"))) __attribute__((assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses"))) __attribute__((assume("omp_no_openmp"))) void baz() {
-// AST-NEXT: BAZ<float> b;
-// AST-NEXT: }
-// AST-NEXT: __attribute__((assume("ompx_lambda_assumption"))) __attribute__((assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses"))) __attribute__((assume("omp_no_openmp"))) int lambda_outer() {
-// AST-NEXT: auto lambda_inner = []() {
-// AST-NEXT: return 42;
-// AST-NEXT: };
-// AST-NEXT: return lambda_inner();
-// AST-NEXT: }
+// AST{LITERAL}: void foo() [[omp::assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses")]] [[omp::assume("omp_no_openmp")]] {
+// AST-NEXT{LITERAL}: }
+// AST-NEXT{LITERAL}: class BAR {
+// AST-NEXT{LITERAL}: public:
+// AST-NEXT{LITERAL}: [[omp::assume("ompx_range_bar_only")]] [[omp::assume("ompx_range_bar_only_2")]] [[omp::assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses")]] [[omp::assume("omp_no_openmp")]] BAR() {
+// AST-NEXT{LITERAL}: }
+// AST-NEXT{LITERAL}: [[omp::assume("ompx_range_bar_only")]] [[omp::assume("ompx_range_bar_only_2")]] [[omp::assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses")]] [[omp::assume("omp_no_openmp")]] void bar1() {
+// AST-NEXT{LITERAL}: }
+// AST-NEXT{LITERAL}: [[omp::assume("ompx_range_bar_only")]] [[omp::assume("ompx_range_bar_only_2")]] [[omp::assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses")]] [[omp::assume("omp_no_openmp")]] static void bar2() {
+// AST-NEXT{LITERAL}: }
+// AST-NEXT{LITERAL}: };
+// AST-NEXT{LITERAL}: [[omp::assume("ompx_range_bar_only")]] [[omp::assume("ompx_range_bar_only_2")]] [[omp::assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses")]] [[omp::assume("omp_no_openmp")]] void bar() {
+// AST-NEXT{LITERAL}: BAR b;
+// AST-NEXT{LITERAL}: }
+// AST-NEXT{LITERAL}: [[omp::assume("ompx_1234")]] [[omp::assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses")]] [[omp::assume("omp_no_openmp")]] void baz();
+// AST-NEXT{LITERAL}: template <typename T> class BAZ {
+// AST-NEXT{LITERAL}: public:
+// AST-NEXT{LITERAL}: [[omp::assume("ompx_1234")]] [[omp::assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses")]] [[omp::assume("omp_no_openmp")]] BAZ<T>() {
+// AST-NEXT{LITERAL}: }
+// AST-NEXT{LITERAL}: [[omp::assume("ompx_1234")]] [[omp::assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses")]] [[omp::assume("omp_no_openmp")]] void baz1() {
+// AST-NEXT{LITERAL}: }
+// AST-NEXT{LITERAL}: [[omp::assume("ompx_1234")]] [[omp::assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses")]] [[omp::assume("omp_no_openmp")]] static void baz2() {
+// AST-NEXT{LITERAL}: }
+// AST-NEXT{LITERAL}: };
+// AST-NEXT{LITERAL}: template<> class BAZ<float> {
+// AST-NEXT{LITERAL}: public:
+// AST-NEXT{LITERAL}: [[omp::assume("ompx_1234")]] [[omp::assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses")]] [[omp::assume("omp_no_openmp")]] [[omp::assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses")]] [[omp::assume("omp_no_openmp")]] BAZ() {
+// AST-NEXT{LITERAL}: }
+// AST-NEXT{LITERAL}: [[omp::assume("ompx_1234")]] [[omp::assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses")]] [[omp::assume("omp_no_openmp")]] [[omp::assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses")]] [[omp::assume("omp_no_openmp")]] void baz1();
+// AST-NEXT{LITERAL}: [[omp::assume("ompx_1234")]] [[omp::assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses")]] [[omp::assume("omp_no_openmp")]] [[omp::assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses")]] [[omp::assume("omp_no_openmp")]] static void baz2();
+// AST-NEXT{LITERAL}: };
+// AST-NEXT{LITERAL}: [[omp::assume("ompx_1234")]] [[omp::assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses")]] [[omp::assume("omp_no_openmp")]] void baz() {
+// AST-NEXT{LITERAL}: BAZ<float> b;
+// AST-NEXT{LITERAL}: }
+// AST-NEXT{LITERAL}: [[omp::assume("ompx_lambda_assumption")]] [[omp::assume("omp_no_openmp_routines,ompx_another_warning,ompx_after_invalid_clauses")]] [[omp::assume("omp_no_openmp")]] int lambda_outer() {
+// AST-NEXT{LITERAL}: auto lambda_inner = []() {
+// AST-NEXT{LITERAL}: return 42;
+// AST-NEXT{LITERAL}: };
+// AST-NEXT{LITERAL}: return lambda_inner();
+// AST-NEXT{LITERAL}: }
#endif
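The rewritten checks above track a printing change only; the underlying assumption attribute is unchanged. For reference, a short sketch of the two equivalent spellings (the declaration names are illustrative):

// C++11 attribute syntax, which the AST printer emits after this change:
[[omp::assume("omp_no_openmp")]] void printed_now();
// GNU attribute syntax, which it emitted before:
__attribute__((assume("omp_no_openmp"))) void printed_before();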
diff --git a/clang/test/OpenMP/assumes_print.cpp b/clang/test/OpenMP/assumes_print.cpp
index d8bdaaaf4518..9254c29ab833 100644
--- a/clang/test/OpenMP/assumes_print.cpp
+++ b/clang/test/OpenMP/assumes_print.cpp
@@ -37,8 +37,8 @@ void baz() {
}
#pragma omp end assumes
-// CHECK: void foo() __attribute__((assume("omp_no_openmp_routines"))) __attribute__((assume("omp_no_openmp")))
-// CHECK: __attribute__((assume("ompx_range_bar_only"))) __attribute__((assume("ompx_range_bar_only_2"))) __attribute__((assume("omp_no_openmp_routines"))) __attribute__((assume("omp_no_openmp"))) void bar()
-// CHECK: __attribute__((assume("ompx_1234"))) __attribute__((assume("omp_no_openmp_routines"))) __attribute__((assume("omp_no_openmp"))) void baz()
+// CHECK{LITERAL}: void foo() [[omp::assume("omp_no_openmp_routines")]] [[omp::assume("omp_no_openmp")]]
+// CHECK{LITERAL}: [[omp::assume("ompx_range_bar_only")]] [[omp::assume("ompx_range_bar_only_2")]] [[omp::assume("omp_no_openmp_routines")]] [[omp::assume("omp_no_openmp")]] void bar()
+// CHECK{LITERAL}: [[omp::assume("ompx_1234")]] [[omp::assume("omp_no_openmp_routines")]] [[omp::assume("omp_no_openmp")]] void baz()
#endif
diff --git a/clang/test/OpenMP/assumes_template_print.cpp b/clang/test/OpenMP/assumes_template_print.cpp
index 614138b2ee0b..f8857ffadf78 100644
--- a/clang/test/OpenMP/assumes_template_print.cpp
+++ b/clang/test/OpenMP/assumes_template_print.cpp
@@ -17,7 +17,7 @@ template <typename T>
struct S {
int a;
// CHECK: template <typename T> struct S {
-// CHECK: void foo() __attribute__((assume("ompx_global_assumption"))) {
+// CHECK{LITERAL}: void foo() [[omp::assume("ompx_global_assumption")]] {
void foo() {
#pragma omp parallel
{}
@@ -25,15 +25,15 @@ struct S {
};
// CHECK: template<> struct S<int> {
-// CHECK: void foo() __attribute__((assume("ompx_global_assumption"))) {
+// CHECK{LITERAL}: void foo() [[omp::assume("ompx_global_assumption")]] {
#pragma omp begin assumes no_openmp
-// CHECK: __attribute__((assume("omp_no_openmp"))) void S_with_assumes_no_call() __attribute__((assume("ompx_global_assumption"))) {
+// CHECK{LITERAL}: [[omp::assume("omp_no_openmp")]] void S_with_assumes_no_call() [[omp::assume("ompx_global_assumption")]] {
void S_with_assumes_no_call() {
S<int> s;
s.a = 0;
}
-// CHECK: __attribute__((assume("omp_no_openmp"))) void S_with_assumes_call() __attribute__((assume("ompx_global_assumption"))) {
+// CHECK{LITERAL}: [[omp::assume("omp_no_openmp")]] void S_with_assumes_call() [[omp::assume("ompx_global_assumption")]] {
void S_with_assumes_call() {
S<int> s;
s.a = 0;
@@ -42,7 +42,7 @@ void S_with_assumes_call() {
}
#pragma omp end assumes
-// CHECK: void S_without_assumes() __attribute__((assume("ompx_global_assumption"))) {
+// CHECK{LITERAL}: void S_without_assumes() [[omp::assume("ompx_global_assumption")]] {
void S_without_assumes() {
S<int> s;
s.foo();
@@ -54,7 +54,7 @@ void S_without_assumes() {
template <typename T>
struct P {
// CHECK: template <typename T> struct P {
-// CHECK: __attribute__((assume("ompx_global_assumption"))) void foo() {
+// CHECK{LITERAL}: [[omp::assume("ompx_global_assumption")]] void foo() {
int a;
void foo() {
#pragma omp parallel
@@ -65,21 +65,21 @@ struct P {
// TODO: Avoid the duplication here:
// CHECK: template<> struct P<int> {
-// CHECK: __attribute__((assume("ompx_global_assumption"))) __attribute__((assume("ompx_global_assumption"))) void foo() {
+// CHECK{LITERAL}: [[omp::assume("ompx_global_assumption")]] [[omp::assume("ompx_global_assumption")]] void foo() {
-// CHECK: __attribute__((assume("ompx_global_assumption"))) void P_without_assumes() {
+// CHECK{LITERAL}: [[omp::assume("ompx_global_assumption")]] void P_without_assumes() {
void P_without_assumes() {
P<int> p;
p.foo();
}
#pragma omp begin assumes no_openmp
-// CHECK: __attribute__((assume("omp_no_openmp"))) __attribute__((assume("ompx_global_assumption"))) void P_with_assumes_no_call() {
+// CHECK{LITERAL}: [[omp::assume("omp_no_openmp")]] [[omp::assume("ompx_global_assumption")]] void P_with_assumes_no_call() {
void P_with_assumes_no_call() {
P<int> p;
p.a = 0;
}
-// CHECK: __attribute__((assume("omp_no_openmp"))) __attribute__((assume("ompx_global_assumption"))) void P_with_assumes_call() {
+// CHECK{LITERAL}: [[omp::assume("omp_no_openmp")]] [[omp::assume("ompx_global_assumption")]] void P_with_assumes_call() {
void P_with_assumes_call() {
P<int> p;
p.a = 0;
diff --git a/clang/test/OpenMP/atomic_messages.c b/clang/test/OpenMP/atomic_messages.c
index 9f6662a9e136..f4e7db52494a 100644
--- a/clang/test/OpenMP/atomic_messages.c
+++ b/clang/test/OpenMP/atomic_messages.c
@@ -405,67 +405,67 @@ void compare(void) {
int x = 0;
int d = 0;
int e = 0;
-// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expected compound statement}}
#pragma omp atomic compare
{}
-// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expected exactly one expression statement}}
#pragma omp atomic compare
{
x = d;
x = e;
}
-// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expected assignment statement}}
#pragma omp atomic compare
{ x += d; }
-// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expected assignment statement}}
#pragma omp atomic compare
{ bbar(); }
-// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expected conditional operator}}
#pragma omp atomic compare
{ x = d; }
-// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expect binary operator in conditional expression}}
#pragma omp atomic compare
{ x = ffoo() ? e : x; }
-// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expect '<', '>' or '==' as order operator}}
#pragma omp atomic compare
{ x = x >= e ? e : x; }
-// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expect comparison in a form of 'x == e', 'e == x', 'x ordop expr', or 'expr ordop x'}}
#pragma omp atomic compare
{ x = d > e ? e : x; }
-// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expect result value to be at false expression}}
#pragma omp atomic compare
{ x = d > x ? e : d; }
-// omp51-error@+4 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+4 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+3 {{expect binary operator in conditional expression}}
#pragma omp atomic compare
{
if (foo())
x = d;
}
-// omp51-error@+4 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+4 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+3 {{expect '<', '>' or '==' as order operator}}
#pragma omp atomic compare
{
if (x >= d)
x = d;
}
-// omp51-error@+4 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+4 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+3 {{expect comparison in a form of 'x == e', 'e == x', 'x ordop expr', or 'expr ordop x'}}
#pragma omp atomic compare
{
if (e > d)
x = d;
}
-// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expected exactly one expression statement}}
#pragma omp atomic compare
{
@@ -473,7 +473,7 @@ void compare(void) {
x = e;
d = e;
}
-// omp51-error@+7 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+7 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+6 {{unexpected 'else' statement}}
#pragma omp atomic compare
{
@@ -491,61 +491,61 @@ void compare_capture(void) {
int v = 0;
int r = 0;
float dr = 0.0;
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expected compound statement}}
#pragma omp atomic compare capture
if (x == e) {}
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expected exactly one expression statement}}
#pragma omp atomic compare capture
if (x == e) {
x = d;
v = x;
}
-// omp51-error@+4 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+4 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+3 {{expected assignment statement}}
#pragma omp atomic compare capture
if (x == e) {
bbar();
}
-// omp51-error@+4 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+4 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+3 {{expected assignment statement}}
#pragma omp atomic compare capture
if (x == e) {
x += d;
}
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expect binary operator in conditional expression}}
#pragma omp atomic compare capture
if (ffoo()) {
x = d;
}
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expect '==' operator}}
#pragma omp atomic compare capture
if (x > e) {
x = d;
}
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expect comparison in a form of 'x == e', 'e == x', 'x ordop expr', or 'expr ordop x'}}
#pragma omp atomic compare capture
if (d == e) {
x = d;
}
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expect 'else' statement}}
#pragma omp atomic compare capture
if (x == e) {
x = d;
}
-// omp51-error@+5 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+5 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+4 {{expected compound statement}}
#pragma omp atomic compare capture
if (x == e) {
x = d;
} else {
}
-// omp51-error@+5 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+5 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+4 {{expected exactly one expression statement}}
#pragma omp atomic compare capture
if (x == e) {
@@ -554,7 +554,7 @@ void compare_capture(void) {
v = x;
d = e;
}
-// omp51-error@+6 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+6 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+5 {{expected assignment statement}}
#pragma omp atomic compare capture
if (x == e) {
@@ -562,7 +562,7 @@ void compare_capture(void) {
} else {
bbar();
}
-// omp51-error@+6 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+6 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+5 {{expected assignment statement}}
#pragma omp atomic compare capture
if (x == e) {
@@ -570,7 +570,7 @@ void compare_capture(void) {
} else {
v += x;
}
-// omp51-error@+6 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+6 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+5 {{expect an assignment statement 'v = x'}}
#pragma omp atomic compare capture
if (x == e) {
@@ -578,35 +578,35 @@ void compare_capture(void) {
} else {
v = d;
}
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expected compound statement}}
#pragma omp atomic compare capture
{}
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expect a compound statement}}
#pragma omp atomic compare capture
x = x > e ? e : x;
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expect a 'if' statement}}
#pragma omp atomic compare capture
{ x = x > e ? e : x; }
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expect a form 'r = x == e; if (r) ...'}}
#pragma omp atomic compare capture
{ r = x == e; if (x == d) { x = e; } }
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expected assignment statement}}
#pragma omp atomic compare capture
{ r = x == e; if (r) { bbar(); } }
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expected assignment statement}}
#pragma omp atomic compare capture
{ r = x == e; if (r) { x += d; } }
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expected compound statement}}
#pragma omp atomic compare capture
{ r = x == e; if (r) {} }
-// omp51-error@+5 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+5 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+4 {{expected exactly one expression statement}}
#pragma omp atomic compare capture
{
@@ -616,19 +616,19 @@ void compare_capture(void) {
v = x;
}
}
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expect '==' operator}}
#pragma omp atomic compare capture
{ r = x > e; if (r) { x = d; } }
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expect comparison in a form of 'x == e', 'e == x', 'x ordop expr', or 'expr ordop x'}}
#pragma omp atomic compare capture
{ r = d == e; if (r) { x = d; } }
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expected compound statement}}
#pragma omp atomic compare capture
{ r = x == e; if (r) { x = d; } else {} }
-// omp51-error@+7 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+7 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+6 {{expected exactly one expression statement}}
#pragma omp atomic compare capture
{
@@ -640,40 +640,40 @@ void compare_capture(void) {
d = e;
}
}
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expected assignment statement}}
#pragma omp atomic compare capture
{ r = x == e; if (r) { x = d; } else { bbar(); } }
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expected assignment statement}}
#pragma omp atomic compare capture
{ r = x == e; if (r) { x = d; } else { v += x; } }
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expect an assignment statement 'v = x'}}
#pragma omp atomic compare capture
{ r = x == e; if (r) { x = d; } else { v = d; } }
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expected assignment statement}}
#pragma omp atomic compare capture
{ v += x; if (x == e) { x = d; } }
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expected assignment statement}}
#pragma omp atomic compare capture
{ if (x == e) { x = d; } v += x; }
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expect an assignment statement 'v = x'}}
#pragma omp atomic compare capture
{ v = d; if (x == e) { x = d; } }
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expect an assignment statement 'v = x'}}
#pragma omp atomic compare capture
{ if (x == e) { x = d; } v = d; }
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expected assignment statement}}
#pragma omp atomic compare capture
{ v = x; bbar(); }
-// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}}
+// omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'}}
// omp51-note@+2 {{expect integer value}}
#pragma omp atomic compare capture
{ dr = x == e; if (dr) { x = d; } }
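For reference, the diagnostic exercised throughout the hunks above enumerates the statement shapes that OpenMP 5.1 accepts for 'atomic compare capture'. A minimal conforming sketch, not part of the patch (variable names mirror the diagnostic's placeholders; assumes -fopenmp -fopenmp-version=51):

void compare_capture_ok(int x, int e, int d) {
  int v, r;
  // form '{v = x; cond-update-stmt}': capture first, then conditionally update
#pragma omp atomic compare capture
  { v = x; if (x == e) { x = d; } }
  // form '{r = x == e; if(r) {x = d;} else {v = x;}}': capture the comparison result
#pragma omp atomic compare capture
  { r = x == e; if (r) { x = d; } else { v = x; } }
}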
diff --git a/clang/test/OpenMP/distribute_firstprivate_messages.cpp b/clang/test/OpenMP/distribute_firstprivate_messages.cpp
index 30fa8be519ef..f507c86b601f 100644
--- a/clang/test/OpenMP/distribute_firstprivate_messages.cpp
+++ b/clang/test/OpenMP/distribute_firstprivate_messages.cpp
@@ -95,7 +95,7 @@ int main(int argc, char **argv) {
for (i = 0; i < argc; ++i) foo();
#pragma omp target
#pragma omp teams
- #pragma omp distribute firstprivate (a, b, c, d, f) // expected-error {{firstprivate variable with incomplete type 'S1'}} expected-warning {{Type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'const S3' is not trivially copyable and not guaranteed to be mapped correctly}} expected-error {{incomplete type 'S1' where a complete type is required}} expected-error {{no matching constructor for initialization of 'S3'}}
+ #pragma omp distribute firstprivate (a, b, c, d, f) // expected-error {{firstprivate variable with incomplete type 'S1'}} expected-warning {{type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'const S3' is not trivially copyable and not guaranteed to be mapped correctly}} expected-error {{incomplete type 'S1' where a complete type is required}} expected-error {{no matching constructor for initialization of 'S3'}}
for (i = 0; i < argc; ++i) foo();
#pragma omp target
#pragma omp teams
@@ -103,11 +103,11 @@ int main(int argc, char **argv) {
for (i = 0; i < argc; ++i) foo();
#pragma omp target
#pragma omp teams
- #pragma omp distribute firstprivate(ba) // expected-warning {{Type 'const S2[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+ #pragma omp distribute firstprivate(ba) // expected-warning {{type 'const S2[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i) foo();
#pragma omp target
#pragma omp teams
- #pragma omp distribute firstprivate(ca) // expected-error {{no matching constructor for initialization of 'S3'}} expected-warning {{Type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+ #pragma omp distribute firstprivate(ca) // expected-error {{no matching constructor for initialization of 'S3'}} expected-warning {{type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i) foo();
#pragma omp target
#pragma omp teams
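The recased warning above ("type ... is not trivially copyable and not guaranteed to be mapped correctly") concerns types with user-provided copy operations that get mapped into a target region. A hedged sketch of one trigger, using an illustrative type name that does not appear in the test:

struct NonTrivial {
  int a;
  NonTrivial() : a(0) {}
  NonTrivial(const NonTrivial &o) : a(o.a) {} // user-provided copy ctor: no longer trivially copyable
};

void trigger(int n) {
  NonTrivial s;
#pragma omp target
#pragma omp teams
#pragma omp distribute firstprivate(s) // would draw: type 'NonTrivial' is not trivially copyable ...
  for (int i = 0; i < n; ++i)
    s.a += i;
}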
diff --git a/clang/test/OpenMP/distribute_parallel_for_firstprivate_messages.cpp b/clang/test/OpenMP/distribute_parallel_for_firstprivate_messages.cpp
index 84d6337be34b..4bed1fe2c3a3 100644
--- a/clang/test/OpenMP/distribute_parallel_for_firstprivate_messages.cpp
+++ b/clang/test/OpenMP/distribute_parallel_for_firstprivate_messages.cpp
@@ -119,7 +119,7 @@ int foomain(int argc, char **argv) {
++k;
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for firstprivate(a, b) // expected-error {{firstprivate variable with incomplete type 'S1'}} expected-warning {{Type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for firstprivate(a, b) // expected-error {{firstprivate variable with incomplete type 'S1'}} expected-warning {{type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int k = 0; k < argc; ++k)
++k;
#pragma omp target
@@ -129,7 +129,7 @@ int foomain(int argc, char **argv) {
++k;
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for firstprivate(e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} expected-warning {{Type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for firstprivate(e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} expected-warning {{type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int k = 0; k < argc; ++k)
++k;
#pragma omp target
@@ -241,7 +241,7 @@ int main(int argc, char **argv) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for firstprivate(a, b, c, d, f) // expected-error {{firstprivate variable with incomplete type 'S1'}} expected-error {{incomplete type 'S1' where a complete type is required}} expected-warning {{Type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'const S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for firstprivate(a, b, c, d, f) // expected-error {{firstprivate variable with incomplete type 'S1'}} expected-error {{incomplete type 'S1' where a complete type is required}} expected-warning {{type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'const S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
@@ -256,12 +256,12 @@ int main(int argc, char **argv) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for firstprivate(ba) // expected-warning {{Type 'const S2[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for firstprivate(ba) // expected-warning {{type 'const S2[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for firstprivate(ca) // expected-warning {{Type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for firstprivate(ca) // expected-warning {{type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
@@ -292,12 +292,12 @@ int main(int argc, char **argv) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for firstprivate(e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} expected-warning {{Type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for firstprivate(e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} expected-warning {{type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for firstprivate(m) // expected-warning {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for firstprivate(m) // expected-warning {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
@@ -329,13 +329,13 @@ int main(int argc, char **argv) {
// expected-error@+3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 {{defined as lastprivate}}
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for lastprivate(g) firstprivate(g) // expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for lastprivate(g) firstprivate(g) // expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
// expected-error@+3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 {{defined as lastprivate}}
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for lastprivate(n) firstprivate(n) // expected-error {{calling a private constructor of class 'S6'}} expected-warning {{Type 'S6' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for lastprivate(n) firstprivate(n) // expected-error {{calling a private constructor of class 'S6'}} expected-warning {{type 'S6' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp parallel
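The "calling a private constructor of class ..." errors above reflect that firstprivate initializes each private copy from the original object through its copy constructor, so that constructor must be accessible. An illustrative well-formed counterpart (class and function names are hypothetical):

class Copyable {
public:
  Copyable() = default;
  Copyable(const Copyable &) = default; // accessible copy ctor: firstprivate is accepted
  int a = 0;
};

void ok_firstprivate(int n) {
  Copyable e;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for firstprivate(e)
  for (int k = 0; k < n; ++k)
    e.a = k;
}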
diff --git a/clang/test/OpenMP/distribute_parallel_for_lastprivate_messages.cpp b/clang/test/OpenMP/distribute_parallel_for_lastprivate_messages.cpp
index f403922e14e8..0a0962ef57c1 100644
--- a/clang/test/OpenMP/distribute_parallel_for_lastprivate_messages.cpp
+++ b/clang/test/OpenMP/distribute_parallel_for_lastprivate_messages.cpp
@@ -119,7 +119,7 @@ int foomain(int argc, char **argv) {
++k;
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for lastprivate(a, b) // expected-error {{lastprivate variable with incomplete type 'S1'}} expected-warning {{Type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for lastprivate(a, b) // expected-error {{lastprivate variable with incomplete type 'S1'}} expected-warning {{type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int k = 0; k < argc; ++k)
++k;
#pragma omp target
@@ -129,7 +129,7 @@ int foomain(int argc, char **argv) {
++k;
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for lastprivate(e, g) // expected-error 2 {{calling a private constructor of class 'S4'}} expected-warning 2 {{Type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for lastprivate(e, g) // expected-error 2 {{calling a private constructor of class 'S4'}} expected-warning 2 {{type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int k = 0; k < argc; ++k)
++k;
#pragma omp target
@@ -228,7 +228,7 @@ int main(int argc, char **argv) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for lastprivate(a, b, c, d, f) // expected-error {{lastprivate variable with incomplete type 'S1'}} expected-error 1 {{const-qualified variable without mutable fields cannot be lastprivate}} expected-error 2 {{const-qualified variable cannot be lastprivate}} expected-error {{incomplete type 'S1' where a complete type is required}} expected-warning {{Type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'const S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for lastprivate(a, b, c, d, f) // expected-error {{lastprivate variable with incomplete type 'S1'}} expected-error 1 {{const-qualified variable without mutable fields cannot be lastprivate}} expected-error 2 {{const-qualified variable cannot be lastprivate}} expected-error {{incomplete type 'S1' where a complete type is required}} expected-warning {{type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'const S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
@@ -243,12 +243,12 @@ int main(int argc, char **argv) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for lastprivate(ba) // expected-warning {{Type 'const S2[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for lastprivate(ba) // expected-warning {{type 'const S2[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for lastprivate(ca) // expected-error {{const-qualified variable without mutable fields cannot be lastprivate}} expected-warning {{Type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for lastprivate(ca) // expected-error {{const-qualified variable without mutable fields cannot be lastprivate}} expected-warning {{type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
@@ -279,12 +279,12 @@ int main(int argc, char **argv) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for lastprivate(e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} expected-warning {{Type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for lastprivate(e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} expected-warning {{type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for lastprivate(m) // expected-error {{'operator=' is a private member of 'S3'}} expected-warning {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for lastprivate(m) // expected-error {{'operator=' is a private member of 'S3'}} expected-warning {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
@@ -325,13 +325,13 @@ int main(int argc, char **argv) {
// expected-error@+3 {{firstprivate variable cannot be lastprivate}} expected-note@+3 {{defined as firstprivate}}
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for firstprivate(m) lastprivate(m) // expected-warning {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for firstprivate(m) lastprivate(m) // expected-warning {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
// expected-error@+3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 {{defined as lastprivate}}
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for lastprivate(n) firstprivate(n) // expected-error {{calling a private constructor of class 'S6'}} expected-warning {{Type 'S6' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for lastprivate(n) firstprivate(n) // expected-error {{calling a private constructor of class 'S6'}} expected-warning {{type 'S6' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
static int si;
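The "'operator=' is a private member of 'S3'" error above comes from lastprivate assigning the last iteration's value back to the original variable, which requires an accessible copy-assignment operator. A hedged sketch (names are illustrative); note that a user-provided operator= also makes the type non-trivially copyable, so the lowercased mapping warning from this patch would still fire here:

struct Assignable {
  int a = 0;
  Assignable &operator=(const Assignable &o) { a = o.a; return *this; } // must be accessible
};

void ok_lastprivate(int n) {
  Assignable t;
#pragma omp target map(tofrom: t)
#pragma omp teams distribute parallel for lastprivate(t) // accepted, but still warns: not trivially copyable
  for (int i = 0; i < n; ++i)
    t.a = i;
}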
diff --git a/clang/test/OpenMP/distribute_parallel_for_private_messages.cpp b/clang/test/OpenMP/distribute_parallel_for_private_messages.cpp
index d25598e46f81..2e0e75096a26 100644
--- a/clang/test/OpenMP/distribute_parallel_for_private_messages.cpp
+++ b/clang/test/OpenMP/distribute_parallel_for_private_messages.cpp
@@ -50,7 +50,7 @@ public:
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for private(a) private(this->a) private(s.a) // expected-error {{expected variable name or data member of current class}}
- for (int k = 0; k < s.a; ++k) // expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (int k = 0; k < s.a; ++k) // expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
++s.a;
return *this;
}
diff --git a/clang/test/OpenMP/distribute_parallel_for_reduction_messages.cpp b/clang/test/OpenMP/distribute_parallel_for_reduction_messages.cpp
index 6b3d9da9a3a6..864fb597214b 100644
--- a/clang/test/OpenMP/distribute_parallel_for_reduction_messages.cpp
+++ b/clang/test/OpenMP/distribute_parallel_for_reduction_messages.cpp
@@ -187,7 +187,7 @@ T tmain(T argc) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for reduction(+ : a, b, c, d, f) // expected-error {{a reduction list item with incomplete type 'S1'}} expected-error 3 {{const-qualified variable cannot be reduction}} expected-error 2 {{'operator+' is a private member of 'S2'}} expected-warning 2 {{Type 'S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning 2 {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for reduction(+ : a, b, c, d, f) // expected-error {{a reduction list item with incomplete type 'S1'}} expected-error 3 {{const-qualified variable cannot be reduction}} expected-error 2 {{'operator+' is a private member of 'S2'}} expected-warning 2 {{type 'S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning 2 {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 10; ++i)
foo();
#pragma omp target
@@ -232,7 +232,7 @@ T tmain(T argc) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for reduction(+ : h, k) // expected-error {{threadprivate or thread local variable cannot be reduction}} expected-warning 2 {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for reduction(+ : h, k) // expected-error {{threadprivate or thread local variable cannot be reduction}} expected-warning 2 {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 10; ++i)
foo();
#pragma omp target
@@ -371,12 +371,12 @@ int main(int argc, char **argv) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for reduction(+ : a, b, c, d, f) // expected-error {{a reduction list item with incomplete type 'S1'}} expected-error 2 {{const-qualified variable cannot be reduction}} expected-error {{'operator+' is a private member of 'S2'}} expected-error {{incomplete type 'S1' where a complete type is required}} expected-warning {{Type 'S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for reduction(+ : a, b, c, d, f) // expected-error {{a reduction list item with incomplete type 'S1'}} expected-error 2 {{const-qualified variable cannot be reduction}} expected-error {{'operator+' is a private member of 'S2'}} expected-error {{incomplete type 'S1' where a complete type is required}} expected-warning {{type 'S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 10; ++i)
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for reduction(min : a, b, c, d, f) // expected-error {{a reduction list item with incomplete type 'S1'}} expected-error 2 {{arguments of OpenMP clause 'reduction' for 'min' or 'max' must be of arithmetic type}} expected-error 2 {{const-qualified variable cannot be reduction}} expected-error {{incomplete type 'S1' where a complete type is required}} expected-warning {{Type 'S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for reduction(min : a, b, c, d, f) // expected-error {{a reduction list item with incomplete type 'S1'}} expected-error 2 {{arguments of OpenMP clause 'reduction' for 'min' or 'max' must be of arithmetic type}} expected-error 2 {{const-qualified variable cannot be reduction}} expected-error {{incomplete type 'S1' where a complete type is required}} expected-warning {{type 'S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 10; ++i)
foo();
#pragma omp target
@@ -386,12 +386,12 @@ int main(int argc, char **argv) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for reduction(+ : ba) // expected-error {{const-qualified variable cannot be reduction}} expected-warning {{Type 'const S2[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for reduction(+ : ba) // expected-error {{const-qualified variable cannot be reduction}} expected-warning {{type 'const S2[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 10; ++i)
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for reduction(* : ca) // expected-error {{const-qualified variable cannot be reduction}} expected-warning {{Type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for reduction(* : ca) // expected-error {{const-qualified variable cannot be reduction}} expected-warning {{type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 10; ++i)
foo();
#pragma omp target
@@ -416,12 +416,12 @@ int main(int argc, char **argv) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for reduction(& : e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} expected-error {{invalid operands to binary expression ('S5' and 'S5')}} expected-warning {{Type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for reduction(& : e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} expected-error {{invalid operands to binary expression ('S5' and 'S5')}} expected-warning {{type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 10; ++i)
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for reduction(+ : h, k, B::x) // expected-error 2 {{threadprivate or thread local variable cannot be reduction}} expected-warning {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for reduction(+ : h, k, B::x) // expected-error 2 {{threadprivate or thread local variable cannot be reduction}} expected-warning {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 10; ++i)
foo();
#pragma omp target
@@ -437,12 +437,12 @@ int main(int argc, char **argv) {
#pragma omp parallel private(k)
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for reduction(+ : p), reduction(+ : p) // expected-error 2 {{argument of OpenMP clause 'reduction' must reference the same object in all threads}} expected-warning {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for reduction(+ : p), reduction(+ : p) // expected-error 2 {{argument of OpenMP clause 'reduction' must reference the same object in all threads}} expected-warning {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 10; ++i)
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for reduction(+ : p), reduction(+ : p) // expected-error {{variable can appear only once in OpenMP 'reduction' clause}} expected-note {{previously referenced here}} expected-warning {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for reduction(+ : p), reduction(+ : p) // expected-error {{variable can appear only once in OpenMP 'reduction' clause}} expected-note {{previously referenced here}} expected-warning {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 10; ++i)
foo();
#pragma omp target
diff --git a/clang/test/OpenMP/distribute_parallel_for_simd_private_messages.cpp b/clang/test/OpenMP/distribute_parallel_for_simd_private_messages.cpp
index 43bc6ad8e637..0cb8c01625db 100644
--- a/clang/test/OpenMP/distribute_parallel_for_simd_private_messages.cpp
+++ b/clang/test/OpenMP/distribute_parallel_for_simd_private_messages.cpp
@@ -50,7 +50,7 @@ public:
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd private(a) private(this->a) private(s.a) // expected-error {{expected variable name or data member of current class}}
- for (int k = 0; k < s.a; ++k) // expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (int k = 0; k < s.a; ++k) // expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
++s.a;
return *this;
}
diff --git a/clang/test/OpenMP/distribute_parallel_for_simd_shared_messages.cpp b/clang/test/OpenMP/distribute_parallel_for_simd_shared_messages.cpp
index 7c83e4c674c6..6dc6e777fb33 100644
--- a/clang/test/OpenMP/distribute_parallel_for_simd_shared_messages.cpp
+++ b/clang/test/OpenMP/distribute_parallel_for_simd_shared_messages.cpp
@@ -117,7 +117,7 @@ T tmain(T argc, S **argv) {
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for simd shared (a, b, c, d, f) // expected-error {{incomplete type 'S1' where a complete type is required}} expected-warning {{Type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'const S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for simd shared (a, b, c, d, f) // expected-error {{incomplete type 'S1' where a complete type is required}} expected-warning {{type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'const S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for(int k = 0 ; k < n ; k++) {
acc++;
}
@@ -131,14 +131,14 @@ T tmain(T argc, S **argv) {
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for simd shared(ba) // expected-warning {{Type 'const S2[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for simd shared(ba) // expected-warning {{type 'const S2[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
for(int k = 0 ; k < n ; k++) {
acc++;
}
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for simd shared(ca) // expected-warning {{Type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for simd shared(ca) // expected-warning {{type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
for(int k = 0 ; k < n ; k++) {
acc++;
}
@@ -152,7 +152,7 @@ T tmain(T argc, S **argv) {
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for simd shared(e, g) // expected-warning {{Type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for simd shared(e, g) // expected-warning {{type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
for(int k = 0 ; k < n ; k++) {
acc++;
}
@@ -291,7 +291,7 @@ int main(int argc, char **argv) {
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for simd shared (a, b, c, d, f) // expected-error {{incomplete type 'S1' where a complete type is required}} expected-warning {{Type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'const S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for simd shared (a, b, c, d, f) // expected-error {{incomplete type 'S1' where a complete type is required}} expected-warning {{type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'const S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for(int k = 0 ; k < n ; k++) {
acc++;
}
@@ -305,14 +305,14 @@ int main(int argc, char **argv) {
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for simd shared(ba) // expected-warning {{Type 'const S2[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for simd shared(ba) // expected-warning {{type 'const S2[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
for(int k = 0 ; k < n ; k++) {
acc++;
}
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for simd shared(ca) // expected-warning {{Type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for simd shared(ca) // expected-warning {{type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
for(int k = 0 ; k < n ; k++) {
acc++;
}
@@ -326,7 +326,7 @@ int main(int argc, char **argv) {
#pragma omp target
#pragma omp teams
-#pragma omp distribute parallel for simd shared(e, g) // expected-warning {{Type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute parallel for simd shared(e, g) // expected-warning {{type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
for(int k = 0 ; k < n ; k++) {
acc++;
}
diff --git a/clang/test/OpenMP/distribute_simd_firstprivate_messages.cpp b/clang/test/OpenMP/distribute_simd_firstprivate_messages.cpp
index 43057fe5bacc..bc1dfcfe7ab4 100644
--- a/clang/test/OpenMP/distribute_simd_firstprivate_messages.cpp
+++ b/clang/test/OpenMP/distribute_simd_firstprivate_messages.cpp
@@ -111,7 +111,7 @@ int foomain(int argc, char **argv) {
++k;
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd firstprivate(z, a, b) // expected-error {{firstprivate variable with incomplete type 'S1'}} expected-warning {{Type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd firstprivate(z, a, b) // expected-error {{firstprivate variable with incomplete type 'S1'}} expected-warning {{type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int k = 0; k < argc; ++k)
++k;
#pragma omp target
@@ -121,7 +121,7 @@ int foomain(int argc, char **argv) {
++k;
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd firstprivate(e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} expected-warning {{Type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd firstprivate(e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} expected-warning {{type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int k = 0; k < argc; ++k)
++k;
#pragma omp target
@@ -233,7 +233,7 @@ int main(int argc, char **argv) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd firstprivate(a, b, c, d, f) // expected-error {{firstprivate variable with incomplete type 'S1'}} expected-error {{incomplete type 'S1' where a complete type is required}} expected-warning {{Type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'const S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd firstprivate(a, b, c, d, f) // expected-error {{firstprivate variable with incomplete type 'S1'}} expected-error {{incomplete type 'S1' where a complete type is required}} expected-warning {{type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'const S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
@@ -248,12 +248,12 @@ int main(int argc, char **argv) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd firstprivate(ba) // expected-warning {{Type 'const S2[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd firstprivate(ba) // expected-warning {{type 'const S2[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd firstprivate(ca) // expected-warning {{Type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd firstprivate(ca) // expected-warning {{type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
@@ -284,12 +284,12 @@ int main(int argc, char **argv) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd firstprivate(e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} expected-warning {{Type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd firstprivate(e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} expected-warning {{type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd firstprivate(m) // expected-warning {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd firstprivate(m) // expected-warning {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
@@ -321,13 +321,13 @@ int main(int argc, char **argv) {
// expected-error@+3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 {{defined as lastprivate}}
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd lastprivate(g) firstprivate(g) //expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd lastprivate(g) firstprivate(g) //expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
// expected-error@+3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 {{defined as lastprivate}}
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd lastprivate(n) firstprivate(n) // expected-error {{calling a private constructor of class 'S6'}} expected-warning {{Type 'S6' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd lastprivate(n) firstprivate(n) // expected-error {{calling a private constructor of class 'S6'}} expected-warning {{type 'S6' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp parallel
diff --git a/clang/test/OpenMP/distribute_simd_lastprivate_messages.cpp b/clang/test/OpenMP/distribute_simd_lastprivate_messages.cpp
index 7658288242ab..379f57547498 100644
--- a/clang/test/OpenMP/distribute_simd_lastprivate_messages.cpp
+++ b/clang/test/OpenMP/distribute_simd_lastprivate_messages.cpp
@@ -120,7 +120,7 @@ int foomain(int argc, char **argv) {
++k;
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd lastprivate(a, b) // expected-error {{lastprivate variable with incomplete type 'S1'}} expected-warning {{Type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd lastprivate(a, b) // expected-error {{lastprivate variable with incomplete type 'S1'}} expected-warning {{type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int k = 0; k < argc; ++k)
++k;
#pragma omp target
@@ -130,7 +130,7 @@ int foomain(int argc, char **argv) {
++k;
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd lastprivate(e, g) // expected-error 2 {{calling a private constructor of class 'S4'}} expected-warning 2 {{Type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd lastprivate(e, g) // expected-error 2 {{calling a private constructor of class 'S4'}} expected-warning 2 {{type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int k = 0; k < argc; ++k)
++k;
#pragma omp target
@@ -229,7 +229,7 @@ int main(int argc, char **argv) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd lastprivate(a, b, c, d, f) // expected-error {{lastprivate variable with incomplete type 'S1'}} expected-error 1 {{const-qualified variable without mutable fields cannot be lastprivate}} expected-error 2 {{const-qualified variable cannot be lastprivate}} expected-error {{incomplete type 'S1' where a complete type is required}} expected-warning {{Type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'const S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd lastprivate(a, b, c, d, f) // expected-error {{lastprivate variable with incomplete type 'S1'}} expected-error 1 {{const-qualified variable without mutable fields cannot be lastprivate}} expected-error 2 {{const-qualified variable cannot be lastprivate}} expected-error {{incomplete type 'S1' where a complete type is required}} expected-warning {{type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'const S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
@@ -244,12 +244,12 @@ int main(int argc, char **argv) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd lastprivate(ba) // expected-warning {{Type 'const S2[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd lastprivate(ba) // expected-warning {{type 'const S2[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd lastprivate(ca) // expected-error {{const-qualified variable without mutable fields cannot be lastprivate}} expected-warning {{Type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd lastprivate(ca) // expected-error {{const-qualified variable without mutable fields cannot be lastprivate}} expected-warning {{type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
@@ -280,12 +280,12 @@ int main(int argc, char **argv) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd lastprivate(e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} expected-warning {{Type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd lastprivate(e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} expected-warning {{type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd lastprivate(m) // expected-error {{'operator=' is a private member of 'S3'}} expected-warning {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd lastprivate(m) // expected-error {{'operator=' is a private member of 'S3'}} expected-warning {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
#pragma omp target
@@ -326,13 +326,13 @@ int main(int argc, char **argv) {
// expected-error@+3 {{firstprivate variable cannot be lastprivate}} expected-note@+3 {{defined as firstprivate}}
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd firstprivate(m) lastprivate(m) // expected-warning {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd firstprivate(m) lastprivate(m) // expected-warning {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
// expected-error@+3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 {{defined as lastprivate}}
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd lastprivate(n) firstprivate(n) // expected-error {{calling a private constructor of class 'S6'}} expected-warning {{Type 'S6' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd lastprivate(n) firstprivate(n) // expected-error {{calling a private constructor of class 'S6'}} expected-warning {{type 'S6' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i)
foo();
static int si;
diff --git a/clang/test/OpenMP/distribute_simd_loop_messages.cpp b/clang/test/OpenMP/distribute_simd_loop_messages.cpp
index 5a55f9569b8d..e56c7dfbddab 100644
--- a/clang/test/OpenMP/distribute_simd_loop_messages.cpp
+++ b/clang/test/OpenMP/distribute_simd_loop_messages.cpp
@@ -14,7 +14,7 @@ public:
#pragma omp target
#pragma omp teams
#pragma omp distribute simd
- for (int k = 0; k < s.a; ++k) // expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (int k = 0; k < s.a; ++k) // expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
++s.a;
return *this;
}
@@ -490,7 +490,7 @@ int test_with_random_access_iterator() {
#pragma omp target
#pragma omp teams
#pragma omp distribute simd
- for (GoodIter I = begin; I < end; ++I) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I = begin; I < end; ++I) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams
@@ -501,41 +501,41 @@ int test_with_random_access_iterator() {
#pragma omp target
#pragma omp teams
#pragma omp distribute simd
- for (GoodIter I = begin; I >= end; --I) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I = begin; I >= end; --I) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
#pragma omp distribute simd
- for (GoodIter I(begin); I < end; ++I) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(begin); I < end; ++I) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
#pragma omp distribute simd
- for (GoodIter I(nullptr); I < end; ++I) // expected-warning {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(nullptr); I < end; ++I) // expected-warning {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
#pragma omp distribute simd
- for (GoodIter I(0); I < end; ++I) // expected-warning {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(0); I < end; ++I) // expected-warning {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
#pragma omp distribute simd
- for (GoodIter I(1,2); I < end; ++I) // expected-warning {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(1,2); I < end; ++I) // expected-warning {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd
- for (begin = GoodIter(0); begin < end; ++begin) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (begin = GoodIter(0); begin < end; ++begin) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++begin;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd
- for (begin = GoodIter(1,2); begin < end; ++begin) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (begin = GoodIter(1,2); begin < end; ++begin) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++begin;
#pragma omp target
#pragma omp teams
@@ -546,7 +546,7 @@ int test_with_random_access_iterator() {
#pragma omp target
#pragma omp teams
#pragma omp distribute simd
- for (begin = end; begin < end; ++begin) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (begin = end; begin < end; ++begin) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++begin;
#pragma omp target
#pragma omp teams
@@ -576,7 +576,7 @@ int test_with_random_access_iterator() {
#pragma omp target
#pragma omp teams
#pragma omp distribute simd
- for (GoodIter I = begin; I >= end; I = I - 1) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I = begin; I >= end; I = I - 1) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams
@@ -600,7 +600,7 @@ int test_with_random_access_iterator() {
#pragma omp target
#pragma omp teams
#pragma omp distribute simd
- for (Iter0 I = begin0; I < end0; ++I) // expected-warning 2 {{Type 'Iter0' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (Iter0 I = begin0; I < end0; ++I) // expected-warning 2 {{type 'Iter0' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
@@ -608,7 +608,7 @@ int test_with_random_access_iterator() {
// Initializer is constructor without params.
// expected-warning@+2 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
#pragma omp distribute simd
- for (Iter0 I; I < end0; ++I) // expected-warning {{Type 'Iter0' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (Iter0 I; I < end0; ++I) // expected-warning {{type 'Iter0' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
Iter1 begin1, end1;
@@ -654,7 +654,7 @@ template <typename IT, int ST> class TC {
// expected-note@+3 {{loop step is expected to be positive due to this condition}}
// expected-error@+2 {{increment expression must cause 'I' to increase on each iteration of OpenMP for loop}}
#pragma omp distribute simd
- for (IT I = begin; I <= end; I += ST) { // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (IT I = begin; I <= end; I += ST) { // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
}
#pragma omp distribute simd
@@ -697,7 +697,7 @@ template <typename IT, int ST=0> int dotest_gt(IT begin, IT end) {
#pragma omp target
#pragma omp teams
#pragma omp distribute simd
- for (IT I = begin; I < end; I+=TC<int,ST>::step()) { // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (IT I = begin; I < end; I+=TC<int,ST>::step()) { // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
}
}
diff --git a/clang/test/OpenMP/distribute_simd_private_messages.cpp b/clang/test/OpenMP/distribute_simd_private_messages.cpp
index 261a46ac6099..8be71938e0fa 100644
--- a/clang/test/OpenMP/distribute_simd_private_messages.cpp
+++ b/clang/test/OpenMP/distribute_simd_private_messages.cpp
@@ -50,7 +50,7 @@ public:
#pragma omp target
#pragma omp teams
#pragma omp distribute simd private(a) private(this->a) private(s.a) // expected-error {{expected variable name or data member of current class}}
- for (int k = 0; k < s.a; ++k) // expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (int k = 0; k < s.a; ++k) // expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
++s.a;
return *this;
}
diff --git a/clang/test/OpenMP/distribute_simd_reduction_messages.cpp b/clang/test/OpenMP/distribute_simd_reduction_messages.cpp
index d27360ac9b2c..03b6ee5f4a25 100644
--- a/clang/test/OpenMP/distribute_simd_reduction_messages.cpp
+++ b/clang/test/OpenMP/distribute_simd_reduction_messages.cpp
@@ -187,7 +187,7 @@ T tmain(T argc) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd reduction(+ : a, b, c, d, f) // expected-error {{a reduction list item with incomplete type 'S1'}} expected-error 3 {{const-qualified variable cannot be reduction}} expected-error 2 {{'operator+' is a private member of 'S2'}} expected-warning 2 {{Type 'S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning 2 {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd reduction(+ : a, b, c, d, f) // expected-error {{a reduction list item with incomplete type 'S1'}} expected-error 3 {{const-qualified variable cannot be reduction}} expected-error 2 {{'operator+' is a private member of 'S2'}} expected-warning 2 {{type 'S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning 2 {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 10; ++i)
foo();
#pragma omp target
@@ -232,7 +232,7 @@ T tmain(T argc) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd reduction(+ : h, k) // expected-error {{threadprivate or thread local variable cannot be reduction}} expected-warning 2 {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd reduction(+ : h, k) // expected-error {{threadprivate or thread local variable cannot be reduction}} expected-warning 2 {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 10; ++i)
foo();
#pragma omp target
@@ -376,12 +376,12 @@ int main(int argc, char **argv) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd reduction(+ : a, b, c, d, f) // expected-error {{a reduction list item with incomplete type 'S1'}} expected-error 2 {{const-qualified variable cannot be reduction}} expected-error {{'operator+' is a private member of 'S2'}} expected-warning {{Type 'S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}} expected-error {{incomplete type 'S1' where a complete type is required}}
+#pragma omp distribute simd reduction(+ : a, b, c, d, f) // expected-error {{a reduction list item with incomplete type 'S1'}} expected-error 2 {{const-qualified variable cannot be reduction}} expected-error {{'operator+' is a private member of 'S2'}} expected-warning {{type 'S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}} expected-error {{incomplete type 'S1' where a complete type is required}}
for (int i = 0; i < 10; ++i)
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd reduction(min : a, b, c, d, f) // expected-error {{a reduction list item with incomplete type 'S1'}} expected-error 2 {{arguments of OpenMP clause 'reduction' for 'min' or 'max' must be of arithmetic type}} expected-error 2 {{const-qualified variable cannot be reduction}} expected-warning {{Type 'S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}} expected-error {{incomplete type 'S1' where a complete type is required}}
+#pragma omp distribute simd reduction(min : a, b, c, d, f) // expected-error {{a reduction list item with incomplete type 'S1'}} expected-error 2 {{arguments of OpenMP clause 'reduction' for 'min' or 'max' must be of arithmetic type}} expected-error 2 {{const-qualified variable cannot be reduction}} expected-warning {{type 'S2' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}} expected-error {{incomplete type 'S1' where a complete type is required}}
for (int i = 0; i < 10; ++i)
foo();
#pragma omp target
@@ -391,12 +391,12 @@ int main(int argc, char **argv) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd reduction(+ : ba) // expected-error {{const-qualified variable cannot be reduction}} expected-warning {{Type 'const S2[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd reduction(+ : ba) // expected-error {{const-qualified variable cannot be reduction}} expected-warning {{type 'const S2[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 10; ++i)
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd reduction(* : ca) // expected-error {{const-qualified variable cannot be reduction}} expected-warning {{Type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd reduction(* : ca) // expected-error {{const-qualified variable cannot be reduction}} expected-warning {{type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 10; ++i)
foo();
#pragma omp target
@@ -421,12 +421,12 @@ int main(int argc, char **argv) {
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd reduction(& : e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} expected-error {{invalid operands to binary expression ('S5' and 'S5')}} expected-warning {{Type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}}
+#pragma omp distribute simd reduction(& : e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}} expected-error {{invalid operands to binary expression ('S5' and 'S5')}} expected-warning {{type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}}
for (int i = 0; i < 10; ++i)
foo();
#pragma omp target
#pragma omp teams
-#pragma omp distribute simd reduction(+ : h, k, B::x) // expected-error 2 {{threadprivate or thread local variable cannot be reduction}} expected-warning {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp distribute simd reduction(+ : h, k, B::x) // expected-error 2 {{threadprivate or thread local variable cannot be reduction}} expected-warning {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 10; ++i)
foo();
#pragma omp target
@@ -440,7 +440,7 @@ int main(int argc, char **argv) {
for (int i = 0; i < 10; ++i)
foo();
#if __cplusplus < 201103L // < C++11
-// expected-warning@+5 {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+// expected-warning@+5 {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
#endif
#pragma omp parallel private(k)
#pragma omp target
@@ -449,7 +449,7 @@ int main(int argc, char **argv) {
for (int i = 0; i < 10; ++i)
foo();
#if __cplusplus < 201103L // < C++11
-// expected-warning@+4 {{Type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+// expected-warning@+4 {{type 'S3' is not trivially copyable and not guaranteed to be mapped correctly}}
#endif
#pragma omp target
#pragma omp teams
diff --git a/clang/test/OpenMP/nvptx_lambda_capturing.cpp b/clang/test/OpenMP/nvptx_lambda_capturing.cpp
index 641fbc38dd6b..efea8d4a0561 100644
--- a/clang/test/OpenMP/nvptx_lambda_capturing.cpp
+++ b/clang/test/OpenMP/nvptx_lambda_capturing.cpp
@@ -1165,8 +1165,113 @@ int main(int argc, char **argv) {
// CHECK2-NEXT: ret void
//
//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l27
+// CHECK3-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[THIS:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[L_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[L1:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
+// CHECK3-NEXT: [[_TMP2:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: store ptr [[L]], ptr [[L_ADDR]], align 8
+// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[L_ADDR]], align 8
+// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8
+// CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l27_kernel_environment, ptr [[DYN_PTR]])
+// CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
+// CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK3: user_code.entry:
+// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 8
+// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[L1]], ptr align 8 [[TMP3]], i64 8, i1 false)
+// CHECK3-NEXT: store ptr [[L1]], ptr [[_TMP2]], align 8
+// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[_TMP2]], align 8
+// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[CLASS_ANON]], ptr [[TMP4]], i32 0, i32 0
+// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8
+// CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[_TMP2]], align 8
+// CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_ZZN1S3fooEvENKUlvE_clEv(ptr noundef nonnull align 8 dereferenceable(8) [[TMP6]]) #[[ATTR7:[0-9]+]]
+// CHECK3-NEXT: call void @__kmpc_target_deinit()
+// CHECK3-NEXT: ret void
+// CHECK3: worker.exit:
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@_ZZN1S3fooEvENKUlvE_clEv
+// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) #[[ATTR2:[0-9]+]] comdat align 2 {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[TMP0]], align 8
+// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[TMP1]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+// CHECK3-NEXT: ret i32 [[TMP2]]
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l29
+// CHECK3-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[THIS:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR3:[0-9]+]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[L_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 8
+// CHECK3-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: store ptr [[L]], ptr [[L_ADDR]], align 8
+// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[L_ADDR]], align 8
+// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8
+// CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l29_kernel_environment, ptr [[DYN_PTR]])
+// CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
+// CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK3: user_code.entry:
+// CHECK3-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
+// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP]], align 8
+// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
+// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8
+// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
+// CHECK3-NEXT: store ptr [[TMP4]], ptr [[TMP6]], align 8
+// CHECK3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l29_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 2)
+// CHECK3-NEXT: call void @__kmpc_target_deinit()
+// CHECK3-NEXT: ret void
+// CHECK3: worker.exit:
+// CHECK3-NEXT: ret void
+//
+//
+// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l29_omp_outlined
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR4:[0-9]+]] {
+// CHECK3-NEXT: entry:
+// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[L_ADDR:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: [[L1:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
+// CHECK3-NEXT: [[_TMP2:%.*]] = alloca ptr, align 8
+// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
+// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: store ptr [[L]], ptr [[L_ADDR]], align 8
+// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[L_ADDR]], align 8
+// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8
+// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP]], align 8
+// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[L1]], ptr align 8 [[TMP2]], i64 8, i1 false)
+// CHECK3-NEXT: store ptr [[L1]], ptr [[_TMP2]], align 8
+// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[_TMP2]], align 8
+// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], ptr [[TMP3]], i32 0, i32 0
+// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
+// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[_TMP2]], align 8
+// CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_ZZN1S3fooEvENKUlvE_clEv(ptr noundef nonnull align 8 dereferenceable(8) [[TMP5]]) #[[ATTR7]]
+// CHECK3-NEXT: ret void
+//
+//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l41
-// CHECK3-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[ARGC:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[C:%.*]], ptr noundef [[D:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK3-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[ARGC:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[C:%.*]], ptr noundef [[D:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[ARGC_ADDR:%.*]] = alloca i64, align 8
@@ -1178,7 +1283,7 @@ int main(int argc, char **argv) {
// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[_TMP1:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[_TMP2:%.*]] = alloca ptr, align 8
-// CHECK3-NEXT: [[L3:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
+// CHECK3-NEXT: [[L3:%.*]] = alloca [[CLASS_ANON_1:%.*]], align 8
// CHECK3-NEXT: [[_TMP4:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[B5:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[_TMP6:%.*]] = alloca ptr, align 8
@@ -1214,20 +1319,20 @@ int main(int argc, char **argv) {
// CHECK3-NEXT: store i32 [[TMP9]], ptr [[C7]], align 4
// CHECK3-NEXT: store ptr [[C7]], ptr [[_TMP8]], align 8
// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP4]], align 8
-// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON]], ptr [[TMP10]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], ptr [[TMP10]], i32 0, i32 0
// CHECK3-NEXT: store ptr [[ARGC_ADDR]], ptr [[TMP11]], align 8
-// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[CLASS_ANON]], ptr [[TMP10]], i32 0, i32 1
+// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], ptr [[TMP10]], i32 0, i32 1
// CHECK3-NEXT: [[TMP13:%.*]] = load ptr, ptr [[_TMP6]], align 8
// CHECK3-NEXT: store ptr [[TMP13]], ptr [[TMP12]], align 8
-// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[CLASS_ANON]], ptr [[TMP10]], i32 0, i32 2
+// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], ptr [[TMP10]], i32 0, i32 2
// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[_TMP8]], align 8
// CHECK3-NEXT: store ptr [[TMP15]], ptr [[TMP14]], align 8
-// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON]], ptr [[TMP10]], i32 0, i32 3
+// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], ptr [[TMP10]], i32 0, i32 3
// CHECK3-NEXT: store ptr [[D_ADDR]], ptr [[TMP16]], align 8
-// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[CLASS_ANON]], ptr [[TMP10]], i32 0, i32 4
+// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], ptr [[TMP10]], i32 0, i32 4
// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP17]], align 8
// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[_TMP4]], align 8
-// CHECK3-NEXT: [[CALL:%.*]] = call noundef i64 @"_ZZ4mainENK3$_0clEv"(ptr noundef nonnull align 8 dereferenceable(40) [[TMP18]]) #[[ATTR7:[0-9]+]]
+// CHECK3-NEXT: [[CALL:%.*]] = call noundef i64 @"_ZZ4mainENK3$_0clEv"(ptr noundef nonnull align 8 dereferenceable(40) [[TMP18]]) #[[ATTR7]]
// CHECK3-NEXT: call void @__kmpc_target_deinit()
// CHECK3-NEXT: ret void
// CHECK3: worker.exit:
@@ -1235,7 +1340,7 @@ int main(int argc, char **argv) {
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l43
-// CHECK3-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[C:%.*]], ptr noundef [[D:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR3:[0-9]+]] {
+// CHECK3-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[C:%.*]], ptr noundef [[D:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[ARGC_ADDR:%.*]] = alloca ptr, align 8
@@ -1267,7 +1372,7 @@ int main(int argc, char **argv) {
// CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP5]], -1
// CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK3: user_code.entry:
-// CHECK3-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
+// CHECK3-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP]], align 8
// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[_TMP1]], align 8
// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[D_ADDR]], align 8
@@ -1292,7 +1397,7 @@ int main(int argc, char **argv) {
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l43_omp_outlined
-// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[C:%.*]], ptr noundef [[D:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR4:[0-9]+]] {
+// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[ARGC:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[C:%.*]], ptr noundef [[D:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 8 dereferenceable(40) [[L:%.*]]) #[[ATTR4]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -1305,7 +1410,7 @@ int main(int argc, char **argv) {
// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[_TMP1:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[_TMP2:%.*]] = alloca ptr, align 8
-// CHECK3-NEXT: [[L3:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
+// CHECK3-NEXT: [[L3:%.*]] = alloca [[CLASS_ANON_1:%.*]], align 8
// CHECK3-NEXT: [[_TMP4:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[ARGC5:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B6:%.*]] = alloca i32, align 4
@@ -1345,128 +1450,23 @@ int main(int argc, char **argv) {
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP3]], align 4
// CHECK3-NEXT: store i32 [[TMP11]], ptr [[A10]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[_TMP4]], align 8
-// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON]], ptr [[TMP12]], i32 0, i32 0
+// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], ptr [[TMP12]], i32 0, i32 0
// CHECK3-NEXT: store ptr [[ARGC5]], ptr [[TMP13]], align 8
-// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[CLASS_ANON]], ptr [[TMP12]], i32 0, i32 1
+// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], ptr [[TMP12]], i32 0, i32 1
// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[_TMP7]], align 8
// CHECK3-NEXT: store ptr [[TMP15]], ptr [[TMP14]], align 8
-// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON]], ptr [[TMP12]], i32 0, i32 2
+// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], ptr [[TMP12]], i32 0, i32 2
// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[_TMP9]], align 8
// CHECK3-NEXT: store ptr [[TMP17]], ptr [[TMP16]], align 8
-// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[CLASS_ANON]], ptr [[TMP12]], i32 0, i32 3
+// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], ptr [[TMP12]], i32 0, i32 3
// CHECK3-NEXT: store ptr [[D_ADDR]], ptr [[TMP18]], align 8
-// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[CLASS_ANON]], ptr [[TMP12]], i32 0, i32 4
+// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], ptr [[TMP12]], i32 0, i32 4
// CHECK3-NEXT: store ptr [[A10]], ptr [[TMP19]], align 8
// CHECK3-NEXT: [[TMP20:%.*]] = load ptr, ptr [[_TMP4]], align 8
// CHECK3-NEXT: [[CALL:%.*]] = call noundef i64 @"_ZZ4mainENK3$_0clEv"(ptr noundef nonnull align 8 dereferenceable(40) [[TMP20]]) #[[ATTR7]]
// CHECK3-NEXT: ret void
//
//
-// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l27
-// CHECK3-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[THIS:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR0]] {
-// CHECK3-NEXT: entry:
-// CHECK3-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
-// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
-// CHECK3-NEXT: [[L_ADDR:%.*]] = alloca ptr, align 8
-// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 8
-// CHECK3-NEXT: [[L1:%.*]] = alloca [[CLASS_ANON_1:%.*]], align 8
-// CHECK3-NEXT: [[_TMP2:%.*]] = alloca ptr, align 8
-// CHECK3-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
-// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK3-NEXT: store ptr [[L]], ptr [[L_ADDR]], align 8
-// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[L_ADDR]], align 8
-// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8
-// CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l27_kernel_environment, ptr [[DYN_PTR]])
-// CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
-// CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
-// CHECK3: user_code.entry:
-// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 8
-// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[L1]], ptr align 8 [[TMP3]], i64 8, i1 false)
-// CHECK3-NEXT: store ptr [[L1]], ptr [[_TMP2]], align 8
-// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[_TMP2]], align 8
-// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], ptr [[TMP4]], i32 0, i32 0
-// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8
-// CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[_TMP2]], align 8
-// CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_ZZN1S3fooEvENKUlvE_clEv(ptr noundef nonnull align 8 dereferenceable(8) [[TMP6]]) #[[ATTR7]]
-// CHECK3-NEXT: call void @__kmpc_target_deinit()
-// CHECK3-NEXT: ret void
-// CHECK3: worker.exit:
-// CHECK3-NEXT: ret void
-//
-//
-// CHECK3-LABEL: define {{[^@]+}}@_ZZN1S3fooEvENKUlvE_clEv
-// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) #[[ATTR2:[0-9]+]] comdat align 2 {
-// CHECK3-NEXT: entry:
-// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
-// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON_1:%.*]], ptr [[THIS1]], i32 0, i32 0
-// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[TMP0]], align 8
-// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[TMP1]], i32 0, i32 0
-// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
-// CHECK3-NEXT: ret i32 [[TMP2]]
-//
-//
-// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l29
-// CHECK3-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef [[THIS:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR3]] {
-// CHECK3-NEXT: entry:
-// CHECK3-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
-// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
-// CHECK3-NEXT: [[L_ADDR:%.*]] = alloca ptr, align 8
-// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 8
-// CHECK3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 8
-// CHECK3-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
-// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK3-NEXT: store ptr [[L]], ptr [[L_ADDR]], align 8
-// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[L_ADDR]], align 8
-// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8
-// CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l29_kernel_environment, ptr [[DYN_PTR]])
-// CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
-// CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
-// CHECK3: user_code.entry:
-// CHECK3-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
-// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP]], align 8
-// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
-// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8
-// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
-// CHECK3-NEXT: store ptr [[TMP4]], ptr [[TMP6]], align 8
-// CHECK3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l29_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 2)
-// CHECK3-NEXT: call void @__kmpc_target_deinit()
-// CHECK3-NEXT: ret void
-// CHECK3: worker.exit:
-// CHECK3-NEXT: ret void
-//
-//
-// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l29_omp_outlined
-// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[L:%.*]]) #[[ATTR4]] {
-// CHECK3-NEXT: entry:
-// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
-// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
-// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
-// CHECK3-NEXT: [[L_ADDR:%.*]] = alloca ptr, align 8
-// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 8
-// CHECK3-NEXT: [[L1:%.*]] = alloca [[CLASS_ANON_1:%.*]], align 8
-// CHECK3-NEXT: [[_TMP2:%.*]] = alloca ptr, align 8
-// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
-// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
-// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK3-NEXT: store ptr [[L]], ptr [[L_ADDR]], align 8
-// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[L_ADDR]], align 8
-// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8
-// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP]], align 8
-// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[L1]], ptr align 8 [[TMP2]], i64 8, i1 false)
-// CHECK3-NEXT: store ptr [[L1]], ptr [[_TMP2]], align 8
-// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[_TMP2]], align 8
-// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], ptr [[TMP3]], i32 0, i32 0
-// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
-// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[_TMP2]], align 8
-// CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_ZZN1S3fooEvENKUlvE_clEv(ptr noundef nonnull align 8 dereferenceable(8) [[TMP5]]) #[[ATTR7]]
-// CHECK3-NEXT: ret void
-//
-//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooIZN1S3fooEvEUlvE_EiRKT__l18
// CHECK3-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[T:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
@@ -1500,7 +1500,7 @@ int main(int argc, char **argv) {
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[T_ADDR:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 8
-// CHECK3-NEXT: [[T1:%.*]] = alloca [[CLASS_ANON_1:%.*]], align 8
+// CHECK3-NEXT: [[T1:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
// CHECK3-NEXT: [[_TMP2:%.*]] = alloca ptr, align 8
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
diff --git a/clang/test/OpenMP/reduction_implicit_map.cpp b/clang/test/OpenMP/reduction_implicit_map.cpp
index 0f67cdc56ddc..765e90bcba85 100644
--- a/clang/test/OpenMP/reduction_implicit_map.cpp
+++ b/clang/test/OpenMP/reduction_implicit_map.cpp
@@ -47,7 +47,7 @@ int bar() {
S2 o[5];
//warning "copyable and not guaranteed to be mapped correctly" and
//implicit map generated.
-#pragma omp target parallel reduction(+:o[0]) //expected-warning {{Type 'S2' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp target parallel reduction(+:o[0]) //expected-warning {{type 'S2' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 10; i++);
double b[10][10][10];
//no error, no implicit map generated; the map for b is generated but not
diff --git a/clang/test/OpenMP/remarks_parallel_in_multiple_target_state_machines.c b/clang/test/OpenMP/remarks_parallel_in_multiple_target_state_machines.c
index 2f829d2ad094..1afedc6683f8 100644
--- a/clang/test/OpenMP/remarks_parallel_in_multiple_target_state_machines.c
+++ b/clang/test/OpenMP/remarks_parallel_in_multiple_target_state_machines.c
@@ -4,7 +4,7 @@
// host-no-diagnostics
-void baz(void) __attribute__((assume("omp_no_openmp")));
+[[omp::assume("omp_no_openmp")]] void baz(void);
void bar1(void) {
#pragma omp parallel // #0
@@ -24,7 +24,7 @@ void foo1(void) {
// all-remark@#2 {{Rewriting generic-mode kernel with a customized state machine. [OMP131]}}
{
- baz(); // all-remark {{Value has potential side effects preventing SPMD-mode execution. Add `__attribute__((assume("ompx_spmd_amenable")))` to the called function to override. [OMP121]}}
+ baz(); // all-remark {{Value has potential side effects preventing SPMD-mode execution. Add `[[omp::assume("ompx_spmd_amenable")]]` to the called function to override. [OMP121]}}
#pragma omp parallel // #3
{
}
@@ -39,7 +39,7 @@ void foo2(void) {
#pragma omp target teams // #5
// all-remark@#5 {{Rewriting generic-mode kernel with a customized state machine. [OMP131]}}
{
- baz(); // all-remark {{Value has potential side effects preventing SPMD-mode execution. Add `__attribute__((assume("ompx_spmd_amenable")))` to the called function to override. [OMP121]}}
+ baz(); // all-remark {{Value has potential side effects preventing SPMD-mode execution. Add `[[omp::assume("ompx_spmd_amenable")]]` to the called function to override. [OMP121]}}
#pragma omp parallel // #6
{
}
@@ -57,7 +57,7 @@ void foo3(void) {
#pragma omp target teams // #8
// all-remark@#8 {{Rewriting generic-mode kernel with a customized state machine. [OMP131]}}
{
- baz(); // all-remark {{Value has potential side effects preventing SPMD-mode execution. Add `__attribute__((assume("ompx_spmd_amenable")))` to the called function to override. [OMP121]}}
+ baz(); // all-remark {{Value has potential side effects preventing SPMD-mode execution. Add `[[omp::assume("ompx_spmd_amenable")]]` to the called function to override. [OMP121]}}
#pragma omp parallel // #9
{
}
diff --git a/clang/test/OpenMP/remarks_parallel_in_target_state_machine.c b/clang/test/OpenMP/remarks_parallel_in_target_state_machine.c
index c48a4b966077..5ce8f1fa4046 100644
--- a/clang/test/OpenMP/remarks_parallel_in_target_state_machine.c
+++ b/clang/test/OpenMP/remarks_parallel_in_target_state_machine.c
@@ -3,7 +3,7 @@
// host-no-diagnostics
-void baz(void) __attribute__((assume("omp_no_openmp")));
+[[omp::assume("omp_no_openmp")]] void baz(void);
void bar(void) {
#pragma omp parallel // #1 \
@@ -16,7 +16,7 @@ void foo(void) {
#pragma omp target teams // #2
// expected-remark@#2 {{Rewriting generic-mode kernel with a customized state machine. [OMP131]}}
{
- baz(); // expected-remark {{Value has potential side effects preventing SPMD-mode execution. Add `__attribute__((assume("ompx_spmd_amenable")))` to the called function to override. [OMP121]}}
+ baz(); // expected-remark {{Value has potential side effects preventing SPMD-mode execution. Add `[[omp::assume("ompx_spmd_amenable")]]` to the called function to override. [OMP121]}}
#pragma omp parallel
{
}
diff --git a/clang/test/OpenMP/requires_default_atomic_mem_order_messages.cpp b/clang/test/OpenMP/requires_default_atomic_mem_order_messages.cpp
index 19f6ede043d8..5160fbbfb4a7 100644
--- a/clang/test/OpenMP/requires_default_atomic_mem_order_messages.cpp
+++ b/clang/test/OpenMP/requires_default_atomic_mem_order_messages.cpp
@@ -7,6 +7,6 @@ void foo2() {
}
#pragma omp requires atomic_default_mem_order(seq_cst) // expected-error {{'atomic' region encountered before requires directive with 'atomic_default_mem_order' clause}} expected-note 2 {{atomic_default_mem_order clause previously used here}}
-#pragma omp requires atomic_default_mem_order(acq_rel) // expected-error {{'atomic' region encountered before requires directive with 'atomic_default_mem_order' clause}} expected-error {{Only one atomic_default_mem_order clause can appear on a requires directive in a single translation unit}}
-#pragma omp requires atomic_default_mem_order(relaxed) // expected-error {{'atomic' region encountered before requires directive with 'atomic_default_mem_order' clause}} expected-error {{Only one atomic_default_mem_order clause can appear on a requires directive in a single translation unit}}
+#pragma omp requires atomic_default_mem_order(acq_rel) // expected-error {{'atomic' region encountered before requires directive with 'atomic_default_mem_order' clause}} expected-error {{only one atomic_default_mem_order clause can appear on a requires directive in a single translation unit}}
+#pragma omp requires atomic_default_mem_order(relaxed) // expected-error {{'atomic' region encountered before requires directive with 'atomic_default_mem_order' clause}} expected-error {{only one atomic_default_mem_order clause can appear on a requires directive in a single translation unit}}
#pragma omp requires atomic_default_mem_order(release) // expected-error {{expected 'seq_cst', 'acq_rel' or 'relaxed' in OpenMP clause 'atomic_default_mem_order'}} expected-error {{expected at least one clause on '#pragma omp requires' directive}}
diff --git a/clang/test/OpenMP/requires_messages.cpp b/clang/test/OpenMP/requires_messages.cpp
index 10d311631b10..dbb2b317067b 100644
--- a/clang/test/OpenMP/requires_messages.cpp
+++ b/clang/test/OpenMP/requires_messages.cpp
@@ -6,39 +6,39 @@ int a;
#pragma omp requires unified_shared_memory // rev-note {{unified_shared_memory clause previously used here}} expected-note{{unified_shared_memory clause previously used here}}
-#pragma omp requires unified_shared_memory, unified_shared_memory // expected-error {{Only one unified_shared_memory clause can appear on a requires directive in a single translation unit}} expected-error {{directive '#pragma omp requires' cannot contain more than one 'unified_shared_memory' clause}}
+#pragma omp requires unified_shared_memory, unified_shared_memory // expected-error {{only one unified_shared_memory clause can appear on a requires directive in a single translation unit}} expected-error {{directive '#pragma omp requires' cannot contain more than one 'unified_shared_memory' clause}}
-#pragma omp requires unified_address // expected-error {{Only one unified_address clause can appear on a requires directive in a single translation unit}}
+#pragma omp requires unified_address // expected-error {{only one unified_address clause can appear on a requires directive in a single translation unit}}
-#pragma omp requires unified_address, unified_address // expected-error {{Only one unified_address clause can appear on a requires directive in a single translation unit}} expected-error {{directive '#pragma omp requires' cannot contain more than one 'unified_address' clause}}
+#pragma omp requires unified_address, unified_address // expected-error {{only one unified_address clause can appear on a requires directive in a single translation unit}} expected-error {{directive '#pragma omp requires' cannot contain more than one 'unified_address' clause}}
#ifdef OMP99
#pragma omp requires reverse_offload // rev-note {{reverse_offload clause previously used here}} rev-note {{reverse_offload clause previously used here}}
-#pragma omp requires reverse_offload, reverse_offload // rev-error {{Only one reverse_offload clause can appear on a requires directive in a single translation unit}} rev-error {{directive '#pragma omp requires' cannot contain more than one 'reverse_offload' clause}}
+#pragma omp requires reverse_offload, reverse_offload // rev-error {{only one reverse_offload clause can appear on a requires directive in a single translation unit}} rev-error {{directive '#pragma omp requires' cannot contain more than one 'reverse_offload' clause}}
#endif
#pragma omp requires dynamic_allocators // rev-note {{dynamic_allocators clause previously used here}} expected-note {{dynamic_allocators clause previously used here}}
-#pragma omp requires dynamic_allocators, dynamic_allocators // expected-error {{Only one dynamic_allocators clause can appear on a requires directive in a single translation unit}} expected-error {{directive '#pragma omp requires' cannot contain more than one 'dynamic_allocators' clause}}
+#pragma omp requires dynamic_allocators, dynamic_allocators // expected-error {{only one dynamic_allocators clause can appear on a requires directive in a single translation unit}} expected-error {{directive '#pragma omp requires' cannot contain more than one 'dynamic_allocators' clause}}
#pragma omp requires atomic_default_mem_order(seq_cst) // rev-note {{atomic_default_mem_order clause previously used here}} expected-note {{atomic_default_mem_order clause previously used here}} expected-note {{atomic_default_mem_order clause previously used here}} expected-note {{atomic_default_mem_order clause previously used here}} expected-note {{atomic_default_mem_order clause previously used here}}
-#pragma omp requires atomic_default_mem_order(acq_rel) // expected-error {{Only one atomic_default_mem_order clause can appear on a requires directive in a single translation unit}}
+#pragma omp requires atomic_default_mem_order(acq_rel) // expected-error {{only one atomic_default_mem_order clause can appear on a requires directive in a single translation unit}}
-#pragma omp requires atomic_default_mem_order(relaxed) // expected-error {{Only one atomic_default_mem_order clause can appear on a requires directive in a single translation unit}}
+#pragma omp requires atomic_default_mem_order(relaxed) // expected-error {{only one atomic_default_mem_order clause can appear on a requires directive in a single translation unit}}
#pragma omp requires atomic_default_mem_order // expected-error {{expected '(' after 'atomic_default_mem_order'}} expected-error {{expected at least one clause on '#pragma omp requires' directive}}
#pragma omp requires atomic_default_mem_order( // expected-error {{expected ')'}} expected-note {{to match this '('}} expected-error {{expected 'seq_cst', 'acq_rel' or 'relaxed' in OpenMP clause 'atomic_default_mem_order'}} expected-error {{expected at least one clause on '#pragma omp requires' directive}}
-#pragma omp requires atomic_default_mem_order(seq_cst // expected-error {{expected ')'}} expected-note {{to match this '('}} expected-error {{Only one atomic_default_mem_order clause can appear on a requires directive in a single translation unit}}
+#pragma omp requires atomic_default_mem_order(seq_cst // expected-error {{expected ')'}} expected-note {{to match this '('}} expected-error {{only one atomic_default_mem_order clause can appear on a requires directive in a single translation unit}}
#pragma omp requires atomic_default_mem_order(invalid_modifier) // expected-error {{expected 'seq_cst', 'acq_rel' or 'relaxed' in OpenMP clause 'atomic_default_mem_order'}} expected-error {{expected at least one clause on '#pragma omp requires' directive}}
#pragma omp requires atomic_default_mem_order(shared) // expected-error {{expected 'seq_cst', 'acq_rel' or 'relaxed' in OpenMP clause 'atomic_default_mem_order'}} expected-error {{expected at least one clause on '#pragma omp requires' directive}}
-#pragma omp requires atomic_default_mem_order(acq_rel), atomic_default_mem_order(relaxed) // expected-error {{directive '#pragma omp requires' cannot contain more than one 'atomic_default_mem_order' claus}} expected-error {{Only one atomic_default_mem_order clause can appear on a requires directive in a single translation unit}}
+#pragma omp requires atomic_default_mem_order(acq_rel), atomic_default_mem_order(relaxed) // expected-error {{directive '#pragma omp requires' cannot contain more than one 'atomic_default_mem_order' clause}} expected-error {{only one atomic_default_mem_order clause can appear on a requires directive in a single translation unit}}
#pragma omp requires // expected-error {{expected at least one clause on '#pragma omp requires' directive}}
@@ -46,18 +46,18 @@ int a;
#pragma omp requires nowait // expected-error {{unexpected OpenMP clause 'nowait' in directive '#pragma omp requires'}} expected-error {{expected at least one clause on '#pragma omp requires' directive}}
-#pragma omp requires unified_address, invalid_clause // expected-warning {{extra tokens at the end of '#pragma omp requires' are ignored}} expected-error {{Only one unified_address clause can appear on a requires directive in a single translation unit}}
+#pragma omp requires unified_address, invalid_clause // expected-warning {{extra tokens at the end of '#pragma omp requires' are ignored}} expected-error {{only one unified_address clause can appear on a requires directive in a single translation unit}}
#pragma omp requires invalid_clause unified_address // expected-warning {{extra tokens at the end of '#pragma omp requires' are ignored}} expected-error {{expected at least one clause on '#pragma omp requires' directive}}
#ifdef OMP99
-#pragma omp requires unified_shared_memory, unified_address, reverse_offload, dynamic_allocators, atomic_default_mem_order(seq_cst) // rev-error {{Only one unified_shared_memory clause can appear on a requires directive in a single translation unit}} rev-error{{Only one unified_address clause can appear on a requires directive in a single translation unit}} rev-error{{Only one reverse_offload clause can appear on a requires directive in a single translation unit}} rev-error{{Only one dynamic_allocators clause can appear on a requires directive in a single translation unit}} rev-error {{Only one atomic_default_mem_order clause can appear on a requires directive in a single translation unit}}
+#pragma omp requires unified_shared_memory, unified_address, reverse_offload, dynamic_allocators, atomic_default_mem_order(seq_cst) // rev-error {{only one unified_shared_memory clause can appear on a requires directive in a single translation unit}} rev-error{{only one unified_address clause can appear on a requires directive in a single translation unit}} rev-error{{only one reverse_offload clause can appear on a requires directive in a single translation unit}} rev-error{{only one dynamic_allocators clause can appear on a requires directive in a single translation unit}} rev-error {{only one atomic_default_mem_order clause can appear on a requires directive in a single translation unit}}
#endif
namespace A {
- #pragma omp requires unified_address // expected-error {{Only one unified_address clause can appear on a requires directive in a single translation unit}}
+ #pragma omp requires unified_address // expected-error {{only one unified_address clause can appear on a requires directive in a single translation unit}}
namespace B {
- #pragma omp requires unified_address // expected-error {{Only one unified_address clause can appear on a requires directive in a single translation unit}}
+ #pragma omp requires unified_address // expected-error {{only one unified_address clause can appear on a requires directive in a single translation unit}}
}
}
diff --git a/clang/test/OpenMP/target_device_ancestor_messages.cpp b/clang/test/OpenMP/target_device_ancestor_messages.cpp
index bc1d668d1914..e6705b369c70 100644
--- a/clang/test/OpenMP/target_device_ancestor_messages.cpp
+++ b/clang/test/OpenMP/target_device_ancestor_messages.cpp
@@ -2,6 +2,6 @@
// RUN: %clang_cc1 -triple=x86_64 -verify -fopenmp-simd -fopenmp-targets=x86_64 -x c++ -fexceptions -fcxx-exceptions %s
void bar() {
-#pragma omp target device(ancestor : 1) // expected-error {{Device clause with ancestor device-modifier used without specifying 'requires reverse_offload'}}
+#pragma omp target device(ancestor : 1) // expected-error {{device clause with ancestor device-modifier used without specifying 'requires reverse_offload'}}
;
}
diff --git a/clang/test/OpenMP/target_firstprivate_messages.cpp b/clang/test/OpenMP/target_firstprivate_messages.cpp
index 9b211297f531..2eafb367c0c4 100644
--- a/clang/test/OpenMP/target_firstprivate_messages.cpp
+++ b/clang/test/OpenMP/target_firstprivate_messages.cpp
@@ -56,7 +56,7 @@ public:
S5(int v) : a(v) {}
S5 &operator=(S5 &s) {
#pragma omp target firstprivate(a) firstprivate(this->a) firstprivate(s.a) // expected-error {{expected variable name or data member of current class}}
- for (int k = 0; k < s.a; ++k) // expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (int k = 0; k < s.a; ++k) // expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
++s.a;
return *this;
}
diff --git a/clang/test/OpenMP/target_map_messages.cpp b/clang/test/OpenMP/target_map_messages.cpp
index 3bd432b47e63..10f46687d637 100644
--- a/clang/test/OpenMP/target_map_messages.cpp
+++ b/clang/test/OpenMP/target_map_messages.cpp
@@ -681,13 +681,13 @@ T tmain(T argc) {
#pragma omp target data map(tofrom: argc > 0 ? x : y) // lt50-error 2 {{expected expression containing only member accesses and/or array sections based on named variables}} ge50-error 2 {{expected addressable lvalue in 'map' clause}}
#pragma omp target data map(argc)
#pragma omp target data map(S1) // expected-error {{'S1' does not refer to a value}}
-#pragma omp target data map(a, b, c, d, f) // expected-error {{incomplete type 'S1' where a complete type is required}} warn-warning 2 {{Type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}} warn-warning 2 {{Type 'const S3' is not trivially copyable and not guaranteed to be mapped correctly}}
-#pragma omp target data map(ba) // warn-warning 2 {{Type 'const S2 [5]' is not trivially copyable and not guaranteed to be mapped correctly}}
-#pragma omp target data map(ca) // warn-warning 2 {{Type 'const S3 [5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp target data map(a, b, c, d, f) // expected-error {{incomplete type 'S1' where a complete type is required}} warn-warning 2 {{type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}} warn-warning 2 {{type 'const S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp target data map(ba) // warn-warning 2 {{type 'const S2 [5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp target data map(ca) // warn-warning 2 {{type 'const S3 [5]' is not trivially copyable and not guaranteed to be mapped correctly}}
#pragma omp target data map(da)
#pragma omp target data map(S2::S2s)
#pragma omp target data map(S2::S2sc)
-#pragma omp target data map(e, g) // warn-warning 2 {{Type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} warn-warning 2 {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp target data map(e, g) // warn-warning 2 {{type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} warn-warning 2 {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
#pragma omp target data map(h) // expected-error {{threadprivate variables are not allowed in 'map' clause}}
#pragma omp target data map(k) map(k) // lt50-error 2 {{variable already marked as mapped in current construct}} lt50-note 2 {{used here}}
#pragma omp target map(k), map(k[:5]) // lt50-error 2 {{pointer cannot be mapped along with a section derived from itself}} lt50-note 2 {{used here}}
@@ -815,14 +815,14 @@ int main(int argc, char **argv) {
#pragma omp target data map(tofrom: argc > 0 ? argv[1] : argv[2]) // lt50-error {{expected expression containing only member accesses and/or array sections based on named variables}} ge50-error {{expected addressable lvalue in 'map' clause}}
#pragma omp target data map(argc)
#pragma omp target data map(S1) // expected-error {{'S1' does not refer to a value}}
-#pragma omp target data map(a, b, c, d, f) // expected-error {{incomplete type 'S1' where a complete type is required}} warn-warning {{Type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}} warn-warning {{Type 'const S3' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp target data map(a, b, c, d, f) // expected-error {{incomplete type 'S1' where a complete type is required}} warn-warning {{type 'const S2' is not trivially copyable and not guaranteed to be mapped correctly}} warn-warning {{type 'const S3' is not trivially copyable and not guaranteed to be mapped correctly}}
#pragma omp target data map(argv[1])
-#pragma omp target data map(ba) // warn-warning {{Type 'const S2 [5]' is not trivially copyable and not guaranteed to be mapped correctly}}
-#pragma omp target data map(ca) // warn-warning {{Type 'const S3 [5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp target data map(ba) // warn-warning {{type 'const S2 [5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp target data map(ca) // warn-warning {{type 'const S3 [5]' is not trivially copyable and not guaranteed to be mapped correctly}}
#pragma omp target data map(da)
#pragma omp target data map(S2::S2s)
#pragma omp target data map(S2::S2sc)
-#pragma omp target data map(e, g) // warn-warning {{Type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} warn-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp target data map(e, g) // warn-warning {{type 'S4' is not trivially copyable and not guaranteed to be mapped correctly}} warn-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
#pragma omp target data map(h) // expected-error {{threadprivate variables are not allowed in 'map' clause}}
#pragma omp target data map(k), map(k) // lt50-error {{variable already marked as mapped in current construct}} lt50-note {{used here}}
#pragma omp target map(k), map(k[:5]) // lt50-error {{pointer cannot be mapped along with a section derived from itself}} lt50-note {{used here}}
@@ -872,7 +872,7 @@ int main(int argc, char **argv) {
{}
#pragma omp target firstprivate(j) map(j) // expected-error {{firstprivate variable cannot be in a map clause in '#pragma omp target' directive}} expected-note {{defined as firstprivate}}
{}
-#pragma omp target map(m) // warn-warning {{Type 'S6<int>' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp target map(m) // warn-warning {{type 'S6<int>' is not trivially copyable and not guaranteed to be mapped correctly}}
{}
#pragma omp target
{ s.a++; }
@@ -920,7 +920,7 @@ int main(int argc, char **argv) {
{ s.a++; }
#pragma omp target map(s.s.s.b[:2])
{ s.s.s.b[0]++; }
-#pragma omp target map(s8[0:1], s9) // warn-warning {{Type 'class S8' is not trivially copyable and not guaranteed to be mapped correctly}} warn-warning {{Type 'class S9' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp target map(s8[0:1], s9) // warn-warning {{type 'class S8' is not trivially copyable and not guaranteed to be mapped correctly}} warn-warning {{type 'class S9' is not trivially copyable and not guaranteed to be mapped correctly}}
{}
int **BB, *offset, *a;
diff --git a/clang/test/OpenMP/target_parallel_for_private_messages.cpp b/clang/test/OpenMP/target_parallel_for_private_messages.cpp
index 1c31badf51cd..81b4be4923d7 100644
--- a/clang/test/OpenMP/target_parallel_for_private_messages.cpp
+++ b/clang/test/OpenMP/target_parallel_for_private_messages.cpp
@@ -56,7 +56,7 @@ public:
S5(int v) : a(v) {}
S5 &operator=(S5 &s) {
#pragma omp target parallel for private(a) private(this->a) private(s.a) // expected-error {{expected variable name or data member of current class}}
- for (int k = 0; k < s.a; ++k) // expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (int k = 0; k < s.a; ++k) // expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
++s.a;
return *this;
}
diff --git a/clang/test/OpenMP/target_parallel_for_simd_private_messages.cpp b/clang/test/OpenMP/target_parallel_for_simd_private_messages.cpp
index db9d495698b0..c9b5bac0e693 100644
--- a/clang/test/OpenMP/target_parallel_for_simd_private_messages.cpp
+++ b/clang/test/OpenMP/target_parallel_for_simd_private_messages.cpp
@@ -56,7 +56,7 @@ public:
S5(int v) : a(v) {}
S5 &operator=(S5 &s) {
#pragma omp target parallel for simd private(a) private(this->a) private(s.a) // expected-error {{expected variable name or data member of current class}}
- for (int k = 0; k < s.a; ++k) // expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (int k = 0; k < s.a; ++k) // expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
++s.a;
return *this;
}
diff --git a/clang/test/OpenMP/target_private_messages.cpp b/clang/test/OpenMP/target_private_messages.cpp
index 7ee0c8cffb9c..8cdd3a11e87a 100644
--- a/clang/test/OpenMP/target_private_messages.cpp
+++ b/clang/test/OpenMP/target_private_messages.cpp
@@ -50,7 +50,7 @@ public:
S5(int v) : a(v) {}
S5 &operator=(S5 &s) {
#pragma omp target private(a) private(this->a) private(s.a) // expected-error {{expected variable name or data member of current class}}
- for (int k = 0; k < s.a; ++k) // expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (int k = 0; k < s.a; ++k) // expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
++s.a;
return *this;
}
diff --git a/clang/test/OpenMP/target_simd_private_messages.cpp b/clang/test/OpenMP/target_simd_private_messages.cpp
index 4a55a506d4ab..f6e4e714f8ff 100644
--- a/clang/test/OpenMP/target_simd_private_messages.cpp
+++ b/clang/test/OpenMP/target_simd_private_messages.cpp
@@ -56,7 +56,7 @@ public:
S5(int v) : a(v) {}
S5 &operator=(S5 &s) {
#pragma omp target simd private(a) private(this->a) private(s.a) // expected-error {{expected variable name or data member of current class}}
- for (int k = 0; k < s.a; ++k) // expected-warning {{Type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (int k = 0; k < s.a; ++k) // expected-warning {{type 'S5' is not trivially copyable and not guaranteed to be mapped correctly}}
++s.a;
return *this;
}
diff --git a/clang/test/OpenMP/target_teams_distribute_firstprivate_messages.cpp b/clang/test/OpenMP/target_teams_distribute_firstprivate_messages.cpp
index fccf5515998d..195af52b7892 100644
--- a/clang/test/OpenMP/target_teams_distribute_firstprivate_messages.cpp
+++ b/clang/test/OpenMP/target_teams_distribute_firstprivate_messages.cpp
@@ -119,7 +119,7 @@ int main(int argc, char **argv) {
for (i = 0; i < argc; ++i) foo();
#pragma omp target
-#pragma omp teams distribute firstprivate(ca) // expected-error {{no matching constructor for initialization of 'S3'}} expected-warning {{Type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp teams distribute firstprivate(ca) // expected-error {{no matching constructor for initialization of 'S3'}} expected-warning {{type 'const S3[5]' is not trivially copyable and not guaranteed to be mapped correctly}}
for (i = 0; i < argc; ++i) foo();
#pragma omp target teams distribute firstprivate(da, z)
diff --git a/clang/test/OpenMP/target_update_messages.cpp b/clang/test/OpenMP/target_update_messages.cpp
index 2bf0ade9fe91..83191059202c 100644
--- a/clang/test/OpenMP/target_update_messages.cpp
+++ b/clang/test/OpenMP/target_update_messages.cpp
@@ -18,14 +18,14 @@ static int y;
#pragma omp declare target(y)
void yyy() {
-#pragma omp target update to(y) // expected-error {{the host cannot update a declare target variable that is not externally visible.}}
+#pragma omp target update to(y) // expected-error {{the host cannot update a declare target variable that is not externally visible}}
}
int __attribute__((visibility("hidden"))) z;
#pragma omp declare target(z)
void zzz() {
-#pragma omp target update from(z) // expected-error {{the host cannot update a declare target variable that is not externally visible.}}
+#pragma omp target update from(z) // expected-error {{the host cannot update a declare target variable that is not externally visible}}
}
void foo() {
diff --git a/clang/test/OpenMP/teams_distribute_loop_messages.cpp b/clang/test/OpenMP/teams_distribute_loop_messages.cpp
index 167f653e2cd7..e5f146679e5f 100644
--- a/clang/test/OpenMP/teams_distribute_loop_messages.cpp
+++ b/clang/test/OpenMP/teams_distribute_loop_messages.cpp
@@ -416,7 +416,7 @@ int test_with_random_access_iterator() {
Iter0 begin0, end0;
#pragma omp target
#pragma omp teams distribute
- for (GoodIter I = begin; I < end; ++I) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I = begin; I < end; ++I) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute
@@ -425,31 +425,31 @@ int test_with_random_access_iterator() {
++I;
#pragma omp target
#pragma omp teams distribute
- for (GoodIter I = begin; I >= end; --I) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I = begin; I >= end; --I) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute
// expected-warning@+1 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
- for (GoodIter I(begin); I < end; ++I) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(begin); I < end; ++I) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute
// expected-warning@+1 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
- for (GoodIter I(nullptr); I < end; ++I) // expected-warning {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(nullptr); I < end; ++I) // expected-warning {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute
// expected-warning@+1 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
- for (GoodIter I(0); I < end; ++I) // expected-warning {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(0); I < end; ++I) // expected-warning {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute
// expected-warning@+1 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
- for (GoodIter I(1, 2); I < end; ++I) // expected-warning {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(1, 2); I < end; ++I) // expected-warning {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute
- for (begin = GoodIter(0); begin < end; ++begin) // expected-warning {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (begin = GoodIter(0); begin < end; ++begin) // expected-warning {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++begin;
#pragma omp target
#pragma omp teams distribute
@@ -464,7 +464,7 @@ int test_with_random_access_iterator() {
++begin;
#pragma omp target
#pragma omp teams distribute
- for (begin = end; begin < end; ++begin) // expected-warning {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (begin = end; begin < end; ++begin) // expected-warning {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++begin;
#pragma omp target
#pragma omp teams distribute
@@ -489,7 +489,7 @@ int test_with_random_access_iterator() {
++I;
#pragma omp target
#pragma omp teams distribute
- for (GoodIter I = begin; I >= end; I = I - 1) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I = begin; I >= end; I = I - 1) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute
@@ -551,19 +551,19 @@ public:
#pragma omp teams distribute
// expected-note@+2 {{loop step is expected to be positive due to this condition}}
// expected-error@+1 {{increment expression must cause 'I' to increase on each iteration of OpenMP for loop}}
- for (IT I = begin; I < end; I = I + ST) { // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (IT I = begin; I < end; I = I + ST) { // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
}
#pragma omp target
#pragma omp teams distribute
// expected-note@+2 {{loop step is expected to be positive due to this condition}}
// expected-error@+1 {{increment expression must cause 'I' to increase on each iteration of OpenMP for loop}}
- for (IT I = begin; I <= end; I += ST) { // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (IT I = begin; I <= end; I += ST) { // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
}
#pragma omp target
#pragma omp teams distribute
- for (IT I = begin; I < end; ++I) { // expected-warning 4 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (IT I = begin; I < end; ++I) { // expected-warning 4 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
}
}
@@ -599,7 +599,7 @@ int dotest_gt(IT begin, IT end) {
#pragma omp target
#pragma omp teams distribute
- for (IT I = begin; I < end; I += TC<int, ST>::step()) { // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (IT I = begin; I < end; I += TC<int, ST>::step()) { // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
}
}
@@ -702,7 +702,7 @@ void test_loop_firstprivate_lastprivate() {
S s(4);
// expected-error@+2 {{lastprivate variable cannot be firstprivate}} expected-note@+2 {{defined as lastprivate}}
#pragma omp target
-#pragma omp teams distribute lastprivate(s) firstprivate(s) // expected-error {{calling a private constructor of class 'S'}} expected-warning {{Type 'S' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp teams distribute lastprivate(s) firstprivate(s) // expected-error {{calling a private constructor of class 'S'}} expected-warning {{type 'S' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 16; ++i)
;
}
diff --git a/clang/test/OpenMP/teams_distribute_parallel_for_loop_messages.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_loop_messages.cpp
index cdfc5eaec228..67e3ce4dc157 100644
--- a/clang/test/OpenMP/teams_distribute_parallel_for_loop_messages.cpp
+++ b/clang/test/OpenMP/teams_distribute_parallel_for_loop_messages.cpp
@@ -414,7 +414,7 @@ int test_with_random_access_iterator() {
Iter0 begin0, end0;
#pragma omp target
#pragma omp teams distribute parallel for
- for (GoodIter I = begin; I < end; ++I) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I = begin; I < end; ++I) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute parallel for
@@ -423,31 +423,31 @@ int test_with_random_access_iterator() {
++I;
#pragma omp target
#pragma omp teams distribute parallel for
- for (GoodIter I = begin; I >= end; --I) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I = begin; I >= end; --I) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute parallel for
// expected-warning@+1 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
- for (GoodIter I(begin); I < end; ++I) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(begin); I < end; ++I) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute parallel for
// expected-warning@+1 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
- for (GoodIter I(nullptr); I < end; ++I) // expected-warning {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(nullptr); I < end; ++I) // expected-warning {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute parallel for
// expected-warning@+1 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
- for (GoodIter I(0); I < end; ++I) // expected-warning {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(0); I < end; ++I) // expected-warning {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute parallel for
// expected-warning@+1 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
- for (GoodIter I(1, 2); I < end; ++I) // expected-warning {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(1, 2); I < end; ++I) // expected-warning {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute parallel for
- for (begin = GoodIter(0); begin < end; ++begin) // expected-warning {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (begin = GoodIter(0); begin < end; ++begin) // expected-warning {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++begin;
#pragma omp target
#pragma omp teams distribute parallel for
@@ -462,7 +462,7 @@ int test_with_random_access_iterator() {
++begin;
#pragma omp target
#pragma omp teams distribute parallel for
- for (begin = end; begin < end; ++begin) // expected-warning {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (begin = end; begin < end; ++begin) // expected-warning {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++begin;
#pragma omp target
#pragma omp teams distribute parallel for
@@ -487,7 +487,7 @@ int test_with_random_access_iterator() {
++I;
#pragma omp target
#pragma omp teams distribute parallel for
- for (GoodIter I = begin; I >= end; I = I - 1) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I = begin; I >= end; I = I - 1) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute parallel for
@@ -549,19 +549,19 @@ public:
#pragma omp teams distribute parallel for
// expected-note@+2 {{loop step is expected to be positive due to this condition}}
// expected-error@+1 {{increment expression must cause 'I' to increase on each iteration of OpenMP for loop}}
- for (IT I = begin; I < end; I = I + ST) { // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (IT I = begin; I < end; I = I + ST) { // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
}
#pragma omp target
#pragma omp teams distribute parallel for
// expected-note@+2 {{loop step is expected to be positive due to this condition}}
// expected-error@+1 {{increment expression must cause 'I' to increase on each iteration of OpenMP for loop}}
- for (IT I = begin; I <= end; I += ST) { // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (IT I = begin; I <= end; I += ST) { // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
}
#pragma omp target
#pragma omp teams distribute parallel for
- for (IT I = begin; I < end; ++I) { // expected-warning 4 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (IT I = begin; I < end; ++I) { // expected-warning 4 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
}
}
@@ -597,7 +597,7 @@ int dotest_gt(IT begin, IT end) {
#pragma omp target
#pragma omp teams distribute parallel for
- for (IT I = begin; I < end; I += TC<int, ST>::step()) { // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (IT I = begin; I < end; I += TC<int, ST>::step()) { // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
}
}
@@ -697,7 +697,7 @@ void test_loop_firstprivate_lastprivate() {
S s(4);
// expected-error@+2 {{lastprivate variable cannot be firstprivate}} expected-note@+2 {{defined as lastprivate}}
#pragma omp target
-#pragma omp teams distribute parallel for lastprivate(s) firstprivate(s) // expected-error {{calling a private constructor of class 'S'}} expected-warning {{Type 'S' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp teams distribute parallel for lastprivate(s) firstprivate(s) // expected-error {{calling a private constructor of class 'S'}} expected-warning {{type 'S' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 16; ++i)
;
}
diff --git a/clang/test/OpenMP/teams_distribute_parallel_for_simd_loop_messages.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_simd_loop_messages.cpp
index 645035a3a163..7ee8b9c9d367 100644
--- a/clang/test/OpenMP/teams_distribute_parallel_for_simd_loop_messages.cpp
+++ b/clang/test/OpenMP/teams_distribute_parallel_for_simd_loop_messages.cpp
@@ -416,7 +416,7 @@ int test_with_random_access_iterator() {
Iter0 begin0, end0;
#pragma omp target
#pragma omp teams distribute parallel for simd
- for (GoodIter I = begin; I < end; ++I) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I = begin; I < end; ++I) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute parallel for simd
@@ -425,31 +425,31 @@ int test_with_random_access_iterator() {
++I;
#pragma omp target
#pragma omp teams distribute parallel for simd
- for (GoodIter I = begin; I >= end; --I) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I = begin; I >= end; --I) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute parallel for simd
// expected-warning@+1 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
- for (GoodIter I(begin); I < end; ++I) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(begin); I < end; ++I) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute parallel for simd
// expected-warning@+1 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
- for (GoodIter I(nullptr); I < end; ++I) // expected-warning {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(nullptr); I < end; ++I) // expected-warning {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute parallel for simd
// expected-warning@+1 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
- for (GoodIter I(0); I < end; ++I) // expected-warning {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(0); I < end; ++I) // expected-warning {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute parallel for simd
// expected-warning@+1 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
- for (GoodIter I(1, 2); I < end; ++I) // expected-warning {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(1, 2); I < end; ++I) // expected-warning {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute parallel for simd
- for (begin = GoodIter(0); begin < end; ++begin) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (begin = GoodIter(0); begin < end; ++begin) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++begin;
#pragma omp target
#pragma omp teams distribute parallel for simd
@@ -464,7 +464,7 @@ int test_with_random_access_iterator() {
++begin;
#pragma omp target
#pragma omp teams distribute parallel for simd
- for (begin = end; begin < end; ++begin) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (begin = end; begin < end; ++begin) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++begin;
#pragma omp target
#pragma omp teams distribute parallel for simd
@@ -489,7 +489,7 @@ int test_with_random_access_iterator() {
++I;
#pragma omp target
#pragma omp teams distribute parallel for simd
- for (GoodIter I = begin; I >= end; I = I - 1) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I = begin; I >= end; I = I - 1) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute parallel for simd
@@ -551,19 +551,19 @@ public:
#pragma omp teams distribute parallel for simd
// expected-note@+2 {{loop step is expected to be positive due to this condition}}
// expected-error@+1 {{increment expression must cause 'I' to increase on each iteration of OpenMP for loop}}
- for (IT I = begin; I < end; I = I + ST) { // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (IT I = begin; I < end; I = I + ST) { // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
}
#pragma omp target
#pragma omp teams distribute parallel for simd
// expected-note@+2 {{loop step is expected to be positive due to this condition}}
// expected-error@+1 {{increment expression must cause 'I' to increase on each iteration of OpenMP for loop}}
- for (IT I = begin; I <= end; I += ST) { // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (IT I = begin; I <= end; I += ST) { // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
}
#pragma omp target
#pragma omp teams distribute parallel for simd
- for (IT I = begin; I < end; ++I) { // expected-warning 4 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (IT I = begin; I < end; ++I) { // expected-warning 4 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
}
}
@@ -599,7 +599,7 @@ int dotest_gt(IT begin, IT end) {
#pragma omp target
#pragma omp teams distribute parallel for simd
- for (IT I = begin; I < end; I += TC<int, ST>::step()) { // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (IT I = begin; I < end; I += TC<int, ST>::step()) { // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
}
}
@@ -699,7 +699,7 @@ void test_loop_firstprivate_lastprivate() {
S s(4);
// expected-error@+2 {{lastprivate variable cannot be firstprivate}} expected-note@+2 {{defined as lastprivate}}
#pragma omp target
-#pragma omp teams distribute parallel for simd lastprivate(s) firstprivate(s) // expected-error {{calling a private constructor of class 'S'}} expected-warning {{Type 'S' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp teams distribute parallel for simd lastprivate(s) firstprivate(s) // expected-error {{calling a private constructor of class 'S'}} expected-warning {{type 'S' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 16; ++i)
;
}
diff --git a/clang/test/OpenMP/teams_distribute_simd_loop_messages.cpp b/clang/test/OpenMP/teams_distribute_simd_loop_messages.cpp
index 13eef6a98b3d..8bfddbf6e9ee 100644
--- a/clang/test/OpenMP/teams_distribute_simd_loop_messages.cpp
+++ b/clang/test/OpenMP/teams_distribute_simd_loop_messages.cpp
@@ -416,7 +416,7 @@ int test_with_random_access_iterator() {
Iter0 begin0, end0;
#pragma omp target
#pragma omp teams distribute simd
- for (GoodIter I = begin; I < end; ++I) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I = begin; I < end; ++I) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute simd
@@ -425,31 +425,31 @@ int test_with_random_access_iterator() {
++I;
#pragma omp target
#pragma omp teams distribute simd
- for (GoodIter I = begin; I >= end; --I) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I = begin; I >= end; --I) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute simd
// expected-warning@+1 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
- for (GoodIter I(begin); I < end; ++I) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(begin); I < end; ++I) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute simd
// expected-warning@+1 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
- for (GoodIter I(nullptr); I < end; ++I) // expected-warning {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(nullptr); I < end; ++I) // expected-warning {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute simd
// expected-warning@+1 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
- for (GoodIter I(0); I < end; ++I) // expected-warning {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(0); I < end; ++I) // expected-warning {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute simd
// expected-warning@+1 {{initialization clause of OpenMP for loop is not in canonical form ('var = init' or 'T var = init')}}
- for (GoodIter I(1, 2); I < end; ++I) // expected-warning {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I(1, 2); I < end; ++I) // expected-warning {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute simd
- for (begin = GoodIter(0); begin < end; ++begin) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (begin = GoodIter(0); begin < end; ++begin) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++begin;
#pragma omp target
#pragma omp teams distribute simd
@@ -464,7 +464,7 @@ int test_with_random_access_iterator() {
++begin;
#pragma omp target
#pragma omp teams distribute simd
- for (begin = end; begin < end; ++begin) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (begin = end; begin < end; ++begin) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++begin;
#pragma omp target
#pragma omp teams distribute simd
@@ -489,7 +489,7 @@ int test_with_random_access_iterator() {
++I;
#pragma omp target
#pragma omp teams distribute simd
- for (GoodIter I = begin; I >= end; I = I - 1) // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (GoodIter I = begin; I >= end; I = I - 1) // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
#pragma omp target
#pragma omp teams distribute simd
@@ -551,19 +551,19 @@ public:
#pragma omp teams distribute simd
// expected-note@+2 {{loop step is expected to be positive due to this condition}}
// expected-error@+1 {{increment expression must cause 'I' to increase on each iteration of OpenMP for loop}}
- for (IT I = begin; I < end; I = I + ST) { // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (IT I = begin; I < end; I = I + ST) { // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
}
#pragma omp target
#pragma omp teams distribute simd
// expected-note@+2 {{loop step is expected to be positive due to this condition}}
// expected-error@+1 {{increment expression must cause 'I' to increase on each iteration of OpenMP for loop}}
- for (IT I = begin; I <= end; I += ST) { // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (IT I = begin; I <= end; I += ST) { // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
}
#pragma omp target
#pragma omp teams distribute simd
- for (IT I = begin; I < end; ++I) { // expected-warning 4 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (IT I = begin; I < end; ++I) { // expected-warning 4 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
}
}
@@ -599,7 +599,7 @@ int dotest_gt(IT begin, IT end) {
  }
#pragma omp target
#pragma omp teams distribute simd
- for (IT I = begin; I < end; I += TC<int, ST>::step()) { // expected-warning 2 {{Type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
+ for (IT I = begin; I < end; I += TC<int, ST>::step()) { // expected-warning 2 {{type 'GoodIter' is not trivially copyable and not guaranteed to be mapped correctly}}
++I;
}
}
@@ -699,7 +699,7 @@ void test_loop_firstprivate_lastprivate() {
S s(4);
// expected-error@+2 {{lastprivate variable cannot be firstprivate}} expected-note@+2 {{defined as lastprivate}}
#pragma omp target
-#pragma omp teams distribute simd lastprivate(s) firstprivate(s) // expected-error {{calling a private constructor of class 'S'}} expected-warning {{Type 'S' is not trivially copyable and not guaranteed to be mapped correctly}}
+#pragma omp teams distribute simd lastprivate(s) firstprivate(s) // expected-error {{calling a private constructor of class 'S'}} expected-warning {{type 'S' is not trivially copyable and not guaranteed to be mapped correctly}}
for (int i = 0; i < 16; ++i)
;
}
diff --git a/clang/test/OpenMP/threadprivate_codegen.cpp b/clang/test/OpenMP/threadprivate_codegen.cpp
index d0bd2b411ec8..b27783be829d 100644
--- a/clang/test/OpenMP/threadprivate_codegen.cpp
+++ b/clang/test/OpenMP/threadprivate_codegen.cpp
@@ -1039,40 +1039,40 @@ int foobar() {
// CHECK1-NEXT: [[ARRAYINIT_BEGIN1:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 0, i64 0
// CHECK1-NEXT: store ptr [[ARRAYINIT_BEGIN1]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN1]], i32 noundef 1)
-// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// CHECK1: invoke.cont:
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[ARRAYINIT_BEGIN1]], i64 1
// CHECK1-NEXT: store ptr [[ARRAYINIT_ELEMENT]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
-// CHECK1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
+// CHECK1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
// CHECK1: invoke.cont3:
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT]], i64 1
// CHECK1-NEXT: store ptr [[ARRAYINIT_ELEMENT4]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT4]], i32 noundef 3)
-// CHECK1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]]
+// CHECK1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]]
// CHECK1: invoke.cont5:
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT7:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 1
// CHECK1-NEXT: store ptr [[ARRAYINIT_ELEMENT7]], ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK1-NEXT: [[ARRAYINIT_BEGIN8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_ELEMENT7]], i64 0, i64 0
// CHECK1-NEXT: store ptr [[ARRAYINIT_BEGIN8]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN8]], i32 noundef 4)
-// CHECK1-NEXT: to label [[INVOKE_CONT11:%.*]] unwind label [[LPAD10:%.*]]
+// CHECK1-NEXT: to label [[INVOKE_CONT11:%.*]] unwind label [[LPAD10:%.*]]
// CHECK1: invoke.cont11:
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT12:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_BEGIN8]], i64 1
// CHECK1-NEXT: store ptr [[ARRAYINIT_ELEMENT12]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT12]], i32 noundef 5)
-// CHECK1-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[LPAD10]]
+// CHECK1-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[LPAD10]]
// CHECK1: invoke.cont13:
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT14:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT12]], i64 1
// CHECK1-NEXT: store ptr [[ARRAYINIT_ELEMENT14]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT14]], i32 noundef 6)
-// CHECK1-NEXT: to label [[INVOKE_CONT15:%.*]] unwind label [[LPAD10]]
+// CHECK1-NEXT: to label [[INVOKE_CONT15:%.*]] unwind label [[LPAD10]]
// CHECK1: invoke.cont15:
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK1-NEXT: ret ptr [[TMP2]]
// CHECK1: lpad:
// CHECK1-NEXT: [[TMP3:%.*]] = landingpad { ptr, i32 }
-// CHECK1-NEXT: cleanup
+// CHECK1-NEXT: cleanup
// CHECK1-NEXT: [[TMP4:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 0
// CHECK1-NEXT: store ptr [[TMP4]], ptr [[EXN_SLOT]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 1
@@ -1090,7 +1090,7 @@ int foobar() {
// CHECK1-NEXT: br label [[EHCLEANUP:%.*]]
// CHECK1: lpad10:
// CHECK1-NEXT: [[TMP7:%.*]] = landingpad { ptr, i32 }
-// CHECK1-NEXT: cleanup
+// CHECK1-NEXT: cleanup
// CHECK1-NEXT: [[TMP8:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 0
// CHECK1-NEXT: store ptr [[TMP8]], ptr [[EXN_SLOT]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 1
@@ -1254,34 +1254,34 @@ int foobar() {
// CHECK1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// CHECK1: invoke.cont:
// CHECK1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// CHECK1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
+// CHECK1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
// CHECK1: invoke.cont2:
// CHECK1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// CHECK1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
+// CHECK1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
// CHECK1: invoke.cont3:
// CHECK1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// CHECK1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
+// CHECK1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
// CHECK1: invoke.cont7:
// CHECK1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// CHECK1-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
+// CHECK1-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
// CHECK1: invoke.cont8:
// CHECK1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// CHECK1-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
+// CHECK1-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
// CHECK1: invoke.cont9:
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]]
// CHECK1-NEXT: ret void
// CHECK1: lpad:
// CHECK1-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// CHECK1-NEXT: cleanup
+// CHECK1-NEXT: cleanup
// CHECK1-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0
// CHECK1-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1
@@ -1299,7 +1299,7 @@ int foobar() {
// CHECK1-NEXT: br label [[EHCLEANUP:%.*]]
// CHECK1: lpad6:
// CHECK1-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// CHECK1-NEXT: cleanup
+// CHECK1-NEXT: cleanup
// CHECK1-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0
// CHECK1-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1
@@ -1375,7 +1375,7 @@ int foobar() {
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4
// CHECK1-NEXT: invoke void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP5]])
-// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// CHECK1: invoke.cont:
// CHECK1-NEXT: [[TMP6:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]]
// CHECK1-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR3]]
@@ -1436,7 +1436,7 @@ int foobar() {
// CHECK1-NEXT: ret i32 [[TMP32]]
// CHECK1: lpad:
// CHECK1-NEXT: [[TMP33:%.*]] = landingpad { ptr, i32 }
-// CHECK1-NEXT: cleanup
+// CHECK1-NEXT: cleanup
// CHECK1-NEXT: [[TMP34:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 0
// CHECK1-NEXT: store ptr [[TMP34]], ptr [[EXN_SLOT]], align 8
// CHECK1-NEXT: [[TMP35:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 1
@@ -1525,7 +1525,7 @@ int foobar() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z6foobarv
-// CHECK1-SAME: () #[[ATTR5:[0-9]+]] {
+// CHECK1-SAME: () #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[RES:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
@@ -1777,34 +1777,34 @@ int foobar() {
// CHECK2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// CHECK2: invoke.cont:
// CHECK2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// CHECK2-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
+// CHECK2-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
// CHECK2: invoke.cont2:
// CHECK2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// CHECK2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
+// CHECK2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
// CHECK2: invoke.cont3:
// CHECK2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// CHECK2-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
+// CHECK2-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
// CHECK2: invoke.cont7:
// CHECK2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// CHECK2-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
+// CHECK2-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
// CHECK2: invoke.cont8:
// CHECK2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// CHECK2-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
+// CHECK2-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
// CHECK2: invoke.cont9:
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]]
// CHECK2-NEXT: ret void
// CHECK2: lpad:
// CHECK2-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// CHECK2-NEXT: cleanup
+// CHECK2-NEXT: cleanup
// CHECK2-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0
// CHECK2-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1
@@ -1822,7 +1822,7 @@ int foobar() {
// CHECK2-NEXT: br label [[EHCLEANUP:%.*]]
// CHECK2: lpad6:
// CHECK2-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// CHECK2-NEXT: cleanup
+// CHECK2-NEXT: cleanup
// CHECK2-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0
// CHECK2-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1
@@ -1891,40 +1891,40 @@ int foobar() {
// CHECK2-NEXT: [[ARRAYINIT_BEGIN1:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 0, i64 0
// CHECK2-NEXT: store ptr [[ARRAYINIT_BEGIN1]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN1]], i32 noundef 1)
-// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// CHECK2: invoke.cont:
// CHECK2-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[ARRAYINIT_BEGIN1]], i64 1
// CHECK2-NEXT: store ptr [[ARRAYINIT_ELEMENT]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
-// CHECK2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
+// CHECK2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
// CHECK2: invoke.cont3:
// CHECK2-NEXT: [[ARRAYINIT_ELEMENT4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT]], i64 1
// CHECK2-NEXT: store ptr [[ARRAYINIT_ELEMENT4]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT4]], i32 noundef 3)
-// CHECK2-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]]
+// CHECK2-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]]
// CHECK2: invoke.cont5:
// CHECK2-NEXT: [[ARRAYINIT_ELEMENT7:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 1
// CHECK2-NEXT: store ptr [[ARRAYINIT_ELEMENT7]], ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK2-NEXT: [[ARRAYINIT_BEGIN8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_ELEMENT7]], i64 0, i64 0
// CHECK2-NEXT: store ptr [[ARRAYINIT_BEGIN8]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN8]], i32 noundef 4)
-// CHECK2-NEXT: to label [[INVOKE_CONT11:%.*]] unwind label [[LPAD10:%.*]]
+// CHECK2-NEXT: to label [[INVOKE_CONT11:%.*]] unwind label [[LPAD10:%.*]]
// CHECK2: invoke.cont11:
// CHECK2-NEXT: [[ARRAYINIT_ELEMENT12:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_BEGIN8]], i64 1
// CHECK2-NEXT: store ptr [[ARRAYINIT_ELEMENT12]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT12]], i32 noundef 5)
-// CHECK2-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[LPAD10]]
+// CHECK2-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[LPAD10]]
// CHECK2: invoke.cont13:
// CHECK2-NEXT: [[ARRAYINIT_ELEMENT14:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT12]], i64 1
// CHECK2-NEXT: store ptr [[ARRAYINIT_ELEMENT14]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT14]], i32 noundef 6)
-// CHECK2-NEXT: to label [[INVOKE_CONT15:%.*]] unwind label [[LPAD10]]
+// CHECK2-NEXT: to label [[INVOKE_CONT15:%.*]] unwind label [[LPAD10]]
// CHECK2: invoke.cont15:
// CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK2-NEXT: ret ptr [[TMP2]]
// CHECK2: lpad:
// CHECK2-NEXT: [[TMP3:%.*]] = landingpad { ptr, i32 }
-// CHECK2-NEXT: cleanup
+// CHECK2-NEXT: cleanup
// CHECK2-NEXT: [[TMP4:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 0
// CHECK2-NEXT: store ptr [[TMP4]], ptr [[EXN_SLOT]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 1
@@ -1942,7 +1942,7 @@ int foobar() {
// CHECK2-NEXT: br label [[EHCLEANUP:%.*]]
// CHECK2: lpad10:
// CHECK2-NEXT: [[TMP7:%.*]] = landingpad { ptr, i32 }
-// CHECK2-NEXT: cleanup
+// CHECK2-NEXT: cleanup
// CHECK2-NEXT: [[TMP8:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 0
// CHECK2-NEXT: store ptr [[TMP8]], ptr [[EXN_SLOT]], align 8
// CHECK2-NEXT: [[TMP9:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 1
@@ -2029,7 +2029,7 @@ int foobar() {
// CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP4]], i32 0, i32 0
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4
// CHECK2-NEXT: invoke void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP5]])
-// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// CHECK2: invoke.cont:
// CHECK2-NEXT: [[TMP6:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]]
// CHECK2-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR3]]
@@ -2090,7 +2090,7 @@ int foobar() {
// CHECK2-NEXT: ret i32 [[TMP32]]
// CHECK2: lpad:
// CHECK2-NEXT: [[TMP33:%.*]] = landingpad { ptr, i32 }
-// CHECK2-NEXT: cleanup
+// CHECK2-NEXT: cleanup
// CHECK2-NEXT: [[TMP34:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 0
// CHECK2-NEXT: store ptr [[TMP34]], ptr [[EXN_SLOT]], align 8
// CHECK2-NEXT: [[TMP35:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 1
@@ -2154,7 +2154,7 @@ int foobar() {
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z6foobarv
-// CHECK2-SAME: () #[[ATTR5:[0-9]+]] {
+// CHECK2-SAME: () #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[RES:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
@@ -2452,34 +2452,34 @@ int foobar() {
// SIMD1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8
// SIMD1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// SIMD1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// SIMD1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// SIMD1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// SIMD1: invoke.cont:
// SIMD1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// SIMD1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// SIMD1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
+// SIMD1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
// SIMD1: invoke.cont2:
// SIMD1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// SIMD1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// SIMD1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
+// SIMD1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
// SIMD1: invoke.cont3:
// SIMD1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8
// SIMD1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// SIMD1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// SIMD1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
+// SIMD1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
// SIMD1: invoke.cont7:
// SIMD1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// SIMD1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// SIMD1-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
+// SIMD1-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
// SIMD1: invoke.cont8:
// SIMD1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// SIMD1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// SIMD1-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
+// SIMD1-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
// SIMD1: invoke.cont9:
// SIMD1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]]
// SIMD1-NEXT: ret void
// SIMD1: lpad:
// SIMD1-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// SIMD1-NEXT: cleanup
+// SIMD1-NEXT: cleanup
// SIMD1-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0
// SIMD1-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8
// SIMD1-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1
@@ -2497,7 +2497,7 @@ int foobar() {
// SIMD1-NEXT: br label [[EHCLEANUP:%.*]]
// SIMD1: lpad6:
// SIMD1-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// SIMD1-NEXT: cleanup
+// SIMD1-NEXT: cleanup
// SIMD1-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0
// SIMD1-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8
// SIMD1-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1
@@ -2568,7 +2568,7 @@ int foobar() {
// SIMD1: init:
// SIMD1-NEXT: [[TMP2:%.*]] = load i32, ptr @_ZL3gs1, align 4
// SIMD1-NEXT: invoke void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP2]])
-// SIMD1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// SIMD1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// SIMD1: invoke.cont:
// SIMD1-NEXT: [[TMP3:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]]
// SIMD1-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR3]]
@@ -2613,7 +2613,7 @@ int foobar() {
// SIMD1-NEXT: ret i32 [[TMP21]]
// SIMD1: lpad:
// SIMD1-NEXT: [[TMP22:%.*]] = landingpad { ptr, i32 }
-// SIMD1-NEXT: cleanup
+// SIMD1-NEXT: cleanup
// SIMD1-NEXT: [[TMP23:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 0
// SIMD1-NEXT: store ptr [[TMP23]], ptr [[EXN_SLOT]], align 8
// SIMD1-NEXT: [[TMP24:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 1
@@ -2652,7 +2652,7 @@ int foobar() {
//
//
// SIMD1-LABEL: define {{[^@]+}}@_Z6foobarv
-// SIMD1-SAME: () #[[ATTR5:[0-9]+]] {
+// SIMD1-SAME: () #[[ATTR2]] {
// SIMD1-NEXT: entry:
// SIMD1-NEXT: [[RES:%.*]] = alloca i32, align 4
// SIMD1-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZN6Static1sE, align 4
@@ -2840,179 +2840,179 @@ int foobar() {
// SIMD2-LABEL: define {{[^@]+}}@__cxx_global_var_init
// SIMD2-SAME: () #[[ATTR0:[0-9]+]] !dbg [[DBG115:![0-9]+]] {
// SIMD2-NEXT: entry:
-// SIMD2-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG119:![0-9]+]]
-// SIMD2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR3:[0-9]+]], !dbg [[DBG121:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG122:![0-9]+]]
+// SIMD2-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG118:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR3:[0-9]+]], !dbg [[DBG120:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG121:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S1C1Ei
-// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 !dbg [[DBG123:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 !dbg [[DBG122:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META124:![0-9]+]], metadata !DIExpression()), !dbg [[DBG126:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META123:![0-9]+]], metadata !DIExpression()), !dbg [[DBG125:![0-9]+]]
// SIMD2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META127:![0-9]+]], metadata !DIExpression()), !dbg [[DBG128:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META126:![0-9]+]], metadata !DIExpression()), !dbg [[DBG127:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG129:![0-9]+]]
-// SIMD2-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG129]]
-// SIMD2-NEXT: ret void, !dbg [[DBG130:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG128:![0-9]+]]
+// SIMD2-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG128]]
+// SIMD2-NEXT: ret void, !dbg [[DBG129:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S1D1Ev
-// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 !dbg [[DBG131:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 !dbg [[DBG130:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META132:![0-9]+]], metadata !DIExpression()), !dbg [[DBG133:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META131:![0-9]+]], metadata !DIExpression()), !dbg [[DBG132:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]], !dbg [[DBG134:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG135:![0-9]+]]
+// SIMD2-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]], !dbg [[DBG133:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG134:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
-// SIMD2-SAME: () #[[ATTR0]] !dbg [[DBG136:![0-9]+]] {
+// SIMD2-SAME: () #[[ATTR0]] !dbg [[DBG135:![0-9]+]] {
// SIMD2-NEXT: entry:
-// SIMD2-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG137:![0-9]+]]
-// SIMD2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG139:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG140:![0-9]+]]
+// SIMD2-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG136:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG138:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG139:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S2C1Ei
-// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG141:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG140:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META142:![0-9]+]], metadata !DIExpression()), !dbg [[DBG144:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META141:![0-9]+]], metadata !DIExpression()), !dbg [[DBG143:![0-9]+]]
// SIMD2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META145:![0-9]+]], metadata !DIExpression()), !dbg [[DBG146:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META144:![0-9]+]], metadata !DIExpression()), !dbg [[DBG145:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG147:![0-9]+]]
-// SIMD2-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG147]]
-// SIMD2-NEXT: ret void, !dbg [[DBG148:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG146:![0-9]+]]
+// SIMD2-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG146]]
+// SIMD2-NEXT: ret void, !dbg [[DBG147:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S2D1Ev
-// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG149:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG148:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META150:![0-9]+]], metadata !DIExpression()), !dbg [[DBG151:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META149:![0-9]+]], metadata !DIExpression()), !dbg [[DBG150:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR3]], !dbg [[DBG152:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG153:![0-9]+]]
+// SIMD2-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR3]], !dbg [[DBG151:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG152:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
-// SIMD2-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG154:![0-9]+]] {
+// SIMD2-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG153:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[ARRAYINIT_ENDOFINIT:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[ARRAYINIT_ENDOFINIT1:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// SIMD2-NEXT: [[ARRAYINIT_ENDOFINIT5:%.*]] = alloca ptr, align 8
-// SIMD2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG155:![0-9]+]]
-// SIMD2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG157:![0-9]+]]
+// SIMD2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG154:![0-9]+]]
+// SIMD2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG156:![0-9]+]]
// SIMD2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// SIMD2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG158:![0-9]+]]
+// SIMD2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG157:![0-9]+]]
// SIMD2: invoke.cont:
-// SIMD2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG157]]
+// SIMD2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG156]]
// SIMD2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// SIMD2-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG159:![0-9]+]]
+// SIMD2-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG158:![0-9]+]]
// SIMD2: invoke.cont2:
-// SIMD2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG157]]
+// SIMD2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG156]]
// SIMD2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// SIMD2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG160:![0-9]+]]
+// SIMD2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG159:![0-9]+]]
// SIMD2: invoke.cont3:
-// SIMD2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG155]]
-// SIMD2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG161:![0-9]+]]
+// SIMD2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG154]]
+// SIMD2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG160:![0-9]+]]
// SIMD2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// SIMD2-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG162:![0-9]+]]
+// SIMD2-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG161:![0-9]+]]
// SIMD2: invoke.cont7:
-// SIMD2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG161]]
+// SIMD2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG160]]
// SIMD2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// SIMD2-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG163:![0-9]+]]
+// SIMD2-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG162:![0-9]+]]
// SIMD2: invoke.cont8:
-// SIMD2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG161]]
+// SIMD2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG160]]
// SIMD2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// SIMD2-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG164:![0-9]+]]
+// SIMD2-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG163:![0-9]+]]
// SIMD2: invoke.cont9:
-// SIMD2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG165:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG165]]
+// SIMD2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG164:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG164]]
// SIMD2: lpad:
// SIMD2-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// SIMD2-NEXT: cleanup, !dbg [[DBG166:![0-9]+]]
-// SIMD2-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG166]]
-// SIMD2-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG166]]
-// SIMD2-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG166]]
-// SIMD2-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG166]]
-// SIMD2-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG157]]
-// SIMD2-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG157]]
-// SIMD2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG157]]
+// SIMD2-NEXT: cleanup, !dbg [[DBG165:![0-9]+]]
+// SIMD2-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG165]]
+// SIMD2-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG165]]
+// SIMD2-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG165]]
+// SIMD2-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG165]]
+// SIMD2-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG156]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG156]]
+// SIMD2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG156]]
// SIMD2: arraydestroy.body:
-// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG157]]
-// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG157]]
-// SIMD2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG157]]
-// SIMD2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG157]]
-// SIMD2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG157]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG156]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG156]]
+// SIMD2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG156]]
+// SIMD2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG156]]
+// SIMD2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG156]]
// SIMD2: arraydestroy.done4:
-// SIMD2-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG157]]
+// SIMD2-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG156]]
// SIMD2: lpad6:
// SIMD2-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// SIMD2-NEXT: cleanup, !dbg [[DBG166]]
-// SIMD2-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG166]]
-// SIMD2-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG166]]
-// SIMD2-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG166]]
-// SIMD2-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG166]]
-// SIMD2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG161]]
-// SIMD2-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG161]]
-// SIMD2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG161]]
+// SIMD2-NEXT: cleanup, !dbg [[DBG165]]
+// SIMD2-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG165]]
+// SIMD2-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG165]]
+// SIMD2-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG165]]
+// SIMD2-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG165]]
+// SIMD2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG160]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG160]]
+// SIMD2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG160]]
// SIMD2: arraydestroy.body11:
-// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG161]]
-// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG161]]
-// SIMD2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR3]], !dbg [[DBG161]]
-// SIMD2-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG161]]
-// SIMD2-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG161]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG160]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG160]]
+// SIMD2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR3]], !dbg [[DBG160]]
+// SIMD2-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG160]]
+// SIMD2-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG160]]
// SIMD2: arraydestroy.done15:
-// SIMD2-NEXT: br label [[EHCLEANUP]], !dbg [[DBG161]]
+// SIMD2-NEXT: br label [[EHCLEANUP]], !dbg [[DBG160]]
// SIMD2: ehcleanup:
-// SIMD2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG155]]
-// SIMD2-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG155]]
-// SIMD2-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG155]]
-// SIMD2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG155]]
+// SIMD2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG154]]
+// SIMD2-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG154]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG154]]
+// SIMD2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG154]]
// SIMD2: arraydestroy.body17:
-// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG155]]
-// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG155]]
-// SIMD2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR3]], !dbg [[DBG155]]
-// SIMD2-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG155]]
-// SIMD2-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG155]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG154]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG154]]
+// SIMD2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR3]], !dbg [[DBG154]]
+// SIMD2-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG154]]
+// SIMD2-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG154]]
// SIMD2: arraydestroy.done21:
-// SIMD2-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG155]]
+// SIMD2-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG154]]
// SIMD2: eh.resume:
-// SIMD2-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG155]]
-// SIMD2-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG155]]
-// SIMD2-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG155]]
-// SIMD2-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG155]]
-// SIMD2-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG155]]
+// SIMD2-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG154]]
+// SIMD2-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG154]]
+// SIMD2-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG154]]
+// SIMD2-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG154]]
+// SIMD2-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG154]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
-// SIMD2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG167:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG166:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META171:![0-9]+]], metadata !DIExpression()), !dbg [[DBG172:![0-9]+]]
-// SIMD2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG172]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META170:![0-9]+]], metadata !DIExpression()), !dbg [[DBG171:![0-9]+]]
+// SIMD2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG171]]
// SIMD2: arraydestroy.body:
-// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG172]]
-// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG172]]
-// SIMD2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG172]]
-// SIMD2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG172]]
-// SIMD2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG172]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG171]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG171]]
+// SIMD2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG171]]
+// SIMD2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG171]]
+// SIMD2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG171]]
// SIMD2: arraydestroy.done1:
-// SIMD2-NEXT: ret void, !dbg [[DBG172]]
+// SIMD2-NEXT: ret void, !dbg [[DBG171]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@main
@@ -3023,302 +3023,302 @@ int foobar() {
// SIMD2-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// SIMD2-NEXT: store i32 0, ptr [[RETVAL]], align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META173:![0-9]+]], metadata !DIExpression()), !dbg [[DBG174:![0-9]+]]
-// SIMD2-NEXT: [[TMP0:%.*]] = load atomic i8, ptr @_ZGVZ4mainE2sm acquire, align 8, !dbg [[DBG175:![0-9]+]]
-// SIMD2-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG175]]
-// SIMD2-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG175]], !prof [[PROF176:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META172:![0-9]+]], metadata !DIExpression()), !dbg [[DBG173:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load atomic i8, ptr @_ZGVZ4mainE2sm acquire, align 8, !dbg [[DBG174:![0-9]+]]
+// SIMD2-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG174]]
+// SIMD2-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG174]], !prof [[PROF175:![0-9]+]]
// SIMD2: init.check:
-// SIMD2-NEXT: [[TMP1:%.*]] = call i32 @__cxa_guard_acquire(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG175]]
-// SIMD2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0, !dbg [[DBG175]]
-// SIMD2-NEXT: br i1 [[TOBOOL]], label [[INIT:%.*]], label [[INIT_END]], !dbg [[DBG175]]
+// SIMD2-NEXT: [[TMP1:%.*]] = call i32 @__cxa_guard_acquire(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG174]]
+// SIMD2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0, !dbg [[DBG174]]
+// SIMD2-NEXT: br i1 [[TOBOOL]], label [[INIT:%.*]], label [[INIT_END]], !dbg [[DBG174]]
// SIMD2: init:
-// SIMD2-NEXT: [[TMP2:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG177:![0-9]+]]
+// SIMD2-NEXT: [[TMP2:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG176:![0-9]+]]
// SIMD2-NEXT: invoke void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP2]])
-// SIMD2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG178:![0-9]+]]
+// SIMD2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG177:![0-9]+]]
// SIMD2: invoke.cont:
-// SIMD2-NEXT: [[TMP3:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG175]]
-// SIMD2-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG175]]
-// SIMD2-NEXT: br label [[INIT_END]], !dbg [[DBG175]]
+// SIMD2-NEXT: [[TMP3:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG174]]
+// SIMD2-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG174]]
+// SIMD2-NEXT: br label [[INIT_END]], !dbg [[DBG174]]
// SIMD2: init.end:
-// SIMD2-NEXT: [[TMP4:%.*]] = load i32, ptr @_ZN6Static1sE, align 4, !dbg [[DBG179:![0-9]+]]
-// SIMD2-NEXT: store i32 [[TMP4]], ptr [[RES]], align 4, !dbg [[DBG180:![0-9]+]]
-// SIMD2-NEXT: [[TMP5:%.*]] = load i32, ptr @_ZZ4mainE2sm, align 8, !dbg [[DBG181:![0-9]+]]
-// SIMD2-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG182:![0-9]+]]
-// SIMD2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG182]]
-// SIMD2-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG182]]
-// SIMD2-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG183:![0-9]+]]
-// SIMD2-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG184:![0-9]+]]
-// SIMD2-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG184]]
-// SIMD2-NEXT: store i32 [[ADD1]], ptr [[RES]], align 4, !dbg [[DBG184]]
-// SIMD2-NEXT: [[TMP9:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG185:![0-9]+]]
-// SIMD2-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG186:![0-9]+]]
-// SIMD2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG186]]
-// SIMD2-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG186]]
-// SIMD2-NEXT: [[TMP11:%.*]] = load i32, ptr @gs3, align 4, !dbg [[DBG187:![0-9]+]]
-// SIMD2-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG188:![0-9]+]]
-// SIMD2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP11]], !dbg [[DBG188]]
-// SIMD2-NEXT: store i32 [[ADD3]], ptr [[RES]], align 4, !dbg [[DBG188]]
-// SIMD2-NEXT: [[TMP13:%.*]] = load i32, ptr getelementptr inbounds ([2 x [3 x %struct.S1]], ptr @arr_x, i64 0, i64 1, i64 1), align 4, !dbg [[DBG189:![0-9]+]]
-// SIMD2-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG190:![0-9]+]]
-// SIMD2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG190]]
-// SIMD2-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG190]]
-// SIMD2-NEXT: [[TMP15:%.*]] = load i32, ptr @_ZN2STIiE2stE, align 4, !dbg [[DBG191:![0-9]+]]
-// SIMD2-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG192:![0-9]+]]
-// SIMD2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG192]]
-// SIMD2-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG192]]
-// SIMD2-NEXT: [[TMP17:%.*]] = load float, ptr @_ZN2STIfE2stE, align 4, !dbg [[DBG193:![0-9]+]]
-// SIMD2-NEXT: [[CONV:%.*]] = fptosi float [[TMP17]] to i32, !dbg [[DBG193]]
-// SIMD2-NEXT: [[TMP18:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG194:![0-9]+]]
-// SIMD2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], [[CONV]], !dbg [[DBG194]]
-// SIMD2-NEXT: store i32 [[ADD6]], ptr [[RES]], align 4, !dbg [[DBG194]]
-// SIMD2-NEXT: [[TMP19:%.*]] = load i32, ptr @_ZN2STI2S4E2stE, align 4, !dbg [[DBG195:![0-9]+]]
-// SIMD2-NEXT: [[TMP20:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG196:![0-9]+]]
-// SIMD2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], [[TMP19]], !dbg [[DBG196]]
-// SIMD2-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG196]]
-// SIMD2-NEXT: [[TMP21:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG197:![0-9]+]]
-// SIMD2-NEXT: ret i32 [[TMP21]], !dbg [[DBG198:![0-9]+]]
+// SIMD2-NEXT: [[TMP4:%.*]] = load i32, ptr @_ZN6Static1sE, align 4, !dbg [[DBG178:![0-9]+]]
+// SIMD2-NEXT: store i32 [[TMP4]], ptr [[RES]], align 4, !dbg [[DBG179:![0-9]+]]
+// SIMD2-NEXT: [[TMP5:%.*]] = load i32, ptr @_ZZ4mainE2sm, align 8, !dbg [[DBG180:![0-9]+]]
+// SIMD2-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG181:![0-9]+]]
+// SIMD2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG181]]
+// SIMD2-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG181]]
+// SIMD2-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG182:![0-9]+]]
+// SIMD2-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG183:![0-9]+]]
+// SIMD2-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG183]]
+// SIMD2-NEXT: store i32 [[ADD1]], ptr [[RES]], align 4, !dbg [[DBG183]]
+// SIMD2-NEXT: [[TMP9:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG184:![0-9]+]]
+// SIMD2-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG185:![0-9]+]]
+// SIMD2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG185]]
+// SIMD2-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG185]]
+// SIMD2-NEXT: [[TMP11:%.*]] = load i32, ptr @gs3, align 4, !dbg [[DBG186:![0-9]+]]
+// SIMD2-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG187:![0-9]+]]
+// SIMD2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP11]], !dbg [[DBG187]]
+// SIMD2-NEXT: store i32 [[ADD3]], ptr [[RES]], align 4, !dbg [[DBG187]]
+// SIMD2-NEXT: [[TMP13:%.*]] = load i32, ptr getelementptr inbounds ([2 x [3 x %struct.S1]], ptr @arr_x, i64 0, i64 1, i64 1), align 4, !dbg [[DBG188:![0-9]+]]
+// SIMD2-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG189:![0-9]+]]
+// SIMD2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG189]]
+// SIMD2-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG189]]
+// SIMD2-NEXT: [[TMP15:%.*]] = load i32, ptr @_ZN2STIiE2stE, align 4, !dbg [[DBG190:![0-9]+]]
+// SIMD2-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG191:![0-9]+]]
+// SIMD2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG191]]
+// SIMD2-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG191]]
+// SIMD2-NEXT: [[TMP17:%.*]] = load float, ptr @_ZN2STIfE2stE, align 4, !dbg [[DBG192:![0-9]+]]
+// SIMD2-NEXT: [[CONV:%.*]] = fptosi float [[TMP17]] to i32, !dbg [[DBG192]]
+// SIMD2-NEXT: [[TMP18:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG193:![0-9]+]]
+// SIMD2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], [[CONV]], !dbg [[DBG193]]
+// SIMD2-NEXT: store i32 [[ADD6]], ptr [[RES]], align 4, !dbg [[DBG193]]
+// SIMD2-NEXT: [[TMP19:%.*]] = load i32, ptr @_ZN2STI2S4E2stE, align 4, !dbg [[DBG194:![0-9]+]]
+// SIMD2-NEXT: [[TMP20:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG195:![0-9]+]]
+// SIMD2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], [[TMP19]], !dbg [[DBG195]]
+// SIMD2-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG195]]
+// SIMD2-NEXT: [[TMP21:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG196:![0-9]+]]
+// SIMD2-NEXT: ret i32 [[TMP21]], !dbg [[DBG197:![0-9]+]]
// SIMD2: lpad:
// SIMD2-NEXT: [[TMP22:%.*]] = landingpad { ptr, i32 }
-// SIMD2-NEXT: cleanup, !dbg [[DBG199:![0-9]+]]
-// SIMD2-NEXT: [[TMP23:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 0, !dbg [[DBG199]]
-// SIMD2-NEXT: store ptr [[TMP23]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG199]]
-// SIMD2-NEXT: [[TMP24:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 1, !dbg [[DBG199]]
-// SIMD2-NEXT: store i32 [[TMP24]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG199]]
-// SIMD2-NEXT: call void @__cxa_guard_abort(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG175]]
-// SIMD2-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG175]]
+// SIMD2-NEXT: cleanup, !dbg [[DBG198:![0-9]+]]
+// SIMD2-NEXT: [[TMP23:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 0, !dbg [[DBG198]]
+// SIMD2-NEXT: store ptr [[TMP23]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG198]]
+// SIMD2-NEXT: [[TMP24:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 1, !dbg [[DBG198]]
+// SIMD2-NEXT: store i32 [[TMP24]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG198]]
+// SIMD2-NEXT: call void @__cxa_guard_abort(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG174]]
+// SIMD2-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG174]]
// SIMD2: eh.resume:
-// SIMD2-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG175]]
-// SIMD2-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG175]]
-// SIMD2-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG175]]
-// SIMD2-NEXT: [[LPAD_VAL8:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG175]]
-// SIMD2-NEXT: resume { ptr, i32 } [[LPAD_VAL8]], !dbg [[DBG175]]
+// SIMD2-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG174]]
+// SIMD2-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG174]]
+// SIMD2-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG174]]
+// SIMD2-NEXT: [[LPAD_VAL8:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG174]]
+// SIMD2-NEXT: resume { ptr, i32 } [[LPAD_VAL8]], !dbg [[DBG174]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC1Ei
-// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 !dbg [[DBG200:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 !dbg [[DBG199:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META201:![0-9]+]], metadata !DIExpression()), !dbg [[DBG203:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META200:![0-9]+]], metadata !DIExpression()), !dbg [[DBG202:![0-9]+]]
// SIMD2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META204:![0-9]+]], metadata !DIExpression()), !dbg [[DBG205:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META203:![0-9]+]], metadata !DIExpression()), !dbg [[DBG204:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG206:![0-9]+]]
-// SIMD2-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG206]]
-// SIMD2-NEXT: ret void, !dbg [[DBG207:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG205:![0-9]+]]
+// SIMD2-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG205]]
+// SIMD2-NEXT: ret void, !dbg [[DBG206:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD1Ev
-// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG208:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG207:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META209:![0-9]+]], metadata !DIExpression()), !dbg [[DBG210:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META208:![0-9]+]], metadata !DIExpression()), !dbg [[DBG209:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]], !dbg [[DBG211:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG212:![0-9]+]]
+// SIMD2-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]], !dbg [[DBG210:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG211:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_Z6foobarv
-// SIMD2-SAME: () #[[ATTR6:[0-9]+]] !dbg [[DBG213:![0-9]+]] {
+// SIMD2-SAME: () #[[ATTR2]] !dbg [[DBG212:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[RES:%.*]] = alloca i32, align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META214:![0-9]+]], metadata !DIExpression()), !dbg [[DBG215:![0-9]+]]
-// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZN6Static1sE, align 4, !dbg [[DBG216:![0-9]+]]
-// SIMD2-NEXT: store i32 [[TMP0]], ptr [[RES]], align 4, !dbg [[DBG217:![0-9]+]]
-// SIMD2-NEXT: [[TMP1:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG218:![0-9]+]]
-// SIMD2-NEXT: [[TMP2:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG219:![0-9]+]]
-// SIMD2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[TMP1]], !dbg [[DBG219]]
-// SIMD2-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG219]]
-// SIMD2-NEXT: [[TMP3:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG220:![0-9]+]]
-// SIMD2-NEXT: [[TMP4:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG221:![0-9]+]]
-// SIMD2-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], [[TMP3]], !dbg [[DBG221]]
-// SIMD2-NEXT: store i32 [[ADD1]], ptr [[RES]], align 4, !dbg [[DBG221]]
-// SIMD2-NEXT: [[TMP5:%.*]] = load i32, ptr @gs3, align 4, !dbg [[DBG222:![0-9]+]]
-// SIMD2-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG223:![0-9]+]]
-// SIMD2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG223]]
-// SIMD2-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG223]]
-// SIMD2-NEXT: [[TMP7:%.*]] = load i32, ptr getelementptr inbounds ([2 x [3 x %struct.S1]], ptr @arr_x, i64 0, i64 1, i64 1), align 4, !dbg [[DBG224:![0-9]+]]
-// SIMD2-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG225:![0-9]+]]
-// SIMD2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG225]]
-// SIMD2-NEXT: store i32 [[ADD3]], ptr [[RES]], align 4, !dbg [[DBG225]]
-// SIMD2-NEXT: [[TMP9:%.*]] = load i32, ptr @_ZN2STIiE2stE, align 4, !dbg [[DBG226:![0-9]+]]
-// SIMD2-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG227:![0-9]+]]
-// SIMD2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG227]]
-// SIMD2-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG227]]
-// SIMD2-NEXT: [[TMP11:%.*]] = load float, ptr @_ZN2STIfE2stE, align 4, !dbg [[DBG228:![0-9]+]]
-// SIMD2-NEXT: [[CONV:%.*]] = fptosi float [[TMP11]] to i32, !dbg [[DBG228]]
-// SIMD2-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG229:![0-9]+]]
-// SIMD2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP12]], [[CONV]], !dbg [[DBG229]]
-// SIMD2-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG229]]
-// SIMD2-NEXT: [[TMP13:%.*]] = load i32, ptr @_ZN2STI2S4E2stE, align 4, !dbg [[DBG230:![0-9]+]]
-// SIMD2-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG231:![0-9]+]]
-// SIMD2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG231]]
-// SIMD2-NEXT: store i32 [[ADD6]], ptr [[RES]], align 4, !dbg [[DBG231]]
-// SIMD2-NEXT: [[TMP15:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG232:![0-9]+]]
-// SIMD2-NEXT: ret i32 [[TMP15]], !dbg [[DBG233:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META213:![0-9]+]], metadata !DIExpression()), !dbg [[DBG214:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZN6Static1sE, align 4, !dbg [[DBG215:![0-9]+]]
+// SIMD2-NEXT: store i32 [[TMP0]], ptr [[RES]], align 4, !dbg [[DBG216:![0-9]+]]
+// SIMD2-NEXT: [[TMP1:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG217:![0-9]+]]
+// SIMD2-NEXT: [[TMP2:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG218:![0-9]+]]
+// SIMD2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[TMP1]], !dbg [[DBG218]]
+// SIMD2-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG218]]
+// SIMD2-NEXT: [[TMP3:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG219:![0-9]+]]
+// SIMD2-NEXT: [[TMP4:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG220:![0-9]+]]
+// SIMD2-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], [[TMP3]], !dbg [[DBG220]]
+// SIMD2-NEXT: store i32 [[ADD1]], ptr [[RES]], align 4, !dbg [[DBG220]]
+// SIMD2-NEXT: [[TMP5:%.*]] = load i32, ptr @gs3, align 4, !dbg [[DBG221:![0-9]+]]
+// SIMD2-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG222:![0-9]+]]
+// SIMD2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG222]]
+// SIMD2-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG222]]
+// SIMD2-NEXT: [[TMP7:%.*]] = load i32, ptr getelementptr inbounds ([2 x [3 x %struct.S1]], ptr @arr_x, i64 0, i64 1, i64 1), align 4, !dbg [[DBG223:![0-9]+]]
+// SIMD2-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG224:![0-9]+]]
+// SIMD2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG224]]
+// SIMD2-NEXT: store i32 [[ADD3]], ptr [[RES]], align 4, !dbg [[DBG224]]
+// SIMD2-NEXT: [[TMP9:%.*]] = load i32, ptr @_ZN2STIiE2stE, align 4, !dbg [[DBG225:![0-9]+]]
+// SIMD2-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG226:![0-9]+]]
+// SIMD2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG226]]
+// SIMD2-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG226]]
+// SIMD2-NEXT: [[TMP11:%.*]] = load float, ptr @_ZN2STIfE2stE, align 4, !dbg [[DBG227:![0-9]+]]
+// SIMD2-NEXT: [[CONV:%.*]] = fptosi float [[TMP11]] to i32, !dbg [[DBG227]]
+// SIMD2-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG228:![0-9]+]]
+// SIMD2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP12]], [[CONV]], !dbg [[DBG228]]
+// SIMD2-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG228]]
+// SIMD2-NEXT: [[TMP13:%.*]] = load i32, ptr @_ZN2STI2S4E2stE, align 4, !dbg [[DBG229:![0-9]+]]
+// SIMD2-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG230:![0-9]+]]
+// SIMD2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG230]]
+// SIMD2-NEXT: store i32 [[ADD6]], ptr [[RES]], align 4, !dbg [[DBG230]]
+// SIMD2-NEXT: [[TMP15:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG231:![0-9]+]]
+// SIMD2-NEXT: ret i32 [[TMP15]], !dbg [[DBG232:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
-// SIMD2-SAME: () #[[ATTR0]] comdat($_ZN2STI2S4E2stE) !dbg [[DBG234:![0-9]+]] {
+// SIMD2-SAME: () #[[ATTR0]] comdat($_ZN2STI2S4E2stE) !dbg [[DBG233:![0-9]+]] {
// SIMD2-NEXT: entry:
-// SIMD2-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG235:![0-9]+]]
-// SIMD2-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG235]]
-// SIMD2-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG235]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG234:![0-9]+]]
+// SIMD2-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG234]]
+// SIMD2-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG234]]
// SIMD2: init.check:
-// SIMD2-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG235]]
-// SIMD2-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG236:![0-9]+]]
-// SIMD2-NEXT: [[TMP1:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG235]]
-// SIMD2-NEXT: br label [[INIT_END]], !dbg [[DBG235]]
+// SIMD2-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG234]]
+// SIMD2-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG235:![0-9]+]]
+// SIMD2-NEXT: [[TMP1:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG234]]
+// SIMD2-NEXT: br label [[INIT_END]], !dbg [[DBG234]]
// SIMD2: init.end:
-// SIMD2-NEXT: ret void, !dbg [[DBG238:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG237:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S4C1Ei
-// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG239:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG238:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META240:![0-9]+]], metadata !DIExpression()), !dbg [[DBG242:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META239:![0-9]+]], metadata !DIExpression()), !dbg [[DBG241:![0-9]+]]
// SIMD2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META243:![0-9]+]], metadata !DIExpression()), !dbg [[DBG244:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META242:![0-9]+]], metadata !DIExpression()), !dbg [[DBG243:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG245:![0-9]+]]
-// SIMD2-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG245]]
-// SIMD2-NEXT: ret void, !dbg [[DBG246:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG244:![0-9]+]]
+// SIMD2-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG244]]
+// SIMD2-NEXT: ret void, !dbg [[DBG245:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S4D1Ev
-// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG247:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG246:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META248:![0-9]+]], metadata !DIExpression()), !dbg [[DBG249:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META247:![0-9]+]], metadata !DIExpression()), !dbg [[DBG248:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR3]], !dbg [[DBG250:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG251:![0-9]+]]
+// SIMD2-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR3]], !dbg [[DBG249:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG250:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S1C2Ei
-// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG252:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG251:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META253:![0-9]+]], metadata !DIExpression()), !dbg [[DBG254:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META252:![0-9]+]], metadata !DIExpression()), !dbg [[DBG253:![0-9]+]]
// SIMD2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META255:![0-9]+]], metadata !DIExpression()), !dbg [[DBG256:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META254:![0-9]+]], metadata !DIExpression()), !dbg [[DBG255:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG257:![0-9]+]]
-// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG258:![0-9]+]]
-// SIMD2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG257]]
-// SIMD2-NEXT: ret void, !dbg [[DBG259:![0-9]+]]
+// SIMD2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG256:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG257:![0-9]+]]
+// SIMD2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG256]]
+// SIMD2-NEXT: ret void, !dbg [[DBG258:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S1D2Ev
-// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG260:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG259:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META261:![0-9]+]], metadata !DIExpression()), !dbg [[DBG262:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META260:![0-9]+]], metadata !DIExpression()), !dbg [[DBG261:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG263:![0-9]+]]
-// SIMD2-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG265:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG266:![0-9]+]]
+// SIMD2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG262:![0-9]+]]
+// SIMD2-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG264:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG265:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S2C2Ei
-// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG267:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG266:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META268:![0-9]+]], metadata !DIExpression()), !dbg [[DBG269:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META267:![0-9]+]], metadata !DIExpression()), !dbg [[DBG268:![0-9]+]]
// SIMD2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META270:![0-9]+]], metadata !DIExpression()), !dbg [[DBG271:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META269:![0-9]+]], metadata !DIExpression()), !dbg [[DBG270:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG272:![0-9]+]]
-// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG273:![0-9]+]]
-// SIMD2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG272]]
-// SIMD2-NEXT: ret void, !dbg [[DBG274:![0-9]+]]
+// SIMD2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG271:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG272:![0-9]+]]
+// SIMD2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG271]]
+// SIMD2-NEXT: ret void, !dbg [[DBG273:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S2D2Ev
-// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG275:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG274:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META276:![0-9]+]], metadata !DIExpression()), !dbg [[DBG277:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META275:![0-9]+]], metadata !DIExpression()), !dbg [[DBG276:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG278:![0-9]+]]
-// SIMD2-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG280:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG281:![0-9]+]]
+// SIMD2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG277:![0-9]+]]
+// SIMD2-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG279:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG280:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC2Ei
-// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG282:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG281:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META283:![0-9]+]], metadata !DIExpression()), !dbg [[DBG284:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META282:![0-9]+]], metadata !DIExpression()), !dbg [[DBG283:![0-9]+]]
// SIMD2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META285:![0-9]+]], metadata !DIExpression()), !dbg [[DBG286:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META284:![0-9]+]], metadata !DIExpression()), !dbg [[DBG285:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG287:![0-9]+]]
-// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG288:![0-9]+]]
-// SIMD2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG287]]
-// SIMD2-NEXT: ret void, !dbg [[DBG289:![0-9]+]]
+// SIMD2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG286:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG287:![0-9]+]]
+// SIMD2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG286]]
+// SIMD2-NEXT: ret void, !dbg [[DBG288:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD2Ev
-// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG290:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG289:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META291:![0-9]+]], metadata !DIExpression()), !dbg [[DBG292:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META290:![0-9]+]], metadata !DIExpression()), !dbg [[DBG291:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG293:![0-9]+]]
-// SIMD2-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG295:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG296:![0-9]+]]
+// SIMD2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG292:![0-9]+]]
+// SIMD2-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG294:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG295:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S4C2Ei
-// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG297:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG296:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META298:![0-9]+]], metadata !DIExpression()), !dbg [[DBG299:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META297:![0-9]+]], metadata !DIExpression()), !dbg [[DBG298:![0-9]+]]
// SIMD2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META300:![0-9]+]], metadata !DIExpression()), !dbg [[DBG301:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META299:![0-9]+]], metadata !DIExpression()), !dbg [[DBG300:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG302:![0-9]+]]
-// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG303:![0-9]+]]
-// SIMD2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG302]]
-// SIMD2-NEXT: ret void, !dbg [[DBG304:![0-9]+]]
+// SIMD2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG301:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG302:![0-9]+]]
+// SIMD2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG301]]
+// SIMD2-NEXT: ret void, !dbg [[DBG303:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S4D2Ev
-// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG305:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG304:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META306:![0-9]+]], metadata !DIExpression()), !dbg [[DBG307:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META305:![0-9]+]], metadata !DIExpression()), !dbg [[DBG306:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG308:![0-9]+]]
-// SIMD2-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG310:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG311:![0-9]+]]
+// SIMD2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG307:![0-9]+]]
+// SIMD2-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG309:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG310:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_threadprivate_codegen.cpp
-// SIMD2-SAME: () #[[ATTR0]] !dbg [[DBG312:![0-9]+]] {
+// SIMD2-SAME: () #[[ATTR0]] !dbg [[DBG311:![0-9]+]] {
// SIMD2-NEXT: entry:
-// SIMD2-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG314:![0-9]+]]
-// SIMD2-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG314]]
-// SIMD2-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG314]]
+// SIMD2-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG313:![0-9]+]]
+// SIMD2-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG313]]
+// SIMD2-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG313]]
// SIMD2-NEXT: ret void
//
//
@@ -3445,34 +3445,34 @@ int foobar() {
// CHECK-TLS1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK-TLS1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK-TLS1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// CHECK-TLS1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK-TLS1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// CHECK-TLS1: invoke.cont:
// CHECK-TLS1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK-TLS1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// CHECK-TLS1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
+// CHECK-TLS1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
// CHECK-TLS1: invoke.cont2:
// CHECK-TLS1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK-TLS1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// CHECK-TLS1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
+// CHECK-TLS1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
// CHECK-TLS1: invoke.cont3:
// CHECK-TLS1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK-TLS1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK-TLS1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// CHECK-TLS1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
+// CHECK-TLS1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
// CHECK-TLS1: invoke.cont7:
// CHECK-TLS1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK-TLS1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// CHECK-TLS1-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
+// CHECK-TLS1-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
// CHECK-TLS1: invoke.cont8:
// CHECK-TLS1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK-TLS1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// CHECK-TLS1-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
+// CHECK-TLS1-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
// CHECK-TLS1: invoke.cont9:
// CHECK-TLS1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]]
// CHECK-TLS1-NEXT: ret void
// CHECK-TLS1: lpad:
// CHECK-TLS1-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// CHECK-TLS1-NEXT: cleanup
+// CHECK-TLS1-NEXT: cleanup
// CHECK-TLS1-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0
// CHECK-TLS1-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8
// CHECK-TLS1-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1
@@ -3490,7 +3490,7 @@ int foobar() {
// CHECK-TLS1-NEXT: br label [[EHCLEANUP:%.*]]
// CHECK-TLS1: lpad6:
// CHECK-TLS1-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// CHECK-TLS1-NEXT: cleanup
+// CHECK-TLS1-NEXT: cleanup
// CHECK-TLS1-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0
// CHECK-TLS1-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8
// CHECK-TLS1-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1
@@ -3708,7 +3708,7 @@ int foobar() {
//
//
// CHECK-TLS1-LABEL: define {{[^@]+}}@_Z6foobarv
-// CHECK-TLS1-SAME: () #[[ATTR7:[0-9]+]] {
+// CHECK-TLS1-SAME: () #[[ATTR1]] {
// CHECK-TLS1-NEXT: entry:
// CHECK-TLS1-NEXT: [[RES:%.*]] = alloca i32, align 4
// CHECK-TLS1-NEXT: [[TMP0:%.*]] = call ptr @_ZTWN6Static1sE()
@@ -3997,7 +3997,7 @@ int foobar() {
//
//
// CHECK-TLS2-LABEL: define {{[^@]+}}@_Z6foobarv
-// CHECK-TLS2-SAME: () #[[ATTR6:[0-9]+]] {
+// CHECK-TLS2-SAME: () #[[ATTR2]] {
// CHECK-TLS2-NEXT: entry:
// CHECK-TLS2-NEXT: [[RES:%.*]] = alloca i32, align 4
// CHECK-TLS2-NEXT: [[TMP0:%.*]] = call ptr @_ZTWN6Static1sE()
@@ -4050,7 +4050,7 @@ int foobar() {
//
//
// CHECK-TLS2-LABEL: define {{[^@]+}}@__cxx_global_var_init
-// CHECK-TLS2-SAME: () #[[ATTR7:[0-9]+]] {
+// CHECK-TLS2-SAME: () #[[ATTR6:[0-9]+]] {
// CHECK-TLS2-NEXT: entry:
// CHECK-TLS2-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5)
// CHECK-TLS2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR4]]
@@ -4106,7 +4106,7 @@ int foobar() {
//
//
// CHECK-TLS2-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
-// CHECK-TLS2-SAME: () #[[ATTR7]] {
+// CHECK-TLS2-SAME: () #[[ATTR6]] {
// CHECK-TLS2-NEXT: entry:
// CHECK-TLS2-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27)
// CHECK-TLS2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR4]]
@@ -4162,7 +4162,7 @@ int foobar() {
//
//
// CHECK-TLS2-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
-// CHECK-TLS2-SAME: () #[[ATTR7]] personality ptr @__gxx_personality_v0 {
+// CHECK-TLS2-SAME: () #[[ATTR6]] personality ptr @__gxx_personality_v0 {
// CHECK-TLS2-NEXT: entry:
// CHECK-TLS2-NEXT: [[ARRAYINIT_ENDOFINIT:%.*]] = alloca ptr, align 8
// CHECK-TLS2-NEXT: [[ARRAYINIT_ENDOFINIT1:%.*]] = alloca ptr, align 8
@@ -4172,34 +4172,34 @@ int foobar() {
// CHECK-TLS2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK-TLS2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK-TLS2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// CHECK-TLS2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK-TLS2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// CHECK-TLS2: invoke.cont:
// CHECK-TLS2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK-TLS2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// CHECK-TLS2-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
+// CHECK-TLS2-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
// CHECK-TLS2: invoke.cont2:
// CHECK-TLS2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK-TLS2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// CHECK-TLS2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
+// CHECK-TLS2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
// CHECK-TLS2: invoke.cont3:
// CHECK-TLS2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK-TLS2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK-TLS2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// CHECK-TLS2-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
+// CHECK-TLS2-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
// CHECK-TLS2: invoke.cont7:
// CHECK-TLS2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK-TLS2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// CHECK-TLS2-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
+// CHECK-TLS2-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
// CHECK-TLS2: invoke.cont8:
// CHECK-TLS2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK-TLS2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// CHECK-TLS2-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
+// CHECK-TLS2-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
// CHECK-TLS2: invoke.cont9:
// CHECK-TLS2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR4]]
// CHECK-TLS2-NEXT: ret void
// CHECK-TLS2: lpad:
// CHECK-TLS2-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// CHECK-TLS2-NEXT: cleanup
+// CHECK-TLS2-NEXT: cleanup
// CHECK-TLS2-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0
// CHECK-TLS2-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8
// CHECK-TLS2-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1
@@ -4217,7 +4217,7 @@ int foobar() {
// CHECK-TLS2-NEXT: br label [[EHCLEANUP:%.*]]
// CHECK-TLS2: lpad6:
// CHECK-TLS2-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// CHECK-TLS2-NEXT: cleanup
+// CHECK-TLS2-NEXT: cleanup
// CHECK-TLS2-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0
// CHECK-TLS2-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8
// CHECK-TLS2-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1
@@ -4255,7 +4255,7 @@ int foobar() {
//
//
// CHECK-TLS2-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
-// CHECK-TLS2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR7]] {
+// CHECK-TLS2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR6]] {
// CHECK-TLS2-NEXT: entry:
// CHECK-TLS2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
@@ -4296,7 +4296,7 @@ int foobar() {
//
//
// CHECK-TLS2-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
-// CHECK-TLS2-SAME: () #[[ATTR7]] {
+// CHECK-TLS2-SAME: () #[[ATTR6]] {
// CHECK-TLS2-NEXT: entry:
// CHECK-TLS2-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8
// CHECK-TLS2-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0
@@ -4359,14 +4359,14 @@ int foobar() {
//
//
// CHECK-TLS2-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_threadprivate_codegen.cpp
-// CHECK-TLS2-SAME: () #[[ATTR7]] {
+// CHECK-TLS2-SAME: () #[[ATTR6]] {
// CHECK-TLS2-NEXT: entry:
// CHECK-TLS2-NEXT: call void @__cxx_global_var_init.1()
// CHECK-TLS2-NEXT: ret void
//
//
// CHECK-TLS2-LABEL: define {{[^@]+}}@__tls_init
-// CHECK-TLS2-SAME: () #[[ATTR7]] {
+// CHECK-TLS2-SAME: () #[[ATTR6]] {
// CHECK-TLS2-NEXT: entry:
// CHECK-TLS2-NEXT: [[TMP0:%.*]] = load i8, ptr @__tls_guard, align 1
// CHECK-TLS2-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0
@@ -4383,235 +4383,235 @@ int foobar() {
// CHECK-TLS3-LABEL: define {{[^@]+}}@__cxx_global_var_init
// CHECK-TLS3-SAME: () #[[ATTR0:[0-9]+]] !dbg [[DBG116:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
-// CHECK-TLS3-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG120:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR3:[0-9]+]], !dbg [[DBG122:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG123:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG119:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR3:[0-9]+]], !dbg [[DBG121:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG122:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S1C1Ei
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 !dbg [[DBG124:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 !dbg [[DBG123:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META125:![0-9]+]], metadata !DIExpression()), !dbg [[DBG127:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META124:![0-9]+]], metadata !DIExpression()), !dbg [[DBG126:![0-9]+]]
// CHECK-TLS3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META128:![0-9]+]], metadata !DIExpression()), !dbg [[DBG129:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META127:![0-9]+]], metadata !DIExpression()), !dbg [[DBG128:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG130:![0-9]+]]
-// CHECK-TLS3-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG130]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG131:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG129:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG129]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG130:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S1D1Ev
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 !dbg [[DBG132:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 !dbg [[DBG131:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META133:![0-9]+]], metadata !DIExpression()), !dbg [[DBG134:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META132:![0-9]+]], metadata !DIExpression()), !dbg [[DBG133:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]], !dbg [[DBG135:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG136:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]], !dbg [[DBG134:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG135:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S1C2Ei
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG137:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG136:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META138:![0-9]+]], metadata !DIExpression()), !dbg [[DBG139:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META137:![0-9]+]], metadata !DIExpression()), !dbg [[DBG138:![0-9]+]]
// CHECK-TLS3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META140:![0-9]+]], metadata !DIExpression()), !dbg [[DBG141:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META139:![0-9]+]], metadata !DIExpression()), !dbg [[DBG140:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG142:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG143:![0-9]+]]
-// CHECK-TLS3-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG142]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG144:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG141:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG142:![0-9]+]]
+// CHECK-TLS3-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG141]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG143:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S1D2Ev
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG145:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG144:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META146:![0-9]+]], metadata !DIExpression()), !dbg [[DBG147:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META145:![0-9]+]], metadata !DIExpression()), !dbg [[DBG146:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG148:![0-9]+]]
-// CHECK-TLS3-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG150:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG151:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG147:![0-9]+]]
+// CHECK-TLS3-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG149:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG150:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
-// CHECK-TLS3-SAME: () #[[ATTR0]] !dbg [[DBG152:![0-9]+]] {
+// CHECK-TLS3-SAME: () #[[ATTR0]] !dbg [[DBG151:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
-// CHECK-TLS3-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG153:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG155:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG156:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG152:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG154:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG155:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S2C1Ei
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG157:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG156:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META158:![0-9]+]], metadata !DIExpression()), !dbg [[DBG160:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META157:![0-9]+]], metadata !DIExpression()), !dbg [[DBG159:![0-9]+]]
// CHECK-TLS3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META161:![0-9]+]], metadata !DIExpression()), !dbg [[DBG162:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META160:![0-9]+]], metadata !DIExpression()), !dbg [[DBG161:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG163:![0-9]+]]
-// CHECK-TLS3-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG163]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG164:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG162:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG162]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG163:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S2D1Ev
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG165:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG164:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META166:![0-9]+]], metadata !DIExpression()), !dbg [[DBG167:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META165:![0-9]+]], metadata !DIExpression()), !dbg [[DBG166:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR3]], !dbg [[DBG168:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG169:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR3]], !dbg [[DBG167:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG168:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S2C2Ei
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG170:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG169:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META171:![0-9]+]], metadata !DIExpression()), !dbg [[DBG172:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META170:![0-9]+]], metadata !DIExpression()), !dbg [[DBG171:![0-9]+]]
// CHECK-TLS3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META173:![0-9]+]], metadata !DIExpression()), !dbg [[DBG174:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META172:![0-9]+]], metadata !DIExpression()), !dbg [[DBG173:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG175:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG176:![0-9]+]]
-// CHECK-TLS3-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG175]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG177:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG174:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG175:![0-9]+]]
+// CHECK-TLS3-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG174]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG176:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S2D2Ev
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG178:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG177:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META179:![0-9]+]], metadata !DIExpression()), !dbg [[DBG180:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META178:![0-9]+]], metadata !DIExpression()), !dbg [[DBG179:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG181:![0-9]+]]
-// CHECK-TLS3-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG183:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG184:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG180:![0-9]+]]
+// CHECK-TLS3-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG182:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG183:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
-// CHECK-TLS3-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG185:![0-9]+]] {
+// CHECK-TLS3-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG184:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[ARRAYINIT_ENDOFINIT:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[ARRAYINIT_ENDOFINIT1:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: [[ARRAYINIT_ENDOFINIT5:%.*]] = alloca ptr, align 8
-// CHECK-TLS3-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG186:![0-9]+]]
-// CHECK-TLS3-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG188:![0-9]+]]
+// CHECK-TLS3-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG185:![0-9]+]]
+// CHECK-TLS3-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG187:![0-9]+]]
// CHECK-TLS3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// CHECK-TLS3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG189:![0-9]+]]
+// CHECK-TLS3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG188:![0-9]+]]
// CHECK-TLS3: invoke.cont:
-// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG188]]
+// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG187]]
// CHECK-TLS3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// CHECK-TLS3-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG190:![0-9]+]]
+// CHECK-TLS3-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG189:![0-9]+]]
// CHECK-TLS3: invoke.cont2:
-// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG188]]
+// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG187]]
// CHECK-TLS3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// CHECK-TLS3-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG191:![0-9]+]]
+// CHECK-TLS3-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG190:![0-9]+]]
// CHECK-TLS3: invoke.cont3:
-// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG192:![0-9]+]]
+// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG191:![0-9]+]]
// CHECK-TLS3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// CHECK-TLS3-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG193:![0-9]+]]
+// CHECK-TLS3-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG192:![0-9]+]]
// CHECK-TLS3: invoke.cont7:
-// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG192]]
+// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG191]]
// CHECK-TLS3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// CHECK-TLS3-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG194:![0-9]+]]
+// CHECK-TLS3-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG193:![0-9]+]]
// CHECK-TLS3: invoke.cont8:
-// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG192]]
+// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG191]]
// CHECK-TLS3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// CHECK-TLS3-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG195:![0-9]+]]
+// CHECK-TLS3-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG194:![0-9]+]]
// CHECK-TLS3: invoke.cont9:
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG196:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG195:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG195]]
// CHECK-TLS3: lpad:
// CHECK-TLS3-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// CHECK-TLS3-NEXT: cleanup, !dbg [[DBG197:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG197]]
-// CHECK-TLS3-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG197]]
-// CHECK-TLS3-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG197]]
-// CHECK-TLS3-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG197]]
-// CHECK-TLS3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG188]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG188]]
-// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG188]]
+// CHECK-TLS3-NEXT: cleanup, !dbg [[DBG196:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG187]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG187]]
+// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG187]]
// CHECK-TLS3: arraydestroy.body:
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG188]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG188]]
-// CHECK-TLS3-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG188]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG188]]
-// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG188]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG187]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG187]]
+// CHECK-TLS3-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG187]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG187]]
+// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG187]]
// CHECK-TLS3: arraydestroy.done4:
-// CHECK-TLS3-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG188]]
+// CHECK-TLS3-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG187]]
// CHECK-TLS3: lpad6:
// CHECK-TLS3-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// CHECK-TLS3-NEXT: cleanup, !dbg [[DBG197]]
-// CHECK-TLS3-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG197]]
-// CHECK-TLS3-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG197]]
-// CHECK-TLS3-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG197]]
-// CHECK-TLS3-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG197]]
-// CHECK-TLS3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG192]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG192]]
-// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG192]]
+// CHECK-TLS3-NEXT: cleanup, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG191]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG191]]
+// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG191]]
// CHECK-TLS3: arraydestroy.body11:
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG192]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG192]]
-// CHECK-TLS3-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR3]], !dbg [[DBG192]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG192]]
-// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG192]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG191]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG191]]
+// CHECK-TLS3-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR3]], !dbg [[DBG191]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG191]]
+// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG191]]
// CHECK-TLS3: arraydestroy.done15:
-// CHECK-TLS3-NEXT: br label [[EHCLEANUP]], !dbg [[DBG192]]
+// CHECK-TLS3-NEXT: br label [[EHCLEANUP]], !dbg [[DBG191]]
// CHECK-TLS3: ehcleanup:
-// CHECK-TLS3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG186]]
+// CHECK-TLS3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG185]]
// CHECK-TLS3: arraydestroy.body17:
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR3]], !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG186]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR3]], !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG185]]
// CHECK-TLS3: arraydestroy.done21:
-// CHECK-TLS3-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG186]]
+// CHECK-TLS3-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG185]]
// CHECK-TLS3: eh.resume:
-// CHECK-TLS3-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG186]]
+// CHECK-TLS3-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG185]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
-// CHECK-TLS3-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG198:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG197:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META202:![0-9]+]], metadata !DIExpression()), !dbg [[DBG203:![0-9]+]]
-// CHECK-TLS3-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG203]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META201:![0-9]+]], metadata !DIExpression()), !dbg [[DBG202:![0-9]+]]
+// CHECK-TLS3-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG202]]
// CHECK-TLS3: arraydestroy.body:
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG203]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG203]]
-// CHECK-TLS3-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG203]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG203]]
-// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG203]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG202]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG202]]
+// CHECK-TLS3-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG202]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG202]]
+// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG202]]
// CHECK-TLS3: arraydestroy.done1:
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG203]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG202]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@main
@@ -4620,72 +4620,72 @@ int foobar() {
// CHECK-TLS3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: [[RES:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: store i32 0, ptr [[RETVAL]], align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META204:![0-9]+]], metadata !DIExpression()), !dbg [[DBG205:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVZ4mainE2sm, align 1, !dbg [[DBG206:![0-9]+]]
-// CHECK-TLS3-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG206]]
-// CHECK-TLS3-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG206]], !prof [[PROF207:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META203:![0-9]+]], metadata !DIExpression()), !dbg [[DBG204:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVZ4mainE2sm, align 1, !dbg [[DBG205:![0-9]+]]
+// CHECK-TLS3-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG205]]
+// CHECK-TLS3-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG205]], !prof [[PROF206:![0-9]+]]
// CHECK-TLS3: init.check:
-// CHECK-TLS3-NEXT: [[TMP1:%.*]] = call ptr @_ZTWL3gs1(), !dbg [[DBG208:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP1]], i32 0, i32 0, !dbg [[DBG209:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG209]]
-// CHECK-TLS3-NEXT: call void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP2]]), !dbg [[DBG210:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP3:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG206]]
-// CHECK-TLS3-NEXT: store i8 1, ptr @_ZGVZ4mainE2sm, align 1, !dbg [[DBG206]]
-// CHECK-TLS3-NEXT: br label [[INIT_END]], !dbg [[DBG206]]
+// CHECK-TLS3-NEXT: [[TMP1:%.*]] = call ptr @_ZTWL3gs1(), !dbg [[DBG207:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP1]], i32 0, i32 0, !dbg [[DBG208:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG208]]
+// CHECK-TLS3-NEXT: call void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP2]]), !dbg [[DBG209:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP3:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG205]]
+// CHECK-TLS3-NEXT: store i8 1, ptr @_ZGVZ4mainE2sm, align 1, !dbg [[DBG205]]
+// CHECK-TLS3-NEXT: br label [[INIT_END]], !dbg [[DBG205]]
// CHECK-TLS3: init.end:
-// CHECK-TLS3-NEXT: [[TMP4:%.*]] = call ptr @_ZTWN6Static1sE(), !dbg [[DBG211:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP4]], i32 0, i32 0, !dbg [[DBG212:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG212]]
-// CHECK-TLS3-NEXT: store i32 [[TMP5]], ptr [[RES]], align 4, !dbg [[DBG213:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP6:%.*]] = call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @_ZZ4mainE2sm), !dbg [[DBG214:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[TMP6]], i32 0, i32 0, !dbg [[DBG215:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A2]], align 8, !dbg [[DBG215]]
-// CHECK-TLS3-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG216:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG216]]
-// CHECK-TLS3-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG216]]
-// CHECK-TLS3-NEXT: [[TMP9:%.*]] = call ptr @_ZTWL3gs1(), !dbg [[DBG217:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP9]], i32 0, i32 0, !dbg [[DBG218:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP10:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG218]]
-// CHECK-TLS3-NEXT: [[TMP11:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG219:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], [[TMP10]], !dbg [[DBG219]]
-// CHECK-TLS3-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG219]]
-// CHECK-TLS3-NEXT: [[TMP12:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG220:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP13:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG221:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP13]], [[TMP12]], !dbg [[DBG221]]
-// CHECK-TLS3-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG221]]
-// CHECK-TLS3-NEXT: [[TMP14:%.*]] = call ptr @_ZTW3gs3(), !dbg [[DBG222:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP14]], i32 0, i32 0, !dbg [[DBG223:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP15:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG223]]
-// CHECK-TLS3-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG224:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG224]]
-// CHECK-TLS3-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG224]]
-// CHECK-TLS3-NEXT: [[TMP17:%.*]] = call ptr @_ZTW5arr_x(), !dbg [[DBG225:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP17]], i64 0, i64 1, !dbg [[DBG225]]
-// CHECK-TLS3-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG225]]
-// CHECK-TLS3-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX8]], i32 0, i32 0, !dbg [[DBG226:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP18:%.*]] = load i32, ptr [[A9]], align 4, !dbg [[DBG226]]
-// CHECK-TLS3-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG227:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP19]], [[TMP18]], !dbg [[DBG227]]
-// CHECK-TLS3-NEXT: store i32 [[ADD10]], ptr [[RES]], align 4, !dbg [[DBG227]]
-// CHECK-TLS3-NEXT: [[TMP20:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @_ZN2STIiE2stE), !dbg [[DBG228:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4, !dbg [[DBG228]]
-// CHECK-TLS3-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG229:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG229]]
-// CHECK-TLS3-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG229]]
-// CHECK-TLS3-NEXT: [[TMP23:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @_ZN2STIfE2stE), !dbg [[DBG230:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP24:%.*]] = load float, ptr [[TMP23]], align 4, !dbg [[DBG230]]
-// CHECK-TLS3-NEXT: [[CONV:%.*]] = fptosi float [[TMP24]] to i32, !dbg [[DBG230]]
-// CHECK-TLS3-NEXT: [[TMP25:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG231:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP25]], [[CONV]], !dbg [[DBG231]]
-// CHECK-TLS3-NEXT: store i32 [[ADD12]], ptr [[RES]], align 4, !dbg [[DBG231]]
-// CHECK-TLS3-NEXT: [[TMP26:%.*]] = call ptr @_ZTWN2STI2S4E2stE(), !dbg [[DBG232:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP26]], i32 0, i32 0, !dbg [[DBG233:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP27:%.*]] = load i32, ptr [[A13]], align 4, !dbg [[DBG233]]
-// CHECK-TLS3-NEXT: [[TMP28:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG234:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP28]], [[TMP27]], !dbg [[DBG234]]
-// CHECK-TLS3-NEXT: store i32 [[ADD14]], ptr [[RES]], align 4, !dbg [[DBG234]]
-// CHECK-TLS3-NEXT: [[TMP29:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG235:![0-9]+]]
-// CHECK-TLS3-NEXT: ret i32 [[TMP29]], !dbg [[DBG236:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP4:%.*]] = call ptr @_ZTWN6Static1sE(), !dbg [[DBG210:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP4]], i32 0, i32 0, !dbg [[DBG211:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG211]]
+// CHECK-TLS3-NEXT: store i32 [[TMP5]], ptr [[RES]], align 4, !dbg [[DBG212:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP6:%.*]] = call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @_ZZ4mainE2sm), !dbg [[DBG213:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[TMP6]], i32 0, i32 0, !dbg [[DBG214:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A2]], align 8, !dbg [[DBG214]]
+// CHECK-TLS3-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG215:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG215]]
+// CHECK-TLS3-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG215]]
+// CHECK-TLS3-NEXT: [[TMP9:%.*]] = call ptr @_ZTWL3gs1(), !dbg [[DBG216:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP9]], i32 0, i32 0, !dbg [[DBG217:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP10:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG217]]
+// CHECK-TLS3-NEXT: [[TMP11:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG218:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], [[TMP10]], !dbg [[DBG218]]
+// CHECK-TLS3-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG218]]
+// CHECK-TLS3-NEXT: [[TMP12:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG219:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP13:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG220:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP13]], [[TMP12]], !dbg [[DBG220]]
+// CHECK-TLS3-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG220]]
+// CHECK-TLS3-NEXT: [[TMP14:%.*]] = call ptr @_ZTW3gs3(), !dbg [[DBG221:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP14]], i32 0, i32 0, !dbg [[DBG222:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP15:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG222]]
+// CHECK-TLS3-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG223:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG223]]
+// CHECK-TLS3-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG223]]
+// CHECK-TLS3-NEXT: [[TMP17:%.*]] = call ptr @_ZTW5arr_x(), !dbg [[DBG224:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP17]], i64 0, i64 1, !dbg [[DBG224]]
+// CHECK-TLS3-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG224]]
+// CHECK-TLS3-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX8]], i32 0, i32 0, !dbg [[DBG225:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP18:%.*]] = load i32, ptr [[A9]], align 4, !dbg [[DBG225]]
+// CHECK-TLS3-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG226:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP19]], [[TMP18]], !dbg [[DBG226]]
+// CHECK-TLS3-NEXT: store i32 [[ADD10]], ptr [[RES]], align 4, !dbg [[DBG226]]
+// CHECK-TLS3-NEXT: [[TMP20:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @_ZN2STIiE2stE), !dbg [[DBG227:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4, !dbg [[DBG227]]
+// CHECK-TLS3-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG228:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG228]]
+// CHECK-TLS3-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG228]]
+// CHECK-TLS3-NEXT: [[TMP23:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @_ZN2STIfE2stE), !dbg [[DBG229:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP24:%.*]] = load float, ptr [[TMP23]], align 4, !dbg [[DBG229]]
+// CHECK-TLS3-NEXT: [[CONV:%.*]] = fptosi float [[TMP24]] to i32, !dbg [[DBG229]]
+// CHECK-TLS3-NEXT: [[TMP25:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG230:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP25]], [[CONV]], !dbg [[DBG230]]
+// CHECK-TLS3-NEXT: store i32 [[ADD12]], ptr [[RES]], align 4, !dbg [[DBG230]]
+// CHECK-TLS3-NEXT: [[TMP26:%.*]] = call ptr @_ZTWN2STI2S4E2stE(), !dbg [[DBG231:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP26]], i32 0, i32 0, !dbg [[DBG232:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP27:%.*]] = load i32, ptr [[A13]], align 4, !dbg [[DBG232]]
+// CHECK-TLS3-NEXT: [[TMP28:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG233:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP28]], [[TMP27]], !dbg [[DBG233]]
+// CHECK-TLS3-NEXT: store i32 [[ADD14]], ptr [[RES]], align 4, !dbg [[DBG233]]
+// CHECK-TLS3-NEXT: [[TMP29:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG234:![0-9]+]]
+// CHECK-TLS3-NEXT: ret i32 [[TMP29]], !dbg [[DBG235:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZTWL3gs1
@@ -4696,29 +4696,29 @@ int foobar() {
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC1Ei
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 !dbg [[DBG237:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 !dbg [[DBG236:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META238:![0-9]+]], metadata !DIExpression()), !dbg [[DBG240:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META237:![0-9]+]], metadata !DIExpression()), !dbg [[DBG239:![0-9]+]]
// CHECK-TLS3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META241:![0-9]+]], metadata !DIExpression()), !dbg [[DBG242:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META240:![0-9]+]], metadata !DIExpression()), !dbg [[DBG241:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG243:![0-9]+]]
-// CHECK-TLS3-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG243]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG244:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG242:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG242]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG243:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD1Ev
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG245:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG244:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META246:![0-9]+]], metadata !DIExpression()), !dbg [[DBG247:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META245:![0-9]+]], metadata !DIExpression()), !dbg [[DBG246:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]], !dbg [[DBG248:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG249:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]], !dbg [[DBG247:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG248:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZTWN6Static1sE
@@ -4758,174 +4758,174 @@ int foobar() {
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC2Ei
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG250:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG249:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META251:![0-9]+]], metadata !DIExpression()), !dbg [[DBG252:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META250:![0-9]+]], metadata !DIExpression()), !dbg [[DBG251:![0-9]+]]
// CHECK-TLS3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META253:![0-9]+]], metadata !DIExpression()), !dbg [[DBG254:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META252:![0-9]+]], metadata !DIExpression()), !dbg [[DBG253:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG255:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG256:![0-9]+]]
-// CHECK-TLS3-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG255]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG257:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG254:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG255:![0-9]+]]
+// CHECK-TLS3-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG254]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG256:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD2Ev
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG258:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG257:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META259:![0-9]+]], metadata !DIExpression()), !dbg [[DBG260:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META258:![0-9]+]], metadata !DIExpression()), !dbg [[DBG259:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG261:![0-9]+]]
-// CHECK-TLS3-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG263:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG264:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG260:![0-9]+]]
+// CHECK-TLS3-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG262:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG263:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_Z6foobarv
-// CHECK-TLS3-SAME: () #[[ATTR7:[0-9]+]] !dbg [[DBG265:![0-9]+]] {
+// CHECK-TLS3-SAME: () #[[ATTR1]] !dbg [[DBG264:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[RES:%.*]] = alloca i32, align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META266:![0-9]+]], metadata !DIExpression()), !dbg [[DBG267:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = call ptr @_ZTWN6Static1sE(), !dbg [[DBG268:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP0]], i32 0, i32 0, !dbg [[DBG269:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG269]]
-// CHECK-TLS3-NEXT: store i32 [[TMP1]], ptr [[RES]], align 4, !dbg [[DBG270:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP2:%.*]] = call ptr @_ZTWL3gs1(), !dbg [[DBG271:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP2]], i32 0, i32 0, !dbg [[DBG272:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG272]]
-// CHECK-TLS3-NEXT: [[TMP4:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG273:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP3]], !dbg [[DBG273]]
-// CHECK-TLS3-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG273]]
-// CHECK-TLS3-NEXT: [[TMP5:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG274:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG275:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG275]]
-// CHECK-TLS3-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG275]]
-// CHECK-TLS3-NEXT: [[TMP7:%.*]] = call ptr @_ZTW3gs3(), !dbg [[DBG276:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP7]], i32 0, i32 0, !dbg [[DBG277:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG277]]
-// CHECK-TLS3-NEXT: [[TMP9:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG278:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], [[TMP8]], !dbg [[DBG278]]
-// CHECK-TLS3-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG278]]
-// CHECK-TLS3-NEXT: [[TMP10:%.*]] = call ptr @_ZTW5arr_x(), !dbg [[DBG279:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP10]], i64 0, i64 1, !dbg [[DBG279]]
-// CHECK-TLS3-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG279]]
-// CHECK-TLS3-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX5]], i32 0, i32 0, !dbg [[DBG280:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP11:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG280]]
-// CHECK-TLS3-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG281:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP12]], [[TMP11]], !dbg [[DBG281]]
-// CHECK-TLS3-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG281]]
-// CHECK-TLS3-NEXT: [[TMP13:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @_ZN2STIiE2stE), !dbg [[DBG282:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4, !dbg [[DBG282]]
-// CHECK-TLS3-NEXT: [[TMP15:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG283:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP15]], [[TMP14]], !dbg [[DBG283]]
-// CHECK-TLS3-NEXT: store i32 [[ADD8]], ptr [[RES]], align 4, !dbg [[DBG283]]
-// CHECK-TLS3-NEXT: [[TMP16:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @_ZN2STIfE2stE), !dbg [[DBG284:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP17:%.*]] = load float, ptr [[TMP16]], align 4, !dbg [[DBG284]]
-// CHECK-TLS3-NEXT: [[CONV:%.*]] = fptosi float [[TMP17]] to i32, !dbg [[DBG284]]
-// CHECK-TLS3-NEXT: [[TMP18:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG285:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP18]], [[CONV]], !dbg [[DBG285]]
-// CHECK-TLS3-NEXT: store i32 [[ADD9]], ptr [[RES]], align 4, !dbg [[DBG285]]
-// CHECK-TLS3-NEXT: [[TMP19:%.*]] = call ptr @_ZTWN2STI2S4E2stE(), !dbg [[DBG286:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP19]], i32 0, i32 0, !dbg [[DBG287:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP20:%.*]] = load i32, ptr [[A10]], align 4, !dbg [[DBG287]]
-// CHECK-TLS3-NEXT: [[TMP21:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG288:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP21]], [[TMP20]], !dbg [[DBG288]]
-// CHECK-TLS3-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG288]]
-// CHECK-TLS3-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG289:![0-9]+]]
-// CHECK-TLS3-NEXT: ret i32 [[TMP22]], !dbg [[DBG290:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META265:![0-9]+]], metadata !DIExpression()), !dbg [[DBG266:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = call ptr @_ZTWN6Static1sE(), !dbg [[DBG267:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP0]], i32 0, i32 0, !dbg [[DBG268:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG268]]
+// CHECK-TLS3-NEXT: store i32 [[TMP1]], ptr [[RES]], align 4, !dbg [[DBG269:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP2:%.*]] = call ptr @_ZTWL3gs1(), !dbg [[DBG270:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP2]], i32 0, i32 0, !dbg [[DBG271:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG271]]
+// CHECK-TLS3-NEXT: [[TMP4:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG272:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP3]], !dbg [[DBG272]]
+// CHECK-TLS3-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG272]]
+// CHECK-TLS3-NEXT: [[TMP5:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG273:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG274:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG274]]
+// CHECK-TLS3-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG274]]
+// CHECK-TLS3-NEXT: [[TMP7:%.*]] = call ptr @_ZTW3gs3(), !dbg [[DBG275:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP7]], i32 0, i32 0, !dbg [[DBG276:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG276]]
+// CHECK-TLS3-NEXT: [[TMP9:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG277:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], [[TMP8]], !dbg [[DBG277]]
+// CHECK-TLS3-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG277]]
+// CHECK-TLS3-NEXT: [[TMP10:%.*]] = call ptr @_ZTW5arr_x(), !dbg [[DBG278:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP10]], i64 0, i64 1, !dbg [[DBG278]]
+// CHECK-TLS3-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG278]]
+// CHECK-TLS3-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX5]], i32 0, i32 0, !dbg [[DBG279:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP11:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG279]]
+// CHECK-TLS3-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG280:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP12]], [[TMP11]], !dbg [[DBG280]]
+// CHECK-TLS3-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG280]]
+// CHECK-TLS3-NEXT: [[TMP13:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @_ZN2STIiE2stE), !dbg [[DBG281:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4, !dbg [[DBG281]]
+// CHECK-TLS3-NEXT: [[TMP15:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG282:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP15]], [[TMP14]], !dbg [[DBG282]]
+// CHECK-TLS3-NEXT: store i32 [[ADD8]], ptr [[RES]], align 4, !dbg [[DBG282]]
+// CHECK-TLS3-NEXT: [[TMP16:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @_ZN2STIfE2stE), !dbg [[DBG283:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP17:%.*]] = load float, ptr [[TMP16]], align 4, !dbg [[DBG283]]
+// CHECK-TLS3-NEXT: [[CONV:%.*]] = fptosi float [[TMP17]] to i32, !dbg [[DBG283]]
+// CHECK-TLS3-NEXT: [[TMP18:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG284:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP18]], [[CONV]], !dbg [[DBG284]]
+// CHECK-TLS3-NEXT: store i32 [[ADD9]], ptr [[RES]], align 4, !dbg [[DBG284]]
+// CHECK-TLS3-NEXT: [[TMP19:%.*]] = call ptr @_ZTWN2STI2S4E2stE(), !dbg [[DBG285:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP19]], i32 0, i32 0, !dbg [[DBG286:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP20:%.*]] = load i32, ptr [[A10]], align 4, !dbg [[DBG286]]
+// CHECK-TLS3-NEXT: [[TMP21:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG287:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP21]], [[TMP20]], !dbg [[DBG287]]
+// CHECK-TLS3-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG287]]
+// CHECK-TLS3-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG288:![0-9]+]]
+// CHECK-TLS3-NEXT: ret i32 [[TMP22]], !dbg [[DBG289:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
-// CHECK-TLS3-SAME: () #[[ATTR0]] !dbg [[DBG291:![0-9]+]] {
+// CHECK-TLS3-SAME: () #[[ATTR0]] !dbg [[DBG290:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG292:![0-9]+]]
-// CHECK-TLS3-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG292]]
-// CHECK-TLS3-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG292]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG291:![0-9]+]]
+// CHECK-TLS3-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG291]]
+// CHECK-TLS3-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG291]]
// CHECK-TLS3: init.check:
-// CHECK-TLS3-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG292]]
-// CHECK-TLS3-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG293:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP1:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG292]]
-// CHECK-TLS3-NEXT: br label [[INIT_END]], !dbg [[DBG292]]
+// CHECK-TLS3-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG291]]
+// CHECK-TLS3-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG292:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP1:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG291]]
+// CHECK-TLS3-NEXT: br label [[INIT_END]], !dbg [[DBG291]]
// CHECK-TLS3: init.end:
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG295:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG294:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S4C1Ei
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG296:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG295:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META297:![0-9]+]], metadata !DIExpression()), !dbg [[DBG299:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META296:![0-9]+]], metadata !DIExpression()), !dbg [[DBG298:![0-9]+]]
// CHECK-TLS3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META300:![0-9]+]], metadata !DIExpression()), !dbg [[DBG301:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META299:![0-9]+]], metadata !DIExpression()), !dbg [[DBG300:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG302:![0-9]+]]
-// CHECK-TLS3-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG302]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG303:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG301:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG301]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG302:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S4D1Ev
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG304:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG303:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META305:![0-9]+]], metadata !DIExpression()), !dbg [[DBG306:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META304:![0-9]+]], metadata !DIExpression()), !dbg [[DBG305:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR3]], !dbg [[DBG307:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG308:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR3]], !dbg [[DBG306:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG307:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S4C2Ei
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG309:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG308:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META310:![0-9]+]], metadata !DIExpression()), !dbg [[DBG311:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META309:![0-9]+]], metadata !DIExpression()), !dbg [[DBG310:![0-9]+]]
// CHECK-TLS3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META312:![0-9]+]], metadata !DIExpression()), !dbg [[DBG313:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META311:![0-9]+]], metadata !DIExpression()), !dbg [[DBG312:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG314:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG315:![0-9]+]]
-// CHECK-TLS3-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG314]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG316:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG313:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG314:![0-9]+]]
+// CHECK-TLS3-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG313]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG315:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S4D2Ev
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG317:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG316:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META318:![0-9]+]], metadata !DIExpression()), !dbg [[DBG319:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META317:![0-9]+]], metadata !DIExpression()), !dbg [[DBG318:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG320:![0-9]+]]
-// CHECK-TLS3-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG322:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG323:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG319:![0-9]+]]
+// CHECK-TLS3-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG321:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG322:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_threadprivate_codegen.cpp
-// CHECK-TLS3-SAME: () #[[ATTR0]] !dbg [[DBG324:![0-9]+]] {
+// CHECK-TLS3-SAME: () #[[ATTR0]] !dbg [[DBG323:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
-// CHECK-TLS3-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG326:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG325:![0-9]+]]
// CHECK-TLS3-NEXT: ret void
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@__tls_init
-// CHECK-TLS3-SAME: () #[[ATTR0]] !dbg [[DBG327:![0-9]+]] {
+// CHECK-TLS3-SAME: () #[[ATTR0]] !dbg [[DBG326:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i8, ptr @__tls_guard, align 1, !dbg [[DBG328:![0-9]+]]
-// CHECK-TLS3-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG328]]
-// CHECK-TLS3-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT:%.*]], label [[EXIT:%.*]], !dbg [[DBG328]], !prof [[PROF207]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i8, ptr @__tls_guard, align 1, !dbg [[DBG327:![0-9]+]]
+// CHECK-TLS3-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG327]]
+// CHECK-TLS3-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT:%.*]], label [[EXIT:%.*]], !dbg [[DBG327]], !prof [[PROF206]]
// CHECK-TLS3: init:
-// CHECK-TLS3-NEXT: store i8 1, ptr @__tls_guard, align 1, !dbg [[DBG328]]
-// CHECK-TLS3-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG328]]
-// CHECK-TLS3-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG328]]
-// CHECK-TLS3-NEXT: br label [[EXIT]], !dbg [[DBG328]]
+// CHECK-TLS3-NEXT: store i8 1, ptr @__tls_guard, align 1, !dbg [[DBG327]]
+// CHECK-TLS3-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG327]]
+// CHECK-TLS3-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG327]]
+// CHECK-TLS3-NEXT: br label [[EXIT]], !dbg [[DBG327]]
// CHECK-TLS3: exit:
// CHECK-TLS3-NEXT: ret void
//
@@ -4936,7 +4936,7 @@ int foobar() {
// CHECK-TLS4-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: [[RES:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: store i32 0, ptr [[RETVAL]], align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META116:![0-9]+]], metadata !DIExpression()), !dbg [[DBG117:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META116:![0-9]+]], metadata !DIExpression()), !dbg [[DBG117:![0-9]+]]
// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVZ4mainE2sm, align 1, !dbg [[DBG118:![0-9]+]]
// CHECK-TLS4-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG118]]
// CHECK-TLS4-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG118]], !prof [[PROF119:![0-9]+]]
@@ -5017,9 +5017,9 @@ int foobar() {
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META150:![0-9]+]], metadata !DIExpression()), !dbg [[DBG152:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META150:![0-9]+]], metadata !DIExpression()), !dbg [[DBG152:![0-9]+]]
// CHECK-TLS4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META153:![0-9]+]], metadata !DIExpression()), !dbg [[DBG154:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META153:![0-9]+]], metadata !DIExpression()), !dbg [[DBG154:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG155:![0-9]+]]
// CHECK-TLS4-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG155]]
@@ -5031,7 +5031,7 @@ int foobar() {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META158:![0-9]+]], metadata !DIExpression()), !dbg [[DBG159:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META158:![0-9]+]], metadata !DIExpression()), !dbg [[DBG159:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-TLS4-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR5]], !dbg [[DBG160:![0-9]+]]
// CHECK-TLS4-NEXT: ret void, !dbg [[DBG161:![0-9]+]]
@@ -5086,10 +5086,10 @@ int foobar() {
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_Z6foobarv
-// CHECK-TLS4-SAME: () #[[ATTR6:[0-9]+]] !dbg [[DBG162:![0-9]+]] {
+// CHECK-TLS4-SAME: () #[[ATTR3]] !dbg [[DBG162:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[RES:%.*]] = alloca i32, align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META163:![0-9]+]], metadata !DIExpression()), !dbg [[DBG164:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META163:![0-9]+]], metadata !DIExpression()), !dbg [[DBG164:![0-9]+]]
// CHECK-TLS4-NEXT: [[TMP0:%.*]] = call ptr @_ZTWN6Static1sE(), !dbg [[DBG165:![0-9]+]]
// CHECK-TLS4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP0]], i32 0, i32 0, !dbg [[DBG166:![0-9]+]]
// CHECK-TLS4-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG166]]
@@ -5140,354 +5140,354 @@ int foobar() {
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@__cxx_global_var_init
-// CHECK-TLS4-SAME: () #[[ATTR7:[0-9]+]] !dbg [[DBG188:![0-9]+]] {
+// CHECK-TLS4-SAME: () #[[ATTR6:[0-9]+]] !dbg [[DBG188:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
-// CHECK-TLS4-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG192:![0-9]+]]
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR5]], !dbg [[DBG194:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG195:![0-9]+]]
+// CHECK-TLS4-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG191:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR5]], !dbg [[DBG193:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG194:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S1C1Ei
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG196:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG195:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META197:![0-9]+]], metadata !DIExpression()), !dbg [[DBG199:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META196:![0-9]+]], metadata !DIExpression()), !dbg [[DBG198:![0-9]+]]
// CHECK-TLS4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META200:![0-9]+]], metadata !DIExpression()), !dbg [[DBG201:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META199:![0-9]+]], metadata !DIExpression()), !dbg [[DBG200:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG202:![0-9]+]]
-// CHECK-TLS4-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG202]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG203:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG201:![0-9]+]]
+// CHECK-TLS4-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG201]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG202:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S1D1Ev
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG204:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG203:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META205:![0-9]+]], metadata !DIExpression()), !dbg [[DBG206:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META204:![0-9]+]], metadata !DIExpression()), !dbg [[DBG205:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR5]], !dbg [[DBG207:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG208:![0-9]+]]
+// CHECK-TLS4-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR5]], !dbg [[DBG206:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG207:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S1C2Ei
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG209:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG208:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META210:![0-9]+]], metadata !DIExpression()), !dbg [[DBG211:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META209:![0-9]+]], metadata !DIExpression()), !dbg [[DBG210:![0-9]+]]
// CHECK-TLS4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META212:![0-9]+]], metadata !DIExpression()), !dbg [[DBG213:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META211:![0-9]+]], metadata !DIExpression()), !dbg [[DBG212:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG214:![0-9]+]]
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG215:![0-9]+]]
-// CHECK-TLS4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG214]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG216:![0-9]+]]
+// CHECK-TLS4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG213:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG214:![0-9]+]]
+// CHECK-TLS4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG213]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG215:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S1D2Ev
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG217:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG216:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META218:![0-9]+]], metadata !DIExpression()), !dbg [[DBG219:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META217:![0-9]+]], metadata !DIExpression()), !dbg [[DBG218:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG220:![0-9]+]]
-// CHECK-TLS4-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG222:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG223:![0-9]+]]
+// CHECK-TLS4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG219:![0-9]+]]
+// CHECK-TLS4-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG221:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG222:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
-// CHECK-TLS4-SAME: () #[[ATTR7]] !dbg [[DBG224:![0-9]+]] {
+// CHECK-TLS4-SAME: () #[[ATTR6]] !dbg [[DBG223:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
-// CHECK-TLS4-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG225:![0-9]+]]
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR5]], !dbg [[DBG227:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG228:![0-9]+]]
+// CHECK-TLS4-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG224:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR5]], !dbg [[DBG226:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG227:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S2C1Ei
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG229:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG228:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META230:![0-9]+]], metadata !DIExpression()), !dbg [[DBG232:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META229:![0-9]+]], metadata !DIExpression()), !dbg [[DBG231:![0-9]+]]
// CHECK-TLS4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META233:![0-9]+]], metadata !DIExpression()), !dbg [[DBG234:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META232:![0-9]+]], metadata !DIExpression()), !dbg [[DBG233:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG235:![0-9]+]]
-// CHECK-TLS4-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG235]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG236:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG234:![0-9]+]]
+// CHECK-TLS4-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG234]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG235:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S2D1Ev
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG237:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG236:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META238:![0-9]+]], metadata !DIExpression()), !dbg [[DBG239:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META237:![0-9]+]], metadata !DIExpression()), !dbg [[DBG238:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR5]], !dbg [[DBG240:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG241:![0-9]+]]
+// CHECK-TLS4-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR5]], !dbg [[DBG239:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG240:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S2C2Ei
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG242:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG241:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META243:![0-9]+]], metadata !DIExpression()), !dbg [[DBG244:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META242:![0-9]+]], metadata !DIExpression()), !dbg [[DBG243:![0-9]+]]
// CHECK-TLS4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META245:![0-9]+]], metadata !DIExpression()), !dbg [[DBG246:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META244:![0-9]+]], metadata !DIExpression()), !dbg [[DBG245:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG247:![0-9]+]]
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG248:![0-9]+]]
-// CHECK-TLS4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG247]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG249:![0-9]+]]
+// CHECK-TLS4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG246:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG247:![0-9]+]]
+// CHECK-TLS4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG246]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG248:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S2D2Ev
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG250:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG249:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META251:![0-9]+]], metadata !DIExpression()), !dbg [[DBG252:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META250:![0-9]+]], metadata !DIExpression()), !dbg [[DBG251:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG253:![0-9]+]]
-// CHECK-TLS4-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG255:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG256:![0-9]+]]
+// CHECK-TLS4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG252:![0-9]+]]
+// CHECK-TLS4-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG254:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG255:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
-// CHECK-TLS4-SAME: () #[[ATTR7]] personality ptr @__gxx_personality_v0 !dbg [[DBG257:![0-9]+]] {
+// CHECK-TLS4-SAME: () #[[ATTR6]] personality ptr @__gxx_personality_v0 !dbg [[DBG256:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[ARRAYINIT_ENDOFINIT:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[ARRAYINIT_ENDOFINIT1:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: [[ARRAYINIT_ENDOFINIT5:%.*]] = alloca ptr, align 8
-// CHECK-TLS4-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG258:![0-9]+]]
-// CHECK-TLS4-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG260:![0-9]+]]
+// CHECK-TLS4-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG257:![0-9]+]]
+// CHECK-TLS4-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG259:![0-9]+]]
// CHECK-TLS4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// CHECK-TLS4-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG261:![0-9]+]]
+// CHECK-TLS4-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG260:![0-9]+]]
// CHECK-TLS4: invoke.cont:
-// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG260]]
+// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG259]]
// CHECK-TLS4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// CHECK-TLS4-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG262:![0-9]+]]
+// CHECK-TLS4-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG261:![0-9]+]]
// CHECK-TLS4: invoke.cont2:
-// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG260]]
+// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG259]]
// CHECK-TLS4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// CHECK-TLS4-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG263:![0-9]+]]
+// CHECK-TLS4-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG262:![0-9]+]]
// CHECK-TLS4: invoke.cont3:
-// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG264:![0-9]+]]
+// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG263:![0-9]+]]
// CHECK-TLS4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// CHECK-TLS4-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG265:![0-9]+]]
+// CHECK-TLS4-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG264:![0-9]+]]
// CHECK-TLS4: invoke.cont7:
-// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG264]]
+// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG263]]
// CHECK-TLS4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// CHECK-TLS4-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG266:![0-9]+]]
+// CHECK-TLS4-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG265:![0-9]+]]
// CHECK-TLS4: invoke.cont8:
-// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG264]]
+// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG263]]
// CHECK-TLS4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// CHECK-TLS4-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG267:![0-9]+]]
+// CHECK-TLS4-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG266:![0-9]+]]
// CHECK-TLS4: invoke.cont9:
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR5]], !dbg [[DBG268:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR5]], !dbg [[DBG267:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG267]]
// CHECK-TLS4: lpad:
// CHECK-TLS4-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// CHECK-TLS4-NEXT: cleanup, !dbg [[DBG269:![0-9]+]]
-// CHECK-TLS4-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG269]]
-// CHECK-TLS4-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG269]]
-// CHECK-TLS4-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG269]]
-// CHECK-TLS4-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG269]]
-// CHECK-TLS4-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG260]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG260]]
-// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG260]]
+// CHECK-TLS4-NEXT: cleanup, !dbg [[DBG268:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG259]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG259]]
+// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG259]]
// CHECK-TLS4: arraydestroy.body:
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG260]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG260]]
-// CHECK-TLS4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR5]], !dbg [[DBG260]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG260]]
-// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG260]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG259]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG259]]
+// CHECK-TLS4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR5]], !dbg [[DBG259]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG259]]
+// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG259]]
// CHECK-TLS4: arraydestroy.done4:
-// CHECK-TLS4-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG260]]
+// CHECK-TLS4-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG259]]
// CHECK-TLS4: lpad6:
// CHECK-TLS4-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// CHECK-TLS4-NEXT: cleanup, !dbg [[DBG269]]
-// CHECK-TLS4-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG269]]
-// CHECK-TLS4-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG269]]
-// CHECK-TLS4-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG269]]
-// CHECK-TLS4-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG269]]
-// CHECK-TLS4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG264]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG264]]
-// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG264]]
+// CHECK-TLS4-NEXT: cleanup, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG263]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG263]]
+// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG263]]
// CHECK-TLS4: arraydestroy.body11:
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG264]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG264]]
-// CHECK-TLS4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR5]], !dbg [[DBG264]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG264]]
-// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG264]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG263]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG263]]
+// CHECK-TLS4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR5]], !dbg [[DBG263]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG263]]
+// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG263]]
// CHECK-TLS4: arraydestroy.done15:
-// CHECK-TLS4-NEXT: br label [[EHCLEANUP]], !dbg [[DBG264]]
+// CHECK-TLS4-NEXT: br label [[EHCLEANUP]], !dbg [[DBG263]]
// CHECK-TLS4: ehcleanup:
-// CHECK-TLS4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG258]]
+// CHECK-TLS4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG257]]
// CHECK-TLS4: arraydestroy.body17:
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR5]], !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG258]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR5]], !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG257]]
// CHECK-TLS4: arraydestroy.done21:
-// CHECK-TLS4-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG258]]
+// CHECK-TLS4-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG257]]
// CHECK-TLS4: eh.resume:
-// CHECK-TLS4-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG258]]
+// CHECK-TLS4-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG257]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
-// CHECK-TLS4-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR7]] !dbg [[DBG270:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR6]] !dbg [[DBG269:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META274:![0-9]+]], metadata !DIExpression()), !dbg [[DBG275:![0-9]+]]
-// CHECK-TLS4-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG275]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META273:![0-9]+]], metadata !DIExpression()), !dbg [[DBG274:![0-9]+]]
+// CHECK-TLS4-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG274]]
// CHECK-TLS4: arraydestroy.body:
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG275]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG275]]
-// CHECK-TLS4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR5]], !dbg [[DBG275]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG275]]
-// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG275]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG274]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG274]]
+// CHECK-TLS4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR5]], !dbg [[DBG274]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG274]]
+// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG274]]
// CHECK-TLS4: arraydestroy.done1:
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG275]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG274]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC2Ei
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR4]] align 2 !dbg [[DBG276:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR4]] align 2 !dbg [[DBG275:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META277:![0-9]+]], metadata !DIExpression()), !dbg [[DBG278:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META276:![0-9]+]], metadata !DIExpression()), !dbg [[DBG277:![0-9]+]]
// CHECK-TLS4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META279:![0-9]+]], metadata !DIExpression()), !dbg [[DBG280:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META278:![0-9]+]], metadata !DIExpression()), !dbg [[DBG279:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG281:![0-9]+]]
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG282:![0-9]+]]
-// CHECK-TLS4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG281]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG283:![0-9]+]]
+// CHECK-TLS4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG280:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG281:![0-9]+]]
+// CHECK-TLS4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG280]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG282:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD2Ev
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] align 2 !dbg [[DBG284:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] align 2 !dbg [[DBG283:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META285:![0-9]+]], metadata !DIExpression()), !dbg [[DBG286:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META284:![0-9]+]], metadata !DIExpression()), !dbg [[DBG285:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG287:![0-9]+]]
-// CHECK-TLS4-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG289:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG290:![0-9]+]]
+// CHECK-TLS4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG286:![0-9]+]]
+// CHECK-TLS4-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG288:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG289:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
-// CHECK-TLS4-SAME: () #[[ATTR7]] !dbg [[DBG291:![0-9]+]] {
+// CHECK-TLS4-SAME: () #[[ATTR6]] !dbg [[DBG290:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG292:![0-9]+]]
-// CHECK-TLS4-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG292]]
-// CHECK-TLS4-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG292]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG291:![0-9]+]]
+// CHECK-TLS4-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG291]]
+// CHECK-TLS4-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG291]]
// CHECK-TLS4: init.check:
-// CHECK-TLS4-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG292]]
-// CHECK-TLS4-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG293:![0-9]+]]
-// CHECK-TLS4-NEXT: [[TMP1:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR5]], !dbg [[DBG292]]
-// CHECK-TLS4-NEXT: br label [[INIT_END]], !dbg [[DBG292]]
+// CHECK-TLS4-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG291]]
+// CHECK-TLS4-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG292:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP1:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR5]], !dbg [[DBG291]]
+// CHECK-TLS4-NEXT: br label [[INIT_END]], !dbg [[DBG291]]
// CHECK-TLS4: init.end:
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG295:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG294:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S4C1Ei
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG296:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG295:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META297:![0-9]+]], metadata !DIExpression()), !dbg [[DBG299:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META296:![0-9]+]], metadata !DIExpression()), !dbg [[DBG298:![0-9]+]]
// CHECK-TLS4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META300:![0-9]+]], metadata !DIExpression()), !dbg [[DBG301:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META299:![0-9]+]], metadata !DIExpression()), !dbg [[DBG300:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG302:![0-9]+]]
-// CHECK-TLS4-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG302]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG303:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG301:![0-9]+]]
+// CHECK-TLS4-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG301]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG302:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S4D1Ev
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG304:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG303:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META305:![0-9]+]], metadata !DIExpression()), !dbg [[DBG306:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META304:![0-9]+]], metadata !DIExpression()), !dbg [[DBG305:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR5]], !dbg [[DBG307:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG308:![0-9]+]]
+// CHECK-TLS4-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR5]], !dbg [[DBG306:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG307:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S4C2Ei
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG309:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG308:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META310:![0-9]+]], metadata !DIExpression()), !dbg [[DBG311:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META309:![0-9]+]], metadata !DIExpression()), !dbg [[DBG310:![0-9]+]]
// CHECK-TLS4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META312:![0-9]+]], metadata !DIExpression()), !dbg [[DBG313:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META311:![0-9]+]], metadata !DIExpression()), !dbg [[DBG312:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG314:![0-9]+]]
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG315:![0-9]+]]
-// CHECK-TLS4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG314]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG316:![0-9]+]]
+// CHECK-TLS4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG313:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG314:![0-9]+]]
+// CHECK-TLS4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG313]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG315:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S4D2Ev
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG317:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG316:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META318:![0-9]+]], metadata !DIExpression()), !dbg [[DBG319:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META317:![0-9]+]], metadata !DIExpression()), !dbg [[DBG318:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG320:![0-9]+]]
-// CHECK-TLS4-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG322:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG323:![0-9]+]]
+// CHECK-TLS4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG319:![0-9]+]]
+// CHECK-TLS4-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG321:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG322:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_threadprivate_codegen.cpp
-// CHECK-TLS4-SAME: () #[[ATTR7]] !dbg [[DBG324:![0-9]+]] {
+// CHECK-TLS4-SAME: () #[[ATTR6]] !dbg [[DBG323:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
-// CHECK-TLS4-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG326:![0-9]+]]
+// CHECK-TLS4-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG325:![0-9]+]]
// CHECK-TLS4-NEXT: ret void
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@__tls_init
-// CHECK-TLS4-SAME: () #[[ATTR7]] !dbg [[DBG327:![0-9]+]] {
+// CHECK-TLS4-SAME: () #[[ATTR6]] !dbg [[DBG326:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i8, ptr @__tls_guard, align 1, !dbg [[DBG328:![0-9]+]]
-// CHECK-TLS4-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG328]]
-// CHECK-TLS4-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT:%.*]], label [[EXIT:%.*]], !dbg [[DBG328]], !prof [[PROF119]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i8, ptr @__tls_guard, align 1, !dbg [[DBG327:![0-9]+]]
+// CHECK-TLS4-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG327]]
+// CHECK-TLS4-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT:%.*]], label [[EXIT:%.*]], !dbg [[DBG327]], !prof [[PROF119]]
// CHECK-TLS4: init:
-// CHECK-TLS4-NEXT: store i8 1, ptr @__tls_guard, align 1, !dbg [[DBG328]]
-// CHECK-TLS4-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG328]]
-// CHECK-TLS4-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG328]]
-// CHECK-TLS4-NEXT: br label [[EXIT]], !dbg [[DBG328]]
+// CHECK-TLS4-NEXT: store i8 1, ptr @__tls_guard, align 1, !dbg [[DBG327]]
+// CHECK-TLS4-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG327]]
+// CHECK-TLS4-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG327]]
+// CHECK-TLS4-NEXT: br label [[EXIT]], !dbg [[DBG327]]
// CHECK-TLS4: exit:
// CHECK-TLS4-NEXT: ret void
//
@@ -5565,34 +5565,34 @@ int foobar() {
// SIMD3-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8
// SIMD3-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// SIMD3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// SIMD3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// SIMD3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// SIMD3: invoke.cont:
// SIMD3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// SIMD3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// SIMD3-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
+// SIMD3-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
// SIMD3: invoke.cont2:
// SIMD3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// SIMD3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// SIMD3-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
+// SIMD3-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
// SIMD3: invoke.cont3:
// SIMD3-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8
// SIMD3-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// SIMD3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// SIMD3-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
+// SIMD3-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
// SIMD3: invoke.cont7:
// SIMD3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// SIMD3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// SIMD3-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
+// SIMD3-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
// SIMD3: invoke.cont8:
// SIMD3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// SIMD3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// SIMD3-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
+// SIMD3-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
// SIMD3: invoke.cont9:
// SIMD3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]]
// SIMD3-NEXT: ret void
// SIMD3: lpad:
// SIMD3-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// SIMD3-NEXT: cleanup
+// SIMD3-NEXT: cleanup
// SIMD3-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0
// SIMD3-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8
// SIMD3-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1
@@ -5610,7 +5610,7 @@ int foobar() {
// SIMD3-NEXT: br label [[EHCLEANUP:%.*]]
// SIMD3: lpad6:
// SIMD3-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// SIMD3-NEXT: cleanup
+// SIMD3-NEXT: cleanup
// SIMD3-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0
// SIMD3-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8
// SIMD3-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1
@@ -5681,7 +5681,7 @@ int foobar() {
// SIMD3: init:
// SIMD3-NEXT: [[TMP2:%.*]] = load i32, ptr @_ZL3gs1, align 4
// SIMD3-NEXT: invoke void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP2]])
-// SIMD3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// SIMD3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// SIMD3: invoke.cont:
// SIMD3-NEXT: [[TMP3:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]]
// SIMD3-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR3]]
@@ -5726,7 +5726,7 @@ int foobar() {
// SIMD3-NEXT: ret i32 [[TMP21]]
// SIMD3: lpad:
// SIMD3-NEXT: [[TMP22:%.*]] = landingpad { ptr, i32 }
-// SIMD3-NEXT: cleanup
+// SIMD3-NEXT: cleanup
// SIMD3-NEXT: [[TMP23:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 0
// SIMD3-NEXT: store ptr [[TMP23]], ptr [[EXN_SLOT]], align 8
// SIMD3-NEXT: [[TMP24:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 1
@@ -5765,7 +5765,7 @@ int foobar() {
//
//
// SIMD3-LABEL: define {{[^@]+}}@_Z6foobarv
-// SIMD3-SAME: () #[[ATTR5:[0-9]+]] {
+// SIMD3-SAME: () #[[ATTR2]] {
// SIMD3-NEXT: entry:
// SIMD3-NEXT: [[RES:%.*]] = alloca i32, align 4
// SIMD3-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZN6Static1sE, align 4
@@ -5953,179 +5953,179 @@ int foobar() {
// SIMD4-LABEL: define {{[^@]+}}@__cxx_global_var_init
// SIMD4-SAME: () #[[ATTR0:[0-9]+]] !dbg [[DBG115:![0-9]+]] {
// SIMD4-NEXT: entry:
-// SIMD4-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG119:![0-9]+]]
-// SIMD4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR3:[0-9]+]], !dbg [[DBG121:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG122:![0-9]+]]
+// SIMD4-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG118:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR3:[0-9]+]], !dbg [[DBG120:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG121:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S1C1Ei
-// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 !dbg [[DBG123:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 !dbg [[DBG122:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META124:![0-9]+]], metadata !DIExpression()), !dbg [[DBG126:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META123:![0-9]+]], metadata !DIExpression()), !dbg [[DBG125:![0-9]+]]
// SIMD4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META127:![0-9]+]], metadata !DIExpression()), !dbg [[DBG128:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META126:![0-9]+]], metadata !DIExpression()), !dbg [[DBG127:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG129:![0-9]+]]
-// SIMD4-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG129]]
-// SIMD4-NEXT: ret void, !dbg [[DBG130:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG128:![0-9]+]]
+// SIMD4-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG128]]
+// SIMD4-NEXT: ret void, !dbg [[DBG129:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S1D1Ev
-// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 !dbg [[DBG131:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 !dbg [[DBG130:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META132:![0-9]+]], metadata !DIExpression()), !dbg [[DBG133:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META131:![0-9]+]], metadata !DIExpression()), !dbg [[DBG132:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]], !dbg [[DBG134:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG135:![0-9]+]]
+// SIMD4-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]], !dbg [[DBG133:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG134:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
-// SIMD4-SAME: () #[[ATTR0]] !dbg [[DBG136:![0-9]+]] {
+// SIMD4-SAME: () #[[ATTR0]] !dbg [[DBG135:![0-9]+]] {
// SIMD4-NEXT: entry:
-// SIMD4-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG137:![0-9]+]]
-// SIMD4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG139:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG140:![0-9]+]]
+// SIMD4-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG136:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG138:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG139:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S2C1Ei
-// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG141:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG140:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META142:![0-9]+]], metadata !DIExpression()), !dbg [[DBG144:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META141:![0-9]+]], metadata !DIExpression()), !dbg [[DBG143:![0-9]+]]
// SIMD4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META145:![0-9]+]], metadata !DIExpression()), !dbg [[DBG146:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META144:![0-9]+]], metadata !DIExpression()), !dbg [[DBG145:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG147:![0-9]+]]
-// SIMD4-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG147]]
-// SIMD4-NEXT: ret void, !dbg [[DBG148:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG146:![0-9]+]]
+// SIMD4-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG146]]
+// SIMD4-NEXT: ret void, !dbg [[DBG147:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S2D1Ev
-// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG149:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG148:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META150:![0-9]+]], metadata !DIExpression()), !dbg [[DBG151:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META149:![0-9]+]], metadata !DIExpression()), !dbg [[DBG150:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR3]], !dbg [[DBG152:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG153:![0-9]+]]
+// SIMD4-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR3]], !dbg [[DBG151:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG152:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
-// SIMD4-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG154:![0-9]+]] {
+// SIMD4-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG153:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[ARRAYINIT_ENDOFINIT:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[ARRAYINIT_ENDOFINIT1:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// SIMD4-NEXT: [[ARRAYINIT_ENDOFINIT5:%.*]] = alloca ptr, align 8
-// SIMD4-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG155:![0-9]+]]
-// SIMD4-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG157:![0-9]+]]
+// SIMD4-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG154:![0-9]+]]
+// SIMD4-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG156:![0-9]+]]
// SIMD4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// SIMD4-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG158:![0-9]+]]
+// SIMD4-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG157:![0-9]+]]
// SIMD4: invoke.cont:
-// SIMD4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG157]]
+// SIMD4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG156]]
// SIMD4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// SIMD4-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG159:![0-9]+]]
+// SIMD4-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG158:![0-9]+]]
// SIMD4: invoke.cont2:
-// SIMD4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG157]]
+// SIMD4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG156]]
// SIMD4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// SIMD4-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG160:![0-9]+]]
+// SIMD4-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG159:![0-9]+]]
// SIMD4: invoke.cont3:
-// SIMD4-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG155]]
-// SIMD4-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG161:![0-9]+]]
+// SIMD4-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG154]]
+// SIMD4-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG160:![0-9]+]]
// SIMD4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// SIMD4-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG162:![0-9]+]]
+// SIMD4-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG161:![0-9]+]]
// SIMD4: invoke.cont7:
-// SIMD4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG161]]
+// SIMD4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG160]]
// SIMD4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// SIMD4-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG163:![0-9]+]]
+// SIMD4-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG162:![0-9]+]]
// SIMD4: invoke.cont8:
-// SIMD4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG161]]
+// SIMD4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG160]]
// SIMD4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// SIMD4-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG164:![0-9]+]]
+// SIMD4-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG163:![0-9]+]]
// SIMD4: invoke.cont9:
-// SIMD4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG165:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG165]]
+// SIMD4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG164:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG164]]
// SIMD4: lpad:
// SIMD4-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// SIMD4-NEXT: cleanup, !dbg [[DBG166:![0-9]+]]
-// SIMD4-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG166]]
-// SIMD4-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG166]]
-// SIMD4-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG166]]
-// SIMD4-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG166]]
-// SIMD4-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG157]]
-// SIMD4-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG157]]
-// SIMD4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG157]]
+// SIMD4-NEXT: cleanup, !dbg [[DBG165:![0-9]+]]
+// SIMD4-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG165]]
+// SIMD4-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG165]]
+// SIMD4-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG165]]
+// SIMD4-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG165]]
+// SIMD4-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG156]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG156]]
+// SIMD4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG156]]
// SIMD4: arraydestroy.body:
-// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG157]]
-// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG157]]
-// SIMD4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG157]]
-// SIMD4-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG157]]
-// SIMD4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG157]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG156]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG156]]
+// SIMD4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG156]]
+// SIMD4-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG156]]
+// SIMD4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG156]]
// SIMD4: arraydestroy.done4:
-// SIMD4-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG157]]
+// SIMD4-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG156]]
// SIMD4: lpad6:
// SIMD4-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// SIMD4-NEXT: cleanup, !dbg [[DBG166]]
-// SIMD4-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG166]]
-// SIMD4-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG166]]
-// SIMD4-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG166]]
-// SIMD4-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG166]]
-// SIMD4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG161]]
-// SIMD4-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG161]]
-// SIMD4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG161]]
+// SIMD4-NEXT: cleanup, !dbg [[DBG165]]
+// SIMD4-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG165]]
+// SIMD4-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG165]]
+// SIMD4-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG165]]
+// SIMD4-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG165]]
+// SIMD4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG160]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG160]]
+// SIMD4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG160]]
// SIMD4: arraydestroy.body11:
-// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG161]]
-// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG161]]
-// SIMD4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR3]], !dbg [[DBG161]]
-// SIMD4-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG161]]
-// SIMD4-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG161]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG160]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG160]]
+// SIMD4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR3]], !dbg [[DBG160]]
+// SIMD4-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG160]]
+// SIMD4-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG160]]
// SIMD4: arraydestroy.done15:
-// SIMD4-NEXT: br label [[EHCLEANUP]], !dbg [[DBG161]]
+// SIMD4-NEXT: br label [[EHCLEANUP]], !dbg [[DBG160]]
// SIMD4: ehcleanup:
-// SIMD4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG155]]
-// SIMD4-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG155]]
-// SIMD4-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG155]]
-// SIMD4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG155]]
+// SIMD4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG154]]
+// SIMD4-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG154]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG154]]
+// SIMD4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG154]]
// SIMD4: arraydestroy.body17:
-// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG155]]
-// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG155]]
-// SIMD4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR3]], !dbg [[DBG155]]
-// SIMD4-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG155]]
-// SIMD4-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG155]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG154]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG154]]
+// SIMD4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR3]], !dbg [[DBG154]]
+// SIMD4-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG154]]
+// SIMD4-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG154]]
// SIMD4: arraydestroy.done21:
-// SIMD4-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG155]]
+// SIMD4-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG154]]
// SIMD4: eh.resume:
-// SIMD4-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG155]]
-// SIMD4-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG155]]
-// SIMD4-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG155]]
-// SIMD4-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG155]]
-// SIMD4-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG155]]
+// SIMD4-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG154]]
+// SIMD4-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG154]]
+// SIMD4-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG154]]
+// SIMD4-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG154]]
+// SIMD4-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG154]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
-// SIMD4-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG167:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG166:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META171:![0-9]+]], metadata !DIExpression()), !dbg [[DBG172:![0-9]+]]
-// SIMD4-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG172]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META170:![0-9]+]], metadata !DIExpression()), !dbg [[DBG171:![0-9]+]]
+// SIMD4-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG171]]
// SIMD4: arraydestroy.body:
-// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG172]]
-// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG172]]
-// SIMD4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG172]]
-// SIMD4-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG172]]
-// SIMD4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG172]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG171]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG171]]
+// SIMD4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG171]]
+// SIMD4-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG171]]
+// SIMD4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG171]]
// SIMD4: arraydestroy.done1:
-// SIMD4-NEXT: ret void, !dbg [[DBG172]]
+// SIMD4-NEXT: ret void, !dbg [[DBG171]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@main
@@ -6136,302 +6136,302 @@ int foobar() {
// SIMD4-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// SIMD4-NEXT: store i32 0, ptr [[RETVAL]], align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META173:![0-9]+]], metadata !DIExpression()), !dbg [[DBG174:![0-9]+]]
-// SIMD4-NEXT: [[TMP0:%.*]] = load atomic i8, ptr @_ZGVZ4mainE2sm acquire, align 8, !dbg [[DBG175:![0-9]+]]
-// SIMD4-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG175]]
-// SIMD4-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG175]], !prof [[PROF176:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META172:![0-9]+]], metadata !DIExpression()), !dbg [[DBG173:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load atomic i8, ptr @_ZGVZ4mainE2sm acquire, align 8, !dbg [[DBG174:![0-9]+]]
+// SIMD4-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG174]]
+// SIMD4-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG174]], !prof [[PROF175:![0-9]+]]
// SIMD4: init.check:
-// SIMD4-NEXT: [[TMP1:%.*]] = call i32 @__cxa_guard_acquire(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG175]]
-// SIMD4-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0, !dbg [[DBG175]]
-// SIMD4-NEXT: br i1 [[TOBOOL]], label [[INIT:%.*]], label [[INIT_END]], !dbg [[DBG175]]
+// SIMD4-NEXT: [[TMP1:%.*]] = call i32 @__cxa_guard_acquire(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG174]]
+// SIMD4-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0, !dbg [[DBG174]]
+// SIMD4-NEXT: br i1 [[TOBOOL]], label [[INIT:%.*]], label [[INIT_END]], !dbg [[DBG174]]
// SIMD4: init:
-// SIMD4-NEXT: [[TMP2:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG177:![0-9]+]]
+// SIMD4-NEXT: [[TMP2:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG176:![0-9]+]]
// SIMD4-NEXT: invoke void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP2]])
-// SIMD4-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG178:![0-9]+]]
+// SIMD4-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG177:![0-9]+]]
// SIMD4: invoke.cont:
-// SIMD4-NEXT: [[TMP3:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG175]]
-// SIMD4-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG175]]
-// SIMD4-NEXT: br label [[INIT_END]], !dbg [[DBG175]]
+// SIMD4-NEXT: [[TMP3:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG174]]
+// SIMD4-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG174]]
+// SIMD4-NEXT: br label [[INIT_END]], !dbg [[DBG174]]
// SIMD4: init.end:
-// SIMD4-NEXT: [[TMP4:%.*]] = load i32, ptr @_ZN6Static1sE, align 4, !dbg [[DBG179:![0-9]+]]
-// SIMD4-NEXT: store i32 [[TMP4]], ptr [[RES]], align 4, !dbg [[DBG180:![0-9]+]]
-// SIMD4-NEXT: [[TMP5:%.*]] = load i32, ptr @_ZZ4mainE2sm, align 8, !dbg [[DBG181:![0-9]+]]
-// SIMD4-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG182:![0-9]+]]
-// SIMD4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG182]]
-// SIMD4-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG182]]
-// SIMD4-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG183:![0-9]+]]
-// SIMD4-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG184:![0-9]+]]
-// SIMD4-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG184]]
-// SIMD4-NEXT: store i32 [[ADD1]], ptr [[RES]], align 4, !dbg [[DBG184]]
-// SIMD4-NEXT: [[TMP9:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG185:![0-9]+]]
-// SIMD4-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG186:![0-9]+]]
-// SIMD4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG186]]
-// SIMD4-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG186]]
-// SIMD4-NEXT: [[TMP11:%.*]] = load i32, ptr @gs3, align 4, !dbg [[DBG187:![0-9]+]]
-// SIMD4-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG188:![0-9]+]]
-// SIMD4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP11]], !dbg [[DBG188]]
-// SIMD4-NEXT: store i32 [[ADD3]], ptr [[RES]], align 4, !dbg [[DBG188]]
-// SIMD4-NEXT: [[TMP13:%.*]] = load i32, ptr getelementptr inbounds ([2 x [3 x %struct.S1]], ptr @arr_x, i64 0, i64 1, i64 1), align 4, !dbg [[DBG189:![0-9]+]]
-// SIMD4-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG190:![0-9]+]]
-// SIMD4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG190]]
-// SIMD4-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG190]]
-// SIMD4-NEXT: [[TMP15:%.*]] = load i32, ptr @_ZN2STIiE2stE, align 4, !dbg [[DBG191:![0-9]+]]
-// SIMD4-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG192:![0-9]+]]
-// SIMD4-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG192]]
-// SIMD4-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG192]]
-// SIMD4-NEXT: [[TMP17:%.*]] = load float, ptr @_ZN2STIfE2stE, align 4, !dbg [[DBG193:![0-9]+]]
-// SIMD4-NEXT: [[CONV:%.*]] = fptosi float [[TMP17]] to i32, !dbg [[DBG193]]
-// SIMD4-NEXT: [[TMP18:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG194:![0-9]+]]
-// SIMD4-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], [[CONV]], !dbg [[DBG194]]
-// SIMD4-NEXT: store i32 [[ADD6]], ptr [[RES]], align 4, !dbg [[DBG194]]
-// SIMD4-NEXT: [[TMP19:%.*]] = load i32, ptr @_ZN2STI2S4E2stE, align 4, !dbg [[DBG195:![0-9]+]]
-// SIMD4-NEXT: [[TMP20:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG196:![0-9]+]]
-// SIMD4-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], [[TMP19]], !dbg [[DBG196]]
-// SIMD4-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG196]]
-// SIMD4-NEXT: [[TMP21:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG197:![0-9]+]]
-// SIMD4-NEXT: ret i32 [[TMP21]], !dbg [[DBG198:![0-9]+]]
+// SIMD4-NEXT: [[TMP4:%.*]] = load i32, ptr @_ZN6Static1sE, align 4, !dbg [[DBG178:![0-9]+]]
+// SIMD4-NEXT: store i32 [[TMP4]], ptr [[RES]], align 4, !dbg [[DBG179:![0-9]+]]
+// SIMD4-NEXT: [[TMP5:%.*]] = load i32, ptr @_ZZ4mainE2sm, align 8, !dbg [[DBG180:![0-9]+]]
+// SIMD4-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG181:![0-9]+]]
+// SIMD4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG181]]
+// SIMD4-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG181]]
+// SIMD4-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG182:![0-9]+]]
+// SIMD4-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG183:![0-9]+]]
+// SIMD4-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG183]]
+// SIMD4-NEXT: store i32 [[ADD1]], ptr [[RES]], align 4, !dbg [[DBG183]]
+// SIMD4-NEXT: [[TMP9:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG184:![0-9]+]]
+// SIMD4-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG185:![0-9]+]]
+// SIMD4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG185]]
+// SIMD4-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG185]]
+// SIMD4-NEXT: [[TMP11:%.*]] = load i32, ptr @gs3, align 4, !dbg [[DBG186:![0-9]+]]
+// SIMD4-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG187:![0-9]+]]
+// SIMD4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP11]], !dbg [[DBG187]]
+// SIMD4-NEXT: store i32 [[ADD3]], ptr [[RES]], align 4, !dbg [[DBG187]]
+// SIMD4-NEXT: [[TMP13:%.*]] = load i32, ptr getelementptr inbounds ([2 x [3 x %struct.S1]], ptr @arr_x, i64 0, i64 1, i64 1), align 4, !dbg [[DBG188:![0-9]+]]
+// SIMD4-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG189:![0-9]+]]
+// SIMD4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG189]]
+// SIMD4-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG189]]
+// SIMD4-NEXT: [[TMP15:%.*]] = load i32, ptr @_ZN2STIiE2stE, align 4, !dbg [[DBG190:![0-9]+]]
+// SIMD4-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG191:![0-9]+]]
+// SIMD4-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG191]]
+// SIMD4-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG191]]
+// SIMD4-NEXT: [[TMP17:%.*]] = load float, ptr @_ZN2STIfE2stE, align 4, !dbg [[DBG192:![0-9]+]]
+// SIMD4-NEXT: [[CONV:%.*]] = fptosi float [[TMP17]] to i32, !dbg [[DBG192]]
+// SIMD4-NEXT: [[TMP18:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG193:![0-9]+]]
+// SIMD4-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], [[CONV]], !dbg [[DBG193]]
+// SIMD4-NEXT: store i32 [[ADD6]], ptr [[RES]], align 4, !dbg [[DBG193]]
+// SIMD4-NEXT: [[TMP19:%.*]] = load i32, ptr @_ZN2STI2S4E2stE, align 4, !dbg [[DBG194:![0-9]+]]
+// SIMD4-NEXT: [[TMP20:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG195:![0-9]+]]
+// SIMD4-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], [[TMP19]], !dbg [[DBG195]]
+// SIMD4-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG195]]
+// SIMD4-NEXT: [[TMP21:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG196:![0-9]+]]
+// SIMD4-NEXT: ret i32 [[TMP21]], !dbg [[DBG197:![0-9]+]]
// SIMD4: lpad:
// SIMD4-NEXT: [[TMP22:%.*]] = landingpad { ptr, i32 }
-// SIMD4-NEXT: cleanup, !dbg [[DBG199:![0-9]+]]
-// SIMD4-NEXT: [[TMP23:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 0, !dbg [[DBG199]]
-// SIMD4-NEXT: store ptr [[TMP23]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG199]]
-// SIMD4-NEXT: [[TMP24:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 1, !dbg [[DBG199]]
-// SIMD4-NEXT: store i32 [[TMP24]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG199]]
-// SIMD4-NEXT: call void @__cxa_guard_abort(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG175]]
-// SIMD4-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG175]]
+// SIMD4-NEXT: cleanup, !dbg [[DBG198:![0-9]+]]
+// SIMD4-NEXT: [[TMP23:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 0, !dbg [[DBG198]]
+// SIMD4-NEXT: store ptr [[TMP23]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG198]]
+// SIMD4-NEXT: [[TMP24:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 1, !dbg [[DBG198]]
+// SIMD4-NEXT: store i32 [[TMP24]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG198]]
+// SIMD4-NEXT: call void @__cxa_guard_abort(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG174]]
+// SIMD4-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG174]]
// SIMD4: eh.resume:
-// SIMD4-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG175]]
-// SIMD4-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG175]]
-// SIMD4-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG175]]
-// SIMD4-NEXT: [[LPAD_VAL8:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG175]]
-// SIMD4-NEXT: resume { ptr, i32 } [[LPAD_VAL8]], !dbg [[DBG175]]
+// SIMD4-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG174]]
+// SIMD4-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG174]]
+// SIMD4-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG174]]
+// SIMD4-NEXT: [[LPAD_VAL8:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG174]]
+// SIMD4-NEXT: resume { ptr, i32 } [[LPAD_VAL8]], !dbg [[DBG174]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC1Ei
-// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 !dbg [[DBG200:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 !dbg [[DBG199:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META201:![0-9]+]], metadata !DIExpression()), !dbg [[DBG203:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META200:![0-9]+]], metadata !DIExpression()), !dbg [[DBG202:![0-9]+]]
// SIMD4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META204:![0-9]+]], metadata !DIExpression()), !dbg [[DBG205:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META203:![0-9]+]], metadata !DIExpression()), !dbg [[DBG204:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG206:![0-9]+]]
-// SIMD4-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG206]]
-// SIMD4-NEXT: ret void, !dbg [[DBG207:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG205:![0-9]+]]
+// SIMD4-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG205]]
+// SIMD4-NEXT: ret void, !dbg [[DBG206:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD1Ev
-// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG208:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG207:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META209:![0-9]+]], metadata !DIExpression()), !dbg [[DBG210:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META208:![0-9]+]], metadata !DIExpression()), !dbg [[DBG209:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]], !dbg [[DBG211:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG212:![0-9]+]]
+// SIMD4-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]], !dbg [[DBG210:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG211:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_Z6foobarv
-// SIMD4-SAME: () #[[ATTR6:[0-9]+]] !dbg [[DBG213:![0-9]+]] {
+// SIMD4-SAME: () #[[ATTR2]] !dbg [[DBG212:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[RES:%.*]] = alloca i32, align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META214:![0-9]+]], metadata !DIExpression()), !dbg [[DBG215:![0-9]+]]
-// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZN6Static1sE, align 4, !dbg [[DBG216:![0-9]+]]
-// SIMD4-NEXT: store i32 [[TMP0]], ptr [[RES]], align 4, !dbg [[DBG217:![0-9]+]]
-// SIMD4-NEXT: [[TMP1:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG218:![0-9]+]]
-// SIMD4-NEXT: [[TMP2:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG219:![0-9]+]]
-// SIMD4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[TMP1]], !dbg [[DBG219]]
-// SIMD4-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG219]]
-// SIMD4-NEXT: [[TMP3:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG220:![0-9]+]]
-// SIMD4-NEXT: [[TMP4:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG221:![0-9]+]]
-// SIMD4-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], [[TMP3]], !dbg [[DBG221]]
-// SIMD4-NEXT: store i32 [[ADD1]], ptr [[RES]], align 4, !dbg [[DBG221]]
-// SIMD4-NEXT: [[TMP5:%.*]] = load i32, ptr @gs3, align 4, !dbg [[DBG222:![0-9]+]]
-// SIMD4-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG223:![0-9]+]]
-// SIMD4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG223]]
-// SIMD4-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG223]]
-// SIMD4-NEXT: [[TMP7:%.*]] = load i32, ptr getelementptr inbounds ([2 x [3 x %struct.S1]], ptr @arr_x, i64 0, i64 1, i64 1), align 4, !dbg [[DBG224:![0-9]+]]
-// SIMD4-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG225:![0-9]+]]
-// SIMD4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG225]]
-// SIMD4-NEXT: store i32 [[ADD3]], ptr [[RES]], align 4, !dbg [[DBG225]]
-// SIMD4-NEXT: [[TMP9:%.*]] = load i32, ptr @_ZN2STIiE2stE, align 4, !dbg [[DBG226:![0-9]+]]
-// SIMD4-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG227:![0-9]+]]
-// SIMD4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG227]]
-// SIMD4-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG227]]
-// SIMD4-NEXT: [[TMP11:%.*]] = load float, ptr @_ZN2STIfE2stE, align 4, !dbg [[DBG228:![0-9]+]]
-// SIMD4-NEXT: [[CONV:%.*]] = fptosi float [[TMP11]] to i32, !dbg [[DBG228]]
-// SIMD4-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG229:![0-9]+]]
-// SIMD4-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP12]], [[CONV]], !dbg [[DBG229]]
-// SIMD4-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG229]]
-// SIMD4-NEXT: [[TMP13:%.*]] = load i32, ptr @_ZN2STI2S4E2stE, align 4, !dbg [[DBG230:![0-9]+]]
-// SIMD4-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG231:![0-9]+]]
-// SIMD4-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG231]]
-// SIMD4-NEXT: store i32 [[ADD6]], ptr [[RES]], align 4, !dbg [[DBG231]]
-// SIMD4-NEXT: [[TMP15:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG232:![0-9]+]]
-// SIMD4-NEXT: ret i32 [[TMP15]], !dbg [[DBG233:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META213:![0-9]+]], metadata !DIExpression()), !dbg [[DBG214:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZN6Static1sE, align 4, !dbg [[DBG215:![0-9]+]]
+// SIMD4-NEXT: store i32 [[TMP0]], ptr [[RES]], align 4, !dbg [[DBG216:![0-9]+]]
+// SIMD4-NEXT: [[TMP1:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG217:![0-9]+]]
+// SIMD4-NEXT: [[TMP2:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG218:![0-9]+]]
+// SIMD4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[TMP1]], !dbg [[DBG218]]
+// SIMD4-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG218]]
+// SIMD4-NEXT: [[TMP3:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG219:![0-9]+]]
+// SIMD4-NEXT: [[TMP4:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG220:![0-9]+]]
+// SIMD4-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], [[TMP3]], !dbg [[DBG220]]
+// SIMD4-NEXT: store i32 [[ADD1]], ptr [[RES]], align 4, !dbg [[DBG220]]
+// SIMD4-NEXT: [[TMP5:%.*]] = load i32, ptr @gs3, align 4, !dbg [[DBG221:![0-9]+]]
+// SIMD4-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG222:![0-9]+]]
+// SIMD4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG222]]
+// SIMD4-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG222]]
+// SIMD4-NEXT: [[TMP7:%.*]] = load i32, ptr getelementptr inbounds ([2 x [3 x %struct.S1]], ptr @arr_x, i64 0, i64 1, i64 1), align 4, !dbg [[DBG223:![0-9]+]]
+// SIMD4-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG224:![0-9]+]]
+// SIMD4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG224]]
+// SIMD4-NEXT: store i32 [[ADD3]], ptr [[RES]], align 4, !dbg [[DBG224]]
+// SIMD4-NEXT: [[TMP9:%.*]] = load i32, ptr @_ZN2STIiE2stE, align 4, !dbg [[DBG225:![0-9]+]]
+// SIMD4-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG226:![0-9]+]]
+// SIMD4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG226]]
+// SIMD4-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG226]]
+// SIMD4-NEXT: [[TMP11:%.*]] = load float, ptr @_ZN2STIfE2stE, align 4, !dbg [[DBG227:![0-9]+]]
+// SIMD4-NEXT: [[CONV:%.*]] = fptosi float [[TMP11]] to i32, !dbg [[DBG227]]
+// SIMD4-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG228:![0-9]+]]
+// SIMD4-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP12]], [[CONV]], !dbg [[DBG228]]
+// SIMD4-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG228]]
+// SIMD4-NEXT: [[TMP13:%.*]] = load i32, ptr @_ZN2STI2S4E2stE, align 4, !dbg [[DBG229:![0-9]+]]
+// SIMD4-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG230:![0-9]+]]
+// SIMD4-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG230]]
+// SIMD4-NEXT: store i32 [[ADD6]], ptr [[RES]], align 4, !dbg [[DBG230]]
+// SIMD4-NEXT: [[TMP15:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG231:![0-9]+]]
+// SIMD4-NEXT: ret i32 [[TMP15]], !dbg [[DBG232:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
-// SIMD4-SAME: () #[[ATTR0]] comdat($_ZN2STI2S4E2stE) !dbg [[DBG234:![0-9]+]] {
+// SIMD4-SAME: () #[[ATTR0]] comdat($_ZN2STI2S4E2stE) !dbg [[DBG233:![0-9]+]] {
// SIMD4-NEXT: entry:
-// SIMD4-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG235:![0-9]+]]
-// SIMD4-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG235]]
-// SIMD4-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG235]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG234:![0-9]+]]
+// SIMD4-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG234]]
+// SIMD4-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG234]]
// SIMD4: init.check:
-// SIMD4-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG235]]
-// SIMD4-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG236:![0-9]+]]
-// SIMD4-NEXT: [[TMP1:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG235]]
-// SIMD4-NEXT: br label [[INIT_END]], !dbg [[DBG235]]
+// SIMD4-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG234]]
+// SIMD4-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG235:![0-9]+]]
+// SIMD4-NEXT: [[TMP1:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG234]]
+// SIMD4-NEXT: br label [[INIT_END]], !dbg [[DBG234]]
// SIMD4: init.end:
-// SIMD4-NEXT: ret void, !dbg [[DBG238:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG237:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S4C1Ei
-// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG239:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG238:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META240:![0-9]+]], metadata !DIExpression()), !dbg [[DBG242:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META239:![0-9]+]], metadata !DIExpression()), !dbg [[DBG241:![0-9]+]]
// SIMD4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META243:![0-9]+]], metadata !DIExpression()), !dbg [[DBG244:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META242:![0-9]+]], metadata !DIExpression()), !dbg [[DBG243:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG245:![0-9]+]]
-// SIMD4-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG245]]
-// SIMD4-NEXT: ret void, !dbg [[DBG246:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG244:![0-9]+]]
+// SIMD4-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG244]]
+// SIMD4-NEXT: ret void, !dbg [[DBG245:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S4D1Ev
-// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG247:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG246:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META248:![0-9]+]], metadata !DIExpression()), !dbg [[DBG249:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META247:![0-9]+]], metadata !DIExpression()), !dbg [[DBG248:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR3]], !dbg [[DBG250:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG251:![0-9]+]]
+// SIMD4-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR3]], !dbg [[DBG249:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG250:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S1C2Ei
-// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG252:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG251:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META253:![0-9]+]], metadata !DIExpression()), !dbg [[DBG254:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META252:![0-9]+]], metadata !DIExpression()), !dbg [[DBG253:![0-9]+]]
// SIMD4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META255:![0-9]+]], metadata !DIExpression()), !dbg [[DBG256:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META254:![0-9]+]], metadata !DIExpression()), !dbg [[DBG255:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG257:![0-9]+]]
-// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG258:![0-9]+]]
-// SIMD4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG257]]
-// SIMD4-NEXT: ret void, !dbg [[DBG259:![0-9]+]]
+// SIMD4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG256:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG257:![0-9]+]]
+// SIMD4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG256]]
+// SIMD4-NEXT: ret void, !dbg [[DBG258:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S1D2Ev
-// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG260:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG259:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META261:![0-9]+]], metadata !DIExpression()), !dbg [[DBG262:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META260:![0-9]+]], metadata !DIExpression()), !dbg [[DBG261:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG263:![0-9]+]]
-// SIMD4-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG265:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG266:![0-9]+]]
+// SIMD4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG262:![0-9]+]]
+// SIMD4-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG264:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG265:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S2C2Ei
-// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG267:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG266:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META268:![0-9]+]], metadata !DIExpression()), !dbg [[DBG269:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META267:![0-9]+]], metadata !DIExpression()), !dbg [[DBG268:![0-9]+]]
// SIMD4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META270:![0-9]+]], metadata !DIExpression()), !dbg [[DBG271:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META269:![0-9]+]], metadata !DIExpression()), !dbg [[DBG270:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG272:![0-9]+]]
-// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG273:![0-9]+]]
-// SIMD4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG272]]
-// SIMD4-NEXT: ret void, !dbg [[DBG274:![0-9]+]]
+// SIMD4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG271:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG272:![0-9]+]]
+// SIMD4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG271]]
+// SIMD4-NEXT: ret void, !dbg [[DBG273:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S2D2Ev
-// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG275:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG274:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META276:![0-9]+]], metadata !DIExpression()), !dbg [[DBG277:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META275:![0-9]+]], metadata !DIExpression()), !dbg [[DBG276:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG278:![0-9]+]]
-// SIMD4-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG280:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG281:![0-9]+]]
+// SIMD4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG277:![0-9]+]]
+// SIMD4-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG279:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG280:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC2Ei
-// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG282:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG281:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META283:![0-9]+]], metadata !DIExpression()), !dbg [[DBG284:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META282:![0-9]+]], metadata !DIExpression()), !dbg [[DBG283:![0-9]+]]
// SIMD4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META285:![0-9]+]], metadata !DIExpression()), !dbg [[DBG286:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META284:![0-9]+]], metadata !DIExpression()), !dbg [[DBG285:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG287:![0-9]+]]
-// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG288:![0-9]+]]
-// SIMD4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG287]]
-// SIMD4-NEXT: ret void, !dbg [[DBG289:![0-9]+]]
+// SIMD4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG286:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG287:![0-9]+]]
+// SIMD4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG286]]
+// SIMD4-NEXT: ret void, !dbg [[DBG288:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD2Ev
-// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG290:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG289:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META291:![0-9]+]], metadata !DIExpression()), !dbg [[DBG292:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META290:![0-9]+]], metadata !DIExpression()), !dbg [[DBG291:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG293:![0-9]+]]
-// SIMD4-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG295:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG296:![0-9]+]]
+// SIMD4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG292:![0-9]+]]
+// SIMD4-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG294:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG295:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S4C2Ei
-// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG297:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG296:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META298:![0-9]+]], metadata !DIExpression()), !dbg [[DBG299:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META297:![0-9]+]], metadata !DIExpression()), !dbg [[DBG298:![0-9]+]]
// SIMD4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META300:![0-9]+]], metadata !DIExpression()), !dbg [[DBG301:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META299:![0-9]+]], metadata !DIExpression()), !dbg [[DBG300:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG302:![0-9]+]]
-// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG303:![0-9]+]]
-// SIMD4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG302]]
-// SIMD4-NEXT: ret void, !dbg [[DBG304:![0-9]+]]
+// SIMD4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG301:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG302:![0-9]+]]
+// SIMD4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG301]]
+// SIMD4-NEXT: ret void, !dbg [[DBG303:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S4D2Ev
-// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG305:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG304:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META306:![0-9]+]], metadata !DIExpression()), !dbg [[DBG307:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META305:![0-9]+]], metadata !DIExpression()), !dbg [[DBG306:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG308:![0-9]+]]
-// SIMD4-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG310:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG311:![0-9]+]]
+// SIMD4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG307:![0-9]+]]
+// SIMD4-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG309:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG310:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_threadprivate_codegen.cpp
-// SIMD4-SAME: () #[[ATTR0]] !dbg [[DBG312:![0-9]+]] {
+// SIMD4-SAME: () #[[ATTR0]] !dbg [[DBG311:![0-9]+]] {
// SIMD4-NEXT: entry:
-// SIMD4-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG314:![0-9]+]]
-// SIMD4-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG314]]
-// SIMD4-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG314]]
+// SIMD4-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG313:![0-9]+]]
+// SIMD4-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG313]]
+// SIMD4-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG313]]
// SIMD4-NEXT: ret void
//
//
@@ -6440,7 +6440,7 @@ int foobar() {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META118:![0-9]+]], metadata !DIExpression()), !dbg [[DBG120:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META118:![0-9]+]], metadata !DIExpression()), !dbg [[DBG120:![0-9]+]]
// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG121:![0-9]+]]
// DEBUG1-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[TMP1]], i32 noundef 5), !dbg [[DBG122:![0-9]+]]
// DEBUG1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG121]]
@@ -6453,9 +6453,9 @@ int foobar() {
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META124:![0-9]+]], metadata !DIExpression()), !dbg [[DBG126:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META124:![0-9]+]], metadata !DIExpression()), !dbg [[DBG126:![0-9]+]]
// DEBUG1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META127:![0-9]+]], metadata !DIExpression()), !dbg [[DBG128:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META127:![0-9]+]], metadata !DIExpression()), !dbg [[DBG128:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG129:![0-9]+]]
// DEBUG1-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG129]]
@@ -6467,7 +6467,7 @@ int foobar() {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META132:![0-9]+]], metadata !DIExpression()), !dbg [[DBG133:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META132:![0-9]+]], metadata !DIExpression()), !dbg [[DBG133:![0-9]+]]
// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG133]]
// DEBUG1-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TMP1]]) #[[ATTR4:[0-9]+]], !dbg [[DBG133]]
// DEBUG1-NEXT: ret void, !dbg [[DBG134:![0-9]+]]
@@ -6478,7 +6478,7 @@ int foobar() {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META136:![0-9]+]], metadata !DIExpression()), !dbg [[DBG137:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META136:![0-9]+]], metadata !DIExpression()), !dbg [[DBG137:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// DEBUG1-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]], !dbg [[DBG138:![0-9]+]]
// DEBUG1-NEXT: ret void, !dbg [[DBG139:![0-9]+]]
@@ -6502,47 +6502,47 @@ int foobar() {
// DEBUG1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: [[ARRAYINIT_ENDOFINIT9:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META143:![0-9]+]], metadata !DIExpression()), !dbg [[DBG144:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META143:![0-9]+]], metadata !DIExpression()), !dbg [[DBG144:![0-9]+]]
// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG145:![0-9]+]]
// DEBUG1-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP1]], i64 0, i64 0, !dbg [[DBG146:![0-9]+]]
// DEBUG1-NEXT: store ptr [[ARRAYINIT_BEGIN]], ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG146]]
// DEBUG1-NEXT: [[ARRAYINIT_BEGIN1:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 0, i64 0, !dbg [[DBG147:![0-9]+]]
// DEBUG1-NEXT: store ptr [[ARRAYINIT_BEGIN1]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG147]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN1]], i32 noundef 1)
-// DEBUG1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG148:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG148:![0-9]+]]
// DEBUG1: invoke.cont:
// DEBUG1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[ARRAYINIT_BEGIN1]], i64 1, !dbg [[DBG147]]
// DEBUG1-NEXT: store ptr [[ARRAYINIT_ELEMENT]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG147]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
-// DEBUG1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG149:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG149:![0-9]+]]
// DEBUG1: invoke.cont3:
// DEBUG1-NEXT: [[ARRAYINIT_ELEMENT4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT]], i64 1, !dbg [[DBG147]]
// DEBUG1-NEXT: store ptr [[ARRAYINIT_ELEMENT4]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG147]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT4]], i32 noundef 3)
-// DEBUG1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]], !dbg [[DBG150:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]], !dbg [[DBG150:![0-9]+]]
// DEBUG1: invoke.cont5:
// DEBUG1-NEXT: [[ARRAYINIT_ELEMENT7:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 1, !dbg [[DBG146]]
// DEBUG1-NEXT: store ptr [[ARRAYINIT_ELEMENT7]], ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG146]]
// DEBUG1-NEXT: [[ARRAYINIT_BEGIN8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_ELEMENT7]], i64 0, i64 0, !dbg [[DBG151:![0-9]+]]
// DEBUG1-NEXT: store ptr [[ARRAYINIT_BEGIN8]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG151]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN8]], i32 noundef 4)
-// DEBUG1-NEXT: to label [[INVOKE_CONT11:%.*]] unwind label [[LPAD10:%.*]], !dbg [[DBG152:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT11:%.*]] unwind label [[LPAD10:%.*]], !dbg [[DBG152:![0-9]+]]
// DEBUG1: invoke.cont11:
// DEBUG1-NEXT: [[ARRAYINIT_ELEMENT12:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_BEGIN8]], i64 1, !dbg [[DBG151]]
// DEBUG1-NEXT: store ptr [[ARRAYINIT_ELEMENT12]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG151]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT12]], i32 noundef 5)
-// DEBUG1-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[LPAD10]], !dbg [[DBG153:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[LPAD10]], !dbg [[DBG153:![0-9]+]]
// DEBUG1: invoke.cont13:
// DEBUG1-NEXT: [[ARRAYINIT_ELEMENT14:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT12]], i64 1, !dbg [[DBG151]]
// DEBUG1-NEXT: store ptr [[ARRAYINIT_ELEMENT14]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG151]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT14]], i32 noundef 6)
-// DEBUG1-NEXT: to label [[INVOKE_CONT15:%.*]] unwind label [[LPAD10]], !dbg [[DBG154:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT15:%.*]] unwind label [[LPAD10]], !dbg [[DBG154:![0-9]+]]
// DEBUG1: invoke.cont15:
// DEBUG1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG145]]
// DEBUG1-NEXT: ret ptr [[TMP2]], !dbg [[DBG145]]
// DEBUG1: lpad:
// DEBUG1-NEXT: [[TMP3:%.*]] = landingpad { ptr, i32 }
-// DEBUG1-NEXT: cleanup, !dbg [[DBG144]]
+// DEBUG1-NEXT: cleanup, !dbg [[DBG144]]
// DEBUG1-NEXT: [[TMP4:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 0, !dbg [[DBG144]]
// DEBUG1-NEXT: store ptr [[TMP4]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG144]]
// DEBUG1-NEXT: [[TMP5:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 1, !dbg [[DBG144]]
@@ -6560,7 +6560,7 @@ int foobar() {
// DEBUG1-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG147]]
// DEBUG1: lpad10:
// DEBUG1-NEXT: [[TMP7:%.*]] = landingpad { ptr, i32 }
-// DEBUG1-NEXT: cleanup, !dbg [[DBG144]]
+// DEBUG1-NEXT: cleanup, !dbg [[DBG144]]
// DEBUG1-NEXT: [[TMP8:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 0, !dbg [[DBG144]]
// DEBUG1-NEXT: store ptr [[TMP8]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG144]]
// DEBUG1-NEXT: [[TMP9:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 1, !dbg [[DBG144]]
@@ -6603,7 +6603,7 @@ int foobar() {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META156:![0-9]+]], metadata !DIExpression()), !dbg [[DBG157:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META156:![0-9]+]], metadata !DIExpression()), !dbg [[DBG157:![0-9]+]]
// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG157]]
// DEBUG1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP1]], i64 6, !dbg [[DBG157]]
// DEBUG1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG157]]
@@ -6628,209 +6628,209 @@ int foobar() {
// DEBUG1-LABEL: define {{[^@]+}}@__cxx_global_var_init
// DEBUG1-SAME: () #[[ATTR0]] !dbg [[DBG161:![0-9]+]] {
// DEBUG1-NEXT: entry:
-// DEBUG1-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG165:![0-9]+]]
-// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG167:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG168:![0-9]+]]
+// DEBUG1-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG164:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG166:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG167:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S1C2Ei
-// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG169:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG168:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META170:![0-9]+]], metadata !DIExpression()), !dbg [[DBG171:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META169:![0-9]+]], metadata !DIExpression()), !dbg [[DBG170:![0-9]+]]
// DEBUG1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META172:![0-9]+]], metadata !DIExpression()), !dbg [[DBG173:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META171:![0-9]+]], metadata !DIExpression()), !dbg [[DBG172:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG174:![0-9]+]]
-// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG175:![0-9]+]]
-// DEBUG1-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG174]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG176:![0-9]+]]
+// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG173:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG174:![0-9]+]]
+// DEBUG1-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG173]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG175:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S1D2Ev
-// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG177:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG176:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META178:![0-9]+]], metadata !DIExpression()), !dbg [[DBG179:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META177:![0-9]+]], metadata !DIExpression()), !dbg [[DBG178:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG180:![0-9]+]]
-// DEBUG1-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG182:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG183:![0-9]+]]
+// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG179:![0-9]+]]
+// DEBUG1-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG181:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG182:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@__cxx_global_var_init.4
-// DEBUG1-SAME: () #[[ATTR0]] !dbg [[DBG184:![0-9]+]] {
+// DEBUG1-SAME: () #[[ATTR0]] !dbg [[DBG183:![0-9]+]] {
// DEBUG1-NEXT: entry:
-// DEBUG1-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG185:![0-9]+]]
-// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG187:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG188:![0-9]+]]
+// DEBUG1-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG184:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG186:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG187:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S2C1Ei
-// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG189:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG188:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META190:![0-9]+]], metadata !DIExpression()), !dbg [[DBG192:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META189:![0-9]+]], metadata !DIExpression()), !dbg [[DBG191:![0-9]+]]
// DEBUG1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META193:![0-9]+]], metadata !DIExpression()), !dbg [[DBG194:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META192:![0-9]+]], metadata !DIExpression()), !dbg [[DBG193:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG195:![0-9]+]]
-// DEBUG1-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG195]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG196:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG194:![0-9]+]]
+// DEBUG1-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG194]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG195:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S2D1Ev
-// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG197:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG196:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META198:![0-9]+]], metadata !DIExpression()), !dbg [[DBG199:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META197:![0-9]+]], metadata !DIExpression()), !dbg [[DBG198:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR4]], !dbg [[DBG200:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG201:![0-9]+]]
+// DEBUG1-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR4]], !dbg [[DBG199:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG200:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S2C2Ei
-// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG202:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG201:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META203:![0-9]+]], metadata !DIExpression()), !dbg [[DBG204:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META202:![0-9]+]], metadata !DIExpression()), !dbg [[DBG203:![0-9]+]]
// DEBUG1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META205:![0-9]+]], metadata !DIExpression()), !dbg [[DBG206:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META204:![0-9]+]], metadata !DIExpression()), !dbg [[DBG205:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG207:![0-9]+]]
-// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG208:![0-9]+]]
-// DEBUG1-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG207]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG209:![0-9]+]]
+// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG206:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG207:![0-9]+]]
+// DEBUG1-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG206]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG208:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S2D2Ev
-// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG210:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG209:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META211:![0-9]+]], metadata !DIExpression()), !dbg [[DBG212:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META210:![0-9]+]], metadata !DIExpression()), !dbg [[DBG211:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG213:![0-9]+]]
-// DEBUG1-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG215:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG216:![0-9]+]]
+// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG212:![0-9]+]]
+// DEBUG1-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG214:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG215:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@__cxx_global_var_init.5
-// DEBUG1-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG217:![0-9]+]] {
+// DEBUG1-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG216:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[ARRAYINIT_ENDOFINIT:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[ARRAYINIT_ENDOFINIT1:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: [[ARRAYINIT_ENDOFINIT5:%.*]] = alloca ptr, align 8
-// DEBUG1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG218:![0-9]+]]
-// DEBUG1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG220:![0-9]+]]
+// DEBUG1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG217:![0-9]+]]
+// DEBUG1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG219:![0-9]+]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// DEBUG1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG221:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG220:![0-9]+]]
// DEBUG1: invoke.cont:
-// DEBUG1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG220]]
+// DEBUG1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG219]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// DEBUG1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG222:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG221:![0-9]+]]
// DEBUG1: invoke.cont2:
-// DEBUG1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG220]]
+// DEBUG1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG219]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// DEBUG1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG223:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG222:![0-9]+]]
// DEBUG1: invoke.cont3:
-// DEBUG1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG218]]
-// DEBUG1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG224:![0-9]+]]
+// DEBUG1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG217]]
+// DEBUG1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG223:![0-9]+]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// DEBUG1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG225:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG224:![0-9]+]]
// DEBUG1: invoke.cont7:
-// DEBUG1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG224]]
+// DEBUG1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG223]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// DEBUG1-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG226:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG225:![0-9]+]]
// DEBUG1: invoke.cont8:
-// DEBUG1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG224]]
+// DEBUG1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG223]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// DEBUG1-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG227:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG226:![0-9]+]]
// DEBUG1: invoke.cont9:
-// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG228:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG228]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG227:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG227]]
// DEBUG1: lpad:
// DEBUG1-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// DEBUG1-NEXT: cleanup, !dbg [[DBG229:![0-9]+]]
-// DEBUG1-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG229]]
-// DEBUG1-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG229]]
-// DEBUG1-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG229]]
-// DEBUG1-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG229]]
-// DEBUG1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG220]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG220]]
-// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG220]]
+// DEBUG1-NEXT: cleanup, !dbg [[DBG228:![0-9]+]]
+// DEBUG1-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG228]]
+// DEBUG1-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG228]]
+// DEBUG1-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG228]]
+// DEBUG1-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG228]]
+// DEBUG1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG219]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG219]]
+// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG219]]
// DEBUG1: arraydestroy.body:
-// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG220]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG220]]
-// DEBUG1-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG220]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG220]]
-// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG220]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG219]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG219]]
+// DEBUG1-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG219]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG219]]
+// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG219]]
// DEBUG1: arraydestroy.done4:
-// DEBUG1-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG220]]
+// DEBUG1-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG219]]
// DEBUG1: lpad6:
// DEBUG1-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// DEBUG1-NEXT: cleanup, !dbg [[DBG229]]
-// DEBUG1-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG229]]
-// DEBUG1-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG229]]
-// DEBUG1-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG229]]
-// DEBUG1-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG229]]
-// DEBUG1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG224]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG224]]
-// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG224]]
+// DEBUG1-NEXT: cleanup, !dbg [[DBG228]]
+// DEBUG1-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG228]]
+// DEBUG1-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG228]]
+// DEBUG1-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG228]]
+// DEBUG1-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG228]]
+// DEBUG1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG223]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG223]]
+// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG223]]
// DEBUG1: arraydestroy.body11:
-// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG224]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG224]]
-// DEBUG1-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR4]], !dbg [[DBG224]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG224]]
-// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG224]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG223]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG223]]
+// DEBUG1-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR4]], !dbg [[DBG223]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG223]]
+// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG223]]
// DEBUG1: arraydestroy.done15:
-// DEBUG1-NEXT: br label [[EHCLEANUP]], !dbg [[DBG224]]
+// DEBUG1-NEXT: br label [[EHCLEANUP]], !dbg [[DBG223]]
// DEBUG1: ehcleanup:
-// DEBUG1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG218]]
-// DEBUG1-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG218]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG218]]
-// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG218]]
+// DEBUG1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG217]]
+// DEBUG1-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG217]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG217]]
+// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG217]]
// DEBUG1: arraydestroy.body17:
-// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG218]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG218]]
-// DEBUG1-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR4]], !dbg [[DBG218]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG218]]
-// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG218]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG217]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG217]]
+// DEBUG1-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR4]], !dbg [[DBG217]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG217]]
+// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG217]]
// DEBUG1: arraydestroy.done21:
-// DEBUG1-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG218]]
+// DEBUG1-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG217]]
// DEBUG1: eh.resume:
-// DEBUG1-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG218]]
-// DEBUG1-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG218]]
-// DEBUG1-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG218]]
-// DEBUG1-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG218]]
-// DEBUG1-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG218]]
+// DEBUG1-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG217]]
+// DEBUG1-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG217]]
+// DEBUG1-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG217]]
+// DEBUG1-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG217]]
+// DEBUG1-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG217]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
-// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG230:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG229:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META233:![0-9]+]], metadata !DIExpression()), !dbg [[DBG234:![0-9]+]]
-// DEBUG1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG234]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META232:![0-9]+]], metadata !DIExpression()), !dbg [[DBG233:![0-9]+]]
+// DEBUG1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG233]]
// DEBUG1: arraydestroy.body:
-// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG234]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG234]]
-// DEBUG1-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG234]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG234]]
-// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG234]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG233]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG233]]
+// DEBUG1-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG233]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG233]]
+// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG233]]
// DEBUG1: arraydestroy.done1:
-// DEBUG1-NEXT: ret void, !dbg [[DBG234]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG233]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@main
@@ -6840,9 +6840,9 @@ int foobar() {
// DEBUG1-NEXT: [[RES:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
-// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB9:[0-9]+]])
+// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB9:[0-9]+]]), !dbg [[DBG234:![0-9]+]]
// DEBUG1-NEXT: store i32 0, ptr [[RETVAL]], align 4
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META235:![0-9]+]], metadata !DIExpression()), !dbg [[DBG236:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META235:![0-9]+]], metadata !DIExpression()), !dbg [[DBG236:![0-9]+]]
// DEBUG1-NEXT: [[TMP1:%.*]] = load atomic i8, ptr @_ZGVZ4mainE2sm acquire, align 8, !dbg [[DBG237:![0-9]+]]
// DEBUG1-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP1]], 0, !dbg [[DBG237]]
// DEBUG1-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG237]], !prof [[PROF238:![0-9]+]]
@@ -6853,76 +6853,76 @@ int foobar() {
// DEBUG1: init:
// DEBUG1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB7:[0-9]+]]), !dbg [[DBG237]]
// DEBUG1-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB7]], ptr @_ZZ4mainE2sm, ptr @.__kmpc_global_ctor_..6, ptr null, ptr @.__kmpc_global_dtor_..7), !dbg [[DBG237]]
-// DEBUG1-NEXT: [[TMP4:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB9]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG239:![0-9]+]]
-// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP4]], i32 0, i32 0, !dbg [[DBG240:![0-9]+]]
-// DEBUG1-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG240]]
+// DEBUG1-NEXT: [[TMP4:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB9]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG234]]
+// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP4]], i32 0, i32 0, !dbg [[DBG239:![0-9]+]]
+// DEBUG1-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG239]]
// DEBUG1-NEXT: invoke void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP5]])
-// DEBUG1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG241:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG240:![0-9]+]]
// DEBUG1: invoke.cont:
// DEBUG1-NEXT: [[TMP6:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG237]]
// DEBUG1-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR4]], !dbg [[DBG237]]
// DEBUG1-NEXT: br label [[INIT_END]], !dbg [[DBG237]]
// DEBUG1: init.end:
-// DEBUG1-NEXT: [[TMP7:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB11:[0-9]+]], i32 [[TMP0]], ptr @_ZN6Static1sE, i64 8, ptr @_ZN6Static1sE.cache.), !dbg [[DBG242:![0-9]+]]
-// DEBUG1-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP7]], i32 0, i32 0, !dbg [[DBG243:![0-9]+]]
-// DEBUG1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG243]]
-// DEBUG1-NEXT: store i32 [[TMP8]], ptr [[RES]], align 4, !dbg [[DBG244:![0-9]+]]
-// DEBUG1-NEXT: [[TMP9:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB13:[0-9]+]], i32 [[TMP0]], ptr @_ZZ4mainE2sm, i64 24, ptr @_ZZ4mainE2sm.cache.), !dbg [[DBG245:![0-9]+]]
-// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[TMP9]], i32 0, i32 0, !dbg [[DBG246:![0-9]+]]
-// DEBUG1-NEXT: [[TMP10:%.*]] = load i32, ptr [[A2]], align 8, !dbg [[DBG246]]
-// DEBUG1-NEXT: [[TMP11:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG247:![0-9]+]]
-// DEBUG1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP10]], !dbg [[DBG247]]
-// DEBUG1-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG247]]
-// DEBUG1-NEXT: [[TMP12:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB15:[0-9]+]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG248:![0-9]+]]
-// DEBUG1-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP12]], i32 0, i32 0, !dbg [[DBG249:![0-9]+]]
-// DEBUG1-NEXT: [[TMP13:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG249]]
-// DEBUG1-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG250:![0-9]+]]
-// DEBUG1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG250]]
-// DEBUG1-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG250]]
-// DEBUG1-NEXT: [[TMP15:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG251:![0-9]+]]
-// DEBUG1-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG252:![0-9]+]]
-// DEBUG1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG252]]
-// DEBUG1-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG252]]
-// DEBUG1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB17:[0-9]+]], i32 [[TMP0]], ptr @gs3, i64 12, ptr @gs3.cache.), !dbg [[DBG253:![0-9]+]]
-// DEBUG1-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP17]], i32 0, i32 0, !dbg [[DBG254:![0-9]+]]
-// DEBUG1-NEXT: [[TMP18:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG254]]
-// DEBUG1-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG255:![0-9]+]]
-// DEBUG1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], [[TMP18]], !dbg [[DBG255]]
-// DEBUG1-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG255]]
-// DEBUG1-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB19:[0-9]+]], i32 [[TMP0]], ptr @arr_x, i64 24, ptr @arr_x.cache.), !dbg [[DBG256:![0-9]+]]
-// DEBUG1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP20]], i64 0, i64 1, !dbg [[DBG256]]
-// DEBUG1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG256]]
-// DEBUG1-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX8]], i32 0, i32 0, !dbg [[DBG257:![0-9]+]]
-// DEBUG1-NEXT: [[TMP21:%.*]] = load i32, ptr [[A9]], align 4, !dbg [[DBG257]]
-// DEBUG1-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG258:![0-9]+]]
-// DEBUG1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG258]]
-// DEBUG1-NEXT: store i32 [[ADD10]], ptr [[RES]], align 4, !dbg [[DBG258]]
-// DEBUG1-NEXT: [[TMP23:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB21:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIiE2stE, i64 4, ptr @_ZN2STIiE2stE.cache.), !dbg [[DBG259:![0-9]+]]
-// DEBUG1-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4, !dbg [[DBG259]]
-// DEBUG1-NEXT: [[TMP25:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG260:![0-9]+]]
-// DEBUG1-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP25]], [[TMP24]], !dbg [[DBG260]]
-// DEBUG1-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG260]]
-// DEBUG1-NEXT: [[TMP26:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB23:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIfE2stE, i64 4, ptr @_ZN2STIfE2stE.cache.), !dbg [[DBG261:![0-9]+]]
-// DEBUG1-NEXT: [[TMP27:%.*]] = load float, ptr [[TMP26]], align 4, !dbg [[DBG261]]
-// DEBUG1-NEXT: [[CONV:%.*]] = fptosi float [[TMP27]] to i32, !dbg [[DBG261]]
-// DEBUG1-NEXT: [[TMP28:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG262:![0-9]+]]
-// DEBUG1-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], [[CONV]], !dbg [[DBG262]]
-// DEBUG1-NEXT: store i32 [[ADD12]], ptr [[RES]], align 4, !dbg [[DBG262]]
-// DEBUG1-NEXT: [[TMP29:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB25:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STI2S4E2stE, i64 8, ptr @_ZN2STI2S4E2stE.cache.), !dbg [[DBG263:![0-9]+]]
-// DEBUG1-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP29]], i32 0, i32 0, !dbg [[DBG264:![0-9]+]]
-// DEBUG1-NEXT: [[TMP30:%.*]] = load i32, ptr [[A13]], align 4, !dbg [[DBG264]]
-// DEBUG1-NEXT: [[TMP31:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG265:![0-9]+]]
-// DEBUG1-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP31]], [[TMP30]], !dbg [[DBG265]]
-// DEBUG1-NEXT: store i32 [[ADD14]], ptr [[RES]], align 4, !dbg [[DBG265]]
-// DEBUG1-NEXT: [[TMP32:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG266:![0-9]+]]
-// DEBUG1-NEXT: ret i32 [[TMP32]], !dbg [[DBG267:![0-9]+]]
+// DEBUG1-NEXT: [[TMP7:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB11:[0-9]+]], i32 [[TMP0]], ptr @_ZN6Static1sE, i64 8, ptr @_ZN6Static1sE.cache.), !dbg [[DBG241:![0-9]+]]
+// DEBUG1-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP7]], i32 0, i32 0, !dbg [[DBG242:![0-9]+]]
+// DEBUG1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG242]]
+// DEBUG1-NEXT: store i32 [[TMP8]], ptr [[RES]], align 4, !dbg [[DBG243:![0-9]+]]
+// DEBUG1-NEXT: [[TMP9:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB13:[0-9]+]], i32 [[TMP0]], ptr @_ZZ4mainE2sm, i64 24, ptr @_ZZ4mainE2sm.cache.), !dbg [[DBG244:![0-9]+]]
+// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[TMP9]], i32 0, i32 0, !dbg [[DBG245:![0-9]+]]
+// DEBUG1-NEXT: [[TMP10:%.*]] = load i32, ptr [[A2]], align 8, !dbg [[DBG245]]
+// DEBUG1-NEXT: [[TMP11:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG246:![0-9]+]]
+// DEBUG1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP10]], !dbg [[DBG246]]
+// DEBUG1-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG246]]
+// DEBUG1-NEXT: [[TMP12:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB15:[0-9]+]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG247:![0-9]+]]
+// DEBUG1-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP12]], i32 0, i32 0, !dbg [[DBG248:![0-9]+]]
+// DEBUG1-NEXT: [[TMP13:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG248]]
+// DEBUG1-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG249:![0-9]+]]
+// DEBUG1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG249]]
+// DEBUG1-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG249]]
+// DEBUG1-NEXT: [[TMP15:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG250:![0-9]+]]
+// DEBUG1-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG251:![0-9]+]]
+// DEBUG1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG251]]
+// DEBUG1-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG251]]
+// DEBUG1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB17:[0-9]+]], i32 [[TMP0]], ptr @gs3, i64 12, ptr @gs3.cache.), !dbg [[DBG252:![0-9]+]]
+// DEBUG1-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP17]], i32 0, i32 0, !dbg [[DBG253:![0-9]+]]
+// DEBUG1-NEXT: [[TMP18:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG253]]
+// DEBUG1-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG254:![0-9]+]]
+// DEBUG1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], [[TMP18]], !dbg [[DBG254]]
+// DEBUG1-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG254]]
+// DEBUG1-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB19:[0-9]+]], i32 [[TMP0]], ptr @arr_x, i64 24, ptr @arr_x.cache.), !dbg [[DBG255:![0-9]+]]
+// DEBUG1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP20]], i64 0, i64 1, !dbg [[DBG255]]
+// DEBUG1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG255]]
+// DEBUG1-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX8]], i32 0, i32 0, !dbg [[DBG256:![0-9]+]]
+// DEBUG1-NEXT: [[TMP21:%.*]] = load i32, ptr [[A9]], align 4, !dbg [[DBG256]]
+// DEBUG1-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG257:![0-9]+]]
+// DEBUG1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG257]]
+// DEBUG1-NEXT: store i32 [[ADD10]], ptr [[RES]], align 4, !dbg [[DBG257]]
+// DEBUG1-NEXT: [[TMP23:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB21:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIiE2stE, i64 4, ptr @_ZN2STIiE2stE.cache.), !dbg [[DBG258:![0-9]+]]
+// DEBUG1-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4, !dbg [[DBG258]]
+// DEBUG1-NEXT: [[TMP25:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG259:![0-9]+]]
+// DEBUG1-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP25]], [[TMP24]], !dbg [[DBG259]]
+// DEBUG1-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG259]]
+// DEBUG1-NEXT: [[TMP26:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB23:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIfE2stE, i64 4, ptr @_ZN2STIfE2stE.cache.), !dbg [[DBG260:![0-9]+]]
+// DEBUG1-NEXT: [[TMP27:%.*]] = load float, ptr [[TMP26]], align 4, !dbg [[DBG260]]
+// DEBUG1-NEXT: [[CONV:%.*]] = fptosi float [[TMP27]] to i32, !dbg [[DBG260]]
+// DEBUG1-NEXT: [[TMP28:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG261:![0-9]+]]
+// DEBUG1-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], [[CONV]], !dbg [[DBG261]]
+// DEBUG1-NEXT: store i32 [[ADD12]], ptr [[RES]], align 4, !dbg [[DBG261]]
+// DEBUG1-NEXT: [[TMP29:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB25:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STI2S4E2stE, i64 8, ptr @_ZN2STI2S4E2stE.cache.), !dbg [[DBG262:![0-9]+]]
+// DEBUG1-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP29]], i32 0, i32 0, !dbg [[DBG263:![0-9]+]]
+// DEBUG1-NEXT: [[TMP30:%.*]] = load i32, ptr [[A13]], align 4, !dbg [[DBG263]]
+// DEBUG1-NEXT: [[TMP31:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG264:![0-9]+]]
+// DEBUG1-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP31]], [[TMP30]], !dbg [[DBG264]]
+// DEBUG1-NEXT: store i32 [[ADD14]], ptr [[RES]], align 4, !dbg [[DBG264]]
+// DEBUG1-NEXT: [[TMP32:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG265:![0-9]+]]
+// DEBUG1-NEXT: ret i32 [[TMP32]], !dbg [[DBG266:![0-9]+]]
// DEBUG1: lpad:
// DEBUG1-NEXT: [[TMP33:%.*]] = landingpad { ptr, i32 }
-// DEBUG1-NEXT: cleanup, !dbg [[DBG268:![0-9]+]]
-// DEBUG1-NEXT: [[TMP34:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 0, !dbg [[DBG268]]
-// DEBUG1-NEXT: store ptr [[TMP34]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG268]]
-// DEBUG1-NEXT: [[TMP35:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 1, !dbg [[DBG268]]
-// DEBUG1-NEXT: store i32 [[TMP35]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG268]]
+// DEBUG1-NEXT: cleanup, !dbg [[DBG267:![0-9]+]]
+// DEBUG1-NEXT: [[TMP34:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 0, !dbg [[DBG267]]
+// DEBUG1-NEXT: store ptr [[TMP34]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG267]]
+// DEBUG1-NEXT: [[TMP35:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 1, !dbg [[DBG267]]
+// DEBUG1-NEXT: store i32 [[TMP35]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG267]]
// DEBUG1-NEXT: call void @__cxa_guard_abort(ptr @_ZGVZ4mainE2sm) #[[ATTR4]], !dbg [[DBG237]]
// DEBUG1-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG237]]
// DEBUG1: eh.resume:
@@ -6934,436 +6934,436 @@ int foobar() {
//
//
// DEBUG1-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_..6
-// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG269:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG268:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
-// DEBUG1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB5:[0-9]+]])
+// DEBUG1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB5:[0-9]+]]), !dbg [[DBG269:![0-9]+]]
// DEBUG1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META270:![0-9]+]], metadata !DIExpression()), !dbg [[DBG271:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META270:![0-9]+]], metadata !DIExpression()), !dbg [[DBG271:![0-9]+]]
// DEBUG1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG272:![0-9]+]]
-// DEBUG1-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB5]], i32 [[TMP1]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG273:![0-9]+]]
-// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP3]], i32 0, i32 0, !dbg [[DBG274:![0-9]+]]
-// DEBUG1-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG274]]
-// DEBUG1-NEXT: call void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) [[TMP2]], i32 noundef [[TMP4]]), !dbg [[DBG275:![0-9]+]]
+// DEBUG1-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB5]], i32 [[TMP1]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG269]]
+// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP3]], i32 0, i32 0, !dbg [[DBG273:![0-9]+]]
+// DEBUG1-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG273]]
+// DEBUG1-NEXT: call void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) [[TMP2]], i32 noundef [[TMP4]]), !dbg [[DBG274:![0-9]+]]
// DEBUG1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG272]]
// DEBUG1-NEXT: ret ptr [[TMP5]], !dbg [[DBG272]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC1Ei
-// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG276:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG275:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META277:![0-9]+]], metadata !DIExpression()), !dbg [[DBG279:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META276:![0-9]+]], metadata !DIExpression()), !dbg [[DBG278:![0-9]+]]
// DEBUG1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META280:![0-9]+]], metadata !DIExpression()), !dbg [[DBG281:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META279:![0-9]+]], metadata !DIExpression()), !dbg [[DBG280:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG282:![0-9]+]]
-// DEBUG1-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG282]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG283:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG281:![0-9]+]]
+// DEBUG1-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG281]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG282:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_..7
-// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG284:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG283:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META285:![0-9]+]], metadata !DIExpression()), !dbg [[DBG286:![0-9]+]]
-// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG286]]
-// DEBUG1-NEXT: call void @_ZZ4mainEN5SmainD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[TMP1]]) #[[ATTR4]], !dbg [[DBG286]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG287:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META284:![0-9]+]], metadata !DIExpression()), !dbg [[DBG285:![0-9]+]]
+// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG285]]
+// DEBUG1-NEXT: call void @_ZZ4mainEN5SmainD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[TMP1]]) #[[ATTR4]], !dbg [[DBG285]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG286:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD1Ev
-// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG288:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG287:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META289:![0-9]+]], metadata !DIExpression()), !dbg [[DBG290:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META288:![0-9]+]], metadata !DIExpression()), !dbg [[DBG289:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR4]], !dbg [[DBG291:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG292:![0-9]+]]
+// DEBUG1-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR4]], !dbg [[DBG290:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG291:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC2Ei
-// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG293:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG292:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META294:![0-9]+]], metadata !DIExpression()), !dbg [[DBG295:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META293:![0-9]+]], metadata !DIExpression()), !dbg [[DBG294:![0-9]+]]
// DEBUG1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META296:![0-9]+]], metadata !DIExpression()), !dbg [[DBG297:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META295:![0-9]+]], metadata !DIExpression()), !dbg [[DBG296:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG298:![0-9]+]]
-// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG299:![0-9]+]]
-// DEBUG1-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG298]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG300:![0-9]+]]
+// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG297:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG298:![0-9]+]]
+// DEBUG1-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG297]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG299:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD2Ev
-// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG301:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG300:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META302:![0-9]+]], metadata !DIExpression()), !dbg [[DBG303:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META301:![0-9]+]], metadata !DIExpression()), !dbg [[DBG302:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG304:![0-9]+]]
-// DEBUG1-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG306:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG307:![0-9]+]]
+// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG303:![0-9]+]]
+// DEBUG1-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG305:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG306:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_Z6foobarv
-// DEBUG1-SAME: () #[[ATTR6:[0-9]+]] !dbg [[DBG308:![0-9]+]] {
+// DEBUG1-SAME: () #[[ATTR3]] !dbg [[DBG307:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[RES:%.*]] = alloca i32, align 4
-// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB27:[0-9]+]])
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META309:![0-9]+]], metadata !DIExpression()), !dbg [[DBG310:![0-9]+]]
-// DEBUG1-NEXT: [[TMP1:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB27]], i32 [[TMP0]], ptr @_ZN6Static1sE, i64 8, ptr @_ZN6Static1sE.cache.), !dbg [[DBG311:![0-9]+]]
-// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP1]], i32 0, i32 0, !dbg [[DBG312:![0-9]+]]
-// DEBUG1-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG312]]
-// DEBUG1-NEXT: store i32 [[TMP2]], ptr [[RES]], align 4, !dbg [[DBG313:![0-9]+]]
-// DEBUG1-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB29:[0-9]+]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG314:![0-9]+]]
-// DEBUG1-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP3]], i32 0, i32 0, !dbg [[DBG315:![0-9]+]]
-// DEBUG1-NEXT: [[TMP4:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG315]]
-// DEBUG1-NEXT: [[TMP5:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG316:![0-9]+]]
-// DEBUG1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], [[TMP4]], !dbg [[DBG316]]
-// DEBUG1-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG316]]
-// DEBUG1-NEXT: [[TMP6:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG317:![0-9]+]]
-// DEBUG1-NEXT: [[TMP7:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG318:![0-9]+]]
-// DEBUG1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP7]], [[TMP6]], !dbg [[DBG318]]
-// DEBUG1-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG318]]
-// DEBUG1-NEXT: [[TMP8:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB31:[0-9]+]], i32 [[TMP0]], ptr @gs3, i64 12, ptr @gs3.cache.), !dbg [[DBG319:![0-9]+]]
-// DEBUG1-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP8]], i32 0, i32 0, !dbg [[DBG320:![0-9]+]]
-// DEBUG1-NEXT: [[TMP9:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG320]]
-// DEBUG1-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG321:![0-9]+]]
-// DEBUG1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG321]]
-// DEBUG1-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG321]]
-// DEBUG1-NEXT: [[TMP11:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB33:[0-9]+]], i32 [[TMP0]], ptr @arr_x, i64 24, ptr @arr_x.cache.), !dbg [[DBG322:![0-9]+]]
-// DEBUG1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP11]], i64 0, i64 1, !dbg [[DBG322]]
-// DEBUG1-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG322]]
-// DEBUG1-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX5]], i32 0, i32 0, !dbg [[DBG323:![0-9]+]]
-// DEBUG1-NEXT: [[TMP12:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG323]]
-// DEBUG1-NEXT: [[TMP13:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG324:![0-9]+]]
-// DEBUG1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP13]], [[TMP12]], !dbg [[DBG324]]
-// DEBUG1-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG324]]
-// DEBUG1-NEXT: [[TMP14:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB35:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIiE2stE, i64 4, ptr @_ZN2STIiE2stE.cache.), !dbg [[DBG325:![0-9]+]]
-// DEBUG1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4, !dbg [[DBG325]]
-// DEBUG1-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG326:![0-9]+]]
-// DEBUG1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG326]]
-// DEBUG1-NEXT: store i32 [[ADD8]], ptr [[RES]], align 4, !dbg [[DBG326]]
-// DEBUG1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB37:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIfE2stE, i64 4, ptr @_ZN2STIfE2stE.cache.), !dbg [[DBG327:![0-9]+]]
-// DEBUG1-NEXT: [[TMP18:%.*]] = load float, ptr [[TMP17]], align 4, !dbg [[DBG327]]
-// DEBUG1-NEXT: [[CONV:%.*]] = fptosi float [[TMP18]] to i32, !dbg [[DBG327]]
-// DEBUG1-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG328:![0-9]+]]
-// DEBUG1-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP19]], [[CONV]], !dbg [[DBG328]]
-// DEBUG1-NEXT: store i32 [[ADD9]], ptr [[RES]], align 4, !dbg [[DBG328]]
-// DEBUG1-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB39:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STI2S4E2stE, i64 8, ptr @_ZN2STI2S4E2stE.cache.), !dbg [[DBG329:![0-9]+]]
-// DEBUG1-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP20]], i32 0, i32 0, !dbg [[DBG330:![0-9]+]]
-// DEBUG1-NEXT: [[TMP21:%.*]] = load i32, ptr [[A10]], align 4, !dbg [[DBG330]]
-// DEBUG1-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG331:![0-9]+]]
-// DEBUG1-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG331]]
-// DEBUG1-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG331]]
-// DEBUG1-NEXT: [[TMP23:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG332:![0-9]+]]
-// DEBUG1-NEXT: ret i32 [[TMP23]], !dbg [[DBG333:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB27:[0-9]+]]), !dbg [[DBG308:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META309:![0-9]+]], metadata !DIExpression()), !dbg [[DBG310:![0-9]+]]
+// DEBUG1-NEXT: [[TMP1:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB27]], i32 [[TMP0]], ptr @_ZN6Static1sE, i64 8, ptr @_ZN6Static1sE.cache.), !dbg [[DBG308]]
+// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP1]], i32 0, i32 0, !dbg [[DBG311:![0-9]+]]
+// DEBUG1-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG311]]
+// DEBUG1-NEXT: store i32 [[TMP2]], ptr [[RES]], align 4, !dbg [[DBG312:![0-9]+]]
+// DEBUG1-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB29:[0-9]+]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG313:![0-9]+]]
+// DEBUG1-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP3]], i32 0, i32 0, !dbg [[DBG314:![0-9]+]]
+// DEBUG1-NEXT: [[TMP4:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG314]]
+// DEBUG1-NEXT: [[TMP5:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG315:![0-9]+]]
+// DEBUG1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], [[TMP4]], !dbg [[DBG315]]
+// DEBUG1-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG315]]
+// DEBUG1-NEXT: [[TMP6:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG316:![0-9]+]]
+// DEBUG1-NEXT: [[TMP7:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG317:![0-9]+]]
+// DEBUG1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP7]], [[TMP6]], !dbg [[DBG317]]
+// DEBUG1-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG317]]
+// DEBUG1-NEXT: [[TMP8:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB31:[0-9]+]], i32 [[TMP0]], ptr @gs3, i64 12, ptr @gs3.cache.), !dbg [[DBG318:![0-9]+]]
+// DEBUG1-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP8]], i32 0, i32 0, !dbg [[DBG319:![0-9]+]]
+// DEBUG1-NEXT: [[TMP9:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG319]]
+// DEBUG1-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG320:![0-9]+]]
+// DEBUG1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG320]]
+// DEBUG1-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG320]]
+// DEBUG1-NEXT: [[TMP11:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB33:[0-9]+]], i32 [[TMP0]], ptr @arr_x, i64 24, ptr @arr_x.cache.), !dbg [[DBG321:![0-9]+]]
+// DEBUG1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP11]], i64 0, i64 1, !dbg [[DBG321]]
+// DEBUG1-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG321]]
+// DEBUG1-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX5]], i32 0, i32 0, !dbg [[DBG322:![0-9]+]]
+// DEBUG1-NEXT: [[TMP12:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG322]]
+// DEBUG1-NEXT: [[TMP13:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG323:![0-9]+]]
+// DEBUG1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP13]], [[TMP12]], !dbg [[DBG323]]
+// DEBUG1-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG323]]
+// DEBUG1-NEXT: [[TMP14:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB35:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIiE2stE, i64 4, ptr @_ZN2STIiE2stE.cache.), !dbg [[DBG324:![0-9]+]]
+// DEBUG1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4, !dbg [[DBG324]]
+// DEBUG1-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG325:![0-9]+]]
+// DEBUG1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG325]]
+// DEBUG1-NEXT: store i32 [[ADD8]], ptr [[RES]], align 4, !dbg [[DBG325]]
+// DEBUG1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB37:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIfE2stE, i64 4, ptr @_ZN2STIfE2stE.cache.), !dbg [[DBG326:![0-9]+]]
+// DEBUG1-NEXT: [[TMP18:%.*]] = load float, ptr [[TMP17]], align 4, !dbg [[DBG326]]
+// DEBUG1-NEXT: [[CONV:%.*]] = fptosi float [[TMP18]] to i32, !dbg [[DBG326]]
+// DEBUG1-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG327:![0-9]+]]
+// DEBUG1-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP19]], [[CONV]], !dbg [[DBG327]]
+// DEBUG1-NEXT: store i32 [[ADD9]], ptr [[RES]], align 4, !dbg [[DBG327]]
+// DEBUG1-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB39:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STI2S4E2stE, i64 8, ptr @_ZN2STI2S4E2stE.cache.), !dbg [[DBG328:![0-9]+]]
+// DEBUG1-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP20]], i32 0, i32 0, !dbg [[DBG329:![0-9]+]]
+// DEBUG1-NEXT: [[TMP21:%.*]] = load i32, ptr [[A10]], align 4, !dbg [[DBG329]]
+// DEBUG1-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG330:![0-9]+]]
+// DEBUG1-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG330]]
+// DEBUG1-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG330]]
+// DEBUG1-NEXT: [[TMP23:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG331:![0-9]+]]
+// DEBUG1-NEXT: ret i32 [[TMP23]], !dbg [[DBG332:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@__cxx_global_var_init.8
-// DEBUG1-SAME: () #[[ATTR0]] comdat($_ZN2STI2S4E2stE) !dbg [[DBG334:![0-9]+]] {
+// DEBUG1-SAME: () #[[ATTR0]] comdat($_ZN2STI2S4E2stE) !dbg [[DBG333:![0-9]+]] {
// DEBUG1-NEXT: entry:
-// DEBUG1-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG335:![0-9]+]]
-// DEBUG1-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG335]]
-// DEBUG1-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG335]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG334:![0-9]+]]
+// DEBUG1-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG334]]
+// DEBUG1-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG334]]
// DEBUG1: init.check:
-// DEBUG1-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG335]]
-// DEBUG1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB41:[0-9]+]]), !dbg [[DBG335]]
-// DEBUG1-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB41]], ptr @_ZN2STI2S4E2stE, ptr @.__kmpc_global_ctor_..9, ptr null, ptr @.__kmpc_global_dtor_..10), !dbg [[DBG335]]
-// DEBUG1-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG336:![0-9]+]]
-// DEBUG1-NEXT: [[TMP2:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG335]]
-// DEBUG1-NEXT: br label [[INIT_END]], !dbg [[DBG335]]
+// DEBUG1-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG334]]
+// DEBUG1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB41:[0-9]+]]), !dbg [[DBG334]]
+// DEBUG1-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB41]], ptr @_ZN2STI2S4E2stE, ptr @.__kmpc_global_ctor_..9, ptr null, ptr @.__kmpc_global_dtor_..10), !dbg [[DBG334]]
+// DEBUG1-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG335:![0-9]+]]
+// DEBUG1-NEXT: [[TMP2:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG334]]
+// DEBUG1-NEXT: br label [[INIT_END]], !dbg [[DBG334]]
// DEBUG1: init.end:
-// DEBUG1-NEXT: ret void, !dbg [[DBG338:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG337:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_..9
-// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG339:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG338:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META340:![0-9]+]], metadata !DIExpression()), !dbg [[DBG341:![0-9]+]]
-// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG342:![0-9]+]]
-// DEBUG1-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[TMP1]], i32 noundef 23), !dbg [[DBG343:![0-9]+]]
-// DEBUG1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG342]]
-// DEBUG1-NEXT: ret ptr [[TMP2]], !dbg [[DBG342]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META339:![0-9]+]], metadata !DIExpression()), !dbg [[DBG340:![0-9]+]]
+// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG341:![0-9]+]]
+// DEBUG1-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[TMP1]], i32 noundef 23), !dbg [[DBG342:![0-9]+]]
+// DEBUG1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG341]]
+// DEBUG1-NEXT: ret ptr [[TMP2]], !dbg [[DBG341]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S4C1Ei
-// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG344:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG343:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META345:![0-9]+]], metadata !DIExpression()), !dbg [[DBG347:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META344:![0-9]+]], metadata !DIExpression()), !dbg [[DBG346:![0-9]+]]
// DEBUG1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META348:![0-9]+]], metadata !DIExpression()), !dbg [[DBG349:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META347:![0-9]+]], metadata !DIExpression()), !dbg [[DBG348:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG350:![0-9]+]]
-// DEBUG1-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG350]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG351:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG349:![0-9]+]]
+// DEBUG1-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG349]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG350:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_..10
-// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG352:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG351:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META353:![0-9]+]], metadata !DIExpression()), !dbg [[DBG354:![0-9]+]]
-// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG354]]
-// DEBUG1-NEXT: call void @_ZN2S4D1Ev(ptr noundef nonnull align 4 dereferenceable(8) [[TMP1]]) #[[ATTR4]], !dbg [[DBG354]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG355:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META352:![0-9]+]], metadata !DIExpression()), !dbg [[DBG353:![0-9]+]]
+// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG353]]
+// DEBUG1-NEXT: call void @_ZN2S4D1Ev(ptr noundef nonnull align 4 dereferenceable(8) [[TMP1]]) #[[ATTR4]], !dbg [[DBG353]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG354:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S4D1Ev
-// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG356:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG355:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META357:![0-9]+]], metadata !DIExpression()), !dbg [[DBG358:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META356:![0-9]+]], metadata !DIExpression()), !dbg [[DBG357:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR4]], !dbg [[DBG359:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG360:![0-9]+]]
+// DEBUG1-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR4]], !dbg [[DBG358:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG359:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S4C2Ei
-// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG361:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG360:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META362:![0-9]+]], metadata !DIExpression()), !dbg [[DBG363:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META361:![0-9]+]], metadata !DIExpression()), !dbg [[DBG362:![0-9]+]]
// DEBUG1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META364:![0-9]+]], metadata !DIExpression()), !dbg [[DBG365:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META363:![0-9]+]], metadata !DIExpression()), !dbg [[DBG364:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG366:![0-9]+]]
-// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG367:![0-9]+]]
-// DEBUG1-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG366]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG368:![0-9]+]]
+// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG365:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG366:![0-9]+]]
+// DEBUG1-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG365]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG367:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S4D2Ev
-// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG369:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG368:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META370:![0-9]+]], metadata !DIExpression()), !dbg [[DBG371:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META369:![0-9]+]], metadata !DIExpression()), !dbg [[DBG370:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG372:![0-9]+]]
-// DEBUG1-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG374:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG375:![0-9]+]]
+// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG371:![0-9]+]]
+// DEBUG1-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG373:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG374:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_threadprivate_codegen.cpp
-// DEBUG1-SAME: () #[[ATTR0]] !dbg [[DBG376:![0-9]+]] {
+// DEBUG1-SAME: () #[[ATTR0]] !dbg [[DBG375:![0-9]+]] {
// DEBUG1-NEXT: entry:
-// DEBUG1-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG377:![0-9]+]]
-// DEBUG1-NEXT: call void @.__omp_threadprivate_init_.(), !dbg [[DBG377]]
-// DEBUG1-NEXT: call void @__cxx_global_var_init.4(), !dbg [[DBG377]]
-// DEBUG1-NEXT: call void @__cxx_global_var_init.5(), !dbg [[DBG377]]
-// DEBUG1-NEXT: call void @.__omp_threadprivate_init_..3(), !dbg [[DBG377]]
+// DEBUG1-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG376:![0-9]+]]
+// DEBUG1-NEXT: call void @.__omp_threadprivate_init_.(), !dbg [[DBG376]]
+// DEBUG1-NEXT: call void @__cxx_global_var_init.4(), !dbg [[DBG376]]
+// DEBUG1-NEXT: call void @__cxx_global_var_init.5(), !dbg [[DBG376]]
+// DEBUG1-NEXT: call void @.__omp_threadprivate_init_..3(), !dbg [[DBG376]]
// DEBUG1-NEXT: ret void
//
//
// DEBUG2-LABEL: define {{[^@]+}}@__cxx_global_var_init
// DEBUG2-SAME: () #[[ATTR0:[0-9]+]] !dbg [[DBG116:![0-9]+]] {
// DEBUG2-NEXT: entry:
-// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]]), !dbg [[DBG120:![0-9]+]]
-// DEBUG2-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB1]], ptr @_ZL3gs1, ptr @.__kmpc_global_ctor_., ptr null, ptr @.__kmpc_global_dtor_.), !dbg [[DBG120]]
-// DEBUG2-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG121:![0-9]+]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR4:[0-9]+]], !dbg [[DBG120]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG123:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]]), !dbg [[DBG119:![0-9]+]]
+// DEBUG2-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB1]], ptr @_ZL3gs1, ptr @.__kmpc_global_ctor_., ptr null, ptr @.__kmpc_global_dtor_.), !dbg [[DBG119]]
+// DEBUG2-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG120:![0-9]+]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR4:[0-9]+]], !dbg [[DBG119]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG122:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_.
-// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG124:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG123:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META126:![0-9]+]], metadata !DIExpression()), !dbg [[DBG128:![0-9]+]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG129:![0-9]+]]
-// DEBUG2-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[TMP1]], i32 noundef 5), !dbg [[DBG130:![0-9]+]]
-// DEBUG2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG129]]
-// DEBUG2-NEXT: ret ptr [[TMP2]], !dbg [[DBG129]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META125:![0-9]+]], metadata !DIExpression()), !dbg [[DBG127:![0-9]+]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG128:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[TMP1]], i32 noundef 5), !dbg [[DBG129:![0-9]+]]
+// DEBUG2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG128]]
+// DEBUG2-NEXT: ret ptr [[TMP2]], !dbg [[DBG128]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S1C1Ei
-// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 !dbg [[DBG131:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 !dbg [[DBG130:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META132:![0-9]+]], metadata !DIExpression()), !dbg [[DBG134:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META131:![0-9]+]], metadata !DIExpression()), !dbg [[DBG133:![0-9]+]]
// DEBUG2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META135:![0-9]+]], metadata !DIExpression()), !dbg [[DBG136:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META134:![0-9]+]], metadata !DIExpression()), !dbg [[DBG135:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG137:![0-9]+]]
-// DEBUG2-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG137]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG138:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG136:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG136]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG137:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_.
-// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG139:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG138:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META140:![0-9]+]], metadata !DIExpression()), !dbg [[DBG141:![0-9]+]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG141]]
-// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TMP1]]) #[[ATTR4]], !dbg [[DBG141]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG142:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META139:![0-9]+]], metadata !DIExpression()), !dbg [[DBG140:![0-9]+]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG140]]
+// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TMP1]]) #[[ATTR4]], !dbg [[DBG140]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG141:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S1D1Ev
-// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR3:[0-9]+]] comdat align 2 !dbg [[DBG143:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR3:[0-9]+]] comdat align 2 !dbg [[DBG142:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META144:![0-9]+]], metadata !DIExpression()), !dbg [[DBG145:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META143:![0-9]+]], metadata !DIExpression()), !dbg [[DBG144:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]], !dbg [[DBG146:![0-9]+]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG147:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]], !dbg [[DBG145:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG146:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
-// DEBUG2-SAME: () #[[ATTR0]] !dbg [[DBG148:![0-9]+]] {
+// DEBUG2-SAME: () #[[ATTR0]] !dbg [[DBG147:![0-9]+]] {
// DEBUG2-NEXT: entry:
-// DEBUG2-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG149:![0-9]+]]
-// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG151:![0-9]+]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG152:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG148:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG150:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG151:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S2C1Ei
-// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG153:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG152:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META154:![0-9]+]], metadata !DIExpression()), !dbg [[DBG156:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META153:![0-9]+]], metadata !DIExpression()), !dbg [[DBG155:![0-9]+]]
// DEBUG2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META157:![0-9]+]], metadata !DIExpression()), !dbg [[DBG158:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META156:![0-9]+]], metadata !DIExpression()), !dbg [[DBG157:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG159:![0-9]+]]
-// DEBUG2-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG159]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG160:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG158:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG158]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG159:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S2D1Ev
-// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG161:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG160:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META162:![0-9]+]], metadata !DIExpression()), !dbg [[DBG163:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META161:![0-9]+]], metadata !DIExpression()), !dbg [[DBG162:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR4]], !dbg [[DBG164:![0-9]+]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG165:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR4]], !dbg [[DBG163:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG164:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
-// DEBUG2-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG166:![0-9]+]] {
+// DEBUG2-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG165:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[ARRAYINIT_ENDOFINIT:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[ARRAYINIT_ENDOFINIT1:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: [[ARRAYINIT_ENDOFINIT5:%.*]] = alloca ptr, align 8
-// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]]), !dbg [[DBG167:![0-9]+]]
-// DEBUG2-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB3]], ptr @arr_x, ptr @.__kmpc_global_ctor_..3, ptr null, ptr @.__kmpc_global_dtor_..4), !dbg [[DBG167]]
-// DEBUG2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG168:![0-9]+]]
-// DEBUG2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG170:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]]), !dbg [[DBG166:![0-9]+]]
+// DEBUG2-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB3]], ptr @arr_x, ptr @.__kmpc_global_ctor_..3, ptr null, ptr @.__kmpc_global_dtor_..4), !dbg [[DBG166]]
+// DEBUG2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG167:![0-9]+]]
+// DEBUG2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG169:![0-9]+]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// DEBUG2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG171:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG170:![0-9]+]]
// DEBUG2: invoke.cont:
-// DEBUG2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG170]]
+// DEBUG2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG169]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// DEBUG2-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG172:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG171:![0-9]+]]
// DEBUG2: invoke.cont2:
-// DEBUG2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG170]]
+// DEBUG2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG169]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// DEBUG2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG173:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG172:![0-9]+]]
// DEBUG2: invoke.cont3:
-// DEBUG2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG168]]
-// DEBUG2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG174:![0-9]+]]
+// DEBUG2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG167]]
+// DEBUG2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG173:![0-9]+]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// DEBUG2-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG175:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG174:![0-9]+]]
// DEBUG2: invoke.cont7:
-// DEBUG2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG174]]
+// DEBUG2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG173]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// DEBUG2-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG176:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG175:![0-9]+]]
// DEBUG2: invoke.cont8:
-// DEBUG2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG174]]
+// DEBUG2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG173]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// DEBUG2-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG177:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG176:![0-9]+]]
// DEBUG2: invoke.cont9:
-// DEBUG2-NEXT: [[TMP1:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG167]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG167]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG166]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG166]]
// DEBUG2: lpad:
// DEBUG2-NEXT: [[TMP2:%.*]] = landingpad { ptr, i32 }
-// DEBUG2-NEXT: cleanup, !dbg [[DBG178:![0-9]+]]
-// DEBUG2-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP2]], 0, !dbg [[DBG178]]
-// DEBUG2-NEXT: store ptr [[TMP3]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG178]]
-// DEBUG2-NEXT: [[TMP4:%.*]] = extractvalue { ptr, i32 } [[TMP2]], 1, !dbg [[DBG178]]
-// DEBUG2-NEXT: store i32 [[TMP4]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG178]]
-// DEBUG2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG170]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP5]], !dbg [[DBG170]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG170]]
+// DEBUG2-NEXT: cleanup, !dbg [[DBG177:![0-9]+]]
+// DEBUG2-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP2]], 0, !dbg [[DBG177]]
+// DEBUG2-NEXT: store ptr [[TMP3]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG177]]
+// DEBUG2-NEXT: [[TMP4:%.*]] = extractvalue { ptr, i32 } [[TMP2]], 1, !dbg [[DBG177]]
+// DEBUG2-NEXT: store i32 [[TMP4]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG177]]
+// DEBUG2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG169]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP5]], !dbg [[DBG169]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG169]]
// DEBUG2: arraydestroy.body:
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG170]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG170]]
-// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG170]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG170]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG170]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG169]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG169]]
+// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG169]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG169]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG169]]
// DEBUG2: arraydestroy.done4:
-// DEBUG2-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG170]]
+// DEBUG2-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG169]]
// DEBUG2: lpad6:
// DEBUG2-NEXT: [[TMP6:%.*]] = landingpad { ptr, i32 }
-// DEBUG2-NEXT: cleanup, !dbg [[DBG178]]
-// DEBUG2-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP6]], 0, !dbg [[DBG178]]
-// DEBUG2-NEXT: store ptr [[TMP7]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG178]]
-// DEBUG2-NEXT: [[TMP8:%.*]] = extractvalue { ptr, i32 } [[TMP6]], 1, !dbg [[DBG178]]
-// DEBUG2-NEXT: store i32 [[TMP8]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG178]]
-// DEBUG2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG174]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP9]], !dbg [[DBG174]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG174]]
+// DEBUG2-NEXT: cleanup, !dbg [[DBG177]]
+// DEBUG2-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP6]], 0, !dbg [[DBG177]]
+// DEBUG2-NEXT: store ptr [[TMP7]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG177]]
+// DEBUG2-NEXT: [[TMP8:%.*]] = extractvalue { ptr, i32 } [[TMP6]], 1, !dbg [[DBG177]]
+// DEBUG2-NEXT: store i32 [[TMP8]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG177]]
+// DEBUG2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG173]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP9]], !dbg [[DBG173]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG173]]
// DEBUG2: arraydestroy.body11:
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP9]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG174]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG174]]
-// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR4]], !dbg [[DBG174]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG174]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG174]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP9]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG173]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG173]]
+// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR4]], !dbg [[DBG173]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG173]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG173]]
// DEBUG2: arraydestroy.done15:
-// DEBUG2-NEXT: br label [[EHCLEANUP]], !dbg [[DBG174]]
+// DEBUG2-NEXT: br label [[EHCLEANUP]], !dbg [[DBG173]]
// DEBUG2: ehcleanup:
-// DEBUG2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG168]]
-// DEBUG2-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP10]], i64 0, i64 0, !dbg [[DBG168]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG168]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG168]]
+// DEBUG2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG167]]
+// DEBUG2-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP10]], i64 0, i64 0, !dbg [[DBG167]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG167]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG167]]
// DEBUG2: arraydestroy.body17:
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG168]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG168]]
-// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR4]], !dbg [[DBG168]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG168]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG168]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG167]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG167]]
+// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR4]], !dbg [[DBG167]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG167]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG167]]
// DEBUG2: arraydestroy.done21:
-// DEBUG2-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG168]]
+// DEBUG2-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG167]]
// DEBUG2: eh.resume:
-// DEBUG2-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG168]]
-// DEBUG2-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG168]]
-// DEBUG2-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG168]]
-// DEBUG2-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG168]]
-// DEBUG2-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG168]]
+// DEBUG2-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG167]]
+// DEBUG2-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG167]]
+// DEBUG2-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG167]]
+// DEBUG2-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG167]]
+// DEBUG2-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG167]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_..3
-// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG179:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG178:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[ARRAYINIT_ENDOFINIT:%.*]] = alloca ptr, align 8
@@ -7372,136 +7372,136 @@ int foobar() {
// DEBUG2-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: [[ARRAYINIT_ENDOFINIT9:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META180:![0-9]+]], metadata !DIExpression()), !dbg [[DBG181:![0-9]+]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG182:![0-9]+]]
-// DEBUG2-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP1]], i64 0, i64 0, !dbg [[DBG183:![0-9]+]]
-// DEBUG2-NEXT: store ptr [[ARRAYINIT_BEGIN]], ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG183]]
-// DEBUG2-NEXT: [[ARRAYINIT_BEGIN1:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 0, i64 0, !dbg [[DBG184:![0-9]+]]
-// DEBUG2-NEXT: store ptr [[ARRAYINIT_BEGIN1]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG184]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META179:![0-9]+]], metadata !DIExpression()), !dbg [[DBG180:![0-9]+]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG181:![0-9]+]]
+// DEBUG2-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP1]], i64 0, i64 0, !dbg [[DBG182:![0-9]+]]
+// DEBUG2-NEXT: store ptr [[ARRAYINIT_BEGIN]], ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG182]]
+// DEBUG2-NEXT: [[ARRAYINIT_BEGIN1:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 0, i64 0, !dbg [[DBG183:![0-9]+]]
+// DEBUG2-NEXT: store ptr [[ARRAYINIT_BEGIN1]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG183]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN1]], i32 noundef 1)
-// DEBUG2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG185:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG184:![0-9]+]]
// DEBUG2: invoke.cont:
-// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[ARRAYINIT_BEGIN1]], i64 1, !dbg [[DBG184]]
-// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG184]]
+// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[ARRAYINIT_BEGIN1]], i64 1, !dbg [[DBG183]]
+// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG183]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
-// DEBUG2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG186:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG185:![0-9]+]]
// DEBUG2: invoke.cont3:
-// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT]], i64 1, !dbg [[DBG184]]
-// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT4]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG184]]
+// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT]], i64 1, !dbg [[DBG183]]
+// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT4]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG183]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT4]], i32 noundef 3)
-// DEBUG2-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]], !dbg [[DBG187:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]], !dbg [[DBG186:![0-9]+]]
// DEBUG2: invoke.cont5:
-// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT7:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 1, !dbg [[DBG183]]
-// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT7]], ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG183]]
-// DEBUG2-NEXT: [[ARRAYINIT_BEGIN8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_ELEMENT7]], i64 0, i64 0, !dbg [[DBG188:![0-9]+]]
-// DEBUG2-NEXT: store ptr [[ARRAYINIT_BEGIN8]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG188]]
+// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT7:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 1, !dbg [[DBG182]]
+// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT7]], ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG182]]
+// DEBUG2-NEXT: [[ARRAYINIT_BEGIN8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_ELEMENT7]], i64 0, i64 0, !dbg [[DBG187:![0-9]+]]
+// DEBUG2-NEXT: store ptr [[ARRAYINIT_BEGIN8]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG187]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN8]], i32 noundef 4)
-// DEBUG2-NEXT: to label [[INVOKE_CONT11:%.*]] unwind label [[LPAD10:%.*]], !dbg [[DBG189:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT11:%.*]] unwind label [[LPAD10:%.*]], !dbg [[DBG188:![0-9]+]]
// DEBUG2: invoke.cont11:
-// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT12:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_BEGIN8]], i64 1, !dbg [[DBG188]]
-// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT12]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG188]]
+// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT12:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_BEGIN8]], i64 1, !dbg [[DBG187]]
+// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT12]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG187]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT12]], i32 noundef 5)
-// DEBUG2-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[LPAD10]], !dbg [[DBG190:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[LPAD10]], !dbg [[DBG189:![0-9]+]]
// DEBUG2: invoke.cont13:
-// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT14:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT12]], i64 1, !dbg [[DBG188]]
-// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT14]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG188]]
+// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT14:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT12]], i64 1, !dbg [[DBG187]]
+// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT14]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG187]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT14]], i32 noundef 6)
-// DEBUG2-NEXT: to label [[INVOKE_CONT15:%.*]] unwind label [[LPAD10]], !dbg [[DBG191:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT15:%.*]] unwind label [[LPAD10]], !dbg [[DBG190:![0-9]+]]
// DEBUG2: invoke.cont15:
-// DEBUG2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG182]]
-// DEBUG2-NEXT: ret ptr [[TMP2]], !dbg [[DBG182]]
+// DEBUG2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG181]]
+// DEBUG2-NEXT: ret ptr [[TMP2]], !dbg [[DBG181]]
// DEBUG2: lpad:
// DEBUG2-NEXT: [[TMP3:%.*]] = landingpad { ptr, i32 }
-// DEBUG2-NEXT: cleanup, !dbg [[DBG181]]
-// DEBUG2-NEXT: [[TMP4:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 0, !dbg [[DBG181]]
-// DEBUG2-NEXT: store ptr [[TMP4]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG181]]
-// DEBUG2-NEXT: [[TMP5:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 1, !dbg [[DBG181]]
-// DEBUG2-NEXT: store i32 [[TMP5]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG181]]
-// DEBUG2-NEXT: [[TMP6:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG184]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYINIT_BEGIN1]], [[TMP6]], !dbg [[DBG184]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE6:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG184]]
+// DEBUG2-NEXT: cleanup, !dbg [[DBG180]]
+// DEBUG2-NEXT: [[TMP4:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 0, !dbg [[DBG180]]
+// DEBUG2-NEXT: store ptr [[TMP4]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG180]]
+// DEBUG2-NEXT: [[TMP5:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 1, !dbg [[DBG180]]
+// DEBUG2-NEXT: store i32 [[TMP5]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG180]]
+// DEBUG2-NEXT: [[TMP6:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG183]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYINIT_BEGIN1]], [[TMP6]], !dbg [[DBG183]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE6:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG183]]
// DEBUG2: arraydestroy.body:
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP6]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG184]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG184]]
-// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG184]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAYINIT_BEGIN1]], !dbg [[DBG184]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE6]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG184]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP6]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG183]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG183]]
+// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG183]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAYINIT_BEGIN1]], !dbg [[DBG183]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE6]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG183]]
// DEBUG2: arraydestroy.done6:
-// DEBUG2-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG184]]
+// DEBUG2-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG183]]
// DEBUG2: lpad10:
// DEBUG2-NEXT: [[TMP7:%.*]] = landingpad { ptr, i32 }
-// DEBUG2-NEXT: cleanup, !dbg [[DBG181]]
-// DEBUG2-NEXT: [[TMP8:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 0, !dbg [[DBG181]]
-// DEBUG2-NEXT: store ptr [[TMP8]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG181]]
-// DEBUG2-NEXT: [[TMP9:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 1, !dbg [[DBG181]]
-// DEBUG2-NEXT: store i32 [[TMP9]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG181]]
-// DEBUG2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG188]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr [[ARRAYINIT_BEGIN8]], [[TMP10]], !dbg [[DBG188]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG188]]
+// DEBUG2-NEXT: cleanup, !dbg [[DBG180]]
+// DEBUG2-NEXT: [[TMP8:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 0, !dbg [[DBG180]]
+// DEBUG2-NEXT: store ptr [[TMP8]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG180]]
+// DEBUG2-NEXT: [[TMP9:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 1, !dbg [[DBG180]]
+// DEBUG2-NEXT: store i32 [[TMP9]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG180]]
+// DEBUG2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG187]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr [[ARRAYINIT_BEGIN8]], [[TMP10]], !dbg [[DBG187]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG187]]
// DEBUG2: arraydestroy.body17:
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[TMP10]], [[LPAD10]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG188]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG188]]
-// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR4]], !dbg [[DBG188]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], [[ARRAYINIT_BEGIN8]], !dbg [[DBG188]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG188]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[TMP10]], [[LPAD10]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG187]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG187]]
+// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR4]], !dbg [[DBG187]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], [[ARRAYINIT_BEGIN8]], !dbg [[DBG187]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG187]]
// DEBUG2: arraydestroy.done21:
-// DEBUG2-NEXT: br label [[EHCLEANUP]], !dbg [[DBG188]]
+// DEBUG2-NEXT: br label [[EHCLEANUP]], !dbg [[DBG187]]
// DEBUG2: ehcleanup:
-// DEBUG2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG183]]
-// DEBUG2-NEXT: [[PAD_ARRAYBEGIN:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 0, i64 0, !dbg [[DBG183]]
-// DEBUG2-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP11]], i64 0, i64 0, !dbg [[DBG183]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY22:%.*]] = icmp eq ptr [[PAD_ARRAYBEGIN]], [[PAD_ARRAYEND]], !dbg [[DBG183]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY22]], label [[ARRAYDESTROY_DONE27:%.*]], label [[ARRAYDESTROY_BODY23:%.*]], !dbg [[DBG183]]
+// DEBUG2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG182]]
+// DEBUG2-NEXT: [[PAD_ARRAYBEGIN:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 0, i64 0, !dbg [[DBG182]]
+// DEBUG2-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP11]], i64 0, i64 0, !dbg [[DBG182]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY22:%.*]] = icmp eq ptr [[PAD_ARRAYBEGIN]], [[PAD_ARRAYEND]], !dbg [[DBG182]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY22]], label [[ARRAYDESTROY_DONE27:%.*]], label [[ARRAYDESTROY_BODY23:%.*]], !dbg [[DBG182]]
// DEBUG2: arraydestroy.body23:
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST24:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT25:%.*]], [[ARRAYDESTROY_BODY23]] ], !dbg [[DBG183]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT25]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST24]], i64 -1, !dbg [[DBG183]]
-// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT25]]) #[[ATTR4]], !dbg [[DBG183]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_DONE26:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT25]], [[PAD_ARRAYBEGIN]], !dbg [[DBG183]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE26]], label [[ARRAYDESTROY_DONE27]], label [[ARRAYDESTROY_BODY23]], !dbg [[DBG183]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST24:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT25:%.*]], [[ARRAYDESTROY_BODY23]] ], !dbg [[DBG182]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT25]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST24]], i64 -1, !dbg [[DBG182]]
+// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT25]]) #[[ATTR4]], !dbg [[DBG182]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_DONE26:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT25]], [[PAD_ARRAYBEGIN]], !dbg [[DBG182]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE26]], label [[ARRAYDESTROY_DONE27]], label [[ARRAYDESTROY_BODY23]], !dbg [[DBG182]]
// DEBUG2: arraydestroy.done27:
-// DEBUG2-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG183]]
+// DEBUG2-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG182]]
// DEBUG2: eh.resume:
-// DEBUG2-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG183]]
-// DEBUG2-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG183]]
-// DEBUG2-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG183]]
-// DEBUG2-NEXT: [[LPAD_VAL28:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG183]]
-// DEBUG2-NEXT: resume { ptr, i32 } [[LPAD_VAL28]], !dbg [[DBG183]]
+// DEBUG2-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG182]]
+// DEBUG2-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG182]]
+// DEBUG2-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG182]]
+// DEBUG2-NEXT: [[LPAD_VAL28:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG182]]
+// DEBUG2-NEXT: resume { ptr, i32 } [[LPAD_VAL28]], !dbg [[DBG182]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_..4
-// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG192:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG191:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META193:![0-9]+]], metadata !DIExpression()), !dbg [[DBG194:![0-9]+]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG194]]
-// DEBUG2-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP1]], i64 6, !dbg [[DBG194]]
-// DEBUG2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG194]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META192:![0-9]+]], metadata !DIExpression()), !dbg [[DBG193:![0-9]+]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG193]]
+// DEBUG2-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP1]], i64 6, !dbg [[DBG193]]
+// DEBUG2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG193]]
// DEBUG2: arraydestroy.body:
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP2]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG194]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG194]]
-// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG194]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[TMP1]], !dbg [[DBG194]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG194]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP2]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG193]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG193]]
+// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG193]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[TMP1]], !dbg [[DBG193]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG193]]
// DEBUG2: arraydestroy.done1:
-// DEBUG2-NEXT: ret void, !dbg [[DBG195:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG194:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
-// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG196:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG195:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META199:![0-9]+]], metadata !DIExpression()), !dbg [[DBG200:![0-9]+]]
-// DEBUG2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG200]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META198:![0-9]+]], metadata !DIExpression()), !dbg [[DBG199:![0-9]+]]
+// DEBUG2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG199]]
// DEBUG2: arraydestroy.body:
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG200]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG200]]
-// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG200]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG200]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG200]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG199]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG199]]
+// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG199]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG199]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG199]]
// DEBUG2: arraydestroy.done1:
-// DEBUG2-NEXT: ret void, !dbg [[DBG200]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG199]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@main
@@ -7511,9 +7511,9 @@ int foobar() {
// DEBUG2-NEXT: [[RES:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
-// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB9:[0-9]+]])
+// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB9:[0-9]+]]), !dbg [[DBG200:![0-9]+]]
// DEBUG2-NEXT: store i32 0, ptr [[RETVAL]], align 4
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META201:![0-9]+]], metadata !DIExpression()), !dbg [[DBG202:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META201:![0-9]+]], metadata !DIExpression()), !dbg [[DBG202:![0-9]+]]
// DEBUG2-NEXT: [[TMP1:%.*]] = load atomic i8, ptr @_ZGVZ4mainE2sm acquire, align 8, !dbg [[DBG203:![0-9]+]]
// DEBUG2-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP1]], 0, !dbg [[DBG203]]
// DEBUG2-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG203]], !prof [[PROF204:![0-9]+]]
@@ -7524,76 +7524,76 @@ int foobar() {
// DEBUG2: init:
// DEBUG2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB7:[0-9]+]]), !dbg [[DBG203]]
// DEBUG2-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB7]], ptr @_ZZ4mainE2sm, ptr @.__kmpc_global_ctor_..5, ptr null, ptr @.__kmpc_global_dtor_..6), !dbg [[DBG203]]
-// DEBUG2-NEXT: [[TMP4:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB9]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG205:![0-9]+]]
-// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP4]], i32 0, i32 0, !dbg [[DBG206:![0-9]+]]
-// DEBUG2-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG206]]
+// DEBUG2-NEXT: [[TMP4:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB9]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG200]]
+// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP4]], i32 0, i32 0, !dbg [[DBG205:![0-9]+]]
+// DEBUG2-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG205]]
// DEBUG2-NEXT: invoke void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP5]])
-// DEBUG2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG207:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG206:![0-9]+]]
// DEBUG2: invoke.cont:
// DEBUG2-NEXT: [[TMP6:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG203]]
// DEBUG2-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR4]], !dbg [[DBG203]]
// DEBUG2-NEXT: br label [[INIT_END]], !dbg [[DBG203]]
// DEBUG2: init.end:
-// DEBUG2-NEXT: [[TMP7:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB11:[0-9]+]], i32 [[TMP0]], ptr @_ZN6Static1sE, i64 8, ptr @_ZN6Static1sE.cache.), !dbg [[DBG208:![0-9]+]]
-// DEBUG2-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP7]], i32 0, i32 0, !dbg [[DBG209:![0-9]+]]
-// DEBUG2-NEXT: [[TMP8:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG209]]
-// DEBUG2-NEXT: store i32 [[TMP8]], ptr [[RES]], align 4, !dbg [[DBG210:![0-9]+]]
-// DEBUG2-NEXT: [[TMP9:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB13:[0-9]+]], i32 [[TMP0]], ptr @_ZZ4mainE2sm, i64 24, ptr @_ZZ4mainE2sm.cache.), !dbg [[DBG211:![0-9]+]]
-// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[TMP9]], i32 0, i32 0, !dbg [[DBG212:![0-9]+]]
-// DEBUG2-NEXT: [[TMP10:%.*]] = load i32, ptr [[A2]], align 8, !dbg [[DBG212]]
-// DEBUG2-NEXT: [[TMP11:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG213:![0-9]+]]
-// DEBUG2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP10]], !dbg [[DBG213]]
-// DEBUG2-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG213]]
-// DEBUG2-NEXT: [[TMP12:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB15:[0-9]+]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG214:![0-9]+]]
-// DEBUG2-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP12]], i32 0, i32 0, !dbg [[DBG215:![0-9]+]]
-// DEBUG2-NEXT: [[TMP13:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG215]]
-// DEBUG2-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG216:![0-9]+]]
-// DEBUG2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG216]]
-// DEBUG2-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG216]]
-// DEBUG2-NEXT: [[TMP15:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG217:![0-9]+]]
-// DEBUG2-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG218:![0-9]+]]
-// DEBUG2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG218]]
-// DEBUG2-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG218]]
-// DEBUG2-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB17:[0-9]+]], i32 [[TMP0]], ptr @gs3, i64 12, ptr @gs3.cache.), !dbg [[DBG219:![0-9]+]]
-// DEBUG2-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP17]], i32 0, i32 0, !dbg [[DBG220:![0-9]+]]
-// DEBUG2-NEXT: [[TMP18:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG220]]
-// DEBUG2-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG221:![0-9]+]]
-// DEBUG2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], [[TMP18]], !dbg [[DBG221]]
-// DEBUG2-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG221]]
-// DEBUG2-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB19:[0-9]+]], i32 [[TMP0]], ptr @arr_x, i64 24, ptr @arr_x.cache.), !dbg [[DBG222:![0-9]+]]
-// DEBUG2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP20]], i64 0, i64 1, !dbg [[DBG222]]
-// DEBUG2-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG222]]
-// DEBUG2-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX8]], i32 0, i32 0, !dbg [[DBG223:![0-9]+]]
-// DEBUG2-NEXT: [[TMP21:%.*]] = load i32, ptr [[A9]], align 4, !dbg [[DBG223]]
-// DEBUG2-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG224:![0-9]+]]
-// DEBUG2-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG224]]
-// DEBUG2-NEXT: store i32 [[ADD10]], ptr [[RES]], align 4, !dbg [[DBG224]]
-// DEBUG2-NEXT: [[TMP23:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB21:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIiE2stE, i64 4, ptr @_ZN2STIiE2stE.cache.), !dbg [[DBG225:![0-9]+]]
-// DEBUG2-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4, !dbg [[DBG225]]
-// DEBUG2-NEXT: [[TMP25:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG226:![0-9]+]]
-// DEBUG2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP25]], [[TMP24]], !dbg [[DBG226]]
-// DEBUG2-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG226]]
-// DEBUG2-NEXT: [[TMP26:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB23:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIfE2stE, i64 4, ptr @_ZN2STIfE2stE.cache.), !dbg [[DBG227:![0-9]+]]
-// DEBUG2-NEXT: [[TMP27:%.*]] = load float, ptr [[TMP26]], align 4, !dbg [[DBG227]]
-// DEBUG2-NEXT: [[CONV:%.*]] = fptosi float [[TMP27]] to i32, !dbg [[DBG227]]
-// DEBUG2-NEXT: [[TMP28:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG228:![0-9]+]]
-// DEBUG2-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], [[CONV]], !dbg [[DBG228]]
-// DEBUG2-NEXT: store i32 [[ADD12]], ptr [[RES]], align 4, !dbg [[DBG228]]
-// DEBUG2-NEXT: [[TMP29:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB25:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STI2S4E2stE, i64 8, ptr @_ZN2STI2S4E2stE.cache.), !dbg [[DBG229:![0-9]+]]
-// DEBUG2-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP29]], i32 0, i32 0, !dbg [[DBG230:![0-9]+]]
-// DEBUG2-NEXT: [[TMP30:%.*]] = load i32, ptr [[A13]], align 4, !dbg [[DBG230]]
-// DEBUG2-NEXT: [[TMP31:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG231:![0-9]+]]
-// DEBUG2-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP31]], [[TMP30]], !dbg [[DBG231]]
-// DEBUG2-NEXT: store i32 [[ADD14]], ptr [[RES]], align 4, !dbg [[DBG231]]
-// DEBUG2-NEXT: [[TMP32:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG232:![0-9]+]]
-// DEBUG2-NEXT: ret i32 [[TMP32]], !dbg [[DBG233:![0-9]+]]
+// DEBUG2-NEXT: [[TMP7:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB11:[0-9]+]], i32 [[TMP0]], ptr @_ZN6Static1sE, i64 8, ptr @_ZN6Static1sE.cache.), !dbg [[DBG207:![0-9]+]]
+// DEBUG2-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP7]], i32 0, i32 0, !dbg [[DBG208:![0-9]+]]
+// DEBUG2-NEXT: [[TMP8:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG208]]
+// DEBUG2-NEXT: store i32 [[TMP8]], ptr [[RES]], align 4, !dbg [[DBG209:![0-9]+]]
+// DEBUG2-NEXT: [[TMP9:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB13:[0-9]+]], i32 [[TMP0]], ptr @_ZZ4mainE2sm, i64 24, ptr @_ZZ4mainE2sm.cache.), !dbg [[DBG210:![0-9]+]]
+// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[TMP9]], i32 0, i32 0, !dbg [[DBG211:![0-9]+]]
+// DEBUG2-NEXT: [[TMP10:%.*]] = load i32, ptr [[A2]], align 8, !dbg [[DBG211]]
+// DEBUG2-NEXT: [[TMP11:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG212:![0-9]+]]
+// DEBUG2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP10]], !dbg [[DBG212]]
+// DEBUG2-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG212]]
+// DEBUG2-NEXT: [[TMP12:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB15:[0-9]+]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG213:![0-9]+]]
+// DEBUG2-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP12]], i32 0, i32 0, !dbg [[DBG214:![0-9]+]]
+// DEBUG2-NEXT: [[TMP13:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG214]]
+// DEBUG2-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG215:![0-9]+]]
+// DEBUG2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG215]]
+// DEBUG2-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG215]]
+// DEBUG2-NEXT: [[TMP15:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG216:![0-9]+]]
+// DEBUG2-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG217:![0-9]+]]
+// DEBUG2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG217]]
+// DEBUG2-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG217]]
+// DEBUG2-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB17:[0-9]+]], i32 [[TMP0]], ptr @gs3, i64 12, ptr @gs3.cache.), !dbg [[DBG218:![0-9]+]]
+// DEBUG2-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP17]], i32 0, i32 0, !dbg [[DBG219:![0-9]+]]
+// DEBUG2-NEXT: [[TMP18:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG219]]
+// DEBUG2-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG220:![0-9]+]]
+// DEBUG2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], [[TMP18]], !dbg [[DBG220]]
+// DEBUG2-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG220]]
+// DEBUG2-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB19:[0-9]+]], i32 [[TMP0]], ptr @arr_x, i64 24, ptr @arr_x.cache.), !dbg [[DBG221:![0-9]+]]
+// DEBUG2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP20]], i64 0, i64 1, !dbg [[DBG221]]
+// DEBUG2-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG221]]
+// DEBUG2-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX8]], i32 0, i32 0, !dbg [[DBG222:![0-9]+]]
+// DEBUG2-NEXT: [[TMP21:%.*]] = load i32, ptr [[A9]], align 4, !dbg [[DBG222]]
+// DEBUG2-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG223:![0-9]+]]
+// DEBUG2-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG223]]
+// DEBUG2-NEXT: store i32 [[ADD10]], ptr [[RES]], align 4, !dbg [[DBG223]]
+// DEBUG2-NEXT: [[TMP23:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB21:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIiE2stE, i64 4, ptr @_ZN2STIiE2stE.cache.), !dbg [[DBG224:![0-9]+]]
+// DEBUG2-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4, !dbg [[DBG224]]
+// DEBUG2-NEXT: [[TMP25:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG225:![0-9]+]]
+// DEBUG2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP25]], [[TMP24]], !dbg [[DBG225]]
+// DEBUG2-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG225]]
+// DEBUG2-NEXT: [[TMP26:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB23:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIfE2stE, i64 4, ptr @_ZN2STIfE2stE.cache.), !dbg [[DBG226:![0-9]+]]
+// DEBUG2-NEXT: [[TMP27:%.*]] = load float, ptr [[TMP26]], align 4, !dbg [[DBG226]]
+// DEBUG2-NEXT: [[CONV:%.*]] = fptosi float [[TMP27]] to i32, !dbg [[DBG226]]
+// DEBUG2-NEXT: [[TMP28:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG227:![0-9]+]]
+// DEBUG2-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], [[CONV]], !dbg [[DBG227]]
+// DEBUG2-NEXT: store i32 [[ADD12]], ptr [[RES]], align 4, !dbg [[DBG227]]
+// DEBUG2-NEXT: [[TMP29:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB25:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STI2S4E2stE, i64 8, ptr @_ZN2STI2S4E2stE.cache.), !dbg [[DBG228:![0-9]+]]
+// DEBUG2-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP29]], i32 0, i32 0, !dbg [[DBG229:![0-9]+]]
+// DEBUG2-NEXT: [[TMP30:%.*]] = load i32, ptr [[A13]], align 4, !dbg [[DBG229]]
+// DEBUG2-NEXT: [[TMP31:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG230:![0-9]+]]
+// DEBUG2-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP31]], [[TMP30]], !dbg [[DBG230]]
+// DEBUG2-NEXT: store i32 [[ADD14]], ptr [[RES]], align 4, !dbg [[DBG230]]
+// DEBUG2-NEXT: [[TMP32:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG231:![0-9]+]]
+// DEBUG2-NEXT: ret i32 [[TMP32]], !dbg [[DBG232:![0-9]+]]
// DEBUG2: lpad:
// DEBUG2-NEXT: [[TMP33:%.*]] = landingpad { ptr, i32 }
-// DEBUG2-NEXT: cleanup, !dbg [[DBG234:![0-9]+]]
-// DEBUG2-NEXT: [[TMP34:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 0, !dbg [[DBG234]]
-// DEBUG2-NEXT: store ptr [[TMP34]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG234]]
-// DEBUG2-NEXT: [[TMP35:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 1, !dbg [[DBG234]]
-// DEBUG2-NEXT: store i32 [[TMP35]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG234]]
+// DEBUG2-NEXT: cleanup, !dbg [[DBG233:![0-9]+]]
+// DEBUG2-NEXT: [[TMP34:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 0, !dbg [[DBG233]]
+// DEBUG2-NEXT: store ptr [[TMP34]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG233]]
+// DEBUG2-NEXT: [[TMP35:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 1, !dbg [[DBG233]]
+// DEBUG2-NEXT: store i32 [[TMP35]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG233]]
// DEBUG2-NEXT: call void @__cxa_guard_abort(ptr @_ZGVZ4mainE2sm) #[[ATTR4]], !dbg [[DBG203]]
// DEBUG2-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG203]]
// DEBUG2: eh.resume:
@@ -7605,296 +7605,296 @@ int foobar() {
//
//
// DEBUG2-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_..5
-// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG235:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG234:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
-// DEBUG2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB5:[0-9]+]])
+// DEBUG2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB5:[0-9]+]]), !dbg [[DBG235:![0-9]+]]
// DEBUG2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META236:![0-9]+]], metadata !DIExpression()), !dbg [[DBG237:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META236:![0-9]+]], metadata !DIExpression()), !dbg [[DBG237:![0-9]+]]
// DEBUG2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG238:![0-9]+]]
-// DEBUG2-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB5]], i32 [[TMP1]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG239:![0-9]+]]
-// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP3]], i32 0, i32 0, !dbg [[DBG240:![0-9]+]]
-// DEBUG2-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG240]]
-// DEBUG2-NEXT: call void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) [[TMP2]], i32 noundef [[TMP4]]), !dbg [[DBG241:![0-9]+]]
+// DEBUG2-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB5]], i32 [[TMP1]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG235]]
+// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP3]], i32 0, i32 0, !dbg [[DBG239:![0-9]+]]
+// DEBUG2-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG239]]
+// DEBUG2-NEXT: call void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) [[TMP2]], i32 noundef [[TMP4]]), !dbg [[DBG240:![0-9]+]]
// DEBUG2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG238]]
// DEBUG2-NEXT: ret ptr [[TMP5]], !dbg [[DBG238]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC1Ei
-// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG242:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG241:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META243:![0-9]+]], metadata !DIExpression()), !dbg [[DBG245:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META242:![0-9]+]], metadata !DIExpression()), !dbg [[DBG244:![0-9]+]]
// DEBUG2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META246:![0-9]+]], metadata !DIExpression()), !dbg [[DBG247:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META245:![0-9]+]], metadata !DIExpression()), !dbg [[DBG246:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG248:![0-9]+]]
-// DEBUG2-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG248]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG249:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG247:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG247]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG248:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_..6
-// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG250:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG249:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META251:![0-9]+]], metadata !DIExpression()), !dbg [[DBG252:![0-9]+]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG252]]
-// DEBUG2-NEXT: call void @_ZZ4mainEN5SmainD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[TMP1]]) #[[ATTR4]], !dbg [[DBG252]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG253:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META250:![0-9]+]], metadata !DIExpression()), !dbg [[DBG251:![0-9]+]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG251]]
+// DEBUG2-NEXT: call void @_ZZ4mainEN5SmainD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[TMP1]]) #[[ATTR4]], !dbg [[DBG251]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG252:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD1Ev
-// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG254:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG253:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META255:![0-9]+]], metadata !DIExpression()), !dbg [[DBG256:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META254:![0-9]+]], metadata !DIExpression()), !dbg [[DBG255:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR4]], !dbg [[DBG257:![0-9]+]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG258:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR4]], !dbg [[DBG256:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG257:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_Z6foobarv
-// DEBUG2-SAME: () #[[ATTR6:[0-9]+]] !dbg [[DBG259:![0-9]+]] {
+// DEBUG2-SAME: () #[[ATTR3]] !dbg [[DBG258:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[RES:%.*]] = alloca i32, align 4
-// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB27:[0-9]+]])
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META260:![0-9]+]], metadata !DIExpression()), !dbg [[DBG261:![0-9]+]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB27]], i32 [[TMP0]], ptr @_ZN6Static1sE, i64 8, ptr @_ZN6Static1sE.cache.), !dbg [[DBG262:![0-9]+]]
-// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP1]], i32 0, i32 0, !dbg [[DBG263:![0-9]+]]
-// DEBUG2-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG263]]
-// DEBUG2-NEXT: store i32 [[TMP2]], ptr [[RES]], align 4, !dbg [[DBG264:![0-9]+]]
-// DEBUG2-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB29:[0-9]+]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG265:![0-9]+]]
-// DEBUG2-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP3]], i32 0, i32 0, !dbg [[DBG266:![0-9]+]]
-// DEBUG2-NEXT: [[TMP4:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG266]]
-// DEBUG2-NEXT: [[TMP5:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG267:![0-9]+]]
-// DEBUG2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], [[TMP4]], !dbg [[DBG267]]
-// DEBUG2-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG267]]
-// DEBUG2-NEXT: [[TMP6:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG268:![0-9]+]]
-// DEBUG2-NEXT: [[TMP7:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG269:![0-9]+]]
-// DEBUG2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP7]], [[TMP6]], !dbg [[DBG269]]
-// DEBUG2-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG269]]
-// DEBUG2-NEXT: [[TMP8:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB31:[0-9]+]], i32 [[TMP0]], ptr @gs3, i64 12, ptr @gs3.cache.), !dbg [[DBG270:![0-9]+]]
-// DEBUG2-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP8]], i32 0, i32 0, !dbg [[DBG271:![0-9]+]]
-// DEBUG2-NEXT: [[TMP9:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG271]]
-// DEBUG2-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG272:![0-9]+]]
-// DEBUG2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG272]]
-// DEBUG2-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG272]]
-// DEBUG2-NEXT: [[TMP11:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB33:[0-9]+]], i32 [[TMP0]], ptr @arr_x, i64 24, ptr @arr_x.cache.), !dbg [[DBG273:![0-9]+]]
-// DEBUG2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP11]], i64 0, i64 1, !dbg [[DBG273]]
-// DEBUG2-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG273]]
-// DEBUG2-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX5]], i32 0, i32 0, !dbg [[DBG274:![0-9]+]]
-// DEBUG2-NEXT: [[TMP12:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG274]]
-// DEBUG2-NEXT: [[TMP13:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG275:![0-9]+]]
-// DEBUG2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP13]], [[TMP12]], !dbg [[DBG275]]
-// DEBUG2-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG275]]
-// DEBUG2-NEXT: [[TMP14:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB35:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIiE2stE, i64 4, ptr @_ZN2STIiE2stE.cache.), !dbg [[DBG276:![0-9]+]]
-// DEBUG2-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4, !dbg [[DBG276]]
-// DEBUG2-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG277:![0-9]+]]
-// DEBUG2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG277]]
-// DEBUG2-NEXT: store i32 [[ADD8]], ptr [[RES]], align 4, !dbg [[DBG277]]
-// DEBUG2-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB37:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIfE2stE, i64 4, ptr @_ZN2STIfE2stE.cache.), !dbg [[DBG278:![0-9]+]]
-// DEBUG2-NEXT: [[TMP18:%.*]] = load float, ptr [[TMP17]], align 4, !dbg [[DBG278]]
-// DEBUG2-NEXT: [[CONV:%.*]] = fptosi float [[TMP18]] to i32, !dbg [[DBG278]]
-// DEBUG2-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG279:![0-9]+]]
-// DEBUG2-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP19]], [[CONV]], !dbg [[DBG279]]
-// DEBUG2-NEXT: store i32 [[ADD9]], ptr [[RES]], align 4, !dbg [[DBG279]]
-// DEBUG2-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB39:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STI2S4E2stE, i64 8, ptr @_ZN2STI2S4E2stE.cache.), !dbg [[DBG280:![0-9]+]]
-// DEBUG2-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP20]], i32 0, i32 0, !dbg [[DBG281:![0-9]+]]
-// DEBUG2-NEXT: [[TMP21:%.*]] = load i32, ptr [[A10]], align 4, !dbg [[DBG281]]
-// DEBUG2-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG282:![0-9]+]]
-// DEBUG2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG282]]
-// DEBUG2-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG282]]
-// DEBUG2-NEXT: [[TMP23:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG283:![0-9]+]]
-// DEBUG2-NEXT: ret i32 [[TMP23]], !dbg [[DBG284:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB27:[0-9]+]]), !dbg [[DBG259:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META260:![0-9]+]], metadata !DIExpression()), !dbg [[DBG261:![0-9]+]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB27]], i32 [[TMP0]], ptr @_ZN6Static1sE, i64 8, ptr @_ZN6Static1sE.cache.), !dbg [[DBG259]]
+// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP1]], i32 0, i32 0, !dbg [[DBG262:![0-9]+]]
+// DEBUG2-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG262]]
+// DEBUG2-NEXT: store i32 [[TMP2]], ptr [[RES]], align 4, !dbg [[DBG263:![0-9]+]]
+// DEBUG2-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB29:[0-9]+]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG264:![0-9]+]]
+// DEBUG2-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP3]], i32 0, i32 0, !dbg [[DBG265:![0-9]+]]
+// DEBUG2-NEXT: [[TMP4:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG265]]
+// DEBUG2-NEXT: [[TMP5:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG266:![0-9]+]]
+// DEBUG2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], [[TMP4]], !dbg [[DBG266]]
+// DEBUG2-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG266]]
+// DEBUG2-NEXT: [[TMP6:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG267:![0-9]+]]
+// DEBUG2-NEXT: [[TMP7:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG268:![0-9]+]]
+// DEBUG2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP7]], [[TMP6]], !dbg [[DBG268]]
+// DEBUG2-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG268]]
+// DEBUG2-NEXT: [[TMP8:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB31:[0-9]+]], i32 [[TMP0]], ptr @gs3, i64 12, ptr @gs3.cache.), !dbg [[DBG269:![0-9]+]]
+// DEBUG2-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP8]], i32 0, i32 0, !dbg [[DBG270:![0-9]+]]
+// DEBUG2-NEXT: [[TMP9:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG270]]
+// DEBUG2-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG271:![0-9]+]]
+// DEBUG2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG271]]
+// DEBUG2-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG271]]
+// DEBUG2-NEXT: [[TMP11:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB33:[0-9]+]], i32 [[TMP0]], ptr @arr_x, i64 24, ptr @arr_x.cache.), !dbg [[DBG272:![0-9]+]]
+// DEBUG2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP11]], i64 0, i64 1, !dbg [[DBG272]]
+// DEBUG2-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG272]]
+// DEBUG2-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX5]], i32 0, i32 0, !dbg [[DBG273:![0-9]+]]
+// DEBUG2-NEXT: [[TMP12:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG273]]
+// DEBUG2-NEXT: [[TMP13:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG274:![0-9]+]]
+// DEBUG2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP13]], [[TMP12]], !dbg [[DBG274]]
+// DEBUG2-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG274]]
+// DEBUG2-NEXT: [[TMP14:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB35:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIiE2stE, i64 4, ptr @_ZN2STIiE2stE.cache.), !dbg [[DBG275:![0-9]+]]
+// DEBUG2-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4, !dbg [[DBG275]]
+// DEBUG2-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG276:![0-9]+]]
+// DEBUG2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG276]]
+// DEBUG2-NEXT: store i32 [[ADD8]], ptr [[RES]], align 4, !dbg [[DBG276]]
+// DEBUG2-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB37:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIfE2stE, i64 4, ptr @_ZN2STIfE2stE.cache.), !dbg [[DBG277:![0-9]+]]
+// DEBUG2-NEXT: [[TMP18:%.*]] = load float, ptr [[TMP17]], align 4, !dbg [[DBG277]]
+// DEBUG2-NEXT: [[CONV:%.*]] = fptosi float [[TMP18]] to i32, !dbg [[DBG277]]
+// DEBUG2-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG278:![0-9]+]]
+// DEBUG2-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP19]], [[CONV]], !dbg [[DBG278]]
+// DEBUG2-NEXT: store i32 [[ADD9]], ptr [[RES]], align 4, !dbg [[DBG278]]
+// DEBUG2-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB39:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STI2S4E2stE, i64 8, ptr @_ZN2STI2S4E2stE.cache.), !dbg [[DBG279:![0-9]+]]
+// DEBUG2-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP20]], i32 0, i32 0, !dbg [[DBG280:![0-9]+]]
+// DEBUG2-NEXT: [[TMP21:%.*]] = load i32, ptr [[A10]], align 4, !dbg [[DBG280]]
+// DEBUG2-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG281:![0-9]+]]
+// DEBUG2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG281]]
+// DEBUG2-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG281]]
+// DEBUG2-NEXT: [[TMP23:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG282:![0-9]+]]
+// DEBUG2-NEXT: ret i32 [[TMP23]], !dbg [[DBG283:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@__cxx_global_var_init.7
-// DEBUG2-SAME: () #[[ATTR0]] comdat($_ZN2STI2S4E2stE) !dbg [[DBG285:![0-9]+]] {
+// DEBUG2-SAME: () #[[ATTR0]] comdat($_ZN2STI2S4E2stE) !dbg [[DBG284:![0-9]+]] {
// DEBUG2-NEXT: entry:
-// DEBUG2-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG286:![0-9]+]]
-// DEBUG2-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG286]]
-// DEBUG2-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG286]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG285:![0-9]+]]
+// DEBUG2-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG285]]
+// DEBUG2-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG285]]
// DEBUG2: init.check:
-// DEBUG2-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG286]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB41:[0-9]+]]), !dbg [[DBG286]]
-// DEBUG2-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB41]], ptr @_ZN2STI2S4E2stE, ptr @.__kmpc_global_ctor_..8, ptr null, ptr @.__kmpc_global_dtor_..9), !dbg [[DBG286]]
-// DEBUG2-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG287:![0-9]+]]
-// DEBUG2-NEXT: [[TMP2:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG286]]
-// DEBUG2-NEXT: br label [[INIT_END]], !dbg [[DBG286]]
+// DEBUG2-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG285]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB41:[0-9]+]]), !dbg [[DBG285]]
+// DEBUG2-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB41]], ptr @_ZN2STI2S4E2stE, ptr @.__kmpc_global_ctor_..8, ptr null, ptr @.__kmpc_global_dtor_..9), !dbg [[DBG285]]
+// DEBUG2-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG286:![0-9]+]]
+// DEBUG2-NEXT: [[TMP2:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG285]]
+// DEBUG2-NEXT: br label [[INIT_END]], !dbg [[DBG285]]
// DEBUG2: init.end:
-// DEBUG2-NEXT: ret void, !dbg [[DBG289:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG288:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_..8
-// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG290:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG289:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META291:![0-9]+]], metadata !DIExpression()), !dbg [[DBG292:![0-9]+]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG293:![0-9]+]]
-// DEBUG2-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[TMP1]], i32 noundef 23), !dbg [[DBG294:![0-9]+]]
-// DEBUG2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG293]]
-// DEBUG2-NEXT: ret ptr [[TMP2]], !dbg [[DBG293]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META290:![0-9]+]], metadata !DIExpression()), !dbg [[DBG291:![0-9]+]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG292:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[TMP1]], i32 noundef 23), !dbg [[DBG293:![0-9]+]]
+// DEBUG2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG292]]
+// DEBUG2-NEXT: ret ptr [[TMP2]], !dbg [[DBG292]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S4C1Ei
-// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG295:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG294:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META296:![0-9]+]], metadata !DIExpression()), !dbg [[DBG298:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META295:![0-9]+]], metadata !DIExpression()), !dbg [[DBG297:![0-9]+]]
// DEBUG2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META299:![0-9]+]], metadata !DIExpression()), !dbg [[DBG300:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META298:![0-9]+]], metadata !DIExpression()), !dbg [[DBG299:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG301:![0-9]+]]
-// DEBUG2-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG301]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG302:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG300:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG300]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG301:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_..9
-// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG303:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG302:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META304:![0-9]+]], metadata !DIExpression()), !dbg [[DBG305:![0-9]+]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG305]]
-// DEBUG2-NEXT: call void @_ZN2S4D1Ev(ptr noundef nonnull align 4 dereferenceable(8) [[TMP1]]) #[[ATTR4]], !dbg [[DBG305]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG306:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META303:![0-9]+]], metadata !DIExpression()), !dbg [[DBG304:![0-9]+]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG304]]
+// DEBUG2-NEXT: call void @_ZN2S4D1Ev(ptr noundef nonnull align 4 dereferenceable(8) [[TMP1]]) #[[ATTR4]], !dbg [[DBG304]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG305:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S4D1Ev
-// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG307:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG306:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META308:![0-9]+]], metadata !DIExpression()), !dbg [[DBG309:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META307:![0-9]+]], metadata !DIExpression()), !dbg [[DBG308:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR4]], !dbg [[DBG310:![0-9]+]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG311:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR4]], !dbg [[DBG309:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG310:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S1C2Ei
-// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG312:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG311:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META313:![0-9]+]], metadata !DIExpression()), !dbg [[DBG314:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META312:![0-9]+]], metadata !DIExpression()), !dbg [[DBG313:![0-9]+]]
// DEBUG2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META315:![0-9]+]], metadata !DIExpression()), !dbg [[DBG316:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META314:![0-9]+]], metadata !DIExpression()), !dbg [[DBG315:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG317:![0-9]+]]
-// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG318:![0-9]+]]
-// DEBUG2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG317]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG319:![0-9]+]]
+// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG316:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG317:![0-9]+]]
+// DEBUG2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG316]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG318:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S1D2Ev
-// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG320:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG319:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META321:![0-9]+]], metadata !DIExpression()), !dbg [[DBG322:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META320:![0-9]+]], metadata !DIExpression()), !dbg [[DBG321:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG323:![0-9]+]]
-// DEBUG2-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG325:![0-9]+]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG326:![0-9]+]]
+// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG322:![0-9]+]]
+// DEBUG2-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG324:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG325:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S2C2Ei
-// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG327:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG326:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META328:![0-9]+]], metadata !DIExpression()), !dbg [[DBG329:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META327:![0-9]+]], metadata !DIExpression()), !dbg [[DBG328:![0-9]+]]
// DEBUG2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META330:![0-9]+]], metadata !DIExpression()), !dbg [[DBG331:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META329:![0-9]+]], metadata !DIExpression()), !dbg [[DBG330:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG332:![0-9]+]]
-// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG333:![0-9]+]]
-// DEBUG2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG332]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG334:![0-9]+]]
+// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG331:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG332:![0-9]+]]
+// DEBUG2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG331]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG333:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S2D2Ev
-// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG335:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG334:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META336:![0-9]+]], metadata !DIExpression()), !dbg [[DBG337:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META335:![0-9]+]], metadata !DIExpression()), !dbg [[DBG336:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG338:![0-9]+]]
-// DEBUG2-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG340:![0-9]+]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG341:![0-9]+]]
+// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG337:![0-9]+]]
+// DEBUG2-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG339:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG340:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC2Ei
-// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG342:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG341:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META343:![0-9]+]], metadata !DIExpression()), !dbg [[DBG344:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META342:![0-9]+]], metadata !DIExpression()), !dbg [[DBG343:![0-9]+]]
// DEBUG2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META345:![0-9]+]], metadata !DIExpression()), !dbg [[DBG346:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META344:![0-9]+]], metadata !DIExpression()), !dbg [[DBG345:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG347:![0-9]+]]
-// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG348:![0-9]+]]
-// DEBUG2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG347]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG349:![0-9]+]]
+// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG346:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG347:![0-9]+]]
+// DEBUG2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG346]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG348:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD2Ev
-// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG350:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG349:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META351:![0-9]+]], metadata !DIExpression()), !dbg [[DBG352:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META350:![0-9]+]], metadata !DIExpression()), !dbg [[DBG351:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG353:![0-9]+]]
-// DEBUG2-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG355:![0-9]+]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG356:![0-9]+]]
+// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG352:![0-9]+]]
+// DEBUG2-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG354:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG355:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S4C2Ei
-// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG357:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG356:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META358:![0-9]+]], metadata !DIExpression()), !dbg [[DBG359:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META357:![0-9]+]], metadata !DIExpression()), !dbg [[DBG358:![0-9]+]]
// DEBUG2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META360:![0-9]+]], metadata !DIExpression()), !dbg [[DBG361:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META359:![0-9]+]], metadata !DIExpression()), !dbg [[DBG360:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG362:![0-9]+]]
-// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG363:![0-9]+]]
-// DEBUG2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG362]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG364:![0-9]+]]
+// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG361:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG362:![0-9]+]]
+// DEBUG2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG361]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG363:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S4D2Ev
-// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG365:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG364:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META366:![0-9]+]], metadata !DIExpression()), !dbg [[DBG367:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META365:![0-9]+]], metadata !DIExpression()), !dbg [[DBG366:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG368:![0-9]+]]
-// DEBUG2-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG370:![0-9]+]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG371:![0-9]+]]
+// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG367:![0-9]+]]
+// DEBUG2-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG369:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG370:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_threadprivate_codegen.cpp
-// DEBUG2-SAME: () #[[ATTR0]] !dbg [[DBG372:![0-9]+]] {
+// DEBUG2-SAME: () #[[ATTR0]] !dbg [[DBG371:![0-9]+]] {
// DEBUG2-NEXT: entry:
-// DEBUG2-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG373:![0-9]+]]
-// DEBUG2-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG373]]
-// DEBUG2-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG373]]
+// DEBUG2-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG372:![0-9]+]]
+// DEBUG2-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG372]]
+// DEBUG2-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG372]]
// DEBUG2-NEXT: ret void
//
diff --git a/clang/test/OpenMP/tile_codegen.cpp b/clang/test/OpenMP/tile_codegen.cpp
index 93a3a14133ab..5fd5609b844c 100644
--- a/clang/test/OpenMP/tile_codegen.cpp
+++ b/clang/test/OpenMP/tile_codegen.cpp
@@ -1,10 +1,10 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ --version 4
// Check code generation
-// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fclang-abi-compat=latest -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
+// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fclang-abi-compat=latest -std=c++20 -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// Check same results after serialization round-trip
-// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fclang-abi-compat=latest -fopenmp -emit-pch -o %t %s
-// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fclang-abi-compat=latest -fopenmp -include-pch %t -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK2
+// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fclang-abi-compat=latest -std=c++20 -fopenmp -emit-pch -o %t %s
+// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fclang-abi-compat=latest -std=c++20 -fopenmp -include-pch %t -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK2
// expected-no-diagnostics
#ifndef HEADER
@@ -91,22 +91,38 @@ extern "C" void foo8(int a) {
}
+typedef struct { double array[12]; } data_t;
+extern "C" void foo9(data_t data) {
+#pragma omp tile sizes(5)
+ for (double v : data.array)
+ body(v);
+}
+
+
+extern "C" void foo10(data_t data) {
+#pragma omp tile sizes(5)
+ for (double c = 42.0; double v : data.array)
+ body(c, v);
+}
+
+
#endif /* HEADER */
-// CHECK1-LABEL: define {{[^@]+}}@body
-// CHECK1-SAME: (...) #[[ATTR0:[0-9]+]] {
+
+// CHECK1-LABEL: define dso_local void @body(
+// CHECK1-SAME: ...) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@__cxx_global_var_init
-// CHECK1-SAME: () #[[ATTR1:[0-9]+]] section ".text.startup" {
+// CHECK1-LABEL: define internal void @__cxx_global_var_init(
+// CHECK1-SAME: ) #[[ATTR1:[0-9]+]] section ".text.startup" {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) @s)
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1Ev
-// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat align 2 {
+// CHECK1-LABEL: define linkonce_odr void @_ZN1SC1Ev(
+// CHECK1-SAME: ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
@@ -115,50 +131,52 @@ extern "C" void foo8(int a) {
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2Ev
-// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat align 2 {
+// CHECK1-LABEL: define linkonce_odr void @_ZN1SC2Ev(
+// CHECK1-SAME: ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR0]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
-// CHECK1-NEXT: [[I:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[I2:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTFLOOR_0_IV_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTTILE_0_IV_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK1-NEXT: [[I2:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
-// CHECK1-NEXT: store ptr [[I2]], ptr [[I]], align 8
+// CHECK1-NEXT: [[I:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK1-NEXT: store i32 7, ptr [[I]], align 4
+// CHECK1-NEXT: [[I3:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[THIS1]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[I3]], ptr [[I2]], align 8
// CHECK1-NEXT: store i32 0, ptr [[DOTFLOOR_0_IV_I]], align 4
// CHECK1-NEXT: br label [[FOR_COND:%.*]]
// CHECK1: for.cond:
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], 4
-// CHECK1-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END11:%.*]]
+// CHECK1-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END12:%.*]]
// CHECK1: for.body:
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTTILE_0_IV_I]], align 4
-// CHECK1-NEXT: br label [[FOR_COND3:%.*]]
-// CHECK1: for.cond3:
+// CHECK1-NEXT: br label [[FOR_COND4:%.*]]
+// CHECK1: for.cond4:
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 5
-// CHECK1-NEXT: [[CMP4:%.*]] = icmp slt i32 4, [[ADD]]
-// CHECK1-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1-NEXT: [[CMP5:%.*]] = icmp slt i32 4, [[ADD]]
+// CHECK1-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP4]], 5
+// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP4]], 5
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
-// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 4, [[COND_TRUE]] ], [ [[ADD5]], [[COND_FALSE]] ]
-// CHECK1-NEXT: [[CMP6:%.*]] = icmp slt i32 [[TMP2]], [[COND]]
-// CHECK1-NEXT: br i1 [[CMP6]], label [[FOR_BODY7:%.*]], label [[FOR_END:%.*]]
-// CHECK1: for.body7:
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 4, [[COND_TRUE]] ], [ [[ADD6]], [[COND_FALSE]] ]
+// CHECK1-NEXT: [[CMP7:%.*]] = icmp slt i32 [[TMP2]], [[COND]]
+// CHECK1-NEXT: br i1 [[CMP7]], label [[FOR_BODY8:%.*]], label [[FOR_END:%.*]]
+// CHECK1: for.body8:
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP5]], 3
-// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 7, [[MUL]]
-// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[I]], align 8
-// CHECK1-NEXT: store i32 [[ADD8]], ptr [[TMP6]], align 4
-// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[I]], align 8
+// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 7, [[MUL]]
+// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[I2]], align 8
+// CHECK1-NEXT: store i32 [[ADD9]], ptr [[TMP6]], align 4
+// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[I2]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP8]])
// CHECK1-NEXT: br label [[FOR_INC:%.*]]
@@ -166,20 +184,20 @@ extern "C" void foo8(int a) {
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK1-NEXT: store i32 [[INC]], ptr [[DOTTILE_0_IV_I]], align 4
-// CHECK1-NEXT: br label [[FOR_COND3]], !llvm.loop [[LOOP3:![0-9]+]]
+// CHECK1-NEXT: br label [[FOR_COND4]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK1: for.end:
-// CHECK1-NEXT: br label [[FOR_INC9:%.*]]
-// CHECK1: for.inc9:
+// CHECK1-NEXT: br label [[FOR_INC10:%.*]]
+// CHECK1: for.inc10:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP10]], 5
-// CHECK1-NEXT: store i32 [[ADD10]], ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK1-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP10]], 5
+// CHECK1-NEXT: store i32 [[ADD11]], ptr [[DOTFLOOR_0_IV_I]], align 4
// CHECK1-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
-// CHECK1: for.end11:
+// CHECK1: for.end12:
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@foo1
-// CHECK1-SAME: (i32 noundef [[START:%.*]], i32 noundef [[END:%.*]], i32 noundef [[STEP:%.*]]) #[[ATTR0]] {
+// CHECK1-LABEL: define dso_local void @foo1(
+// CHECK1-SAME: i32 noundef [[START:%.*]], i32 noundef [[END:%.*]], i32 noundef [[STEP:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[START_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[END_ADDR:%.*]] = alloca i32, align 4
@@ -195,81 +213,83 @@ extern "C" void foo8(int a) {
// CHECK1-NEXT: store i32 [[END]], ptr [[END_ADDR]], align 4
// CHECK1-NEXT: store i32 [[STEP]], ptr [[STEP_ADDR]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[START_ADDR]], align 4
-// CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4
-// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[END_ADDR]], align 4
-// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_1]], align 4
-// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[STEP_ADDR]], align 4
-// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTNEW_STEP]], align 4
-// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
-// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
-// CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
+// CHECK1-NEXT: store i32 [[TMP0]], ptr [[I]], align 4
+// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[START_ADDR]], align 4
+// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[END_ADDR]], align 4
+// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
+// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[STEP_ADDR]], align 4
+// CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTNEW_STEP]], align 4
+// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
+// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], [[TMP5]]
// CHECK1-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1
-// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4
-// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], [[TMP5]]
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4
-// CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], [[TMP6]]
+// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], [[TMP6]]
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4
+// CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], [[TMP7]]
// CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1
// CHECK1-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: store i32 0, ptr [[DOTFLOOR_0_IV_I]], align 4
// CHECK1-NEXT: br label [[FOR_COND:%.*]]
// CHECK1: for.cond:
-// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
-// CHECK1-NEXT: [[ADD5:%.*]] = add i32 [[TMP8]], 1
-// CHECK1-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP7]], [[ADD5]]
+// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
+// CHECK1-NEXT: [[ADD5:%.*]] = add i32 [[TMP9]], 1
+// CHECK1-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP8]], [[ADD5]]
// CHECK1-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END17:%.*]]
// CHECK1: for.body:
-// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK1-NEXT: store i32 [[TMP9]], ptr [[DOTTILE_0_IV_I]], align 4
+// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK1-NEXT: store i32 [[TMP10]], ptr [[DOTTILE_0_IV_I]], align 4
// CHECK1-NEXT: br label [[FOR_COND6:%.*]]
// CHECK1: for.cond6:
-// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
-// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
-// CHECK1-NEXT: [[ADD7:%.*]] = add i32 [[TMP11]], 1
-// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP12]], 5
+// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
+// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
+// CHECK1-NEXT: [[ADD7:%.*]] = add i32 [[TMP12]], 1
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK1-NEXT: [[ADD8:%.*]] = add i32 [[TMP13]], 5
// CHECK1-NEXT: [[CMP9:%.*]] = icmp ult i32 [[ADD7]], [[ADD8]]
// CHECK1-NEXT: br i1 [[CMP9]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
-// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
-// CHECK1-NEXT: [[ADD10:%.*]] = add i32 [[TMP13]], 1
+// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
+// CHECK1-NEXT: [[ADD10:%.*]] = add i32 [[TMP14]], 1
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
-// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK1-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP14]], 5
+// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK1-NEXT: [[ADD11:%.*]] = add i32 [[TMP15]], 5
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[ADD10]], [[COND_TRUE]] ], [ [[ADD11]], [[COND_FALSE]] ]
-// CHECK1-NEXT: [[CMP12:%.*]] = icmp ult i32 [[TMP10]], [[COND]]
+// CHECK1-NEXT: [[CMP12:%.*]] = icmp ult i32 [[TMP11]], [[COND]]
// CHECK1-NEXT: br i1 [[CMP12]], label [[FOR_BODY13:%.*]], label [[FOR_END:%.*]]
// CHECK1: for.body13:
-// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
-// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
-// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4
-// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], [[TMP17]]
-// CHECK1-NEXT: [[ADD14:%.*]] = add i32 [[TMP15]], [[MUL]]
+// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
+// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4
+// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP17]], [[TMP18]]
+// CHECK1-NEXT: [[ADD14:%.*]] = add i32 [[TMP16]], [[MUL]]
// CHECK1-NEXT: store i32 [[ADD14]], ptr [[I]], align 4
-// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4
-// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP18]])
+// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4
+// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP19]])
// CHECK1-NEXT: br label [[FOR_INC:%.*]]
// CHECK1: for.inc:
-// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
-// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP19]], 1
+// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
+// CHECK1-NEXT: [[INC:%.*]] = add i32 [[TMP20]], 1
// CHECK1-NEXT: store i32 [[INC]], ptr [[DOTTILE_0_IV_I]], align 4
// CHECK1-NEXT: br label [[FOR_COND6]], !llvm.loop [[LOOP6:![0-9]+]]
// CHECK1: for.end:
// CHECK1-NEXT: br label [[FOR_INC15:%.*]]
// CHECK1: for.inc15:
-// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK1-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP20]], 5
+// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK1-NEXT: [[ADD16:%.*]] = add i32 [[TMP21]], 5
// CHECK1-NEXT: store i32 [[ADD16]], ptr [[DOTFLOOR_0_IV_I]], align 4
// CHECK1-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK1: for.end17:
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@foo2
-// CHECK1-SAME: (i32 noundef [[START:%.*]], i32 noundef [[END:%.*]], i32 noundef [[STEP:%.*]]) #[[ATTR0]] {
+// CHECK1-LABEL: define dso_local void @foo2(
+// CHECK1-SAME: i32 noundef [[START:%.*]], i32 noundef [[END:%.*]], i32 noundef [[STEP:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[START_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[END_ADDR:%.*]] = alloca i32, align 4
@@ -381,8 +401,8 @@ extern "C" void foo8(int a) {
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@foo3
-// CHECK1-SAME: () #[[ATTR0]] {
+// CHECK1-LABEL: define dso_local void @foo3(
+// CHECK1-SAME: ) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
@@ -523,8 +543,8 @@ extern "C" void foo8(int a) {
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@foo4
-// CHECK1-SAME: () #[[ATTR0]] {
+// CHECK1-LABEL: define dso_local void @foo4(
+// CHECK1-SAME: ) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
@@ -676,8 +696,8 @@ extern "C" void foo8(int a) {
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@foo5
-// CHECK1-SAME: () #[[ATTR0]] {
+// CHECK1-LABEL: define dso_local void @foo5(
+// CHECK1-SAME: ) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
@@ -885,15 +905,15 @@ extern "C" void foo8(int a) {
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@foo6
-// CHECK1-SAME: () #[[ATTR0]] {
+// CHECK1-LABEL: define dso_local void @foo6(
+// CHECK1-SAME: ) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 0, ptr @foo6.omp_outlined)
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@foo6.omp_outlined
-// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR4:[0-9]+]] {
+// CHECK1-LABEL: define internal void @foo6.omp_outlined(
+// CHECK1-SAME: ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -988,15 +1008,15 @@ extern "C" void foo8(int a) {
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@tfoo7
-// CHECK1-SAME: () #[[ATTR0]] {
+// CHECK1-LABEL: define dso_local void @tfoo7(
+// CHECK1-SAME: ) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void @_Z4foo7IiTnT_Li3ETnS0_Li5EEvS0_S0_(i32 noundef 0, i32 noundef 42)
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@_Z4foo7IiTnT_Li3ETnS0_Li5EEvS0_S0_
-// CHECK1-SAME: (i32 noundef [[START:%.*]], i32 noundef [[END:%.*]]) #[[ATTR0]] comdat {
+// CHECK1-LABEL: define linkonce_odr void @_Z4foo7IiTnT_Li3ETnS0_Li5EEvS0_S0_(
+// CHECK1-SAME: i32 noundef [[START:%.*]], i32 noundef [[END:%.*]]) #[[ATTR0]] comdat {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[START_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[END_ADDR:%.*]] = alloca i32, align 4
@@ -1039,7 +1059,7 @@ extern "C" void foo8(int a) {
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK1-NEXT: [[ADD7:%.*]] = add i32 [[TMP9]], 1
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 5
+// CHECK1-NEXT: [[ADD8:%.*]] = add i32 [[TMP10]], 5
// CHECK1-NEXT: [[CMP9:%.*]] = icmp ult i32 [[ADD7]], [[ADD8]]
// CHECK1-NEXT: br i1 [[CMP9]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
@@ -1048,7 +1068,7 @@ extern "C" void foo8(int a) {
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK1-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP12]], 5
+// CHECK1-NEXT: [[ADD11:%.*]] = add i32 [[TMP12]], 5
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[ADD10]], [[COND_TRUE]] ], [ [[ADD11]], [[COND_FALSE]] ]
@@ -1065,22 +1085,22 @@ extern "C" void foo8(int a) {
// CHECK1-NEXT: br label [[FOR_INC:%.*]]
// CHECK1: for.inc:
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
-// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP16]], 1
+// CHECK1-NEXT: [[INC:%.*]] = add i32 [[TMP16]], 1
// CHECK1-NEXT: store i32 [[INC]], ptr [[DOTTILE_0_IV_I]], align 4
// CHECK1-NEXT: br label [[FOR_COND6]], !llvm.loop [[LOOP21:![0-9]+]]
// CHECK1: for.end:
// CHECK1-NEXT: br label [[FOR_INC15:%.*]]
// CHECK1: for.inc15:
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK1-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP17]], 5
+// CHECK1-NEXT: [[ADD16:%.*]] = add i32 [[TMP17]], 5
// CHECK1-NEXT: store i32 [[ADD16]], ptr [[DOTFLOOR_0_IV_I]], align 4
// CHECK1-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK1: for.end17:
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@foo8
-// CHECK1-SAME: (i32 noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK1-LABEL: define dso_local void @foo8(
+// CHECK1-SAME: i32 noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
@@ -1168,22 +1188,219 @@ extern "C" void foo8(int a) {
// CHECK1-NEXT: ret void
//
//
-// CHECK1-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_tile_codegen.cpp
-// CHECK1-SAME: () #[[ATTR1]] section ".text.startup" {
+// CHECK1-LABEL: define dso_local void @foo9(
+// CHECK1-SAME: ptr noundef byval([[STRUCT_DATA_T:%.*]]) align 8 [[DATA:%.*]]) #[[ATTR0]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[__RANGE2:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[__END2:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[__BEGIN2:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTFLOOR_0_IV___BEGIN2:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTTILE_0_IV___BEGIN2:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[V:%.*]] = alloca double, align 8
+// CHECK1-NEXT: [[ARRAY:%.*]] = getelementptr inbounds [[STRUCT_DATA_T]], ptr [[DATA]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[ARRAY]], ptr [[__RANGE2]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[__RANGE2]], align 8
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [12 x double], ptr [[TMP0]], i64 0, i64 0
+// CHECK1-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY]], i64 12
+// CHECK1-NEXT: store ptr [[ADD_PTR]], ptr [[__END2]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__RANGE2]], align 8
+// CHECK1-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [12 x double], ptr [[TMP1]], i64 0, i64 0
+// CHECK1-NEXT: store ptr [[ARRAYDECAY1]], ptr [[__BEGIN2]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[__RANGE2]], align 8
+// CHECK1-NEXT: [[ARRAYDECAY2:%.*]] = getelementptr inbounds [12 x double], ptr [[TMP2]], i64 0, i64 0
+// CHECK1-NEXT: store ptr [[ARRAYDECAY2]], ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[__END2]], align 8
+// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTCAPTURE_EXPR_3]], align 8
+// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_3]], align 8
+// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK1-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint ptr [[TMP4]] to i64
+// CHECK1-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint ptr [[TMP5]] to i64
+// CHECK1-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]]
+// CHECK1-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 8
+// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 [[SUB]], 1
+// CHECK1-NEXT: [[DIV:%.*]] = sdiv i64 [[ADD]], 1
+// CHECK1-NEXT: [[SUB5:%.*]] = sub nsw i64 [[DIV]], 1
+// CHECK1-NEXT: store i64 [[SUB5]], ptr [[DOTCAPTURE_EXPR_4]], align 8
+// CHECK1-NEXT: store i64 0, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: br label [[FOR_COND:%.*]]
+// CHECK1: for.cond:
+// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_4]], align 8
+// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP7]], 1
+// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i64 [[TMP6]], [[ADD6]]
+// CHECK1-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END18:%.*]]
+// CHECK1: for.body:
+// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: store i64 [[TMP8]], ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: br label [[FOR_COND7:%.*]]
+// CHECK1: for.cond7:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_4]], align 8
+// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP10]], 1
+// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i64 [[TMP11]], 5
+// CHECK1-NEXT: [[CMP10:%.*]] = icmp slt i64 [[ADD8]], [[ADD9]]
+// CHECK1-NEXT: br i1 [[CMP10]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_4]], align 8
+// CHECK1-NEXT: [[ADD11:%.*]] = add nsw i64 [[TMP12]], 1
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: [[ADD12:%.*]] = add nsw i64 [[TMP13]], 5
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ [[ADD11]], [[COND_TRUE]] ], [ [[ADD12]], [[COND_FALSE]] ]
+// CHECK1-NEXT: [[CMP13:%.*]] = icmp slt i64 [[TMP9]], [[COND]]
+// CHECK1-NEXT: br i1 [[CMP13]], label [[FOR_BODY14:%.*]], label [[FOR_END:%.*]]
+// CHECK1: for.body14:
+// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK1-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP15]], 1
+// CHECK1-NEXT: [[ADD_PTR15:%.*]] = getelementptr inbounds double, ptr [[TMP14]], i64 [[MUL]]
+// CHECK1-NEXT: store ptr [[ADD_PTR15]], ptr [[__BEGIN2]], align 8
+// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[__BEGIN2]], align 8
+// CHECK1-NEXT: [[TMP17:%.*]] = load double, ptr [[TMP16]], align 8
+// CHECK1-NEXT: store double [[TMP17]], ptr [[V]], align 8
+// CHECK1-NEXT: [[TMP18:%.*]] = load double, ptr [[V]], align 8
+// CHECK1-NEXT: call void (...) @body(double noundef [[TMP18]])
+// CHECK1-NEXT: br label [[FOR_INC:%.*]]
+// CHECK1: for.inc:
+// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: [[INC:%.*]] = add nsw i64 [[TMP19]], 1
+// CHECK1-NEXT: store i64 [[INC]], ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: br label [[FOR_COND7]], !llvm.loop [[LOOP25:![0-9]+]]
+// CHECK1: for.end:
+// CHECK1-NEXT: br label [[FOR_INC16:%.*]]
+// CHECK1: for.inc16:
+// CHECK1-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP20]], 5
+// CHECK1-NEXT: store i64 [[ADD17]], ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
+// CHECK1: for.end18:
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define dso_local void @foo10(
+// CHECK1-SAME: ptr noundef byval([[STRUCT_DATA_T:%.*]]) align 8 [[DATA:%.*]]) #[[ATTR0]] {
+// CHECK1-NEXT: entry:
+// CHECK1-NEXT: [[C:%.*]] = alloca double, align 8
+// CHECK1-NEXT: [[__RANGE2:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[__END2:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[__BEGIN2:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca ptr, align 8
+// CHECK1-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTFLOOR_0_IV___BEGIN2:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[DOTTILE_0_IV___BEGIN2:%.*]] = alloca i64, align 8
+// CHECK1-NEXT: [[V:%.*]] = alloca double, align 8
+// CHECK1-NEXT: store double 4.200000e+01, ptr [[C]], align 8
+// CHECK1-NEXT: [[ARRAY:%.*]] = getelementptr inbounds [[STRUCT_DATA_T]], ptr [[DATA]], i32 0, i32 0
+// CHECK1-NEXT: store ptr [[ARRAY]], ptr [[__RANGE2]], align 8
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[__RANGE2]], align 8
+// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [12 x double], ptr [[TMP0]], i64 0, i64 0
+// CHECK1-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY]], i64 12
+// CHECK1-NEXT: store ptr [[ADD_PTR]], ptr [[__END2]], align 8
+// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__RANGE2]], align 8
+// CHECK1-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [12 x double], ptr [[TMP1]], i64 0, i64 0
+// CHECK1-NEXT: store ptr [[ARRAYDECAY1]], ptr [[__BEGIN2]], align 8
+// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[__RANGE2]], align 8
+// CHECK1-NEXT: [[ARRAYDECAY2:%.*]] = getelementptr inbounds [12 x double], ptr [[TMP2]], i64 0, i64 0
+// CHECK1-NEXT: store ptr [[ARRAYDECAY2]], ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[__END2]], align 8
+// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTCAPTURE_EXPR_3]], align 8
+// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_3]], align 8
+// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK1-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint ptr [[TMP4]] to i64
+// CHECK1-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint ptr [[TMP5]] to i64
+// CHECK1-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]]
+// CHECK1-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 8
+// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1
+// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 [[SUB]], 1
+// CHECK1-NEXT: [[DIV:%.*]] = sdiv i64 [[ADD]], 1
+// CHECK1-NEXT: [[SUB5:%.*]] = sub nsw i64 [[DIV]], 1
+// CHECK1-NEXT: store i64 [[SUB5]], ptr [[DOTCAPTURE_EXPR_4]], align 8
+// CHECK1-NEXT: store i64 0, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: br label [[FOR_COND:%.*]]
+// CHECK1: for.cond:
+// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_4]], align 8
+// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP7]], 1
+// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i64 [[TMP6]], [[ADD6]]
+// CHECK1-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END18:%.*]]
+// CHECK1: for.body:
+// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: store i64 [[TMP8]], ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: br label [[FOR_COND7:%.*]]
+// CHECK1: for.cond7:
+// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_4]], align 8
+// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP10]], 1
+// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i64 [[TMP11]], 5
+// CHECK1-NEXT: [[CMP10:%.*]] = icmp slt i64 [[ADD8]], [[ADD9]]
+// CHECK1-NEXT: br i1 [[CMP10]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK1: cond.true:
+// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_4]], align 8
+// CHECK1-NEXT: [[ADD11:%.*]] = add nsw i64 [[TMP12]], 1
+// CHECK1-NEXT: br label [[COND_END:%.*]]
+// CHECK1: cond.false:
+// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: [[ADD12:%.*]] = add nsw i64 [[TMP13]], 5
+// CHECK1-NEXT: br label [[COND_END]]
+// CHECK1: cond.end:
+// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ [[ADD11]], [[COND_TRUE]] ], [ [[ADD12]], [[COND_FALSE]] ]
+// CHECK1-NEXT: [[CMP13:%.*]] = icmp slt i64 [[TMP9]], [[COND]]
+// CHECK1-NEXT: br i1 [[CMP13]], label [[FOR_BODY14:%.*]], label [[FOR_END:%.*]]
+// CHECK1: for.body14:
+// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK1-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP15]], 1
+// CHECK1-NEXT: [[ADD_PTR15:%.*]] = getelementptr inbounds double, ptr [[TMP14]], i64 [[MUL]]
+// CHECK1-NEXT: store ptr [[ADD_PTR15]], ptr [[__BEGIN2]], align 8
+// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[__BEGIN2]], align 8
+// CHECK1-NEXT: [[TMP17:%.*]] = load double, ptr [[TMP16]], align 8
+// CHECK1-NEXT: store double [[TMP17]], ptr [[V]], align 8
+// CHECK1-NEXT: [[TMP18:%.*]] = load double, ptr [[C]], align 8
+// CHECK1-NEXT: [[TMP19:%.*]] = load double, ptr [[V]], align 8
+// CHECK1-NEXT: call void (...) @body(double noundef [[TMP18]], double noundef [[TMP19]])
+// CHECK1-NEXT: br label [[FOR_INC:%.*]]
+// CHECK1: for.inc:
+// CHECK1-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: [[INC:%.*]] = add nsw i64 [[TMP20]], 1
+// CHECK1-NEXT: store i64 [[INC]], ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: br label [[FOR_COND7]], !llvm.loop [[LOOP27:![0-9]+]]
+// CHECK1: for.end:
+// CHECK1-NEXT: br label [[FOR_INC16:%.*]]
+// CHECK1: for.inc16:
+// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP21]], 5
+// CHECK1-NEXT: store i64 [[ADD17]], ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK1-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
+// CHECK1: for.end18:
+// CHECK1-NEXT: ret void
+//
+//
+// CHECK1-LABEL: define internal void @_GLOBAL__sub_I_tile_codegen.cpp(
+// CHECK1-SAME: ) #[[ATTR1]] section ".text.startup" {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void @__cxx_global_var_init()
// CHECK1-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@__cxx_global_var_init
-// CHECK2-SAME: () #[[ATTR0:[0-9]+]] section ".text.startup" {
+// CHECK2-LABEL: define internal void @__cxx_global_var_init(
+// CHECK2-SAME: ) #[[ATTR0:[0-9]+]] section ".text.startup" {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) @s)
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@_ZN1SC1Ev
-// CHECK2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
+// CHECK2-LABEL: define linkonce_odr void @_ZN1SC1Ev(
+// CHECK2-SAME: ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
@@ -1192,50 +1409,52 @@ extern "C" void foo8(int a) {
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@_ZN1SC2Ev
-// CHECK2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
+// CHECK2-LABEL: define linkonce_odr void @_ZN1SC2Ev(
+// CHECK2-SAME: ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
-// CHECK2-NEXT: [[I:%.*]] = alloca ptr, align 8
+// CHECK2-NEXT: [[I2:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[DOTFLOOR_0_IV_I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTTILE_0_IV_I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK2-NEXT: [[I2:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
-// CHECK2-NEXT: store ptr [[I2]], ptr [[I]], align 8
+// CHECK2-NEXT: [[I:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK2-NEXT: store i32 7, ptr [[I]], align 4
+// CHECK2-NEXT: [[I3:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[THIS1]], i32 0, i32 0
+// CHECK2-NEXT: store ptr [[I3]], ptr [[I2]], align 8
// CHECK2-NEXT: store i32 0, ptr [[DOTFLOOR_0_IV_I]], align 4
// CHECK2-NEXT: br label [[FOR_COND:%.*]]
// CHECK2: for.cond:
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], 4
-// CHECK2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END11:%.*]]
+// CHECK2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END12:%.*]]
// CHECK2: for.body:
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTTILE_0_IV_I]], align 4
-// CHECK2-NEXT: br label [[FOR_COND3:%.*]]
-// CHECK2: for.cond3:
+// CHECK2-NEXT: br label [[FOR_COND4:%.*]]
+// CHECK2: for.cond4:
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 5
-// CHECK2-NEXT: [[CMP4:%.*]] = icmp slt i32 4, [[ADD]]
-// CHECK2-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK2-NEXT: [[CMP5:%.*]] = icmp slt i32 4, [[ADD]]
+// CHECK2-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP4]], 5
+// CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP4]], 5
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
-// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 4, [[COND_TRUE]] ], [ [[ADD5]], [[COND_FALSE]] ]
-// CHECK2-NEXT: [[CMP6:%.*]] = icmp slt i32 [[TMP2]], [[COND]]
-// CHECK2-NEXT: br i1 [[CMP6]], label [[FOR_BODY7:%.*]], label [[FOR_END:%.*]]
-// CHECK2: for.body7:
+// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 4, [[COND_TRUE]] ], [ [[ADD6]], [[COND_FALSE]] ]
+// CHECK2-NEXT: [[CMP7:%.*]] = icmp slt i32 [[TMP2]], [[COND]]
+// CHECK2-NEXT: br i1 [[CMP7]], label [[FOR_BODY8:%.*]], label [[FOR_END:%.*]]
+// CHECK2: for.body8:
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP5]], 3
-// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 7, [[MUL]]
-// CHECK2-NEXT: [[TMP6:%.*]] = load ptr, ptr [[I]], align 8
-// CHECK2-NEXT: store i32 [[ADD8]], ptr [[TMP6]], align 4
-// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[I]], align 8
+// CHECK2-NEXT: [[ADD9:%.*]] = add nsw i32 7, [[MUL]]
+// CHECK2-NEXT: [[TMP6:%.*]] = load ptr, ptr [[I2]], align 8
+// CHECK2-NEXT: store i32 [[ADD9]], ptr [[TMP6]], align 4
+// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[I2]], align 8
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP8]])
// CHECK2-NEXT: br label [[FOR_INC:%.*]]
@@ -1243,26 +1462,26 @@ extern "C" void foo8(int a) {
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK2-NEXT: store i32 [[INC]], ptr [[DOTTILE_0_IV_I]], align 4
-// CHECK2-NEXT: br label [[FOR_COND3]], !llvm.loop [[LOOP3:![0-9]+]]
+// CHECK2-NEXT: br label [[FOR_COND4]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK2: for.end:
-// CHECK2-NEXT: br label [[FOR_INC9:%.*]]
-// CHECK2: for.inc9:
+// CHECK2-NEXT: br label [[FOR_INC10:%.*]]
+// CHECK2: for.inc10:
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK2-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP10]], 5
-// CHECK2-NEXT: store i32 [[ADD10]], ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP10]], 5
+// CHECK2-NEXT: store i32 [[ADD11]], ptr [[DOTFLOOR_0_IV_I]], align 4
// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
-// CHECK2: for.end11:
+// CHECK2: for.end12:
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@body
-// CHECK2-SAME: (...) #[[ATTR1]] {
+// CHECK2-LABEL: define dso_local void @body(
+// CHECK2-SAME: ...) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@foo1
-// CHECK2-SAME: (i32 noundef [[START:%.*]], i32 noundef [[END:%.*]], i32 noundef [[STEP:%.*]]) #[[ATTR1]] {
+// CHECK2-LABEL: define dso_local void @foo1(
+// CHECK2-SAME: i32 noundef [[START:%.*]], i32 noundef [[END:%.*]], i32 noundef [[STEP:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[START_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[END_ADDR:%.*]] = alloca i32, align 4
@@ -1278,81 +1497,183 @@ extern "C" void foo8(int a) {
// CHECK2-NEXT: store i32 [[END]], ptr [[END_ADDR]], align 4
// CHECK2-NEXT: store i32 [[STEP]], ptr [[STEP_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[START_ADDR]], align 4
-// CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4
-// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[END_ADDR]], align 4
-// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_1]], align 4
-// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[STEP_ADDR]], align 4
-// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTNEW_STEP]], align 4
-// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
-// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
-// CHECK2-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]]
+// CHECK2-NEXT: store i32 [[TMP0]], ptr [[I]], align 4
+// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[START_ADDR]], align 4
+// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[END_ADDR]], align 4
+// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
+// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[STEP_ADDR]], align 4
+// CHECK2-NEXT: store i32 [[TMP3]], ptr [[DOTNEW_STEP]], align 4
+// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
+// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK2-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], [[TMP5]]
// CHECK2-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1
-// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4
-// CHECK2-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], [[TMP5]]
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4
-// CHECK2-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], [[TMP6]]
+// CHECK2-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], [[TMP6]]
+// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4
+// CHECK2-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], [[TMP7]]
// CHECK2-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1
// CHECK2-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: store i32 0, ptr [[DOTFLOOR_0_IV_I]], align 4
// CHECK2-NEXT: br label [[FOR_COND:%.*]]
// CHECK2: for.cond:
-// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
-// CHECK2-NEXT: [[ADD5:%.*]] = add i32 [[TMP8]], 1
-// CHECK2-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP7]], [[ADD5]]
+// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
+// CHECK2-NEXT: [[ADD5:%.*]] = add i32 [[TMP9]], 1
+// CHECK2-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP8]], [[ADD5]]
// CHECK2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END17:%.*]]
// CHECK2: for.body:
-// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK2-NEXT: store i32 [[TMP9]], ptr [[DOTTILE_0_IV_I]], align 4
+// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK2-NEXT: store i32 [[TMP10]], ptr [[DOTTILE_0_IV_I]], align 4
// CHECK2-NEXT: br label [[FOR_COND6:%.*]]
// CHECK2: for.cond6:
-// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
-// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
-// CHECK2-NEXT: [[ADD7:%.*]] = add i32 [[TMP11]], 1
-// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP12]], 5
+// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
+// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
+// CHECK2-NEXT: [[ADD7:%.*]] = add i32 [[TMP12]], 1
+// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK2-NEXT: [[ADD8:%.*]] = add i32 [[TMP13]], 5
// CHECK2-NEXT: [[CMP9:%.*]] = icmp ult i32 [[ADD7]], [[ADD8]]
// CHECK2-NEXT: br i1 [[CMP9]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
-// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
-// CHECK2-NEXT: [[ADD10:%.*]] = add i32 [[TMP13]], 1
+// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
+// CHECK2-NEXT: [[ADD10:%.*]] = add i32 [[TMP14]], 1
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
-// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP14]], 5
+// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK2-NEXT: [[ADD11:%.*]] = add i32 [[TMP15]], 5
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[ADD10]], [[COND_TRUE]] ], [ [[ADD11]], [[COND_FALSE]] ]
-// CHECK2-NEXT: [[CMP12:%.*]] = icmp ult i32 [[TMP10]], [[COND]]
+// CHECK2-NEXT: [[CMP12:%.*]] = icmp ult i32 [[TMP11]], [[COND]]
// CHECK2-NEXT: br i1 [[CMP12]], label [[FOR_BODY13:%.*]], label [[FOR_END:%.*]]
// CHECK2: for.body13:
-// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
-// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
-// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4
-// CHECK2-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], [[TMP17]]
-// CHECK2-NEXT: [[ADD14:%.*]] = add i32 [[TMP15]], [[MUL]]
+// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
+// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
+// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4
+// CHECK2-NEXT: [[MUL:%.*]] = mul i32 [[TMP17]], [[TMP18]]
+// CHECK2-NEXT: [[ADD14:%.*]] = add i32 [[TMP16]], [[MUL]]
// CHECK2-NEXT: store i32 [[ADD14]], ptr [[I]], align 4
-// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4
-// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP18]])
+// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4
+// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP19]])
// CHECK2-NEXT: br label [[FOR_INC:%.*]]
// CHECK2: for.inc:
-// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
-// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP19]], 1
+// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
+// CHECK2-NEXT: [[INC:%.*]] = add i32 [[TMP20]], 1
// CHECK2-NEXT: store i32 [[INC]], ptr [[DOTTILE_0_IV_I]], align 4
// CHECK2-NEXT: br label [[FOR_COND6]], !llvm.loop [[LOOP6:![0-9]+]]
// CHECK2: for.end:
// CHECK2-NEXT: br label [[FOR_INC15:%.*]]
// CHECK2: for.inc15:
-// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK2-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP20]], 5
+// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
+// CHECK2-NEXT: [[ADD16:%.*]] = add i32 [[TMP21]], 5
// CHECK2-NEXT: store i32 [[ADD16]], ptr [[DOTFLOOR_0_IV_I]], align 4
// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK2: for.end17:
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@foo2
-// CHECK2-SAME: (i32 noundef [[START:%.*]], i32 noundef [[END:%.*]], i32 noundef [[STEP:%.*]]) #[[ATTR1]] {
+// CHECK2-LABEL: define dso_local void @foo10(
+// CHECK2-SAME: ptr noundef byval([[STRUCT_DATA_T:%.*]]) align 8 [[DATA:%.*]]) #[[ATTR1]] {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: [[C:%.*]] = alloca double, align 8
+// CHECK2-NEXT: [[__RANGE2:%.*]] = alloca ptr, align 8
+// CHECK2-NEXT: [[__END2:%.*]] = alloca ptr, align 8
+// CHECK2-NEXT: [[__BEGIN2:%.*]] = alloca ptr, align 8
+// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
+// CHECK2-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca ptr, align 8
+// CHECK2-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i64, align 8
+// CHECK2-NEXT: [[DOTFLOOR_0_IV___BEGIN2:%.*]] = alloca i64, align 8
+// CHECK2-NEXT: [[DOTTILE_0_IV___BEGIN2:%.*]] = alloca i64, align 8
+// CHECK2-NEXT: [[V:%.*]] = alloca double, align 8
+// CHECK2-NEXT: store double 4.200000e+01, ptr [[C]], align 8
+// CHECK2-NEXT: [[ARRAY:%.*]] = getelementptr inbounds [[STRUCT_DATA_T]], ptr [[DATA]], i32 0, i32 0
+// CHECK2-NEXT: store ptr [[ARRAY]], ptr [[__RANGE2]], align 8
+// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[__RANGE2]], align 8
+// CHECK2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [12 x double], ptr [[TMP0]], i64 0, i64 0
+// CHECK2-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY]], i64 12
+// CHECK2-NEXT: store ptr [[ADD_PTR]], ptr [[__END2]], align 8
+// CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__RANGE2]], align 8
+// CHECK2-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [12 x double], ptr [[TMP1]], i64 0, i64 0
+// CHECK2-NEXT: store ptr [[ARRAYDECAY1]], ptr [[__BEGIN2]], align 8
+// CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[__RANGE2]], align 8
+// CHECK2-NEXT: [[ARRAYDECAY2:%.*]] = getelementptr inbounds [12 x double], ptr [[TMP2]], i64 0, i64 0
+// CHECK2-NEXT: store ptr [[ARRAYDECAY2]], ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[__END2]], align 8
+// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTCAPTURE_EXPR_3]], align 8
+// CHECK2-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_3]], align 8
+// CHECK2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK2-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint ptr [[TMP4]] to i64
+// CHECK2-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint ptr [[TMP5]] to i64
+// CHECK2-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]]
+// CHECK2-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 8
+// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1
+// CHECK2-NEXT: [[ADD:%.*]] = add nsw i64 [[SUB]], 1
+// CHECK2-NEXT: [[DIV:%.*]] = sdiv i64 [[ADD]], 1
+// CHECK2-NEXT: [[SUB5:%.*]] = sub nsw i64 [[DIV]], 1
+// CHECK2-NEXT: store i64 [[SUB5]], ptr [[DOTCAPTURE_EXPR_4]], align 8
+// CHECK2-NEXT: store i64 0, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: br label [[FOR_COND:%.*]]
+// CHECK2: for.cond:
+// CHECK2-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_4]], align 8
+// CHECK2-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP7]], 1
+// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i64 [[TMP6]], [[ADD6]]
+// CHECK2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END18:%.*]]
+// CHECK2: for.body:
+// CHECK2-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: store i64 [[TMP8]], ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: br label [[FOR_COND7:%.*]]
+// CHECK2: for.cond7:
+// CHECK2-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_4]], align 8
+// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP10]], 1
+// CHECK2-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: [[ADD9:%.*]] = add nsw i64 [[TMP11]], 5
+// CHECK2-NEXT: [[CMP10:%.*]] = icmp slt i64 [[ADD8]], [[ADD9]]
+// CHECK2-NEXT: br i1 [[CMP10]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK2: cond.true:
+// CHECK2-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_4]], align 8
+// CHECK2-NEXT: [[ADD11:%.*]] = add nsw i64 [[TMP12]], 1
+// CHECK2-NEXT: br label [[COND_END:%.*]]
+// CHECK2: cond.false:
+// CHECK2-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: [[ADD12:%.*]] = add nsw i64 [[TMP13]], 5
+// CHECK2-NEXT: br label [[COND_END]]
+// CHECK2: cond.end:
+// CHECK2-NEXT: [[COND:%.*]] = phi i64 [ [[ADD11]], [[COND_TRUE]] ], [ [[ADD12]], [[COND_FALSE]] ]
+// CHECK2-NEXT: [[CMP13:%.*]] = icmp slt i64 [[TMP9]], [[COND]]
+// CHECK2-NEXT: br i1 [[CMP13]], label [[FOR_BODY14:%.*]], label [[FOR_END:%.*]]
+// CHECK2: for.body14:
+// CHECK2-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK2-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP15]], 1
+// CHECK2-NEXT: [[ADD_PTR15:%.*]] = getelementptr inbounds double, ptr [[TMP14]], i64 [[MUL]]
+// CHECK2-NEXT: store ptr [[ADD_PTR15]], ptr [[__BEGIN2]], align 8
+// CHECK2-NEXT: [[TMP16:%.*]] = load ptr, ptr [[__BEGIN2]], align 8
+// CHECK2-NEXT: [[TMP17:%.*]] = load double, ptr [[TMP16]], align 8
+// CHECK2-NEXT: store double [[TMP17]], ptr [[V]], align 8
+// CHECK2-NEXT: [[TMP18:%.*]] = load double, ptr [[C]], align 8
+// CHECK2-NEXT: [[TMP19:%.*]] = load double, ptr [[V]], align 8
+// CHECK2-NEXT: call void (...) @body(double noundef [[TMP18]], double noundef [[TMP19]])
+// CHECK2-NEXT: br label [[FOR_INC:%.*]]
+// CHECK2: for.inc:
+// CHECK2-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: [[INC:%.*]] = add nsw i64 [[TMP20]], 1
+// CHECK2-NEXT: store i64 [[INC]], ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: br label [[FOR_COND7]], !llvm.loop [[LOOP8:![0-9]+]]
+// CHECK2: for.end:
+// CHECK2-NEXT: br label [[FOR_INC16:%.*]]
+// CHECK2: for.inc16:
+// CHECK2-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP21]], 5
+// CHECK2-NEXT: store i64 [[ADD17]], ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
+// CHECK2: for.end18:
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define dso_local void @foo2(
+// CHECK2-SAME: i32 noundef [[START:%.*]], i32 noundef [[END:%.*]], i32 noundef [[STEP:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[START_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[END_ADDR:%.*]] = alloca i32, align 4
@@ -1438,34 +1759,34 @@ extern "C" void foo8(int a) {
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTTILE_1_IV_J]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK2-NEXT: store i32 [[INC]], ptr [[DOTTILE_1_IV_J]], align 4
-// CHECK2-NEXT: br label [[FOR_COND10]], !llvm.loop [[LOOP8:![0-9]+]]
+// CHECK2-NEXT: br label [[FOR_COND10]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK2: for.end:
// CHECK2-NEXT: br label [[FOR_INC22:%.*]]
// CHECK2: for.inc22:
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
// CHECK2-NEXT: [[INC23:%.*]] = add nsw i32 [[TMP15]], 1
// CHECK2-NEXT: store i32 [[INC23]], ptr [[DOTTILE_0_IV_I]], align 4
-// CHECK2-NEXT: br label [[FOR_COND4]], !llvm.loop [[LOOP9:![0-9]+]]
+// CHECK2-NEXT: br label [[FOR_COND4]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK2: for.end24:
// CHECK2-NEXT: br label [[FOR_INC25:%.*]]
// CHECK2: for.inc25:
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
// CHECK2-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP16]], 5
// CHECK2-NEXT: store i32 [[ADD26]], ptr [[DOTFLOOR_1_IV_J]], align 4
-// CHECK2-NEXT: br label [[FOR_COND1]], !llvm.loop [[LOOP10:![0-9]+]]
+// CHECK2-NEXT: br label [[FOR_COND1]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK2: for.end27:
// CHECK2-NEXT: br label [[FOR_INC28:%.*]]
// CHECK2: for.inc28:
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
// CHECK2-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP17]], 5
// CHECK2-NEXT: store i32 [[ADD29]], ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
+// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK2: for.end30:
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@foo3
-// CHECK2-SAME: () #[[ATTR1]] {
+// CHECK2-LABEL: define dso_local void @foo3(
+// CHECK2-SAME: ) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
@@ -1574,21 +1895,21 @@ extern "C" void foo8(int a) {
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTTILE_1_IV_J]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP20]], 1
// CHECK2-NEXT: store i32 [[INC]], ptr [[DOTTILE_1_IV_J]], align 4
-// CHECK2-NEXT: br label [[FOR_COND15]], !llvm.loop [[LOOP12:![0-9]+]]
+// CHECK2-NEXT: br label [[FOR_COND15]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK2: for.end:
// CHECK2-NEXT: br label [[FOR_INC27:%.*]]
// CHECK2: for.inc27:
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
// CHECK2-NEXT: [[INC28:%.*]] = add nsw i32 [[TMP21]], 1
// CHECK2-NEXT: store i32 [[INC28]], ptr [[DOTTILE_0_IV_I]], align 4
-// CHECK2-NEXT: br label [[FOR_COND3]], !llvm.loop [[LOOP13:![0-9]+]]
+// CHECK2-NEXT: br label [[FOR_COND3]], !llvm.loop [[LOOP15:![0-9]+]]
// CHECK2: for.end29:
// CHECK2-NEXT: br label [[FOR_INC30:%.*]]
// CHECK2: for.inc30:
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
// CHECK2-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP22]], 5
// CHECK2-NEXT: store i32 [[ADD31]], ptr [[DOTFLOOR_1_IV_J]], align 4
-// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
+// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK2: for.end32:
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
@@ -1606,8 +1927,8 @@ extern "C" void foo8(int a) {
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@foo4
-// CHECK2-SAME: () #[[ATTR1]] {
+// CHECK2-LABEL: define dso_local void @foo4(
+// CHECK2-SAME: ) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
@@ -1727,21 +2048,21 @@ extern "C" void foo8(int a) {
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTTILE_1_IV_J]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP22]], 1
// CHECK2-NEXT: store i32 [[INC]], ptr [[DOTTILE_1_IV_J]], align 4
-// CHECK2-NEXT: br label [[FOR_COND20]], !llvm.loop [[LOOP15:![0-9]+]]
+// CHECK2-NEXT: br label [[FOR_COND20]], !llvm.loop [[LOOP17:![0-9]+]]
// CHECK2: for.end:
// CHECK2-NEXT: br label [[FOR_INC32:%.*]]
// CHECK2: for.inc32:
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
// CHECK2-NEXT: [[INC33:%.*]] = add nsw i32 [[TMP23]], 1
// CHECK2-NEXT: store i32 [[INC33]], ptr [[DOTTILE_0_IV_I]], align 4
-// CHECK2-NEXT: br label [[FOR_COND8]], !llvm.loop [[LOOP16:![0-9]+]]
+// CHECK2-NEXT: br label [[FOR_COND8]], !llvm.loop [[LOOP18:![0-9]+]]
// CHECK2: for.end34:
// CHECK2-NEXT: br label [[FOR_INC35:%.*]]
// CHECK2: for.inc35:
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTFLOOR_1_IV_J]], align 4
// CHECK2-NEXT: [[ADD36:%.*]] = add nsw i32 [[TMP24]], 5
// CHECK2-NEXT: store i32 [[ADD36]], ptr [[DOTFLOOR_1_IV_J]], align 4
-// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]]
+// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK2: for.end37:
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
@@ -1759,8 +2080,8 @@ extern "C" void foo8(int a) {
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@foo5
-// CHECK2-SAME: () #[[ATTR1]] {
+// CHECK2-LABEL: define dso_local void @foo5(
+// CHECK2-SAME: ) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
@@ -1968,15 +2289,15 @@ extern "C" void foo8(int a) {
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@foo6
-// CHECK2-SAME: () #[[ATTR1]] {
+// CHECK2-LABEL: define dso_local void @foo6(
+// CHECK2-SAME: ) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 0, ptr @foo6.omp_outlined)
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@foo6.omp_outlined
-// CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR4:[0-9]+]] {
+// CHECK2-LABEL: define internal void @foo6.omp_outlined(
+// CHECK2-SAME: ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
@@ -2054,7 +2375,7 @@ extern "C" void foo8(int a) {
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK2-NEXT: store i32 [[INC]], ptr [[DOTTILE_0_IV_I]], align 4
-// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
+// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
// CHECK2: for.end:
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
@@ -2071,8 +2392,8 @@ extern "C" void foo8(int a) {
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@foo8
-// CHECK2-SAME: (i32 noundef [[A:%.*]]) #[[ATTR1]] {
+// CHECK2-LABEL: define dso_local void @foo8(
+// CHECK2-SAME: i32 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
@@ -2138,7 +2459,7 @@ extern "C" void foo8(int a) {
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK2-NEXT: store i32 [[INC]], ptr [[DOTTILE_0_IV_I]], align 4
-// CHECK2-NEXT: br label [[FOR_COND1]], !llvm.loop [[LOOP21:![0-9]+]]
+// CHECK2-NEXT: br label [[FOR_COND1]], !llvm.loop [[LOOP23:![0-9]+]]
// CHECK2: for.end:
// CHECK2-NEXT: br label [[FOR_INC17:%.*]]
// CHECK2: for.inc17:
@@ -2155,20 +2476,117 @@ extern "C" void foo8(int a) {
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
// CHECK2-NEXT: [[ADD23:%.*]] = add nsw i32 [[TMP14]], [[COND22]]
// CHECK2-NEXT: store i32 [[ADD23]], ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
+// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
// CHECK2: for.end24:
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@tfoo7
-// CHECK2-SAME: () #[[ATTR1]] {
+// CHECK2-LABEL: define dso_local void @foo9(
+// CHECK2-SAME: ptr noundef byval([[STRUCT_DATA_T:%.*]]) align 8 [[DATA:%.*]]) #[[ATTR1]] {
+// CHECK2-NEXT: entry:
+// CHECK2-NEXT: [[__RANGE2:%.*]] = alloca ptr, align 8
+// CHECK2-NEXT: [[__END2:%.*]] = alloca ptr, align 8
+// CHECK2-NEXT: [[__BEGIN2:%.*]] = alloca ptr, align 8
+// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8
+// CHECK2-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca ptr, align 8
+// CHECK2-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i64, align 8
+// CHECK2-NEXT: [[DOTFLOOR_0_IV___BEGIN2:%.*]] = alloca i64, align 8
+// CHECK2-NEXT: [[DOTTILE_0_IV___BEGIN2:%.*]] = alloca i64, align 8
+// CHECK2-NEXT: [[V:%.*]] = alloca double, align 8
+// CHECK2-NEXT: [[ARRAY:%.*]] = getelementptr inbounds [[STRUCT_DATA_T]], ptr [[DATA]], i32 0, i32 0
+// CHECK2-NEXT: store ptr [[ARRAY]], ptr [[__RANGE2]], align 8
+// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[__RANGE2]], align 8
+// CHECK2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [12 x double], ptr [[TMP0]], i64 0, i64 0
+// CHECK2-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY]], i64 12
+// CHECK2-NEXT: store ptr [[ADD_PTR]], ptr [[__END2]], align 8
+// CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[__RANGE2]], align 8
+// CHECK2-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [12 x double], ptr [[TMP1]], i64 0, i64 0
+// CHECK2-NEXT: store ptr [[ARRAYDECAY1]], ptr [[__BEGIN2]], align 8
+// CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[__RANGE2]], align 8
+// CHECK2-NEXT: [[ARRAYDECAY2:%.*]] = getelementptr inbounds [12 x double], ptr [[TMP2]], i64 0, i64 0
+// CHECK2-NEXT: store ptr [[ARRAYDECAY2]], ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[__END2]], align 8
+// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTCAPTURE_EXPR_3]], align 8
+// CHECK2-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_3]], align 8
+// CHECK2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK2-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint ptr [[TMP4]] to i64
+// CHECK2-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint ptr [[TMP5]] to i64
+// CHECK2-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]]
+// CHECK2-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 8
+// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1
+// CHECK2-NEXT: [[ADD:%.*]] = add nsw i64 [[SUB]], 1
+// CHECK2-NEXT: [[DIV:%.*]] = sdiv i64 [[ADD]], 1
+// CHECK2-NEXT: [[SUB5:%.*]] = sub nsw i64 [[DIV]], 1
+// CHECK2-NEXT: store i64 [[SUB5]], ptr [[DOTCAPTURE_EXPR_4]], align 8
+// CHECK2-NEXT: store i64 0, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: br label [[FOR_COND:%.*]]
+// CHECK2: for.cond:
+// CHECK2-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_4]], align 8
+// CHECK2-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP7]], 1
+// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i64 [[TMP6]], [[ADD6]]
+// CHECK2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END18:%.*]]
+// CHECK2: for.body:
+// CHECK2-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: store i64 [[TMP8]], ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: br label [[FOR_COND7:%.*]]
+// CHECK2: for.cond7:
+// CHECK2-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_4]], align 8
+// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP10]], 1
+// CHECK2-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: [[ADD9:%.*]] = add nsw i64 [[TMP11]], 5
+// CHECK2-NEXT: [[CMP10:%.*]] = icmp slt i64 [[ADD8]], [[ADD9]]
+// CHECK2-NEXT: br i1 [[CMP10]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
+// CHECK2: cond.true:
+// CHECK2-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_4]], align 8
+// CHECK2-NEXT: [[ADD11:%.*]] = add nsw i64 [[TMP12]], 1
+// CHECK2-NEXT: br label [[COND_END:%.*]]
+// CHECK2: cond.false:
+// CHECK2-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: [[ADD12:%.*]] = add nsw i64 [[TMP13]], 5
+// CHECK2-NEXT: br label [[COND_END]]
+// CHECK2: cond.end:
+// CHECK2-NEXT: [[COND:%.*]] = phi i64 [ [[ADD11]], [[COND_TRUE]] ], [ [[ADD12]], [[COND_FALSE]] ]
+// CHECK2-NEXT: [[CMP13:%.*]] = icmp slt i64 [[TMP9]], [[COND]]
+// CHECK2-NEXT: br i1 [[CMP13]], label [[FOR_BODY14:%.*]], label [[FOR_END:%.*]]
+// CHECK2: for.body14:
+// CHECK2-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8
+// CHECK2-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP15]], 1
+// CHECK2-NEXT: [[ADD_PTR15:%.*]] = getelementptr inbounds double, ptr [[TMP14]], i64 [[MUL]]
+// CHECK2-NEXT: store ptr [[ADD_PTR15]], ptr [[__BEGIN2]], align 8
+// CHECK2-NEXT: [[TMP16:%.*]] = load ptr, ptr [[__BEGIN2]], align 8
+// CHECK2-NEXT: [[TMP17:%.*]] = load double, ptr [[TMP16]], align 8
+// CHECK2-NEXT: store double [[TMP17]], ptr [[V]], align 8
+// CHECK2-NEXT: [[TMP18:%.*]] = load double, ptr [[V]], align 8
+// CHECK2-NEXT: call void (...) @body(double noundef [[TMP18]])
+// CHECK2-NEXT: br label [[FOR_INC:%.*]]
+// CHECK2: for.inc:
+// CHECK2-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: [[INC:%.*]] = add nsw i64 [[TMP19]], 1
+// CHECK2-NEXT: store i64 [[INC]], ptr [[DOTTILE_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: br label [[FOR_COND7]], !llvm.loop [[LOOP25:![0-9]+]]
+// CHECK2: for.end:
+// CHECK2-NEXT: br label [[FOR_INC16:%.*]]
+// CHECK2: for.inc16:
+// CHECK2-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP20]], 5
+// CHECK2-NEXT: store i64 [[ADD17]], ptr [[DOTFLOOR_0_IV___BEGIN2]], align 8
+// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
+// CHECK2: for.end18:
+// CHECK2-NEXT: ret void
+//
+//
+// CHECK2-LABEL: define dso_local void @tfoo7(
+// CHECK2-SAME: ) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: call void @_Z4foo7IiTnT_Li3ETnS0_Li5EEvS0_S0_(i32 noundef 0, i32 noundef 42)
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@_Z4foo7IiTnT_Li3ETnS0_Li5EEvS0_S0_
-// CHECK2-SAME: (i32 noundef [[START:%.*]], i32 noundef [[END:%.*]]) #[[ATTR1]] comdat {
+// CHECK2-LABEL: define linkonce_odr void @_Z4foo7IiTnT_Li3ETnS0_Li5EEvS0_S0_(
+// CHECK2-SAME: i32 noundef [[START:%.*]], i32 noundef [[END:%.*]]) #[[ATTR1]] comdat {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[START_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[END_ADDR:%.*]] = alloca i32, align 4
@@ -2211,7 +2629,7 @@ extern "C" void foo8(int a) {
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK2-NEXT: [[ADD7:%.*]] = add i32 [[TMP9]], 1
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 5
+// CHECK2-NEXT: [[ADD8:%.*]] = add i32 [[TMP10]], 5
// CHECK2-NEXT: [[CMP9:%.*]] = icmp ult i32 [[ADD7]], [[ADD8]]
// CHECK2-NEXT: br i1 [[CMP9]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
@@ -2220,7 +2638,7 @@ extern "C" void foo8(int a) {
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP12]], 5
+// CHECK2-NEXT: [[ADD11:%.*]] = add i32 [[TMP12]], 5
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[ADD10]], [[COND_TRUE]] ], [ [[ADD11]], [[COND_FALSE]] ]
@@ -2237,23 +2655,74 @@ extern "C" void foo8(int a) {
// CHECK2-NEXT: br label [[FOR_INC:%.*]]
// CHECK2: for.inc:
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTTILE_0_IV_I]], align 4
-// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP16]], 1
+// CHECK2-NEXT: [[INC:%.*]] = add i32 [[TMP16]], 1
// CHECK2-NEXT: store i32 [[INC]], ptr [[DOTTILE_0_IV_I]], align 4
-// CHECK2-NEXT: br label [[FOR_COND6]], !llvm.loop [[LOOP23:![0-9]+]]
+// CHECK2-NEXT: br label [[FOR_COND6]], !llvm.loop [[LOOP27:![0-9]+]]
// CHECK2: for.end:
// CHECK2-NEXT: br label [[FOR_INC15:%.*]]
// CHECK2: for.inc15:
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK2-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP17]], 5
+// CHECK2-NEXT: [[ADD16:%.*]] = add i32 [[TMP17]], 5
// CHECK2-NEXT: store i32 [[ADD16]], ptr [[DOTFLOOR_0_IV_I]], align 4
-// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
+// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
// CHECK2: for.end17:
// CHECK2-NEXT: ret void
//
//
-// CHECK2-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_tile_codegen.cpp
-// CHECK2-SAME: () #[[ATTR0]] section ".text.startup" {
+// CHECK2-LABEL: define internal void @_GLOBAL__sub_I_tile_codegen.cpp(
+// CHECK2-SAME: ) #[[ATTR0]] section ".text.startup" {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: call void @__cxx_global_var_init()
// CHECK2-NEXT: ret void
//
+//.
+// CHECK1: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]}
+// CHECK1: [[META4]] = !{!"llvm.loop.mustprogress"}
+// CHECK1: [[LOOP5]] = distinct !{[[LOOP5]], [[META4]]}
+// CHECK1: [[LOOP6]] = distinct !{[[LOOP6]], [[META4]]}
+// CHECK1: [[LOOP7]] = distinct !{[[LOOP7]], [[META4]]}
+// CHECK1: [[LOOP8]] = distinct !{[[LOOP8]], [[META4]]}
+// CHECK1: [[LOOP9]] = distinct !{[[LOOP9]], [[META4]]}
+// CHECK1: [[LOOP10]] = distinct !{[[LOOP10]], [[META4]]}
+// CHECK1: [[LOOP11]] = distinct !{[[LOOP11]], [[META4]]}
+// CHECK1: [[LOOP12]] = distinct !{[[LOOP12]], [[META4]]}
+// CHECK1: [[LOOP13]] = distinct !{[[LOOP13]], [[META4]]}
+// CHECK1: [[LOOP14]] = distinct !{[[LOOP14]], [[META4]]}
+// CHECK1: [[LOOP15]] = distinct !{[[LOOP15]], [[META4]]}
+// CHECK1: [[LOOP16]] = distinct !{[[LOOP16]], [[META4]]}
+// CHECK1: [[LOOP17]] = distinct !{[[LOOP17]], [[META4]]}
+// CHECK1: [[LOOP18]] = distinct !{[[LOOP18]], [[META4]]}
+// CHECK1: [[LOOP21]] = distinct !{[[LOOP21]], [[META4]]}
+// CHECK1: [[LOOP22]] = distinct !{[[LOOP22]], [[META4]]}
+// CHECK1: [[LOOP23]] = distinct !{[[LOOP23]], [[META4]]}
+// CHECK1: [[LOOP24]] = distinct !{[[LOOP24]], [[META4]]}
+// CHECK1: [[LOOP25]] = distinct !{[[LOOP25]], [[META4]]}
+// CHECK1: [[LOOP26]] = distinct !{[[LOOP26]], [[META4]]}
+// CHECK1: [[LOOP27]] = distinct !{[[LOOP27]], [[META4]]}
+// CHECK1: [[LOOP28]] = distinct !{[[LOOP28]], [[META4]]}
+//.
+// CHECK2: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]}
+// CHECK2: [[META4]] = !{!"llvm.loop.mustprogress"}
+// CHECK2: [[LOOP5]] = distinct !{[[LOOP5]], [[META4]]}
+// CHECK2: [[LOOP6]] = distinct !{[[LOOP6]], [[META4]]}
+// CHECK2: [[LOOP7]] = distinct !{[[LOOP7]], [[META4]]}
+// CHECK2: [[LOOP8]] = distinct !{[[LOOP8]], [[META4]]}
+// CHECK2: [[LOOP9]] = distinct !{[[LOOP9]], [[META4]]}
+// CHECK2: [[LOOP10]] = distinct !{[[LOOP10]], [[META4]]}
+// CHECK2: [[LOOP11]] = distinct !{[[LOOP11]], [[META4]]}
+// CHECK2: [[LOOP12]] = distinct !{[[LOOP12]], [[META4]]}
+// CHECK2: [[LOOP13]] = distinct !{[[LOOP13]], [[META4]]}
+// CHECK2: [[LOOP14]] = distinct !{[[LOOP14]], [[META4]]}
+// CHECK2: [[LOOP15]] = distinct !{[[LOOP15]], [[META4]]}
+// CHECK2: [[LOOP16]] = distinct !{[[LOOP16]], [[META4]]}
+// CHECK2: [[LOOP17]] = distinct !{[[LOOP17]], [[META4]]}
+// CHECK2: [[LOOP18]] = distinct !{[[LOOP18]], [[META4]]}
+// CHECK2: [[LOOP19]] = distinct !{[[LOOP19]], [[META4]]}
+// CHECK2: [[LOOP20]] = distinct !{[[LOOP20]], [[META4]]}
+// CHECK2: [[LOOP23]] = distinct !{[[LOOP23]], [[META4]]}
+// CHECK2: [[LOOP24]] = distinct !{[[LOOP24]], [[META4]]}
+// CHECK2: [[LOOP25]] = distinct !{[[LOOP25]], [[META4]]}
+// CHECK2: [[LOOP26]] = distinct !{[[LOOP26]], [[META4]]}
+// CHECK2: [[LOOP27]] = distinct !{[[LOOP27]], [[META4]]}
+// CHECK2: [[LOOP28]] = distinct !{[[LOOP28]], [[META4]]}
+//.
diff --git a/clang/test/OpenMP/tile_codegen_for_dependent.cpp b/clang/test/OpenMP/tile_codegen_for_dependent.cpp
index 93c51c9165a4..820d33d15287 100644
--- a/clang/test/OpenMP/tile_codegen_for_dependent.cpp
+++ b/clang/test/OpenMP/tile_codegen_for_dependent.cpp
@@ -17,7 +17,7 @@
extern "C" void body(...) {}
-// IR-LABEL: @func(
+// IR-LABEL: define {{.*}}@func(
// IR-NEXT: [[ENTRY:.*]]:
// IR-NEXT: %[[START_ADDR:.+]] = alloca i32, align 4
// IR-NEXT: %[[END_ADDR:.+]] = alloca i32, align 4
@@ -27,18 +27,18 @@ extern "C" void body(...) {}
// IR-NEXT: %[[I:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTCAPTURE_EXPR_:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTCAPTURE_EXPR_1:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTNEW_STEP:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTCAPTURE_EXPR_2:.+]] = alloca i32, align 4
-// IR-NEXT: %[[DOTCAPTURE_EXPR_3:.+]] = alloca i32, align 4
-// IR-NEXT: %[[DOTCAPTURE_EXPR_6:.+]] = alloca i32, align 4
-// IR-NEXT: %[[DOTCAPTURE_EXPR_8:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTCAPTURE_EXPR_5:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTCAPTURE_EXPR_7:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTFLOOR_0_IV_I:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTOMP_LB:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTOMP_UB:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTOMP_STRIDE:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTOMP_IS_LAST:.+]] = alloca i32, align 4
-// IR-NEXT: %[[DOTFLOOR_0_IV_I12:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTFLOOR_0_IV_I11:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTTILE_0_IV_I:.+]] = alloca i32, align 4
-// IR-NEXT: %[[TMP0:.+]] = call i32 @__kmpc_global_thread_num(ptr @2)
+// IR-NEXT: %[[TMP0:.+]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2:.+]])
// IR-NEXT: store i32 %[[START:.+]], ptr %[[START_ADDR]], align 4
// IR-NEXT: store i32 %[[END:.+]], ptr %[[END_ADDR]], align 4
// IR-NEXT: store i32 %[[STEP:.+]], ptr %[[STEP_ADDR]], align 4
@@ -49,44 +49,44 @@ extern "C" void body(...) {}
// IR-NEXT: %[[TMP3:.+]] = load i32, ptr %[[END_ADDR]], align 4
// IR-NEXT: store i32 %[[TMP3]], ptr %[[DOTCAPTURE_EXPR_1]], align 4
// IR-NEXT: %[[TMP4:.+]] = load i32, ptr %[[STEP_ADDR]], align 4
-// IR-NEXT: store i32 %[[TMP4]], ptr %[[DOTCAPTURE_EXPR_2]], align 4
+// IR-NEXT: store i32 %[[TMP4]], ptr %[[DOTNEW_STEP]], align 4
// IR-NEXT: %[[TMP5:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_1]], align 4
// IR-NEXT: %[[TMP6:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_]], align 4
// IR-NEXT: %[[SUB:.+]] = sub i32 %[[TMP5]], %[[TMP6]]
-// IR-NEXT: %[[SUB4:.+]] = sub i32 %[[SUB]], 1
-// IR-NEXT: %[[TMP7:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_2]], align 4
-// IR-NEXT: %[[ADD:.+]] = add i32 %[[SUB4]], %[[TMP7]]
-// IR-NEXT: %[[TMP8:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_2]], align 4
+// IR-NEXT: %[[SUB3:.+]] = sub i32 %[[SUB]], 1
+// IR-NEXT: %[[TMP7:.+]] = load i32, ptr %[[DOTNEW_STEP]], align 4
+// IR-NEXT: %[[ADD:.+]] = add i32 %[[SUB3]], %[[TMP7]]
+// IR-NEXT: %[[TMP8:.+]] = load i32, ptr %[[DOTNEW_STEP]], align 4
// IR-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP8]]
-// IR-NEXT: %[[SUB5:.+]] = sub i32 %[[DIV]], 1
-// IR-NEXT: store i32 %[[SUB5]], ptr %[[DOTCAPTURE_EXPR_3]], align 4
-// IR-NEXT: %[[TMP9:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_3]], align 4
-// IR-NEXT: %[[ADD7:.+]] = add i32 %[[TMP9]], 1
-// IR-NEXT: store i32 %[[ADD7]], ptr %[[DOTCAPTURE_EXPR_6]], align 4
-// IR-NEXT: %[[TMP10:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_6]], align 4
-// IR-NEXT: %[[SUB9:.+]] = sub i32 %[[TMP10]], -3
-// IR-NEXT: %[[DIV10:.+]] = udiv i32 %[[SUB9]], 4
-// IR-NEXT: %[[SUB11:.+]] = sub i32 %[[DIV10]], 1
-// IR-NEXT: store i32 %[[SUB11]], ptr %[[DOTCAPTURE_EXPR_8]], align 4
+// IR-NEXT: %[[SUB4:.+]] = sub i32 %[[DIV]], 1
+// IR-NEXT: store i32 %[[SUB4]], ptr %[[DOTCAPTURE_EXPR_2]], align 4
+// IR-NEXT: %[[TMP9:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_2]], align 4
+// IR-NEXT: %[[ADD6:.+]] = add i32 %[[TMP9]], 1
+// IR-NEXT: store i32 %[[ADD6]], ptr %[[DOTCAPTURE_EXPR_5]], align 4
+// IR-NEXT: %[[TMP10:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_5]], align 4
+// IR-NEXT: %[[SUB8:.+]] = sub i32 %[[TMP10]], -3
+// IR-NEXT: %[[DIV9:.+]] = udiv i32 %[[SUB8]], 4
+// IR-NEXT: %[[SUB10:.+]] = sub i32 %[[DIV9]], 1
+// IR-NEXT: store i32 %[[SUB10]], ptr %[[DOTCAPTURE_EXPR_7]], align 4
// IR-NEXT: store i32 0, ptr %[[DOTFLOOR_0_IV_I]], align 4
-// IR-NEXT: %[[TMP11:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_6]], align 4
+// IR-NEXT: %[[TMP11:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_5]], align 4
// IR-NEXT: %[[CMP:.+]] = icmp ult i32 0, %[[TMP11]]
// IR-NEXT: br i1 %[[CMP]], label %[[OMP_PRECOND_THEN:.+]], label %[[OMP_PRECOND_END:.+]]
// IR-EMPTY:
// IR-NEXT: [[OMP_PRECOND_THEN]]:
// IR-NEXT: store i32 0, ptr %[[DOTOMP_LB]], align 4
-// IR-NEXT: %[[TMP12:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_8]], align 4
+// IR-NEXT: %[[TMP12:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_7]], align 4
// IR-NEXT: store i32 %[[TMP12]], ptr %[[DOTOMP_UB]], align 4
// IR-NEXT: store i32 1, ptr %[[DOTOMP_STRIDE]], align 4
// IR-NEXT: store i32 0, ptr %[[DOTOMP_IS_LAST]], align 4
-// IR-NEXT: call void @__kmpc_for_static_init_4u(ptr @1, i32 %[[TMP0]], i32 34, ptr %[[DOTOMP_IS_LAST]], ptr %[[DOTOMP_LB]], ptr %[[DOTOMP_UB]], ptr %[[DOTOMP_STRIDE]], i32 1, i32 1)
+// IR-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1:.+]], i32 %[[TMP0]], i32 34, ptr %[[DOTOMP_IS_LAST]], ptr %[[DOTOMP_LB]], ptr %[[DOTOMP_UB]], ptr %[[DOTOMP_STRIDE]], i32 1, i32 1)
// IR-NEXT: %[[TMP13:.+]] = load i32, ptr %[[DOTOMP_UB]], align 4
-// IR-NEXT: %[[TMP14:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_8]], align 4
-// IR-NEXT: %[[CMP13:.+]] = icmp ugt i32 %[[TMP13]], %[[TMP14]]
-// IR-NEXT: br i1 %[[CMP13]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
+// IR-NEXT: %[[TMP14:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_7]], align 4
+// IR-NEXT: %[[CMP12:.+]] = icmp ugt i32 %[[TMP13]], %[[TMP14]]
+// IR-NEXT: br i1 %[[CMP12]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// IR-EMPTY:
// IR-NEXT: [[COND_TRUE]]:
-// IR-NEXT: %[[TMP15:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_8]], align 4
+// IR-NEXT: %[[TMP15:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_7]], align 4
// IR-NEXT: br label %[[COND_END:.+]]
// IR-EMPTY:
// IR-NEXT: [[COND_FALSE]]:
@@ -103,50 +103,50 @@ extern "C" void body(...) {}
// IR-NEXT: [[OMP_INNER_FOR_COND]]:
// IR-NEXT: %[[TMP18:.+]] = load i32, ptr %[[DOTOMP_IV]], align 4
// IR-NEXT: %[[TMP19:.+]] = load i32, ptr %[[DOTOMP_UB]], align 4
-// IR-NEXT: %[[ADD14:.+]] = add i32 %[[TMP19]], 1
-// IR-NEXT: %[[CMP15:.+]] = icmp ult i32 %[[TMP18]], %[[ADD14]]
-// IR-NEXT: br i1 %[[CMP15]], label %[[OMP_INNER_FOR_BODY:.+]], label %[[OMP_INNER_FOR_END:.+]]
+// IR-NEXT: %[[ADD13:.+]] = add i32 %[[TMP19]], 1
+// IR-NEXT: %[[CMP14:.+]] = icmp ult i32 %[[TMP18]], %[[ADD13]]
+// IR-NEXT: br i1 %[[CMP14]], label %[[OMP_INNER_FOR_BODY:.+]], label %[[OMP_INNER_FOR_END:.+]]
// IR-EMPTY:
// IR-NEXT: [[OMP_INNER_FOR_BODY]]:
// IR-NEXT: %[[TMP20:.+]] = load i32, ptr %[[DOTOMP_IV]], align 4
// IR-NEXT: %[[MUL:.+]] = mul i32 %[[TMP20]], 4
-// IR-NEXT: %[[ADD16:.+]] = add i32 0, %[[MUL]]
-// IR-NEXT: store i32 %[[ADD16]], ptr %[[DOTFLOOR_0_IV_I12]], align 4
-// IR-NEXT: %[[TMP21:.+]] = load i32, ptr %[[DOTFLOOR_0_IV_I12]], align 4
+// IR-NEXT: %[[ADD15:.+]] = add i32 0, %[[MUL]]
+// IR-NEXT: store i32 %[[ADD15]], ptr %[[DOTFLOOR_0_IV_I11]], align 4
+// IR-NEXT: %[[TMP21:.+]] = load i32, ptr %[[DOTFLOOR_0_IV_I11]], align 4
// IR-NEXT: store i32 %[[TMP21]], ptr %[[DOTTILE_0_IV_I]], align 4
// IR-NEXT: br label %[[FOR_COND:.+]]
// IR-EMPTY:
// IR-NEXT: [[FOR_COND]]:
// IR-NEXT: %[[TMP22:.+]] = load i32, ptr %[[DOTTILE_0_IV_I]], align 4
-// IR-NEXT: %[[TMP23:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_3]], align 4
-// IR-NEXT: %[[ADD17:.+]] = add i32 %[[TMP23]], 1
-// IR-NEXT: %[[TMP24:.+]] = load i32, ptr %[[DOTFLOOR_0_IV_I12]], align 4
-// IR-NEXT: %[[ADD18:.+]] = add nsw i32 %[[TMP24]], 4
-// IR-NEXT: %[[CMP19:.+]] = icmp ult i32 %[[ADD17]], %[[ADD18]]
-// IR-NEXT: br i1 %[[CMP19]], label %[[COND_TRUE20:.+]], label %[[COND_FALSE22:.+]]
-// IR-EMPTY:
-// IR-NEXT: [[COND_TRUE20]]:
-// IR-NEXT: %[[TMP25:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_3]], align 4
-// IR-NEXT: %[[ADD21:.+]] = add i32 %[[TMP25]], 1
-// IR-NEXT: br label %[[COND_END24:.+]]
-// IR-EMPTY:
-// IR-NEXT: [[COND_FALSE22]]:
-// IR-NEXT: %[[TMP26:.+]] = load i32, ptr %[[DOTFLOOR_0_IV_I12]], align 4
-// IR-NEXT: %[[ADD23:.+]] = add nsw i32 %[[TMP26]], 4
-// IR-NEXT: br label %[[COND_END24]]
-// IR-EMPTY:
-// IR-NEXT: [[COND_END24]]:
-// IR-NEXT: %[[COND25:.+]] = phi i32 [ %[[ADD21]], %[[COND_TRUE20]] ], [ %[[ADD23]], %[[COND_FALSE22]] ]
-// IR-NEXT: %[[CMP26:.+]] = icmp ult i32 %[[TMP22]], %[[COND25]]
-// IR-NEXT: br i1 %[[CMP26]], label %[[FOR_BODY:.+]], label %[[FOR_END:.+]]
+// IR-NEXT: %[[TMP23:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_2]], align 4
+// IR-NEXT: %[[ADD16:.+]] = add i32 %[[TMP23]], 1
+// IR-NEXT: %[[TMP24:.+]] = load i32, ptr %[[DOTFLOOR_0_IV_I11]], align 4
+// IR-NEXT: %[[ADD17:.+]] = add i32 %[[TMP24]], 4
+// IR-NEXT: %[[CMP18:.+]] = icmp ult i32 %[[ADD16]], %[[ADD17]]
+// IR-NEXT: br i1 %[[CMP18]], label %[[COND_TRUE19:.+]], label %[[COND_FALSE21:.+]]
+// IR-EMPTY:
+// IR-NEXT: [[COND_TRUE19]]:
+// IR-NEXT: %[[TMP25:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_2]], align 4
+// IR-NEXT: %[[ADD20:.+]] = add i32 %[[TMP25]], 1
+// IR-NEXT: br label %[[COND_END23:.+]]
+// IR-EMPTY:
+// IR-NEXT: [[COND_FALSE21]]:
+// IR-NEXT: %[[TMP26:.+]] = load i32, ptr %[[DOTFLOOR_0_IV_I11]], align 4
+// IR-NEXT: %[[ADD22:.+]] = add i32 %[[TMP26]], 4
+// IR-NEXT: br label %[[COND_END23]]
+// IR-EMPTY:
+// IR-NEXT: [[COND_END23]]:
+// IR-NEXT: %[[COND24:.+]] = phi i32 [ %[[ADD20]], %[[COND_TRUE19]] ], [ %[[ADD22]], %[[COND_FALSE21]] ]
+// IR-NEXT: %[[CMP25:.+]] = icmp ult i32 %[[TMP22]], %[[COND24]]
+// IR-NEXT: br i1 %[[CMP25]], label %[[FOR_BODY:.+]], label %[[FOR_END:.+]]
// IR-EMPTY:
// IR-NEXT: [[FOR_BODY]]:
// IR-NEXT: %[[TMP27:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_]], align 4
// IR-NEXT: %[[TMP28:.+]] = load i32, ptr %[[DOTTILE_0_IV_I]], align 4
-// IR-NEXT: %[[TMP29:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_2]], align 4
-// IR-NEXT: %[[MUL27:.+]] = mul i32 %[[TMP28]], %[[TMP29]]
-// IR-NEXT: %[[ADD28:.+]] = add i32 %[[TMP27]], %[[MUL27]]
-// IR-NEXT: store i32 %[[ADD28]], ptr %[[I]], align 4
+// IR-NEXT: %[[TMP29:.+]] = load i32, ptr %[[DOTNEW_STEP]], align 4
+// IR-NEXT: %[[MUL26:.+]] = mul i32 %[[TMP28]], %[[TMP29]]
+// IR-NEXT: %[[ADD27:.+]] = add i32 %[[TMP27]], %[[MUL26]]
+// IR-NEXT: store i32 %[[ADD27]], ptr %[[I]], align 4
// IR-NEXT: %[[TMP30:.+]] = load i32, ptr %[[START_ADDR]], align 4
// IR-NEXT: %[[TMP31:.+]] = load i32, ptr %[[END_ADDR]], align 4
// IR-NEXT: %[[TMP32:.+]] = load i32, ptr %[[STEP_ADDR]], align 4
@@ -156,9 +156,9 @@ extern "C" void body(...) {}
// IR-EMPTY:
// IR-NEXT: [[FOR_INC]]:
// IR-NEXT: %[[TMP34:.+]] = load i32, ptr %[[DOTTILE_0_IV_I]], align 4
-// IR-NEXT: %[[INC:.+]] = add nsw i32 %[[TMP34]], 1
+// IR-NEXT: %[[INC:.+]] = add i32 %[[TMP34]], 1
// IR-NEXT: store i32 %[[INC]], ptr %[[DOTTILE_0_IV_I]], align 4
-// IR-NEXT: br label %[[FOR_COND]], !llvm.loop ![[LOOP2:[0-9]+]]
+// IR-NEXT: br label %[[FOR_COND]], !llvm.loop ![[LOOP3:[0-9]+]]
// IR-EMPTY:
// IR-NEXT: [[FOR_END]]:
// IR-NEXT: br label %[[OMP_BODY_CONTINUE:.+]]
@@ -168,19 +168,19 @@ extern "C" void body(...) {}
// IR-EMPTY:
// IR-NEXT: [[OMP_INNER_FOR_INC]]:
// IR-NEXT: %[[TMP35:.+]] = load i32, ptr %[[DOTOMP_IV]], align 4
-// IR-NEXT: %[[ADD29:.+]] = add i32 %[[TMP35]], 1
-// IR-NEXT: store i32 %[[ADD29]], ptr %[[DOTOMP_IV]], align 4
+// IR-NEXT: %[[ADD28:.+]] = add i32 %[[TMP35]], 1
+// IR-NEXT: store i32 %[[ADD28]], ptr %[[DOTOMP_IV]], align 4
// IR-NEXT: br label %[[OMP_INNER_FOR_COND]]
// IR-EMPTY:
// IR-NEXT: [[OMP_INNER_FOR_END]]:
// IR-NEXT: br label %[[OMP_LOOP_EXIT:.+]]
// IR-EMPTY:
// IR-NEXT: [[OMP_LOOP_EXIT]]:
-// IR-NEXT: call void @__kmpc_for_static_fini(ptr @1, i32 %[[TMP0]])
+// IR-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 %[[TMP0]])
// IR-NEXT: br label %[[OMP_PRECOND_END]]
// IR-EMPTY:
// IR-NEXT: [[OMP_PRECOND_END]]:
-// IR-NEXT: call void @__kmpc_barrier(ptr @3, i32 %[[TMP0]])
+// IR-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:.+]], i32 %[[TMP0]])
// IR-NEXT: ret void
// IR-NEXT: }
extern "C" void func(int start, int end, int step) {
diff --git a/clang/test/OpenMP/tile_codegen_tile_for.cpp b/clang/test/OpenMP/tile_codegen_tile_for.cpp
index d0fb89398c24..91536c406368 100644
--- a/clang/test/OpenMP/tile_codegen_tile_for.cpp
+++ b/clang/test/OpenMP/tile_codegen_tile_for.cpp
@@ -16,7 +16,7 @@
extern "C" void body(...) {}
-// IR-LABEL: @func(
+// IR-LABEL: define {{.*}}@func(
// IR-NEXT: [[ENTRY:.*]]:
// IR-NEXT: %[[START_ADDR:.+]] = alloca i32, align 4
// IR-NEXT: %[[END_ADDR:.+]] = alloca i32, align 4
@@ -26,22 +26,22 @@ extern "C" void body(...) {}
// IR-NEXT: %[[I:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTCAPTURE_EXPR_:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTCAPTURE_EXPR_1:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTNEW_STEP:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTCAPTURE_EXPR_2:.+]] = alloca i32, align 4
-// IR-NEXT: %[[DOTCAPTURE_EXPR_3:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTFLOOR_0_IV_I:.+]] = alloca i32, align 4
-// IR-NEXT: %[[DOTCAPTURE_EXPR_6:.+]] = alloca i32, align 4
-// IR-NEXT: %[[DOTCAPTURE_EXPR_8:.+]] = alloca i32, align 4
-// IR-NEXT: %[[DOTCAPTURE_EXPR_12:.+]] = alloca i32, align 4
-// IR-NEXT: %[[DOTCAPTURE_EXPR_14:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTCAPTURE_EXPR_5:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTCAPTURE_EXPR_7:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTCAPTURE_EXPR_11:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTCAPTURE_EXPR_13:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTFLOOR_0_IV__FLOOR_0_IV_I:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTOMP_LB:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTOMP_UB:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTOMP_STRIDE:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTOMP_IS_LAST:.+]] = alloca i32, align 4
-// IR-NEXT: %[[DOTFLOOR_0_IV__FLOOR_0_IV_I18:.+]] = alloca i32, align 4
+// IR-NEXT: %[[DOTFLOOR_0_IV__FLOOR_0_IV_I17:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTTILE_0_IV__FLOOR_0_IV_I:.+]] = alloca i32, align 4
// IR-NEXT: %[[DOTTILE_0_IV_I:.+]] = alloca i32, align 4
-// IR-NEXT: %[[TMP0:.+]] = call i32 @__kmpc_global_thread_num(ptr @2)
+// IR-NEXT: %[[TMP0:.+]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2:.+]])
// IR-NEXT: store i32 %[[START:.+]], ptr %[[START_ADDR]], align 4
// IR-NEXT: store i32 %[[END:.+]], ptr %[[END_ADDR]], align 4
// IR-NEXT: store i32 %[[STEP:.+]], ptr %[[STEP_ADDR]], align 4
@@ -52,53 +52,53 @@ extern "C" void body(...) {}
// IR-NEXT: %[[TMP3:.+]] = load i32, ptr %[[END_ADDR]], align 4
// IR-NEXT: store i32 %[[TMP3]], ptr %[[DOTCAPTURE_EXPR_1]], align 4
// IR-NEXT: %[[TMP4:.+]] = load i32, ptr %[[STEP_ADDR]], align 4
-// IR-NEXT: store i32 %[[TMP4]], ptr %[[DOTCAPTURE_EXPR_2]], align 4
+// IR-NEXT: store i32 %[[TMP4]], ptr %[[DOTNEW_STEP]], align 4
// IR-NEXT: %[[TMP5:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_1]], align 4
// IR-NEXT: %[[TMP6:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_]], align 4
// IR-NEXT: %[[SUB:.+]] = sub i32 %[[TMP5]], %[[TMP6]]
-// IR-NEXT: %[[SUB4:.+]] = sub i32 %[[SUB]], 1
-// IR-NEXT: %[[TMP7:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_2]], align 4
-// IR-NEXT: %[[ADD:.+]] = add i32 %[[SUB4]], %[[TMP7]]
-// IR-NEXT: %[[TMP8:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_2]], align 4
+// IR-NEXT: %[[SUB3:.+]] = sub i32 %[[SUB]], 1
+// IR-NEXT: %[[TMP7:.+]] = load i32, ptr %[[DOTNEW_STEP]], align 4
+// IR-NEXT: %[[ADD:.+]] = add i32 %[[SUB3]], %[[TMP7]]
+// IR-NEXT: %[[TMP8:.+]] = load i32, ptr %[[DOTNEW_STEP]], align 4
// IR-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP8]]
-// IR-NEXT: %[[SUB5:.+]] = sub i32 %[[DIV]], 1
-// IR-NEXT: store i32 %[[SUB5]], ptr %[[DOTCAPTURE_EXPR_3]], align 4
+// IR-NEXT: %[[SUB4:.+]] = sub i32 %[[DIV]], 1
+// IR-NEXT: store i32 %[[SUB4]], ptr %[[DOTCAPTURE_EXPR_2]], align 4
// IR-NEXT: store i32 0, ptr %[[DOTFLOOR_0_IV_I]], align 4
-// IR-NEXT: %[[TMP9:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_3]], align 4
-// IR-NEXT: %[[ADD7:.+]] = add i32 %[[TMP9]], 1
-// IR-NEXT: store i32 %[[ADD7]], ptr %[[DOTCAPTURE_EXPR_6]], align 4
-// IR-NEXT: %[[TMP10:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_6]], align 4
-// IR-NEXT: %[[SUB9:.+]] = sub i32 %[[TMP10]], -3
-// IR-NEXT: %[[DIV10:.+]] = udiv i32 %[[SUB9]], 4
-// IR-NEXT: %[[SUB11:.+]] = sub i32 %[[DIV10]], 1
-// IR-NEXT: store i32 %[[SUB11]], ptr %[[DOTCAPTURE_EXPR_8]], align 4
-// IR-NEXT: %[[TMP11:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_8]], align 4
-// IR-NEXT: %[[ADD13:.+]] = add i32 %[[TMP11]], 1
-// IR-NEXT: store i32 %[[ADD13]], ptr %[[DOTCAPTURE_EXPR_12]], align 4
-// IR-NEXT: %[[TMP12:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_12]], align 4
-// IR-NEXT: %[[SUB15:.+]] = sub i32 %[[TMP12]], -2
-// IR-NEXT: %[[DIV16:.+]] = udiv i32 %[[SUB15]], 3
-// IR-NEXT: %[[SUB17:.+]] = sub i32 %[[DIV16]], 1
-// IR-NEXT: store i32 %[[SUB17]], ptr %[[DOTCAPTURE_EXPR_14]], align 4
+// IR-NEXT: %[[TMP9:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_2]], align 4
+// IR-NEXT: %[[ADD6:.+]] = add i32 %[[TMP9]], 1
+// IR-NEXT: store i32 %[[ADD6]], ptr %[[DOTCAPTURE_EXPR_5]], align 4
+// IR-NEXT: %[[TMP10:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_5]], align 4
+// IR-NEXT: %[[SUB8:.+]] = sub i32 %[[TMP10]], -3
+// IR-NEXT: %[[DIV9:.+]] = udiv i32 %[[SUB8]], 4
+// IR-NEXT: %[[SUB10:.+]] = sub i32 %[[DIV9]], 1
+// IR-NEXT: store i32 %[[SUB10]], ptr %[[DOTCAPTURE_EXPR_7]], align 4
+// IR-NEXT: %[[TMP11:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_7]], align 4
+// IR-NEXT: %[[ADD12:.+]] = add i32 %[[TMP11]], 1
+// IR-NEXT: store i32 %[[ADD12]], ptr %[[DOTCAPTURE_EXPR_11]], align 4
+// IR-NEXT: %[[TMP12:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_11]], align 4
+// IR-NEXT: %[[SUB14:.+]] = sub i32 %[[TMP12]], -2
+// IR-NEXT: %[[DIV15:.+]] = udiv i32 %[[SUB14]], 3
+// IR-NEXT: %[[SUB16:.+]] = sub i32 %[[DIV15]], 1
+// IR-NEXT: store i32 %[[SUB16]], ptr %[[DOTCAPTURE_EXPR_13]], align 4
// IR-NEXT: store i32 0, ptr %[[DOTFLOOR_0_IV__FLOOR_0_IV_I]], align 4
-// IR-NEXT: %[[TMP13:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_12]], align 4
+// IR-NEXT: %[[TMP13:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_11]], align 4
// IR-NEXT: %[[CMP:.+]] = icmp ult i32 0, %[[TMP13]]
// IR-NEXT: br i1 %[[CMP]], label %[[OMP_PRECOND_THEN:.+]], label %[[OMP_PRECOND_END:.+]]
// IR-EMPTY:
// IR-NEXT: [[OMP_PRECOND_THEN]]:
// IR-NEXT: store i32 0, ptr %[[DOTOMP_LB]], align 4
-// IR-NEXT: %[[TMP14:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_14]], align 4
+// IR-NEXT: %[[TMP14:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_13]], align 4
// IR-NEXT: store i32 %[[TMP14]], ptr %[[DOTOMP_UB]], align 4
// IR-NEXT: store i32 1, ptr %[[DOTOMP_STRIDE]], align 4
// IR-NEXT: store i32 0, ptr %[[DOTOMP_IS_LAST]], align 4
-// IR-NEXT: call void @__kmpc_for_static_init_4u(ptr @1, i32 %[[TMP0]], i32 34, ptr %[[DOTOMP_IS_LAST]], ptr %[[DOTOMP_LB]], ptr %[[DOTOMP_UB]], ptr %[[DOTOMP_STRIDE]], i32 1, i32 1)
+// IR-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1:.+]], i32 %[[TMP0]], i32 34, ptr %[[DOTOMP_IS_LAST]], ptr %[[DOTOMP_LB]], ptr %[[DOTOMP_UB]], ptr %[[DOTOMP_STRIDE]], i32 1, i32 1)
// IR-NEXT: %[[TMP15:.+]] = load i32, ptr %[[DOTOMP_UB]], align 4
-// IR-NEXT: %[[TMP16:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_14]], align 4
-// IR-NEXT: %[[CMP19:.+]] = icmp ugt i32 %[[TMP15]], %[[TMP16]]
-// IR-NEXT: br i1 %[[CMP19]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
+// IR-NEXT: %[[TMP16:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_13]], align 4
+// IR-NEXT: %[[CMP18:.+]] = icmp ugt i32 %[[TMP15]], %[[TMP16]]
+// IR-NEXT: br i1 %[[CMP18]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// IR-EMPTY:
// IR-NEXT: [[COND_TRUE]]:
-// IR-NEXT: %[[TMP17:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_14]], align 4
+// IR-NEXT: %[[TMP17:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_13]], align 4
// IR-NEXT: br label %[[COND_END:.+]]
// IR-EMPTY:
// IR-NEXT: [[COND_FALSE]]:
@@ -115,83 +115,83 @@ extern "C" void body(...) {}
// IR-NEXT: [[OMP_INNER_FOR_COND]]:
// IR-NEXT: %[[TMP20:.+]] = load i32, ptr %[[DOTOMP_IV]], align 4
// IR-NEXT: %[[TMP21:.+]] = load i32, ptr %[[DOTOMP_UB]], align 4
-// IR-NEXT: %[[ADD20:.+]] = add i32 %[[TMP21]], 1
-// IR-NEXT: %[[CMP21:.+]] = icmp ult i32 %[[TMP20]], %[[ADD20]]
-// IR-NEXT: br i1 %[[CMP21]], label %[[OMP_INNER_FOR_BODY:.+]], label %[[OMP_INNER_FOR_END:.+]]
+// IR-NEXT: %[[ADD19:.+]] = add i32 %[[TMP21]], 1
+// IR-NEXT: %[[CMP20:.+]] = icmp ult i32 %[[TMP20]], %[[ADD19]]
+// IR-NEXT: br i1 %[[CMP20]], label %[[OMP_INNER_FOR_BODY:.+]], label %[[OMP_INNER_FOR_END:.+]]
// IR-EMPTY:
// IR-NEXT: [[OMP_INNER_FOR_BODY]]:
// IR-NEXT: %[[TMP22:.+]] = load i32, ptr %[[DOTOMP_IV]], align 4
// IR-NEXT: %[[MUL:.+]] = mul i32 %[[TMP22]], 3
-// IR-NEXT: %[[ADD22:.+]] = add i32 0, %[[MUL]]
-// IR-NEXT: store i32 %[[ADD22]], ptr %[[DOTFLOOR_0_IV__FLOOR_0_IV_I18]], align 4
-// IR-NEXT: %[[TMP23:.+]] = load i32, ptr %[[DOTFLOOR_0_IV__FLOOR_0_IV_I18]], align 4
+// IR-NEXT: %[[ADD21:.+]] = add i32 0, %[[MUL]]
+// IR-NEXT: store i32 %[[ADD21]], ptr %[[DOTFLOOR_0_IV__FLOOR_0_IV_I17]], align 4
+// IR-NEXT: %[[TMP23:.+]] = load i32, ptr %[[DOTFLOOR_0_IV__FLOOR_0_IV_I17]], align 4
// IR-NEXT: store i32 %[[TMP23]], ptr %[[DOTTILE_0_IV__FLOOR_0_IV_I]], align 4
// IR-NEXT: br label %[[FOR_COND:.+]]
// IR-EMPTY:
// IR-NEXT: [[FOR_COND]]:
// IR-NEXT: %[[TMP24:.+]] = load i32, ptr %[[DOTTILE_0_IV__FLOOR_0_IV_I]], align 4
-// IR-NEXT: %[[TMP25:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_8]], align 4
-// IR-NEXT: %[[ADD23:.+]] = add i32 %[[TMP25]], 1
-// IR-NEXT: %[[TMP26:.+]] = load i32, ptr %[[DOTFLOOR_0_IV__FLOOR_0_IV_I18]], align 4
-// IR-NEXT: %[[ADD24:.+]] = add i32 %[[TMP26]], 3
-// IR-NEXT: %[[CMP25:.+]] = icmp ult i32 %[[ADD23]], %[[ADD24]]
-// IR-NEXT: br i1 %[[CMP25]], label %[[COND_TRUE26:.+]], label %[[COND_FALSE28:.+]]
-// IR-EMPTY:
-// IR-NEXT: [[COND_TRUE26]]:
-// IR-NEXT: %[[TMP27:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_8]], align 4
-// IR-NEXT: %[[ADD27:.+]] = add i32 %[[TMP27]], 1
-// IR-NEXT: br label %[[COND_END30:.+]]
-// IR-EMPTY:
-// IR-NEXT: [[COND_FALSE28]]:
-// IR-NEXT: %[[TMP28:.+]] = load i32, ptr %[[DOTFLOOR_0_IV__FLOOR_0_IV_I18]], align 4
-// IR-NEXT: %[[ADD29:.+]] = add i32 %[[TMP28]], 3
-// IR-NEXT: br label %[[COND_END30]]
-// IR-EMPTY:
-// IR-NEXT: [[COND_END30]]:
-// IR-NEXT: %[[COND31:.+]] = phi i32 [ %[[ADD27]], %[[COND_TRUE26]] ], [ %[[ADD29]], %[[COND_FALSE28]] ]
-// IR-NEXT: %[[CMP32:.+]] = icmp ult i32 %[[TMP24]], %[[COND31]]
-// IR-NEXT: br i1 %[[CMP32]], label %[[FOR_BODY:.+]], label %[[FOR_END51:.+]]
+// IR-NEXT: %[[TMP25:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_7]], align 4
+// IR-NEXT: %[[ADD22:.+]] = add i32 %[[TMP25]], 1
+// IR-NEXT: %[[TMP26:.+]] = load i32, ptr %[[DOTFLOOR_0_IV__FLOOR_0_IV_I17]], align 4
+// IR-NEXT: %[[ADD23:.+]] = add i32 %[[TMP26]], 3
+// IR-NEXT: %[[CMP24:.+]] = icmp ult i32 %[[ADD22]], %[[ADD23]]
+// IR-NEXT: br i1 %[[CMP24]], label %[[COND_TRUE25:.+]], label %[[COND_FALSE27:.+]]
+// IR-EMPTY:
+// IR-NEXT: [[COND_TRUE25]]:
+// IR-NEXT: %[[TMP27:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_7]], align 4
+// IR-NEXT: %[[ADD26:.+]] = add i32 %[[TMP27]], 1
+// IR-NEXT: br label %[[COND_END29:.+]]
+// IR-EMPTY:
+// IR-NEXT: [[COND_FALSE27]]:
+// IR-NEXT: %[[TMP28:.+]] = load i32, ptr %[[DOTFLOOR_0_IV__FLOOR_0_IV_I17]], align 4
+// IR-NEXT: %[[ADD28:.+]] = add i32 %[[TMP28]], 3
+// IR-NEXT: br label %[[COND_END29]]
+// IR-EMPTY:
+// IR-NEXT: [[COND_END29]]:
+// IR-NEXT: %[[COND30:.+]] = phi i32 [ %[[ADD26]], %[[COND_TRUE25]] ], [ %[[ADD28]], %[[COND_FALSE27]] ]
+// IR-NEXT: %[[CMP31:.+]] = icmp ult i32 %[[TMP24]], %[[COND30]]
+// IR-NEXT: br i1 %[[CMP31]], label %[[FOR_BODY:.+]], label %[[FOR_END50:.+]]
// IR-EMPTY:
// IR-NEXT: [[FOR_BODY]]:
// IR-NEXT: %[[TMP29:.+]] = load i32, ptr %[[DOTTILE_0_IV__FLOOR_0_IV_I]], align 4
-// IR-NEXT: %[[MUL33:.+]] = mul i32 %[[TMP29]], 4
-// IR-NEXT: %[[ADD34:.+]] = add i32 0, %[[MUL33]]
-// IR-NEXT: store i32 %[[ADD34]], ptr %[[DOTFLOOR_0_IV_I]], align 4
+// IR-NEXT: %[[MUL32:.+]] = mul i32 %[[TMP29]], 4
+// IR-NEXT: %[[ADD33:.+]] = add i32 0, %[[MUL32]]
+// IR-NEXT: store i32 %[[ADD33]], ptr %[[DOTFLOOR_0_IV_I]], align 4
// IR-NEXT: %[[TMP30:.+]] = load i32, ptr %[[DOTFLOOR_0_IV_I]], align 4
// IR-NEXT: store i32 %[[TMP30]], ptr %[[DOTTILE_0_IV_I]], align 4
-// IR-NEXT: br label %[[FOR_COND35:.+]]
+// IR-NEXT: br label %[[FOR_COND34:.+]]
// IR-EMPTY:
-// IR-NEXT: [[FOR_COND35]]:
+// IR-NEXT: [[FOR_COND34]]:
// IR-NEXT: %[[TMP31:.+]] = load i32, ptr %[[DOTTILE_0_IV_I]], align 4
-// IR-NEXT: %[[TMP32:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_3]], align 4
-// IR-NEXT: %[[ADD36:.+]] = add i32 %[[TMP32]], 1
+// IR-NEXT: %[[TMP32:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_2]], align 4
+// IR-NEXT: %[[ADD35:.+]] = add i32 %[[TMP32]], 1
// IR-NEXT: %[[TMP33:.+]] = load i32, ptr %[[DOTFLOOR_0_IV_I]], align 4
-// IR-NEXT: %[[ADD37:.+]] = add nsw i32 %[[TMP33]], 4
-// IR-NEXT: %[[CMP38:.+]] = icmp ult i32 %[[ADD36]], %[[ADD37]]
-// IR-NEXT: br i1 %[[CMP38]], label %[[COND_TRUE39:.+]], label %[[COND_FALSE41:.+]]
+// IR-NEXT: %[[ADD36:.+]] = add i32 %[[TMP33]], 4
+// IR-NEXT: %[[CMP37:.+]] = icmp ult i32 %[[ADD35]], %[[ADD36]]
+// IR-NEXT: br i1 %[[CMP37]], label %[[COND_TRUE38:.+]], label %[[COND_FALSE40:.+]]
// IR-EMPTY:
-// IR-NEXT: [[COND_TRUE39]]:
-// IR-NEXT: %[[TMP34:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_3]], align 4
-// IR-NEXT: %[[ADD40:.+]] = add i32 %[[TMP34]], 1
-// IR-NEXT: br label %[[COND_END43:.+]]
+// IR-NEXT: [[COND_TRUE38]]:
+// IR-NEXT: %[[TMP34:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_2]], align 4
+// IR-NEXT: %[[ADD39:.+]] = add i32 %[[TMP34]], 1
+// IR-NEXT: br label %[[COND_END42:.+]]
// IR-EMPTY:
-// IR-NEXT: [[COND_FALSE41]]:
+// IR-NEXT: [[COND_FALSE40]]:
// IR-NEXT: %[[TMP35:.+]] = load i32, ptr %[[DOTFLOOR_0_IV_I]], align 4
-// IR-NEXT: %[[ADD42:.+]] = add nsw i32 %[[TMP35]], 4
-// IR-NEXT: br label %[[COND_END43]]
+// IR-NEXT: %[[ADD41:.+]] = add i32 %[[TMP35]], 4
+// IR-NEXT: br label %[[COND_END42]]
// IR-EMPTY:
-// IR-NEXT: [[COND_END43]]:
-// IR-NEXT: %[[COND44:.+]] = phi i32 [ %[[ADD40]], %[[COND_TRUE39]] ], [ %[[ADD42]], %[[COND_FALSE41]] ]
-// IR-NEXT: %[[CMP45:.+]] = icmp ult i32 %[[TMP31]], %[[COND44]]
-// IR-NEXT: br i1 %[[CMP45]], label %[[FOR_BODY46:.+]], label %[[FOR_END:.+]]
+// IR-NEXT: [[COND_END42]]:
+// IR-NEXT: %[[COND43:.+]] = phi i32 [ %[[ADD39]], %[[COND_TRUE38]] ], [ %[[ADD41]], %[[COND_FALSE40]] ]
+// IR-NEXT: %[[CMP44:.+]] = icmp ult i32 %[[TMP31]], %[[COND43]]
+// IR-NEXT: br i1 %[[CMP44]], label %[[FOR_BODY45:.+]], label %[[FOR_END:.+]]
// IR-EMPTY:
-// IR-NEXT: [[FOR_BODY46]]:
+// IR-NEXT: [[FOR_BODY45]]:
// IR-NEXT: %[[TMP36:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_]], align 4
// IR-NEXT: %[[TMP37:.+]] = load i32, ptr %[[DOTTILE_0_IV_I]], align 4
-// IR-NEXT: %[[TMP38:.+]] = load i32, ptr %[[DOTCAPTURE_EXPR_2]], align 4
-// IR-NEXT: %[[MUL47:.+]] = mul i32 %[[TMP37]], %[[TMP38]]
-// IR-NEXT: %[[ADD48:.+]] = add i32 %[[TMP36]], %[[MUL47]]
-// IR-NEXT: store i32 %[[ADD48]], ptr %[[I]], align 4
+// IR-NEXT: %[[TMP38:.+]] = load i32, ptr %[[DOTNEW_STEP]], align 4
+// IR-NEXT: %[[MUL46:.+]] = mul i32 %[[TMP37]], %[[TMP38]]
+// IR-NEXT: %[[ADD47:.+]] = add i32 %[[TMP36]], %[[MUL46]]
+// IR-NEXT: store i32 %[[ADD47]], ptr %[[I]], align 4
// IR-NEXT: %[[TMP39:.+]] = load i32, ptr %[[START_ADDR]], align 4
// IR-NEXT: %[[TMP40:.+]] = load i32, ptr %[[END_ADDR]], align 4
// IR-NEXT: %[[TMP41:.+]] = load i32, ptr %[[STEP_ADDR]], align 4
@@ -201,20 +201,20 @@ extern "C" void body(...) {}
// IR-EMPTY:
// IR-NEXT: [[FOR_INC]]:
// IR-NEXT: %[[TMP43:.+]] = load i32, ptr %[[DOTTILE_0_IV_I]], align 4
-// IR-NEXT: %[[INC:.+]] = add nsw i32 %[[TMP43]], 1
+// IR-NEXT: %[[INC:.+]] = add i32 %[[TMP43]], 1
// IR-NEXT: store i32 %[[INC]], ptr %[[DOTTILE_0_IV_I]], align 4
-// IR-NEXT: br label %[[FOR_COND35]], !llvm.loop ![[LOOP2:[0-9]+]]
+// IR-NEXT: br label %[[FOR_COND34]], !llvm.loop ![[LOOP3:[0-9]+]]
// IR-EMPTY:
// IR-NEXT: [[FOR_END]]:
-// IR-NEXT: br label %[[FOR_INC49:.+]]
+// IR-NEXT: br label %[[FOR_INC48:.+]]
// IR-EMPTY:
-// IR-NEXT: [[FOR_INC49]]:
+// IR-NEXT: [[FOR_INC48]]:
// IR-NEXT: %[[TMP44:.+]] = load i32, ptr %[[DOTTILE_0_IV__FLOOR_0_IV_I]], align 4
-// IR-NEXT: %[[INC50:.+]] = add i32 %[[TMP44]], 1
-// IR-NEXT: store i32 %[[INC50]], ptr %[[DOTTILE_0_IV__FLOOR_0_IV_I]], align 4
-// IR-NEXT: br label %[[FOR_COND]], !llvm.loop ![[LOOP4:[0-9]+]]
+// IR-NEXT: %[[INC49:.+]] = add i32 %[[TMP44]], 1
+// IR-NEXT: store i32 %[[INC49]], ptr %[[DOTTILE_0_IV__FLOOR_0_IV_I]], align 4
+// IR-NEXT: br label %[[FOR_COND]], !llvm.loop ![[LOOP5:[0-9]+]]
// IR-EMPTY:
-// IR-NEXT: [[FOR_END51]]:
+// IR-NEXT: [[FOR_END50]]:
// IR-NEXT: br label %[[OMP_BODY_CONTINUE:.+]]
// IR-EMPTY:
// IR-NEXT: [[OMP_BODY_CONTINUE]]:
@@ -222,21 +222,23 @@ extern "C" void body(...) {}
// IR-EMPTY:
// IR-NEXT: [[OMP_INNER_FOR_INC]]:
// IR-NEXT: %[[TMP45:.+]] = load i32, ptr %[[DOTOMP_IV]], align 4
-// IR-NEXT: %[[ADD52:.+]] = add i32 %[[TMP45]], 1
-// IR-NEXT: store i32 %[[ADD52]], ptr %[[DOTOMP_IV]], align 4
+// IR-NEXT: %[[ADD51:.+]] = add i32 %[[TMP45]], 1
+// IR-NEXT: store i32 %[[ADD51]], ptr %[[DOTOMP_IV]], align 4
// IR-NEXT: br label %[[OMP_INNER_FOR_COND]]
// IR-EMPTY:
// IR-NEXT: [[OMP_INNER_FOR_END]]:
// IR-NEXT: br label %[[OMP_LOOP_EXIT:.+]]
// IR-EMPTY:
// IR-NEXT: [[OMP_LOOP_EXIT]]:
-// IR-NEXT: call void @__kmpc_for_static_fini(ptr @1, i32 %[[TMP0]])
+// IR-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 %[[TMP0]])
// IR-NEXT: br label %[[OMP_PRECOND_END]]
// IR-EMPTY:
// IR-NEXT: [[OMP_PRECOND_END]]:
-// IR-NEXT: call void @__kmpc_barrier(ptr @3, i32 %[[TMP0]])
+// IR-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:.+]], i32 %[[TMP0]])
// IR-NEXT: ret void
// IR-NEXT: }
+
+
extern "C" void func(int start, int end, int step) {
#pragma omp for
#pragma omp tile sizes(3)
@@ -246,8 +248,10 @@ extern "C" void func(int start, int end, int step) {
}
#endif /* HEADER */
+
// IR: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
-// IR: ![[META1:[0-9]+]] = !{!"{{[^"]*}}"}
-// IR: ![[LOOP2]] = distinct !{![[LOOP2]], ![[LOOPPROP3:[0-9]+]]}
-// IR: ![[LOOPPROP3]] = !{!"llvm.loop.mustprogress"}
-// IR: ![[LOOP4]] = distinct !{![[LOOP4]], ![[LOOPPROP3]]}
+// IR: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51}
+// IR: ![[META2:[0-9]+]] =
+// IR: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]]}
+// IR: ![[LOOPPROP4]] = !{!"llvm.loop.mustprogress"}
+// IR: ![[LOOP5]] = distinct !{![[LOOP5]], ![[LOOPPROP4]]}
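The metadata renumbering at the end of the file has a single cause: the module now emits an additional "openmp" module flag, so every later metadata index shifts by one and LOOP2/LOOP4 become LOOP3/LOOP5. Roughly the IR the updated checks are written against (the concrete indices and the loosely matched !2 are assumptions here):

    !0 = !{i32 1, !"wchar_size", i32 4}
    !1 = !{i32 7, !"openmp", i32 51}   ; the new flag that shifts later indices
    !3 = distinct !{!3, !4}            ; inner tile loop
    !4 = !{!"llvm.loop.mustprogress"}
    !5 = distinct !{!5, !4}            ; outer floor loop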
diff --git a/clang/test/PCH/cxx1z-aligned-alloc.cpp b/clang/test/PCH/cxx1z-aligned-alloc.cpp
index c1becbde3bf2..cccd62859784 100644
--- a/clang/test/PCH/cxx1z-aligned-alloc.cpp
+++ b/clang/test/PCH/cxx1z-aligned-alloc.cpp
@@ -1,12 +1,12 @@
// No PCH:
-// RUN: %clang_cc1 -pedantic -fsized-deallocation -std=c++1z -include %s -verify %s
+// RUN: %clang_cc1 -pedantic -std=c++1z -include %s -verify %s
//
// With PCH:
-// RUN: %clang_cc1 -pedantic -fsized-deallocation -std=c++1z -emit-pch %s -o %t
-// RUN: %clang_cc1 -pedantic -fsized-deallocation -std=c++1z -include-pch %t -verify %s
+// RUN: %clang_cc1 -pedantic -std=c++1z -emit-pch %s -o %t
+// RUN: %clang_cc1 -pedantic -std=c++1z -include-pch %t -verify %s
-// RUN: %clang_cc1 -pedantic -fsized-deallocation -std=c++1z -emit-pch -fpch-instantiate-templates %s -o %t
-// RUN: %clang_cc1 -pedantic -fsized-deallocation -std=c++1z -include-pch %t -verify %s
+// RUN: %clang_cc1 -pedantic -std=c++1z -emit-pch -fpch-instantiate-templates %s -o %t
+// RUN: %clang_cc1 -pedantic -std=c++1z -include-pch %t -verify %s
// expected-no-diagnostics
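Every RUN line here drops -fsized-deallocation, which only makes sense if that behavior is now on by default in this configuration, so the test keeps covering the same aligned-allocation paths without the flag. As a reminder of what the flag governs, a self-contained sketch of sized deallocation (the replacement function and its output are illustrative, not part of this test):

    #include <cstddef>
    #include <cstdio>
    #include <new>

    // Replacing the sized global operator delete. With sized deallocation
    // enabled, a delete-expression on a complete type may call this overload.
    void operator delete(void *p, std::size_t size) noexcept {
      std::printf("freeing %zu bytes\n", size);
      ::operator delete(p); // forward to the default unsized form
    }

    int main() {
      int *p = new int(42);
      delete p; // may print "freeing 4 bytes"
    }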
diff --git a/clang/test/PCH/pack_indexing.cpp b/clang/test/PCH/pack_indexing.cpp
index cf8124617b3c..1c4dac0fd9a3 100644
--- a/clang/test/PCH/pack_indexing.cpp
+++ b/clang/test/PCH/pack_indexing.cpp
@@ -10,7 +10,11 @@ using Type = U...[I];
template <int I, auto...V>
constexpr auto Var = V...[I];
+template <int I, auto...V>
+decltype(V...[I]) foo() { return V...[I]; }
+
void fn1() {
using A = Type<1, int, long, double>;
constexpr auto V = Var<2, 0, 1, 42>;
+ foo<2, 0, 1, 42>();
}
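The added foo template extends the PCH round-trip to pack indexing in a function's return type, decltype(V...[I]), not just in a variable template initializer. Pack indexing selects the I-th element of a pack; a standalone sketch using the same values as the test (requires a C++2c-era compiler):

    #include <type_traits>

    template <int I, class... T>
    using at = T...[I]; // pack indexing over a type pack

    template <int I, auto... V>
    constexpr auto pick = V...[I]; // and over a value pack

    static_assert(std::is_same_v<at<1, int, long, double>, long>);
    static_assert(pick<2, 0, 1, 42> == 42);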
diff --git a/clang/test/Parser/MicrosoftExtensions.cpp b/clang/test/Parser/MicrosoftExtensions.cpp
index 6bf802a29ace..9102bca8f6bb 100644
--- a/clang/test/Parser/MicrosoftExtensions.cpp
+++ b/clang/test/Parser/MicrosoftExtensions.cpp
@@ -426,7 +426,7 @@ bool f(int);
template <typename T>
struct A {
constexpr A(T t) {
- __assume(f(t)); // expected-warning{{the argument to '__assume' has side effects that will be discarded}}
+ __assume(f(t)); // expected-warning{{assumption is ignored because it contains (potential) side-effects}}
}
constexpr bool g() { return false; }
};
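Only the expected wording changes here; the __assume diagnostic apparently now shares its text with the standard assumption attribute. The behavior being diagnosed, in a short sketch (compiles under -fms-extensions with C++23; the names are illustrative):

    bool f(int);

    void g(int t) {
      [[assume(t > 0)]]; // no side effects: usable as an optimization hint
      __assume(f(t));    // the call has potential side effects, so the
                         // assumption is ignored and this warning fires
    }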
diff --git a/clang/test/Parser/altivec.c b/clang/test/Parser/altivec.c
index 445369f0dc06..9291b9b69160 100644
--- a/clang/test/Parser/altivec.c
+++ b/clang/test/Parser/altivec.c
@@ -56,40 +56,40 @@ void f_a2(int b, vector int a);
vector int v = (vector int)(-1);
// These should have errors on AIX and warnings otherwise.
-__vector long vv_l; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector long vv_l; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector signed long vv_sl; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector signed long vv_sl; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector unsigned long vv_ul; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector unsigned long vv_ul; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector long int vv_li; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector long int vv_li; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector signed long int vv_sli; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector signed long int vv_sli; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector unsigned long int vv_uli; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector unsigned long int vv_uli; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector long v_l; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector long v_l; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector signed long v_sl; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector signed long v_sl; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector unsigned long v_ul; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector unsigned long v_ul; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector long int v_li; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector long int v_li; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector signed long int v_sli; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector signed long int v_sli; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector unsigned long int v_uli; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector unsigned long int v_uli; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
diff --git a/clang/test/Parser/attr-availability.c b/clang/test/Parser/attr-availability.c
index aab0f2f3a852..9d84d9c1df36 100644
--- a/clang/test/Parser/attr-availability.c
+++ b/clang/test/Parser/attr-availability.c
@@ -30,6 +30,8 @@ void f11(void) __attribute__((availability(macosx,message=u"b"))); // expected-w
void f12(void) __attribute__((availability(macosx,message="a" u"b"))); // expected-warning {{encoding prefix 'u' on an unevaluated string literal has no effect}}
+void f13(void) __attribute__((availability(shadermodel, introduced = 6.0, environment=pixel))); // expected-error {{unexpected parameter 'environment' in availability attribute, not permitted in C/C++}}
+
enum E{
gorf __attribute__((availability(macosx,introduced=8.5, message = 10.0))), // expected-error {{expected string literal for optional message in 'availability' attribute}}
garf __attribute__((availability(macosx,introduced=8.5, message))), // expected-error {{expected '=' after 'message'}}
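The new f13 line checks that the environment parameter recently added to the availability attribute (for HLSL shader stages such as pixel) is rejected outside HLSL. For contrast, a conventional spelling that stays valid in C (the version numbers here are illustrative):

    void old_api(void) __attribute__((availability(macosx, introduced = 10.10,
                                                   deprecated = 10.15,
                                                   message = "use new_api instead")));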
diff --git a/clang/test/Parser/cxx-altivec.cpp b/clang/test/Parser/cxx-altivec.cpp
index 5cb760dababb..15a6bf6d1be8 100644
--- a/clang/test/Parser/cxx-altivec.cpp
+++ b/clang/test/Parser/cxx-altivec.cpp
@@ -59,40 +59,40 @@ void f_a2(int b, vector int a);
vector int v = (vector int)(-1);
// These should have errors on AIX and warnings otherwise.
-__vector long vv_l; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector long vv_l; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector signed long vv_sl; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector signed long vv_sl; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector unsigned long vv_ul; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector unsigned long vv_ul; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector long int vv_li; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector long int vv_li; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector signed long int vv_sli; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector signed long int vv_sli; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector unsigned long int vv_uli; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector unsigned long int vv_uli; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector long v_l; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector long v_l; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector signed long v_sl; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector signed long v_sl; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector unsigned long v_ul; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector unsigned long v_ul; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector long int v_li; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector long int v_li; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector signed long int v_sli; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector signed long int v_sli; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector unsigned long int v_uli; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector unsigned long int v_uli; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
diff --git a/clang/test/Parser/lax-conv.cpp b/clang/test/Parser/lax-conv.cpp
index f784e3fa74e7..0cb2503a9691 100644
--- a/clang/test/Parser/lax-conv.cpp
+++ b/clang/test/Parser/lax-conv.cpp
@@ -21,10 +21,10 @@ template <typename VEC> VEC __attribute__((noinline)) test(vector unsigned char
return (VEC)(a * b);
}
vector unsigned int test1(vector unsigned char RetImplicitConv) {
- return RetImplicitConv; // expected-warning {{Implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ return RetImplicitConv; // expected-warning {{implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
}
vector unsigned int test2(vector unsigned char RetImplicitConvAddConst) {
- return RetImplicitConvAddConst + 5; // expected-warning {{Implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ return RetImplicitConvAddConst + 5; // expected-warning {{implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
}
vector unsigned int test3(vector unsigned char RetExplicitConv) {
return (vector unsigned int)RetExplicitConv;
@@ -34,7 +34,7 @@ vector unsigned int test4(vector unsigned char RetExplicitConvAddConst) {
}
vector unsigned int test5(vector unsigned char RetImplicitConvAddSame1,
vector unsigned char RetImplicitConvAddSame2) {
- return RetImplicitConvAddSame1 + RetImplicitConvAddSame2; // expected-warning {{Implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ return RetImplicitConvAddSame1 + RetImplicitConvAddSame2; // expected-warning {{implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
}
vector unsigned int test6(vector unsigned char RetExplicitConvAddSame1,
vector unsigned char RetExplicitConvAddSame2) {
@@ -54,10 +54,10 @@ vector unsigned long long test9(vector unsigned char a, vector unsigned char b)
return test<vector unsigned long long>(a, b);
}
void test1a(vector unsigned char ArgImplicitConv) {
- return dummy(ArgImplicitConv); // expected-warning {{Implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ return dummy(ArgImplicitConv); // expected-warning {{implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
}
void test2a(vector unsigned char ArgImplicitConvAddConst) {
- return dummy(ArgImplicitConvAddConst + 5); // expected-warning {{Implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ return dummy(ArgImplicitConvAddConst + 5); // expected-warning {{implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
}
void test3a(vector unsigned char ArgExplicitConv) {
return dummy((vector unsigned int)ArgExplicitConv);
@@ -67,7 +67,7 @@ void test4a(vector unsigned char ArgExplicitConvAddConst) {
}
void test5a(vector unsigned char ArgImplicitConvAddSame1,
vector unsigned char ArgImplicitConvAddSame2) {
- return dummy(ArgImplicitConvAddSame1 + ArgImplicitConvAddSame2); // expected-warning {{Implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ return dummy(ArgImplicitConvAddSame1 + ArgImplicitConvAddSame2); // expected-warning {{implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
}
void test6a(vector unsigned char ArgExplicitConvAddSame1,
vector unsigned char ArgExplicitConvAddSame2) {
@@ -80,33 +80,33 @@ void test7a(vector unsigned char ArgExplicitConvAddSame1Full,
ArgExplicitConvAddSame2Full));
}
void test_bool_compat(void) {
- vbs = vss; // expected-warning {{Implicit conversion between vector types (''__vector short' (vector of 8 'short' values)' and ''__vector __bool unsigned short' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vbs = vus; // expected-warning {{Implicit conversion between vector types (''__vector unsigned short' (vector of 8 'unsigned short' values)' and ''__vector __bool unsigned short' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ vbs = vss; // expected-warning {{implicit conversion between vector types (''__vector short' (vector of 8 'short' values)' and ''__vector __bool unsigned short' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vbs = vus; // expected-warning {{implicit conversion between vector types (''__vector unsigned short' (vector of 8 'unsigned short' values)' and ''__vector __bool unsigned short' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
- vbi = vsi; // expected-warning {{Implicit conversion between vector types (''__vector int' (vector of 4 'int' values)' and ''__vector __bool unsigned int' (vector of 4 'unsigned int' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vbi = vui; // expected-warning {{Implicit conversion between vector types (''__vector unsigned int' (vector of 4 'unsigned int' values)' and ''__vector __bool unsigned int' (vector of 4 'unsigned int' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ vbi = vsi; // expected-warning {{implicit conversion between vector types (''__vector int' (vector of 4 'int' values)' and ''__vector __bool unsigned int' (vector of 4 'unsigned int' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vbi = vui; // expected-warning {{implicit conversion between vector types (''__vector unsigned int' (vector of 4 'unsigned int' values)' and ''__vector __bool unsigned int' (vector of 4 'unsigned int' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
- vbl = vsl; // expected-warning {{Implicit conversion between vector types (''__vector long long' (vector of 2 'long long' values)' and ''__vector __bool unsigned long long' (vector of 2 'unsigned long long' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vbl = vul; // expected-warning {{Implicit conversion between vector types (''__vector unsigned long long' (vector of 2 'unsigned long long' values)' and ''__vector __bool unsigned long long' (vector of 2 'unsigned long long' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ vbl = vsl; // expected-warning {{implicit conversion between vector types (''__vector long long' (vector of 2 'long long' values)' and ''__vector __bool unsigned long long' (vector of 2 'unsigned long long' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vbl = vul; // expected-warning {{implicit conversion between vector types (''__vector unsigned long long' (vector of 2 'unsigned long long' values)' and ''__vector __bool unsigned long long' (vector of 2 'unsigned long long' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
- vbc = vsc; // expected-warning {{Implicit conversion between vector types (''__vector signed char' (vector of 16 'signed char' values)' and ''__vector __bool unsigned char' (vector of 16 'unsigned char' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vbc = vuc; // expected-warning {{Implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector __bool unsigned char' (vector of 16 'unsigned char' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ vbc = vsc; // expected-warning {{implicit conversion between vector types (''__vector signed char' (vector of 16 'signed char' values)' and ''__vector __bool unsigned char' (vector of 16 'unsigned char' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vbc = vuc; // expected-warning {{implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector __bool unsigned char' (vector of 16 'unsigned char' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
}
void test_pixel_compat(void) {
- vp = vbs; // expected-warning {{Implicit conversion between vector types (''__vector __bool unsigned short' (vector of 8 'unsigned short' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vp = vss; // expected-warning {{Implicit conversion between vector types (''__vector short' (vector of 8 'short' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vp = vus; // expected-warning {{Implicit conversion between vector types (''__vector unsigned short' (vector of 8 'unsigned short' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ vp = vbs; // expected-warning {{implicit conversion between vector types (''__vector __bool unsigned short' (vector of 8 'unsigned short' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vp = vss; // expected-warning {{implicit conversion between vector types (''__vector short' (vector of 8 'short' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vp = vus; // expected-warning {{implicit conversion between vector types (''__vector unsigned short' (vector of 8 'unsigned short' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
- vp = vbi; // expected-warning {{Implicit conversion between vector types (''__vector __bool unsigned int' (vector of 4 'unsigned int' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vp = vsi; // expected-warning {{Implicit conversion between vector types (''__vector int' (vector of 4 'int' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vp = vui; // expected-warning {{Implicit conversion between vector types (''__vector unsigned int' (vector of 4 'unsigned int' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ vp = vbi; // expected-warning {{implicit conversion between vector types (''__vector __bool unsigned int' (vector of 4 'unsigned int' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vp = vsi; // expected-warning {{implicit conversion between vector types (''__vector int' (vector of 4 'int' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vp = vui; // expected-warning {{implicit conversion between vector types (''__vector unsigned int' (vector of 4 'unsigned int' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
- vp = vbl; // expected-warning {{Implicit conversion between vector types (''__vector __bool unsigned long long' (vector of 2 'unsigned long long' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vp = vsl; // expected-warning {{Implicit conversion between vector types (''__vector long long' (vector of 2 'long long' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vp = vul; // expected-warning {{Implicit conversion between vector types (''__vector unsigned long long' (vector of 2 'unsigned long long' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ vp = vbl; // expected-warning {{implicit conversion between vector types (''__vector __bool unsigned long long' (vector of 2 'unsigned long long' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vp = vsl; // expected-warning {{implicit conversion between vector types (''__vector long long' (vector of 2 'long long' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vp = vul; // expected-warning {{implicit conversion between vector types (''__vector unsigned long long' (vector of 2 'unsigned long long' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
- vp = vbc; // expected-warning {{Implicit conversion between vector types (''__vector __bool unsigned char' (vector of 16 'unsigned char' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vp = vsc; // expected-warning {{Implicit conversion between vector types (''__vector signed char' (vector of 16 'signed char' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vp = vuc; // expected-warning {{Implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ vp = vbc; // expected-warning {{implicit conversion between vector types (''__vector __bool unsigned char' (vector of 16 'unsigned char' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vp = vsc; // expected-warning {{implicit conversion between vector types (''__vector signed char' (vector of 16 'signed char' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vp = vuc; // expected-warning {{implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
}
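Every change in this file tracks the diagnostic restyle (lowercase first letter, a semicolon instead of a second sentence); the cases themselves are untouched. The test's own way to avoid the warning is an explicit cast, as in test3, restated here as a standalone sketch (requires AltiVec, e.g. -target powerpc64-linux -maltivec):

    vector unsigned int no_warning(vector unsigned char c) {
      return (vector unsigned int)c; // explicit conversion: no deprecation warning
    }

Compiling with -fno-lax-vector-conversions turns the remaining implicit conversions into hard errors, which is the future default the warning text announces.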
diff --git a/clang/test/Parser/objcbridge-related-attribute.m b/clang/test/Parser/objcbridge-related-attribute.m
index 246afeef5198..e76d5e388141 100644
--- a/clang/test/Parser/objcbridge-related-attribute.m
+++ b/clang/test/Parser/objcbridge-related-attribute.m
@@ -5,10 +5,10 @@ typedef struct __attribute__((objc_bridge_related(NSColor,,CGColor))) CGColor *C
typedef struct __attribute__((objc_bridge_related(NSColor,,))) CGColor *CGColorRef2Ok;
typedef struct __attribute__((objc_bridge_related(NSColor,colorWithCGColor:,))) CGColor *CGColorRef3Ok;
-typedef struct __attribute__((objc_bridge_related(,colorWithCGColor:,CGColor))) CGColor *CGColorRef1NotOk; // expected-error {{expected a related ObjectiveC class name, e.g., 'NSColor'}}
+typedef struct __attribute__((objc_bridge_related(,colorWithCGColor:,CGColor))) CGColor *CGColorRef1NotOk; // expected-error {{expected a related Objective-C class name, e.g., 'NSColor'}}
typedef struct __attribute__((objc_bridge_related(NSColor,colorWithCGColor,CGColor))) CGColor *CGColorRef2NotOk; // expected-error {{expected a class method selector with single argument, e.g., 'colorWithCGColor:'}}
typedef struct __attribute__((objc_bridge_related(NSColor,colorWithCGColor::,CGColor))) CGColor *CGColorRef3NotOk; // expected-error {{expected a class method selector with single argument, e.g., 'colorWithCGColor:'}}
-typedef struct __attribute__((objc_bridge_related(12,colorWithCGColor:,CGColor))) CGColor *CGColorRef4NotOk; // expected-error {{expected a related ObjectiveC class name, e.g., 'NSColor'}}
+typedef struct __attribute__((objc_bridge_related(12,colorWithCGColor:,CGColor))) CGColor *CGColorRef4NotOk; // expected-error {{expected a related Objective-C class name, e.g., 'NSColor'}}
typedef struct __attribute__((objc_bridge_related(NSColor,+:,CGColor))) CGColor *CGColorRef5NotOk; // expected-error {{expected ','}}
typedef struct __attribute__((objc_bridge_related(NSColor,colorWithCGColor:,+))) CGColor *CGColorRef6NotOk; // expected-error {{expected ')'}}
diff --git a/clang/test/Parser/pragma-attribute.cpp b/clang/test/Parser/pragma-attribute.cpp
index bc8e7b9e78c6..6377fc754352 100644
--- a/clang/test/Parser/pragma-attribute.cpp
+++ b/clang/test/Parser/pragma-attribute.cpp
@@ -127,7 +127,7 @@ void function();
// expected-error@-1 {{attribute 'objc_bridge_related' can't be applied to 'function'}}
#pragma clang attribute pop
-#pragma clang attribute push (__attribute__((objc_bridge_related(1))), apply_to=function) // expected-error {{expected a related ObjectiveC class name, e.g., 'NSColor'}}
+#pragma clang attribute push (__attribute__((objc_bridge_related(1))), apply_to=function) // expected-error {{expected a related Objective-C class name, e.g., 'NSColor'}}
#pragma clang attribute push (__attribute__((used)), apply_to=function) // expected-error {{attribute 'used' is not supported by '#pragma clang attribute'}}
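
[editor's note] For context, a minimal sketch of the push/pop form these tests parse, using 'annotate', which #pragma clang attribute is documented to support (the function name here is illustrative):

    #pragma clang attribute push (__attribute__((annotate("example"))), apply_to = function)
    void tagged_function(void);   /* receives annotate("example") */
    #pragma clang attribute pop
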
diff --git a/clang/test/ParserOpenACC/parse-clauses.c b/clang/test/ParserOpenACC/parse-clauses.c
index 694f28b86ec9..49e749feb2ec 100644
--- a/clang/test/ParserOpenACC/parse-clauses.c
+++ b/clang/test/ParserOpenACC/parse-clauses.c
@@ -831,52 +831,38 @@ void ReductionClauseParsing() {
// expected-error@+1{{expected '('}}
#pragma acc serial reduction
for(;;){}
- // expected-error@+3{{missing reduction operator, expected '+', '*', 'max', 'min', '&', '|', '^', '&&', or '||', follwed by a ':'}}
- // expected-error@+2{{expected expression}}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
+ // expected-error@+2{{missing reduction operator, expected '+', '*', 'max', 'min', '&', '|', '^', '&&', or '||', follwed by a ':'}}
+ // expected-error@+1{{expected expression}}
#pragma acc serial reduction()
for(;;){}
- // expected-error@+2{{missing reduction operator, expected '+', '*', 'max', 'min', '&', '|', '^', '&&', or '||', follwed by a ':'}}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
+ // expected-error@+1{{missing reduction operator, expected '+', '*', 'max', 'min', '&', '|', '^', '&&', or '||', follwed by a ':'}}
#pragma acc serial reduction(Begin)
for(;;){}
- // expected-error@+2{{missing reduction operator, expected '+', '*', 'max', 'min', '&', '|', '^', '&&', or '||', follwed by a ':'}}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
+ // expected-error@+1{{missing reduction operator, expected '+', '*', 'max', 'min', '&', '|', '^', '&&', or '||', follwed by a ':'}}
#pragma acc serial reduction(Begin, End)
for(;;){}
- // expected-error@+2{{missing reduction operator, expected '+', '*', 'max', 'min', '&', '|', '^', '&&', or '||', follwed by a ':'}}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
+ // expected-error@+1{{missing reduction operator, expected '+', '*', 'max', 'min', '&', '|', '^', '&&', or '||', follwed by a ':'}}
#pragma acc serial reduction(Begin, End)
for(;;){}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
#pragma acc serial reduction(+:Begin)
for(;;){}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
#pragma acc serial reduction(+:Begin, End)
for(;;){}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
#pragma acc serial reduction(*: Begin, End)
for(;;){}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
#pragma acc serial reduction(max : Begin, End)
for(;;){}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
#pragma acc serial reduction(min: Begin, End)
for(;;){}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
#pragma acc serial reduction(&: Begin, End)
for(;;){}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
#pragma acc serial reduction(|: Begin, End)
for(;;){}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
#pragma acc serial reduction(^: Begin, End)
for(;;){}
- // expected-warning@+2{{OpenACC clause 'seq' not yet implemented, clause ignored}}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
+ // expected-warning@+1{{OpenACC clause 'seq' not yet implemented, clause ignored}}
#pragma acc serial seq, reduction(&&: Begin, End)
for(;;){}
- // expected-warning@+2{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
// expected-warning@+1{{OpenACC clause 'seq' not yet implemented, clause ignored}}
#pragma acc serial reduction(||: Begin, End), seq
for(;;){}
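
[editor's note] The deleted "not yet implemented, clause ignored" expectations reflect that the OpenACC 'reduction' clause is now handled rather than parsed-and-dropped. A minimal sketch of the accepted form, with a hypothetical accumulator (the operator and ':' remain mandatory, as the surviving errors show):

    void reduction_example(void) {
      int sum = 0;
      /* operator, ':', then the variable list */
      #pragma acc serial reduction(+: sum)
      for (int i = 0; i < 100; ++i)
        sum += i;
    }
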
diff --git a/clang/test/Preprocessor/predefined-arch-macros.c b/clang/test/Preprocessor/predefined-arch-macros.c
index ca51f2fc22c5..f0a2ef851287 100644
--- a/clang/test/Preprocessor/predefined-arch-macros.c
+++ b/clang/test/Preprocessor/predefined-arch-macros.c
@@ -793,9 +793,7 @@
// CHECK_KNL_M32: #define __AES__ 1
// CHECK_KNL_M32: #define __AVX2__ 1
// CHECK_KNL_M32: #define __AVX512CD__ 1
-// CHECK_KNL_M32: #define __AVX512ER__ 1
// CHECK_KNL_M32: #define __AVX512F__ 1
-// CHECK_KNL_M32: #define __AVX512PF__ 1
// CHECK_KNL_M32: #define __AVX__ 1
// CHECK_KNL_M32: #define __BMI2__ 1
// CHECK_KNL_M32: #define __BMI__ 1
@@ -808,7 +806,6 @@
// CHECK_KNL_M32: #define __MOVBE__ 1
// CHECK_KNL_M32: #define __PCLMUL__ 1
// CHECK_KNL_M32: #define __POPCNT__ 1
-// CHECK_KNL_M32: #define __PREFETCHWT1__ 1
// CHECK_KNL_M32: #define __PRFCHW__ 1
// CHECK_KNL_M32: #define __RDRND__ 1
// CHECK_KNL_M32: #define __SSE2__ 1
@@ -832,9 +829,7 @@
// CHECK_KNL_M64: #define __AES__ 1
// CHECK_KNL_M64: #define __AVX2__ 1
// CHECK_KNL_M64: #define __AVX512CD__ 1
-// CHECK_KNL_M64: #define __AVX512ER__ 1
// CHECK_KNL_M64: #define __AVX512F__ 1
-// CHECK_KNL_M64: #define __AVX512PF__ 1
// CHECK_KNL_M64: #define __AVX__ 1
// CHECK_KNL_M64: #define __BMI2__ 1
// CHECK_KNL_M64: #define __BMI__ 1
@@ -847,7 +842,6 @@
// CHECK_KNL_M64: #define __MOVBE__ 1
// CHECK_KNL_M64: #define __PCLMUL__ 1
// CHECK_KNL_M64: #define __POPCNT__ 1
-// CHECK_KNL_M64: #define __PREFETCHWT1__ 1
// CHECK_KNL_M64: #define __PRFCHW__ 1
// CHECK_KNL_M64: #define __RDRND__ 1
// CHECK_KNL_M64: #define __SSE2_MATH__ 1
@@ -874,9 +868,7 @@
// CHECK_KNM_M32: #define __AES__ 1
// CHECK_KNM_M32: #define __AVX2__ 1
// CHECK_KNM_M32: #define __AVX512CD__ 1
-// CHECK_KNM_M32: #define __AVX512ER__ 1
// CHECK_KNM_M32: #define __AVX512F__ 1
-// CHECK_KNM_M32: #define __AVX512PF__ 1
// CHECK_KNM_M32: #define __AVX512VPOPCNTDQ__ 1
// CHECK_KNM_M32: #define __AVX__ 1
// CHECK_KNM_M32: #define __BMI2__ 1
@@ -890,7 +882,6 @@
// CHECK_KNM_M32: #define __MOVBE__ 1
// CHECK_KNM_M32: #define __PCLMUL__ 1
// CHECK_KNM_M32: #define __POPCNT__ 1
-// CHECK_KNM_M32: #define __PREFETCHWT1__ 1
// CHECK_KNM_M32: #define __PRFCHW__ 1
// CHECK_KNM_M32: #define __RDRND__ 1
// CHECK_KNM_M32: #define __SSE2__ 1
@@ -911,9 +902,7 @@
// CHECK_KNM_M64: #define __AES__ 1
// CHECK_KNM_M64: #define __AVX2__ 1
// CHECK_KNM_M64: #define __AVX512CD__ 1
-// CHECK_KNM_M64: #define __AVX512ER__ 1
// CHECK_KNM_M64: #define __AVX512F__ 1
-// CHECK_KNM_M64: #define __AVX512PF__ 1
// CHECK_KNM_M64: #define __AVX512VPOPCNTDQ__ 1
// CHECK_KNM_M64: #define __AVX__ 1
// CHECK_KNM_M64: #define __BMI2__ 1
@@ -927,7 +916,6 @@
// CHECK_KNM_M64: #define __MOVBE__ 1
// CHECK_KNM_M64: #define __PCLMUL__ 1
// CHECK_KNM_M64: #define __POPCNT__ 1
-// CHECK_KNM_M64: #define __PREFETCHWT1__ 1
// CHECK_KNM_M64: #define __PRFCHW__ 1
// CHECK_KNM_M64: #define __RDRND__ 1
// CHECK_KNM_M64: #define __SSE2_MATH__ 1
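
[editor's note] These hunks drop __AVX512ER__, __AVX512PF__, and __PREFETCHWT1__ from the KNL/KNM expectations, matching the removal of those Xeon Phi-only extensions. A minimal probe, assuming the file is preprocessed with -march=knl:

    #ifdef __AVX512F__
    int have_avx512f = 1;     /* still defined for KNL */
    #endif
    #ifdef __AVX512ER__
    #error "__AVX512ER__ is no longer defined for -march=knl"
    #endif
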
diff --git a/clang/test/Preprocessor/riscv-target-features.c b/clang/test/Preprocessor/riscv-target-features.c
index 913093bb51db..0865add7e8fb 100644
--- a/clang/test/Preprocessor/riscv-target-features.c
+++ b/clang/test/Preprocessor/riscv-target-features.c
@@ -79,7 +79,9 @@
// CHECK-NOT: __riscv_xventanacondops {{.*$}}
// CHECK-NOT: __riscv_za128rs {{.*$}}
// CHECK-NOT: __riscv_za64rs {{.*$}}
+// CHECK-NOT: __riscv_zaamo {{.*$}}
// CHECK-NOT: __riscv_zacas {{.*$}}
+// CHECK-NOT: __riscv_zalrsc {{.*$}}
// CHECK-NOT: __riscv_zama16b {{.*$}}
// CHECK-NOT: __riscv_zawrs {{.*$}}
// CHECK-NOT: __riscv_zba {{.*$}}
@@ -174,10 +176,8 @@
// CHECK-NOT: __riscv_sspm{{.*$}}
// CHECK-NOT: __riscv_ssqosid{{.*$}}
// CHECK-NOT: __riscv_supm{{.*$}}
-// CHECK-NOT: __riscv_zaamo {{.*$}}
// CHECK-NOT: __riscv_zabha {{.*$}}
// CHECK-NOT: __riscv_zalasr {{.*$}}
-// CHECK-NOT: __riscv_zalrsc {{.*$}}
// CHECK-NOT: __riscv_zfbfmin {{.*$}}
// CHECK-NOT: __riscv_zicfilp {{.*$}}
// CHECK-NOT: __riscv_zicfiss {{.*$}}
@@ -708,6 +708,14 @@
// CHECK-ZA64RS-EXT: __riscv_za64rs 1000000{{$}}
// RUN: %clang --target=riscv32 \
+// RUN: -march=rv32i_zaamo1p0 -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-ZAAMO-EXT %s
+// RUN: %clang --target=riscv64 \
+// RUN: -march=rv64i_zaamo1p0 -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-ZAAMO-EXT %s
+// CHECK-ZAAMO-EXT: __riscv_zaamo 1000000{{$}}
+
+// RUN: %clang --target=riscv32 \
// RUN: -march=rv32ia_zacas1p0 -E -dM %s \
// RUN: -o - | FileCheck --check-prefix=CHECK-ZACAS-EXT %s
// RUN: %clang --target=riscv64 \
@@ -715,6 +723,14 @@
// RUN: -o - | FileCheck --check-prefix=CHECK-ZACAS-EXT %s
// CHECK-ZACAS-EXT: __riscv_zacas 1000000{{$}}
+// RUN: %clang --target=riscv32 \
+// RUN: -march=rv32i_zalrsc1p0 -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-ZALRSC-EXT %s
+// RUN: %clang --target=riscv64 \
+// RUN: -march=rv64i_zalrsc1p0 -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-ZALRSC-EXT %s
+// CHECK-ZALRSC-EXT: __riscv_zalrsc 1000000{{$}}
+
// RUN: %clang --target=riscv32 -march=rv32izama16b -x c -E -dM %s \
// RUN: -o - | FileCheck --check-prefix=CHECK-ZAMA16B-EXT %s
// RUN: %clang --target=riscv64 -march=rv64izama16b -x c -E -dM %s \
@@ -1555,14 +1571,6 @@
// Experimental extensions
// RUN: %clang --target=riscv32 -menable-experimental-extensions \
-// RUN: -march=rv32i_zaamo0p2 -E -dM %s \
-// RUN: -o - | FileCheck --check-prefix=CHECK-ZAAMO-EXT %s
-// RUN: %clang --target=riscv64 -menable-experimental-extensions \
-// RUN: -march=rv64i_zaamo0p2 -E -dM %s \
-// RUN: -o - | FileCheck --check-prefix=CHECK-ZAAMO-EXT %s
-// CHECK-ZAAMO-EXT: __riscv_zaamo 2000{{$}}
-
-// RUN: %clang --target=riscv32 -menable-experimental-extensions \
// RUN: -march=rv32ia_zabha1p0 -E -dM %s \
// RUN: -o - | FileCheck --check-prefix=CHECK-ZABHA-EXT %s
// RUN: %clang --target=riscv64 -menable-experimental-extensions \
@@ -1579,14 +1587,6 @@
// CHECK-ZALASR-EXT: __riscv_zalasr 1000{{$}}
// RUN: %clang --target=riscv32 -menable-experimental-extensions \
-// RUN: -march=rv32i_zalrsc0p2 -E -dM %s \
-// RUN: -o - | FileCheck --check-prefix=CHECK-ZALRSC-EXT %s
-// RUN: %clang --target=riscv64 -menable-experimental-extensions \
-// RUN: -march=rv64i_zalrsc0p2 -E -dM %s \
-// RUN: -o - | FileCheck --check-prefix=CHECK-ZALRSC-EXT %s
-// CHECK-ZALRSC-EXT: __riscv_zalrsc 2000{{$}}
-
-// RUN: %clang --target=riscv32 -menable-experimental-extensions \
// RUN: -march=rv32izfbfmin1p0 -E -dM %s \
// RUN: -o - | FileCheck --check-prefix=CHECK-ZFBFMIN-EXT %s
// RUN: %clang --target=riscv64 -menable-experimental-extensions \
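
[editor's note] Zaamo and Zalrsc move from the experimental section (version 0.2, gated behind -menable-experimental-extensions) to the ratified one (1.0). A minimal probe, assuming -march=rv64i_zaamo1p0; clang encodes the extension version as major * 1000000 + minor * 1000:

    #if defined(__riscv_zaamo) && __riscv_zaamo >= 1000000
    int zaamo_is_ratified = 1;   /* 1000000 == version 1.0 */
    #endif
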
diff --git a/clang/test/Preprocessor/stdc-ms-extension.cpp b/clang/test/Preprocessor/stdc-ms-extension.cpp
new file mode 100644
index 000000000000..6e9fa6055306
--- /dev/null
+++ b/clang/test/Preprocessor/stdc-ms-extension.cpp
@@ -0,0 +1,9 @@
+// RUN: %clang_cl /TC /dev/null /E -Xclang -dM 2> /dev/null | FileCheck -match-full-lines %s --check-prefix=NOSTDC
+// RUN: %clang_cl /TC /dev/null /E -Xclang -dM /Zc:__STDC__ 2> /dev/null | FileCheck -match-full-lines %s --check-prefix=YESSTDC
+// __STDC__ should never be defined in C++ mode with fms-compatibility.
+// RUN: %clang_cl /dev/null /E -Xclang -dM 2>&1 | FileCheck %s --check-prefix=NOSTDC
+// RUN: %clang_cl /dev/null /E -Xclang -dM /Zc:__STDC__ 2>&1 | FileCheck %s --check-prefix=ZCSTDCIGNORED
+// YESSTDC: #define __STDC__ 1
+// NOSTDC-NOT: #define __STDC__ 1
+// ZCSTDCIGNORED-NOT: #define __STDC__ 1
+// ZCSTDCIGNORED: argument unused during compilation
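
[editor's note] The new test pins down clang-cl's __STDC__ behavior: the macro is defined only when compiling as C (/TC) with /Zc:__STDC__, and never in C++ mode, where the flag is ignored with a warning. A minimal sketch of code keying off it:

    #if defined(__STDC__)
    int strict_c_mode = 1;        /* clang-cl: only with /TC /Zc:__STDC__ */
    #else
    int ms_compat_mode = 1;       /* default clang-cl behavior */
    #endif
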
diff --git a/clang/test/Preprocessor/x86_target_features.c b/clang/test/Preprocessor/x86_target_features.c
index 57104c9e7a50..7567267be26b 100644
--- a/clang/test/Preprocessor/x86_target_features.c
+++ b/clang/test/Preprocessor/x86_target_features.c
@@ -90,38 +90,6 @@
// AVX512CD: #define __SSE__ 1
// AVX512CD: #define __SSSE3__ 1
-// RUN: %clang -target i386-unknown-unknown -march=atom -mavx512er -x c -E -dM -o - %s | FileCheck -match-full-lines --check-prefix=AVX512ER %s
-
-// AVX512ER: #define __AVX2__ 1
-// AVX512ER: #define __AVX512ER__ 1
-// AVX512ER: #define __AVX512F__ 1
-// AVX512ER: #define __AVX__ 1
-// AVX512ER: #define __EVEX512__ 1
-// AVX512ER: #define __SSE2_MATH__ 1
-// AVX512ER: #define __SSE2__ 1
-// AVX512ER: #define __SSE3__ 1
-// AVX512ER: #define __SSE4_1__ 1
-// AVX512ER: #define __SSE4_2__ 1
-// AVX512ER: #define __SSE_MATH__ 1
-// AVX512ER: #define __SSE__ 1
-// AVX512ER: #define __SSSE3__ 1
-
-// RUN: %clang -target i386-unknown-unknown -march=atom -mavx512pf -x c -E -dM -o - %s | FileCheck -match-full-lines --check-prefix=AVX512PF %s
-
-// AVX512PF: #define __AVX2__ 1
-// AVX512PF: #define __AVX512F__ 1
-// AVX512PF: #define __AVX512PF__ 1
-// AVX512PF: #define __AVX__ 1
-// AVX512PF: #define __EVEX512__ 1
-// AVX512PF: #define __SSE2_MATH__ 1
-// AVX512PF: #define __SSE2__ 1
-// AVX512PF: #define __SSE3__ 1
-// AVX512PF: #define __SSE4_1__ 1
-// AVX512PF: #define __SSE4_2__ 1
-// AVX512PF: #define __SSE_MATH__ 1
-// AVX512PF: #define __SSE__ 1
-// AVX512PF: #define __SSSE3__ 1
-
// RUN: %clang -target i386-unknown-unknown -march=atom -mavx512dq -x c -E -dM -o - %s | FileCheck -match-full-lines --check-prefix=AVX512DQ %s
// AVX512DQ: #define __AVX2__ 1
@@ -171,22 +139,6 @@
// AVX512VL: #define __SSE__ 1
// AVX512VL: #define __SSSE3__ 1
-// RUN: %clang -target i386-unknown-unknown -march=atom -mavx512pf -mno-avx512f -x c -E -dM -o - %s | FileCheck -match-full-lines --check-prefix=AVX512F2 %s
-
-// AVX512F2: #define __AVX2__ 1
-// AVX512F2-NOT: #define __AVX512F__ 1
-// AVX512F2-NOT: #define __AVX512PF__ 1
-// AVX512F2-NOT: #define __EVEX512__ 1
-// AVX512F2: #define __AVX__ 1
-// AVX512F2: #define __SSE2_MATH__ 1
-// AVX512F2: #define __SSE2__ 1
-// AVX512F2: #define __SSE3__ 1
-// AVX512F2: #define __SSE4_1__ 1
-// AVX512F2: #define __SSE4_2__ 1
-// AVX512F2: #define __SSE_MATH__ 1
-// AVX512F2: #define __SSE__ 1
-// AVX512F2: #define __SSSE3__ 1
-
// RUN: %clang -target i386-unknown-unknown -march=atom -mavx512ifma -x c -E -dM -o - %s | FileCheck -match-full-lines --check-prefix=AVX512IFMA %s
// AVX512IFMA: #define __AVX2__ 1
@@ -640,14 +592,12 @@
// RUN: %clang -target i386-unknown-unknown -march=atom -mavx512f -mno-avx512f -x c -E -dM -o - %s | FileCheck -match-full-lines --check-prefix=NOEVEX512 %s
// RUN: %clang -target i386-unknown-unknown -march=atom -mavx512cd -mno-avx512f -x c -E -dM -o - %s | FileCheck -match-full-lines --check-prefix=NOEVEX512 %s
-// RUN: %clang -target i386-unknown-unknown -march=atom -mavx512er -mno-avx512f -x c -E -dM -o - %s | FileCheck -match-full-lines --check-prefix=NOEVEX512 %s
// NOEVEX512-NOT: #define __AVX512F__ 1
// NOEVEX512-NOT: #define __EVEX256__ 1
// NOEVEX512-NOT: #define __EVEX512__ 1
// RUN: %clang -target i386-unknown-unknown -march=atom -mavx512f -mno-evex512 -x c -E -dM -o - %s | FileCheck -match-full-lines --check-prefix=AVX512NOEVEX512 %s
// RUN: %clang -target i386-unknown-unknown -march=atom -mavx512cd -mno-evex512 -x c -E -dM -o - %s | FileCheck -match-full-lines --check-prefix=AVX512NOEVEX512 %s
-// RUN: %clang -target i386-unknown-unknown -march=atom -mavx512er -mno-evex512 -x c -E -dM -o - %s | FileCheck -match-full-lines --check-prefix=AVX512NOEVEX512 %s
// AVX512NOEVEX512: #define __AVX512F__ 1
// AVX512NOEVEX512-NOT: #define __EVEX256__ 1
// AVX512NOEVEX512-NOT: #define __EVEX512__ 1
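
[editor's note] Beyond deleting the AVX512ER/AVX512PF run lines, the surviving NOEVEX512 checks still verify that -mno-avx512f strips the EVEX macros. A hypothetical probe for that interaction (e.g. under -mavx512cd -mno-avx512f):

    #if !defined(__AVX512F__) && !defined(__EVEX512__)
    int evex512_stripped = 1;   /* -mno-avx512f clears both macros */
    #endif
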
diff --git a/clang/test/Profile/c-unreachable-after-switch.c b/clang/test/Profile/c-unreachable-after-switch.c
index 34d2742f7a3b..0ed2efa32e83 100644
--- a/clang/test/Profile/c-unreachable-after-switch.c
+++ b/clang/test/Profile/c-unreachable-after-switch.c
@@ -5,11 +5,11 @@
// CHECK-LABEL: @foo()
// CHECK: store {{.*}} @[[C]]
void foo(void) {
- // CHECK: store {{.*}} @[[C]], i64 0, i64 2
+ // CHECK: store {{.*}} @[[C]], i64 16)
switch (0) {
default:
return;
}
// We shouldn't emit the unreachable counter. This used to crash in GlobalDCE.
- // CHECK-NOT: store {{.*}} @[[C]], i64 0, i64 1}
+ // CHECK-NOT: store {{.*}} @[[C]], i64 8)
}
diff --git a/clang/test/Profile/misexpect-branch.c b/clang/test/Profile/misexpect-branch.c
index ce46b4688061..5c4394405e17 100644
--- a/clang/test/Profile/misexpect-branch.c
+++ b/clang/test/Profile/misexpect-branch.c
@@ -26,10 +26,10 @@ int buzz();
const int inner_loop = 100;
const int outer_loop = 2000;
-int bar() { // imprecise-warning-re {{Potential performance regression from use of __builtin_expect(): Annotation was correct on {{.+}}% ({{[0-9]+ / [0-9]+}}) of profiled executions.}}
+int bar() { // imprecise-warning-re {{potential performance regression from use of __builtin_expect(): annotation was correct on {{.+}}% ({{[0-9]+ / [0-9]+}}) of profiled executions}}
int rando = buzz();
int x = 0;
- if (likely(rando % (outer_loop * inner_loop) == 0)) { // exact-warning-re {{Potential performance regression from use of __builtin_expect(): Annotation was correct on {{.+}}% ({{[0-9]+ / [0-9]+}}) of profiled executions.}}
+ if (likely(rando % (outer_loop * inner_loop) == 0)) { // exact-warning-re {{potential performance regression from use of __builtin_expect(): annotation was correct on {{.+}}% ({{[0-9]+ / [0-9]+}}) of profiled executions}}
x = baz(rando);
} else {
x = foo(50);
@@ -37,10 +37,10 @@ int bar() { // imprecise-warning-re {{Potential performance regression from use
return x;
}
-int fizz() { // imprecise-warning-re {{Potential performance regression from use of __builtin_expect(): Annotation was correct on {{.+}}% ({{[0-9]+ / [0-9]+}}) of profiled executions.}}
+int fizz() { // imprecise-warning-re {{potential performance regression from use of __builtin_expect(): annotation was correct on {{.+}}% ({{[0-9]+ / [0-9]+}}) of profiled executions}}
int rando = buzz();
int x = 0;
- if (unlikely(rando % (outer_loop * inner_loop) == 0)) { // exact-warning-re {{Potential performance regression from use of __builtin_expect(): Annotation was correct on {{.+}}% ({{[0-9]+ / [0-9]+}}) of profiled executions.}}
+ if (unlikely(rando % (outer_loop * inner_loop) == 0)) { // exact-warning-re {{potential performance regression from use of __builtin_expect(): annotation was correct on {{.+}}% ({{[0-9]+ / [0-9]+}}) of profiled executions}}
x = baz(rando);
} else {
x = foo(50);
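
[editor's note] These are wording-only updates to the misexpect diagnostic (lowercased, trailing period dropped). For context, a minimal sketch of the pattern being profiled, assuming the usual macro spellings:

    #define likely(x)   __builtin_expect(!!(x), 1)
    #define unlikely(x) __builtin_expect(!!(x), 0)

    int clamp_nonnegative(int v) {
      if (unlikely(v < 0))   /* hinted cold; misexpect warns if the profile disagrees */
        return 0;
      return v;
    }
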
diff --git a/clang/test/Profile/misexpect-switch-default.c b/clang/test/Profile/misexpect-switch-default.c
index 033490e558e6..cd337b943017 100644
--- a/clang/test/Profile/misexpect-switch-default.c
+++ b/clang/test/Profile/misexpect-switch-default.c
@@ -20,7 +20,7 @@ int main() {
int j;
for (j = 0; j < outer_loop * inner_loop; ++j) {
unsigned condition = rand() % 5;
- switch (__builtin_expect(condition, 6)) { // expected-warning-re {{Potential performance regression from use of __builtin_expect(): Annotation was correct on {{.+}}% ({{[0-9]+ / [0-9]+}}) of profiled executions.}}
+ switch (__builtin_expect(condition, 6)) { // expected-warning-re {{potential performance regression from use of __builtin_expect(): annotation was correct on {{.+}}% ({{[0-9]+ / [0-9]+}}) of profiled executions}}
case 0:
val += sum(arry, arry_size);
break;
diff --git a/clang/test/Profile/misexpect-switch.c b/clang/test/Profile/misexpect-switch.c
index 8ca8a155c74a..84a7174f635f 100644
--- a/clang/test/Profile/misexpect-switch.c
+++ b/clang/test/Profile/misexpect-switch.c
@@ -20,7 +20,7 @@ int main() {
for (j = 0; j < outer_loop; ++j) {
for (k = 0; k < inner_loop; ++k) {
unsigned condition = rand() % 10000;
- switch (__builtin_expect(condition, 0)) { // expected-warning-re {{Potential performance regression from use of __builtin_expect(): Annotation was correct on {{.+}}% ({{[0-9]+ / [0-9]+}}) of profiled executions.}}
+ switch (__builtin_expect(condition, 0)) { // expected-warning-re {{potential performance regression from use of __builtin_expect(): annotation was correct on {{.+}}% ({{[0-9]+ / [0-9]+}}) of profiled executions}}
case 0:
val += sum(arry, arry_size);
break;
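
[editor's note] The same rewording in the switch form, where the second argument of __builtin_expect names the case value expected to dominate. A hypothetical sketch:

    int dispatch(unsigned condition) {
      switch (__builtin_expect(condition, 0)) {
      case 0:                /* hinted hot case */
        return 1;
      default:
        return 0;
      }
    }
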
diff --git a/clang/test/Sema/atomic-ops.c b/clang/test/Sema/atomic-ops.c
index 1d36667d6cf4..2024b81ce6ae 100644
--- a/clang/test/Sema/atomic-ops.c
+++ b/clang/test/Sema/atomic-ops.c
@@ -639,6 +639,38 @@ void memory_checks(_Atomic(int) *Ap, int *p, int val) {
(void)__atomic_compare_exchange_n(p, p, val, 0, memory_order_seq_cst, -1); // expected-warning {{memory order argument to atomic operation is invalid}}
}
+struct Z {
+ char z[];
+};
+
+void zeroSizeArgError(struct Z *a, struct Z *b, struct Z *c) {
+ __atomic_exchange(b, b, c, memory_order_relaxed); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_exchange(b, b, c, memory_order_acq_rel); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_exchange(b, b, c, memory_order_acquire); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_exchange(b, b, c, memory_order_consume); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_exchange(b, b, c, memory_order_release); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_exchange(b, b, c, memory_order_seq_cst); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_load(a, b, memory_order_relaxed); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_load(a, b, memory_order_acq_rel); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_load(a, b, memory_order_acquire); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_load(a, b, memory_order_consume); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_load(a, b, memory_order_release); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_load(a, b, memory_order_seq_cst); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_store(a, b, memory_order_relaxed); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_store(a, b, memory_order_acq_rel); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_store(a, b, memory_order_acquire); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_store(a, b, memory_order_consume); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_store(a, b, memory_order_release); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_store(a, b, memory_order_seq_cst); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_compare_exchange(a, b, c, 0, memory_order_relaxed, memory_order_relaxed); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_compare_exchange(a, b, c, 0, memory_order_acq_rel, memory_order_acq_rel); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_compare_exchange(a, b, c, 0, memory_order_acquire, memory_order_acquire); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_compare_exchange(a, b, c, 0, memory_order_consume, memory_order_consume); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_compare_exchange(a, b, c, 0, memory_order_release, memory_order_release); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+ __atomic_compare_exchange(a, b, c, 0, memory_order_seq_cst, memory_order_seq_cst); // expected-error {{address argument to atomic builtin must be a pointer to a non-zero-sized object}}
+
+}
+
void nullPointerWarning(void) {
volatile _Atomic(int) vai;
_Atomic(int) ai;
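
[editor's note] The new zeroSizeArgError cases exercise a fresh Sema check: a struct whose only member is a flexible array has size zero (a GNU extension), and the atomic builtins now reject pointers to such objects. A contrasting sketch that stays well-formed:

    struct Sized { int v; };    /* non-zero size, so atomics are fine */

    void atomics_ok(struct Sized *p, struct Sized *out) {
      __atomic_load(p, out, __ATOMIC_SEQ_CST);
    }
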
diff --git a/clang/test/Sema/attr-assume.c b/clang/test/Sema/attr-assume.c
deleted file mode 100644
index 98deffa3a746..000000000000
--- a/clang/test/Sema/attr-assume.c
+++ /dev/null
@@ -1,14 +0,0 @@
-// RUN: %clang_cc1 -triple i386-apple-darwin9 -fsyntax-only -verify %s
-
-void f1(void) __attribute__((assume(3))); // expected-error {{expected string literal as argument of 'assume' attribute}}
-void f2(void) __attribute__((assume(int))); // expected-error {{expected string literal as argument of 'assume' attribute}}
-void f3(void) __attribute__((assume(for))); // expected-error {{expected string literal as argument of 'assume' attribute}}
-void f4(void) __attribute__((assume("QQQQ"))); // expected-warning {{unknown assumption string 'QQQQ'; attribute is potentially ignored}}
-void f5(void) __attribute__((assume("omp_no_openmp")));
-void f6(void) __attribute__((assume("omp_noopenmp"))); // expected-warning {{unknown assumption string 'omp_noopenmp' may be misspelled; attribute is potentially ignored, did you mean 'omp_no_openmp'?}}
-void f7(void) __attribute__((assume("omp_no_openmp_routine"))); // expected-warning {{unknown assumption string 'omp_no_openmp_routine' may be misspelled; attribute is potentially ignored, did you mean 'omp_no_openmp_routines'?}}
-void f8(void) __attribute__((assume("omp_no_openmp1"))); // expected-warning {{unknown assumption string 'omp_no_openmp1' may be misspelled; attribute is potentially ignored, did you mean 'omp_no_openmp'?}}
-void f9(void) __attribute__((assume("omp_no_openmp", "omp_no_openmp"))); // expected-error {{'assume' attribute takes one argument}}
-
-int g1 __attribute__((assume(0))); // expected-error {{expected string literal as argument of 'assume' attribute}}
-int g2 __attribute__((assume("omp_no_openmp"))); // expected-warning {{'assume' attribute only applies to functions and Objective-C methods}}
diff --git a/clang/test/Sema/attr-availability-ios.c b/clang/test/Sema/attr-availability-ios.c
index b97b7e688cc6..b001e70b5ff5 100644
--- a/clang/test/Sema/attr-availability-ios.c
+++ b/clang/test/Sema/attr-availability-ios.c
@@ -9,6 +9,7 @@ void f4(int) __attribute__((availability(macosx,introduced=10.1,deprecated=10.3,
void f5(int) __attribute__((availability(ios,introduced=2.0))) __attribute__((availability(ios,deprecated=3.0))); // expected-note {{'f5' has been explicitly marked deprecated here}}
void f6(int) __attribute__((availability(ios,deprecated=3.0))); // expected-note {{'f6' has been explicitly marked deprecated here}}
void f6(int) __attribute__((availability(iOS,introduced=2.0)));
+void f7(int) __attribute__((availability(ios,introduced=2.0, environment=e))); // expected-error {{unexpected parameter 'environment' in availability attribute, not permitted in C/C++}}
void test(void) {
f0(0); // expected-warning{{'f0' is deprecated: first deprecated in iOS 2.1}}
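
[editor's note] The added f7 line checks that the 'environment' key (introduced for HLSL availability) is rejected in C/C++. For comparison, a minimal well-formed annotation of the kind the rest of this file uses:

    void api_func(int) __attribute__((availability(ios, introduced=2.0, deprecated=3.0)));
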
diff --git a/clang/test/Sema/attr-objc-bridge-related.m b/clang/test/Sema/attr-objc-bridge-related.m
index 7b2e3e5df3fe..6c7fb2588dbc 100644
--- a/clang/test/Sema/attr-objc-bridge-related.m
+++ b/clang/test/Sema/attr-objc-bridge-related.m
@@ -3,5 +3,5 @@
struct [[clang::objc_bridge_related(NSParagraphStyle,,)]] TestBridgedRef;
struct [[clang::objc_bridge_related(NSColor,colorWithCGColor:,CGColor)]] CGColorRefOk;
-struct [[clang::objc_bridge_related(,colorWithCGColor:,CGColor)]] CGColorRef1NotOk; // expected-error {{expected a related ObjectiveC class name, e.g., 'NSColor'}}
+struct [[clang::objc_bridge_related(,colorWithCGColor:,CGColor)]] CGColorRef1NotOk; // expected-error {{expected a related Objective-C class name, e.g., 'NSColor'}}
struct [[clang::objc_bridge_related(NSColor,colorWithCGColor::,CGColor)]] CGColorRef3NotOk; // expected-error {{expected a class method selector with single argument, e.g., 'colorWithCGColor:'}}
diff --git a/clang/test/Sema/builtin-assume.c b/clang/test/Sema/builtin-assume.c
index 932fb5c973eb..21d62d8fd06c 100644
--- a/clang/test/Sema/builtin-assume.c
+++ b/clang/test/Sema/builtin-assume.c
@@ -8,20 +8,20 @@ int ispure(int) __attribute__((pure));
int foo(int *a, int i) {
#ifdef _MSC_VER
__assume(i != 4);
- __assume(++i > 2); //expected-warning {{the argument to '__assume' has side effects that will be discarded}}
- __assume(nonconst() > 2); //expected-warning {{the argument to '__assume' has side effects that will be discarded}}
+ __assume(++i > 2); //expected-warning {{assumption is ignored because it contains (potential) side-effects}}
+ __assume(nonconst() > 2); //expected-warning {{assumption is ignored because it contains (potential) side-effects}}
__assume(isconst() > 2);
__assume(ispure(i) > 2);
- __assume(ispure(++i) > 2); //expected-warning {{the argument to '__assume' has side effects that will be discarded}}
+ __assume(ispure(++i) > 2); //expected-warning {{assumption is ignored because it contains (potential) side-effects}}
int test = sizeof(struct{char qq[(__assume(i != 5), 7)];});
#else
__builtin_assume(i != 4);
- __builtin_assume(++i > 2); //expected-warning {{the argument to '__builtin_assume' has side effects that will be discarded}}
- __builtin_assume(nonconst() > 2); //expected-warning {{the argument to '__builtin_assume' has side effects that will be discarded}}
+ __builtin_assume(++i > 2); //expected-warning {{assumption is ignored because it contains (potential) side-effects}}
+ __builtin_assume(nonconst() > 2); //expected-warning {{assumption is ignored because it contains (potential) side-effects}}
__builtin_assume(isconst() > 2);
__builtin_assume(ispure(i) > 2);
- __builtin_assume(ispure(++i) > 2); //expected-warning {{the argument to '__builtin_assume' has side effects that will be discarded}}
+ __builtin_assume(ispure(++i) > 2); //expected-warning {{assumption is ignored because it contains (potential) side-effects}}
int test = sizeof(struct{char qq[(__builtin_assume(i != 5), 7)];}); // expected-warning {{variable length array}}
#endif
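
[editor's note] The diagnostic now states that the assumption is dropped entirely when its argument has (potential) side effects. A minimal sketch, assuming a hypothetical pure helper:

    int pure_check(int) __attribute__((pure));

    void assume_examples(int i) {
      __builtin_assume(i > 0);              /* kept: no side effects */
      __builtin_assume(pure_check(i) > 0);  /* kept: pure call */
      __builtin_assume(i++ > 0);            /* warned and ignored */
    }
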
diff --git a/clang/test/Sema/builtins-x86.c b/clang/test/Sema/builtins-x86.c
index cbaf7bcde871..7d9cdce3d789 100644
--- a/clang/test/Sema/builtins-x86.c
+++ b/clang/test/Sema/builtins-x86.c
@@ -106,14 +106,6 @@ __m128i test_mm_mask_i32gather_epi32(__m128i a, int const *b, __m128i c, __m128i
return __builtin_ia32_gatherd_d(a, b, c, mask, 5); // expected-error {{scale argument must be 1, 2, 4, or 8}}
}
-void _mm512_mask_prefetch_i32gather_ps(__m512i index, __mmask16 mask, int const *addr) {
- __builtin_ia32_gatherpfdps(mask, index, addr, 5, 1); // expected-error {{scale argument must be 1, 2, 4, or 8}}
-}
-
-void _mm512_mask_prefetch_i32gather_ps_2(__m512i index, __mmask16 mask, int const *addr) {
- __builtin_ia32_gatherpfdps(mask, index, addr, 1, 1); // expected-error {{argument value 1 is outside the valid range [2, 3]}}
-}
-
__m512i test_mm512_shldi_epi64(__m512i __A, __m512i __B) {
return __builtin_ia32_vpshldq512(__A, __B, 1024); // expected-error {{argument value 1024 is outside the valid range [0, 255]}}
}
diff --git a/clang/test/Sema/builtins.c b/clang/test/Sema/builtins.c
index 3bee31459529..4f843aeec24e 100644
--- a/clang/test/Sema/builtins.c
+++ b/clang/test/Sema/builtins.c
@@ -277,9 +277,9 @@ void test21(const int *ptr) {
}
void test_ei_i42i(_BitInt(42) *ptr, int value) {
- __sync_fetch_and_add(ptr, value); // expected-error {{Atomic memory operand must have a power-of-two size}}
+ __sync_fetch_and_add(ptr, value); // expected-error {{atomic memory operand must have a power-of-two size}}
// expected-warning@+1 {{the semantics of this intrinsic changed with GCC version 4.4 - the newer semantics are provided here}}
- __sync_nand_and_fetch(ptr, value); // expected-error {{Atomic memory operand must have a power-of-two size}}
+ __sync_nand_and_fetch(ptr, value); // expected-error {{atomic memory operand must have a power-of-two size}}
__atomic_fetch_add(ptr, 1, 0); // expected-error {{argument to atomic builtin of type '_BitInt' is not supported}}
}
@@ -305,9 +305,9 @@ void test_ei_ii64(int *ptr, _BitInt(64) value) {
}
void test_ei_i42i42(_BitInt(42) *ptr, _BitInt(42) value) {
- __sync_fetch_and_add(ptr, value); // expected-error {{Atomic memory operand must have a power-of-two size}}
+ __sync_fetch_and_add(ptr, value); // expected-error {{atomic memory operand must have a power-of-two size}}
// expected-warning@+1 {{the semantics of this intrinsic changed with GCC version 4.4 - the newer semantics are provided here}}
- __sync_nand_and_fetch(ptr, value); // expected-error {{Atomic memory operand must have a power-of-two size}}
+ __sync_nand_and_fetch(ptr, value); // expected-error {{atomic memory operand must have a power-of-two size}}
}
void test_ei_i64i64(_BitInt(64) *ptr, _BitInt(64) value) {
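
[editor's note] Only the capitalization of the diagnostic changes; the rule itself stands: the __sync builtins need an operand whose storage size is a power of two. A hypothetical contrast with _BitInt(64), which occupies 8 bytes and is accepted:

    void fetch_add_ok(_BitInt(64) *ptr, _BitInt(64) value) {
      __sync_fetch_and_add(ptr, value);   /* 8-byte operand: no error */
    }
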
diff --git a/clang/test/Sema/constant_builtins_vector.cpp b/clang/test/Sema/constant_builtins_vector.cpp
index ddb78696ce62..c6b1b37cef28 100644
--- a/clang/test/Sema/constant_builtins_vector.cpp
+++ b/clang/test/Sema/constant_builtins_vector.cpp
@@ -719,7 +719,7 @@ constexpr vector4char
vectorShuffleFail1 = // expected-error {{constexpr variable 'vectorShuffleFail1'\
must be initialized by a constant expression}}
__builtin_shufflevector( // expected-error {{index for __builtin_shufflevector \
-not within the bounds of the input vectors; index of -1 found at position 0 not \
-permitted in a constexpr context.}}
+not within the bounds of the input vectors; index of -1 found at position 0 is not \
+permitted in a constexpr context}}
vector4charConst1,
vector4charConst2, -1, -1, -1, -1);
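
[editor's note] The reworded note concerns -1 ("don't care") shuffle indices, which remain valid at runtime but not in a constant expression. A runtime-only sketch using in-bounds indices:

    typedef char v4char __attribute__((vector_size(4)));

    v4char reverse4(v4char a, v4char b) {
      return __builtin_shufflevector(a, b, 3, 2, 1, 0);  /* all indices in bounds */
    }
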
diff --git a/clang/test/Sema/fmv-namespace.cpp b/clang/test/Sema/fmv-namespace.cpp
new file mode 100644
index 000000000000..1c12fd66cf24
--- /dev/null
+++ b/clang/test/Sema/fmv-namespace.cpp
@@ -0,0 +1,12 @@
+// RUN: %clang_cc1 -triple aarch64-linux-gnu -fsyntax-only -verify %s
+// expected-no-diagnostics
+
+namespace Name {
+int __attribute((target_version("default"))) foo() { return 0; }
+}
+
+namespace Name {
+int __attribute((target_version("sve"))) foo() { return 1; }
+}
+
+int bar() { return Name::foo(); }
diff --git a/clang/test/Sema/stmtexprs.c b/clang/test/Sema/stmtexprs.c
index 708fc9abb75c..7493bbcef363 100644
--- a/clang/test/Sema/stmtexprs.c
+++ b/clang/test/Sema/stmtexprs.c
@@ -4,6 +4,6 @@ int stmtexpr_fn(void);
void stmtexprs(int i) {
__builtin_assume( ({ 1; }) ); // no warning about "side effects"
__builtin_assume( ({ if (i) { (void)0; }; 42; }) ); // no warning about "side effects"
- // expected-warning@+1 {{the argument to '__builtin_assume' has side effects that will be discarded}}
+ // expected-warning@+1 {{assumption is ignored because it contains (potential) side-effects}}
__builtin_assume( ({ if (i) ({ stmtexpr_fn(); }); 1; }) );
}
diff --git a/clang/test/Sema/x86-eval-method.c b/clang/test/Sema/x86-eval-method.c
index f475b0d1b29b..e540a59528b6 100644
--- a/clang/test/Sema/x86-eval-method.c
+++ b/clang/test/Sema/x86-eval-method.c
@@ -10,9 +10,9 @@
float add1(float a, float b, float c) {
return a + b + c;
-} // warn-warning{{Setting the floating point evaluation method to `source` on a target without SSE is not supported.}}
+} // warn-warning{{setting the floating point evaluation method to `source` on a target without SSE is not supported}}
float add2(float a, float b, float c) {
#pragma clang fp eval_method(source)
return a + b + c;
-} // warn-warning{{Setting the floating point evaluation method to `source` on a target without SSE is not supported.}}
+} // warn-warning{{setting the floating point evaluation method to `source` on a target without SSE is not supported}}
diff --git a/clang/test/Sema/x86_64-eval-method.c b/clang/test/Sema/x86_64-eval-method.c
index dbdc1f881b4a..fe4368a42ca1 100644
--- a/clang/test/Sema/x86_64-eval-method.c
+++ b/clang/test/Sema/x86_64-eval-method.c
@@ -10,4 +10,4 @@
float add2(float a, float b, float c) {
#pragma clang fp eval_method(source)
return a + b + c;
-} // warn-warning{{Setting the floating point evaluation method to `source` on a target without SSE is not supported.}}
+} // warn-warning{{setting the floating point evaluation method to `source` on a target without SSE is not supported}}
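
[editor's note] Another lowercase-and-no-period rewording. For reference, the construct under test: the pragma pins intermediate floating-point evaluation to the source type, which is unsupported without SSE (hence the warning on x87-only targets):

    float sum3(float a, float b, float c) {
    #pragma clang fp eval_method(source)
      return a + b + c;
    }
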
diff --git a/clang/test/SemaCUDA/device-var-init.cu b/clang/test/SemaCUDA/device-var-init.cu
index ee7a9e2276f2..1555d151c259 100644
--- a/clang/test/SemaCUDA/device-var-init.cu
+++ b/clang/test/SemaCUDA/device-var-init.cu
@@ -13,17 +13,17 @@
#include "Inputs/cuda-initializers.h"
__shared__ int s_v_i = 1;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__device__ int d_v_f = f();
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ int s_v_f = f();
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ int c_v_f = f();
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ T s_t_i = {2};
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__device__ T d_t_i = {2};
__constant__ T c_t_i = {2};
@@ -40,175 +40,175 @@ __shared__ CGTC s_cgtc;
__constant__ CGTC c_cgtc;
__device__ EC d_ec_i(3);
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ EC s_ec_i(3);
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ EC c_ec_i(3);
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ EC d_ec_i2 = {3};
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ EC s_ec_i2 = {3};
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ EC c_ec_i2 = {3};
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ ETC d_etc_i(3);
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ ETC s_etc_i(3);
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ ETC c_etc_i(3);
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ ETC d_etc_i2 = {3};
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ ETC s_etc_i2 = {3};
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ ETC c_etc_i2 = {3};
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ UC d_uc;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ UC s_uc;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ UC c_uc;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ UD d_ud;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ UD s_ud;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ UD c_ud;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ ECI d_eci;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ ECI s_eci;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ ECI c_eci;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ NEC d_nec;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ NEC s_nec;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ NEC c_nec;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ NED d_ned;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ NED s_ned;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ NED c_ned;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ NCV d_ncv;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ NCV s_ncv;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ NCV c_ncv;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ VD d_vd;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ VD s_vd;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ VD c_vd;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ NCF d_ncf;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ NCF s_ncf;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ NCF c_ncf;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ NCFS s_ncfs;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__device__ UTC d_utc;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ UTC s_utc;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ UTC c_utc;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ UTC d_utc_i(3);
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ UTC s_utc_i(3);
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ UTC c_utc_i(3);
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ NETC d_netc;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ NETC s_netc;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ NETC c_netc;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ NETC d_netc_i(3);
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ NETC s_netc_i(3);
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ NETC c_netc_i(3);
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ EC_I_EC1 d_ec_i_ec1;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ EC_I_EC1 s_ec_i_ec1;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ EC_I_EC1 c_ec_i_ec1;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ T_V_T d_t_v_t;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ T_V_T s_t_v_t;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ T_V_T c_t_v_t;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ T_B_NEC d_t_b_nec;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ T_B_NEC s_t_b_nec;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ T_B_NEC c_t_b_nec;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ T_F_NEC d_t_f_nec;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ T_F_NEC s_t_f_nec;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ T_F_NEC c_t_f_nec;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ T_FA_NEC d_t_fa_nec;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ T_FA_NEC s_t_fa_nec;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ T_FA_NEC c_t_fa_nec;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ T_B_NED d_t_b_ned;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ T_B_NED s_t_b_ned;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ T_B_NED c_t_b_ned;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ T_F_NED d_t_f_ned;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ T_F_NED s_t_f_ned;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ T_F_NED c_t_f_ned;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ T_FA_NED d_t_fa_ned;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__shared__ T_FA_NED s_t_fa_ned;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
__constant__ T_FA_NED c_t_fa_ned;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
// Verify that local variables may be static on device
// side and that they conform to the initialization constraints.
@@ -244,14 +244,14 @@ __device__ void df_sema() {
// Same test cases as for the globals above.
static __device__ int d_v_f = f();
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ int s_v_f = f();
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ int c_v_f = f();
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ T s_t_i = {2};
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __device__ T d_t_i = {2};
static __constant__ T c_t_i = {2};
@@ -260,175 +260,175 @@ __device__ void df_sema() {
static __constant__ ECD c_ecd_i;
static __device__ EC d_ec_i(3);
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ EC s_ec_i(3);
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ EC c_ec_i(3);
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ EC d_ec_i2 = {3};
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ EC s_ec_i2 = {3};
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ EC c_ec_i2 = {3};
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ ETC d_etc_i(3);
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ ETC s_etc_i(3);
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ ETC c_etc_i(3);
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ ETC d_etc_i2 = {3};
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ ETC s_etc_i2 = {3};
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ ETC c_etc_i2 = {3};
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ UC d_uc;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ UC s_uc;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ UC c_uc;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ UD d_ud;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ UD s_ud;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ UD c_ud;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ ECI d_eci;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ ECI s_eci;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ ECI c_eci;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ NEC d_nec;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ NEC s_nec;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ NEC c_nec;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ NED d_ned;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ NED s_ned;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ NED c_ned;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ NCV d_ncv;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ NCV s_ncv;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ NCV c_ncv;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ VD d_vd;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ VD s_vd;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ VD c_vd;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ NCF d_ncf;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ NCF s_ncf;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ NCF c_ncf;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ NCFS s_ncfs;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __device__ UTC d_utc;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ UTC s_utc;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ UTC c_utc;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ UTC d_utc_i(3);
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ UTC s_utc_i(3);
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ UTC c_utc_i(3);
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ NETC d_netc;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ NETC s_netc;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ NETC c_netc;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ NETC d_netc_i(3);
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ NETC s_netc_i(3);
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ NETC c_netc_i(3);
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ EC_I_EC1 d_ec_i_ec1;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ EC_I_EC1 s_ec_i_ec1;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ EC_I_EC1 c_ec_i_ec1;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ T_V_T d_t_v_t;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ T_V_T s_t_v_t;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ T_V_T c_t_v_t;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ T_B_NEC d_t_b_nec;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ T_B_NEC s_t_b_nec;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ T_B_NEC c_t_b_nec;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ T_F_NEC d_t_f_nec;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ T_F_NEC s_t_f_nec;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ T_F_NEC c_t_f_nec;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ T_FA_NEC d_t_fa_nec;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ T_FA_NEC s_t_fa_nec;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ T_FA_NEC c_t_fa_nec;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ T_B_NED d_t_b_ned;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ T_B_NED s_t_b_ned;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ T_B_NED c_t_b_ned;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ T_F_NED d_t_f_ned;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ T_F_NED s_t_f_ned;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ T_F_NED c_t_f_ned;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __device__ T_FA_NED d_t_fa_ned;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
static __shared__ T_FA_NED s_t_fa_ned;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
static __constant__ T_FA_NED c_t_fa_ned;
- // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+ // expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
}
__host__ __device__ void hd_sema() {
@@ -449,7 +449,7 @@ struct NontrivialInitializer {
template <typename T>
__global__ void bar() {
__shared__ T bad;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
for (int i = 0; i < 10; i++) {
static __device__ CEEC sd_ceec;
static __shared__ CEEC ss_ceec;
@@ -467,7 +467,7 @@ __global__ void bar() {
template <>
__global__ void bar<int>() {
__shared__ NontrivialInitializer bad;
-// expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+// expected-error@-1 {{initialization is not supported for __shared__ variables}}
for (int i = 0; i < 10; i++) {
static __device__ CEEC sd_ceec;
static __shared__ CEEC ss_ceec;
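Editor's note: the hunks above only drop the trailing period from these CUDA diagnostics; the tested code is unchanged. For context, a minimal sketch (hypothetical, not part of this commit) of what triggers each message in CUDA mode:

    __device__ int f();            // opaque function: calling it is dynamic init
    __device__ int d_ok = 42;      // constant initialization: accepted
    __device__ int d_bad = f();    // error: dynamic initialization is not supported
                                   // for __device__, __constant__, __shared__, and
                                   // __managed__ variables
    __shared__ int s_bad = 0;      // error: initialization is not supported for
                                   // __shared__ variables (even constant init)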
diff --git a/clang/test/SemaCUDA/function-overload.cu b/clang/test/SemaCUDA/function-overload.cu
index 163648cd9a87..4710c81763ad 100644
--- a/clang/test/SemaCUDA/function-overload.cu
+++ b/clang/test/SemaCUDA/function-overload.cu
@@ -469,7 +469,7 @@ int test_constexpr_overload(C2 &x, C2 &y) {
// Verify no ambiguity for new operator.
void *a = new int;
__device__ void *b = new int;
-// expected-error@-1{{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1{{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
// Verify no ambiguity for new operator.
template<typename _Tp> _Tp&& f();
diff --git a/clang/test/SemaCUDA/union-init.cu b/clang/test/SemaCUDA/union-init.cu
index 9e4d14a71069..dd4b1296b713 100644
--- a/clang/test/SemaCUDA/union-init.cu
+++ b/clang/test/SemaCUDA/union-init.cu
@@ -31,14 +31,14 @@ union D {
__device__ B b;
__device__ C c;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ D d;
-// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables.}}
+// expected-error@-1 {{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
__device__ void foo() {
__shared__ B b;
__shared__ C c;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
__shared__ D d;
- // expected-error@-1 {{initialization is not supported for __shared__ variables.}}
+ // expected-error@-1 {{initialization is not supported for __shared__ variables}}
}
diff --git a/clang/test/SemaCXX/MicrosoftExtensions.cpp b/clang/test/SemaCXX/MicrosoftExtensions.cpp
index 7286217b1644..98c19975095b 100644
--- a/clang/test/SemaCXX/MicrosoftExtensions.cpp
+++ b/clang/test/SemaCXX/MicrosoftExtensions.cpp
@@ -571,11 +571,17 @@ class PR34109_class {
virtual ~PR34109_class() {}
};
+#if !defined(__cpp_sized_deallocation)
void operator delete(void *) throw();
// expected-note@-1 {{previous declaration is here}}
__declspec(dllexport) void operator delete(void *) throw();
// expected-error@-1 {{redeclaration of 'operator delete' cannot add 'dllexport' attribute}}
-
+#else
+void operator delete(void *, unsigned int) throw();
+// expected-note@-1 {{previous declaration is here}}
+__declspec(dllexport) void operator delete(void *, unsigned int) throw();
+// expected-error@-1 {{redeclaration of 'operator delete' cannot add 'dllexport' attribute}}
+#endif
void PR34109(int* a) {
delete a;
}
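Editor's note: the new #if guard is needed because sized deallocation is now enabled by default, the same reason the RUN-line flags change in builtin-operator-new-delete.cpp and cxx1y-sized-deallocation.cpp below. With __cpp_sized_deallocation defined, the interesting usual global form carries a size parameter. A sketch of the two non-aligned signatures, using std::size_t where the 32-bit MS target above spells it unsigned int:

    #include <cstddef>
    void operator delete(void *p) noexcept;                  // unsized usual form
    void operator delete(void *p, std::size_t sz) noexcept;  // sized usual form,
                                                             // selected when the
                                                             // object size is known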
diff --git a/clang/test/SemaCXX/addr-label-in-coroutines.cpp b/clang/test/SemaCXX/addr-label-in-coroutines.cpp
index e37ee6413437..65d78636e5cd 100644
--- a/clang/test/SemaCXX/addr-label-in-coroutines.cpp
+++ b/clang/test/SemaCXX/addr-label-in-coroutines.cpp
@@ -13,9 +13,9 @@ struct resumable {
};
resumable f1(int &out, int *inst) {
- static void* dispatch_table[] = {&&inc, // expected-error {{the GNU address of label extension is not allowed in coroutines.}}
- &&suspend, // expected-error {{the GNU address of label extension is not allowed in coroutines.}}
- &&stop}; // expected-error {{the GNU address of label extension is not allowed in coroutines.}}
+ static void* dispatch_table[] = {&&inc, // expected-error {{the GNU address of label extension is not allowed in coroutines}}
+ &&suspend, // expected-error {{the GNU address of label extension is not allowed in coroutines}}
+ &&stop}; // expected-error {{the GNU address of label extension is not allowed in coroutines}}
#define DISPATCH() goto *dispatch_table[*inst++]
inc:
out++;
@@ -31,9 +31,9 @@ stop:
resumable f2(int &out, int *inst) {
void* dispatch_table[] = {nullptr, nullptr, nullptr};
- dispatch_table[0] = &&inc; // expected-error {{the GNU address of label extension is not allowed in coroutines.}}
- dispatch_table[1] = &&suspend; // expected-error {{the GNU address of label extension is not allowed in coroutines.}}
- dispatch_table[2] = &&stop; // expected-error {{the GNU address of label extension is not allowed in coroutines.}}
+ dispatch_table[0] = &&inc; // expected-error {{the GNU address of label extension is not allowed in coroutines}}
+ dispatch_table[1] = &&suspend; // expected-error {{the GNU address of label extension is not allowed in coroutines}}
+ dispatch_table[2] = &&stop; // expected-error {{the GNU address of label extension is not allowed in coroutines}}
#define DISPATCH() goto *dispatch_table[*inst++]
inc:
out++;
@@ -50,9 +50,9 @@ stop:
resumable f3(int &out, int *inst) {
void* dispatch_table[] = {nullptr, nullptr, nullptr};
[&]() -> resumable {
- dispatch_table[0] = &&inc; // expected-error {{the GNU address of label extension is not allowed in coroutines.}}
- dispatch_table[1] = &&suspend; // expected-error {{the GNU address of label extension is not allowed in coroutines.}}
- dispatch_table[2] = &&stop; // expected-error {{the GNU address of label extension is not allowed in coroutines.}}
+ dispatch_table[0] = &&inc; // expected-error {{the GNU address of label extension is not allowed in coroutines}}
+ dispatch_table[1] = &&suspend; // expected-error {{the GNU address of label extension is not allowed in coroutines}}
+ dispatch_table[2] = &&stop; // expected-error {{the GNU address of label extension is not allowed in coroutines}}
#define DISPATCH() goto *dispatch_table[*inst++]
inc:
out++;
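Editor's note: the reworded diagnostic concerns the GNU labels-as-values extension, which stays valid outside coroutines. A minimal sketch of the extension itself (hypothetical, not taken from the test):

    int dispatch(int inst) {
      static void *table[] = { &&zero, &&one };  // GNU address-of-label
      goto *table[inst];                         // GNU computed goto
    zero:
      return 0;
    one:
      return 1;
    }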
diff --git a/clang/test/SemaCXX/attribute-pack-expansion.cpp b/clang/test/SemaCXX/attribute-pack-expansion.cpp
new file mode 100644
index 000000000000..a339e68c0964
--- /dev/null
+++ b/clang/test/SemaCXX/attribute-pack-expansion.cpp
@@ -0,0 +1,20 @@
+// RUN: %clang_cc1 -std=c++20 -fsyntax-only -verify %s
+
+template <bool... vals>
+void f() __attribute((diagnose_if(vals, "message", "error"))) { // expected-error {{expression contains unexpanded parameter pack 'vals'}}
+ [] () __attribute((diagnose_if(vals, "message", "error"))) {}(); // expected-error {{expression contains unexpanded parameter pack 'vals'}}
+ [] () __attribute((diagnose_if(vals..., "message", "error"))) {}(); // expected-error {{attribute 'diagnose_if' does not support argument pack expansion}}
+ [] <bool ...inner> () __attribute((diagnose_if(inner, "message", "error"))) {}(); // expected-error {{expression contains unexpanded parameter pack 'inner'}}
+ ([] <bool ...inner> () __attribute((diagnose_if(inner, "message", "error"))) {}(), ...); // expected-error {{expression contains unexpanded parameter pack 'inner'}} \
+ // expected-error {{pack expansion does not contain any unexpanded parameter packs}}
+
+ // This is fine, so check that we're actually emitting an error
+ // due to the 'diagnose_if'.
+ ([] () __attribute((diagnose_if(vals, "foobar", "error"))) {}(), ...); // expected-error {{foobar}} expected-note {{from 'diagnose_if'}}
+}
+
+void g() {
+ f<>();
+ f<false>();
+ f<true, true>(); // expected-note {{in instantiation of}}
+}
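Editor's note: for contrast with the rejected forms in this new test, the accepted shape is the one on the "This is fine" line: expand the pack around the whole lambda call so each diagnose_if condition is a single bool. A sketch under the same assumptions as the test:

    template <bool... vals>
    void ok() {
      // One lambda, and one diagnose_if, per pack element; the diagnostic
      // fires only for elements whose condition is true.
      ([]() __attribute__((diagnose_if(vals, "message", "error"))) {}(), ...);
    }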
diff --git a/clang/test/SemaCXX/builtin-operator-new-delete.cpp b/clang/test/SemaCXX/builtin-operator-new-delete.cpp
index 6fcff92dc095..db15616803e3 100644
--- a/clang/test/SemaCXX/builtin-operator-new-delete.cpp
+++ b/clang/test/SemaCXX/builtin-operator-new-delete.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -std=c++1z -fsyntax-only -verify %s
+// RUN: %clang_cc1 -std=c++1z -fno-sized-deallocation -fsyntax-only -verify %s
// RUN: %clang_cc1 -std=c++03 -fsyntax-only -verify %s
// RUN: %clang_cc1 -std=c++03 -faligned-allocation -fsyntax-only -verify %s
// RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s
diff --git a/clang/test/SemaCXX/constexpr-default-arg.cpp b/clang/test/SemaCXX/constexpr-default-arg.cpp
index ec9b2927880b..901123bfb359 100644
--- a/clang/test/SemaCXX/constexpr-default-arg.cpp
+++ b/clang/test/SemaCXX/constexpr-default-arg.cpp
@@ -32,8 +32,8 @@ void test_default_arg2() {
}
// Check that multiple CXXDefaultInitExprs don't cause an assertion failure.
-struct A { int &&r = 0; }; // expected-note 2{{default member initializer}}
+struct A { int &&r = 0; };
struct B { A x, y; };
-B b = {}; // expected-warning 2{{lifetime extension of temporary created by aggregate initialization using a default member initializer is not yet supported}}
+B b = {}; // expected-no-diagnostics
}
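Editor's note: this test (and eval-crashes.cpp below) is updated because Clang now lifetime-extends temporaries bound through a default member initializer during aggregate initialization, so the old "not yet supported" warning and its notes are gone. The now-clean pattern, in isolation:

    struct A { int &&r = 0; };  // default member initializer binds a temporary
    struct B { A x, y; };
    B b = {};                   // both temporaries are now extended to live as
                                // long as b; this used to warn and dangle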
diff --git a/clang/test/SemaCXX/cxx11-default-member-initializers.cpp b/clang/test/SemaCXX/cxx11-default-member-initializers.cpp
index dd8e9c6b7fc1..1ea8b98cd863 100644
--- a/clang/test/SemaCXX/cxx11-default-member-initializers.cpp
+++ b/clang/test/SemaCXX/cxx11-default-member-initializers.cpp
@@ -27,6 +27,80 @@ class MemInit {
C m = s;
};
+namespace std {
+typedef decltype(sizeof(int)) size_t;
+
+// libc++'s implementation
+template <class _E> class initializer_list {
+ const _E *__begin_;
+ size_t __size_;
+
+ initializer_list(const _E *__b, size_t __s) : __begin_(__b), __size_(__s) {}
+
+public:
+ typedef _E value_type;
+ typedef const _E &reference;
+ typedef const _E &const_reference;
+ typedef size_t size_type;
+
+ typedef const _E *iterator;
+ typedef const _E *const_iterator;
+
+ initializer_list() : __begin_(nullptr), __size_(0) {}
+
+ size_t size() const { return __size_; }
+ const _E *begin() const { return __begin_; }
+ const _E *end() const { return __begin_ + __size_; }
+};
+} // namespace std
+
+#if __cplusplus >= 201703L
+namespace test_rebuild {
+template <typename T, int> class C {
+public:
+ C(std::initializer_list<T>);
+};
+
+template <typename T> using Ptr = __remove_pointer(T) *;
+template <typename T> C(T) -> C<Ptr<T>, sizeof(T)>;
+
+class A {
+public:
+ template <typename T1, typename T2> T1 *some_func(T2 &&);
+};
+
+struct B : A {
+ // Test CXXDefaultInitExpr rebuild issue in
+ // https://github.com/llvm/llvm-project/pull/87933
+ int *ar = some_func<int>(C{some_func<int>(0)});
+ B() {}
+};
+
+int TestBody_got;
+template <int> class Vector {
+public:
+ Vector(std::initializer_list<int>);
+};
+template <typename... Ts> Vector(Ts...) -> Vector<sizeof...(Ts)>;
+class ProgramBuilder {
+public:
+ template <typename T, typename ARGS> int *create(ARGS);
+};
+
+struct TypeTest : ProgramBuilder {
+ int *str_f16 = create<int>(Vector{0});
+ TypeTest() {}
+};
+class TypeTest_Element_Test : TypeTest {
+ void TestBody();
+};
+void TypeTest_Element_Test::TestBody() {
+ int *expect = str_f16;
+ &TestBody_got != expect; // expected-warning {{inequality comparison result unused}}
+}
+} // namespace test_rebuild
+#endif // __cplusplus >= 201703L
+
#if __cplusplus >= 202002L
// This test ensures cleanup expressions are correctly produced
// in the presence of default member initializers.
diff --git a/clang/test/SemaCXX/cxx1y-sized-deallocation.cpp b/clang/test/SemaCXX/cxx1y-sized-deallocation.cpp
index 3ec65a6a64d1..462f1725bb1c 100644
--- a/clang/test/SemaCXX/cxx1y-sized-deallocation.cpp
+++ b/clang/test/SemaCXX/cxx1y-sized-deallocation.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -std=c++1y -verify %s -fsized-deallocation -fexceptions -fcxx-exceptions
+// RUN: %clang_cc1 -std=c++1y -verify %s -fexceptions -fcxx-exceptions
using size_t = decltype(sizeof(0));
void operator delete(void *, size_t) noexcept; // expected-note {{'operator delete' declared here}}
diff --git a/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp b/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp
index 285532e3d80d..b71dfc6ccaf4 100644
--- a/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp
+++ b/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp
@@ -173,6 +173,11 @@ template <typename... Ts>
using AFoo = Foo<Ts...>;
auto b = AFoo{};
+AFoo a(1, 2);
+
+template <typename T>
+using BFoo = Foo<T, T>;
+BFoo b2(1.0, 2.0);
} // namespace test13
namespace test14 {
@@ -279,7 +284,7 @@ class Foo {};
// Verify that template template type parameter TTP is referenced/used in the
// template arguments of the RHS.
template <template<typename> typename TTP>
-using Bar = Foo<K<TTP>>; // expected-note {{candidate template ignored: could not match 'Foo<K<>>' against 'int'}}
+using Bar = Foo<K<TTP>>; // expected-note {{candidate template ignored: could not match 'Foo<K<template-parameter-0-0>>' against 'int'}}
template <class T>
class Container {};
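Editor's note: the added lines exercise C++20 class template argument deduction through alias templates, including an alias that repeats its parameter. A self-contained sketch with hypothetical names:

    template <class T, class U>
    struct Pair { Pair(T, U) {} };

    template <class... Ts> using APair = Pair<Ts...>;
    template <class T>     using Twin  = Pair<T, T>;

    APair p(1, 2.0);    // deduces Pair<int, double> through the alias
    Twin  q(3.0, 4.0);  // deduces Pair<double, double>, like BFoo above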
diff --git a/clang/test/SemaCXX/cxx23-assume.cpp b/clang/test/SemaCXX/cxx23-assume.cpp
index e67d72ae0a99..9138501d726d 100644
--- a/clang/test/SemaCXX/cxx23-assume.cpp
+++ b/clang/test/SemaCXX/cxx23-assume.cpp
@@ -28,7 +28,7 @@ bool f2();
template <typename T>
constexpr void f3() {
- [[assume(T{})]]; // expected-error {{not contextually convertible to 'bool'}} expected-warning {{has side effects that will be discarded}} ext-warning {{C++23 extension}}
+ [[assume(T{})]]; // expected-error {{not contextually convertible to 'bool'}} expected-warning {{assumption is ignored because it contains (potential) side-effects}} ext-warning {{C++23 extension}}
}
void g(int x) {
@@ -38,13 +38,13 @@ void g(int x) {
S<false>{}.f();
S<true>{}.g<char>();
S<true>{}.g<int>();
- [[assume(f2())]]; // expected-warning {{side effects that will be discarded}} ext-warning {{C++23 extension}}
+ [[assume(f2())]]; // expected-warning {{assumption is ignored because it contains (potential) side-effects}} ext-warning {{C++23 extension}}
- [[assume((x = 3))]]; // expected-warning {{has side effects that will be discarded}} // ext-warning {{C++23 extension}}
- [[assume(x++)]]; // expected-warning {{has side effects that will be discarded}} // ext-warning {{C++23 extension}}
- [[assume(++x)]]; // expected-warning {{has side effects that will be discarded}} // ext-warning {{C++23 extension}}
- [[assume([]{ return true; }())]]; // expected-warning {{has side effects that will be discarded}} // ext-warning {{C++23 extension}}
- [[assume(B{})]]; // expected-warning {{has side effects that will be discarded}} // ext-warning {{C++23 extension}}
+ [[assume((x = 3))]]; // expected-warning {{assumption is ignored because it contains (potential) side-effects}} // ext-warning {{C++23 extension}}
+ [[assume(x++)]]; // expected-warning {{assumption is ignored because it contains (potential) side-effects}} // ext-warning {{C++23 extension}}
+ [[assume(++x)]]; // expected-warning {{assumption is ignored because it contains (potential) side-effects}} // ext-warning {{C++23 extension}}
+ [[assume([]{ return true; }())]]; // expected-warning {{assumption is ignored because it contains (potential) side-effects}} // ext-warning {{C++23 extension}}
+ [[assume(B{})]]; // expected-warning {{assumption is ignored because it contains (potential) side-effects}} // ext-warning {{C++23 extension}}
[[assume((1, 2))]]; // expected-warning {{has no effect}} // ext-warning {{C++23 extension}}
f3<A>(); // expected-note {{in instantiation of}}
@@ -58,6 +58,11 @@ void g(int x) {
[[assume(true)]] while (false) {} // expected-error {{only applies to empty statements}}
[[assume(true)]] label:; // expected-error {{cannot be applied to a declaration}}
[[assume(true)]] goto label; // expected-error {{only applies to empty statements}}
+
+ // Also check variant spellings.
+ __attribute__((__assume__(true))); // Should not issue a warning because it doesn't use the [[]] spelling.
+ __attribute__((assume(true))) {}; // expected-error {{only applies to empty statements}}
+ [[clang::assume(true)]] {}; // expected-error {{only applies to empty statements}}
}
// Check that 'x' is ODR-used here.
@@ -86,7 +91,7 @@ static_assert(S<false>{}.g<A>()); // expected-error {{not an integral constant e
template <typename T>
constexpr bool f4() {
- [[assume(!T{})]]; // expected-error {{invalid argument type 'D'}} // expected-warning 2 {{side effects}} ext-warning {{C++23 extension}}
+ [[assume(!T{})]]; // expected-error {{invalid argument type 'D'}} // expected-warning 2 {{assumption is ignored because it contains (potential) side-effects}} ext-warning {{C++23 extension}}
return sizeof(T) == sizeof(int);
}
@@ -132,8 +137,8 @@ static_assert(f5<F>() == 2); // expected-note {{while checking constraint satisf
// Do not validate assumptions whose evaluation would have side-effects.
constexpr int foo() {
int a = 0;
- [[assume(a++)]] [[assume(++a)]]; // expected-warning 2 {{has side effects that will be discarded}} ext-warning 2 {{C++23 extension}}
- [[assume((a+=1))]]; // expected-warning {{has side effects that will be discarded}} ext-warning {{C++23 extension}}
+ [[assume(a++)]] [[assume(++a)]]; // expected-warning 2 {{assumption is ignored because it contains (potential) side-effects}} ext-warning 2 {{C++23 extension}}
+ [[assume((a+=1))]]; // expected-warning {{assumption is ignored because it contains (potential) side-effects}} ext-warning {{C++23 extension}}
return a;
}
@@ -143,3 +148,13 @@ template <bool ...val>
void f() {
[[assume(val)]]; // expected-error {{expression contains unexpanded parameter pack}}
}
+
+namespace gh71858 {
+int
+foo (int x, int y)
+{
+ __attribute__((assume(x == 42)));
+ __attribute__((assume(++y == 43))); // expected-warning {{assumption is ignored because it contains (potential) side-effects}}
+ return x + y;
+}
+}
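Editor's note: the reworded warning fires whenever the operand of [[assume]] has (potential) side effects; such assumptions are discarded rather than evaluated. A minimal sketch:

    int g(int x) {
      [[assume(x > 0)]];  // OK: no side effects; may inform the optimizer
      [[assume(x++)]];    // warning: assumption is ignored because it contains
                          // (potential) side-effects
      return x;
    }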
diff --git a/clang/test/SemaCXX/cxx2b-consteval-propagate.cpp b/clang/test/SemaCXX/cxx2b-consteval-propagate.cpp
index 07937deb6673..b70c02201ac3 100644
--- a/clang/test/SemaCXX/cxx2b-consteval-propagate.cpp
+++ b/clang/test/SemaCXX/cxx2b-consteval-propagate.cpp
@@ -446,3 +446,11 @@ int h(int x) {
}
#endif
+
+
+namespace GH91308 {
+ constexpr void f(auto) {
+ static_assert(false);
+ }
+ using R1 = decltype(&f<int>);
+}
diff --git a/clang/test/SemaCXX/cxx2c-pack-indexing.cpp b/clang/test/SemaCXX/cxx2c-pack-indexing.cpp
index 0ac85b5bcc14..28b9765127f4 100644
--- a/clang/test/SemaCXX/cxx2c-pack-indexing.cpp
+++ b/clang/test/SemaCXX/cxx2c-pack-indexing.cpp
@@ -206,13 +206,17 @@ void test(auto...args){
template<int... args>
void test2(){
[&]<int idx>(){
- using R = decltype( args...[idx] ) ;
- }.template operator()<0>();
+ using R = decltype( args...[idx] ) ; // #test2-R
+ }.template operator()<0>(); // #test2-call
}
void f( ) {
test(1);
test2<1>();
+ test2();
+ // expected-error@#test2-R {{invalid index 0 for pack args of size 0}}
+ // expected-note@#test2-call {{requested here}}
+ // expected-note@-3 {{requested here}}
}
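Editor's note: the new expectations cover C++2c pack indexing with an out-of-range index, which is diagnosed where the pack is indexed, with a note at each instantiation point. A sketch:

    template <int... args>
    int first() {
      return args...[0];   // ill-formed when sizeof...(args) == 0:
    }                      // "invalid index 0 for pack args of size 0"

    int a = first<7, 8>(); // OK: yields 7
    // int b = first<>();  // would trigger the diagnostic above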
diff --git a/clang/test/SemaCXX/eval-crashes.cpp b/clang/test/SemaCXX/eval-crashes.cpp
index 017df977b26b..a06f60f71e9c 100644
--- a/clang/test/SemaCXX/eval-crashes.cpp
+++ b/clang/test/SemaCXX/eval-crashes.cpp
@@ -25,11 +25,9 @@ namespace pr33140_0b {
}
namespace pr33140_2 {
- // FIXME: The declaration of 'b' below should lifetime-extend two int
- // temporaries.
- struct A { int &&r = 0; }; // expected-note 2{{initializing field 'r' with default member initializer}}
+ struct A { int &&r = 0; };
struct B { A x, y; };
- B b = {}; // expected-warning 2{{lifetime extension of temporary created by aggregate initialization using a default member initializer is not yet supported}}
+ B b = {};
}
namespace pr33140_3 {
diff --git a/clang/test/SemaCXX/overload-decl.cpp b/clang/test/SemaCXX/overload-decl.cpp
index 1201396996e7..5d1df89a0da7 100644
--- a/clang/test/SemaCXX/overload-decl.cpp
+++ b/clang/test/SemaCXX/overload-decl.cpp
@@ -36,3 +36,20 @@ class X {
int main() {} // expected-note {{previous definition is here}}
int main(int,char**) {} // expected-error {{conflicting types for 'main'}}
+
+
+namespace GH93456 {
+
+struct X {
+ static void f(); // expected-note {{previous declaration is here}}
+ void f() const;
+ // expected-error@-1 {{static and non-static member functions with the same parameter types cannot be overloaded}}
+};
+
+struct Y {
+ void f() const; // expected-note {{previous declaration is here}}
+ static void f();
+ // expected-error@-1 {{static and non-static member functions with the same parameter types cannot be overloaded}}
+};
+
+}
diff --git a/clang/test/SemaCXX/overload-template.cpp b/clang/test/SemaCXX/overload-template.cpp
index 0fe13c479cce..3277a17e5e45 100644
--- a/clang/test/SemaCXX/overload-template.cpp
+++ b/clang/test/SemaCXX/overload-template.cpp
@@ -58,3 +58,13 @@ namespace overloadCheck{
}
}
#endif
+
+namespace GH93076 {
+template <typename ...a> int b(a..., int); // expected-note-re 3 {{candidate function template not viable: no known conversion from 'int ()' to 'int' for {{.*}} argument}}
+int d() {
+ (void)b<int, int>(0, 0, d); // expected-error {{no matching function for call to 'b'}}
+ (void)b<int, int>(0, d, 0); // expected-error {{no matching function for call to 'b'}}
+ (void)b<int, int>(d, 0, 0); // expected-error {{no matching function for call to 'b'}}
+ return 0;
+ }
+}
diff --git a/clang/test/SemaCXX/overloaded-operator.cpp b/clang/test/SemaCXX/overloaded-operator.cpp
index cab21d67a002..0701a96d5d0c 100644
--- a/clang/test/SemaCXX/overloaded-operator.cpp
+++ b/clang/test/SemaCXX/overloaded-operator.cpp
@@ -691,4 +691,15 @@ template <auto T> A<*T> operator *() { return {}; }
// expected-error@-1 {{overloaded 'operator*' must have at least one parameter of class or enumeration type}}
}
+namespace GH92275 {
+
+template <auto v>
+struct constant{};
+
+template <auto x>
+auto operator *(constant<x>)
+{ return constant<(*x)>{}; }
+
+}
+
#endif
diff --git a/clang/test/SemaCXX/recovery-expr-type.cpp b/clang/test/SemaCXX/recovery-expr-type.cpp
index 479039f28479..5a42a11b82da 100644
--- a/clang/test/SemaCXX/recovery-expr-type.cpp
+++ b/clang/test/SemaCXX/recovery-expr-type.cpp
@@ -1,3 +1,5 @@
+// RUN: %clang_cc1 -triple=x86_64-unknown-unknown -o - %s -std=gnu++17 -fsyntax-only -verify -fexperimental-new-constant-interpreter
+// RUN: %clang_cc1 -triple=x86_64-unknown-unknown -o - %s -std=gnu++20 -fsyntax-only -verify -fexperimental-new-constant-interpreter
// RUN: %clang_cc1 -triple=x86_64-unknown-unknown -o - %s -std=gnu++17 -fsyntax-only -verify
// RUN: %clang_cc1 -triple=x86_64-unknown-unknown -o - %s -std=gnu++20 -fsyntax-only -verify
diff --git a/clang/test/SemaCXX/source_location.cpp b/clang/test/SemaCXX/source_location.cpp
index 63157cfacdd9..6b3610d703e7 100644
--- a/clang/test/SemaCXX/source_location.cpp
+++ b/clang/test/SemaCXX/source_location.cpp
@@ -912,3 +912,20 @@ auto g() {
}
}
+
+namespace GH92680 {
+
+struct IntConstuctible {
+ IntConstuctible(std::source_location = std::source_location::current());
+};
+
+template <typename>
+auto construct_at(IntConstuctible) -> decltype(IntConstuctible()) {
+ return {};
+}
+
+void test() {
+ construct_at<IntConstuctible>({});
+}
+
+}
diff --git a/clang/test/SemaCXX/type-traits.cpp b/clang/test/SemaCXX/type-traits.cpp
index f2fd45762abf..d40605f56f1e 100644
--- a/clang/test/SemaCXX/type-traits.cpp
+++ b/clang/test/SemaCXX/type-traits.cpp
@@ -25,6 +25,7 @@ typedef Empty EmptyArMB[1][2];
typedef int Int;
typedef Int IntAr[10];
typedef Int IntArNB[];
+typedef Int IntArZero[0];
class Statics { static int priv; static NonPOD np; };
union EmptyUnion {};
union IncompleteUnion; // expected-note {{forward declaration of 'IncompleteUnion'}}
@@ -685,6 +686,7 @@ void is_array()
{
static_assert(__is_array(IntAr));
static_assert(__is_array(IntArNB));
+ static_assert(!__is_array(IntArZero));
static_assert(__is_array(UnionAr));
static_assert(!__is_array(void));
@@ -714,6 +716,7 @@ void is_array()
void is_bounded_array(int n) {
static_assert(__is_bounded_array(IntAr));
static_assert(!__is_bounded_array(IntArNB));
+ static_assert(!__is_bounded_array(IntArZero));
static_assert(__is_bounded_array(UnionAr));
static_assert(!__is_bounded_array(void));
@@ -746,6 +749,7 @@ void is_bounded_array(int n) {
void is_unbounded_array(int n) {
static_assert(!__is_unbounded_array(IntAr));
static_assert(__is_unbounded_array(IntArNB));
+ static_assert(!__is_unbounded_array(IntArZero));
static_assert(!__is_unbounded_array(UnionAr));
static_assert(!__is_unbounded_array(void));
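Editor's note: the new assertions pin down that zero-length arrays (a GNU extension) count as neither bounded nor unbounded arrays for the trait intrinsics. The same checks in isolation:

    typedef int IntArZero[0];  // GNU zero-length array
    static_assert(!__is_array(IntArZero));
    static_assert(!__is_bounded_array(IntArZero));
    static_assert(!__is_unbounded_array(IntArZero));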
diff --git a/clang/test/SemaCXX/unavailable_aligned_allocation.cpp b/clang/test/SemaCXX/unavailable_aligned_allocation.cpp
index be593eafe11d..45fdec606ad1 100644
--- a/clang/test/SemaCXX/unavailable_aligned_allocation.cpp
+++ b/clang/test/SemaCXX/unavailable_aligned_allocation.cpp
@@ -75,7 +75,7 @@ void testOveraligned() {
// expected-error-re@-22 {{aligned deallocation function of type 'void (void *, enum std::align_val_t) noexcept' is {{only|not}} available on}}
// expected-note@-23 {{if you supply your own aligned allocation functions}}
-// expected-error-re@-24 {{aligned deallocation function of type 'void (void *, enum std::align_val_t) noexcept' is {{only|not}} available on}}
+// expected-error-re@-24 {{aligned deallocation function of type 'void (void *, std::size_t, std::align_val_t) noexcept' is {{only|not}} available on}}
// expected-note@-25 {{if you supply your own aligned allocation functions}}
// expected-error-re@-26 {{aligned allocation function of type 'void *(std::size_t, std::align_val_t, const std::nothrow_t &) noexcept' is {{only|not}} available on}}
@@ -143,19 +143,19 @@ OveralignedS2::~OveralignedS2() {}
// expected-no-diagnostics
#else
#if defined(IOS)
-// expected-error@-6 {{aligned deallocation function of type 'void (void *, enum std::align_val_t) noexcept' is only available on iOS 11 or newer}}
+// expected-error@-6 {{aligned deallocation function of type 'void (void *, std::size_t, std::align_val_t) noexcept' is only available on iOS 11 or newer}}
// expected-note@-7 {{if you supply your own aligned allocation functions}}
#elif defined(TVOS)
-// expected-error@-9 {{aligned deallocation function of type 'void (void *, enum std::align_val_t) noexcept' is only available on tvOS 11 or newer}}
+// expected-error@-9 {{aligned deallocation function of type 'void (void *, std::size_t, std::align_val_t) noexcept' is only available on tvOS 11 or newer}}
// expected-note@-10 {{if you supply your own aligned allocation functions}}
#elif defined(WATCHOS)
-// expected-error@-12 {{aligned deallocation function of type 'void (void *, enum std::align_val_t) noexcept' is only available on watchOS 4 or newer}}
+// expected-error@-12 {{aligned deallocation function of type 'void (void *, std::size_t, std::align_val_t) noexcept' is only available on watchOS 4 or newer}}
// expected-note@-13 {{if you supply your own aligned allocation functions}}
#elif defined(MACOS)
-// expected-error@-15 {{aligned deallocation function of type 'void (void *, enum std::align_val_t) noexcept' is only available on macOS 10.13 or newer}}
+// expected-error@-15 {{aligned deallocation function of type 'void (void *, std::size_t, std::align_val_t) noexcept' is only available on macOS 10.13 or newer}}
// expected-note@-16 {{if you supply your own aligned allocation functions}}
#elif defined(ZOS)
-// expected-error@-18 {{aligned deallocation function of type 'void (void *, enum std::align_val_t) noexcept' is not available on z/OS}}
+// expected-error@-18 {{aligned deallocation function of type 'void (void *, std::size_t, std::align_val_t) noexcept' is not available on z/OS}}
// expected-note@-19 {{if you supply your own aligned allocation functions}}
// expected-note@-19 {{if you supply your own aligned allocation functions}}
#endif
#endif
@@ -209,6 +209,9 @@ void *operator new(std::size_t __sz, std::align_val_t) {
void operator delete(void *p, std::align_val_t) {
}
+void operator delete(void *p, std::size_t __sz, std::align_val_t) {
+}
+
void testOveraligned2() {
auto p = new ((std::align_val_t)8) OveralignedS;
delete p;
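Editor's note: with sized deallocation on by default, the availability diagnostic now names the sized aligned form, and the test gains a matching definition so overload resolution can pick it. A sketch of the aligned global pair:

    #include <cstddef>
    #include <new>
    void operator delete(void *p, std::align_val_t a) noexcept;
    void operator delete(void *p, std::size_t sz, std::align_val_t a) noexcept;
    // The sized form is preferred when the object's size is known at the
    // delete-expression, which is why the diagnostic now names it.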
diff --git a/clang/test/SemaCXX/warn-thread-safety-analysis.cpp b/clang/test/SemaCXX/warn-thread-safety-analysis.cpp
index 749d9e135d94..73cc946ca0ce 100644
--- a/clang/test/SemaCXX/warn-thread-safety-analysis.cpp
+++ b/clang/test/SemaCXX/warn-thread-safety-analysis.cpp
@@ -5838,12 +5838,12 @@ class Foo5 {
class Foo6 {
- Mutex mu1 ACQUIRED_AFTER(mu3); // expected-warning {{Cycle in acquired_before/after dependencies, starting with 'mu1'}}
- Mutex mu2 ACQUIRED_AFTER(mu1); // expected-warning {{Cycle in acquired_before/after dependencies, starting with 'mu2'}}
- Mutex mu3 ACQUIRED_AFTER(mu2); // expected-warning {{Cycle in acquired_before/after dependencies, starting with 'mu3'}}
+ Mutex mu1 ACQUIRED_AFTER(mu3); // expected-warning {{cycle in acquired_before/after dependencies, starting with 'mu1'}}
+ Mutex mu2 ACQUIRED_AFTER(mu1); // expected-warning {{cycle in acquired_before/after dependencies, starting with 'mu2'}}
+ Mutex mu3 ACQUIRED_AFTER(mu2); // expected-warning {{cycle in acquired_before/after dependencies, starting with 'mu3'}}
- Mutex mu_b ACQUIRED_BEFORE(mu_b); // expected-warning {{Cycle in acquired_before/after dependencies, starting with 'mu_b'}}
- Mutex mu_a ACQUIRED_AFTER(mu_a); // expected-warning {{Cycle in acquired_before/after dependencies, starting with 'mu_a'}}
+ Mutex mu_b ACQUIRED_BEFORE(mu_b); // expected-warning {{cycle in acquired_before/after dependencies, starting with 'mu_b'}}
+ Mutex mu_a ACQUIRED_AFTER(mu_a); // expected-warning {{cycle in acquired_before/after dependencies, starting with 'mu_a'}}
void test0() {
mu_a.Lock();
diff --git a/clang/test/SemaCXX/warn-unsafe-buffer-usage-pragma-misuse.cpp b/clang/test/SemaCXX/warn-unsafe-buffer-usage-pragma-misuse.cpp
index 126257e0fc47..106661491800 100644
--- a/clang/test/SemaCXX/warn-unsafe-buffer-usage-pragma-misuse.cpp
+++ b/clang/test/SemaCXX/warn-unsafe-buffer-usage-pragma-misuse.cpp
@@ -18,8 +18,8 @@ void endUnopened(int *x) {
}
void wrongOption() {
-#pragma clang unsafe_buffer_usage start // expected-error{{Expected 'begin' or 'end'}}
-#pragma clang unsafe_buffer_usage close // expected-error{{Expected 'begin' or 'end'}}
+#pragma clang unsafe_buffer_usage start // expected-error{{expected 'begin' or 'end'}}
+#pragma clang unsafe_buffer_usage close // expected-error{{expected 'begin' or 'end'}}
}
void unclosed(int * p1) {
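Editor's note: for reference, the spellings the (now lowercase) diagnostic steers toward; per this test, begin and end are the only accepted options:

    #pragma clang unsafe_buffer_usage begin
    // ... code exempt from -Wunsafe-buffer-usage ...
    #pragma clang unsafe_buffer_usage end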
diff --git a/clang/test/SemaHLSL/Availability/attr-availability-compute.hlsl b/clang/test/SemaHLSL/Availability/attr-availability-compute.hlsl
new file mode 100644
index 000000000000..8fa696ea1164
--- /dev/null
+++ b/clang/test/SemaHLSL/Availability/attr-availability-compute.hlsl
@@ -0,0 +1,73 @@
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel5.0-compute -fsyntax-only -verify %s
+
+// Platform shader model, no environment parameter
+__attribute__((availability(shadermodel, introduced = 6.0)))
+unsigned f1(); // #f1
+
+__attribute__((availability(shadermodel, introduced = 5.1)))
+unsigned f2(); // #f2
+
+__attribute__((availability(shadermodel, introduced = 5.0)))
+unsigned f3();
+
+// Platform shader model, environment parameter restricting earlier version,
+// available in all environments in higher versions
+__attribute__((availability(shadermodel, introduced = 2.0, environment = pixel)))
+__attribute__((availability(shadermodel, introduced = 6.0)))
+unsigned f4(); // #f4
+
+__attribute__((availability(shadermodel, introduced = 2.0, environment = pixel)))
+__attribute__((availability(shadermodel, introduced = 5.0)))
+unsigned f5();
+
+// Platform shader model, environment parameter restricting earlier version,
+// never available in all environments in higher versions
+__attribute__((availability(shadermodel, introduced = 2.0, environment = pixel)))
+__attribute__((availability(shadermodel, introduced = 6.0, environment = compute)))
+__attribute__((availability(shadermodel, introduced = 5.0, environment = mesh)))
+unsigned f6(); // #f6
+
+__attribute__((availability(shadermodel, introduced = 2.0, environment = pixel)))
+__attribute__((availability(shadermodel, introduced = 6.0, environment = mesh)))
+unsigned f7(); // #f7
+
+__attribute__((availability(shadermodel, introduced = 2.0, environment = pixel)))
+__attribute__((availability(shadermodel, introduced = 5.0, environment = compute)))
+__attribute__((availability(shadermodel, introduced = 6.0, environment = mesh)))
+unsigned f8();
+
+[numthreads(4,1,1)]
+int main() {
+ // expected-warning@#f1_call {{'f1' is only available on Shader Model 6.0 or newer}}
+ // expected-note@#f1 {{'f1' has been marked as being introduced in Shader Model 6.0 here, but the deployment target is Shader Model 5.0}}
+ // expected-note@#f1_call {{enclose 'f1' in a __builtin_available check to silence this warning}}
+ unsigned A = f1(); // #f1_call
+
+ // expected-warning@#f2_call {{'f2' is only available on Shader Model 5.1 or newer}}
+ // expected-note@#f2 {{'f2' has been marked as being introduced in Shader Model 5.1 here, but the deployment target is Shader Model 5.0}}
+ // expected-note@#f2_call {{enclose 'f2' in a __builtin_available check to silence this warning}}
+ unsigned B = f2(); // #f2_call
+
+ unsigned C = f3();
+
+ // expected-warning@#f4_call {{'f4' is only available on Shader Model 6.0 or newer}}
+ // expected-note@#f4 {{'f4' has been marked as being introduced in Shader Model 6.0 here, but the deployment target is Shader Model 5.0}}
+ // expected-note@#f4_call {{enclose 'f4' in a __builtin_available check to silence this warning}}
+ unsigned D = f4(); // #f4_call
+
+ unsigned E = f5();
+
+ // expected-warning@#f6_call {{'f6' is only available in compute shader environment on Shader Model 6.0 or newer}}
+ // expected-note@#f6 {{'f6' has been marked as being introduced in Shader Model 6.0 in compute shader environment here, but the deployment target is Shader Model 5.0}}
+ // expected-note@#f6_call {{enclose 'f6' in a __builtin_available check to silence this warning}}
+ unsigned F = f6(); // #f6_call
+
+ // expected-warning@#f7_call {{'f7' is unavailable}}
+ // expected-note@#f7 {{'f7' has been marked as being introduced in Shader Model 6.0 in mesh shader environment here, but the deployment target is Shader Model 5.0 compute shader environment}}
+ // expected-note@#f7_call {{enclose 'f7' in a __builtin_available check to silence this warning}}
+ unsigned G = f7(); // #f7_call
+
+ unsigned H = f8();
+
+ return 0;
+}
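
The notes in this test repeatedly point at a __builtin_available guard. As a rough sketch of that silencing pattern (assuming the check takes the shadermodel platform spelling that the diagnostics above imply; f1 is the declaration from this test):

    unsigned call_f1_guarded() {
      if (__builtin_available(shadermodel 6.0, *))
        return f1();   // guarded: no availability warning expected
      return 0;        // fallback for Shader Model < 6.0
    }
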
diff --git a/clang/test/SemaHLSL/Availability/attr-availability-errors.hlsl b/clang/test/SemaHLSL/Availability/attr-availability-errors.hlsl
new file mode 100644
index 000000000000..2682eb5fbb5c
--- /dev/null
+++ b/clang/test/SemaHLSL/Availability/attr-availability-errors.hlsl
@@ -0,0 +1,11 @@
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.5-library -fsyntax-only -verify %s
+
+
+void f1(void) __attribute__((availability(shadermodel, introduced = 6.0, environment="pixel"))); // expected-error {{expected an environment name, e.g., 'compute'}}
+
+void f2(void) __attribute__((availability(shadermodel, introduced = 6.0, environment=pixel, environment=compute))); // expected-error {{redundant 'environment' availability change; only the last specified change will be used}}
+
+void f3(void) __attribute__((availability(shadermodel, strict, introduced = 6.0, environment = mesh))); // expected-error {{unexpected parameter 'strict' in availability attribute, not permitted in HLSL}}
+
+int main() {
+}
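
For contrast with the rejected spellings above, the accepted form, as used throughout the other Availability tests in this patch, passes the environment as a bare identifier, gives it at most once, and omits 'strict':

    __attribute__((availability(shadermodel, introduced = 6.0, environment = pixel)))
    void f_ok(void);
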
diff --git a/clang/test/SemaHLSL/Availability/attr-availability-mesh.hlsl b/clang/test/SemaHLSL/Availability/attr-availability-mesh.hlsl
new file mode 100644
index 000000000000..40a7ddbb1de9
--- /dev/null
+++ b/clang/test/SemaHLSL/Availability/attr-availability-mesh.hlsl
@@ -0,0 +1,73 @@
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel5.0-mesh -fsyntax-only -verify %s
+
+// Platform shader model, no environment parameter
+__attribute__((availability(shadermodel, introduced = 6.0)))
+unsigned f1(); // #f1
+
+__attribute__((availability(shadermodel, introduced = 5.1)))
+unsigned f2(); // #f2
+
+__attribute__((availability(shadermodel, introduced = 5.0)))
+unsigned f3();
+
+// Platform shader model, environment parameter restricting earlier version,
+// available in all environments in higher versions
+__attribute__((availability(shadermodel, introduced = 2.0, environment = pixel)))
+__attribute__((availability(shadermodel, introduced = 6.0)))
+unsigned f4(); // #f4
+
+__attribute__((availability(shadermodel, introduced = 2.0, environment = pixel)))
+__attribute__((availability(shadermodel, introduced = 5.0)))
+unsigned f5(); // #f5
+
+// Platform shader model, environment parameter restricting earlier version,
+// never available in all environments in higher versions
+__attribute__((availability(shadermodel, introduced = 2.0, environment = pixel)))
+__attribute__((availability(shadermodel, introduced = 6.0, environment = compute)))
+__attribute__((availability(shadermodel, introduced = 5.0, environment = mesh)))
+unsigned f6(); // #f6
+
+__attribute__((availability(shadermodel, introduced = 2.0, environment = pixel)))
+__attribute__((availability(shadermodel, introduced = 6.0, environment = mesh)))
+unsigned f7(); // #f7
+
+__attribute__((availability(shadermodel, introduced = 2.0, environment = pixel)))
+__attribute__((availability(shadermodel, introduced = 5.0, environment = compute)))
+__attribute__((availability(shadermodel, introduced = 6.0, environment = mesh)))
+unsigned f8(); // #f8
+
+[numthreads(4,1,1)]
+int main() {
+ // expected-warning@#f1_call {{'f1' is only available on Shader Model 6.0 or newer}}
+ // expected-note@#f1 {{'f1' has been marked as being introduced in Shader Model 6.0 here, but the deployment target is Shader Model 5.0}}
+ // expected-note@#f1_call {{enclose 'f1' in a __builtin_available check to silence this warning}}
+ unsigned A = f1(); // #f1_call
+
+ // expected-warning@#f2_call {{'f2' is only available on Shader Model 5.1 or newer}}
+ // expected-note@#f2 {{'f2' has been marked as being introduced in Shader Model 5.1 here, but the deployment target is Shader Model 5.0}}
+ // expected-note@#f2_call {{enclose 'f2' in a __builtin_available check to silence this warning}}
+ unsigned B = f2(); // #f2_call
+
+ unsigned C = f3();
+
+ // expected-warning@#f4_call {{'f4' is only available on Shader Model 6.0 or newer}}
+ // expected-note@#f4 {{'f4' has been marked as being introduced in Shader Model 6.0 here, but the deployment target is Shader Model 5.0}}
+ // expected-note@#f4_call {{enclose 'f4' in a __builtin_available check to silence this warning}}
+ unsigned D = f4(); // #f4_call
+
+ unsigned E = f5(); // #f5_call
+
+ unsigned F = f6(); // #f6_call
+
+ // expected-warning@#f7_call {{'f7' is only available in mesh shader environment on Shader Model 6.0 or newer}}
+ // expected-note@#f7 {{'f7' has been marked as being introduced in Shader Model 6.0 in mesh shader environment here, but the deployment target is Shader Model 5.0 mesh shader environment}}
+ // expected-note@#f7_call {{enclose 'f7' in a __builtin_available check to silence this warning}}
+ unsigned G = f7(); // #f7_call
+
+ // expected-warning@#f8_call {{'f8' is only available in mesh shader environment on Shader Model 6.0 or newer}}
+ // expected-note@#f8 {{'f8' has been marked as being introduced in Shader Model 6.0 in mesh shader environment here, but the deployment target is Shader Model 5.0 mesh shader environment}}
+ // expected-note@#f8_call {{enclose 'f8' in a __builtin_available check to silence this warning}}
+ unsigned H = f8(); // #f8_call
+
+ return 0;
+}
diff --git a/clang/test/SemaHLSL/Availability/attr-availability-pixel.hlsl b/clang/test/SemaHLSL/Availability/attr-availability-pixel.hlsl
new file mode 100644
index 000000000000..59d09a9cd276
--- /dev/null
+++ b/clang/test/SemaHLSL/Availability/attr-availability-pixel.hlsl
@@ -0,0 +1,63 @@
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel5.0-pixel -fsyntax-only -verify %s
+
+// Platform shader model, no environment parameter
+__attribute__((availability(shadermodel, introduced = 6.0)))
+unsigned f1(); // #f1
+
+__attribute__((availability(shadermodel, introduced = 5.1)))
+unsigned f2(); // #f2
+
+__attribute__((availability(shadermodel, introduced = 5.0)))
+unsigned f3();
+
+// Platform shader model, environment parameter restricting earlier version,
+// available in all environments in higher versions
+__attribute__((availability(shadermodel, introduced = 2.0, environment = pixel)))
+__attribute__((availability(shadermodel, introduced = 6.0)))
+unsigned f4(); // #f4
+
+__attribute__((availability(shadermodel, introduced = 2.0, environment = pixel)))
+__attribute__((availability(shadermodel, introduced = 5.0)))
+unsigned f5();
+
+// Platform shader model, environment parameter restricting earlier version,
+// never available in all environments in higher versions
+__attribute__((availability(shadermodel, introduced = 2.0, environment = pixel)))
+__attribute__((availability(shadermodel, introduced = 6.0, environment = compute)))
+__attribute__((availability(shadermodel, introduced = 5.0, environment = mesh)))
+unsigned f6(); // #f6
+
+__attribute__((availability(shadermodel, introduced = 2.0, environment = pixel)))
+__attribute__((availability(shadermodel, introduced = 6.0, environment = mesh)))
+unsigned f7(); // #f7
+
+__attribute__((availability(shadermodel, introduced = 2.0, environment = pixel)))
+__attribute__((availability(shadermodel, introduced = 5.0, environment = compute)))
+__attribute__((availability(shadermodel, introduced = 6.0, environment = mesh)))
+unsigned f8();
+
+int main() {
+ // expected-warning@#f1_call {{'f1' is only available on Shader Model 6.0 or newer}}
+ // expected-note@#f1 {{'f1' has been marked as being introduced in Shader Model 6.0 here, but the deployment target is Shader Model 5.0}}
+ // expected-note@#f1_call {{enclose 'f1' in a __builtin_available check to silence this warning}}
+ unsigned A = f1(); // #f1_call
+
+ // expected-warning@#f2_call {{'f2' is only available on Shader Model 5.1 or newer}}
+ // expected-note@#f2 {{'f2' has been marked as being introduced in Shader Model 5.1 here, but the deployment target is Shader Model 5.0}}
+ // expected-note@#f2_call {{enclose 'f2' in a __builtin_available check to silence this warning}}
+ unsigned B = f2(); // #f2_call
+
+ unsigned C = f3();
+
+ unsigned D = f4(); // #f4_call
+
+ unsigned E = f5();
+
+ unsigned F = f6(); // #f6_call
+
+ unsigned G = f7(); // #f7_call
+
+ unsigned H = f8();
+
+ return 0;
+}
diff --git a/clang/test/SemaHLSL/AvailabilityMarkup.hlsl b/clang/test/SemaHLSL/AvailabilityMarkup.hlsl
deleted file mode 100644
index b883957af087..000000000000
--- a/clang/test/SemaHLSL/AvailabilityMarkup.hlsl
+++ /dev/null
@@ -1,25 +0,0 @@
-// RUN: %clang_cc1 -triple dxil-pc-shadermodel5.0-library -verify %s
-
-__attribute__((availability(shadermodel, introduced = 6.0)))
-unsigned fn6_0(); // #fn6_0
-
-__attribute__((availability(shadermodel, introduced = 5.1)))
-unsigned fn5_1(); // #fn5_1
-
-__attribute__((availability(shadermodel, introduced = 5.0)))
-unsigned fn5_0();
-
-void fn() {
- // expected-warning@#fn6_0_site {{'fn6_0' is only available on HLSL ShaderModel 6.0 or newer}}
- // expected-note@#fn6_0 {{'fn6_0' has been marked as being introduced in HLSL ShaderModel 6.0 here, but the deployment target is HLSL ShaderModel 5.0}}
- // expected-note@#fn6_0_site {{enclose 'fn6_0' in a __builtin_available check to silence this warning}}
- unsigned A = fn6_0(); // #fn6_0_site
-
- // expected-warning@#fn5_1_site {{'fn5_1' is only available on HLSL ShaderModel 5.1 or newer}}
- // expected-note@#fn5_1 {{'fn5_1' has been marked as being introduced in HLSL ShaderModel 5.1 here, but the deployment target is HLSL ShaderModel 5.0}}
- // expected-note@#fn5_1_site {{enclose 'fn5_1' in a __builtin_available check to silence this warning}}
- unsigned B = fn5_1(); // #fn5_1_site
-
- unsigned C = fn5_0();
-}
-
diff --git a/clang/test/SemaHLSL/WaveBuiltinAvailability.hlsl b/clang/test/SemaHLSL/WaveBuiltinAvailability.hlsl
index 0e45edc6a4c8..185b79be37be 100644
--- a/clang/test/SemaHLSL/WaveBuiltinAvailability.hlsl
+++ b/clang/test/SemaHLSL/WaveBuiltinAvailability.hlsl
@@ -2,8 +2,8 @@
// WaveActiveCountBits is unavailable before ShaderModel 6.0.
unsigned foo(bool b) {
- // expected-warning@#site {{'WaveActiveCountBits' is only available on HLSL ShaderModel 6.0 or newer}}
- // expected-note@hlsl/hlsl_intrinsics.h:* {{'WaveActiveCountBits' has been marked as being introduced in HLSL ShaderModel 6.0 here, but the deployment target is HLSL ShaderModel 5.0}}
+ // expected-warning@#site {{'WaveActiveCountBits' is only available on Shader Model 6.0 or newer}}
+ // expected-note@hlsl/hlsl_intrinsics.h:* {{'WaveActiveCountBits' has been marked as being introduced in Shader Model 6.0 here, but the deployment target is Shader Model 5.0}}
// expected-note@#site {{enclose 'WaveActiveCountBits' in a __builtin_available check to silence this warning}}
return hlsl::WaveActiveCountBits(b); // #site
}
diff --git a/clang/test/SemaObjC/unguarded-availability.m b/clang/test/SemaObjC/unguarded-availability.m
index d0e23eabcb59..ecd91990174a 100644
--- a/clang/test/SemaObjC/unguarded-availability.m
+++ b/clang/test/SemaObjC/unguarded-availability.m
@@ -177,16 +177,28 @@ void justAtAvailable(void) {
#ifdef OBJCPP
-int f(char) AVAILABLE_10_12;
+int f(char) AVAILABLE_10_12; // #f_char_def
int f(int);
template <class T> int use_f() {
- // FIXME: We should warn here!
- return f(T());
+ if (@available(macos 10.12, *)) {
+ return f(T()); // no warning expected
+ } else {
+ // expected-warning@#f_call {{'f' is only available on macOS 10.12 or newer}}
+ // expected-note@#f_char_inst {{in instantiation of function template specialization 'use_f<char>' requested here}}
+ // expected-note@#f_char_def {{'f' has been marked as being introduced in macOS 10.12 here, but the deployment target is macOS 10.9}}
+ // expected-note@#f_call {{enclose 'f' in an @available check to silence this warning}}
+ return f(T()); // #f_call
+ }
}
int a = use_f<int>();
-int b = use_f<char>();
+int b = use_f<char>(); // #f_char_inst
+
+void use_f2() AVAILABLE_10_12 {
+ int c = use_f<int>();
+ int d = use_f<char>(); // no warning expected
+}
template <class> int use_at_available() {
if (@available(macos 10.12, *))
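
The new use_f2 case above encodes that caller-level availability also silences the diagnostic: a function itself restricted to 10.12+ may call 10.12+ API unguarded. A minimal C sketch of the same pattern (hypothetical names; the attribute placement mirrors the AVAILABLE_10_12 usage in this test):

    int newer_api(void) __attribute__((availability(macos, introduced = 10.12)));

    void wrapper(void) __attribute__((availability(macos, introduced = 10.12))) {
      newer_api(); // no unguarded-availability warning expected here
    }
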
diff --git a/clang/test/SemaOpenACC/compute-construct-attach-clause.c b/clang/test/SemaOpenACC/compute-construct-attach-clause.c
index de735308528a..deca99f5bae4 100644
--- a/clang/test/SemaOpenACC/compute-construct-attach-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-attach-clause.c
@@ -16,7 +16,7 @@ void uses() {
#pragma acc parallel attach(LocalInt)
while (1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel attach(&LocalInt)
while (1);
diff --git a/clang/test/SemaOpenACC/compute-construct-clause-ast.cpp b/clang/test/SemaOpenACC/compute-construct-clause-ast.cpp
index 6d2efcf81eb6..69f65f4083ae 100644
--- a/clang/test/SemaOpenACC/compute-construct-clause-ast.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-clause-ast.cpp
@@ -40,6 +40,89 @@ void NormalFunc(int i, float f) {
// CHECK-NEXT: WhileStmt
// CHECK-NEXT: CXXBoolLiteralExpr
// CHECK-NEXT: NullStmt
+
+#pragma acc parallel reduction(+: i)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: +
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'int' lvalue ParmVar{{.*}} 'i' 'int'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc serial reduction(*: f)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: *
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'float' lvalue ParmVar{{.*}} 'f' 'float'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc parallel reduction(max: i)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: max
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'int' lvalue ParmVar{{.*}} 'i' 'int'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc serial reduction(min: f)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: min
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'float' lvalue ParmVar{{.*}} 'f' 'float'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc parallel reduction(&: i)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: &
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'int' lvalue ParmVar{{.*}} 'i' 'int'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc serial reduction(|: f)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: |
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'float' lvalue ParmVar{{.*}} 'f' 'float'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+
+#pragma acc parallel reduction(^: i)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: ^
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'int' lvalue ParmVar{{.*}} 'i' 'int'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc serial reduction(&&: f)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: &&
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'float' lvalue ParmVar{{.*}} 'f' 'float'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+
+#pragma acc parallel reduction(||: i)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: ||
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'int' lvalue ParmVar{{.*}} 'i' 'int'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
}
template<typename T>
@@ -154,6 +237,98 @@ void TemplFunc() {
// CHECK-NEXT: CXXBoolLiteralExpr
// CHECK-NEXT: NullStmt
+ T t;
+ // CHECK-NEXT: DeclStmt
+ // CHECK-NEXT: VarDecl{{.*}} t 'T'
+
+#pragma acc parallel reduction(+: t)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: +
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'T' lvalue Var{{.*}} 't' 'T'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc serial reduction(*: T::SomeFloat)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: *
+ // CHECK-NEXT: DependentScopeDeclRefExpr{{.*}} '<dependent type>' lvalue
+ // CHECK-NEXT: NestedNameSpecifier TypeSpec 'T'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+ typename T::IntTy i;
+ // CHECK-NEXT: DeclStmt
+ // CHECK-NEXT: VarDecl{{.*}} i 'typename T::IntTy'
+
+#pragma acc parallel reduction(max: i)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: max
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'typename T::IntTy' lvalue Var{{.*}} 'i' 'typename T::IntTy'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc serial reduction(min: t)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: min
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'T' lvalue Var{{.*}} 't' 'T'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc parallel reduction(&: T::SomeFloat)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: &
+ // CHECK-NEXT: DependentScopeDeclRefExpr{{.*}} '<dependent type>' lvalue
+ // CHECK-NEXT: NestedNameSpecifier TypeSpec 'T'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc serial reduction(|: i)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: |
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'typename T::IntTy' lvalue Var{{.*}} 'i' 'typename T::IntTy'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc parallel reduction(^: t)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: ^
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'T' lvalue Var{{.*}} 't' 'T'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc serial reduction(&&: T::SomeFloat)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: &&
+ // CHECK-NEXT: DependentScopeDeclRefExpr{{.*}} '<dependent type>' lvalue
+ // CHECK-NEXT: NestedNameSpecifier TypeSpec 'T'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc parallel reduction(||: i)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: ||
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'typename T::IntTy' lvalue Var{{.*}} 'i' 'typename T::IntTy'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
// Match the instantiation:
// CHECK: FunctionDecl{{.*}}TemplFunc{{.*}}implicit_instantiation
// CHECK-NEXT: TemplateArgument type 'InstTy'
@@ -262,6 +437,79 @@ void TemplFunc() {
// CHECK-NEXT: CXXBoolLiteralExpr
// CHECK-NEXT: NullStmt
+ // CHECK-NEXT: DeclStmt
+ // CHECK-NEXT: VarDecl{{.*}} t 'InstTy'
+ // CHECK-NEXT: CXXConstructExpr
+
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: +
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'InstTy' lvalue Var{{.*}} 't' 'InstTy'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: *
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'const float' lvalue Var{{.*}} 'SomeFloat' 'const float'
+ // CHECK-NEXT: NestedNameSpecifier TypeSpec 'InstTy'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+ // CHECK-NEXT: DeclStmt
+ // CHECK-NEXT: VarDecl{{.*}} i 'typename InstTy::IntTy':'int'
+
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: max
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'typename InstTy::IntTy':'int' lvalue Var{{.*}} 'i' 'typename InstTy::IntTy':'int'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: min
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'InstTy' lvalue Var{{.*}} 't' 'InstTy'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: &
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'const float' lvalue Var{{.*}} 'SomeFloat' 'const float'
+ // CHECK-NEXT: NestedNameSpecifier TypeSpec 'InstTy'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: |
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'typename InstTy::IntTy':'int' lvalue Var{{.*}} 'i' 'typename InstTy::IntTy':'int'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: ^
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'InstTy' lvalue Var{{.*}} 't' 'InstTy'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: &&
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'const float' lvalue Var{{.*}} 'SomeFloat' 'const float'
+ // CHECK-NEXT: NestedNameSpecifier TypeSpec 'InstTy'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: ||
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'typename InstTy::IntTy':'int' lvalue Var{{.*}} 'i' 'typename InstTy::IntTy':'int'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
}
struct BoolConversion{ operator bool() const;};
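
The checks above walk every operator the reduction clause accepts (+, *, max, min, &, |, ^, &&, ||) on both parallel and serial compute constructs. The source shape being parsed is just this (sketch; the tests attach each construct to 'while(true);' because they only exercise parsing and AST printing):

    void reductions(int i, float f) {
    #pragma acc parallel reduction(+: i)
      while(1);

    #pragma acc serial reduction(max: f)
      while(1);
    }
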
diff --git a/clang/test/SemaOpenACC/compute-construct-copy-clause.c b/clang/test/SemaOpenACC/compute-construct-copy-clause.c
index accbe43cea40..2b43480be8b4 100644
--- a/clang/test/SemaOpenACC/compute-construct-copy-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-copy-clause.c
@@ -36,11 +36,11 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
#pragma acc parallel copy(LocalComposite.ScalarMember, LocalComposite.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy(+IntParam)
while(1);
@@ -53,10 +53,10 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy((float)ArrayParam[2])
while(1);
}
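
Each rejected expression above is paired against the operand forms 'copy' does accept per the updated diagnostic: a variable name, a sub-array, an array element, and a composite variable member. A sketch reusing this test's names (in-range sub-array [2:3] of a five-element array):

    struct Complete { int ScalarMember; };

    void accepted_forms(int IntParam, float ArrayParam[5], struct Complete LocalComposite) {
    #pragma acc parallel copy(IntParam, ArrayParam[2:3], ArrayParam[2], LocalComposite.ScalarMember)
      while(1);
    }
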
diff --git a/clang/test/SemaOpenACC/compute-construct-copy-clause.cpp b/clang/test/SemaOpenACC/compute-construct-copy-clause.cpp
index 16e78a43026a..2797927e6e56 100644
--- a/clang/test/SemaOpenACC/compute-construct-copy-clause.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-copy-clause.cpp
@@ -31,11 +31,11 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
#pragma acc parallel copy(LocalComposite2.ScalarMember, LocalComposite2.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy(+IntParam)
while(1);
@@ -48,27 +48,27 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy((float)ArrayParam[2])
while(1);
}
template<typename T, unsigned I, typename V>
void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy(+t)
while(true);
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#TEMPL_USES_INST{{in instantiation of}}
#pragma acc parallel copy(I)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy(t, I)
while(true);
@@ -93,7 +93,7 @@ void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
template<unsigned I, auto &NTTP_REF>
void NTTP() {
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#NTTP_INST{{in instantiation of}}
#pragma acc parallel copy(I)
while(true);
diff --git a/clang/test/SemaOpenACC/compute-construct-copyin-clause.c b/clang/test/SemaOpenACC/compute-construct-copyin-clause.c
index 6f200b357f52..5ea4db9e5fae 100644
--- a/clang/test/SemaOpenACC/compute-construct-copyin-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-copyin-clause.c
@@ -38,11 +38,11 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
#pragma acc parallel copyin(LocalComposite.ScalarMember, LocalComposite.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin(+IntParam)
while(1);
@@ -55,14 +55,14 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin((float)ArrayParam[2])
while(1);
// expected-error@+2{{invalid tag 'invalid' on 'copyin' clause}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin(invalid:(float)ArrayParam[2])
while(1);
}
diff --git a/clang/test/SemaOpenACC/compute-construct-copyin-clause.cpp b/clang/test/SemaOpenACC/compute-construct-copyin-clause.cpp
index 79275e701161..74ce74a1368d 100644
--- a/clang/test/SemaOpenACC/compute-construct-copyin-clause.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-copyin-clause.cpp
@@ -31,11 +31,11 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
#pragma acc parallel copyin(LocalComposite2.ScalarMember, LocalComposite2.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin(+IntParam)
while(1);
@@ -48,27 +48,27 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin((float)ArrayParam[2])
while(1);
}
template<typename T, unsigned I, typename V>
void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin(+t)
while(true);
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#TEMPL_USES_INST{{in instantiation of}}
#pragma acc parallel copyin(I)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin(t, I)
while(true);
@@ -93,7 +93,7 @@ void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
template<unsigned I, auto &NTTP_REF>
void NTTP() {
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#NTTP_INST{{in instantiation of}}
#pragma acc parallel copyin(I)
while(true);
diff --git a/clang/test/SemaOpenACC/compute-construct-copyout-clause.c b/clang/test/SemaOpenACC/compute-construct-copyout-clause.c
index 38a50f8373e8..a035ab3242e3 100644
--- a/clang/test/SemaOpenACC/compute-construct-copyout-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-copyout-clause.c
@@ -38,11 +38,11 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
#pragma acc parallel copyout(LocalComposite.ScalarMember, LocalComposite.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout(+IntParam)
while(1);
@@ -55,14 +55,14 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout((float)ArrayParam[2])
while(1);
// expected-error@+2{{invalid tag 'invalid' on 'copyout' clause}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout(invalid:(float)ArrayParam[2])
while(1);
}
diff --git a/clang/test/SemaOpenACC/compute-construct-copyout-clause.cpp b/clang/test/SemaOpenACC/compute-construct-copyout-clause.cpp
index 3d05a5670092..c01dc1a39963 100644
--- a/clang/test/SemaOpenACC/compute-construct-copyout-clause.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-copyout-clause.cpp
@@ -31,11 +31,11 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
#pragma acc parallel copyout(LocalComposite2.ScalarMember, LocalComposite2.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout(+IntParam)
while(1);
@@ -48,27 +48,27 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout((float)ArrayParam[2])
while(1);
}
template<typename T, unsigned I, typename V>
void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout(+t)
while(true);
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#TEMPL_USES_INST{{in instantiation of}}
#pragma acc parallel copyout(I)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout(t, I)
while(true);
@@ -93,7 +93,7 @@ void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
template<unsigned I, auto &NTTP_REF>
void NTTP() {
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#NTTP_INST{{in instantiation of}}
#pragma acc parallel copyout(I)
while(true);
diff --git a/clang/test/SemaOpenACC/compute-construct-create-clause.c b/clang/test/SemaOpenACC/compute-construct-create-clause.c
index 9c94e3a1a407..5cfa9b0c5cc3 100644
--- a/clang/test/SemaOpenACC/compute-construct-create-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-create-clause.c
@@ -39,11 +39,11 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
#pragma acc parallel create(LocalComposite.ScalarMember, LocalComposite.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create(+IntParam)
while(1);
@@ -56,14 +56,14 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create((float)ArrayParam[2])
while(1);
// expected-error@+2{{invalid tag 'invalid' on 'create' clause}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create(invalid:(float)ArrayParam[2])
while(1);
}
diff --git a/clang/test/SemaOpenACC/compute-construct-create-clause.cpp b/clang/test/SemaOpenACC/compute-construct-create-clause.cpp
index d0323620b8f7..3ed1e1e9f700 100644
--- a/clang/test/SemaOpenACC/compute-construct-create-clause.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-create-clause.cpp
@@ -31,11 +31,11 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
#pragma acc parallel create(LocalComposite2.ScalarMember, LocalComposite2.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create(+IntParam)
while(1);
@@ -48,27 +48,27 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create((float)ArrayParam[2])
while(1);
}
template<typename T, unsigned I, typename V>
void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create(+t)
while(true);
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#TEMPL_USES_INST{{in instantiation of}}
#pragma acc parallel create(I)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create(t, I)
while(true);
@@ -93,7 +93,7 @@ void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
template<unsigned I, auto &NTTP_REF>
void NTTP() {
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#NTTP_INST{{in instantiation of}}
#pragma acc parallel create(I)
while(true);
diff --git a/clang/test/SemaOpenACC/compute-construct-device_type-clause.c b/clang/test/SemaOpenACC/compute-construct-device_type-clause.c
index 15c9cf396c80..bf2a00a0f736 100644
--- a/clang/test/SemaOpenACC/compute-construct-device_type-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-device_type-clause.c
@@ -182,7 +182,7 @@ void uses() {
while(1);
// expected-error@+2{{OpenACC clause 'reduction' may not follow a 'device_type' clause in a compute construct}}
// expected-note@+1{{previous clause is here}}
-#pragma acc kernels device_type(*) reduction(+:Var)
+#pragma acc serial device_type(*) reduction(+:Var)
while(1);
// expected-error@+2{{OpenACC clause 'collapse' may not follow a 'device_type' clause in a compute construct}}
// expected-note@+1{{previous clause is here}}
diff --git a/clang/test/SemaOpenACC/compute-construct-deviceptr-clause.c b/clang/test/SemaOpenACC/compute-construct-deviceptr-clause.c
index e5d328eb0b28..ae8269b9779a 100644
--- a/clang/test/SemaOpenACC/compute-construct-deviceptr-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-deviceptr-clause.c
@@ -16,7 +16,7 @@ void uses() {
#pragma acc parallel deviceptr(LocalInt)
while (1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel deviceptr(&LocalInt)
while (1);
diff --git a/clang/test/SemaOpenACC/compute-construct-firstprivate-clause.c b/clang/test/SemaOpenACC/compute-construct-firstprivate-clause.c
index 4e057bf32c2d..eacda7bbbbba 100644
--- a/clang/test/SemaOpenACC/compute-construct-firstprivate-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-firstprivate-clause.c
@@ -29,11 +29,11 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
#pragma acc parallel firstprivate(LocalComposite.ScalarMember, LocalComposite.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel firstprivate(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel firstprivate(+IntParam)
while(1);
@@ -46,10 +46,10 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel firstprivate((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel firstprivate((float)ArrayParam[2])
while(1);
}
diff --git a/clang/test/SemaOpenACC/compute-construct-firstprivate-clause.cpp b/clang/test/SemaOpenACC/compute-construct-firstprivate-clause.cpp
index 2fbb80f7b2fb..161e4012c08d 100644
--- a/clang/test/SemaOpenACC/compute-construct-firstprivate-clause.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-firstprivate-clause.cpp
@@ -32,11 +32,11 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
#pragma acc parallel firstprivate(LocalComposite2.ScalarMember, LocalComposite2.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel firstprivate(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel firstprivate(+IntParam)
while(1);
@@ -49,27 +49,27 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel firstprivate((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel firstprivate((float)ArrayParam[2])
while(1);
}
template<typename T, unsigned I, typename V>
void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(+t)
while(true);
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#TEMPL_USES_INST{{in instantiation of}}
#pragma acc parallel private(I)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(t, I)
while(true);
@@ -94,7 +94,7 @@ void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
template<unsigned I, auto &NTTP_REF>
void NTTP() {
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#NTTP_INST{{in instantiation of}}
#pragma acc parallel private(I)
while(true);
diff --git a/clang/test/SemaOpenACC/compute-construct-no_create-clause.c b/clang/test/SemaOpenACC/compute-construct-no_create-clause.c
index 07a60b73c34f..4ff06eaf132b 100644
--- a/clang/test/SemaOpenACC/compute-construct-no_create-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-no_create-clause.c
@@ -28,11 +28,11 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
#pragma acc parallel no_create(LocalComposite.ScalarMember, LocalComposite.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create(+IntParam)
while(1);
@@ -45,10 +45,10 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create((float)ArrayParam[2])
while(1);
}
diff --git a/clang/test/SemaOpenACC/compute-construct-no_create-clause.cpp b/clang/test/SemaOpenACC/compute-construct-no_create-clause.cpp
index 3820d5e3999d..fa84b1fbeda0 100644
--- a/clang/test/SemaOpenACC/compute-construct-no_create-clause.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-no_create-clause.cpp
@@ -31,11 +31,11 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
#pragma acc parallel no_create(LocalComposite2.ScalarMember, LocalComposite2.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create(+IntParam)
while(1);
@@ -48,27 +48,27 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create((float)ArrayParam[2])
while(1);
}
template<typename T, unsigned I, typename V>
void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create(+t)
while(true);
  // An NTTP is only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#TEMPL_USES_INST{{in instantiation of}}
#pragma acc parallel no_create(I)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create(t, I)
while(true);
@@ -93,7 +93,7 @@ void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
template<unsigned I, auto &NTTP_REF>
void NTTP() {
  // An NTTP is only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#NTTP_INST{{in instantiation of}}
#pragma acc parallel no_create(I)
while(true);
diff --git a/clang/test/SemaOpenACC/compute-construct-present-clause.c b/clang/test/SemaOpenACC/compute-construct-present-clause.c
index 99c4b1dcd19b..1d50a6b1275b 100644
--- a/clang/test/SemaOpenACC/compute-construct-present-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-present-clause.c
@@ -28,11 +28,11 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
#pragma acc parallel present(LocalComposite.ScalarMember, LocalComposite.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present(+IntParam)
while(1);
@@ -45,10 +45,10 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present((float)ArrayParam[2])
while(1);
}
diff --git a/clang/test/SemaOpenACC/compute-construct-present-clause.cpp b/clang/test/SemaOpenACC/compute-construct-present-clause.cpp
index 62e481dea3e2..db230d0b1d9d 100644
--- a/clang/test/SemaOpenACC/compute-construct-present-clause.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-present-clause.cpp
@@ -31,11 +31,11 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
#pragma acc parallel present(LocalComposite2.ScalarMember, LocalComposite2.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present(+IntParam)
while(1);
@@ -48,27 +48,27 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present((float)ArrayParam[2])
while(1);
}
template<typename T, unsigned I, typename V>
void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present(+t)
while(true);
  // An NTTP is only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#TEMPL_USES_INST{{in instantiation of}}
#pragma acc parallel present(I)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present(t, I)
while(true);
@@ -93,7 +93,7 @@ void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
template<unsigned I, auto &NTTP_REF>
void NTTP() {
  // An NTTP is only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#NTTP_INST{{in instantiation of}}
#pragma acc parallel present(I)
while(true);
diff --git a/clang/test/SemaOpenACC/compute-construct-private-clause.c b/clang/test/SemaOpenACC/compute-construct-private-clause.c
index d2615c384cdb..3e6dbaafbc6f 100644
--- a/clang/test/SemaOpenACC/compute-construct-private-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-private-clause.c
@@ -89,13 +89,13 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
// Invalid cases, arbitrary expressions.
struct Incomplete *I;
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(*I)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(GlobalInt + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(+GlobalInt)
while(1);
@@ -128,10 +128,10 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private((float)ArrayParam[2])
while(1);
}
diff --git a/clang/test/SemaOpenACC/compute-construct-private-clause.cpp b/clang/test/SemaOpenACC/compute-construct-private-clause.cpp
index a776b16f0feb..fb9e89a21acc 100644
--- a/clang/test/SemaOpenACC/compute-construct-private-clause.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-private-clause.cpp
@@ -64,34 +64,34 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
// Invalid cases, arbitrary expressions.
Incomplete *I;
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(*I)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(GlobalInt + IntParam)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(+GlobalInt)
while(true);
}
template<typename T, unsigned I, typename V>
void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(+t)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(+I)
while(true);
  // An NTTP is only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#TEMPL_USES_INST{{in instantiation of}}
#pragma acc parallel private(I)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(t, I)
while(true);
@@ -120,7 +120,7 @@ void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
template<unsigned I, auto &NTTP_REF>
void NTTP() {
  // An NTTP is only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#NTTP_INST{{in instantiation of}}
#pragma acc parallel private(I)
while(true);
diff --git a/clang/test/SemaOpenACC/compute-construct-reduction-clause.c b/clang/test/SemaOpenACC/compute-construct-reduction-clause.c
new file mode 100644
index 000000000000..9c0debd34503
--- /dev/null
+++ b/clang/test/SemaOpenACC/compute-construct-reduction-clause.c
@@ -0,0 +1,107 @@
+// RUN: %clang_cc1 %s -fopenacc -verify
+
+struct CompositeOfScalars {
+ int I;
+ float F;
+ short J;
+ char C;
+ double D;
+ _Complex float CF;
+ _Complex double CD;
+};
+
+struct CompositeHasComposite {
+ int I;
+ float F;
+ short J;
+ char C;
+ double D;
+ _Complex float CF;
+ _Complex double CD;
+ struct CompositeOfScalars COS; // #COS_FIELD
+};
+
+void uses(unsigned Parm) {
+ float Var;
+ int IVar;
+
+#pragma acc parallel reduction(+:Parm)
+ while (1);
+#pragma acc serial reduction(+:Parm)
+ while (1);
+ // expected-error@+1{{OpenACC 'reduction' clause is not valid on 'kernels' directive}}
+#pragma acc kernels reduction(+:Parm)
+ while (1);
+
+  // On a 'parallel', 'num_gangs' cannot have more than one argument. Since
+  // 'num_gangs' is not valid on 'serial' and 'reduction' is not valid on
+  // 'kernels', the other combinations cannot be tested.
+#pragma acc parallel reduction(+:Parm) num_gangs(IVar)
+ while (1);
+#pragma acc parallel num_gangs(IVar) reduction(+:IVar)
+ while (1);
+
+ // expected-error@+2{{OpenACC 'reduction' clause may not appear on a 'parallel' construct with a 'num_gangs' clause with more than 1 argument, have 2}}
+ // expected-note@+1{{previous clause is here}}
+#pragma acc parallel reduction(+:Parm) num_gangs(Parm, IVar)
+ while (1);
+
+ // expected-error@+2{{OpenACC 'reduction' clause may not appear on a 'parallel' construct with a 'num_gangs' clause with more than 1 argument, have 2}}
+ // expected-note@+1{{previous clause is here}}
+#pragma acc parallel num_gangs(Parm, IVar) reduction(+:Var)
+ while (1);
+
+ struct CompositeOfScalars CoS;
+ struct CompositeOfScalars *CoSPtr;
+ struct CompositeHasComposite ChC;
+ struct CompositeHasComposite *ChCPtr;
+
+ int I;
+ float F;
+ int Array[5];
+
+  // Vars in a reduction must be scalars or composites of scalars.
+#pragma acc parallel reduction(&: CoS, I, F)
+ while (1);
+ // expected-error@+2{{OpenACC 'reduction' composite variable must not have non-scalar field}}
+ // expected-note@#COS_FIELD{{invalid field is here}}
+#pragma acc parallel reduction(&: ChC)
+ while (1);
+
+ // expected-error@+1{{OpenACC 'reduction' variable must be of scalar type, sub-array, or a composite of scalar types; type is 'int[5]'}}
+#pragma acc parallel reduction(&: Array)
+ while (1);
+
+#pragma acc parallel reduction(&: CoS, Array[I], Array[0:I])
+ while (1);
+
+ struct CompositeHasComposite ChCArray[5];
+ // expected-error@+1{{OpenACC 'reduction' variable must be of scalar type, sub-array, or a composite of scalar types; sub-array base type is 'struct CompositeHasComposite'}}
+#pragma acc parallel reduction(&: CoS, Array[I], ChCArray[0:I])
+ while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: CoS.I)
+ while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: CoSPtr->I)
+  while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: ChC.COS)
+ while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: ChCPtr->COS)
+ while (1);
+
+#pragma acc parallel reduction(&: I) reduction(&:I)
+ while (1);
+
+ struct HasArray { int array[5]; } HA;
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&:HA.array[1:2])
+ while (1);
+}
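For contrast with the diagnostics above, a minimal sketch of reduction forms these checks accept: a variable name, a composite of scalars, an array element, and a sub-array (names are illustrative; CompositeOfScalars is the struct defined in the test above):

    void valid_uses(void) {
      int Sum = 0, Array[5] = {0};
      struct CompositeOfScalars CoS = {0};
    #pragma acc parallel reduction(+: Sum)        // scalar variable name
      while (1);
    #pragma acc parallel reduction(&: CoS)        // composite of scalars
      while (1);
    #pragma acc parallel reduction(&: Array[2])   // array element
      while (1);
    #pragma acc parallel reduction(&: Array[0:2]) // sub-array
      while (1);
    }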
diff --git a/clang/test/SemaOpenACC/compute-construct-reduction-clause.cpp b/clang/test/SemaOpenACC/compute-construct-reduction-clause.cpp
new file mode 100644
index 000000000000..532dbb238716
--- /dev/null
+++ b/clang/test/SemaOpenACC/compute-construct-reduction-clause.cpp
@@ -0,0 +1,175 @@
+// RUN: %clang_cc1 %s -fopenacc -verify
+
+struct CompositeOfScalars {
+ int I;
+ float F;
+ short J;
+ char C;
+ double D;
+ _Complex float CF;
+ _Complex double CD;
+};
+
+struct CompositeHasComposite {
+ int I;
+ float F;
+ short J;
+ char C;
+ double D;
+ _Complex float CF;
+ _Complex double CD;
+ struct CompositeOfScalars COS; // #COS_FIELD
+};
+
+void uses(unsigned Parm) {
+ float Var;
+ int IVar;
+
+#pragma acc parallel reduction(+:Parm)
+ while (1);
+#pragma acc serial reduction(+:Parm)
+ while (1);
+ // expected-error@+1{{OpenACC 'reduction' clause is not valid on 'kernels' directive}}
+#pragma acc kernels reduction(+:Parm)
+ while (1);
+
+  // On a 'parallel', 'num_gangs' cannot have more than one argument. Since
+  // 'num_gangs' is not valid on 'serial' and 'reduction' is not valid on
+  // 'kernels', the other combinations cannot be tested.
+#pragma acc parallel reduction(+:Parm) num_gangs(IVar)
+ while (1);
+#pragma acc parallel num_gangs(IVar) reduction(+:Var)
+ while (1);
+
+ // expected-error@+2{{OpenACC 'reduction' clause may not appear on a 'parallel' construct with a 'num_gangs' clause with more than 1 argument, have 2}}
+ // expected-note@+1{{previous clause is here}}
+#pragma acc parallel reduction(+:Parm) num_gangs(Parm, IVar)
+ while (1);
+
+ // expected-error@+2{{OpenACC 'reduction' clause may not appear on a 'parallel' construct with a 'num_gangs' clause with more than 1 argument, have 2}}
+ // expected-note@+1{{previous clause is here}}
+#pragma acc parallel num_gangs(Parm, IVar) reduction(+:Var)
+ while (1);
+
+#pragma acc parallel reduction(+:Parm) reduction(+:Parm)
+ while (1);
+
+ struct CompositeOfScalars CoS;
+ struct CompositeOfScalars *CoSPtr;
+ struct CompositeHasComposite ChC;
+ struct CompositeHasComposite *ChCPtr;
+
+ int I;
+ float F;
+ int Array[5];
+
+  // Vars in a reduction must be scalars or composites of scalars.
+#pragma acc parallel reduction(&: CoS, I, F)
+ while (1);
+ // expected-error@+2{{OpenACC 'reduction' composite variable must not have non-scalar field}}
+ // expected-note@#COS_FIELD{{invalid field is here}}
+#pragma acc parallel reduction(&: ChC)
+ while (1);
+ // expected-error@+1{{OpenACC 'reduction' variable must be of scalar type, sub-array, or a composite of scalar types; type is 'int[5]'}}
+#pragma acc parallel reduction(&: Array)
+ while (1);
+
+#pragma acc parallel reduction(&: CoS, Array[I], Array[0:I])
+ while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: CoS.I)
+ while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: CoSPtr->I)
+  while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: ChC.COS)
+ while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: ChCPtr->COS)
+ while (1);
+}
+
+template<typename T, typename U, typename V>
+void TemplUses(T Parm, U CoS, V ChC) {
+ T Var;
+ U *CoSPtr;
+ V *ChCPtr;
+
+#pragma acc parallel reduction(+:Parm)
+ while (1);
+#pragma acc serial reduction(+:Parm)
+ while (1);
+ // expected-error@+1{{OpenACC 'reduction' clause is not valid on 'kernels' directive}}
+#pragma acc kernels reduction(+:Parm)
+ while (1);
+
+  // On a 'parallel', 'num_gangs' cannot have more than one argument. Since
+  // 'num_gangs' is not valid on 'serial' and 'reduction' is not valid on
+  // 'kernels', the other combinations cannot be tested.
+#pragma acc parallel reduction(+:Parm) num_gangs(Var)
+ while (1);
+#pragma acc parallel num_gangs(Var) reduction(+:Var)
+ while (1);
+
+ // expected-error@+2{{OpenACC 'reduction' clause may not appear on a 'parallel' construct with a 'num_gangs' clause with more than 1 argument, have 2}}
+ // expected-note@+1{{previous clause is here}}
+#pragma acc parallel reduction(+:Parm) num_gangs(Parm, Var)
+ while (1);
+
+ // expected-error@+2{{OpenACC 'reduction' clause may not appear on a 'parallel' construct with a 'num_gangs' clause with more than 1 argument, have 2}}
+ // expected-note@+1{{previous clause is here}}
+#pragma acc parallel num_gangs(Parm, Var) reduction(+:Var)
+ while (1);
+
+#pragma acc parallel reduction(+:Parm) reduction(+:Parm)
+ while (1);
+
+ int NonDep;
+ int NonDepArray[5];
+ T Array[5];
+
+  // Vars in a reduction must be scalars or composites of scalars.
+#pragma acc parallel reduction(&: CoS, Var, Parm)
+ while (1);
+ // expected-error@+2{{OpenACC 'reduction' composite variable must not have non-scalar field}}
+ // expected-note@#COS_FIELD{{invalid field is here}}
+#pragma acc parallel reduction(&: ChC)
+ while (1);
+ // expected-error@+1{{OpenACC 'reduction' variable must be of scalar type, sub-array, or a composite of scalar types; type is 'int[5]'}}
+#pragma acc parallel reduction(&: Array)
+ while (1);
+ // expected-error@+1{{OpenACC 'reduction' variable must be of scalar type, sub-array, or a composite of scalar types; type is 'int[5]'}}
+#pragma acc parallel reduction(&: NonDepArray)
+ while (1);
+
+#pragma acc parallel reduction(&: CoS, Array[Var], Array[0:Var])
+ while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: CoS.I)
+ while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: CoSPtr->I)
+  while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: ChC.COS)
+ while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: ChCPtr->COS)
+ while (1);
+}
+
+void inst() {
+ CompositeOfScalars CoS;
+ CompositeHasComposite ChC;
+ // expected-note@+1{{in instantiation of function template specialization}}
+ TemplUses(5, CoS, ChC);
+}
diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-error.cl b/clang/test/SemaOpenCL/builtins-amdgcn-error.cl
index b044763edcf0..7a550f026bc1 100644
--- a/clang/test/SemaOpenCL/builtins-amdgcn-error.cl
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-error.cl
@@ -155,8 +155,8 @@ void test_ds_fmaxf(local float *out, float src, int a) {
void test_fence() {
__builtin_amdgcn_fence(__ATOMIC_SEQ_CST + 1, "workgroup"); // expected-warning {{memory order argument to atomic operation is invalid}}
__builtin_amdgcn_fence(__ATOMIC_ACQUIRE - 1, "workgroup"); // expected-warning {{memory order argument to atomic operation is invalid}}
- __builtin_amdgcn_fence(4); // expected-error {{too few arguments to function call, expected 2}}
- __builtin_amdgcn_fence(4, 4, 4); // expected-error {{too many arguments to function call, expected 2}}
+ __builtin_amdgcn_fence(4); // expected-error {{too few arguments to function call, expected at least 2, have 1}}
+ __builtin_amdgcn_fence(4, 4, 4); // expected-error {{incompatible integer to pointer conversion passing 'int' to parameter of type 'const char *'}}
__builtin_amdgcn_fence(3.14, ""); // expected-warning {{implicit conversion from 'double' to 'unsigned int' changes value from 3.14 to 3}}
__builtin_amdgcn_fence(__ATOMIC_ACQUIRE, 5); // expected-error {{incompatible integer to pointer conversion passing 'int' to parameter of type 'const char *'}}
const char ptr[] = "workgroup";
diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-gfx940-err.cl b/clang/test/SemaOpenCL/builtins-amdgcn-gfx940-err.cl
new file mode 100644
index 000000000000..487cc53e8ad8
--- /dev/null
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-gfx940-err.cl
@@ -0,0 +1,14 @@
+// RUN: %clang_cc1 -cl-std=CL2.0 -O0 -triple amdgcn-unknown-unknown -target-cpu gfx940 -S -verify -o - %s
+// REQUIRES: amdgpu-registered-target
+
+typedef unsigned int u32;
+
+void test_global_load_lds_unsupported_size(global u32* src, local u32 *dst, u32 size) {
+ __builtin_amdgcn_global_load_lds(src, dst, size, /*offset=*/0, /*aux=*/0); // expected-error{{expression is not an integer constant expression}}
+ __builtin_amdgcn_global_load_lds(src, dst, /*size=*/5, /*offset=*/0, /*aux=*/0); // expected-error{{invalid size value}} expected-note {{size must be 1, 2, or 4}}
+ __builtin_amdgcn_global_load_lds(src, dst, /*size=*/0, /*offset=*/0, /*aux=*/0); // expected-error{{invalid size value}} expected-note {{size must be 1, 2, or 4}}
+ __builtin_amdgcn_global_load_lds(src, dst, /*size=*/3, /*offset=*/0, /*aux=*/0); // expected-error{{invalid size value}} expected-note {{size must be 1, 2, or 4}}
+ __builtin_amdgcn_global_load_lds(src, dst, /*size=*/12, /*offset=*/0, /*aux=*/0); // expected-error{{invalid size value}} expected-note {{size must be 1, 2, or 4}}
+ __builtin_amdgcn_global_load_lds(src, dst, /*size=*/16, /*offset=*/0, /*aux=*/0); // expected-error{{invalid size value}} expected-note {{size must be 1, 2, or 4}}
+ __builtin_amdgcn_global_load_lds(src, dst, /*size=*/-1, /*offset=*/0, /*aux=*/0); // expected-error{{invalid size value}} expected-note {{size must be 1, 2, or 4}}
+}
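For reference, the notes above pin the size operand to an integer constant of 1, 2, or 4; a sketch of a call the check accepts, mirroring the test's argument shape:

    void test_global_load_lds_supported_size(global u32* src, local u32 *dst) {
      __builtin_amdgcn_global_load_lds(src, dst, /*size=*/4, /*offset=*/0, /*aux=*/0); // accepted: 4-byte element
    }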
diff --git a/clang/test/SemaOpenCL/vector_swizzle_length.cl b/clang/test/SemaOpenCL/vector_swizzle_length.cl
index f36ae201205e..b06cc126c3ec 100644
--- a/clang/test/SemaOpenCL/vector_swizzle_length.cl
+++ b/clang/test/SemaOpenCL/vector_swizzle_length.cl
@@ -5,6 +5,6 @@ typedef float float8 __attribute__((ext_vector_type(8)));
void foo(void) {
float8 f2 = (float8)(0, 0, 0, 0, 0, 0, 0, 0);
- f2.s01234; // expected-error {{vector component access has invalid length 5. Supported: 1,2,3,4,8,16}}
- f2.xyzxy; // expected-error {{vector component access has invalid length 5. Supported: 1,2,3,4,8,16}}
+ f2.s01234; // expected-error {{vector component access has invalid length 5; supported lengths are: 1,2,3,4,8,16}}
+ f2.xyzxy; // expected-error {{vector component access has invalid length 5; supported lengths are: 1,2,3,4,8,16}}
}
diff --git a/clang/test/SemaTemplate/cwg2398.cpp b/clang/test/SemaTemplate/cwg2398.cpp
index 31686c4bc980..e3b5e575374d 100644
--- a/clang/test/SemaTemplate/cwg2398.cpp
+++ b/clang/test/SemaTemplate/cwg2398.cpp
@@ -59,6 +59,21 @@ namespace templ {
template struct C<B<int>>;
} // namespace templ
+namespace class_template {
+ template <class T1, class T2 = float> struct A;
+
+ template <class T3> struct B;
+
+ template <template <class T4> class TT1, class T5> struct B<TT1<T5>>;
+ // new-note@-1 {{partial specialization matches}}
+
+ template <class T6, class T7> struct B<A<T6, T7>> {};
+ // new-note@-1 {{partial specialization matches}}
+
+ template struct B<A<int>>;
+ // new-error@-1 {{ambiguous partial specialization}}
+} // namespace class_template
+
namespace type_pack1 {
template<class T2> struct A;
template<template<class ...T3s> class TT1, class T4> struct A<TT1<T4>> ;
diff --git a/clang/test/SemaTemplate/deduction-guide.cpp b/clang/test/SemaTemplate/deduction-guide.cpp
index a91ab5ec7bcc..91c35d98fbf5 100644
--- a/clang/test/SemaTemplate/deduction-guide.cpp
+++ b/clang/test/SemaTemplate/deduction-guide.cpp
@@ -100,11 +100,11 @@ using CT = C<int>;
// CHECK: | `-NonTypeTemplateParmDecl {{.*}} 'X' depth 1 index 1
// CHECK: |-TemplateTypeParmDecl {{.*}} typename depth 0 index 2 U
// CHECK: |-NonTypeTemplateParmDecl {{.*}} 'type-parameter-0-2' depth 0 index 3 V
-// CHECK: | `-TemplateArgument expr
+// CHECK: | `-TemplateArgument {{.*}} expr
// CHECK: | `-IntegerLiteral {{.*}} 'int' 0
-// CHECK: |-CXXDeductionGuideDecl {{.*}} 'auto (A, Y<>, type-parameter-0-2) -> C<A>'
+// CHECK: |-CXXDeductionGuideDecl {{.*}} 'auto (A, Y<template-parameter-0-1>, type-parameter-0-2) -> C<A>'
// CHECK: | |-ParmVarDecl {{.*}} 'A'
-// CHECK: | |-ParmVarDecl {{.*}} 'Y<>'
+// CHECK: | |-ParmVarDecl {{.*}} 'Y<template-parameter-0-1>'
// CHECK: | `-ParmVarDecl {{.*}} 'type-parameter-0-2'
// CHECK: `-CXXDeductionGuideDecl {{.*}} 'auto (int, Y<B>, int) -> C<int>'
// CHECK: |-TemplateArgument type 'int'
@@ -114,12 +114,12 @@ using CT = C<int>;
// CHECK: |-ParmVarDecl {{.*}} 'int'
// CHECK: |-ParmVarDecl {{.*}} 'Y<B>'
// CHECK: `-ParmVarDecl {{.*}} 'int'
-// CHECK: FunctionProtoType {{.*}} 'auto (A, Y<>, type-parameter-0-2) -> C<A>' dependent trailing_return cdecl
+// CHECK: FunctionProtoType {{.*}} 'auto (A, Y<template-parameter-0-1>, type-parameter-0-2) -> C<A>' dependent trailing_return cdecl
// CHECK: |-InjectedClassNameType {{.*}} 'C<A>' dependent
// CHECK: |-TemplateTypeParmType {{.*}} 'A' dependent depth 0 index 0
// CHECK: | `-TemplateTypeParm {{.*}} 'A'
-// CHECK: |-ElaboratedType {{.*}} 'Y<>' sugar dependent
-// CHECK: | `-TemplateSpecializationType {{.*}} 'Y<>' dependent Y
+// CHECK: |-ElaboratedType {{.*}} 'Y<template-parameter-0-1>' sugar dependent
+// CHECK: | `-TemplateSpecializationType {{.*}} 'Y<template-parameter-0-1>' dependent Y
// CHECK: | `-TemplateArgument template
// CHECK: `-TemplateTypeParmType {{.*}} 'type-parameter-0-2' dependent depth 0 index 2
@@ -139,7 +139,7 @@ using DT = D<int, int>;
// CHECK: |-TemplateTypeParmDecl {{.*}} typename depth 0 index 0 ... T
// CHECK: |-TemplateTypeParmDecl {{.*}} typename depth 0 index 1 U1
// CHECK: |-TemplateTypeParmDecl {{.*}} typename depth 0 index 2 U2
-// CHECK: `-CXXDeductionGuideDecl {{.*}} 'auto (B<type-parameter-0-1, type-parameter-0-2> *) -> D<T...>'
+// CHECK: `-CXXDeductionGuideDecl {{.*}} 'auto (B<type-parameter-0-1, type-parameter-0-2> *) -> D<T...>'
// CHECK: `-ParmVarDecl {{.*}} 'B<type-parameter-0-1, type-parameter-0-2> *'
// CHECK: FunctionProtoType {{.*}} 'auto (B<type-parameter-0-1, type-parameter-0-2> *) -> D<T...>' dependent trailing_return
// CHECK: |-InjectedClassNameType {{.*}} 'D<T...>' dependent
@@ -222,7 +222,7 @@ F s(0);
// CHECK-LABEL: Dumping <deduction guide for F>:
// CHECK: FunctionTemplateDecl
// CHECK: |-NonTypeTemplateParmDecl {{.*}} 'char' depth 0 index 0
-// CHECK: `-TemplateArgument expr
+// CHECK: `-TemplateArgument {{.*}} expr
// CHECK: | |-inherited from NonTypeTemplateParm {{.*}} '' 'char'
// CHECK: | `-CharacterLiteral {{.*}} 'char' 120
// CHECK: |-TemplateTypeParmDecl {{.*}} typename depth 0 index 1 U
@@ -299,3 +299,34 @@ using AFoo = Foo<G<U>>;
// CHECK-NEXT: `-ParmVarDecl {{.*}} 'G<int>'
AFoo aa(G<int>{});
+
+namespace TTP {
+ template<typename> struct A {};
+
+ template<class T> struct B {
+ template<template <class> typename TT> B(TT<T>);
+ };
+
+ B b(A<int>{});
+} // namespace TTP
+
+// CHECK-LABEL: Dumping TTP::<deduction guide for B>:
+// CHECK-NEXT: FunctionTemplateDecl 0x{{.+}} <{{.+}}:[[# @LINE - 7]]:5, col:51>
+// CHECK-NEXT: |-TemplateTypeParmDecl {{.+}} class depth 0 index 0 T{{$}}
+// CHECK-NEXT: |-TemplateTemplateParmDecl {{.+}} depth 0 index 1 TT{{$}}
+// CHECK-NEXT: | `-TemplateTypeParmDecl {{.+}} class depth 1 index 0{{$}}
+// CHECK-NEXT: |-CXXDeductionGuideDecl {{.+}} 'auto (<T>) -> B<T>'{{$}}
+// CHECK-NEXT: | `-ParmVarDecl {{.+}} '<T>'{{$}}
+// CHECK-NEXT: `-CXXDeductionGuideDecl {{.+}} 'auto (A<int>) -> TTP::B<int>'
+// CHECK-NEXT: |-TemplateArgument type 'int'
+// CHECK-NEXT: | `-BuiltinType {{.+}} 'int'{{$}}
+// CHECK-NEXT: |-TemplateArgument template A
+// CHECK-NEXT: `-ParmVarDecl {{.+}} 'A<int>':'TTP::A<int>'{{$}}
+// CHECK-NEXT: FunctionProtoType {{.+}} 'auto (<T>) -> B<T>' dependent trailing_return cdecl{{$}}
+// CHECK-NEXT: |-InjectedClassNameType {{.+}} 'B<T>' dependent{{$}}
+// CHECK-NEXT: | `-CXXRecord {{.+}} 'B'{{$}}
+// CHECK-NEXT: `-ElaboratedType {{.+}} '<T>' sugar dependent{{$}}
+// CHECK-NEXT: `-TemplateSpecializationType {{.+}} '<T>' dependent {{$}}
+// CHECK-NEXT: `-TemplateArgument type 'T'{{$}}
+// CHECK-NEXT: `-TemplateTypeParmType {{.+}} 'T' dependent depth 0 index 0{{$}}
+// CHECK-NEXT: `-TemplateTypeParm {{.+}} 'T'{{$}}
diff --git a/clang/test/SemaTemplate/dependent-names.cpp b/clang/test/SemaTemplate/dependent-names.cpp
index 641ec950054f..a7260b194462 100644
--- a/clang/test/SemaTemplate/dependent-names.cpp
+++ b/clang/test/SemaTemplate/dependent-names.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s
+// RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s
typedef double A;
template<typename T> class B {
@@ -334,8 +334,9 @@ int arr[sizeof(Sub)];
namespace PR11421 {
template < unsigned > struct X {
static const unsigned dimension = 3;
- template<unsigned dim=dimension>
- struct Y: Y<dim> { }; // expected-error{{circular inheritance between 'Y<dim>' and 'Y<dim>'}}
+ template<unsigned dim=dimension>
+ struct Y: Y<dim> { }; // expected-error{{base class has incomplete type}}
+ // expected-note@-1{{definition of 'Y<dim>' is not complete until the closing '}'}}
};
typedef X<3> X3;
X3::Y<>::iterator it; // expected-error {{no type named 'iterator' in 'PR11421::X<3>::Y<>'}}
@@ -344,11 +345,12 @@ X3::Y<>::iterator it; // expected-error {{no type named 'iterator' in 'PR11421::
namespace rdar12629723 {
template<class T>
struct X {
- struct C : public C { }; // expected-error{{circular inheritance between 'C' and 'rdar12629723::X::C'}}
+ struct C : public C { }; // expected-error{{base class has incomplete type}}
+ // expected-note@-1{{definition of 'rdar12629723::X::C' is not complete until the closing '}'}}
struct B;
- struct A : public B { // expected-note{{'A' declared here}}
+ struct A : public B {
virtual void foo() { }
};
@@ -357,7 +359,7 @@ namespace rdar12629723 {
};
template<class T>
- struct X<T>::B : public A { // expected-error{{circular inheritance between 'A' and 'rdar12629723::X::B'}}
+ struct X<T>::B : public A {
virtual void foo() { }
};
}
diff --git a/clang/test/SemaTemplate/destructor-template.cpp b/clang/test/SemaTemplate/destructor-template.cpp
index 890188294762..7a3398308bbe 100644
--- a/clang/test/SemaTemplate/destructor-template.cpp
+++ b/clang/test/SemaTemplate/destructor-template.cpp
@@ -1,12 +1,14 @@
// RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s
template<typename A> class s0 {
+ template<typename B> class s1;
+};
- template<typename B> class s1 : public s0<A> {
- ~s1() {}
- s0<A> ms0;
- };
-
+template<typename A>
+template<typename B>
+class s0<A>::s1 : s0<A> {
+ ~s1() {}
+ s0<A> ms0;
};
struct Incomplete;
@@ -28,7 +30,7 @@ namespace PR6152 {
y->template Y<T>::~Y<T>();
y->~Y();
}
-
+
template struct X<int>;
}
diff --git a/clang/test/SemaTemplate/make_integer_seq.cpp b/clang/test/SemaTemplate/make_integer_seq.cpp
index 3a692f5ae2bf..c5a1e2705368 100644
--- a/clang/test/SemaTemplate/make_integer_seq.cpp
+++ b/clang/test/SemaTemplate/make_integer_seq.cpp
@@ -61,7 +61,7 @@ using test2 = B<int, 1>;
template <template <class T, T...> class S, class T, int N> struct C {
using test3 = __make_integer_seq<S, T, N>;
-// CHECK: |-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:63:3, col:43> col:9 test3 '__make_integer_seq<S, T, N>':'__make_integer_seq<type-parameter-0-1, N>'
+// CHECK: |-TypeAliasDecl 0x{{[0-9A-Fa-f]+}} <line:63:3, col:43> col:9 test3 '__make_integer_seq<S, T, N>':'__make_integer_seq<template-parameter-0-0, type-parameter-0-1, N>'
// CHECK-NEXT: `-ElaboratedType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<S, T, N>' sugar dependent
// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<S, T, N>' sugar dependent alias __make_integer_seq
// CHECK-NEXT: |-TemplateArgument template S
@@ -71,7 +71,7 @@ template <template <class T, T...> class S, class T, int N> struct C {
// CHECK-NEXT: |-TemplateArgument expr
// CHECK-NEXT: | `-ImplicitCastExpr 0x{{[0-9A-Fa-f]+}} <col:42> 'T' <Dependent>
// CHECK-NEXT: | `-DeclRefExpr 0x{{[0-9A-Fa-f]+}} <col:42> 'int' NonTypeTemplateParm 0x{{[0-9A-Fa-f]+}} 'N' 'int'
-// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<type-parameter-0-1, N>' dependent __make_integer_seq
+// CHECK-NEXT: `-TemplateSpecializationType 0x{{[0-9A-Fa-f]+}} '__make_integer_seq<template-parameter-0-0, type-parameter-0-1, N>' dependent __make_integer_seq
// CHECK-NEXT: |-TemplateArgument template
// CHECK-NEXT: |-TemplateArgument type 'type-parameter-0-1'
// CHECK-NEXT: | `-TemplateTypeParmType 0x{{[0-9A-Fa-f]+}} 'type-parameter-0-1' dependent depth 0 index 1
diff --git a/clang/test/SemaTemplate/ms-function-specialization-class-scope.cpp b/clang/test/SemaTemplate/ms-function-specialization-class-scope.cpp
index c49d2cb2422f..e1f3ab37ad94 100644
--- a/clang/test/SemaTemplate/ms-function-specialization-class-scope.cpp
+++ b/clang/test/SemaTemplate/ms-function-specialization-class-scope.cpp
@@ -464,6 +464,32 @@ namespace UsesThis {
g1(x1);
g1(y0);
g1(y1);
+
+ T::f0(0);
+ T::f0(z);
+ T::f0(x0);
+ T::f0(x1);
+ T::f0(y0);
+ T::f0(y1);
+ T::g0(0);
+ T::g0(z);
+ T::g0(x0);
+ T::g0(x1);
+ T::g0(y0);
+ T::g0(y1);
+
+ E::f1(0);
+ E::f1(z);
+ E::f1(x0);
+ E::f1(x1);
+ E::f1(y0);
+ E::f1(y1);
+ E::g1(0);
+ E::g1(z);
+ E::g1(x0);
+ E::g1(x1);
+ E::g1(y0);
+ E::g1(y1);
}
template<>
@@ -519,6 +545,32 @@ namespace UsesThis {
g1(x1); // expected-error {{invalid use of member 'x1' in static member function}}
g1(y0);
g1(y1);
+
+ T::f0(0); // expected-error {{call to non-static member function without an object argument}}
+ T::f0(z); // expected-error {{call to non-static member function without an object argument}}
+ T::f0(x0); // expected-error {{call to non-static member function without an object argument}}
+ T::f0(x1); // expected-error {{call to non-static member function without an object argument}}
+ T::f0(y0); // expected-error {{call to non-static member function without an object argument}}
+ T::f0(y1); // expected-error {{call to non-static member function without an object argument}}
+ T::g0(0);
+ T::g0(z);
+ T::g0(x0); // expected-error {{invalid use of member 'x0' in static member function}}
+ T::g0(x1); // expected-error {{invalid use of member 'x1' in static member function}}
+ T::g0(y0);
+ T::g0(y1);
+
+ E::f1(0); // expected-error {{call to non-static member function without an object argument}}
+ E::f1(z); // expected-error {{call to non-static member function without an object argument}}
+ E::f1(x0); // expected-error {{call to non-static member function without an object argument}}
+ E::f1(x1); // expected-error {{call to non-static member function without an object argument}}
+ E::f1(y0); // expected-error {{call to non-static member function without an object argument}}
+ E::f1(y1); // expected-error {{call to non-static member function without an object argument}}
+ E::g1(0);
+ E::g1(z);
+ E::g1(x0); // expected-error {{invalid use of member 'x0' in static member function}}
+ E::g1(x1); // expected-error {{invalid use of member 'x1' in static member function}}
+ E::g1(y0);
+ E::g1(y1);
}
};
diff --git a/clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp b/clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp
index 534a5dc9ddc1..547e5945ac6b 100644
--- a/clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp
+++ b/clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp
@@ -102,7 +102,7 @@ public:
};
template class B<int>; // expected-note {{requested here}}
-}
+}
@@ -111,8 +111,8 @@ namespace lookup_dependent_base_class_default_argument {
template<class T>
class A {
public:
- static int f1(); // expected-note {{must qualify identifier to find this declaration in dependent base class}}
- int f2(); // expected-note {{must qualify identifier to find this declaration in dependent base class}}
+ static int f1(); // expected-note {{must qualify identifier to find this declaration in dependent base class}}
+ int f2(); // expected-note {{must qualify identifier to find this declaration in dependent base class}}
};
template<class T>
@@ -137,7 +137,7 @@ namespace lookup_dependent_base_class_friend {
template <class T>
class B {
public:
- static void g(); // expected-note {{must qualify identifier to find this declaration in dependent base class}}
+ static void g(); // expected-note {{must qualify identifier to find this declaration in dependent base class}}
};
template <class T>
@@ -228,7 +228,7 @@ template <typename T> struct C : T {
int *bar() { return &b; } // expected-error {{no member named 'b' in 'PR16014::C<A>'}} expected-warning {{lookup into dependent bases}}
int baz() { return T::b; } // expected-error {{no member named 'b' in 'PR16014::A'}}
int T::*qux() { return &T::b; } // expected-error {{no member named 'b' in 'PR16014::A'}}
- int T::*fuz() { return &U::a; } // expected-error {{use of undeclared identifier 'U'}} \
+ int T::*fuz() { return &U::a; } // expected-error {{no member named 'U' in 'PR16014::C<A>'}} \
// expected-warning {{unqualified lookup into dependent bases of class template 'C'}}
};
@@ -258,7 +258,7 @@ struct A : T {
::UndefClass::undef(); // expected-error {{no member named 'UndefClass' in the global namespace}}
}
void baz() {
- B::qux(); // expected-error {{use of undeclared identifier 'B'}} \
+ B::qux(); // expected-error {{no member named 'B' in 'PR19233::A<D>'}} \
// expected-warning {{unqualified lookup into dependent bases of class template 'A'}}
}
};
diff --git a/clang/test/SemaTemplate/typo-dependent-name.cpp b/clang/test/SemaTemplate/typo-dependent-name.cpp
index fb61b03e5010..5bd924241480 100644
--- a/clang/test/SemaTemplate/typo-dependent-name.cpp
+++ b/clang/test/SemaTemplate/typo-dependent-name.cpp
@@ -31,8 +31,7 @@ struct Y {
static int z;
template<int U>
- struct Inner : Y { // expected-note {{declared here}}
- };
+ struct Inner; // expected-note {{declared here}}
bool f(T other) {
// We can determine that 'inner' does not exist at parse time, so can
@@ -41,5 +40,9 @@ struct Y {
}
};
+template<typename T>
+template<int U>
+struct Y<T>::Inner : Y { };
+
struct Q { constexpr operator int() { return 0; } };
void use_y(Y<Q> x) { x.f(Q()); }
diff --git a/clang/tools/amdgpu-arch/AMDGPUArchByHIP.cpp b/clang/tools/amdgpu-arch/AMDGPUArchByHIP.cpp
index 7c9071be0918..7338872dbf32 100644
--- a/clang/tools/amdgpu-arch/AMDGPUArchByHIP.cpp
+++ b/clang/tools/amdgpu-arch/AMDGPUArchByHIP.cpp
@@ -1,4 +1,4 @@
-//===- AMDGPUArch.cpp - list AMDGPU installed ----------*- C++ -*---------===//
+//===- AMDGPUArchByHIP.cpp - list AMDGPU installed ----------*- C++ -*-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/clang/tools/amdgpu-arch/AMDGPUArchByHSA.cpp b/clang/tools/amdgpu-arch/AMDGPUArchByHSA.cpp
index f82a4890f465..432f2c414ed2 100644
--- a/clang/tools/amdgpu-arch/AMDGPUArchByHSA.cpp
+++ b/clang/tools/amdgpu-arch/AMDGPUArchByHSA.cpp
@@ -1,4 +1,4 @@
-//===- AMDGPUArchLinux.cpp - list AMDGPU installed ------*- C++ -*---------===//
+//===- AMDGPUArchByHSA.cpp - list AMDGPU installed ------*- C++ -*---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/clang/tools/clang-format/ClangFormat.cpp b/clang/tools/clang-format/ClangFormat.cpp
index 3fa5f81a3576..50dd073c4125 100644
--- a/clang/tools/clang-format/ClangFormat.cpp
+++ b/clang/tools/clang-format/ClangFormat.cpp
@@ -352,7 +352,7 @@ emitReplacementWarnings(const Replacements &Replaces, StringRef AssumedFileName,
unsigned Errors = 0;
if (WarnFormat && !NoWarnFormat) {
- llvm::SourceMgr Mgr;
+ SourceMgr Mgr;
const char *StartBuf = Code->getBufferStart();
Mgr.AddNewSourceBuffer(
@@ -447,11 +447,11 @@ static bool format(StringRef FileName, bool ErrorOnIncompleteFormat = false) {
return true;
}
- llvm::Expected<FormatStyle> FormatStyle =
+ Expected<FormatStyle> FormatStyle =
getStyle(Style, AssumedFileName, FallbackStyle, Code->getBuffer(),
nullptr, WNoErrorList.isSet(WNoError::Unknown));
if (!FormatStyle) {
- llvm::errs() << llvm::toString(FormatStyle.takeError()) << "\n";
+ llvm::errs() << toString(FormatStyle.takeError()) << "\n";
return true;
}
@@ -497,7 +497,7 @@ static bool format(StringRef FileName, bool ErrorOnIncompleteFormat = false) {
auto ChangedCode = tooling::applyAllReplacements(Code->getBuffer(), Replaces);
if (!ChangedCode) {
- llvm::errs() << llvm::toString(ChangedCode.takeError()) << "\n";
+ llvm::errs() << toString(ChangedCode.takeError()) << "\n";
return true;
}
// Get new affected ranges after sorting `#includes`.
@@ -567,14 +567,12 @@ static int dumpConfig() {
}
Code = std::move(CodeOrErr.get());
}
- llvm::Expected<clang::format::FormatStyle> FormatStyle =
- clang::format::getStyle(Style,
- FileNames.empty() || FileNames[0] == "-"
- ? AssumeFileName
- : FileNames[0],
- FallbackStyle, Code ? Code->getBuffer() : "");
+ Expected<clang::format::FormatStyle> FormatStyle = clang::format::getStyle(
+ Style,
+ FileNames.empty() || FileNames[0] == "-" ? AssumeFileName : FileNames[0],
+ FallbackStyle, Code ? Code->getBuffer() : "");
if (!FormatStyle) {
- llvm::errs() << llvm::toString(FormatStyle.takeError()) << "\n";
+ llvm::errs() << toString(FormatStyle.takeError()) << "\n";
return 1;
}
std::string Config = clang::format::configurationAsText(*FormatStyle);
@@ -671,7 +669,7 @@ static bool isIgnored(StringRef FilePath) {
}
int main(int argc, const char **argv) {
- llvm::InitLLVM X(argc, argv);
+ InitLLVM X(argc, argv);
cl::HideUnrelatedOptions(ClangFormatCategory);
diff --git a/clang/tools/clang-installapi/InstallAPIOpts.td b/clang/tools/clang-installapi/InstallAPIOpts.td
index a95a7a80a9d2..fc0fbe929c88 100644
--- a/clang/tools/clang-installapi/InstallAPIOpts.td
+++ b/clang/tools/clang-installapi/InstallAPIOpts.td
@@ -99,6 +99,9 @@ def X__ : Joined<["-"], "X">,
HelpText<"Pass <arg> to run unique clang invocation identified as <label>">,
MetaVarName<"<label> <arg>">;
+def option_list : Separate<["-"], "optionlist">, MetaVarName<"<path>">,
+ HelpText<"Specifies the <path> to a file that contains X<label> arguments to parse.">;
+
//
 /// Overridden clang options for different behavior.
//
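The new -optionlist flag is consumed by the JSON parsing added to Options.cpp below. A hypothetical input file (the label and arguments are illustrative):

    { "apple" : ["-DDarwin=1", "-I/extra/includes"] }

which the tool interprets as if '-Xapple -DDarwin=1 -Xapple -I/extra/includes' had been passed on the command line.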
diff --git a/clang/tools/clang-installapi/Options.cpp b/clang/tools/clang-installapi/Options.cpp
index 53340da704fc..95d28b7b040d 100644
--- a/clang/tools/clang-installapi/Options.cpp
+++ b/clang/tools/clang-installapi/Options.cpp
@@ -13,6 +13,7 @@
#include "clang/InstallAPI/HeaderFile.h"
#include "clang/InstallAPI/InstallAPIDiagnostic.h"
#include "llvm/BinaryFormat/Magic.h"
+#include "llvm/Support/JSON.h"
#include "llvm/Support/Program.h"
#include "llvm/TargetParser/Host.h"
#include "llvm/TextAPI/DylibReader.h"
@@ -82,6 +83,47 @@ static llvm::opt::OptTable *createDriverOptTable() {
return new DriverOptTable();
}
+/// Parse JSON input into an argument list.
+///
+/// Expected input format:
+///   { "label" : ["-ClangArg1", "-ClangArg2"] }
+///
+/// The input above is interpreted as "-Xlabel -ClangArg1 -Xlabel -ClangArg2".
+static Expected<llvm::opt::InputArgList>
+getArgListFromJSON(const StringRef Input, llvm::opt::OptTable *Table,
+ std::vector<std::string> &Storage) {
+ using namespace json;
+ Expected<Value> ValOrErr = json::parse(Input);
+ if (!ValOrErr)
+ return ValOrErr.takeError();
+
+ const Object *Root = ValOrErr->getAsObject();
+ if (!Root)
+ return llvm::opt::InputArgList();
+
+ for (const auto &KV : *Root) {
+ const Array *ArgList = KV.getSecond().getAsArray();
+ std::string Label = "-X" + KV.getFirst().str();
+ if (!ArgList)
+ return make_error<TextAPIError>(TextAPIErrorCode::InvalidInputFormat);
+    for (const auto &Arg : *ArgList) {
+ std::optional<StringRef> ArgStr = Arg.getAsString();
+ if (!ArgStr)
+ return make_error<TextAPIError>(TextAPIErrorCode::InvalidInputFormat);
+ Storage.emplace_back(Label);
+ Storage.emplace_back(*ArgStr);
+ }
+ }
+
+  // Size-construction would leave Storage.size() null pointers ahead of the
+  // appended arguments; start empty and reserve instead.
+  std::vector<const char *> CArgs;
+  CArgs.reserve(Storage.size());
+  llvm::for_each(Storage,
+                 [&CArgs](StringRef Str) { CArgs.emplace_back(Str.data()); });
+
+ unsigned MissingArgIndex, MissingArgCount;
+ return Table->ParseArgs(CArgs, MissingArgIndex, MissingArgCount);
+}
+
bool Options::processDriverOptions(InputArgList &Args) {
// Handle inputs.
llvm::append_range(DriverOpts.FileLists,
@@ -348,6 +390,31 @@ bool Options::processXarchOption(InputArgList &Args, arg_iterator Curr) {
return true;
}
+bool Options::processOptionList(InputArgList &Args,
+ llvm::opt::OptTable *Table) {
+ Arg *A = Args.getLastArg(OPT_option_list);
+ if (!A)
+ return true;
+
+ const StringRef Path = A->getValue(0);
+ auto InputOrErr = FM->getBufferForFile(Path);
+ if (auto Err = InputOrErr.getError()) {
+ Diags->Report(diag::err_cannot_open_file) << Path << Err.message();
+ return false;
+ }
+ // Backing storage referenced for argument processing.
+ std::vector<std::string> Storage;
+ auto ArgsOrErr =
+ getArgListFromJSON((*InputOrErr)->getBuffer(), Table, Storage);
+
+ if (auto Err = ArgsOrErr.takeError()) {
+ Diags->Report(diag::err_cannot_read_input_list)
+ << "option" << Path << toString(std::move(Err));
+ return false;
+ }
+ return processInstallAPIXOptions(*ArgsOrErr);
+}
+
bool Options::processLinkerOptions(InputArgList &Args) {
// Handle required arguments.
if (const Arg *A = Args.getLastArg(drv::OPT_install__name))
@@ -510,6 +577,9 @@ Options::processAndFilterOutInstallAPIOptions(ArrayRef<const char *> Args) {
if (!processInstallAPIXOptions(ParsedArgs))
return {};
+ if (!processOptionList(ParsedArgs, Table.get()))
+ return {};
+
DriverOpts.Demangle = ParsedArgs.hasArg(OPT_demangle);
if (auto *A = ParsedArgs.getLastArg(OPT_filetype)) {
@@ -818,7 +888,7 @@ InstallAPIContext Options::createContext() {
Expected<AliasMap> Result = parseAliasList(Buffer.get());
if (!Result) {
Diags->Report(diag::err_cannot_read_input_list)
- << /*IsFileList=*/false << ListPath << toString(Result.takeError());
+ << "symbol alias" << ListPath << toString(Result.takeError());
return Ctx;
}
Aliases.insert(Result.get().begin(), Result.get().end());
@@ -839,7 +909,7 @@ InstallAPIContext Options::createContext() {
if (auto Err = FileListReader::loadHeaders(std::move(Buffer.get()),
Ctx.InputHeaders, FM)) {
Diags->Report(diag::err_cannot_read_input_list)
- << /*IsFileList=*/true << ListPath << std::move(Err);
+ << "header file" << ListPath << std::move(Err);
return Ctx;
}
}
diff --git a/clang/tools/clang-installapi/Options.h b/clang/tools/clang-installapi/Options.h
index fd1e10065d10..b37f91efbda7 100644
--- a/clang/tools/clang-installapi/Options.h
+++ b/clang/tools/clang-installapi/Options.h
@@ -161,6 +161,8 @@ private:
bool processXarchOption(llvm::opt::InputArgList &Args, arg_iterator Curr);
bool processXplatformOption(llvm::opt::InputArgList &Args, arg_iterator Curr);
bool processXprojectOption(llvm::opt::InputArgList &Args, arg_iterator Curr);
+ bool processOptionList(llvm::opt::InputArgList &Args,
+ llvm::opt::OptTable *Table);
public:
/// The various options grouped together.
diff --git a/clang/tools/clang-repl/CMakeLists.txt b/clang/tools/clang-repl/CMakeLists.txt
index d3dec1984b78..4017b1445da0 100644
--- a/clang/tools/clang-repl/CMakeLists.txt
+++ b/clang/tools/clang-repl/CMakeLists.txt
@@ -11,6 +11,49 @@ add_clang_tool(clang-repl
ClangRepl.cpp
)
+if(MSVC)
+ set_target_properties(clang-repl PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS 1)
+
+ # RTTI/C++ symbols
+ set(clang_repl_exports ${clang_repl_exports} ??_7type_info@@6B@
+ ?__type_info_root_node@@3U__type_info_node@@A
+ ?nothrow@std@@3Unothrow_t@1@B
+ )
+
+ # Compiler-added symbols for static variables; not present before Visual Studio 2015.
+ set(clang_repl_exports ${clang_repl_exports} _Init_thread_abort _Init_thread_epoch
+ _Init_thread_footer _Init_thread_header _tls_index
+ )
+
+ if(CMAKE_SIZEOF_VOID_P EQUAL 8)
+ # new/delete variants needed when linking against the static MSVC runtime (esp. Debug)
+ set(clang_repl_exports ${clang_repl_exports}
+ ??2@YAPEAX_K@Z
+ ??3@YAXPEAX@Z
+ ??_U@YAPEAX_K@Z
+ ??_V@YAXPEAX@Z
+ ??3@YAXPEAX_K@Z
+ )
+ else()
+ set(clang_repl_exports ${clang_repl_exports}
+ ??2@YAPAXI@Z
+ ??3@YAXPAX@Z
+ ??3@YAXPAXI@Z
+ ??_U@YAPAXI@Z
+ ??_V@YAXPAX@Z
+ ??_V@YAXPAXI@Z
+ )
+ endif()
+
+ # Convert the list to '/EXPORT:sym0 /EXPORT:sym1 /EXPORT:sym2 ...'
+ foreach(sym ${clang_repl_exports})
+ set(clang_repl_link_str "${clang_repl_link_str} /EXPORT:${sym}")
+ endforeach()
+
+ set_property(TARGET clang-repl APPEND_STRING PROPERTY LINK_FLAGS ${clang_repl_link_str})
+
+endif()
+
clang_target_link_libraries(clang-repl PRIVATE
clangAST
clangBasic
diff --git a/clang/tools/clang-scan-deps/ClangScanDeps.cpp b/clang/tools/clang-scan-deps/ClangScanDeps.cpp
index f42af7e330e1..036e57c8d213 100644
--- a/clang/tools/clang-scan-deps/ClangScanDeps.cpp
+++ b/clang/tools/clang-scan-deps/ClangScanDeps.cpp
@@ -86,6 +86,8 @@ static bool DeprecatedDriverCommand;
static ResourceDirRecipeKind ResourceDirRecipe;
static bool Verbose;
static bool PrintTiming;
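+// File-scope so the saved argument strings outlive ParseArgs; CommandLine
+// stores const char * pointers into this allocator.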
+static llvm::BumpPtrAllocator Alloc;
+static llvm::StringSaver Saver{Alloc};
static std::vector<const char *> CommandLine;
#ifndef NDEBUG
@@ -99,8 +101,6 @@ static bool RoundTripArgs = DoRoundTripDefault;
static void ParseArgs(int argc, char **argv) {
ScanDepsOptTable Tbl;
llvm::StringRef ToolName = argv[0];
- llvm::BumpPtrAllocator Alloc;
- llvm::StringSaver Saver{Alloc};
llvm::opt::InputArgList Args =
Tbl.parseArgs(argc, argv, OPT_UNKNOWN, Saver, [&](StringRef Msg) {
llvm::errs() << Msg << '\n';
@@ -792,6 +792,11 @@ int clang_scan_deps_main(int argc, char **argv, const llvm::ToolContext &) {
llvm::cl::PrintOptionValues();
+ // Expand response files in advance, so that we can "see" all the arguments
+ // when adjusting below.
+ Compilations = expandResponseFiles(std::move(Compilations),
+ llvm::vfs::getRealFileSystem());
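+ // E.g. a database entry of ["clang", "@args.rsp"] is replaced by the
+ // arguments read from args.rsp before the adjusters below rewrite it.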
+
// The command options are rewritten to run Clang in preprocessor only mode.
auto AdjustingCompilations =
std::make_unique<tooling::ArgumentsAdjustingCompilations>(
diff --git a/clang/tools/driver/cc1as_main.cpp b/clang/tools/driver/cc1as_main.cpp
index 86afe22fac24..4eb753a7297a 100644
--- a/clang/tools/driver/cc1as_main.cpp
+++ b/clang/tools/driver/cc1as_main.cpp
@@ -576,9 +576,6 @@ static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts,
Str.get()->emitZeros(1);
}
- // Assembly to object compilation should leverage assembly info.
- Str->setUseAssemblerInfoForParsing(true);
-
bool Failed = false;
std::unique_ptr<MCAsmParser> Parser(
diff --git a/clang/tools/libclang/CIndex.cpp b/clang/tools/libclang/CIndex.cpp
index bfbdb5be9ff2..49ed60d990ca 100644
--- a/clang/tools/libclang/CIndex.cpp
+++ b/clang/tools/libclang/CIndex.cpp
@@ -776,10 +776,9 @@ bool CursorVisitor::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
}
// Visit the default argument.
- if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited())
- if (TypeSourceInfo *DefArg = D->getDefaultArgumentInfo())
- if (Visit(DefArg->getTypeLoc()))
- return true;
+ if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited() &&
+ VisitTemplateArgumentLoc(D->getDefaultArgument()))
+ return true;
return false;
}
@@ -946,8 +945,9 @@ bool CursorVisitor::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
return true;
- if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited())
- if (Expr *DefArg = D->getDefaultArgument())
- return Visit(MakeCXCursor(DefArg, StmtParent, TU, RegionOfInterest));
+ if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited() &&
+ VisitTemplateArgumentLoc(D->getDefaultArgument()))
+ return true;
return false;
}
@@ -2855,6 +2855,10 @@ void OpenACCClauseEnqueue::VisitWaitClause(const OpenACCWaitClause &C) {
}
void OpenACCClauseEnqueue::VisitDeviceTypeClause(
const OpenACCDeviceTypeClause &C) {}
+void OpenACCClauseEnqueue::VisitReductionClause(
+ const OpenACCReductionClause &C) {
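+ // Reduction clauses carry a variable list; enqueue it like the others.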
+ VisitVarList(C);
+}
} // namespace
void EnqueueVisitor::EnqueueChildren(const OpenACCClause *C) {
diff --git a/clang/tools/libclang/CMakeLists.txt b/clang/tools/libclang/CMakeLists.txt
index b5b6d2807d71..7b634003d11f 100644
--- a/clang/tools/libclang/CMakeLists.txt
+++ b/clang/tools/libclang/CMakeLists.txt
@@ -230,7 +230,7 @@ install(DIRECTORY ../../include/clang-c
# component and an install-component target, so add a dummy libclang-headers
# target to allow using it in LLVM_DISTRIBUTION_COMPONENTS.
add_custom_target(libclang-headers)
-set_target_properties(libclang-headers PROPERTIES FOLDER "Misc")
+set_target_properties(libclang-headers PROPERTIES FOLDER "Clang/Resources")
if (NOT LLVM_ENABLE_IDE)
add_llvm_install_targets(install-libclang-headers
diff --git a/clang/tools/scan-build-py/tests/functional/exec/CMakeLists.txt b/clang/tools/scan-build-py/tests/functional/exec/CMakeLists.txt
index 95c6fdb610e0..cb6ebda18372 100644
--- a/clang/tools/scan-build-py/tests/functional/exec/CMakeLists.txt
+++ b/clang/tools/scan-build-py/tests/functional/exec/CMakeLists.txt
@@ -2,11 +2,7 @@ project(exec C)
cmake_minimum_required(VERSION 3.20.0)
-include(CheckCCompilerFlag)
-check_c_compiler_flag("-std=c99" C99_SUPPORTED)
-if (C99_SUPPORTED)
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99")
-endif()
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99")
include(CheckFunctionExists)
include(CheckSymbolExists)
diff --git a/clang/unittests/AST/ASTImporterTest.cpp b/clang/unittests/AST/ASTImporterTest.cpp
index 4ee64de697d3..3dc1c336365d 100644
--- a/clang/unittests/AST/ASTImporterTest.cpp
+++ b/clang/unittests/AST/ASTImporterTest.cpp
@@ -1188,7 +1188,7 @@ TEST_P(ASTImporterOptionSpecificTestBase, TemplateTypeParmDeclDefaultArg) {
FromTU, templateTypeParmDecl(hasName("T")));
TemplateTypeParmDecl *To = Import(From, Lang_CXX03);
ASSERT_TRUE(To->hasDefaultArgument());
- QualType ToArg = To->getDefaultArgument();
+ QualType ToArg = To->getDefaultArgument().getArgument().getAsType();
ASSERT_EQ(ToArg, QualType(To->getASTContext().IntTy));
}
@@ -1260,7 +1260,7 @@ TEST_P(ASTImporterOptionSpecificTestBase, NonTypeTemplateParmDeclDefaultArg) {
FromTU, nonTypeTemplateParmDecl(hasName("S")));
NonTypeTemplateParmDecl *To = Import(From, Lang_CXX03);
ASSERT_TRUE(To->hasDefaultArgument());
- Stmt *ToArg = To->getDefaultArgument();
+ Stmt *ToArg = To->getDefaultArgument().getArgument().getAsExpr();
ASSERT_TRUE(isa<IntegerLiteral>(ToArg));
ASSERT_EQ(cast<IntegerLiteral>(ToArg)->getValue().getLimitedValue(), 1U);
}
diff --git a/clang/unittests/AST/DeclTest.cpp b/clang/unittests/AST/DeclTest.cpp
index 2530ce74eb6a..16aa2b50b7a0 100644
--- a/clang/unittests/AST/DeclTest.cpp
+++ b/clang/unittests/AST/DeclTest.cpp
@@ -545,3 +545,34 @@ TEST(Decl, TemplateArgumentDefaulted) {
EXPECT_TRUE(ArgList.get(2).getIsDefaulted());
EXPECT_TRUE(ArgList.get(3).getIsDefaulted());
}
+
+TEST(Decl, CXXDestructorDeclsShouldHaveWellFormedNameInfoRanges) {
+ // GH71161
+ llvm::Annotations Code(R"cpp(
+template <typename T> struct Resource {
+ ~Resource(); // 1
+};
+template <typename T>
+Resource<T>::~Resource() {} // 2,3
+
+void instantiate_template() {
+ Resource<int> x;
+}
+)cpp");
+
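+ // Each destructor declared above must report a name range covering just
+ // the destructor name (regression test for GH71161).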
+ auto AST = tooling::buildASTFromCode(Code.code());
+ ASTContext &Ctx = AST->getASTContext();
+
+ const auto &SM = Ctx.getSourceManager();
+ auto GetNameInfoRange = [&SM](const BoundNodes &Match) {
+ const auto *D = Match.getNodeAs<CXXDestructorDecl>("dtor");
+ return D->getNameInfo().getSourceRange().printToString(SM);
+ };
+
+ auto Matches = match(findAll(cxxDestructorDecl().bind("dtor")),
+ *Ctx.getTranslationUnitDecl(), Ctx);
+ ASSERT_EQ(Matches.size(), 3U);
+ EXPECT_EQ(GetNameInfoRange(Matches[0]), "<input.cc:3:3, col:4>");
+ EXPECT_EQ(GetNameInfoRange(Matches[1]), "<input.cc:6:14, col:15>");
+ EXPECT_EQ(GetNameInfoRange(Matches[2]), "<input.cc:6:14, col:15>");
+}
diff --git a/clang/unittests/AST/Interp/Descriptor.cpp b/clang/unittests/AST/Interp/Descriptor.cpp
index 053d579ea391..3157b4d401f9 100644
--- a/clang/unittests/AST/Interp/Descriptor.cpp
+++ b/clang/unittests/AST/Interp/Descriptor.cpp
@@ -22,9 +22,10 @@ TEST(Descriptor, Primitives) {
" char s[4];\n"
" A a[3];\n"
" short l[3][3];\n"
+ " int EmptyA[0];\n"
"};\n"
"constexpr S d = {0.0, \"foo\", {{true, false}, {false, true}, {false, false}},\n"
- " {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}};\n";
+ " {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}, {}};\n";
auto AST = tooling::buildASTFromCodeWithArgs(
Code, {"-fexperimental-new-constant-interpreter"});
@@ -64,7 +65,7 @@ TEST(Descriptor, Primitives) {
// Test the Record for the struct S.
const Record *SRecord = GlobalDesc->ElemRecord;
ASSERT_TRUE(SRecord);
- ASSERT_TRUE(SRecord->getNumFields() == 4);
+ ASSERT_TRUE(SRecord->getNumFields() == 5);
ASSERT_TRUE(SRecord->getNumBases() == 0);
ASSERT_FALSE(SRecord->getDestructor());
@@ -113,6 +114,16 @@ TEST(Descriptor, Primitives) {
ASSERT_TRUE(F4->Desc->getElemSize() > 0);
ASSERT_TRUE(F4->Desc->ElemDesc->isPrimitiveArray());
+ // Fifth field. Zero-size array.
+ const Record::Field *F5 = SRecord->getField(4u);
+ ASSERT_TRUE(F5);
+ ASSERT_FALSE(F5->isBitField());
+ ASSERT_TRUE(F5->Desc->isArray());
+ ASSERT_FALSE(F5->Desc->isCompositeArray());
+ ASSERT_TRUE(F5->Desc->isPrimitiveArray());
+ ASSERT_FALSE(F5->Desc->isPrimitive());
+ ASSERT_EQ(F5->Desc->getNumElems(), 0u);
+
// Check pointer stuff.
// Global variables have an inline descriptor.
ASSERT_TRUE(GlobalPtr.isRoot());
@@ -382,4 +393,13 @@ TEST(Descriptor, Primitives) {
ASSERT_EQ(PE3.getArray(), NE3);
ASSERT_EQ(PE3.getIndex(), 2u);
}
+
+ // Zero-size array.
+ {
+ const Pointer &PF5 = GlobalPtr.atField(F5->Offset);
+
+ ASSERT_TRUE(PF5.isZeroSizeArray());
+ ASSERT_FALSE(PF5.isOnePastEnd());
+ ASSERT_FALSE(PF5.isElementPastEnd());
+ }
}
diff --git a/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp b/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp
index 65df513d2713..2e42b8580895 100644
--- a/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp
+++ b/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp
@@ -614,8 +614,10 @@ TEST_P(ASTMatchersTest, MemberExpr_MatchesVariable) {
EXPECT_TRUE(matches("template <class T>"
"class X : T { void f() { this->T::v; } };",
cxxDependentScopeMemberExpr()));
- EXPECT_TRUE(matches("template <class T> class X : T { void f() { T::v; } };",
- cxxDependentScopeMemberExpr()));
+ // FIXME: Add a matcher for DependentScopeDeclRefExpr.
+ EXPECT_TRUE(
+ notMatches("template <class T> class X : T { void f() { T::v; } };",
+ cxxDependentScopeMemberExpr()));
EXPECT_TRUE(matches("template <class T> void x() { T t; t.v; }",
cxxDependentScopeMemberExpr()));
}
diff --git a/clang/unittests/CMakeLists.txt b/clang/unittests/CMakeLists.txt
index a8363ffa481e..e43ee7bfa88a 100644
--- a/clang/unittests/CMakeLists.txt
+++ b/clang/unittests/CMakeLists.txt
@@ -1,5 +1,5 @@
add_custom_target(ClangUnitTests)
-set_target_properties(ClangUnitTests PROPERTIES FOLDER "Clang tests")
+set_target_properties(ClangUnitTests PROPERTIES FOLDER "Clang/Tests")
if(CLANG_BUILT_STANDALONE)
# LLVMTesting* libraries are needed for some of the unittests.
diff --git a/clang/unittests/Driver/DXCModeTest.cpp b/clang/unittests/Driver/DXCModeTest.cpp
index 416723d498a2..41ab30bc81d5 100644
--- a/clang/unittests/Driver/DXCModeTest.cpp
+++ b/clang/unittests/Driver/DXCModeTest.cpp
@@ -156,9 +156,10 @@ TEST(DxcModeTest, ValidatorVersionValidation) {
TranslatedArgs.reset(
TC.TranslateArgs(*DAL, "0", Action::OffloadKind::OFK_None));
EXPECT_EQ(Diags.getNumErrors(), 1u);
- EXPECT_STREQ(DiagConsumer->Errors.back().c_str(),
- "invalid validator version : 0.1\nIf validator major version is "
- "0, minor version must also be 0.");
+ EXPECT_STREQ(
+ DiagConsumer->Errors.back().c_str(),
+ "invalid validator version : 0.1; if validator major version is 0, "
+ "minor version must also be 0");
Diags.Clear();
DiagConsumer->clear();
@@ -173,8 +174,8 @@ TEST(DxcModeTest, ValidatorVersionValidation) {
TC.TranslateArgs(*DAL, "0", Action::OffloadKind::OFK_None));
EXPECT_EQ(Diags.getNumErrors(), 2u);
EXPECT_STREQ(DiagConsumer->Errors.back().c_str(),
- "invalid validator version : 1\nFormat of validator version is "
- "\"<major>.<minor>\" (ex:\"1.4\").");
+ "invalid validator version : 1; format of validator version is "
+ "\"<major>.<minor>\" (ex:\"1.4\")");
Diags.Clear();
DiagConsumer->clear();
@@ -190,8 +191,8 @@ TEST(DxcModeTest, ValidatorVersionValidation) {
EXPECT_EQ(Diags.getNumErrors(), 3u);
EXPECT_STREQ(
DiagConsumer->Errors.back().c_str(),
- "invalid validator version : -Tlib_6_7\nFormat of validator version is "
- "\"<major>.<minor>\" (ex:\"1.4\").");
+ "invalid validator version : -Tlib_6_7; format of validator version is "
+ "\"<major>.<minor>\" (ex:\"1.4\")");
Diags.Clear();
DiagConsumer->clear();
@@ -207,8 +208,8 @@ TEST(DxcModeTest, ValidatorVersionValidation) {
EXPECT_EQ(Diags.getNumErrors(), 4u);
EXPECT_STREQ(
DiagConsumer->Errors.back().c_str(),
- "invalid validator version : foo\nFormat of validator version is "
- "\"<major>.<minor>\" (ex:\"1.4\").");
+ "invalid validator version : foo; format of validator version is "
+ "\"<major>.<minor>\" (ex:\"1.4\")");
Diags.Clear();
DiagConsumer->clear();
}
diff --git a/clang/unittests/Format/CleanupTest.cpp b/clang/unittests/Format/CleanupTest.cpp
index dc149b502bc5..a3801106e1ce 100644
--- a/clang/unittests/Format/CleanupTest.cpp
+++ b/clang/unittests/Format/CleanupTest.cpp
@@ -20,10 +20,9 @@ namespace clang {
namespace format {
namespace {
-class CleanupTest : public ::testing::Test {
+class CleanupTest : public testing::Test {
protected:
- std::string cleanup(llvm::StringRef Code,
- const std::vector<tooling::Range> &Ranges,
+ std::string cleanup(StringRef Code, const std::vector<tooling::Range> &Ranges,
const FormatStyle &Style = getLLVMStyle()) {
tooling::Replacements Replaces = format::cleanup(Style, Code, Ranges);
@@ -33,8 +32,7 @@ protected:
}
// Returns code after cleanup around \p Offsets.
- std::string cleanupAroundOffsets(llvm::ArrayRef<unsigned> Offsets,
- llvm::StringRef Code,
+ std::string cleanupAroundOffsets(ArrayRef<unsigned> Offsets, StringRef Code,
const FormatStyle &Style = getLLVMStyle()) {
std::vector<tooling::Range> Ranges;
for (auto Offset : Offsets)
@@ -332,7 +330,7 @@ protected:
const tooling::Replacements &Replaces) {
auto CleanReplaces = cleanupAroundReplacements(Code, Replaces, Style);
EXPECT_TRUE(static_cast<bool>(CleanReplaces))
- << llvm::toString(CleanReplaces.takeError()) << "\n";
+ << toString(CleanReplaces.takeError()) << "\n";
auto Result = applyAllReplacements(Code, *CleanReplaces);
EXPECT_TRUE(static_cast<bool>(Result));
return *Result;
@@ -342,10 +340,10 @@ protected:
const tooling::Replacements &Replaces) {
auto CleanReplaces = cleanupAroundReplacements(Code, Replaces, Style);
EXPECT_TRUE(static_cast<bool>(CleanReplaces))
- << llvm::toString(CleanReplaces.takeError()) << "\n";
+ << toString(CleanReplaces.takeError()) << "\n";
auto FormattedReplaces = formatReplacements(Code, *CleanReplaces, Style);
EXPECT_TRUE(static_cast<bool>(FormattedReplaces))
- << llvm::toString(FormattedReplaces.takeError()) << "\n";
+ << toString(FormattedReplaces.takeError()) << "\n";
auto Result = applyAllReplacements(Code, *FormattedReplaces);
EXPECT_TRUE(static_cast<bool>(Result));
return *Result;
diff --git a/clang/unittests/Format/ConfigParseTest.cpp b/clang/unittests/Format/ConfigParseTest.cpp
index 82e72f08ffb5..ff3ced38a1f3 100644
--- a/clang/unittests/Format/ConfigParseTest.cpp
+++ b/clang/unittests/Format/ConfigParseTest.cpp
@@ -480,6 +480,8 @@ TEST(ConfigParseTest, ParsesConfiguration) {
FormatStyle::ENAS_DontAlign);
CHECK_PARSE("AlignEscapedNewlines: Left", AlignEscapedNewlines,
FormatStyle::ENAS_Left);
+ CHECK_PARSE("AlignEscapedNewlines: LeftWithLastLine", AlignEscapedNewlines,
+ FormatStyle::ENAS_LeftWithLastLine);
CHECK_PARSE("AlignEscapedNewlines: Right", AlignEscapedNewlines,
FormatStyle::ENAS_Right);
// For backward compatibility:
diff --git a/clang/unittests/Format/DefinitionBlockSeparatorTest.cpp b/clang/unittests/Format/DefinitionBlockSeparatorTest.cpp
index 7a120935cfa9..b26b9f4f4ff6 100644
--- a/clang/unittests/Format/DefinitionBlockSeparatorTest.cpp
+++ b/clang/unittests/Format/DefinitionBlockSeparatorTest.cpp
@@ -18,10 +18,10 @@ namespace clang {
namespace format {
namespace {
-class DefinitionBlockSeparatorTest : public ::testing::Test {
+class DefinitionBlockSeparatorTest : public testing::Test {
protected:
static std::string
- separateDefinitionBlocks(llvm::StringRef Code,
+ separateDefinitionBlocks(StringRef Code,
const std::vector<tooling::Range> &Ranges,
const FormatStyle &Style = getLLVMStyle()) {
LLVM_DEBUG(llvm::errs() << "---\n");
@@ -34,18 +34,17 @@ protected:
}
static std::string
- separateDefinitionBlocks(llvm::StringRef Code,
+ separateDefinitionBlocks(StringRef Code,
const FormatStyle &Style = getLLVMStyle()) {
return separateDefinitionBlocks(
Code,
/*Ranges=*/{1, tooling::Range(0, Code.size())}, Style);
}
- static void _verifyFormat(const char *File, int Line, llvm::StringRef Code,
+ static void _verifyFormat(const char *File, int Line, StringRef Code,
const FormatStyle &Style = getLLVMStyle(),
- llvm::StringRef ExpectedCode = "",
- bool Inverse = true) {
- ::testing::ScopedTrace t(File, Line, ::testing::Message() << Code.str());
+ StringRef ExpectedCode = "", bool Inverse = true) {
+ testing::ScopedTrace t(File, Line, testing::Message() << Code.str());
bool HasOriginalCode = true;
if (ExpectedCode == "") {
ExpectedCode = Code;
@@ -70,7 +69,7 @@ protected:
EXPECT_EQ(ExpectedCode, Result) << "Test failed. Formatted:\n" << Result;
}
- static std::string removeEmptyLines(llvm::StringRef Code) {
+ static std::string removeEmptyLines(StringRef Code) {
std::string Result = "";
for (auto Char : Code.str()) {
if (Result.size()) {
@@ -165,13 +164,13 @@ TEST_F(DefinitionBlockSeparatorTest, Basic) {
TEST_F(DefinitionBlockSeparatorTest, FormatConflict) {
FormatStyle Style = getLLVMStyle();
Style.SeparateDefinitionBlocks = FormatStyle::SDS_Always;
- llvm::StringRef Code = "class Test {\n"
- "public:\n"
- " static void foo() {\n"
- " int t;\n"
- " return 1;\n"
- " }\n"
- "};";
+ StringRef Code = "class Test {\n"
+ "public:\n"
+ " static void foo() {\n"
+ " int t;\n"
+ " return 1;\n"
+ " }\n"
+ "};";
std::vector<tooling::Range> Ranges = {1, tooling::Range(0, Code.size())};
EXPECT_EQ(reformat(Style, Code, Ranges, "<stdin>").size(), 0u);
}
diff --git a/clang/unittests/Format/FormatTest.cpp b/clang/unittests/Format/FormatTest.cpp
index 6f57f10e12e8..76eb2b6cd994 100644
--- a/clang/unittests/Format/FormatTest.cpp
+++ b/clang/unittests/Format/FormatTest.cpp
@@ -3124,6 +3124,7 @@ TEST_F(FormatTest, FormatsLabels) {
" g();\n"
" }\n"
"}");
+
FormatStyle Style = getLLVMStyle();
Style.IndentGotoLabels = false;
verifyFormat("void f() {\n"
@@ -3163,6 +3164,13 @@ TEST_F(FormatTest, FormatsLabels) {
" }\n"
"}",
Style);
+
+ Style.ColumnLimit = 15;
+ verifyFormat("#define FOO \\\n"
+ "label: \\\n"
+ " break;",
+ Style);
+
// The opening brace may either be on the same unwrapped line as the colon or
// on a separate one. The formatter should recognize both.
Style = getLLVMStyle();
@@ -6628,6 +6636,26 @@ TEST_F(FormatTest, EscapedNewlines) {
" int x(int a);",
AlignLeft);
+ constexpr StringRef Code{"#define A \\\n"
+ " int a123; \\\n"
+ " int a; \\\n"
+ " int a1234;"};
+ verifyFormat(Code, AlignLeft);
+
+ constexpr StringRef Code2{"#define A \\\n"
+ " int a123; \\\n"
+ " int a; \\\n"
+ " int a1234;"};
+ auto LastLine = getLLVMStyle();
+ LastLine.AlignEscapedNewlines = FormatStyle::ENAS_LeftWithLastLine;
+ verifyFormat(Code2, LastLine);
+
+ LastLine.ColumnLimit = 13;
+ verifyFormat(Code, LastLine);
+
+ LastLine.ColumnLimit = 0;
+ verifyFormat(Code2, LastLine);
+
FormatStyle DontAlign = getLLVMStyle();
DontAlign.AlignEscapedNewlines = FormatStyle::ENAS_DontAlign;
DontAlign.MaxEmptyLinesToKeep = 3;
@@ -17332,12 +17360,14 @@ TEST_F(FormatTest, ConfigurableSpaceBeforeAssignmentOperators) {
verifyFormat("int a = 5;");
verifyFormat("a += 42;");
verifyFormat("a or_eq 8;");
+ verifyFormat("xor = foo;");
FormatStyle Spaces = getLLVMStyle();
Spaces.SpaceBeforeAssignmentOperators = false;
verifyFormat("int a= 5;", Spaces);
verifyFormat("a+= 42;", Spaces);
verifyFormat("a or_eq 8;", Spaces);
+ verifyFormat("xor= foo;", Spaces);
}
TEST_F(FormatTest, ConfigurableSpaceBeforeColon) {
diff --git a/clang/unittests/Format/FormatTestBase.h b/clang/unittests/Format/FormatTestBase.h
index eaadb1c9f83e..33110ca5d9ed 100644
--- a/clang/unittests/Format/FormatTestBase.h
+++ b/clang/unittests/Format/FormatTestBase.h
@@ -25,17 +25,17 @@ namespace test {
#define DEBUG_TYPE "format-test-base"
-class FormatTestBase : public ::testing::Test {
+class FormatTestBase : public testing::Test {
protected:
enum StatusCheck { SC_ExpectComplete, SC_ExpectIncomplete, SC_DoNotCheck };
virtual FormatStyle getDefaultStyle() const { return getLLVMStyle(); }
- virtual std::string messUp(llvm::StringRef Code) const {
+ virtual std::string messUp(StringRef Code) const {
return test::messUp(Code);
}
- std::string format(llvm::StringRef Code,
+ std::string format(StringRef Code,
const std::optional<FormatStyle> &Style = {},
StatusCheck CheckComplete = SC_ExpectComplete,
const std::vector<tooling::Range> &Ranges = {}) {
@@ -80,11 +80,11 @@ protected:
return Style;
}
- bool _verifyFormat(const char *File, int Line, llvm::StringRef Expected,
- llvm::StringRef Code,
+ bool _verifyFormat(const char *File, int Line, StringRef Expected,
+ StringRef Code,
const std::optional<FormatStyle> &Style = {},
const std::vector<tooling::Range> &Ranges = {}) {
- testing::ScopedTrace t(File, Line, ::testing::Message() << Code.str());
+ testing::ScopedTrace t(File, Line, testing::Message() << Code.str());
const auto ExpectedCode{Expected.str()};
auto FormattedCode{format(Code, Style, SC_ExpectComplete, Ranges)};
EXPECT_EQ(ExpectedCode, FormattedCode);
@@ -111,7 +111,7 @@ protected:
return true;
}
- void _verifyFormat(const char *File, int Line, llvm::StringRef Code,
+ void _verifyFormat(const char *File, int Line, StringRef Code,
const std::optional<FormatStyle> &Style = {}) {
if (!_verifyFormat(File, Line, Code, Code, Style))
return;
@@ -119,27 +119,26 @@ protected:
_verifyFormat(File, Line, Code, MessedUpCode, Style);
}
- void _verifyIncompleteFormat(const char *File, int Line, llvm::StringRef Code,
+ void _verifyIncompleteFormat(const char *File, int Line, StringRef Code,
const std::optional<FormatStyle> &Style = {}) {
- testing::ScopedTrace t(File, Line, ::testing::Message() << Code.str());
+ testing::ScopedTrace t(File, Line, testing::Message() << Code.str());
EXPECT_EQ(Code.str(), format(messUp(Code), Style, SC_ExpectIncomplete));
}
void
- _verifyIndependentOfContext(const char *File, int Line, llvm::StringRef Text,
+ _verifyIndependentOfContext(const char *File, int Line, StringRef Text,
const std::optional<FormatStyle> &Style = {}) {
_verifyFormat(File, Line, Text, Style);
- _verifyFormat(File, Line, llvm::Twine("void f() { " + Text + " }").str(),
- Style);
+ _verifyFormat(File, Line, Twine("void f() { " + Text + " }").str(), Style);
}
- void _verifyNoChange(const char *File, int Line, llvm::StringRef Code,
+ void _verifyNoChange(const char *File, int Line, StringRef Code,
const std::optional<FormatStyle> &Style = {}) {
_verifyFormat(File, Line, Code, Code, Style);
}
/// \brief Verify that clang-format does not crash on the given input.
- void verifyNoCrash(llvm::StringRef Code,
+ void verifyNoCrash(StringRef Code,
const std::optional<FormatStyle> &Style = {}) {
format(Code, Style, SC_DoNotCheck);
}
diff --git a/clang/unittests/Format/FormatTestCSharp.cpp b/clang/unittests/Format/FormatTestCSharp.cpp
index de261c094830..7166e4ec4de3 100644
--- a/clang/unittests/Format/FormatTestCSharp.cpp
+++ b/clang/unittests/Format/FormatTestCSharp.cpp
@@ -21,8 +21,8 @@ protected:
return getMicrosoftStyle(FormatStyle::LK_CSharp);
}
- static std::string format(llvm::StringRef Code, unsigned Offset,
- unsigned Length, const FormatStyle &Style) {
+ static std::string format(StringRef Code, unsigned Offset, unsigned Length,
+ const FormatStyle &Style) {
LLVM_DEBUG(llvm::errs() << "---\n");
LLVM_DEBUG(llvm::errs() << Code << "\n\n");
std::vector<tooling::Range> Ranges(1, tooling::Range(Offset, Length));
@@ -34,7 +34,7 @@ protected:
}
static std::string
- format(llvm::StringRef Code,
+ format(StringRef Code,
const FormatStyle &Style = getMicrosoftStyle(FormatStyle::LK_CSharp)) {
return format(Code, 0, Code.size(), Style);
}
diff --git a/clang/unittests/Format/FormatTestJS.cpp b/clang/unittests/Format/FormatTestJS.cpp
index 3aded8f3726d..b910ce620de7 100644
--- a/clang/unittests/Format/FormatTestJS.cpp
+++ b/clang/unittests/Format/FormatTestJS.cpp
@@ -16,10 +16,10 @@
namespace clang {
namespace format {
-class FormatTestJS : public ::testing::Test {
+class FormatTestJS : public testing::Test {
protected:
- static std::string format(llvm::StringRef Code, unsigned Offset,
- unsigned Length, const FormatStyle &Style) {
+ static std::string format(StringRef Code, unsigned Offset, unsigned Length,
+ const FormatStyle &Style) {
LLVM_DEBUG(llvm::errs() << "---\n");
LLVM_DEBUG(llvm::errs() << Code << "\n\n");
std::vector<tooling::Range> Ranges(1, tooling::Range(Offset, Length));
@@ -34,7 +34,7 @@ protected:
}
static std::string format(
- llvm::StringRef Code,
+ StringRef Code,
const FormatStyle &Style = getGoogleStyle(FormatStyle::LK_JavaScript)) {
return format(Code, 0, Code.size(), Style);
}
@@ -46,7 +46,7 @@ protected:
}
static void verifyFormat(
- llvm::StringRef Code,
+ StringRef Code,
const FormatStyle &Style = getGoogleStyle(FormatStyle::LK_JavaScript)) {
EXPECT_EQ(Code.str(), format(Code, Style)) << "Expected code is not stable";
std::string Result = format(test::messUp(Code), Style);
@@ -54,7 +54,7 @@ protected:
}
static void verifyFormat(
- llvm::StringRef Expected, llvm::StringRef Code,
+ StringRef Expected, StringRef Code,
const FormatStyle &Style = getGoogleStyle(FormatStyle::LK_JavaScript)) {
EXPECT_EQ(Expected.str(), format(Expected, Style))
<< "Expected code is not stable";
diff --git a/clang/unittests/Format/FormatTestJson.cpp b/clang/unittests/Format/FormatTestJson.cpp
index 3254802dc0d6..60e9f17855f7 100644
--- a/clang/unittests/Format/FormatTestJson.cpp
+++ b/clang/unittests/Format/FormatTestJson.cpp
@@ -16,10 +16,10 @@
namespace clang {
namespace format {
-class FormatTestJson : public ::testing::Test {
+class FormatTestJson : public testing::Test {
protected:
- static std::string format(llvm::StringRef Code, unsigned Offset,
- unsigned Length, const FormatStyle &Style) {
+ static std::string format(StringRef Code, unsigned Offset, unsigned Length,
+ const FormatStyle &Style) {
LLVM_DEBUG(llvm::errs() << "---\n");
LLVM_DEBUG(llvm::errs() << Code << "\n\n");
@@ -47,7 +47,7 @@ protected:
}
static std::string
- format(llvm::StringRef Code,
+ format(StringRef Code,
const FormatStyle &Style = getLLVMStyle(FormatStyle::LK_Json)) {
return format(Code, 0, Code.size(), Style);
}
@@ -58,13 +58,12 @@ protected:
return Style;
}
- static void verifyFormatStable(llvm::StringRef Code,
- const FormatStyle &Style) {
+ static void verifyFormatStable(StringRef Code, const FormatStyle &Style) {
EXPECT_EQ(Code.str(), format(Code, Style)) << "Expected code is not stable";
}
static void
- verifyFormat(llvm::StringRef Code,
+ verifyFormat(StringRef Code,
const FormatStyle &Style = getLLVMStyle(FormatStyle::LK_Json)) {
verifyFormatStable(Code, Style);
EXPECT_EQ(Code.str(), format(test::messUp(Code), Style));
diff --git a/clang/unittests/Format/FormatTestProto.cpp b/clang/unittests/Format/FormatTestProto.cpp
index 4a2d2d68248d..5adb532ae4a4 100644
--- a/clang/unittests/Format/FormatTestProto.cpp
+++ b/clang/unittests/Format/FormatTestProto.cpp
@@ -16,10 +16,10 @@
namespace clang {
namespace format {
-class FormatTestProto : public ::testing::Test {
+class FormatTestProto : public testing::Test {
protected:
- static std::string format(llvm::StringRef Code, unsigned Offset,
- unsigned Length, const FormatStyle &Style) {
+ static std::string format(StringRef Code, unsigned Offset, unsigned Length,
+ const FormatStyle &Style) {
LLVM_DEBUG(llvm::errs() << "---\n");
LLVM_DEBUG(llvm::errs() << Code << "\n\n");
std::vector<tooling::Range> Ranges(1, tooling::Range(Offset, Length));
@@ -30,13 +30,13 @@ protected:
return *Result;
}
- static std::string format(llvm::StringRef Code) {
+ static std::string format(StringRef Code) {
FormatStyle Style = getGoogleStyle(FormatStyle::LK_Proto);
Style.ColumnLimit = 60; // To make writing tests easier.
return format(Code, 0, Code.size(), Style);
}
- static void verifyFormat(llvm::StringRef Code) {
+ static void verifyFormat(StringRef Code) {
EXPECT_EQ(Code.str(), format(Code)) << "Expected code is not stable";
EXPECT_EQ(Code.str(), format(test::messUp(Code)));
}
diff --git a/clang/unittests/Format/FormatTestRawStrings.cpp b/clang/unittests/Format/FormatTestRawStrings.cpp
index 10f341cc8f79..0615fb1fad4c 100644
--- a/clang/unittests/Format/FormatTestRawStrings.cpp
+++ b/clang/unittests/Format/FormatTestRawStrings.cpp
@@ -21,12 +21,11 @@ namespace clang {
namespace format {
namespace {
-class FormatTestRawStrings : public ::testing::Test {
+class FormatTestRawStrings : public testing::Test {
protected:
enum StatusCheck { SC_ExpectComplete, SC_ExpectIncomplete, SC_DoNotCheck };
- std::string format(llvm::StringRef Code,
- const FormatStyle &Style = getLLVMStyle(),
+ std::string format(StringRef Code, const FormatStyle &Style = getLLVMStyle(),
StatusCheck CheckComplete = SC_ExpectComplete) {
LLVM_DEBUG(llvm::errs() << "---\n");
LLVM_DEBUG(llvm::errs() << Code << "\n\n");
diff --git a/clang/unittests/Format/FormatTestSelective.cpp b/clang/unittests/Format/FormatTestSelective.cpp
index c21c9bfe6079..3ae70a15d359 100644
--- a/clang/unittests/Format/FormatTestSelective.cpp
+++ b/clang/unittests/Format/FormatTestSelective.cpp
@@ -17,9 +17,9 @@ namespace clang {
namespace format {
namespace {
-class FormatTestSelective : public ::testing::Test {
+class FormatTestSelective : public testing::Test {
protected:
- std::string format(llvm::StringRef Code, unsigned Offset, unsigned Length) {
+ std::string format(StringRef Code, unsigned Offset, unsigned Length) {
LLVM_DEBUG(llvm::errs() << "---\n");
LLVM_DEBUG(llvm::errs() << Code << "\n\n");
std::vector<tooling::Range> Ranges(1, tooling::Range(Offset, Length));
diff --git a/clang/unittests/Format/FormatTestTableGen.cpp b/clang/unittests/Format/FormatTestTableGen.cpp
index 79b6961b00b4..7771f6a109a9 100644
--- a/clang/unittests/Format/FormatTestTableGen.cpp
+++ b/clang/unittests/Format/FormatTestTableGen.cpp
@@ -16,10 +16,10 @@
namespace clang {
namespace format {
-class FormatTestTableGen : public ::testing::Test {
+class FormatTestTableGen : public testing::Test {
protected:
- static std::string format(llvm::StringRef Code, unsigned Offset,
- unsigned Length, const FormatStyle &Style) {
+ static std::string format(StringRef Code, unsigned Offset, unsigned Length,
+ const FormatStyle &Style) {
LLVM_DEBUG(llvm::errs() << "---\n");
LLVM_DEBUG(llvm::errs() << Code << "\n\n");
std::vector<tooling::Range> Ranges(1, tooling::Range(Offset, Length));
@@ -30,22 +30,22 @@ protected:
return *Result;
}
- static std::string format(llvm::StringRef Code) {
+ static std::string format(StringRef Code) {
FormatStyle Style = getGoogleStyle(FormatStyle::LK_TableGen);
Style.ColumnLimit = 60; // To make writing tests easier.
return format(Code, 0, Code.size(), Style);
}
- static void verifyFormat(llvm::StringRef Code) {
+ static void verifyFormat(StringRef Code) {
EXPECT_EQ(Code.str(), format(Code)) << "Expected code is not stable";
EXPECT_EQ(Code.str(), format(test::messUp(Code)));
}
- static void verifyFormat(llvm::StringRef Result, llvm::StringRef MessedUp) {
+ static void verifyFormat(StringRef Result, StringRef MessedUp) {
EXPECT_EQ(Result, format(MessedUp));
}
- static void verifyFormat(llvm::StringRef Code, const FormatStyle &Style) {
+ static void verifyFormat(StringRef Code, const FormatStyle &Style) {
EXPECT_EQ(Code.str(), format(Code, 0, Code.size(), Style))
<< "Expected code is not stable";
auto MessUp = test::messUp(Code);
diff --git a/clang/unittests/Format/FormatTestUtils.h b/clang/unittests/Format/FormatTestUtils.h
index fa42b61c547d..cb55b2d747b6 100644
--- a/clang/unittests/Format/FormatTestUtils.h
+++ b/clang/unittests/Format/FormatTestUtils.h
@@ -27,7 +27,7 @@ inline FormatStyle getGoogleStyle() {
// When HandleHash is false, preprocessor directives starting with hash will not
// be on separate lines. This is needed because Verilog uses hash for other
// purposes.
-inline std::string messUp(llvm::StringRef Code, bool HandleHash = true) {
+inline std::string messUp(StringRef Code, bool HandleHash = true) {
std::string MessedUp(Code.str());
bool InComment = false;
bool InPreprocessorDirective = false;
diff --git a/clang/unittests/Format/FormatTestVerilog.cpp b/clang/unittests/Format/FormatTestVerilog.cpp
index abebf9f7d4c7..b5241a4e0d6a 100644
--- a/clang/unittests/Format/FormatTestVerilog.cpp
+++ b/clang/unittests/Format/FormatTestVerilog.cpp
@@ -19,7 +19,7 @@ protected:
FormatStyle getDefaultStyle() const override {
return getLLVMStyle(FormatStyle::LK_Verilog);
}
- std::string messUp(llvm::StringRef Code) const override {
+ std::string messUp(StringRef Code) const override {
return test::messUp(Code, /*HandleHash=*/false);
}
};
diff --git a/clang/unittests/Format/FormatTokenSourceTest.cpp b/clang/unittests/Format/FormatTokenSourceTest.cpp
index 74de93057df6..4f19e255004f 100644
--- a/clang/unittests/Format/FormatTokenSourceTest.cpp
+++ b/clang/unittests/Format/FormatTokenSourceTest.cpp
@@ -15,10 +15,9 @@ namespace clang {
namespace format {
namespace {
-class IndexedTokenSourceTest : public ::testing::Test {
+class IndexedTokenSourceTest : public testing::Test {
protected:
- TokenList lex(llvm::StringRef Code,
- const FormatStyle &Style = getLLVMStyle()) {
+ TokenList lex(StringRef Code, const FormatStyle &Style = getLLVMStyle()) {
return TestLexer(Allocator, Buffers, Style).lex(Code);
}
llvm::SpecificBumpPtrAllocator<FormatToken> Allocator;
diff --git a/clang/unittests/Format/MacroCallReconstructorTest.cpp b/clang/unittests/Format/MacroCallReconstructorTest.cpp
index 9df21eae70cb..acef5e79eaae 100644
--- a/clang/unittests/Format/MacroCallReconstructorTest.cpp
+++ b/clang/unittests/Format/MacroCallReconstructorTest.cpp
@@ -30,15 +30,14 @@ public:
// Appends the token stream obtained from expanding the macro Name given
// the provided arguments, to be later retrieved with getTokens().
// Returns the list of tokens making up the unexpanded macro call.
- TokenList
- expand(llvm::StringRef Name,
- const SmallVector<llvm::SmallVector<FormatToken *, 8>, 1> &Args) {
+ TokenList expand(StringRef Name,
+ const SmallVector<SmallVector<FormatToken *, 8>, 1> &Args) {
return expandInternal(Name, Args);
}
- TokenList expand(llvm::StringRef Name) { return expandInternal(Name, {}); }
+ TokenList expand(StringRef Name) { return expandInternal(Name, {}); }
- TokenList expand(llvm::StringRef Name, const std::vector<std::string> &Args) {
+ TokenList expand(StringRef Name, const std::vector<std::string> &Args) {
return expandInternal(Name, lexArgs(Args));
}
@@ -48,8 +47,8 @@ public:
private:
TokenList expandInternal(
- llvm::StringRef Name,
- const std::optional<SmallVector<llvm::SmallVector<FormatToken *, 8>, 1>>
+ StringRef Name,
+ const std::optional<SmallVector<SmallVector<FormatToken *, 8>, 1>>
&Args) {
auto *ID = Lex.id(Name);
auto UnexpandedLine = std::make_unique<UnwrappedLine>();
@@ -75,26 +74,25 @@ private:
return UnexpandedTokens;
}
- llvm::SmallVector<TokenList, 1>
- lexArgs(const std::vector<std::string> &Args) {
- llvm::SmallVector<TokenList, 1> Result;
+ SmallVector<TokenList, 1> lexArgs(const std::vector<std::string> &Args) {
+ SmallVector<TokenList, 1> Result;
for (const auto &Arg : Args)
Result.push_back(uneof(Lex.lex(Arg)));
return Result;
}
llvm::DenseMap<FormatToken *, std::unique_ptr<UnwrappedLine>> Unexpanded;
- llvm::SmallVector<FormatToken *, 8> Tokens;
+ SmallVector<FormatToken *, 8> Tokens;
TestLexer &Lex;
MacroExpander &Macros;
};
struct Chunk {
- Chunk(llvm::ArrayRef<FormatToken *> Tokens)
+ Chunk(ArrayRef<FormatToken *> Tokens)
: Tokens(Tokens.begin(), Tokens.end()) {}
- Chunk(llvm::ArrayRef<UnwrappedLine> Children)
+ Chunk(ArrayRef<UnwrappedLine> Children)
: Children(Children.begin(), Children.end()) {}
- llvm::SmallVector<UnwrappedLineNode, 1> Tokens;
- llvm::SmallVector<UnwrappedLine, 0> Children;
+ SmallVector<UnwrappedLineNode, 1> Tokens;
+ SmallVector<UnwrappedLine, 0> Children;
};
// Allows to produce chunks of a token list by typing the code of equal tokens.
@@ -140,7 +138,7 @@ UnexpandedMap mergeUnexpanded(const UnexpandedMap &M1,
return Result;
}
-class MacroCallReconstructorTest : public ::testing::Test {
+class MacroCallReconstructorTest : public testing::Test {
public:
MacroCallReconstructorTest() : Lex(Allocator, Buffers) {}
@@ -151,7 +149,7 @@ public:
Lex.Allocator, Lex.IdentTable);
}
- UnwrappedLine line(llvm::ArrayRef<FormatToken *> Tokens, unsigned Level = 0) {
+ UnwrappedLine line(ArrayRef<FormatToken *> Tokens, unsigned Level = 0) {
UnwrappedLine Result;
Result.Level = Level;
for (FormatToken *Tok : Tokens)
@@ -159,11 +157,11 @@ public:
return Result;
}
- UnwrappedLine line(llvm::StringRef Text, unsigned Level = 0) {
+ UnwrappedLine line(StringRef Text, unsigned Level = 0) {
return line({lex(Text)}, Level);
}
- UnwrappedLine line(llvm::ArrayRef<Chunk> Chunks, unsigned Level = 0) {
+ UnwrappedLine line(ArrayRef<Chunk> Chunks, unsigned Level = 0) {
UnwrappedLine Result;
Result.Level = Level;
for (const Chunk &Chunk : Chunks) {
@@ -176,13 +174,11 @@ public:
return Result;
}
- TokenList lex(llvm::StringRef Text) { return uneof(Lex.lex(Text)); }
+ TokenList lex(StringRef Text) { return uneof(Lex.lex(Text)); }
- Chunk tokens(llvm::StringRef Text) { return Chunk(lex(Text)); }
+ Chunk tokens(StringRef Text) { return Chunk(lex(Text)); }
- Chunk children(llvm::ArrayRef<UnwrappedLine> Children) {
- return Chunk(Children);
- }
+ Chunk children(ArrayRef<UnwrappedLine> Children) { return Chunk(Children); }
llvm::SpecificBumpPtrAllocator<FormatToken> Allocator;
std::vector<std::unique_ptr<llvm::MemoryBuffer>> Buffers;
diff --git a/clang/unittests/Format/MacroExpanderTest.cpp b/clang/unittests/Format/MacroExpanderTest.cpp
index 72302aa0cea7..e001c986dc2b 100644
--- a/clang/unittests/Format/MacroExpanderTest.cpp
+++ b/clang/unittests/Format/MacroExpanderTest.cpp
@@ -9,7 +9,7 @@ namespace format {
namespace {
-class MacroExpanderTest : public ::testing::Test {
+class MacroExpanderTest : public testing::Test {
public:
MacroExpanderTest() : Lex(Allocator, Buffers) {}
std::unique_ptr<MacroExpander>
@@ -19,33 +19,32 @@ public:
Lex.Allocator, Lex.IdentTable);
}
- std::string expand(MacroExpander &Macros, llvm::StringRef Name) {
+ std::string expand(MacroExpander &Macros, StringRef Name) {
EXPECT_TRUE(Macros.defined(Name))
<< "Macro not defined: \"" << Name << "\"";
return text(Macros.expand(Lex.id(Name), {}));
}
- std::string expand(MacroExpander &Macros, llvm::StringRef Name,
+ std::string expand(MacroExpander &Macros, StringRef Name,
const std::vector<std::string> &Args) {
EXPECT_TRUE(Macros.defined(Name))
<< "Macro not defined: \"" << Name << "\"";
return text(Macros.expand(Lex.id(Name), lexArgs(Args)));
}
- llvm::SmallVector<TokenList, 1>
- lexArgs(const std::vector<std::string> &Args) {
- llvm::SmallVector<TokenList, 1> Result;
+ SmallVector<TokenList, 1> lexArgs(const std::vector<std::string> &Args) {
+ SmallVector<TokenList, 1> Result;
for (const auto &Arg : Args)
Result.push_back(uneof(Lex.lex(Arg)));
return Result;
}
struct MacroAttributes {
- clang::tok::TokenKind Kind;
+ tok::TokenKind Kind;
MacroRole Role;
unsigned Start;
unsigned End;
- llvm::SmallVector<FormatToken *, 1> ExpandedFrom;
+ SmallVector<FormatToken *, 1> ExpandedFrom;
};
void expectAttributes(const TokenList &Tokens,
@@ -56,8 +55,8 @@ public:
if (I >= Attributes.size())
continue;
std::string Context =
- ("for token " + llvm::Twine(I) + ": " + Tokens[I]->Tok.getName() +
- " / " + Tokens[I]->TokenText)
+ ("for token " + Twine(I) + ": " + Tokens[I]->Tok.getName() + " / " +
+ Tokens[I]->TokenText)
.str();
EXPECT_TRUE(Tokens[I]->is(Attributes[I].Kind))
<< Context << " in " << text(Tokens) << " at " << File << ":" << Line;
diff --git a/clang/unittests/Format/MatchFilePathTest.cpp b/clang/unittests/Format/MatchFilePathTest.cpp
index 55723584ddc8..f41cf7f97159 100644
--- a/clang/unittests/Format/MatchFilePathTest.cpp
+++ b/clang/unittests/Format/MatchFilePathTest.cpp
@@ -13,7 +13,7 @@ namespace clang {
namespace format {
namespace {
-class MatchFilePathTest : public ::testing::Test {
+class MatchFilePathTest : public testing::Test {
protected:
bool match(llvm::StringRef FilePath, llvm::StringRef Pattern) {
return matchFilePath(Pattern, FilePath);
diff --git a/clang/unittests/Format/NamespaceEndCommentsFixerTest.cpp b/clang/unittests/Format/NamespaceEndCommentsFixerTest.cpp
index fe097e9961e2..2c45ad1cbe1c 100644
--- a/clang/unittests/Format/NamespaceEndCommentsFixerTest.cpp
+++ b/clang/unittests/Format/NamespaceEndCommentsFixerTest.cpp
@@ -17,16 +17,16 @@ namespace clang {
namespace format {
namespace {
-class NamespaceEndCommentsFixerTest : public ::testing::Test {
+class NamespaceEndCommentsFixerTest : public testing::Test {
protected:
std::string
- fixNamespaceEndComments(llvm::StringRef Code,
+ fixNamespaceEndComments(StringRef Code,
const std::vector<tooling::Range> &Ranges,
const FormatStyle &Style = getLLVMStyle()) {
LLVM_DEBUG(llvm::errs() << "---\n");
LLVM_DEBUG(llvm::errs() << Code << "\n\n");
tooling::Replacements Replaces =
- clang::format::fixNamespaceEndComments(Style, Code, Ranges, "<stdin>");
+ format::fixNamespaceEndComments(Style, Code, Ranges, "<stdin>");
auto Result = applyAllReplacements(Code, Replaces);
EXPECT_TRUE(static_cast<bool>(Result));
LLVM_DEBUG(llvm::errs() << "\n" << *Result << "\n\n");
@@ -34,7 +34,7 @@ protected:
}
std::string
- fixNamespaceEndComments(llvm::StringRef Code,
+ fixNamespaceEndComments(StringRef Code,
const FormatStyle &Style = getLLVMStyle()) {
return fixNamespaceEndComments(
Code,
@@ -43,8 +43,7 @@ protected:
bool isFormatted(StringRef Code, const std::vector<tooling::Range> &Ranges,
const FormatStyle &Style = getLLVMStyle()) const {
- return clang::format::fixNamespaceEndComments(Style, Code, Ranges,
- "<stdin>")
+ return format::fixNamespaceEndComments(Style, Code, Ranges, "<stdin>")
.empty();
}
diff --git a/clang/unittests/Format/ObjCPropertyAttributeOrderFixerTest.cpp b/clang/unittests/Format/ObjCPropertyAttributeOrderFixerTest.cpp
index 79ded6673591..9f852e4768b1 100644
--- a/clang/unittests/Format/ObjCPropertyAttributeOrderFixerTest.cpp
+++ b/clang/unittests/Format/ObjCPropertyAttributeOrderFixerTest.cpp
@@ -28,7 +28,7 @@ namespace {
class ObjCPropertyAttributeOrderFixerTest : public FormatTestBase {
protected:
- TokenList annotate(llvm::StringRef Code,
+ TokenList annotate(StringRef Code,
const FormatStyle &Style = getLLVMStyle()) {
return TestLexer(Allocator, Buffers, Style).annotate(Code);
}
diff --git a/clang/unittests/Format/QualifierFixerTest.cpp b/clang/unittests/Format/QualifierFixerTest.cpp
index 1f21fc0e0b42..3a5f63e5de65 100644
--- a/clang/unittests/Format/QualifierFixerTest.cpp
+++ b/clang/unittests/Format/QualifierFixerTest.cpp
@@ -28,7 +28,7 @@ namespace {
class QualifierFixerTest : public FormatTestBase {
protected:
- TokenList annotate(llvm::StringRef Code,
+ TokenList annotate(StringRef Code,
const FormatStyle &Style = getLLVMStyle()) {
return TestLexer(Allocator, Buffers, Style).annotate(Code);
}
diff --git a/clang/unittests/Format/SortImportsTestJS.cpp b/clang/unittests/Format/SortImportsTestJS.cpp
index f423cdd21d1a..59ce62117d4a 100644
--- a/clang/unittests/Format/SortImportsTestJS.cpp
+++ b/clang/unittests/Format/SortImportsTestJS.cpp
@@ -17,7 +17,7 @@ namespace clang {
namespace format {
namespace {
-class SortImportsTestJS : public ::testing::Test {
+class SortImportsTestJS : public testing::Test {
protected:
std::string sort(StringRef Code, unsigned Offset = 0, unsigned Length = 0) {
StringRef FileName = "input.js";
@@ -33,10 +33,9 @@ protected:
return *Formatted;
}
- void _verifySort(const char *File, int Line, llvm::StringRef Expected,
- llvm::StringRef Code, unsigned Offset = 0,
- unsigned Length = 0) {
- ::testing::ScopedTrace t(File, Line, ::testing::Message() << Code.str());
+ void _verifySort(const char *File, int Line, StringRef Expected,
+ StringRef Code, unsigned Offset = 0, unsigned Length = 0) {
+ testing::ScopedTrace t(File, Line, testing::Message() << Code.str());
std::string Result = sort(Code, Offset, Length);
EXPECT_EQ(Expected.str(), Result) << "Expected:\n"
<< Expected << "\nActual:\n"
diff --git a/clang/unittests/Format/SortImportsTestJava.cpp b/clang/unittests/Format/SortImportsTestJava.cpp
index 98a6826b1ff5..d577efa34f86 100644
--- a/clang/unittests/Format/SortImportsTestJava.cpp
+++ b/clang/unittests/Format/SortImportsTestJava.cpp
@@ -7,7 +7,7 @@ namespace clang {
namespace format {
namespace {
-class SortImportsTestJava : public ::testing::Test {
+class SortImportsTestJava : public testing::Test {
protected:
std::vector<tooling::Range> GetCodeRange(StringRef Code) {
return std::vector<tooling::Range>(1, tooling::Range(0, Code.size()));
diff --git a/clang/unittests/Format/SortIncludesTest.cpp b/clang/unittests/Format/SortIncludesTest.cpp
index 824fa0078cd0..2eeb16b4ab9f 100644
--- a/clang/unittests/Format/SortIncludesTest.cpp
+++ b/clang/unittests/Format/SortIncludesTest.cpp
@@ -43,7 +43,7 @@ protected:
return sort(Code, GetCodeRange(Code), FileName, ExpectedNumRanges);
}
- unsigned newCursor(llvm::StringRef Code, unsigned Cursor) {
+ unsigned newCursor(StringRef Code, unsigned Cursor) {
sortIncludes(FmtStyle, Code, GetCodeRange(Code), "input.cpp", &Cursor);
return Cursor;
}
@@ -53,35 +53,35 @@ protected:
};
TEST_F(SortIncludesTest, BasicSorting) {
- EXPECT_EQ("#include \"a.h\"\n"
- "#include \"b.h\"\n"
- "#include \"c.h\"",
- sort("#include \"a.h\"\n"
- "#include \"c.h\"\n"
- "#include \"b.h\""));
-
- EXPECT_EQ("// comment\n"
- "#include <a>\n"
- "#include <b>",
- sort("// comment\n"
- "#include <b>\n"
- "#include <a>",
- {tooling::Range(25, 1)}));
+ verifyFormat("#include \"a.h\"\n"
+ "#include \"b.h\"\n"
+ "#include \"c.h\"",
+ sort("#include \"a.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"b.h\""));
+
+ verifyFormat("// comment\n"
+ "#include <a>\n"
+ "#include <b>",
+ sort("// comment\n"
+ "#include <b>\n"
+ "#include <a>",
+ {tooling::Range(25, 1)}));
}
TEST_F(SortIncludesTest, TrailingComments) {
- EXPECT_EQ("#include \"a.h\"\n"
- "#include \"b.h\" /* long\n"
- " * long\n"
- " * comment*/\n"
- "#include \"c.h\"\n"
- "#include \"d.h\"",
- sort("#include \"a.h\"\n"
- "#include \"c.h\"\n"
- "#include \"b.h\" /* long\n"
- " * long\n"
- " * comment*/\n"
- "#include \"d.h\""));
+ verifyFormat("#include \"a.h\"\n"
+ "#include \"b.h\" /* long\n"
+ " * long\n"
+ " * comment*/\n"
+ "#include \"c.h\"\n"
+ "#include \"d.h\"",
+ sort("#include \"a.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"b.h\" /* long\n"
+ " * long\n"
+ " * comment*/\n"
+ "#include \"d.h\""));
}
TEST_F(SortIncludesTest, SortedIncludesUsingSortPriorityAttribute) {
@@ -100,531 +100,531 @@ TEST_F(SortIncludesTest, SortedIncludesUsingSortPriorityAttribute) {
{"<path", 9, 11, false},
{"^<[^/].*\\.h>", 8, 10, false},
{"^\".*\\.h\"", 10, 12, false}};
- EXPECT_EQ("#include <sys/param.h>\n"
- "#include <sys/types.h>\n"
- "#include <sys/ioctl.h>\n"
- "#include <sys/socket.h>\n"
- "#include <sys/stat.h>\n"
- "#include <sys/wait.h>\n"
- "\n"
- "#include <net/if.h>\n"
- "#include <net/if_dl.h>\n"
- "#include <net/route.h>\n"
- "#include <netinet/in.h>\n"
- "#include <protocols/rwhod.h>\n"
- "\n"
- "#include <assert.h>\n"
- "#include <errno.h>\n"
- "#include <inttypes.h>\n"
- "#include <stdio.h>\n"
- "#include <stdlib.h>\n"
- "\n"
- "#include <paths.h>\n"
- "\n"
- "#include \"pathnames.h\"",
- sort("#include <sys/param.h>\n"
- "#include <sys/types.h>\n"
- "#include <sys/ioctl.h>\n"
- "#include <net/if_dl.h>\n"
- "#include <net/route.h>\n"
- "#include <netinet/in.h>\n"
- "#include <sys/socket.h>\n"
- "#include <sys/stat.h>\n"
- "#include <sys/wait.h>\n"
- "#include <net/if.h>\n"
- "#include <protocols/rwhod.h>\n"
- "#include <assert.h>\n"
- "#include <paths.h>\n"
- "#include \"pathnames.h\"\n"
- "#include <errno.h>\n"
- "#include <inttypes.h>\n"
- "#include <stdio.h>\n"
- "#include <stdlib.h>"));
+ verifyFormat("#include <sys/param.h>\n"
+ "#include <sys/types.h>\n"
+ "#include <sys/ioctl.h>\n"
+ "#include <sys/socket.h>\n"
+ "#include <sys/stat.h>\n"
+ "#include <sys/wait.h>\n"
+ "\n"
+ "#include <net/if.h>\n"
+ "#include <net/if_dl.h>\n"
+ "#include <net/route.h>\n"
+ "#include <netinet/in.h>\n"
+ "#include <protocols/rwhod.h>\n"
+ "\n"
+ "#include <assert.h>\n"
+ "#include <errno.h>\n"
+ "#include <inttypes.h>\n"
+ "#include <stdio.h>\n"
+ "#include <stdlib.h>\n"
+ "\n"
+ "#include <paths.h>\n"
+ "\n"
+ "#include \"pathnames.h\"",
+ sort("#include <sys/param.h>\n"
+ "#include <sys/types.h>\n"
+ "#include <sys/ioctl.h>\n"
+ "#include <net/if_dl.h>\n"
+ "#include <net/route.h>\n"
+ "#include <netinet/in.h>\n"
+ "#include <sys/socket.h>\n"
+ "#include <sys/stat.h>\n"
+ "#include <sys/wait.h>\n"
+ "#include <net/if.h>\n"
+ "#include <protocols/rwhod.h>\n"
+ "#include <assert.h>\n"
+ "#include <paths.h>\n"
+ "#include \"pathnames.h\"\n"
+ "#include <errno.h>\n"
+ "#include <inttypes.h>\n"
+ "#include <stdio.h>\n"
+ "#include <stdlib.h>"));
}
TEST_F(SortIncludesTest, SortPriorityNotDefined) {
FmtStyle = getLLVMStyle();
- EXPECT_EQ("#include \"FormatTestUtils.h\"\n"
- "#include \"clang/Format/Format.h\"\n"
- "#include \"llvm/ADT/None.h\"\n"
- "#include \"llvm/Support/Debug.h\"\n"
- "#include \"gtest/gtest.h\"",
- sort("#include \"clang/Format/Format.h\"\n"
- "#include \"llvm/ADT/None.h\"\n"
- "#include \"FormatTestUtils.h\"\n"
- "#include \"gtest/gtest.h\"\n"
- "#include \"llvm/Support/Debug.h\""));
+ verifyFormat("#include \"FormatTestUtils.h\"\n"
+ "#include \"clang/Format/Format.h\"\n"
+ "#include \"llvm/ADT/None.h\"\n"
+ "#include \"llvm/Support/Debug.h\"\n"
+ "#include \"gtest/gtest.h\"",
+ sort("#include \"clang/Format/Format.h\"\n"
+ "#include \"llvm/ADT/None.h\"\n"
+ "#include \"FormatTestUtils.h\"\n"
+ "#include \"gtest/gtest.h\"\n"
+ "#include \"llvm/Support/Debug.h\""));
}
TEST_F(SortIncludesTest, NoReplacementsForValidIncludes) {
// Identical #includes have led to a failure with an unstable sort.
- std::string Code = "#include <a>\n"
- "#include <b>\n"
- "#include <c>\n"
- "#include <d>\n"
- "#include <e>\n"
- "#include <f>\n";
+ StringRef Code = "#include <a>\n"
+ "#include <b>\n"
+ "#include <c>\n"
+ "#include <d>\n"
+ "#include <e>\n"
+ "#include <f>\n";
EXPECT_TRUE(sortIncludes(FmtStyle, Code, GetCodeRange(Code), "a.cc").empty());
}
TEST_F(SortIncludesTest, MainFileHeader) {
- std::string Code = "#include <string>\n"
- "\n"
- "#include \"a/extra_action.proto.h\"\n";
+ StringRef Code = "#include <string>\n"
+ "\n"
+ "#include \"a/extra_action.proto.h\"\n";
FmtStyle = getGoogleStyle(FormatStyle::LK_Cpp);
EXPECT_TRUE(
sortIncludes(FmtStyle, Code, GetCodeRange(Code), "a/extra_action.cc")
.empty());
- EXPECT_EQ("#include \"foo.bar.h\"\n"
- "\n"
- "#include \"a.h\"",
- sort("#include \"a.h\"\n"
- "#include \"foo.bar.h\"",
- "foo.bar.cc"));
+ verifyFormat("#include \"foo.bar.h\"\n"
+ "\n"
+ "#include \"a.h\"",
+ sort("#include \"a.h\"\n"
+ "#include \"foo.bar.h\"",
+ "foo.bar.cc"));
}
TEST_F(SortIncludesTest, SortedIncludesInMultipleBlocksAreMerged) {
Style.IncludeBlocks = tooling::IncludeStyle::IBS_Merge;
- EXPECT_EQ("#include \"a.h\"\n"
- "#include \"b.h\"\n"
- "#include \"c.h\"",
- sort("#include \"a.h\"\n"
- "#include \"c.h\"\n"
- "\n"
- "\n"
- "#include \"b.h\""));
+ verifyFormat("#include \"a.h\"\n"
+ "#include \"b.h\"\n"
+ "#include \"c.h\"",
+ sort("#include \"a.h\"\n"
+ "#include \"c.h\"\n"
+ "\n"
+ "\n"
+ "#include \"b.h\""));
Style.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
- EXPECT_EQ("#include \"a.h\"\n"
- "#include \"b.h\"\n"
- "#include \"c.h\"",
- sort("#include \"a.h\"\n"
- "#include \"c.h\"\n"
- "\n"
- "\n"
- "#include \"b.h\""));
+ verifyFormat("#include \"a.h\"\n"
+ "#include \"b.h\"\n"
+ "#include \"c.h\"",
+ sort("#include \"a.h\"\n"
+ "#include \"c.h\"\n"
+ "\n"
+ "\n"
+ "#include \"b.h\""));
}
TEST_F(SortIncludesTest, SupportClangFormatOff) {
- EXPECT_EQ("#include <a>\n"
- "#include <b>\n"
- "#include <c>\n"
- "// clang-format off\n"
- "#include <b>\n"
- "#include <a>\n"
- "#include <c>\n"
- "// clang-format on",
- sort("#include <b>\n"
- "#include <a>\n"
- "#include <c>\n"
- "// clang-format off\n"
- "#include <b>\n"
- "#include <a>\n"
- "#include <c>\n"
- "// clang-format on"));
+ verifyFormat("#include <a>\n"
+ "#include <b>\n"
+ "#include <c>\n"
+ "// clang-format off\n"
+ "#include <b>\n"
+ "#include <a>\n"
+ "#include <c>\n"
+ "// clang-format on",
+ sort("#include <b>\n"
+ "#include <a>\n"
+ "#include <c>\n"
+ "// clang-format off\n"
+ "#include <b>\n"
+ "#include <a>\n"
+ "#include <c>\n"
+ "// clang-format on"));
Style.IncludeBlocks = Style.IBS_Merge;
- std::string Code = "// clang-format off\r\n"
- "#include \"d.h\"\r\n"
- "#include \"b.h\"\r\n"
- "// clang-format on\r\n"
- "\r\n"
- "#include \"c.h\"\r\n"
- "#include \"a.h\"\r\n"
- "#include \"e.h\"\r\n";
-
- std::string Expected = "// clang-format off\r\n"
- "#include \"d.h\"\r\n"
- "#include \"b.h\"\r\n"
- "// clang-format on\r\n"
- "\r\n"
- "#include \"e.h\"\r\n"
- "#include \"a.h\"\r\n"
- "#include \"c.h\"\r\n";
-
- EXPECT_EQ(Expected, sort(Code, "e.cpp", 1));
+ StringRef Code = "// clang-format off\r\n"
+ "#include \"d.h\"\r\n"
+ "#include \"b.h\"\r\n"
+ "// clang-format on\r\n"
+ "\r\n"
+ "#include \"c.h\"\r\n"
+ "#include \"a.h\"\r\n"
+ "#include \"e.h\"\r\n";
+
+ StringRef Expected = "// clang-format off\r\n"
+ "#include \"d.h\"\r\n"
+ "#include \"b.h\"\r\n"
+ "// clang-format on\r\n"
+ "\r\n"
+ "#include \"e.h\"\r\n"
+ "#include \"a.h\"\r\n"
+ "#include \"c.h\"\r\n";
+
+ verifyFormat(Expected, sort(Code, "e.cpp", 1));
}
TEST_F(SortIncludesTest, SupportClangFormatOffCStyle) {
- EXPECT_EQ("#include <a>\n"
- "#include <b>\n"
- "#include <c>\n"
- "/* clang-format off */\n"
- "#include <b>\n"
- "#include <a>\n"
- "#include <c>\n"
- "/* clang-format on */",
- sort("#include <b>\n"
- "#include <a>\n"
- "#include <c>\n"
- "/* clang-format off */\n"
- "#include <b>\n"
- "#include <a>\n"
- "#include <c>\n"
- "/* clang-format on */"));
+ verifyFormat("#include <a>\n"
+ "#include <b>\n"
+ "#include <c>\n"
+ "/* clang-format off */\n"
+ "#include <b>\n"
+ "#include <a>\n"
+ "#include <c>\n"
+ "/* clang-format on */",
+ sort("#include <b>\n"
+ "#include <a>\n"
+ "#include <c>\n"
+ "/* clang-format off */\n"
+ "#include <b>\n"
+ "#include <a>\n"
+ "#include <c>\n"
+ "/* clang-format on */"));
// Not really turning it off
- EXPECT_EQ("#include <a>\n"
- "#include <b>\n"
- "#include <c>\n"
- "/* clang-format offically */\n"
- "#include <a>\n"
- "#include <b>\n"
- "#include <c>\n"
- "/* clang-format onwards */",
- sort("#include <b>\n"
- "#include <a>\n"
- "#include <c>\n"
- "/* clang-format offically */\n"
- "#include <b>\n"
- "#include <a>\n"
- "#include <c>\n"
- "/* clang-format onwards */",
- "input.h", 2));
+ verifyFormat("#include <a>\n"
+ "#include <b>\n"
+ "#include <c>\n"
+ "/* clang-format offically */\n"
+ "#include <a>\n"
+ "#include <b>\n"
+ "#include <c>\n"
+ "/* clang-format onwards */",
+ sort("#include <b>\n"
+ "#include <a>\n"
+ "#include <c>\n"
+ "/* clang-format offically */\n"
+ "#include <b>\n"
+ "#include <a>\n"
+ "#include <c>\n"
+ "/* clang-format onwards */",
+ "input.h", 2));
}
TEST_F(SortIncludesTest, IncludeSortingCanBeDisabled) {
FmtStyle.SortIncludes = FormatStyle::SI_Never;
- EXPECT_EQ("#include \"a.h\"\n"
- "#include \"c.h\"\n"
- "#include \"b.h\"",
- sort("#include \"a.h\"\n"
- "#include \"c.h\"\n"
- "#include \"b.h\"",
- "input.h", 0));
+ verifyFormat("#include \"a.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"b.h\"",
+ sort("#include \"a.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"b.h\"",
+ "input.h", 0));
}
TEST_F(SortIncludesTest, MixIncludeAndImport) {
- EXPECT_EQ("#include \"a.h\"\n"
- "#import \"b.h\"\n"
- "#include \"c.h\"",
- sort("#include \"a.h\"\n"
- "#include \"c.h\"\n"
- "#import \"b.h\""));
+ verifyFormat("#include \"a.h\"\n"
+ "#import \"b.h\"\n"
+ "#include \"c.h\"",
+ sort("#include \"a.h\"\n"
+ "#include \"c.h\"\n"
+ "#import \"b.h\""));
}
TEST_F(SortIncludesTest, FixTrailingComments) {
- EXPECT_EQ("#include \"a.h\" // comment\n"
- "#include \"bb.h\" // comment\n"
- "#include \"ccc.h\"",
- sort("#include \"a.h\" // comment\n"
- "#include \"ccc.h\"\n"
- "#include \"bb.h\" // comment"));
+ verifyFormat("#include \"a.h\" // comment\n"
+ "#include \"bb.h\" // comment\n"
+ "#include \"ccc.h\"",
+ sort("#include \"a.h\" // comment\n"
+ "#include \"ccc.h\"\n"
+ "#include \"bb.h\" // comment"));
}
TEST_F(SortIncludesTest, LeadingWhitespace) {
- EXPECT_EQ("#include \"a.h\"\n"
- "#include \"b.h\"\n"
- "#include \"c.h\"",
- sort(" #include \"a.h\"\n"
- " #include \"c.h\"\n"
- " #include \"b.h\""));
- EXPECT_EQ("#include \"a.h\"\n"
- "#include \"b.h\"\n"
- "#include \"c.h\"",
- sort("# include \"a.h\"\n"
- "# include \"c.h\"\n"
- "# include \"b.h\""));
- EXPECT_EQ("#include \"a.h\"", sort("#include \"a.h\"\n"
- " #include \"a.h\""));
+ verifyFormat("#include \"a.h\"\n"
+ "#include \"b.h\"\n"
+ "#include \"c.h\"",
+ sort(" #include \"a.h\"\n"
+ " #include \"c.h\"\n"
+ " #include \"b.h\""));
+ verifyFormat("#include \"a.h\"\n"
+ "#include \"b.h\"\n"
+ "#include \"c.h\"",
+ sort("# include \"a.h\"\n"
+ "# include \"c.h\"\n"
+ "# include \"b.h\""));
+ verifyFormat("#include \"a.h\"", sort("#include \"a.h\"\n"
+ " #include \"a.h\""));
}
TEST_F(SortIncludesTest, TrailingWhitespace) {
- EXPECT_EQ("#include \"a.h\"\n"
- "#include \"b.h\"\n"
- "#include \"c.h\"",
- sort("#include \"a.h\" \n"
- "#include \"c.h\" \n"
- "#include \"b.h\" "));
- EXPECT_EQ("#include \"a.h\"", sort("#include \"a.h\"\n"
- "#include \"a.h\" "));
+ verifyFormat("#include \"a.h\"\n"
+ "#include \"b.h\"\n"
+ "#include \"c.h\"",
+ sort("#include \"a.h\" \n"
+ "#include \"c.h\" \n"
+ "#include \"b.h\" "));
+ verifyFormat("#include \"a.h\"", sort("#include \"a.h\"\n"
+ "#include \"a.h\" "));
}
TEST_F(SortIncludesTest, GreaterInComment) {
- EXPECT_EQ("#include \"a.h\"\n"
- "#include \"b.h\" // >\n"
- "#include \"c.h\"",
- sort("#include \"a.h\"\n"
- "#include \"c.h\"\n"
- "#include \"b.h\" // >"));
+ verifyFormat("#include \"a.h\"\n"
+ "#include \"b.h\" // >\n"
+ "#include \"c.h\"",
+ sort("#include \"a.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"b.h\" // >"));
}
TEST_F(SortIncludesTest, SortsLocallyInEachBlock) {
- EXPECT_EQ("#include \"a.h\"\n"
- "#include \"c.h\"\n"
- "\n"
- "#include \"b.h\"",
- sort("#include \"a.h\"\n"
- "#include \"c.h\"\n"
- "\n"
- "#include \"b.h\"",
- "input.h", 0));
+ verifyFormat("#include \"a.h\"\n"
+ "#include \"c.h\"\n"
+ "\n"
+ "#include \"b.h\"",
+ sort("#include \"a.h\"\n"
+ "#include \"c.h\"\n"
+ "\n"
+ "#include \"b.h\"",
+ "input.h", 0));
}
TEST_F(SortIncludesTest, SortsAllBlocksWhenMerging) {
Style.IncludeBlocks = tooling::IncludeStyle::IBS_Merge;
- EXPECT_EQ("#include \"a.h\"\n"
- "#include \"b.h\"\n"
- "#include \"c.h\"",
- sort("#include \"a.h\"\n"
- "#include \"c.h\"\n"
- "\n"
- "#include \"b.h\""));
+ verifyFormat("#include \"a.h\"\n"
+ "#include \"b.h\"\n"
+ "#include \"c.h\"",
+ sort("#include \"a.h\"\n"
+ "#include \"c.h\"\n"
+ "\n"
+ "#include \"b.h\""));
}
TEST_F(SortIncludesTest, CommentsAlwaysSeparateGroups) {
- EXPECT_EQ("#include \"a.h\"\n"
- "#include \"c.h\"\n"
- "// comment\n"
- "#include \"b.h\"",
- sort("#include \"c.h\"\n"
- "#include \"a.h\"\n"
- "// comment\n"
- "#include \"b.h\""));
+ verifyFormat("#include \"a.h\"\n"
+ "#include \"c.h\"\n"
+ "// comment\n"
+ "#include \"b.h\"",
+ sort("#include \"c.h\"\n"
+ "#include \"a.h\"\n"
+ "// comment\n"
+ "#include \"b.h\""));
Style.IncludeBlocks = tooling::IncludeStyle::IBS_Merge;
- EXPECT_EQ("#include \"a.h\"\n"
- "#include \"c.h\"\n"
- "// comment\n"
- "#include \"b.h\"",
- sort("#include \"c.h\"\n"
- "#include \"a.h\"\n"
- "// comment\n"
- "#include \"b.h\""));
+ verifyFormat("#include \"a.h\"\n"
+ "#include \"c.h\"\n"
+ "// comment\n"
+ "#include \"b.h\"",
+ sort("#include \"c.h\"\n"
+ "#include \"a.h\"\n"
+ "// comment\n"
+ "#include \"b.h\""));
Style.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
- EXPECT_EQ("#include \"a.h\"\n"
- "#include \"c.h\"\n"
- "// comment\n"
- "#include \"b.h\"",
- sort("#include \"c.h\"\n"
- "#include \"a.h\"\n"
- "// comment\n"
- "#include \"b.h\""));
+ verifyFormat("#include \"a.h\"\n"
+ "#include \"c.h\"\n"
+ "// comment\n"
+ "#include \"b.h\"",
+ sort("#include \"c.h\"\n"
+ "#include \"a.h\"\n"
+ "// comment\n"
+ "#include \"b.h\""));
}
TEST_F(SortIncludesTest, HandlesAngledIncludesAsSeparateBlocks) {
- EXPECT_EQ("#include \"a.h\"\n"
- "#include \"c.h\"\n"
- "#include <array>\n"
- "#include <b.h>\n"
- "#include <d.h>\n"
- "#include <vector>",
- sort("#include <vector>\n"
- "#include <d.h>\n"
- "#include <array>\n"
- "#include <b.h>\n"
- "#include \"c.h\"\n"
- "#include \"a.h\""));
+ verifyFormat("#include \"a.h\"\n"
+ "#include \"c.h\"\n"
+ "#include <array>\n"
+ "#include <b.h>\n"
+ "#include <d.h>\n"
+ "#include <vector>",
+ sort("#include <vector>\n"
+ "#include <d.h>\n"
+ "#include <array>\n"
+ "#include <b.h>\n"
+ "#include \"c.h\"\n"
+ "#include \"a.h\""));
FmtStyle = getGoogleStyle(FormatStyle::LK_Cpp);
- EXPECT_EQ("#include <b.h>\n"
- "#include <d.h>\n"
- "\n"
- "#include <array>\n"
- "#include <vector>\n"
- "\n"
- "#include \"a.h\"\n"
- "#include \"c.h\"",
- sort("#include <vector>\n"
- "#include <d.h>\n"
- "#include <array>\n"
- "#include <b.h>\n"
- "#include \"c.h\"\n"
- "#include \"a.h\""));
+ verifyFormat("#include <b.h>\n"
+ "#include <d.h>\n"
+ "\n"
+ "#include <array>\n"
+ "#include <vector>\n"
+ "\n"
+ "#include \"a.h\"\n"
+ "#include \"c.h\"",
+ sort("#include <vector>\n"
+ "#include <d.h>\n"
+ "#include <array>\n"
+ "#include <b.h>\n"
+ "#include \"c.h\"\n"
+ "#include \"a.h\""));
}
TEST_F(SortIncludesTest, RegroupsAngledIncludesInSeparateBlocks) {
Style.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
- EXPECT_EQ("#include \"a.h\"\n"
- "#include \"c.h\"\n"
- "\n"
- "#include <b.h>\n"
- "#include <d.h>",
- sort("#include <d.h>\n"
- "#include <b.h>\n"
- "#include \"c.h\"\n"
- "#include \"a.h\""));
+ verifyFormat("#include \"a.h\"\n"
+ "#include \"c.h\"\n"
+ "\n"
+ "#include <b.h>\n"
+ "#include <d.h>",
+ sort("#include <d.h>\n"
+ "#include <b.h>\n"
+ "#include \"c.h\"\n"
+ "#include \"a.h\""));
}
TEST_F(SortIncludesTest, HandlesMultilineIncludes) {
- EXPECT_EQ("#include \"a.h\"\n"
- "#include \"b.h\"\n"
- "#include \"c.h\"",
- sort("#include \"a.h\"\n"
- "#include \\\n"
- "\"c.h\"\n"
- "#include \"b.h\""));
+ verifyFormat("#include \"a.h\"\n"
+ "#include \"b.h\"\n"
+ "#include \"c.h\"",
+ sort("#include \"a.h\"\n"
+ "#include \\\n"
+ "\"c.h\"\n"
+ "#include \"b.h\""));
}
TEST_F(SortIncludesTest, HandlesTrailingCommentsWithAngleBrackets) {
// Regression test from the discussion at https://reviews.llvm.org/D121370.
- EXPECT_EQ("#include <cstdint>\n"
- "\n"
- "#include \"util/bar.h\"\n"
- "#include \"util/foo/foo.h\" // foo<T>",
- sort("#include <cstdint>\n"
- "\n"
- "#include \"util/bar.h\"\n"
- "#include \"util/foo/foo.h\" // foo<T>",
- /*FileName=*/"input.cc",
- /*ExpectedNumRanges=*/0));
+ verifyFormat("#include <cstdint>\n"
+ "\n"
+ "#include \"util/bar.h\"\n"
+ "#include \"util/foo/foo.h\" // foo<T>",
+ sort("#include <cstdint>\n"
+ "\n"
+ "#include \"util/bar.h\"\n"
+ "#include \"util/foo/foo.h\" // foo<T>",
+ /*FileName=*/"input.cc",
+ /*ExpectedNumRanges=*/0));
}
TEST_F(SortIncludesTest, LeavesMainHeaderFirst) {
Style.IncludeIsMainRegex = "([-_](test|unittest))?$";
- EXPECT_EQ("#include \"llvm/a.h\"\n"
- "#include \"b.h\"\n"
- "#include \"c.h\"",
- sort("#include \"llvm/a.h\"\n"
- "#include \"c.h\"\n"
- "#include \"b.h\"",
- "a.cc"));
- EXPECT_EQ("#include \"llvm/a.h\"\n"
- "#include \"b.h\"\n"
- "#include \"c.h\"",
- sort("#include \"llvm/a.h\"\n"
- "#include \"c.h\"\n"
- "#include \"b.h\"",
- "a_test.cc"));
- EXPECT_EQ("#include \"llvm/input.h\"\n"
- "#include \"b.h\"\n"
- "#include \"c.h\"",
- sort("#include \"llvm/input.h\"\n"
- "#include \"c.h\"\n"
- "#include \"b.h\"",
- "input.mm"));
+ verifyFormat("#include \"llvm/a.h\"\n"
+ "#include \"b.h\"\n"
+ "#include \"c.h\"",
+ sort("#include \"llvm/a.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"b.h\"",
+ "a.cc"));
+ verifyFormat("#include \"llvm/a.h\"\n"
+ "#include \"b.h\"\n"
+ "#include \"c.h\"",
+ sort("#include \"llvm/a.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"b.h\"",
+ "a_test.cc"));
+ verifyFormat("#include \"llvm/input.h\"\n"
+ "#include \"b.h\"\n"
+ "#include \"c.h\"",
+ sort("#include \"llvm/input.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"b.h\"",
+ "input.mm"));
// Don't allow prefixes.
- EXPECT_EQ("#include \"b.h\"\n"
- "#include \"c.h\"\n"
- "#include \"llvm/not_a.h\"",
- sort("#include \"llvm/not_a.h\"\n"
- "#include \"c.h\"\n"
- "#include \"b.h\"",
- "a.cc"));
+ verifyFormat("#include \"b.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"llvm/not_a.h\"",
+ sort("#include \"llvm/not_a.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"b.h\"",
+ "a.cc"));
// Don't do this for _main and other suffixes.
- EXPECT_EQ("#include \"b.h\"\n"
- "#include \"c.h\"\n"
- "#include \"llvm/a.h\"",
- sort("#include \"llvm/a.h\"\n"
- "#include \"c.h\"\n"
- "#include \"b.h\"",
- "a_main.cc"));
+ verifyFormat("#include \"b.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"llvm/a.h\"",
+ sort("#include \"llvm/a.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"b.h\"",
+ "a_main.cc"));
// Don't do this in headers.
- EXPECT_EQ("#include \"b.h\"\n"
- "#include \"c.h\"\n"
- "#include \"llvm/a.h\"",
- sort("#include \"llvm/a.h\"\n"
- "#include \"c.h\"\n"
- "#include \"b.h\"",
- "a.h"));
+ verifyFormat("#include \"b.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"llvm/a.h\"",
+ sort("#include \"llvm/a.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"b.h\"",
+ "a.h"));
// Only do this in the first #include block.
- EXPECT_EQ("#include <a>\n"
- "\n"
- "#include \"b.h\"\n"
- "#include \"c.h\"\n"
- "#include \"llvm/a.h\"",
- sort("#include <a>\n"
- "\n"
- "#include \"llvm/a.h\"\n"
- "#include \"c.h\"\n"
- "#include \"b.h\"",
- "a.cc"));
+ verifyFormat("#include <a>\n"
+ "\n"
+ "#include \"b.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"llvm/a.h\"",
+ sort("#include <a>\n"
+ "\n"
+ "#include \"llvm/a.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"b.h\"",
+ "a.cc"));
// Only recognize the first #include with a matching basename as main include.
- EXPECT_EQ("#include \"a.h\"\n"
- "#include \"b.h\"\n"
- "#include \"c.h\"\n"
- "#include \"llvm/a.h\"",
- sort("#include \"b.h\"\n"
- "#include \"a.h\"\n"
- "#include \"c.h\"\n"
- "#include \"llvm/a.h\"",
- "a.cc"));
+ verifyFormat("#include \"a.h\"\n"
+ "#include \"b.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"llvm/a.h\"",
+ sort("#include \"b.h\"\n"
+ "#include \"a.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"llvm/a.h\"",
+ "a.cc"));
}
TEST_F(SortIncludesTest, LeavesMainHeaderFirstInAdditionalExtensions) {
Style.IncludeIsMainRegex = "([-_](test|unittest))?|(Impl)?$";
- EXPECT_EQ("#include \"b.h\"\n"
- "#include \"c.h\"\n"
- "#include \"llvm/a.h\"",
- sort("#include \"llvm/a.h\"\n"
- "#include \"c.h\"\n"
- "#include \"b.h\"",
- "a_test.xxx"));
- EXPECT_EQ("#include \"b.h\"\n"
- "#include \"c.h\"\n"
- "#include \"llvm/a.h\"",
- sort("#include \"llvm/a.h\"\n"
- "#include \"c.h\"\n"
- "#include \"b.h\"",
- "aImpl.hpp"));
+ verifyFormat("#include \"b.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"llvm/a.h\"",
+ sort("#include \"llvm/a.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"b.h\"",
+ "a_test.xxx"));
+ verifyFormat("#include \"b.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"llvm/a.h\"",
+ sort("#include \"llvm/a.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"b.h\"",
+ "aImpl.hpp"));
// .cpp extension is considered "main" by default
- EXPECT_EQ("#include \"llvm/a.h\"\n"
- "#include \"b.h\"\n"
- "#include \"c.h\"",
- sort("#include \"llvm/a.h\"\n"
- "#include \"c.h\"\n"
- "#include \"b.h\"",
- "aImpl.cpp"));
- EXPECT_EQ("#include \"llvm/a.h\"\n"
- "#include \"b.h\"\n"
- "#include \"c.h\"",
- sort("#include \"llvm/a.h\"\n"
- "#include \"c.h\"\n"
- "#include \"b.h\"",
- "a_test.cpp"));
+ verifyFormat("#include \"llvm/a.h\"\n"
+ "#include \"b.h\"\n"
+ "#include \"c.h\"",
+ sort("#include \"llvm/a.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"b.h\"",
+ "aImpl.cpp"));
+ verifyFormat("#include \"llvm/a.h\"\n"
+ "#include \"b.h\"\n"
+ "#include \"c.h\"",
+ sort("#include \"llvm/a.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"b.h\"",
+ "a_test.cpp"));
// Allow additional filenames / extensions
Style.IncludeIsMainSourceRegex = "(Impl\\.hpp)|(\\.xxx)$";
- EXPECT_EQ("#include \"llvm/a.h\"\n"
- "#include \"b.h\"\n"
- "#include \"c.h\"",
- sort("#include \"llvm/a.h\"\n"
- "#include \"c.h\"\n"
- "#include \"b.h\"",
- "a_test.xxx"));
- EXPECT_EQ("#include \"llvm/a.h\"\n"
- "#include \"b.h\"\n"
- "#include \"c.h\"",
- sort("#include \"llvm/a.h\"\n"
- "#include \"c.h\"\n"
- "#include \"b.h\"",
- "aImpl.hpp"));
+ verifyFormat("#include \"llvm/a.h\"\n"
+ "#include \"b.h\"\n"
+ "#include \"c.h\"",
+ sort("#include \"llvm/a.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"b.h\"",
+ "a_test.xxx"));
+ verifyFormat("#include \"llvm/a.h\"\n"
+ "#include \"b.h\"\n"
+ "#include \"c.h\"",
+ sort("#include \"llvm/a.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"b.h\"",
+ "aImpl.hpp"));
}
TEST_F(SortIncludesTest, RecognizeMainHeaderInAllGroups) {
Style.IncludeIsMainRegex = "([-_](test|unittest))?$";
Style.IncludeBlocks = tooling::IncludeStyle::IBS_Merge;
- EXPECT_EQ("#include \"c.h\"\n"
- "#include \"a.h\"\n"
- "#include \"b.h\"",
- sort("#include \"b.h\"\n"
- "\n"
- "#include \"a.h\"\n"
- "#include \"c.h\"",
- "c.cc"));
+ verifyFormat("#include \"c.h\"\n"
+ "#include \"a.h\"\n"
+ "#include \"b.h\"",
+ sort("#include \"b.h\"\n"
+ "\n"
+ "#include \"a.h\"\n"
+ "#include \"c.h\"",
+ "c.cc"));
}
TEST_F(SortIncludesTest, MainHeaderIsSeparatedWhenRegroupping) {
Style.IncludeIsMainRegex = "([-_](test|unittest))?$";
Style.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
- EXPECT_EQ("#include \"a.h\"\n"
- "\n"
- "#include \"b.h\"\n"
- "#include \"c.h\"",
- sort("#include \"b.h\"\n"
- "\n"
- "#include \"a.h\"\n"
- "#include \"c.h\"",
- "a.cc"));
+ verifyFormat("#include \"a.h\"\n"
+ "\n"
+ "#include \"b.h\"\n"
+ "#include \"c.h\"",
+ sort("#include \"b.h\"\n"
+ "\n"
+ "#include \"a.h\"\n"
+ "#include \"c.h\"",
+ "a.cc"));
}
TEST_F(SortIncludesTest, SupportOptionalCaseSensitiveSorting) {
@@ -632,19 +632,19 @@ TEST_F(SortIncludesTest, SupportOptionalCaseSensitiveSorting) {
FmtStyle.SortIncludes = FormatStyle::SI_CaseInsensitive;
- EXPECT_EQ("#include \"A/B.h\"\n"
- "#include \"A/b.h\"\n"
- "#include \"a/b.h\"\n"
- "#include \"B/A.h\"\n"
- "#include \"B/a.h\"",
- sort("#include \"B/a.h\"\n"
- "#include \"B/A.h\"\n"
- "#include \"A/B.h\"\n"
- "#include \"a/b.h\"\n"
- "#include \"A/b.h\"",
- "a.h"));
-
- Style.IncludeBlocks = clang::tooling::IncludeStyle::IBS_Regroup;
+ verifyFormat("#include \"A/B.h\"\n"
+ "#include \"A/b.h\"\n"
+ "#include \"a/b.h\"\n"
+ "#include \"B/A.h\"\n"
+ "#include \"B/a.h\"",
+ sort("#include \"B/a.h\"\n"
+ "#include \"B/A.h\"\n"
+ "#include \"A/B.h\"\n"
+ "#include \"a/b.h\"\n"
+ "#include \"A/b.h\"",
+ "a.h"));
+
+ Style.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
Style.IncludeCategories = {
{"^\"", 1, 0, false}, {"^<.*\\.h>$", 2, 0, false}, {"^<", 3, 0, false}};
@@ -657,17 +657,17 @@ TEST_F(SortIncludesTest, SupportOptionalCaseSensitiveSorting) {
"#include \"Vlib.h\"\n"
"#include \"AST.h\"";
- EXPECT_EQ("#include \"AST.h\"\n"
- "#include \"qt.h\"\n"
- "#include \"Vlib.h\"\n"
- "#include \"vlib.h\"\n"
- "\n"
- "#include <Qtwhatever.h>\n"
- "#include <qtwhatever.h>\n"
- "\n"
- "#include <Algorithm>\n"
- "#include <algorithm>",
- sort(UnsortedCode));
+ verifyFormat("#include \"AST.h\"\n"
+ "#include \"qt.h\"\n"
+ "#include \"Vlib.h\"\n"
+ "#include \"vlib.h\"\n"
+ "\n"
+ "#include <Qtwhatever.h>\n"
+ "#include <qtwhatever.h>\n"
+ "\n"
+ "#include <Algorithm>\n"
+ "#include <algorithm>",
+ sort(UnsortedCode));
}
TEST_F(SortIncludesTest, SupportCaseInsensitiveMatching) {
@@ -676,25 +676,25 @@ TEST_F(SortIncludesTest, SupportCaseInsensitiveMatching) {
// Ensure both main header detection and grouping work in a case insensitive
// manner.
- EXPECT_EQ("#include \"llvm/A.h\"\n"
- "#include \"b.h\"\n"
- "#include \"c.h\"\n"
- "#include \"LLVM/z.h\"\n"
- "#include \"llvm/X.h\"\n"
- "#include \"GTest/GTest.h\"\n"
- "#include \"gmock/gmock.h\"",
- sort("#include \"c.h\"\n"
- "#include \"b.h\"\n"
- "#include \"GTest/GTest.h\"\n"
- "#include \"llvm/A.h\"\n"
- "#include \"gmock/gmock.h\"\n"
- "#include \"llvm/X.h\"\n"
- "#include \"LLVM/z.h\"",
- "a_TEST.cc"));
+ verifyFormat("#include \"llvm/A.h\"\n"
+ "#include \"b.h\"\n"
+ "#include \"c.h\"\n"
+ "#include \"LLVM/z.h\"\n"
+ "#include \"llvm/X.h\"\n"
+ "#include \"GTest/GTest.h\"\n"
+ "#include \"gmock/gmock.h\"",
+ sort("#include \"c.h\"\n"
+ "#include \"b.h\"\n"
+ "#include \"GTest/GTest.h\"\n"
+ "#include \"llvm/A.h\"\n"
+ "#include \"gmock/gmock.h\"\n"
+ "#include \"llvm/X.h\"\n"
+ "#include \"LLVM/z.h\"",
+ "a_TEST.cc"));
}
TEST_F(SortIncludesTest, SupportOptionalCaseSensitiveMachting) {
- Style.IncludeBlocks = clang::tooling::IncludeStyle::IBS_Regroup;
+ Style.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
Style.IncludeCategories = {{"^\"", 1, 0, false},
{"^<.*\\.h>$", 2, 0, false},
{"^<Q[A-Z][^\\.]*>", 3, 0, false},
@@ -711,57 +711,57 @@ TEST_F(SortIncludesTest, SupportOptionalCaseSensitiveMachting) {
"#include <qtwhatever.h>\n"
"#include <QtGlobal>";
- EXPECT_EQ("#include \"qa.h\"\n"
- "#include \"qt.h\"\n"
- "\n"
- "#include <qtwhatever.h>\n"
- "#include <windows.h>\n"
- "\n"
- "#include <QLabel>\n"
- "#include <QWidget>\n"
- "#include <QtGlobal>\n"
- "#include <queue>\n"
- "\n"
- "#include <algorithm>",
- sort(UnsortedCode));
+ verifyFormat("#include \"qa.h\"\n"
+ "#include \"qt.h\"\n"
+ "\n"
+ "#include <qtwhatever.h>\n"
+ "#include <windows.h>\n"
+ "\n"
+ "#include <QLabel>\n"
+ "#include <QWidget>\n"
+ "#include <QtGlobal>\n"
+ "#include <queue>\n"
+ "\n"
+ "#include <algorithm>",
+ sort(UnsortedCode));
Style.IncludeCategories[2].RegexIsCaseSensitive = true;
Style.IncludeCategories[3].RegexIsCaseSensitive = true;
- EXPECT_EQ("#include \"qa.h\"\n"
- "#include \"qt.h\"\n"
- "\n"
- "#include <qtwhatever.h>\n"
- "#include <windows.h>\n"
- "\n"
- "#include <QLabel>\n"
- "#include <QWidget>\n"
- "\n"
- "#include <QtGlobal>\n"
- "\n"
- "#include <algorithm>\n"
- "#include <queue>",
- sort(UnsortedCode));
+ verifyFormat("#include \"qa.h\"\n"
+ "#include \"qt.h\"\n"
+ "\n"
+ "#include <qtwhatever.h>\n"
+ "#include <windows.h>\n"
+ "\n"
+ "#include <QLabel>\n"
+ "#include <QWidget>\n"
+ "\n"
+ "#include <QtGlobal>\n"
+ "\n"
+ "#include <algorithm>\n"
+ "#include <queue>",
+ sort(UnsortedCode));
}
TEST_F(SortIncludesTest, NegativePriorities) {
Style.IncludeCategories = {{".*important_os_header.*", -1, 0, false},
{".*", 1, 0, false}};
- EXPECT_EQ("#include \"important_os_header.h\"\n"
- "#include \"c_main.h\"\n"
- "#include \"a_other.h\"",
- sort("#include \"c_main.h\"\n"
- "#include \"a_other.h\"\n"
- "#include \"important_os_header.h\"",
- "c_main.cc"));
+ verifyFormat("#include \"important_os_header.h\"\n"
+ "#include \"c_main.h\"\n"
+ "#include \"a_other.h\"",
+ sort("#include \"c_main.h\"\n"
+ "#include \"a_other.h\"\n"
+ "#include \"important_os_header.h\"",
+ "c_main.cc"));
// check stable when re-run
- EXPECT_EQ("#include \"important_os_header.h\"\n"
- "#include \"c_main.h\"\n"
- "#include \"a_other.h\"",
- sort("#include \"important_os_header.h\"\n"
- "#include \"c_main.h\"\n"
- "#include \"a_other.h\"",
- "c_main.cc", 0));
+ verifyFormat("#include \"important_os_header.h\"\n"
+ "#include \"c_main.h\"\n"
+ "#include \"a_other.h\"",
+ sort("#include \"important_os_header.h\"\n"
+ "#include \"c_main.h\"\n"
+ "#include \"a_other.h\"",
+ "c_main.cc", 0));
}
TEST_F(SortIncludesTest, PriorityGroupsAreSeparatedWhenRegroupping) {
@@ -769,34 +769,34 @@ TEST_F(SortIncludesTest, PriorityGroupsAreSeparatedWhenRegroupping) {
{".*", 1, 0, false}};
Style.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
- EXPECT_EQ("#include \"important_os_header.h\"\n"
- "\n"
- "#include \"c_main.h\"\n"
- "\n"
- "#include \"a_other.h\"",
- sort("#include \"c_main.h\"\n"
- "#include \"a_other.h\"\n"
- "#include \"important_os_header.h\"",
- "c_main.cc"));
+ verifyFormat("#include \"important_os_header.h\"\n"
+ "\n"
+ "#include \"c_main.h\"\n"
+ "\n"
+ "#include \"a_other.h\"",
+ sort("#include \"c_main.h\"\n"
+ "#include \"a_other.h\"\n"
+ "#include \"important_os_header.h\"",
+ "c_main.cc"));
// check stable when re-run
- EXPECT_EQ("#include \"important_os_header.h\"\n"
- "\n"
- "#include \"c_main.h\"\n"
- "\n"
- "#include \"a_other.h\"",
- sort("#include \"important_os_header.h\"\n"
- "\n"
- "#include \"c_main.h\"\n"
- "\n"
- "#include \"a_other.h\"",
- "c_main.cc", 0));
+ verifyFormat("#include \"important_os_header.h\"\n"
+ "\n"
+ "#include \"c_main.h\"\n"
+ "\n"
+ "#include \"a_other.h\"",
+ sort("#include \"important_os_header.h\"\n"
+ "\n"
+ "#include \"c_main.h\"\n"
+ "\n"
+ "#include \"a_other.h\"",
+ "c_main.cc", 0));
}
TEST_F(SortIncludesTest, CalculatesCorrectCursorPosition) {
- std::string Code = "#include <ccc>\n" // Start of line: 0
- "#include <bbbbbb>\n" // Start of line: 15
- "#include <a>\n"; // Start of line: 33
+ StringRef Code = "#include <ccc>\n" // Start of line: 0
+ "#include <bbbbbb>\n" // Start of line: 15
+ "#include <a>\n"; // Start of line: 33
EXPECT_EQ(31u, newCursor(Code, 0));
EXPECT_EQ(13u, newCursor(Code, 15));
EXPECT_EQ(0u, newCursor(Code, 33));
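A quick check of those cursor expectations: the sorted output is "#include <a>\n#include <bbbbbb>\n#include <ccc>\n", so <a> starts at offset 0, <bbbbbb> at 13 (the length of "#include <a>\n"), and <ccc> at 31 (13 + 18). newCursor maps a pre-sort offset to its post-sort position, so a cursor at the start of the original <ccc> line (offset 0) lands at 31, one at the old <bbbbbb> line (offset 15) lands at 13, and one at the old <a> line (offset 33) lands at 0.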
@@ -808,14 +808,14 @@ TEST_F(SortIncludesTest, CalculatesCorrectCursorPosition) {
TEST_F(SortIncludesTest, CalculatesCorrectCursorPositionWithRegrouping) {
Style.IncludeBlocks = Style.IBS_Regroup;
- std::string Code = "#include \"b\"\n" // Start of line: 0
- "\n" // Start of line: 13
- "#include \"aa\"\n" // Start of line: 14
- "int i;"; // Start of line: 28
- std::string Expected = "#include \"aa\"\n" // Start of line: 0
- "#include \"b\"\n" // Start of line: 14
- "int i;"; // Start of line: 27
- EXPECT_EQ(Expected, sort(Code));
+ StringRef Code = "#include \"b\"\n" // Start of line: 0
+ "\n" // Start of line: 13
+ "#include \"aa\"\n" // Start of line: 14
+ "int i;"; // Start of line: 28
+ StringRef Expected = "#include \"aa\"\n" // Start of line: 0
+ "#include \"b\"\n" // Start of line: 14
+ "int i;"; // Start of line: 27
+ verifyFormat(Expected, sort(Code));
EXPECT_EQ(12u, newCursor(Code, 26)); // Closing quote of "aa"
EXPECT_EQ(26u, newCursor(Code, 27)); // Newline after "aa"
EXPECT_EQ(27u, newCursor(Code, 28)); // Start of last line
@@ -827,14 +827,14 @@ TEST_F(SortIncludesTest,
FmtStyle.LineEnding = FormatStyle::LE_CRLF;
Style.IncludeCategories = {
{"^\"a\"", 0, 0, false}, {"^\"b\"", 1, 1, false}, {".*", 2, 2, false}};
- std::string Code = "#include \"a\"\r\n" // Start of line: 0
- "\r\n" // Start of line: 14
- "#include \"b\"\r\n" // Start of line: 16
- "\r\n" // Start of line: 30
- "#include \"c\"\r\n" // Start of line: 32
- "\r\n" // Start of line: 46
- "int i;"; // Start of line: 48
- verifyNoChange(Code);
+ StringRef Code = "#include \"a\"\r\n" // Start of line: 0
+ "\r\n" // Start of line: 14
+ "#include \"b\"\r\n" // Start of line: 16
+ "\r\n" // Start of line: 30
+ "#include \"c\"\r\n" // Start of line: 32
+ "\r\n" // Start of line: 46
+ "int i;"; // Start of line: 48
+ verifyFormat(Code);
EXPECT_EQ(0u, newCursor(Code, 0));
EXPECT_EQ(14u, newCursor(Code, 14));
EXPECT_EQ(16u, newCursor(Code, 16));
@@ -850,19 +850,19 @@ TEST_F(
Style.IncludeBlocks = Style.IBS_Regroup;
FmtStyle.LineEnding = FormatStyle::LE_CRLF;
Style.IncludeCategories = {{".*", 0, 0, false}};
- std::string Code = "#include \"a\"\r\n" // Start of line: 0
- "\r\n" // Start of line: 14
- "#include \"b\"\r\n" // Start of line: 16
- "\r\n" // Start of line: 30
- "#include \"c\"\r\n" // Start of line: 32
- "\r\n" // Start of line: 46
- "int i;"; // Start of line: 48
- std::string Expected = "#include \"a\"\r\n" // Start of line: 0
- "#include \"b\"\r\n" // Start of line: 14
- "#include \"c\"\r\n" // Start of line: 28
- "\r\n" // Start of line: 42
- "int i;"; // Start of line: 44
- EXPECT_EQ(Expected, sort(Code));
+ StringRef Code = "#include \"a\"\r\n" // Start of line: 0
+ "\r\n" // Start of line: 14
+ "#include \"b\"\r\n" // Start of line: 16
+ "\r\n" // Start of line: 30
+ "#include \"c\"\r\n" // Start of line: 32
+ "\r\n" // Start of line: 46
+ "int i;"; // Start of line: 48
+ StringRef Expected = "#include \"a\"\r\n" // Start of line: 0
+ "#include \"b\"\r\n" // Start of line: 14
+ "#include \"c\"\r\n" // Start of line: 28
+ "\r\n" // Start of line: 42
+ "int i;"; // Start of line: 44
+ verifyFormat(Expected, sort(Code));
EXPECT_EQ(0u, newCursor(Code, 0));
EXPECT_EQ(
14u,
@@ -885,19 +885,19 @@ TEST_F(
FmtStyle.LineEnding = FormatStyle::LE_CRLF;
Style.IncludeCategories = {
{"^\"a\"", 0, 0, false}, {"^\"b\"", 1, 1, false}, {".*", 2, 2, false}};
- std::string Code = "#include \"a\"\r\n" // Start of line: 0
- "#include \"b\"\r\n" // Start of line: 14
- "#include \"c\"\r\n" // Start of line: 28
- "\r\n" // Start of line: 42
- "int i;"; // Start of line: 44
- std::string Expected = "#include \"a\"\r\n" // Start of line: 0
- "\r\n" // Start of line: 14
- "#include \"b\"\r\n" // Start of line: 16
- "\r\n" // Start of line: 30
- "#include \"c\"\r\n" // Start of line: 32
- "\r\n" // Start of line: 46
- "int i;"; // Start of line: 48
- EXPECT_EQ(Expected, sort(Code));
+ StringRef Code = "#include \"a\"\r\n" // Start of line: 0
+ "#include \"b\"\r\n" // Start of line: 14
+ "#include \"c\"\r\n" // Start of line: 28
+ "\r\n" // Start of line: 42
+ "int i;"; // Start of line: 44
+ StringRef Expected = "#include \"a\"\r\n" // Start of line: 0
+ "\r\n" // Start of line: 14
+ "#include \"b\"\r\n" // Start of line: 16
+ "\r\n" // Start of line: 30
+ "#include \"c\"\r\n" // Start of line: 32
+ "\r\n" // Start of line: 46
+ "int i;"; // Start of line: 48
+ verifyFormat(Expected, sort(Code));
EXPECT_EQ(0u, newCursor(Code, 0));
EXPECT_EQ(15u, newCursor(Code, 16));
EXPECT_EQ(30u, newCursor(Code, 32));
@@ -912,21 +912,21 @@ TEST_F(
FmtStyle.LineEnding = FormatStyle::LE_CRLF;
Style.IncludeCategories = {
{"^\"a\"", 0, 0, false}, {"^\"b\"", 1, 1, false}, {".*", 2, 2, false}};
- std::string Code = "#include \"a\"\r\n" // Start of line: 0
- "\r\n" // Start of line: 14
- "#include \"c\"\r\n" // Start of line: 16
- "\r\n" // Start of line: 30
- "#include \"b\"\r\n" // Start of line: 32
- "\r\n" // Start of line: 46
- "int i;"; // Start of line: 48
- std::string Expected = "#include \"a\"\r\n" // Start of line: 0
- "\r\n" // Start of line: 14
- "#include \"b\"\r\n" // Start of line: 16
- "\r\n" // Start of line: 30
- "#include \"c\"\r\n" // Start of line: 32
- "\r\n" // Start of line: 46
- "int i;"; // Start of line: 48
- EXPECT_EQ(Expected, sort(Code));
+ StringRef Code = "#include \"a\"\r\n" // Start of line: 0
+ "\r\n" // Start of line: 14
+ "#include \"c\"\r\n" // Start of line: 16
+ "\r\n" // Start of line: 30
+ "#include \"b\"\r\n" // Start of line: 32
+ "\r\n" // Start of line: 46
+ "int i;"; // Start of line: 48
+ StringRef Expected = "#include \"a\"\r\n" // Start of line: 0
+ "\r\n" // Start of line: 14
+ "#include \"b\"\r\n" // Start of line: 16
+ "\r\n" // Start of line: 30
+ "#include \"c\"\r\n" // Start of line: 32
+ "\r\n" // Start of line: 46
+ "int i;"; // Start of line: 48
+ verifyFormat(Expected, sort(Code));
EXPECT_EQ(0u, newCursor(Code, 0));
EXPECT_EQ(14u, newCursor(Code, 14));
EXPECT_EQ(30u, newCursor(Code, 32));
@@ -938,88 +938,88 @@ TEST_F(
#endif
TEST_F(SortIncludesTest, DeduplicateIncludes) {
- EXPECT_EQ("#include <a>\n"
- "#include <b>\n"
- "#include <c>",
- sort("#include <a>\n"
- "#include <b>\n"
- "#include <b>\n"
- "#include <b>\n"
- "#include <b>\n"
- "#include <c>"));
+ verifyFormat("#include <a>\n"
+ "#include <b>\n"
+ "#include <c>",
+ sort("#include <a>\n"
+ "#include <b>\n"
+ "#include <b>\n"
+ "#include <b>\n"
+ "#include <b>\n"
+ "#include <c>"));
Style.IncludeBlocks = tooling::IncludeStyle::IBS_Merge;
- EXPECT_EQ("#include <a>\n"
- "#include <b>\n"
- "#include <c>",
- sort("#include <a>\n"
- "#include <b>\n"
- "\n"
- "#include <b>\n"
- "\n"
- "#include <b>\n"
- "#include <c>"));
+ verifyFormat("#include <a>\n"
+ "#include <b>\n"
+ "#include <c>",
+ sort("#include <a>\n"
+ "#include <b>\n"
+ "\n"
+ "#include <b>\n"
+ "\n"
+ "#include <b>\n"
+ "#include <c>"));
Style.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
- EXPECT_EQ("#include <a>\n"
- "#include <b>\n"
- "#include <c>",
- sort("#include <a>\n"
- "#include <b>\n"
- "\n"
- "#include <b>\n"
- "\n"
- "#include <b>\n"
- "#include <c>"));
+ verifyFormat("#include <a>\n"
+ "#include <b>\n"
+ "#include <c>",
+ sort("#include <a>\n"
+ "#include <b>\n"
+ "\n"
+ "#include <b>\n"
+ "\n"
+ "#include <b>\n"
+ "#include <c>"));
}
TEST_F(SortIncludesTest, SortAndDeduplicateIncludes) {
- EXPECT_EQ("#include <a>\n"
- "#include <b>\n"
- "#include <c>",
- sort("#include <b>\n"
- "#include <a>\n"
- "#include <b>\n"
- "#include <b>\n"
- "#include <c>\n"
- "#include <b>"));
+ verifyFormat("#include <a>\n"
+ "#include <b>\n"
+ "#include <c>",
+ sort("#include <b>\n"
+ "#include <a>\n"
+ "#include <b>\n"
+ "#include <b>\n"
+ "#include <c>\n"
+ "#include <b>"));
Style.IncludeBlocks = tooling::IncludeStyle::IBS_Merge;
- EXPECT_EQ("#include <a>\n"
- "#include <b>\n"
- "#include <c>",
- sort("#include <b>\n"
- "#include <a>\n"
- "\n"
- "#include <b>\n"
- "\n"
- "#include <c>\n"
- "#include <b>"));
+ verifyFormat("#include <a>\n"
+ "#include <b>\n"
+ "#include <c>",
+ sort("#include <b>\n"
+ "#include <a>\n"
+ "\n"
+ "#include <b>\n"
+ "\n"
+ "#include <c>\n"
+ "#include <b>"));
Style.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
- EXPECT_EQ("#include <a>\n"
- "#include <b>\n"
- "#include <c>",
- sort("#include <b>\n"
- "#include <a>\n"
- "\n"
- "#include <b>\n"
- "\n"
- "#include <c>\n"
- "#include <b>"));
+ verifyFormat("#include <a>\n"
+ "#include <b>\n"
+ "#include <c>",
+ sort("#include <b>\n"
+ "#include <a>\n"
+ "\n"
+ "#include <b>\n"
+ "\n"
+ "#include <c>\n"
+ "#include <b>"));
}
TEST_F(SortIncludesTest, CalculatesCorrectCursorPositionAfterDeduplicate) {
- std::string Code = "#include <b>\n" // Start of line: 0
- "#include <a>\n" // Start of line: 13
- "#include <b>\n" // Start of line: 26
- "#include <b>\n" // Start of line: 39
- "#include <c>\n" // Start of line: 52
- "#include <b>\n"; // Start of line: 65
- std::string Expected = "#include <a>\n" // Start of line: 0
- "#include <b>\n" // Start of line: 13
- "#include <c>\n"; // Start of line: 26
- EXPECT_EQ(Expected, sort(Code));
+ StringRef Code = "#include <b>\n" // Start of line: 0
+ "#include <a>\n" // Start of line: 13
+ "#include <b>\n" // Start of line: 26
+ "#include <b>\n" // Start of line: 39
+ "#include <c>\n" // Start of line: 52
+ "#include <b>\n"; // Start of line: 65
+ StringRef Expected = "#include <a>\n" // Start of line: 0
+ "#include <b>\n" // Start of line: 13
+ "#include <c>\n"; // Start of line: 26
+ verifyFormat(Expected, sort(Code));
// Cursor on 'i' in "#include <a>".
EXPECT_EQ(1u, newCursor(Code, 14));
// Cursor on 'b' in "#include <b>".
@@ -1033,26 +1033,26 @@ TEST_F(SortIncludesTest, CalculatesCorrectCursorPositionAfterDeduplicate) {
}
TEST_F(SortIncludesTest, DeduplicateLocallyInEachBlock) {
- EXPECT_EQ("#include <a>\n"
- "#include <b>\n"
- "\n"
- "#include <b>\n"
- "#include <c>",
- sort("#include <a>\n"
- "#include <b>\n"
- "\n"
- "#include <c>\n"
- "#include <b>\n"
- "#include <b>"));
+ verifyFormat("#include <a>\n"
+ "#include <b>\n"
+ "\n"
+ "#include <b>\n"
+ "#include <c>",
+ sort("#include <a>\n"
+ "#include <b>\n"
+ "\n"
+ "#include <c>\n"
+ "#include <b>\n"
+ "#include <b>"));
}
TEST_F(SortIncludesTest, ValidAffactedRangesAfterDeduplicatingIncludes) {
- std::string Code = "#include <a>\n"
- "#include <b>\n"
- "#include <a>\n"
- "#include <a>\n"
- "\n"
- " int x ;";
+ StringRef Code = "#include <a>\n"
+ "#include <b>\n"
+ "#include <a>\n"
+ "#include <a>\n"
+ "\n"
+ " int x ;";
std::vector<tooling::Range> Ranges = {tooling::Range(0, 52)};
auto Replaces = sortIncludes(FmtStyle, Code, Ranges, "input.cpp");
Ranges = tooling::calculateRangesAfterReplacements(Replaces, Ranges);
@@ -1062,80 +1062,78 @@ TEST_F(SortIncludesTest, ValidAffactedRangesAfterDeduplicatingIncludes) {
}
TEST_F(SortIncludesTest, DoNotSortLikelyXml) {
- EXPECT_EQ("<!--;\n"
- "#include <b>\n"
- "#include <a>\n"
- "-->",
- sort("<!--;\n"
- "#include <b>\n"
- "#include <a>\n"
- "-->",
- "input.h", 0));
+ verifyFormat("<!--;\n"
+ "#include <b>\n"
+ "#include <a>\n"
+ "-->",
+ sort("<!--;\n"
+ "#include <b>\n"
+ "#include <a>\n"
+ "-->",
+ "input.h", 0));
}
TEST_F(SortIncludesTest, DoNotOutputReplacementsForSortedBlocksWithRegrouping) {
Style.IncludeBlocks = Style.IBS_Regroup;
- std::string Code = R"(
-#include "b.h"
-
-#include <a.h>
-)";
- EXPECT_EQ(Code, sort(Code, "input.h", 0));
+ StringRef Code = "#include \"b.h\"\n"
+ "\n"
+ "#include <a.h>";
+ verifyFormat(Code, sort(Code, "input.h", 0));
}
TEST_F(SortIncludesTest,
DoNotOutputReplacementsForSortedBlocksWithRegroupingWindows) {
Style.IncludeBlocks = Style.IBS_Regroup;
- std::string Code = "#include \"b.h\"\r\n"
- "\r\n"
- "#include <a.h>\r\n";
- EXPECT_EQ(Code, sort(Code, "input.h", 0));
+ StringRef Code = "#include \"b.h\"\r\n"
+ "\r\n"
+ "#include <a.h>\r\n";
+ verifyFormat(Code, sort(Code, "input.h", 0));
}
TEST_F(SortIncludesTest, MainIncludeChar) {
- std::string Code = "#include <a>\n"
- "#include \"quote/input.h\"\n"
- "#include <angle-bracket/input.h>\n";
+ StringRef Code = "#include <a>\n"
+ "#include \"quote/input.h\"\n"
+ "#include <angle-bracket/input.h>\n";
// Default behavior
- EXPECT_EQ("#include \"quote/input.h\"\n"
- "#include <a>\n"
- "#include <angle-bracket/input.h>\n",
- sort(Code, "input.cc", 1));
+ verifyFormat("#include \"quote/input.h\"\n"
+ "#include <a>\n"
+ "#include <angle-bracket/input.h>\n",
+ sort(Code, "input.cc", 1));
Style.MainIncludeChar = tooling::IncludeStyle::MICD_Quote;
- EXPECT_EQ("#include \"quote/input.h\"\n"
- "#include <a>\n"
- "#include <angle-bracket/input.h>\n",
- sort(Code, "input.cc", 1));
+ verifyFormat("#include \"quote/input.h\"\n"
+ "#include <a>\n"
+ "#include <angle-bracket/input.h>\n",
+ sort(Code, "input.cc", 1));
Style.MainIncludeChar = tooling::IncludeStyle::MICD_AngleBracket;
- EXPECT_EQ("#include <angle-bracket/input.h>\n"
- "#include \"quote/input.h\"\n"
- "#include <a>\n",
- sort(Code, "input.cc", 1));
+ verifyFormat("#include <angle-bracket/input.h>\n"
+ "#include \"quote/input.h\"\n"
+ "#include <a>\n",
+ sort(Code, "input.cc", 1));
}
TEST_F(SortIncludesTest, MainIncludeCharAnyPickQuote) {
Style.MainIncludeChar = tooling::IncludeStyle::MICD_Any;
- EXPECT_EQ("#include \"input.h\"\n"
- "#include <a>\n"
- "#include <b>\n",
- sort("#include <a>\n"
- "#include \"input.h\"\n"
- "#include <b>\n",
- "input.cc", 1));
+ verifyFormat("#include \"input.h\"\n"
+ "#include <a>\n"
+ "#include <b>\n",
+ sort("#include <a>\n"
+ "#include \"input.h\"\n"
+ "#include <b>\n",
+ "input.cc", 1));
}
TEST_F(SortIncludesTest, MainIncludeCharAnyPickAngleBracket) {
Style.MainIncludeChar = tooling::IncludeStyle::MICD_Any;
- EXPECT_EQ("#include <input.h>\n"
- "#include <a>\n"
- "#include <b>\n",
- sort("#include <a>\n"
- "#include <input.h>\n"
- "#include <b>\n",
- "input.cc", 1));
+ verifyFormat("#include <input.h>\n"
+ "#include <a>\n"
+ "#include <b>\n",
+ sort("#include <a>\n"
+ "#include <input.h>\n"
+ "#include <b>\n",
+ "input.cc", 1));
}
TEST_F(SortIncludesTest, MainIncludeCharQuoteAndRegroup) {
@@ -1144,28 +1142,28 @@ TEST_F(SortIncludesTest, MainIncludeCharQuoteAndRegroup) {
Style.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
Style.MainIncludeChar = tooling::IncludeStyle::MICD_Quote;
- EXPECT_EQ("#include \"lib-b/input.h\"\n"
- "\n"
- "#include <lib-a/h-1.h>\n"
- "#include <lib-a/h-3.h>\n"
- "#include <lib-a/input.h>\n"
- "\n"
- "#include <lib-b/h-1.h>\n"
- "#include <lib-b/h-3.h>\n"
- "\n"
- "#include <lib-c/h-1.h>\n"
- "#include <lib-c/h-2.h>\n"
- "#include <lib-c/h-3.h>\n",
- sort("#include <lib-c/h-1.h>\n"
- "#include <lib-c/h-2.h>\n"
- "#include <lib-c/h-3.h>\n"
- "#include <lib-b/h-1.h>\n"
- "#include \"lib-b/input.h\"\n"
- "#include <lib-b/h-3.h>\n"
- "#include <lib-a/h-1.h>\n"
- "#include <lib-a/input.h>\n"
- "#include <lib-a/h-3.h>\n",
- "input.cc"));
+ verifyFormat("#include \"lib-b/input.h\"\n"
+ "\n"
+ "#include <lib-a/h-1.h>\n"
+ "#include <lib-a/h-3.h>\n"
+ "#include <lib-a/input.h>\n"
+ "\n"
+ "#include <lib-b/h-1.h>\n"
+ "#include <lib-b/h-3.h>\n"
+ "\n"
+ "#include <lib-c/h-1.h>\n"
+ "#include <lib-c/h-2.h>\n"
+ "#include <lib-c/h-3.h>\n",
+ sort("#include <lib-c/h-1.h>\n"
+ "#include <lib-c/h-2.h>\n"
+ "#include <lib-c/h-3.h>\n"
+ "#include <lib-b/h-1.h>\n"
+ "#include \"lib-b/input.h\"\n"
+ "#include <lib-b/h-3.h>\n"
+ "#include <lib-a/h-1.h>\n"
+ "#include <lib-a/input.h>\n"
+ "#include <lib-a/h-3.h>\n",
+ "input.cc"));
}
TEST_F(SortIncludesTest, MainIncludeCharAngleBracketAndRegroup) {
@@ -1174,60 +1172,60 @@ TEST_F(SortIncludesTest, MainIncludeCharAngleBracketAndRegroup) {
Style.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
Style.MainIncludeChar = tooling::IncludeStyle::MICD_AngleBracket;
- EXPECT_EQ("#include <lib-a/input.h>\n"
- "\n"
- "#include <lib-a/h-1.h>\n"
- "#include <lib-a/h-3.h>\n"
- "\n"
- "#include \"lib-b/input.h\"\n"
- "#include <lib-b/h-1.h>\n"
- "#include <lib-b/h-3.h>\n"
- "\n"
- "#include <lib-c/h-1.h>\n"
- "#include <lib-c/h-2.h>\n"
- "#include <lib-c/h-3.h>\n",
- sort("#include <lib-c/h-1.h>\n"
- "#include <lib-c/h-2.h>\n"
- "#include <lib-c/h-3.h>\n"
- "#include <lib-b/h-1.h>\n"
- "#include \"lib-b/input.h\"\n"
- "#include <lib-b/h-3.h>\n"
- "#include <lib-a/h-1.h>\n"
- "#include <lib-a/input.h>\n"
- "#include <lib-a/h-3.h>\n",
- "input.cc"));
+ verifyFormat("#include <lib-a/input.h>\n"
+ "\n"
+ "#include <lib-a/h-1.h>\n"
+ "#include <lib-a/h-3.h>\n"
+ "\n"
+ "#include \"lib-b/input.h\"\n"
+ "#include <lib-b/h-1.h>\n"
+ "#include <lib-b/h-3.h>\n"
+ "\n"
+ "#include <lib-c/h-1.h>\n"
+ "#include <lib-c/h-2.h>\n"
+ "#include <lib-c/h-3.h>\n",
+ sort("#include <lib-c/h-1.h>\n"
+ "#include <lib-c/h-2.h>\n"
+ "#include <lib-c/h-3.h>\n"
+ "#include <lib-b/h-1.h>\n"
+ "#include \"lib-b/input.h\"\n"
+ "#include <lib-b/h-3.h>\n"
+ "#include <lib-a/h-1.h>\n"
+ "#include <lib-a/input.h>\n"
+ "#include <lib-a/h-3.h>\n",
+ "input.cc"));
}
TEST_F(SortIncludesTest, DoNotRegroupGroupsInGoogleObjCStyle) {
FmtStyle = getGoogleStyle(FormatStyle::LK_ObjC);
- EXPECT_EQ("#include <a.h>\n"
- "#include <b.h>\n"
- "#include \"a.h\"",
- sort("#include <b.h>\n"
- "#include <a.h>\n"
- "#include \"a.h\""));
+ verifyFormat("#include <a.h>\n"
+ "#include <b.h>\n"
+ "#include \"a.h\"",
+ sort("#include <b.h>\n"
+ "#include <a.h>\n"
+ "#include \"a.h\""));
}
TEST_F(SortIncludesTest, DoNotTreatPrecompiledHeadersAsFirstBlock) {
Style.IncludeBlocks = Style.IBS_Merge;
- std::string Code = "#include \"d.h\"\r\n"
- "#include \"b.h\"\r\n"
- "#pragma hdrstop\r\n"
- "\r\n"
- "#include \"c.h\"\r\n"
- "#include \"a.h\"\r\n"
- "#include \"e.h\"\r\n";
-
- std::string Expected = "#include \"b.h\"\r\n"
- "#include \"d.h\"\r\n"
- "#pragma hdrstop\r\n"
- "\r\n"
- "#include \"e.h\"\r\n"
- "#include \"a.h\"\r\n"
- "#include \"c.h\"\r\n";
-
- EXPECT_EQ(Expected, sort(Code, "e.cpp", 2));
+ StringRef Code = "#include \"d.h\"\r\n"
+ "#include \"b.h\"\r\n"
+ "#pragma hdrstop\r\n"
+ "\r\n"
+ "#include \"c.h\"\r\n"
+ "#include \"a.h\"\r\n"
+ "#include \"e.h\"\r\n";
+
+ StringRef Expected = "#include \"b.h\"\r\n"
+ "#include \"d.h\"\r\n"
+ "#pragma hdrstop\r\n"
+ "\r\n"
+ "#include \"e.h\"\r\n"
+ "#include \"a.h\"\r\n"
+ "#include \"c.h\"\r\n";
+
+ verifyFormat(Expected, sort(Code, "e.cpp", 2));
Code = "#include \"d.h\"\n"
"#include \"b.h\"\n"
@@ -1245,59 +1243,59 @@ TEST_F(SortIncludesTest, DoNotTreatPrecompiledHeadersAsFirstBlock) {
"#include \"a.h\"\n"
"#include \"c.h\"\n";
- EXPECT_EQ(Expected, sort(Code, "e.cpp", 2));
+ verifyFormat(Expected, sort(Code, "e.cpp", 2));
}
TEST_F(SortIncludesTest, skipUTF8ByteOrderMarkMerge) {
Style.IncludeBlocks = Style.IBS_Merge;
- std::string Code = "\xEF\xBB\xBF#include \"d.h\"\r\n"
- "#include \"b.h\"\r\n"
- "\r\n"
- "#include \"c.h\"\r\n"
- "#include \"a.h\"\r\n"
- "#include \"e.h\"\r\n";
-
- std::string Expected = "\xEF\xBB\xBF#include \"e.h\"\r\n"
- "#include \"a.h\"\r\n"
- "#include \"b.h\"\r\n"
- "#include \"c.h\"\r\n"
- "#include \"d.h\"\r\n";
-
- EXPECT_EQ(Expected, sort(Code, "e.cpp", 1));
+ StringRef Code = "\xEF\xBB\xBF#include \"d.h\"\r\n"
+ "#include \"b.h\"\r\n"
+ "\r\n"
+ "#include \"c.h\"\r\n"
+ "#include \"a.h\"\r\n"
+ "#include \"e.h\"\r\n";
+
+ StringRef Expected = "\xEF\xBB\xBF#include \"e.h\"\r\n"
+ "#include \"a.h\"\r\n"
+ "#include \"b.h\"\r\n"
+ "#include \"c.h\"\r\n"
+ "#include \"d.h\"\r\n";
+
+ verifyFormat(Expected, sort(Code, "e.cpp", 1));
}
TEST_F(SortIncludesTest, skipUTF8ByteOrderMarkPreserve) {
Style.IncludeBlocks = Style.IBS_Preserve;
- std::string Code = "\xEF\xBB\xBF#include \"d.h\"\r\n"
- "#include \"b.h\"\r\n"
- "\r\n"
- "#include \"c.h\"\r\n"
- "#include \"a.h\"\r\n"
- "#include \"e.h\"\r\n";
-
- std::string Expected = "\xEF\xBB\xBF#include \"b.h\"\r\n"
- "#include \"d.h\"\r\n"
- "\r\n"
- "#include \"a.h\"\r\n"
- "#include \"c.h\"\r\n"
- "#include \"e.h\"\r\n";
-
- EXPECT_EQ(Expected, sort(Code, "e.cpp", 2));
+ StringRef Code = "\xEF\xBB\xBF#include \"d.h\"\r\n"
+ "#include \"b.h\"\r\n"
+ "\r\n"
+ "#include \"c.h\"\r\n"
+ "#include \"a.h\"\r\n"
+ "#include \"e.h\"\r\n";
+
+ StringRef Expected = "\xEF\xBB\xBF#include \"b.h\"\r\n"
+ "#include \"d.h\"\r\n"
+ "\r\n"
+ "#include \"a.h\"\r\n"
+ "#include \"c.h\"\r\n"
+ "#include \"e.h\"\r\n";
+
+ verifyFormat(Expected, sort(Code, "e.cpp", 2));
}
TEST_F(SortIncludesTest, MergeLines) {
Style.IncludeBlocks = Style.IBS_Merge;
- std::string Code = "#include \"c.h\"\r\n"
- "#include \"b\\\r\n"
- ".h\"\r\n"
- "#include \"a.h\"\r\n";
+ StringRef Code = "#include \"c.h\"\r\n"
+ "#include \"b\\\r\n"
+ ".h\"\r\n"
+ "#include \"a.h\"\r\n";
- std::string Expected = "#include \"a.h\"\r\n"
- "#include \"b\\\r\n"
- ".h\"\r\n"
- "#include \"c.h\"\r\n";
+ StringRef Expected = "#include \"a.h\"\r\n"
+ "#include \"b\\\r\n"
+ ".h\"\r\n"
+ "#include \"c.h\"\r\n";
- EXPECT_EQ(Expected, sort(Code, "a.cpp", 1));
+ verifyFormat(Expected, sort(Code, "a.cpp", 1));
}
TEST_F(SortIncludesTest, DisableFormatDisablesIncludeSorting) {
@@ -1305,154 +1303,154 @@ TEST_F(SortIncludesTest, DisableFormatDisablesIncludeSorting) {
"#include <b.h>\n";
StringRef Unsorted = "#include <b.h>\n"
"#include <a.h>\n";
- EXPECT_EQ(Sorted, sort(Unsorted));
+ verifyFormat(Sorted, sort(Unsorted));
FmtStyle.DisableFormat = true;
- EXPECT_EQ(Unsorted, sort(Unsorted, "input.cpp", 0));
+ verifyFormat(Unsorted, sort(Unsorted, "input.cpp", 0));
}
TEST_F(SortIncludesTest, DisableRawStringLiteralSorting) {
- EXPECT_EQ("const char *t = R\"(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")\";",
- sort("const char *t = R\"(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")\";",
- "test.cxx", 0));
- EXPECT_EQ("const char *t = R\"x(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")x\";",
- sort("const char *t = R\"x(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")x\";",
- "test.cxx", 0));
- EXPECT_EQ("const char *t = R\"xyz(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")xyz\";",
- sort("const char *t = R\"xyz(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")xyz\";",
- "test.cxx", 0));
-
- EXPECT_EQ("#include <a.h>\n"
- "#include <b.h>\n"
- "const char *t = R\"(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")\";\n"
- "#include <c.h>\n"
- "#include <d.h>\n"
- "const char *t = R\"x(\n"
- "#include <f.h>\n"
- "#include <e.h>\n"
- ")x\";\n"
- "#include <g.h>\n"
- "#include <h.h>\n"
- "const char *t = R\"xyz(\n"
- "#include <j.h>\n"
- "#include <i.h>\n"
- ")xyz\";\n"
- "#include <k.h>\n"
- "#include <l.h>",
- sort("#include <b.h>\n"
- "#include <a.h>\n"
- "const char *t = R\"(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")\";\n"
- "#include <d.h>\n"
- "#include <c.h>\n"
- "const char *t = R\"x(\n"
- "#include <f.h>\n"
- "#include <e.h>\n"
- ")x\";\n"
- "#include <h.h>\n"
- "#include <g.h>\n"
- "const char *t = R\"xyz(\n"
- "#include <j.h>\n"
- "#include <i.h>\n"
- ")xyz\";\n"
- "#include <l.h>\n"
- "#include <k.h>",
- "test.cc", 4));
-
- EXPECT_EQ("const char *t = R\"AMZ029amz(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")AMZ029amz\";",
- sort("const char *t = R\"AMZ029amz(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")AMZ029amz\";",
- "test.cxx", 0));
-
- EXPECT_EQ("const char *t = R\"-AMZ029amz(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")-AMZ029amz\";",
- sort("const char *t = R\"-AMZ029amz(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")-AMZ029amz\";",
- "test.cxx", 0));
-
- EXPECT_EQ("const char *t = R\"AMZ029amz-(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")AMZ029amz-\";",
- sort("const char *t = R\"AMZ029amz-(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")AMZ029amz-\";",
- "test.cxx", 0));
-
- EXPECT_EQ("const char *t = R\"AM|029amz-(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")AM|029amz-\";",
- sort("const char *t = R\"AM|029amz-(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")AM|029amz-\";",
- "test.cxx", 0));
-
- EXPECT_EQ("const char *t = R\"AM[029amz-(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")AM[029amz-\";",
- sort("const char *t = R\"AM[029amz-(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")AM[029amz-\";",
- "test.cxx", 0));
-
- EXPECT_EQ("const char *t = R\"AM]029amz-(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")AM]029amz-\";",
- sort("const char *t = R\"AM]029amz-(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")AM]029amz-\";",
- "test.cxx", 0));
+ verifyFormat("const char *t = R\"(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")\";",
+ sort("const char *t = R\"(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")\";",
+ "test.cxx", 0));
+ verifyFormat("const char *t = R\"x(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")x\";",
+ sort("const char *t = R\"x(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")x\";",
+ "test.cxx", 0));
+ verifyFormat("const char *t = R\"xyz(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")xyz\";",
+ sort("const char *t = R\"xyz(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")xyz\";",
+ "test.cxx", 0));
+
+ verifyFormat("#include <a.h>\n"
+ "#include <b.h>\n"
+ "const char *t = R\"(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")\";\n"
+ "#include <c.h>\n"
+ "#include <d.h>\n"
+ "const char *t = R\"x(\n"
+ "#include <f.h>\n"
+ "#include <e.h>\n"
+ ")x\";\n"
+ "#include <g.h>\n"
+ "#include <h.h>\n"
+ "const char *t = R\"xyz(\n"
+ "#include <j.h>\n"
+ "#include <i.h>\n"
+ ")xyz\";\n"
+ "#include <k.h>\n"
+ "#include <l.h>",
+ sort("#include <b.h>\n"
+ "#include <a.h>\n"
+ "const char *t = R\"(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")\";\n"
+ "#include <d.h>\n"
+ "#include <c.h>\n"
+ "const char *t = R\"x(\n"
+ "#include <f.h>\n"
+ "#include <e.h>\n"
+ ")x\";\n"
+ "#include <h.h>\n"
+ "#include <g.h>\n"
+ "const char *t = R\"xyz(\n"
+ "#include <j.h>\n"
+ "#include <i.h>\n"
+ ")xyz\";\n"
+ "#include <l.h>\n"
+ "#include <k.h>",
+ "test.cc", 4));
+
+ verifyFormat("const char *t = R\"AMZ029amz(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")AMZ029amz\";",
+ sort("const char *t = R\"AMZ029amz(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")AMZ029amz\";",
+ "test.cxx", 0));
+
+ verifyFormat("const char *t = R\"-AMZ029amz(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")-AMZ029amz\";",
+ sort("const char *t = R\"-AMZ029amz(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")-AMZ029amz\";",
+ "test.cxx", 0));
+
+ verifyFormat("const char *t = R\"AMZ029amz-(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")AMZ029amz-\";",
+ sort("const char *t = R\"AMZ029amz-(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")AMZ029amz-\";",
+ "test.cxx", 0));
+
+ verifyFormat("const char *t = R\"AM|029amz-(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")AM|029amz-\";",
+ sort("const char *t = R\"AM|029amz-(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")AM|029amz-\";",
+ "test.cxx", 0));
+
+ verifyFormat("const char *t = R\"AM[029amz-(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")AM[029amz-\";",
+ sort("const char *t = R\"AM[029amz-(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")AM[029amz-\";",
+ "test.cxx", 0));
+
+ verifyFormat("const char *t = R\"AM]029amz-(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")AM]029amz-\";",
+ sort("const char *t = R\"AM]029amz-(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")AM]029amz-\";",
+ "test.cxx", 0));
#define X "AMZ029amz{}+!%*=_:;',.<>|/?#~-$"
- EXPECT_EQ("const char *t = R\"" X "(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")" X "\";",
- sort("const char *t = R\"" X "(\n"
- "#include <b.h>\n"
- "#include <a.h>\n"
- ")" X "\";",
- "test.cxx", 0));
+ verifyFormat("const char *t = R\"" X "(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")" X "\";",
+ sort("const char *t = R\"" X "(\n"
+ "#include <b.h>\n"
+ "#include <a.h>\n"
+ ")" X "\";",
+ "test.cxx", 0));
#undef X
}
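The edits above are mechanical: verifyFormat(Expected, sort(Code, ...)) replaces EXPECT_EQ(Expected, sort(Code, ...)), and StringRef replaces std::string for the literal test inputs, avoiding a heap-allocated copy of each literal. A minimal sketch of the kind of fixture helper the new calls assume (hypothetical; the actual helper in SortIncludesTest.cpp may check more, for example that the expected text is stable under a second sort):

  // Hypothetical fixture helper, not the verbatim LLVM implementation.
  void verifyFormat(llvm::StringRef Expected, llvm::StringRef Result) {
    // The sorted result must match the golden text exactly.
    EXPECT_EQ(Expected.str(), Result.str());
  }

Called as verifyFormat(Expected, sort(Code, "e.cpp", 1)), this behaves exactly like the EXPECT_EQ it replaces while leaving a single place to add further checks later.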
diff --git a/clang/unittests/Format/TestLexer.h b/clang/unittests/Format/TestLexer.h
index 8b5949b32fc9..294d0106dbe2 100644
--- a/clang/unittests/Format/TestLexer.h
+++ b/clang/unittests/Format/TestLexer.h
@@ -28,7 +28,7 @@
namespace clang {
namespace format {
-typedef llvm::SmallVector<FormatToken *, 8> TokenList;
+typedef SmallVector<FormatToken *, 8> TokenList;
inline std::ostream &operator<<(std::ostream &Stream, const FormatToken &Tok) {
Stream << "(" << Tok.Tok.getName() << ", \"" << Tok.TokenText.str() << "\" , "
@@ -48,7 +48,7 @@ inline TokenList uneof(const TokenList &Tokens) {
return TokenList(Tokens.begin(), std::prev(Tokens.end()));
}
-inline std::string text(llvm::ArrayRef<FormatToken *> Tokens) {
+inline std::string text(ArrayRef<FormatToken *> Tokens) {
return std::accumulate(Tokens.begin(), Tokens.end(), std::string(),
[](const std::string &R, FormatToken *Tok) {
return (R + Tok->TokenText).str();
@@ -63,13 +63,13 @@ public:
: Allocator(Allocator), Buffers(Buffers), Style(Style),
SourceMgr("test.cpp", ""), IdentTable(getFormattingLangOpts(Style)) {}
- TokenList lex(llvm::StringRef Code) {
+ TokenList lex(StringRef Code) {
FormatTokenLexer Lex = getNewLexer(Code);
ArrayRef<FormatToken *> Result = Lex.lex();
return TokenList(Result.begin(), Result.end());
}
- TokenList annotate(llvm::StringRef Code) {
+ TokenList annotate(StringRef Code) {
FormatTokenLexer Lex = getNewLexer(Code);
auto Tokens = Lex.lex();
UnwrappedLineParser Parser(SourceMgr.get(), Style, Lex.getKeywords(), 0,
@@ -85,7 +85,7 @@ public:
return TokenList(Tokens.begin(), Tokens.end());
}
- FormatToken *id(llvm::StringRef Code) {
+ FormatToken *id(StringRef Code) {
auto Result = uneof(lex(Code));
assert(Result.size() == 1U && "Code must expand to 1 token.");
return Result[0];
@@ -100,7 +100,7 @@ protected:
FormatTokenLexer getNewLexer(StringRef Code) {
Buffers.push_back(
llvm::MemoryBuffer::getMemBufferCopy(Code, "<scratch space>"));
- clang::FileID FID =
+ FileID FID =
SourceMgr.get().createFileID(Buffers.back()->getMemBufferRef());
return FormatTokenLexer(SourceMgr.get(), FID, 0, Style, Encoding, Allocator,
IdentTable);
@@ -111,7 +111,7 @@ public:
std::vector<std::unique_ptr<llvm::MemoryBuffer>> &Buffers;
FormatStyle Style;
encoding::Encoding Encoding = encoding::Encoding_UTF8;
- clang::SourceManagerForFile SourceMgr;
+ SourceManagerForFile SourceMgr;
IdentifierTable IdentTable;
SmallVector<UnwrappedLine, 16> UnwrappedLines;
};
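The TestLexer.h cleanups drop llvm:: and clang:: qualifiers that are redundant inside namespace clang: clang/Basic/LLVM.h re-exports the common ADT names into the clang namespace, roughly as in this illustrative excerpt (not the full header):

  namespace clang {
  // Re-exported so clang code can spell these without the llvm:: prefix.
  using llvm::ArrayRef;
  using llvm::SmallVector;
  using llvm::StringRef;
  } // namespace clang

Inside namespace clang::format, plain StringRef, SmallVector, and ArrayRef therefore already resolve, and FileID and SourceManagerForFile are clang's own types, so the clang:: prefix was never needed.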
diff --git a/clang/unittests/Format/TokenAnnotatorTest.cpp b/clang/unittests/Format/TokenAnnotatorTest.cpp
index aadfa6dc0165..6ea9c4a241dc 100644
--- a/clang/unittests/Format/TokenAnnotatorTest.cpp
+++ b/clang/unittests/Format/TokenAnnotatorTest.cpp
@@ -24,9 +24,9 @@ static bool operator==(const FormatToken &LHS,
namespace {
-class TokenAnnotatorTest : public ::testing::Test {
+class TokenAnnotatorTest : public testing::Test {
protected:
- TokenList annotate(llvm::StringRef Code,
+ TokenList annotate(StringRef Code,
const FormatStyle &Style = getLLVMStyle()) {
return TestLexer(Allocator, Buffers, Style).annotate(Code);
}
@@ -2097,7 +2097,7 @@ TEST_F(TokenAnnotatorTest, UnderstandsAttributeMacrosOnObjCProperty) {
}
TEST_F(TokenAnnotatorTest, UnderstandsVerilogOperators) {
- auto Annotate = [this](llvm::StringRef Code) {
+ auto Annotate = [this](StringRef Code) {
return annotate(Code, getLLVMStyle(FormatStyle::LK_Verilog));
};
// Test that unary operators get labeled as such and that operators like '++'
@@ -2279,9 +2279,7 @@ TEST_F(TokenAnnotatorTest, UnderstandTableGenTokens) {
TestLexer Lexer(Allocator, Buffers, Style);
AdditionalKeywords Keywords(Lexer.IdentTable);
- auto Annotate = [&Lexer](llvm::StringRef Code) {
- return Lexer.annotate(Code);
- };
+ auto Annotate = [&Lexer](StringRef Code) { return Lexer.annotate(Code); };
// Additional keywords representation test.
auto Tokens = Annotate("def foo : Bar<1>;");
@@ -2357,7 +2355,7 @@ TEST_F(TokenAnnotatorTest, UnderstandTableGenTokens) {
Tokens = Annotate("!cond");
EXPECT_TOKEN(Tokens[0], tok::identifier, TT_TableGenCondOperator);
- auto AnnotateValue = [this, &Style](llvm::StringRef Code) {
+ auto AnnotateValue = [this, &Style](StringRef Code) {
// Values are annotated only in specific context.
auto Result = annotate(("def X { let V = " + Code + "; }").str(), Style);
return decltype(Result){Result.begin() + 6, Result.end() - 3};
@@ -2581,15 +2579,28 @@ TEST_F(TokenAnnotatorTest, UnderstandsLabels) {
auto Tokens = annotate("{ x: break; }");
ASSERT_EQ(Tokens.size(), 7u) << Tokens;
EXPECT_TOKEN(Tokens[2], tok::colon, TT_GotoLabelColon);
+
Tokens = annotate("{ case x: break; }");
ASSERT_EQ(Tokens.size(), 8u) << Tokens;
EXPECT_TOKEN(Tokens[3], tok::colon, TT_CaseLabelColon);
+
Tokens = annotate("{ x: { break; } }");
ASSERT_EQ(Tokens.size(), 9u) << Tokens;
EXPECT_TOKEN(Tokens[2], tok::colon, TT_GotoLabelColon);
+
Tokens = annotate("{ case x: { break; } }");
ASSERT_EQ(Tokens.size(), 10u) << Tokens;
EXPECT_TOKEN(Tokens[3], tok::colon, TT_CaseLabelColon);
+
+ Tokens = annotate("#define FOO label:");
+ ASSERT_EQ(Tokens.size(), 6u) << Tokens;
+ EXPECT_TOKEN(Tokens[4], tok::colon, TT_GotoLabelColon);
+
+ Tokens = annotate("#define FOO \\\n"
+ "label: \\\n"
+ " break;");
+ ASSERT_EQ(Tokens.size(), 8u) << Tokens;
+ EXPECT_TOKEN(Tokens[4], tok::colon, TT_GotoLabelColon);
}
TEST_F(TokenAnnotatorTest, UnderstandsNestedBlocks) {
@@ -2649,7 +2660,7 @@ TEST_F(TokenAnnotatorTest, UnderstandDesignatedInitializers) {
}
TEST_F(TokenAnnotatorTest, UnderstandsJavaScript) {
- auto Annotate = [this](llvm::StringRef Code) {
+ auto Annotate = [this](StringRef Code) {
return annotate(Code, getLLVMStyle(FormatStyle::LK_JavaScript));
};
@@ -3004,6 +3015,60 @@ TEST_F(TokenAnnotatorTest, SwitchExpression) {
EXPECT_TOKEN(Tokens[20], tok::arrow, TT_CaseLabelArrow);
}
+TEST_F(TokenAnnotatorTest, CppAltOperatorKeywords) {
+ auto Tokens = annotate("a = b and c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::ampamp, TT_BinaryOperator);
+
+ Tokens = annotate("a = b and_eq c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::ampequal, TT_BinaryOperator);
+
+ Tokens = annotate("a = b bitand c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::amp, TT_BinaryOperator);
+
+ Tokens = annotate("a = b bitor c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::pipe, TT_BinaryOperator);
+
+ Tokens = annotate("a = b compl c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::tilde, TT_UnaryOperator);
+
+ Tokens = annotate("a = b not c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::exclaim, TT_UnaryOperator);
+
+ Tokens = annotate("a = b not_eq c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::exclaimequal, TT_BinaryOperator);
+
+ Tokens = annotate("a = b or c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::pipepipe, TT_BinaryOperator);
+
+ Tokens = annotate("a = b or_eq c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::pipeequal, TT_BinaryOperator);
+
+ Tokens = annotate("a = b xor c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::caret, TT_BinaryOperator);
+
+ Tokens = annotate("a = b xor_eq c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::caretequal, TT_BinaryOperator);
+
+ Tokens = annotate("xor = foo;");
+ ASSERT_EQ(Tokens.size(), 5u);
+ EXPECT_TOKEN(Tokens[0], tok::identifier, TT_Unknown);
+
+ Tokens = annotate("int xor = foo;");
+ ASSERT_EQ(Tokens.size(), 6u);
+ EXPECT_TOKEN(Tokens[1], tok::identifier, TT_StartOfName);
+}
+
} // namespace
} // namespace format
} // namespace clang
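The new CppAltOperatorKeywords test pins down annotation of C++'s alternative tokens ([lex.digraph]), which map to the primary spellings the EXPECT_TOKEN lines name: and -> &&, and_eq -> &=, bitand -> &, bitor -> |, compl -> ~, not -> !, not_eq -> !=, or -> ||, or_eq -> |=, xor -> ^, xor_eq -> ^=. The last two cases guard the other direction: a token such as xor appearing where a variable name belongs must keep its identifier annotation. A small standalone illustration:

  // Alternative tokens are real operators in C++; both sides are equivalent.
  bool f(bool a, bool b) {
    return (a and not b) == (a && !b); // always true
  }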
diff --git a/clang/unittests/Format/UsingDeclarationsSorterTest.cpp b/clang/unittests/Format/UsingDeclarationsSorterTest.cpp
index c0c0de7076fe..ddcecc0208b0 100644
--- a/clang/unittests/Format/UsingDeclarationsSorterTest.cpp
+++ b/clang/unittests/Format/UsingDeclarationsSorterTest.cpp
@@ -17,22 +17,22 @@ namespace clang {
namespace format {
namespace {
-class UsingDeclarationsSorterTest : public ::testing::Test {
+class UsingDeclarationsSorterTest : public testing::Test {
protected:
- std::string sortUsingDeclarations(llvm::StringRef Code,
+ std::string sortUsingDeclarations(StringRef Code,
const std::vector<tooling::Range> &Ranges,
const FormatStyle &Style = getLLVMStyle()) {
LLVM_DEBUG(llvm::errs() << "---\n");
LLVM_DEBUG(llvm::errs() << Code << "\n\n");
tooling::Replacements Replaces =
- clang::format::sortUsingDeclarations(Style, Code, Ranges, "<stdin>");
+ format::sortUsingDeclarations(Style, Code, Ranges, "<stdin>");
auto Result = applyAllReplacements(Code, Replaces);
EXPECT_TRUE(static_cast<bool>(Result));
LLVM_DEBUG(llvm::errs() << "\n" << *Result << "\n\n");
return *Result;
}
- std::string sortUsingDeclarations(llvm::StringRef Code,
+ std::string sortUsingDeclarations(StringRef Code,
const FormatStyle &Style = getLLVMStyle()) {
return sortUsingDeclarations(Code,
/*Ranges=*/{1, tooling::Range(0, Code.size())},
diff --git a/clang/unittests/Interpreter/CMakeLists.txt b/clang/unittests/Interpreter/CMakeLists.txt
index e5a77e77de75..c0fd2d8f3777 100644
--- a/clang/unittests/Interpreter/CMakeLists.txt
+++ b/clang/unittests/Interpreter/CMakeLists.txt
@@ -29,3 +29,46 @@ if(NOT WIN32)
endif()
export_executable_symbols(ClangReplInterpreterTests)
+
+if(MSVC)
+ set_target_properties(ClangReplInterpreterTests PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS 1)
+
+ # RTTI/C++ symbols
+ set(ClangReplInterpreterTests_exports ${ClangReplInterpreterTests_exports} ??_7type_info@@6B@
+ ?__type_info_root_node@@3U__type_info_node@@A
+ ?nothrow@std@@3Unothrow_t@1@B
+ )
+
+ # Compiler-added symbols for static variables. Not needed for Visual Studio < 2015
+ set(ClangReplInterpreterTests_exports ${ClangReplInterpreterTests_exports} _Init_thread_abort _Init_thread_epoch
+ _Init_thread_footer _Init_thread_header _tls_index
+ )
+
+ if(CMAKE_SIZEOF_VOID_P EQUAL 8)
+ # new/delete variants needed when linking to static msvc runtime (esp. Debug)
+ set(ClangReplInterpreterTests_exports ${ClangReplInterpreterTests_exports}
+ ??2@YAPEAX_K@Z
+ ??3@YAXPEAX@Z
+ ??_U@YAPEAX_K@Z
+ ??_V@YAXPEAX@Z
+ ??3@YAXPEAX_K@Z
+ )
+ else()
+ set(ClangReplInterpreterTests_exports ${ClangReplInterpreterTests_exports}
+ ??2@YAPAXI@Z
+ ??3@YAXPAX@Z
+ ??3@YAXPAXI@Z
+ ??_U@YAPAXI@Z
+ ??_V@YAXPAX@Z
+ ??_V@YAXPAXI@Z
+ )
+ endif()
+
+ # Expand the list into '/EXPORT:sym0 /EXPORT:sym1 /EXPORT:sym2 ...'
+ foreach(sym ${ClangReplInterpreterTests_exports})
+ set(ClangReplInterpreterTests_link_str "${ClangReplInterpreterTests_link_str} /EXPORT:${sym}")
+ endforeach(sym ${ClangReplInterpreterTests_exports})
+
+ set_property(TARGET ClangReplInterpreterTests APPEND_STRING PROPERTY LINK_FLAGS ${ClangReplInterpreterTests_link_str})
+
+endif(MSVC)
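
For orientation, the decorated names added to the export list are the MSVC manglings of the RTTI machinery and the operator new/delete family. A hedged sketch of the 64-bit names' undecorated forms, as `undname` would roughly report them (treat the mapping as illustrative):

```cpp
// Approximate undecorated signatures for the x64 export names above.
#include <cstddef>

void *operator new(std::size_t);                    // ??2@YAPEAX_K@Z
void operator delete(void *) noexcept;              // ??3@YAXPEAX@Z
void operator delete(void *, std::size_t) noexcept; // ??3@YAXPEAX_K@Z (sized)
void *operator new[](std::size_t);                  // ??_U@YAPEAX_K@Z
void operator delete[](void *) noexcept;            // ??_V@YAXPEAX@Z
```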
diff --git a/clang/unittests/StaticAnalyzer/CallEventTest.cpp b/clang/unittests/StaticAnalyzer/CallEventTest.cpp
index adbfe02a284d..7c4132788ca7 100644
--- a/clang/unittests/StaticAnalyzer/CallEventTest.cpp
+++ b/clang/unittests/StaticAnalyzer/CallEventTest.cpp
@@ -76,7 +76,7 @@ TEST(CXXDeallocatorCall, SimpleDestructor) {
}
)",
Diags));
- EXPECT_EQ(Diags, "test.CXXDeallocator: NumArgs: 1\n");
+ EXPECT_EQ(Diags, "test.CXXDeallocator: NumArgs: 2\n");
}
} // namespace
diff --git a/clang/unittests/StaticAnalyzer/MemRegionDescriptiveNameTest.cpp b/clang/unittests/StaticAnalyzer/MemRegionDescriptiveNameTest.cpp
index 03aee56a200f..b13e7123ee52 100644
--- a/clang/unittests/StaticAnalyzer/MemRegionDescriptiveNameTest.cpp
+++ b/clang/unittests/StaticAnalyzer/MemRegionDescriptiveNameTest.cpp
@@ -90,7 +90,7 @@ void reportDescriptiveName(int *p);
extern int* ptr;
extern int array[3];
void top() {
- reportDescriptiveName(&array[(long)ptr]);
+ reportDescriptiveName(&array[(long long)ptr]);
})cpp";
std::string Output;
diff --git a/clang/utils/ClangVisualizers/CMakeLists.txt b/clang/utils/ClangVisualizers/CMakeLists.txt
index 16d118a421ba..c047419050d1 100644
--- a/clang/utils/ClangVisualizers/CMakeLists.txt
+++ b/clang/utils/ClangVisualizers/CMakeLists.txt
@@ -3,5 +3,5 @@
if (LLVM_ADD_NATIVE_VISUALIZERS_TO_SOLUTION)
set(CLANG_VISUALIZERS clang.natvis)
add_custom_target(ClangVisualizers SOURCES ${CLANG_VISUALIZERS})
- set_target_properties(ClangVisualizers PROPERTIES FOLDER "Utils")
+ set_target_properties(ClangVisualizers PROPERTIES FOLDER "Clang/Misc")
endif()
diff --git a/clang/utils/TableGen/CMakeLists.txt b/clang/utils/TableGen/CMakeLists.txt
index 2ca4a96cadb6..5b072a1ac196 100644
--- a/clang/utils/TableGen/CMakeLists.txt
+++ b/clang/utils/TableGen/CMakeLists.txt
@@ -27,5 +27,3 @@ add_tablegen(clang-tblgen CLANG
)
target_link_libraries(clang-tblgen PRIVATE clangSupport_tablegen)
-
-set_target_properties(clang-tblgen PROPERTIES FOLDER "Clang tablegenning")
diff --git a/clang/utils/TableGen/ClangAttrEmitter.cpp b/clang/utils/TableGen/ClangAttrEmitter.cpp
index aafbf1f40949..ca7630adfbb7 100644
--- a/clang/utils/TableGen/ClangAttrEmitter.cpp
+++ b/clang/utils/TableGen/ClangAttrEmitter.cpp
@@ -1845,7 +1845,7 @@ static LateAttrParseKind getLateAttrParseKind(const Record *Attr) {
PrintFatalError(Attr, "Field `" + llvm::Twine(LateParsedStr) +
"`should only have one super class");
- if (SuperClasses[0]->getName().compare(LateAttrParseKindStr) != 0)
+ if (SuperClasses[0]->getName() != LateAttrParseKindStr)
PrintFatalError(Attr, "Field `" + llvm::Twine(LateParsedStr) +
"`should only have type `" +
llvm::Twine(LateAttrParseKindStr) +
diff --git a/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp b/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
index 4512acfd19a1..f564689fff7c 100644
--- a/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
+++ b/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
@@ -1086,7 +1086,7 @@ Piece *DiagnosticTextBuilder::DiagText::parseDiagText(StringRef &Text,
PluralPiece *Plural = New<PluralPiece>();
do {
Text = Text.drop_front(); // '{' or '|'
- size_t End = Text.find_first_of(":");
+ size_t End = Text.find_first_of(':');
if (End == StringRef::npos)
Builder.PrintFatalError("expected ':' while parsing %plural");
++End;
diff --git a/clang/utils/TableGen/SveEmitter.cpp b/clang/utils/TableGen/SveEmitter.cpp
index 3ddfd3277b68..e77d80623e84 100644
--- a/clang/utils/TableGen/SveEmitter.cpp
+++ b/clang/utils/TableGen/SveEmitter.cpp
@@ -1385,17 +1385,14 @@ void SVEEmitter::createHeader(raw_ostream &OS) {
SVEType ToV(To.BaseType, N);
for (const ReinterpretTypeInfo &From : Reinterprets) {
SVEType FromV(From.BaseType, N);
- if (ShortForm) {
- OS << "__aio __attribute__((target(\"sve\"))) " << ToV.str()
- << " svreinterpret_" << To.Suffix;
- OS << "(" << FromV.str() << " op) __arm_streaming_compatible {\n";
- OS << " return __builtin_sve_reinterpret_" << To.Suffix << "_"
- << From.Suffix << Suffix << "(op);\n";
- OS << "}\n\n";
- } else
- OS << "#define svreinterpret_" << To.Suffix << "_" << From.Suffix
- << Suffix << "(...) __builtin_sve_reinterpret_" << To.Suffix
- << "_" << From.Suffix << Suffix << "(__VA_ARGS__)\n";
+ OS << "__aio "
+ "__attribute__((__clang_arm_builtin_alias(__builtin_sve_"
+ "reinterpret_"
+ << To.Suffix << "_" << From.Suffix << Suffix << ")))\n"
+ << ToV.str() << " svreinterpret_" << To.Suffix;
+ if (!ShortForm)
+ OS << "_" << From.Suffix << Suffix;
+ OS << "(" << FromV.str() << " op);\n";
}
}
}
@@ -1453,7 +1450,7 @@ void SVEEmitter::createBuiltins(raw_ostream &OS) {
SVEType FromV(From.BaseType, N);
OS << "TARGET_BUILTIN(__builtin_sve_reinterpret_" << To.Suffix << "_"
<< From.Suffix << Suffix << +", \"" << ToV.builtin_str()
- << FromV.builtin_str() << "\", \"n\", \"sve\")\n";
+ << FromV.builtin_str() << "\", \"n\", \"sme|sve\")\n";
}
}
}
diff --git a/clang/utils/analyzer/entrypoint.py b/clang/utils/analyzer/entrypoint.py
index ff877060bad6..4deb42db0a0b 100644
--- a/clang/utils/analyzer/entrypoint.py
+++ b/clang/utils/analyzer/entrypoint.py
@@ -54,7 +54,7 @@ CMAKE_COMMAND = (
"cmake -G Ninja -DCMAKE_BUILD_TYPE=Release "
"-DCMAKE_INSTALL_PREFIX=/analyzer -DLLVM_TARGETS_TO_BUILD=X86 "
'-DLLVM_ENABLE_PROJECTS="clang;openmp" -DLLVM_BUILD_RUNTIME=OFF '
- "-DLLVM_ENABLE_TERMINFO=OFF -DCLANG_ENABLE_ARCMT=OFF "
+ "-DCLANG_ENABLE_ARCMT=OFF "
"-DCLANG_ENABLE_STATIC_ANALYZER=ON"
)
diff --git a/clang/utils/ci/buildkite-pipeline.yml b/clang/utils/ci/buildkite-pipeline.yml
deleted file mode 100644
index 7a679176038c..000000000000
--- a/clang/utils/ci/buildkite-pipeline.yml
+++ /dev/null
@@ -1,105 +0,0 @@
-#===----------------------------------------------------------------------===##
-#
-# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-# See https://llvm.org/LICENSE.txt for license information.
-# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-#
-#===----------------------------------------------------------------------===##
-
-#
-# This file describes the various pre-commit CI bots used to test Clang against
-# libc++ under various configurations. Unlike the usual libc++ CI pipeline,
-# which aims to test libc++ itself, this pipeline aims to test Clang by
-# compiling libc++ and running its test suite against the just-built Clang,
-# in various configurations.
-#
-env:
- # LLVM RELEASE bump version
- LLVM_HEAD_VERSION: "17"
-steps:
- - label: "Format"
- commands:
- - "clang/utils/ci/run-buildbot check-format"
- agents:
- queue: "linux"
- retry:
- automatic:
- - exit_status: -1 # Agent was lost
- limit: 2
- timeout_in_minutes: 120
-
- - label: "Building and testing clang (Linux)"
- commands:
- - "clang/utils/ci/run-buildbot build-clang"
- agents:
- queue: "linux"
- retry:
- automatic:
- - exit_status: -1 # Agent was lost
- limit: 2
- timeout_in_minutes: 120
-
- - label: "Building and testing clang (Windows)"
- commands:
- - "C:\\BuildTools\\Common7\\Tools\\VsDevCmd.bat -arch=amd64 -host_arch=amd64"
- - "bash clang/utils/ci/run-buildbot build-clang-windows"
- agents:
- queue: "windows"
- retry:
- automatic:
- - exit_status: -1 # Agent was lost
- limit: 2
- timeout_in_minutes: 120
-
- - wait
-
- - label: "Running libc++ test suite in C++03"
- commands:
- - "clang/utils/ci/run-buildbot generic-cxx03"
- artifact_paths:
- - "**/test-results.xml"
- - "**/crash_diagnostics/*"
- env:
- LLVM_SYMBOLIZER_PATH: "/usr/bin/llvm-symbolizer-${LLVM_HEAD_VERSION}" # TODO: Should we build that from scratch?
- CLANG_CRASH_DIAGNOSTICS_DIR: "crash_diagnostics"
- agents:
- queue: "linux"
- retry:
- automatic:
- - exit_status: -1 # Agent was lost
- limit: 2
- timeout_in_minutes: 120
-
- - label: "Running libc++ test suite in C++26"
- commands:
- - "clang/utils/ci/run-buildbot generic-cxx26"
- artifact_paths:
- - "**/test-results.xml"
- - "**/crash_diagnostics/*"
- env:
- LLVM_SYMBOLIZER_PATH: "/usr/bin/llvm-symbolizer-${LLVM_HEAD_VERSION}" # TODO: Should we build that from scratch?
- CLANG_CRASH_DIAGNOSTICS_DIR: "crash_diagnostics"
- agents:
- queue: "linux"
- retry:
- automatic:
- - exit_status: -1 # Agent was lost
- limit: 2
- timeout_in_minutes: 120
-
- - label: "Running libc++ test suite with Clang Modules"
- commands:
- - "clang/utils/ci/run-buildbot generic-modules"
- artifact_paths:
- - "**/test-results.xml"
- - "**/crash_diagnostics/*"
- env:
- LLVM_SYMBOLIZER_PATH: "/usr/bin/llvm-symbolizer-${LLVM_HEAD_VERSION}" # TODO: Should we build that from scratch?
- CLANG_CRASH_DIAGNOSTICS_DIR: "crash_diagnostics"
- agents:
- queue: "linux"
- retry:
- automatic:
- - exit_status: -1 # Agent was lost
- limit: 2
- timeout_in_minutes: 120
diff --git a/clang/utils/ci/run-buildbot b/clang/utils/ci/run-buildbot
index f47ffb5cbd38..c68ddad571f3 100755
--- a/clang/utils/ci/run-buildbot
+++ b/clang/utils/ci/run-buildbot
@@ -69,13 +69,6 @@ cmake --version
ninja --version
case "${BUILDER}" in
-check-format)
- echo "*** Checking for trailing whitespace left in Clang source files ***"
- if grep -rnI '[[:blank:]]$' clang/lib clang/include clang/docs; then
- echo "*** Trailing whitespace has been found in Clang source files as described above ***"
- exit 1
- fi
-;;
build-clang)
mkdir install
# We use Release here to avoid including debug information. Otherwise, the
@@ -90,29 +83,13 @@ build-clang)
-DCMAKE_CXX_COMPILER_LAUNCHER="ccache" \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=install \
+ -DLLVM_TARGETS_TO_BUILD=Native \
-DLLVM_ENABLE_PROJECTS="clang;compiler-rt" \
ninja -C ${BUILD_DIR} install-clang install-clang-resource-headers
ccache -s
tar -cJvf install.tar.xz install/
buildkite-agent artifact upload --debug install.tar.xz
-
- ninja -C ${BUILD_DIR} check-clang
-;;
-build-clang-windows)
- cmake -S llvm -B ${BUILD_DIR} -G Ninja \
- -D CMAKE_C_COMPILER_LAUNCHER=sccache \
- -D CMAKE_CXX_COMPILER_LAUNCHER=sccache \
- -D CMAKE_BUILD_TYPE=Release \
- -D CMAKE_INSTALL_PREFIX=install-windows \
- -D LLVM_ENABLE_PROJECTS="clang;compiler-rt" \
- -D LLVM_ENABLE_ASSERTIONS=ON \
- -D LLVM_BUILD_EXAMPLES=ON \
- -D COMPILER_RT_BUILD_LIBFUZZER=OFF \
- -D COMPILER_RT_BUILD_ORC=OFF
-
- ninja -C ${BUILD_DIR} install-clang install-clang-resource-headers
- ninja -C ${BUILD_DIR} check-clang
;;
generic-cxx03)
buildkite-agent artifact download install.tar.xz .
diff --git a/clang/utils/hmaptool/CMakeLists.txt b/clang/utils/hmaptool/CMakeLists.txt
index 511268069bd1..02b29e4fba2f 100644
--- a/clang/utils/hmaptool/CMakeLists.txt
+++ b/clang/utils/hmaptool/CMakeLists.txt
@@ -1,6 +1,6 @@
install(PROGRAMS hmaptool DESTINATION "${CLANG_TOOLS_INSTALL_DIR}" COMPONENT hmaptool)
add_custom_target(hmaptool ALL DEPENDS "hmaptool")
-set_target_properties(hmaptool PROPERTIES FOLDER "Utils")
+set_target_properties(hmaptool PROPERTIES FOLDER "Clang/Utils")
if(NOT LLVM_ENABLE_IDE)
add_llvm_install_targets("install-hmaptool"
diff --git a/clang/www/cxx_dr_status.html b/clang/www/cxx_dr_status.html
index 5d517d358672..4cce88fe0490 100755
--- a/clang/www/cxx_dr_status.html
+++ b/clang/www/cxx_dr_status.html
@@ -10698,7 +10698,7 @@ and <I>POD class</I></td>
<td><a href="https://cplusplus.github.io/CWG/issues/1815.html">1815</a></td>
<td>CD4</td>
<td>Lifetime extension in aggregate initialization</td>
- <td class="none" align="center">No</td>
+ <td class="unreleased" align="center">Clang 19</td>
</tr>
<tr id="1816">
<td><a href="https://cplusplus.github.io/CWG/issues/1816.html">1816</a></td>
@@ -12890,11 +12890,11 @@ and <I>POD class</I></td>
<td>Virtual bases in destructors and defaulted assignment operators</td>
<td class="full" align="center">Yes</td>
</tr>
- <tr class="open" id="2181">
+ <tr id="2181">
<td><a href="https://cplusplus.github.io/CWG/issues/2181.html">2181</a></td>
- <td>drafting</td>
+ <td>C++20</td>
<td>Normative requirements in an informative Annex</td>
- <td align="center">Not resolved</td>
+ <td class="unknown" align="center">Unknown</td>
</tr>
<tr class="open" id="2182">
<td><a href="https://cplusplus.github.io/CWG/issues/2182.html">2182</a></td>
@@ -17021,13 +17021,13 @@ objects</td>
</tr>
<tr class="open" id="2869">
<td><a href="https://cplusplus.github.io/CWG/issues/2869.html">2869</a></td>
- <td>review</td>
+ <td>tentatively ready</td>
<td><TT>this</TT> in local classes</td>
<td align="center">Not resolved</td>
</tr>
<tr class="open" id="2870">
<td><a href="https://cplusplus.github.io/CWG/issues/2870.html">2870</a></td>
- <td>review</td>
+ <td>tentatively ready</td>
<td>Combining absent <I>encoding-prefix</I>es</td>
<td align="center">Not resolved</td>
</tr>
@@ -17039,7 +17039,7 @@ objects</td>
</tr>
<tr class="open" id="2872">
<td><a href="https://cplusplus.github.io/CWG/issues/2872.html">2872</a></td>
- <td>open</td>
+ <td>tentatively ready</td>
<td>Linkage and unclear "can be referred to"</td>
<td align="center">Not resolved</td>
</tr>
@@ -17051,25 +17051,25 @@ objects</td>
</tr>
<tr class="open" id="2874">
<td><a href="https://cplusplus.github.io/CWG/issues/2874.html">2874</a></td>
- <td>open</td>
+ <td>tentatively ready</td>
<td>Qualified declarations of partial specializations</td>
<td align="center">Not resolved</td>
</tr>
<tr class="open" id="2875">
<td><a href="https://cplusplus.github.io/CWG/issues/2875.html">2875</a></td>
- <td>open</td>
- <td>Missing support for round-tripping nullptr through indirection/address operators</td>
+ <td>tentatively ready</td>
+ <td>Missing support for round-tripping null pointer values through indirection/address operators</td>
<td align="center">Not resolved</td>
</tr>
<tr class="open" id="2876">
<td><a href="https://cplusplus.github.io/CWG/issues/2876.html">2876</a></td>
- <td>open</td>
+ <td>tentatively ready</td>
<td>Disambiguation of <TT>T x = delete("text")</TT></td>
<td align="center">Not resolved</td>
</tr>
<tr class="open" id="2877">
<td><a href="https://cplusplus.github.io/CWG/issues/2877.html">2877</a></td>
- <td>open</td>
+ <td>tentatively ready</td>
<td>Type-only lookup for <I>using-enum-declarator</I></td>
<td align="center">Not resolved</td>
</tr>
@@ -17093,9 +17093,9 @@ objects</td>
</tr>
<tr class="open" id="2881">
<td><a href="https://cplusplus.github.io/CWG/issues/2881.html">2881</a></td>
- <td>open</td>
+ <td>tentatively ready</td>
<td>Type restrictions for the explicit object parameter of a lambda</td>
- <td align="center">Not resolved</td>
+ <td title="Clang 19 implements 2024-04-19 resolution" align="center">Not Resolved*</td>
</tr>
<tr class="open" id="2882">
<td><a href="https://cplusplus.github.io/CWG/issues/2882.html">2882</a></td>
@@ -17109,15 +17109,15 @@ objects</td>
<td>Definition of "odr-usable" ignores lambda scopes</td>
<td align="center">Not resolved</td>
</tr>
- <tr class="open" id="2884">
+ <tr id="2884">
<td><a href="https://cplusplus.github.io/CWG/issues/2884.html">2884</a></td>
- <td>open</td>
+ <td>dup</td>
<td>Qualified declarations of partial specializations</td>
- <td align="center">Not resolved</td>
+ <td class="unknown" align="center">Unknown</td>
</tr>
<tr class="open" id="2885">
<td><a href="https://cplusplus.github.io/CWG/issues/2885.html">2885</a></td>
- <td>open</td>
+ <td>review</td>
<td>Non-eligible trivial default constructors</td>
<td align="center">Not resolved</td>
</tr>
@@ -17150,6 +17150,12 @@ objects</td>
<td>open</td>
<td>Defining members of local classes</td>
<td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="2891">
+ <td><a href="https://cplusplus.github.io/CWG/issues/2891.html">2891</a></td>
+ <td>review</td>
+ <td>Normative status of implementation limits</td>
+ <td align="center">Not resolved</td>
</tr></table>
</div>
diff --git a/clang/www/cxx_status.html b/clang/www/cxx_status.html
index a11bf9a06f9f..45416170b16e 100755
--- a/clang/www/cxx_status.html
+++ b/clang/www/cxx_status.html
@@ -1255,12 +1255,11 @@ version.
</table>
<p>
-<span id="n3778">(7): In Clang 3.7 and later, sized deallocation is only enabled
-if the user passes the <code>-fsized-deallocation</code> flag. The user must
-supply definitions of the sized deallocation functions, either by providing them
-explicitly or by using a C++ standard library that does. <code>libstdc++</code>
-added these functions in version 5.0, and <code>libc++</code> added them in
-version 3.7.
+<span id="n3778">(7): The user must supply definitions of the sized deallocation
+ functions, either by providing them explicitly or by using a C++ standard library
+ that does. <code>libstdc++</code> added these functions in version 5.0, and
+ <code>libc++</code> added them in version 3.7. The user can also use the
+ <code>-fno-sized-deallocation</code> option to disable sized deallocation.
</span>
</p>
</details>
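
Since the revised footnote now leads with the requirement to supply the sized deallocation functions, a minimal sketch of what user-provided definitions can look like (a conforming implementation may simply ignore the size hint):

```cpp
#include <cstddef>
#include <cstdlib>

// User-supplied sized deallocation for toolchains whose C++ standard
// library predates these functions; forwarding to free() drops the hint.
void operator delete(void *p, std::size_t) noexcept { std::free(p); }
void operator delete[](void *p, std::size_t) noexcept { std::free(p); }
```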
diff --git a/compiler-rt/cmake/config-ix.cmake b/compiler-rt/cmake/config-ix.cmake
index ba740af9e1d6..bddaa37579fd 100644
--- a/compiler-rt/cmake/config-ix.cmake
+++ b/compiler-rt/cmake/config-ix.cmake
@@ -127,6 +127,7 @@ check_cxx_compiler_flag("-Werror -Wframe-larger-than=512" COMPILER_RT_HAS_WFRAME
check_cxx_compiler_flag("-Werror -Wglobal-constructors" COMPILER_RT_HAS_WGLOBAL_CONSTRUCTORS_FLAG)
check_cxx_compiler_flag("-Werror -Wc99-extensions" COMPILER_RT_HAS_WC99_EXTENSIONS_FLAG)
check_cxx_compiler_flag("-Werror -Wgnu" COMPILER_RT_HAS_WGNU_FLAG)
+check_cxx_compiler_flag("-Werror -Wgnu-anonymous-struct" COMPILER_RT_HAS_WGNU_ANONYMOUS_STRUCT_FLAG)
check_cxx_compiler_flag("-Werror -Wvariadic-macros" COMPILER_RT_HAS_WVARIADIC_MACROS_FLAG)
check_cxx_compiler_flag("-Werror -Wunused-parameter" COMPILER_RT_HAS_WUNUSED_PARAMETER_FLAG)
check_cxx_compiler_flag("-Werror -Wcovered-switch-default" COMPILER_RT_HAS_WCOVERED_SWITCH_DEFAULT_FLAG)
@@ -181,21 +182,6 @@ check_library_exists(m pow "" COMPILER_RT_HAS_LIBM)
check_library_exists(pthread pthread_create "" COMPILER_RT_HAS_LIBPTHREAD)
check_library_exists(execinfo backtrace "" COMPILER_RT_HAS_LIBEXECINFO)
-# Look for terminfo library, used in unittests that depend on LLVMSupport.
-if(LLVM_ENABLE_TERMINFO STREQUAL FORCE_ON)
- set(MAYBE_REQUIRED REQUIRED)
-else()
- set(MAYBE_REQUIRED)
-endif()
-if(LLVM_ENABLE_TERMINFO)
- find_library(COMPILER_RT_TERMINFO_LIB NAMES terminfo tinfo curses ncurses ncursesw ${MAYBE_REQUIRED})
-endif()
-if(COMPILER_RT_TERMINFO_LIB)
- set(LLVM_ENABLE_TERMINFO 1)
-else()
- set(LLVM_ENABLE_TERMINFO 0)
-endif()
-
if (ANDROID AND COMPILER_RT_HAS_LIBDL)
# Android's libstdc++ has a dependency on libdl.
list(APPEND CMAKE_REQUIRED_LIBRARIES dl)
diff --git a/compiler-rt/lib/ctx_profile/CMakeLists.txt b/compiler-rt/lib/ctx_profile/CMakeLists.txt
index ab7bf3241fd6..ce491fc7e8bf 100644
--- a/compiler-rt/lib/ctx_profile/CMakeLists.txt
+++ b/compiler-rt/lib/ctx_profile/CMakeLists.txt
@@ -15,6 +15,9 @@ include_directories(../../include)
# We don't use the C++ Standard Library here, so avoid including it by mistake.
append_list_if(COMPILER_RT_HAS_NOSTDINCXX_FLAG -nostdinc++ EXTRA_FLAGS)
+# __sanitizer_siginfo relies on a GNU anonymous struct; silence the warning.
+append_list_if(COMPILER_RT_HAS_WGNU_ANONYMOUS_STRUCT_FLAG -Wno-gnu-anonymous-struct EXTRA_FLAGS)
+
if(COMPILER_RT_INCLUDE_TESTS)
add_subdirectory(tests)
endif()
@@ -26,4 +29,4 @@ add_compiler_rt_runtime(clang_rt.ctx_profile
CFLAGS ${EXTRA_FLAGS}
SOURCES ${CTX_PROFILE_SOURCES}
ADDITIONAL_HEADERS ${CTX_PROFILE_HEADERS}
- PARENT_TARGET ctx_profile) \ No newline at end of file
+ PARENT_TARGET ctx_profile)
diff --git a/compiler-rt/lib/dfsan/dfsan_allocator.cpp b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
index 63475f434cd1..682df8c6e034 100644
--- a/compiler-rt/lib/dfsan/dfsan_allocator.cpp
+++ b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
@@ -45,7 +45,7 @@ const uptr kAllocatorSpace = 0xE00000000000ULL;
#else
const uptr kAllocatorSpace = 0x700000000000ULL;
#endif
-const uptr kMaxAllowedMallocSize = 8UL << 30;
+const uptr kMaxAllowedMallocSize = 1ULL << 40;
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
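
The same constant bump recurs below for lsan and msan. A quick check of the arithmetic: the old cap was 8 GiB, the new one is 1 TiB.

```cpp
// 8UL << 30 is 8 GiB; 1ULL << 40 is 1 TiB.
static_assert((8ULL << 30) == 8ULL * 1024 * 1024 * 1024, "old cap: 8 GiB");
static_assert((1ULL << 40) == 1024ULL * 1024 * 1024 * 1024, "new cap: 1 TiB");
```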
diff --git a/compiler-rt/lib/dfsan/dfsan_custom.cpp b/compiler-rt/lib/dfsan/dfsan_custom.cpp
index 3af26e9f64c9..af3c1f4d1673 100644
--- a/compiler-rt/lib/dfsan/dfsan_custom.cpp
+++ b/compiler-rt/lib/dfsan/dfsan_custom.cpp
@@ -1901,17 +1901,27 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfso_nanosleep(
return __dfsw_nanosleep(req, rem, req_label, rem_label, ret_label);
}
-static void clear_msghdr_labels(size_t bytes_written, struct msghdr *msg) {
+static void clear_msghdr_labels(size_t bytes_written, struct msghdr *msg,
+ int flags) {
dfsan_set_label(0, msg, sizeof(*msg));
dfsan_set_label(0, msg->msg_name, msg->msg_namelen);
dfsan_set_label(0, msg->msg_control, msg->msg_controllen);
- for (size_t i = 0; bytes_written > 0; ++i) {
- assert(i < msg->msg_iovlen);
+ for (size_t i = 0; i < msg->msg_iovlen; ++i) {
struct iovec *iov = &msg->msg_iov[i];
- size_t iov_written =
- bytes_written < iov->iov_len ? bytes_written : iov->iov_len;
+ size_t iov_written = iov->iov_len;
+
+ // When MSG_TRUNC is not set, we want to avoid setting 0 label on bytes that
+ // may not have changed, using bytes_written to bound the 0 label write.
+ // When MSG_TRUNC flag is set, bytes_written may be larger than the buffer,
+ // and should not be used as a bound.
+ if (!(MSG_TRUNC & flags)) {
+ if (bytes_written < iov->iov_len) {
+ iov_written = bytes_written;
+ }
+ bytes_written -= iov_written;
+ }
+
dfsan_set_label(0, iov->iov_base, iov_written);
- bytes_written -= iov_written;
}
}
@@ -1923,7 +1933,7 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_recvmmsg(
int ret = recvmmsg(sockfd, msgvec, vlen, flags, timeout);
for (int i = 0; i < ret; ++i) {
dfsan_set_label(0, &msgvec[i].msg_len, sizeof(msgvec[i].msg_len));
- clear_msghdr_labels(msgvec[i].msg_len, &msgvec[i].msg_hdr);
+ clear_msghdr_labels(msgvec[i].msg_len, &msgvec[i].msg_hdr, flags);
}
*ret_label = 0;
return ret;
@@ -1947,7 +1957,7 @@ SANITIZER_INTERFACE_ATTRIBUTE ssize_t __dfsw_recvmsg(
dfsan_label msg_label, dfsan_label flags_label, dfsan_label *ret_label) {
ssize_t ret = recvmsg(sockfd, msg, flags);
if (ret >= 0)
- clear_msghdr_labels(ret, msg);
+ clear_msghdr_labels(ret, msg, flags);
*ret_label = 0;
return ret;
}
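
The fix hinges on a POSIX detail: with MSG_TRUNC, recvmsg returns the real length of the datagram even when it exceeds the supplied buffers, so the return value must not bound writes into msg_iov. A minimal sketch of the calling pattern the new dfsan test below exercises:

```cpp
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

// Peek at a datagram's true length with a deliberately small buffer. The
// return value may exceed `len`; only min(ret, len) bytes of buf are
// actually written, which is exactly what clear_msghdr_labels must honor.
ssize_t peek_full_length(int fd, char *buf, size_t len) {
  struct iovec iov = {buf, len};
  struct msghdr msg = {};
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  return recvmsg(fd, &msg, MSG_PEEK | MSG_TRUNC);
}
```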
diff --git a/compiler-rt/lib/lsan/lsan_allocator.cpp b/compiler-rt/lib/lsan/lsan_allocator.cpp
index 12d579a9385b..493bf5f9efc5 100644
--- a/compiler-rt/lib/lsan/lsan_allocator.cpp
+++ b/compiler-rt/lib/lsan/lsan_allocator.cpp
@@ -31,7 +31,7 @@ static const uptr kMaxAllowedMallocSize = 1ULL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4ULL << 30;
#else
-static const uptr kMaxAllowedMallocSize = 8ULL << 30;
+static const uptr kMaxAllowedMallocSize = 1ULL << 40;
#endif
static Allocator allocator;
diff --git a/compiler-rt/lib/msan/msan_allocator.cpp b/compiler-rt/lib/msan/msan_allocator.cpp
index b1bc5b9390f7..8350106dc817 100644
--- a/compiler-rt/lib/msan/msan_allocator.cpp
+++ b/compiler-rt/lib/msan/msan_allocator.cpp
@@ -71,7 +71,7 @@ static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
-static const uptr kMaxAllowedMallocSize = 8UL << 30;
+static const uptr kMaxAllowedMallocSize = 1ULL << 40;
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
diff --git a/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh b/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
index 005bd6d584c5..b4702339db59 100755
--- a/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
+++ b/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
@@ -139,7 +139,6 @@ if [[ ! -f ${LLVM_BUILD}/build.ninja ]]; then
-DLLVM_INCLUDE_TESTS=OFF \
-DLLVM_ENABLE_ZLIB=ON \
-DLLVM_ENABLE_ZSTD=OFF \
- -DLLVM_ENABLE_TERMINFO=OFF \
-DLLVM_ENABLE_THREADS=OFF \
$LLVM_SRC
fi
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 15a199ae0349..f9ed36581f8d 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -1052,6 +1052,10 @@ private:
void *Block, const uptr UserPtr,
const uptr SizeOrUnusedBytes,
const FillContentsMode FillContents) {
+ // Compute the default pointer before adding the header tag
+ const uptr DefaultAlignedPtr =
+ reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
+
Block = addHeaderTag(Block);
// Only do content fill when it's from primary allocator because secondary
// allocator has filled the content.
@@ -1064,8 +1068,6 @@ private:
Chunk::UnpackedHeader Header = {};
- const uptr DefaultAlignedPtr =
- reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
if (UNLIKELY(DefaultAlignedPtr != UserPtr)) {
const uptr Offset = UserPtr - DefaultAlignedPtr;
DCHECK_GE(Offset, 2 * sizeof(u32));
@@ -1096,6 +1098,10 @@ private:
const Options Options = Primary.Options.load();
DCHECK(useMemoryTagging<AllocatorConfig>(Options));
+ // Compute the default pointer before adding the header tag
+ const uptr DefaultAlignedPtr =
+ reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
+
void *Ptr = reinterpret_cast<void *>(UserPtr);
void *TaggedPtr = Ptr;
@@ -1194,8 +1200,6 @@ private:
Chunk::UnpackedHeader Header = {};
- const uptr DefaultAlignedPtr =
- reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
if (UNLIKELY(DefaultAlignedPtr != UserPtr)) {
const uptr Offset = UserPtr - DefaultAlignedPtr;
DCHECK_GE(Offset, 2 * sizeof(u32));
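
The reorder matters because addHeaderTag inserts a memory tag into the pointer's top bits; once tagged, Block-based arithmetic no longer lines up with the untagged UserPtr. A hedged sketch of the effect, assuming an MTE-style top-byte tag (the shift value is illustrative):

```cpp
#include <cstdint>

constexpr std::uint64_t kTagShift = 56; // illustrative top-byte tag position

constexpr std::uint64_t addTagSketch(std::uint64_t p) {
  return p | (0x1ULL << kTagShift);
}

// After tagging, Block + header size stops comparing equal to the untagged
// default-aligned pointer, so the offset must be computed beforehand.
static_assert(addTagSketch(0x1000) + 16 != 0x1000 + 16, "arithmetic diverges");
```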
diff --git a/compiler-rt/lib/xray/tests/CMakeLists.txt b/compiler-rt/lib/xray/tests/CMakeLists.txt
index 0a428b9a30b1..4c7e92b6ecc3 100644
--- a/compiler-rt/lib/xray/tests/CMakeLists.txt
+++ b/compiler-rt/lib/xray/tests/CMakeLists.txt
@@ -54,11 +54,6 @@ set(XRAY_UNITTEST_LINK_FLAGS
${COMPILER_RT_CXX_LINK_LIBS})
if (NOT APPLE)
- # Needed by LLVMSupport.
- append_list_if(
- LLVM_ENABLE_TERMINFO
- -l${COMPILER_RT_TERMINFO_LIB} XRAY_UNITTEST_LINK_FLAGS)
-
# We add the library directories one at a time in our CFLAGS.
foreach (DIR ${LLVM_LIBRARY_DIR})
list(APPEND XRAY_UNITTEST_LINK_FLAGS -L${DIR})
diff --git a/compiler-rt/lib/xray/xray_buffer_queue.h b/compiler-rt/lib/xray/xray_buffer_queue.h
index e1739d050f3d..8d33f73576b5 100644
--- a/compiler-rt/lib/xray/xray_buffer_queue.h
+++ b/compiler-rt/lib/xray/xray_buffer_queue.h
@@ -87,7 +87,7 @@ private:
DCHECK_NE(Offset, Max);
do {
++Offset;
- } while (!Buffers[Offset].Used && Offset != Max);
+ } while (Offset != Max && !Buffers[Offset].Used);
return *this;
}
@@ -107,7 +107,7 @@ private:
Max(M) {
// We want to advance to the first Offset where the 'Used' property is
// true, or to the end of the list/queue.
- while (!Buffers[Offset].Used && Offset != Max) {
+ while (Offset != Max && !Buffers[Offset].Used) {
++Offset;
}
}
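
Both hunks apply the same one-line fix: the bounds test has to short-circuit before the element access, otherwise the final increment reads Buffers[Max], one past the end. The pattern in isolation:

```cpp
#include <cstddef>
#include <vector>

// Correct ordering: `i != v.size()` guards `v[i]`. Swapping the operands
// reproduces the original out-of-bounds read on the last step.
template <typename T, typename Pred>
std::size_t advanceToUsed(const std::vector<T> &v, std::size_t i, Pred used) {
  while (i != v.size() && !used(v[i]))
    ++i;
  return i;
}
```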
diff --git a/compiler-rt/lib/xray/xray_trampoline_x86_64.S b/compiler-rt/lib/xray/xray_trampoline_x86_64.S
index ff3ac91071a6..01098f60eeab 100644
--- a/compiler-rt/lib/xray/xray_trampoline_x86_64.S
+++ b/compiler-rt/lib/xray/xray_trampoline_x86_64.S
@@ -40,7 +40,7 @@
CFI_ADJUST_CFA_OFFSET(-8)
.endm
-// This macro should keep the stack aligned to 16 bytes.
+// This macro should lower the stack pointer by an odd multiple of 8.
.macro SAVE_REGISTERS
pushfq
CFI_ADJUST_CFA_OFFSET(8)
@@ -70,7 +70,6 @@
movq %r15, 0(%rsp)
.endm
-// This macro should keep the stack aligned to 16 bytes.
.macro RESTORE_REGISTERS
movq 232(%rsp), %rbp
movupd 216(%rsp), %xmm0
@@ -117,8 +116,8 @@
# LLVM-MCA-BEGIN __xray_FunctionEntry
ASM_SYMBOL(__xray_FunctionEntry):
CFI_STARTPROC
- ALIGN_STACK_16B
SAVE_REGISTERS
+ ALIGN_STACK_16B
// This load has to be atomic, it's concurrent with __xray_patch().
// On x86/amd64, a simple (type-aligned) MOV instruction is enough.
@@ -132,8 +131,8 @@ ASM_SYMBOL(__xray_FunctionEntry):
callq *%rax
LOCAL_LABEL(tmp0):
- RESTORE_REGISTERS
RESTORE_STACK_ALIGNMENT
+ RESTORE_REGISTERS
retq
# LLVM-MCA-END
ASM_SIZE(__xray_FunctionEntry)
@@ -193,8 +192,8 @@ LOCAL_LABEL(tmp2):
# LLVM-MCA-BEGIN __xray_FunctionTailExit
ASM_SYMBOL(__xray_FunctionTailExit):
CFI_STARTPROC
- ALIGN_STACK_16B
SAVE_REGISTERS
+ ALIGN_STACK_16B
movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
testq %rax,%rax
@@ -205,8 +204,8 @@ ASM_SYMBOL(__xray_FunctionTailExit):
callq *%rax
LOCAL_LABEL(tmp4):
- RESTORE_REGISTERS
RESTORE_STACK_ALIGNMENT
+ RESTORE_REGISTERS
retq
# LLVM-MCA-END
ASM_SIZE(__xray_FunctionTailExit)
@@ -221,8 +220,8 @@ LOCAL_LABEL(tmp4):
# LLVM-MCA-BEGIN __xray_ArgLoggerEntry
ASM_SYMBOL(__xray_ArgLoggerEntry):
CFI_STARTPROC
- ALIGN_STACK_16B
SAVE_REGISTERS
+ ALIGN_STACK_16B
// Again, these function pointer loads must be atomic; MOV is fine.
movq ASM_SYMBOL(_ZN6__xray13XRayArgLoggerE)(%rip), %rax
@@ -248,8 +247,8 @@ LOCAL_LABEL(arg1entryLog):
callq *%rax
LOCAL_LABEL(arg1entryFail):
- RESTORE_REGISTERS
RESTORE_STACK_ALIGNMENT
+ RESTORE_REGISTERS
retq
# LLVM-MCA-END
ASM_SIZE(__xray_ArgLoggerEntry)
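
The swapped ordering leans on the revised SAVE_REGISTERS contract: at function entry on x86-64 (SysV assumption), %rsp % 16 == 8 because the call pushed an 8-byte return address, so lowering the stack by an odd multiple of 8 lands back on a 16-byte boundary before ALIGN_STACK_16B runs. The arithmetic, as a sketch:

```cpp
#include <cstdint>

constexpr std::uint64_t kEntryRsp = 0x7fffffffe008; // %rsp % 16 == 8 at entry
constexpr std::uint64_t kSaveBytes = 31 * 8;        // an odd multiple of 8

static_assert(kEntryRsp % 16 == 8, "call pushed an 8-byte return address");
static_assert((kEntryRsp - kSaveBytes) % 16 == 0,
              "an odd multiple of 8 restores 16-byte alignment");
```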
diff --git a/compiler-rt/test/asan/TestCases/Windows/bitfield_uaf.cpp b/compiler-rt/test/asan/TestCases/Windows/bitfield_uaf.cpp
index 12ed505883e2..ac3649a9e1bf 100644
--- a/compiler-rt/test/asan/TestCases/Windows/bitfield_uaf.cpp
+++ b/compiler-rt/test/asan/TestCases/Windows/bitfield_uaf.cpp
@@ -24,10 +24,10 @@ int main(void) {
// CHECK: [[ADDR]] is located 0 bytes inside of 4-byte region
// CHECK-LABEL: freed by thread T0 here:
// CHECK: {{#0 .* free }}
- // CHECK: {{ #[1-2] .* main .*bitfield_uaf.cpp}}:[[@LINE-4]]
+ // CHECK: {{ #[1-3] .* main .*bitfield_uaf.cpp}}:[[@LINE-4]]
// CHECK-LABEL: previously allocated by thread T0 here:
// CHECK: {{#0 .* malloc }}
- // CHECK: {{ #[1-2] .* main .*bitfield_uaf.cpp}}:[[@LINE-8]]
+ // CHECK: {{ #[1-3] .* main .*bitfield_uaf.cpp}}:[[@LINE-8]]
make_access(s);
return 0;
}
diff --git a/compiler-rt/test/asan/TestCases/Windows/calloc_left_oob.cpp b/compiler-rt/test/asan/TestCases/Windows/calloc_left_oob.cpp
index e96fb6190f5a..e71ffdb9f241 100644
--- a/compiler-rt/test/asan/TestCases/Windows/calloc_left_oob.cpp
+++ b/compiler-rt/test/asan/TestCases/Windows/calloc_left_oob.cpp
@@ -12,6 +12,6 @@ int main() {
// CHECK: [[ADDR]] is located 4 bytes before 168-byte region
// CHECK: allocated by thread T0 here:
// CHECK: {{#0 .* calloc }}
- // CHECK: {{ #[1-2] .* main .*calloc_left_oob.cpp}}:[[@LINE-8]]
+ // CHECK: {{ #[1-3] .* main .*calloc_left_oob.cpp}}:[[@LINE-8]]
free(buffer);
}
diff --git a/compiler-rt/test/asan/TestCases/Windows/calloc_right_oob.cpp b/compiler-rt/test/asan/TestCases/Windows/calloc_right_oob.cpp
index fe0fc20e1919..507d84483cca 100644
--- a/compiler-rt/test/asan/TestCases/Windows/calloc_right_oob.cpp
+++ b/compiler-rt/test/asan/TestCases/Windows/calloc_right_oob.cpp
@@ -12,6 +12,6 @@ int main() {
// CHECK: [[ADDR]] is located 0 bytes after 168-byte region
// CHECK: allocated by thread T0 here:
// CHECK-NEXT: {{#0 .* calloc }}
- // CHECK: {{ #[1-2] .* main .*calloc_right_oob.cpp}}:[[@LINE-8]]
+ // CHECK: {{ #[1-3] .* main .*calloc_right_oob.cpp}}:[[@LINE-8]]
free(buffer);
}
diff --git a/compiler-rt/test/asan/TestCases/Windows/calloc_uaf.cpp b/compiler-rt/test/asan/TestCases/Windows/calloc_uaf.cpp
index bf13f7d3eb66..a03c5e10a533 100644
--- a/compiler-rt/test/asan/TestCases/Windows/calloc_uaf.cpp
+++ b/compiler-rt/test/asan/TestCases/Windows/calloc_uaf.cpp
@@ -13,8 +13,8 @@ int main() {
// CHECK: [[ADDR]] is located 0 bytes inside of 168-byte region
// CHECK: freed by thread T0 here:
// CHECK-NEXT: {{#0 .* free }}
- // CHECK: {{ #[1-2] .* main .*calloc_uaf.cpp}}:[[@LINE-8]]
+ // CHECK: {{ #[1-3] .* main .*calloc_uaf.cpp}}:[[@LINE-8]]
// CHECK: previously allocated by thread T0 here:
// CHECK-NEXT: {{#0 .* calloc }}
- // CHECK: {{ #[1-2] .* main .*calloc_uaf.cpp}}:[[@LINE-12]]
+ // CHECK: {{ #[1-3] .* main .*calloc_uaf.cpp}}:[[@LINE-12]]
}
diff --git a/compiler-rt/test/ctx_profile/TestCases/generate-context.cpp b/compiler-rt/test/ctx_profile/TestCases/generate-context.cpp
index 981d6170091c..797b87186065 100644
--- a/compiler-rt/test/ctx_profile/TestCases/generate-context.cpp
+++ b/compiler-rt/test/ctx_profile/TestCases/generate-context.cpp
@@ -5,7 +5,7 @@
// RUN: cp %llvm_src/include/llvm/ProfileData/CtxInstrContextNode.h %t_include/
//
// Compile with ctx instrumentation "on". We treat "theRoot" as callgraph root.
-// RUN: %clangxx %s -lclang_rt.ctx_profile -I%t_include -O2 -o %t.bin -mllvm -profile-context-root=theRoot
+// RUN: %clangxx %s %ctxprofilelib -I%t_include -O2 -o %t.bin -mllvm -profile-context-root=theRoot
//
// Run the binary, and observe the profile fetch handler's output.
// RUN: %t.bin | FileCheck %s
diff --git a/compiler-rt/test/ctx_profile/lit.cfg.py b/compiler-rt/test/ctx_profile/lit.cfg.py
index bf62093601f1..3034fadbb7a6 100644
--- a/compiler-rt/test/ctx_profile/lit.cfg.py
+++ b/compiler-rt/test/ctx_profile/lit.cfg.py
@@ -33,3 +33,10 @@ config.suffixes = [".c", ".cpp", ".test"]
config.substitutions.append(
("%clangxx ", " ".join([config.clang] + config.cxx_mode_flags) + " -ldl -lpthread ")
)
+
+config.substitutions.append(
+ (
+ "%ctxprofilelib",
+ "-L%s -lclang_rt.ctx_profile%s" % (config.compiler_rt_libdir, config.target_suffix)
+ )
+)
diff --git a/compiler-rt/test/dfsan/custom.cpp b/compiler-rt/test/dfsan/custom.cpp
index f544e481b726..cede0d64dbcf 100644
--- a/compiler-rt/test/dfsan/custom.cpp
+++ b/compiler-rt/test/dfsan/custom.cpp
@@ -768,26 +768,53 @@ void test_recvmsg() {
ssize_t sent = sendmsg(sockfds[0], &smsg, 0);
assert(sent > 0);
- char rbuf[128];
- struct iovec riovs[2] = {{&rbuf[0], 4}, {&rbuf[4], 4}};
- struct msghdr rmsg = {};
- rmsg.msg_iov = riovs;
- rmsg.msg_iovlen = 2;
-
- dfsan_set_label(i_label, rbuf, sizeof(rbuf));
- dfsan_set_label(i_label, &rmsg, sizeof(rmsg));
-
- DEFINE_AND_SAVE_ORIGINS(rmsg)
-
- ssize_t received = recvmsg(sockfds[1], &rmsg, 0);
- assert(received == sent);
- assert(memcmp(sbuf, rbuf, 8) == 0);
- ASSERT_ZERO_LABEL(received);
- ASSERT_READ_ZERO_LABEL(&rmsg, sizeof(rmsg));
- ASSERT_READ_ZERO_LABEL(&rbuf[0], 8);
- ASSERT_READ_LABEL(&rbuf[8], 1, i_label);
-
- ASSERT_SAVED_ORIGINS(rmsg)
+ {
+ char rpbuf[2];
+ struct iovec peek_iov;
+ peek_iov.iov_base = rpbuf;
+ peek_iov.iov_len = 2;
+
+ struct msghdr peek_header = {};
+ peek_header.msg_iov = &peek_iov;
+ peek_header.msg_iovlen = 1;
+
+ dfsan_set_label(i_label, rpbuf, sizeof(rpbuf));
+ dfsan_set_label(i_label, &peek_header, sizeof(peek_header));
+
+ DEFINE_AND_SAVE_ORIGINS(peek_header)
+
+ ssize_t received = recvmsg(sockfds[1], &peek_header, MSG_PEEK | MSG_TRUNC);
+ assert(received == sent);
+ assert(memcmp(sbuf, rpbuf, 2) == 0);
+ ASSERT_ZERO_LABEL(received);
+ ASSERT_READ_ZERO_LABEL(&peek_header, sizeof(peek_header));
+ ASSERT_READ_ZERO_LABEL(&rpbuf[0], 0);
+
+ ASSERT_SAVED_ORIGINS(peek_header)
+ }
+
+ {
+ char rbuf[128];
+ struct iovec riovs[2] = {{&rbuf[0], 4}, {&rbuf[4], 4}};
+ struct msghdr rmsg = {};
+ rmsg.msg_iov = riovs;
+ rmsg.msg_iovlen = 2;
+
+ dfsan_set_label(i_label, rbuf, sizeof(rbuf));
+ dfsan_set_label(i_label, &rmsg, sizeof(rmsg));
+
+ DEFINE_AND_SAVE_ORIGINS(rmsg)
+
+ ssize_t received = recvmsg(sockfds[1], &rmsg, 0);
+ assert(received == sent);
+ assert(memcmp(sbuf, rbuf, 8) == 0);
+ ASSERT_ZERO_LABEL(received);
+ ASSERT_READ_ZERO_LABEL(&rmsg, sizeof(rmsg));
+ ASSERT_READ_ZERO_LABEL(&rbuf[0], 8);
+ ASSERT_READ_LABEL(&rbuf[8], 1, i_label);
+
+ ASSERT_SAVED_ORIGINS(rmsg)
+ }
close(sockfds[0]);
close(sockfds[1]);
diff --git a/compiler-rt/test/profile/Linux/counter_promo_for.c b/compiler-rt/test/profile/Linux/counter_promo_for.c
index 1694e3812de4..aa77e6084bf8 100644
--- a/compiler-rt/test/profile/Linux/counter_promo_for.c
+++ b/compiler-rt/test/profile/Linux/counter_promo_for.c
@@ -19,29 +19,29 @@ __attribute__((noinline)) void bar(int i) { g += i; }
__attribute__((noinline)) void foo(int n, int N) {
// PROMO-LABEL: @foo
-// PROMO: load{{.*}}@__profc_foo{{.*}} 3){{.*}}
+// PROMO: load{{.*}}@__profc_foo{{.*}} 24){{.*}}
// PROMO-NEXT: add
-// PROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 3){{.*}}
+// PROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 24){{.*}}
// PROMO: load{{.*}}@__profc_foo, align
// PROMO-NEXT: add
// PROMO-NEXT: store{{.*}}@__profc_foo, align
-// PROMO-NEXT: load{{.*}}@__profc_foo{{.*}} 1){{.*}}
+// PROMO-NEXT: load{{.*}}@__profc_foo{{.*}} 8){{.*}}
// PROMO-NEXT: add
-// PROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 1){{.*}}
-// PROMO: load{{.*}}@__profc_foo{{.*}} 2){{.*}}
+// PROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 8){{.*}}
+// PROMO: load{{.*}}@__profc_foo{{.*}} 16){{.*}}
// PROMO-NEXT: add
-// PROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 2){{.*}}
+// PROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 16){{.*}}
//
// NOPROMO-LABEL: @foo
// NOPROMO: load{{.*}}@__profc_foo, align
// NOPROMO-NEXT: add
// NOPROMO-NEXT: store{{.*}}@__profc_foo, align
-// NOPROMO: load{{.*}}@__profc_foo{{.*}} 1){{.*}}
+// NOPROMO: load{{.*}}@__profc_foo{{.*}} 8){{.*}}
// NOPROMO-NEXT: add
-// NOPROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 1){{.*}}
-// NOPROMO: load{{.*}}@__profc_foo{{.*}} 2){{.*}}
+// NOPROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 8){{.*}}
+// NOPROMO: load{{.*}}@__profc_foo{{.*}} 16){{.*}}
// NOPROMO-NEXT: add
-// NOPROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 2){{.*}}
+// NOPROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 16){{.*}}
int i;
for (i = 0; i < N; i++) {
if (i < n + 1)
diff --git a/compiler-rt/test/profile/Linux/counter_promo_while.c b/compiler-rt/test/profile/Linux/counter_promo_while.c
index 71c4a90d29fa..c6ea3a7282d4 100644
--- a/compiler-rt/test/profile/Linux/counter_promo_while.c
+++ b/compiler-rt/test/profile/Linux/counter_promo_while.c
@@ -20,23 +20,23 @@ __attribute__((noinline)) void foo(int n, int N) {
// PROMO: load{{.*}}@__profc_foo, align
// PROMO-NEXT: add
// PROMO-NEXT: store{{.*}}@__profc_foo, align
-// PROMO-NEXT: load{{.*}}@__profc_foo{{.*}} 1){{.*}}
+// PROMO-NEXT: load{{.*}}@__profc_foo{{.*}} 8){{.*}}
// PROMO-NEXT: add
-// PROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 1){{.*}}
-// PROMO-NEXT: load{{.*}}@__profc_foo{{.*}} 2){{.*}}
+// PROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 8){{.*}}
+// PROMO-NEXT: load{{.*}}@__profc_foo{{.*}} 16){{.*}}
// PROMO-NEXT: add
-// PROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 2){{.*}}
+// PROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 16){{.*}}
//
// NOPROMO-LABEL: @foo
// NOPROMO: load{{.*}}@__profc_foo, align
// NOPROMO-NEXT: add
// NOPROMO-NEXT: store{{.*}}@__profc_foo, align
-// NOPROMO: load{{.*}}@__profc_foo{{.*}} 1){{.*}}
+// NOPROMO: load{{.*}}@__profc_foo{{.*}} 8){{.*}}
// NOPROMO-NEXT: add
-// NOPROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 1){{.*}}
-// NOPROMO: load{{.*}}@__profc_foo{{.*}} 2){{.*}}
+// NOPROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 8){{.*}}
+// NOPROMO: load{{.*}}@__profc_foo{{.*}} 16){{.*}}
// NOPROMO-NEXT: add
-// NOPROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 2){{.*}}
+// NOPROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 16){{.*}}
int i = 0;
while (i < N) {
if (i < n + 1)
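
The updated CHECK patterns in both counter-promotion tests reflect counter references printed as byte offsets rather than element indices: profile counters are i64 slots, so indices 1, 2, 3 become offsets 8, 16, 24 (assuming that reading of the pattern change):

```cpp
#include <cstdint>

static_assert(sizeof(std::int64_t) == 8, "one profile counter is 8 bytes");
static_assert(1 * 8 == 8 && 2 * 8 == 16 && 3 * 8 == 24,
              "indices 1, 2, 3 print as byte offsets 8, 16, 24");
```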
diff --git a/cross-project-tests/CMakeLists.txt b/cross-project-tests/CMakeLists.txt
index f7c2ca7ad83d..7f2fee48fda7 100644
--- a/cross-project-tests/CMakeLists.txt
+++ b/cross-project-tests/CMakeLists.txt
@@ -3,6 +3,7 @@
# The subset inside debuginfo-tests invoke clang to generate programs with
# various types of debug info, and then run those programs under a debugger
# such as GDB or LLDB to verify the results.
+set(LLVM_SUBPROJECT_TITLE "Cross-Project")
find_package(Python3 COMPONENTS Interpreter)
@@ -97,8 +98,3 @@ add_lit_testsuite(check-cross-amdgpu "Running AMDGPU cross-project tests"
add_lit_testsuites(CROSS_PROJECT ${CMAKE_CURRENT_SOURCE_DIR}
DEPENDS ${CROSS_PROJECT_TEST_DEPS}
)
-
-set_target_properties(check-cross-project PROPERTIES FOLDER "Tests")
-set_target_properties(check-debuginfo PROPERTIES FOLDER "Tests")
-set_target_properties(check-intrinsic-headers PROPERTIES FOLDER "Tests")
-set_target_properties(check-cross-amdgpu PROPERTIES FOLDER "Tests")
diff --git a/flang/CMakeLists.txt b/flang/CMakeLists.txt
index c8e75024823f..cbe8f1186236 100644
--- a/flang/CMakeLists.txt
+++ b/flang/CMakeLists.txt
@@ -1,4 +1,5 @@
cmake_minimum_required(VERSION 3.20.0)
+set(LLVM_SUBPROJECT_TITLE "Flang")
if(NOT DEFINED LLVM_COMMON_CMAKE_UTILS)
set(LLVM_COMMON_CMAKE_UTILS ${CMAKE_CURRENT_SOURCE_DIR}/../cmake)
@@ -336,7 +337,7 @@ endif()
if (FLANG_RUNTIME_F128_MATH_LIB)
add_compile_definitions(
- -DFLANG_RUNTIME_F128_MATH_LIB="${FLANG_RUNTIME_F128_MATH_LIB}"
+ FLANG_RUNTIME_F128_MATH_LIB="${FLANG_RUNTIME_F128_MATH_LIB}"
)
endif()
@@ -481,7 +482,7 @@ endif()
# Custom target to install Flang libraries.
add_custom_target(flang-libraries)
-set_target_properties(flang-libraries PROPERTIES FOLDER "Misc")
+set_target_properties(flang-libraries PROPERTIES FOLDER "Flang/Meta")
if (NOT LLVM_ENABLE_IDE)
add_llvm_install_targets(install-flang-libraries
diff --git a/flang/cmake/modules/AddFlang.cmake b/flang/cmake/modules/AddFlang.cmake
index 41ce8738e7bf..3a5119b83831 100644
--- a/flang/cmake/modules/AddFlang.cmake
+++ b/flang/cmake/modules/AddFlang.cmake
@@ -94,13 +94,12 @@ function(add_flang_library name)
add_custom_target(${name})
endif()
- set_target_properties(${name} PROPERTIES FOLDER "Flang libraries")
+ set_target_properties(${name} PROPERTIES FOLDER "Flang/Libraries")
set_flang_windows_version_resource_properties(${name})
endfunction(add_flang_library)
macro(add_flang_executable name)
add_llvm_executable(${name} ${ARGN})
- set_target_properties(${name} PROPERTIES FOLDER "Flang executables")
set_flang_windows_version_resource_properties(${name})
endmacro(add_flang_executable)
diff --git a/flang/docs/CMakeLists.txt b/flang/docs/CMakeLists.txt
index 3414b8e3acc4..3e4883e881ff 100644
--- a/flang/docs/CMakeLists.txt
+++ b/flang/docs/CMakeLists.txt
@@ -79,7 +79,7 @@ if (LLVM_ENABLE_DOXYGEN)
COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/doxygen.cfg
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating flang doxygen documentation." VERBATIM)
-
+ set_target_properties(doxygen-flang PROPERTIES FOLDER "Flang/Docs")
if (LLVM_BUILD_DOCS)
add_dependencies(doxygen doxygen-flang)
endif()
diff --git a/flang/docs/Extensions.md b/flang/docs/Extensions.md
index 43ed35e36a6e..7b872c786c82 100644
--- a/flang/docs/Extensions.md
+++ b/flang/docs/Extensions.md
@@ -223,6 +223,10 @@ end
* When a dummy argument is `POINTER` or `ALLOCATABLE` and is `INTENT(IN)`, we
relax enforcement of some requirements on actual arguments that must otherwise
hold true for definable arguments.
+* We allow a limited polymorphic `POINTER` or `ALLOCATABLE` actual argument
+ to be associated with a compatible monomorphic dummy argument, as
+ our implementation, like others, supports a reallocation that would
+ change the dynamic type.
* Assignment of `LOGICAL` to `INTEGER` and vice versa (but not other types) is
allowed. The values are normalized to canonical `.TRUE.`/`.FALSE.`.
The values are also normalized for assignments of `LOGICAL(KIND=K1)` to
diff --git a/flang/include/flang/Common/Fortran-features.h b/flang/include/flang/Common/Fortran-features.h
index f57fcdc895ad..15c4af63f4be 100644
--- a/flang/include/flang/Common/Fortran-features.h
+++ b/flang/include/flang/Common/Fortran-features.h
@@ -49,7 +49,8 @@ ENUM_CLASS(LanguageFeature, BackslashEscapes, OldDebugLines,
IndistinguishableSpecifics, SubroutineAndFunctionSpecifics,
EmptySequenceType, NonSequenceCrayPointee, BranchIntoConstruct,
BadBranchTarget, ConvertedArgument, HollerithPolymorphic, ListDirectedSize,
- NonBindCInteroperability, CudaManaged, CudaUnified)
+ NonBindCInteroperability, CudaManaged, CudaUnified,
+ PolymorphicActualAllocatableOrPointerToMonomorphicDummy)
// Portability and suspicious usage warnings
ENUM_CLASS(UsageWarning, Portability, PointerToUndefinable,
diff --git a/flang/include/flang/Common/api-attrs.h b/flang/include/flang/Common/api-attrs.h
index 04ee307326ac..d73e60996bc8 100644
--- a/flang/include/flang/Common/api-attrs.h
+++ b/flang/include/flang/Common/api-attrs.h
@@ -156,4 +156,26 @@
#define RT_DIAG_DISABLE_CALL_HOST_FROM_DEVICE_WARN
#endif /* !defined(__CUDACC__) */
+/*
+ * RT_DEVICE_NOINLINE may be used for non-performance critical
+ * functions that should not be inlined to minimize the amount
+ * of code that needs to be processed by the device compiler's
+ * optimizer.
+ */
+#ifndef __has_attribute
+#define __has_attribute(x) 0
+#endif
+#if __has_attribute(noinline)
+#define RT_NOINLINE_ATTR __attribute__((noinline))
+#else
+#define RT_NOINLINE_ATTR
+#endif
+#if (defined(__CUDACC__) || defined(__CUDA__)) && defined(__CUDA_ARCH__)
+#define RT_DEVICE_NOINLINE RT_NOINLINE_ATTR
+#define RT_DEVICE_NOINLINE_HOST_INLINE
+#else
+#define RT_DEVICE_NOINLINE
+#define RT_DEVICE_NOINLINE_HOST_INLINE inline
+#endif
+
#endif /* !FORTRAN_RUNTIME_API_ATTRS_H_ */
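
A short usage sketch of the new macros (hypothetical function names; the include path is flang's own header from this hunk):

```cpp
#include "flang/Common/api-attrs.h"

// Host builds: plain `inline`, as before. CUDA device builds: kept out of
// line so the device optimizer has less code to process.
RT_DEVICE_NOINLINE_HOST_INLINE int twiceSketch(int x) { return 2 * x; }

// Never inlined on device compiles; expands to nothing elsewhere.
RT_DEVICE_NOINLINE void coldHelperSketch();
```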
diff --git a/flang/include/flang/Common/visit.h b/flang/include/flang/Common/visit.h
index d867338be7e0..ad66297650b0 100644
--- a/flang/include/flang/Common/visit.h
+++ b/flang/include/flang/Common/visit.h
@@ -30,7 +30,7 @@ namespace log2visit {
template <std::size_t LOW, std::size_t HIGH, typename RESULT, typename VISITOR,
typename... VARIANT>
-inline RT_API_ATTRS RESULT Log2VisitHelper(
+RT_DEVICE_NOINLINE_HOST_INLINE RT_API_ATTRS RESULT Log2VisitHelper(
VISITOR &&visitor, std::size_t which, VARIANT &&...u) {
if constexpr (LOW + 7 >= HIGH) {
switch (which - LOW) {
@@ -68,8 +68,9 @@ inline RT_API_ATTRS RESULT Log2VisitHelper(
}
template <typename VISITOR, typename... VARIANT>
-inline RT_API_ATTRS auto visit(VISITOR &&visitor, VARIANT &&...u)
- -> decltype(visitor(std::get<0>(std::forward<VARIANT>(u))...)) {
+RT_DEVICE_NOINLINE_HOST_INLINE RT_API_ATTRS auto
+visit(VISITOR &&visitor, VARIANT &&...u) -> decltype(visitor(std::get<0>(
+ std::forward<VARIANT>(u))...)) {
using Result = decltype(visitor(std::get<0>(std::forward<VARIANT>(u))...));
if constexpr (sizeof...(u) == 1) {
static constexpr std::size_t high{
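
The visit entry points now carry RT_DEVICE_NOINLINE_HOST_INLINE; the underlying dispatch is a binary search over the variant index that bottoms out in an 8-way switch. An independent re-implementation of the idea (not flang's code; the 8-way switch is collapsed to a direct call):

```cpp
#include <cstddef>

// Narrow [LOW, HIGH] by halving until the window is trivial, then dispatch.
// Recursion depth is logarithmic in the number of alternatives, which keeps
// the generated call tree shallow for large variants.
template <std::size_t LOW, std::size_t HIGH, typename F>
int log2DispatchSketch(std::size_t which, F &&f) {
  if constexpr (HIGH - LOW < 2) {
    return f(which);
  } else {
    constexpr std::size_t MID = (LOW + HIGH) / 2;
    return which <= MID ? log2DispatchSketch<LOW, MID>(which, f)
                        : log2DispatchSketch<MID + 1, HIGH>(which, f);
  }
}

int main() {
  auto square = [](std::size_t i) { return static_cast<int>(i * i); };
  return log2DispatchSketch<0, 15>(std::size_t{3}, square) == 9 ? 0 : 1;
}
```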
diff --git a/flang/include/flang/Evaluate/characteristics.h b/flang/include/flang/Evaluate/characteristics.h
index 8aa065b025a4..9695c665d0cb 100644
--- a/flang/include/flang/Evaluate/characteristics.h
+++ b/flang/include/flang/Evaluate/characteristics.h
@@ -386,7 +386,7 @@ struct Procedure {
bool HasExplicitInterface() const {
return !attrs.test(Attr::ImplicitInterface);
}
- int FindPassIndex(std::optional<parser::CharBlock>) const;
+ std::optional<int> FindPassIndex(std::optional<parser::CharBlock>) const;
bool CanBeCalledViaImplicitInterface(std::string *whyNot = nullptr) const;
bool CanOverride(const Procedure &, std::optional<int> passIndex) const;
bool IsCompatibleWith(const Procedure &, bool ignoreImplicitVsExplicit,
diff --git a/flang/include/flang/Evaluate/constant.h b/flang/include/flang/Evaluate/constant.h
index 71be7906d2fe..d9866a08889f 100644
--- a/flang/include/flang/Evaluate/constant.h
+++ b/flang/include/flang/Evaluate/constant.h
@@ -126,8 +126,7 @@ public:
constexpr Result result() const { return result_; }
constexpr DynamicType GetType() const { return result_.GetType(); }
- llvm::raw_ostream &AsFortran(llvm::raw_ostream &,
- const parser::CharBlock *derivedTypeRename = nullptr) const;
+ llvm::raw_ostream &AsFortran(llvm::raw_ostream &) const;
protected:
std::vector<Element> Reshape(const ConstantSubscripts &) const;
diff --git a/flang/include/flang/Evaluate/expression.h b/flang/include/flang/Evaluate/expression.h
index 64db0b88d03e..642ddf511684 100644
--- a/flang/include/flang/Evaluate/expression.h
+++ b/flang/include/flang/Evaluate/expression.h
@@ -735,8 +735,7 @@ public:
StructureConstructor &Add(const semantics::Symbol &, Expr<SomeType> &&);
int Rank() const { return 0; }
DynamicType GetType() const;
- llvm::raw_ostream &AsFortran(llvm::raw_ostream &,
- const parser::CharBlock *derivedTypeRename = nullptr) const;
+ llvm::raw_ostream &AsFortran(llvm::raw_ostream &) const;
private:
std::optional<Expr<SomeType>> CreateParentComponent(const Symbol &) const;
diff --git a/flang/include/flang/Evaluate/type.h b/flang/include/flang/Evaluate/type.h
index 93a0f21fa914..de19e3d04dea 100644
--- a/flang/include/flang/Evaluate/type.h
+++ b/flang/include/flang/Evaluate/type.h
@@ -272,9 +272,6 @@ const semantics::DerivedTypeSpec *GetDerivedTypeSpec(
const semantics::DerivedTypeSpec *GetParentTypeSpec(
const semantics::DerivedTypeSpec &);
-std::string DerivedTypeSpecAsFortran(const semantics::DerivedTypeSpec &,
- const parser::CharBlock *derivedTypeRename = nullptr);
-
template <TypeCategory CATEGORY, int KIND = 0> struct TypeBase {
static constexpr TypeCategory category{CATEGORY};
static constexpr int kind{KIND};
diff --git a/flang/include/flang/Optimizer/Builder/FIRBuilder.h b/flang/include/flang/Optimizer/Builder/FIRBuilder.h
index 287730ef2ac8..f9ef8b756629 100644
--- a/flang/include/flang/Optimizer/Builder/FIRBuilder.h
+++ b/flang/include/flang/Optimizer/Builder/FIRBuilder.h
@@ -50,8 +50,10 @@ public:
mlir::SymbolTable *symbolTable = nullptr)
: OpBuilder{op, /*listener=*/this}, kindMap{std::move(kindMap)},
symbolTable{symbolTable} {}
- explicit FirOpBuilder(mlir::OpBuilder &builder, fir::KindMapping kindMap)
- : OpBuilder(builder), OpBuilder::Listener(), kindMap{std::move(kindMap)} {
+ explicit FirOpBuilder(mlir::OpBuilder &builder, fir::KindMapping kindMap,
+ mlir::SymbolTable *symbolTable = nullptr)
+ : OpBuilder(builder), OpBuilder::Listener(), kindMap{std::move(kindMap)},
+ symbolTable{symbolTable} {
setListener(this);
}
explicit FirOpBuilder(mlir::OpBuilder &builder, mlir::ModuleOp mod)
diff --git a/flang/include/flang/Optimizer/Builder/IntrinsicCall.h b/flang/include/flang/Optimizer/Builder/IntrinsicCall.h
index 977a69af5281..357df3b6df50 100644
--- a/flang/include/flang/Optimizer/Builder/IntrinsicCall.h
+++ b/flang/include/flang/Optimizer/Builder/IntrinsicCall.h
@@ -333,7 +333,10 @@ struct IntrinsicLibrary {
llvm::ArrayRef<fir::ExtendedValue>);
mlir::Value genScale(mlir::Type, llvm::ArrayRef<mlir::Value>);
fir::ExtendedValue genScan(mlir::Type, llvm::ArrayRef<fir::ExtendedValue>);
+ fir::ExtendedValue genSelectedCharKind(mlir::Type,
+ llvm::ArrayRef<fir::ExtendedValue>);
mlir::Value genSelectedIntKind(mlir::Type, llvm::ArrayRef<mlir::Value>);
+ mlir::Value genSelectedLogicalKind(mlir::Type, llvm::ArrayRef<mlir::Value>);
mlir::Value genSelectedRealKind(mlir::Type, llvm::ArrayRef<mlir::Value>);
mlir::Value genSetExponent(mlir::Type resultType,
llvm::ArrayRef<mlir::Value> args);
diff --git a/flang/include/flang/Optimizer/Builder/Runtime/Numeric.h b/flang/include/flang/Optimizer/Builder/Runtime/Numeric.h
index fec8c9906eff..558358257b51 100644
--- a/flang/include/flang/Optimizer/Builder/Runtime/Numeric.h
+++ b/flang/include/flang/Optimizer/Builder/Runtime/Numeric.h
@@ -46,10 +46,18 @@ mlir::Value genRRSpacing(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Value genScale(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Value x, mlir::Value i);
+/// Generate call to Selected_char_kind intrinsic runtime routine.
+mlir::Value genSelectedCharKind(fir::FirOpBuilder &builder, mlir::Location loc,
+ mlir::Value name, mlir::Value length);
+
/// Generate call to Selected_int_kind intrinsic runtime routine.
mlir::Value genSelectedIntKind(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Value x);
+/// Generate call to Selected_logical_kind intrinsic runtime routine.
+mlir::Value genSelectedLogicalKind(fir::FirOpBuilder &builder,
+ mlir::Location loc, mlir::Value x);
+
/// Generate call to Selected_real_kind intrinsic runtime routine.
mlir::Value genSelectedRealKind(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Value precision, mlir::Value range,
diff --git a/flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h b/flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h
index c9884ef7df8b..575746374fcc 100644
--- a/flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h
+++ b/flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h
@@ -130,6 +130,12 @@ constexpr TypeBuilderFunc getModel<signed char>() {
};
}
template <>
+constexpr TypeBuilderFunc getModel<unsigned char>() {
+ return [](mlir::MLIRContext *context) -> mlir::Type {
+ return mlir::IntegerType::get(context, 8 * sizeof(unsigned char));
+ };
+}
+template <>
constexpr TypeBuilderFunc getModel<void *>() {
return [](mlir::MLIRContext *context) -> mlir::Type {
return fir::LLVMPointerType::get(context,
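
The new specialization follows the existing pattern: map a C type to the MLIR integer type of matching bit width. The width arithmetic, stated as a plain C++ check:

```cpp
static_assert(8 * sizeof(unsigned char) == 8,
              "getModel<unsigned char> requests an 8-bit IntegerType");
```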
diff --git a/flang/include/flang/Optimizer/Builder/Runtime/Support.h b/flang/include/flang/Optimizer/Builder/Runtime/Support.h
new file mode 100644
index 000000000000..fe263ca2975e
--- /dev/null
+++ b/flang/include/flang/Optimizer/Builder/Runtime/Support.h
@@ -0,0 +1,31 @@
+//===-- Support.h - generate support runtime API calls ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef FORTRAN_OPTIMIZER_BUILDER_RUNTIME_SUPPORT_H
+#define FORTRAN_OPTIMIZER_BUILDER_RUNTIME_SUPPORT_H
+
+namespace mlir {
+class Value;
+class Location;
+} // namespace mlir
+
+namespace fir {
+class FirOpBuilder;
+}
+
+namespace fir::runtime {
+
+/// Generate call to `CopyAndUpdateDescriptor` runtime routine.
+void genCopyAndUpdateDescriptor(fir::FirOpBuilder &builder, mlir::Location loc,
+ mlir::Value to, mlir::Value from,
+ mlir::Value newDynamicType,
+ mlir::Value newAttribute,
+ mlir::Value newLowerBounds);
+
+} // namespace fir::runtime
+#endif // FORTRAN_OPTIMIZER_BUILDER_RUNTIME_SUPPORT_H
diff --git a/flang/include/flang/Optimizer/Dialect/CMakeLists.txt b/flang/include/flang/Optimizer/Dialect/CMakeLists.txt
index 301a93c1fe5b..10ab213b30b0 100644
--- a/flang/include/flang/Optimizer/Dialect/CMakeLists.txt
+++ b/flang/include/flang/Optimizer/Dialect/CMakeLists.txt
@@ -33,6 +33,7 @@ mlir_tablegen(CanonicalizationPatterns.inc -gen-rewriters)
add_public_tablegen_target(CanonicalizationPatternsIncGen)
add_custom_target(flang-doc)
+set_target_properties(flang-doc PROPERTIES FOLDER "Flang/Docs")
set(dialect_doc_filename "FIRLangRef")
set(LLVM_TARGET_DEFINITIONS FIROps.td)
@@ -45,4 +46,5 @@ add_custom_command(
${GEN_DOC_FILE}
DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${dialect_doc_filename}.md)
add_custom_target(${dialect_doc_filename}DocGen DEPENDS ${GEN_DOC_FILE})
+set_target_properties(${dialect_doc_filename}DocGen PROPERTIES FOLDER "Flang/Tablegenning/Docs")
add_dependencies(flang-doc ${dialect_doc_filename}DocGen)
diff --git a/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.td b/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.td
index 72157bce4f76..37b8da018195 100644
--- a/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.td
+++ b/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.td
@@ -152,15 +152,21 @@ def cuf_DataTransferOp : cuf_Op<"data_transfer", []> {
a = adev ! transfer device to host
bdev = adev ! transfer device to device
```
+
+  When the data transfer operates on data held by descriptors, the data
+  referenced by the LHS descriptor is updated. When required, the LHS
+  descriptor itself is also updated.
}];
- let arguments = (ins Arg<AnyReferenceLike, "", [MemWrite]>:$src,
- Arg<AnyReferenceLike, "", [MemRead]>:$dst,
+ let arguments = (ins Arg<AnyType, "", [MemRead]>:$src,
+ Arg<AnyRefOrBoxType, "", [MemWrite]>:$dst,
cuf_DataTransferKindAttr:$transfer_kind);
let assemblyFormat = [{
$src `to` $dst attr-dict `:` type(operands)
}];
+
+ let hasVerifier = 1;
}
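With the relaxed operand types, lowering can now build a transfer directly from a reference, a descriptor, or a constant source; a sketch mirroring the builder calls later in this patch (value names are assumptions):
```
// Host-to-device transfer; dst may be a reference or a descriptor.
auto kind = cuf::DataTransferKindAttr::get(
    builder.getContext(), cuf::DataTransferKind::HostDevice);
builder.create<cuf::DataTransferOp>(loc, srcVal, dstVal, kind);
```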
def cuf_KernelLaunchOp : cuf_Op<"kernel_launch", [CallOpInterface,
diff --git a/flang/include/flang/Optimizer/Dialect/FIRAttr.td b/flang/include/flang/Optimizer/Dialect/FIRAttr.td
index 989319ff3dda..0c34b640a5c9 100644
--- a/flang/include/flang/Optimizer/Dialect/FIRAttr.td
+++ b/flang/include/flang/Optimizer/Dialect/FIRAttr.td
@@ -70,4 +70,15 @@ def fir_BoxFieldAttr : I32EnumAttr<
// mlir::SideEffects::Resource for modelling operations which add debugging information
def DebuggingResource : Resource<"::fir::DebuggingResource">;
+def fir_LowerBoundModifierAttribute : I32EnumAttr<
+ "LowerBoundModifierAttribute",
+ "Describes how to modify lower bounds",
+ [
+ I32EnumAttrCase<"Preserve", 0, "preserve">,
+ I32EnumAttrCase<"SetToOnes", 1, "ones">,
+ I32EnumAttrCase<"SetToZeroes", 2, "zeroes">,
+ ]> {
+ let cppNamespace = "::fir";
+}
+
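The three cases deliberately mirror Fortran::runtime::LowerBoundModifier (added to flang/Runtime/support.h below) with the same integer values, so the attribute can be forwarded to the runtime without translation. A hedged sketch, assuming the usual tablegen-generated accessor name:
```
// Convert the op attribute to the runtime enum; valid because the
// enumerators carry identical integer values in both definitions.
auto rtMod = static_cast<Fortran::runtime::LowerBoundModifier>(
    static_cast<int>(reboxOp.getLbsModifier()));
```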
#endif // FIR_DIALECT_FIR_ATTRS
diff --git a/flang/include/flang/Optimizer/Dialect/FIROps.td b/flang/include/flang/Optimizer/Dialect/FIROps.td
index d9c114904006..584b7e82bf27 100644
--- a/flang/include/flang/Optimizer/Dialect/FIROps.td
+++ b/flang/include/flang/Optimizer/Dialect/FIROps.td
@@ -857,6 +857,43 @@ def fir_ReboxOp : fir_Op<"rebox", [NoMemoryEffect, AttrSizedOperandSegments]> {
let hasVerifier = 1;
}
+def fir_ReboxAssumedRankOp : fir_Op<"rebox_assumed_rank",
+ [DeclareOpInterfaceMethods<MemoryEffectsOpInterface>]> {
+ let summary = "create an assumed-rank box given another assumed-rank box";
+
+ let description = [{
+ Limited version of fir.rebox for assumed-rank. Only the lower bounds,
+ attribute, and element type may change.
+
+ The input may be a box or a reference to a box, in which case the operation
+ reads the incoming reference.
+ Since a fir.shift cannot be built without knowing the rank statically,
+ lower bound changes are encoded via a LowerBoundModifierAttribute.
+    Attribute and element type changes are encoded in the result type.
+ Changing the element type is only allowed if the input type is a derived
+ type that extends the output element type.
+
+ Example:
+ ```
+ fir.rebox_assumed_rank %1 lbs zeroes : (!fir.box<!fir.array<*:f32>>) -> !fir.box<!fir.array<*:f32>>
+ ```
+ }];
+
+ let arguments = (ins
+ AnyRefOrBoxType:$box,
+ fir_LowerBoundModifierAttribute:$lbs_modifier
+ );
+
+ let results = (outs BoxOrClassType);
+
+ let assemblyFormat = [{
+ $box `lbs` $lbs_modifier
+ attr-dict `:` functional-type(operands, results)
+ }];
+
+ let hasVerifier = 1;
+}
+
def fir_EmboxCharOp : fir_Op<"emboxchar", [NoMemoryEffect]> {
let summary = "boxes a given CHARACTER reference and its LEN parameter";
diff --git a/flang/include/flang/Optimizer/Dialect/FIRType.h b/flang/include/flang/Optimizer/Dialect/FIRType.h
index b4344435db9f..0aeb29a93d71 100644
--- a/flang/include/flang/Optimizer/Dialect/FIRType.h
+++ b/flang/include/flang/Optimizer/Dialect/FIRType.h
@@ -53,6 +53,7 @@ public:
/// Return the same type, except for the shape, that is taken the shape
/// of shapeMold.
BaseBoxType getBoxTypeWithNewShape(mlir::Type shapeMold) const;
+ BaseBoxType getBoxTypeWithNewShape(int rank) const;
/// Methods for support type inquiry through isa, cast, and dyn_cast.
static bool classof(mlir::Type type);
diff --git a/flang/include/flang/Optimizer/HLFIR/Passes.h b/flang/include/flang/Optimizer/HLFIR/Passes.h
index 3314e0b887f6..4fa619cd53ca 100644
--- a/flang/include/flang/Optimizer/HLFIR/Passes.h
+++ b/flang/include/flang/Optimizer/HLFIR/Passes.h
@@ -24,11 +24,6 @@ namespace hlfir {
std::unique_ptr<mlir::Pass> createConvertHLFIRtoFIRPass();
std::unique_ptr<mlir::Pass> createBufferizeHLFIRPass();
-std::unique_ptr<mlir::Pass> createLowerHLFIRIntrinsicsPass();
-std::unique_ptr<mlir::Pass> createSimplifyHLFIRIntrinsicsPass();
-std::unique_ptr<mlir::Pass> createInlineElementalsPass();
-std::unique_ptr<mlir::Pass> createLowerHLFIROrderedAssignmentsPass();
-std::unique_ptr<mlir::Pass> createOptimizedBufferizationPass();
#define GEN_PASS_REGISTRATION
#include "flang/Optimizer/HLFIR/Passes.h.inc"
diff --git a/flang/include/flang/Optimizer/HLFIR/Passes.td b/flang/include/flang/Optimizer/HLFIR/Passes.td
index dae96b3f767e..fc3d2a0d4681 100644
--- a/flang/include/flang/Optimizer/HLFIR/Passes.td
+++ b/flang/include/flang/Optimizer/HLFIR/Passes.td
@@ -23,19 +23,16 @@ def BufferizeHLFIR : Pass<"bufferize-hlfir", "::mlir::ModuleOp"> {
let constructor = "hlfir::createBufferizeHLFIRPass()";
}
-def OptimizedBufferization : Pass<"opt-bufferization", "::mlir::func::FuncOp"> {
+def OptimizedBufferization : Pass<"opt-bufferization"> {
let summary = "Special cases for hlfir.expr bufferization where we can avoid a temporary which would be created by the generic bufferization pass";
- let constructor = "hlfir::createOptimizedBufferizationPass()";
}
def LowerHLFIRIntrinsics : Pass<"lower-hlfir-intrinsics", "::mlir::ModuleOp"> {
let summary = "Lower HLFIR transformational intrinsic operations";
- let constructor = "hlfir::createLowerHLFIRIntrinsicsPass()";
}
def LowerHLFIROrderedAssignments : Pass<"lower-hlfir-ordered-assignments", "::mlir::ModuleOp"> {
let summary = "Lower HLFIR ordered assignments like forall and where operations";
- let constructor = "hlfir::createLowerHLFIROrderedAssignmentsPass()";
let options = [
Option<"tryFusingAssignments", "fuse-assignments",
"bool", /*default=*/"false",
@@ -46,14 +43,12 @@ def LowerHLFIROrderedAssignments : Pass<"lower-hlfir-ordered-assignments", "::ml
];
}
-def SimplifyHLFIRIntrinsics : Pass<"simplify-hlfir-intrinsics", "::mlir::func::FuncOp"> {
+def SimplifyHLFIRIntrinsics : Pass<"simplify-hlfir-intrinsics"> {
let summary = "Simplify HLFIR intrinsic operations that don't need to result in runtime calls";
- let constructor = "hlfir::createSimplifyHLFIRIntrinsicsPass()";
}
-def InlineElementals : Pass<"inline-elementals", "::mlir::func::FuncOp"> {
+def InlineElementals : Pass<"inline-elementals"> {
let summary = "Inline chained hlfir.elemental operations";
- let constructor = "hlfir::createInlineElementalsPass()";
}
#endif //FORTRAN_DIALECT_HLFIR_PASSES
diff --git a/flang/include/flang/Optimizer/Transforms/Passes.h b/flang/include/flang/Optimizer/Transforms/Passes.h
index e40e2faed533..ebdd60630c33 100644
--- a/flang/include/flang/Optimizer/Transforms/Passes.h
+++ b/flang/include/flang/Optimizer/Transforms/Passes.h
@@ -36,6 +36,7 @@ namespace fir {
#define GEN_PASS_DECL_AFFINEDIALECTDEMOTION
#define GEN_PASS_DECL_ANNOTATECONSTANTOPERANDS
#define GEN_PASS_DECL_ARRAYVALUECOPY
+#define GEN_PASS_DECL_ASSUMEDRANKOPCONVERSION
#define GEN_PASS_DECL_CHARACTERCONVERSION
#define GEN_PASS_DECL_CFGCONVERSION
#define GEN_PASS_DECL_EXTERNALNAMECONVERSION
diff --git a/flang/include/flang/Optimizer/Transforms/Passes.td b/flang/include/flang/Optimizer/Transforms/Passes.td
index 28420a8b3f70..f494da555f5a 100644
--- a/flang/include/flang/Optimizer/Transforms/Passes.td
+++ b/flang/include/flang/Optimizer/Transforms/Passes.td
@@ -402,4 +402,16 @@ def FunctionAttr : Pass<"function-attr", "mlir::func::FuncOp"> {
let constructor = "::fir::createFunctionAttrPass()";
}
+def AssumedRankOpConversion : Pass<"fir-assumed-rank-op", "mlir::ModuleOp"> {
+ let summary =
+ "Simplify operations on assumed-rank types";
+ let description = [{
+ This pass breaks up the lowering of operations on assumed-rank types by
+ introducing an intermediate FIR level that simplifies code generation.
+ }];
+ let dependentDialects = [
+ "fir::FIROpsDialect", "mlir::func::FuncDialect"
+ ];
+}
+
#endif // FLANG_OPTIMIZER_TRANSFORMS_PASSES
diff --git a/flang/include/flang/Runtime/support.h b/flang/include/flang/Runtime/support.h
index e7ae2154b2a7..8bdf3b9fca83 100644
--- a/flang/include/flang/Runtime/support.h
+++ b/flang/include/flang/Runtime/support.h
@@ -10,6 +10,7 @@
#ifndef FORTRAN_RUNTIME_SUPPORT_H_
#define FORTRAN_RUNTIME_SUPPORT_H_
+#include "flang/ISO_Fortran_binding_wrapper.h"
#include "flang/Runtime/entry-names.h"
#include <cstddef>
#include <cstdint>
@@ -18,11 +19,29 @@ namespace Fortran::runtime {
class Descriptor;
+namespace typeInfo {
+class DerivedType;
+}
+
+enum class LowerBoundModifier : int {
+ Preserve = 0,
+ SetToOnes = 1,
+ SetToZeroes = 2
+};
+
extern "C" {
// Predicate: is the storage described by a Descriptor contiguous in memory?
bool RTDECL(IsContiguous)(const Descriptor &);
+// Copy "from" descriptor into "to" descriptor and update "to" dynamic type,
+// CFI_attribute, and lower bounds according to the other arguments.
+// "newDynamicType" may be a null pointer in which case "to" dynamic type is the
+// one of "from".
+void RTDECL(CopyAndUpdateDescriptor)(Descriptor &to, const Descriptor &from,
+ const typeInfo::DerivedType *newDynamicType,
+ ISO::CFI_attribute_t newAttribute, enum LowerBoundModifier newLowerBounds);
+
} // extern "C"
} // namespace Fortran::runtime
#endif // FORTRAN_RUNTIME_SUPPORT_H_
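A minimal sketch of a direct C++ call to the new entry point, assuming the usual RTNAME entry-point macro from entry-names.h and a CFI attribute constant from ISO_Fortran_binding:
```
// Keep "to"'s dynamic type (null newDynamicType), mark the result as a
// pointer, and reset the lower bounds to 1.
RTNAME(CopyAndUpdateDescriptor)(to, from, /*newDynamicType=*/nullptr,
    CFI_attribute_pointer, Fortran::runtime::LowerBoundModifier::SetToOnes);
```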
diff --git a/flang/include/flang/Semantics/openmp-directive-sets.h b/flang/include/flang/Semantics/openmp-directive-sets.h
index 842d251b682a..da66e0eda321 100644
--- a/flang/include/flang/Semantics/openmp-directive-sets.h
+++ b/flang/include/flang/Semantics/openmp-directive-sets.h
@@ -205,9 +205,11 @@ static const OmpDirectiveSet compositeConstructSet{
};
static const OmpDirectiveSet blockConstructSet{
+ Directive::OMPD_masked,
Directive::OMPD_master,
Directive::OMPD_ordered,
Directive::OMPD_parallel,
+ Directive::OMPD_parallel_masked,
Directive::OMPD_parallel_workshare,
Directive::OMPD_single,
Directive::OMPD_target,
diff --git a/flang/include/flang/Semantics/scope.h b/flang/include/flang/Semantics/scope.h
index 21072772d184..a58163f5460c 100644
--- a/flang/include/flang/Semantics/scope.h
+++ b/flang/include/flang/Semantics/scope.h
@@ -225,6 +225,7 @@ public:
ImportKind GetImportKind() const;
// Names appearing in IMPORT statements in this scope
std::set<SourceName> importNames() const { return importNames_; }
+ bool CanImport(const SourceName &) const;
// Set the kind of imports from host into this scope.
// Return an error message for incompatible kinds.
@@ -298,7 +299,6 @@ private:
// or Symbol& points to one in there.
static Symbols<1024> allSymbols;
- bool CanImport(const SourceName &) const;
const DeclTypeSpec &MakeLengthlessType(DeclTypeSpec &&);
friend llvm::raw_ostream &operator<<(llvm::raw_ostream &, const Scope &);
diff --git a/flang/include/flang/Semantics/semantics.h b/flang/include/flang/Semantics/semantics.h
index 167e61381639..d382663762bc 100644
--- a/flang/include/flang/Semantics/semantics.h
+++ b/flang/include/flang/Semantics/semantics.h
@@ -110,6 +110,9 @@ public:
evaluate::FoldingContext &foldingContext() { return foldingContext_; }
parser::AllCookedSources &allCookedSources() { return allCookedSources_; }
ModuleDependences &moduleDependences() { return moduleDependences_; }
+ std::map<const Symbol *, SourceName> &moduleFileOutputRenamings() {
+ return moduleFileOutputRenamings_;
+ }
SemanticsContext &set_location(
const std::optional<parser::CharBlock> &location) {
@@ -299,6 +302,7 @@ private:
std::list<parser::Program> modFileParseTrees_;
std::unique_ptr<CommonBlockMap> commonBlockMap_;
ModuleDependences moduleDependences_;
+ std::map<const Symbol *, SourceName> moduleFileOutputRenamings_;
};
class Semantics {
diff --git a/flang/include/flang/Semantics/symbol.h b/flang/include/flang/Semantics/symbol.h
index 50f7b68d80cb..f130036d949d 100644
--- a/flang/include/flang/Semantics/symbol.h
+++ b/flang/include/flang/Semantics/symbol.h
@@ -815,6 +815,7 @@ public:
void SetIsExplicitBindName(bool);
bool IsFuncResult() const;
bool IsObjectArray() const;
+ const ArraySpec *GetShape() const;
bool IsSubprogram() const;
bool IsFromModFile() const;
bool HasExplicitInterface() const {
diff --git a/flang/include/flang/Tools/CLOptions.inc b/flang/include/flang/Tools/CLOptions.inc
index 61e591f2086d..a215488ebd0f 100644
--- a/flang/include/flang/Tools/CLOptions.inc
+++ b/flang/include/flang/Tools/CLOptions.inc
@@ -292,6 +292,7 @@ inline void createDefaultFIROptimizerPassPipeline(
// Polymorphic types
pm.addPass(fir::createPolymorphicOpConversion());
+ pm.addPass(fir::createAssumedRankOpConversion());
if (pc.AliasAnalysis && !disableFirAliasTags && !useOldAliasTags)
pm.addPass(fir::createAddAliasTags());
@@ -317,16 +318,18 @@ inline void createHLFIRToFIRPassPipeline(
mlir::PassManager &pm, llvm::OptimizationLevel optLevel = defaultOptLevel) {
if (optLevel.isOptimizingForSpeed()) {
addCanonicalizerPassWithoutRegionSimplification(pm);
- pm.addPass(hlfir::createSimplifyHLFIRIntrinsicsPass());
+ addNestedPassToAllTopLevelOperations(
+ pm, hlfir::createSimplifyHLFIRIntrinsics);
}
- pm.addPass(hlfir::createInlineElementalsPass());
+ addNestedPassToAllTopLevelOperations(pm, hlfir::createInlineElementals);
if (optLevel.isOptimizingForSpeed()) {
addCanonicalizerPassWithoutRegionSimplification(pm);
pm.addPass(mlir::createCSEPass());
- pm.addPass(hlfir::createOptimizedBufferizationPass());
+ addNestedPassToAllTopLevelOperations(
+ pm, hlfir::createOptimizedBufferization);
}
- pm.addPass(hlfir::createLowerHLFIROrderedAssignmentsPass());
- pm.addPass(hlfir::createLowerHLFIRIntrinsicsPass());
+ pm.addPass(hlfir::createLowerHLFIROrderedAssignments());
+ pm.addPass(hlfir::createLowerHLFIRIntrinsics());
pm.addPass(hlfir::createBufferizeHLFIRPass());
pm.addPass(hlfir::createConvertHLFIRtoFIRPass());
}
diff --git a/flang/lib/Evaluate/characteristics.cpp b/flang/lib/Evaluate/characteristics.cpp
index ab03ca5ed2d5..a0ce190b90e9 100644
--- a/flang/lib/Evaluate/characteristics.cpp
+++ b/flang/lib/Evaluate/characteristics.cpp
@@ -1333,16 +1333,21 @@ bool Procedure::IsCompatibleWith(const Procedure &actual,
return false;
}
-int Procedure::FindPassIndex(std::optional<parser::CharBlock> name) const {
+std::optional<int> Procedure::FindPassIndex(
+ std::optional<parser::CharBlock> name) const {
int argCount{static_cast<int>(dummyArguments.size())};
- int index{0};
if (name) {
- while (index < argCount && *name != dummyArguments[index].name.c_str()) {
- ++index;
+ for (int index{0}; index < argCount; ++index) {
+ if (*name == dummyArguments[index].name.c_str()) {
+ return index;
+ }
}
+ return std::nullopt;
+ } else if (argCount > 0) {
+ return 0;
+ } else {
+ return std::nullopt;
}
- CHECK(index < argCount);
- return index;
}
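Callers must now handle the no-match case explicitly instead of relying on the removed CHECK; a hypothetical use:
```
if (std::optional<int> passIndex{proc.FindPassIndex(passName)}) {
  const auto &passArg{proc.dummyArguments[*passIndex]};
  // ... use the PASS argument
} else {
  // no dummy argument matches: diagnose instead of crashing
}
```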
bool Procedure::CanOverride(
diff --git a/flang/lib/Evaluate/formatting.cpp b/flang/lib/Evaluate/formatting.cpp
index 20193b006bf2..0870d56549f7 100644
--- a/flang/lib/Evaluate/formatting.cpp
+++ b/flang/lib/Evaluate/formatting.cpp
@@ -14,6 +14,7 @@
#include "flang/Evaluate/fold.h"
#include "flang/Evaluate/tools.h"
#include "flang/Parser/characters.h"
+#include "flang/Semantics/semantics.h"
#include "flang/Semantics/symbol.h"
#include "llvm/Support/raw_ostream.h"
@@ -53,7 +54,7 @@ static void ShapeAsFortran(llvm::raw_ostream &o,
template <typename RESULT, typename VALUE>
llvm::raw_ostream &ConstantBase<RESULT, VALUE>::AsFortran(
- llvm::raw_ostream &o, const parser::CharBlock *derivedTypeRename) const {
+ llvm::raw_ostream &o) const {
bool hasNonDefaultLowerBound{printLbounds && HasNonDefaultLowerBound()};
if (Rank() > 1 || hasNonDefaultLowerBound) {
o << "reshape(";
@@ -85,8 +86,7 @@ llvm::raw_ostream &ConstantBase<RESULT, VALUE>::AsFortran(
o << ".false." << '_' << Result::kind;
}
} else {
- StructureConstructor{result_.derivedTypeSpec(), value}.AsFortran(
- o, derivedTypeRename);
+ StructureConstructor{result_.derivedTypeSpec(), value}.AsFortran(o);
}
}
if (Rank() > 0) {
@@ -124,9 +124,89 @@ llvm::raw_ostream &Constant<Type<TypeCategory::Character, KIND>>::AsFortran(
return o;
}
+llvm::raw_ostream &EmitVar(llvm::raw_ostream &o, const Symbol &symbol,
+ std::optional<parser::CharBlock> name = std::nullopt) {
+ const auto &renamings{symbol.owner().context().moduleFileOutputRenamings()};
+ if (auto iter{renamings.find(&symbol)}; iter != renamings.end()) {
+ return o << iter->second.ToString();
+ } else if (name) {
+ return o << name->ToString();
+ } else {
+ return o << symbol.name().ToString();
+ }
+}
+
+llvm::raw_ostream &EmitVar(llvm::raw_ostream &o, const std::string &lit) {
+ return o << parser::QuoteCharacterLiteral(lit);
+}
+
+llvm::raw_ostream &EmitVar(llvm::raw_ostream &o, const std::u16string &lit) {
+ return o << parser::QuoteCharacterLiteral(lit);
+}
+
+llvm::raw_ostream &EmitVar(llvm::raw_ostream &o, const std::u32string &lit) {
+ return o << parser::QuoteCharacterLiteral(lit);
+}
+
+template <typename A>
+llvm::raw_ostream &EmitVar(llvm::raw_ostream &o, const A &x) {
+ return x.AsFortran(o);
+}
+
+template <typename A>
+llvm::raw_ostream &EmitVar(llvm::raw_ostream &o, common::Reference<A> x) {
+ return EmitVar(o, *x);
+}
+
+template <typename A>
+llvm::raw_ostream &EmitVar(
+ llvm::raw_ostream &o, const A *p, const char *kw = nullptr) {
+ if (p) {
+ if (kw) {
+ o << kw;
+ }
+ EmitVar(o, *p);
+ }
+ return o;
+}
+
+template <typename A>
+llvm::raw_ostream &EmitVar(
+ llvm::raw_ostream &o, const std::optional<A> &x, const char *kw = nullptr) {
+ if (x) {
+ if (kw) {
+ o << kw;
+ }
+ EmitVar(o, *x);
+ }
+ return o;
+}
+
+template <typename A, bool COPY>
+llvm::raw_ostream &EmitVar(llvm::raw_ostream &o,
+ const common::Indirection<A, COPY> &p, const char *kw = nullptr) {
+ if (kw) {
+ o << kw;
+ }
+ EmitVar(o, p.value());
+ return o;
+}
+
+template <typename A>
+llvm::raw_ostream &EmitVar(llvm::raw_ostream &o, const std::shared_ptr<A> &p) {
+ CHECK(p);
+ return EmitVar(o, *p);
+}
+
+template <typename... A>
+llvm::raw_ostream &EmitVar(llvm::raw_ostream &o, const std::variant<A...> &u) {
+ common::visit([&](const auto &x) { EmitVar(o, x); }, u);
+ return o;
+}
+
llvm::raw_ostream &ActualArgument::AssumedType::AsFortran(
llvm::raw_ostream &o) const {
- return o << symbol_->name().ToString();
+ return EmitVar(o, *symbol_);
}
llvm::raw_ostream &ActualArgument::AsFortran(llvm::raw_ostream &o) const {
@@ -504,15 +584,37 @@ llvm::raw_ostream &ExpressionBase<RESULT>::AsFortran(
return o;
}
-llvm::raw_ostream &StructureConstructor::AsFortran(
- llvm::raw_ostream &o, const parser::CharBlock *derivedTypeRename) const {
- o << DerivedTypeSpecAsFortran(result_.derivedTypeSpec(), derivedTypeRename);
+static std::string DerivedTypeSpecAsFortran(
+ const semantics::DerivedTypeSpec &spec) {
+ std::string buf;
+ llvm::raw_string_ostream ss{buf};
+ EmitVar(ss, spec.typeSymbol(), spec.name());
+ char ch{'('};
+ for (const auto &[name, value] : spec.parameters()) {
+ ss << ch << name.ToString() << '=';
+ ch = ',';
+ if (value.isAssumed()) {
+ ss << '*';
+ } else if (value.isDeferred()) {
+ ss << ':';
+ } else {
+ value.GetExplicit()->AsFortran(ss);
+ }
+ }
+ if (ch != '(') {
+ ss << ')';
+ }
+ return ss.str();
+}
+
+llvm::raw_ostream &StructureConstructor::AsFortran(llvm::raw_ostream &o) const {
+ o << DerivedTypeSpecAsFortran(result_.derivedTypeSpec());
if (values_.empty()) {
o << '(';
} else {
char ch{'('};
for (const auto &[symbol, value] : values_) {
- value.value().AsFortran(o << ch << symbol->name().ToString() << '=');
+ value.value().AsFortran(EmitVar(o << ch, *symbol) << '=');
ch = ',';
}
}
@@ -568,101 +670,6 @@ std::string SomeDerived::AsFortran() const {
}
}
-std::string DerivedTypeSpecAsFortran(const semantics::DerivedTypeSpec &spec,
- const parser::CharBlock *derivedTypeRename) {
- std::string buf;
- llvm::raw_string_ostream ss{buf};
- ss << (derivedTypeRename ? *derivedTypeRename : spec.name()).ToString();
- char ch{'('};
- for (const auto &[name, value] : spec.parameters()) {
- ss << ch << name.ToString() << '=';
- ch = ',';
- if (value.isAssumed()) {
- ss << '*';
- } else if (value.isDeferred()) {
- ss << ':';
- } else {
- value.GetExplicit()->AsFortran(ss);
- }
- }
- if (ch != '(') {
- ss << ')';
- }
- return ss.str();
-}
-
-llvm::raw_ostream &EmitVar(llvm::raw_ostream &o, const Symbol &symbol) {
- return o << symbol.name().ToString();
-}
-
-llvm::raw_ostream &EmitVar(llvm::raw_ostream &o, const std::string &lit) {
- return o << parser::QuoteCharacterLiteral(lit);
-}
-
-llvm::raw_ostream &EmitVar(llvm::raw_ostream &o, const std::u16string &lit) {
- return o << parser::QuoteCharacterLiteral(lit);
-}
-
-llvm::raw_ostream &EmitVar(llvm::raw_ostream &o, const std::u32string &lit) {
- return o << parser::QuoteCharacterLiteral(lit);
-}
-
-template <typename A>
-llvm::raw_ostream &EmitVar(llvm::raw_ostream &o, const A &x) {
- return x.AsFortran(o);
-}
-
-template <typename A>
-llvm::raw_ostream &EmitVar(llvm::raw_ostream &o, common::Reference<A> x) {
- return EmitVar(o, *x);
-}
-
-template <typename A>
-llvm::raw_ostream &EmitVar(
- llvm::raw_ostream &o, const A *p, const char *kw = nullptr) {
- if (p) {
- if (kw) {
- o << kw;
- }
- EmitVar(o, *p);
- }
- return o;
-}
-
-template <typename A>
-llvm::raw_ostream &EmitVar(
- llvm::raw_ostream &o, const std::optional<A> &x, const char *kw = nullptr) {
- if (x) {
- if (kw) {
- o << kw;
- }
- EmitVar(o, *x);
- }
- return o;
-}
-
-template <typename A, bool COPY>
-llvm::raw_ostream &EmitVar(llvm::raw_ostream &o,
- const common::Indirection<A, COPY> &p, const char *kw = nullptr) {
- if (kw) {
- o << kw;
- }
- EmitVar(o, p.value());
- return o;
-}
-
-template <typename A>
-llvm::raw_ostream &EmitVar(llvm::raw_ostream &o, const std::shared_ptr<A> &p) {
- CHECK(p);
- return EmitVar(o, *p);
-}
-
-template <typename... A>
-llvm::raw_ostream &EmitVar(llvm::raw_ostream &o, const std::variant<A...> &u) {
- common::visit([&](const auto &x) { EmitVar(o, x); }, u);
- return o;
-}
-
llvm::raw_ostream &BaseObject::AsFortran(llvm::raw_ostream &o) const {
return EmitVar(o, u);
}
diff --git a/flang/lib/Evaluate/shape.cpp b/flang/lib/Evaluate/shape.cpp
index 6246cb931ff9..5cf48b240eca 100644
--- a/flang/lib/Evaluate/shape.cpp
+++ b/flang/lib/Evaluate/shape.cpp
@@ -885,8 +885,12 @@ auto GetShapeHelper::operator()(const ProcedureRef &call) const -> Result {
intrinsic->name == "ubound") {
// For LBOUND/UBOUND, these are the array-valued cases (no DIM=)
if (!call.arguments().empty() && call.arguments().front()) {
- return Shape{
- MaybeExtentExpr{ExtentExpr{call.arguments().front()->Rank()}}};
+ if (IsAssumedRank(*call.arguments().front())) {
+ return Shape{MaybeExtentExpr{}};
+ } else {
+ return Shape{
+ MaybeExtentExpr{ExtentExpr{call.arguments().front()->Rank()}}};
+ }
}
} else if (intrinsic->name == "all" || intrinsic->name == "any" ||
intrinsic->name == "count" || intrinsic->name == "iall" ||
diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp
index 7ded9adcd5c2..63ef60710ddf 100644
--- a/flang/lib/Lower/Bridge.cpp
+++ b/flang/lib/Lower/Bridge.cpp
@@ -57,6 +57,7 @@
#include "flang/Semantics/symbol.h"
#include "flang/Semantics/tools.h"
#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h"
+#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Transforms/RegionUtils.h"
@@ -1303,6 +1304,43 @@ private:
genBranch(targetEval.block);
}
+ /// A construct contains nested evaluations. Some of these evaluations
+ /// may start a new basic block, others will add code to an existing
+ /// block.
+ /// Collect the list of nested evaluations that are last in their block,
+ /// organize them into two sets:
+ /// 1. Exiting evaluations: they may need a branch exiting from their
+ /// parent construct,
+ /// 2. Fall-through evaluations: they will continue to the following
+ /// evaluation. They may still need a branch, but they do not exit
+ /// the construct. They appear in cases where the following evaluation
+ /// is a target of some branch.
+ void collectFinalEvaluations(
+ Fortran::lower::pft::Evaluation &construct,
+ llvm::SmallVector<Fortran::lower::pft::Evaluation *> &exits,
+ llvm::SmallVector<Fortran::lower::pft::Evaluation *> &fallThroughs) {
+ Fortran::lower::pft::EvaluationList &nested =
+ construct.getNestedEvaluations();
+ if (nested.empty())
+ return;
+
+ Fortran::lower::pft::Evaluation *exit = construct.constructExit;
+ Fortran::lower::pft::Evaluation *previous = &nested.front();
+
+ for (auto it = ++nested.begin(), end = nested.end(); it != end;
+ previous = &*it++) {
+ if (it->block == nullptr)
+ continue;
+ // "*it" starts a new block, check what to do with "previous"
+ if (it->isIntermediateConstructStmt() && previous != exit)
+ exits.push_back(previous);
+ else if (previous->lexicalSuccessor && previous->lexicalSuccessor->block)
+ fallThroughs.push_back(previous);
+ }
+ if (previous != exit)
+ exits.push_back(previous);
+ }
+
/// Generate a SelectOp or branch sequence that compares \p selector against
/// values in \p valueList and targets corresponding labels in \p labelList.
/// If no value matches the selector, branch to \p defaultEval.
@@ -2110,6 +2148,9 @@ private:
}
// Unstructured branch sequence.
+ llvm::SmallVector<Fortran::lower::pft::Evaluation *> exits, fallThroughs;
+ collectFinalEvaluations(eval, exits, fallThroughs);
+
for (Fortran::lower::pft::Evaluation &e : eval.getNestedEvaluations()) {
auto genIfBranch = [&](mlir::Value cond) {
if (e.lexicalSuccessor == e.controlSuccessor) // empty block -> exit
@@ -2130,6 +2171,12 @@ private:
genIfBranch(genIfCondition(s));
} else {
genFIR(e);
+ if (blockIsUnterminated()) {
+ if (llvm::is_contained(exits, &e))
+ genConstructExitBranch(*eval.constructExit);
+ else if (llvm::is_contained(fallThroughs, &e))
+ genBranch(e.lexicalSuccessor->block);
+ }
}
}
}
@@ -2138,11 +2185,21 @@ private:
Fortran::lower::pft::Evaluation &eval = getEval();
Fortran::lower::StatementContext stmtCtx;
pushActiveConstruct(eval, stmtCtx);
+
+ llvm::SmallVector<Fortran::lower::pft::Evaluation *> exits, fallThroughs;
+ collectFinalEvaluations(eval, exits, fallThroughs);
+
for (Fortran::lower::pft::Evaluation &e : eval.getNestedEvaluations()) {
if (e.getIf<Fortran::parser::EndSelectStmt>())
maybeStartBlock(e.block);
else
genFIR(e);
+ if (blockIsUnterminated()) {
+ if (llvm::is_contained(exits, &e))
+ genConstructExitBranch(*eval.constructExit);
+ else if (llvm::is_contained(fallThroughs, &e))
+ genBranch(e.lexicalSuccessor->block);
+ }
}
popActiveConstruct();
}
@@ -3008,6 +3065,10 @@ private:
}
pushActiveConstruct(getEval(), stmtCtx);
+ llvm::SmallVector<Fortran::lower::pft::Evaluation *> exits, fallThroughs;
+ collectFinalEvaluations(getEval(), exits, fallThroughs);
+ Fortran::lower::pft::Evaluation &constructExit = *getEval().constructExit;
+
for (Fortran::lower::pft::Evaluation &eval :
getEval().getNestedEvaluations()) {
setCurrentPosition(eval.position);
@@ -3204,6 +3265,12 @@ private:
} else {
genFIR(eval);
}
+ if (blockIsUnterminated()) {
+ if (llvm::is_contained(exits, &eval))
+ genConstructExitBranch(constructExit);
+ else if (llvm::is_contained(fallThroughs, &eval))
+ genBranch(eval.lexicalSuccessor->block);
+ }
}
popActiveConstruct();
}
@@ -3717,21 +3784,36 @@ private:
hlfir::Entity &lhs, hlfir::Entity &rhs) {
bool lhsIsDevice = Fortran::evaluate::HasCUDAAttrs(assign.lhs);
bool rhsIsDevice = Fortran::evaluate::HasCUDAAttrs(assign.rhs);
- if (rhs.isBoxAddressOrValue() || lhs.isBoxAddressOrValue())
- TODO(loc, "CUDA data transfler with descriptors");
+
+ auto getRefIfLoaded = [](mlir::Value val) -> mlir::Value {
+ if (auto loadOp =
+ mlir::dyn_cast_or_null<fir::LoadOp>(val.getDefiningOp()))
+ return loadOp.getMemref();
+ return val;
+ };
+
+ mlir::Value rhsVal = getRefIfLoaded(rhs.getBase());
+ mlir::Value lhsVal = getRefIfLoaded(lhs.getBase());
// device = host
if (lhsIsDevice && !rhsIsDevice) {
auto transferKindAttr = cuf::DataTransferKindAttr::get(
builder.getContext(), cuf::DataTransferKind::HostDevice);
if (!rhs.isVariable()) {
- auto associate = hlfir::genAssociateExpr(
- loc, builder, rhs, rhs.getType(), ".cuf_host_tmp");
- builder.create<cuf::DataTransferOp>(loc, associate.getBase(), lhs,
- transferKindAttr);
- builder.create<hlfir::EndAssociateOp>(loc, associate);
+ // Special case if the rhs is a constant.
+ if (matchPattern(rhs.getDefiningOp(), mlir::m_Constant())) {
+ builder.create<cuf::DataTransferOp>(loc, rhs, lhsVal,
+ transferKindAttr);
+ } else {
+ auto associate = hlfir::genAssociateExpr(
+ loc, builder, rhs, rhs.getType(), ".cuf_host_tmp");
+ builder.create<cuf::DataTransferOp>(loc, associate.getBase(), lhsVal,
+ transferKindAttr);
+ builder.create<hlfir::EndAssociateOp>(loc, associate);
+ }
} else {
- builder.create<cuf::DataTransferOp>(loc, rhs, lhs, transferKindAttr);
+ builder.create<cuf::DataTransferOp>(loc, rhsVal, lhsVal,
+ transferKindAttr);
}
return;
}
@@ -3740,26 +3822,18 @@ private:
if (!lhsIsDevice && rhsIsDevice) {
auto transferKindAttr = cuf::DataTransferKindAttr::get(
builder.getContext(), cuf::DataTransferKind::DeviceHost);
- if (!rhs.isVariable()) {
- // evaluateRhs loads scalar. Look for the memory reference to be used in
- // the transfer.
- if (mlir::isa_and_nonnull<fir::LoadOp>(rhs.getDefiningOp())) {
- auto loadOp = mlir::dyn_cast<fir::LoadOp>(rhs.getDefiningOp());
- builder.create<cuf::DataTransferOp>(loc, loadOp.getMemref(), lhs,
- transferKindAttr);
- return;
- }
- } else {
- builder.create<cuf::DataTransferOp>(loc, rhs, lhs, transferKindAttr);
- }
+ builder.create<cuf::DataTransferOp>(loc, rhsVal, lhsVal,
+ transferKindAttr);
return;
}
+ // device = device
if (lhsIsDevice && rhsIsDevice) {
assert(rhs.isVariable() && "CUDA Fortran assignment rhs is not legal");
auto transferKindAttr = cuf::DataTransferKindAttr::get(
builder.getContext(), cuf::DataTransferKind::DeviceDevice);
- builder.create<cuf::DataTransferOp>(loc, rhs, lhs, transferKindAttr);
+ builder.create<cuf::DataTransferOp>(loc, rhsVal, lhsVal,
+ transferKindAttr);
return;
}
llvm_unreachable("Unhandled CUDA data transfer");
@@ -4536,28 +4610,6 @@ private:
setCurrentEval(eval);
setCurrentPosition(eval.position);
eval.visit([&](const auto &stmt) { genFIR(stmt); });
-
- // Generate an end-of-block branch for several special cases. For
- // constructs, this can be done for either the end construct statement,
- // or for the construct itself, which will skip this code if the
- // end statement was visited first and generated a branch.
- Fortran::lower::pft::Evaluation *successor = [&]() {
- if (eval.isConstruct() ||
- (eval.isDirective() && eval.hasNestedEvaluations()))
- return eval.getLastNestedEvaluation().lexicalSuccessor;
- return eval.lexicalSuccessor;
- }();
-
- if (successor && blockIsUnterminated()) {
- if (successor->isIntermediateConstructStmt() &&
- successor->parentConstruct->lowerAsUnstructured())
- // Exit from an intermediate unstructured IF or SELECT construct block.
- genBranch(successor->parentConstruct->constructExit->block);
- else if (unstructuredContext && eval.isConstructStmt() &&
- successor == eval.controlSuccessor)
- // Exit from a degenerate, empty construct block.
- genBranch(eval.parentConstruct->constructExit->block);
- }
}
/// Map mlir function block arguments to the corresponding Fortran dummy
diff --git a/flang/lib/Lower/OpenMP/ClauseProcessor.cpp b/flang/lib/Lower/OpenMP/ClauseProcessor.cpp
index 875599098b3d..68619f699ebb 100644
--- a/flang/lib/Lower/OpenMP/ClauseProcessor.cpp
+++ b/flang/lib/Lower/OpenMP/ClauseProcessor.cpp
@@ -882,8 +882,11 @@ bool ClauseProcessor::processMap(
// Explicit map captures are captured ByRef by default,
// optimisation passes may alter this to ByCopy or other capture
// types to optimise
+ auto location = mlir::NameLoc::get(
+ mlir::StringAttr::get(firOpBuilder.getContext(), asFortran.str()),
+ symAddr.getLoc());
mlir::omp::MapInfoOp mapOp = createMapInfoOp(
- firOpBuilder, clauseLocation, symAddr,
+ firOpBuilder, location, symAddr,
/*varPtrPtr=*/mlir::Value{}, asFortran.str(), bounds,
/*members=*/{}, /*membersIndex=*/mlir::DenseIntElementsAttr{},
static_cast<
diff --git a/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp b/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp
index 6a8c3e3ac9ba..b722e19272ca 100644
--- a/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp
+++ b/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp
@@ -22,6 +22,30 @@
namespace Fortran {
namespace lower {
namespace omp {
+bool DataSharingProcessor::OMPConstructSymbolVisitor::isSymbolDefineBy(
+ const semantics::Symbol *symbol, lower::pft::Evaluation &eval) const {
+ return eval.visit(
+ common::visitors{[&](const parser::OpenMPConstruct &functionParserNode) {
+ return symDefMap.count(symbol) &&
+ symDefMap.at(symbol) == &functionParserNode;
+ },
+ [](const auto &functionParserNode) { return false; }});
+}
+
+DataSharingProcessor::DataSharingProcessor(
+ lower::AbstractConverter &converter, semantics::SemanticsContext &semaCtx,
+ const List<Clause> &clauses, lower::pft::Evaluation &eval,
+ bool shouldCollectPreDeterminedSymbols, bool useDelayedPrivatization,
+ lower::SymMap *symTable)
+ : hasLastPrivateOp(false), converter(converter), semaCtx(semaCtx),
+ firOpBuilder(converter.getFirOpBuilder()), clauses(clauses), eval(eval),
+ shouldCollectPreDeterminedSymbols(shouldCollectPreDeterminedSymbols),
+ useDelayedPrivatization(useDelayedPrivatization), symTable(symTable),
+ visitor() {
+ eval.visit([&](const auto &functionParserNode) {
+ parser::Walk(functionParserNode, visitor);
+ });
+}
void DataSharingProcessor::processStep1(
mlir::omp::PrivateClauseOps *clauseOps,
@@ -226,7 +250,7 @@ void DataSharingProcessor::insertLastPrivateCompare(mlir::Operation *op) {
auto ifOp = firOpBuilder.create<fir::IfOp>(loc, cmpOp, /*else*/ false);
firOpBuilder.setInsertionPointToStart(&ifOp.getThenRegion().front());
assert(loopIV && "loopIV was not set");
- firOpBuilder.create<fir::StoreOp>(loopOp.getLoc(), v, loopIV);
+ firOpBuilder.createStoreWithConvert(loc, v, loopIV);
lastPrivIP = firOpBuilder.saveInsertionPoint();
} else if (mlir::isa<mlir::omp::SectionsOp>(op)) {
// Already handled by genOMP()
@@ -285,38 +309,9 @@ void DataSharingProcessor::collectSymbolsInNestedRegions(
// Recursively look for OpenMP constructs within `nestedEval`'s region
collectSymbolsInNestedRegions(nestedEval, flag, symbolsInNestedRegions);
else {
- bool isOrderedConstruct = [&]() {
- if (auto *ompConstruct =
- nestedEval.getIf<parser::OpenMPConstruct>()) {
- if (auto *ompBlockConstruct =
- std::get_if<parser::OpenMPBlockConstruct>(
- &ompConstruct->u)) {
- const auto &beginBlockDirective =
- std::get<parser::OmpBeginBlockDirective>(
- ompBlockConstruct->t);
- const auto origDirective =
- std::get<parser::OmpBlockDirective>(beginBlockDirective.t).v;
-
- return origDirective == llvm::omp::Directive::OMPD_ordered;
- }
- }
-
- return false;
- }();
-
- bool isCriticalConstruct = [&]() {
- if (auto *ompConstruct =
- nestedEval.getIf<parser::OpenMPConstruct>()) {
- return std::get_if<parser::OpenMPCriticalConstruct>(
- &ompConstruct->u) != nullptr;
- }
- return false;
- }();
-
- if (!isOrderedConstruct && !isCriticalConstruct)
- converter.collectSymbolSet(nestedEval, symbolsInNestedRegions, flag,
- /*collectSymbols=*/true,
- /*collectHostAssociatedSymbols=*/false);
+ converter.collectSymbolSet(nestedEval, symbolsInNestedRegions, flag,
+ /*collectSymbols=*/true,
+ /*collectHostAssociatedSymbols=*/false);
}
}
}
@@ -356,6 +351,11 @@ void DataSharingProcessor::collectSymbols(
llvm::SetVector<const semantics::Symbol *> symbolsInNestedRegions;
collectSymbolsInNestedRegions(eval, flag, symbolsInNestedRegions);
+
+ for (auto *symbol : allSymbols)
+ if (visitor.isSymbolDefineBy(symbol, eval))
+ symbolsInNestedRegions.remove(symbol);
+
// Filter-out symbols that must not be privatized.
bool collectImplicit = flag == semantics::Symbol::Flag::OmpImplicit;
bool collectPreDetermined = flag == semantics::Symbol::Flag::OmpPreDetermined;
diff --git a/flang/lib/Lower/OpenMP/DataSharingProcessor.h b/flang/lib/Lower/OpenMP/DataSharingProcessor.h
index 9ec5304eb69d..80a956de35ba 100644
--- a/flang/lib/Lower/OpenMP/DataSharingProcessor.h
+++ b/flang/lib/Lower/OpenMP/DataSharingProcessor.h
@@ -32,6 +32,40 @@ namespace omp {
class DataSharingProcessor {
private:
+ /// A symbol visitor that keeps track of the currently active OpenMPConstruct
+ /// at any point in time. This is used to track Symbol definition scopes in
+  /// order to tell which OMP scope defines vs. references a certain Symbol.
+ struct OMPConstructSymbolVisitor {
+ template <typename T>
+ bool Pre(const T &) {
+ return true;
+ }
+ template <typename T>
+ void Post(const T &) {}
+
+ bool Pre(const parser::OpenMPConstruct &omp) {
+ currentConstruct = &omp;
+ return true;
+ }
+
+ void Post(const parser::OpenMPConstruct &omp) {
+ currentConstruct = nullptr;
+ }
+
+ void Post(const parser::Name &name) {
+ symDefMap.try_emplace(name.symbol, currentConstruct);
+ }
+
+ const parser::OpenMPConstruct *currentConstruct = nullptr;
+ llvm::DenseMap<semantics::Symbol *, const parser::OpenMPConstruct *>
+ symDefMap;
+
+ /// Given a \p symbol and an \p eval, returns true if eval is the OMP
+ /// construct that defines symbol.
+ bool isSymbolDefineBy(const semantics::Symbol *symbol,
+ lower::pft::Evaluation &eval) const;
+ };
+
bool hasLastPrivateOp;
mlir::OpBuilder::InsertPoint lastPrivIP;
mlir::OpBuilder::InsertPoint insPt;
@@ -53,6 +87,7 @@ private:
bool shouldCollectPreDeterminedSymbols;
bool useDelayedPrivatization;
lower::SymMap *symTable;
+ OMPConstructSymbolVisitor visitor;
bool needBarrier();
void collectSymbols(semantics::Symbol::Flag flag,
@@ -97,11 +132,7 @@ public:
lower::pft::Evaluation &eval,
bool shouldCollectPreDeterminedSymbols,
bool useDelayedPrivatization = false,
- lower::SymMap *symTable = nullptr)
- : hasLastPrivateOp(false), converter(converter), semaCtx(semaCtx),
- firOpBuilder(converter.getFirOpBuilder()), clauses(clauses), eval(eval),
- shouldCollectPreDeterminedSymbols(shouldCollectPreDeterminedSymbols),
- useDelayedPrivatization(useDelayedPrivatization), symTable(symTable) {}
+ lower::SymMap *symTable = nullptr);
// Privatisation is split into two steps.
// Step1 performs cloning of all privatisation clauses and copying for
diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp
index ece098a5bfbb..9598457d123c 100644
--- a/flang/lib/Lower/OpenMP/OpenMP.cpp
+++ b/flang/lib/Lower/OpenMP/OpenMP.cpp
@@ -1608,9 +1608,12 @@ genTargetOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO;
mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM;
}
-
+ auto location =
+ mlir::NameLoc::get(mlir::StringAttr::get(firOpBuilder.getContext(),
+ sym.name().ToString()),
+ baseOp.getLoc());
mlir::Value mapOp = createMapInfoOp(
- firOpBuilder, baseOp.getLoc(), baseOp, /*varPtrPtr=*/mlir::Value{},
+ firOpBuilder, location, baseOp, /*varPtrPtr=*/mlir::Value{},
name.str(), bounds, /*members=*/{},
/*membersIndex=*/mlir::DenseIntElementsAttr{},
static_cast<
@@ -1899,8 +1902,7 @@ static void genOMPDispatch(lower::AbstractConverter &converter,
break;
case llvm::omp::Directive::OMPD_loop:
case llvm::omp::Directive::OMPD_masked:
- TODO(loc, "Unhandled loop directive (" +
- llvm::omp::getOpenMPDirectiveName(dir) + ")");
+ TODO(loc, "Unhandled directive " + llvm::omp::getOpenMPDirectiveName(dir));
break;
case llvm::omp::Directive::OMPD_master:
genMasterOp(converter, symTable, semaCtx, eval, loc, queue, item);
diff --git a/flang/lib/Optimizer/Builder/CMakeLists.txt b/flang/lib/Optimizer/Builder/CMakeLists.txt
index 6d0aeb429d35..8ffd0aa4cf42 100644
--- a/flang/lib/Optimizer/Builder/CMakeLists.txt
+++ b/flang/lib/Optimizer/Builder/CMakeLists.txt
@@ -29,6 +29,7 @@ add_flang_library(FIRBuilder
Runtime/Ragged.cpp
Runtime/Reduction.cpp
Runtime/Stop.cpp
+ Runtime/Support.cpp
Runtime/TemporaryStack.cpp
Runtime/Transformational.cpp
TemporaryStorage.cpp
diff --git a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
index ae7e65098744..ad2f9236f0db 100644
--- a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
+++ b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
@@ -549,10 +549,18 @@ static constexpr IntrinsicHandler handlers[]{
{"back", asValue, handleDynamicOptional},
{"kind", asValue}}},
/*isElemental=*/true},
+ {"selected_char_kind",
+ &I::genSelectedCharKind,
+ {{{"name", asAddr}}},
+ /*isElemental=*/false},
{"selected_int_kind",
&I::genSelectedIntKind,
{{{"scalar", asAddr}}},
/*isElemental=*/false},
+ {"selected_logical_kind",
+ &I::genSelectedLogicalKind,
+ {{{"bits", asAddr}}},
+ /*isElemental=*/false},
{"selected_real_kind",
&I::genSelectedRealKind,
{{{"precision", asAddr, handleDynamicOptional},
@@ -5873,6 +5881,18 @@ IntrinsicLibrary::genScan(mlir::Type resultType,
return readAndAddCleanUp(resultMutableBox, resultType, "SCAN");
}
+// SELECTED_CHAR_KIND
+fir::ExtendedValue
+IntrinsicLibrary::genSelectedCharKind(mlir::Type resultType,
+ llvm::ArrayRef<fir::ExtendedValue> args) {
+ assert(args.size() == 1);
+
+ return builder.createConvert(
+ loc, resultType,
+ fir::runtime::genSelectedCharKind(builder, loc, fir::getBase(args[0]),
+ fir::getLen(args[0])));
+}
+
// SELECTED_INT_KIND
mlir::Value
IntrinsicLibrary::genSelectedIntKind(mlir::Type resultType,
@@ -5884,6 +5904,17 @@ IntrinsicLibrary::genSelectedIntKind(mlir::Type resultType,
fir::runtime::genSelectedIntKind(builder, loc, fir::getBase(args[0])));
}
+// SELECTED_LOGICAL_KIND
+mlir::Value
+IntrinsicLibrary::genSelectedLogicalKind(mlir::Type resultType,
+ llvm::ArrayRef<mlir::Value> args) {
+ assert(args.size() == 1);
+
+ return builder.createConvert(loc, resultType,
+ fir::runtime::genSelectedLogicalKind(
+ builder, loc, fir::getBase(args[0])));
+}
+
// SELECTED_REAL_KIND
mlir::Value
IntrinsicLibrary::genSelectedRealKind(mlir::Type resultType,
diff --git a/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp b/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp
index 81d5d21ece7a..8ac9d64f576b 100644
--- a/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp
+++ b/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp
@@ -468,6 +468,26 @@ mlir::Value fir::runtime::genScale(fir::FirOpBuilder &builder,
return builder.create<fir::CallOp>(loc, func, args).getResult(0);
}
+/// Generate call to Selected_char_kind intrinsic runtime routine.
+mlir::Value fir::runtime::genSelectedCharKind(fir::FirOpBuilder &builder,
+ mlir::Location loc,
+ mlir::Value name,
+ mlir::Value length) {
+ mlir::func::FuncOp func =
+ fir::runtime::getRuntimeFunc<mkRTKey(SelectedCharKind)>(loc, builder);
+ auto fTy = func.getFunctionType();
+ auto sourceFile = fir::factory::locationToFilename(builder, loc);
+ auto sourceLine =
+ fir::factory::locationToLineNo(builder, loc, fTy.getInput(1));
+ if (!fir::isa_ref_type(name.getType()))
+ fir::emitFatalError(loc, "argument address for runtime not found");
+
+ auto args = fir::runtime::createArguments(builder, loc, fTy, sourceFile,
+ sourceLine, name, length);
+
+ return builder.create<fir::CallOp>(loc, func, args).getResult(0);
+}
+
/// Generate call to Selected_int_kind intrinsic runtime routine.
mlir::Value fir::runtime::genSelectedIntKind(fir::FirOpBuilder &builder,
mlir::Location loc,
@@ -489,6 +509,27 @@ mlir::Value fir::runtime::genSelectedIntKind(fir::FirOpBuilder &builder,
return builder.create<fir::CallOp>(loc, func, args).getResult(0);
}
+/// Generate call to Selected_logical_kind intrinsic runtime routine.
+mlir::Value fir::runtime::genSelectedLogicalKind(fir::FirOpBuilder &builder,
+ mlir::Location loc,
+ mlir::Value x) {
+ mlir::func::FuncOp func =
+ fir::runtime::getRuntimeFunc<mkRTKey(SelectedLogicalKind)>(loc, builder);
+ auto fTy = func.getFunctionType();
+ auto sourceFile = fir::factory::locationToFilename(builder, loc);
+ auto sourceLine =
+ fir::factory::locationToLineNo(builder, loc, fTy.getInput(1));
+ if (!fir::isa_ref_type(x.getType()))
+ fir::emitFatalError(loc, "argument address for runtime not found");
+ mlir::Type eleTy = fir::unwrapRefType(x.getType());
+ mlir::Value xKind = builder.createIntegerConstant(
+ loc, fTy.getInput(3), eleTy.getIntOrFloatBitWidth() / 8);
+ auto args = fir::runtime::createArguments(builder, loc, fTy, sourceFile,
+ sourceLine, x, xKind);
+
+ return builder.create<fir::CallOp>(loc, func, args).getResult(0);
+}
+
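Hypothetical call sites for the two new helpers (value names are assumptions); note that name and x must be in-memory addresses, as enforced by the isa_ref_type checks above:
```
mlir::Value charKind =
    fir::runtime::genSelectedCharKind(builder, loc, nameAddr, nameLen);
mlir::Value logicalKind =
    fir::runtime::genSelectedLogicalKind(builder, loc, bitsAddr);
```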
/// Generate call to Selected_real_kind intrinsic runtime routine.
mlir::Value fir::runtime::genSelectedRealKind(fir::FirOpBuilder &builder,
mlir::Location loc,
diff --git a/flang/lib/Optimizer/Builder/Runtime/Support.cpp b/flang/lib/Optimizer/Builder/Runtime/Support.cpp
new file mode 100644
index 000000000000..12e47233e3d9
--- /dev/null
+++ b/flang/lib/Optimizer/Builder/Runtime/Support.cpp
@@ -0,0 +1,46 @@
+//===-- Support.cpp - generate support runtime API calls --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "flang/Optimizer/Builder/Runtime/Support.h"
+#include "flang/Optimizer/Builder/FIRBuilder.h"
+#include "flang/Optimizer/Builder/Runtime/RTBuilder.h"
+#include "flang/Runtime/support.h"
+#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
+
+using namespace Fortran::runtime;
+
+template <>
+constexpr fir::runtime::TypeBuilderFunc
+fir::runtime::getModel<Fortran::runtime::LowerBoundModifier>() {
+ return [](mlir::MLIRContext *context) -> mlir::Type {
+ return mlir::IntegerType::get(
+ context, sizeof(Fortran::runtime::LowerBoundModifier) * 8);
+ };
+}
+
+void fir::runtime::genCopyAndUpdateDescriptor(fir::FirOpBuilder &builder,
+ mlir::Location loc,
+ mlir::Value to, mlir::Value from,
+ mlir::Value newDynamicType,
+ mlir::Value newAttribute,
+ mlir::Value newLowerBounds) {
+ mlir::func::FuncOp func =
+ fir::runtime::getRuntimeFunc<mkRTKey(CopyAndUpdateDescriptor)>(loc,
+ builder);
+ auto fTy = func.getFunctionType();
+ auto args =
+ fir::runtime::createArguments(builder, loc, fTy, to, from, newDynamicType,
+ newAttribute, newLowerBounds);
+ llvm::StringRef noCapture = mlir::LLVM::LLVMDialect::getNoCaptureAttrName();
+ if (!func.getArgAttr(0, noCapture)) {
+ mlir::UnitAttr unitAttr = mlir::UnitAttr::get(func.getContext());
+ func.setArgAttr(0, noCapture, unitAttr);
+ func.setArgAttr(1, noCapture, unitAttr);
+ }
+ builder.create<fir::CallOp>(loc, func, args);
+}
diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
index 72172f63888e..74e68725003c 100644
--- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp
+++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -2716,6 +2716,18 @@ struct GlobalOpConversion : public fir::FIROpConversion<fir::GlobalOp> {
mlir::LogicalResult
matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const override {
+
+ mlir::LLVM::DIGlobalVariableExpressionAttr dbgExpr;
+
+ if (auto fusedLoc = mlir::dyn_cast<mlir::FusedLoc>(global.getLoc())) {
+ if (auto gvAttr =
+ mlir::dyn_cast_or_null<mlir::LLVM::DIGlobalVariableAttr>(
+ fusedLoc.getMetadata())) {
+ dbgExpr = mlir::LLVM::DIGlobalVariableExpressionAttr::get(
+ global.getContext(), gvAttr, mlir::LLVM::DIExpressionAttr());
+ }
+ }
+
auto tyAttr = convertType(global.getType());
if (auto boxType = mlir::dyn_cast<fir::BaseBoxType>(global.getType()))
tyAttr = this->lowerTy().convertBoxTypeAsStruct(boxType);
@@ -2724,8 +2736,11 @@ struct GlobalOpConversion : public fir::FIROpConversion<fir::GlobalOp> {
assert(attributeTypeIsCompatible(global.getContext(), initAttr, tyAttr));
auto linkage = convertLinkage(global.getLinkName());
auto isConst = global.getConstant().has_value();
+ mlir::SymbolRefAttr comdat;
+ llvm::ArrayRef<mlir::NamedAttribute> attrs;
auto g = rewriter.create<mlir::LLVM::GlobalOp>(
- loc, tyAttr, isConst, linkage, global.getSymName(), initAttr);
+ loc, tyAttr, isConst, linkage, global.getSymName(), initAttr, 0, 0,
+ false, false, comdat, attrs, dbgExpr);
auto module = global->getParentOfType<mlir::ModuleOp>();
// Add comdat if necessary
@@ -2966,39 +2981,40 @@ struct SelectCaseOpConversion : public fir::FIROpConversion<fir::SelectCaseOp> {
caseOp.getSuccessorOperands(adaptor.getOperands(), t);
std::optional<mlir::ValueRange> cmpOps =
*caseOp.getCompareOperands(adaptor.getOperands(), t);
- mlir::Value caseArg = *(cmpOps.value().begin());
mlir::Attribute attr = cases[t];
+ assert(mlir::isa<mlir::UnitAttr>(attr) || cmpOps.has_value());
if (mlir::isa<fir::PointIntervalAttr>(attr)) {
auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
- loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg);
+ loc, mlir::LLVM::ICmpPredicate::eq, selector, cmpOps->front());
genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
continue;
}
if (mlir::isa<fir::LowerBoundAttr>(attr)) {
auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
- loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
+ loc, mlir::LLVM::ICmpPredicate::sle, cmpOps->front(), selector);
genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
continue;
}
if (mlir::isa<fir::UpperBoundAttr>(attr)) {
auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
- loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg);
+ loc, mlir::LLVM::ICmpPredicate::sle, selector, cmpOps->front());
genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
continue;
}
if (mlir::isa<fir::ClosedIntervalAttr>(attr)) {
- auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
- loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
+ mlir::Value caseArg0 = *cmpOps->begin();
+ auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>(
+ loc, mlir::LLVM::ICmpPredicate::sle, caseArg0, selector);
auto *thisBlock = rewriter.getInsertionBlock();
auto *newBlock1 = createBlock(rewriter, dest);
auto *newBlock2 = createBlock(rewriter, dest);
rewriter.setInsertionPointToEnd(thisBlock);
- rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2);
+ rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp0, newBlock1, newBlock2);
rewriter.setInsertionPointToEnd(newBlock1);
- mlir::Value caseArg0 = *(cmpOps.value().begin() + 1);
- auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>(
- loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0);
- genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2);
+ mlir::Value caseArg1 = *(cmpOps->begin() + 1);
+ auto cmp1 = rewriter.create<mlir::LLVM::ICmpOp>(
+ loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg1);
+ genCondBrOp(loc, cmp1, dest, destOps, rewriter, newBlock2);
rewriter.setInsertionPointToEnd(newBlock2);
continue;
}
diff --git a/flang/lib/Optimizer/Dialect/CUF/Attributes/CMakeLists.txt b/flang/lib/Optimizer/Dialect/CUF/Attributes/CMakeLists.txt
index 81db40f3ba46..ec5484c1d610 100644
--- a/flang/lib/Optimizer/Dialect/CUF/Attributes/CMakeLists.txt
+++ b/flang/lib/Optimizer/Dialect/CUF/Attributes/CMakeLists.txt
@@ -5,6 +5,7 @@ add_flang_library(CUFAttrs
DEPENDS
MLIRIR
CUFAttrsIncGen
+ CUFOpsIncGen
LINK_LIBS
MLIRTargetLLVMIRExport
diff --git a/flang/lib/Optimizer/Dialect/CUF/CUFOps.cpp b/flang/lib/Optimizer/Dialect/CUF/CUFOps.cpp
index 870652c72fab..2c0c4c2cfae3 100644
--- a/flang/lib/Optimizer/Dialect/CUF/CUFOps.cpp
+++ b/flang/lib/Optimizer/Dialect/CUF/CUFOps.cpp
@@ -90,6 +90,24 @@ mlir::LogicalResult cuf::AllocateOp::verify() {
}
//===----------------------------------------------------------------------===//
+// DataTransferOp
+//===----------------------------------------------------------------------===//
+
+mlir::LogicalResult cuf::DataTransferOp::verify() {
+ mlir::Type srcTy = getSrc().getType();
+ mlir::Type dstTy = getDst().getType();
+ if ((fir::isa_ref_type(srcTy) && fir::isa_ref_type(dstTy)) ||
+ (fir::isa_box_type(srcTy) && fir::isa_box_type(dstTy)))
+ return mlir::success();
+ if (fir::isa_trivial(srcTy) &&
+ matchPattern(getSrc().getDefiningOp(), mlir::m_Constant()))
+ return mlir::success();
+ return emitOpError()
+ << "expect src and dst to be both references or descriptors or src to "
+ "be a constant";
+}
+
+//===----------------------------------------------------------------------===//
// DeallocateOp
//===----------------------------------------------------------------------===//
diff --git a/flang/lib/Optimizer/Dialect/FIROps.cpp b/flang/lib/Optimizer/Dialect/FIROps.cpp
index 94113da9a46c..998e9535582c 100644
--- a/flang/lib/Optimizer/Dialect/FIROps.cpp
+++ b/flang/lib/Optimizer/Dialect/FIROps.cpp
@@ -2413,6 +2413,52 @@ mlir::LogicalResult fir::ReboxOp::verify() {
}
//===----------------------------------------------------------------------===//
+// ReboxAssumedRankOp
+//===----------------------------------------------------------------------===//
+
+static bool areCompatibleAssumedRankElementType(mlir::Type inputEleTy,
+ mlir::Type outEleTy) {
+ if (inputEleTy == outEleTy)
+ return true;
+ // Output is unlimited polymorphic -> output dynamic type is the same as input
+ // type.
+ if (mlir::isa<mlir::NoneType>(outEleTy))
+ return true;
+ // Output/Input are derived types. Assuming input extends output type, output
+ // dynamic type is the output static type, unless output is polymorphic.
+ if (mlir::isa<fir::RecordType>(inputEleTy) &&
+ mlir::isa<fir::RecordType>(outEleTy))
+ return true;
+ if (areCompatibleCharacterTypes(inputEleTy, outEleTy))
+ return true;
+ return false;
+}
+
+mlir::LogicalResult fir::ReboxAssumedRankOp::verify() {
+ mlir::Type inputType = getBox().getType();
+ if (!mlir::isa<fir::BaseBoxType>(inputType) && !fir::isBoxAddress(inputType))
+ return emitOpError("input must be a box or box address");
+ mlir::Type inputEleTy =
+ mlir::cast<fir::BaseBoxType>(fir::unwrapRefType(inputType))
+ .unwrapInnerType();
+ mlir::Type outEleTy =
+ mlir::cast<fir::BaseBoxType>(getType()).unwrapInnerType();
+ if (!areCompatibleAssumedRankElementType(inputEleTy, outEleTy))
+ return emitOpError("input and output element types are incompatible");
+ return mlir::success();
+}
+
+void fir::ReboxAssumedRankOp::getEffects(
+ llvm::SmallVectorImpl<
+ mlir::SideEffects::EffectInstance<mlir::MemoryEffects::Effect>>
+ &effects) {
+ mlir::Value inputBox = getBox();
+ if (fir::isBoxAddress(inputBox.getType()))
+ effects.emplace_back(mlir::MemoryEffects::Read::get(), inputBox,
+ mlir::SideEffects::DefaultResource::get());
+}
+
+//===----------------------------------------------------------------------===//
// ResultOp
//===----------------------------------------------------------------------===//
diff --git a/flang/lib/Optimizer/Dialect/FIRType.cpp b/flang/lib/Optimizer/Dialect/FIRType.cpp
index daa3ac905dad..b6adb31213cd 100644
--- a/flang/lib/Optimizer/Dialect/FIRType.cpp
+++ b/flang/lib/Optimizer/Dialect/FIRType.cpp
@@ -1324,6 +1324,17 @@ fir::BaseBoxType::getBoxTypeWithNewShape(mlir::Type shapeMold) const {
return mlir::cast<fir::BaseBoxType>(changeTypeShape(*this, newShape));
}
+fir::BaseBoxType fir::BaseBoxType::getBoxTypeWithNewShape(int rank) const {
+ std::optional<fir::SequenceType::ShapeRef> newShape;
+ fir::SequenceType::Shape shapeVector;
+ if (rank > 0) {
+ shapeVector =
+ fir::SequenceType::Shape(rank, fir::SequenceType::getUnknownExtent());
+ newShape = shapeVector;
+ }
+ return mlir::cast<fir::BaseBoxType>(changeTypeShape(*this, newShape));
+}
+
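A hypothetical use of the new rank-based overload:
```
// Build the rank-2 counterpart of a box type, with unknown extents;
// rank 0 yields a scalar box type.
fir::BaseBoxType rank2Ty = boxTy.getBoxTypeWithNewShape(/*rank=*/2);
// e.g. !fir.box<!fir.array<*:f32>> -> !fir.box<!fir.array<?x?xf32>>
```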
bool fir::BaseBoxType::isAssumedRank() const {
if (auto seqTy =
mlir::dyn_cast<fir::SequenceType>(fir::unwrapRefType(getEleTy())))
diff --git a/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp b/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp
index 11196353b07c..218b38e9ba79 100644
--- a/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp
+++ b/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp
@@ -1115,7 +1115,7 @@ mlir::LogicalResult
hlfir::MatmulOp::canonicalize(MatmulOp matmulOp,
mlir::PatternRewriter &rewriter) {
// the only two uses of the transposed matrix should be for the hlfir.matmul
- // and hlfir.destory
+ // and hlfir.destroy
auto isOtherwiseUnused = [&](hlfir::TransposeOp transposeOp) -> bool {
std::size_t numUses = 0;
for (mlir::Operation *user : transposeOp.getResult().getUsers()) {
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/InlineElementals.cpp b/flang/lib/Optimizer/HLFIR/Transforms/InlineElementals.cpp
index a99038fdfba9..6c8e3e119374 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/InlineElementals.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/InlineElementals.cpp
@@ -32,7 +32,7 @@ namespace hlfir {
} // namespace hlfir
/// If the elemental has only two uses and those two are an apply operation and
-/// a destory operation, return those two, otherwise return {}
+/// a destroy operation, return those two, otherwise return {}
static std::optional<std::pair<hlfir::ApplyOp, hlfir::DestroyOp>>
getTwoUses(hlfir::ElementalOp elemental) {
mlir::Operation::user_range users = elemental->getUsers();
@@ -115,7 +115,6 @@ class InlineElementalsPass
: public hlfir::impl::InlineElementalsBase<InlineElementalsPass> {
public:
void runOnOperation() override {
- mlir::func::FuncOp func = getOperation();
mlir::MLIRContext *context = &getContext();
mlir::GreedyRewriteConfig config;
@@ -126,14 +125,11 @@ public:
patterns.insert<InlineElementalConversion>(context);
if (mlir::failed(mlir::applyPatternsAndFoldGreedily(
- func, std::move(patterns), config))) {
- mlir::emitError(func->getLoc(), "failure in HLFIR elemental inlining");
+ getOperation(), std::move(patterns), config))) {
+ mlir::emitError(getOperation()->getLoc(),
+ "failure in HLFIR elemental inlining");
signalPassFailure();
}
}
};
} // namespace
-
-std::unique_ptr<mlir::Pass> hlfir::createInlineElementalsPass() {
- return std::make_unique<InlineElementalsPass>();
-}
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIRIntrinsics.cpp b/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIRIntrinsics.cpp
index e9dbb7095d0e..707c0feffbb3 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIRIntrinsics.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIRIntrinsics.cpp
@@ -468,13 +468,6 @@ class LowerHLFIRIntrinsics
: public hlfir::impl::LowerHLFIRIntrinsicsBase<LowerHLFIRIntrinsics> {
public:
void runOnOperation() override {
- // TODO: make this a pass operating on FuncOp. The issue is that
- // FirOpBuilder helpers may generate new FuncOp because of runtime/llvm
- // intrinsics calls creation. This may create race conflict if the pass is
- // scheduled on FuncOp. A solution could be to provide an optional mutex
- // when building a FirOpBuilder and locking around FuncOp and GlobalOp
- // creation, but this needs a bit more thinking, so at this point the pass
- // is scheduled on the moduleOp.
mlir::ModuleOp module = this->getOperation();
mlir::MLIRContext *context = &getContext();
mlir::RewritePatternSet patterns(context);
@@ -504,7 +497,3 @@ public:
}
};
} // namespace
-
-std::unique_ptr<mlir::Pass> hlfir::createLowerHLFIRIntrinsicsPass() {
- return std::make_unique<LowerHLFIRIntrinsics>();
-}
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp b/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp
index c9ff4b1c3374..a1a89bb5154f 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp
@@ -1383,6 +1383,9 @@ class LowerHLFIROrderedAssignments
: public hlfir::impl::LowerHLFIROrderedAssignmentsBase<
LowerHLFIROrderedAssignments> {
public:
+ using LowerHLFIROrderedAssignmentsBase<
+ LowerHLFIROrderedAssignments>::LowerHLFIROrderedAssignmentsBase;
+
void runOnOperation() override {
// Running on a ModuleOp because this pass may generate FuncOp declaration
// for runtime calls. This could be a FuncOp pass otherwise.
@@ -1409,7 +1412,3 @@ public:
}
};
} // namespace
-
-std::unique_ptr<mlir::Pass> hlfir::createLowerHLFIROrderedAssignmentsPass() {
- return std::make_unique<LowerHLFIROrderedAssignments>();
-}
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp b/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp
index 8d68c7021608..3c8424ca564e 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp
@@ -1038,7 +1038,6 @@ class OptimizedBufferizationPass
OptimizedBufferizationPass> {
public:
void runOnOperation() override {
- mlir::func::FuncOp func = getOperation();
mlir::MLIRContext *context = &getContext();
mlir::GreedyRewriteConfig config;
@@ -1062,15 +1061,11 @@ public:
patterns.insert<MinMaxlocElementalConversion<hlfir::MaxlocOp>>(context);
if (mlir::failed(mlir::applyPatternsAndFoldGreedily(
- func, std::move(patterns), config))) {
- mlir::emitError(func.getLoc(),
+ getOperation(), std::move(patterns), config))) {
+ mlir::emitError(getOperation()->getLoc(),
"failure in HLFIR optimized bufferization");
signalPassFailure();
}
}
};
} // namespace
-
-std::unique_ptr<mlir::Pass> hlfir::createOptimizedBufferizationPass() {
- return std::make_unique<OptimizedBufferizationPass>();
-}
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/SimplifyHLFIRIntrinsics.cpp b/flang/lib/Optimizer/HLFIR/Transforms/SimplifyHLFIRIntrinsics.cpp
index b761563eba0f..6153c82fa734 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/SimplifyHLFIRIntrinsics.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/SimplifyHLFIRIntrinsics.cpp
@@ -94,7 +94,6 @@ class SimplifyHLFIRIntrinsics
: public hlfir::impl::SimplifyHLFIRIntrinsicsBase<SimplifyHLFIRIntrinsics> {
public:
void runOnOperation() override {
- mlir::func::FuncOp func = this->getOperation();
mlir::MLIRContext *context = &getContext();
mlir::RewritePatternSet patterns(context);
patterns.insert<TransposeAsElementalConversion>(context);
@@ -108,16 +107,12 @@ public:
});
target.markUnknownOpDynamicallyLegal(
[](mlir::Operation *) { return true; });
- if (mlir::failed(
- mlir::applyFullConversion(func, target, std::move(patterns)))) {
- mlir::emitError(func->getLoc(),
+ if (mlir::failed(mlir::applyFullConversion(getOperation(), target,
+ std::move(patterns)))) {
+ mlir::emitError(getOperation()->getLoc(),
"failure in HLFIR intrinsic simplification");
signalPassFailure();
}
}
};
} // namespace
-
-std::unique_ptr<mlir::Pass> hlfir::createSimplifyHLFIRIntrinsicsPass() {
- return std::make_unique<SimplifyHLFIRIntrinsics>();
-}
diff --git a/flang/lib/Optimizer/Transforms/AddDebugInfo.cpp b/flang/lib/Optimizer/Transforms/AddDebugInfo.cpp
index 07e8aed4cd07..fb7c0bf0d1f9 100644
--- a/flang/lib/Optimizer/Transforms/AddDebugInfo.cpp
+++ b/flang/lib/Optimizer/Transforms/AddDebugInfo.cpp
@@ -54,6 +54,16 @@ class AddDebugInfoPass : public fir::impl::AddDebugInfoBase<AddDebugInfoPass> {
public:
AddDebugInfoPass(fir::AddDebugInfoOptions options) : Base(options) {}
void runOnOperation() override;
+
+private:
+ llvm::StringMap<mlir::LLVM::DIModuleAttr> moduleMap;
+
+ mlir::LLVM::DIModuleAttr getOrCreateModuleAttr(
+ const std::string &name, mlir::LLVM::DIFileAttr fileAttr,
+ mlir::LLVM::DIScopeAttr scope, unsigned line, bool decl);
+
+ void handleGlobalOp(fir::GlobalOp globalOp, mlir::LLVM::DIFileAttr fileAttr,
+ mlir::LLVM::DIScopeAttr scope);
};
static uint32_t getLineFromLoc(mlir::Location loc) {
@@ -99,6 +109,70 @@ void AddDebugInfoPass::handleDeclareOp(fir::cg::XDeclareOp declOp,
declOp->setLoc(builder.getFusedLoc({declOp->getLoc()}, localVarAttr));
}
+// A Fortran `module` does not have a first-class representation in FIR. We
+// extract information about it from the names of the mangled identifiers and
+// keep a map to avoid duplication.
+mlir::LLVM::DIModuleAttr AddDebugInfoPass::getOrCreateModuleAttr(
+ const std::string &name, mlir::LLVM::DIFileAttr fileAttr,
+ mlir::LLVM::DIScopeAttr scope, unsigned line, bool decl) {
+ mlir::MLIRContext *context = &getContext();
+ mlir::LLVM::DIModuleAttr modAttr;
+ if (auto iter{moduleMap.find(name)}; iter != moduleMap.end()) {
+ modAttr = iter->getValue();
+ } else {
+ modAttr = mlir::LLVM::DIModuleAttr::get(
+ context, fileAttr, scope, mlir::StringAttr::get(context, name),
+ /* configMacros */ mlir::StringAttr(),
+ /* includePath */ mlir::StringAttr(),
+ /* apinotes */ mlir::StringAttr(), line, decl);
+ moduleMap[name] = modAttr;
+ }
+ return modAttr;
+}
+
+void AddDebugInfoPass::handleGlobalOp(fir::GlobalOp globalOp,
+ mlir::LLVM::DIFileAttr fileAttr,
+ mlir::LLVM::DIScopeAttr scope) {
+ mlir::ModuleOp module = getOperation();
+ mlir::MLIRContext *context = &getContext();
+ fir::DebugTypeGenerator typeGen(module);
+ mlir::OpBuilder builder(context);
+
+ std::pair result = fir::NameUniquer::deconstruct(globalOp.getSymName());
+ if (result.first != fir::NameUniquer::NameKind::VARIABLE)
+ return;
+
+ unsigned line = getLineFromLoc(globalOp.getLoc());
+
+ // DWARF5 says the following about Fortran modules:
+ // A Fortran 90 module may also be represented by a module entry
+ // (but no declaration attribute is warranted because Fortran has no concept
+ // of a corresponding module body).
+ // But in practice, compilers use the declaration attribute for a module that
+ // was defined in another source file (and is only being used in this one).
+ // The isInitialized() flag seems to provide the right information, but
+ // inverted: it is true where the module is actually defined and false where
+ // it is merely used.
+ // FIXME: Currently we don't have the line number on which a module was
+ // declared. We use a best guess of line - 1, where line is the source line
+ // of the first member of the module that we encounter.
+
+ if (result.second.modules.empty())
+ return;
+
+ scope = getOrCreateModuleAttr(result.second.modules[0], fileAttr, scope,
+ line - 1, !globalOp.isInitialized());
+
+ mlir::LLVM::DITypeAttr diType = typeGen.convertType(
+ globalOp.getType(), fileAttr, scope, globalOp.getLoc());
+ auto gvAttr = mlir::LLVM::DIGlobalVariableAttr::get(
+ context, scope, mlir::StringAttr::get(context, result.second.name),
+ mlir::StringAttr::get(context, globalOp.getName()), fileAttr, line,
+ diType, /*isLocalToUnit*/ false,
+ /*isDefinition*/ globalOp.isInitialized(), /* alignInBits*/ 0);
+ globalOp->setLoc(builder.getFusedLoc({globalOp->getLoc()}, gvAttr));
+}
+
void AddDebugInfoPass::runOnOperation() {
mlir::ModuleOp module = getOperation();
mlir::MLIRContext *context = &getContext();
@@ -138,6 +212,12 @@ void AddDebugInfoPass::runOnOperation() {
llvm::dwarf::getLanguage("DW_LANG_Fortran95"), fileAttr, producer,
isOptimized, debugLevel);
+ if (debugLevel == mlir::LLVM::DIEmissionKind::Full) {
+ // Process 'GlobalOp' only if full debug info is requested.
+ for (auto globalOp : module.getOps<fir::GlobalOp>())
+ handleGlobalOp(globalOp, fileAttr, cuAttr);
+ }
+
module.walk([&](mlir::func::FuncOp funcOp) {
mlir::Location l = funcOp->getLoc();
// If fused location has already been created then nothing to do
@@ -180,6 +260,7 @@ void AddDebugInfoPass::runOnOperation() {
// Only definitions need a distinct identifier and a compilation unit.
mlir::DistinctAttr id;
+ mlir::LLVM::DIScopeAttr Scope = fileAttr;
mlir::LLVM::DICompileUnitAttr compilationUnit;
mlir::LLVM::DISubprogramFlags subprogramFlags =
mlir::LLVM::DISubprogramFlags{};
@@ -192,9 +273,13 @@ void AddDebugInfoPass::runOnOperation() {
subprogramFlags | mlir::LLVM::DISubprogramFlags::Definition;
}
unsigned line = getLineFromLoc(l);
+ if (!result.second.modules.empty())
+ Scope = getOrCreateModuleAttr(result.second.modules[0], fileAttr, cuAttr,
+ line - 1, false);
+
auto spAttr = mlir::LLVM::DISubprogramAttr::get(
- context, id, compilationUnit, fileAttr, funcName, fullName,
- funcFileAttr, line, line, subprogramFlags, subTypeAttr);
+ context, id, compilationUnit, Scope, funcName, fullName, funcFileAttr,
+ line, line, subprogramFlags, subTypeAttr);
funcOp->setLoc(builder.getFusedLoc({funcOp->getLoc()}, spAttr));
// Don't process variables if user asked for line tables only.
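
The getOrCreateModuleAttr helper above is a straightforward memoized factory. A minimal sketch of the same pattern, using std::map and a plain struct in place of llvm::StringMap and DIModuleAttr (all names here are illustrative, not the flang API):

#include <map>
#include <string>

struct ModuleInfo {
  std::string name;
  unsigned line;
  bool isDeclaration;
};

class ModuleInfoCache {
  std::map<std::string, ModuleInfo> cache_;

public:
  // Returns the cached entry for `name`, creating it on first use, so each
  // Fortran module gets exactly one debug-info attribute.
  const ModuleInfo &getOrCreate(const std::string &name, unsigned line,
                                bool isDeclaration) {
    auto [it, inserted] =
        cache_.try_emplace(name, ModuleInfo{name, line, isDeclaration});
    return it->second;
  }
};

int main() {
  ModuleInfoCache cache;
  const auto &a = cache.getOrCreate("m", 10, false);
  const auto &b = cache.getOrCreate("m", 99, true); // reuses the first entry
  return (&a == &b && b.line == 10) ? 0 : 1;
}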
diff --git a/flang/lib/Optimizer/Transforms/AssumedRankOpConversion.cpp b/flang/lib/Optimizer/Transforms/AssumedRankOpConversion.cpp
new file mode 100644
index 000000000000..5cc70c4d6125
--- /dev/null
+++ b/flang/lib/Optimizer/Transforms/AssumedRankOpConversion.cpp
@@ -0,0 +1,131 @@
+//===-- AssumedRankOpConversion.cpp ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "flang/Common/Fortran.h"
+#include "flang/Lower/BuiltinModules.h"
+#include "flang/Optimizer/Builder/FIRBuilder.h"
+#include "flang/Optimizer/Builder/Runtime/Support.h"
+#include "flang/Optimizer/Builder/Todo.h"
+#include "flang/Optimizer/Dialect/FIRDialect.h"
+#include "flang/Optimizer/Dialect/FIROps.h"
+#include "flang/Optimizer/Support/TypeCode.h"
+#include "flang/Optimizer/Support/Utils.h"
+#include "flang/Optimizer/Transforms/Passes.h"
+#include "flang/Runtime/support.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Pass/Pass.h"
+#include "mlir/Transforms/DialectConversion.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
+
+namespace fir {
+#define GEN_PASS_DEF_ASSUMEDRANKOPCONVERSION
+#include "flang/Optimizer/Transforms/Passes.h.inc"
+} // namespace fir
+
+using namespace fir;
+using namespace mlir;
+
+namespace {
+
+static int getCFIAttribute(mlir::Type boxType) {
+ if (fir::isAllocatableType(boxType))
+ return CFI_attribute_allocatable;
+ if (fir::isPointerType(boxType))
+ return CFI_attribute_pointer;
+ return CFI_attribute_other;
+}
+
+static Fortran::runtime::LowerBoundModifier
+getLowerBoundModifier(fir::LowerBoundModifierAttribute modifier) {
+ switch (modifier) {
+ case fir::LowerBoundModifierAttribute::Preserve:
+ return Fortran::runtime::LowerBoundModifier::Preserve;
+ case fir::LowerBoundModifierAttribute::SetToOnes:
+ return Fortran::runtime::LowerBoundModifier::SetToOnes;
+ case fir::LowerBoundModifierAttribute::SetToZeroes:
+ return Fortran::runtime::LowerBoundModifier::SetToZeroes;
+ }
+ llvm_unreachable("bad modifier code");
+}
+
+class ReboxAssumedRankConv
+ : public mlir::OpRewritePattern<fir::ReboxAssumedRankOp> {
+public:
+ using OpRewritePattern::OpRewritePattern;
+
+ ReboxAssumedRankConv(mlir::MLIRContext *context,
+ mlir::SymbolTable *symbolTable, fir::KindMapping kindMap)
+ : mlir::OpRewritePattern<fir::ReboxAssumedRankOp>(context),
+ symbolTable{symbolTable}, kindMap{kindMap} {}
+
+ mlir::LogicalResult
+ matchAndRewrite(fir::ReboxAssumedRankOp rebox,
+ mlir::PatternRewriter &rewriter) const override {
+ fir::FirOpBuilder builder{rewriter, kindMap, symbolTable};
+ mlir::Location loc = rebox.getLoc();
+ auto newBoxType = mlir::cast<fir::BaseBoxType>(rebox.getType());
+ mlir::Type newMaxRankBoxType =
+ newBoxType.getBoxTypeWithNewShape(Fortran::common::maxRank);
+ // The CopyAndUpdateDescriptor FIR interface requires loading the
+ // !fir.ref<fir.box> input, which is expensive with assumed-rank. It might
+ // be best to add an entry point that takes a non-const `from` argument to
+ // cover this case, while still indicating to LLVM that `from` does not
+ // get modified.
+ if (fir::isBoxAddress(rebox.getBox().getType()))
+ TODO(loc, "fir.rebox_assumed_rank codegen with fir.ref<fir.box<>> input");
+ mlir::Value tempDesc = builder.createTemporary(loc, newMaxRankBoxType);
+ mlir::Value newDtype;
+ mlir::Type newEleType = newBoxType.unwrapInnerType();
+ auto oldBoxType = mlir::cast<fir::BaseBoxType>(
+ fir::unwrapRefType(rebox.getBox().getType()));
+ auto newDerivedType = mlir::dyn_cast<fir::RecordType>(newEleType);
+ if (newDerivedType && (newEleType != oldBoxType.unwrapInnerType()) &&
+ !fir::isPolymorphicType(newBoxType)) {
+ newDtype = builder.create<fir::TypeDescOp>(
+ loc, mlir::TypeAttr::get(newDerivedType));
+ } else {
+ newDtype = builder.createNullConstant(loc);
+ }
+ mlir::Value newAttribute = builder.createIntegerConstant(
+ loc, builder.getIntegerType(8), getCFIAttribute(newBoxType));
+ int lbsModifierCode =
+ static_cast<int>(getLowerBoundModifier(rebox.getLbsModifier()));
+ mlir::Value lowerBoundModifier = builder.createIntegerConstant(
+ loc, builder.getIntegerType(32), lbsModifierCode);
+ fir::runtime::genCopyAndUpdateDescriptor(builder, loc, tempDesc,
+ rebox.getBox(), newDtype,
+ newAttribute, lowerBoundModifier);
+
+ mlir::Value descValue = builder.create<fir::LoadOp>(loc, tempDesc);
+ mlir::Value castDesc = builder.createConvert(loc, newBoxType, descValue);
+ rewriter.replaceOp(rebox, castDesc);
+ return mlir::success();
+ }
+
+private:
+ mlir::SymbolTable *symbolTable = nullptr;
+ fir::KindMapping kindMap;
+};
+
+/// Convert assumed-rank operations such as fir.rebox_assumed_rank into
+/// calls to the Fortran runtime.
+class AssumedRankOpConversion
+ : public fir::impl::AssumedRankOpConversionBase<AssumedRankOpConversion> {
+public:
+ void runOnOperation() override {
+ auto *context = &getContext();
+ mlir::ModuleOp mod = getOperation();
+ mlir::SymbolTable symbolTable(mod);
+ fir::KindMapping kindMap = fir::getKindMapping(mod);
+ mlir::RewritePatternSet patterns(context);
+ patterns.insert<ReboxAssumedRankConv>(context, &symbolTable, kindMap);
+ mlir::GreedyRewriteConfig config;
+ config.enableRegionSimplification = false;
+ (void)applyPatternsAndFoldGreedily(mod, std::move(patterns), config);
+ }
+};
+} // namespace
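
To make the descriptor-attribute selection above easy to scan, here is a self-contained sketch; the real CFI_attribute_* values come from the Fortran runtime headers, and the constants below are illustrative stand-ins:

enum class BoxKind { Allocatable, Pointer, Other };

// Illustrative stand-ins for the CFI_attribute_* codes.
constexpr int kAttrOther = 0;
constexpr int kAttrPointer = 1;
constexpr int kAttrAllocatable = 2;

int cfiAttribute(BoxKind kind) {
  switch (kind) {
  case BoxKind::Allocatable:
    return kAttrAllocatable;
  case BoxKind::Pointer:
    return kAttrPointer;
  case BoxKind::Other:
    return kAttrOther;
  }
  return kAttrOther; // not reachable; keeps compilers quiet
}

int main() { return cfiAttribute(BoxKind::Pointer) == kAttrPointer ? 0 : 1; }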
diff --git a/flang/lib/Optimizer/Transforms/CMakeLists.txt b/flang/lib/Optimizer/Transforms/CMakeLists.txt
index 308b5ed06623..5ef930fdb2c2 100644
--- a/flang/lib/Optimizer/Transforms/CMakeLists.txt
+++ b/flang/lib/Optimizer/Transforms/CMakeLists.txt
@@ -4,6 +4,7 @@ add_flang_library(FIRTransforms
AffinePromotion.cpp
AffineDemotion.cpp
AnnotateConstant.cpp
+ AssumedRankOpConversion.cpp
CharacterConversion.cpp
ControlFlowConverter.cpp
ArrayValueCopy.cpp
diff --git a/flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp b/flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp
index 64c6547e06e0..07163de958f9 100644
--- a/flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp
+++ b/flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp
@@ -37,6 +37,45 @@ static mlir::LLVM::DITypeAttr genPlaceholderType(mlir::MLIRContext *context) {
llvm::dwarf::DW_ATE_signed);
}
+mlir::LLVM::DITypeAttr DebugTypeGenerator::convertSequenceType(
+ fir::SequenceType seqTy, mlir::LLVM::DIFileAttr fileAttr,
+ mlir::LLVM::DIScopeAttr scope, mlir::Location loc) {
+
+ mlir::MLIRContext *context = module.getContext();
+ // FIXME: Only fixed-size arrays are handled at the moment.
+ if (seqTy.hasDynamicExtents())
+ return genPlaceholderType(context);
+
+ llvm::SmallVector<mlir::LLVM::DINodeAttr> elements;
+ mlir::LLVM::DITypeAttr elemTy =
+ convertType(seqTy.getEleTy(), fileAttr, scope, loc);
+
+ for (fir::SequenceType::Extent dim : seqTy.getShape()) {
+ auto intTy = mlir::IntegerType::get(context, 64);
+ // FIXME: Only a lower bound of 1 is supported at the moment. The
+ // 'SequenceType' has information about the shape but not the shift. In
+ // cases where the conversion originated during the processing of
+ // 'DeclareOp', it may be possible to pass on this information. But the
+ // type conversion should ideally be based on the information present in
+ // the type class so that it works everywhere (e.g. when the array is part
+ // of a module or a derived type).
+ auto countAttr = mlir::IntegerAttr::get(intTy, llvm::APInt(64, dim));
+ auto lowerAttr = mlir::IntegerAttr::get(intTy, llvm::APInt(64, 1));
+ auto subrangeTy = mlir::LLVM::DISubrangeAttr::get(
+ context, countAttr, lowerAttr, nullptr, nullptr);
+ elements.push_back(subrangeTy);
+ }
+ // Apart from arrays, `DICompositeTypeAttr` is also used for other things
+ // like structure types. Its fields that are not applicable to arrays are
+ // set to valid default values.
+
+ return mlir::LLVM::DICompositeTypeAttr::get(
+ context, llvm::dwarf::DW_TAG_array_type, /*recursive id*/ {},
+ /* name */ nullptr, /* file */ nullptr, /* line */ 0, /* scope */ nullptr,
+ elemTy, mlir::LLVM::DIFlags::Zero, /* sizeInBits */ 0,
+ /*alignInBits*/ 0, elements);
+}
+
mlir::LLVM::DITypeAttr
DebugTypeGenerator::convertType(mlir::Type Ty, mlir::LLVM::DIFileAttr fileAttr,
mlir::LLVM::DIScopeAttr scope,
@@ -57,6 +96,20 @@ DebugTypeGenerator::convertType(mlir::Type Ty, mlir::LLVM::DIFileAttr fileAttr,
mlir::StringAttr::get(context, logTy.getMnemonic()),
kindMapping.getLogicalBitsize(logTy.getFKind()),
llvm::dwarf::DW_ATE_boolean);
+ } else if (fir::isa_complex(Ty)) {
+ unsigned bitWidth;
+ if (auto cplxTy = mlir::dyn_cast_or_null<mlir::ComplexType>(Ty)) {
+ auto floatTy = mlir::cast<mlir::FloatType>(cplxTy.getElementType());
+ bitWidth = floatTy.getWidth();
+ } else if (auto cplxTy = mlir::dyn_cast_or_null<fir::ComplexType>(Ty)) {
+ bitWidth = kindMapping.getRealBitsize(cplxTy.getFKind());
+ } else {
+ llvm_unreachable("Unhandled complex type");
+ }
+ return genBasicType(context, mlir::StringAttr::get(context, "complex"),
+ bitWidth * 2, llvm::dwarf::DW_ATE_complex_float);
+ } else if (auto seqTy = mlir::dyn_cast_or_null<fir::SequenceType>(Ty)) {
+ return convertSequenceType(seqTy, fileAttr, scope, loc);
} else {
// FIXME: These types are currently unhandled. We are generating a
// placeholder type to allow us to test supported bits.
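
For a concrete feel for the subranges built in convertSequenceType, each dimension of a fixed-shape array contributes a (count, lowerBound) pair with the lower bound pinned to Fortran's default of 1 for now. A plain C++ sketch, not the MLIR attribute API:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // Shape of a hypothetical Fortran array a(2,3,4).
  std::vector<std::int64_t> shape{2, 3, 4};
  for (std::int64_t dim : shape)
    std::cout << "DISubrange: count=" << dim << ", lowerBound=1\n";
  return 0;
}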
diff --git a/flang/lib/Optimizer/Transforms/DebugTypeGenerator.h b/flang/lib/Optimizer/Transforms/DebugTypeGenerator.h
index 5a2bb201db47..963c919d6682 100644
--- a/flang/lib/Optimizer/Transforms/DebugTypeGenerator.h
+++ b/flang/lib/Optimizer/Transforms/DebugTypeGenerator.h
@@ -31,6 +31,10 @@ public:
mlir::Location loc);
private:
+ mlir::LLVM::DITypeAttr convertSequenceType(fir::SequenceType seqTy,
+ mlir::LLVM::DIFileAttr fileAttr,
+ mlir::LLVM::DIScopeAttr scope,
+ mlir::Location loc);
mlir::ModuleOp module;
KindMapping kindMapping;
};
diff --git a/flang/lib/Parser/openmp-parsers.cpp b/flang/lib/Parser/openmp-parsers.cpp
index 48f213794247..e67dbcca30e7 100644
--- a/flang/lib/Parser/openmp-parsers.cpp
+++ b/flang/lib/Parser/openmp-parsers.cpp
@@ -266,6 +266,8 @@ TYPE_PARSER(
construct<OmpClause>(construct<OmpClause::DynamicAllocators>()) ||
"ENTER" >> construct<OmpClause>(construct<OmpClause::Enter>(
parenthesized(Parser<OmpObjectList>{}))) ||
+ "FILTER" >> construct<OmpClause>(construct<OmpClause::Filter>(
+ parenthesized(scalarIntExpr))) ||
"FINAL" >> construct<OmpClause>(construct<OmpClause::Final>(
parenthesized(scalarLogicalExpr))) ||
"FULL" >> construct<OmpClause>(construct<OmpClause::Full>()) ||
@@ -376,8 +378,15 @@ TYPE_PARSER(sourced(construct<OmpLoopDirective>(first(
"DISTRIBUTE" >> pure(llvm::omp::Directive::OMPD_distribute),
"DO SIMD" >> pure(llvm::omp::Directive::OMPD_do_simd),
"DO" >> pure(llvm::omp::Directive::OMPD_do),
+ "MASKED TASKLOOP SIMD" >>
+ pure(llvm::omp::Directive::OMPD_masked_taskloop_simd),
+ "MASKED TASKLOOP" >> pure(llvm::omp::Directive::OMPD_masked_taskloop),
"PARALLEL DO SIMD" >> pure(llvm::omp::Directive::OMPD_parallel_do_simd),
"PARALLEL DO" >> pure(llvm::omp::Directive::OMPD_parallel_do),
+ "PARALLEL MASKED TASKLOOP SIMD" >>
+ pure(llvm::omp::Directive::OMPD_parallel_masked_taskloop_simd),
+ "PARALLEL MASKED TASKLOOP" >>
+ pure(llvm::omp::Directive::OMPD_parallel_masked_taskloop),
"SIMD" >> pure(llvm::omp::Directive::OMPD_simd),
"TARGET PARALLEL DO SIMD" >>
pure(llvm::omp::Directive::OMPD_target_parallel_do_simd),
@@ -487,8 +496,10 @@ TYPE_PARSER(
// Directives enclosing structured-block
TYPE_PARSER(construct<OmpBlockDirective>(first(
+ "MASKED" >> pure(llvm::omp::Directive::OMPD_masked),
"MASTER" >> pure(llvm::omp::Directive::OMPD_master),
"ORDERED" >> pure(llvm::omp::Directive::OMPD_ordered),
+ "PARALLEL MASKED" >> pure(llvm::omp::Directive::OMPD_parallel_masked),
"PARALLEL WORKSHARE" >> pure(llvm::omp::Directive::OMPD_parallel_workshare),
"PARALLEL" >> pure(llvm::omp::Directive::OMPD_parallel),
"SINGLE" >> pure(llvm::omp::Directive::OMPD_single),
diff --git a/flang/lib/Parser/unparse.cpp b/flang/lib/Parser/unparse.cpp
index 1639e900903f..bdd968b19a43 100644
--- a/flang/lib/Parser/unparse.cpp
+++ b/flang/lib/Parser/unparse.cpp
@@ -2194,12 +2194,24 @@ public:
case llvm::omp::Directive::OMPD_do_simd:
Word("DO SIMD ");
break;
+ case llvm::omp::Directive::OMPD_masked_taskloop_simd:
+ Word("MASKED TASKLOOP SIMD");
+ break;
+ case llvm::omp::Directive::OMPD_masked_taskloop:
+ Word("MASKED TASKLOOP");
+ break;
case llvm::omp::Directive::OMPD_parallel_do:
Word("PARALLEL DO ");
break;
case llvm::omp::Directive::OMPD_parallel_do_simd:
Word("PARALLEL DO SIMD ");
break;
+ case llvm::omp::Directive::OMPD_parallel_masked_taskloop_simd:
+ Word("PARALLEL MASKED TASKLOOP SIMD");
+ break;
+ case llvm::omp::Directive::OMPD_parallel_masked_taskloop:
+ Word("PARALLEL MASKED TASKLOOP");
+ break;
case llvm::omp::Directive::OMPD_simd:
Word("SIMD ");
break;
@@ -2283,12 +2295,18 @@ public:
}
void Unparse(const OmpBlockDirective &x) {
switch (x.v) {
+ case llvm::omp::Directive::OMPD_masked:
+ Word("MASKED");
+ break;
case llvm::omp::Directive::OMPD_master:
Word("MASTER");
break;
case llvm::omp::Directive::OMPD_ordered:
Word("ORDERED ");
break;
+ case llvm::omp::Directive::OMPD_parallel_masked:
+ Word("PARALLEL MASKED");
+ break;
case llvm::omp::Directive::OMPD_parallel_workshare:
Word("PARALLEL WORKSHARE ");
break;
diff --git a/flang/lib/Semantics/check-call.cpp b/flang/lib/Semantics/check-call.cpp
index 8f51ef5ebeba..48c888c0dfb2 100644
--- a/flang/lib/Semantics/check-call.cpp
+++ b/flang/lib/Semantics/check-call.cpp
@@ -761,7 +761,8 @@ static void CheckExplicitDataArg(const characteristics::DummyDataObject &dummy,
}
// 15.5.2.5 -- actual & dummy are both POINTER or both ALLOCATABLE
- // For INTENT(IN) we relax two checks that are in Fortran to
+ // For INTENT(IN), and for a polymorphic actual argument being associated
+ // with a monomorphic dummy, we relax two checks that Fortran specifies to
// prevent the callee from changing the type or to avoid having
// to use a descriptor.
if (!typesCompatible) {
@@ -770,7 +771,9 @@ static void CheckExplicitDataArg(const characteristics::DummyDataObject &dummy,
(actualIsAllocatable && dummyIsAllocatable)) {
bool actualIsUnlimited{actualType.type().IsUnlimitedPolymorphic()};
bool dummyIsUnlimited{dummy.type.type().IsUnlimitedPolymorphic()};
+ bool checkTypeCompatibility{true};
if (actualIsUnlimited != dummyIsUnlimited) {
+ checkTypeCompatibility = false;
if (dummyIsUnlimited && dummy.intent == common::Intent::In &&
context.IsEnabled(common::LanguageFeature::RelaxedIntentInChecking)) {
if (context.ShouldWarn(
@@ -790,11 +793,21 @@ static void CheckExplicitDataArg(const characteristics::DummyDataObject &dummy,
messages.Say(
"If a POINTER or ALLOCATABLE dummy or actual argument is polymorphic, both should be so"_port_en_US);
}
+ } else if (actualIsPolymorphic &&
+ context.IsEnabled(common::LanguageFeature::
+ PolymorphicActualAllocatableOrPointerToMonomorphicDummy)) {
+ if (context.ShouldWarn(common::LanguageFeature::
+ PolymorphicActualAllocatableOrPointerToMonomorphicDummy)) {
+ messages.Say(
+ "If a POINTER or ALLOCATABLE actual argument is polymorphic, the corresponding dummy argument should also be so"_port_en_US);
+ }
} else {
+ checkTypeCompatibility = false;
messages.Say(
"If a POINTER or ALLOCATABLE dummy or actual argument is polymorphic, both must be so"_err_en_US);
}
- } else if (!actualIsUnlimited) {
+ }
+ if (checkTypeCompatibility && !actualIsUnlimited) {
if (!actualType.type().IsTkCompatibleWith(dummy.type.type())) {
if (dummy.intent == common::Intent::In &&
context.IsEnabled(
@@ -1116,20 +1129,20 @@ static void CheckExplicitInterfaceArg(evaluate::ActualArgument &arg,
}
auto restorer{
messages.SetLocation(arg.sourceLocation().value_or(messages.at()))};
- auto checkActualArgForLabel = [&](evaluate::ActualArgument &arg) {
+ auto CheckActualArgForLabel = [&](evaluate::ActualArgument &arg) {
if (arg.isAlternateReturn()) {
messages.Say(
"Alternate return label '%d' cannot be associated with %s"_err_en_US,
arg.GetLabel(), dummyName);
- return true;
- } else {
return false;
+ } else {
+ return true;
}
};
common::visit(
common::visitors{
[&](const characteristics::DummyDataObject &object) {
- if (!checkActualArgForLabel(arg)) {
+ if (CheckActualArgForLabel(arg)) {
ConvertBOZLiteralArg(arg, object.type.type());
if (auto *expr{arg.UnwrapExpr()}) {
if (auto type{characteristics::TypeAndShape::Characterize(
@@ -1147,9 +1160,16 @@ static void CheckExplicitInterfaceArg(evaluate::ActualArgument &arg,
evaluate::IsNullObjectPointer(*expr)) {
// ok, ASSOCIATED(NULL(without MOLD=))
} else if (object.type.attrs().test(characteristics::
- TypeAndShape::Attr::AssumedRank)) {
+ TypeAndShape::Attr::AssumedRank) &&
+ evaluate::IsNullObjectPointer(*expr) &&
+ (object.attrs.test(
+ characteristics::DummyDataObject::Attr::Allocatable) ||
+ object.attrs.test(
+ characteristics::DummyDataObject::Attr::Pointer) ||
+ !object.attrs.test(characteristics::DummyDataObject::
+ Attr::Optional))) {
messages.Say(
- "NULL() without MOLD= must not be associated with an assumed-rank dummy argument"_err_en_US);
+ "NULL() without MOLD= must not be associated with an assumed-rank dummy argument that is ALLOCATABLE, POINTER, or non-OPTIONAL"_err_en_US);
} else if ((object.attrs.test(characteristics::DummyDataObject::
Attr::Pointer) ||
object.attrs.test(characteristics::
@@ -1210,7 +1230,7 @@ static void CheckExplicitInterfaceArg(evaluate::ActualArgument &arg,
}
},
[&](const characteristics::DummyProcedure &dummy) {
- if (!checkActualArgForLabel(arg)) {
+ if (CheckActualArgForLabel(arg)) {
CheckProcedureArg(arg, proc, dummy, dummyName, context,
ignoreImplicitVsExplicit);
}
diff --git a/flang/lib/Semantics/check-declarations.cpp b/flang/lib/Semantics/check-declarations.cpp
index f564a0b69671..7034902dcc58 100644
--- a/flang/lib/Semantics/check-declarations.cpp
+++ b/flang/lib/Semantics/check-declarations.cpp
@@ -2430,16 +2430,18 @@ void CheckHelper::CheckProcBinding(
"A NOPASS type-bound procedure and its override must have identical interfaces"_err_en_US);
}
} else if (!context_.HasError(binding.symbol())) {
- int passIndex{bindingChars->FindPassIndex(binding.passName())};
- int overriddenPassIndex{
+ auto passIndex{bindingChars->FindPassIndex(binding.passName())};
+ auto overriddenPassIndex{
overriddenChars->FindPassIndex(overriddenBinding->passName())};
- if (passIndex != overriddenPassIndex) {
- SayWithDeclaration(*overridden,
- "A type-bound procedure and its override must use the same PASS argument"_err_en_US);
- } else if (!bindingChars->CanOverride(
- *overriddenChars, passIndex)) {
- SayWithDeclaration(*overridden,
- "A type-bound procedure and its override must have compatible interfaces"_err_en_US);
+ if (passIndex && overriddenPassIndex) {
+ if (*passIndex != *overriddenPassIndex) {
+ SayWithDeclaration(*overridden,
+ "A type-bound procedure and its override must use the same PASS argument"_err_en_US);
+ } else if (!bindingChars->CanOverride(
+ *overriddenChars, passIndex)) {
+ SayWithDeclaration(*overridden,
+ "A type-bound procedure and its override must have compatible interfaces"_err_en_US);
+ }
}
}
}
@@ -2960,32 +2962,6 @@ parser::Messages CheckHelper::WhyNotInteroperableDerivedType(
return msgs;
}
-static UnorderedSymbolSet CollectEntryPointsWithDummy(const Symbol &dummy) {
- UnorderedSymbolSet entries;
- const Scope &subpScope{dummy.owner()};
- for (const auto &[_, ref] : subpScope.parent()) {
- const Symbol &x{*ref};
- if (const auto *subp{x.detailsIf<SubprogramDetails>()}) {
- if (x.scope() == &subpScope || subp->entryScope() == &dummy.owner()) {
- if (std::find(subp->dummyArgs().begin(), subp->dummyArgs().end(),
- &dummy) != subp->dummyArgs().end()) {
- entries.insert(x);
- }
- }
- }
- }
- return entries;
-}
-
-static bool AnyNonBindCEntry(const Symbol &dummy) {
- for (const Symbol &subp : CollectEntryPointsWithDummy(dummy)) {
- if (!subp.attrs().test(Attr::BIND_C)) {
- return true;
- }
- }
- return false;
-}
-
parser::Messages CheckHelper::WhyNotInteroperableObject(
const Symbol &symbol, bool isError) {
parser::Messages msgs;
@@ -2998,14 +2974,14 @@ parser::Messages CheckHelper::WhyNotInteroperableObject(
examinedByWhyNotInteroperable_.insert(symbol);
CHECK(symbol.has<ObjectEntityDetails>());
if (isExplicitBindC && !symbol.owner().IsModule()) {
- messages_.Say(symbol.name(),
+ msgs.Say(symbol.name(),
"A variable with BIND(C) attribute may only appear in the specification part of a module"_err_en_US);
}
auto shape{evaluate::GetShape(foldingContext_, symbol)};
if (shape) {
if (evaluate::GetRank(*shape) == 0) { // 18.3.4
if (IsAllocatableOrPointer(symbol) && !IsDummy(symbol)) {
- messages_.Say(symbol.name(),
+ msgs.Say(symbol.name(),
"A scalar interoperable variable may not be ALLOCATABLE or POINTER"_err_en_US);
}
} else if (auto extents{
@@ -3026,33 +3002,26 @@ parser::Messages CheckHelper::WhyNotInteroperableObject(
if (derived) {
if (derived->typeSymbol().attrs().test(Attr::BIND_C)) {
} else if (isError) {
- if (auto *msg{messages_.Say(symbol.name(),
- "The derived type of a BIND(C) object must also be BIND(C)"_err_en_US)}) {
- msg->Attach(derived->typeSymbol().name(), "Non-BIND(C) type"_en_US);
- }
- context_.SetError(symbol);
+ msgs.Say(symbol.name(),
+ "The derived type of a BIND(C) object must also be BIND(C)"_err_en_US)
+ .Attach(derived->typeSymbol().name(), "Non-BIND(C) type"_en_US);
} else if (auto bad{WhyNotInteroperableDerivedType(
derived->typeSymbol(), /*isError=*/false)};
bad.AnyFatalError()) {
- if (auto *msg{messages_.Say(symbol.name(),
- "The derived type of an interoperable object must be interoperable, but is not"_err_en_US)}) {
- msg->Attach(
- derived->typeSymbol().name(), "Non-interoperable type"_en_US);
- bad.AttachTo(*msg, parser::Severity::None);
- }
+ bad.AttachTo(
+ msgs.Say(symbol.name(),
+ "The derived type of an interoperable object must be interoperable, but is not"_err_en_US)
+ .Attach(derived->typeSymbol().name(),
+ "Non-interoperable type"_en_US),
+ parser::Severity::None);
} else {
- if (auto *msg{messages_.Say(symbol.name(),
- "The derived type of an interoperable object should be BIND(C)"_warn_en_US)}) {
- msg->Attach(derived->typeSymbol().name(), "Non-BIND(C) type"_en_US);
- }
+ msgs.Say(symbol.name(),
+ "The derived type of an interoperable object should be BIND(C)"_warn_en_US)
+ .Attach(derived->typeSymbol().name(), "Non-BIND(C) type"_en_US);
}
}
if (type->IsAssumedType()) { // ok
} else if (IsAssumedLengthCharacter(symbol)) {
- if (AnyNonBindCEntry(symbol)) {
- msgs.Say(symbol.name(),
- "An assumed-length dummy argument must not appear in a non-BIND(C) entry in a subprogram with an entry that must be interoperable"_err_en_US);
- }
} else if (IsAllocatableOrPointer(symbol) &&
type->category() == DeclTypeSpec::Character &&
type->characterTypeSpec().length().isDeferred()) {
@@ -3083,12 +3052,6 @@ parser::Messages CheckHelper::WhyNotInteroperableObject(
msgs.Say(symbol.name(),
"An interoperable procedure with an OPTIONAL dummy argument might not be portable"_port_en_US);
}
- if (symbol.attrs().test(Attr::VALUE)) {
- if (AnyNonBindCEntry(symbol)) {
- msgs.Say(symbol.name(),
- "A VALUE dummy argument must not appear in a non-BIND(C) entry of a subprogram with an entry that must be interoperable"_err_en_US);
- }
- }
if (IsDescriptor(symbol) && IsPointer(symbol) &&
symbol.attrs().test(Attr::CONTIGUOUS)) {
msgs.Say(symbol.name(),
diff --git a/flang/lib/Semantics/check-omp-structure.cpp b/flang/lib/Semantics/check-omp-structure.cpp
index e9637b7bb591..5e3a5725c18d 100644
--- a/flang/lib/Semantics/check-omp-structure.cpp
+++ b/flang/lib/Semantics/check-omp-structure.cpp
@@ -2310,6 +2310,7 @@ void OmpStructureChecker::Enter(const parser::OmpClause::Reduction &x) {
if (CheckReductionOperators(x)) {
CheckReductionTypeList(x);
}
+ CheckReductionModifier(x);
}
bool OmpStructureChecker::CheckReductionOperators(
@@ -2394,6 +2395,64 @@ void OmpStructureChecker::CheckReductionTypeList(
}
}
+void OmpStructureChecker::CheckReductionModifier(
+ const parser::OmpClause::Reduction &x) {
+ using ReductionModifier = parser::OmpReductionClause::ReductionModifier;
+ const auto &maybeModifier{std::get<std::optional<ReductionModifier>>(x.v.t)};
+ if (!maybeModifier || *maybeModifier == ReductionModifier::Default) {
+ // No modifier, or the default one is always ok.
+ return;
+ }
+ ReductionModifier modifier{*maybeModifier};
+ const DirectiveContext &dirCtx{GetContext()};
+ if (dirCtx.directive == llvm::omp::Directive::OMPD_loop) {
+ // [5.2:257:33-34]
+ // If a reduction-modifier is specified in a reduction clause that
+ // appears on the directive, then the reduction modifier must be
+ // default.
+ context_.Say(GetContext().clauseSource,
+ "REDUCTION modifier on LOOP directive must be DEFAULT"_err_en_US);
+ }
+ if (modifier == ReductionModifier::Task) {
+ // "Task" is only allowed on worksharing or "parallel" directive.
+ static llvm::omp::Directive worksharing[]{
+ llvm::omp::Directive::OMPD_do, llvm::omp::Directive::OMPD_scope,
+ llvm::omp::Directive::OMPD_sections,
+ // There are more worksharing directives, but they do not apply:
+ // "for" is C++ only,
+ // "single" and "workshare" don't allow reduction clause,
+ // "loop" has different restrictions (checked above).
+ };
+ if (dirCtx.directive != llvm::omp::Directive::OMPD_parallel &&
+ !llvm::is_contained(worksharing, dirCtx.directive)) {
+ context_.Say(GetContext().clauseSource,
+ "Modifier 'TASK' on REDUCTION clause is only allowed with "
+ "PARALLEL or worksharing directive"_err_en_US);
+ }
+ } else if (modifier == ReductionModifier::Inscan) {
+ // "Inscan" is only allowed on worksharing-loop, worksharing-loop simd,
+ // or "simd" directive.
+ // The worksharing-loop directives are OMPD_do and OMPD_for. Only the
+ // former is allowed in Fortran.
+ switch (dirCtx.directive) {
+ case llvm::omp::Directive::OMPD_do: // worksharing-loop
+ case llvm::omp::Directive::OMPD_do_simd: // worksharing-loop simd
+ case llvm::omp::Directive::OMPD_simd: // "simd"
+ break;
+ default:
+ context_.Say(GetContext().clauseSource,
+ "Modifier 'INSCAN' on REDUCTION clause is only allowed with "
+ "worksharing-loop, worksharing-loop simd, "
+ "or SIMD directive"_err_en_US);
+ }
+ } else {
+ // Catch-all for potential future modifiers to make sure that this
+ // function is up-to-date.
+ context_.Say(GetContext().clauseSource,
+ "Unexpected modifier on REDUCTION clause"_err_en_US);
+ }
+}
+
void OmpStructureChecker::CheckIntentInPointerAndDefinable(
const parser::OmpObjectList &objectList, const llvm::omp::Clause clause) {
for (const auto &ompObject : objectList.v) {
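
The modifier rules added in CheckReductionModifier condense to a small decision table. A self-contained sketch with plain enums (illustrative names, not the flang API); it models the net accept/reject outcome rather than the individual diagnostics:

#include <algorithm>
#include <cassert>
#include <iterator>

enum class Modifier { Default, Inscan, Task };
enum class Dir { Parallel, Do, DoSimd, Simd, Scope, Sections, Loop };

// DEFAULT is always ok; LOOP accepts only DEFAULT; TASK needs PARALLEL or a
// worksharing directive; INSCAN needs a worksharing-loop, worksharing-loop
// simd, or SIMD directive.
bool modifierAllowed(Modifier m, Dir d) {
  if (m == Modifier::Default)
    return true;
  if (d == Dir::Loop)
    return false;
  if (m == Modifier::Task) {
    const Dir ok[]{Dir::Parallel, Dir::Do, Dir::Scope, Dir::Sections};
    return std::find(std::begin(ok), std::end(ok), d) != std::end(ok);
  }
  return d == Dir::Do || d == Dir::DoSimd || d == Dir::Simd; // Inscan
}

int main() {
  assert(modifierAllowed(Modifier::Task, Dir::Parallel));
  assert(!modifierAllowed(Modifier::Inscan, Dir::Sections));
  assert(!modifierAllowed(Modifier::Task, Dir::Loop));
  return 0;
}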
diff --git a/flang/lib/Semantics/check-omp-structure.h b/flang/lib/Semantics/check-omp-structure.h
index 1f7284307703..47705771e8e2 100644
--- a/flang/lib/Semantics/check-omp-structure.h
+++ b/flang/lib/Semantics/check-omp-structure.h
@@ -205,6 +205,7 @@ private:
bool CheckIntrinsicOperator(
const parser::DefinedOperator::IntrinsicOperator &);
void CheckReductionTypeList(const parser::OmpClause::Reduction &);
+ void CheckReductionModifier(const parser::OmpClause::Reduction &);
void CheckMasterNesting(const parser::OpenMPBlockConstruct &x);
void ChecksOnOrderedAsBlock();
void CheckBarrierNesting(const parser::OpenMPSimpleStandaloneConstruct &x);
diff --git a/flang/lib/Semantics/expression.cpp b/flang/lib/Semantics/expression.cpp
index 06e38da6626a..50e2b41212d7 100644
--- a/flang/lib/Semantics/expression.cpp
+++ b/flang/lib/Semantics/expression.cpp
@@ -1600,16 +1600,23 @@ private:
parser::CharBlock name, std::int64_t lower, std::int64_t upper,
std::int64_t stride);
- template <int KIND, typename A>
- std::optional<Expr<Type<TypeCategory::Integer, KIND>>> GetSpecificIntExpr(
- const A &x) {
- if (MaybeExpr y{exprAnalyzer_.Analyze(x)}) {
+ template <int KIND>
+ std::optional<Expr<Type<TypeCategory::Integer, KIND>>> ToSpecificInt(
+ MaybeExpr &&y) {
+ if (y) {
Expr<SomeInteger> *intExpr{UnwrapExpr<Expr<SomeInteger>>(*y)};
return Fold(exprAnalyzer_.GetFoldingContext(),
ConvertToType<Type<TypeCategory::Integer, KIND>>(
std::move(DEREF(intExpr))));
+ } else {
+ return std::nullopt;
}
- return std::nullopt;
+ }
+
+ template <int KIND, typename A>
+ std::optional<Expr<Type<TypeCategory::Integer, KIND>>> GetSpecificIntExpr(
+ const A &x) {
+ return ToSpecificInt<KIND>(exprAnalyzer_.Analyze(x));
}
// Nested array constructors all reference the same ExpressionAnalyzer,
@@ -1772,26 +1779,45 @@ void ArrayConstructorContext::Add(const parser::AcValue &x) {
// Transforms l:u(:s) into (_,_=l,u(,s)) with an anonymous index '_'
void ArrayConstructorContext::Add(const parser::AcValue::Triplet &triplet) {
- std::optional<Expr<ImpliedDoIntType>> lower{
- GetSpecificIntExpr<ImpliedDoIntType::kind>(std::get<0>(triplet.t))};
- std::optional<Expr<ImpliedDoIntType>> upper{
- GetSpecificIntExpr<ImpliedDoIntType::kind>(std::get<1>(triplet.t))};
- std::optional<Expr<ImpliedDoIntType>> stride{
- GetSpecificIntExpr<ImpliedDoIntType::kind>(std::get<2>(triplet.t))};
- if (lower && upper) {
- if (!stride) {
- stride = Expr<ImpliedDoIntType>{1};
- }
- if (!type_) {
- type_ = DynamicTypeWithLength{ImpliedDoIntType::GetType()};
+ MaybeExpr lowerExpr{exprAnalyzer_.Analyze(std::get<0>(triplet.t))};
+ MaybeExpr upperExpr{exprAnalyzer_.Analyze(std::get<1>(triplet.t))};
+ MaybeExpr strideExpr{exprAnalyzer_.Analyze(std::get<2>(triplet.t))};
+ if (lowerExpr && upperExpr) {
+ auto lowerType{lowerExpr->GetType()};
+ auto upperType{upperExpr->GetType()};
+ auto strideType{strideExpr ? strideExpr->GetType() : lowerType};
+ if (lowerType && upperType && strideType) {
+ int kind{lowerType->kind()};
+ if (upperType->kind() > kind) {
+ kind = upperType->kind();
+ }
+ if (strideType->kind() > kind) {
+ kind = strideType->kind();
+ }
+ auto lower{ToSpecificInt<ImpliedDoIntType::kind>(std::move(lowerExpr))};
+ auto upper{ToSpecificInt<ImpliedDoIntType::kind>(std::move(upperExpr))};
+ if (lower && upper) {
+ auto stride{
+ ToSpecificInt<ImpliedDoIntType::kind>(std::move(strideExpr))};
+ if (!stride) {
+ stride = Expr<ImpliedDoIntType>{1};
+ }
+ DynamicType type{TypeCategory::Integer, kind};
+ if (!type_) {
+ type_ = DynamicTypeWithLength{type};
+ }
+ parser::CharBlock anonymous;
+ if (auto converted{ConvertToType(type,
+ AsGenericExpr(
+ Expr<ImpliedDoIntType>{ImpliedDoIndex{anonymous}}))}) {
+ auto v{std::move(values_)};
+ Push(std::move(converted));
+ std::swap(v, values_);
+ values_.Push(ImpliedDo<SomeType>{anonymous, std::move(*lower),
+ std::move(*upper), std::move(*stride), std::move(v)});
+ }
+ }
}
- auto v{std::move(values_)};
- parser::CharBlock anonymous;
- Push(Expr<SomeType>{
- Expr<SomeInteger>{Expr<ImpliedDoIntType>{ImpliedDoIndex{anonymous}}}});
- std::swap(v, values_);
- values_.Push(ImpliedDo<SomeType>{anonymous, std::move(*lower),
- std::move(*upper), std::move(*stride), std::move(v)});
}
}
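
The effect of the reworked triplet handling above is that the array constructor's integer kind becomes the largest kind among the lower bound, upper bound, and stride. A tiny self-contained sketch of that promotion (the kind values are illustrative):

#include <algorithm>
#include <iostream>

int main() {
  // e.g. [ 1_4 : 10_8 : 2_4 ] -- kinds of lower, upper, and stride.
  int lowerKind = 4, upperKind = 8, strideKind = 4;
  int resultKind = std::max({lowerKind, upperKind, strideKind});
  std::cout << "array constructor element kind: " << resultKind << '\n'; // 8
  return 0;
}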
diff --git a/flang/lib/Semantics/mod-file.cpp b/flang/lib/Semantics/mod-file.cpp
index bb8c6c7567b8..d7f149467dd7 100644
--- a/flang/lib/Semantics/mod-file.cpp
+++ b/flang/lib/Semantics/mod-file.cpp
@@ -46,11 +46,11 @@ struct ModHeader {
};
static std::optional<SourceName> GetSubmoduleParent(const parser::Program &);
-static void CollectSymbols(const Scope &, SymbolVector &, SymbolVector &,
- std::map<const Symbol *, SourceName> &, UnorderedSymbolSet &);
+static void CollectSymbols(
+ const Scope &, SymbolVector &, SymbolVector &, UnorderedSymbolSet &);
static void PutPassName(llvm::raw_ostream &, const std::optional<SourceName> &);
static void PutInit(llvm::raw_ostream &, const Symbol &, const MaybeExpr &,
- const parser::Expr *, const std::map<const Symbol *, SourceName> &);
+ const parser::Expr *);
static void PutInit(llvm::raw_ostream &, const MaybeIntExpr &);
static void PutBound(llvm::raw_ostream &, const Bound &);
static void PutShapeSpec(llvm::raw_ostream &, const ShapeSpec &);
@@ -200,47 +200,105 @@ std::string ModFileWriter::GetAsString(const Symbol &symbol) {
return all.str();
}
-// Collect symbols from initializations that are being referenced directly
-// from other modules; they may require new USE associations.
-static void HarvestInitializerSymbols(
- SourceOrderedSymbolSet &set, const Scope &scope) {
- for (const auto &[_, symbol] : scope) {
- if (symbol->has<DerivedTypeDetails>()) {
- if (symbol->scope()) {
- HarvestInitializerSymbols(set, *symbol->scope());
+// Collect symbols from constant and specification expressions that are being
+// referenced directly from other modules; they may require new USE
+// associations.
+static void HarvestSymbolsNeededFromOtherModules(
+ SourceOrderedSymbolSet &, const Scope &);
+static void HarvestSymbolsNeededFromOtherModules(
+ SourceOrderedSymbolSet &set, const Symbol &symbol, const Scope &scope) {
+ auto HarvestBound{[&](const Bound &bound) {
+ if (const auto &expr{bound.GetExplicit()}) {
+ for (SymbolRef ref : evaluate::CollectSymbols(*expr)) {
+ set.emplace(*ref);
}
- } else if (const auto &generic{symbol->detailsIf<GenericDetails>()};
- generic && generic->derivedType()) {
- const Symbol &dtSym{*generic->derivedType()};
- if (dtSym.has<DerivedTypeDetails>()) {
- if (dtSym.scope()) {
- HarvestInitializerSymbols(set, *dtSym.scope());
- }
- } else {
- CHECK(dtSym.has<UseDetails>() || dtSym.has<UseErrorDetails>());
+ }
+ }};
+ auto HarvestShapeSpec{[&](const ShapeSpec &shapeSpec) {
+ HarvestBound(shapeSpec.lbound());
+ HarvestBound(shapeSpec.ubound());
+ }};
+ auto HarvestArraySpec{[&](const ArraySpec &arraySpec) {
+ for (const auto &shapeSpec : arraySpec) {
+ HarvestShapeSpec(shapeSpec);
+ }
+ }};
+
+ if (symbol.has<DerivedTypeDetails>()) {
+ if (symbol.scope()) {
+ HarvestSymbolsNeededFromOtherModules(set, *symbol.scope());
+ }
+ } else if (const auto &generic{symbol.detailsIf<GenericDetails>()};
+ generic && generic->derivedType()) {
+ const Symbol &dtSym{*generic->derivedType()};
+ if (dtSym.has<DerivedTypeDetails>()) {
+ if (dtSym.scope()) {
+ HarvestSymbolsNeededFromOtherModules(set, *dtSym.scope());
}
- } else if (IsNamedConstant(*symbol) || scope.IsDerivedType()) {
- if (const auto *object{symbol->detailsIf<ObjectEntityDetails>()}) {
- if (object->init()) {
- for (SymbolRef ref : evaluate::CollectSymbols(*object->init())) {
- set.emplace(*ref);
- }
- }
- } else if (const auto *proc{symbol->detailsIf<ProcEntityDetails>()}) {
- if (proc->init() && *proc->init()) {
- set.emplace(**proc->init());
+ } else {
+ CHECK(dtSym.has<UseDetails>() || dtSym.has<UseErrorDetails>());
+ }
+ } else if (const auto *object{symbol.detailsIf<ObjectEntityDetails>()}) {
+ HarvestArraySpec(object->shape());
+ HarvestArraySpec(object->coshape());
+ if (IsNamedConstant(symbol) || scope.IsDerivedType()) {
+ if (object->init()) {
+ for (SymbolRef ref : evaluate::CollectSymbols(*object->init())) {
+ set.emplace(*ref);
}
}
}
+ } else if (const auto *proc{symbol.detailsIf<ProcEntityDetails>()}) {
+ if (proc->init() && *proc->init() && scope.IsDerivedType()) {
+ set.emplace(**proc->init());
+ }
+ } else if (const auto *subp{symbol.detailsIf<SubprogramDetails>()}) {
+ for (const Symbol *dummy : subp->dummyArgs()) {
+ if (dummy) {
+ HarvestSymbolsNeededFromOtherModules(set, *dummy, scope);
+ }
+ }
+ if (subp->isFunction()) {
+ HarvestSymbolsNeededFromOtherModules(set, subp->result(), scope);
+ }
+ }
+}
+
+static void HarvestSymbolsNeededFromOtherModules(
+ SourceOrderedSymbolSet &set, const Scope &scope) {
+ for (const auto &[_, symbol] : scope) {
+ HarvestSymbolsNeededFromOtherModules(set, *symbol, scope);
}
}
void ModFileWriter::PrepareRenamings(const Scope &scope) {
- SourceOrderedSymbolSet symbolsInInits;
- HarvestInitializerSymbols(symbolsInInits, scope);
- for (SymbolRef s : symbolsInInits) {
+ // Identify use-associated symbols already in scope under some name
+ std::map<const Symbol *, const Symbol *> useMap;
+ for (const auto &[name, symbolRef] : scope) {
+ const Symbol *symbol{&*symbolRef};
+ while (const auto *hostAssoc{symbol->detailsIf<HostAssocDetails>()}) {
+ symbol = &hostAssoc->symbol();
+ }
+ if (const auto *use{symbol->detailsIf<UseDetails>()}) {
+ useMap.emplace(&use->symbol(), symbol);
+ }
+ }
+ // Collect symbols needed from other modules
+ SourceOrderedSymbolSet symbolsNeeded;
+ HarvestSymbolsNeededFromOtherModules(symbolsNeeded, scope);
+ // Establish any necessary renamings of symbols in other modules
+ // to their names in this scope, creating those new names when needed.
+ auto &renamings{context_.moduleFileOutputRenamings()};
+ for (SymbolRef s : symbolsNeeded) {
+ if (s->owner().kind() == Scope::Kind::DerivedType) {
+ continue; // component or binding: ok
+ }
const Scope *sMod{FindModuleContaining(s->owner())};
- if (!sMod) {
+ if (!sMod || sMod == &scope) {
+ continue;
+ }
+ if (auto iter{useMap.find(&*s)}; iter != useMap.end()) {
+ renamings.emplace(&*s, iter->second->name());
continue;
}
SourceName rename{s->name()};
@@ -272,10 +330,10 @@ void ModFileWriter::PrepareRenamings(const Scope &scope) {
uses_ << DEREF(sMod->symbol()).name() << ",only:";
if (rename != s->name()) {
uses_ << rename << "=>";
+ renamings.emplace(&*s, rename);
}
uses_ << s->name() << '\n';
useExtraAttrs_ << "private::" << rename << '\n';
- renamings_.emplace(&*s, rename);
}
}
@@ -283,9 +341,11 @@ void ModFileWriter::PrepareRenamings(const Scope &scope) {
void ModFileWriter::PutSymbols(const Scope &scope) {
SymbolVector sorted;
SymbolVector uses;
+ auto &renamings{context_.moduleFileOutputRenamings()};
+ auto previousRenamings{std::move(renamings)};
PrepareRenamings(scope);
UnorderedSymbolSet modules;
- CollectSymbols(scope, sorted, uses, renamings_, modules);
+ CollectSymbols(scope, sorted, uses, modules);
// Write module files for dependencies first so that their
// hashes are known.
for (auto ref : modules) {
@@ -318,6 +378,7 @@ void ModFileWriter::PutSymbols(const Scope &scope) {
}
}
CHECK(typeBindings.str().empty());
+ renamings = std::move(previousRenamings);
}
// Emit components in order
@@ -521,7 +582,7 @@ void ModFileWriter::PutDECStructure(
}
decls_ << ref->name();
PutShape(decls_, object->shape(), '(', ')');
- PutInit(decls_, *ref, object->init(), nullptr, renamings_);
+ PutInit(decls_, *ref, object->init(), nullptr);
emittedDECFields_.insert(*ref);
} else if (any) {
break; // any later use of this structure will use RECORD/str/
@@ -767,8 +828,7 @@ static inline SourceName NameInModuleFile(const Symbol &symbol) {
// Collect the symbols of this scope sorted by their original order, not name.
// Generics and namelists are exceptions: they are sorted after other symbols.
void CollectSymbols(const Scope &scope, SymbolVector &sorted,
- SymbolVector &uses, std::map<const Symbol *, SourceName> &renamings,
- UnorderedSymbolSet &modules) {
+ SymbolVector &uses, UnorderedSymbolSet &modules) {
SymbolVector namelist, generics;
auto symbols{scope.GetSymbols()};
std::size_t commonSize{scope.commonBlocks().size()};
@@ -878,8 +938,7 @@ void ModFileWriter::PutObjectEntity(
getSymbolAttrsToWrite(symbol));
PutShape(os, details.shape(), '(', ')');
PutShape(os, details.coshape(), '[', ']');
- PutInit(os, symbol, details.init(), details.unanalyzedPDTComponentInit(),
- renamings_);
+ PutInit(os, symbol, details.init(), details.unanalyzedPDTComponentInit());
os << '\n';
if (auto tkr{GetIgnoreTKR(symbol)}; !tkr.empty()) {
os << "!dir$ ignore_tkr(";
@@ -973,25 +1032,12 @@ void ModFileWriter::PutTypeParam(llvm::raw_ostream &os, const Symbol &symbol) {
}
void PutInit(llvm::raw_ostream &os, const Symbol &symbol, const MaybeExpr &init,
- const parser::Expr *unanalyzed,
- const std::map<const Symbol *, SourceName> &renamings) {
+ const parser::Expr *unanalyzed) {
if (IsNamedConstant(symbol) || symbol.owner().IsDerivedType()) {
const char *assign{symbol.attrs().test(Attr::POINTER) ? "=>" : "="};
if (unanalyzed) {
parser::Unparse(os << assign, *unanalyzed);
} else if (init) {
- if (const auto *dtConst{
- evaluate::UnwrapExpr<evaluate::Constant<evaluate::SomeDerived>>(
- *init)}) {
- const Symbol &dtSym{dtConst->result().derivedTypeSpec().typeSymbol()};
- if (auto iter{renamings.find(&dtSym)}; iter != renamings.end()) {
- // Initializer is a constant whose derived type's name has
- // been brought into scope from a module under a new name
- // to avoid a conflict.
- dtConst->AsFortran(os << assign, &iter->second);
- return;
- }
- }
init->AsFortran(os << assign);
}
}
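
The reworked PrepareRenamings boils down to: if a symbol needed from another module is already use-associated in this scope, record its existing local name as the renaming; otherwise emit a new USE ... ONLY with a fresh rename. A rough sketch with strings standing in for symbols (the "m::" prefix and the rename scheme are illustrative only):

#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
  // Original module symbol -> local name it is already use-associated under.
  std::map<std::string, std::string> useMap{{"m::t", "t2"}};
  std::map<std::string, std::string> renamings;
  std::vector<std::string> needed{"m::t", "m::k"};
  for (const std::string &sym : needed) {
    auto it = useMap.find(sym);
    if (it != useMap.end())
      renamings.emplace(sym, it->second); // reuse the existing local name
    else
      renamings.emplace(sym, sym.substr(3) + "$new"); // invent a fresh rename
  }
  for (const auto &[sym, local] : renamings)
    std::cout << sym << " is spelled " << local << " in this module file\n";
  return 0;
}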
diff --git a/flang/lib/Semantics/mod-file.h b/flang/lib/Semantics/mod-file.h
index 739add32c2e0..be44780bef43 100644
--- a/flang/lib/Semantics/mod-file.h
+++ b/flang/lib/Semantics/mod-file.h
@@ -57,7 +57,6 @@ private:
llvm::raw_string_ostream decls_{declsBuf_};
llvm::raw_string_ostream contains_{containsBuf_};
bool isSubmodule_{false};
- std::map<const Symbol *, SourceName> renamings_;
void WriteAll(const Scope &);
void WriteOne(const Scope &);
diff --git a/flang/lib/Semantics/resolve-directives.cpp b/flang/lib/Semantics/resolve-directives.cpp
index 2add2056f658..dbc531372c3f 100644
--- a/flang/lib/Semantics/resolve-directives.cpp
+++ b/flang/lib/Semantics/resolve-directives.cpp
@@ -1503,6 +1503,8 @@ bool OmpAttributeVisitor::Pre(const parser::OpenMPBlockConstruct &x) {
const auto &beginBlockDir{std::get<parser::OmpBeginBlockDirective>(x.t)};
const auto &beginDir{std::get<parser::OmpBlockDirective>(beginBlockDir.t)};
switch (beginDir.v) {
+ case llvm::omp::Directive::OMPD_masked:
+ case llvm::omp::Directive::OMPD_parallel_masked:
case llvm::omp::Directive::OMPD_master:
case llvm::omp::Directive::OMPD_ordered:
case llvm::omp::Directive::OMPD_parallel:
@@ -1532,6 +1534,8 @@ void OmpAttributeVisitor::Post(const parser::OpenMPBlockConstruct &x) {
const auto &beginBlockDir{std::get<parser::OmpBeginBlockDirective>(x.t)};
const auto &beginDir{std::get<parser::OmpBlockDirective>(beginBlockDir.t)};
switch (beginDir.v) {
+ case llvm::omp::Directive::OMPD_masked:
+ case llvm::omp::Directive::OMPD_parallel_masked:
case llvm::omp::Directive::OMPD_parallel:
case llvm::omp::Directive::OMPD_single:
case llvm::omp::Directive::OMPD_target:
@@ -1598,8 +1602,12 @@ bool OmpAttributeVisitor::Pre(const parser::OpenMPLoopConstruct &x) {
case llvm::omp::Directive::OMPD_distribute_simd:
case llvm::omp::Directive::OMPD_do:
case llvm::omp::Directive::OMPD_do_simd:
+ case llvm::omp::Directive::OMPD_masked_taskloop_simd:
+ case llvm::omp::Directive::OMPD_masked_taskloop:
case llvm::omp::Directive::OMPD_parallel_do:
case llvm::omp::Directive::OMPD_parallel_do_simd:
+ case llvm::omp::Directive::OMPD_parallel_masked_taskloop_simd:
+ case llvm::omp::Directive::OMPD_parallel_masked_taskloop:
case llvm::omp::Directive::OMPD_simd:
case llvm::omp::Directive::OMPD_target_parallel_do:
case llvm::omp::Directive::OMPD_target_parallel_do_simd:
diff --git a/flang/lib/Semantics/resolve-names-utils.cpp b/flang/lib/Semantics/resolve-names-utils.cpp
index 3ca460b8e46a..e27a54361749 100644
--- a/flang/lib/Semantics/resolve-names-utils.cpp
+++ b/flang/lib/Semantics/resolve-names-utils.cpp
@@ -376,25 +376,35 @@ static void PropagateSaveAttr(const EquivalenceSet &src, EquivalenceSet &dst) {
void EquivalenceSets::AddToSet(const parser::Designator &designator) {
if (CheckDesignator(designator)) {
- Symbol &symbol{*currObject_.symbol};
- if (!currSet_.empty()) {
- // check this symbol against first of set for compatibility
- Symbol &first{currSet_.front().symbol};
- CheckCanEquivalence(designator.source, first, symbol) &&
- CheckCanEquivalence(designator.source, symbol, first);
- }
- auto subscripts{currObject_.subscripts};
- if (subscripts.empty() && symbol.IsObjectArray()) {
- // record a whole array as its first element
- for (const ShapeSpec &spec : symbol.get<ObjectEntityDetails>().shape()) {
- auto &lbound{spec.lbound().GetExplicit().value()};
- subscripts.push_back(evaluate::ToInt64(lbound).value());
+ if (Symbol * symbol{currObject_.symbol}) {
+ if (!currSet_.empty()) {
+ // check this symbol against first of set for compatibility
+ Symbol &first{currSet_.front().symbol};
+ CheckCanEquivalence(designator.source, first, *symbol) &&
+ CheckCanEquivalence(designator.source, *symbol, first);
+ }
+ auto subscripts{currObject_.subscripts};
+ if (subscripts.empty()) {
+ if (const ArraySpec * shape{symbol->GetShape()};
+ shape && shape->IsExplicitShape()) {
+ // record a whole array as its first element
+ for (const ShapeSpec &spec : *shape) {
+ if (auto lbound{spec.lbound().GetExplicit()}) {
+ if (auto lbValue{evaluate::ToInt64(*lbound)}) {
+ subscripts.push_back(*lbValue);
+ continue;
+ }
+ }
+ subscripts.clear(); // error recovery
+ break;
+ }
+ }
}
+ auto substringStart{currObject_.substringStart};
+ currSet_.emplace_back(
+ *symbol, subscripts, substringStart, designator.source);
+ PropagateSaveAttr(currSet_.back(), currSet_);
}
- auto substringStart{currObject_.substringStart};
- currSet_.emplace_back(
- symbol, subscripts, substringStart, designator.source);
- PropagateSaveAttr(currSet_.back(), currSet_);
}
currObject_ = {};
}
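
The equivalence-set change above also adds error recovery when a whole array's lower bounds are not all constant. A self-contained sketch of that gathering logic (plain C++; std::optional stands in for a bound without a known constant value):

#include <optional>
#include <vector>

// Collect one subscript per dimension from the lower bounds; if any bound
// is not a known constant, drop everything gathered so far (error recovery).
std::vector<long> firstElementSubscripts(
    const std::vector<std::optional<long>> &lowerBounds) {
  std::vector<long> subscripts;
  for (const std::optional<long> &lb : lowerBounds) {
    if (!lb) {
      subscripts.clear();
      break;
    }
    subscripts.push_back(*lb);
  }
  return subscripts;
}

int main() {
  std::vector<std::optional<long>> bounds{1, 0, std::nullopt};
  return firstElementSubscripts(bounds).empty() ? 0 : 1;
}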
diff --git a/flang/lib/Semantics/resolve-names.cpp b/flang/lib/Semantics/resolve-names.cpp
index a46c0f378d5d..68cfc8641b9b 100644
--- a/flang/lib/Semantics/resolve-names.cpp
+++ b/flang/lib/Semantics/resolve-names.cpp
@@ -6227,7 +6227,7 @@ void DeclarationVisitor::CheckEquivalenceSets() {
}
for (const parser::EquivalenceObject &object : *set) {
const auto &designator{object.v.value()};
- // The designator was not resolved when it was encountered so do it now.
+ // The designator was not resolved when it was encountered, so do it now.
// AnalyzeExpr causes array sections to be changed to substrings as needed
Walk(designator);
if (AnalyzeExpr(context(), designator)) {
@@ -7846,28 +7846,31 @@ bool DeclarationVisitor::CheckForHostAssociatedImplicit(
if (name.symbol) {
ApplyImplicitRules(*name.symbol, true);
}
- Symbol *hostSymbol;
- Scope *host{GetHostProcedure()};
- if (!host || isImplicitNoneType(*host)) {
- return false;
- }
- if (!name.symbol) {
- hostSymbol = &MakeSymbol(*host, name.source, Attrs{});
- ConvertToObjectEntity(*hostSymbol);
- ApplyImplicitRules(*hostSymbol);
- hostSymbol->set(Symbol::Flag::ImplicitOrError);
- } else if (name.symbol->test(Symbol::Flag::ImplicitOrError)) {
- hostSymbol = name.symbol;
- } else {
- return false;
- }
- Symbol &symbol{MakeHostAssocSymbol(name, *hostSymbol)};
- if (isImplicitNoneType()) {
- symbol.get<HostAssocDetails>().implicitOrExplicitTypeError = true;
- } else {
- symbol.get<HostAssocDetails>().implicitOrSpecExprError = true;
+ if (Scope * host{GetHostProcedure()}; host && !isImplicitNoneType(*host)) {
+ Symbol *hostSymbol{nullptr};
+ if (!name.symbol) {
+ if (currScope().CanImport(name.source)) {
+ hostSymbol = &MakeSymbol(*host, name.source, Attrs{});
+ ConvertToObjectEntity(*hostSymbol);
+ ApplyImplicitRules(*hostSymbol);
+ hostSymbol->set(Symbol::Flag::ImplicitOrError);
+ }
+ } else if (name.symbol->test(Symbol::Flag::ImplicitOrError)) {
+ hostSymbol = name.symbol;
+ }
+ if (hostSymbol) {
+ Symbol &symbol{MakeHostAssocSymbol(name, *hostSymbol)};
+ if (auto *assoc{symbol.detailsIf<HostAssocDetails>()}) {
+ if (isImplicitNoneType()) {
+ assoc->implicitOrExplicitTypeError = true;
+ } else {
+ assoc->implicitOrSpecExprError = true;
+ }
+ return true;
+ }
+ }
}
- return true;
+ return false;
}
bool DeclarationVisitor::IsUplevelReference(const Symbol &symbol) {
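
The CheckForHostAssociatedImplicit rewrite above collapses several early returns into one guarded success path: it now asks currScope().CanImport(name.source) before fabricating a host symbol, and it uses detailsIf<HostAssocDetails>() instead of get<>, so a symbol lacking host-association details falls through to `return false` rather than asserting. A compilable sketch of the new shape, with Sym/HostDetails as stand-in types (assumptions, not flang's classes):

    #include <optional>

    struct HostDetails { bool error{false}; };
    struct Sym { std::optional<HostDetails> hostAssoc; };

    // Sketch: one guarded success path; every failure exits at the end.
    bool CheckHostImplicit(Sym *candidate, bool hostIsImplicitNone) {
      if (!hostIsImplicitNone) { // guard replaces the early returns
        if (candidate && candidate->hostAssoc) {
          candidate->hostAssoc->error = true; // record which rule failed
          return true;                        // the only success path
        }
      }
      return false; // all failure cases fall through here
    }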
diff --git a/flang/lib/Semantics/symbol.cpp b/flang/lib/Semantics/symbol.cpp
index 381905b89fb2..3eb120fd962f 100644
--- a/flang/lib/Semantics/symbol.cpp
+++ b/flang/lib/Semantics/symbol.cpp
@@ -385,9 +385,17 @@ bool Symbol::IsFuncResult() const {
details_);
}
+const ArraySpec *Symbol::GetShape() const {
+ if (const auto *details{std::get_if<ObjectEntityDetails>(&details_)}) {
+ return &details->shape();
+ } else {
+ return nullptr;
+ }
+}
+
bool Symbol::IsObjectArray() const {
- const auto *details{std::get_if<ObjectEntityDetails>(&details_)};
- return details && details->IsArray();
+ const ArraySpec *shape{GetShape()};
+ return shape && !shape->empty();
}
bool Symbol::IsSubprogram() const {
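
Symbol::GetShape above exposes an object entity's ArraySpec as a nullable pointer, and IsObjectArray is re-expressed on top of it; the AddToSet hunk earlier in this patch is the first caller that needs the shape itself rather than a yes/no answer. A usage sketch against a simplified stand-in type (Sym is an assumption, not the flang Symbol):

    #include <vector>

    struct ShapeSpec {};
    using ArraySpec = std::vector<ShapeSpec>;

    struct Sym {
      // Returns null unless the symbol is an object entity.
      const ArraySpec *GetShape() const { return hasShape ? &shape : nullptr; }
      bool hasShape{false};
      ArraySpec shape;
    };

    bool IsObjectArray(const Sym &s) {
      const ArraySpec *shape{s.GetShape()};
      return shape && !shape->empty(); // same form as the diff
    }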
diff --git a/flang/runtime/CMakeLists.txt b/flang/runtime/CMakeLists.txt
index 4f7627eac81f..a826980e1941 100644
--- a/flang/runtime/CMakeLists.txt
+++ b/flang/runtime/CMakeLists.txt
@@ -199,6 +199,7 @@ set(supported_files
inquiry.cpp
internal-unit.cpp
io-api.cpp
+ io-api-minimal.cpp
io-error.cpp
io-stmt.cpp
iostat.cpp
@@ -270,21 +271,26 @@ else()
LINK_LIBS
FortranDecimal.static
INSTALL_WITH_TOOLCHAIN)
+ set_target_properties(FortranRuntime.static PROPERTIES FOLDER "Flang/Runtime Libraries")
set(CMAKE_MSVC_RUNTIME_LIBRARY MultiThreadedDLL)
add_flang_library(FortranRuntime.dynamic ${sources}
LINK_LIBS
FortranDecimal.dynamic
INSTALL_WITH_TOOLCHAIN)
+ set_target_properties(FortranRuntime.dynamic PROPERTIES FOLDER "Flang/Runtime Libraries")
set(CMAKE_MSVC_RUNTIME_LIBRARY MultiThreadedDebug)
add_flang_library(FortranRuntime.static_dbg ${sources}
LINK_LIBS
FortranDecimal.static_dbg
INSTALL_WITH_TOOLCHAIN)
+ set_target_properties(FortranRuntime.static_dbg PROPERTIES FOLDER "Flang/Runtime Libraries")
set(CMAKE_MSVC_RUNTIME_LIBRARY MultiThreadedDebugDLL)
add_flang_library(FortranRuntime.dynamic_dbg ${sources}
LINK_LIBS
FortranDecimal.dynamic_dbg
INSTALL_WITH_TOOLCHAIN)
+ set_target_properties(FortranRuntime.dynamic_dbg PROPERTIES FOLDER "Flang/Runtime Libraries")
add_dependencies(FortranRuntime FortranRuntime.static FortranRuntime.dynamic
FortranRuntime.static_dbg FortranRuntime.dynamic_dbg)
endif()
+set_target_properties(FortranRuntime PROPERTIES FOLDER "Flang/Runtime Libraries")
diff --git a/flang/runtime/edit-output.cpp b/flang/runtime/edit-output.cpp
index 13ab91fc56ea..1a73c85df840 100644
--- a/flang/runtime/edit-output.cpp
+++ b/flang/runtime/edit-output.cpp
@@ -263,7 +263,6 @@ template <int KIND>
RT_API_ATTRS decimal::ConversionToDecimalResult
RealOutputEditing<KIND>::ConvertToDecimal(
int significantDigits, enum decimal::FortranRounding rounding, int flags) {
-#if !defined(RT_DEVICE_COMPILATION)
auto converted{decimal::ConvertToDecimal<binaryPrecision>(buffer_,
sizeof buffer_, static_cast<enum decimal::DecimalConversionFlags>(flags),
significantDigits, rounding, x_)};
@@ -273,10 +272,6 @@ RealOutputEditing<KIND>::ConvertToDecimal(
sizeof buffer_);
}
return converted;
-#else // defined(RT_DEVICE_COMPILATION)
- // TODO: enable Decimal library build for the device.
- io_.GetIoErrorHandler().Crash("not implemented yet: decimal conversion");
-#endif // defined(RT_DEVICE_COMPILATION)
}
static RT_API_ATTRS bool IsInfOrNaN(const char *p, int length) {
@@ -832,8 +827,11 @@ RT_API_ATTRS bool EditLogicalOutput(
reinterpret_cast<const unsigned char *>(&truth), sizeof truth);
case 'A': { // legacy extension
int truthBits{truth};
- return EditCharacterOutput(
- io, edit, reinterpret_cast<char *>(&truthBits), sizeof truthBits);
+ int len{sizeof truthBits};
+ int width{edit.width.value_or(len)};
+ return EmitRepeated(io, ' ', std::max(0, width - len)) &&
+ EmitEncoded(
+ io, reinterpret_cast<char *>(&truthBits), std::min(width, len));
}
default:
io.GetIoErrorHandler().SignalError(IostatErrorInFormat,
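
Two things change in edit-output.cpp above: the RT_DEVICE_COMPILATION guard around ConvertToDecimal is dropped (the Decimal library is now built for the device, so the Crash fallback is no longer needed), and the legacy 'A' edit of a LOGICAL no longer round-trips through EditCharacterOutput; it blank-pads to the field width and emits at most sizeof(int) raw bytes. A standalone restatement of the new width handling (an illustration, not the runtime's I/O API):

    #include <algorithm>
    #include <optional>
    #include <string>

    // Sketch: leading blanks when the field is wider than the value,
    // truncation to the field width when it is narrower.
    std::string EditLogicalAsA(bool truth, std::optional<int> editWidth) {
      int truthBits{truth};
      int len{static_cast<int>(sizeof truthBits)};
      int width{editWidth.value_or(len)};
      std::string out(std::max(0, width - len), ' ');
      out.append(reinterpret_cast<const char *>(&truthBits),
          std::min(width, len));
      return out;
    }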
diff --git a/flang/runtime/external-unit.cpp b/flang/runtime/external-unit.cpp
index b48549d54587..4bfa218bb776 100644
--- a/flang/runtime/external-unit.cpp
+++ b/flang/runtime/external-unit.cpp
@@ -214,6 +214,13 @@ Iostat ExternalFileUnit::SetDirection(Direction direction) {
}
} else {
if (mayWrite()) {
+ if (direction_ == Direction::Input) {
+ // Don't retain any input data from previous record, like a
+ // variable-length unformatted record footer, in the frame,
+          // since we're going to start writing frames.
+ frameOffsetInFile_ += recordOffsetInFrame_;
+ recordOffsetInFrame_ = 0;
+ }
direction_ = Direction::Output;
return IostatOk;
} else {
@@ -332,5 +339,4 @@ bool ExternalFileUnit::Wait(int id) {
}
} // namespace Fortran::runtime::io
-
#endif // !defined(RT_USE_PSEUDO_FILE_UNIT)
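
This hunk and the unit.cpp change later in the patch enforce the same invariant: once a record's bytes have been consumed, they are folded into frameOffsetInFile_ and recordOffsetInFrame_ is reset, so a direction switch to output cannot leak stale input (such as a variable-length unformatted record footer) into the frame being written. A minimal sketch of the bookkeeping, with member names copied from the diff but everything else made up:

    #include <cstdint>

    struct FrameState {
      std::int64_t frameOffsetInFile_{0};
      std::int64_t recordOffsetInFrame_{0};
      // Fold the consumed record into the file offset so the next
      // frame starts clean; mirrors both hunks.
      void CommitRecord() {
        frameOffsetInFile_ += recordOffsetInFrame_;
        recordOffsetInFrame_ = 0;
      }
    };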
diff --git a/flang/runtime/numeric.cpp b/flang/runtime/numeric.cpp
index 52b5a56894d8..2225473c4690 100644
--- a/flang/runtime/numeric.cpp
+++ b/flang/runtime/numeric.cpp
@@ -117,13 +117,13 @@ inline RT_API_ATTRS CppTypeFor<TypeCategory::Integer, 4> SelectedIntKind(T x) {
template <typename T>
inline RT_API_ATTRS CppTypeFor<TypeCategory::Integer, 4> SelectedLogicalKind(
T x) {
- if (x <= 2) {
+ if (x <= 8) {
return 1;
- } else if (x <= 4) {
+ } else if (x <= 16) {
return 2;
- } else if (x <= 9) {
+ } else if (x <= 32) {
return 4;
- } else if (x <= 18) {
+ } else if (x <= 64) {
return 8;
}
return -1;
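
The old thresholds (2, 4, 9, 18) match SELECTED_INT_KIND's decimal-digit ranges; the new ones treat the argument as a requested number of storage bits, so SELECTED_LOGICAL_KIND returns the smallest logical kind with at least that many bits. A standalone sketch of the revised mapping (assuming, as the hunk does, that kinds 1/2/4/8 provide 8/16/32/64 bits):

    #include <cstdint>

    // Sketch of the new mapping; e.g. SelectedLogicalKind(32) == 4.
    inline int SelectedLogicalKind(std::int64_t bits) {
      if (bits <= 8) { return 1; }
      if (bits <= 16) { return 2; }
      if (bits <= 32) { return 4; }
      if (bits <= 64) { return 8; }
      return -1; // no kind holds this many bits
    }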
diff --git a/flang/runtime/support.cpp b/flang/runtime/support.cpp
index 12135804f00e..19e75429774b 100644
--- a/flang/runtime/support.cpp
+++ b/flang/runtime/support.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "flang/Runtime/support.h"
+#include "type-info.h"
#include "flang/Runtime/descriptor.h"
namespace Fortran::runtime {
@@ -17,6 +18,27 @@ bool RTDEF(IsContiguous)(const Descriptor &descriptor) {
return descriptor.IsContiguous();
}
+void RTDEF(CopyAndUpdateDescriptor)(Descriptor &to, const Descriptor &from,
+ const typeInfo::DerivedType *newDynamicType,
+ ISO::CFI_attribute_t newAttribute, enum LowerBoundModifier newLowerBounds) {
+ to = from;
+ if (newDynamicType) {
+ DescriptorAddendum *toAddendum{to.Addendum()};
+ INTERNAL_CHECK(toAddendum);
+ toAddendum->set_derivedType(newDynamicType);
+ to.raw().elem_len = newDynamicType->sizeInBytes();
+ }
+ to.raw().attribute = newAttribute;
+ if (newLowerBounds != LowerBoundModifier::Preserve) {
+ const ISO::CFI_index_t newLowerBound{
+ newLowerBounds == LowerBoundModifier::SetToOnes ? 1 : 0};
+ const int rank{to.rank()};
+ for (int i = 0; i < rank; ++i) {
+ to.GetDimension(i).SetLowerBound(newLowerBound);
+ }
+ }
+}
+
RT_EXT_API_GROUP_END
} // extern "C"
} // namespace Fortran::runtime
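
CopyAndUpdateDescriptor copies a descriptor wholesale and then patches it in place: an optional new dynamic type (which also refreshes elem_len from sizeInBytes()), a new CFI attribute, and optionally normalized lower bounds. The lower-bound step reduces to a per-dimension loop; a hedged restatement with a stand-in Desc type (not the runtime's Descriptor):

    #include <cstdint>
    #include <vector>

    enum class LowerBoundModifier { Preserve, SetToOnes, SetToZeroes };

    struct Desc {
      std::vector<std::int64_t> lowerBounds; // one entry per dimension
    };

    // Sketch of the lower-bound normalization in the new entry point.
    void UpdateLowerBounds(Desc &to, LowerBoundModifier mod) {
      if (mod != LowerBoundModifier::Preserve) {
        const std::int64_t lb{mod == LowerBoundModifier::SetToOnes ? 1 : 0};
        for (auto &bound : to.lowerBounds) {
          bound = lb;
        }
      }
    }

The fir.rebox_assumed_rank codegen tests later in this patch show the op lowering onto this entry point, with the `lbs ones`/`lbs zeroes`/`lbs preserve` variants selecting the modifier.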
diff --git a/flang/runtime/terminator.h b/flang/runtime/terminator.h
index 59a47ce93e7c..609f059d6e09 100644
--- a/flang/runtime/terminator.h
+++ b/flang/runtime/terminator.h
@@ -54,7 +54,7 @@ public:
// to regular printf for the device compilation.
// Try to keep the inline implementations as small as possible.
template <typename... Args>
- [[noreturn]] RT_API_ATTRS const char *Crash(
+ [[noreturn]] RT_DEVICE_NOINLINE RT_API_ATTRS const char *Crash(
const char *message, Args... args) const {
#if !defined(RT_DEVICE_COMPILATION)
// Invoke handler set up by the test harness.
diff --git a/flang/runtime/unit.cpp b/flang/runtime/unit.cpp
index 3b42f45d5588..a11f444d8d75 100644
--- a/flang/runtime/unit.cpp
+++ b/flang/runtime/unit.cpp
@@ -265,6 +265,7 @@ void ExternalFileUnit::FinishReadingRecord(IoErrorHandler &handler) {
furthestPositionInRecord =
std::max(furthestPositionInRecord, positionInRecord);
frameOffsetInFile_ += recordOffsetInFrame_ + furthestPositionInRecord;
+ recordOffsetInFrame_ = 0;
}
BeginRecord();
}
diff --git a/flang/test/CMakeLists.txt b/flang/test/CMakeLists.txt
index 7e036ad539df..43ad1e3312b6 100644
--- a/flang/test/CMakeLists.txt
+++ b/flang/test/CMakeLists.txt
@@ -83,13 +83,14 @@ if (LLVM_BUILD_EXAMPLES)
endif ()
add_custom_target(flang-test-depends DEPENDS ${FLANG_TEST_DEPENDS})
+set_target_properties(flang-test-depends PROPERTIES FOLDER "Flang/Meta")
add_lit_testsuite(check-flang "Running the Flang regression tests"
${CMAKE_CURRENT_BINARY_DIR}
PARAMS ${FLANG_TEST_PARAMS}
DEPENDS ${FLANG_TEST_DEPENDS}
)
-set_target_properties(check-flang PROPERTIES FOLDER "Tests")
+set_target_properties(check-flang PROPERTIES FOLDER "Flang/Meta")
# In case of standalone builds.
if (FLANG_STANDALONE_BUILD)
diff --git a/flang/test/Driver/bbc-mlir-pass-pipeline.f90 b/flang/test/Driver/bbc-mlir-pass-pipeline.f90
index 2cc25b3c473f..c94b98c7c580 100644
--- a/flang/test/Driver/bbc-mlir-pass-pipeline.f90
+++ b/flang/test/Driver/bbc-mlir-pass-pipeline.f90
@@ -46,6 +46,7 @@ end program
! CHECK-NEXT: (S) 0 num-dce'd - Number of operations DCE'd
! CHECK-NEXT: PolymorphicOpConversion
+! CHECK-NEXT: AssumedRankOpConversion
! CHECK-NEXT: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private']
! CHECK-NEXT: 'fir.global' Pipeline
diff --git a/flang/test/Driver/fopenmp.f90 b/flang/test/Driver/fopenmp.f90
index c71d34dc9e7e..d70fe100c3d2 100644
--- a/flang/test/Driver/fopenmp.f90
+++ b/flang/test/Driver/fopenmp.f90
@@ -14,7 +14,7 @@
! CHECK-FC1-OPENMP: "-fc1"
! CHECK-FC1-OPENMP: "-fopenmp"
!
-! CHECK-WARNING: warning: The library '-fopenmp=={{.*}}' is not supported, openmp is not be enabled
+! CHECK-WARNING: warning: the library '-fopenmp=={{.*}}' is not supported, OpenMP will not be enabled
! CHECK-FC1-NO-OPENMP: "-fc1"
! CHECK-FC1-NO-OPENMP-NOT: "-fopenmp"
!
@@ -51,9 +51,14 @@
! We'd like to check that the default is sane, but until we have the ability
! to *always* semantically analyze OpenMP without always generating runtime
! calls (in the event of an unsupported runtime), we don't have a good way to
-! test the CC1 invocation. Instead, just ensure we do eventually link *some*
+! test the FC1 invocation. Instead, just ensure we do eventually link *some*
! OpenMP runtime.
!
+! RUN: %flang -target x86_64-linux-gnu -fopenmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY
+! RUN: %flang -target x86_64-darwin -fopenmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY
+! RUN: %flang -target x86_64-freebsd -fopenmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANY
+! RUN: %flang -target x86_64-windows-gnu -fopenmp %s -o %t -### 2>&1 | FileCheck %s --check-prefix=CHECK-LD-ANYMD
+!
! CHECK-LD-ANY: "{{.*}}ld{{(.exe)?}}"
! CHECK-LD-ANY: "-l{{(omp|gomp|iomp5)}}"
!
diff --git a/flang/test/Driver/mlir-debug-pass-pipeline.f90 b/flang/test/Driver/mlir-debug-pass-pipeline.f90
index a9980e3c932c..49b1f8c5c313 100644
--- a/flang/test/Driver/mlir-debug-pass-pipeline.f90
+++ b/flang/test/Driver/mlir-debug-pass-pipeline.f90
@@ -25,8 +25,15 @@ end program
! ALL: Pass statistics report
! ALL: Fortran::lower::VerifierPass
+! ALL-NEXT: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private']
+! ALL-NEXT: 'fir.global' Pipeline
+! ALL-NEXT: InlineElementals
! ALL-NEXT: 'func.func' Pipeline
! ALL-NEXT: InlineElementals
+! ALL-NEXT: 'omp.declare_reduction' Pipeline
+! ALL-NEXT: InlineElementals
+! ALL-NEXT: 'omp.private' Pipeline
+! ALL-NEXT: InlineElementals
! ALL-NEXT: LowerHLFIROrderedAssignments
! ALL-NEXT: LowerHLFIRIntrinsics
! ALL-NEXT: BufferizeHLFIR
@@ -66,6 +73,7 @@ end program
! ALL-NEXT: (S) 0 num-dce'd - Number of operations DCE'd
! ALL-NEXT: PolymorphicOpConversion
+! ALL-NEXT: AssumedRankOpConversion
! ALL-NEXT: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private']
! ALL-NEXT: 'fir.global' Pipeline
diff --git a/flang/test/Driver/mlir-pass-pipeline.f90 b/flang/test/Driver/mlir-pass-pipeline.f90
index 4ebac7c3fb65..8e1a3d43edd1 100644
--- a/flang/test/Driver/mlir-pass-pipeline.f90
+++ b/flang/test/Driver/mlir-pass-pipeline.f90
@@ -13,9 +13,32 @@ end program
! ALL: Fortran::lower::VerifierPass
! O2-NEXT: Canonicalizer
-! O2-NEXT: 'func.func' Pipeline
+! ALL: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private']
+! ALL-NEXT:'fir.global' Pipeline
+! O2-NEXT: SimplifyHLFIRIntrinsics
+! ALL: InlineElementals
+! ALL-NEXT:'func.func' Pipeline
+! O2-NEXT: SimplifyHLFIRIntrinsics
+! ALL: InlineElementals
+! ALL-NEXT:'omp.declare_reduction' Pipeline
! O2-NEXT: SimplifyHLFIRIntrinsics
! ALL: InlineElementals
+! ALL-NEXT:'omp.private' Pipeline
+! O2-NEXT: SimplifyHLFIRIntrinsics
+! ALL: InlineElementals
+! O2-NEXT: Canonicalizer
+! O2-NEXT: CSE
+! O2-NEXT: (S) {{.*}} num-cse'd
+! O2-NEXT: (S) {{.*}} num-dce'd
+! O2-NEXT: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private']
+! O2-NEXT: 'fir.global' Pipeline
+! O2-NEXT: OptimizedBufferization
+! O2-NEXT: 'func.func' Pipeline
+! O2-NEXT: OptimizedBufferization
+! O2-NEXT: 'omp.declare_reduction' Pipeline
+! O2-NEXT: OptimizedBufferization
+! O2-NEXT: 'omp.private' Pipeline
+! O2-NEXT: OptimizedBufferization
! ALL: LowerHLFIROrderedAssignments
! ALL-NEXT: LowerHLFIRIntrinsics
! ALL-NEXT: BufferizeHLFIR
@@ -57,6 +80,7 @@ end program
! ALL-NEXT: (S) 0 num-dce'd - Number of operations DCE'd
! ALL-NEXT: PolymorphicOpConversion
+! ALL-NEXT: AssumedRankOpConversion
! O2-NEXT: AddAliasTags
! ALL-NEXT: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private']
diff --git a/flang/test/Driver/w-arg-unsupported.f90 b/flang/test/Driver/w-arg-unsupported.f90
index 1ef25fdd4db0..be753bfc6784 100644
--- a/flang/test/Driver/w-arg-unsupported.f90
+++ b/flang/test/Driver/w-arg-unsupported.f90
@@ -6,32 +6,32 @@
! RUN: -Wrealloc-lhs -Wrealloc-lhs-all -Wfrontend-loop-interchange -Wtarget-lifetime %s \
! RUN: -c 2>&1 | FileCheck %s
-! CHECK: The warning option '-Wextra' is not supported
-! CHECK-NEXT: The warning option '-Waliasing' is not supported
-! CHECK-NEXT: The warning option '-Wampersand' is not supported
-! CHECK-NEXT: The warning option '-Warray-bounds' is not supported
-! CHECK-NEXT: The warning option '-Wc-binding-type' is not supported
-! CHECK-NEXT: The warning option '-Wcharacter-truncation' is not supported
-! CHECK-NEXT: The warning option '-Wconversion' is not supported
-! CHECK-NEXT: The warning option '-Wdo-subscript' is not supported
-! CHECK-NEXT: The warning option '-Wfunction-elimination' is not supported
-! CHECK-NEXT: The warning option '-Wimplicit-interface' is not supported
-! CHECK-NEXT: The warning option '-Wimplicit-procedure' is not supported
-! CHECK-NEXT: The warning option '-Wintrinsic-shadow' is not supported
-! CHECK-NEXT: The warning option '-Wuse-without-only' is not supported
-! CHECK-NEXT: The warning option '-Wintrinsics-std' is not supported
-! CHECK-NEXT: The warning option '-Wline-truncation' is not supported
-! CHECK-NEXT: The warning option '-Wno-align-commons' is not supported
-! CHECK-NEXT: The warning option '-Wno-overwrite-recursive' is not supported
-! CHECK-NEXT: The warning option '-Wno-tabs' is not supported
-! CHECK-NEXT: The warning option '-Wreal-q-constant' is not supported
-! CHECK-NEXT: The warning option '-Wsurprising' is not supported
-! CHECK-NEXT: The warning option '-Wunderflow' is not supported
-! CHECK-NEXT: The warning option '-Wunused-parameter' is not supported
-! CHECK-NEXT: The warning option '-Wrealloc-lhs' is not supported
-! CHECK-NEXT: The warning option '-Wrealloc-lhs-all' is not supported
-! CHECK-NEXT: The warning option '-Wfrontend-loop-interchange' is not supported
-! CHECK-NEXT: The warning option '-Wtarget-lifetime' is not supported
+! CHECK: the warning option '-Wextra' is not supported
+! CHECK-NEXT: the warning option '-Waliasing' is not supported
+! CHECK-NEXT: the warning option '-Wampersand' is not supported
+! CHECK-NEXT: the warning option '-Warray-bounds' is not supported
+! CHECK-NEXT: the warning option '-Wc-binding-type' is not supported
+! CHECK-NEXT: the warning option '-Wcharacter-truncation' is not supported
+! CHECK-NEXT: the warning option '-Wconversion' is not supported
+! CHECK-NEXT: the warning option '-Wdo-subscript' is not supported
+! CHECK-NEXT: the warning option '-Wfunction-elimination' is not supported
+! CHECK-NEXT: the warning option '-Wimplicit-interface' is not supported
+! CHECK-NEXT: the warning option '-Wimplicit-procedure' is not supported
+! CHECK-NEXT: the warning option '-Wintrinsic-shadow' is not supported
+! CHECK-NEXT: the warning option '-Wuse-without-only' is not supported
+! CHECK-NEXT: the warning option '-Wintrinsics-std' is not supported
+! CHECK-NEXT: the warning option '-Wline-truncation' is not supported
+! CHECK-NEXT: the warning option '-Wno-align-commons' is not supported
+! CHECK-NEXT: the warning option '-Wno-overwrite-recursive' is not supported
+! CHECK-NEXT: the warning option '-Wno-tabs' is not supported
+! CHECK-NEXT: the warning option '-Wreal-q-constant' is not supported
+! CHECK-NEXT: the warning option '-Wsurprising' is not supported
+! CHECK-NEXT: the warning option '-Wunderflow' is not supported
+! CHECK-NEXT: the warning option '-Wunused-parameter' is not supported
+! CHECK-NEXT: the warning option '-Wrealloc-lhs' is not supported
+! CHECK-NEXT: the warning option '-Wrealloc-lhs-all' is not supported
+! CHECK-NEXT: the warning option '-Wfrontend-loop-interchange' is not supported
+! CHECK-NEXT: the warning option '-Wtarget-lifetime' is not supported
program m
end program
diff --git a/flang/test/Driver/wextra-ok.f90 b/flang/test/Driver/wextra-ok.f90
index 48676e8e62aa..6a38d9481a36 100644
--- a/flang/test/Driver/wextra-ok.f90
+++ b/flang/test/Driver/wextra-ok.f90
@@ -4,7 +4,7 @@
! RUN: %flang -std=f2018 -Wextra %s -c 2>&1 | FileCheck %s --check-prefix=CHECK-OK
! RUN: not %flang -std=f2018 -Wblah -Wextra %s -c 2>&1 | FileCheck %s --check-prefix=WRONG
-! CHECK-OK: The warning option '-Wextra' is not supported
+! CHECK-OK: the warning option '-Wextra' is not supported
! WRONG: Only `-Werror` is supported currently.
program wextra_ok
diff --git a/flang/test/Evaluate/triplets01.f90 b/flang/test/Evaluate/triplets01.f90
new file mode 100644
index 000000000000..aba9772f6b95
--- /dev/null
+++ b/flang/test/Evaluate/triplets01.f90
@@ -0,0 +1,11 @@
+! RUN: %python %S/test_folding.py %s %flang_fc1
+module m
+ logical, parameter :: test01 = all([1:10:2] == [(j, j=1,10,2)])
+ logical, parameter :: test02 = kind([1:20:2]) == kind(1)
+ logical, parameter :: test03 = all([10:1:-3,123] == [(j, j=10,1,-3),123])
+ logical, parameter :: test04 = kind([10:1:-3,123]) == kind(1)
+ logical, parameter :: test05 = kind([10_2:1_2:-3_2,123_2]) == 2
+ logical, parameter :: test06 = all([10_2:1_2:-3_2,123_2] == [(j, integer(2)::j=10,1,-3),123_2])
+ logical, parameter :: test07 = kind([10_2:1_4:-3_2]) == 4
+ logical, parameter :: test08 = kind([10_2:1_4]) == 4
+end
diff --git a/flang/test/Fir/basic-program.fir b/flang/test/Fir/basic-program.fir
index 02fb84ed8c87..dd184d99cb80 100644
--- a/flang/test/Fir/basic-program.fir
+++ b/flang/test/Fir/basic-program.fir
@@ -17,14 +17,31 @@ func.func @_QQmain() {
// PASSES: Pass statistics report
// PASSES: Canonicalizer
+// PASSES-NEXT: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private']
+// PASSES-NEXT: 'fir.global' Pipeline
+// PASSES-NEXT: SimplifyHLFIRIntrinsics
+// PASSES-NEXT: InlineElementals
// PASSES-NEXT: 'func.func' Pipeline
// PASSES-NEXT: SimplifyHLFIRIntrinsics
// PASSES-NEXT: InlineElementals
+// PASSES-NEXT: 'omp.declare_reduction' Pipeline
+// PASSES-NEXT: SimplifyHLFIRIntrinsics
+// PASSES-NEXT: InlineElementals
+// PASSES-NEXT: 'omp.private' Pipeline
+// PASSES-NEXT: SimplifyHLFIRIntrinsics
+// PASSES-NEXT: InlineElementals
// PASSES-NEXT: Canonicalizer
// PASSES-NEXT: CSE
// PASSES-NEXT: (S) 0 num-cse'd - Number of operations CSE'd
// PASSES-NEXT: (S) 0 num-dce'd - Number of operations DCE'd
-// PASSES-NEXT: 'func.func' Pipeline
+// PASSES-NEXT: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private']
+// PASSES-NEXT: 'fir.global' Pipeline
+// PASSES-NEXT: OptimizedBufferization
+// PASSES-NEXT: 'func.func' Pipeline
+// PASSES-NEXT: OptimizedBufferization
+// PASSES-NEXT: 'omp.declare_reduction' Pipeline
+// PASSES-NEXT: OptimizedBufferization
+// PASSES-NEXT: 'omp.private' Pipeline
// PASSES-NEXT: OptimizedBufferization
// PASSES-NEXT: LowerHLFIROrderedAssignments
// PASSES-NEXT: LowerHLFIRIntrinsics
@@ -63,6 +80,7 @@ func.func @_QQmain() {
// PASSES-NEXT: (S) 0 num-dce'd - Number of operations DCE'd
// PASSES-NEXT: PolymorphicOpConversion
+// PASSES-NEXT: AssumedRankOpConversion
// PASSES-NEXT: AddAliasTags
// PASSES-NEXT: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private']
diff --git a/flang/test/Fir/fir-ops.fir b/flang/test/Fir/fir-ops.fir
index 962621c4e2e1..a826dd49ef99 100644
--- a/flang/test/Fir/fir-ops.fir
+++ b/flang/test/Fir/fir-ops.fir
@@ -900,3 +900,15 @@ fir.global @t1 {keep_my_attr = "data"} : i32 {
}
// CHECK-LABEL: fir.global @t1 {keep_my_attr = "data"} : i32
+
+func.func @test_rebox_assumed_rank(%arg0: !fir.box<!fir.array<*:f32>> ) {
+ %1 = fir.rebox_assumed_rank %arg0 lbs ones : (!fir.box<!fir.array<*:f32>>) -> !fir.box<!fir.array<*:f32>>
+ %2 = fir.rebox_assumed_rank %arg0 lbs zeroes : (!fir.box<!fir.array<*:f32>>) -> !fir.box<!fir.array<*:f32>>
+ %3 = fir.rebox_assumed_rank %arg0 lbs preserve : (!fir.box<!fir.array<*:f32>>) -> !fir.box<!fir.array<*:f32>>
+ return
+}
+// CHECK-LABEL: func.func @test_rebox_assumed_rank(
+// CHECK-SAME: %[[A:.*]]: !fir.box<!fir.array<*:f32>>)
+ // CHECK: fir.rebox_assumed_rank %[[A]] lbs ones : (!fir.box<!fir.array<*:f32>>) -> !fir.box<!fir.array<*:f32>>
+ // CHECK: fir.rebox_assumed_rank %[[A]] lbs zeroes : (!fir.box<!fir.array<*:f32>>) -> !fir.box<!fir.array<*:f32>>
+ // CHECK: fir.rebox_assumed_rank %[[A]] lbs preserve : (!fir.box<!fir.array<*:f32>>) -> !fir.box<!fir.array<*:f32>>
diff --git a/flang/test/Fir/invalid.fir b/flang/test/Fir/invalid.fir
index 049e108ba992..f1e1aa433b9b 100644
--- a/flang/test/Fir/invalid.fir
+++ b/flang/test/Fir/invalid.fir
@@ -978,3 +978,27 @@ func.func @bad_box_offset(%no_addendum : !fir.ref<!fir.box<i32>>) {
%addr1 = fir.box_offset %no_addendum derived_type : (!fir.ref<!fir.box<i32>>) -> !fir.llvm_ptr<!fir.tdesc<!fir.type<none>>>
return
}
+
+// -----
+
+func.func @bad_rebox_assumed_rank_1(%arg0: !fir.ref<!fir.array<*:f32>> ) {
+ // expected-error@+1{{'fir.rebox_assumed_rank' op input must be a box or box address}}
+ %1 = fir.rebox_assumed_rank %arg0 lbs ones : (!fir.ref<!fir.array<*:f32>>) -> !fir.box<!fir.array<*:f32>>
+ return
+}
+
+// -----
+
+func.func @bad_rebox_assumed_rank_2(%arg0: !fir.box<!fir.array<*:f32>> ) {
+ // expected-error@+1{{'fir.rebox_assumed_rank' op result #0 must be box or class, but got '!fir.ref<!fir.box<!fir.array<*:f32>>>'}}
+ %1 = fir.rebox_assumed_rank %arg0 lbs ones : (!fir.box<!fir.array<*:f32>>) -> !fir.ref<!fir.box<!fir.array<*:f32>>>
+ return
+}
+
+// -----
+
+func.func @bad_rebox_assumed_rank_3(%arg0: !fir.box<!fir.array<*:f32>> ) {
+ // expected-error@+1{{'fir.rebox_assumed_rank' op input and output element types are incompatible}}
+ %1 = fir.rebox_assumed_rank %arg0 lbs ones : (!fir.box<!fir.array<*:f32>>) -> !fir.box<!fir.array<*:i32>>
+ return
+}
diff --git a/flang/test/Fir/rebox_assumed_rank_codegen.fir b/flang/test/Fir/rebox_assumed_rank_codegen.fir
new file mode 100644
index 000000000000..6f9cd6edda31
--- /dev/null
+++ b/flang/test/Fir/rebox_assumed_rank_codegen.fir
@@ -0,0 +1,111 @@
+// Test fir.rebox_assumed_rank lowering to runtime calls in the fir-assumed-rank-op pass.
+// RUN: fir-opt -o - --fir-assumed-rank-op %s | FileCheck %s
+
+func.func @test_simple(%arg0: !fir.box<!fir.array<*:f32>> ) {
+ %1 = fir.rebox_assumed_rank %arg0 lbs ones : (!fir.box<!fir.array<*:f32>>) -> !fir.box<!fir.array<*:f32>>
+ fir.call @somefunc(%1) : (!fir.box<!fir.array<*:f32>>) -> ()
+ return
+}
+func.func @test_simple_zeroes(%arg0: !fir.box<!fir.array<*:f32>> ) {
+ %1 = fir.rebox_assumed_rank %arg0 lbs zeroes : (!fir.box<!fir.array<*:f32>>) -> !fir.box<!fir.array<*:f32>>
+ fir.call @somefunc(%1) : (!fir.box<!fir.array<*:f32>>) -> ()
+ return
+}
+func.func @test_simple_preserve(%arg0: !fir.box<!fir.array<*:f32>> ) {
+ %1 = fir.rebox_assumed_rank %arg0 lbs preserve : (!fir.box<!fir.array<*:f32>>) -> !fir.box<!fir.array<*:f32>>
+ fir.call @somefunc(%1) : (!fir.box<!fir.array<*:f32>>) -> ()
+ return
+}
+func.func @test_allocatable(%arg0: !fir.box<!fir.array<*:f32>> ) {
+ %1 = fir.rebox_assumed_rank %arg0 lbs preserve : (!fir.box<!fir.array<*:f32>>) -> !fir.box<!fir.heap<!fir.array<*:f32>>>
+ fir.call @somefuncalloc(%1) : (!fir.box<!fir.heap<!fir.array<*:f32>>>) -> ()
+ return
+}
+func.func @test_pointer(%arg0: !fir.box<!fir.array<*:f32>> ) {
+ %1 = fir.rebox_assumed_rank %arg0 lbs preserve : (!fir.box<!fir.array<*:f32>>) -> !fir.box<!fir.ptr<!fir.array<*:f32>>>
+ fir.call @somefuncpointer(%1) : (!fir.box<!fir.ptr<!fir.array<*:f32>>>) -> ()
+ return
+}
+!t1= !fir.type<t1{i:i32}>
+!t2= !fir.type<t2{t1:!t1, x:f32}>
+func.func @test_new_dtype(%arg0: !fir.box<!fir.array<*:!t2>> ) {
+ %1 = fir.rebox_assumed_rank %arg0 lbs ones : (!fir.box<!fir.array<*:!t2>>) -> !fir.box<!fir.array<*:!t1>>
+ fir.call @somefunct1(%1) : (!fir.box<!fir.array<*:!t1>>) -> ()
+ return
+}
+
+func.func private @somefunc(!fir.box<!fir.array<*:f32>>)
+func.func private @somefuncalloc(!fir.box<!fir.heap<!fir.array<*:f32>>>)
+func.func private @somefuncpointer(!fir.box<!fir.ptr<!fir.array<*:f32>>>)
+func.func private @somefunct1(!fir.box<!fir.array<*:!t1>>)
+
+// CHECK-LABEL: func.func @test_simple(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.box<!fir.array<*:f32>>) {
+// CHECK: %[[VAL_1:.*]] = arith.constant 1 : i32
+// CHECK: %[[VAL_2:.*]] = arith.constant 0 : i8
+// CHECK: %[[VAL_3:.*]] = fir.alloca !fir.box<!fir.array<?x?x?x?x?x?x?x?x?x?x?x?x?x?x?xf32>>
+// CHECK: %[[VAL_4:.*]] = fir.zero_bits !fir.ref<none>
+// CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_3]] : (!fir.ref<!fir.box<!fir.array<?x?x?x?x?x?x?x?x?x?x?x?x?x?x?xf32>>>) -> !fir.ref<!fir.box<none>>
+// CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_0]] : (!fir.box<!fir.array<*:f32>>) -> !fir.box<none>
+// CHECK: %[[VAL_7:.*]] = fir.call @_FortranACopyAndUpdateDescriptor(%[[VAL_5]], %[[VAL_6]], %[[VAL_4]], %[[VAL_2]], %[[VAL_1]]) : (!fir.ref<!fir.box<none>>, !fir.box<none>, !fir.ref<none>, i8, i32) -> none
+// CHECK: %[[VAL_8:.*]] = fir.load %[[VAL_3]] : !fir.ref<!fir.box<!fir.array<?x?x?x?x?x?x?x?x?x?x?x?x?x?x?xf32>>>
+// CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (!fir.box<!fir.array<?x?x?x?x?x?x?x?x?x?x?x?x?x?x?xf32>>) -> !fir.box<!fir.array<*:f32>>
+// CHECK: fir.call @somefunc(%[[VAL_9]]) : (!fir.box<!fir.array<*:f32>>) -> ()
+// CHECK: return
+// CHECK: }
+
+// CHECK-LABEL: func.func @test_simple_zeroes(
+// CHECK: %[[VAL_1:.*]] = arith.constant 2 : i32
+// CHECK: fir.call @_FortranACopyAndUpdateDescriptor(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[VAL_1]])
+
+// CHECK-LABEL: func.func @test_simple_preserve(
+// CHECK: %[[VAL_1:.*]] = arith.constant 0 : i32
+// CHECK: fir.call @_FortranACopyAndUpdateDescriptor(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[VAL_1]])
+
+// CHECK-LABEL: func.func @test_allocatable(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.box<!fir.array<*:f32>>) {
+// CHECK: %[[VAL_1:.*]] = arith.constant 0 : i32
+// CHECK: %[[VAL_2:.*]] = arith.constant 2 : i8
+// CHECK: %[[VAL_3:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?x?x?x?x?x?x?x?x?x?x?x?x?x?x?xf32>>>
+// CHECK: %[[VAL_4:.*]] = fir.zero_bits !fir.ref<none>
+// CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_3]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x?x?x?x?x?x?x?x?x?x?x?x?x?x?xf32>>>>) -> !fir.ref<!fir.box<none>>
+// CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_0]] : (!fir.box<!fir.array<*:f32>>) -> !fir.box<none>
+// CHECK: %[[VAL_7:.*]] = fir.call @_FortranACopyAndUpdateDescriptor(%[[VAL_5]], %[[VAL_6]], %[[VAL_4]], %[[VAL_2]], %[[VAL_1]]) : (!fir.ref<!fir.box<none>>, !fir.box<none>, !fir.ref<none>, i8, i32) -> none
+// CHECK: %[[VAL_8:.*]] = fir.load %[[VAL_3]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?x?x?x?x?x?x?x?x?x?x?x?x?x?xf32>>>>
+// CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (!fir.box<!fir.heap<!fir.array<?x?x?x?x?x?x?x?x?x?x?x?x?x?x?xf32>>>) -> !fir.box<!fir.heap<!fir.array<*:f32>>>
+// CHECK: fir.call @somefuncalloc(%[[VAL_9]]) : (!fir.box<!fir.heap<!fir.array<*:f32>>>) -> ()
+// CHECK: return
+// CHECK: }
+
+// CHECK-LABEL: func.func @test_pointer(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.box<!fir.array<*:f32>>) {
+// CHECK: %[[VAL_1:.*]] = arith.constant 0 : i32
+// CHECK: %[[VAL_2:.*]] = arith.constant 1 : i8
+// CHECK: %[[VAL_3:.*]] = fir.alloca !fir.box<!fir.ptr<!fir.array<?x?x?x?x?x?x?x?x?x?x?x?x?x?x?xf32>>>
+// CHECK: %[[VAL_4:.*]] = fir.zero_bits !fir.ref<none>
+// CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_3]] : (!fir.ref<!fir.box<!fir.ptr<!fir.array<?x?x?x?x?x?x?x?x?x?x?x?x?x?x?xf32>>>>) -> !fir.ref<!fir.box<none>>
+// CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_0]] : (!fir.box<!fir.array<*:f32>>) -> !fir.box<none>
+// CHECK: %[[VAL_7:.*]] = fir.call @_FortranACopyAndUpdateDescriptor(%[[VAL_5]], %[[VAL_6]], %[[VAL_4]], %[[VAL_2]], %[[VAL_1]]) : (!fir.ref<!fir.box<none>>, !fir.box<none>, !fir.ref<none>, i8, i32) -> none
+// CHECK: %[[VAL_8:.*]] = fir.load %[[VAL_3]] : !fir.ref<!fir.box<!fir.ptr<!fir.array<?x?x?x?x?x?x?x?x?x?x?x?x?x?x?xf32>>>>
+// CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (!fir.box<!fir.ptr<!fir.array<?x?x?x?x?x?x?x?x?x?x?x?x?x?x?xf32>>>) -> !fir.box<!fir.ptr<!fir.array<*:f32>>>
+// CHECK: fir.call @somefuncpointer(%[[VAL_9]]) : (!fir.box<!fir.ptr<!fir.array<*:f32>>>) -> ()
+// CHECK: return
+// CHECK: }
+
+// CHECK-LABEL: func.func @test_new_dtype(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.box<!fir.array<*:!fir.type<t2{t1:!fir.type<t1{i:i32}>,x:f32}>>>) {
+// CHECK: %[[VAL_1:.*]] = arith.constant 1 : i32
+// CHECK: %[[VAL_2:.*]] = arith.constant 0 : i8
+// CHECK: %[[VAL_3:.*]] = fir.alloca !fir.box<!fir.array<?x?x?x?x?x?x?x?x?x?x?x?x?x?x?x!fir.type<t1{i:i32}>>>
+// CHECK: %[[VAL_4:.*]] = fir.type_desc !fir.type<t1{i:i32}>
+// CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_3]] : (!fir.ref<!fir.box<!fir.array<?x?x?x?x?x?x?x?x?x?x?x?x?x?x?x!fir.type<t1{i:i32}>>>>) -> !fir.ref<!fir.box<none>>
+// CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_0]] : (!fir.box<!fir.array<*:!fir.type<t2{t1:!fir.type<t1{i:i32}>,x:f32}>>>) -> !fir.box<none>
+// CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_4]] : (!fir.tdesc<!fir.type<t1{i:i32}>>) -> !fir.ref<none>
+// CHECK: %[[VAL_8:.*]] = fir.call @_FortranACopyAndUpdateDescriptor(%[[VAL_5]], %[[VAL_6]], %[[VAL_7]], %[[VAL_2]], %[[VAL_1]]) : (!fir.ref<!fir.box<none>>, !fir.box<none>, !fir.ref<none>, i8, i32) -> none
+// CHECK: %[[VAL_9:.*]] = fir.load %[[VAL_3]] : !fir.ref<!fir.box<!fir.array<?x?x?x?x?x?x?x?x?x?x?x?x?x?x?x!fir.type<t1{i:i32}>>>>
+// CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_9]] : (!fir.box<!fir.array<?x?x?x?x?x?x?x?x?x?x?x?x?x?x?x!fir.type<t1{i:i32}>>>) -> !fir.box<!fir.array<*:!fir.type<t1{i:i32}>>>
+// CHECK: fir.call @somefunct1(%[[VAL_10]]) : (!fir.box<!fir.array<*:!fir.type<t1{i:i32}>>>) -> ()
+// CHECK: return
+// CHECK: }
+
+// CHECK: func.func private @_FortranACopyAndUpdateDescriptor(!fir.ref<!fir.box<none>> {llvm.nocapture}, !fir.box<none> {llvm.nocapture}, !fir.ref<none>, i8, i32) -> none attributes {fir.runtime}
diff --git a/flang/test/Integration/debug-complex-1.f90 b/flang/test/Integration/debug-complex-1.f90
new file mode 100644
index 000000000000..c8d0da4c4baa
--- /dev/null
+++ b/flang/test/Integration/debug-complex-1.f90
@@ -0,0 +1,26 @@
+! RUN: %flang_fc1 -emit-llvm -debug-info-kind=standalone %s -o - | FileCheck %s
+
+program mn
+ complex(kind=4) :: c4
+ complex(kind=8) :: c8
+ complex(kind=16) :: r
+ r = fn1(c4, c8)
+ print *, r
+contains
+ function fn1(a, b) result (c)
+ complex(kind=4), intent(in) :: a
+ complex(kind=8), intent(in) :: b
+ complex(kind=16) :: c
+ c = a + b
+ end function
+end program
+
+! CHECK-DAG: ![[C4:.*]] = !DIBasicType(name: "complex", size: 64, encoding: DW_ATE_complex_float)
+! CHECK-DAG: ![[C8:.*]] = !DIBasicType(name: "complex", size: 128, encoding: DW_ATE_complex_float)
+! CHECK-DAG: ![[C16:.*]] = !DIBasicType(name: "complex", size: 256, encoding: DW_ATE_complex_float)
+! CHECK-DAG: !DILocalVariable(name: "c4"{{.*}}type: ![[C4]])
+! CHECK-DAG: !DILocalVariable(name: "c8"{{.*}}type: ![[C8]])
+! CHECK-DAG: !DILocalVariable(name: "r"{{.*}}type: ![[C16]])
+! CHECK-DAG: !DILocalVariable(name: "a"{{.*}}type: ![[C4]])
+! CHECK-DAG: !DILocalVariable(name: "b"{{.*}}type: ![[C8]])
+! CHECK-DAG: !DILocalVariable(name: "c"{{.*}}type: ![[C16]])
diff --git a/flang/test/Integration/debug-fixed-array-type-2.f90 b/flang/test/Integration/debug-fixed-array-type-2.f90
new file mode 100644
index 000000000000..315525442a5b
--- /dev/null
+++ b/flang/test/Integration/debug-fixed-array-type-2.f90
@@ -0,0 +1,43 @@
+! RUN: %flang_fc1 -emit-llvm -debug-info-kind=standalone %s -o - | FileCheck %s
+
+program mn
+
+ integer d1(3)
+ integer d2(2, 5)
+ real d3(6, 8, 7)
+
+ i8 = fn1(d1, d2, d3)
+contains
+ function fn1(a1, b1, c1) result (res)
+ integer a1(3)
+ integer b1(2, 5)
+ real c1(6, 8, 7)
+ integer res
+ res = a1(1) + b1(1,2) + c1(3, 3, 4)
+ end function
+
+end program
+
+! CHECK-DAG: ![[INT:.*]] = !DIBasicType(name: "integer", size: 32, encoding: DW_ATE_signed)
+! CHECK-DAG: ![[REAL:.*]] = !DIBasicType(name: "real", size: 32, encoding: DW_ATE_float)
+! CHECK-DAG: ![[R1:.*]] = !DISubrange(count: 3, lowerBound: 1)
+! CHECK-DAG: ![[SUB1:.*]] = !{![[R1]]}
+! CHECK-DAG: ![[D1TY:.*]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[INT]], elements: ![[SUB1]])
+! CHECK-DAG: !DILocalVariable(name: "d1"{{.*}}type: ![[D1TY]])
+
+! CHECK-DAG: ![[R21:.*]] = !DISubrange(count: 2, lowerBound: 1)
+! CHECK-DAG: ![[R22:.*]] = !DISubrange(count: 5, lowerBound: 1)
+! CHECK-DAG: ![[SUB2:.*]] = !{![[R21]], ![[R22]]}
+! CHECK-DAG: ![[D2TY:.*]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[INT]], elements: ![[SUB2]])
+! CHECK-DAG: !DILocalVariable(name: "d2"{{.*}}type: ![[D2TY]])
+
+! CHECK-DAG: ![[R31:.*]] = !DISubrange(count: 6, lowerBound: 1)
+! CHECK-DAG: ![[R32:.*]] = !DISubrange(count: 8, lowerBound: 1)
+! CHECK-DAG: ![[R33:.*]] = !DISubrange(count: 7, lowerBound: 1)
+! CHECK-DAG: ![[SUB3:.*]] = !{![[R31]], ![[R32]], ![[R33]]}
+! CHECK-DAG: ![[D3TY:.*]] = !DICompositeType(tag: DW_TAG_array_type, baseType: ![[REAL]], elements: ![[SUB3]])
+! CHECK-DAG: !DILocalVariable(name: "d3"{{.*}}type: ![[D3TY]])
+
+! CHECK-DAG: !DILocalVariable(name: "a1", arg: 1{{.*}}type: ![[D1TY]])
+! CHECK-DAG: !DILocalVariable(name: "b1", arg: 2{{.*}}type: ![[D2TY]])
+! CHECK-DAG: !DILocalVariable(name: "c1", arg: 3{{.*}}type: ![[D3TY]])
diff --git a/flang/test/Integration/debug-module-2.f90 b/flang/test/Integration/debug-module-2.f90
new file mode 100644
index 000000000000..60fccaa2a6c1
--- /dev/null
+++ b/flang/test/Integration/debug-module-2.f90
@@ -0,0 +1,39 @@
+! RUN: %flang_fc1 -emit-llvm -debug-info-kind=standalone %s -o - | FileCheck %s
+! RUN: %flang_fc1 -emit-llvm -debug-info-kind=line-tables-only %s -o - | FileCheck --check-prefix=LINEONLY %s
+
+! CHECK-DAG: ![[FILE:.*]] = !DIFile(filename: {{.*}}debug-module-2.f90{{.*}})
+! CHECK-DAG: ![[FILE2:.*]] = !DIFile(filename: {{.*}}debug-module-2.f90{{.*}})
+! CHECK-DAG: ![[CU:.*]] = distinct !DICompileUnit({{.*}}file: ![[FILE]]{{.*}} globals: ![[GLOBALS:.*]])
+! CHECK-DAG: ![[MOD:.*]] = !DIModule(scope: ![[CU]], name: "helper", file: ![[FILE]]{{.*}})
+! CHECK-DAG: ![[R4:.*]] = !DIBasicType(name: "real", size: 32, encoding: DW_ATE_float)
+! CHECK-DAG: ![[I4:.*]] = !DIBasicType(name: "integer", size: 32, encoding: DW_ATE_signed)
+module helper
+! CHECK-DAG: ![[GLR:.*]] = distinct !DIGlobalVariable(name: "glr", linkageName: "_QMhelperEglr", scope: ![[MOD]], file: ![[FILE]], line: [[@LINE+2]], type: ![[R4]], isLocal: false, isDefinition: true)
+! CHECK-DAG: ![[GLRX:.*]] = !DIGlobalVariableExpression(var: ![[GLR]], expr: !DIExpression())
+ real glr
+
+! CHECK-DAG: ![[GLI:.*]] = distinct !DIGlobalVariable(name: "gli", linkageName: "_QMhelperEgli", scope: ![[MOD]], file: ![[FILE]], line: [[@LINE+2]], type: ![[I4]], isLocal: false, isDefinition: true)
+! CHECK-DAG: ![[GLIX:.*]] = !DIGlobalVariableExpression(var: ![[GLI]], expr: !DIExpression())
+ integer gli
+
+ contains
+!CHECK-DAG: !DISubprogram(name: "test", linkageName: "_QMhelperPtest", scope: ![[MOD]], file: ![[FILE2]], line: [[@LINE+1]]{{.*}}unit: ![[CU]])
+ subroutine test()
+ glr = 12.34
+ gli = 67
+
+ end subroutine
+end module helper
+
+program test
+use helper
+implicit none
+
+ glr = 3.14
+ gli = 2
+ call test()
+
+end program test
+
+! CHECK-DAG: ![[GLOBALS]] = !{![[GLIX]], ![[GLRX]]}
+! LINEONLY-NOT: DIGlobalVariable
diff --git a/flang/test/Lower/CUDA/cuda-data-transfer.cuf b/flang/test/Lower/CUDA/cuda-data-transfer.cuf
index 084314ed63ec..42fa4d09c95e 100644
--- a/flang/test/Lower/CUDA/cuda-data-transfer.cuf
+++ b/flang/test/Lower/CUDA/cuda-data-transfer.cuf
@@ -25,6 +25,8 @@ subroutine sub1()
adev = ahost + bhost
+ adev = 10
+
end
! CHECK-LABEL: func.func @_QPsub1()
@@ -41,10 +43,7 @@ end
! CHECK: cuf.data_transfer %[[ASSOC]]#0 to %[[M]]#0 {transfer_kind = #cuf.cuda_transfer<host_device>} : !fir.ref<i32>, !fir.ref<i32>
! CHECK: hlfir.end_associate %[[ASSOC]]#1, %[[ASSOC]]#2 : !fir.ref<i32>, i1
-! CHECK: %[[C1:.*]] = arith.constant 1 : i32
-! CHECK: %[[ASSOC:.*]]:3 = hlfir.associate %[[C1]] {uniq_name = ".cuf_host_tmp"} : (i32) -> (!fir.ref<i32>, !fir.ref<i32>, i1)
-! CHECK: cuf.data_transfer %[[ASSOC]]#0 to %[[M]]#0 {transfer_kind = #cuf.cuda_transfer<host_device>} : !fir.ref<i32>, !fir.ref<i32>
-! CHECK: hlfir.end_associate %[[ASSOC]]#1, %[[ASSOC]]#2 : !fir.ref<i32>, i1
+! CHECK: cuf.data_transfer %c1{{.*}} to %[[M]]#0 {transfer_kind = #cuf.cuda_transfer<host_device>} : i32, !fir.ref<i32>
! CHECK: cuf.data_transfer %[[AHOST]]#0 to %[[ADEV]]#0 {transfer_kind = #cuf.cuda_transfer<host_device>} : !fir.ref<!fir.array<10xi32>>, !fir.ref<!fir.array<10xi32>>
@@ -62,6 +61,8 @@ end
! CHECK: cuf.data_transfer %[[ASSOC]]#0 to %[[ADEV]]#0 {transfer_kind = #cuf.cuda_transfer<host_device>} : !fir.ref<!fir.array<10xi32>>, !fir.ref<!fir.array<10xi32>>
! CHECK: hlfir.end_associate %[[ASSOC]]#1, %[[ASSOC]]#2 : !fir.ref<!fir.array<10xi32>>, i1
+! CHECK: cuf.data_transfer %c10{{.*}} to %[[ADEV]]#0 {transfer_kind = #cuf.cuda_transfer<host_device>} : i32, !fir.ref<!fir.array<10xi32>>
+
subroutine sub2()
integer, device :: m
integer, device :: adev(10), bdev(10)
@@ -159,3 +160,22 @@ end subroutine
! CHECK-LABEL: func.func @_QPsub6
! CHECK: cuf.data_transfer
+
+subroutine sub7(a, b, c)
+ integer, device, allocatable :: a(:), c(:)
+ integer, allocatable :: b(:)
+ b = a
+
+ a = b
+
+ c = a
+end subroutine
+
+! CHECK-LABEL: func.func @_QPsub7(
+! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {cuf.data_attr = #cuf.cuda<device>, fir.bindc_name = "a"}, %[[ARG1:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {fir.bindc_name = "b"}, %[[ARG2:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {cuf.data_attr = #cuf.cuda<device>, fir.bindc_name = "c"}) {
+! CHECK: %[[A:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{.*}} {data_attr = #cuf.cuda<device>, fortran_attrs = #fir.var_attrs<allocatable>, uniq_name = "_QFsub7Ea"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.dscope) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>)
+! CHECK: %[[B:.*]]:2 = hlfir.declare %[[ARG1]] dummy_scope %{{.*}} {fortran_attrs = #fir.var_attrs<allocatable>, uniq_name = "_QFsub7Eb"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.dscope) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>)
+! CHECK: %[[C:.*]]:2 = hlfir.declare %[[ARG2]] dummy_scope %{{.*}} {data_attr = #cuf.cuda<device>, fortran_attrs = #fir.var_attrs<allocatable>, uniq_name = "_QFsub7Ec"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.dscope) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>)
+! CHECK: cuf.data_transfer %[[A]]#0 to %[[B]]#0 {transfer_kind = #cuf.cuda_transfer<device_host>} : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+! CHECK: cuf.data_transfer %[[B]]#0 to %[[A]]#0 {transfer_kind = #cuf.cuda_transfer<host_device>} : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+! CHECK: cuf.data_transfer %[[A]]#0 to %[[C]]#0 {transfer_kind = #cuf.cuda_transfer<device_device>} : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
diff --git a/flang/test/Lower/Intrinsics/selected_char_kind.f90 b/flang/test/Lower/Intrinsics/selected_char_kind.f90
new file mode 100644
index 000000000000..4012591f2286
--- /dev/null
+++ b/flang/test/Lower/Intrinsics/selected_char_kind.f90
@@ -0,0 +1,17 @@
+! RUN: bbc -emit-hlfir %s -o - | FileCheck %s
+
+subroutine selected_char_kind_test(c)
+ character(*) :: c
+ integer :: res
+ res = selected_char_kind(c)
+end
+
+! CHECK-LABEL: func.func @_QPselected_char_kind_test(
+! CHECK-SAME: %[[ARG0:.*]]: !fir.boxchar<1> {fir.bindc_name = "c"})
+! CHECK: %[[UNBOXCHAR:.*]]:2 = fir.unboxchar %[[ARG0]] : (!fir.boxchar<1>) -> (!fir.ref<!fir.char<1,?>>, index)
+! CHECK: %[[C:.*]]:2 = hlfir.declare %[[UNBOXCHAR]]#0 typeparams %[[UNBOXCHAR]]#1 dummy_scope %{{.*}} {uniq_name = "_QFselected_char_kind_testEc"} : (!fir.ref<!fir.char<1,?>>, index, !fir.dscope) -> (!fir.boxchar<1>, !fir.ref<!fir.char<1,?>>)
+! CHECK: %[[RES_ALLOCA:.*]] = fir.alloca i32 {bindc_name = "res", uniq_name = "_QFselected_char_kind_testEres"}
+! CHECK: %[[RES:.*]]:2 = hlfir.declare %[[RES_ALLOCA]] {uniq_name = "_QFselected_char_kind_testEres"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[CHAR_PTR:.*]] = fir.convert %[[C]]#1 : (!fir.ref<!fir.char<1,?>>) -> !fir.ref<i8>
+! CHECK: %[[CHAR_LEN:.*]] = fir.convert %[[UNBOXCHAR]]#1 : (index) -> i64
+! CHECK: %{{.*}} = fir.call @_FortranASelectedCharKind(%{{.*}}, %{{.*}}, %[[CHAR_PTR]], %[[CHAR_LEN]]) fastmath<contract> : (!fir.ref<i8>, i32, !fir.ref<i8>, i64) -> i32
diff --git a/flang/test/Lower/Intrinsics/selected_logical_kind.f90 b/flang/test/Lower/Intrinsics/selected_logical_kind.f90
new file mode 100644
index 000000000000..93952762cce5
--- /dev/null
+++ b/flang/test/Lower/Intrinsics/selected_logical_kind.f90
@@ -0,0 +1,71 @@
+! RUN: bbc -emit-hlfir %s -o - | FileCheck %s
+
+subroutine selected_logical_kind_test1(input)
+ integer(1) :: input, res
+ res = selected_logical_kind(input)
+end
+
+! CHECK-LABEL: func.func @_QPselected_logical_kind_test1(
+! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<i8> {fir.bindc_name = "input"})
+! CHECK: %[[INPUT:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{.*}} {uniq_name = "_QFselected_logical_kind_test1Einput"} : (!fir.ref<i8>, !fir.dscope) -> (!fir.ref<i8>, !fir.ref<i8>)
+! CHECK: %[[RES_ALLOCA:.*]] = fir.alloca i8 {bindc_name = "res", uniq_name = "_QFselected_logical_kind_test1Eres"}
+! CHECK: %[[RES:.*]]:2 = hlfir.declare %[[RES_ALLOCA]] {uniq_name = "_QFselected_logical_kind_test1Eres"} : (!fir.ref<i8>) -> (!fir.ref<i8>, !fir.ref<i8>)
+! CHECK: %[[KIND:.*]] = arith.constant 1 : i32
+! CHECK: %[[INPUT_ADDR:.*]] = fir.convert %[[INPUT]]#1 : (!fir.ref<i8>) -> !fir.llvm_ptr<i8>
+! CHECK: %{{.*}} = fir.call @_FortranASelectedLogicalKind(%{{.*}}, %{{.*}}, %[[INPUT_ADDR]], %[[KIND]]) fastmath<contract> : (!fir.ref<i8>, i32, !fir.llvm_ptr<i8>, i32) -> i32
+
+subroutine selected_logical_kind_test2(input)
+ integer(2) :: input, res
+ res = selected_logical_kind(input)
+end
+
+! CHECK-LABEL: func.func @_QPselected_logical_kind_test2(
+! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<i16> {fir.bindc_name = "input"})
+! CHECK: %[[INPUT:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{.*}} {uniq_name = "_QFselected_logical_kind_test2Einput"} : (!fir.ref<i16>, !fir.dscope) -> (!fir.ref<i16>, !fir.ref<i16>)
+! CHECK: %[[RES_ALLOCA:.*]] = fir.alloca i16 {bindc_name = "res", uniq_name = "_QFselected_logical_kind_test2Eres"}
+! CHECK: %[[RES:.*]]:2 = hlfir.declare %[[RES_ALLOCA]] {uniq_name = "_QFselected_logical_kind_test2Eres"} : (!fir.ref<i16>) -> (!fir.ref<i16>, !fir.ref<i16>)
+! CHECK: %[[KIND:.*]] = arith.constant 2 : i32
+! CHECK: %[[INPUT_ADDR:.*]] = fir.convert %[[INPUT]]#1 : (!fir.ref<i16>) -> !fir.llvm_ptr<i8>
+! CHECK: %{{.*}} = fir.call @_FortranASelectedLogicalKind(%{{.*}}, %{{.*}}, %[[INPUT_ADDR]], %[[KIND]]) fastmath<contract> : (!fir.ref<i8>, i32, !fir.llvm_ptr<i8>, i32) -> i32
+
+subroutine selected_logical_kind_test4(input)
+ integer(4) :: input, res
+ res = selected_logical_kind(input)
+end
+
+! CHECK-LABEL: func.func @_QPselected_logical_kind_test4(
+! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<i32> {fir.bindc_name = "input"})
+! CHECK: %[[INPUT:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{.*}} {uniq_name = "_QFselected_logical_kind_test4Einput"} : (!fir.ref<i32>, !fir.dscope) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[RES_ALLOCA:.*]] = fir.alloca i32 {bindc_name = "res", uniq_name = "_QFselected_logical_kind_test4Eres"}
+! CHECK: %[[RES:.*]]:2 = hlfir.declare %[[RES_ALLOCA]] {uniq_name = "_QFselected_logical_kind_test4Eres"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[KIND:.*]] = arith.constant 4 : i32
+! CHECK: %[[INPUT_ADDR:.*]] = fir.convert %[[INPUT]]#1 : (!fir.ref<i32>) -> !fir.llvm_ptr<i8>
+! CHECK: %{{.*}} = fir.call @_FortranASelectedLogicalKind(%{{.*}}, %{{.*}}, %[[INPUT_ADDR]], %[[KIND]]) fastmath<contract> : (!fir.ref<i8>, i32, !fir.llvm_ptr<i8>, i32) -> i32
+
+subroutine selected_logical_kind_test8(input)
+ integer(8) :: input, res
+ res = selected_logical_kind(input)
+end
+
+! CHECK-LABEL: func.func @_QPselected_logical_kind_test8(
+! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<i64> {fir.bindc_name = "input"})
+! CHECK: %[[INPUT:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{.*}} {uniq_name = "_QFselected_logical_kind_test8Einput"} : (!fir.ref<i64>, !fir.dscope) -> (!fir.ref<i64>, !fir.ref<i64>)
+! CHECK: %[[RES_ALLOCA:.*]] = fir.alloca i64 {bindc_name = "res", uniq_name = "_QFselected_logical_kind_test8Eres"}
+! CHECK: %[[RES:.*]]:2 = hlfir.declare %[[RES_ALLOCA]] {uniq_name = "_QFselected_logical_kind_test8Eres"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
+! CHECK: %[[KIND:.*]] = arith.constant 8 : i32
+! CHECK: %[[INPUT_ADDR:.*]] = fir.convert %[[INPUT]]#1 : (!fir.ref<i64>) -> !fir.llvm_ptr<i8>
+! CHECK: %{{.*}} = fir.call @_FortranASelectedLogicalKind(%{{.*}}, %{{.*}}, %[[INPUT_ADDR]], %[[KIND]]) fastmath<contract> : (!fir.ref<i8>, i32, !fir.llvm_ptr<i8>, i32) -> i32
+
+subroutine selected_logical_kind_test16(input)
+ integer(16) :: input, res
+ res = selected_logical_kind(input)
+end
+
+! CHECK-LABEL: func.func @_QPselected_logical_kind_test16(
+! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<i128> {fir.bindc_name = "input"})
+! CHECK: %[[INPUT:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{.*}} {uniq_name = "_QFselected_logical_kind_test16Einput"} : (!fir.ref<i128>, !fir.dscope) -> (!fir.ref<i128>, !fir.ref<i128>)
+! CHECK: %[[RES_ALLOCA:.*]] = fir.alloca i128 {bindc_name = "res", uniq_name = "_QFselected_logical_kind_test16Eres"}
+! CHECK: %[[RES:.*]]:2 = hlfir.declare %[[RES_ALLOCA]] {uniq_name = "_QFselected_logical_kind_test16Eres"} : (!fir.ref<i128>) -> (!fir.ref<i128>, !fir.ref<i128>)
+! CHECK: %[[KIND:.*]] = arith.constant 16 : i32
+! CHECK: %[[INPUT_ADDR:.*]] = fir.convert %[[INPUT]]#1 : (!fir.ref<i128>) -> !fir.llvm_ptr<i8>
+! CHECK: %{{.*}} = fir.call @_FortranASelectedLogicalKind(%{{.*}}, %{{.*}}, %[[INPUT_ADDR]], %[[KIND]]) fastmath<contract> : (!fir.ref<i8>, i32, !fir.llvm_ptr<i8>, i32) -> i32
diff --git a/flang/test/Lower/OpenMP/Todo/masked-directive.f90 b/flang/test/Lower/OpenMP/Todo/masked-directive.f90
new file mode 100644
index 000000000000..77767715af52
--- /dev/null
+++ b/flang/test/Lower/OpenMP/Todo/masked-directive.f90
@@ -0,0 +1,13 @@
+! This test checks lowering of OpenMP masked Directive.
+
+! RUN: %not_todo_cmd bbc -emit-fir -fopenmp -o - %s 2>&1 | FileCheck %s
+! RUN: %not_todo_cmd %flang_fc1 -emit-fir -fopenmp -o - %s 2>&1 | FileCheck %s
+
+! CHECK: not yet implemented: Unhandled directive masked
+subroutine test_masked()
+ integer :: c = 1
+ !$omp masked
+ c = c + 1
+ !$omp end masked
+end subroutine
+
diff --git a/flang/test/Lower/OpenMP/invalid-reduction-modifier.f90 b/flang/test/Lower/OpenMP/invalid-reduction-modifier.f90
index 53871276761f..b3e87df7086e 100644
--- a/flang/test/Lower/OpenMP/invalid-reduction-modifier.f90
+++ b/flang/test/Lower/OpenMP/invalid-reduction-modifier.f90
@@ -1,6 +1,4 @@
-!Remove the --crash below once we can diagnose the issue more gracefully.
-!REQUIRES: asserts
-!RUN: not --crash %flang_fc1 -fopenmp -emit-hlfir -o - %s
+!RUN: not %flang_fc1 -fopenmp -emit-hlfir -o - %s
! Check that we reject the "task" reduction modifier on the "simd" directive.
diff --git a/flang/test/Lower/OpenMP/lastprivate-iv.f90 b/flang/test/Lower/OpenMP/lastprivate-iv.f90
index 61c588732882..21a701441cb5 100644
--- a/flang/test/Lower/OpenMP/lastprivate-iv.f90
+++ b/flang/test/Lower/OpenMP/lastprivate-iv.f90
@@ -76,3 +76,22 @@ subroutine lastprivate_iv_dec()
end do
!$omp end do
end subroutine
+
+
+!CHECK-LABEL: @_QPlastprivate_iv_i1
+subroutine lastprivate_iv_i1
+ integer*1 :: i1
+ i1=0
+!CHECK: omp.wsloop
+!CHECK: omp.loop_nest
+!CHECK: fir.if %{{.*}} {
+!CHECK: %[[I8_VAL:.*]] = fir.convert %{{.*}} : (i32) -> i8
+!CHECK: fir.store %[[I8_VAL]] to %[[IV:.*]]#1 : !fir.ref<i8>
+!CHECK: %[[IV_VAL:.*]] = fir.load %[[IV]]#0 : !fir.ref<i8>
+!CHECK: hlfir.assign %[[IV_VAL]] to %{{.*}}#0 temporary_lhs : i8, !fir.ref<i8>
+!CHECK: }
+ !$omp do lastprivate(i1)
+ do i1=1,8
+ enddo
+!$omp end do
+end subroutine
diff --git a/flang/test/Lower/branching-directive.f90 b/flang/test/Lower/branching-directive.f90
index a0a147f1053a..69270d7bcbe9 100644
--- a/flang/test/Lower/branching-directive.f90
+++ b/flang/test/Lower/branching-directive.f90
@@ -1,25 +1,88 @@
-!RUN: flang-new -fc1 -emit-hlfir -fopenmp -o - %s | FileCheck %s
+!RUN: bbc -emit-hlfir -fopenacc -fopenmp -o - %s | FileCheck %s
!https://github.com/llvm/llvm-project/issues/91526
+!CHECK-LABEL: func.func @_QPsimple1
!CHECK: cf.cond_br %{{[0-9]+}}, ^bb[[THEN:[0-9]+]], ^bb[[ELSE:[0-9]+]]
!CHECK: ^bb[[THEN]]:
-!CHECK: cf.br ^bb[[EXIT:[0-9]+]]
+!CHECK: omp.parallel
+!CHECK: cf.br ^bb[[ENDIF:[0-9]+]]
!CHECK: ^bb[[ELSE]]:
!CHECK: fir.call @_FortranAStopStatement
!CHECK: fir.unreachable
-!CHECK: ^bb[[EXIT]]:
+!CHECK: ^bb[[ENDIF]]:
+!CHECK: return
-subroutine simple(y)
+subroutine simple1(y)
implicit none
logical, intent(in) :: y
integer :: i
if (y) then
-!$omp parallel
+ !$omp parallel
i = 1
-!$omp end parallel
+ !$omp end parallel
else
stop 1
end if
-end subroutine simple
+end subroutine
+
+!CHECK-LABEL: func.func @_QPsimple2
+!CHECK: cf.cond_br %{{[0-9]+}}, ^bb[[THEN:[0-9]+]], ^bb[[ELSE:[0-9]+]]
+!CHECK: ^bb[[THEN]]:
+!CHECK: omp.parallel
+!CHECK: cf.br ^bb[[ENDIF:[0-9]+]]
+!CHECK: ^bb[[ELSE]]:
+!CHECK: fir.call @_FortranAStopStatement
+!CHECK: fir.unreachable
+!CHECK: ^bb[[ENDIF]]:
+!CHECK: fir.call @_FortranAioOutputReal64
+!CHECK: return
+subroutine simple2(x, yn)
+ implicit none
+ logical, intent(in) :: yn
+ integer, intent(in) :: x
+ integer :: i
+ real(8) :: E
+ E = 0d0
+
+ if (yn) then
+ !$omp parallel do private(i) reduction(+:E)
+ do i = 1, x
+ E = E + i
+ end do
+ !$omp end parallel do
+ else
+ stop 1
+ end if
+ print *, E
+end subroutine
+
+!CHECK-LABEL: func.func @_QPacccase
+!CHECK: fir.select_case %{{[0-9]+}} : i32 [{{.*}}, ^bb[[CASE1:[0-9]+]], {{.*}}, ^bb[[CASE2:[0-9]+]], {{.*}}, ^bb[[CASE3:[0-9]+]]]
+!CHECK: ^bb[[CASE1]]:
+!CHECK: acc.serial
+!CHECK: cf.br ^bb[[EXIT:[0-9]+]]
+!CHECK: ^bb[[CASE2]]:
+!CHECK: fir.call @_FortranAioOutputAscii
+!CHECK: cf.br ^bb[[EXIT]]
+!CHECK: ^bb[[CASE3]]:
+!CHECK: fir.call @_FortranAioOutputAscii
+!CHECK: cf.br ^bb[[EXIT]]
+!CHECK: ^bb[[EXIT]]:
+!CHECK: return
+subroutine acccase(var)
+ integer :: var
+ integer :: res(10)
+ select case (var)
+ case (1)
+ print *, "case 1"
+ !$acc serial
+ res(1) = 1
+ !$acc end serial
+ case (2)
+ print *, "case 2"
+ case default
+ print *, "case default"
+ end select
+end subroutine
diff --git a/flang/test/Lower/unstructured-control-flow.f90 b/flang/test/Lower/unstructured-control-flow.f90
new file mode 100644
index 000000000000..310293381e5f
--- /dev/null
+++ b/flang/test/Lower/unstructured-control-flow.f90
@@ -0,0 +1,31 @@
+!RUN: bbc -emit-hlfir -o - %s | FileCheck %s
+
+!CHECK-LABEL: func.func @_QPunstructured1
+!CHECK: fir.select %{{[0-9]+}} : i32 [{{.*}}, ^bb[[BLOCK3:[0-9]+]], {{.*}}, ^bb[[BLOCK4:[0-9]+]], {{.*}}, ^bb[[BLOCK5:[0-9]+]], {{.*}}, ^bb[[BLOCK1:[0-9]+]]]
+!CHECK: ^bb[[BLOCK1]]:
+!CHECK: cf.cond_br %{{[0-9]+}}, ^bb[[BLOCK2:[0-9]+]], ^bb[[BLOCK4]]
+!CHECK: ^bb[[BLOCK2]]:
+!CHECK: fir.if
+!CHECK: cf.br ^bb[[BLOCK3]]
+!CHECK: ^bb[[BLOCK3]]:
+!CHECK: %[[C10:[a-z0-9_]+]] = arith.constant 10 : i32
+!CHECK: arith.addi {{.*}}, %[[C10]]
+!CHECK: cf.br ^bb[[BLOCK4]]
+!CHECK: ^bb[[BLOCK4]]:
+!CHECK: %[[C100:[a-z0-9_]+]] = arith.constant 100 : i32
+!CHECK: arith.addi {{.*}}, %[[C100]]
+!CHECK: cf.br ^bb[[BLOCK5]]
+!CHECK: ^bb[[BLOCK5]]:
+!CHECK: %[[C1000:[a-z0-9_]+]] = arith.constant 1000 : i32
+!CHECK: arith.addi {{.*}}, %[[C1000]]
+!CHECK: return
+subroutine unstructured1(j, k)
+ goto (11, 22, 33) j-3 ! computed goto - an expression outside [1,3] is a nop
+ if (j == 2) goto 22
+ if (j == 1) goto 11
+ k = k + 1
+11 k = k + 10
+22 k = k + 100
+33 k = k + 1000
+end
+
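
In unstructured1 above, the computed GOTO dispatches on j-3: selector values 1, 2, and 3 jump to labels 11, 22, and 33, and any other value simply continues with the next statement; the labeled statements then fall through into one another. A C++ analogue of the test's control flow (illustrative only):

    // Computed GOTO as a switch over the selector; out-of-range
    // selectors take the default path (no jump).
    int unstructured1(int j, int k) {
      switch (j - 3) {
      case 1: goto l11;
      case 2: goto l22;
      case 3: goto l33;
      default: break; // outside [1,3]: continue with the next statement
      }
      if (j == 2) goto l22;
      if (j == 1) goto l11;
      k = k + 1;
    l11:
      k = k + 10;
    l22:
      k = k + 100;
    l33:
      k = k + 1000;
      return k;
    }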
diff --git a/flang/test/Parser/OpenMP/masked-unparse.f90 b/flang/test/Parser/OpenMP/masked-unparse.f90
new file mode 100644
index 000000000000..16d7ca68e3e1
--- /dev/null
+++ b/flang/test/Parser/OpenMP/masked-unparse.f90
@@ -0,0 +1,92 @@
+! RUN: %flang_fc1 -fdebug-unparse -fopenmp %s | FileCheck --ignore-case %s
+! RUN: %flang_fc1 -fdebug-dump-parse-tree -fopenmp %s | FileCheck --check-prefix="PARSE-TREE" %s
+
+! Check parsing of the masked directive with the filter clause.
+
+
+subroutine test_masked()
+ integer :: c = 1
+ !PARSE-TREE: OmpBeginBlockDirective
+ !PARSE-TREE-NEXT: OmpBlockDirective -> llvm::omp::Directive = masked
+ !CHECK: !$omp masked
+ !$omp masked
+ c = c + 1
+ !$omp end masked
+ !PARSE-TREE: OmpBeginBlockDirective
+ !PARSE-TREE-NEXT: OmpBlockDirective -> llvm::omp::Directive = masked
+ !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Filter -> Scalar -> Integer -> Expr = '1_4'
+ !PARSE-TREE-NEXT: LiteralConstant -> IntLiteralConstant = '1'
+ !CHECK: !$omp masked filter(1_4)
+ !$omp masked filter(1)
+ c = c + 2
+ !$omp end masked
+end subroutine
+
+subroutine test_masked_taskloop_simd()
+ integer :: i, j = 1
+ !PARSE-TREE: OmpBeginLoopDirective
+ !PARSE-TREE-NEXT: OmpLoopDirective -> llvm::omp::Directive = masked taskloop simd
+ !CHECK: !$omp masked taskloop simd
+ !$omp masked taskloop simd
+ do i=1,10
+ j = j + 1
+ end do
+ !$omp end masked taskloop simd
+end subroutine
+
+subroutine test_masked_taskloop
+ integer :: i, j = 1
+ !PARSE-TREE: OmpBeginLoopDirective
+ !PARSE-TREE-NEXT: OmpLoopDirective -> llvm::omp::Directive = masked taskloop
+ !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Filter -> Scalar -> Integer -> Expr = '2_4'
+ !PARSE-TREE-NEXT: LiteralConstant -> IntLiteralConstant = '2'
+ !CHECK: !$omp masked taskloop filter(2_4)
+ !$omp masked taskloop filter(2)
+ do i=1,10
+ j = j + 1
+ end do
+ !$omp end masked taskloop
+end subroutine
+
+subroutine test_parallel_masked
+ integer, parameter :: i = 1, j = 1
+ integer :: c = 2
+ !PARSE-TREE: OmpBeginBlockDirective
+ !PARSE-TREE-NEXT: OmpBlockDirective -> llvm::omp::Directive = parallel masked
+ !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Filter -> Scalar -> Integer -> Expr = '2_4'
+ !PARSE-TREE-NEXT: Add
+ !PARSE-TREE-NEXT: Expr = '1_4'
+ !PARSE-TREE-NEXT: Designator -> DataRef -> Name = 'i'
+ !PARSE-TREE-NEXT: Expr = '1_4'
+ !PARSE-TREE-NEXT: Designator -> DataRef -> Name = 'j'
+ !CHECK: !$omp parallel masked filter(2_4)
+ !$omp parallel masked filter(i+j)
+ c = c + 2
+ !$omp end parallel masked
+end subroutine
+
+subroutine test_parallel_masked_taskloop_simd
+ integer :: i, j = 1
+ !PARSE-TREE: OmpBeginLoopDirective
+ !PARSE-TREE-NEXT: OmpLoopDirective -> llvm::omp::Directive = parallel masked taskloop simd
+ !CHECK: !$omp parallel masked taskloop simd
+ !$omp parallel masked taskloop simd
+ do i=1,10
+ j = j + 1
+ end do
+ !$omp end parallel masked taskloop simd
+end subroutine
+
+subroutine test_parallel_masked_taskloop
+ integer :: i, j = 1
+ !PARSE-TREE: OmpBeginLoopDirective
+ !PARSE-TREE-NEXT: OmpLoopDirective -> llvm::omp::Directive = parallel masked taskloop
+ !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Filter -> Scalar -> Integer -> Expr = '2_4'
+ !PARSE-TREE-NEXT: LiteralConstant -> IntLiteralConstant = '2'
+ !CHECK: !$omp parallel masked taskloop filter(2_4)
+ !$omp parallel masked taskloop filter(2)
+ do i=1,10
+ j = j + 1
+ end do
+ !$omp end parallel masked taskloop
+end subroutine
diff --git a/flang/test/Semantics/OpenMP/do02.f90 b/flang/test/Semantics/OpenMP/do02.f90
index 9749991e4f96..d9f5c9963ca5 100644
--- a/flang/test/Semantics/OpenMP/do02.f90
+++ b/flang/test/Semantics/OpenMP/do02.f90
@@ -1,4 +1,4 @@
-! RUN: %S/test_errors.sh %s %t %flang -fopenmp
+! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
! XFAIL: *
! OpenMP Version 4.5
diff --git a/flang/test/Semantics/OpenMP/masked.f90 b/flang/test/Semantics/OpenMP/masked.f90
new file mode 100644
index 000000000000..1113853ee8a9
--- /dev/null
+++ b/flang/test/Semantics/OpenMP/masked.f90
@@ -0,0 +1,13 @@
+! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenmp
+
+subroutine test_masked()
+ integer :: c = 1
+ !ERROR: At most one FILTER clause can appear on the MASKED directive
+ !$omp masked filter(1) filter(2)
+ c = c + 1
+ !$omp end masked
+ !ERROR: NOWAIT clause is not allowed on the MASKED directive
+ !$omp masked nowait
+ c = c + 2
+ !$omp end masked
+end subroutine
diff --git a/flang/test/Semantics/OpenMP/reduction-modifiers.f90 b/flang/test/Semantics/OpenMP/reduction-modifiers.f90
new file mode 100644
index 000000000000..cf38200ba0a8
--- /dev/null
+++ b/flang/test/Semantics/OpenMP/reduction-modifiers.f90
@@ -0,0 +1,89 @@
+! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenmp -fopenmp-version=52
+
+subroutine mod_task1(x)
+ integer, intent(inout) :: x
+
+ !Correct: "parallel" directive.
+ !$omp parallel reduction(task, +:x)
+ do i = 1, 100
+ x = foo(i)
+ enddo
+ !$omp end parallel
+end
+
+subroutine mod_task2(x)
+ integer, intent(inout) :: x
+
+ !Correct: worksharing directive.
+ !$omp sections reduction(task, +:x)
+ do i = 1, 100
+ x = foo(i)
+ enddo
+ !$omp end sections
+end
+
+subroutine mod_task3(x)
+ integer, intent(inout) :: x
+
+ !ERROR: Modifier 'TASK' on REDUCTION clause is only allowed with PARALLEL or worksharing directive
+ !$omp simd reduction(task, +:x)
+ do i = 1, 100
+ x = foo(i)
+ enddo
+ !$omp end simd
+end
+
+subroutine mod_inscan1(x)
+ integer, intent(inout) :: x
+
+ !Correct: worksharing-loop directive
+ !$omp do reduction(inscan, +:x)
+ do i = 1, 100
+ x = foo(i)
+ enddo
+ !$omp end do
+end
+
+subroutine mod_inscan2(x)
+ integer, intent(inout) :: x
+
+ !Correct: worksharing-loop simd directive
+ !$omp do simd reduction(inscan, +:x)
+ do i = 1, 100
+ x = foo(i)
+ enddo
+ !$omp end do simd
+end
+
+subroutine mod_inscan3(x)
+ integer, intent(inout) :: x
+
+ !Correct: "simd" directive
+ !$omp simd reduction(inscan, +:x)
+ do i = 1, 100
+ x = foo(i)
+ enddo
+ !$omp end simd
+end
+
+subroutine mod_inscan4(x)
+ integer, intent(inout) :: x
+
+ !ERROR: Modifier 'INSCAN' on REDUCTION clause is only allowed with worksharing-loop, worksharing-loop simd, or SIMD directive
+ !$omp parallel reduction(inscan, +:x)
+ do i = 1, 100
+ x = foo(i)
+ enddo
+ !$omp end parallel
+end
+
+subroutine mod_inscan5(x)
+ integer, intent(inout) :: x
+
+ !ERROR: Modifier 'INSCAN' on REDUCTION clause is only allowed with worksharing-loop, worksharing-loop simd, or SIMD directive
+ !$omp sections reduction(inscan, +:x)
+ do i = 1, 100
+ x = foo(i)
+ enddo
+ !$omp end sections
+end
diff --git a/flang/test/Semantics/OpenMP/sections03.f90 b/flang/test/Semantics/OpenMP/sections03.f90
deleted file mode 100644
index 69775013ea82..000000000000
--- a/flang/test/Semantics/OpenMP/sections03.f90
+++ /dev/null
@@ -1,27 +0,0 @@
-! RUN: %python %S/../test_errors.py %s %flang -fopenmp
-!XFAIL: *
-! OpenMP version 5.0.0
-! 2.8.1 sections construct
-! Orphaned section directives are prohibited. That is, the section directives must appear within the sections construct and must not be encountered elsewhere in the sections region
-!TODO: Error in parsing. Make parser errors more informative. Until then, the test is XFAIL
-
-program OmpOrphanedSections
- use omp_lib
- integer counter
- counter = 0
- !CHECK: expected 'END'
- !CHECK: END PROGRAM statement
- !CHECK: in the context: main program
- !CHECK: expected 'END PROGRAM'
- !CHECK: in the context: END PROGRAM statement
- !CHECK: in the context: main program
- !$omp section
- print *, "An orphaned section containing a single statement"
- !$omp section
- counter = counter + 1
- print *, "An orphaned section containing multiple statements"
-!$omp sections
- !$omp section
- print *, "Not an orphan structured block"
-!$omp end sections
-end program OmpOrphanedSections
diff --git a/flang/test/Semantics/OpenMP/simd03.f90 b/flang/test/Semantics/OpenMP/simd03.f90
index 38f45da47748..8c90eba8fd8e 100644
--- a/flang/test/Semantics/OpenMP/simd03.f90
+++ b/flang/test/Semantics/OpenMP/simd03.f90
@@ -1,4 +1,4 @@
-! RUN: %S/test_errors.sh %s %t %flang -fopenmp
+! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
! XFAIL: *
! OpenMP Version 4.5
diff --git a/flang/test/Semantics/OpenMP/taskgroup01.f90 b/flang/test/Semantics/OpenMP/taskgroup01.f90
index 9de1df91bf3b..98c9aabffa23 100644
--- a/flang/test/Semantics/OpenMP/taskgroup01.f90
+++ b/flang/test/Semantics/OpenMP/taskgroup01.f90
@@ -47,4 +47,4 @@ use omp_lib
!$omp end taskgroup
!$omp end task
!$omp end parallel
-end program \ No newline at end of file
+end program
diff --git a/flang/test/Semantics/OpenMP/taskloop03.f90 b/flang/test/Semantics/OpenMP/taskloop03.f90
index 7e2e426a3fe7..3fe6a593bf49 100644
--- a/flang/test/Semantics/OpenMP/taskloop03.f90
+++ b/flang/test/Semantics/OpenMP/taskloop03.f90
@@ -1,4 +1,4 @@
-! RUN: %S/test_errors.sh %s %t %flang -fopenmp
+! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
! XFAIL: *
! OpenMP Version 4.5
diff --git a/flang/test/Semantics/bind-c12.f90 b/flang/test/Semantics/bind-c12.f90
index 55af8a93b5b5..01a8d0cdbc3d 100644
--- a/flang/test/Semantics/bind-c12.f90
+++ b/flang/test/Semantics/bind-c12.f90
@@ -26,8 +26,8 @@ end
subroutine subr5(p) bind(c)
interface
+ !WARNING: A dummy procedure of an interoperable procedure should be BIND(C)
subroutine p(c)
- !ERROR: An assumed-length dummy argument must not appear in a non-BIND(C) entry in a subprogram with an entry that must be interoperable
character(*), intent(in) :: c
end
end interface
@@ -52,8 +52,8 @@ end
subroutine subr8(p) bind(c)
interface
+ !WARNING: A dummy procedure of an interoperable procedure should be BIND(C)
subroutine p(n)
- !ERROR: A VALUE dummy argument must not appear in a non-BIND(C) entry of a subprogram with an entry that must be interoperable
integer, intent(in), value :: n
end
end interface
diff --git a/flang/test/Semantics/call05.f90 b/flang/test/Semantics/call05.f90
index 66d0a375fa56..71f2197067f7 100644
--- a/flang/test/Semantics/call05.f90
+++ b/flang/test/Semantics/call05.f90
@@ -1,4 +1,4 @@
-! RUN: %python %S/test_errors.py %s %flang_fc1
+! RUN: %python %S/test_errors.py %s %flang_fc1 -pedantic
! Test 15.5.2.5 constraints and restrictions for POINTER & ALLOCATABLE
! arguments when both sides of the call have the same attributes.
@@ -73,9 +73,9 @@ module m
call sma(ma) ! ok
call spp(pp) ! ok
call spa(pa) ! ok
- !ERROR: If a POINTER or ALLOCATABLE dummy or actual argument is polymorphic, both must be so
+ !PORTABILITY: If a POINTER or ALLOCATABLE actual argument is polymorphic, the corresponding dummy argument should also be so
call smp(pp)
- !ERROR: If a POINTER or ALLOCATABLE dummy or actual argument is polymorphic, both must be so
+ !PORTABILITY: If a POINTER or ALLOCATABLE actual argument is polymorphic, the corresponding dummy argument should also be so
call sma(pa)
!ERROR: If a POINTER or ALLOCATABLE dummy or actual argument is polymorphic, both must be so
call spp(mp)
diff --git a/flang/test/Semantics/call39.f90 b/flang/test/Semantics/call39.f90
index 41eeba100347..724c9f9c7b7d 100644
--- a/flang/test/Semantics/call39.f90
+++ b/flang/test/Semantics/call39.f90
@@ -1,4 +1,4 @@
-! RUN: %python %S/test_errors.py %s %flang_fc1 -pedantic -Werror
+! RUN: %python %S/test_errors.py %s %flang_fc1
! Tests actual/dummy pointer argument shape mismatches
module m
contains
@@ -11,6 +11,15 @@ module m
subroutine sa(p)
real, pointer, intent(in) :: p(..)
end
+ subroutine sao(p)
+ real, intent(in), optional, pointer :: p(..)
+ end
+ subroutine so(x)
+ real, intent(in), optional :: x(..)
+ end
+ subroutine soa(a)
+ real, intent(in), optional, allocatable :: a(..)
+ end
subroutine test
real, pointer :: a0, a1(:)
call s0(null(a0)) ! ok
@@ -23,9 +32,15 @@ module m
call s1(null(a1)) ! ok
call sa(null(a0)) ! ok
call sa(null(a1)) ! ok
- !ERROR: NULL() without MOLD= must not be associated with an assumed-rank dummy argument
- call sa(null())
- !ERROR: NULL() without MOLD= must not be associated with an assumed-rank dummy argument
+ !ERROR: NULL() without MOLD= must not be associated with an assumed-rank dummy argument that is ALLOCATABLE, POINTER, or non-OPTIONAL
call sa(null())
+ call sao ! ok
+ !ERROR: NULL() without MOLD= must not be associated with an assumed-rank dummy argument that is ALLOCATABLE, POINTER, or non-OPTIONAL
+ call sao(null())
+ call so ! ok
+ call so(null()) ! ok
+ call soa ! ok
+ !ERROR: NULL() without MOLD= must not be associated with an assumed-rank dummy argument that is ALLOCATABLE, POINTER, or non-OPTIONAL
+ call soa(null())
end
end
diff --git a/flang/test/Semantics/modfile03.f90 b/flang/test/Semantics/modfile03.f90
index db0caeab973f..eb3136f0aa8b 100644
--- a/flang/test/Semantics/modfile03.f90
+++ b/flang/test/Semantics/modfile03.f90
@@ -135,10 +135,8 @@ module m6d
end
!Expect: m6d.mod
!module m6d
-! use m6a,only:t1
! use m6a,only:t2=>t1
-! private::t1
-! type(t2),parameter::p=t1()
+! type(t2),parameter::p=t2()
!end
module m6e
@@ -178,3 +176,98 @@ end
! use m7a,only:x
! private::x
!end
+
+module m8a
+ private foo
+ type t
+ contains
+ procedure, nopass :: foo
+ end type
+ contains
+ pure integer function foo(n)
+ integer, intent(in) :: n
+ foo = n
+ end
+end
+!Expect: m8a.mod
+!module m8a
+!type::t
+!contains
+!procedure,nopass::foo
+!end type
+!private::foo
+!contains
+!pure function foo(n)
+!integer(4),intent(in)::n
+!integer(4)::foo
+!end
+!end
+
+module m8b
+ use m8a
+ contains
+ subroutine foo(x,a)
+ type(t), intent(in) :: x
+ real a(x%foo(10))
+ end
+end
+!Expect: m8b.mod
+!module m8b
+!use m8a,only:m8a$foo=>foo
+!use m8a,only:t
+!private::m8a$foo
+!contains
+!subroutine foo(x,a)
+!type(t),intent(in)::x
+!real(4)::a(1_8:int(m8a$foo(10_4),kind=8))
+!end
+!end
+
+module m9a
+ private
+ public t
+ type t
+ integer n
+ contains
+ procedure f
+ end type
+ contains
+ pure integer function f(x, k)
+ class(t), intent(in) :: x
+ integer, intent(in) :: k
+ f = x%n + k
+ end
+end
+!Expect: m9a.mod
+!module m9a
+!type::t
+!integer(4)::n
+!contains
+!procedure::f
+!end type
+!private::f
+!contains
+!pure function f(x,k)
+!class(t),intent(in)::x
+!integer(4),intent(in)::k
+!integer(4)::f
+!end
+!end
+
+module m9b
+ use m9a
+ contains
+ subroutine s(x, y)
+ class(t), intent(in) :: x
+ real y(x%f(x%n))
+ end
+end
+!Expect: m9b.mod
+!module m9b
+!use m9a,only:t
+!contains
+!subroutine s(x,y)
+!class(t),intent(in)::x
+!real(4)::y(1_8:int(x%f(x%n),kind=8))
+!end
+!end
diff --git a/flang/test/Semantics/procinterface05.f90 b/flang/test/Semantics/procinterface05.f90
new file mode 100644
index 000000000000..8c3afbffb2cf
--- /dev/null
+++ b/flang/test/Semantics/procinterface05.f90
@@ -0,0 +1,14 @@
+! RUN: %python %S/test_errors.py %s %flang_fc1
+interface a1
+ subroutine s1
+ interface a2
+ subroutine s2
+ !ERROR: Invalid specification expression: reference to local entity 'k'
+ real x(k)
+ end subroutine
+ end interface
+ !ERROR: Invalid specification expression: reference to local entity 'k'
+ real y(k)
+ end subroutine
+end interface
+end
diff --git a/flang/test/Semantics/shape.f90 b/flang/test/Semantics/shape.f90
index f43b81f2b44d..21e293031fd6 100644
--- a/flang/test/Semantics/shape.f90
+++ b/flang/test/Semantics/shape.f90
@@ -2,10 +2,12 @@
! Test comparisons that use the intrinsic SHAPE() as an operand
program testShape
contains
- subroutine sub1(arrayDummy)
- integer :: arrayDummy(:)
+ subroutine sub1(arrayDummy, assumedRank)
+ integer :: arrayDummy(:), assumedRank(..)
integer, allocatable :: arrayDeferred(:)
integer :: arrayLocal(2) = [88, 99]
+ integer, parameter :: aRrs = rank(shape(assumedRank))
+ integer(kind=merge(kind(1),-1,aRrs == 1)) :: test_aRrs
!ERROR: Dimension 1 of left operand has extent 1, but right operand has extent 0
!ERROR: Dimension 1 of left operand has extent 1, but right operand has extent 0
if (all(shape(arrayDummy)==shape(8))) then
@@ -45,5 +47,9 @@ contains
if (all(64==shape(arrayLocal))) then
print *, "hello"
end if
+ ! These can't be checked at compilation time
+ if (any(shape(assumedRank) == [1])) stop
+ if (any(lbound(assumedRank) == [1,2])) stop
+ if (any(ubound(assumedRank) == [1,2,3])) stop
end subroutine sub1
end program testShape
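The merge-kind declaration added above is a compile-time assertion idiom: when aRrs /= 1 the kind expression evaluates to -1, an invalid kind, so the declaration (and thus the test) fails to compile. A minimal C++ equivalent of the same trick, for exposition only:

  // Fail compilation unless the computed value matches the expectation.
  constexpr int aRrs = 1; // stands in for rank(shape(assumedRank))
  static_assert(aRrs == 1, "shape() of an assumed-rank dummy has rank 1");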
diff --git a/flang/test/Transforms/debug-complex-1.fir b/flang/test/Transforms/debug-complex-1.fir
new file mode 100644
index 000000000000..a3cbd767d8a5
--- /dev/null
+++ b/flang/test/Transforms/debug-complex-1.fir
@@ -0,0 +1,39 @@
+// RUN: fir-opt --add-debug-info --mlir-print-debuginfo %s | FileCheck %s
+
+// Check conversion of complex types of different sizes. Both fir and mlir
+// variants are checked.
+
+module attributes {fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", llvm.target_triple = "native"} {
+ func.func @test1(%x : !fir.complex<4>) -> !fir.complex<8> {
+ %1 = fir.convert %x : (!fir.complex<4>) -> !fir.complex<8>
+ return %1 : !fir.complex<8>
+ }loc(#loc1)
+ func.func @test2(%x : !fir.complex<4>) -> complex<f64> {
+ %1 = fir.convert %x : (!fir.complex<4>) -> complex<f64>
+ return %1 : complex<f64>
+ }loc(#loc2)
+ func.func @test3(%x : !fir.complex<4>) -> !fir.complex<16> {
+ %1 = fir.convert %x : (!fir.complex<4>) -> !fir.complex<16>
+ return %1 : !fir.complex<16>
+ }loc(#loc3)
+ func.func @test4(%x : !fir.complex<4>) -> complex<f128> {
+ %1 = fir.convert %x : (!fir.complex<4>) -> complex<f128>
+ return %1 : complex<f128>
+ }loc(#loc4)
+}
+#loc1 = loc("./simple.f90":2:1)
+#loc2 = loc("./simple.f90":5:1)
+#loc3 = loc("./simple.f90":8:1)
+#loc4 = loc("./simple.f90":11:1)
+
+// CHECK-DAG: #[[CMPX8:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "complex", sizeInBits = 128, encoding = DW_ATE_complex_float>
+// CHECK-DAG: #[[CMPX4:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "complex", sizeInBits = 64, encoding = DW_ATE_complex_float>
+// CHECK-DAG: #[[CMPX16:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "complex", sizeInBits = 256, encoding = DW_ATE_complex_float>
+
+// CHECK-DAG: #[[TY1:.*]] = #llvm.di_subroutine_type<{{.*}}types = #[[CMPX8]], #[[CMPX4]]>
+// CHECK-DAG: #[[TY2:.*]] = #llvm.di_subroutine_type<{{.*}}types = #[[CMPX16]], #[[CMPX4]]>
+
+// CHECK-DAG: #llvm.di_subprogram<{{.*}}name = "test1"{{.*}}type = #[[TY1]]>
+// CHECK-DAG: #llvm.di_subprogram<{{.*}}name = "test2"{{.*}}type = #[[TY1]]>
+// CHECK-DAG: #llvm.di_subprogram<{{.*}}name = "test3"{{.*}}type = #[[TY2]]>
+// CHECK-DAG: #llvm.di_subprogram<{{.*}}name = "test4"{{.*}}type = #[[TY2]]>
diff --git a/flang/test/Transforms/debug-fixed-array-type.fir b/flang/test/Transforms/debug-fixed-array-type.fir
new file mode 100644
index 000000000000..401c72541183
--- /dev/null
+++ b/flang/test/Transforms/debug-fixed-array-type.fir
@@ -0,0 +1,34 @@
+// RUN: fir-opt --add-debug-info --mlir-print-debuginfo %s | FileCheck %s
+
+module attributes {} {
+ func.func @_QQmain() attributes {fir.bindc_name = "mn"} {
+ %c7 = arith.constant 7 : index
+ %c8 = arith.constant 8 : index
+ %c6 = arith.constant 6 : index
+ %c5 = arith.constant 5 : index
+ %c2 = arith.constant 2 : index
+ %c3 = arith.constant 3 : index
+ %0 = fir.alloca !fir.array<3xi32> {bindc_name = "d1", uniq_name = "_QFEd1"}
+ %1 = fircg.ext_declare %0(%c3) {uniq_name = "_QFEd1"} : (!fir.ref<!fir.array<3xi32>>, index) -> !fir.ref<!fir.array<3xi32>> loc(#loc1)
+ %2 = fir.address_of(@_QFEd2) : !fir.ref<!fir.array<2x5xi32>>
+ %3 = fircg.ext_declare %2(%c2, %c5) {uniq_name = "_QFEd2"} : (!fir.ref<!fir.array<2x5xi32>>, index, index) -> !fir.ref<!fir.array<2x5xi32>> loc(#loc2)
+ %4 = fir.address_of(@_QFEd3) : !fir.ref<!fir.array<6x8x7xf32>>
+ %5 = fircg.ext_declare %4(%c6, %c8, %c7) {uniq_name = "_QFEd3"} : (!fir.ref<!fir.array<6x8x7xf32>>, index, index, index) -> !fir.ref<!fir.array<6x8x7xf32>> loc(#loc3)
+ return
+ } loc(#loc4)
+}
+
+#loc1 = loc("test.f90":5:1)
+#loc2 = loc("test.f90":6:11)
+#loc3 = loc("test.f90":7:11)
+#loc4 = loc("test.f90":2:8)
+
+
+// CHECK-DAG: #[[INT:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "integer", sizeInBits = 32, encoding = DW_ATE_signed>
+// CHECK-DAG: #[[REAL:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "real", sizeInBits = 32, encoding = DW_ATE_float>
+// CHECK-DAG: #[[D1TY:.*]] = #llvm.di_composite_type<tag = DW_TAG_array_type{{.*}}baseType = #[[INT]], elements = #llvm.di_subrange<count = 3 : i64, lowerBound = 1 : i64>>
+// CHECK-DAG: #[[D2TY:.*]] = #llvm.di_composite_type<tag = DW_TAG_array_type{{.*}}baseType = #[[INT]], elements = #llvm.di_subrange<count = 2 : i64, lowerBound = 1 : i64>, #llvm.di_subrange<count = 5 : i64, lowerBound = 1 : i64>>
+// CHECK-DAG: #[[D3TY:.*]] = #llvm.di_composite_type<tag = DW_TAG_array_type{{.*}}baseType = #[[REAL]], elements = #llvm.di_subrange<count = 6 : i64, lowerBound = 1 : i64>, #llvm.di_subrange<count = 8 : i64, lowerBound = 1 : i64>, #llvm.di_subrange<count = 7 : i64, lowerBound = 1 : i64>>
+// CHECK-DAG: #llvm.di_local_variable<{{.*}}name = "d1"{{.*}}type = #[[D1TY]]>
+// CHECK-DAG: #llvm.di_local_variable<{{.*}}name = "d2"{{.*}}type = #[[D2TY]]>
+// CHECK-DAG: #llvm.di_local_variable<{{.*}}name = "d3"{{.*}}type = #[[D3TY]]>
diff --git a/flang/test/Transforms/debug-module-1.fir b/flang/test/Transforms/debug-module-1.fir
new file mode 100644
index 000000000000..822ae01b99aa
--- /dev/null
+++ b/flang/test/Transforms/debug-module-1.fir
@@ -0,0 +1,40 @@
+// RUN: fir-opt --add-debug-info --mlir-print-debuginfo %s | FileCheck %s
+
+
+module attributes {} {
+ fir.global @_QMhelperEgli : i32 {
+ %0 = fir.zero_bits i32
+ fir.has_value %0 : i32
+ } loc(#loc1)
+ fir.global @_QMhelperEglr : f32 {
+ %0 = fir.zero_bits f32
+ fir.has_value %0 : f32
+ } loc(#loc2)
+ func.func @_QMhelperPtest() {
+ %c67_i32 = arith.constant 67 : i32
+ %cst = arith.constant 1.234000e+01 : f32
+ %0 = fir.address_of(@_QMhelperEgli) : !fir.ref<i32>
+ %1 = fir.address_of(@_QMhelperEglr) : !fir.ref<f32>
+ fir.store %cst to %1 : !fir.ref<f32>
+ fir.store %c67_i32 to %0 : !fir.ref<i32>
+ return
+ } loc(#loc3)
+}
+#loc1 = loc("test.f90":12:11)
+#loc2 = loc("test.f90":15:8)
+#loc3 = loc("test.f90":20:5)
+
+// CHECK-DAG: #[[I4:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "integer", sizeInBits = 32, encoding = DW_ATE_signed>
+// CHECK-DAG: #[[R4:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "real", sizeInBits = 32, encoding = DW_ATE_float>
+// CHECK-DAG: #[[CU:.*]] = #llvm.di_compile_unit<{{.*}}>
+// CHECK-DAG: #[[MOD:.*]] = #llvm.di_module<{{.*}}scope = #[[CU]], name = "helper"{{.*}}>
+// CHECK-DAG: #[[LOC1:.*]] = loc("{{.*}}test.f90":12{{.*}})
+// CHECK-DAG: #[[GLI:.*]] = #llvm.di_global_variable<scope = #[[MOD]], name = "gli", linkageName = "_QMhelperEgli"{{.*}}line = 12, type = #[[I4]], isDefined = true>
+// CHECK-DAG: #[[LOC2:.*]] = loc("{{.*}}test.f90":15{{.*}})
+// CHECK-DAG: #[[GLR:.*]] = #llvm.di_global_variable<scope = #[[MOD]], name = "glr", linkageName = "_QMhelperEglr"{{.*}}line = 15, type = #[[R4]], isDefined = true>
+// CHECK-DAG: #[[LOC3:.*]] = loc("{{.*}}test.f90":20{{.*}})
+// CHECK-DAG: #[[TEST:.*]] = #llvm.di_subprogram<{{.*}}compileUnit = #[[CU]], scope = #[[MOD]], name = "test", linkageName = "_QMhelperPtest"{{.*}}line = 20, scopeLine = 20{{.*}}>
+// CHECK-DAG: loc(fused<#[[GLI]]>[#[[LOC1]]])
+// CHECK-DAG: loc(fused<#[[GLR]]>[#[[LOC2]]])
+// CHECK-DAG: loc(fused<#[[TEST]]>[#[[LOC3]]])
+
diff --git a/flang/test/Transforms/debug-module-2.fir b/flang/test/Transforms/debug-module-2.fir
new file mode 100644
index 000000000000..6acdc1df23d2
--- /dev/null
+++ b/flang/test/Transforms/debug-module-2.fir
@@ -0,0 +1,35 @@
+// RUN: fir-opt --fir-to-llvm-ir="target=x86_64-unknown-linux-gnu" --mlir-print-debuginfo %s | FileCheck %s
+
+module {
+ fir.global @_QMhelperEgli : i32 {
+ %0 = fir.zero_bits i32
+ fir.has_value %0 : i32
+ } loc(#loc3)
+ fir.global @_QMhelperEglr : f32 {
+ %0 = fir.zero_bits f32
+ fir.has_value %0 : f32
+ } loc(#loc4)
+}
+#di_basic_type = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "integer", sizeInBits = 32, encoding = DW_ATE_signed>
+#di_basic_type1 = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "real", sizeInBits = 32, encoding = DW_ATE_float>
+
+#di_file = #llvm.di_file<"test.f90" in "">
+#di_subroutine_type = #llvm.di_subroutine_type<callingConvention = DW_CC_normal>
+
+#di_compile_unit = #llvm.di_compile_unit<id = distinct[0]<>, sourceLanguage = DW_LANG_Fortran95, file = #di_file, producer = "flang version 19.0.0 (/home/haqadeer/work/llvm-project/flang 5d5c73cad421bdca6e43e1cc10704ff160f1a33e)", isOptimized = false, emissionKind = Full>
+#di_module = #llvm.di_module<file = #di_file, scope = #di_compile_unit, name = "helper", line = 11>
+#di_global_variable = #llvm.di_global_variable<scope = #di_module, name = "gli", linkageName = "_QMhelperEgli", file = #di_file, line = 12, type = #di_basic_type, isDefined = true>
+#di_global_variable1 = #llvm.di_global_variable<scope = #di_module, name = "glr", linkageName = "_QMhelperEglr", file = #di_file, line = 15, type = #di_basic_type1, isDefined = true>
+
+#loc1 = loc("test.f90":12:11)
+#loc2 = loc("test.f90":15:8)
+#loc3 = loc(fused<#di_global_variable>[#loc1])
+#loc4 = loc(fused<#di_global_variable1>[#loc2])
+
+
+// CHECK-DAG: #[[GLI:.*]] = #llvm.di_global_variable<{{.*}}name = "gli", linkageName = "_QMhelperEgli"{{.*}}>
+// CHECK-DAG: #[[GLR:.*]] = #llvm.di_global_variable<{{.*}}name = "glr", linkageName = "_QMhelperEglr"{{.*}}>
+// CHECK-DAG: #[[GLIE:.*]] = #llvm.di_global_variable_expression<var = #[[GLI]]>
+// CHECK-DAG: #[[GLRE:.*]] = #llvm.di_global_variable_expression<var = #[[GLR]]>
+// CHECK-DAG: llvm.mlir.global{{.*}}@_QMhelperEgli() {{{.*}}dbg_expr = #[[GLIE]]}
+// CHECK-DAG: llvm.mlir.global{{.*}}@_QMhelperEglr() {{{.*}}dbg_expr = #[[GLRE]]}
diff --git a/flang/tools/f18/CMakeLists.txt b/flang/tools/f18/CMakeLists.txt
index 35e1cdafd3ae..477119965160 100644
--- a/flang/tools/f18/CMakeLists.txt
+++ b/flang/tools/f18/CMakeLists.txt
@@ -96,6 +96,7 @@ if (NOT CMAKE_CROSSCOMPILING)
endif()
add_custom_target(module_files ALL DEPENDS ${MODULE_FILES})
+set_target_properties(module_files PROPERTIES FOLDER "Flang/Resources")
# TODO Move this to a more suitable location
# Copy the generated omp_lib.h header file, if OpenMP support has been configured.
diff --git a/flang/unittests/CMakeLists.txt b/flang/unittests/CMakeLists.txt
index 72d37ebeb853..945067fed4f8 100644
--- a/flang/unittests/CMakeLists.txt
+++ b/flang/unittests/CMakeLists.txt
@@ -8,7 +8,7 @@ if (FLANG_EXPERIMENTAL_CUDA_RUNTIME)
endif()
add_custom_target(FlangUnitTests)
-set_target_properties(FlangUnitTests PROPERTIES FOLDER "Flang Unit Tests")
+set_target_properties(FlangUnitTests PROPERTIES FOLDER "Flang/Tests")
function(add_flang_unittest_offload_properties target)
# Set CUDA_RESOLVE_DEVICE_SYMBOLS.
@@ -55,6 +55,7 @@ function(add_flang_nongtest_unittest test_name)
endif()
add_executable(${test_name}${suffix} ${test_name}.cpp)
+ set_target_properties(${test_name}${suffix} PROPERTIES FOLDER "Flang/Tests/Unit")
if (LLVM_LINK_LLVM_DYLIB AND NOT ARG_DISABLE_LLVM_LINK_LLVM_DYLIB)
set(llvm_libs LLVM)
diff --git a/flang/unittests/Evaluate/CMakeLists.txt b/flang/unittests/Evaluate/CMakeLists.txt
index 4658d8d3345b..52eb385f4763 100644
--- a/flang/unittests/Evaluate/CMakeLists.txt
+++ b/flang/unittests/Evaluate/CMakeLists.txt
@@ -3,6 +3,7 @@ add_library(FortranEvaluateTesting
testing.cpp
fp-testing.cpp
)
+set_target_properties(FortranEvaluateTesting PROPERTIES FOLDER "Flang/Tests")
if (LLVM_LINK_LLVM_DYLIB)
set(llvm_libs LLVM)
else()
diff --git a/flang/unittests/Runtime/CMakeLists.txt b/flang/unittests/Runtime/CMakeLists.txt
index f7caacad3a59..ed047b08ada3 100644
--- a/flang/unittests/Runtime/CMakeLists.txt
+++ b/flang/unittests/Runtime/CMakeLists.txt
@@ -25,6 +25,7 @@ add_flang_unittest(FlangRuntimeTests
Reduction.cpp
RuntimeCrashTest.cpp
Stop.cpp
+ Support.cpp
Time.cpp
TemporaryStack.cpp
Transformational.cpp
diff --git a/flang/unittests/Runtime/Support.cpp b/flang/unittests/Runtime/Support.cpp
new file mode 100644
index 000000000000..fa2a233e1e65
--- /dev/null
+++ b/flang/unittests/Runtime/Support.cpp
@@ -0,0 +1,58 @@
+//===-- flang/unittests/Runtime/Support.cpp ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "flang/Runtime/support.h"
+#include "gtest/gtest.h"
+#include "tools.h"
+#include "flang/Runtime/descriptor.h"
+
+using namespace Fortran::runtime;
+using Fortran::common::TypeCategory;
+TEST(CopyAndUpdateDescriptor, Basic) {
+ auto x{MakeArray<TypeCategory::Integer, 4>(
+ std::vector<int>{2, 3}, std::vector<std::int32_t>{0, 1, 2, 3, 4, 5})};
+ x->GetDimension(0).SetLowerBound(11);
+ x->GetDimension(1).SetLowerBound(12);
+
+ StaticDescriptor<2, false> statDesc;
+ Descriptor &result{statDesc.descriptor()};
+
+ RTNAME(CopyAndUpdateDescriptor)
+ (result, *x, nullptr, CFI_attribute_pointer, LowerBoundModifier::Preserve);
+ ASSERT_EQ(result.rank(), 2);
+ EXPECT_EQ(result.raw().base_addr, x->raw().base_addr);
+ EXPECT_TRUE(result.IsPointer());
+ EXPECT_EQ(result.GetDimension(0).Extent(), x->GetDimension(0).Extent());
+ EXPECT_EQ(
+ result.GetDimension(0).LowerBound(), x->GetDimension(0).LowerBound());
+ EXPECT_EQ(result.GetDimension(1).Extent(), x->GetDimension(1).Extent());
+ EXPECT_EQ(
+ result.GetDimension(1).LowerBound(), x->GetDimension(1).LowerBound());
+
+ RTNAME(CopyAndUpdateDescriptor)
+ (result, *x, nullptr, CFI_attribute_allocatable,
+ LowerBoundModifier::SetToZeroes);
+ ASSERT_EQ(result.rank(), 2);
+ EXPECT_EQ(result.raw().base_addr, x->raw().base_addr);
+ EXPECT_TRUE(result.IsAllocatable());
+ EXPECT_EQ(result.GetDimension(0).Extent(), x->GetDimension(0).Extent());
+ EXPECT_EQ(result.GetDimension(0).LowerBound(), 0);
+ EXPECT_EQ(result.GetDimension(1).Extent(), x->GetDimension(1).Extent());
+ EXPECT_EQ(result.GetDimension(1).LowerBound(), 0);
+
+ RTNAME(CopyAndUpdateDescriptor)
+ (result, *x, nullptr, CFI_attribute_other, LowerBoundModifier::SetToOnes);
+ ASSERT_EQ(result.rank(), 2);
+ EXPECT_EQ(result.raw().base_addr, x->raw().base_addr);
+ EXPECT_FALSE(result.IsAllocatable());
+ EXPECT_FALSE(result.IsPointer());
+ EXPECT_EQ(result.GetDimension(0).Extent(), x->GetDimension(0).Extent());
+ EXPECT_EQ(result.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(result.GetDimension(1).Extent(), x->GetDimension(1).Extent());
+ EXPECT_EQ(result.GetDimension(1).LowerBound(), 1);
+}
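The three calls above cover each LowerBoundModifier value: Preserve keeps the incoming bounds (pointer result), SetToZeroes zeroes them (allocatable result), and SetToOnes normalizes them to 1 (other dummies). A plain C++ sketch of the per-dimension rule these EXPECT_EQs verify (a hypothetical helper, not the runtime's implementation):

  #include <cstdint>

  enum class LowerBoundModifier { Preserve, SetToOnes, SetToZeroes };

  // Hypothetical restatement of the bound rewriting asserted above.
  std::int64_t newLowerBound(std::int64_t original, LowerBoundModifier mod) {
    switch (mod) {
    case LowerBoundModifier::Preserve:
      return original; // CFI_attribute_pointer
    case LowerBoundModifier::SetToZeroes:
      return 0; // CFI_attribute_allocatable
    case LowerBoundModifier::SetToOnes:
      return 1; // CFI_attribute_other
    }
    return original;
  }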
diff --git a/libc/CMakeLists.txt b/libc/CMakeLists.txt
index 175efd89d67e..f35471a06a53 100644
--- a/libc/CMakeLists.txt
+++ b/libc/CMakeLists.txt
@@ -1,4 +1,5 @@
cmake_minimum_required(VERSION 3.20.0)
+set(LLVM_SUBPROJECT_TITLE "libc")
# Include LLVM's cmake policies.
if(NOT DEFINED LLVM_COMMON_CMAKE_UTILS)
diff --git a/libc/cmake/modules/LLVMLibCObjectRules.cmake b/libc/cmake/modules/LLVMLibCObjectRules.cmake
index 0649e9f7a767..134c5143d6d6 100644
--- a/libc/cmake/modules/LLVMLibCObjectRules.cmake
+++ b/libc/cmake/modules/LLVMLibCObjectRules.cmake
@@ -246,9 +246,6 @@ function(create_entrypoint_object fq_target_name)
if(NOT ADD_ENTRYPOINT_OBJ_SRCS)
message(FATAL_ERROR "`add_entrypoint_object` rule requires SRCS to be specified.")
endif()
- if(NOT ADD_ENTRYPOINT_OBJ_HDRS)
- message(FATAL_ERROR "`add_entrypoint_object` rule requires HDRS to be specified.")
- endif()
if(NOT ADD_ENTRYPOINT_OBJ_CXX_STANDARD)
set(ADD_ENTRYPOINT_OBJ_CXX_STANDARD ${CMAKE_CXX_STANDARD})
endif()
diff --git a/libc/config/baremetal/arm/entrypoints.txt b/libc/config/baremetal/arm/entrypoints.txt
index 4e3d1cb9f533..7fb82c60a1bb 100644
--- a/libc/config/baremetal/arm/entrypoints.txt
+++ b/libc/config/baremetal/arm/entrypoints.txt
@@ -183,6 +183,10 @@ set(TARGET_LIBC_ENTRYPOINTS
# time.h entrypoints
libc.src.time.difftime
+
+ # internal entrypoints
+ libc.startup.baremetal.init
+ libc.startup.baremetal.fini
)
set(TARGET_LIBM_ENTRYPOINTS
diff --git a/libc/config/baremetal/riscv/entrypoints.txt b/libc/config/baremetal/riscv/entrypoints.txt
index 7efd9bcd5b3c..b769b43f03a2 100644
--- a/libc/config/baremetal/riscv/entrypoints.txt
+++ b/libc/config/baremetal/riscv/entrypoints.txt
@@ -183,6 +183,10 @@ set(TARGET_LIBC_ENTRYPOINTS
# time.h entrypoints
libc.src.time.difftime
+
+ # internal entrypoints
+ libc.startup.baremetal.init
+ libc.startup.baremetal.fini
)
set(TARGET_LIBM_ENTRYPOINTS
diff --git a/libc/docs/ctype.rst b/libc/docs/ctype.rst
index 7d77dadccc9b..828785c9b670 100644
--- a/libc/docs/ctype.rst
+++ b/libc/docs/ctype.rst
@@ -1,7 +1,11 @@
.. include:: check.rst
-ctype.h Functions
-=================
+=======
+ctype.h
+=======
+
+Functions
+=========
.. list-table::
:widths: auto
@@ -10,46 +14,61 @@ ctype.h Functions
* - Function
- Implemented
- - Standard
+ - C23 Standard Section
+ - POSIX.1-2017 Standard Section
* - isalnum
- |check|
- 7.4.1.1
+ -
* - isalpha
- |check|
- 7.4.1.2
+ -
* - isblank
- |check|
- 7.4.1.3
+ -
* - iscntrl
- |check|
- 7.4.1.4
+ -
* - isdigit
- |check|
- 7.4.1.5
+ -
* - isgraph
- |check|
- 7.4.1.6
+ -
* - islower
- |check|
- 7.4.1.7
+ -
* - isprint
- |check|
- 7.4.1.8
+ -
* - ispunct
- |check|
- 7.4.1.9
+ -
* - isspace
- |check|
- 7.4.1.10
+ -
* - isupper
- |check|
- 7.4.1.11
+ -
* - isxdigit
- |check|
- 7.4.1.12
+ -
* - tolower
- |check|
- 7.4.2.1
+ -
* - toupper
- |check|
- 7.4.2.2
+ -
diff --git a/libc/docs/fenv.rst b/libc/docs/fenv.rst
index 1dee5515e117..e7a5a3fb2c81 100644
--- a/libc/docs/fenv.rst
+++ b/libc/docs/fenv.rst
@@ -1,7 +1,11 @@
.. include:: check.rst
-fenv.h Functions
-================
+======
+fenv.h
+======
+
+Macros
+======
.. list-table::
:widths: auto
@@ -10,55 +14,162 @@ fenv.h Functions
 * - Macro
- Implemented
- - Standard
+ - C23 Standard Section
+ - POSIX.1-2017 Standard Section
+ * - FE_ALL_EXCEPT
+ - |check|
+ - 7.6.12
+ -
+ * - FE_DEC_DOWNWARD
+ -
+ - 7.6.14
+ -
+ * - FE_DEC_TONEAREST
+ -
+ - 7.6.14
+ -
+ * - FE_DEC_TONEARESTFROMZERO
+ -
+ - 7.6.14
+ -
+ * - FE_DEC_TOWARDZERO
+ -
+ - 7.6.14
+ -
+ * - FE_DEC_UPWARD
+ -
+ - 7.6.14
+ -
+ * - FE_DFL_ENV
+ - |check|
+ - 7.6.17
+ -
+ * - FE_DFL_MODE
+ -
+ - 7.6.11
+ -
+ * - FE_DIVBYZERO
+ - |check|
+ - 7.6.9
+ -
+ * - FE_DOWNWARD
+ -
+ - 7.6.13
+ -
+ * - FE_INEXACT
+ - |check|
+ - 7.6.9
+ -
+ * - FE_INVALID
+ - |check|
+ - 7.6.9
+ -
+ * - FE_OVERFLOW
+ - |check|
+ - 7.6.9
+ -
+ * - FE_TONEAREST
+ - |check|
+ - 7.6.13
+ -
+ * - FE_TONEARESTFROMZERO
+ -
+ - 7.6.13
+ -
+ * - FE_TOWARDZERO
+ - |check|
+ - 7.6.13
+ -
+ * - FE_UNDERFLOW
+ - |check|
+ - 7.6.9
+ -
+ * - FE_UPWARD
+ - |check|
+ - 7.6.13
+ -
+ * - __STDC_VERSION_FENV_H__
+ -
+ - 7.6.5
+ -
+
+Functions
+=========
+
+.. list-table::
+ :widths: auto
+ :align: center
+ :header-rows: 1
+
+ * - Function
+ - Implemented
+ - C23 Standard Section
+ - POSIX.1-2017 Standard Section
* - fe_dec_getround
-
- 7.6.5.3
+ -
* - fe_dec_setround
-
- 7.6.5.6
+ -
* - feclearexcept
- |check|
- 7.6.4.1
+ -
* - fegetenv
- |check|
- 7.6.6.1
+ -
* - fegetexceptflag
- |check|
- 7.6.4.2
+ -
* - fegetmode
-
- 7.6.5.1
+ -
* - fegetround
- |check|
- 7.6.5.2
+ -
* - feholdexcept
- |check|
- 7.6.6.2
+ -
* - feraiseexcept
- |check|
- 7.6.4.3
+ -
* - fesetenv
- |check|
- 7.6.6.3
+ -
* - fesetexcept
- |check|
- 7.6.4.4
+ -
* - fesetexceptflag
- |check|
- 7.6.4.5
+ -
* - fesetmode
-
- 7.6.5.4
+ -
* - fesetround
- |check|
- 7.6.5.5
+ -
* - fetestexcept
- |check|
- 7.6.4.7
+ -
* - fetestexceptflag
- |check|
- 7.6.4.6
+ -
* - feupdateenv
- |check|
- 7.6.6.4
+ -
diff --git a/libc/docs/signal.rst b/libc/docs/signal.rst
index 7903bb439cb3..d1a7cb609560 100644
--- a/libc/docs/signal.rst
+++ b/libc/docs/signal.rst
@@ -1,7 +1,160 @@
.. include:: check.rst
-signal.h Functions
-==================
+========
+signal.h
+========
+
+Macros
+======
+
+.. list-table::
+ :widths: auto
+ :align: center
+ :header-rows: 1
+
+ * - Macro
+ - Implemented
+ - C23 Standard Section
+ - POSIX.1-2017 Standard Section
+ * - SIGABRT
+ - |check|
+ - 7.14.3
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGALRM
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGBUS
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGCHLD
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGCONT
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGFPE
+ - |check|
+ - 7.14.3
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGHUP
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGILL
+ - |check|
+ - 7.14.3
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGINT
+ - |check|
+ - 7.14.3
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGKILL
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGPIPE
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGPOLL
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGPROF
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGQUIT
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGRTMAX
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGRTMIN
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGSEGV
+ - |check|
+ - 7.14.3
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGSTOP
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGSYS
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGTERM
+ - |check|
+ - 7.14.3
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGTRAP
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGTSTP
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGTTIN
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGTTOU
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGURG
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGUSR1
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGUSR2
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGVTALRM
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGXCPU
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIGXFSZ
+ - |check|
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIG_DFL
+ - |check|
+ - 7.14.3
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIG_ERR
+ - |check|
+ - 7.14.3
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIG_HOLD
+ -
+ -
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+ * - SIG_IGN
+ - |check|
+ - 7.14.3
+ - https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html
+
+Functions
+=========
.. list-table::
:widths: auto
@@ -10,34 +163,45 @@ signal.h Functions
* - Function
- Implemented
- - Standard
+ - C23 Standard Section
+ - POSIX.1-2017 Standard Section
* - kill
- |check|
-
+ - https://pubs.opengroup.org/onlinepubs/9699919799/functions/kill.html
* - raise
- |check|
- 7.14.2.1
+ - https://pubs.opengroup.org/onlinepubs/9699919799/functions/raise.html
* - sigaction
- |check|
-
+ - https://pubs.opengroup.org/onlinepubs/9699919799/functions/sigaction.html
* - sigaddset
- |check|
-
+ - https://pubs.opengroup.org/onlinepubs/9699919799/functions/sigaddset.html
* - sigaltstack
- |check|
-
+ - https://pubs.opengroup.org/onlinepubs/9699919799/functions/sigaltstack.html
* - sigdelset
- |check|
-
+ - https://pubs.opengroup.org/onlinepubs/9699919799/functions/sigdelset.html
* - sigemptyset
- |check|
-
+ - https://pubs.opengroup.org/onlinepubs/9699919799/functions/sigemptyset.html
* - sigfillset
- |check|
-
+ - https://pubs.opengroup.org/onlinepubs/9699919799/functions/sigfillset.html
* - signal
- |check|
- 7.14.1.1
+ - https://pubs.opengroup.org/onlinepubs/9699919799/functions/signal.html
* - sigprocmask
- |check|
-
+ - https://pubs.opengroup.org/onlinepubs/9699919799/functions/sigprocmask.html
diff --git a/libc/docs/stdbit.rst b/libc/docs/stdbit.rst
index 0a12b2b6d7b5..71f9bbfd1d00 100644
--- a/libc/docs/stdbit.rst
+++ b/libc/docs/stdbit.rst
@@ -1,7 +1,96 @@
.. include:: check.rst
-stdbit.h Functions
-==================
+========
+stdbit.h
+========
+
+Macros
+======
+
+.. list-table::
+ :widths: auto
+ :align: center
+ :header-rows: 1
+
+ * - Macro
+ - Implemented
+ - C23 Standard Section
+ - POSIX.1-2017 Standard Section
+ * - __STDC_ENDIAN_BIG__
+ - |check|
+ - 7.18.2.2
+ -
+ * - __STDC_ENDIAN_LITTLE__
+ - |check|
+ - 7.18.2.2
+ -
+ * - __STDC_ENDIAN_NATIVE__
+ - |check|
+ - 7.18.2.2
+ -
+ * - __STDC_VERSION_STDBIT_H__
+ - |check|
+ - 7.18.1.2
+ -
+ * - stdc_bit_ceil
+ - |check|
+ - 7.18.16.1
+ -
+ * - stdc_bit_floor
+ - |check|
+ - 7.18.15.1
+ -
+ * - stdc_bit_width
+ - |check|
+ - 7.18.14.1
+ -
+ * - stdc_count_ones
+ - |check|
+ - 7.18.12.1
+ -
+ * - stdc_count_zeros
+ - |check|
+ - 7.18.11.1
+ -
+ * - stdc_first_leading_one
+ - |check|
+ - 7.18.8.1
+ -
+ * - stdc_first_leading_zero
+ - |check|
+ - 7.18.7.1
+ -
+ * - stdc_first_trailing_one
+ - |check|
+ - 7.18.10.1
+ -
+ * - stdc_first_trailing_zero
+ - |check|
+ - 7.18.9.1
+ -
+ * - stdc_has_single_bit
+ - |check|
+ - 7.18.13.1
+ -
+ * - stdc_leading_ones
+ - |check|
+ - 7.18.4.1
+ -
+ * - stdc_leading_zeros
+ - |check|
+ - 7.18.3.1
+ -
+ * - stdc_trailing_ones
+ - |check|
+ - 7.18.6.1
+ -
+ * - stdc_trailing_zeros
+ - |check|
+ - 7.18.5.1
+ -
+
+Functions
+=========
.. list-table::
:widths: auto
@@ -10,214 +99,285 @@ stdbit.h Functions
* - Function
- Implemented
- - Standard
+ - C23 Standard Section
+ - POSIX.1-2017 Standard Section
* - stdc_bit_ceil_uc
- |check|
- 7.18.16
+ -
* - stdc_bit_ceil_ui
- |check|
- 7.18.16
+ -
* - stdc_bit_ceil_ul
- |check|
- 7.18.16
+ -
* - stdc_bit_ceil_ull
- |check|
- 7.18.16
+ -
* - stdc_bit_ceil_us
- |check|
- 7.18.16
+ -
* - stdc_bit_floor_uc
- |check|
- 7.18.15
+ -
* - stdc_bit_floor_ui
- |check|
- 7.18.15
+ -
* - stdc_bit_floor_ul
- |check|
- 7.18.15
+ -
* - stdc_bit_floor_ull
- |check|
- 7.18.15
+ -
* - stdc_bit_floor_us
- |check|
- 7.18.15
+ -
* - stdc_bit_width_uc
- |check|
- 7.18.14
+ -
* - stdc_bit_width_ui
- |check|
- 7.18.14
+ -
* - stdc_bit_width_ul
- |check|
- 7.18.14
+ -
* - stdc_bit_width_ull
- |check|
- 7.18.14
+ -
* - stdc_bit_width_us
- |check|
- 7.18.14
+ -
* - stdc_count_ones_uc
- |check|
- 7.18.12
+ -
* - stdc_count_ones_ui
- |check|
- 7.18.12
+ -
* - stdc_count_ones_ul
- |check|
- 7.18.12
+ -
* - stdc_count_ones_ull
- |check|
- 7.18.12
+ -
* - stdc_count_ones_us
- |check|
- 7.18.12
+ -
* - stdc_count_zeros_uc
- |check|
- 7.18.11
+ -
* - stdc_count_zeros_ui
- |check|
- 7.18.11
+ -
* - stdc_count_zeros_ul
- |check|
- 7.18.11
+ -
* - stdc_count_zeros_ull
- |check|
- 7.18.11
+ -
* - stdc_count_zeros_us
- |check|
- 7.18.11
+ -
* - stdc_first_leading_one_uc
- |check|
- 7.18.8
+ -
* - stdc_first_leading_one_ui
- |check|
- 7.18.8
+ -
* - stdc_first_leading_one_ul
- |check|
- 7.18.8
+ -
* - stdc_first_leading_one_ull
- |check|
- 7.18.8
+ -
* - stdc_first_leading_one_us
- |check|
- 7.18.8
+ -
* - stdc_first_leading_zero_uc
- |check|
- 7.18.7
+ -
* - stdc_first_leading_zero_ui
- |check|
- 7.18.7
+ -
* - stdc_first_leading_zero_ul
- |check|
- 7.18.7
+ -
* - stdc_first_leading_zero_ull
- |check|
- 7.18.7
+ -
* - stdc_first_leading_zero_us
- |check|
- 7.18.7
+ -
* - stdc_first_trailing_one_uc
- |check|
- 7.18.10
+ -
* - stdc_first_trailing_one_ui
- |check|
- 7.18.10
+ -
* - stdc_first_trailing_one_ul
- |check|
- 7.18.10
+ -
* - stdc_first_trailing_one_ull
- |check|
- 7.18.10
+ -
* - stdc_first_trailing_one_us
- |check|
- 7.18.10
+ -
* - stdc_first_trailing_zero_uc
- |check|
- 7.18.9
+ -
* - stdc_first_trailing_zero_ui
- |check|
- 7.18.9
+ -
* - stdc_first_trailing_zero_ul
- |check|
- 7.18.9
+ -
* - stdc_first_trailing_zero_ull
- |check|
- 7.18.9
+ -
* - stdc_first_trailing_zero_us
- |check|
- 7.18.9
+ -
* - stdc_has_single_bit_uc
- |check|
- 7.18.13
+ -
* - stdc_has_single_bit_ui
- |check|
- 7.18.13
+ -
* - stdc_has_single_bit_ul
- |check|
- 7.18.13
+ -
* - stdc_has_single_bit_ull
- |check|
- 7.18.13
+ -
* - stdc_has_single_bit_us
- |check|
- 7.18.13
+ -
* - stdc_leading_ones_uc
- |check|
- 7.18.4
+ -
* - stdc_leading_ones_ui
- |check|
- 7.18.4
+ -
* - stdc_leading_ones_ul
- |check|
- 7.18.4
+ -
* - stdc_leading_ones_ull
- |check|
- 7.18.4
+ -
* - stdc_leading_ones_us
- |check|
- 7.18.4
+ -
* - stdc_leading_zeros_uc
- |check|
- 7.18.3
+ -
* - stdc_leading_zeros_ui
- |check|
- 7.18.3
+ -
* - stdc_leading_zeros_ul
- |check|
- 7.18.3
+ -
* - stdc_leading_zeros_ull
- |check|
- 7.18.3
+ -
* - stdc_leading_zeros_us
- |check|
- 7.18.3
+ -
* - stdc_trailing_ones_uc
- |check|
- 7.18.6
+ -
* - stdc_trailing_ones_ui
- |check|
- 7.18.6
+ -
* - stdc_trailing_ones_ul
- |check|
- 7.18.6
+ -
* - stdc_trailing_ones_ull
- |check|
- 7.18.6
+ -
* - stdc_trailing_ones_us
- |check|
- 7.18.6
+ -
* - stdc_trailing_zeros_uc
- |check|
- 7.18.5
+ -
* - stdc_trailing_zeros_ui
- |check|
- 7.18.5
+ -
* - stdc_trailing_zeros_ul
- |check|
- 7.18.5
+ -
* - stdc_trailing_zeros_ull
- |check|
- 7.18.5
+ -
* - stdc_trailing_zeros_us
- |check|
- 7.18.5
+ -
diff --git a/libc/docs/threads.rst b/libc/docs/threads.rst
index 78e17e9fdec3..63cd6c40e145 100644
--- a/libc/docs/threads.rst
+++ b/libc/docs/threads.rst
@@ -1,7 +1,32 @@
.. include:: check.rst
-threads.h Functions
-===================
+=========
+threads.h
+=========
+
+Macros
+======
+
+.. list-table::
+ :widths: auto
+ :align: center
+ :header-rows: 1
+
+ * - Macro
+ - Implemented
+ - C23 Standard Section
+ - POSIX.1-2017 Standard Section
+ * - ONCE_FLAG_INIT
+ -
+ - 7.28.1.3
+ -
+ * - TSS_DTOR_ITERATIONS
+ -
+ - 7.28.1.3
+ -
+
+Functions
+=========
.. list-table::
:widths: auto
@@ -10,79 +35,105 @@ threads.h Functions
* - Function
- Implemented
- - Standard
+ - C23 Standard Section
+ - POSIX.1-2017 Standard Section
* - call_once
- |check|
- 7.28.2.1
+ -
* - cnd_broadcast
- |check|
- 7.28.3.1
+ -
* - cnd_destroy
- |check|
- 7.28.3.2
+ -
* - cnd_init
- |check|
- 7.28.3.3
+ -
* - cnd_signal
- |check|
- 7.28.3.4
+ -
* - cnd_timedwait
-
- 7.28.3.5
+ -
* - cnd_wait
- |check|
- 7.28.3.6
+ -
* - mtx_destroy
- |check|
- 7.28.4.1
+ -
* - mtx_init
- |check|
- 7.28.4.2
+ -
* - mtx_lock
- |check|
- 7.28.4.3
+ -
* - mtx_timedlock
-
- 7.28.4.4
+ -
* - mtx_trylock
-
- 7.28.4.5
+ -
* - mtx_unlock
- |check|
- 7.28.4.6
+ -
* - thrd_create
- |check|
- 7.28.5.1
+ -
* - thrd_current
- |check|
- 7.28.5.2
+ -
* - thrd_detach
- |check|
- 7.28.5.3
+ -
* - thrd_equal
- |check|
- 7.28.5.4
+ -
* - thrd_exit
- |check|
- 7.28.5.5
+ -
* - thrd_join
- |check|
- 7.28.5.6
+ -
* - thrd_sleep
-
- 7.28.5.7
+ -
* - thrd_yield
-
- 7.28.5.8
+ -
* - tss_create
- |check|
- 7.28.6.1
+ -
* - tss_delete
- |check|
- 7.28.6.2
+ -
* - tss_get
- |check|
- 7.28.6.3
+ -
* - tss_set
- |check|
- 7.28.6.4
+ -
diff --git a/libc/include/llvm-libc-macros/linux/CMakeLists.txt b/libc/include/llvm-libc-macros/linux/CMakeLists.txt
index a07803103eef..461b190c02ea 100644
--- a/libc/include/llvm-libc-macros/linux/CMakeLists.txt
+++ b/libc/include/llvm-libc-macros/linux/CMakeLists.txt
@@ -1,13 +1,7 @@
-add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/mips)
-add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/sparc)
-
add_header(
error_number_macros
HDR
error-number-macros.h
- DEPENDS
- .mips.error_number_macros
- .sparc.error_number_macros
)
add_header(
diff --git a/libc/include/llvm-libc-macros/linux/error-number-macros.h b/libc/include/llvm-libc-macros/linux/error-number-macros.h
index 4c8b3feb3dc3..1643a70918da 100644
--- a/libc/include/llvm-libc-macros/linux/error-number-macros.h
+++ b/libc/include/llvm-libc-macros/linux/error-number-macros.h
@@ -1,13 +1,6 @@
#ifndef LLVM_LIBC_MACROS_LINUX_ERROR_NUMBER_MACROS_H
#define LLVM_LIBC_MACROS_LINUX_ERROR_NUMBER_MACROS_H
-#if defined(__mips__)
-#include "mips/error-number-macros.h"
-
-#elif defined(__sparc__)
-#include "sparc/error-number-macros.h"
-
-#else
#ifndef ECANCELED
#define ECANCELED 125
#endif // ECANCELED
@@ -27,6 +20,5 @@
#ifndef EHWPOISON
#define EHWPOISON 133
#endif // EHWPOISON
-#endif
#endif // LLVM_LIBC_MACROS_LINUX_ERROR_NUMBER_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/mips/CMakeLists.txt b/libc/include/llvm-libc-macros/linux/mips/CMakeLists.txt
deleted file mode 100644
index eee4cfd19396..000000000000
--- a/libc/include/llvm-libc-macros/linux/mips/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-add_header(
- error_number_macros
- HDR
- error-number-macros.h
-)
diff --git a/libc/include/llvm-libc-macros/linux/mips/error-number-macros.h b/libc/include/llvm-libc-macros/linux/mips/error-number-macros.h
deleted file mode 100644
index af2a4243e3ce..000000000000
--- a/libc/include/llvm-libc-macros/linux/mips/error-number-macros.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef LLVM_LIBC_MACROS_LINUX_MIPS_ERROR_NUMBER_MACROS_H
-#define LLVM_LIBC_MACROS_LINUX_MIPS_ERROR_NUMBER_MACROS_H
-
-#ifndef ECANCELED
-#define ECANCELED 158
-#endif // ECANCELED
-
-#ifndef EOWNERDEAD
-#define EOWNERDEAD 165
-#endif // EOWNERDEAD
-
-#ifndef ENOTRECOVERABLE
-#define ENOTRECOVERABLE 166
-#endif // ENOTRECOVERABLE
-
-#ifndef ERFKILL
-#define ERFKILL 167
-#endif // ERFKILL
-
-#ifndef EHWPOISON
-#define EHWPOISON 168
-#endif // EHWPOISON
-
-#endif // LLVM_LIBC_MACROS_LINUX_MIPS_ERROR_NUMBER_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/sparc/CMakeLists.txt b/libc/include/llvm-libc-macros/linux/sparc/CMakeLists.txt
deleted file mode 100644
index eee4cfd19396..000000000000
--- a/libc/include/llvm-libc-macros/linux/sparc/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-add_header(
- error_number_macros
- HDR
- error-number-macros.h
-)
diff --git a/libc/include/llvm-libc-macros/linux/sparc/error-number-macros.h b/libc/include/llvm-libc-macros/linux/sparc/error-number-macros.h
deleted file mode 100644
index 76a1408bf760..000000000000
--- a/libc/include/llvm-libc-macros/linux/sparc/error-number-macros.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef LLVM_LIBC_MACROS_LINUX_SPARC_ERROR_NUMBER_MACROS_H
-#define LLVM_LIBC_MACROS_LINUX_SPARC_ERROR_NUMBER_MACROS_H
-
-#ifndef ECANCELED
-#define ECANCELED 127
-#endif // ECANCELED
-
-#ifndef EOWNERDEAD
-#define EOWNERDEAD 132
-#endif // EOWNERDEAD
-
-#ifndef ENOTRECOVERABLE
-#define ENOTRECOVERABLE 133
-#endif // ENOTRECOVERABLE
-
-#ifndef ERFKILL
-#define ERFKILL 134
-#endif // ERFKILL
-
-#ifndef EHWPOISON
-#define EHWPOISON 135
-#endif // EHWPOISON
-
-#endif // LLVM_LIBC_MACROS_LINUX_SPARC_ERROR_NUMBER_MACROS_H
diff --git a/libc/src/__support/threads/CMakeLists.txt b/libc/src/__support/threads/CMakeLists.txt
index 34412be4dfed..9ea0b59befe7 100644
--- a/libc/src/__support/threads/CMakeLists.txt
+++ b/libc/src/__support/threads/CMakeLists.txt
@@ -71,3 +71,12 @@ if(TARGET libc.src.__support.threads.${LIBC_TARGET_OS}.callonce)
.${LIBC_TARGET_OS}.callonce
)
endif()
+
+if(TARGET libc.src.__support.threads.${LIBC_TARGET_OS}.CndVar)
+ add_object_library(
+ CndVar
+ ALIAS
+ DEPENDS
+ .${LIBC_TARGET_OS}.CndVar
+ )
+endif()
diff --git a/libc/src/__support/threads/CndVar.h b/libc/src/__support/threads/CndVar.h
new file mode 100644
index 000000000000..baa2a686c57d
--- /dev/null
+++ b/libc/src/__support/threads/CndVar.h
@@ -0,0 +1,52 @@
+//===-- A platform-independent abstraction layer for cond vars -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC___SUPPORT_THREADS_CNDVAR_H
+#define LLVM_LIBC_SRC___SUPPORT_THREADS_CNDVAR_H
+
+#include "src/__support/threads/linux/futex_utils.h" // Futex
+#include "src/__support/threads/mutex.h" // Mutex
+
+#include <stdint.h> // uint32_t
+
+namespace LIBC_NAMESPACE {
+
+struct CndVar {
+ enum CndWaiterStatus : uint32_t {
+ WS_Waiting = 0xE,
+ WS_Signalled = 0x5,
+ };
+
+ struct CndWaiter {
+ Futex futex_word = WS_Waiting;
+ CndWaiter *next = nullptr;
+ };
+
+ CndWaiter *waitq_front;
+ CndWaiter *waitq_back;
+ Mutex qmtx;
+
+ static int init(CndVar *cv) {
+ cv->waitq_front = cv->waitq_back = nullptr;
+ auto err = Mutex::init(&cv->qmtx, false, false, false);
+ return err == MutexError::NONE ? 0 : -1;
+ }
+
+ static void destroy(CndVar *cv) {
+ cv->waitq_front = cv->waitq_back = nullptr;
+ }
+
+ // Returns 0 on success, -1 on error.
+ int wait(Mutex *m);
+ void notify_one();
+ void broadcast();
+};
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC___SUPPORT_THREADS_CNDVAR_H
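A minimal usage sketch for this class (hypothetical caller code; the real users are the cnd_* entrypoints under libc/src/threads/):

  #include "src/__support/threads/CndVar.h"
  #include "src/__support/threads/mutex.h"

  using LIBC_NAMESPACE::CndVar;
  using LIBC_NAMESPACE::Mutex;

  // The caller must already hold |m|; wait() unlocks it, sleeps on the
  // futex, and re-acquires it before returning (0 on success, -1 on error).
  int waitForEvent(CndVar &cv, Mutex &m) { return cv.wait(&m); }

  // Wake at most one queued waiter.
  void postEvent(CndVar &cv) { cv.notify_one(); }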
diff --git a/libc/src/__support/threads/linux/CMakeLists.txt b/libc/src/__support/threads/linux/CMakeLists.txt
index d3353f6b3ff8..39c4ad20201c 100644
--- a/libc/src/__support/threads/linux/CMakeLists.txt
+++ b/libc/src/__support/threads/linux/CMakeLists.txt
@@ -63,3 +63,16 @@ add_object_library(
DEPENDS
.futex_utils
)
+
+add_object_library(
+ CndVar
+ SRCS
+ CndVar.cpp
+ HDRS
+ ../CndVar.h
+ DEPENDS
+ libc.include.sys_syscall
+ libc.src.__support.OSUtil.osutil
+ libc.src.__support.threads.linux.futex_word_type
+ libc.src.__support.threads.mutex
+)
diff --git a/libc/src/__support/threads/linux/CndVar.cpp b/libc/src/__support/threads/linux/CndVar.cpp
new file mode 100644
index 000000000000..daf56bca1ed2
--- /dev/null
+++ b/libc/src/__support/threads/linux/CndVar.cpp
@@ -0,0 +1,103 @@
+//===-- Utility condition variable class ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/threads/CndVar.h"
+#include "src/__support/OSUtil/syscall.h" // syscall_impl
+#include "src/__support/threads/linux/futex_word.h" // FutexWordType
+#include "src/__support/threads/mutex.h" // Mutex, MutexLock
+
+#include <sys/syscall.h> // For syscall numbers.
+
+namespace LIBC_NAMESPACE {
+
+int CndVar::wait(Mutex *m) {
+ // The goal is to perform "unlock |m| and wait" in an
+ // atomic operation. However, it is not possible to do it
+ // in the true sense so we do it in spirit. Before unlocking
+ // |m|, a new waiter object is added to the waiter queue with
+ // the waiter queue locked. Iff a signalling thread signals
+ // the waiter before the waiter actually starts waiting, the
+ // wait operation will not begin at all and the waiter immediately
+ // returns.
+
+ CndWaiter waiter;
+ {
+ MutexLock ml(&qmtx);
+ CndWaiter *old_back = nullptr;
+ if (waitq_front == nullptr) {
+ waitq_front = waitq_back = &waiter;
+ } else {
+ old_back = waitq_back;
+ waitq_back->next = &waiter;
+ waitq_back = &waiter;
+ }
+
+ if (m->unlock() != MutexError::NONE) {
+ // If we do not remove the queued up waiter before returning,
+ // then another thread can potentially signal a non-existing
+ // waiter. Note also that we do this with |qmtx| locked. This
+ // ensures that another thread will not signal the withdrawing
+ // waiter.
+ waitq_back = old_back;
+ if (waitq_back == nullptr)
+ waitq_front = nullptr;
+ else
+ waitq_back->next = nullptr;
+
+ return -1;
+ }
+ }
+
+ waiter.futex_word.wait(WS_Waiting, cpp::nullopt, true);
+
+ // At this point, if locking |m| fails, we can simply return as the
+ // queued up waiter would have been removed from the queue.
+ auto err = m->lock();
+ return err == MutexError::NONE ? 0 : -1;
+}
+
+void CndVar::notify_one() {
+ // We don't use an RAII locker in this method as we want to unlock
+ // |qmtx| and signal the waiter using a single FUTEX_WAKE_OP signal.
+ qmtx.lock();
+  if (waitq_front == nullptr) {
+    // No waiters: release |qmtx| and return rather than falling through
+    // and dereferencing the null |waitq_front| below.
+    qmtx.unlock();
+    return;
+  }
+
+ CndWaiter *first = waitq_front;
+ waitq_front = waitq_front->next;
+ if (waitq_front == nullptr)
+ waitq_back = nullptr;
+
+ qmtx.futex_word = FutexWordType(Mutex::LockState::Free);
+
+ // this is a special WAKE_OP, so we use syscall directly
+ LIBC_NAMESPACE::syscall_impl<long>(
+ FUTEX_SYSCALL_ID, &qmtx.futex_word.val, FUTEX_WAKE_OP, 1, 1,
+ &first->futex_word.val,
+ FUTEX_OP(FUTEX_OP_SET, WS_Signalled, FUTEX_OP_CMP_EQ, WS_Waiting));
+}
+
+void CndVar::broadcast() {
+ MutexLock ml(&qmtx);
+ uint32_t dummy_futex_word;
+ CndWaiter *waiter = waitq_front;
+ waitq_front = waitq_back = nullptr;
+ while (waiter != nullptr) {
+ // FUTEX_WAKE_OP is used instead of just FUTEX_WAKE as it allows us to
+ // atomically update the waiter status to WS_Signalled before waking
+ // up the waiter. A dummy location is used for the other futex of
+ // FUTEX_WAKE_OP.
+ LIBC_NAMESPACE::syscall_impl<long>(
+ FUTEX_SYSCALL_ID, &dummy_futex_word, FUTEX_WAKE_OP, 1, 1,
+ &waiter->futex_word.val,
+ FUTEX_OP(FUTEX_OP_SET, WS_Signalled, FUTEX_OP_CMP_EQ, WS_Waiting));
+ waiter = waiter->next;
+ }
+}
+
+} // namespace LIBC_NAMESPACE
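FUTEX_WAKE_OP, as used in both methods above, bundles "modify a second futex word, then conditionally wake on it" into one syscall. A sketch of the argument encoding, mirroring the FUTEX_OP macro from <linux/futex.h> (consult the kernel header for the authoritative definition):

  #include <cstdint>

  // futex(uaddr, FUTEX_WAKE_OP, nr_wake, nr_wake2, uaddr2, op) atomically:
  //   old = *uaddr2; *uaddr2 = oparg;     // FUTEX_OP_SET, here WS_Signalled
  //   wake up to nr_wake waiters on uaddr;
  //   if (old == cmparg)                  // FUTEX_OP_CMP_EQ, here WS_Waiting
  //     wake up to nr_wake2 waiters on uaddr2.
  constexpr std::uint32_t futexOp(std::uint32_t op, std::uint32_t oparg,
                                  std::uint32_t cmp, std::uint32_t cmparg) {
    return ((op & 0xf) << 28) | ((cmp & 0xf) << 24) |
           ((oparg & 0xfff) << 12) | (cmparg & 0xfff);
  }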
diff --git a/libc/src/setjmp/x86_64/CMakeLists.txt b/libc/src/setjmp/x86_64/CMakeLists.txt
index 9899c00e7c4a..ae84322a6540 100644
--- a/libc/src/setjmp/x86_64/CMakeLists.txt
+++ b/libc/src/setjmp/x86_64/CMakeLists.txt
@@ -9,6 +9,11 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
-fno-omit-frame-pointer
+ # TODO: Remove once one of these lands:
+ # https://github.com/llvm/llvm-project/pull/87837
+ # https://github.com/llvm/llvm-project/pull/88054
+ # https://github.com/llvm/llvm-project/pull/88157
+ -ftrivial-auto-var-init=uninitialized
)
add_entrypoint_object(
diff --git a/libc/src/threads/linux/CMakeLists.txt b/libc/src/threads/linux/CMakeLists.txt
index 68b7106c2052..a5a02e47aab3 100644
--- a/libc/src/threads/linux/CMakeLists.txt
+++ b/libc/src/threads/linux/CMakeLists.txt
@@ -1,7 +1,6 @@
add_header_library(
threads_utils
HDRS
- CndVar.h
Futex.h
DEPENDS
libc.include.sys_syscall
@@ -20,8 +19,8 @@ add_entrypoint_object(
HDRS
../cnd_init.h
DEPENDS
- .threads_utils
libc.include.threads
+ libc.src.__support.threads.CndVar
)
add_entrypoint_object(
@@ -31,8 +30,8 @@ add_entrypoint_object(
HDRS
../cnd_destroy.h
DEPENDS
- .threads_utils
libc.include.threads
+ libc.src.__support.threads.CndVar
)
add_entrypoint_object(
@@ -42,9 +41,9 @@ add_entrypoint_object(
HDRS
../cnd_wait.h
DEPENDS
- .threads_utils
libc.include.threads
libc.src.__support.threads.mutex
+ libc.src.__support.threads.CndVar
)
add_entrypoint_object(
@@ -54,8 +53,8 @@ add_entrypoint_object(
HDRS
../cnd_signal.h
DEPENDS
- .threads_utils
libc.include.threads
+ libc.src.__support.threads.CndVar
)
add_entrypoint_object(
@@ -65,6 +64,6 @@ add_entrypoint_object(
HDRS
../cnd_broadcast.h
DEPENDS
- .threads_utils
libc.include.threads
+ libc.src.__support.threads.CndVar
)
diff --git a/libc/src/threads/linux/CndVar.h b/libc/src/threads/linux/CndVar.h
deleted file mode 100644
index c08ffa393856..000000000000
--- a/libc/src/threads/linux/CndVar.h
+++ /dev/null
@@ -1,148 +0,0 @@
-//===-- Utility condition variable class ------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_SRC_THREADS_LINUX_CNDVAR_H
-#define LLVM_LIBC_SRC_THREADS_LINUX_CNDVAR_H
-
-#include "src/__support/CPP/atomic.h"
-#include "src/__support/CPP/mutex.h" // lock_guard
-#include "src/__support/CPP/optional.h"
-#include "src/__support/OSUtil/syscall.h" // For syscall functions.
-#include "src/__support/threads/linux/futex_utils.h"
-#include "src/__support/threads/mutex.h"
-
-#include <linux/futex.h> // For futex operations.
-#include <stdint.h>
-#include <sys/syscall.h> // For syscall numbers.
-#include <threads.h> // For values like thrd_success etc.
-
-namespace LIBC_NAMESPACE {
-
-struct CndVar {
- enum CndWaiterStatus : uint32_t {
- WS_Waiting = 0xE,
- WS_Signalled = 0x5,
- };
-
- struct CndWaiter {
- Futex futex_word = WS_Waiting;
- CndWaiter *next = nullptr;
- };
-
- CndWaiter *waitq_front;
- CndWaiter *waitq_back;
- Mutex qmtx;
-
- static int init(CndVar *cv) {
- cv->waitq_front = cv->waitq_back = nullptr;
- auto err = Mutex::init(&cv->qmtx, false, false, false);
- return err == MutexError::NONE ? thrd_success : thrd_error;
- }
-
- static void destroy(CndVar *cv) {
- cv->waitq_front = cv->waitq_back = nullptr;
- }
-
- int wait(Mutex *m) {
- // The goal is to perform "unlock |m| and wait" in an
- // atomic operation. However, it is not possible to do it
- // in the true sense so we do it in spirit. Before unlocking
- // |m|, a new waiter object is added to the waiter queue with
- // the waiter queue locked. Iff a signalling thread signals
- // the waiter before the waiter actually starts waiting, the
- // wait operation will not begin at all and the waiter immediately
- // returns.
-
- CndWaiter waiter;
- {
- cpp::lock_guard ml(qmtx);
- CndWaiter *old_back = nullptr;
- if (waitq_front == nullptr) {
- waitq_front = waitq_back = &waiter;
- } else {
- old_back = waitq_back;
- waitq_back->next = &waiter;
- waitq_back = &waiter;
- }
-
- if (m->unlock() != MutexError::NONE) {
- // If we do not remove the queued up waiter before returning,
- // then another thread can potentially signal a non-existing
- // waiter. Note also that we do this with |qmtx| locked. This
- // ensures that another thread will not signal the withdrawing
- // waiter.
- waitq_back = old_back;
- if (waitq_back == nullptr)
- waitq_front = nullptr;
- else
- waitq_back->next = nullptr;
-
- return thrd_error;
- }
- }
-
- waiter.futex_word.wait(WS_Waiting, cpp::nullopt, true);
-
- // At this point, if locking |m| fails, we can simply return as the
- // queued up waiter would have been removed from the queue.
- auto err = m->lock();
- return err == MutexError::NONE ? thrd_success : thrd_error;
- }
-
- int notify_one() {
- // We don't use an RAII locker in this method as we want to unlock
- // |qmtx| and signal the waiter using a single FUTEX_WAKE_OP signal.
- qmtx.lock();
- if (waitq_front == nullptr) {
- qmtx.unlock();
- return thrd_success;
- }
-
- CndWaiter *first = waitq_front;
- waitq_front = waitq_front->next;
- if (waitq_front == nullptr)
- waitq_back = nullptr;
-
- qmtx.futex_word = FutexWordType(Mutex::LockState::Free);
-
- // this is a special WAKE_OP, so we use syscall directly
- LIBC_NAMESPACE::syscall_impl<long>(
- FUTEX_SYSCALL_ID, &qmtx.futex_word.val, FUTEX_WAKE_OP, 1, 1,
- &first->futex_word.val,
- FUTEX_OP(FUTEX_OP_SET, WS_Signalled, FUTEX_OP_CMP_EQ, WS_Waiting));
- return thrd_success;
- }
-
- int broadcast() {
- cpp::lock_guard ml(qmtx);
- uint32_t dummy_futex_word;
- CndWaiter *waiter = waitq_front;
- waitq_front = waitq_back = nullptr;
- while (waiter != nullptr) {
- // FUTEX_WAKE_OP is used instead of just FUTEX_WAKE as it allows us to
- // atomically update the waiter status to WS_Signalled before waking
- // up the waiter. A dummy location is used for the other futex of
- // FUTEX_WAKE_OP.
- LIBC_NAMESPACE::syscall_impl<long>(
- FUTEX_SYSCALL_ID, &dummy_futex_word, FUTEX_WAKE_OP, 1, 1,
- &waiter->futex_word.val,
- FUTEX_OP(FUTEX_OP_SET, WS_Signalled, FUTEX_OP_CMP_EQ, WS_Waiting));
- waiter = waiter->next;
- }
- return thrd_success;
- }
-};
-
-static_assert(sizeof(CndVar) == sizeof(cnd_t),
- "Mismatch in the size of the "
- "internal representation of condition variable and the public "
- "cnd_t type.");
-
-} // namespace LIBC_NAMESPACE
-
-#endif // LLVM_LIBC_SRC_THREADS_LINUX_CNDVAR_H
diff --git a/libc/src/threads/linux/cnd_broadcast.cpp b/libc/src/threads/linux/cnd_broadcast.cpp
index 180ac6d68ee8..a56aaa21ee12 100644
--- a/libc/src/threads/linux/cnd_broadcast.cpp
+++ b/libc/src/threads/linux/cnd_broadcast.cpp
@@ -6,16 +6,21 @@
//
//===----------------------------------------------------------------------===//
-#include "CndVar.h"
-
#include "src/threads/cnd_broadcast.h"
#include "src/__support/common.h"
+#include "src/__support/threads/CndVar.h"
+
+// TODO: https://github.com/llvm/llvm-project/issues/92968
+#include <threads.h> // cnd_t, thrd_error, thrd_success
namespace LIBC_NAMESPACE {
+static_assert(sizeof(CndVar) == sizeof(cnd_t));
+
LLVM_LIBC_FUNCTION(int, cnd_broadcast, (cnd_t * cond)) {
CndVar *cndvar = reinterpret_cast<CndVar *>(cond);
- return cndvar->broadcast();
+ cndvar->broadcast();
+ return thrd_success;
}
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/threads/linux/cnd_destroy.cpp b/libc/src/threads/linux/cnd_destroy.cpp
index 08eb3a1057b1..2b03b18c48e4 100644
--- a/libc/src/threads/linux/cnd_destroy.cpp
+++ b/libc/src/threads/linux/cnd_destroy.cpp
@@ -6,13 +6,16 @@
//
//===----------------------------------------------------------------------===//
-#include "CndVar.h"
-
#include "src/threads/cnd_destroy.h"
#include "src/__support/common.h"
+#include "src/__support/threads/CndVar.h"
+
+#include <threads.h> // cnd_t
namespace LIBC_NAMESPACE {
+static_assert(sizeof(CndVar) == sizeof(cnd_t));
+
LLVM_LIBC_FUNCTION(void, cnd_destroy, (cnd_t * cond)) {
CndVar *cndvar = reinterpret_cast<CndVar *>(cond);
CndVar::destroy(cndvar);
diff --git a/libc/src/threads/linux/cnd_init.cpp b/libc/src/threads/linux/cnd_init.cpp
index 5e3f360b1d2b..d3d2c8a57d82 100644
--- a/libc/src/threads/linux/cnd_init.cpp
+++ b/libc/src/threads/linux/cnd_init.cpp
@@ -6,16 +6,19 @@
//
//===----------------------------------------------------------------------===//
-#include "CndVar.h"
-
#include "src/threads/cnd_init.h"
#include "src/__support/common.h"
+#include "src/__support/threads/CndVar.h"
+
+#include <threads.h> // cnd_t, thrd_error, thrd_success
namespace LIBC_NAMESPACE {
+static_assert(sizeof(CndVar) == sizeof(cnd_t));
+
LLVM_LIBC_FUNCTION(int, cnd_init, (cnd_t * cond)) {
CndVar *cndvar = reinterpret_cast<CndVar *>(cond);
- return CndVar::init(cndvar);
+ return CndVar::init(cndvar) ? thrd_error : thrd_success;
}
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/threads/linux/cnd_signal.cpp b/libc/src/threads/linux/cnd_signal.cpp
index dba01abdefbc..f144013e0882 100644
--- a/libc/src/threads/linux/cnd_signal.cpp
+++ b/libc/src/threads/linux/cnd_signal.cpp
@@ -6,16 +6,20 @@
//
//===----------------------------------------------------------------------===//
-#include "CndVar.h"
-
#include "src/threads/cnd_signal.h"
#include "src/__support/common.h"
+#include "src/__support/threads/CndVar.h"
+
+#include <threads.h> // cnd_t, thrd_error, thrd_success
namespace LIBC_NAMESPACE {
+static_assert(sizeof(CndVar) == sizeof(cnd_t));
+
LLVM_LIBC_FUNCTION(int, cnd_signal, (cnd_t * cond)) {
CndVar *cndvar = reinterpret_cast<CndVar *>(cond);
- return cndvar->notify_one();
+ cndvar->notify_one();
+ return thrd_success;
}
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/threads/linux/cnd_wait.cpp b/libc/src/threads/linux/cnd_wait.cpp
index db3d7f1436eb..97cade3f231d 100644
--- a/libc/src/threads/linux/cnd_wait.cpp
+++ b/libc/src/threads/linux/cnd_wait.cpp
@@ -6,18 +6,21 @@
//
//===----------------------------------------------------------------------===//
-#include "CndVar.h"
-
+#include "src/threads/cnd_wait.h"
#include "src/__support/common.h"
+#include "src/__support/threads/CndVar.h"
#include "src/__support/threads/mutex.h"
-#include "src/threads/cnd_wait.h"
+
+#include <threads.h> // cnd_t, mtx_t, thrd_error, thrd_success
namespace LIBC_NAMESPACE {
+static_assert(sizeof(CndVar) == sizeof(cnd_t));
+
LLVM_LIBC_FUNCTION(int, cnd_wait, (cnd_t * cond, mtx_t *mtx)) {
CndVar *cndvar = reinterpret_cast<CndVar *>(cond);
Mutex *mutex = reinterpret_cast<Mutex *>(mtx);
- return cndvar->wait(mutex);
+ return cndvar->wait(mutex) ? thrd_error : thrd_success;
}
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/time/gpu/time_utils.cpp b/libc/src/time/gpu/time_utils.cpp
index 67fe5b4861ac..1a674b2fdca2 100644
--- a/libc/src/time/gpu/time_utils.cpp
+++ b/libc/src/time/gpu/time_utils.cpp
@@ -15,8 +15,7 @@ namespace LIBC_NAMESPACE {
// insufficient.
// TODO: Once we have another use-case for this we should put it in a common
// device environment struct.
-extern "C" [[gnu::visibility("protected")]] uint64_t __llvm_libc_clock_freq =
- clock_freq;
+gpu::Constant<uint64_t> __llvm_libc_clock_freq = clock_freq;
#endif
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/time/gpu/time_utils.h b/libc/src/time/gpu/time_utils.h
index da713886b643..77eeb896f6c3 100644
--- a/libc/src/time/gpu/time_utils.h
+++ b/libc/src/time/gpu/time_utils.h
@@ -23,7 +23,10 @@ constexpr uint64_t clock_freq = 100000000UL;
// We provide an externally visible symbol such that the runtime can set
// this to the correct value.
-extern "C" [[gnu::visibility("protected")]] uint64_t __llvm_libc_clock_freq;
+extern "C" {
+[[gnu::visibility("protected")]]
+extern gpu::Constant<uint64_t> __llvm_libc_clock_freq;
+}
#define GPU_CLOCKS_PER_SEC static_cast<clock_t>(__llvm_libc_clock_freq)
#elif defined(LIBC_TARGET_ARCH_IS_NVPTX)
diff --git a/libc/startup/baremetal/CMakeLists.txt b/libc/startup/baremetal/CMakeLists.txt
new file mode 100644
index 000000000000..4faced93fabe
--- /dev/null
+++ b/libc/startup/baremetal/CMakeLists.txt
@@ -0,0 +1,11 @@
+add_entrypoint_object(
+ init
+ SRCS
+ init.cpp
+)
+
+add_entrypoint_object(
+ fini
+ SRCS
+ fini.cpp
+)
diff --git a/libc/startup/baremetal/fini.cpp b/libc/startup/baremetal/fini.cpp
new file mode 100644
index 000000000000..84997fb4fa1d
--- /dev/null
+++ b/libc/startup/baremetal/fini.cpp
@@ -0,0 +1,27 @@
+//===-- Implementation file of __libc_fini_array --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <stddef.h>
+#include <stdint.h>
+
+extern "C" {
+extern uintptr_t __fini_array_start[];
+extern uintptr_t __fini_array_end[];
+}
+
+namespace LIBC_NAMESPACE {
+
+using FiniCallback = void(void);
+
+extern "C" void __libc_fini_array(void) {
+ size_t fini_array_size = __fini_array_end - __fini_array_start;
+ for (size_t i = fini_array_size; i > 0; --i)
+ reinterpret_cast<FiniCallback *>(__fini_array_start[i - 1])();
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/startup/baremetal/init.cpp b/libc/startup/baremetal/init.cpp
new file mode 100644
index 000000000000..08dff74f0519
--- /dev/null
+++ b/libc/startup/baremetal/init.cpp
@@ -0,0 +1,32 @@
+//===-- Implementation file of __libc_init_array --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <stddef.h>
+#include <stdint.h>
+
+extern "C" {
+extern uintptr_t __preinit_array_start[];
+extern uintptr_t __preinit_array_end[];
+extern uintptr_t __init_array_start[];
+extern uintptr_t __init_array_end[];
+}
+
+namespace LIBC_NAMESPACE {
+
+using InitCallback = void(void);
+
+extern "C" void __libc_init_array(void) {
+ size_t preinit_array_size = __preinit_array_end - __preinit_array_start;
+ for (size_t i = 0; i < preinit_array_size; ++i)
+ reinterpret_cast<InitCallback *>(__preinit_array_start[i])();
+ size_t init_array_size = __init_array_end - __init_array_start;
+ for (size_t i = 0; i < init_array_size; ++i)
+ reinterpret_cast<InitCallback *>(__init_array_start[i])();
+}
+
+} // namespace LIBC_NAMESPACE
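
The __preinit_array_*, __init_array_*, and __fini_array_* symbols are expected
to be provided by the platform linker script bracketing the corresponding
output sections. A hypothetical baremetal entry point would drive these hooks
roughly as follows (a sketch only; _start and the halt loop are assumptions,
not part of this patch):

  extern "C" void __libc_init_array(void);
  extern "C" void __libc_fini_array(void);
  extern "C" int main(void);

  extern "C" void _start(void) {
    __libc_init_array();  // .preinit_array first, then .init_array, in order
    main();
    __libc_fini_array();  // .fini_array entries, in reverse order
    for (;;)
      ;                   // nothing to return to on baremetal
  }
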
diff --git a/libc/test/integration/scudo/CMakeLists.txt b/libc/test/integration/scudo/CMakeLists.txt
index 8a085b618044..a5f7e3b63d24 100644
--- a/libc/test/integration/scudo/CMakeLists.txt
+++ b/libc/test/integration/scudo/CMakeLists.txt
@@ -9,6 +9,7 @@ endif()
# test will have to link to the LLVM libc startup system. LLVM libc's startup
# system is not complete enough to allow this. It is also desirable to
# keep the dependencies as minimal as possible.
+
add_entrypoint_library(
libc_for_scudo_integration_test
DEPENDS
@@ -17,6 +18,9 @@ add_entrypoint_library(
libc.src.stdlib.realloc
libc.src.stdlib.aligned_alloc
libc.src.stdlib.free
+ libc.src.errno.errno
+ libc.src.unistd.__llvm_libc_syscall
+ libc.src.sched.__sched_getcpucount
)
add_executable(
diff --git a/libc/utils/docgen/ctype.json b/libc/utils/docgen/ctype.json
index 25eeb683846c..af97e4bbbc0a 100644
--- a/libc/utils/docgen/ctype.json
+++ b/libc/utils/docgen/ctype.json
@@ -1,46 +1,46 @@
{
"functions": {
"isalnum": {
- "defined": "7.4.1.1"
+ "c-definition": "7.4.1.1"
},
"isalpha": {
- "defined": "7.4.1.2"
+ "c-definition": "7.4.1.2"
},
"isblank": {
- "defined": "7.4.1.3"
+ "c-definition": "7.4.1.3"
},
"iscntrl": {
- "defined": "7.4.1.4"
+ "c-definition": "7.4.1.4"
},
"isdigit": {
- "defined": "7.4.1.5"
+ "c-definition": "7.4.1.5"
},
"isgraph": {
- "defined": "7.4.1.6"
+ "c-definition": "7.4.1.6"
},
"islower": {
- "defined": "7.4.1.7"
+ "c-definition": "7.4.1.7"
},
"isprint": {
- "defined": "7.4.1.8"
+ "c-definition": "7.4.1.8"
},
"ispunct": {
- "defined": "7.4.1.9"
+ "c-definition": "7.4.1.9"
},
"isspace": {
- "defined": "7.4.1.10"
+ "c-definition": "7.4.1.10"
},
"isupper": {
- "defined": "7.4.1.11"
+ "c-definition": "7.4.1.11"
},
"isxdigit": {
- "defined": "7.4.1.12"
+ "c-definition": "7.4.1.12"
},
"tolower" : {
- "defined": "7.4.2.1"
+ "c-definition": "7.4.2.1"
},
"toupper": {
- "defined": "7.4.2.2"
+ "c-definition": "7.4.2.2"
}
}
}
diff --git a/libc/utils/docgen/docgen.py b/libc/utils/docgen/docgen.py
index 23d45305fe51..25e22d4d5877 100755
--- a/libc/utils/docgen/docgen.py
+++ b/libc/utils/docgen/docgen.py
@@ -13,70 +13,167 @@ from typing import Dict
import sys
import json
-
-def load_api(hname: str) -> Dict:
- p = Path(__file__).parent / Path(hname).with_suffix(".json")
- api = p.read_text(encoding="utf-8")
+from header import Header
+
+
+class DocgenAPIFormatError(Exception):
+ """Raised on fatal formatting errors with a description of a formatting error"""
+
+
+def check_api(header: Header, api: Dict):
+ """
+ Checks that docgen json files are properly formatted. If there are any
+ fatal formatting errors, raises exceptions with error messages useful for
+ fixing formatting. Warnings are printed to stderr on non-fatal formatting
+    errors. The code that runs after ``check_api(header, api)`` is called expects that
+ ``check_api`` executed without raising formatting exceptions so the json
+ matches the formatting specified here.
+
+ The json file may contain:
+ * an optional macros object
+ * an optional functions object
+
+ Formatting of ``macros`` and ``functions`` objects
+ ==================================================
+
+ If a macros or functions object is present, then it may contain nested
+ objects. Each of these nested objects should have a name matching a macro
+    or function's name, and each nested object must have at least one of the
+    properties ``"c-definition"`` or ``"posix-definition"``.
+
+ Description of properties
+ =========================
+    The ``"c-definition"`` and ``"posix-definition"`` properties are intended
+    to reference the part of the standard that defines the function or macro.
+    For the ``"c-definition"`` property, this should be a C standard section
+    number. For the ``"posix-definition"`` property, this should be a link to
+    the definition.
+
+    :param header: Header object for the header file being checked
+    :param api: docgen json file contents parsed into a dict
+ """
+ errors = []
+ cdef = "c-definition"
+ pdef = "posix-definition"
+
+ # Validate macros
+ if "macros" in api:
+ if not header.macro_file_exists():
+ print(
+ f"warning: Macro definitions are listed for {header.name}, but no macro file can be found in the directory tree rooted at {header.macros_dir}. All macros will be listed as not implemented.",
+ file=sys.stderr,
+ )
+
+ macros = api["macros"]
+
+ for name, obj in macros.items():
+ if not (cdef in obj or pdef in obj):
+ err = f'error: Macro {name} does not contain at least one required property: "{cdef}" or "{pdef}"'
+ errors.append(err)
+
+ # Validate functions
+ if "functions" in api:
+ if not header.fns_dir_exists():
+ print(
+ f"warning: Function definitions are listed for {header.name}, but no function implementation directory exists at {header.fns_dir}. All functions will be listed as not implemented.",
+ file=sys.stderr,
+ )
+
+ fns = api["functions"]
+ for name, obj in fns.items():
+ if not (cdef in obj or pdef in obj):
+ err = f'error: function {name} does not contain at least one required property: "{cdef}" or "{pdef}"'
+ errors.append(err)
+
+ if errors:
+ raise DocgenAPIFormatError("\n".join(errors))
+
+
+def load_api(header: Header) -> Dict:
+ api = header.docgen_json.read_text(encoding="utf-8")
return json.loads(api)
-# TODO: we may need to get more sophisticated for less generic implementations.
-# Does libc/src/{hname minus .h suffix}/{fname}.cpp exist?
-def is_implemented(hname: str, fname: str) -> bool:
- path = Path(
- Path(__file__).parent.parent.parent,
- "src",
- hname.rstrip(".h")
+def print_tbl_dir():
+ print(
+ f"""
+.. list-table::
+ :widths: auto
+ :align: center
+ :header-rows: 1
+
+ * - Function
+ - Implemented
+ - C23 Standard Section
+ - POSIX.1-2017 Standard Section"""
)
- if not path.exists():
- raise FileNotFoundError(f"implementation dir does not exist: {path}")
- if not path.is_dir():
- raise NotADirectoryError(f"implementation dir is not a dir: {path}")
+def print_functions_rst(header: Header, functions: Dict):
+ tbl_hdr = "Functions"
+ print(tbl_hdr)
+ print("=" * len(tbl_hdr))
+
+ print_tbl_dir()
+
+ for name in sorted(functions.keys()):
+ print(f" * - {name}")
+
+ if header.fns_dir_exists() and header.implements_fn(name):
+ print(" - |check|")
+ else:
+ print(" -")
+
+ if "c-definition" in functions[name]:
+ print(f' - {functions[name]["c-definition"]}')
+ else:
+ print(" -")
+
+ if "posix-definition" in functions[name]:
+ print(f' - {functions[name]["posix-definition"]}')
+ else:
+ print(" -")
- # Recursively search for the target source file in the subdirectories under
- # libc/src/{hname}.
- for _ in path.glob("**/" + fname + ".cpp"):
- return True
- return False
+def print_macros_rst(header: Header, macros: Dict):
+ tbl_hdr = "Macros"
+ print(tbl_hdr)
+ print("=" * len(tbl_hdr))
+ print_tbl_dir()
-def print_functions(header: str, functions: Dict):
- for key in sorted(functions.keys()):
- print(f" * - {key}")
+ for name in sorted(macros.keys()):
+ print(f" * - {name}")
- if is_implemented(header, key):
+ if header.macro_file_exists() and header.implements_macro(name):
print(" - |check|")
else:
print(" -")
- # defined is optional. Having any content is optional.
- if functions[key] is not None and "defined" in functions[key]:
- print(f' - {functions[key]["defined"]}')
+ if "c-definition" in macros[name]:
+ print(f' - {macros[name]["c-definition"]}')
else:
print(" -")
+ if "posix-definition" in macros[name]:
+ print(f' - {macros[name]["posix-definition"]}')
+ else:
+ print(" -")
+ print()
+
-def print_header(header: str, api: Dict):
+def print_impl_status_rst(header: Header, api: Dict):
print(".. include:: check.rst\n")
- fns = f"{header} Functions"
- print(fns)
- print("=" * (len(fns)))
- print(
- f"""
-.. list-table::
- :widths: auto
- :align: center
- :header-rows: 1
- * - Function
- - Implemented
- - Standard"""
- )
- # TODO: how do we want to signal implementation of macros?
- print_functions(header, api["functions"])
+ print("=" * len(header.name))
+ print(header.name)
+ print("=" * len(header.name))
+ print()
+
+ # the macro and function sections are both optional
+ if "macros" in api:
+ print_macros_rst(header, api["macros"])
+
+ if "functions" in api:
+ print_functions_rst(header, api["functions"])
def parse_args() -> Namespace:
@@ -88,6 +185,8 @@ def parse_args() -> Namespace:
if __name__ == "__main__":
args = parse_args()
- api = load_api(args.header_name)
+ header = Header(args.header_name)
+ api = load_api(header)
+ check_api(header, api)
- print_header(args.header_name, api)
+ print_impl_status_rst(header, api)
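
Taken together, the new flow is Header -> load_api -> check_api ->
print_impl_status_rst. A small usage sketch under the interfaces introduced
above (hypothetical header name; assumes docgen.py is importable as a module):

  from header import Header
  from docgen import load_api, check_api, print_impl_status_rst

  header = Header("fenv.h")           # resolves fns_dir, macros_dir, docgen_json
  api = load_api(header)              # parses libc/utils/docgen/fenv.json
  check_api(header, api)              # raises DocgenAPIFormatError if an entry
                                      # lacks "c-definition"/"posix-definition"
  print_impl_status_rst(header, api)  # emits the rst status table to stdout
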
diff --git a/libc/utils/docgen/fenv.json b/libc/utils/docgen/fenv.json
index 9aa3f641ddc9..788b196c053b 100644
--- a/libc/utils/docgen/fenv.json
+++ b/libc/utils/docgen/fenv.json
@@ -1,114 +1,114 @@
{
"macros": {
"__STDC_VERSION_FENV_H__": {
- "defined": "7.6.5"
+ "c-definition": "7.6.5"
},
"FE_DIVBYZERO": {
- "defined": "7.6.9"
+ "c-definition": "7.6.9"
},
"FE_INEXACT": {
- "defined": "7.6.9"
+ "c-definition": "7.6.9"
},
"FE_INVALID": {
- "defined": "7.6.9"
+ "c-definition": "7.6.9"
},
"FE_OVERFLOW": {
- "defined": "7.6.9"
+ "c-definition": "7.6.9"
},
"FE_UNDERFLOW": {
- "defined": "7.6.9"
+ "c-definition": "7.6.9"
},
"FE_ALL_EXCEPT": {
- "defined": "7.6.12"
+ "c-definition": "7.6.12"
},
"FE_DFL_MODE": {
- "defined": "7.6.11"
+ "c-definition": "7.6.11"
},
"FE_DOWNARD": {
- "defined": "7.6.13"
+ "c-definition": "7.6.13"
},
"FE_TONEAREST": {
- "defined": "7.6.13"
+ "c-definition": "7.6.13"
},
"FE_TONEARESTFROMZERO": {
- "defined": "7.6.13"
+ "c-definition": "7.6.13"
},
"FE_TOWARDZERO": {
- "defined": "7.6.13"
+ "c-definition": "7.6.13"
},
"FE_UPWARD": {
- "defined": "7.6.13"
+ "c-definition": "7.6.13"
},
"FE_DEC_DOWNWARD": {
- "defined": "7.6.14"
+ "c-definition": "7.6.14"
},
"FE_DEC_TONEAREST": {
- "defined": "7.6.14"
+ "c-definition": "7.6.14"
},
"FE_DEC_TONEARESTFROMZERO": {
- "defined": "7.6.14"
+ "c-definition": "7.6.14"
},
"FE_DEC_TOWARDZERO": {
- "defined": "7.6.14"
+ "c-definition": "7.6.14"
},
"FE_DEC_UPWARD": {
- "defined": "7.6.14"
+ "c-definition": "7.6.14"
},
"FE_DFL_ENV": {
- "defined": "7.6.17"
+ "c-definition": "7.6.17"
}
},
"functions": {
"feclearexcept": {
- "defined": "7.6.4.1"
+ "c-definition": "7.6.4.1"
},
"fegetexceptflag": {
- "defined": "7.6.4.2"
+ "c-definition": "7.6.4.2"
},
"feraiseexcept": {
- "defined": "7.6.4.3"
+ "c-definition": "7.6.4.3"
},
"fesetexcept": {
- "defined": "7.6.4.4"
+ "c-definition": "7.6.4.4"
},
"fesetexceptflag": {
- "defined": "7.6.4.5"
+ "c-definition": "7.6.4.5"
},
"fetestexceptflag": {
- "defined": "7.6.4.6"
+ "c-definition": "7.6.4.6"
},
"fetestexcept": {
- "defined": "7.6.4.7"
+ "c-definition": "7.6.4.7"
},
"fegetmode": {
- "defined": "7.6.5.1"
+ "c-definition": "7.6.5.1"
},
"fegetround": {
- "defined": "7.6.5.2"
+ "c-definition": "7.6.5.2"
},
"fe_dec_getround": {
- "defined": "7.6.5.3"
+ "c-definition": "7.6.5.3"
},
"fesetmode": {
- "defined": "7.6.5.4"
+ "c-definition": "7.6.5.4"
},
"fesetround": {
- "defined": "7.6.5.5"
+ "c-definition": "7.6.5.5"
},
"fe_dec_setround": {
- "defined": "7.6.5.6"
+ "c-definition": "7.6.5.6"
},
"fegetenv": {
- "defined": "7.6.6.1"
+ "c-definition": "7.6.6.1"
},
"feholdexcept": {
- "defined": "7.6.6.2"
+ "c-definition": "7.6.6.2"
},
"fesetenv": {
- "defined": "7.6.6.3"
+ "c-definition": "7.6.6.3"
},
"feupdateenv": {
- "defined": "7.6.6.4"
+ "c-definition": "7.6.6.4"
}
}
}
diff --git a/libc/utils/docgen/header.py b/libc/utils/docgen/header.py
new file mode 100644
index 000000000000..dde210078db2
--- /dev/null
+++ b/libc/utils/docgen/header.py
@@ -0,0 +1,87 @@
+# ====- Information about standard headers used by docgen ----*- python -*--==#
+#
+# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+# ==-------------------------------------------------------------------------==#
+from pathlib import Path
+from typing import Generator
+
+
+class Header:
+ """
+ Maintains implementation information about a standard header file:
+ * where does its implementation dir live
+ * where is its macros file
+ * where is its docgen json file
+
+ By convention, the macro-only part of a header file is in a header-specific
+ file somewhere in the directory tree with root at
+ ``$LLVM_PROJECT_ROOT/libc/include/llvm-libc-macros``. Docgen expects that
+    if a macro is implemented, it appears as the string
+ ``#define MACRO_NAME`` in some ``*-macros.h`` file in the directory tree.
+ Docgen searches for this string in the file to set the implementation status
+ shown in the generated rst docs rendered as html for display at
+ <libc.llvm.org>.
+
+ By convention, each function for a header is implemented in a function-specific
+ cpp file somewhere in the directory tree with root at, e.g,
+ ``$LLVM_PROJECT_ROOT/libc/src/fenv``. Some headers have architecture-specific
+ implementations, like ``math``, and some don't, like ``fenv``. Docgen uses the
+ presence of this function-specific cpp file to set the implementation status
+ shown in the generated rst docs rendered as html for display at
+ <libc.llvm.org>.
+ """
+
+ def __init__(self, header_name: str):
+ """
+ :param header_name: e.g., ``"threads.h"`` or ``"signal.h"``
+ """
+ self.name = header_name
+        # Use Path.stem to drop the ".h" suffix; rstrip(".h") would also eat a
+        # trailing 'h' from the stem itself ("math.h".rstrip(".h") == "mat").
+        self.stem = Path(header_name).stem
+ self.docgen_root = Path(__file__).parent
+ self.libc_root = self.docgen_root.parent.parent
+ self.docgen_json = self.docgen_root / Path(header_name).with_suffix(".json")
+ self.fns_dir = Path(self.libc_root, "src", self.stem)
+ self.macros_dir = Path(self.libc_root, "include", "llvm-libc-macros")
+
+ def macro_file_exists(self) -> bool:
+ for _ in self.__get_macro_files():
+ return True
+
+ return False
+
+ def fns_dir_exists(self) -> bool:
+ return self.fns_dir.exists() and self.fns_dir.is_dir()
+
+ def implements_fn(self, fn_name: str) -> bool:
+ for _ in self.fns_dir.glob(f"**/{fn_name}.cpp"):
+ return True
+
+ return False
+
+ def implements_macro(self, m_name: str) -> bool:
+ """
+ Some macro files are in, e.g.,
+ ``$LLVM_PROJECT_ROOT/libc/include/llvm-libc-macros/fenv-macros.h``,
+ but others are in subdirectories, e.g., ``signal.h`` has the macro
+ definitions in
+ ``$LLVM_PROJECT_ROOT/libc/include/llvm-libc-macros/linux/signal-macros.h``.
+
+ :param m_name: name of macro, e.g., ``FE_ALL_EXCEPT``
+ """
+ for f in self.__get_macro_files():
+ if f"#define {m_name}" in f.read_text():
+ return True
+
+ return False
+
+ def __get_macro_files(self) -> Generator[Path, None, None]:
+ """
+        This function uses a glob on, e.g., ``"**/fcntl-macros.h"`` because the
+ macro file might be located in a subdirectory:
+ libc/include/llvm-libc-macros/fcntl-macros.h
+ libc/include/llvm-libc-macros/linux/fcntl-macros.h
+ """
+ return self.macros_dir.glob(f"**/{self.stem}-macros.h")
diff --git a/libc/utils/docgen/signal.json b/libc/utils/docgen/signal.json
index d5380d348b7d..337b0c19717b 100644
--- a/libc/utils/docgen/signal.json
+++ b/libc/utils/docgen/signal.json
@@ -1,47 +1,152 @@
{
"macros": {
"SIG_DFL": {
- "defined": "7.14.3"
+ "c-definition": "7.14.3",
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
},
"SIG_ERR": {
- "defined": "7.14.3"
+ "c-definition": "7.14.3",
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIG_HOLD": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
},
"SIG_IGN": {
- "defined": "7.14.3"
+ "c-definition": "7.14.3",
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGRTMIN": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGRTMAX": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
},
"SIGABRT": {
- "defined": "7.14.3"
+ "c-definition": "7.14.3",
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGALRM": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGBUS": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGCHLD": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGCONT": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
},
"SIGFPE": {
- "defined": "7.14.3"
+ "c-definition": "7.14.3",
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGHUP": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
},
"SIGILL": {
- "defined": "7.14.3"
+ "c-definition": "7.14.3",
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
},
"SIGINT": {
- "defined": "7.14.3"
+ "c-definition": "7.14.3",
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGKILL": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGPIPE": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGPIPE": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGQUIT": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
},
"SIGSEGV": {
- "defined": "7.14.3"
+ "c-definition": "7.14.3",
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGSTOP": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
},
"SIGTERM": {
- "defined": "7.14.3"
+ "c-definition": "7.14.3",
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGTSTP": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGTTIN": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGTTOU": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGUSR1": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGUSR2": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGPOLL": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGPROF": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGSYS": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGTRAP": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGURG": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGVTALRM": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGXCPU": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
+ },
+ "SIGXFSZ": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html"
}
},
"functions": {
"signal": {
- "defined": "7.14.1.1"
+ "c-definition": "7.14.1.1",
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/functions/signal.html"
},
"raise": {
- "defined": "7.14.2.1"
- },
- "kill": null,
- "sigaction": null,
- "sigaddset": null,
- "sigaltstack": null,
- "sigdelset": null,
- "sigemptyset": null,
- "sigfillset": null,
- "sigprocmask": null
+ "c-definition": "7.14.2.1",
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/functions/raise.html"
+ },
+ "kill": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/functions/kill.html"
+ },
+ "sigaction": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/functions/sigaction.html"
+ },
+ "sigaddset": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/functions/sigaddset.html"
+ },
+ "sigaltstack": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/functions/sigaltstack.html"
+ },
+ "sigdelset": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/functions/sigdelset.html"
+ },
+ "sigemptyset": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/functions/sigemptyset.html"
+ },
+ "sigfillset": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/functions/sigfillset.html"
+ },
+ "sigprocmask": {
+ "posix-definition": "https://pubs.opengroup.org/onlinepubs/9699919799/functions/sigprocmask.html"
+ }
}
}
diff --git a/libc/utils/docgen/stdbit.json b/libc/utils/docgen/stdbit.json
index 88106cf0e4f9..25060c1ff9fd 100644
--- a/libc/utils/docgen/stdbit.json
+++ b/libc/utils/docgen/stdbit.json
@@ -1,270 +1,270 @@
{
"macros": {
"__STDC_VERSION_STDBIT_H__": {
- "defined": "7.18.1.2"
+ "c-definition": "7.18.1.2"
},
"__STDC_ENDIAN_LITTLE__": {
- "defined": "7.18.2.2"
+ "c-definition": "7.18.2.2"
},
"__STDC_ENDIAN_BIG__": {
- "defined": "7.18.2.2"
+ "c-definition": "7.18.2.2"
},
"__STDC_ENDIAN_NATIVE__": {
- "defined": "7.18.2.2"
+ "c-definition": "7.18.2.2"
},
"stdc_leading_zeros": {
- "defined": "7.18.3.1"
+ "c-definition": "7.18.3.1"
},
"stdc_leading_ones": {
- "defined": "7.18.4.1"
+ "c-definition": "7.18.4.1"
},
"stdc_trailing_zeros": {
- "defined": "7.18.5.1"
+ "c-definition": "7.18.5.1"
},
"stdc_trailing_ones": {
- "defined": "7.18.6.1"
+ "c-definition": "7.18.6.1"
},
"stdc_first_leading_zero": {
- "defined": "7.18.7.1"
+ "c-definition": "7.18.7.1"
},
"stdc_first_leading_one": {
- "defined": "7.18.8.1"
+ "c-definition": "7.18.8.1"
},
"stdc_first_trailing_zero": {
- "defined": "7.18.9.1"
+ "c-definition": "7.18.9.1"
},
"stdc_first_trailing_one": {
- "defined": "7.18.10.1"
+ "c-definition": "7.18.10.1"
},
"stdc_count_zeros": {
- "defined": "7.18.11.1"
+ "c-definition": "7.18.11.1"
},
"stdc_count_ones": {
- "defined": "7.18.12.1"
+ "c-definition": "7.18.12.1"
},
"stdc_has_single_bit": {
- "defined": "7.18.13.1"
+ "c-definition": "7.18.13.1"
},
"stdc_bit_width": {
- "defined": "7.18.14.1"
+ "c-definition": "7.18.14.1"
},
"stdc_bit_floor": {
- "defined": "7.18.15.1"
+ "c-definition": "7.18.15.1"
},
"stdc_bit_ceil": {
- "defined": "7.18.16.1"
+ "c-definition": "7.18.16.1"
}
},
"functions": {
"stdc_leading_zeros_uc": {
- "defined": "7.18.3"
+ "c-definition": "7.18.3"
},
"stdc_leading_zeros_us": {
- "defined": "7.18.3"
+ "c-definition": "7.18.3"
},
"stdc_leading_zeros_ui": {
- "defined": "7.18.3"
+ "c-definition": "7.18.3"
},
"stdc_leading_zeros_ul": {
- "defined": "7.18.3"
+ "c-definition": "7.18.3"
},
"stdc_leading_zeros_ull": {
- "defined": "7.18.3"
+ "c-definition": "7.18.3"
},
"stdc_leading_ones_uc": {
- "defined": "7.18.4"
+ "c-definition": "7.18.4"
},
"stdc_leading_ones_us": {
- "defined": "7.18.4"
+ "c-definition": "7.18.4"
},
"stdc_leading_ones_ui": {
- "defined": "7.18.4"
+ "c-definition": "7.18.4"
},
"stdc_leading_ones_ul": {
- "defined": "7.18.4"
+ "c-definition": "7.18.4"
},
"stdc_leading_ones_ull": {
- "defined": "7.18.4"
+ "c-definition": "7.18.4"
},
"stdc_trailing_zeros_uc": {
- "defined": "7.18.5"
+ "c-definition": "7.18.5"
},
"stdc_trailing_zeros_us": {
- "defined": "7.18.5"
+ "c-definition": "7.18.5"
},
"stdc_trailing_zeros_ui": {
- "defined": "7.18.5"
+ "c-definition": "7.18.5"
},
"stdc_trailing_zeros_ul": {
- "defined": "7.18.5"
+ "c-definition": "7.18.5"
},
"stdc_trailing_zeros_ull": {
- "defined": "7.18.5"
+ "c-definition": "7.18.5"
},
"stdc_trailing_ones_uc": {
- "defined": "7.18.6"
+ "c-definition": "7.18.6"
},
"stdc_trailing_ones_us": {
- "defined": "7.18.6"
+ "c-definition": "7.18.6"
},
"stdc_trailing_ones_ui": {
- "defined": "7.18.6"
+ "c-definition": "7.18.6"
},
"stdc_trailing_ones_ul": {
- "defined": "7.18.6"
+ "c-definition": "7.18.6"
},
"stdc_trailing_ones_ull": {
- "defined": "7.18.6"
+ "c-definition": "7.18.6"
},
"stdc_first_leading_zero_uc": {
- "defined": "7.18.7"
+ "c-definition": "7.18.7"
},
"stdc_first_leading_zero_us": {
- "defined": "7.18.7"
+ "c-definition": "7.18.7"
},
"stdc_first_leading_zero_ui": {
- "defined": "7.18.7"
+ "c-definition": "7.18.7"
},
"stdc_first_leading_zero_ul": {
- "defined": "7.18.7"
+ "c-definition": "7.18.7"
},
"stdc_first_leading_zero_ull": {
- "defined": "7.18.7"
+ "c-definition": "7.18.7"
},
"stdc_first_leading_one_uc": {
- "defined": "7.18.8"
+ "c-definition": "7.18.8"
},
"stdc_first_leading_one_us": {
- "defined": "7.18.8"
+ "c-definition": "7.18.8"
},
"stdc_first_leading_one_ui": {
- "defined": "7.18.8"
+ "c-definition": "7.18.8"
},
"stdc_first_leading_one_ul": {
- "defined": "7.18.8"
+ "c-definition": "7.18.8"
},
"stdc_first_leading_one_ull": {
- "defined": "7.18.8"
+ "c-definition": "7.18.8"
},
"stdc_first_trailing_zero_uc": {
- "defined": "7.18.9"
+ "c-definition": "7.18.9"
},
"stdc_first_trailing_zero_us": {
- "defined": "7.18.9"
+ "c-definition": "7.18.9"
},
"stdc_first_trailing_zero_ui": {
- "defined": "7.18.9"
+ "c-definition": "7.18.9"
},
"stdc_first_trailing_zero_ul": {
- "defined": "7.18.9"
+ "c-definition": "7.18.9"
},
"stdc_first_trailing_zero_ull": {
- "defined": "7.18.9"
+ "c-definition": "7.18.9"
},
"stdc_first_trailing_one_uc": {
- "defined": "7.18.10"
+ "c-definition": "7.18.10"
},
"stdc_first_trailing_one_us": {
- "defined": "7.18.10"
+ "c-definition": "7.18.10"
},
"stdc_first_trailing_one_ui": {
- "defined": "7.18.10"
+ "c-definition": "7.18.10"
},
"stdc_first_trailing_one_ul": {
- "defined": "7.18.10"
+ "c-definition": "7.18.10"
},
"stdc_first_trailing_one_ull": {
- "defined": "7.18.10"
+ "c-definition": "7.18.10"
},
"stdc_count_zeros_uc": {
- "defined": "7.18.11"
+ "c-definition": "7.18.11"
},
"stdc_count_zeros_us": {
- "defined": "7.18.11"
+ "c-definition": "7.18.11"
},
"stdc_count_zeros_ui": {
- "defined": "7.18.11"
+ "c-definition": "7.18.11"
},
"stdc_count_zeros_ul": {
- "defined": "7.18.11"
+ "c-definition": "7.18.11"
},
"stdc_count_zeros_ull": {
- "defined": "7.18.11"
+ "c-definition": "7.18.11"
},
"stdc_count_ones_uc": {
- "defined": "7.18.12"
+ "c-definition": "7.18.12"
},
"stdc_count_ones_us": {
- "defined": "7.18.12"
+ "c-definition": "7.18.12"
},
"stdc_count_ones_ui": {
- "defined": "7.18.12"
+ "c-definition": "7.18.12"
},
"stdc_count_ones_ul": {
- "defined": "7.18.12"
+ "c-definition": "7.18.12"
},
"stdc_count_ones_ull": {
- "defined": "7.18.12"
+ "c-definition": "7.18.12"
},
"stdc_has_single_bit_uc": {
- "defined": "7.18.13"
+ "c-definition": "7.18.13"
},
"stdc_has_single_bit_us": {
- "defined": "7.18.13"
+ "c-definition": "7.18.13"
},
"stdc_has_single_bit_ui": {
- "defined": "7.18.13"
+ "c-definition": "7.18.13"
},
"stdc_has_single_bit_ul": {
- "defined": "7.18.13"
+ "c-definition": "7.18.13"
},
"stdc_has_single_bit_ull": {
- "defined": "7.18.13"
+ "c-definition": "7.18.13"
},
"stdc_bit_width_uc": {
- "defined": "7.18.14"
+ "c-definition": "7.18.14"
},
"stdc_bit_width_us": {
- "defined": "7.18.14"
+ "c-definition": "7.18.14"
},
"stdc_bit_width_ui": {
- "defined": "7.18.14"
+ "c-definition": "7.18.14"
},
"stdc_bit_width_ul": {
- "defined": "7.18.14"
+ "c-definition": "7.18.14"
},
"stdc_bit_width_ull": {
- "defined": "7.18.14"
+ "c-definition": "7.18.14"
},
"stdc_bit_floor_uc": {
- "defined": "7.18.15"
+ "c-definition": "7.18.15"
},
"stdc_bit_floor_us": {
- "defined": "7.18.15"
+ "c-definition": "7.18.15"
},
"stdc_bit_floor_ui": {
- "defined": "7.18.15"
+ "c-definition": "7.18.15"
},
"stdc_bit_floor_ul": {
- "defined": "7.18.15"
+ "c-definition": "7.18.15"
},
"stdc_bit_floor_ull": {
- "defined": "7.18.15"
+ "c-definition": "7.18.15"
},
"stdc_bit_ceil_uc": {
- "defined": "7.18.16"
+ "c-definition": "7.18.16"
},
"stdc_bit_ceil_us": {
- "defined": "7.18.16"
+ "c-definition": "7.18.16"
},
"stdc_bit_ceil_ui": {
- "defined": "7.18.16"
+ "c-definition": "7.18.16"
},
"stdc_bit_ceil_ul": {
- "defined": "7.18.16"
+ "c-definition": "7.18.16"
},
"stdc_bit_ceil_ull": {
- "defined": "7.18.16"
+ "c-definition": "7.18.16"
}
}
}
diff --git a/libc/utils/docgen/threads.json b/libc/utils/docgen/threads.json
index aef6ffaf75ba..8591cbde55a4 100644
--- a/libc/utils/docgen/threads.json
+++ b/libc/utils/docgen/threads.json
@@ -1,87 +1,87 @@
{
"macros": {
"ONCE_FLAG_INIT": {
- "defined": "7.28.1.3"
+ "c-definition": "7.28.1.3"
},
"TSS_DTOR_ITERATIONS": {
- "defined": "7.28.1.3"
+ "c-definition": "7.28.1.3"
}
},
"functions": {
"call_once": {
- "defined": "7.28.2.1"
+ "c-definition": "7.28.2.1"
},
"cnd_broadcast": {
- "defined": "7.28.3.1"
+ "c-definition": "7.28.3.1"
},
"cnd_destroy": {
- "defined": "7.28.3.2"
+ "c-definition": "7.28.3.2"
},
"cnd_init": {
- "defined": "7.28.3.3"
+ "c-definition": "7.28.3.3"
},
"cnd_signal": {
- "defined": "7.28.3.4"
+ "c-definition": "7.28.3.4"
},
"cnd_timedwait": {
- "defined": "7.28.3.5"
+ "c-definition": "7.28.3.5"
},
"cnd_wait": {
- "defined": "7.28.3.6"
+ "c-definition": "7.28.3.6"
},
"mtx_destroy": {
- "defined": "7.28.4.1"
+ "c-definition": "7.28.4.1"
},
"mtx_init": {
- "defined": "7.28.4.2"
+ "c-definition": "7.28.4.2"
},
"mtx_lock": {
- "defined": "7.28.4.3"
+ "c-definition": "7.28.4.3"
},
"mtx_timedlock": {
- "defined": "7.28.4.4"
+ "c-definition": "7.28.4.4"
},
"mtx_trylock": {
- "defined": "7.28.4.5"
+ "c-definition": "7.28.4.5"
},
"mtx_unlock": {
- "defined": "7.28.4.6"
+ "c-definition": "7.28.4.6"
},
"thrd_create": {
- "defined": "7.28.5.1"
+ "c-definition": "7.28.5.1"
},
"thrd_current": {
- "defined": "7.28.5.2"
+ "c-definition": "7.28.5.2"
},
"thrd_detach": {
- "defined": "7.28.5.3"
+ "c-definition": "7.28.5.3"
},
"thrd_equal": {
- "defined": "7.28.5.4"
+ "c-definition": "7.28.5.4"
},
"thrd_exit": {
- "defined": "7.28.5.5"
+ "c-definition": "7.28.5.5"
},
"thrd_join": {
- "defined": "7.28.5.6"
+ "c-definition": "7.28.5.6"
},
"thrd_sleep": {
- "defined": "7.28.5.7"
+ "c-definition": "7.28.5.7"
},
"thrd_yield": {
- "defined": "7.28.5.8"
+ "c-definition": "7.28.5.8"
},
"tss_create": {
- "defined": "7.28.6.1"
+ "c-definition": "7.28.6.1"
},
"tss_delete": {
- "defined": "7.28.6.2"
+ "c-definition": "7.28.6.2"
},
"tss_get": {
- "defined": "7.28.6.3"
+ "c-definition": "7.28.6.3"
},
"tss_set": {
- "defined": "7.28.6.4"
+ "c-definition": "7.28.6.4"
}
}
}
diff --git a/libcxx/CMakeLists.txt b/libcxx/CMakeLists.txt
index f34cb178e076..cb5e0e5e6cdb 100644
--- a/libcxx/CMakeLists.txt
+++ b/libcxx/CMakeLists.txt
@@ -5,6 +5,7 @@
# Setup Project
#===============================================================================
cmake_minimum_required(VERSION 3.20.0)
+set(LLVM_SUBPROJECT_TITLE "libc++")
set(LLVM_COMMON_CMAKE_UTILS "${CMAKE_CURRENT_SOURCE_DIR}/../cmake")
diff --git a/libcxx/docs/ReleaseNotes/19.rst b/libcxx/docs/ReleaseNotes/19.rst
index 83fcd40bb80c..0bc343acd281 100644
--- a/libcxx/docs/ReleaseNotes/19.rst
+++ b/libcxx/docs/ReleaseNotes/19.rst
@@ -53,6 +53,7 @@ Implemented Papers
- P2387R3 - Pipe support for user-defined range adaptors
- P2713R1 - Escaping improvements in ``std::format``
- P2231R1 - Missing ``constexpr`` in ``std::optional`` and ``std::variant``
+- P0019R8 - ``std::atomic_ref``
Improvements and New Features
-----------------------------
diff --git a/libcxx/docs/Status/Cxx20Issues.csv b/libcxx/docs/Status/Cxx20Issues.csv
index db57b15256a6..5f83fa3a92e8 100644
--- a/libcxx/docs/Status/Cxx20Issues.csv
+++ b/libcxx/docs/Status/Cxx20Issues.csv
@@ -70,7 +70,7 @@
"`3041 <https://wg21.link/LWG3041>`__","Unnecessary ``decay``\ in ``reference_wrapper``\ ","Jacksonville","|Complete|",""
"`3042 <https://wg21.link/LWG3042>`__","``is_literal_type_v``\ should be inline","Jacksonville","|Complete|",""
"`3043 <https://wg21.link/LWG3043>`__","Bogus postcondition for ``filesystem_error``\ constructor","Jacksonville","|Complete|",""
-"`3045 <https://wg21.link/LWG3045>`__","``atomic<floating-point>``\ doesn't have ``value_type``\ or ``difference_type``\ ","Jacksonville","",""
+"`3045 <https://wg21.link/LWG3045>`__","``atomic<floating-point>``\ doesn't have ``value_type``\ or ``difference_type``\ ","Jacksonville","|Complete|","18.0"
"`3048 <https://wg21.link/LWG3048>`__","``transform_reduce(exec, first1, last1, first2, init)``\ discards execution policy","Jacksonville","|Complete|","17.0"
"`3051 <https://wg21.link/LWG3051>`__","Floating point classifications were inadvertently changed in P0175","Jacksonville","|Nothing To Do|",""
"`3075 <https://wg21.link/LWG3075>`__","``basic_string``\ needs deduction guides from ``basic_string_view``\ ","Jacksonville","|Complete|",""
diff --git a/libcxx/docs/Status/Cxx20Papers.csv b/libcxx/docs/Status/Cxx20Papers.csv
index 955aa5f614af..6598cd18358f 100644
--- a/libcxx/docs/Status/Cxx20Papers.csv
+++ b/libcxx/docs/Status/Cxx20Papers.csv
@@ -26,7 +26,7 @@
"`P0905R1 <https://wg21.link/P0905R1>`__","CWG","Symmetry for spaceship","Jacksonville","|Complete|","7.0","|spaceship|"
"`P0966R1 <https://wg21.link/P0966R1>`__","LWG","``string::reserve``\ Should Not Shrink","Jacksonville","|Complete| [#note-P0966]_","12.0"
"","","","","","",""
-"`P0019R8 <https://wg21.link/P0019R8>`__","LWG","Atomic Ref","Rapperswil","",""
+"`P0019R8 <https://wg21.link/P0019R8>`__","LWG","Atomic Ref","Rapperswil","|Complete|","19.0"
"`P0458R2 <https://wg21.link/P0458R2>`__","LWG","Checking for Existence of an Element in Associative Containers","Rapperswil","|Complete|","13.0"
"`P0475R1 <https://wg21.link/P0475R1>`__","LWG","LWG 2511: guaranteed copy elision for piecewise construction","Rapperswil","|Complete|",""
"`P0476R2 <https://wg21.link/P0476R2>`__","LWG","Bit-casting object representations","Rapperswil","|Complete|","14.0"
@@ -125,7 +125,7 @@
"`P1612R1 <https://wg21.link/P1612R1>`__","LWG","Relocate Endian's Specification","Cologne","|Complete|","10.0"
"`P1614R2 <https://wg21.link/P1614R2>`__","LWG","The Mothership has Landed","Cologne","|In Progress|",""
"`P1638R1 <https://wg21.link/P1638R1>`__","LWG","basic_istream_view::iterator should not be copyable","Cologne","|Complete|","16.0","|ranges|"
-"`P1643R1 <https://wg21.link/P1643R1>`__","LWG","Add wait/notify to atomic_ref","Cologne","",""
+"`P1643R1 <https://wg21.link/P1643R1>`__","LWG","Add wait/notify to atomic_ref","Cologne","|Complete|","19.0"
"`P1644R0 <https://wg21.link/P1644R0>`__","LWG","Add wait/notify to atomic<shared_ptr>","Cologne","",""
"`P1650R0 <https://wg21.link/P1650R0>`__","LWG","Output std::chrono::days with 'd' suffix","Cologne","|Complete|","16.0"
"`P1651R0 <https://wg21.link/P1651R0>`__","LWG","bind_front should not unwrap reference_wrapper","Cologne","|Complete|","13.0"
diff --git a/libcxx/docs/Status/Cxx23Issues.csv b/libcxx/docs/Status/Cxx23Issues.csv
index d421feef8db9..cc601b3cd3c9 100644
--- a/libcxx/docs/Status/Cxx23Issues.csv
+++ b/libcxx/docs/Status/Cxx23Issues.csv
@@ -98,7 +98,7 @@
`3555 <https://wg21.link/LWG3555>`__,"``{transform,elements}_view::iterator::iterator_concept`` should consider const-qualification of the underlying range","June 2021","","","|ranges|"
"","","","","",""
`2191 <https://wg21.link/LWG2191>`__,"Incorrect specification of ``match_results(match_results&&)``","October 2021","|Nothing To Do|",""
-`2381 <https://wg21.link/LWG2381>`__,"Inconsistency in parsing floating point numbers","October 2021","",""
+`2381 <https://wg21.link/LWG2381>`__,"Inconsistency in parsing floating point numbers","October 2021","|Complete|","19.0"
`2762 <https://wg21.link/LWG2762>`__,"``unique_ptr operator*()`` should be ``noexcept``","October 2021","",""
`3121 <https://wg21.link/LWG3121>`__,"``tuple`` constructor constraints for ``UTypes&&...`` overloads","October 2021","",""
`3123 <https://wg21.link/LWG3123>`__,"``duration`` constructor from representation shouldn't be effectively non-throwing","October 2021","","","|chrono|"
diff --git a/libcxx/docs/Status/ParallelismProjects.csv b/libcxx/docs/Status/ParallelismProjects.csv
index 06da008ac5fe..2ddac1e52f02 100644
--- a/libcxx/docs/Status/ParallelismProjects.csv
+++ b/libcxx/docs/Status/ParallelismProjects.csv
@@ -24,6 +24,7 @@ Section,Description,Dependencies,Assignee,Complete
| `[parallel.simd.class] <https://wg21.link/N4808>`_, "`simd generate constructor <https://reviews.llvm.org/D159442>`_", None, Yin Zhang, |Complete|
| `[parallel.simd.class] <https://wg21.link/N4808>`_, "`simd load constructor <https://github.com/llvm/llvm-project/pull/76610>`_", None, Yin Zhang, |Complete|
| `[parallel.simd.class] <https://wg21.link/N4808>`_, "`simd subscript operators <https://github.com/llvm/llvm-project/pull/68960>`_", None, Yin Zhang, |Complete|
+| `[parallel.simd.class] <https://wg21.link/N4808>`_, "`simd copy functions <https://github.com/llvm/llvm-project/pull/78935>`_", None, Yin Zhang, |Complete|
| `[parallel.simd.class] <https://wg21.link/N4808>`_, "Class template simd implementation", None, Yin Zhang, |In Progress|
| `[parallel.simd.nonmembers] <https://wg21.link/N4808>`_, "simd non-member operations", None, Yin Zhang, |In Progress|
| `[parallel.simd.mask.class] <https://wg21.link/N4808>`_, "`Class template simd_mask declaration and alias <https://reviews.llvm.org/D144362>`_", [parallel.simd.abi], Yin Zhang, |Complete|
@@ -33,5 +34,6 @@ Section,Description,Dependencies,Assignee,Complete
| `[parallel.simd.mask.class] <https://wg21.link/N4808>`_, "`simd_mask implicit type conversion constructor <https://github.com/llvm/llvm-project/pull/71132>`_", None, Yin Zhang, |Complete|
| `[parallel.simd.mask.class] <https://wg21.link/N4808>`_, "`simd_mask load constructor <https://github.com/llvm/llvm-project/pull/76610>`_", None, Yin Zhang, |Complete|
| `[parallel.simd.mask.class] <https://wg21.link/N4808>`_, "`simd_mask subscript operators <https://github.com/llvm/llvm-project/pull/68960>`_", None, Yin Zhang, |Complete|
+| `[parallel.simd.mask.class] <https://wg21.link/N4808>`_, "`simd_mask copy functions <https://github.com/llvm/llvm-project/pull/78935>`_", None, Yin Zhang, |Complete|
| `[parallel.simd.mask.class] <https://wg21.link/N4808>`_, "Class template simd_mask implementation", None, Yin Zhang, |In Progress|
| `[parallel.simd.mask.nonmembers] <https://wg21.link/N4808>`_, "simd_mask non-member operations", None, Yin Zhang, |In Progress|
diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt
index 01e9c247560c..33ee5b26bd62 100644
--- a/libcxx/include/CMakeLists.txt
+++ b/libcxx/include/CMakeLists.txt
@@ -72,23 +72,8 @@ set(files
__algorithm/partition_point.h
__algorithm/pop_heap.h
__algorithm/prev_permutation.h
- __algorithm/pstl_any_all_none_of.h
- __algorithm/pstl_copy.h
- __algorithm/pstl_count.h
- __algorithm/pstl_equal.h
- __algorithm/pstl_fill.h
- __algorithm/pstl_find.h
- __algorithm/pstl_for_each.h
+ __algorithm/pstl.h
__algorithm/pstl_frontend_dispatch.h
- __algorithm/pstl_generate.h
- __algorithm/pstl_is_partitioned.h
- __algorithm/pstl_merge.h
- __algorithm/pstl_move.h
- __algorithm/pstl_replace.h
- __algorithm/pstl_rotate_copy.h
- __algorithm/pstl_sort.h
- __algorithm/pstl_stable_sort.h
- __algorithm/pstl_transform.h
__algorithm/push_heap.h
__algorithm/ranges_adjacent_find.h
__algorithm/ranges_all_of.h
@@ -224,6 +209,7 @@ set(files
__atomic/atomic_flag.h
__atomic/atomic_init.h
__atomic/atomic_lock_free.h
+ __atomic/atomic_ref.h
__atomic/atomic_sync.h
__atomic/check_memory_order.h
__atomic/contention_t.h
@@ -232,6 +218,7 @@ set(files
__atomic/is_always_lock_free.h
__atomic/kill_dependency.h
__atomic/memory_order.h
+ __atomic/to_gcc_order.h
__availability
__bit/bit_cast.h
__bit/bit_ceil.h
@@ -575,8 +562,7 @@ set(files
__numeric/iota.h
__numeric/midpoint.h
__numeric/partial_sum.h
- __numeric/pstl_reduce.h
- __numeric/pstl_transform_reduce.h
+ __numeric/pstl.h
__numeric/reduce.h
__numeric/saturation_arithmetic.h
__numeric/transform_exclusive_scan.h
diff --git a/libcxx/include/__algorithm/copy_move_common.h b/libcxx/include/__algorithm/copy_move_common.h
index 12a26c6d6a64..8a98451a8f96 100644
--- a/libcxx/include/__algorithm/copy_move_common.h
+++ b/libcxx/include/__algorithm/copy_move_common.h
@@ -21,7 +21,6 @@
#include <__type_traits/is_constant_evaluated.h>
#include <__type_traits/is_constructible.h>
#include <__type_traits/is_trivially_assignable.h>
-#include <__type_traits/is_trivially_copyable.h>
#include <__type_traits/is_volatile.h>
#include <__utility/move.h>
#include <__utility/pair.h>
diff --git a/libcxx/include/__algorithm/pstl.h b/libcxx/include/__algorithm/pstl.h
new file mode 100644
index 000000000000..68b4e3e77ec6
--- /dev/null
+++ b/libcxx/include/__algorithm/pstl.h
@@ -0,0 +1,1366 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___ALGORITHM_PSTL_H
+#define _LIBCPP___ALGORITHM_PSTL_H
+
+#include <__algorithm/copy_n.h>
+#include <__algorithm/count.h>
+#include <__algorithm/equal.h>
+#include <__algorithm/fill_n.h>
+#include <__algorithm/for_each.h>
+#include <__algorithm/for_each_n.h>
+#include <__algorithm/pstl_frontend_dispatch.h>
+#include <__atomic/atomic.h>
+#include <__config>
+#include <__functional/identity.h>
+#include <__functional/operations.h>
+#include <__iterator/concepts.h>
+#include <__iterator/cpp17_iterator_concepts.h>
+#include <__iterator/iterator_traits.h>
+#include <__numeric/pstl.h>
+#include <__pstl/configuration.h>
+#include <__type_traits/enable_if.h>
+#include <__type_traits/is_constant_evaluated.h>
+#include <__type_traits/is_execution_policy.h>
+#include <__type_traits/is_trivially_copyable.h>
+#include <__type_traits/remove_cvref.h>
+#include <__utility/empty.h>
+#include <__utility/move.h>
+#include <optional>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_PUSH_MACROS
+#include <__undef_macros>
+
+#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Predicate,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__remove_cvref_t<_ForwardIterator>>
+__find_if(_ExecutionPolicy&&, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) noexcept {
+ using _Backend = typename __select_backend<_RawPolicy>::type;
+ return std::__pstl_find_if<_RawPolicy>(_Backend{}, std::move(__first), std::move(__last), std::move(__pred));
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Predicate,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI _ForwardIterator
+find_if(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "find_if requires ForwardIterators");
+ auto __res = std::__find_if(__policy, std::move(__first), std::move(__last), std::move(__pred));
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *std::move(__res);
+}
+
+template <class>
+void __pstl_any_of(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Predicate,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<bool> __any_of(
+ _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_any_of, _RawPolicy),
+ [&](_ForwardIterator __g_first, _ForwardIterator __g_last, _Predicate __g_pred) -> optional<bool> {
+ auto __res = std::__find_if(__policy, __g_first, __g_last, __g_pred);
+ if (!__res)
+ return nullopt;
+ return *__res != __g_last;
+ },
+ std::move(__first),
+ std::move(__last),
+ std::move(__pred));
+}
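+
+// __pstl_frontend_dispatch prefers a backend-provided __pstl_any_of
+// customization when one is well-formed for _RawPolicy; otherwise the generic
+// lambda above runs, reducing any_of to __find_if. A sketch of that fallback:
+//
+//   auto __res = std::__find_if(__policy, __first, __last, __pred);
+//   return __res ? optional<bool>(*__res != __last) : nullopt;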
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Predicate,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool
+any_of(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "any_of requires a ForwardIterator");
+ auto __res = std::__any_of(__policy, std::move(__first), std::move(__last), std::move(__pred));
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *std::move(__res);
+}
+
+template <class>
+void __pstl_all_of(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Pred,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<bool>
+__all_of(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Pred&& __pred) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_all_of, _RawPolicy),
+ [&](_ForwardIterator __g_first, _ForwardIterator __g_last, _Pred __g_pred) -> optional<bool> {
+ auto __res = std::__any_of(__policy, __g_first, __g_last, [&](__iter_reference<_ForwardIterator> __value) {
+ return !__g_pred(__value);
+ });
+ if (!__res)
+ return nullopt;
+ return !*__res;
+ },
+ std::move(__first),
+ std::move(__last),
+ std::move(__pred));
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Pred,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool
+all_of(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Pred __pred) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "all_of requires a ForwardIterator");
+ auto __res = std::__all_of(__policy, std::move(__first), std::move(__last), std::move(__pred));
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *std::move(__res);
+}
+
+template <class>
+void __pstl_none_of(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Pred,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<bool>
+__none_of(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Pred&& __pred) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_none_of, _RawPolicy),
+ [&](_ForwardIterator __g_first, _ForwardIterator __g_last, _Pred __g_pred) -> optional<bool> {
+ auto __res = std::__any_of(__policy, __g_first, __g_last, __g_pred);
+ if (!__res)
+ return nullopt;
+ return !*__res;
+ },
+ std::move(__first),
+ std::move(__last),
+ std::move(__pred));
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Pred,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool
+none_of(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Pred __pred) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "none_of requires a ForwardIterator");
+ auto __res = std::__none_of(__policy, std::move(__first), std::move(__last), std::move(__pred));
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *std::move(__res);
+}
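+
+// Note the reductions used above: all_of(__pred) is computed as
+// !any_of(not __pred) and none_of(__pred) as !any_of(__pred), so a backend
+// that accelerates find_if/any_of speeds up all three algorithms.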
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _ForwardOutIterator,
+ class _UnaryOperation,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__remove_cvref_t<_ForwardOutIterator>>
+__transform(_ExecutionPolicy&&,
+ _ForwardIterator&& __first,
+ _ForwardIterator&& __last,
+ _ForwardOutIterator&& __result,
+ _UnaryOperation&& __op) noexcept {
+ using _Backend = typename __select_backend<_RawPolicy>::type;
+ return std::__pstl_transform<_RawPolicy>(
+ _Backend{}, std::move(__first), std::move(__last), std::move(__result), std::move(__op));
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _ForwardOutIterator,
+ class _UnaryOperation,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator transform(
+ _ExecutionPolicy&& __policy,
+ _ForwardIterator __first,
+ _ForwardIterator __last,
+ _ForwardOutIterator __result,
+ _UnaryOperation __op) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "transform requires ForwardIterators");
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "transform requires an OutputIterator");
+ _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(
+ _ForwardOutIterator, decltype(__op(*__first)), "transform requires an OutputIterator");
+ auto __res = std::__transform(__policy, std::move(__first), std::move(__last), std::move(__result), std::move(__op));
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *std::move(__res);
+}
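+
+// Illustrative usage of the unary overload (hypothetical ranges __in and
+// __out, with __out sized to hold the result):
+//
+//   std::transform(std::execution::par, __in.begin(), __in.end(),
+//                  __out.begin(), [](int __x) { return __x * 2; });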
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator1,
+ class _ForwardIterator2,
+ class _ForwardOutIterator,
+ class _BinaryOperation,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__remove_cvref_t<_ForwardOutIterator>>
+__transform(_ExecutionPolicy&&,
+ _ForwardIterator1&& __first1,
+ _ForwardIterator1&& __last1,
+ _ForwardIterator2&& __first2,
+ _ForwardOutIterator&& __result,
+ _BinaryOperation&& __op) noexcept {
+ using _Backend = typename __select_backend<_RawPolicy>::type;
+ return std::__pstl_transform<_RawPolicy>(
+ _Backend{}, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__result), std::move(__op));
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator1,
+ class _ForwardIterator2,
+ class _ForwardOutIterator,
+ class _BinaryOperation,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator transform(
+ _ExecutionPolicy&& __policy,
+ _ForwardIterator1 __first1,
+ _ForwardIterator1 __last1,
+ _ForwardIterator2 __first2,
+ _ForwardOutIterator __result,
+ _BinaryOperation __op) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "transform requires ForwardIterators");
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "transform requires ForwardIterators");
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "transform requires an OutputIterator");
+ _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(
+ _ForwardOutIterator, decltype(__op(*__first1, *__first2)), "transform requires an OutputIterator");
+ auto __res = std::__transform(
+ __policy, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__result), std::move(__op));
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *std::move(__res);
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Function,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty>
+__for_each(_ExecutionPolicy&&, _ForwardIterator&& __first, _ForwardIterator&& __last, _Function&& __func) noexcept {
+ using _Backend = typename __select_backend<_RawPolicy>::type;
+ return std::__pstl_for_each<_RawPolicy>(_Backend{}, std::move(__first), std::move(__last), std::move(__func));
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Function,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI void
+for_each(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Function __func) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "for_each requires ForwardIterators");
+ if (!std::__for_each(__policy, std::move(__first), std::move(__last), std::move(__func)))
+ std::__throw_bad_alloc();
+}
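+
+// Illustrative usage (hypothetical container __v):
+//
+//   std::for_each(std::execution::par, __v.begin(), __v.end(),
+//                 [](int& __x) { ++__x; });
+//
+// Unlike the serial overload, the parallel for_each returns void, so the
+// frontend only has to check the optional<__empty> for allocation failure.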
+
+// TODO: Use the std::copy/move shenanigans to forward to std::memmove
+
+template <class>
+void __pstl_copy(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _ForwardOutIterator,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator>
+__copy(_ExecutionPolicy&& __policy,
+ _ForwardIterator&& __first,
+ _ForwardIterator&& __last,
+ _ForwardOutIterator&& __result) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_copy, _RawPolicy),
+ [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last, _ForwardOutIterator __g_result) {
+ return std::__transform(__policy, __g_first, __g_last, __g_result, __identity());
+ },
+ std::move(__first),
+ std::move(__last),
+ std::move(__result));
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _ForwardOutIterator,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator
+copy(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _ForwardOutIterator __result) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(
+ _ForwardIterator, "copy(first, last, result) requires [first, last) to be ForwardIterators");
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(
+ _ForwardOutIterator, "copy(first, last, result) requires result to be a ForwardIterator");
+ _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(
+ _ForwardOutIterator, decltype(*__first), "copy(first, last, result) requires result to be an OutputIterator");
+ auto __res = std::__copy(__policy, std::move(__first), std::move(__last), std::move(__result));
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *std::move(__res);
+}
+
+template <class>
+void __pstl_copy_n(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _ForwardOutIterator,
+ class _Size,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator> __copy_n(
+ _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _Size&& __n, _ForwardOutIterator&& __result) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_copy_n, _RawPolicy),
+      [&__policy](
+          _ForwardIterator __g_first, _Size __g_n, _ForwardOutIterator __g_result) -> optional<_ForwardOutIterator> {
+        if constexpr (__has_random_access_iterator_category_or_concept<_ForwardIterator>::value) {
+          auto __g_last = __g_first + __g_n; // compute the end before __g_first is moved from
+          return std::__copy(__policy, std::move(__g_first), std::move(__g_last), std::move(__g_result));
+        } else {
+          (void)__policy;
+          return std::copy_n(__g_first, __g_n, __g_result);
+        }
+ },
+ std::move(__first),
+ std::move(__n),
+ std::move(__result));
+}
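+
+// With random-access iterators the end of the range is __first + __n, so
+// __copy_n can reuse the potentially parallel __copy; with plain forward
+// iterators the end is unknown without walking the range, so it falls back to
+// the serial std::copy_n.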
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _ForwardOutIterator,
+ class _Size,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator
+copy_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __n, _ForwardOutIterator __result) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(
+ _ForwardIterator, "copy_n(first, n, result) requires first to be a ForwardIterator");
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(
+ _ForwardOutIterator, "copy_n(first, n, result) requires result to be a ForwardIterator");
+ _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(
+ _ForwardOutIterator, decltype(*__first), "copy_n(first, n, result) requires result to be an OutputIterator");
+ auto __res = std::__copy_n(__policy, std::move(__first), std::move(__n), std::move(__result));
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *std::move(__res);
+}
+
+template <class>
+void __pstl_count_if(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Predicate,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__iter_diff_t<_ForwardIterator>> __count_if(
+ _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) noexcept {
+ using __diff_t = __iter_diff_t<_ForwardIterator>;
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_count_if, _RawPolicy),
+ [&](_ForwardIterator __g_first, _ForwardIterator __g_last, _Predicate __g_pred) -> optional<__diff_t> {
+ return std::__transform_reduce(
+ __policy,
+ std::move(__g_first),
+ std::move(__g_last),
+ __diff_t(),
+ std::plus{},
+ [&](__iter_reference<_ForwardIterator> __element) -> bool { return __g_pred(__element); });
+ },
+ std::move(__first),
+ std::move(__last),
+ std::move(__pred));
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Predicate,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI __iter_diff_t<_ForwardIterator>
+count_if(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(
+ _ForwardIterator, "count_if(first, last, pred) requires [first, last) to be ForwardIterators");
+ auto __res = std::__count_if(__policy, std::move(__first), std::move(__last), std::move(__pred));
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *std::move(__res);
+}
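+
+// count_if is phrased as a map-reduce: each element is mapped to a bool by the
+// predicate and the results are summed with std::plus. An illustrative serial
+// equivalent of that fallback:
+//
+//   __diff_t __n = 0;
+//   for (; __first != __last; ++__first)
+//     __n += static_cast<bool>(__pred(*__first));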
+
+template <class>
+void __pstl_count(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Tp,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__iter_diff_t<_ForwardIterator>> __count(
+ _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, const _Tp& __value) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_count, _RawPolicy),
+ [&](_ForwardIterator __g_first, _ForwardIterator __g_last, const _Tp& __g_value)
+ -> optional<__iter_diff_t<_ForwardIterator>> {
+ return std::count_if(__policy, __g_first, __g_last, [&](__iter_reference<_ForwardIterator> __v) {
+ return __v == __g_value;
+ });
+ },
+ std::forward<_ForwardIterator>(__first),
+ std::forward<_ForwardIterator>(__last),
+ __value);
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Tp,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI __iter_diff_t<_ForwardIterator>
+count(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(
+ _ForwardIterator, "count(first, last, val) requires [first, last) to be ForwardIterators");
+ auto __res = std::__count(__policy, std::move(__first), std::move(__last), __value);
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *__res;
+}
+
+template <class>
+void __pstl_equal(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator1,
+ class _ForwardIterator2,
+ class _Pred,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<bool>
+__equal(_ExecutionPolicy&& __policy,
+ _ForwardIterator1&& __first1,
+ _ForwardIterator1&& __last1,
+ _ForwardIterator2&& __first2,
+ _Pred&& __pred) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_equal, _RawPolicy),
+ [&__policy](
+ _ForwardIterator1 __g_first1, _ForwardIterator1 __g_last1, _ForwardIterator2 __g_first2, _Pred __g_pred) {
+ return std::__transform_reduce(
+ __policy,
+ std::move(__g_first1),
+ std::move(__g_last1),
+ std::move(__g_first2),
+ true,
+ std::logical_and{},
+ std::move(__g_pred));
+ },
+ std::move(__first1),
+ std::move(__last1),
+ std::move(__first2),
+ std::move(__pred));
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator1,
+ class _ForwardIterator2,
+ class _Pred,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI bool
+equal(_ExecutionPolicy&& __policy,
+ _ForwardIterator1 __first1,
+ _ForwardIterator1 __last1,
+ _ForwardIterator2 __first2,
+ _Pred __pred) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "equal requires ForwardIterators");
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "equal requires ForwardIterators");
+ auto __res = std::__equal(__policy, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__pred));
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *__res;
+}
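+
+// The fixed-length equal fallback is likewise a transform-reduce: pairs of
+// elements are compared with the predicate and the results are folded with
+// std::logical_and. Unlike the serial algorithm, this reduction does not
+// short-circuit on the first mismatch. Illustrative usage:
+//
+//   bool __same = std::equal(std::execution::par, __a.begin(), __a.end(),
+//                            __b.begin(), std::equal_to{});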
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator1,
+ class _ForwardIterator2,
+ enable_if_t<is_execution_policy_v<__remove_cvref_t<_ExecutionPolicy>>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI bool
+equal(_ExecutionPolicy&& __policy, _ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "equal requires ForwardIterators");
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "equal requires ForwardIterators");
+ auto __res = std::__equal(__policy, std::move(__first1), std::move(__last1), std::move(__first2), std::equal_to{});
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *__res;
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator1,
+ class _ForwardIterator2,
+ class _Pred,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<bool>
+__equal(_ExecutionPolicy&& __policy,
+ _ForwardIterator1&& __first1,
+ _ForwardIterator1&& __last1,
+ _ForwardIterator2&& __first2,
+ _ForwardIterator2&& __last2,
+ _Pred&& __pred) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_equal, _RawPolicy),
+ [&__policy](_ForwardIterator1 __g_first1,
+ _ForwardIterator1 __g_last1,
+ _ForwardIterator2 __g_first2,
+ _ForwardIterator2 __g_last2,
+ _Pred __g_pred) -> optional<bool> {
+ if constexpr (__has_random_access_iterator_category<_ForwardIterator1>::value &&
+ __has_random_access_iterator_category<_ForwardIterator2>::value) {
+ if (__g_last1 - __g_first1 != __g_last2 - __g_first2)
+ return false;
+ return std::__equal(
+ __policy, std::move(__g_first1), std::move(__g_last1), std::move(__g_first2), std::move(__g_pred));
+ } else {
+ (void)__policy; // Avoid unused lambda capture warning
+ return std::equal(
+ std::move(__g_first1),
+ std::move(__g_last1),
+ std::move(__g_first2),
+ std::move(__g_last2),
+ std::move(__g_pred));
+ }
+ },
+ std::move(__first1),
+ std::move(__last1),
+ std::move(__first2),
+ std::move(__last2),
+ std::move(__pred));
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator1,
+ class _ForwardIterator2,
+ class _Pred,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI bool
+equal(_ExecutionPolicy&& __policy,
+ _ForwardIterator1 __first1,
+ _ForwardIterator1 __last1,
+ _ForwardIterator2 __first2,
+ _ForwardIterator2 __last2,
+ _Pred __pred) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "equal requires ForwardIterators");
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "equal requires ForwardIterators");
+ auto __res = std::__equal(
+ __policy, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__last2), std::move(__pred));
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *__res;
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator1,
+ class _ForwardIterator2,
+ enable_if_t<is_execution_policy_v<__remove_cvref_t<_ExecutionPolicy>>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI bool
+equal(_ExecutionPolicy&& __policy,
+ _ForwardIterator1 __first1,
+ _ForwardIterator1 __last1,
+ _ForwardIterator2 __first2,
+ _ForwardIterator2 __last2) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "equal requires ForwardIterators");
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "equal requires ForwardIterators");
+ auto __res = std::__equal(
+ __policy, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__last2), std::equal_to{});
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *__res;
+}
+
+template <class>
+void __pstl_fill(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Tp,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> __fill(
+ _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, const _Tp& __value) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_fill, _RawPolicy),
+ [&](_ForwardIterator __g_first, _ForwardIterator __g_last, const _Tp& __g_value) {
+ return std::__for_each(__policy, __g_first, __g_last, [&](__iter_reference<_ForwardIterator> __element) {
+ __element = __g_value;
+ });
+ },
+ std::forward<_ForwardIterator>(__first),
+ std::forward<_ForwardIterator>(__last),
+ __value);
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Tp,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI void
+fill(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "fill requires ForwardIterators");
+ if (!std::__fill(__policy, std::move(__first), std::move(__last), __value))
+ std::__throw_bad_alloc();
+}
+
+template <class>
+void __pstl_fill_n(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _SizeT,
+ class _Tp,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty>
+__fill_n(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _SizeT&& __n, const _Tp& __value) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_fill_n, _RawPolicy),
+ [&](_ForwardIterator __g_first, _SizeT __g_n, const _Tp& __g_value) {
+ if constexpr (__has_random_access_iterator_category_or_concept<_ForwardIterator>::value)
+ std::fill(__policy, __g_first, __g_first + __g_n, __g_value);
+ else
+ std::fill_n(__g_first, __g_n, __g_value);
+ return optional<__empty>{__empty{}};
+ },
+ std::move(__first),
+ std::move(__n),
+ __value);
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _SizeT,
+ class _Tp,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI void
+fill_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _SizeT __n, const _Tp& __value) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "fill_n requires ForwardIterators");
+ if (!std::__fill_n(__policy, std::move(__first), std::move(__n), __value))
+ std::__throw_bad_alloc();
+}
+
+template <class>
+void __pstl_find_if_not(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Predicate,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__remove_cvref_t<_ForwardIterator>> __find_if_not(
+ _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_find_if_not, _RawPolicy),
+ [&](_ForwardIterator&& __g_first, _ForwardIterator&& __g_last, _Predicate&& __g_pred)
+ -> optional<__remove_cvref_t<_ForwardIterator>> {
+ return std::__find_if(
+ __policy, __g_first, __g_last, [&](__iter_reference<__remove_cvref_t<_ForwardIterator>> __value) {
+ return !__g_pred(__value);
+ });
+ },
+ std::forward<_ForwardIterator>(__first),
+ std::forward<_ForwardIterator>(__last),
+ std::forward<_Predicate>(__pred));
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Predicate,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI _ForwardIterator
+find_if_not(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "find_if_not requires ForwardIterators");
+ auto __res = std::__find_if_not(__policy, std::move(__first), std::move(__last), std::move(__pred));
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *std::move(__res);
+}
+
+template <class>
+void __pstl_find(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Tp,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__remove_cvref_t<_ForwardIterator>> __find(
+ _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, const _Tp& __value) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_find, _RawPolicy),
+ [&](_ForwardIterator __g_first, _ForwardIterator __g_last, const _Tp& __g_value) -> optional<_ForwardIterator> {
+ return std::find_if(
+ __policy, __g_first, __g_last, [&](__iter_reference<__remove_cvref_t<_ForwardIterator>> __element) {
+ return __element == __g_value;
+ });
+ },
+ std::forward<_ForwardIterator>(__first),
+ std::forward<_ForwardIterator>(__last),
+ __value);
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Tp,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI _ForwardIterator
+find(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "find requires ForwardIterators");
+ auto __res = std::__find(__policy, std::move(__first), std::move(__last), __value);
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *std::move(__res);
+}
+
+template <class>
+void __pstl_for_each_n(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Size,
+ class _Function,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty>
+__for_each_n(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _Size&& __size, _Function&& __func) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_for_each_n, _RawPolicy),
+ [&](_ForwardIterator __g_first, _Size __g_size, _Function __g_func) -> optional<__empty> {
+ if constexpr (__has_random_access_iterator_category_or_concept<_ForwardIterator>::value) {
+          auto __g_last = __g_first + __g_size; // compute the end before __g_first is moved from
+          std::for_each(__policy, std::move(__g_first), __g_last, std::move(__g_func));
+ return __empty{};
+ } else {
+ std::for_each_n(std::move(__g_first), __g_size, std::move(__g_func));
+ return __empty{};
+ }
+ },
+ std::move(__first),
+ std::move(__size),
+ std::move(__func));
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Size,
+ class _Function,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI void
+for_each_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __size, _Function __func) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "for_each_n requires a ForwardIterator");
+ auto __res = std::__for_each_n(__policy, std::move(__first), std::move(__size), std::move(__func));
+ if (!__res)
+ std::__throw_bad_alloc();
+}
+
+template <class>
+void __pstl_generate(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Generator,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> __generate(
+ _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Generator&& __gen) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_generate, _RawPolicy),
+ [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last, _Generator __g_gen) {
+ return std::__for_each(
+ __policy, std::move(__g_first), std::move(__g_last), [&](__iter_reference<_ForwardIterator> __element) {
+ __element = __g_gen();
+ });
+ },
+ std::move(__first),
+ std::move(__last),
+ std::move(__gen));
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Generator,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI void
+generate(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Generator __gen) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "generate requires ForwardIterators");
+ if (!std::__generate(__policy, std::move(__first), std::move(__last), std::move(__gen)))
+ std::__throw_bad_alloc();
+}
+
+template <class>
+void __pstl_generate_n(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Size,
+ class _Generator,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty>
+__generate_n(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _Size&& __n, _Generator&& __gen) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_generate_n, _RawPolicy),
+ [&__policy](_ForwardIterator __g_first, _Size __g_n, _Generator __g_gen) {
+ return std::__for_each_n(
+ __policy, std::move(__g_first), std::move(__g_n), [&](__iter_reference<_ForwardIterator> __element) {
+ __element = __g_gen();
+ });
+ },
+ std::move(__first),
+ __n,
+ std::move(__gen));
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Size,
+ class _Generator,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI void
+generate_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __n, _Generator __gen) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "generate_n requires a ForwardIterator");
+ if (!std::__generate_n(__policy, std::move(__first), std::move(__n), std::move(__gen)))
+ std::__throw_bad_alloc();
+}
+
+template <class>
+void __pstl_is_partitioned(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Predicate,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<bool> __is_partitioned(
+ _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_is_partitioned, _RawPolicy),
+ [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last, _Predicate __g_pred) {
+ __g_first = std::find_if_not(__policy, __g_first, __g_last, __g_pred);
+ if (__g_first == __g_last)
+ return true;
+ ++__g_first;
+ return std::none_of(__policy, __g_first, __g_last, __g_pred);
+ },
+ std::move(__first),
+ std::move(__last),
+ std::move(__pred));
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Predicate,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool
+is_partitioned(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "is_partitioned requires ForwardIterators");
+ auto __res = std::__is_partitioned(__policy, std::move(__first), std::move(__last), std::move(__pred));
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *std::move(__res);
+}
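+
+// The fallback finds the first element that fails the predicate; the range is
+// partitioned iff no element past that point satisfies it. Illustrative usage
+// (hypothetical vector __v):
+//
+//   bool __ok = std::is_partitioned(std::execution::par, __v.begin(),
+//                                   __v.end(), [](int __x) { return __x < 0; });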
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator1,
+ class _ForwardIterator2,
+ class _ForwardOutIterator,
+ class _Comp,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator>
+__merge(_ExecutionPolicy&&,
+ _ForwardIterator1&& __first1,
+ _ForwardIterator1&& __last1,
+ _ForwardIterator2&& __first2,
+ _ForwardIterator2&& __last2,
+ _ForwardOutIterator&& __result,
+ _Comp&& __comp) noexcept {
+ using _Backend = typename __select_backend<_RawPolicy>::type;
+ return std::__pstl_merge<_RawPolicy>(
+ _Backend{},
+ std::forward<_ForwardIterator1>(__first1),
+ std::forward<_ForwardIterator1>(__last1),
+ std::forward<_ForwardIterator2>(__first2),
+ std::forward<_ForwardIterator2>(__last2),
+ std::forward<_ForwardOutIterator>(__result),
+ std::forward<_Comp>(__comp));
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator1,
+ class _ForwardIterator2,
+ class _ForwardOutIterator,
+ class _Comp = std::less<>,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator
+merge(_ExecutionPolicy&& __policy,
+ _ForwardIterator1 __first1,
+ _ForwardIterator1 __last1,
+ _ForwardIterator2 __first2,
+ _ForwardIterator2 __last2,
+ _ForwardOutIterator __result,
+ _Comp __comp = {}) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "merge requires ForwardIterators");
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "merge requires ForwardIterators");
+ _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, decltype(*__first1), "merge requires an OutputIterator");
+ _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, decltype(*__first2), "merge requires an OutputIterator");
+ auto __res = std::__merge(
+ __policy,
+ std::move(__first1),
+ std::move(__last1),
+ std::move(__first2),
+ std::move(__last2),
+ std::move(__result),
+ std::move(__comp));
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *std::move(__res);
+}
+
+// TODO: Use the std::copy/move shenanigans to forward to std::memmove
+// Investigate whether we want to still forward to std::transform(policy)
+// in that case for the execution::par part, or whether we actually want
+// to run everything serially in that case.
+
+template <class>
+void __pstl_move(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _ForwardOutIterator,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator>
+__move(_ExecutionPolicy&& __policy,
+ _ForwardIterator&& __first,
+ _ForwardIterator&& __last,
+ _ForwardOutIterator&& __result) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_move, _RawPolicy),
+ [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last, _ForwardOutIterator __g_result) {
+ return std::__transform(__policy, __g_first, __g_last, __g_result, [](auto&& __v) { return std::move(__v); });
+ },
+ std::move(__first),
+ std::move(__last),
+ std::move(__result));
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _ForwardOutIterator,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator
+move(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _ForwardOutIterator __result) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "move requires ForwardIterators");
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "move requires an OutputIterator");
+ _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(
+ _ForwardOutIterator, decltype(std::move(*__first)), "move requires an OutputIterator");
+ auto __res = std::__move(__policy, std::move(__first), std::move(__last), std::move(__result));
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *__res;
+}
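+
+// move is implemented as a transform whose lambda casts each element to an
+// rvalue, so moved-from elements are left in a valid but unspecified state,
+// exactly as with the serial std::move algorithm.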
+
+template <class>
+void __pstl_replace_if(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Pred,
+ class _Tp,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty>
+__replace_if(_ExecutionPolicy&& __policy,
+ _ForwardIterator&& __first,
+ _ForwardIterator&& __last,
+ _Pred&& __pred,
+ const _Tp& __new_value) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_replace_if, _RawPolicy),
+ [&__policy](
+ _ForwardIterator&& __g_first, _ForwardIterator&& __g_last, _Pred&& __g_pred, const _Tp& __g_new_value) {
+ std::for_each(__policy, __g_first, __g_last, [&](__iter_reference<_ForwardIterator> __element) {
+ if (__g_pred(__element))
+ __element = __g_new_value;
+ });
+ return optional<__empty>{__empty{}};
+ },
+ std::move(__first),
+ std::move(__last),
+ std::move(__pred),
+ __new_value);
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Pred,
+ class _Tp,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI void
+replace_if(_ExecutionPolicy&& __policy,
+ _ForwardIterator __first,
+ _ForwardIterator __last,
+ _Pred __pred,
+ const _Tp& __new_value) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "replace_if requires ForwardIterators");
+ auto __res = std::__replace_if(__policy, std::move(__first), std::move(__last), std::move(__pred), __new_value);
+ if (!__res)
+ std::__throw_bad_alloc();
+}
+
+template <class>
+void __pstl_replace(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Tp,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty>
+__replace(_ExecutionPolicy&& __policy,
+ _ForwardIterator&& __first,
+ _ForwardIterator&& __last,
+ const _Tp& __old_value,
+ const _Tp& __new_value) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_replace, _RawPolicy),
+ [&__policy](
+ _ForwardIterator __g_first, _ForwardIterator __g_last, const _Tp& __g_old_value, const _Tp& __g_new_value) {
+ return std::__replace_if(
+ __policy,
+ std::move(__g_first),
+ std::move(__g_last),
+ [&](__iter_reference<_ForwardIterator> __element) { return __element == __g_old_value; },
+ __g_new_value);
+ },
+ std::forward<_ForwardIterator>(__first),
+ std::forward<_ForwardIterator>(__last),
+ __old_value,
+ __new_value);
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Tp,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI void
+replace(_ExecutionPolicy&& __policy,
+ _ForwardIterator __first,
+ _ForwardIterator __last,
+ const _Tp& __old_value,
+ const _Tp& __new_value) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "replace requires ForwardIterators");
+ if (!std::__replace(__policy, std::move(__first), std::move(__last), __old_value, __new_value))
+ std::__throw_bad_alloc();
+}
+
+template <class>
+void __pstl_replace_copy_if(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _ForwardOutIterator,
+ class _Pred,
+ class _Tp,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> __replace_copy_if(
+ _ExecutionPolicy&& __policy,
+ _ForwardIterator&& __first,
+ _ForwardIterator&& __last,
+ _ForwardOutIterator&& __result,
+ _Pred&& __pred,
+ const _Tp& __new_value) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_replace_copy_if, _RawPolicy),
+ [&__policy](_ForwardIterator __g_first,
+ _ForwardIterator __g_last,
+ _ForwardOutIterator __g_result,
+ _Pred __g_pred,
+ const _Tp& __g_new_value) -> optional<__empty> {
+ if (!std::__transform(
+ __policy, __g_first, __g_last, __g_result, [&](__iter_reference<_ForwardIterator> __element) {
+ return __g_pred(__element) ? __g_new_value : __element;
+ }))
+ return nullopt;
+ return __empty{};
+ },
+ std::move(__first),
+ std::move(__last),
+ std::move(__result),
+ std::move(__pred),
+ __new_value);
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _ForwardOutIterator,
+ class _Pred,
+ class _Tp,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI void replace_copy_if(
+ _ExecutionPolicy&& __policy,
+ _ForwardIterator __first,
+ _ForwardIterator __last,
+ _ForwardOutIterator __result,
+ _Pred __pred,
+ const _Tp& __new_value) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "replace_copy_if requires ForwardIterators");
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "replace_copy_if requires ForwardIterators");
+ _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(
+ _ForwardOutIterator, decltype(*__first), "replace_copy_if requires an OutputIterator");
+  _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, const _Tp&, "replace_copy_if requires an OutputIterator");
+ if (!std::__replace_copy_if(
+ __policy, std::move(__first), std::move(__last), std::move(__result), std::move(__pred), __new_value))
+ std::__throw_bad_alloc();
+}
+
+template <class>
+void __pstl_replace_copy(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _ForwardOutIterator,
+ class _Tp,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> __replace_copy(
+ _ExecutionPolicy&& __policy,
+ _ForwardIterator&& __first,
+ _ForwardIterator&& __last,
+ _ForwardOutIterator&& __result,
+ const _Tp& __old_value,
+ const _Tp& __new_value) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_replace_copy, _RawPolicy),
+ [&__policy](_ForwardIterator __g_first,
+ _ForwardIterator __g_last,
+ _ForwardOutIterator __g_result,
+ const _Tp& __g_old_value,
+ const _Tp& __g_new_value) {
+ return std::__replace_copy_if(
+ __policy,
+ std::move(__g_first),
+ std::move(__g_last),
+ std::move(__g_result),
+ [&](__iter_reference<_ForwardIterator> __element) { return __element == __g_old_value; },
+ __g_new_value);
+ },
+ std::move(__first),
+ std::move(__last),
+ std::move(__result),
+ __old_value,
+ __new_value);
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _ForwardOutIterator,
+ class _Tp,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI void replace_copy(
+ _ExecutionPolicy&& __policy,
+ _ForwardIterator __first,
+ _ForwardIterator __last,
+ _ForwardOutIterator __result,
+ const _Tp& __old_value,
+ const _Tp& __new_value) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "replace_copy requires ForwardIterators");
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "replace_copy requires ForwardIterators");
+ _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(
+ _ForwardOutIterator, decltype(*__first), "replace_copy requires an OutputIterator");
+ _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, const _Tp&, "replace_copy requires an OutputIterator");
+ if (!std::__replace_copy(
+ __policy, std::move(__first), std::move(__last), std::move(__result), __old_value, __new_value))
+ std::__throw_bad_alloc();
+}
+
+template <class>
+void __pstl_rotate_copy(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _ForwardOutIterator,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator>
+__rotate_copy(_ExecutionPolicy&& __policy,
+ _ForwardIterator&& __first,
+ _ForwardIterator&& __middle,
+ _ForwardIterator&& __last,
+ _ForwardOutIterator&& __result) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_rotate_copy, _RawPolicy),
+ [&__policy](_ForwardIterator __g_first,
+ _ForwardIterator __g_middle,
+ _ForwardIterator __g_last,
+ _ForwardOutIterator __g_result) -> optional<_ForwardOutIterator> {
+ auto __result_mid =
+ std::__copy(__policy, _ForwardIterator(__g_middle), std::move(__g_last), std::move(__g_result));
+ if (!__result_mid)
+ return nullopt;
+ return std::__copy(__policy, std::move(__g_first), std::move(__g_middle), *std::move(__result_mid));
+ },
+ std::move(__first),
+ std::move(__middle),
+ std::move(__last),
+ std::move(__result));
+}
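+
+// rotate_copy is two copies: [__middle, __last) goes to the front of the
+// output, then [__first, __middle) is appended after it, producing the rotated
+// sequence without mutating the input range.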
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _ForwardOutIterator,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator rotate_copy(
+ _ExecutionPolicy&& __policy,
+ _ForwardIterator __first,
+ _ForwardIterator __middle,
+ _ForwardIterator __last,
+ _ForwardOutIterator __result) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "rotate_copy requires ForwardIterators");
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "rotate_copy requires ForwardIterators");
+ _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(
+ _ForwardOutIterator, decltype(*__first), "rotate_copy requires an OutputIterator");
+ auto __res =
+ std::__rotate_copy(__policy, std::move(__first), std::move(__middle), std::move(__last), std::move(__result));
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *__res;
+}
+
+template <class _ExecutionPolicy,
+ class _RandomAccessIterator,
+ class _Comp = less<>,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> __stable_sort(
+ _ExecutionPolicy&&, _RandomAccessIterator&& __first, _RandomAccessIterator&& __last, _Comp&& __comp = {}) noexcept {
+ using _Backend = typename __select_backend<_RawPolicy>::type;
+ return std::__pstl_stable_sort<_RawPolicy>(_Backend{}, std::move(__first), std::move(__last), std::move(__comp));
+}
+
+template <class _ExecutionPolicy,
+ class _RandomAccessIterator,
+ class _Comp = less<>,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI void stable_sort(
+ _ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp = {}) {
+ _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(_RandomAccessIterator, "stable_sort requires RandomAccessIterators");
+ if (!std::__stable_sort(__policy, std::move(__first), std::move(__last), std::move(__comp)))
+ std::__throw_bad_alloc();
+}
+
+template <class>
+void __pstl_sort(); // declaration needed for the frontend dispatch below
+
+template <class _ExecutionPolicy,
+ class _RandomAccessIterator,
+ class _Comp,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty>
+__sort(_ExecutionPolicy&& __policy,
+ _RandomAccessIterator&& __first,
+ _RandomAccessIterator&& __last,
+ _Comp&& __comp) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_sort, _RawPolicy),
+ [&__policy](_RandomAccessIterator __g_first, _RandomAccessIterator __g_last, _Comp __g_comp) {
+ std::stable_sort(__policy, std::move(__g_first), std::move(__g_last), std::move(__g_comp));
+ return optional<__empty>{__empty{}};
+ },
+ std::forward<_RandomAccessIterator>(__first),
+ std::forward<_RandomAccessIterator>(__last),
+ std::forward<_Comp>(__comp));
+}
+
+template <class _ExecutionPolicy,
+ class _RandomAccessIterator,
+ class _Comp,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI void
+sort(_ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp) {
+ _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(_RandomAccessIterator, "sort requires RandomAccessIterators");
+ if (!std::__sort(__policy, std::move(__first), std::move(__last), std::move(__comp)))
+ std::__throw_bad_alloc();
+}
+
+template <class _ExecutionPolicy,
+ class _RandomAccessIterator,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI void
+sort(_ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last) {
+ _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(_RandomAccessIterator, "sort requires RandomAccessIterators");
+ if (!std::__sort(__policy, std::move(__first), std::move(__last), less{}))
+ std::__throw_bad_alloc();
+}
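+
+// Both sort overloads above fall back to stable_sort through the frontend
+// dispatch; a backend with a faster unstable parallel sort can provide a
+// __pstl_sort customization instead.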
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
+
+_LIBCPP_POP_MACROS
+
+#endif // _LIBCPP___ALGORITHM_PSTL_H
diff --git a/libcxx/include/__algorithm/pstl_any_all_none_of.h b/libcxx/include/__algorithm/pstl_any_all_none_of.h
deleted file mode 100644
index e27463dab8a3..000000000000
--- a/libcxx/include/__algorithm/pstl_any_all_none_of.h
+++ /dev/null
@@ -1,152 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ALGORITHM_PSTL_ANY_ALL_NONE_OF_H
-#define _LIBCPP___ALGORITHM_PSTL_ANY_ALL_NONE_OF_H
-
-#include <__algorithm/pstl_find.h>
-#include <__algorithm/pstl_frontend_dispatch.h>
-#include <__config>
-#include <__iterator/cpp17_iterator_concepts.h>
-#include <__type_traits/enable_if.h>
-#include <__type_traits/is_execution_policy.h>
-#include <__type_traits/remove_cvref.h>
-#include <__utility/move.h>
-#include <optional>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
-
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class>
-void __pstl_any_of(); // declaration needed for the frontend dispatch below
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Predicate,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<bool> __any_of(
- _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) noexcept {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_any_of, _RawPolicy),
- [&](_ForwardIterator __g_first, _ForwardIterator __g_last, _Predicate __g_pred) -> optional<bool> {
- auto __res = std::__find_if(__policy, __g_first, __g_last, __g_pred);
- if (!__res)
- return nullopt;
- return *__res != __g_last;
- },
- std::move(__first),
- std::move(__last),
- std::move(__pred));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Predicate,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool
-any_of(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "any_of requires a ForwardIterator");
- auto __res = std::__any_of(__policy, std::move(__first), std::move(__last), std::move(__pred));
- if (!__res)
- std::__throw_bad_alloc();
- return *std::move(__res);
-}
-
-template <class>
-void __pstl_all_of(); // declaration needed for the frontend dispatch below
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Pred,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<bool>
-__all_of(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Pred&& __pred) noexcept {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_all_of, _RawPolicy),
- [&](_ForwardIterator __g_first, _ForwardIterator __g_last, _Pred __g_pred) -> optional<bool> {
- auto __res = std::__any_of(__policy, __g_first, __g_last, [&](__iter_reference<_ForwardIterator> __value) {
- return !__g_pred(__value);
- });
- if (!__res)
- return nullopt;
- return !*__res;
- },
- std::move(__first),
- std::move(__last),
- std::move(__pred));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Pred,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool
-all_of(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Pred __pred) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "all_of requires a ForwardIterator");
- auto __res = std::__all_of(__policy, std::move(__first), std::move(__last), std::move(__pred));
- if (!__res)
- std::__throw_bad_alloc();
- return *std::move(__res);
-}
-
-template <class>
-void __pstl_none_of(); // declaration needed for the frontend dispatch below
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Pred,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<bool>
-__none_of(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Pred&& __pred) noexcept {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_none_of, _RawPolicy),
- [&](_ForwardIterator __g_first, _ForwardIterator __g_last, _Pred __g_pred) -> optional<bool> {
- auto __res = std::__any_of(__policy, __g_first, __g_last, __g_pred);
- if (!__res)
- return nullopt;
- return !*__res;
- },
- std::move(__first),
- std::move(__last),
- std::move(__pred));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Pred,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool
-none_of(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Pred __pred) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "none_of requires a ForwardIterator");
- auto __res = std::__none_of(__policy, std::move(__first), std::move(__last), std::move(__pred));
- if (!__res)
- std::__throw_bad_alloc();
- return *std::move(__res);
-}
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_POP_MACROS
-
-#endif // _LIBCPP___ALGORITHM_PSTL_ANY_ALL_NONE_OF_H
diff --git a/libcxx/include/__algorithm/pstl_copy.h b/libcxx/include/__algorithm/pstl_copy.h
deleted file mode 100644
index 0fcea33c3919..000000000000
--- a/libcxx/include/__algorithm/pstl_copy.h
+++ /dev/null
@@ -1,134 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ALGORITHM_PSTL_COPY_H
-#define _LIBCPP___ALGORITHM_PSTL_COPY_H
-
-#include <__algorithm/copy_n.h>
-#include <__algorithm/pstl_frontend_dispatch.h>
-#include <__algorithm/pstl_transform.h>
-#include <__config>
-#include <__functional/identity.h>
-#include <__iterator/concepts.h>
-#include <__iterator/cpp17_iterator_concepts.h>
-#include <__pstl/configuration.h>
-#include <__type_traits/enable_if.h>
-#include <__type_traits/is_constant_evaluated.h>
-#include <__type_traits/is_execution_policy.h>
-#include <__type_traits/is_trivially_copyable.h>
-#include <__type_traits/remove_cvref.h>
-#include <__utility/move.h>
-#include <optional>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
-
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-// TODO: Use the std::copy/move shenanigans to forward to std::memmove
-
-template <class>
-void __pstl_copy();
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _ForwardOutIterator,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator>
-__copy(_ExecutionPolicy&& __policy,
- _ForwardIterator&& __first,
- _ForwardIterator&& __last,
- _ForwardOutIterator&& __result) noexcept {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_copy, _RawPolicy),
- [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last, _ForwardOutIterator __g_result) {
- return std::__transform(__policy, __g_first, __g_last, __g_result, __identity());
- },
- std::move(__first),
- std::move(__last),
- std::move(__result));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _ForwardOutIterator,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator
-copy(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _ForwardOutIterator __result) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(
- _ForwardIterator, "copy(first, last, result) requires [first, last) to be ForwardIterators");
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(
- _ForwardOutIterator, "copy(first, last, result) requires result to be a ForwardIterator");
- _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(
- _ForwardOutIterator, decltype(*__first), "copy(first, last, result) requires result to be an OutputIterator");
- auto __res = std::__copy(__policy, std::move(__first), std::move(__last), std::move(__result));
- if (!__res)
- std::__throw_bad_alloc();
- return *std::move(__res);
-}
-
-template <class>
-void __pstl_copy_n();
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _ForwardOutIterator,
- class _Size,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator> __copy_n(
- _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _Size&& __n, _ForwardOutIterator&& __result) noexcept {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_copy_n, _RawPolicy),
- [&__policy](
- _ForwardIterator __g_first, _Size __g_n, _ForwardOutIterator __g_result) -> optional<_ForwardIterator> {
- if constexpr (__has_random_access_iterator_category_or_concept<_ForwardIterator>::value)
- return std::__copy(__policy, std::move(__g_first), std::move(__g_first + __g_n), std::move(__g_result));
- else
- return std::copy_n(__g_first, __g_n, __g_result);
- },
- std::move(__first),
- std::move(__n),
- std::move(__result));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _ForwardOutIterator,
- class _Size,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator
-copy_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __n, _ForwardOutIterator __result) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(
- _ForwardIterator, "copy_n(first, n, result) requires first to be a ForwardIterator");
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(
- _ForwardOutIterator, "copy_n(first, n, result) requires result to be a ForwardIterator");
- _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(
- _ForwardOutIterator, decltype(*__first), "copy_n(first, n, result) requires result to be an OutputIterator");
- auto __res = std::__copy_n(__policy, std::move(__first), std::move(__n), std::move(__result));
- if (!__res)
- std::__throw_bad_alloc();
- return *std::move(__res);
-}
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_POP_MACROS
-
-#endif // _LIBCPP___ALGORITHM_PSTL_COPY_H
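A hedged usage sketch for the removed copy frontend (assumes parallel algorithms are enabled); note how the header above lowered copy to __transform with __identity, and copy_n to copy when the iterator is random-access:

#include <algorithm>
#include <execution>
#include <vector>

int main() {
  std::vector<int> src{1, 2, 3, 4}, dst(4);
  std::copy(std::execution::par, src.begin(), src.end(), dst.begin());
  // copy_n falls back to serial std::copy_n for non-random-access iterators.
  std::copy_n(std::execution::par, src.begin(), 2, dst.begin());
  return dst[0] == 1 ? 0 : 1;
}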
diff --git a/libcxx/include/__algorithm/pstl_count.h b/libcxx/include/__algorithm/pstl_count.h
deleted file mode 100644
index 64c84d855e4f..000000000000
--- a/libcxx/include/__algorithm/pstl_count.h
+++ /dev/null
@@ -1,126 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ALGORITHM_PSTL_COUNT_H
-#define _LIBCPP___ALGORITHM_PSTL_COUNT_H
-
-#include <__algorithm/count.h>
-#include <__algorithm/for_each.h>
-#include <__algorithm/pstl_for_each.h>
-#include <__algorithm/pstl_frontend_dispatch.h>
-#include <__atomic/atomic.h>
-#include <__config>
-#include <__functional/operations.h>
-#include <__iterator/cpp17_iterator_concepts.h>
-#include <__iterator/iterator_traits.h>
-#include <__numeric/pstl_transform_reduce.h>
-#include <__pstl/configuration.h>
-#include <__type_traits/enable_if.h>
-#include <__type_traits/is_execution_policy.h>
-#include <__type_traits/remove_cvref.h>
-#include <__utility/move.h>
-#include <optional>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
-
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class>
-void __pstl_count_if(); // declaration needed for the frontend dispatch below
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Predicate,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__iter_diff_t<_ForwardIterator>> __count_if(
- _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) noexcept {
- using __diff_t = __iter_diff_t<_ForwardIterator>;
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_count_if, _RawPolicy),
- [&](_ForwardIterator __g_first, _ForwardIterator __g_last, _Predicate __g_pred) -> optional<__diff_t> {
- return std::__transform_reduce(
- __policy,
- std::move(__g_first),
- std::move(__g_last),
- __diff_t(),
- std::plus{},
- [&](__iter_reference<_ForwardIterator> __element) -> bool { return __g_pred(__element); });
- },
- std::move(__first),
- std::move(__last),
- std::move(__pred));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Predicate,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI __iter_diff_t<_ForwardIterator>
-count_if(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(
- _ForwardIterator, "count_if(first, last, pred) requires [first, last) to be ForwardIterators");
- auto __res = std::__count_if(__policy, std::move(__first), std::move(__last), std::move(__pred));
- if (!__res)
- std::__throw_bad_alloc();
- return *std::move(__res);
-}
-
-template <class>
-void __pstl_count(); // declaration needed for the frontend dispatch below
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Tp,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__iter_diff_t<_ForwardIterator>>
-__count(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_count, _RawPolicy),
- [&](_ForwardIterator __g_first, _ForwardIterator __g_last, const _Tp& __g_value)
- -> optional<__iter_diff_t<_ForwardIterator>> {
- return std::count_if(__policy, __g_first, __g_last, [&](__iter_reference<_ForwardIterator> __v) {
- return __v == __g_value;
- });
- },
- std::move(__first),
- std::move(__last),
- __value);
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Tp,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI __iter_diff_t<_ForwardIterator>
-count(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(
- _ForwardIterator, "count(first, last, val) requires [first, last) to be ForwardIterators");
- auto __res = std::__count(__policy, std::move(__first), std::move(__last), __value);
- if (!__res)
- std::__throw_bad_alloc();
- return *__res;
-}
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_POP_MACROS
-
-#endif // _LIBCPP___ALGORITHM_PSTL_COUNT_H
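For reference, the corresponding user-facing calls (a minimal sketch, assuming a toolchain with parallel algorithms enabled); count was built on count_if, which reduced a boolean projection of the predicate with __transform_reduce:

#include <algorithm>
#include <execution>
#include <vector>

int main() {
  std::vector<int> v{1, 2, 2, 3};
  auto twos  = std::count(std::execution::par, v.begin(), v.end(), 2);
  auto evens = std::count_if(std::execution::par, v.begin(), v.end(),
                             [](int x) { return x % 2 == 0; });
  return (twos == 2 && evens == 2) ? 0 : 1;
}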
diff --git a/libcxx/include/__algorithm/pstl_equal.h b/libcxx/include/__algorithm/pstl_equal.h
deleted file mode 100644
index 0b38197d7f63..000000000000
--- a/libcxx/include/__algorithm/pstl_equal.h
+++ /dev/null
@@ -1,184 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ALGORITHM_PSTL_EQUAL_H
-#define _LIBCPP___ALGORITHM_PSTL_EQUAL_H
-
-#include <__algorithm/equal.h>
-#include <__algorithm/pstl_frontend_dispatch.h>
-#include <__config>
-#include <__functional/operations.h>
-#include <__iterator/cpp17_iterator_concepts.h>
-#include <__iterator/iterator_traits.h>
-#include <__numeric/pstl_transform_reduce.h>
-#include <__utility/move.h>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
-
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class>
-void __pstl_equal();
-
-template <class _ExecutionPolicy,
- class _ForwardIterator1,
- class _ForwardIterator2,
- class _Pred,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<bool>
-__equal(_ExecutionPolicy&& __policy,
- _ForwardIterator1&& __first1,
- _ForwardIterator1&& __last1,
- _ForwardIterator2&& __first2,
- _Pred&& __pred) noexcept {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_equal, _RawPolicy),
- [&__policy](
- _ForwardIterator1 __g_first1, _ForwardIterator1 __g_last1, _ForwardIterator2 __g_first2, _Pred __g_pred) {
- return std::__transform_reduce(
- __policy,
- std::move(__g_first1),
- std::move(__g_last1),
- std::move(__g_first2),
- true,
- std::logical_and{},
- std::move(__g_pred));
- },
- std::move(__first1),
- std::move(__last1),
- std::move(__first2),
- std::move(__pred));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator1,
- class _ForwardIterator2,
- class _Pred,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI bool
-equal(_ExecutionPolicy&& __policy,
- _ForwardIterator1 __first1,
- _ForwardIterator1 __last1,
- _ForwardIterator2 __first2,
- _Pred __pred) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "equal requires ForwardIterators");
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "equal requires ForwardIterators");
- auto __res = std::__equal(__policy, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__pred));
- if (!__res)
- std::__throw_bad_alloc();
- return *__res;
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator1,
- class _ForwardIterator2,
- enable_if_t<is_execution_policy_v<__remove_cvref_t<_ExecutionPolicy>>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI bool
-equal(_ExecutionPolicy&& __policy, _ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "equal requires ForwardIterators");
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "equal requires ForwardIterators");
- return std::equal(__policy, std::move(__first1), std::move(__last1), std::move(__first2), std::equal_to{});
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator1,
- class _ForwardIterator2,
- class _Pred,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<bool>
-__equal(_ExecutionPolicy&& __policy,
- _ForwardIterator1&& __first1,
- _ForwardIterator1&& __last1,
- _ForwardIterator2&& __first2,
- _ForwardIterator2&& __last2,
- _Pred&& __pred) noexcept {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_equal, _RawPolicy),
- [&__policy](_ForwardIterator1 __g_first1,
- _ForwardIterator1 __g_last1,
- _ForwardIterator2 __g_first2,
- _ForwardIterator2 __g_last2,
- _Pred __g_pred) -> optional<bool> {
- if constexpr (__has_random_access_iterator_category<_ForwardIterator1>::value &&
- __has_random_access_iterator_category<_ForwardIterator2>::value) {
- if (__g_last1 - __g_first1 != __g_last2 - __g_first2)
- return false;
- return std::__equal(
- __policy, std::move(__g_first1), std::move(__g_last1), std::move(__g_first2), std::move(__g_pred));
- } else {
- (void)__policy; // Avoid unused lambda capture warning
- return std::equal(
- std::move(__g_first1),
- std::move(__g_last1),
- std::move(__g_first2),
- std::move(__g_last2),
- std::move(__g_pred));
- }
- },
- std::move(__first1),
- std::move(__last1),
- std::move(__first2),
- std::move(__last2),
- std::move(__pred));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator1,
- class _ForwardIterator2,
- class _Pred,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI bool
-equal(_ExecutionPolicy&& __policy,
- _ForwardIterator1 __first1,
- _ForwardIterator1 __last1,
- _ForwardIterator2 __first2,
- _ForwardIterator2 __last2,
- _Pred __pred) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "equal requires ForwardIterators");
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "equal requires ForwardIterators");
- auto __res = std::__equal(
- __policy, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__last2), std::move(__pred));
- if (!__res)
- std::__throw_bad_alloc();
- return *__res;
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator1,
- class _ForwardIterator2,
- enable_if_t<is_execution_policy_v<__remove_cvref_t<_ExecutionPolicy>>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI bool
-equal(_ExecutionPolicy&& __policy,
- _ForwardIterator1 __first1,
- _ForwardIterator1 __last1,
- _ForwardIterator2 __first2,
- _ForwardIterator2 __last2) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "equal requires ForwardIterators");
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "equal requires ForwardIterators");
- return std::equal(
- __policy, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__last2), std::equal_to{});
-}
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_POP_MACROS
-
-#endif // _LIBCPP___ALGORITHM_PSTL_EQUAL_H
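A minimal sketch of both equal overload families handled by the removed header (assumes parallel algorithms are enabled); the four-iterator form first compared distances when both ranges are random-access, then reduced element comparisons with logical_and:

#include <algorithm>
#include <execution>
#include <vector>

int main() {
  std::vector<int> a{1, 2, 3}, b{1, 2, 3};
  bool e3 = std::equal(std::execution::par, a.begin(), a.end(), b.begin());
  bool e4 = std::equal(std::execution::par, a.begin(), a.end(), b.begin(), b.end());
  return (e3 && e4) ? 0 : 1;
}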
diff --git a/libcxx/include/__algorithm/pstl_fill.h b/libcxx/include/__algorithm/pstl_fill.h
deleted file mode 100644
index fd248506bc4b..000000000000
--- a/libcxx/include/__algorithm/pstl_fill.h
+++ /dev/null
@@ -1,114 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ALGORITHM_PSTL_FILL_H
-#define _LIBCPP___ALGORITHM_PSTL_FILL_H
-
-#include <__algorithm/fill_n.h>
-#include <__algorithm/pstl_for_each.h>
-#include <__algorithm/pstl_frontend_dispatch.h>
-#include <__config>
-#include <__iterator/concepts.h>
-#include <__iterator/cpp17_iterator_concepts.h>
-#include <__iterator/iterator_traits.h>
-#include <__type_traits/enable_if.h>
-#include <__type_traits/is_execution_policy.h>
-#include <__type_traits/remove_cvref.h>
-#include <__utility/move.h>
-#include <optional>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
-
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class>
-void __pstl_fill(); // declaration needed for the frontend dispatch below
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Tp,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI optional<__empty>
-__fill(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) noexcept {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_fill, _RawPolicy),
- [&](_ForwardIterator __g_first, _ForwardIterator __g_last, const _Tp& __g_value) {
- return std::__for_each(__policy, __g_first, __g_last, [&](__iter_reference<_ForwardIterator> __element) {
- __element = __g_value;
- });
- },
- std::move(__first),
- std::move(__last),
- __value);
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Tp,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI void
-fill(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "fill requires ForwardIterators");
- if (!std::__fill(__policy, std::move(__first), std::move(__last), __value))
- std::__throw_bad_alloc();
-}
-
-template <class>
-void __pstl_fill_n(); // declaration needed for the frontend dispatch below
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _SizeT,
- class _Tp,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty>
-__fill_n(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _SizeT&& __n, const _Tp& __value) noexcept {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_fill_n, _RawPolicy),
- [&](_ForwardIterator __g_first, _SizeT __g_n, const _Tp& __g_value) {
- if constexpr (__has_random_access_iterator_category_or_concept<_ForwardIterator>::value)
- std::fill(__policy, __g_first, __g_first + __g_n, __g_value);
- else
- std::fill_n(__g_first, __g_n, __g_value);
- return optional<__empty>{__empty{}};
- },
- std::move(__first),
- std::move(__n),
- __value);
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _SizeT,
- class _Tp,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI void
-fill_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _SizeT __n, const _Tp& __value) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "fill_n requires ForwardIterators");
- if (!std::__fill_n(__policy, std::move(__first), std::move(__n), __value))
- std::__throw_bad_alloc();
-}
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_POP_MACROS
-
-#endif // _LIBCPP___ALGORITHM_PSTL_FILL_H
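For context, a sketch of the calls served by this removed header (assuming parallel algorithms are enabled); fill lowered to a parallel for_each that assigns the value, and fill_n fell back to serial std::fill_n for non-random-access iterators:

#include <algorithm>
#include <execution>
#include <vector>

int main() {
  std::vector<int> v(8);
  std::fill(std::execution::par, v.begin(), v.end(), 7);
  std::fill_n(std::execution::par, v.begin(), 4, 9);
  return (v[0] == 9 && v[7] == 7) ? 0 : 1;
}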
diff --git a/libcxx/include/__algorithm/pstl_find.h b/libcxx/include/__algorithm/pstl_find.h
deleted file mode 100644
index b4c4dfb2ffb6..000000000000
--- a/libcxx/include/__algorithm/pstl_find.h
+++ /dev/null
@@ -1,141 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ALGORITHM_PSTL_FIND_H
-#define _LIBCPP___ALGORITHM_PSTL_FIND_H
-
-#include <__algorithm/comp.h>
-#include <__algorithm/find.h>
-#include <__algorithm/pstl_frontend_dispatch.h>
-#include <__config>
-#include <__iterator/cpp17_iterator_concepts.h>
-#include <__pstl/configuration.h>
-#include <__type_traits/enable_if.h>
-#include <__type_traits/is_execution_policy.h>
-#include <__type_traits/remove_cvref.h>
-#include <__utility/move.h>
-#include <optional>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
-
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Predicate,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__remove_cvref_t<_ForwardIterator>>
-__find_if(_ExecutionPolicy&&, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) noexcept {
- using _Backend = typename __select_backend<_RawPolicy>::type;
- return std::__pstl_find_if<_RawPolicy>(_Backend{}, std::move(__first), std::move(__last), std::move(__pred));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Predicate,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI _ForwardIterator
-find_if(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "find_if requires ForwardIterators");
- auto __res = std::__find_if(__policy, std::move(__first), std::move(__last), std::move(__pred));
- if (!__res)
- std::__throw_bad_alloc();
- return *std::move(__res);
-}
-
-template <class>
-void __pstl_find_if_not();
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Predicate,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__remove_cvref_t<_ForwardIterator>>
-__find_if_not(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_find_if_not, _RawPolicy),
- [&](_ForwardIterator&& __g_first, _ForwardIterator&& __g_last, _Predicate&& __g_pred)
- -> optional<__remove_cvref_t<_ForwardIterator>> {
- return std::__find_if(
- __policy, __g_first, __g_last, [&](__iter_reference<__remove_cvref_t<_ForwardIterator>> __value) {
- return !__g_pred(__value);
- });
- },
- std::move(__first),
- std::move(__last),
- std::move(__pred));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Predicate,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI _ForwardIterator
-find_if_not(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "find_if_not requires ForwardIterators");
- auto __res = std::__find_if_not(__policy, std::move(__first), std::move(__last), std::move(__pred));
- if (!__res)
- std::__throw_bad_alloc();
- return *std::move(__res);
-}
-
-template <class>
-void __pstl_find();
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Tp,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__remove_cvref_t<_ForwardIterator>>
-__find(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) noexcept {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_find, _RawPolicy),
- [&](_ForwardIterator __g_first, _ForwardIterator __g_last, const _Tp& __g_value) -> optional<_ForwardIterator> {
- return std::find_if(
- __policy, __g_first, __g_last, [&](__iter_reference<__remove_cvref_t<_ForwardIterator>> __element) {
- return __element == __g_value;
- });
- },
- std::move(__first),
- std::move(__last),
- __value);
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Tp,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI _ForwardIterator
-find(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "find requires ForwardIterators");
- auto __res = std::__find(__policy, std::move(__first), std::move(__last), __value);
- if (!__res)
- std::__throw_bad_alloc();
- return *std::move(__res);
-}
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_POP_MACROS
-
-#endif // _LIBCPP___ALGORITHM_PSTL_FIND_H
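A minimal usage sketch for the removed find frontends (assumes parallel algorithms are enabled); find and find_if_not were both expressed in terms of __find_if, which dispatched directly to the selected backend:

#include <algorithm>
#include <execution>
#include <vector>

int main() {
  std::vector<int> v{1, 2, 3, 4};
  auto a = std::find(std::execution::par, v.begin(), v.end(), 3);
  auto b = std::find_if(std::execution::par, v.begin(), v.end(), [](int x) { return x > 2; });
  auto c = std::find_if_not(std::execution::par, v.begin(), v.end(), [](int x) { return x < 3; });
  // All three locate the same element in this input.
  return (a == b && b == c) ? 0 : 1;
}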
diff --git a/libcxx/include/__algorithm/pstl_for_each.h b/libcxx/include/__algorithm/pstl_for_each.h
deleted file mode 100644
index a99eb6d97fd2..000000000000
--- a/libcxx/include/__algorithm/pstl_for_each.h
+++ /dev/null
@@ -1,108 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ALGORITHM_PSTL_FOR_EACH_H
-#define _LIBCPP___ALGORITHM_PSTL_FOR_EACH_H
-
-#include <__algorithm/for_each.h>
-#include <__algorithm/for_each_n.h>
-#include <__algorithm/pstl_frontend_dispatch.h>
-#include <__config>
-#include <__iterator/concepts.h>
-#include <__iterator/cpp17_iterator_concepts.h>
-#include <__pstl/configuration.h>
-#include <__type_traits/enable_if.h>
-#include <__type_traits/is_execution_policy.h>
-#include <__type_traits/remove_cvref.h>
-#include <__type_traits/void_t.h>
-#include <__utility/empty.h>
-#include <__utility/move.h>
-#include <optional>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
-
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Function,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty>
-__for_each(_ExecutionPolicy&&, _ForwardIterator&& __first, _ForwardIterator&& __last, _Function&& __func) noexcept {
- using _Backend = typename __select_backend<_RawPolicy>::type;
- return std::__pstl_for_each<_RawPolicy>(_Backend{}, std::move(__first), std::move(__last), std::move(__func));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Function,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI void
-for_each(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Function __func) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "for_each requires ForwardIterators");
- if (!std::__for_each(__policy, std::move(__first), std::move(__last), std::move(__func)))
- std::__throw_bad_alloc();
-}
-
-template <class>
-void __pstl_for_each_n(); // declaration needed for the frontend dispatch below
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Size,
- class _Function,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty>
-__for_each_n(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _Size&& __size, _Function&& __func) noexcept {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_for_each_n, _RawPolicy),
- [&](_ForwardIterator __g_first, _Size __g_size, _Function __g_func) -> optional<__empty> {
- if constexpr (__has_random_access_iterator_category_or_concept<_ForwardIterator>::value) {
- std::for_each(__policy, std::move(__g_first), __g_first + __g_size, std::move(__g_func));
- return __empty{};
- } else {
- std::for_each_n(std::move(__g_first), __g_size, std::move(__g_func));
- return __empty{};
- }
- },
- std::move(__first),
- std::move(__size),
- std::move(__func));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Size,
- class _Function,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI void
-for_each_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __size, _Function __func) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "for_each_n requires a ForwardIterator");
- auto __res = std::__for_each_n(__policy, std::move(__first), std::move(__size), std::move(__func));
- if (!__res)
- std::__throw_bad_alloc();
-}
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_POP_MACROS
-
-#endif // _LIBCPP___ALGORITHM_PSTL_FOR_EACH_H
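For reference, a sketch of the calls that dispatched through this removed header (assuming parallel algorithms are enabled); for_each went straight to the backend, while for_each_n reused the range form for random-access iterators and ran serially otherwise:

#include <algorithm>
#include <execution>
#include <vector>

int main() {
  std::vector<int> v{1, 2, 3, 4};
  std::for_each(std::execution::par, v.begin(), v.end(), [](int& x) { x *= 2; });
  std::for_each_n(std::execution::par, v.begin(), 2, [](int& x) { x += 1; });
  return v[0] == 3 ? 0 : 1;
}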
diff --git a/libcxx/include/__algorithm/pstl_generate.h b/libcxx/include/__algorithm/pstl_generate.h
deleted file mode 100644
index 350c0e4798be..000000000000
--- a/libcxx/include/__algorithm/pstl_generate.h
+++ /dev/null
@@ -1,113 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ALGORITHM_PSTL_GENERATE_H
-#define _LIBCPP___ALGORITHM_PSTL_GENERATE_H
-
-#include <__algorithm/pstl_for_each.h>
-#include <__algorithm/pstl_frontend_dispatch.h>
-#include <__config>
-#include <__iterator/cpp17_iterator_concepts.h>
-#include <__iterator/iterator_traits.h>
-#include <__pstl/configuration.h>
-#include <__type_traits/enable_if.h>
-#include <__type_traits/is_execution_policy.h>
-#include <__type_traits/remove_cvref.h>
-#include <__utility/move.h>
-#include <optional>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
-
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class>
-void __pstl_generate();
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Generator,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty>
-__generate(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Generator&& __gen) {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_generate, _RawPolicy),
- [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last, _Generator __g_gen) {
- return std::__for_each(
- __policy, std::move(__g_first), std::move(__g_last), [&](__iter_reference<_ForwardIterator> __element) {
- __element = __g_gen();
- });
- },
- std::move(__first),
- std::move(__last),
- std::move(__gen));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Generator,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI void
-generate(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Generator __gen) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "generate requires ForwardIterators");
- if (!std::__generate(__policy, std::move(__first), std::move(__last), std::move(__gen)))
- std::__throw_bad_alloc();
-}
-
-template <class>
-void __pstl_generate_n();
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Size,
- class _Generator,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty>
-__generate_n(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _Size&& __n, _Generator&& __gen) {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_generate_n, _RawPolicy),
- [&__policy](_ForwardIterator __g_first, _Size __g_n, _Generator __g_gen) {
- return std::__for_each_n(
- __policy, std::move(__g_first), std::move(__g_n), [&](__iter_reference<_ForwardIterator> __element) {
- __element = __g_gen();
- });
- },
- std::move(__first),
- __n,
- std::move(__gen));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Size,
- class _Generator,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI void
-generate_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __n, _Generator __gen) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "generate_n requires a ForwardIterator");
- if (!std::__generate_n(__policy, std::move(__first), std::move(__n), std::move(__gen)))
- std::__throw_bad_alloc();
-}
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_POP_MACROS
-
-#endif // _LIBCPP___ALGORITHM_PSTL_GENERATE_H
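A hedged sketch of the generate frontends removed above; generate was lowered to a parallel for_each assigning __gen() into each element. A stateful generator like the one below is only safe under a sequential policy, which is what this sketch uses to stay deterministic:

#include <algorithm>
#include <execution>
#include <vector>

int main() {
  std::vector<int> v(4);
  int i = 0;
  std::generate(std::execution::seq, v.begin(), v.end(), [&] { return i++; });
  std::generate_n(std::execution::seq, v.begin(), 2, [] { return 42; });
  return v[0] == 42 ? 0 : 1;
}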
diff --git a/libcxx/include/__algorithm/pstl_is_partitioned.h b/libcxx/include/__algorithm/pstl_is_partitioned.h
deleted file mode 100644
index 2dd5cf3ca2a2..000000000000
--- a/libcxx/include/__algorithm/pstl_is_partitioned.h
+++ /dev/null
@@ -1,79 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ALGORITHM_PSTL_IS_PARTITIONED
-#define _LIBCPP___ALGORITHM_PSTL_IS_PARTITIONED
-
-#include <__algorithm/pstl_any_all_none_of.h>
-#include <__algorithm/pstl_find.h>
-#include <__algorithm/pstl_frontend_dispatch.h>
-#include <__config>
-#include <__iterator/cpp17_iterator_concepts.h>
-#include <__pstl/configuration.h>
-#include <__type_traits/enable_if.h>
-#include <__type_traits/is_execution_policy.h>
-#include <__type_traits/remove_cvref.h>
-#include <__utility/move.h>
-#include <optional>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
-
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class>
-void __pstl_is_partitioned();
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Predicate,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<bool> __is_partitioned(
- _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_is_partitioned, _RawPolicy),
- [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last, _Predicate __g_pred) {
- __g_first = std::find_if_not(__policy, __g_first, __g_last, __g_pred);
- if (__g_first == __g_last)
- return true;
- ++__g_first;
- return std::none_of(__policy, __g_first, __g_last, __g_pred);
- },
- std::move(__first),
- std::move(__last),
- std::move(__pred));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Predicate,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI bool
-is_partitioned(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "is_partitioned requires ForwardIterators");
- auto __res = std::__is_partitioned(__policy, std::move(__first), std::move(__last), std::move(__pred));
- if (!__res)
- std::__throw_bad_alloc();
- return *std::move(__res);
-}
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_POP_MACROS
-
-#endif // _LIBCPP___ALGORITHM_PSTL_IS_PARTITIONED
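For context, a minimal sketch of the call served by this removed header (assuming parallel algorithms are enabled); the frontend found the first element failing the predicate, then verified that no later element satisfies it:

#include <algorithm>
#include <execution>
#include <vector>

int main() {
  std::vector<int> v{2, 4, 6, 1, 3};
  // Evens precede odds, so the range is partitioned by this predicate.
  bool part = std::is_partitioned(std::execution::par, v.begin(), v.end(),
                                  [](int x) { return x % 2 == 0; });
  return part ? 0 : 1;
}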
diff --git a/libcxx/include/__algorithm/pstl_merge.h b/libcxx/include/__algorithm/pstl_merge.h
deleted file mode 100644
index 87f634a67f58..000000000000
--- a/libcxx/include/__algorithm/pstl_merge.h
+++ /dev/null
@@ -1,97 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ALGORITHM_PSTL_MERGE_H
-#define _LIBCPP___ALGORITHM_PSTL_MERGE_H
-
-#include <__config>
-#include <__functional/operations.h>
-#include <__iterator/cpp17_iterator_concepts.h>
-#include <__pstl/configuration.h>
-#include <__type_traits/enable_if.h>
-#include <__type_traits/is_execution_policy.h>
-#include <__type_traits/remove_cvref.h>
-#include <__utility/move.h>
-#include <optional>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
-
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class _ExecutionPolicy,
- class _ForwardIterator1,
- class _ForwardIterator2,
- class _ForwardOutIterator,
- class _Comp = std::less<>,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator>
-__merge(_ExecutionPolicy&&,
- _ForwardIterator1 __first1,
- _ForwardIterator1 __last1,
- _ForwardIterator2 __first2,
- _ForwardIterator2 __last2,
- _ForwardOutIterator __result,
- _Comp __comp = {}) noexcept {
- using _Backend = typename __select_backend<_RawPolicy>::type;
- return std::__pstl_merge<_RawPolicy>(
- _Backend{},
- std::move(__first1),
- std::move(__last1),
- std::move(__first2),
- std::move(__last2),
- std::move(__result),
- std::move(__comp));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator1,
- class _ForwardIterator2,
- class _ForwardOutIterator,
- class _Comp = std::less<>,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator
-merge(_ExecutionPolicy&& __policy,
- _ForwardIterator1 __first1,
- _ForwardIterator1 __last1,
- _ForwardIterator2 __first2,
- _ForwardIterator2 __last2,
- _ForwardOutIterator __result,
- _Comp __comp = {}) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "merge requires ForwardIterators");
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "merge requires ForwardIterators");
- _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, decltype(*__first1), "merge requires an OutputIterator");
- _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, decltype(*__first2), "merge requires an OutputIterator");
- auto __res = std::__merge(
- __policy,
- std::move(__first1),
- std::move(__last1),
- std::move(__first2),
- std::move(__last2),
- std::move(__result),
- std::move(__comp));
- if (!__res)
- std::__throw_bad_alloc();
- return *std::move(__res);
-}
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_POP_MACROS
-
-#endif // _LIBCPP___ALGORITHM_PSTL_MERGE_H
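A minimal usage sketch for the removed merge frontend (assumes parallel algorithms are enabled); unlike most of the headers above, merge dispatched directly to the backend with no frontend-level lowering:

#include <algorithm>
#include <execution>
#include <vector>

int main() {
  std::vector<int> a{1, 3, 5}, b{2, 4, 6}, out(6);
  // The output iterator must satisfy the Cpp17ForwardIterator
  // requirements checked by the macros above; a vector iterator does.
  std::merge(std::execution::par, a.begin(), a.end(), b.begin(), b.end(), out.begin());
  return out[1] == 2 ? 0 : 1;
}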
diff --git a/libcxx/include/__algorithm/pstl_move.h b/libcxx/include/__algorithm/pstl_move.h
deleted file mode 100644
index 3155ddedf91b..000000000000
--- a/libcxx/include/__algorithm/pstl_move.h
+++ /dev/null
@@ -1,89 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ALGORITHM_PSTL_MOVE_H
-#define _LIBCPP___ALGORITHM_PSTL_MOVE_H
-
-#include <__algorithm/copy_n.h>
-#include <__algorithm/pstl_frontend_dispatch.h>
-#include <__algorithm/pstl_transform.h>
-#include <__config>
-#include <__functional/identity.h>
-#include <__iterator/cpp17_iterator_concepts.h>
-#include <__iterator/iterator_traits.h>
-#include <__pstl/configuration.h>
-#include <__type_traits/enable_if.h>
-#include <__type_traits/is_constant_evaluated.h>
-#include <__type_traits/is_execution_policy.h>
-#include <__type_traits/is_trivially_copyable.h>
-#include <__type_traits/remove_cvref.h>
-#include <optional>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
-
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-// TODO: Use the std::copy/move shenanigans to forward to std::memmove
-// Investigate whether we want to still forward to std::transform(policy)
-// in that case for the execution::par part, or whether we actually want
-// to run everything serially in that case.
-
-template <class>
-void __pstl_move();
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _ForwardOutIterator,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator>
-__move(_ExecutionPolicy&& __policy,
- _ForwardIterator&& __first,
- _ForwardIterator&& __last,
- _ForwardOutIterator&& __result) noexcept {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_move, _RawPolicy),
- [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last, _ForwardOutIterator __g_result) {
- return std::__transform(__policy, __g_first, __g_last, __g_result, [](auto&& __v) { return std::move(__v); });
- },
- std::move(__first),
- std::move(__last),
- std::move(__result));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _ForwardOutIterator,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator
-move(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _ForwardOutIterator __result) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "move requires ForwardIterators");
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "move requires an OutputIterator");
- _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(
- _ForwardOutIterator, decltype(std::move(*__first)), "move requires an OutputIterator");
- auto __res = std::__move(__policy, std::move(__first), std::move(__last), std::move(__result));
- if (!__res)
- std::__throw_bad_alloc();
- return *__res;
-}
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_POP_MACROS
-
-#endif // _LIBCPP___ALGORITHM_PSTL_MOVE_H
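For reference, a sketch of the call this removed header served (assuming parallel algorithms are enabled); the frontend lowered move to __transform with a lambda casting each element to an rvalue:

#include <algorithm>
#include <execution>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> src{"a", "b"}, dst(2);
  // Elements of src are moved-from after this call.
  std::move(std::execution::par, src.begin(), src.end(), dst.begin());
  return dst[0] == "a" ? 0 : 1;
}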
diff --git a/libcxx/include/__algorithm/pstl_replace.h b/libcxx/include/__algorithm/pstl_replace.h
deleted file mode 100644
index b2ded54dfe25..000000000000
--- a/libcxx/include/__algorithm/pstl_replace.h
+++ /dev/null
@@ -1,260 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ALGORITHM_PSTL_REPLACE_H
-#define _LIBCPP___ALGORITHM_PSTL_REPLACE_H
-
-#include <__algorithm/pstl_for_each.h>
-#include <__algorithm/pstl_frontend_dispatch.h>
-#include <__algorithm/pstl_transform.h>
-#include <__config>
-#include <__iterator/cpp17_iterator_concepts.h>
-#include <__iterator/iterator_traits.h>
-#include <__pstl/configuration.h>
-#include <__type_traits/enable_if.h>
-#include <__type_traits/remove_cvref.h>
-#include <__utility/move.h>
-#include <optional>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
-
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class>
-void __pstl_replace_if();
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Pred,
- class _Tp,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty>
-__replace_if(_ExecutionPolicy&& __policy,
- _ForwardIterator&& __first,
- _ForwardIterator&& __last,
- _Pred&& __pred,
- const _Tp& __new_value) noexcept {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_replace_if, _RawPolicy),
- [&__policy](
- _ForwardIterator&& __g_first, _ForwardIterator&& __g_last, _Pred&& __g_pred, const _Tp& __g_new_value) {
- std::for_each(__policy, __g_first, __g_last, [&](__iter_reference<_ForwardIterator> __element) {
- if (__g_pred(__element))
- __element = __g_new_value;
- });
- return optional<__empty>{__empty{}};
- },
- std::move(__first),
- std::move(__last),
- std::move(__pred),
- __new_value);
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Pred,
- class _Tp,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI void
-replace_if(_ExecutionPolicy&& __policy,
- _ForwardIterator __first,
- _ForwardIterator __last,
- _Pred __pred,
- const _Tp& __new_value) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "replace_if requires ForwardIterators");
- auto __res = std::__replace_if(__policy, std::move(__first), std::move(__last), std::move(__pred), __new_value);
- if (!__res)
- std::__throw_bad_alloc();
-}
-
-template <class>
-void __pstl_replace();
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Tp,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty>
-__replace(_ExecutionPolicy&& __policy,
- _ForwardIterator __first,
- _ForwardIterator __last,
- const _Tp& __old_value,
- const _Tp& __new_value) noexcept {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_replace, _RawPolicy),
- [&__policy](
- _ForwardIterator __g_first, _ForwardIterator __g_last, const _Tp& __g_old_value, const _Tp& __g_new_value) {
- return std::__replace_if(
- __policy,
- std::move(__g_first),
- std::move(__g_last),
- [&](__iter_reference<_ForwardIterator> __element) { return __element == __g_old_value; },
- __g_new_value);
- },
- std::move(__first),
- std::move(__last),
- __old_value,
- __new_value);
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Tp,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI void
-replace(_ExecutionPolicy&& __policy,
- _ForwardIterator __first,
- _ForwardIterator __last,
- const _Tp& __old_value,
- const _Tp& __new_value) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "replace requires ForwardIterators");
- if (!std::__replace(__policy, std::move(__first), std::move(__last), __old_value, __new_value))
- std::__throw_bad_alloc();
-}
-
-template <class>
-void __pstl_replace_copy_if();
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _ForwardOutIterator,
- class _Pred,
- class _Tp,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> __replace_copy_if(
- _ExecutionPolicy&& __policy,
- _ForwardIterator&& __first,
- _ForwardIterator&& __last,
- _ForwardOutIterator&& __result,
- _Pred&& __pred,
- const _Tp& __new_value) {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_replace_copy_if, _RawPolicy),
- [&__policy](_ForwardIterator __g_first,
- _ForwardIterator __g_last,
- _ForwardOutIterator __g_result,
- _Pred __g_pred,
- const _Tp& __g_new_value) -> optional<__empty> {
- if (!std::__transform(
- __policy, __g_first, __g_last, __g_result, [&](__iter_reference<_ForwardIterator> __element) {
- return __g_pred(__element) ? __g_new_value : __element;
- }))
- return nullopt;
- return __empty{};
- },
- std::move(__first),
- std::move(__last),
- std::move(__result),
- std::move(__pred),
- __new_value);
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _ForwardOutIterator,
- class _Pred,
- class _Tp,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI void replace_copy_if(
- _ExecutionPolicy&& __policy,
- _ForwardIterator __first,
- _ForwardIterator __last,
- _ForwardOutIterator __result,
- _Pred __pred,
- const _Tp& __new_value) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "replace_copy_if requires ForwardIterators");
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "replace_copy_if requires ForwardIterators");
- _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(
- _ForwardOutIterator, decltype(*__first), "replace_copy_if requires an OutputIterator");
- _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, const _Tp&, "replace_copy requires an OutputIterator");
- if (!std::__replace_copy_if(
- __policy, std::move(__first), std::move(__last), std::move(__result), std::move(__pred), __new_value))
- std::__throw_bad_alloc();
-}
-
-template <class>
-void __pstl_replace_copy();
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _ForwardOutIterator,
- class _Tp,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> __replace_copy(
- _ExecutionPolicy&& __policy,
- _ForwardIterator&& __first,
- _ForwardIterator&& __last,
- _ForwardOutIterator&& __result,
- const _Tp& __old_value,
- const _Tp& __new_value) noexcept {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_replace_copy, _RawPolicy),
- [&__policy](_ForwardIterator __g_first,
- _ForwardIterator __g_last,
- _ForwardOutIterator __g_result,
- const _Tp& __g_old_value,
- const _Tp& __g_new_value) {
- return std::__replace_copy_if(
- __policy,
- std::move(__g_first),
- std::move(__g_last),
- std::move(__g_result),
- [&](__iter_reference<_ForwardIterator> __element) { return __element == __g_old_value; },
- __g_new_value);
- },
- std::move(__first),
- std::move(__last),
- std::move(__result),
- __old_value,
- __new_value);
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _ForwardOutIterator,
- class _Tp,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI void replace_copy(
- _ExecutionPolicy&& __policy,
- _ForwardIterator __first,
- _ForwardIterator __last,
- _ForwardOutIterator __result,
- const _Tp& __old_value,
- const _Tp& __new_value) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "replace_copy requires ForwardIterators");
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "replace_copy requires ForwardIterators");
- _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(
- _ForwardOutIterator, decltype(*__first), "replace_copy requires an OutputIterator");
- _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, const _Tp&, "replace_copy requires an OutputIterator");
- if (!std::__replace_copy(
- __policy, std::move(__first), std::move(__last), std::move(__result), __old_value, __new_value))
- std::__throw_bad_alloc();
-}
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_POP_MACROS
-
-#endif // _LIBCPP___ALGORITHM_PSTL_REPLACE_H
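A hedged sketch of the replace family dispatched through this removed header (assumes parallel algorithms are enabled); replace and replace_copy were both expressed through the *_if forms, with an equality predicate synthesized from the old value:

#include <algorithm>
#include <execution>
#include <vector>

int main() {
  std::vector<int> v{1, 2, 2, 3}, out(4);
  std::replace(std::execution::par, v.begin(), v.end(), 2, 9);
  // v is now {1, 9, 9, 3}; copy it while mapping 9 back to 0.
  std::replace_copy_if(std::execution::par, v.begin(), v.end(), out.begin(),
                       [](int x) { return x == 9; }, 0);
  return out[1] == 0 ? 0 : 1;
}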
diff --git a/libcxx/include/__algorithm/pstl_rotate_copy.h b/libcxx/include/__algorithm/pstl_rotate_copy.h
deleted file mode 100644
index 1a32b710877c..000000000000
--- a/libcxx/include/__algorithm/pstl_rotate_copy.h
+++ /dev/null
@@ -1,90 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ALGORITHM_PSTL_ROTATE_COPY_H
-#define _LIBCPP___ALGORITHM_PSTL_ROTATE_COPY_H
-
-#include <__algorithm/pstl_copy.h>
-#include <__algorithm/pstl_frontend_dispatch.h>
-#include <__iterator/cpp17_iterator_concepts.h>
-#include <__pstl/configuration.h>
-#include <__type_traits/is_execution_policy.h>
-#include <optional>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
-
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class>
-void __pstl_rotate_copy();
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _ForwardOutIterator,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator>
-__rotate_copy(_ExecutionPolicy&& __policy,
- _ForwardIterator&& __first,
- _ForwardIterator&& __middle,
- _ForwardIterator&& __last,
- _ForwardOutIterator&& __result) noexcept {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_rotate_copy, _RawPolicy),
- [&__policy](_ForwardIterator __g_first,
- _ForwardIterator __g_middle,
- _ForwardIterator __g_last,
- _ForwardOutIterator __g_result) -> optional<_ForwardOutIterator> {
- auto __result_mid =
- std::__copy(__policy, _ForwardIterator(__g_middle), std::move(__g_last), std::move(__g_result));
- if (!__result_mid)
- return nullopt;
- return std::__copy(__policy, std::move(__g_first), std::move(__g_middle), *std::move(__result_mid));
- },
- std::move(__first),
- std::move(__middle),
- std::move(__last),
- std::move(__result));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _ForwardOutIterator,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator rotate_copy(
- _ExecutionPolicy&& __policy,
- _ForwardIterator __first,
- _ForwardIterator __middle,
- _ForwardIterator __last,
- _ForwardOutIterator __result) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "rotate_copy requires ForwardIterators");
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "rotate_copy requires ForwardIterators");
- _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(
- _ForwardOutIterator, decltype(*__first), "rotate_copy requires an OutputIterator");
- auto __res =
- std::__rotate_copy(__policy, std::move(__first), std::move(__middle), std::move(__last), std::move(__result));
- if (!__res)
- std::__throw_bad_alloc();
- return *__res;
-}
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_POP_MACROS
-
-#endif // _LIBCPP___ALGORITHM_PSTL_ROTATE_COPY_H
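The deleted __rotate_copy decomposed the operation into two __copy calls, emitting the second half of the range first. A serial equivalent of that decomposition (illustrative, not part of the patch):

#include <algorithm>

template <class It, class Out>
Out rotate_copy_sketch(It first, It middle, It last, Out out) {
  out = std::copy(middle, last, out);   // [middle, last) lands first in the output
  return std::copy(first, middle, out); // then [first, middle)
}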
diff --git a/libcxx/include/__algorithm/pstl_sort.h b/libcxx/include/__algorithm/pstl_sort.h
deleted file mode 100644
index 769dd81af77e..000000000000
--- a/libcxx/include/__algorithm/pstl_sort.h
+++ /dev/null
@@ -1,85 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ALGORITHM_PSTL_SORT_H
-#define _LIBCPP___ALGORITHM_PSTL_SORT_H
-
-#include <__algorithm/pstl_frontend_dispatch.h>
-#include <__algorithm/pstl_stable_sort.h>
-#include <__config>
-#include <__functional/operations.h>
-#include <__iterator/cpp17_iterator_concepts.h>
-#include <__pstl/configuration.h>
-#include <__type_traits/is_execution_policy.h>
-#include <__type_traits/remove_cvref.h>
-#include <__utility/empty.h>
-#include <__utility/forward.h>
-#include <__utility/move.h>
-#include <optional>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
-
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class>
-void __pstl_sort();
-
-template <class _ExecutionPolicy,
- class _RandomAccessIterator,
- class _Comp,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> __sort(
- _ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp) noexcept {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_sort, _RawPolicy),
- [&__policy](_RandomAccessIterator __g_first, _RandomAccessIterator __g_last, _Comp __g_comp) {
- std::stable_sort(__policy, std::move(__g_first), std::move(__g_last), std::move(__g_comp));
- return optional<__empty>{__empty{}};
- },
- std::move(__first),
- std::move(__last),
- std::move(__comp));
-}
-
-template <class _ExecutionPolicy,
- class _RandomAccessIterator,
- class _Comp,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI void
-sort(_ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp) {
- _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(_RandomAccessIterator, "sort requires RandomAccessIterators");
- if (!std::__sort(__policy, std::move(__first), std::move(__last), std::move(__comp)))
- std::__throw_bad_alloc();
-}
-
-template <class _ExecutionPolicy,
- class _RandomAccessIterator,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI void
-sort(_ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last) {
- _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(_RandomAccessIterator, "sort requires RandomAccessIterators");
- std::sort(std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), less{});
-}
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_POP_MACROS
-
-#endif // _LIBCPP___ALGORITHM_PSTL_SORT_H
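The deleted comparator-less overload forwarded to the three-argument sort with less{}, which in turn fell back to stable_sort when no backend overrides __pstl_sort. Caller-side usage, unchanged by this patch (minimal sketch):

#include <algorithm>
#include <execution>
#include <vector>

int main() {
  std::vector<int> v{3, 1, 2};
  // Without a __pstl_sort backend override, this dispatched to stable_sort(par, ...).
  std::sort(std::execution::par, v.begin(), v.end());
}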
diff --git a/libcxx/include/__algorithm/pstl_stable_sort.h b/libcxx/include/__algorithm/pstl_stable_sort.h
deleted file mode 100644
index f5e0dd40f72b..000000000000
--- a/libcxx/include/__algorithm/pstl_stable_sort.h
+++ /dev/null
@@ -1,63 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ALGORITHM_PSTL_STABLE_SORT_H
-#define _LIBCPP___ALGORITHM_PSTL_STABLE_SORT_H
-
-#include <__config>
-#include <__functional/operations.h>
-#include <__iterator/cpp17_iterator_concepts.h>
-#include <__pstl/configuration.h>
-#include <__type_traits/enable_if.h>
-#include <__type_traits/is_execution_policy.h>
-#include <__type_traits/remove_cvref.h>
-#include <__utility/empty.h>
-#include <__utility/move.h>
-#include <optional>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
-
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class _ExecutionPolicy,
- class _RandomAccessIterator,
- class _Comp = less<>,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> __stable_sort(
- _ExecutionPolicy&&, _RandomAccessIterator&& __first, _RandomAccessIterator&& __last, _Comp&& __comp = {}) noexcept {
- using _Backend = typename __select_backend<_RawPolicy>::type;
- return std::__pstl_stable_sort<_RawPolicy>(_Backend{}, std::move(__first), std::move(__last), std::move(__comp));
-}
-
-template <class _ExecutionPolicy,
- class _RandomAccessIterator,
- class _Comp = less<>,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI void stable_sort(
- _ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp = {}) {
- _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(_RandomAccessIterator, "stable_sort requires RandomAccessIterators");
- if (!std::__stable_sort(__policy, std::move(__first), std::move(__last), std::move(__comp)))
- std::__throw_bad_alloc();
-}
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_POP_MACROS
-
-#endif // _LIBCPP___ALGORITHM_PSTL_STABLE_SORT_H
diff --git a/libcxx/include/__algorithm/pstl_transform.h b/libcxx/include/__algorithm/pstl_transform.h
deleted file mode 100644
index 80e1d6b496f2..000000000000
--- a/libcxx/include/__algorithm/pstl_transform.h
+++ /dev/null
@@ -1,122 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___ALGORITHM_PSTL_TRANSFORM_H
-#define _LIBCPP___ALGORITHM_PSTL_TRANSFORM_H
-
-#include <__config>
-#include <__iterator/cpp17_iterator_concepts.h>
-#include <__pstl/configuration.h>
-#include <__type_traits/enable_if.h>
-#include <__type_traits/is_execution_policy.h>
-#include <__type_traits/remove_cvref.h>
-#include <__utility/move.h>
-#include <optional>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
-
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _ForwardOutIterator,
- class _UnaryOperation,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__remove_cvref_t<_ForwardOutIterator>>
-__transform(_ExecutionPolicy&&,
- _ForwardIterator&& __first,
- _ForwardIterator&& __last,
- _ForwardOutIterator&& __result,
- _UnaryOperation&& __op) noexcept {
- using _Backend = typename __select_backend<_RawPolicy>::type;
- return std::__pstl_transform<_RawPolicy>(
- _Backend{}, std::move(__first), std::move(__last), std::move(__result), std::move(__op));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _ForwardOutIterator,
- class _UnaryOperation,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator transform(
- _ExecutionPolicy&& __policy,
- _ForwardIterator __first,
- _ForwardIterator __last,
- _ForwardOutIterator __result,
- _UnaryOperation __op) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "transform requires ForwardIterators");
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "transform requires an OutputIterator");
- _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(
- _ForwardOutIterator, decltype(__op(*__first)), "transform requires an OutputIterator");
- auto __res = std::__transform(__policy, std::move(__first), std::move(__last), std::move(__result), std::move(__op));
- if (!__res)
- std::__throw_bad_alloc();
- return *std::move(__res);
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator1,
- class _ForwardIterator2,
- class _ForwardOutIterator,
- class _BinaryOperation,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI optional<__remove_cvref_t<_ForwardOutIterator>>
-__transform(_ExecutionPolicy&&,
- _ForwardIterator1&& __first1,
- _ForwardIterator1&& __last1,
- _ForwardIterator2&& __first2,
- _ForwardOutIterator&& __result,
- _BinaryOperation&& __op) noexcept {
- using _Backend = typename __select_backend<_RawPolicy>::type;
- return std::__pstl_transform<_RawPolicy>(
- _Backend{}, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__result), std::move(__op));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator1,
- class _ForwardIterator2,
- class _ForwardOutIterator,
- class _BinaryOperation,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator transform(
- _ExecutionPolicy&& __policy,
- _ForwardIterator1 __first1,
- _ForwardIterator1 __last1,
- _ForwardIterator2 __first2,
- _ForwardOutIterator __result,
- _BinaryOperation __op) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "transform requires ForwardIterators");
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "transform requires ForwardIterators");
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "transform requires an OutputIterator");
- _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(
- _ForwardOutIterator, decltype(__op(*__first1, *__first2)), "transform requires an OutputIterator");
- auto __res = std::__transform(
- __policy, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__result), std::move(__op));
- if (!__res)
- std::__throw_bad_alloc();
- return *std::move(__res);
-}
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_POP_MACROS
-
-#endif // _LIBCPP___ALGORITHM_PSTL_TRANSFORM_H
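Both deleted transform overloads returned an internal optional and threw bad_alloc on backend failure; public call sites are unaffected by the removal. A minimal usage sketch of the unary form (the binary form adds a second input range):

#include <algorithm>
#include <execution>
#include <vector>

int main() {
  std::vector<int> in{1, 2, 3}, out(3);
  std::transform(std::execution::par, in.begin(), in.end(), out.begin(),
                 [](int x) { return 2 * x; }); // throws std::bad_alloc on backend failure
}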
diff --git a/libcxx/include/__atomic/atomic_ref.h b/libcxx/include/__atomic/atomic_ref.h
new file mode 100644
index 000000000000..156f1961151c
--- /dev/null
+++ b/libcxx/include/__atomic/atomic_ref.h
@@ -0,0 +1,360 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___ATOMIC_ATOMIC_REF_H
+#define _LIBCPP___ATOMIC_ATOMIC_REF_H
+
+#include <__assert>
+#include <__atomic/atomic_sync.h>
+#include <__atomic/check_memory_order.h>
+#include <__atomic/to_gcc_order.h>
+#include <__concepts/arithmetic.h>
+#include <__concepts/same_as.h>
+#include <__config>
+#include <__memory/addressof.h>
+#include <__type_traits/has_unique_object_representation.h>
+#include <__type_traits/is_trivially_copyable.h>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_PUSH_MACROS
+#include <__undef_macros>
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if _LIBCPP_STD_VER >= 20
+
+template <class _Tp>
+struct __atomic_ref_base {
+protected:
+ _Tp* __ptr_;
+
+ _LIBCPP_HIDE_FROM_ABI __atomic_ref_base(_Tp& __obj) : __ptr_(std::addressof(__obj)) {}
+
+private:
+ _LIBCPP_HIDE_FROM_ABI static _Tp* __clear_padding(_Tp& __val) noexcept {
+ _Tp* __ptr = std::addressof(__val);
+# if __has_builtin(__builtin_clear_padding)
+ __builtin_clear_padding(__ptr);
+# endif
+ return __ptr;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI static bool __compare_exchange(
+ _Tp* __ptr, _Tp* __expected, _Tp* __desired, bool __is_weak, int __success, int __failure) noexcept {
+ if constexpr (
+# if __has_builtin(__builtin_clear_padding)
+ has_unique_object_representations_v<_Tp> || floating_point<_Tp>
+# else
+ true // NOLINT(readability-simplify-boolean-expr)
+# endif
+ ) {
+ return __atomic_compare_exchange(__ptr, __expected, __desired, __is_weak, __success, __failure);
+ } else { // _Tp has padding bits and __builtin_clear_padding is available
+ __clear_padding(*__desired);
+ _Tp __copy = *__expected;
+ __clear_padding(__copy);
+      // We loop around `__atomic_compare_exchange` until it either succeeds, or fails
+      // because the value representations of the objects involved actually differ.
+      // A failure caused only by differing padding bits (i.e. differing object
+      // representations but equal value representations) is retried after refreshing
+      // the expected value, so spurious padding-induced failures are filtered out.
+ while (true) {
+ _Tp __prev = __copy;
+ if (__atomic_compare_exchange(__ptr, std::addressof(__copy), __desired, __is_weak, __success, __failure)) {
+ return true;
+ }
+ _Tp __curr = __copy;
+ if (std::memcmp(__clear_padding(__prev), __clear_padding(__curr), sizeof(_Tp)) != 0) {
+          // The value representations (ignoring padding bits) do not compare equal,
+          // so write the current contents of *__ptr into *__expected and fail.
+ std::memcpy(__expected, std::addressof(__copy), sizeof(_Tp));
+ return false;
+ }
+ }
+ }
+ }
+
+ friend struct __atomic_waitable_traits<__atomic_ref_base<_Tp>>;
+
+public:
+ using value_type = _Tp;
+
+ static constexpr size_t required_alignment = alignof(_Tp);
+
+ // The __atomic_always_lock_free builtin takes into account the alignment of the pointer if provided,
+ // so we create a fake pointer with a suitable alignment when querying it. Note that we are guaranteed
+ // that the pointer is going to be aligned properly at runtime because that is a (checked) precondition
+ // of atomic_ref's constructor.
+ static constexpr bool is_always_lock_free =
+ __atomic_always_lock_free(sizeof(_Tp), reinterpret_cast<void*>(-required_alignment));
+
+ _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const noexcept { return __atomic_is_lock_free(sizeof(_Tp), __ptr_); }
+
+ _LIBCPP_HIDE_FROM_ABI void store(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept
+ _LIBCPP_CHECK_STORE_MEMORY_ORDER(__order) {
+ _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+ __order == memory_order::relaxed || __order == memory_order::release || __order == memory_order::seq_cst,
+ "atomic_ref: memory order argument to atomic store operation is invalid");
+ __atomic_store(__ptr_, __clear_padding(__desired), std::__to_gcc_order(__order));
+ }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept {
+ store(__desired);
+ return __desired;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp load(memory_order __order = memory_order::seq_cst) const noexcept
+ _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__order) {
+ _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+ __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire ||
+ __order == memory_order::seq_cst,
+ "atomic_ref: memory order argument to atomic load operation is invalid");
+ alignas(_Tp) byte __mem[sizeof(_Tp)];
+ auto* __ret = reinterpret_cast<_Tp*>(__mem);
+ __atomic_load(__ptr_, __ret, std::__to_gcc_order(__order));
+ return *__ret;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI operator _Tp() const noexcept { return load(); }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
+ alignas(_Tp) byte __mem[sizeof(_Tp)];
+ auto* __ret = reinterpret_cast<_Tp*>(__mem);
+ __atomic_exchange(__ptr_, __clear_padding(__desired), __ret, std::__to_gcc_order(__order));
+ return *__ret;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI bool
+ compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept
+ _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) {
+ _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+ __failure == memory_order::relaxed || __failure == memory_order::consume ||
+ __failure == memory_order::acquire || __failure == memory_order::seq_cst,
+ "atomic_ref: failure memory order argument to weak atomic compare-and-exchange operation is invalid");
+ return __compare_exchange(
+ __ptr_,
+ std::addressof(__expected),
+ std::addressof(__desired),
+ true,
+ std::__to_gcc_order(__success),
+ std::__to_gcc_order(__failure));
+ }
+ _LIBCPP_HIDE_FROM_ABI bool
+ compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept
+ _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) {
+ _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+ __failure == memory_order::relaxed || __failure == memory_order::consume ||
+ __failure == memory_order::acquire || __failure == memory_order::seq_cst,
+ "atomic_ref: failure memory order argument to strong atomic compare-and-exchange operation is invalid");
+ return __compare_exchange(
+ __ptr_,
+ std::addressof(__expected),
+ std::addressof(__desired),
+ false,
+ std::__to_gcc_order(__success),
+ std::__to_gcc_order(__failure));
+ }
+
+ _LIBCPP_HIDE_FROM_ABI bool
+ compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
+ return __compare_exchange(
+ __ptr_,
+ std::addressof(__expected),
+ std::addressof(__desired),
+ true,
+ std::__to_gcc_order(__order),
+ std::__to_gcc_failure_order(__order));
+ }
+ _LIBCPP_HIDE_FROM_ABI bool
+ compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
+ return __compare_exchange(
+ __ptr_,
+ std::addressof(__expected),
+ std::addressof(__desired),
+ false,
+ std::__to_gcc_order(__order),
+ std::__to_gcc_failure_order(__order));
+ }
+
+ _LIBCPP_HIDE_FROM_ABI void wait(_Tp __old, memory_order __order = memory_order::seq_cst) const noexcept
+ _LIBCPP_CHECK_WAIT_MEMORY_ORDER(__order) {
+ _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+ __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire ||
+ __order == memory_order::seq_cst,
+ "atomic_ref: memory order argument to atomic wait operation is invalid");
+ std::__atomic_wait(*this, __old, __order);
+ }
+ _LIBCPP_HIDE_FROM_ABI void notify_one() const noexcept { std::__atomic_notify_one(*this); }
+ _LIBCPP_HIDE_FROM_ABI void notify_all() const noexcept { std::__atomic_notify_all(*this); }
+};
+
+template <class _Tp>
+struct __atomic_waitable_traits<__atomic_ref_base<_Tp>> {
+ static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_ref_base<_Tp>& __a, memory_order __order) {
+ return __a.load(__order);
+ }
+ static _LIBCPP_HIDE_FROM_ABI const _Tp* __atomic_contention_address(const __atomic_ref_base<_Tp>& __a) {
+ return __a.__ptr_;
+ }
+};
+
+template <class _Tp>
+struct atomic_ref : public __atomic_ref_base<_Tp> {
+ static_assert(is_trivially_copyable_v<_Tp>, "std::atomic_ref<T> requires that 'T' be a trivially copyable type");
+
+ using __base = __atomic_ref_base<_Tp>;
+
+ _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) {
+ _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+ reinterpret_cast<uintptr_t>(std::addressof(__obj)) % __base::required_alignment == 0,
+ "atomic_ref ctor: referenced object must be aligned to required_alignment");
+ }
+
+ _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default;
+
+ _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }
+
+ atomic_ref& operator=(const atomic_ref&) = delete;
+};
+
+template <class _Tp>
+ requires(std::integral<_Tp> && !std::same_as<bool, _Tp>)
+struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> {
+ using __base = __atomic_ref_base<_Tp>;
+
+ using difference_type = __base::value_type;
+
+ _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) {
+ _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+ reinterpret_cast<uintptr_t>(std::addressof(__obj)) % __base::required_alignment == 0,
+ "atomic_ref ctor: referenced object must be aligned to required_alignment");
+ }
+
+ _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default;
+
+ _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }
+
+ atomic_ref& operator=(const atomic_ref&) = delete;
+
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __atomic_fetch_add(this->__ptr_, __arg, std::__to_gcc_order(__order));
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __atomic_fetch_sub(this->__ptr_, __arg, std::__to_gcc_order(__order));
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __atomic_fetch_and(this->__ptr_, __arg, std::__to_gcc_order(__order));
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __atomic_fetch_or(this->__ptr_, __arg, std::__to_gcc_order(__order));
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __atomic_fetch_xor(this->__ptr_, __arg, std::__to_gcc_order(__order));
+ }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) const noexcept { return fetch_add(_Tp(1)); }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator--(int) const noexcept { return fetch_sub(_Tp(1)); }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator++() const noexcept { return fetch_add(_Tp(1)) + _Tp(1); }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator--() const noexcept { return fetch_sub(_Tp(1)) - _Tp(1); }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __arg) const noexcept { return fetch_and(__arg) & __arg; }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __arg) const noexcept { return fetch_or(__arg) | __arg; }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __arg) const noexcept { return fetch_xor(__arg) ^ __arg; }
+};
+
+template <class _Tp>
+ requires std::floating_point<_Tp>
+struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> {
+ using __base = __atomic_ref_base<_Tp>;
+
+ using difference_type = __base::value_type;
+
+ _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) {
+ _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+ reinterpret_cast<uintptr_t>(std::addressof(__obj)) % __base::required_alignment == 0,
+ "atomic_ref ctor: referenced object must be aligned to required_alignment");
+ }
+
+ _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default;
+
+ _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }
+
+ atomic_ref& operator=(const atomic_ref&) = delete;
+
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ _Tp __old = this->load(memory_order_relaxed);
+ _Tp __new = __old + __arg;
+ while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) {
+ __new = __old + __arg;
+ }
+ return __old;
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ _Tp __old = this->load(memory_order_relaxed);
+ _Tp __new = __old - __arg;
+ while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) {
+ __new = __old - __arg;
+ }
+ return __old;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; }
+};
+
+template <class _Tp>
+struct atomic_ref<_Tp*> : public __atomic_ref_base<_Tp*> {
+ using __base = __atomic_ref_base<_Tp*>;
+
+ using difference_type = ptrdiff_t;
+
+ _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp*& __ptr) : __base(__ptr) {}
+
+ _LIBCPP_HIDE_FROM_ABI _Tp* operator=(_Tp* __desired) const noexcept { return __base::operator=(__desired); }
+
+ atomic_ref& operator=(const atomic_ref&) = delete;
+
+ _LIBCPP_HIDE_FROM_ABI _Tp* fetch_add(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __atomic_fetch_add(this->__ptr_, __arg * sizeof(_Tp), std::__to_gcc_order(__order));
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp* fetch_sub(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __atomic_fetch_sub(this->__ptr_, __arg * sizeof(_Tp), std::__to_gcc_order(__order));
+ }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp* operator++(int) const noexcept { return fetch_add(1); }
+ _LIBCPP_HIDE_FROM_ABI _Tp* operator--(int) const noexcept { return fetch_sub(1); }
+ _LIBCPP_HIDE_FROM_ABI _Tp* operator++() const noexcept { return fetch_add(1) + 1; }
+ _LIBCPP_HIDE_FROM_ABI _Tp* operator--() const noexcept { return fetch_sub(1) - 1; }
+ _LIBCPP_HIDE_FROM_ABI _Tp* operator+=(ptrdiff_t __arg) const noexcept { return fetch_add(__arg) + __arg; }
+ _LIBCPP_HIDE_FROM_ABI _Tp* operator-=(ptrdiff_t __arg) const noexcept { return fetch_sub(__arg) - __arg; }
+};
+
+_LIBCPP_CTAD_SUPPORTED_FOR_TYPE(atomic_ref);
+
+#endif // _LIBCPP_STD_VER >= 20
+
+_LIBCPP_END_NAMESPACE_STD
+
+_LIBCPP_POP_MACROS
+
+#endif // _LIBCPP___ATOMIC_ATOMIC_REF_H
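A minimal usage sketch for the new atomic_ref (C++20; assumes the referenced object satisfies the alignment precondition checked in the constructors above):

#include <atomic>
#include <cassert>

int main() {
  alignas(std::atomic_ref<int>::required_alignment) int counter = 0;
  std::atomic_ref<int> ref(counter); // counter must outlive ref; no concurrent non-atomic access
  ref.fetch_add(5, std::memory_order_relaxed); // provided by the integral specialization
  assert(ref.load() == 5);
}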
diff --git a/libcxx/include/__atomic/atomic_sync.h b/libcxx/include/__atomic/atomic_sync.h
index e583dca38c4c..175700be54c0 100644
--- a/libcxx/include/__atomic/atomic_sync.h
+++ b/libcxx/include/__atomic/atomic_sync.h
@@ -12,6 +12,7 @@
#include <__atomic/contention_t.h>
#include <__atomic/cxx_atomic_impl.h>
#include <__atomic/memory_order.h>
+#include <__atomic/to_gcc_order.h>
#include <__availability>
#include <__chrono/duration.h>
#include <__config>
diff --git a/libcxx/include/__atomic/check_memory_order.h b/libcxx/include/__atomic/check_memory_order.h
index 3012aec0521b..536f764a6190 100644
--- a/libcxx/include/__atomic/check_memory_order.h
+++ b/libcxx/include/__atomic/check_memory_order.h
@@ -27,4 +27,8 @@
_LIBCPP_DIAGNOSE_WARNING(__f == memory_order_release || __f == memory_order_acq_rel, \
"memory order argument to atomic operation is invalid")
+#define _LIBCPP_CHECK_WAIT_MEMORY_ORDER(__m) \
+ _LIBCPP_DIAGNOSE_WARNING(__m == memory_order_release || __m == memory_order_acq_rel, \
+ "memory order argument to atomic operation is invalid")
+
#endif // _LIBCPP___ATOMIC_CHECK_MEMORY_ORDER_H
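The new _LIBCPP_CHECK_WAIT_MEMORY_ORDER macro diagnoses the orders that are invalid for an atomic wait, mirroring the runtime assertion in atomic_ref::wait above. Illustrative:

#include <atomic>

int main() {
  std::atomic<int> a{1};
  a.wait(0, std::memory_order_acquire); // valid wait order; returns at once since a != 0
  // a.wait(0, std::memory_order_release); // would trip the new diagnostic
}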
diff --git a/libcxx/include/__atomic/cxx_atomic_impl.h b/libcxx/include/__atomic/cxx_atomic_impl.h
index b900cc135f78..18e88aa97bec 100644
--- a/libcxx/include/__atomic/cxx_atomic_impl.h
+++ b/libcxx/include/__atomic/cxx_atomic_impl.h
@@ -10,6 +10,7 @@
#define _LIBCPP___ATOMIC_CXX_ATOMIC_IMPL_H
#include <__atomic/memory_order.h>
+#include <__atomic/to_gcc_order.h>
#include <__config>
#include <__memory/addressof.h>
#include <__type_traits/is_assignable.h>
@@ -54,32 +55,6 @@ struct __cxx_atomic_base_impl {
_Tp __a_value;
};
-_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) {
- // Avoid switch statement to make this a constexpr.
- return __order == memory_order_relaxed
- ? __ATOMIC_RELAXED
- : (__order == memory_order_acquire
- ? __ATOMIC_ACQUIRE
- : (__order == memory_order_release
- ? __ATOMIC_RELEASE
- : (__order == memory_order_seq_cst
- ? __ATOMIC_SEQ_CST
- : (__order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_CONSUME))));
-}
-
-_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_failure_order(memory_order __order) {
- // Avoid switch statement to make this a constexpr.
- return __order == memory_order_relaxed
- ? __ATOMIC_RELAXED
- : (__order == memory_order_acquire
- ? __ATOMIC_ACQUIRE
- : (__order == memory_order_release
- ? __ATOMIC_RELAXED
- : (__order == memory_order_seq_cst
- ? __ATOMIC_SEQ_CST
- : (__order == memory_order_acq_rel ? __ATOMIC_ACQUIRE : __ATOMIC_CONSUME))));
-}
-
template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
__cxx_atomic_assign_volatile(__a->__a_value, __val);
diff --git a/libcxx/include/__atomic/to_gcc_order.h b/libcxx/include/__atomic/to_gcc_order.h
new file mode 100644
index 000000000000..d04c111addd3
--- /dev/null
+++ b/libcxx/include/__atomic/to_gcc_order.h
@@ -0,0 +1,54 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___ATOMIC_TO_GCC_ORDER_H
+#define _LIBCPP___ATOMIC_TO_GCC_ORDER_H
+
+#include <__atomic/memory_order.h>
+#include <__config>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if defined(__ATOMIC_RELAXED) && defined(__ATOMIC_CONSUME) && defined(__ATOMIC_ACQUIRE) && \
+ defined(__ATOMIC_RELEASE) && defined(__ATOMIC_ACQ_REL) && defined(__ATOMIC_SEQ_CST)
+
+_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) {
+ // Avoid switch statement to make this a constexpr.
+ return __order == memory_order_relaxed
+ ? __ATOMIC_RELAXED
+ : (__order == memory_order_acquire
+ ? __ATOMIC_ACQUIRE
+ : (__order == memory_order_release
+ ? __ATOMIC_RELEASE
+ : (__order == memory_order_seq_cst
+ ? __ATOMIC_SEQ_CST
+ : (__order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_CONSUME))));
+}
+
+_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_failure_order(memory_order __order) {
+ // Avoid switch statement to make this a constexpr.
+ return __order == memory_order_relaxed
+ ? __ATOMIC_RELAXED
+ : (__order == memory_order_acquire
+ ? __ATOMIC_ACQUIRE
+ : (__order == memory_order_release
+ ? __ATOMIC_RELAXED
+ : (__order == memory_order_seq_cst
+ ? __ATOMIC_SEQ_CST
+ : (__order == memory_order_acq_rel ? __ATOMIC_ACQUIRE : __ATOMIC_CONSUME))));
+}
+
+#endif
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___ATOMIC_TO_GCC_ORDER_H
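__to_gcc_failure_order differs from __to_gcc_order only by demoting release to relaxed and acq_rel to acquire, because the failure ordering of a compare-exchange may not contain a release operation. Caller-side illustration (standard API, not the internal helpers):

#include <atomic>

int main() {
  std::atomic<int> a{0};
  int expected = 0;
  // The failure ordering must not be release/acq_rel; acquire here matches what
  // __to_gcc_failure_order derives from an acq_rel success order.
  a.compare_exchange_strong(expected, 1, std::memory_order_acq_rel, std::memory_order_acquire);
}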
diff --git a/libcxx/include/__exception/exception_ptr.h b/libcxx/include/__exception/exception_ptr.h
index c9027de9238c..868fd7c01533 100644
--- a/libcxx/include/__exception/exception_ptr.h
+++ b/libcxx/include/__exception/exception_ptr.h
@@ -38,11 +38,14 @@ struct __cxa_exception;
_LIBCPP_OVERRIDABLE_FUNC_VIS __cxa_exception* __cxa_init_primary_exception(
void*,
std::type_info*,
- void(
# if defined(_WIN32)
- __thiscall
+ void(__thiscall*)(void*)) throw();
+# elif defined(__wasm__)
+ // In Wasm, a destructor returns its argument
+ void* (*)(void*)) throw();
+# else
+ void (*)(void*)) throw();
# endif
- *)(void*)) throw();
}
} // namespace __cxxabiv1
@@ -92,8 +95,16 @@ _LIBCPP_HIDE_FROM_ABI exception_ptr make_exception_ptr(_Ep __e) _NOEXCEPT {
using _Ep2 = __decay_t<_Ep>;
void* __ex = __cxxabiv1::__cxa_allocate_exception(sizeof(_Ep));
+# ifdef __wasm__
+ // In Wasm, a destructor returns its argument
+ (void)__cxxabiv1::__cxa_init_primary_exception(__ex, const_cast<std::type_info*>(&typeid(_Ep)), [](void* __p) -> void* {
+# else
(void)__cxxabiv1::__cxa_init_primary_exception(__ex, const_cast<std::type_info*>(&typeid(_Ep)), [](void* __p) {
+# endif
std::__destroy_at(static_cast<_Ep2*>(__p));
+# ifdef __wasm__
+ return __p;
+# endif
});
try {
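The shape of the destructor thunk passed to __cxa_init_primary_exception now differs per target; a side-by-side sketch of the two lambda signatures (illustrative only, outside the patch):

// On most targets the thunk returns void; the Wasm ABI expects the argument back.
#ifdef __wasm__
auto dtor = [](void* p) -> void* { /* destroy the object at p */ return p; };
#else
auto dtor = [](void* p) { /* destroy the object at p */ };
#endif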
diff --git a/libcxx/include/__locale b/libcxx/include/__locale
index 36ac099d650e..1e97c7594c8b 100644
--- a/libcxx/include/__locale
+++ b/libcxx/include/__locale
@@ -343,12 +343,12 @@ public:
static const mask __regex_word = 0x4000; // 0x8000 and 0x0100 and 0x00ff are used
# define _LIBCPP_CTYPE_MASK_IS_COMPOSITE_PRINT
# define _LIBCPP_CTYPE_MASK_IS_COMPOSITE_ALPHA
-#elif defined(__APPLE__) || defined(__FreeBSD__) || defined(__EMSCRIPTEN__) || defined(__NetBSD__)
+#elif defined(__APPLE__) || defined(__FreeBSD__) || defined(__NetBSD__)
# ifdef __APPLE__
typedef __uint32_t mask;
# elif defined(__FreeBSD__)
typedef unsigned long mask;
-# elif defined(__EMSCRIPTEN__) || defined(__NetBSD__)
+# elif defined(__NetBSD__)
typedef unsigned short mask;
# endif
static const mask space = _CTYPE_S;
diff --git a/libcxx/include/__numeric/pstl_transform_reduce.h b/libcxx/include/__numeric/pstl.h
index fe41b1c86f3b..05559b4d3f3c 100644
--- a/libcxx/include/__numeric/pstl_transform_reduce.h
+++ b/libcxx/include/__numeric/pstl.h
@@ -6,13 +6,15 @@
//
//===----------------------------------------------------------------------===//
-#ifndef _LIBCPP___NUMERIC_PSTL_TRANSFORM_REDUCE_H
-#define _LIBCPP___NUMERIC_PSTL_TRANSFORM_REDUCE_H
+#ifndef _LIBCPP___NUMERIC_PSTL_H
+#define _LIBCPP___NUMERIC_PSTL_H
#include <__algorithm/pstl_frontend_dispatch.h>
#include <__config>
+#include <__functional/identity.h>
#include <__functional/operations.h>
#include <__iterator/cpp17_iterator_concepts.h>
+#include <__iterator/iterator_traits.h>
#include <__numeric/transform_reduce.h>
#include <__pstl/configuration.h>
#include <__type_traits/is_execution_policy.h>
@@ -153,10 +155,85 @@ _LIBCPP_HIDE_FROM_ABI _Tp transform_reduce(
return *std::move(__res);
}
+template <class>
+void __pstl_reduce();
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Tp,
+ class _BinaryOperation = plus<>,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_Tp>
+__reduce(_ExecutionPolicy&& __policy,
+ _ForwardIterator&& __first,
+ _ForwardIterator&& __last,
+ _Tp&& __init,
+ _BinaryOperation&& __op = {}) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_reduce, _RawPolicy),
+ [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last, _Tp __g_init, _BinaryOperation __g_op) {
+ return std::__transform_reduce(
+ __policy, std::move(__g_first), std::move(__g_last), std::move(__g_init), std::move(__g_op), __identity{});
+ },
+ std::move(__first),
+ std::move(__last),
+ std::move(__init),
+ std::move(__op));
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _Tp,
+ class _BinaryOperation = plus<>,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI _Tp
+reduce(_ExecutionPolicy&& __policy,
+ _ForwardIterator __first,
+ _ForwardIterator __last,
+ _Tp __init,
+ _BinaryOperation __op = {}) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "reduce requires ForwardIterators");
+ auto __res = std::__reduce(__policy, std::move(__first), std::move(__last), std::move(__init), std::move(__op));
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *std::move(__res);
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__iter_value_type<_ForwardIterator>>
+__reduce(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last) noexcept {
+ return std::__pstl_frontend_dispatch(
+ _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_reduce, _RawPolicy),
+ [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last) {
+ return std::__reduce(
+ __policy, std::move(__g_first), std::move(__g_last), __iter_value_type<_ForwardIterator>());
+ },
+ std::move(__first),
+ std::move(__last));
+}
+
+template <class _ExecutionPolicy,
+ class _ForwardIterator,
+ class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
+ enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
+_LIBCPP_HIDE_FROM_ABI __iter_value_type<_ForwardIterator>
+reduce(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last) {
+ _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "reduce requires ForwardIterators");
+ auto __res = std::__reduce(__policy, std::move(__first), std::move(__last));
+ if (!__res)
+ std::__throw_bad_alloc();
+ return *std::move(__res);
+}
+
_LIBCPP_END_NAMESPACE_STD
#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
_LIBCPP_POP_MACROS
-#endif // _LIBCPP___NUMERIC_PSTL_TRANSFORM_REDUCE_H
+#endif // _LIBCPP___NUMERIC_PSTL_H
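The moved-in __reduce overloads reuse __transform_reduce with __identity{} as the projection, and the two-iterator form value-initializes the accumulator. A serial equivalent of that dispatch (illustrative):

#include <numeric>
#include <vector>

int main() {
  std::vector<int> v{1, 2, 3};
  // reduce(policy, first, last) behaves like transform_reduce with T(), plus<>, identity.
  int sum = std::transform_reduce(v.begin(), v.end(), int(), std::plus<>{},
                                  [](int x) { return x; });
  (void)sum; // 6
}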
diff --git a/libcxx/include/__numeric/pstl_reduce.h b/libcxx/include/__numeric/pstl_reduce.h
deleted file mode 100644
index d678b9480070..000000000000
--- a/libcxx/include/__numeric/pstl_reduce.h
+++ /dev/null
@@ -1,112 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP___NUMERIC_PSTL_REDUCE_H
-#define _LIBCPP___NUMERIC_PSTL_REDUCE_H
-
-#include <__algorithm/pstl_frontend_dispatch.h>
-#include <__config>
-#include <__functional/identity.h>
-#include <__iterator/cpp17_iterator_concepts.h>
-#include <__iterator/iterator_traits.h>
-#include <__numeric/pstl_transform_reduce.h>
-#include <__type_traits/is_execution_policy.h>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-_LIBCPP_PUSH_MACROS
-#include <__undef_macros>
-
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_BEGIN_NAMESPACE_STD
-
-template <class>
-void __pstl_reduce();
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Tp,
- class _BinaryOperation = plus<>,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_Tp>
-__reduce(_ExecutionPolicy&& __policy,
- _ForwardIterator&& __first,
- _ForwardIterator&& __last,
- _Tp&& __init,
- _BinaryOperation&& __op = {}) noexcept {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_reduce, _RawPolicy),
- [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last, _Tp __g_init, _BinaryOperation __g_op) {
- return std::__transform_reduce(
- __policy, std::move(__g_first), std::move(__g_last), std::move(__g_init), std::move(__g_op), __identity{});
- },
- std::move(__first),
- std::move(__last),
- std::move(__init),
- std::move(__op));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _Tp,
- class _BinaryOperation = plus<>,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI _Tp
-reduce(_ExecutionPolicy&& __policy,
- _ForwardIterator __first,
- _ForwardIterator __last,
- _Tp __init,
- _BinaryOperation __op = {}) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "reduce requires ForwardIterators");
- auto __res = std::__reduce(__policy, std::move(__first), std::move(__last), std::move(__init), std::move(__op));
- if (!__res)
- std::__throw_bad_alloc();
- return *std::move(__res);
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__iter_value_type<_ForwardIterator>>
-__reduce(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last) noexcept {
- return std::__pstl_frontend_dispatch(
- _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_reduce, _RawPolicy),
- [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last) {
- return std::__reduce(
- __policy, std::move(__g_first), std::move(__g_last), __iter_value_type<_ForwardIterator>());
- },
- std::move(__first),
- std::move(__last));
-}
-
-template <class _ExecutionPolicy,
- class _ForwardIterator,
- class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>,
- enable_if_t<is_execution_policy_v<_RawPolicy>, int> = 0>
-_LIBCPP_HIDE_FROM_ABI __iter_value_type<_ForwardIterator>
-reduce(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last) {
- _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "reduce requires ForwardIterators");
- auto __res = std::__reduce(__policy, std::move(__first), std::move(__last));
- if (!__res)
- std::__throw_bad_alloc();
- return *std::move(__res);
-}
-
-_LIBCPP_END_NAMESPACE_STD
-
-#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17
-
-_LIBCPP_POP_MACROS
-
-#endif // _LIBCPP___NUMERIC_PSTL_REDUCE_H
diff --git a/libcxx/include/__type_traits/has_unique_object_representation.h b/libcxx/include/__type_traits/has_unique_object_representation.h
index c0ada5618f0e..1aa044990032 100644
--- a/libcxx/include/__type_traits/has_unique_object_representation.h
+++ b/libcxx/include/__type_traits/has_unique_object_representation.h
@@ -11,8 +11,6 @@
#include <__config>
#include <__type_traits/integral_constant.h>
-#include <__type_traits/remove_all_extents.h>
-#include <__type_traits/remove_cv.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
@@ -24,10 +22,10 @@ _LIBCPP_BEGIN_NAMESPACE_STD
template <class _Tp>
struct _LIBCPP_TEMPLATE_VIS has_unique_object_representations
- : public integral_constant<bool, __has_unique_object_representations(remove_cv_t<remove_all_extents_t<_Tp>>)> {};
+ : public integral_constant<bool, __has_unique_object_representations(_Tp)> {};
template <class _Tp>
-inline constexpr bool has_unique_object_representations_v = has_unique_object_representations<_Tp>::value;
+inline constexpr bool has_unique_object_representations_v = __has_unique_object_representations(_Tp);
#endif
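The __has_unique_object_representations builtin already strips cv-qualifiers and array extents itself, so the remove_cv_t/remove_all_extents_t wrappers were redundant. A check that still holds after the simplification (illustrative):

#include <type_traits>

static_assert(std::has_unique_object_representations_v<const int[4]>,
              "cv-qualified arrays are handled by the builtin directly");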
diff --git a/libcxx/include/algorithm b/libcxx/include/algorithm
index 869fc19737b5..a522a60f1b55 100644
--- a/libcxx/include/algorithm
+++ b/libcxx/include/algorithm
@@ -1882,22 +1882,7 @@ template <class BidirectionalIterator, class Compare>
#if _LIBCPP_STD_VER >= 17
# include <__algorithm/clamp.h>
# include <__algorithm/for_each_n.h>
-# include <__algorithm/pstl_any_all_none_of.h>
-# include <__algorithm/pstl_copy.h>
-# include <__algorithm/pstl_count.h>
-# include <__algorithm/pstl_equal.h>
-# include <__algorithm/pstl_fill.h>
-# include <__algorithm/pstl_find.h>
-# include <__algorithm/pstl_for_each.h>
-# include <__algorithm/pstl_generate.h>
-# include <__algorithm/pstl_is_partitioned.h>
-# include <__algorithm/pstl_merge.h>
-# include <__algorithm/pstl_move.h>
-# include <__algorithm/pstl_replace.h>
-# include <__algorithm/pstl_rotate_copy.h>
-# include <__algorithm/pstl_sort.h>
-# include <__algorithm/pstl_stable_sort.h>
-# include <__algorithm/pstl_transform.h>
+# include <__algorithm/pstl.h>
# include <__algorithm/sample.h>
#endif // _LIBCPP_STD_VER >= 17
diff --git a/libcxx/include/atomic b/libcxx/include/atomic
index cb142b09bff3..80a0f9ee373e 100644
--- a/libcxx/include/atomic
+++ b/libcxx/include/atomic
@@ -599,6 +599,7 @@ template <class T>
#include <__atomic/atomic_flag.h>
#include <__atomic/atomic_init.h>
#include <__atomic/atomic_lock_free.h>
+#include <__atomic/atomic_ref.h>
#include <__atomic/atomic_sync.h>
#include <__atomic/check_memory_order.h>
#include <__atomic/contention_t.h>
diff --git a/libcxx/include/experimental/__simd/scalar.h b/libcxx/include/experimental/__simd/scalar.h
index aff2cd11cfcf..1add4653209a 100644
--- a/libcxx/include/experimental/__simd/scalar.h
+++ b/libcxx/include/experimental/__simd/scalar.h
@@ -62,6 +62,11 @@ struct __simd_operations<_Tp, simd_abi::__scalar> {
static _LIBCPP_HIDE_FROM_ABI void __load(_SimdStorage& __s, const _Up* __mem) noexcept {
__s.__data = static_cast<_Tp>(__mem[0]);
}
+
+ template <class _Up>
+ static _LIBCPP_HIDE_FROM_ABI void __store(_SimdStorage __s, _Up* __mem) noexcept {
+ *__mem = static_cast<_Up>(__s.__data);
+ }
};
template <class _Tp>
@@ -71,6 +76,8 @@ struct __mask_operations<_Tp, simd_abi::__scalar> {
static _LIBCPP_HIDE_FROM_ABI _MaskStorage __broadcast(bool __v) noexcept { return {__v}; }
static _LIBCPP_HIDE_FROM_ABI void __load(_MaskStorage& __s, const bool* __mem) noexcept { __s.__data = __mem[0]; }
+
+ static _LIBCPP_HIDE_FROM_ABI void __store(_MaskStorage __s, bool* __mem) noexcept { __mem[0] = __s.__data; }
};
} // namespace parallelism_v2
diff --git a/libcxx/include/experimental/__simd/simd.h b/libcxx/include/experimental/__simd/simd.h
index db4ebb8e4a38..37e334aad6da 100644
--- a/libcxx/include/experimental/__simd/simd.h
+++ b/libcxx/include/experimental/__simd/simd.h
@@ -70,6 +70,17 @@ public:
_Impl::__load(__s_, _Flags::template __apply<simd>(__mem));
}
+ // copy functions
+ template <class _Up, class _Flags, enable_if_t<__is_vectorizable_v<_Up> && is_simd_flag_type_v<_Flags>, int> = 0>
+ _LIBCPP_HIDE_FROM_ABI void copy_from(const _Up* __mem, _Flags) {
+ _Impl::__load(__s_, _Flags::template __apply<simd>(__mem));
+ }
+
+ template <class _Up, class _Flags, enable_if_t<__is_vectorizable_v<_Up> && is_simd_flag_type_v<_Flags>, int> = 0>
+ _LIBCPP_HIDE_FROM_ABI void copy_to(_Up* __mem, _Flags) const {
+ _Impl::__store(__s_, _Flags::template __apply<simd>(__mem));
+ }
+
// scalar access [simd.subscr]
_LIBCPP_HIDE_FROM_ABI reference operator[](size_t __i) noexcept { return reference(__s_, __i); }
_LIBCPP_HIDE_FROM_ABI value_type operator[](size_t __i) const noexcept { return __s_.__get(__i); }
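Usage sketch for the new copy_from/copy_to members, which forward to the __load/__store hooks added to the scalar and vec_ext backends above (Parallelism TS v2 names, assuming <experimental/simd> is available):

#include <experimental/simd>
namespace stdx = std::experimental;

int main() {
  float in[stdx::native_simd<float>::size()] = {1.0f};
  float out[stdx::native_simd<float>::size()];
  stdx::native_simd<float> v;
  v.copy_from(in, stdx::element_aligned);  // __simd_operations::__load
  v.copy_to(out, stdx::element_aligned);   // the new __simd_operations::__store
}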
diff --git a/libcxx/include/experimental/__simd/simd_mask.h b/libcxx/include/experimental/__simd/simd_mask.h
index 754db7992683..fd6dee2e28ee 100644
--- a/libcxx/include/experimental/__simd/simd_mask.h
+++ b/libcxx/include/experimental/__simd/simd_mask.h
@@ -58,6 +58,17 @@ public:
_Impl::__load(__s_, _Flags::template __apply<simd_mask>(__mem));
}
+ // copy functions
+ template <class _Flags, enable_if_t<is_simd_flag_type_v<_Flags>, int> = 0>
+ _LIBCPP_HIDE_FROM_ABI void copy_from(const value_type* __mem, _Flags) {
+ _Impl::__load(__s_, _Flags::template __apply<simd_mask>(__mem));
+ }
+
+ template <class _Flags, enable_if_t<is_simd_flag_type_v<_Flags>, int> = 0>
+ _LIBCPP_HIDE_FROM_ABI void copy_to(value_type* __mem, _Flags) const {
+ _Impl::__store(__s_, _Flags::template __apply<simd_mask>(__mem));
+ }
+
// scalar access [simd.mask.subscr]
_LIBCPP_HIDE_FROM_ABI reference operator[](size_t __i) noexcept { return reference(__s_, __i); }
_LIBCPP_HIDE_FROM_ABI value_type operator[](size_t __i) const noexcept { return __s_.__get(__i); }
diff --git a/libcxx/include/experimental/__simd/vec_ext.h b/libcxx/include/experimental/__simd/vec_ext.h
index c9423df93cfa..316866b84873 100644
--- a/libcxx/include/experimental/__simd/vec_ext.h
+++ b/libcxx/include/experimental/__simd/vec_ext.h
@@ -80,6 +80,12 @@ struct __simd_operations<_Tp, simd_abi::__vec_ext<_Np>> {
for (size_t __i = 0; __i < _Np; __i++)
__s.__data[__i] = static_cast<_Tp>(__mem[__i]);
}
+
+ template <class _Up>
+ static _LIBCPP_HIDE_FROM_ABI void __store(_SimdStorage __s, _Up* __mem) noexcept {
+ for (size_t __i = 0; __i < _Np; __i++)
+ __mem[__i] = static_cast<_Up>(__s.__data[__i]);
+ }
};
template <class _Tp, int _Np>
@@ -99,6 +105,11 @@ struct __mask_operations<_Tp, simd_abi::__vec_ext<_Np>> {
for (size_t __i = 0; __i < _Np; __i++)
__s.__data[__i] = experimental::__set_all_bits<_Tp>(__mem[__i]);
}
+
+ static _LIBCPP_HIDE_FROM_ABI void __store(_MaskStorage __s, bool* __mem) noexcept {
+ for (size_t __i = 0; __i < _Np; __i++)
+ __mem[__i] = static_cast<bool>(__s.__data[__i]);
+ }
};
} // namespace parallelism_v2
diff --git a/libcxx/include/forward_list b/libcxx/include/forward_list
index 5a7521eed410..80dd49fe3d75 100644
--- a/libcxx/include/forward_list
+++ b/libcxx/include/forward_list
@@ -554,7 +554,6 @@ protected:
return __guard.__release_ptr();
}
- template <class... _Args>
_LIBCPP_HIDE_FROM_ABI void __delete_node(__node_pointer __node) {
// For the same reason as above, we use the allocator's destroy() method for the value_type,
// but not for the node itself.
diff --git a/libcxx/include/list b/libcxx/include/list
index 90bddcc29db0..610a24e38460 100644
--- a/libcxx/include/list
+++ b/libcxx/include/list
@@ -567,7 +567,6 @@ protected:
return __guard.__release_ptr();
}
- template <class... _Args>
_LIBCPP_HIDE_FROM_ABI void __delete_node(__node_pointer __node) {
// For the same reason as above, we use the allocator's destroy() method for the value_type,
// but not for the node itself.
diff --git a/libcxx/include/locale b/libcxx/include/locale
index 748b276a8525..041d7bcd27fc 100644
--- a/libcxx/include/locale
+++ b/libcxx/include/locale
@@ -368,7 +368,11 @@ struct _LIBCPP_EXPORTED_FROM_ABI __num_get_base {
static const int __num_get_buf_sz = 40;
static int __get_base(ios_base&);
- static const char __src[33];
+  static const char __src[33]; // "0123456789abcdefABCDEFxX+-pPiInN"
+  // Number of leading characters of __src used when parsing integers ("0123456789abcdefABCDEFxX+-").
+  static const size_t __int_chr_cnt = 26;
+  // Number of leading characters of __src used when parsing floating-point values (adds "pP").
+  static const size_t __fp_chr_cnt = 28;
};
_LIBCPP_EXPORTED_FROM_ABI void
@@ -431,7 +435,7 @@ private:
template <typename _Tp>
const _Tp* __do_widen_p(ios_base& __iob, _Tp* __atoms) const {
locale __loc = __iob.getloc();
- use_facet<ctype<_Tp> >(__loc).widen(__src, __src + 26, __atoms);
+ use_facet<ctype<_Tp> >(__loc).widen(__src, __src + __int_chr_cnt, __atoms);
return __atoms;
}
@@ -447,7 +451,7 @@ private:
template <class _CharT>
string __num_get<_CharT>::__stage2_int_prep(ios_base& __iob, _CharT* __atoms, _CharT& __thousands_sep) {
locale __loc = __iob.getloc();
- std::use_facet<ctype<_CharT> >(__loc).widen(__src, __src + 26, __atoms);
+ std::use_facet<ctype<_CharT> >(__loc).widen(__src, __src + __int_chr_cnt, __atoms);
const numpunct<_CharT>& __np = std::use_facet<numpunct<_CharT> >(__loc);
__thousands_sep = __np.thousands_sep();
return __np.grouping();
@@ -458,7 +462,7 @@ template <class _CharT>
string __num_get<_CharT>::__stage2_float_prep(
ios_base& __iob, _CharT* __atoms, _CharT& __decimal_point, _CharT& __thousands_sep) {
locale __loc = __iob.getloc();
- std::use_facet<ctype<_CharT> >(__loc).widen(__src, __src + 32, __atoms);
+ std::use_facet<ctype<_CharT> >(__loc).widen(__src, __src + __fp_chr_cnt, __atoms);
const numpunct<_CharT>& __np = std::use_facet<numpunct<_CharT> >(__loc);
__decimal_point = __np.decimal_point();
__thousands_sep = __np.thousands_sep();
@@ -490,7 +494,7 @@ __num_get<_CharT>::__stage2_int_loop(_CharT __ct, int __base, char* __a, char*&
}
return 0;
}
- ptrdiff_t __f = std::find(__atoms, __atoms + 26, __ct) - __atoms;
+ ptrdiff_t __f = std::find(__atoms, __atoms + __int_chr_cnt, __ct) - __atoms;
if (__f >= 24)
return -1;
switch (__base) {
@@ -546,8 +550,8 @@ int __num_get<_CharT>::__stage2_float_loop(
}
return 0;
}
- ptrdiff_t __f = std::find(__atoms, __atoms + 32, __ct) - __atoms;
- if (__f >= 32)
+ ptrdiff_t __f = std::find(__atoms, __atoms + __num_get_base::__fp_chr_cnt, __ct) - __atoms;
+ if (__f >= static_cast<ptrdiff_t>(__num_get_base::__fp_chr_cnt))
return -1;
char __x = __src[__f];
if (__x == '-' || __x == '+') {
@@ -846,7 +850,7 @@ _InputIterator num_get<_CharT, _InputIterator>::__do_get_signed(
int __base = this->__get_base(__iob);
// Stage 2
char_type __thousands_sep;
- const int __atoms_size = 26;
+ const int __atoms_size = __num_get_base::__int_chr_cnt;
#ifdef _LIBCPP_ABI_OPTIMIZED_LOCALE_NUM_GET
char_type __atoms1[__atoms_size];
const char_type* __atoms = this->__do_widen(__iob, __atoms1);
@@ -895,7 +899,7 @@ _InputIterator num_get<_CharT, _InputIterator>::__do_get_unsigned(
int __base = this->__get_base(__iob);
// Stage 2
char_type __thousands_sep;
- const int __atoms_size = 26;
+ const int __atoms_size = __num_get_base::__int_chr_cnt;
#ifdef _LIBCPP_ABI_OPTIMIZED_LOCALE_NUM_GET
char_type __atoms1[__atoms_size];
const char_type* __atoms = this->__do_widen(__iob, __atoms1);
@@ -942,7 +946,7 @@ _InputIterator num_get<_CharT, _InputIterator>::__do_get_floating_point(
iter_type __b, iter_type __e, ios_base& __iob, ios_base::iostate& __err, _Fp& __v) const {
// Stage 1, nothing to do
// Stage 2
- char_type __atoms[32];
+ char_type __atoms[__num_get_base::__fp_chr_cnt];
char_type __decimal_point;
char_type __thousands_sep;
string __grouping = this->__stage2_float_prep(__iob, __atoms, __decimal_point, __thousands_sep);
@@ -951,10 +955,11 @@ _InputIterator num_get<_CharT, _InputIterator>::__do_get_floating_point(
char* __a = &__buf[0];
char* __a_end = __a;
unsigned __g[__num_get_base::__num_get_buf_sz];
- unsigned* __g_end = __g;
- unsigned __dc = 0;
- bool __in_units = true;
- char __exp = 'E';
+ unsigned* __g_end = __g;
+ unsigned __dc = 0;
+ bool __in_units = true;
+ char __exp = 'E';
+ bool __is_leading_parsed = false;
for (; __b != __e; ++__b) {
if (__a_end == __a + __buf.size()) {
size_t __tmp = __buf.size();
@@ -977,6 +982,21 @@ _InputIterator num_get<_CharT, _InputIterator>::__do_get_floating_point(
__dc,
__atoms))
break;
+
+      // The leading character, excluding any sign, must be a decimal digit.
+ if (!__is_leading_parsed) {
+ if (__a_end - __a >= 1 && __a[0] != '-' && __a[0] != '+') {
+ if ('0' <= __a[0] && __a[0] <= '9')
+ __is_leading_parsed = true;
+ else
+ break;
+ } else if (__a_end - __a >= 2 && (__a[0] == '-' || __a[0] == '+')) {
+ if ('0' <= __a[1] && __a[1] <= '9')
+ __is_leading_parsed = true;
+ else
+ break;
+ }
+ }
}
if (__grouping.size() != 0 && __in_units && __g_end - __g < __num_get_base::__num_get_buf_sz)
*__g_end++ = __dc;
@@ -996,10 +1016,11 @@ _InputIterator num_get<_CharT, _InputIterator>::do_get(
// Stage 1
int __base = 16;
// Stage 2
- char_type __atoms[26];
+ char_type __atoms[__num_get_base::__int_chr_cnt];
char_type __thousands_sep = char_type();
string __grouping;
- std::use_facet<ctype<_CharT> >(__iob.getloc()).widen(__num_get_base::__src, __num_get_base::__src + 26, __atoms);
+ std::use_facet<ctype<_CharT> >(__iob.getloc())
+ .widen(__num_get_base::__src, __num_get_base::__src + __num_get_base::__int_chr_cnt, __atoms);
string __buf;
__buf.resize(__buf.capacity());
char* __a = &__buf[0];
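
A quick illustration (not part of the patch) of what the new __is_leading_parsed check enforces: after an optional sign, the first character of a floating-point token must be a decimal digit, so extraction from input such as "e10" fails rather than being treated as a number. A minimal sketch, assuming ordinary stream semantics:

    #include <cassert>
    #include <sstream>

    int main(int, char**) {
      std::istringstream in("e10"); // leading 'e' is not a decimal digit
      double d = 0.0;
      in >> d;
      assert(in.fail()); // no valid floating-point token was parsed
      return 0;
    }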
diff --git a/libcxx/include/module.modulemap b/libcxx/include/module.modulemap
index 70dac2f19846..ca0fbe2cc7ae 100644
--- a/libcxx/include/module.modulemap
+++ b/libcxx/include/module.modulemap
@@ -714,35 +714,14 @@ module std_private_algorithm_partition_copy [system
module std_private_algorithm_partition_point [system] { header "__algorithm/partition_point.h" }
module std_private_algorithm_pop_heap [system] { header "__algorithm/pop_heap.h" }
module std_private_algorithm_prev_permutation [system] { header "__algorithm/prev_permutation.h" }
-module std_private_algorithm_pstl_any_all_none_of [system] { header "__algorithm/pstl_any_all_none_of.h" }
-module std_private_algorithm_pstl_copy [system] { header "__algorithm/pstl_copy.h" }
-module std_private_algorithm_pstl_count [system] { header "__algorithm/pstl_count.h" }
-module std_private_algorithm_pstl_equal [system] { header "__algorithm/pstl_equal.h" }
-module std_private_algorithm_pstl_fill [system] { header "__algorithm/pstl_fill.h" }
-module std_private_algorithm_pstl_find [system] {
- header "__algorithm/pstl_find.h"
- export *
-}
-module std_private_algorithm_pstl_for_each [system] {
- header "__algorithm/pstl_for_each.h"
+module std_private_algorithm_pstl [system] {
+ header "__algorithm/pstl.h"
export *
}
module std_private_algorithm_pstl_frontend_dispatch [system] {
header "__algorithm/pstl_frontend_dispatch.h"
export std_private_utility_forward
}
-module std_private_algorithm_pstl_generate [system] { header "__algorithm/pstl_generate.h" }
-module std_private_algorithm_pstl_is_partitioned [system] { header "__algorithm/pstl_is_partitioned.h" }
-module std_private_algorithm_pstl_merge [system] { header "__algorithm/pstl_merge.h" }
-module std_private_algorithm_pstl_move [system] { header "__algorithm/pstl_move.h" }
-module std_private_algorithm_pstl_replace [system] { header "__algorithm/pstl_replace.h" }
-module std_private_algorithm_pstl_rotate_copy [system] { header "__algorithm/pstl_rotate_copy.h" }
-module std_private_algorithm_pstl_sort [system] { header "__algorithm/pstl_sort.h" }
-module std_private_algorithm_pstl_stable_sort [system] {
- header "__algorithm/pstl_stable_sort.h"
- export std_private_functional_operations
-}
-module std_private_algorithm_pstl_transform [system] { header "__algorithm/pstl_transform.h" }
module std_private_algorithm_push_heap [system] { header "__algorithm/push_heap.h" }
module std_private_algorithm_ranges_adjacent_find [system] { header "__algorithm/ranges_adjacent_find.h" }
module std_private_algorithm_ranges_all_of [system] { header "__algorithm/ranges_all_of.h" }
@@ -1066,7 +1045,11 @@ module std_private_atomic_atomic_flag [system] {
}
module std_private_atomic_atomic_init [system] { header "__atomic/atomic_init.h" }
module std_private_atomic_atomic_lock_free [system] { header "__atomic/atomic_lock_free.h" }
-module std_private_atomic_atomic_sync [system] { header "__atomic/atomic_sync.h" }
+module std_private_atomic_atomic_ref [system] { header "__atomic/atomic_ref.h" }
+module std_private_atomic_atomic_sync [system] {
+ header "__atomic/atomic_sync.h"
+ export std_private_atomic_to_gcc_order
+}
module std_private_atomic_check_memory_order [system] { header "__atomic/check_memory_order.h" }
module std_private_atomic_contention_t [system] { header "__atomic/contention_t.h" }
module std_private_atomic_cxx_atomic_impl [system] { header "__atomic/cxx_atomic_impl.h" }
@@ -1074,6 +1057,10 @@ module std_private_atomic_fence [system] { header "__atomic/fence.
module std_private_atomic_is_always_lock_free [system] { header "__atomic/is_always_lock_free.h" }
module std_private_atomic_kill_dependency [system] { header "__atomic/kill_dependency.h" }
module std_private_atomic_memory_order [system] { header "__atomic/memory_order.h" }
+module std_private_atomic_to_gcc_order [system] {
+ header "__atomic/to_gcc_order.h"
+ export std_private_atomic_memory_order
+}
module std_private_bit_bit_cast [system] { header "__bit/bit_cast.h" }
module std_private_bit_bit_ceil [system] { header "__bit/bit_ceil.h" }
@@ -1582,9 +1569,8 @@ module std_private_numeric_inner_product [system] { header "__numeric
module std_private_numeric_iota [system] { header "__numeric/iota.h" }
module std_private_numeric_midpoint [system] { header "__numeric/midpoint.h" }
module std_private_numeric_partial_sum [system] { header "__numeric/partial_sum.h" }
-module std_private_numeric_pstl_reduce [system] { header "__numeric/pstl_reduce.h" }
-module std_private_numeric_pstl_transform_reduce [system] {
- header "__numeric/pstl_transform_reduce.h"
+module std_private_numeric_pstl [system] {
+ header "__numeric/pstl.h"
export *
}
module std_private_numeric_reduce [system] { header "__numeric/reduce.h" }
diff --git a/libcxx/include/numeric b/libcxx/include/numeric
index 8b429fa2f7e7..9fb5e9fb1da7 100644
--- a/libcxx/include/numeric
+++ b/libcxx/include/numeric
@@ -168,23 +168,30 @@ constexpr T saturate_cast(U x) noexcept; // freestanding, Sin
#include <__numeric/iota.h>
#include <__numeric/midpoint.h>
#include <__numeric/partial_sum.h>
-#include <__numeric/pstl_reduce.h>
-#include <__numeric/pstl_transform_reduce.h>
#include <__numeric/reduce.h>
#include <__numeric/saturation_arithmetic.h>
#include <__numeric/transform_exclusive_scan.h>
#include <__numeric/transform_inclusive_scan.h>
#include <__numeric/transform_reduce.h>
+#if _LIBCPP_STD_VER >= 17
+# include <__numeric/pstl.h>
+#endif
+
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
#endif
#if !defined(_LIBCPP_REMOVE_TRANSITIVE_INCLUDES) && _LIBCPP_STD_VER <= 20
+# include <climits>
# include <cmath>
# include <concepts>
+# include <cstdint>
+# include <execution>
# include <functional>
# include <iterator>
+# include <new>
+# include <optional>
# include <type_traits>
#endif
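
For context on the guarded include above: the execution-policy overloads now reach <numeric> through the consolidated <__numeric/pstl.h>, and only in C++17 and later. An illustrative use (not taken from the patch; building against libc++'s PSTL may additionally require -fexperimental-library):

    #include <execution>
    #include <numeric>
    #include <vector>

    int main(int, char**) {
      std::vector<int> v{1, 2, 3, 4};
      // std::reduce with an execution policy is declared via <__numeric/pstl.h>.
      int sum = std::reduce(std::execution::par, v.begin(), v.end());
      return sum == 10 ? 0 : 1;
    }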
diff --git a/libcxx/include/vector b/libcxx/include/vector
index 976bde9b9048..b190557fb7b7 100644
--- a/libcxx/include/vector
+++ b/libcxx/include/vector
@@ -424,11 +424,36 @@ public:
#endif
: __end_cap_(nullptr, __a) {
}
- _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI explicit vector(size_type __n);
+
+ _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI explicit vector(size_type __n) {
+ auto __guard = std::__make_exception_guard(__destroy_vector(*this));
+ if (__n > 0) {
+ __vallocate(__n);
+ __construct_at_end(__n);
+ }
+ __guard.__complete();
+ }
+
#if _LIBCPP_STD_VER >= 14
- _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI explicit vector(size_type __n, const allocator_type& __a);
+ _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI explicit vector(size_type __n, const allocator_type& __a)
+ : __end_cap_(nullptr, __a) {
+ auto __guard = std::__make_exception_guard(__destroy_vector(*this));
+ if (__n > 0) {
+ __vallocate(__n);
+ __construct_at_end(__n);
+ }
+ __guard.__complete();
+ }
#endif
- _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI vector(size_type __n, const value_type& __x);
+
+ _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI vector(size_type __n, const value_type& __x) {
+ auto __guard = std::__make_exception_guard(__destroy_vector(*this));
+ if (__n > 0) {
+ __vallocate(__n);
+ __construct_at_end(__n, __x);
+ }
+ __guard.__complete();
+ }
template <__enable_if_t<__is_allocator<_Allocator>::value, int> = 0>
_LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI
@@ -1126,39 +1151,6 @@ _LIBCPP_CONSTEXPR_SINCE_CXX20 void vector<_Tp, _Allocator>::__append(size_type _
}
template <class _Tp, class _Allocator>
-_LIBCPP_CONSTEXPR_SINCE_CXX20 vector<_Tp, _Allocator>::vector(size_type __n) {
- auto __guard = std::__make_exception_guard(__destroy_vector(*this));
- if (__n > 0) {
- __vallocate(__n);
- __construct_at_end(__n);
- }
- __guard.__complete();
-}
-
-#if _LIBCPP_STD_VER >= 14
-template <class _Tp, class _Allocator>
-_LIBCPP_CONSTEXPR_SINCE_CXX20 vector<_Tp, _Allocator>::vector(size_type __n, const allocator_type& __a)
- : __end_cap_(nullptr, __a) {
- auto __guard = std::__make_exception_guard(__destroy_vector(*this));
- if (__n > 0) {
- __vallocate(__n);
- __construct_at_end(__n);
- }
- __guard.__complete();
-}
-#endif
-
-template <class _Tp, class _Allocator>
-_LIBCPP_CONSTEXPR_SINCE_CXX20 vector<_Tp, _Allocator>::vector(size_type __n, const value_type& __x) {
- auto __guard = std::__make_exception_guard(__destroy_vector(*this));
- if (__n > 0) {
- __vallocate(__n);
- __construct_at_end(__n, __x);
- }
- __guard.__complete();
-}
-
-template <class _Tp, class _Allocator>
template <class _InputIterator,
__enable_if_t<__has_exactly_input_iterator_category<_InputIterator>::value &&
is_constructible<_Tp, typename iterator_traits<_InputIterator>::reference>::value,
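
Each constructor moved in-line above follows the same rollback idiom: arm a guard whose rollback is __destroy_vector(*this), do the allocation and construction, then disarm the guard on success. A simplified, self-contained sketch of that idiom (this is not the actual libc++ __exception_guard):

    #include <utility>

    template <class Rollback>
    class scope_guard {
    public:
      explicit scope_guard(Rollback r) : rollback_(std::move(r)) {}
      ~scope_guard() {
        if (!complete_)
          rollback_(); // exception path: undo partially finished work
      }
      void complete() noexcept { complete_ = true; } // success path: disarm
    private:
      Rollback rollback_;
      bool complete_ = false;
    };

    int main(int, char**) {
      bool rolled_back = false;
      {
        scope_guard g([&] { rolled_back = true; });
        // no g.complete(): simulates a constructor body that threw
      }
      return rolled_back ? 0 : 1;
    }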
diff --git a/libcxx/modules/std/atomic.inc b/libcxx/modules/std/atomic.inc
index d77d7a5bb0fb..e8cf90d01258 100644
--- a/libcxx/modules/std/atomic.inc
+++ b/libcxx/modules/std/atomic.inc
@@ -22,7 +22,7 @@ export namespace std {
// [atomics.ref.generic], class template atomic_ref
// [atomics.ref.pointer], partial specialization for pointers
- // using std::atomic_ref _LIBCPP_USING_IF_EXISTS;
+ using std::atomic_ref _LIBCPP_USING_IF_EXISTS;
// [atomics.types.generic], class template atomic
using std::atomic _LIBCPP_USING_IF_EXISTS;
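
With the export uncommented above, std::atomic_ref becomes visible to consumers of the std module. A small usage sketch (illustrative only):

    #include <atomic>

    int main(int, char**) {
      int value = 0;
      std::atomic_ref<int> ref(value); // atomic operations on a plain int
      ref.fetch_add(1, std::memory_order_relaxed);
      return ref.load() == 1 ? 0 : 1;
    }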
diff --git a/libcxx/src/chrono.cpp b/libcxx/src/chrono.cpp
index e7d6dfbc2292..83e8a64504ae 100644
--- a/libcxx/src/chrono.cpp
+++ b/libcxx/src/chrono.cpp
@@ -77,8 +77,8 @@ typedef void(WINAPI* GetSystemTimeAsFileTimePtr)(LPFILETIME);
class GetSystemTimeInit {
public:
GetSystemTimeInit() {
- fp =
- (GetSystemTimeAsFileTimePtr)GetProcAddress(GetModuleHandleW(L"kernel32.dll"), "GetSystemTimePreciseAsFileTime");
+ fp = (GetSystemTimeAsFileTimePtr)(void*)GetProcAddress(
+ GetModuleHandleW(L"kernel32.dll"), "GetSystemTimePreciseAsFileTime");
if (fp == nullptr)
fp = GetSystemTimeAsFileTime;
}
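
The intermediate (void*) above avoids a direct conversion between incompatible function pointer types (FARPROC versus the WINAPI typedef), the pattern compilers flag with warnings in the -Wcast-function-type family; the exact diagnostic motivating the change is not shown in this diff. A generic sketch of the idiom, with made-up names, valid on platforms where object and function pointers interconvert (as on Windows):

    // Casting one function pointer type straight to another can trip
    // -Wcast-function-type; routing the conversion through void* makes
    // the intent explicit and silences the diagnostic.
    using Callback = void (*)(int);

    Callback as_callback(void* raw_symbol) { return reinterpret_cast<Callback>(raw_symbol); }

    int main(int, char**) { return as_callback(nullptr) == nullptr ? 0 : 1; }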
diff --git a/libcxx/src/locale.cpp b/libcxx/src/locale.cpp
index 1ca88e30f63a..c5ab6de5d657 100644
--- a/libcxx/src/locale.cpp
+++ b/libcxx/src/locale.cpp
@@ -102,8 +102,6 @@ inline constexpr size_t countof(const T* const begin, const T* const end) {
return static_cast<size_t>(end - begin);
}
-} // namespace
-
string build_name(const string& other, const string& one, locale::category c) {
if (other == "*" || one == "*")
return "*";
@@ -115,6 +113,8 @@ string build_name(const string& other, const string& one, locale::category c) {
return "*";
}
+} // namespace
+
const locale::category locale::none;
const locale::category locale::collate;
const locale::category locale::ctype;
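
Moving the closing brace below build_name places the helper in the anonymous namespace alongside countof, giving it internal linkage. A self-contained reminder of the effect (illustrative names):

    namespace {
    // Internal linkage: helpers here cannot collide with identically named
    // symbols in other translation units of the library.
    int helper() { return 42; }
    } // namespace

    int main(int, char**) { return helper() == 42 ? 0 : 1; }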
diff --git a/libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_strong.pass.cpp b/libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_strong.pass.cpp
new file mode 100644
index 000000000000..066ed1191dd0
--- /dev/null
+++ b/libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_strong.pass.cpp
@@ -0,0 +1,58 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// REQUIRES: has-unix-headers
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: libcpp-hardening-mode=none || libcpp-hardening-mode=fast
+// XFAIL: libcpp-hardening-mode=debug && availability-verbose_abort-missing
+// ADDITIONAL_COMPILE_FLAGS: -Wno-user-defined-warnings
+
+// <atomic>
+
+// bool compare_exchange_strong(T& expected, T desired, memory_order success, memory_order failure) const noexcept;
+//
+// Preconditions: failure is memory_order::relaxed, memory_order::consume, memory_order::acquire, or memory_order::seq_cst.
+
+#include <atomic>
+
+#include "atomic_helpers.h"
+#include "check_assertion.h"
+
+template <typename T>
+struct TestCompareExchangeStrongInvalidMemoryOrder {
+ void operator()() const {
+ { // no assertion should trigger here
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ T t(T(2));
+ a.compare_exchange_strong(t, T(3), std::memory_order_relaxed, std::memory_order_relaxed);
+ }
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ T t(T(2));
+ a.compare_exchange_strong(t, T(3), std::memory_order_relaxed, std::memory_order_release);
+ }()),
+ "atomic_ref: failure memory order argument to strong atomic compare-and-exchange operation is invalid");
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ T t(T(2));
+ a.compare_exchange_strong(t, T(3), std::memory_order_relaxed, std::memory_order_acq_rel);
+ }()),
+ "atomic_ref: failure memory order argument to strong atomic compare-and-exchange operation is invalid");
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestCompareExchangeStrongInvalidMemoryOrder>()();
+ return 0;
+}
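
For contrast with the failing orders exercised above, a well-formed strong compare-exchange passes a failure ordering without a release component. A sketch (not part of the test suite):

    #include <atomic>

    int main(int, char**) {
      int v = 1;
      std::atomic_ref<int> a(v);
      int expected = 1;
      // Valid: acquire is an allowed failure ordering.
      a.compare_exchange_strong(expected, 2, std::memory_order_acq_rel, std::memory_order_acquire);
      return v == 2 ? 0 : 1;
    }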
diff --git a/libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_weak.pass.cpp b/libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_weak.pass.cpp
new file mode 100644
index 000000000000..e83a143df3f0
--- /dev/null
+++ b/libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_weak.pass.cpp
@@ -0,0 +1,58 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// REQUIRES: has-unix-headers
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: libcpp-hardening-mode=none || libcpp-hardening-mode=fast
+// XFAIL: libcpp-hardening-mode=debug && availability-verbose_abort-missing
+// ADDITIONAL_COMPILE_FLAGS: -Wno-user-defined-warnings
+
+// <atomic>
+
+// bool compare_exchange_weak(T& expected, T desired, memory_order success, memory_order failure) const noexcept;
+//
+// Preconditions: failure is memory_order::relaxed, memory_order::consume, memory_order::acquire, or memory_order::seq_cst.
+
+#include <atomic>
+
+#include "atomic_helpers.h"
+#include "check_assertion.h"
+
+template <typename T>
+struct TestCompareExchangeWeakInvalidMemoryOrder {
+ void operator()() const {
+ { // no assertion should trigger here
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ T t(T(2));
+ a.compare_exchange_weak(t, T(3), std::memory_order_relaxed, std::memory_order_relaxed);
+ }
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ T t(T(2));
+ a.compare_exchange_weak(t, T(3), std::memory_order_relaxed, std::memory_order_release);
+ }()),
+ "atomic_ref: failure memory order argument to weak atomic compare-and-exchange operation is invalid");
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ T t(T(2));
+ a.compare_exchange_weak(t, T(3), std::memory_order_relaxed, std::memory_order_acq_rel);
+ }()),
+ "atomic_ref: failure memory order argument to weak atomic compare-and-exchange operation is invalid");
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestCompareExchangeWeakInvalidMemoryOrder>()();
+ return 0;
+}
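
Unlike the strong form, compare_exchange_weak may fail spuriously even when the comparison holds, so it is normally driven from a retry loop. A sketch (not part of the test suite):

    #include <atomic>

    int main(int, char**) {
      int v = 0;
      std::atomic_ref<int> a(v);
      int expected = a.load(std::memory_order_relaxed);
      // Spurious failures simply go around the loop again.
      while (!a.compare_exchange_weak(
          expected, expected + 1, std::memory_order_acq_rel, std::memory_order_relaxed)) {
      }
      return v == 1 ? 0 : 1;
    }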
diff --git a/libcxx/test/libcxx/atomics/atomics.ref/assert.ctor.pass.cpp b/libcxx/test/libcxx/atomics/atomics.ref/assert.ctor.pass.cpp
new file mode 100644
index 000000000000..ef3705d1db27
--- /dev/null
+++ b/libcxx/test/libcxx/atomics/atomics.ref/assert.ctor.pass.cpp
@@ -0,0 +1,40 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// REQUIRES: has-unix-headers
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: libcpp-hardening-mode=none || libcpp-hardening-mode=fast
+// XFAIL: libcpp-hardening-mode=debug && availability-verbose_abort-missing
+
+// <atomic>
+
+// atomic_ref(T& obj);
+//
+// Preconditions: The referenced object is aligned to required_alignment.
+
+#include <atomic>
+#include <cstddef>
+
+#include "check_assertion.h"
+
+int main(int, char**) {
+ { // no assertion should trigger here
+ alignas(float) std::byte c[sizeof(float)];
+ float* f = new (c) float(3.14f);
+ [[maybe_unused]] std::atomic_ref<float> r(*f);
+ }
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ alignas(float) std::byte c[2 * sizeof(float)]; // intentionally larger
+ float* f = new (c + 1) float(3.14f); // intentionally misaligned
+ [[maybe_unused]] std::atomic_ref<float> r(*f);
+ }()),
+ "atomic_ref ctor: referenced object must be aligned to required_alignment");
+
+ return 0;
+}
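
The alignment precondition checked above can be satisfied portably by over-aligning the referenced object to std::atomic_ref<T>::required_alignment, which may exceed alignof(T). A sketch:

    #include <atomic>

    static_assert(std::atomic_ref<int>::required_alignment >= alignof(int));

    int main(int, char**) {
      alignas(std::atomic_ref<int>::required_alignment) int x = 0;
      std::atomic_ref<int> r(x); // precondition holds by construction
      return r.load();
    }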
diff --git a/libcxx/test/libcxx/atomics/atomics.ref/assert.load.pass.cpp b/libcxx/test/libcxx/atomics/atomics.ref/assert.load.pass.cpp
new file mode 100644
index 000000000000..bc92b3dc3622
--- /dev/null
+++ b/libcxx/test/libcxx/atomics/atomics.ref/assert.load.pass.cpp
@@ -0,0 +1,55 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// REQUIRES: has-unix-headers
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: libcpp-hardening-mode=none || libcpp-hardening-mode=fast
+// XFAIL: libcpp-hardening-mode=debug && availability-verbose_abort-missing
+// ADDITIONAL_COMPILE_FLAGS: -Wno-user-defined-warnings
+
+// <atomic>
+
+// T load(memory_order order = memory_order::seq_cst) const noexcept;
+//
+// Preconditions: order is memory_order::relaxed, memory_order::consume, memory_order::acquire, or memory_order::seq_cst.
+
+#include <atomic>
+
+#include "atomic_helpers.h"
+#include "check_assertion.h"
+
+template <typename T>
+struct TestLoadInvalidMemoryOrder {
+ void operator()() const {
+ { // no assertion should trigger here
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ (void)a.load(std::memory_order_relaxed);
+ }
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ (void)a.load(std::memory_order_release);
+ }()),
+ "atomic_ref: memory order argument to atomic load operation is invalid");
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ (void)a.load(std::memory_order_acq_rel);
+ }()),
+ "atomic_ref: memory order argument to atomic load operation is invalid");
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestLoadInvalidMemoryOrder>()();
+ return 0;
+}
diff --git a/libcxx/test/libcxx/atomics/atomics.ref/assert.store.pass.cpp b/libcxx/test/libcxx/atomics/atomics.ref/assert.store.pass.cpp
new file mode 100644
index 000000000000..ab0d4a220c94
--- /dev/null
+++ b/libcxx/test/libcxx/atomics/atomics.ref/assert.store.pass.cpp
@@ -0,0 +1,63 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// REQUIRES: has-unix-headers
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: libcpp-hardening-mode=none || libcpp-hardening-mode=fast
+// XFAIL: libcpp-hardening-mode=debug && availability-verbose_abort-missing
+// ADDITIONAL_COMPILE_FLAGS: -Wno-user-defined-warnings
+
+// <atomic>
+
+// void store(T desired, memory_order order = memory_order::seq_cst) const noexcept;
+//
+// Preconditions: order is memory_order::relaxed, memory_order::release, or memory_order::seq_cst.
+
+#include <atomic>
+
+#include "atomic_helpers.h"
+#include "check_assertion.h"
+
+template <typename T>
+struct TestStoreInvalidMemoryOrder {
+ void operator()() const {
+ { // no assertion should trigger here
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ a.store(T(2), std::memory_order_relaxed);
+ }
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ a.store(T(2), std::memory_order_consume);
+ }()),
+ "atomic_ref: memory order argument to atomic store operation is invalid");
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ a.store(T(2), std::memory_order_acquire);
+ }()),
+ "atomic_ref: memory order argument to atomic store operation is invalid");
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ a.store(T(2), std::memory_order_acq_rel);
+ }()),
+ "atomic_ref: memory order argument to atomic store operation is invalid");
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestStoreInvalidMemoryOrder>()();
+ return 0;
+}
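
Taken together with the load test above, these assertions pin down the allowed orderings: loads accept relaxed, consume, acquire, and seq_cst; stores accept relaxed, release, and seq_cst. A valid pairing (sketch):

    #include <atomic>

    int main(int, char**) {
      int v = 0;
      std::atomic_ref<int> a(v);
      a.store(1, std::memory_order_release);                  // allowed for store
      return a.load(std::memory_order_acquire) == 1 ? 0 : 1;  // allowed for load
    }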
diff --git a/libcxx/test/libcxx/atomics/atomics.ref/assert.wait.pass.cpp b/libcxx/test/libcxx/atomics/atomics.ref/assert.wait.pass.cpp
new file mode 100644
index 000000000000..dcec2fb62854
--- /dev/null
+++ b/libcxx/test/libcxx/atomics/atomics.ref/assert.wait.pass.cpp
@@ -0,0 +1,55 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// REQUIRES: has-unix-headers
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: libcpp-hardening-mode=none || libcpp-hardening-mode=fast
+// XFAIL: libcpp-hardening-mode=debug && availability-verbose_abort-missing
+// ADDITIONAL_COMPILE_FLAGS: -Wno-user-defined-warnings
+
+// <atomic>
+
+// void wait(T old, memory_order order = memory_order::seq_cst) const noexcept;
+//
+// Preconditions: order is memory_order::relaxed, memory_order::consume, memory_order::acquire, or memory_order::seq_cst.
+
+#include <atomic>
+
+#include "atomic_helpers.h"
+#include "check_assertion.h"
+
+template <typename T>
+struct TestWaitInvalidMemoryOrder {
+ void operator()() const {
+ { // no assertion should trigger here
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ a.wait(T(2), std::memory_order_relaxed);
+ }
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ a.wait(T(2), std::memory_order_release);
+ }()),
+ "atomic_ref: memory order argument to atomic wait operation is invalid");
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ a.wait(T(2), std::memory_order_acq_rel);
+ }()),
+ "atomic_ref: memory order argument to atomic wait operation is invalid");
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestWaitInvalidMemoryOrder>()();
+ return 0;
+}
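
Note that wait(old) only blocks while the current value compares equal to old, so a single-threaded example with a differing value returns immediately. A sketch using one of the allowed orderings:

    #include <atomic>

    int main(int, char**) {
      int v = 1;
      std::atomic_ref<int> a(v);
      // Current value (1) differs from old (0): returns without blocking.
      a.wait(0, std::memory_order_acquire);
      return 0;
    }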
diff --git a/libcxx/test/libcxx/language.support/support.dynamic/libcpp_deallocate.sh.cpp b/libcxx/test/libcxx/language.support/support.dynamic/libcpp_deallocate.sh.cpp
index f94ceaf57dba..aa3ce210e363 100644
--- a/libcxx/test/libcxx/language.support/support.dynamic/libcpp_deallocate.sh.cpp
+++ b/libcxx/test/libcxx/language.support/support.dynamic/libcpp_deallocate.sh.cpp
@@ -21,6 +21,9 @@
// GCC doesn't support the aligned-allocation flags.
// XFAIL: gcc
+// TODO(mordante) fix this test after updating clang in Docker
+// UNSUPPORTED: clang-15, clang-16, clang-17, clang-18, clang-19
+
// RUN: %{build} -faligned-allocation -fsized-deallocation
// RUN: %{run}
// RUN: %{build} -faligned-allocation -fno-sized-deallocation -DNO_SIZE
diff --git a/libcxx/test/libcxx/lit.local.cfg b/libcxx/test/libcxx/lit.local.cfg
index 147367323d4a..4467d8070cc7 100644
--- a/libcxx/test/libcxx/lit.local.cfg
+++ b/libcxx/test/libcxx/lit.local.cfg
@@ -1,4 +1,5 @@
# The tests in this directory need to run Python
-import pipes, sys
+import shlex
+import sys
-config.substitutions.append(("%{python}", pipes.quote(sys.executable)))
+config.substitutions.append(("%{python}", shlex.quote(sys.executable)))
diff --git a/libcxx/test/std/algorithms/alg.modifying.operations/alg.fill/pstl.exception_handling.pass.cpp b/libcxx/test/std/algorithms/alg.modifying.operations/alg.fill/pstl.exception_handling.pass.cpp
deleted file mode 100644
index dda642be85bc..000000000000
--- a/libcxx/test/std/algorithms/alg.modifying.operations/alg.fill/pstl.exception_handling.pass.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// UNSUPPORTED: c++03, c++11, c++14
-// UNSUPPORTED: no-exceptions
-// `check_assertion.h` requires Unix headers and regex support.
-// UNSUPPORTED: !has-unix-headers, no-localization
-
-// UNSUPPORTED: libcpp-has-no-incomplete-pstl
-
-// check that std::fill(ExecutionPolicy) and std::fill_n(ExecutionPolicy) terminate on user-thrown exceptions
-
-#include <algorithm>
-
-#include "check_assertion.h"
-#include "test_execution_policies.h"
-#include "test_iterators.h"
-
-#ifndef TEST_HAS_NO_EXCEPTIONS
-struct ThrowOnCopy {
- ThrowOnCopy& operator=(const ThrowOnCopy&) { throw int{}; }
-};
-#endif
-
-int main(int, char**) {
- ThrowOnCopy a[2]{};
- int b[2]{};
-
- test_execution_policies([&](auto&& policy) {
- // std::fill
- EXPECT_STD_TERMINATE([&] { (void)std::fill(policy, std::begin(a), std::end(a), ThrowOnCopy{}); });
- EXPECT_STD_TERMINATE([&] {
- try {
- (void)std::fill(
- policy, util::throw_on_move_iterator(std::begin(b), 1), util::throw_on_move_iterator(std::end(b), 1), 0);
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
-
- // std::fill_n
- EXPECT_STD_TERMINATE([&] { (void)std::fill_n(policy, std::begin(a), std::size(a), ThrowOnCopy{}); });
- EXPECT_STD_TERMINATE([&] {
- try {
- (void)std::fill_n(policy, util::throw_on_move_iterator(std::begin(b), 1), std::size(b), 0);
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
- });
-}
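
Each of the per-algorithm files removed here and below followed one pattern: run the algorithm inside EXPECT_STD_TERMINATE and throw from either a user callable or a throwing iterator (their coverage moves to the consolidated test added later in this patch). A reduced, standalone sketch of the pattern, using std::set_terminate instead of the test-suite macro and an illustrative helper name:

    #include <cstdlib>
    #include <exception>

    template <class F>
    void expect_terminate(F f) {
      std::set_terminate([] { std::_Exit(0); }); // reaching terminate means "pass"
      f();                                       // expected to throw without a handler
      std::_Exit(1);                             // returning normally is a failure
    }

    int main(int, char**) {
      expect_terminate([] { throw 42; }); // uncaught exception -> std::terminate
    }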
diff --git a/libcxx/test/std/algorithms/alg.modifying.operations/alg.move/pstl.exception_handling.pass.cpp b/libcxx/test/std/algorithms/alg.modifying.operations/alg.move/pstl.exception_handling.pass.cpp
deleted file mode 100644
index bb8ab4217222..000000000000
--- a/libcxx/test/std/algorithms/alg.modifying.operations/alg.move/pstl.exception_handling.pass.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// UNSUPPORTED: c++03, c++11, c++14
-// UNSUPPORTED: no-exceptions
-// `check_assertion.h` requires Unix headers and regex support.
-// UNSUPPORTED: !has-unix-headers, no-localization
-
-// UNSUPPORTED: libcpp-has-no-incomplete-pstl
-
-// check that std::move(ExecutionPolicy) terminates on user-thrown exceptions
-
-#include <algorithm>
-
-#include "check_assertion.h"
-#include "test_execution_policies.h"
-#include "test_iterators.h"
-
-int main(int, char**) {
- test_execution_policies([](auto&& policy) {
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- int b[] = {1, 2};
- (void)std::move(policy,
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::end(a), 1),
- util::throw_on_move_iterator(std::begin(b), 1));
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
- });
-}
diff --git a/libcxx/test/std/algorithms/alg.modifying.operations/alg.replace/pstl.exception_handling.pass.cpp b/libcxx/test/std/algorithms/alg.modifying.operations/alg.replace/pstl.exception_handling.pass.cpp
deleted file mode 100644
index c02496bef421..000000000000
--- a/libcxx/test/std/algorithms/alg.modifying.operations/alg.replace/pstl.exception_handling.pass.cpp
+++ /dev/null
@@ -1,118 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// UNSUPPORTED: c++03, c++11, c++14
-// UNSUPPORTED: no-exceptions
-// `check_assertion.h` requires Unix headers and regex support.
-// UNSUPPORTED: !has-unix-headers, no-localization
-
-// UNSUPPORTED: libcpp-has-no-incomplete-pstl
-
-// check that std::replace(ExecutionPolicy), std::replace_if(ExecutionPolicy), std::replace_copy(ExecutionPolicy)
-// and std::replace_copy_if(ExecutionPolicy) terminate on user-thrown exceptions
-
-#include <algorithm>
-
-#include "check_assertion.h"
-#include "test_execution_policies.h"
-#include "test_iterators.h"
-
-struct ThrowOnCompare {};
-
-#ifndef TEST_HAS_NO_EXCEPTIONS
-bool operator==(ThrowOnCompare, ThrowOnCompare) { throw int{}; }
-#endif
-
-int main(int, char**) {
- test_execution_policies([&](auto&& policy) {
- // std::replace
- EXPECT_STD_TERMINATE([&] {
- ThrowOnCompare a[2]{};
- (void)std::replace(policy, std::begin(a), std::end(a), ThrowOnCompare{}, ThrowOnCompare{});
- });
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- (void)std::replace(
- policy, util::throw_on_move_iterator(std::begin(a), 1), util::throw_on_move_iterator(std::end(a), 1), 1, 2);
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
-
- // std::replace_if
- EXPECT_STD_TERMINATE([&] {
- ThrowOnCompare a[2]{};
- (void)std::replace_if(
- policy, std::begin(a), std::end(a), [](ThrowOnCompare&) -> bool { throw int{}; }, ThrowOnCompare{});
- });
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- (void)std::replace_if(
- policy,
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::end(a), 1),
- [](int) { return true; },
- 2);
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
-
- // std::replace_copy
- EXPECT_STD_TERMINATE([&] {
- ThrowOnCompare a[2]{};
- (void)std::replace_copy(policy, std::begin(a), std::end(a), std::begin(a), ThrowOnCompare{}, ThrowOnCompare{});
- });
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- (void)std::replace_copy(
- policy,
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::end(a), 1),
- util::throw_on_move_iterator(std::begin(a), 1),
- 1,
- 2);
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
-
- // std::replace_copy_if
- EXPECT_STD_TERMINATE([&] {
- ThrowOnCompare a[2]{};
- (void)std::replace_copy_if(
- policy,
- std::begin(a),
- std::end(a),
- std::begin(a),
- [](ThrowOnCompare& i) { return i == i; },
- ThrowOnCompare{});
- });
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- (void)std::replace_copy_if(
- policy,
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::end(a), 1),
- util::throw_on_move_iterator(std::begin(a), 1),
- [](int) { return true; },
- 2);
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
- });
-}
diff --git a/libcxx/test/std/algorithms/alg.modifying.operations/alg.rotate/pstl.exception_handling.pass.cpp b/libcxx/test/std/algorithms/alg.modifying.operations/alg.rotate/pstl.exception_handling.pass.cpp
deleted file mode 100644
index 88d177a6e39f..000000000000
--- a/libcxx/test/std/algorithms/alg.modifying.operations/alg.rotate/pstl.exception_handling.pass.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// UNSUPPORTED: c++03, c++11, c++14
-// UNSUPPORTED: no-exceptions
-// `check_assertion.h` requires Unix headers and regex support.
-// UNSUPPORTED: !has-unix-headers, no-localization
-
-// UNSUPPORTED: libcpp-has-no-incomplete-pstl
-
-// check that std::find(ExecutionPolicy), std::find_if(ExecutionPolicy) and std::find_if_not(ExecutionPolicy) terminate
-// on user-thrown exceptions
-
-#include <algorithm>
-
-#include "check_assertion.h"
-#include "test_execution_policies.h"
-#include "test_iterators.h"
-
-int main(int, char**) {
- test_execution_policies([](auto&& policy) {
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- int b[] = {1, 2};
- (void)std::rotate_copy(
- policy,
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::end(a), 1),
- util::throw_on_move_iterator(std::begin(b), 1));
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
- });
-}
diff --git a/libcxx/test/std/algorithms/alg.modifying.operations/alg.transform/pstl.exception_handling.pass.cpp b/libcxx/test/std/algorithms/alg.modifying.operations/alg.transform/pstl.exception_handling.pass.cpp
deleted file mode 100644
index 439204060e18..000000000000
--- a/libcxx/test/std/algorithms/alg.modifying.operations/alg.transform/pstl.exception_handling.pass.cpp
+++ /dev/null
@@ -1,73 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// UNSUPPORTED: c++03, c++11, c++14
-// UNSUPPORTED: no-exceptions
-// `check_assertion.h` requires Unix headers and regex support.
-// UNSUPPORTED: !has-unix-headers, no-localization
-
-// UNSUPPORTED: libcpp-has-no-incomplete-pstl
-
-// check that std::transform(ExecutionPolicy) terminates on user-thrown exceptions
-
-#include <algorithm>
-
-#include "check_assertion.h"
-#include "test_execution_policies.h"
-#include "test_iterators.h"
-
-int main(int, char**) {
- test_execution_policies([&](auto&& policy) {
- EXPECT_STD_TERMINATE([&] {
- int a[2]{};
- int b[2]{};
- int c[2]{};
- (void)std::transform(
- policy, std::begin(a), std::end(a), std::begin(b), std::begin(c), [](auto v, auto) -> decltype(v) {
- throw int{};
- });
- });
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- (void)std::transform(
- policy,
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::end(a), 1),
- util::throw_on_move_iterator(std::begin(a), 1),
- [](int i) { return i; });
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
-
- EXPECT_STD_TERMINATE([&] {
- int a[2]{};
- int b[2]{};
- (void)std::transform(policy, std::begin(a), std::end(a), std::begin(b), [](auto v) -> decltype(v) {
- throw int{};
- });
- });
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- (void)std::transform(
- policy,
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::end(a), 1),
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::begin(a), 1),
- std::plus{});
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
- });
-}
diff --git a/libcxx/test/std/algorithms/alg.nonmodifying/alg.all_of/pstl.exception_handling.pass.cpp b/libcxx/test/std/algorithms/alg.nonmodifying/alg.all_of/pstl.exception_handling.pass.cpp
deleted file mode 100644
index d1c031bdd97a..000000000000
--- a/libcxx/test/std/algorithms/alg.nonmodifying/alg.all_of/pstl.exception_handling.pass.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// UNSUPPORTED: c++03, c++11, c++14
-// UNSUPPORTED: no-exceptions
-// `check_assertion.h` requires Unix headers and regex support.
-// UNSUPPORTED: !has-unix-headers, no-localization
-
-// UNSUPPORTED: libcpp-has-no-incomplete-pstl
-
-// check that std::all_of(ExecutionPolicy) terminates on user-thrown exceptions
-
-#include <algorithm>
-
-#include "check_assertion.h"
-#include "test_execution_policies.h"
-#include "test_iterators.h"
-
-int main(int, char**) {
- test_execution_policies([](auto&& policy) {
- EXPECT_STD_TERMINATE([&] {
- int a[] = {1, 2};
- (void)std::all_of(policy, std::begin(a), std::end(a), [](int i) -> bool { throw i; });
- });
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- (void)std::all_of(
- policy,
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::end(a), 1),
- [](int) { return true; });
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
- });
-}
diff --git a/libcxx/test/std/algorithms/alg.nonmodifying/alg.any_of/pstl.exception_handling.pass.cpp b/libcxx/test/std/algorithms/alg.nonmodifying/alg.any_of/pstl.exception_handling.pass.cpp
deleted file mode 100644
index 58fe79b34c00..000000000000
--- a/libcxx/test/std/algorithms/alg.nonmodifying/alg.any_of/pstl.exception_handling.pass.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// UNSUPPORTED: c++03, c++11, c++14
-// UNSUPPORTED: no-exceptions
-// `check_assertion.h` requires Unix headers and regex support.
-// UNSUPPORTED: !has-unix-headers, no-localization
-
-// UNSUPPORTED: libcpp-has-no-incomplete-pstl
-
-// check that std::any_of(ExecutionPolicy) terminates on user-thrown exceptions
-
-#include <algorithm>
-
-#include "check_assertion.h"
-#include "test_execution_policies.h"
-#include "test_iterators.h"
-
-int main(int, char**) {
- test_execution_policies([](auto&& policy) {
- EXPECT_STD_TERMINATE([&] {
- int a[] = {1, 2};
- (void)std::any_of(policy, std::begin(a), std::end(a), [](int i) -> bool { throw i; });
- });
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- (void)std::any_of(
- policy,
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::end(a), 1),
- [](int) { return true; });
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
- });
-}
diff --git a/libcxx/test/std/algorithms/alg.nonmodifying/alg.equal/pstl.exception_handling.pass.cpp b/libcxx/test/std/algorithms/alg.nonmodifying/alg.equal/pstl.exception_handling.pass.cpp
deleted file mode 100644
index 1bcd858f3c02..000000000000
--- a/libcxx/test/std/algorithms/alg.nonmodifying/alg.equal/pstl.exception_handling.pass.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// UNSUPPORTED: c++03, c++11, c++14
-// UNSUPPORTED: no-exceptions
-// `check_assertion.h` requires Unix headers and regex support.
-// UNSUPPORTED: !has-unix-headers, no-localization
-
-// UNSUPPORTED: libcpp-has-no-incomplete-pstl
-
-// check that std::equal(ExecutionPolicy) terminates on user-thrown exceptions
-
-#include <algorithm>
-
-#include "check_assertion.h"
-#include "test_execution_policies.h"
-#include "test_iterators.h"
-
-int main(int, char**) {
- test_execution_policies([](auto&& policy) {
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- int b[] = {1, 2};
- (void)std::equal(policy,
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::end(a), 1),
- util::throw_on_move_iterator(std::begin(b), 1));
- } catch (const util::iterator_error&) {
- assert(false);
- }
- });
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- int b[] = {1, 2};
- (void)std::equal(
- policy,
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::end(a), 1),
- util::throw_on_move_iterator(std::begin(b), 1),
- util::throw_on_move_iterator(std::end(b), 1));
- } catch (const util::iterator_error&) {
- assert(false);
- }
- });
- });
-}
diff --git a/libcxx/test/std/algorithms/alg.nonmodifying/alg.find/pstl.exception_handling.pass.cpp b/libcxx/test/std/algorithms/alg.nonmodifying/alg.find/pstl.exception_handling.pass.cpp
deleted file mode 100644
index b0ee4f8d062e..000000000000
--- a/libcxx/test/std/algorithms/alg.nonmodifying/alg.find/pstl.exception_handling.pass.cpp
+++ /dev/null
@@ -1,87 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// UNSUPPORTED: c++03, c++11, c++14
-// UNSUPPORTED: no-exceptions
-// `check_assertion.h` requires Unix headers and regex support.
-// UNSUPPORTED: !has-unix-headers, no-localization
-
-// UNSUPPORTED: libcpp-has-no-incomplete-pstl
-
-// check that std::find(ExecutionPolicy), std::find_if(ExecutionPolicy) and std::find_if_not(ExecutionPolicy) terminate
-// on user-thrown exceptions
-
-#include <algorithm>
-
-#include "check_assertion.h"
-#include "test_execution_policies.h"
-#include "test_iterators.h"
-
-struct ThrowOnCompare {};
-
-#ifndef TEST_HAS_NO_EXCEPTIONS
-bool operator==(ThrowOnCompare, ThrowOnCompare) { throw int{}; }
-#endif
-
-int main(int, char**) {
- test_execution_policies([](auto&& policy) {
- // std::find
- EXPECT_STD_TERMINATE([&] {
- ThrowOnCompare a[2] = {};
- (void)std::find(policy, std::begin(a), std::end(a), ThrowOnCompare{});
- });
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- (void)std::find(
- policy, util::throw_on_move_iterator(std::begin(a), 1), util::throw_on_move_iterator(std::end(a), 1), 0);
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
-
- // std::find_if
- EXPECT_STD_TERMINATE([&] {
- int a[] = {1, 2};
- (void)std::find_if(policy, std::begin(a), std::end(a), [](int) -> bool { throw int{}; });
- });
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- (void)std::find_if(
- policy,
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::end(a), 1),
- [](int) { return true; });
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
-
- // std::find_if_not
- EXPECT_STD_TERMINATE([&] {
- int a[] = {1, 2};
- (void)std::find_if_not(policy, std::begin(a), std::end(a), [](int) -> bool { throw int{}; });
- });
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- (void)std::find_if_not(
- policy,
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::end(a), 1),
- [](int) { return true; });
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
- });
-}
diff --git a/libcxx/test/std/algorithms/alg.nonmodifying/alg.foreach/pstl.exception_handling.pass.cpp b/libcxx/test/std/algorithms/alg.nonmodifying/alg.foreach/pstl.exception_handling.pass.cpp
deleted file mode 100644
index a63276f1e025..000000000000
--- a/libcxx/test/std/algorithms/alg.nonmodifying/alg.foreach/pstl.exception_handling.pass.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// UNSUPPORTED: c++03, c++11, c++14
-// UNSUPPORTED: no-exceptions
-// `check_assertion.h` requires Unix headers and regex support.
-// UNSUPPORTED: !has-unix-headers, no-localization
-
-// UNSUPPORTED: libcpp-has-no-incomplete-pstl
-
-// check that std::for_each(ExecutionPolicy) and std::for_each_n(ExecutionPolicy) terminate on user-thrown exceptions
-
-#include <algorithm>
-
-#include "check_assertion.h"
-#include "test_execution_policies.h"
-#include "test_iterators.h"
-
-int main(int, char**) {
- test_execution_policies([](auto&& policy) {
- int a[] = {1, 2};
- // std::for_each
- EXPECT_STD_TERMINATE([&] { std::for_each(policy, std::begin(a), std::end(a), [](int) { throw int{}; }); });
- EXPECT_STD_TERMINATE([&] {
- try {
- (void)std::for_each(
- policy,
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::end(a), 1),
- [](int) {});
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
-
- // std::for_each_n
- EXPECT_STD_TERMINATE([&] { std::for_each_n(policy, std::data(a), std::size(a), [](int) { throw int{}; }); });
- EXPECT_STD_TERMINATE([&] {
- try {
- (void)std::for_each_n(policy, util::throw_on_move_iterator(std::begin(a), 1), std::size(a), [](int) {});
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
- });
-}
diff --git a/libcxx/test/std/algorithms/alg.nonmodifying/alg.none_of/pstl.exception_handling.pass.cpp b/libcxx/test/std/algorithms/alg.nonmodifying/alg.none_of/pstl.exception_handling.pass.cpp
deleted file mode 100644
index 26e6fea5904f..000000000000
--- a/libcxx/test/std/algorithms/alg.nonmodifying/alg.none_of/pstl.exception_handling.pass.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// UNSUPPORTED: c++03, c++11, c++14
-// UNSUPPORTED: no-exceptions
-// `check_assertion.h` requires Unix headers and regex support.
-// UNSUPPORTED: !has-unix-headers, no-localization
-
-// UNSUPPORTED: libcpp-has-no-incomplete-pstl
-
-// check that std::none_of(ExecutionPolicy) terminates on user-thrown exceptions
-
-#include <algorithm>
-
-#include "check_assertion.h"
-#include "test_execution_policies.h"
-#include "test_iterators.h"
-
-int main(int, char**) {
- test_execution_policies([](auto&& policy) {
- EXPECT_STD_TERMINATE([&] {
- int a[] = {1, 2};
- (void)std::none_of(policy, std::begin(a), std::end(a), [](int i) -> bool { throw i; });
- });
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- (void)std::none_of(
- policy,
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::end(a), 1),
- [](int) { return true; });
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
- });
-}
diff --git a/libcxx/test/std/algorithms/alg.sorting/alg.merge/pstl.exception_handling.pass.cpp b/libcxx/test/std/algorithms/alg.sorting/alg.merge/pstl.exception_handling.pass.cpp
deleted file mode 100644
index b48a5a9fa2b7..000000000000
--- a/libcxx/test/std/algorithms/alg.sorting/alg.merge/pstl.exception_handling.pass.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// UNSUPPORTED: c++03, c++11, c++14
-// UNSUPPORTED: no-exceptions
-// `check_assertion.h` requires Unix headers and regex support.
-// UNSUPPORTED: !has-unix-headers, no-localization
-
-// UNSUPPORTED: libcpp-has-no-incomplete-pstl
-
-// check that std::merge(ExecutionPolicy) terminates on user-thrown exceptions
-
-#include <algorithm>
-
-#include "check_assertion.h"
-#include "test_execution_policies.h"
-#include "test_iterators.h"
-
-int main(int, char**) {
- test_execution_policies([](auto&& policy) {
- EXPECT_STD_TERMINATE([&] {
- int a[] = {1, 2};
- std::merge(policy, std::begin(a), std::end(a), std::begin(a), std::end(a), std::begin(a), [](int, int) -> bool {
- throw int{};
- });
- });
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- (void)std::merge(
- policy,
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::end(a), 1),
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::end(a), 1),
- util::throw_on_move_iterator(std::begin(a), 1),
- std::less{});
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
- });
-
- return 0;
-}
diff --git a/libcxx/test/std/algorithms/alg.sorting/alg.sort/stable.sort/pstl.exception_handling.pass.cpp b/libcxx/test/std/algorithms/alg.sorting/alg.sort/stable.sort/pstl.exception_handling.pass.cpp
deleted file mode 100644
index 1dc603cfaa55..000000000000
--- a/libcxx/test/std/algorithms/alg.sorting/alg.sort/stable.sort/pstl.exception_handling.pass.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// UNSUPPORTED: c++03, c++11, c++14
-// UNSUPPORTED: no-exceptions
-// `check_assertion.h` requires Unix headers and regex support.
-// UNSUPPORTED: !has-unix-headers, no-localization
-
-// UNSUPPORTED: libcpp-has-no-incomplete-pstl
-
-// check that std::stable_sort(ExecutionPolicy) terminates on user-thrown exceptions
-
-#include <algorithm>
-
-#include "check_assertion.h"
-#include "test_execution_policies.h"
-#include "test_iterators.h"
-
-int main(int, char**) {
- test_execution_policies([](auto&& policy) {
- EXPECT_STD_TERMINATE([&] {
- int a[] = {1, 2};
- std::stable_sort(policy, std::begin(a), std::end(a), [](int, int) -> bool { throw int{}; });
- });
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- (void)std::stable_sort(
- policy, util::throw_on_move_iterator(std::begin(a), 1), util::throw_on_move_iterator(std::end(a), 1));
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
- });
-}
diff --git a/libcxx/test/std/algorithms/numeric.ops/reduce/pstl.exception_handling.pass.cpp b/libcxx/test/std/algorithms/numeric.ops/reduce/pstl.exception_handling.pass.cpp
deleted file mode 100644
index d52889b1be14..000000000000
--- a/libcxx/test/std/algorithms/numeric.ops/reduce/pstl.exception_handling.pass.cpp
+++ /dev/null
@@ -1,52 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// UNSUPPORTED: c++03, c++11, c++14
-// UNSUPPORTED: no-exceptions
-// `check_assertion.h` requires Unix headers and regex support.
-// UNSUPPORTED: !has-unix-headers, no-localization
-
-// UNSUPPORTED: libcpp-has-no-incomplete-pstl
-
-// check that std::reduce(ExecutionPolicy) terminates on user-thrown exceptions
-
-#include <numeric>
-
-#include "check_assertion.h"
-#include "test_execution_policies.h"
-#include "test_iterators.h"
-
-int main(int, char**) {
- test_execution_policies([&](auto&& policy) {
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- (void)std::reduce(
- policy, util::throw_on_move_iterator(std::begin(a), 1), util::throw_on_move_iterator(std::end(a), 1));
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
-
- EXPECT_STD_TERMINATE([&] {
- int a[2]{};
- (void)std::reduce(policy, std::begin(a), std::end(a), 1, [](int, int) -> int { throw 1; });
- });
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- (void)std::reduce(
- policy, util::throw_on_move_iterator(std::begin(a), 1), util::throw_on_move_iterator(std::end(a), 1), 1);
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
- });
-}
diff --git a/libcxx/test/std/algorithms/numeric.ops/transform.reduce/pstl.exception_handling.pass.cpp b/libcxx/test/std/algorithms/numeric.ops/transform.reduce/pstl.exception_handling.pass.cpp
deleted file mode 100644
index 5ac04334f000..000000000000
--- a/libcxx/test/std/algorithms/numeric.ops/transform.reduce/pstl.exception_handling.pass.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// UNSUPPORTED: c++03, c++11, c++14
-// UNSUPPORTED: no-exceptions
-// `check_assertion.h` requires Unix headers and regex support.
-// UNSUPPORTED: !has-unix-headers, no-localization
-
-// UNSUPPORTED: libcpp-has-no-incomplete-pstl
-
-// check that std::reduce(ExecutionPolicy) terminates on user-thrown exceptions
-
-#include <numeric>
-
-#include "check_assertion.h"
-#include "test_execution_policies.h"
-#include "test_iterators.h"
-
-int main(int, char**) {
- test_execution_policies([&](auto&& policy) {
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- (void)std::transform_reduce(
- policy,
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::end(a), 1),
- util::throw_on_move_iterator(std::begin(a), 1),
- 1);
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
-
- EXPECT_STD_TERMINATE([&] {
- int a[2]{};
- (void)std::transform_reduce(
- policy, std::begin(a), std::end(a), 1, [](int, int) -> int { throw 1; }, [](int) -> int { return 0; });
- });
- EXPECT_STD_TERMINATE([&] {
- try {
- int a[] = {1, 2};
- (void)std::transform_reduce(
- policy,
- util::throw_on_move_iterator(std::begin(a), 1),
- util::throw_on_move_iterator(std::end(a), 1),
- 1,
- std::plus{},
- [](int) -> int { return 0; });
- } catch (const util::iterator_error&) {
- assert(false);
- }
- std::terminate(); // make the test pass in case the algorithm didn't move the iterator
- });
- });
-}
diff --git a/libcxx/test/std/algorithms/pstl.exception_handling.pass.cpp b/libcxx/test/std/algorithms/pstl.exception_handling.pass.cpp
new file mode 100644
index 000000000000..bedb2258d1fd
--- /dev/null
+++ b/libcxx/test/std/algorithms/pstl.exception_handling.pass.cpp
@@ -0,0 +1,339 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14
+// UNSUPPORTED: no-exceptions
+// `check_assertion.h` requires Unix headers and regex support.
+// UNSUPPORTED: !has-unix-headers, no-localization
+
+// UNSUPPORTED: libcpp-has-no-incomplete-pstl
+
+// <algorithm>
+// <numeric>
+//
+// Check that PSTL algorithms terminate on user-thrown exceptions.
+
+#include <algorithm>
+#include <numeric>
+
+#include "check_assertion.h"
+#include "test_execution_policies.h"
+#include "test_iterators.h"
+
+template <class F>
+void assert_non_throwing(F f) {
+ // We wrap this whole test in EXPECT_STD_TERMINATE because if f() terminates, we want the test to pass,
+ // since this signals proper handling of user exceptions in the PSTL.
+ EXPECT_STD_TERMINATE([&] {
+ bool threw = false;
+ try {
+ f();
+ } catch (...) {
+ threw = true;
+ }
+ // If nothing was thrown, call std::terminate() to pass the EXPECT_STD_TERMINATE assertion.
+ // Otherwise, don't call std::terminate() to fail the assertion.
+ if (!threw)
+ std::terminate();
+ });
+}
+
+struct ThrowToken {
+ void activate() { active_ = true; }
+ void deactivate() { active_ = false; }
+ bool active() const { return active_; }
+
+private:
+ bool active_{false};
+};
+
+template <class Func>
+struct on_scope_exit {
+ explicit on_scope_exit(Func func) : func_(func) {}
+ ~on_scope_exit() { func_(); }
+
+private:
+ Func func_;
+};
+template <class Func>
+on_scope_exit(Func) -> on_scope_exit<Func>;
+
+int main(int, char**) {
+ test_execution_policies([&](auto&& policy) {
+ int a[] = {1, 2, 3, 4};
+ int b[] = {1, 2, 3};
+ int n = 2;
+ int storage[999];
+ int val = 99;
+ int init = 1;
+
+    // We generate a fixed number of "tokens" and activate exactly one of them on each iteration. A given
+    // operation then throws only while its token is active. That way we check that an exception thrown
+    // from each argument of the algorithm is handled properly.
+ ThrowToken tokens[7];
+ for (ThrowToken& t : tokens) {
+ t.activate();
+ on_scope_exit _([&] { t.deactivate(); });
+
+ auto first1 = util::throw_on_move_iterator(std::begin(a), tokens[0].active() ? 1 : -1);
+ auto last1 = util::throw_on_move_iterator(std::end(a), tokens[1].active() ? 1 : -1);
+ auto first2 = util::throw_on_move_iterator(std::begin(b), tokens[2].active() ? 1 : -1);
+ auto last2 = util::throw_on_move_iterator(std::end(b), tokens[3].active() ? 1 : -1);
+      auto dest = util::throw_on_move_iterator(std::begin(storage), tokens[4].active() ? 1 : -1);
+ auto maybe_throw = [](ThrowToken const& token, auto f) {
+ return [&token, f](auto... args) {
+ if (token.active())
+ throw 1;
+ return f(args...);
+ };
+ };
+
+ {
+ auto pred = maybe_throw(tokens[5], [](int x) -> bool { return x % 2 == 0; });
+
+ // all_of(first, last, pred)
+ assert_non_throwing([=, &policy] { (void)std::all_of(policy, std::move(first1), std::move(last1), pred); });
+
+ // any_of(first, last, pred)
+ assert_non_throwing([=, &policy] { (void)std::any_of(policy, std::move(first1), std::move(last1), pred); });
+
+ // none_of(first, last, pred)
+ assert_non_throwing([=, &policy] { (void)std::none_of(policy, std::move(first1), std::move(last1), pred); });
+ }
+
+ {
+ // copy(first, last, dest)
+ assert_non_throwing([=, &policy] {
+ (void)std::copy(policy, std::move(first1), std::move(last1), std::move(dest));
+ });
+
+ // copy_n(first, n, dest)
+ assert_non_throwing([=, &policy] { (void)std::copy_n(policy, std::move(first1), n, std::move(dest)); });
+ }
+
+ {
+ auto pred = maybe_throw(tokens[5], [](int x) -> bool { return x % 2 == 0; });
+
+ // count(first, last, val)
+ assert_non_throwing([=, &policy] { (void)std::count(policy, std::move(first1), std::move(last1), val); });
+
+ // count_if(first, last, pred)
+ assert_non_throwing([=, &policy] { (void)std::count_if(policy, std::move(first1), std::move(last1), pred); });
+ }
+
+ {
+ auto binary_pred = maybe_throw(tokens[5], [](int x, int y) -> bool { return x == y; });
+
+ // equal(first1, last1, first2)
+ assert_non_throwing([=, &policy] {
+ (void)std::equal(policy, std::move(first1), std::move(last1), std::move(first2));
+ });
+
+ // equal(first1, last1, first2, binary_pred)
+ assert_non_throwing([=, &policy] {
+ (void)std::equal(policy, std::move(first1), std::move(last1), std::move(first2), binary_pred);
+ });
+
+ // equal(first1, last1, first2, last2)
+ assert_non_throwing([=, &policy] {
+ (void)std::equal(policy, std::move(first1), std::move(last1), std::move(first2), std::move(last2));
+ });
+
+ // equal(first1, last1, first2, last2, binary_pred)
+ assert_non_throwing([=, &policy] {
+ (void)std::equal(
+ policy, std::move(first1), std::move(last1), std::move(first2), std::move(last2), binary_pred);
+ });
+ }
+
+ {
+ // fill(first, last, val)
+ assert_non_throwing([=, &policy] { (void)std::fill(policy, std::move(first1), std::move(last1), val); });
+
+ // fill_n(first, n, val)
+ assert_non_throwing([=, &policy] { (void)std::fill_n(policy, std::move(first1), n, val); });
+ }
+
+ {
+ auto pred = maybe_throw(tokens[5], [](int x) -> bool { return x % 2 == 0; });
+
+ // find(first, last, val)
+ assert_non_throwing([=, &policy] { (void)std::find(policy, std::move(first1), std::move(last1), val); });
+
+ // find_if(first, last, pred)
+ assert_non_throwing([=, &policy] { (void)std::find_if(policy, std::move(first1), std::move(last1), pred); });
+
+ // find_if_not(first, last, pred)
+ assert_non_throwing([=, &policy] {
+ (void)std::find_if_not(policy, std::move(first1), std::move(last1), pred);
+ });
+ }
+
+ {
+ auto func = maybe_throw(tokens[5], [](int) {});
+
+ // for_each(first, last, func)
+ assert_non_throwing([=, &policy] { (void)std::for_each(policy, std::move(first1), std::move(last1), func); });
+
+ // for_each_n(first, n, func)
+ assert_non_throwing([=, &policy] { (void)std::for_each_n(policy, std::move(first1), n, func); });
+ }
+
+ {
+ auto gen = maybe_throw(tokens[5], []() -> int { return 42; });
+
+ // generate(first, last, func)
+ assert_non_throwing([=, &policy] { (void)std::generate(policy, std::move(first1), std::move(last1), gen); });
+
+ // generate_n(first, n, func)
+ assert_non_throwing([=, &policy] { (void)std::generate_n(policy, std::move(first1), n, gen); });
+ }
+
+ {
+ auto pred = maybe_throw(tokens[5], [](int x) -> bool { return x % 2 == 0; });
+
+ // is_partitioned(first, last, pred)
+ assert_non_throwing([=, &policy] {
+ (void)std::is_partitioned(policy, std::move(first1), std::move(last1), pred);
+ });
+ }
+
+ {
+ auto compare = maybe_throw(tokens[5], [](int x, int y) -> bool { return x < y; });
+
+ // merge(first1, last1, first2, last2, dest)
+ assert_non_throwing([=, &policy] {
+ (void)std::merge(
+ policy, std::move(first1), std::move(last1), std::move(first2), std::move(last2), std::move(dest));
+ });
+
+ // merge(first1, last1, first2, last2, dest, comp)
+ assert_non_throwing([=, &policy] {
+ (void)std::merge(
+ policy,
+ std::move(first1),
+ std::move(last1),
+ std::move(first2),
+ std::move(last2),
+ std::move(dest),
+ compare);
+ });
+ }
+
+ {
+ // move(first, last, dest)
+ assert_non_throwing([=, &policy] {
+ (void)std::move(policy, std::move(first1), std::move(last1), std::move(dest));
+ });
+ }
+
+ {
+ auto pred = maybe_throw(tokens[5], [](int x) -> bool { return x % 2 == 0; });
+
+ // replace_if(first, last, pred, val)
+ assert_non_throwing([=, &policy] {
+ (void)std::replace_if(policy, std::move(first1), std::move(last1), pred, val);
+ });
+
+ // replace(first, last, val1, val2)
+ assert_non_throwing([=, &policy] {
+ (void)std::replace(policy, std::move(first1), std::move(last1), val, val);
+ });
+
+ // replace_copy_if(first, last, dest, pred, val)
+ assert_non_throwing([=, &policy] {
+ (void)std::replace_copy_if(policy, std::move(first1), std::move(last1), std::move(dest), pred, val);
+ });
+
+ // replace_copy(first, last, dest, val1, val2)
+ assert_non_throwing([=, &policy] {
+ (void)std::replace_copy(policy, std::move(first1), std::move(last1), std::move(dest), val, val);
+ });
+ }
+
+ {
+ auto mid1 = util::throw_on_move_iterator(std::begin(a) + 2, tokens[5].active() ? 1 : -1);
+
+ // rotate_copy(first, mid, last, dest)
+ assert_non_throwing([=, &policy] {
+ (void)std::rotate_copy(policy, std::move(first1), std::move(mid1), std::move(last1), std::move(dest));
+ });
+ }
+
+ {
+ auto compare = maybe_throw(tokens[5], [](int x, int y) -> bool { return x < y; });
+
+ // sort(first, last)
+ assert_non_throwing([=, &policy] { (void)std::sort(policy, std::move(first1), std::move(last1)); });
+
+ // sort(first, last, comp)
+ assert_non_throwing([=, &policy] { (void)std::sort(policy, std::move(first1), std::move(last1), compare); });
+
+ // stable_sort(first, last)
+ assert_non_throwing([=, &policy] { (void)std::stable_sort(policy, std::move(first1), std::move(last1)); });
+
+ // stable_sort(first, last, comp)
+ assert_non_throwing([=, &policy] {
+ (void)std::stable_sort(policy, std::move(first1), std::move(last1), compare);
+ });
+ }
+
+ {
+ auto unary = maybe_throw(tokens[5], [](int x) -> int { return x * 2; });
+ auto binary = maybe_throw(tokens[5], [](int x, int y) -> int { return x * y; });
+
+ // transform(first, last, dest, func)
+ assert_non_throwing([=, &policy] {
+ (void)std::transform(policy, std::move(first1), std::move(last1), std::move(dest), unary);
+ });
+
+ // transform(first1, last1, first2, dest, func)
+ assert_non_throwing([=, &policy] {
+ (void)std::transform(policy, std::move(first1), std::move(last1), std::move(first2), std::move(dest), binary);
+ });
+ }
+
+ {
+ auto reduction = maybe_throw(tokens[5], [](int x, int y) -> int { return x + y; });
+ auto transform_unary = maybe_throw(tokens[6], [](int x) -> int { return x * 2; });
+ auto transform_binary = maybe_throw(tokens[6], [](int x, int y) -> int { return x * y; });
+
+ // transform_reduce(first1, last1, first2, init)
+ assert_non_throwing([=, &policy] {
+ (void)std::transform_reduce(policy, std::move(first1), std::move(last1), std::move(first2), init);
+ });
+
+ // transform_reduce(first1, last1, init, reduce, transform)
+ assert_non_throwing([=, &policy] {
+ (void)std::transform_reduce(policy, std::move(first1), std::move(last1), init, reduction, transform_unary);
+ });
+
+ // transform_reduce(first1, last1, first2, init, reduce, transform)
+ assert_non_throwing([=, &policy] {
+ (void)std::transform_reduce(
+ policy, std::move(first1), std::move(last1), std::move(first2), init, reduction, transform_binary);
+ });
+ }
+
+ {
+ auto reduction = maybe_throw(tokens[5], [](int x, int y) -> int { return x + y; });
+
+ // reduce(first, last)
+ assert_non_throwing([=, &policy] { (void)std::reduce(policy, std::move(first1), std::move(last1)); });
+
+ // reduce(first, last, init)
+ assert_non_throwing([=, &policy] { (void)std::reduce(policy, std::move(first1), std::move(last1), init); });
+
+ // reduce(first, last, init, binop)
+ assert_non_throwing([=, &policy] {
+ (void)std::reduce(policy, std::move(first1), std::move(last1), init, reduction);
+ });
+ }
+ }
+ });
+}
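
Aside: the pattern above relies on [algorithm.parallel.exceptions] — an exception that escapes an element access function of a parallel algorithm must result in a call to std::terminate(). A standalone sketch of that contract, assuming a toolchain whose parallel algorithms are actually enabled (illustration only, not from this patch):

#include <algorithm>
#include <cstdlib>
#include <exception>
#include <execution>
#include <iterator>

int main() {
  // Expected path: the escaping exception makes the algorithm call
  // std::terminate(), which enters this handler and exits cleanly.
  std::set_terminate([] { std::_Exit(EXIT_SUCCESS); });

  int a[] = {1, 2, 3, 4};
  (void)std::all_of(std::execution::par, std::begin(a), std::end(a),
                    [](int) -> bool { throw 1; });
  return EXIT_FAILURE; // unreachable if terminate() fired as required
}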
diff --git a/libcxx/test/std/atomics/atomics.ref/assign.pass.cpp b/libcxx/test/std/atomics/atomics.ref/assign.pass.cpp
new file mode 100644
index 000000000000..3887211752c6
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/assign.pass.cpp
@@ -0,0 +1,50 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// T operator=(T) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestAssign {
+ void operator()() const {
+ {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ std::same_as<T> decltype(auto) y = (a = T(2));
+ assert(y == T(2));
+ assert(x == T(2));
+
+ ASSERT_NOEXCEPT(a = T(0));
+ static_assert(std::is_nothrow_assignable_v<std::atomic_ref<T>, T>);
+
+ static_assert(!std::is_copy_assignable_v<std::atomic_ref<T>>);
+ }
+
+ {
+ auto assign = [](std::atomic_ref<T> const& y, T, T new_val) { y = new_val; };
+ auto load = [](std::atomic_ref<T> const& y) { return y.load(); };
+ test_seq_cst<T>(assign, load);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestAssign>()();
+ return 0;
+}
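
For orientation: std::atomic_ref (C++20) overlays atomic operations on an existing non-atomic object, and operator= performs a seq_cst store that is observable through the underlying object. A minimal usage sketch (illustration only, not from this patch):

#include <atomic>
#include <cassert>

int main() {
  int x = 1;
  std::atomic_ref<int> r(x); // atomic view of a plain int
  r = 2;                     // seq_cst atomic store through the view
  assert(x == 2);            // the referenced object itself changed
  assert(r.load() == 2);
  return 0;
}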
diff --git a/libcxx/test/std/atomics/atomics.ref/bitwise_and_assign.pass.cpp b/libcxx/test/std/atomics/atomics.ref/bitwise_and_assign.pass.cpp
new file mode 100644
index 000000000000..2be1e9962880
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/bitwise_and_assign.pass.cpp
@@ -0,0 +1,60 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type operator&=(integral-type) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_bitwise_and_assign = requires { std::declval<T const>() &= std::declval<T>(); };
+
+template <typename T>
+struct TestDoesNotHaveBitwiseAndAssign {
+ void operator()() const { static_assert(!has_bitwise_and_assign<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestBitwiseAndAssign {
+ void operator()() const {
+ static_assert(std::is_integral_v<T>);
+
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ std::same_as<T> decltype(auto) y = (a &= T(1));
+ assert(y == T(1));
+ assert(x == T(1));
+ ASSERT_NOEXCEPT(a &= T(0));
+
+ y = (a &= T(2));
+ assert(y == T(0));
+ assert(x == T(0));
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestBitwiseAndAssign>()();
+
+ TestEachFloatingPointType<TestDoesNotHaveBitwiseAndAssign>()();
+
+ TestEachPointerType<TestDoesNotHaveBitwiseAndAssign>()();
+
+ TestDoesNotHaveBitwiseAndAssign<bool>()();
+ TestDoesNotHaveBitwiseAndAssign<UserAtomicType>()();
+ TestDoesNotHaveBitwiseAndAssign<LargeUserAtomicType>()();
+
+ return 0;
+}
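
Note the convention the assertions above pin down: the compound-assignment operators of atomic_ref return the new value, while the fetch_* members (tested later in this patch) return the value held before the operation. A sketch of the difference (illustration only, not from this patch):

#include <atomic>
#include <cassert>

int main() {
  unsigned x = 0b0110;
  std::atomic_ref<unsigned> a(x);

  unsigned n = (a &= 0b0011); // operator&= yields the *new* value
  assert(n == 0b0010 && x == 0b0010);

  unsigned o = a.fetch_and(0b0001); // fetch_and yields the *old* value
  assert(o == 0b0010 && x == 0b0000);
  return 0;
}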
diff --git a/libcxx/test/std/atomics/atomics.ref/bitwise_or_assign.pass.cpp b/libcxx/test/std/atomics/atomics.ref/bitwise_or_assign.pass.cpp
new file mode 100644
index 000000000000..5c22c8a2b2b6
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/bitwise_or_assign.pass.cpp
@@ -0,0 +1,56 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type operator|=(integral-type) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_bitwise_or_assign = requires { std::declval<T const>() |= std::declval<T>(); };
+
+template <typename T>
+struct TestDoesNotHaveBitwiseOrAssign {
+ void operator()() const { static_assert(!has_bitwise_or_assign<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestBitwiseOrAssign {
+ void operator()() const {
+ static_assert(std::is_integral_v<T>);
+
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ std::same_as<T> decltype(auto) y = (a |= T(2));
+ assert(y == T(3));
+ assert(x == T(3));
+ ASSERT_NOEXCEPT(a |= T(0));
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestBitwiseOrAssign>()();
+
+ TestEachFloatingPointType<TestDoesNotHaveBitwiseOrAssign>()();
+
+ TestEachPointerType<TestDoesNotHaveBitwiseOrAssign>()();
+
+ TestDoesNotHaveBitwiseOrAssign<bool>()();
+ TestDoesNotHaveBitwiseOrAssign<UserAtomicType>()();
+ TestDoesNotHaveBitwiseOrAssign<LargeUserAtomicType>()();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/bitwise_xor_assign.pass.cpp b/libcxx/test/std/atomics/atomics.ref/bitwise_xor_assign.pass.cpp
new file mode 100644
index 000000000000..4dc4fd307f58
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/bitwise_xor_assign.pass.cpp
@@ -0,0 +1,56 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type operator^=(integral-type) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_bitwise_xor_assign = requires { std::declval<T const>() ^= std::declval<T>(); };
+
+template <typename T>
+struct TestDoesNotHaveBitwiseXorAssign {
+  void operator()() const { static_assert(!has_bitwise_xor_assign<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestBitwiseXorAssign {
+ void operator()() const {
+ static_assert(std::is_integral_v<T>);
+
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ std::same_as<T> decltype(auto) y = (a ^= T(2));
+ assert(y == T(3));
+ assert(x == T(3));
+ ASSERT_NOEXCEPT(a ^= T(0));
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestBitwiseXorAssign>()();
+
+ TestEachFloatingPointType<TestDoesNotHaveBitwiseXorAssign>()();
+
+ TestEachPointerType<TestDoesNotHaveBitwiseXorAssign>()();
+
+ TestDoesNotHaveBitwiseXorAssign<bool>()();
+ TestDoesNotHaveBitwiseXorAssign<UserAtomicType>()();
+ TestDoesNotHaveBitwiseXorAssign<LargeUserAtomicType>()();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/compare_exchange_strong.pass.cpp b/libcxx/test/std/atomics/atomics.ref/compare_exchange_strong.pass.cpp
new file mode 100644
index 000000000000..72b2f444c476
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/compare_exchange_strong.pass.cpp
@@ -0,0 +1,221 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// bool compare_exchange_strong(T&, T, memory_order, memory_order) const noexcept;
+// bool compare_exchange_strong(T&, T, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestCompareExchangeStrong {
+ void operator()() const {
+ {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ T t(T(1));
+ std::same_as<bool> decltype(auto) y = a.compare_exchange_strong(t, T(2));
+ assert(y == true);
+ assert(a == T(2));
+ assert(t == T(1));
+ y = a.compare_exchange_strong(t, T(3));
+ assert(y == false);
+ assert(a == T(2));
+ assert(t == T(2));
+
+ ASSERT_NOEXCEPT(a.compare_exchange_strong(t, T(2)));
+ }
+ {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ T t(T(1));
+ std::same_as<bool> decltype(auto) y = a.compare_exchange_strong(t, T(2), std::memory_order_seq_cst);
+ assert(y == true);
+ assert(a == T(2));
+ assert(t == T(1));
+ y = a.compare_exchange_strong(t, T(3), std::memory_order_seq_cst);
+ assert(y == false);
+ assert(a == T(2));
+ assert(t == T(2));
+
+ ASSERT_NOEXCEPT(a.compare_exchange_strong(t, T(2), std::memory_order_seq_cst));
+ }
+ {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ T t(T(1));
+ std::same_as<bool> decltype(auto) y =
+ a.compare_exchange_strong(t, T(2), std::memory_order_release, std::memory_order_relaxed);
+ assert(y == true);
+ assert(a == T(2));
+ assert(t == T(1));
+ y = a.compare_exchange_strong(t, T(3), std::memory_order_release, std::memory_order_relaxed);
+ assert(y == false);
+ assert(a == T(2));
+ assert(t == T(2));
+
+ ASSERT_NOEXCEPT(a.compare_exchange_strong(t, T(2), std::memory_order_release, std::memory_order_relaxed));
+ }
+
+ // success memory_order::release
+ {
+ auto store = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ auto r = x.compare_exchange_strong(old_val, new_val, std::memory_order::release, std::memory_order::relaxed);
+ assert(r);
+ };
+
+ auto load = [](std::atomic_ref<T> const& x) { return x.load(std::memory_order::acquire); };
+ test_acquire_release<T>(store, load);
+
+ auto store_one_arg = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ auto r = x.compare_exchange_strong(old_val, new_val, std::memory_order::release);
+ assert(r);
+ };
+ test_acquire_release<T>(store_one_arg, load);
+ }
+
+ // success memory_order::acquire
+ {
+ auto store = [](std::atomic_ref<T> const& x, T, T new_val) { x.store(new_val, std::memory_order::release); };
+
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_strong(val, val, std::memory_order::acquire, std::memory_order::relaxed)) {
+ }
+ return val;
+ };
+ test_acquire_release<T>(store, load);
+
+ auto load_one_arg = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_strong(val, val, std::memory_order::acquire)) {
+ }
+ return val;
+ };
+ test_acquire_release<T>(store, load_one_arg);
+ }
+
+ // success memory_order::acq_rel
+ {
+ auto store = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ auto r = x.compare_exchange_strong(old_val, new_val, std::memory_order::acq_rel, std::memory_order::relaxed);
+ assert(r);
+ };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_strong(val, val, std::memory_order::acq_rel, std::memory_order::relaxed)) {
+ }
+ return val;
+ };
+ test_acquire_release<T>(store, load);
+
+ auto store_one_arg = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ auto r = x.compare_exchange_strong(old_val, new_val, std::memory_order::acq_rel);
+ assert(r);
+ };
+ auto load_one_arg = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_strong(val, val, std::memory_order::acq_rel)) {
+ }
+ return val;
+ };
+ test_acquire_release<T>(store_one_arg, load_one_arg);
+ }
+
+ // success memory_order::seq_cst
+ {
+ auto store = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ auto r = x.compare_exchange_strong(old_val, new_val, std::memory_order::seq_cst, std::memory_order::relaxed);
+ assert(r);
+ };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_strong(val, val, std::memory_order::seq_cst, std::memory_order::relaxed)) {
+ }
+ return val;
+ };
+ test_seq_cst<T>(store, load);
+
+ auto store_one_arg = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ auto r = x.compare_exchange_strong(old_val, new_val, std::memory_order::seq_cst);
+ assert(r);
+ };
+ auto load_one_arg = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_strong(val, val, std::memory_order::seq_cst)) {
+ }
+ return val;
+ };
+ test_seq_cst<T>(store_one_arg, load_one_arg);
+ }
+
+ // failure memory_order::acquire
+ {
+ auto store = [](std::atomic_ref<T> const& x, T, T new_val) { x.store(new_val, std::memory_order::release); };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(T(255));
+ bool r =
+ x.compare_exchange_strong(unexpected, unexpected, std::memory_order::relaxed, std::memory_order::acquire);
+ assert(!r);
+ return result;
+ };
+ test_acquire_release<T>(store, load);
+
+ auto load_one_arg = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(T(255));
+ bool r = x.compare_exchange_strong(unexpected, unexpected, std::memory_order::acquire);
+ assert(!r);
+ return result;
+ };
+ test_acquire_release<T>(store, load_one_arg);
+
+ // acq_rel replaced by acquire
+ auto load_one_arg_acq_rel = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(T(255));
+ bool r = x.compare_exchange_strong(unexpected, unexpected, std::memory_order::acq_rel);
+ assert(!r);
+ return result;
+ };
+ test_acquire_release<T>(store, load_one_arg_acq_rel);
+ }
+
+ // failure memory_order::seq_cst
+ {
+ auto store = [](std::atomic_ref<T> const& x, T, T new_val) { x.store(new_val, std::memory_order::seq_cst); };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(T(255));
+ bool r =
+ x.compare_exchange_strong(unexpected, unexpected, std::memory_order::relaxed, std::memory_order::seq_cst);
+ assert(!r);
+ return result;
+ };
+ test_seq_cst<T>(store, load);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestCompareExchangeStrong>()();
+ return 0;
+}
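
The essential compare_exchange_strong semantics asserted above: on success the referenced value is replaced and `expected` is untouched; on failure nothing is stored and `expected` is refreshed with the value actually observed. Standalone (illustration only, not from this patch):

#include <atomic>
#include <cassert>

int main() {
  int x = 1;
  std::atomic_ref<int> a(x);

  int expected = 1;
  bool ok = a.compare_exchange_strong(expected, 2);
  assert(ok && x == 2);  // matched: value replaced
  assert(expected == 1); // 'expected' untouched on success

  ok = a.compare_exchange_strong(expected, 3);
  assert(!ok && x == 2); // mismatch: nothing stored
  assert(expected == 2); // 'expected' now holds the observed value
  return 0;
}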
diff --git a/libcxx/test/std/atomics/atomics.ref/compare_exchange_weak.pass.cpp b/libcxx/test/std/atomics/atomics.ref/compare_exchange_weak.pass.cpp
new file mode 100644
index 000000000000..5219a8e3714f
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/compare_exchange_weak.pass.cpp
@@ -0,0 +1,226 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// bool compare_exchange_weak(T&, T, memory_order, memory_order) const noexcept;
+// bool compare_exchange_weak(T&, T, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestCompareExchangeWeak {
+ void operator()() const {
+ {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ T t(T(1));
+ std::same_as<bool> decltype(auto) y = a.compare_exchange_weak(t, T(2));
+ assert(y == true);
+ assert(a == T(2));
+ assert(t == T(1));
+ y = a.compare_exchange_weak(t, T(3));
+ assert(y == false);
+ assert(a == T(2));
+ assert(t == T(2));
+
+ ASSERT_NOEXCEPT(a.compare_exchange_weak(t, T(2)));
+ }
+ {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ T t(T(1));
+ std::same_as<bool> decltype(auto) y = a.compare_exchange_weak(t, T(2), std::memory_order_seq_cst);
+ assert(y == true);
+ assert(a == T(2));
+ assert(t == T(1));
+ y = a.compare_exchange_weak(t, T(3), std::memory_order_seq_cst);
+ assert(y == false);
+ assert(a == T(2));
+ assert(t == T(2));
+
+ ASSERT_NOEXCEPT(a.compare_exchange_weak(t, T(2), std::memory_order_seq_cst));
+ }
+ {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ T t(T(1));
+ std::same_as<bool> decltype(auto) y =
+ a.compare_exchange_weak(t, T(2), std::memory_order_release, std::memory_order_relaxed);
+ assert(y == true);
+ assert(a == T(2));
+ assert(t == T(1));
+ y = a.compare_exchange_weak(t, T(3), std::memory_order_release, std::memory_order_relaxed);
+ assert(y == false);
+ assert(a == T(2));
+ assert(t == T(2));
+
+ ASSERT_NOEXCEPT(a.compare_exchange_weak(t, T(2), std::memory_order_release, std::memory_order_relaxed));
+ }
+
+ // success memory_order::release
+ {
+ auto store = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ // could fail spuriously, so put it in a loop
+ while (!x.compare_exchange_weak(old_val, new_val, std::memory_order::release, std::memory_order::relaxed)) {
+ }
+ };
+
+ auto load = [](std::atomic_ref<T> const& x) { return x.load(std::memory_order::acquire); };
+ test_acquire_release<T>(store, load);
+
+ auto store_one_arg = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ // could fail spuriously, so put it in a loop
+ while (!x.compare_exchange_weak(old_val, new_val, std::memory_order::release)) {
+ }
+ };
+ test_acquire_release<T>(store_one_arg, load);
+ }
+
+ // success memory_order::acquire
+ {
+ auto store = [](std::atomic_ref<T> const& x, T, T new_val) { x.store(new_val, std::memory_order::release); };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_weak(val, val, std::memory_order::acquire, std::memory_order::relaxed)) {
+ }
+ return val;
+ };
+ test_acquire_release<T>(store, load);
+
+ auto load_one_arg = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_weak(val, val, std::memory_order::acquire)) {
+ }
+ return val;
+ };
+ test_acquire_release<T>(store, load_one_arg);
+ }
+
+ // success memory_order::acq_rel
+ {
+ auto store = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ // could fail spuriously, so put it in a loop
+ while (!x.compare_exchange_weak(old_val, new_val, std::memory_order::acq_rel, std::memory_order::relaxed)) {
+ }
+ };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_weak(val, val, std::memory_order::acq_rel, std::memory_order::relaxed)) {
+ }
+ return val;
+ };
+ test_acquire_release<T>(store, load);
+
+ auto store_one_arg = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ // could fail spuriously, so put it in a loop
+ while (!x.compare_exchange_weak(old_val, new_val, std::memory_order::acq_rel)) {
+ }
+ };
+ auto load_one_arg = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_weak(val, val, std::memory_order::acq_rel)) {
+ }
+ return val;
+ };
+ test_acquire_release<T>(store_one_arg, load_one_arg);
+ }
+
+ // success memory_order::seq_cst
+ {
+ auto store = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ // could fail spuriously, so put it in a loop
+ while (!x.compare_exchange_weak(old_val, new_val, std::memory_order::seq_cst, std::memory_order::relaxed)) {
+ }
+ };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_weak(val, val, std::memory_order::seq_cst, std::memory_order::relaxed)) {
+ }
+ return val;
+ };
+ test_seq_cst<T>(store, load);
+
+ auto store_one_arg = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ // could fail spuriously, so put it in a loop
+ while (!x.compare_exchange_weak(old_val, new_val, std::memory_order::seq_cst)) {
+ }
+ };
+ auto load_one_arg = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_weak(val, val, std::memory_order::seq_cst)) {
+ }
+ return val;
+ };
+ test_seq_cst<T>(store_one_arg, load_one_arg);
+ }
+
+ // failure memory_order::acquire
+ {
+ auto store = [](std::atomic_ref<T> const& x, T, T new_val) { x.store(new_val, std::memory_order::release); };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(T(255));
+ bool r =
+ x.compare_exchange_weak(unexpected, unexpected, std::memory_order::relaxed, std::memory_order::acquire);
+ assert(!r);
+ return result;
+ };
+ test_acquire_release<T>(store, load);
+
+ auto load_one_arg = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(T(255));
+ bool r = x.compare_exchange_weak(unexpected, unexpected, std::memory_order::acquire);
+ assert(!r);
+ return result;
+ };
+ test_acquire_release<T>(store, load_one_arg);
+
+ // acq_rel replaced by acquire
+ auto load_one_arg_acq_rel = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(T(255));
+ bool r = x.compare_exchange_weak(unexpected, unexpected, std::memory_order::acq_rel);
+ assert(!r);
+ return result;
+ };
+ test_acquire_release<T>(store, load_one_arg_acq_rel);
+ }
+
+ // failure memory_order::seq_cst
+ {
+ auto store = [](std::atomic_ref<T> const& x, T, T new_val) { x.store(new_val, std::memory_order::seq_cst); };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(T(255));
+ bool r =
+ x.compare_exchange_weak(unexpected, unexpected, std::memory_order::relaxed, std::memory_order::seq_cst);
+ assert(!r);
+ return result;
+ };
+ test_seq_cst<T>(store, load);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestCompareExchangeWeak>()();
+ return 0;
+}
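
compare_exchange_weak may fail spuriously even when the comparison would have succeeded, which is why every use above sits in a loop. The canonical retry pattern (illustration only, not from this patch):

#include <atomic>
#include <cassert>

// The weak CAS is retried until it succeeds; on each failure 'old' is
// refreshed with the currently stored value.
int fetch_double(std::atomic_ref<int> a) {
  int old = a.load(std::memory_order_relaxed);
  while (!a.compare_exchange_weak(
      old, old * 2, std::memory_order_acq_rel, std::memory_order_relaxed)) {
  }
  return old;
}

int main() {
  int x = 21;
  assert(fetch_double(std::atomic_ref<int>(x)) == 21);
  assert(x == 42);
  return 0;
}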
diff --git a/libcxx/test/std/atomics/atomics.ref/convert.pass.cpp b/libcxx/test/std/atomics/atomics.ref/convert.pass.cpp
new file mode 100644
index 000000000000..2a58a5ea6ae2
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/convert.pass.cpp
@@ -0,0 +1,45 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// operator T() const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestConvert {
+ void operator()() const {
+ T x(T(1));
+
+ T copy = x;
+ std::atomic_ref<T> const a(copy);
+
+ T converted = a;
+ assert(converted == x);
+
+ ASSERT_NOEXCEPT(T(a));
+ static_assert(std::is_nothrow_convertible_v<std::atomic_ref<T>, T>);
+
+ auto store = [](std::atomic_ref<T> const& y, T, T new_val) { y.store(new_val); };
+ auto load = [](std::atomic_ref<T> const& y) { return static_cast<T>(y); };
+ test_seq_cst<T>(store, load);
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestConvert>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/ctor.pass.cpp b/libcxx/test/std/atomics/atomics.ref/ctor.pass.cpp
new file mode 100644
index 000000000000..d6c647406abf
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/ctor.pass.cpp
@@ -0,0 +1,37 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// <atomic>
+
+// explicit atomic_ref(T&);
+
+#include <atomic>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestCtor {
+ void operator()() const {
+ // check that the constructor is explicit
+ static_assert(!std::is_convertible_v<T, std::atomic_ref<T>>);
+ static_assert(std::is_constructible_v<std::atomic_ref<T>, T&>);
+
+ T x(T(0));
+ std::atomic_ref<T> a(x);
+ (void)a;
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestCtor>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/deduction.pass.cpp b/libcxx/test/std/atomics/atomics.ref/deduction.pass.cpp
new file mode 100644
index 000000000000..24a399ac4711
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/deduction.pass.cpp
@@ -0,0 +1,33 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// <atomic>
+
+// explicit atomic_ref(T&);
+
+#include <atomic>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestDeduction {
+ void operator()() const {
+ T x(T(0));
+ std::atomic_ref a(x);
+ ASSERT_SAME_TYPE(decltype(a), std::atomic_ref<T>);
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestDeduction>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/exchange.pass.cpp b/libcxx/test/std/atomics/atomics.ref/exchange.pass.cpp
new file mode 100644
index 000000000000..cd998d46b7e8
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/exchange.pass.cpp
@@ -0,0 +1,45 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// T exchange(T, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestExchange {
+ void operator()() const {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ {
+ std::same_as<T> decltype(auto) y = a.exchange(T(2));
+ assert(y == T(1));
+ ASSERT_NOEXCEPT(a.exchange(T(2)));
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a.exchange(T(3), std::memory_order_seq_cst);
+ assert(y == T(2));
+ ASSERT_NOEXCEPT(a.exchange(T(3), std::memory_order_seq_cst));
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestExchange>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp b/libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp
new file mode 100644
index 000000000000..908a6879bd06
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp
@@ -0,0 +1,113 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type fetch_add(integral-type, memory_order = memory_order::seq_cst) const noexcept;
+// floating-point-type fetch_add(floating-point-type, memory_order = memory_order::seq_cst) const noexcept;
+// T* fetch_add(difference_type, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_fetch_add = requires {
+ std::declval<T const>().fetch_add(std::declval<T>());
+ std::declval<T const>().fetch_add(std::declval<T>(), std::declval<std::memory_order>());
+};
+
+template <typename T>
+struct TestDoesNotHaveFetchAdd {
+ void operator()() const { static_assert(!has_fetch_add<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestFetchAdd {
+ void operator()() const {
+ if constexpr (std::is_arithmetic_v<T>) {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_add(T(2));
+ assert(y == T(1));
+ assert(x == T(3));
+ ASSERT_NOEXCEPT(a.fetch_add(T(0)));
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_add(T(4), std::memory_order_relaxed);
+ assert(y == T(3));
+ assert(x == T(7));
+ ASSERT_NOEXCEPT(a.fetch_add(T(0), std::memory_order_relaxed));
+ }
+ } else if constexpr (std::is_pointer_v<T>) {
+ using U = std::remove_pointer_t<T>;
+ U t[9] = {};
+ T p{&t[1]};
+ std::atomic_ref<T> const a(p);
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_add(2);
+ assert(y == &t[1]);
+ assert(a == &t[3]);
+ ASSERT_NOEXCEPT(a.fetch_add(0));
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_add(4, std::memory_order_relaxed);
+ assert(y == &t[3]);
+ assert(a == &t[7]);
+ ASSERT_NOEXCEPT(a.fetch_add(0, std::memory_order_relaxed));
+ }
+ } else {
+ static_assert(std::is_void_v<T>);
+ }
+
+ // memory_order::release
+ {
+ auto fetch_add = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ x.fetch_add(new_val - old_val, std::memory_order::release);
+ };
+ auto load = [](std::atomic_ref<T> const& x) { return x.load(std::memory_order::acquire); };
+ test_acquire_release<T>(fetch_add, load);
+ }
+
+ // memory_order::seq_cst
+ {
+ auto fetch_add_no_arg = [](std::atomic_ref<T> const& x, T old_val, T new_val) { x.fetch_add(new_val - old_val); };
+ auto fetch_add_with_order = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ x.fetch_add(new_val - old_val, std::memory_order::seq_cst);
+ };
+ auto load = [](std::atomic_ref<T> const& x) { return x.load(); };
+ test_seq_cst<T>(fetch_add_no_arg, load);
+ test_seq_cst<T>(fetch_add_with_order, load);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestFetchAdd>()();
+
+ TestFetchAdd<float>()();
+ TestFetchAdd<double>()();
+
+ TestEachPointerType<TestFetchAdd>()();
+
+ TestDoesNotHaveFetchAdd<bool>()();
+ TestDoesNotHaveFetchAdd<UserAtomicType>()();
+ TestDoesNotHaveFetchAdd<LargeUserAtomicType>()();
+
+ return 0;
+}
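
As the synopsis at the top of the file says, fetch_add comes in arithmetic and pointer flavors; both return the previous value, and the pointer overload advances element-wise by a difference_type. Standalone (illustration only, not from this patch):

#include <atomic>
#include <cassert>

int main() {
  // Arithmetic: returns the value held before the addition.
  int n = 5;
  std::atomic_ref<int> a(n);
  assert(a.fetch_add(3) == 5 && n == 8);

  // Pointer: advances element-wise, like p += d.
  int buf[4] = {};
  int* p = &buf[0];
  std::atomic_ref<int*> q(p);
  assert(q.fetch_add(2) == &buf[0] && p == &buf[2]);
  return 0;
}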
diff --git a/libcxx/test/std/atomics/atomics.ref/fetch_and.pass.cpp b/libcxx/test/std/atomics/atomics.ref/fetch_and.pass.cpp
new file mode 100644
index 000000000000..8f0bec21fe72
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/fetch_and.pass.cpp
@@ -0,0 +1,69 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type fetch_and(integral-type, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_fetch_and = requires {
+ std::declval<T const>().fetch_and(std::declval<T>());
+ std::declval<T const>().fetch_and(std::declval<T>(), std::declval<std::memory_order>());
+};
+
+template <typename T>
+struct TestDoesNotHaveFetchAnd {
+ void operator()() const { static_assert(!has_fetch_and<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestFetchAnd {
+ void operator()() const {
+ static_assert(std::is_integral_v<T>);
+
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_and(T(2));
+ assert(y == T(1));
+ assert(x == T(0));
+ ASSERT_NOEXCEPT(a.fetch_and(T(0)));
+ }
+
+ x = T(1);
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_and(T(2), std::memory_order_relaxed);
+ assert(y == T(1));
+ assert(x == T(0));
+ ASSERT_NOEXCEPT(a.fetch_and(T(0), std::memory_order_relaxed));
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestFetchAnd>()();
+
+ TestEachFloatingPointType<TestDoesNotHaveFetchAnd>()();
+
+ TestEachPointerType<TestDoesNotHaveFetchAnd>()();
+
+ TestDoesNotHaveFetchAnd<bool>()();
+ TestDoesNotHaveFetchAnd<UserAtomicType>()();
+ TestDoesNotHaveFetchAnd<LargeUserAtomicType>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/fetch_or.pass.cpp b/libcxx/test/std/atomics/atomics.ref/fetch_or.pass.cpp
new file mode 100644
index 000000000000..2045868fde42
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/fetch_or.pass.cpp
@@ -0,0 +1,68 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type fetch_or(integral-type, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_fetch_or = requires {
+ std::declval<T const>().fetch_or(std::declval<T>());
+ std::declval<T const>().fetch_or(std::declval<T>(), std::declval<std::memory_order>());
+};
+
+template <typename T>
+struct TestDoesNotHaveFetchOr {
+ void operator()() const { static_assert(!has_fetch_or<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestFetchOr {
+ void operator()() const {
+ static_assert(std::is_integral_v<T>);
+
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_or(T(2));
+ assert(y == T(1));
+ assert(x == T(3));
+ ASSERT_NOEXCEPT(a.fetch_or(T(0)));
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_or(T(2), std::memory_order_relaxed);
+ assert(y == T(3));
+ assert(x == T(3));
+ ASSERT_NOEXCEPT(a.fetch_or(T(0), std::memory_order_relaxed));
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestFetchOr>()();
+
+ TestEachFloatingPointType<TestDoesNotHaveFetchOr>()();
+
+ TestEachPointerType<TestDoesNotHaveFetchOr>()();
+
+ TestDoesNotHaveFetchOr<bool>()();
+ TestDoesNotHaveFetchOr<UserAtomicType>()();
+ TestDoesNotHaveFetchOr<LargeUserAtomicType>()();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp b/libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp
new file mode 100644
index 000000000000..545604530ada
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp
@@ -0,0 +1,113 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type fetch_sub(integral-type, memory_order = memory_order::seq_cst) const noexcept;
+// floating-point-type fetch_sub(floating-point-type, memory_order = memory_order::seq_cst) const noexcept;
+// T* fetch_sub(difference_type, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_fetch_sub = requires {
+ std::declval<T const>().fetch_sub(std::declval<T>());
+ std::declval<T const>().fetch_sub(std::declval<T>(), std::declval<std::memory_order>());
+};
+
+template <typename T>
+struct TestDoesNotHaveFetchSub {
+ void operator()() const { static_assert(!has_fetch_sub<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestFetchSub {
+ void operator()() const {
+ if constexpr (std::is_arithmetic_v<T>) {
+ T x(T(7));
+ std::atomic_ref<T> const a(x);
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_sub(T(4));
+ assert(y == T(7));
+ assert(x == T(3));
+ ASSERT_NOEXCEPT(a.fetch_sub(T(0)));
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_sub(T(2), std::memory_order_relaxed);
+ assert(y == T(3));
+ assert(x == T(1));
+ ASSERT_NOEXCEPT(a.fetch_sub(T(0), std::memory_order_relaxed));
+ }
+ } else if constexpr (std::is_pointer_v<T>) {
+ using U = std::remove_pointer_t<T>;
+ U t[9] = {};
+ T p{&t[7]};
+ std::atomic_ref<T> const a(p);
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_sub(4);
+ assert(y == &t[7]);
+ assert(a == &t[3]);
+ ASSERT_NOEXCEPT(a.fetch_sub(0));
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_sub(2, std::memory_order_relaxed);
+ assert(y == &t[3]);
+ assert(a == &t[1]);
+ ASSERT_NOEXCEPT(a.fetch_sub(0, std::memory_order_relaxed));
+ }
+ } else {
+ static_assert(std::is_void_v<T>);
+ }
+
+ // memory_order::release
+ {
+ auto fetch_sub = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ x.fetch_sub(old_val - new_val, std::memory_order::release);
+ };
+ auto load = [](std::atomic_ref<T> const& x) { return x.load(std::memory_order::acquire); };
+ test_acquire_release<T>(fetch_sub, load);
+ }
+
+ // memory_order::seq_cst
+ {
+ auto fetch_sub_no_arg = [](std::atomic_ref<T> const& x, T old_val, T new_val) { x.fetch_sub(old_val - new_val); };
+ auto fetch_sub_with_order = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ x.fetch_sub(old_val - new_val, std::memory_order::seq_cst);
+ };
+ auto load = [](std::atomic_ref<T> const& x) { return x.load(); };
+ test_seq_cst<T>(fetch_sub_no_arg, load);
+ test_seq_cst<T>(fetch_sub_with_order, load);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestFetchSub>()();
+
+ TestFetchSub<float>()();
+ TestFetchSub<double>()();
+
+ TestEachPointerType<TestFetchSub>()();
+
+ TestDoesNotHaveFetchSub<bool>()();
+ TestDoesNotHaveFetchSub<UserAtomicType>()();
+ TestDoesNotHaveFetchSub<LargeUserAtomicType>()();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/fetch_xor.pass.cpp b/libcxx/test/std/atomics/atomics.ref/fetch_xor.pass.cpp
new file mode 100644
index 000000000000..aade87f961f1
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/fetch_xor.pass.cpp
@@ -0,0 +1,68 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type fetch_xor(integral-type, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_fetch_xor = requires {
+ std::declval<T const>().fetch_xor(std::declval<T>());
+ std::declval<T const>().fetch_xor(std::declval<T>(), std::declval<std::memory_order>());
+};
+
+template <typename T>
+struct TestDoesNotHaveFetchXor {
+ void operator()() const { static_assert(!has_fetch_xor<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestFetchXor {
+ void operator()() const {
+ static_assert(std::is_integral_v<T>);
+
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_xor(T(2));
+ assert(y == T(1));
+ assert(x == T(3));
+ ASSERT_NOEXCEPT(a.fetch_xor(T(0)));
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_xor(T(2), std::memory_order_relaxed);
+ assert(y == T(3));
+ assert(x == T(1));
+ ASSERT_NOEXCEPT(a.fetch_xor(T(0), std::memory_order_relaxed));
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestFetchXor>()();
+
+ TestEachFloatingPointType<TestDoesNotHaveFetchXor>()();
+
+ TestEachPointerType<TestDoesNotHaveFetchXor>()();
+
+ TestDoesNotHaveFetchXor<bool>()();
+ TestDoesNotHaveFetchXor<UserAtomicType>()();
+ TestDoesNotHaveFetchXor<LargeUserAtomicType>()();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/increment_decrement.pass.cpp b/libcxx/test/std/atomics/atomics.ref/increment_decrement.pass.cpp
new file mode 100644
index 000000000000..c84c89b4d2b4
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/increment_decrement.pass.cpp
@@ -0,0 +1,97 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type operator++(int) const noexcept;
+// integral-type operator--(int) const noexcept;
+// integral-type operator++() const noexcept;
+// integral-type operator--() const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_pre_increment_operator = requires { ++std::declval<T const>(); };
+
+template <typename T>
+concept has_post_increment_operator = requires { std::declval<T const>()++; };
+
+template <typename T>
+concept has_pre_decrement_operator = requires { --std::declval<T const>(); };
+
+template <typename T>
+concept has_post_decrement_operator = requires { std::declval<T const>()--; };
+
+template <typename T>
+constexpr bool does_not_have_increment_nor_decrement_operators() {
+ return !has_pre_increment_operator<T> && !has_pre_decrement_operator<T> && !has_post_increment_operator<T> &&
+ !has_post_decrement_operator<T>;
+}
+
+template <typename T>
+struct TestDoesNotHaveIncrementDecrement {
+ void operator()() const { static_assert(does_not_have_increment_nor_decrement_operators<T>()); }
+};
+
+template <typename T>
+struct TestIncrementDecrement {
+ void operator()() const {
+ static_assert(std::is_integral_v<T>);
+
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ {
+ std::same_as<T> decltype(auto) y = ++a;
+ assert(y == T(2));
+ assert(x == T(2));
+ ASSERT_NOEXCEPT(++a);
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = --a;
+ assert(y == T(1));
+ assert(x == T(1));
+ ASSERT_NOEXCEPT(--a);
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a++;
+ assert(y == T(1));
+ assert(x == T(2));
+ ASSERT_NOEXCEPT(a++);
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a--;
+ assert(y == T(2));
+ assert(x == T(1));
+ ASSERT_NOEXCEPT(a--);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestIncrementDecrement>()();
+
+ TestEachFloatingPointType<TestDoesNotHaveIncrementDecrement>()();
+
+ TestEachPointerType<TestDoesNotHaveIncrementDecrement>()();
+
+ TestDoesNotHaveIncrementDecrement<bool>()();
+ TestDoesNotHaveIncrementDecrement<UserAtomicType>()();
+ TestDoesNotHaveIncrementDecrement<LargeUserAtomicType>()();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/is_always_lock_free.pass.cpp b/libcxx/test/std/atomics/atomics.ref/is_always_lock_free.pass.cpp
new file mode 100644
index 000000000000..94f65e3b4b66
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/is_always_lock_free.pass.cpp
@@ -0,0 +1,71 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// <atomic>
+
+// static constexpr bool is_always_lock_free;
+// bool is_lock_free() const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+
+#include "test_macros.h"
+
+template <typename T>
+void check_always_lock_free(std::atomic_ref<T> const a) {
+ std::same_as<const bool> decltype(auto) is_always_lock_free = std::atomic_ref<T>::is_always_lock_free;
+ if (is_always_lock_free) {
+ std::same_as<bool> decltype(auto) is_lock_free = a.is_lock_free();
+ assert(is_lock_free);
+ }
+ ASSERT_NOEXCEPT(a.is_lock_free());
+}
+
+#define CHECK_ALWAYS_LOCK_FREE(T) \
+ do { \
+ typedef T type; \
+ type obj{}; \
+ check_always_lock_free(std::atomic_ref<type>(obj)); \
+ } while (0)
+
+void test() {
+ int i = 0;
+ check_always_lock_free(std::atomic_ref<int>(i));
+
+ float f = 0.f;
+ check_always_lock_free(std::atomic_ref<float>(f));
+
+ int* p = &i;
+ check_always_lock_free(std::atomic_ref<int*>(p));
+
+ CHECK_ALWAYS_LOCK_FREE(struct Empty{});
+ CHECK_ALWAYS_LOCK_FREE(struct OneInt { int i; });
+ CHECK_ALWAYS_LOCK_FREE(struct IntArr2 { int i[2]; });
+ CHECK_ALWAYS_LOCK_FREE(struct FloatArr3 { float i[3]; });
+ CHECK_ALWAYS_LOCK_FREE(struct LLIArr2 { long long int i[2]; });
+ CHECK_ALWAYS_LOCK_FREE(struct LLIArr4 { long long int i[4]; });
+ CHECK_ALWAYS_LOCK_FREE(struct LLIArr8 { long long int i[8]; });
+ CHECK_ALWAYS_LOCK_FREE(struct LLIArr16 { long long int i[16]; });
+ CHECK_ALWAYS_LOCK_FREE(struct Padding {
+ char c; /* padding */
+ long long int i;
+ });
+ CHECK_ALWAYS_LOCK_FREE(union IntFloat {
+ int i;
+ float f;
+ });
+ CHECK_ALWAYS_LOCK_FREE(enum class CharEnumClass : char{foo});
+}
+
+int main(int, char**) {
+ test();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/load.pass.cpp b/libcxx/test/std/atomics/atomics.ref/load.pass.cpp
new file mode 100644
index 000000000000..feed0fbaed84
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/load.pass.cpp
@@ -0,0 +1,62 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// T load(memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <concepts>
+#include <cassert>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestLoad {
+ void operator()() const {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ {
+ std::same_as<T> decltype(auto) y = a.load();
+ assert(y == T(1));
+ ASSERT_NOEXCEPT(a.load());
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a.load(std::memory_order_seq_cst);
+ assert(y == T(1));
+ ASSERT_NOEXCEPT(a.load(std::memory_order_seq_cst));
+ }
+
+ // memory_order::seq_cst
+ {
+ auto store = [](std::atomic_ref<T> const& y, T, T new_val) { y.store(new_val); };
+ auto load_no_arg = [](std::atomic_ref<T> const& y) { return y.load(); };
+ auto load_with_order = [](std::atomic_ref<T> const& y) { return y.load(std::memory_order::seq_cst); };
+ test_seq_cst<T>(store, load_no_arg);
+ test_seq_cst<T>(store, load_with_order);
+ }
+
+ // memory_order::release
+ {
+ auto store = [](std::atomic_ref<T> const& y, T, T new_val) { y.store(new_val, std::memory_order::release); };
+ auto load = [](std::atomic_ref<T> const& y) { return y.load(std::memory_order::acquire); };
+ test_acquire_release<T>(store, load);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestLoad>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/member_types.pass.cpp b/libcxx/test/std/atomics/atomics.ref/member_types.pass.cpp
new file mode 100644
index 000000000000..d4e2f0126d62
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/member_types.pass.cpp
@@ -0,0 +1,132 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20
+
+// <atomic>
+
+// template <class T>
+// struct atomic_ref
+// {
+// using value_type = T;
+// using difference_type = value_type; // only for atomic_ref<Integral> and
+// // atomic_ref<Floating> specializations
+// using difference_type = std::ptrdiff_t; // only for atomic_ref<T*> specializations
+//
+// explicit atomic_ref(T&);
+// atomic_ref(const atomic_ref&) noexcept;
+// atomic_ref& operator=(const atomic_ref&) = delete;
+// };
+
+#include <atomic>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <class T>
+concept has_difference_type = requires { typename T::difference_type; };
+
+template <class T>
+void check_member_types() {
+ if constexpr ((std::is_integral_v<T> && !std::is_same_v<T, bool>) || std::is_floating_point_v<T>) {
+ ASSERT_SAME_TYPE(typename std::atomic_ref<T>::value_type, T);
+ ASSERT_SAME_TYPE(typename std::atomic_ref<T>::difference_type, T);
+ } else if constexpr (std::is_pointer_v<T>) {
+ ASSERT_SAME_TYPE(typename std::atomic_ref<T>::value_type, T);
+ ASSERT_SAME_TYPE(typename std::atomic_ref<T>::difference_type, std::ptrdiff_t);
+ } else {
+ ASSERT_SAME_TYPE(typename std::atomic_ref<T>::value_type, T);
+ static_assert(!has_difference_type<std::atomic_ref<T>>);
+ }
+}
+
+template <class T>
+void test() {
+ // value_type and difference_type (except for primary template)
+ check_member_types<T>();
+
+ static_assert(std::is_nothrow_copy_constructible_v<std::atomic_ref<T>>);
+
+ static_assert(!std::is_copy_assignable_v<std::atomic_ref<T>>);
+
+ // explicit constructor
+ static_assert(!std::is_convertible_v<T, std::atomic_ref<T>>);
+ static_assert(std::is_constructible_v<std::atomic_ref<T>, T&>);
+}
+
+void testall() {
+ // Primary template
+ struct Empty {};
+ test<Empty>();
+ struct Trivial {
+ int a;
+ float b;
+ };
+ test<Trivial>();
+ test<bool>();
+
+ // Partial specialization for pointer types
+ test<void*>();
+
+ // Specialization for integral types
+ // + character types
+ test<char>();
+ test<char8_t>();
+ test<char16_t>();
+ test<char32_t>();
+ test<wchar_t>();
+ // + standard signed integer types
+ test<signed char>();
+ test<short>();
+ test<int>();
+ test<long>();
+ test<long long>();
+ // + standard unsigned integer types
+ test<unsigned char>();
+ test<unsigned short>();
+ test<unsigned int>();
+ test<unsigned long>();
+ test<unsigned long long>();
+ // + any other types needed by the typedefs in the header <cstdint>
+ test<int8_t>();
+ test<int16_t>();
+ test<int32_t>();
+ test<int64_t>();
+ test<int_fast8_t>();
+ test<int_fast16_t>();
+ test<int_fast32_t>();
+ test<int_fast64_t>();
+ test<int_least8_t>();
+ test<int_least16_t>();
+ test<int_least32_t>();
+ test<int_least64_t>();
+ test<intmax_t>();
+ test<intptr_t>();
+ test<uint8_t>();
+ test<uint16_t>();
+ test<uint32_t>();
+ test<uint64_t>();
+ test<uint_fast8_t>();
+ test<uint_fast16_t>();
+ test<uint_fast32_t>();
+ test<uint_fast64_t>();
+ test<uint_least8_t>();
+ test<uint_least16_t>();
+ test<uint_least32_t>();
+ test<uint_least64_t>();
+ test<uintmax_t>();
+ test<uintptr_t>();
+
+ // Specialization for floating-point types
+ // + floating-point types
+ test<float>();
+ test<double>();
+ test<long double>();
+ // + TODO extended floating-point types
+}
+
+int main(int, char**) { testall(); return 0; }
diff --git a/libcxx/test/std/atomics/atomics.ref/notify_all.pass.cpp b/libcxx/test/std/atomics/atomics.ref/notify_all.pass.cpp
new file mode 100644
index 000000000000..382b19f8c1d7
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/notify_all.pass.cpp
@@ -0,0 +1,78 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: no-threads
+// XFAIL: availability-synchronization_library-missing
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// void notify_all() const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <thread>
+#include <type_traits>
+#include <vector>
+
+#include "atomic_helpers.h"
+#include "make_test_thread.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestNotifyAll {
+ void operator()() const {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ bool done = false;
+ std::atomic<int> started_num = 0;
+ std::atomic<int> wait_done_num = 0;
+
+ constexpr auto number_of_threads = 8;
+ std::vector<std::thread> threads;
+ threads.reserve(number_of_threads);
+
+ for (auto j = 0; j < number_of_threads; ++j) {
+ threads.push_back(support::make_test_thread([&a, &started_num, &done, &wait_done_num] {
+ started_num.fetch_add(1, std::memory_order::relaxed);
+
+ a.wait(T(1));
+ wait_done_num.fetch_add(1, std::memory_order::relaxed);
+
+      // likely to fail if wait() returned without blocking, since 'done' is
+      // only set immediately before the store/notify below
+ assert(done);
+ }));
+ }
+
+ while (started_num.load(std::memory_order::relaxed) != number_of_threads) {
+ std::this_thread::yield();
+ }
+
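+    // Best-effort: give the waiters a chance to actually block inside wait()
+    // before notifying; this cannot be guaranteed, hence the caveat above.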
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+
+ done = true;
+ a.store(T(3));
+ a.notify_all();
+
+    // notify_all should unblock all of the threads, so the loop below must not get stuck
+ while (wait_done_num.load(std::memory_order::relaxed) != number_of_threads) {
+ std::this_thread::yield();
+ }
+
+ for (auto& thread : threads) {
+ thread.join();
+ }
+
+ ASSERT_NOEXCEPT(a.notify_all());
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestNotifyAll>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/notify_one.pass.cpp b/libcxx/test/std/atomics/atomics.ref/notify_one.pass.cpp
new file mode 100644
index 000000000000..611e67417e4d
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/notify_one.pass.cpp
@@ -0,0 +1,46 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: no-threads
+// XFAIL: availability-synchronization_library-missing
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// void notify_one() const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <thread>
+#include <type_traits>
+#include <vector>
+
+#include "atomic_helpers.h"
+#include "make_test_thread.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestNotifyOne {
+ void operator()() const {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ std::thread t = support::make_test_thread([&]() {
+ a.store(T(3));
+ a.notify_one();
+ });
+ a.wait(T(1));
+ assert(a.load() == T(3));
+ t.join();
+ ASSERT_NOEXCEPT(a.notify_one());
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestNotifyOne>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/operator_minus_equals.pass.cpp b/libcxx/test/std/atomics/atomics.ref/operator_minus_equals.pass.cpp
new file mode 100644
index 000000000000..571d626035fa
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/operator_minus_equals.pass.cpp
@@ -0,0 +1,79 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type operator-=(integral-type) const noexcept;
+// floating-point-type operator-=(floating-point-type) const noexcept;
+// T* operator-=(difference_type) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_operator_minus_equals = requires { std::declval<T const>() -= std::declval<T>(); };
+
+template <typename T>
+struct TestDoesNotHaveOperatorMinusEquals {
+ void operator()() const { static_assert(!has_operator_minus_equals<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestOperatorMinusEquals {
+ void operator()() const {
+ if constexpr (std::is_arithmetic_v<T>) {
+ T x(T(3));
+ std::atomic_ref<T> const a(x);
+
+ std::same_as<T> decltype(auto) y = (a -= T(2));
+ assert(y == T(1));
+ assert(x == T(1));
+ ASSERT_NOEXCEPT(a -= T(0));
+ } else if constexpr (std::is_pointer_v<T>) {
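+      // For pointer types difference_type is std::ptrdiff_t, so -= moves the
+      // referenced pointer backwards within the array t.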
+ using U = std::remove_pointer_t<T>;
+ U t[9] = {};
+ T p{&t[3]};
+ std::atomic_ref<T> const a(p);
+
+ std::same_as<T> decltype(auto) y = (a -= 2);
+ assert(y == &t[1]);
+ assert(a == &t[1]);
+ ASSERT_NOEXCEPT(a -= 0);
+ } else {
+ static_assert(std::is_void_v<T>);
+ }
+
+ // memory_order::seq_cst
+ {
+ auto minus_equals = [](std::atomic_ref<T> const& x, T old_val, T new_val) { x -= (old_val - new_val); };
+ auto load = [](std::atomic_ref<T> const& x) { return x.load(); };
+ test_seq_cst<T>(minus_equals, load);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestOperatorMinusEquals>()();
+
+ TestOperatorMinusEquals<float>()();
+ TestOperatorMinusEquals<double>()();
+
+ TestEachPointerType<TestOperatorMinusEquals>()();
+
+ TestDoesNotHaveOperatorMinusEquals<bool>()();
+ TestDoesNotHaveOperatorMinusEquals<UserAtomicType>()();
+ TestDoesNotHaveOperatorMinusEquals<LargeUserAtomicType>()();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/operator_plus_equals.pass.cpp b/libcxx/test/std/atomics/atomics.ref/operator_plus_equals.pass.cpp
new file mode 100644
index 000000000000..de48ea56f57f
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/operator_plus_equals.pass.cpp
@@ -0,0 +1,79 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type operator+=(integral-type) const noexcept;
+// floating-point-type operator+=(floating-point-type) const noexcept;
+// T* operator+=(difference_type) const noexcept;
+
+#include <atomic>
+#include <concepts>
+#include <cassert>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_operator_plus_equals = requires { std::declval<T const>() += std::declval<T>(); };
+
+template <typename T>
+struct TestDoesNotHaveOperatorPlusEquals {
+ void operator()() const { static_assert(!has_operator_plus_equals<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestOperatorPlusEquals {
+ void operator()() const {
+ if constexpr (std::is_arithmetic_v<T>) {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ std::same_as<T> decltype(auto) y = (a += T(2));
+ assert(y == T(3));
+ assert(x == T(3));
+ ASSERT_NOEXCEPT(a += T(0));
+ } else if constexpr (std::is_pointer_v<T>) {
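+      // Mirrors the operator-= test: += advances the referenced pointer by
+      // difference_type (std::ptrdiff_t) steps within the array t.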
+ using U = std::remove_pointer_t<T>;
+ U t[9] = {};
+ T p{&t[1]};
+ std::atomic_ref<T> const a(p);
+
+ std::same_as<T> decltype(auto) y = (a += 2);
+ assert(y == &t[3]);
+ assert(a == &t[3]);
+ ASSERT_NOEXCEPT(a += 0);
+ } else {
+ static_assert(std::is_void_v<T>);
+ }
+
+ // memory_order::seq_cst
+ {
+ auto plus_equals = [](std::atomic_ref<T> const& x, T old_val, T new_val) { x += (new_val - old_val); };
+ auto load = [](std::atomic_ref<T> const& x) { return x.load(); };
+ test_seq_cst<T>(plus_equals, load);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestOperatorPlusEquals>()();
+
+ TestOperatorPlusEquals<float>()();
+ TestOperatorPlusEquals<double>()();
+
+ TestEachPointerType<TestOperatorPlusEquals>()();
+
+ TestDoesNotHaveOperatorPlusEquals<bool>()();
+ TestDoesNotHaveOperatorPlusEquals<UserAtomicType>()();
+ TestDoesNotHaveOperatorPlusEquals<LargeUserAtomicType>()();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/required_alignment.pass.cpp b/libcxx/test/std/atomics/atomics.ref/required_alignment.pass.cpp
new file mode 100644
index 000000000000..86e0cba4dbf0
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/required_alignment.pass.cpp
@@ -0,0 +1,39 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// static constexpr size_t required_alignment;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+
+template <typename T>
+constexpr void check_required_alignment() {
+ std::same_as<const std::size_t> decltype(auto) required_alignment = std::atomic_ref<T>::required_alignment;
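+  // Only a lower bound is guaranteed: an implementation may require stricter
+  // alignment than alignof(T), e.g. to make operations on the object lock-free.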
+ assert(required_alignment >= alignof(T));
+}
+
+constexpr bool test() {
+ check_required_alignment<int>();
+ check_required_alignment<float>();
+ check_required_alignment<int*>();
+ struct Empty {};
+ check_required_alignment<Empty>();
+ struct Trivial {
+ int a;
+ };
+ check_required_alignment<Trivial>();
+ return true;
+}
+
+int main(int, char**) {
+ test();
+ static_assert(test());
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/requires-trivially-copyable.verify.cpp b/libcxx/test/std/atomics/atomics.ref/requires-trivially-copyable.verify.cpp
new file mode 100644
index 000000000000..9a8b036ffd1f
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/requires-trivially-copyable.verify.cpp
@@ -0,0 +1,26 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// <atomic_ref>
+
+// template<class T>
+// class atomic_ref;
+
+// The program is ill-formed if is_trivially_copyable_v<T> is false.
+
+#include <atomic>
+
+void trivially_copyable() {
+ struct X {
+ X() = default;
+ X(X const&) {} // -> not trivially copyable
+ } x;
+ // expected-error-re@*:* {{static assertion failed {{.*}}atomic_ref<T> requires that 'T' be a trivially copyable type}}
+ std::atomic_ref<X> r(x);
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/store.pass.cpp b/libcxx/test/std/atomics/atomics.ref/store.pass.cpp
new file mode 100644
index 000000000000..ea01a3d02a34
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/store.pass.cpp
@@ -0,0 +1,61 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// void store(T, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestStore {
+ void operator()() const {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ a.store(T(2));
+ assert(x == T(2));
+ ASSERT_NOEXCEPT(a.store(T(1)));
+
+ a.store(T(3), std::memory_order_seq_cst);
+ assert(x == T(3));
+ ASSERT_NOEXCEPT(a.store(T(0), std::memory_order_seq_cst));
+
+ // TODO memory_order::relaxed
+
+ // memory_order::seq_cst
+ {
+ auto store_no_arg = [](std::atomic_ref<T> const& y, T, T new_val) { y.store(new_val); };
+ auto store_with_order = [](std::atomic_ref<T> const& y, T, T new_val) {
+ y.store(new_val, std::memory_order::seq_cst);
+ };
+ auto load = [](std::atomic_ref<T> const& y) { return y.load(); };
+ test_seq_cst<T>(store_no_arg, load);
+ test_seq_cst<T>(store_with_order, load);
+ }
+
+ // memory_order::release
+ {
+ auto store = [](std::atomic_ref<T> const& y, T, T new_val) { y.store(new_val, std::memory_order::release); };
+ auto load = [](std::atomic_ref<T> const& y) { return y.load(std::memory_order::acquire); };
+ test_acquire_release<T>(store, load);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestStore>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/test_helper.h b/libcxx/test/std/atomics/atomics.ref/test_helper.h
new file mode 100644
index 000000000000..225a70c5a16c
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/test_helper.h
@@ -0,0 +1,136 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TEST_STD_ATOMICS_ATOMIC_REF_TEST_HELPER_H
+#define TEST_STD_ATOMICS_ATOMIC_REF_TEST_HELPER_H
+
+#include <atomic>
+#include <cassert>
+#include <cmath>
+#include <vector>
+
+#include "test_macros.h"
+
+#ifndef TEST_HAS_NO_THREADS
+# include "make_test_thread.h"
+# include <thread>
+#endif
+
+template <class T>
+bool equals(T x, T y) {
+ return x == y;
+}
+
+template <class T>
+T make_value(int i) {
+ assert(i == 0 || i == 1);
+ if constexpr (std::is_pointer_v<T>) {
+ // So that pointers returned can be subtracted from one another
+ static std::remove_const_t<std::remove_pointer_t<T>> d[2];
+ return &d[i];
+ } else {
+ return T(i);
+ }
+}
+
+// Test that all threads see the exact same sequence of events.
+// The test is guaranteed to pass only if store_op and load_op affect
+// the memory with seq_cst ordering.
+template <class T, class StoreOp, class LoadOp>
+void test_seq_cst(StoreOp store_op, LoadOp load_op) {
+#ifndef TEST_HAS_NO_THREADS
+ for (int i = 0; i < 100; ++i) {
+ T old_value(make_value<T>(0));
+ T new_value(make_value<T>(1));
+
+ T copy_x = old_value;
+ std::atomic_ref<T> const x(copy_x);
+ T copy_y = old_value;
+ std::atomic_ref<T> const y(copy_y);
+
+ std::atomic_bool x_updated_first(false);
+ std::atomic_bool y_updated_first(false);
+
+ auto t1 = support::make_test_thread([&] { store_op(x, old_value, new_value); });
+
+ auto t2 = support::make_test_thread([&] { store_op(y, old_value, new_value); });
+
+ auto t3 = support::make_test_thread([&] {
+ while (!equals(load_op(x), new_value)) {
+ std::this_thread::yield();
+ }
+ if (!equals(load_op(y), new_value)) {
+ x_updated_first.store(true, std::memory_order_relaxed);
+ }
+ });
+
+ auto t4 = support::make_test_thread([&] {
+ while (!equals(load_op(y), new_value)) {
+ std::this_thread::yield();
+ }
+ if (!equals(load_op(x), new_value)) {
+ y_updated_first.store(true, std::memory_order_relaxed);
+ }
+ });
+
+ t1.join();
+ t2.join();
+ t3.join();
+ t4.join();
+    // Threads 3 and 4 must not observe the stores to x and y in different orders.
+ assert(!(x_updated_first && y_updated_first));
+ }
+#else
+ (void)store_op;
+ (void)load_op;
+#endif
+}
+
+// Test that all writes before the store are seen by other threads after the load.
+// The test is guaranteed to pass only if store_op and load_op affect the
+// memory with acquire-release (or stronger) ordering.
+template <class T, class StoreOp, class LoadOp>
+void test_acquire_release(StoreOp store_op, LoadOp load_op) {
+#ifndef TEST_HAS_NO_THREADS
+ for (auto i = 0; i < 100; ++i) {
+ T old_value(make_value<T>(0));
+ T new_value(make_value<T>(1));
+
+ T copy = old_value;
+ std::atomic_ref<T> const at(copy);
+ int non_atomic = 5;
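+    // The release store publishes the preceding write to non_atomic; each
+    // acquire load that observes new_value synchronizes-with that store.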
+
+ constexpr auto number_of_threads = 8;
+ std::vector<std::thread> threads;
+ threads.reserve(number_of_threads);
+
+ for (auto j = 0; j < number_of_threads; ++j) {
+ threads.push_back(support::make_test_thread([&at, &non_atomic, load_op, new_value] {
+ while (!equals(load_op(at), new_value)) {
+ std::this_thread::yield();
+ }
+ // Other thread's writes before the release store are visible
+ // in this thread's read after the acquire load
+ assert(non_atomic == 6);
+ }));
+ }
+
+ non_atomic = 6;
+ store_op(at, old_value, new_value);
+
+ for (auto& thread : threads) {
+ thread.join();
+ }
+ }
+#else
+ (void)store_op;
+ (void)load_op;
+#endif
+}
+
+#endif // TEST_STD_ATOMICS_ATOMIC_REF_TEST_HELPER_H
diff --git a/libcxx/test/std/atomics/atomics.ref/wait.pass.cpp b/libcxx/test/std/atomics/atomics.ref/wait.pass.cpp
new file mode 100644
index 000000000000..e5310febf5c5
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/wait.pass.cpp
@@ -0,0 +1,88 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: no-threads
+// XFAIL: availability-synchronization_library-missing
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// void wait(T, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "make_test_thread.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestWait {
+ void operator()() const {
+ {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ assert(a.load() == T(1));
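+      // wait() returns immediately here because the stored value T(1) differs from T(0)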
+ a.wait(T(0));
+ std::thread t1 = support::make_test_thread([&]() {
+ a.store(T(3));
+ a.notify_one();
+ });
+ a.wait(T(1));
+ assert(a.load() == T(3));
+ t1.join();
+ ASSERT_NOEXCEPT(a.wait(T(0)));
+
+ assert(a.load() == T(3));
+ a.wait(T(0), std::memory_order_seq_cst);
+ std::thread t2 = support::make_test_thread([&]() {
+ a.store(T(5));
+ a.notify_one();
+ });
+ a.wait(T(3), std::memory_order_seq_cst);
+ assert(a.load() == T(5));
+ t2.join();
+ ASSERT_NOEXCEPT(a.wait(T(0), std::memory_order_seq_cst));
+ }
+
+ // memory_order::acquire
+ {
+ auto store = [](std::atomic_ref<T> const& x, T, T new_val) { x.store(new_val, std::memory_order::release); };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ x.wait(T(255), std::memory_order::acquire);
+ return result;
+ };
+ test_acquire_release<T>(store, load);
+ }
+
+ // memory_order::seq_cst
+ {
+ auto store = [](std::atomic_ref<T> const& x, T, T new_val) { x.store(new_val); };
+ auto load_no_arg = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ x.wait(T(255));
+ return result;
+ };
+ auto load_with_order = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ x.wait(T(255), std::memory_order::seq_cst);
+ return result;
+ };
+ test_seq_cst<T>(store, load_no_arg);
+ test_seq_cst<T>(store, load_with_order);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestWait>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/types.compile.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/types.compile.pass.cpp
index 1a4e6dfe0b31..b38123628fe0 100644
--- a/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/types.compile.pass.cpp
+++ b/libcxx/test/std/atomics/atomics.types.generic/atomics.types.float/types.compile.pass.cpp
@@ -17,8 +17,11 @@
template <class T>
void test() {
+ // LWG 3045. atomic<floating-point> doesn't have value_type or difference_type
+ // https://cplusplus.github.io/LWG/issue3045
static_assert(std::is_same_v<typename std::atomic<T>::value_type, T>);
static_assert(std::is_same_v<typename std::atomic<T>::difference_type, T>);
+
static_assert(std::is_standard_layout_v<std::atomic<T>>);
static_assert(std::is_trivially_destructible_v<std::atomic<T>>);
}
diff --git a/libcxx/test/std/containers/associative/map/map.value_compare/types.pass.cpp b/libcxx/test/std/containers/associative/map/map.value_compare/types.pass.cpp
index 1d6069933eea..52111ddb3f27 100644
--- a/libcxx/test/std/containers/associative/map/map.value_compare/types.pass.cpp
+++ b/libcxx/test/std/containers/associative/map/map.value_compare/types.pass.cpp
@@ -10,7 +10,7 @@
// class value_compare
-// REQUIRES: c++98 || c++03 || c++11 || c++14
+// REQUIRES: c++03 || c++11 || c++14
#include <map>
#include <string>
diff --git a/libcxx/test/std/containers/associative/multimap/multimap.value_compare/types.pass.cpp b/libcxx/test/std/containers/associative/multimap/multimap.value_compare/types.pass.cpp
index 6ecaf9247ebe..0d0c74f29f42 100644
--- a/libcxx/test/std/containers/associative/multimap/multimap.value_compare/types.pass.cpp
+++ b/libcxx/test/std/containers/associative/multimap/multimap.value_compare/types.pass.cpp
@@ -10,7 +10,7 @@
// class value_compare
-// REQUIRES: c++98 || c++03 || c++11 || c++14
+// REQUIRES: c++03 || c++11 || c++14
#include <map>
#include <string>
diff --git a/libcxx/test/std/experimental/simd/simd.class/simd_copy.pass.cpp b/libcxx/test/std/experimental/simd/simd.class/simd_copy.pass.cpp
new file mode 100644
index 000000000000..8fcc811f6df3
--- /dev/null
+++ b/libcxx/test/std/experimental/simd/simd.class/simd_copy.pass.cpp
@@ -0,0 +1,173 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14
+
+// FIXME: Fatal error with the following targets (remove XFAIL when fixed):
+// Pass-by-value arguments with alignment greater than register width are not supported.
+// XFAIL: target=powerpc{{.*}}-ibm-aix7.2.5.7
+
+// <experimental/simd>
+//
+// [simd.class]
+// template<class U, class Flags> void copy_from(const U* mem, Flags);
+// template<class U, class Flags> void copy_to(U* mem, Flags) const;
+
+#include "../test_utils.h"
+
+namespace ex = std::experimental::parallelism_v2;
+
+template <class T, class SimdAbi, std::size_t array_size>
+struct ElementAlignedCopyFromHelper {
+ template <class U>
+ void operator()() const {
+ U buffer[array_size];
+ for (size_t i = 0; i < array_size; ++i)
+ buffer[i] = static_cast<U>(i);
+ ex::simd<T, SimdAbi> origin_simd;
+ origin_simd.copy_from(buffer, ex::element_aligned_tag());
+ assert_simd_values_equal(origin_simd, buffer);
+ }
+};
+
+template <class T, class SimdAbi, std::size_t array_size>
+struct VectorAlignedCopyFromHelper {
+ template <class U>
+ void operator()() const {
+ alignas(ex::memory_alignment_v<ex::simd<T, SimdAbi>, U>) U buffer[array_size];
+ for (size_t i = 0; i < array_size; ++i)
+ buffer[i] = static_cast<U>(i);
+ ex::simd<T, SimdAbi> origin_simd;
+ origin_simd.copy_from(buffer, ex::vector_aligned_tag());
+ assert_simd_values_equal(origin_simd, buffer);
+ }
+};
+
+template <class T, class SimdAbi, std::size_t array_size>
+struct OveralignedCopyFromHelper {
+ template <class U>
+ void operator()() const {
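+    // bit_ceil(sizeof(U) + 1) is the smallest power of two strictly greater
+    // than sizeof(U), i.e. a stricter-than-natural alignment for overaligned_tag.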
+ alignas(bit_ceil(sizeof(U) + 1)) U buffer[array_size];
+ for (size_t i = 0; i < array_size; ++i)
+ buffer[i] = static_cast<U>(i);
+ ex::simd<T, SimdAbi> origin_simd;
+ origin_simd.copy_from(buffer, ex::overaligned_tag<bit_ceil(sizeof(U) + 1)>());
+ assert_simd_values_equal(origin_simd, buffer);
+ }
+};
+
+template <class T, std::size_t>
+struct CheckSimdCopyFrom {
+ template <class SimdAbi>
+ void operator()() {
+ constexpr std::size_t array_size = ex::simd_size_v<T, SimdAbi>;
+
+ types::for_each(simd_test_types(), ElementAlignedCopyFromHelper<T, SimdAbi, array_size>());
+ types::for_each(simd_test_types(), VectorAlignedCopyFromHelper<T, SimdAbi, array_size>());
+ types::for_each(simd_test_types(), OveralignedCopyFromHelper<T, SimdAbi, array_size>());
+ }
+};
+
+template <class T, class SimdAbi, std::size_t array_size>
+struct ElementAlignedCopyToHelper {
+ template <class U>
+ void operator()() const {
+ U buffer[array_size];
+ ex::simd<T, SimdAbi> origin_simd([](T i) { return i; });
+ origin_simd.copy_to(buffer, ex::element_aligned_tag());
+ assert_simd_values_equal(origin_simd, buffer);
+ }
+};
+
+template <class T, class SimdAbi, std::size_t array_size>
+struct VectorAlignedCopyToHelper {
+ template <class U>
+ void operator()() const {
+ alignas(ex::memory_alignment_v<ex::simd<T, SimdAbi>, U>) U buffer[array_size];
+ ex::simd<T, SimdAbi> origin_simd([](T i) { return i; });
+ origin_simd.copy_to(buffer, ex::vector_aligned_tag());
+ assert_simd_values_equal(origin_simd, buffer);
+ }
+};
+
+template <class T, class SimdAbi, std::size_t array_size>
+struct OveralignedCopyToHelper {
+ template <class U>
+ void operator()() const {
+ alignas(bit_ceil(sizeof(U) + 1)) U buffer[array_size];
+ ex::simd<T, SimdAbi> origin_simd([](T i) { return i; });
+ origin_simd.copy_to(buffer, ex::overaligned_tag<bit_ceil(sizeof(U) + 1)>());
+ assert_simd_values_equal(origin_simd, buffer);
+ }
+};
+
+template <class T, std::size_t>
+struct CheckSimdCopyTo {
+ template <class SimdAbi>
+ void operator()() {
+ constexpr std::size_t array_size = ex::simd_size_v<T, SimdAbi>;
+
+ types::for_each(simd_test_types(), ElementAlignedCopyToHelper<T, SimdAbi, array_size>());
+ types::for_each(simd_test_types(), VectorAlignedCopyToHelper<T, SimdAbi, array_size>());
+ types::for_each(simd_test_types(), OveralignedCopyToHelper<T, SimdAbi, array_size>());
+ }
+};
+
+template <class U, class T, class Flags, class SimdAbi = ex::simd_abi::compatible<T>, class = void>
+struct has_copy_from : std::false_type {};
+
+template <class U, class T, class Flags, class SimdAbi>
+struct has_copy_from<U,
+ T,
+ Flags,
+ SimdAbi,
+ std::void_t<decltype(std::declval<ex::simd<T, SimdAbi>>().copy_from(
+ std::declval<const U*>(), std::declval<Flags>()))>> : std::true_type {};
+
+template <class U, class T, class Flags, class SimdAbi = ex::simd_abi::compatible<T>, class = void>
+struct has_copy_to : std::false_type {};
+
+template <class U, class T, class Flags, class SimdAbi>
+struct has_copy_to<
+ U,
+ T,
+ Flags,
+ SimdAbi,
+ std::void_t<decltype(std::declval<ex::simd<T, SimdAbi>>().copy_to(std::declval<U*>(), std::declval<Flags>()))>>
+ : std::true_type {};
+
+template <class T, std::size_t>
+struct CheckSimdCopyTraits {
+ template <class SimdAbi>
+ void operator()() {
+ // These functions shall not participate in overload resolution unless
+ // is_simd_flag_type_v<Flags> is true, and
+ // U is a vectorizable type.
+ static_assert(has_copy_from<int, T, ex::element_aligned_tag, SimdAbi>::value);
+ static_assert(has_copy_to<int, T, ex::element_aligned_tag, SimdAbi>::value);
+
+ // is_simd_flag_type_v<Flags> is false
+ static_assert(!has_copy_from<int, T, T, SimdAbi>::value);
+ static_assert(!has_copy_to<int, T, T, SimdAbi>::value);
+ static_assert(!has_copy_from<int, T, SimdAbi, SimdAbi>::value);
+ static_assert(!has_copy_to<int, T, SimdAbi, SimdAbi>::value);
+
+ // U is not a vectorizable type.
+ static_assert(!has_copy_from<SimdAbi, T, ex::element_aligned_tag, SimdAbi>::value);
+ static_assert(!has_copy_to<SimdAbi, T, ex::element_aligned_tag, SimdAbi>::value);
+ static_assert(!has_copy_from<ex::element_aligned_tag, T, ex::element_aligned_tag, SimdAbi>::value);
+ static_assert(!has_copy_to<ex::element_aligned_tag, T, ex::element_aligned_tag, SimdAbi>::value);
+ }
+};
+
+int main(int, char**) {
+ test_all_simd_abi<CheckSimdCopyFrom>();
+ test_all_simd_abi<CheckSimdCopyTo>();
+ test_all_simd_abi<CheckSimdCopyTraits>();
+ return 0;
+}
diff --git a/libcxx/test/std/experimental/simd/simd.mask.class/simd_mask_copy.pass.cpp b/libcxx/test/std/experimental/simd/simd.mask.class/simd_mask_copy.pass.cpp
new file mode 100644
index 000000000000..0c3b4c9ea6d5
--- /dev/null
+++ b/libcxx/test/std/experimental/simd/simd.mask.class/simd_mask_copy.pass.cpp
@@ -0,0 +1,127 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14
+
+// <experimental/simd>
+//
+// [simd.class]
+// template<class Flags> void copy_from(const value_type* mem, Flags);
+// template<class Flags> void copy_to(value_type* mem, Flags);
+
+#include "../test_utils.h"
+
+namespace ex = std::experimental::parallelism_v2;
+
+template <class T, std::size_t>
+struct CheckSimdMaskCopyFrom {
+ template <class SimdAbi>
+ void operator()() {
+ constexpr std::size_t array_size = ex::simd_size_v<T, SimdAbi>;
+
+ // element aligned tag
+ constexpr std::size_t element_alignas_size = alignof(bool);
+ alignas(element_alignas_size) bool element_buffer[array_size];
+ for (size_t i = 0; i < array_size; ++i)
+ element_buffer[i] = static_cast<bool>(i % 2);
+ ex::simd_mask<T, SimdAbi> element_mask;
+ element_mask.copy_from(element_buffer, ex::element_aligned_tag());
+ assert_simd_mask_values_equal(element_mask, element_buffer);
+
+ // vector aligned tag
+ constexpr std::size_t vector_alignas_size = ex::memory_alignment_v<ex::simd_mask<T, SimdAbi>>;
+ alignas(vector_alignas_size) bool vector_buffer[array_size];
+ for (size_t i = 0; i < array_size; ++i)
+ vector_buffer[i] = static_cast<bool>(i % 2);
+ ex::simd_mask<T, SimdAbi> vector_mask;
+ vector_mask.copy_from(vector_buffer, ex::vector_aligned_tag());
+ assert_simd_mask_values_equal(vector_mask, vector_buffer);
+
+ // overaligned tag
+ constexpr std::size_t over_alignas_size = bit_ceil(sizeof(bool) + 1);
+ alignas(over_alignas_size) bool overaligned_buffer[array_size];
+ for (size_t i = 0; i < array_size; ++i)
+ overaligned_buffer[i] = static_cast<bool>(i % 2);
+ ex::simd_mask<T, SimdAbi> overaligned_mask;
+ overaligned_mask.copy_from(overaligned_buffer, ex::overaligned_tag<over_alignas_size>());
+ assert_simd_mask_values_equal(overaligned_mask, overaligned_buffer);
+ }
+};
+
+template <class T, std::size_t>
+struct CheckSimdMaskCopyTo {
+ template <class SimdAbi>
+ void operator()() {
+ constexpr std::size_t array_size = ex::simd_size_v<T, SimdAbi>;
+
+ // element aligned tag
+ constexpr std::size_t element_alignas_size = alignof(bool);
+ alignas(element_alignas_size) bool element_buffer[array_size];
+ ex::simd_mask<T, SimdAbi> element_mask(true);
+ element_mask.copy_to(element_buffer, ex::element_aligned_tag());
+ assert_simd_mask_values_equal(element_mask, element_buffer);
+
+ // vector aligned tag
+ constexpr std::size_t vector_alignas_size = ex::memory_alignment_v<ex::simd_mask<T, SimdAbi>>;
+ alignas(vector_alignas_size) bool vector_buffer[array_size];
+ ex::simd_mask<T, SimdAbi> vector_mask(false);
+ vector_mask.copy_to(vector_buffer, ex::vector_aligned_tag());
+ assert_simd_mask_values_equal(vector_mask, vector_buffer);
+
+ // overaligned tag
+ constexpr std::size_t over_alignas_size = bit_ceil(sizeof(bool) + 1);
+ alignas(over_alignas_size) bool overaligned_buffer[array_size];
+ ex::simd_mask<T, SimdAbi> overaligned_mask(true);
+ overaligned_mask.copy_to(overaligned_buffer, ex::overaligned_tag<over_alignas_size>());
+ assert_simd_mask_values_equal(overaligned_mask, overaligned_buffer);
+ }
+};
+
+template <class T, class Flags, class SimdAbi = ex::simd_abi::compatible<T>, class = void>
+struct has_copy_from : std::false_type {};
+
+template <class T, class Flags, class SimdAbi>
+struct has_copy_from<T,
+ Flags,
+ SimdAbi,
+ std::void_t<decltype(std::declval<ex::simd_mask<T, SimdAbi>>().copy_from(
+ std::declval<const bool*>(), std::declval<Flags>()))>> : std::true_type {};
+
+template <class T, class Flags, class SimdAbi = ex::simd_abi::compatible<T>, class = void>
+struct has_copy_to : std::false_type {};
+
+template <class T, class Flags, class SimdAbi>
+struct has_copy_to<T,
+ Flags,
+ SimdAbi,
+ std::void_t<decltype(std::declval<ex::simd_mask<T, SimdAbi>>().copy_to(
+ std::declval<bool*>(), std::declval<Flags>()))>> : std::true_type {};
+
+template <class T, std::size_t>
+struct CheckSimdMaskCopyTraits {
+ template <class SimdAbi>
+ void operator()() {
+ // These functions shall not participate in overload resolution unless
+ // is_simd_flag_type_v<Flags> is true
+ static_assert(has_copy_from<T, ex::element_aligned_tag, SimdAbi>::value);
+ static_assert(has_copy_to<T, ex::element_aligned_tag, SimdAbi>::value);
+
+ // is_simd_flag_type_v<Flags> is false
+ static_assert(!has_copy_from<T, T, SimdAbi>::value);
+ static_assert(!has_copy_to<T, T, SimdAbi>::value);
+ static_assert(!has_copy_from<T, SimdAbi, SimdAbi>::value);
+ static_assert(!has_copy_to<T, SimdAbi, SimdAbi>::value);
+ }
+};
+
+int main(int, char**) {
+ test_all_simd_abi<CheckSimdMaskCopyFrom>();
+ test_all_simd_abi<CheckSimdMaskCopyTo>();
+ test_all_simd_abi<CheckSimdMaskCopyTraits>();
+ return 0;
+}
diff --git a/libcxx/test/std/iterators/predef.iterators/counted.iterator/implicit_ctad.pass.cpp b/libcxx/test/std/iterators/predef.iterators/counted.iterator/implicit_ctad.pass.cpp
index 2786dfbb7a60..5b4853a783c2 100644
--- a/libcxx/test/std/iterators/predef.iterators/counted.iterator/implicit_ctad.pass.cpp
+++ b/libcxx/test/std/iterators/predef.iterators/counted.iterator/implicit_ctad.pass.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
-// UNSUPPORTED: c++98, c++03, c++11, c++14, c++17
+// UNSUPPORTED: c++03, c++11, c++14, c++17
// counted_iterator
diff --git a/libcxx/test/std/iterators/predef.iterators/insert.iterators/back.insert.iterator/implicit_ctad.pass.cpp b/libcxx/test/std/iterators/predef.iterators/insert.iterators/back.insert.iterator/implicit_ctad.pass.cpp
index 10729e0029d0..3c2e6af98d55 100644
--- a/libcxx/test/std/iterators/predef.iterators/insert.iterators/back.insert.iterator/implicit_ctad.pass.cpp
+++ b/libcxx/test/std/iterators/predef.iterators/insert.iterators/back.insert.iterator/implicit_ctad.pass.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
-// UNSUPPORTED: c++98, c++03, c++11, c++14
+// UNSUPPORTED: c++03, c++11, c++14
// <iterator>
diff --git a/libcxx/test/std/iterators/predef.iterators/insert.iterators/front.insert.iterator/implicit_ctad.pass.cpp b/libcxx/test/std/iterators/predef.iterators/insert.iterators/front.insert.iterator/implicit_ctad.pass.cpp
index f91d472e9ea2..f9b086aea4fc 100644
--- a/libcxx/test/std/iterators/predef.iterators/insert.iterators/front.insert.iterator/implicit_ctad.pass.cpp
+++ b/libcxx/test/std/iterators/predef.iterators/insert.iterators/front.insert.iterator/implicit_ctad.pass.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
-// UNSUPPORTED: c++98, c++03, c++11, c++14
+// UNSUPPORTED: c++03, c++11, c++14
// <iterator>
diff --git a/libcxx/test/std/iterators/predef.iterators/move.iterators/move.iterator/implicit_ctad.pass.cpp b/libcxx/test/std/iterators/predef.iterators/move.iterators/move.iterator/implicit_ctad.pass.cpp
index e5744465daa9..b84a07017dae 100644
--- a/libcxx/test/std/iterators/predef.iterators/move.iterators/move.iterator/implicit_ctad.pass.cpp
+++ b/libcxx/test/std/iterators/predef.iterators/move.iterators/move.iterator/implicit_ctad.pass.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
-// UNSUPPORTED: c++98, c++03, c++11, c++14
+// UNSUPPORTED: c++03, c++11, c++14
// <iterator>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/sized_delete_array14.pass.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/sized_delete_array14.pass.cpp
index 21663cdf956d..0241e7cefcac 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/sized_delete_array14.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/sized_delete_array14.pass.cpp
@@ -8,11 +8,11 @@
// test sized operator delete[] replacement.
+// TODO(mordante) fix this test after updating clang in Docker
+// UNSUPPORTED: clang-15, clang-16, clang-17, clang-18, clang-19
// UNSUPPORTED: sanitizer-new-delete, c++03, c++11
-
-// NOTE: Clang does not enable sized-deallocation in C++14 and beyond by
-// default. It is only enabled when -fsized-deallocation is given.
-// XFAIL: clang, apple-clang
+// XFAIL: apple-clang
+// XFAIL: using-built-library-before-llvm-11
#include <new>
#include <cstddef>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/sized_delete14.pass.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/sized_delete14.pass.cpp
index a8701ce7a86c..2ab691618ea4 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/sized_delete14.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/sized_delete14.pass.cpp
@@ -8,11 +8,11 @@
// test sized operator delete replacement.
+// TODO(mordante) fix this test after updating clang in Docker
+// UNSUPPORTED: clang-15, clang-16, clang-17, clang-18, clang-19
// UNSUPPORTED: sanitizer-new-delete, c++03, c++11
-
-// NOTE: Clang does not enable sized-deallocation in C++14 and beyond by
-// default. It is only enabled when -fsized-deallocation is given.
-// XFAIL: clang, apple-clang
+// XFAIL: apple-clang
+// XFAIL: using-built-library-before-llvm-11
#include <new>
#include <cstddef>
diff --git a/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_double.pass.cpp b/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_double.pass.cpp
index c802ab787682..fbd1c7c5715e 100644
--- a/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_double.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_double.pass.cpp
@@ -6,6 +6,11 @@
//
//===----------------------------------------------------------------------===//
+// The fix for LWG2381 (https://github.com/llvm/llvm-project/pull/77948) changed
+// the behavior of FP parsing; Apple back-deployment targets remain broken
+// because they use the old dylib.
+// UNSUPPORTED: using-built-library-before-llvm-19
+
// <locale>
// class num_get<charT, InputIterator>
@@ -116,9 +121,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == INFINITY);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
}
{
const char str[] = "INF";
@@ -128,9 +133,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == INFINITY);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
}
{
const char str[] = "-inf";
@@ -140,9 +145,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == -INFINITY);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
}
{
const char str[] = "-INF";
@@ -152,9 +157,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == -INFINITY);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
}
{
const char str[] = "nan";
@@ -164,9 +169,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(std::isnan(v));
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
}
{
const char str[] = "NAN";
@@ -176,9 +181,129 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(std::isnan(v));
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
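+  // LWG2381: the inputs below start with an exponent marker ('p'/'P' for
+  // hexfloats, 'e'/'E' for decimals) and contain no mantissa digits, so
+  // parsing must fail after consuming at most an optional sign.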
+ {
+ const char str[] = "p00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "P00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "+p00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "+P00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "-p00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "-P00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "e00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "E00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "+e00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "+E00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "-e00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "-E00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
}
{
v = -1;
diff --git a/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_float.pass.cpp b/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_float.pass.cpp
index 79c8480d0699..b5ac7d876157 100644
--- a/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_float.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_float.pass.cpp
@@ -6,6 +6,11 @@
//
//===----------------------------------------------------------------------===//
+// The fix for LWG2381 (https://github.com/llvm/llvm-project/pull/77948) changed
+// the behavior of FP parsing; Apple back-deployment targets remain broken
+// because they use the old dylib.
+// UNSUPPORTED: using-built-library-before-llvm-19
+
// <locale>
// class num_get<charT, InputIterator>
@@ -105,9 +110,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == INFINITY);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
}
{
const char str[] = "INF";
@@ -117,9 +122,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == INFINITY);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
}
{
const char str[] = "-inf";
@@ -129,9 +134,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == -INFINITY);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
}
{
const char str[] = "-INF";
@@ -141,9 +146,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == -INFINITY);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
}
{
const char str[] = "nan";
@@ -153,9 +158,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(std::isnan(v));
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
}
{
const char str[] = "NAN";
@@ -165,9 +170,129 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(std::isnan(v));
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
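+  // LWG2381: as in the double test, a bare exponent marker with no mantissa
+  // digits is not a valid number, so parsing must fail after consuming at
+  // most an optional sign.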
+ {
+ const char str[] = "p00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "P00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "+p00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "+P00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "-p00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "-P00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "e00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "E00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "+e00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "+E00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "-e00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "-E00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
}
{
v = -1;
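
The updated assertions encode the post-LWG2381 rule: an unparseable spelling such as "inf" consumes nothing, sets failbit, and stores zero. A minimal standalone sketch (not part of the patch) of the same behavior, assuming a library built with the LWG2381 fix (LLVM 19 or later for libc++):

```cpp
#include <cassert>
#include <sstream>

int main() {
  std::istringstream in("inf");
  float v = -1.0f;
  in >> v;            // "inf" is no longer accepted as a floating-point value
  assert(in.fail());  // failbit is set, matching the assertions above
  assert(v == 0.0f);  // since C++11, a failed extraction stores zero
  return 0;
}
```
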
diff --git a/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_long_double.pass.cpp b/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_long_double.pass.cpp
index e2b2aeafd1ef..9617899f749c 100644
--- a/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_long_double.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_long_double.pass.cpp
@@ -6,6 +6,11 @@
//
//===----------------------------------------------------------------------===//
+// The fix for LWG2381 (https://github.com/llvm/llvm-project/pull/77948) changed
+// the behavior of FP parsing, while Apple back-deployment targets remain broken
+// because the old behavior is implemented in the dylib.
+// UNSUPPORTED: using-built-library-before-llvm-19
+
// <locale>
// class num_get<charT, InputIterator>
@@ -105,9 +110,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == INFINITY);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
}
{
const char str[] = "INF";
@@ -117,9 +122,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == INFINITY);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
}
{
const char str[] = "-inf";
@@ -129,9 +134,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == -INFINITY);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
}
{
const char str[] = "-INF";
@@ -141,9 +146,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == -INFINITY);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
}
{
const char str[] = "nan";
@@ -153,9 +158,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(std::isnan(v));
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
}
{
const char str[] = "NAN";
@@ -165,9 +170,129 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(std::isnan(v));
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "p00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "P00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "+p00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "+P00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "-p00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "-P00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "e00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "E00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "+e00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "+E00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "-e00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "-E00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
}
{
const char str[] = "1.189731495357231765021264e+49321";
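The new "p00"/"E00" cases pin down why a bare exponent must be rejected: an exponent is only meaningful after a digit sequence. A small illustration (not part of the patch) using std::strtod, showing both a valid hexfloat and the no-conversion case the tests assert:

```cpp
#include <cassert>
#include <cstdlib>

int main() {
  // A hex mantissa followed by a 'p' binary exponent is a valid form...
  char* end = nullptr;
  double v = std::strtod("0x1.8p3", &end);  // 1.5 * 2^3
  assert(v == 12.0 && *end == '\0');

  // ...but a bare exponent with no digits in front of it is not a number,
  // so nothing is consumed and zero is returned.
  const char* s = "p00";
  v = std::strtod(s, &end);
  assert(v == 0.0 && end == s);  // no conversion performed
  return 0;
}
```
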
diff --git a/libcxx/test/std/localization/locale.categories/facet.numpunct/locale.numpunct.byname/thousands_sep.pass.cpp b/libcxx/test/std/localization/locale.categories/facet.numpunct/locale.numpunct.byname/thousands_sep.pass.cpp
index d7e1178c92e0..f368b1069c06 100644
--- a/libcxx/test/std/localization/locale.categories/facet.numpunct/locale.numpunct.byname/thousands_sep.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/facet.numpunct/locale.numpunct.byname/thousands_sep.pass.cpp
@@ -9,7 +9,6 @@
// NetBSD does not support LC_NUMERIC at the moment
// XFAIL: netbsd
-// XFAIL: LIBCXX-AIX-FIXME
// XFAIL: LIBCXX-FREEBSD-FIXME
// REQUIRES: locale.en_US.UTF-8
@@ -64,8 +63,8 @@ int main(int, char**)
// The below tests work around GLIBC's use of U+202F as LC_NUMERIC thousands_sep.
std::locale l(LOCALE_fr_FR_UTF_8);
{
-#if defined(_CS_GNU_LIBC_VERSION) || defined(_WIN32)
- const char sep = ' ';
+#if defined(_CS_GNU_LIBC_VERSION) || defined(_WIN32) || defined(_AIX)
+ const char sep = ' ';
#else
const char sep = ',';
#endif
@@ -77,11 +76,13 @@ int main(int, char**)
{
#if defined(_CS_GNU_LIBC_VERSION)
const wchar_t wsep = glibc_version_less_than("2.27") ? L' ' : L'\u202f';
-#elif defined(_WIN32)
- const wchar_t wsep = L'\u00A0';
-#else
- const wchar_t wsep = L',';
-#endif
+# elif defined(_AIX)
+ const wchar_t wsep = L'\u202F';
+# elif defined(_WIN32)
+ const wchar_t wsep = L'\u00A0';
+# else
+ const wchar_t wsep = L',';
+# endif
typedef wchar_t C;
const std::numpunct<C>& np = std::use_facet<std::numpunct<C> >(l);
assert(np.thousands_sep() == wsep);
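The sep/wsep guards above encode per-platform separator choices for fr_FR. A small sketch (not part of the patch) of how the facet is queried, assuming the fr_FR.UTF-8 locale is installed (the constructor throws otherwise):

```cpp
#include <iostream>
#include <locale>

int main() {
  std::locale l("fr_FR.UTF-8");  // throws std::runtime_error if not installed
  char sep = std::use_facet<std::numpunct<char> >(l).thousands_sep();
  // Per the test's guards: ' ' on glibc/Windows/AIX, ',' elsewhere.
  std::cout << "thousands_sep: '" << sep << "'\n";
  return 0;
}
```
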
diff --git a/libcxx/test/std/algorithms/numeric.ops/reduce/pstl.reduce.pass.cpp b/libcxx/test/std/numerics/numeric.ops/reduce/pstl.reduce.pass.cpp
index b083c4f80e0b..f5748d7c823b 100644
--- a/libcxx/test/std/algorithms/numeric.ops/reduce/pstl.reduce.pass.cpp
+++ b/libcxx/test/std/numerics/numeric.ops/reduce/pstl.reduce.pass.cpp
@@ -10,7 +10,7 @@
// UNSUPPORTED: libcpp-has-no-incomplete-pstl
-// <algorithm>
+// <numeric>
// template<class ExecutionPolicy, class ForwardIterator>
// typename iterator_traits<ForwardIterator>::value_type
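
The moved test covers std::reduce, which is declared in <numeric> rather than <algorithm>, hence both the new directory and the header-comment fix. A minimal usage sketch (not part of the patch), assuming a toolchain with parallel algorithm support:

```cpp
#include <cassert>
#include <execution>
#include <numeric>
#include <vector>

int main() {
  std::vector<int> v = {1, 2, 3, 4};
  // Default operation is plus<>, default initial value is value_type{}.
  int sum = std::reduce(std::execution::par, v.begin(), v.end());
  assert(sum == 10);
  return 0;
}
```
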
diff --git a/libcxx/test/std/algorithms/numeric.ops/transform.reduce/pstl.transform_reduce.binary.pass.cpp b/libcxx/test/std/numerics/numeric.ops/transform.reduce/pstl.transform_reduce.binary.pass.cpp
index 18b56f237c3e..6d8bb47ac7dc 100644
--- a/libcxx/test/std/algorithms/numeric.ops/transform.reduce/pstl.transform_reduce.binary.pass.cpp
+++ b/libcxx/test/std/numerics/numeric.ops/transform.reduce/pstl.transform_reduce.binary.pass.cpp
@@ -10,7 +10,7 @@
// UNSUPPORTED: libcpp-has-no-incomplete-pstl
-// <algorithm>
+// <numeric>
// template<class ExecutionPolicy,
// class ForwardIterator1, class ForwardIterator2, class T>
diff --git a/libcxx/test/std/algorithms/numeric.ops/transform.reduce/pstl.transform_reduce.unary.pass.cpp b/libcxx/test/std/numerics/numeric.ops/transform.reduce/pstl.transform_reduce.unary.pass.cpp
index a32a4f85f633..4cea3d405aa0 100644
--- a/libcxx/test/std/algorithms/numeric.ops/transform.reduce/pstl.transform_reduce.unary.pass.cpp
+++ b/libcxx/test/std/numerics/numeric.ops/transform.reduce/pstl.transform_reduce.unary.pass.cpp
@@ -10,7 +10,7 @@
// UNSUPPORTED: libcpp-has-no-incomplete-pstl
-// <algorithm>
+// <numeric>
// template<class ExecutionPolicy,
// class ForwardIterator, class T,
diff --git a/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/assign.pass.cpp b/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/assign.pass.cpp
index 63a1a8adf4e3..d71b76926ce3 100644
--- a/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/assign.pass.cpp
+++ b/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/assign.pass.cpp
@@ -84,12 +84,12 @@ int main(int, char**)
test_ext<unsigned int>();
test<unsigned long>();
// This isn't implemented on platforms without __int128
-#ifndef _LIBCPP_HAS_NO_INT128
+#ifndef TEST_HAS_NO_INT128
test_ext<unsigned long>();
#endif
test<unsigned long long>();
// This isn't implemented on platforms without __int128
-#ifndef _LIBCPP_HAS_NO_INT128
+#ifndef TEST_HAS_NO_INT128
test_ext<unsigned long long>();
#endif
diff --git a/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/copy.pass.cpp b/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/copy.pass.cpp
index c45f45d0f20a..50389ef80139 100644
--- a/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/copy.pass.cpp
+++ b/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/copy.pass.cpp
@@ -83,12 +83,12 @@ int main(int, char**)
test_ext<unsigned int>();
test<unsigned long>();
// This isn't implemented on platforms without __int128
-#ifndef _LIBCPP_HAS_NO_INT128
+#ifndef TEST_HAS_NO_INT128
test_ext<unsigned long>();
#endif
test<unsigned long long>();
// This isn't implemented on platforms without __int128
-#ifndef _LIBCPP_HAS_NO_INT128
+#ifndef TEST_HAS_NO_INT128
test_ext<unsigned long long>();
#endif
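These hunks swap libc++'s internal _LIBCPP_HAS_NO_INT128 for the test-suite macro TEST_HAS_NO_INT128 from test_macros.h, so the tests no longer peek at implementation details. A standalone approximation of the guard (the real definition lives in test_macros.h; checking __SIZEOF_INT128__ directly is the usual portable stand-in):

```cpp
// Approximation of the suite's guard: define TEST_HAS_NO_INT128 when the
// compiler provides no __int128 type.
#ifndef __SIZEOF_INT128__
#  define TEST_HAS_NO_INT128
#endif

#ifndef TEST_HAS_NO_INT128
__int128 square(__int128 x) { return x * x; }
#endif

int main() {
#ifndef TEST_HAS_NO_INT128
  return square(3) == 9 ? 0 : 1;
#else
  return 0;
#endif
}
```
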
diff --git a/libcxx/test/std/strings/string.view/string.view.deduct/implicit.pass.cpp b/libcxx/test/std/strings/string.view/string.view.deduct/implicit.pass.cpp
index c76c4a01c696..36584f76bebd 100644
--- a/libcxx/test/std/strings/string.view/string.view.deduct/implicit.pass.cpp
+++ b/libcxx/test/std/strings/string.view/string.view.deduct/implicit.pass.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
-// UNSUPPORTED: c++98, c++03, c++11, c++14
+// UNSUPPORTED: c++03, c++11, c++14
// <string_view>
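
implicit.pass.cpp exercises class template argument deduction for basic_string_view, a C++17 feature, so only c++03/11/14 remain unsupported once the stale c++98 marker is dropped. A sketch in the same spirit (not part of the patch; the copy deduction candidate picks basic_string_view<char> here):

```cpp
#include <string_view>
#include <type_traits>

int main() {
  // CTAD: no template arguments are written on the left-hand side.
  std::basic_string_view sv = std::string_view("hello");
  static_assert(std::is_same_v<decltype(sv), std::string_view>);
  return 0;
}
```
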
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for.pass.cpp
index 42150207c3c4..6a054f74b9fb 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for.pass.cpp
@@ -5,9 +5,8 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -19,77 +18,92 @@
// const chrono::duration<Rep, Period>& rel_time);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
+#include <chrono>
#include <mutex>
#include <thread>
-#include <chrono>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-std::condition_variable cv;
-std::mutex mut;
-
-int test1 = 0;
-int test2 = 0;
-
-bool expect_timeout = false;
-
-void f()
-{
- typedef std::chrono::system_clock Clock;
- typedef std::chrono::milliseconds milliseconds;
- std::unique_lock<std::mutex> lk(mut);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- Clock::time_point t0 = Clock::now();
- Clock::time_point wait_end = t0 + milliseconds(250);
- Clock::duration d;
- do {
- d = wait_end - Clock::now();
- if (d <= milliseconds(0)) break;
- } while (test2 == 0 && cv.wait_for(lk, d) == std::cv_status::no_timeout);
- Clock::time_point t1 = Clock::now();
- if (!expect_timeout)
- {
- assert(t1 - t0 < milliseconds(250));
- assert(test2 != 0);
- }
- else
- {
- assert(t1 - t0 - milliseconds(250) < milliseconds(50));
- assert(test2 == 0);
- }
+template <class Function>
+std::chrono::microseconds measure(Function f) {
+ std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
+ f();
+ std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
+ return std::chrono::duration_cast<std::chrono::microseconds>(end - start);
}
-int main(int, char**)
-{
- {
- std::unique_lock<std::mutex> lk(mut);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
- }
- test1 = 0;
- test2 = 0;
- expect_timeout = true;
- {
- std::unique_lock<std::mutex> lk(mut);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- lk.unlock();
- t.join();
- }
+int main(int, char**) {
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+ // To test this, we set a very long timeout in wait_for() and we wait
+ // again in case we get awoken spuriously. Note that it can actually
+ // happen that we get awoken spuriously and fail to recognize it
+ // (making this test useless), but the likelihood should be small.
+ {
+ std::atomic<bool> ready(false);
+ std::atomic<bool> likely_spurious(true);
+ auto timeout = std::chrono::seconds(3600);
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ auto elapsed = measure([&] {
+ ready = true;
+ do {
+ std::cv_status result = cv.wait_for(lock, timeout);
+ assert(result == std::cv_status::no_timeout);
+ } while (likely_spurious);
+ });
+
+ // This can technically fail if we have many spurious awakenings, but in practice the
+ // tolerance is so high that it shouldn't be a problem.
+ assert(elapsed < timeout);
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This blocks the condition variable inside its wait call
+ // so we can notify it while it is waiting.
+ std::unique_lock<std::mutex> lock(mutex);
+ cv.notify_one();
+ likely_spurious = false;
+ lock.unlock();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a timeout.
+ //
+ // To test this, we create a thread that waits on a condition variable
+ // with a certain timeout, and we never awaken it. To guard against
+ // spurious wakeups, we wait again whenever we are awoken for a reason
+ // other than a timeout.
+ {
+ auto timeout = std::chrono::milliseconds(250);
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ std::cv_status result;
+ do {
+ auto elapsed = measure([&] { result = cv.wait_for(lock, timeout); });
+ if (result == std::cv_status::timeout)
+ assert(elapsed >= timeout);
+ } while (result != std::cv_status::timeout);
+ });
+
+ t1.join();
+ }
return 0;
}
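The rewritten test times each wait with a small measure() helper instead of relying on ALLOW_RETRIES. A standalone sketch (not part of the patch) of the same idiom:

```cpp
#include <cassert>
#include <chrono>
#include <thread>

// Same idiom as the measure() helper in the rewritten tests: time an
// arbitrary callable and return the elapsed microseconds.
template <class Function>
std::chrono::microseconds measure(Function f) {
  auto start = std::chrono::high_resolution_clock::now();
  f();
  auto end = std::chrono::high_resolution_clock::now();
  return std::chrono::duration_cast<std::chrono::microseconds>(end - start);
}

int main() {
  auto elapsed = measure([] { std::this_thread::sleep_for(std::chrono::milliseconds(10)); });
  // sleep_for blocks for at least the requested duration, so this should
  // hold in practice.
  assert(elapsed >= std::chrono::milliseconds(10));
  return 0;
}
```
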
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for_pred.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for_pred.pass.cpp
index 872bcb6d8a57..76fc7393bc8f 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for_pred.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_for_pred.pass.cpp
@@ -5,9 +5,8 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -20,82 +19,141 @@
// Predicate pred);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
+#include <chrono>
#include <mutex>
#include <thread>
-#include <chrono>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-class Pred
-{
- int& i_;
-public:
- explicit Pred(int& i) : i_(i) {}
-
- bool operator()() {return i_ != 0;}
-};
-
-std::condition_variable cv;
-std::mutex mut;
-
-int test1 = 0;
-int test2 = 0;
-
-int runs = 0;
-
-void f()
-{
- typedef std::chrono::system_clock Clock;
- typedef std::chrono::milliseconds milliseconds;
- std::unique_lock<std::mutex> lk(mut);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- Clock::time_point t0 = Clock::now();
- bool r = cv.wait_for(lk, milliseconds(250), Pred(test2));
- ((void)r); // Prevent unused warning
- Clock::time_point t1 = Clock::now();
- if (runs == 0)
- {
- assert(t1 - t0 < milliseconds(250));
- assert(test2 != 0);
- }
- else
- {
- assert(t1 - t0 - milliseconds(250) < milliseconds(50));
- assert(test2 == 0);
- }
- ++runs;
+template <class Function>
+std::chrono::microseconds measure(Function f) {
+ std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
+ f();
+ std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
+ return std::chrono::duration_cast<std::chrono::microseconds>(end - start);
}
-int main(int, char**)
-{
- {
- std::unique_lock<std::mutex>lk(mut);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
- }
- test1 = 0;
- test2 = 0;
- {
- std::unique_lock<std::mutex>lk(mut);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- lk.unlock();
- t.join();
- }
+int main(int, char**) {
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+ // To test this, we set a very long timeout in wait_for() and we try to minimize
+ // the likelihood that we got awoken by a spurious wakeup by updating the
+ // likely_spurious flag only immediately before we perform the notification.
+ {
+ std::atomic<bool> ready(false);
+ std::atomic<bool> likely_spurious(true);
+ auto timeout = std::chrono::seconds(3600);
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ auto elapsed = measure([&] {
+ ready = true;
+ bool result = cv.wait_for(lock, timeout, [&] { return !likely_spurious; });
+ assert(result); // return value should be true since we didn't time out
+ });
+ assert(elapsed < timeout);
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex).
+ std::unique_lock<std::mutex> lock(mutex);
+
+ likely_spurious = false;
+ lock.unlock();
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a timeout.
+ //
+ // To test this, we create a thread that waits on a condition variable with a certain
+ // timeout, and we never awaken it. The "stop waiting" predicate always returns false,
+ // which means that we can't get out of the wait via a spurious wakeup.
+ {
+ auto timeout = std::chrono::milliseconds(250);
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ auto elapsed = measure([&] {
+ bool result = cv.wait_for(lock, timeout, [] { return false; }); // never stop waiting (until timeout)
+ assert(!result); // return value should be false since the predicate returns false after the timeout
+ });
+ assert(elapsed >= timeout);
+ });
+
+ t1.join();
+ }
+
+ // Test unblocking via a spurious wakeup.
+ //
+ // To test this, we set a fairly long timeout in wait_for() and we basically never
+ // wake up the condition variable. This way, we are hoping to get out of the wait
+ // via a spurious wakeup.
+ //
+ // However, since spurious wakeups are not required to even happen, this test is
+ // only trying to trigger that code path, but not actually asserting that it is
+ // taken. In particular, we do need to eventually ensure we get out of the wait
+ // by standard means, so we actually wake up the thread at the end.
+ {
+ std::atomic<bool> ready(false);
+ std::atomic<bool> awoken(false);
+ auto timeout = std::chrono::seconds(3600);
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ auto elapsed = measure([&] {
+ ready = true;
+ bool result = cv.wait_for(lock, timeout, [&] { return true; });
+ awoken = true;
+ assert(result); // return value should be true since we didn't time out
+ });
+ assert(elapsed < timeout); // can technically fail if t2 never executes and we timeout, but very unlikely
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex).
+ std::unique_lock<std::mutex> lock(mutex);
+ lock.unlock();
+
+ // Give some time for t1 to be awoken spuriously so that code path is used.
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+ // We would want to assert that the thread has been awoken after this time,
+ // however nothing guarantees us that it ever gets spuriously awoken, so
+ // we can't really check anything. This is still left here as documentation.
+ bool woke = awoken.load();
+ assert(woke || !woke);
+
+ // Whatever happened, actually awaken the condition variable to ensure the test
+ // doesn't keep running until the timeout.
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
return 0;
}
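The predicate overload used throughout the rewrite is what makes spurious wakeups harmless: the condition is re-checked under the lock after every wakeup. A minimal handshake sketch (not part of the patch) in the same style:

```cpp
#include <cassert>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

int main() {
  std::condition_variable cv;
  std::mutex m;
  bool done = false;

  std::thread t([&] {
    std::unique_lock<std::mutex> lock(m);
    // The predicate overload re-checks the condition after every wakeup,
    // so a spurious wakeup cannot produce a false success.
    bool ok = cv.wait_for(lock, std::chrono::hours(1), [&] { return done; });
    assert(ok);  // the very long timeout should never be reached in practice
  });

  {
    // Setting the flag under the same mutex the waiter uses guarantees the
    // notification cannot be lost between the predicate check and the wait.
    std::lock_guard<std::mutex> lock(m);
    done = true;
  }
  cv.notify_one();
  t.join();
  return 0;
}
```
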
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_pred.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_pred.pass.cpp
index 15feba55616b..5ce5bccb37f1 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_pred.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_pred.pass.cpp
@@ -5,9 +5,8 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -17,51 +16,98 @@
// void wait(unique_lock<mutex>& lock, Predicate pred);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
#include <mutex>
#include <thread>
-#include <functional>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-std::condition_variable cv;
-std::mutex mut;
-
-int test1 = 0;
-int test2 = 0;
-
-class Pred
-{
- int& i_;
-public:
- explicit Pred(int& i) : i_(i) {}
-
- bool operator()() {return i_ != 0;}
-};
-
-void f()
-{
- std::unique_lock<std::mutex> lk(mut);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- cv.wait(lk, Pred(test2));
- assert(test2 != 0);
-}
+int main(int, char**) {
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+ // To test this, we try to minimize the likelihood that we got awoken by a
+ // spurious wakeup by updating the likely_spurious flag only immediately
+ // before we perform the notification.
+ {
+ std::atomic<bool> ready(false);
+ std::atomic<bool> likely_spurious(true);
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ ready = true;
+ cv.wait(lock, [&] { return !likely_spurious; });
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex).
+ std::unique_lock<std::mutex> lock(mutex);
+
+ likely_spurious = false;
+ lock.unlock();
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a spurious wakeup.
+ //
+ // To test this, we basically never wake up the condition variable. This way, we
+ // are hoping to get out of the wait via a spurious wakeup.
+ //
+ // However, since spurious wakeups are not required to even happen, this test is
+ // only trying to trigger that code path, but not actually asserting that it is
+ // taken. In particular, we do need to eventually ensure we get out of the wait
+ // by standard means, so we actually wake up the thread at the end.
+ {
+ std::atomic<bool> ready(false);
+ std::atomic<bool> awoken(false);
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ ready = true;
+ cv.wait(lock, [&] { return true; });
+ awoken = true;
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex).
+ std::unique_lock<std::mutex> lock(mutex);
+ lock.unlock();
+
+ // Give some time for t1 to be awoken spuriously so that code path is used.
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+ // We would want to assert that the thread has been awoken after this time,
+ // however nothing guarantees us that it ever gets spuriously awoken, so
+ // we can't really check anything. This is still left here as documentation.
+ bool woke = awoken.load();
+ assert(woke || !woke);
+
+ // Whatever happened, actually awaken the condition variable to ensure the test finishes.
+ cv.notify_one();
+ });
-int main(int, char**)
-{
- std::unique_lock<std::mutex>lk(mut);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
+ t2.join();
+ t1.join();
+ }
return 0;
}
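The two-thread pattern above relies on an atomic ready flag plus taking the waiter's mutex before notifying: once the notifier holds that mutex, the waiter must already be blocked inside wait() (having released it), so the notification cannot be lost. A condensed sketch (not part of the patch) of that handshake:

```cpp
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>

int main() {
  std::atomic<bool> ready(false);
  std::condition_variable cv;
  std::mutex m;
  bool go = false;

  std::thread waiter([&] {
    std::unique_lock<std::mutex> lock(m);
    ready = true;  // published while still holding the mutex
    cv.wait(lock, [&] { return go; });
  });

  std::thread notifier([&] {
    while (!ready) {
      // spin until the waiter owns the mutex
    }
    // Acquiring the mutex here succeeds only after the waiter released it
    // inside wait(), so the subsequent notification cannot be lost.
    {
      std::lock_guard<std::mutex> lock(m);
      go = true;
    }
    cv.notify_one();
  });

  notifier.join();
  waiter.join();
  return 0;
}
```
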
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until.pass.cpp
index 03205e68dca6..6f3a5a01cdd1 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until.pass.cpp
@@ -5,9 +5,8 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -19,100 +18,100 @@
// const chrono::time_point<Clock, Duration>& abs_time);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
+#include <chrono>
#include <mutex>
#include <thread>
-#include <chrono>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-struct TestClock
-{
- typedef std::chrono::milliseconds duration;
- typedef duration::rep rep;
- typedef duration::period period;
- typedef std::chrono::time_point<TestClock> time_point;
- static const bool is_steady = true;
-
- static time_point now()
- {
- using namespace std::chrono;
- return time_point(duration_cast<duration>(
- steady_clock::now().time_since_epoch()
- ));
- }
+struct TestClock {
+ typedef std::chrono::milliseconds duration;
+ typedef duration::rep rep;
+ typedef duration::period period;
+ typedef std::chrono::time_point<TestClock> time_point;
+ static const bool is_steady = true;
+
+ static time_point now() {
+ using namespace std::chrono;
+ return time_point(duration_cast<duration>(steady_clock::now().time_since_epoch()));
+ }
};
-std::condition_variable cv;
-std::mutex mut;
-
-int test1 = 0;
-int test2 = 0;
-
-int runs = 0;
-
-template <typename Clock>
-void f()
-{
- std::unique_lock<std::mutex> lk(mut);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- typename Clock::time_point t0 = Clock::now();
- typename Clock::time_point t = t0 + std::chrono::milliseconds(250);
- while (test2 == 0 && cv.wait_until(lk, t) == std::cv_status::no_timeout)
- ;
- typename Clock::time_point t1 = Clock::now();
- if (runs == 0)
- {
- assert(t1 - t0 < std::chrono::milliseconds(250));
- assert(test2 != 0);
- }
- else
- {
- assert(t1 - t0 - std::chrono::milliseconds(250) < std::chrono::milliseconds(50));
- assert(test2 == 0);
- }
- ++runs;
-}
+template <class Clock>
+void test() {
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+ // To test this, we set a very long timeout in wait_until() and we wait
+ // again in case we get awoken spuriously. Note that it can actually
+ // happen that we get awoken spuriously and fail to recognize it
+ // (making this test useless), but the likelihood should be small.
+ {
+ std::atomic<bool> ready(false);
+ std::atomic<bool> likely_spurious(true);
+ auto timeout = Clock::now() + std::chrono::seconds(3600);
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ ready = true;
+ do {
+ std::cv_status result = cv.wait_until(lock, timeout);
+ assert(result == std::cv_status::no_timeout);
+ } while (likely_spurious);
+
+ // This can technically fail if we have many spurious awakenings, but in practice the
+ // tolerance is so high that it shouldn't be a problem.
+ assert(Clock::now() < timeout);
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This blocks the condition variable inside its wait call
+ // so we can notify it while it is waiting.
+ std::unique_lock<std::mutex> lock(mutex);
+ cv.notify_one();
+ likely_spurious = false;
+ lock.unlock();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a timeout.
+ //
+ // To test this, we create a thread that waits on a condition variable
+ // with a certain timeout, and we never awaken it. To guard against
+ // spurious wakeups, we wait again whenever we are awoken for a reason
+ // other than a timeout.
+ {
+ auto timeout = Clock::now() + std::chrono::milliseconds(250);
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ std::cv_status result;
+ do {
+ result = cv.wait_until(lock, timeout);
+ if (result == std::cv_status::timeout)
+ assert(Clock::now() >= timeout);
+ } while (result != std::cv_status::timeout);
+ });
-template <typename Clock>
-void run_test()
-{
- runs = 0;
- test1 = 0;
- test2 = 0;
- {
- std::unique_lock<std::mutex>lk(mut);
- std::thread t = support::make_test_thread(f<Clock>);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
- }
- test1 = 0;
- test2 = 0;
- {
- std::unique_lock<std::mutex>lk(mut);
- std::thread t = support::make_test_thread(f<Clock>);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- lk.unlock();
- t.join();
- }
+ t1.join();
+ }
}
-int main(int, char**)
-{
- run_test<TestClock>();
- run_test<std::chrono::steady_clock>();
- run_test<std::chrono::system_clock>();
- return 0;
+int main(int, char**) {
+ test<TestClock>();
+ test<std::chrono::steady_clock>();
+ return 0;
}
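The test now parameterizes over the clock, including the user-defined TestClock, since wait_until() must accept any type satisfying the Clock requirements rather than just the standard clocks. A standalone sketch (not part of the patch) of a timeout measured against such a clock:

```cpp
#include <cassert>
#include <chrono>
#include <condition_variable>
#include <mutex>

// A user-defined clock modeled on the tests' TestClock.
struct TestClock {
  typedef std::chrono::milliseconds duration;
  typedef duration::rep rep;
  typedef duration::period period;
  typedef std::chrono::time_point<TestClock> time_point;
  static const bool is_steady = true;

  static time_point now() {
    using namespace std::chrono;
    return time_point(duration_cast<duration>(steady_clock::now().time_since_epoch()));
  }
};

int main() {
  std::condition_variable cv;
  std::mutex m;
  std::unique_lock<std::mutex> lock(m);

  // Nothing ever notifies, so this must eventually time out; loop past any
  // spurious wakeups, exactly as the rewritten test does.
  auto deadline = TestClock::now() + std::chrono::milliseconds(50);
  while (cv.wait_until(lock, deadline) != std::cv_status::timeout) {
  }
  assert(TestClock::now() >= deadline);
  return 0;
}
```
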
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until_pred.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until_pred.pass.cpp
index fb8bd6e38069..847d0c10c572 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until_pred.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvar/wait_until_pred.pass.cpp
@@ -6,8 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -20,99 +19,145 @@
// Predicate pred);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
+#include <chrono>
#include <mutex>
#include <thread>
-#include <chrono>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-struct Clock
-{
- typedef std::chrono::milliseconds duration;
- typedef duration::rep rep;
- typedef duration::period period;
- typedef std::chrono::time_point<Clock> time_point;
- static const bool is_steady = true;
-
- static time_point now()
- {
- using namespace std::chrono;
- return time_point(duration_cast<duration>(
- steady_clock::now().time_since_epoch()
- ));
- }
+struct TestClock {
+ typedef std::chrono::milliseconds duration;
+ typedef duration::rep rep;
+ typedef duration::period period;
+ typedef std::chrono::time_point<TestClock> time_point;
+ static const bool is_steady = true;
+
+ static time_point now() {
+ using namespace std::chrono;
+ return time_point(duration_cast<duration>(steady_clock::now().time_since_epoch()));
+ }
};
-class Pred
-{
- int& i_;
-public:
- explicit Pred(int& i) : i_(i) {}
+template <class Clock>
+void test() {
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+ // To test this, we set a very long timeout in wait_until() and we try to minimize
+ // the likelihood that we got awoken by a spurious wakeup by updating the
+ // likely_spurious flag only immediately before we perform the notification.
+ {
+ std::atomic<bool> ready(false);
+ std::atomic<bool> likely_spurious(true);
+ auto timeout = Clock::now() + std::chrono::seconds(3600);
+ std::condition_variable cv;
+ std::mutex mutex;
- bool operator()() {return i_ != 0;}
-};
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ ready = true;
+ bool result = cv.wait_until(lock, timeout, [&] { return !likely_spurious; });
+ assert(result); // return value should be true since we didn't time out
+ assert(Clock::now() < timeout);
+ });
-std::condition_variable cv;
-std::mutex mut;
-
-int test1 = 0;
-int test2 = 0;
-
-int runs = 0;
-
-void f()
-{
- std::unique_lock<std::mutex> lk(mut);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- Clock::time_point t0 = Clock::now();
- Clock::time_point t = t0 + Clock::duration(250);
- bool r = cv.wait_until(lk, t, Pred(test2));
- Clock::time_point t1 = Clock::now();
- if (runs == 0)
- {
- assert(t1 - t0 < Clock::duration(250));
- assert(test2 != 0);
- assert(r);
- }
- else
- {
- assert(t1 - t0 - Clock::duration(250) < Clock::duration(50));
- assert(test2 == 0);
- assert(!r);
- }
- ++runs;
-}
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex).
+ std::unique_lock<std::mutex> lock(mutex);
+
+ likely_spurious = false;
+ lock.unlock();
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a timeout.
+ //
+ // To test this, we create a thread that waits on a condition variable with a certain
+ // timeout, and we never awaken it. The "stop waiting" predicate always returns false,
+ // which means that we can't get out of the wait via a spurious wakeup.
+ {
+ auto timeout = Clock::now() + std::chrono::milliseconds(250);
+ std::condition_variable cv;
+ std::mutex mutex;
-int main(int, char**)
-{
- {
- std::unique_lock<std::mutex> lk(mut);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
- }
- test1 = 0;
- test2 = 0;
- {
- std::unique_lock<std::mutex> lk(mut);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- lk.unlock();
- t.join();
- }
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ bool result = cv.wait_until(lock, timeout, [] { return false; }); // never stop waiting (until timeout)
+ assert(!result); // return value should be false since the predicate returns false after the timeout
+ assert(Clock::now() >= timeout);
+ });
+
+ t1.join();
+ }
+
+ // Test unblocking via a spurious wakeup.
+ //
+ // To test this, we set a fairly long timeout in wait_until() and we basically never
+ // wake up the condition variable. This way, we are hoping to get out of the wait
+ // via a spurious wakeup.
+ //
+ // However, since spurious wakeups are not required to even happen, this test is
+ // only trying to trigger that code path, but not actually asserting that it is
+ // taken. In particular, we do need to eventually ensure we get out of the wait
+ // by standard means, so we actually wake up the thread at the end.
+ {
+ std::atomic<bool> ready(false);
+ std::atomic<bool> awoken(false);
+ auto timeout = Clock::now() + std::chrono::seconds(3600);
+ std::condition_variable cv;
+ std::mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ std::unique_lock<std::mutex> lock(mutex);
+ ready = true;
+ bool result = cv.wait_until(lock, timeout, [&] { return true; });
+ awoken = true;
+ assert(result); // return value should be true since we didn't time out
+ assert(Clock::now() < timeout); // can technically fail if t2 never executes and we timeout, but very unlikely
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex).
+ std::unique_lock<std::mutex> lock(mutex);
+ lock.unlock();
+
+ // Give some time for t1 to be awoken spuriously so that code path is used.
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+ // We would want to assert that the thread has been awoken after this time,
+ // however nothing guarantees us that it ever gets spuriously awoken, so
+ // we can't really check anything. This is still left here as documentation.
+ bool woke = awoken.load();
+ assert(woke || !woke);
+
+ // Whatever happened, actually awaken the condition variable to ensure the test
+ // doesn't keep running until the timeout.
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
+}
+int main(int, char**) {
+ test<TestClock>();
+ test<std::chrono::steady_clock>();
return 0;
}
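For the predicate overload, wait_until() returns the predicate's value on exit, so a false return is an unambiguous timeout and no spurious-wakeup loop is needed. A minimal sketch (not part of the patch):

```cpp
#include <cassert>
#include <chrono>
#include <condition_variable>
#include <mutex>

int main() {
  std::condition_variable cv;
  std::mutex m;
  std::unique_lock<std::mutex> lock(m);

  // The predicate never becomes true, so the only way out is the timeout,
  // and the false return value reports exactly that.
  auto deadline = std::chrono::steady_clock::now() + std::chrono::milliseconds(50);
  bool result = cv.wait_until(lock, deadline, [] { return false; });
  assert(!result);
  assert(std::chrono::steady_clock::now() >= deadline);
  return 0;
}
```
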
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for.pass.cpp
index 95acef90470e..eab38081d7b7 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for.pass.cpp
@@ -6,8 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -18,81 +17,105 @@
// wait_for(Lock& lock, const chrono::duration<Rep, Period>& rel_time);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
+#include <chrono>
#include <mutex>
#include <thread>
-#include <chrono>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-std::condition_variable_any cv;
-
-typedef std::timed_mutex L0;
-typedef std::unique_lock<L0> L1;
-
-L0 m0;
-
-int test1 = 0;
-int test2 = 0;
-
-bool expect_timeout = false;
-
-void f()
-{
- typedef std::chrono::system_clock Clock;
- typedef std::chrono::milliseconds milliseconds;
- L1 lk(m0);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- Clock::time_point t0 = Clock::now();
- Clock::time_point wait_end = t0 + milliseconds(250);
- Clock::duration d;
- do {
- d = wait_end - Clock::now();
- if (d <= milliseconds(0)) break;
- } while (test2 == 0 && cv.wait_for(lk, d) == std::cv_status::no_timeout);
- Clock::time_point t1 = Clock::now();
- if (!expect_timeout)
- {
- assert(t1 - t0 < milliseconds(250));
- assert(test2 != 0);
- }
- else
- {
- assert(t1 - t0 - milliseconds(250) < milliseconds(50));
- assert(test2 == 0);
- }
+template <class Mutex>
+struct MyLock : std::unique_lock<Mutex> {
+ using std::unique_lock<Mutex>::unique_lock;
+};
+
+template <class Function>
+std::chrono::microseconds measure(Function f) {
+ std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
+ f();
+ std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
+ return std::chrono::duration_cast<std::chrono::microseconds>(end - start);
}
-int main(int, char**)
-{
- {
- L1 lk(m0);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
- }
- test1 = 0;
- test2 = 0;
- expect_timeout = true;
- {
- L1 lk(m0);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- lk.unlock();
- t.join();
- }
+template <class Lock>
+void test() {
+ using Mutex = typename Lock::mutex_type;
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+ // To test this, we set a very long timeout in wait_for() and we wait
+ // again in case we get awoken spuriously. Note that it can actually
+ // happen that we get awoken spuriously and fail to recognize it
+ // (making this test useless), but the likelihood should be small.
+ {
+ std::atomic<bool> ready(false);
+ std::atomic<bool> likely_spurious(true);
+ auto timeout = std::chrono::seconds(3600);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ auto elapsed = measure([&] {
+ ready = true;
+ do {
+ std::cv_status result = cv.wait_for(lock, timeout);
+ assert(result == std::cv_status::no_timeout);
+ } while (likely_spurious);
+ });
+
+ // This can technically fail if we have many spurious awakenings, but in practice the
+ // tolerance is so high that it shouldn't be a problem.
+ assert(elapsed < timeout);
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This blocks the condition variable inside its wait call
+ // so we can notify it while it is waiting.
+ Lock lock(mutex);
+ cv.notify_one();
+ likely_spurious = false;
+ lock.unlock();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a timeout.
+ //
+ // To test this, we create a thread that waits on a condition variable
+ // with a certain timeout, and we never awaken it. To guard against
+ // spurious wakeups, we wait again whenever we are awoken for a reason
+ // other than a timeout.
+ {
+ auto timeout = std::chrono::milliseconds(250);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ std::cv_status result;
+ do {
+ auto elapsed = measure([&] { result = cv.wait_for(lock, timeout); });
+ if (result == std::cv_status::timeout)
+ assert(elapsed >= timeout);
+ } while (result != std::cv_status::timeout);
+ });
+
+ t1.join();
+ }
+}
+int main(int, char**) {
+ test<std::unique_lock<std::mutex>>();
+ test<std::unique_lock<std::timed_mutex>>();
+ test<MyLock<std::mutex>>();
+ test<MyLock<std::timed_mutex>>();
return 0;
}
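condition_variable_any accepts any BasicLockable, which the new MyLock instantiations exercise. A minimal sketch (not part of the patch) with the same shape, where MyLock mirrors the test's helper:

```cpp
#include <condition_variable>
#include <mutex>
#include <thread>

// Same shape as the test's helper: a user-defined lock that forwards to
// std::unique_lock.
template <class Mutex>
struct MyLock : std::unique_lock<Mutex> {
  using std::unique_lock<Mutex>::unique_lock;
};

int main() {
  std::condition_variable_any cv;
  std::timed_mutex m;
  bool go = false;

  std::thread t([&] {
    MyLock<std::timed_mutex> lock(m);
    cv.wait(lock, [&] { return go; });  // works with any BasicLockable
  });

  {
    MyLock<std::timed_mutex> lock(m);
    go = true;
  }
  cv.notify_one();
  t.join();
  return 0;
}
```
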
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for_pred.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for_pred.pass.cpp
index 0b560022bc67..2dc36938b41e 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for_pred.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for_pred.pass.cpp
@@ -6,8 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -19,89 +18,148 @@
// Predicate pred);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
+#include <chrono>
#include <mutex>
#include <thread>
-#include <chrono>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-class Pred
-{
- int& i_;
-public:
- explicit Pred(int& i) : i_(i) {}
-
- bool operator()() {return i_ != 0;}
+template <class Mutex>
+struct MyLock : std::unique_lock<Mutex> {
+ using std::unique_lock<Mutex>::unique_lock;
};
-std::condition_variable_any cv;
-
-typedef std::timed_mutex L0;
-typedef std::unique_lock<L0> L1;
-
-L0 m0;
-
-int test1 = 0;
-int test2 = 0;
-
-int runs = 0;
-bool expect_result = false;
-
-void f()
-{
- typedef std::chrono::system_clock Clock;
- typedef std::chrono::milliseconds milliseconds;
- L1 lk(m0);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- Clock::time_point t0 = Clock::now();
- bool result = cv.wait_for(lk, milliseconds(250), Pred(test2));
- assert(result == expect_result);
- Clock::time_point t1 = Clock::now();
- if (runs == 0)
- {
- assert(t1 - t0 < milliseconds(250));
- assert(test2 != 0);
- }
- else
- {
- assert(t1 - t0 - milliseconds(250) < milliseconds(50));
- assert(test2 == 0);
- }
- ++runs;
+template <class Function>
+std::chrono::microseconds measure(Function f) {
+ std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
+ f();
+ std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
+ return std::chrono::duration_cast<std::chrono::microseconds>(end - start);
}
-int main(int, char**)
-{
- {
- expect_result = true;
- L1 lk(m0);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
- }
- test1 = 0;
- test2 = 0;
- {
- expect_result = false;
- L1 lk(m0);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- lk.unlock();
- t.join();
- }
-
- return 0;
+template <class Lock>
+void test() {
+ using Mutex = typename Lock::mutex_type;
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+ // To test this, we set a very long timeout in wait_for() and we try to minimize
+ // the likelihood that we got awoken by a spurious wakeup by updating the
+ // likely_spurious flag only immediately before we perform the notification.
+ {
+ std::atomic<bool> ready(false);
+ std::atomic<bool> likely_spurious(true);
+ auto timeout = std::chrono::seconds(3600);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ auto elapsed = measure([&] {
+ ready = true;
+ bool result = cv.wait_for(lock, timeout, [&] { return !likely_spurious; });
+ assert(result); // return value should be true since we didn't time out
+ });
+ assert(elapsed < timeout);
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex).
+ Lock lock(mutex);
+
+ likely_spurious = false;
+ lock.unlock();
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a timeout.
+ //
+ // To test this, we create a thread that waits on a condition variable with a certain
+ // timeout, and we never awaken it. The "stop waiting" predicate always returns false,
+ // which means that we can't get out of the wait via a spurious wakeup.
+ {
+ auto timeout = std::chrono::milliseconds(250);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ auto elapsed = measure([&] {
+ bool result = cv.wait_for(lock, timeout, [] { return false; }); // never stop waiting (until timeout)
+ assert(!result); // return value should be false since the predicate returns false after the timeout
+ });
+ assert(elapsed >= timeout);
+ });
+
+ t1.join();
+ }
+
+ // Test unblocking via a spurious wakeup.
+ //
+ // To test this, we set a fairly long timeout in wait_for() and we basically never
+ // wake up the condition variable. This way, we are hoping to get out of the wait
+ // via a spurious wakeup.
+ //
+ // However, since spurious wakeups are not required to even happen, this test is
+ // only trying to trigger that code path, but not actually asserting that it is
+ // taken. In particular, we do need to eventually ensure we get out of the wait
+ // by standard means, so we actually wake up the thread at the end.
+ {
+ std::atomic<bool> ready(false);
+ std::atomic<bool> awoken(false);
+ auto timeout = std::chrono::seconds(3600);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ auto elapsed = measure([&] {
+ ready = true;
+ bool result = cv.wait_for(lock, timeout, [&] { return true; });
+ awoken = true;
+ assert(result); // return value should be true since we didn't time out
+ });
+ assert(elapsed < timeout); // can technically fail if t2 never executes and we timeout, but very unlikely
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex).
+ Lock lock(mutex);
+ lock.unlock();
+
+ // Give some time for t1 to be awoken spuriously so that code path is used.
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+ // We would want to assert that the thread has been awoken after this time,
+ // however nothing guarantees us that it ever gets spuriously awoken, so
+ // we can't really check anything. This is still left here as documentation.
+ bool woke = awoken.load();
+ assert(woke || !woke);
+
+ // Whatever happened, actually awaken the condition variable to ensure the test
+ // doesn't keep running until the timeout.
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
}
+
+int main(int, char**) {
+  test<std::unique_lock<std::mutex>>();
+  test<std::unique_lock<std::timed_mutex>>();
+  test<MyLock<std::mutex>>();
+  test<MyLock<std::timed_mutex>>();
+  return 0;
+}
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_pred.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_pred.pass.cpp
index a5e28137bef8..48efbf12e738 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_pred.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_pred.pass.cpp
@@ -5,9 +5,8 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -17,55 +16,113 @@
// void wait(Lock& lock, Predicate pred);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
#include <mutex>
#include <thread>
-#include <functional>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-std::condition_variable_any cv;
+template <class Mutex>
+struct MyLock : std::unique_lock<Mutex> {
+ using std::unique_lock<Mutex>::unique_lock;
+};
-typedef std::timed_mutex L0;
-typedef std::unique_lock<L0> L1;
+template <class Lock>
+void test() {
+ using Mutex = typename Lock::mutex_type;
-L0 m0;
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+ // To test this, we try to minimize the likelihood that we got awoken by a
+  // spurious wakeup, by updating the likely_spurious flag only immediately
+ // before we perform the notification.
+ {
+ std::atomic<bool> ready(false);
+ std::atomic<bool> likely_spurious(true);
+ std::condition_variable_any cv;
+ Mutex mutex;
-int test1 = 0;
-int test2 = 0;
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ ready = true;
+ cv.wait(lock, [&] { return !likely_spurious; });
+ });
-class Pred
-{
- int& i_;
-public:
- explicit Pred(int& i) : i_(i) {}
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
- bool operator()() {return i_ != 0;}
-};
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex).
+ Lock lock(mutex);
+
+ likely_spurious = false;
+ lock.unlock();
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a spurious wakeup.
+ //
+ // To test this, we basically never wake up the condition variable. This way, we
+ // are hoping to get out of the wait via a spurious wakeup.
+ //
+ // However, since spurious wakeups are not required to even happen, this test is
+ // only trying to trigger that code path, but not actually asserting that it is
+ // taken. In particular, we do need to eventually ensure we get out of the wait
+ // by standard means, so we actually wake up the thread at the end.
+ {
+ std::atomic<bool> ready(false);
+ std::atomic<bool> awoken(false);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ ready = true;
+ cv.wait(lock, [&] { return true; });
+ awoken = true;
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex).
+ Lock lock(mutex);
+ lock.unlock();
+
+ // Give some time for t1 to be awoken spuriously so that code path is used.
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+      // We would like to assert that the thread has been awoken after this time;
+      // however, nothing guarantees that it ever gets spuriously awoken, so
+      // we can't really check anything. This is still left here as documentation.
+ bool woke = awoken.load();
+ assert(woke || !woke);
+
+ // Whatever happened, actually awaken the condition variable to ensure the test finishes.
+ cv.notify_one();
+ });
-void f()
-{
- L1 lk(m0);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- cv.wait(lk, Pred(test2));
- assert(test2 != 0);
+ t2.join();
+ t1.join();
+ }
}
-int main(int, char**)
-{
- L1 lk(m0);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
+int main(int, char**) {
+ test<std::unique_lock<std::mutex>>();
+ test<std::unique_lock<std::timed_mutex>>();
+ test<MyLock<std::mutex>>();
+ test<MyLock<std::timed_mutex>>();
return 0;
}
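
The MyLock instantiations exist because std::condition_variable_any only requires BasicLockable (lock() and unlock()) from its lock argument, unlike std::condition_variable, which is tied to std::unique_lock<std::mutex>. A sketch with a hypothetical user-defined lock (not from the patch):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    // A minimal BasicLockable wrapper; lock() and unlock() are all that
    // condition_variable_any needs from the lock type.
    struct NaiveLock {
      std::mutex& m;
      explicit NaiveLock(std::mutex& mut) : m(mut) { m.lock(); }
      ~NaiveLock() { m.unlock(); }
      void lock() { m.lock(); }
      void unlock() { m.unlock(); }
    };

    int main() {
      std::mutex m;
      std::condition_variable_any cv;
      NaiveLock lk(m);
      // wait_for() only calls lk.unlock() and lk.lock(); with no notifier,
      // it simply times out after a millisecond.
      (void)cv.wait_for(lk, std::chrono::milliseconds(1));
      return 0;
    }
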
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until.pass.cpp
index 0f2334393d83..6494bcd6dbe3 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until.pass.cpp
@@ -6,8 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -18,93 +17,115 @@
// wait_until(Lock& lock, const chrono::time_point<Clock, Duration>& abs_time);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
+#include <chrono>
#include <mutex>
#include <thread>
-#include <chrono>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-struct Clock
-{
- typedef std::chrono::milliseconds duration;
- typedef duration::rep rep;
- typedef duration::period period;
- typedef std::chrono::time_point<Clock> time_point;
- static const bool is_steady = true;
-
- static time_point now()
- {
- using namespace std::chrono;
- return time_point(duration_cast<duration>(
- steady_clock::now().time_since_epoch()
- ));
- }
+struct TestClock {
+ typedef std::chrono::milliseconds duration;
+ typedef duration::rep rep;
+ typedef duration::period period;
+ typedef std::chrono::time_point<TestClock> time_point;
+ static const bool is_steady = true;
+
+ static time_point now() {
+ using namespace std::chrono;
+ return time_point(duration_cast<duration>(steady_clock::now().time_since_epoch()));
+ }
};
-std::condition_variable_any cv;
-
-typedef std::timed_mutex L0;
-typedef std::unique_lock<L0> L1;
-
-L0 m0;
-
-int test1 = 0;
-int test2 = 0;
-
-int runs = 0;
-
-void f()
-{
- L1 lk(m0);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- Clock::time_point t0 = Clock::now();
- Clock::time_point t = t0 + Clock::duration(250);
- while (test2 == 0 && cv.wait_until(lk, t) == std::cv_status::no_timeout)
- ;
- Clock::time_point t1 = Clock::now();
- if (runs == 0)
- {
- assert(t1 - t0 < Clock::duration(250));
- assert(test2 != 0);
- }
- else
- {
- assert(t1 - t0 - Clock::duration(250) < Clock::duration(50));
- assert(test2 == 0);
- }
- ++runs;
+template <class Mutex>
+struct MyLock : std::unique_lock<Mutex> {
+ using std::unique_lock<Mutex>::unique_lock;
+};
+
+template <class Lock, class Clock>
+void test() {
+ using Mutex = typename Lock::mutex_type;
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+ // To test this, we set a very long timeout in wait_until() and we wait
+ // again in case we get awoken spuriously. Note that it can actually
+ // happen that we get awoken spuriously and fail to recognize it
+ // (making this test useless), but the likelihood should be small.
+ {
+ std::atomic<bool> ready(false);
+ std::atomic<bool> likely_spurious(true);
+ auto timeout = Clock::now() + std::chrono::seconds(3600);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ ready = true;
+ do {
+ std::cv_status result = cv.wait_until(lock, timeout);
+ assert(result == std::cv_status::no_timeout);
+ } while (likely_spurious);
+
+ // This can technically fail if we have many spurious awakenings, but in practice the
+ // tolerance is so high that it shouldn't be a problem.
+ assert(Clock::now() < timeout);
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+      // Acquire the same mutex as t1. This guarantees that t1 is blocked inside
+      // its wait call, so we can notify it while it is waiting.
+ Lock lock(mutex);
+ cv.notify_one();
+ likely_spurious = false;
+ lock.unlock();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a timeout.
+ //
+ // To test this, we create a thread that waits on a condition variable
+ // with a certain timeout, and we never awaken it. To guard against
+ // spurious wakeups, we wait again whenever we are awoken for a reason
+ // other than a timeout.
+ {
+ auto timeout = Clock::now() + std::chrono::milliseconds(250);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ std::cv_status result;
+ do {
+ result = cv.wait_until(lock, timeout);
+ if (result == std::cv_status::timeout)
+ assert(Clock::now() >= timeout);
+ } while (result != std::cv_status::timeout);
+ });
+
+ t1.join();
+ }
}
-int main(int, char**)
-{
- {
- L1 lk(m0);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
- }
- test1 = 0;
- test2 = 0;
- {
- L1 lk(m0);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- lk.unlock();
- t.join();
- }
+int main(int, char**) {
+ test<std::unique_lock<std::mutex>, TestClock>();
+ test<std::unique_lock<std::mutex>, std::chrono::steady_clock>();
+
+ test<std::unique_lock<std::timed_mutex>, TestClock>();
+ test<std::unique_lock<std::timed_mutex>, std::chrono::steady_clock>();
+
+ test<MyLock<std::mutex>, TestClock>();
+ test<MyLock<std::mutex>, std::chrono::steady_clock>();
+ test<MyLock<std::timed_mutex>, TestClock>();
+ test<MyLock<std::timed_mutex>, std::chrono::steady_clock>();
return 0;
}
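
TestClock above provides the minimal user-defined Clock surface (duration/rep/period/time_point typedefs, is_steady, and a static now()), which is why each case is instantiated with both TestClock and std::chrono::steady_clock. A small usage sketch of the timeout-with-re-wait idiom from the second test (illustrative only, not part of the patch):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    int main() {
      std::condition_variable_any cv;
      std::mutex m;
      std::unique_lock<std::mutex> lk(m);
      auto deadline = std::chrono::steady_clock::now() + std::chrono::milliseconds(50);
      // Re-wait on spurious wakeups until the deadline has actually passed.
      while (cv.wait_until(lk, deadline) != std::cv_status::timeout) {
      }
      return 0;
    }
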
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until_pred.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until_pred.pass.cpp
index aa60ae4715df..ee7c1729aacf 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until_pred.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until_pred.pass.cpp
@@ -6,8 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// UNSUPPORTED: no-threads
-// ALLOW_RETRIES: 2
+// UNSUPPORTED: no-threads, c++03
// <condition_variable>
@@ -20,103 +19,171 @@
// Predicate pred);
#include <condition_variable>
+#include <atomic>
+#include <cassert>
+#include <chrono>
#include <mutex>
#include <thread>
-#include <chrono>
-#include <cassert>
#include "make_test_thread.h"
#include "test_macros.h"
-struct Clock
-{
- typedef std::chrono::milliseconds duration;
- typedef duration::rep rep;
- typedef duration::period period;
- typedef std::chrono::time_point<Clock> time_point;
- static const bool is_steady = true;
-
- static time_point now()
- {
- using namespace std::chrono;
- return time_point(duration_cast<duration>(
- steady_clock::now().time_since_epoch()
- ));
- }
+struct TestClock {
+ typedef std::chrono::milliseconds duration;
+ typedef duration::rep rep;
+ typedef duration::period period;
+ typedef std::chrono::time_point<TestClock> time_point;
+ static const bool is_steady = true;
+
+ static time_point now() {
+ using namespace std::chrono;
+ return time_point(duration_cast<duration>(steady_clock::now().time_since_epoch()));
+ }
};
-class Pred
-{
- int& i_;
-public:
- explicit Pred(int& i) : i_(i) {}
-
- bool operator()() {return i_ != 0;}
+template <class Mutex>
+struct MyLock : std::unique_lock<Mutex> {
+ using std::unique_lock<Mutex>::unique_lock;
};
-std::condition_variable_any cv;
-
-typedef std::timed_mutex L0;
-typedef std::unique_lock<L0> L1;
-
-L0 m0;
-
-int test1 = 0;
-int test2 = 0;
-
-int runs = 0;
-
-void f()
-{
- L1 lk(m0);
- assert(test2 == 0);
- test1 = 1;
- cv.notify_one();
- Clock::time_point t0 = Clock::now();
- Clock::time_point t = t0 + Clock::duration(250);
- bool r = cv.wait_until(lk, t, Pred(test2));
- Clock::time_point t1 = Clock::now();
- if (runs == 0)
- {
- assert(t1 - t0 < Clock::duration(250));
- assert(test2 != 0);
- assert(r);
- }
- else
- {
- assert(t1 - t0 - Clock::duration(250) < Clock::duration(50));
- assert(test2 == 0);
- assert(!r);
- }
- ++runs;
+template <class Lock, class Clock>
+void test() {
+ using Mutex = typename Lock::mutex_type;
+ // Test unblocking via a call to notify_one() in another thread.
+ //
+ // To test this, we set a very long timeout in wait_until() and we try to minimize
+  // the likelihood that we got awoken by a spurious wakeup, by updating the
+ // likely_spurious flag only immediately before we perform the notification.
+ {
+ std::atomic<bool> ready(false);
+ std::atomic<bool> likely_spurious(true);
+ auto timeout = Clock::now() + std::chrono::seconds(3600);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ ready = true;
+ bool result = cv.wait_until(lock, timeout, [&] { return !likely_spurious; });
+ assert(result); // return value should be true since we didn't time out
+ assert(Clock::now() < timeout);
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex).
+ Lock lock(mutex);
+
+ likely_spurious = false;
+ lock.unlock();
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
+
+ // Test unblocking via a timeout.
+ //
+ // To test this, we create a thread that waits on a condition variable with a certain
+ // timeout, and we never awaken it. The "stop waiting" predicate always returns false,
+ // which means that we can't get out of the wait via a spurious wakeup.
+ {
+ auto timeout = Clock::now() + std::chrono::milliseconds(250);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ bool result = cv.wait_until(lock, timeout, [] { return false; }); // never stop waiting (until timeout)
+      assert(!result); // return value should be false since the predicate still returns false when the timeout expires
+ assert(Clock::now() >= timeout);
+ });
+
+ t1.join();
+ }
+
+ // Test unblocking via a spurious wakeup.
+ //
+ // To test this, we set a fairly long timeout in wait_until() and we basically never
+ // wake up the condition variable. This way, we are hoping to get out of the wait
+ // via a spurious wakeup.
+ //
+ // However, since spurious wakeups are not required to even happen, this test is
+ // only trying to trigger that code path, but not actually asserting that it is
+ // taken. In particular, we do need to eventually ensure we get out of the wait
+ // by standard means, so we actually wake up the thread at the end.
+ {
+ std::atomic<bool> ready(false);
+ std::atomic<bool> awoken(false);
+ auto timeout = Clock::now() + std::chrono::seconds(3600);
+ std::condition_variable_any cv;
+ Mutex mutex;
+
+ std::thread t1 = support::make_test_thread([&] {
+ Lock lock(mutex);
+ ready = true;
+ bool result = cv.wait_until(lock, timeout, [&] { return true; });
+ awoken = true;
+ assert(result); // return value should be true since we didn't time out
+      assert(Clock::now() < timeout); // can technically fail if t2 never executes and we time out, but that is very unlikely
+ });
+
+ std::thread t2 = support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+
+ // Acquire the same mutex as t1. This ensures that the condition variable has started
+ // waiting (and hence released that mutex).
+ Lock lock(mutex);
+ lock.unlock();
+
+ // Give some time for t1 to be awoken spuriously so that code path is used.
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+
+      // We would like to assert that the thread has been awoken after this time;
+      // however, nothing guarantees that it ever gets spuriously awoken, so
+      // we can't really check anything. This is still left here as documentation.
+ bool woke = awoken.load();
+ assert(woke || !woke);
+
+ // Whatever happened, actually awaken the condition variable to ensure the test
+ // doesn't keep running until the timeout.
+ cv.notify_one();
+ });
+
+ t2.join();
+ t1.join();
+ }
}
-int main(int, char**)
-{
- {
- L1 lk(m0);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- test2 = 1;
- lk.unlock();
- cv.notify_one();
- t.join();
- }
- test1 = 0;
- test2 = 0;
- {
- L1 lk(m0);
- std::thread t = support::make_test_thread(f);
- assert(test1 == 0);
- while (test1 == 0)
- cv.wait(lk);
- assert(test1 != 0);
- lk.unlock();
- t.join();
- }
+int main(int, char**) {
+  // Run on multiple threads to speed up the test, and because it ought to work anyway.
+ std::thread tests[] = {
+ support::make_test_thread([] {
+ test<std::unique_lock<std::mutex>, TestClock>();
+ test<std::unique_lock<std::mutex>, std::chrono::steady_clock>();
+ }),
+ support::make_test_thread([] {
+ test<std::unique_lock<std::timed_mutex>, TestClock>();
+ test<std::unique_lock<std::timed_mutex>, std::chrono::steady_clock>();
+ }),
+ support::make_test_thread([] {
+ test<MyLock<std::mutex>, TestClock>();
+ test<MyLock<std::mutex>, std::chrono::steady_clock>();
+ }),
+ support::make_test_thread([] {
+ test<MyLock<std::timed_mutex>, TestClock>();
+ test<MyLock<std::timed_mutex>, std::chrono::steady_clock>();
+ })};
+
+ for (std::thread& t : tests)
+ t.join();
return 0;
}
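
For reference, the predicated wait_until() overload is specified ([thread.condition.condvarany], paraphrased) as a loop over the plain overload; this equivalence is what the timeout test relies on when it asserts that an always-false predicate yields a false return. A sketch, not part of the patch:

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    // Behaves like cv.wait_until(lock, abs_time, pred), per the standard's
    // specified equivalence (paraphrased).
    template <class Lock, class Clock, class Duration, class Predicate>
    bool wait_until_equivalent(std::condition_variable_any& cv, Lock& lock,
                               const std::chrono::time_point<Clock, Duration>& abs_time,
                               Predicate pred) {
      while (!pred())
        if (cv.wait_until(lock, abs_time) == std::cv_status::timeout)
          return pred();
      return true;
    }

    int main() {
      std::condition_variable_any cv;
      std::mutex m;
      std::unique_lock<std::mutex> lk(m);
      // A deadline in the past times out immediately, so this returns the
      // final predicate value: false.
      auto past = std::chrono::steady_clock::now() - std::chrono::seconds(1);
      return wait_until_equivalent(cv, lk, past, [] { return false; }) ? 1 : 0;
    }
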
diff --git a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.guard/implicit_ctad.pass.cpp b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.guard/implicit_ctad.pass.cpp
index b75441733482..9319ec0dba04 100644
--- a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.guard/implicit_ctad.pass.cpp
+++ b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.guard/implicit_ctad.pass.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
// UNSUPPORTED: no-threads
-// UNSUPPORTED: c++98, c++03, c++11, c++14
+// UNSUPPORTED: c++03, c++11, c++14
// <mutex>
diff --git a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.scoped/implicit_ctad.pass.cpp b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.scoped/implicit_ctad.pass.cpp
index 7305b48c53a9..86bda3a9c6b9 100644
--- a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.scoped/implicit_ctad.pass.cpp
+++ b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.scoped/implicit_ctad.pass.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
// UNSUPPORTED: no-threads
-// UNSUPPORTED: c++98, c++03, c++11, c++14
+// UNSUPPORTED: c++03, c++11, c++14
// <mutex>
diff --git a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/implicit_ctad.pass.cpp b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/implicit_ctad.pass.cpp
index 9a595f90ed4f..826ec2b558f0 100644
--- a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/implicit_ctad.pass.cpp
+++ b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/implicit_ctad.pass.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
// UNSUPPORTED: no-threads
-// UNSUPPORTED: c++98, c++03, c++11, c++14
+// UNSUPPORTED: c++03, c++11, c++14
// <shared_mutex>
diff --git a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.cons/mutex.pass.cpp b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.cons/mutex.pass.cpp
index 4940041bcf96..ece330134f2c 100644
--- a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.cons/mutex.pass.cpp
+++ b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.cons/mutex.pass.cpp
@@ -5,10 +5,9 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-//
+
// UNSUPPORTED: no-threads
// UNSUPPORTED: c++03, c++11
-// ALLOW_RETRIES: 2
// <shared_mutex>
@@ -19,9 +18,8 @@
// template<class _Mutex> shared_lock(shared_lock<_Mutex>)
// -> shared_lock<_Mutex>; // C++17
+#include <atomic>
#include <cassert>
-#include <chrono>
-#include <cstdlib>
#include <shared_mutex>
#include <thread>
#include <vector>
@@ -29,77 +27,77 @@
#include "make_test_thread.h"
#include "test_macros.h"
-typedef std::chrono::system_clock Clock;
-typedef Clock::time_point time_point;
-typedef Clock::duration duration;
-typedef std::chrono::milliseconds ms;
-typedef std::chrono::nanoseconds ns;
-
-ms WaitTime = ms(250);
-
-// Thread sanitizer causes more overhead and will sometimes cause this test
-// to fail. To prevent this we give Thread sanitizer more time to complete the
-// test.
-#if !defined(TEST_IS_EXECUTED_IN_A_SLOW_ENVIRONMENT)
-ms Tolerance = ms(50);
-#else
-ms Tolerance = ms(50 * 5);
-#endif
+struct Monitor {
+ bool lock_shared_called = false;
+ bool unlock_shared_called = false;
+};
-std::shared_timed_mutex m;
+struct TrackedMutex {
+ Monitor* monitor = nullptr;
-void f()
-{
- time_point t0 = Clock::now();
- time_point t1;
- {
- std::shared_lock<std::shared_timed_mutex> ul(m);
- t1 = Clock::now();
- }
- ns d = t1 - t0 - WaitTime;
- assert(d < Tolerance); // within tolerance
-}
+ void lock_shared() {
+ if (monitor != nullptr)
+ monitor->lock_shared_called = true;
+ }
+ void unlock_shared() {
+ if (monitor != nullptr)
+ monitor->unlock_shared_called = true;
+ }
+};
-void g()
-{
- time_point t0 = Clock::now();
- time_point t1;
- {
- std::shared_lock<std::shared_timed_mutex> ul(m);
- t1 = Clock::now();
- }
- ns d = t1 - t0;
- assert(d < Tolerance); // within tolerance
-}
+template <class Mutex>
+void test() {
+ // Basic sanity test
+ {
+ Mutex mutex;
+ std::vector<std::thread> threads;
+ std::atomic<bool> ready(false);
+ for (int i = 0; i != 5; ++i) {
+ threads.push_back(support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
-int main(int, char**)
-{
- std::vector<std::thread> v;
- {
- m.lock();
- for (int i = 0; i < 5; ++i)
- v.push_back(support::make_test_thread(f));
- std::this_thread::sleep_for(WaitTime);
- m.unlock();
- for (auto& t : v)
- t.join();
- }
- {
- m.lock_shared();
- for (auto& t : v)
- t = support::make_test_thread(g);
- std::thread q = support::make_test_thread(f);
- std::this_thread::sleep_for(WaitTime);
- m.unlock_shared();
- for (auto& t : v)
- t.join();
- q.join();
+ std::shared_lock<Mutex> lock(mutex);
+ assert(lock.owns_lock());
+ }));
}
+ ready = true;
+ for (auto& t : threads)
+ t.join();
+ }
+
+ // Test CTAD
+ {
+#if TEST_STD_VER >= 17
+ Mutex mutex;
+ std::shared_lock lock(mutex);
+ static_assert(std::is_same<decltype(lock), std::shared_lock<Mutex>>::value);
+#endif
+ }
+}
+
+int main(int, char**) {
#if TEST_STD_VER >= 17
- std::shared_lock sl(m);
- static_assert((std::is_same<decltype(sl), std::shared_lock<decltype(m)>>::value), "" );
+ test<std::shared_mutex>();
#endif
+ test<std::shared_timed_mutex>();
+ test<TrackedMutex>();
+
+ // Use shared_lock with a dummy mutex class that tracks whether each
+ // operation has been called or not.
+ {
+ Monitor monitor;
+ TrackedMutex mutex{&monitor};
+
+ std::shared_lock<TrackedMutex> lock(mutex);
+ assert(monitor.lock_shared_called);
+ assert(lock.owns_lock());
+
+ lock.unlock();
+ assert(monitor.unlock_shared_called);
+ }
return 0;
}
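
TrackedMutex works because std::shared_lock only ever calls the *_shared member functions of its mutex type; any type providing them is usable. The same idea as a standalone sketch with a hypothetical counting mutex (not from the patch):

    #include <cassert>
    #include <shared_mutex>

    // Hypothetical: the smallest mutex surface std::shared_lock needs.
    struct MinimalSharedMutex {
      int shared_holds = 0;
      void lock_shared() { ++shared_holds; }
      void unlock_shared() { --shared_holds; }
      bool try_lock_shared() { ++shared_holds; return true; }
    };

    int main() {
      MinimalSharedMutex m;
      {
        std::shared_lock<MinimalSharedMutex> lock(m); // calls m.lock_shared()
        assert(m.shared_holds == 1);
      } // the destructor calls m.unlock_shared()
      assert(m.shared_holds == 0);
      return 0;
    }
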
diff --git a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/lock.pass.cpp b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/lock.pass.cpp
index edb7c42356ac..d36ca1d38f8f 100644
--- a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/lock.pass.cpp
+++ b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/lock.pass.cpp
@@ -5,10 +5,9 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-//
+
// UNSUPPORTED: no-threads
// UNSUPPORTED: c++03, c++11
-// ALLOW_RETRIES: 2
// <shared_mutex>
@@ -16,10 +15,9 @@
// void lock();
+#include <atomic>
#include <cassert>
-#include <chrono>
-#include <cstdlib>
-#include <mutex>
+#include <mutex> // std::defer_lock
#include <shared_mutex>
#include <system_error>
#include <thread>
@@ -28,71 +26,99 @@
#include "make_test_thread.h"
#include "test_macros.h"
-std::shared_timed_mutex m;
+struct Monitor {
+ bool lock_shared_called = false;
+ bool unlock_shared_called = false;
+};
-typedef std::chrono::system_clock Clock;
-typedef Clock::time_point time_point;
-typedef Clock::duration duration;
-typedef std::chrono::milliseconds ms;
-typedef std::chrono::nanoseconds ns;
+struct TrackedMutex {
+ Monitor* monitor = nullptr;
-ms WaitTime = ms(250);
+ void lock_shared() {
+ if (monitor != nullptr)
+ monitor->lock_shared_called = true;
+ }
+ void unlock_shared() {
+ if (monitor != nullptr)
+ monitor->unlock_shared_called = true;
+ }
+};
-// Thread sanitizer causes more overhead and will sometimes cause this test
-// to fail. To prevent this we give Thread sanitizer more time to complete the
-// test.
-#if !defined(TEST_IS_EXECUTED_IN_A_SLOW_ENVIRONMENT)
-ms Tolerance = ms(25);
-#else
-ms Tolerance = ms(25 * 5);
-#endif
+template <class Mutex>
+void test() {
+ // Basic sanity test
+ {
+ Mutex mutex;
+ std::vector<std::thread> threads;
+ std::atomic<bool> ready(false);
+ for (int i = 0; i != 5; ++i) {
+ threads.push_back(support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+ std::shared_lock<Mutex> lock(mutex, std::defer_lock);
+ lock.lock();
+ assert(lock.owns_lock());
+ }));
+ }
+
+ ready = true;
+ for (auto& t : threads)
+ t.join();
+ }
-void f()
-{
- std::shared_lock<std::shared_timed_mutex> lk(m, std::defer_lock);
- time_point t0 = Clock::now();
- lk.lock();
- time_point t1 = Clock::now();
- assert(lk.owns_lock() == true);
- ns d = t1 - t0 - WaitTime;
- assert(d < Tolerance); // within tolerance
+ // Try locking the same shared_lock again in the same thread. This should throw an exception.
+ {
+ Mutex mutex;
+ std::shared_lock<Mutex> lock(mutex, std::defer_lock);
+ lock.lock();
+ assert(lock.owns_lock());
#ifndef TEST_HAS_NO_EXCEPTIONS
- try
- {
- lk.lock();
- assert(false);
- }
- catch (std::system_error& e)
- {
- assert(e.code().value() == EDEADLK);
+ try {
+ lock.lock();
+ assert(false);
+ } catch (std::system_error const& e) {
+ assert(e.code() == std::errc::resource_deadlock_would_occur);
}
#endif
- lk.unlock();
- lk.release();
+ }
+
+  // Try locking a shared_lock that isn't associated with any mutex. This should throw an exception.
+ {
+ std::shared_lock<Mutex> lock; // no associated mutex
#ifndef TEST_HAS_NO_EXCEPTIONS
- try
- {
- lk.lock();
- assert(false);
- }
- catch (std::system_error& e)
- {
- assert(e.code().value() == EPERM);
+ try {
+ lock.lock();
+ assert(false);
+ } catch (std::system_error const& e) {
+ assert(e.code() == std::errc::operation_not_permitted);
}
#endif
+ }
}
-int main(int, char**)
-{
- m.lock();
- std::vector<std::thread> v;
- for (int i = 0; i < 5; ++i)
- v.push_back(support::make_test_thread(f));
- std::this_thread::sleep_for(WaitTime);
- m.unlock();
- for (auto& t : v)
- t.join();
+int main(int, char**) {
+#if TEST_STD_VER >= 17
+ test<std::shared_mutex>();
+#endif
+ test<std::shared_timed_mutex>();
+ test<TrackedMutex>();
+
+ // Use shared_lock with a dummy mutex class that tracks whether each
+ // operation has been called or not.
+ {
+ Monitor monitor;
+ TrackedMutex mutex{&monitor};
+
+ std::shared_lock<TrackedMutex> lock(mutex, std::defer_lock);
+ lock.lock();
+ assert(monitor.lock_shared_called);
+ assert(lock.owns_lock());
+
+ lock.unlock();
+ assert(monitor.unlock_shared_called);
+ }
return 0;
}
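
The switch from e.code().value() == EDEADLK to e.code() == std::errc::resource_deadlock_would_occur is more than a style change: comparing an error_code against an errc goes through the error category's condition mapping, so it remains correct on platforms whose raw errno values differ. A quick illustration (not part of the patch):

    #include <cassert>
    #include <cerrno>
    #include <system_error>

    int main() {
      std::error_code ec(EDEADLK, std::generic_category());
      assert(ec == std::errc::resource_deadlock_would_occur); // condition-based comparison
      assert(ec.value() == EDEADLK);                          // raw, platform-specific value
      return 0;
    }
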
diff --git a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/try_lock.pass.cpp b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/try_lock.pass.cpp
index 0e707fcf2d50..b6146680b6e3 100644
--- a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/try_lock.pass.cpp
+++ b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/try_lock.pass.cpp
@@ -5,11 +5,9 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-//
+
// UNSUPPORTED: no-threads
// UNSUPPORTED: c++03, c++11
-//
-// ALLOW_RETRIES: 2
// <shared_mutex>
@@ -17,60 +15,115 @@
// bool try_lock();
+#include <atomic>
#include <cassert>
-#include <mutex>
+#include <mutex> // std::defer_lock
#include <shared_mutex>
#include <system_error>
+#include <thread>
+#include <vector>
+#include "make_test_thread.h"
#include "test_macros.h"
-bool try_lock_called = false;
+struct Monitor {
+ bool try_lock_shared_called = false;
+ bool unlock_shared_called = false;
+};
-struct mutex
-{
- bool try_lock_shared()
- {
- try_lock_called = !try_lock_called;
- return try_lock_called;
- }
- void unlock_shared() {}
+struct TrackedMutex {
+ Monitor* monitor = nullptr;
+
+ bool try_lock_shared() {
+ if (monitor != nullptr)
+ monitor->try_lock_shared_called = true;
+ return true;
+ }
+ void unlock_shared() {
+ if (monitor != nullptr)
+ monitor->unlock_shared_called = true;
+ }
};
-mutex m;
+template <class Mutex>
+void test() {
+ // Basic sanity test
+ {
+ Mutex mutex;
+ std::vector<std::thread> threads;
+ std::atomic<bool> ready(false);
+ for (int i = 0; i != 5; ++i) {
+ threads.push_back(support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
-int main(int, char**)
-{
- std::shared_lock<mutex> lk(m, std::defer_lock);
- assert(lk.try_lock() == true);
- assert(try_lock_called == true);
- assert(lk.owns_lock() == true);
-#ifndef TEST_HAS_NO_EXCEPTIONS
- try
- {
- TEST_IGNORE_NODISCARD lk.try_lock();
- assert(false);
+ std::shared_lock<Mutex> lock(mutex, std::defer_lock);
+ bool result = lock.try_lock();
+ assert(result);
+ assert(lock.owns_lock());
+ }));
}
- catch (std::system_error& e)
- {
- assert(e.code().value() == EDEADLK);
+
+ ready = true;
+ for (auto& t : threads)
+ t.join();
+ }
+
+  // Make sure that we throw an exception if we try to re-lock a shared_lock
+  // that already owns its mutex.
+ {
+ Mutex mutex;
+
+ std::shared_lock<Mutex> lock(mutex, std::defer_lock);
+ assert(lock.try_lock());
+ assert(lock.owns_lock());
+#ifndef TEST_HAS_NO_EXCEPTIONS
+ try {
+ TEST_IGNORE_NODISCARD lock.try_lock();
+ assert(false);
+ } catch (std::system_error const& e) {
+ assert(e.code() == std::errc::resource_deadlock_would_occur);
}
#endif
- lk.unlock();
- assert(lk.try_lock() == false);
- assert(try_lock_called == false);
- assert(lk.owns_lock() == false);
- lk.release();
+ }
+
+ // Make sure that we throw an exception if we try to lock a shared_lock
+  // that is not associated with any mutex.
+ {
+    std::shared_lock<Mutex> lock; // not associated with a mutex
#ifndef TEST_HAS_NO_EXCEPTIONS
- try
- {
- TEST_IGNORE_NODISCARD lk.try_lock();
- assert(false);
- }
- catch (std::system_error& e)
- {
- assert(e.code().value() == EPERM);
+ try {
+ TEST_IGNORE_NODISCARD lock.try_lock();
+ assert(false);
+ } catch (std::system_error const& e) {
+ assert(e.code() == std::errc::operation_not_permitted);
}
#endif
+ }
+}
+
+int main(int, char**) {
+#if TEST_STD_VER >= 17
+ test<std::shared_mutex>();
+#endif
+ test<std::shared_timed_mutex>();
+ test<TrackedMutex>();
+
+ // Use shared_lock with a dummy mutex class that tracks whether each
+ // operation has been called or not.
+ {
+ Monitor monitor;
+ TrackedMutex mutex{&monitor};
+
+ std::shared_lock<TrackedMutex> lock(mutex, std::defer_lock);
+ bool result = lock.try_lock();
+ assert(result);
+ assert(monitor.try_lock_shared_called);
+ assert(lock.owns_lock());
+ lock.unlock();
+ assert(monitor.unlock_shared_called);
+ }
return 0;
}
diff --git a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.unique/implicit_ctad.pass.cpp b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.unique/implicit_ctad.pass.cpp
index ffe651c6b744..337ad4c45a94 100644
--- a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.unique/implicit_ctad.pass.cpp
+++ b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.unique/implicit_ctad.pass.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
// UNSUPPORTED: no-threads
-// UNSUPPORTED: c++98, c++03, c++11, c++14
+// UNSUPPORTED: c++03, c++11, c++14
// <mutex>
diff --git a/libcxx/test/std/utilities/function.objects/func.search/func.search.bm/implicit_ctad.pass.cpp b/libcxx/test/std/utilities/function.objects/func.search/func.search.bm/implicit_ctad.pass.cpp
index 863b4a5c2569..50c89d6b8db6 100644
--- a/libcxx/test/std/utilities/function.objects/func.search/func.search.bm/implicit_ctad.pass.cpp
+++ b/libcxx/test/std/utilities/function.objects/func.search/func.search.bm/implicit_ctad.pass.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
-// UNSUPPORTED: c++98, c++03, c++11, c++14
+// UNSUPPORTED: c++03, c++11, c++14
// <functional>
diff --git a/libcxx/test/std/utilities/function.objects/func.search/func.search.bmh/implicit_ctad.pass.cpp b/libcxx/test/std/utilities/function.objects/func.search/func.search.bmh/implicit_ctad.pass.cpp
index 778f6d3bd2cb..9cb4ef5afbb5 100644
--- a/libcxx/test/std/utilities/function.objects/func.search/func.search.bmh/implicit_ctad.pass.cpp
+++ b/libcxx/test/std/utilities/function.objects/func.search/func.search.bmh/implicit_ctad.pass.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
-// UNSUPPORTED: c++98, c++03, c++11, c++14
+// UNSUPPORTED: c++03, c++11, c++14
// <functional>
diff --git a/libcxx/test/std/utilities/function.objects/func.search/func.search.default/implicit_ctad.pass.cpp b/libcxx/test/std/utilities/function.objects/func.search/func.search.default/implicit_ctad.pass.cpp
index 3c9029566d92..6334ed16ed52 100644
--- a/libcxx/test/std/utilities/function.objects/func.search/func.search.default/implicit_ctad.pass.cpp
+++ b/libcxx/test/std/utilities/function.objects/func.search/func.search.default/implicit_ctad.pass.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
-// UNSUPPORTED: c++98, c++03, c++11, c++14
+// UNSUPPORTED: c++03, c++11, c++14
// <functional>
diff --git a/libcxx/test/std/utilities/function.objects/operations.implicit_ctad.pass.cpp b/libcxx/test/std/utilities/function.objects/operations.implicit_ctad.pass.cpp
index 03c46d232c38..bb4fb4bf71c1 100644
--- a/libcxx/test/std/utilities/function.objects/operations.implicit_ctad.pass.cpp
+++ b/libcxx/test/std/utilities/function.objects/operations.implicit_ctad.pass.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
-// UNSUPPORTED: c++98, c++03, c++11, c++14
+// UNSUPPORTED: c++03, c++11, c++14
// <functional>
diff --git a/libcxx/test/std/utilities/meta/meta.unary/meta.unary.prop/has_unique_object_representations.pass.cpp b/libcxx/test/std/utilities/meta/meta.unary/meta.unary.prop/has_unique_object_representations.pass.cpp
index ce34c8e958db..b8b84bb90882 100644
--- a/libcxx/test/std/utilities/meta/meta.unary/meta.unary.prop/has_unique_object_representations.pass.cpp
+++ b/libcxx/test/std/utilities/meta/meta.unary/meta.unary.prop/has_unique_object_representations.pass.cpp
@@ -99,6 +99,8 @@ int main(int, char**)
test_has_unique_object_representations<unsigned>();
test_has_unique_object_representations<NonEmptyUnion>();
test_has_unique_object_representations<char[3]>();
+ test_has_unique_object_representations<char[3][4]>();
+ test_has_unique_object_representations<char[3][4][5]>();
test_has_unique_object_representations<char[]>();
diff --git a/libcxx/test/std/utilities/utility/mem.res/mem.res.global/new_delete_resource.pass.cpp b/libcxx/test/std/utilities/utility/mem.res/mem.res.global/new_delete_resource.pass.cpp
index 68a82f6ce90b..7b3107029d4d 100644
--- a/libcxx/test/std/utilities/utility/mem.res/mem.res.global/new_delete_resource.pass.cpp
+++ b/libcxx/test/std/utilities/utility/mem.res/mem.res.global/new_delete_resource.pass.cpp
@@ -76,7 +76,7 @@ void test_allocate_deallocate() {
ASSERT_WITH_LIBRARY_INTERNAL_ALLOCATIONS(globalMemCounter.checkOutstandingNewEq(1));
ASSERT_WITH_LIBRARY_INTERNAL_ALLOCATIONS(globalMemCounter.checkLastNewSizeEq(50));
- r1.deallocate(ret, 1);
+ r1.deallocate(ret, 50);
assert(globalMemCounter.checkOutstandingNewEq(0));
ASSERT_WITH_LIBRARY_INTERNAL_ALLOCATIONS(globalMemCounter.checkDeleteCalledEq(1));
}
diff --git a/libcxx/utils/libcxx/test/dsl.py b/libcxx/utils/libcxx/test/dsl.py
index 387862ae6f49..7ac66d449b1c 100644
--- a/libcxx/utils/libcxx/test/dsl.py
+++ b/libcxx/utils/libcxx/test/dsl.py
@@ -8,8 +8,8 @@
import os
import pickle
-import pipes
import platform
+import shlex
import shutil
import tempfile
@@ -290,7 +290,7 @@ def hasAnyLocale(config, locales):
}
#endif
"""
- return programSucceeds(config, program, args=[pipes.quote(l) for l in locales])
+ return programSucceeds(config, program, args=[shlex.quote(l) for l in locales])
@_memoizeExpensiveOperation(lambda c, flags="": (c.substitutions, c.environment, flags))
diff --git a/libcxx/utils/libcxx/test/features.py b/libcxx/utils/libcxx/test/features.py
index c81b56b1af54..093cd39ea64c 100644
--- a/libcxx/utils/libcxx/test/features.py
+++ b/libcxx/utils/libcxx/test/features.py
@@ -38,6 +38,39 @@ def _getAndroidDeviceApi(cfg):
)
)
+
+def _mingwSupportsModules(cfg):
+ # Only mingw headers are known to work with libc++ built as a module,
+ # at the moment.
+ if not "__MINGW32__" in compilerMacros(cfg):
+ return False
+ # For mingw headers, check for a version known to support being built
+ # as a module.
+ return sourceBuilds(
+ cfg,
+ """
+ #include <_mingw_mac.h>
+ #if __MINGW64_VERSION_MAJOR < 12
+ #error Headers known to be incompatible
+ #elif __MINGW64_VERSION_MAJOR == 12
+ // The headers were fixed to work with libc++ modules during
+ // __MINGW64_VERSION_MAJOR == 12. The headers became compatible
+ // with libc++ built as a module in
+ // 1652e9241b5d8a5a779c6582b1c3c4f4a7cc66e5 (Apr 2024), but the
+ // following commit 8c13b28ace68f2c0094d45121d59a4b951b533ed
+ // removed the now unused __mingw_static_ovr define. Use this
+ // as indicator for whether we've got new enough headers.
+ #ifdef __mingw_static_ovr
+ #error Headers too old
+ #endif
+ #else
+ // __MINGW64_VERSION_MAJOR > 12 should be ok.
+ #endif
+ int main() { return 0; }
+ """,
+ )
+
+
# Lit features are evaluated in order. Some checks may require the compiler detection to have
# run first in order to work properly.
DEFAULT_FEATURES = [
@@ -281,7 +314,7 @@ DEFAULT_FEATURES = [
# Any declaration of a library function shall have external linkage.
when=lambda cfg: "__ANDROID__" in compilerMacros(cfg)
or "__FreeBSD__" in compilerMacros(cfg)
- or "_WIN32" in compilerMacros(cfg)
+ or ("_WIN32" in compilerMacros(cfg) and not _mingwSupportsModules(cfg))
or platform.system().lower().startswith("aix")
# Avoid building on platforms that don't support modules properly.
or not hasCompileFlag(cfg, "-Wno-reserved-module-identifier"),
diff --git a/libcxxabi/CMakeLists.txt b/libcxxabi/CMakeLists.txt
index da998d2221dc..f7673da25d20 100644
--- a/libcxxabi/CMakeLists.txt
+++ b/libcxxabi/CMakeLists.txt
@@ -5,6 +5,7 @@
#===============================================================================
cmake_minimum_required(VERSION 3.20.0)
+set(LLVM_SUBPROJECT_TITLE "libc++abi")
set(LLVM_COMMON_CMAKE_UTILS "${CMAKE_CURRENT_SOURCE_DIR}/../cmake")
diff --git a/libcxxabi/include/cxxabi.h b/libcxxabi/include/cxxabi.h
index d0701181751c..0e3969084e04 100644
--- a/libcxxabi/include/cxxabi.h
+++ b/libcxxabi/include/cxxabi.h
@@ -48,13 +48,17 @@ extern _LIBCXXABI_FUNC_VIS void
__cxa_free_exception(void *thrown_exception) throw();
// This function is an LLVM extension, which mirrors the same extension in libsupc++ and libcxxrt
extern _LIBCXXABI_FUNC_VIS __cxa_exception*
+#ifdef __wasm__
+// In Wasm, a destructor returns its argument
+__cxa_init_primary_exception(void* object, std::type_info* tinfo, void*(_LIBCXXABI_DTOR_FUNC* dest)(void*)) throw();
+#else
__cxa_init_primary_exception(void* object, std::type_info* tinfo, void(_LIBCXXABI_DTOR_FUNC* dest)(void*)) throw();
+#endif
// 2.4.3 Throwing the Exception Object
extern _LIBCXXABI_FUNC_VIS _LIBCXXABI_NORETURN void
__cxa_throw(void *thrown_exception, std::type_info *tinfo,
-#ifdef __USING_WASM_EXCEPTIONS__
- // In Wasm, a destructor returns its argument
+#ifdef __wasm__
void *(_LIBCXXABI_DTOR_FUNC *dest)(void *));
#else
void (_LIBCXXABI_DTOR_FUNC *dest)(void *));
diff --git a/libcxxabi/src/cxa_exception.cpp b/libcxxabi/src/cxa_exception.cpp
index 65e9f4504dda..ff69a4c65e46 100644
--- a/libcxxabi/src/cxa_exception.cpp
+++ b/libcxxabi/src/cxa_exception.cpp
@@ -207,7 +207,12 @@ void __cxa_free_exception(void *thrown_object) throw() {
}
__cxa_exception* __cxa_init_primary_exception(void* object, std::type_info* tinfo,
+#ifdef __wasm__
+// In Wasm, a destructor returns its argument
+ void *(_LIBCXXABI_DTOR_FUNC* dest)(void*)) throw() {
+#else
void(_LIBCXXABI_DTOR_FUNC* dest)(void*)) throw() {
+#endif
__cxa_exception* exception_header = cxa_exception_from_thrown_object(object);
exception_header->referenceCount = 0;
exception_header->unexpectedHandler = std::get_unexpected();
@@ -267,7 +272,7 @@ will call terminate, assuming that there was no handler for the
exception.
*/
void
-#ifdef __USING_WASM_EXCEPTIONS__
+#ifdef __wasm__
// In Wasm, a destructor returns its argument
__cxa_throw(void *thrown_object, std::type_info *tinfo, void *(_LIBCXXABI_DTOR_FUNC *dest)(void *)) {
#else
diff --git a/libcxxabi/src/cxa_exception.h b/libcxxabi/src/cxa_exception.h
index 10712f6f47bb..aba08f299210 100644
--- a/libcxxabi/src/cxa_exception.h
+++ b/libcxxabi/src/cxa_exception.h
@@ -43,7 +43,7 @@ struct _LIBCXXABI_HIDDEN __cxa_exception {
// Manage the exception object itself.
std::type_info *exceptionType;
-#ifdef __USING_WASM_EXCEPTIONS__
+#ifdef __wasm__
// In Wasm, a destructor returns its argument
void *(_LIBCXXABI_DTOR_FUNC *exceptionDestructor)(void *);
#else
diff --git a/libcxxabi/src/cxa_personality.cpp b/libcxxabi/src/cxa_personality.cpp
index d95d78131940..843a18a4cbd8 100644
--- a/libcxxabi/src/cxa_personality.cpp
+++ b/libcxxabi/src/cxa_personality.cpp
@@ -70,7 +70,7 @@ extern "C" EXCEPTION_DISPOSITION _GCC_specific_handler(PEXCEPTION_RECORD,
+------------------+--+-----+-----+------------------------+--------------------------+
| callSiteTableLength | (ULEB128) | Call Site Table length, used to find Action table |
+---------------------+-----------+---------------------------------------------------+
-#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__USING_WASM_EXCEPTIONS__)
+#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__WASM_EXCEPTIONS__)
+---------------------+-----------+------------------------------------------------+
| Beginning of Call Site Table The current ip lies within the |
| ... (start, length) range of one of these |
@@ -84,7 +84,7 @@ extern "C" EXCEPTION_DISPOSITION _GCC_specific_handler(PEXCEPTION_RECORD,
| +-------------+---------------------------------+------------------------------+ |
| ... |
+----------------------------------------------------------------------------------+
-#else // __USING_SJLJ_EXCEPTIONS__ || __USING_WASM_EXCEPTIONS__
+#else // __USING_SJLJ_EXCEPTIONS__ || __WASM_EXCEPTIONS__
+---------------------+-----------+------------------------------------------------+
| Beginning of Call Site Table The current ip is a 1-based index into |
| ... this table. Or it is -1 meaning no |
@@ -97,7 +97,7 @@ extern "C" EXCEPTION_DISPOSITION _GCC_specific_handler(PEXCEPTION_RECORD,
| +-------------+---------------------------------+------------------------------+ |
| ... |
+----------------------------------------------------------------------------------+
-#endif // __USING_SJLJ_EXCEPTIONS__ || __USING_WASM_EXCEPTIONS__
+#endif // __USING_SJLJ_EXCEPTIONS__ || __WASM_EXCEPTIONS__
+---------------------------------------------------------------------+
| Beginning of Action Table ttypeIndex == 0 : cleanup |
| ... ttypeIndex > 0 : catch |
@@ -547,7 +547,7 @@ void
set_registers(_Unwind_Exception* unwind_exception, _Unwind_Context* context,
const scan_results& results)
{
-#if defined(__USING_SJLJ_EXCEPTIONS__) || defined(__USING_WASM_EXCEPTIONS__)
+#if defined(__USING_SJLJ_EXCEPTIONS__) || defined(__WASM_EXCEPTIONS__)
#define __builtin_eh_return_data_regno(regno) regno
#elif defined(__ibmxl__)
// IBM xlclang++ compiler does not support __builtin_eh_return_data_regno.
@@ -642,7 +642,7 @@ static void scan_eh_tab(scan_results &results, _Unwind_Action actions,
// Get beginning current frame's code (as defined by the
// emitted dwarf code)
uintptr_t funcStart = _Unwind_GetRegionStart(context);
-#if defined(__USING_SJLJ_EXCEPTIONS__) || defined(__USING_WASM_EXCEPTIONS__)
+#if defined(__USING_SJLJ_EXCEPTIONS__) || defined(__WASM_EXCEPTIONS__)
if (ip == uintptr_t(-1))
{
// no action
@@ -652,9 +652,9 @@ static void scan_eh_tab(scan_results &results, _Unwind_Action actions,
else if (ip == 0)
call_terminate(native_exception, unwind_exception);
// ip is 1-based index into call site table
-#else // !__USING_SJLJ_EXCEPTIONS__ && !__USING_WASM_EXCEPTIONS__
+#else // !__USING_SJLJ_EXCEPTIONS__ && !__WASM_EXCEPTIONS__
uintptr_t ipOffset = ip - funcStart;
-#endif // !__USING_SJLJ_EXCEPTIONS__ && !__USING_WASM_EXCEPTIONS__
+#endif // !__USING_SJLJ_EXCEPTIONS__ && !__WASM_EXCEPTIONS__
const uint8_t* classInfo = NULL;
// Note: See JITDwarfEmitter::EmitExceptionTable(...) for corresponding
// dwarf emission
@@ -675,7 +675,7 @@ static void scan_eh_tab(scan_results &results, _Unwind_Action actions,
// Walk call-site table looking for range that
// includes current PC.
uint8_t callSiteEncoding = *lsda++;
-#if defined(__USING_SJLJ_EXCEPTIONS__) || defined(__USING_WASM_EXCEPTIONS__)
+#if defined(__USING_SJLJ_EXCEPTIONS__) || defined(__WASM_EXCEPTIONS__)
(void)callSiteEncoding; // When using SjLj/Wasm exceptions, callSiteEncoding is never used
#endif
uint32_t callSiteTableLength = static_cast<uint32_t>(readULEB128(&lsda));
@@ -686,7 +686,7 @@ static void scan_eh_tab(scan_results &results, _Unwind_Action actions,
while (callSitePtr < callSiteTableEnd)
{
// There is one entry per call site.
-#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__USING_WASM_EXCEPTIONS__)
+#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__WASM_EXCEPTIONS__)
// The call sites are non-overlapping in [start, start+length)
// The call sites are ordered in increasing value of start
uintptr_t start = readEncodedPointer(&callSitePtr, callSiteEncoding);
@@ -694,15 +694,15 @@ static void scan_eh_tab(scan_results &results, _Unwind_Action actions,
uintptr_t landingPad = readEncodedPointer(&callSitePtr, callSiteEncoding);
uintptr_t actionEntry = readULEB128(&callSitePtr);
if ((start <= ipOffset) && (ipOffset < (start + length)))
-#else // __USING_SJLJ_EXCEPTIONS__ || __USING_WASM_EXCEPTIONS__
+#else // __USING_SJLJ_EXCEPTIONS__ || __WASM_EXCEPTIONS__
// ip is 1-based index into this table
uintptr_t landingPad = readULEB128(&callSitePtr);
uintptr_t actionEntry = readULEB128(&callSitePtr);
if (--ip == 0)
-#endif // __USING_SJLJ_EXCEPTIONS__ || __USING_WASM_EXCEPTIONS__
+#endif // __USING_SJLJ_EXCEPTIONS__ || __WASM_EXCEPTIONS__
{
// Found the call site containing ip.
-#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__USING_WASM_EXCEPTIONS__)
+#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__WASM_EXCEPTIONS__)
if (landingPad == 0)
{
// No handler here
@@ -710,9 +710,9 @@ static void scan_eh_tab(scan_results &results, _Unwind_Action actions,
return;
}
landingPad = (uintptr_t)lpStart + landingPad;
-#else // __USING_SJLJ_EXCEPTIONS__ || __USING_WASM_EXCEPTIONS__
+#else // __USING_SJLJ_EXCEPTIONS__ || __WASM_EXCEPTIONS__
++landingPad;
-#endif // __USING_SJLJ_EXCEPTIONS__ || __USING_WASM_EXCEPTIONS__
+#endif // __USING_SJLJ_EXCEPTIONS__ || __WASM_EXCEPTIONS__
results.landingPad = landingPad;
if (actionEntry == 0)
{
@@ -838,7 +838,7 @@ static void scan_eh_tab(scan_results &results, _Unwind_Action actions,
action += actionOffset;
} // there is no break out of this loop, only return
}
-#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__USING_WASM_EXCEPTIONS__)
+#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__WASM_EXCEPTIONS__)
else if (ipOffset < start)
{
// There is no call site for this ip
@@ -846,7 +846,7 @@ static void scan_eh_tab(scan_results &results, _Unwind_Action actions,
// Possible stack corruption.
call_terminate(native_exception, unwind_exception);
}
-#endif // !__USING_SJLJ_EXCEPTIONS__ && !__USING_WASM_EXCEPTIONS__
+#endif // !__USING_SJLJ_EXCEPTIONS__ && !__WASM_EXCEPTIONS__
} // there might be some tricky cases which break out of this loop
// It is possible that no eh table entry specify how to handle
@@ -903,7 +903,7 @@ _UA_CLEANUP_PHASE
*/
#if !defined(_LIBCXXABI_ARM_EHABI)
-#ifdef __USING_WASM_EXCEPTIONS__
+#ifdef __WASM_EXCEPTIONS__
_Unwind_Reason_Code __gxx_personality_wasm0
#elif defined(__SEH__) && !defined(__USING_SJLJ_EXCEPTIONS__)
static _Unwind_Reason_Code __gxx_personality_imp
@@ -972,7 +972,7 @@ __gxx_personality_v0
exc->languageSpecificData = results.languageSpecificData;
exc->catchTemp = reinterpret_cast<void*>(results.landingPad);
exc->adjustedPtr = results.adjustedPtr;
-#ifdef __USING_WASM_EXCEPTIONS__
+#ifdef __WASM_EXCEPTIONS__
// Wasm only uses a single phase (_UA_SEARCH_PHASE), so save the
// results here.
set_registers(unwind_exception, context, results);
diff --git a/libunwind/CMakeLists.txt b/libunwind/CMakeLists.txt
index 806d5a783ec3..2117cd9e756e 100644
--- a/libunwind/CMakeLists.txt
+++ b/libunwind/CMakeLists.txt
@@ -3,6 +3,7 @@
#===============================================================================
cmake_minimum_required(VERSION 3.20.0)
+set(LLVM_SUBPROJECT_TITLE "libunwind")
set(LLVM_COMMON_CMAKE_UTILS "${CMAKE_CURRENT_SOURCE_DIR}/../cmake")
diff --git a/libunwind/include/__libunwind_config.h b/libunwind/include/__libunwind_config.h
index 8db336b2d727..028b9e3baa80 100644
--- a/libunwind/include/__libunwind_config.h
+++ b/libunwind/include/__libunwind_config.h
@@ -180,6 +180,10 @@
#endif
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER \
_LIBUNWIND_HIGHEST_DWARF_REGISTER_LOONGARCH
+#elif defined(__wasm__)
+// Unused
+#define _LIBUNWIND_CONTEXT_SIZE 0
+#define _LIBUNWIND_CURSOR_SIZE 0
# else
# error "Unsupported architecture."
# endif
diff --git a/libunwind/src/Unwind-wasm.c b/libunwind/src/Unwind-wasm.c
index f7f39d38b59c..b18b32c5d178 100644
--- a/libunwind/src/Unwind-wasm.c
+++ b/libunwind/src/Unwind-wasm.c
@@ -14,7 +14,7 @@
#include "config.h"
-#ifdef __USING_WASM_EXCEPTIONS__
+#ifdef __WASM_EXCEPTIONS__
#include "unwind.h"
#include <threads.h>
@@ -120,4 +120,4 @@ _Unwind_GetRegionStart(struct _Unwind_Context *context) {
return 0;
}
-#endif // defined(__USING_WASM_EXCEPTIONS__)
+#endif // defined(__WASM_EXCEPTIONS__)
diff --git a/libunwind/src/UnwindCursor.hpp b/libunwind/src/UnwindCursor.hpp
index 7753936a5894..66fe8e2a32cc 100644
--- a/libunwind/src/UnwindCursor.hpp
+++ b/libunwind/src/UnwindCursor.hpp
@@ -2416,7 +2416,7 @@ int UnwindCursor<A, R>::stepWithTBTable(pint_t pc, tbtable *TBTable,
}
// Reset LR in the current context.
- newRegisters.setLR(NULL);
+ newRegisters.setLR(static_cast<uintptr_t>(NULL));
_LIBUNWIND_TRACE_UNWINDING(
"Extract info from lastStack=%p, returnAddress=%p",
diff --git a/libunwind/src/UnwindLevel1.c b/libunwind/src/UnwindLevel1.c
index 05d0f2cb0a0a..48e7bc3b9e00 100644
--- a/libunwind/src/UnwindLevel1.c
+++ b/libunwind/src/UnwindLevel1.c
@@ -31,7 +31,8 @@
#include "libunwind_ext.h"
#include "unwind.h"
-#if !defined(_LIBUNWIND_ARM_EHABI) && !defined(__USING_SJLJ_EXCEPTIONS__)
+#if !defined(_LIBUNWIND_ARM_EHABI) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
+ !defined(__wasm__)
#ifndef _LIBUNWIND_SUPPORT_SEH_UNWIND
diff --git a/libunwind/src/UnwindRegistersRestore.S b/libunwind/src/UnwindRegistersRestore.S
index 42c2488fc7cf..67d9e0571189 100644
--- a/libunwind/src/UnwindRegistersRestore.S
+++ b/libunwind/src/UnwindRegistersRestore.S
@@ -20,7 +20,7 @@
.text
#endif
-#if !defined(__USING_SJLJ_EXCEPTIONS__)
+#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__)
#if defined(__i386__)
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_jumpto)
@@ -1232,7 +1232,7 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind19Registers_loongarch6jumptoEv)
#endif
-#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */
+#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__) */
NO_EXEC_STACK_DIRECTIVE
diff --git a/libunwind/src/UnwindRegistersSave.S b/libunwind/src/UnwindRegistersSave.S
index 19a0e87d683c..5bf6055fe414 100644
--- a/libunwind/src/UnwindRegistersSave.S
+++ b/libunwind/src/UnwindRegistersSave.S
@@ -20,7 +20,7 @@
.text
#endif
-#if !defined(__USING_SJLJ_EXCEPTIONS__)
+#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__)
#if defined(__i386__)
@@ -1177,6 +1177,6 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
WEAK_ALIAS(__unw_getcontext, unw_getcontext)
-#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */
+#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__) */
NO_EXEC_STACK_DIRECTIVE
diff --git a/libunwind/src/libunwind.cpp b/libunwind/src/libunwind.cpp
index 217dde909863..cf39ec5f7dbd 100644
--- a/libunwind/src/libunwind.cpp
+++ b/libunwind/src/libunwind.cpp
@@ -26,7 +26,7 @@
#include <sanitizer/asan_interface.h>
#endif
-#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__USING_WASM_EXCEPTIONS__)
+#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__)
#include "AddressSpace.hpp"
#include "UnwindCursor.hpp"
@@ -347,8 +347,7 @@ void __unw_remove_dynamic_eh_frame_section(unw_word_t eh_frame_start) {
}
#endif // defined(_LIBUNWIND_SUPPORT_DWARF_UNWIND)
-#endif // !defined(__USING_SJLJ_EXCEPTIONS__) &&
- // !defined(__USING_WASM_EXCEPTIONS__)
+#endif // !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__)
#ifdef __APPLE__
diff --git a/lld/COFF/DriverUtils.cpp b/lld/COFF/DriverUtils.cpp
index b4ff31a606da..6e8f74c83be4 100644
--- a/lld/COFF/DriverUtils.cpp
+++ b/lld/COFF/DriverUtils.cpp
@@ -21,6 +21,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/Object/COFF.h"
#include "llvm/Object/WindowsResource.h"
#include "llvm/Option/Arg.h"
@@ -39,6 +40,7 @@
#include <optional>
using namespace llvm::COFF;
+using namespace llvm::object;
using namespace llvm::opt;
using namespace llvm;
using llvm::sys::Process;
@@ -632,18 +634,6 @@ err:
fatal("invalid /export: " + arg);
}
-static StringRef undecorate(COFFLinkerContext &ctx, StringRef sym) {
- if (ctx.config.machine != I386)
- return sym;
- // In MSVC mode, a fully decorated stdcall function is exported
- // as-is with the leading underscore (with type IMPORT_NAME).
- // In MinGW mode, a decorated stdcall function gets the underscore
- // removed, just like normal cdecl functions.
- if (sym.starts_with("_") && sym.contains('@') && !ctx.config.mingw)
- return sym;
- return sym.starts_with("_") ? sym.substr(1) : sym;
-}
-
// Convert stdcall/fastcall style symbols into unsuffixed symbols,
// with or without a leading underscore. (MinGW specific.)
static StringRef killAt(StringRef sym, bool prefix) {
@@ -693,11 +683,29 @@ void LinkerDriver::fixupExports() {
for (Export &e : ctx.config.exports) {
if (!e.exportAs.empty()) {
e.exportName = e.exportAs;
- } else if (!e.forwardTo.empty()) {
- e.exportName = undecorate(ctx, e.name);
- } else {
- e.exportName = undecorate(ctx, e.extName.empty() ? e.name : e.extName);
+ continue;
+ }
+
+ StringRef sym =
+ !e.forwardTo.empty() || e.extName.empty() ? e.name : e.extName;
+ if (ctx.config.machine == I386 && sym.starts_with("_")) {
+ // In MSVC mode, a fully decorated stdcall function is exported
+ // as-is with the leading underscore (with type IMPORT_NAME).
+ // In MinGW mode, a decorated stdcall function gets the underscore
+ // removed, just like normal cdecl functions.
+ if (ctx.config.mingw || !sym.contains('@')) {
+ e.exportName = sym.substr(1);
+ continue;
+ }
+ }
+ if (isArm64EC(ctx.config.machine) && !e.data && !e.constant) {
+ if (std::optional<std::string> demangledName =
+ getArm64ECDemangledFunctionName(sym)) {
+ e.exportName = saver().save(*demangledName);
+ continue;
+ }
}
+ e.exportName = sym;
}
if (ctx.config.killAt && ctx.config.machine == I386) {
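
The restructured fixupExports() preserves the old undecorate() behavior while adding the ARM64EC path. Worked examples of the i386 rules (symbol names hypothetical):

    // i386, cdecl:             _foo   -> exported as foo    (MSVC and MinGW)
    // i386, stdcall in MSVC:   _foo@4 -> exported as _foo@4 (kept decorated)
    // i386, stdcall in MinGW:  _foo@4 -> exported as foo@4  (underscore removed)
    // ARM64EC function exports additionally try getArm64ECDemangledFunctionName()
    // and use the demangled name when one exists.
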
diff --git a/lld/ELF/Arch/AVR.cpp b/lld/ELF/Arch/AVR.cpp
index 9211eabc9669..2275f8694287 100644
--- a/lld/ELF/Arch/AVR.cpp
+++ b/lld/ELF/Arch/AVR.cpp
@@ -231,14 +231,13 @@ void AVR::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
// Since every jump destination is word aligned we gain an extra bit
case R_AVR_7_PCREL: {
- checkInt(loc, val - 2, 7, rel);
+ checkInt(loc, val - 2, 8, rel);
checkAlignment(loc, val, 2, rel);
const uint16_t target = (val - 2) >> 1;
write16le(loc, (read16le(loc) & 0xfc07) | ((target & 0x7f) << 3));
break;
}
case R_AVR_13_PCREL: {
- checkInt(loc, val - 2, 13, rel);
checkAlignment(loc, val, 2, rel);
const uint16_t target = (val - 2) >> 1;
write16le(loc, (read16le(loc) & 0xf000) | (target & 0xfff));
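
The relaxed check for R_AVR_7_PCREL follows from the word addressing noted in the comment above; a worked example of the arithmetic:

    // The 7-bit field stores a signed *word* offset: [-64, +63] words.
    // One word is 2 bytes, so the byte displacement (val - 2) spans
    // [-128, +126], i.e. a signed 8-bit range:
    //   checkInt(loc, val - 2, 8, rel);
    // R_AVR_13_PCREL loses its range check entirely, presumably because
    // rjmp/rcall wrap around the address space on small devices.
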
diff --git a/lld/ELF/Config.h b/lld/ELF/Config.h
index dbb81412453a..f0dfe7f377de 100644
--- a/lld/ELF/Config.h
+++ b/lld/ELF/Config.h
@@ -102,6 +102,9 @@ enum class GnuStackKind { None, Exec, NoExec };
// For --lto=
enum LtoKind : uint8_t {UnifiedThin, UnifiedRegular, Default};
+// For -z gcs=
+enum class GcsPolicy { Implicit, Never, Always };
+
struct SymbolVersion {
llvm::StringRef name;
bool isExternCpp;
@@ -188,6 +191,7 @@ struct Config {
StringRef zBtiReport = "none";
StringRef zCetReport = "none";
StringRef zPauthReport = "none";
+ StringRef zGcsReport = "none";
bool ltoBBAddrMap;
llvm::StringRef ltoBasicBlockSections;
std::pair<llvm::StringRef, llvm::StringRef> thinLTOObjectSuffixReplace;
@@ -341,6 +345,7 @@ struct Config {
UnresolvedPolicy unresolvedSymbols;
UnresolvedPolicy unresolvedSymbolsInShlib;
Target2Policy target2;
+ GcsPolicy zGcs;
bool power10Stubs;
ARMVFPArgKind armVFPArgs = ARMVFPArgKind::Default;
BuildIdKind buildId = BuildIdKind::None;
diff --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp
index 028cdcc83d2f..ddc574a11314 100644
--- a/lld/ELF/Driver.cpp
+++ b/lld/ELF/Driver.cpp
@@ -466,6 +466,10 @@ static void checkOptions() {
error("-z bti-report only supported on AArch64");
if (config->zPauthReport != "none")
error("-z pauth-report only supported on AArch64");
+ if (config->zGcsReport != "none")
+ error("-z gcs-report only supported on AArch64");
+ if (config->zGcs != GcsPolicy::Implicit)
+ error("-z gcs only supported on AArch64");
}
if (config->emachine != EM_386 && config->emachine != EM_X86_64 &&
@@ -560,6 +564,25 @@ static uint8_t getZStartStopVisibility(opt::InputArgList &args) {
return ret;
}
+static GcsPolicy getZGcs(opt::InputArgList &args) {
+ GcsPolicy ret = GcsPolicy::Implicit;
+ for (auto *arg : args.filtered(OPT_z)) {
+ std::pair<StringRef, StringRef> kv = StringRef(arg->getValue()).split('=');
+ if (kv.first == "gcs") {
+ arg->claim();
+ if (kv.second == "implicit")
+ ret = GcsPolicy::Implicit;
+ else if (kv.second == "never")
+ ret = GcsPolicy::Never;
+ else if (kv.second == "always")
+ ret = GcsPolicy::Always;
+ else
+ error("unknown -z gcs= value: " + kv.second);
+ }
+ }
+ return ret;
+}
+
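[Editor's note] getZGcs() claims every -z gcs= occurrence and keeps overwriting the result, so the last value on the command line wins; the force-gcs2.so and never-gcs2 cases in the new aarch64-feature-gcs.s test below rely on exactly this behavior.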
// Report a warning for an unknown -z option.
static void checkZOptions(opt::InputArgList &args) {
// This function is called before getTarget(), when certain options are not
@@ -1438,6 +1461,7 @@ static void readConfigs(opt::InputArgList &args) {
config->zCopyreloc = getZFlag(args, "copyreloc", "nocopyreloc", true);
config->zForceBti = hasZOption(args, "force-bti");
config->zForceIbt = hasZOption(args, "force-ibt");
+ config->zGcs = getZGcs(args);
config->zGlobal = hasZOption(args, "global");
config->zGnustack = getZGnuStack(args);
config->zHazardplt = hasZOption(args, "hazardplt");
@@ -1510,6 +1534,7 @@ static void readConfigs(opt::InputArgList &args) {
auto reports = {std::make_pair("bti-report", &config->zBtiReport),
std::make_pair("cet-report", &config->zCetReport),
+ std::make_pair("gcs-report", &config->zGcsReport),
std::make_pair("pauth-report", &config->zPauthReport)};
for (opt::Arg *arg : args.filtered(OPT_z)) {
std::pair<StringRef, StringRef> option =
@@ -2678,6 +2703,11 @@ static void readSecurityNotes() {
"GNU_PROPERTY_AARCH64_FEATURE_1_BTI property");
checkAndReportMissingFeature(
+ config->zGcsReport, features, GNU_PROPERTY_AARCH64_FEATURE_1_GCS,
+ toString(f) + ": -z gcs-report: file does not have "
+ "GNU_PROPERTY_AARCH64_FEATURE_1_GCS property");
+
+ checkAndReportMissingFeature(
config->zCetReport, features, GNU_PROPERTY_X86_FEATURE_1_IBT,
toString(f) + ": -z cet-report: file does not have "
"GNU_PROPERTY_X86_FEATURE_1_IBT property");
@@ -2729,6 +2759,12 @@ static void readSecurityNotes() {
// Force enable Shadow Stack.
if (config->zShstk)
config->andFeatures |= GNU_PROPERTY_X86_FEATURE_1_SHSTK;
+
+  // Force enable or disable GCS.
+ if (config->zGcs == GcsPolicy::Always)
+ config->andFeatures |= GNU_PROPERTY_AARCH64_FEATURE_1_GCS;
+ else if (config->zGcs == GcsPolicy::Never)
+ config->andFeatures &= ~GNU_PROPERTY_AARCH64_FEATURE_1_GCS;
}
static void initSectionsAndLocalSyms(ELFFileBase *file, bool ignoreComdats) {
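[Editor's note] config->andFeatures is the AND-combination of every input's GNU property bits, so always/never simply force one bit after that implicit combination. A hedged model of the interaction (4 is GNU_PROPERTY_AARCH64_FEATURE_1_GCS, matching the .note.gnu.property inputs in the test below):

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t kGcs = 4;             // GNU_PROPERTY_AARCH64_FEATURE_1_GCS
    uint32_t inputs[] = {kGcs, 0, kGcs}; // one object lacks the property
    uint32_t andFeatures = ~0u;
    for (uint32_t f : inputs)
      andFeatures &= f;     // implicit: bit survives only if all inputs set it
    assert(!(andFeatures & kGcs));
    andFeatures |= kGcs;    // -z gcs=always
    assert(andFeatures & kGcs);
    andFeatures &= ~kGcs;   // -z gcs=never
    assert(!(andFeatures & kGcs));
    return 0;
  }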
diff --git a/lld/ELF/Options.td b/lld/ELF/Options.td
index 883a6079bf50..ff61a566f52f 100644
--- a/lld/ELF/Options.td
+++ b/lld/ELF/Options.td
@@ -69,6 +69,7 @@ defm compress_debug_sections:
defm compress_sections: EEq<"compress-sections",
"Compress output sections that match the glob and do not have the SHF_ALLOC flag. "
+ "The sections remain uncompressed if compressed content would be larger. "
"The compression level is <level> (if specified) or a default speed-focused level">,
MetaVarName<"<section-glob>={none,zlib,zstd}[:level]">;
diff --git a/lld/ELF/OutputSections.cpp b/lld/ELF/OutputSections.cpp
index fcb4c4387aa9..60de10061c53 100644
--- a/lld/ELF/OutputSections.cpp
+++ b/lld/ELF/OutputSections.cpp
@@ -344,9 +344,10 @@ template <class ELFT> void OutputSection::maybeCompress() {
(void)sizeof(Elf_Chdr);
DebugCompressionType ctype = DebugCompressionType::None;
+ size_t compressedSize = sizeof(Elf_Chdr);
unsigned level = 0; // default compression level
if (!(flags & SHF_ALLOC) && config->compressDebugSections &&
- name.starts_with(".debug_") && size)
+ name.starts_with(".debug_"))
ctype = *config->compressDebugSections;
for (auto &[glob, t, l] : config->compressSections)
if (glob.match(name))
@@ -360,7 +361,6 @@ template <class ELFT> void OutputSection::maybeCompress() {
}
llvm::TimeTraceScope timeScope("Compress sections");
- compressed.uncompressedSize = size;
auto buf = std::make_unique<uint8_t[]>(size);
// Write uncompressed data to a temporary zero-initialized buffer.
{
@@ -378,7 +378,6 @@ template <class ELFT> void OutputSection::maybeCompress() {
[[maybe_unused]] constexpr size_t shardSize = 1 << 20;
auto shardsIn = split(ArrayRef<uint8_t>(buf.get(), size), shardSize);
const size_t numShards = shardsIn.size();
- compressed.numShards = numShards;
auto shardsOut = std::make_unique<SmallVector<uint8_t, 0>[]>(numShards);
#if LLVM_ENABLE_ZSTD
@@ -409,9 +408,8 @@ template <class ELFT> void OutputSection::maybeCompress() {
shardsOut[i] = std::move(out);
});
compressed.type = ELFCOMPRESS_ZSTD;
- size = sizeof(Elf_Chdr);
for (size_t i = 0; i != numShards; ++i)
- size += shardsOut[i].size();
+ compressedSize += shardsOut[i].size();
}
#endif
@@ -434,18 +432,23 @@ template <class ELFT> void OutputSection::maybeCompress() {
 // Update section size and combine Adler-32 checksums.
uint32_t checksum = 1; // Initial Adler-32 value
- size = sizeof(Elf_Chdr) + 2; // Elf_Chdir and zlib header
+      compressedSize += 2; // zlib header
for (size_t i = 0; i != numShards; ++i) {
- size += shardsOut[i].size();
+ compressedSize += shardsOut[i].size();
checksum = adler32_combine(checksum, shardsAdler[i], shardsIn[i].size());
}
- size += 4; // checksum
+ compressedSize += 4; // checksum
compressed.type = ELFCOMPRESS_ZLIB;
compressed.checksum = checksum;
}
#endif
+ if (compressedSize >= size)
+ return;
+ compressed.uncompressedSize = size;
compressed.shards = std::move(shardsOut);
+ compressed.numShards = numShards;
+ size = compressedSize;
flags |= SHF_COMPRESSED;
}
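[Editor's note] Net effect of this hunk: totals are accumulated into compressedSize, seeded with sizeof(Elf_Chdr), and the section only switches to the compressed form when that total is strictly smaller than the input. A hedged model of the final comparison, with the format overheads spelled out:

  #include <cstddef>

  // Elf_Chdr is 12 bytes on ELF32 and 24 on ELF64; zlib framing adds a
  // 2-byte header and a 4-byte Adler-32 checksum around the payload.
  bool adoptCompressedForm(size_t size, size_t payload, bool elf64, bool zlib) {
    size_t compressedSize = (elf64 ? 24 : 12) + payload + (zlib ? 2 + 4 : 0);
    return compressedSize < size; // otherwise the section stays uncompressed
  }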
diff --git a/lld/ELF/SyntheticSections.cpp b/lld/ELF/SyntheticSections.cpp
index 22bfed0852bc..ad280289cebf 100644
--- a/lld/ELF/SyntheticSections.cpp
+++ b/lld/ELF/SyntheticSections.cpp
@@ -613,7 +613,7 @@ uint64_t EhFrameSection::getFdePc(uint8_t *buf, size_t fdeOff,
size_t off = fdeOff + 8;
uint64_t addr = readFdeAddr(buf + off, enc & 0xf);
if ((enc & 0x70) == DW_EH_PE_absptr)
- return addr;
+ return config->is64 ? addr : uint32_t(addr);
if ((enc & 0x70) == DW_EH_PE_pcrel)
return addr + getParent()->addr + off + outSecOff;
fatal("unknown FDE size relative encoding");
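[Editor's note] A hedged illustration of the truncation above; the mips-eh_frame-pic.s test later in this patch links at -Ttext=0x80000000 (issue #88852) to catch exactly this case, where a value sign-extended during the read would otherwise leak into .eh_frame_hdr:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t addr = 0xffffffff80000000; // e.g. after a sign-extending read
    bool is64 = false;                  // 32-bit target
    uint64_t pc = is64 ? addr : uint32_t(addr);
    assert(pc == 0x80000000);
    return 0;
  }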
diff --git a/lld/MachO/Config.h b/lld/MachO/Config.h
index 7b45f7f4c39a..96253e15f7ee 100644
--- a/lld/MachO/Config.h
+++ b/lld/MachO/Config.h
@@ -193,6 +193,7 @@ struct Configuration {
UndefinedSymbolTreatment undefinedSymbolTreatment =
UndefinedSymbolTreatment::error;
ICFLevel icfLevel = ICFLevel::none;
+ bool keepICFStabs = false;
ObjCStubsMode objcStubsMode = ObjCStubsMode::fast;
llvm::MachO::HeaderFileType outputType;
std::vector<llvm::StringRef> systemLibraryRoots;
diff --git a/lld/MachO/Driver.cpp b/lld/MachO/Driver.cpp
index d4d8d53d69ee..4ee6a907b2f4 100644
--- a/lld/MachO/Driver.cpp
+++ b/lld/MachO/Driver.cpp
@@ -1648,6 +1648,7 @@ bool link(ArrayRef<const char *> argsArr, llvm::raw_ostream &stdoutOS,
config->emitChainedFixups || args.hasArg(OPT_init_offsets);
config->emitRelativeMethodLists = shouldEmitRelativeMethodLists(args);
config->icfLevel = getICFLevel(args);
+ config->keepICFStabs = args.hasArg(OPT_keep_icf_stabs);
config->dedupStrings =
args.hasFlag(OPT_deduplicate_strings, OPT_no_deduplicate_strings, true);
config->deadStripDuplicates = args.hasArg(OPT_dead_strip_duplicates);
diff --git a/lld/MachO/Options.td b/lld/MachO/Options.td
index 11458d92b3ab..aecced9279da 100644
--- a/lld/MachO/Options.td
+++ b/lld/MachO/Options.td
@@ -85,6 +85,9 @@ def icf_eq: Joined<["--"], "icf=">,
HelpText<"Set level for identical code folding (default: none)">,
MetaVarName<"[none,safe,all]">,
Group<grp_lld>;
+def keep_icf_stabs: Joined<["--"], "keep-icf-stabs">,
+ HelpText<"Generate STABS entries for symbols folded by ICF. These entries can then be used by dsymutil to discover the address range where folded symbols are located.">,
+ Group<grp_lld>;
def lto_O: Joined<["--"], "lto-O">,
HelpText<"Set optimization level for LTO (default: 2)">,
MetaVarName<"<opt-level>">,
diff --git a/lld/MachO/SyntheticSections.cpp b/lld/MachO/SyntheticSections.cpp
index 29070810bb04..b3fe223938bf 100644
--- a/lld/MachO/SyntheticSections.cpp
+++ b/lld/MachO/SyntheticSections.cpp
@@ -1220,15 +1220,18 @@ void SymtabSection::emitStabs() {
continue;
// Constant-folded symbols go in the executable's symbol table, but don't
- // get a stabs entry.
- if (defined->wasIdenticalCodeFolded)
+    // get a stabs entry unless the --keep-icf-stabs flag is specified.
+ if (!config->keepICFStabs && defined->wasIdenticalCodeFolded)
continue;
ObjFile *file = defined->getObjectFile();
if (!file || !file->compileUnit)
continue;
- symbolsNeedingStabs.emplace_back(defined, defined->isec()->getFile()->id);
+    // Use 'originalIsec' to get the symbol's file ID, since 'isec()' might
+    // point to the file of the symbol it was ICF-merged into.
+ symbolsNeedingStabs.emplace_back(defined,
+ defined->originalIsec->getFile()->id);
}
}
@@ -1243,7 +1246,9 @@ void SymtabSection::emitStabs() {
InputFile *lastFile = nullptr;
for (SortingPair &pair : symbolsNeedingStabs) {
Defined *defined = pair.first;
- InputSection *isec = defined->isec();
+    // Use the symbol's 'originalIsec' since we care about its actual origin,
+    // not the canonical location returned by `isec()`.
+ InputSection *isec = defined->originalIsec;
ObjFile *file = cast<ObjFile>(isec->getFile());
if (lastFile == nullptr || lastFile != file) {
@@ -1256,7 +1261,7 @@ void SymtabSection::emitStabs() {
}
StabsEntry symStab;
- symStab.sect = defined->isec()->parent->index;
+ symStab.sect = isec->parent->index;
symStab.strx = stringTableSection.addString(defined->getName());
symStab.value = defined->getVA();
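[Editor's note] A hedged sketch of why both hunks switch to originalIsec: after ICF, isec() of a folded symbol resolves to the surviving copy, so file attribution has to come from where the symbol was originally defined. Types here are illustrative stand-ins, not the lld classes:

  struct InputSection { int fileId; int sectionIndex; };
  struct Defined {
    InputSection *originalIsec; // where the symbol was defined
    InputSection *survivor;     // what isec() resolves to after folding
  };

  // The stab's file and section come from the original definition, while the
  // symbol's value (getVA) still reflects the folded address.
  int stabFileId(const Defined &d) { return d.originalIsec->fileId; }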
diff --git a/lld/docs/ld.lld.1 b/lld/docs/ld.lld.1
index 0df13f07f560..da3b926d02a2 100644
--- a/lld/docs/ld.lld.1
+++ b/lld/docs/ld.lld.1
@@ -148,6 +148,7 @@ Alias for
.Fl -color-diagnostics Ns = Ns Cm auto .
.It Fl -compress-debug-sections Ns = Ns Ar value
Compress DWARF debug sections.
+The sections remain uncompressed if compressed content would be larger.
.Cm value
may be
.Pp
@@ -163,6 +164,7 @@ Use the default compression level in zstd.
.Pp
.It Fl -compress-sections Ns = Ns Ar section-glob={none,zlib,zstd}[:level]
Compress output sections that match the glob and do not have the SHF_ALLOC flag.
+The matched sections remain uncompressed if compressed content would be larger.
The compression level is
.Cm level
(if specified) or a default speed-focused level.
@@ -420,9 +422,7 @@ Disable string merging.
.It Cm 1
Enable string merging.
.It Cm 2
-Enable string tail merging. If
-.Fl -compress-debug-sections
-is given, compress debug sections at compression level 6 instead of 1.
+Enable string tail merging.
.El
.Pp
.Fl O Ns Cm 1
diff --git a/lld/test/COFF/arm64ec-exports.s b/lld/test/COFF/arm64ec-exports.s
new file mode 100644
index 000000000000..a48211e6fb76
--- /dev/null
+++ b/lld/test/COFF/arm64ec-exports.s
@@ -0,0 +1,121 @@
+; REQUIRES: aarch64
+; RUN: split-file %s %t.dir && cd %t.dir
+
+; RUN: llvm-mc -filetype=obj -triple=arm64ec-windows test.s -o test.obj
+; RUN: llvm-mc -filetype=obj -triple=arm64ec-windows drectve.s -o drectve.obj
+; RUN: llvm-mc -filetype=obj -triple=arm64ec-windows %S/Inputs/loadconfig-arm64ec.s -o loadconfig-arm64ec.obj
+
+; Check various forms of the export directive and make sure that function
+; export names are demangled.
+
+; RUN: lld-link -out:out.dll test.obj loadconfig-arm64ec.obj -dll -noentry -machine:arm64ec \
+; RUN: -export:unmangled_func '-export:#mangled_func' '-export:#exportas_func,EXPORTAS,exportas_func' \
+; RUN: '-export:?cxx_func@@$$hYAHXZ' -export:data_sym,DATA '-export:#mangled_data_sym,DATA'
+
+
+; RUN: llvm-readobj --coff-exports out.dll | FileCheck --check-prefix=EXP %s
+; EXP: Export {
+; EXP-NEXT: Ordinal: 1
+; EXP-NEXT: Name: #mangled_data_sym
+; EXP-NEXT: RVA: 0x3000
+; EXP-NEXT: }
+; EXP-NEXT: Export {
+; EXP-NEXT: Ordinal: 2
+; EXP-NEXT: Name: ?cxx_func@@YAHXZ
+; EXP-NEXT: RVA: 0x1018
+; EXP-NEXT: }
+; EXP-NEXT: Export {
+; EXP-NEXT: Ordinal: 3
+; EXP-NEXT: Name: data_sym
+; EXP-NEXT: RVA: 0x3004
+; EXP-NEXT: }
+; EXP-NEXT: Export {
+; EXP-NEXT: Ordinal: 4
+; EXP-NEXT: Name: exportas_func
+; EXP-NEXT: RVA: 0x1010
+; EXP-NEXT: }
+; EXP-NEXT: Export {
+; EXP-NEXT: Ordinal: 5
+; EXP-NEXT: Name: mangled_func
+; EXP-NEXT: RVA: 0x1008
+; EXP-NEXT: }
+; EXP-NEXT: Export {
+; EXP-NEXT: Ordinal: 6
+; EXP-NEXT: Name: unmangled_func
+; EXP-NEXT: RVA: 0x1000
+; EXP-NEXT: }
+
+; RUN: llvm-nm --print-armap out.lib | FileCheck --check-prefix=IMPLIB %s
+; IMPLIB: Archive EC map
+; IMPLIB-NEXT: #exportas_func in out
+; IMPLIB-NEXT: #mangled_func in out
+; IMPLIB-NEXT: #unmangled_func in out
+; IMPLIB-NEXT: ?cxx_func@@$$hYAHXZ in out
+; IMPLIB-NEXT: ?cxx_func@@YAHXZ in out
+; IMPLIB-NEXT: __IMPORT_DESCRIPTOR_out{{.*}} in out
+; IMPLIB-NEXT: __NULL_IMPORT_DESCRIPTOR in out
+; IMPLIB-NEXT: __imp_?cxx_func@@YAHXZ in out
+; IMPLIB-NEXT: __imp_aux_?cxx_func@@YAHXZ in out
+; IMPLIB-NEXT: __imp_aux_exportas_func in out
+; IMPLIB-NEXT: __imp_aux_mangled_func in out
+; IMPLIB-NEXT: __imp_aux_unmangled_func in out
+; IMPLIB-NEXT: __imp_data_sym in out
+; IMPLIB-NEXT: __imp_exportas_func in out
+; IMPLIB-NEXT: __imp_mangled_data_sym in out
+; IMPLIB-NEXT: __imp_mangled_func in out
+; IMPLIB-NEXT: __imp_unmangled_func in out
+; IMPLIB-NEXT: exportas_func in out
+; IMPLIB-NEXT: mangled_func in out
+; IMPLIB-NEXT: unmangled_func in out
+; IMPLIB-NEXT: out{{.*}}_NULL_THUNK_DATA in out
+
+
+; Check that using the .drectve section has the same effect.
+
+; RUN: lld-link -out:out2.dll test.obj loadconfig-arm64ec.obj -dll -noentry -machine:arm64ec drectve.obj
+; RUN: llvm-readobj --coff-exports out2.dll | FileCheck --check-prefix=EXP %s
+; RUN: llvm-nm --print-armap out2.lib | FileCheck --check-prefix=IMPLIB %s
+
+#--- test.s
+ .text
+ .globl unmangled_func
+ .p2align 2, 0x0
+unmangled_func:
+ mov w0, #1
+ ret
+
+ .globl "#mangled_func"
+ .p2align 2, 0x0
+"#mangled_func":
+ mov w0, #2
+ ret
+
+ .globl "#exportas_func"
+ .p2align 2, 0x0
+"#exportas_func":
+ mov w0, #3
+ ret
+
+ .globl "?cxx_func@@$$hYAHXZ"
+ .p2align 2, 0x0
+"?cxx_func@@$$hYAHXZ":
+ mov w0, #4
+ ret
+
+ .data
+ .globl "#mangled_data_sym"
+ .p2align 2, 0x0
+"#mangled_data_sym":
+ .word 0x01010101
+ .globl data_sym
+ .p2align 2, 0x0
+data_sym:
+ .word 0x01010101
+
+#--- drectve.s
+ .section .drectve, "yn"
+ .ascii " -export:unmangled_func"
+ .ascii " -export:#mangled_func"
+ .ascii " -export:#exportas_func,EXPORTAS,exportas_func"
+ .ascii " -export:?cxx_func@@$$hYAHXZ"
+ .ascii " -export:data_sym,DATA"
+ .ascii " -export:#mangled_data_sym,DATA"
diff --git a/lld/test/ELF/aarch64-feature-gcs.s b/lld/test/ELF/aarch64-feature-gcs.s
new file mode 100644
index 000000000000..7a08673dbb7e
--- /dev/null
+++ b/lld/test/ELF/aarch64-feature-gcs.s
@@ -0,0 +1,134 @@
+# REQUIRES: aarch64
+# RUN: rm -rf %t && split-file %s %t && cd %t
+# RUN: llvm-mc -filetype=obj -triple=aarch64-linux-gnu func1-gcs.s -o func1-gcs.o
+# RUN: llvm-mc -filetype=obj -triple=aarch64-linux-gnu func2.s -o func2.o
+# RUN: llvm-mc -filetype=obj -triple=aarch64-linux-gnu func2-gcs.s -o func2-gcs.o
+# RUN: llvm-mc -filetype=obj -triple=aarch64-linux-gnu func3.s -o func3.o
+# RUN: llvm-mc -filetype=obj -triple=aarch64-linux-gnu func3-gcs.s -o func3-gcs.o
+
+## GCS should be enabled when it's enabled in all inputs or when it's forced on.
+
+# RUN: ld.lld func1-gcs.o func2-gcs.o func3-gcs.o -o gcs
+# RUN: llvm-readelf -n gcs | FileCheck --check-prefix GCS %s
+# RUN: ld.lld func1-gcs.o func3-gcs.o --shared -o gcs.so
+# RUN: llvm-readelf -n gcs.so | FileCheck --check-prefix GCS %s
+# RUN: ld.lld func1-gcs.o func2.o func3-gcs.o -o force-gcs -z gcs=always
+# RUN: llvm-readelf -n force-gcs | FileCheck --check-prefix GCS %s
+# RUN: ld.lld func2-gcs.o func3.o --shared -o force-gcs.so -z gcs=always
+# RUN: llvm-readelf -n force-gcs.so | FileCheck --check-prefix GCS %s
+# RUN: ld.lld func2-gcs.o func3.o --shared -o force-gcs2.so -z gcs=never -z gcs=always
+# RUN: llvm-readelf -n force-gcs2.so | FileCheck --check-prefix GCS %s
+
+# GCS: Properties: aarch64 feature: GCS
+
+## GCS should not be enabled if at least one input does not enable it.
+
+# RUN: ld.lld func1-gcs.o func2.o func3-gcs.o -o no-gcs
+# RUN: llvm-readelf -n no-gcs | count 0
+# RUN: ld.lld func2-gcs.o func3.o --shared -o no-gcs.so
+
+## GCS should be disabled with gcs=never, even if GCS is present in all inputs.
+
+# RUN: ld.lld func1-gcs.o func2-gcs.o func3-gcs.o -z gcs=never -o never-gcs
+# RUN: llvm-readelf -n never-gcs | count 0
+# RUN: ld.lld func1-gcs.o func2-gcs.o func3-gcs.o -z gcs=always -z gcs=never -o never-gcs2
+# RUN: llvm-readelf -n never-gcs2 | count 0
+
+## gcs-report should report any input files that don't have the GCS property.
+
+# RUN: ld.lld func1-gcs.o func2.o func3-gcs.o -o /dev/null -z gcs-report=warning 2>&1 | FileCheck --check-prefix=REPORT-WARN %s
+# RUN: ld.lld func1-gcs.o func2.o func3-gcs.o -o /dev/null -z gcs-report=warning -z gcs=always 2>&1 | FileCheck --check-prefix=REPORT-WARN %s
+# RUN: ld.lld func1-gcs.o func2.o func3-gcs.o -o /dev/null -z gcs-report=warning -z gcs=never 2>&1 | FileCheck --check-prefix=REPORT-WARN %s
+# RUN: not ld.lld func2-gcs.o func3.o --shared -o /dev/null -z gcs-report=error 2>&1 | FileCheck --check-prefix=REPORT-ERROR %s
+# RUN: not ld.lld func2-gcs.o func3.o --shared -o /dev/null -z gcs-report=error -z gcs=always 2>&1 | FileCheck --check-prefix=REPORT-ERROR %s
+# RUN: not ld.lld func2-gcs.o func3.o --shared -o /dev/null -z gcs-report=error -z gcs=never 2>&1 | FileCheck --check-prefix=REPORT-ERROR %s
+# RUN: ld.lld func1-gcs.o func2-gcs.o func3-gcs.o -o /dev/null -z gcs-report=warning 2>&1 | count 0
+# RUN: ld.lld func1-gcs.o func2-gcs.o func3-gcs.o -o /dev/null -z gcs-report=warning -z gcs=always 2>&1 | count 0
+# RUN: ld.lld func1-gcs.o func2-gcs.o func3-gcs.o -o /dev/null -z gcs-report=warning -z gcs=never 2>&1 | count 0
+
+# REPORT-WARN: warning: func2.o: -z gcs-report: file does not have GNU_PROPERTY_AARCH64_FEATURE_1_GCS property
+# REPORT-ERROR: error: func3.o: -z gcs-report: file does not have GNU_PROPERTY_AARCH64_FEATURE_1_GCS property
+
+## An invalid -z gcs= value should give an error.
+# RUN: not ld.lld func1-gcs.o func2-gcs.o func3-gcs.o -z gcs=nonsense 2>&1 | FileCheck --check-prefix=INVALID %s
+
+# INVALID: error: unknown -z gcs= value: nonsense
+
+#--- func1-gcs.s
+.section ".note.gnu.property", "a"
+.long 4           // n_namesz: "GNU" plus terminating NUL
+.long 0x10        // n_descsz: 16 bytes of property data
+.long 0x5         // n_type: NT_GNU_PROPERTY_TYPE_0
+.asciz "GNU"
+
+.long 0xc0000000  // GNU_PROPERTY_AARCH64_FEATURE_1_AND
+.long 4           // pr_datasz
+.long 4           // GNU_PROPERTY_AARCH64_FEATURE_1_GCS
+.long 0           // padding to 8-byte alignment
+
+.text
+.globl _start
+.type func1,%function
+func1:
+ bl func2
+ ret
+
+#--- func2.s
+
+.text
+.globl func2
+.type func2,@function
+func2:
+ .globl func3
+ .type func3, @function
+ bl func3
+ ret
+
+#--- func2-gcs.s
+
+.section ".note.gnu.property", "a"
+.long 4
+.long 0x10
+.long 0x5
+.asciz "GNU"
+
+.long 0xc0000000 // GNU_PROPERTY_AARCH64_FEATURE_1_AND
+.long 4
+.long 4 // GNU_PROPERTY_AARCH64_FEATURE_1_GCS
+.long 0
+
+.text
+.globl func2
+.type func2,@function
+func2:
+ .globl func3
+ .type func3, @function
+ bl func3
+ ret
+
+#--- func3.s
+
+.text
+.globl func3
+.type func3,@function
+func3:
+ ret
+
+#--- func3-gcs.s
+
+.section ".note.gnu.property", "a"
+.long 4
+.long 0x10
+.long 0x5
+.asciz "GNU"
+
+.long 0xc0000000 // GNU_PROPERTY_AARCH64_FEATURE_1_AND
+.long 4
+.long 4 // GNU_PROPERTY_AARCH64_FEATURE_1_GCS
+.long 0
+
+.text
+.globl func3
+.type func3,@function
+func3:
+ ret
diff --git a/lld/test/ELF/arm-gotoff.s b/lld/test/ELF/arm-gotoff.s
index 0a6ea3ab0dad..5b0dd3e98cc4 100644
--- a/lld/test/ELF/arm-gotoff.s
+++ b/lld/test/ELF/arm-gotoff.s
@@ -1,67 +1,29 @@
// REQUIRES: arm
// RUN: llvm-mc -filetype=obj -triple=armv7a-linux-gnueabi %s -o %t.o
-// RUN: ld.lld %t.o -o %t
-// RUN: llvm-readobj -S -r --symbols %t | FileCheck %s
-// RUN: llvm-objdump --triple=armv7a-linux-gnueabi -d %t | FileCheck --check-prefix=DISASM %s
+// RUN: ld.lld -z separate-loadable-segments %t.o -o %t
+// RUN: llvm-readelf -S -r --symbols %t | FileCheck %s
+// RUN: llvm-objdump --triple=armv7a-linux-gnueabi -d --no-show-raw-insn %t | FileCheck --check-prefix=DISASM %s
// Test the R_ARM_GOTOFF32 relocation
-// CHECK: Name: .got
-// CHECK-NEXT: Type: SHT_PROGBITS (0x1)
-// CHECK-NEXT: Flags [
-// CHECK-NEXT: SHF_ALLOC
-// CHECK-NEXT: SHF_WRITE
-// CHECK-NEXT: ]
-// CHECK-NEXT: Address: 0x30124
-// CHECK-NEXT: Offset: 0x124
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Link:
-// CHECK-NEXT: Info:
-// CHECK-NEXT: AddressAlignment:
+// CHECK: [Nr] Name Type Address Off Size ES Flg Lk Inf Al
+// CHECK-NEXT: [ 0] NULL 00000000 000000 000000 00 0 0 0
+// CHECK-NEXT: [ 1] .text PROGBITS 00020000 010000 000010 00 AX 0 0 4
+// CHECK-NEXT: [ 2] .got PROGBITS 00030000 020000 000000 00 WA 0 0 4
+// CHECK-NEXT: [ 3] .relro_padding NOBITS 00030000 020000 000000 00 WA 0 0 1
+// CHECK-NEXT: [ 4] .bss NOBITS 00030000 020000 000014 00 WA 0 0 1
-// CHECK: Name: .bss
-// CHECK-NEXT: Type: SHT_NOBITS
-// CHECK-NEXT: Flags [
-// CHECK-NEXT: SHF_ALLOC
-// CHECK-NEXT: SHF_WRITE
-// CHECK-NEXT: ]
-// CHECK-NEXT: Address: 0x40124
-// CHECK-NEXT: Offset:
-// CHECK-NEXT: Size: 20
-// CHECK-NEXT: Link:
-// CHECK-NEXT: Info:
-// CHECK-NEXT: AddressAlignment: 1
+// CHECK: 00030000 10 OBJECT GLOBAL DEFAULT 4 bar
+// CHECK-NEXT: 0003000a 10 OBJECT GLOBAL DEFAULT 4 obj
-// CHECK-NEXT: EntrySize: 0
-
-// CHECK: Symbol {
-// CHECK: Name: bar
-// CHECK-NEXT: Value: 0x40124
-// CHECK-NEXT: Size: 10
-// CHECK-NEXT: Binding: Global
-// CHECK-NEXT: Type: Object
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .bss
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: obj
-// CHECK-NEXT: Value: 0x4012E
-// CHECK-NEXT: Size: 10
-// CHECK-NEXT: Binding: Global
-// CHECK-NEXT: Type: Object
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .bss
-
-// DISASM: Disassembly of section .text:
-// DISASM-EMPTY:
-// DISASM-NEXT :_start:
-// DISASM-NEXT 11114: 1e ff 2f e1 bx lr
+// DISASM: <_start>:
+// DISASM-NEXT: bx lr
// Offset 0 from .got = bar
-// DISASM 11118: 00 10 00 00
+// DISASM: .word 0x00000000
// Offset 10 from .got = obj
-// DISASM-NEXT 1111c: 0a 10 00 00
+// DISASM-NEXT: .word 0x0000000a
// Offset 15 from .got = obj +5
-// DISASM-NEXT 11120: 0f 10 00 00
+// DISASM-NEXT: .word 0x0000000f
.syntax unified
.globl _start
_start:
diff --git a/lld/test/ELF/avr-reloc-error.s b/lld/test/ELF/avr-reloc-error.s
index 0a30f68d168e..f177e44f753f 100644
--- a/lld/test/ELF/avr-reloc-error.s
+++ b/lld/test/ELF/avr-reloc-error.s
@@ -3,7 +3,7 @@
# RUN: rm -rf %t && split-file %s %t && cd %t
# RUN: llvm-mc -filetype=obj -triple=avr -mcpu=atmega328 avr-pcrel-7.s -o avr-pcrel-7.o
-# RUN: not ld.lld avr-pcrel-7.o -o /dev/null -Ttext=0x1000 --defsym=callee0=0x1040 --defsym=callee1=0x1044 --defsym=callee2=0x100f 2>&1 | \
+# RUN: not ld.lld avr-pcrel-7.o -o /dev/null -Ttext=0x1000 --defsym=callee0=0x1040 --defsym=callee1=0x1084 --defsym=callee2=0x100f 2>&1 | \
# RUN: FileCheck %s --check-prefix=PCREL7
# RUN: llvm-mc -filetype=obj -triple=avr -mcpu=atmega328 avr-pcrel-13.s -o avr-pcrel-13.o
# RUN: not ld.lld avr-pcrel-13.o -o /dev/null -Ttext=0x1000 --defsym=callee0=0x2000 --defsym=callee1=0x2004 --defsym=callee2=0x100f 2>&1 | \
@@ -20,7 +20,7 @@
__start:
# PCREL7-NOT: callee0
-# PCREL7: error: {{.*}} relocation R_AVR_7_PCREL out of range: {{.*}} is not in [-64, 63]; references 'callee1'
+# PCREL7: error: {{.*}} relocation R_AVR_7_PCREL out of range: {{.*}} is not in [-128, 127]; references 'callee1'
# PCREL7: error: {{.*}} improper alignment for relocation R_AVR_7_PCREL: {{.*}} is not aligned to 2 bytes
brne callee0
breq callee1
@@ -34,7 +34,6 @@ brlt callee2
__start:
# PCREL13-NOT: callee0
-# PCREL13: error: {{.*}} relocation R_AVR_13_PCREL out of range: {{.*}} is not in [-4096, 4095]; references 'callee1'
# PCREL13: error: {{.*}} improper alignment for relocation R_AVR_13_PCREL: {{.*}} is not aligned to 2 bytes
rjmp callee0
rcall callee1
diff --git a/lld/test/ELF/avr-reloc.s b/lld/test/ELF/avr-reloc.s
index 172c0e03ba74..ec088eaa149d 100644
--- a/lld/test/ELF/avr-reloc.s
+++ b/lld/test/ELF/avr-reloc.s
@@ -82,6 +82,12 @@ sbic b, 1 ; R_AVR_PORT5
; CHECK-NEXT: rjmp .-36
; CHECK-NEXT: breq .+26
; CHECK-NEXT: breq .-40
+; CHECK-NEXT: rjmp .-4096
+; CHECK-NEXT: rjmp .+4094
+; CHECK-NEXT: rjmp .+4094
+; CHECK-NEXT: rjmp .-4096
+; CHECK-NEXT: breq .-128
+; CHECK-NEXT: breq .+126
; HEX-LABEL: section .PCREL:
; HEX-NEXT: 0fc0eecf 69f061f3
foo:
@@ -89,6 +95,12 @@ rjmp foo + 32 ; R_AVR_13_PCREL
rjmp foo - 32 ; R_AVR_13_PCREL
breq foo + 32 ; R_AVR_7_PCREL
breq foo - 32 ; R_AVR_7_PCREL
+rjmp 1f - 4096 $ 1: ; R_AVR_13_PCREL
+rjmp 1f + 4094 $ 1: ; R_AVR_13_PCREL
+rjmp 1f - 4098 $ 1: ; R_AVR_13_PCREL (overflow)
+rjmp 1f + 4096 $ 1: ; R_AVR_13_PCREL (overflow)
+breq 1f - 128 $ 1: ; R_AVR_7_PCREL
+breq 1f + 126 $ 1: ; R_AVR_7_PCREL
.section .LDSSTS,"ax",@progbits
; CHECK-LABEL: section .LDSSTS:
diff --git a/lld/test/ELF/compress-debug-sections-zstd.s b/lld/test/ELF/compress-debug-sections-zstd.s
index 97ab192a52f4..d9f29af99974 100644
--- a/lld/test/ELF/compress-debug-sections-zstd.s
+++ b/lld/test/ELF/compress-debug-sections-zstd.s
@@ -3,22 +3,25 @@
# RUN: llvm-mc -filetype=obj -triple=x86_64 --compress-debug-sections=zstd %s -o %t.o
# RUN: ld.lld %t.o -o %t.so -shared
-# RUN: llvm-readelf -S -x .debug_str %t.so | FileCheck %s
+# RUN: llvm-readelf -S -p .debug_str %t.so | FileCheck %s
# CHECK: .debug_str PROGBITS [[#%x,]] [[#%x,]] [[#%x,]] 01 MS 0 0 1
-# CHECK: Hex dump of section '.debug_str':
-# CHECK-NEXT: 0x00000000 73686f72 7420756e 7369676e 65642069 short unsigned i
-# CHECK-NEXT: 0x00000010 6e740075 6e736967 6e656420 63686172 nt.unsigned char
-# CHECK-NEXT: 0x00000020 00636861 72006c6f 6e672075 6e736967 .char.long unsig
-# CHECK-NEXT: 0x00000030 6e656420 696e7400 756e7369 676e6564 ned int.unsigned
-# CHECK-NEXT: 0x00000040 20696e74 00 int.
+# CHECK: String dump of section '.debug_str':
+# CHECK-NEXT: [ 0] {{A+}}
+# CHECK-NEXT: [ 81] short unsigned int
+# CHECK-NEXT: [ 94] unsigned char
+# CHECK-NEXT: [ a2] char
+# CHECK-NEXT: [ a7] long unsigned int
+# CHECK-NEXT: [ b9] unsigned int
# RUN: ld.lld %t.o -o %t.so -shared --compress-debug-sections=zstd
# RUN: llvm-readelf -S %t.so | FileCheck %s --check-prefix=OUTPUT-SEC
# RUN: llvm-objcopy --decompress-debug-sections %t.so
-# RUN: llvm-readelf -S -x .debug_str %t.so | FileCheck %s
+# RUN: llvm-readelf -S -p .debug_str %t.so | FileCheck %s
-# OUTPUT-SEC: .debug_str PROGBITS [[#%x,]] [[#%x,]] [[#%x,]] 01 MSC 0 0 1
+# OUTPUT-SEC: .debug_str PROGBITS [[#%x,]] [[#%x,]] [[#%x,]] 01 MSC 0 0 1
+# OUTPUT-SEC-NEXT: .debug_frame PROGBITS [[#%x,]] [[#%x,]] 000000 00 0 0 1
+# OUTPUT-SEC-NEXT: .debug_loc PROGBITS [[#%x,]] [[#%x,]] 000010 00 0 0 1
.section .debug_str,"MS",@progbits,1
.LASF2:
@@ -31,3 +34,11 @@
.string "char"
.LASF1:
.string "unsigned char"
+.Lunused:
+ .fill 128, 1, 0x41
+ .byte 0
+
+## Test sections where compressed content would be larger.
+.section .debug_frame,""
+.section .debug_loc,""
+.space 16
diff --git a/lld/test/ELF/compress-sections-special.s b/lld/test/ELF/compress-sections-special.s
index 80c61fe626a4..7e474ac7c7d6 100644
--- a/lld/test/ELF/compress-sections-special.s
+++ b/lld/test/ELF/compress-sections-special.s
@@ -14,7 +14,7 @@
# CHECK: warning: {{.*}}: unable to get the string table for the SHT_SYMTAB section: SHT_STRTAB string table section
# CHECK: Hex dump of section '.strtab':
-# CHECK-NEXT: 01000000 00000000 1a000000 00000000
+# CHECK-NEXT: 01000000 00000000 5c000000 00000000
# CHECK-NEXT: 01000000 00000000 {{.*}}
# RUN: not ld.lld -shared a.o --compress-sections .dynstr=zlib 2>&1 | FileCheck %s --check-prefix=ERR-ALLOC
@@ -25,6 +25,8 @@ _start:
l0:
g0:
g1:
+.globl ggggggggggggggggggggggggggggggg0
+.globl ggggggggggggggggggggggggggggggg1
.section nonalloc0,""
.quad .text+1
diff --git a/lld/test/ELF/compress-sections.s b/lld/test/ELF/compress-sections.s
index aa30c7a90474..aaad31476044 100644
--- a/lld/test/ELF/compress-sections.s
+++ b/lld/test/ELF/compress-sections.s
@@ -11,10 +11,11 @@
# CHECK1-NEXT: .text PROGBITS [[#%x,TEXT:]] [[#%x,]] [[#%x,]] 00 AX 0 0 4
# CHECK1: nonalloc0 PROGBITS 0000000000000000 [[#%x,]] [[#%x,]] 00 0 0 8
# CHECK1-NEXT: nonalloc1 PROGBITS 0000000000000000 [[#%x,]] [[#%x,]] 00 0 0 8
+# CHECK1-NEXT: smallc0 PROGBITS 0000000000000000 [[#%x,]] [[#%x,]] 00 0 0 8
# CHECK1-NEXT: .debug_str PROGBITS 0000000000000000 [[#%x,]] [[#%x,]] 01 MS 0 0 1
-# CHECK1: 0000000000000010 0 NOTYPE LOCAL DEFAULT [[#]] (nonalloc0) sym0
-# CHECK1: 0000000000000008 0 NOTYPE LOCAL DEFAULT [[#]] (nonalloc1) sym1
+# CHECK1: 0000000000000090 0 NOTYPE LOCAL DEFAULT [[#]] (nonalloc0) sym0
+# CHECK1: 0000000000000088 0 NOTYPE LOCAL DEFAULT [[#]] (nonalloc1) sym1
# RUN: ld.lld -pie a.o --compress-sections '*c0=zlib' --compress-sections .debug_str=zstd:3 -o out2
# RUN: llvm-readelf -SrsX -x nonalloc0 -x .debug_str out2 | FileCheck %s --check-prefix=CHECK2
@@ -24,15 +25,16 @@
# CHECK2-NEXT: foo1 PROGBITS [[#%x,FOO1:]] [[#%x,]] [[#%x,]] 00 A 0 0 8
# CHECK2-NEXT: .text PROGBITS [[#%x,TEXT:]] [[#%x,]] [[#%x,]] 00 AX 0 0 4
# CHECK2: nonalloc0 PROGBITS 0000000000000000 [[#%x,]] [[#%x,]] 00 C 0 0 1
-# CHECK2-NEXT: nonalloc1 PROGBITS 0000000000000000 [[#%x,]] [[#%x,]] 00 0 0 8
+# CHECK2-NEXT: nonalloc1 PROGBITS 0000000000000000 [[#%x,]] 000088 00 0 0 8
+# CHECK2-NEXT: smallc0 PROGBITS 0000000000000000 [[#%x,]] 00000c 00 0 0 1
# CHECK2-NEXT: .debug_str PROGBITS 0000000000000000 [[#%x,]] [[#%x,]] 01 MSC 0 0 1
-# CHECK2: 0000000000000010 0 NOTYPE LOCAL DEFAULT [[#]] (nonalloc0) sym0
-# CHECK2: 0000000000000008 0 NOTYPE LOCAL DEFAULT [[#]] (nonalloc1) sym1
+# CHECK2: 0000000000000090 0 NOTYPE LOCAL DEFAULT [[#]] (nonalloc0) sym0
+# CHECK2: 0000000000000088 0 NOTYPE LOCAL DEFAULT [[#]] (nonalloc1) sym1
# CHECK2: Hex dump of section 'nonalloc0':
-## zlib with ch_size=0x10
-# CHECK2-NEXT: 01000000 00000000 10000000 00000000
+## zlib with ch_size=0x90
+# CHECK2-NEXT: 01000000 00000000 90000000 00000000
# CHECK2-NEXT: 01000000 00000000 {{.*}}
# CHECK2: Hex dump of section '.debug_str':
## zstd with ch_size=0x38
@@ -80,20 +82,28 @@ _start:
.balign 8
.quad .text-.
.quad .text-.
+.space 128
.section foo1,"a"
.balign 8
.quad .text-.
.quad .text-.
+.space 128
.section nonalloc0,""
.balign 8
.quad .text+1
.quad .text+2
+.space 128
sym0:
.section nonalloc1,""
.balign 8
.quad 42
+.space 128
sym1:
+.section smallc0,""
+.balign 8
+.space 12
+
.section .debug_str,"MS",@progbits,1
.Linfo_string0:
.asciz "AAAAAAAAAAAAAAAAAAAAAAAAAAA"
diff --git a/lld/test/ELF/compressed-debug-level.test b/lld/test/ELF/compressed-debug-level.test
index ce3a194bd7c2..5a4d37e31eca 100644
--- a/lld/test/ELF/compressed-debug-level.test
+++ b/lld/test/ELF/compressed-debug-level.test
@@ -18,8 +18,8 @@
# RUN: llvm-readelf --sections %t.6 | FileCheck -check-prefixes=HEADER,LEVEL6 %s
# HEADER: [Nr] Name Type Address Off Size
-# LEVEL1: [ 1] .debug_info PROGBITS 00000000 000094 00001{{[bc]}}
-# LEVEL6: [ 1] .debug_info PROGBITS 00000000 000094 00001a
+# LEVEL1: [ 1] .debug_info PROGBITS 00000000 000094 0000{{1[def]|21}}
+# LEVEL6: [ 1] .debug_info PROGBITS 00000000 000094 00001{{[abc]}}
## A small arbitrary debug section whose size differs after compression at
## levels 1 and 6.
@@ -33,4 +33,4 @@ FileHeader:
Sections:
- Name: .debug_info
Type: SHT_PROGBITS
- Content: '010101010101010201010201'
+ Content: '010101010101010201010201010101010101010201010201010101010101010201010201'
diff --git a/lld/test/ELF/linkerscript/compress-debug-sections.s b/lld/test/ELF/linkerscript/compress-debug-sections.s
index fe1c66dbdbdc..8d06689cc871 100644
--- a/lld/test/ELF/linkerscript/compress-debug-sections.s
+++ b/lld/test/ELF/linkerscript/compress-debug-sections.s
@@ -34,3 +34,5 @@
.section .debug_str,"MS",@progbits,1
.asciz "AAA"
.asciz "BBB"
+ .fill 64,1,0x41
+ .byte 0
diff --git a/lld/test/ELF/linkerscript/compress-sections.s b/lld/test/ELF/linkerscript/compress-sections.s
index 9b4574a1778c..5131fa754224 100644
--- a/lld/test/ELF/linkerscript/compress-sections.s
+++ b/lld/test/ELF/linkerscript/compress-sections.s
@@ -10,10 +10,11 @@
# CHECK-NEXT: str PROGBITS 0000000000000000 [[#%x,]] [[#%x,]] 01 MSC 0 0 1
# CHECK: 0000000000000000 0 NOTYPE GLOBAL DEFAULT [[#]] (nonalloc) nonalloc_start
-# CHECK: 0000000000000023 0 NOTYPE GLOBAL DEFAULT [[#]] (nonalloc) nonalloc_end
+# CHECK: 0000000000000063 0 NOTYPE GLOBAL DEFAULT [[#]] (nonalloc) nonalloc_end
# CHECK: String dump of section 'str':
# CHECK-NEXT: [ 0] AAA
-# CHECK-NEXT: [ 4] BBB
+# CHECK-NEXT: [ 4] {{a+}}
+# CHECK-NEXT: [ 45] BBB
## TODO The uncompressed size of 'nonalloc' is dependent on linker script
## commands, which is not handled. We should report an error.
@@ -28,6 +29,7 @@ _start:
.balign 8
.quad .text
.quad .text
+.space 64
.section nonalloc1,""
.balign 8
.quad 42
@@ -35,6 +37,8 @@ _start:
.section str,"MS",@progbits,1
.asciz "AAA"
.asciz "BBB"
+ .fill 64,1,0x61
+ .byte 0
#--- a.lds
SECTIONS {
diff --git a/lld/test/ELF/mips-eh_frame-pic.s b/lld/test/ELF/mips-eh_frame-pic.s
index 79076e74a7e3..fd8560bc0163 100644
--- a/lld/test/ELF/mips-eh_frame-pic.s
+++ b/lld/test/ELF/mips-eh_frame-pic.s
@@ -27,6 +27,11 @@
## relative addressing.
# NOPIC32-ERR: ld.lld: error: relocation R_MIPS_32 cannot be used against local symbol
+## https://github.com/llvm/llvm-project/issues/88852: getFdePc should return a
+## 32-bit address.
+# RUN: ld.lld --eh-frame-hdr -Ttext=0x80000000 %t-nopic32.o -o %t-nopic32
+# RUN: llvm-readelf -x .eh_frame_hdr %t-nopic32 | FileCheck %s --check-prefix=NOPIC32-HDR
+
## For -fPIC, .eh_frame should contain DW_EH_PE_pcrel | DW_EH_PE_sdata4 values:
# RUN: llvm-mc -filetype=obj -triple=mips-unknown-linux --position-independent %s -o %t-pic32.o
# RUN: llvm-readobj -r %t-pic32.o | FileCheck %s --check-prefixes=RELOCS,PIC32-RELOCS
@@ -51,6 +56,10 @@
## Note: ld.bfd converts the R_MIPS_64 relocs to DW_EH_PE_pcrel | DW_EH_PE_sdata8
## for N64 ABI (and DW_EH_PE_pcrel | DW_EH_PE_sdata4 for MIPS32)
+# NOPIC32-HDR: Hex dump of section '.eh_frame_hdr':
+# NOPIC32-HDR: 0x80010038 011b033b 00000010 00000001 fffeffc8 .
+# NOPIC32-HDR: 0x80010048 00000028 .
+
.ent func
.global func
func:
diff --git a/lld/test/MachO/stabs-icf.s b/lld/test/MachO/stabs-icf.s
index 99d0871ce4d2..5f8449809ddd 100644
--- a/lld/test/MachO/stabs-icf.s
+++ b/lld/test/MachO/stabs-icf.s
@@ -4,6 +4,9 @@
# RUN: %lld -lSystem --icf=all %t.o -o %t
# RUN: dsymutil -s %t | FileCheck %s -DDIR=%t -DSRC_PATH=%t.o
+# RUN: %lld -lSystem --icf=all %t.o -o %t_icf_stabs --keep-icf-stabs
+# RUN: dsymutil -s %t_icf_stabs | FileCheck %s -DDIR=%t_icf_stabs -DSRC_PATH=%t.o --check-prefixes=ICF_STABS
+
## This should include no N_FUN entry for _baz (which is ICF'd into _bar),
## but it does include a SECT EXT entry.
## NOTE: We do not omit the N_FUN entry for _bar even though it is of size zero.
@@ -27,6 +30,30 @@
# CHECK-DAG: ( {{.*}}) {{[0-9]+}} 0100 0000000000000000 'dyld_stub_binder'
# CHECK-EMPTY:
+
+# ICF_STABS: (N_SO ) 00 0000 0000000000000000 '/tmp{{[/\\]}}test.cpp'
+# ICF_STABS-NEXT: (N_OSO ) 03 0001 {{.*}} '[[SRC_PATH]]'
+# ICF_STABS-NEXT: (N_FUN ) 01 0000 [[#%.16x,MAIN:]] '_main'
+# ICF_STABS-NEXT: (N_FUN ) 00 0000 000000000000000b{{$}}
+# ICF_STABS-NEXT: (N_FUN ) 01 0000 [[#%.16x,BAR:]] '_bar'
+# ICF_STABS-NEXT: (N_FUN ) 00 0000 0000000000000000{{$}}
+# ICF_STABS-NEXT: (N_FUN ) 01 0000 [[#BAR]] '_bar2'
+# ICF_STABS-NEXT: (N_FUN ) 00 0000 0000000000000001{{$}}
+# ICF_STABS-NEXT: (N_FUN ) 01 0000 [[#BAR]] '_baz'
+# ICF_STABS-NEXT: (N_FUN ) 00 0000 0000000000000000{{$}}
+# ICF_STABS-NEXT: (N_FUN ) 01 0000 [[#BAR]] '_baz2'
+# ICF_STABS-NEXT: (N_FUN ) 00 0000 0000000000000001{{$}}
+# ICF_STABS-NEXT: (N_SO ) 01 0000 0000000000000000{{$}}
+# ICF_STABS-DAG: ( SECT EXT) 01 0000 [[#MAIN]] '_main'
+# ICF_STABS-DAG: ( SECT EXT) 01 0000 [[#BAR]] '_bar'
+# ICF_STABS-DAG: ( SECT EXT) 01 0000 [[#BAR]] '_bar2'
+# ICF_STABS-DAG: ( SECT EXT) 01 0000 [[#BAR]] '_baz'
+# ICF_STABS-DAG: ( SECT EXT) 01 0000 [[#BAR]] '_baz2'
+# ICF_STABS-DAG: ( {{.*}}) {{[0-9]+}} 0010 {{[0-9a-f]+}} '__mh_execute_header'
+# ICF_STABS-DAG: ( {{.*}}) {{[0-9]+}} 0100 0000000000000000 'dyld_stub_binder'
+# ICF_STABS-EMPTY:
+
+
.text
.globl _bar, _bar2, _baz, _baz2, _main
diff --git a/lld/test/wasm/shared64.s b/lld/test/wasm/shared64.s
index 3401faed8610..73f77436cabf 100644
--- a/lld/test/wasm/shared64.s
+++ b/lld/test/wasm/shared64.s
@@ -154,6 +154,7 @@ get_local_func_address:
# CHECK-NEXT: Index: 0
# CHECK-NEXT: ElemType: FUNCREF
# CHECK-NEXT: Limits:
+# CHECK-NEXT: Flags: [ IS_64 ]
# CHECK-NEXT: Minimum: 0x2
# CHECK-NEXT: - Module: env
# CHECK-NEXT: Field: __stack_pointer
@@ -170,11 +171,6 @@ get_local_func_address:
# CHECK-NEXT: Kind: GLOBAL
# CHECK-NEXT: GlobalType: I64
# CHECK-NEXT: GlobalMutable: false
-# CHECK-NEXT: - Module: env
-# CHECK-NEXT: Field: __table_base32
-# CHECK-NEXT: Kind: GLOBAL
-# CHECK-NEXT: GlobalType: I32
-# CHECK-NEXT: GlobalMutable: false
# CHECK-NEXT: - Module: GOT.mem
# CHECK-NEXT: Field: indirect_func
# CHECK-NEXT: Kind: GLOBAL
@@ -209,7 +205,7 @@ get_local_func_address:
# CHECK-NEXT: Segments:
# CHECK-NEXT: - Offset:
# CHECK-NEXT: Opcode: GLOBAL_GET
-# CHECK-NEXT: Index: 3
+# CHECK-NEXT: Index: 2
# CHECK-NEXT: Functions: [ 3, 2 ]
# check the generated code in __wasm_call_ctors and __wasm_apply_data_relocs functions
@@ -223,7 +219,7 @@ get_local_func_address:
# DIS-NEXT: i64.const 4
# DIS-NEXT: global.get 1
# DIS-NEXT: i64.add
-# DIS-NEXT: global.get 5
+# DIS-NEXT: global.get 4
# DIS-NEXT: i64.store 0:p2align=2
# DIS-NEXT: i64.const 12
# DIS-NEXT: global.get 1
@@ -242,12 +238,12 @@ get_local_func_address:
# DIS-NEXT: i64.const 24
# DIS-NEXT: global.get 1
# DIS-NEXT: i64.add
-# DIS-NEXT: global.get 6
+# DIS-NEXT: global.get 5
# DIS-NEXT: i64.store 0:p2align=2
# DIS-NEXT: i64.const 32
# DIS-NEXT: global.get 1
# DIS-NEXT: i64.add
-# DIS-NEXT: global.get 7
+# DIS-NEXT: global.get 6
# DIS-NEXT: i32.const 4
# DIS-NEXT: i32.add
# DIS-NEXT: i32.store 0
diff --git a/lld/wasm/Driver.cpp b/lld/wasm/Driver.cpp
index d5d763b0a4ae..cc79f80d005d 100644
--- a/lld/wasm/Driver.cpp
+++ b/lld/wasm/Driver.cpp
@@ -870,13 +870,6 @@ static void createSyntheticSymbols() {
WasmSym::tableBase = createUndefinedGlobal("__table_base", globalType);
WasmSym::memoryBase->markLive();
WasmSym::tableBase->markLive();
- if (is64) {
- WasmSym::tableBase32 =
- createUndefinedGlobal("__table_base32", &globalTypeI32);
- WasmSym::tableBase32->markLive();
- } else {
- WasmSym::tableBase32 = nullptr;
- }
} else {
// For non-PIC code
WasmSym::stackPointer = createGlobalVariable("__stack_pointer", true);
@@ -923,9 +916,6 @@ static void createOptionalSymbols() {
WasmSym::heapEnd = symtab->addOptionalDataSymbol("__heap_end");
WasmSym::definedMemoryBase = symtab->addOptionalDataSymbol("__memory_base");
WasmSym::definedTableBase = symtab->addOptionalDataSymbol("__table_base");
- if (config->is64.value_or(false))
- WasmSym::definedTableBase32 =
- symtab->addOptionalDataSymbol("__table_base32");
}
// For non-shared memory programs we still need to define __tls_base since we
diff --git a/lld/wasm/Symbols.cpp b/lld/wasm/Symbols.cpp
index ace6bade02d4..687728d00c85 100644
--- a/lld/wasm/Symbols.cpp
+++ b/lld/wasm/Symbols.cpp
@@ -96,8 +96,6 @@ GlobalSymbol *WasmSym::tlsSize;
GlobalSymbol *WasmSym::tlsAlign;
UndefinedGlobal *WasmSym::tableBase;
DefinedData *WasmSym::definedTableBase;
-UndefinedGlobal *WasmSym::tableBase32;
-DefinedData *WasmSym::definedTableBase32;
UndefinedGlobal *WasmSym::memoryBase;
DefinedData *WasmSym::definedMemoryBase;
TableSymbol *WasmSym::indirectFunctionTable;
diff --git a/lld/wasm/Symbols.h b/lld/wasm/Symbols.h
index 38586bbd1323..65a062b8321b 100644
--- a/lld/wasm/Symbols.h
+++ b/lld/wasm/Symbols.h
@@ -603,11 +603,6 @@ struct WasmSym {
// Used in PIC code for offset of indirect function table
static UndefinedGlobal *tableBase;
static DefinedData *definedTableBase;
- // 32-bit copy in wasm64 to work around init expr limitations.
- // These can potentially be removed again once we have
- // https://github.com/WebAssembly/extended-const
- static UndefinedGlobal *tableBase32;
- static DefinedData *definedTableBase32;
// __memory_base
// Used in PIC code for offset of global data
diff --git a/lld/wasm/SyntheticSections.cpp b/lld/wasm/SyntheticSections.cpp
index 72e255951608..b359e0fdc856 100644
--- a/lld/wasm/SyntheticSections.cpp
+++ b/lld/wasm/SyntheticSections.cpp
@@ -584,12 +584,10 @@ void ElemSection::writeBody() {
initExpr.Extended = false;
if (ctx.isPic) {
initExpr.Inst.Opcode = WASM_OPCODE_GLOBAL_GET;
- initExpr.Inst.Value.Global =
- (config->is64.value_or(false) ? WasmSym::tableBase32
- : WasmSym::tableBase)
- ->getGlobalIndex();
+ initExpr.Inst.Value.Global = WasmSym::tableBase->getGlobalIndex();
} else {
- initExpr.Inst.Opcode = WASM_OPCODE_I32_CONST;
+ bool is64 = config->is64.value_or(false);
+ initExpr.Inst.Opcode = is64 ? WASM_OPCODE_I64_CONST : WASM_OPCODE_I32_CONST;
initExpr.Inst.Value.Int32 = config->tableBase;
}
writeInitExpr(os, initExpr);
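[Editor's note] Taken together with the Writer.cpp change below, the wasm64 handling simplifies: the indirect function table is now flagged IS_64, so PIC element segments can global.get the i64 __table_base directly, and non-PIC wasm64 emits i64.const for the table base (wasm32 keeps i32.const). This is also why the __table_base32 import disappears and the global indices shift down by one in the shared64.s test earlier in this patch.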
diff --git a/lld/wasm/Writer.cpp b/lld/wasm/Writer.cpp
index 55eff995fb8a..7a015764b77c 100644
--- a/lld/wasm/Writer.cpp
+++ b/lld/wasm/Writer.cpp
@@ -939,6 +939,8 @@ static void finalizeIndirectFunctionTable() {
limits.Flags |= WASM_LIMITS_FLAG_HAS_MAX;
limits.Maximum = limits.Minimum;
}
+ if (config->is64.value_or(false))
+ limits.Flags |= WASM_LIMITS_FLAG_IS_64;
WasmSym::indirectFunctionTable->setLimits(limits);
}
@@ -1691,12 +1693,8 @@ void Writer::createSyntheticSectionsPostLayout() {
void Writer::run() {
// For PIC code the table base is assigned dynamically by the loader.
// For non-PIC, we start at 1 so that accessing table index 0 always traps.
- if (!ctx.isPic) {
- if (WasmSym::definedTableBase)
- WasmSym::definedTableBase->setVA(config->tableBase);
- if (WasmSym::definedTableBase32)
- WasmSym::definedTableBase32->setVA(config->tableBase);
- }
+ if (!ctx.isPic && WasmSym::definedTableBase)
+ WasmSym::definedTableBase->setVA(config->tableBase);
log("-- createOutputSegments");
createOutputSegments();
diff --git a/lldb/CMakeLists.txt b/lldb/CMakeLists.txt
index b0764f105327..59cdc4593463 100644
--- a/lldb/CMakeLists.txt
+++ b/lldb/CMakeLists.txt
@@ -1,4 +1,5 @@
cmake_minimum_required(VERSION 3.20.0)
+set(LLVM_SUBPROJECT_TITLE "LLDB")
if(NOT DEFINED LLVM_COMMON_CMAKE_UTILS)
set(LLVM_COMMON_CMAKE_UTILS ${CMAKE_CURRENT_SOURCE_DIR}/../cmake)
diff --git a/lldb/cmake/modules/AddLLDB.cmake b/lldb/cmake/modules/AddLLDB.cmake
index fdc4ee0c05d7..538029037dd4 100644
--- a/lldb/cmake/modules/AddLLDB.cmake
+++ b/lldb/cmake/modules/AddLLDB.cmake
@@ -29,7 +29,6 @@ function(lldb_tablegen)
if(LTG_TARGET)
add_public_tablegen_target(${LTG_TARGET})
- set_target_properties( ${LTG_TARGET} PROPERTIES FOLDER "LLDB tablegenning")
set_property(GLOBAL APPEND PROPERTY LLDB_TABLEGEN_TARGETS ${LTG_TARGET})
endif()
endfunction(lldb_tablegen)
@@ -165,10 +164,10 @@ function(add_lldb_library name)
get_property(parent_dir DIRECTORY PROPERTY PARENT_DIRECTORY)
if(EXISTS ${parent_dir})
get_filename_component(category ${parent_dir} NAME)
- set_target_properties(${name} PROPERTIES FOLDER "lldb plugins/${category}")
+ set_target_properties(${name} PROPERTIES FOLDER "LLDB/Plugins/${category}")
endif()
else()
- set_target_properties(${name} PROPERTIES FOLDER "lldb libraries")
+ set_target_properties(${name} PROPERTIES FOLDER "LLDB/Libraries")
endif()
# If we want to export all lldb symbols (i.e LLDB_EXPORT_ALL_SYMBOLS=ON), we
@@ -208,7 +207,6 @@ function(add_lldb_executable name)
else()
target_link_libraries(${name} PRIVATE ${ARG_CLANG_LIBS})
endif()
- set_target_properties(${name} PROPERTIES FOLDER "lldb executables")
if (ARG_BUILD_RPATH)
set_target_properties(${name} PROPERTIES BUILD_RPATH "${ARG_BUILD_RPATH}")
diff --git a/lldb/cmake/modules/LLDBConfig.cmake b/lldb/cmake/modules/LLDBConfig.cmake
index 3c6223b015bb..f2afced7403b 100644
--- a/lldb/cmake/modules/LLDBConfig.cmake
+++ b/lldb/cmake/modules/LLDBConfig.cmake
@@ -187,24 +187,18 @@ include_directories("${CMAKE_CURRENT_BINARY_DIR}/../clang/include")
# form -W<foo>, and if supported, add the corresponding -Wno-<foo> option.
# Disable GCC warnings
-check_cxx_compiler_flag("-Wdeprecated-declarations" CXX_SUPPORTS_DEPRECATED_DECLARATIONS)
-append_if(CXX_SUPPORTS_DEPRECATED_DECLARATIONS "-Wno-deprecated-declarations" CMAKE_CXX_FLAGS)
-
-check_cxx_compiler_flag("-Wunknown-pragmas" CXX_SUPPORTS_UNKNOWN_PRAGMAS)
-append_if(CXX_SUPPORTS_UNKNOWN_PRAGMAS "-Wno-unknown-pragmas" CMAKE_CXX_FLAGS)
-
-check_cxx_compiler_flag("-Wstrict-aliasing" CXX_SUPPORTS_STRICT_ALIASING)
-append_if(CXX_SUPPORTS_STRICT_ALIASING "-Wno-strict-aliasing" CMAKE_CXX_FLAGS)
+append("-Wno-deprecated-declarations" CMAKE_CXX_FLAGS)
+append("-Wno-unknown-pragmas" CMAKE_CXX_FLAGS)
+append("-Wno-strict-aliasing" CMAKE_CXX_FLAGS)
check_cxx_compiler_flag("-Wstringop-truncation" CXX_SUPPORTS_STRINGOP_TRUNCATION)
append_if(CXX_SUPPORTS_STRINGOP_TRUNCATION "-Wno-stringop-truncation" CMAKE_CXX_FLAGS)
# Disable Clang warnings
-check_cxx_compiler_flag("-Wdeprecated-register" CXX_SUPPORTS_DEPRECATED_REGISTER)
-append_if(CXX_SUPPORTS_DEPRECATED_REGISTER "-Wno-deprecated-register" CMAKE_CXX_FLAGS)
-
-check_cxx_compiler_flag("-Wvla-extension" CXX_SUPPORTS_VLA_EXTENSION)
-append_if(CXX_SUPPORTS_VLA_EXTENSION "-Wno-vla-extension" CMAKE_CXX_FLAGS)
+if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ append("-Wno-deprecated-register" CMAKE_CXX_FLAGS)
+ append("-Wno-vla-extension" CMAKE_CXX_FLAGS)
+endif()
# Disable MSVC warnings
if( MSVC )
@@ -272,7 +266,7 @@ if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY)
)
add_custom_target(lldb-headers)
- set_target_properties(lldb-headers PROPERTIES FOLDER "lldb misc")
+ set_target_properties(lldb-headers PROPERTIES FOLDER "LLDB/Resources")
if (NOT CMAKE_CONFIGURATION_TYPES)
add_llvm_install_targets(install-lldb-headers
diff --git a/lldb/cmake/modules/LLDBFramework.cmake b/lldb/cmake/modules/LLDBFramework.cmake
index dd8c36bba0e9..471aeaaad3c0 100644
--- a/lldb/cmake/modules/LLDBFramework.cmake
+++ b/lldb/cmake/modules/LLDBFramework.cmake
@@ -106,7 +106,7 @@ endforeach()
# Wrap output in a target, so lldb-framework can depend on it.
add_custom_target(liblldb-resource-headers DEPENDS lldb-sbapi-dwarf-enums ${lldb_staged_headers})
-set_target_properties(liblldb-resource-headers PROPERTIES FOLDER "lldb misc")
+set_target_properties(liblldb-resource-headers PROPERTIES FOLDER "LLDB/Resources")
add_dependencies(liblldb liblldb-resource-headers)
# At build time, copy the staged headers into the framework bundle (and do
diff --git a/lldb/cmake/modules/LLDBStandalone.cmake b/lldb/cmake/modules/LLDBStandalone.cmake
index fd16716d7141..c9367214848f 100644
--- a/lldb/cmake/modules/LLDBStandalone.cmake
+++ b/lldb/cmake/modules/LLDBStandalone.cmake
@@ -118,8 +118,8 @@ if(LLVM_USE_FOLDERS)
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
endif()
-set_target_properties(clang-tablegen-targets PROPERTIES FOLDER "lldb misc")
-set_target_properties(intrinsics_gen PROPERTIES FOLDER "lldb misc")
+set_target_properties(clang-tablegen-targets PROPERTIES FOLDER "Clang/Tablegenning")
+set_target_properties(intrinsics_gen PROPERTIES FOLDER "LLVM/Tablegenning")
if(NOT DEFINED LLVM_COMMON_CMAKE_UTILS)
set(LLVM_COMMON_CMAKE_UTILS ${CMAKE_CURRENT_SOURCE_DIR}/../cmake)
diff --git a/lldb/docs/CMakeLists.txt b/lldb/docs/CMakeLists.txt
index af18eb22e954..f482e91d1b10 100644
--- a/lldb/docs/CMakeLists.txt
+++ b/lldb/docs/CMakeLists.txt
@@ -13,6 +13,7 @@ if(DOXYGEN_FOUND)
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating LLDB C++ API reference with Doxygen" VERBATIM
)
+ set_target_properties(lldb-cpp-doc PROPERTIES FOLDER "LLDB/Docs")
endif()
if (LLVM_ENABLE_SPHINX)
diff --git a/lldb/docs/resources/build.rst b/lldb/docs/resources/build.rst
index 09d3d15a9408..33b6a6f79def 100644
--- a/lldb/docs/resources/build.rst
+++ b/lldb/docs/resources/build.rst
@@ -477,7 +477,6 @@ further by passing the appropriate cmake options, such as:
-DLLDB_ENABLE_PYTHON=0
-DLLDB_ENABLE_LIBEDIT=0
-DLLDB_ENABLE_CURSES=0
- -DLLVM_ENABLE_TERMINFO=0
(see :ref:`Optional Dependencies` for more)
diff --git a/lldb/include/lldb/API/SBCommandInterpreter.h b/lldb/include/lldb/API/SBCommandInterpreter.h
index ba2e049204b8..8ac36344b3a7 100644
--- a/lldb/include/lldb/API/SBCommandInterpreter.h
+++ b/lldb/include/lldb/API/SBCommandInterpreter.h
@@ -318,6 +318,14 @@ public:
SBStructuredData GetStatistics();
+  /// Returns a list of handled commands with their output and error. Each element in
+ /// the list is a dictionary with the following keys/values:
+ /// - "command" (string): The command that was executed.
+ /// - "output" (string): The output of the command. Empty ("") if no output.
+ /// - "error" (string): The error of the command. Empty ("") if no error.
+ /// - "seconds" (float): The time it took to execute the command.
+ SBStructuredData GetTranscript();
+
protected:
friend class lldb_private::CommandPluginInterfaceImplementation;
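[Editor's note] A hedged usage sketch of the new API; the SBStructuredData accessors used here (GetSize, GetItemAtIndex, GetValueForKey, GetStringValue) are the pre-existing generic ones, and the list is only populated when interpreter.save-transcript is on:

  #include "lldb/API/SBCommandInterpreter.h"
  #include "lldb/API/SBStructuredData.h"
  #include <cstddef>

  void dumpTranscript(lldb::SBCommandInterpreter &interp) {
    lldb::SBStructuredData list = interp.GetTranscript();
    for (size_t i = 0; i < list.GetSize(); ++i) {
      lldb::SBStructuredData item = list.GetItemAtIndex(i);
      char cmd[256];
      item.GetValueForKey("command").GetStringValue(cmd, sizeof(cmd));
      // "output", "error", and "seconds" are read the same way.
    }
  }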
diff --git a/lldb/include/lldb/API/SBDebugger.h b/lldb/include/lldb/API/SBDebugger.h
index 7333cd57ad31..af19b1faf3bf 100644
--- a/lldb/include/lldb/API/SBDebugger.h
+++ b/lldb/include/lldb/API/SBDebugger.h
@@ -328,9 +328,22 @@ public:
void SetLoggingCallback(lldb::LogOutputCallback log_callback, void *baton);
+ /// Clear all previously added callbacks and only add the given one.
+ LLDB_DEPRECATED_FIXME("Use AddDestroyCallback and RemoveDestroyCallback",
+ "AddDestroyCallback")
void SetDestroyCallback(lldb::SBDebuggerDestroyCallback destroy_callback,
void *baton);
+ /// Add a callback for when the debugger is destroyed. Return a token, which
+ /// can be used to remove said callback. Multiple callbacks can be added by
+ /// calling this function multiple times, and will be invoked in FIFO order.
+ lldb::callback_token_t
+ AddDestroyCallback(lldb::SBDebuggerDestroyCallback destroy_callback,
+ void *baton);
+
+ /// Remove the specified callback. Return true if successful.
+ bool RemoveDestroyCallback(lldb::callback_token_t token);
+
#ifndef SWIG
LLDB_DEPRECATED_FIXME("Use DispatchInput(const void *, size_t)",
"DispatchInput(const void *, size_t)")
diff --git a/lldb/include/lldb/Core/Debugger.h b/lldb/include/lldb/Core/Debugger.h
index ea994bf8c28d..a72c2596cc2c 100644
--- a/lldb/include/lldb/Core/Debugger.h
+++ b/lldb/include/lldb/Core/Debugger.h
@@ -40,6 +40,7 @@
#include "lldb/lldb-types.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DynamicLibrary.h"
@@ -559,10 +560,25 @@ public:
static void ReportSymbolChange(const ModuleSpec &module_spec);
+  /// DEPRECATED: Previously only one destroy callback was supported. Use
+  /// AddDestroyCallback and RemoveDestroyCallback instead, and only remove
+  /// callbacks that you added.
+ ///
+ /// Clear all previously added callbacks and only add the given one.
void
SetDestroyCallback(lldb_private::DebuggerDestroyCallback destroy_callback,
void *baton);
+ /// Add a callback for when the debugger is destroyed. Return a token, which
+ /// can be used to remove said callback. Multiple callbacks can be added by
+ /// calling this function multiple times, and will be invoked in FIFO order.
+ lldb::callback_token_t
+ AddDestroyCallback(lldb_private::DebuggerDestroyCallback destroy_callback,
+ void *baton);
+
+ /// Remove the specified callback. Return true if successful.
+ bool RemoveDestroyCallback(lldb::callback_token_t token);
+
/// Manually start the global event handler thread. It is useful to plugins
/// that directly use the \a lldb_private namespace and want to use the
/// debugger's default event handler thread instead of defining their own.
@@ -721,8 +737,19 @@ protected:
lldb::TargetSP m_dummy_target_sp;
Diagnostics::CallbackID m_diagnostics_callback_id;
- lldb_private::DebuggerDestroyCallback m_destroy_callback = nullptr;
- void *m_destroy_callback_baton = nullptr;
+ std::mutex m_destroy_callback_mutex;
+ lldb::callback_token_t m_destroy_callback_next_token = 0;
+ struct DestroyCallbackInfo {
+ DestroyCallbackInfo() {}
+ DestroyCallbackInfo(lldb::callback_token_t token,
+ lldb_private::DebuggerDestroyCallback callback,
+ void *baton)
+ : token(token), callback(callback), baton(baton) {}
+ lldb::callback_token_t token;
+ lldb_private::DebuggerDestroyCallback callback;
+ void *baton;
+ };
+ llvm::SmallVector<DestroyCallbackInfo, 2> m_destroy_callbacks;
uint32_t m_interrupt_requested = 0; ///< Tracks interrupt requests
std::mutex m_interrupt_mutex;
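[Editor's note] The token counter plus SmallVector replaces the single callback/baton pair; via the SB API this surfaces as AddDestroyCallback/RemoveDestroyCallback. A hedged usage sketch (the callback signature follows lldb::SBDebuggerDestroyCallback; the body is illustrative):

  #include "lldb/API/SBDebugger.h"

  static void onDestroy(lldb::user_id_t debugger_id, void *baton) {
    // Release whatever state is keyed on 'baton'.
  }

  void watchDebugger(lldb::SBDebugger &dbg) {
    lldb::callback_token_t tok = dbg.AddDestroyCallback(onDestroy, nullptr);
    // Callbacks still registered run in FIFO order when the debugger is
    // destroyed; removing by token cancels just this one.
    dbg.RemoveDestroyCallback(tok);
  }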
diff --git a/lldb/include/lldb/Interpreter/CommandInterpreter.h b/lldb/include/lldb/Interpreter/CommandInterpreter.h
index 70a55a77465b..ccc30cf4f1a8 100644
--- a/lldb/include/lldb/Interpreter/CommandInterpreter.h
+++ b/lldb/include/lldb/Interpreter/CommandInterpreter.h
@@ -22,6 +22,7 @@
#include "lldb/Utility/Log.h"
#include "lldb/Utility/StreamString.h"
#include "lldb/Utility/StringList.h"
+#include "lldb/Utility/StructuredData.h"
#include "lldb/lldb-forward.h"
#include "lldb/lldb-private.h"
@@ -560,6 +561,9 @@ public:
bool GetPromptOnQuit() const;
void SetPromptOnQuit(bool enable);
+ bool GetSaveTranscript() const;
+ void SetSaveTranscript(bool enable);
+
bool GetSaveSessionOnQuit() const;
void SetSaveSessionOnQuit(bool enable);
@@ -647,6 +651,7 @@ public:
}
llvm::json::Value GetStatistics();
+ const StructuredData::Array &GetTranscript() const;
protected:
friend class Debugger;
@@ -765,7 +770,20 @@ private:
typedef llvm::StringMap<uint64_t> CommandUsageMap;
CommandUsageMap m_command_usages;
+  /// Turn on the setting `interpreter.save-transcript` for LLDB to populate
+  /// this stream; otherwise it remains empty.
StreamString m_transcript_stream;
+
+ /// Contains a list of handled commands and their details. Each element in
+ /// the list is a dictionary with the following keys/values:
+ /// - "command" (string): The command that was executed.
+ /// - "output" (string): The output of the command. Empty ("") if no output.
+ /// - "error" (string): The error of the command. Empty ("") if no error.
+ /// - "seconds" (float): The time it took to execute the command.
+ ///
+  /// Turn on the setting `interpreter.save-transcript` for LLDB to populate
+  /// this list; otherwise it remains empty.
+ StructuredData::Array m_transcript;
};
} // namespace lldb_private
diff --git a/lldb/include/lldb/Symbol/CompilerType.h b/lldb/include/lldb/Symbol/CompilerType.h
index 28c723abf279..70dacdcb7986 100644
--- a/lldb/include/lldb/Symbol/CompilerType.h
+++ b/lldb/include/lldb/Symbol/CompilerType.h
@@ -436,7 +436,7 @@ public:
uint32_t *bitfield_bit_size_ptr = nullptr,
bool *is_bitfield_ptr = nullptr) const;
- CompilerType GetChildCompilerTypeAtIndex(
+ llvm::Expected<CompilerType> GetChildCompilerTypeAtIndex(
ExecutionContext *exe_ctx, size_t idx, bool transparent_pointers,
bool omit_empty_base_classes, bool ignore_array_bounds,
std::string &child_name, uint32_t &child_byte_size,
diff --git a/lldb/include/lldb/Symbol/TypeSystem.h b/lldb/include/lldb/Symbol/TypeSystem.h
index 7bcb8d69387a..b4025c173a18 100644
--- a/lldb/include/lldb/Symbol/TypeSystem.h
+++ b/lldb/include/lldb/Symbol/TypeSystem.h
@@ -359,7 +359,7 @@ public:
return CompilerDecl();
}
- virtual CompilerType GetChildCompilerTypeAtIndex(
+ virtual llvm::Expected<CompilerType> GetChildCompilerTypeAtIndex(
lldb::opaque_compiler_type_t type, ExecutionContext *exe_ctx, size_t idx,
bool transparent_pointers, bool omit_empty_base_classes,
bool ignore_array_bounds, std::string &child_name,
diff --git a/lldb/include/lldb/Target/Process.h b/lldb/include/lldb/Target/Process.h
index aac0cf51680a..637d34c29715 100644
--- a/lldb/include/lldb/Target/Process.h
+++ b/lldb/include/lldb/Target/Process.h
@@ -915,8 +915,8 @@ public:
/// \param[in] force_kill
/// Whether lldb should force a kill (instead of a detach) from
/// the inferior process. Normally if lldb launched a binary and
- /// Destory is called, lldb kills it. If lldb attached to a
- /// running process and Destory is called, lldb detaches. If
+ /// Destroy is called, lldb kills it. If lldb attached to a
+ /// running process and Destroy is called, lldb detaches. If
/// this behavior needs to be over-ridden, this is the bool that
/// can be used.
///
diff --git a/lldb/include/lldb/lldb-types.h b/lldb/include/lldb/lldb-types.h
index d60686e33142..d88b8232ee6b 100644
--- a/lldb/include/lldb/lldb-types.h
+++ b/lldb/include/lldb/lldb-types.h
@@ -68,6 +68,7 @@ typedef int pipe_t; // Host pipe type
#define LLDB_INVALID_PROCESS ((lldb::process_t)-1)
#define LLDB_INVALID_HOST_THREAD ((lldb::thread_t)NULL)
#define LLDB_INVALID_PIPE ((lldb::pipe_t)-1)
+#define LLDB_INVALID_CALLBACK_TOKEN ((lldb::callback_token_t)-1)
typedef void (*LogOutputCallback)(const char *, void *baton);
typedef bool (*CommandOverrideCallback)(void *baton, const char **argv);
@@ -77,6 +78,7 @@ typedef bool (*ExpressionCancelCallback)(ExpressionEvaluationPhase phase,
typedef void *ScriptObjectPtr;
typedef uint64_t addr_t;
+typedef int32_t callback_token_t;
typedef uint64_t user_id_t;
typedef uint64_t pid_t;
typedef uint64_t tid_t;
diff --git a/lldb/packages/Python/lldbsuite/test/dotest.py b/lldb/packages/Python/lldbsuite/test/dotest.py
index ebabf348643e..2e537e3fd3ce 100644
--- a/lldb/packages/Python/lldbsuite/test/dotest.py
+++ b/lldb/packages/Python/lldbsuite/test/dotest.py
@@ -542,12 +542,6 @@ def setupSysPath():
lldbDAPExec = os.path.join(lldbDir, "lldb-dap")
if is_exe(lldbDAPExec):
os.environ["LLDBDAP_EXEC"] = lldbDAPExec
- else:
- if not configuration.shouldSkipBecauseOfCategories(["lldb-dap"]):
- print(
- "The 'lldb-dap' executable cannot be located. The lldb-dap tests can not be run as a result."
- )
- configuration.skip_categories.append("lldb-dap")
lldbPythonDir = None # The directory that contains 'lldb/__init__.py'
@@ -929,6 +923,24 @@ def checkPexpectSupport():
configuration.skip_categories.append("pexpect")
+def checkDAPSupport():
+ import lldb
+
+ if "LLDBDAP_EXEC" not in os.environ:
+ msg = (
+ "The 'lldb-dap' executable cannot be located and its tests will not be run."
+ )
+ elif lldb.remote_platform:
+ msg = "lldb-dap tests are not compatible with remote platforms and will not be run."
+ else:
+ msg = None
+
+ if msg:
+ if configuration.verbose:
+ print(msg)
+ configuration.skip_categories.append("lldb-dap")
+
+
def run_suite():
# On MacOS X, check to make sure that domain for com.apple.DebugSymbols defaults
# does not exist before proceeding to running the test suite.
@@ -1029,6 +1041,7 @@ def run_suite():
checkObjcSupport()
checkForkVForkSupport()
checkPexpectSupport()
+ checkDAPSupport()
skipped_categories_list = ", ".join(configuration.skip_categories)
print(
diff --git a/lldb/source/API/CMakeLists.txt b/lldb/source/API/CMakeLists.txt
index 76b42ecf63f9..e8228afe103f 100644
--- a/lldb/source/API/CMakeLists.txt
+++ b/lldb/source/API/CMakeLists.txt
@@ -38,6 +38,7 @@ add_custom_command(
)
add_custom_target(lldb-sbapi-dwarf-enums
DEPENDS ${sb_languages_file})
+set_target_properties(lldb-sbapi-dwarf-enums PROPERTIES FOLDER "LLDB/Tablegenning")
add_lldb_library(liblldb SHARED ${option_framework}
SBAddress.cpp
@@ -215,7 +216,6 @@ if (NOT CMAKE_SYSTEM_NAME MATCHES "Windows")
"Only the SB API is guaranteed to be stable.")
add_llvm_symbol_exports(liblldb "${LLDB_EXPORT_ALL_SYMBOLS_EXPORTS_FILE}")
endif()
- set_target_properties(liblldb_exports PROPERTIES FOLDER "lldb misc")
elseif (LLDB_EXPORT_ALL_SYMBOLS)
MESSAGE("-- Symbols (liblldb): exporting all symbols from the lldb and lldb_private namespaces")
@@ -254,7 +254,6 @@ elseif (LLDB_EXPORT_ALL_SYMBOLS)
)
add_llvm_symbol_exports(liblldb ${exported_symbol_file})
- set_target_properties(liblldb_exports PROPERTIES FOLDER "lldb misc")
endif()
if (NOT MSVC)
diff --git a/lldb/source/API/SBCommandInterpreter.cpp b/lldb/source/API/SBCommandInterpreter.cpp
index 83c0951c56db..7a3547328368 100644
--- a/lldb/source/API/SBCommandInterpreter.cpp
+++ b/lldb/source/API/SBCommandInterpreter.cpp
@@ -6,6 +6,7 @@
//
//===----------------------------------------------------------------------===//
+#include "lldb/Utility/StructuredData.h"
#include "lldb/lldb-types.h"
#include "lldb/Interpreter/CommandInterpreter.h"
@@ -571,6 +572,21 @@ SBStructuredData SBCommandInterpreter::GetStatistics() {
return data;
}
+SBStructuredData SBCommandInterpreter::GetTranscript() {
+ LLDB_INSTRUMENT_VA(this);
+
+ SBStructuredData data;
+ if (IsValid())
+ // A deep copy is performed by `std::make_shared` on the
+ // `StructuredData::Array`, via its implicitly-declared copy constructor.
+ // This ensures thread-safety between the user changing the returned
+ // `SBStructuredData` and the `CommandInterpreter` changing its internal
+ // `m_transcript`.
+ data.m_impl_up->SetObjectSP(
+ std::make_shared<StructuredData::Array>(m_opaque_ptr->GetTranscript()));
+ return data;
+}
+
lldb::SBCommand SBCommandInterpreter::AddMultiwordCommand(const char *name,
const char *help) {
LLDB_INSTRUMENT_VA(this, name, help);
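Taken together with the save-transcript setting, the new API can be driven from a client roughly as follows; a hedged sketch against the public SB API (the "command" key name comes from the CommandInterpreter.h comment above), not a tested program:

    #include "lldb/API/SBCommandInterpreter.h"
    #include "lldb/API/SBCommandReturnObject.h"
    #include "lldb/API/SBDebugger.h"
    #include "lldb/API/SBStructuredData.h"

    #include <cstdio>

    int main() {
      lldb::SBDebugger::Initialize();
      lldb::SBDebugger debugger = lldb::SBDebugger::Create();
      lldb::SBCommandInterpreter interp = debugger.GetCommandInterpreter();

      lldb::SBCommandReturnObject result;
      // The transcript is only populated while this setting is on; note that
      // this very command is itself recorded.
      interp.HandleCommand("settings set interpreter.save-transcript true",
                           result);
      interp.HandleCommand("version", result);

      lldb::SBStructuredData transcript = interp.GetTranscript();
      for (size_t i = 0; i < transcript.GetSize(); ++i) {
        char command[512] = {0};
        transcript.GetItemAtIndex(i)
            .GetValueForKey("command")
            .GetStringValue(command, sizeof(command));
        std::printf("#%zu: %s\n", i, command);
      }

      lldb::SBDebugger::Destroy(debugger);
      lldb::SBDebugger::Terminate();
      return 0;
    }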
diff --git a/lldb/source/API/SBDebugger.cpp b/lldb/source/API/SBDebugger.cpp
index 9c662dfbf441..7ef0d6efd4aa 100644
--- a/lldb/source/API/SBDebugger.cpp
+++ b/lldb/source/API/SBDebugger.cpp
@@ -1695,6 +1695,26 @@ void SBDebugger::SetDestroyCallback(
}
}
+lldb::callback_token_t
+SBDebugger::AddDestroyCallback(lldb::SBDebuggerDestroyCallback destroy_callback,
+ void *baton) {
+ LLDB_INSTRUMENT_VA(this, destroy_callback, baton);
+
+ if (m_opaque_sp)
+ return m_opaque_sp->AddDestroyCallback(destroy_callback, baton);
+
+ return LLDB_INVALID_CALLBACK_TOKEN;
+}
+
+bool SBDebugger::RemoveDestroyCallback(lldb::callback_token_t token) {
+ LLDB_INSTRUMENT_VA(this, token);
+
+ if (m_opaque_sp)
+ return m_opaque_sp->RemoveDestroyCallback(token);
+
+ return false;
+}
+
SBTrace
SBDebugger::LoadTraceFromFile(SBError &error,
const SBFileSpec &trace_description_file) {
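A sketch of how the token-based registration pairs up, assuming the SBDebuggerDestroyCallback signature from lldb-types.h, void(lldb::user_id_t, void *); illustrative only:

    #include "lldb/API/SBDebugger.h"

    #include <cstdio>

    // Matches lldb::SBDebuggerDestroyCallback: invoked with the debugger's
    // user ID and the baton registered alongside it.
    static void OnDestroy(lldb::user_id_t id, void *baton) {
      std::printf("debugger %llu destroyed (%s)\n",
                  static_cast<unsigned long long>(id),
                  static_cast<const char *>(baton));
    }

    int main() {
      lldb::SBDebugger::Initialize();
      lldb::SBDebugger debugger = lldb::SBDebugger::Create();

      // Unlike SetDestroyCallback, AddDestroyCallback can stack several
      // hooks; each registration yields a token usable for removal.
      static const char tag_a[] = "first";
      static const char tag_b[] = "second";
      lldb::callback_token_t a =
          debugger.AddDestroyCallback(OnDestroy, (void *)tag_a);
      debugger.AddDestroyCallback(OnDestroy, (void *)tag_b);

      debugger.RemoveDestroyCallback(a); // only "second" fires now

      lldb::SBDebugger::Destroy(debugger);
      lldb::SBDebugger::Terminate();
      return 0;
    }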
diff --git a/lldb/source/Breakpoint/BreakpointResolverFileLine.cpp b/lldb/source/Breakpoint/BreakpointResolverFileLine.cpp
index d7d8c714867e..16c4ee1b88d1 100644
--- a/lldb/source/Breakpoint/BreakpointResolverFileLine.cpp
+++ b/lldb/source/Breakpoint/BreakpointResolverFileLine.cpp
@@ -198,16 +198,16 @@ void BreakpointResolverFileLine::DeduceSourceMapping(
return;
Log *log = GetLog(LLDBLog::Breakpoints);
- const llvm::StringRef path_separator = llvm::sys::path::get_separator(
- m_location_spec.GetFileSpec().GetPathStyle());
// Check if "b" is a suffix of "a".
// And return std::nullopt if not or the new path
// of "a" after consuming "b" from the back.
auto check_suffix =
- [path_separator](llvm::StringRef a, llvm::StringRef b,
- bool case_sensitive) -> std::optional<llvm::StringRef> {
+ [](llvm::StringRef a, llvm::StringRef b,
+ bool case_sensitive) -> std::optional<llvm::StringRef> {
if (case_sensitive ? a.consume_back(b) : a.consume_back_insensitive(b)) {
- if (a.empty() || a.ends_with(path_separator)) {
+ // Note sc_file_dir and request_file_dir below are normalized
+ // and always use '/' as the path separator.
+ if (a.empty() || a.ends_with("/")) {
return a;
}
}
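The check_suffix logic above is easy to exercise in isolation; a standalone sketch with hypothetical paths, relying only on llvm::StringRef:

    #include "llvm/ADT/StringRef.h"

    #include <cstdio>
    #include <optional>

    // Return what is left of `a` after consuming suffix `b`, provided the
    // match ends on a path-component boundary; paths are assumed normalized
    // to '/'.
    static std::optional<llvm::StringRef>
    check_suffix(llvm::StringRef a, llvm::StringRef b, bool case_sensitive) {
      if (case_sensitive ? a.consume_back(b) : a.consume_back_insensitive(b))
        if (a.empty() || a.ends_with("/"))
          return a;
      return std::nullopt;
    }

    int main() {
      // "/tmp/src/foo.c" minus "src/foo.c" leaves "/tmp/" -- a clean boundary.
      if (auto rest = check_suffix("/tmp/src/foo.c", "src/foo.c", true))
        std::printf("prefix: '%s'\n", rest->str().c_str());
      // "xsrc/foo.c" is not a component-wise suffix match.
      if (!check_suffix("/tmp/xsrc/foo.c", "src/foo.c", true))
        std::printf("no match\n");
      return 0;
    }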
diff --git a/lldb/source/Commands/CommandObjectThread.cpp b/lldb/source/Commands/CommandObjectThread.cpp
index 4397ee14ea07..db96ee2cec38 100644
--- a/lldb/source/Commands/CommandObjectThread.cpp
+++ b/lldb/source/Commands/CommandObjectThread.cpp
@@ -114,8 +114,8 @@ public:
CommandObjectThreadBacktrace(CommandInterpreter &interpreter)
: CommandObjectIterateOverThreads(
interpreter, "thread backtrace",
- "Show thread call stacks. Defaults to the current thread, thread "
- "indexes can be specified as arguments.\n"
+ "Show backtraces of thread call stacks. Defaults to the current "
+ "thread, thread indexes can be specified as arguments.\n"
"Use the thread-index \"all\" to see all threads.\n"
"Use the thread-index \"unique\" to see threads grouped by unique "
"call stacks.\n"
diff --git a/lldb/source/Core/CMakeLists.txt b/lldb/source/Core/CMakeLists.txt
index 10525ac39e6e..f24dbbd45a8e 100644
--- a/lldb/source/Core/CMakeLists.txt
+++ b/lldb/source/Core/CMakeLists.txt
@@ -11,9 +11,6 @@ set(LLDB_LIBEDIT_LIBS)
if (LLDB_ENABLE_CURSES)
list(APPEND LLDB_CURSES_LIBS ${PANEL_LIBRARIES} ${CURSES_LIBRARIES})
- if(LLVM_ENABLE_TERMINFO)
- list(APPEND LLDB_CURSES_LIBS ${Terminfo_LIBRARIES})
- endif()
if (LLVM_BUILD_STATIC)
list(APPEND LLDB_CURSES_LIBS gpm)
endif()
diff --git a/lldb/source/Core/Debugger.cpp b/lldb/source/Core/Debugger.cpp
index 9951fbcd3e7c..309e01e45658 100644
--- a/lldb/source/Core/Debugger.cpp
+++ b/lldb/source/Core/Debugger.cpp
@@ -743,9 +743,22 @@ DebuggerSP Debugger::CreateInstance(lldb::LogOutputCallback log_callback,
}
void Debugger::HandleDestroyCallback() {
- if (m_destroy_callback) {
- m_destroy_callback(GetID(), m_destroy_callback_baton);
- m_destroy_callback = nullptr;
+ const lldb::user_id_t user_id = GetID();
+ // Invoke and remove all the callbacks in FIFO order. Callbacks that are
+ // added during this loop will be appended, invoked, and then removed last.
+ // Callbacks that are removed during this loop will not be invoked.
+ while (true) {
+ DestroyCallbackInfo callback_info;
+ {
+ std::lock_guard<std::mutex> guard(m_destroy_callback_mutex);
+ if (m_destroy_callbacks.empty())
+ break;
+ // Pop the first item in the list
+ callback_info = m_destroy_callbacks.front();
+ m_destroy_callbacks.erase(m_destroy_callbacks.begin());
+ }
+ // Call the destroy callback with user id and baton
+ callback_info.callback(user_id, callback_info.baton);
}
}
@@ -1427,8 +1440,30 @@ void Debugger::SetLoggingCallback(lldb::LogOutputCallback log_callback,
void Debugger::SetDestroyCallback(
lldb_private::DebuggerDestroyCallback destroy_callback, void *baton) {
- m_destroy_callback = destroy_callback;
- m_destroy_callback_baton = baton;
+ std::lock_guard<std::mutex> guard(m_destroy_callback_mutex);
+ m_destroy_callbacks.clear();
+ const lldb::callback_token_t token = m_destroy_callback_next_token++;
+ m_destroy_callbacks.emplace_back(token, destroy_callback, baton);
+}
+
+lldb::callback_token_t Debugger::AddDestroyCallback(
+ lldb_private::DebuggerDestroyCallback destroy_callback, void *baton) {
+ std::lock_guard<std::mutex> guard(m_destroy_callback_mutex);
+ const lldb::callback_token_t token = m_destroy_callback_next_token++;
+ m_destroy_callbacks.emplace_back(token, destroy_callback, baton);
+ return token;
+}
+
+bool Debugger::RemoveDestroyCallback(lldb::callback_token_t token) {
+ std::lock_guard<std::mutex> guard(m_destroy_callback_mutex);
+ for (auto it = m_destroy_callbacks.begin(); it != m_destroy_callbacks.end();
+ ++it) {
+ if (it->token == token) {
+ m_destroy_callbacks.erase(it);
+ return true;
+ }
+ }
+ return false;
}
static void PrivateReportProgress(Debugger &debugger, uint64_t progress_id,
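The drain loop above holds m_destroy_callback_mutex only while popping, so a callback can safely re-enter AddDestroyCallback or RemoveDestroyCallback without deadlocking. The same pattern in a generic, self-contained form (own types, not lldb's):

    #include <functional>
    #include <mutex>
    #include <vector>

    // Generic drain-one-at-a-time pattern: hold the lock only while popping,
    // so the invoked callback may re-enter and register or remove entries.
    struct CallbackList {
      std::mutex mutex;
      std::vector<std::function<void()>> callbacks;

      void DrainAndInvoke() {
        while (true) {
          std::function<void()> cb;
          {
            std::lock_guard<std::mutex> guard(mutex);
            if (callbacks.empty())
              break;
            cb = std::move(callbacks.front());
            callbacks.erase(callbacks.begin());
          }
          cb(); // runs without the lock held
        }
      }
    };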
diff --git a/lldb/source/Core/ValueObject.cpp b/lldb/source/Core/ValueObject.cpp
index f39bd07a2553..1443d9dfc328 100644
--- a/lldb/source/Core/ValueObject.cpp
+++ b/lldb/source/Core/ValueObject.cpp
@@ -505,15 +505,23 @@ ValueObject *ValueObject::CreateChildAtIndex(size_t idx,
uint64_t language_flags = 0;
const bool transparent_pointers = !synthetic_array_member;
- CompilerType child_compiler_type;
ExecutionContext exe_ctx(GetExecutionContextRef());
- child_compiler_type = GetCompilerType().GetChildCompilerTypeAtIndex(
- &exe_ctx, idx, transparent_pointers, omit_empty_base_classes,
- ignore_array_bounds, child_name_str, child_byte_size, child_byte_offset,
- child_bitfield_bit_size, child_bitfield_bit_offset, child_is_base_class,
- child_is_deref_of_parent, this, language_flags);
+ auto child_compiler_type_or_err =
+ GetCompilerType().GetChildCompilerTypeAtIndex(
+ &exe_ctx, idx, transparent_pointers, omit_empty_base_classes,
+ ignore_array_bounds, child_name_str, child_byte_size,
+ child_byte_offset, child_bitfield_bit_size, child_bitfield_bit_offset,
+ child_is_base_class, child_is_deref_of_parent, this, language_flags);
+ CompilerType child_compiler_type;
+ if (!child_compiler_type_or_err)
+ LLDB_LOG_ERROR(GetLog(LLDBLog::Types),
+ child_compiler_type_or_err.takeError(),
+ "could not find child: {0}");
+ else
+ child_compiler_type = *child_compiler_type_or_err;
+
if (child_compiler_type) {
if (synthetic_index)
child_byte_offset += child_byte_size * synthetic_index;
@@ -2624,16 +2632,23 @@ ValueObjectSP ValueObject::Dereference(Status &error) {
bool child_is_deref_of_parent = false;
const bool transparent_pointers = false;
CompilerType compiler_type = GetCompilerType();
- CompilerType child_compiler_type;
uint64_t language_flags = 0;
ExecutionContext exe_ctx(GetExecutionContextRef());
- child_compiler_type = compiler_type.GetChildCompilerTypeAtIndex(
+ CompilerType child_compiler_type;
+ auto child_compiler_type_or_err = compiler_type.GetChildCompilerTypeAtIndex(
&exe_ctx, 0, transparent_pointers, omit_empty_base_classes,
ignore_array_bounds, child_name_str, child_byte_size, child_byte_offset,
child_bitfield_bit_size, child_bitfield_bit_offset, child_is_base_class,
child_is_deref_of_parent, this, language_flags);
+ if (!child_compiler_type_or_err)
+ LLDB_LOG_ERROR(GetLog(LLDBLog::Types),
+ child_compiler_type_or_err.takeError(),
+ "could not find child: {0}");
+ else
+ child_compiler_type = *child_compiler_type_or_err;
+
if (child_compiler_type && child_byte_size) {
ConstString child_name;
if (!child_name_str.empty())
diff --git a/lldb/source/Core/ValueObjectConstResultImpl.cpp b/lldb/source/Core/ValueObjectConstResultImpl.cpp
index e2db3ace1924..493980d7ea96 100644
--- a/lldb/source/Core/ValueObjectConstResultImpl.cpp
+++ b/lldb/source/Core/ValueObjectConstResultImpl.cpp
@@ -17,6 +17,8 @@
#include "lldb/Target/ExecutionContext.h"
#include "lldb/Utility/DataBufferHeap.h"
#include "lldb/Utility/Endian.h"
+#include "lldb/Utility/LLDBLog.h"
+#include "lldb/Utility/Log.h"
#include "lldb/Utility/Scalar.h"
#include <string>
@@ -66,15 +68,21 @@ ValueObject *ValueObjectConstResultImpl::CreateChildAtIndex(
const bool transparent_pointers = !synthetic_array_member;
CompilerType compiler_type = m_impl_backend->GetCompilerType();
- CompilerType child_compiler_type;
ExecutionContext exe_ctx(m_impl_backend->GetExecutionContextRef());
- child_compiler_type = compiler_type.GetChildCompilerTypeAtIndex(
+ auto child_compiler_type_or_err = compiler_type.GetChildCompilerTypeAtIndex(
&exe_ctx, idx, transparent_pointers, omit_empty_base_classes,
ignore_array_bounds, child_name_str, child_byte_size, child_byte_offset,
child_bitfield_bit_size, child_bitfield_bit_offset, child_is_base_class,
child_is_deref_of_parent, m_impl_backend, language_flags);
+ CompilerType child_compiler_type;
+ if (!child_compiler_type_or_err)
+ LLDB_LOG_ERROR(GetLog(LLDBLog::Types),
+ child_compiler_type_or_err.takeError(),
+ "could not find child: {0}");
+ else
+ child_compiler_type = *child_compiler_type_or_err;
// One might think we should check that the size of the children
// is always strictly positive, hence we could avoid creating a
diff --git a/lldb/source/Host/common/Socket.cpp b/lldb/source/Host/common/Socket.cpp
index bd0c127a0895..f9911cf136cb 100644
--- a/lldb/source/Host/common/Socket.cpp
+++ b/lldb/source/Host/common/Socket.cpp
@@ -87,8 +87,7 @@ llvm::Error Socket::Initialize() {
if (err == 0) {
if (wsaData.wVersion < wVersion) {
WSACleanup();
- return llvm::make_error<llvm::StringError>(
- "WSASock version is not expected.", llvm::inconvertibleErrorCode());
+ return llvm::createStringError("WSASock version is not expected.");
}
} else {
return llvm::errorCodeToError(llvm::mapWindowsError(::WSAGetLastError()));
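This hunk is one instance of a mechanical cleanup running through the whole patch: createStringError without an error code defaults to inconvertibleErrorCode(). The two spellings are equivalent:

    #include "llvm/Support/Error.h"

    // Both functions produce an Error wrapping a StringError with
    // inconvertibleErrorCode(); the patch rewrites the first spelling into
    // the shorter second one throughout.
    llvm::Error before() {
      return llvm::make_error<llvm::StringError>(
          "something went wrong", llvm::inconvertibleErrorCode());
    }

    llvm::Error after() {
      return llvm::createStringError("something went wrong");
    }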
diff --git a/lldb/source/Interpreter/CommandInterpreter.cpp b/lldb/source/Interpreter/CommandInterpreter.cpp
index 4c58ecc3c184..7f21f382adb8 100644
--- a/lldb/source/Interpreter/CommandInterpreter.cpp
+++ b/lldb/source/Interpreter/CommandInterpreter.cpp
@@ -51,6 +51,7 @@
#include "lldb/Utility/Log.h"
#include "lldb/Utility/State.h"
#include "lldb/Utility/Stream.h"
+#include "lldb/Utility/StructuredData.h"
#include "lldb/Utility/Timer.h"
#include "lldb/Host/Config.h"
@@ -161,6 +162,17 @@ void CommandInterpreter::SetPromptOnQuit(bool enable) {
SetPropertyAtIndex(idx, enable);
}
+bool CommandInterpreter::GetSaveTranscript() const {
+ const uint32_t idx = ePropertySaveTranscript;
+ return GetPropertyAtIndexAs<bool>(
+ idx, g_interpreter_properties[idx].default_uint_value != 0);
+}
+
+void CommandInterpreter::SetSaveTranscript(bool enable) {
+ const uint32_t idx = ePropertySaveTranscript;
+ SetPropertyAtIndex(idx, enable);
+}
+
bool CommandInterpreter::GetSaveSessionOnQuit() const {
const uint32_t idx = ePropertySaveSessionOnQuit;
return GetPropertyAtIndexAs<bool>(
@@ -816,11 +828,11 @@ void CommandInterpreter::LoadCommandDictionary() {
std::unique_ptr<CommandObjectRegexCommand> bt_regex_cmd_up(
new CommandObjectRegexCommand(
*this, "_regexp-bt",
- "Show the current thread's call stack. Any numeric argument "
- "displays at most that many "
- "frames. The argument 'all' displays all threads. Use 'settings"
- " set frame-format' to customize the printing of individual frames "
- "and 'settings set thread-format' to customize the thread header.",
+ "Show backtrace of the current thread's call stack. Any numeric "
+ "argument displays at most that many frames. The argument 'all' "
+ "displays all threads. Use 'settings set frame-format' to customize "
+ "the printing of individual frames and 'settings set thread-format' "
+ "to customize the thread header.",
"bt [<digit> | all]", 0, false));
if (bt_regex_cmd_up) {
// accept but don't document "bt -c <number>" -- before bt was a regex
@@ -1889,7 +1901,16 @@ bool CommandInterpreter::HandleCommand(const char *command_line,
else
add_to_history = (lazy_add_to_history == eLazyBoolYes);
- m_transcript_stream << "(lldb) " << command_line << '\n';
+ // The same `transcript_item` will be used below to add the output and
+ // error of the command.
+ StructuredData::DictionarySP transcript_item;
+ if (GetSaveTranscript()) {
+ m_transcript_stream << "(lldb) " << command_line << '\n';
+
+ transcript_item = std::make_shared<StructuredData::Dictionary>();
+ transcript_item->AddStringItem("command", command_line);
+ m_transcript.AddItem(transcript_item);
+ }
bool empty_command = false;
bool comment_command = false;
@@ -1994,7 +2015,7 @@ bool CommandInterpreter::HandleCommand(const char *command_line,
// Take care of things like setting up the history command & calling the
// appropriate Execute method on the CommandObject, with the appropriate
// arguments.
-
+ StatsDuration execute_time;
if (cmd_obj != nullptr) {
bool generate_repeat_command = add_to_history;
// If we got here when empty_command was true, then this command is a
@@ -2035,14 +2056,24 @@ bool CommandInterpreter::HandleCommand(const char *command_line,
log, "HandleCommand, command line after removing command name(s): '%s'",
remainder.c_str());
+ ElapsedTime elapsed(execute_time);
cmd_obj->Execute(remainder.c_str(), result);
}
LLDB_LOGF(log, "HandleCommand, command %s",
(result.Succeeded() ? "succeeded" : "did not succeed"));
- m_transcript_stream << result.GetOutputData();
- m_transcript_stream << result.GetErrorData();
+ // To decide whether the transcript should be saved, `transcript_item` is
+ // checked instead of `GetSaveTranscript()`. The latter is already true by
+ // this point when the command itself is
+ // "settings set interpreter.save-transcript true".
+ if (transcript_item) {
+ m_transcript_stream << result.GetOutputData();
+ m_transcript_stream << result.GetErrorData();
+
+ transcript_item->AddStringItem("output", result.GetOutputData());
+ transcript_item->AddStringItem("error", result.GetErrorData());
+ transcript_item->AddFloatItem("seconds", execute_time.get().count());
+ }
return result.Succeeded();
}
@@ -3554,3 +3585,7 @@ llvm::json::Value CommandInterpreter::GetStatistics() {
stats.try_emplace(command_usage.getKey(), command_usage.getValue());
return stats;
}
+
+const StructuredData::Array &CommandInterpreter::GetTranscript() const {
+ return m_transcript;
+}
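A standalone model of what one transcript element carries, mirroring the four documented keys; plain structs are used here for illustration, whereas the real storage is a StructuredData::Dictionary:

    #include <chrono>
    #include <string>
    #include <vector>

    // Standalone model of one transcript element; the real storage is a
    // StructuredData::Dictionary with exactly these keys.
    struct TranscriptItem {
      std::string command; // what the user typed
      std::string output;  // "" if the command produced no output
      std::string error;   // "" if the command produced no error
      double seconds;      // wall-clock execution time
    };

    // Sketch of the HandleCommand bookkeeping: record the command up front,
    // time the execution, then attach output/error/seconds to that entry.
    TranscriptItem RunAndRecord(const std::string &command_line,
                                std::vector<TranscriptItem> &transcript) {
      TranscriptItem item;
      item.command = command_line;
      auto start = std::chrono::steady_clock::now();
      // ... execute the command, capturing its output and error ...
      auto end = std::chrono::steady_clock::now();
      item.seconds = std::chrono::duration<double>(end - start).count();
      transcript.push_back(item);
      return item;
    }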
diff --git a/lldb/source/Interpreter/InterpreterProperties.td b/lldb/source/Interpreter/InterpreterProperties.td
index 2155ee61ccff..a5fccbbca091 100644
--- a/lldb/source/Interpreter/InterpreterProperties.td
+++ b/lldb/source/Interpreter/InterpreterProperties.td
@@ -9,6 +9,10 @@ let Definition = "interpreter" in {
Global,
DefaultTrue,
Desc<"If true, LLDB will prompt you before quitting if there are any live processes being debugged. If false, LLDB will quit without asking in any case.">;
+ def SaveTranscript: Property<"save-transcript", "Boolean">,
+ Global,
+ DefaultFalse,
+ Desc<"If true, commands will be saved into a transcript buffer for user access.">;
def SaveSessionOnQuit: Property<"save-session-on-quit", "Boolean">,
Global,
DefaultFalse,
diff --git a/lldb/source/Interpreter/Options.cpp b/lldb/source/Interpreter/Options.cpp
index 51b7e6b26b6e..4e7d074ace1b 100644
--- a/lldb/source/Interpreter/Options.cpp
+++ b/lldb/source/Interpreter/Options.cpp
@@ -931,8 +931,7 @@ llvm::Expected<Args> Options::ParseAlias(const Args &args,
Option *long_options = GetLongOptions();
if (long_options == nullptr) {
- return llvm::make_error<llvm::StringError>("Invalid long options",
- llvm::inconvertibleErrorCode());
+ return llvm::createStringError("Invalid long options");
}
std::string short_options = BuildShortOptions(long_options);
@@ -957,8 +956,7 @@ llvm::Expected<Args> Options::ParseAlias(const Args &args,
break;
if (val == '?') {
- return llvm::make_error<llvm::StringError>(
- "Unknown or ambiguous option", llvm::inconvertibleErrorCode());
+ return llvm::createStringError("Unknown or ambiguous option");
}
if (val == 0)
@@ -980,9 +978,8 @@ llvm::Expected<Args> Options::ParseAlias(const Args &args,
// See if the option takes an argument, and see if one was supplied.
if (long_options_index == -1) {
- return llvm::make_error<llvm::StringError>(
- llvm::formatv("Invalid option with value '{0}'.", char(val)).str(),
- llvm::inconvertibleErrorCode());
+ return llvm::createStringError(
+ llvm::formatv("Invalid option with value '{0}'.", char(val)).str());
}
StreamString option_str;
@@ -995,11 +992,10 @@ llvm::Expected<Args> Options::ParseAlias(const Args &args,
switch (has_arg) {
case OptionParser::eRequiredArgument:
if (OptionParser::GetOptionArgument() == nullptr) {
- return llvm::make_error<llvm::StringError>(
+ return llvm::createStringError(
llvm::formatv("Option '{0}' is missing argument specifier.",
option_str.GetString())
- .str(),
- llvm::inconvertibleErrorCode());
+ .str());
}
[[fallthrough]];
case OptionParser::eOptionalArgument:
@@ -1008,12 +1004,11 @@ llvm::Expected<Args> Options::ParseAlias(const Args &args,
case OptionParser::eNoArgument:
break;
default:
- return llvm::make_error<llvm::StringError>(
+ return llvm::createStringError(
llvm::formatv("error with options table; invalid value in has_arg "
"field for option '{0}'.",
char(val))
- .str(),
- llvm::inconvertibleErrorCode());
+ .str());
}
// Find option in the argument list; also see if it was supposed to take an
// argument and if one was supplied. Remove option (and argument, if
@@ -1261,8 +1256,7 @@ llvm::Expected<Args> Options::Parse(const Args &args,
Status error;
Option *long_options = GetLongOptions();
if (long_options == nullptr) {
- return llvm::make_error<llvm::StringError>("Invalid long options.",
- llvm::inconvertibleErrorCode());
+ return llvm::createStringError("Invalid long options.");
}
std::string short_options = BuildShortOptions(long_options);
@@ -1322,9 +1316,8 @@ llvm::Expected<Args> Options::Parse(const Args &args,
if (!platform_sp && require_validation) {
// Caller requires validation but we cannot validate as we don't have
// the mandatory platform against which to validate.
- return llvm::make_error<llvm::StringError>(
- "cannot validate options: no platform available",
- llvm::inconvertibleErrorCode());
+ return llvm::createStringError(
+ "cannot validate options: no platform available");
}
bool validation_failed = false;
diff --git a/lldb/source/Plugins/ABI/PowerPC/ABISysV_ppc64.cpp b/lldb/source/Plugins/ABI/PowerPC/ABISysV_ppc64.cpp
index 173b5613d1b8..eac058701313 100644
--- a/lldb/source/Plugins/ABI/PowerPC/ABISysV_ppc64.cpp
+++ b/lldb/source/Plugins/ABI/PowerPC/ABISysV_ppc64.cpp
@@ -501,14 +501,12 @@ public:
CompilerType &type) {
RegisterContext *reg_ctx = thread.GetRegisterContext().get();
if (!reg_ctx)
- return llvm::make_error<llvm::StringError>(
- LOG_PREFIX "Failed to get RegisterContext",
- llvm::inconvertibleErrorCode());
+ return llvm::createStringError(LOG_PREFIX
+ "Failed to get RegisterContext");
ProcessSP process_sp = thread.GetProcess();
if (!process_sp)
- return llvm::make_error<llvm::StringError>(
- LOG_PREFIX "GetProcess() failed", llvm::inconvertibleErrorCode());
+ return llvm::createStringError(LOG_PREFIX "GetProcess() failed");
return ReturnValueExtractor(thread, type, reg_ctx, process_sp);
}
@@ -836,7 +834,7 @@ private:
for (uint32_t i = 0; i < n; i++) {
std::string name;
uint32_t size;
- GetChildType(i, name, size);
+ (void)GetChildType(i, name, size);
// NOTE: the offset returned by GetChildCompilerTypeAtIndex()
// can't be used because it never considers alignment bytes
// between struct fields.
@@ -903,7 +901,8 @@ private:
}
// get child
- CompilerType GetChildType(uint32_t i, std::string &name, uint32_t &size) {
+ llvm::Expected<CompilerType> GetChildType(uint32_t i, std::string &name,
+ uint32_t &size) {
// GetChild constant inputs
const bool transparent_pointers = false;
const bool omit_empty_base_classes = true;
diff --git a/lldb/source/Plugins/Instruction/ARM64/EmulateInstructionARM64.cpp b/lldb/source/Plugins/Instruction/ARM64/EmulateInstructionARM64.cpp
index 6ca4fb052457..62ecac3e0831 100644
--- a/lldb/source/Plugins/Instruction/ARM64/EmulateInstructionARM64.cpp
+++ b/lldb/source/Plugins/Instruction/ARM64/EmulateInstructionARM64.cpp
@@ -444,6 +444,8 @@ bool EmulateInstructionARM64::CreateFunctionEntryUnwind(
// Our previous Call Frame Address is the stack pointer
row->GetCFAValue().SetIsRegisterPlusOffset(gpr_sp_arm64, 0);
+ row->SetRegisterLocationToSame(gpr_lr_arm64, /*must_replace=*/false);
+ row->SetRegisterLocationToSame(gpr_fp_arm64, /*must_replace=*/false);
unwind_plan.AppendRow(row);
unwind_plan.SetSourceName("EmulateInstructionARM64");
diff --git a/lldb/source/Plugins/Language/CPlusPlus/BlockPointer.cpp b/lldb/source/Plugins/Language/CPlusPlus/BlockPointer.cpp
index 9a6e135e0083..2c9b3c425397 100644
--- a/lldb/source/Plugins/Language/CPlusPlus/BlockPointer.cpp
+++ b/lldb/source/Plugins/Language/CPlusPlus/BlockPointer.cpp
@@ -12,6 +12,7 @@
#include "Plugins/ExpressionParser/Clang/ClangPersistentVariables.h"
#include "Plugins/TypeSystem/Clang/TypeSystemClang.h"
#include "lldb/Core/ValueObject.h"
+#include "lldb/Core/ValueObjectConstResult.h"
#include "lldb/DataFormatters/FormattersHelpers.h"
#include "lldb/Symbol/CompilerType.h"
#include "lldb/Symbol/TypeSystem.h"
@@ -105,13 +106,16 @@ public:
bool child_is_deref_of_parent = false;
uint64_t language_flags = 0;
- const CompilerType child_type =
- m_block_struct_type.GetChildCompilerTypeAtIndex(
- &exe_ctx, idx, transparent_pointers, omit_empty_base_classes,
- ignore_array_bounds, child_name, child_byte_size, child_byte_offset,
- child_bitfield_bit_size, child_bitfield_bit_offset,
- child_is_base_class, child_is_deref_of_parent, value_object,
- language_flags);
+ auto child_type_or_err = m_block_struct_type.GetChildCompilerTypeAtIndex(
+ &exe_ctx, idx, transparent_pointers, omit_empty_base_classes,
+ ignore_array_bounds, child_name, child_byte_size, child_byte_offset,
+ child_bitfield_bit_size, child_bitfield_bit_offset, child_is_base_class,
+ child_is_deref_of_parent, value_object, language_flags);
+ if (!child_type_or_err)
+ return ValueObjectConstResult::Create(
+ exe_ctx.GetBestExecutionContextScope(),
+ Status(child_type_or_err.takeError()));
+ CompilerType child_type = *child_type_or_err;
ValueObjectSP struct_pointer_sp =
m_backend.Cast(m_block_struct_type.GetPointerType());
diff --git a/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp b/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp
index e160fd076393..b0e6fb7d6f5a 100644
--- a/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp
+++ b/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp
@@ -1098,6 +1098,7 @@ LibcxxChronoTimePointSecondsSummaryProvider(ValueObject &valobj, Stream &stream,
if (!ptr_sp)
return false;
+#ifndef _WIN32
// The date time in the chrono library is valid in the range
// [-32767-01-01T00:00:00Z, 32767-12-31T23:59:59Z]. A 64-bit time_t has a
// larger range, the function strftime is not able to format the entire range
@@ -1107,6 +1108,11 @@ LibcxxChronoTimePointSecondsSummaryProvider(ValueObject &valobj, Stream &stream,
-1'096'193'779'200; // -32767-01-01T00:00:00Z
const std::time_t chrono_timestamp_max =
971'890'963'199; // 32767-12-31T23:59:59Z
+#else
+ const std::time_t chrono_timestamp_min = -43'200; // 1969-12-31T12:00:00Z
+ const std::time_t chrono_timestamp_max =
+ 32'536'850'399; // 3001-01-19T21:59:59Z
+#endif
const std::time_t seconds = ptr_sp->GetValueAsSigned(0);
if (seconds < chrono_timestamp_min || seconds > chrono_timestamp_max)
@@ -1148,12 +1154,17 @@ LibcxxChronoTimepointDaysSummaryProvider(ValueObject &valobj, Stream &stream,
if (!ptr_sp)
return false;
+#ifndef _WIN32
// The date time in the chrono library is valid in the range
// [-32767-01-01Z, 32767-12-31Z]. A 32-bit time_t has a larger range, the
// function strftime is not able to format the entire range of time_t. The
// exact point has not been investigated; it's limited to chrono's range.
const int chrono_timestamp_min = -12'687'428; // -32767-01-01Z
const int chrono_timestamp_max = 11'248'737; // 32767-12-31Z
+#else
+ const int chrono_timestamp_min = 0; // 1970-01-01Z
+ const int chrono_timestamp_max = 376'583; // 3001-01-19Z
+#endif
const int days = ptr_sp->GetValueAsSigned(0);
if (days < chrono_timestamp_min || days > chrono_timestamp_max)
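The clamped bounds can be sanity-checked off-line; a small sketch, assuming a 64-bit time_t and a gmtime implementation that covers the value (the expected output matches the comment above):

    #include <cstdio>
    #include <ctime>

    int main() {
      // The Windows clamp above: 32'536'850'399 is the last second the 64-bit
      // CRT time routines are documented to format.
      std::time_t t = 32536850399;
      if (std::tm *tm = std::gmtime(&t))
        std::printf("%04d-%02d-%02dT%02d:%02d:%02dZ\n", tm->tm_year + 1900,
                    tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min,
                    tm->tm_sec);
      return 0;
    }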
diff --git a/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp b/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp
index ec5b320e2218..0929d49e55ea 100644
--- a/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp
+++ b/lldb/source/Plugins/Language/CPlusPlus/LibCxxMap.cpp
@@ -295,13 +295,13 @@ void lldb_private::formatters::LibcxxStdMapSyntheticFrontEnd::GetValueOffset(
bool child_is_base_class;
bool child_is_deref_of_parent;
uint64_t language_flags;
- if (tree_node_type
- .GetChildCompilerTypeAtIndex(
- nullptr, 4, true, true, true, child_name, child_byte_size,
- child_byte_offset, child_bitfield_bit_size,
- child_bitfield_bit_offset, child_is_base_class,
- child_is_deref_of_parent, nullptr, language_flags)
- .IsValid())
+ auto child_type =
+ llvm::expectedToStdOptional(tree_node_type.GetChildCompilerTypeAtIndex(
+ nullptr, 4, true, true, true, child_name, child_byte_size,
+ child_byte_offset, child_bitfield_bit_size,
+ child_bitfield_bit_offset, child_is_base_class,
+ child_is_deref_of_parent, nullptr, language_flags));
+ if (child_type && child_type->IsValid())
m_skip_size = (uint32_t)child_byte_offset;
}
}
diff --git a/lldb/source/Plugins/Platform/POSIX/PlatformPOSIX.cpp b/lldb/source/Plugins/Platform/POSIX/PlatformPOSIX.cpp
index b4f1b76c39db..588b19dac616 100644
--- a/lldb/source/Plugins/Platform/POSIX/PlatformPOSIX.cpp
+++ b/lldb/source/Plugins/Platform/POSIX/PlatformPOSIX.cpp
@@ -678,8 +678,8 @@ uint32_t PlatformPOSIX::DoLoadImage(lldb_private::Process *process,
loaded_image->Clear();
std::string path;
- path = remote_file.GetPath();
-
+ path = remote_file.GetPath(false);
+
ThreadSP thread_sp = process->GetThreadList().GetExpressionExecutionThread();
if (!thread_sp) {
error.SetErrorString("dlopen error: no thread available to call dlopen.");
diff --git a/lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.cpp b/lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.cpp
index f561c21b9d91..77b4301ea22e 100644
--- a/lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.cpp
+++ b/lldb/source/Plugins/Process/NetBSD/NativeThreadNetBSD.cpp
@@ -180,8 +180,6 @@ void NativeThreadNetBSD::SetStepping() {
}
std::string NativeThreadNetBSD::GetName() {
- Log *log = GetLog(POSIXLog::Thread);
-
#ifdef PT_LWPSTATUS
struct ptrace_lwpstatus info = {};
info.pl_lwpid = m_tid;
@@ -193,6 +191,8 @@ std::string NativeThreadNetBSD::GetName() {
return info.pl_name;
#else
std::vector<struct kinfo_lwp> infos;
+ Log *log = GetLog(POSIXLog::Thread);
+
int mib[5] = {CTL_KERN, KERN_LWP, static_cast<int>(m_process.GetID()),
sizeof(struct kinfo_lwp), 0};
size_t size;
diff --git a/lldb/source/Plugins/Process/elf-core/ProcessElfCore.cpp b/lldb/source/Plugins/Process/elf-core/ProcessElfCore.cpp
index 36812c27a5b6..30af9345999c 100644
--- a/lldb/source/Plugins/Process/elf-core/ProcessElfCore.cpp
+++ b/lldb/source/Plugins/Process/elf-core/ProcessElfCore.cpp
@@ -250,6 +250,9 @@ Status ProcessElfCore::DoLoadCore() {
}
}
+ // Try to find the GNU build ID before we load the executable.
+ UpdateBuildIdForNTFileEntries();
+
// Core files are useless without the main executable. See if we can locate
// the main executable using data we found in the core file notes.
lldb::ModuleSP exe_module_sp = GetTarget().GetExecutableModule();
@@ -258,6 +261,7 @@ Status ProcessElfCore::DoLoadCore() {
if (!m_nt_file_entries.empty()) {
ModuleSpec exe_module_spec;
exe_module_spec.GetArchitecture() = arch;
+ exe_module_spec.GetUUID() = m_nt_file_entries[0].uuid;
exe_module_spec.GetFileSpec().SetFile(m_nt_file_entries[0].path,
FileSpec::Style::native);
if (exe_module_spec.GetFileSpec()) {
@@ -271,6 +275,12 @@ Status ProcessElfCore::DoLoadCore() {
return error;
}
+void ProcessElfCore::UpdateBuildIdForNTFileEntries() {
+ for (NT_FILE_Entry &entry : m_nt_file_entries) {
+ entry.uuid = FindBuidIdInCoreMemory(entry.start);
+ }
+}
+
lldb_private::DynamicLoader *ProcessElfCore::GetDynamicLoader() {
if (m_dyld_up.get() == nullptr)
m_dyld_up.reset(DynamicLoader::FindPlugin(
@@ -983,6 +993,67 @@ llvm::Error ProcessElfCore::ParseThreadContextsFromNoteSegment(
}
}
+UUID ProcessElfCore::FindBuidIdInCoreMemory(lldb::addr_t address) {
+ UUID invalid_uuid;
+ const uint32_t addr_size = GetAddressByteSize();
+ const size_t elf_header_size = addr_size == 4 ? sizeof(llvm::ELF::Elf32_Ehdr)
+ : sizeof(llvm::ELF::Elf64_Ehdr);
+
+ std::vector<uint8_t> elf_header_bytes;
+ elf_header_bytes.resize(elf_header_size);
+ Status error;
+ size_t byte_read =
+ ReadMemory(address, elf_header_bytes.data(), elf_header_size, error);
+ if (byte_read != elf_header_size ||
+ !elf::ELFHeader::MagicBytesMatch(elf_header_bytes.data()))
+ return invalid_uuid;
+ DataExtractor elf_header_data(elf_header_bytes.data(), elf_header_size,
+ GetByteOrder(), addr_size);
+ lldb::offset_t offset = 0;
+
+ elf::ELFHeader elf_header;
+ elf_header.Parse(elf_header_data, &offset);
+
+ const lldb::addr_t ph_addr = address + elf_header.e_phoff;
+
+ std::vector<uint8_t> ph_bytes;
+ ph_bytes.resize(elf_header.e_phentsize);
+ for (unsigned int i = 0; i < elf_header.e_phnum; ++i) {
+ byte_read = ReadMemory(ph_addr + i * elf_header.e_phentsize,
+ ph_bytes.data(), elf_header.e_phentsize, error);
+ if (byte_read != elf_header.e_phentsize)
+ break;
+ DataExtractor program_header_data(ph_bytes.data(), elf_header.e_phentsize,
+ GetByteOrder(), addr_size);
+ offset = 0;
+ elf::ELFProgramHeader program_header;
+ program_header.Parse(program_header_data, &offset);
+ if (program_header.p_type != llvm::ELF::PT_NOTE)
+ continue;
+
+ std::vector<uint8_t> note_bytes;
+ note_bytes.resize(program_header.p_memsz);
+
+ byte_read = ReadMemory(program_header.p_vaddr, note_bytes.data(),
+ program_header.p_memsz, error);
+ if (byte_read != program_header.p_memsz)
+ continue;
+ DataExtractor segment_data(note_bytes.data(), note_bytes.size(),
+ GetByteOrder(), addr_size);
+ auto notes_or_error = parseSegment(segment_data);
+ if (!notes_or_error)
+ return invalid_uuid;
+ for (const CoreNote &note : *notes_or_error) {
+ if (note.info.n_namesz == 4 &&
+ note.info.n_type == llvm::ELF::NT_GNU_BUILD_ID &&
+ "GNU" == note.info.n_name &&
+ note.data.ValidOffsetForDataOfSize(0, note.info.n_descsz))
+ return UUID(note.data.GetData().take_front(note.info.n_descsz));
+ }
+ }
+ return invalid_uuid;
+}
+
uint32_t ProcessElfCore::GetNumThreadContexts() {
if (!m_thread_data_valid)
DoLoadCore();
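FindBuidIdInCoreMemory walks ELF header, then program headers, then PT_NOTE segments, then individual notes. The final note-scanning step, as a standalone sketch over a raw host-endian buffer (the real code goes through ReadMemory and DataExtractor instead):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // An ELF note record: a fixed header, then name and descriptor, each
    // padded to a 4-byte boundary. For the GNU build ID, the name is "GNU"
    // (n_namesz == 4, including the NUL) and n_type is NT_GNU_BUILD_ID (3).
    struct NoteHeader {
      uint32_t n_namesz;
      uint32_t n_descsz;
      uint32_t n_type;
    };

    // Minimal sketch: scan a raw PT_NOTE buffer for the build ID bytes.
    static std::vector<uint8_t> FindBuildId(const uint8_t *data, size_t size) {
      size_t offset = 0;
      auto align4 = [](size_t v) { return (v + 3) & ~size_t(3); };
      while (offset + sizeof(NoteHeader) <= size) {
        NoteHeader hdr;
        std::memcpy(&hdr, data + offset, sizeof(hdr));
        offset += sizeof(hdr);
        const uint8_t *name = data + offset;
        offset += align4(hdr.n_namesz);
        const uint8_t *desc = data + offset;
        offset += align4(hdr.n_descsz);
        if (offset > size)
          break;
        if (hdr.n_type == 3 /*NT_GNU_BUILD_ID*/ && hdr.n_namesz == 4 &&
            std::memcmp(name, "GNU", 4) == 0)
          return std::vector<uint8_t>(desc, desc + hdr.n_descsz);
      }
      return {};
    }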
diff --git a/lldb/source/Plugins/Process/elf-core/ProcessElfCore.h b/lldb/source/Plugins/Process/elf-core/ProcessElfCore.h
index 2cec635bbacf..668a7c484674 100644
--- a/lldb/source/Plugins/Process/elf-core/ProcessElfCore.h
+++ b/lldb/source/Plugins/Process/elf-core/ProcessElfCore.h
@@ -117,6 +117,10 @@ private:
lldb::addr_t end;
lldb::addr_t file_ofs;
std::string path;
+ // Add a UUID member for convenient access. The UUID value is not in the
+ // NT_FILE entries; we find it in core memory and store it here for easy
+ // access.
+ lldb_private::UUID uuid;
};
// For ProcessElfCore only
@@ -158,6 +162,12 @@ private:
// Returns number of thread contexts stored in the core file
uint32_t GetNumThreadContexts();
+ // Populate the GNU build ID (stored as a UUID) for each NT_FILE entry
+ void UpdateBuildIdForNTFileEntries();
+
+ // Returns the NT_GNU_BUILD_ID note value, if any, found in the module
+ // loaded at the given start address
+ lldb_private::UUID FindBuidIdInCoreMemory(lldb::addr_t address);
+
// Parse a contiguous address range of the process from LOAD segment
lldb::addr_t
AddAddressRangeFromLoadSegment(const elf::ELFProgramHeader &header);
diff --git a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
index ce52f3595247..6e676de146b3 100644
--- a/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
+++ b/lldb/source/Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.cpp
@@ -2494,8 +2494,7 @@ bool ScriptInterpreterPythonImpl::LoadScriptingModule(
auto ExtendSysPath = [&](std::string directory) -> llvm::Error {
if (directory.empty()) {
- return llvm::make_error<llvm::StringError>(
- "invalid directory name", llvm::inconvertibleErrorCode());
+ return llvm::createStringError("invalid directory name");
}
replace_all(directory, "\\", "\\\\");
@@ -2508,10 +2507,8 @@ bool ScriptInterpreterPythonImpl::LoadScriptingModule(
directory.c_str(), directory.c_str());
bool syspath_retval =
ExecuteMultipleLines(command_stream.GetData(), exc_options).Success();
- if (!syspath_retval) {
- return llvm::make_error<llvm::StringError>(
- "Python sys.path handling failed", llvm::inconvertibleErrorCode());
- }
+ if (!syspath_retval)
+ return llvm::createStringError("Python sys.path handling failed");
return llvm::Error::success();
};
diff --git a/lldb/source/Plugins/SymbolFile/Breakpad/SymbolFileBreakpad.h b/lldb/source/Plugins/SymbolFile/Breakpad/SymbolFileBreakpad.h
index 83215bf3c87e..041b388f9f34 100644
--- a/lldb/source/Plugins/SymbolFile/Breakpad/SymbolFileBreakpad.h
+++ b/lldb/source/Plugins/SymbolFile/Breakpad/SymbolFileBreakpad.h
@@ -120,9 +120,8 @@ public:
llvm::Expected<lldb::TypeSystemSP>
GetTypeSystemForLanguage(lldb::LanguageType language) override {
- return llvm::make_error<llvm::StringError>(
- "SymbolFileBreakpad does not support GetTypeSystemForLanguage",
- llvm::inconvertibleErrorCode());
+ return llvm::createStringError(
+ "SymbolFileBreakpad does not support GetTypeSystemForLanguage");
}
CompilerDeclContext FindNamespace(ConstString name,
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.cpp
index 44febcfac3b0..d28da728728e 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.cpp
@@ -259,9 +259,3 @@ DWARFDebugInfo::GetDIE(const DIERef &die_ref) {
return cu->GetNonSkeletonUnit().GetDIE(die_ref.die_offset());
return DWARFDIE(); // Not found
}
-
-llvm::StringRef DWARFDebugInfo::PeekDIEName(const DIERef &die_ref) {
- if (DWARFUnit *cu = GetUnit(die_ref))
- return cu->GetNonSkeletonUnit().PeekDIEName(die_ref.die_offset());
- return llvm::StringRef();
-}
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.h b/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.h
index c1f0cb0203fb..456ebd908ccb 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.h
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.h
@@ -44,11 +44,6 @@ public:
bool ContainsTypeUnits();
DWARFDIE GetDIE(const DIERef &die_ref);
- /// Returns the AT_Name of this DIE, if it exists, without parsing the entire
- /// compile unit. An empty is string is returned upon error or if the
- /// attribute is not present.
- llvm::StringRef PeekDIEName(const DIERef &die_ref);
-
enum {
eDumpFlag_Verbose = (1 << 0), // Verbose dumping
eDumpFlag_ShowForm = (1 << 1), // Show the DW_form type
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfoEntry.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfoEntry.cpp
index 1b0fefedf983..688a287a0650 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfoEntry.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfoEntry.cpp
@@ -11,6 +11,7 @@
#include <cassert>
#include <algorithm>
+#include <limits>
#include <optional>
#include "llvm/Support/LEB128.h"
@@ -41,13 +42,23 @@ extern int g_verbose;
// Extract a debug info entry for a given DWARFUnit from the data
// starting at the offset in offset_ptr
bool DWARFDebugInfoEntry::Extract(const DWARFDataExtractor &data,
- const DWARFUnit *cu,
+ const DWARFUnit &unit,
lldb::offset_t *offset_ptr) {
m_offset = *offset_ptr;
+ auto report_error = [&](const char *fmt, const auto &...vals) {
+ unit.GetSymbolFileDWARF().GetObjectFile()->GetModule()->ReportError(
+ "[{0:x16}]: {1}, please file a bug and "
+ "attach the file at the start of this error message",
+ static_cast<uint64_t>(m_offset), llvm::formatv(fmt, vals...));
+ *offset_ptr = std::numeric_limits<lldb::offset_t>::max();
+ return false;
+ };
+
m_parent_idx = 0;
m_sibling_idx = 0;
const uint64_t abbr_idx = data.GetULEB128(offset_ptr);
- lldbassert(abbr_idx <= UINT16_MAX);
+ if (abbr_idx > std::numeric_limits<uint16_t>::max())
+ return report_error("abbreviation code {0} too big", abbr_idx);
m_abbr_idx = abbr_idx;
if (m_abbr_idx == 0) {
@@ -56,31 +67,18 @@ bool DWARFDebugInfoEntry::Extract(const DWARFDataExtractor &data,
return true; // NULL debug tag entry
}
- const auto *abbrevDecl = GetAbbreviationDeclarationPtr(cu);
- if (abbrevDecl == nullptr) {
- cu->GetSymbolFileDWARF().GetObjectFile()->GetModule()->ReportError(
- "[{0:x16}]: invalid abbreviation code {1}, "
- "please file a bug and "
- "attach the file at the start of this error message",
- (uint64_t)m_offset, (unsigned)abbr_idx);
- // WE can't parse anymore if the DWARF is borked...
- *offset_ptr = UINT32_MAX;
- return false;
- }
+ const auto *abbrevDecl = GetAbbreviationDeclarationPtr(&unit);
+ if (abbrevDecl == nullptr)
+ return report_error("invalid abbreviation code {0}", abbr_idx);
+
m_tag = abbrevDecl->getTag();
m_has_children = abbrevDecl->hasChildren();
// Skip all data in the .debug_info or .debug_types for the attributes
for (const auto &attribute : abbrevDecl->attributes()) {
- if (DWARFFormValue::SkipValue(attribute.Form, data, offset_ptr, cu))
+ if (DWARFFormValue::SkipValue(attribute.Form, data, offset_ptr, &unit))
continue;
- cu->GetSymbolFileDWARF().GetObjectFile()->GetModule()->ReportError(
- "[{0:x16}]: Unsupported DW_FORM_{1:x}, please file a bug "
- "and "
- "attach the file at the start of this error message",
- (uint64_t)m_offset, (unsigned)attribute.Form);
- *offset_ptr = m_offset;
- return false;
+ return report_error("Unsupported DW_FORM_{1:x}", attribute.Form);
}
return true;
}
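The report_error helper above folds three near-identical error paths into one variadic generic lambda. The same shape in a self-contained sketch using llvm::formatv:

    #include "llvm/Support/FormatVariadic.h"
    #include "llvm/Support/raw_ostream.h"

    #include <cstdint>

    // Same shape as the report_error helper: capture the shared context once,
    // format the caller-specific part with formatv, and return the failure
    // sentinel so callers can write `return report_error(...)`.
    static bool Extract(uint64_t offset, uint64_t abbr_idx) {
      auto report_error = [&](const char *fmt, const auto &...vals) {
        llvm::errs() << llvm::formatv("[{0:x16}]: ", offset)
                     << llvm::formatv(fmt, vals...) << "\n";
        return false;
      };
      if (abbr_idx > UINT16_MAX)
        return report_error("abbreviation code {0} too big", abbr_idx);
      return true;
    }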
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfoEntry.h b/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfoEntry.h
index c19fa7428549..6773b00e8206 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfoEntry.h
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfoEntry.h
@@ -49,7 +49,7 @@ public:
void BuildFunctionAddressRangeTable(DWARFUnit *cu,
DWARFDebugAranges *debug_aranges) const;
- bool Extract(const DWARFDataExtractor &data, const DWARFUnit *cu,
+ bool Extract(const DWARFDataExtractor &data, const DWARFUnit &cu,
lldb::offset_t *offset_ptr);
using Recurse = DWARFBaseDIE::Recurse;
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFUnit.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DWARFUnit.cpp
index 3a57ec970b07..66a762bf9b68 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFUnit.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFUnit.cpp
@@ -63,7 +63,7 @@ void DWARFUnit::ExtractUnitDIENoDwoIfNeeded() {
// parse
const DWARFDataExtractor &data = GetData();
if (offset < GetNextUnitOffset() &&
- m_first_die.Extract(data, this, &offset)) {
+ m_first_die.Extract(data, *this, &offset)) {
AddUnitDIE(m_first_die);
return;
}
@@ -242,7 +242,7 @@ void DWARFUnit::ExtractDIEsRWLocked() {
die_index_stack.reserve(32);
die_index_stack.push_back(0);
bool prev_die_had_children = false;
- while (offset < next_cu_offset && die.Extract(data, this, &offset)) {
+ while (offset < next_cu_offset && die.Extract(data, *this, &offset)) {
const bool null_die = die.IsNULL();
if (depth == 0) {
assert(m_die_array.empty() && "Compile unit DIE already added");
@@ -670,7 +670,7 @@ DWARFUnit::GetDIE(dw_offset_t die_offset) {
llvm::StringRef DWARFUnit::PeekDIEName(dw_offset_t die_offset) {
DWARFDebugInfoEntry die;
- if (!die.Extract(GetData(), this, &die_offset))
+ if (!die.Extract(GetData(), *this, &die_offset))
return llvm::StringRef();
// Does die contain a DW_AT_Name?
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp
index 4da0d56fdcac..79400e36e04f 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp
@@ -9,7 +9,7 @@
#include "Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h"
#include "Plugins/SymbolFile/DWARF/DWARFDebugInfo.h"
#include "Plugins/SymbolFile/DWARF/DWARFDeclContext.h"
-#include "Plugins/SymbolFile/DWARF/SymbolFileDWARFDwo.h"
+#include "Plugins/SymbolFile/DWARF/LogChannelDWARF.h"
#include "lldb/Core/Module.h"
#include "lldb/Utility/RegularExpression.h"
#include "lldb/Utility/Stream.h"
@@ -48,26 +48,30 @@ DebugNamesDWARFIndex::GetUnits(const DebugNames &debug_names) {
return result;
}
-std::optional<DIERef>
-DebugNamesDWARFIndex::ToDIERef(const DebugNames::Entry &entry) const {
+DWARFUnit *
+DebugNamesDWARFIndex::GetNonSkeletonUnit(const DebugNames::Entry &entry) const {
// Look for a DWARF unit offset (CU offset or local TU offset) as they are
// both offsets into the .debug_info section.
std::optional<uint64_t> unit_offset = entry.getCUOffset();
if (!unit_offset) {
unit_offset = entry.getLocalTUOffset();
if (!unit_offset)
- return std::nullopt;
+ return nullptr;
}
DWARFUnit *cu =
m_debug_info.GetUnitAtOffset(DIERef::Section::DebugInfo, *unit_offset);
- if (!cu)
- return std::nullopt;
+ return cu ? &cu->GetNonSkeletonUnit() : nullptr;
+}
- cu = &cu->GetNonSkeletonUnit();
+std::optional<DIERef>
+DebugNamesDWARFIndex::ToDIERef(const DebugNames::Entry &entry) const {
+ DWARFUnit *unit = GetNonSkeletonUnit(entry);
+ if (!unit)
+ return std::nullopt;
if (std::optional<uint64_t> die_offset = entry.getDIEUnitOffset())
- return DIERef(cu->GetSymbolFileDWARF().GetFileIndex(),
- DIERef::Section::DebugInfo, cu->GetOffset() + *die_offset);
+ return DIERef(unit->GetSymbolFileDWARF().GetFileIndex(),
+ DIERef::Section::DebugInfo, unit->GetOffset() + *die_offset);
return std::nullopt;
}
@@ -306,10 +310,10 @@ bool DebugNamesDWARFIndex::SameParentChain(
auto maybe_dieoffset = entry.getDIEUnitOffset();
if (!maybe_dieoffset)
return false;
- auto die_ref = ToDIERef(entry);
- if (!die_ref)
+ DWARFUnit *unit = GetNonSkeletonUnit(entry);
+ if (!unit)
return false;
- return name == m_debug_info.PeekDIEName(*die_ref);
+ return name == unit->PeekDIEName(unit->GetOffset() + *maybe_dieoffset);
};
// If the AT_name of any parent fails to match the expected name, we don't
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h b/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h
index b54dd1162d20..81fb8f88b805 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h
@@ -10,7 +10,6 @@
#define LLDB_SOURCE_PLUGINS_SYMBOLFILE_DWARF_DEBUGNAMESDWARFINDEX_H
#include "Plugins/SymbolFile/DWARF/DWARFIndex.h"
-#include "Plugins/SymbolFile/DWARF/LogChannelDWARF.h"
#include "Plugins/SymbolFile/DWARF/ManualDWARFIndex.h"
#include "Plugins/SymbolFile/DWARF/SymbolFileDWARF.h"
#include "lldb/Utility/ConstString.h"
@@ -84,6 +83,7 @@ private:
std::unique_ptr<DebugNames> m_debug_names_up;
ManualDWARFIndex m_fallback;
+ DWARFUnit *GetNonSkeletonUnit(const DebugNames::Entry &entry) const;
std::optional<DIERef> ToDIERef(const DebugNames::Entry &entry) const;
bool ProcessEntry(const DebugNames::Entry &entry,
llvm::function_ref<bool(DWARFDIE die)> callback);
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
index 582d9eac3e1d..369ae46cf264 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
@@ -5272,8 +5272,7 @@ TypeSystemClang::GetNumChildren(lldb::opaque_compiler_type_t type,
bool omit_empty_base_classes,
const ExecutionContext *exe_ctx) {
if (!type)
- return llvm::make_error<llvm::StringError>("invalid clang type",
- llvm::inconvertibleErrorCode());
+ return llvm::createStringError("invalid clang type");
uint32_t num_children = 0;
clang::QualType qual_type(RemoveWrappingTypes(GetQualType(type)));
@@ -5331,9 +5330,8 @@ TypeSystemClang::GetNumChildren(lldb::opaque_compiler_type_t type,
num_children += std::distance(record_decl->field_begin(),
record_decl->field_end());
} else
- return llvm::make_error<llvm::StringError>(
- "incomplete type \"" + GetDisplayTypeName(type).GetString() + "\"",
- llvm::inconvertibleErrorCode());
+ return llvm::createStringError(
+ "incomplete type \"" + GetDisplayTypeName(type).GetString() + "\"");
break;
case clang::Type::ObjCObject:
case clang::Type::ObjCInterface:
@@ -6130,7 +6128,7 @@ uint32_t TypeSystemClang::GetNumPointeeChildren(clang::QualType type) {
return 0;
}
-CompilerType TypeSystemClang::GetChildCompilerTypeAtIndex(
+llvm::Expected<CompilerType> TypeSystemClang::GetChildCompilerTypeAtIndex(
lldb::opaque_compiler_type_t type, ExecutionContext *exe_ctx, size_t idx,
bool transparent_pointers, bool omit_empty_base_classes,
bool ignore_array_bounds, std::string &child_name,
@@ -6156,11 +6154,8 @@ CompilerType TypeSystemClang::GetChildCompilerTypeAtIndex(
auto num_children_or_err =
GetNumChildren(type, omit_empty_base_classes, exe_ctx);
- if (!num_children_or_err) {
- LLDB_LOG_ERRORV(GetLog(LLDBLog::Types), num_children_or_err.takeError(),
- "{0}");
- return {};
- }
+ if (!num_children_or_err)
+ return num_children_or_err.takeError();
const bool idx_is_valid = idx < *num_children_or_err;
int32_t bit_offset;
@@ -6242,7 +6237,8 @@ CompilerType TypeSystemClang::GetChildCompilerTypeAtIndex(
std::optional<uint64_t> size =
base_class_clang_type.GetBitSize(get_exe_scope());
if (!size)
- return {};
+ return llvm::createStringError("no size info for base class");
+
uint64_t base_class_clang_type_bit_size = *size;
// Base classes bit sizes should be a multiple of 8 bits in size
@@ -6274,7 +6270,8 @@ CompilerType TypeSystemClang::GetChildCompilerTypeAtIndex(
std::optional<uint64_t> size =
field_clang_type.GetByteSize(get_exe_scope());
if (!size)
- return {};
+ return llvm::createStringError("no size info for field");
+
child_byte_size = *size;
const uint32_t child_bit_size = child_byte_size * 8;
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
index 042379d40bcb..d67b7a4c9fe7 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
@@ -887,7 +887,7 @@ public:
static uint32_t GetNumPointeeChildren(clang::QualType type);
- CompilerType GetChildCompilerTypeAtIndex(
+ llvm::Expected<CompilerType> GetChildCompilerTypeAtIndex(
lldb::opaque_compiler_type_t type, ExecutionContext *exe_ctx, size_t idx,
bool transparent_pointers, bool omit_empty_base_classes,
bool ignore_array_bounds, std::string &child_name,
diff --git a/lldb/source/Plugins/UnwindAssembly/InstEmulation/UnwindAssemblyInstEmulation.cpp b/lldb/source/Plugins/UnwindAssembly/InstEmulation/UnwindAssemblyInstEmulation.cpp
index c4a171ec7d01..49edd40544e3 100644
--- a/lldb/source/Plugins/UnwindAssembly/InstEmulation/UnwindAssemblyInstEmulation.cpp
+++ b/lldb/source/Plugins/UnwindAssembly/InstEmulation/UnwindAssemblyInstEmulation.cpp
@@ -424,8 +424,6 @@ size_t UnwindAssemblyInstEmulation::WriteMemory(
log->PutString(strm.GetString());
}
- const bool cant_replace = false;
-
switch (context.type) {
default:
case EmulateInstruction::eContextInvalid:
@@ -467,7 +465,7 @@ size_t UnwindAssemblyInstEmulation::WriteMemory(
m_pushed_regs[reg_num] = addr;
const int32_t offset = addr - m_initial_sp;
m_curr_row->SetRegisterLocationToAtCFAPlusOffset(reg_num, offset,
- cant_replace);
+ /*can_replace=*/true);
m_curr_row_modified = true;
}
}
diff --git a/lldb/source/Symbol/CompilerType.cpp b/lldb/source/Symbol/CompilerType.cpp
index 072dbccec44f..f8da9ef7b764 100644
--- a/lldb/source/Symbol/CompilerType.cpp
+++ b/lldb/source/Symbol/CompilerType.cpp
@@ -805,8 +805,7 @@ CompilerType::GetNumChildren(bool omit_empty_base_classes,
if (auto type_system_sp = GetTypeSystem())
return type_system_sp->GetNumChildren(m_type, omit_empty_base_classes,
exe_ctx);
- return llvm::make_error<llvm::StringError>("invalid type",
- llvm::inconvertibleErrorCode());
+ return llvm::createStringError("invalid type");
}
lldb::BasicType CompilerType::GetBasicTypeEnumeration() const {
@@ -902,7 +901,7 @@ uint32_t CompilerType::GetIndexOfFieldWithName(
return UINT32_MAX;
}
-CompilerType CompilerType::GetChildCompilerTypeAtIndex(
+llvm::Expected<CompilerType> CompilerType::GetChildCompilerTypeAtIndex(
ExecutionContext *exe_ctx, size_t idx, bool transparent_pointers,
bool omit_empty_base_classes, bool ignore_array_bounds,
std::string &child_name, uint32_t &child_byte_size,
diff --git a/lldb/source/Symbol/Symbol.cpp b/lldb/source/Symbol/Symbol.cpp
index 1895f299cc06..9b0042ffdb4b 100644
--- a/lldb/source/Symbol/Symbol.cpp
+++ b/lldb/source/Symbol/Symbol.cpp
@@ -101,18 +101,15 @@ const Symbol &Symbol::operator=(const Symbol &rhs) {
llvm::Expected<Symbol> Symbol::FromJSON(const JSONSymbol &symbol,
SectionList *section_list) {
if (!section_list)
- return llvm::make_error<llvm::StringError>("no section list provided",
- llvm::inconvertibleErrorCode());
+ return llvm::createStringError("no section list provided");
if (!symbol.value && !symbol.address)
- return llvm::make_error<llvm::StringError>(
- "symbol must contain either a value or an address",
- llvm::inconvertibleErrorCode());
+ return llvm::createStringError(
+ "symbol must contain either a value or an address");
if (symbol.value && symbol.address)
- return llvm::make_error<llvm::StringError>(
- "symbol cannot contain both a value and an address",
- llvm::inconvertibleErrorCode());
+ return llvm::createStringError(
+ "symbol cannot contain both a value and an address");
const uint64_t size = symbol.size.value_or(0);
const bool is_artificial = false;
@@ -133,9 +130,8 @@ llvm::Expected<Symbol> Symbol::FromJSON(const JSONSymbol &symbol,
AddressRange(section_sp, offset, size), size_is_valid,
contains_linker_annotations, flags);
}
- return llvm::make_error<llvm::StringError>(
- llvm::formatv("no section found for address: {0:x}", *symbol.address),
- llvm::inconvertibleErrorCode());
+ return llvm::createStringError(
+ llvm::formatv("no section found for address: {0:x}", *symbol.address));
}
// Absolute symbols encode the integer value in the m_offset of the
diff --git a/lldb/source/Symbol/SymbolFileOnDemand.cpp b/lldb/source/Symbol/SymbolFileOnDemand.cpp
index c6d9f0071c39..0cfe9fc1514b 100644
--- a/lldb/source/Symbol/SymbolFileOnDemand.cpp
+++ b/lldb/source/Symbol/SymbolFileOnDemand.cpp
@@ -457,9 +457,8 @@ SymbolFileOnDemand::GetTypeSystemForLanguage(LanguageType language) {
Log *log = GetLog();
LLDB_LOG(log, "[{0}] {1} is skipped for language type {2}",
GetSymbolFileName(), __FUNCTION__, language);
- return llvm::make_error<llvm::StringError>(
- "GetTypeSystemForLanguage is skipped by SymbolFileOnDemand",
- llvm::inconvertibleErrorCode());
+ return llvm::createStringError(
+ "GetTypeSystemForLanguage is skipped by SymbolFileOnDemand");
}
return m_sym_file_impl->GetTypeSystemForLanguage(language);
}
diff --git a/lldb/source/Symbol/TypeSystem.cpp b/lldb/source/Symbol/TypeSystem.cpp
index 3665771b1889..4956f10a0b0a 100644
--- a/lldb/source/Symbol/TypeSystem.cpp
+++ b/lldb/source/Symbol/TypeSystem.cpp
@@ -267,9 +267,8 @@ llvm::Expected<lldb::TypeSystemSP> TypeSystemMap::GetTypeSystemForLanguage(
std::optional<CreateCallback> create_callback) {
std::lock_guard<std::mutex> guard(m_mutex);
if (m_clear_in_progress)
- return llvm::make_error<llvm::StringError>(
- "Unable to get TypeSystem because TypeSystemMap is being cleared",
- llvm::inconvertibleErrorCode());
+ return llvm::createStringError(
+ "Unable to get TypeSystem because TypeSystemMap is being cleared");
collection::iterator pos = m_map.find(language);
if (pos != m_map.end()) {
@@ -277,11 +276,10 @@ llvm::Expected<lldb::TypeSystemSP> TypeSystemMap::GetTypeSystemForLanguage(
assert(!pos->second->weak_from_this().expired());
return pos->second;
}
- return llvm::make_error<llvm::StringError>(
+ return llvm::createStringError(
"TypeSystem for language " +
- llvm::StringRef(Language::GetNameForLanguageType(language)) +
- " doesn't exist",
- llvm::inconvertibleErrorCode());
+ llvm::StringRef(Language::GetNameForLanguageType(language)) +
+ " doesn't exist");
}
for (const auto &pair : m_map) {
@@ -291,31 +289,27 @@ llvm::Expected<lldb::TypeSystemSP> TypeSystemMap::GetTypeSystemForLanguage(
m_map[language] = pair.second;
if (pair.second)
return pair.second;
- return llvm::make_error<llvm::StringError>(
+ return llvm::createStringError(
"TypeSystem for language " +
- llvm::StringRef(Language::GetNameForLanguageType(language)) +
- " doesn't exist",
- llvm::inconvertibleErrorCode());
+ llvm::StringRef(Language::GetNameForLanguageType(language)) +
+ " doesn't exist");
}
}
if (!create_callback)
- return llvm::make_error<llvm::StringError>(
+ return llvm::createStringError(
"Unable to find type system for language " +
- llvm::StringRef(Language::GetNameForLanguageType(language)),
- llvm::inconvertibleErrorCode());
-
+ llvm::StringRef(Language::GetNameForLanguageType(language)));
// Cache even if we get a shared pointer that contains a null type system
// back.
TypeSystemSP type_system_sp = (*create_callback)();
m_map[language] = type_system_sp;
if (type_system_sp)
return type_system_sp;
- return llvm::make_error<llvm::StringError>(
+ return llvm::createStringError(
"TypeSystem for language " +
- llvm::StringRef(Language::GetNameForLanguageType(language)) +
- " doesn't exist",
- llvm::inconvertibleErrorCode());
+ llvm::StringRef(Language::GetNameForLanguageType(language)) +
+ " doesn't exist");
}
llvm::Expected<lldb::TypeSystemSP>
diff --git a/lldb/source/Target/RegisterContextUnwind.cpp b/lldb/source/Target/RegisterContextUnwind.cpp
index 13e101413a47..e2d712cb72ea 100644
--- a/lldb/source/Target/RegisterContextUnwind.cpp
+++ b/lldb/source/Target/RegisterContextUnwind.cpp
@@ -1555,12 +1555,12 @@ RegisterContextUnwind::SavedLocationForRegister(
}
if (unwindplan_regloc.IsSame()) {
- if (!IsFrameZero() &&
+ if (!m_all_registers_available &&
(regnum.GetAsKind(eRegisterKindGeneric) == LLDB_REGNUM_GENERIC_PC ||
regnum.GetAsKind(eRegisterKindGeneric) == LLDB_REGNUM_GENERIC_RA)) {
UnwindLogMsg("register %s (%d) is marked as 'IsSame' - it is a pc or "
- "return address reg on a non-zero frame -- treat as if we "
- "have no information",
+ "return address reg on a frame which does not have all "
+ "registers available -- treat as if we have no information",
regnum.GetName(), regnum.GetAsKind(eRegisterKindLLDB));
return UnwindLLDB::RegisterSearchResult::eRegisterNotFound;
} else {
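The guard above is generalized from "only on non-zero frames" to "whenever the frame does not have all registers available". A condensed, illustrative restatement of the new condition (the generic register constants come from lldb/lldb-defines.h; the helper name is made up):

    #include "lldb/lldb-defines.h"
    #include <cstdint>

    // An "IsSame" unwind rule for the pc or return-address register is only
    // trusted when the frame has full register availability (e.g. frame zero
    // or a trap-handler frame); otherwise treat it as no information.
    bool distrustIsSameRule(bool all_registers_available,
                            uint32_t generic_regnum) {
      return !all_registers_available &&
             (generic_regnum == LLDB_REGNUM_GENERIC_PC ||
              generic_regnum == LLDB_REGNUM_GENERIC_RA);
    }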
diff --git a/lldb/source/Target/Target.cpp b/lldb/source/Target/Target.cpp
index 77731167995e..ec0da8a1378a 100644
--- a/lldb/source/Target/Target.cpp
+++ b/lldb/source/Target/Target.cpp
@@ -2414,8 +2414,7 @@ llvm::Expected<lldb::TypeSystemSP>
Target::GetScratchTypeSystemForLanguage(lldb::LanguageType language,
bool create_on_demand) {
if (!m_valid)
- return llvm::make_error<llvm::StringError>("Invalid Target",
- llvm::inconvertibleErrorCode());
+ return llvm::createStringError("Invalid Target");
if (language == eLanguageTypeMipsAssembler // GNU AS and LLVM use it for all
// assembly code
@@ -2428,9 +2427,8 @@ Target::GetScratchTypeSystemForLanguage(lldb::LanguageType language,
// target language.
} else {
if (languages_for_expressions.Empty())
- return llvm::make_error<llvm::StringError>(
- "No expression support for any languages",
- llvm::inconvertibleErrorCode());
+ return llvm::createStringError(
+ "No expression support for any languages");
language = (LanguageType)languages_for_expressions.bitvector.find_first();
}
}
@@ -2574,23 +2572,20 @@ Target::CreateUtilityFunction(std::string expression, std::string name,
return type_system_or_err.takeError();
auto ts = *type_system_or_err;
if (!ts)
- return llvm::make_error<llvm::StringError>(
+ return llvm::createStringError(
llvm::StringRef("Type system for language ") +
- Language::GetNameForLanguageType(language) +
- llvm::StringRef(" is no longer live"),
- llvm::inconvertibleErrorCode());
+ Language::GetNameForLanguageType(language) +
+ llvm::StringRef(" is no longer live"));
std::unique_ptr<UtilityFunction> utility_fn =
ts->CreateUtilityFunction(std::move(expression), std::move(name));
if (!utility_fn)
- return llvm::make_error<llvm::StringError>(
+ return llvm::createStringError(
llvm::StringRef("Could not create an expression for language") +
- Language::GetNameForLanguageType(language),
- llvm::inconvertibleErrorCode());
+ Language::GetNameForLanguageType(language));
DiagnosticManager diagnostics;
if (!utility_fn->Install(diagnostics, exe_ctx))
- return llvm::make_error<llvm::StringError>(diagnostics.GetString(),
- llvm::inconvertibleErrorCode());
+ return llvm::createStringError(diagnostics.GetString());
return std::move(utility_fn);
}
@@ -2621,8 +2616,7 @@ void Target::SetDefaultArchitecture(const ArchSpec &arch) {
llvm::Error Target::SetLabel(llvm::StringRef label) {
size_t n = LLDB_INVALID_INDEX32;
if (llvm::to_integer(label, n))
- return llvm::make_error<llvm::StringError>(
- "Cannot use integer as target label.", llvm::inconvertibleErrorCode());
+ return llvm::createStringError("Cannot use integer as target label.");
TargetList &targets = GetDebugger().GetTargetList();
for (size_t i = 0; i < targets.GetNumTargets(); i++) {
TargetSP target_sp = targets.GetTargetAtIndex(i);
@@ -2790,15 +2784,13 @@ llvm::Expected<lldb_private::Address> Target::GetEntryPointAddress() {
// We haven't found the entry point address. Return an appropriate error.
if (!has_primary_executable)
- return llvm::make_error<llvm::StringError>(
+ return llvm::createStringError(
"No primary executable found and could not find entry point address in "
- "any executable module",
- llvm::inconvertibleErrorCode());
+ "any executable module");
- return llvm::make_error<llvm::StringError>(
+ return llvm::createStringError(
"Could not find entry point address for primary executable module \"" +
- exe_module->GetFileSpec().GetFilename().GetStringRef() + "\"",
- llvm::inconvertibleErrorCode());
+ exe_module->GetFileSpec().GetFilename().GetStringRef() + "\"");
}
lldb::addr_t Target::GetCallableLoadAddress(lldb::addr_t load_addr,
diff --git a/lldb/source/Utility/Status.cpp b/lldb/source/Utility/Status.cpp
index 3bd00bb20da2..18312e87f03e 100644
--- a/lldb/source/Utility/Status.cpp
+++ b/lldb/source/Utility/Status.cpp
@@ -92,8 +92,7 @@ llvm::Error Status::ToError() const {
if (m_type == ErrorType::eErrorTypePOSIX)
return llvm::errorCodeToError(
std::error_code(m_code, std::generic_category()));
- return llvm::make_error<llvm::StringError>(AsCString(),
- llvm::inconvertibleErrorCode());
+ return llvm::createStringError(AsCString());
}
Status::~Status() = default;
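Every replacement in this patch leans on the same idiom: the single-argument llvm::createStringError overload defaults the error code to llvm::inconvertibleErrorCode(), making the short form behavior-identical to the old make_error<StringError> spelling. A minimal sketch; makeDemoError is illustrative:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/Twine.h"
    #include "llvm/Support/Error.h"

    llvm::Error makeDemoError(llvm::StringRef what) {
      // Before:
      //   return llvm::make_error<llvm::StringError>(
      //       llvm::Twine("failed: ") + what, llvm::inconvertibleErrorCode());
      // After: same StringError, same error code, less boilerplate.
      return llvm::createStringError(llvm::Twine("failed: ") + what);
    }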
diff --git a/lldb/test/API/CMakeLists.txt b/lldb/test/API/CMakeLists.txt
index 9196f54ce1ae..856beb894208 100644
--- a/lldb/test/API/CMakeLists.txt
+++ b/lldb/test/API/CMakeLists.txt
@@ -1,4 +1,5 @@
add_custom_target(lldb-api-test-deps)
+set_target_properties(lldb-api-test-deps PROPERTIES FOLDER "LLDB/Tests")
add_dependencies(lldb-api-test-deps lldb-test-depends)
add_lit_testsuites(LLDB-API
diff --git a/lldb/test/API/commands/session/save/TestSessionSave.py b/lldb/test/API/commands/session/save/TestSessionSave.py
index 172a76452304..98985c66010b 100644
--- a/lldb/test/API/commands/session/save/TestSessionSave.py
+++ b/lldb/test/API/commands/session/save/TestSessionSave.py
@@ -25,6 +25,12 @@ class SessionSaveTestCase(TestBase):
raw = ""
interpreter = self.dbg.GetCommandInterpreter()
+ # Make sure "save-transcript" is on, so that all the following setings
+ # and commands are saved into the trasncript. Note that this cannot be
+ # a part of the `settings`, because this command itself won't be saved
+ # into the transcript.
+ self.runCmd("settings set interpreter.save-transcript true")
+
settings = [
"settings set interpreter.echo-commands true",
"settings set interpreter.echo-comment-commands true",
@@ -95,6 +101,12 @@ class SessionSaveTestCase(TestBase):
raw = ""
interpreter = self.dbg.GetCommandInterpreter()
+ # Make sure "save-transcript" is on, so that all the following setings
+ # and commands are saved into the trasncript. Note that this cannot be
+ # a part of the `settings`, because this command itself won't be saved
+ # into the transcript.
+ self.runCmd("settings set interpreter.save-transcript true")
+
td = tempfile.TemporaryDirectory()
settings = [
diff --git a/lldb/test/API/functionalities/breakpoint/breakpoint_command/TestBreakpointCommand.py b/lldb/test/API/functionalities/breakpoint/breakpoint_command/TestBreakpointCommand.py
index c219a4ee5bd9..605561c75737 100644
--- a/lldb/test/API/functionalities/breakpoint/breakpoint_command/TestBreakpointCommand.py
+++ b/lldb/test/API/functionalities/breakpoint/breakpoint_command/TestBreakpointCommand.py
@@ -6,7 +6,7 @@ Test lldb breakpoint command add/list/delete.
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
-from lldbsuite.test import lldbutil
+from lldbsuite.test import lldbutil, lldbplatformutil
import json
import os
import side_effect
@@ -581,7 +581,6 @@ class BreakpointCommandTestCase(TestBase):
self.assertNotEqual(target_stats, None)
self.assertEqual(target_stats["sourceMapDeduceCount"], expected_count)
- @skipIf(oslist=["windows"])
@no_debug_info_test
def test_breakpoints_auto_source_map_relative(self):
"""
@@ -612,8 +611,13 @@ class BreakpointCommandTestCase(TestBase):
self.verify_source_map_deduce_statistics(target, 0)
# Verify auto deduced source map when file path in debug info
- # is a suffix of request breakpoint file path
- path = "/x/y/a/b/c/main.cpp"
+ # is a suffix of the requested breakpoint file path.
+ # Note the path must be absolute.
+ path = (
+ "/x/y/a/b/c/main.cpp"
+ if lldbplatformutil.getHostPlatform() != "windows"
+ else r"C:\x\y\a\b\c\main.cpp"
+ )
bp = target.BreakpointCreateByLocation(path, 2)
self.assertGreater(
bp.GetNumLocations(),
@@ -625,7 +629,11 @@ class BreakpointCommandTestCase(TestBase):
source_map_json = self.get_source_map_json()
self.assertEqual(len(source_map_json), 1, "source map should not be empty")
- self.verify_source_map_entry_pair(source_map_json[0], ".", "/x/y")
+ self.verify_source_map_entry_pair(
+ source_map_json[0],
+ ".",
+ "/x/y" if lldbplatformutil.getHostPlatform() != "windows" else r"C:\x\y",
+ )
self.verify_source_map_deduce_statistics(target, 1)
# Reset source map.
diff --git a/lldb/test/API/functionalities/bt-interrupt/main.c b/lldb/test/API/functionalities/bt-interrupt/main.c
index bdaf423d334e..14a9eb6ffc85 100644
--- a/lldb/test/API/functionalities/bt-interrupt/main.c
+++ b/lldb/test/API/functionalities/bt-interrupt/main.c
@@ -12,6 +12,7 @@ struct Foo {
int
forgot_termination(int input, struct Foo my_foo) {
+ char frame_increasing_buffer[0x1000]; // To blow the stack sooner.
return forgot_termination(++input, my_foo);
}
diff --git a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx/chrono/TestDataFormatterLibcxxChrono.py b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx/chrono/TestDataFormatterLibcxxChrono.py
index fb35481d5551..0737a5bc7e6e 100644
--- a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx/chrono/TestDataFormatterLibcxxChrono.py
+++ b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx/chrono/TestDataFormatterLibcxxChrono.py
@@ -14,6 +14,7 @@ class LibcxxChronoDataFormatterTestCase(TestBase):
@skipIf(compiler="clang", compiler_version=["<", "17.0"])
def test_with_run_command(self):
"""Test that that file and class static variables display correctly."""
+ isNotWindowsHost = lldbplatformutil.getHostPlatform() != "windows"
self.build()
(self.target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(
self, "break here", lldb.SBFileSpec("main.cpp", False)
@@ -57,7 +58,11 @@ class LibcxxChronoDataFormatterTestCase(TestBase):
self.expect(
"frame variable ss_neg_date_time",
substrs=[
- "ss_neg_date_time = date/time=-32767-01-01T00:00:00Z timestamp=-1096193779200 s"
+ (
+ "ss_neg_date_time = date/time=-32767-01-01T00:00:00Z timestamp=-1096193779200 s"
+ if isNotWindowsHost
+ else "ss_neg_date_time = timestamp=-1096193779200 s"
+ )
],
)
self.expect(
@@ -68,7 +73,11 @@ class LibcxxChronoDataFormatterTestCase(TestBase):
self.expect(
"frame variable ss_pos_date_time",
substrs=[
- "ss_pos_date_time = date/time=32767-12-31T23:59:59Z timestamp=971890963199 s"
+ (
+ "ss_pos_date_time = date/time=32767-12-31T23:59:59Z timestamp=971890963199 s"
+ if isNotWindowsHost
+ else "ss_pos_date_time = timestamp=971890963199 s"
+ )
],
)
self.expect(
@@ -103,7 +112,13 @@ class LibcxxChronoDataFormatterTestCase(TestBase):
)
self.expect(
"frame variable sd_neg_date",
- substrs=["sd_neg_date = date=-32767-01-01Z timestamp=-12687428 days"],
+ substrs=[
+ (
+ "sd_neg_date = date=-32767-01-01Z timestamp=-12687428 days"
+ if isNotWindowsHost
+ else "sd_neg_date = timestamp=-12687428 days"
+ )
+ ],
)
self.expect(
"frame variable sd_neg_days",
@@ -112,7 +127,13 @@ class LibcxxChronoDataFormatterTestCase(TestBase):
self.expect(
"frame variable sd_pos_date",
- substrs=["sd_pos_date = date=32767-12-31Z timestamp=11248737 days"],
+ substrs=[
+ (
+ "sd_pos_date = date=32767-12-31Z timestamp=11248737 days"
+ if isNotWindowsHost
+ else "sd_pos_date = timestamp=11248737 days"
+ )
+ ],
)
self.expect(
"frame variable sd_pos_days",
@@ -157,7 +178,11 @@ class LibcxxChronoDataFormatterTestCase(TestBase):
self.expect(
"frame variable ls_neg_date_time",
substrs=[
- "ls_neg_date_time = date/time=-32767-01-01T00:00:00 timestamp=-1096193779200 s"
+ (
+ "ls_neg_date_time = date/time=-32767-01-01T00:00:00 timestamp=-1096193779200 s"
+ if isNotWindowsHost
+ else "ls_neg_date_time = timestamp=-1096193779200 s"
+ )
],
)
self.expect(
@@ -168,7 +193,11 @@ class LibcxxChronoDataFormatterTestCase(TestBase):
self.expect(
"frame variable ls_pos_date_time",
substrs=[
- "ls_pos_date_time = date/time=32767-12-31T23:59:59 timestamp=971890963199 s"
+ (
+ "ls_pos_date_time = date/time=32767-12-31T23:59:59 timestamp=971890963199 s"
+ if isNotWindowsHost
+ else "ls_pos_date_time = timestamp=971890963199 s"
+ )
],
)
self.expect(
@@ -207,7 +236,13 @@ class LibcxxChronoDataFormatterTestCase(TestBase):
)
self.expect(
"frame variable ld_neg_date",
- substrs=["ld_neg_date = date=-32767-01-01 timestamp=-12687428 days"],
+ substrs=[
+ (
+ "ld_neg_date = date=-32767-01-01 timestamp=-12687428 days"
+ if isNotWindowsHost
+ else "ld_neg_date = timestamp=-12687428 days"
+ )
+ ],
)
self.expect(
"frame variable ld_neg_days",
@@ -216,7 +251,13 @@ class LibcxxChronoDataFormatterTestCase(TestBase):
self.expect(
"frame variable ld_pos_date",
- substrs=["ld_pos_date = date=32767-12-31 timestamp=11248737 days"],
+ substrs=[
+ (
+ "ld_pos_date = date=32767-12-31 timestamp=11248737 days"
+ if isNotWindowsHost
+ else "ld_pos_date = timestamp=11248737 days"
+ )
+ ],
)
self.expect(
"frame variable ld_pos_days",
diff --git a/lldb/test/API/functionalities/thread/exit_during_expression/main.c b/lldb/test/API/functionalities/thread/exit_during_expression/main.c
index eb6d17520986..f633632e96cc 100644
--- a/lldb/test/API/functionalities/thread/exit_during_expression/main.c
+++ b/lldb/test/API/functionalities/thread/exit_during_expression/main.c
@@ -3,7 +3,7 @@
#include <stdio.h>
#include <unistd.h>
-static unsigned int g_timeout = 100000;
+static unsigned int g_timeout = 1000000;
extern int usleep(unsigned int);
diff --git a/lldb/test/API/lang/c/enum_types/TestEnumTypes.py b/lldb/test/API/lang/c/enum_types/TestEnumTypes.py
index 33a846c50d7d..0015c8f47857 100644
--- a/lldb/test/API/lang/c/enum_types/TestEnumTypes.py
+++ b/lldb/test/API/lang/c/enum_types/TestEnumTypes.py
@@ -26,7 +26,9 @@ class EnumTypesTestCase(TestBase):
self.expect("fr var b", DATA_TYPES_DISPLAYED_CORRECTLY, patterns=[" = B$"])
self.expect("fr var c", DATA_TYPES_DISPLAYED_CORRECTLY, patterns=[" = C$"])
self.expect("fr var ab", DATA_TYPES_DISPLAYED_CORRECTLY, patterns=[" = AB$"])
- self.expect("fr var ac", DATA_TYPES_DISPLAYED_CORRECTLY, patterns=[" = A | C$"])
+ self.expect(
+ "fr var ac", DATA_TYPES_DISPLAYED_CORRECTLY, patterns=[" = A \| C$"]
+ )
self.expect("fr var all", DATA_TYPES_DISPLAYED_CORRECTLY, patterns=[" = ALL$"])
# Test that an enum that doesn't match the heuristic we use in
# TypeSystemClang::DumpEnumValue, gets printed as a raw integer.
@@ -37,7 +39,7 @@ class EnumTypesTestCase(TestBase):
self.expect(
"expression (enum bitfield)nonsense",
DATA_TYPES_DISPLAYED_CORRECTLY,
- patterns=[" = B | C | 0x10$"],
+ patterns=[" = B \| C \| 0x10$"],
)
# Break inside the main.
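The added escaping matters because these `patterns` entries are regular expressions: a bare | is alternation, while \| matches a literal pipe, and the raw-string prefix keeps Python from treating \| as a (deprecated) string escape. An illustrative check:

    import re

    # r"\|" matches a literal '|' rather than acting as regex alternation.
    assert re.search(r" = A \| C$", "(lldb) ac = A | C")
    assert not re.search(r" = A \| C$", "(lldb) ac = AC")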
diff --git a/lldb/test/API/lang/cpp/limit-debug-info/Makefile b/lldb/test/API/lang/cpp/limit-debug-info/Makefile
index 30230b3469ac..cbb9690bcecb 100644
--- a/lldb/test/API/lang/cpp/limit-debug-info/Makefile
+++ b/lldb/test/API/lang/cpp/limit-debug-info/Makefile
@@ -1,5 +1,3 @@
CXX_SOURCES = main.cpp derived.cpp base.cpp
-CFLAGS_EXTRAS = $(LIMIT_DEBUG_INFO_FLAGS)
-
include Makefile.rules
diff --git a/lldb/test/API/lang/cpp/limit-debug-info/TestWithLimitDebugInfo.py b/lldb/test/API/lang/cpp/limit-debug-info/TestWithLimitDebugInfo.py
index a4422cee91f0..ebd9e662c3d4 100644
--- a/lldb/test/API/lang/cpp/limit-debug-info/TestWithLimitDebugInfo.py
+++ b/lldb/test/API/lang/cpp/limit-debug-info/TestWithLimitDebugInfo.py
@@ -5,13 +5,8 @@ from lldbsuite.test import lldbutil
class TestWithLimitDebugInfo(TestBase):
- @add_test_categories(["dwarf", "dwo"])
- def test_limit_debug_info(self):
- self.build()
-
- src_file = os.path.join(self.getSourceDir(), "main.cpp")
- src_file_spec = lldb.SBFileSpec(src_file)
- self.assertTrue(src_file_spec.IsValid(), "breakpoint file")
+ def _run_test(self, build_dict):
+ self.build(dictionary=build_dict)
# Get the path of the executable
exe_path = self.getBuildArtifact("a.out")
@@ -21,9 +16,11 @@ class TestWithLimitDebugInfo(TestBase):
self.assertTrue(target.IsValid(), VALID_TARGET)
# Break on main function
- breakpoint = target.BreakpointCreateBySourceRegex("break here", src_file_spec)
- self.assertTrue(
- breakpoint.IsValid() and breakpoint.GetNumLocations() >= 1, VALID_BREAKPOINT
+ lldbutil.run_break_set_by_file_and_line(
+ self, "derived.h", line_number("derived.h", "// break1")
+ )
+ lldbutil.run_break_set_by_file_and_line(
+ self, "derived.h", line_number("derived.h", "// break2")
)
# Launch the process
@@ -32,14 +29,23 @@ class TestWithLimitDebugInfo(TestBase):
# Get the thread of the process
self.assertEqual(process.GetState(), lldb.eStateStopped, PROCESS_STOPPED)
- thread = lldbutil.get_stopped_thread(process, lldb.eStopReasonBreakpoint)
- thread.StepInto()
- # Get frame for current thread
- frame = thread.GetSelectedFrame()
+ self.expect_expr("1", result_type="int", result_value="1")
+ self.expect_expr("this", result_type="Foo *")
+ self.expect_expr("this->x", result_type="int", result_value="12345")
+
+ self.runCmd("continue")
self.expect_expr("1", result_type="int", result_value="1")
+ self.expect_expr("this", result_type="ns::Foo2 *")
+ self.expect_expr("this->x", result_type="int", result_value="23456")
- v2 = frame.EvaluateExpression("this")
- self.assertTrue(v2.IsValid(), "'expr this' results in a valid SBValue object")
- self.assertSuccess(v2.GetError(), "'expr this' succeeds without an error.")
+ @add_test_categories(["dwarf", "dwo"])
+ def test_default(self):
+ self._run_test(dict(CFLAGS_EXTRAS="$(LIMIT_DEBUG_INFO_FLAGS)"))
+
+ @add_test_categories(["dwarf", "dwo"])
+ def test_debug_names(self):
+ self._run_test(
+ dict(CFLAGS_EXTRAS="$(LIMIT_DEBUG_INFO_FLAGS) -gdwarf-5 -gpubnames")
+ )
diff --git a/lldb/test/API/lang/cpp/limit-debug-info/base.cpp b/lldb/test/API/lang/cpp/limit-debug-info/base.cpp
index 296864488820..062eaa3c6f26 100644
--- a/lldb/test/API/lang/cpp/limit-debug-info/base.cpp
+++ b/lldb/test/API/lang/cpp/limit-debug-info/base.cpp
@@ -1,8 +1,7 @@
#include "base.h"
-FooNS::FooNS() : x(12345) {}
-
-void FooNS::bar() {
- x = 54321;
-}
+FooBase::FooBase() : x(12345) {}
+ns::Foo2Base::Foo2Base() : x(23456) {}
+void FooBase::bar() {}
+void ns::Foo2Base::bar() {}
diff --git a/lldb/test/API/lang/cpp/limit-debug-info/base.h b/lldb/test/API/lang/cpp/limit-debug-info/base.h
index f4da76701c78..8e4dd17e7007 100644
--- a/lldb/test/API/lang/cpp/limit-debug-info/base.h
+++ b/lldb/test/API/lang/cpp/limit-debug-info/base.h
@@ -1,12 +1,22 @@
-class FooNS
-{
+class FooBase {
public:
- virtual void bar();
- virtual char baz() = 0;
+ virtual void bar();
protected:
- FooNS();
+ FooBase();
- int x;
+ int x;
};
+namespace ns {
+class Foo2Base {
+public:
+ virtual void bar();
+
+protected:
+ Foo2Base();
+
+ int x;
+};
+
+} // namespace ns
diff --git a/lldb/test/API/lang/cpp/limit-debug-info/derived.cpp b/lldb/test/API/lang/cpp/limit-debug-info/derived.cpp
index 911fe3d9bc17..cbda8e706b52 100644
--- a/lldb/test/API/lang/cpp/limit-debug-info/derived.cpp
+++ b/lldb/test/API/lang/cpp/limit-debug-info/derived.cpp
@@ -1,11 +1,10 @@
#include "derived.h"
-Foo foo1;
-Foo foo2;
-
Foo::Foo() { a = 12345; }
+ns::Foo2::Foo2() { a = 23456; }
-char Foo::baz() {
- return (char)(x&0xff);
-}
+Foo foo1;
+Foo foo2;
+ns::Foo2 foo2_1;
+ns::Foo2 foo2_2;
diff --git a/lldb/test/API/lang/cpp/limit-debug-info/derived.h b/lldb/test/API/lang/cpp/limit-debug-info/derived.h
index 8f95c52a595f..a4aab37d9dee 100644
--- a/lldb/test/API/lang/cpp/limit-debug-info/derived.h
+++ b/lldb/test/API/lang/cpp/limit-debug-info/derived.h
@@ -1,19 +1,36 @@
#include "base.h"
-class Foo : public FooNS
-{
+class Foo : public FooBase {
public:
- Foo();
+ Foo();
- // Deliberately defined by hand.
- Foo &operator=(const Foo &rhs) {
- a = rhs.a;
- return *this;
- }
+ // Deliberately defined by hand.
+ Foo &operator=(const Foo &rhs) {
+ x = rhs.x; // break1
+ a = rhs.a;
+ return *this;
+ }
+ int a;
+};
+
+namespace ns {
+class Foo2 : public Foo2Base {
+public:
+ Foo2();
- char baz() override;
- int a;
+ // Deliberately defined by hand.
+ Foo2 &operator=(const Foo2 &rhs) {
+ x = rhs.x; // break2
+ a = rhs.a;
+ return *this;
+ }
+
+ int a;
};
+} // namespace ns
extern Foo foo1;
extern Foo foo2;
+
+extern ns::Foo2 foo2_1;
+extern ns::Foo2 foo2_2;
diff --git a/lldb/test/API/lang/cpp/limit-debug-info/main.cpp b/lldb/test/API/lang/cpp/limit-debug-info/main.cpp
index 35cb0373ae39..405fc2cdd4c8 100644
--- a/lldb/test/API/lang/cpp/limit-debug-info/main.cpp
+++ b/lldb/test/API/lang/cpp/limit-debug-info/main.cpp
@@ -1,8 +1,6 @@
#include "derived.h"
int main() {
- foo1 = foo2; // break here
-
- foo1.bar();
- return foo1.baz();
+ foo1 = foo2;
+ foo2_1 = foo2_2;
}
diff --git a/lldb/test/API/python_api/debugger/TestDebuggerAPI.py b/lldb/test/API/python_api/debugger/TestDebuggerAPI.py
index 522de2466012..a007a87ca93e 100644
--- a/lldb/test/API/python_api/debugger/TestDebuggerAPI.py
+++ b/lldb/test/API/python_api/debugger/TestDebuggerAPI.py
@@ -91,6 +91,11 @@ class DebuggerAPITestCase(TestBase):
# Test the local property again, is it set to new_cache_line_size?
self.assertEqual(get_cache_line_size(), new_cache_line_size)
+ @expectedFailureAll(
+ hostoslist=["windows"],
+ remote=True,
+ bugnumber="github.com/llvm/llvm-project/issues/92419",
+ )
def test_CreateTarget_platform(self):
exe = self.getBuildArtifact("a.out")
self.yaml2obj("elf.yaml", exe)
@@ -161,3 +166,124 @@ class DebuggerAPITestCase(TestBase):
original_dbg_id = self.dbg.GetID()
self.dbg.Destroy(self.dbg)
self.assertEqual(destroy_dbg_id, original_dbg_id)
+
+ def test_AddDestroyCallback(self):
+ original_dbg_id = self.dbg.GetID()
+ called = []
+
+ def foo(dbg_id):
+ # Need nonlocal to modify closure variable.
+ nonlocal called
+ called += [('foo', dbg_id)]
+
+ def bar(dbg_id):
+ # Need nonlocal to modify closure variable.
+ nonlocal called
+ called += [('bar', dbg_id)]
+
+ token_foo = self.dbg.AddDestroyCallback(foo)
+ token_bar = self.dbg.AddDestroyCallback(bar)
+ self.dbg.Destroy(self.dbg)
+
+ # Should call both `foo()` and `bar()`.
+ self.assertEqual(called, [
+ ('foo', original_dbg_id),
+ ('bar', original_dbg_id),
+ ])
+
+ def test_RemoveDestroyCallback(self):
+ original_dbg_id = self.dbg.GetID()
+ called = []
+
+ def foo(dbg_id):
+ # Need nonlocal to modify closure variable.
+ nonlocal called
+ called += [('foo', dbg_id)]
+
+ def bar(dbg_id):
+ # Need nonlocal to modify closure variable.
+ nonlocal called
+ called += [('bar', dbg_id)]
+
+ token_foo = self.dbg.AddDestroyCallback(foo)
+ token_bar = self.dbg.AddDestroyCallback(bar)
+ ret = self.dbg.RemoveDestroyCallback(token_foo)
+ self.dbg.Destroy(self.dbg)
+
+ # `Remove` should be successful
+ self.assertTrue(ret)
+ # Should only call `bar()`
+ self.assertEqual(called, [('bar', original_dbg_id)])
+
+ def test_RemoveDestroyCallback_invalid_token(self):
+ original_dbg_id = self.dbg.GetID()
+ magic_token_that_should_not_exist = 32413
+ called = []
+
+ def foo(dbg_id):
+ # Need nonlocal to modify closure variable.
+ nonlocal called
+ called += [('foo', dbg_id)]
+
+ token_foo = self.dbg.AddDestroyCallback(foo)
+ ret = self.dbg.RemoveDestroyCallback(magic_token_that_should_not_exist)
+ self.dbg.Destroy(self.dbg)
+
+ # `Remove` should be unsuccessful
+ self.assertFalse(ret)
+ # Should call `foo()`
+ self.assertEqual(called, [('foo', original_dbg_id)])
+
+ def test_HandleDestroyCallback(self):
+ """
+ Validates:
+ 1. AddDestroyCallback and RemoveDestroyCallback work during debugger destroy.
+ 2. HandleDestroyCallback invokes all callbacks in FIFO order.
+ """
+ original_dbg_id = self.dbg.GetID()
+ events = []
+ bar_token = None
+
+ def foo(dbg_id):
+ # Need nonlocal to modify closure variable.
+ nonlocal events
+ events.append(('foo called', dbg_id))
+
+ def bar(dbg_id):
+ # Need nonlocal to modify closure variable.
+ nonlocal events
+ events.append(('bar called', dbg_id))
+
+ def add_foo(dbg_id):
+ # Need nonlocal to modify closure variable.
+ nonlocal events
+ events.append(('add_foo called', dbg_id))
+ events.append(('foo token', self.dbg.AddDestroyCallback(foo)))
+
+ def remove_bar(dbg_id):
+ # Need nonlocal to modify closure variable.
+ nonlocal events
+ events.append(('remove_bar called', dbg_id))
+ events.append(('remove bar ret', self.dbg.RemoveDestroyCallback(bar_token)))
+
+ # Setup
+ events.append(('add_foo token', self.dbg.AddDestroyCallback(add_foo)))
+ bar_token = self.dbg.AddDestroyCallback(bar)
+ events.append(('bar token', bar_token))
+ events.append(('remove_bar token', self.dbg.AddDestroyCallback(remove_bar)))
+ # Destroy
+ self.dbg.Destroy(self.dbg)
+
+ self.assertEqual(events, [
+ # Setup
+ ('add_foo token', 0), # add_foo should be added
+ ('bar token', 1), # bar should be added
+ ('remove_bar token', 2), # remove_bar should be added
+ # Destroy
+ ('add_foo called', original_dbg_id), # add_foo should be called
+ ('foo token', 3), # foo should be added
+ ('bar called', original_dbg_id), # bar should be called
+ ('remove_bar called', original_dbg_id), # remove_bar should be called
+ ('remove bar ret', False), # remove_bar should fail, because it's already invoked and removed
+ ('foo called', original_dbg_id), # foo should be called
+ ])
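A standalone sketch of the callback API these tests exercise, using only the calls that appear above (the lambda and the assertions are illustrative):

    import lldb

    dbg = lldb.SBDebugger.Create()
    # AddDestroyCallback returns a token so one specific callback can be removed.
    token = dbg.AddDestroyCallback(lambda dbg_id: print("destroyed:", dbg_id))
    assert dbg.RemoveDestroyCallback(token)      # known token: removal succeeds
    assert not dbg.RemoveDestroyCallback(token)  # already removed: returns False
    lldb.SBDebugger.Destroy(dbg)                 # nothing fires; callback was removed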
diff --git a/lldb/test/API/python_api/interpreter/TestCommandInterpreterAPI.py b/lldb/test/API/python_api/interpreter/TestCommandInterpreterAPI.py
index 8f9fbfc255bb..95643eef0d34 100644
--- a/lldb/test/API/python_api/interpreter/TestCommandInterpreterAPI.py
+++ b/lldb/test/API/python_api/interpreter/TestCommandInterpreterAPI.py
@@ -1,5 +1,6 @@
"""Test the SBCommandInterpreter APIs."""
+import json
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
@@ -15,8 +16,7 @@ class CommandInterpreterAPICase(TestBase):
# Find the line number to break on inside main.c.
self.line = line_number("main.c", "Hello world.")
- def test_with_process_launch_api(self):
- """Test the SBCommandInterpreter APIs."""
+ def buildAndCreateTarget(self):
self.build()
exe = self.getBuildArtifact("a.out")
@@ -27,6 +27,11 @@ class CommandInterpreterAPICase(TestBase):
# Retrieve the associated command interpreter from our debugger.
ci = self.dbg.GetCommandInterpreter()
self.assertTrue(ci, VALID_COMMAND_INTERPRETER)
+ return ci
+
+ def test_with_process_launch_api(self):
+ """Test the SBCommandInterpreter APIs."""
+ ci = self.buildAndCreateTarget()
# Exercise some APIs....
@@ -85,3 +90,166 @@ class CommandInterpreterAPICase(TestBase):
self.assertEqual(res.GetOutput(), "")
self.assertIsNotNone(res.GetError())
self.assertEqual(res.GetError(), "")
+
+ def getTranscriptAsPythonObject(self, ci):
+ """Retrieve the transcript and convert it into a Python object"""
+ structured_data = ci.GetTranscript()
+ self.assertTrue(structured_data.IsValid())
+
+ stream = lldb.SBStream()
+ self.assertTrue(stream)
+
+ error = structured_data.GetAsJSON(stream)
+ self.assertSuccess(error)
+
+ return json.loads(stream.GetData())
+
+ def test_structured_transcript(self):
+ """Test structured transcript generation and retrieval."""
+ ci = self.buildAndCreateTarget()
+
+ # Make sure the "save-transcript" setting is on
+ self.runCmd("settings set interpreter.save-transcript true")
+
+ # Send a few commands through the command interpreter.
+ #
+ # Using `ci.HandleCommand` because some commands will fail so that we
+ # can test the "error" field in the saved transcript.
+ res = lldb.SBCommandReturnObject()
+ ci.HandleCommand("version", res)
+ ci.HandleCommand("an-unknown-command", res)
+ ci.HandleCommand("breakpoint set -f main.c -l %d" % self.line, res)
+ ci.HandleCommand("r", res)
+ ci.HandleCommand("p a", res)
+ ci.HandleCommand("statistics dump", res)
+ total_number_of_commands = 6
+
+ # Get transcript as python object
+ transcript = self.getTranscriptAsPythonObject(ci)
+
+ # Every command issued above should have been recorded, in order.
+ self.assertEqual(len(transcript), total_number_of_commands)
+
+ # All commands should have expected fields.
+ for command in transcript:
+ self.assertIn("command", command)
+ self.assertIn("output", command)
+ self.assertIn("error", command)
+ self.assertIn("seconds", command)
+
+ # The following validates individual commands in the transcript.
+ #
+ # Notes:
+ # 1. Some of the asserts rely on the exact output format of the
+ # commands. Hopefully we are not changing them any time soon.
+ # 2. We are removing the "seconds" field from each command, so that
+ # some of the validations below can be easier / more readable.
+ for command in transcript:
+ del command["seconds"]
+
+ # (lldb) version
+ self.assertEqual(transcript[0]["command"], "version")
+ self.assertIn("lldb version", transcript[0]["output"])
+ self.assertEqual(transcript[0]["error"], "")
+
+ # (lldb) an-unknown-command
+ self.assertEqual(transcript[1],
+ {
+ "command": "an-unknown-command",
+ "output": "",
+ "error": "error: 'an-unknown-command' is not a valid command.\n",
+ })
+
+ # (lldb) breakpoint set -f main.c -l <line>
+ self.assertEqual(transcript[2]["command"], "breakpoint set -f main.c -l %d" % self.line)
+ # Breakpoint 1: where = a.out`main + 29 at main.c:5:3, address = 0x0000000100000f7d
+ self.assertIn("Breakpoint 1: where = a.out`main ", transcript[2]["output"])
+ self.assertEqual(transcript[2]["error"], "")
+
+ # (lldb) r
+ self.assertEqual(transcript[3]["command"], "r")
+ # Process 25494 launched: '<path>/TestCommandInterpreterAPI.test_structured_transcript/a.out' (x86_64)
+ self.assertIn("Process", transcript[3]["output"])
+ self.assertIn("launched", transcript[3]["output"])
+ self.assertEqual(transcript[3]["error"], "")
+
+ # (lldb) p a
+ self.assertEqual(transcript[4],
+ {
+ "command": "p a",
+ "output": "(int) 123\n",
+ "error": "",
+ })
+
+ # (lldb) statistics dump
+ statistics_dump = json.loads(transcript[5]["output"])
+ # Dump result should be valid JSON: json.loads above would have raised
+ # json.JSONDecodeError otherwise, and the top level should be an object.
+ self.assertIsInstance(statistics_dump, dict)
+ # Dump result should contain expected fields
+ self.assertIn("commands", statistics_dump)
+ self.assertIn("memory", statistics_dump)
+ self.assertIn("modules", statistics_dump)
+ self.assertIn("targets", statistics_dump)
+
+ def test_save_transcript_setting_default(self):
+ ci = self.buildAndCreateTarget()
+
+ # The setting's default value should be "false"
+ self.expect(
+ "settings show interpreter.save-transcript",
+ substrs=["interpreter.save-transcript (boolean) = false"],
+ )
+
+ def test_save_transcript_setting_off(self):
+ ci = self.buildAndCreateTarget()
+
+ # Make sure the setting is off
+ self.runCmd("settings set interpreter.save-transcript false")
+
+ # The transcript should be empty after running a command
+ self.runCmd("version")
+ transcript = self.getTranscriptAsPythonObject(ci)
+ self.assertEqual(transcript, [])
+
+ def test_save_transcript_setting_on(self):
+ ci = self.buildAndCreateTarget()
+
+ # Make sure the setting is on
+ self.runCmd("settings set interpreter.save-transcript true")
+
+ # The transcript should contain one item after running a command
+ self.runCmd("version")
+ transcript = self.getTranscriptAsPythonObject(ci)
+ self.assertEqual(len(transcript), 1)
+ self.assertEqual(transcript[0]["command"], "version")
+
+ def test_save_transcript_returns_copy(self):
+ """
+ Test that the returned structured data is *at least* a shallow copy.
+
+ We believe that a deep copy *is* performed in `SBCommandInterpreter::GetTranscript`.
+ However, the deep copy cannot be tested and doesn't need to be tested,
+ because there is no logic in the command interpreter to modify a
+ transcript item (representing a command) after it has been returned.
+ """
+ ci = self.buildAndCreateTarget()
+
+ # Make sure the setting is on
+ self.runCmd("settings set interpreter.save-transcript true")
+
+ # Run commands and get the transcript as structured data
+ self.runCmd("version")
+ structured_data_1 = ci.GetTranscript()
+ self.assertTrue(structured_data_1.IsValid())
+ self.assertEqual(structured_data_1.GetSize(), 1)
+ self.assertEqual(structured_data_1.GetItemAtIndex(0).GetValueForKey("command").GetStringValue(100), "version")
+
+ # Run some more commands and get the transcript as structured data again
+ self.runCmd("help")
+ structured_data_2 = ci.GetTranscript()
+ self.assertTrue(structured_data_2.IsValid())
+ self.assertEqual(structured_data_2.GetSize(), 2)
+ self.assertEqual(structured_data_2.GetItemAtIndex(0).GetValueForKey("command").GetStringValue(100), "version")
+ self.assertEqual(structured_data_2.GetItemAtIndex(1).GetValueForKey("command").GetStringValue(100), "help")
+
+ # Now, the first structured data should remain unchanged
+ self.assertTrue(structured_data_1.IsValid())
+ self.assertEqual(structured_data_1.GetSize(), 1)
+ self.assertEqual(structured_data_1.GetItemAtIndex(0).GetValueForKey("command").GetStringValue(100), "version")
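For reference, each transcript entry parsed by getTranscriptAsPythonObject above carries exactly the four fields the tests assert on; a representative entry, with illustrative values:

    entry = {
        "command": "version",            # the command line as handled
        "output": "lldb version ...\n",  # captured stdout
        "error": "",                     # captured stderr; empty on success
        "seconds": 0.012,                # wall-clock time spent in the command
    }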
diff --git a/lldb/test/API/python_api/interpreter/main.c b/lldb/test/API/python_api/interpreter/main.c
index 277aa54a4eea..366ffde5fdef 100644
--- a/lldb/test/API/python_api/interpreter/main.c
+++ b/lldb/test/API/python_api/interpreter/main.c
@@ -1,6 +1,7 @@
#include <stdio.h>
int main(int argc, char const *argv[]) {
- printf("Hello world.\n");
- return 0;
+ int a = 123;
+ printf("Hello world.\n");
+ return 0;
}
diff --git a/lldb/test/API/tools/lldb-dap/attach/TestDAP_attach.py b/lldb/test/API/tools/lldb-dap/attach/TestDAP_attach.py
index cab0067382ca..b3ba69749f67 100644
--- a/lldb/test/API/tools/lldb-dap/attach/TestDAP_attach.py
+++ b/lldb/test/API/tools/lldb-dap/attach/TestDAP_attach.py
@@ -41,7 +41,6 @@ class TestDAP_attach(lldbdap_testcase.DAPTestCaseBase):
@skipIfWindows
@skipIfNetBSD # Hangs on NetBSD as well
- @skipIfRemote
def test_by_pid(self):
"""
Tests attaching to a process by process ID.
@@ -59,7 +58,6 @@ class TestDAP_attach(lldbdap_testcase.DAPTestCaseBase):
@skipIfWindows
@skipIfNetBSD # Hangs on NetBSD as well
- @skipIfRemote
def test_by_name(self):
"""
Tests attaching to a process by process name.
diff --git a/lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_logpoints.py b/lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_logpoints.py
index cbf190f2b2bf..78ceb7971112 100644
--- a/lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_logpoints.py
+++ b/lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_logpoints.py
@@ -20,7 +20,6 @@ class TestDAP_logpoints(lldbdap_testcase.DAPTestCaseBase):
self.main_path = os.path.realpath(self.getBuildArtifact(self.main_basename))
@skipIfWindows
- @skipIfRemote
def test_logmessage_basic(self):
"""Tests breakpoint logmessage basic functionality."""
before_loop_line = line_number("main.cpp", "// before loop")
@@ -83,7 +82,6 @@ class TestDAP_logpoints(lldbdap_testcase.DAPTestCaseBase):
self.assertRegex(logMessage_line, reg_str)
@skipIfWindows
- @skipIfRemote
def test_logmessage_advanced(self):
"""Tests breakpoint logmessage functionality for complex expression."""
before_loop_line = line_number("main.cpp", "// before loop")
@@ -144,7 +142,6 @@ class TestDAP_logpoints(lldbdap_testcase.DAPTestCaseBase):
self.assertEqual(logMessage_line, logMessage_prefix + str(result))
@skipIfWindows
- @skipIfRemote
def test_logmessage_format(self):
"""
Tests breakpoint logmessage functionality with format.
@@ -209,7 +206,6 @@ class TestDAP_logpoints(lldbdap_testcase.DAPTestCaseBase):
)
@skipIfWindows
- @skipIfRemote
def test_logmessage_format_failure(self):
"""
Tests breakpoint logmessage format with parsing failure.
diff --git a/lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_setBreakpoints.py b/lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_setBreakpoints.py
index 6f57c05e43c8..123fea79c5cd 100644
--- a/lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_setBreakpoints.py
+++ b/lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_setBreakpoints.py
@@ -20,7 +20,6 @@ class TestDAP_setBreakpoints(lldbdap_testcase.DAPTestCaseBase):
self.main_path = os.path.realpath(self.getBuildArtifact(self.main_basename))
@skipIfWindows
- @skipIfRemote
def test_source_map(self):
"""
This test simulates building two files in a folder, and then moving
@@ -99,7 +98,6 @@ class TestDAP_setBreakpoints(lldbdap_testcase.DAPTestCaseBase):
self.assertEqual(frames[1]["source"]["path"], new_main_path)
@skipIfWindows
- @skipIfRemote
def test_set_and_clear(self):
"""Tests setting and clearing source file and line breakpoints.
This packet is a bit tricky on the debug adaptor side since there
@@ -261,7 +259,6 @@ class TestDAP_setBreakpoints(lldbdap_testcase.DAPTestCaseBase):
)
@skipIfWindows
- @skipIfRemote
def test_clear_breakpoints_unset_breakpoints(self):
"""Test clearing breakpoints like test_set_and_clear, but clear
breakpoints by omitting the breakpoints array instead of sending an
@@ -305,7 +302,6 @@ class TestDAP_setBreakpoints(lldbdap_testcase.DAPTestCaseBase):
self.assertEqual(len(breakpoints), 0, "expect no source breakpoints")
@skipIfWindows
- @skipIfRemote
def test_functionality(self):
"""Tests hitting breakpoints and the functionality of a single
breakpoint, like 'conditions' and 'hitCondition' settings."""
diff --git a/lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_setExceptionBreakpoints.py b/lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_setExceptionBreakpoints.py
index 84d3f12490f3..b2ab12e51bf6 100644
--- a/lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_setExceptionBreakpoints.py
+++ b/lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_setExceptionBreakpoints.py
@@ -12,7 +12,6 @@ import lldbdap_testcase
class TestDAP_setExceptionBreakpoints(lldbdap_testcase.DAPTestCaseBase):
@skipIfWindows
- @skipIfRemote
def test_functionality(self):
"""Tests setting and clearing exception breakpoints.
This packet is a bit tricky on the debug adaptor side since there
diff --git a/lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_setFunctionBreakpoints.py b/lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_setFunctionBreakpoints.py
index 9708effb7a1a..8f00f42574b5 100644
--- a/lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_setFunctionBreakpoints.py
+++ b/lldb/test/API/tools/lldb-dap/breakpoint/TestDAP_setFunctionBreakpoints.py
@@ -12,7 +12,6 @@ import lldbdap_testcase
class TestDAP_setFunctionBreakpoints(lldbdap_testcase.DAPTestCaseBase):
@skipIfWindows
- @skipIfRemote
def test_set_and_clear(self):
"""Tests setting and clearing function breakpoints.
This packet is a bit tricky on the debug adaptor side since there
@@ -123,7 +122,6 @@ class TestDAP_setFunctionBreakpoints(lldbdap_testcase.DAPTestCaseBase):
)
@skipIfWindows
- @skipIfRemote
def test_functionality(self):
"""Tests hitting breakpoints and the functionality of a single
breakpoint, like 'conditions' and 'hitCondition' settings."""
diff --git a/lldb/test/API/tools/lldb-dap/commands/TestDAP_commands.py b/lldb/test/API/tools/lldb-dap/commands/TestDAP_commands.py
index bfdf9ef2897b..226b9385fe71 100644
--- a/lldb/test/API/tools/lldb-dap/commands/TestDAP_commands.py
+++ b/lldb/test/API/tools/lldb-dap/commands/TestDAP_commands.py
@@ -7,7 +7,6 @@ from lldbsuite.test.decorators import *
class TestDAP_commands(lldbdap_testcase.DAPTestCaseBase):
- @skipIfRemote
def test_command_directive_quiet_on_success(self):
program = self.getBuildArtifact("a.out")
command_quiet = (
@@ -61,7 +60,6 @@ class TestDAP_commands(lldbdap_testcase.DAPTestCaseBase):
def test_command_directive_abort_on_error_pre_run_commands(self):
self.do_test_abort_on_error(use_pre_run_commands=True)
- @skipIfRemote
def test_command_directive_abort_on_error_post_run_commands(self):
self.do_test_abort_on_error(use_post_run_commands=True)
diff --git a/lldb/test/API/tools/lldb-dap/completions/TestDAP_completions.py b/lldb/test/API/tools/lldb-dap/completions/TestDAP_completions.py
index 3250a5093cac..2b3ec656c107 100644
--- a/lldb/test/API/tools/lldb-dap/completions/TestDAP_completions.py
+++ b/lldb/test/API/tools/lldb-dap/completions/TestDAP_completions.py
@@ -19,7 +19,6 @@ class TestDAP_completions(lldbdap_testcase.DAPTestCaseBase):
self.assertNotIn(not_expected_item, actual_list)
@skipIfWindows
- @skipIfRemote
@skipIf(compiler="clang", compiler_version=["<", "17.0"])
def test_completions(self):
"""
diff --git a/lldb/test/API/tools/lldb-dap/console/TestDAP_console.py b/lldb/test/API/tools/lldb-dap/console/TestDAP_console.py
index 8769f39633e6..e6345818bf08 100644
--- a/lldb/test/API/tools/lldb-dap/console/TestDAP_console.py
+++ b/lldb/test/API/tools/lldb-dap/console/TestDAP_console.py
@@ -38,7 +38,6 @@ class TestDAP_console(lldbdap_testcase.DAPTestCaseBase):
)
@skipIfWindows
- @skipIfRemote
def test_scopes_variables_setVariable_evaluate(self):
"""
Tests that the "scopes" request causes the currently selected
@@ -82,7 +81,6 @@ class TestDAP_console(lldbdap_testcase.DAPTestCaseBase):
self.check_lldb_command("frame select", "frame #1", "frame 1 is selected")
@skipIfWindows
- @skipIfRemote
def test_custom_escape_prefix(self):
program = self.getBuildArtifact("a.out")
self.build_and_launch(program, commandEscapePrefix="::")
@@ -99,7 +97,6 @@ class TestDAP_console(lldbdap_testcase.DAPTestCaseBase):
)
@skipIfWindows
- @skipIfRemote
def test_empty_escape_prefix(self):
program = self.getBuildArtifact("a.out")
self.build_and_launch(program, commandEscapePrefix="")
@@ -116,7 +113,6 @@ class TestDAP_console(lldbdap_testcase.DAPTestCaseBase):
)
@skipIfWindows
- @skipIfRemote
def test_exit_status_message_sigterm(self):
source = "main.cpp"
program = self.getBuildArtifact("a.out")
@@ -154,7 +150,6 @@ class TestDAP_console(lldbdap_testcase.DAPTestCaseBase):
)
@skipIfWindows
- @skipIfRemote
def test_exit_status_message_ok(self):
source = "main.cpp"
program = self.getBuildArtifact("a.out")
diff --git a/lldb/test/API/tools/lldb-dap/console/TestDAP_redirection_to_console.py b/lldb/test/API/tools/lldb-dap/console/TestDAP_redirection_to_console.py
index 85911a449efe..8b47d4b9d681 100644
--- a/lldb/test/API/tools/lldb-dap/console/TestDAP_redirection_to_console.py
+++ b/lldb/test/API/tools/lldb-dap/console/TestDAP_redirection_to_console.py
@@ -8,7 +8,6 @@ import lldbdap_testcase
class TestDAP_redirection_to_console(lldbdap_testcase.DAPTestCaseBase):
@skipIfWindows
- @skipIfRemote
def test(self):
"""
Without proper stderr and stdout redirection, the following code would throw an
diff --git a/lldb/test/API/tools/lldb-dap/coreFile/TestDAP_coreFile.py b/lldb/test/API/tools/lldb-dap/coreFile/TestDAP_coreFile.py
index cabaeafc4a64..3c847dc269b2 100644
--- a/lldb/test/API/tools/lldb-dap/coreFile/TestDAP_coreFile.py
+++ b/lldb/test/API/tools/lldb-dap/coreFile/TestDAP_coreFile.py
@@ -13,7 +13,6 @@ import os
class TestDAP_coreFile(lldbdap_testcase.DAPTestCaseBase):
@skipIfWindows
- @skipIfRemote
@skipIfLLVMTargetMissing("X86")
def test_core_file(self):
current_dir = os.path.dirname(__file__)
@@ -60,7 +59,6 @@ class TestDAP_coreFile(lldbdap_testcase.DAPTestCaseBase):
self.assertEqual(self.get_stackFrames(), expected_frames)
@skipIfWindows
- @skipIfRemote
@skipIfLLVMTargetMissing("X86")
def test_core_file_source_mapping(self):
"""Test that sourceMap property is correctly applied when loading a core"""
diff --git a/lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py b/lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py
index 1e0e40d4a013..a542a318050d 100644
--- a/lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py
+++ b/lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py
@@ -13,7 +13,6 @@ class TestDAP_setDataBreakpoints(lldbdap_testcase.DAPTestCaseBase):
self.accessTypes = ["read", "write", "readWrite"]
@skipIfWindows
- @skipIfRemote
def test_duplicate_start_addresses(self):
"""Test setDataBreakpoints with multiple watchpoints starting at the same addresses."""
program = self.getBuildArtifact("a.out")
@@ -58,7 +57,6 @@ class TestDAP_setDataBreakpoints(lldbdap_testcase.DAPTestCaseBase):
self.assertEqual(i_val, "2")
@skipIfWindows
- @skipIfRemote
def test_expression(self):
"""Tests setting data breakpoints on expression."""
program = self.getBuildArtifact("a.out")
@@ -99,7 +97,6 @@ class TestDAP_setDataBreakpoints(lldbdap_testcase.DAPTestCaseBase):
self.assertEqual(i_val, "2")
@skipIfWindows
- @skipIfRemote
def test_functionality(self):
"""Tests setting data breakpoints on variable."""
program = self.getBuildArtifact("a.out")
diff --git a/lldb/test/API/tools/lldb-dap/disassemble/TestDAP_disassemble.py b/lldb/test/API/tools/lldb-dap/disassemble/TestDAP_disassemble.py
index 1b96ea71659f..9e8ef5b289f2 100644
--- a/lldb/test/API/tools/lldb-dap/disassemble/TestDAP_disassemble.py
+++ b/lldb/test/API/tools/lldb-dap/disassemble/TestDAP_disassemble.py
@@ -13,7 +13,6 @@ import os
class TestDAP_disassemble(lldbdap_testcase.DAPTestCaseBase):
@skipIfWindows
- @skipIfRemote
def test_disassemble(self):
"""
Tests the 'disassemble' request.
diff --git a/lldb/test/API/tools/lldb-dap/disconnect/TestDAP_disconnect.py b/lldb/test/API/tools/lldb-dap/disconnect/TestDAP_disconnect.py
index e5aab88c7fa4..f9e461adecb1 100644
--- a/lldb/test/API/tools/lldb-dap/disconnect/TestDAP_disconnect.py
+++ b/lldb/test/API/tools/lldb-dap/disconnect/TestDAP_disconnect.py
@@ -24,7 +24,6 @@ class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
self.assertTrue(output is None or len(output) == 0)
@skipIfWindows
- @skipIfRemote
def test_launch(self):
"""
This test launches a process that would creates a file, but we disconnect
@@ -46,7 +45,6 @@ class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
self.assertFalse(os.path.exists(program + ".side_effect"))
@skipIfWindows
- @skipIfRemote
@expectedFailureNetBSD
def test_attach(self):
"""
diff --git a/lldb/test/API/tools/lldb-dap/evaluate/TestDAP_evaluate.py b/lldb/test/API/tools/lldb-dap/evaluate/TestDAP_evaluate.py
index 57cabf5b7f41..29548a835c69 100644
--- a/lldb/test/API/tools/lldb-dap/evaluate/TestDAP_evaluate.py
+++ b/lldb/test/API/tools/lldb-dap/evaluate/TestDAP_evaluate.py
@@ -27,7 +27,7 @@ class TestDAP_evaluate(lldbdap_testcase.DAPTestCaseBase):
)
def isResultExpandedDescription(self):
- return self.context == "repl" or self.context == "hover"
+ return self.context == "repl"
def isExpressionParsedExpected(self):
return self.context != "hover"
@@ -192,31 +192,26 @@ class TestDAP_evaluate(lldbdap_testcase.DAPTestCaseBase):
self.assertEvaluate("my_bool_vec", "size=2")
@skipIfWindows
- @skipIfRemote
def test_generic_evaluate_expressions(self):
# Tests context-less expression evaluations
self.run_test_evaluate_expressions(enableAutoVariableSummaries=False)
@skipIfWindows
- @skipIfRemote
def test_repl_evaluate_expressions(self):
# Tests expression evaluations that are triggered from the Debug Console
self.run_test_evaluate_expressions("repl", enableAutoVariableSummaries=False)
@skipIfWindows
- @skipIfRemote
def test_watch_evaluate_expressions(self):
# Tests expression evaluations that are triggered from a watch expression
self.run_test_evaluate_expressions("watch", enableAutoVariableSummaries=True)
@skipIfWindows
- @skipIfRemote
def test_hover_evaluate_expressions(self):
# Tests expression evaluations that are triggered when hovering on the editor
self.run_test_evaluate_expressions("hover", enableAutoVariableSummaries=False)
@skipIfWindows
- @skipIfRemote
def test_variable_evaluate_expressions(self):
# Tests expression evaluations that are triggered in the variable explorer
self.run_test_evaluate_expressions("variable", enableAutoVariableSummaries=True)
diff --git a/lldb/test/API/tools/lldb-dap/exception/TestDAP_exception.py b/lldb/test/API/tools/lldb-dap/exception/TestDAP_exception.py
index 58a67d816436..8c2c0154ba65 100644
--- a/lldb/test/API/tools/lldb-dap/exception/TestDAP_exception.py
+++ b/lldb/test/API/tools/lldb-dap/exception/TestDAP_exception.py
@@ -9,7 +9,6 @@ import lldbdap_testcase
class TestDAP_exception(lldbdap_testcase.DAPTestCaseBase):
- @skipIfRemote
@skipIfWindows
def test_stopped_description(self):
"""
diff --git a/lldb/test/API/tools/lldb-dap/launch/TestDAP_launch.py b/lldb/test/API/tools/lldb-dap/launch/TestDAP_launch.py
index 0760d358d9c0..05873e926b64 100644
--- a/lldb/test/API/tools/lldb-dap/launch/TestDAP_launch.py
+++ b/lldb/test/API/tools/lldb-dap/launch/TestDAP_launch.py
@@ -13,7 +13,6 @@ import os
class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
@skipIfWindows
- @skipIfRemote
def test_default(self):
"""
Tests the default launch of a simple program. No arguments,
@@ -29,7 +28,6 @@ class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
self.assertIn(program, lines[0], "make sure program path is in first argument")
@skipIfWindows
- @skipIfRemote
def test_termination(self):
"""
Tests the correct termination of lldb-dap upon a 'disconnect'
@@ -50,7 +48,6 @@ class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
self.assertEqual(self.dap_server.process.poll(), 0)
@skipIfWindows
- @skipIfRemote
def test_stopOnEntry(self):
"""
Tests the default launch of a simple program that stops at the
@@ -70,7 +67,6 @@ class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
)
@skipIfWindows
- @skipIfRemote
def test_cwd(self):
"""
Tests the default launch of a simple program with a current working
@@ -97,7 +93,6 @@ class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
self.assertTrue(found, "verified program working directory")
@skipIfWindows
- @skipIfRemote
def test_debuggerRoot(self):
"""
Tests the "debuggerRoot" will change the working directory of
@@ -127,7 +122,6 @@ class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
self.continue_to_exit()
@skipIfWindows
- @skipIfRemote
def test_sourcePath(self):
"""
Tests the "sourcePath" will set the target.source-map.
@@ -153,7 +147,6 @@ class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
self.continue_to_exit()
@skipIfWindows
- @skipIfRemote
def test_disableSTDIO(self):
"""
Tests the default launch of a simple program with STDIO disabled.
@@ -168,7 +161,6 @@ class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
@skipIfWindows
@skipIfLinux # shell argument expansion doesn't seem to work on Linux
@expectedFailureAll(oslist=["freebsd", "netbsd"], bugnumber="llvm.org/pr48349")
- @skipIfRemote
def test_shellExpandArguments_enabled(self):
"""
Tests the default launch of a simple program with shell expansion
@@ -191,7 +183,6 @@ class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
)
@skipIfWindows
- @skipIfRemote
def test_shellExpandArguments_disabled(self):
"""
Tests the default launch of a simple program with shell expansion
@@ -214,7 +205,6 @@ class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
)
@skipIfWindows
- @skipIfRemote
def test_args(self):
"""
Tests launch of a simple program with arguments
@@ -240,7 +230,6 @@ class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
)
@skipIfWindows
- @skipIfRemote
def test_environment(self):
"""
Tests launch of a simple program with environment variables
@@ -270,7 +259,6 @@ class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
)
@skipIfWindows
- @skipIfRemote
@skipIf(
archs=["arm", "aarch64"]
) # failed run https://lab.llvm.org/buildbot/#/builders/96/builds/6933
@@ -354,7 +342,6 @@ class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
self.verify_commands("terminateCommands", output, terminateCommands)
@skipIfWindows
- @skipIfRemote
def test_extra_launch_commands(self):
"""
Tests the "launchCommands" with extra launching settings
@@ -420,7 +407,6 @@ class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
self.verify_commands("exitCommands", output, exitCommands)
@skipIfWindows
- @skipIfRemote
def test_failing_launch_commands(self):
"""
Tests "launchCommands" failures prevents a launch.
diff --git a/lldb/test/API/tools/lldb-dap/module/TestDAP_module.py b/lldb/test/API/tools/lldb-dap/module/TestDAP_module.py
index 3f3ead033ce9..a4e0f04d450d 100644
--- a/lldb/test/API/tools/lldb-dap/module/TestDAP_module.py
+++ b/lldb/test/API/tools/lldb-dap/module/TestDAP_module.py
@@ -58,7 +58,6 @@ class TestDAP_module(lldbdap_testcase.DAPTestCaseBase):
self.assertIn("addressRange", program_module)
@skipIfWindows
- @skipIfRemote
def test_modules(self):
"""
Mac or linux.
@@ -74,7 +73,6 @@ class TestDAP_module(lldbdap_testcase.DAPTestCaseBase):
)
@skipUnlessDarwin
- @skipIfRemote
def test_modules_dsym(self):
"""
Darwin only test with dSYM file.
@@ -85,7 +83,6 @@ class TestDAP_module(lldbdap_testcase.DAPTestCaseBase):
return self.run_test("a.out.dSYM", expect_debug_info_size=True)
@skipIfWindows
- @skipIfRemote
def test_compile_units(self):
program = self.getBuildArtifact("a.out")
self.build_and_launch(program)
diff --git a/lldb/test/API/tools/lldb-dap/optimized/TestDAP_optimized.py b/lldb/test/API/tools/lldb-dap/optimized/TestDAP_optimized.py
index 90b130d3af4d..dc7f4f98875f 100644
--- a/lldb/test/API/tools/lldb-dap/optimized/TestDAP_optimized.py
+++ b/lldb/test/API/tools/lldb-dap/optimized/TestDAP_optimized.py
@@ -11,7 +11,6 @@ from lldbsuite.test.lldbtest import *
class TestDAP_optimized(lldbdap_testcase.DAPTestCaseBase):
@skipIfWindows
- @skipIfRemote
def test_stack_frame_name(self):
"""Test optimized frame has special name suffix."""
program = self.getBuildArtifact("a.out")
@@ -30,7 +29,6 @@ class TestDAP_optimized(lldbdap_testcase.DAPTestCaseBase):
self.assertTrue(parent_frame["name"].endswith(" [opt]"))
@skipIfWindows
- @skipIfRemote
def test_optimized_variable(self):
"""Test optimized variable value contains error."""
program = self.getBuildArtifact("a.out")
diff --git a/lldb/test/API/tools/lldb-dap/restart/TestDAP_restart.py b/lldb/test/API/tools/lldb-dap/restart/TestDAP_restart.py
index 32dbc82a5729..36fa0bd40183 100644
--- a/lldb/test/API/tools/lldb-dap/restart/TestDAP_restart.py
+++ b/lldb/test/API/tools/lldb-dap/restart/TestDAP_restart.py
@@ -9,7 +9,6 @@ import lldbdap_testcase
class TestDAP_restart(lldbdap_testcase.DAPTestCaseBase):
@skipIfWindows
- @skipIfRemote
def test_basic_functionality(self):
"""
Tests the basic restarting functionality: set two breakpoints in
@@ -45,7 +44,6 @@ class TestDAP_restart(lldbdap_testcase.DAPTestCaseBase):
)
@skipIfWindows
- @skipIfRemote
def test_stopOnEntry(self):
"""
Check that the stopOnEntry setting is still honored after a restart.
@@ -87,7 +85,6 @@ class TestDAP_restart(lldbdap_testcase.DAPTestCaseBase):
)
@skipIfWindows
- @skipIfRemote
def test_arguments(self):
"""
Tests that lldb-dap will use updated launch arguments included
diff --git a/lldb/test/API/tools/lldb-dap/restart/TestDAP_restart_runInTerminal.py b/lldb/test/API/tools/lldb-dap/restart/TestDAP_restart_runInTerminal.py
index c19a6d5b54cd..5a9938c25c2c 100644
--- a/lldb/test/API/tools/lldb-dap/restart/TestDAP_restart_runInTerminal.py
+++ b/lldb/test/API/tools/lldb-dap/restart/TestDAP_restart_runInTerminal.py
@@ -21,7 +21,6 @@ class TestDAP_restart_runInTerminal(lldbdap_testcase.DAPTestCaseBase):
return False
@skipIfWindows
- @skipIfRemote
@skipIf(archs=["arm"]) # Always times out on buildbot
def test_basic_functionality(self):
"""
@@ -62,7 +61,6 @@ class TestDAP_restart_runInTerminal(lldbdap_testcase.DAPTestCaseBase):
)
@skipIfWindows
- @skipIfRemote
@skipIf(archs=["arm"]) # Always times out on buildbot
def test_stopOnEntry(self):
"""
diff --git a/lldb/test/API/tools/lldb-dap/runInTerminal/TestDAP_runInTerminal.py b/lldb/test/API/tools/lldb-dap/runInTerminal/TestDAP_runInTerminal.py
index f79a31988dc6..9fcd210122d5 100644
--- a/lldb/test/API/tools/lldb-dap/runInTerminal/TestDAP_runInTerminal.py
+++ b/lldb/test/API/tools/lldb-dap/runInTerminal/TestDAP_runInTerminal.py
@@ -44,7 +44,6 @@ class TestDAP_runInTerminal(lldbdap_testcase.DAPTestCaseBase):
return False
@skipIfWindows
- @skipIfRemote
@skipIf(archs=no_match(["x86_64"]))
def test_runInTerminal(self):
if not self.isTestSupported():
@@ -92,7 +91,6 @@ class TestDAP_runInTerminal(lldbdap_testcase.DAPTestCaseBase):
self.assertIn("bar", env)
@skipIfWindows
- @skipIfRemote
@skipIf(archs=no_match(["x86_64"]))
def test_runInTerminalInvalidTarget(self):
if not self.isTestSupported():
@@ -112,7 +110,6 @@ class TestDAP_runInTerminal(lldbdap_testcase.DAPTestCaseBase):
)
@skipIfWindows
- @skipIfRemote
@skipIf(archs=no_match(["x86_64"]))
def test_missingArgInRunInTerminalLauncher(self):
if not self.isTestSupported():
@@ -128,7 +125,6 @@ class TestDAP_runInTerminal(lldbdap_testcase.DAPTestCaseBase):
)
@skipIfWindows
- @skipIfRemote
@skipIf(archs=no_match(["x86_64"]))
def test_FakeAttachedRunInTerminalLauncherWithInvalidProgram(self):
if not self.isTestSupported():
@@ -156,7 +152,6 @@ class TestDAP_runInTerminal(lldbdap_testcase.DAPTestCaseBase):
self.assertIn("No such file or directory", stderr)
@skipIfWindows
- @skipIfRemote
@skipIf(archs=no_match(["x86_64"]))
def test_FakeAttachedRunInTerminalLauncherWithValidProgram(self):
if not self.isTestSupported():
@@ -184,7 +179,6 @@ class TestDAP_runInTerminal(lldbdap_testcase.DAPTestCaseBase):
self.assertIn("foo", stdout)
@skipIfWindows
- @skipIfRemote
@skipIf(archs=no_match(["x86_64"]))
def test_FakeAttachedRunInTerminalLauncherAndCheckEnvironment(self):
if not self.isTestSupported():
@@ -206,7 +200,6 @@ class TestDAP_runInTerminal(lldbdap_testcase.DAPTestCaseBase):
self.assertIn("FOO=BAR", stdout)
@skipIfWindows
- @skipIfRemote
@skipIf(archs=no_match(["x86_64"]))
def test_NonAttachedRunInTerminalLauncher(self):
if not self.isTestSupported():
diff --git a/lldb/test/API/tools/lldb-dap/stackTrace/TestDAP_stackTrace.py b/lldb/test/API/tools/lldb-dap/stackTrace/TestDAP_stackTrace.py
index 70526cc71538..0d7776faa4a9 100644
--- a/lldb/test/API/tools/lldb-dap/stackTrace/TestDAP_stackTrace.py
+++ b/lldb/test/API/tools/lldb-dap/stackTrace/TestDAP_stackTrace.py
@@ -57,7 +57,6 @@ class TestDAP_stackTrace(lldbdap_testcase.DAPTestCaseBase):
)
@skipIfWindows
- @skipIfRemote
def test_stackTrace(self):
"""
Tests the 'stackTrace' packet and all its variants.
@@ -190,7 +189,6 @@ class TestDAP_stackTrace(lldbdap_testcase.DAPTestCaseBase):
)
@skipIfWindows
- @skipIfRemote
def test_functionNameWithArgs(self):
"""
Test that the stack frame name includes the function arguments.
diff --git a/lldb/test/API/tools/lldb-dap/stackTraceMissingFunctionName/TestDAP_stackTraceMissingFunctionName.py b/lldb/test/API/tools/lldb-dap/stackTraceMissingFunctionName/TestDAP_stackTraceMissingFunctionName.py
index 0011c0f616e1..a04c752764fb 100644
--- a/lldb/test/API/tools/lldb-dap/stackTraceMissingFunctionName/TestDAP_stackTraceMissingFunctionName.py
+++ b/lldb/test/API/tools/lldb-dap/stackTraceMissingFunctionName/TestDAP_stackTraceMissingFunctionName.py
@@ -13,7 +13,6 @@ from lldbsuite.test import lldbtest, lldbutil
class TestDAP_stackTraceMissingFunctionName(lldbdap_testcase.DAPTestCaseBase):
@skipIfWindows
- @skipIfRemote
def test_missingFunctionName(self):
"""
Test that the stack frame without a function name is given its pc in the response.
diff --git a/lldb/test/API/tools/lldb-dap/startDebugging/TestDAP_startDebugging.py b/lldb/test/API/tools/lldb-dap/startDebugging/TestDAP_startDebugging.py
index 7700c65f862d..fd48e69cae5e 100644
--- a/lldb/test/API/tools/lldb-dap/startDebugging/TestDAP_startDebugging.py
+++ b/lldb/test/API/tools/lldb-dap/startDebugging/TestDAP_startDebugging.py
@@ -11,7 +11,6 @@ import lldbdap_testcase
class TestDAP_startDebugging(lldbdap_testcase.DAPTestCaseBase):
- @skipIfRemote
def test_startDebugging(self):
"""
Tests the "startDebugging" reverse request. It makes sure that the IDE can
diff --git a/lldb/test/API/tools/lldb-dap/step/TestDAP_step.py b/lldb/test/API/tools/lldb-dap/step/TestDAP_step.py
index 578e64e36ea0..8a1bb76340be 100644
--- a/lldb/test/API/tools/lldb-dap/step/TestDAP_step.py
+++ b/lldb/test/API/tools/lldb-dap/step/TestDAP_step.py
@@ -12,7 +12,6 @@ import lldbdap_testcase
class TestDAP_step(lldbdap_testcase.DAPTestCaseBase):
@skipIfWindows
- @skipIfRemote
def test_step(self):
"""
Tests stepping in/out/over in threads.
diff --git a/lldb/test/API/tools/lldb-dap/stop-hooks/TestDAP_stop_hooks.py b/lldb/test/API/tools/lldb-dap/stop-hooks/TestDAP_stop_hooks.py
index c538e8002a03..70c11a63a79f 100644
--- a/lldb/test/API/tools/lldb-dap/stop-hooks/TestDAP_stop_hooks.py
+++ b/lldb/test/API/tools/lldb-dap/stop-hooks/TestDAP_stop_hooks.py
@@ -9,7 +9,6 @@ import lldbdap_testcase
class TestDAP_stop_hooks(lldbdap_testcase.DAPTestCaseBase):
- @skipIfRemote
def test_stop_hooks_before_run(self):
"""
Test that there is no race condition between lldb-dap and
diff --git a/lldb/test/API/tools/lldb-dap/terminated-event/TestDAP_terminatedEvent.py b/lldb/test/API/tools/lldb-dap/terminated-event/TestDAP_terminatedEvent.py
index ff5081a41424..6d1c25e8e453 100644
--- a/lldb/test/API/tools/lldb-dap/terminated-event/TestDAP_terminatedEvent.py
+++ b/lldb/test/API/tools/lldb-dap/terminated-event/TestDAP_terminatedEvent.py
@@ -13,7 +13,6 @@ import json
class TestDAP_terminatedEvent(lldbdap_testcase.DAPTestCaseBase):
@skipIfWindows
- @skipIfRemote
def test_terminated_event(self):
"""
Terminated Event
diff --git a/lldb/test/API/tools/lldb-dap/threads/TestDAP_threads.py b/lldb/test/API/tools/lldb-dap/threads/TestDAP_threads.py
index f7f1ad7a3d50..6edb4b8e2a81 100644
--- a/lldb/test/API/tools/lldb-dap/threads/TestDAP_threads.py
+++ b/lldb/test/API/tools/lldb-dap/threads/TestDAP_threads.py
@@ -10,7 +10,6 @@ import lldbdap_testcase
class TestDAP_threads(lldbdap_testcase.DAPTestCaseBase):
@skipIfWindows
- @skipIfRemote
def test_correct_thread(self):
"""
Tests that the correct thread is selected if we continue from
@@ -45,7 +44,6 @@ class TestDAP_threads(lldbdap_testcase.DAPTestCaseBase):
self.assertTrue(stopped_event[0]["body"]["threadCausedFocus"])
@skipIfWindows
- @skipIfRemote
def test_thread_format(self):
"""
Tests the support for custom thread formats.
diff --git a/lldb/test/API/tools/lldb-dap/variables/TestDAP_variables.py b/lldb/test/API/tools/lldb-dap/variables/TestDAP_variables.py
index 07ab6d5a63eb..3c6901b2fd99 100644
--- a/lldb/test/API/tools/lldb-dap/variables/TestDAP_variables.py
+++ b/lldb/test/API/tools/lldb-dap/variables/TestDAP_variables.py
@@ -394,14 +394,12 @@ class TestDAP_variables(lldbdap_testcase.DAPTestCaseBase):
self.verify_variables(verify_locals, locals)
@skipIfWindows
- @skipIfRemote
def test_scopes_variables_setVariable_evaluate(self):
self.do_test_scopes_variables_setVariable_evaluate(
enableAutoVariableSummaries=False
)
@skipIfWindows
- @skipIfRemote
def test_scopes_variables_setVariable_evaluate_with_descriptive_summaries(self):
self.do_test_scopes_variables_setVariable_evaluate(
enableAutoVariableSummaries=True
@@ -502,29 +500,12 @@ class TestDAP_variables(lldbdap_testcase.DAPTestCaseBase):
},
"hover": {
"equals": {"type": "PointType"},
- "equals": {
- "result": """(PointType) pt = {
- x = 11
- y = 22
- buffer = {
- [0] = 0
- [1] = 1
- [2] = 2
- [3] = 3
- [4] = 4
- [5] = 5
- [6] = 6
- [7] = 7
- [8] = 8
- [9] = 9
- [10] = 10
- [11] = 11
- [12] = 12
- [13] = 13
- [14] = 14
- [15] = 15
- }
-}"""
+ "startswith": {
+ "result": (
+ "{x:11, y:22, buffer:{...}}"
+ if enableAutoVariableSummaries
+ else "PointType @ 0x"
+ )
},
"missing": ["indexedVariables"],
"hasVariablesReference": True,
@@ -620,12 +601,10 @@ class TestDAP_variables(lldbdap_testcase.DAPTestCaseBase):
self.assertEqual(scope.get("presentationHint"), "registers")
@skipIfWindows
- @skipIfRemote
def test_scopes_and_evaluate_expansion(self):
self.do_test_scopes_and_evaluate_expansion(enableAutoVariableSummaries=False)
@skipIfWindows
- @skipIfRemote
def test_scopes_and_evaluate_expansion_with_descriptive_summaries(self):
self.do_test_scopes_and_evaluate_expansion(enableAutoVariableSummaries=True)
@@ -681,17 +660,14 @@ class TestDAP_variables(lldbdap_testcase.DAPTestCaseBase):
self.verify_variables(verify_children, children)
@skipIfWindows
- @skipIfRemote
def test_indexedVariables(self):
self.do_test_indexedVariables(enableSyntheticChildDebugging=False)
@skipIfWindows
- @skipIfRemote
def test_indexedVariables_with_raw_child_for_synthetics(self):
self.do_test_indexedVariables(enableSyntheticChildDebugging=True)
@skipIfWindows
- @skipIfRemote
def test_registers(self):
"""
Test that registers whose byte size is the size of a pointer on
@@ -765,7 +741,6 @@ class TestDAP_variables(lldbdap_testcase.DAPTestCaseBase):
@no_debug_info_test
@skipIfWindows
- @skipIfRemote
def test_value_format(self):
"""
Test that toggling a variable's value format between decimal and hexadecimal works.
diff --git a/lldb/test/CMakeLists.txt b/lldb/test/CMakeLists.txt
index 6a9ca59f96b0..5ac474736eb6 100644
--- a/lldb/test/CMakeLists.txt
+++ b/lldb/test/CMakeLists.txt
@@ -91,10 +91,11 @@ string(REPLACE ${CMAKE_CFG_INTDIR} ${LLVM_BUILD_MODE} LLDB_TOOLS_DIR ${LLVM_RUNT
# Create a custom target to track test dependencies.
add_custom_target(lldb-test-depends)
-set_target_properties(lldb-test-depends PROPERTIES FOLDER "lldb misc")
+set_target_properties(lldb-test-depends PROPERTIES FOLDER "LLDB/Tests")
# Create an alias for the legacy name of lldb-test-depends
add_custom_target(lldb-test-deps)
+set_target_properties(lldb-test-deps PROPERTIES FOLDER "LLDB/Tests")
add_dependencies(lldb-test-deps lldb-test-depends)
function(add_lldb_test_dependency)
@@ -265,7 +266,6 @@ add_lit_testsuite(check-lldb "Running lldb lit test suite"
lldb-api-test-deps
lldb-shell-test-deps
lldb-unit-test-deps)
-set_target_properties(check-lldb PROPERTIES FOLDER "lldb tests")
if(LLDB_BUILT_STANDALONE)
# This has to happen *AFTER* add_lit_testsuite.
diff --git a/lldb/test/Shell/CMakeLists.txt b/lldb/test/Shell/CMakeLists.txt
index 221033bb05eb..97323ff6fbc3 100644
--- a/lldb/test/Shell/CMakeLists.txt
+++ b/lldb/test/Shell/CMakeLists.txt
@@ -1,4 +1,5 @@
add_custom_target(lldb-shell-test-deps)
+set_target_properties(lldb-shell-test-deps PROPERTIES FOLDER "LLDB/Tests")
add_dependencies(lldb-shell-test-deps lldb-test-depends)
add_lit_testsuites(LLDB-SHELL
diff --git a/lldb/test/Shell/SymbolFile/DWARF/x86/invalid_abbreviation.s b/lldb/test/Shell/SymbolFile/DWARF/x86/invalid_abbreviation.s
new file mode 100644
index 000000000000..3f32c037aeb2
--- /dev/null
+++ b/lldb/test/Shell/SymbolFile/DWARF/x86/invalid_abbreviation.s
@@ -0,0 +1,47 @@
+# REQUIRES: x86
+
+# RUN: llvm-mc -triple=x86_64-pc-linux -filetype=obj %s > %t
+# RUN: %lldb %t \
+# RUN: -o exit 2>&1 | FileCheck %s
+
+# CHECK-DAG: error: {{.*}} [0x0000000000000022]: abbreviation code 65536 too big, please file a bug and attach the file at the start of this error message
+# CHECK-DAG: error: {{.*}} [0x0000000000000048]: invalid abbreviation code 47, please file a bug and attach the file at the start of this error message
+
+
+ .section .debug_abbrev,"",@progbits
+ .uleb128 65535 # Largest representable Abbreviation Code
+ .byte 17 # DW_TAG_compile_unit
+ .byte 1 # DW_CHILDREN_yes
+ .byte 37 # DW_AT_producer
+ .byte 8 # DW_FORM_string
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 0 # EOM(3)
+
+ .section .debug_info,"",@progbits
+.Lcu_begin0:
+ .long .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit
+.Ldebug_info_start0:
+ .short 5 # DWARF version number
+ .byte 1 # DWARF Unit Type
+ .byte 8 # Address Size (in bytes)
+ .long .debug_abbrev # Offset Into Abbrev. Section
+ .uleb128 65535 # DW_TAG_compile_unit
+ .asciz "Hand-written DWARF" # DW_AT_producer
+ .uleb128 65536 # Unrepresentable abbreviation
+ .byte 0 # End Of Children Mark
+.Ldebug_info_end0:
+
+ .section .debug_info,"",@progbits
+.Lcu_begin1:
+ .long .Ldebug_info_end1-.Ldebug_info_start1 # Length of Unit
+.Ldebug_info_start1:
+ .short 5 # DWARF version number
+ .byte 1 # DWARF Unit Type
+ .byte 8 # Address Size (in bytes)
+ .long .debug_abbrev # Offset Into Abbrev. Section
+ .uleb128 65535 # DW_TAG_compile_unit
+ .asciz "Hand-written DWARF" # DW_AT_producer
+ .byte 47 # Missing abbreviation
+ .byte 0 # End Of Children Mark
+.Ldebug_info_end1:
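
Editorial note: the comments in this new test ("Largest representable Abbreviation Code" for 65535, "too big" for 65536) imply that LLDB keeps DWARF abbreviation codes in 16 bits, so any ULEB128-encoded code above UINT16_MAX must be rejected. A minimal, self-contained C++ sketch of that bound check follows — the decoder is illustrative only, not LLDB's actual DWARFDataExtractor:

#include <cstdint>
#include <cstdio>
#include <vector>

// Decode one unsigned LEB128 value starting at `i`, advancing `i` past it.
static uint64_t DecodeULEB128(const std::vector<uint8_t> &bytes, size_t &i) {
  uint64_t result = 0;
  unsigned shift = 0;
  uint8_t byte;
  do {
    byte = bytes[i++];
    result |= uint64_t(byte & 0x7f) << shift;
    shift += 7;
  } while (byte & 0x80);
  return result;
}

int main() {
  // 65535 (0xffff) encodes as ff ff 03; 65536 (0x10000) encodes as 80 80 04.
  std::vector<uint8_t> data = {0xff, 0xff, 0x03, 0x80, 0x80, 0x04};
  size_t i = 0;
  while (i < data.size()) {
    uint64_t code = DecodeULEB128(data, i);
    if (code > UINT16_MAX)
      std::printf("abbreviation code %llu too big\n", (unsigned long long)code);
    else
      std::printf("abbreviation code %llu ok\n", (unsigned long long)code);
  }
}

Run on the bytes above, this accepts 65535 and rejects 65536, mirroring the two CHECK-DAG diagnostics the test expects.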
diff --git a/lldb/test/Shell/Unwind/Inputs/signal-in-leaf-function-aarch64.c b/lldb/test/Shell/Unwind/Inputs/signal-in-leaf-function-aarch64.c
new file mode 100644
index 000000000000..fe020affcad0
--- /dev/null
+++ b/lldb/test/Shell/Unwind/Inputs/signal-in-leaf-function-aarch64.c
@@ -0,0 +1,15 @@
+#include <signal.h>
+#include <unistd.h>
+
+int __attribute__((naked)) signal_generating_add(int a, int b) {
+ asm("add w0, w1, w0\n\t"
+ "udf #0xdead\n\t"
+ "ret");
+}
+
+void sigill_handler(int signo) { _exit(0); }
+
+int main() {
+ signal(SIGILL, sigill_handler);
+ return signal_generating_add(42, 47);
+}
diff --git a/lldb/test/Shell/Unwind/signal-in-leaf-function-aarch64.test b/lldb/test/Shell/Unwind/signal-in-leaf-function-aarch64.test
new file mode 100644
index 000000000000..2ac2d4a75078
--- /dev/null
+++ b/lldb/test/Shell/Unwind/signal-in-leaf-function-aarch64.test
@@ -0,0 +1,30 @@
+# REQUIRES: target-aarch64 && native
+# UNSUPPORTED: system-windows
+# llvm.org/pr91610, rdar://128031075
+# XFAIL: system-darwin
+
+
+# RUN: %clang_host %S/Inputs/signal-in-leaf-function-aarch64.c -o %t
+# RUN: %lldb -s %s -o exit %t | FileCheck %s
+
+# Convert EXC_BAD_INSTRUCTION to SIGILL on darwin
+settings set platform.plugin.darwin.ignored-exceptions EXC_BAD_INSTRUCTION
+
+breakpoint set -n sigill_handler
+# CHECK: Breakpoint 1: where = {{.*}}`sigill_handler
+
+run
+# CHECK: thread #1, {{.*}} stop reason = signal SIGILL
+
+thread backtrace
+# CHECK: frame #0: [[ADD:0x[0-9a-fA-F]*]] {{.*}}`signal_generating_add
+# CHECK: frame #1: [[MAIN:0x[0-9a-fA-F]*]] {{.*}}`main
+
+continue
+# CHECK: thread #1, {{.*}} stop reason = breakpoint 1
+
+thread backtrace
+# CHECK: frame #0: {{.*}}`sigill_handler
+# Unknown number of signal trampoline frames
+# CHECK: frame #{{[0-9]+}}: [[ADD]] {{.*}}`signal_generating_add
+# CHECK: frame #{{[0-9]+}}: [[MAIN]] {{.*}}`main
diff --git a/lldb/test/Unit/CMakeLists.txt b/lldb/test/Unit/CMakeLists.txt
index a592e1cb1a1f..b86f1e0f5ed2 100644
--- a/lldb/test/Unit/CMakeLists.txt
+++ b/lldb/test/Unit/CMakeLists.txt
@@ -1,4 +1,5 @@
add_custom_target(lldb-unit-test-deps)
+set_target_properties(lldb-unit-test-deps PROPERTIES FOLDER "LLDB/Tests")
add_dependencies(lldb-unit-test-deps lldb-test-depends)
add_lit_testsuites(LLDB-UNIT
diff --git a/lldb/tools/driver/CMakeLists.txt b/lldb/tools/driver/CMakeLists.txt
index c93cd171b92b..cd304a047dea 100644
--- a/lldb/tools/driver/CMakeLists.txt
+++ b/lldb/tools/driver/CMakeLists.txt
@@ -28,8 +28,6 @@ add_dependencies(lldb
${tablegen_deps}
)
-set_target_properties(LLDBOptionsTableGen PROPERTIES FOLDER "lldb misc")
-
if(LLDB_BUILD_FRAMEWORK)
# In the build-tree, we know the exact path to the framework directory.
# The installed framework can be in different locations.
diff --git a/lldb/tools/lldb-dap/DAP.h b/lldb/tools/lldb-dap/DAP.h
index bbd9d46ba3a0..a88ee3e1dec6 100644
--- a/lldb/tools/lldb-dap/DAP.h
+++ b/lldb/tools/lldb-dap/DAP.h
@@ -26,6 +26,7 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/JSON.h"
+#include "llvm/Support/Threading.h"
#include "llvm/Support/raw_ostream.h"
#include "lldb/API/SBAttachInfo.h"
@@ -169,6 +170,7 @@ struct DAP {
std::optional<llvm::json::Object> last_launch_or_attach_request;
lldb::tid_t focus_tid;
bool disconnecting = false;
+ llvm::once_flag terminated_event_flag;
bool stop_at_entry;
bool is_attach;
bool enable_auto_variable_summaries;
diff --git a/lldb/tools/lldb-dap/JSONUtils.cpp b/lldb/tools/lldb-dap/JSONUtils.cpp
index bec277332bcf..069877dbab33 100644
--- a/lldb/tools/lldb-dap/JSONUtils.cpp
+++ b/lldb/tools/lldb-dap/JSONUtils.cpp
@@ -1065,9 +1065,9 @@ llvm::json::Object VariableDescription::GetVariableExtensionsJSON() {
}
std::string VariableDescription::GetResult(llvm::StringRef context) {
- // In repl and hover context, the results can be displayed as multiple lines
- // so more detailed descriptions can be returned.
- if (context != "repl" && context != "hover")
+ // In repl context, the results can be displayed as multiple lines so more
+ // detailed descriptions can be returned.
+ if (context != "repl")
return display_value;
if (!v.IsValid())
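
Editorial note: the effect of this change is that only the repl context (the debug console, which can render multiple lines) still receives the detailed description; hover popups now fall back to the compact display value, which is what the updated "startswith" expectation in TestDAP_variables.py above checks for. A minimal sketch of the gating, using hypothetical stand-in strings for the two forms of output:

#include <iostream>
#include <string>

// Hypothetical stand-ins for VariableDescription's two output forms.
std::string GetResult(const std::string &context,
                      const std::string &display_value,
                      const std::string &detailed_description) {
  // Only the repl can render multiple lines; every other context
  // (hover, watch, variables view) gets the single-line summary.
  if (context != "repl")
    return display_value;
  return detailed_description;
}

int main() {
  std::cout << GetResult("hover", "PointType @ 0x...", "(PointType) pt = {...}")
            << "\n"; // prints the compact value
  std::cout << GetResult("repl", "PointType @ 0x...", "(PointType) pt = {...}")
            << "\n"; // prints the detailed description
}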
diff --git a/lldb/tools/lldb-dap/lldb-dap.cpp b/lldb/tools/lldb-dap/lldb-dap.cpp
index 170fa88f1e8b..7746afb6cbbf 100644
--- a/lldb/tools/lldb-dap/lldb-dap.cpp
+++ b/lldb/tools/lldb-dap/lldb-dap.cpp
@@ -227,13 +227,12 @@ void SendContinuedEvent() {
// debugged.
void SendTerminatedEvent() {
// Prevent races if the process exits while we're being asked to disconnect.
- static std::mutex mutex;
- std::lock_guard<std::mutex> locker(mutex);
-
- g_dap.RunTerminateCommands();
- // Send a "terminated" event
- llvm::json::Object event(CreateTerminatedEventObject());
- g_dap.SendJSON(llvm::json::Value(std::move(event)));
+ llvm::call_once(g_dap.terminated_event_flag, [&] {
+ g_dap.RunTerminateCommands();
+ // Send a "terminated" event
+ llvm::json::Object event(CreateTerminatedEventObject());
+ g_dap.SendJSON(llvm::json::Value(std::move(event)));
+ });
}
// Send a thread stopped event for all threads as long as the process
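
Editorial note: replacing the function-local static mutex with the per-DAP llvm::once_flag added in DAP.h both scopes the guard to the session object and makes the one-shot semantics explicit — however many times the disconnect request and the process-exit handler race into SendTerminatedEvent, the terminate commands run and the event is emitted at most once. A minimal sketch of the idiom using std::call_once, whose semantics llvm::call_once mirrors; the Session type here is a hypothetical stand-in for DAP:

#include <iostream>
#include <mutex>
#include <thread>

// Hypothetical stand-in for the DAP session: the once_flag lives on the
// session object, so each session emits at most one terminated event.
struct Session {
  std::once_flag terminated_event_flag;

  void SendTerminatedEvent() {
    // The disconnect handler and the process-exit handler can both get
    // here concurrently; call_once runs the body exactly once.
    std::call_once(terminated_event_flag, [] {
      std::cout << "terminated event sent\n";
    });
  }
};

int main() {
  Session session;
  std::thread t1([&] { session.SendTerminatedEvent(); });
  std::thread t2([&] { session.SendTerminatedEvent(); });
  t1.join();
  t2.join(); // exactly one line printed, regardless of interleaving
}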
diff --git a/lldb/tools/lldb-fuzzer/lldb-commandinterpreter-fuzzer/CMakeLists.txt b/lldb/tools/lldb-fuzzer/lldb-commandinterpreter-fuzzer/CMakeLists.txt
index 7eb85ba91670..0ef30be8f9d3 100644
--- a/lldb/tools/lldb-fuzzer/lldb-commandinterpreter-fuzzer/CMakeLists.txt
+++ b/lldb/tools/lldb-fuzzer/lldb-commandinterpreter-fuzzer/CMakeLists.txt
@@ -29,4 +29,5 @@ if(TARGET lldb-commandinterpreter-fuzzer)
COMMAND $<TARGET_FILE:lldb-commandinterpreter-fuzzer> -dict=${CMAKE_CURRENT_SOURCE_DIR}/inputdictionary.txt -only_ascii=1 -artifact_prefix=commandinterpreter-
USES_TERMINAL
)
+ set_target_properties(fuzz-lldb-commandinterpreter PROPERTIES FOLDER "LLDB/Fuzzer")
endif()
diff --git a/lldb/tools/lldb-fuzzer/lldb-target-fuzzer/CMakeLists.txt b/lldb/tools/lldb-fuzzer/lldb-target-fuzzer/CMakeLists.txt
index 6876945c08da..c71b2731ef76 100644
--- a/lldb/tools/lldb-fuzzer/lldb-target-fuzzer/CMakeLists.txt
+++ b/lldb/tools/lldb-fuzzer/lldb-target-fuzzer/CMakeLists.txt
@@ -25,4 +25,5 @@ if(TARGET lldb-target-fuzzer)
COMMAND $<TARGET_FILE:lldb-target-fuzzer> -artifact_prefix=target-
USES_TERMINAL
)
+ set_target_properties(fuzz-lldb-target PROPERTIES FOLDER "LLDB/Fuzzer")
endif()
diff --git a/lldb/tools/lldb-server/CMakeLists.txt b/lldb/tools/lldb-server/CMakeLists.txt
index 67103e87a1d4..9030ed709a64 100644
--- a/lldb/tools/lldb-server/CMakeLists.txt
+++ b/lldb/tools/lldb-server/CMakeLists.txt
@@ -1,7 +1,6 @@
set(LLVM_TARGET_DEFINITIONS LLGSOptions.td)
tablegen(LLVM LLGSOptions.inc -gen-opt-parser-defs)
add_public_tablegen_target(LLGSOptionsTableGen)
-set_target_properties(LLGSOptionsTableGen PROPERTIES FOLDER "lldb misc")
set(LLDB_PLUGINS)
diff --git a/lldb/unittests/CMakeLists.txt b/lldb/unittests/CMakeLists.txt
index c084fa5cca92..c92c28f7b689 100644
--- a/lldb/unittests/CMakeLists.txt
+++ b/lldb/unittests/CMakeLists.txt
@@ -1,5 +1,5 @@
add_custom_target(LLDBUnitTests)
-set_target_properties(LLDBUnitTests PROPERTIES FOLDER "lldb tests")
+set_target_properties(LLDBUnitTests PROPERTIES FOLDER "LLDB/Tests")
add_dependencies(lldb-unit-test-deps LLDBUnitTests)
diff --git a/lldb/unittests/SymbolFile/DWARF/DWARFDIETest.cpp b/lldb/unittests/SymbolFile/DWARF/DWARFDIETest.cpp
index bcb211815f9f..20742ea51230 100644
--- a/lldb/unittests/SymbolFile/DWARF/DWARFDIETest.cpp
+++ b/lldb/unittests/SymbolFile/DWARF/DWARFDIETest.cpp
@@ -9,6 +9,7 @@
#include "Plugins/SymbolFile/DWARF/DWARFDIE.h"
#include "Plugins/SymbolFile/DWARF/DWARFDebugInfo.h"
#include "TestingSupport/Symbol/YAMLModuleTester.h"
+#include "lldb/Core/dwarf.h"
#include "llvm/ADT/STLExtras.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
@@ -169,21 +170,20 @@ DWARF:
YAMLModuleTester t(yamldata);
auto *symbol_file =
llvm::cast<SymbolFileDWARF>(t.GetModule()->GetSymbolFile());
- auto &debug_info = symbol_file->DebugInfo();
+ DWARFUnit *unit = symbol_file->DebugInfo().GetUnitAtIndex(0);
- DIERef first_die(std::nullopt, DIERef::Section::DebugInfo,
- 11 /*FirstDIEOffset*/);
- EXPECT_EQ(debug_info.PeekDIEName(first_die), "");
+ dw_offset_t first_die_offset = 11;
+ EXPECT_EQ(unit->PeekDIEName(first_die_offset), "");
- DIERef second_die(std::nullopt, DIERef::Section::DebugInfo, 14);
- EXPECT_EQ(debug_info.PeekDIEName(second_die), "NameType1");
+ dw_offset_t second_die_offset = 14;
+ EXPECT_EQ(unit->PeekDIEName(second_die_offset), "NameType1");
- DIERef third_die(std::nullopt, DIERef::Section::DebugInfo, 19);
- EXPECT_EQ(debug_info.PeekDIEName(third_die), "NameType2");
+ dw_offset_t third_die_offset = 19;
+ EXPECT_EQ(unit->PeekDIEName(third_die_offset), "NameType2");
- DIERef fourth_die(std::nullopt, DIERef::Section::DebugInfo, 24);
- EXPECT_EQ(debug_info.PeekDIEName(fourth_die), "NameType1");
+ dw_offset_t fourth_die_offset = 24;
+ EXPECT_EQ(unit->PeekDIEName(fourth_die_offset), "NameType1");
- DIERef fifth_die(std::nullopt, DIERef::Section::DebugInfo, 26);
- EXPECT_EQ(debug_info.PeekDIEName(fifth_die), "NameType2");
+ dw_offset_t fifth_die_offset = 26;
+ EXPECT_EQ(unit->PeekDIEName(fifth_die_offset), "NameType2");
}
diff --git a/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp b/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp
index 80abeb8fae9e..9303d6f5f3c6 100644
--- a/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp
+++ b/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp
@@ -77,7 +77,7 @@ TEST_F(TestArm64InstEmulation, TestSimpleDarwinFunction) {
// UnwindPlan we expect:
- // row[0]: 0: CFA=sp +0 =>
+ // row[0]: 0: CFA=sp +0 => fp= <same> lr= <same>
// row[1]: 4: CFA=sp+16 => fp=[CFA-16] lr=[CFA-8]
// row[2]: 8: CFA=fp+16 => fp=[CFA-16] lr=[CFA-8]
// row[2]: 16: CFA=sp+16 => fp=[CFA-16] lr=[CFA-8]
@@ -88,13 +88,19 @@ TEST_F(TestArm64InstEmulation, TestSimpleDarwinFunction) {
EXPECT_TRUE(engine->GetNonCallSiteUnwindPlanFromAssembly(
sample_range, data, sizeof(data), unwind_plan));
- // CFA=sp +0
+ // CFA=sp +0 => fp= <same> lr= <same>
row_sp = unwind_plan.GetRowForFunctionOffset(0);
EXPECT_EQ(0ull, row_sp->GetOffset());
EXPECT_TRUE(row_sp->GetCFAValue().GetRegisterNumber() == gpr_sp_arm64);
EXPECT_TRUE(row_sp->GetCFAValue().IsRegisterPlusOffset() == true);
EXPECT_EQ(0, row_sp->GetCFAValue().GetOffset());
+ EXPECT_TRUE(row_sp->GetRegisterInfo(gpr_fp_arm64, regloc));
+ EXPECT_TRUE(regloc.IsSame());
+
+ EXPECT_TRUE(row_sp->GetRegisterInfo(gpr_lr_arm64, regloc));
+ EXPECT_TRUE(regloc.IsSame());
+
// CFA=sp+16 => fp=[CFA-16] lr=[CFA-8]
row_sp = unwind_plan.GetRowForFunctionOffset(4);
EXPECT_EQ(4ull, row_sp->GetOffset());
@@ -146,6 +152,12 @@ TEST_F(TestArm64InstEmulation, TestSimpleDarwinFunction) {
EXPECT_TRUE(row_sp->GetCFAValue().GetRegisterNumber() == gpr_sp_arm64);
EXPECT_TRUE(row_sp->GetCFAValue().IsRegisterPlusOffset() == true);
EXPECT_EQ(0, row_sp->GetCFAValue().GetOffset());
+
+ EXPECT_TRUE(row_sp->GetRegisterInfo(gpr_fp_arm64, regloc));
+ EXPECT_TRUE(regloc.IsSame());
+
+ EXPECT_TRUE(row_sp->GetRegisterInfo(gpr_lr_arm64, regloc));
+ EXPECT_TRUE(regloc.IsSame());
}
TEST_F(TestArm64InstEmulation, TestMediumDarwinFunction) {
@@ -381,8 +393,12 @@ TEST_F(TestArm64InstEmulation, TestFramelessThreeEpilogueFunction) {
EXPECT_FALSE(row_sp->GetRegisterInfo(gpr_x26_arm64, regloc));
EXPECT_FALSE(row_sp->GetRegisterInfo(gpr_x27_arm64, regloc));
EXPECT_FALSE(row_sp->GetRegisterInfo(gpr_x28_arm64, regloc));
- EXPECT_FALSE(row_sp->GetRegisterInfo(gpr_fp_arm64, regloc));
- EXPECT_FALSE(row_sp->GetRegisterInfo(gpr_lr_arm64, regloc));
+
+ EXPECT_TRUE(row_sp->GetRegisterInfo(gpr_fp_arm64, regloc));
+ EXPECT_TRUE(regloc.IsSame());
+
+ EXPECT_TRUE(row_sp->GetRegisterInfo(gpr_lr_arm64, regloc));
+ EXPECT_TRUE(regloc.IsSame());
row_sp = unwind_plan.GetRowForFunctionOffset(36);
EXPECT_TRUE(row_sp->GetCFAValue().GetRegisterNumber() == gpr_sp_arm64);
diff --git a/lldb/unittests/tools/lldb-server/CMakeLists.txt b/lldb/unittests/tools/lldb-server/CMakeLists.txt
index 1a7603077b8d..05a7ef7f66f2 100644
--- a/lldb/unittests/tools/lldb-server/CMakeLists.txt
+++ b/lldb/unittests/tools/lldb-server/CMakeLists.txt
@@ -7,7 +7,7 @@ function(add_lldb_test_executable test_name)
set_output_directory(${test_name} BINARY_DIR ${outdir} LIBRARY_DIR ${outdir})
list(APPEND ALL_LLDB_TEST_EXECUTABLES ${test_name})
set(ALL_LLDB_TEST_EXECUTABLES ${ALL_LLDB_TEST_EXECUTABLES} PARENT_SCOPE)
- set_target_properties(${test_name} PROPERTIES FOLDER "lldb tests")
+ set_target_properties(${test_name} PROPERTIES FOLDER "LLDB/Tests")
endfunction()
add_lldb_test_executable(thread_inferior inferior/thread_inferior.cpp)
diff --git a/lldb/utils/TableGen/CMakeLists.txt b/lldb/utils/TableGen/CMakeLists.txt
index 47a6400b4287..0ac010bcda35 100644
--- a/lldb/utils/TableGen/CMakeLists.txt
+++ b/lldb/utils/TableGen/CMakeLists.txt
@@ -13,6 +13,5 @@ if (NOT DEFINED LLDB_TABLEGEN_EXE)
LLDBTableGen.cpp
LLDBTableGenUtils.cpp
)
- set_target_properties(lldb-tblgen PROPERTIES FOLDER "LLDB tablegenning")
endif()
endif()
diff --git a/lldb/utils/lit-cpuid/CMakeLists.txt b/lldb/utils/lit-cpuid/CMakeLists.txt
index a151b986b551..6ea7e1c5b4e1 100644
--- a/lldb/utils/lit-cpuid/CMakeLists.txt
+++ b/lldb/utils/lit-cpuid/CMakeLists.txt
@@ -6,4 +6,4 @@ add_lldb_executable(lit-cpuid
TargetParser
)
-set_target_properties(lit-cpuid PROPERTIES FOLDER "lldb utils")
+set_target_properties(lit-cpuid PROPERTIES FOLDER "LLDB/Utils")
diff --git a/lldb/utils/lldb-dotest/CMakeLists.txt b/lldb/utils/lldb-dotest/CMakeLists.txt
index 09f41dbce421..bb17a2ce017d 100644
--- a/lldb/utils/lldb-dotest/CMakeLists.txt
+++ b/lldb/utils/lldb-dotest/CMakeLists.txt
@@ -1,7 +1,7 @@
# Make lldb-dotest a custom target.
add_custom_target(lldb-dotest)
add_dependencies(lldb-dotest lldb-test-depends)
-set_target_properties(lldb-dotest PROPERTIES FOLDER "lldb utils")
+set_target_properties(lldb-dotest PROPERTIES FOLDER "LLDB/Utils")
get_property(LLDB_TEST_USER_ARGS GLOBAL PROPERTY LLDB_TEST_USER_ARGS_PROPERTY)
get_property(LLDB_TEST_COMMON_ARGS GLOBAL PROPERTY LLDB_TEST_COMMON_ARGS_PROPERTY)
diff --git a/lldb/utils/lldb-repro/CMakeLists.txt b/lldb/utils/lldb-repro/CMakeLists.txt
index 725cb66c4755..8ca02b9fb819 100644
--- a/lldb/utils/lldb-repro/CMakeLists.txt
+++ b/lldb/utils/lldb-repro/CMakeLists.txt
@@ -1,6 +1,6 @@
add_custom_target(lldb-repro)
add_dependencies(lldb-repro lldb-test-depends)
-set_target_properties(lldb-repro PROPERTIES FOLDER "lldb utils")
+set_target_properties(lldb-repro PROPERTIES FOLDER "LLDB/Utils")
# Generate lldb-repro Python script for each build mode.
if(LLDB_BUILT_STANDALONE)
diff --git a/llvm-libgcc/CMakeLists.txt b/llvm-libgcc/CMakeLists.txt
index 013c9ca2e330..c6641ab9e321 100644
--- a/llvm-libgcc/CMakeLists.txt
+++ b/llvm-libgcc/CMakeLists.txt
@@ -3,6 +3,7 @@
#===============================================================================
cmake_minimum_required(VERSION 3.20.0)
+set(LLVM_SUBPROJECT_TITLE "LLVM libgcc")
set(LLVM_COMMON_CMAKE_UTILS "${CMAKE_CURRENT_SOURCE_DIR}/../cmake")
diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt
index c06e661573ed..612e90abd409 100644
--- a/llvm/CMakeLists.txt
+++ b/llvm/CMakeLists.txt
@@ -539,8 +539,6 @@ set(FFI_INCLUDE_DIR "" CACHE PATH "Additional directory, where CMake should sear
set(LLVM_TARGET_ARCH "host"
CACHE STRING "Set target to use for LLVM JIT or use \"host\" for automatic detection.")
-option(LLVM_ENABLE_TERMINFO "Use terminfo database if available." ON)
-
set(LLVM_ENABLE_LIBXML2 "ON" CACHE STRING "Use libxml2 if available. Can be ON, OFF, or FORCE_ON")
option(LLVM_ENABLE_LIBEDIT "Use libedit if available." ON)
@@ -1124,7 +1122,7 @@ configure_file(
add_custom_target(srpm
COMMAND cpack -G TGZ --config CPackSourceConfig.cmake -B ${LLVM_SRPM_DIR}/SOURCES
COMMAND rpmbuild -bs --define '_topdir ${LLVM_SRPM_DIR}' ${LLVM_SRPM_BINARY_SPECFILE})
-set_target_properties(srpm PROPERTIES FOLDER "Misc")
+set_target_properties(srpm PROPERTIES FOLDER "LLVM/Misc")
if(APPLE AND DARWIN_LTO_LIBRARY)
set(CMAKE_EXE_LINKER_FLAGS
@@ -1227,7 +1225,9 @@ if( LLVM_INCLUDE_UTILS )
add_subdirectory(utils/split-file)
add_subdirectory(utils/mlgo-utils)
if( LLVM_INCLUDE_TESTS )
+ set(LLVM_SUBPROJECT_TITLE "Third-Party/Google Test")
add_subdirectory(${LLVM_THIRD_PARTY_DIR}/unittest ${CMAKE_CURRENT_BINARY_DIR}/third-party/unittest)
+ set(LLVM_SUBPROJECT_TITLE)
endif()
else()
if ( LLVM_INCLUDE_TESTS )
@@ -1291,7 +1291,7 @@ if( LLVM_INCLUDE_TESTS )
if(LLVM_ALL_LIT_DEPENDS OR LLVM_ALL_ADDITIONAL_TEST_DEPENDS)
add_dependencies(test-depends ${LLVM_ALL_LIT_DEPENDS} ${LLVM_ALL_ADDITIONAL_TEST_DEPENDS})
endif()
- set_target_properties(test-depends PROPERTIES FOLDER "Tests")
+ set_target_properties(test-depends PROPERTIES FOLDER "LLVM/Tests")
add_dependencies(check-all test-depends)
endif()
@@ -1348,7 +1348,7 @@ if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY)
# Installing the headers needs to depend on generating any public
# tablegen'd headers.
add_custom_target(llvm-headers DEPENDS intrinsics_gen omp_gen)
- set_target_properties(llvm-headers PROPERTIES FOLDER "Misc")
+ set_target_properties(llvm-headers PROPERTIES FOLDER "LLVM/Resources")
if (NOT LLVM_ENABLE_IDE)
add_llvm_install_targets(install-llvm-headers
@@ -1358,7 +1358,7 @@ if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY)
# Custom target to install all libraries.
add_custom_target(llvm-libraries)
- set_target_properties(llvm-libraries PROPERTIES FOLDER "Misc")
+ set_target_properties(llvm-libraries PROPERTIES FOLDER "LLVM/Resources")
if (NOT LLVM_ENABLE_IDE)
add_llvm_install_targets(install-llvm-libraries
@@ -1404,6 +1404,8 @@ if (LLVM_INCLUDE_BENCHMARKS)
set(HAVE_STD_REGEX ON CACHE BOOL "OK" FORCE)
add_subdirectory(${LLVM_THIRD_PARTY_DIR}/benchmark
${CMAKE_CURRENT_BINARY_DIR}/third-party/benchmark)
+ set_target_properties(benchmark PROPERTIES FOLDER "Third-Party/Google Benchmark")
+ set_target_properties(benchmark_main PROPERTIES FOLDER "Third-Party/Google Benchmark")
add_subdirectory(benchmarks)
endif()
diff --git a/llvm/cmake/config-ix.cmake b/llvm/cmake/config-ix.cmake
index bf1b110245bb..8cfb36b0194e 100644
--- a/llvm/cmake/config-ix.cmake
+++ b/llvm/cmake/config-ix.cmake
@@ -240,21 +240,11 @@ if(NOT LLVM_USE_SANITIZER MATCHES "Memory.*")
else()
set(HAVE_LIBEDIT 0)
endif()
- if(LLVM_ENABLE_TERMINFO)
- if(LLVM_ENABLE_TERMINFO STREQUAL FORCE_ON)
- find_package(Terminfo REQUIRED)
- else()
- find_package(Terminfo)
- endif()
- set(LLVM_ENABLE_TERMINFO "${Terminfo_FOUND}")
- endif()
else()
set(HAVE_LIBEDIT 0)
- set(LLVM_ENABLE_TERMINFO 0)
endif()
else()
set(HAVE_LIBEDIT 0)
- set(LLVM_ENABLE_TERMINFO 0)
endif()
# function checks
@@ -415,15 +405,18 @@ if( LLVM_ENABLE_PIC )
set(ENABLE_PIC 1)
else()
set(ENABLE_PIC 0)
- check_cxx_compiler_flag("-fno-pie" SUPPORTS_NO_PIE_FLAG)
- if(SUPPORTS_NO_PIE_FLAG)
- set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fno-pie")
- endif()
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fno-pie")
endif()
-check_cxx_compiler_flag("-Wvariadic-macros" SUPPORTS_VARIADIC_MACROS_FLAG)
-check_cxx_compiler_flag("-Wgnu-zero-variadic-macro-arguments"
- SUPPORTS_GNU_ZERO_VARIADIC_MACRO_ARGUMENTS_FLAG)
+set(SUPPORTS_VARIADIC_MACROS_FLAG 0)
+if (LLVM_COMPILER_IS_GCC_COMPATIBLE)
+ set(SUPPORTS_VARIADIC_MACROS_FLAG 1)
+endif()
+if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ set(SUPPORTS_GNU_ZERO_VARIADIC_MACRO_ARGUMENTS_FLAG 1)
+else()
+ set(SUPPORTS_GNU_ZERO_VARIADIC_MACRO_ARGUMENTS_FLAG 0)
+endif()
set(USE_NO_MAYBE_UNINITIALIZED 0)
set(USE_NO_UNINITIALIZED 0)
@@ -433,11 +426,9 @@ set(USE_NO_UNINITIALIZED 0)
if (CMAKE_COMPILER_IS_GNUCXX)
# Disable all -Wuninitialized warning for old GCC versions.
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 12.0)
- check_cxx_compiler_flag("-Wuninitialized" HAS_UNINITIALIZED)
- set(USE_NO_UNINITIALIZED ${HAS_UNINITIALIZED})
+ set(USE_NO_UNINITIALIZED 1)
else()
- check_cxx_compiler_flag("-Wmaybe-uninitialized" HAS_MAYBE_UNINITIALIZED)
- set(USE_NO_MAYBE_UNINITIALIZED ${HAS_MAYBE_UNINITIALIZED})
+ set(USE_NO_MAYBE_UNINITIALIZED 1)
endif()
endif()
diff --git a/llvm/cmake/modules/AddLLVM.cmake b/llvm/cmake/modules/AddLLVM.cmake
index 693fd5669f63..03f4e1f190fd 100644
--- a/llvm/cmake/modules/AddLLVM.cmake
+++ b/llvm/cmake/modules/AddLLVM.cmake
@@ -4,6 +4,21 @@ include(LLVMProcessSources)
include(LLVM-Config)
include(DetermineGCCCompatible)
+# get_subproject_title(outvar)
+# Set ${outvar} to the title of the current LLVM subproject (Clang, MLIR ...)
+#
+# The title is set in the subproject's top-level CMakeLists.txt using the
+# variable LLVM_SUBPROJECT_TITLE. If it is not set, the subproject is assumed
+# to be LLVM itself. The title is not semantically significant, but is used to
+# create folders in CMake-generated IDE projects (Visual Studio/Xcode).
+function(get_subproject_title outvar)
+ if (LLVM_SUBPROJECT_TITLE)
+ set(${outvar} "${LLVM_SUBPROJECT_TITLE}" PARENT_SCOPE)
+ else ()
+ set(${outvar} "LLVM" PARENT_SCOPE)
+ endif ()
+endfunction(get_subproject_title)
+
function(llvm_update_compile_flags name)
get_property(sources TARGET ${name} PROPERTY SOURCES)
if("${sources}" MATCHES "\\.c(;|$)")
@@ -151,7 +166,8 @@ function(add_llvm_symbol_exports target_name export_file)
endif()
add_custom_target(${target_name}_exports DEPENDS ${native_export_file})
- set_target_properties(${target_name}_exports PROPERTIES FOLDER "Misc")
+ get_subproject_title(subproject_title)
+ set_target_properties(${target_name}_exports PROPERTIES FOLDER "${subproject_title}/API")
get_property(srcs TARGET ${target_name} PROPERTY SOURCES)
foreach(src ${srcs})
@@ -260,11 +276,11 @@ if (NOT DEFINED LLVM_LINKER_DETECTED AND NOT WIN32)
if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
include(CheckLinkerFlag)
- # Linkers that support Darwin allow a setting to internalize all symbol exports,
+ # Linkers that support Darwin allow a setting to internalize all symbol exports,
# aiding in reducing binary size and often is applicable for executables.
check_linker_flag(C "-Wl,-no_exported_symbols" LLVM_LINKER_SUPPORTS_NO_EXPORTED_SYMBOLS)
-
- if (NOT LLVM_USE_LINKER)
+
+ if (NOT LLVM_USE_LINKER)
# Apple's linker complains about duplicate libraries, which CMake likes to do
# to support ELF platforms. To silence that warning, we can use
# -no_warn_duplicate_libraries, but only in versions of the linker that
@@ -273,8 +289,8 @@ if (NOT DEFINED LLVM_LINKER_DETECTED AND NOT WIN32)
else()
set(LLVM_LINKER_SUPPORTS_NO_WARN_DUPLICATE_LIBRARIES OFF CACHE INTERNAL "")
endif()
-
- else()
+
+ else()
set(LLVM_LINKER_SUPPORTS_NO_EXPORTED_SYMBOLS OFF CACHE INTERNAL "")
endif()
endif()
@@ -543,6 +559,8 @@ function(llvm_add_library name)
endif()
endif()
+ get_subproject_title(subproject_title)
+
# Generate objlib
if((ARG_SHARED AND ARG_STATIC) OR ARG_OBJECT)
# Generate an obj library for both targets.
@@ -564,7 +582,7 @@ function(llvm_add_library name)
# Bring in the target include directories from our original target.
target_include_directories(${obj_name} PRIVATE $<TARGET_PROPERTY:${name},INCLUDE_DIRECTORIES>)
- set_target_properties(${obj_name} PROPERTIES FOLDER "Object Libraries")
+ set_target_properties(${obj_name} PROPERTIES FOLDER "${subproject_title}/Object Libraries")
if(ARG_DEPENDS)
add_dependencies(${obj_name} ${ARG_DEPENDS})
endif()
@@ -603,6 +621,7 @@ function(llvm_add_library name)
LINK_LIBS ${ARG_LINK_LIBS}
LINK_COMPONENTS ${ARG_LINK_COMPONENTS}
)
+ set_target_properties(${name_static} PROPERTIES FOLDER "${subproject_title}/Libraries")
# Bring in the target link info from our original target.
target_link_directories(${name_static} PRIVATE $<TARGET_PROPERTY:${name},LINK_DIRECTORIES>)
@@ -620,6 +639,7 @@ function(llvm_add_library name)
else()
add_library(${name} STATIC ${ALL_FILES})
endif()
+ set_target_properties(${name} PROPERTIES FOLDER "${subproject_title}/Libraries")
if(ARG_COMPONENT_LIB)
set_target_properties(${name} PROPERTIES LLVM_COMPONENT TRUE)
@@ -796,6 +816,8 @@ function(add_llvm_install_targets target)
endif()
endforeach()
+ get_subproject_title(subproject_title)
+
add_custom_target(${target}
DEPENDS ${file_dependencies}
COMMAND "${CMAKE_COMMAND}"
@@ -803,7 +825,7 @@ function(add_llvm_install_targets target)
${prefix_option}
-P "${CMAKE_BINARY_DIR}/cmake_install.cmake"
USES_TERMINAL)
- set_target_properties(${target} PROPERTIES FOLDER "Component Install Targets")
+ set_target_properties(${target} PROPERTIES FOLDER "${subproject_title}/Installation")
add_custom_target(${target}-stripped
DEPENDS ${file_dependencies}
COMMAND "${CMAKE_COMMAND}"
@@ -812,7 +834,7 @@ function(add_llvm_install_targets target)
-DCMAKE_INSTALL_DO_STRIP=1
-P "${CMAKE_BINARY_DIR}/cmake_install.cmake"
USES_TERMINAL)
- set_target_properties(${target}-stripped PROPERTIES FOLDER "Component Install Targets (Stripped)")
+ set_target_properties(${target}-stripped PROPERTIES FOLDER "${subproject_title}/Installation")
if(target_dependencies)
add_dependencies(${target} ${target_dependencies})
add_dependencies(${target}-stripped ${target_dependencies})
@@ -832,6 +854,8 @@ endfunction()
function(add_llvm_component_group name)
cmake_parse_arguments(ARG "HAS_JIT" "" "LINK_COMPONENTS" ${ARGN})
add_custom_target(${name})
+ get_subproject_title(subproject_title)
+ set_target_properties(${name} PROPERTIES FOLDER "${subproject_title}/Component Groups")
if(ARG_HAS_JIT)
set_property(TARGET ${name} PROPERTY COMPONENT_HAS_JIT ON)
endif()
@@ -865,6 +889,8 @@ function(add_llvm_component_library name)
if(ARG_ADD_TO_COMPONENT)
set_property(TARGET ${ARG_ADD_TO_COMPONENT} APPEND PROPERTY LLVM_LINK_COMPONENTS ${component_name})
+ get_subproject_title(subproject_title)
+ set_target_properties(${name} PROPERTIES FOLDER "${subproject_title}/Libraries/${ARG_ADD_TO_COMPONENT}")
endif()
endfunction()
@@ -921,10 +947,12 @@ macro(add_llvm_library name)
endif()
set_property(GLOBAL APPEND PROPERTY LLVM_EXPORTS ${name})
endif()
+
+ get_subproject_title(subproject_title)
if (ARG_MODULE)
- set_target_properties(${name} PROPERTIES FOLDER "Loadable modules")
+ set_target_properties(${name} PROPERTIES FOLDER "${subproject_title}/Loadable Modules")
else()
- set_target_properties(${name} PROPERTIES FOLDER "Libraries")
+ set_target_properties(${name} PROPERTIES FOLDER "${subproject_title}/Libraries")
endif()
endmacro(add_llvm_library name)
@@ -948,7 +976,8 @@ macro(generate_llvm_objects name)
add_dependencies(${obj_name} ${ARG_DEPENDS})
endif()
- set_target_properties(${obj_name} PROPERTIES FOLDER "Object Libraries")
+ get_subproject_title(subproject_title)
+ set_target_properties(${obj_name} PROPERTIES FOLDER "${subproject_title}/Object Libraries")
endif()
if (ARG_GENERATE_DRIVER)
@@ -999,6 +1028,8 @@ macro(add_llvm_executable name)
else()
add_executable(${name} ${ALL_FILES})
endif()
+ get_subproject_title(subproject_title)
+ set_target_properties(${name} PROPERTIES FOLDER "${subproject_title}/Executables")
setup_dependency_debugging(${name} ${LLVM_COMMON_DEPENDS})
@@ -1038,7 +1069,7 @@ macro(add_llvm_executable name)
add_llvm_symbol_exports( ${name} ${LLVM_EXPORTED_SYMBOL_FILE} )
endif(LLVM_EXPORTED_SYMBOL_FILE)
- if (DEFINED LLVM_ENABLE_EXPORTED_SYMBOLS_IN_EXECUTABLES AND
+ if (DEFINED LLVM_ENABLE_EXPORTED_SYMBOLS_IN_EXECUTABLES AND
NOT LLVM_ENABLE_EXPORTED_SYMBOLS_IN_EXECUTABLES)
if(LLVM_LINKER_SUPPORTS_NO_EXPORTED_SYMBOLS)
set_property(TARGET ${name} APPEND_STRING PROPERTY
@@ -1418,8 +1449,9 @@ macro(llvm_add_tool project name)
if( LLVM_BUILD_TOOLS )
set_property(GLOBAL APPEND PROPERTY LLVM_EXPORTS ${name})
endif()
- set_target_properties(${name} PROPERTIES FOLDER "Tools")
endif()
+ get_subproject_title(subproject_title)
+ set_target_properties(${name} PROPERTIES FOLDER "${subproject_title}/Tools")
endmacro(llvm_add_tool project name)
macro(add_llvm_tool name)
@@ -1435,7 +1467,8 @@ macro(add_llvm_example name)
if( LLVM_BUILD_EXAMPLES )
install(TARGETS ${name} RUNTIME DESTINATION "${LLVM_EXAMPLES_INSTALL_DIR}")
endif()
- set_target_properties(${name} PROPERTIES FOLDER "Examples")
+ get_subproject_title(subproject_title)
+ set_target_properties(${name} PROPERTIES FOLDER "${subproject_title}/Examples")
endmacro(add_llvm_example name)
macro(add_llvm_example_library name)
@@ -1446,7 +1479,8 @@ macro(add_llvm_example_library name)
add_llvm_library(${name} ${ARGN})
endif()
- set_target_properties(${name} PROPERTIES FOLDER "Examples")
+ get_subproject_title(subproject_title)
+ set_target_properties(${name} PROPERTIES FOLDER "${subproject_title}/Examples")
endmacro(add_llvm_example_library name)
# This is a macro that is used to create targets for executables that are needed
@@ -1457,7 +1491,8 @@ macro(add_llvm_utility name)
endif()
add_llvm_executable(${name} DISABLE_LLVM_LINK_LLVM_DYLIB ${ARGN})
- set_target_properties(${name} PROPERTIES FOLDER "Utils")
+ get_subproject_title(subproject_title)
+ set_target_properties(${name} PROPERTIES FOLDER "${subproject_title}/Utils")
if ( ${name} IN_LIST LLVM_TOOLCHAIN_UTILITIES OR NOT LLVM_INSTALL_TOOLCHAIN_ONLY)
if (LLVM_INSTALL_UTILS AND LLVM_BUILD_UTILS)
get_target_export_arg(${name} LLVM export_to_llvmexports)
@@ -1480,19 +1515,20 @@ endmacro(add_llvm_utility name)
macro(add_llvm_fuzzer name)
cmake_parse_arguments(ARG "" "DUMMY_MAIN" "" ${ARGN})
+ get_subproject_title(subproject_title)
if( LLVM_LIB_FUZZING_ENGINE )
set(LLVM_OPTIONAL_SOURCES ${ARG_DUMMY_MAIN})
add_llvm_executable(${name} ${ARG_UNPARSED_ARGUMENTS})
target_link_libraries(${name} PRIVATE ${LLVM_LIB_FUZZING_ENGINE})
- set_target_properties(${name} PROPERTIES FOLDER "Fuzzers")
+ set_target_properties(${name} PROPERTIES FOLDER "${subproject_title}/Fuzzers")
elseif( LLVM_USE_SANITIZE_COVERAGE )
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=fuzzer")
set(LLVM_OPTIONAL_SOURCES ${ARG_DUMMY_MAIN})
add_llvm_executable(${name} ${ARG_UNPARSED_ARGUMENTS})
- set_target_properties(${name} PROPERTIES FOLDER "Fuzzers")
+ set_target_properties(${name} PROPERTIES FOLDER "${subproject_title}/Fuzzers")
elseif( ARG_DUMMY_MAIN )
add_llvm_executable(${name} ${ARG_DUMMY_MAIN} ${ARG_UNPARSED_ARGUMENTS})
- set_target_properties(${name} PROPERTIES FOLDER "Fuzzers")
+ set_target_properties(${name} PROPERTIES FOLDER "${subproject_title}/Fuzzers")
endif()
endmacro()
@@ -1641,7 +1677,7 @@ function(add_unittest test_suite test_name)
if (SUPPORTS_VARIADIC_MACROS_FLAG)
list(APPEND LLVM_COMPILE_FLAGS "-Wno-variadic-macros")
- endif ()
+ endif()
# Some parts of gtest rely on this GNU extension, don't warn on it.
if(SUPPORTS_GNU_ZERO_VARIADIC_MACRO_ARGUMENTS_FLAG)
list(APPEND LLVM_COMPILE_FLAGS "-Wno-gnu-zero-variadic-macro-arguments")
@@ -1653,6 +1689,8 @@ function(add_unittest test_suite test_name)
list(APPEND LLVM_LINK_COMPONENTS Support) # gtest needs it for raw_ostream
add_llvm_executable(${test_name} IGNORE_EXTERNALIZE_DEBUGINFO NO_INSTALL_RPATH ${ARGN})
+ get_subproject_title(subproject_title)
+ set_target_properties(${test_name} PROPERTIES FOLDER "${subproject_title}/Tests/Unit")
# The runtime benefits of LTO don't outweigh the compile time costs for tests.
if(LLVM_ENABLE_LTO)
@@ -1684,10 +1722,6 @@ function(add_unittest test_suite test_name)
target_link_libraries(${test_name} PRIVATE llvm_gtest_main llvm_gtest ${LLVM_PTHREAD_LIB})
add_dependencies(${test_suite} ${test_name})
- get_target_property(test_suite_folder ${test_suite} FOLDER)
- if (test_suite_folder)
- set_property(TARGET ${test_name} PROPERTY FOLDER "${test_suite_folder}")
- endif ()
endfunction()
# Use for test binaries that call llvm::getInputFileDirectory(). Use of this
@@ -1710,7 +1744,8 @@ function(add_benchmark benchmark_name)
add_llvm_executable(${benchmark_name} IGNORE_EXTERNALIZE_DEBUGINFO NO_INSTALL_RPATH ${ARGN})
set(outdir ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR})
set_output_directory(${benchmark_name} BINARY_DIR ${outdir} LIBRARY_DIR ${outdir})
- set_property(TARGET ${benchmark_name} PROPERTY FOLDER "Utils")
+ get_subproject_title(subproject_title)
+ set_property(TARGET ${benchmark_name} PROPERTY FOLDER "${subproject_title}/Benchmarks")
target_link_libraries(${benchmark_name} PRIVATE benchmark)
endfunction()
@@ -1999,6 +2034,8 @@ function(add_lit_target target comment)
COMMAND ${CMAKE_COMMAND} -E echo "${target} does nothing, no tools built.")
message(STATUS "${target} does nothing.")
endif()
+ get_subproject_title(subproject_title)
+ set_target_properties(${target} PROPERTIES FOLDER "${subproject_title}/Tests")
if (ARG_DEPENDS)
add_dependencies(${target} ${ARG_DEPENDS})
@@ -2080,7 +2117,8 @@ function(add_lit_testsuites project directory)
cmake_parse_arguments(ARG "EXCLUDE_FROM_CHECK_ALL" "FOLDER" "PARAMS;DEPENDS;ARGS" ${ARGN})
if (NOT ARG_FOLDER)
- set(ARG_FOLDER "Test Subdirectories")
+ get_subproject_title(subproject_title)
+ set(ARG_FOLDER "${subproject_title}/Tests/LIT Testsuites")
endif()
# Search recursively for test directories by assuming anything not
@@ -2282,7 +2320,8 @@ function(llvm_add_tool_symlink project link_name target)
set(should_build_all ALL)
endif()
add_custom_target(${target_name} ${should_build_all} DEPENDS ${target} ${output_path})
- set_target_properties(${target_name} PROPERTIES FOLDER Tools)
+ get_subproject_title(subproject_title)
+ set_target_properties(${target_name} PROPERTIES FOLDER "${subproject_title}/Tools")
# Make sure both the link and target are toolchain tools
if (${link_name} IN_LIST LLVM_TOOLCHAIN_TOOLS AND ${target} IN_LIST LLVM_TOOLCHAIN_TOOLS)
@@ -2542,5 +2581,7 @@ function(setup_host_tool tool_name setting_name exe_var_name target_var_name)
if(LLVM_USE_HOST_TOOLS AND NOT ${setting_name})
build_native_tool(${tool_name} exe_name DEPENDS ${tool_name})
add_custom_target(${target_var_name} DEPENDS ${exe_name})
+ get_subproject_title(subproject_title)
+ set_target_properties(${target_var_name} PROPERTIES FOLDER "${subproject_title}/Native")
endif()
endfunction()
diff --git a/llvm/cmake/modules/AddOCaml.cmake b/llvm/cmake/modules/AddOCaml.cmake
index 891c9e6d618c..2d9116b08a52 100644
--- a/llvm/cmake/modules/AddOCaml.cmake
+++ b/llvm/cmake/modules/AddOCaml.cmake
@@ -173,6 +173,8 @@ function(add_ocaml_library name)
VERBATIM)
add_custom_target("ocaml_${name}" ALL DEPENDS ${ocaml_outputs} "${bin}/${name}.odoc")
+ get_subproject_title(subproject_title)
+ set_target_properties("ocaml_${name}" PROPERTIES FOLDER "${subproject_title}/Bindings/OCaml")
set_target_properties("ocaml_${name}" PROPERTIES
OCAML_FLAGS "-I;${bin}")
@@ -228,5 +230,5 @@ endfunction()
add_custom_target(ocaml_make_directory
COMMAND "${CMAKE_COMMAND}" "-E" "make_directory" "${LLVM_LIBRARY_DIR}/ocaml/llvm")
add_custom_target("ocaml_all")
-set_target_properties(ocaml_all PROPERTIES FOLDER "Misc")
-set_target_properties(ocaml_make_directory PROPERTIES FOLDER "Misc")
+set_target_properties(ocaml_all PROPERTIES FOLDER "LLVM/Bindings/OCaml")
+set_target_properties(ocaml_make_directory PROPERTIES FOLDER "LLVM/Bindings/OCaml")
diff --git a/llvm/cmake/modules/AddSphinxTarget.cmake b/llvm/cmake/modules/AddSphinxTarget.cmake
index b90639fbbf07..9de169d7297c 100644
--- a/llvm/cmake/modules/AddSphinxTarget.cmake
+++ b/llvm/cmake/modules/AddSphinxTarget.cmake
@@ -6,6 +6,7 @@ if (LLVM_ENABLE_SPHINX)
find_package(Sphinx REQUIRED)
if (LLVM_BUILD_DOCS AND NOT TARGET sphinx)
add_custom_target(sphinx ALL)
+ set_target_properties(sphinx PROPERTIES FOLDER "LLVM/Docs")
endif()
else()
message(STATUS "Sphinx disabled.")
@@ -58,6 +59,8 @@ function (add_sphinx_target builder project)
"${SPHINX_BUILD_DIR}" # Output
COMMENT
"Generating ${builder} Sphinx documentation for ${project} into \"${SPHINX_BUILD_DIR}\"")
+ get_subproject_title(subproject_title)
+ set_target_properties(${SPHINX_TARGET_NAME} PROPERTIES FOLDER "${subproject_title}/Docs")
# When "clean" target is run, remove the Sphinx build directory
set_property(DIRECTORY APPEND PROPERTY
diff --git a/llvm/cmake/modules/CrossCompile.cmake b/llvm/cmake/modules/CrossCompile.cmake
index 55bf3be75642..39b4abaa0d93 100644
--- a/llvm/cmake/modules/CrossCompile.cmake
+++ b/llvm/cmake/modules/CrossCompile.cmake
@@ -45,6 +45,8 @@ function(llvm_create_cross_target project_name target_name toolchain buildtype)
add_custom_target(CREATE_${project_name}_${target_name}
DEPENDS ${${project_name}_${target_name}_BUILD})
+ get_subproject_title(subproject_title)
+ set_target_properties(CREATE_${project_name}_${target_name} PROPERTIES FOLDER "${subproject_title}/Native")
# Escape semicolons in the targets list so that cmake doesn't expand
# them to spaces.
@@ -98,6 +100,8 @@ function(llvm_create_cross_target project_name target_name toolchain buildtype)
add_custom_target(CONFIGURE_${project_name}_${target_name}
DEPENDS ${${project_name}_${target_name}_BUILD}/CMakeCache.txt)
+ get_subproject_title(subproject_title)
+ set_target_properties(CONFIGURE_${project_name}_${target_name} PROPERTIES FOLDER "${subproject_title}/Native")
endfunction()
diff --git a/llvm/cmake/modules/FindTerminfo.cmake b/llvm/cmake/modules/FindTerminfo.cmake
deleted file mode 100644
index 163af6697067..000000000000
--- a/llvm/cmake/modules/FindTerminfo.cmake
+++ /dev/null
@@ -1,55 +0,0 @@
-# Attempts to discover terminfo library with a linkable setupterm function.
-#
-# Example usage:
-#
-# find_package(Terminfo)
-#
-# If successful, the following variables will be defined:
-# Terminfo_FOUND
-# Terminfo_LIBRARIES
-#
-# Additionally, the following import target will be defined:
-# Terminfo::terminfo
-
-find_library(Terminfo_LIBRARIES NAMES terminfo tinfo curses ncurses ncursesw)
-
-if(Terminfo_LIBRARIES)
- include(CMakePushCheckState)
- cmake_push_check_state()
- list(APPEND CMAKE_REQUIRED_LIBRARIES ${Terminfo_LIBRARIES})
- set(Terminfo_LINKABLE_SRC [=[
- #ifdef __cplusplus
- extern "C" {
- #endif
- int setupterm(char *term, int filedes, int *errret);
- #ifdef __cplusplus
- }
- #endif
- int main(void) { return setupterm(0, 0, 0); }
- ]=])
- if(DEFINED CMAKE_C_COMPILER)
- include(CheckCSourceCompiles)
- check_c_source_compiles("${Terminfo_LINKABLE_SRC}" Terminfo_LINKABLE)
- else()
- include(CheckCXXSourceCompiles)
- check_cxx_source_compiles("${Terminfo_LINKABLE_SRC}" Terminfo_LINKABLE)
- endif()
- cmake_pop_check_state()
-endif()
-
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(Terminfo
- FOUND_VAR
- Terminfo_FOUND
- REQUIRED_VARS
- Terminfo_LIBRARIES
- Terminfo_LINKABLE)
-mark_as_advanced(Terminfo_LIBRARIES
- Terminfo_LINKABLE)
-
-if(Terminfo_FOUND)
- if(NOT TARGET Terminfo::terminfo)
- add_library(Terminfo::terminfo UNKNOWN IMPORTED)
- set_target_properties(Terminfo::terminfo PROPERTIES IMPORTED_LOCATION "${Terminfo_LIBRARIES}")
- endif()
-endif()
diff --git a/llvm/cmake/modules/HandleLLVMOptions.cmake b/llvm/cmake/modules/HandleLLVMOptions.cmake
index 185266c0861e..99d848ba3d85 100644
--- a/llvm/cmake/modules/HandleLLVMOptions.cmake
+++ b/llvm/cmake/modules/HandleLLVMOptions.cmake
@@ -158,7 +158,7 @@ if(LLVM_ENABLE_EXPENSIVE_CHECKS)
endif()
add_compile_definitions(EXPENSIVE_CHECKS)
- # In some libstdc++ versions, std::min_element is not constexpr when
+ # In libstdc++ 9 and earlier, std::min_element is not constexpr when
# _GLIBCXX_DEBUG is enabled.
CHECK_CXX_SOURCE_COMPILES("
#define _GLIBCXX_DEBUG
@@ -425,7 +425,7 @@ if( LLVM_ENABLE_PIC )
# GCC for MIPS can miscompile LLVM due to PR37701.
if(CMAKE_COMPILER_IS_GNUCXX AND LLVM_NATIVE_ARCH STREQUAL "Mips" AND
NOT Uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG")
- add_flag_or_print_warning("-fno-shrink-wrap" FNO_SHRINK_WRAP)
+ append("-fno-shrink-wrap" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
endif()
# gcc with -O3 -fPIC generates TLS sequences that violate the spec on
# Solaris/sparcv9, causing executables created with the system linker
@@ -635,18 +635,16 @@ if( MSVC )
# This checks CMAKE_CXX_COMPILER_ID in addition to check_cxx_compiler_flag()
# because cl.exe does not emit an error on flags it doesn't understand,
# letting check_cxx_compiler_flag() claim it understands all flags.
- check_cxx_compiler_flag("/Brepro" SUPPORTS_BREPRO)
- if (SUPPORTS_BREPRO)
- # Check if /INCREMENTAL is passed to the linker and complain that it
- # won't work with /Brepro.
- has_msvc_incremental_no_flag("${CMAKE_EXE_LINKER_FLAGS_${uppercase_CMAKE_BUILD_TYPE}} ${CMAKE_EXE_LINKER_FLAGS}" NO_INCR_EXE)
- has_msvc_incremental_no_flag("${CMAKE_MODULE_LINKER_FLAGS_${uppercase_CMAKE_BUILD_TYPE}} ${CMAKE_MODULE_LINKER_FLAGS}" NO_INCR_MODULE)
- has_msvc_incremental_no_flag("${CMAKE_SHARED_LINKER_FLAGS_${uppercase_CMAKE_BUILD_TYPE}} ${CMAKE_SHARED_LINKER_FLAGS}" NO_INCR_SHARED)
- if (NO_INCR_EXE AND NO_INCR_MODULE AND NO_INCR_SHARED)
- append("/Brepro" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
- else()
- message(WARNING "/Brepro not compatible with /INCREMENTAL linking - builds will be non-deterministic")
- endif()
+
+ # Check if /INCREMENTAL is passed to the linker and complain that it
+ # won't work with /Brepro.
+ has_msvc_incremental_no_flag("${CMAKE_EXE_LINKER_FLAGS_${uppercase_CMAKE_BUILD_TYPE}} ${CMAKE_EXE_LINKER_FLAGS}" NO_INCR_EXE)
+ has_msvc_incremental_no_flag("${CMAKE_MODULE_LINKER_FLAGS_${uppercase_CMAKE_BUILD_TYPE}} ${CMAKE_MODULE_LINKER_FLAGS}" NO_INCR_MODULE)
+ has_msvc_incremental_no_flag("${CMAKE_SHARED_LINKER_FLAGS_${uppercase_CMAKE_BUILD_TYPE}} ${CMAKE_SHARED_LINKER_FLAGS}" NO_INCR_SHARED)
+ if (NO_INCR_EXE AND NO_INCR_MODULE AND NO_INCR_SHARED)
+ append("/Brepro" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
+ else()
+ message(WARNING "/Brepro not compatible with /INCREMENTAL linking - builds will be non-deterministic")
endif()
endif()
# By default MSVC has a 2^16 limit on the number of sections in an object file,
@@ -667,19 +665,22 @@ endif( LLVM_COMPILER_IS_GCC_COMPATIBLE )
# Specific default warnings-as-errors for compilers accepting GCC-compatible warning flags:
if ( LLVM_COMPILER_IS_GCC_COMPATIBLE OR CMAKE_CXX_COMPILER_ID MATCHES "XL" )
- add_flag_if_supported("-Werror=date-time" WERROR_DATE_TIME)
- add_flag_if_supported("-Werror=unguarded-availability-new" WERROR_UNGUARDED_AVAILABILITY_NEW)
+ append("-Werror=date-time" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
endif( LLVM_COMPILER_IS_GCC_COMPATIBLE OR CMAKE_CXX_COMPILER_ID MATCHES "XL" )
-if ( LLVM_COMPILER_IS_GCC_COMPATIBLE )
+if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ append("-Werror=unguarded-availability-new" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
+endif()
+
+if (CMAKE_CXX_COMPILER_ID STREQUAL "GCC")
# LLVM data structures like llvm::User and llvm::MDNode rely on
# the value of object storage persisting beyond the lifetime of the
# object (#24952). This is not standard compliant and causes a runtime
# crash if LLVM is built with GCC and LTO enabled (#57740). Until
# these bugs are fixed, we need to disable dead store eliminations
# based on object lifetime.
- add_flag_if_supported("-fno-lifetime-dse" CMAKE_CXX_FLAGS)
-endif ( LLVM_COMPILER_IS_GCC_COMPATIBLE )
+ append("-fno-lifetime-dse" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
+endif ()
# Modules enablement for GCC-compatible compilers:
if ( LLVM_COMPILER_IS_GCC_COMPATIBLE AND LLVM_ENABLE_MODULES )
@@ -697,22 +698,7 @@ if ( LLVM_COMPILER_IS_GCC_COMPATIBLE AND LLVM_ENABLE_MODULES )
(uppercase_CMAKE_BUILD_TYPE STREQUAL "RELWITHDEBINFO")))
set(module_flags "${module_flags} -gmodules")
endif()
- set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${module_flags}")
-
- # Check that we can build code with modules enabled, and that repeatedly
- # including <cassert> still manages to respect NDEBUG properly.
- CHECK_CXX_SOURCE_COMPILES("#undef NDEBUG
- #include <cassert>
- #define NDEBUG
- #include <cassert>
- int main() { assert(this code is not compiled); }"
- CXX_SUPPORTS_MODULES)
- set(CMAKE_REQUIRED_FLAGS ${OLD_CMAKE_REQUIRED_FLAGS})
- if (CXX_SUPPORTS_MODULES)
- append("${module_flags}" CMAKE_CXX_FLAGS)
- else()
- message(FATAL_ERROR "LLVM_ENABLE_MODULES is not supported by this compiler")
- endif()
+ append("${module_flags}" CMAKE_CXX_FLAGS)
endif( LLVM_COMPILER_IS_GCC_COMPATIBLE AND LLVM_ENABLE_MODULES )
if (MSVC)
@@ -814,13 +800,10 @@ if (LLVM_ENABLE_WARNINGS AND (LLVM_COMPILER_IS_GCC_COMPATIBLE OR CLANG_CL))
# Turn off missing field initializer warnings for gcc to avoid noise from
# false positives with empty {}. Turn them on otherwise (they're off by
# default for clang).
- check_cxx_compiler_flag("-Wmissing-field-initializers" CXX_SUPPORTS_MISSING_FIELD_INITIALIZERS_FLAG)
- if (CXX_SUPPORTS_MISSING_FIELD_INITIALIZERS_FLAG)
- if (CMAKE_COMPILER_IS_GNUCXX)
- append("-Wno-missing-field-initializers" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
- else()
- append("-Wmissing-field-initializers" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
- endif()
+ if (CMAKE_COMPILER_IS_GNUCXX)
+ append("-Wno-missing-field-initializers" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
+ else()
+ append("-Wmissing-field-initializers" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
endif()
if (LLVM_ENABLE_PEDANTIC AND LLVM_COMPILER_IS_GCC_COMPATIBLE)
@@ -833,8 +816,13 @@ if (LLVM_ENABLE_WARNINGS AND (LLVM_COMPILER_IS_GCC_COMPATIBLE OR CLANG_CL))
add_flag_if_supported("-Wc++98-compat-extra-semi" CXX98_COMPAT_EXTRA_SEMI_FLAG)
endif()
- add_flag_if_supported("-Wimplicit-fallthrough" IMPLICIT_FALLTHROUGH_FLAG)
- add_flag_if_supported("-Wcovered-switch-default" COVERED_SWITCH_DEFAULT_FLAG)
+ append("-Wimplicit-fallthrough" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
+
+ set(CXX_SUPPORTS_COVERED_SWITCH_DEFAULT_FLAG 0)
+ if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ set(CXX_SUPPORTS_COVERED_SWITCH_DEFAULT_FLAG 1)
+ append("-Wcovered-switch-default" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
+ endif()
append_if(USE_NO_UNINITIALIZED "-Wno-uninitialized" CMAKE_CXX_FLAGS)
append_if(USE_NO_MAYBE_UNINITIALIZED "-Wno-maybe-uninitialized" CMAKE_CXX_FLAGS)
@@ -845,38 +833,32 @@ if (LLVM_ENABLE_WARNINGS AND (LLVM_COMPILER_IS_GCC_COMPATIBLE OR CLANG_CL))
# Disable -Wclass-memaccess, a C++-only warning from GCC 8 that fires on
# LLVM's ADT classes.
- check_cxx_compiler_flag("-Wclass-memaccess" CXX_SUPPORTS_CLASS_MEMACCESS_FLAG)
- append_if(CXX_SUPPORTS_CLASS_MEMACCESS_FLAG "-Wno-class-memaccess" CMAKE_CXX_FLAGS)
+ if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+ if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 8.1)
+ append("-Wno-class-memaccess" CMAKE_CXX_FLAGS)
+ endif()
+ endif()
# Disable -Wredundant-move and -Wpessimizing-move on GCC>=9. GCC wants to
- # remove std::move in code like "A foo(ConvertibleToA a) {
- # return std::move(a); }", but this code does not compile (or uses the copy
+ # remove std::move in code like
+ # "A foo(ConvertibleToA a) { return std::move(a); }",
+ # but this code does not compile (or uses the copy
# constructor instead) on clang<=3.8. Clang also has a -Wredundant-move and
# -Wpessimizing-move, but they only fire when the types match exactly, so we
# can keep them here.
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
- check_cxx_compiler_flag("-Wredundant-move" CXX_SUPPORTS_REDUNDANT_MOVE_FLAG)
- append_if(CXX_SUPPORTS_REDUNDANT_MOVE_FLAG "-Wno-redundant-move" CMAKE_CXX_FLAGS)
- check_cxx_compiler_flag("-Wpessimizing-move" CXX_SUPPORTS_PESSIMIZING_MOVE_FLAG)
- append_if(CXX_SUPPORTS_PESSIMIZING_MOVE_FLAG "-Wno-pessimizing-move" CMAKE_CXX_FLAGS)
+ if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 9.1)
+ append("-Wno-redundant-move" CMAKE_CXX_FLAGS)
+ append("-Wno-pessimizing-move" CMAKE_CXX_FLAGS)
+ endif()
endif()
# The LLVM libraries have no stable C++ API, so -Wnoexcept-type is not useful.
- check_cxx_compiler_flag("-Wnoexcept-type" CXX_SUPPORTS_NOEXCEPT_TYPE_FLAG)
- append_if(CXX_SUPPORTS_NOEXCEPT_TYPE_FLAG "-Wno-noexcept-type" CMAKE_CXX_FLAGS)
-
- # Check if -Wnon-virtual-dtor warns for a class marked final, when it has a
- # friend declaration. If it does, don't add -Wnon-virtual-dtor. The case is
- # considered unhelpful (https://gcc.gnu.org/PR102168).
- set(OLD_CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS})
- set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -Werror=non-virtual-dtor")
- CHECK_CXX_SOURCE_COMPILES("class f {};
- class base {friend f; public: virtual void anchor();protected: ~base();};
- int main() { return 0; }"
- CXX_WONT_WARN_ON_FINAL_NONVIRTUALDTOR)
- set(CMAKE_REQUIRED_FLAGS ${OLD_CMAKE_REQUIRED_FLAGS})
- append_if(CXX_WONT_WARN_ON_FINAL_NONVIRTUALDTOR "-Wnon-virtual-dtor" CMAKE_CXX_FLAGS)
+ append("-Wno-noexcept-type" CMAKE_CXX_FLAGS)
+ if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ append("-Wnon-virtual-dtor" CMAKE_CXX_FLAGS)
+ endif()
append("-Wdelete-non-virtual-dtor" CMAKE_CXX_FLAGS)
# Enable -Wsuggest-override if it's available, and only if it doesn't
@@ -906,14 +888,15 @@ if (LLVM_ENABLE_WARNINGS AND (LLVM_COMPILER_IS_GCC_COMPATIBLE OR CLANG_CL))
endif()
# Enable -Wstring-conversion to catch misuse of string literals.
- add_flag_if_supported("-Wstring-conversion" STRING_CONVERSION_FLAG)
+ if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ append("-Wstring-conversion" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
+ endif()
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
# Disable the misleading indentation warning with GCC; GCC can
# produce noisy notes about this getting disabled in large files.
# See e.g. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89549
- check_cxx_compiler_flag("-Wmisleading-indentation" CXX_SUPPORTS_MISLEADING_INDENTATION_FLAG)
- append_if(CXX_SUPPORTS_MISLEADING_INDENTATION_FLAG "-Wno-misleading-indentation" CMAKE_CXX_FLAGS)
+ append("-Wno-misleading-indentation" CMAKE_CXX_FLAGS)
else()
# Prevent bugs that can happen with llvm's brace style.
add_flag_if_supported("-Wmisleading-indentation" MISLEADING_INDENTATION_FLAG)
@@ -931,14 +914,15 @@ macro(append_common_sanitizer_flags)
if (NOT MSVC OR CLANG_CL)
# Append -fno-omit-frame-pointer and turn on debug info to get better
# stack traces.
- add_flag_if_supported("-fno-omit-frame-pointer" FNO_OMIT_FRAME_POINTER)
+ append("-fno-omit-frame-pointer" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
if (NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" AND
- NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "RELWITHDEBINFO")
- add_flag_if_supported("-gline-tables-only" GLINE_TABLES_ONLY)
+ NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "RELWITHDEBINFO" AND
+ CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ append("-gline-tables-only" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
endif()
# Use -O1 even in debug mode, otherwise sanitizers slowdown is too large.
if (uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" AND LLVM_OPTIMIZE_SANITIZED_BUILDS)
- add_flag_if_supported("-O1" O1)
+ append("-O1" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
endif()
else()
# Always ask the linker to produce symbols with asan.
@@ -1112,15 +1096,12 @@ endif()
if(NOT CYGWIN AND NOT MSVC)
if(NOT ${CMAKE_SYSTEM_NAME} MATCHES "Darwin" AND
NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG")
- check_c_compiler_flag("-Werror -fno-function-sections" C_SUPPORTS_FNO_FUNCTION_SECTIONS)
- if (C_SUPPORTS_FNO_FUNCTION_SECTIONS)
- # Don't add -ffunction-sections if it can't be disabled with -fno-function-sections.
- # Doing so will break sanitizers.
- add_flag_if_supported("-ffunction-sections" FFUNCTION_SECTIONS)
- elseif (CMAKE_CXX_COMPILER_ID MATCHES "XL")
+ if (CMAKE_CXX_COMPILER_ID MATCHES "XL")
append("-qfuncsect" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
+ else()
+ append("-ffunction-sections" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
endif()
- add_flag_if_supported("-fdata-sections" FDATA_SECTIONS)
+ append("-fdata-sections" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
endif()
elseif(MSVC)
if( NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" )
@@ -1385,7 +1366,9 @@ if(LLVM_USE_RELATIVE_PATHS_IN_DEBUG_INFO)
file(RELATIVE_PATH relative_root "${CMAKE_BINARY_DIR}" "${source_root}")
append_if(SUPPORTS_FDEBUG_PREFIX_MAP "-fdebug-prefix-map=${CMAKE_BINARY_DIR}=${relative_root}" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
append_if(SUPPORTS_FDEBUG_PREFIX_MAP "-fdebug-prefix-map=${source_root}/=${LLVM_SOURCE_PREFIX}" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
- add_flag_if_supported("-no-canonical-prefixes" NO_CANONICAL_PREFIXES)
+ if (LLVM_COMPILER_IS_GCC_COMPATIBLE)
+ append("-no-canonical-prefixes" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
+ endif()
endif()
option(LLVM_USE_RELATIVE_PATHS_IN_FILES "Use relative paths in sources and debug info" OFF)
@@ -1400,7 +1383,9 @@ if(LLVM_USE_RELATIVE_PATHS_IN_FILES)
file(RELATIVE_PATH relative_root "${CMAKE_BINARY_DIR}" "${source_root}")
append_if(SUPPORTS_FFILE_PREFIX_MAP "-ffile-prefix-map=${CMAKE_BINARY_DIR}=${relative_root}" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
append_if(SUPPORTS_FFILE_PREFIX_MAP "-ffile-prefix-map=${source_root}/=${LLVM_SOURCE_PREFIX}" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
- add_flag_if_supported("-no-canonical-prefixes" NO_CANONICAL_PREFIXES)
+ if (LLVM_COMPILER_IS_GCC_COMPATIBLE)
+ append("-no-canonical-prefixes" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
+ endif()
endif()
set(LLVM_THIRD_PARTY_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../third-party CACHE STRING
diff --git a/llvm/cmake/modules/LLVMConfig.cmake.in b/llvm/cmake/modules/LLVMConfig.cmake.in
index 397bd5815b64..7e1501a89354 100644
--- a/llvm/cmake/modules/LLVMConfig.cmake.in
+++ b/llvm/cmake/modules/LLVMConfig.cmake.in
@@ -60,11 +60,6 @@ if(LLVM_ENABLE_LIBEDIT)
find_package(LibEdit)
endif()
-set(LLVM_ENABLE_TERMINFO @LLVM_ENABLE_TERMINFO@)
-if(LLVM_ENABLE_TERMINFO)
- find_package(Terminfo)
-endif()
-
set(LLVM_ENABLE_THREADS @LLVM_ENABLE_THREADS@)
set(LLVM_ENABLE_UNWIND_TABLES @LLVM_ENABLE_UNWIND_TABLES@)
diff --git a/llvm/cmake/modules/LLVMDistributionSupport.cmake b/llvm/cmake/modules/LLVMDistributionSupport.cmake
index 0b78f8f9137c..03c677357023 100644
--- a/llvm/cmake/modules/LLVMDistributionSupport.cmake
+++ b/llvm/cmake/modules/LLVMDistributionSupport.cmake
@@ -210,6 +210,8 @@ function(install_distribution_exports project)
COMPONENT ${target})
if(NOT LLVM_ENABLE_IDE)
add_custom_target(${target})
+ get_subproject_title(subproject_title)
+ set_target_properties(${target} PROPERTIES FOLDER "${subproject_title}/Distribution")
add_llvm_install_targets(install-${target} COMPONENT ${target})
endif()
endif()
@@ -260,6 +262,14 @@ function(llvm_distribution_add_targets)
add_custom_target(${distribution_target})
add_custom_target(install-${distribution_target})
add_custom_target(install-${distribution_target}-stripped)
+ get_subproject_title(subproject_title)
+ set_target_properties(
+ ${distribution_target}
+ install-${distribution_target}
+ install-${distribution_target}-stripped
+ PROPERTIES
+ FOLDER "${subproject_title}/Distribution"
+ )
foreach(target ${distribution_components})
# Note that some distribution components may not have an actual target, but only an install-FOO target.
diff --git a/llvm/cmake/modules/LLVMExternalProjectUtils.cmake b/llvm/cmake/modules/LLVMExternalProjectUtils.cmake
index c8016f20a819..60aed21143fd 100644
--- a/llvm/cmake/modules/LLVMExternalProjectUtils.cmake
+++ b/llvm/cmake/modules/LLVMExternalProjectUtils.cmake
@@ -56,11 +56,13 @@ endfunction()
# Use provided strip tool instead of the default one.
# TARGET_TRIPLE triple
# Optional target triple to pass to the compiler
+# FOLDER
+# For IDEs, the folder to put the targets into.
# )
function(llvm_ExternalProject_Add name source_dir)
cmake_parse_arguments(ARG
"USE_TOOLCHAIN;EXCLUDE_FROM_ALL;NO_INSTALL;ALWAYS_CLEAN"
- "SOURCE_DIR"
+ "SOURCE_DIR;FOLDER"
"CMAKE_ARGS;TOOLCHAIN_TOOLS;RUNTIME_LIBRARIES;DEPENDS;EXTRA_TARGETS;PASSTHROUGH_PREFIXES;STRIP_TOOL;TARGET_TRIPLE"
${ARGN})
canonicalize_tool_name(${name} nameCanon)
@@ -150,6 +152,9 @@ function(llvm_ExternalProject_Add name source_dir)
COMMENT "Clobbering ${name} build and stamp directories"
USES_TERMINAL
)
+ if (ARG_FOLDER)
+ set_target_properties(${name}-clear PROPERTIES FOLDER "${ARG_FOLDER}")
+ endif ()
# Find all variables that start with a prefix and propagate them through
get_cmake_property(variableNames VARIABLES)
@@ -252,6 +257,9 @@ function(llvm_ExternalProject_Add name source_dir)
add_custom_target(${name}-clobber
DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${name}-clobber-stamp)
+ if (ARG_FOLDER)
+ set_target_properties(${name}-clobber PROPERTIES FOLDER "${ARG_FOLDER}")
+ endif ()
if(ARG_EXCLUDE_FROM_ALL)
set(exclude EXCLUDE_FROM_ALL 1)
@@ -358,6 +366,12 @@ function(llvm_ExternalProject_Add name source_dir)
USES_TERMINAL_INSTALL 1
LIST_SEPARATOR |
)
+ if (ARG_FOLDER)
+ set_target_properties(
+ ${name} ${name}-clobber ${name}-build ${name}-configure
+ PROPERTIES FOLDER "${ARG_FOLDER}"
+ )
+ endif ()
if(ARG_USE_TOOLCHAIN)
set(force_deps DEPENDS ${TOOLCHAIN_BINS})
@@ -374,6 +388,9 @@ function(llvm_ExternalProject_Add name source_dir)
USES_TERMINAL 1
)
ExternalProject_Add_StepTargets(${name} clean)
+ if (ARG_FOLDER)
+ set_target_properties(${name}-clean PROPERTIES FOLDER "${ARG_FOLDER}")
+ endif ()
if(ARG_USE_TOOLCHAIN)
add_dependencies(${name}-clean ${name}-clobber)
@@ -388,6 +405,9 @@ function(llvm_ExternalProject_Add name source_dir)
add_llvm_install_targets(install-${name}
DEPENDS ${name}
COMPONENT ${name})
+ if (ARG_FOLDER)
+ set_target_properties(install-${name} PROPERTIES FOLDER "${ARG_FOLDER}")
+ endif ()
endif()
# Add top-level targets
@@ -404,5 +424,8 @@ function(llvm_ExternalProject_Add name source_dir)
WORKING_DIRECTORY ${BINARY_DIR}
VERBATIM
USES_TERMINAL)
+ if (ARG_FOLDER)
+ set_target_properties(${target} PROPERTIES FOLDER "${ARG_FOLDER}")
+ endif ()
endforeach()
endfunction()
diff --git a/llvm/cmake/modules/TableGen.cmake b/llvm/cmake/modules/TableGen.cmake
index df91598c404f..ffcc718b4777 100644
--- a/llvm/cmake/modules/TableGen.cmake
+++ b/llvm/cmake/modules/TableGen.cmake
@@ -167,7 +167,8 @@ function(add_public_tablegen_target target)
if(LLVM_COMMON_DEPENDS)
add_dependencies(${target} ${LLVM_COMMON_DEPENDS})
endif()
- set_target_properties(${target} PROPERTIES FOLDER "Tablegenning")
+ get_subproject_title(subproject_title)
+ set_target_properties(${target} PROPERTIES FOLDER "${subproject_title}/Tablegenning")
set(LLVM_COMMON_DEPENDS ${LLVM_COMMON_DEPENDS} ${target} PARENT_SCOPE)
endfunction()
@@ -217,6 +218,8 @@ macro(add_tablegen target project)
set(${project}_TABLEGEN_EXE ${${project}_TABLEGEN_EXE} PARENT_SCOPE)
add_custom_target(${target}-host DEPENDS ${${project}_TABLEGEN_EXE})
+ get_subproject_title(subproject_title)
+ set_target_properties(${target}-host PROPERTIES FOLDER "${subproject_title}/Native")
set(${project}_TABLEGEN_TARGET ${target}-host PARENT_SCOPE)
# If we're using the host tablegen, and utils were not requested, we have no
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index 75536bc5bea6..1004956ac8f1 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -1970,6 +1970,8 @@ The AMDGPU backend uses the following ELF header:
``EF_AMDGPU_MACH_AMDGCN_GFX10_3_GENERIC`` 0x053 ``gfx10-3-generic``
``EF_AMDGPU_MACH_AMDGCN_GFX11_GENERIC`` 0x054 ``gfx11-generic``
*reserved* 0x055 Reserved.
+ *reserved* 0x056 Reserved.
+ *reserved* 0x057 Reserved.
========================================== ========== =============================
Sections
diff --git a/llvm/docs/CMakeLists.txt b/llvm/docs/CMakeLists.txt
index 5e420a269632..bc87eb727d32 100644
--- a/llvm/docs/CMakeLists.txt
+++ b/llvm/docs/CMakeLists.txt
@@ -87,6 +87,7 @@ if (LLVM_ENABLE_DOXYGEN)
COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/doxygen.cfg
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating llvm doxygen documentation." VERBATIM)
+ set_target_properties(doxygen-llvm PROPERTIES FOLDER "LLVM/Docs")
if (LLVM_BUILD_DOCS)
add_dependencies(doxygen doxygen-llvm)
diff --git a/llvm/docs/DeveloperPolicy.rst b/llvm/docs/DeveloperPolicy.rst
index 5d3731d761a3..f2ac46e6c04f 100644
--- a/llvm/docs/DeveloperPolicy.rst
+++ b/llvm/docs/DeveloperPolicy.rst
@@ -1069,7 +1069,7 @@ In certain circumstances, code licensed under other licenses can be added
to the codebase. However, this may only be done with approval of the LLVM
Foundation Board of Directors, and contributors should plan for the approval
process to take at least 4-6 weeks. If you would like to contribute code
-under a different license, please create a Phabricator review with the code
+under a different license, please create a pull request with the code
you want to contribute and email board@llvm.org requesting a review.
If you have questions or comments about these topics, please ask on the
@@ -1124,20 +1124,17 @@ To relicense LLVM, we will be seeking approval from all of the copyright holders
of code in the repository, or potentially remove/rewrite code if we cannot.
This is a large
and challenging project which will take a significant amount of time to
-complete. In the interim, **all contributions to the project will be made under
-the terms of both the new license and the legacy license scheme** (each of which
-is described below). The exception to this is the legacy patent grant, which
-will not be required for new contributions.
+complete.
-When all of the code in the project has been converted to the new license or
-removed, we will drop the requirement to contribute under the legacy license.
-This will achieve the goal of having
-a single standardized license for the entire codebase.
+Starting on 2024-06-01 (first of June 2024), new contributions only need to
+be covered by the new LLVM license, i.e. Apache-2.0 WITH LLVM-exception.
+Before this date, the project required all contributions to be made under
+both the new license and the legacy license.
-If you are a prior contributor to LLVM and have not done so already, please do
-*TODO* to allow us to use your code. *Add a link to a separate page here, which
-is probably a click through web form or something like that. Details to be
-determined later*.
+If you are a contributor to LLVM with contributions committed before 2019-01-19
+and have not done so already, please follow the instructions at
+https://foundation.llvm.org/docs/relicensing/, under the section "Individual
+Relicensing Agreement", to relicense your contributions under the new license.
.. _open source licensing terms:
@@ -1264,12 +1261,11 @@ Legacy License Structure
.. note::
The code base was previously licensed under the Terms described here.
- We are in the middle of relicensing to a new approach (described above), but
- until this effort is complete, the code is also still available under these
- terms. Once we finish the relicensing project, new versions of the code will
- not be available under these terms. However, nothing takes away your right
- to use old versions under the licensing terms under which they were
- originally released.
+ We are in the middle of relicensing to a new approach (described above).
+ More than 99% of all contributions made to LLVM are covered by the Apache-2.0
+ WITH LLVM-exception license. A small portion of LLVM code remains exclusively
+ covered by the legacy license. Contributions after 2024-06-01 are covered
+ exclusively by the new license.
We intend to keep LLVM perpetually open source and to use a permissive open
source license. The code in
diff --git a/llvm/docs/GettingInvolved.rst b/llvm/docs/GettingInvolved.rst
index 3588ef14db15..646f1d09dfab 100644
--- a/llvm/docs/GettingInvolved.rst
+++ b/llvm/docs/GettingInvolved.rst
@@ -349,6 +349,11 @@ The :doc:`CodeOfConduct` applies to all office hours.
- Every two weeks, Wednesdays at 2:00pm US Pacific, for 90 minutes.
- Livestream chat or `Google meet <https://meet.google.com/wit-tvzc-dwc>`__
- English
+ * - Renato Golin
+ - General LLVM, MLIR & Linalg, distributed computing, research, socials.
+ - Every first Tuesday of the month, 11:00am UK time, for 60 minutes.
+ - `Google meet <https://meet.google.com/esg-fggc-hfe>`__
+ - English, Portuguese
* - Rotating hosts
- Getting Started, beginner questions, new contributors.
- Every Tuesday at 2 PM ET (11 AM PT), for 30 minutes.
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index e2f4d8bfcaee..614dd98b013b 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -290,13 +290,17 @@ linkage:
symbol is weak until linked, if not linked, the symbol becomes null
instead of being an undefined reference.
``linkonce_odr``, ``weak_odr``
- Some languages allow differing globals to be merged, such as two
- functions with different semantics. Other languages, such as
- ``C++``, ensure that only equivalent globals are ever merged (the
- "one definition rule" --- "ODR"). Such languages can use the
- ``linkonce_odr`` and ``weak_odr`` linkage types to indicate that the
- global will only be merged with equivalent globals. These linkage
- types are otherwise the same as their non-``odr`` versions.
+ The ``odr`` suffix indicates that all globals defined with the given name
+ are equivalent, along the lines of the C++ "one definition rule" ("ODR").
+ Informally, this means we can inline functions and fold loads of constants.
+
+ Formally, use the following definition: when an ``odr`` function is
+ called, one of the definitions is non-deterministically chosen to run. For
+ ``odr`` variables, if any byte in the value is not equal in all
+ initializers, that byte is a :ref:`poison value <poisonvalues>`. For
+ aliases and ifuncs, apply the rule for the underlying function or variable.
+
+ These linkage types are otherwise the same as their non-``odr`` versions.
``external``
If none of the above identifiers are used, the global is externally
visible, meaning that it participates in linkage and can be used to
@@ -11180,6 +11184,8 @@ Syntax:
<result> = getelementptr <ty>, ptr <ptrval>{, <ty> <idx>}*
<result> = getelementptr inbounds <ty>, ptr <ptrval>{, <ty> <idx>}*
+ <result> = getelementptr nusw <ty>, ptr <ptrval>{, <ty> <idx>}*
+ <result> = getelementptr nuw <ty>, ptr <ptrval>{, <ty> <idx>}*
<result> = getelementptr inrange(S,E) <ty>, ptr <ptrval>{, <ty> <idx>}*
<result> = getelementptr <ty>, <N x ptr> <ptrval>, <vector index type> <idx>
@@ -11295,27 +11301,47 @@ memory though, even if it happens to point into allocated storage. See the
:ref:`Pointer Aliasing Rules <pointeraliasing>` section for more
information.
-If the ``inbounds`` keyword is present, the result value of a
-``getelementptr`` with any non-zero indices is a
-:ref:`poison value <poisonvalues>` if one of the following rules is violated:
-
-* The base pointer has an *in bounds* address of an allocated object, which
+The ``getelementptr`` instruction may have a number of attributes that impose
+additional rules. If any of the rules are violated, the result value is a
+:ref:`poison value <poisonvalues>`. In cases where the base is a vector of
+pointers, the attributes apply to each computation element-wise.
+
+For ``nusw`` (no unsigned signed wrap):
+
+ * If the type of an index is larger than the pointer index type, the
+ truncation to the pointer index type preserves the signed value
+ (``trunc nsw``).
+ * The multiplication of an index by the type size does not wrap the pointer
+ index type in a signed sense (``mul nsw``).
+ * The successive addition of each offset (without adding the base address)
+ does not wrap the pointer index type in a signed sense (``add nsw``).
+ * The successive addition of the current address, truncated to the pointer
+ index type and interpreted as an unsigned number, and each offset,
+ interpreted as a signed number, does not wrap the pointer index type.
+
+For ``nuw`` (no unsigned wrap):
+
+ * If the type of an index is larger than the pointer index type, the
+ truncation to the pointer index type preserves the unsigned value
+ (``trunc nuw``).
+ * The multiplication of an index by the type size does not wrap the pointer
+ index type in an unsigned sense (``mul nuw``).
+ * The successive addition of each offset (without adding the base address)
+ does not wrap the pointer index type in an unsigned sense (``add nuw``).
+ * The successive addition of the current address, truncated to the pointer
+ index type and interpreted as an unsigned number, and each offset, also
+ interpreted as an unsigned number, does not wrap the pointer index type
+ (``add nuw``).
+
+For ``inbounds`` all rules of the ``nusw`` attribute apply. Additionally,
+if the ``getelementptr`` has any non-zero indices, the following rules apply:
+
+ * The base pointer has an *in bounds* address of an allocated object, which
means that it points into an allocated object, or to its end. Note that the
object does not have to be live anymore; being in-bounds of a deallocated
object is sufficient.
-* If the type of an index is larger than the pointer index type, the
- truncation to the pointer index type preserves the signed value.
-* The multiplication of an index by the type size does not wrap the pointer
- index type in a signed sense (``nsw``).
-* The successive addition of each offset (without adding the base address) does
- not wrap the pointer index type in a signed sense (``nsw``).
-* The successive addition of the current address, interpreted as an unsigned
- number, and each offset, interpreted as a signed number, does not wrap the
- unsigned address space and remains *in bounds* of the allocated object.
- As a corollary, if the added offset is non-negative, the addition does not
- wrap in an unsigned sense (``nuw``).
-* In cases where the base is a vector of pointers, the ``inbounds`` keyword
- applies to each of the computations element-wise.
+ * During the successive addition of offsets to the address, the resulting
+ pointer must remain *in bounds* of the allocated object at each step.
Note that ``getelementptr`` with all-zero indices is always considered to be
``inbounds``, even if the base pointer does not point to an allocated object.
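
As a standalone illustration of the ``nuw`` rules (an editorial sketch in
plain C++, not part of the specification), the following helper mirrors the
single-index case: the multiplication and the final addition must both stay
inside the 64-bit pointer index type, otherwise the IR result would be poison.

.. code-block:: c++

  #include <cstdint>
  #include <optional>

  // Computes Base + Idx * EltSize, or nullopt when the "mul nuw" or
  // "add nuw" rule is violated. __int128 is a GCC/Clang extension, used
  // here only to detect 64-bit wraparound exactly.
  std::optional<uint64_t> gepNuwAddress(uint64_t Base, uint64_t Idx,
                                        uint64_t EltSize) {
    unsigned __int128 Off = (unsigned __int128)Idx * EltSize;
    if (Off > UINT64_MAX)
      return std::nullopt; // multiplication wraps the index type
    unsigned __int128 Addr = Off + Base;
    if (Addr > UINT64_MAX)
      return std::nullopt; // addition wraps the index type
    return (uint64_t)Addr;
  }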
@@ -11326,6 +11352,10 @@ These rules are based on the assumption that no allocated object may cross
the unsigned address space boundary, and no allocated object may be larger
than half the pointer index type space.
+If ``inbounds`` is present on a ``getelementptr`` instruction, the ``nusw``
+attribute is automatically set as well. For this reason, the ``nusw`` flag
+is also not printed in textual IR if ``inbounds`` is already present.
+
If the ``inrange(Start, End)`` attribute is present, loading from or
storing to any pointer derived from the ``getelementptr`` has undefined
behavior if the load or store would access memory outside the half-open range
@@ -15757,8 +15787,8 @@ The arguments and return value are floating-point numbers of the same type.
Semantics:
""""""""""
-Return the same value as a corresponding libm '``fma``' function but without
-trapping or setting ``errno``.
+Return the same value as the IEEE-754 fusedMultiplyAdd operation. This
+is assumed to not trap or set ``errno``.
When specified with the fast-math-flag 'afn', the result may be approximated
using a less accurate calculation.
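
As an editorial aside, the single-rounding semantics are observable from
standard C++ via ``std::fma`` (assuming the compiler does not contract
``a * b + c`` into a fused operation on its own):

.. code-block:: c++

  #include <cmath>
  #include <cstdio>

  int main() {
    double a = 1.0 + 0x1p-52, b = 1.0 - 0x1p-52, c = -1.0;
    // a*b is exactly 1 - 2^-104; rounding the product first loses that bit.
    std::printf("%a\n", a * b + c);         // prints 0x0p+0
    std::printf("%a\n", std::fma(a, b, c)); // prints -0x1p-104
    return 0;
  }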
diff --git a/llvm/docs/MemorySSA.rst b/llvm/docs/MemorySSA.rst
index 17d2c9af96c2..09e9f9a37732 100644
--- a/llvm/docs/MemorySSA.rst
+++ b/llvm/docs/MemorySSA.rst
@@ -295,9 +295,9 @@ A code snippet for such a walk looks like this:
.. code-block:: c++
MemoryDef *Def; // find who's optimized or defining for this MemoryDef
- for (auto& U : Def->uses()) {
- MemoryAccess *MA = cast<MemoryAccess>(Use.getUser());
- if (auto *DefUser = cast_of_null<MemoryDef>MA)
+ for (auto &U : Def->uses()) {
+ MemoryAccess *MA = cast<MemoryAccess>(U.getUser());
+ if (auto *DefUser = dyn_cast<MemoryDef>(MA))
if (DefUser->isOptimized() && DefUser->getOptimized() == Def) {
// User who is optimized to Def
} else {
@@ -312,19 +312,18 @@ the store.
.. code-block:: c++
checkUses(MemoryAccess *Def) { // Def can be a MemoryDef or a MemoryPhi.
- for (auto& U : Def->uses()) {
- MemoryAccess *MA = cast<MemoryAccess>(Use.getUser());
- if (auto *MU = cast_of_null<MemoryUse>MA) {
+ for (auto &U : Def->uses()) {
+ MemoryAccess *MA = cast<MemoryAccess>(U.getUser());
+ if (auto *MU = dyn_cast<MemoryUse>(MA)) {
// Process MemoryUse as needed.
- }
- else {
+ } else {
// Process MemoryDef or MemoryPhi as needed.
// As a user can come up twice, as an optimized access and defining
// access, keep a visited list.
// Check transitive uses as needed
- checkUses (MA); // use a worklist for an iterative algorithm
+ checkUses(MA); // use a worklist for an iterative algorithm
}
}
}
diff --git a/llvm/docs/ORCv2.rst b/llvm/docs/ORCv2.rst
index 910ef5b9f3d0..333977a0aaa6 100644
--- a/llvm/docs/ORCv2.rst
+++ b/llvm/docs/ORCv2.rst
@@ -780,7 +780,7 @@ constructs a new ThreadSafeContext value from a std::unique_ptr<LLVMContext>:
// separate context.
for (const auto &IRPath : IRPaths) {
auto Ctx = std::make_unique<LLVMContext>();
- auto M = std::make_unique<LLVMContext>("M", *Ctx);
+ auto M = std::make_unique<Module>("M", *Ctx);
CompileLayer.add(MainJD, ThreadSafeModule(std::move(M), std::move(Ctx)));
}
diff --git a/llvm/docs/RISCVUsage.rst b/llvm/docs/RISCVUsage.rst
index ff08c9d345d5..5ecee2a480f7 100644
--- a/llvm/docs/RISCVUsage.rst
+++ b/llvm/docs/RISCVUsage.rst
@@ -119,7 +119,9 @@ on support follow.
``V`` Supported
``Za128rs`` Supported (`See note <#riscv-profiles-extensions-note>`__)
``Za64rs`` Supported (`See note <#riscv-profiles-extensions-note>`__)
+ ``Zaamo`` Assembly Support
``Zacas`` Supported (`See note <#riscv-zacas-note>`__)
+ ``Zalrsc`` Assembly Support
``Zama16b`` Supported (`See note <#riscv-profiles-extensions-note>`__)
``Zawrs`` Assembly Support
``Zba`` Supported
@@ -275,9 +277,6 @@ The primary goal of experimental support is to assist in the process of ratifica
``experimental-ztso``
LLVM implements the `v0.1 proposed specification <https://github.com/riscv/riscv-isa-manual/releases/download/draft-20220723-10eea63/riscv-spec.pdf>`__ (see Chapter 25). The mapping from the C/C++ memory model to Ztso has not yet been ratified in any standards document. There are multiple possible mappings, and they are *not* mutually ABI compatible. The mapping LLVM implements is ABI compatible with the default WMO mapping. This mapping may change and there is *explicitly* no ABI stability offered while the extension remains in experimental status. User beware.
-``experimental-zaamo``, ``experimental-zalrsc``
- LLVM implements the `v0.2 proposed specification <https://github.com/riscv/riscv-zaamo-zalrsc/releases/tag/v0.2>`__.
-
To use an experimental extension from `clang`, you must add `-menable-experimental-extensions` to the command line, and specify the exact version of the experimental extension you are using. To use an experimental extension with LLVM's internal developer tools (e.g. `llc`, `llvm-objdump`, `llvm-mc`), you must prefix the extension name with `experimental-`. Note that you don't need to specify the version with internal tools, and shouldn't include the `experimental-` prefix with `clang`.
Vendor Extensions
diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index 84320461fa9e..c7c2c2825f58 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -51,6 +51,7 @@ Changes to the LLVM IR
----------------------
* Added Memory Model Relaxation Annotations (MMRAs).
+* Added ``nusw`` and ``nuw`` flags to ``getelementptr`` instruction.
* Renamed ``llvm.experimental.vector.reverse`` intrinsic to ``llvm.vector.reverse``.
* Renamed ``llvm.experimental.vector.splice`` intrinsic to ``llvm.vector.splice``.
* Renamed ``llvm.experimental.vector.interleave2`` intrinsic to ``llvm.vector.interleave2``.
@@ -62,6 +63,10 @@ Changes to LLVM infrastructure
Changes to building LLVM
------------------------
+- The ``LLVM_ENABLE_TERMINFO`` flag has been removed. LLVM no longer depends on
+ terminfo and now always uses the ``TERM`` environment variable for color
+ support autodetection.
+
Changes to TableGen
-------------------
@@ -129,6 +134,8 @@ Changes to the RISC-V Backend
* llvm-objdump now prints disassembled opcode bytes in groups of 2 or 4 bytes to
match GNU objdump. The bytes within the groups are in big endian order.
* Added smstateen extension to -march. CSR names for smstateen were already supported.
+* Zaamo and Zalrsc are no longer experimental.
+* Processors that enable post-register-allocation scheduling (PostMachineScheduler) by default should use the `UsePostRAScheduler` subtarget feature. Setting `PostRAScheduler = 1` in the scheduler model no longer enables the PostMachineScheduler.
Changes to the WebAssembly Backend
----------------------------------
@@ -139,6 +146,9 @@ Changes to the Windows Target
Changes to the X86 Backend
--------------------------
+- Removed KNL/KNM-specific ISA intrinsics (AVX512PF, AVX512ER, PREFETCHWT1),
+  while assembly encoding/decoding support is kept.
+
Changes to the OCaml bindings
-----------------------------
@@ -238,6 +248,11 @@ Changes to the LLVM tools
documented in `--help` output and the command guide. (`#90474
<https://github.com/llvm/llvm-project/pull/90474>`)
+* llvm-readobj's LLVM output format for ELF core files has been changed.
+ Similarly, the JSON format has been fixed for this case. The NT_FILE note
+ now has a map for the mapped files. (`#92835
+ <https://github.com/llvm/llvm-project/pull/92835>`).
+
Changes to LLDB
---------------------------------
diff --git a/llvm/docs/SPIRVUsage.rst b/llvm/docs/SPIRVUsage.rst
index d27177a4541a..657b0fb9b672 100644
--- a/llvm/docs/SPIRVUsage.rst
+++ b/llvm/docs/SPIRVUsage.rst
@@ -143,6 +143,8 @@ list of supported SPIR-V extensions, sorted alphabetically by their extension na
- Adds instructions to convert between single-precision 32-bit floating-point values and 16-bit bfloat16 values.
* - ``SPV_INTEL_function_pointers``
- Allows translation of function pointers.
+ * - ``SPV_INTEL_inline_assembly``
+ - Allows the use of inline assembly.
* - ``SPV_INTEL_optnone``
- Adds OptNoneINTEL value for Function Control mask that indicates a request to not optimize the function.
* - ``SPV_INTEL_subgroups``
@@ -161,6 +163,8 @@ list of supported SPIR-V extensions, sorted alphabetically by their extension na
 - Allows the use of the LinkOnceODR linkage type, which lets a function or global variable be merged with other functions or global variables of the same name when linkage occurs.
* - ``SPV_KHR_no_integer_wrap_decoration``
- Adds decorations to indicate that a given instruction does not cause integer wrapping.
+ * - ``SPV_KHR_shader_clock``
+ - Adds the cl_khr_kernel_clock extension, which adds the ability for a kernel to sample the value from clocks provided by compute units.
* - ``SPV_KHR_subgroup_rotate``
- Adds a new instruction that enables rotating values across invocations within a subgroup.
* - ``SPV_KHR_uniform_group_instructions``
@@ -333,6 +337,10 @@ SPIR-V backend, along with their descriptions and argument details.
- 32-bit Integer
- `[]`
- Generates an undefined value. Useful for optimizations and indicating uninitialized variables.
+ * - `int_spv_inline_asm`
+ - None
+ - `[Metadata, Metadata, Vararg]`
+ - Associates inline assembly features with inline assembly call instances by creating metadata and preserving the original arguments. Not emitted directly but used to support the SPIR-V representation in LLVM IR.
* - `int_spv_assume`
- None
- `[1-bit Integer]`
diff --git a/llvm/examples/ExceptionDemo/ExceptionDemo.cpp b/llvm/examples/ExceptionDemo/ExceptionDemo.cpp
index 0afc6b30d140..fdee76cb9614 100644
--- a/llvm/examples/ExceptionDemo/ExceptionDemo.cpp
+++ b/llvm/examples/ExceptionDemo/ExceptionDemo.cpp
@@ -1865,7 +1865,7 @@ static void createStandardUtilityFunctions(unsigned numTypeInfos,
// llvm.eh.typeid.for intrinsic
- getDeclaration(&module, llvm::Intrinsic::eh_typeid_for);
+ getDeclaration(&module, llvm::Intrinsic::eh_typeid_for, builder.getPtrTy());
}
diff --git a/llvm/examples/Kaleidoscope/CMakeLists.txt b/llvm/examples/Kaleidoscope/CMakeLists.txt
index 3822cdd9e1c4..6ad3b6156647 100644
--- a/llvm/examples/Kaleidoscope/CMakeLists.txt
+++ b/llvm/examples/Kaleidoscope/CMakeLists.txt
@@ -1,5 +1,5 @@
add_custom_target(Kaleidoscope)
-set_target_properties(Kaleidoscope PROPERTIES FOLDER Examples)
+set_target_properties(Kaleidoscope PROPERTIES FOLDER "LLVM/Examples")
macro(add_kaleidoscope_chapter name)
add_dependencies(Kaleidoscope ${name})
diff --git a/llvm/include/llvm/ADT/GenericUniformityImpl.h b/llvm/include/llvm/ADT/GenericUniformityImpl.h
index 6b744384051b..bd09f4fe43e0 100644
--- a/llvm/include/llvm/ADT/GenericUniformityImpl.h
+++ b/llvm/include/llvm/ADT/GenericUniformityImpl.h
@@ -46,14 +46,13 @@
#include "llvm/ADT/GenericUniformityInfo.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include <set>
-
#define DEBUG_TYPE "uniformity"
namespace llvm {
@@ -413,7 +412,7 @@ protected:
const TargetTransformInfo *TTI = nullptr;
// Detected/marked divergent values.
- std::set<ConstValueRefT> DivergentValues;
+ DenseSet<ConstValueRefT> DivergentValues;
SmallPtrSet<const BlockT *, 32> DivergentTermBlocks;
// Internal worklist for divergence propagation.
diff --git a/llvm/include/llvm/Analysis/CFG.h b/llvm/include/llvm/Analysis/CFG.h
index 86b01c13274f..23bc10a4a9d1 100644
--- a/llvm/include/llvm/Analysis/CFG.h
+++ b/llvm/include/llvm/Analysis/CFG.h
@@ -96,6 +96,18 @@ bool isPotentiallyReachableFromMany(
const SmallPtrSetImpl<BasicBlock *> *ExclusionSet,
const DominatorTree *DT = nullptr, const LoopInfo *LI = nullptr);
+/// Determine whether there is potentially a path from at least one block in
+/// 'Worklist' to at least one block in 'StopSet' within a single function,
+/// without passing through any of the blocks in 'ExclusionSet'. Returns false
+/// only if we can prove that no block in 'StopSet' can be reached from any
+/// block in 'Worklist' without passing through a block in 'ExclusionSet'.
+/// Conservatively returns true.
+bool isManyPotentiallyReachableFromMany(
+ SmallVectorImpl<BasicBlock *> &Worklist,
+ const SmallPtrSetImpl<const BasicBlock *> &StopSet,
+ const SmallPtrSetImpl<BasicBlock *> *ExclusionSet,
+ const DominatorTree *DT = nullptr, const LoopInfo *LI = nullptr);
+
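
As an editorial aside, the semantics of this query can be sketched as a
multi-source BFS over a plain adjacency list. This stand-in ignores the
dominator tree and loop info that the real implementation can use to prune
the search, but it captures the contract: false only when no stop block is
reachable.

.. code-block:: c++

  #include <queue>
  #include <set>
  #include <vector>

  bool manyReachableFromMany(const std::vector<std::vector<int>> &Succ,
                             const std::vector<int> &Starts,
                             const std::set<int> &Stops,
                             const std::set<int> &Excluded) {
    std::set<int> Visited(Starts.begin(), Starts.end());
    std::queue<int> Work;
    for (int B : Starts)
      Work.push(B);
    while (!Work.empty()) {
      int B = Work.front();
      Work.pop();
      if (Excluded.count(B))
        continue; // paths may not pass through excluded blocks
      if (Stops.count(B))
        return true; // some stop block is reachable
      for (int S : Succ[B])
        if (Visited.insert(S).second)
          Work.push(S);
    }
    return false; // the provable "no path" case
  }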
/// Return true if the control flow in \p RPOTraversal is irreducible.
///
/// This is a generic implementation to detect CFG irreducibility based on loop
diff --git a/llvm/include/llvm/Analysis/ConstantFolding.h b/llvm/include/llvm/Analysis/ConstantFolding.h
index c54b1e8f01d2..58b38fb8b036 100644
--- a/llvm/include/llvm/Analysis/ConstantFolding.h
+++ b/llvm/include/llvm/Analysis/ConstantFolding.h
@@ -68,9 +68,16 @@ Constant *ConstantFoldConstant(const Constant *C, const DataLayout &DL,
/// fold instructions like loads and stores, which have no constant expression
/// form.
///
+/// In some cases, constant folding may return one value chosen from a set of
+/// multiple legal return values. For example, the exact bit pattern of NaN
+/// results is not guaranteed. Using such a result is usually only valid if
+/// all uses of the original operation are replaced by the constant-folded
+/// result. The \p AllowNonDeterministic parameter controls whether this is
+/// allowed.
Constant *ConstantFoldInstOperands(Instruction *I, ArrayRef<Constant *> Ops,
const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr);
+ const TargetLibraryInfo *TLI = nullptr,
+ bool AllowNonDeterministic = true);
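
As a standalone illustration of the NaN caveat (editorial, not LLVM API):
IEEE-754 leaves the payload of the NaN produced by ``0.0 / 0.0`` unspecified,
so two equally correct folders may legally produce different bit patterns.

.. code-block:: c++

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  int main() {
    volatile double Zero = 0.0; // volatile keeps the compiler from folding
    double N = Zero / Zero;     // some quiet NaN; exact bits unspecified
    uint64_t Bits;
    std::memcpy(&Bits, &N, sizeof Bits);
    std::printf("NaN bits: 0x%016llx\n", (unsigned long long)Bits);
    return 0;
  }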
/// Attempt to constant fold a compare instruction (icmp/fcmp) with the
/// specified operands. Returns null or a constant expression of the specified
@@ -95,7 +102,8 @@ Constant *ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
/// Returns null or a constant expression of the specified operands on failure.
Constant *ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
Constant *RHS, const DataLayout &DL,
- const Instruction *I);
+ const Instruction *I,
+ bool AllowNonDeterministic = true);
/// Attempt to flush float point constant according to denormal mode set in the
/// instruction's parent function attributes. If so, return a zero with the
@@ -190,7 +198,8 @@ bool canConstantFoldCallTo(const CallBase *Call, const Function *F);
/// with the specified arguments, returning null if unsuccessful.
Constant *ConstantFoldCall(const CallBase *Call, Function *F,
ArrayRef<Constant *> Operands,
- const TargetLibraryInfo *TLI = nullptr);
+ const TargetLibraryInfo *TLI = nullptr,
+ bool AllowNonDeterministic = true);
Constant *ConstantFoldBinaryIntrinsic(Intrinsic::ID ID, Constant *LHS,
Constant *RHS, Type *Ty,
diff --git a/llvm/include/llvm/Analysis/InstSimplifyFolder.h b/llvm/include/llvm/Analysis/InstSimplifyFolder.h
index 8a3269d6add0..98c7c291fea1 100644
--- a/llvm/include/llvm/Analysis/InstSimplifyFolder.h
+++ b/llvm/include/llvm/Analysis/InstSimplifyFolder.h
@@ -72,8 +72,8 @@ public:
return simplifyUnOp(Opc, V, FMF, SQ);
}
- Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
- return simplifyICmpInst(P, LHS, RHS, SQ);
+ Value *FoldCmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
+ return simplifyCmpInst(P, LHS, RHS, SQ);
}
Value *FoldGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
@@ -139,15 +139,6 @@ public:
return C; // avoid calling Fold
return ConstFolder.CreatePointerBitCastOrAddrSpaceCast(C, DestTy);
}
-
- //===--------------------------------------------------------------------===//
- // Compare Instructions
- //===--------------------------------------------------------------------===//
-
- Value *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
- Constant *RHS) const override {
- return ConstFolder.CreateFCmp(P, LHS, RHS);
- }
};
} // end namespace llvm
diff --git a/llvm/include/llvm/Analysis/TargetFolder.h b/llvm/include/llvm/Analysis/TargetFolder.h
index b4105ad76c02..f95d738a4065 100644
--- a/llvm/include/llvm/Analysis/TargetFolder.h
+++ b/llvm/include/llvm/Analysis/TargetFolder.h
@@ -99,7 +99,7 @@ public:
return FoldBinOp(Opc, LHS, RHS);
}
- Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
+ Value *FoldCmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
auto *LC = dyn_cast<Constant>(LHS);
auto *RC = dyn_cast<Constant>(RHS);
if (LC && RC)
@@ -216,15 +216,6 @@ public:
return C; // avoid calling Fold
return Fold(ConstantExpr::getPointerBitCastOrAddrSpaceCast(C, DestTy));
}
-
- //===--------------------------------------------------------------------===//
- // Compare Instructions
- //===--------------------------------------------------------------------===//
-
- Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
- Constant *RHS) const override {
- return Fold(ConstantExpr::getCompare(P, LHS, RHS));
- }
};
}
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 0c3a6b3742c7..cefce93f9e25 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -1397,7 +1397,7 @@ public:
InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
int VF,
const APInt &DemandedDstElts,
- TTI::TargetCostKind CostKind);
+ TTI::TargetCostKind CostKind) const;
/// \return The cost of Load and Store instructions.
InstructionCost
diff --git a/llvm/include/llvm/Analysis/VecFuncs.def b/llvm/include/llvm/Analysis/VecFuncs.def
index 10f1333cf888..e12eb7095b90 100644
--- a/llvm/include/llvm/Analysis/VecFuncs.def
+++ b/llvm/include/llvm/Analysis/VecFuncs.def
@@ -12,11 +12,6 @@
// This .def file also allows creating an array of vector functions supported in
// the specified framework or library.
-#if defined(TLI_DEFINE_MASSV_VECFUNCS_NAMES)
-#define TLI_DEFINE_MASSV_VECFUNCS
-#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF, VABI_PREFIX) VEC,
-#endif
-
#define FIXED(NL) ElementCount::getFixed(NL)
#define SCALABLE(NL) ElementCount::getScalable(NL)
#define NOMASK false
@@ -1276,14 +1271,3 @@ TLI_DEFINE_VECFUNC("cbrtf", "amd_vrs4_cbrtf", FIXED(4), NOMASK, "_ZGV_LLVM_N4v")
#undef FIXED
#undef TLI_DEFINE_VECFUNC
-#undef TLI_DEFINE_ACCELERATE_VECFUNCS
-#undef TLI_DEFINE_DARWIN_LIBSYSTEM_M_VECFUNCS
-#undef TLI_DEFINE_LIBMVEC_X86_VECFUNCS
-#undef TLI_DEFINE_MASSV_VECFUNCS
-#undef TLI_DEFINE_SVML_VECFUNCS
-#undef TLI_DEFINE_SLEEFGNUABI_VF2_VECFUNCS
-#undef TLI_DEFINE_SLEEFGNUABI_VF4_VECFUNCS
-#undef TLI_DEFINE_SLEEFGNUABI_SCALABLE_VECFUNCS
-#undef TLI_DEFINE_MASSV_VECFUNCS_NAMES
-#undef TLI_DEFINE_ARMPL_VECFUNCS
-#undef TLI_DEFINE_AMDLIBM_VECFUNCS
diff --git a/llvm/include/llvm/AsmParser/LLToken.h b/llvm/include/llvm/AsmParser/LLToken.h
index 0cbcdcd9ffac..df61ec6ed30e 100644
--- a/llvm/include/llvm/AsmParser/LLToken.h
+++ b/llvm/include/llvm/AsmParser/LLToken.h
@@ -109,6 +109,7 @@ enum Kind {
kw_fast,
kw_nuw,
kw_nsw,
+ kw_nusw,
kw_exact,
kw_disjoint,
kw_inbounds,
diff --git a/llvm/include/llvm/BinaryFormat/ELF.h b/llvm/include/llvm/BinaryFormat/ELF.h
index 85c85067e5dc..aa69f94f6505 100644
--- a/llvm/include/llvm/BinaryFormat/ELF.h
+++ b/llvm/include/llvm/BinaryFormat/ELF.h
@@ -796,11 +796,13 @@ enum : unsigned {
EF_AMDGPU_MACH_AMDGCN_GFX10_3_GENERIC = 0x053,
EF_AMDGPU_MACH_AMDGCN_GFX11_GENERIC = 0x054,
EF_AMDGPU_MACH_AMDGCN_RESERVED_0X55 = 0x055,
+ EF_AMDGPU_MACH_AMDGCN_RESERVED_0X56 = 0x056,
+ EF_AMDGPU_MACH_AMDGCN_RESERVED_0X57 = 0x057,
// clang-format on
// First/last AMDGCN-based processors.
EF_AMDGPU_MACH_AMDGCN_FIRST = EF_AMDGPU_MACH_AMDGCN_GFX600,
- EF_AMDGPU_MACH_AMDGCN_LAST = EF_AMDGPU_MACH_AMDGCN_GFX11_GENERIC,
+ EF_AMDGPU_MACH_AMDGCN_LAST = EF_AMDGPU_MACH_AMDGCN_RESERVED_0X57,
// Indicates if the "xnack" target feature is enabled for all code contained
// in the object.
@@ -1078,7 +1080,7 @@ enum : unsigned {
SHT_SYMTAB_SHNDX = 18, // Indices for SHN_XINDEX entries.
// Experimental support for SHT_RELR sections. For details, see proposal
// at https://groups.google.com/forum/#!topic/generic-abi/bX460iggiKg
- SHT_RELR = 19, // Relocation entries; only offsets.
+ SHT_RELR = 19, // Relocation entries; only offsets.
// TODO: Experimental CREL relocations. LLVM will change the value and
// break compatibility in the future.
SHT_CREL = 0x40000014,
diff --git a/llvm/include/llvm/Bitcode/BitcodeWriter.h b/llvm/include/llvm/Bitcode/BitcodeWriter.h
index 248d33f4502e..a343f0e05763 100644
--- a/llvm/include/llvm/Bitcode/BitcodeWriter.h
+++ b/llvm/include/llvm/Bitcode/BitcodeWriter.h
@@ -102,7 +102,8 @@ class raw_ostream;
void writeIndex(
const ModuleSummaryIndex *Index,
- const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex);
+ const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex,
+ const GVSummaryPtrSet *DecSummaries);
};
/// Write the specified module to the specified raw output stream.
@@ -147,10 +148,12 @@ class raw_ostream;
/// where it will be written in a new bitcode block. This is used when
/// writing the combined index file for ThinLTO. When writing a subset of the
/// index for a distributed backend, provide the \p ModuleToSummariesForIndex
- /// map.
+ /// map. \p DecSummaries specifies the set of summaries for which the
+ /// corresponding value should be imported as a declaration (prototype).
void writeIndexToFile(const ModuleSummaryIndex &Index, raw_ostream &Out,
const std::map<std::string, GVSummaryMapTy>
- *ModuleToSummariesForIndex = nullptr);
+ *ModuleToSummariesForIndex = nullptr,
+ const GVSummaryPtrSet *DecSummaries = nullptr);
/// If EmbedBitcode is set, save a copy of the llvm IR as data in the
/// __LLVM,__bitcode section (.llvmbc on non-MacOS).
diff --git a/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/llvm/include/llvm/Bitcode/LLVMBitCodes.h
index 909eb833c601..d3b9e96520f8 100644
--- a/llvm/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/llvm/include/llvm/Bitcode/LLVMBitCodes.h
@@ -385,7 +385,7 @@ enum ConstantsCodes {
CST_CODE_CSTRING = 9, // CSTRING: [values]
CST_CODE_CE_BINOP = 10, // CE_BINOP: [opcode, opval, opval]
CST_CODE_CE_CAST = 11, // CE_CAST: [opcode, opty, opval]
- CST_CODE_CE_GEP = 12, // CE_GEP: [n x operands]
+ CST_CODE_CE_GEP_OLD = 12, // CE_GEP: [n x operands]
CST_CODE_CE_SELECT = 13, // CE_SELECT: [opval, opval, opval]
CST_CODE_CE_EXTRACTELT = 14, // CE_EXTRACTELT: [opty, opval, opval]
CST_CODE_CE_INSERTELT = 15, // CE_INSERTELT: [opval, opval, opval]
@@ -412,6 +412,7 @@ enum ConstantsCodes {
// asmdialect|unwind,
// asmstr,conststr]
CST_CODE_CE_GEP_WITH_INRANGE = 31, // [opty, flags, range, n x operands]
+ CST_CODE_CE_GEP = 32, // [opty, flags, n x operands]
};
/// CastOpcodes - These are values used in the bitcode files to encode which
@@ -524,6 +525,14 @@ enum PossiblyExactOperatorOptionalFlags { PEO_EXACT = 0 };
/// PossiblyDisjointInst's SubclassOptionalData contents.
enum PossiblyDisjointInstOptionalFlags { PDI_DISJOINT = 0 };
+/// GetElementPtrOptionalFlags - Flags for serializing
+/// GEPOperator's SubclassOptionalData contents.
+enum GetElementPtrOptionalFlags {
+ GEP_INBOUNDS = 0,
+ GEP_NUSW = 1,
+ GEP_NUW = 2,
+};
+
/// Encoded AtomicOrdering values.
enum AtomicOrderingCodes {
ORDERING_NOTATOMIC = 0,
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index a9a33c7617d7..2111e82e1a99 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -869,6 +869,9 @@ public:
/// Combine insert vector element OOB.
bool matchInsertVectorElementOOB(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchFreezeOfSingleMaybePoisonOperand(MachineInstr &MI,
+ BuildFnTy &MatchInfo);
+
private:
/// Checks for legality of an indexed variant of \p LdSt.
bool isIndexedLoadStoreLegal(GLoadStore &LdSt) const;
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h
index 371c5c5a0a1e..cc2dd2f4e489 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h
@@ -212,6 +212,10 @@ enum {
/// - InsnID(ULEB128) - Instruction ID
GIM_CheckHasNoUse,
+ /// Check if the first result has exactly one non-debug use.
+ /// - InsnID(ULEB128) - Instruction ID
+ GIM_CheckHasOneUse,
+
/// Check the type for the specified operand
/// - InsnID(ULEB128) - Instruction ID
/// - OpIdx(ULEB128) - Operand index
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
index 2ea9d11779f0..05f1a7e57e56 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
@@ -468,7 +468,24 @@ bool GIMatchTableExecutor::executeMatchTable(
if (handleReject() == RejectAndGiveUp)
return false;
}
+ break;
+ }
+ case GIM_CheckHasOneUse: {
+ uint64_t InsnID = readULEB();
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckHasOneUse(MIs["
+ << InsnID << "])\n");
+
+ const MachineInstr *MI = State.MIs[InsnID];
+ assert(MI && "Used insn before defined");
+ assert(MI->getNumDefs() > 0 && "No defs");
+ const Register Res = MI->getOperand(0).getReg();
+ if (!MRI.hasOneNonDBGUse(Res)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
break;
}
case GIM_CheckAtomicOrdering: {
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
index 2a3145b635e6..2b3efc3b609f 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
@@ -34,6 +34,17 @@ public:
static bool classof(const MachineInstr *MI) {
return isPreISelGenericOpcode(MI->getOpcode());
}
+
+ bool hasPoisonGeneratingFlags() const {
+ return getFlags() & (NoUWrap | NoSWrap | IsExact | Disjoint | NonNeg |
+ FmNoNans | FmNoInfs);
+ }
+
+ void dropPoisonGeneratingFlags() {
+ clearFlags(NoUWrap | NoSWrap | IsExact | Disjoint | NonNeg | FmNoNans |
+ FmNoInfs);
+ assert(!hasPoisonGeneratingFlags());
+ }
};
/// Provides common memory operand functionality.
diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h
index 2b0c5d166d88..db48a0ae5514 100644
--- a/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -416,6 +416,12 @@ public:
Flags &= ~((uint32_t)Flag);
}
+ void clearFlags(unsigned flags) {
+ assert(isUInt<LLVM_MI_FLAGS_BITS>(flags) &&
+ "flags to be cleared are out of range for the Flags field");
+ Flags &= ~flags;
+ }
+
/// Return true if MI is in a bundle (but not the first MI in a bundle).
///
/// A bundle looks like this before it's finalized:
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index 979ef8033eb5..96a627069046 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -469,6 +469,7 @@ public:
MachineFunction &getMachineFunction() const { return *MF; }
const Pass *getPass() const { return SDAGISelPass; }
+ CodeGenOptLevel getOptLevel() const { return OptLevel; }
const DataLayout &getDataLayout() const { return MF->getDataLayout(); }
const TargetMachine &getTarget() const { return TM; }
const TargetSubtargetInfo &getSubtarget() const { return MF->getSubtarget(); }
@@ -990,6 +991,11 @@ public:
/// value assuming it was the smaller SrcTy value.
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);
+ /// Return the expression required to zero extend the Op
+ /// value assuming it was the smaller SrcTy value.
+ SDValue getVPZeroExtendInReg(SDValue Op, SDValue Mask, SDValue EVL,
+ const SDLoc &DL, EVT VT);
+
/// Convert Op, which must be of integer type, to the integer type VT, by
/// either truncating it or performing either zero or sign extension as
/// appropriate extension for the pointer's semantics.
diff --git a/llvm/include/llvm/CodeGen/ValueTypes.h b/llvm/include/llvm/CodeGen/ValueTypes.h
index b66c66d1bfc4..dab6c421bf6e 100644
--- a/llvm/include/llvm/CodeGen/ValueTypes.h
+++ b/llvm/include/llvm/CodeGen/ValueTypes.h
@@ -488,8 +488,10 @@ namespace llvm {
Type *getTypeForEVT(LLVMContext &Context) const;
/// Return the value type corresponding to the specified type.
- /// This returns all pointers as iPTR. If HandleUnknown is true, unknown
- /// types are returned as Other, otherwise they are invalid.
+ /// If HandleUnknown is true, unknown types are returned as Other,
+ /// otherwise they are invalid.
+ /// NB: This includes pointer types, which require a DataLayout to convert
+ /// to a concrete value type.
static EVT getEVT(Type *Ty, bool HandleUnknown = false);
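
As a hedged editorial note: with this change, sizing a pointer goes through a
DataLayout-aware query such as ``TargetLowering::getValueType`` instead of
``getEVT`` (``TLI``, ``DL`` and ``Ctx`` below are assumed to be in scope).

.. code-block:: c++

  EVT IntVT = EVT::getEVT(Type::getInt32Ty(Ctx)); // fixed width: MVT::i32
  EVT PtrVT = TLI.getValueType(DL, PointerType::get(Ctx, /*AddressSpace=*/0));
  // The pointer's width (e.g. i64) comes from the DataLayout, not the type.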
intptr_t getRawBits() const {
diff --git a/llvm/include/llvm/CodeGen/ValueTypes.td b/llvm/include/llvm/CodeGen/ValueTypes.td
index 900b30d9b024..c3e378ed8f6e 100644
--- a/llvm/include/llvm/CodeGen/ValueTypes.td
+++ b/llvm/include/llvm/CodeGen/ValueTypes.td
@@ -296,18 +296,23 @@ def MetadataVT : ValueType<0, 249> { // Metadata
def iPTRAny : VTAny<250>;
// Pseudo valuetype to represent "vector of any size"
+// Should only be used in TableGen.
def vAny : VTAny<251>;
// Pseudo valuetype to represent "float of any format"
+// Should only be used in TableGen.
def fAny : VTAny<252>;
// Pseudo valuetype to represent "integer of any bit width"
+// Should only be used in TableGen.
def iAny : VTAny<253>;
// Pseudo valuetype mapped to the current pointer size.
+// Should only be used in TableGen.
def iPTR : ValueType<0, 254>;
// Pseudo valuetype to represent "any type of any size".
+// Should only be used in TableGen.
def Any : VTAny<255>;
} // end defset ValueTypes
diff --git a/llvm/include/llvm/CodeGenTypes/MachineValueType.h b/llvm/include/llvm/CodeGenTypes/MachineValueType.h
index 9aceb9896021..3b2a9b535c09 100644
--- a/llvm/include/llvm/CodeGenTypes/MachineValueType.h
+++ b/llvm/include/llvm/CodeGenTypes/MachineValueType.h
@@ -476,9 +476,11 @@ namespace llvm {
return getVectorVT(VT, EC.getKnownMinValue());
}
- /// Return the value type corresponding to the specified type. This returns
- /// all pointers as iPTR. If HandleUnknown is true, unknown types are
- /// returned as Other, otherwise they are invalid.
+ /// Return the value type corresponding to the specified type.
+ /// If HandleUnknown is true, unknown types are returned as Other,
+ /// otherwise they are invalid.
+ /// NB: This includes pointer types, which require a DataLayout to convert
+ /// to a concrete value type.
static MVT getVT(Type *Ty, bool HandleUnknown = false);
public:
diff --git a/llvm/include/llvm/Config/config.h.cmake b/llvm/include/llvm/Config/config.h.cmake
index 977c182e9d2b..ff30741c8f36 100644
--- a/llvm/include/llvm/Config/config.h.cmake
+++ b/llvm/include/llvm/Config/config.h.cmake
@@ -209,9 +209,6 @@
/* Define to 1 if you have the <sys/types.h> header file. */
#cmakedefine HAVE_SYS_TYPES_H ${HAVE_SYS_TYPES_H}
-/* Define if the setupterm() function is supported this platform. */
-#cmakedefine LLVM_ENABLE_TERMINFO ${LLVM_ENABLE_TERMINFO}
-
/* Define to 1 if you have the <termios.h> header file. */
#cmakedefine HAVE_TERMIOS_H ${HAVE_TERMIOS_H}
diff --git a/llvm/include/llvm/Frontend/OpenMP/ClauseT.h b/llvm/include/llvm/Frontend/OpenMP/ClauseT.h
index 13a37265762a..5a3f8c605959 100644
--- a/llvm/include/llvm/Frontend/OpenMP/ClauseT.h
+++ b/llvm/include/llvm/Frontend/OpenMP/ClauseT.h
@@ -19,7 +19,7 @@
// - EmptyTrait: the class has no data members.
// - WrapperTrait: the class has a single member `v`
// - TupleTrait: the class has a tuple member `t`
-// - UnionTrait the class has a varuant member `u`
+// - UnionTrait: the class has a variant member `u`
// - IncompleteTrait: the class is a placeholder class that is currently empty,
// but will be completed at a later time.
// Note: This structure follows the one used in flang parser.
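
An editorial sketch of the member shapes these traits describe (the type
names below are illustrative, not the actual clause classes):

.. code-block:: c++

  #include <tuple>
  #include <variant>

  struct EmptyLike {};                              // EmptyTrait: no members
  struct WrapperLike { int v; };                    // WrapperTrait: member `v`
  struct TupleLike { std::tuple<int, float> t; };   // TupleTrait: member `t`
  struct UnionLike { std::variant<int, float> u; }; // UnionTrait: member `u`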
diff --git a/llvm/include/llvm/IR/ConstantFolder.h b/llvm/include/llvm/IR/ConstantFolder.h
index 3e74a563a584..ce4b44ddc855 100644
--- a/llvm/include/llvm/IR/ConstantFolder.h
+++ b/llvm/include/llvm/IR/ConstantFolder.h
@@ -95,7 +95,7 @@ public:
return nullptr;
}
- Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
+ Value *FoldCmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
auto *LC = dyn_cast<Constant>(LHS);
auto *RC = dyn_cast<Constant>(RHS);
if (LC && RC)
@@ -201,15 +201,6 @@ public:
Type *DestTy) const override {
return ConstantExpr::getPointerBitCastOrAddrSpaceCast(C, DestTy);
}
-
- //===--------------------------------------------------------------------===//
- // Compare Instructions
- //===--------------------------------------------------------------------===//
-
- Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
- Constant *RHS) const override {
- return ConstantExpr::getCompare(P, LHS, RHS);
- }
};
} // end namespace llvm
diff --git a/llvm/include/llvm/IR/ConstantRange.h b/llvm/include/llvm/IR/ConstantRange.h
index e718e6e7e340..a5e2f809ab41 100644
--- a/llvm/include/llvm/IR/ConstantRange.h
+++ b/llvm/include/llvm/IR/ConstantRange.h
@@ -419,6 +419,15 @@ public:
/// treating both this and \p Other as unsigned ranges.
ConstantRange multiply(const ConstantRange &Other) const;
+ /// Return a new range representing the possible values resulting
+ /// from a multiplication with wrap type \p NoWrapKind of a value in this
+ /// range and a value in \p Other.
+ /// If the result range is disjoint, the preferred range is determined by the
+ /// \p PreferredRangeType.
+ ConstantRange
+ multiplyWithNoWrap(const ConstantRange &Other, unsigned NoWrapKind,
+ PreferredRangeType RangeType = Smallest) const;
+
/// Return range of possible values for a signed multiplication of this and
/// \p Other. However, if overflow is possible always return a full range
/// rather than trying to determine a more precise result.
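
A minimal sketch of the new multiplyWithNoWrap (assuming llvm/IR/ConstantRange.h and llvm/IR/Operator.h are included): with a no-wrap kind, products that would wrap are excluded, so the result can be tighter than plain multiply().

    ConstantRange LHS(APInt(8, 20), APInt(8, 40)); // [20, 40)
    ConstantRange RHS(APInt(8, 4), APInt(8, 8));   // [4, 8)
    // Under nsw, products above INT8_MAX are impossible, so they are
    // excluded rather than folded into a wrapped (or full) range.
    ConstantRange Prod =
        LHS.multiplyWithNoWrap(RHS, OverflowingBinaryOperator::NoSignedWrap);
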
diff --git a/llvm/include/llvm/IR/Constants.h b/llvm/include/llvm/IR/Constants.h
index 9ec81903f09c..a1e5005a9d1d 100644
--- a/llvm/include/llvm/IR/Constants.h
+++ b/llvm/include/llvm/IR/Constants.h
@@ -28,6 +28,7 @@
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GEPNoWrapFlags.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/User.h"
@@ -1198,26 +1199,27 @@ public:
/// \param OnlyIfReducedTy see \a getWithOperands() docs.
static Constant *
getGetElementPtr(Type *Ty, Constant *C, ArrayRef<Constant *> IdxList,
- bool InBounds = false,
+ GEPNoWrapFlags NW = GEPNoWrapFlags::none(),
std::optional<ConstantRange> InRange = std::nullopt,
Type *OnlyIfReducedTy = nullptr) {
return getGetElementPtr(
- Ty, C, ArrayRef((Value *const *)IdxList.data(), IdxList.size()),
- InBounds, InRange, OnlyIfReducedTy);
+ Ty, C, ArrayRef((Value *const *)IdxList.data(), IdxList.size()), NW,
+ InRange, OnlyIfReducedTy);
}
static Constant *
- getGetElementPtr(Type *Ty, Constant *C, Constant *Idx, bool InBounds = false,
+ getGetElementPtr(Type *Ty, Constant *C, Constant *Idx,
+ GEPNoWrapFlags NW = GEPNoWrapFlags::none(),
std::optional<ConstantRange> InRange = std::nullopt,
Type *OnlyIfReducedTy = nullptr) {
// This form of the function only exists to avoid ambiguous overload
// warnings about whether to convert Idx to ArrayRef<Constant *> or
// ArrayRef<Value *>.
- return getGetElementPtr(Ty, C, cast<Value>(Idx), InBounds, InRange,
+ return getGetElementPtr(Ty, C, cast<Value>(Idx), NW, InRange,
OnlyIfReducedTy);
}
static Constant *
getGetElementPtr(Type *Ty, Constant *C, ArrayRef<Value *> IdxList,
- bool InBounds = false,
+ GEPNoWrapFlags NW = GEPNoWrapFlags::none(),
std::optional<ConstantRange> InRange = std::nullopt,
Type *OnlyIfReducedTy = nullptr);
@@ -1225,18 +1227,18 @@ public:
/// "inbounds" flag in LangRef.html for details.
static Constant *getInBoundsGetElementPtr(Type *Ty, Constant *C,
ArrayRef<Constant *> IdxList) {
- return getGetElementPtr(Ty, C, IdxList, true);
+ return getGetElementPtr(Ty, C, IdxList, GEPNoWrapFlags::inBounds());
}
static Constant *getInBoundsGetElementPtr(Type *Ty, Constant *C,
Constant *Idx) {
// This form of the function only exists to avoid ambiguous overload
// warnings about whether to convert Idx to ArrayRef<Constant *> or
// ArrayRef<Value *>.
- return getGetElementPtr(Ty, C, Idx, true);
+ return getGetElementPtr(Ty, C, Idx, GEPNoWrapFlags::inBounds());
}
static Constant *getInBoundsGetElementPtr(Type *Ty, Constant *C,
ArrayRef<Value *> IdxList) {
- return getGetElementPtr(Ty, C, IdxList, true);
+ return getGetElementPtr(Ty, C, IdxList, GEPNoWrapFlags::inBounds());
}
static Constant *getExtractElement(Constant *Vec, Constant *Idx,
diff --git a/llvm/include/llvm/IR/GEPNoWrapFlags.h b/llvm/include/llvm/IR/GEPNoWrapFlags.h
new file mode 100644
index 000000000000..feaccc878de0
--- /dev/null
+++ b/llvm/include/llvm/IR/GEPNoWrapFlags.h
@@ -0,0 +1,93 @@
+//===-- llvm/GEPNoWrapFlags.h - NoWrap flags for GEPs -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the nowrap flags for getelementptr operators.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_GEPNOWRAPFLAGS_H
+#define LLVM_IR_GEPNOWRAPFLAGS_H
+
+#include <assert.h>
+
+namespace llvm {
+
+/// Represents flags for the getelementptr instruction/expression.
+/// The following flags are supported:
+/// * inbounds (implies nusw)
+/// * nusw (no unsigned signed wrap)
+/// * nuw (no unsigned wrap)
+/// See LangRef for a description of their semantics.
+class GEPNoWrapFlags {
+ enum : unsigned {
+ InBoundsFlag = (1 << 0),
+ NUSWFlag = (1 << 1),
+ NUWFlag = (1 << 2),
+ };
+
+ unsigned Flags;
+ GEPNoWrapFlags(unsigned Flags) : Flags(Flags) {
+ assert((!isInBounds() || hasNoUnsignedSignedWrap()) &&
+ "inbounds implies nusw");
+ }
+
+public:
+ GEPNoWrapFlags() : Flags(0) {}
+ // For historical reasons, interpret plain boolean as InBounds.
+ // TODO: Migrate users to pass explicit GEPNoWrapFlags and remove this ctor.
+ GEPNoWrapFlags(bool IsInBounds)
+ : Flags(IsInBounds ? (InBoundsFlag | NUSWFlag) : 0) {}
+
+ static GEPNoWrapFlags none() { return GEPNoWrapFlags(); }
+ static GEPNoWrapFlags inBounds() {
+ return GEPNoWrapFlags(InBoundsFlag | NUSWFlag);
+ }
+ static GEPNoWrapFlags noUnsignedSignedWrap() {
+ return GEPNoWrapFlags(NUSWFlag);
+ }
+ static GEPNoWrapFlags noUnsignedWrap() { return GEPNoWrapFlags(NUWFlag); }
+
+ static GEPNoWrapFlags fromRaw(unsigned Flags) {
+ return GEPNoWrapFlags(Flags);
+ }
+ unsigned getRaw() const { return Flags; }
+
+ bool isInBounds() const { return Flags & InBoundsFlag; }
+ bool hasNoUnsignedSignedWrap() const { return Flags & NUSWFlag; }
+ bool hasNoUnsignedWrap() const { return Flags & NUWFlag; }
+
+ GEPNoWrapFlags withoutInBounds() const {
+ return GEPNoWrapFlags(Flags & ~InBoundsFlag);
+ }
+ GEPNoWrapFlags withoutNoUnsignedSignedWrap() const {
+ return GEPNoWrapFlags(Flags & ~(InBoundsFlag | NUSWFlag));
+ }
+ GEPNoWrapFlags withoutNoUnsignedWrap() const {
+ return GEPNoWrapFlags(Flags & ~NUWFlag);
+ }
+
+ bool operator==(GEPNoWrapFlags Other) const { return Flags == Other.Flags; }
+ bool operator!=(GEPNoWrapFlags Other) const { return !(*this == Other); }
+
+ GEPNoWrapFlags operator&(GEPNoWrapFlags Other) const {
+ return GEPNoWrapFlags(Flags & Other.Flags);
+ }
+ GEPNoWrapFlags operator|(GEPNoWrapFlags Other) const {
+ return GEPNoWrapFlags(Flags | Other.Flags);
+ }
+ GEPNoWrapFlags &operator&=(GEPNoWrapFlags Other) {
+ Flags &= Other.Flags;
+ return *this;
+ }
+ GEPNoWrapFlags &operator|=(GEPNoWrapFlags Other) {
+ Flags |= Other.Flags;
+ return *this;
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_GEPNOWRAPFLAGS_H
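
A short usage sketch of the class above:

    GEPNoWrapFlags NW = GEPNoWrapFlags::noUnsignedSignedWrap() |
                        GEPNoWrapFlags::noUnsignedWrap();
    assert(NW.hasNoUnsignedSignedWrap() && NW.hasNoUnsignedWrap());
    assert(!NW.isInBounds()); // nusw + nuw does not imply inbounds.
    // Clearing nusw clears inbounds too, since inbounds implies nusw.
    assert(GEPNoWrapFlags::inBounds().withoutNoUnsignedSignedWrap() ==
           GEPNoWrapFlags::none());
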
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index b6534a1962a2..40a9cf507248 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -455,7 +455,7 @@ public:
/// block.
GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
unsigned AddressSpace = 0,
- Module *M = nullptr);
+ Module *M = nullptr, bool AddNull = true);
/// Get a constant value representing either true or false.
ConstantInt *getInt1(bool V) {
@@ -1992,8 +1992,9 @@ public:
/// block.
Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
unsigned AddressSpace = 0,
- Module *M = nullptr) {
- GlobalVariable *GV = CreateGlobalString(Str, Name, AddressSpace, M);
+ Module *M = nullptr, bool AddNull = true) {
+ GlobalVariable *GV =
+ CreateGlobalString(Str, Name, AddressSpace, M, AddNull);
Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
Constant *Indices[] = {Zero, Zero};
return ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV,
@@ -2350,7 +2351,7 @@ public:
Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
const Twine &Name = "") {
- if (auto *V = Folder.FoldICmp(P, LHS, RHS))
+ if (auto *V = Folder.FoldCmp(P, LHS, RHS))
return V;
return Insert(new ICmpInst(P, LHS, RHS), Name);
}
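
A sketch of the new AddNull parameter (Ctx and M stand for an existing LLVMContext and Module): passing false emits the raw bytes without a trailing NUL, e.g. for length-prefixed string tables.

    IRBuilder<> B(Ctx);
    // Produces [3 x i8] c"abc" rather than [4 x i8] c"abc\00".
    GlobalVariable *GV = B.CreateGlobalString(
        "abc", "strtab", /*AddressSpace=*/0, /*M=*/&M, /*AddNull=*/false);
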
diff --git a/llvm/include/llvm/IR/IRBuilderFolder.h b/llvm/include/llvm/IR/IRBuilderFolder.h
index 3020f2684ee4..f474c3a0206b 100644
--- a/llvm/include/llvm/IR/IRBuilderFolder.h
+++ b/llvm/include/llvm/IR/IRBuilderFolder.h
@@ -48,8 +48,8 @@ public:
virtual Value *FoldUnOpFMF(Instruction::UnaryOps Opc, Value *V,
FastMathFlags FMF) const = 0;
- virtual Value *FoldICmp(CmpInst::Predicate P, Value *LHS,
- Value *RHS) const = 0;
+ virtual Value *FoldCmp(CmpInst::Predicate P, Value *LHS,
+ Value *RHS) const = 0;
virtual Value *FoldGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
bool IsInBounds = false) const = 0;
@@ -84,13 +84,6 @@ public:
virtual Value *CreatePointerCast(Constant *C, Type *DestTy) const = 0;
virtual Value *CreatePointerBitCastOrAddrSpaceCast(Constant *C,
Type *DestTy) const = 0;
-
- //===--------------------------------------------------------------------===//
- // Compare Instructions
- //===--------------------------------------------------------------------===//
-
- virtual Value *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
- Constant *RHS) const = 0;
};
} // end namespace llvm
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index 0f7b215b80fd..8d7c2b0c957d 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -26,6 +26,7 @@
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GEPNoWrapFlags.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/OperandTraits.h"
@@ -1167,13 +1168,26 @@ public:
/// a constant offset between them.
bool hasAllConstantIndices() const;
+ /// Set nowrap flags for GEP instruction.
+ void setNoWrapFlags(GEPNoWrapFlags NW);
+
/// Set or clear the inbounds flag on this GEP instruction.
/// See LangRef.html for the meaning of inbounds on a getelementptr.
+ /// TODO: Remove this method in favor of setNoWrapFlags().
void setIsInBounds(bool b = true);
+ /// Get the nowrap flags for the GEP instruction.
+ GEPNoWrapFlags getNoWrapFlags() const;
+
/// Determine whether the GEP has the inbounds flag.
bool isInBounds() const;
+ /// Determine whether the GEP has the nusw flag.
+ bool hasNoUnsignedSignedWrap() const;
+
+ /// Determine whether the GEP has the nuw flag.
+ bool hasNoUnsignedWrap() const;
+
/// Accumulate the constant address offset of this GEP if possible.
///
/// This routine accepts an APInt into which it will accumulate the constant
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 78f0dbec863e..3019f68083d4 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1371,7 +1371,7 @@ let IntrProperties = [IntrNoMem, IntrSpeculatable, IntrWillReturn] in {
// The result of eh.typeid.for depends on the enclosing function, but inside a
// given function it is 'const' and may be CSE'd etc.
-def int_eh_typeid_for : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty], [IntrNoMem]>;
+def int_eh_typeid_for : Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty], [IntrNoMem]>;
def int_eh_return_i32 : Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty]>;
def int_eh_return_i64 : Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty]>;
@@ -1730,7 +1730,7 @@ def int_coro_subfn_addr : DefaultAttrsIntrinsic<
///===-------------------------- Other Intrinsics --------------------------===//
//
-// TODO: We should introduce a new memory kind fo traps (and other side effects
+// TODO: We should introduce a new memory kind for traps (and other side effects
// we only model to keep things alive).
def int_trap : Intrinsic<[], [], [IntrNoReturn, IntrCold, IntrInaccessibleMemOnly,
IntrWriteMem]>, ClangBuiltin<"__builtin_trap">;
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index be8048ca2459..d4a8954a4cda 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -2466,25 +2466,27 @@ def int_amdgcn_perm :
// GFX9 Intrinsics
//===----------------------------------------------------------------------===//
-class AMDGPUGlobalLoadLDS : Intrinsic <
- [],
- [LLVMQualPointerType<1>, // Base global pointer to load from
- LLVMQualPointerType<3>, // LDS base pointer to store to
- llvm_i32_ty, // Data byte size: 1/2/4
- llvm_i32_ty, // imm offset (applied to both global and LDS address)
- llvm_i32_ty], // auxiliary data (imm, cachepolicy (bit 0 = glc/sc0,
- // bit 1 = slc/sc1,
- // bit 2 = dlc on gfx10/gfx11))
- // bit 4 = scc/nt on gfx90a+))
- // gfx12+:
- // cachepolicy (bits [0-2] = th,
- // bits [3-4] = scope)
- // swizzled buffer (bit 6 = swz),
- [IntrWillReturn, NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
- ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, IntrNoCallback, IntrNoFree],
- "", [SDNPMemOperand]>;
+class AMDGPUGlobalLoadLDS :
+ ClangBuiltin<"__builtin_amdgcn_global_load_lds">,
+ Intrinsic <
+ [],
+ [LLVMQualPointerType<1>, // Base global pointer to load from
+ LLVMQualPointerType<3>, // LDS base pointer to store to
+ llvm_i32_ty, // Data byte size: 1/2/4
+ llvm_i32_ty, // imm offset (applied to both global and LDS address)
+ llvm_i32_ty], // auxiliary data (imm, cachepolicy (bit 0 = sc0,
+ // bit 1 = sc1,
+ // bit 4 = scc))
+ [IntrWillReturn, NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
+ ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, IntrNoCallback, IntrNoFree],
+ "", [SDNPMemOperand]>;
def int_amdgcn_global_load_lds : AMDGPUGlobalLoadLDS;
+// Use read/write of inaccessible memory to model the fact that this reads a
+// volatile value.
+def int_amdgcn_pops_exiting_wave_id :
+ DefaultAttrsIntrinsic<[llvm_i32_ty], [], [IntrInaccessibleMemOnly]>;
+
//===----------------------------------------------------------------------===//
// GFX10 Intrinsics
//===----------------------------------------------------------------------===//
diff --git a/llvm/include/llvm/IR/IntrinsicsSPIRV.td b/llvm/include/llvm/IR/IntrinsicsSPIRV.td
index cc84decc4340..90f12674d047 100644
--- a/llvm/include/llvm/IR/IntrinsicsSPIRV.td
+++ b/llvm/include/llvm/IR/IntrinsicsSPIRV.td
@@ -36,6 +36,7 @@ let TargetPrefix = "spv" in {
def int_spv_alloca : Intrinsic<[llvm_any_ty], []>;
def int_spv_alloca_array : Intrinsic<[llvm_any_ty], [llvm_anyint_ty]>;
def int_spv_undef : Intrinsic<[llvm_i32_ty], []>;
+ def int_spv_inline_asm : Intrinsic<[], [llvm_metadata_ty, llvm_metadata_ty, llvm_vararg_ty]>;
// Expect, Assume Intrinsics
def int_spv_assume : Intrinsic<[], [llvm_i1_ty]>;
diff --git a/llvm/include/llvm/IR/IntrinsicsWebAssembly.td b/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
index 572d334ac955..237f268784bb 100644
--- a/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
+++ b/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
@@ -337,6 +337,14 @@ def int_wasm_storef16_f32:
[llvm_float_ty, llvm_ptr_ty],
[IntrWriteMem, IntrArgMemOnly],
"", [SDNPMemOperand]>;
+def int_wasm_splat_f16x8:
+ DefaultAttrsIntrinsic<[llvm_v8f16_ty],
+ [llvm_float_ty],
+ [IntrNoMem, IntrSpeculatable]>;
+def int_wasm_extract_lane_f16x8:
+ DefaultAttrsIntrinsic<[llvm_float_ty],
+ [llvm_v8f16_ty, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable]>;
//===----------------------------------------------------------------------===//
diff --git a/llvm/include/llvm/IR/IntrinsicsX86.td b/llvm/include/llvm/IR/IntrinsicsX86.td
index fdc2b0fb7f80..aee804047e1b 100644
--- a/llvm/include/llvm/IR/IntrinsicsX86.td
+++ b/llvm/include/llvm/IR/IntrinsicsX86.td
@@ -3843,58 +3843,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
DefaultAttrsIntrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty],
[IntrNoMem]>;
-
- def int_x86_avx512_rcp28_ps : ClangBuiltin<"__builtin_ia32_rcp28ps_mask">,
- DefaultAttrsIntrinsic<[llvm_v16f32_ty],
- [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
- llvm_i32_ty],
- [IntrNoMem, ImmArg<ArgIndex<3>>]>;
- def int_x86_avx512_rcp28_pd : ClangBuiltin<"__builtin_ia32_rcp28pd_mask">,
- DefaultAttrsIntrinsic<[llvm_v8f64_ty],
- [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
- llvm_i32_ty],
- [IntrNoMem, ImmArg<ArgIndex<3>>]>;
- def int_x86_avx512_exp2_ps : ClangBuiltin<"__builtin_ia32_exp2ps_mask">,
- DefaultAttrsIntrinsic<[llvm_v16f32_ty],
- [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
- llvm_i32_ty],
- [IntrNoMem, ImmArg<ArgIndex<3>>]>;
- def int_x86_avx512_exp2_pd : ClangBuiltin<"__builtin_ia32_exp2pd_mask">,
- DefaultAttrsIntrinsic<[llvm_v8f64_ty],
- [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
- llvm_i32_ty],
- [IntrNoMem, ImmArg<ArgIndex<3>>]>;
-
- def int_x86_avx512_rcp28_ss : ClangBuiltin<"__builtin_ia32_rcp28ss_round_mask">,
- DefaultAttrsIntrinsic<[llvm_v4f32_ty],
- [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
- llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<ArgIndex<4>>]>;
- def int_x86_avx512_rcp28_sd : ClangBuiltin<"__builtin_ia32_rcp28sd_round_mask">,
- DefaultAttrsIntrinsic<[llvm_v2f64_ty],
- [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
- llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<ArgIndex<4>>]>;
- def int_x86_avx512_rsqrt28_ps : ClangBuiltin<"__builtin_ia32_rsqrt28ps_mask">,
- DefaultAttrsIntrinsic<[llvm_v16f32_ty],
- [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
- llvm_i32_ty],
- [IntrNoMem, ImmArg<ArgIndex<3>>]>;
- def int_x86_avx512_rsqrt28_pd : ClangBuiltin<"__builtin_ia32_rsqrt28pd_mask">,
- DefaultAttrsIntrinsic<[llvm_v8f64_ty],
- [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
- llvm_i32_ty],
- [IntrNoMem, ImmArg<ArgIndex<3>>]>;
- def int_x86_avx512_rsqrt28_ss : ClangBuiltin<"__builtin_ia32_rsqrt28ss_round_mask">,
- DefaultAttrsIntrinsic<[llvm_v4f32_ty],
- [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
- llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<ArgIndex<4>>]>;
- def int_x86_avx512_rsqrt28_sd : ClangBuiltin<"__builtin_ia32_rsqrt28sd_round_mask">,
- DefaultAttrsIntrinsic<[llvm_v2f64_ty],
- [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
- llvm_i8_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<ArgIndex<4>>]>;
def int_x86_avx512_psad_bw_512 : ClangBuiltin<"__builtin_ia32_psadbw512">,
DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
[IntrNoMem, Commutative]>;
@@ -4177,38 +4125,6 @@ let TargetPrefix = "x86" in {
Intrinsic<[],
[llvm_ptr_ty, llvm_i8_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty],
[ImmArg<ArgIndex<4>>]>;
-
- // gather prefetch
- // NOTE: These can't be ArgMemOnly because you can put the address completely
- // in the index register.
- def int_x86_avx512_gatherpf_dpd_512 : ClangBuiltin<"__builtin_ia32_gatherpfdpd">,
- Intrinsic<[], [llvm_i8_ty, llvm_v8i32_ty, llvm_ptr_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
- def int_x86_avx512_gatherpf_dps_512 : ClangBuiltin<"__builtin_ia32_gatherpfdps">,
- Intrinsic<[], [llvm_i16_ty, llvm_v16i32_ty, llvm_ptr_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
- def int_x86_avx512_gatherpf_qpd_512 : ClangBuiltin<"__builtin_ia32_gatherpfqpd">,
- Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
- def int_x86_avx512_gatherpf_qps_512 : ClangBuiltin<"__builtin_ia32_gatherpfqps">,
- Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
-
- // scatter prefetch
- // NOTE: These can't be ArgMemOnly because you can put the address completely
- // in the index register.
- def int_x86_avx512_scatterpf_dpd_512 : ClangBuiltin<"__builtin_ia32_scatterpfdpd">,
- Intrinsic<[], [llvm_i8_ty, llvm_v8i32_ty, llvm_ptr_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
- def int_x86_avx512_scatterpf_dps_512 : ClangBuiltin<"__builtin_ia32_scatterpfdps">,
- Intrinsic<[], [llvm_i16_ty, llvm_v16i32_ty, llvm_ptr_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
- def int_x86_avx512_scatterpf_qpd_512 : ClangBuiltin<"__builtin_ia32_scatterpfqpd">,
- Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
- def int_x86_avx512_scatterpf_qps_512 : ClangBuiltin<"__builtin_ia32_scatterpfqps">,
- Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
- llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
}
// AVX512 gather/scatter intrinsics that use vXi1 masks.
diff --git a/llvm/include/llvm/IR/ModuleSummaryIndex.h b/llvm/include/llvm/IR/ModuleSummaryIndex.h
index 5d137d4b3553..a6bb261af752 100644
--- a/llvm/include/llvm/IR/ModuleSummaryIndex.h
+++ b/llvm/include/llvm/IR/ModuleSummaryIndex.h
@@ -587,6 +587,10 @@ public:
void setImportKind(ImportKind IK) { Flags.ImportType = IK; }
+ GlobalValueSummary::ImportKind importType() const {
+ return static_cast<ImportKind>(Flags.ImportType);
+ }
+
GlobalValue::VisibilityTypes getVisibility() const {
return (GlobalValue::VisibilityTypes)Flags.Visibility;
}
@@ -1272,6 +1276,9 @@ using ModulePathStringTableTy = StringMap<ModuleHash>;
/// a particular module, and provide efficient access to their summary.
using GVSummaryMapTy = DenseMap<GlobalValue::GUID, GlobalValueSummary *>;
+/// A set of global value summary pointers.
+using GVSummaryPtrSet = SmallPtrSet<GlobalValueSummary *, 4>;
+
/// Map of a type GUID to type id string and summary (multimap used
/// in case of GUID conflicts).
using TypeIdSummaryMapTy =
diff --git a/llvm/include/llvm/IR/NoFolder.h b/llvm/include/llvm/IR/NoFolder.h
index 7bb5d5e696e9..72ab22c0d294 100644
--- a/llvm/include/llvm/IR/NoFolder.h
+++ b/llvm/include/llvm/IR/NoFolder.h
@@ -70,7 +70,7 @@ public:
return nullptr;
}
- Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
+ Value *FoldCmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
return nullptr;
}
@@ -129,15 +129,6 @@ public:
Constant *C, Type *DestTy) const override {
return CastInst::CreatePointerBitCastOrAddrSpaceCast(C, DestTy);
}
-
- //===--------------------------------------------------------------------===//
- // Compare Instructions
- //===--------------------------------------------------------------------===//
-
- Instruction *CreateFCmp(CmpInst::Predicate P,
- Constant *LHS, Constant *RHS) const override {
- return new FCmpInst(P, LHS, RHS);
- }
};
} // end namespace llvm
diff --git a/llvm/include/llvm/IR/Operator.h b/llvm/include/llvm/IR/Operator.h
index b2307948bbbc..fda26891acfa 100644
--- a/llvm/include/llvm/IR/Operator.h
+++ b/llvm/include/llvm/IR/Operator.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/MapVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/FMF.h"
+#include "llvm/IR/GEPNoWrapFlags.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
@@ -399,26 +400,24 @@ class LShrOperator
};
class GEPOperator
- : public ConcreteOperator<Operator, Instruction::GetElementPtr> {
- friend class GetElementPtrInst;
- friend class ConstantExpr;
-
- enum {
- IsInBounds = (1 << 0),
- };
-
- void setIsInBounds(bool B) {
- SubclassOptionalData =
- (SubclassOptionalData & ~IsInBounds) | (B * IsInBounds);
- }
-
+ : public ConcreteOperator<Operator, Instruction::GetElementPtr> {
public:
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+ GEPNoWrapFlags getNoWrapFlags() const {
+ return GEPNoWrapFlags::fromRaw(SubclassOptionalData);
+ }
+
/// Test whether this is an inbounds GEP, as defined by LangRef.html.
- bool isInBounds() const {
- return SubclassOptionalData & IsInBounds;
+ bool isInBounds() const { return getNoWrapFlags().isInBounds(); }
+
+ bool hasNoUnsignedSignedWrap() const {
+ return getNoWrapFlags().hasNoUnsignedSignedWrap();
+ }
+
+ bool hasNoUnsignedWrap() const {
+ return getNoWrapFlags().hasNoUnsignedWrap();
}
/// Returns the offset of the index with an inrange attachment, or
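
A sketch of querying the new accessors (V is a hypothetical Value pointer):

    if (auto *GEP = dyn_cast<GEPOperator>(V)) {
      GEPNoWrapFlags NW = GEP->getNoWrapFlags();
      if (NW.hasNoUnsignedWrap() && !NW.isInBounds()) {
        // nuw without inbounds: the unsigned offset arithmetic cannot wrap,
        // but the result may still point outside the underlying object.
      }
    }
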
diff --git a/llvm/include/llvm/IR/VPIntrinsics.def b/llvm/include/llvm/IR/VPIntrinsics.def
index 20f5bb2b531d..8eced073501e 100644
--- a/llvm/include/llvm/IR/VPIntrinsics.def
+++ b/llvm/include/llvm/IR/VPIntrinsics.def
@@ -174,10 +174,10 @@ HELPER_REGISTER_BINARY_INT_VP(vp_add, VP_ADD, Add, ADD)
HELPER_REGISTER_BINARY_INT_VP(vp_and, VP_AND, And, AND)
// llvm.vp.ashr(x,y,mask,vlen)
-HELPER_REGISTER_BINARY_INT_VP(vp_ashr, VP_ASHR, AShr, SRA)
+HELPER_REGISTER_BINARY_INT_VP(vp_ashr, VP_SRA, AShr, SRA)
// llvm.vp.lshr(x,y,mask,vlen)
-HELPER_REGISTER_BINARY_INT_VP(vp_lshr, VP_LSHR, LShr, SRL)
+HELPER_REGISTER_BINARY_INT_VP(vp_lshr, VP_SRL, LShr, SRL)
// llvm.vp.mul(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_mul, VP_MUL, Mul, MUL)
diff --git a/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h b/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h
index c450acda82ad..f1337e82485c 100644
--- a/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h
+++ b/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h
@@ -271,12 +271,13 @@ public:
const lto::InputFile &File);
/**
- * Compute the list of summaries needed for importing into module.
+ * Compute the list of summaries and the subset of declaration summaries
+ * needed for importing into the module.
*/
void gatherImportedSummariesForModule(
Module &Module, ModuleSummaryIndex &Index,
std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex,
- const lto::InputFile &File);
+ GVSummaryPtrSet &DecSummaries, const lto::InputFile &File);
/**
* Perform internalization. Index is updated to reflect linkage changes.
diff --git a/llvm/include/llvm/MC/MCStreamer.h b/llvm/include/llvm/MC/MCStreamer.h
index 69867620e1bf..b7468cf70a66 100644
--- a/llvm/include/llvm/MC/MCStreamer.h
+++ b/llvm/include/llvm/MC/MCStreamer.h
@@ -245,7 +245,7 @@ class MCStreamer {
/// requires.
unsigned NextWinCFIID = 0;
- bool UseAssemblerInfoForParsing;
+ bool UseAssemblerInfoForParsing = true;
/// Is the assembler allowed to insert padding automatically? For
/// correctness reasons, we sometimes need to ensure instructions aren't
@@ -296,6 +296,8 @@ public:
MCContext &getContext() const { return Context; }
+ // MCObjectStreamer has an MCAssembler and allows more expression folding at
+ // parse time.
virtual MCAssembler *getAssemblerPtr() { return nullptr; }
void setUseAssemblerInfoForParsing(bool v) { UseAssemblerInfoForParsing = v; }
diff --git a/llvm/include/llvm/MCA/IncrementalSourceMgr.h b/llvm/include/llvm/MCA/IncrementalSourceMgr.h
index d53f1138b940..81f9b51cf42f 100644
--- a/llvm/include/llvm/MCA/IncrementalSourceMgr.h
+++ b/llvm/include/llvm/MCA/IncrementalSourceMgr.h
@@ -41,7 +41,7 @@ class IncrementalSourceMgr : public SourceMgr {
bool EOS = false;
/// Called when an instruction is no longer needed.
- using InstFreedCallback = llvm::function_ref<void(Instruction *)>;
+ using InstFreedCallback = std::function<void(Instruction *)>;
InstFreedCallback InstFreedCB;
public:
diff --git a/llvm/include/llvm/MCA/InstrBuilder.h b/llvm/include/llvm/MCA/InstrBuilder.h
index c8619af04b33..00c7942e4fa1 100644
--- a/llvm/include/llvm/MCA/InstrBuilder.h
+++ b/llvm/include/llvm/MCA/InstrBuilder.h
@@ -78,9 +78,9 @@ class InstrBuilder {
bool FirstCallInst;
bool FirstReturnInst;
+ unsigned CallLatency;
- using InstRecycleCallback =
- llvm::function_ref<Instruction *(const InstrDesc &)>;
+ using InstRecycleCallback = std::function<Instruction *(const InstrDesc &)>;
InstRecycleCallback InstRecycleCB;
Expected<const InstrDesc &>
@@ -99,7 +99,7 @@ class InstrBuilder {
public:
InstrBuilder(const MCSubtargetInfo &STI, const MCInstrInfo &MCII,
const MCRegisterInfo &RI, const MCInstrAnalysis *IA,
- const InstrumentManager &IM);
+ const InstrumentManager &IM, unsigned CallLatency);
void clear() {
Descriptors.clear();
diff --git a/llvm/include/llvm/Object/ELF.h b/llvm/include/llvm/Object/ELF.h
index 26e38075f8db..0986379ce76f 100644
--- a/llvm/include/llvm/Object/ELF.h
+++ b/llvm/include/llvm/Object/ELF.h
@@ -321,7 +321,7 @@ public:
std::vector<Elf_Rel> decode_relrs(Elf_Relr_Range relrs) const;
- uint64_t crelHeader(ArrayRef<uint8_t> Content) const;
+ Expected<uint64_t> getCrelHeader(ArrayRef<uint8_t> Content) const;
using RelsOrRelas = std::pair<std::vector<Elf_Rel>, std::vector<Elf_Rela>>;
Expected<RelsOrRelas> decodeCrel(ArrayRef<uint8_t> Content) const;
Expected<RelsOrRelas> crels(const Elf_Shdr &Sec) const;
diff --git a/llvm/include/llvm/Object/ELFTypes.h b/llvm/include/llvm/Object/ELFTypes.h
index aa07f4cacc4a..e2ac4d169c62 100644
--- a/llvm/include/llvm/Object/ELFTypes.h
+++ b/llvm/include/llvm/Object/ELFTypes.h
@@ -484,6 +484,7 @@ struct Elf_Rel_Impl<ELFType<Endianness, true>, true>
Elf_Sxword r_addend; // Compute value for relocatable field by adding this.
};
+// In-memory representation. The serialized representation uses LEB128.
template <bool Is64> struct Elf_Crel_Impl {
using uint = std::conditional_t<Is64, uint64_t, uint32_t>;
static const bool IsRela = true;
diff --git a/llvm/include/llvm/Object/ObjectFile.h b/llvm/include/llvm/Object/ObjectFile.h
index 8c868c7643ed..f49763e31a9c 100644
--- a/llvm/include/llvm/Object/ObjectFile.h
+++ b/llvm/include/llvm/Object/ObjectFile.h
@@ -302,6 +302,7 @@ protected:
public:
ObjectFile() = delete;
ObjectFile(const ObjectFile &other) = delete;
+ ObjectFile &operator=(const ObjectFile &other) = delete;
uint64_t getCommonSymbolSize(DataRefImpl Symb) const {
Expected<uint32_t> SymbolFlagsOrErr = getSymbolFlags(Symb);
diff --git a/llvm/include/llvm/Option/ArgList.h b/llvm/include/llvm/Option/ArgList.h
index fcde68e0b7fe..09812f976d01 100644
--- a/llvm/include/llvm/Option/ArgList.h
+++ b/llvm/include/llvm/Option/ArgList.h
@@ -319,11 +319,15 @@ public:
}
/// Render only the last argument match \p Id0, if present.
- template<typename ...OptSpecifiers>
- void AddLastArg(ArgStringList &Output, OptSpecifiers ...Ids) const {
+ template <typename... OptSpecifiers>
+ void addLastArg(ArgStringList &Output, OptSpecifiers... Ids) const {
if (Arg *A = getLastArg(Ids...)) // Calls claim() on all Ids's Args.
A->render(*this, Output);
}
+ template <typename... OptSpecifiers>
+ void AddLastArg(ArgStringList &Output, OptSpecifiers... Ids) const {
+ addLastArg(Output, Ids...);
+ }
/// AddAllArgsExcept - Render all arguments matching any of the given ids
/// and not matching any of the excluded ids.
diff --git a/llvm/include/llvm/ProfileData/InstrProf.h b/llvm/include/llvm/ProfileData/InstrProf.h
index 88c7fe425b5a..2cee928b210e 100644
--- a/llvm/include/llvm/ProfileData/InstrProf.h
+++ b/llvm/include/llvm/ProfileData/InstrProf.h
@@ -385,8 +385,9 @@ struct TemporalProfTraceTy {
/// Use a set of temporal profile traces to create a list of balanced
/// partitioning function nodes used by BalancedPartitioning to generate a
/// function order that reduces page faults during startup
- static std::vector<BPFunctionNode>
- createBPFunctionNodes(ArrayRef<TemporalProfTraceTy> Traces);
+ static void createBPFunctionNodes(ArrayRef<TemporalProfTraceTy> Traces,
+ std::vector<BPFunctionNode> &Nodes,
+ bool RemoveOutlierUNs = true);
};
inline std::error_code make_error_code(instrprof_error E) {
@@ -1184,35 +1185,32 @@ inline uint64_t ComputeHash(StringRef K) { return ComputeHash(HashType, K); }
// data file in indexed-format. Please update llvm/docs/InstrProfileFormat.rst
// as appropriate when updating the indexed profile format.
struct Header {
- uint64_t Magic;
+ uint64_t Magic = IndexedInstrProf::Magic;
// The lower 32 bits specify the version of the indexed profile.
// The most significant 32 bits are reserved to specify the variant types of
// the profile.
- uint64_t Version;
- uint64_t Unused; // Becomes unused since version 4
- uint64_t HashType;
+ uint64_t Version = 0;
+ uint64_t Unused = 0; // Becomes unused since version 4
+ uint64_t HashType = static_cast<uint64_t>(IndexedInstrProf::HashType);
// This field records the offset of this hash table's metadata (i.e., the
// number of buckets and entries), which follows right after the payload of
// the entire hash table.
- uint64_t HashOffset;
- uint64_t MemProfOffset;
- uint64_t BinaryIdOffset;
- uint64_t TemporalProfTracesOffset;
- uint64_t VTableNamesOffset;
+ uint64_t HashOffset = 0;
+ uint64_t MemProfOffset = 0;
+ uint64_t BinaryIdOffset = 0;
+ uint64_t TemporalProfTracesOffset = 0;
+ uint64_t VTableNamesOffset = 0;
// New fields should only be added at the end to ensure that the size
// computation is correct. The methods below need to be updated to ensure that
// the new field is read correctly.
- // Reads a header struct from the buffer.
+ // Reads a header struct from the buffer. Header fields are in machine native
+ // endianness.
static Expected<Header> readFromBuffer(const unsigned char *Buffer);
// Returns the size of the header in bytes for all valid fields based on the
// version. I.e a older version header will return a smaller size.
size_t size() const;
-
- // Returns the format version in little endian. The header retains the version
- // in native endian of the compiler runtime.
- uint64_t formatVersion() const;
};
// Profile summary data recorded in the profile data file in indexed
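
A sketch of the revised out-parameter form (Traces is a hypothetical vector of traces):

    std::vector<BPFunctionNode> Nodes;
    TemporalProfTraceTy::createBPFunctionNodes(Traces, Nodes,
                                               /*RemoveOutlierUNs=*/true);
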
diff --git a/llvm/include/llvm/ProfileData/InstrProfReader.h b/llvm/include/llvm/ProfileData/InstrProfReader.h
index 9b35768205f9..46aa1b6c2bfe 100644
--- a/llvm/include/llvm/ProfileData/InstrProfReader.h
+++ b/llvm/include/llvm/ProfileData/InstrProfReader.h
@@ -649,6 +649,8 @@ public:
class IndexedMemProfReader {
private:
+ /// The MemProf version.
+ memprof::IndexedVersion Version = memprof::Version0;
/// MemProf profile schema (if available).
memprof::MemProfSchema Schema;
/// MemProf record profile data on-disk indexed via llvm::md5(FunctionName).
diff --git a/llvm/include/llvm/ProfileData/InstrProfWriter.h b/llvm/include/llvm/ProfileData/InstrProfWriter.h
index 1714f3b6cf3a..b8b6c684717b 100644
--- a/llvm/include/llvm/ProfileData/InstrProfWriter.h
+++ b/llvm/include/llvm/ProfileData/InstrProfWriter.h
@@ -212,6 +212,15 @@ private:
void addTemporalProfileTrace(TemporalProfTraceTy Trace);
Error writeImpl(ProfOStream &OS);
+
+ // Writes known header fields and reserves space for fields whose values are
+ // known only after payloads are written. Returns the start byte offset for
+ // back patching.
+ uint64_t writeHeader(const IndexedInstrProf::Header &header,
+ const bool WritePrevVersion, ProfOStream &OS);
+
+ // Writes compressed vtable names to profiles.
+ Error writeVTableNames(ProfOStream &OS);
};
} // end namespace llvm
diff --git a/llvm/include/llvm/ProfileData/SampleProfReader.h b/llvm/include/llvm/ProfileData/SampleProfReader.h
index 9e8f543909cd..d7c70064ca42 100644
--- a/llvm/include/llvm/ProfileData/SampleProfReader.h
+++ b/llvm/include/llvm/ProfileData/SampleProfReader.h
@@ -274,8 +274,8 @@ public:
/// Create a remapper from the given remapping file. The remapper will
/// be used for profile read in by Reader.
static ErrorOr<std::unique_ptr<SampleProfileReaderItaniumRemapper>>
- create(const std::string Filename, vfs::FileSystem &FS,
- SampleProfileReader &Reader, LLVMContext &C);
+ create(StringRef Filename, vfs::FileSystem &FS, SampleProfileReader &Reader,
+ LLVMContext &C);
/// Create a remapper from the given Buffer. The remapper will
/// be used for profile read in by Reader.
@@ -436,9 +436,9 @@ public:
/// Create a remapper underlying if RemapFilename is not empty.
/// Parameter P specifies the FSDiscriminatorPass.
static ErrorOr<std::unique_ptr<SampleProfileReader>>
- create(const std::string Filename, LLVMContext &C, vfs::FileSystem &FS,
+ create(StringRef Filename, LLVMContext &C, vfs::FileSystem &FS,
FSDiscriminatorPass P = FSDiscriminatorPass::Base,
- const std::string RemapFilename = "");
+ StringRef RemapFilename = "");
/// Create a sample profile reader from the supplied memory buffer.
/// Create a remapper underlying if RemapFilename is not empty.
@@ -446,7 +446,7 @@ public:
static ErrorOr<std::unique_ptr<SampleProfileReader>>
create(std::unique_ptr<MemoryBuffer> &B, LLVMContext &C, vfs::FileSystem &FS,
FSDiscriminatorPass P = FSDiscriminatorPass::Base,
- const std::string RemapFilename = "");
+ StringRef RemapFilename = "");
/// Return the profile summary.
ProfileSummary &getSummary() const { return *(Summary.get()); }
diff --git a/llvm/include/llvm/Support/CMakeLists.txt b/llvm/include/llvm/Support/CMakeLists.txt
index 76c382bfc644..e34c11b2d086 100644
--- a/llvm/include/llvm/Support/CMakeLists.txt
+++ b/llvm/include/llvm/Support/CMakeLists.txt
@@ -48,4 +48,4 @@ set_source_files_properties("${version_inc}"
HEADER_FILE_ONLY TRUE)
add_custom_target(llvm_vcsrevision_h ALL DEPENDS "${generated_files}")
-set_target_properties(llvm_vcsrevision_h PROPERTIES FOLDER "Misc")
+set_target_properties(llvm_vcsrevision_h PROPERTIES FOLDER "LLVM/Resources")
diff --git a/llvm/include/llvm/Support/Error.h b/llvm/include/llvm/Support/Error.h
index 217130ce293a..662c3ea46e3c 100644
--- a/llvm/include/llvm/Support/Error.h
+++ b/llvm/include/llvm/Support/Error.h
@@ -1236,10 +1236,10 @@ class StringError : public ErrorInfo<StringError> {
public:
static char ID;
- // Prints EC + S and converts to EC
+ StringError(std::string &&S, std::error_code EC, bool PrintMsgOnly);
+ /// Prints EC + S and converts to EC.
StringError(std::error_code EC, const Twine &S = Twine());
-
- // Prints S and converts to EC
+ /// Prints S and converts to EC.
StringError(const Twine &S, std::error_code EC);
void log(raw_ostream &OS) const override;
@@ -1258,15 +1258,23 @@ template <typename... Ts>
inline Error createStringError(std::error_code EC, char const *Fmt,
const Ts &... Vals) {
std::string Buffer;
- raw_string_ostream Stream(Buffer);
- Stream << format(Fmt, Vals...);
- return make_error<StringError>(Stream.str(), EC);
+ raw_string_ostream(Buffer) << format(Fmt, Vals...);
+ return make_error<StringError>(Buffer, EC);
}
-Error createStringError(std::error_code EC, char const *Msg);
+Error createStringError(std::string &&Msg, std::error_code EC);
+
+inline Error createStringError(std::error_code EC, const char *S) {
+ return createStringError(std::string(S), EC);
+}
inline Error createStringError(std::error_code EC, const Twine &S) {
- return createStringError(EC, S.str().c_str());
+ return createStringError(S.str(), EC);
+}
+
+/// Create a StringError with an inconvertible error code.
+inline Error createStringError(const Twine &S) {
+ return createStringError(llvm::inconvertibleErrorCode(), S);
}
template <typename... Ts>
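
A sketch of the new message-only overload (Section is a hypothetical type): when no meaningful error_code exists, the message alone now suffices.

    Expected<Section> parseSection(StringRef Name) {
      if (Name != ".text")
        // Uses inconvertibleErrorCode() internally.
        return createStringError("unsupported section kind: " + Name);
      return Section{};
    }
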
diff --git a/llvm/include/llvm/Support/KnownBits.h b/llvm/include/llvm/Support/KnownBits.h
index 9b7f405b6256..ba4a5f01036c 100644
--- a/llvm/include/llvm/Support/KnownBits.h
+++ b/llvm/include/llvm/Support/KnownBits.h
@@ -354,6 +354,18 @@ public:
/// Compute knownbits resulting from llvm.usub.sat(LHS, RHS)
static KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS);
+ /// Compute knownbits resulting from APIntOps::avgFloorS
+ static KnownBits avgFloorS(const KnownBits &LHS, const KnownBits &RHS);
+
+ /// Compute knownbits resulting from APIntOps::avgFloorU
+ static KnownBits avgFloorU(const KnownBits &LHS, const KnownBits &RHS);
+
+ /// Compute knownbits resulting from APIntOps::avgCeilS
+ static KnownBits avgCeilS(const KnownBits &LHS, const KnownBits &RHS);
+
+ /// Compute knownbits resulting from APIntOps::avgCeilU
+ static KnownBits avgCeilU(const KnownBits &LHS, const KnownBits &RHS);
+
/// Compute known bits resulting from multiplying LHS and RHS.
static KnownBits mul(const KnownBits &LHS, const KnownBits &RHS,
bool NoUndefSelfMultiply = false);
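
A minimal sketch of the new helpers: if both 8-bit operands are known to have a clear sign bit, the unsigned floor-average does as well.

    KnownBits LHS(8), RHS(8);
    LHS.Zero.setSignBit(); // LHS is known to be < 128.
    RHS.Zero.setSignBit(); // RHS is known to be < 128.
    // floor((LHS + RHS) / 2) < 128, so Avg has a known-zero sign bit too.
    KnownBits Avg = KnownBits::avgFloorU(LHS, RHS);
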
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 5d4b5a2479f6..8012f9192277 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -220,6 +220,13 @@ def idempotent_prop : GICombineRule<
(match (idempotent_prop_frags $dst, $src)),
(apply (GIReplaceReg $dst, $src))>;
+// Convert freeze(Op(Op0, NonPoisonOps...)) to Op(freeze(Op0), NonPoisonOps...)
+// when Op0 is not guaranteed non-poison
+def push_freeze_to_prevent_poison_from_propagating : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (G_FREEZE $dst, $src):$root,
+ [{ return !isGuaranteedNotToBePoison(${src}.getReg(), MRI) && Helper.matchFreezeOfSingleMaybePoisonOperand(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
def extending_loads : GICombineRule<
(defs root:$root, extending_load_matchdata:$matchinfo),
@@ -1634,6 +1641,78 @@ extract_vector_element_shuffle_vector,
insert_vector_element_extract_vector_element
]>;
+
+// fold ((0-A) + B) -> B-A
+def ZeroMinusAPlusB : GICombineRule<
+ (defs root:$root),
+ (match (G_SUB $sub, 0, $A),
+ (G_ADD $root, $sub, $B)),
+ (apply (G_SUB $root, $B, $A))>;
+
+// fold (A + (0-B)) -> A-B
+def APlusZeroMinusB : GICombineRule<
+ (defs root:$root),
+ (match (G_SUB $sub, 0, $B),
+ (G_ADD $root, $A, $sub)),
+ (apply (G_SUB $root, $A, $B))>;
+
+// fold (A+(B-A)) -> B
+def APlusBMinusB : GICombineRule<
+ (defs root:$root),
+ (match (G_SUB $sub, $B, $A),
+ (G_ADD $root, $A, $sub)),
+ (apply (GIReplaceReg $root, $B))>;
+
+// fold ((B-A)+A) -> B
+def BMinusAPlusA : GICombineRule<
+ (defs root:$root),
+ (match (G_SUB $sub, $B, $A),
+ (G_ADD $root, $sub, $A)),
+ (apply (GIReplaceReg $root, $B))>;
+
+// fold ((A-B)+(C-A)) -> (C-B)
+def AMinusBPlusCMinusA : GICombineRule<
+ (defs root:$root),
+ (match (G_SUB $sub1, $A, $B),
+ (G_SUB $sub2, $C, $A),
+ (G_ADD $root, $sub1, $sub2)),
+ (apply (G_SUB $root, $C, $B))>;
+
+// fold ((A-B)+(B-C)) -> (A-C)
+def AMinusBPlusBMinusC : GICombineRule<
+ (defs root:$root),
+ (match (G_SUB $sub1, $A, $B),
+ (G_SUB $sub2, $B, $C),
+ (G_ADD $root, $sub1, $sub2)),
+ (apply (G_SUB $root, $A, $C))>;
+
+// fold (A+(B-(A+C))) to (B-C)
+def APlusBMinusAplusC : GICombineRule<
+ (defs root:$root),
+ (match (G_ADD $add1, $A, $C),
+ (G_SUB $sub1, $B, $add1),
+ (G_ADD $root, $A, $sub1)),
+ (apply (G_SUB $root, $B, $C))>;
+
+// fold (A+(B-(C+A))) to (B-C)
+def APlusBMinusCPlusA : GICombineRule<
+ (defs root:$root),
+ (match (G_ADD $add1, $C, $A),
+ (G_SUB $sub1, $B, $add1),
+ (G_ADD $root, $A, $sub1)),
+ (apply (G_SUB $root, $B, $C))>;
+
+def integer_reassoc_combines: GICombineGroup<[
+ ZeroMinusAPlusB,
+ APlusZeroMinusB,
+ APlusBMinusB,
+ BMinusAPlusA,
+ AMinusBPlusCMinusA,
+ AMinusBPlusBMinusC,
+ APlusBMinusAplusC,
+ APlusBMinusCPlusA
+]>;
+
// FIXME: These should use the custom predicate feature once it lands.
def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
undef_to_negative_one,
@@ -1691,7 +1770,8 @@ def fma_combines : GICombineGroup<[combine_fadd_fmul_to_fmad_or_fma,
def constant_fold_binops : GICombineGroup<[constant_fold_binop,
constant_fold_fp_binop]>;
-def all_combines : GICombineGroup<[trivial_combines, vector_ops_combines,
+def all_combines : GICombineGroup<[integer_reassoc_combines, trivial_combines,
+ vector_ops_combines,
insert_vec_elt_combines, extract_vec_elt_combines, combines_for_extload,
combine_extracted_vector_load,
undef_combines, identity_combines, phi_combines,
@@ -1713,7 +1793,8 @@ def all_combines : GICombineGroup<[trivial_combines, vector_ops_combines,
sub_add_reg, select_to_minmax, redundant_binop_in_equality,
fsub_to_fneg, commute_constant_to_rhs, match_ands, match_ors,
combine_concat_vector, double_icmp_zero_and_or_combine, match_addos,
- sext_trunc, zext_trunc, combine_shuffle_concat]>;
+ sext_trunc, zext_trunc, combine_shuffle_concat,
+ push_freeze_to_prevent_poison_from_propagating]>;
// A combine group used to for prelegalizer combiners at -O0. The combines in
// this group have been selected based on experiments to balance code size and
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index 1684b424e3b4..1c95a6090984 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -884,6 +884,9 @@ class PatFrags<dag ops, list<dag> frags, code pred = [{}],
// If set to true, a predicate is added that checks for the absence of use of
// the first result.
bit HasNoUse = ?;
+ // If set to true, a predicate is added that checks for the sole use of
+ // the first result.
+ bit HasOneUse = ?;
// Is the desired pre-packaged predicate for a load?
bit IsLoad = ?;
diff --git a/llvm/include/llvm/TargetParser/X86TargetParser.def b/llvm/include/llvm/TargetParser/X86TargetParser.def
index 5670767ff7ed..8daa8a689c95 100644
--- a/llvm/include/llvm/TargetParser/X86TargetParser.def
+++ b/llvm/include/llvm/TargetParser/X86TargetParser.def
@@ -159,20 +159,20 @@ X86_FEATURE_COMPAT(AVX512VL, "avx512vl", 20)
X86_FEATURE_COMPAT(AVX512BW, "avx512bw", 21)
X86_FEATURE_COMPAT(AVX512DQ, "avx512dq", 22)
X86_FEATURE_COMPAT(AVX512CD, "avx512cd", 23)
-X86_FEATURE_COMPAT(AVX512ER, "avx512er", 24)
-X86_FEATURE_COMPAT(AVX512PF, "avx512pf", 25)
-X86_FEATURE_COMPAT(AVX512VBMI, "avx512vbmi", 26)
-X86_FEATURE_COMPAT(AVX512IFMA, "avx512ifma", 27)
-X86_FEATURE_COMPAT(AVX5124VNNIW, "avx5124vnniw", 28)
-X86_FEATURE_COMPAT(AVX5124FMAPS, "avx5124fmaps", 29)
-X86_FEATURE_COMPAT(AVX512VPOPCNTDQ, "avx512vpopcntdq", 30)
-X86_FEATURE_COMPAT(AVX512VBMI2, "avx512vbmi2", 31)
-X86_FEATURE_COMPAT(GFNI, "gfni", 32)
-X86_FEATURE_COMPAT(VPCLMULQDQ, "vpclmulqdq", 33)
-X86_FEATURE_COMPAT(AVX512VNNI, "avx512vnni", 34)
-X86_FEATURE_COMPAT(AVX512BITALG, "avx512bitalg", 35)
-X86_FEATURE_COMPAT(AVX512BF16, "avx512bf16", 36)
-X86_FEATURE_COMPAT(AVX512VP2INTERSECT, "avx512vp2intersect", 37)
+X86_FEATURE (NF, "nf")
+X86_FEATURE (CF, "cf")
+X86_FEATURE_COMPAT(AVX512VBMI, "avx512vbmi", 24)
+X86_FEATURE_COMPAT(AVX512IFMA, "avx512ifma", 25)
+X86_FEATURE_COMPAT(AVX5124VNNIW, "avx5124vnniw", 26)
+X86_FEATURE_COMPAT(AVX5124FMAPS, "avx5124fmaps", 27)
+X86_FEATURE_COMPAT(AVX512VPOPCNTDQ, "avx512vpopcntdq", 28)
+X86_FEATURE_COMPAT(AVX512VBMI2, "avx512vbmi2", 29)
+X86_FEATURE_COMPAT(GFNI, "gfni", 30)
+X86_FEATURE_COMPAT(VPCLMULQDQ, "vpclmulqdq", 31)
+X86_FEATURE_COMPAT(AVX512VNNI, "avx512vnni", 32)
+X86_FEATURE_COMPAT(AVX512BITALG, "avx512bitalg", 33)
+X86_FEATURE_COMPAT(AVX512BF16, "avx512bf16", 34)
+X86_FEATURE_COMPAT(AVX512VP2INTERSECT, "avx512vp2intersect", 35)
// Below Features has some missings comparing to gcc, it's because gcc has some
// not one-to-one mapped in llvm.
X86_FEATURE_COMPAT(3DNOW, "3dnow", 0)
@@ -202,7 +202,7 @@ X86_FEATURE_COMPAT(MWAITX, "mwaitx", 0)
X86_FEATURE (X87, "x87")
X86_FEATURE_COMPAT(PCONFIG, "pconfig", 0)
X86_FEATURE_COMPAT(PKU, "pku", 0)
-X86_FEATURE_COMPAT(PREFETCHWT1, "prefetchwt1", 0)
+X86_FEATURE (EVEX512, "evex512")
X86_FEATURE_COMPAT(PRFCHW, "prfchw", 0)
X86_FEATURE_COMPAT(PTWRITE, "ptwrite", 0)
X86_FEATURE_COMPAT(RDPID, "rdpid", 0)
@@ -252,9 +252,6 @@ X86_FEATURE (EGPR, "egpr")
X86_FEATURE_COMPAT(USERMSR, "usermsr", 0)
X86_FEATURE_COMPAT(AVX10_1, "avx10.1-256", 0)
X86_FEATURE_COMPAT(AVX10_1_512, "avx10.1-512", 0)
-X86_FEATURE (EVEX512, "evex512")
-X86_FEATURE (NF, "nf")
-X86_FEATURE (CF, "cf")
// These features aren't really CPU features, but the frontend can set them.
X86_FEATURE (RETPOLINE_EXTERNAL_THUNK, "retpoline-external-thunk")
X86_FEATURE (RETPOLINE_INDIRECT_BRANCHES, "retpoline-indirect-branches")
diff --git a/llvm/include/llvm/Transforms/IPO/FunctionImport.h b/llvm/include/llvm/Transforms/IPO/FunctionImport.h
index c4d19e8641ec..72a0823c6627 100644
--- a/llvm/include/llvm/Transforms/IPO/FunctionImport.h
+++ b/llvm/include/llvm/Transforms/IPO/FunctionImport.h
@@ -31,9 +31,9 @@ class Module;
/// based on the provided summary informations.
class FunctionImporter {
public:
- /// Set of functions to import from a source module. Each entry is a set
- /// containing all the GUIDs of all functions to import for a source module.
- using FunctionsToImportTy = std::unordered_set<GlobalValue::GUID>;
+ /// The functions to import from a source module and their import type.
+ using FunctionsToImportTy =
+ DenseMap<GlobalValue::GUID, GlobalValueSummary::ImportKind>;
/// The different reasons selectCallee will chose not to import a
/// candidate.
@@ -99,8 +99,13 @@ public:
/// index's module path string table).
using ImportMapTy = DenseMap<StringRef, FunctionsToImportTy>;
- /// The set contains an entry for every global value the module exports.
- using ExportSetTy = DenseSet<ValueInfo>;
+ /// The map contains an entry for every global value the module exports.
+ /// The key is ValueInfo, and the value indicates whether the definition
+ /// or declaration is visible to another module. If a function's definition is
+/// visible to other modules, the global values this function references are
+ /// visible and shouldn't be internalized.
+ /// TODO: Rename to `ExportMapTy`.
+ using ExportSetTy = DenseMap<ValueInfo, GlobalValueSummary::ImportKind>;
/// A function of this type is used to load modules referenced by the index.
using ModuleLoaderTy =
@@ -207,11 +212,15 @@ bool convertToDeclaration(GlobalValue &GV);
/// \p ModuleToSummariesForIndex will be populated with the needed summaries
/// from each required module path. Use a std::map instead of StringMap to get
/// stable order for bitcode emission.
+///
+/// \p DecSummaries will be populated with the subset of summary pointers
+/// that have 'declaration' import type among all summaries the module needs.
void gatherImportedSummariesForModule(
StringRef ModulePath,
const DenseMap<StringRef, GVSummaryMapTy> &ModuleToDefinedGVSummaries,
const FunctionImporter::ImportMapTy &ImportList,
- std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex);
+ std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex,
+ GVSummaryPtrSet &DecSummaries);
/// Emit into \p OutputFilename the files module \p ModulePath will import from.
std::error_code EmitImportsFiles(
diff --git a/llvm/include/llvm/Transforms/Utils/CallPromotionUtils.h b/llvm/include/llvm/Transforms/Utils/CallPromotionUtils.h
index fcb384ec3613..385831f45703 100644
--- a/llvm/include/llvm/Transforms/Utils/CallPromotionUtils.h
+++ b/llvm/include/llvm/Transforms/Utils/CallPromotionUtils.h
@@ -15,9 +15,12 @@
#define LLVM_TRANSFORMS_UTILS_CALLPROMOTIONUTILS_H
namespace llvm {
+template <typename T> class ArrayRef;
+class Constant;
class CallBase;
class CastInst;
class Function;
+class Instruction;
class MDNode;
class Value;
@@ -41,7 +44,9 @@ bool isLegalToPromote(const CallBase &CB, Function *Callee,
CallBase &promoteCall(CallBase &CB, Function *Callee,
CastInst **RetBitCast = nullptr);
-/// Promote the given indirect call site to conditionally call \p Callee.
+/// Promote the given indirect call site to conditionally call \p Callee. The
+/// promoted direct call instruction is predicated on `CB.getCalledOperand() ==
+/// Callee`.
///
/// This function creates an if-then-else structure at the location of the call
/// site. The original call site is moved into the "else" block. A clone of the
@@ -51,6 +56,22 @@ CallBase &promoteCall(CallBase &CB, Function *Callee,
CallBase &promoteCallWithIfThenElse(CallBase &CB, Function *Callee,
MDNode *BranchWeights = nullptr);
+/// This is similar to `promoteCallWithIfThenElse` except that the condition to
+/// promote a virtual call is that \p VPtr is the same as any of \p
+/// AddressPoints.
+///
+/// This function is expected to be used on virtual calls (a subset of indirect
+/// calls). \p VPtr is the virtual table address stored in the objects, and
+/// \p AddressPoints contains vtable address points. A vtable address point is
+/// a location inside the vtable that is referenced by the vptr of a C++ object.
+///
+/// TODO: sink the address-calculation instructions of the indirect callee
+/// into the indirect-call fallback after the transformation.
+CallBase &promoteCallWithVTableCmp(CallBase &CB, Instruction *VPtr,
+ Function *Callee,
+ ArrayRef<Constant *> AddressPoints,
+ MDNode *BranchWeights);
+
/// Try to promote (devirtualize) a virtual call on an Alloca. Return true on
/// success.
///
@@ -76,11 +97,11 @@ bool tryPromoteCall(CallBase &CB);
/// Predicate and clone the given call site.
///
-/// This function creates an if-then-else structure at the location of the call
-/// site. The "if" condition compares the call site's called value to the given
-/// callee. The original call site is moved into the "else" block, and a clone
-/// of the call site is placed in the "then" block. The cloned instruction is
-/// returned.
+/// This function creates an if-then-else structure at the location of the
+/// call site. The "if" condition compares the call site's called value to
+/// the given callee. The original call site is moved into the "else" block,
+/// and a clone of the call site is placed in the "then" block. The cloned
+/// instruction is returned.
CallBase &versionCallSite(CallBase &CB, Value *Callee, MDNode *BranchWeights);
} // end namespace llvm
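
A usage sketch with hypothetical names (OrigCall, VPtr, DirectCallee, AddrPoints): the promoted direct call is guarded by comparing the loaded vptr against each candidate address point.

    CallBase &Direct = promoteCallWithVTableCmp(
        OrigCall, VPtr, DirectCallee, AddrPoints, /*BranchWeights=*/nullptr);
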
diff --git a/llvm/lib/Analysis/CFG.cpp b/llvm/lib/Analysis/CFG.cpp
index 8528aa9f77e0..841b83505238 100644
--- a/llvm/lib/Analysis/CFG.cpp
+++ b/llvm/lib/Analysis/CFG.cpp
@@ -130,14 +130,21 @@ static const Loop *getOutermostLoop(const LoopInfo *LI, const BasicBlock *BB) {
return L ? L->getOutermostLoop() : nullptr;
}
-bool llvm::isPotentiallyReachableFromMany(
- SmallVectorImpl<BasicBlock *> &Worklist, const BasicBlock *StopBB,
- const SmallPtrSetImpl<BasicBlock *> *ExclusionSet, const DominatorTree *DT,
- const LoopInfo *LI) {
- // When the stop block is unreachable, it's dominated from everywhere,
+template <class StopSetT>
+static bool isReachableImpl(SmallVectorImpl<BasicBlock *> &Worklist,
+ const StopSetT &StopSet,
+ const SmallPtrSetImpl<BasicBlock *> *ExclusionSet,
+ const DominatorTree *DT, const LoopInfo *LI) {
+ // When a stop block is unreachable, it's dominated from everywhere,
// regardless of whether there's a path between the two blocks.
- if (DT && !DT->isReachableFromEntry(StopBB))
- DT = nullptr;
+ if (DT) {
+ for (auto *BB : StopSet) {
+ if (!DT->isReachableFromEntry(BB)) {
+ DT = nullptr;
+ break;
+ }
+ }
+ }
// We can't skip directly from a block that dominates the stop block if the
// exclusion block is potentially in between.
@@ -155,7 +162,13 @@ bool llvm::isPotentiallyReachableFromMany(
}
}
- const Loop *StopLoop = LI ? getOutermostLoop(LI, StopBB) : nullptr;
+ SmallPtrSet<const Loop *, 2> StopLoops;
+ if (LI) {
+ for (auto *StopSetBB : StopSet) {
+ if (const Loop *L = getOutermostLoop(LI, StopSetBB))
+ StopLoops.insert(L);
+ }
+ }
unsigned Limit = DefaultMaxBBsToExplore;
SmallPtrSet<const BasicBlock*, 32> Visited;
@@ -163,12 +176,16 @@ bool llvm::isPotentiallyReachableFromMany(
BasicBlock *BB = Worklist.pop_back_val();
if (!Visited.insert(BB).second)
continue;
- if (BB == StopBB)
+ if (StopSet.contains(BB))
return true;
if (ExclusionSet && ExclusionSet->count(BB))
continue;
- if (DT && DT->dominates(BB, StopBB))
- return true;
+ if (DT) {
+ if (llvm::any_of(StopSet, [&](const BasicBlock *StopBB) {
+ return DT->dominates(BB, StopBB);
+ }))
+ return true;
+ }
const Loop *Outer = nullptr;
if (LI) {
@@ -179,7 +196,7 @@ bool llvm::isPotentiallyReachableFromMany(
// excluded block. Clear Outer so we process BB's successors.
if (LoopsWithHoles.count(Outer))
Outer = nullptr;
- if (StopLoop && Outer == StopLoop)
+ if (StopLoops.contains(Outer))
return true;
}
@@ -204,6 +221,39 @@ bool llvm::isPotentiallyReachableFromMany(
return false;
}
+template <class T> class SingleEntrySet {
+public:
+ using const_iterator = const T *;
+
+ SingleEntrySet(T Elem) : Elem(Elem) {}
+
+ bool contains(T Other) const { return Elem == Other; }
+
+ const_iterator begin() const { return &Elem; }
+ const_iterator end() const { return &Elem + 1; }
+
+private:
+ T Elem;
+};
+
+bool llvm::isPotentiallyReachableFromMany(
+ SmallVectorImpl<BasicBlock *> &Worklist, const BasicBlock *StopBB,
+ const SmallPtrSetImpl<BasicBlock *> *ExclusionSet, const DominatorTree *DT,
+ const LoopInfo *LI) {
+ return isReachableImpl<SingleEntrySet<const BasicBlock *>>(
+ Worklist, SingleEntrySet<const BasicBlock *>(StopBB), ExclusionSet, DT,
+ LI);
+}
+
+bool llvm::isManyPotentiallyReachableFromMany(
+ SmallVectorImpl<BasicBlock *> &Worklist,
+ const SmallPtrSetImpl<const BasicBlock *> &StopSet,
+ const SmallPtrSetImpl<BasicBlock *> *ExclusionSet, const DominatorTree *DT,
+ const LoopInfo *LI) {
+ return isReachableImpl<SmallPtrSetImpl<const BasicBlock *>>(
+ Worklist, StopSet, ExclusionSet, DT, LI);
+}
+
bool llvm::isPotentiallyReachable(
const BasicBlock *A, const BasicBlock *B,
const SmallPtrSetImpl<BasicBlock *> *ExclusionSet, const DominatorTree *DT,
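
The SingleEntrySet adapter above is what lets a single templated implementation serve both the one-block and the many-block query. A minimal standalone sketch of the same idiom, compiled independently of LLVM (names are illustrative):

#include <cassert>

// Present one element through the subset of a set interface
// (contains/begin/end) that a generic algorithm needs.
template <class T> class SingleEntrySet {
public:
  using const_iterator = const T *;
  explicit SingleEntrySet(T Elem) : Elem(Elem) {}
  bool contains(T Other) const { return Elem == Other; }
  const_iterator begin() const { return &Elem; }
  const_iterator end() const { return &Elem + 1; }

private:
  T Elem;
};

// A generic "impl" that only iterates and tests membership, in the same
// spirit as isReachableImpl above.
template <class SetT> bool containsNegative(const SetT &Set) {
  for (int V : Set)
    if (V < 0)
      return true;
  return false;
}

int main() {
  assert(containsNegative(SingleEntrySet<int>(-3)));
  assert(!containsNegative(SingleEntrySet<int>(7)));
}
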
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 046a76945380..705377b97ed9 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -869,7 +869,6 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
bool InBounds = GEP->isInBounds();
Type *SrcElemTy = GEP->getSourceElementType();
- Type *ResElemTy = GEP->getResultElementType();
Type *ResTy = GEP->getType();
if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
return nullptr;
@@ -944,43 +943,18 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
return ConstantExpr::getIntToPtr(C, ResTy);
}
- // Otherwise form a regular getelementptr. Recompute the indices so that
- // we eliminate over-indexing of the notional static type array bounds.
- // This makes it easy to determine if the getelementptr is "inbounds".
-
- // For GEPs of GlobalValues, use the value type, otherwise use an i8 GEP.
- if (auto *GV = dyn_cast<GlobalValue>(Ptr))
- SrcElemTy = GV->getValueType();
- else
- SrcElemTy = Type::getInt8Ty(Ptr->getContext());
-
- if (!SrcElemTy->isSized())
- return nullptr;
-
- Type *ElemTy = SrcElemTy;
- SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
- if (Offset != 0)
- return nullptr;
-
- // Try to add additional zero indices to reach the desired result element
- // type.
- // TODO: Should we avoid extra zero indices if ResElemTy can't be reached and
- // we'll have to insert a bitcast anyway?
- while (ElemTy != ResElemTy) {
- Type *NextTy = GetElementPtrInst::getTypeAtIndex(ElemTy, (uint64_t)0);
- if (!NextTy)
- break;
-
- Indices.push_back(APInt::getZero(isa<StructType>(ElemTy) ? 32 : BitWidth));
- ElemTy = NextTy;
+ // Try to infer inbounds for GEPs of globals.
+ if (!InBounds && Offset.isNonNegative()) {
+ bool CanBeNull, CanBeFreed;
+ uint64_t DerefBytes =
+ Ptr->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
+ InBounds = DerefBytes != 0 && !CanBeNull && Offset.sle(DerefBytes);
}
- SmallVector<Constant *, 32> NewIdxs;
- for (const APInt &Index : Indices)
- NewIdxs.push_back(ConstantInt::get(
- Type::getIntNTy(Ptr->getContext(), Index.getBitWidth()), Index));
-
- return ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs, InBounds,
+ // Otherwise canonicalize this to a single ptradd.
+ LLVMContext &Ctx = Ptr->getContext();
+ return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ctx), Ptr,
+ ConstantInt::get(Ctx, Offset), InBounds,
InRange);
}
@@ -992,7 +966,8 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
ArrayRef<Constant *> Ops,
const DataLayout &DL,
- const TargetLibraryInfo *TLI) {
+ const TargetLibraryInfo *TLI,
+ bool AllowNonDeterministic) {
Type *DestTy = InstOrCE->getType();
if (Instruction::isUnaryOp(Opcode))
@@ -1011,7 +986,8 @@ Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
// TODO: If a constant expression is being folded rather than an
// instruction, denormals will not be flushed/treated as zero
if (const auto *I = dyn_cast<Instruction>(InstOrCE)) {
- return ConstantFoldFPInstOperands(Opcode, Ops[0], Ops[1], DL, I);
+ return ConstantFoldFPInstOperands(Opcode, Ops[0], Ops[1], DL, I,
+ AllowNonDeterministic);
}
}
return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);
@@ -1029,7 +1005,8 @@ Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
return C;
return ConstantExpr::getGetElementPtr(SrcElemTy, Ops[0], Ops.slice(1),
- GEP->isInBounds(), GEP->getInRange());
+ GEP->getNoWrapFlags(),
+ GEP->getInRange());
}
if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE)) {
@@ -1053,7 +1030,8 @@ Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
if (auto *F = dyn_cast<Function>(Ops.back())) {
const auto *Call = cast<CallBase>(InstOrCE);
if (canConstantFoldCallTo(Call, F))
- return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
+ return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI,
+ AllowNonDeterministic);
}
return nullptr;
case Instruction::Select:
@@ -1114,8 +1092,8 @@ ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
}
if (auto *CE = dyn_cast<ConstantExpr>(C)) {
- if (Constant *Res =
- ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI))
+ if (Constant *Res = ConstantFoldInstOperandsImpl(
+ CE, CE->getOpcode(), Ops, DL, TLI, /*AllowNonDeterministic=*/true))
return Res;
return const_cast<Constant *>(C);
}
@@ -1183,8 +1161,10 @@ Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
Constant *llvm::ConstantFoldInstOperands(Instruction *I,
ArrayRef<Constant *> Ops,
const DataLayout &DL,
- const TargetLibraryInfo *TLI) {
- return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
+ const TargetLibraryInfo *TLI,
+ bool AllowNonDeterministic) {
+ return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI,
+ AllowNonDeterministic);
}
Constant *llvm::ConstantFoldCompareInstOperands(
@@ -1357,7 +1337,8 @@ Constant *llvm::FlushFPConstant(Constant *Operand, const Instruction *I,
Constant *llvm::ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
Constant *RHS, const DataLayout &DL,
- const Instruction *I) {
+ const Instruction *I,
+ bool AllowNonDeterministic) {
if (Instruction::isBinaryOp(Opcode)) {
// Flush denormal inputs if needed.
Constant *Op0 = FlushFPConstant(LHS, I, /* IsOutput */ false);
@@ -1367,13 +1348,30 @@ Constant *llvm::ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
if (!Op1)
return nullptr;
+ // If nsz or an algebraic FMF flag is set, the result of the FP operation
+ // may change due to future optimization. Don't constant fold them if
+ // non-deterministic results are not allowed.
+ if (!AllowNonDeterministic)
+ if (auto *FP = dyn_cast_or_null<FPMathOperator>(I))
+ if (FP->hasNoSignedZeros() || FP->hasAllowReassoc() ||
+ FP->hasAllowContract() || FP->hasAllowReciprocal())
+ return nullptr;
+
// Calculate constant result.
Constant *C = ConstantFoldBinaryOpOperands(Opcode, Op0, Op1, DL);
if (!C)
return nullptr;
// Flush denormal output if needed.
- return FlushFPConstant(C, I, /* IsOutput */ true);
+ C = FlushFPConstant(C, I, /* IsOutput */ true);
+ if (!C)
+ return nullptr;
+
+ // The precise NaN value is non-deterministic.
+ if (!AllowNonDeterministic && C->isNaN())
+ return nullptr;
+
+ return C;
}
// If instruction lacks a parent/function and the denormal mode cannot be
// determined, use the default (IEEE).
@@ -3401,7 +3399,8 @@ Constant *llvm::ConstantFoldBinaryIntrinsic(Intrinsic::ID ID, Constant *LHS,
Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
ArrayRef<Constant *> Operands,
- const TargetLibraryInfo *TLI) {
+ const TargetLibraryInfo *TLI,
+ bool AllowNonDeterministic) {
if (Call->isNoBuiltin())
return nullptr;
if (!F->hasName())
@@ -3417,8 +3416,13 @@ Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
return nullptr;
}
- StringRef Name = F->getName();
+ // Conservatively assume that floating-point libcalls may be
+ // non-deterministic.
Type *Ty = F->getReturnType();
+ if (!AllowNonDeterministic && Ty->isFPOrFPVectorTy())
+ return nullptr;
+
+ StringRef Name = F->getName();
if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
return ConstantFoldFixedVectorCall(
Name, IID, FVTy, Operands, F->getParent()->getDataLayout(), TLI, Call);
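
The inbounds inference added above has a simple integer reading: a non-negative offset into a base pointer that is known non-null and dereferenceable for DerefBytes cannot leave the object. A standalone sketch under those assumptions (plain integers stand in for APInt and Value):

#include <cassert>
#include <cstdint>

// Mirror of the InBounds test above: DerefBytes != 0 && !CanBeNull &&
// 0 <= Offset <= DerefBytes.
bool canInferInBounds(int64_t Offset, uint64_t DerefBytes, bool CanBeNull) {
  return Offset >= 0 && DerefBytes != 0 && !CanBeNull &&
         static_cast<uint64_t>(Offset) <= DerefBytes;
}

int main() {
  assert(canInferInBounds(8, 16, /*CanBeNull=*/false));   // inside the object
  assert(!canInferInBounds(8, 16, /*CanBeNull=*/true));   // base may be null
  assert(!canInferInBounds(24, 16, /*CanBeNull=*/false)); // past the end
}
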
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 37a7259a5cd0..53a974c5294c 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -3954,12 +3954,14 @@ static Value *simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// LHS >s RHS.
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
- return ConstantExpr::getICmp(ICmpInst::ICMP_SLT, C,
- Constant::getNullValue(C->getType()));
+ return ConstantFoldCompareInstOperands(
+ ICmpInst::ICMP_SLT, C, Constant::getNullValue(C->getType()),
+ Q.DL);
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
- return ConstantExpr::getICmp(ICmpInst::ICMP_SGE, C,
- Constant::getNullValue(C->getType()));
+ return ConstantFoldCompareInstOperands(
+ ICmpInst::ICMP_SGE, C, Constant::getNullValue(C->getType()),
+ Q.DL);
// If LHS is non-negative then LHS <u RHS. If LHS is negative then
// LHS >u RHS.
@@ -5346,9 +5348,6 @@ static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
}
- // TODO: Look through bitcasts? What if the bitcast changes the vector element
- // size?
-
// The source operand is not a shuffle. Initialize the root vector value for
// this shuffle if that has not been done yet.
if (!RootVec)
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 13dec3b1e1b0..bc8b9b8479e4 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -203,11 +203,9 @@ RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
///
/// There is no conflict when the intervals are disjoint:
/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
-void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
- Type *AccessTy, bool WritePtr,
- unsigned DepSetId, unsigned ASId,
- PredicatedScalarEvolution &PSE,
- bool NeedsFreeze) {
+static std::pair<const SCEV *, const SCEV *>
+getStartAndEndForAccess(const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy,
+ PredicatedScalarEvolution &PSE) {
ScalarEvolution *SE = PSE.getSE();
const SCEV *ScStart;
@@ -215,9 +213,7 @@ void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
if (SE->isLoopInvariant(PtrExpr, Lp)) {
ScStart = ScEnd = PtrExpr;
- } else {
- const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr);
- assert(AR && "Invalid addrec expression");
+ } else if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr)) {
const SCEV *Ex = PSE.getBackedgeTakenCount();
ScStart = AR->getStart();
@@ -236,16 +232,33 @@ void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
ScStart = SE->getUMinExpr(ScStart, ScEnd);
ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
}
- }
+ } else
+ return {SE->getCouldNotCompute(), SE->getCouldNotCompute()};
+
assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");
// Add the size of the pointed element to ScEnd.
auto &DL = Lp->getHeader()->getModule()->getDataLayout();
- Type *IdxTy = DL.getIndexType(Ptr->getType());
+ Type *IdxTy = DL.getIndexType(PtrExpr->getType());
const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
+ return {ScStart, ScEnd};
+}
+
+/// Calculate the Start and End points of the memory access using
+/// getStartAndEndForAccess.
+void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
+ Type *AccessTy, bool WritePtr,
+ unsigned DepSetId, unsigned ASId,
+ PredicatedScalarEvolution &PSE,
+ bool NeedsFreeze) {
+ const auto &[ScStart, ScEnd] =
+ getStartAndEndForAccess(Lp, PtrExpr, AccessTy, PSE);
+ assert(!isa<SCEVCouldNotCompute>(ScStart) &&
+ !isa<SCEVCouldNotCompute>(ScEnd) &&
+ "must be able to compute both start and end expressions");
Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
NeedsFreeze);
}
@@ -379,9 +392,9 @@ void RuntimePointerChecking::generateChecks(
bool RuntimePointerChecking::needsChecking(
const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
- for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
- for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
- if (needsChecking(M.Members[I], N.Members[J]))
+ for (const auto &I : M.Members)
+ for (const auto &J : N.Members)
+ if (needsChecking(I, J))
return true;
return false;
}
@@ -395,9 +408,7 @@ static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
if (!C)
return nullptr;
- if (C->getValue()->isNegative())
- return J;
- return I;
+ return C->getValue()->isNegative() ? J : I;
}
bool RuntimeCheckingPtrGroup::addPointer(unsigned Index,
@@ -495,8 +506,8 @@ void RuntimePointerChecking::groupChecks(
DenseMap<Value *, SmallVector<unsigned>> PositionMap;
for (unsigned Index = 0; Index < Pointers.size(); ++Index) {
- auto Iter = PositionMap.insert({Pointers[Index].PointerValue, {}});
- Iter.first->second.push_back(Index);
+ auto [It, _] = PositionMap.insert({Pointers[Index].PointerValue, {}});
+ It->second.push_back(Index);
}
// We need to keep track of what pointers we've already seen so we
@@ -595,16 +606,16 @@ void RuntimePointerChecking::printChecks(
raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
unsigned Depth) const {
unsigned N = 0;
- for (const auto &Check : Checks) {
- const auto &First = Check.first->Members, &Second = Check.second->Members;
+ for (const auto &[Check1, Check2] : Checks) {
+ const auto &First = Check1->Members, &Second = Check2->Members;
OS.indent(Depth) << "Check " << N++ << ":\n";
- OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
+ OS.indent(Depth + 2) << "Comparing group (" << Check1 << "):\n";
for (unsigned K = 0; K < First.size(); ++K)
OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";
- OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
+ OS.indent(Depth + 2) << "Against group (" << Check2 << "):\n";
for (unsigned K = 0; K < Second.size(); ++K)
OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
}
@@ -1145,8 +1156,8 @@ bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
// First, count how many write and read accesses are in the alias set. Also
// collect MemAccessInfos for later.
SmallVector<MemAccessInfo, 4> AccessInfos;
- for (const Value *Ptr_ : ASPointers) {
- Value *Ptr = const_cast<Value *>(Ptr_);
+ for (const Value *ConstPtr : ASPointers) {
+ Value *Ptr = const_cast<Value *>(ConstPtr);
bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
if (IsWrite)
++NumWritePtrChecks;
@@ -1202,9 +1213,7 @@ bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
// We know that we need these checks, so we can now be more aggressive
// and add further checks if required (overflow checks).
CanDoAliasSetRT = true;
- for (auto Retry : Retries) {
- MemAccessInfo Access = Retry.first;
- Type *AccessTy = Retry.second;
+ for (const auto &[Access, AccessTy] : Retries) {
if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
DepSetId, TheLoop, RunningDepId, ASId,
ShouldCheckWrap, /*Assume=*/true)) {
@@ -1276,12 +1285,11 @@ void AccessAnalysis::processMemAccesses() {
LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
LLVM_DEBUG({
- for (auto A : Accesses)
- dbgs() << "\t" << *A.first.getPointer() << " ("
- << (A.first.getInt()
- ? "write"
- : (ReadOnlyPtr.count(A.first.getPointer()) ? "read-only"
- : "read"))
+ for (const auto &[A, _] : Accesses)
+ dbgs() << "\t" << *A.getPointer() << " ("
+ << (A.getInt() ? "write"
+ : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"
+ : "read"))
<< ")\n";
});
@@ -1310,16 +1318,16 @@ void AccessAnalysis::processMemAccesses() {
bool UseDeferred = SetIteration > 0;
PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
- for (const Value *Ptr_ : ASPointers) {
- Value *Ptr = const_cast<Value *>(Ptr_);
+ for (const Value *ConstPtr : ASPointers) {
+ Value *Ptr = const_cast<Value *>(ConstPtr);
// For a single memory access in AliasSetTracker, Accesses may contain
// both read and write, and they both need to be handled for CheckDeps.
- for (const auto &AC : S) {
- if (AC.first.getPointer() != Ptr)
+ for (const auto &[AC, _] : S) {
+ if (AC.getPointer() != Ptr)
continue;
- bool IsWrite = AC.first.getInt();
+ bool IsWrite = AC.getInt();
// If we're using the deferred access set, then it contains only
// reads.
@@ -1846,10 +1854,7 @@ static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
// (If so, then we have proven (**) because |Dist| >= -1*Dist)
const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
Minus = SE.getMinusSCEV(NegDist, CastedProduct);
- if (SE.isKnownPositive(Minus))
- return true;
-
- return false;
+ return SE.isKnownPositive(Minus);
}
/// Check the dependence for two accesses with the same stride \p Stride.
@@ -1977,6 +1982,23 @@ getDependenceDistanceStrideAndSize(
InnermostLoop))
return MemoryDepChecker::Dependence::IndirectUnsafe;
+ // Check if we can prove that Sink only accesses memory after Src's end or
+ // vice versa.
+ const auto &[SrcStart, SrcEnd] =
+ getStartAndEndForAccess(InnermostLoop, Src, ATy, PSE);
+ const auto &[SinkStart, SinkEnd] =
+ getStartAndEndForAccess(InnermostLoop, Sink, BTy, PSE);
+
+ if (!isa<SCEVCouldNotCompute>(SrcStart) &&
+ !isa<SCEVCouldNotCompute>(SrcEnd) &&
+ !isa<SCEVCouldNotCompute>(SinkStart) &&
+ !isa<SCEVCouldNotCompute>(SinkEnd)) {
+ if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SrcEnd, SinkStart))
+ return MemoryDepChecker::Dependence::NoDep;
+ if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SinkEnd, SrcStart))
+ return MemoryDepChecker::Dependence::NoDep;
+ }
+
// Need accesses with constant strides and the same direction. We don't want
// to vectorize "A[B[i]] += ..." and similar code or pointer arithmetic that
// could wrap in the address space.
@@ -2020,7 +2042,7 @@ MemoryDepChecker::Dependence::DepType MemoryDepChecker::isDependent(
if (isa<SCEVCouldNotCompute>(Dist)) {
// TODO: Relax requirement that there is a common stride to retry with
// non-constant distance dependencies.
- FoundNonConstantDistanceDependence |= !!CommonStride;
+ FoundNonConstantDistanceDependence |= CommonStride.has_value();
LLVM_DEBUG(dbgs() << "LAA: Dependence because of uncomputable distance.\n");
return Dependence::Unknown;
}
@@ -2063,11 +2085,10 @@ MemoryDepChecker::Dependence::DepType MemoryDepChecker::isDependent(
if (HasSameSize) {
// Write to the same location with the same size.
return Dependence::Forward;
- } else {
- LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
- "different type sizes\n");
- return Dependence::Unknown;
}
+ LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
+ "different type sizes\n");
+ return Dependence::Unknown;
}
bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
@@ -2313,7 +2334,7 @@ bool MemoryDepChecker::areDepsSafe(
}
++OI;
}
- AI++;
+ ++AI;
}
}
@@ -2322,8 +2343,8 @@ bool MemoryDepChecker::areDepsSafe(
}
SmallVector<Instruction *, 4>
-MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
- MemAccessInfo Access(Ptr, isWrite);
+MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool IsWrite) const {
+ MemAccessInfo Access(Ptr, IsWrite);
auto &IndexVector = Accesses.find(Access)->second;
SmallVector<Instruction *, 4> Insts;
@@ -2699,13 +2720,14 @@ void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
}
void LoopAccessInfo::emitUnsafeDependenceRemark() {
- auto Deps = getDepChecker().getDependences();
+ const auto *Deps = getDepChecker().getDependences();
if (!Deps)
return;
- auto Found = llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
- return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
- MemoryDepChecker::VectorizationSafetyStatus::Safe;
- });
+ const auto *Found =
+ llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
+ return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
+ MemoryDepChecker::VectorizationSafetyStatus::Safe;
+ });
if (Found == Deps->end())
return;
MemoryDepChecker::Dependence Dep = *Found;
@@ -2844,9 +2866,9 @@ static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
// Check that all of the gep indices are uniform except for our induction
// operand.
- for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
- if (i != InductionOperand &&
- !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
+ for (unsigned I = 0, E = GEP->getNumOperands(); I != E; ++I)
+ if (I != InductionOperand &&
+ !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(I)), Lp))
return Ptr;
return GEP->getOperand(InductionOperand);
}
@@ -3042,9 +3064,8 @@ LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
DepChecker =
std::make_unique<MemoryDepChecker>(*PSE, L, MaxTargetVectorWidthInBits);
PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
- if (canAnalyzeLoop()) {
+ if (canAnalyzeLoop())
analyzeLoop(AA, LI, TLI, DT);
- }
}
void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
@@ -3096,13 +3117,13 @@ void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
}
const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L) {
- auto I = LoopAccessInfoMap.insert({&L, nullptr});
+ auto [It, Inserted] = LoopAccessInfoMap.insert({&L, nullptr});
- if (I.second)
- I.first->second =
+ if (Inserted)
+ It->second =
std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT, &LI);
- return *I.first->second;
+ return *It->second;
}
bool LoopAccessInfoManager::invalidate(
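
The NoDep early exit added to getDependenceDistanceStrideAndSize above reduces to an interval-disjointness test once both access ranges are computable. A standalone sketch, with plain integers standing in for the SCEV start/end expressions:

#include <cassert>
#include <cstdint>

struct AccessRange {
  uint64_t Start, End; // half-open: bytes [Start, End)
};

// NoDep when one access provably ends no later than the other begins,
// mirroring the two isKnownPredicate(ICMP_ULE, ...) queries in the patch.
bool provablyNoOverlap(AccessRange Src, AccessRange Sink) {
  return Src.End <= Sink.Start || Sink.End <= Src.Start;
}

int main() {
  assert(provablyNoOverlap({0, 64}, {64, 128})); // back-to-back: NoDep
  assert(!provablyNoOverlap({0, 64}, {32, 96})); // overlap: keep analyzing
}
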
diff --git a/llvm/lib/Analysis/LoopCacheAnalysis.cpp b/llvm/lib/Analysis/LoopCacheAnalysis.cpp
index 284d8d16d264..7ca9f15ad5fc 100644
--- a/llvm/lib/Analysis/LoopCacheAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopCacheAnalysis.cpp
@@ -299,7 +299,12 @@ CacheCostTy IndexedReference::computeRefCost(const Loop &L,
Stride = SE.getNoopOrAnyExtend(Stride, WiderType);
TripCount = SE.getNoopOrZeroExtend(TripCount, WiderType);
const SCEV *Numerator = SE.getMulExpr(Stride, TripCount);
- RefCost = SE.getUDivExpr(Numerator, CacheLineSize);
+  // Round the fractional cost up to the nearest integer. This matters
+  // most when the computed cost is less than one, because it makes more
+  // sense to say that one cache line is used rather than zero cache
+  // lines.
+ RefCost = SE.getUDivCeilSCEV(Numerator, CacheLineSize);
LLVM_DEBUG(dbgs().indent(4)
<< "Access is consecutive: RefCost=(TripCount*Stride)/CLS="
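
The switch from getUDivExpr to getUDivCeilSCEV matters exactly when Stride * TripCount is smaller than the cache line size. A standalone sketch of the arithmetic (assumes the rounded-up numerator does not overflow):

#include <cassert>
#include <cstdint>

uint64_t udivCeil(uint64_t Num, uint64_t Den) {
  return (Num + Den - 1) / Den; // round up instead of truncating
}

int main() {
  const uint64_t Stride = 4, TripCount = 8, CacheLineSize = 64;
  const uint64_t Num = Stride * TripCount;   // 32 bytes touched
  assert(Num / CacheLineSize == 0);          // old cost: zero cache lines
  assert(udivCeil(Num, CacheLineSize) == 1); // new cost: one cache line
}
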
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 254d79183a1e..8d971e6a78e4 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -9198,8 +9198,25 @@ ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
// Since the loop is finite, an invariant RHS cannot include the boundary
// value, otherwise it would loop forever.
if (!EnableFiniteLoopControl || !ControllingFiniteLoop ||
- !isLoopInvariant(RHS, L))
- break;
+ !isLoopInvariant(RHS, L)) {
+ // Otherwise, perform the addition in a wider type, to avoid overflow.
+ // If the LHS is an addrec with the appropriate nowrap flag, the
+ // extension will be sunk into it and the exit count can be analyzed.
+ auto *OldType = dyn_cast<IntegerType>(LHS->getType());
+ if (!OldType)
+ break;
+ // Prefer doubling the bitwidth over adding a single bit to make it more
+ // likely that we use a legal type.
+ auto *NewType =
+ Type::getIntNTy(OldType->getContext(), OldType->getBitWidth() * 2);
+ if (ICmpInst::isSigned(Pred)) {
+ LHS = getSignExtendExpr(LHS, NewType);
+ RHS = getSignExtendExpr(RHS, NewType);
+ } else {
+ LHS = getZeroExtendExpr(LHS, NewType);
+ RHS = getZeroExtendExpr(RHS, NewType);
+ }
+ }
RHS = getAddExpr(getOne(RHS->getType()), RHS);
[[fallthrough]];
case ICmpInst::ICMP_SLT:
@@ -9540,7 +9557,8 @@ static Constant *EvaluateExpression(Value *V, const Loop *L,
Operands[i] = C;
}
- return ConstantFoldInstOperands(I, Operands, DL, TLI);
+ return ConstantFoldInstOperands(I, Operands, DL, TLI,
+ /*AllowNonDeterministic=*/false);
}
@@ -10031,7 +10049,8 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
Constant *C = nullptr;
const DataLayout &DL = getDataLayout();
- C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
+ C = ConstantFoldInstOperands(I, Operands, DL, &TLI,
+ /*AllowNonDeterministic=*/false);
if (!C)
return V;
return getSCEV(C);
@@ -14732,7 +14751,9 @@ void SCEVUnionPredicate::add(const SCEVPredicate *N) {
return;
}
- Preds.push_back(N);
+ // Only add predicate if it is not already implied by this union predicate.
+ if (!implies(N))
+ Preds.push_back(N);
}
PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
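
The widening above protects the RHS + 1 rewrite that follows the new block: in the original type the increment can wrap, while in a type of twice the width it cannot. A standalone illustration with fixed-width integers rather than SCEV expressions (the narrow wrap-around assumes the usual two's-complement behavior):

#include <cassert>
#include <cstdint>

int main() {
  const int8_t RHS = INT8_MAX; // boundary value in the narrow type
  // In the narrow type, RHS + 1 wraps around to INT8_MIN.
  const int8_t Narrow = static_cast<int8_t>(RHS + 1);
  // After extending to twice the width, the increment is exact.
  const int16_t Wide = static_cast<int16_t>(RHS) + 1;
  assert(Narrow == INT8_MIN && Wide == 128);
}
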
diff --git a/llvm/lib/Analysis/TargetLibraryInfo.cpp b/llvm/lib/Analysis/TargetLibraryInfo.cpp
index 7ce42447b630..6b760fbde5bb 100644
--- a/llvm/lib/Analysis/TargetLibraryInfo.cpp
+++ b/llvm/lib/Analysis/TargetLibraryInfo.cpp
@@ -882,6 +882,9 @@ static void initializeLibCalls(TargetLibraryInfoImpl &TLI, const Triple &T,
TLI.setUnavailable(LibFunc_vec_free);
}
+ if (T.isOSAIX())
+ TLI.setUnavailable(LibFunc_memrchr);
+
TLI.addVectorizableFunctionsFromVecLib(ClVectorLibrary, T);
}
@@ -1201,26 +1204,31 @@ void TargetLibraryInfoImpl::addVectorizableFunctions(ArrayRef<VecDesc> Fns) {
static const VecDesc VecFuncs_Accelerate[] = {
#define TLI_DEFINE_ACCELERATE_VECFUNCS
#include "llvm/Analysis/VecFuncs.def"
+#undef TLI_DEFINE_ACCELERATE_VECFUNCS
};
static const VecDesc VecFuncs_DarwinLibSystemM[] = {
#define TLI_DEFINE_DARWIN_LIBSYSTEM_M_VECFUNCS
#include "llvm/Analysis/VecFuncs.def"
+#undef TLI_DEFINE_DARWIN_LIBSYSTEM_M_VECFUNCS
};
static const VecDesc VecFuncs_LIBMVEC_X86[] = {
#define TLI_DEFINE_LIBMVEC_X86_VECFUNCS
#include "llvm/Analysis/VecFuncs.def"
+#undef TLI_DEFINE_LIBMVEC_X86_VECFUNCS
};
static const VecDesc VecFuncs_MASSV[] = {
#define TLI_DEFINE_MASSV_VECFUNCS
#include "llvm/Analysis/VecFuncs.def"
+#undef TLI_DEFINE_MASSV_VECFUNCS
};
static const VecDesc VecFuncs_SVML[] = {
#define TLI_DEFINE_SVML_VECFUNCS
#include "llvm/Analysis/VecFuncs.def"
+#undef TLI_DEFINE_SVML_VECFUNCS
};
static const VecDesc VecFuncs_SLEEFGNUABI_VF2[] = {
@@ -1228,18 +1236,21 @@ static const VecDesc VecFuncs_SLEEFGNUABI_VF2[] = {
#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF, VABI_PREFIX) \
{SCAL, VEC, VF, /* MASK = */ false, VABI_PREFIX},
#include "llvm/Analysis/VecFuncs.def"
+#undef TLI_DEFINE_SLEEFGNUABI_VF2_VECFUNCS
};
static const VecDesc VecFuncs_SLEEFGNUABI_VF4[] = {
#define TLI_DEFINE_SLEEFGNUABI_VF4_VECFUNCS
#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF, VABI_PREFIX) \
{SCAL, VEC, VF, /* MASK = */ false, VABI_PREFIX},
#include "llvm/Analysis/VecFuncs.def"
+#undef TLI_DEFINE_SLEEFGNUABI_VF4_VECFUNCS
};
static const VecDesc VecFuncs_SLEEFGNUABI_VFScalable[] = {
#define TLI_DEFINE_SLEEFGNUABI_SCALABLE_VECFUNCS
#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF, MASK, VABI_PREFIX) \
{SCAL, VEC, VF, MASK, VABI_PREFIX},
#include "llvm/Analysis/VecFuncs.def"
+#undef TLI_DEFINE_SLEEFGNUABI_SCALABLE_VECFUNCS
};
static const VecDesc VecFuncs_ArmPL[] = {
@@ -1247,6 +1258,7 @@ static const VecDesc VecFuncs_ArmPL[] = {
#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF, MASK, VABI_PREFIX) \
{SCAL, VEC, VF, MASK, VABI_PREFIX},
#include "llvm/Analysis/VecFuncs.def"
+#undef TLI_DEFINE_ARMPL_VECFUNCS
};
const VecDesc VecFuncs_AMDLIBM[] = {
@@ -1254,6 +1266,7 @@ const VecDesc VecFuncs_AMDLIBM[] = {
#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF, MASK, VABI_PREFIX) \
{SCAL, VEC, VF, MASK, VABI_PREFIX},
#include "llvm/Analysis/VecFuncs.def"
+#undef TLI_DEFINE_AMDLIBM_VECFUNCS
};
void TargetLibraryInfoImpl::addVectorizableFunctionsFromVecLib(
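
The added #undef lines close a common X-macro hygiene gap: VecFuncs.def selects which table to expand based on which TLI_DEFINE_* selector macro is defined, so a selector left defined would leak into every later include. A self-contained sketch of the pattern (toy table, not the real VecFuncs.def):

#include <cstdio>

struct VecDesc {
  const char *Scalar;
  const char *Vector;
};

// Stand-in for one section of a .def file guarded by a selector macro.
#define TOY_VECFUNCS {"sinf", "_vsinf"}, {"cosf", "_vcosf"},

#define TLI_DEFINE_TOY_VECFUNCS
static const VecDesc VecFuncs_Toy[] = {
#ifdef TLI_DEFINE_TOY_VECFUNCS
    TOY_VECFUNCS
#endif
};
#undef TLI_DEFINE_TOY_VECFUNCS // without this, a later include would
                               // re-expand the same section

int main() {
  for (const VecDesc &D : VecFuncs_Toy)
    std::printf("%s -> %s\n", D.Scalar, D.Vector);
}
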
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index f6a458f7ded4..82b6d7e7c483 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -1037,7 +1037,7 @@ TargetTransformInfo::getVectorInstrCost(const Instruction &I, Type *Val,
InstructionCost TargetTransformInfo::getReplicationShuffleCost(
Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts,
- TTI::TargetCostKind CostKind) {
+ TTI::TargetCostKind CostKind) const {
InstructionCost Cost = TTIImpl->getReplicationShuffleCost(
EltTy, ReplicationFactor, VF, DemandedDstElts, CostKind);
assert(Cost >= 0 && "TTI should not produce negative costs!");
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index e8c5f9b3dc25..3baa8ede28ff 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -2493,9 +2493,20 @@ static bool isNonZeroRecurrence(const PHINode *PN) {
}
}
+static bool matchOpWithOpEqZero(Value *Op0, Value *Op1) {
+ ICmpInst::Predicate Pred;
+ return (match(Op0, m_ZExtOrSExt(m_ICmp(Pred, m_Specific(Op1), m_Zero()))) ||
+ match(Op1, m_ZExtOrSExt(m_ICmp(Pred, m_Specific(Op0), m_Zero())))) &&
+ Pred == ICmpInst::ICMP_EQ;
+}
+
static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth,
const SimplifyQuery &Q, unsigned BitWidth, Value *X,
Value *Y, bool NSW, bool NUW) {
+  // (X + (X == 0)) is non-zero.
+ if (matchOpWithOpEqZero(X, Y))
+ return true;
+
if (NUW)
return isKnownNonZero(Y, DemandedElts, Q, Depth) ||
isKnownNonZero(X, DemandedElts, Q, Depth);
@@ -2539,6 +2550,11 @@ static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth,
static bool isNonZeroSub(const APInt &DemandedElts, unsigned Depth,
const SimplifyQuery &Q, unsigned BitWidth, Value *X,
Value *Y) {
+  // (X - (X == 0)) is non-zero.
+  // ((X == 0) - X) is non-zero.
+ if (matchOpWithOpEqZero(X, Y))
+ return true;
+
// TODO: Move this case into isKnownNonEqual().
if (auto *C = dyn_cast<Constant>(X))
if (C->isNullValue() && isKnownNonZero(Y, DemandedElts, Q, Depth))
@@ -2698,7 +2714,15 @@ static bool isKnownNonZeroFromOperator(const Operator *I,
case Instruction::Sub:
return isNonZeroSub(DemandedElts, Depth, Q, BitWidth, I->getOperand(0),
I->getOperand(1));
+ case Instruction::Xor:
+    // (X ^ (X == 0)) is non-zero.
+ if (matchOpWithOpEqZero(I->getOperand(0), I->getOperand(1)))
+ return true;
+ break;
case Instruction::Or:
+    // (X | (X == 0)) is non-zero.
+ if (matchOpWithOpEqZero(I->getOperand(0), I->getOperand(1)))
+ return true;
// X | Y != 0 if X != 0 or Y != 0.
return isKnownNonZero(I->getOperand(1), DemandedElts, Q, Depth) ||
isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth);
@@ -2989,6 +3013,11 @@ static bool isKnownNonZeroFromOperator(const Operator *I,
return isKnownNonZero(II->getArgOperand(0), Q, Depth);
case Intrinsic::umax:
case Intrinsic::uadd_sat:
+    // umax(X, (X == 0)) is non-zero.
+    // X +usat (X == 0) is non-zero.
+ if (matchOpWithOpEqZero(II->getArgOperand(0), II->getArgOperand(1)))
+ return true;
+
return isKnownNonZero(II->getArgOperand(1), DemandedElts, Q, Depth) ||
isKnownNonZero(II->getArgOperand(0), DemandedElts, Q, Depth);
case Intrinsic::smax: {
@@ -4751,7 +4780,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
Known = KnownFPClass();
return;
}
- if (isa<UndefValue>(Elt))
+ if (isa<PoisonValue>(Elt))
continue;
auto *CElt = dyn_cast<ConstantFP>(Elt);
if (!CElt) {
@@ -4940,11 +4969,8 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
// subnormal input could produce a negative zero output.
const Function *F = II->getFunction();
if (Q.IIQ.hasNoSignedZeros(II) ||
- (F && KnownSrc.isKnownNeverLogicalNegZero(*F, II->getType()))) {
+ (F && KnownSrc.isKnownNeverLogicalNegZero(*F, II->getType())))
Known.knownNot(fcNegZero);
- if (KnownSrc.isKnownNeverNaN())
- Known.signBitMustBeZero();
- }
break;
}
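
The non-zero facts used above can be checked exhaustively in a narrow width. A standalone sketch over all i8 values, with zext(icmp eq X, 0) as the matched operand (sext behaves analogously):

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned V = 0; V < 256; ++V) {
    const uint8_t X = static_cast<uint8_t>(V);
    const uint8_t EqZ = (X == 0) ? 1 : 0; // zext(icmp eq X, 0)
    assert(static_cast<uint8_t>(X + EqZ) != 0);
    assert(static_cast<uint8_t>(X - EqZ) != 0);
    assert(static_cast<uint8_t>(EqZ - X) != 0);
    assert(static_cast<uint8_t>(X ^ EqZ) != 0);
    assert(static_cast<uint8_t>(X | EqZ) != 0);
  }
}
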
diff --git a/llvm/lib/AsmParser/LLLexer.cpp b/llvm/lib/AsmParser/LLLexer.cpp
index 8ded07ffd8bd..20a1bd295771 100644
--- a/llvm/lib/AsmParser/LLLexer.cpp
+++ b/llvm/lib/AsmParser/LLLexer.cpp
@@ -566,6 +566,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(fast);
KEYWORD(nuw);
KEYWORD(nsw);
+ KEYWORD(nusw);
KEYWORD(exact);
KEYWORD(disjoint);
KEYWORD(inbounds);
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index 2902bd9fe17c..5d2056d20856 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -4216,7 +4216,7 @@ bool LLParser::parseValID(ValID &ID, PerFunctionState *PFS, Type *ExpectedTy) {
case lltok::kw_extractelement: {
unsigned Opc = Lex.getUIntVal();
SmallVector<Constant*, 16> Elts;
- bool InBounds = false;
+ GEPNoWrapFlags NW;
bool HasInRange = false;
APSInt InRangeStart;
APSInt InRangeEnd;
@@ -4224,7 +4224,17 @@ bool LLParser::parseValID(ValID &ID, PerFunctionState *PFS, Type *ExpectedTy) {
Lex.Lex();
if (Opc == Instruction::GetElementPtr) {
- InBounds = EatIfPresent(lltok::kw_inbounds);
+ while (true) {
+ if (EatIfPresent(lltok::kw_inbounds))
+ NW |= GEPNoWrapFlags::inBounds();
+ else if (EatIfPresent(lltok::kw_nusw))
+ NW |= GEPNoWrapFlags::noUnsignedSignedWrap();
+ else if (EatIfPresent(lltok::kw_nuw))
+ NW |= GEPNoWrapFlags::noUnsignedWrap();
+ else
+ break;
+ }
+
if (EatIfPresent(lltok::kw_inrange)) {
if (parseToken(lltok::lparen, "expected '('"))
return true;
@@ -4303,8 +4313,8 @@ bool LLParser::parseValID(ValID &ID, PerFunctionState *PFS, Type *ExpectedTy) {
if (!GetElementPtrInst::getIndexedType(Ty, Indices))
return error(ID.Loc, "invalid getelementptr indices");
- ID.ConstantVal = ConstantExpr::getGetElementPtr(Ty, Elts[0], Indices,
- InBounds, InRange);
+ ID.ConstantVal =
+ ConstantExpr::getGetElementPtr(Ty, Elts[0], Indices, NW, InRange);
} else if (Opc == Instruction::ShuffleVector) {
if (Elts.size() != 3)
return error(ID.Loc, "expected three operands to shufflevector");
@@ -8339,8 +8349,18 @@ int LLParser::parseGetElementPtr(Instruction *&Inst, PerFunctionState &PFS) {
Value *Ptr = nullptr;
Value *Val = nullptr;
LocTy Loc, EltLoc;
+ GEPNoWrapFlags NW;
- bool InBounds = EatIfPresent(lltok::kw_inbounds);
+ while (true) {
+ if (EatIfPresent(lltok::kw_inbounds))
+ NW |= GEPNoWrapFlags::inBounds();
+ else if (EatIfPresent(lltok::kw_nusw))
+ NW |= GEPNoWrapFlags::noUnsignedSignedWrap();
+ else if (EatIfPresent(lltok::kw_nuw))
+ NW |= GEPNoWrapFlags::noUnsignedWrap();
+ else
+ break;
+ }
Type *Ty = nullptr;
if (parseType(Ty) ||
@@ -8393,9 +8413,9 @@ int LLParser::parseGetElementPtr(Instruction *&Inst, PerFunctionState &PFS) {
if (!GetElementPtrInst::getIndexedType(Ty, Indices))
return error(Loc, "invalid getelementptr indices");
- Inst = GetElementPtrInst::Create(Ty, Ptr, Indices);
- if (InBounds)
- cast<GetElementPtrInst>(Inst)->setIsInBounds(true);
+ GetElementPtrInst *GEP = GetElementPtrInst::Create(Ty, Ptr, Indices);
+ Inst = GEP;
+ GEP->setNoWrapFlags(NW);
return AteExtraComma ? InstExtraComma : InstNormal;
}
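
The parse loops added above accept the three wrap keywords in any order and any combination. A standalone sketch of the same loop shape with a toy tokenizer (EatIfPresent is modelled by string comparison; the flag values are illustrative):

#include <cassert>
#include <sstream>
#include <string>

enum GEPFlag : unsigned { None = 0, InBounds = 1, NUSW = 2, NUW = 4 };

unsigned parseGEPFlags(std::istringstream &Toks, std::string &Next) {
  unsigned NW = None;
  while (Toks >> Next) {
    if (Next == "inbounds")
      NW |= InBounds;
    else if (Next == "nusw")
      NW |= NUSW;
    else if (Next == "nuw")
      NW |= NUW;
    else
      break; // the first non-flag token ends the loop, as in the parser
  }
  return NW;
}

int main() {
  std::istringstream In("nuw inbounds ptr");
  std::string Tok;
  assert(parseGEPFlags(In, Tok) == (NUW | InBounds));
  assert(Tok == "ptr"); // parsing resumes at the first non-flag token
}
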
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index e64051cf5386..32b9a033173e 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -1459,6 +1459,17 @@ unsigned BitcodeReader::getVirtualTypeID(Type *Ty,
return TypeID;
}
+static GEPNoWrapFlags toGEPNoWrapFlags(uint64_t Flags) {
+ GEPNoWrapFlags NW;
+ if (Flags & (1 << bitc::GEP_INBOUNDS))
+ NW |= GEPNoWrapFlags::inBounds();
+ if (Flags & (1 << bitc::GEP_NUSW))
+ NW |= GEPNoWrapFlags::noUnsignedSignedWrap();
+ if (Flags & (1 << bitc::GEP_NUW))
+ NW |= GEPNoWrapFlags::noUnsignedWrap();
+ return NW;
+}
+
static bool isConstExprSupported(const BitcodeConstant *BC) {
uint8_t Opcode = BC->Opcode;
@@ -1614,9 +1625,9 @@ Expected<Value *> BitcodeReader::materializeValue(unsigned StartValID,
C = ConstantExpr::getCompare(BC->Flags, ConstOps[0], ConstOps[1]);
break;
case Instruction::GetElementPtr:
- C = ConstantExpr::getGetElementPtr(BC->SrcElemTy, ConstOps[0],
- ArrayRef(ConstOps).drop_front(),
- BC->Flags, BC->getInRange());
+ C = ConstantExpr::getGetElementPtr(
+ BC->SrcElemTy, ConstOps[0], ArrayRef(ConstOps).drop_front(),
+ toGEPNoWrapFlags(BC->Flags), BC->getInRange());
break;
case Instruction::ExtractElement:
C = ConstantExpr::getExtractElement(ConstOps[0], ConstOps[1]);
@@ -1700,8 +1711,7 @@ Expected<Value *> BitcodeReader::materializeValue(unsigned StartValID,
I = GetElementPtrInst::Create(BC->SrcElemTy, Ops[0],
ArrayRef(Ops).drop_front(), "constexpr",
InsertBB);
- if (BC->Flags)
- cast<GetElementPtrInst>(I)->setIsInBounds();
+ cast<GetElementPtrInst>(I)->setNoWrapFlags(toGEPNoWrapFlags(BC->Flags));
break;
case Instruction::Select:
I = SelectInst::Create(Ops[0], Ops[1], Ops[2], "constexpr", InsertBB);
@@ -2940,7 +2950,7 @@ Error BitcodeReader::parseValueSymbolTable(uint64_t Offset) {
if (!BB)
return error("Invalid bbentry record");
- BB->setName(StringRef(ValueName.data(), ValueName.size()));
+ BB->setName(ValueName.str());
ValueName.clear();
break;
}
@@ -3321,9 +3331,10 @@ Error BitcodeReader::parseConstants() {
break;
}
case bitc::CST_CODE_CE_INBOUNDS_GEP: // [ty, n x operands]
- case bitc::CST_CODE_CE_GEP: // [ty, n x operands]
+ case bitc::CST_CODE_CE_GEP_OLD: // [ty, n x operands]
case bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX_OLD: // [ty, flags, n x
// operands]
+ case bitc::CST_CODE_CE_GEP: // [ty, flags, n x operands]
case bitc::CST_CODE_CE_GEP_WITH_INRANGE: { // [ty, flags, start, end, n x
// operands]
if (Record.size() < 2)
@@ -3331,27 +3342,29 @@ Error BitcodeReader::parseConstants() {
unsigned OpNum = 0;
Type *PointeeType = nullptr;
if (BitCode == bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX_OLD ||
- BitCode == bitc::CST_CODE_CE_GEP_WITH_INRANGE || Record.size() % 2)
+ BitCode == bitc::CST_CODE_CE_GEP_WITH_INRANGE ||
+ BitCode == bitc::CST_CODE_CE_GEP || Record.size() % 2)
PointeeType = getTypeByID(Record[OpNum++]);
- bool InBounds = false;
+ uint64_t Flags = 0;
std::optional<ConstantRange> InRange;
if (BitCode == bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX_OLD) {
uint64_t Op = Record[OpNum++];
- InBounds = Op & 1;
+ Flags = Op & 1; // inbounds
unsigned InRangeIndex = Op >> 1;
// "Upgrade" inrange by dropping it. The feature is too niche to
// bother.
(void)InRangeIndex;
} else if (BitCode == bitc::CST_CODE_CE_GEP_WITH_INRANGE) {
- uint64_t Op = Record[OpNum++];
- InBounds = Op & 1;
+ Flags = Record[OpNum++];
Expected<ConstantRange> MaybeInRange = readConstantRange(Record, OpNum);
if (!MaybeInRange)
return MaybeInRange.takeError();
InRange = MaybeInRange.get();
+ } else if (BitCode == bitc::CST_CODE_CE_GEP) {
+ Flags = Record[OpNum++];
} else if (BitCode == bitc::CST_CODE_CE_INBOUNDS_GEP)
- InBounds = true;
+ Flags = (1 << bitc::GEP_INBOUNDS);
SmallVector<unsigned, 16> Elts;
unsigned BaseTypeID = Record[OpNum];
@@ -3384,7 +3397,8 @@ Error BitcodeReader::parseConstants() {
V = BitcodeConstant::create(
Alloc, CurTy,
- {Instruction::GetElementPtr, InBounds, PointeeType, InRange}, Elts);
+ {Instruction::GetElementPtr, uint8_t(Flags), PointeeType, InRange},
+ Elts);
break;
}
case bitc::CST_CODE_CE_SELECT: { // CE_SELECT: [opval#, opval#, opval#]
@@ -5062,14 +5076,15 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
unsigned TyID;
Type *Ty;
- bool InBounds;
+ GEPNoWrapFlags NW;
if (BitCode == bitc::FUNC_CODE_INST_GEP) {
- InBounds = Record[OpNum++];
+ NW = toGEPNoWrapFlags(Record[OpNum++]);
TyID = Record[OpNum++];
Ty = getTypeByID(TyID);
} else {
- InBounds = BitCode == bitc::FUNC_CODE_INST_INBOUNDS_GEP_OLD;
+ if (BitCode == bitc::FUNC_CODE_INST_INBOUNDS_GEP_OLD)
+ NW = GEPNoWrapFlags::inBounds();
TyID = InvalidTypeID;
Ty = nullptr;
}
@@ -5096,7 +5111,8 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
GEPIdx.push_back(Op);
}
- I = GetElementPtrInst::Create(Ty, BasePtr, GEPIdx);
+ auto *GEP = GetElementPtrInst::Create(Ty, BasePtr, GEPIdx);
+ I = GEP;
ResTypeID = TyID;
if (cast<GEPOperator>(I)->getNumIndices() != 0) {
@@ -5122,8 +5138,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
ResTypeID = getVirtualTypeID(I->getType(), ResTypeID);
InstructionList.push_back(I);
- if (InBounds)
- cast<GetElementPtrInst>(I)->setIsInBounds(true);
+ GEP->setNoWrapFlags(NW);
break;
}
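
toGEPNoWrapFlags above unpacks one bit per no-wrap property from the serialized word; getOptimizationFlags in BitcodeWriter.cpp (next file) packs the same bits. A standalone round-trip sketch, with local constants standing in for the bitc::GEP_* enumerators:

#include <cassert>
#include <cstdint>

enum GEPBits : unsigned { GEP_INBOUNDS = 0, GEP_NUSW = 1, GEP_NUW = 2 };

struct NoWrapFlags {
  bool InBounds, NUSW, NUW;
};

uint64_t encode(NoWrapFlags F) {
  uint64_t Word = 0;
  Word |= uint64_t(F.InBounds) << GEP_INBOUNDS;
  Word |= uint64_t(F.NUSW) << GEP_NUSW;
  Word |= uint64_t(F.NUW) << GEP_NUW;
  return Word;
}

NoWrapFlags decode(uint64_t Word) {
  return {((Word >> GEP_INBOUNDS) & 1) != 0, ((Word >> GEP_NUSW) & 1) != 0,
          ((Word >> GEP_NUW) & 1) != 0};
}

int main() {
  const NoWrapFlags In = {true, false, true};
  const NoWrapFlags Out = decode(encode(In));
  assert(Out.InBounds && !Out.NUSW && Out.NUW);
}
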
diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index c4cea3d6eef2..3d653fe4458f 100644
--- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -428,6 +428,11 @@ class IndexBitcodeWriter : public BitcodeWriterBase {
/// The combined index to write to bitcode.
const ModuleSummaryIndex &Index;
+ /// When writing combined summaries, provides the set of global value
+  /// summaries for which the value (function, function alias, etc.) should be
+ /// imported as a declaration.
+ const GVSummaryPtrSet *DecSummaries = nullptr;
+
/// When writing a subset of the index for distributed backends, client
/// provides a map of modules to the corresponding GUIDs/summaries to write.
const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex;
@@ -452,11 +457,16 @@ public:
/// Constructs a IndexBitcodeWriter object for the given combined index,
/// writing to the provided \p Buffer. When writing a subset of the index
/// for a distributed backend, provide a \p ModuleToSummariesForIndex map.
+  /// If provided, \p DecSummaries specifies the set of summaries for which
+  /// the corresponding functions or aliased functions should be imported as
+  /// a declaration (but not a definition) for each module.
IndexBitcodeWriter(BitstreamWriter &Stream, StringTableBuilder &StrtabBuilder,
const ModuleSummaryIndex &Index,
+ const GVSummaryPtrSet *DecSummaries = nullptr,
const std::map<std::string, GVSummaryMapTy>
*ModuleToSummariesForIndex = nullptr)
: BitcodeWriterBase(Stream, StrtabBuilder), Index(Index),
+ DecSummaries(DecSummaries),
ModuleToSummariesForIndex(ModuleToSummariesForIndex) {
// Assign unique value ids to all summaries to be written, for use
// in writing out the call graph edges. Save the mapping from GUID
@@ -1202,7 +1212,8 @@ static uint64_t getEncodedFFlags(FunctionSummary::FFlags Flags) {
// Decode the flags for GlobalValue in the summary. See getDecodedGVSummaryFlags
// in BitcodeReader.cpp.
-static uint64_t getEncodedGVSummaryFlags(GlobalValueSummary::GVFlags Flags) {
+static uint64_t getEncodedGVSummaryFlags(GlobalValueSummary::GVFlags Flags,
+ bool ImportAsDecl = false) {
uint64_t RawFlags = 0;
RawFlags |= Flags.NotEligibleToImport; // bool
@@ -1217,7 +1228,8 @@ static uint64_t getEncodedGVSummaryFlags(GlobalValueSummary::GVFlags Flags) {
RawFlags |= (Flags.Visibility << 8); // 2 bits
- RawFlags |= (Flags.ImportType << 10); // 1 bit
+ unsigned ImportType = Flags.ImportType | ImportAsDecl;
+ RawFlags |= (ImportType << 10); // 1 bit
return RawFlags;
}
@@ -1656,6 +1668,13 @@ static uint64_t getOptimizationFlags(const Value *V) {
Flags |= 1 << bitc::TIO_NO_SIGNED_WRAP;
if (TI->hasNoUnsignedWrap())
Flags |= 1 << bitc::TIO_NO_UNSIGNED_WRAP;
+ } else if (const auto *GEP = dyn_cast<GEPOperator>(V)) {
+ if (GEP->isInBounds())
+ Flags |= 1 << bitc::GEP_INBOUNDS;
+ if (GEP->hasNoUnsignedSignedWrap())
+ Flags |= 1 << bitc::GEP_NUSW;
+ if (GEP->hasNoUnsignedWrap())
+ Flags |= 1 << bitc::GEP_NUW;
}
return Flags;
@@ -2767,12 +2786,11 @@ void ModuleBitcodeWriter::writeConstants(unsigned FirstVal, unsigned LastVal,
Code = bitc::CST_CODE_CE_GEP;
const auto *GO = cast<GEPOperator>(C);
Record.push_back(VE.getTypeID(GO->getSourceElementType()));
+ Record.push_back(getOptimizationFlags(GO));
if (std::optional<ConstantRange> Range = GO->getInRange()) {
Code = bitc::CST_CODE_CE_GEP_WITH_INRANGE;
- Record.push_back(GO->isInBounds());
emitConstantRange(Record, *Range);
- } else if (GO->isInBounds())
- Code = bitc::CST_CODE_CE_INBOUNDS_GEP;
+ }
for (unsigned i = 0, e = CE->getNumOperands(); i != e; ++i) {
Record.push_back(VE.getTypeID(C->getOperand(i)->getType()));
Record.push_back(VE.getValueID(C->getOperand(i)));
@@ -2961,7 +2979,7 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
Code = bitc::FUNC_CODE_INST_GEP;
AbbrevToUse = FUNCTION_INST_GEP_ABBREV;
auto &GEPInst = cast<GetElementPtrInst>(I);
- Vals.push_back(GEPInst.isInBounds());
+ Vals.push_back(getOptimizationFlags(&I));
Vals.push_back(VE.getTypeID(GEPInst.getSourceElementType()));
for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
pushValueAndType(I.getOperand(i), InstID, Vals);
@@ -3859,7 +3877,7 @@ void ModuleBitcodeWriter::writeBlockInfo() {
{
auto Abbv = std::make_shared<BitCodeAbbrev>();
Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_GEP));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // dest ty
Log2_32_Ceil(VE.getTypes().size() + 1)));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
@@ -4543,6 +4561,12 @@ void IndexBitcodeWriter::writeCombinedGlobalValueSummary() {
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
unsigned AllocAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+ auto shouldImportValueAsDecl = [&](GlobalValueSummary *GVS) -> bool {
+ if (DecSummaries == nullptr)
+ return false;
+ return DecSummaries->contains(GVS);
+ };
+
// The aliases are emitted as a post-pass, and will point to the value
// id of the aliasee. Save them in a vector for post-processing.
SmallVector<AliasSummary *, 64> Aliases;
@@ -4653,7 +4677,8 @@ void IndexBitcodeWriter::writeCombinedGlobalValueSummary() {
NameVals.push_back(*ValueId);
assert(ModuleIdMap.count(FS->modulePath()));
NameVals.push_back(ModuleIdMap[FS->modulePath()]);
- NameVals.push_back(getEncodedGVSummaryFlags(FS->flags()));
+ NameVals.push_back(
+ getEncodedGVSummaryFlags(FS->flags(), shouldImportValueAsDecl(FS)));
NameVals.push_back(FS->instCount());
NameVals.push_back(getEncodedFFlags(FS->fflags()));
NameVals.push_back(FS->entryCount());
@@ -4702,7 +4727,8 @@ void IndexBitcodeWriter::writeCombinedGlobalValueSummary() {
NameVals.push_back(AliasValueId);
assert(ModuleIdMap.count(AS->modulePath()));
NameVals.push_back(ModuleIdMap[AS->modulePath()]);
- NameVals.push_back(getEncodedGVSummaryFlags(AS->flags()));
+ NameVals.push_back(
+ getEncodedGVSummaryFlags(AS->flags(), shouldImportValueAsDecl(AS)));
auto AliaseeValueId = SummaryToValueIdMap[&AS->getAliasee()];
assert(AliaseeValueId);
NameVals.push_back(AliaseeValueId);
@@ -5036,8 +5062,9 @@ void BitcodeWriter::writeModule(const Module &M,
void BitcodeWriter::writeIndex(
const ModuleSummaryIndex *Index,
- const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex) {
- IndexBitcodeWriter IndexWriter(*Stream, StrtabBuilder, *Index,
+ const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex,
+ const GVSummaryPtrSet *DecSummaries) {
+ IndexBitcodeWriter IndexWriter(*Stream, StrtabBuilder, *Index, DecSummaries,
ModuleToSummariesForIndex);
IndexWriter.write();
}
@@ -5090,12 +5117,13 @@ void IndexBitcodeWriter::write() {
// index for a distributed backend, provide a \p ModuleToSummariesForIndex map.
void llvm::writeIndexToFile(
const ModuleSummaryIndex &Index, raw_ostream &Out,
- const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex) {
+ const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex,
+ const GVSummaryPtrSet *DecSummaries) {
SmallVector<char, 0> Buffer;
Buffer.reserve(256 * 1024);
BitcodeWriter Writer(Buffer);
- Writer.writeIndex(&Index, ModuleToSummariesForIndex);
+ Writer.writeIndex(&Index, ModuleToSummariesForIndex, DecSummaries);
Writer.writeStrtab();
Out.write((char *)&Buffer.front(), Buffer.size());
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index d50cdc4323ec..c5755b9bdc8d 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -2463,11 +2463,15 @@ bool AsmPrinter::doFinalization(Module &M) {
emitGlobalIFunc(M, IFunc);
// Finalize debug and EH information.
+  // Defer MCAssembler-based constant folding due to a performance issue. The
+ // label differences will be evaluated at write time.
+ OutStreamer->setUseAssemblerInfoForParsing(false);
for (const HandlerInfo &HI : Handlers) {
NamedRegionTimer T(HI.TimerName, HI.TimerDescription, HI.TimerGroupName,
HI.TimerGroupDescription, TimePassesIsEnabled);
HI.Handler->endModule();
}
+ OutStreamer->setUseAssemblerInfoForParsing(true);
// This deletes all the ephemeral handlers that AsmPrinter added, while
// keeping all the user-added handlers alive until the AsmPrinter is
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
index d0ef3e5a1939..08e3c208ba4d 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
@@ -102,9 +102,6 @@ void AsmPrinter::emitInlineAsm(StringRef Str, const MCSubtargetInfo &STI,
std::unique_ptr<MCAsmParser> Parser(
createMCAsmParser(SrcMgr, OutContext, *OutStreamer, *MAI, BufNum));
- // Do not use assembler-level information for parsing inline assembly.
- OutStreamer->setUseAssemblerInfoForParsing(false);
-
// We create a new MCInstrInfo here since we might be at the module level
// and not have a MachineFunction to initialize the TargetInstrInfo from and
// we only need MCInstrInfo for asm parsing. We create one unconditionally
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
index 6022afbae574..c1e7f01f0eba 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
@@ -1539,8 +1539,8 @@ void DwarfCompileUnit::addGlobalNameForTypeUnit(StringRef Name,
}
/// Add a new global type to the unit.
-void DwarfCompileUnit::addGlobalType(const DIType *Ty, const DIE &Die,
- const DIScope *Context) {
+void DwarfCompileUnit::addGlobalTypeImpl(const DIType *Ty, const DIE &Die,
+ const DIScope *Context) {
if (!hasDwarfPubSections())
return;
std::string FullName = getParentContextString(Context) + Ty->getName().str();
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
index dc772bb459c9..76584b3eb8e7 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
@@ -335,8 +335,8 @@ public:
void addGlobalNameForTypeUnit(StringRef Name, const DIScope *Context);
/// Add a new global type to the compile unit.
- void addGlobalType(const DIType *Ty, const DIE &Die,
- const DIScope *Context) override;
+ void addGlobalTypeImpl(const DIType *Ty, const DIE &Die,
+ const DIScope *Context) override;
/// Add a new global type present in a type unit to this compile unit.
void addGlobalTypeUnitType(const DIType *Ty, const DIScope *Context);
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
index 1e33c2729e5d..6c04fa1c67a9 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
@@ -578,28 +578,33 @@ DIE *DwarfUnit::createTypeDIE(const DIScope *Context, DIE &ContextDIE,
// Create new type.
DIE &TyDIE = createAndAddDIE(Ty->getTag(), ContextDIE, Ty);
- updateAcceleratorTables(Context, Ty, TyDIE);
+ auto construct = [&](const auto *Ty) {
+ updateAcceleratorTables(Context, Ty, TyDIE);
+ constructTypeDIE(TyDIE, Ty);
+ };
- if (auto *BT = dyn_cast<DIBasicType>(Ty))
- constructTypeDIE(TyDIE, BT);
- else if (auto *ST = dyn_cast<DIStringType>(Ty))
- constructTypeDIE(TyDIE, ST);
- else if (auto *STy = dyn_cast<DISubroutineType>(Ty))
- constructTypeDIE(TyDIE, STy);
- else if (auto *CTy = dyn_cast<DICompositeType>(Ty)) {
+ if (auto *CTy = dyn_cast<DICompositeType>(Ty)) {
if (DD->generateTypeUnits() && !Ty->isForwardDecl() &&
(Ty->getRawName() || CTy->getRawIdentifier())) {
// Skip updating the accelerator tables since this is not the full type.
- if (MDString *TypeId = CTy->getRawIdentifier())
+ if (MDString *TypeId = CTy->getRawIdentifier()) {
+ addGlobalType(Ty, TyDIE, Context);
DD->addDwarfTypeUnitType(getCU(), TypeId->getString(), TyDIE, CTy);
- else
+ } else {
+ updateAcceleratorTables(Context, Ty, TyDIE);
finishNonUnitTypeDIE(TyDIE, CTy);
+ }
return &TyDIE;
}
- constructTypeDIE(TyDIE, CTy);
- } else {
- constructTypeDIE(TyDIE, cast<DIDerivedType>(Ty));
- }
+ construct(CTy);
+ } else if (auto *BT = dyn_cast<DIBasicType>(Ty))
+ construct(BT);
+ else if (auto *ST = dyn_cast<DIStringType>(Ty))
+ construct(ST);
+ else if (auto *STy = dyn_cast<DISubroutineType>(Ty))
+ construct(STy);
+ else
+ construct(cast<DIDerivedType>(Ty));
return &TyDIE;
}
@@ -633,21 +638,31 @@ DIE *DwarfUnit::getOrCreateTypeDIE(const MDNode *TyNode) {
void DwarfUnit::updateAcceleratorTables(const DIScope *Context,
const DIType *Ty, const DIE &TyDIE) {
- if (!Ty->getName().empty() && !Ty->isForwardDecl()) {
- bool IsImplementation = false;
- if (auto *CT = dyn_cast<DICompositeType>(Ty)) {
- // A runtime language of 0 actually means C/C++ and that any
- // non-negative value is some version of Objective-C/C++.
- IsImplementation = CT->getRuntimeLang() == 0 || CT->isObjcClassComplete();
- }
- unsigned Flags = IsImplementation ? dwarf::DW_FLAG_type_implementation : 0;
- DD->addAccelType(*this, CUNode->getNameTableKind(), Ty->getName(), TyDIE,
- Flags);
+ if (Ty->getName().empty())
+ return;
+ if (Ty->isForwardDecl())
+ return;
- if (!Context || isa<DICompileUnit>(Context) || isa<DIFile>(Context) ||
- isa<DINamespace>(Context) || isa<DICommonBlock>(Context))
- addGlobalType(Ty, TyDIE, Context);
+  // Record this type in the accelerator tables, and in the global types
+  // when the context is eligible (see addGlobalType below).
+
+ bool IsImplementation = false;
+ if (auto *CT = dyn_cast<DICompositeType>(Ty)) {
+ // A runtime language of 0 actually means C/C++ and that any
+ // non-negative value is some version of Objective-C/C++.
+ IsImplementation = CT->getRuntimeLang() == 0 || CT->isObjcClassComplete();
}
+ unsigned Flags = IsImplementation ? dwarf::DW_FLAG_type_implementation : 0;
+ DD->addAccelType(*this, CUNode->getNameTableKind(), Ty->getName(), TyDIE,
+ Flags);
+
+ addGlobalType(Ty, TyDIE, Context);
+}
+
+void DwarfUnit::addGlobalType(const DIType *Ty, const DIE &TyDIE,
+ const DIScope *Context) {
+ if (!Context || isa<DICompileUnit>(Context) || isa<DIFile>(Context) ||
+ isa<DINamespace>(Context) || isa<DICommonBlock>(Context))
+ addGlobalTypeImpl(Ty, TyDIE, Context);
}
void DwarfUnit::addType(DIE &Entity, const DIType *Ty,
@@ -1844,8 +1859,8 @@ void DwarfTypeUnit::addGlobalName(StringRef Name, const DIE &Die,
getCU().addGlobalNameForTypeUnit(Name, Context);
}
-void DwarfTypeUnit::addGlobalType(const DIType *Ty, const DIE &Die,
- const DIScope *Context) {
+void DwarfTypeUnit::addGlobalTypeImpl(const DIType *Ty, const DIE &Die,
+ const DIScope *Context) {
getCU().addGlobalTypeUnitType(Ty, Context);
}
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h
index 18f50f86ec87..02256546b6b8 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.h
@@ -128,8 +128,10 @@ public:
const DIScope *Context) = 0;
/// Add a new global type to the compile unit.
- virtual void addGlobalType(const DIType *Ty, const DIE &Die,
- const DIScope *Context) = 0;
+ virtual void addGlobalTypeImpl(const DIType *Ty, const DIE &Die,
+ const DIScope *Context) = 0;
+
+ void addGlobalType(const DIType *Ty, const DIE &Die, const DIScope *Context);
/// Returns the DIE map slot for the specified debug variable.
///
@@ -397,8 +399,8 @@ public:
}
void addGlobalName(StringRef Name, const DIE &Die,
const DIScope *Context) override;
- void addGlobalType(const DIType *Ty, const DIE &Die,
- const DIScope *Context) override;
+ void addGlobalTypeImpl(const DIType *Ty, const DIE &Die,
+ const DIScope *Context) override;
DwarfCompileUnit &getCU() override { return CU; }
};
} // end llvm namespace
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index ee44e9353d04..d2b756e82964 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -37,6 +37,7 @@
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
@@ -937,6 +938,36 @@ void AtomicExpandImpl::expandPartwordAtomicRMW(
AI->eraseFromParent();
}
+/// Copy metadata that's safe to preserve when widening atomics.
+static void copyMetadataForAtomic(Instruction &Dest,
+ const Instruction &Source) {
+ SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
+ Source.getAllMetadata(MD);
+ LLVMContext &Ctx = Dest.getContext();
+ MDBuilder MDB(Ctx);
+
+ for (auto [ID, N] : MD) {
+ switch (ID) {
+ case LLVMContext::MD_dbg:
+ case LLVMContext::MD_tbaa:
+ case LLVMContext::MD_tbaa_struct:
+ case LLVMContext::MD_alias_scope:
+ case LLVMContext::MD_noalias:
+ case LLVMContext::MD_access_group:
+ case LLVMContext::MD_mmra:
+ Dest.setMetadata(ID, N);
+ break;
+ default:
+ if (ID == Ctx.getMDKindID("amdgpu.no.remote.memory") ||
+ ID == Ctx.getMDKindID("amdgpu.no.fine.grained.memory"))
+ Dest.setMetadata(ID, N);
+ break;
+ }
+ }
+}
+
// Widen the bitwise atomicrmw (or/xor/and) to the minimum supported width.
AtomicRMWInst *AtomicExpandImpl::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
ReplacementIRBuilder Builder(AI, *DL);
@@ -965,7 +996,8 @@ AtomicRMWInst *AtomicExpandImpl::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
AtomicRMWInst *NewAI = Builder.CreateAtomicRMW(
Op, PMV.AlignedAddr, NewOperand, PMV.AlignedAddrAlignment,
AI->getOrdering(), AI->getSyncScopeID());
- // TODO: Preserve metadata
+
+ copyMetadataForAtomic(*NewAI, *AI);
Value *FinalOldResult = extractMaskedValue(Builder, NewAI, PMV);
AI->replaceAllUsesWith(FinalOldResult);
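
copyMetadataForAtomic encodes an allow-list policy: when the atomic is rewritten to a wider access, only metadata kinds whose meaning survives the widening (debug location, TBAA, aliasing scopes, access groups, the AMDGPU memory hints) are carried over, and everything else is dropped conservatively. A standalone model of that policy, with illustrative kind names rather than LLVM's kind IDs:

#include <cassert>
#include <map>
#include <set>
#include <string>

// Model an instruction's metadata as a kind->payload map. When
// rewriting the instruction, copy only kinds known to stay correct
// after the memory access is widened; drop the rest conservatively.
using Metadata = std::map<std::string, std::string>;

Metadata copyMetadataForWiden(const Metadata &Source) {
  static const std::set<std::string> SafeKinds = {
      "dbg", "tbaa", "alias.scope", "noalias", "access_group"};
  Metadata Dest;
  for (const auto &[Kind, Payload] : Source)
    if (SafeKinds.count(Kind))
      Dest[Kind] = Payload;
  return Dest;
}

int main() {
  Metadata MD = {{"dbg", "line 42"}, {"range", "[0,8)"}};
  Metadata Copied = copyMetadataForWiden(MD);
  assert(Copied.count("dbg"));    // preserved
  assert(!Copied.count("range")); // range of the narrow value is stale
}
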
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 22eb4a3e0d7c..4cc602b5c870 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -223,6 +223,70 @@ void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
replaceRegWith(MRI, DstReg, SrcReg);
}
+bool CombinerHelper::matchFreezeOfSingleMaybePoisonOperand(
+ MachineInstr &MI, BuildFnTy &MatchInfo) {
+ // Ported from InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating.
+ Register DstOp = MI.getOperand(0).getReg();
+ Register OrigOp = MI.getOperand(1).getReg();
+
+ if (!MRI.hasOneNonDBGUse(OrigOp))
+ return false;
+
+ MachineInstr *OrigDef = MRI.getUniqueVRegDef(OrigOp);
+ // Even if only a single operand of the PHI is not guaranteed non-poison,
+ // moving freeze() backwards across a PHI can cause optimization issues for
+ // other users of that operand.
+ //
+ // Moving freeze() from one of the output registers of a G_UNMERGE_VALUES to
+ // the source register is unprofitable because it makes the freeze() more
+ // strict than is necessary (it would affect the whole register instead of
+ // just the subreg being frozen).
+ if (OrigDef->isPHI() || isa<GUnmerge>(OrigDef))
+ return false;
+
+ if (canCreateUndefOrPoison(OrigOp, MRI,
+ /*ConsiderFlagsAndMetadata=*/false))
+ return false;
+
+ std::optional<MachineOperand> MaybePoisonOperand;
+ for (MachineOperand &Operand : OrigDef->uses()) {
+ if (!Operand.isReg())
+ return false;
+
+ if (isGuaranteedNotToBeUndefOrPoison(Operand.getReg(), MRI))
+ continue;
+
+ if (!MaybePoisonOperand)
+ MaybePoisonOperand = Operand;
+ else {
+ // We have more than one maybe-poison operand. Moving the freeze is
+ // unsafe.
+ return false;
+ }
+ }
+
+ cast<GenericMachineInstr>(OrigDef)->dropPoisonGeneratingFlags();
+
+ // Eliminate freeze if all operands are guaranteed non-poison.
+ if (!MaybePoisonOperand) {
+ MatchInfo = [=](MachineIRBuilder &B) { MRI.replaceRegWith(DstOp, OrigOp); };
+ return true;
+ }
+
+ Register MaybePoisonOperandReg = MaybePoisonOperand->getReg();
+ LLT MaybePoisonOperandRegTy = MRI.getType(MaybePoisonOperandReg);
+
+ MatchInfo = [=](MachineIRBuilder &B) mutable {
+ B.setInsertPt(*OrigDef->getParent(), OrigDef->getIterator());
+ auto Freeze = B.buildFreeze(MaybePoisonOperandRegTy, MaybePoisonOperandReg);
+ replaceRegOpWith(
+ MRI, *OrigDef->findRegisterUseOperand(MaybePoisonOperandReg, TRI),
+ Freeze.getReg(0));
+ replaceRegWith(MRI, DstOp, OrigOp);
+ };
+ return true;
+}
+
bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI,
SmallVector<Register> &Ops) {
assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
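
matchFreezeOfSingleMaybePoisonOperand pushes a freeze from an instruction's result onto its only operand that might be poison, keeping the result well-defined while freezing as little as possible. A toy model of the soundness argument, using std::optional as a stand-in for poison (this is not GlobalISel code; freeze here arbitrarily picks 0):

#include <cassert>
#include <optional>

using Value = std::optional<int>; // nullopt models poison

Value freeze(Value V) { return V ? V : Value(0); } // arbitrary fixed value

// An operation that propagates poison from either operand, like G_ADD.
Value add(Value A, Value B) {
  if (!A || !B)
    return std::nullopt;
  return *A + *B;
}

int main() {
  Value Known = 5;            // guaranteed not poison
  Value Maybe = std::nullopt; // the single maybe-poison operand

  Value Before = freeze(add(Known, Maybe));
  Value After = add(Known, freeze(Maybe));
  // Both forms are guaranteed non-poison. freeze's value is arbitrary,
  // so the rewrite is a refinement rather than a value-for-value
  // identity -- which is exactly why it must not be hoisted across a
  // PHI or G_UNMERGE_VALUES, where it would over-constrain other users.
  assert(Before.has_value() && After.has_value());
}
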
diff --git a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
index 14e1e1fdf01d..5acf35b37882 100644
--- a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
@@ -538,6 +538,13 @@ bool InlineAsmLowering::lowerInlineAsm(
}
}
+ // Add rounding control registers as implicit def for inline asm.
+ if (MF.getFunction().hasFnAttribute(Attribute::StrictFP)) {
+ ArrayRef<MCPhysReg> RCRegs = TLI->getRoundingControlRegisters();
+ for (MCPhysReg Reg : RCRegs)
+ Inst.addReg(Reg, RegState::ImplicitDefine);
+ }
+
if (auto Bundle = Call.getOperandBundle(LLVMContext::OB_convergencectrl)) {
auto *Token = Bundle->Inputs[0].get();
ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*Token);
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 40507845d8d8..c04f7208c61f 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -1296,7 +1296,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
MI.eraseFromParent();
return Legalized;
}
-
+ case TargetOpcode::G_CONSTANT_FOLD_BARRIER:
case TargetOpcode::G_FREEZE: {
if (TypeIdx != 0)
return UnableToLegalize;
@@ -1310,7 +1310,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
SmallVector<Register, 8> Parts;
for (unsigned i = 0; i < Unmerge->getNumDefs(); ++i) {
Parts.push_back(
- MIRBuilder.buildFreeze(NarrowTy, Unmerge.getReg(i)).getReg(0));
+ MIRBuilder.buildInstr(MI.getOpcode(), {NarrowTy}, {Unmerge.getReg(i)})
+ .getReg(0));
}
MIRBuilder.buildMergeLikeInstr(MI.getOperand(0).getReg(), Parts);
@@ -2515,6 +2516,7 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
return Legalized;
}
case TargetOpcode::G_FREEZE:
+ case TargetOpcode::G_CONSTANT_FOLD_BARRIER:
Observer.changingInstr(MI);
widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
widenScalarDst(MI, WideTy);
@@ -3970,7 +3972,7 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT LowerHintTy) {
// target can override this with custom lowering and calling the
// implementation functions.
LLT Ty = MRI.getType(MI.getOperand(0).getReg());
- if (LI.isLegalOrCustom({G_UMIN, Ty}))
+ if (LI.isLegalOrCustom({G_UMIN, Ty}) && LI.isLegalOrCustom({G_UMAX, Ty}))
return lowerAddSubSatToMinMax(MI);
return lowerAddSubSatToAddoSubo(MI);
}
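
The G_UMIN guard alone was too permissive: the min/max-based lowering also needs max-style clamping (the signed path clamps with both a min and a max), so the guard now checks G_UMAX legality as well, presumably as a proxy for general min/max support. Scalar sketches of the expansions, mirroring the comments in lowerAddSubSatToMinMax; this is a model, not the MIR-building code:

#include <algorithm>
#include <cassert>
#include <cstdint>

// The unsigned forms need only umin:
uint8_t uadd_sat(uint8_t A, uint8_t B) {
  return A + std::min<uint8_t>(uint8_t(~A), B); // a + umin(~a, b)
}
uint8_t usub_sat(uint8_t A, uint8_t B) {
  return A - std::min(A, B); // a - umin(a, b)
}
// The signed form clamps b into [lo, hi] with both a min and a max.
int8_t sadd_sat(int8_t A, int8_t B) {
  int Hi = 127 - std::max<int>(A, 0);  // headroom above
  int Lo = -128 - std::min<int>(A, 0); // headroom below
  return A + std::clamp<int>(B, Lo, Hi);
}

int main() {
  assert(uadd_sat(250, 10) == 255 && uadd_sat(10, 20) == 30);
  assert(usub_sat(10, 20) == 0 && usub_sat(20, 10) == 10);
  assert(sadd_sat(120, 100) == 127 && sadd_sat(-120, -100) == -128);
  assert(sadd_sat(-10, 5) == -5);
}
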
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index cd5dc0e01ed0..f455482e0294 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -1745,11 +1745,20 @@ static bool canCreateUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI,
UndefPoisonKind Kind) {
MachineInstr *RegDef = MRI.getVRegDef(Reg);
+ if (auto *GMI = dyn_cast<GenericMachineInstr>(RegDef)) {
+ if (ConsiderFlagsAndMetadata && includesPoison(Kind) &&
+ GMI->hasPoisonGeneratingFlags())
+ return true;
+ } else {
+ // Conservatively return true.
+ return true;
+ }
+
switch (RegDef->getOpcode()) {
case TargetOpcode::G_FREEZE:
return false;
default:
- return true;
+ return !isa<GCastOp>(RegDef) && !isa<GBinOp>(RegDef);
}
}
@@ -1767,8 +1776,17 @@ static bool isGuaranteedNotToBeUndefOrPoison(Register Reg,
return true;
case TargetOpcode::G_IMPLICIT_DEF:
return !includesUndef(Kind);
- default:
- return false;
+ default: {
+ auto MOCheck = [&](const MachineOperand &MO) {
+ if (!MO.isReg())
+ return true;
+ return ::isGuaranteedNotToBeUndefOrPoison(MO.getReg(), MRI, Depth + 1,
+ Kind);
+ };
+ return !::canCreateUndefOrPoison(Reg, MRI,
+ /*ConsiderFlagsAndMetadata=*/true, Kind) &&
+ all_of(RegDef->uses(), MOCheck);
+ }
}
}
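
The new default cases give the GlobalISel queries the same recursive structure as their IR counterparts in ValueTracking: a register is known non-poison when its defining instruction cannot introduce poison (including via flags) and every register operand is itself known non-poison, with a depth cutoff. A toy expression-tree model of that rule, under obviously simplified assumptions:

#include <cassert>
#include <vector>

struct Node {
  bool CanCreatePoison;    // e.g. an nsw add: yes; freeze: no
  bool KnownNonPoisonLeaf; // constants, frozen values, ...
  std::vector<const Node *> Ops;
};

bool isGuaranteedNonPoison(const Node &N, unsigned Depth = 0) {
  if (Depth > 6)  // bounded recursion, like the real query
    return false; // conservatively give up
  if (N.Ops.empty())
    return N.KnownNonPoisonLeaf;
  if (N.CanCreatePoison)
    return false;
  for (const Node *Op : N.Ops)
    if (!isGuaranteedNonPoison(*Op, Depth + 1))
      return false;
  return true;
}

int main() {
  Node C{/*CanCreatePoison=*/false, /*KnownNonPoisonLeaf=*/true, {}};
  Node Add{/*CanCreatePoison=*/false, false, {&C, &C}};
  Node NswAdd{/*CanCreatePoison=*/true, false, {&C, &C}};
  assert(isGuaranteedNonPoison(Add));
  assert(!isGuaranteedNonPoison(NswAdd));
}
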
diff --git a/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp b/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
index a9b59e738c00..fc4be84bca10 100644
--- a/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
+++ b/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
@@ -64,10 +64,10 @@ struct VectorInfo;
struct InterleavedLoadCombineImpl {
public:
InterleavedLoadCombineImpl(Function &F, DominatorTree &DT, MemorySSA &MSSA,
+ const TargetTransformInfo &TTI,
const TargetMachine &TM)
: F(F), DT(DT), MSSA(MSSA),
- TLI(*TM.getSubtargetImpl(F)->getTargetLowering()),
- TTI(TM.getTargetTransformInfo(F)) {}
+ TLI(*TM.getSubtargetImpl(F)->getTargetLowering()), TTI(TTI) {}
/// Scan the function for interleaved load candidates and execute the
/// replacement if applicable.
@@ -87,7 +87,7 @@ private:
const TargetLowering &TLI;
/// Target Transform Information
- const TargetTransformInfo TTI;
+ const TargetTransformInfo &TTI;
/// Find the instruction in sets LIs that dominates all others, return nullptr
/// if there is none.
@@ -1329,6 +1329,7 @@ struct InterleavedLoadCombine : public FunctionPass {
return InterleavedLoadCombineImpl(
F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
getAnalysis<MemorySSAWrapperPass>().getMSSA(),
+ getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F),
TPC->getTM<TargetMachine>())
.run();
}
@@ -1336,6 +1337,7 @@ struct InterleavedLoadCombine : public FunctionPass {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MemorySSAWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
FunctionPass::getAnalysisUsage(AU);
}
@@ -1348,7 +1350,8 @@ InterleavedLoadCombinePass::run(Function &F, FunctionAnalysisManager &FAM) {
auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
auto &MemSSA = FAM.getResult<MemorySSAAnalysis>(F).getMSSA();
- bool Changed = InterleavedLoadCombineImpl(F, DT, MemSSA, *TM).run();
+ auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
+ bool Changed = InterleavedLoadCombineImpl(F, DT, MemSSA, TTI, *TM).run();
return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}
@@ -1360,6 +1363,7 @@ INITIALIZE_PASS_BEGIN(
false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(
InterleavedLoadCombine, DEBUG_TYPE,
"Combine interleaved loads into wide loads and shufflevector instructions",
diff --git a/llvm/lib/CodeGen/LiveRangeEdit.cpp b/llvm/lib/CodeGen/LiveRangeEdit.cpp
index 643370f0573d..7b7b5459ad7b 100644
--- a/llvm/lib/CodeGen/LiveRangeEdit.cpp
+++ b/llvm/lib/CodeGen/LiveRangeEdit.cpp
@@ -414,7 +414,7 @@ void LiveRangeEdit::eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink) {
DeadRemats->insert(MI);
const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
MI->substituteRegister(Dest, NewLI.reg(), 0, TRI);
- MI->getOperand(0).setIsDead(true);
+ assert(MI->registerDefIsDead(NewLI.reg(), &TRI));
} else {
if (TheDelegate)
TheDelegate->LRE_WillEraseInstruction(MI);
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index 78d581c8cead..03e892a5e0d2 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -1664,7 +1664,8 @@ void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
if (ShouldTrackPressure) {
// Update top scheduled pressure.
RegisterOperands RegOpers;
- RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
+ RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks,
+ /*IgnoreDead=*/false);
if (ShouldTrackLaneMasks) {
// Adjust liveness and add missing dead+read-undef flags.
SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
@@ -1698,7 +1699,8 @@ void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
}
if (ShouldTrackPressure) {
RegisterOperands RegOpers;
- RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
+ RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks,
+ /*IgnoreDead=*/false);
if (ShouldTrackLaneMasks) {
// Adjust liveness and add missing dead+read-undef flags.
SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
@@ -3775,6 +3777,21 @@ SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
}
} while (SU->isScheduled);
+ // If IsTopNode, then SU is in Top.Available and must be removed. Otherwise,
+ // if isTopReady(), then SU is in either Top.Available or Top.Pending.
+ // If !IsTopNode, then SU is in Bot.Available and must be removed. Otherwise,
+ // if isBottomReady(), then SU is in either Bot.Available or Bot.Pending.
+ //
+ // It is coincidental when !IsTopNode && isTopReady or when IsTopNode &&
+ // isBottomReady. That is, it didn't factor into the decision to choose SU
+ // because it isTopReady or isBottomReady, respectively. In fact, if the
+ // RegionPolicy is OnlyTopDown or OnlyBottomUp, then the Bot queues and Top
+ // queues respectively contain the original roots and don't get updated when
+ // picking a node. So if SU isTopReady on an OnlyBottomUp pick, then it was
+ // because we scheduled everything but the top roots. Conversely, if SU
+ // isBottomReady on OnlyTopDown, then it was because we scheduled everything
+ // but the bottom roots. If it's in a queue even coincidentally, it should be
+ // removed so it does not get re-picked in a subsequent pickNode call.
if (SU->isTopReady())
Top.removeReady(SU);
if (SU->isBottomReady())
diff --git a/llvm/lib/CodeGen/ParallelCG.cpp b/llvm/lib/CodeGen/ParallelCG.cpp
index ceb64b2badab..8ab64f8afe6e 100644
--- a/llvm/lib/CodeGen/ParallelCG.cpp
+++ b/llvm/lib/CodeGen/ParallelCG.cpp
@@ -79,9 +79,7 @@ void llvm::splitCodeGen(
[TMFactory, FileType, ThreadOS](const SmallString<0> &BC) {
LLVMContext Ctx;
Expected<std::unique_ptr<Module>> MOrErr = parseBitcodeFile(
- MemoryBufferRef(StringRef(BC.data(), BC.size()),
- "<split-module>"),
- Ctx);
+ MemoryBufferRef(BC.str(), "<split-module>"), Ctx);
if (!MOrErr)
report_fatal_error("Failed to read bitcode");
std::unique_ptr<Module> MPartInCtx = std::move(MOrErr.get());
diff --git a/llvm/lib/CodeGen/RegisterPressure.cpp b/llvm/lib/CodeGen/RegisterPressure.cpp
index 3fa22447f416..9a7eb49666b2 100644
--- a/llvm/lib/CodeGen/RegisterPressure.cpp
+++ b/llvm/lib/CodeGen/RegisterPressure.cpp
@@ -873,7 +873,7 @@ void RegPressureTracker::recede(SmallVectorImpl<RegisterMaskPair> *LiveUses) {
const MachineInstr &MI = *CurrPos;
RegisterOperands RegOpers;
- RegOpers.collect(MI, *TRI, *MRI, TrackLaneMasks, false);
+ RegOpers.collect(MI, *TRI, *MRI, TrackLaneMasks, /*IgnoreDead=*/false);
if (TrackLaneMasks) {
SlotIndex SlotIdx = LIS->getInstructionIndex(*CurrPos).getRegSlot();
RegOpers.adjustLaneLiveness(*LIS, *MRI, SlotIdx);
@@ -1041,7 +1041,7 @@ void RegPressureTracker::bumpUpwardPressure(const MachineInstr *MI) {
// Account for register pressure similar to RegPressureTracker::recede().
RegisterOperands RegOpers;
RegOpers.collect(*MI, *TRI, *MRI, TrackLaneMasks, /*IgnoreDead=*/true);
- assert(RegOpers.DeadDefs.size() == 0);
+ assert(RegOpers.DeadDefs.empty());
if (TrackLaneMasks)
RegOpers.adjustLaneLiveness(*LIS, *MRI, SlotIdx);
else if (RequireIntervals)
@@ -1290,7 +1290,7 @@ void RegPressureTracker::bumpDownwardPressure(const MachineInstr *MI) {
// Account for register pressure similar to RegPressureTracker::recede().
RegisterOperands RegOpers;
- RegOpers.collect(*MI, *TRI, *MRI, TrackLaneMasks, false);
+ RegOpers.collect(*MI, *TRI, *MRI, TrackLaneMasks, /*IgnoreDead=*/false);
if (TrackLaneMasks)
RegOpers.adjustLaneLiveness(*LIS, *MRI, SlotIdx);
diff --git a/llvm/lib/CodeGen/ScheduleDAG.cpp b/llvm/lib/CodeGen/ScheduleDAG.cpp
index de8e6f63794d..8d9a5041fc2f 100644
--- a/llvm/lib/CodeGen/ScheduleDAG.cpp
+++ b/llvm/lib/CodeGen/ScheduleDAG.cpp
@@ -331,8 +331,10 @@ void SUnit::biasCriticalPath() {
unsigned MaxDepth = BestI->getSUnit()->getDepth();
for (SUnit::pred_iterator I = std::next(BestI), E = Preds.end(); I != E;
++I) {
- if (I->getKind() == SDep::Data && I->getSUnit()->getDepth() > MaxDepth)
+ if (I->getKind() == SDep::Data && I->getSUnit()->getDepth() > MaxDepth) {
+ MaxDepth = I->getSUnit()->getDepth();
BestI = I;
+ }
}
if (BestI != Preds.begin())
std::swap(*Preds.begin(), *BestI);
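
The biasCriticalPath change is a classic running-maximum repair: without updating MaxDepth inside the loop, every later predecessor was compared against the first predecessor's depth, so the scan ended on the last data pred deeper than the first one rather than on the deepest one. The corrected loop shape, reduced to a standalone argmax:

#include <cassert>
#include <vector>

int argmax(const std::vector<int> &Depths) {
  int Best = 0;
  int MaxDepth = Depths[0];
  for (int I = 1, E = (int)Depths.size(); I != E; ++I) {
    if (Depths[I] > MaxDepth) {
      MaxDepth = Depths[I]; // the line the patch adds
      Best = I;
    }
  }
  return Best;
}

int main() {
  // Without the MaxDepth update this returns 3 (the last element that
  // beats Depths[0]); with it, 2 (the index of the true maximum, 9).
  assert(argmax({3, 5, 9, 7}) == 2);
}
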
diff --git a/llvm/lib/CodeGen/SelectOptimize.cpp b/llvm/lib/CodeGen/SelectOptimize.cpp
index 2e03ae6aec94..0a5f0a861d48 100644
--- a/llvm/lib/CodeGen/SelectOptimize.cpp
+++ b/llvm/lib/CodeGen/SelectOptimize.cpp
@@ -130,7 +130,11 @@ public:
class SelectLike {
SelectLike(Instruction *I) : I(I) {}
+ /// The select (or `or`) instruction.
Instruction *I;
+ /// Whether this select is inverted, i.e. "not(cond), FalseVal, TrueVal",
+ /// rather than the original "cond, TrueVal, FalseVal".
+ bool Inverted = false;
public:
/// Match a select or select-like instruction, returning a SelectLike.
@@ -153,14 +157,22 @@ public:
bool isValid() { return I; }
operator bool() { return isValid(); }
+ /// Invert the select by inverting the condition and switching the operands.
+ void setInverted() {
+ assert(!Inverted && "Trying to invert an inverted SelectLike");
+ assert(isa<Instruction>(getCondition()) &&
+ cast<Instruction>(getCondition())->getOpcode() ==
+ Instruction::Xor);
+ Inverted = true;
+ }
+ bool isInverted() const { return Inverted; }
+
Instruction *getI() { return I; }
const Instruction *getI() const { return I; }
Type *getType() const { return I->getType(); }
- /// Return the condition for the SelectLike instruction. For example the
- /// condition of a select or c in `or(zext(c), x)`
- Value *getCondition() const {
+ Value *getNonInvertedCondition() const {
if (auto *Sel = dyn_cast<SelectInst>(I))
return Sel->getCondition();
// Or(zext) case
@@ -177,11 +189,24 @@ public:
llvm_unreachable("Unhandled case in getCondition");
}
+ /// Return the condition for the SelectLike instruction. For example, the
+ /// condition of a select, or `c` in `or(zext(c), x)`.
+ Value *getCondition() const {
+ Value *CC = getNonInvertedCondition();
+ // For inverted conditions, CC was verified at creation time (in
+ // setInverted) to be a not (xor) instruction.
+ if (Inverted)
+ return cast<Instruction>(CC)->getOperand(0);
+ return CC;
+ }
+
/// Return the true value for the SelectLike instruction. Note this may not
/// exist for all SelectLike instructions. For example, for `or(zext(c), x)`
/// the true value would be `or(x,1)`. As this value does not exist, nullptr
/// is returned.
- Value *getTrueValue() const {
+ Value *getTrueValue(bool HonorInverts = true) const {
+ if (Inverted && HonorInverts)
+ return getFalseValue(/*HonorInverts=*/false);
if (auto *Sel = dyn_cast<SelectInst>(I))
return Sel->getTrueValue();
// Or(zext) case - The true value is Or(X), so return nullptr as the value
@@ -195,7 +220,9 @@ public:
/// Return the false value for the SelectLike instruction. For example the
/// getFalseValue of a select or `x` in `or(zext(c), x)` (which is
/// `select(c, x|1, x)`)
- Value *getFalseValue() const {
+ Value *getFalseValue(bool HonorInverts = true) const {
+ if (Inverted && HonorInverts)
+ return getTrueValue(/*HonorInverts=*/false);
if (auto *Sel = dyn_cast<SelectInst>(I))
return Sel->getFalseValue();
// Or(zext) case - return the operand which is not the zext.
@@ -216,8 +243,8 @@ public:
/// InstCostMap. This may need to be generated for select-like instructions.
Scaled64 getTrueOpCost(DenseMap<const Instruction *, CostInfo> &InstCostMap,
const TargetTransformInfo *TTI) {
- if (auto *Sel = dyn_cast<SelectInst>(I))
- if (auto *I = dyn_cast<Instruction>(Sel->getTrueValue()))
+ if (isa<SelectInst>(I))
+ if (auto *I = dyn_cast<Instruction>(getTrueValue()))
return InstCostMap.contains(I) ? InstCostMap[I].NonPredCost
: Scaled64::getZero();
@@ -242,8 +269,8 @@ public:
Scaled64
getFalseOpCost(DenseMap<const Instruction *, CostInfo> &InstCostMap,
const TargetTransformInfo *TTI) {
- if (auto *Sel = dyn_cast<SelectInst>(I))
- if (auto *I = dyn_cast<Instruction>(Sel->getFalseValue()))
+ if (isa<SelectInst>(I))
+ if (auto *I = dyn_cast<Instruction>(getFalseValue()))
return InstCostMap.contains(I) ? InstCostMap[I].NonPredCost
: Scaled64::getZero();
@@ -510,9 +537,10 @@ getTrueOrFalseValue(SelectOptimizeImpl::SelectLike SI, bool isTrue,
for (SelectInst *DefSI = dyn_cast<SelectInst>(SI.getI());
DefSI != nullptr && Selects.count(DefSI);
DefSI = dyn_cast<SelectInst>(V)) {
- assert(DefSI->getCondition() == SI.getCondition() &&
- "The condition of DefSI does not match with SI");
- V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
+ if (DefSI->getCondition() == SI.getCondition())
+ V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
+ else // Handle inverted SI
+ V = (!isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
}
if (isa<BinaryOperator>(SI.getI())) {
@@ -632,18 +660,19 @@ void SelectOptimizeImpl::convertProfitableSIGroups(SelectGroups &ProfSIGroups) {
// Delete the unconditional branch that was just created by the split.
StartBlock->getTerminator()->eraseFromParent();
- // Move any debug/pseudo instructions that were in-between the select
- // group to the newly-created end block.
- SmallVector<Instruction *, 2> DebugPseudoINS;
+ // Move any debug/pseudo instructions, and any not instructions, that were
+ // interleaved with the select group to the newly-created end block.
+ SmallVector<Instruction *, 2> SinkInstrs;
auto DIt = SI.getI()->getIterator();
while (&*DIt != LastSI.getI()) {
if (DIt->isDebugOrPseudoInst())
- DebugPseudoINS.push_back(&*DIt);
+ SinkInstrs.push_back(&*DIt);
+ if (match(&*DIt, m_Not(m_Specific(SI.getCondition()))))
+ SinkInstrs.push_back(&*DIt);
DIt++;
}
- for (auto *DI : DebugPseudoINS) {
+ for (auto *DI : SinkInstrs)
DI->moveBeforePreserving(&*EndBlock->getFirstInsertionPt());
- }
// Duplicate implementation for DbgRecords, the non-instruction debug-info
// format. Helper lambda for moving DbgRecords to the end block.
@@ -765,6 +794,13 @@ void SelectOptimizeImpl::collectSelectGroups(BasicBlock &BB,
++BBIt;
continue;
}
+
+ // Skip not(select(..)) if the not is part of the same select group.
+ if (match(NI, m_Not(m_Specific(SI.getCondition())))) {
+ ++BBIt;
+ continue;
+ }
+
// We only allow selects in the same group, not other select-like
// instructions.
if (!isa<SelectInst>(NI))
@@ -773,6 +809,10 @@ void SelectOptimizeImpl::collectSelectGroups(BasicBlock &BB,
SelectLike NSI = SelectLike::match(NI);
if (NSI && SI.getCondition() == NSI.getCondition()) {
SIGroup.push_back(NSI);
+ } else if (NSI && match(NSI.getCondition(),
+ m_Not(m_Specific(SI.getCondition())))) {
+ NSI.setInverted();
+ SIGroup.push_back(NSI);
} else
break;
++BBIt;
@@ -783,6 +823,12 @@ void SelectOptimizeImpl::collectSelectGroups(BasicBlock &BB,
if (!isSelectKindSupported(SI))
continue;
+ LLVM_DEBUG({
+ dbgs() << "New Select group with\n";
+ for (auto SI : SIGroup)
+ dbgs() << " " << *SI.getI() << "\n";
+ });
+
SIGroups.push_back(SIGroup);
}
}
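
All of the Inverted plumbing above rests on one identity: select(!c, a, b) == select(c, b, a). Recording the inversion and swapping the reported true/false values is what lets a not(cond) select join the same group as its un-inverted siblings. A two-line check of the identity:

#include <cassert>

int selectVal(bool C, int TrueV, int FalseV) { return C ? TrueV : FalseV; }

int main() {
  // select(!c, a, b) == select(c, b, a) for both condition values.
  for (bool C : {false, true})
    assert(selectVal(!C, 1, 2) == selectVal(C, 2, 1));
}
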
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 2b181cd3ab1d..93d866384b48 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5211,30 +5211,28 @@ SDValue DAGCombiner::visitAVG(SDNode *N) {
!DAG.isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(Opcode, DL, N->getVTList(), N1, N0);
- if (VT.isVector()) {
+ if (VT.isVector())
if (SDValue FoldedVOp = SimplifyVBinOp(N, DL))
return FoldedVOp;
- // fold (avgfloor x, 0) -> x >> 1
- if (ISD::isConstantSplatVectorAllZeros(N1.getNode())) {
- if (Opcode == ISD::AVGFLOORS)
- return DAG.getNode(ISD::SRA, DL, VT, N0, DAG.getConstant(1, DL, VT));
- if (Opcode == ISD::AVGFLOORU)
- return DAG.getNode(ISD::SRL, DL, VT, N0, DAG.getConstant(1, DL, VT));
- }
- }
-
// fold (avg x, undef) -> x
if (N0.isUndef())
return N1;
if (N1.isUndef())
return N0;
- // Fold (avg x, x) --> x
+ // fold (avg x, x) --> x
if (N0 == N1 && Level >= AfterLegalizeTypes)
return N0;
- // TODO If we use avg for scalars anywhere, we can add (avgfl x, 0) -> x >> 1
+ // fold (avgfloor x, 0) -> x >> 1
+ SDValue X;
+ if (sd_match(N, m_c_BinOp(ISD::AVGFLOORS, m_Value(X), m_Zero())))
+ return DAG.getNode(ISD::SRA, DL, VT, X,
+ DAG.getShiftAmountConstant(1, VT, DL));
+ if (sd_match(N, m_c_BinOp(ISD::AVGFLOORU, m_Value(X), m_Zero())))
+ return DAG.getNode(ISD::SRL, DL, VT, X,
+ DAG.getShiftAmountConstant(1, VT, DL));
return SDValue();
}
@@ -5255,24 +5253,25 @@ SDValue DAGCombiner::visitABD(SDNode *N) {
!DAG.isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(Opcode, DL, N->getVTList(), N1, N0);
- if (VT.isVector()) {
+ if (VT.isVector())
if (SDValue FoldedVOp = SimplifyVBinOp(N, DL))
return FoldedVOp;
- // fold (abds x, 0) -> abs x
- // fold (abdu x, 0) -> x
- if (ISD::isConstantSplatVectorAllZeros(N1.getNode())) {
- if (Opcode == ISD::ABDS)
- return DAG.getNode(ISD::ABS, DL, VT, N0);
- if (Opcode == ISD::ABDU)
- return N0;
- }
- }
-
// fold (abd x, undef) -> 0
if (N0.isUndef() || N1.isUndef())
return DAG.getConstant(0, DL, VT);
+ SDValue X;
+
+ // fold (abds x, 0) -> abs x
+ if (sd_match(N, m_c_BinOp(ISD::ABDS, m_Value(X), m_Zero())) &&
+ (!LegalOperations || hasOperation(ISD::ABS, VT)))
+ return DAG.getNode(ISD::ABS, DL, VT, X);
+
+ // fold (abdu x, 0) -> x
+ if (sd_match(N, m_c_BinOp(ISD::ABDU, m_Value(X), m_Zero())))
+ return X;
+
// fold (abds x, y) -> (abdu x, y) iff both args are known positive
if (Opcode == ISD::ABDS && hasOperation(ISD::ABDU, VT) &&
DAG.SignBitIsZero(N0) && DAG.SignBitIsZero(N1))
@@ -10746,6 +10745,7 @@ SDValue DAGCombiner::visitFunnelShift(SDNode *N) {
SDValue N2 = N->getOperand(2);
bool IsFSHL = N->getOpcode() == ISD::FSHL;
unsigned BitWidth = VT.getScalarSizeInBits();
+ SDLoc DL(N);
// fold (fshl N0, N1, 0) -> N0
// fold (fshr N0, N1, 0) -> N1
@@ -10765,8 +10765,8 @@ SDValue DAGCombiner::visitFunnelShift(SDNode *N) {
// fold (fsh* N0, N1, c) -> (fsh* N0, N1, c % BitWidth)
if (Cst->getAPIntValue().uge(BitWidth)) {
uint64_t RotAmt = Cst->getAPIntValue().urem(BitWidth);
- return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N0, N1,
- DAG.getConstant(RotAmt, SDLoc(N), ShAmtTy));
+ return DAG.getNode(N->getOpcode(), DL, VT, N0, N1,
+ DAG.getConstant(RotAmt, DL, ShAmtTy));
}
unsigned ShAmt = Cst->getZExtValue();
@@ -10778,13 +10778,13 @@ SDValue DAGCombiner::visitFunnelShift(SDNode *N) {
// fold fshl(N0, undef_or_zero, C) -> shl(N0, C)
// fold fshr(N0, undef_or_zero, C) -> shl(N0, BW-C)
if (IsUndefOrZero(N0))
- return DAG.getNode(ISD::SRL, SDLoc(N), VT, N1,
- DAG.getConstant(IsFSHL ? BitWidth - ShAmt : ShAmt,
- SDLoc(N), ShAmtTy));
+ return DAG.getNode(
+ ISD::SRL, DL, VT, N1,
+ DAG.getConstant(IsFSHL ? BitWidth - ShAmt : ShAmt, DL, ShAmtTy));
if (IsUndefOrZero(N1))
- return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0,
- DAG.getConstant(IsFSHL ? ShAmt : BitWidth - ShAmt,
- SDLoc(N), ShAmtTy));
+ return DAG.getNode(
+ ISD::SHL, DL, VT, N0,
+ DAG.getConstant(IsFSHL ? ShAmt : BitWidth - ShAmt, DL, ShAmtTy));
// fold (fshl ld1, ld0, c) -> (ld0[ofs]) iff ld0 and ld1 are consecutive.
// fold (fshr ld1, ld0, c) -> (ld0[ofs]) iff ld0 and ld1 are consecutive.
@@ -10833,18 +10833,19 @@ SDValue DAGCombiner::visitFunnelShift(SDNode *N) {
if (isPowerOf2_32(BitWidth)) {
APInt ModuloBits(N2.getScalarValueSizeInBits(), BitWidth - 1);
if (IsUndefOrZero(N0) && !IsFSHL && DAG.MaskedValueIsZero(N2, ~ModuloBits))
- return DAG.getNode(ISD::SRL, SDLoc(N), VT, N1, N2);
+ return DAG.getNode(ISD::SRL, DL, VT, N1, N2);
if (IsUndefOrZero(N1) && IsFSHL && DAG.MaskedValueIsZero(N2, ~ModuloBits))
- return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, N2);
+ return DAG.getNode(ISD::SHL, DL, VT, N0, N2);
}
// fold (fshl N0, N0, N2) -> (rotl N0, N2)
// fold (fshr N0, N0, N2) -> (rotr N0, N2)
- // TODO: Investigate flipping this rotate if only one is legal, if funnel shift
- // is legal as well we might be better off avoiding non-constant (BW - N2).
+ // TODO: Investigate flipping this rotate if only one is legal.
+ // If funnel shift is legal as well we might be better off avoiding
+ // non-constant (BW - N2).
unsigned RotOpc = IsFSHL ? ISD::ROTL : ISD::ROTR;
if (N0 == N1 && hasOperation(RotOpc, VT))
- return DAG.getNode(RotOpc, SDLoc(N), VT, N0, N2);
+ return DAG.getNode(RotOpc, DL, VT, N0, N2);
// Simplify, based on bits shifted out of N0/N1.
if (SimplifyDemandedBits(SDValue(N, 0)))
@@ -17386,15 +17387,20 @@ SDValue DAGCombiner::visitFREM(SDNode *N) {
TLI.isOperationLegalOrCustom(ISD::FMUL, VT) &&
TLI.isOperationLegalOrCustom(ISD::FDIV, VT) &&
TLI.isOperationLegalOrCustom(ISD::FTRUNC, VT) &&
- DAG.isKnownToBeAPowerOfTwoFP(N1) &&
- (Flags.hasNoSignedZeros() || DAG.cannotBeOrderedNegativeFP(N0))) {
+ DAG.isKnownToBeAPowerOfTwoFP(N1)) {
+ bool NeedsCopySign =
+ !Flags.hasNoSignedZeros() && !DAG.cannotBeOrderedNegativeFP(N0);
SDValue Div = DAG.getNode(ISD::FDIV, DL, VT, N0, N1);
SDValue Rnd = DAG.getNode(ISD::FTRUNC, DL, VT, Div);
- if (TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT))
- return DAG.getNode(ISD::FMA, DL, VT, DAG.getNode(ISD::FNEG, DL, VT, Rnd),
- N1, N0);
- SDValue Mul = DAG.getNode(ISD::FMUL, DL, VT, Rnd, N1);
- return DAG.getNode(ISD::FSUB, DL, VT, N0, Mul);
+ SDValue MLA;
+ if (TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
+ MLA = DAG.getNode(ISD::FMA, DL, VT, DAG.getNode(ISD::FNEG, DL, VT, Rnd),
+ N1, N0);
+ } else {
+ SDValue Mul = DAG.getNode(ISD::FMUL, DL, VT, Rnd, N1);
+ MLA = DAG.getNode(ISD::FSUB, DL, VT, N0, Mul);
+ }
+ return NeedsCopySign ? DAG.getNode(ISD::FCOPYSIGN, DL, VT, MLA, N0) : MLA;
}
return SDValue();
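
Two of the folds in this file are easy to sanity-check outside the DAG. Below is a minimal standalone sketch (plain C++, no SelectionDAG APIs): the (avgfloor x, 0) -> x >> 1 fold via the carry-free average identity, and the FREM-by-power-of-two expansion including the NeedsCopySign path:

#include <cassert>
#include <cmath>
#include <cstdint>

// avgfloor identity: a + b == (a ^ b) + 2 * (a & b), so the unsigned
// floor average is (a & b) + ((a ^ b) >> 1); with b == 0 it degenerates
// to a >> 1 -- the fold visitAVG now applies via sd_match.
uint32_t avgFloorU(uint32_t A, uint32_t B) {
  return (A & B) + ((A ^ B) >> 1);
}

// frem(x, y) for y a power of two: x - trunc(x / y) * y (an fma when
// profitable), with a final copysign to restore x's sign when the
// result magnitude is zero (the NeedsCopySign path in visitFREM).
double expandFRem(double X, double Y) {
  double Rem = X - std::trunc(X / Y) * Y;
  return std::copysign(Rem, X);
}

int main() {
  assert(avgFloorU(0xDEADBEEF, 0) == (0xDEADBEEF >> 1));
  assert(expandFRem(9.5, 4.0) == std::fmod(9.5, 4.0)); // 1.5
  // fmod(-8, 4) is -0.0; the bare subtraction yields +0.0, so only the
  // copysign keeps the sign of x.
  assert(std::signbit(expandFRem(-8.0, 4.0)));
}
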
diff --git a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 8fb6b11b8805..de22d230b1c3 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -214,6 +214,10 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
if (CI->isMustTailCall() && Fn->isVarArg())
MF->getFrameInfo().setHasMustTailInVarArgFunc(true);
}
+
+ // Determine if there is a call to setjmp in the machine function.
+ if (Call->hasFnAttr(Attribute::ReturnsTwice))
+ MF->setExposesReturnsTwice(true);
}
// Mark values used outside their block as exported, by allocating
@@ -222,8 +226,10 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
if (!isa<AllocaInst>(I) || !StaticAllocaMap.count(cast<AllocaInst>(&I)))
InitializeRegForValue(&I);
- // Decide the preferred extend type for a value.
- PreferredExtendType[&I] = getPreferredExtendForValue(&I);
+ // Decide the preferred extend type for a value. This iterates over all
+ // users and therefore isn't cheap, so don't do this at O0.
+ if (DAG->getOptLevel() != CodeGenOptLevel::None)
+ PreferredExtendType[&I] = getPreferredExtendForValue(&I);
}
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 759368a67a16..36738961382e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -1412,6 +1412,13 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
}
}
+ // Add rounding control registers as implicit def for inline asm.
+ if (MF->getFunction().hasFnAttribute(Attribute::StrictFP)) {
+ ArrayRef<MCPhysReg> RCRegs = TLI->getRoundingControlRegisters();
+ for (MCPhysReg Reg : RCRegs)
+ MIB.addReg(Reg, RegState::ImplicitDefine);
+ }
+
// GCC inline assembly allows input operands to also be early-clobber
// output operands (so long as the operand is written only after it's
// used), but this does not match the semantics of our early-clobber flag.
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 0543c211c497..bfc2273c9425 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -2050,8 +2050,15 @@ SDValue SelectionDAGLegalize::ExpandSPLAT_VECTOR(SDNode *Node) {
std::pair<SDValue, SDValue> SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
TargetLowering::ArgListTy &&Args,
bool isSigned) {
- SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
- TLI.getPointerTy(DAG.getDataLayout()));
+ EVT CodePtrTy = TLI.getPointerTy(DAG.getDataLayout());
+ SDValue Callee;
+ if (const char *LibcallName = TLI.getLibcallName(LC))
+ Callee = DAG.getExternalSymbol(LibcallName, CodePtrTy);
+ else {
+ Callee = DAG.getUNDEF(CodePtrTy);
+ DAG.getContext()->emitError(Twine("no libcall available for ") +
+ Node->getOperationName(&DAG));
+ }
EVT RetVT = Node->getValueType(0);
Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 98f64947bcab..8fda35f00863 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -107,9 +107,9 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::SIGN_EXTEND_INREG:
Res = PromoteIntRes_SIGN_EXTEND_INREG(N); break;
case ISD::SRA:
- case ISD::VP_ASHR: Res = PromoteIntRes_SRA(N); break;
+ case ISD::VP_SRA: Res = PromoteIntRes_SRA(N); break;
case ISD::SRL:
- case ISD::VP_LSHR: Res = PromoteIntRes_SRL(N); break;
+ case ISD::VP_SRL: Res = PromoteIntRes_SRL(N); break;
case ISD::VP_TRUNCATE:
case ISD::TRUNCATE: Res = PromoteIntRes_TRUNCATE(N); break;
case ISD::UNDEF: Res = PromoteIntRes_UNDEF(N); break;
@@ -573,7 +573,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BSWAP(SDNode *N) {
ShAmt);
SDValue Mask = N->getOperand(1);
SDValue EVL = N->getOperand(2);
- return DAG.getNode(ISD::VP_LSHR, dl, NVT,
+ return DAG.getNode(ISD::VP_SRL, dl, NVT,
DAG.getNode(ISD::VP_BSWAP, dl, NVT, Op, Mask, EVL), ShAmt,
Mask, EVL);
}
@@ -601,7 +601,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BITREVERSE(SDNode *N) {
DAG.getNode(ISD::BITREVERSE, dl, NVT, Op), ShAmt);
SDValue Mask = N->getOperand(1);
SDValue EVL = N->getOperand(2);
- return DAG.getNode(ISD::VP_LSHR, dl, NVT,
+ return DAG.getNode(ISD::VP_SRL, dl, NVT,
DAG.getNode(ISD::VP_BITREVERSE, dl, NVT, Op, Mask, EVL),
ShAmt, Mask, EVL);
}
@@ -1405,7 +1405,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SRA(SDNode *N) {
SDValue RHS = N->getOperand(1);
if (getTypeAction(RHS.getValueType()) == TargetLowering::TypePromoteInteger)
RHS = ZExtPromotedInteger(RHS);
- if (N->getOpcode() != ISD::VP_ASHR)
+ if (N->getOpcode() != ISD::VP_SRA)
return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS);
return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS,
N->getOperand(2), N->getOperand(3));
@@ -1417,7 +1417,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SRL(SDNode *N) {
SDValue RHS = N->getOperand(1);
if (getTypeAction(RHS.getValueType()) == TargetLowering::TypePromoteInteger)
RHS = ZExtPromotedInteger(RHS);
- if (N->getOpcode() != ISD::VP_LSHR)
+ if (N->getOpcode() != ISD::VP_SRL)
return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS);
return DAG.getNode(N->getOpcode(), SDLoc(N), LHS.getValueType(), LHS, RHS,
N->getOperand(2), N->getOperand(3));
@@ -1511,13 +1511,12 @@ SDValue DAGTypeLegalizer::PromoteIntRes_VPFunnelShift(SDNode *N) {
!TLI.isOperationLegalOrCustom(Opcode, VT)) {
SDValue HiShift = DAG.getConstant(OldBits, DL, VT);
Hi = DAG.getNode(ISD::VP_SHL, DL, VT, Hi, HiShift, Mask, EVL);
- // FIXME: Replace it by vp operations.
- Lo = DAG.getZeroExtendInReg(Lo, DL, OldVT);
+ Lo = DAG.getVPZeroExtendInReg(Lo, Mask, EVL, DL, OldVT);
SDValue Res = DAG.getNode(ISD::VP_OR, DL, VT, Hi, Lo, Mask, EVL);
- Res = DAG.getNode(IsFSHR ? ISD::VP_LSHR : ISD::VP_SHL, DL, VT, Res, Amt,
+ Res = DAG.getNode(IsFSHR ? ISD::VP_SRL : ISD::VP_SHL, DL, VT, Res, Amt,
Mask, EVL);
if (!IsFSHR)
- Res = DAG.getNode(ISD::VP_LSHR, DL, VT, Res, HiShift, Mask, EVL);
+ Res = DAG.getNode(ISD::VP_SRL, DL, VT, Res, HiShift, Mask, EVL);
return Res;
}
@@ -2213,7 +2212,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_VP_SIGN_EXTEND(SDNode *N) {
// FIXME: There is no VP_SIGN_EXTEND_INREG so use a pair of shifts.
SDValue Shl = DAG.getNode(ISD::VP_SHL, dl, VT, Op, ShAmt, N->getOperand(1),
N->getOperand(2));
- return DAG.getNode(ISD::VP_ASHR, dl, VT, Shl, ShAmt, N->getOperand(1),
+ return DAG.getNode(ISD::VP_SRA, dl, VT, Shl, ShAmt, N->getOperand(1),
N->getOperand(2));
}
@@ -2375,10 +2374,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_VP_ZERO_EXTEND(SDNode *N) {
// FIXME: There is no VP_ANY_EXTEND yet.
Op = DAG.getNode(ISD::VP_ZERO_EXTEND, dl, VT, Op, N->getOperand(1),
N->getOperand(2));
- APInt Imm = APInt::getLowBitsSet(VT.getScalarSizeInBits(),
- N->getOperand(0).getScalarValueSizeInBits());
- return DAG.getNode(ISD::VP_AND, dl, VT, Op, DAG.getConstant(Imm, dl, VT),
- N->getOperand(1), N->getOperand(2));
+ return DAG.getVPZeroExtendInReg(Op, N->getOperand(1), N->getOperand(2), dl,
+ N->getOperand(0).getValueType());
}
SDValue DAGTypeLegalizer::PromoteIntOp_FIX(SDNode *N) {
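
Both VP hunks above now defer to the getVPZeroExtendInReg helper (added in SelectionDAG.cpp later in this patch), which reduces zero_extend_inreg to an AND against a low-bits mask with the VP mask and EVL threaded through. The underlying scalar identity, checked standalone:

#include <cassert>
#include <cstdint>

// zero_extend_inreg(v, OldBits) == v & ((1 << OldBits) - 1): keeping
// the low OldBits bits equals zero-extending the low OldBits-bit part.
uint32_t zextInReg(uint32_t V, unsigned OldBits) {
  uint32_t Mask = OldBits >= 32 ? ~0u : (1u << OldBits) - 1;
  return V & Mask;
}

int main() {
  assert(zextInReg(0xDEADBEEF, 8) == 0xEF);
  assert(zextInReg(0xDEADBEEF, 16) == 0xBEEF);
}
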
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index cd858003cf03..40e621f0db22 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1188,8 +1188,8 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::OR: case ISD::VP_OR:
case ISD::XOR: case ISD::VP_XOR:
case ISD::SHL: case ISD::VP_SHL:
- case ISD::SRA: case ISD::VP_ASHR:
- case ISD::SRL: case ISD::VP_LSHR:
+ case ISD::SRA: case ISD::VP_SRA:
+ case ISD::SRL: case ISD::VP_SRL:
case ISD::UREM: case ISD::VP_UREM:
case ISD::SREM: case ISD::VP_SREM:
case ISD::FREM: case ISD::VP_FREM:
@@ -2911,18 +2911,10 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_REVERSE(SDNode *N, SDValue &Lo,
void DAGTypeLegalizer::SplitVecRes_VECTOR_SPLICE(SDNode *N, SDValue &Lo,
SDValue &Hi) {
- EVT VT = N->getValueType(0);
SDLoc DL(N);
- EVT LoVT, HiVT;
- std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
-
SDValue Expanded = TLI.expandVectorSplice(N, DAG);
- Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, Expanded,
- DAG.getVectorIdxConstant(0, DL));
- Hi =
- DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, Expanded,
- DAG.getVectorIdxConstant(LoVT.getVectorMinNumElements(), DL));
+ std::tie(Lo, Hi) = DAG.SplitVector(Expanded, DL);
}
void DAGTypeLegalizer::SplitVecRes_VP_REVERSE(SDNode *N, SDValue &Lo,
@@ -2967,12 +2959,7 @@ void DAGTypeLegalizer::SplitVecRes_VP_REVERSE(SDNode *N, SDValue &Lo,
SDValue Load = DAG.getLoadVP(VT, DL, Store, StackPtr, Mask, EVL, LoadMMO);
- auto [LoVT, HiVT] = DAG.GetSplitDestVTs(VT);
- Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, Load,
- DAG.getVectorIdxConstant(0, DL));
- Hi =
- DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, Load,
- DAG.getVectorIdxConstant(LoVT.getVectorMinNumElements(), DL));
+ std::tie(Lo, Hi) = DAG.SplitVector(Load, DL);
}
void DAGTypeLegalizer::SplitVecRes_VECTOR_DEINTERLEAVE(SDNode *N) {
@@ -3033,6 +3020,7 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
"operand!\n");
case ISD::VP_SETCC:
+ case ISD::STRICT_FSETCC:
case ISD::SETCC: Res = SplitVecOp_VSETCC(N); break;
case ISD::BITCAST: Res = SplitVecOp_BITCAST(N); break;
case ISD::EXTRACT_SUBVECTOR: Res = SplitVecOp_EXTRACT_SUBVECTOR(N); break;
@@ -3997,14 +3985,16 @@ SDValue DAGTypeLegalizer::SplitVecOp_TruncateHelper(SDNode *N) {
}
SDValue DAGTypeLegalizer::SplitVecOp_VSETCC(SDNode *N) {
+ bool isStrict = N->getOpcode() == ISD::STRICT_FSETCC;
assert(N->getValueType(0).isVector() &&
- N->getOperand(0).getValueType().isVector() &&
+ N->getOperand(isStrict ? 1 : 0).getValueType().isVector() &&
"Operand types must be vectors");
// The result has a legal vector type, but the input needs splitting.
SDValue Lo0, Hi0, Lo1, Hi1, LoRes, HiRes;
SDLoc DL(N);
- GetSplitVector(N->getOperand(0), Lo0, Hi0);
- GetSplitVector(N->getOperand(1), Lo1, Hi1);
+ GetSplitVector(N->getOperand(isStrict ? 1 : 0), Lo0, Hi0);
+ GetSplitVector(N->getOperand(isStrict ? 2 : 1), Lo1, Hi1);
+
auto PartEltCnt = Lo0.getValueType().getVectorElementCount();
LLVMContext &Context = *DAG.getContext();
@@ -4014,6 +4004,16 @@ SDValue DAGTypeLegalizer::SplitVecOp_VSETCC(SDNode *N) {
if (N->getOpcode() == ISD::SETCC) {
LoRes = DAG.getNode(ISD::SETCC, DL, PartResVT, Lo0, Lo1, N->getOperand(2));
HiRes = DAG.getNode(ISD::SETCC, DL, PartResVT, Hi0, Hi1, N->getOperand(2));
+ } else if (N->getOpcode() == ISD::STRICT_FSETCC) {
+ LoRes = DAG.getNode(ISD::STRICT_FSETCC, DL,
+ DAG.getVTList(PartResVT, N->getValueType(1)),
+ N->getOperand(0), Lo0, Lo1, N->getOperand(3));
+ HiRes = DAG.getNode(ISD::STRICT_FSETCC, DL,
+ DAG.getVTList(PartResVT, N->getValueType(1)),
+ N->getOperand(0), Hi0, Hi1, N->getOperand(3));
+ SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
+ LoRes.getValue(1), HiRes.getValue(1));
+ ReplaceValueWith(SDValue(N, 1), NewChain);
} else {
assert(N->getOpcode() == ISD::VP_SETCC && "Expected VP_SETCC opcode");
SDValue MaskLo, MaskHi, EVLLo, EVLHi;
@@ -4235,8 +4235,8 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::SUB: case ISD::VP_SUB:
case ISD::XOR: case ISD::VP_XOR:
case ISD::SHL: case ISD::VP_SHL:
- case ISD::SRA: case ISD::VP_ASHR:
- case ISD::SRL: case ISD::VP_LSHR:
+ case ISD::SRA: case ISD::VP_SRA:
+ case ISD::SRL: case ISD::VP_SRL:
case ISD::FMINNUM: case ISD::VP_FMINNUM:
case ISD::FMAXNUM: case ISD::VP_FMAXNUM:
case ISD::FMINIMUM:
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 247f52370e4c..b05649c6ce95 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -1540,6 +1540,25 @@ SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT));
}
+SDValue SelectionDAG::getVPZeroExtendInReg(SDValue Op, SDValue Mask,
+ SDValue EVL, const SDLoc &DL,
+ EVT VT) {
+ EVT OpVT = Op.getValueType();
+ assert(VT.isInteger() && OpVT.isInteger() &&
+ "Cannot getVPZeroExtendInReg FP types");
+ assert(VT.isVector() && OpVT.isVector() &&
+ "getVPZeroExtendInReg type and operand type should be vector!");
+ assert(VT.getVectorElementCount() == OpVT.getVectorElementCount() &&
+ "Vector element counts must match in getZeroExtendInReg");
+ assert(VT.bitsLE(OpVT) && "Not extending!");
+ if (OpVT == VT)
+ return Op;
+ APInt Imm = APInt::getLowBitsSet(OpVT.getScalarSizeInBits(),
+ VT.getScalarSizeInBits());
+ return getNode(ISD::VP_AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT), Mask,
+ EVL);
+}
+
SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
// Only unsigned pointer semantics are supported right now. In the future this
// might delegate to TLI to check pointer signedness.
@@ -3468,19 +3487,28 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
Known = KnownBits::mulhs(Known, Known2);
break;
}
- case ISD::AVGFLOORU:
- case ISD::AVGCEILU:
- case ISD::AVGFLOORS:
+ case ISD::AVGFLOORU: {
+ Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+ Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
+ Known = KnownBits::avgFloorU(Known, Known2);
+ break;
+ }
+ case ISD::AVGCEILU: {
+ Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+ Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
+ Known = KnownBits::avgCeilU(Known, Known2);
+ break;
+ }
+ case ISD::AVGFLOORS: {
+ Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+ Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
+ Known = KnownBits::avgFloorS(Known, Known2);
+ break;
+ }
case ISD::AVGCEILS: {
- bool IsCeil = Opcode == ISD::AVGCEILU || Opcode == ISD::AVGCEILS;
- bool IsSigned = Opcode == ISD::AVGFLOORS || Opcode == ISD::AVGCEILS;
Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
- Known = IsSigned ? Known.sext(BitWidth + 1) : Known.zext(BitWidth + 1);
- Known2 = IsSigned ? Known2.sext(BitWidth + 1) : Known2.zext(BitWidth + 1);
- KnownBits Carry = KnownBits::makeConstant(APInt(1, IsCeil ? 1 : 0));
- Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
- Known = Known.extractBits(BitWidth, 1);
+ Known = KnownBits::avgCeilS(Known, Known2);
break;
}
case ISD::SELECT:
@@ -4752,6 +4780,13 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
(VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
}
+ case ISD::AVGCEILS:
+ case ISD::AVGFLOORS:
+ Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
+ if (Tmp == 1)
+ return 1; // Early out.
+ Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
+ return std::min(Tmp, Tmp2);
case ISD::SREM:
// The sign bit is the LHS's sign bit, except when the result of the
// remainder is zero. The magnitude of the result should be less than or
@@ -5241,18 +5276,17 @@ bool SelectionDAG::canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts,
// Check if we demand any upper (undef) elements.
return !PoisonOnly && DemandedElts.ugt(1);
+ case ISD::INSERT_VECTOR_ELT:
case ISD::EXTRACT_VECTOR_ELT: {
// Ensure that the element index is in bounds.
EVT VecVT = Op.getOperand(0).getValueType();
- KnownBits KnownIdx = computeKnownBits(Op.getOperand(1), Depth + 1);
- return KnownIdx.getMaxValue().uge(VecVT.getVectorMinNumElements());
- }
-
- case ISD::INSERT_VECTOR_ELT:{
- // Ensure that the element index is in bounds.
- EVT VecVT = Op.getOperand(0).getValueType();
- KnownBits KnownIdx = computeKnownBits(Op.getOperand(2), Depth + 1);
- return KnownIdx.getMaxValue().uge(VecVT.getVectorMinNumElements());
+ SDValue Idx = Op.getOperand(Opcode == ISD::INSERT_VECTOR_ELT ? 2 : 1);
+ if (isGuaranteedNotToBeUndefOrPoison(Idx, DemandedElts, PoisonOnly,
+ Depth + 1)) {
+ KnownBits KnownIdx = computeKnownBits(Idx, Depth + 1);
+ return KnownIdx.getMaxValue().uge(VecVT.getVectorMinNumElements());
+ }
+ return true;
}
case ISD::VECTOR_SHUFFLE: {
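
The new ComputeNumSignBits cases rely on the fact that a signed average always lies between its two operands, so it can never need more magnitude bits than either input; min(Tmp, Tmp2) is therefore a sound answer. A brute-force check of that claim over i8, using the overflow-free floor-average identity (a & b) + ((a ^ b) >> 1) (a standalone model, not the KnownBits implementation):

#include <algorithm>
#include <cassert>
#include <cstdint>

// Number of leading copies of the sign bit, including the sign bit.
int numSignBits(int8_t V) {
  int N = 0;
  int Sign = (V >> 7) & 1;
  for (int Bit = 7; Bit >= 0 && ((V >> Bit) & 1) == Sign; --Bit)
    ++N;
  return N;
}

int8_t avgFloorS(int8_t A, int8_t B) {
  return (int8_t)((A & B) + ((A ^ B) >> 1)); // computed in int, no overflow
}

int main() {
  for (int A = -128; A < 128; ++A)
    for (int B = -128; B < 128; ++B) {
      int MinSign = std::min(numSignBits((int8_t)A), numSignBits((int8_t)B));
      assert(numSignBits(avgFloorS((int8_t)A, (int8_t)B)) >= MinSign);
    }
}
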
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index b5694c955b8c..8addaf1ae3e5 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -680,9 +680,6 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
}
}
- // Determine if there is a call to setjmp in the machine function.
- MF->setExposesReturnsTwice(Fn.callsFunctionThatReturnsTwice());
-
// Determine if floating point is used for msvc
computeUsesMSVCFloatingPoint(TM.getTargetTriple(), Fn, MF->getMMI());
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 3ec6b9b79507..be7bcc505bd4 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -587,6 +587,10 @@ bool TargetLowering::ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
if (VT.isVector())
return false;
+ assert(Op.getOperand(0).getValueType().getScalarSizeInBits() == BitWidth &&
+ Op.getOperand(1).getValueType().getScalarSizeInBits() == BitWidth &&
+ "ShrinkDemandedOp only supports operands that have the same size!");
+
// Don't do this if the node has another user, which may require the
// full value.
if (!Op.getNode()->hasOneUse())
@@ -1832,11 +1836,33 @@ bool TargetLowering::SimplifyDemandedBits(
}
}
+ // TODO: Can we merge this fold with the one below?
// Try shrinking the operation as long as the shift amount will still be
// in range.
- if ((ShAmt < DemandedBits.getActiveBits()) &&
- ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
- return true;
+ if (ShAmt < DemandedBits.getActiveBits() && !VT.isVector() &&
+ Op.getNode()->hasOneUse()) {
+ // Search for the smallest integer type with free casts to and from
+ // Op's type. For expedience, just check power-of-2 integer types.
+ unsigned DemandedSize = DemandedBits.getActiveBits();
+ for (unsigned SmallVTBits = llvm::bit_ceil(DemandedSize);
+ SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
+ EVT SmallVT = EVT::getIntegerVT(*TLO.DAG.getContext(), SmallVTBits);
+ if (isNarrowingProfitable(VT, SmallVT) &&
+ isTypeDesirableForOp(ISD::SHL, SmallVT) &&
+ isTruncateFree(VT, SmallVT) && isZExtFree(SmallVT, VT) &&
+ (!TLO.LegalOperations() || isOperationLegal(ISD::SHL, SmallVT))) {
+ assert(DemandedSize <= SmallVTBits &&
+ "Narrowed below demanded bits?");
+ // We found a type with free casts.
+ SDValue NarrowShl = TLO.DAG.getNode(
+ ISD::SHL, dl, SmallVT,
+ TLO.DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(0)),
+ TLO.DAG.getShiftAmountConstant(ShAmt, SmallVT, dl));
+ return TLO.CombineTo(
+ Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, NarrowShl));
+ }
+ }
+ }
// Narrow shift to lower half - similar to ShrinkDemandedOp.
// (shl i64:x, K) -> (i64 zero_extend (shl (i32 (trunc i64:x)), K))
@@ -1908,11 +1934,6 @@ bool TargetLowering::SimplifyDemandedBits(
SDValue Op1 = Op.getOperand(1);
EVT ShiftVT = Op1.getValueType();
- // Try to match AVG patterns.
- if (SDValue AVG = combineShiftToAVG(Op, TLO.DAG, *this, DemandedBits,
- DemandedElts, Depth + 1))
- return TLO.CombineTo(Op, AVG);
-
KnownBits KnownSA = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1);
if (KnownSA.isConstant() && KnownSA.getConstant().ult(BitWidth)) {
unsigned ShAmt = KnownSA.getConstant().getZExtValue();
@@ -1994,6 +2015,12 @@ bool TargetLowering::SimplifyDemandedBits(
// shift amounts.
Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
}
+
+ // Try to match AVG patterns (after shift simplification).
+ if (SDValue AVG = combineShiftToAVG(Op, TLO.DAG, *this, DemandedBits,
+ DemandedElts, Depth + 1))
+ return TLO.CombineTo(Op, AVG);
+
break;
}
case ISD::SRA: {
@@ -2015,11 +2042,6 @@ bool TargetLowering::SimplifyDemandedBits(
if (DemandedBits.isOne())
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1));
- // Try to match AVG patterns.
- if (SDValue AVG = combineShiftToAVG(Op, TLO.DAG, *this, DemandedBits,
- DemandedElts, Depth + 1))
- return TLO.CombineTo(Op, AVG);
-
KnownBits KnownSA = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1);
if (KnownSA.isConstant() && KnownSA.getConstant().ult(BitWidth)) {
unsigned ShAmt = KnownSA.getConstant().getZExtValue();
@@ -2106,6 +2128,12 @@ bool TargetLowering::SimplifyDemandedBits(
}
}
}
+
+ // Try to match AVG patterns (after shift simplification).
+ if (SDValue AVG = combineShiftToAVG(Op, TLO.DAG, *this, DemandedBits,
+ DemandedElts, Depth + 1))
+ return TLO.CombineTo(Op, AVG);
+
break;
}
case ISD::FSHL:
@@ -2786,10 +2814,16 @@ bool TargetLowering::SimplifyDemandedBits(
unsigned DemandedBitsLZ = DemandedBits.countl_zero();
APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ);
KnownBits KnownOp0, KnownOp1;
- if (SimplifyDemandedBits(Op0, LoMask, DemandedElts, KnownOp0, TLO,
- Depth + 1) ||
- SimplifyDemandedBits(Op1, LoMask, DemandedElts, KnownOp1, TLO,
+ auto GetDemandedBitsLHSMask = [&](APInt Demanded,
+ const KnownBits &KnownRHS) {
+ if (Op.getOpcode() == ISD::MUL)
+ Demanded.clearHighBits(KnownRHS.countMinTrailingZeros());
+ return Demanded;
+ };
+ if (SimplifyDemandedBits(Op1, LoMask, DemandedElts, KnownOp1, TLO,
Depth + 1) ||
+ SimplifyDemandedBits(Op0, GetDemandedBitsLHSMask(LoMask, KnownOp1),
+ DemandedElts, KnownOp0, TLO, Depth + 1) ||
// See if the operation should be performed at a smaller bit width.
ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) {
if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) {
@@ -7855,7 +7889,7 @@ static SDValue expandVPFunnelShift(SDNode *Node, SelectionDAG &DAG) {
InvShAmt = DAG.getNode(ISD::VP_SUB, DL, ShVT, BitWidthC, ShAmt, Mask, VL);
ShX = DAG.getNode(ISD::VP_SHL, DL, VT, X, IsFSHL ? ShAmt : InvShAmt, Mask,
VL);
- ShY = DAG.getNode(ISD::VP_LSHR, DL, VT, Y, IsFSHL ? InvShAmt : ShAmt, Mask,
+ ShY = DAG.getNode(ISD::VP_SRL, DL, VT, Y, IsFSHL ? InvShAmt : ShAmt, Mask,
VL);
} else {
// fshl: X << (Z % BW) | Y >> 1 >> (BW - 1 - (Z % BW))
@@ -7877,12 +7911,12 @@ static SDValue expandVPFunnelShift(SDNode *Node, SelectionDAG &DAG) {
SDValue One = DAG.getConstant(1, DL, ShVT);
if (IsFSHL) {
ShX = DAG.getNode(ISD::VP_SHL, DL, VT, X, ShAmt, Mask, VL);
- SDValue ShY1 = DAG.getNode(ISD::VP_LSHR, DL, VT, Y, One, Mask, VL);
- ShY = DAG.getNode(ISD::VP_LSHR, DL, VT, ShY1, InvShAmt, Mask, VL);
+ SDValue ShY1 = DAG.getNode(ISD::VP_SRL, DL, VT, Y, One, Mask, VL);
+ ShY = DAG.getNode(ISD::VP_SRL, DL, VT, ShY1, InvShAmt, Mask, VL);
} else {
SDValue ShX1 = DAG.getNode(ISD::VP_SHL, DL, VT, X, One, Mask, VL);
ShX = DAG.getNode(ISD::VP_SHL, DL, VT, ShX1, InvShAmt, Mask, VL);
- ShY = DAG.getNode(ISD::VP_LSHR, DL, VT, Y, ShAmt, Mask, VL);
+ ShY = DAG.getNode(ISD::VP_SRL, DL, VT, Y, ShAmt, Mask, VL);
}
}
return DAG.getNode(ISD::VP_OR, DL, VT, ShX, ShY, Mask, VL);
@@ -8849,7 +8883,7 @@ SDValue TargetLowering::expandVPCTPOP(SDNode *Node, SelectionDAG &DAG) const {
// v = v - ((v >> 1) & 0x55555555...)
Tmp1 = DAG.getNode(ISD::VP_AND, dl, VT,
- DAG.getNode(ISD::VP_LSHR, dl, VT, Op,
+ DAG.getNode(ISD::VP_SRL, dl, VT, Op,
DAG.getConstant(1, dl, ShVT), Mask, VL),
Mask55, Mask, VL);
Op = DAG.getNode(ISD::VP_SUB, dl, VT, Op, Tmp1, Mask, VL);
@@ -8857,13 +8891,13 @@ SDValue TargetLowering::expandVPCTPOP(SDNode *Node, SelectionDAG &DAG) const {
// v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
Tmp2 = DAG.getNode(ISD::VP_AND, dl, VT, Op, Mask33, Mask, VL);
Tmp3 = DAG.getNode(ISD::VP_AND, dl, VT,
- DAG.getNode(ISD::VP_LSHR, dl, VT, Op,
+ DAG.getNode(ISD::VP_SRL, dl, VT, Op,
DAG.getConstant(2, dl, ShVT), Mask, VL),
Mask33, Mask, VL);
Op = DAG.getNode(ISD::VP_ADD, dl, VT, Tmp2, Tmp3, Mask, VL);
// v = (v + (v >> 4)) & 0x0F0F0F0F...
- Tmp4 = DAG.getNode(ISD::VP_LSHR, dl, VT, Op, DAG.getConstant(4, dl, ShVT),
+ Tmp4 = DAG.getNode(ISD::VP_SRL, dl, VT, Op, DAG.getConstant(4, dl, ShVT),
Mask, VL),
Tmp5 = DAG.getNode(ISD::VP_ADD, dl, VT, Op, Tmp4, Mask, VL);
Op = DAG.getNode(ISD::VP_AND, dl, VT, Tmp5, Mask0F, Mask, VL);
@@ -8887,8 +8921,8 @@ SDValue TargetLowering::expandVPCTPOP(SDNode *Node, SelectionDAG &DAG) const {
Mask, VL);
}
}
- return DAG.getNode(ISD::VP_LSHR, dl, VT, V,
- DAG.getConstant(Len - 8, dl, ShVT), Mask, VL);
+ return DAG.getNode(ISD::VP_SRL, dl, VT, V, DAG.getConstant(Len - 8, dl, ShVT),
+ Mask, VL);
}
SDValue TargetLowering::expandCTLZ(SDNode *Node, SelectionDAG &DAG) const {
@@ -8960,7 +8994,7 @@ SDValue TargetLowering::expandVPCTLZ(SDNode *Node, SelectionDAG &DAG) const {
for (unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) {
SDValue Tmp = DAG.getConstant(1ULL << i, dl, ShVT);
Op = DAG.getNode(ISD::VP_OR, dl, VT, Op,
- DAG.getNode(ISD::VP_LSHR, dl, VT, Op, Tmp, Mask, VL), Mask,
+ DAG.getNode(ISD::VP_SRL, dl, VT, Op, Tmp, Mask, VL), Mask,
VL);
}
Op = DAG.getNode(ISD::VP_XOR, dl, VT, Op, DAG.getConstant(-1, dl, VT), Mask,
@@ -9194,11 +9228,21 @@ SDValue TargetLowering::expandABD(SDNode *N, SelectionDAG &DAG) const {
DAG.getNode(ISD::USUBSAT, dl, VT, LHS, RHS),
DAG.getNode(ISD::USUBSAT, dl, VT, RHS, LHS));
- // abds(lhs, rhs) -> select(sgt(lhs,rhs), sub(lhs,rhs), sub(rhs,lhs))
- // abdu(lhs, rhs) -> select(ugt(lhs,rhs), sub(lhs,rhs), sub(rhs,lhs))
EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
ISD::CondCode CC = IsSigned ? ISD::CondCode::SETGT : ISD::CondCode::SETUGT;
SDValue Cmp = DAG.getSetCC(dl, CCVT, LHS, RHS, CC);
+
+ // Branchless expansion iff cmp result is allbits:
+ // abds(lhs, rhs) -> sub(sgt(lhs, rhs), xor(sgt(lhs, rhs), sub(lhs, rhs)))
+ // abdu(lhs, rhs) -> sub(ugt(lhs, rhs), xor(ugt(lhs, rhs), sub(lhs, rhs)))
+ if (CCVT == VT && getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) {
+ SDValue Diff = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
+ SDValue Xor = DAG.getNode(ISD::XOR, dl, VT, Diff, Cmp);
+ return DAG.getNode(ISD::SUB, dl, VT, Cmp, Xor);
+ }
+
+ // abds(lhs, rhs) -> select(sgt(lhs,rhs), sub(lhs,rhs), sub(rhs,lhs))
+ // abdu(lhs, rhs) -> select(ugt(lhs,rhs), sub(lhs,rhs), sub(rhs,lhs))
return DAG.getSelect(dl, VT, Cmp, DAG.getNode(ISD::SUB, dl, VT, LHS, RHS),
DAG.getNode(ISD::SUB, dl, VT, RHS, LHS));
}
@@ -9279,7 +9323,7 @@ SDValue TargetLowering::expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const {
case MVT::i16:
Tmp1 = DAG.getNode(ISD::VP_SHL, dl, VT, Op, DAG.getConstant(8, dl, SHVT),
Mask, EVL);
- Tmp2 = DAG.getNode(ISD::VP_LSHR, dl, VT, Op, DAG.getConstant(8, dl, SHVT),
+ Tmp2 = DAG.getNode(ISD::VP_SRL, dl, VT, Op, DAG.getConstant(8, dl, SHVT),
Mask, EVL);
return DAG.getNode(ISD::VP_OR, dl, VT, Tmp1, Tmp2, Mask, EVL);
case MVT::i32:
@@ -9289,11 +9333,11 @@ SDValue TargetLowering::expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const {
Mask, EVL);
Tmp3 = DAG.getNode(ISD::VP_SHL, dl, VT, Tmp3, DAG.getConstant(8, dl, SHVT),
Mask, EVL);
- Tmp2 = DAG.getNode(ISD::VP_LSHR, dl, VT, Op, DAG.getConstant(8, dl, SHVT),
+ Tmp2 = DAG.getNode(ISD::VP_SRL, dl, VT, Op, DAG.getConstant(8, dl, SHVT),
Mask, EVL);
Tmp2 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp2,
DAG.getConstant(0xFF00, dl, VT), Mask, EVL);
- Tmp1 = DAG.getNode(ISD::VP_LSHR, dl, VT, Op, DAG.getConstant(24, dl, SHVT),
+ Tmp1 = DAG.getNode(ISD::VP_SRL, dl, VT, Op, DAG.getConstant(24, dl, SHVT),
Mask, EVL);
Tmp4 = DAG.getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp3, Mask, EVL);
Tmp2 = DAG.getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp1, Mask, EVL);
@@ -9313,19 +9357,19 @@ SDValue TargetLowering::expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const {
DAG.getConstant(255ULL << 24, dl, VT), Mask, EVL);
Tmp5 = DAG.getNode(ISD::VP_SHL, dl, VT, Tmp5, DAG.getConstant(8, dl, SHVT),
Mask, EVL);
- Tmp4 = DAG.getNode(ISD::VP_LSHR, dl, VT, Op, DAG.getConstant(8, dl, SHVT),
+ Tmp4 = DAG.getNode(ISD::VP_SRL, dl, VT, Op, DAG.getConstant(8, dl, SHVT),
Mask, EVL);
Tmp4 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp4,
DAG.getConstant(255ULL << 24, dl, VT), Mask, EVL);
- Tmp3 = DAG.getNode(ISD::VP_LSHR, dl, VT, Op, DAG.getConstant(24, dl, SHVT),
+ Tmp3 = DAG.getNode(ISD::VP_SRL, dl, VT, Op, DAG.getConstant(24, dl, SHVT),
Mask, EVL);
Tmp3 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp3,
DAG.getConstant(255ULL << 16, dl, VT), Mask, EVL);
- Tmp2 = DAG.getNode(ISD::VP_LSHR, dl, VT, Op, DAG.getConstant(40, dl, SHVT),
+ Tmp2 = DAG.getNode(ISD::VP_SRL, dl, VT, Op, DAG.getConstant(40, dl, SHVT),
Mask, EVL);
Tmp2 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp2,
DAG.getConstant(255ULL << 8, dl, VT), Mask, EVL);
- Tmp1 = DAG.getNode(ISD::VP_LSHR, dl, VT, Op, DAG.getConstant(56, dl, SHVT),
+ Tmp1 = DAG.getNode(ISD::VP_SRL, dl, VT, Op, DAG.getConstant(56, dl, SHVT),
Mask, EVL);
Tmp8 = DAG.getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp7, Mask, EVL);
Tmp6 = DAG.getNode(ISD::VP_OR, dl, VT, Tmp6, Tmp5, Mask, EVL);
@@ -9424,7 +9468,7 @@ SDValue TargetLowering::expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const {
Tmp = (Sz > 8 ? DAG.getNode(ISD::VP_BSWAP, dl, VT, Op, Mask, EVL) : Op);
// swap i4: ((V >> 4) & 0x0F) | ((V & 0x0F) << 4)
- Tmp2 = DAG.getNode(ISD::VP_LSHR, dl, VT, Tmp, DAG.getConstant(4, dl, SHVT),
+ Tmp2 = DAG.getNode(ISD::VP_SRL, dl, VT, Tmp, DAG.getConstant(4, dl, SHVT),
Mask, EVL);
Tmp2 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp2,
DAG.getConstant(Mask4, dl, VT), Mask, EVL);
@@ -9435,7 +9479,7 @@ SDValue TargetLowering::expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const {
Tmp = DAG.getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
// swap i2: ((V >> 2) & 0x33) | ((V & 0x33) << 2)
- Tmp2 = DAG.getNode(ISD::VP_LSHR, dl, VT, Tmp, DAG.getConstant(2, dl, SHVT),
+ Tmp2 = DAG.getNode(ISD::VP_SRL, dl, VT, Tmp, DAG.getConstant(2, dl, SHVT),
Mask, EVL);
Tmp2 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp2,
DAG.getConstant(Mask2, dl, VT), Mask, EVL);
@@ -9446,7 +9490,7 @@ SDValue TargetLowering::expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const {
Tmp = DAG.getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL);
// swap i1: ((V >> 1) & 0x55) | ((V & 0x55) << 1)
- Tmp2 = DAG.getNode(ISD::VP_LSHR, dl, VT, Tmp, DAG.getConstant(1, dl, SHVT),
+ Tmp2 = DAG.getNode(ISD::VP_SRL, dl, VT, Tmp, DAG.getConstant(1, dl, SHVT),
Mask, EVL);
Tmp2 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp2,
DAG.getConstant(Mask1, dl, VT), Mask, EVL);
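
The three masked swaps above are the vector-predicated form of the classic scalar ladder; for reference, a sketch of the same steps on a byte (not code from the patch):

    #include <cassert>
    #include <cstdint>

    // Reverse the bits of a byte: swap nibbles, then 2-bit pairs, then bits.
    static uint8_t bitreverse8(uint8_t v) {
      v = (uint8_t)(((v >> 4) & 0x0F) | ((v & 0x0F) << 4)); // swap i4
      v = (uint8_t)(((v >> 2) & 0x33) | ((v & 0x33) << 2)); // swap i2
      v = (uint8_t)(((v >> 1) & 0x55) | ((v & 0x55) << 1)); // swap i1
      return v;
    }

    int main() {
      assert(bitreverse8(0x01) == 0x80);
      assert(bitreverse8(0xB2) == 0x4D);
    }
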
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 09b70cfb7227..82a59918b085 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -227,6 +227,34 @@ void TargetLoweringBase::InitLibcalls(const Triple &TT) {
CallingConv::ARM_AAPCS_VFP);
}
}
+
+ switch (TT.getOS()) {
+ case Triple::MacOSX:
+ if (TT.isMacOSXVersionLT(10, 9)) {
+ setLibcallName(RTLIB::EXP10_F32, nullptr);
+ setLibcallName(RTLIB::EXP10_F64, nullptr);
+ } else {
+ setLibcallName(RTLIB::EXP10_F32, "__exp10f");
+ setLibcallName(RTLIB::EXP10_F64, "__exp10");
+ }
+ break;
+ case Triple::IOS:
+ case Triple::TvOS:
+ case Triple::WatchOS:
+ case Triple::XROS:
+ if (!TT.isWatchOS() &&
+ (TT.isOSVersionLT(7, 0) || (TT.isOSVersionLT(9, 0) && TT.isX86()))) {
+ setLibcallName(RTLIB::EXP10_F32, nullptr);
+ setLibcallName(RTLIB::EXP10_F64, nullptr);
+ } else {
+ setLibcallName(RTLIB::EXP10_F32, "__exp10f");
+ setLibcallName(RTLIB::EXP10_F64, "__exp10");
+ }
+
+ break;
+ default:
+ break;
+ }
} else {
setLibcallName(RTLIB::FPEXT_F16_F32, "__gnu_h2f_ieee");
setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
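
The Darwin switch added above encodes an availability rule that can be read as one predicate; a hedged restatement (the helper name is invented, and the version cutoffs simply mirror the code):

    #include "llvm/TargetParser/Triple.h"

    // __exp10f/__exp10 are treated as available on macOS >= 10.9, on all
    // watchOS, and on the other Darwin OSes unless the deployment target
    // predates the cutoffs checked above (with a later cutoff for x86).
    static bool hasExp10(const llvm::Triple &TT) {
      switch (TT.getOS()) {
      case llvm::Triple::MacOSX:
        return !TT.isMacOSXVersionLT(10, 9);
      case llvm::Triple::IOS:
      case llvm::Triple::TvOS:
      case llvm::Triple::WatchOS:
      case llvm::Triple::XROS:
        return TT.isWatchOS() || !(TT.isOSVersionLT(7, 0) ||
                                   (TT.isOSVersionLT(9, 0) && TT.isX86()));
      default:
        return false;
      }
    }
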
diff --git a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index 3e1897ce670a..0fc915d89f6c 100644
--- a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -523,6 +523,8 @@ static unsigned getELFSectionType(StringRef Name, SectionKind K) {
if (hasPrefix(Name, ".llvm.offloading"))
return ELF::SHT_LLVM_OFFLOADING;
+ if (Name == ".llvm.lto")
+ return ELF::SHT_LLVM_LTO;
if (K.isBSS() || K.isThreadBSS())
return ELF::SHT_NOBITS;
diff --git a/llvm/lib/CodeGen/ValueTypes.cpp b/llvm/lib/CodeGen/ValueTypes.cpp
index 58db686ec7d5..3d5c58d282da 100644
--- a/llvm/lib/CodeGen/ValueTypes.cpp
+++ b/llvm/lib/CodeGen/ValueTypes.cpp
@@ -579,9 +579,11 @@ Type *EVT::getTypeForEVT(LLVMContext &Context) const {
// clang-format on
}
-/// Return the value type corresponding to the specified type. This returns all
-/// pointers as MVT::iPTR. If HandleUnknown is true, unknown types are returned
-/// as Other, otherwise they are invalid.
+/// Return the value type corresponding to the specified type.
+/// If HandleUnknown is true, unknown types are returned as Other, otherwise
+/// they are invalid.
+/// NB: This includes pointer types, which require a DataLayout to convert
+/// to a concrete value type.
MVT MVT::getVT(Type *Ty, bool HandleUnknown){
assert(Ty != nullptr && "Invalid type");
switch (Ty->getTypeID()) {
@@ -611,7 +613,6 @@ MVT MVT::getVT(Type *Ty, bool HandleUnknown){
case Type::X86_AMXTyID: return MVT(MVT::x86amx);
case Type::FP128TyID: return MVT(MVT::f128);
case Type::PPC_FP128TyID: return MVT(MVT::ppcf128);
- case Type::PointerTyID: return MVT(MVT::iPTR);
case Type::FixedVectorTyID:
case Type::ScalableVectorTyID: {
VectorType *VTy = cast<VectorType>(Ty);
@@ -622,9 +623,11 @@ MVT MVT::getVT(Type *Ty, bool HandleUnknown){
}
}
-/// getEVT - Return the value type corresponding to the specified type. This
-/// returns all pointers as MVT::iPTR. If HandleUnknown is true, unknown types
-/// are returned as Other, otherwise they are invalid.
+/// getEVT - Return the value type corresponding to the specified type.
+/// If HandleUnknown is true, unknown types are returned as Other, otherwise
+/// they are invalid.
+/// NB: This includes pointer types, which require a DataLayout to convert
+/// to a concrete value type.
EVT EVT::getEVT(Type *Ty, bool HandleUnknown){
switch (Ty->getTypeID()) {
default:
diff --git a/llvm/lib/DWARFLinker/Parallel/OutputSections.h b/llvm/lib/DWARFLinker/Parallel/OutputSections.h
index 0e1f2dae54bc..d2e4622aa764 100644
--- a/llvm/lib/DWARFLinker/Parallel/OutputSections.h
+++ b/llvm/lib/DWARFLinker/Parallel/OutputSections.h
@@ -220,7 +220,7 @@ struct SectionDescriptor : SectionDescriptorBase {
/// Returns section content.
StringRef getContents() override {
if (SectionOffsetInsideAsmPrinterOutputStart == 0)
- return StringRef(Contents.data(), Contents.size());
+ return Contents;
return Contents.slice(SectionOffsetInsideAsmPrinterOutputStart,
SectionOffsetInsideAsmPrinterOutputEnd);
diff --git a/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp b/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp
index 4da031716e32..3cdffb8cd061 100644
--- a/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp
+++ b/llvm/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp
@@ -75,7 +75,7 @@ Error SimpleExecutorMemoryManager::finalize(tpctypes::FinalizeRequest &FR) {
auto BailOut = [&](Error Err) {
std::pair<void *, Allocation> AllocToDestroy;
- // Get allocation to destory.
+ // Get allocation to destroy.
{
std::lock_guard<std::mutex> Lock(M);
auto I = Allocations.find(Base.toPtr<void *>());
@@ -153,7 +153,7 @@ Error SimpleExecutorMemoryManager::deallocate(
std::vector<std::pair<void *, Allocation>> AllocPairs;
AllocPairs.reserve(Bases.size());
- // Get allocation to destory.
+ // Get allocation to destroy.
Error Err = Error::success();
{
std::lock_guard<std::mutex> Lock(M);
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
index 941f6a7a7d82..ced5d78f994a 100644
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -1417,6 +1417,10 @@ static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
} else if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
if (GEP->isInBounds())
Out << " inbounds";
+ else if (GEP->hasNoUnsignedSignedWrap())
+ Out << " nusw";
+ if (GEP->hasNoUnsignedWrap())
+ Out << " nuw";
if (auto InRange = GEP->getInRange()) {
Out << " inrange(" << InRange->getLower() << ", " << InRange->getUpper()
<< ")";
diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
index 4622ad7e9a0e..4c5e1ce3df7c 100644
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -1721,8 +1721,9 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
if (auto *GV = dyn_cast<GlobalVariable>(C))
if (!GV->hasExternalWeakLinkage() && GV->getValueType() == PointeeTy &&
isInBoundsIndices(Idxs))
- return ConstantExpr::getGetElementPtr(PointeeTy, C, Idxs,
- /*InBounds=*/true, InRange);
+ // TODO(gep_nowrap): Can also set NUW here.
+ return ConstantExpr::getGetElementPtr(
+ PointeeTy, C, Idxs, GEPNoWrapFlags::inBounds(), InRange);
return nullptr;
}
diff --git a/llvm/lib/IR/ConstantRange.cpp b/llvm/lib/IR/ConstantRange.cpp
index 59e7a9f5eb11..c3bde48b982c 100644
--- a/llvm/lib/IR/ConstantRange.cpp
+++ b/llvm/lib/IR/ConstantRange.cpp
@@ -930,6 +930,8 @@ ConstantRange ConstantRange::overflowingBinaryOp(Instruction::BinaryOps BinOp,
return addWithNoWrap(Other, NoWrapKind);
case Instruction::Sub:
return subWithNoWrap(Other, NoWrapKind);
+ case Instruction::Mul:
+ return multiplyWithNoWrap(Other, NoWrapKind);
default:
// Don't know about this Overflowing Binary Operation.
// Conservatively fallback to plain binop handling.
@@ -1167,6 +1169,26 @@ ConstantRange::multiply(const ConstantRange &Other) const {
return UR.isSizeStrictlySmallerThan(SR) ? UR : SR;
}
+ConstantRange
+ConstantRange::multiplyWithNoWrap(const ConstantRange &Other,
+ unsigned NoWrapKind,
+ PreferredRangeType RangeType) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+ if (isFullSet() && Other.isFullSet())
+ return getFull();
+
+ ConstantRange Result = multiply(Other);
+
+ if (NoWrapKind & OverflowingBinaryOperator::NoSignedWrap)
+ Result = Result.intersectWith(smul_sat(Other), RangeType);
+
+ if (NoWrapKind & OverflowingBinaryOperator::NoUnsignedWrap)
+ Result = Result.intersectWith(umul_sat(Other), RangeType);
+
+ return Result;
+}
+
ConstantRange ConstantRange::smul_fast(const ConstantRange &Other) const {
if (isEmptySet() || Other.isEmptySet())
return getEmpty();
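
A hedged usage sketch of the new overload; the ranges are chosen only to show where the nuw clamp matters:

    #include "llvm/ADT/APInt.h"
    #include "llvm/IR/ConstantRange.h"
    #include "llvm/IR/Operator.h"
    using namespace llvm;

    void example() {
      // Over i8: [100, 120) * [3, 4). A plain multiply wraps, but under nuw
      // the result is additionally intersected with umul_sat's range.
      ConstantRange A(APInt(8, 100), APInt(8, 120));
      ConstantRange B(APInt(8, 3), APInt(8, 4));
      ConstantRange R =
          A.multiplyWithNoWrap(B, OverflowingBinaryOperator::NoUnsignedWrap);
      (void)R; // R is never wider than A.multiply(B) by construction.
    }
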
diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp
index db442c54125a..cfb89d557db4 100644
--- a/llvm/lib/IR/Constants.cpp
+++ b/llvm/lib/IR/Constants.cpp
@@ -1568,7 +1568,7 @@ Constant *ConstantExpr::getWithOperands(ArrayRef<Constant *> Ops, Type *Ty,
assert(SrcTy || (Ops[0]->getType() == getOperand(0)->getType()));
return ConstantExpr::getGetElementPtr(
SrcTy ? SrcTy : GEPO->getSourceElementType(), Ops[0], Ops.slice(1),
- GEPO->isInBounds(), GEPO->getInRange(), OnlyIfReducedTy);
+ GEPO->getNoWrapFlags(), GEPO->getInRange(), OnlyIfReducedTy);
}
case Instruction::ICmp:
case Instruction::FCmp:
@@ -2348,13 +2348,15 @@ Constant *ConstantExpr::getCompare(unsigned short Predicate, Constant *C1,
}
Constant *ConstantExpr::getGetElementPtr(Type *Ty, Constant *C,
- ArrayRef<Value *> Idxs, bool InBounds,
+ ArrayRef<Value *> Idxs,
+ GEPNoWrapFlags NW,
std::optional<ConstantRange> InRange,
Type *OnlyIfReducedTy) {
assert(Ty && "Must specify element type");
assert(isSupportedGetElementPtr(Ty) && "Element type is unsupported!");
- if (Constant *FC = ConstantFoldGetElementPtr(Ty, C, InBounds, InRange, Idxs))
+ if (Constant *FC =
+ ConstantFoldGetElementPtr(Ty, C, NW.isInBounds(), InRange, Idxs))
return FC; // Fold a few common cases.
assert(GetElementPtrInst::getIndexedType(Ty, Idxs) && "GEP indices invalid!");
@@ -2390,10 +2392,8 @@ Constant *ConstantExpr::getGetElementPtr(Type *Ty, Constant *C,
ArgVec.push_back(Idx);
}
- unsigned SubClassOptionalData = InBounds ? GEPOperator::IsInBounds : 0;
const ConstantExprKeyType Key(Instruction::GetElementPtr, ArgVec, 0,
- SubClassOptionalData, std::nullopt, Ty,
- InRange);
+ NW.getRaw(), std::nullopt, Ty, InRange);
LLVMContextImpl *pImpl = C->getContext().pImpl;
return pImpl->ExprConstants.getOrCreate(ReqTy, Key);
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index c6f20af0f1df..b32799355d69 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -43,8 +43,8 @@ using namespace llvm;
GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
const Twine &Name,
unsigned AddressSpace,
- Module *M) {
- Constant *StrConstant = ConstantDataArray::getString(Context, Str);
+ Module *M, bool AddNull) {
+ Constant *StrConstant = ConstantDataArray::getString(Context, Str, AddNull);
if (!M)
M = BB->getParent()->getParent();
auto *GV = new GlobalVariable(
@@ -1053,9 +1053,8 @@ Value *IRBuilderBase::CreateFCmpHelper(
return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
}
- if (auto *LC = dyn_cast<Constant>(LHS))
- if (auto *RC = dyn_cast<Constant>(RHS))
- return Insert(Folder.CreateFCmp(P, LC, RC), Name);
+ if (auto *V = Folder.FoldCmp(P, LHS, RHS))
+ return V;
return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
}
diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp
index 678edc58ad84..29272e627a1d 100644
--- a/llvm/lib/IR/Instruction.cpp
+++ b/llvm/lib/IR/Instruction.cpp
@@ -441,7 +441,7 @@ void Instruction::dropPoisonGeneratingFlags() {
break;
case Instruction::GetElementPtr:
- cast<GetElementPtrInst>(this)->setIsInBounds(false);
+ cast<GetElementPtrInst>(this)->setNoWrapFlags(GEPNoWrapFlags::none());
break;
case Instruction::UIToFP:
@@ -660,7 +660,8 @@ void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
- DestGEP->setIsInBounds(SrcGEP->isInBounds() || DestGEP->isInBounds());
+ DestGEP->setNoWrapFlags(SrcGEP->getNoWrapFlags() |
+ DestGEP->getNoWrapFlags());
if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
if (isa<PossiblyNonNegInst>(this))
@@ -700,7 +701,8 @@ void Instruction::andIRFlags(const Value *V) {
if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
- DestGEP->setIsInBounds(SrcGEP->isInBounds() && DestGEP->isInBounds());
+ DestGEP->setNoWrapFlags(SrcGEP->getNoWrapFlags() &
+ DestGEP->getNoWrapFlags());
if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
if (isa<PossiblyNonNegInst>(this))
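
The asymmetry between the two hunks is intentional: copyIRFlags unions the flag sets while andIRFlags keeps only what both GEPs can justify. A sketch of the flag algebra, assuming the GEPNoWrapFlags factory names match the header this patch series introduces:

    // Union keeps anything either GEP claimed; intersection keeps only what
    // both claimed. Note inBounds() also carries the implied nusw bit.
    GEPNoWrapFlags A = GEPNoWrapFlags::inBounds();
    GEPNoWrapFlags B = GEPNoWrapFlags::noUnsignedWrap();
    GEPNoWrapFlags U = A | B; // inbounds | nusw | nuw
    GEPNoWrapFlags I = A & B; // none(): the two share no flag
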
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index c31d399b01d1..1213f078d05e 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -2043,14 +2043,35 @@ bool GetElementPtrInst::hasAllConstantIndices() const {
return true;
}
+void GetElementPtrInst::setNoWrapFlags(GEPNoWrapFlags NW) {
+ SubclassOptionalData = NW.getRaw();
+}
+
void GetElementPtrInst::setIsInBounds(bool B) {
- cast<GEPOperator>(this)->setIsInBounds(B);
+ GEPNoWrapFlags NW = cast<GEPOperator>(this)->getNoWrapFlags();
+ if (B)
+ NW |= GEPNoWrapFlags::inBounds();
+ else
+ NW = NW.withoutInBounds();
+ setNoWrapFlags(NW);
+}
+
+GEPNoWrapFlags GetElementPtrInst::getNoWrapFlags() const {
+ return cast<GEPOperator>(this)->getNoWrapFlags();
}
bool GetElementPtrInst::isInBounds() const {
return cast<GEPOperator>(this)->isInBounds();
}
+bool GetElementPtrInst::hasNoUnsignedSignedWrap() const {
+ return cast<GEPOperator>(this)->hasNoUnsignedSignedWrap();
+}
+
+bool GetElementPtrInst::hasNoUnsignedWrap() const {
+ return cast<GEPOperator>(this)->hasNoUnsignedWrap();
+}
+
bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
APInt &Offset) const {
// Delegate to the generic GEPOperator implementation.
diff --git a/llvm/lib/IR/MDBuilder.cpp b/llvm/lib/IR/MDBuilder.cpp
index 0bf41d7cc7c2..bd68db3a6f96 100644
--- a/llvm/lib/IR/MDBuilder.cpp
+++ b/llvm/lib/IR/MDBuilder.cpp
@@ -86,9 +86,8 @@ MDNode *MDBuilder::createFunctionEntryCount(
}
MDNode *MDBuilder::createFunctionSectionPrefix(StringRef Prefix) {
- return MDNode::get(Context,
- {createString("function_section_prefix"),
- createString(Prefix)});
+ return MDNode::get(
+ Context, {createString("function_section_prefix"), createString(Prefix)});
}
MDNode *MDBuilder::createRange(const APInt &Lo, const APInt &Hi) {
@@ -148,9 +147,10 @@ MDNode *MDBuilder::mergeCallbackEncodings(MDNode *ExistingCallbacks,
for (unsigned u = 0; u < NumExistingOps; u++) {
Ops[u] = ExistingCallbacks->getOperand(u);
- auto *OldCBCalleeIdxAsCM = cast<ConstantAsMetadata>(Ops[u]);
+ auto *OldCBCalleeIdxAsCM =
+ cast<ConstantAsMetadata>(cast<MDNode>(Ops[u])->getOperand(0));
uint64_t OldCBCalleeIdx =
- cast<ConstantInt>(OldCBCalleeIdxAsCM->getValue())->getZExtValue();
+ cast<ConstantInt>(OldCBCalleeIdxAsCM->getValue())->getZExtValue();
(void)OldCBCalleeIdx;
assert(NewCBCalleeIdx != OldCBCalleeIdx &&
"Cannot map a callback callee index twice!");
@@ -339,8 +339,8 @@ MDNode *MDBuilder::createMutableTBAAAccessTag(MDNode *Tag) {
MDNode *MDBuilder::createIrrLoopHeaderWeight(uint64_t Weight) {
Metadata *Vals[] = {
- createString("loop_header_weight"),
- createConstant(ConstantInt::get(Type::getInt64Ty(Context), Weight)),
+ createString("loop_header_weight"),
+ createConstant(ConstantInt::get(Type::getInt64Ty(Context), Weight)),
};
return MDNode::get(Context, Vals);
}
diff --git a/llvm/lib/IR/Mangler.cpp b/llvm/lib/IR/Mangler.cpp
index 72e2bc1f24ac..019fe844e286 100644
--- a/llvm/lib/IR/Mangler.cpp
+++ b/llvm/lib/IR/Mangler.cpp
@@ -292,7 +292,7 @@ void llvm::emitLinkerFlagsForUsedCOFF(raw_ostream &OS, const GlobalValue *GV,
std::optional<std::string> llvm::getArm64ECMangledFunctionName(StringRef Name) {
bool IsCppFn = Name[0] == '?';
- if (IsCppFn && Name.find("$$h") != std::string::npos)
+ if (IsCppFn && Name.contains("$$h"))
return std::nullopt;
if (!IsCppFn && Name[0] == '#')
return std::nullopt;
diff --git a/llvm/lib/IR/Module.cpp b/llvm/lib/IR/Module.cpp
index a8696ed9e3ce..f97dd18c736c 100644
--- a/llvm/lib/IR/Module.cpp
+++ b/llvm/lib/IR/Module.cpp
@@ -882,7 +882,7 @@ StringRef Module::getDarwinTargetVariantTriple() const {
}
void Module::setDarwinTargetVariantTriple(StringRef T) {
- addModuleFlag(ModFlagBehavior::Override, "darwin.target_variant.triple",
+ addModuleFlag(ModFlagBehavior::Warning, "darwin.target_variant.triple",
MDString::get(getContext(), T));
}
diff --git a/llvm/lib/IR/Operator.cpp b/llvm/lib/IR/Operator.cpp
index 29620ef716f2..6c9862556f55 100644
--- a/llvm/lib/IR/Operator.cpp
+++ b/llvm/lib/IR/Operator.cpp
@@ -42,7 +42,8 @@ bool Operator::hasPoisonGeneratingFlags() const {
case Instruction::GetElementPtr: {
auto *GEP = cast<GEPOperator>(this);
// Note: inrange exists on constexpr only
- return GEP->isInBounds() || GEP->getInRange() != std::nullopt;
+ return GEP->getNoWrapFlags() != GEPNoWrapFlags::none() ||
+ GEP->getInRange() != std::nullopt;
}
case Instruction::UIToFP:
case Instruction::ZExt:
diff --git a/llvm/lib/LTO/LTO.cpp b/llvm/lib/LTO/LTO.cpp
index 21cad1de0ced..7304eab738ce 100644
--- a/llvm/lib/LTO/LTO.cpp
+++ b/llvm/lib/LTO/LTO.cpp
@@ -121,6 +121,9 @@ void llvm::computeLTOCacheKey(
support::endian::write64le(Data, I);
Hasher.update(Data);
};
+ auto AddUint8 = [&](const uint8_t I) {
+ Hasher.update(ArrayRef<uint8_t>((const uint8_t *)&I, 1));
+ };
AddString(Conf.CPU);
// FIXME: Hash more of Options. For now all clients initialize Options from
// command-line flags (which is unsupported in production), but may set
@@ -156,18 +159,18 @@ void llvm::computeLTOCacheKey(
auto ModHash = Index.getModuleHash(ModuleID);
Hasher.update(ArrayRef<uint8_t>((uint8_t *)&ModHash[0], sizeof(ModHash)));
- std::vector<uint64_t> ExportsGUID;
+ std::vector<std::pair<uint64_t, uint8_t>> ExportsGUID;
ExportsGUID.reserve(ExportList.size());
- for (const auto &VI : ExportList) {
- auto GUID = VI.getGUID();
- ExportsGUID.push_back(GUID);
- }
+ for (const auto &[VI, ExportType] : ExportList)
+ ExportsGUID.push_back(
+ std::make_pair(VI.getGUID(), static_cast<uint8_t>(ExportType)));
// Sort the export list elements GUIDs.
llvm::sort(ExportsGUID);
- for (uint64_t GUID : ExportsGUID) {
+ for (auto [GUID, ExportType] : ExportsGUID) {
// The export list can impact the internalization, be conservative here
Hasher.update(ArrayRef<uint8_t>((uint8_t *)&GUID, sizeof(GUID)));
+ AddUint8(ExportType);
}
// Include the hash for every module we import functions from. The set of
@@ -199,13 +202,21 @@ void llvm::computeLTOCacheKey(
[](const ImportModule &Lhs, const ImportModule &Rhs) -> bool {
return Lhs.getHash() < Rhs.getHash();
});
+ std::vector<std::pair<uint64_t, uint8_t>> ImportedGUIDs;
for (const ImportModule &Entry : ImportModulesVector) {
auto ModHash = Entry.getHash();
Hasher.update(ArrayRef<uint8_t>((uint8_t *)&ModHash[0], sizeof(ModHash)));
AddUint64(Entry.getFunctions().size());
- for (auto &Fn : Entry.getFunctions())
- AddUint64(Fn);
+
+ ImportedGUIDs.clear();
+ for (auto &[Fn, ImportType] : Entry.getFunctions())
+ ImportedGUIDs.push_back(std::make_pair(Fn, ImportType));
+ llvm::sort(ImportedGUIDs);
+ for (auto &[GUID, Type] : ImportedGUIDs) {
+ AddUint64(GUID);
+ AddUint8(Type);
+ }
}
// Include the hash for the resolved ODR.
@@ -275,9 +286,9 @@ void llvm::computeLTOCacheKey(
// Imported functions may introduce new uses of type identifier resolutions,
// so we need to collect their used resolutions as well.
for (const ImportModule &ImpM : ImportModulesVector)
- for (auto &ImpF : ImpM.getFunctions()) {
+ for (auto &[GUID, UnusedImportType] : ImpM.getFunctions()) {
GlobalValueSummary *S =
- Index.findSummaryInModule(ImpF, ImpM.getIdentifier());
+ Index.findSummaryInModule(GUID, ImpM.getIdentifier());
AddUsedThings(S);
// If this is an alias, we also care about any types/etc. that the aliasee
// may reference.
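
Both loops hash (GUID, import/export type) pairs only after sorting, so the cache key does not depend on hash-map iteration order; the pattern in isolation (hasher and names are stand-ins, not the LTO code):

    #include <algorithm>
    #include <cstdint>
    #include <utility>
    #include <vector>

    template <typename HasherT>
    void hashGUIDTypePairs(HasherT &Hasher,
                           std::vector<std::pair<uint64_t, uint8_t>> Pairs) {
      // Canonical order first: unordered containers would otherwise make the
      // cache key nondeterministic across runs.
      std::sort(Pairs.begin(), Pairs.end());
      for (const auto &[GUID, Type] : Pairs) {
        Hasher.update(GUID); // 8 bytes
        Hasher.update(Type); // 1 byte: definition vs. declaration import
      }
    }
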
@@ -1389,15 +1400,20 @@ public:
llvm::StringRef ModulePath,
const std::string &NewModulePath) {
std::map<std::string, GVSummaryMapTy> ModuleToSummariesForIndex;
+ GVSummaryPtrSet DeclarationSummaries;
+
std::error_code EC;
gatherImportedSummariesForModule(ModulePath, ModuleToDefinedGVSummaries,
- ImportList, ModuleToSummariesForIndex);
+ ImportList, ModuleToSummariesForIndex,
+ DeclarationSummaries);
raw_fd_ostream OS(NewModulePath + ".thinlto.bc", EC,
sys::fs::OpenFlags::OF_None);
if (EC)
return errorCodeToError(EC);
- writeIndexToFile(CombinedIndex, OS, &ModuleToSummariesForIndex);
+
+ writeIndexToFile(CombinedIndex, OS, &ModuleToSummariesForIndex,
+ &DeclarationSummaries);
if (ShouldEmitImportsFiles) {
EC = EmitImportsFiles(ModulePath, NewModulePath + ".imports",
diff --git a/llvm/lib/LTO/LTOBackend.cpp b/llvm/lib/LTO/LTOBackend.cpp
index d4b89ede2d71..76223e88ca1a 100644
--- a/llvm/lib/LTO/LTOBackend.cpp
+++ b/llvm/lib/LTO/LTOBackend.cpp
@@ -452,9 +452,8 @@ static void splitCodeGen(const Config &C, TargetMachine *TM,
CodegenThreadPool.async(
[&](const SmallString<0> &BC, unsigned ThreadId) {
LTOLLVMContext Ctx(C);
- Expected<std::unique_ptr<Module>> MOrErr = parseBitcodeFile(
- MemoryBufferRef(StringRef(BC.data(), BC.size()), "ld-temp.o"),
- Ctx);
+ Expected<std::unique_ptr<Module>> MOrErr =
+ parseBitcodeFile(MemoryBufferRef(BC.str(), "ld-temp.o"), Ctx);
if (!MOrErr)
report_fatal_error("Failed to read bitcode");
std::unique_ptr<Module> MPartInCtx = std::move(MOrErr.get());
@@ -721,7 +720,14 @@ bool lto::initImportList(const Module &M,
if (Summary->modulePath() == M.getModuleIdentifier())
continue;
// Add an entry to provoke importing by thinBackend.
- ImportList[Summary->modulePath()].insert(GUID);
+      // Try to emplace the entry first. If an entry with the same key already
+ // exists, set the value to 'std::min(existing-value, new-value)' to make
+ // sure a definition takes precedence over a declaration.
+ auto [Iter, Inserted] = ImportList[Summary->modulePath()].try_emplace(
+ GUID, Summary->importType());
+
+ if (!Inserted)
+ Iter->second = std::min(Iter->second, Summary->importType());
}
}
return true;
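
The std::min trick works because the import-type enum orders a definition before a declaration; the same insert-or-strengthen pattern in isolation (names invented for the sketch):

    #include <algorithm>
    #include <cstdint>
    #include <map>

    enum ImportKind : uint8_t { Definition = 0, Declaration = 1 };

    // Keep the strongest (numerically smallest) kind recorded per GUID.
    void addImport(std::map<uint64_t, ImportKind> &List, uint64_t GUID,
                   ImportKind Kind) {
      auto [It, Inserted] = List.try_emplace(GUID, Kind);
      if (!Inserted)
        It->second = std::min(It->second, Kind);
    }
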
diff --git a/llvm/lib/LTO/ThinLTOCodeGenerator.cpp b/llvm/lib/LTO/ThinLTOCodeGenerator.cpp
index 8f517eb50dc7..b054b42b6377 100644
--- a/llvm/lib/LTO/ThinLTOCodeGenerator.cpp
+++ b/llvm/lib/LTO/ThinLTOCodeGenerator.cpp
@@ -766,7 +766,7 @@ void ThinLTOCodeGenerator::crossModuleImport(Module &TheModule,
void ThinLTOCodeGenerator::gatherImportedSummariesForModule(
Module &TheModule, ModuleSummaryIndex &Index,
std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex,
- const lto::InputFile &File) {
+ GVSummaryPtrSet &DecSummaries, const lto::InputFile &File) {
auto ModuleCount = Index.modulePaths().size();
auto ModuleIdentifier = TheModule.getModuleIdentifier();
@@ -796,7 +796,7 @@ void ThinLTOCodeGenerator::gatherImportedSummariesForModule(
llvm::gatherImportedSummariesForModule(
ModuleIdentifier, ModuleToDefinedGVSummaries,
- ImportLists[ModuleIdentifier], ModuleToSummariesForIndex);
+ ImportLists[ModuleIdentifier], ModuleToSummariesForIndex, DecSummaries);
}
/**
@@ -832,10 +832,14 @@ void ThinLTOCodeGenerator::emitImports(Module &TheModule, StringRef OutputName,
IsPrevailing(PrevailingCopy), ImportLists,
ExportLists);
+  // 'EmitImportsFiles' emits the list of modules to import from, and the set
+  // of keys in `ModuleToSummariesForIndex` should be a superset of the keys
+  // in `DecSummaries`, so there is no need to pass `DecSummaries` to
+  // 'EmitImportsFiles'.
+ GVSummaryPtrSet DecSummaries;
std::map<std::string, GVSummaryMapTy> ModuleToSummariesForIndex;
llvm::gatherImportedSummariesForModule(
ModuleIdentifier, ModuleToDefinedGVSummaries,
- ImportLists[ModuleIdentifier], ModuleToSummariesForIndex);
+ ImportLists[ModuleIdentifier], ModuleToSummariesForIndex, DecSummaries);
std::error_code EC;
if ((EC = EmitImportsFiles(ModuleIdentifier, OutputName,
diff --git a/llvm/lib/MC/ELFObjectWriter.cpp b/llvm/lib/MC/ELFObjectWriter.cpp
index df5f55b42d97..bd785eff4d22 100644
--- a/llvm/lib/MC/ELFObjectWriter.cpp
+++ b/llvm/lib/MC/ELFObjectWriter.cpp
@@ -260,7 +260,7 @@ public:
void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment *Fragment, const MCFixup &Fixup,
MCValue Target, uint64_t &FixedValue) override;
- bool usesRela(const MCTargetOptions *, const MCSectionELF &Sec) const;
+ bool usesRela(const MCTargetOptions *TO, const MCSectionELF &Sec) const;
void executePostLayoutBinding(MCAssembler &Asm,
const MCAsmLayout &Layout) override;
@@ -944,14 +944,14 @@ void ELFWriter::WriteSecHdrEntry(uint32_t Name, uint32_t Type, uint64_t Flags,
}
template <class uint>
-static void encodeCrel(ArrayRef<ELFRelocationEntry> Relocs, raw_ostream &os) {
+static void encodeCrel(ArrayRef<ELFRelocationEntry> Relocs, raw_ostream &OS) {
uint OffsetMask = 8, Offset = 0, Addend = 0;
uint32_t Symidx = 0, Type = 0;
// hdr & 4 indicates 3 flag bits in delta offset and flags members.
- for (size_t i = 0, e = Relocs.size(); i != e; ++i)
- OffsetMask |= Relocs[i].Offset;
+ for (const ELFRelocationEntry &Entry : Relocs)
+ OffsetMask |= Entry.Offset;
const int Shift = llvm::countr_zero(OffsetMask);
- encodeULEB128(Relocs.size() * 8 + ELF::CREL_HDR_ADDEND + Shift, os);
+ encodeULEB128(Relocs.size() * 8 + ELF::CREL_HDR_ADDEND + Shift, OS);
for (const ELFRelocationEntry &Entry : Relocs) {
// The delta offset and flags member may be larger than uint64_t. Special
// case the first byte (3 flag bits and 4 offset bits). Other ULEB128 bytes
@@ -962,22 +962,22 @@ static void encodeCrel(ArrayRef<ELFRelocationEntry> Relocs, raw_ostream &os) {
uint8_t B = (DeltaOffset << 3) + (Symidx != CurSymidx) +
(Type != Entry.Type ? 2 : 0) + (Addend != Entry.Addend ? 4 : 0);
if (DeltaOffset < 0x10) {
- os << char(B);
+ OS << char(B);
} else {
- os << char(B | 0x80);
- encodeULEB128(DeltaOffset >> 4, os);
+ OS << char(B | 0x80);
+ encodeULEB128(DeltaOffset >> 4, OS);
}
// Delta symidx/type/addend members (SLEB128).
if (B & 1) {
- encodeSLEB128(static_cast<int32_t>(CurSymidx - Symidx), os);
+ encodeSLEB128(static_cast<int32_t>(CurSymidx - Symidx), OS);
Symidx = CurSymidx;
}
if (B & 2) {
- encodeSLEB128(static_cast<int32_t>(Entry.Type - Type), os);
+ encodeSLEB128(static_cast<int32_t>(Entry.Type - Type), OS);
Type = Entry.Type;
}
if (B & 4) {
- encodeSLEB128(std::make_signed_t<uint>(Entry.Addend - Addend), os);
+ encodeSLEB128(std::make_signed_t<uint>(Entry.Addend - Addend), OS);
Addend = Entry.Addend;
}
}
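
For orientation, the CREL header ULEB128 packs three things: the relocation count in the high bits, an addend flag, and an offset shift. Seeding OffsetMask with 8 caps the shift at 3, so it always fits below the flag bit. A self-contained check of that arithmetic (the constant mirrors ELF::CREL_HDR_ADDEND):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr uint64_t CREL_HDR_ADDEND = 4;

    constexpr uint64_t makeCrelHdr(size_t Count, bool Addend, unsigned Shift) {
      return Count * 8 + (Addend ? CREL_HDR_ADDEND : 0) + Shift; // Shift <= 3
    }

    int main() {
      uint64_t Hdr = makeCrelHdr(/*Count=*/3, /*Addend=*/true, /*Shift=*/2);
      assert(Hdr / 8 == 3);                 // relocation count
      assert((Hdr & CREL_HDR_ADDEND) != 0); // 3 delta flag bits per entry
      assert(Hdr % CREL_HDR_ADDEND == 2);   // offsets scaled by 1 << 2
    }
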
diff --git a/llvm/lib/MC/MCDwarf.cpp b/llvm/lib/MC/MCDwarf.cpp
index 2ee0c3eb27b9..aba4071e6b91 100644
--- a/llvm/lib/MC/MCDwarf.cpp
+++ b/llvm/lib/MC/MCDwarf.cpp
@@ -1910,6 +1910,11 @@ void MCDwarfFrameEmitter::Emit(MCObjectStreamer &Streamer, MCAsmBackend *MAB,
[](const MCDwarfFrameInfo &X, const MCDwarfFrameInfo &Y) {
return CIEKey(X) < CIEKey(Y);
});
+ // Disable AttemptToFoldSymbolOffsetDifference folding of fdeStart-cieStart
+  // for EmitFDE due to a performance issue. The label differences will be
+  // evaluated at write time.
+ assert(Streamer.getUseAssemblerInfoForParsing());
+ Streamer.setUseAssemblerInfoForParsing(false);
for (auto I = FrameArrayX.begin(), E = FrameArrayX.end(); I != E;) {
const MCDwarfFrameInfo &Frame = *I;
++I;
@@ -1930,6 +1935,7 @@ void MCDwarfFrameEmitter::Emit(MCObjectStreamer &Streamer, MCAsmBackend *MAB,
Emitter.EmitFDE(*CIEStart, Frame, I == E, *SectionStart);
}
+ Streamer.setUseAssemblerInfoForParsing(true);
}
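
Because the flag must be restored on every path out of the loop, a scope guard is the defensive way to write this toggle; a sketch under that assumption (the guard class is invented, not part of MC):

    // Temporarily override UseAssemblerInfoForParsing, restoring the previous
    // value on scope exit.
    class AssemblerInfoParsingScope {
      MCStreamer &S;
      bool Saved;

    public:
      AssemblerInfoParsingScope(MCStreamer &S, bool Value)
          : S(S), Saved(S.getUseAssemblerInfoForParsing()) {
        S.setUseAssemblerInfoForParsing(Value);
      }
      ~AssemblerInfoParsingScope() { S.setUseAssemblerInfoForParsing(Saved); }
    };
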
void MCDwarfFrameEmitter::encodeAdvanceLoc(MCContext &Context,
diff --git a/llvm/lib/MC/MCObjectStreamer.cpp b/llvm/lib/MC/MCObjectStreamer.cpp
index d2da5d0d3f90..0ccade91677a 100644
--- a/llvm/lib/MC/MCObjectStreamer.cpp
+++ b/llvm/lib/MC/MCObjectStreamer.cpp
@@ -40,9 +40,6 @@ MCObjectStreamer::MCObjectStreamer(MCContext &Context,
MCObjectStreamer::~MCObjectStreamer() = default;
-// AssemblerPtr is used for evaluation of expressions and causes
-// difference between asm and object outputs. Return nullptr to in
-// inline asm mode to limit divergence to assembly inputs.
MCAssembler *MCObjectStreamer::getAssemblerPtr() {
if (getUseAssemblerInfoForParsing())
return Assembler.get();
diff --git a/llvm/lib/MC/MCStreamer.cpp b/llvm/lib/MC/MCStreamer.cpp
index 176d55aa890b..199d865ea349 100644
--- a/llvm/lib/MC/MCStreamer.cpp
+++ b/llvm/lib/MC/MCStreamer.cpp
@@ -93,7 +93,7 @@ void MCTargetStreamer::emitAssignment(MCSymbol *Symbol, const MCExpr *Value) {}
MCStreamer::MCStreamer(MCContext &Ctx)
: Context(Ctx), CurrentWinFrameInfo(nullptr),
- CurrentProcWinFrameInfoStartIndex(0), UseAssemblerInfoForParsing(false) {
+ CurrentProcWinFrameInfoStartIndex(0) {
SectionStack.push_back(std::pair<MCSectionSubPair, MCSectionSubPair>());
}
diff --git a/llvm/lib/MCA/InstrBuilder.cpp b/llvm/lib/MCA/InstrBuilder.cpp
index bcf065c56691..d5cbdc5de0b8 100644
--- a/llvm/lib/MCA/InstrBuilder.cpp
+++ b/llvm/lib/MCA/InstrBuilder.cpp
@@ -31,9 +31,9 @@ InstrBuilder::InstrBuilder(const llvm::MCSubtargetInfo &sti,
const llvm::MCInstrInfo &mcii,
const llvm::MCRegisterInfo &mri,
const llvm::MCInstrAnalysis *mcia,
- const mca::InstrumentManager &im)
+ const mca::InstrumentManager &im, unsigned cl)
: STI(sti), MCII(mcii), MRI(mri), MCIA(mcia), IM(im), FirstCallInst(true),
- FirstReturnInst(true) {
+ FirstReturnInst(true), CallLatency(cl) {
const MCSchedModel &SM = STI.getSchedModel();
ProcResourceMasks.resize(SM.getNumProcResourceKinds());
computeProcResourceMasks(STI.getSchedModel(), ProcResourceMasks);
@@ -220,17 +220,19 @@ static void initializeUsedResources(InstrDesc &ID,
static void computeMaxLatency(InstrDesc &ID, const MCInstrDesc &MCDesc,
const MCSchedClassDesc &SCDesc,
- const MCSubtargetInfo &STI) {
+ const MCSubtargetInfo &STI,
+ unsigned CallLatency) {
if (MCDesc.isCall()) {
// We cannot estimate how long this call will take.
- // Artificially set an arbitrarily high latency (100cy).
- ID.MaxLatency = 100U;
+ // Artificially set an arbitrarily high latency.
+ ID.MaxLatency = CallLatency;
return;
}
int Latency = MCSchedModel::computeInstrLatency(STI, SCDesc);
- // If latency is unknown, then conservatively assume a MaxLatency of 100cy.
- ID.MaxLatency = Latency < 0 ? 100U : static_cast<unsigned>(Latency);
+ // If latency is unknown, then conservatively assume the MaxLatency set for
+ // calls.
+ ID.MaxLatency = Latency < 0 ? CallLatency : static_cast<unsigned>(Latency);
}
static Error verifyOperands(const MCInstrDesc &MCDesc, const MCInst &MCI) {
@@ -568,7 +570,7 @@ InstrBuilder::createInstrDescImpl(const MCInst &MCI,
// We don't correctly model calls.
WithColor::warning() << "found a call in the input assembly sequence.\n";
WithColor::note() << "call instructions are not correctly modeled. "
- << "Assume a latency of 100cy.\n";
+ << "Assume a latency of " << CallLatency << "cy.\n";
FirstCallInst = false;
}
@@ -580,7 +582,7 @@ InstrBuilder::createInstrDescImpl(const MCInst &MCI,
}
initializeUsedResources(*ID, SCDesc, STI, ProcResourceMasks);
- computeMaxLatency(*ID, MCDesc, SCDesc, STI);
+ computeMaxLatency(*ID, MCDesc, SCDesc, STI, CallLatency);
if (Error Err = verifyOperands(MCDesc, MCI))
return std::move(Err);
diff --git a/llvm/lib/Object/ELF.cpp b/llvm/lib/Object/ELF.cpp
index 24bd52f3af41..685929510d9e 100644
--- a/llvm/lib/Object/ELF.cpp
+++ b/llvm/lib/Object/ELF.cpp
@@ -394,23 +394,26 @@ ELFFile<ELFT>::decode_relrs(Elf_Relr_Range relrs) const {
}
template <class ELFT>
-uint64_t ELFFile<ELFT>::crelHeader(ArrayRef<uint8_t> Content) const {
- DataExtractor Data(Content, true, 8); // endian/class is irrelevant
- DataExtractor::Cursor Cur(0);
- uint64_t Hdr = Data.getULEB128(Cur);
- // In case of an error, return 0 and postpone error reporting to decodeCrel.
- consumeError(Cur.takeError());
+Expected<uint64_t>
+ELFFile<ELFT>::getCrelHeader(ArrayRef<uint8_t> Content) const {
+ DataExtractor Data(Content, isLE(), ELFT::Is64Bits ? 8 : 4);
+ Error Err = Error::success();
+  uint64_t Offset = 0;
+  uint64_t Hdr = Data.getULEB128(&Offset, &Err);
+  if (Err)
+    return std::move(Err);
return Hdr;
}
template <class ELFT>
Expected<typename ELFFile<ELFT>::RelsOrRelas>
ELFFile<ELFT>::decodeCrel(ArrayRef<uint8_t> Content) const {
- DataExtractor Data(Content, true, 8); // endian/class is irrelevant
+ DataExtractor Data(Content, isLE(), ELFT::Is64Bits ? 8 : 4);
DataExtractor::Cursor Cur(0);
const uint64_t Hdr = Data.getULEB128(Cur);
- const size_t Count = Hdr / 8, FlagBits = Hdr & ELF::CREL_HDR_ADDEND ? 3 : 2,
- Shift = Hdr % ELF::CREL_HDR_ADDEND;
+ const size_t Count = Hdr / 8;
+ const size_t FlagBits = Hdr & ELF::CREL_HDR_ADDEND ? 3 : 2;
+ const size_t Shift = Hdr % ELF::CREL_HDR_ADDEND;
std::vector<Elf_Rel> Rels;
std::vector<Elf_Rela> Relas;
if (Hdr & ELF::CREL_HDR_ADDEND)
diff --git a/llvm/lib/ObjectYAML/ELFEmitter.cpp b/llvm/lib/ObjectYAML/ELFEmitter.cpp
index 9068bddd1478..78b643de9ed6 100644
--- a/llvm/lib/ObjectYAML/ELFEmitter.cpp
+++ b/llvm/lib/ObjectYAML/ELFEmitter.cpp
@@ -1295,7 +1295,8 @@ void ELFState<ELFT>::writeSectionContent(
OffsetMask |= Rel.Offset;
const int Shift = llvm::countr_zero(OffsetMask);
if (IsCrel)
- CBA.writeULEB128(Section.Relocations->size() * 8 + 4 + Shift);
+ CBA.writeULEB128(Section.Relocations->size() * 8 + ELF::CREL_HDR_ADDEND +
+ Shift);
for (const ELFYAML::Relocation &Rel : *Section.Relocations) {
const bool IsDynamic = Section.Link && (*Section.Link == ".dynsym");
uint32_t CurSymidx =
diff --git a/llvm/lib/Option/OptTable.cpp b/llvm/lib/Option/OptTable.cpp
index b8b6b90c253f..3eceb0fbdfc4 100644
--- a/llvm/lib/Option/OptTable.cpp
+++ b/llvm/lib/Option/OptTable.cpp
@@ -197,7 +197,7 @@ OptTable::suggestValueCompletions(StringRef Option, StringRef Arg) const {
std::vector<std::string> Result;
for (StringRef Val : Candidates)
- if (Val.starts_with(Arg) && Arg.compare(Val))
+ if (Val.starts_with(Arg) && Arg != Val)
Result.push_back(std::string(Val));
return Result;
}
diff --git a/llvm/lib/ProfileData/InstrProf.cpp b/llvm/lib/ProfileData/InstrProf.cpp
index 806d01de1ada..f9cd71b37002 100644
--- a/llvm/lib/ProfileData/InstrProf.cpp
+++ b/llvm/lib/ProfileData/InstrProf.cpp
@@ -1002,46 +1002,60 @@ void InstrProfRecord::addValueData(uint32_t ValueKind, uint32_t Site,
ValueSites.emplace_back(VData, VData + N);
}
-std::vector<BPFunctionNode> TemporalProfTraceTy::createBPFunctionNodes(
- ArrayRef<TemporalProfTraceTy> Traces) {
+void TemporalProfTraceTy::createBPFunctionNodes(
+ ArrayRef<TemporalProfTraceTy> Traces, std::vector<BPFunctionNode> &Nodes,
+ bool RemoveOutlierUNs) {
using IDT = BPFunctionNode::IDT;
using UtilityNodeT = BPFunctionNode::UtilityNodeT;
- // Collect all function IDs ordered by their smallest timestamp. This will be
- // used as the initial FunctionNode order.
- SetVector<IDT> FunctionIds;
- size_t LargestTraceSize = 0;
- for (auto &Trace : Traces)
- LargestTraceSize =
- std::max(LargestTraceSize, Trace.FunctionNameRefs.size());
- for (size_t Timestamp = 0; Timestamp < LargestTraceSize; Timestamp++)
- for (auto &Trace : Traces)
- if (Timestamp < Trace.FunctionNameRefs.size())
- FunctionIds.insert(Trace.FunctionNameRefs[Timestamp]);
-
- const int N = Log2_64(LargestTraceSize) + 1;
-
+ UtilityNodeT MaxUN = 0;
+ DenseMap<IDT, size_t> IdToFirstTimestamp;
+ DenseMap<IDT, UtilityNodeT> IdToFirstUN;
+ DenseMap<IDT, SmallVector<UtilityNodeT>> IdToUNs;
// TODO: We need to use the Trace.Weight field to give more weight to more
// important utilities
- DenseMap<IDT, SmallVector<UtilityNodeT, 4>> FuncGroups;
- for (size_t TraceIdx = 0; TraceIdx < Traces.size(); TraceIdx++) {
- auto &Trace = Traces[TraceIdx].FunctionNameRefs;
- for (size_t Timestamp = 0; Timestamp < Trace.size(); Timestamp++) {
- for (int I = Log2_64(Timestamp + 1); I < N; I++) {
- auto FunctionId = Trace[Timestamp];
- UtilityNodeT GroupId = TraceIdx * N + I;
- FuncGroups[FunctionId].push_back(GroupId);
+ for (auto &Trace : Traces) {
+ size_t CutoffTimestamp = 1;
+ for (size_t Timestamp = 0; Timestamp < Trace.FunctionNameRefs.size();
+ Timestamp++) {
+ IDT Id = Trace.FunctionNameRefs[Timestamp];
+ auto [It, WasInserted] = IdToFirstTimestamp.try_emplace(Id, Timestamp);
+ if (!WasInserted)
+ It->getSecond() = std::min<size_t>(It->getSecond(), Timestamp);
+ if (Timestamp >= CutoffTimestamp) {
+ ++MaxUN;
+ CutoffTimestamp = 2 * Timestamp;
}
+ IdToFirstUN.try_emplace(Id, MaxUN);
}
+ for (auto &[Id, FirstUN] : IdToFirstUN)
+ for (auto UN = FirstUN; UN <= MaxUN; ++UN)
+ IdToUNs[Id].push_back(UN);
+ ++MaxUN;
+ IdToFirstUN.clear();
}
- std::vector<BPFunctionNode> Nodes;
- for (auto Id : FunctionIds) {
- auto &UNs = FuncGroups[Id];
- llvm::sort(UNs);
- UNs.erase(std::unique(UNs.begin(), UNs.end()), UNs.end());
- Nodes.emplace_back(Id, UNs);
+ if (RemoveOutlierUNs) {
+ DenseMap<UtilityNodeT, unsigned> UNFrequency;
+ for (auto &[Id, UNs] : IdToUNs)
+ for (auto &UN : UNs)
+ ++UNFrequency[UN];
+ // Filter out utility nodes that are too infrequent or too prevalent to make
+ // BalancedPartitioning more effective.
+ for (auto &[Id, UNs] : IdToUNs)
+ llvm::erase_if(UNs, [&](auto &UN) {
+ return UNFrequency[UN] <= 1 || 2 * UNFrequency[UN] > IdToUNs.size();
+ });
}
- return Nodes;
+
+ for (auto &[Id, UNs] : IdToUNs)
+ Nodes.emplace_back(Id, UNs);
+
+ // Since BalancedPartitioning is sensitive to the initial order, we explicitly
+ // order nodes by their earliest timestamp.
+ llvm::sort(Nodes, [&](auto &L, auto &R) {
+ return std::make_pair(IdToFirstTimestamp[L.Id], L.Id) <
+ std::make_pair(IdToFirstTimestamp[R.Id], R.Id);
+ });
}
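
The rewritten loop assigns each timestamp to a utility-node window whose width doubles over time, so early positions in a trace are grouped more finely than the tail. The windowing in isolation, for one trace with simplified types:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // A new utility node opens when the timestamp reaches the cutoff, and the
    // cutoff then doubles, mirroring the CutoffTimestamp logic above.
    std::vector<uint32_t> windowIds(size_t TraceLen) {
      std::vector<uint32_t> UNs(TraceLen);
      uint32_t UN = 0;
      size_t Cutoff = 1;
      for (size_t T = 0; T < TraceLen; ++T) {
        if (T >= Cutoff) {
          ++UN;
          Cutoff = 2 * T;
        }
        UNs[T] = UN;
      }
      return UNs; // TraceLen 8 yields {0, 1, 2, 2, 3, 3, 3, 3}.
    }
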
#define INSTR_PROF_COMMON_API_IMPL
@@ -1620,13 +1634,12 @@ inline size_t constexpr offsetOf(T1 T2::*Member) {
return size_t(&(Object.*Member)) - size_t(&Object);
}
+// Read a uint64_t from the specified buffer offset, byte-swapping to the
+// native endianness if necessary.
static inline uint64_t read(const unsigned char *Buffer, size_t Offset) {
- return *reinterpret_cast<const uint64_t *>(Buffer + Offset);
-}
-
-uint64_t Header::formatVersion() const {
- using namespace support;
- return endian::byte_swap<uint64_t, llvm::endianness::little>(Version);
+ using namespace ::support;
+ return endian::read<uint64_t, llvm::endianness::little, unaligned>(Buffer +
+ Offset);
}
Expected<Header> Header::readFromBuffer(const unsigned char *Buffer) {
@@ -1638,18 +1651,15 @@ Expected<Header> Header::readFromBuffer(const unsigned char *Buffer) {
H.Magic = read(Buffer, offsetOf(&Header::Magic));
// Check the magic number.
- uint64_t Magic =
- endian::byte_swap<uint64_t, llvm::endianness::little>(H.Magic);
- if (Magic != IndexedInstrProf::Magic)
+ if (H.Magic != IndexedInstrProf::Magic)
return make_error<InstrProfError>(instrprof_error::bad_magic);
// Read the version.
H.Version = read(Buffer, offsetOf(&Header::Version));
- if (GET_VERSION(H.formatVersion()) >
- IndexedInstrProf::ProfVersion::CurrentVersion)
+ if (GET_VERSION(H.Version) > IndexedInstrProf::ProfVersion::CurrentVersion)
return make_error<InstrProfError>(instrprof_error::unsupported_version);
- switch (GET_VERSION(H.formatVersion())) {
+ switch (GET_VERSION(H.Version)) {
// When a new field is added in the header add a case statement here to
// populate it.
static_assert(
@@ -1680,7 +1690,7 @@ Expected<Header> Header::readFromBuffer(const unsigned char *Buffer) {
}
size_t Header::size() const {
- switch (GET_VERSION(formatVersion())) {
+ switch (GET_VERSION(Version)) {
// When a new field is added to the header add a case statement here to
// compute the size as offset of the new field + size of the new field. This
// relies on the field being added to the end of the list.
diff --git a/llvm/lib/ProfileData/InstrProfCorrelator.cpp b/llvm/lib/ProfileData/InstrProfCorrelator.cpp
index cf80a58f43bd..44e2aeb00d8c 100644
--- a/llvm/lib/ProfileData/InstrProfCorrelator.cpp
+++ b/llvm/lib/ProfileData/InstrProfCorrelator.cpp
@@ -350,16 +350,14 @@ void DwarfInstrProfCorrelator<IntPtrT>::correlateProfileDataImpl(
continue;
}
StringRef AnnotationName = *AnnotationNameOrErr;
- if (AnnotationName.compare(
- InstrProfCorrelator::FunctionNameAttributeName) == 0) {
+ if (AnnotationName == InstrProfCorrelator::FunctionNameAttributeName) {
if (auto EC =
AnnotationFormValue->getAsCString().moveInto(FunctionName))
consumeError(std::move(EC));
- } else if (AnnotationName.compare(
- InstrProfCorrelator::CFGHashAttributeName) == 0) {
+ } else if (AnnotationName == InstrProfCorrelator::CFGHashAttributeName) {
CFGHash = AnnotationFormValue->getAsUnsignedConstant();
- } else if (AnnotationName.compare(
- InstrProfCorrelator::NumCountersAttributeName) == 0) {
+ } else if (AnnotationName ==
+ InstrProfCorrelator::NumCountersAttributeName) {
NumCounters = AnnotationFormValue->getAsUnsignedConstant();
}
}
diff --git a/llvm/lib/ProfileData/InstrProfReader.cpp b/llvm/lib/ProfileData/InstrProfReader.cpp
index ba21e01abfba..836206a4fd86 100644
--- a/llvm/lib/ProfileData/InstrProfReader.cpp
+++ b/llvm/lib/ProfileData/InstrProfReader.cpp
@@ -1212,7 +1212,6 @@ Error IndexedMemProfReader::deserialize(const unsigned char *Start,
const uint64_t FirstWord =
support::endian::readNext<uint64_t, llvm::endianness::little>(Ptr);
- memprof::IndexedVersion Version = memprof::Version0;
if (FirstWord == memprof::Version1 || FirstWord == memprof::Version2) {
// Everything is good. We can proceed to deserialize the rest.
Version = static_cast<memprof::IndexedVersion>(FirstWord);
@@ -1311,43 +1310,33 @@ Error IndexedInstrProfReader::readHeader() {
const IndexedInstrProf::Header *Header = &HeaderOr.get();
Cur += Header->size();
- Cur = readSummary((IndexedInstrProf::ProfVersion)Header->formatVersion(), Cur,
+ Cur = readSummary((IndexedInstrProf::ProfVersion)Header->Version, Cur,
/* UseCS */ false);
- if (Header->formatVersion() & VARIANT_MASK_CSIR_PROF)
- Cur =
- readSummary((IndexedInstrProf::ProfVersion)Header->formatVersion(), Cur,
- /* UseCS */ true);
+ if (Header->Version & VARIANT_MASK_CSIR_PROF)
+ Cur = readSummary((IndexedInstrProf::ProfVersion)Header->Version, Cur,
+ /* UseCS */ true);
// Read the hash type and start offset.
- IndexedInstrProf::HashT HashType = static_cast<IndexedInstrProf::HashT>(
- endian::byte_swap<uint64_t, llvm::endianness::little>(Header->HashType));
+ IndexedInstrProf::HashT HashType =
+ static_cast<IndexedInstrProf::HashT>(Header->HashType);
if (HashType > IndexedInstrProf::HashT::Last)
return error(instrprof_error::unsupported_hash_type);
- uint64_t HashOffset =
- endian::byte_swap<uint64_t, llvm::endianness::little>(Header->HashOffset);
-
// The hash table with profile counts comes next.
auto IndexPtr = std::make_unique<InstrProfReaderIndex<OnDiskHashTableImplV3>>(
- Start + HashOffset, Cur, Start, HashType, Header->formatVersion());
+ Start + Header->HashOffset, Cur, Start, HashType, Header->Version);
// The MemProfOffset field in the header is only valid when the format
// version is higher than 8 (when it was introduced).
- if (GET_VERSION(Header->formatVersion()) >= 8 &&
- Header->formatVersion() & VARIANT_MASK_MEMPROF) {
- uint64_t MemProfOffset =
- endian::byte_swap<uint64_t, llvm::endianness::little>(
- Header->MemProfOffset);
- if (Error E = MemProfReader.deserialize(Start, MemProfOffset))
+ if (GET_VERSION(Header->Version) >= 8 &&
+ Header->Version & VARIANT_MASK_MEMPROF) {
+ if (Error E = MemProfReader.deserialize(Start, Header->MemProfOffset))
return E;
}
// BinaryIdOffset field in the header is only valid when the format version
// is higher than 9 (when it was introduced).
- if (GET_VERSION(Header->formatVersion()) >= 9) {
- uint64_t BinaryIdOffset =
- endian::byte_swap<uint64_t, llvm::endianness::little>(
- Header->BinaryIdOffset);
- const unsigned char *Ptr = Start + BinaryIdOffset;
+ if (GET_VERSION(Header->Version) >= 9) {
+ const unsigned char *Ptr = Start + Header->BinaryIdOffset;
// Read binary ids size.
BinaryIdsSize =
support::endian::readNext<uint64_t, llvm::endianness::little>(Ptr);
@@ -1360,11 +1349,8 @@ Error IndexedInstrProfReader::readHeader() {
"corrupted binary ids");
}
- if (GET_VERSION(Header->formatVersion()) >= 12) {
- uint64_t VTableNamesOffset =
- endian::byte_swap<uint64_t, llvm::endianness::little>(
- Header->VTableNamesOffset);
- const unsigned char *Ptr = Start + VTableNamesOffset;
+ if (GET_VERSION(Header->Version) >= 12) {
+ const unsigned char *Ptr = Start + Header->VTableNamesOffset;
CompressedVTableNamesLen =
support::endian::readNext<uint64_t, llvm::endianness::little>(Ptr);
@@ -1376,12 +1362,9 @@ Error IndexedInstrProfReader::readHeader() {
return make_error<InstrProfError>(instrprof_error::truncated);
}
- if (GET_VERSION(Header->formatVersion()) >= 10 &&
- Header->formatVersion() & VARIANT_MASK_TEMPORAL_PROF) {
- uint64_t TemporalProfTracesOffset =
- endian::byte_swap<uint64_t, llvm::endianness::little>(
- Header->TemporalProfTracesOffset);
- const unsigned char *Ptr = Start + TemporalProfTracesOffset;
+ if (GET_VERSION(Header->Version) >= 10 &&
+ Header->Version & VARIANT_MASK_TEMPORAL_PROF) {
+ const unsigned char *Ptr = Start + Header->TemporalProfTracesOffset;
const auto *PtrEnd = (const unsigned char *)DataBuffer->getBufferEnd();
// Expect at least two 64 bit fields: NumTraces, and TraceStreamSize
if (Ptr + 2 * sizeof(uint64_t) > PtrEnd)
@@ -1506,6 +1489,55 @@ Expected<InstrProfRecord> IndexedInstrProfReader::getInstrProfRecord(
return error(instrprof_error::unknown_function);
}
+static Expected<memprof::MemProfRecord>
+getMemProfRecordV0(const memprof::IndexedMemProfRecord &IndexedRecord,
+ MemProfFrameHashTable &MemProfFrameTable) {
+ memprof::FrameIdConverter<MemProfFrameHashTable> FrameIdConv(
+ MemProfFrameTable);
+
+ memprof::MemProfRecord Record =
+ memprof::MemProfRecord(IndexedRecord, FrameIdConv);
+
+ // Check that all frame ids were successfully converted to frames.
+ if (FrameIdConv.LastUnmappedId) {
+ return make_error<InstrProfError>(instrprof_error::hash_mismatch,
+ "memprof frame not found for frame id " +
+ Twine(*FrameIdConv.LastUnmappedId));
+ }
+
+ return Record;
+}
+
+static Expected<memprof::MemProfRecord>
+getMemProfRecordV2(const memprof::IndexedMemProfRecord &IndexedRecord,
+ MemProfFrameHashTable &MemProfFrameTable,
+ MemProfCallStackHashTable &MemProfCallStackTable) {
+ memprof::FrameIdConverter<MemProfFrameHashTable> FrameIdConv(
+ MemProfFrameTable);
+
+ memprof::CallStackIdConverter<MemProfCallStackHashTable> CSIdConv(
+ MemProfCallStackTable, FrameIdConv);
+
+ memprof::MemProfRecord Record = IndexedRecord.toMemProfRecord(CSIdConv);
+
+ // Check that all call stack ids were successfully converted to call stacks.
+ if (CSIdConv.LastUnmappedId) {
+ return make_error<InstrProfError>(
+ instrprof_error::hash_mismatch,
+ "memprof call stack not found for call stack id " +
+ Twine(*CSIdConv.LastUnmappedId));
+ }
+
+ // Check that all frame ids were successfully converted to frames.
+ if (FrameIdConv.LastUnmappedId) {
+ return make_error<InstrProfError>(instrprof_error::hash_mismatch,
+ "memprof frame not found for frame id " +
+ Twine(*FrameIdConv.LastUnmappedId));
+ }
+
+ return Record;
+}
+
Expected<memprof::MemProfRecord>
IndexedMemProfReader::getMemProfRecord(const uint64_t FuncNameHash) const {
// TODO: Add memprof specific errors.
@@ -1518,41 +1550,27 @@ IndexedMemProfReader::getMemProfRecord(const uint64_t FuncNameHash) const {
instrprof_error::unknown_function,
"memprof record not found for function hash " + Twine(FuncNameHash));
- // Setup a callback to convert from frame ids to frame using the on-disk
- // FrameData hash table.
- memprof::FrameIdConverter<MemProfFrameHashTable> FrameIdConv(
- *MemProfFrameTable.get());
-
const memprof::IndexedMemProfRecord IndexedRecord = *Iter;
- memprof::MemProfRecord Record;
- if (MemProfCallStackTable) {
- // Setup a callback to convert call stack ids to call stacks using the
- // on-disk hash table.
- memprof::CallStackIdConverter<MemProfCallStackHashTable> CSIdConv(
- *MemProfCallStackTable.get(), FrameIdConv);
-
- Record = IndexedRecord.toMemProfRecord(CSIdConv);
-
- // Check that all call stack ids were successfully converted to call stacks.
- if (CSIdConv.LastUnmappedId) {
- return make_error<InstrProfError>(
- instrprof_error::hash_mismatch,
- "memprof call stack not found for call stack id " +
- Twine(*CSIdConv.LastUnmappedId));
- }
- } else {
- Record = memprof::MemProfRecord(IndexedRecord, FrameIdConv);
- }
-
- // Check that all frame ids were successfully converted to frames.
- if (FrameIdConv.LastUnmappedId) {
- return make_error<InstrProfError>(
- instrprof_error::hash_mismatch,
- "memprof frame not found for frame id " +
- Twine(*FrameIdConv.LastUnmappedId));
+ switch (Version) {
+ case memprof::Version0:
+ case memprof::Version1:
+ assert(MemProfFrameTable && "MemProfFrameTable must be available");
+ assert(!MemProfCallStackTable &&
+ "MemProfCallStackTable must not be available");
+ return getMemProfRecordV0(IndexedRecord, *MemProfFrameTable);
+ case memprof::Version2:
+ assert(MemProfFrameTable && "MemProfFrameTable must be available");
+ assert(MemProfCallStackTable && "MemProfCallStackTable must be available");
+ return getMemProfRecordV2(IndexedRecord, *MemProfFrameTable,
+ *MemProfCallStackTable);
}
- return Record;
+ return make_error<InstrProfError>(
+ instrprof_error::unsupported_version,
+ formatv("MemProf version {} not supported; "
+ "requires version between {} and {}, inclusive",
+ Version, memprof::MinimumSupportedVersion,
+ memprof::MaximumSupportedVersion));
}
Error IndexedInstrProfReader::getFunctionCounts(StringRef FuncName,
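
With the explicit version dispatch, callers receive a typed error rather than a partially converted record; a hedged usage sketch (Reader and FuncHash are assumed to exist in the caller):

    // Look up a memprof record and surface any conversion failure.
    Expected<memprof::MemProfRecord> RecOrErr =
        Reader.getMemProfRecord(FuncHash);
    if (!RecOrErr) {
      logAllUnhandledErrors(RecOrErr.takeError(), errs(),
                            "memprof lookup failed: ");
      return;
    }
    const memprof::MemProfRecord &Rec = *RecOrErr;
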
diff --git a/llvm/lib/ProfileData/InstrProfWriter.cpp b/llvm/lib/ProfileData/InstrProfWriter.cpp
index b5b13550b057..b67a9700b680 100644
--- a/llvm/lib/ProfileData/InstrProfWriter.cpp
+++ b/llvm/lib/ProfileData/InstrProfWriter.cpp
@@ -639,6 +639,58 @@ static Error writeMemProf(ProfOStream &OS,
memprof::MaximumSupportedVersion));
}
+uint64_t InstrProfWriter::writeHeader(const IndexedInstrProf::Header &Header,
+ const bool WritePrevVersion,
+ ProfOStream &OS) {
+ // Only write out the first four fields.
+ for (int I = 0; I < 4; I++)
+ OS.write(reinterpret_cast<const uint64_t *>(&Header)[I]);
+
+ // Remember the offset of the remaining fields to allow back patching later.
+ auto BackPatchStartOffset = OS.tell();
+
+ // Reserve the space for back patching later.
+ OS.write(0); // HashOffset
+ OS.write(0); // MemProfOffset
+ OS.write(0); // BinaryIdOffset
+ OS.write(0); // TemporalProfTracesOffset
+ if (!WritePrevVersion)
+ OS.write(0); // VTableNamesOffset
+
+ return BackPatchStartOffset;
+}
+
+Error InstrProfWriter::writeVTableNames(ProfOStream &OS) {
+ std::vector<std::string> VTableNameStrs;
+ for (StringRef VTableName : VTableNames.keys())
+ VTableNameStrs.push_back(VTableName.str());
+
+ std::string CompressedVTableNames;
+ if (!VTableNameStrs.empty())
+ if (Error E = collectGlobalObjectNameStrings(
+ VTableNameStrs, compression::zlib::isAvailable(),
+ CompressedVTableNames))
+ return E;
+
+ const uint64_t CompressedStringLen = CompressedVTableNames.length();
+
+ // Record the length of compressed string.
+ OS.write(CompressedStringLen);
+
+ // Write the chars in compressed strings.
+ for (auto &c : CompressedVTableNames)
+ OS.writeByte(static_cast<uint8_t>(c));
+
+ // Pad up to a multiple of 8.
+ // InstrProfReader could read bytes according to 'CompressedStringLen'.
+ const uint64_t PaddedLength = alignTo(CompressedStringLen, 8);
+
+ for (uint64_t K = CompressedStringLen; K < PaddedLength; K++)
+ OS.writeByte(0);
+
+ return Error::success();
+}
+
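
The factored-out writeHeader makes the back-patch contract explicit: the first four fields are written eagerly, and every later offset is derived from the returned BackPatchStartOffset (see the PatchItem arithmetic further down). A generic sketch of the reserve-then-patch idiom, using plain iostreams rather than ProfOStream:

    #include <cstdint>
    #include <fstream>

    // Reserve a u64 slot at the current position and remember where it lives.
    uint64_t reserveU64(std::ofstream &OS) {
      uint64_t Pos = (uint64_t)OS.tellp();
      uint64_t Zero = 0;
      OS.write(reinterpret_cast<const char *>(&Zero), sizeof(Zero));
      return Pos;
    }

    // Patch the slot once the real value (e.g. a section start) is known.
    void patchU64(std::ofstream &OS, uint64_t Pos, uint64_t Value) {
      OS.seekp((std::streamoff)Pos);
      OS.write(reinterpret_cast<const char *>(&Value), sizeof(Value));
    }
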
Error InstrProfWriter::writeImpl(ProfOStream &OS) {
using namespace IndexedInstrProf;
using namespace support;
@@ -651,7 +703,7 @@ Error InstrProfWriter::writeImpl(ProfOStream &OS) {
InfoObj->CSSummaryBuilder = &CSISB;
// Populate the hash table generator.
- SmallVector<std::pair<StringRef, const ProfilingData *>, 0> OrderedData;
+ SmallVector<std::pair<StringRef, const ProfilingData *>> OrderedData;
for (const auto &I : FunctionData)
if (shouldEncodeData(I.getValue()))
OrderedData.emplace_back((I.getKey()), &I.getValue());
@@ -661,7 +713,6 @@ Error InstrProfWriter::writeImpl(ProfOStream &OS) {
// Write the header.
IndexedInstrProf::Header Header;
- Header.Magic = IndexedInstrProf::Magic;
Header.Version = WritePrevVersion
? IndexedInstrProf::ProfVersion::Version11
: IndexedInstrProf::ProfVersion::CurrentVersion;
@@ -685,43 +736,8 @@ Error InstrProfWriter::writeImpl(ProfOStream &OS) {
if (static_cast<bool>(ProfileKind & InstrProfKind::TemporalProfile))
Header.Version |= VARIANT_MASK_TEMPORAL_PROF;
- Header.Unused = 0;
- Header.HashType = static_cast<uint64_t>(IndexedInstrProf::HashType);
- Header.HashOffset = 0;
- Header.MemProfOffset = 0;
- Header.BinaryIdOffset = 0;
- Header.TemporalProfTracesOffset = 0;
- Header.VTableNamesOffset = 0;
-
- // Only write out the first four fields. We need to remember the offset of the
- // remaining fields to allow back patching later.
- for (int I = 0; I < 4; I++)
- OS.write(reinterpret_cast<uint64_t *>(&Header)[I]);
-
- // Save the location of Header.HashOffset field in \c OS.
- uint64_t HashTableStartFieldOffset = OS.tell();
- // Reserve the space for HashOffset field.
- OS.write(0);
-
- // Save the location of MemProf profile data. This is stored in two parts as
- // the schema and as a separate on-disk chained hashtable.
- uint64_t MemProfSectionOffset = OS.tell();
- // Reserve space for the MemProf table field to be patched later if this
- // profile contains memory profile information.
- OS.write(0);
-
- // Save the location of binary ids section.
- uint64_t BinaryIdSectionOffset = OS.tell();
- // Reserve space for the BinaryIdOffset field to be patched later if this
- // profile contains binary ids.
- OS.write(0);
-
- uint64_t TemporalProfTracesOffset = OS.tell();
- OS.write(0);
-
- uint64_t VTableNamesOffset = OS.tell();
- if (!WritePrevVersion)
- OS.write(0);
+ const uint64_t BackPatchStartOffset =
+ writeHeader(Header, WritePrevVersion, OS);
// Reserve space to write profile summary data.
uint32_t NumEntries = ProfileSummaryBuilder::DefaultCutoffs.size();
@@ -790,34 +806,9 @@ Error InstrProfWriter::writeImpl(ProfOStream &OS) {
uint64_t VTableNamesSectionStart = OS.tell();
- if (!WritePrevVersion) {
- std::vector<std::string> VTableNameStrs;
- for (StringRef VTableName : VTableNames.keys())
- VTableNameStrs.push_back(VTableName.str());
-
- std::string CompressedVTableNames;
- if (!VTableNameStrs.empty())
- if (Error E = collectGlobalObjectNameStrings(
- VTableNameStrs, compression::zlib::isAvailable(),
- CompressedVTableNames))
- return E;
-
- const uint64_t CompressedStringLen = CompressedVTableNames.length();
-
- // Record the length of compressed string.
- OS.write(CompressedStringLen);
-
- // Write the chars in compressed strings.
- for (auto &c : CompressedVTableNames)
- OS.writeByte(static_cast<uint8_t>(c));
-
- // Pad up to a multiple of 8.
- // InstrProfReader could read bytes according to 'CompressedStringLen'.
- const uint64_t PaddedLength = alignTo(CompressedStringLen, 8);
-
- for (uint64_t K = CompressedStringLen; K < PaddedLength; K++)
- OS.writeByte(0);
- }
+ if (!WritePrevVersion)
+ if (Error E = writeVTableNames(OS))
+ return E;
uint64_t TemporalProfTracesSectionStart = 0;
if (static_cast<bool>(ProfileKind & InstrProfKind::TemporalProfile)) {
@@ -850,16 +841,20 @@ Error InstrProfWriter::writeImpl(ProfOStream &OS) {
}
InfoObj->CSSummaryBuilder = nullptr;
+ const size_t MemProfOffset = BackPatchStartOffset + sizeof(uint64_t);
+ const size_t BinaryIdOffset = MemProfOffset + sizeof(uint64_t);
+ const size_t TemporalProfTracesOffset = BinaryIdOffset + sizeof(uint64_t);
+ const size_t VTableNamesOffset = TemporalProfTracesOffset + sizeof(uint64_t);
if (!WritePrevVersion) {
// Now do the final patch:
PatchItem PatchItems[] = {
// Patch the Header.HashOffset field.
- {HashTableStartFieldOffset, &HashTableStart, 1},
+ {BackPatchStartOffset, &HashTableStart, 1},
// Patch the Header.MemProfOffset (=0 for profiles without MemProf
// data).
- {MemProfSectionOffset, &MemProfSectionStart, 1},
+ {MemProfOffset, &MemProfSectionStart, 1},
// Patch the Header.BinaryIdSectionOffset.
- {BinaryIdSectionOffset, &BinaryIdSectionStart, 1},
+ {BinaryIdOffset, &BinaryIdSectionStart, 1},
// Patch the Header.TemporalProfTracesOffset (=0 for profiles without
// traces).
{TemporalProfTracesOffset, &TemporalProfTracesSectionStart, 1},
@@ -875,12 +870,12 @@ Error InstrProfWriter::writeImpl(ProfOStream &OS) {
// Now do the final patch:
PatchItem PatchItems[] = {
// Patch the Header.HashOffset field.
- {HashTableStartFieldOffset, &HashTableStart, 1},
+ {BackPatchStartOffset, &HashTableStart, 1},
// Patch the Header.MemProfOffset (=0 for profiles without MemProf
// data).
- {MemProfSectionOffset, &MemProfSectionStart, 1},
+ {MemProfOffset, &MemProfSectionStart, 1},
// Patch the Header.BinaryIdSectionOffset.
- {BinaryIdSectionOffset, &BinaryIdSectionStart, 1},
+ {BinaryIdOffset, &BinaryIdSectionStart, 1},
// Patch the Header.TemporalProfTracesOffset (=0 for profiles without
// traces).
{TemporalProfTracesOffset, &TemporalProfTracesSectionStart, 1},
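The back-patching scheme above relies on the five header slots being reserved contiguously, so only the offset of the first slot needs to be kept; the rest are at fixed sizeof(uint64_t) strides. A minimal standalone sketch of the pattern, with an illustrative byte-buffer stream rather than the real ProfOStream API:

#include <cstdint>
#include <vector>

struct PatchStream {
  std::vector<uint8_t> Buf;
  uint64_t tell() const { return Buf.size(); }
  void write(uint64_t V) { // Append a little-endian u64.
    for (int I = 0; I < 8; ++I)
      Buf.push_back(uint8_t(V >> (8 * I)));
  }
  void patch(uint64_t Off, uint64_t V) { // Overwrite a reserved slot.
    for (int I = 0; I < 8; ++I)
      Buf[Off + I] = uint8_t(V >> (8 * I));
  }
};

int main() {
  PatchStream OS;
  const uint64_t BackPatchStart = OS.tell();
  OS.write(0); // HashOffset placeholder.
  OS.write(0); // MemProfOffset placeholder.
  OS.write(0); // BinaryIdOffset placeholder.
  // ... emit the variable-length sections, recording where each starts ...
  const uint64_t HashSectionStart = OS.tell();
  // Each slot sits at BackPatchStart + Index * sizeof(uint64_t).
  OS.patch(BackPatchStart + 0 * sizeof(uint64_t), HashSectionStart);
  return 0;
}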
diff --git a/llvm/lib/ProfileData/MemProf.cpp b/llvm/lib/ProfileData/MemProf.cpp
index f5789186094c..e5608644519d 100644
--- a/llvm/lib/ProfileData/MemProf.cpp
+++ b/llvm/lib/ProfileData/MemProf.cpp
@@ -208,6 +208,7 @@ static IndexedMemProfRecord deserializeV2(const MemProfSchema &Schema,
// Read the meminfo nodes.
const uint64_t NumNodes =
endian::readNext<uint64_t, llvm::endianness::little>(Ptr);
+ Record.AllocSites.reserve(NumNodes);
for (uint64_t I = 0; I < NumNodes; I++) {
IndexedAllocationInfo Node;
Node.CSId = endian::readNext<CallStackId, llvm::endianness::little>(Ptr);
@@ -219,6 +220,7 @@ static IndexedMemProfRecord deserializeV2(const MemProfSchema &Schema,
// Read the callsite information.
const uint64_t NumCtxs =
endian::readNext<uint64_t, llvm::endianness::little>(Ptr);
+ Record.CallSiteIds.reserve(NumCtxs);
for (uint64_t J = 0; J < NumCtxs; J++) {
CallStackId CSId =
endian::readNext<CallStackId, llvm::endianness::little>(Ptr);
@@ -247,13 +249,15 @@ MemProfRecord IndexedMemProfRecord::toMemProfRecord(
Callback) const {
MemProfRecord Record;
+ Record.AllocSites.reserve(AllocSites.size());
for (const memprof::IndexedAllocationInfo &IndexedAI : AllocSites) {
memprof::AllocationInfo AI;
AI.Info = IndexedAI.Info;
AI.CallStack = Callback(IndexedAI.CSId);
- Record.AllocSites.push_back(AI);
+ Record.AllocSites.push_back(std::move(AI));
}
+ Record.CallSites.reserve(CallSiteIds.size());
for (memprof::CallStackId CSId : CallSiteIds)
Record.CallSites.push_back(Callback(CSId));
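The reserve() calls and push_back(std::move(...)) above are a standard pairing: size the destination once up front, then move each element in instead of copying. A generic illustration with plain standard-library types (not the MemProf record types):

#include <string>
#include <utility>
#include <vector>

std::vector<std::string> collect(const std::vector<std::string> &In) {
  std::vector<std::string> Out;
  Out.reserve(In.size()); // One allocation instead of repeated regrowth.
  for (const std::string &S : In) {
    std::string Tmp = S + "-decorated";
    Out.push_back(std::move(Tmp)); // Move the temporary, don't copy it.
  }
  return Out;
}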
diff --git a/llvm/lib/ProfileData/MemProfReader.cpp b/llvm/lib/ProfileData/MemProfReader.cpp
index c25babac844a..fc3be716087e 100644
--- a/llvm/lib/ProfileData/MemProfReader.cpp
+++ b/llvm/lib/ProfileData/MemProfReader.cpp
@@ -587,31 +587,27 @@ Error RawMemProfReader::symbolizeAndFilterStackFrames(
std::vector<std::string>
RawMemProfReader::peekBuildIds(MemoryBuffer *DataBuffer) {
const char *Next = DataBuffer->getBufferStart();
- // Use a set + vector since a profile file may contain multiple raw profile
+ // Use a SetVector since a profile file may contain multiple raw profile
// dumps, each with segment information. We want them unique and in the order
// they were stored in the profile; the profiled binary should be the first entry.
// The runtime uses dl_iterate_phdr and the "... first object visited by
// callback is the main program."
// https://man7.org/linux/man-pages/man3/dl_iterate_phdr.3.html
- std::vector<std::string> BuildIds;
- llvm::SmallSet<std::string, 10> BuildIdsSet;
+ llvm::SetVector<std::string, std::vector<std::string>,
+ llvm::SmallSet<std::string, 10>>
+ BuildIds;
while (Next < DataBuffer->getBufferEnd()) {
auto *Header = reinterpret_cast<const memprof::Header *>(Next);
const llvm::SmallVector<SegmentEntry> Entries =
readSegmentEntries(Next + Header->SegmentOffset);
- for (const auto &Entry : Entries) {
- const std::string Id = getBuildIdString(Entry);
- if (BuildIdsSet.contains(Id))
- continue;
- BuildIds.push_back(Id);
- BuildIdsSet.insert(Id);
- }
+ for (const auto &Entry : Entries)
+ BuildIds.insert(getBuildIdString(Entry));
Next += Header->TotalSize;
}
- return BuildIds;
+ return BuildIds.takeVector();
}
Error RawMemProfReader::readRawProfile(
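The SetVector instantiation above is the idiomatic replacement for a hand-rolled vector-plus-set pair: insert() is a no-op for duplicates and takeVector() hands back the elements in first-insertion order. A self-contained usage sketch with the same instantiation:

#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include <string>
#include <vector>

std::vector<std::string> uniqueInOrder(const std::vector<std::string> &In) {
  llvm::SetVector<std::string, std::vector<std::string>,
                  llvm::SmallSet<std::string, 10>>
      Seen;
  for (const std::string &S : In)
    Seen.insert(S); // Duplicates are silently ignored.
  return Seen.takeVector(); // First-insertion order is preserved.
}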
diff --git a/llvm/lib/ProfileData/SampleProfReader.cpp b/llvm/lib/ProfileData/SampleProfReader.cpp
index f91a0e6177ea..a4b2d0668a5a 100644
--- a/llvm/lib/ProfileData/SampleProfReader.cpp
+++ b/llvm/lib/ProfileData/SampleProfReader.cpp
@@ -1822,9 +1822,9 @@ setupMemoryBuffer(const Twine &Filename, vfs::FileSystem &FS) {
///
/// \returns an error code indicating the status of the created reader.
ErrorOr<std::unique_ptr<SampleProfileReader>>
-SampleProfileReader::create(const std::string Filename, LLVMContext &C,
+SampleProfileReader::create(StringRef Filename, LLVMContext &C,
vfs::FileSystem &FS, FSDiscriminatorPass P,
- const std::string RemapFilename) {
+ StringRef RemapFilename) {
auto BufferOrError = setupMemoryBuffer(Filename, FS);
if (std::error_code EC = BufferOrError.getError())
return EC;
@@ -1842,7 +1842,7 @@ SampleProfileReader::create(const std::string Filename, LLVMContext &C,
///
/// \returns an error code indicating the status of the created reader.
ErrorOr<std::unique_ptr<SampleProfileReaderItaniumRemapper>>
-SampleProfileReaderItaniumRemapper::create(const std::string Filename,
+SampleProfileReaderItaniumRemapper::create(StringRef Filename,
vfs::FileSystem &FS,
SampleProfileReader &Reader,
LLVMContext &C) {
@@ -1895,7 +1895,7 @@ SampleProfileReaderItaniumRemapper::create(std::unique_ptr<MemoryBuffer> &B,
ErrorOr<std::unique_ptr<SampleProfileReader>>
SampleProfileReader::create(std::unique_ptr<MemoryBuffer> &B, LLVMContext &C,
vfs::FileSystem &FS, FSDiscriminatorPass P,
- const std::string RemapFilename) {
+ StringRef RemapFilename) {
std::unique_ptr<SampleProfileReader> Reader;
if (SampleProfileReaderRawBinary::hasFormat(*B))
Reader.reset(new SampleProfileReaderRawBinary(std::move(B), C));
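Switching these parameters from pass-by-value std::string to StringRef removes a heap-allocating copy at every call site: StringRef is a non-owning pointer-and-length view, so callers can pass literals, std::strings, or substrings directly. A small sketch (the helper name is illustrative):

#include "llvm/ADT/StringRef.h"

// Accepts a literal, a std::string, or a slice of a larger buffer without
// materializing a new std::string at the call site.
bool hasProfExtension(llvm::StringRef Filename) {
  return Filename.ends_with(".prof");
}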
diff --git a/llvm/lib/Support/BLAKE3/CMakeLists.txt b/llvm/lib/Support/BLAKE3/CMakeLists.txt
index cb4f840461f7..51317b8048f7 100644
--- a/llvm/lib/Support/BLAKE3/CMakeLists.txt
+++ b/llvm/lib/Support/BLAKE3/CMakeLists.txt
@@ -79,4 +79,5 @@ else()
endif()
add_library(LLVMSupportBlake3 OBJECT EXCLUDE_FROM_ALL ${LLVM_BLAKE3_FILES})
+set_target_properties(LLVMSupportBlake3 PROPERTIES FOLDER "LLVM/Libraries")
llvm_update_compile_flags(LLVMSupportBlake3)
diff --git a/llvm/lib/Support/CMakeLists.txt b/llvm/lib/Support/CMakeLists.txt
index 03e888958a07..be4badc09efa 100644
--- a/llvm/lib/Support/CMakeLists.txt
+++ b/llvm/lib/Support/CMakeLists.txt
@@ -56,9 +56,6 @@ elseif( CMAKE_HOST_UNIX )
STRING(REGEX REPLACE "^lib" "" Backtrace_LIBFILE ${Backtrace_LIBFILE})
set(system_libs ${system_libs} ${Backtrace_LIBFILE})
endif()
- if( LLVM_ENABLE_TERMINFO )
- set(imported_libs ${imported_libs} Terminfo::terminfo)
- endif()
set(system_libs ${system_libs} ${LLVM_ATOMIC_LIB})
set(system_libs ${system_libs} ${LLVM_PTHREAD_LIB})
if( UNIX AND NOT (BEOS OR HAIKU) )
@@ -325,14 +322,6 @@ if(LLVM_ENABLE_ZSTD)
set(llvm_system_libs ${llvm_system_libs} "${zstd_library}")
endif()
-if(LLVM_ENABLE_TERMINFO)
- if(NOT terminfo_library)
- get_property(terminfo_library TARGET Terminfo::terminfo PROPERTY LOCATION)
- endif()
- get_library_name(${terminfo_library} terminfo_library)
- set(llvm_system_libs ${llvm_system_libs} "${terminfo_library}")
-endif()
-
set_property(TARGET LLVMSupport PROPERTY LLVM_SYSTEM_LIBS "${llvm_system_libs}")
diff --git a/llvm/lib/Support/Error.cpp b/llvm/lib/Support/Error.cpp
index 21d591530b41..34ec31e3b833 100644
--- a/llvm/lib/Support/Error.cpp
+++ b/llvm/lib/Support/Error.cpp
@@ -135,6 +135,9 @@ StringError::StringError(std::error_code EC, const Twine &S)
StringError::StringError(const Twine &S, std::error_code EC)
: Msg(S.str()), EC(EC), PrintMsgOnly(true) {}
+StringError::StringError(std::string &&S, std::error_code EC, bool PrintMsgOnly)
+ : Msg(std::move(S)), EC(EC), PrintMsgOnly(PrintMsgOnly) {}
+
void StringError::log(raw_ostream &OS) const {
if (PrintMsgOnly) {
OS << Msg;
@@ -149,7 +152,7 @@ std::error_code StringError::convertToErrorCode() const {
return EC;
}
-Error createStringError(std::error_code EC, char const *Msg) {
+Error createStringError(std::string &&Msg, std::error_code EC) {
return make_error<StringError>(Msg, EC);
}
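A hedged usage sketch for the message-first overload introduced above, assuming it is declared in llvm/Support/Error.h alongside the existing (EC, message) forms; the function name and message are illustrative:

#include "llvm/Support/Error.h"
#include <string>
#include <utility>

llvm::Error openFailed(const std::string &Path) {
  std::string Msg = "cannot open " + Path;
  // The rvalue overload takes ownership of the composed message.
  return llvm::createStringError(std::move(Msg),
                                 llvm::inconvertibleErrorCode());
}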
diff --git a/llvm/lib/Support/KnownBits.cpp b/llvm/lib/Support/KnownBits.cpp
index fe47884f3e55..d6012a8eea8a 100644
--- a/llvm/lib/Support/KnownBits.cpp
+++ b/llvm/lib/Support/KnownBits.cpp
@@ -774,6 +774,37 @@ KnownBits KnownBits::usub_sat(const KnownBits &LHS, const KnownBits &RHS) {
return computeForSatAddSub(/*Add*/ false, /*Signed*/ false, LHS, RHS);
}
+static KnownBits avgCompute(KnownBits LHS, KnownBits RHS, bool IsCeil,
+ bool IsSigned) {
+ unsigned BitWidth = LHS.getBitWidth();
+ LHS = IsSigned ? LHS.sext(BitWidth + 1) : LHS.zext(BitWidth + 1);
+ RHS = IsSigned ? RHS.sext(BitWidth + 1) : RHS.zext(BitWidth + 1);
+ KnownBits Carry = KnownBits::makeConstant(APInt(1, IsCeil ? 1 : 0));
+ LHS = KnownBits::computeForAddCarry(LHS, RHS, Carry);
+ LHS = LHS.extractBits(BitWidth, 1);
+ return LHS;
+}
+
+KnownBits KnownBits::avgFloorS(const KnownBits &LHS, const KnownBits &RHS) {
+ return avgCompute(LHS, RHS, /* IsCeil */ false,
+ /* IsSigned */ true);
+}
+
+KnownBits KnownBits::avgFloorU(const KnownBits &LHS, const KnownBits &RHS) {
+ return avgCompute(LHS, RHS, /* IsCeil */ false,
+ /* IsSigned */ false);
+}
+
+KnownBits KnownBits::avgCeilS(const KnownBits &LHS, const KnownBits &RHS) {
+ return avgCompute(LHS, RHS, /* IsCeil */ true,
+ /* IsSigned */ true);
+}
+
+KnownBits KnownBits::avgCeilU(const KnownBits &LHS, const KnownBits &RHS) {
+ return avgCompute(LHS, RHS, /* IsCeil */ true,
+ /* IsSigned */ false);
+}
+
KnownBits KnownBits::mul(const KnownBits &LHS, const KnownBits &RHS,
bool NoUndefSelfMultiply) {
unsigned BitWidth = LHS.getBitWidth();
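The trick in avgCompute is to widen both operands by one bit so that LHS + RHS + carry cannot wrap, then keep bits [1, BitWidth] of the sum, which is exactly the halving. The scalar analogue for concrete 8-bit values, as a quick sanity check:

#include <cassert>
#include <cstdint>

// avg(A, B) = (A + B + IsCeil) >> 1, computed in a width where the sum
// cannot overflow; mirrors the KnownBits version bit-for-bit.
static uint8_t avgU8(uint8_t A, uint8_t B, bool IsCeil) {
  uint16_t Sum = uint16_t(A) + uint16_t(B) + (IsCeil ? 1 : 0);
  return uint8_t(Sum >> 1);
}

int main() {
  assert(avgU8(255, 255, false) == 255); // No wraparound at the top.
  assert(avgU8(1, 2, false) == 1);       // Floor variant rounds down.
  assert(avgU8(1, 2, true) == 2);        // Ceil variant rounds up.
  return 0;
}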
diff --git a/llvm/lib/Support/LockFileManager.cpp b/llvm/lib/Support/LockFileManager.cpp
index 083f8d7b37be..3169aa25ec0d 100644
--- a/llvm/lib/Support/LockFileManager.cpp
+++ b/llvm/lib/Support/LockFileManager.cpp
@@ -66,7 +66,7 @@ LockFileManager::readLockFile(StringRef LockFileName) {
StringRef Hostname;
StringRef PIDStr;
std::tie(Hostname, PIDStr) = getToken(MB.getBuffer(), " ");
- PIDStr = PIDStr.substr(PIDStr.find_first_not_of(" "));
+ PIDStr = PIDStr.substr(PIDStr.find_first_not_of(' '));
int PID;
if (!PIDStr.getAsInteger(10, PID)) {
auto Owner = std::make_pair(std::string(Hostname), PID);
diff --git a/llvm/lib/Support/Unix/Process.inc b/llvm/lib/Support/Unix/Process.inc
index ae90924cae1b..84b10ff5d1d0 100644
--- a/llvm/lib/Support/Unix/Process.inc
+++ b/llvm/lib/Support/Unix/Process.inc
@@ -341,17 +341,9 @@ unsigned Process::StandardErrColumns() {
return getColumns();
}
-#ifdef LLVM_ENABLE_TERMINFO
-// We manually declare these extern functions because finding the correct
-// headers from various terminfo, curses, or other sources is harder than
-// writing their specs down.
-extern "C" int setupterm(char *term, int filedes, int *errret);
-extern "C" struct term *set_curterm(struct term *termp);
-extern "C" int del_curterm(struct term *termp);
-extern "C" int tigetnum(char *capname);
-#endif
-
-bool checkTerminalEnvironmentForColors() {
+static bool terminalHasColors() {
+ // Check if the current terminal is one of the terminals known to support
+ // ANSI color escape codes.
if (const char *TermStr = std::getenv("TERM")) {
return StringSwitch<bool>(TermStr)
.Case("ansi", true)
@@ -368,54 +360,10 @@ bool checkTerminalEnvironmentForColors() {
return false;
}
-static bool terminalHasColors(int fd) {
-#ifdef LLVM_ENABLE_TERMINFO
- // First, acquire a global lock because these C routines are thread hostile.
- static std::mutex TermColorMutex;
- std::lock_guard<std::mutex> G(TermColorMutex);
-
- struct term *previous_term = set_curterm(nullptr);
- int errret = 0;
- if (setupterm(nullptr, fd, &errret) != 0)
- // Regardless of why, if we can't get terminfo, we shouldn't try to print
- // colors.
- return false;
-
- // Test whether the terminal as set up supports color output. How to do this
- // isn't entirely obvious. We can use the curses routine 'has_colors' but it
- // would be nice to avoid a dependency on curses proper when we can make do
- // with a minimal terminfo parsing library. Also, we don't really care whether
- // the terminal supports the curses-specific color changing routines, merely
- // if it will interpret ANSI color escape codes in a reasonable way. Thus, the
- // strategy here is just to query the baseline colors capability and if it
- // supports colors at all to assume it will translate the escape codes into
- // whatever range of colors it does support. We can add more detailed tests
- // here if users report them as necessary.
- //
- // The 'tigetnum' routine returns -2 or -1 on errors, and might return 0 if
- // the terminfo says that no colors are supported.
- int colors_ti = tigetnum(const_cast<char *>("colors"));
- bool HasColors =
- colors_ti >= 0 ? colors_ti : checkTerminalEnvironmentForColors();
-
- // Now extract the structure allocated by setupterm and free its memory
- // through a really silly dance.
- struct term *termp = set_curterm(previous_term);
- (void)del_curterm(termp); // Drop any errors here.
-
- // Return true if we found a color capabilities for the current terminal.
- return HasColors;
-#else
- // When the terminfo database is not available, check if the current terminal
- // is one of terminals that are known to support ANSI color escape codes.
- return checkTerminalEnvironmentForColors();
-#endif
-}
-
bool Process::FileDescriptorHasColors(int fd) {
// A file descriptor has colors if it is displayed and the terminal has
// colors.
- return FileDescriptorIsDisplayed(fd) && terminalHasColors(fd);
+ return FileDescriptorIsDisplayed(fd) && terminalHasColors();
}
bool Process::StandardOutHasColors() {
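With terminfo gone, color detection reduces to matching $TERM against a fixed whitelist. A trimmed sketch of that shape (only a few of the Case entries are reproduced here):

#include "llvm/ADT/StringSwitch.h"
#include <cstdlib>

static bool envSaysColor() {
  if (const char *TermStr = std::getenv("TERM"))
    return llvm::StringSwitch<bool>(TermStr)
        .Case("ansi", true)
        .Case("xterm", true)
        .StartsWith("xterm-", true) // xterm-256color and friends.
        .Default(false);
  return false; // No $TERM at all: assume no color support.
}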
diff --git a/llvm/lib/Support/raw_socket_stream.cpp b/llvm/lib/Support/raw_socket_stream.cpp
index 14e2308df4d7..549d537709bf 100644
--- a/llvm/lib/Support/raw_socket_stream.cpp
+++ b/llvm/lib/Support/raw_socket_stream.cpp
@@ -204,17 +204,26 @@ ListeningSocket::accept(std::chrono::milliseconds Timeout) {
auto Start = std::chrono::steady_clock::now();
#ifdef _WIN32
PollStatus = WSAPoll(FDs, 2, RemainingTime);
- if (PollStatus == SOCKET_ERROR) {
#else
PollStatus = ::poll(FDs, 2, RemainingTime);
+#endif
+ // If FD equals -1, then ListeningSocket::shutdown has been called and it is
+ // appropriate to return operation_canceled.
+ if (FD.load() == -1)
+ return llvm::make_error<StringError>(
+ std::make_error_code(std::errc::operation_canceled),
+ "Accept canceled");
+
+#ifdef _WIN32
+ if (PollStatus == SOCKET_ERROR) {
+#else
if (PollStatus == -1) {
#endif
- // Ignore error if caused by interupting signal
std::error_code PollErrCode = getLastSocketErrorCode();
+ // Ignore EINTR (a signal occurred before any requested event) and retry
if (PollErrCode != std::errc::interrupted)
return llvm::make_error<StringError>(PollErrCode, "FD poll failed");
}
-
if (PollStatus == 0)
return llvm::make_error<StringError>(
std::make_error_code(std::errc::timed_out),
@@ -222,13 +231,7 @@ ListeningSocket::accept(std::chrono::milliseconds Timeout) {
if (FDs[0].revents & POLLNVAL)
return llvm::make_error<StringError>(
- std::make_error_code(std::errc::bad_file_descriptor),
- "File descriptor closed by another thread");
-
- if (FDs[1].revents & POLLIN)
- return llvm::make_error<StringError>(
- std::make_error_code(std::errc::operation_canceled),
- "Accept canceled");
+ std::make_error_code(std::errc::bad_file_descriptor));
auto Stop = std::chrono::steady_clock::now();
ElapsedTime +=
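The ordering above matters: the shutdown flag is tested after poll() returns but before its status is interpreted, so a concurrent shutdown() takes precedence over both a successful poll and an EINTR retry. A POSIX-only sketch of that shape (return codes and the helper name are illustrative):

#include <atomic>
#include <cerrno>
#include <poll.h>

// Returns -2 if canceled, 0 to retry after EINTR, else poll()'s status.
static int pollOnce(std::atomic<int> &FD, pollfd &PFD, int TimeoutMs) {
  int Status = ::poll(&PFD, 1, TimeoutMs);
  if (FD.load() == -1)
    return -2; // Another thread called shutdown(); cancellation wins.
  if (Status == -1 && errno == EINTR)
    return 0; // Interrupted by a signal before any event; retry.
  return Status;
}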
diff --git a/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp b/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
index bfcafc6442d2..9a804c12939c 100644
--- a/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
@@ -38,6 +38,8 @@ static const MCPhysReg QRegList[] = {AArch64::Q0, AArch64::Q1, AArch64::Q2,
static const MCPhysReg ZRegList[] = {AArch64::Z0, AArch64::Z1, AArch64::Z2,
AArch64::Z3, AArch64::Z4, AArch64::Z5,
AArch64::Z6, AArch64::Z7};
+static const MCPhysReg PRegList[] = {AArch64::P0, AArch64::P1, AArch64::P2,
+ AArch64::P3};
static bool finishStackBlock(SmallVectorImpl<CCValAssign> &PendingMembers,
MVT LocVT, ISD::ArgFlagsTy &ArgFlags,
@@ -59,11 +61,17 @@ static bool finishStackBlock(SmallVectorImpl<CCValAssign> &PendingMembers,
// CCAssignFn again we want it to behave as if all remaining registers are
// allocated. This will force the code to pass the tuple indirectly in
// accordance with the PCS.
- bool RegsAllocated[8];
+ bool ZRegsAllocated[8];
for (int I = 0; I < 8; I++) {
- RegsAllocated[I] = State.isAllocated(ZRegList[I]);
+ ZRegsAllocated[I] = State.isAllocated(ZRegList[I]);
State.AllocateReg(ZRegList[I]);
}
+ // The same applies to P registers.
+ bool PRegsAllocated[4];
+ for (int I = 0; I < 4; I++) {
+ PRegsAllocated[I] = State.isAllocated(PRegList[I]);
+ State.AllocateReg(PRegList[I]);
+ }
auto &It = PendingMembers[0];
CCAssignFn *AssignFn =
@@ -79,8 +87,11 @@ static bool finishStackBlock(SmallVectorImpl<CCValAssign> &PendingMembers,
// Return the register state back to how it was before, leaving any
// unallocated registers available for other smaller types.
for (int I = 0; I < 8; I++)
- if (!RegsAllocated[I])
+ if (!ZRegsAllocated[I])
State.DeallocateReg(ZRegList[I]);
+ for (int I = 0; I < 4; I++)
+ if (!PRegsAllocated[I])
+ State.DeallocateReg(PRegList[I]);
// All pending members have now been allocated
PendingMembers.clear();
@@ -140,9 +151,15 @@ static bool CC_AArch64_Custom_Block(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
RegList = DRegList;
else if (LocVT.SimpleTy == MVT::f128 || LocVT.is128BitVector())
RegList = QRegList;
- else if (LocVT.isScalableVector())
- RegList = ZRegList;
- else {
+ else if (LocVT.isScalableVector()) {
+ // Scalable masks should be passed in predicate registers.
+ if (LocVT == MVT::nxv1i1 || LocVT == MVT::nxv2i1 || LocVT == MVT::nxv4i1 ||
+ LocVT == MVT::nxv8i1 || LocVT == MVT::nxv16i1 ||
+ LocVT == MVT::aarch64svcount)
+ RegList = PRegList;
+ else
+ RegList = ZRegList;
+ } else {
// Not an array we want to split up after all.
return false;
}
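The save/allocate/restore dance generalizes: snapshot which registers were already taken, mark the whole tuple class allocated so the nested assignment is forced down the indirect path, then free only the registers this call itself claimed. A generic illustration (register numbers must be below 32 for this toy bitset):

#include <array>
#include <bitset>

static void withAllReserved(std::bitset<32> &Allocated,
                            const std::array<unsigned, 4> &Regs) {
  std::array<bool, 4> WasAllocated;
  for (unsigned I = 0; I < Regs.size(); ++I) {
    WasAllocated[I] = Allocated.test(Regs[I]);
    Allocated.set(Regs[I]); // Pretend every tuple register is in use.
  }
  // ... rerun the assignment as if no suitable register were free ...
  for (unsigned I = 0; I < Regs.size(); ++I)
    if (!WasAllocated[I])
      Allocated.reset(Regs[I]); // Only release what we reserved here.
}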
diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
index 10cad6d19244..1c7f6b870d39 100644
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -295,5 +295,6 @@ def AArch64PostLegalizerCombiner
ptr_add_immed_chain, overlapping_and,
split_store_zero_128, undef_combines,
select_to_minmax, or_to_bsp, combine_concat_vector,
- commute_constant_to_rhs]> {
+ commute_constant_to_rhs,
+ push_freeze_to_prevent_poison_from_propagating]> {
}
diff --git a/llvm/lib/Target/AArch64/AArch64ExpandImm.cpp b/llvm/lib/Target/AArch64/AArch64ExpandImm.cpp
index a7d72b59b1d5..98016271a9d0 100644
--- a/llvm/lib/Target/AArch64/AArch64ExpandImm.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ExpandImm.cpp
@@ -518,6 +518,14 @@ static inline void expandMOVImmSimple(uint64_t Imm, unsigned BitSize,
Insn.push_back({ Opc, Imm16,
AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });
}
+
+ // The immediate is now split into 16-bit chunks. If its upper and lower
+ // 32-bit halves are equal, the trailing MOVKs can be folded into one ORR.
+ if (Insn.size() > 2 && (Imm >> 32) == (Imm & 0xffffffffULL)) {
+ for (int Size = Insn.size(); Size > 2; Size--)
+ Insn.pop_back();
+ Insn.push_back({AArch64::ORRXrs, 0, 32});
+ }
}
/// Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more
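Worked example of the shortcut: for Imm = 0x1234ABCD1234ABCD the plain expansion needs a MOVZ plus three MOVKs, but since the two 32-bit halves match, two instructions materialize the low half and one ORR with LSL #32 replicates it. The underlying identity, checked in plain C++:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t Imm = 0x1234ABCD1234ABCDULL;
  assert((Imm >> 32) == (Imm & 0xffffffffULL)); // Halves match.
  uint64_t Xd = Imm & 0xffffffffULL; // MOVZ + MOVK build the low half.
  Xd |= Xd << 32;                    // ORR Xd, Xd, Xd, LSL #32.
  assert(Xd == Imm);
  return 0;
}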
diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index 03f0778bae59..36957bb0f5a0 100644
--- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -168,6 +168,19 @@ bool AArch64ExpandPseudo::expandMOVImm(MachineBasicBlock &MBB,
.addImm(I->Op2));
}
break;
+ case AArch64::ORRWrs:
+ case AArch64::ORRXrs: {
+ Register DstReg = MI.getOperand(0).getReg();
+ bool DstIsDead = MI.getOperand(0).isDead();
+ MIBS.push_back(
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(I->Opcode))
+ .addReg(DstReg, RegState::Define |
+ getDeadRegState(DstIsDead && LastItem) |
+ RenamableState)
+ .addReg(DstReg)
+ .addReg(DstReg)
+ .addImm(I->Op2));
+ } break;
case AArch64::ANDXri:
case AArch64::EORXri:
if (I->Op1 == 0) {
diff --git a/llvm/lib/Target/AArch64/AArch64Features.td b/llvm/lib/Target/AArch64/AArch64Features.td
index ba0b760ce3d7..ffb899a30145 100644
--- a/llvm/lib/Target/AArch64/AArch64Features.td
+++ b/llvm/lib/Target/AArch64/AArch64Features.td
@@ -223,13 +223,6 @@ def FeatureSVE : Extension<"sve", "SVE",
"Enable Scalable Vector Extension (SVE) instructions (FEAT_SVE)", [FeatureFullFP16],
"FEAT_SVE", "+sve,+fullfp16,+fp-armv8,+neon", 310>;
-def FeatureFPMR : Extension<"fpmr", "FPMR",
- "Enable FPMR Register (FEAT_FPMR)">;
-
-let FMVDependencies = "+fpmr" in
-def FeatureFP8 : Extension<"fp8", "FP8",
- "Enable FP8 instructions (FEAT_FP8)">;
-
// This flag is currently still labeled as Experimental, but when fully
// implemented this should tell the compiler to use the zeroing pseudos to
// benefit from the reverse instructions (e.g. SUB vs SUBR) if the inactive
@@ -667,41 +660,44 @@ def FeatureSME2p1 : Extension<"sme2p1", "SME2p1",
def FeatureFAMINMAX: Extension<"faminmax", "FAMINMAX",
"Enable FAMIN and FAMAX instructions (FEAT_FAMINMAX)">;
-let FMVDependencies = "+fpmr" in
+def FeatureLUT: Extension<"lut", "LUT",
+ "Enable Lookup Table instructions (FEAT_LUT)">;
+
+def FeatureFP8 : Extension<"fp8", "FP8",
+ "Enable FP8 instructions (FEAT_FP8)", [FeatureFAMINMAX, FeatureLUT, FeatureBF16]>;
+
def FeatureFP8FMA : Extension<"fp8fma", "FP8FMA",
- "Enable fp8 multiply-add instructions (FEAT_FP8FMA)">;
+ "Enable fp8 multiply-add instructions (FEAT_FP8FMA)", [FeatureFP8]>;
let FMVDependencies = "+sme2" in
def FeatureSSVE_FP8FMA : Extension<"ssve-fp8fma", "SSVE_FP8FMA",
- "Enable SVE2 fp8 multiply-add instructions (FEAT_SSVE_FP8FMA)", [FeatureSME2]>;
+ "Enable SVE2 fp8 multiply-add instructions (FEAT_SSVE_FP8FMA)", [FeatureSME2, FeatureFP8]>;
+def FeatureFP8DOT4: Extension<"fp8dot4", "FP8DOT4",
+ "Enable fp8 4-way dot instructions (FEAT_FP8DOT4)", [FeatureFP8FMA]>;
+
def FeatureFP8DOT2: Extension<"fp8dot2", "FP8DOT2",
- "Enable fp8 2-way dot instructions (FEAT_FP8DOT2)">;
+ "Enable fp8 2-way dot instructions (FEAT_FP8DOT2)", [FeatureFP8DOT4]>;
let FMVDependencies = "+sme2" in
-def FeatureSSVE_FP8DOT2 : Extension<"ssve-fp8dot2", "SSVE_FP8DOT2",
- "Enable SVE2 fp8 2-way dot product instructions (FEAT_SSVE_FP8DOT2)", [FeatureSME2]>;
-
-def FeatureFP8DOT4: Extension<"fp8dot4", "FP8DOT4",
- "Enable fp8 4-way dot instructions (FEAT_FP8DOT4)">;
+def FeatureSSVE_FP8DOT4 : Extension<"ssve-fp8dot4", "SSVE_FP8DOT4",
+ "Enable SVE2 fp8 4-way dot product instructions (FEAT_SSVE_FP8DOT4)", [FeatureSSVE_FP8FMA]>;
let FMVDependencies = "+sme2" in
-def FeatureSSVE_FP8DOT4 : Extension<"ssve-fp8dot4", "SSVE_FP8DOT4",
- "Enable SVE2 fp8 4-way dot product instructions (FEAT_SSVE_FP8DOT4)", [FeatureSME2]>;
-def FeatureLUT: Extension<"lut", "LUT",
- "Enable Lookup Table instructions (FEAT_LUT)">;
+def FeatureSSVE_FP8DOT2 : Extension<"ssve-fp8dot2", "SSVE_FP8DOT2",
+ "Enable SVE2 fp8 2-way dot product instructions (FEAT_SSVE_FP8DOT2)", [FeatureSSVE_FP8DOT4]>;
def FeatureSME_LUTv2 : Extension<"sme-lutv2", "SME_LUTv2",
"Enable Scalable Matrix Extension (SME) LUTv2 instructions (FEAT_SME_LUTv2)">;
-let FMVDependencies = "+fp8,+sme2" in
-def FeatureSMEF8F16 : Extension<"sme-f8f16", "SMEF8F16",
- "Enable Scalable Matrix Extension (SME) F8F16 instructions(FEAT_SME_F8F16)", [FeatureSME2, FeatureFP8]>;
-
let FMVDependencies = "+sme2,+fp8" in
def FeatureSMEF8F32 : Extension<"sme-f8f32", "SMEF8F32",
"Enable Scalable Matrix Extension (SME) F8F32 instructions (FEAT_SME_F8F32)", [FeatureSME2, FeatureFP8]>;
+let FMVDependencies = "+fp8,+sme2" in
+def FeatureSMEF8F16 : Extension<"sme-f8f16", "SMEF8F16",
+ "Enable Scalable Matrix Extension (SME) F8F16 instructions(FEAT_SME_F8F16)", [FeatureSMEF8F32]>;
+
def FeatureAppleA7SysReg : SubtargetFeature<"apple-a7-sysreg", "HasAppleA7SysReg", "true",
"Apple A7 (the CPU formerly known as Cyclone)">;
@@ -869,7 +865,7 @@ def HasV9_4aOps : Architecture64<9, 4, "a", "v9.4a",
FeatureRASv2])>;
def HasV9_5aOps : Architecture64<9, 5, "a", "v9.5a",
[HasV9_4aOps, FeatureCPA],
- !listconcat(HasV9_4aOps.DefaultExts, [FeatureCPA])>;
+ !listconcat(HasV9_4aOps.DefaultExts, [FeatureCPA, FeatureLUT, FeatureFAMINMAX])>;
def HasV8_0rOps : Architecture64<8, 0, "r", "v8r",
[ //v8.1
FeatureCRC, FeaturePAN, FeatureLSE, FeatureCONTEXTIDREL2,
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e31a27e9428e..25ba8d850030 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1871,9 +1871,11 @@ bool AArch64TargetLowering::shouldExpandCttzElements(EVT VT) const {
if (!Subtarget->hasSVEorSME())
return true;
- // We can only use the BRKB + CNTP sequence with legal predicate types.
+ // We can only use the BRKB + CNTP sequence with legal predicate types. We can
+ // also support fixed-width predicates.
return VT != MVT::nxv16i1 && VT != MVT::nxv8i1 && VT != MVT::nxv4i1 &&
- VT != MVT::nxv2i1;
+ VT != MVT::nxv2i1 && VT != MVT::v16i1 && VT != MVT::v8i1 &&
+ VT != MVT::v4i1 && VT != MVT::v2i1;
}
void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
@@ -5838,9 +5840,20 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
return SDValue();
}
case Intrinsic::experimental_cttz_elts: {
- SDValue NewCttzElts =
- DAG.getNode(AArch64ISD::CTTZ_ELTS, dl, MVT::i64, Op.getOperand(1));
+ SDValue CttzOp = Op.getOperand(1);
+ EVT VT = CttzOp.getValueType();
+ assert(VT.getVectorElementType() == MVT::i1 && "Expected MVT::i1");
+ if (VT.isFixedLengthVector()) {
+ // We can use SVE instructions to lower this intrinsic by first creating
+ // an SVE predicate register mask from the fixed-width vector.
+ EVT NewVT = getTypeToTransformTo(*DAG.getContext(), VT);
+ SDValue Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, NewVT, CttzOp);
+ CttzOp = convertFixedMaskToScalableVector(Mask, DAG);
+ }
+
+ SDValue NewCttzElts =
+ DAG.getNode(AArch64ISD::CTTZ_ELTS, dl, MVT::i64, CttzOp);
return DAG.getZExtOrTrunc(NewCttzElts, dl, Op.getValueType());
}
}
@@ -7235,7 +7248,6 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
uint64_t PartSize = VA.getValVT().getStoreSize().getKnownMinValue();
unsigned NumParts = 1;
if (Ins[i].Flags.isInConsecutiveRegs()) {
- assert(!Ins[i].Flags.isInConsecutiveRegsLast());
while (!Ins[i + NumParts - 1].Flags.isInConsecutiveRegsLast())
++NumParts;
}
@@ -8232,7 +8244,6 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
uint64_t PartSize = StoreSize;
unsigned NumParts = 1;
if (Outs[i].Flags.isInConsecutiveRegs()) {
- assert(!Outs[i].Flags.isInConsecutiveRegsLast());
while (!Outs[i + NumParts - 1].Flags.isInConsecutiveRegsLast())
++NumParts;
StoreSize *= NumParts;
@@ -13530,11 +13541,9 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
DAG.getConstant(NumElts, dl, MVT::i64));
if (Even && !Odd)
- return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), LHS,
- RHS);
+ return DAG.getNode(AArch64ISD::UZP1, dl, VT, LHS, RHS);
if (Odd && !Even)
- return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), LHS,
- RHS);
+ return DAG.getNode(AArch64ISD::UZP2, dl, VT, LHS, RHS);
}
}
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index a39e3b7be76d..4830033b2352 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -171,8 +171,6 @@ def HasSME2 : Predicate<"Subtarget->hasSME2()">,
AssemblerPredicateWithAll<(all_of FeatureSME2), "sme2">;
def HasSME2p1 : Predicate<"Subtarget->hasSME2p1()">,
AssemblerPredicateWithAll<(all_of FeatureSME2p1), "sme2p1">;
-def HasFPMR : Predicate<"Subtarget->hasFPMR()">,
- AssemblerPredicateWithAll<(all_of FeatureFPMR), "fpmr">;
def HasFP8 : Predicate<"Subtarget->hasFP8()">,
AssemblerPredicateWithAll<(all_of FeatureFP8), "fp8">;
def HasFAMINMAX : Predicate<"Subtarget->hasFAMINMAX()">,
diff --git a/llvm/lib/Target/AArch64/AArch64PointerAuth.cpp b/llvm/lib/Target/AArch64/AArch64PointerAuth.cpp
index abde099be382..e900f6881620 100644
--- a/llvm/lib/Target/AArch64/AArch64PointerAuth.cpp
+++ b/llvm/lib/Target/AArch64/AArch64PointerAuth.cpp
@@ -231,7 +231,7 @@ MachineMemOperand *createCheckMemOperand(MachineFunction &MF,
} // namespace
-MachineBasicBlock &llvm::AArch64PAuth::checkAuthenticatedRegister(
+void llvm::AArch64PAuth::checkAuthenticatedRegister(
MachineBasicBlock::iterator MBBI, AuthCheckMethod Method,
Register AuthenticatedReg, Register TmpReg, bool UseIKey, unsigned BrkImm) {
@@ -241,37 +241,36 @@ MachineBasicBlock &llvm::AArch64PAuth::checkAuthenticatedRegister(
const AArch64InstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MBBI->getDebugLoc();
+ // All terminator instructions should be grouped at the end of the machine
+ // basic block, with no non-terminator instructions between them. Depending on
+ // the method requested, we will insert some regular instructions, maybe
+ // followed by a conditional branch instruction, which is a terminator, before
+ // MBBI. Thus, MBBI is expected to be the first terminator of its MBB.
+ assert(MBBI->isTerminator() && MBBI == MBB.getFirstTerminator() &&
+ "MBBI should be the first terminator in MBB");
+
// First, handle the methods that do not require creating extra MBBs.
switch (Method) {
default:
break;
case AuthCheckMethod::None:
- return MBB;
+ return;
case AuthCheckMethod::DummyLoad:
BuildMI(MBB, MBBI, DL, TII->get(AArch64::LDRWui), getWRegFromXReg(TmpReg))
.addReg(AuthenticatedReg)
.addImm(0)
.addMemOperand(createCheckMemOperand(MF, Subtarget));
- return MBB;
+ return;
}
// Control flow has to be changed, so arrange new MBBs.
- // At now, at least an AUT* instruction is expected before MBBI
- assert(MBBI != MBB.begin() &&
- "Cannot insert the check at the very beginning of MBB");
- // The block to insert check into.
- MachineBasicBlock *CheckBlock = &MBB;
- // The remaining part of the original MBB that is executed on success.
- MachineBasicBlock *SuccessBlock = MBB.splitAt(*std::prev(MBBI));
-
// The block that explicitly generates a break-point exception on failure.
MachineBasicBlock *BreakBlock =
MF.CreateMachineBasicBlock(MBB.getBasicBlock());
MF.push_back(BreakBlock);
- MBB.splitSuccessor(SuccessBlock, BreakBlock);
+ MBB.addSuccessor(BreakBlock);
- assert(CheckBlock->getFallThrough() == SuccessBlock);
BuildMI(BreakBlock, DL, TII->get(AArch64::BRK)).addImm(BrkImm);
switch (Method) {
@@ -279,32 +278,32 @@ MachineBasicBlock &llvm::AArch64PAuth::checkAuthenticatedRegister(
case AuthCheckMethod::DummyLoad:
llvm_unreachable("Should be handled above");
case AuthCheckMethod::HighBitsNoTBI:
- BuildMI(CheckBlock, DL, TII->get(AArch64::EORXrs), TmpReg)
+ BuildMI(MBB, MBBI, DL, TII->get(AArch64::EORXrs), TmpReg)
.addReg(AuthenticatedReg)
.addReg(AuthenticatedReg)
.addImm(1);
- BuildMI(CheckBlock, DL, TII->get(AArch64::TBNZX))
+ BuildMI(MBB, MBBI, DL, TII->get(AArch64::TBNZX))
.addReg(TmpReg)
.addImm(62)
.addMBB(BreakBlock);
- return *SuccessBlock;
+ return;
case AuthCheckMethod::XPACHint:
assert(AuthenticatedReg == AArch64::LR &&
"XPACHint mode is only compatible with checking the LR register");
assert(UseIKey && "XPACHint mode is only compatible with I-keys");
- BuildMI(CheckBlock, DL, TII->get(AArch64::ORRXrs), TmpReg)
+ BuildMI(MBB, MBBI, DL, TII->get(AArch64::ORRXrs), TmpReg)
.addReg(AArch64::XZR)
.addReg(AArch64::LR)
.addImm(0);
- BuildMI(CheckBlock, DL, TII->get(AArch64::XPACLRI));
- BuildMI(CheckBlock, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
+ BuildMI(MBB, MBBI, DL, TII->get(AArch64::XPACLRI));
+ BuildMI(MBB, MBBI, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
.addReg(TmpReg)
.addReg(AArch64::LR)
.addImm(0);
- BuildMI(CheckBlock, DL, TII->get(AArch64::Bcc))
+ BuildMI(MBB, MBBI, DL, TII->get(AArch64::Bcc))
.addImm(AArch64CC::NE)
.addMBB(BreakBlock);
- return *SuccessBlock;
+ return;
}
llvm_unreachable("Unknown AuthCheckMethod enum");
}
diff --git a/llvm/lib/Target/AArch64/AArch64PointerAuth.h b/llvm/lib/Target/AArch64/AArch64PointerAuth.h
index e1ceaed58abe..4ffda7478224 100644
--- a/llvm/lib/Target/AArch64/AArch64PointerAuth.h
+++ b/llvm/lib/Target/AArch64/AArch64PointerAuth.h
@@ -98,14 +98,10 @@ enum class AuthCheckMethod {
/// using an I-key or D-key and which register can be used as temporary.
/// If an explicit BRK instruction is used to generate an exception, BrkImm
/// specifies its immediate operand.
-///
-/// \returns The machine basic block containing the code that is executed
-/// after the check succeeds.
-MachineBasicBlock &checkAuthenticatedRegister(MachineBasicBlock::iterator MBBI,
- AuthCheckMethod Method,
- Register AuthenticatedReg,
- Register TmpReg, bool UseIKey,
- unsigned BrkImm);
+void checkAuthenticatedRegister(MachineBasicBlock::iterator MBBI,
+ AuthCheckMethod Method,
+ Register AuthenticatedReg, Register TmpReg,
+ bool UseIKey, unsigned BrkImm);
/// Returns the number of bytes added by checkAuthenticatedRegister.
unsigned getCheckerSizeInBytes(AuthCheckMethod Method);
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
index 5d185fcaefc4..8bc26eeef34d 100644
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -64,12 +64,6 @@ ReservedRegsForRA("reserve-regs-for-regalloc", cl::desc("Reserve physical "
"Should only be used for testing register allocator."),
cl::CommaSeparated, cl::Hidden);
-static cl::opt<bool> ForceStreamingCompatibleSVE(
- "force-streaming-compatible-sve",
- cl::desc(
- "Force the use of streaming-compatible SVE code for all functions"),
- cl::Hidden);
-
static cl::opt<AArch64PAuth::AuthCheckMethod>
AuthenticatedLRCheckMethod("aarch64-authenticated-lr-check-method",
cl::Hidden,
@@ -316,15 +310,14 @@ AArch64Subtarget::AArch64Subtarget(const Triple &TT, StringRef CPU,
const TargetMachine &TM, bool LittleEndian,
unsigned MinSVEVectorSizeInBitsOverride,
unsigned MaxSVEVectorSizeInBitsOverride,
- bool StreamingSVEMode,
- bool StreamingCompatibleSVEMode,
+ bool IsStreaming, bool IsStreamingCompatible,
bool HasMinSize)
: AArch64GenSubtargetInfo(TT, CPU, TuneCPU, FS),
ReserveXRegister(AArch64::GPR64commonRegClass.getNumRegs()),
ReserveXRegisterForRA(AArch64::GPR64commonRegClass.getNumRegs()),
CustomCallSavedXRegs(AArch64::GPR64commonRegClass.getNumRegs()),
- IsLittle(LittleEndian), StreamingSVEMode(StreamingSVEMode),
- StreamingCompatibleSVEMode(StreamingCompatibleSVEMode),
+ IsLittle(LittleEndian), IsStreaming(IsStreaming),
+ IsStreamingCompatible(IsStreamingCompatible),
MinSVEVectorSizeInBits(MinSVEVectorSizeInBitsOverride),
MaxSVEVectorSizeInBits(MaxSVEVectorSizeInBitsOverride), TargetTriple(TT),
InstrInfo(initializeSubtargetDependencies(FS, CPU, TuneCPU, HasMinSize)),
@@ -547,20 +540,6 @@ void AArch64Subtarget::mirFileLoaded(MachineFunction &MF) const {
bool AArch64Subtarget::useAA() const { return UseAA; }
-bool AArch64Subtarget::isStreamingCompatible() const {
- return StreamingCompatibleSVEMode || ForceStreamingCompatibleSVE;
-}
-
-bool AArch64Subtarget::isNeonAvailable() const {
- return hasNEON() &&
- (hasSMEFA64() || (!isStreaming() && !isStreamingCompatible()));
-}
-
-bool AArch64Subtarget::isSVEAvailable() const {
- return hasSVE() &&
- (hasSMEFA64() || (!isStreaming() && !isStreamingCompatible()));
-}
-
// If return address signing is enabled, tail calls are emitted as follows:
//
// ```
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h
index 3f3eefc4f680..7ef7a89b5749 100644
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.h
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h
@@ -79,8 +79,8 @@ protected:
bool IsLittle;
- bool StreamingSVEMode;
- bool StreamingCompatibleSVEMode;
+ bool IsStreaming;
+ bool IsStreamingCompatible;
unsigned MinSVEVectorSizeInBits;
unsigned MaxSVEVectorSizeInBits;
unsigned VScaleForTuning = 2;
@@ -120,8 +120,7 @@ public:
StringRef FS, const TargetMachine &TM, bool LittleEndian,
unsigned MinSVEVectorSizeInBitsOverride = 0,
unsigned MaxSVEVectorSizeInBitsOverride = 0,
- bool StreamingSVEMode = false,
- bool StreamingCompatibleSVEMode = false,
+ bool IsStreaming = false, bool IsStreamingCompatible = false,
bool HasMinSize = false);
// Getters for SubtargetFeatures defined in tablegen
@@ -165,20 +164,26 @@ public:
bool isXRaySupported() const override { return true; }
/// Returns true if the function has a streaming body.
- bool isStreaming() const { return StreamingSVEMode; }
+ bool isStreaming() const { return IsStreaming; }
/// Returns true if the function has a streaming-compatible body.
- bool isStreamingCompatible() const;
+ bool isStreamingCompatible() const { return IsStreamingCompatible; }
/// Returns true if the target has NEON and the function at runtime is known
/// to have NEON enabled (e.g. the function is known not to be in streaming-SVE
/// mode, which disables NEON instructions).
- bool isNeonAvailable() const;
+ bool isNeonAvailable() const {
+ return hasNEON() &&
+ (hasSMEFA64() || (!isStreaming() && !isStreamingCompatible()));
+ }
/// Returns true if the target has SVE and can use the full range of SVE
/// instructions, for example because the function is known not to be in
/// streaming-SVE mode or when the target has FEAT_FA64 enabled.
- bool isSVEAvailable() const;
+ bool isSVEAvailable() const {
+ return hasSVE() &&
+ (hasSMEFA64() || (!isStreaming() && !isStreamingCompatible()));
+ }
unsigned getMinVectorRegisterBitWidth() const {
// Don't assume any minimum vector size when PSTATE.SM may not be 0, because
diff --git a/llvm/lib/Target/AArch64/AArch64SystemOperands.td b/llvm/lib/Target/AArch64/AArch64SystemOperands.td
index 0564741c4970..0b5bc97674c7 100644
--- a/llvm/lib/Target/AArch64/AArch64SystemOperands.td
+++ b/llvm/lib/Target/AArch64/AArch64SystemOperands.td
@@ -1943,11 +1943,9 @@ def : RWSysReg<"PM", 0b11, 0b000, 0b0100, 0b0011, 0b001>;
// 2023 ISA Extension
// AArch64 Floating-point Mode Register controls behaviors of the FP8
// instructions (FEAT_FPMR)
-let Requires = [{ {AArch64::FeatureFPMR} }] in {
// Op0 Op1 CRn CRm Op2
def : ROSysReg<"ID_AA64FPFR0_EL1", 0b11, 0b000, 0b0000, 0b0100, 0b111>;
def : RWSysReg<"FPMR", 0b11, 0b011, 0b0100, 0b0100, 0b010>;
-}
// v9.5a Software Stepping Enhancements (FEAT_STEP2)
// Op0 Op1 CRn CRm Op2
diff --git a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
index df802cf42526..945ab5cf1f30 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -187,6 +187,11 @@ static cl::opt<unsigned> SVEVectorBitsMinOpt(
"with zero meaning no minimum size is assumed."),
cl::init(0), cl::Hidden);
+static cl::opt<bool> ForceStreamingCompatible(
+ "force-streaming-compatible",
+ cl::desc("Force the use of streaming-compatible code for all functions"),
+ cl::init(false), cl::Hidden);
+
extern cl::opt<bool> EnableHomogeneousPrologEpilog;
static cl::opt<bool> EnableGISelLoadStoreOptPreLegal(
@@ -408,10 +413,11 @@ AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
StringRef FS = FSAttr.isValid() ? FSAttr.getValueAsString() : TargetFS;
bool HasMinSize = F.hasMinSize();
- bool StreamingSVEMode = F.hasFnAttribute("aarch64_pstate_sm_enabled") ||
- F.hasFnAttribute("aarch64_pstate_sm_body");
- bool StreamingCompatibleSVEMode =
- F.hasFnAttribute("aarch64_pstate_sm_compatible");
+ bool IsStreaming = F.hasFnAttribute("aarch64_pstate_sm_enabled") ||
+ F.hasFnAttribute("aarch64_pstate_sm_body");
+ bool IsStreamingCompatible =
+ F.hasFnAttribute("aarch64_pstate_sm_compatible") ||
+ ForceStreamingCompatible;
unsigned MinSVEVectorSize = 0;
unsigned MaxSVEVectorSize = 0;
@@ -439,10 +445,9 @@ AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
SmallString<512> Key;
raw_svector_ostream(Key) << "SVEMin" << MinSVEVectorSize << "SVEMax"
- << MaxSVEVectorSize
- << "StreamingSVEMode=" << StreamingSVEMode
- << "StreamingCompatibleSVEMode="
- << StreamingCompatibleSVEMode << CPU << TuneCPU << FS
+ << MaxSVEVectorSize << "IsStreaming=" << IsStreaming
+ << "IsStreamingCompatible=" << IsStreamingCompatible
+ << CPU << TuneCPU << FS
<< "HasMinSize=" << HasMinSize;
auto &I = SubtargetMap[Key];
@@ -453,12 +458,10 @@ AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
resetTargetOptions(F);
I = std::make_unique<AArch64Subtarget>(
TargetTriple, CPU, TuneCPU, FS, *this, isLittle, MinSVEVectorSize,
- MaxSVEVectorSize, StreamingSVEMode, StreamingCompatibleSVEMode,
- HasMinSize);
+ MaxSVEVectorSize, IsStreaming, IsStreamingCompatible, HasMinSize);
}
- assert((!StreamingSVEMode || I->hasSME()) &&
- "Expected SME to be available");
+ assert((!IsStreaming || I->hasSME()) && "Expected SME to be available");
return I.get();
}
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index c9bba9bf6314..13a68b7dcf98 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -3718,7 +3718,6 @@ static const struct Extension {
{"sb", {AArch64::FeatureSB}},
{"ssbs", {AArch64::FeatureSSBS}},
{"tme", {AArch64::FeatureTME}},
- {"fpmr", {AArch64::FeatureFPMR}},
{"fp8", {AArch64::FeatureFP8}},
{"faminmax", {AArch64::FeatureFAMINMAX}},
{"fp8fma", {AArch64::FeatureFP8FMA}},
@@ -3731,7 +3730,7 @@ static const struct Extension {
{"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
{"sme-f8f16", {AArch64::FeatureSMEF8F16}},
{"sme-f8f32", {AArch64::FeatureSMEF8F32}},
- {"sme-fa64", {AArch64::FeatureSMEFA64}},
+ {"sme-fa64", {AArch64::FeatureSMEFA64}},
{"cpa", {AArch64::FeatureCPA}},
{"tlbiw", {AArch64::FeatureTLBIW}},
};
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
index 0dd4a78f962d..6493a2ee4a93 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
@@ -430,6 +430,55 @@ public:
return false;
}
+ bool clearsSuperRegisters(const MCRegisterInfo &MRI, const MCInst &Inst,
+ APInt &Mask) const override {
+ const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
+ unsigned NumDefs = Desc.getNumDefs();
+ unsigned NumImplicitDefs = Desc.implicit_defs().size();
+ assert(Mask.getBitWidth() == NumDefs + NumImplicitDefs &&
+ "Unexpected number of bits in the mask!");
+ // 32-bit General Purpose Register class.
+ const MCRegisterClass &GPR32RC = MRI.getRegClass(AArch64::GPR32RegClassID);
+ // Floating Point Register classes.
+ const MCRegisterClass &FPR8RC = MRI.getRegClass(AArch64::FPR8RegClassID);
+ const MCRegisterClass &FPR16RC = MRI.getRegClass(AArch64::FPR16RegClassID);
+ const MCRegisterClass &FPR32RC = MRI.getRegClass(AArch64::FPR32RegClassID);
+ const MCRegisterClass &FPR64RC = MRI.getRegClass(AArch64::FPR64RegClassID);
+ const MCRegisterClass &FPR128RC =
+ MRI.getRegClass(AArch64::FPR128RegClassID);
+
+ auto ClearsSuperReg = [=](unsigned RegID) {
+ // An update to the lower 32 bits of a 64 bit integer register is
+ // architecturally defined to zero extend the upper 32 bits on a write.
+ if (GPR32RC.contains(RegID))
+ return true;
+ // SIMD&FP instructions operating on scalar data only access the lower
+ // bits of a register, the upper bits are zero extended on a write. For
+ // SIMD vector registers smaller than 128-bits, the upper 64-bits of the
+ // register are zero extended on a write.
+ // When VL is higher than 128 bits, any write to a SIMD&FP register sets
+ // bits higher than 128 to zero.
+ return FPR8RC.contains(RegID) || FPR16RC.contains(RegID) ||
+ FPR32RC.contains(RegID) || FPR64RC.contains(RegID) ||
+ FPR128RC.contains(RegID);
+ };
+
+ Mask.clearAllBits();
+ for (unsigned I = 0, E = NumDefs; I < E; ++I) {
+ const MCOperand &Op = Inst.getOperand(I);
+ if (ClearsSuperReg(Op.getReg()))
+ Mask.setBit(I);
+ }
+
+ for (unsigned I = 0, E = NumImplicitDefs; I < E; ++I) {
+ const MCPhysReg Reg = Desc.implicit_defs()[I];
+ if (ClearsSuperReg(Reg))
+ Mask.setBit(NumDefs + I);
+ }
+
+ return Mask.getBoolValue();
+ }
+
std::vector<std::pair<uint64_t, uint64_t>>
findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
const Triple &TargetTriple) const override {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index b7388ed9e85a..cad4a3430327 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -19,7 +19,6 @@
#include "AMDGPU.h"
#include "AMDGPUHSAMetadataStreamer.h"
#include "AMDGPUResourceUsageAnalysis.h"
-#include "AMDKernelCodeT.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUInstPrinter.h"
#include "MCTargetDesc/AMDGPUMCExpr.h"
@@ -29,6 +28,7 @@
#include "SIMachineFunctionInfo.h"
#include "TargetInfo/AMDGPUTargetInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
+#include "Utils/AMDKernelCodeTUtils.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -205,8 +205,9 @@ void AMDGPUAsmPrinter::emitFunctionBodyStart() {
if (STM.isMesaKernel(F) &&
(F.getCallingConv() == CallingConv::AMDGPU_KERNEL ||
F.getCallingConv() == CallingConv::SPIR_KERNEL)) {
- amd_kernel_code_t KernelCode;
+ AMDGPUMCKernelCodeT KernelCode;
getAmdKernelCode(KernelCode, CurrentProgramInfo, *MF);
+ KernelCode.validate(&STM, MF->getContext());
getTargetStreamer()->EmitAMDKernelCodeT(KernelCode);
}
@@ -517,12 +518,9 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
DumpCodeInstEmitter = nullptr;
if (STM.dumpCode()) {
- // For -dumpcode, get the assembler out of the streamer, even if it does
- // not really want to let us have it. This only works with -filetype=obj.
- bool SaveFlag = OutStreamer->getUseAssemblerInfoForParsing();
- OutStreamer->setUseAssemblerInfoForParsing(true);
+ // For -dumpcode, get the assembler out of the streamer. This only works
+ // with -filetype=obj.
MCAssembler *Assembler = OutStreamer->getAssemblerPtr();
- OutStreamer->setUseAssemblerInfoForParsing(SaveFlag);
if (Assembler)
DumpCodeInstEmitter = Assembler->getEmitterPtr();
}
@@ -1320,7 +1318,7 @@ static amd_element_byte_size_t getElementByteSizeValue(unsigned Size) {
}
}
-void AMDGPUAsmPrinter::getAmdKernelCode(amd_kernel_code_t &Out,
+void AMDGPUAsmPrinter::getAmdKernelCode(AMDGPUMCKernelCodeT &Out,
const SIProgramInfo &CurrentProgramInfo,
const MachineFunction &MF) const {
const Function &F = MF.getFunction();
@@ -1331,24 +1329,22 @@ void AMDGPUAsmPrinter::getAmdKernelCode(amd_kernel_code_t &Out,
const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
MCContext &Ctx = MF.getContext();
- AMDGPU::initDefaultAMDKernelCodeT(Out, &STM);
+ Out.initDefault(&STM, Ctx, /*InitMCExpr=*/false);
- Out.compute_pgm_resource_registers =
- CurrentProgramInfo.getComputePGMRSrc1(STM) |
- (CurrentProgramInfo.getComputePGMRSrc2() << 32);
+ Out.compute_pgm_resource1_registers =
+ CurrentProgramInfo.getComputePGMRSrc1(STM, Ctx);
+ Out.compute_pgm_resource2_registers =
+ CurrentProgramInfo.getComputePGMRSrc2(Ctx);
Out.code_properties |= AMD_CODE_PROPERTY_IS_PTR64;
- if (getMCExprValue(CurrentProgramInfo.DynamicCallStack, Ctx))
- Out.code_properties |= AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK;
+ Out.is_dynamic_callstack = CurrentProgramInfo.DynamicCallStack;
- AMD_HSA_BITS_SET(Out.code_properties,
- AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE,
+ AMD_HSA_BITS_SET(Out.code_properties, AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE,
getElementByteSizeValue(STM.getMaxPrivateElementSize(true)));
const GCNUserSGPRUsageInfo &UserSGPRInfo = MFI->getUserSGPRInfo();
if (UserSGPRInfo.hasPrivateSegmentBuffer()) {
- Out.code_properties |=
- AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER;
+ Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER;
}
if (UserSGPRInfo.hasDispatchPtr())
@@ -1374,10 +1370,9 @@ void AMDGPUAsmPrinter::getAmdKernelCode(amd_kernel_code_t &Out,
Align MaxKernArgAlign;
Out.kernarg_segment_byte_size = STM.getKernArgSegmentSize(F, MaxKernArgAlign);
- Out.wavefront_sgpr_count = getMCExprValue(CurrentProgramInfo.NumSGPR, Ctx);
- Out.workitem_vgpr_count = getMCExprValue(CurrentProgramInfo.NumVGPR, Ctx);
- Out.workitem_private_segment_byte_size =
- getMCExprValue(CurrentProgramInfo.ScratchSize, Ctx);
+ Out.wavefront_sgpr_count = CurrentProgramInfo.NumSGPR;
+ Out.workitem_vgpr_count = CurrentProgramInfo.NumVGPR;
+ Out.workitem_private_segment_byte_size = CurrentProgramInfo.ScratchSize;
Out.workgroup_group_segment_byte_size = CurrentProgramInfo.LDSSize;
// kernarg_segment_alignment is specified as log of the alignment.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
index 16d8952a533e..87156f27fc6c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
@@ -17,8 +17,6 @@
#include "SIProgramInfo.h"
#include "llvm/CodeGen/AsmPrinter.h"
-struct amd_kernel_code_t;
-
namespace llvm {
class AMDGPUMachineFunction;
@@ -29,6 +27,7 @@ class MCOperand;
namespace AMDGPU {
struct MCKernelDescriptor;
+struct AMDGPUMCKernelCodeT;
namespace HSAMD {
class MetadataStreamer;
}
@@ -50,7 +49,8 @@ private:
uint64_t getFunctionCodeSize(const MachineFunction &MF) const;
void getSIProgramInfo(SIProgramInfo &Out, const MachineFunction &MF);
- void getAmdKernelCode(amd_kernel_code_t &Out, const SIProgramInfo &KernelInfo,
+ void getAmdKernelCode(AMDGPU::AMDGPUMCKernelCodeT &Out,
+ const SIProgramInfo &KernelInfo,
const MachineFunction &MF) const;
/// Emit register usage information so that the GPU driver
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPassBuilder.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPassBuilder.cpp
new file mode 100644
index 000000000000..01ab61a0e407
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPassBuilder.cpp
@@ -0,0 +1,38 @@
+//===- lib/Target/AMDGPU/AMDGPUCodeGenPassBuilder.cpp ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPUCodeGenPassBuilder.h"
+#include "AMDGPUTargetMachine.h"
+
+using namespace llvm;
+
+AMDGPUCodeGenPassBuilder::AMDGPUCodeGenPassBuilder(
+ AMDGPUTargetMachine &TM, const CGPassBuilderOption &Opts,
+ PassInstrumentationCallbacks *PIC)
+ : CodeGenPassBuilder(TM, Opts, PIC) {
+ Opt.RequiresCodeGenSCCOrder = true;
+ // Exceptions and StackMaps are not supported, so these passes will never do
+ // anything.
+ // Garbage collection is not supported.
+ disablePass<StackMapLivenessPass, FuncletLayoutPass,
+ ShadowStackGCLoweringPass>();
+}
+
+void AMDGPUCodeGenPassBuilder::addPreISel(AddIRPass &addPass) const {
+ // TODO: Add passes pre instruction selection.
+}
+
+void AMDGPUCodeGenPassBuilder::addAsmPrinter(AddMachinePass &addPass,
+ CreateMCStreamer) const {
+ // TODO: Add AsmPrinter.
+}
+
+Error AMDGPUCodeGenPassBuilder::addInstSelector(AddMachinePass &) const {
+ // TODO: Add instruction selector.
+ return Error::success();
+}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPassBuilder.h b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPassBuilder.h
new file mode 100644
index 000000000000..5f79e309703a
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPassBuilder.h
@@ -0,0 +1,33 @@
+//===- lib/Target/AMDGPU/AMDGPUCodeGenPassBuilder.h -----------*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUCODEGENPASSBUILDER_H
+#define LLVM_LIB_TARGET_AMDGPU_AMDGPUCODEGENPASSBUILDER_H
+
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Passes/CodeGenPassBuilder.h"
+
+namespace llvm {
+
+class AMDGPUTargetMachine;
+
+class AMDGPUCodeGenPassBuilder
+ : public CodeGenPassBuilder<AMDGPUCodeGenPassBuilder, AMDGPUTargetMachine> {
+public:
+ AMDGPUCodeGenPassBuilder(AMDGPUTargetMachine &TM,
+ const CGPassBuilderOption &Opts,
+ PassInstrumentationCallbacks *PIC);
+
+ void addPreISel(AddIRPass &addPass) const;
+ void addAsmPrinter(AddMachinePass &, CreateMCStreamer) const;
+ Error addInstSelector(AddMachinePass &) const;
+};
+
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_AMDGPU_AMDGPUCODEGENPASSBUILDER_H
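CodeGenPassBuilder is a CRTP base: the generic pipeline driver statically dispatches to hooks such as addPreISel on the derived target builder, with no virtual calls. A stripped-down sketch of that shape (the class names here are stand-ins, not the real LLVM types):

#include <iostream>

template <typename DerivedT> struct PipelineBase {
  void buildPipeline() {
    // Statically dispatch to the derived override, if any.
    static_cast<DerivedT *>(this)->addPreISel();
  }
  void addPreISel() { std::cout << "default pre-isel\n"; }
};

struct MyTargetBuilder : PipelineBase<MyTargetBuilder> {
  void addPreISel() { std::cout << "target pre-isel\n"; }
};

int main() {
  MyTargetBuilder B;
  B.buildPipeline(); // Prints "target pre-isel".
  return 0;
}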
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
index 152f495a452b..231db188e65d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
@@ -250,6 +250,11 @@ def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_USHORT, SIbuffer_load_ushort>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_UBYTE, SIbuffer_load_ubyte>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_SSHORT, SIbuffer_load_short>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_SBYTE, SIbuffer_load_byte>;
+def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_TFE, SIbuffer_load_tfe>;
+def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_USHORT_TFE, SIbuffer_load_ushort_tfe>;
+def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_UBYTE_TFE, SIbuffer_load_ubyte_tfe>;
+def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_SSHORT_TFE, SIbuffer_load_short_tfe>;
+def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_SBYTE_TFE, SIbuffer_load_byte_tfe>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_FORMAT, SIbuffer_load_format>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_FORMAT_TFE, SIbuffer_load_format_tfe>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_FORMAT_D16, SIbuffer_load_format_d16>;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index c11c7a57e059..e35957338da7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -2526,6 +2526,14 @@ void AMDGPUDAGToDAGISel::SelectDSBvhStackIntrinsic(SDNode *N) {
CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
}
+void AMDGPUDAGToDAGISel::SelectPOPSExitingWaveID(SDNode *N) {
+ // TODO: Select this with a tablegen pattern. This is tricky because the
+ // intrinsic is IntrReadMem/IntrWriteMem but the instruction is not marked
+ // mayLoad/mayStore and tablegen complains about the mismatch.
+ SDValue Reg = CurDAG->getRegister(AMDGPU::SRC_POPS_EXITING_WAVE_ID, MVT::i32);
+ CurDAG->SelectNodeTo(N, AMDGPU::S_MOV_B32, N->getVTList(), Reg);
+}
+
static unsigned gwsIntrinToOpcode(unsigned IntrID) {
switch (IntrID) {
case Intrinsic::amdgcn_ds_gws_init:
@@ -2682,6 +2690,9 @@ void AMDGPUDAGToDAGISel::SelectINTRINSIC_W_CHAIN(SDNode *N) {
case Intrinsic::amdgcn_ds_bvh_stack_rtn:
SelectDSBvhStackIntrinsic(N);
return;
+ case Intrinsic::amdgcn_pops_exiting_wave_id:
+ SelectPOPSExitingWaveID(N);
+ return;
}
SelectCode(N);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h
index f987b747c0e2..53d25b4cf4ca 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h
@@ -274,6 +274,7 @@ private:
void SelectFP_EXTEND(SDNode *N);
void SelectDSAppendConsume(SDNode *N, unsigned IntrID);
void SelectDSBvhStackIntrinsic(SDNode *N);
+ void SelectPOPSExitingWaveID(SDNode *N);
void SelectDS_GWS(SDNode *N, unsigned IntrID);
void SelectInterpP1F16(SDNode *N);
void SelectINTRINSIC_W_CHAIN(SDNode *N);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index d35a022ad680..375643b7f519 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -3117,20 +3117,30 @@ static bool isCttzOpc(unsigned Opc) {
SDValue AMDGPUTargetLowering::lowerCTLZResults(SDValue Op,
SelectionDAG &DAG) const {
auto SL = SDLoc(Op);
+ auto Opc = Op.getOpcode();
auto Arg = Op.getOperand(0u);
auto ResultVT = Op.getValueType();
if (ResultVT != MVT::i8 && ResultVT != MVT::i16)
return {};
- assert(isCtlzOpc(Op.getOpcode()));
+ assert(isCtlzOpc(Opc));
assert(ResultVT == Arg.getValueType());
- auto const LeadingZeroes = 32u - ResultVT.getFixedSizeInBits();
- auto SubVal = DAG.getConstant(LeadingZeroes, SL, MVT::i32);
- auto NewOp = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Arg);
- NewOp = DAG.getNode(Op.getOpcode(), SL, MVT::i32, NewOp);
- NewOp = DAG.getNode(ISD::SUB, SL, MVT::i32, NewOp, SubVal);
+ const uint64_t NumBits = ResultVT.getFixedSizeInBits();
+ SDValue NumExtBits = DAG.getConstant(32u - NumBits, SL, MVT::i32);
+ SDValue NewOp;
+
+ if (Opc == ISD::CTLZ_ZERO_UNDEF) {
+ NewOp = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Arg);
+ NewOp = DAG.getNode(ISD::SHL, SL, MVT::i32, NewOp, NumExtBits);
+ NewOp = DAG.getNode(Opc, SL, MVT::i32, NewOp);
+ } else {
+ NewOp = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Arg);
+ NewOp = DAG.getNode(Opc, SL, MVT::i32, NewOp);
+ NewOp = DAG.getNode(ISD::SUB, SL, MVT::i32, NewOp, NumExtBits);
+ }
+
return DAG.getNode(ISD::TRUNCATE, SL, ResultVT, NewOp);
}
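The two branches above implement standard bit-counting identities, shown here as a minimal host-side sketch (the Clang/GCC __builtin_clz builtin stands in for the hardware count; this is not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint16_t X = 0x00F0; // 8 leading zeros as a 16-bit value
      // CTLZ_ZERO_UNDEF path: any-extend, shift the value into the high bits,
      // then count on 32 bits.
      assert(__builtin_clz(uint32_t(X) << 16) == 8);
      // CTLZ path: zero-extend, count on 32 bits, then subtract the
      // 32 - 16 = 16 extra leading zeros introduced by the extension.
      assert(__builtin_clz(uint32_t(X)) - 16 == 8);
      return 0;
    }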
@@ -5519,6 +5529,11 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(BUFFER_LOAD_USHORT)
NODE_NAME_CASE(BUFFER_LOAD_BYTE)
NODE_NAME_CASE(BUFFER_LOAD_SHORT)
+ NODE_NAME_CASE(BUFFER_LOAD_TFE)
+ NODE_NAME_CASE(BUFFER_LOAD_UBYTE_TFE)
+ NODE_NAME_CASE(BUFFER_LOAD_USHORT_TFE)
+ NODE_NAME_CASE(BUFFER_LOAD_BYTE_TFE)
+ NODE_NAME_CASE(BUFFER_LOAD_SHORT_TFE)
NODE_NAME_CASE(BUFFER_LOAD_FORMAT)
NODE_NAME_CASE(BUFFER_LOAD_FORMAT_TFE)
NODE_NAME_CASE(BUFFER_LOAD_FORMAT_D16)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index 3814b56a4d56..71c4334029b4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -582,6 +582,11 @@ enum NodeType : unsigned {
BUFFER_LOAD_USHORT,
BUFFER_LOAD_BYTE,
BUFFER_LOAD_SHORT,
+ BUFFER_LOAD_TFE,
+ BUFFER_LOAD_UBYTE_TFE,
+ BUFFER_LOAD_USHORT_TFE,
+ BUFFER_LOAD_BYTE_TFE,
+ BUFFER_LOAD_SHORT_TFE,
BUFFER_LOAD_FORMAT,
BUFFER_LOAD_FORMAT_TFE,
BUFFER_LOAD_FORMAT_D16,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index b48a09489653..04d9bb5cb18a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -2079,6 +2079,21 @@ bool AMDGPUInstructionSelector::selectDSBvhStackIntrinsic(
return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}
+bool AMDGPUInstructionSelector::selectPOPSExitingWaveID(
+ MachineInstr &MI) const {
+ Register Dst = MI.getOperand(0).getReg();
+ const DebugLoc &DL = MI.getDebugLoc();
+ MachineBasicBlock *MBB = MI.getParent();
+
+ // TODO: Select this with a tablegen pattern. This is tricky because the
+ // intrinsic is IntrReadMem/IntrWriteMem but the instruction is not marked
+ // mayLoad/mayStore and tablegen complains about the mismatch.
+ auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
+ .addReg(AMDGPU::SRC_POPS_EXITING_WAVE_ID);
+ MI.eraseFromParent();
+ return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
+}
+
bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
MachineInstr &I) const {
unsigned IntrinsicID = cast<GIntrinsic>(I).getIntrinsicID();
@@ -2129,6 +2144,8 @@ bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
return selectSBarrierSignalIsfirst(I, IntrinsicID);
case Intrinsic::amdgcn_s_barrier_leave:
return selectSBarrierLeave(I);
+ case Intrinsic::amdgcn_pops_exiting_wave_id:
+ return selectPOPSExitingWaveID(I);
}
return selectImpl(I, *CoverageInfo);
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
index f561d5d29efc..48f3b1811801 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
@@ -125,6 +125,7 @@ private:
bool selectDSAppendConsume(MachineInstr &MI, bool IsAppend) const;
bool selectSBarrier(MachineInstr &MI) const;
bool selectDSBvhStackIntrinsic(MachineInstr &MI) const;
+ bool selectPOPSExitingWaveID(MachineInstr &MI) const;
bool selectImageIntrinsic(MachineInstr &MI,
const AMDGPU::ImageDimIntrinsicInfo *Intr) const;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
index 86f77f7b64e8..fa7492ac6cbe 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
@@ -168,30 +168,20 @@ def brtarget : Operand<OtherVT>;
class HasOneUseUnaryOp<SDPatternOperator op> : PatFrag<
(ops node:$src0),
- (op $src0),
- [{ return N->hasOneUse(); }]> {
-
- let GISelPredicateCode = [{
- return MRI.hasOneNonDBGUse(MI.getOperand(0).getReg());
- }];
+ (op $src0)> {
+ let HasOneUse = 1;
}
class HasOneUseBinOp<SDPatternOperator op> : PatFrag<
(ops node:$src0, node:$src1),
- (op $src0, $src1),
- [{ return N->hasOneUse(); }]> {
- let GISelPredicateCode = [{
- return MRI.hasOneNonDBGUse(MI.getOperand(0).getReg());
- }];
+ (op $src0, $src1)> {
+ let HasOneUse = 1;
}
class HasOneUseTernaryOp<SDPatternOperator op> : PatFrag<
(ops node:$src0, node:$src1, node:$src2),
- (op $src0, $src1, $src2),
- [{ return N->hasOneUse(); }]> {
- let GISelPredicateCode = [{
- return MRI.hasOneNonDBGUse(MI.getOperand(0).getReg());
- }];
+ (op $src0, $src1, $src2)> {
+ let HasOneUse = 1;
}
class is_canonicalized_1<SDPatternOperator op> : PatFrag<
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index bd7bf78c4c0b..ee7fb20c23aa 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -1270,13 +1270,22 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
.custom();
// The 64-bit versions produce 32-bit results, but only on the SALU.
- getActionDefinitionsBuilder({G_CTLZ_ZERO_UNDEF, G_CTTZ_ZERO_UNDEF})
- .legalFor({{S32, S32}, {S32, S64}})
- .clampScalar(0, S32, S32)
- .clampScalar(1, S32, S64)
- .scalarize(0)
- .widenScalarToNextPow2(0, 32)
- .widenScalarToNextPow2(1, 32);
+ getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF)
+ .legalFor({{S32, S32}, {S32, S64}})
+ .customIf(scalarNarrowerThan(1, 32))
+ .clampScalar(0, S32, S32)
+ .clampScalar(1, S32, S64)
+ .scalarize(0)
+ .widenScalarToNextPow2(0, 32)
+ .widenScalarToNextPow2(1, 32);
+
+ getActionDefinitionsBuilder(G_CTTZ_ZERO_UNDEF)
+ .legalFor({{S32, S32}, {S32, S64}})
+ .clampScalar(0, S32, S32)
+ .clampScalar(1, S32, S64)
+ .scalarize(0)
+ .widenScalarToNextPow2(0, 32)
+ .widenScalarToNextPow2(1, 32);
// S64 is only legal on SALU, and needs to be broken into 32-bit elements in
// RegBankSelect.
@@ -2128,6 +2137,8 @@ bool AMDGPULegalizerInfo::legalizeCustom(
case TargetOpcode::G_CTLZ:
case TargetOpcode::G_CTTZ:
return legalizeCTLZ_CTTZ(MI, MRI, B);
+ case TargetOpcode::G_CTLZ_ZERO_UNDEF:
+ return legalizeCTLZ_ZERO_UNDEF(MI, MRI, B);
case TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND:
return legalizeFPTruncRound(MI, B);
case TargetOpcode::G_STACKSAVE:
@@ -4145,6 +4156,25 @@ bool AMDGPULegalizerInfo::legalizeCTLZ_CTTZ(MachineInstr &MI,
return true;
}
+bool AMDGPULegalizerInfo::legalizeCTLZ_ZERO_UNDEF(MachineInstr &MI,
+ MachineRegisterInfo &MRI,
+ MachineIRBuilder &B) const {
+ Register Dst = MI.getOperand(0).getReg();
+ Register Src = MI.getOperand(1).getReg();
+ LLT SrcTy = MRI.getType(Src);
+ TypeSize NumBits = SrcTy.getSizeInBits();
+
+ assert(NumBits < 32u);
+
+ auto ShiftAmt = B.buildConstant(S32, 32u - NumBits);
+ auto Extend = B.buildAnyExt(S32, {Src}).getReg(0u);
+ auto Shift = B.buildShl(S32, Extend, ShiftAmt);
+ auto Ctlz = B.buildInstr(AMDGPU::G_AMDGPU_FFBH_U32, {S32}, {Shift});
+ B.buildTrunc(Dst, Ctlz);
+ MI.eraseFromParent();
+ return true;
+}
+
// Check that this is a G_XOR x, -1
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI) {
if (MI.getOpcode() != TargetOpcode::G_XOR)
@@ -5840,17 +5870,18 @@ bool AMDGPULegalizerInfo::legalizeBufferLoad(MachineInstr &MI,
: AMDGPU::G_AMDGPU_BUFFER_LOAD_FORMAT;
}
} else {
- if (IsTFE)
- return false;
switch (MemTy.getSizeInBits()) {
case 8:
- Opc = AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE;
+ Opc = IsTFE ? AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE_TFE
+ : AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE;
break;
case 16:
- Opc = AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT;
+ Opc = IsTFE ? AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT_TFE
+ : AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT;
break;
default:
- Opc = AMDGPU::G_AMDGPU_BUFFER_LOAD;
+ Opc = IsTFE ? AMDGPU::G_AMDGPU_BUFFER_LOAD_TFE
+ : AMDGPU::G_AMDGPU_BUFFER_LOAD;
break;
}
}
@@ -5862,7 +5893,11 @@ bool AMDGPULegalizerInfo::legalizeBufferLoad(MachineInstr &MI,
Register LoadDstReg = B.getMRI()->createGenericVirtualRegister(LoadTy);
buildBufferLoad(Opc, LoadDstReg, RSrc, VIndex, VOffset, SOffset, ImmOffset,
Format, AuxiliaryData, MMO, IsTyped, HasVIndex, B);
- if (NumValueDWords == 1) {
+ if (MemTy.getSizeInBits() < 32) {
+ Register ExtDst = B.getMRI()->createGenericVirtualRegister(S32);
+ B.buildUnmerge({ExtDst, StatusDst}, LoadDstReg);
+ B.buildTrunc(Dst, ExtDst);
+ } else if (NumValueDWords == 1) {
B.buildUnmerge({Dst, StatusDst}, LoadDstReg);
} else {
SmallVector<Register, 5> LoadElts;
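A host-side model of the sub-dword TFE result layout the new branch assumes (illustrative sketch only, not patch code): the load yields {data dword, status dword}, the unmerge splits them, and the trunc recovers the original narrow value.

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t Wide[2] = {0x0000BEEFu, 1u}; // {extended data, TFE status}
      uint16_t Dst = static_cast<uint16_t>(Wide[0]); // the final G_TRUNC
      uint32_t Status = Wide[1];                     // StatusDst
      assert(Dst == 0xBEEF && Status == 1);
      return 0;
    }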
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
index e5ba84a74a0f..4b1d821dadc2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -108,6 +108,8 @@ public:
bool legalizeMul(LegalizerHelper &Helper, MachineInstr &MI) const;
bool legalizeCTLZ_CTTZ(MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &B) const;
+ bool legalizeCTLZ_ZERO_UNDEF(MachineInstr &MI, MachineRegisterInfo &MRI,
+ MachineIRBuilder &B) const;
bool loadInputValue(Register DstReg, MachineIRBuilder &B,
const ArgDescriptor *Arg,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
index faf04c3c7e70..c515138d95a2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -22,6 +22,7 @@
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
+#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include <cmath>
@@ -1156,35 +1157,49 @@ bool AMDGPULibCalls::fold_pow(FPMathOperator *FPOp, IRBuilder<> &B,
bool AMDGPULibCalls::fold_rootn(FPMathOperator *FPOp, IRBuilder<> &B,
const FuncInfo &FInfo) {
- // skip vector function
- if (getVecSize(FInfo) != 1)
- return false;
-
Value *opr0 = FPOp->getOperand(0);
Value *opr1 = FPOp->getOperand(1);
- ConstantInt *CINT = dyn_cast<ConstantInt>(opr1);
- if (!CINT) {
+ const APInt *CINT = nullptr;
+ if (!match(opr1, m_APIntAllowPoison(CINT)))
return false;
- }
+
+ Function *Parent = B.GetInsertBlock()->getParent();
+
int ci_opr1 = (int)CINT->getSExtValue();
- if (ci_opr1 == 1) { // rootn(x, 1) = x
- LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> " << *opr0 << "\n");
+ if (ci_opr1 == 1 && !Parent->hasFnAttribute(Attribute::StrictFP)) {
+ // rootn(x, 1) = x
+ //
+ // TODO: Insert constrained canonicalize for strictfp case.
+ LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> " << *opr0 << '\n');
replaceCall(FPOp, opr0);
return true;
}
Module *M = B.GetInsertBlock()->getModule();
- if (ci_opr1 == 2) { // rootn(x, 2) = sqrt(x)
- if (FunctionCallee FPExpr =
- getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_SQRT, FInfo))) {
- LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> sqrt(" << *opr0
- << ")\n");
- Value *nval = CreateCallEx(B,FPExpr, opr0, "__rootn2sqrt");
- replaceCall(FPOp, nval);
- return true;
- }
- } else if (ci_opr1 == 3) { // rootn(x, 3) = cbrt(x)
+
+ CallInst *CI = cast<CallInst>(FPOp);
+ if (ci_opr1 == 2 &&
+ shouldReplaceLibcallWithIntrinsic(CI,
+ /*AllowMinSizeF32=*/true,
+ /*AllowF64=*/true)) {
+ // rootn(x, 2) = sqrt(x)
+ LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> sqrt(" << *opr0 << ")\n");
+
+ CallInst *NewCall = B.CreateUnaryIntrinsic(Intrinsic::sqrt, opr0, CI);
+ NewCall->takeName(CI);
+
+ // OpenCL rootn has a looser ulp of 2 requirement than sqrt, so add some
+ // metadata.
+ MDBuilder MDHelper(M->getContext());
+ MDNode *FPMD = MDHelper.createFPMath(std::max(FPOp->getFPAccuracy(), 2.0f));
+ NewCall->setMetadata(LLVMContext::MD_fpmath, FPMD);
+
+ replaceCall(CI, NewCall);
+ return true;
+ }
+
+ if (ci_opr1 == 3) { // rootn(x, 3) = cbrt(x)
if (FunctionCallee FPExpr =
getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_CBRT, FInfo))) {
LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> cbrt(" << *opr0
@@ -1200,16 +1215,36 @@ bool AMDGPULibCalls::fold_rootn(FPMathOperator *FPOp, IRBuilder<> &B,
"__rootn2div");
replaceCall(FPOp, nval);
return true;
- } else if (ci_opr1 == -2) { // rootn(x, -2) = rsqrt(x)
- if (FunctionCallee FPExpr =
- getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_RSQRT, FInfo))) {
- LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> rsqrt(" << *opr0
- << ")\n");
- Value *nval = CreateCallEx(B,FPExpr, opr0, "__rootn2rsqrt");
- replaceCall(FPOp, nval);
- return true;
- }
}
+
+ if (ci_opr1 == -2 &&
+ shouldReplaceLibcallWithIntrinsic(CI,
+ /*AllowMinSizeF32=*/true,
+ /*AllowF64=*/true)) {
+ // rootn(x, -2) = rsqrt(x)
+
+ // The original rootn had looser ulp requirements than the resultant sqrt
+ // and fdiv.
+ MDBuilder MDHelper(M->getContext());
+ MDNode *FPMD = MDHelper.createFPMath(std::max(FPOp->getFPAccuracy(), 2.0f));
+
+ // TODO: Could handle strictfp but need to fix strict sqrt emission
+ FastMathFlags FMF = FPOp->getFastMathFlags();
+ FMF.setAllowContract(true);
+
+ CallInst *Sqrt = B.CreateUnaryIntrinsic(Intrinsic::sqrt, opr0, CI);
+ Instruction *RSqrt = cast<Instruction>(
+ B.CreateFDiv(ConstantFP::get(opr0->getType(), 1.0), Sqrt));
+ Sqrt->setFastMathFlags(FMF);
+ RSqrt->setFastMathFlags(FMF);
+ RSqrt->setMetadata(LLVMContext::MD_fpmath, FPMD);
+
+ LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> rsqrt(" << *opr0
+ << ")\n");
+ replaceCall(CI, RSqrt);
+ return true;
+ }
+
return false;
}
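A quick numeric check of the rootn identities this fold relies on, using host libm with a tolerance since pow is not guaranteed to be correctly rounded (sketch only, not patch code):

    #include <cassert>
    #include <cmath>

    int main() {
      const double X = 16.0, Eps = 1e-12;
      // rootn(x, n) == x^(1/n)
      assert(std::fabs(std::pow(X, 1.0 / 2.0) - std::sqrt(X)) < Eps);
      assert(std::fabs(std::pow(X, 1.0 / 3.0) - std::cbrt(X)) < Eps);
      assert(std::fabs(std::pow(X, -1.0) - 1.0 / X) < Eps);
      assert(std::fabs(std::pow(X, -1.0 / 2.0) - 1.0 / std::sqrt(X)) < Eps);
      return 0;
    }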
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
index 1114a8c40114..f878bd9465d3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
@@ -801,7 +801,7 @@ Value *FatPtrConstMaterializer::materialize(Value *V) {
Ops.push_back(cast<Constant>(U.get()));
auto *NewGEP = ConstantExpr::getGetElementPtr(
NewSrcTy, Ops[0], ArrayRef<Constant *>(Ops).slice(1),
- GEPO->isInBounds(), GEPO->getInRange());
+ GEPO->getNoWrapFlags(), GEPO->getInRange());
LLVM_DEBUG(dbgs() << "p7-getting GEP: " << *GEPO << " becomes " << *NewGEP
<< "\n");
Value *FurtherMap = materialize(NewGEP);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
index 2c7163a77537..625ac0230f16 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
@@ -862,48 +862,6 @@ public:
return N;
}
- /// Strip "amdgpu-no-lds-kernel-id" from any functions where we may have
- /// introduced its use. If AMDGPUAttributor ran prior to the pass, we inferred
- /// the lack of llvm.amdgcn.lds.kernel.id calls.
- void removeNoLdsKernelIdFromReachable(CallGraph &CG, Function *KernelRoot) {
- KernelRoot->removeFnAttr("amdgpu-no-lds-kernel-id");
-
- SmallVector<Function *> WorkList({CG[KernelRoot]->getFunction()});
- SmallPtrSet<Function *, 8> Visited;
- bool SeenUnknownCall = false;
-
- while (!WorkList.empty()) {
- Function *F = WorkList.pop_back_val();
-
- for (auto &CallRecord : *CG[F]) {
- if (!CallRecord.second)
- continue;
-
- Function *Callee = CallRecord.second->getFunction();
- if (!Callee) {
- if (!SeenUnknownCall) {
- SeenUnknownCall = true;
-
- // If we see any indirect calls, assume nothing about potential
- // targets.
- // TODO: This could be refined to possible LDS global users.
- for (auto &ExternalCallRecord : *CG.getExternalCallingNode()) {
- Function *PotentialCallee =
- ExternalCallRecord.second->getFunction();
- assert(PotentialCallee);
- if (!isKernelLDS(PotentialCallee))
- PotentialCallee->removeFnAttr("amdgpu-no-lds-kernel-id");
- }
- }
- } else {
- Callee->removeFnAttr("amdgpu-no-lds-kernel-id");
- if (Visited.insert(Callee).second)
- WorkList.push_back(Callee);
- }
- }
- }
- }
-
DenseMap<Function *, GlobalVariable *> lowerDynamicLDSVariables(
Module &M, LDSUsesInfoTy &LDSUsesInfo,
DenseSet<Function *> const &KernelsThatIndirectlyAllocateDynamicLDS,
@@ -1059,7 +1017,7 @@ public:
//
// TODO: We could filter out subgraphs that do not access LDS globals.
for (Function *F : KernelsThatAllocateTableLDS)
- removeNoLdsKernelIdFromReachable(CG, F);
+ removeFnAttrFromReachable(CG, F, "amdgpu-no-lds-kernel-id");
}
DenseMap<Function *, GlobalVariable *> KernelToCreatedDynamicLDS =
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 56345d14a331..7ebd674757fb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -3041,6 +3041,11 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
case AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE:
+ case AMDGPU::G_AMDGPU_BUFFER_LOAD_TFE:
+ case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT_TFE:
+ case AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT_TFE:
+ case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE_TFE:
+ case AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE_TFE:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_FORMAT:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_FORMAT_TFE:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_FORMAT_D16:
@@ -4323,6 +4328,11 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT:
+ case AMDGPU::G_AMDGPU_BUFFER_LOAD_TFE:
+ case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE_TFE:
+ case AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE_TFE:
+ case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT_TFE:
+ case AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT_TFE:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_FORMAT:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_FORMAT_TFE:
case AMDGPU::G_AMDGPU_BUFFER_LOAD_FORMAT_D16:
@@ -5132,6 +5142,8 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
break;
}
+ case Intrinsic::amdgcn_pops_exiting_wave_id:
+ return getDefaultMappingSOP(MI);
default:
return getInvalidInstructionMapping();
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSplitModule.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSplitModule.cpp
new file mode 100644
index 000000000000..dab773f28752
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSplitModule.cpp
@@ -0,0 +1,744 @@
+//===- AMDGPUSplitModule.cpp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Implements a module splitting algorithm designed to support the
+/// FullLTO --lto-partitions option for parallel codegen. This is completely
+/// different from the common SplitModule pass, as this system is designed with
+/// AMDGPU in mind.
+///
+/// The basic idea of this module splitting implementation is the same as
+/// SplitModule: load-balance the module's functions across a set of N
+/// partitions to allow parallel codegen. However, it does it very
+/// differently than the target-agnostic variant:
+/// - Kernels are used as the module's "roots".
+/// They're known entry points on AMDGPU, and everything else is often
+/// internal only.
+/// - Each kernel has a set of dependencies, and when a kernel and its
+/// dependencies are considered "big", we try to put it in a partition where
+/// most dependencies are already imported, to avoid duplicating large
+/// amounts of code.
+/// - There's special care for indirect calls in order to ensure
+/// AMDGPUResourceUsageAnalysis can work correctly.
+///
+/// This file also includes a more elaborate logging system to enable
+/// users to easily generate logs that (if desired) do not include any value
+/// names, in order to not leak information about the source file.
+/// Such logs are very helpful to understand and fix potential issues with
+/// module splitting.
+
+#include "AMDGPUSplitModule.h"
+#include "AMDGPUTargetMachine.h"
+#include "Utils/AMDGPUBaseInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/SHA256.h"
+#include "llvm/Support/Threading.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <memory>
+#include <utility>
+#include <vector>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "amdgpu-split-module"
+
+namespace {
+
+static cl::opt<float> LargeKernelFactor(
+ "amdgpu-module-splitting-large-kernel-threshold", cl::init(2.0f),
+ cl::Hidden,
+ cl::desc(
+ "consider a kernel as large and needing special treatment when it "
+ "exceeds the average cost of a partition by this factor; e;g. 2.0 "
+ "means if the kernel and its dependencies is 2 times bigger than "
+ "an average partition; 0 disables large kernels handling entirely"));
+
+static cl::opt<float> LargeKernelOverlapForMerge(
+ "amdgpu-module-splitting-large-kernel-merge-overlap", cl::init(0.8f),
+ cl::Hidden,
+ cl::desc("defines how much overlap between two large kernel's dependencies "
+ "is needed to put them in the same partition"));
+
+static cl::opt<bool> NoExternalizeGlobals(
+ "amdgpu-module-splitting-no-externalize-globals", cl::Hidden,
+ cl::desc("disables externalization of global variable with local linkage; "
+ "may cause globals to be duplicated which increases binary size"));
+
+static cl::opt<std::string>
+ LogDirOpt("amdgpu-module-splitting-log-dir", cl::Hidden,
+ cl::desc("output directory for AMDGPU module splitting logs"));
+
+static cl::opt<bool>
+ LogPrivate("amdgpu-module-splitting-log-private", cl::Hidden,
+ cl::desc("hash value names before printing them in the AMDGPU "
+ "module splitting logs"));
+
+using CostType = InstructionCost::CostType;
+using PartitionID = unsigned;
+
+static bool isEntryPoint(const Function *F) {
+ return AMDGPU::isEntryFunctionCC(F->getCallingConv());
+}
+
+static std::string getName(const Value &V) {
+ static bool HideNames;
+
+ static llvm::once_flag HideNameInitFlag;
+ llvm::call_once(HideNameInitFlag, [&]() {
+ if (LogPrivate.getNumOccurrences())
+ HideNames = LogPrivate;
+ else {
+ const auto EV = sys::Process::GetEnv("AMD_SPLIT_MODULE_LOG_PRIVATE");
+ HideNames = (EV.value_or("0") != "0");
+ }
+ });
+
+ if (!HideNames)
+ return V.getName().str();
+ return toHex(SHA256::hash(arrayRefFromStringRef(V.getName())),
+ /*LowerCase=*/true);
+}
+
+/// Main logging helper.
+///
+/// Logging can be configured by the following environment variable.
+/// AMD_SPLIT_MODULE_LOG_DIR=<filepath>
+/// If set, uses <filepath> as the directory to write logfiles to
+/// each time module splitting is used.
+/// AMD_SPLIT_MODULE_LOG_PRIVATE
+/// If set to anything other than zero, all names are hidden.
+///
+/// Both environment variables have corresponding CL options which
+/// take priority over them.
+///
+/// Any output printed to the log files is also printed to dbgs() when -debug is
+/// used and LLVM_DEBUG is defined.
+///
+/// This approach has a small disadvantage over LLVM_DEBUG though: logging logic
+/// cannot be removed from the code (by building without debug). This probably
+/// has a small performance cost because if some computation/formatting is
+/// needed for logging purposes, it may be done every time only to be ignored
+/// by the logger.
+///
+/// As this pass only runs once and is not doing anything computationally
+/// expensive, this is likely a reasonable trade-off.
+///
+/// If some computation should really be avoided when unused, users of the class
+/// can check whether any logging will occur by using the bool operator.
+///
+/// \code
+/// if (SML) {
+/// // Executes only if logging to a file or if -debug is available and
+/// used.
+/// }
+/// \endcode
+class SplitModuleLogger {
+public:
+ SplitModuleLogger(const Module &M) {
+ std::string LogDir = LogDirOpt;
+ if (LogDir.empty())
+ LogDir = sys::Process::GetEnv("AMD_SPLIT_MODULE_LOG_DIR").value_or("");
+
+ // No log dir specified means we don't need to log to a file.
+ // We may still log to dbgs(), though.
+ if (LogDir.empty())
+ return;
+
+ // If a log directory is specified, create a new file with a unique name in
+ // that directory.
+ int Fd;
+ SmallString<0> PathTemplate;
+ SmallString<0> RealPath;
+ sys::path::append(PathTemplate, LogDir, "Module-%%-%%-%%-%%-%%-%%-%%.txt");
+ if (auto Err =
+ sys::fs::createUniqueFile(PathTemplate.str(), Fd, RealPath)) {
+ report_fatal_error("Failed to create log file at '" + Twine(LogDir) +
+ "': " + Err.message(),
+ /*CrashDiag=*/false);
+ }
+
+ FileOS = std::make_unique<raw_fd_ostream>(Fd, /*shouldClose=*/true);
+ }
+
+ bool hasLogFile() const { return FileOS != nullptr; }
+
+ raw_ostream &logfile() {
+ assert(FileOS && "no logfile!");
+ return *FileOS;
+ }
+
+ /// \returns true if this SML will log anything either to a file or dbgs().
+ /// Can be used to avoid expensive computations that are ignored when logging
+ /// is disabled.
+ operator bool() const {
+ return hasLogFile() || (DebugFlag && isCurrentDebugType(DEBUG_TYPE));
+ }
+
+private:
+ std::unique_ptr<raw_fd_ostream> FileOS;
+};
+
+template <typename Ty>
+static SplitModuleLogger &operator<<(SplitModuleLogger &SML, const Ty &Val) {
+ static_assert(
+ !std::is_same_v<Ty, Value>,
+ "do not print values to logs directly, use handleName instead!");
+ LLVM_DEBUG(dbgs() << Val);
+ if (SML.hasLogFile())
+ SML.logfile() << Val;
+ return SML;
+}
+
+/// Calculate the cost of each function in \p M
+/// \param SML Log Helper
+/// \param TM TargetMachine instance used to retrieve TargetTransformInfo.
+/// \param M Module to analyze.
+/// \param CostMap[out] Resulting Function -> Cost map.
+/// \return The module's total cost.
+static CostType
+calculateFunctionCosts(SplitModuleLogger &SML, const AMDGPUTargetMachine &TM,
+ Module &M,
+ DenseMap<const Function *, CostType> &CostMap) {
+ CostType ModuleCost = 0;
+ CostType KernelCost = 0;
+
+ for (auto &Fn : M) {
+ if (Fn.isDeclaration())
+ continue;
+
+ CostType FnCost = 0;
+ TargetTransformInfo TTI = TM.getTargetTransformInfo(Fn);
+
+ for (const auto &BB : Fn) {
+ for (const auto &I : BB) {
+ auto Cost =
+ TTI.getInstructionCost(&I, TargetTransformInfo::TCK_CodeSize);
+ assert(Cost != InstructionCost::getMax());
+ // Assume expensive if we can't tell the cost of an instruction.
+ CostType CostVal =
+ Cost.getValue().value_or(TargetTransformInfo::TCC_Expensive);
+ assert((FnCost + CostVal) >= FnCost && "Overflow!");
+ FnCost += CostVal;
+ }
+ }
+
+ assert(FnCost != 0);
+
+ CostMap[&Fn] = FnCost;
+ assert((ModuleCost + FnCost) >= ModuleCost && "Overflow!");
+ ModuleCost += FnCost;
+
+ if (isEntryPoint(&Fn))
+ KernelCost += FnCost;
+ }
+
+ CostType FnCost = (ModuleCost - KernelCost);
+ SML << "=> Total Module Cost: " << ModuleCost << '\n'
+ << " => KernelCost: " << KernelCost << " ("
+ << format("%0.2f", (float(KernelCost) / ModuleCost) * 100) << "%)\n"
+ << " => FnsCost: " << FnCost << " ("
+ << format("%0.2f", (float(FnCost) / ModuleCost) * 100) << "%)\n";
+
+ return ModuleCost;
+}
+
+static bool canBeIndirectlyCalled(const Function &F) {
+ if (F.isDeclaration() || isEntryPoint(&F))
+ return false;
+ return !F.hasLocalLinkage() ||
+ F.hasAddressTaken(/*PutOffender=*/nullptr,
+ /*IgnoreCallbackUses=*/false,
+ /*IgnoreAssumeLikeCalls=*/true,
+ /*IgnoreLLVMUsed=*/true,
+ /*IgnoreARCAttachedCall=*/false,
+ /*IgnoreCastedDirectCall=*/true);
+}
+
+/// When a kernel or any of its callees performs an indirect call, this function
+/// takes over \ref addAllDependencies and adds all potentially callable
+/// functions to \p Fns so they can be counted as dependencies of the kernel.
+///
+/// This is needed due to how AMDGPUResourceUsageAnalysis operates: in the
+/// presence of an indirect call, the function's resource usage is the same as
+/// the most expensive function in the module.
+/// \param M The module.
+/// \param Fns[out] Resulting list of functions.
+static void addAllIndirectCallDependencies(const Module &M,
+ DenseSet<const Function *> &Fns) {
+ for (const auto &Fn : M) {
+ if (canBeIndirectlyCalled(Fn))
+ Fns.insert(&Fn);
+ }
+}
+
+/// Adds the functions that \p Fn may call to \p Fns, then recurses into each
+/// callee until all reachable functions have been gathered.
+///
+/// \param SML Log Helper
+/// \param CG Call graph for \p Fn's module.
+/// \param Fn Current function to look at.
+/// \param Fns[out] Resulting list of functions.
+/// \param HadIndirectCall[out] Set to true if an indirect call was seen at some
+/// point, either in \p Fn or in one of the functions it calls. When that
+/// happens, we fall back to adding all callable functions inside \p Fn's module
+/// to \p Fns.
+static void addAllDependencies(SplitModuleLogger &SML, const CallGraph &CG,
+ const Function &Fn,
+ DenseSet<const Function *> &Fns,
+ bool &HadIndirectCall) {
+ assert(!Fn.isDeclaration());
+
+ const Module &M = *Fn.getParent();
+ SmallVector<const Function *> WorkList({&Fn});
+ while (!WorkList.empty()) {
+ const auto &CurFn = *WorkList.pop_back_val();
+ assert(!CurFn.isDeclaration());
+
+ // Scan for an indirect call. If such a call is found, we have to
+ // conservatively assume this can call all non-entrypoint functions in the
+ // module.
+
+ for (auto &CGEntry : *CG[&CurFn]) {
+ auto *CGNode = CGEntry.second;
+ auto *Callee = CGNode->getFunction();
+ if (!Callee) {
+ // Functions have an edge towards CallsExternalNode if they're external
+ // declarations, or if they do an indirect call. As we only process
+ // definitions here, we know this means the function has an indirect
+ // call. We then have to conservatively assume this can call all
+ // non-entrypoint functions in the module.
+ if (CGNode != CG.getCallsExternalNode())
+ continue; // this is another function-less node we don't care about.
+
+ SML << "Indirect call detected in " << getName(CurFn)
+ << " - treating all non-entrypoint functions as "
+ "potential dependencies\n";
+
+ // TODO: Print an ORE as well?
+ addAllIndirectCallDependencies(M, Fns);
+ HadIndirectCall = true;
+ return;
+ }
+
+ if (Callee->isDeclaration())
+ continue;
+
+ auto [It, Inserted] = Fns.insert(Callee);
+ if (Inserted)
+ WorkList.push_back(Callee);
+ }
+ }
+}
+
+/// Contains information about a kernel and its dependencies.
+struct KernelWithDependencies {
+ KernelWithDependencies(SplitModuleLogger &SML, CallGraph &CG,
+ const DenseMap<const Function *, CostType> &FnCosts,
+ const Function *Fn)
+ : Fn(Fn) {
+ addAllDependencies(SML, CG, *Fn, Dependencies, HasIndirectCall);
+ TotalCost = FnCosts.at(Fn);
+ for (const auto *Dep : Dependencies) {
+ TotalCost += FnCosts.at(Dep);
+
+ // We cannot duplicate functions with external linkage, or functions that
+ // may be overridden at runtime.
+ HasNonDuplicatableDependecy |=
+ (Dep->hasExternalLinkage() || !Dep->isDefinitionExact());
+ }
+ }
+
+ const Function *Fn = nullptr;
+ DenseSet<const Function *> Dependencies;
+ /// Whether \p Fn or any of its \ref Dependencies contains an indirect call.
+ bool HasIndirectCall = false;
+ /// Whether any of \p Fn's dependencies cannot be duplicated.
+ bool HasNonDuplicatableDependecy = false;
+
+ CostType TotalCost = 0;
+
+ /// \returns true if this kernel and its dependencies can be considered large
+ /// according to \p Threshold.
+ bool isLarge(CostType Threshold) const {
+ return TotalCost > Threshold && !Dependencies.empty();
+ }
+};
+
+/// Calculates how much overlap there is between \p A and \p B.
+/// \return A number between 0.0 and 1.0, where 1.0 means A == B and 0.0 means A
+/// and B have no shared elements. Kernels do not count in overlap calculation.
+static float calculateOverlap(const DenseSet<const Function *> &A,
+ const DenseSet<const Function *> &B) {
+ DenseSet<const Function *> Total;
+ for (const auto *F : A) {
+ if (!isEntryPoint(F))
+ Total.insert(F);
+ }
+
+ if (Total.empty())
+ return 0.0f;
+
+ unsigned NumCommon = 0;
+ for (const auto *F : B) {
+ if (isEntryPoint(F))
+ continue;
+
+ auto [It, Inserted] = Total.insert(F);
+ if (!Inserted)
+ ++NumCommon;
+ }
+
+ return static_cast<float>(NumCommon) / Total.size();
+}
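A tiny worked instance of this metric, with plain std::set standing in for the DenseSet of functions: for A = {f, g, h} and B = {g, h, k}, two of the four distinct dependencies are shared, so the overlap is 0.5 (sketch only, not patch code).

    #include <cassert>
    #include <set>
    #include <string>

    int main() {
      std::set<std::string> Total = {"f", "g", "h"}; // A's dependencies
      const std::set<std::string> B = {"g", "h", "k"};
      unsigned NumCommon = 0;
      for (const auto &F : B)
        if (!Total.insert(F).second) // already present -> shared
          ++NumCommon;
      assert(NumCommon == 2 && Total.size() == 4);
      return 0;
    }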
+
+/// Performs all of the partitioning work on \p M.
+/// \param SML Log Helper
+/// \param M Module to partition.
+/// \param NumParts Number of partitions to create.
+/// \param ModuleCost Total cost of all functions in \p M.
+/// \param FnCosts Map of Function -> Cost
+/// \param WorkList Kernels and their dependencies to process in order.
+/// \returns The created partitions (a vector of size \p NumParts )
+static std::vector<DenseSet<const Function *>>
+doPartitioning(SplitModuleLogger &SML, Module &M, unsigned NumParts,
+ CostType ModuleCost,
+ const DenseMap<const Function *, CostType> &FnCosts,
+ const SmallVector<KernelWithDependencies> &WorkList) {
+
+ SML << "\n--Partitioning Starts--\n";
+
+ // Calculate a "large kernel threshold". When more than one kernel's total
+ // import cost exceeds this value, we will try to merge it with other,
+ // similarly large kernels.
+ //
+ // e.g. let two kernels X and Y have a import cost of ~10% of the module, we
+ // assign X to a partition as usual, but when we get to Y, we check if it's
+ // worth also putting it in Y's partition.
+ const CostType LargeKernelThreshold =
+ LargeKernelFactor ? CostType(((ModuleCost / NumParts) * LargeKernelFactor))
+ : std::numeric_limits<CostType>::max();
+
+ std::vector<DenseSet<const Function *>> Partitions;
+ Partitions.resize(NumParts);
+
+ // Assign a partition to each kernel, and try to keep the partitions more or
+ // less balanced. We do that through a priority queue sorted in reverse, so we
+ // can always look at the partition with the least content.
+ //
+ // There are some cases where we will be deliberately unbalanced though.
+ // - Large kernels: we try to merge with existing partitions to reduce code
+ // duplication.
+ // - Kernels with indirect or external calls always go in the first partition
+ // (P0).
+ auto ComparePartitions = [](const std::pair<PartitionID, CostType> &a,
+ const std::pair<PartitionID, CostType> &b) {
+ // When two partitions have the same cost, assign to the one with the
+ // biggest ID first. This allows us to put things in P0 last, because P0 may
+ // have other stuff added later.
+ if (a.second == b.second)
+ return a.first < b.first;
+ return a.second > b.second;
+ };
+
+ // We can't use priority_queue here because we need to be able to access any
+ // element. This makes this a bit inefficient as we need to sort it again
+ // every time we change it, but it's a very small array anyway (likely under 64
+ // partitions) so it's a cheap operation.
+ std::vector<std::pair<PartitionID, CostType>> BalancingQueue;
+ for (unsigned I = 0; I < NumParts; ++I)
+ BalancingQueue.push_back(std::make_pair(I, 0));
+
+ // Helper function to handle assigning a kernel to a partition. This takes
+ // care of updating the balancing queue.
+ const auto AssignToPartition = [&](PartitionID PID,
+ const KernelWithDependencies &KWD) {
+ auto &FnsInPart = Partitions[PID];
+ FnsInPart.insert(KWD.Fn);
+ FnsInPart.insert(KWD.Dependencies.begin(), KWD.Dependencies.end());
+
+ SML << "assign " << getName(*KWD.Fn) << " to P" << PID << "\n -> ";
+ if (!KWD.Dependencies.empty()) {
+ SML << KWD.Dependencies.size() << " dependencies added\n";
+ }
+
+ // Update the balancing queue. We scan backwards because in the common case
+ // the partition is at the end.
+ for (auto &[QueuePID, Cost] : reverse(BalancingQueue)) {
+ if (QueuePID == PID) {
+ CostType NewCost = 0;
+ for (auto *Fn : Partitions[PID])
+ NewCost += FnCosts.at(Fn);
+
+ SML << "[Updating P" << PID << " Cost]:" << Cost << " -> " << NewCost;
+ if (Cost) {
+ SML << " (" << unsigned(((float(NewCost) / Cost) - 1) * 100)
+ << "% increase)";
+ }
+ SML << '\n';
+
+ Cost = NewCost;
+ }
+ }
+
+ sort(BalancingQueue, ComparePartitions);
+ };
+
+ for (auto &CurKernel : WorkList) {
+ // When a kernel has indirect calls, it must stay in the first partition
+ // alongside every reachable non-entry function. This is a nightmare case
+ // for splitting as it severely limits what we can do.
+ if (CurKernel.HasIndirectCall) {
+ SML << "Kernel with indirect call(s): " << getName(*CurKernel.Fn)
+ << " defaulting to P0\n";
+ AssignToPartition(0, CurKernel);
+ continue;
+ }
+
+ // When a kernel has non-duplicatable dependencies, we have to keep it in
+ // the first partition as well. This is a conservative approach, a
+ // finer-grained approach could keep track of which dependencies are
+ // non-duplicatable exactly and just make sure they're grouped together.
+ if (CurKernel.HasNonDuplicatableDependecy) {
+ SML << "Kernel with externally visible dependency "
+ << getName(*CurKernel.Fn) << " defaulting to P0\n";
+ AssignToPartition(0, CurKernel);
+ continue;
+ }
+
+ // Be smart with large kernels to avoid duplicating their dependencies.
+ if (CurKernel.isLarge(LargeKernelThreshold)) {
+ assert(LargeKernelOverlapForMerge >= 0.0f &&
+ LargeKernelOverlapForMerge <= 1.0f);
+ SML << "Large Kernel: " << getName(*CurKernel.Fn)
+ << " - looking for partition with at least "
+ << format("%0.2f", LargeKernelOverlapForMerge * 100) << "% overlap\n";
+
+ bool Assigned = false;
+ for (const auto &[PID, Fns] : enumerate(Partitions)) {
+ float Overlap = calculateOverlap(CurKernel.Dependencies, Fns);
+ SML << " => " << format("%0.2f", Overlap * 100) << "% overlap with P"
+ << PID << '\n';
+ if (Overlap > LargeKernelOverlapForMerge) {
+ SML << " selecting P" << PID << '\n';
+ AssignToPartition(PID, CurKernel);
+ Assigned = true;
+ }
+ }
+
+ if (Assigned)
+ continue;
+ }
+
+ // Normal "load-balancing", assign to partition with least pressure.
+ auto [PID, CurCost] = BalancingQueue.back();
+ AssignToPartition(PID, CurKernel);
+ }
+
+ // Work is mostly done now; verify the partitioning and add all functions we may
+ // have missed (= unreachable, or we don't understand how they're reached) to
+ // P0.
+ DenseSet<const Function *> AllFunctions;
+ for (const auto &[Idx, Part] : enumerate(Partitions)) {
+ CostType Cost = 0;
+ for (auto *Fn : Part) {
+ // External linkage functions should exclusively be in the first partition
+ // at this stage. In theory, we should only ever see external linkage
+ // functions here if they're kernels, or if they've been added due to a
+ // kernel using indirect calls somewhere in its CallGraph.
+ assert(Idx == 0 || (!Fn->hasExternalLinkage() || isEntryPoint(Fn)));
+ Cost += FnCosts.at(Fn);
+ }
+ SML << "P" << Idx << " has a total cost of " << Cost << " ("
+ << format("%0.2f", (float(Cost) / ModuleCost) * 100)
+ << "% of source module)\n";
+ AllFunctions.insert(Part.begin(), Part.end());
+ }
+
+ // Add missed functions to P0. This will take care of adding things like
+ // external functions with no callers in the module to P0. This should be
+ // fairly rare as AMDGPU internalizes everything in most cases, so unused
+ // internal functions would get removed.
+ for (auto &Fn : M) {
+ if (!Fn.isDeclaration() && !AllFunctions.contains(&Fn)) {
+ SML << getName(Fn) << " has no partition assigned, defaulting to P0\n";
+ Partitions[0].insert(&Fn);
+ }
+ }
+
+ SML << "--Partitioning Done--\n\n";
+
+ return Partitions;
+}
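A minimal standalone check of the balancing-queue ordering used above (plain std::sort, no LLVM types; sketch only): ties sort lower IDs first, so among equally loaded partitions the highest-numbered one sits nearer the back and is assigned before P0.

    #include <algorithm>
    #include <cassert>
    #include <utility>
    #include <vector>

    int main() {
      using Entry = std::pair<unsigned, long>; // {PartitionID, Cost}
      std::vector<Entry> Queue = {{0, 10}, {1, 10}, {2, 5}};
      std::sort(Queue.begin(), Queue.end(),
                [](const Entry &A, const Entry &B) {
                  if (A.second == B.second)
                    return A.first < B.first; // ties: smaller ID first
                  return A.second > B.second; // otherwise: bigger cost first
                });
      assert(Queue.back().first == 2); // least loaded, gets the next kernel
      assert(Queue[1].first == 1);     // among the tie, P1 is taken before P0
      return 0;
    }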
+
+static void externalize(GlobalValue &GV) {
+ if (GV.hasLocalLinkage()) {
+ GV.setLinkage(GlobalValue::ExternalLinkage);
+ GV.setVisibility(GlobalValue::HiddenVisibility);
+ }
+
+ // Unnamed entities must be named consistently between modules. setName will
+ // give a distinct name to each such entity.
+ if (!GV.hasName())
+ GV.setName("__llvmsplit_unnamed");
+}
+} // end anonymous namespace
+
+void llvm::splitAMDGPUModule(
+ const AMDGPUTargetMachine &TM, Module &M, unsigned N,
+ function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback) {
+
+ SplitModuleLogger SML(M);
+
+ CallGraph CG(M);
+
+ // Externalize functions whose address are taken.
+ //
+ // This is needed because partitioning is purely based on calls, but sometimes
+ // a kernel/function may just look at the address of another local function
+ // and not do anything (no calls). After partitioning, that local function may
+ // end up in a different module (so it's just a declaration in the module
+ // where its address is taken), which emits an "undefined hidden symbol" linker
+ // error.
+ //
+ // Additionally, it guides partitioning to not duplicate this function if it's
+ // called directly at some point.
+ for (auto &Fn : M) {
+ if (Fn.hasAddressTaken()) {
+ if (Fn.hasLocalLinkage()) {
+ SML << "[externalize] " << Fn.getName()
+ << " because its address is taken\n";
+ }
+ externalize(Fn);
+ }
+ }
+
+ // Externalize local GVs, which avoids duplicating their initializers, which
+ // in turn helps keep code size in check.
+ if (!NoExternalizeGlobals) {
+ for (auto &GV : M.globals()) {
+ if (GV.hasLocalLinkage())
+ SML << "[externalize] GV " << GV.getName() << '\n';
+ externalize(GV);
+ }
+ }
+
+ // Start by calculating the cost of every function in the module, as well as
+ // the module's overall cost.
+ DenseMap<const Function *, CostType> FnCosts;
+ const CostType ModuleCost = calculateFunctionCosts(SML, TM, M, FnCosts);
+
+ // Gather every kernel into a WorkList, then sort it by descending total cost
+ // of the kernel so the biggest kernels are seen first.
+ SmallVector<KernelWithDependencies> WorkList;
+ for (auto &Fn : M) {
+ if (isEntryPoint(&Fn) && !Fn.isDeclaration())
+ WorkList.emplace_back(SML, CG, FnCosts, &Fn);
+ }
+ sort(WorkList, [&](auto &A, auto &B) {
+ // Sort by total cost, and if the total cost is identical, sort
+ // alphabetically.
+ if (A.TotalCost == B.TotalCost)
+ return A.Fn->getName() < B.Fn->getName();
+ return A.TotalCost > B.TotalCost;
+ });
+
+ if (SML) {
+ SML << "Worklist\n";
+ for (const auto &KWD : WorkList) {
+ SML << "[Kernel] " << getName(*KWD.Fn) << " (totalCost:" << KWD.TotalCost
+ << " indirect:" << KWD.HasIndirectCall
+ << " hasNonDuplicatableDep:" << KWD.HasNonDuplicatableDependecy
+ << ")\n";
+ for (const auto *Dep : KWD.Dependencies)
+ SML << " [Dep] " << getName(*Dep) << '\n';
+ }
+ }
+
+ // This performs all of the partitioning work.
+ auto Partitions = doPartitioning(SML, M, N, ModuleCost, FnCosts, WorkList);
+ assert(Partitions.size() == N);
+
+ // If we didn't externalize GVs, then local GVs need to be conservatively
+ // imported into every module (including their initializers), and then cleaned
+ // up afterwards.
+ const auto NeedsConservativeImport = [&](const GlobalValue *GV) {
+ // We conservatively import private/internal GVs into every module and clean
+ // them up afterwards.
+ const auto *Var = dyn_cast<GlobalVariable>(GV);
+ return Var && Var->hasLocalLinkage();
+ };
+
+ SML << "Creating " << N << " modules...\n";
+ unsigned TotalFnImpls = 0;
+ for (unsigned I = 0; I < N; ++I) {
+ const auto &FnsInPart = Partitions[I];
+
+ ValueToValueMapTy VMap;
+ std::unique_ptr<Module> MPart(
+ CloneModule(M, VMap, [&](const GlobalValue *GV) {
+ // Functions go in their assigned partition.
+ if (const auto *Fn = dyn_cast<Function>(GV)) {
+// Check we don't import an external linkage function in any
+// partition other than P0.
+#ifndef NDEBUG
+ if (Fn->hasExternalLinkage() && !isEntryPoint(Fn)) {
+ assert((I == 0) == FnsInPart.contains(Fn));
+ }
+#endif
+ return FnsInPart.contains(Fn);
+ }
+
+ if (NeedsConservativeImport(GV))
+ return true;
+
+ // Everything else goes in the first partition.
+ return I == 0;
+ }));
+
+ // Clean-up conservatively imported GVs without any users.
+ for (auto &GV : make_early_inc_range(MPart->globals())) {
+ if (NeedsConservativeImport(&GV) && GV.use_empty())
+ GV.eraseFromParent();
+ }
+
+ unsigned NumAllFns = 0, NumKernels = 0;
+ for (auto &Cur : *MPart) {
+ if (!Cur.isDeclaration()) {
+ ++NumAllFns;
+ if (isEntryPoint(&Cur))
+ ++NumKernels;
+ }
+ }
+ TotalFnImpls += NumAllFns;
+ SML << " - Module " << I << " with " << NumAllFns << " functions ("
+ << NumKernels << " kernels)\n";
+ ModuleCallback(std::move(MPart));
+ }
+
+ SML << TotalFnImpls << " function definitions across all modules ("
+ << format("%0.2f", (float(TotalFnImpls) / FnCosts.size()) * 100)
+ << "% of original module)\n";
+}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSplitModule.h b/llvm/lib/Target/AMDGPU/AMDGPUSplitModule.h
new file mode 100644
index 000000000000..6171643bd4ad
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSplitModule.h
@@ -0,0 +1,30 @@
+//===- AMDGPUSplitModule.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_AMDGPUSPLITMODULE_H
+#define LLVM_TARGET_AMDGPUSPLITMODULE_H
+
+#include "llvm/ADT/STLFunctionalExtras.h"
+#include <memory>
+
+namespace llvm {
+
+class Module;
+class AMDGPUTargetMachine;
+
+/// Splits the module M into N linkable partitions. The function ModuleCallback
+/// is called N times passing each individual partition as the MPart argument.
+void splitAMDGPUModule(
+ const AMDGPUTargetMachine &TM, Module &M, unsigned N,
+ function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback);
+
+} // end namespace llvm
+
+#endif // LLVM_TARGET_AMDGPUSPLITMODULE_H
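A hypothetical caller sketch for this entry point; TM, M, and the Parts vector are illustrative assumptions, and the real in-tree caller is TargetMachine::splitModule below.

    // Collect the N partitions for parallel codegen (sketch only).
    std::vector<std::unique_ptr<llvm::Module>> Parts;
    llvm::splitAMDGPUModule(TM, M, /*N=*/4,
                            [&](std::unique_ptr<llvm::Module> MPart) {
                              // Invoked exactly once per partition.
                              Parts.push_back(std::move(MPart));
                            });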
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 305a6c8c3b92..dbbfe34a6386 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -15,11 +15,13 @@
#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
+#include "AMDGPUCodeGenPassBuilder.h"
#include "AMDGPUCtorDtorLowering.h"
#include "AMDGPUExportClustering.h"
#include "AMDGPUIGroupLP.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPURegBankSelect.h"
+#include "AMDGPUSplitModule.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "AMDGPUUnifyDivergentExitNodes.h"
@@ -646,6 +648,14 @@ parseAMDGPUAtomicOptimizerStrategy(StringRef Params) {
return make_error<StringError>("invalid parameter", inconvertibleErrorCode());
}
+Error AMDGPUTargetMachine::buildCodeGenPipeline(
+ ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
+ CodeGenFileType FileType, const CGPassBuilderOption &Opts,
+ PassInstrumentationCallbacks *PIC) {
+ AMDGPUCodeGenPassBuilder CGPB(*this, Opts, PIC);
+ return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
+}
+
void AMDGPUTargetMachine::registerPassBuilderCallbacks(
PassBuilder &PB, bool PopulateClassToPassNames) {
@@ -806,6 +816,13 @@ AMDGPUTargetMachine::getAddressSpaceForPseudoSourceKind(unsigned Kind) const {
return AMDGPUAS::FLAT_ADDRESS;
}
+bool AMDGPUTargetMachine::splitModule(
+ Module &M, unsigned NumParts,
+ function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback) const {
+ splitAMDGPUModule(*this, M, NumParts, ModuleCallback);
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h
index 30ab388c7d52..2cfd232483a8 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h
@@ -52,6 +52,12 @@ public:
return TLOF.get();
}
+ Error buildCodeGenPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out,
+ raw_pwrite_stream *DwoOut,
+ CodeGenFileType FileType,
+ const CGPassBuilderOption &Opts,
+ PassInstrumentationCallbacks *PIC) override;
+
void registerPassBuilderCallbacks(PassBuilder &PB,
bool PopulateClassToPassNames) override;
void registerDefaultAliasAnalyses(AAManager &) override;
@@ -67,6 +73,10 @@ public:
getPredicatedAddrSpace(const Value *V) const override;
unsigned getAddressSpaceForPseudoSourceKind(unsigned Kind) const override;
+
+ bool splitModule(Module &M, unsigned NumParts,
+ function_ref<void(std::unique_ptr<Module> MPart)>
+ ModuleCallback) const override;
};
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 84320d296a03..437e01c37c6b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -1129,31 +1129,56 @@ InstructionCost GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
int Index, VectorType *SubTp,
ArrayRef<const Value *> Args,
const Instruction *CxtI) {
+ if (!isa<FixedVectorType>(VT))
+ return BaseT::getShuffleCost(Kind, VT, Mask, CostKind, Index, SubTp);
+
Kind = improveShuffleKindFromMask(Kind, Mask, VT, Index, SubTp);
- // Treat extractsubvector as single op permutation.
- bool IsExtractSubvector = Kind == TTI::SK_ExtractSubvector;
- if (IsExtractSubvector)
- Kind = TTI::SK_PermuteSingleSrc;
-
- if (ST->hasVOP3PInsts()) {
- if (cast<FixedVectorType>(VT)->getNumElements() == 2 &&
- DL.getTypeSizeInBits(VT->getElementType()) == 16) {
- // With op_sel VOP3P instructions freely can access the low half or high
- // half of a register, so any swizzle is free.
- switch (Kind) {
- case TTI::SK_Broadcast:
- case TTI::SK_Reverse:
- case TTI::SK_PermuteSingleSrc:
+ // Larger vector widths may require additional instructions, but are
+ // typically cheaper than scalarized versions.
+ unsigned NumVectorElts = cast<FixedVectorType>(VT)->getNumElements();
+ if (ST->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS &&
+ DL.getTypeSizeInBits(VT->getElementType()) == 16) {
+ bool HasVOP3P = ST->hasVOP3PInsts();
+ unsigned RequestedElts =
+ count_if(Mask, [](int MaskElt) { return MaskElt != -1; });
+ if (RequestedElts == 0)
+ return 0;
+ switch (Kind) {
+ case TTI::SK_Broadcast:
+ case TTI::SK_Reverse:
+ case TTI::SK_PermuteSingleSrc: {
+ // With op_sel, VOP3P instructions can freely access the low half or high
+ // half of a register, so any swizzle of two elements is free.
+ if (HasVOP3P && NumVectorElts == 2)
return 0;
- default:
- break;
- }
+ unsigned NumPerms = alignTo(RequestedElts, 2) / 2;
+ // SK_Broadcast just reuses the same mask
+ unsigned NumPermMasks = Kind == TTI::SK_Broadcast ? 1 : NumPerms;
+ return NumPerms + NumPermMasks;
+ }
+ case TTI::SK_ExtractSubvector:
+ case TTI::SK_InsertSubvector: {
+ // Even-aligned accesses are free.
+ if (!(Index % 2))
+ return 0;
+ // Insert/extract subvectors only require shift/extract code to get the
+ // relevant bits.
+ return alignTo(RequestedElts, 2) / 2;
+ }
+ case TTI::SK_PermuteTwoSrc:
+ case TTI::SK_Splice:
+ case TTI::SK_Select: {
+ unsigned NumPerms = alignTo(RequestedElts, 2) / 2;
+ // SK_Select just reuses the same mask
+ unsigned NumPermMasks = Kind == TTI::SK_Select ? 1 : NumPerms;
+ return NumPerms + NumPermMasks;
+ }
+
+ default:
+ break;
}
}
- // Restore optimal kind.
- if (IsExtractSubvector)
- Kind = TTI::SK_ExtractSubvector;
return BaseT::getShuffleCost(Kind, VT, Mask, CostKind, Index, SubTp);
}
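A standalone rendering of the permute-cost counting above, with alignTo(x, 2) / 2 written out; treating broadcast/select as reusing a single mask follows the comments in the hunk (sketch only, not patch code).

    #include <cassert>

    // Cost = packed 2 x 16-bit permutes + distinct permute masks.
    unsigned permCost(unsigned RequestedElts, bool ReusesOneMask) {
      unsigned NumPerms = (RequestedElts + 1) / 2; // alignTo(RequestedElts, 2) / 2
      unsigned NumPermMasks = ReusesOneMask ? 1 : NumPerms;
      return NumPerms + NumPermMasks;
    }

    int main() {
      assert(permCost(4, /*ReusesOneMask=*/false) == 4); // full reverse of <4 x i16>
      assert(permCost(4, /*ReusesOneMask=*/true) == 3);  // broadcast into <4 x i16>
      return 0;
    }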
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index c08c35c45984..dcd4b22f4057 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -1340,7 +1340,7 @@ private:
bool ParseDirectiveAMDGCNTarget();
bool ParseDirectiveAMDHSACodeObjectVersion();
bool ParseDirectiveAMDHSAKernel();
- bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
+ bool ParseAMDKernelCodeTValue(StringRef ID, AMDGPUMCKernelCodeT &Header);
bool ParseDirectiveAMDKernelCodeT();
// TODO: Possibly make subtargetHasRegister const.
bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo);
@@ -5863,7 +5863,7 @@ bool AMDGPUAsmParser::ParseDirectiveAMDHSACodeObjectVersion() {
}
bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
- amd_kernel_code_t &Header) {
+ AMDGPUMCKernelCodeT &C) {
// max_scratch_backing_memory_byte_size is deprecated. Ignore it while parsing
// assembly for backwards compatibility.
if (ID == "max_scratch_backing_memory_byte_size") {
@@ -5873,25 +5873,13 @@ bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
SmallString<40> ErrStr;
raw_svector_ostream Err(ErrStr);
- if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
+ if (!C.ParseKernelCodeT(ID, getParser(), Err)) {
return TokError(Err.str());
}
Lex();
- if (ID == "enable_dx10_clamp") {
- if (G_00B848_DX10_CLAMP(Header.compute_pgm_resource_registers) &&
- isGFX12Plus())
- return TokError("enable_dx10_clamp=1 is not allowed on GFX12+");
- }
-
- if (ID == "enable_ieee_mode") {
- if (G_00B848_IEEE_MODE(Header.compute_pgm_resource_registers) &&
- isGFX12Plus())
- return TokError("enable_ieee_mode=1 is not allowed on GFX12+");
- }
-
if (ID == "enable_wavefront_size32") {
- if (Header.code_properties & AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32) {
+ if (C.code_properties & AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32) {
if (!isGFX10Plus())
return TokError("enable_wavefront_size32=1 is only allowed on GFX10+");
if (!getFeatureBits()[AMDGPU::FeatureWavefrontSize32])
@@ -5903,41 +5891,23 @@ bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
}
if (ID == "wavefront_size") {
- if (Header.wavefront_size == 5) {
+ if (C.wavefront_size == 5) {
if (!isGFX10Plus())
return TokError("wavefront_size=5 is only allowed on GFX10+");
if (!getFeatureBits()[AMDGPU::FeatureWavefrontSize32])
return TokError("wavefront_size=5 requires +WavefrontSize32");
- } else if (Header.wavefront_size == 6) {
+ } else if (C.wavefront_size == 6) {
if (!getFeatureBits()[AMDGPU::FeatureWavefrontSize64])
return TokError("wavefront_size=6 requires +WavefrontSize64");
}
}
- if (ID == "enable_wgp_mode") {
- if (G_00B848_WGP_MODE(Header.compute_pgm_resource_registers) &&
- !isGFX10Plus())
- return TokError("enable_wgp_mode=1 is only allowed on GFX10+");
- }
-
- if (ID == "enable_mem_ordered") {
- if (G_00B848_MEM_ORDERED(Header.compute_pgm_resource_registers) &&
- !isGFX10Plus())
- return TokError("enable_mem_ordered=1 is only allowed on GFX10+");
- }
-
- if (ID == "enable_fwd_progress") {
- if (G_00B848_FWD_PROGRESS(Header.compute_pgm_resource_registers) &&
- !isGFX10Plus())
- return TokError("enable_fwd_progress=1 is only allowed on GFX10+");
- }
-
return false;
}
bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
- amd_kernel_code_t Header;
- AMDGPU::initDefaultAMDKernelCodeT(Header, &getSTI());
+ AMDGPUMCKernelCodeT KernelCode;
+ KernelCode.initDefault(&getSTI(), getContext());
while (true) {
// Lex EndOfStatement. This is in a while loop, because lexing a comment
@@ -5951,11 +5921,12 @@ bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
if (ID == ".end_amd_kernel_code_t")
break;
- if (ParseAMDKernelCodeTValue(ID, Header))
+ if (ParseAMDKernelCodeTValue(ID, KernelCode))
return true;
}
- getTargetStreamer().EmitAMDKernelCodeT(Header);
+ KernelCode.validate(&getSTI(), getContext());
+ getTargetStreamer().EmitAMDKernelCodeT(KernelCode);
return false;
}
diff --git a/llvm/lib/Target/AMDGPU/BUFInstructions.td b/llvm/lib/Target/AMDGPU/BUFInstructions.td
index 8eaa113ac181..1fbebc038c18 100644
--- a/llvm/lib/Target/AMDGPU/BUFInstructions.td
+++ b/llvm/lib/Target/AMDGPU/BUFInstructions.td
@@ -1434,6 +1434,15 @@ defm : MUBUF_LoadIntrinsicPat<SIbuffer_load_short, i32, "BUFFER_LOAD_SSHORT">;
defm : MUBUF_LoadIntrinsicPat<SIbuffer_load_ubyte, i32, "BUFFER_LOAD_UBYTE">;
defm : MUBUF_LoadIntrinsicPat<SIbuffer_load_ushort, i32, "BUFFER_LOAD_USHORT">;
+defm : MUBUF_LoadIntrinsicPat<SIbuffer_load_tfe, v2i32, "BUFFER_LOAD_DWORD_TFE">;
+defm : MUBUF_LoadIntrinsicPat<SIbuffer_load_tfe, v3i32, "BUFFER_LOAD_DWORDX2_TFE">;
+defm : MUBUF_LoadIntrinsicPat<SIbuffer_load_tfe, v4i32, "BUFFER_LOAD_DWORDX3_TFE">;
+defm : MUBUF_LoadIntrinsicPat<SIbuffer_load_tfe, v5i32, "BUFFER_LOAD_DWORDX4_TFE">;
+defm : MUBUF_LoadIntrinsicPat<SIbuffer_load_byte_tfe, v2i32, "BUFFER_LOAD_SBYTE_TFE">;
+defm : MUBUF_LoadIntrinsicPat<SIbuffer_load_short_tfe, v2i32, "BUFFER_LOAD_SSHORT_TFE">;
+defm : MUBUF_LoadIntrinsicPat<SIbuffer_load_ubyte_tfe, v2i32, "BUFFER_LOAD_UBYTE_TFE">;
+defm : MUBUF_LoadIntrinsicPat<SIbuffer_load_ushort_tfe, v2i32, "BUFFER_LOAD_USHORT_TFE">;
+
multiclass MUBUF_StoreIntrinsicPat_Common<SDPatternOperator name, ValueType vt,
string opcode, ValueType memoryVt = vt> {
defvar st = !if(!eq(memoryVt, vt), name, mubuf_intrinsic_store<name, memoryVt>);
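[Note] The new *_TFE load patterns select buffer loads that write one extra status dword after the data, which is why each result type is one i32 wider than the payload: v2i32 for a single-dword load up through v5i32 for DWORDX4. A toy consumer-side model of that data/status layout (illustrative only, not LLVM code):

    #include <array>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Model of a TFE load result: N data dwords plus a trailing status dword.
    template <std::size_t N> struct TfeResult {
      std::array<uint32_t, N + 1> Regs;             // matches v(N+1)i32
      bool faulted() const { return Regs[N] != 0; } // status is last
    };

    int main() {
      TfeResult<1> R{{0xDEADBEEF, 0}}; // BUFFER_LOAD_DWORD_TFE -> v2i32
      assert(!R.faulted() && R.Regs[0] == 0xDEADBEEF);
    }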
diff --git a/llvm/lib/Target/AMDGPU/CMakeLists.txt b/llvm/lib/Target/AMDGPU/CMakeLists.txt
index 48325a0928f9..c992352cb78d 100644
--- a/llvm/lib/Target/AMDGPU/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/CMakeLists.txt
@@ -50,6 +50,7 @@ add_llvm_target(AMDGPUCodeGen
AMDGPUAtomicOptimizer.cpp
AMDGPUAttributor.cpp
AMDGPUCallLowering.cpp
+ AMDGPUCodeGenPassBuilder.cpp
AMDGPUCodeGenPrepare.cpp
AMDGPUCombinerHelper.cpp
AMDGPUCtorDtorLowering.cpp
@@ -97,6 +98,7 @@ add_llvm_target(AMDGPUCodeGen
AMDGPURewriteOutArguments.cpp
AMDGPURewriteUndefForPHI.cpp
AMDGPUSetWavePriority.cpp
+ AMDGPUSplitModule.cpp
AMDGPUSubtarget.cpp
AMDGPUTargetMachine.cpp
AMDGPUTargetObjectFile.cpp
@@ -119,6 +121,7 @@ add_llvm_target(AMDGPUCodeGen
GCNVOPDUtils.cpp
R600AsmPrinter.cpp
R600ClauseMergePass.cpp
+ R600CodeGenPassBuilder.cpp
R600ControlFlowFinalizer.cpp
R600EmitClauseMarkers.cpp
R600ExpandSpecialInstrs.cpp
@@ -182,6 +185,7 @@ add_llvm_target(AMDGPUCodeGen
GlobalISel
HipStdPar
IPO
+ IRPrinter
MC
MIRParser
Passes
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index b7548671f2c5..db5b467f2238 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -1312,6 +1312,9 @@ public:
// \returns true if the target has IEEE fminimum/fmaximum instructions
bool hasIEEEMinMax() const { return getGeneration() >= GFX12; }
+ // \returns true if the target has IEEE fminimum3/fmaximum3 instructions
+ bool hasIEEEMinMax3() const { return hasIEEEMinMax(); }
+
// \returns true if the target has WG_RR_MODE kernel descriptor mode bit
bool hasRrWGMode() const { return getGeneration() >= GFX12; }
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
index 02fe7be06280..00e64e3419ba 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
@@ -13,7 +13,6 @@
#include "AMDGPUTargetStreamer.h"
#include "AMDGPUMCKernelDescriptor.h"
#include "AMDGPUPTNote.h"
-#include "AMDKernelCodeT.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDKernelCodeTUtils.h"
#include "llvm/BinaryFormat/AMDGPUMetadataVerifier.h"
@@ -240,10 +239,9 @@ void AMDGPUTargetAsmStreamer::EmitDirectiveAMDHSACodeObjectVersion(
OS << "\t.amdhsa_code_object_version " << COV << '\n';
}
-void
-AMDGPUTargetAsmStreamer::EmitAMDKernelCodeT(const amd_kernel_code_t &Header) {
+void AMDGPUTargetAsmStreamer::EmitAMDKernelCodeT(AMDGPUMCKernelCodeT &Header) {
OS << "\t.amd_kernel_code_t\n";
- dumpAmdKernelCode(&Header, OS, "\t\t");
+ Header.EmitKernelCodeT(OS, getContext());
OS << "\t.end_amd_kernel_code_t\n";
}
@@ -789,12 +787,10 @@ unsigned AMDGPUTargetELFStreamer::getEFlagsV6() {
void AMDGPUTargetELFStreamer::EmitDirectiveAMDGCNTarget() {}
-void
-AMDGPUTargetELFStreamer::EmitAMDKernelCodeT(const amd_kernel_code_t &Header) {
-
+void AMDGPUTargetELFStreamer::EmitAMDKernelCodeT(AMDGPUMCKernelCodeT &Header) {
MCStreamer &OS = getStreamer();
OS.pushSection();
- OS.emitBytes(StringRef((const char*)&Header, sizeof(Header)));
+ Header.EmitKernelCodeT(OS, getContext());
OS.popSection();
}
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
index 706897a5dc1f..e5c90060cb5d 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
@@ -13,8 +13,6 @@
#include "Utils/AMDGPUPALMetadata.h"
#include "llvm/MC/MCStreamer.h"
-struct amd_kernel_code_t;
-
namespace llvm {
class MCELFStreamer;
@@ -23,6 +21,7 @@ class formatted_raw_ostream;
namespace AMDGPU {
+struct AMDGPUMCKernelCodeT;
struct MCKernelDescriptor;
namespace HSAMD {
struct Metadata;
@@ -54,7 +53,7 @@ public:
CodeObjectVersion = COV;
}
- virtual void EmitAMDKernelCodeT(const amd_kernel_code_t &Header){};
+ virtual void EmitAMDKernelCodeT(AMDGPU::AMDGPUMCKernelCodeT &Header) {};
virtual void EmitAMDGPUSymbolType(StringRef SymbolName, unsigned Type){};
@@ -130,7 +129,7 @@ public:
void EmitDirectiveAMDHSACodeObjectVersion(unsigned COV) override;
- void EmitAMDKernelCodeT(const amd_kernel_code_t &Header) override;
+ void EmitAMDKernelCodeT(AMDGPU::AMDGPUMCKernelCodeT &Header) override;
void EmitAMDGPUSymbolType(StringRef SymbolName, unsigned Type) override;
@@ -186,7 +185,7 @@ public:
void EmitDirectiveAMDGCNTarget() override;
- void EmitAMDKernelCodeT(const amd_kernel_code_t &Header) override;
+ void EmitAMDKernelCodeT(AMDGPU::AMDGPUMCKernelCodeT &Header) override;
void EmitAMDGPUSymbolType(StringRef SymbolName, unsigned Type) override;
diff --git a/llvm/lib/Target/AMDGPU/R600CodeGenPassBuilder.cpp b/llvm/lib/Target/AMDGPU/R600CodeGenPassBuilder.cpp
new file mode 100644
index 000000000000..a57b3aa0adb1
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/R600CodeGenPassBuilder.cpp
@@ -0,0 +1,33 @@
+//===-- R600CodeGenPassBuilder.cpp ------ Build R600 CodeGen pipeline -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "R600CodeGenPassBuilder.h"
+#include "R600TargetMachine.h"
+
+using namespace llvm;
+
+R600CodeGenPassBuilder::R600CodeGenPassBuilder(
+ R600TargetMachine &TM, const CGPassBuilderOption &Opts,
+ PassInstrumentationCallbacks *PIC)
+ : CodeGenPassBuilder(TM, Opts, PIC) {
+ Opt.RequiresCodeGenSCCOrder = true;
+}
+
+void R600CodeGenPassBuilder::addPreISel(AddIRPass &addPass) const {
+ // TODO: Add passes pre instruction selection.
+}
+
+void R600CodeGenPassBuilder::addAsmPrinter(AddMachinePass &addPass,
+ CreateMCStreamer) const {
+ // TODO: Add AsmPrinter.
+}
+
+Error R600CodeGenPassBuilder::addInstSelector(AddMachinePass &) const {
+ // TODO: Add instruction selector.
+ return Error::success();
+}
diff --git a/llvm/lib/Target/AMDGPU/R600CodeGenPassBuilder.h b/llvm/lib/Target/AMDGPU/R600CodeGenPassBuilder.h
new file mode 100644
index 000000000000..be7c935c094d
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/R600CodeGenPassBuilder.h
@@ -0,0 +1,32 @@
+//===-- R600CodeGenPassBuilder.h -- Build R600 CodeGen pipeline -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AMDGPU_R600CODEGENPASSBUILDER_H
+#define LLVM_LIB_TARGET_AMDGPU_R600CODEGENPASSBUILDER_H
+
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Passes/CodeGenPassBuilder.h"
+
+namespace llvm {
+
+class R600TargetMachine;
+
+class R600CodeGenPassBuilder
+ : public CodeGenPassBuilder<R600CodeGenPassBuilder, R600TargetMachine> {
+public:
+ R600CodeGenPassBuilder(R600TargetMachine &TM, const CGPassBuilderOption &Opts,
+ PassInstrumentationCallbacks *PIC);
+
+ void addPreISel(AddIRPass &addPass) const;
+ void addAsmPrinter(AddMachinePass &, CreateMCStreamer) const;
+ Error addInstSelector(AddMachinePass &) const;
+};
+
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_AMDGPU_R600CODEGENPASSBUILDER_H
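[Note] R600CodeGenPassBuilder hooks into the new-pass-manager pipeline through CRTP: CodeGenPassBuilder<Derived, TM> statically dispatches hooks such as addPreISel to the derived class, so unimplemented hooks fall back to the base with no virtual-call overhead. A stripped-down sketch of the dispatch pattern (generic C++, not the actual LLVM template):

    #include <iostream>

    template <typename Derived> struct PassBuilderBase {
      void buildPipeline() {
        // Statically dispatch to the target-specific hooks.
        static_cast<Derived *>(this)->addPreISel();
        static_cast<Derived *>(this)->addInstSelector();
      }
      void addPreISel() {}      // base default: nothing to add
      void addInstSelector() {} // base default: nothing to add
    };

    struct R600Builder : PassBuilderBase<R600Builder> {
      void addPreISel() { std::cout << "R600 pre-ISel passes\n"; }
      // addInstSelector() intentionally left to the base default.
    };

    int main() { R600Builder().buildPipeline(); }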
diff --git a/llvm/lib/Target/AMDGPU/R600TargetMachine.cpp b/llvm/lib/Target/AMDGPU/R600TargetMachine.cpp
index 2461263866a9..c550cfaf06c1 100644
--- a/llvm/lib/Target/AMDGPU/R600TargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/R600TargetMachine.cpp
@@ -15,6 +15,7 @@
#include "R600TargetMachine.h"
#include "AMDGPUTargetMachine.h"
#include "R600.h"
+#include "R600CodeGenPassBuilder.h"
#include "R600MachineScheduler.h"
#include "R600TargetTransformInfo.h"
#include "llvm/Transforms/Scalar.h"
@@ -144,3 +145,11 @@ void R600PassConfig::addPreEmitPass() {
TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
return new R600PassConfig(*this, PM);
}
+
+Error R600TargetMachine::buildCodeGenPipeline(
+ ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
+ CodeGenFileType FileType, const CGPassBuilderOption &Opts,
+ PassInstrumentationCallbacks *PIC) {
+ R600CodeGenPassBuilder CGPB(*this, Opts, PIC);
+ return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
+}
diff --git a/llvm/lib/Target/AMDGPU/R600TargetMachine.h b/llvm/lib/Target/AMDGPU/R600TargetMachine.h
index af8dcb848867..29e370edef2c 100644
--- a/llvm/lib/Target/AMDGPU/R600TargetMachine.h
+++ b/llvm/lib/Target/AMDGPU/R600TargetMachine.h
@@ -38,6 +38,12 @@ public:
TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+ Error buildCodeGenPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out,
+ raw_pwrite_stream *DwoOut,
+ CodeGenFileType FileType,
+ const CGPassBuilderOption &Opt,
+ PassInstrumentationCallbacks *PIC) override;
+
const TargetSubtargetInfo *getSubtargetImpl(const Function &) const override;
TargetTransformInfo getTargetTransformInfo(const Function &F) const override;
diff --git a/llvm/lib/Target/AMDGPU/SIDefines.h b/llvm/lib/Target/AMDGPU/SIDefines.h
index 6d0e0b3f4de2..1e9bfc77ab92 100644
--- a/llvm/lib/Target/AMDGPU/SIDefines.h
+++ b/llvm/lib/Target/AMDGPU/SIDefines.h
@@ -1111,7 +1111,7 @@ enum Type { TRAP = -2, WORKGROUP = -1 };
#define C_00B84C_LDS_SIZE 0xFF007FFF
#define S_00B84C_EXCP_EN(x) (((x) & 0x7F) << 24)
#define G_00B84C_EXCP_EN(x) (((x) >> 24) & 0x7F)
-#define C_00B84C_EXCP_EN
+#define C_00B84C_EXCP_EN 0x80FFFFFF
#define R_0286CC_SPI_PS_INPUT_ENA 0x0286CC
#define R_0286D0_SPI_PS_INPUT_ADDR 0x0286D0
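[Note] The old C_00B84C_EXCP_EN expanded to nothing; the fix supplies the clear mask, the bitwise complement of the 7-bit EXCP_EN field at bits 24-30. A standalone consistency check mirroring the S_/C_ macro convention used in this header:

    #include <cstdint>

    constexpr uint32_t S_00B84C_EXCP_EN(uint32_t x) { return (x & 0x7F) << 24; }

    // The clear mask must be the complement of the fully set field.
    static_assert(~S_00B84C_EXCP_EN(0x7F) == 0x80FFFFFFu,
                  "C_00B84C_EXCP_EN must clear exactly bits 24-30");

    int main() {}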
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 89e83babcfef..f9948e92862f 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -957,6 +957,11 @@ const GCNSubtarget *SITargetLowering::getSubtarget() const {
return Subtarget;
}
+ArrayRef<MCPhysReg> SITargetLowering::getRoundingControlRegisters() const {
+ static const MCPhysReg RCRegs[] = {AMDGPU::MODE};
+ return RCRegs;
+}
+
//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//
@@ -1233,13 +1238,13 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
// Atomic
Info.opc = CI.getType()->isVoidTy() ? ISD::INTRINSIC_VOID :
ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
Info.flags |= MachineMemOperand::MOLoad |
MachineMemOperand::MOStore |
MachineMemOperand::MODereferenceable;
switch (IntrID) {
default:
+ Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
// XXX - Should this be volatile without known ordering?
Info.flags |= MachineMemOperand::MOVolatile;
break;
@@ -2976,12 +2981,20 @@ SDValue SITargetLowering::LowerFormalArguments(
DL, Elts);
}
- SDValue CMemVT;
- if (VT.isScalarInteger() && VT.bitsLT(NewArg.getSimpleValueType()))
- CMemVT = DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewArg);
- else
- CMemVT = DAG.getBitcast(MemVT, NewArg);
- NewArg = convertArgType(DAG, VT, MemVT, DL, CMemVT,
+      // If the argument was preloaded to multiple consecutive 32-bit
+      // registers because of misalignment between addressable SGPR tuples
+      // and the argument size, we can still assume, because of kernarg
+      // segment alignment restrictions, that NewArg's size matches MemVT's,
+      // so a bitcast suffices. If MemVT is less than 32 bits, we add a
+      // truncate first, since we cannot preload to less than a single SGPR
+      // and MemVT may be smaller.
+ EVT MemVTInt =
+ EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
+ if (MemVT.bitsLT(NewArg.getSimpleValueType()))
+ NewArg = DAG.getNode(ISD::TRUNCATE, DL, MemVTInt, NewArg);
+
+ NewArg = DAG.getBitcast(MemVT, NewArg);
+ NewArg = convertArgType(DAG, VT, MemVT, DL, NewArg,
Ins[i].Flags.isSExt(), &Ins[i]);
NewArg = DAG.getMergeValues({NewArg, Chain}, DL);
}
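[Note] A concrete instance of the comment above: an f16 kernarg preloaded into a 32-bit SGPR arrives as an i32; MemVT (16 bits) is narrower, so the value is truncated to i16 first and only then bitcast to f16. A host-side model of that truncate-then-bitcast step (assumes the argument occupies the low bits, which is what ISD::TRUNCATE keeps):

    #include <cassert>
    #include <cstdint>

    int main() {
      // 16-bit kernarg preloaded into a 32-bit SGPR; high half is don't-care.
      uint32_t Preloaded = 0xFFFF3C00u;
      uint16_t Arg = static_cast<uint16_t>(Preloaded); // TRUNCATE i32 -> i16
      // The subsequent BITCAST just reinterprets these 16 bits as f16.
      assert(Arg == 0x3C00u); // 0x3C00 is 1.0 in IEEE half precision
    }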
@@ -5949,16 +5962,10 @@ SDValue SITargetLowering::lowerIntrinsicLoad(MemSDNode *M, bool IsFormat,
assert(M->getNumValues() == 2 || M->getNumValues() == 3);
bool IsTFE = M->getNumValues() == 3;
- unsigned Opc;
- if (IsFormat) {
- Opc = IsTFE ? AMDGPUISD::BUFFER_LOAD_FORMAT_TFE
- : AMDGPUISD::BUFFER_LOAD_FORMAT;
- } else {
- // TODO: Support non-format TFE loads.
- if (IsTFE)
- return SDValue();
- Opc = AMDGPUISD::BUFFER_LOAD;
- }
+ unsigned Opc = IsFormat ? (IsTFE ? AMDGPUISD::BUFFER_LOAD_FORMAT_TFE
+ : AMDGPUISD::BUFFER_LOAD_FORMAT)
+ : IsTFE ? AMDGPUISD::BUFFER_LOAD_TFE
+ : AMDGPUISD::BUFFER_LOAD;
if (IsD16) {
return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, M, DAG, Ops);
@@ -5966,7 +5973,8 @@ SDValue SITargetLowering::lowerIntrinsicLoad(MemSDNode *M, bool IsFormat,
// Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics
if (!IsD16 && !LoadVT.isVector() && EltType.getSizeInBits() < 32)
- return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M->getMemOperand());
+ return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M->getMemOperand(),
+ IsTFE);
if (isTypeLegal(LoadVT)) {
return getMemIntrinsicNode(Opc, DL, M->getVTList(), Ops, IntVT,
@@ -7580,8 +7588,7 @@ static SDValue constructRetValue(SelectionDAG &DAG, MachineSDNode *Result,
? (ReqRetNumElts + 1) / 2
: ReqRetNumElts;
- int MaskPopDwords = (!IsD16 || (IsD16 && Unpacked)) ?
- DMaskPop : (DMaskPop + 1) / 2;
+ int MaskPopDwords = (!IsD16 || Unpacked) ? DMaskPop : (DMaskPop + 1) / 2;
MVT DataDwordVT = NumDataDwords == 1 ?
MVT::i32 : MVT::getVectorVT(MVT::i32, NumDataDwords);
@@ -10160,11 +10167,30 @@ SDValue SITargetLowering::lowerPointerAsRsrcIntrin(SDNode *Op,
}
// Handle 8 bit and 16 bit buffer loads
-SDValue
-SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG, EVT LoadVT,
- SDLoc DL, ArrayRef<SDValue> Ops,
- MachineMemOperand *MMO) const {
+SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG,
+ EVT LoadVT, SDLoc DL,
+ ArrayRef<SDValue> Ops,
+ MachineMemOperand *MMO,
+ bool IsTFE) const {
EVT IntVT = LoadVT.changeTypeToInteger();
+
+ if (IsTFE) {
+ unsigned Opc = (LoadVT.getScalarType() == MVT::i8)
+ ? AMDGPUISD::BUFFER_LOAD_UBYTE_TFE
+ : AMDGPUISD::BUFFER_LOAD_USHORT_TFE;
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineMemOperand *OpMMO = MF.getMachineMemOperand(MMO, 0, 8);
+ SDVTList VTs = DAG.getVTList(MVT::v2i32, MVT::Other);
+ SDValue Op = getMemIntrinsicNode(Opc, DL, VTs, Ops, MVT::v2i32, OpMMO, DAG);
+ SDValue Status = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Op,
+ DAG.getConstant(1, DL, MVT::i32));
+ SDValue Data = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Op,
+ DAG.getConstant(0, DL, MVT::i32));
+ SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, IntVT, Data);
+ SDValue Value = DAG.getNode(ISD::BITCAST, DL, LoadVT, Trunc);
+ return DAG.getMergeValues({Value, Status, SDValue(Op.getNode(), 1)}, DL);
+ }
+
unsigned Opc = (LoadVT.getScalarType() == MVT::i8) ?
AMDGPUISD::BUFFER_LOAD_UBYTE : AMDGPUISD::BUFFER_LOAD_USHORT;
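[Note] In the TFE path just added, a sub-dword load is widened to a v2i32 node: element 0 holds the data dword (truncated back to 8/16 bits afterwards), element 1 the TFE status, and the node's chain is re-attached as the third merged value. A plain C++ model of the extraction order (not SelectionDAG):

    #include <cassert>
    #include <cstdint>
    #include <utility>

    // Model of the v2i32 result of a BUFFER_LOAD_UBYTE_TFE.
    std::pair<uint8_t, uint32_t> splitTfe(const uint32_t V2[2]) {
      uint32_t Data = V2[0];   // EXTRACT_VECTOR_ELT, index 0
      uint32_t Status = V2[1]; // EXTRACT_VECTOR_ELT, index 1
      return {static_cast<uint8_t>(Data), Status}; // TRUNCATE to i8
    }

    int main() {
      uint32_t Regs[2] = {0x1FF, 0};
      auto [Val, St] = splitTfe(Regs);
      assert(Val == 0xFF && St == 0);
    }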
@@ -12066,11 +12092,9 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
return std::nullopt;
auto VecIdx = IdxOp->getZExtValue();
auto ScalarSize = Op.getScalarValueSizeInBits();
- if (ScalarSize != 32) {
+ if (ScalarSize < 32)
Index = ScalarSize == 8 ? VecIdx : VecIdx * 2 + Index;
- }
-
- return calculateSrcByte(ScalarSize == 32 ? Op : Op.getOperand(0),
+ return calculateSrcByte(ScalarSize >= 32 ? Op : Op.getOperand(0),
StartingIndex, Index);
}
@@ -13189,6 +13213,33 @@ SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
return SDValue();
}
+/// \return true if the subtarget supports minimum3 and maximum3 with the given
+/// base min/max opcode \p Opc for type \p VT.
+static bool supportsMin3Max3(const GCNSubtarget &Subtarget, unsigned Opc,
+ EVT VT) {
+ switch (Opc) {
+ case ISD::FMINNUM:
+ case ISD::FMAXNUM:
+ case ISD::FMINNUM_IEEE:
+ case ISD::FMAXNUM_IEEE:
+ case AMDGPUISD::FMIN_LEGACY:
+ case AMDGPUISD::FMAX_LEGACY:
+ return (VT == MVT::f32) || (VT == MVT::f16 && Subtarget.hasMin3Max3_16());
+ case ISD::FMINIMUM:
+ case ISD::FMAXIMUM:
+ return (VT == MVT::f32 || VT == MVT::f16) && Subtarget.hasIEEEMinMax3();
+ case ISD::SMAX:
+ case ISD::SMIN:
+ case ISD::UMAX:
+ case ISD::UMIN:
+ return (VT == MVT::i32) || (VT == MVT::i16 && Subtarget.hasMin3Max3_16());
+ default:
+ return false;
+ }
+
+ llvm_unreachable("not a min/max opcode");
+}
+
SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
@@ -13201,10 +13252,7 @@ SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
  // Only do this if the inner op has one use, since this would just increase
  // register pressure for no benefit.
- if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
- !VT.isVector() &&
- (VT == MVT::i32 || VT == MVT::f32 ||
- ((VT == MVT::f16 || VT == MVT::i16) && Subtarget->hasMin3Max3_16()))) {
+ if (supportsMin3Max3(*Subtarget, Opc, VT)) {
// max(max(a, b), c) -> max3(a, b, c)
// min(min(a, b), c) -> min3(a, b, c)
if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
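[Note] With supportsMin3Max3 factored out, the combine's gate reads directly from the table: f32/i32 always qualify for the base min/max flavors, the 16-bit types additionally need hasMin3Max3_16, and FMINIMUM/FMAXIMUM need hasIEEEMinMax3 (GFX12+); vector types never qualify, since none of the listed VTs are vectors. The rewrite itself relies on plain 3-way reassociation:

    #include <algorithm>
    #include <cassert>

    int main() {
      int a = 1, b = 7, c = 4;
      // max(max(a, b), c) -> max3(a, b, c): the nested two-input form the
      // combine matches is exactly a three-way maximum, which one V_MAX3
      // instruction computes.
      assert(std::max(std::max(a, b), c) == std::max({a, b, c}));
    }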
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index 08aa2a599163..292b17da9358 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -275,7 +275,8 @@ private:
// Handle 8 bit and 16 bit buffer loads
SDValue handleByteShortBufferLoads(SelectionDAG &DAG, EVT LoadVT, SDLoc DL,
ArrayRef<SDValue> Ops,
- MachineMemOperand *MMO) const;
+ MachineMemOperand *MMO,
+ bool IsTFE = false) const;
// Handle 8 bit and 16 bit buffer stores
SDValue handleByteShortBufferStores(SelectionDAG &DAG, EVT VDataType,
@@ -287,6 +288,8 @@ public:
const GCNSubtarget *getSubtarget() const;
+ ArrayRef<MCPhysReg> getRoundingControlRegisters() const override;
+
bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode, EVT DestVT,
EVT SrcVT) const override;
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index 5577ce9eb128..230443313d72 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -900,18 +900,6 @@ void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII,
}
}
}
-#if 0 // TODO: check if this is handled by MUBUF code above.
- } else if (Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORD ||
- Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX2 ||
- Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX4) {
- MachineOperand *MO = TII->getNamedOperand(Inst, AMDGPU::OpName::data);
- unsigned OpNo;//TODO: find the OpNo for this operand;
- RegInterval Interval = getRegInterval(&Inst, MRI, TRI, OpNo);
- for (int RegNo = Interval.first; RegNo < Interval.second;
- ++RegNo) {
- setRegScore(RegNo + NUM_ALL_VGPRS, t, CurrScore);
- }
-#endif
} else /* LGKM_CNT || EXP_CNT || VS_CNT || NUM_INST_CNTS */ {
// Match the score to the destination registers.
for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
@@ -1673,59 +1661,6 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
AMDGPU::SendMsg::ID_GS_DONE_PreGFX11)) {
Wait.LoadCnt = 0;
}
-#if 0 // TODO: the following blocks of logic when we have fence.
- else if (MI.getOpcode() == SC_FENCE) {
- const unsigned int group_size =
- context->shader_info->GetMaxThreadGroupSize();
- // group_size == 0 means thread group size is unknown at compile time
- const bool group_is_multi_wave =
- (group_size == 0 || group_size > target_info->GetWaveFrontSize());
- const bool fence_is_global = !((SCInstInternalMisc*)Inst)->IsGroupFence();
-
- for (unsigned int i = 0; i < Inst->NumSrcOperands(); i++) {
- SCRegType src_type = Inst->GetSrcType(i);
- switch (src_type) {
- case SCMEM_LDS:
- if (group_is_multi_wave ||
- context->OptFlagIsOn(OPT_R1100_LDSMEM_FENCE_CHICKEN_BIT)) {
- EmitWaitcnt |= ScoreBrackets->updateByWait(DS_CNT,
- ScoreBrackets->getScoreUB(DS_CNT));
- // LDS may have to wait for VMcnt after buffer load to LDS
- if (target_info->HasBufferLoadToLDS()) {
- EmitWaitcnt |= ScoreBrackets->updateByWait(LOAD_CNT,
- ScoreBrackets->getScoreUB(LOAD_CNT));
- }
- }
- break;
-
- case SCMEM_GDS:
- if (group_is_multi_wave || fence_is_global) {
- EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
- ScoreBrackets->getScoreUB(EXP_CNT));
- EmitWaitcnt |= ScoreBrackets->updateByWait(DS_CNT,
- ScoreBrackets->getScoreUB(DS_CNT));
- }
- break;
-
- case SCMEM_UAV:
- case SCMEM_TFBUF:
- case SCMEM_RING:
- case SCMEM_SCATTER:
- if (group_is_multi_wave || fence_is_global) {
- EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
- ScoreBrackets->getScoreUB(EXP_CNT));
- EmitWaitcnt |= ScoreBrackets->updateByWait(LOAD_CNT,
- ScoreBrackets->getScoreUB(LOAD_CNT));
- }
- break;
-
- case SCMEM_SCRATCH:
- default:
- break;
- }
- }
- }
-#endif
// Export & GDS instructions do not read the EXEC mask until after the export
// is granted (which can occur well after the instruction is issued).
@@ -2309,17 +2244,6 @@ bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
updateEventWaitcntAfter(Inst, &ScoreBrackets);
-#if 0 // TODO: implement resource type check controlled by options with ub = LB.
- // If this instruction generates a S_SETVSKIP because it is an
- // indexed resource, and we are on Tahiti, then it will also force
- // an S_WAITCNT vmcnt(0)
- if (RequireCheckResourceType(Inst, context)) {
- // Force the score to as if an S_WAITCNT vmcnt(0) is emitted.
- ScoreBrackets->setScoreLB(LOAD_CNT,
- ScoreBrackets->getScoreUB(LOAD_CNT));
- }
-#endif
-
if (ST->isPreciseMemoryEnabled() && Inst.mayLoadOrStore()) {
AMDGPU::Waitcnt Wait = WCG->getAllZeroWaitcnt(
Inst.mayStore() && !SIInstrInfo::isAtomicRet(Inst));
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 08351c49b223..bb5f166e4792 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -2031,50 +2031,57 @@ MachineBasicBlock *SIInstrInfo::insertSimulatedTrap(MachineRegisterInfo &MRI,
MachineInstr &MI,
const DebugLoc &DL) const {
MachineFunction *MF = MBB.getParent();
- MachineBasicBlock *SplitBB = MBB.splitAt(MI, /*UpdateLiveIns=*/false);
- MachineBasicBlock *HaltLoop = MF->CreateMachineBasicBlock();
- MF->push_back(HaltLoop);
-
constexpr unsigned DoorbellIDMask = 0x3ff;
constexpr unsigned ECQueueWaveAbort = 0x400;
+ MachineBasicBlock *TrapBB = &MBB;
+ MachineBasicBlock *ContBB = &MBB;
+ MachineBasicBlock *HaltLoopBB = MF->CreateMachineBasicBlock();
+
+ if (!MBB.succ_empty() || std::next(MI.getIterator()) != MBB.end()) {
+ ContBB = MBB.splitAt(MI, /*UpdateLiveIns=*/false);
+ TrapBB = MF->CreateMachineBasicBlock();
+ BuildMI(MBB, MI, DL, get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(TrapBB);
+ MF->push_back(TrapBB);
+ MBB.addSuccessor(TrapBB);
+ }
+
  // Start with an `s_trap 2`; if we're in PRIV=1 and we need the workaround,
  // this will be a nop.
- BuildMI(MBB, MI, DL, get(AMDGPU::S_TRAP))
+ BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_TRAP))
.addImm(static_cast<unsigned>(GCNSubtarget::TrapID::LLVMAMDHSATrap));
Register DoorbellReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
- BuildMI(MBB, MI, DL, get(AMDGPU::S_SENDMSG_RTN_B32), DoorbellReg)
+ BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_SENDMSG_RTN_B32),
+ DoorbellReg)
.addImm(AMDGPU::SendMsg::ID_RTN_GET_DOORBELL);
- BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::TTMP2)
+ BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_MOV_B32), AMDGPU::TTMP2)
.addUse(AMDGPU::M0);
Register DoorbellRegMasked =
MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
- BuildMI(MBB, MI, DL, get(AMDGPU::S_AND_B32), DoorbellRegMasked)
+ BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_AND_B32), DoorbellRegMasked)
.addUse(DoorbellReg)
.addImm(DoorbellIDMask);
Register SetWaveAbortBit =
MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
- BuildMI(MBB, MI, DL, get(AMDGPU::S_OR_B32), SetWaveAbortBit)
+ BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_OR_B32), SetWaveAbortBit)
.addUse(DoorbellRegMasked)
.addImm(ECQueueWaveAbort);
- BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::M0)
+ BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_MOV_B32), AMDGPU::M0)
.addUse(SetWaveAbortBit);
- BuildMI(MBB, MI, DL, get(AMDGPU::S_SENDMSG))
+ BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_SENDMSG))
.addImm(AMDGPU::SendMsg::ID_INTERRUPT);
- BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::M0)
+ BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_MOV_B32), AMDGPU::M0)
.addUse(AMDGPU::TTMP2);
- BuildMI(MBB, MI, DL, get(AMDGPU::S_BRANCH)).addMBB(HaltLoop);
-
- BuildMI(*HaltLoop, HaltLoop->end(), DL, get(AMDGPU::S_SETHALT)).addImm(5);
- BuildMI(*HaltLoop, HaltLoop->end(), DL, get(AMDGPU::S_BRANCH))
- .addMBB(HaltLoop);
+ BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_BRANCH)).addMBB(HaltLoopBB);
+ TrapBB->addSuccessor(HaltLoopBB);
- if (SplitBB != &MBB)
- MBB.removeSuccessor(SplitBB);
- MBB.addSuccessor(HaltLoop);
- HaltLoop->addSuccessor(HaltLoop);
+ BuildMI(*HaltLoopBB, HaltLoopBB->end(), DL, get(AMDGPU::S_SETHALT)).addImm(5);
+ BuildMI(*HaltLoopBB, HaltLoopBB->end(), DL, get(AMDGPU::S_BRANCH))
+ .addMBB(HaltLoopBB);
+ MF->push_back(HaltLoopBB);
+ HaltLoopBB->addSuccessor(HaltLoopBB);
- return SplitBB;
+ return ContBB;
}
unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) {
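[Note] The restructuring above makes the simulated-trap expansion conditional: the trap sequence now lives in its own TrapBB, reached via S_CBRANCH_EXECNZ, and the original block is only split when MI has trailing instructions or MBB has successors (otherwise TrapBB and ContBB both stay MBB itself). A sketch of the resulting control flow in the split case:

    MBB --s_cbranch_execnz--> TrapBB --s_branch--> HaltLoopBB --+
     |                                                  ^       |
     | (fallthrough)                                    +-------+
     v
    ContBB (remainder of the original block)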
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 0ed2f60ea66a..fd119e0992e5 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -148,6 +148,16 @@ def SIbuffer_load_byte : SDNode <"AMDGPUISD::BUFFER_LOAD_BYTE", SDTBufferLoad,
[SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
def SIbuffer_load_short: SDNode <"AMDGPUISD::BUFFER_LOAD_SHORT", SDTBufferLoad,
[SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
+def SIbuffer_load_tfe : SDNode <"AMDGPUISD::BUFFER_LOAD_TFE", SDTBufferLoad,
+ [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
+def SIbuffer_load_ubyte_tfe : SDNode <"AMDGPUISD::BUFFER_LOAD_UBYTE_TFE", SDTBufferLoad,
+ [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
+def SIbuffer_load_ushort_tfe : SDNode <"AMDGPUISD::BUFFER_LOAD_USHORT_TFE", SDTBufferLoad,
+ [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
+def SIbuffer_load_byte_tfe : SDNode <"AMDGPUISD::BUFFER_LOAD_BYTE_TFE", SDTBufferLoad,
+ [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
+def SIbuffer_load_short_tfe: SDNode <"AMDGPUISD::BUFFER_LOAD_SHORT_TFE", SDTBufferLoad,
+ [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
def SIbuffer_load_format : SDNode <"AMDGPUISD::BUFFER_LOAD_FORMAT", SDTBufferLoad,
[SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
def SIbuffer_load_format_tfe : SDNode <"AMDGPUISD::BUFFER_LOAD_FORMAT_TFE", SDTBufferLoad,
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index e7aeaa017306..d1667955f83d 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -3762,6 +3762,11 @@ def G_AMDGPU_BUFFER_LOAD_SBYTE : BufferLoadGenericInstruction;
def G_AMDGPU_BUFFER_LOAD_USHORT : BufferLoadGenericInstruction;
def G_AMDGPU_BUFFER_LOAD_SSHORT : BufferLoadGenericInstruction;
def G_AMDGPU_BUFFER_LOAD : BufferLoadGenericInstruction;
+def G_AMDGPU_BUFFER_LOAD_UBYTE_TFE : BufferLoadGenericInstruction;
+def G_AMDGPU_BUFFER_LOAD_SBYTE_TFE : BufferLoadGenericInstruction;
+def G_AMDGPU_BUFFER_LOAD_USHORT_TFE : BufferLoadGenericInstruction;
+def G_AMDGPU_BUFFER_LOAD_SSHORT_TFE : BufferLoadGenericInstruction;
+def G_AMDGPU_BUFFER_LOAD_TFE : BufferLoadGenericInstruction;
def G_AMDGPU_BUFFER_LOAD_FORMAT : BufferLoadGenericInstruction;
def G_AMDGPU_BUFFER_LOAD_FORMAT_TFE : BufferLoadGenericInstruction;
def G_AMDGPU_BUFFER_LOAD_FORMAT_D16 : BufferLoadGenericInstruction;
diff --git a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
index 62306fa667b3..24f8788683ed 100644
--- a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -18,9 +18,11 @@
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/BitmaskEnum.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/TargetParser/TargetParser.h"
@@ -678,6 +680,49 @@ public:
bool runOnMachineFunction(MachineFunction &MF) override;
};
+static const StringMap<SIAtomicAddrSpace> ASNames = {{
+ {"global", SIAtomicAddrSpace::GLOBAL},
+ {"local", SIAtomicAddrSpace::LDS},
+}};
+
+void diagnoseUnknownMMRAASName(const MachineInstr &MI, StringRef AS) {
+ const MachineFunction *MF = MI.getMF();
+ const Function &Fn = MF->getFunction();
+ SmallString<128> Str;
+ raw_svector_ostream OS(Str);
+ OS << "unknown address space '" << AS << "'; expected one of ";
+ ListSeparator LS;
+ for (const auto &[Name, Val] : ASNames)
+ OS << LS << '\'' << Name << '\'';
+ DiagnosticInfoUnsupported BadTag(Fn, Str.str(), MI.getDebugLoc(), DS_Warning);
+ Fn.getContext().diagnose(BadTag);
+}
+
+/// Reads \p MI's MMRAs to parse the "amdgpu-as" MMRA.
+/// If this tag isn't present, or if it has no meaningful values, returns \p
+/// Default. Otherwise returns all the address spaces concerned by the MMRA.
+static SIAtomicAddrSpace getFenceAddrSpaceMMRA(const MachineInstr &MI,
+ SIAtomicAddrSpace Default) {
+ static constexpr StringLiteral FenceASPrefix = "amdgpu-as";
+
+ auto MMRA = MMRAMetadata(MI.getMMRAMetadata());
+ if (!MMRA)
+ return Default;
+
+ SIAtomicAddrSpace Result = SIAtomicAddrSpace::NONE;
+ for (const auto &[Prefix, Suffix] : MMRA) {
+ if (Prefix != FenceASPrefix)
+ continue;
+
+ if (auto It = ASNames.find(Suffix); It != ASNames.end())
+ Result |= It->second;
+ else
+ diagnoseUnknownMMRAASName(MI, Suffix);
+ }
+
+ return (Result != SIAtomicAddrSpace::NONE) ? Result : Default;
+}
+
} // end namespace anonymous
void SIMemOpAccess::reportUnsupported(const MachineBasicBlock::iterator &MI,
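[Note] getFenceAddrSpaceMMRA above ORs together every recognized "amdgpu-as" suffix and falls back to Default only when nothing usable was found, so a fence tagged with both "global" and "local" narrows to exactly those two spaces. A self-contained model of the lookup-with-fallback logic (std::map standing in for StringMap, plain enum bits for SIAtomicAddrSpace):

    #include <cassert>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    enum AS : unsigned { NONE = 0, GLOBAL = 1, LDS = 2, ALL = GLOBAL | LDS };

    unsigned fenceAddrSpace(
        const std::vector<std::pair<std::string, std::string>> &MMRA,
        unsigned Default) {
      static const std::map<std::string, unsigned> ASNames = {
          {"global", GLOBAL}, {"local", LDS}};
      unsigned Result = NONE;
      for (const auto &[Prefix, Suffix] : MMRA) {
        if (Prefix != "amdgpu-as")
          continue;
        if (auto It = ASNames.find(Suffix); It != ASNames.end())
          Result |= It->second; // unknown names are diagnosed, not fatal
      }
      return Result != NONE ? Result : Default;
    }

    int main() {
      assert(fenceAddrSpace({{"amdgpu-as", "local"}}, ALL) == LDS);
      assert(fenceAddrSpace({}, ALL) == ALL); // no tag: keep the default
    }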
@@ -2535,12 +2580,17 @@ bool SIMemoryLegalizer::expandAtomicFence(const SIMemOpInfo &MOI,
AtomicPseudoMIs.push_back(MI);
bool Changed = false;
+ // Refine fenced address space based on MMRAs.
+ //
+ // TODO: Should we support this MMRA on other atomic operations?
+ auto OrderingAddrSpace =
+ getFenceAddrSpaceMMRA(*MI, MOI.getOrderingAddrSpace());
+
if (MOI.isAtomic()) {
if (MOI.getOrdering() == AtomicOrdering::Acquire)
- Changed |= CC->insertWait(MI, MOI.getScope(), MOI.getOrderingAddrSpace(),
- SIMemOp::LOAD | SIMemOp::STORE,
- MOI.getIsCrossAddressSpaceOrdering(),
- Position::BEFORE);
+ Changed |= CC->insertWait(
+ MI, MOI.getScope(), OrderingAddrSpace, SIMemOp::LOAD | SIMemOp::STORE,
+ MOI.getIsCrossAddressSpaceOrdering(), Position::BEFORE);
if (MOI.getOrdering() == AtomicOrdering::Release ||
MOI.getOrdering() == AtomicOrdering::AcquireRelease ||
@@ -2552,8 +2602,7 @@ bool SIMemoryLegalizer::expandAtomicFence(const SIMemOpInfo &MOI,
/// generate a fence. Could add support in this file for
/// barrier. SIInsertWaitcnt.cpp could then stop unconditionally
/// adding S_WAITCNT before a S_BARRIER.
- Changed |= CC->insertRelease(MI, MOI.getScope(),
- MOI.getOrderingAddrSpace(),
+ Changed |= CC->insertRelease(MI, MOI.getScope(), OrderingAddrSpace,
MOI.getIsCrossAddressSpaceOrdering(),
Position::BEFORE);
@@ -2565,8 +2614,7 @@ bool SIMemoryLegalizer::expandAtomicFence(const SIMemOpInfo &MOI,
if (MOI.getOrdering() == AtomicOrdering::Acquire ||
MOI.getOrdering() == AtomicOrdering::AcquireRelease ||
MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent)
- Changed |= CC->insertAcquire(MI, MOI.getScope(),
- MOI.getOrderingAddrSpace(),
+ Changed |= CC->insertAcquire(MI, MOI.getScope(), OrderingAddrSpace,
Position::BEFORE);
return Changed;
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 2beaf903542b..4b34fb27632a 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -11,6 +11,7 @@
#include "AMDGPUAsmUtils.h"
#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
+#include "Utils/AMDKernelCodeTUtils.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/IR/Attributes.h"
@@ -1218,39 +1219,37 @@ unsigned getAllocatedNumVGPRBlocks(const MCSubtargetInfo *STI,
}
} // end namespace IsaInfo
-void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
+void initDefaultAMDKernelCodeT(AMDGPUMCKernelCodeT &KernelCode,
const MCSubtargetInfo *STI) {
IsaVersion Version = getIsaVersion(STI->getCPU());
-
- memset(&Header, 0, sizeof(Header));
-
- Header.amd_kernel_code_version_major = 1;
- Header.amd_kernel_code_version_minor = 2;
- Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
- Header.amd_machine_version_major = Version.Major;
- Header.amd_machine_version_minor = Version.Minor;
- Header.amd_machine_version_stepping = Version.Stepping;
- Header.kernel_code_entry_byte_offset = sizeof(Header);
- Header.wavefront_size = 6;
+ KernelCode.amd_kernel_code_version_major = 1;
+ KernelCode.amd_kernel_code_version_minor = 2;
+ KernelCode.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
+ KernelCode.amd_machine_version_major = Version.Major;
+ KernelCode.amd_machine_version_minor = Version.Minor;
+ KernelCode.amd_machine_version_stepping = Version.Stepping;
+ KernelCode.kernel_code_entry_byte_offset = sizeof(amd_kernel_code_t);
+ if (STI->getFeatureBits().test(FeatureWavefrontSize32)) {
+ KernelCode.wavefront_size = 5;
+ KernelCode.code_properties |= AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32;
+ } else {
+ KernelCode.wavefront_size = 6;
+ }
// If the code object does not support indirect functions, then the value must
// be 0xffffffff.
- Header.call_convention = -1;
+ KernelCode.call_convention = -1;
// These alignment values are specified in powers of two, so alignment =
// 2^n. The minimum alignment is 2^4 = 16.
- Header.kernarg_segment_alignment = 4;
- Header.group_segment_alignment = 4;
- Header.private_segment_alignment = 4;
+ KernelCode.kernarg_segment_alignment = 4;
+ KernelCode.group_segment_alignment = 4;
+ KernelCode.private_segment_alignment = 4;
if (Version.Major >= 10) {
- if (STI->getFeatureBits().test(FeatureWavefrontSize32)) {
- Header.wavefront_size = 5;
- Header.code_properties |= AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32;
- }
- Header.compute_pgm_resource_registers |=
- S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
- S_00B848_MEM_ORDERED(1);
+ KernelCode.compute_pgm_resource_registers |=
+ S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
+ S_00B848_MEM_ORDERED(1);
}
}
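[Note] The rewritten initializer leans on the amd_kernel_code_t encoding of wavefront_size as a log2 exponent: 5 means wave32 (paired with the ENABLE_WAVEFRONT_SIZE32 code property) and 6 means wave64, with the feature-bit check now applied unconditionally rather than only under Version.Major >= 10. A quick sanity check of the encoding:

    static_assert((1 << 5) == 32, "wavefront_size=5 encodes wave32");
    static_assert((1 << 6) == 64, "wavefront_size=6 encodes wave64");

    int main() {}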
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index fc4147df76e3..3cfc42a7d24d 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -37,6 +37,7 @@ class raw_ostream;
namespace AMDGPU {
+struct AMDGPUMCKernelCodeT;
struct IsaVersion;
/// Generic target versions emitted by this version of LLVM.
@@ -860,7 +861,7 @@ unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc);
LLVM_READONLY
unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc);
-void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
+void initDefaultAMDKernelCodeT(AMDGPUMCKernelCodeT &Header,
const MCSubtargetInfo *STI);
bool isGroupSegment(const GlobalValue *GV);
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTInfo.h
index 95ad3f35d18f..75cb6cffbd51 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTInfo.h
@@ -12,34 +12,51 @@
//
//===----------------------------------------------------------------------===//
-#define QNAME(name) amd_kernel_code_t::name
+#define QNAME(name) AMDGPUMCKernelCodeT::name
#define FLD_T(name) decltype(QNAME(name)), &QNAME(name)
-#define FIELD2(sname, aname, name) \
- RECORD(sname, aname, printField<FLD_T(name)>, parseField<FLD_T(name)>)
+#ifndef PRINTFIELD
+#define PRINTFIELD(sname, aname, name) printField<FLD_T(name)>
+#endif
-#define FIELD(name) FIELD2(name, name, name)
+#ifndef FIELD2
+#define FIELD2(sname, aname, name) \
+ RECORD(sname, aname, PRINTFIELD(sname, aname, name), parseField<FLD_T(name)>)
+#endif
+#ifndef FIELD
+#define FIELD(name) FIELD2(name, name, name)
+#endif
+#ifndef PRINTCODEPROP
#define PRINTCODEPROP(name) \
printBitField<FLD_T(code_properties),\
AMD_CODE_PROPERTY_##name##_SHIFT,\
AMD_CODE_PROPERTY_##name##_WIDTH>
+#endif
+#ifndef PARSECODEPROP
#define PARSECODEPROP(name) \
parseBitField<FLD_T(code_properties),\
AMD_CODE_PROPERTY_##name##_SHIFT,\
AMD_CODE_PROPERTY_##name##_WIDTH>
+#endif
+#ifndef CODEPROP
#define CODEPROP(name, shift) \
RECORD(name, name, PRINTCODEPROP(shift), PARSECODEPROP(shift))
+#endif
// We have to define these lambdas because of Set/GetMacro.
+#ifndef PRINTCOMP
#define PRINTCOMP(GetMacro, Shift) \
[](StringRef Name, const amd_kernel_code_t &C, raw_ostream &OS) { \
printName(OS, Name) << \
(int)GetMacro(C.compute_pgm_resource_registers >> Shift); \
}
+#endif
+
+#ifndef PARSECOMP
#define PARSECOMP(SetMacro, Shift) \
[](amd_kernel_code_t &C, MCAsmParser &MCParser, raw_ostream &Err) { \
int64_t Value = 0; \
@@ -49,15 +66,22 @@
C.compute_pgm_resource_registers |= SetMacro(Value) << Shift; \
return true; \
}
+#endif
+#ifndef COMPPGM
#define COMPPGM(name, aname, GetMacro, SetMacro, Shift) \
RECORD(name, aname, PRINTCOMP(GetMacro, Shift), PARSECOMP(SetMacro, Shift))
+#endif
+#ifndef COMPPGM1
#define COMPPGM1(name, aname, AccMacro) \
COMPPGM(name, aname, G_00B848_##AccMacro, S_00B848_##AccMacro, 0)
+#endif
+#ifndef COMPPGM2
#define COMPPGM2(name, aname, AccMacro) \
COMPPGM(name, aname, G_00B84C_##AccMacro, S_00B84C_##AccMacro, 32)
+#endif
///////////////////////////////////////////////////////////////////////////////
// Begin of the table
@@ -143,13 +167,14 @@ FIELD(runtime_loader_kernel_symbol)
#undef QNAME
#undef FLD_T
+#undef PRINTFIELD
#undef FIELD2
#undef FIELD
#undef PRINTCODEPROP
#undef PARSECODEPROP
#undef CODEPROP
#undef PRINTCOMP
-#undef PAPSECOMP
+#undef PARSECOMP
#undef COMPPGM
#undef COMPPGM1
#undef COMPPGM2
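[Note] The #ifndef guards added above turn AMDKernelCodeTInfo.h into an overridable X-macro table: an includer may pre-define PRINTFIELD, COMPPGM1, PARSECOMP, etc. before including the header to swap in its own printers/parsers while reusing the same RECORD rows, which is what AMDKernelCodeTUtils.cpp now does for the MCExpr-aware variants. A minimal sketch of the pattern (toy table, not the real header):

    #include <iostream>
    #include <string>
    #include <vector>

    // Includer's override: expand each table row to a quoted name. Because it
    // is defined first, the table's default expansion below is skipped.
    #define RECORD(name) #name,

    // ---- stand-in for including AMDKernelCodeTInfo.h ----
    #ifndef RECORD
    #define RECORD(name) /* default expansion would go here */
    #endif
    const std::vector<std::string> FieldNames = {
        RECORD(wavefront_size) RECORD(priority)};
    #undef RECORD
    // ------------------------------------------------------

    int main() {
      for (const auto &N : FieldNames)
        std::cout << N << '\n'; // prints: wavefront_size, priority
    }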
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp
index 6bbc8c315718..eaee1a2a9739 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp
@@ -6,44 +6,205 @@
//
//===----------------------------------------------------------------------===//
//
-/// \file - utility functions to parse/print amd_kernel_code_t structure
+/// \file - utility functions to parse/print AMDGPUMCKernelCodeT structure
//
//===----------------------------------------------------------------------===//
#include "AMDKernelCodeTUtils.h"
#include "AMDKernelCodeT.h"
#include "SIDefines.h"
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/StringMap.h"
+#include "Utils/AMDGPUBaseInfo.h"
+#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+using namespace llvm::AMDGPU;
-static ArrayRef<StringRef> get_amd_kernel_code_t_FldNames() {
- static StringRef const Table[] = {
- "", // not found placeholder
+// Generates the following for AMDGPUMCKernelCodeT struct members:
+// - HasMemberXXXXX class
+//    A check to see if AMDGPUMCKernelCodeT has a specific member, used to
+//    determine which of the original amd_kernel_code_t members are duplicated
+//    (if the names don't match, the table-driven strategy won't work).
+// - IsMCExprXXXXX class
+//    Checks whether an AMDGPUMCKernelCodeT struct member is MCExpr-ified or
+//    not.
+// - GetMemberXXXXX class
+//    A retrieval helper for said member (of type const MCExpr *&). Returns a
+//    `Phony` const MCExpr *, initialized to nullptr, to preserve reference
+//    returns.
+#define GEN_HAS_MEMBER(member) \
+ class HasMember##member { \
+ private: \
+ struct KnownWithMember { \
+ int member; \
+ }; \
+ class AmbiguousDerived : public AMDGPUMCKernelCodeT, \
+ public KnownWithMember {}; \
+ template <typename U> \
+ static constexpr std::false_type Test(decltype(U::member) *); \
+ template <typename U> static constexpr std::true_type Test(...); \
+ \
+ public: \
+ static constexpr bool RESULT = \
+ std::is_same_v<decltype(Test<AmbiguousDerived>(nullptr)), \
+ std::true_type>; \
+ }; \
+ class IsMCExpr##member { \
+ template <typename U, \
+ typename std::enable_if_t< \
+ HasMember##member::RESULT && \
+ std::is_same_v<decltype(U::member), const MCExpr *>, \
+ U> * = nullptr> \
+ static constexpr std::true_type HasMCExprType(decltype(U::member) *); \
+ template <typename U> static constexpr std::false_type HasMCExprType(...); \
+ \
+ public: \
+ static constexpr bool RESULT = \
+ std::is_same_v<decltype(HasMCExprType<AMDGPUMCKernelCodeT>(nullptr)), \
+ std::true_type>; \
+ }; \
+ class GetMember##member { \
+ public: \
+ static const MCExpr *Phony; \
+ template <typename U, typename std::enable_if_t<IsMCExpr##member::RESULT, \
+ U> * = nullptr> \
+ static const MCExpr *&Get(U &C) { \
+ assert(IsMCExpr##member::RESULT && \
+ "Trying to retrieve member that does not exist."); \
+ return C.member; \
+ } \
+ template <typename U, typename std::enable_if_t<!IsMCExpr##member::RESULT, \
+ U> * = nullptr> \
+ static const MCExpr *&Get(U &C) { \
+ return Phony; \
+ } \
+ }; \
+ const MCExpr *GetMember##member::Phony = nullptr;
+
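[Note] The HasMember detection above is the classic base-class-ambiguity trick: AmbiguousDerived inherits the probed name from KnownWithMember, so if AMDGPUMCKernelCodeT declares it too, the lookup in decltype(U::member) becomes ambiguous, that overload SFINAEs away, and the variadic true_type overload wins. A standalone replica of the trick:

    #include <type_traits>

    struct Target { int wavefront_size; };

    struct KnownWithMember { int wavefront_size; };
    struct AmbiguousDerived : Target, KnownWithMember {};

    // If Target also has the member, U::wavefront_size is ambiguous in
    // AmbiguousDerived, the first overload drops out, and test(...) is
    // selected, yielding true_type.
    template <typename U>
    std::false_type test(decltype(U::wavefront_size) *);
    template <typename U> std::true_type test(...);

    static_assert(decltype(test<AmbiguousDerived>(nullptr))::value,
                  "Target declares wavefront_size");

    int main() {}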
+// We cannot generate these class declarations with the table-driven approach
+// (see the table in AMDKernelCodeTInfo.h). Fortunately, if any are missing
+// here, or are later added to the table, an error will occur when retrieving
+// the table in getMCExprIndexTable.
+GEN_HAS_MEMBER(amd_code_version_major)
+GEN_HAS_MEMBER(amd_code_version_minor)
+GEN_HAS_MEMBER(amd_machine_kind)
+GEN_HAS_MEMBER(amd_machine_version_major)
+GEN_HAS_MEMBER(amd_machine_version_minor)
+GEN_HAS_MEMBER(amd_machine_version_stepping)
+
+GEN_HAS_MEMBER(kernel_code_entry_byte_offset)
+GEN_HAS_MEMBER(kernel_code_prefetch_byte_size)
+
+GEN_HAS_MEMBER(granulated_workitem_vgpr_count)
+GEN_HAS_MEMBER(granulated_wavefront_sgpr_count)
+GEN_HAS_MEMBER(priority)
+GEN_HAS_MEMBER(float_mode)
+GEN_HAS_MEMBER(priv)
+GEN_HAS_MEMBER(enable_dx10_clamp)
+GEN_HAS_MEMBER(debug_mode)
+GEN_HAS_MEMBER(enable_ieee_mode)
+GEN_HAS_MEMBER(enable_wgp_mode)
+GEN_HAS_MEMBER(enable_mem_ordered)
+GEN_HAS_MEMBER(enable_fwd_progress)
+
+GEN_HAS_MEMBER(enable_sgpr_private_segment_wave_byte_offset)
+GEN_HAS_MEMBER(user_sgpr_count)
+GEN_HAS_MEMBER(enable_trap_handler)
+GEN_HAS_MEMBER(enable_sgpr_workgroup_id_x)
+GEN_HAS_MEMBER(enable_sgpr_workgroup_id_y)
+GEN_HAS_MEMBER(enable_sgpr_workgroup_id_z)
+GEN_HAS_MEMBER(enable_sgpr_workgroup_info)
+GEN_HAS_MEMBER(enable_vgpr_workitem_id)
+GEN_HAS_MEMBER(enable_exception_msb)
+GEN_HAS_MEMBER(granulated_lds_size)
+GEN_HAS_MEMBER(enable_exception)
+
+GEN_HAS_MEMBER(enable_sgpr_private_segment_buffer)
+GEN_HAS_MEMBER(enable_sgpr_dispatch_ptr)
+GEN_HAS_MEMBER(enable_sgpr_queue_ptr)
+GEN_HAS_MEMBER(enable_sgpr_kernarg_segment_ptr)
+GEN_HAS_MEMBER(enable_sgpr_dispatch_id)
+GEN_HAS_MEMBER(enable_sgpr_flat_scratch_init)
+GEN_HAS_MEMBER(enable_sgpr_private_segment_size)
+GEN_HAS_MEMBER(enable_sgpr_grid_workgroup_count_x)
+GEN_HAS_MEMBER(enable_sgpr_grid_workgroup_count_y)
+GEN_HAS_MEMBER(enable_sgpr_grid_workgroup_count_z)
+GEN_HAS_MEMBER(enable_wavefront_size32)
+GEN_HAS_MEMBER(enable_ordered_append_gds)
+GEN_HAS_MEMBER(private_element_size)
+GEN_HAS_MEMBER(is_ptr64)
+GEN_HAS_MEMBER(is_dynamic_callstack)
+GEN_HAS_MEMBER(is_debug_enabled)
+GEN_HAS_MEMBER(is_xnack_enabled)
+
+GEN_HAS_MEMBER(workitem_private_segment_byte_size)
+GEN_HAS_MEMBER(workgroup_group_segment_byte_size)
+GEN_HAS_MEMBER(gds_segment_byte_size)
+GEN_HAS_MEMBER(kernarg_segment_byte_size)
+GEN_HAS_MEMBER(workgroup_fbarrier_count)
+GEN_HAS_MEMBER(wavefront_sgpr_count)
+GEN_HAS_MEMBER(workitem_vgpr_count)
+GEN_HAS_MEMBER(reserved_vgpr_first)
+GEN_HAS_MEMBER(reserved_vgpr_count)
+GEN_HAS_MEMBER(reserved_sgpr_first)
+GEN_HAS_MEMBER(reserved_sgpr_count)
+GEN_HAS_MEMBER(debug_wavefront_private_segment_offset_sgpr)
+GEN_HAS_MEMBER(debug_private_segment_buffer_sgpr)
+GEN_HAS_MEMBER(kernarg_segment_alignment)
+GEN_HAS_MEMBER(group_segment_alignment)
+GEN_HAS_MEMBER(private_segment_alignment)
+GEN_HAS_MEMBER(wavefront_size)
+GEN_HAS_MEMBER(call_convention)
+GEN_HAS_MEMBER(runtime_loader_kernel_symbol)
+
+static ArrayRef<StringLiteral> get_amd_kernel_code_t_FldNames() {
+ static constexpr StringLiteral const Table[] = {
+ "", // not found placeholder
#define RECORD(name, altName, print, parse) #name
-#include "AMDKernelCodeTInfo.h"
+#include "Utils/AMDKernelCodeTInfo.h"
#undef RECORD
};
return ArrayRef(Table);
}
-static ArrayRef<StringRef> get_amd_kernel_code_t_FldAltNames() {
- static StringRef const Table[] = {
- "", // not found placeholder
+static ArrayRef<StringLiteral> get_amd_kernel_code_t_FldAltNames() {
+ static constexpr StringLiteral const Table[] = {
+ "", // not found placeholder
#define RECORD(name, altName, print, parse) #altName
-#include "AMDKernelCodeTInfo.h"
+#include "Utils/AMDKernelCodeTInfo.h"
+#undef RECORD
+ };
+ return ArrayRef(Table);
+}
+
+static ArrayRef<bool> hasMCExprVersionTable() {
+ static bool const Table[] = {
+#define RECORD(name, altName, print, parse) (IsMCExpr##name::RESULT)
+#include "Utils/AMDKernelCodeTInfo.h"
#undef RECORD
};
return ArrayRef(Table);
}
-static StringMap<int> createIndexMap(const ArrayRef<StringRef> &names,
- const ArrayRef<StringRef> &altNames) {
+using RetrieveFx = const MCExpr *&(*)(AMDGPUMCKernelCodeT &);
+
+static ArrayRef<RetrieveFx> getMCExprIndexTable() {
+ static const RetrieveFx Table[] = {
+#define RECORD(name, altName, print, parse) GetMember##name::Get
+#include "Utils/AMDKernelCodeTInfo.h"
+#undef RECORD
+ };
+ return ArrayRef(Table);
+}
+
+static StringMap<int> createIndexMap(ArrayRef<StringLiteral> names,
+ ArrayRef<StringLiteral> altNames) {
StringMap<int> map;
assert(names.size() == altNames.size());
for (unsigned i = 0; i < names.size(); ++i) {
@@ -59,62 +220,111 @@ static int get_amd_kernel_code_t_FieldIndex(StringRef name) {
return map.lookup(name) - 1; // returns -1 if not found
}
-static StringRef get_amd_kernel_code_t_FieldName(int index) {
- return get_amd_kernel_code_t_FldNames()[index + 1];
-}
+static constexpr std::pair<unsigned, unsigned> getShiftMask(unsigned Value) {
+ unsigned Shift = 0;
+ unsigned Mask = 0;
-// Field printing
+ Mask = ~Value;
+ for (; !(Mask & 1); Shift++, Mask >>= 1) {
+ }
-static raw_ostream &printName(raw_ostream &OS, StringRef Name) {
- return OS << Name << " = ";
+ return std::make_pair(Shift, Mask);
}
-template <typename T, T amd_kernel_code_t::*ptr>
-static void printField(StringRef Name, const amd_kernel_code_t &C,
- raw_ostream &OS) {
- printName(OS, Name) << (int)(C.*ptr);
+static const MCExpr *MaskShiftSet(const MCExpr *Val, uint32_t Mask,
+ uint32_t Shift, MCContext &Ctx) {
+ if (Mask) {
+ const MCExpr *MaskExpr = MCConstantExpr::create(Mask, Ctx);
+ Val = MCBinaryExpr::createAnd(Val, MaskExpr, Ctx);
+ }
+ if (Shift) {
+ const MCExpr *ShiftExpr = MCConstantExpr::create(Shift, Ctx);
+ Val = MCBinaryExpr::createShl(Val, ShiftExpr, Ctx);
+ }
+ return Val;
}
-template <typename T, T amd_kernel_code_t::*ptr, int shift, int width = 1>
-static void printBitField(StringRef Name, const amd_kernel_code_t &c,
- raw_ostream &OS) {
+static const MCExpr *MaskShiftGet(const MCExpr *Val, uint32_t Mask,
+ uint32_t Shift, MCContext &Ctx) {
+ if (Shift) {
+ const MCExpr *ShiftExpr = MCConstantExpr::create(Shift, Ctx);
+ Val = MCBinaryExpr::createLShr(Val, ShiftExpr, Ctx);
+ }
+ if (Mask) {
+ const MCExpr *MaskExpr = MCConstantExpr::create(Mask, Ctx);
+ Val = MCBinaryExpr::createAnd(Val, MaskExpr, Ctx);
+ }
+ return Val;
+}
+
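[Note] getShiftMask above recovers a field's shift and contiguous width mask from the C_* clear constants defined in SIDefines.h, e.g. from C_00B84C_LDS_SIZE = 0xFF007FFF it derives Shift = 15 and Mask = 0x1FF, matching the hand-written S_00B84C_LDS_SIZE(x) = ((x) & 0x1FF) << 15. A standalone replica with that check:

    #include <utility>

    constexpr std::pair<unsigned, unsigned> getShiftMask(unsigned Value) {
      unsigned Shift = 0;
      unsigned Mask = ~Value; // set bits mark the field
      for (; !(Mask & 1); ++Shift, Mask >>= 1) {
      }
      return {Shift, Mask};
    }

    // C_00B84C_LDS_SIZE = 0xFF007FFF: LDS_SIZE is a 9-bit field at offset 15.
    static_assert(getShiftMask(0xFF007FFF).first == 15, "shift");
    static_assert(getShiftMask(0xFF007FFF).second == 0x1FF, "mask");

    int main() {}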
+class PrintField {
+public:
+ template <typename T, T AMDGPUMCKernelCodeT::*ptr,
+ typename std::enable_if_t<!std::is_integral_v<T>, T> * = nullptr>
+ static void printField(StringRef Name, const AMDGPUMCKernelCodeT &C,
+ raw_ostream &OS, MCContext &Ctx) {
+ OS << Name << " = ";
+ const MCExpr *Value = C.*ptr;
+ int64_t Val;
+ if (Value->evaluateAsAbsolute(Val))
+ OS << Val;
+ else
+ Value->print(OS, Ctx.getAsmInfo());
+ }
+
+ template <typename T, T AMDGPUMCKernelCodeT::*ptr,
+ typename std::enable_if_t<std::is_integral_v<T>, T> * = nullptr>
+ static void printField(StringRef Name, const AMDGPUMCKernelCodeT &C,
+ raw_ostream &OS, MCContext &) {
+ OS << Name << " = " << (int)(C.*ptr);
+ }
+};
+
+template <typename T, T AMDGPUMCKernelCodeT::*ptr, int shift, int width = 1>
+static void printBitField(StringRef Name, const AMDGPUMCKernelCodeT &C,
+ raw_ostream &OS, MCContext &) {
const auto Mask = (static_cast<T>(1) << width) - 1;
- printName(OS, Name) << (int)((c.*ptr >> shift) & Mask);
+ OS << Name << " = " << (int)((C.*ptr >> shift) & Mask);
}
-using PrintFx = void(*)(StringRef, const amd_kernel_code_t &, raw_ostream &);
+using PrintFx = void (*)(StringRef, const AMDGPUMCKernelCodeT &, raw_ostream &,
+ MCContext &);
static ArrayRef<PrintFx> getPrinterTable() {
static const PrintFx Table[] = {
+#define COMPPGM1(name, aname, AccMacro) \
+ COMPPGM(name, aname, C_00B848_##AccMacro, S_00B848_##AccMacro, 0)
+#define COMPPGM2(name, aname, AccMacro) \
+ COMPPGM(name, aname, C_00B84C_##AccMacro, S_00B84C_##AccMacro, 32)
+#define PRINTFIELD(sname, aname, name) PrintField::printField<FLD_T(name)>
+#define PRINTCOMP(Complement, PGMType) \
+ [](StringRef Name, const AMDGPUMCKernelCodeT &C, raw_ostream &OS, \
+ MCContext &Ctx) { \
+ OS << Name << " = "; \
+ auto [Shift, Mask] = getShiftMask(Complement); \
+ const MCExpr *Value; \
+ if (PGMType == 0) { \
+ Value = \
+ MaskShiftGet(C.compute_pgm_resource1_registers, Mask, Shift, Ctx); \
+ } else { \
+ Value = \
+ MaskShiftGet(C.compute_pgm_resource2_registers, Mask, Shift, Ctx); \
+ } \
+ int64_t Val; \
+ if (Value->evaluateAsAbsolute(Val)) \
+ OS << Val; \
+ else \
+ Value->print(OS, Ctx.getAsmInfo()); \
+ }
#define RECORD(name, altName, print, parse) print
-#include "AMDKernelCodeTInfo.h"
+#include "Utils/AMDKernelCodeTInfo.h"
#undef RECORD
};
return ArrayRef(Table);
}
-void llvm::printAmdKernelCodeField(const amd_kernel_code_t &C,
- int FldIndex,
- raw_ostream &OS) {
- auto Printer = getPrinterTable()[FldIndex];
- if (Printer)
- Printer(get_amd_kernel_code_t_FieldName(FldIndex), C, OS);
-}
-
-void llvm::dumpAmdKernelCode(const amd_kernel_code_t *C,
- raw_ostream &OS,
- const char *tab) {
- const int Size = getPrinterTable().size();
- for (int i = 0; i < Size; ++i) {
- OS << tab;
- printAmdKernelCodeField(*C, i, OS);
- OS << '\n';
- }
-}
-
-// Field parsing
-
-static bool expectAbsExpression(MCAsmParser &MCParser, int64_t &Value, raw_ostream& Err) {
+static bool expectAbsExpression(MCAsmParser &MCParser, int64_t &Value,
+ raw_ostream &Err) {
if (MCParser.getLexer().isNot(AsmToken::Equal)) {
Err << "expected '='";
@@ -129,8 +339,8 @@ static bool expectAbsExpression(MCAsmParser &MCParser, int64_t &Value, raw_ostre
return true;
}
-template <typename T, T amd_kernel_code_t::*ptr>
-static bool parseField(amd_kernel_code_t &C, MCAsmParser &MCParser,
+template <typename T, T AMDGPUMCKernelCodeT::*ptr>
+static bool parseField(AMDGPUMCKernelCodeT &C, MCAsmParser &MCParser,
raw_ostream &Err) {
int64_t Value = 0;
if (!expectAbsExpression(MCParser, Value, Err))
@@ -139,39 +349,241 @@ static bool parseField(amd_kernel_code_t &C, MCAsmParser &MCParser,
return true;
}
-template <typename T, T amd_kernel_code_t::*ptr, int shift, int width = 1>
-static bool parseBitField(amd_kernel_code_t &C, MCAsmParser &MCParser,
+template <typename T, T AMDGPUMCKernelCodeT::*ptr, int shift, int width = 1>
+static bool parseBitField(AMDGPUMCKernelCodeT &C, MCAsmParser &MCParser,
raw_ostream &Err) {
int64_t Value = 0;
if (!expectAbsExpression(MCParser, Value, Err))
return false;
- const uint64_t Mask = ((UINT64_C(1) << width) - 1) << shift;
+ const uint64_t Mask = ((UINT64_C(1) << width) - 1) << shift;
C.*ptr &= (T)~Mask;
C.*ptr |= (T)((Value << shift) & Mask);
return true;
}
-using ParseFx = bool(*)(amd_kernel_code_t &, MCAsmParser &MCParser,
- raw_ostream &Err);
+static bool parseExpr(MCAsmParser &MCParser, const MCExpr *&Value,
+ raw_ostream &Err) {
+ if (MCParser.getLexer().isNot(AsmToken::Equal)) {
+ Err << "expected '='";
+ return false;
+ }
+ MCParser.getLexer().Lex();
+
+ if (MCParser.parseExpression(Value)) {
+ Err << "Could not parse expression";
+ return false;
+ }
+ return true;
+}
+
+using ParseFx = bool (*)(AMDGPUMCKernelCodeT &, MCAsmParser &, raw_ostream &);
static ArrayRef<ParseFx> getParserTable() {
static const ParseFx Table[] = {
+#define COMPPGM1(name, aname, AccMacro) \
+ COMPPGM(name, aname, G_00B848_##AccMacro, C_00B848_##AccMacro, 0)
+#define COMPPGM2(name, aname, AccMacro) \
+ COMPPGM(name, aname, G_00B84C_##AccMacro, C_00B84C_##AccMacro, 32)
+#define PARSECOMP(Complement, PGMType) \
+ [](AMDGPUMCKernelCodeT &C, MCAsmParser &MCParser, \
+ raw_ostream &Err) -> bool { \
+ MCContext &Ctx = MCParser.getContext(); \
+ const MCExpr *Value; \
+ if (!parseExpr(MCParser, Value, Err)) \
+ return false; \
+ auto [Shift, Mask] = getShiftMask(Complement); \
+ Value = MaskShiftSet(Value, Mask, Shift, Ctx); \
+ const MCExpr *Compl = MCConstantExpr::create(Complement, Ctx); \
+ if (PGMType == 0) { \
+ C.compute_pgm_resource1_registers = MCBinaryExpr::createAnd( \
+ C.compute_pgm_resource1_registers, Compl, Ctx); \
+ C.compute_pgm_resource1_registers = MCBinaryExpr::createOr( \
+ C.compute_pgm_resource1_registers, Value, Ctx); \
+ } else { \
+ C.compute_pgm_resource2_registers = MCBinaryExpr::createAnd( \
+ C.compute_pgm_resource2_registers, Compl, Ctx); \
+ C.compute_pgm_resource2_registers = MCBinaryExpr::createOr( \
+ C.compute_pgm_resource2_registers, Value, Ctx); \
+ } \
+ return true; \
+ }
#define RECORD(name, altName, print, parse) parse
-#include "AMDKernelCodeTInfo.h"
+#include "Utils/AMDKernelCodeTInfo.h"
#undef RECORD
};
return ArrayRef(Table);
}
-bool llvm::parseAmdKernelCodeField(StringRef ID,
- MCAsmParser &MCParser,
- amd_kernel_code_t &C,
- raw_ostream &Err) {
+static void printAmdKernelCodeField(const AMDGPUMCKernelCodeT &C, int FldIndex,
+ raw_ostream &OS, MCContext &Ctx) {
+ auto Printer = getPrinterTable()[FldIndex];
+ if (Printer)
+ Printer(get_amd_kernel_code_t_FldNames()[FldIndex + 1], C, OS, Ctx);
+}
+
+void AMDGPUMCKernelCodeT::initDefault(const MCSubtargetInfo *STI,
+ MCContext &Ctx, bool InitMCExpr) {
+ AMDGPUMCKernelCodeT();
+
+ AMDGPU::initDefaultAMDKernelCodeT(*this, STI);
+
+ if (InitMCExpr) {
+ const MCExpr *ZeroExpr = MCConstantExpr::create(0, Ctx);
+ compute_pgm_resource1_registers =
+ MCConstantExpr::create(Lo_32(compute_pgm_resource_registers), Ctx);
+ compute_pgm_resource2_registers =
+ MCConstantExpr::create(Hi_32(compute_pgm_resource_registers), Ctx);
+ is_dynamic_callstack = ZeroExpr;
+ wavefront_sgpr_count = ZeroExpr;
+ workitem_vgpr_count = ZeroExpr;
+ workitem_private_segment_byte_size = ZeroExpr;
+ }
+}
+
+void AMDGPUMCKernelCodeT::validate(const MCSubtargetInfo *STI, MCContext &Ctx) {
+ int64_t Value;
+ if (!compute_pgm_resource1_registers->evaluateAsAbsolute(Value))
+ return;
+
+ if (G_00B848_DX10_CLAMP(Value) && AMDGPU::isGFX12Plus(*STI)) {
+ Ctx.reportError({}, "enable_dx10_clamp=1 is not allowed on GFX12+");
+ return;
+ }
+
+ if (G_00B848_IEEE_MODE(Value) && AMDGPU::isGFX12Plus(*STI)) {
+ Ctx.reportError({}, "enable_ieee_mode=1 is not allowed on GFX12+");
+ return;
+ }
+
+ if (G_00B848_WGP_MODE(Value) && !AMDGPU::isGFX10Plus(*STI)) {
+ Ctx.reportError({}, "enable_wgp_mode=1 is only allowed on GFX10+");
+ return;
+ }
+
+ if (G_00B848_MEM_ORDERED(Value) && !AMDGPU::isGFX10Plus(*STI)) {
+ Ctx.reportError({}, "enable_mem_ordered=1 is only allowed on GFX10+");
+ return;
+ }
+
+ if (G_00B848_FWD_PROGRESS(Value) && !AMDGPU::isGFX10Plus(*STI)) {
+ Ctx.reportError({}, "enable_fwd_progress=1 is only allowed on GFX10+");
+ return;
+ }
+}
+
+const MCExpr *&AMDGPUMCKernelCodeT::getMCExprForIndex(int Index) {
+ static const auto IndexTable = getMCExprIndexTable();
+ return IndexTable[Index](*this);
+}
+
+bool AMDGPUMCKernelCodeT::ParseKernelCodeT(StringRef ID, MCAsmParser &MCParser,
+ raw_ostream &Err) {
const int Idx = get_amd_kernel_code_t_FieldIndex(ID);
if (Idx < 0) {
Err << "unexpected amd_kernel_code_t field name " << ID;
return false;
}
+
+ if (hasMCExprVersionTable()[Idx]) {
+ const MCExpr *Value;
+ if (!parseExpr(MCParser, Value, Err))
+ return false;
+ getMCExprForIndex(Idx) = Value;
+ return true;
+ }
auto Parser = getParserTable()[Idx];
- return Parser ? Parser(C, MCParser, Err) : false;
+ return Parser ? Parser(*this, MCParser, Err) : false;
+}
+
+void AMDGPUMCKernelCodeT::EmitKernelCodeT(raw_ostream &OS, MCContext &Ctx) {
+ const int Size = hasMCExprVersionTable().size();
+ for (int i = 0; i < Size; ++i) {
+ OS << "\t\t";
+ if (hasMCExprVersionTable()[i]) {
+ OS << get_amd_kernel_code_t_FldNames()[i + 1] << " = ";
+ int64_t Val;
+ const MCExpr *Value = getMCExprForIndex(i);
+ if (Value->evaluateAsAbsolute(Val))
+ OS << Val;
+ else
+ Value->print(OS, Ctx.getAsmInfo());
+ } else {
+ printAmdKernelCodeField(*this, i, OS, Ctx);
+ }
+ OS << '\n';
+ }
+}
+
+void AMDGPUMCKernelCodeT::EmitKernelCodeT(MCStreamer &OS, MCContext &Ctx) {
+ OS.emitIntValue(amd_kernel_code_version_major, /*Size=*/4);
+ OS.emitIntValue(amd_kernel_code_version_minor, /*Size=*/4);
+ OS.emitIntValue(amd_machine_kind, /*Size=*/2);
+ OS.emitIntValue(amd_machine_version_major, /*Size=*/2);
+ OS.emitIntValue(amd_machine_version_minor, /*Size=*/2);
+ OS.emitIntValue(amd_machine_version_stepping, /*Size=*/2);
+ OS.emitIntValue(kernel_code_entry_byte_offset, /*Size=*/8);
+ OS.emitIntValue(kernel_code_prefetch_byte_offset, /*Size=*/8);
+ OS.emitIntValue(kernel_code_prefetch_byte_size, /*Size=*/8);
+ OS.emitIntValue(reserved0, /*Size=*/8);
+
+ if (compute_pgm_resource1_registers != nullptr)
+ OS.emitValue(compute_pgm_resource1_registers, /*Size=*/4);
+ else
+ OS.emitIntValue(Lo_32(compute_pgm_resource_registers),
+ /*Size=*/4);
+
+ if (compute_pgm_resource2_registers != nullptr)
+ OS.emitValue(compute_pgm_resource2_registers, /*Size=*/4);
+ else
+ OS.emitIntValue(Hi_32(compute_pgm_resource_registers),
+ /*Size=*/4);
+
+ if (is_dynamic_callstack != nullptr) {
+ const MCExpr *CodeProps = MCConstantExpr::create(code_properties, Ctx);
+ CodeProps = MCBinaryExpr::createOr(
+ CodeProps,
+ MaskShiftSet(is_dynamic_callstack,
+ (1 << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_WIDTH) - 1,
+ AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT, Ctx),
+ Ctx);
+ OS.emitValue(CodeProps, /*Size=*/4);
+ } else
+ OS.emitIntValue(code_properties, /*Size=*/4);
+
+ if (workitem_private_segment_byte_size != nullptr)
+ OS.emitValue(workitem_private_segment_byte_size, /*Size=*/4);
+ else
+ OS.emitIntValue(0, /*Size=*/4);
+
+ OS.emitIntValue(workgroup_group_segment_byte_size, /*Size=*/4);
+ OS.emitIntValue(gds_segment_byte_size, /*Size=*/4);
+ OS.emitIntValue(kernarg_segment_byte_size, /*Size=*/8);
+ OS.emitIntValue(workgroup_fbarrier_count, /*Size=*/4);
+
+ if (wavefront_sgpr_count != nullptr)
+ OS.emitValue(wavefront_sgpr_count, /*Size=*/2);
+ else
+ OS.emitIntValue(0, /*Size=*/2);
+
+ if (workitem_vgpr_count != nullptr)
+ OS.emitValue(workitem_vgpr_count, /*Size=*/2);
+ else
+ OS.emitIntValue(0, /*Size=*/2);
+
+ OS.emitIntValue(reserved_vgpr_first, /*Size=*/2);
+ OS.emitIntValue(reserved_vgpr_count, /*Size=*/2);
+ OS.emitIntValue(reserved_sgpr_first, /*Size=*/2);
+ OS.emitIntValue(reserved_sgpr_count, /*Size=*/2);
+ OS.emitIntValue(debug_wavefront_private_segment_offset_sgpr,
+ /*Size=*/2);
+ OS.emitIntValue(debug_private_segment_buffer_sgpr, /*Size=*/2);
+ OS.emitIntValue(kernarg_segment_alignment, /*Size=*/1);
+ OS.emitIntValue(group_segment_alignment, /*Size=*/1);
+ OS.emitIntValue(private_segment_alignment, /*Size=*/1);
+ OS.emitIntValue(wavefront_size, /*Size=*/1);
+
+ OS.emitIntValue(call_convention, /*Size=*/4);
+ OS.emitBytes(StringRef((const char *)reserved3, /*Size=*/12));
+ OS.emitIntValue(runtime_loader_kernel_symbol, /*Size=*/8);
+ OS.emitBytes(StringRef((const char *)control_directives, /*Size=*/16 * 8));
}
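
For reference, the integer path above (C.*ptr &= ~Mask; C.*ptr |= (Value << shift) & Mask) and the symbolic path that PARSECOMP builds out of MCBinaryExpr createAnd/createOr implement the same read-modify-write bitfield insert. A minimal standalone sketch of that operation (the function name is illustrative):

    #include <cstdint>

    // Insert Value into the [Shift, Shift + Width) bitfield of Field. When the
    // value is still symbolic, PARSECOMP expresses the same &~Mask / |-insert
    // pair with MCExpr nodes instead of plain integers.
    uint64_t insertBitfield(uint64_t Field, uint64_t Value, unsigned Shift,
                            unsigned Width) {
      const uint64_t Mask = ((UINT64_C(1) << Width) - 1) << Shift;
      Field &= ~Mask;                   // clear the destination field
      Field |= (Value << Shift) & Mask; // splice in the new value
      return Field;
    }
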
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.h b/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.h
index 41d0e0d745e5..6aeb98f1ce14 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.h
@@ -7,29 +7,84 @@
//===----------------------------------------------------------------------===//
//
/// \file AMDKernelCodeTUtils.h
+/// MC layer struct for AMDGPUMCKernelCodeT, providing MCExpr functionality
+/// where required.
+///
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_AMDGPU_UTILS_AMDKERNELCODETUTILS_H
-#define LLVM_LIB_TARGET_AMDGPU_UTILS_AMDKERNELCODETUTILS_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_MCTARGETDESC_AMDGPUMCKERNELCODET_H
+#define LLVM_LIB_TARGET_AMDGPU_MCTARGETDESC_AMDGPUMCKERNELCODET_H
-struct amd_kernel_code_t;
+#include "AMDKernelCodeT.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
namespace llvm {
-
class MCAsmParser;
+class MCContext;
+class MCExpr;
+class MCStreamer;
+class MCSubtargetInfo;
class raw_ostream;
-class StringRef;
+namespace AMDGPU {
+
+struct AMDGPUMCKernelCodeT {
+ AMDGPUMCKernelCodeT() = default;
+
+ // Names of most (if not all) members should match the ones used for table
+ // driven (array) generation in AMDKernelCodeTInfo.h.
+ uint32_t amd_kernel_code_version_major = 0;
+ uint32_t amd_kernel_code_version_minor = 0;
+ uint16_t amd_machine_kind = 0;
+ uint16_t amd_machine_version_major = 0;
+ uint16_t amd_machine_version_minor = 0;
+ uint16_t amd_machine_version_stepping = 0;
+ int64_t kernel_code_entry_byte_offset = 0;
+ int64_t kernel_code_prefetch_byte_offset = 0;
+ uint64_t kernel_code_prefetch_byte_size = 0;
+ uint64_t reserved0 = 0;
+ uint64_t compute_pgm_resource_registers = 0;
+ uint32_t code_properties = 0;
+ uint32_t workgroup_group_segment_byte_size = 0;
+ uint32_t gds_segment_byte_size = 0;
+ uint64_t kernarg_segment_byte_size = 0;
+ uint32_t workgroup_fbarrier_count = 0;
+ uint16_t reserved_vgpr_first = 0;
+ uint16_t reserved_vgpr_count = 0;
+ uint16_t reserved_sgpr_first = 0;
+ uint16_t reserved_sgpr_count = 0;
+ uint16_t debug_wavefront_private_segment_offset_sgpr = 0;
+ uint16_t debug_private_segment_buffer_sgpr = 0;
+ uint8_t kernarg_segment_alignment = 0;
+ uint8_t group_segment_alignment = 0;
+ uint8_t private_segment_alignment = 0;
+ uint8_t wavefront_size = 0;
+ int32_t call_convention = 0;
+ uint8_t reserved3[12] = {0};
+ uint64_t runtime_loader_kernel_symbol = 0;
+ uint64_t control_directives[16] = {0};
+
+ const MCExpr *compute_pgm_resource1_registers = nullptr;
+ const MCExpr *compute_pgm_resource2_registers = nullptr;
+
+ const MCExpr *is_dynamic_callstack = nullptr;
+ const MCExpr *wavefront_sgpr_count = nullptr;
+ const MCExpr *workitem_vgpr_count = nullptr;
+ const MCExpr *workitem_private_segment_byte_size = nullptr;
-void printAmdKernelCodeField(const amd_kernel_code_t &C, int FldIndex,
- raw_ostream &OS);
+ void initDefault(const MCSubtargetInfo *STI, MCContext &Ctx,
+ bool InitMCExpr = true);
+ void validate(const MCSubtargetInfo *STI, MCContext &Ctx);
-void dumpAmdKernelCode(const amd_kernel_code_t *C, raw_ostream &OS,
- const char *tab);
+ const MCExpr *&getMCExprForIndex(int Index);
-bool parseAmdKernelCodeField(StringRef ID, MCAsmParser &Parser,
- amd_kernel_code_t &C, raw_ostream &Err);
+ bool ParseKernelCodeT(StringRef ID, MCAsmParser &MCParser, raw_ostream &Err);
+ void EmitKernelCodeT(raw_ostream &OS, MCContext &Ctx);
+ void EmitKernelCodeT(MCStreamer &OS, MCContext &Ctx);
+};
+} // end namespace AMDGPU
} // end namespace llvm
-#endif // LLVM_LIB_TARGET_AMDGPU_UTILS_AMDKERNELCODETUTILS_H
+#endif // LLVM_LIB_TARGET_AMDGPU_MCTARGETDESC_AMDGPUMCKERNELCODET_H
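
A hypothetical usage sketch of the new struct, assuming an MCSubtargetInfo *STI, an MCContext Ctx, an MCAsmParser Parser positioned on the '=' after the field name, and an MCStreamer Streamer are in scope; everything except the struct's own API is illustrative:

    AMDGPU::AMDGPUMCKernelCodeT KC;
    KC.initDefault(STI, Ctx);          // integer defaults plus zeroed MCExprs

    std::string ErrStr;
    llvm::raw_string_ostream Err(ErrStr);
    if (!KC.ParseKernelCodeT("wavefront_size", Parser, Err))
      Ctx.reportError({}, ErrStr);     // the parser wrote its message into ErrStr

    KC.validate(STI, Ctx);             // e.g. rejects enable_dx10_clamp on GFX12+
    KC.EmitKernelCodeT(Streamer, Ctx); // binary layout; MCExpr fields via emitValue
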
diff --git a/llvm/lib/Target/AMDGPU/Utils/CMakeLists.txt b/llvm/lib/Target/AMDGPU/Utils/CMakeLists.txt
index 19d3b690b131..2f4ce8eaf1d6 100644
--- a/llvm/lib/Target/AMDGPU/Utils/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/Utils/CMakeLists.txt
@@ -11,6 +11,7 @@ add_llvm_component_library(LLVMAMDGPUUtils
CodeGenTypes
Core
MC
+ MCParser
Support
TargetParser
diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
index b875ddc62a7a..586a4a74ec34 100644
--- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
@@ -1431,38 +1431,12 @@ defm V_CVT_F32_BF8 : VOP1_Real_NoDstSel_SDWA_gfx9<0x55>;
defm V_CVT_PK_F32_FP8 : VOP1_Real_NoDstSel_SDWA_gfx9<0x56>;
defm V_CVT_PK_F32_BF8 : VOP1_Real_NoDstSel_SDWA_gfx9<0x57>;
-//===----------------------------------------------------------------------===//
-// GFX10
-//===----------------------------------------------------------------------===//
-
-let OtherPredicates = [isGFX10Only] in {
-def : GCNPat <
+class MovDPP8Pattern<Predicate Pred, Instruction Inst> : GCNPat <
(i32 (int_amdgcn_mov_dpp8 i32:$src, timm:$dpp8)),
- (V_MOV_B32_dpp8_gfx10 VGPR_32:$src, VGPR_32:$src,
- (as_i32timm $dpp8), (i32 DPP8Mode.FI_0))
->;
-} // End OtherPredicates = [isGFX10Only]
-
-//===----------------------------------------------------------------------===//
-// GFX11
-//===----------------------------------------------------------------------===//
-
-let OtherPredicates = [isGFX11Only] in {
-def : GCNPat <
- (i32 (int_amdgcn_mov_dpp8 i32:$src, timm:$dpp8)),
- (V_MOV_B32_dpp8_gfx11 VGPR_32:$src, VGPR_32:$src,
- (as_i32timm $dpp8), (i32 DPP8Mode.FI_0))
->;
-} // End OtherPredicates = [isGFX11Only]
-
-//===----------------------------------------------------------------------===//
-// GFX12
-//===----------------------------------------------------------------------===//
+ (Inst VGPR_32:$src, VGPR_32:$src, (as_i32timm $dpp8), (i32 DPP8Mode.FI_0))> {
+ let OtherPredicates = [Pred];
+}
-let OtherPredicates = [isGFX12Only] in {
-def : GCNPat <
- (i32 (int_amdgcn_mov_dpp8 i32:$src, timm:$dpp8)),
- (V_MOV_B32_dpp8_gfx12 VGPR_32:$src, VGPR_32:$src,
- (as_i32timm $dpp8), (i32 DPP8Mode.FI_0))
->;
-} // End OtherPredicates = [isGFX12Only]
+def : MovDPP8Pattern<isGFX10Only, V_MOV_B32_dpp8_gfx10>;
+def : MovDPP8Pattern<isGFX11Only, V_MOV_B32_dpp8_gfx11>;
+def : MovDPP8Pattern<isGFX12Only, V_MOV_B32_dpp8_gfx12>;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 73f8bda9a021..5090d8bf6cf2 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -14381,9 +14381,17 @@ static SDValue CombineANDShift(SDNode *N,
}
}
- // FIXME: Transform "(and (shl x, c2) c1)" ->
- // "(shl (and x, c1>>c2), c2)" if "c1 >> c2" is a cheaper immediate than
- // c1.
+ // Transform "(and (shl x, c2) c1)" into "(shl (and x, c1>>c2), c2)"
+ // if "c1 >> c2" is a cheaper immediate than "c1"
+ if (LeftShift &&
+ HasLowerConstantMaterializationCost(C1 >> C2, C1, Subtarget)) {
+
+ SDValue And = DAG.getNode(ISD::AND, DL, MVT::i32, N0->getOperand(0),
+ DAG.getConstant(C1 >> C2, DL, MVT::i32));
+ return DAG.getNode(ISD::SHL, DL, MVT::i32, And,
+ DAG.getConstant(C2, DL, MVT::i32));
+ }
+
return SDValue();
}
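
The transform depends on an unconditional identity rather than on value ranges: x << c2 already has its low c2 bits clear, so masking with c1 and masking with (c1 >> c2) << c2 agree. A quick self-contained check (illustrative only):

    #include <cassert>
    #include <cstdint>

    // ((x << c2) & c1) == ((x & (c1 >> c2)) << c2) for any x, c1 and c2 < 32,
    // which is why the combine needs only the materialization-cost test.
    void checkAndShiftIdentity(uint32_t x, uint32_t c1, unsigned c2) {
      assert(((x << c2) & c1) == ((x & (c1 >> c2)) << c2));
    }
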
diff --git a/llvm/lib/Target/BPF/BPFMIChecking.cpp b/llvm/lib/Target/BPF/BPFMIChecking.cpp
index 89ac485b1675..a968950f5bfc 100644
--- a/llvm/lib/Target/BPF/BPFMIChecking.cpp
+++ b/llvm/lib/Target/BPF/BPFMIChecking.cpp
@@ -20,6 +20,7 @@
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
@@ -164,11 +165,9 @@ bool BPFMIPreEmitChecking::processAtomicInsts() {
if (hasLiveDefs(MI, TRI)) {
DebugLoc Empty;
const DebugLoc &DL = MI.getDebugLoc();
- if (DL != Empty)
- report_fatal_error(Twine("line ") + std::to_string(DL.getLine()) +
- ": Invalid usage of the XADD return value", false);
- else
- report_fatal_error("Invalid usage of the XADD return value", false);
+ const Function &F = MF->getFunction();
+ F.getContext().diagnose(DiagnosticInfoUnsupported{
+ F, "Invalid usage of the XADD return value", DL});
}
}
}
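
The pass flags atomic adds whose old value is consumed; switching to DiagnosticInfoUnsupported reports the offending function and source location instead of aborting the whole compile. A sketch of the kind of source that trips it, assuming a BPF target without the newer atomic instructions:

    // The returned old value keeps the XADD def live, which the checker
    // diagnoses as "Invalid usage of the XADD return value".
    long bump(long *counter) {
      return __sync_fetch_and_add(counter, 1); // consuming the result is the error
    }
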
diff --git a/llvm/lib/Target/DirectX/DXILOpLowering.cpp b/llvm/lib/Target/DirectX/DXILOpLowering.cpp
index f09e322f88e1..1329308ffec2 100644
--- a/llvm/lib/Target/DirectX/DXILOpLowering.cpp
+++ b/llvm/lib/Target/DirectX/DXILOpLowering.cpp
@@ -41,7 +41,7 @@ static bool isVectorArgExpansion(Function &F) {
}
static SmallVector<Value *> populateOperands(Value *Arg, IRBuilder<> &Builder) {
- SmallVector<Value *, 4> ExtractedElements;
+ SmallVector<Value *> ExtractedElements;
auto *VecArg = dyn_cast<FixedVectorType>(Arg->getType());
for (unsigned I = 0; I < VecArg->getNumElements(); ++I) {
Value *Index = ConstantInt::get(Type::getInt32Ty(Arg->getContext()), I);
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 41462cceef51..22a88734afd4 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -732,6 +732,18 @@ SDValue HexagonTargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
return DAG.getNode(HexagonISD::READCYCLE, dl, VTs, Chain);
}
+// Custom-handle ISD::READSTEADYCOUNTER because the target-independent SDNode
+// is marked as having side-effects, while the register read on Hexagon does
+// not have any. TableGen refuses to accept the direct pattern from that node
+// to the A4_tfrcpp.
+SDValue HexagonTargetLowering::LowerREADSTEADYCOUNTER(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue Chain = Op.getOperand(0);
+ SDLoc dl(Op);
+ SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
+ return DAG.getNode(HexagonISD::READTIMER, dl, VTs, Chain);
+}
+
SDValue HexagonTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
SelectionDAG &DAG) const {
SDValue Chain = Op.getOperand(0);
@@ -1507,6 +1519,7 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::INLINEASM_BR, MVT::Other, Custom);
setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
+ setOperationAction(ISD::READSTEADYCOUNTER, MVT::i64, Custom);
setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
@@ -1932,6 +1945,7 @@ const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
case HexagonISD::VINSERTW0: return "HexagonISD::VINSERTW0";
case HexagonISD::VROR: return "HexagonISD::VROR";
case HexagonISD::READCYCLE: return "HexagonISD::READCYCLE";
+ case HexagonISD::READTIMER: return "HexagonISD::READTIMER";
case HexagonISD::PTRUE: return "HexagonISD::PTRUE";
case HexagonISD::PFALSE: return "HexagonISD::PFALSE";
case HexagonISD::D2P: return "HexagonISD::D2P";
@@ -3389,6 +3403,7 @@ HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
case ISD::PREFETCH: return LowerPREFETCH(Op, DAG);
case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG);
+ case ISD::READSTEADYCOUNTER: return LowerREADSTEADYCOUNTER(Op, DAG);
break;
}
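
The new hook reuses the READCYCLE strategy: the steady counter is just another 64-bit control register pair, so the node lowers to the same A4_tfrcpp transfer, only from UTIMER. At the source level this serves the llvm.readsteadycounter intrinsic; a hedged sketch, assuming Clang's builtin of the same name is available:

    // Reads the 64-bit steady (timer) counter; with this lowering Hexagon
    // selects a single register-pair transfer instead of failing to select.
    unsigned long long readTimer() {
      return __builtin_readsteadycounter();
    }
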
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.h b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
index 2ddbed050639..3fd961f5a746 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
@@ -77,6 +77,7 @@ enum NodeType : unsigned {
EH_RETURN,
DCFETCH,
READCYCLE,
+ READTIMER,
PTRUE,
PFALSE,
D2P, // Convert 8-byte value to 8-bit predicate register. [*]
@@ -207,6 +208,7 @@ public:
SDValue LowerFDIV(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerREADSTEADYCOUNTER(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEH_LABEL(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
SDValue
diff --git a/llvm/lib/Target/Hexagon/HexagonPatterns.td b/llvm/lib/Target/Hexagon/HexagonPatterns.td
index ea7c4acd0e83..baa552fcd220 100644
--- a/llvm/lib/Target/Hexagon/HexagonPatterns.td
+++ b/llvm/lib/Target/Hexagon/HexagonPatterns.td
@@ -3392,6 +3392,12 @@ def HexagonREADCYCLE: SDNode<"HexagonISD::READCYCLE", SDTInt64Leaf,
def: Pat<(HexagonREADCYCLE), (A4_tfrcpp UPCYCLE)>;
+// Read time counter.
+def HexagonREADTIMER: SDNode<"HexagonISD::READTIMER", SDTInt64Leaf,
+ [SDNPHasChain]>;
+
+def: Pat<(HexagonREADTIMER), (A4_tfrcpp UTIMER)>;
+
// The declared return value of the store-locked intrinsics is i32, but
// the instructions actually define i1. To avoid register copies from
// IntRegs to PredRegs and back, fold the entire pattern checking the
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index fe2c613b1b30..8a87c82a205b 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -139,6 +139,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
setOperationAction(ISD::BSWAP, MVT::i32, Custom);
+ setOperationAction({ISD::UDIV, ISD::UREM}, MVT::i32, Custom);
}
// Set operations for LA32 only.
@@ -1665,6 +1666,10 @@ static LoongArchISD::NodeType getLoongArchWOpcode(unsigned Opcode) {
switch (Opcode) {
default:
llvm_unreachable("Unexpected opcode");
+ case ISD::UDIV:
+ return LoongArchISD::DIV_WU;
+ case ISD::UREM:
+ return LoongArchISD::MOD_WU;
case ISD::SHL:
return LoongArchISD::SLL_W;
case ISD::SRA:
@@ -1841,6 +1846,12 @@ void LoongArchTargetLowering::ReplaceNodeResults(
switch (N->getOpcode()) {
default:
llvm_unreachable("Don't know how to legalize this operation");
+ case ISD::UDIV:
+ case ISD::UREM:
+ assert(VT == MVT::i32 && Subtarget.is64Bit() &&
+ "Unexpected custom legalisation");
+ Results.push_back(customLegalizeToWOp(N, DAG, 2, ISD::SIGN_EXTEND));
+ break;
case ISD::SHL:
case ISD::SRA:
case ISD::SRL:
@@ -3445,6 +3456,8 @@ const char *LoongArchTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(BITREV_W)
NODE_NAME_CASE(ROTR_W)
NODE_NAME_CASE(ROTL_W)
+ NODE_NAME_CASE(DIV_WU)
+ NODE_NAME_CASE(MOD_WU)
NODE_NAME_CASE(CLZ_W)
NODE_NAME_CASE(CTZ_W)
NODE_NAME_CASE(DBAR)
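
customLegalizeToWOp widens the i32 operands with ISD::SIGN_EXTEND, the canonical form LoongArch *_W nodes use for 32-bit values held in 64-bit registers; the instruction itself consumes only the low 32 bits. A scalar model of DIV_WU under that convention (names hypothetical):

    #include <cstdint>

    // Models the DAG-level semantics of DIV_WU on LA64: operands are taken
    // from the low 32 bits, and the i32 result is kept sign-extended.
    int64_t div_wu(int64_t ra, int64_t rb) {
      uint32_t a = static_cast<uint32_t>(ra);
      uint32_t b = static_cast<uint32_t>(rb);
      return static_cast<int32_t>(a / b); // implicit sign-extension to 64 bits
    }
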
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index de3f45172e25..f274b1971fd2 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -43,6 +43,10 @@ enum NodeType : unsigned {
ROTL_W,
ROTR_W,
+ // 32-bit unsigned integer division and modulo
+ DIV_WU,
+ MOD_WU,
+
// FPR<->GPR transfer operations
MOVGR2FR_W_LA64,
MOVFR2GR_S_LA64,
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index f56f8f7e1179..35ea9f07866d 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -85,6 +85,8 @@ def loongarch_sll_w : SDNode<"LoongArchISD::SLL_W", SDT_LoongArchIntBinOpW>;
def loongarch_sra_w : SDNode<"LoongArchISD::SRA_W", SDT_LoongArchIntBinOpW>;
def loongarch_srl_w : SDNode<"LoongArchISD::SRL_W", SDT_LoongArchIntBinOpW>;
def loongarch_rotr_w : SDNode<"LoongArchISD::ROTR_W", SDT_LoongArchIntBinOpW>;
+def loongarch_div_wu : SDNode<"LoongArchISD::DIV_WU", SDT_LoongArchIntBinOpW>;
+def loongarch_mod_wu : SDNode<"LoongArchISD::MOD_WU", SDT_LoongArchIntBinOpW>;
def loongarch_crc_w_b_w
: SDNode<"LoongArchISD::CRC_W_B_W", SDT_LoongArchIntBinOpW, [SDNPHasChain]>;
def loongarch_crc_w_h_w
@@ -1110,9 +1112,13 @@ def : PatGprImm_32<add, ADDI_W, simm12>;
def : PatGprGpr<sub, SUB_D>;
def : PatGprGpr_32<sub, SUB_W>;
def : PatGprGpr<sdiv, DIV_D>;
+def : PatGprGpr_32<sdiv, DIV_W>;
def : PatGprGpr<udiv, DIV_DU>;
+def : PatGprGpr<loongarch_div_wu, DIV_WU>;
def : PatGprGpr<srem, MOD_D>;
+def : PatGprGpr_32<srem, MOD_W>;
def : PatGprGpr<urem, MOD_DU>;
+def : PatGprGpr<loongarch_mod_wu, MOD_WU>;
def : PatGprGpr<rotr, ROTR_D>;
def : PatGprGpr<loongarch_rotr_w, ROTR_W>;
def : PatGprGpr_32<rotr, ROTR_W>;
diff --git a/llvm/lib/Target/LoongArch/LoongArchSubtarget.h b/llvm/lib/Target/LoongArch/LoongArchSubtarget.h
index b87ea6e2ec32..a8752c8070aa 100644
--- a/llvm/lib/Target/LoongArch/LoongArchSubtarget.h
+++ b/llvm/lib/Target/LoongArch/LoongArchSubtarget.h
@@ -37,6 +37,10 @@ class LoongArchSubtarget : public LoongArchGenSubtargetInfo {
#include "LoongArchGenSubtargetInfo.inc"
unsigned GRLen = 32;
+ // TODO: The default value is empirical and conservative. Override the
+ // default in initializeProperties once we support optimizing for more
+ // uarches.
+ uint8_t MaxInterleaveFactor = 2;
MVT GRLenVT = MVT::i32;
LoongArchABI::ABI TargetABI = LoongArchABI::ABI_Unknown;
LoongArchFrameLowering FrameLowering;
@@ -99,6 +103,7 @@ public:
Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
Align getPrefLoopAlignment() const { return PrefLoopAlignment; }
unsigned getMaxBytesForAlignment() const { return MaxBytesForAlignment; }
+ unsigned getMaxInterleaveFactor() const { return MaxInterleaveFactor; }
bool enableMachineScheduler() const override { return true; }
};
} // end namespace llvm
diff --git a/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp b/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp
index add1c60d89d2..710650acba30 100644
--- a/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp
@@ -69,6 +69,10 @@ unsigned LoongArchTTIImpl::getRegisterClassForType(bool Vector,
return LoongArchRegisterClass::GPRRC;
}
+unsigned LoongArchTTIImpl::getMaxInterleaveFactor(ElementCount VF) {
+ return ST->getMaxInterleaveFactor();
+}
+
const char *LoongArchTTIImpl::getRegisterClassName(unsigned ClassID) const {
switch (ClassID) {
case LoongArchRegisterClass::GPRRC:
diff --git a/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.h b/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.h
index 34c18163bbdb..06a03d29931d 100644
--- a/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.h
+++ b/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.h
@@ -43,6 +43,7 @@ public:
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const;
unsigned getNumberOfRegisters(unsigned ClassID) const;
unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const;
+ unsigned getMaxInterleaveFactor(ElementCount VF);
const char *getRegisterClassName(unsigned ClassID) const;
// TODO: Implement more hooks to provide TTI machinery for LoongArch.
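
An interleave factor of 2 lets the loop vectorizer keep two independent operation streams in flight per iteration, shortening loop-carried dependence chains. In scalar terms (illustrative only):

    // What IC=2 buys for a reduction: two independent accumulators halve the
    // dependence chain, merged once after the loop.
    int sum(const int *a, int n) {
      int s0 = 0, s1 = 0, i = 0;
      for (; i + 1 < n; i += 2) {
        s0 += a[i];
        s1 += a[i + 1];
      }
      if (i < n)
        s0 += a[i];
      return s0 + s1;
    }
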
diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h b/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
index 02ab5ede2c1a..aa35e7db6bda 100644
--- a/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
+++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
@@ -135,6 +135,15 @@ namespace MipsII {
OPERAND_LAST_MIPS_MEM_IMM = OPERAND_MEM_SIMM9
};
}
+
+inline static MCRegister getMSARegFromFReg(MCRegister Reg) {
+ if (Reg >= Mips::F0 && Reg <= Mips::F31)
+ return Reg - Mips::F0 + Mips::W0;
+ else if (Reg >= Mips::D0_64 && Reg <= Mips::D31_64)
+ return Reg - Mips::D0_64 + Mips::W0;
+ else
+ return Mips::NoRegister;
+}
}
#endif
diff --git a/llvm/lib/Target/Mips/Mips32r6InstrInfo.td b/llvm/lib/Target/Mips/Mips32r6InstrInfo.td
index f609305bfee4..91ffbc4eb77d 100644
--- a/llvm/lib/Target/Mips/Mips32r6InstrInfo.td
+++ b/llvm/lib/Target/Mips/Mips32r6InstrInfo.td
@@ -1119,18 +1119,24 @@ def : MipsPat<(select i32:$cond, immz, i32:$f),
// llvm.fmin/fmax operations.
let AdditionalPredicates = [NotInMicroMips] in {
- def : MipsPat<(fmaxnum f32:$lhs, f32:$rhs),
+ def : MipsPat<(fmaxnum_ieee f32:$lhs, f32:$rhs),
(MAX_S f32:$lhs, f32:$rhs)>,
ISA_MIPS32R6;
- def : MipsPat<(fmaxnum f64:$lhs, f64:$rhs),
+ def : MipsPat<(fmaxnum_ieee f64:$lhs, f64:$rhs),
(MAX_D f64:$lhs, f64:$rhs)>,
ISA_MIPS32R6;
- def : MipsPat<(fminnum f32:$lhs, f32:$rhs),
+ def : MipsPat<(fminnum_ieee f32:$lhs, f32:$rhs),
(MIN_S f32:$lhs, f32:$rhs)>,
ISA_MIPS32R6;
- def : MipsPat<(fminnum f64:$lhs, f64:$rhs),
+ def : MipsPat<(fminnum_ieee f64:$lhs, f64:$rhs),
(MIN_D f64:$lhs, f64:$rhs)>,
ISA_MIPS32R6;
+ def : MipsPat<(f32 (fcanonicalize f32:$src)),
+ (MIN_S f32:$src, f32:$src)>,
+ ISA_MIPS32R6;
+ def : MipsPat<(f64 (fcanonicalize f64:$src)),
+ (MIN_D f64:$src, f64:$src)>,
+ ISA_MIPS32R6;
}
// Pseudo instructions
diff --git a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
index 66b2b0de8d52..dda33f9a1808 100644
--- a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -565,12 +565,15 @@ bool MipsAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
}
break;
}
- case 'w':
- // Print MSA registers for the 'f' constraint
- // In LLVM, the 'w' modifier doesn't need to do anything.
- // We can just call printOperand as normal.
+ case 'w': {
+ MCRegister w = getMSARegFromFReg(MO.getReg());
+ if (w != Mips::NoRegister) {
+ O << '$' << MipsInstPrinter::getRegisterName(w);
+ return false;
+ }
break;
}
+ }
}
printOperand(MI, OpNum, O);
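
With the mapping above, the 'w' print modifier rewrites an FPU register operand ($f5 or $d5) into the overlapping MSA register ($w5), matching GCC's behavior. A hedged usage sketch, assuming an MSA-enabled target (the vector typedef and choice of instruction are illustrative):

    typedef int v4i32 __attribute__((vector_size(16)));

    // 'f' allocates an FP/MSA register; '%w1' now prints its MSA name ($w#)
    // rather than the FPU name ($f#).
    v4i32 addvi1(v4i32 v) {
      v4i32 r;
      asm("addvi.w %w0, %w1, 1" : "=f"(r) : "f"(v));
      return r;
    }
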
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 459164fa7a29..c2be8c80b7a8 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -360,11 +360,15 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
// Lower fmin and fmax operations for MIPS R6.
// Instructions are defined but never used.
- if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
- setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
- setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
- setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
- setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
+ if (Subtarget.hasMips32r6()) {
+ setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
+ setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
+ setOperationAction(ISD::FMINNUM, MVT::f32, Expand);
+ setOperationAction(ISD::FMAXNUM, MVT::f32, Expand);
+ setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
+ setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
+ setOperationAction(ISD::FMINNUM, MVT::f64, Expand);
+ setOperationAction(ISD::FMAXNUM, MVT::f64, Expand);
}
if (Subtarget.isGP64bit()) {
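
The distinction matters because ISD::FMINNUM/FMAXNUM carry libm fmin/fmax semantics, where any NaN input, quiet or signaling, selects the other operand, while R6 min.s/max.s follow IEEE-754 2008 minNum/maxNum and return a quiet NaN when a signaling NaN is present; hence only the _IEEE nodes are legal and the plain nodes are expanded. The new fcanonicalize patterns exploit the same instructions: minNum(x, x) returns x quieted. For reference (illustrative):

    #include <cmath>

    // The libm contract ISD::FMINNUM must honor: a NaN operand, even a
    // signaling one, selects the other operand.
    float fmin_libm(float a, float b) {
      if (std::isnan(a)) return b;
      if (std::isnan(b)) return a;
      return a < b ? a : b;
    }
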
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 595395bb1b4b..2713b6859ff3 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -2182,6 +2182,100 @@ bool NVPTXDAGToDAGISel::tryStoreRetval(SDNode *N) {
return true;
}
+// Helpers for constructing the opcode name (e.g. NVPTX::StoreParamV4F32_iiri)
+#define getOpcV2H(ty, opKind0, opKind1) \
+ NVPTX::StoreParamV2##ty##_##opKind0##opKind1
+
+#define getOpcV2H1(ty, opKind0, isImm1) \
+ (isImm1) ? getOpcV2H(ty, opKind0, i) : getOpcV2H(ty, opKind0, r)
+
+#define getOpcodeForVectorStParamV2(ty, isimm) \
+ (isimm[0]) ? getOpcV2H1(ty, i, isimm[1]) : getOpcV2H1(ty, r, isimm[1])
+
+#define getOpcV4H(ty, opKind0, opKind1, opKind2, opKind3) \
+ NVPTX::StoreParamV4##ty##_##opKind0##opKind1##opKind2##opKind3
+
+#define getOpcV4H3(ty, opKind0, opKind1, opKind2, isImm3) \
+ (isImm3) ? getOpcV4H(ty, opKind0, opKind1, opKind2, i) \
+ : getOpcV4H(ty, opKind0, opKind1, opKind2, r)
+
+#define getOpcV4H2(ty, opKind0, opKind1, isImm2, isImm3) \
+ (isImm2) ? getOpcV4H3(ty, opKind0, opKind1, i, isImm3) \
+ : getOpcV4H3(ty, opKind0, opKind1, r, isImm3)
+
+#define getOpcV4H1(ty, opKind0, isImm1, isImm2, isImm3) \
+ (isImm1) ? getOpcV4H2(ty, opKind0, i, isImm2, isImm3) \
+ : getOpcV4H2(ty, opKind0, r, isImm2, isImm3)
+
+#define getOpcodeForVectorStParamV4(ty, isimm) \
+ (isimm[0]) ? getOpcV4H1(ty, i, isimm[1], isimm[2], isimm[3]) \
+ : getOpcV4H1(ty, r, isimm[1], isimm[2], isimm[3])
+
+#define getOpcodeForVectorStParam(n, ty, isimm) \
+ (n == 2) ? getOpcodeForVectorStParamV2(ty, isimm) \
+ : getOpcodeForVectorStParamV4(ty, isimm)
+
+static unsigned pickOpcodeForVectorStParam(SmallVector<SDValue, 8> &Ops,
+ unsigned NumElts,
+ MVT::SimpleValueType MemTy,
+ SelectionDAG *CurDAG, SDLoc DL) {
+ // Determine which inputs are registers and which are immediates; for the
+ // immediates, build new operands holding target constants.
+ SmallVector<bool, 4> IsImm(NumElts, false);
+ for (unsigned i = 0; i < NumElts; i++) {
+ IsImm[i] = (isa<ConstantSDNode>(Ops[i]) || isa<ConstantFPSDNode>(Ops[i]));
+ if (IsImm[i]) {
+ SDValue Imm = Ops[i];
+ if (MemTy == MVT::f32 || MemTy == MVT::f64) {
+ const ConstantFPSDNode *ConstImm = cast<ConstantFPSDNode>(Imm);
+ const ConstantFP *CF = ConstImm->getConstantFPValue();
+ Imm = CurDAG->getTargetConstantFP(*CF, DL, Imm->getValueType(0));
+ } else {
+ const ConstantSDNode *ConstImm = cast<ConstantSDNode>(Imm);
+ const ConstantInt *CI = ConstImm->getConstantIntValue();
+ Imm = CurDAG->getTargetConstant(*CI, DL, Imm->getValueType(0));
+ }
+ Ops[i] = Imm;
+ }
+ }
+
+ // Get opcode for MemTy, size, and register/immediate operand ordering
+ switch (MemTy) {
+ case MVT::i8:
+ return getOpcodeForVectorStParam(NumElts, I8, IsImm);
+ case MVT::i16:
+ return getOpcodeForVectorStParam(NumElts, I16, IsImm);
+ case MVT::i32:
+ return getOpcodeForVectorStParam(NumElts, I32, IsImm);
+ case MVT::i64:
+ assert(NumElts == 2 && "MVT too large for NumElts > 2");
+ return getOpcodeForVectorStParamV2(I64, IsImm);
+ case MVT::f32:
+ return getOpcodeForVectorStParam(NumElts, F32, IsImm);
+ case MVT::f64:
+ assert(NumElts == 2 && "MVT too large for NumElts > 2");
+ return getOpcodeForVectorStParamV2(F64, IsImm);
+
+ // These cases don't support immediates; just use the all-register version
+ // and generate moves.
+ case MVT::i1:
+ return (NumElts == 2) ? NVPTX::StoreParamV2I8_rr
+ : NVPTX::StoreParamV4I8_rrrr;
+ case MVT::f16:
+ case MVT::bf16:
+ return (NumElts == 2) ? NVPTX::StoreParamV2I16_rr
+ : NVPTX::StoreParamV4I16_rrrr;
+ case MVT::v2f16:
+ case MVT::v2bf16:
+ case MVT::v2i16:
+ case MVT::v4i8:
+ return (NumElts == 2) ? NVPTX::StoreParamV2I32_rr
+ : NVPTX::StoreParamV4I32_rrrr;
+ default:
+ llvm_unreachable("Cannot select st.param for unknown MemTy");
+ }
+}
+
bool NVPTXDAGToDAGISel::tryStoreParam(SDNode *N) {
SDLoc DL(N);
SDValue Chain = N->getOperand(0);
@@ -2193,10 +2287,10 @@ bool NVPTXDAGToDAGISel::tryStoreParam(SDNode *N) {
SDValue Glue = N->getOperand(N->getNumOperands() - 1);
// How many elements do we have?
- unsigned NumElts = 1;
+ unsigned NumElts;
switch (N->getOpcode()) {
default:
- return false;
+ llvm_unreachable("Unexpected opcode");
case NVPTXISD::StoreParamU32:
case NVPTXISD::StoreParamS32:
case NVPTXISD::StoreParam:
@@ -2222,18 +2316,40 @@ bool NVPTXDAGToDAGISel::tryStoreParam(SDNode *N) {
// Determine target opcode
// If we have an i1, use an 8-bit store. The lowering code in
// NVPTXISelLowering will have already emitted an upcast.
- std::optional<unsigned> Opcode = 0;
+ std::optional<unsigned> Opcode;
switch (N->getOpcode()) {
default:
switch (NumElts) {
default:
- return false;
- case 1:
- Opcode = pickOpcodeForVT(Mem->getMemoryVT().getSimpleVT().SimpleTy,
- NVPTX::StoreParamI8, NVPTX::StoreParamI16,
- NVPTX::StoreParamI32, NVPTX::StoreParamI64,
- NVPTX::StoreParamF32, NVPTX::StoreParamF64);
- if (Opcode == NVPTX::StoreParamI8) {
+ llvm_unreachable("Unexpected NumElts");
+ case 1: {
+ MVT::SimpleValueType MemTy = Mem->getMemoryVT().getSimpleVT().SimpleTy;
+ SDValue Imm = Ops[0];
+ if (MemTy != MVT::f16 && MemTy != MVT::v2f16 &&
+ (isa<ConstantSDNode>(Imm) || isa<ConstantFPSDNode>(Imm))) {
+ // Convert immediate to target constant
+ if (MemTy == MVT::f32 || MemTy == MVT::f64) {
+ const ConstantFPSDNode *ConstImm = cast<ConstantFPSDNode>(Imm);
+ const ConstantFP *CF = ConstImm->getConstantFPValue();
+ Imm = CurDAG->getTargetConstantFP(*CF, DL, Imm->getValueType(0));
+ } else {
+ const ConstantSDNode *ConstImm = cast<ConstantSDNode>(Imm);
+ const ConstantInt *CI = ConstImm->getConstantIntValue();
+ Imm = CurDAG->getTargetConstant(*CI, DL, Imm->getValueType(0));
+ }
+ Ops[0] = Imm;
+ // Use immediate version of store param
+ Opcode = pickOpcodeForVT(MemTy, NVPTX::StoreParamI8_i,
+ NVPTX::StoreParamI16_i, NVPTX::StoreParamI32_i,
+ NVPTX::StoreParamI64_i, NVPTX::StoreParamF32_i,
+ NVPTX::StoreParamF64_i);
+ } else
+ Opcode =
+ pickOpcodeForVT(Mem->getMemoryVT().getSimpleVT().SimpleTy,
+ NVPTX::StoreParamI8_r, NVPTX::StoreParamI16_r,
+ NVPTX::StoreParamI32_r, NVPTX::StoreParamI64_r,
+ NVPTX::StoreParamF32_r, NVPTX::StoreParamF64_r);
+ if (Opcode == NVPTX::StoreParamI8_r) {
// Fine tune the opcode depending on the size of the operand.
// This helps to avoid creating redundant COPY instructions in
// InstrEmitter::AddRegisterOperand().
@@ -2241,35 +2357,28 @@ bool NVPTXDAGToDAGISel::tryStoreParam(SDNode *N) {
default:
break;
case MVT::i32:
- Opcode = NVPTX::StoreParamI8TruncI32;
+ Opcode = NVPTX::StoreParamI8TruncI32_r;
break;
case MVT::i64:
- Opcode = NVPTX::StoreParamI8TruncI64;
+ Opcode = NVPTX::StoreParamI8TruncI64_r;
break;
}
}
break;
+ }
case 2:
- Opcode = pickOpcodeForVT(Mem->getMemoryVT().getSimpleVT().SimpleTy,
- NVPTX::StoreParamV2I8, NVPTX::StoreParamV2I16,
- NVPTX::StoreParamV2I32, NVPTX::StoreParamV2I64,
- NVPTX::StoreParamV2F32, NVPTX::StoreParamV2F64);
- break;
- case 4:
- Opcode = pickOpcodeForVT(Mem->getMemoryVT().getSimpleVT().SimpleTy,
- NVPTX::StoreParamV4I8, NVPTX::StoreParamV4I16,
- NVPTX::StoreParamV4I32, std::nullopt,
- NVPTX::StoreParamV4F32, std::nullopt);
+ case 4: {
+ MVT::SimpleValueType MemTy = Mem->getMemoryVT().getSimpleVT().SimpleTy;
+ Opcode = pickOpcodeForVectorStParam(Ops, NumElts, MemTy, CurDAG, DL);
break;
}
- if (!Opcode)
- return false;
+ }
break;
// Special case: if we have a sign-extend/zero-extend node, insert the
// conversion instruction first, and use that as the value operand to
// the selected StoreParam node.
case NVPTXISD::StoreParamU32: {
- Opcode = NVPTX::StoreParamI32;
+ Opcode = NVPTX::StoreParamI32_r;
SDValue CvtNone = CurDAG->getTargetConstant(NVPTX::PTXCvtMode::NONE, DL,
MVT::i32);
SDNode *Cvt = CurDAG->getMachineNode(NVPTX::CVT_u32_u16, DL,
@@ -2278,7 +2387,7 @@ bool NVPTXDAGToDAGISel::tryStoreParam(SDNode *N) {
break;
}
case NVPTXISD::StoreParamS32: {
- Opcode = NVPTX::StoreParamI32;
+ Opcode = NVPTX::StoreParamI32_r;
SDValue CvtNone = CurDAG->getTargetConstant(NVPTX::PTXCvtMode::NONE, DL,
MVT::i32);
SDNode *Cvt = CurDAG->getMachineNode(NVPTX::CVT_s32_s16, DL,
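
The getOpcV* macro ladder in this file exists only to resolve to a compile-time opcode name; the scheme it encodes is one 'i' or 'r' letter per value operand, in operand order. A runtime sketch of the naming rule (illustrative, not how selection works):

    #include <string>
    #include <vector>

    // Maps an immediate/register pattern to the StoreParam suffix, e.g.
    // {true, false, false, true} -> "_irri" (as in StoreParamV4F32_irri).
    std::string stParamSuffix(const std::vector<bool> &IsImm) {
      std::string Suffix = "_";
      for (bool Imm : IsImm)
        Suffix += Imm ? 'i' : 'r';
      return Suffix;
    }
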
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 393fa29ff051..c4c35a1f74ba 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -2637,25 +2637,46 @@ class LoadParamRegInst<NVPTXRegClass regclass, string opstr> :
[(set regclass:$dst, (LoadParam (i32 0), (i32 imm:$b)))]>;
let mayStore = true in {
- class StoreParamInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, i32imm:$a, i32imm:$b),
- !strconcat("st.param", opstr, " \t[param$a+$b], $val;"),
- []>;
- class StoreParamV2Inst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, regclass:$val2,
- i32imm:$a, i32imm:$b),
- !strconcat("st.param.v2", opstr,
- " \t[param$a+$b], {{$val, $val2}};"),
- []>;
+ multiclass StoreParamInst<NVPTXRegClass regclass, Operand IMMType, string opstr, bit support_imm = true> {
+ foreach op = [IMMType, regclass] in
+ if !or(support_imm, !isa<NVPTXRegClass>(op)) then
+ def _ # !if(!isa<NVPTXRegClass>(op), "r", "i")
+ : NVPTXInst<(outs),
+ (ins op:$val, i32imm:$a, i32imm:$b),
+ "st.param" # opstr # " \t[param$a+$b], $val;",
+ []>;
+ }
- class StoreParamV4Inst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, regclass:$val3,
- regclass:$val4, i32imm:$a,
- i32imm:$b),
- !strconcat("st.param.v4", opstr,
- " \t[param$a+$b], {{$val, $val2, $val3, $val4}};"),
- []>;
+ multiclass StoreParamV2Inst<NVPTXRegClass regclass, Operand IMMType, string opstr> {
+ foreach op1 = [IMMType, regclass] in
+ foreach op2 = [IMMType, regclass] in
+ def _ # !if(!isa<NVPTXRegClass>(op1), "r", "i")
+ # !if(!isa<NVPTXRegClass>(op2), "r", "i")
+ : NVPTXInst<(outs),
+ (ins op1:$val1, op2:$val2,
+ i32imm:$a, i32imm:$b),
+ "st.param.v2" # opstr # " \t[param$a+$b], {{$val1, $val2}};",
+ []>;
+ }
+
+ multiclass StoreParamV4Inst<NVPTXRegClass regclass, Operand IMMType, string opstr> {
+ foreach op1 = [IMMType, regclass] in
+ foreach op2 = [IMMType, regclass] in
+ foreach op3 = [IMMType, regclass] in
+ foreach op4 = [IMMType, regclass] in
+ def _ # !if(!isa<NVPTXRegClass>(op1), "r", "i")
+ # !if(!isa<NVPTXRegClass>(op2), "r", "i")
+ # !if(!isa<NVPTXRegClass>(op3), "r", "i")
+ # !if(!isa<NVPTXRegClass>(op4), "r", "i")
+
+ : NVPTXInst<(outs),
+ (ins op1:$val1, op2:$val2, op3:$val3, op4:$val4,
+ i32imm:$a, i32imm:$b),
+ "st.param.v4" # opstr #
+ " \t[param$a+$b], {{$val1, $val2, $val3, $val4}};",
+ []>;
+ }
class StoreRetvalInst<NVPTXRegClass regclass, string opstr> :
NVPTXInst<(outs), (ins regclass:$val, i32imm:$a),
@@ -2735,27 +2756,30 @@ def LoadParamMemV2F32 : LoadParamV2MemInst<Float32Regs, ".f32">;
def LoadParamMemV2F64 : LoadParamV2MemInst<Float64Regs, ".f64">;
def LoadParamMemV4F32 : LoadParamV4MemInst<Float32Regs, ".f32">;
-def StoreParamI64 : StoreParamInst<Int64Regs, ".b64">;
-def StoreParamI32 : StoreParamInst<Int32Regs, ".b32">;
-
-def StoreParamI16 : StoreParamInst<Int16Regs, ".b16">;
-def StoreParamI8 : StoreParamInst<Int16Regs, ".b8">;
-def StoreParamI8TruncI32 : StoreParamInst<Int32Regs, ".b8">;
-def StoreParamI8TruncI64 : StoreParamInst<Int64Regs, ".b8">;
-def StoreParamV2I64 : StoreParamV2Inst<Int64Regs, ".b64">;
-def StoreParamV2I32 : StoreParamV2Inst<Int32Regs, ".b32">;
-def StoreParamV2I16 : StoreParamV2Inst<Int16Regs, ".b16">;
-def StoreParamV2I8 : StoreParamV2Inst<Int16Regs, ".b8">;
-
-def StoreParamV4I32 : StoreParamV4Inst<Int32Regs, ".b32">;
-def StoreParamV4I16 : StoreParamV4Inst<Int16Regs, ".b16">;
-def StoreParamV4I8 : StoreParamV4Inst<Int16Regs, ".b8">;
-
-def StoreParamF32 : StoreParamInst<Float32Regs, ".f32">;
-def StoreParamF64 : StoreParamInst<Float64Regs, ".f64">;
-def StoreParamV2F32 : StoreParamV2Inst<Float32Regs, ".f32">;
-def StoreParamV2F64 : StoreParamV2Inst<Float64Regs, ".f64">;
-def StoreParamV4F32 : StoreParamV4Inst<Float32Regs, ".f32">;
+defm StoreParamI64 : StoreParamInst<Int64Regs, i64imm, ".b64">;
+defm StoreParamI32 : StoreParamInst<Int32Regs, i32imm, ".b32">;
+defm StoreParamI16 : StoreParamInst<Int16Regs, i16imm, ".b16">;
+defm StoreParamI8 : StoreParamInst<Int16Regs, i8imm, ".b8">;
+
+defm StoreParamI8TruncI32 : StoreParamInst<Int32Regs, i8imm, ".b8", /* support_imm */ false>;
+defm StoreParamI8TruncI64 : StoreParamInst<Int64Regs, i8imm, ".b8", /* support_imm */ false>;
+
+defm StoreParamV2I64 : StoreParamV2Inst<Int64Regs, i64imm, ".b64">;
+defm StoreParamV2I32 : StoreParamV2Inst<Int32Regs, i32imm, ".b32">;
+defm StoreParamV2I16 : StoreParamV2Inst<Int16Regs, i16imm, ".b16">;
+defm StoreParamV2I8 : StoreParamV2Inst<Int16Regs, i8imm, ".b8">;
+
+defm StoreParamV4I32 : StoreParamV4Inst<Int32Regs, i32imm, ".b32">;
+defm StoreParamV4I16 : StoreParamV4Inst<Int16Regs, i16imm, ".b16">;
+defm StoreParamV4I8 : StoreParamV4Inst<Int16Regs, i8imm, ".b8">;
+
+defm StoreParamF32 : StoreParamInst<Float32Regs, f32imm, ".f32">;
+defm StoreParamF64 : StoreParamInst<Float64Regs, f64imm, ".f64">;
+
+defm StoreParamV2F32 : StoreParamV2Inst<Float32Regs, f32imm, ".f32">;
+defm StoreParamV2F64 : StoreParamV2Inst<Float64Regs, f64imm, ".f64">;
+
+defm StoreParamV4F32 : StoreParamV4Inst<Float32Regs, f32imm, ".f32">;
def StoreRetvalI64 : StoreRetvalInst<Int64Regs, ".b64">;
def StoreRetvalI32 : StoreRetvalInst<Int32Regs, ".b32">;
diff --git a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
index ac48dc5af9d5..f4e84ade3b5a 100644
--- a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -1157,12 +1157,12 @@ void PPCAsmPrinter::emitInstruction(const MachineInstr *MI) {
MCSymbolRefExpr::VariantKind VK = GetVKForMO(MO);
- // If the symbol isn't toc-data then use the TOC on AIX.
// Map the global address operand to be a reference to the TOC entry we
// will synthesize later. 'TOCEntry' is a label used to reference the
// storage allocated in the TOC which contains the address of 'MOSymbol'.
- // If the toc-data attribute is used, the TOC entry contains the data
- // rather than the address of the MOSymbol.
+ // If the symbol does not have the toc-data attribute, then we create the
+ // TOC entry on AIX. If the toc-data attribute is used, the TOC entry
+ // contains the data rather than the address of the MOSymbol.
if (![](const MachineOperand &MO) {
if (!MO.isGlobal())
return false;
@@ -1170,7 +1170,6 @@ void PPCAsmPrinter::emitInstruction(const MachineInstr *MI) {
const GlobalVariable *GV = dyn_cast<GlobalVariable>(MO.getGlobal());
if (!GV)
return false;
-
return GV->hasAttribute("toc-data");
}(MO)) {
MOSymbol = lookUpOrCreateTOCEntry(MOSymbol, getTOCEntryTypeForMO(MO), VK);
@@ -1301,8 +1300,10 @@ void PPCAsmPrinter::emitInstruction(const MachineInstr *MI) {
unsigned Op = MI->getOpcode();
- // Change the opcode to load address for tocdata
- TmpInst.setOpcode(Op == PPC::ADDItocL8 ? PPC::ADDI8 : PPC::LA);
+ // Change the opcode to load address for toc-data.
+ // ADDItocL is only used for 32-bit toc-data on AIX and will always use LA.
+ TmpInst.setOpcode(Op == PPC::ADDItocL8 ? (IsAIX ? PPC::LA8 : PPC::ADDI8)
+ : PPC::LA);
const MachineOperand &MO = MI->getOperand(2);
assert((Op == PPC::ADDItocL8)
@@ -1316,8 +1317,7 @@ void PPCAsmPrinter::emitInstruction(const MachineInstr *MI) {
const MCExpr *Exp = MCSymbolRefExpr::create(
MOSymbol,
- Op == PPC::ADDItocL8 ? MCSymbolRefExpr::VK_PPC_TOC_LO
- : MCSymbolRefExpr::VK_PPC_L,
+ IsAIX ? MCSymbolRefExpr::VK_PPC_L : MCSymbolRefExpr::VK_PPC_TOC_LO,
OutContext);
TmpInst.getOperand(2) = MCOperand::createExpr(Exp);
@@ -2831,8 +2831,10 @@ void PPCAIXAsmPrinter::emitGlobalVariableHelper(const GlobalVariable *GV) {
// When -fdata-sections is enabled, every GlobalVariable will
// be put into its own csect; therefore, label is not necessary here.
- if (!TM.getDataSections() || GV->hasSection())
- OutStreamer->emitLabel(EmittedInitSym);
+ if (!TM.getDataSections() || GV->hasSection()) {
+ if (Csect->getMappingClass() != XCOFF::XMC_TD)
+ OutStreamer->emitLabel(EmittedInitSym);
+ }
// No alias to emit.
if (!GOAliasMap[GV].size()) {
diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
index 6e31cdae8476..735050641adf 100644
--- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -2074,16 +2074,15 @@ unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) {
if (GV->isThreadLocal())
return 0;
- // If the global has the toc-data attribute then fallback to DAG-ISEL.
- if (TM.getTargetTriple().isOSAIX())
- if (const GlobalVariable *Var = dyn_cast_or_null<GlobalVariable>(GV))
- if (Var->hasAttribute("toc-data"))
- return false;
-
PPCFuncInfo->setUsesTOCBasePtr();
+ bool IsAIXTocData = TM.getTargetTriple().isOSAIX() &&
+ isa<GlobalVariable>(GV) &&
+ cast<GlobalVariable>(GV)->hasAttribute("toc-data");
+
// For small code model, generate a simple TOC load.
if (CModel == CodeModel::Small)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::LDtoc),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
+ IsAIXTocData ? TII.get(PPC::ADDItoc8) : TII.get(PPC::LDtoc),
DestReg)
.addGlobalAddress(GV)
.addReg(PPC::X2);
@@ -2101,6 +2100,7 @@ unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) {
HighPartReg).addReg(PPC::X2).addGlobalAddress(GV);
if (Subtarget->isGVIndirectSymbol(GV)) {
+ assert(!IsAIXTocData && "TOC data should always be direct.");
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(PPC::LDtocL),
DestReg).addGlobalAddress(GV).addReg(HighPartReg);
} else {
diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 68621558e3fa..26560dc5cdeb 100644
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -6143,23 +6143,22 @@ void PPCDAGToDAGISel::Select(SDNode *N) {
" ELF/AIX or 32-bit AIX in the following.");
// Transforms the ISD::TOC_ENTRY node for 32-bit AIX large code model mode,
- // or 64-bit medium (ELF-only), or large (ELF and AIX) code model code that
- // does not conain TOC data symbols.
- // We generate two instructions as described below. The first source
- // operand is a symbol reference. If it must be referenced via the toc
- // according to Subtarget, we generate:
+ // 64-bit medium (ELF-only), or 64-bit large (ELF and AIX) code model code
+ // that does not contain TOC data symbols. We generate two instructions as
+ // described below. The first source operand is a symbol reference. If it
+ // must be referenced via the TOC according to Subtarget, we generate:
// [32-bit AIX]
// LWZtocL(@sym, ADDIStocHA(%r2, @sym))
// [64-bit ELF/AIX]
// LDtocL(@sym, ADDIStocHA8(%x2, @sym))
- // Otherwise we generate:
+ // Otherwise for medium code model ELF we generate:
// ADDItocL8(ADDIStocHA8(%x2, @sym), @sym)
- // For large code model with TOC data symbols we generate:
+ // And finally for AIX with toc-data we generate:
// [32-bit AIX]
// ADDItocL(ADDIStocHA(%x2, @sym), @sym)
// [64-bit AIX]
- // Currently not supported.
+ // ADDItocL8(ADDIStocHA8(%x2, @sym), @sym)
SDValue GA = N->getOperand(0);
SDValue TOCbase = N->getOperand(1);
@@ -6171,12 +6170,9 @@ void PPCDAGToDAGISel::Select(SDNode *N) {
// On AIX, if the symbol has the toc-data attribute it will be defined
// in the TOC entry, so we use an ADDItocL/ADDItocL8.
if (isAIXABI && hasTocDataAttr(GA)) {
- if (isPPC64)
- report_fatal_error(
- "64-bit large code model toc-data not yet supported");
-
- ReplaceNode(N, CurDAG->getMachineNode(PPC::ADDItocL, dl, VT,
- SDValue(Tmp, 0), GA));
+ ReplaceNode(
+ N, CurDAG->getMachineNode(isPPC64 ? PPC::ADDItocL8 : PPC::ADDItocL,
+ dl, VT, SDValue(Tmp, 0), GA));
return;
}
@@ -6191,6 +6187,7 @@ void PPCDAGToDAGISel::Select(SDNode *N) {
return;
}
+ assert(isPPC64 && "TOC_ENTRY already handled for 32-bit.");
// Build the address relative to the TOC-pointer.
ReplaceNode(N, CurDAG->getMachineNode(PPC::ADDItocL8, dl, MVT::i64,
SDValue(Tmp, 0), GA));
@@ -7777,6 +7774,10 @@ void PPCDAGToDAGISel::PeepholePPC64() {
Flags = PPCII::MO_TLSLD_LO;
break;
case PPC::ADDItocL8:
+ // Skip the following peephole optimizations for ADDItocL8 on AIX, which
+ // is used for toc-data access.
+ if (Subtarget->isAIXABI())
+ continue;
Flags = PPCII::MO_TOC_LO;
break;
}
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index ad86c393ba79..8450ce9e0e3b 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -9338,7 +9338,7 @@ SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
if ((Op.getValueType() != MVT::f128) ||
(Op0.getOpcode() != ISD::BUILD_PAIR) ||
(Op0.getOperand(0).getValueType() != MVT::i64) ||
- (Op0.getOperand(1).getValueType() != MVT::i64))
+ (Op0.getOperand(1).getValueType() != MVT::i64) || !Subtarget.isPPC64())
return SDValue();
return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 9e56de732c58..85bbfabf5d3c 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -4438,6 +4438,12 @@ bool PPCInstrInfo::isDefMIElgibleForForwarding(MachineInstr &DefMI,
if (Opc != PPC::ADDItocL8 && Opc != PPC::ADDI && Opc != PPC::ADDI8)
return false;
+ // Skip the transformTo[NewImm|Imm]FormFedByAdd optimization for ADDItocL8
+ // on AIX, which is used for toc-data access. TODO: Follow up to see whether
+ // the optimization can be applied to AIX toc-data as well.
+ if (Opc == PPC::ADDItocL8 && Subtarget.isAIX())
+ return false;
+
assert(DefMI.getNumOperands() >= 3 &&
"Add inst must have at least three operands");
RegMO = &DefMI.getOperand(1);
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
index 7929a781dbda..e3d6d2f094f2 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -3346,7 +3346,7 @@ def ADDIStocHA : PPCEmitTimePseudo<(outs gprc:$rD), (ins gprc_nor0:$reg, tocentr
"#ADDIStocHA",
[(set i32:$rD,
(PPCtoc_entry i32:$reg, tglobaladdr:$disp))]>;
-// TOC Data Transform AIX
+// TOC Data Transform on AIX
def ADDItoc : PPCEmitTimePseudo<(outs gprc:$rD), (ins tocentry32:$disp, gprc:$reg),
"#ADDItoc",
[(set i32:$rD,
diff --git a/llvm/lib/Target/PowerPC/PPCLowerMASSVEntries.cpp b/llvm/lib/Target/PowerPC/PPCLowerMASSVEntries.cpp
index 4c9f5ff18bb6..d10fe11bb587 100644
--- a/llvm/lib/Target/PowerPC/PPCLowerMASSVEntries.cpp
+++ b/llvm/lib/Target/PowerPC/PPCLowerMASSVEntries.cpp
@@ -29,8 +29,10 @@ using namespace llvm;
namespace {
static StringRef MASSVFuncs[] = {
-#define TLI_DEFINE_MASSV_VECFUNCS_NAMES
+#define TLI_DEFINE_MASSV_VECFUNCS
+#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF, VABI_PREFIX) VEC,
#include "llvm/Analysis/VecFuncs.def"
+#undef TLI_DEFINE_MASSV_VECFUNCS
};
class PPCLowerMASSVEntries : public ModulePass {
diff --git a/llvm/lib/Target/PowerPC/PPCMergeStringPool.cpp b/llvm/lib/Target/PowerPC/PPCMergeStringPool.cpp
index abc5353e4a5e..309938accdf4 100644
--- a/llvm/lib/Target/PowerPC/PPCMergeStringPool.cpp
+++ b/llvm/lib/Target/PowerPC/PPCMergeStringPool.cpp
@@ -302,13 +302,6 @@ bool PPCMergeStringPool::mergeModuleStringPool(Module &M) {
return true;
}
-static bool userHasOperand(User *TheUser, GlobalVariable *GVOperand) {
- for (Value *Op : TheUser->operands())
- if (Op == GVOperand)
- return true;
- return false;
-}
-
// For pooled strings we need to add the offset into the pool for each string.
// This is done by adding a Get Element Pointer (GEP) before each user. This
// function adds the GEP.
@@ -319,29 +312,13 @@ void PPCMergeStringPool::replaceUsesWithGEP(GlobalVariable *GlobalToReplace,
Indices.push_back(ConstantInt::get(Type::getInt32Ty(*Context), 0));
Indices.push_back(ConstantInt::get(Type::getInt32Ty(*Context), ElementIndex));
- // Need to save a temporary copy of each user list because we remove uses
- // as we replace them.
- SmallVector<User *> Users;
- for (User *CurrentUser : GlobalToReplace->users())
- Users.push_back(CurrentUser);
-
- for (User *CurrentUser : Users) {
- // The user was not found so it must have been replaced earlier.
- if (!userHasOperand(CurrentUser, GlobalToReplace))
- continue;
-
- // We cannot replace operands in globals so we ignore those.
- if (isa<GlobalValue>(CurrentUser))
- continue;
-
- Constant *ConstGEP = ConstantExpr::getInBoundsGetElementPtr(
- PooledStructType, GPool, Indices);
- LLVM_DEBUG(dbgs() << "Replacing this global:\n");
- LLVM_DEBUG(GlobalToReplace->dump());
- LLVM_DEBUG(dbgs() << "with this:\n");
- LLVM_DEBUG(ConstGEP->dump());
- GlobalToReplace->replaceAllUsesWith(ConstGEP);
- }
+ Constant *ConstGEP =
+ ConstantExpr::getInBoundsGetElementPtr(PooledStructType, GPool, Indices);
+ LLVM_DEBUG(dbgs() << "Replacing this global:\n");
+ LLVM_DEBUG(GlobalToReplace->dump());
+ LLVM_DEBUG(dbgs() << "with this:\n");
+ LLVM_DEBUG(ConstGEP->dump());
+ GlobalToReplace->replaceAllUsesWith(ConstGEP);
}
} // namespace
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 791d364655e5..da8daa573b89 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -558,6 +558,7 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
case TargetOpcode::G_PTRTOINT:
case TargetOpcode::G_INTTOPTR:
case TargetOpcode::G_TRUNC:
+ case TargetOpcode::G_FREEZE:
return selectCopy(MI, MRI);
case TargetOpcode::G_CONSTANT: {
Register DstReg = MI.getOperand(0).getReg();
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 38c1f9868d7d..dbfcab7233bf 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -130,6 +130,10 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
getActionDefinitionsBuilder({G_SADDO, G_SSUBO}).minScalar(0, sXLen).lower();
+ // TODO: Use Vector Single-Width Saturating Instructions for vector types.
+ getActionDefinitionsBuilder({G_UADDSAT, G_SADDSAT, G_USUBSAT, G_SSUBSAT})
+ .lower();
+
auto &ShiftActions = getActionDefinitionsBuilder({G_ASHR, G_LSHR, G_SHL});
if (ST.is64Bit())
ShiftActions.customFor({{s32, s32}});
@@ -137,7 +141,8 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
.widenScalarToNextPow2(0)
.clampScalar(1, s32, sXLen)
.clampScalar(0, s32, sXLen)
- .minScalarSameAs(1, 0);
+ .minScalarSameAs(1, 0)
+ .widenScalarToNextPow2(1);
auto &ExtActions =
getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
@@ -227,7 +232,8 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
ConstantActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen);
// TODO: transform illegal vector types into legal vector type
- getActionDefinitionsBuilder(G_IMPLICIT_DEF)
+ getActionDefinitionsBuilder(
+ {G_IMPLICIT_DEF, G_CONSTANT_FOLD_BARRIER, G_FREEZE})
.legalFor({s32, sXLen, p0})
.legalIf(typeIsLegalBoolVec(0, BoolVecTys, ST))
.legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
@@ -343,6 +349,9 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
.widenScalarToNextPow2(0);
}
+ // TODO: Use libcall for sDoubleXLen.
+ getActionDefinitionsBuilder({G_UDIVREM, G_SDIVREM}).lower();
+
auto &AbsActions = getActionDefinitionsBuilder(G_ABS);
if (ST.hasStdExtZbb())
AbsActions.customFor({s32, sXLen}).minScalar(0, sXLen);
@@ -366,6 +375,11 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
G_FABS, G_FSQRT, G_FMAXNUM, G_FMINNUM})
.legalIf(typeIsScalarFPArith(0, ST));
+ getActionDefinitionsBuilder(G_FREM)
+ .libcallFor({s32, s64})
+ .minScalar(0, s32)
+ .scalarize(0);
+
getActionDefinitionsBuilder(G_FCOPYSIGN)
.legalIf(all(typeIsScalarFPArith(0, ST), typeIsScalarFPArith(1, ST)));
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 89e1214f469d..a78d78946be3 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -152,7 +152,8 @@ def HasStdExtZimop : Predicate<"Subtarget->hasStdExtZimop()">,
def FeatureStdExtZicfilp
: RISCVExperimentalExtension<"zicfilp", 0, 4,
- "'Zicfilp' (Landing pad)">;
+ "'Zicfilp' (Landing pad)",
+ [FeatureStdExtZicsr]>;
def HasStdExtZicfilp : Predicate<"Subtarget->hasStdExtZicfilp()">,
AssemblerPredicate<(all_of FeatureStdExtZicfilp),
"'Zicfilp' (Landing pad)">;
@@ -211,8 +212,8 @@ def FeatureStdExtZa128rs : RISCVExtension<"za128rs", 1, 0,
"'Za128rs' (Reservation Set Size of at Most 128 Bytes)">;
def FeatureStdExtZaamo
- : RISCVExperimentalExtension<"zaamo", 0, 2,
- "'Zaamo' (Atomic Memory Operations)">;
+ : RISCVExtension<"zaamo", 1, 0,
+ "'Zaamo' (Atomic Memory Operations)">;
def HasStdExtAOrZaamo
: Predicate<"Subtarget->hasStdExtA() || Subtarget->hasStdExtZaamo()">,
AssemblerPredicate<(any_of FeatureStdExtA, FeatureStdExtZaamo),
@@ -242,8 +243,8 @@ def HasStdExtZalasr : Predicate<"Subtarget->hasStdExtZalasr()">,
"'Zalasr' (Load-Acquire and Store-Release Instructions)">;
def FeatureStdExtZalrsc
- : RISCVExperimentalExtension<"zalrsc", 0, 2,
- "'Zalrsc' (Load-Reserved/Store-Conditional)">;
+ : RISCVExtension<"zalrsc", 1, 0,
+ "'Zalrsc' (Load-Reserved/Store-Conditional)">;
def HasStdExtAOrZalrsc
: Predicate<"Subtarget->hasStdExtA() || Subtarget->hasStdExtZalrsc()">,
AssemblerPredicate<(any_of FeatureStdExtA, FeatureStdExtZalrsc),
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 8d9b0f2acc5f..f0e5a7d393b6 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -688,7 +688,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
ISD::VP_ADD, ISD::VP_SUB, ISD::VP_MUL,
ISD::VP_SDIV, ISD::VP_UDIV, ISD::VP_SREM,
ISD::VP_UREM, ISD::VP_AND, ISD::VP_OR,
- ISD::VP_XOR, ISD::VP_ASHR, ISD::VP_LSHR,
+ ISD::VP_XOR, ISD::VP_SRA, ISD::VP_SRL,
ISD::VP_SHL, ISD::VP_REDUCE_ADD, ISD::VP_REDUCE_AND,
ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR, ISD::VP_REDUCE_SMAX,
ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
@@ -844,8 +844,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
VT, Custom);
setOperationAction({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, VT,
Custom);
- setOperationAction({ISD::AVGFLOORU, ISD::AVGCEILU, ISD::SADDSAT,
- ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT},
+ setOperationAction({ISD::AVGFLOORS, ISD::AVGFLOORU, ISD::AVGCEILS,
+ ISD::AVGCEILU, ISD::SADDSAT, ISD::UADDSAT,
+ ISD::SSUBSAT, ISD::USUBSAT},
VT, Legal);
// Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
@@ -1237,8 +1238,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV())
setOperationAction({ISD::MULHS, ISD::MULHU}, VT, Custom);
- setOperationAction({ISD::AVGFLOORU, ISD::AVGCEILU, ISD::SADDSAT,
- ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT},
+ setOperationAction({ISD::AVGFLOORS, ISD::AVGFLOORU, ISD::AVGCEILS,
+ ISD::AVGCEILU, ISD::SADDSAT, ISD::UADDSAT,
+ ISD::SSUBSAT, ISD::USUBSAT},
VT, Custom);
setOperationAction(ISD::VSELECT, VT, Custom);
@@ -1917,7 +1919,7 @@ bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
return false;
return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()) &&
- !isa<ConstantSDNode>(Y);
+ (!isa<ConstantSDNode>(Y) || cast<ConstantSDNode>(Y)->isOpaque());
}
bool RISCVTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
@@ -5339,7 +5341,7 @@ RISCVTargetLowering::lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op,
SDValue Exp;
// Restore to the original type. Truncation after SRL is to generate vnsrl.
if (Op->isVPOpcode()) {
- Exp = DAG.getNode(ISD::VP_LSHR, DL, IntVT, Bitcast,
+ Exp = DAG.getNode(ISD::VP_SRL, DL, IntVT, Bitcast,
DAG.getConstant(ShiftAmt, DL, IntVT), Mask, VL);
Exp = DAG.getVPZExtOrTrunc(DL, VT, Exp, Mask, VL);
} else {
@@ -5841,7 +5843,9 @@ static unsigned getRISCVVLOp(SDValue Op) {
OP_CASE(UADDSAT)
OP_CASE(SSUBSAT)
OP_CASE(USUBSAT)
+ OP_CASE(AVGFLOORS)
OP_CASE(AVGFLOORU)
+ OP_CASE(AVGCEILS)
OP_CASE(AVGCEILU)
OP_CASE(FADD)
OP_CASE(FSUB)
@@ -5919,9 +5923,9 @@ static unsigned getRISCVVLOp(SDValue Op) {
case ISD::VP_SELECT:
case ISD::VP_MERGE:
return RISCVISD::VMERGE_VL;
- case ISD::VP_ASHR:
+ case ISD::VP_SRA:
return RISCVISD::SRA_VL;
- case ISD::VP_LSHR:
+ case ISD::VP_SRL:
return RISCVISD::SRL_VL;
case ISD::VP_SQRT:
return RISCVISD::FSQRT_VL;
@@ -5956,7 +5960,7 @@ static bool hasMergeOp(unsigned Opcode) {
Opcode <= RISCVISD::LAST_RISCV_STRICTFP_OPCODE &&
"not a RISC-V target specific op");
static_assert(RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP ==
- 126 &&
+ 128 &&
RISCVISD::LAST_RISCV_STRICTFP_OPCODE -
ISD::FIRST_TARGET_STRICTFP_OPCODE ==
21 &&
@@ -5982,7 +5986,7 @@ static bool hasMaskOp(unsigned Opcode) {
Opcode <= RISCVISD::LAST_RISCV_STRICTFP_OPCODE &&
"not a RISC-V target specific op");
static_assert(RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP ==
- 126 &&
+ 128 &&
RISCVISD::LAST_RISCV_STRICTFP_OPCODE -
ISD::FIRST_TARGET_STRICTFP_OPCODE ==
21 &&
@@ -6882,7 +6886,9 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
!Subtarget.hasVInstructionsF16()))
return SplitVectorOp(Op, DAG);
[[fallthrough]];
+ case ISD::AVGFLOORS:
case ISD::AVGFLOORU:
+ case ISD::AVGCEILS:
case ISD::AVGCEILU:
case ISD::SMIN:
case ISD::SMAX:
@@ -7004,8 +7010,8 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
!Subtarget.hasVInstructionsF16()))
return SplitVPOp(Op, DAG);
[[fallthrough]];
- case ISD::VP_ASHR:
- case ISD::VP_LSHR:
+ case ISD::VP_SRA:
+ case ISD::VP_SRL:
case ISD::VP_SHL:
return lowerVPOp(Op, DAG);
case ISD::VP_IS_FPCLASS:
@@ -19958,7 +19964,9 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(UDIV_VL)
NODE_NAME_CASE(UREM_VL)
NODE_NAME_CASE(XOR_VL)
+ NODE_NAME_CASE(AVGFLOORS_VL)
NODE_NAME_CASE(AVGFLOORU_VL)
+ NODE_NAME_CASE(AVGCEILS_VL)
NODE_NAME_CASE(AVGCEILU_VL)
NODE_NAME_CASE(SADDSAT_VL)
NODE_NAME_CASE(UADDSAT_VL)
@@ -21435,7 +21443,8 @@ bool RISCVTargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
if (Op == Instruction::Add || Op == Instruction::Sub ||
Op == Instruction::And || Op == Instruction::Or ||
Op == Instruction::Xor || Op == Instruction::InsertElement ||
- Op == Instruction::ShuffleVector || Op == Instruction::Load)
+ Op == Instruction::ShuffleVector || Op == Instruction::Load ||
+ Op == Instruction::Freeze)
return false;
if (Inst.getType()->isScalableTy())
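The new signed averaging nodes compute floor((a+b)/2) and ceil((a+b)/2) as if with one extra bit of precision. A minimal reference sketch using the classic overflow-free identities (illustrative helper names; this is not taken from the patch):

#include <cassert>
#include <cstdint>

// floor-average: shared bits plus half of the differing bits.
int8_t avgfloors(int8_t a, int8_t b) {
  return (a & b) + ((a ^ b) >> 1); // arithmetic shift preserves the sign
}
// ceil-average: union of bits minus half of the differing bits.
int8_t avgceils(int8_t a, int8_t b) { return (a | b) - ((a ^ b) >> 1); }
uint8_t avgflooru(uint8_t a, uint8_t b) { return (a & b) + ((a ^ b) >> 1); }
uint8_t avgceilu(uint8_t a, uint8_t b) { return (a | b) - ((a ^ b) >> 1); }

int main() {
  assert(avgfloors(-3, 2) == -1); // floor(-0.5)
  assert(avgceils(-3, 2) == 0);   // ceil(-0.5)
  assert(avgflooru(255, 1) == 128);
  assert(avgceilu(255, 2) == 129); // ceil(257 / 2), no 8-bit overflow
}

In the pattern files further down, the floor variants select vaadd/vaaddu with rounding mode 0b10 (round-down) and the ceil variants with 0b00 (round-to-nearest-up).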
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 1efc54566b4b..856ce06ba1c4 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -264,8 +264,12 @@ enum NodeType : unsigned {
SSUBSAT_VL,
USUBSAT_VL,
+ // Averaging adds of signed integers.
+ AVGFLOORS_VL,
// Averaging adds of unsigned integers.
AVGFLOORU_VL,
+ // Rounding averaging adds of signed integers.
+ AVGCEILS_VL,
// Rounding averaging adds of unsigned integers.
AVGCEILU_VL,
@@ -959,7 +963,6 @@ private:
SDValue lowerFixedLengthVectorSelectToRVV(SDValue Op,
SelectionDAG &DAG) const;
SDValue lowerToScalableOp(SDValue Op, SelectionDAG &DAG) const;
- SDValue lowerUnsignedAvgFloor(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerVPOp(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerLogicVPOp(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 324ce5cb5ed7..c0b2a695b8ea 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -215,7 +215,11 @@ struct DemandedFields {
// than 64.
SEWNone = 0 // We don't need to preserve SEW at all.
} SEW = SEWNone;
- bool LMUL = false;
+ enum : uint8_t {
+ LMULEqual = 2, // The exact value of LMUL needs to be preserved.
+ LMULLessThanOrEqualToM1 = 1, // We can use any LMUL <= M1.
+ LMULNone = 0 // We don't need to preserve LMUL at all.
+ } LMUL = LMULNone;
bool SEWLMULRatio = false;
bool TailPolicy = false;
bool MaskPolicy = false;
@@ -233,7 +237,7 @@ struct DemandedFields {
// Mark all VTYPE subfields and properties as demanded
void demandVTYPE() {
SEW = SEWEqual;
- LMUL = true;
+ LMUL = LMULEqual;
SEWLMULRatio = true;
TailPolicy = true;
MaskPolicy = true;
@@ -250,7 +254,7 @@ struct DemandedFields {
VLAny |= B.VLAny;
VLZeroness |= B.VLZeroness;
SEW = std::max(SEW, B.SEW);
- LMUL |= B.LMUL;
+ LMUL = std::max(LMUL, B.LMUL);
SEWLMULRatio |= B.SEWLMULRatio;
TailPolicy |= B.TailPolicy;
MaskPolicy |= B.MaskPolicy;
@@ -284,7 +288,19 @@ struct DemandedFields {
break;
};
OS << ", ";
- OS << "LMUL=" << LMUL << ", ";
+ OS << "LMUL=";
+ switch (LMUL) {
+ case LMULEqual:
+ OS << "LMULEqual";
+ break;
+ case LMULLessThanOrEqualToM1:
+ OS << "LMULLessThanOrEqualToM1";
+ break;
+ case LMULNone:
+ OS << "LMULNone";
+ break;
+ };
+ OS << ", ";
OS << "SEWLMULRatio=" << SEWLMULRatio << ", ";
OS << "TailPolicy=" << TailPolicy << ", ";
OS << "MaskPolicy=" << MaskPolicy;
@@ -301,6 +317,11 @@ inline raw_ostream &operator<<(raw_ostream &OS, const DemandedFields &DF) {
}
#endif
+static bool isLMUL1OrSmaller(RISCVII::VLMUL LMUL) {
+ auto [LMul, Fractional] = RISCVVType::decodeVLMUL(LMUL);
+ return Fractional || LMul == 1;
+}
+
/// Return true if moving from CurVType to NewVType is
/// indistinguishable from the perspective of an instruction (or set
/// of instructions) which use only the Used subfields and properties.
@@ -324,9 +345,18 @@ static bool areCompatibleVTYPEs(uint64_t CurVType, uint64_t NewVType,
break;
}
- if (Used.LMUL &&
- RISCVVType::getVLMUL(CurVType) != RISCVVType::getVLMUL(NewVType))
- return false;
+ switch (Used.LMUL) {
+ case DemandedFields::LMULNone:
+ break;
+ case DemandedFields::LMULEqual:
+ if (RISCVVType::getVLMUL(CurVType) != RISCVVType::getVLMUL(NewVType))
+ return false;
+ break;
+ case DemandedFields::LMULLessThanOrEqualToM1:
+ if (!isLMUL1OrSmaller(RISCVVType::getVLMUL(NewVType)))
+ return false;
+ break;
+ }
if (Used.SEWLMULRatio) {
auto Ratio1 = RISCVVType::getSEWLMULRatio(RISCVVType::getSEW(CurVType),
@@ -348,10 +378,10 @@ static bool areCompatibleVTYPEs(uint64_t CurVType, uint64_t NewVType,
/// Return the fields and properties demanded by the provided instruction.
DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
- // Warning: This function has to work on both the lowered (i.e. post
- // emitVSETVLIs) and pre-lowering forms. The main implication of this is
- // that it can't use the value of a SEW, VL, or Policy operand as they might
- // be stale after lowering.
+  // This function is also used by RISCVCoalesceVSETVLI. We can still use the
+  // value of a SEW, VL, or Policy operand even though it might not be the
+  // exact value in the VL or VTYPE, since we only care about what the
+  // instruction originally demanded.
// Most instructions don't use any of these subfields.
DemandedFields Res;
@@ -382,7 +412,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
// in the opcode. This is asserted when constructing the VSETVLIInfo.
if (getEEWForLoadStore(MI)) {
Res.SEW = DemandedFields::SEWNone;
- Res.LMUL = false;
+ Res.LMUL = DemandedFields::LMULNone;
}
// Store instructions don't use the policy fields.
@@ -397,12 +427,12 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
// * The policy bits can probably be ignored..
if (isMaskRegOp(MI)) {
Res.SEW = DemandedFields::SEWNone;
- Res.LMUL = false;
+ Res.LMUL = DemandedFields::LMULNone;
}
// For vmv.s.x and vfmv.s.f, there are only two behaviors, VL = 0 and VL > 0.
if (isScalarInsertInstr(MI)) {
- Res.LMUL = false;
+ Res.LMUL = DemandedFields::LMULNone;
Res.SEWLMULRatio = false;
Res.VLAny = false;
// For vmv.s.x and vfmv.s.f, if the merge operand is *undefined*, we don't
@@ -423,12 +453,49 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
// vmv.x.s, and vmv.f.s are unconditional and ignore everything except SEW.
if (isScalarExtractInstr(MI)) {
assert(!RISCVII::hasVLOp(TSFlags));
- Res.LMUL = false;
+ Res.LMUL = DemandedFields::LMULNone;
Res.SEWLMULRatio = false;
Res.TailPolicy = false;
Res.MaskPolicy = false;
}
+ if (RISCVII::hasVLOp(MI.getDesc().TSFlags)) {
+ const MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));
+ // A slidedown/slideup with an *undefined* merge op can freely clobber
+ // elements not copied from the source vector (e.g. masked off, tail, or
+ // slideup's prefix). Notes:
+ // * We can't modify SEW here since the slide amount is in units of SEW.
+ // * VL=1 is special only because we have existing support for zero vs
+ // non-zero VL. We could generalize this if we had a VL > C predicate.
+ // * The LMUL1 restriction is for machines whose latency may depend on VL.
+ // * As above, this is only legal for tail "undefined" not "agnostic".
+ if (isVSlideInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
+ hasUndefinedMergeOp(MI)) {
+ Res.VLAny = false;
+ Res.VLZeroness = true;
+ Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
+ Res.TailPolicy = false;
+ }
+
+  // A tail-undefined vmv.v.i/x or vfmv.v.f with VL=1 can be treated
+  // semantically the same as vmv.s.x. This is particularly useful since we
+  // don't have an immediate form of vmv.s.x, and thus frequently use vmv.v.i
+  // in its place. Since a splat is non-constant time in LMUL, we do need to
+  // be careful not to increase the number of active vector registers (unlike
+  // for vmv.s.x).
+ if (isScalarSplatInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
+ hasUndefinedMergeOp(MI)) {
+ Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
+ Res.SEWLMULRatio = false;
+ Res.VLAny = false;
+ if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
+ Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
+ else
+ Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
+ Res.TailPolicy = false;
+ }
+ }
+
return Res;
}
@@ -1107,11 +1174,6 @@ void RISCVInsertVSETVLI::insertVSETVLI(MachineBasicBlock &MBB,
LIS->getMBBStartIdx(&MBB), LIS->getInstructionIndex(*MI).getRegSlot());
}
-static bool isLMUL1OrSmaller(RISCVII::VLMUL LMUL) {
- auto [LMul, Fractional] = RISCVVType::decodeVLMUL(LMUL);
- return Fractional || LMul == 1;
-}
-
/// Return true if a VSETVLI is required to transition from CurInfo to Require
/// before MI.
bool RISCVInsertVSETVLI::needVSETVLI(const MachineInstr &MI,
@@ -1124,40 +1186,6 @@ bool RISCVInsertVSETVLI::needVSETVLI(const MachineInstr &MI,
DemandedFields Used = getDemanded(MI, ST);
- // A slidedown/slideup with an *undefined* merge op can freely clobber
- // elements not copied from the source vector (e.g. masked off, tail, or
- // slideup's prefix). Notes:
- // * We can't modify SEW here since the slide amount is in units of SEW.
- // * VL=1 is special only because we have existing support for zero vs
- // non-zero VL. We could generalize this if we had a VL > C predicate.
- // * The LMUL1 restriction is for machines whose latency may depend on VL.
- // * As above, this is only legal for tail "undefined" not "agnostic".
- if (isVSlideInstr(MI) && Require.hasAVLImm() && Require.getAVLImm() == 1 &&
- isLMUL1OrSmaller(CurInfo.getVLMUL()) && hasUndefinedMergeOp(MI)) {
- Used.VLAny = false;
- Used.VLZeroness = true;
- Used.LMUL = false;
- Used.TailPolicy = false;
- }
-
- // A tail undefined vmv.v.i/x or vfmv.v.f with VL=1 can be treated in the same
- // semantically as vmv.s.x. This is particularly useful since we don't have an
- // immediate form of vmv.s.x, and thus frequently use vmv.v.i in it's place.
- // Since a splat is non-constant time in LMUL, we do need to be careful to not
- // increase the number of active vector registers (unlike for vmv.s.x.)
- if (isScalarSplatInstr(MI) && Require.hasAVLImm() &&
- Require.getAVLImm() == 1 && isLMUL1OrSmaller(CurInfo.getVLMUL()) &&
- hasUndefinedMergeOp(MI)) {
- Used.LMUL = false;
- Used.SEWLMULRatio = false;
- Used.VLAny = false;
- if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
- Used.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
- else
- Used.SEW = DemandedFields::SEWGreaterThanOrEqual;
- Used.TailPolicy = false;
- }
-
if (CurInfo.isCompatible(Used, Require, LIS))
return false;
@@ -1189,7 +1217,7 @@ static VSETVLIInfo adjustIncoming(VSETVLIInfo PrevInfo, VSETVLIInfo NewInfo,
if (auto NewVLMul = RISCVVType::getSameRatioLMUL(
PrevInfo.getSEW(), PrevInfo.getVLMUL(), Info.getSEW()))
Info.setVLMul(*NewVLMul);
- Demanded.LMUL = true;
+ Demanded.LMUL = DemandedFields::LMULEqual;
}
return Info;
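The bool-to-enum change above turns the LMUL demand into a small three-level lattice, which is why doUnion() can merge two demand sets with a plain std::max. A self-contained sketch of that ordering (mirrors the enum in the patch):

#include <algorithm>
#include <cassert>
#include <cstdint>

enum LMULDemand : uint8_t {
  LMULNone = 0,                // LMUL does not matter at all
  LMULLessThanOrEqualToM1 = 1, // any LMUL <= M1 is acceptable
  LMULEqual = 2,               // the exact LMUL must be preserved
};

// Union of two demand sets: the stricter demand always wins.
LMULDemand mergeDemand(LMULDemand A, LMULDemand B) { return std::max(A, B); }

int main() {
  assert(mergeDemand(LMULNone, LMULLessThanOrEqualToM1) ==
         LMULLessThanOrEqualToM1);
  assert(mergeDemand(LMULLessThanOrEqualToM1, LMULEqual) == LMULEqual);
}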
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 9d574edb4e6d..ce50fe6e2cbb 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1560,8 +1560,8 @@ def PseudoJump : Pseudo<(outs GPR:$rd), (ins pseudo_jump_symbol:$target), [],
// -riscv-use-rematerializable-movimm in RISCVISelDAGToDAG.cpp
// It will be expanded after register allocation.
// FIXME: The scheduling information does not reflect the multiple instructions.
-let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 8, isCodeGenOnly = 1,
- isPseudo = 1, isReMaterializable = 1, IsSignExtendingOpW = 1 in
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 8,
+ isReMaterializable = 1 in
def PseudoMovImm : Pseudo<(outs GPR:$dst), (ins i32imm:$imm), []>,
Sched<[WriteIALU]>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index e68fb42ece9f..b5817237b7fd 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -975,11 +975,14 @@ multiclass VNCLP_IV_V_X_I<string opcodestr, bits<6> funct6> {
SchedUnaryMC<"WriteVNClipI", "ReadVNClipV">;
}
-multiclass VSLD_IV_X_I<string opcodestr, bits<6> funct6> {
+multiclass VSLD_IV_X_I<string opcodestr, bits<6> funct6, bit slidesUp> {
+ // Note: In the future, if VISlideI is also split into VSlideUpI and
+  // VSlideDownI, it'll probably be better to use two separate multiclasses.
+ defvar WriteSlideX = !if(slidesUp, "WriteVSlideUpX", "WriteVSlideDownX");
def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
- SchedBinaryMC<"WriteVISlideX", "ReadVISlideV", "ReadVISlideX">;
+ SchedBinaryMC<WriteSlideX, "ReadVISlideV", "ReadVISlideX">;
def I : VALUVI<funct6, opcodestr # ".vi", uimm5>,
- SchedUnaryMC<"WriteVISlideI", "ReadVISlideV">;
+ SchedUnaryMC<"WriteVSlideI", "ReadVISlideV">;
}
multiclass VSLD1_MV_X<string opcodestr, bits<6> funct6> {
@@ -1658,10 +1661,10 @@ def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb),
let Predicates = [HasVInstructions] in {
// Vector Slide Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
-defm VSLIDEUP_V : VSLD_IV_X_I<"vslideup", 0b001110>;
+defm VSLIDEUP_V : VSLD_IV_X_I<"vslideup", 0b001110, /*slidesUp=*/true>;
defm VSLIDE1UP_V : VSLD1_MV_X<"vslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
-defm VSLIDEDOWN_V : VSLD_IV_X_I<"vslidedown", 0b001111>;
+defm VSLIDEDOWN_V : VSLD_IV_X_I<"vslidedown", 0b001111, /*slidesUp=*/false>;
defm VSLIDE1DOWN_V : VSLD1_MV_X<"vslide1down", 0b001111>;
} // Predicates = [HasVInstructions]
@@ -1677,8 +1680,9 @@ let Predicates = [HasVInstructions] in {
let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in {
defm VRGATHER_V : VGTR_IV_V_X_I<"vrgather", 0b001100>;
def VRGATHEREI16_VV : VALUVV<0b001110, OPIVV, "vrgatherei16.vv">,
- SchedBinaryMC<"WriteVRGatherVV", "ReadVRGatherVV_data",
- "ReadVRGatherVV_index">;
+ SchedBinaryMC<"WriteVRGatherEI16VV",
+ "ReadVRGatherEI16VV_data",
+ "ReadVRGatherEI16VV_index">;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather
// Vector Compress Instruction
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 317a6d7d4c52..f2c867a08ec2 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2249,13 +2249,13 @@ multiclass VPseudoBinaryFV_VV_RM<LMULInfo m, string Constraint = "", int sew = 0
UsesVXRM=0>;
}
-multiclass VPseudoVGTR_VV_EEW<int eew, string Constraint = ""> {
+multiclass VPseudoVGTR_EI16_VV<string Constraint = ""> {
foreach m = MxList in {
defvar mx = m.MX;
foreach sew = EEWList in {
defvar dataEMULOctuple = m.octuple;
- // emul = lmul * eew / sew
- defvar idxEMULOctuple = !srl(!mul(dataEMULOctuple, eew), !logtwo(sew));
+ // emul = lmul * 16 / sew
+ defvar idxEMULOctuple = !srl(!mul(dataEMULOctuple, 16), !logtwo(sew));
if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
defvar emulMX = octuple_to_str<idxEMULOctuple>.ret;
defvar emul = !cast<LMULInfo>("V_" # emulMX);
@@ -2264,8 +2264,8 @@ multiclass VPseudoVGTR_VV_EEW<int eew, string Constraint = ""> {
defm _VV
: VPseudoBinaryEmul<m.vrclass, m.vrclass, emul.vrclass, m, emul,
Constraint, e>,
- SchedBinary<"WriteVRGatherVV", "ReadVRGatherVV_data",
- "ReadVRGatherVV_index", mx, e, forceMergeOpRead=true>;
+ SchedBinary<"WriteVRGatherEI16VV", "ReadVRGatherEI16VV_data",
+ "ReadVRGatherEI16VV_index", mx, e, forceMergeOpRead=true>;
}
}
}
@@ -3380,14 +3380,16 @@ multiclass VPseudoVMAC_VV_VF_AAXA_RM<string Constraint = ""> {
}
}
-multiclass VPseudoVSLD_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
+multiclass VPseudoVSLD_VX_VI<Operand ImmType = simm5, bit slidesUp = false,
+ string Constraint = ""> {
+ defvar WriteSlideX = !if(slidesUp, "WriteVSlideUpX", "WriteVSlideDownX");
foreach m = MxList in {
defvar mx = m.MX;
defm "" : VPseudoVSLDV_VX<m, Constraint>,
- SchedTernary<"WriteVISlideX", "ReadVISlideV", "ReadVISlideV",
+ SchedTernary<WriteSlideX, "ReadVISlideV", "ReadVISlideV",
"ReadVISlideX", mx>;
defm "" : VPseudoVSLDV_VI<ImmType, m, Constraint>,
- SchedBinary<"WriteVISlideI", "ReadVISlideV", "ReadVISlideV", mx>;
+ SchedBinary<"WriteVSlideI", "ReadVISlideV", "ReadVISlideV", mx>;
}
}
@@ -6861,8 +6863,8 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
// 16.3. Vector Slide Instructions
//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
- defm PseudoVSLIDEUP : VPseudoVSLD_VX_VI<uimm5, "@earlyclobber $rd">;
- defm PseudoVSLIDEDOWN : VPseudoVSLD_VX_VI<uimm5>;
+ defm PseudoVSLIDEUP : VPseudoVSLD_VX_VI<uimm5, /*slidesUp=*/true, "@earlyclobber $rd">;
+ defm PseudoVSLIDEDOWN : VPseudoVSLD_VX_VI<uimm5, /*slidesUp=*/false>;
defm PseudoVSLIDE1UP : VPseudoVSLD1_VX<"@earlyclobber $rd">;
defm PseudoVSLIDE1DOWN : VPseudoVSLD1_VX;
} // Predicates = [HasVInstructions]
@@ -6877,8 +6879,7 @@ let Predicates = [HasVInstructionsAnyF] in {
//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
defm PseudoVRGATHER : VPseudoVGTR_VV_VX_VI<uimm5, "@earlyclobber $rd">;
-defm PseudoVRGATHEREI16 : VPseudoVGTR_VV_EEW<eew=16,
- Constraint="@earlyclobber $rd">;
+defm PseudoVRGATHEREI16 : VPseudoVGTR_EI16_VV<Constraint = "@earlyclobber $rd">;
//===----------------------------------------------------------------------===//
// 16.5. Vector Compress Instruction
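The renamed multiclass above hard-codes the index EEW to 16, so the index EMUL is LMUL * 16 / SEW, computed on "octuple" (eighths-of-M1) values so fractional LMULs stay integral. A standalone sketch of that arithmetic (helper names are illustrative):

#include <cassert>

// Mirror of the idxEMULOctuple computation: 8 octuples == M1, and
// EMUL = LMUL * 16 / SEW because vrgatherei16 indices are always 16 bits.
unsigned log2u(unsigned V) { unsigned L = 0; while (V >>= 1) ++L; return L; }
unsigned idxEMULOctuple(unsigned dataEMULOctuple, unsigned sew) {
  // !srl(!mul(dataEMULOctuple, 16), !logtwo(sew)) from the patch
  return (dataEMULOctuple * 16) >> log2u(sew);
}

int main() {
  assert(idxEMULOctuple(8, 16) == 8);   // data M1, SEW=16 -> index EMUL = M1
  assert(idxEMULOctuple(8, 64) == 2);   // data M1, SEW=64 -> index EMUL = MF4
  assert(idxEMULOctuple(64, 8) == 128); // data M8, SEW=8 -> EMUL 16: outside
                                        // 1..64, so no pseudo is defined
}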
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 714f8cff7b63..66df24f2a458 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -881,17 +881,17 @@ multiclass VPatMultiplyAddSDNode_VV_VX<SDNode op, string instruction_name> {
}
}
-multiclass VPatAVGADD_VV_VX_RM<SDNode vop, int vxrm> {
+multiclass VPatAVGADD_VV_VX_RM<SDNode vop, int vxrm, string suffix = ""> {
foreach vti = AllIntegerVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
(vti.Vector vti.RegClass:$rs2)),
- (!cast<Instruction>("PseudoVAADDU_VV_"#vti.LMul.MX)
+ (!cast<Instruction>("PseudoVAADD"#suffix#"_VV_"#vti.LMul.MX)
(vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs2,
vxrm, vti.AVL, vti.Log2SEW, TA_MA)>;
def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
(vti.Vector (SplatPat (XLenVT GPR:$rs2)))),
- (!cast<Instruction>("PseudoVAADDU_VX_"#vti.LMul.MX)
+ (!cast<Instruction>("PseudoVAADD"#suffix#"_VX_"#vti.LMul.MX)
(vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2,
vxrm, vti.AVL, vti.Log2SEW, TA_MA)>;
}
@@ -1163,8 +1163,10 @@ defm : VPatBinarySDNode_VV_VX<ssubsat, "PseudoVSSUB">;
defm : VPatBinarySDNode_VV_VX<usubsat, "PseudoVSSUBU">;
// 12.2. Vector Single-Width Averaging Add and Subtract
-defm : VPatAVGADD_VV_VX_RM<avgflooru, 0b10>;
-defm : VPatAVGADD_VV_VX_RM<avgceilu, 0b00>;
+defm : VPatAVGADD_VV_VX_RM<avgfloors, 0b10>;
+defm : VPatAVGADD_VV_VX_RM<avgflooru, 0b10, suffix = "U">;
+defm : VPatAVGADD_VV_VX_RM<avgceils, 0b00>;
+defm : VPatAVGADD_VV_VX_RM<avgceilu, 0b00, suffix = "U">;
// 12.5. Vector Narrowing Fixed-Point Clip Instructions
multiclass VPatTruncSatClipSDNode<VTypeInfo vti, VTypeInfo wti> {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index e10b8bf2767b..91f3abe22331 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -111,7 +111,9 @@ def riscv_ctlz_vl : SDNode<"RISCVISD::CTLZ_VL", SDT_RISCVIntUnOp_VL>
def riscv_cttz_vl : SDNode<"RISCVISD::CTTZ_VL", SDT_RISCVIntUnOp_VL>;
def riscv_ctpop_vl : SDNode<"RISCVISD::CTPOP_VL", SDT_RISCVIntUnOp_VL>;
+def riscv_avgfloors_vl : SDNode<"RISCVISD::AVGFLOORS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_avgflooru_vl : SDNode<"RISCVISD::AVGFLOORU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
+def riscv_avgceils_vl : SDNode<"RISCVISD::AVGCEILS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_avgceilu_vl : SDNode<"RISCVISD::AVGCEILU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_saddsat_vl : SDNode<"RISCVISD::SADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_uaddsat_vl : SDNode<"RISCVISD::UADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
@@ -634,18 +636,18 @@ class VPatBinaryVL_V<SDPatternOperator vop,
(mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
class VPatBinaryVL_V_RM<SDPatternOperator vop,
- string instruction_name,
- string suffix,
- ValueType result_type,
- ValueType op1_type,
- ValueType op2_type,
- ValueType mask_type,
- int log2sew,
- LMULInfo vlmul,
- VReg result_reg_class,
- VReg op1_reg_class,
- VReg op2_reg_class,
- bit isSEWAware = 0>
+ string instruction_name,
+ string suffix,
+ ValueType result_type,
+ ValueType op1_type,
+ ValueType op2_type,
+ ValueType mask_type,
+ int log2sew,
+ LMULInfo vlmul,
+ VReg result_reg_class,
+ VReg op1_reg_class,
+ VReg op2_reg_class,
+ bit isSEWAware = 0>
: Pat<(result_type (vop
(op1_type op1_reg_class:$rs1),
(op2_type op2_reg_class:$rs2),
@@ -2073,19 +2075,19 @@ multiclass VPatSlide1VL_VF<SDNode vop, string instruction_name> {
}
}
-multiclass VPatAVGADDVL_VV_VX_RM<SDNode vop, int vxrm> {
+multiclass VPatAVGADDVL_VV_VX_RM<SDNode vop, int vxrm, string suffix = ""> {
foreach vti = AllIntegerVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
(vti.Vector vti.RegClass:$rs2),
vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
- (!cast<Instruction>("PseudoVAADDU_VV_"#vti.LMul.MX#"_MASK")
+ (!cast<Instruction>("PseudoVAADD"#suffix#"_VV_"#vti.LMul.MX#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs2,
(vti.Mask V0), vxrm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
(vti.Vector (SplatPat (XLenVT GPR:$rs2))),
vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
- (!cast<Instruction>("PseudoVAADDU_VX_"#vti.LMul.MX#"_MASK")
+ (!cast<Instruction>("PseudoVAADD"#suffix#"_VX_"#vti.LMul.MX#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs1, GPR:$rs2,
(vti.Mask V0), vxrm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
@@ -2369,8 +2371,10 @@ defm : VPatBinaryVL_VV_VX<riscv_ssubsat_vl, "PseudoVSSUB">;
defm : VPatBinaryVL_VV_VX<riscv_usubsat_vl, "PseudoVSSUBU">;
// 12.2. Vector Single-Width Averaging Add and Subtract
-defm : VPatAVGADDVL_VV_VX_RM<riscv_avgflooru_vl, 0b10>;
-defm : VPatAVGADDVL_VV_VX_RM<riscv_avgceilu_vl, 0b00>;
+defm : VPatAVGADDVL_VV_VX_RM<riscv_avgfloors_vl, 0b10>;
+defm : VPatAVGADDVL_VV_VX_RM<riscv_avgflooru_vl, 0b10, suffix="U">;
+defm : VPatAVGADDVL_VV_VX_RM<riscv_avgceils_vl, 0b00>;
+defm : VPatAVGADDVL_VV_VX_RM<riscv_avgceilu_vl, 0b00, suffix="U">;
// 12.5. Vector Narrowing Fixed-Point Clip Instructions
multiclass VPatTruncSatClipVL<VTypeInfo vti, VTypeInfo wti> {
diff --git a/llvm/lib/Target/RISCV/RISCVProcessors.td b/llvm/lib/Target/RISCV/RISCVProcessors.td
index a4a5d9e96c27..6ebf9f1eb045 100644
--- a/llvm/lib/Target/RISCV/RISCVProcessors.td
+++ b/llvm/lib/Target/RISCV/RISCVProcessors.td
@@ -85,7 +85,7 @@ def ROCKET : RISCVTuneProcessorModel<"rocket",
def SIFIVE_7 : RISCVTuneProcessorModel<"sifive-7-series",
SiFive7Model,
- [TuneSiFive7]>;
+ [TuneSiFive7, FeaturePostRAScheduler]>;
def SIFIVE_E20 : RISCVProcessorModel<"sifive-e20",
RocketModel,
@@ -145,7 +145,7 @@ def SIFIVE_E76 : RISCVProcessorModel<"sifive-e76",
FeatureStdExtA,
FeatureStdExtF,
FeatureStdExtC],
- [TuneSiFive7]>;
+ [TuneSiFive7, FeaturePostRAScheduler]>;
def SIFIVE_S21 : RISCVProcessorModel<"sifive-s21",
RocketModel,
@@ -189,7 +189,7 @@ def SIFIVE_S76 : RISCVProcessorModel<"sifive-s76",
FeatureStdExtD,
FeatureStdExtC,
FeatureStdExtZihintpause],
- [TuneSiFive7]>;
+ [TuneSiFive7, FeaturePostRAScheduler]>;
def SIFIVE_U54 : RISCVProcessorModel<"sifive-u54",
RocketModel,
@@ -212,7 +212,7 @@ def SIFIVE_U74 : RISCVProcessorModel<"sifive-u74",
FeatureStdExtF,
FeatureStdExtD,
FeatureStdExtC],
- [TuneSiFive7]>;
+ [TuneSiFive7, FeaturePostRAScheduler]>;
def SIFIVE_X280 : RISCVProcessorModel<"sifive-x280", SiFive7Model,
[Feature64Bit,
@@ -230,6 +230,7 @@ def SIFIVE_X280 : RISCVProcessorModel<"sifive-x280", SiFive7Model,
FeatureStdExtZba,
FeatureStdExtZbb],
[TuneSiFive7,
+ FeaturePostRAScheduler,
TuneDLenFactor2]>;
def SIFIVE_P450 : RISCVProcessorModel<"sifive-p450", SiFiveP400Model,
@@ -262,7 +263,8 @@ def SIFIVE_P450 : RISCVProcessorModel<"sifive-p450", SiFiveP400Model,
[TuneNoDefaultUnroll,
TuneConditionalCompressedMoveFusion,
TuneLUIADDIFusion,
- TuneAUIPCADDIFusion]>;
+ TuneAUIPCADDIFusion,
+ FeaturePostRAScheduler]>;
def SIFIVE_P670 : RISCVProcessorModel<"sifive-p670", SiFiveP600Model,
[Feature64Bit,
@@ -302,7 +304,8 @@ def SIFIVE_P670 : RISCVProcessorModel<"sifive-p670", SiFiveP600Model,
TuneConditionalCompressedMoveFusion,
TuneLUIADDIFusion,
TuneAUIPCADDIFusion,
- TuneNoSinkSplatOperands]>;
+ TuneNoSinkSplatOperands,
+ FeaturePostRAScheduler]>;
def SYNTACORE_SCR1_BASE : RISCVProcessorModel<"syntacore-scr1-base",
SyntacoreSCR1Model,
diff --git a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
index e67da839bdb8..b2991145ee65 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
@@ -199,7 +199,6 @@ def SiFive7Model : SchedMachineModel {
let LoadLatency = 3;
let MispredictPenalty = 3;
let CompleteModel = 0;
- let PostRAScheduler = true;
let EnableIntervals = true;
let UnsupportedFeatures = [HasStdExtZbkb, HasStdExtZbkc, HasStdExtZbkx,
HasStdExtZcmt, HasStdExtZknd, HasStdExtZkne,
@@ -928,6 +927,7 @@ foreach mx = SchedMxList in {
defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxList>.c;
let Latency = !add(Cycles, 3), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherVV", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
+ defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherEI16VV", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
defm "" : LMULSEWWriteResMXSEW<"WriteVCompressV", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
}
}
@@ -937,10 +937,11 @@ foreach mx = SchedMxList in {
defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
- defm "" : LMULWriteResMX<"WriteVISlideX", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVISlideI", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVISlide1X", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVFSlide1F", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSlideUpX", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSlideDownX", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSlideI", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVISlide1X", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVFSlide1F", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
}
}
@@ -1272,6 +1273,8 @@ defm "" : LMULReadAdvance<"ReadVFSlideV", 0>;
defm "" : LMULReadAdvance<"ReadVFSlideF", 0>;
defm "" : LMULSEWReadAdvance<"ReadVRGatherVV_data", 0>;
defm "" : LMULSEWReadAdvance<"ReadVRGatherVV_index", 0>;
+defm "" : LMULSEWReadAdvance<"ReadVRGatherEI16VV_data", 0>;
+defm "" : LMULSEWReadAdvance<"ReadVRGatherEI16VV_index", 0>;
defm "" : LMULReadAdvance<"ReadVRGatherVX_data", 0>;
defm "" : LMULReadAdvance<"ReadVRGatherVX_index", 0>;
defm "" : LMULReadAdvance<"ReadVRGatherVI_data", 0>;
diff --git a/llvm/lib/Target/RISCV/RISCVSchedSiFiveP400.td b/llvm/lib/Target/RISCV/RISCVSchedSiFiveP400.td
index a37958826e02..80362cae00fc 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSiFiveP400.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSiFiveP400.td
@@ -13,7 +13,6 @@ def SiFiveP400Model : SchedMachineModel {
let MicroOpBufferSize = 56; // Max micro-ops that can be buffered.
let LoadLatency = 4; // Cycles for loads to access the cache.
let MispredictPenalty = 9; // Extra cycles for a mispredicted branch.
- let PostRAScheduler = true;
let UnsupportedFeatures = [HasStdExtZbkb, HasStdExtZbkc, HasStdExtZbkx,
HasStdExtZcmt, HasStdExtZknd, HasStdExtZkne,
HasStdExtZknh, HasStdExtZksed, HasStdExtZksh,
diff --git a/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td b/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td
index 6ba299385f07..f0697a1b0673 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td
@@ -56,7 +56,6 @@ def SiFiveP600Model : SchedMachineModel {
let MicroOpBufferSize = 160; // Max micro-ops that can be buffered.
let LoadLatency = 4; // Cycles for loads to access the cache.
let MispredictPenalty = 9; // Extra cycles for a mispredicted branch.
- let PostRAScheduler = true;
let UnsupportedFeatures = [HasStdExtZbkb, HasStdExtZbkc, HasStdExtZbkx,
HasStdExtZknd, HasStdExtZkne, HasStdExtZknh,
HasStdExtZksed, HasStdExtZksh, HasStdExtZkr,
@@ -669,7 +668,7 @@ foreach mx = SchedMxList in {
defvar LMulLat = SiFiveP600GetLMulCycles<mx>.c;
defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
let Latency = 2, ReleaseAtCycles = [LMulLat] in {
- defm "" : LMULWriteResMX<"WriteVISlideI", [SiFiveP600VEXQ0], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSlideI", [SiFiveP600VEXQ0], mx, IsWorstCase>;
}
let Latency = 1, ReleaseAtCycles = [LMulLat] in {
defm "" : LMULWriteResMX<"WriteVISlide1X", [SiFiveP600VEXQ0], mx, IsWorstCase>;
@@ -679,7 +678,8 @@ foreach mx = SchedMxList in {
foreach mx = ["MF8", "MF4", "MF2", "M1"] in {
defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
let Latency = 2, ReleaseAtCycles = [1] in {
- defm "" : LMULWriteResMX<"WriteVISlideX", [SiFiveP600VEXQ0], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSlideUpX", [SiFiveP600VEXQ0], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSlideDownX", [SiFiveP600VEXQ0], mx, IsWorstCase>;
}
}
@@ -688,7 +688,8 @@ foreach mx = ["M8", "M4", "M2"] in {
defvar LMulLat = SiFiveP600GetLMulCycles<mx>.c;
defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
let Latency = !add(4, LMulLat), ReleaseAtCycles = [LMulLat] in {
- defm "" : LMULWriteResMX<"WriteVISlideX", [SiFiveP600VEXQ1], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSlideUpX", [SiFiveP600VEXQ1], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSlideDownX", [SiFiveP600VEXQ1], mx, IsWorstCase>;
}
}
@@ -714,6 +715,7 @@ foreach mx = ["MF8", "MF4", "MF2", "M1"] in {
defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
let Latency = 3, ReleaseAtCycles = [1] in {
defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherVV", [SiFiveP600VEXQ1], mx, sew, IsWorstCase>;
+ defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherEI16VV", [SiFiveP600VEXQ1], mx, sew, IsWorstCase>;
defm "" : LMULSEWWriteResMXSEW<"WriteVCompressV", [SiFiveP600VEXQ1], mx, sew, IsWorstCase>;
}
}
@@ -734,6 +736,7 @@ foreach mx = ["M2", "M4", "M8"] in {
defvar IsWorstCase = SiFiveP600IsWorstCaseMXSEW<mx, sew, SchedMxList>.c;
let Latency = 6, ReleaseAtCycles = [LMulLat] in {
defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherVV", [SiFiveP600VEXQ1], mx, sew, IsWorstCase>;
+ defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherEI16VV", [SiFiveP600VEXQ1], mx, sew, IsWorstCase>;
defm "" : LMULSEWWriteResMXSEW<"WriteVCompressV", [SiFiveP600VEXQ1], mx, sew, IsWorstCase>;
}
}
@@ -1069,6 +1072,8 @@ defm "" : LMULReadAdvance<"ReadVFSlideV", 0>;
defm "" : LMULReadAdvance<"ReadVFSlideF", 0>;
defm "" : LMULSEWReadAdvance<"ReadVRGatherVV_data", 0>;
defm "" : LMULSEWReadAdvance<"ReadVRGatherVV_index", 0>;
+defm "" : LMULSEWReadAdvance<"ReadVRGatherEI16VV_data", 0>;
+defm "" : LMULSEWReadAdvance<"ReadVRGatherEI16VV_index", 0>;
defm "" : LMULReadAdvance<"ReadVRGatherVX_data", 0>;
defm "" : LMULReadAdvance<"ReadVRGatherVX_index", 0>;
defm "" : LMULReadAdvance<"ReadVRGatherVI_data", 0>;
diff --git a/llvm/lib/Target/RISCV/RISCVScheduleV.td b/llvm/lib/Target/RISCV/RISCVScheduleV.td
index 5be06d4c3f7e..449611c58303 100644
--- a/llvm/lib/Target/RISCV/RISCVScheduleV.td
+++ b/llvm/lib/Target/RISCV/RISCVScheduleV.td
@@ -514,12 +514,14 @@ def WriteVMovXS : SchedWrite;
def WriteVMovSF : SchedWrite;
def WriteVMovFS : SchedWrite;
// 16.3. Vector Slide Instructions
-defm "" : LMULSchedWrites<"WriteVISlideX">;
-defm "" : LMULSchedWrites<"WriteVISlideI">;
+defm "" : LMULSchedWrites<"WriteVSlideUpX">;
+defm "" : LMULSchedWrites<"WriteVSlideDownX">;
+defm "" : LMULSchedWrites<"WriteVSlideI">;
defm "" : LMULSchedWrites<"WriteVISlide1X">;
defm "" : LMULSchedWrites<"WriteVFSlide1F">;
// 16.4. Vector Register Gather Instructions
defm "" : LMULSEWSchedWrites<"WriteVRGatherVV">;
+defm "" : LMULSEWSchedWrites<"WriteVRGatherEI16VV">;
defm "" : LMULSchedWrites<"WriteVRGatherVX">;
defm "" : LMULSchedWrites<"WriteVRGatherVI">;
// 16.5. Vector Compress Instruction
@@ -748,6 +750,8 @@ defm "" : LMULSchedReads<"ReadVFSlideF">;
// 16.4. Vector Register Gather Instructions
defm "" : LMULSEWSchedReads<"ReadVRGatherVV_data">;
defm "" : LMULSEWSchedReads<"ReadVRGatherVV_index">;
+defm "" : LMULSEWSchedReads<"ReadVRGatherEI16VV_data">;
+defm "" : LMULSEWSchedReads<"ReadVRGatherEI16VV_index">;
defm "" : LMULSchedReads<"ReadVRGatherVX_data">;
defm "" : LMULSchedReads<"ReadVRGatherVX_index">;
defm "" : LMULSchedReads<"ReadVRGatherVI_data">;
@@ -949,11 +953,13 @@ def : WriteRes<WriteVMovSX, []>;
def : WriteRes<WriteVMovXS, []>;
def : WriteRes<WriteVMovSF, []>;
def : WriteRes<WriteVMovFS, []>;
-defm "" : LMULWriteRes<"WriteVISlideX", []>;
-defm "" : LMULWriteRes<"WriteVISlideI", []>;
+defm "" : LMULWriteRes<"WriteVSlideUpX", []>;
+defm "" : LMULWriteRes<"WriteVSlideDownX", []>;
+defm "" : LMULWriteRes<"WriteVSlideI", []>;
defm "" : LMULWriteRes<"WriteVISlide1X", []>;
defm "" : LMULWriteRes<"WriteVFSlide1F", []>;
defm "" : LMULSEWWriteRes<"WriteVRGatherVV", []>;
+defm "" : LMULSEWWriteRes<"WriteVRGatherEI16VV", []>;
defm "" : LMULWriteRes<"WriteVRGatherVX", []>;
defm "" : LMULWriteRes<"WriteVRGatherVI", []>;
defm "" : LMULSEWWriteRes<"WriteVCompressV", []>;
@@ -1118,6 +1124,8 @@ defm "" : LMULReadAdvance<"ReadVFSlideV", 0>;
defm "" : LMULReadAdvance<"ReadVFSlideF", 0>;
defm "" : LMULSEWReadAdvance<"ReadVRGatherVV_data", 0>;
defm "" : LMULSEWReadAdvance<"ReadVRGatherVV_index", 0>;
+defm "" : LMULSEWReadAdvance<"ReadVRGatherEI16VV_data", 0>;
+defm "" : LMULSEWReadAdvance<"ReadVRGatherEI16VV_index", 0>;
defm "" : LMULReadAdvance<"ReadVRGatherVX_data", 0>;
defm "" : LMULReadAdvance<"ReadVRGatherVX_index", 0>;
defm "" : LMULReadAdvance<"ReadVRGatherVI_data", 0>;
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index c880c9e921e0..347c1bc3c278 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -121,9 +121,7 @@ public:
}
bool enableMachineScheduler() const override { return true; }
- bool enablePostRAScheduler() const override {
- return getSchedModel().PostRAScheduler || UsePostRAScheduler;
- }
+ bool enablePostRAScheduler() const override { return UsePostRAScheduler; }
Align getPrefFunctionAlignment() const {
return Align(TuneInfo->PrefFunctionAlignment);
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 5aab138dae40..d9f8222669ca 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -96,6 +96,11 @@ static cl::opt<bool> EnableMISchedLoadClustering(
cl::desc("Enable load clustering in the machine scheduler"),
cl::init(false));
+static cl::opt<bool> EnableVSETVLIAfterRVVRegAlloc(
+ "riscv-vsetvl-after-rvv-regalloc", cl::Hidden,
+ cl::desc("Insert vsetvls after vector register allocation"),
+ cl::init(true));
+
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
RegisterTargetMachine<RISCVTargetMachine> X(getTheRISCV32Target());
RegisterTargetMachine<RISCVTargetMachine> Y(getTheRISCV64Target());
@@ -389,6 +394,8 @@ FunctionPass *RISCVPassConfig::createRVVRegAllocPass(bool Optimized) {
bool RISCVPassConfig::addRegAssignAndRewriteFast() {
addPass(createRVVRegAllocPass(false));
+ if (EnableVSETVLIAfterRVVRegAlloc)
+ addPass(createRISCVInsertVSETVLIPass());
addPass(createRISCVCoalesceVSETVLIPass());
if (TM->getOptLevel() != CodeGenOptLevel::None &&
EnableRISCVDeadRegisterElimination)
@@ -399,6 +406,8 @@ bool RISCVPassConfig::addRegAssignAndRewriteFast() {
bool RISCVPassConfig::addRegAssignAndRewriteOptimized() {
addPass(createRVVRegAllocPass(true));
addPass(createVirtRegRewriter(false));
+ if (EnableVSETVLIAfterRVVRegAlloc)
+ addPass(createRISCVInsertVSETVLIPass());
addPass(createRISCVCoalesceVSETVLIPass());
if (TM->getOptLevel() != CodeGenOptLevel::None &&
EnableRISCVDeadRegisterElimination)
@@ -547,10 +556,12 @@ void RISCVPassConfig::addPreRegAlloc() {
// Run RISCVInsertVSETVLI after PHI elimination. On O1 and above do it after
// register coalescing so needVSETVLIPHI doesn't need to look through COPYs.
- if (TM->getOptLevel() == CodeGenOptLevel::None)
- insertPass(&PHIEliminationID, &RISCVInsertVSETVLIID);
- else
- insertPass(&RegisterCoalescerID, &RISCVInsertVSETVLIID);
+ if (!EnableVSETVLIAfterRVVRegAlloc) {
+ if (TM->getOptLevel() == CodeGenOptLevel::None)
+ insertPass(&PHIEliminationID, &RISCVInsertVSETVLIID);
+ else
+ insertPass(&RegisterCoalescerID, &RISCVInsertVSETVLIID);
+ }
}
void RISCVPassConfig::addFastRegAlloc() {
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index b73ed208ed74..176d0e79253a 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -14,9 +14,11 @@
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/PatternMatch.h"
#include <cmath>
#include <optional>
using namespace llvm;
+using namespace llvm::PatternMatch;
#define DEBUG_TYPE "riscvtti"
@@ -1469,6 +1471,21 @@ InstructionCost RISCVTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
}
}
+ // With ShortForwardBranchOpt or ConditionalMoveFusion, scalar icmp + select
+ // instructions will lower to SELECT_CC and lower to PseudoCCMOVGPR which will
+ // generate a conditional branch + mv. The cost of scalar (icmp + select) will
+ // be (0 + select instr cost).
+ if (ST->hasConditionalMoveFusion() && I && isa<ICmpInst>(I) &&
+ ValTy->isIntegerTy() && !I->user_empty()) {
+ if (all_of(I->users(), [&](const User *U) {
+ return match(U, m_Select(m_Specific(I), m_Value(), m_Value())) &&
+ U->getType()->isIntegerTy() &&
+ !isa<ConstantData>(U->getOperand(1)) &&
+ !isa<ConstantData>(U->getOperand(2));
+ }))
+ return 0;
+ }
+
// TODO: Add cost for scalar type.
return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
@@ -1864,10 +1881,14 @@ unsigned RISCVTTIImpl::getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
bool RISCVTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
const TargetTransformInfo::LSRCost &C2) {
// RISC-V specific here are "instruction number 1st priority".
- return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
+ // If we need to emit adds inside the loop to add up base registers, then
+ // we need at least one extra temporary register.
+ unsigned C1NumRegs = C1.NumRegs + (C1.NumBaseAdds != 0);
+ unsigned C2NumRegs = C2.NumRegs + (C2.NumBaseAdds != 0);
+ return std::tie(C1.Insns, C1NumRegs, C1.AddRecCost,
C1.NumIVMuls, C1.NumBaseAdds,
C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
- std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
+ std::tie(C2.Insns, C2NumRegs, C2.AddRecCost,
C2.NumIVMuls, C2.NumBaseAdds,
C2.ScaleCost, C2.ImmCost, C2.SetupCost);
}
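The isLSRCostLess change above keeps the lexicographic comparison but charges one extra register whenever a candidate needs base-register adds inside the loop. A simplified standalone mirror (the struct abbreviates TargetTransformInfo::LSRCost; sketch only):

#include <tuple>

struct LSRCost {
  unsigned Insns, NumRegs, AddRecCost, NumIVMuls, NumBaseAdds, ScaleCost,
      ImmCost, SetupCost;
};

bool isLSRCostLess(const LSRCost &C1, const LSRCost &C2) {
  // A nonzero NumBaseAdds implies at least one extra temporary register.
  unsigned C1NumRegs = C1.NumRegs + (C1.NumBaseAdds != 0);
  unsigned C2NumRegs = C2.NumRegs + (C2.NumBaseAdds != 0);
  return std::tie(C1.Insns, C1NumRegs, C1.AddRecCost, C1.NumIVMuls,
                  C1.NumBaseAdds, C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
         std::tie(C2.Insns, C2NumRegs, C2.AddRecCost, C2.NumIVMuls,
                  C2.NumBaseAdds, C2.ScaleCost, C2.ImmCost, C2.SetupCost);
}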
diff --git a/llvm/lib/Target/SPIRV/CMakeLists.txt b/llvm/lib/Target/SPIRV/CMakeLists.txt
index 7001ac382f41..fe09d5903045 100644
--- a/llvm/lib/Target/SPIRV/CMakeLists.txt
+++ b/llvm/lib/Target/SPIRV/CMakeLists.txt
@@ -17,6 +17,7 @@ add_llvm_target(SPIRVCodeGen
SPIRVAsmPrinter.cpp
SPIRVBuiltins.cpp
SPIRVCallLowering.cpp
+ SPIRVInlineAsmLowering.cpp
SPIRVCommandLine.cpp
SPIRVDuplicatesTracker.cpp
SPIRVEmitIntrinsics.cpp
diff --git a/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVInstPrinter.cpp b/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVInstPrinter.cpp
index b468b71cc0ef..5c286acdcc9b 100644
--- a/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVInstPrinter.cpp
+++ b/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVInstPrinter.cpp
@@ -321,14 +321,19 @@ void SPIRVInstPrinter::printStringImm(const MCInst *MI, unsigned OpNo,
if (MI->getOperand(StrStartIndex).isReg())
break;
- std::string Str = getSPIRVStringOperand(*MI, OpNo);
+ std::string Str = getSPIRVStringOperand(*MI, StrStartIndex);
if (StrStartIndex != OpNo)
O << ' '; // Add a space if we're starting a new string/argument.
O << '"';
for (char c : Str) {
- if (c == '"')
- O.write('\\'); // Escape " characters (might break for complex UTF-8).
- O.write(c);
+ // Escape ", \n characters (might break for complex UTF-8).
+ if (c == '\n') {
+ O.write("\\n", 2);
+ } else {
+ if (c == '"')
+ O.write('\\');
+ O.write(c);
+ }
}
O << '"';
diff --git a/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp b/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp
index 2ebe5bdc4771..3206c264f99d 100644
--- a/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp
@@ -69,7 +69,8 @@ public:
void outputOpFunctionEnd();
void outputExtFuncDecls();
void outputExecutionModeFromMDNode(Register Reg, MDNode *Node,
- SPIRV::ExecutionMode::ExecutionMode EM);
+ SPIRV::ExecutionMode::ExecutionMode EM,
+ unsigned ExpectMDOps, int64_t DefVal);
void outputExecutionModeFromNumthreadsAttribute(
const Register &Reg, const Attribute &Attr,
SPIRV::ExecutionMode::ExecutionMode EM);
@@ -114,12 +115,9 @@ void SPIRVAsmPrinter::emitEndOfAsmFile(Module &M) {
// Bound is an approximation that accounts for the maximum used register
// number and number of generated OpLabels
unsigned Bound = 2 * (ST->getBound() + 1) + NLabels;
- bool FlagToRestore = OutStreamer->getUseAssemblerInfoForParsing();
- OutStreamer->setUseAssemblerInfoForParsing(true);
if (MCAssembler *Asm = OutStreamer->getAssemblerPtr())
Asm->setBuildVersion(static_cast<MachO::PlatformType>(0), Major, Minor,
Bound, VersionTuple(Major, Minor, 0, Bound));
- OutStreamer->setUseAssemblerInfoForParsing(FlagToRestore);
}
void SPIRVAsmPrinter::emitFunctionHeader() {
@@ -425,12 +423,19 @@ static void addOpsFromMDNode(MDNode *MDN, MCInst &Inst,
}
void SPIRVAsmPrinter::outputExecutionModeFromMDNode(
- Register Reg, MDNode *Node, SPIRV::ExecutionMode::ExecutionMode EM) {
+ Register Reg, MDNode *Node, SPIRV::ExecutionMode::ExecutionMode EM,
+ unsigned ExpectMDOps, int64_t DefVal) {
MCInst Inst;
Inst.setOpcode(SPIRV::OpExecutionMode);
Inst.addOperand(MCOperand::createReg(Reg));
Inst.addOperand(MCOperand::createImm(static_cast<unsigned>(EM)));
addOpsFromMDNode(Node, Inst, MAI);
+  // reqd_work_group_size and work_group_size_hint require 3 operands;
+  // if the metadata contains fewer operands, pad with a default value.
+ unsigned NodeSz = Node->getNumOperands();
+ if (ExpectMDOps > 0 && NodeSz < ExpectMDOps)
+ for (unsigned i = NodeSz; i < ExpectMDOps; ++i)
+ Inst.addOperand(MCOperand::createImm(DefVal));
outputMCInst(Inst);
}
@@ -476,17 +481,17 @@ void SPIRVAsmPrinter::outputExecutionMode(const Module &M) {
Register FReg = MAI->getFuncReg(&F);
assert(FReg.isValid());
if (MDNode *Node = F.getMetadata("reqd_work_group_size"))
- outputExecutionModeFromMDNode(FReg, Node,
- SPIRV::ExecutionMode::LocalSize);
+ outputExecutionModeFromMDNode(FReg, Node, SPIRV::ExecutionMode::LocalSize,
+ 3, 1);
if (Attribute Attr = F.getFnAttribute("hlsl.numthreads"); Attr.isValid())
outputExecutionModeFromNumthreadsAttribute(
FReg, Attr, SPIRV::ExecutionMode::LocalSize);
if (MDNode *Node = F.getMetadata("work_group_size_hint"))
outputExecutionModeFromMDNode(FReg, Node,
- SPIRV::ExecutionMode::LocalSizeHint);
+ SPIRV::ExecutionMode::LocalSizeHint, 3, 1);
if (MDNode *Node = F.getMetadata("intel_reqd_sub_group_size"))
outputExecutionModeFromMDNode(FReg, Node,
- SPIRV::ExecutionMode::SubgroupSize);
+ SPIRV::ExecutionMode::SubgroupSize, 0, 0);
if (MDNode *Node = F.getMetadata("vec_type_hint")) {
MCInst Inst;
Inst.setOpcode(SPIRV::OpExecutionMode);
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
index 32de8b9587b4..424087f361a6 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
@@ -1118,6 +1118,39 @@ static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
return true;
}
+static bool generateKernelClockInst(const SPIRV::IncomingCall *Call,
+ MachineIRBuilder &MIRBuilder,
+ SPIRVGlobalRegistry *GR) {
+ const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
+ MachineFunction &MF = MIRBuilder.getMF();
+ const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
+ if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
+ std::string DiagMsg = std::string(Builtin->Name) +
+ ": the builtin requires the following SPIR-V "
+ "extension: SPV_KHR_shader_clock";
+ report_fatal_error(DiagMsg.c_str(), false);
+ }
+
+ MachineRegisterInfo *MRI = MIRBuilder.getMRI();
+ Register ResultReg = Call->ReturnRegister;
+ MRI->setRegClass(ResultReg, &SPIRV::IDRegClass);
+
+ // Deduce the `Scope` operand from the builtin function name.
+ SPIRV::Scope::Scope ScopeArg =
+ StringSwitch<SPIRV::Scope::Scope>(Builtin->Name)
+ .EndsWith("device", SPIRV::Scope::Scope::Device)
+ .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
+ .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
+ Register ScopeReg = buildConstantIntReg(ScopeArg, MIRBuilder, GR);
+
+ MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
+ .addDef(ResultReg)
+ .addUse(GR->getSPIRVTypeID(Call->ReturnType))
+ .addUse(ScopeReg);
+
+ return true;
+}
+
// These queries ask for a single size_t result for a given dimension index, e.g.
// size_t get_global_id(uint dimindex). In SPIR-V, the builtins corresponding to
// these values are all vec3 types, so we need to extract the correct index or
@@ -1886,7 +1919,7 @@ static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
// Local sizes arguments: Sizes of block invoke arguments. Clang generates
// local size operands as an array, so we need to unpack them.
SmallVector<Register, 16> LocalSizes;
- if (Call->Builtin->Name.find("_varargs") != StringRef::npos || IsSpirvOp) {
+ if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
Register GepReg = Call->Arguments[LocalSizeArrayIdx];
MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
@@ -2290,6 +2323,8 @@ std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
case SPIRV::GroupUniform:
return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
+ case SPIRV::KernelClock:
+ return generateKernelClockInst(Call.get(), MIRBuilder, GR);
}
return false;
}
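The scope deduction in generateKernelClockInst relies on suffix matching, so all six clock_read* builtins collapse onto three SPIR-V scopes. A dependency-free equivalent of that StringSwitch (illustrative enumerators, not the real SPIR-V encoding):

#include <string_view>

enum class Scope { Device, Workgroup, Subgroup };

Scope deduceClockScope(std::string_view Name) {
  auto endsWith = [&](std::string_view S) {
    return Name.size() >= S.size() && Name.substr(Name.size() - S.size()) == S;
  };
  if (endsWith("device"))
    return Scope::Device; // clock_read_device, clock_read_hilo_device
  if (endsWith("work_group"))
    return Scope::Workgroup;
  return Scope::Subgroup; // the remaining "...sub_group" variants
}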
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
index 564028547821..692234c405ab 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
@@ -58,6 +58,7 @@ def LoadStore : BuiltinGroup;
def IntelSubgroups : BuiltinGroup;
def AtomicFloating : BuiltinGroup;
def GroupUniform : BuiltinGroup;
+def KernelClock : BuiltinGroup;
//===----------------------------------------------------------------------===//
// Class defining a demangled builtin record. The information in the record
@@ -952,6 +953,14 @@ defm : DemangledGroupBuiltin<"group_scan_exclusive_logical_xor", OnlyWork, OpGro
defm : DemangledGroupBuiltin<"group_scan_inclusive_logical_xor", OnlyWork, OpGroupLogicalXorKHR>;
defm : DemangledGroupBuiltin<"group_reduce_logical_xor", OnlyWork, OpGroupLogicalXorKHR>;
+// cl_khr_kernel_clock / SPV_KHR_shader_clock
+defm : DemangledNativeBuiltin<"clock_read_device", OpenCL_std, KernelClock, 0, 0, OpReadClockKHR>;
+defm : DemangledNativeBuiltin<"clock_read_work_group", OpenCL_std, KernelClock, 0, 0, OpReadClockKHR>;
+defm : DemangledNativeBuiltin<"clock_read_sub_group", OpenCL_std, KernelClock, 0, 0, OpReadClockKHR>;
+defm : DemangledNativeBuiltin<"clock_read_hilo_device", OpenCL_std, KernelClock, 0, 0, OpReadClockKHR>;
+defm : DemangledNativeBuiltin<"clock_read_hilo_work_group", OpenCL_std, KernelClock, 0, 0, OpReadClockKHR>;
+defm : DemangledNativeBuiltin<"clock_read_hilo_sub_group", OpenCL_std, KernelClock, 0, 0, OpReadClockKHR>;
+
//===----------------------------------------------------------------------===//
// Class defining an atomic instruction on floating-point numbers.
//
diff --git a/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp b/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp
index 727e4e584c05..f4daab7d06eb 100644
--- a/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp
@@ -171,9 +171,9 @@ getArgAccessQual(const Function &F, unsigned ArgIdx) {
if (!ArgAttribute)
return SPIRV::AccessQualifier::ReadWrite;
- if (ArgAttribute->getString().compare("read_only") == 0)
+ if (ArgAttribute->getString() == "read_only")
return SPIRV::AccessQualifier::ReadOnly;
- if (ArgAttribute->getString().compare("write_only") == 0)
+ if (ArgAttribute->getString() == "write_only")
return SPIRV::AccessQualifier::WriteOnly;
return SPIRV::AccessQualifier::ReadWrite;
}
@@ -181,7 +181,7 @@ getArgAccessQual(const Function &F, unsigned ArgIdx) {
static std::vector<SPIRV::Decoration::Decoration>
getKernelArgTypeQual(const Function &F, unsigned ArgIdx) {
MDString *ArgAttribute = getOCLKernelArgTypeQual(F, ArgIdx);
- if (ArgAttribute && ArgAttribute->getString().compare("volatile") == 0)
+ if (ArgAttribute && ArgAttribute->getString() == "volatile")
return {SPIRV::Decoration::Volatile};
return {};
}
diff --git a/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp b/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp
index 691e6ee0e582..7f531542544a 100644
--- a/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp
@@ -47,6 +47,8 @@ static const std::map<std::string, SPIRV::Extension::Extension>
SPIRV::Extension::Extension::SPV_KHR_bit_instructions},
{"SPV_KHR_linkonce_odr",
SPIRV::Extension::Extension::SPV_KHR_linkonce_odr},
+ {"SPV_INTEL_inline_assembly",
+ SPIRV::Extension::Extension::SPV_INTEL_inline_assembly},
{"SPV_INTEL_bfloat16_conversion",
SPIRV::Extension::Extension::SPV_INTEL_bfloat16_conversion},
{"SPV_KHR_subgroup_rotate",
@@ -55,6 +57,8 @@ static const std::map<std::string, SPIRV::Extension::Extension>
SPIRV::Extension::Extension::SPV_INTEL_variable_length_array},
{"SPV_INTEL_function_pointers",
SPIRV::Extension::Extension::SPV_INTEL_function_pointers},
+ {"SPV_KHR_shader_clock",
+ SPIRV::Extension::Extension::SPV_KHR_shader_clock},
};
bool SPIRVExtensionsParser::parse(cl::Option &O, llvm::StringRef ArgName,
diff --git a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
index 32df2403dfe5..ea53fe55e7ab 100644
--- a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
@@ -140,6 +140,7 @@ public:
Instruction *visitAllocaInst(AllocaInst &I);
Instruction *visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
Instruction *visitUnreachableInst(UnreachableInst &I);
+ Instruction *visitCallInst(CallInst &I);
StringRef getPassName() const override { return "SPIRV emit intrinsics"; }
@@ -489,10 +490,6 @@ void SPIRVEmitIntrinsics::deduceOperandElementType(Instruction *I) {
Type *Ty = GR->findDeducedElementType(Op);
if (Ty == KnownElemTy)
continue;
- if (Instruction *User = dyn_cast<Instruction>(Op->use_begin()->get()))
- setInsertPointSkippingPhis(B, User->getNextNode());
- else
- setInsertPointSkippingPhis(B, I);
Value *OpTyVal = Constant::getNullValue(KnownElemTy);
Type *OpTy = Op->getType();
if (!Ty) {
@@ -500,6 +497,8 @@ void SPIRVEmitIntrinsics::deduceOperandElementType(Instruction *I) {
// check if there is existing Intrinsic::spv_assign_ptr_type instruction
auto It = AssignPtrTypeInstr.find(Op);
if (It == AssignPtrTypeInstr.end()) {
+ Instruction *User = dyn_cast<Instruction>(Op->use_begin()->get());
+ setInsertPointSkippingPhis(B, User ? User->getNextNode() : I);
CallInst *CI =
buildIntrWithMD(Intrinsic::spv_assign_ptr_type, {OpTy}, OpTyVal, Op,
{B.getInt32(getPointerAddressSpace(OpTy))}, B);
@@ -511,6 +510,17 @@ void SPIRVEmitIntrinsics::deduceOperandElementType(Instruction *I) {
Ctx, MDNode::get(Ctx, ValueAsMetadata::getConstant(OpTyVal))));
}
} else {
+ if (auto *OpI = dyn_cast<Instruction>(Op)) {
+ // spv_ptrcast's argument Op denotes an instruction that generates
+ // a value, and we may use getInsertionPointAfterDef()
+ B.SetInsertPoint(*OpI->getInsertionPointAfterDef());
+ B.SetCurrentDebugLocation(OpI->getDebugLoc());
+ } else if (auto *OpA = dyn_cast<Argument>(Op)) {
+ B.SetInsertPointPastAllocas(OpA->getParent());
+ B.SetCurrentDebugLocation(DebugLoc());
+ } else {
+ B.SetInsertPoint(F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca());
+ }
SmallVector<Type *, 2> Types = {OpTy, OpTy};
MetadataAsValue *VMD = MetadataAsValue::get(
Ctx, MDNode::get(Ctx, ValueAsMetadata::getConstant(OpTyVal)));
@@ -620,6 +630,28 @@ void SPIRVEmitIntrinsics::preprocessCompositeConstants(IRBuilder<> &B) {
}
}
+Instruction *SPIRVEmitIntrinsics::visitCallInst(CallInst &Call) {
+ if (!Call.isInlineAsm())
+ return &Call;
+
+ const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
+ LLVMContext &Ctx = F->getContext();
+
+ Constant *TyC = UndefValue::get(IA->getFunctionType());
+ MDString *ConstraintString = MDString::get(Ctx, IA->getConstraintString());
+ SmallVector<Value *> Args = {
+ MetadataAsValue::get(Ctx,
+ MDNode::get(Ctx, ValueAsMetadata::getConstant(TyC))),
+ MetadataAsValue::get(Ctx, MDNode::get(Ctx, ConstraintString))};
+ for (unsigned OpIdx = 0; OpIdx < Call.arg_size(); OpIdx++)
+ Args.push_back(Call.getArgOperand(OpIdx));
+
+ IRBuilder<> B(Call.getParent());
+ B.SetInsertPoint(&Call);
+ B.CreateIntrinsic(Intrinsic::spv_inline_asm, {}, {Args});
+ return &Call;
+}
+
Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &I) {
BasicBlock *ParentBB = I.getParent();
IRBuilder<> B(ParentBB);
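
visitCallInst above leaves the original inline-asm call in place and emits a companion spv_inline_asm intrinsic that carries the asm's function type and constraint string as metadata operands; the SPIRVPreLegalizer later consumes the pair. A reduced sketch of just the packaging step (mirroring the hunk above, not a drop-in):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"

using namespace llvm;

// Reduced sketch: wrap an inline-asm call's callee type and constraint
// string as metadata operands, then append the original call arguments.
static void packageInlineAsm(CallInst &Call, SmallVectorImpl<Value *> &Args) {
  const auto *IA = cast<InlineAsm>(Call.getCalledOperand());
  LLVMContext &Ctx = Call.getContext();
  // The asm signature rides along as an undef constant of its function type.
  Constant *TyC = UndefValue::get(IA->getFunctionType());
  Args.push_back(MetadataAsValue::get(
      Ctx, MDNode::get(Ctx, ValueAsMetadata::getConstant(TyC))));
  // The constraint string becomes an MDString operand.
  Args.push_back(MetadataAsValue::get(
      Ctx, MDNode::get(Ctx, MDString::get(Ctx, IA->getConstraintString()))));
  // The call's own arguments are passed through unchanged.
  for (Value *Op : Call.args())
    Args.push_back(Op);
}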
diff --git a/llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp b/llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp
index 96b4a570a26b..2bd22bbd6316 100644
--- a/llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp
@@ -82,6 +82,28 @@ bool SPIRVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
return false;
}
+std::pair<unsigned, const TargetRegisterClass *>
+SPIRVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ StringRef Constraint,
+ MVT VT) const {
+ const TargetRegisterClass *RC = nullptr;
+ if (Constraint.starts_with("{"))
+ return std::make_pair(0u, RC);
+
+ if (VT.isFloatingPoint())
+ RC = VT.isVector() ? &SPIRV::vfIDRegClass
+ : (VT.getScalarSizeInBits() > 32 ? &SPIRV::fID64RegClass
+ : &SPIRV::fIDRegClass);
+ else if (VT.isInteger())
+ RC = VT.isVector() ? &SPIRV::vIDRegClass
+ : (VT.getScalarSizeInBits() > 32 ? &SPIRV::ID64RegClass
+ : &SPIRV::IDRegClass);
+ else
+ RC = &SPIRV::IDRegClass;
+
+ return std::make_pair(0u, RC);
+}
+
// Insert a bitcast before the instruction to keep SPIR-V code valid
// when there is a type mismatch between results and operand types.
static void validatePtrTypes(const SPIRVSubtarget &STI,
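
getRegForInlineAsmConstraint above picks a SPIR-V ID register class from the operand's MVT along three axes: integer vs. floating point, scalar vs. vector, and whether the scalar is wider than 32 bits. The same decision tree in a self-contained form (the enum is illustrative; the real code returns TargetRegisterClass pointers):

enum class RC { ID, ID64, fID, fID64, vID, vfID };

// Illustrative mirror of the VT -> register-class decision tree.
RC pickIDClass(bool IsFloat, bool IsVector, unsigned ScalarBits) {
  if (IsFloat)
    return IsVector ? RC::vfID : (ScalarBits > 32 ? RC::fID64 : RC::fID);
  return IsVector ? RC::vID : (ScalarBits > 32 ? RC::ID64 : RC::ID);
}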
diff --git a/llvm/lib/Target/SPIRV/SPIRVISelLowering.h b/llvm/lib/Target/SPIRV/SPIRVISelLowering.h
index 8c1de7d97d1a..6fc200abf462 100644
--- a/llvm/lib/Target/SPIRV/SPIRVISelLowering.h
+++ b/llvm/lib/Target/SPIRV/SPIRVISelLowering.h
@@ -55,6 +55,15 @@ public:
MachineFunction &MF,
unsigned Intrinsic) const override;
+ std::pair<unsigned, const TargetRegisterClass *>
+ getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ StringRef Constraint, MVT VT) const override;
+ unsigned
+ getNumRegisters(LLVMContext &Context, EVT VT,
+ std::optional<MVT> RegisterVT = std::nullopt) const override {
+ return 1;
+ }
+
// Call the default implementation and finalize target lowering by inserting
// extra instructions required to preserve validity of SPIR-V code imposed by
// the standard.
diff --git a/llvm/lib/Target/SPIRV/SPIRVInlineAsmLowering.cpp b/llvm/lib/Target/SPIRV/SPIRVInlineAsmLowering.cpp
new file mode 100644
index 000000000000..8bd4fb6bf8b1
--- /dev/null
+++ b/llvm/lib/Target/SPIRV/SPIRVInlineAsmLowering.cpp
@@ -0,0 +1,46 @@
+//===--- SPIRVInlineAsmLowering.cpp - Inline Asm lowering -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the lowering of LLVM inline asm calls to machine code
+// calls for GlobalISel.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SPIRVInlineAsmLowering.h"
+#include "SPIRVSubtarget.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/IntrinsicsSPIRV.h"
+
+using namespace llvm;
+
+SPIRVInlineAsmLowering::SPIRVInlineAsmLowering(const SPIRVTargetLowering &TLI)
+ : InlineAsmLowering(&TLI) {}
+
+bool SPIRVInlineAsmLowering::lowerAsmOperandForConstraint(
+ Value *Val, StringRef Constraint, std::vector<MachineOperand> &Ops,
+ MachineIRBuilder &MIRBuilder) const {
+ Value *ValOp = nullptr;
+ if (isa<ConstantInt>(Val)) {
+ ValOp = Val;
+ } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(Val)) {
+ Ops.push_back(MachineOperand::CreateFPImm(CFP));
+ return true;
+ } else if (auto *II = dyn_cast<IntrinsicInst>(Val)) {
+ if (II->getIntrinsicID() == Intrinsic::spv_track_constant) {
+ if (isa<ConstantInt>(II->getOperand(0))) {
+ ValOp = II->getOperand(0);
+ } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(II->getOperand(0))) {
+ Ops.push_back(MachineOperand::CreateFPImm(CFP));
+ return true;
+ }
+ }
+ }
+ return ValOp ? InlineAsmLowering::lowerAsmOperandForConstraint(
+ ValOp, Constraint, Ops, MIRBuilder)
+ : false;
+}
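
The override above handles constant asm operands: integer constants are delegated to the generic InlineAsmLowering, floating-point constants become FP-immediate machine operands directly, and spv_track_constant wrappers are peeled so the wrapped constant is lowered instead. A plain-C++ restatement of the dispatch (names hypothetical, for illustration only):

enum class AsmOperandAction { DelegateInt, EmitFPImm, DelegateUnwrapped, Reject };

// Illustrative restatement of lowerAsmOperandForConstraint's control flow.
AsmOperandAction classify(bool IsConstInt, bool IsConstFP, bool IsTrackConstant,
                          bool WrappedIsInt, bool WrappedIsFP) {
  if (IsConstInt)
    return AsmOperandAction::DelegateInt;         // base class handles ints
  if (IsConstFP)
    return AsmOperandAction::EmitFPImm;           // push an FP immediate
  if (IsTrackConstant) {                          // spv_track_constant(...)
    if (WrappedIsInt)
      return AsmOperandAction::DelegateUnwrapped; // lower operand 0 instead
    if (WrappedIsFP)
      return AsmOperandAction::EmitFPImm;
  }
  return AsmOperandAction::Reject;                // not a supported constant
}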
diff --git a/llvm/lib/Target/SPIRV/SPIRVInlineAsmLowering.h b/llvm/lib/Target/SPIRV/SPIRVInlineAsmLowering.h
new file mode 100644
index 000000000000..72291855a18c
--- /dev/null
+++ b/llvm/lib/Target/SPIRV/SPIRVInlineAsmLowering.h
@@ -0,0 +1,33 @@
+//===--- SPIRVInlineAsmLowering.h - Inline Asm lowering ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes how to lower LLVM inline asm calls to machine
+// code calls for GlobalISel.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_SPIRV_SPIRVINLINEASMLOWERING_H
+#define LLVM_LIB_TARGET_SPIRV_SPIRVINLINEASMLOWERING_H
+
+#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
+
+namespace llvm {
+
+class SPIRVTargetLowering;
+
+class SPIRVInlineAsmLowering : public InlineAsmLowering {
+public:
+ SPIRVInlineAsmLowering(const SPIRVTargetLowering &TLI);
+ bool
+ lowerAsmOperandForConstraint(Value *Val, StringRef Constraint,
+ std::vector<MachineOperand> &Ops,
+ MachineIRBuilder &MIRBuilder) const override;
+};
+} // end namespace llvm
+
+#endif // LLVM_LIB_TARGET_SPIRV_SPIRVINLINEASMLOWERING_H
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.cpp b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.cpp
index af98f2f88045..12cf7613a45c 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.cpp
@@ -47,6 +47,16 @@ bool SPIRVInstrInfo::isConstantInstr(const MachineInstr &MI) const {
}
}
+bool SPIRVInstrInfo::isInlineAsmDefInstr(const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
+ case SPIRV::OpAsmTargetINTEL:
+ case SPIRV::OpAsmINTEL:
+ return true;
+ default:
+ return false;
+ }
+}
+
bool SPIRVInstrInfo::isTypeDeclInstr(const MachineInstr &MI) const {
auto &MRI = MI.getMF()->getRegInfo();
if (MI.getNumDefs() >= 1 && MI.getOperand(0).isReg()) {
@@ -246,7 +256,8 @@ void SPIRVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
}
bool SPIRVInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
- if (MI.getOpcode() == SPIRV::GET_ID || MI.getOpcode() == SPIRV::GET_fID ||
+ if (MI.getOpcode() == SPIRV::GET_ID || MI.getOpcode() == SPIRV::GET_ID64 ||
+ MI.getOpcode() == SPIRV::GET_fID || MI.getOpcode() == SPIRV::GET_fID64 ||
MI.getOpcode() == SPIRV::GET_pID32 ||
MI.getOpcode() == SPIRV::GET_pID64 || MI.getOpcode() == SPIRV::GET_vfID ||
MI.getOpcode() == SPIRV::GET_vID || MI.getOpcode() == SPIRV::GET_vpID32 ||
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.h b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.h
index 4f2781c9404b..95f387491357 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.h
+++ b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.h
@@ -30,6 +30,7 @@ public:
const SPIRVRegisterInfo &getRegisterInfo() const { return RI; }
bool isHeaderInstr(const MachineInstr &MI) const;
bool isConstantInstr(const MachineInstr &MI) const;
+ bool isInlineAsmDefInstr(const MachineInstr &MI) const;
bool isTypeDeclInstr(const MachineInstr &MI) const;
bool isDecorationInstr(const MachineInstr &MI) const;
bool canUseFastMathFlags(const MachineInstr &MI) const;
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td
index 151d0ec1fe56..7c9b84a48a2a 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td
+++ b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td
@@ -18,7 +18,9 @@ let isCodeGenOnly=1 in {
def ASSIGN_TYPE: Pseudo<(outs ANYID:$dst_id), (ins ANYID:$src_id, TYPE:$src_ty)>;
def DECL_TYPE: Pseudo<(outs ANYID:$dst_id), (ins ANYID:$src_id, TYPE:$src_ty)>;
def GET_ID: Pseudo<(outs ID:$dst_id), (ins ANYID:$src)>;
+ def GET_ID64: Pseudo<(outs ID64:$dst_id), (ins ANYID:$src)>;
def GET_fID: Pseudo<(outs fID:$dst_id), (ins ANYID:$src)>;
+ def GET_fID64: Pseudo<(outs fID64:$dst_id), (ins ANYID:$src)>;
def GET_pID32: Pseudo<(outs pID32:$dst_id), (ins ANYID:$src)>;
def GET_pID64: Pseudo<(outs pID64:$dst_id), (ins ANYID:$src)>;
def GET_vID: Pseudo<(outs vID:$dst_id), (ins ANYID:$src)>;
@@ -802,6 +804,11 @@ def OpGroupNonUniformRotateKHR: Op<4431, (outs ID:$res),
(ins TYPE:$type, ID:$scope, ID:$value, ID:$delta, variable_ops),
"$res = OpGroupNonUniformRotateKHR $type $scope $value $delta">;
+// SPV_KHR_shader_clock
+def OpReadClockKHR: Op<5056, (outs ID:$res),
+ (ins TYPE:$type, ID:$scope),
+ "$res = OpReadClockKHR $type $scope">;
+
// 3.49.7, Constant-Creation Instructions
// - SPV_INTEL_function_pointers
@@ -849,3 +856,11 @@ def OpGroupLogicalOrKHR: Op<6407, (outs ID:$res), (ins TYPE:$type, ID:$scope, i3
"$res = OpGroupLogicalOrKHR $type $scope $groupOp $value">;
def OpGroupLogicalXorKHR: Op<6408, (outs ID:$res), (ins TYPE:$type, ID:$scope, i32imm:$groupOp, ID:$value),
"$res = OpGroupLogicalXorKHR $type $scope $groupOp $value">;
+
+// Inline Assembly Instructions
+def OpAsmTargetINTEL: Op<5609, (outs ID:$res), (ins StringImm:$str), "$res = OpAsmTargetINTEL $str">;
+def OpAsmINTEL: Op<5610, (outs ID:$res), (ins TYPE:$type, TYPE:$asm_type, ID:$target,
+ StringImm:$asm, StringImm:$constraints),
+ "$res = OpAsmINTEL $type $asm_type $target $asm">;
+def OpAsmCallINTEL: Op<5611, (outs ID:$res), (ins TYPE:$type, ID:$asm, variable_ops),
+ "$res = OpAsmCallINTEL $type $asm">;
diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
index 235f947901d8..c86ab285f354 100644
--- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
@@ -1117,6 +1117,14 @@ void addInstrRequirements(const MachineInstr &MI,
Reqs.addCapability(SPIRV::Capability::GroupUniformArithmeticKHR);
}
break;
+ case SPIRV::OpReadClockKHR:
+ if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock))
+ report_fatal_error("OpReadClockKHR instruction requires the "
+ "following SPIR-V extension: SPV_KHR_shader_clock",
+ false);
+ Reqs.addExtension(SPIRV::Extension::SPV_KHR_shader_clock);
+ Reqs.addCapability(SPIRV::Capability::ShaderClockKHR);
+ break;
case SPIRV::OpFunctionPointerCallINTEL:
if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)) {
Reqs.addExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
@@ -1143,6 +1151,14 @@ void addInstrRequirements(const MachineInstr &MI,
Reqs.addCapability(SPIRV::Capability::VariableLengthArrayINTEL);
}
break;
+ case SPIRV::OpAsmTargetINTEL:
+ case SPIRV::OpAsmINTEL:
+ case SPIRV::OpAsmCallINTEL:
+ if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_inline_assembly)) {
+ Reqs.addExtension(SPIRV::Extension::SPV_INTEL_inline_assembly);
+ Reqs.addCapability(SPIRV::Capability::AsmINTEL);
+ }
+ break;
default:
break;
}
diff --git a/llvm/lib/Target/SPIRV/SPIRVPostLegalizer.cpp b/llvm/lib/Target/SPIRV/SPIRVPostLegalizer.cpp
index d652b5de6080..c3842f026670 100644
--- a/llvm/lib/Target/SPIRV/SPIRVPostLegalizer.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVPostLegalizer.cpp
@@ -54,7 +54,8 @@ extern void processInstr(MachineInstr &MI, MachineIRBuilder &MIB,
} // namespace llvm
static bool isMetaInstrGET(unsigned Opcode) {
- return Opcode == SPIRV::GET_ID || Opcode == SPIRV::GET_fID ||
+ return Opcode == SPIRV::GET_ID || Opcode == SPIRV::GET_ID64 ||
+ Opcode == SPIRV::GET_fID || Opcode == SPIRV::GET_fID64 ||
Opcode == SPIRV::GET_pID32 || Opcode == SPIRV::GET_pID64 ||
Opcode == SPIRV::GET_vID || Opcode == SPIRV::GET_vfID ||
Opcode == SPIRV::GET_vpID32 || Opcode == SPIRV::GET_vpID64;
diff --git a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
index 9bff23dd9666..85299a49a6b9 100644
--- a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
@@ -215,6 +215,8 @@ static SPIRVType *propagateSPIRVType(MachineInstr *MI, SPIRVGlobalRegistry *GR,
SpirvTy = GR->getOrCreateSPIRVType(Ty, MIB);
break;
}
+ case TargetOpcode::G_ANYEXT:
+ case TargetOpcode::G_SEXT:
case TargetOpcode::G_ZEXT: {
if (MI->getOperand(1).isReg()) {
if (MachineInstr *DefInstr =
@@ -457,12 +459,7 @@ generateAssignInstrs(MachineFunction &MF, SPIRVGlobalRegistry *GR,
Ty = VectorType::get(ElemTy, NumElts, false);
}
insertAssignInstr(Reg, Ty, nullptr, GR, MIB, MRI);
- } else if (MI.getOpcode() == TargetOpcode::G_TRUNC ||
- MI.getOpcode() == TargetOpcode::G_ZEXT ||
- MI.getOpcode() == TargetOpcode::G_PTRTOINT ||
- MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
- MI.getOpcode() == TargetOpcode::COPY ||
- MI.getOpcode() == TargetOpcode::G_ADDRSPACE_CAST) {
+ } else if (MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
propagateSPIRVType(&MI, GR, MRI, MIB);
}
@@ -474,6 +471,24 @@ generateAssignInstrs(MachineFunction &MF, SPIRVGlobalRegistry *GR,
}
for (MachineInstr *MI : ToErase)
MI->eraseFromParent();
+
+ // Address the case when IRTranslator introduces instructions with new
+ // registers that have no SPIRVType associated yet.
+ for (MachineBasicBlock &MBB : MF) {
+ for (MachineInstr &MI : MBB) {
+ switch (MI.getOpcode()) {
+ case TargetOpcode::G_TRUNC:
+ case TargetOpcode::G_ANYEXT:
+ case TargetOpcode::G_SEXT:
+ case TargetOpcode::G_ZEXT:
+ case TargetOpcode::G_PTRTOINT:
+ case TargetOpcode::COPY:
+ case TargetOpcode::G_ADDRSPACE_CAST:
+ propagateSPIRVType(&MI, GR, MRI, MIB);
+ break;
+ }
+ }
+ }
}
// Defined in SPIRVLegalizerInfo.cpp.
@@ -519,6 +534,128 @@ static void processInstrsWithTypeFolding(MachineFunction &MF,
}
}
+static void
+insertInlineAsmProcess(MachineFunction &MF, SPIRVGlobalRegistry *GR,
+ const SPIRVSubtarget &ST, MachineIRBuilder MIRBuilder,
+ const SmallVector<MachineInstr *> &ToProcess) {
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ Register AsmTargetReg;
+ for (unsigned i = 0, Sz = ToProcess.size(); i + 1 < Sz; i += 2) {
+ MachineInstr *I1 = ToProcess[i], *I2 = ToProcess[i + 1];
+ assert(isSpvIntrinsic(*I1, Intrinsic::spv_inline_asm) && I2->isInlineAsm());
+ MIRBuilder.setInsertPt(*I1->getParent(), *I1);
+
+ if (!AsmTargetReg.isValid()) {
+ // Define the vendor-specific assembly target or dialect.
+ AsmTargetReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
+ MRI.setRegClass(AsmTargetReg, &SPIRV::IDRegClass);
+ auto AsmTargetMIB =
+ MIRBuilder.buildInstr(SPIRV::OpAsmTargetINTEL).addDef(AsmTargetReg);
+ addStringImm(ST.getTargetTripleAsStr(), AsmTargetMIB);
+ GR->add(AsmTargetMIB.getInstr(), &MF, AsmTargetReg);
+ }
+
+ // Create the return, argument, and function types.
+ const MDNode *IAMD = I1->getOperand(1).getMetadata();
+ FunctionType *FTy = cast<FunctionType>(getMDOperandAsType(IAMD, 0));
+ SmallVector<SPIRVType *, 4> ArgTypes;
+ for (const auto &ArgTy : FTy->params())
+ ArgTypes.push_back(GR->getOrCreateSPIRVType(ArgTy, MIRBuilder));
+ SPIRVType *RetType =
+ GR->getOrCreateSPIRVType(FTy->getReturnType(), MIRBuilder);
+ SPIRVType *FuncType = GR->getOrCreateOpTypeFunctionWithArgs(
+ FTy, RetType, ArgTypes, MIRBuilder);
+
+ // Define the vendor-specific assembly instruction string.
+ Register AsmReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
+ MRI.setRegClass(AsmReg, &SPIRV::IDRegClass);
+ auto AsmMIB = MIRBuilder.buildInstr(SPIRV::OpAsmINTEL)
+ .addDef(AsmReg)
+ .addUse(GR->getSPIRVTypeID(RetType))
+ .addUse(GR->getSPIRVTypeID(FuncType))
+ .addUse(AsmTargetReg);
+ // Inline asm string:
+ addStringImm(I2->getOperand(InlineAsm::MIOp_AsmString).getSymbolName(),
+ AsmMIB);
+ // Inline asm constraint string:
+ addStringImm(cast<MDString>(I1->getOperand(2).getMetadata()->getOperand(0))
+ ->getString(),
+ AsmMIB);
+ GR->add(AsmMIB.getInstr(), &MF, AsmReg);
+
+ // Call the inline assembly instruction.
+ unsigned ExtraInfo = I2->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+ if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
+ MIRBuilder.buildInstr(SPIRV::OpDecorate)
+ .addUse(AsmReg)
+ .addImm(static_cast<uint32_t>(SPIRV::Decoration::SideEffectsINTEL));
+ Register DefReg;
+ SmallVector<unsigned, 4> Ops;
+ unsigned StartOp = InlineAsm::MIOp_FirstOperand,
+ AsmDescOp = InlineAsm::MIOp_FirstOperand;
+ unsigned I2Sz = I2->getNumOperands();
+ for (unsigned Idx = StartOp; Idx != I2Sz; ++Idx) {
+ const MachineOperand &MO = I2->getOperand(Idx);
+ if (MO.isMetadata())
+ continue;
+ if (Idx == AsmDescOp && MO.isImm()) {
+ // Compute the index of the next operand descriptor.
+ const InlineAsm::Flag F(MO.getImm());
+ AsmDescOp += 1 + F.getNumOperandRegisters();
+ } else {
+ if (MO.isReg() && MO.isDef())
+ DefReg = MO.getReg();
+ else
+ Ops.push_back(Idx);
+ }
+ }
+ if (!DefReg.isValid()) {
+ DefReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
+ MRI.setRegClass(DefReg, &SPIRV::IDRegClass);
+ SPIRVType *VoidType = GR->getOrCreateSPIRVType(
+ Type::getVoidTy(MF.getFunction().getContext()), MIRBuilder);
+ GR->assignSPIRVTypeToVReg(VoidType, DefReg, MF);
+ }
+ auto AsmCall = MIRBuilder.buildInstr(SPIRV::OpAsmCallINTEL)
+ .addDef(DefReg)
+ .addUse(GR->getSPIRVTypeID(RetType))
+ .addUse(AsmReg);
+ unsigned IntrIdx = 2;
+ for (unsigned Idx : Ops) {
+ ++IntrIdx;
+ const MachineOperand &MO = I2->getOperand(Idx);
+ if (MO.isReg())
+ AsmCall.addUse(MO.getReg());
+ else
+ AsmCall.addUse(I1->getOperand(IntrIdx).getReg());
+ }
+ }
+ for (MachineInstr *MI : ToProcess)
+ MI->eraseFromParent();
+}
+
+static void insertInlineAsm(MachineFunction &MF, SPIRVGlobalRegistry *GR,
+ const SPIRVSubtarget &ST,
+ MachineIRBuilder MIRBuilder) {
+ SmallVector<MachineInstr *> ToProcess;
+ for (MachineBasicBlock &MBB : MF) {
+ for (MachineInstr &MI : MBB) {
+ if (isSpvIntrinsic(MI, Intrinsic::spv_inline_asm) ||
+ MI.getOpcode() == TargetOpcode::INLINEASM)
+ ToProcess.push_back(&MI);
+ }
+ }
+ if (ToProcess.empty())
+ return;
+
+ if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_inline_assembly))
+ report_fatal_error("Inline assembly instructions require the "
+ "following SPIR-V extension: SPV_INTEL_inline_assembly",
+ false);
+
+ insertInlineAsmProcess(MF, GR, ST, MIRBuilder, ToProcess);
+}
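
insertInlineAsmProcess relies on a pairing invariant, asserted at the top of its loop: for each inline-asm call, the collection pass above pushes the spv_inline_asm intrinsic and the INLINEASM machine instruction in adjacent positions, so ToProcess is walked two entries at a time. The shape of that traversal as a toy:

#include <cassert>
#include <vector>

struct MI { bool IsIntrinsic; };

// Illustrative pair walk: entries come in (metadata-intrinsic, INLINEASM)
// pairs, mirroring the i += 2 loop in insertInlineAsmProcess.
void walkPairs(const std::vector<MI *> &ToProcess) {
  for (size_t I = 0, Sz = ToProcess.size(); I + 1 < Sz; I += 2) {
    MI *Meta = ToProcess[I];    // spv_inline_asm intrinsic
    MI *Asm = ToProcess[I + 1]; // INLINEASM machine instruction
    assert(Meta->IsIntrinsic && !Asm->IsIntrinsic);
    // ... build OpAsmINTEL / OpAsmCallINTEL from the pair ...
  }
}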
+
static void insertSpirvDecorations(MachineFunction &MF, MachineIRBuilder MIB) {
SmallVector<MachineInstr *, 10> ToErase;
for (MachineBasicBlock &MBB : MF) {
@@ -673,6 +810,7 @@ bool SPIRVPreLegalizer::runOnMachineFunction(MachineFunction &MF) {
processInstrsWithTypeFolding(MF, GR, MIB);
removeImplicitFallthroughs(MF, MIB);
insertSpirvDecorations(MF, MIB);
+ insertInlineAsm(MF, GR, ST, MIB);
return true;
}
diff --git a/llvm/lib/Target/SPIRV/SPIRVRegisterBanks.td b/llvm/lib/Target/SPIRV/SPIRVRegisterBanks.td
index dea2ef402d3d..e81d96956404 100644
--- a/llvm/lib/Target/SPIRV/SPIRVRegisterBanks.td
+++ b/llvm/lib/Target/SPIRV/SPIRVRegisterBanks.td
@@ -10,4 +10,4 @@
// as InstructionSelector RegClass checking code relies on them
def TYPERegBank : RegisterBank<"TYPEBank", [TYPE]>;
-def IDRegBank : RegisterBank<"IDBank", [ID, fID, pID32, pID64, vID, vfID, vpID32, vpID64]>;
+def IDRegBank : RegisterBank<"IDBank", [ID, ID64, fID, fID64, pID32, pID64, vID, vfID, vpID32, vpID64]>;
diff --git a/llvm/lib/Target/SPIRV/SPIRVRegisterInfo.td b/llvm/lib/Target/SPIRV/SPIRVRegisterInfo.td
index 9231d22e8d83..17f6ba59cc5d 100644
--- a/llvm/lib/Target/SPIRV/SPIRVRegisterInfo.td
+++ b/llvm/lib/Target/SPIRV/SPIRVRegisterInfo.td
@@ -29,7 +29,9 @@ let Namespace = "SPIRV" in {
// Class for non-type registers
def ID0 : Register<"ID0">;
+ def ID640 : Register<"ID640">;
def fID0 : Register<"fID0">;
+ def fID640 : Register<"fID640">;
def pID320 : Register<"pID320">;
def pID640 : Register<"pID640">;
def vID0 : Register<"vID0">;
@@ -38,7 +40,9 @@ let Namespace = "SPIRV" in {
def vpID640 : Register<"vpID640">;
def ID : RegisterClass<"SPIRV", [i32], 32, (add ID0)>;
+ def ID64 : RegisterClass<"SPIRV", [i64], 32, (add ID640)>;
def fID : RegisterClass<"SPIRV", [f32], 32, (add fID0)>;
+ def fID64 : RegisterClass<"SPIRV", [f64], 32, (add fID640)>;
def pID32 : RegisterClass<"SPIRV", [p32], 32, (add pID320)>;
def pID64 : RegisterClass<"SPIRV", [p64], 32, (add pID640)>;
def vID : RegisterClass<"SPIRV", [v2i32], 32, (add vID0)>;
@@ -48,9 +52,9 @@ let Namespace = "SPIRV" in {
def ANYID : RegisterClass<
"SPIRV",
- [i32, f32, p32, p64, v2i32, v2f32, v2p32, v2p64],
+ [i32, i64, f32, f64, p32, p64, v2i32, v2f32, v2p32, v2p64],
32,
- (add ID0, fID0, pID320, pID640, vID0, vfID0, vpID320, vpID640)>;
+ (add ID0, ID640, fID0, fID640, pID320, pID640, vID0, vfID0, vpID320, vpID640)>;
// A few instructions like OpName can take ids from both type and non-type
// instructions, so we need a super-class to allow for both to count as valid
diff --git a/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp b/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp
index 7aa0c566c75f..27472923ee08 100644
--- a/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp
@@ -82,6 +82,7 @@ SPIRVSubtarget::SPIRVSubtarget(const Triple &TT, const std::string &CPU,
GR = std::make_unique<SPIRVGlobalRegistry>(PointerSize);
CallLoweringInfo = std::make_unique<SPIRVCallLowering>(TLInfo, GR.get());
+ InlineAsmInfo = std::make_unique<SPIRVInlineAsmLowering>(TLInfo);
Legalizer = std::make_unique<SPIRVLegalizerInfo>(*this);
RegBankInfo = std::make_unique<SPIRVRegisterBankInfo>();
InstSelector.reset(
diff --git a/llvm/lib/Target/SPIRV/SPIRVSubtarget.h b/llvm/lib/Target/SPIRV/SPIRVSubtarget.h
index 3e4044084266..211216488db7 100644
--- a/llvm/lib/Target/SPIRV/SPIRVSubtarget.h
+++ b/llvm/lib/Target/SPIRV/SPIRVSubtarget.h
@@ -16,6 +16,7 @@
#include "SPIRVCallLowering.h"
#include "SPIRVFrameLowering.h"
#include "SPIRVISelLowering.h"
+#include "SPIRVInlineAsmLowering.h"
#include "SPIRVInstrInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
@@ -54,6 +55,7 @@ private:
std::unique_ptr<RegisterBankInfo> RegBankInfo;
std::unique_ptr<LegalizerInfo> Legalizer;
std::unique_ptr<InstructionSelector> InstSelector;
+ std::unique_ptr<InlineAsmLowering> InlineAsmInfo;
// TODO: Initialise the available extensions, extended instruction sets
// based on the environment settings.
@@ -81,6 +83,7 @@ public:
TargetTriple.getArch() == Triple::spirv64;
}
bool isVulkanEnv() const { return TargetTriple.getArch() == Triple::spirv; }
+ const std::string &getTargetTripleAsStr() const { return TargetTriple.str(); }
VersionTuple getSPIRVVersion() const { return SPIRVVersion; };
bool isAtLeastSPIRVVer(VersionTuple VerToCompareTo) const;
bool isAtLeastOpenCLVer(VersionTuple VerToCompareTo) const;
@@ -108,6 +111,9 @@ public:
InstructionSelector *getInstructionSelector() const override {
return InstSelector.get();
}
+ const InlineAsmLowering *getInlineAsmLowering() const override {
+ return InlineAsmInfo.get();
+ }
const SPIRVInstrInfo *getInstrInfo() const override { return &InstrInfo; }
const SPIRVFrameLowering *getFrameLowering() const override {
return &FrameLowering;
diff --git a/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td b/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td
index 31e19ad8630c..98cbd9d2c1f2 100644
--- a/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td
+++ b/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td
@@ -298,6 +298,7 @@ defm SPV_INTEL_optnone : ExtensionOperand<103>;
defm SPV_INTEL_function_pointers : ExtensionOperand<104>;
defm SPV_INTEL_variable_length_array : ExtensionOperand<105>;
defm SPV_INTEL_bfloat16_conversion : ExtensionOperand<106>;
+defm SPV_INTEL_inline_assembly : ExtensionOperand<107>;
//===----------------------------------------------------------------------===//
// Multiclass used to define Capabilities enum values and at the same time
@@ -413,6 +414,7 @@ defm ImageGatherBiasLodAMD : CapabilityOperand<5009, 0, 0, [], [Shader]>;
defm FragmentMaskAMD : CapabilityOperand<5010, 0, 0, [], [Shader]>;
defm StencilExportEXT : CapabilityOperand<5013, 0, 0, [], [Shader]>;
defm ImageReadWriteLodAMD : CapabilityOperand<5015, 0, 0, [], [Shader]>;
+defm ShaderClockKHR : CapabilityOperand<5055, 0, 0, [SPV_KHR_shader_clock], []>;
defm SampleMaskOverrideCoverageNV : CapabilityOperand<5249, 0, 0, [], [SampleRateShading]>;
defm GeometryShaderPassthroughNV : CapabilityOperand<5251, 0, 0, [], [Geometry]>;
defm ShaderViewportIndexLayerEXT : CapabilityOperand<5254, 0, 0, [], [MultiViewport]>;
@@ -457,6 +459,7 @@ defm BitInstructions : CapabilityOperand<6025, 0, 0, [SPV_KHR_bit_instructions],
defm ExpectAssumeKHR : CapabilityOperand<5629, 0, 0, [SPV_KHR_expect_assume], []>;
defm FunctionPointersINTEL : CapabilityOperand<5603, 0, 0, [SPV_INTEL_function_pointers], []>;
defm IndirectReferencesINTEL : CapabilityOperand<5604, 0, 0, [SPV_INTEL_function_pointers], []>;
+defm AsmINTEL : CapabilityOperand<5606, 0, 0, [SPV_INTEL_inline_assembly], []>;
defm GroupNonUniformRotateKHR : CapabilityOperand<6026, 0, 0, [SPV_KHR_subgroup_rotate], [GroupNonUniform]>;
defm AtomicFloat32AddEXT : CapabilityOperand<6033, 0, 0, [SPV_EXT_shader_atomic_float_add], []>;
defm AtomicFloat64AddEXT : CapabilityOperand<6034, 0, 0, [SPV_EXT_shader_atomic_float_add], []>;
@@ -1200,6 +1203,8 @@ defm UserSemantic : DecorationOperand<5635, 0, 0, [], []>;
defm RestrictPointerEXT : DecorationOperand<5355, 0, 0, [], [PhysicalStorageBufferAddressesEXT]>;
defm AliasedPointerEXT : DecorationOperand<5356, 0, 0, [], [PhysicalStorageBufferAddressesEXT]>;
defm ReferencedIndirectlyINTEL : DecorationOperand<5602, 0, 0, [], [IndirectReferencesINTEL]>;
+defm ClobberINTEL : DecorationOperand<5607, 0, 0, [SPV_INTEL_inline_assembly], [AsmINTEL]>;
+defm SideEffectsINTEL : DecorationOperand<5608, 0, 0, [SPV_INTEL_inline_assembly], [AsmINTEL]>;
defm ArgumentAttributeINTEL : DecorationOperand<6409, 0, 0, [], [FunctionPointersINTEL]>;
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/VE/VVPNodes.def b/llvm/lib/Target/VE/VVPNodes.def
index a60588672293..da1e4836fc4f 100644
--- a/llvm/lib/Target/VE/VVPNodes.def
+++ b/llvm/lib/Target/VE/VVPNodes.def
@@ -99,8 +99,8 @@ ADD_BINARY_VVP_OP_COMPACT(MUL)
ADD_BINARY_VVP_OP_COMPACT(UDIV)
ADD_BINARY_VVP_OP_COMPACT(SDIV)
-ADD_BINARY_VVP_OP(VVP_SRA,VP_ASHR,SRA) REGISTER_PACKED(VVP_SRA)
-ADD_BINARY_VVP_OP(VVP_SRL,VP_LSHR,SRL) REGISTER_PACKED(VVP_SRL)
+ADD_BINARY_VVP_OP(VVP_SRA,VP_SRA,SRA) REGISTER_PACKED(VVP_SRA)
+ADD_BINARY_VVP_OP(VVP_SRL,VP_SRL,SRL) REGISTER_PACKED(VVP_SRL)
ADD_BINARY_VVP_OP_COMPACT(SHL) REGISTER_PACKED(VVP_SHL)
ADD_BINARY_VVP_OP_COMPACT(AND) REGISTER_PACKED(VVP_AND)
diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
index d4e9fb057c44..34502170a5c7 100644
--- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
+++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
@@ -345,6 +345,8 @@ inline bool isArgument(unsigned Opc) {
case WebAssembly::ARGUMENT_v4i32_S:
case WebAssembly::ARGUMENT_v2i64:
case WebAssembly::ARGUMENT_v2i64_S:
+ case WebAssembly::ARGUMENT_v8f16:
+ case WebAssembly::ARGUMENT_v8f16_S:
case WebAssembly::ARGUMENT_v4f32:
case WebAssembly::ARGUMENT_v4f32_S:
case WebAssembly::ARGUMENT_v2f64:
diff --git a/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.cpp b/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.cpp
index fac2e0d935f5..867953b4e8d7 100644
--- a/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.cpp
+++ b/llvm/lib/Target/WebAssembly/Utils/WebAssemblyTypeUtilities.cpp
@@ -50,6 +50,7 @@ wasm::ValType WebAssembly::toValType(MVT Type) {
case MVT::v8i16:
case MVT::v4i32:
case MVT::v2i64:
+ case MVT::v8f16:
case MVT::v4f32:
case MVT::v2f64:
return wasm::ValType::V128;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
index 3524abba8990..443558537da2 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
@@ -62,7 +62,7 @@ MVT WebAssemblyAsmPrinter::getRegType(unsigned RegNo) const {
const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
const TargetRegisterClass *TRC = MRI->getRegClass(RegNo);
for (MVT T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64, MVT::v16i8, MVT::v8i16,
- MVT::v4i32, MVT::v2i64, MVT::v4f32, MVT::v2f64})
+ MVT::v4i32, MVT::v2i64, MVT::v4f32, MVT::v2f64, MVT::v8f16})
if (TRI->isTypeLegalForClass(*TRC, T))
return T;
LLVM_DEBUG(errs() << "Unknown type for register number: " << RegNo);
@@ -662,6 +662,8 @@ void WebAssemblyAsmPrinter::emitInstruction(const MachineInstr *MI) {
case WebAssembly::ARGUMENT_v4f32_S:
case WebAssembly::ARGUMENT_v2f64:
case WebAssembly::ARGUMENT_v2f64_S:
+ case WebAssembly::ARGUMENT_v8f16:
+ case WebAssembly::ARGUMENT_v8f16_S:
// These represent values which are live into the function entry, so there's
// no instruction to emit.
break;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
index 1c62290704fe..26e13948bc9a 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
@@ -885,18 +885,6 @@ bool WebAssemblyFastISel::selectCall(const Instruction *I) {
Table->setNoStrip();
MIB.addImm(0);
}
- // See if we must truncate the function pointer.
- // CALL_INDIRECT takes an i32, but in wasm64 we represent function pointers
- // as 64-bit for uniformity with other pointer types.
- // See also: WebAssemblyISelLowering.cpp: LowerCallResults
- if (Subtarget->hasAddr64()) {
- auto Wrap = BuildMI(*FuncInfo.MBB, std::prev(FuncInfo.InsertPt), MIMD,
- TII.get(WebAssembly::I32_WRAP_I64));
- Register Reg32 = createResultReg(&WebAssembly::I32RegClass);
- Wrap.addReg(Reg32, RegState::Define);
- Wrap.addReg(CalleeReg);
- CalleeReg = Reg32;
- }
}
for (unsigned ArgReg : Args)
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 527bb4c9fbea..518b6932a0c8 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -70,6 +70,9 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering(
addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
}
+ if (Subtarget->hasHalfPrecision()) {
+ addRegisterClass(MVT::v8f16, &WebAssembly::V128RegClass);
+ }
if (Subtarget->hasReferenceTypes()) {
addRegisterClass(MVT::externref, &WebAssembly::EXTERNREFRegClass);
addRegisterClass(MVT::funcref, &WebAssembly::FUNCREFRegClass);
@@ -576,20 +579,6 @@ LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
const MCInstrDesc &MCID = TII.get(CallOp);
MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));
- // See if we must truncate the function pointer.
- // CALL_INDIRECT takes an i32, but in wasm64 we represent function pointers
- // as 64-bit for uniformity with other pointer types.
- // See also: WebAssemblyFastISel::selectCall
- if (IsIndirect && MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()) {
- Register Reg32 =
- MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
- auto &FnPtr = CallParams.getOperand(0);
- BuildMI(*BB, CallResults.getIterator(), DL,
- TII.get(WebAssembly::I32_WRAP_I64), Reg32)
- .addReg(FnPtr.getReg());
- FnPtr.setReg(Reg32);
- }
-
// Move the function pointer to the end of the arguments for indirect calls
if (IsIndirect) {
auto FnPtr = CallParams.getOperand(0);
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
index af95dfa25a18..558e3d859dcd 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
@@ -38,6 +38,13 @@ multiclass RELAXED_I<dag oops_r, dag iops_r, dag oops_s, dag iops_s,
asmstr_s, simdop, HasRelaxedSIMD>;
}
+multiclass HALF_PRECISION_I<dag oops_r, dag iops_r, dag oops_s, dag iops_s,
+ list<dag> pattern_r, string asmstr_r = "",
+ string asmstr_s = "", bits<32> simdop = -1> {
+ defm "" : ABSTRACT_SIMD_I<oops_r, iops_r, oops_s, iops_s, pattern_r, asmstr_r,
+ asmstr_s, simdop, HasHalfPrecision>;
+}
+
defm "" : ARGUMENT<V128, v16i8>;
defm "" : ARGUMENT<V128, v8i16>;
@@ -45,6 +52,7 @@ defm "" : ARGUMENT<V128, v4i32>;
defm "" : ARGUMENT<V128, v2i64>;
defm "" : ARGUMENT<V128, v4f32>;
defm "" : ARGUMENT<V128, v2f64>;
+defm "" : ARGUMENT<V128, v8f16>;
// Constrained immediate argument types. Allow any value from the minimum signed
// value to the maximum unsigned value for the lane size.
@@ -591,6 +599,14 @@ defm "" : Splat<I64x2, 18>;
defm "" : Splat<F32x4, 19>;
defm "" : Splat<F64x2, 20>;
+// Half values are not fully supported, so an intrinsic is used instead of a
+// regular Splat pattern as above.
+defm SPLAT_F16x8 :
+ HALF_PRECISION_I<(outs V128:$dst), (ins F32:$x),
+ (outs), (ins),
+ [(set (v8f16 V128:$dst), (int_wasm_splat_f16x8 F32:$x))],
+ "f16x8.splat\t$dst, $x", "f16x8.splat", 0x120>;
+
// scalar_to_vector leaves high lanes undefined, so can be a splat
foreach vec = AllVecs in
def : Pat<(vec.vt (scalar_to_vector (vec.lane_vt vec.lane_rc:$x))),
@@ -644,6 +660,14 @@ def : Pat<
(and (vector_extract (v8i16 V128:$vec), (i32 LaneIdx8:$idx)), (i32 0xffff)),
(EXTRACT_LANE_I16x8_u $vec, imm:$idx)>;
+defm EXTRACT_LANE_F16x8 :
+ HALF_PRECISION_I<(outs F32:$dst), (ins V128:$vec, vec_i8imm_op:$idx),
+ (outs), (ins vec_i8imm_op:$idx),
+ [(set (f32 F32:$dst), (int_wasm_extract_lane_f16x8
+ (v8f16 V128:$vec), (i32 LaneIdx16:$idx)))],
+ "f16x8.extract_lane\t$dst, $vec, $idx",
+ "f16x8.extract_lane\t$idx", 0x121>;
+
// Replace lane value: replace_lane
multiclass ReplaceLane<Vec vec, bits<32> simdop> {
defm REPLACE_LANE_#vec :
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td b/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td
index ba2936b492a9..4e2faa608be0 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td
@@ -63,7 +63,8 @@ def I32 : WebAssemblyRegClass<[i32], 32, (add FP32, SP32, I32_0)>;
def I64 : WebAssemblyRegClass<[i64], 64, (add FP64, SP64, I64_0)>;
def F32 : WebAssemblyRegClass<[f32], 32, (add F32_0)>;
def F64 : WebAssemblyRegClass<[f64], 64, (add F64_0)>;
-def V128 : WebAssemblyRegClass<[v4f32, v2f64, v2i64, v4i32, v16i8, v8i16], 128,
- (add V128_0)>;
+def V128 : WebAssemblyRegClass<[v8f16, v4f32, v2f64, v2i64, v4i32, v16i8,
+ v8i16],
+ 128, (add V128_0)>;
def FUNCREF : WebAssemblyRegClass<[funcref], 0, (add FUNCREF_0)>;
def EXTERNREF : WebAssemblyRegClass<[externref], 0, (add EXTERNREF_0)>;
diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 62b4a9278954..662310610931 100644
--- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -1802,7 +1802,7 @@ bool X86AsmParser::ParseIntelNamedOperator(StringRef Name,
bool &ParseError, SMLoc &End) {
// A named operator should be either lower or upper case, but not a mix...
// except in MASM, which uses full case-insensitivity.
- if (Name.compare(Name.lower()) && Name.compare(Name.upper()) &&
+ if (Name != Name.lower() && Name != Name.upper() &&
!getParser().isParsingMasm())
return false;
if (Name.equals_insensitive("not")) {
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 99dc9797f6df..472f34a4efdb 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -723,9 +723,10 @@ void X86AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
bool X86AsmBackend::mayNeedRelaxation(const MCInst &MI,
const MCSubtargetInfo &STI) const {
unsigned Opcode = MI.getOpcode();
+ unsigned SkipOperands = X86::isCCMPCC(Opcode) ? 2 : 0;
return isRelaxableBranch(Opcode) ||
(X86::getOpcodeForLongImmediateForm(Opcode) != Opcode &&
- MI.getOperand(MI.getNumOperands() - 1).isExpr());
+ MI.getOperand(MI.getNumOperands() - 1 - SkipOperands).isExpr());
}
bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
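
mayNeedRelaxation has to inspect the immediate operand, which is normally last; per the SkipOperands = 2 adjustment above, the conditional-compare (CCMP) forms carry two extra trailing operands after it — presumably the condition code and the default-flags immediate of the APX encoding, though the exact operand order is an assumption here. Sketch:

// Illustrative: locate the immediate operand that decides relaxability.
// CCMP forms are assumed to append two non-immediate trailing operands.
unsigned relaxImmOperandIndex(unsigned NumOperands, bool IsCCMP) {
  unsigned SkipOperands = IsCCMP ? 2 : 0;
  return NumOperands - 1 - SkipOperands;
}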
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimizationForImmediate.def b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimizationForImmediate.def
index 27b6a654e6eb..f3997a092e45 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimizationForImmediate.def
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimizationForImmediate.def
@@ -18,6 +18,12 @@ ENTRY(CMP32mi, CMP32mi8)
ENTRY(CMP32ri, CMP32ri8)
ENTRY(CMP64mi32, CMP64mi8)
ENTRY(CMP64ri32, CMP64ri8)
+ENTRY(CCMP16mi, CCMP16mi8)
+ENTRY(CCMP16ri, CCMP16ri8)
+ENTRY(CCMP32mi, CCMP32mi8)
+ENTRY(CCMP32ri, CCMP32ri8)
+ENTRY(CCMP64mi32, CCMP64mi8)
+ENTRY(CCMP64ri32, CCMP64ri8)
ENTRY(PUSH16i, PUSH16i8)
ENTRY(PUSH32i, PUSH32i8)
ENTRY(PUSH64i32, PUSH64i8)
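
The .def file above is an X-macro table: each consumer defines ENTRY to expand the (long-immediate, 8-bit-immediate) opcode pairs into whatever construct it needs before including the file. A self-contained toy of the pattern, with the table inlined instead of #include'd:

// Toy X-macro table standing in for the .def file's ENTRY lines.
#define OPT_TABLE(ENTRY)                                                       \
  ENTRY(CCMP32ri, CCMP32ri8)                                                   \
  ENTRY(CCMP64ri32, CCMP64ri8)

enum Opcode {
#define ENTRY(LONG, SHORT) LONG, SHORT,
  OPT_TABLE(ENTRY)
#undef ENTRY
};

// Map a long-immediate form to its sign-extended 8-bit-immediate form.
int shrinkImmForm(int Opc) {
  switch (Opc) {
#define ENTRY(LONG, SHORT)                                                     \
  case LONG:                                                                   \
    return SHORT;
    OPT_TABLE(ENTRY)
#undef ENTRY
  default:
    return Opc; // no shorter encoding available
  }
}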
diff --git a/llvm/lib/Target/X86/X86.td b/llvm/lib/Target/X86/X86.td
index 54642ecde18c..7e8133e3e1ac 100644
--- a/llvm/lib/Target/X86/X86.td
+++ b/llvm/lib/Target/X86/X86.td
@@ -124,24 +124,15 @@ def FeatureEVEX512 : SubtargetFeature<"evex512", "HasEVEX512", "true",
def FeatureAVX512 : SubtargetFeature<"avx512f", "X86SSELevel", "AVX512",
"Enable AVX-512 instructions",
[FeatureAVX2, FeatureFMA, FeatureF16C]>;
-def FeatureERI : SubtargetFeature<"avx512er", "HasERI", "true",
- "Enable AVX-512 Exponential and Reciprocal Instructions",
- [FeatureAVX512]>;
def FeatureCDI : SubtargetFeature<"avx512cd", "HasCDI", "true",
"Enable AVX-512 Conflict Detection Instructions",
[FeatureAVX512]>;
def FeatureVPOPCNTDQ : SubtargetFeature<"avx512vpopcntdq", "HasVPOPCNTDQ",
"true", "Enable AVX-512 Population Count Instructions",
[FeatureAVX512]>;
-def FeaturePFI : SubtargetFeature<"avx512pf", "HasPFI", "true",
- "Enable AVX-512 PreFetch Instructions",
- [FeatureAVX512]>;
def FeaturePREFETCHI : SubtargetFeature<"prefetchi", "HasPREFETCHI",
"true",
"Prefetch instruction with T0 or T1 Hint">;
-def FeaturePREFETCHWT1 : SubtargetFeature<"prefetchwt1", "HasPREFETCHWT1",
- "true",
- "Prefetch with Intent to Write and T1 Hint">;
def FeatureDQI : SubtargetFeature<"avx512dq", "HasDQI", "true",
"Enable AVX-512 Doubleword and Quadword Instructions",
[FeatureAVX512]>;
@@ -1312,10 +1303,7 @@ def ProcessorFeatures {
FeatureFSGSBase,
FeatureAVX512,
FeatureEVEX512,
- FeatureERI,
FeatureCDI,
- FeaturePFI,
- FeaturePREFETCHWT1,
FeatureADX,
FeatureRDSEED,
FeatureMOVBE,
diff --git a/llvm/lib/Target/X86/X86FixupBWInsts.cpp b/llvm/lib/Target/X86/X86FixupBWInsts.cpp
index db1d21b59a7b..a0c91d4e3c3d 100644
--- a/llvm/lib/Target/X86/X86FixupBWInsts.cpp
+++ b/llvm/lib/Target/X86/X86FixupBWInsts.cpp
@@ -53,7 +53,6 @@
#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/Passes.h"
@@ -113,8 +112,6 @@ public:
FixupBWInstPass() : MachineFunctionPass(ID) { }
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<MachineLoopInfo>(); // Machine loop info is used to
- // guide some heuristics.
AU.addRequired<ProfileSummaryInfoWrapperPass>();
AU.addRequired<LazyMachineBlockFrequencyInfoPass>();
MachineFunctionPass::getAnalysisUsage(AU);
@@ -141,9 +138,6 @@ private:
/// Local member for function's OptForSize attribute.
bool OptForSize = false;
- /// Machine loop info used for guiding some heruistics.
- MachineLoopInfo *MLI = nullptr;
-
/// Register Liveness information after the current instruction.
LiveRegUnits LiveUnits;
@@ -164,7 +158,6 @@ bool FixupBWInstPass::runOnMachineFunction(MachineFunction &MF) {
this->MF = &MF;
TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
TRI = MF.getRegInfo().getTargetRegisterInfo();
- MLI = &getAnalysis<MachineLoopInfo>();
PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
MBFI = (PSI && PSI->hasProfileSummary()) ?
&getAnalysis<LazyMachineBlockFrequencyInfoPass>().getBFI() :
diff --git a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
index ea5ef5b5a602..80ff98b46617 100644
--- a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
+++ b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
@@ -73,7 +73,7 @@ using CondRegArray = std::array<unsigned, X86::LAST_VALID_COND + 1>;
class X86FlagsCopyLoweringPass : public MachineFunctionPass {
public:
- X86FlagsCopyLoweringPass() : MachineFunctionPass(ID) { }
+ X86FlagsCopyLoweringPass() : MachineFunctionPass(ID) {}
StringRef getPassName() const override { return "X86 EFLAGS copy lowering"; }
bool runOnMachineFunction(MachineFunction &MF) override;
@@ -102,32 +102,14 @@ private:
void insertTest(MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos,
const DebugLoc &Loc, unsigned Reg);
- void rewriteArithmetic(MachineBasicBlock &TestMBB,
- MachineBasicBlock::iterator TestPos,
- const DebugLoc &TestLoc, MachineInstr &MI,
- MachineOperand &FlagUse, CondRegArray &CondRegs);
- void rewriteCMov(MachineBasicBlock &TestMBB,
- MachineBasicBlock::iterator TestPos, const DebugLoc &TestLoc,
- MachineInstr &CMovI, MachineOperand &FlagUse,
- CondRegArray &CondRegs);
- void rewriteFCMov(MachineBasicBlock &TestMBB,
- MachineBasicBlock::iterator TestPos,
- const DebugLoc &TestLoc, MachineInstr &CMovI,
- MachineOperand &FlagUse, CondRegArray &CondRegs);
- void rewriteCondJmp(MachineBasicBlock &TestMBB,
- MachineBasicBlock::iterator TestPos,
- const DebugLoc &TestLoc, MachineInstr &JmpI,
- CondRegArray &CondRegs);
- void rewriteCopy(MachineInstr &MI, MachineOperand &FlagUse,
- MachineInstr &CopyDefI);
- void rewriteSetCC(MachineBasicBlock &TestMBB,
- MachineBasicBlock::iterator TestPos,
- const DebugLoc &TestLoc, MachineInstr &SetCCI,
- MachineOperand &FlagUse, CondRegArray &CondRegs);
- void rewriteCCMP(MachineBasicBlock &TestMBB,
- MachineBasicBlock::iterator TestPos, const DebugLoc &TestLoc,
- MachineInstr &CMovI, MachineOperand &FlagUse,
- CondRegArray &CondRegs);
+ void rewriteSetCC(MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos,
+ const DebugLoc &Loc, MachineInstr &MI,
+ CondRegArray &CondRegs);
+ void rewriteArithmetic(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator Pos, const DebugLoc &Loc,
+ MachineInstr &MI, CondRegArray &CondRegs);
+ void rewriteMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos,
+ const DebugLoc &Loc, MachineInstr &MI, CondRegArray &CondRegs);
};
} // end anonymous namespace
@@ -148,85 +130,9 @@ void X86FlagsCopyLoweringPass::getAnalysisUsage(AnalysisUsage &AU) const {
MachineFunctionPass::getAnalysisUsage(AU);
}
-namespace {
-/// An enumeration of the arithmetic instruction mnemonics which have
-/// interesting flag semantics.
-///
-/// We can map instruction opcodes into these mnemonics to make it easy to
-/// dispatch with specific functionality.
-enum class FlagArithMnemonic {
- ADC,
- RCL,
- RCR,
- SBB,
- SETB,
-};
-} // namespace
-
-static FlagArithMnemonic getMnemonicFromOpcode(unsigned Opcode) {
- switch (Opcode) {
- default:
- report_fatal_error("No support for lowering a copy into EFLAGS when used "
- "by this instruction!");
-
-#define CASE_ND(OP) \
- case X86::OP: \
- case X86::OP##_ND:
-
-#define LLVM_EXPAND_INSTR_SIZES(MNEMONIC, SUFFIX) \
- CASE_ND(MNEMONIC##8##SUFFIX) \
- CASE_ND(MNEMONIC##16##SUFFIX) \
- CASE_ND(MNEMONIC##32##SUFFIX) \
- CASE_ND(MNEMONIC##64##SUFFIX)
-
-#define LLVM_EXPAND_ADC_SBB_INSTR(MNEMONIC) \
- LLVM_EXPAND_INSTR_SIZES(MNEMONIC, rr) \
- LLVM_EXPAND_INSTR_SIZES(MNEMONIC, rm) \
- LLVM_EXPAND_INSTR_SIZES(MNEMONIC, mr) \
- CASE_ND(MNEMONIC##8ri) \
- CASE_ND(MNEMONIC##16ri8) \
- CASE_ND(MNEMONIC##32ri8) \
- CASE_ND(MNEMONIC##64ri8) \
- CASE_ND(MNEMONIC##16ri) \
- CASE_ND(MNEMONIC##32ri) \
- CASE_ND(MNEMONIC##64ri32) \
- CASE_ND(MNEMONIC##8mi) \
- CASE_ND(MNEMONIC##16mi8) \
- CASE_ND(MNEMONIC##32mi8) \
- CASE_ND(MNEMONIC##64mi8) \
- CASE_ND(MNEMONIC##16mi) \
- CASE_ND(MNEMONIC##32mi) \
- CASE_ND(MNEMONIC##64mi32) \
- case X86::MNEMONIC##8i8: \
- case X86::MNEMONIC##16i16: \
- case X86::MNEMONIC##32i32: \
- case X86::MNEMONIC##64i32:
-
- LLVM_EXPAND_ADC_SBB_INSTR(ADC)
- return FlagArithMnemonic::ADC;
-
- LLVM_EXPAND_ADC_SBB_INSTR(SBB)
- return FlagArithMnemonic::SBB;
-
-#undef LLVM_EXPAND_ADC_SBB_INSTR
-
- LLVM_EXPAND_INSTR_SIZES(RCL, rCL)
- LLVM_EXPAND_INSTR_SIZES(RCL, r1)
- LLVM_EXPAND_INSTR_SIZES(RCL, ri)
- return FlagArithMnemonic::RCL;
-
- LLVM_EXPAND_INSTR_SIZES(RCR, rCL)
- LLVM_EXPAND_INSTR_SIZES(RCR, r1)
- LLVM_EXPAND_INSTR_SIZES(RCR, ri)
- return FlagArithMnemonic::RCR;
-
-#undef LLVM_EXPAND_INSTR_SIZES
-#undef CASE_ND
-
- case X86::SETB_C32r:
- case X86::SETB_C64r:
- return FlagArithMnemonic::SETB;
- }
+static bool isArithmeticOp(unsigned Opc) {
+ return X86::isADC(Opc) || X86::isSBB(Opc) || X86::isRCL(Opc) ||
+ X86::isRCR(Opc) || (Opc == X86::SETB_C32r || Opc == X86::SETB_C64r);
}
static MachineBasicBlock &splitBlock(MachineBasicBlock &MBB,
@@ -329,28 +235,6 @@ static MachineBasicBlock &splitBlock(MachineBasicBlock &MBB,
return NewMBB;
}
-static X86::CondCode getCondFromFCMOV(unsigned Opcode) {
- switch (Opcode) {
- default: return X86::COND_INVALID;
- case X86::CMOVBE_Fp32: case X86::CMOVBE_Fp64: case X86::CMOVBE_Fp80:
- return X86::COND_BE;
- case X86::CMOVB_Fp32: case X86::CMOVB_Fp64: case X86::CMOVB_Fp80:
- return X86::COND_B;
- case X86::CMOVE_Fp32: case X86::CMOVE_Fp64: case X86::CMOVE_Fp80:
- return X86::COND_E;
- case X86::CMOVNBE_Fp32: case X86::CMOVNBE_Fp64: case X86::CMOVNBE_Fp80:
- return X86::COND_A;
- case X86::CMOVNB_Fp32: case X86::CMOVNB_Fp64: case X86::CMOVNB_Fp80:
- return X86::COND_AE;
- case X86::CMOVNE_Fp32: case X86::CMOVNE_Fp64: case X86::CMOVNE_Fp80:
- return X86::COND_NE;
- case X86::CMOVNP_Fp32: case X86::CMOVNP_Fp64: case X86::CMOVNP_Fp80:
- return X86::COND_NP;
- case X86::CMOVP_Fp32: case X86::CMOVP_Fp64: case X86::CMOVP_Fp80:
- return X86::COND_P;
- }
-}
-
bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
<< " **********\n");
@@ -362,7 +246,7 @@ bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
MDT = &getAnalysis<MachineDominatorTree>();
PromoteRC = &X86::GR8RegClass;
- if (MF.begin() == MF.end())
+ if (MF.empty())
// Nothing to do for a degenerate empty function...
return false;
@@ -569,20 +453,12 @@ bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
MachineOperand *FlagUse =
MI.findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr);
- if (!FlagUse) {
- if (MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr)) {
- // If EFLAGS are defined, it's as-if they were killed. We can stop
- // scanning here.
- //
- // NB!!! Many instructions only modify some flags. LLVM currently
- // models this as clobbering all flags, but if that ever changes
- // this will need to be carefully updated to handle that more
- // complex logic.
- FlagsKilled = true;
- break;
- }
+ FlagsKilled = MI.modifiesRegister(X86::EFLAGS, TRI);
+
+ if (!FlagUse && FlagsKilled)
+ break;
+ else if (!FlagUse)
continue;
- }
LLVM_DEBUG(dbgs() << " Rewriting use: "; MI.dump());
@@ -604,40 +480,23 @@ bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
JmpIs.push_back(&*JmpIt);
++JmpIt;
} while (JmpIt != UseMBB.instr_end() &&
- X86::getCondFromBranch(*JmpIt) !=
- X86::COND_INVALID);
+ X86::getCondFromBranch(*JmpIt) != X86::COND_INVALID);
break;
}
// Otherwise we can just rewrite in-place.
- if (X86::getCondFromCMov(MI) != X86::COND_INVALID ||
- X86::getCondFromCFCMov(MI) != X86::COND_INVALID) {
- rewriteCMov(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
- } else if (getCondFromFCMOV(MI.getOpcode()) != X86::COND_INVALID) {
- rewriteFCMov(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
- } else if (X86::getCondFromSETCC(MI) != X86::COND_INVALID) {
- rewriteSetCC(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
- } else if (X86::getCondFromCCMP(MI) != X86::COND_INVALID) {
- rewriteCCMP(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
- FlagsKilled = true;
- } else if (MI.getOpcode() == TargetOpcode::COPY) {
- rewriteCopy(MI, *FlagUse, CopyDefI);
+ unsigned Opc = MI.getOpcode();
+ if (Opc == TargetOpcode::COPY) {
+ // Just replace this copy with the original copy def.
+ MRI->replaceRegWith(MI.getOperand(0).getReg(),
+ CopyDefI.getOperand(0).getReg());
+ MI.eraseFromParent();
+ } else if (X86::isSETCC(Opc)) {
+ rewriteSetCC(*TestMBB, TestPos, TestLoc, MI, CondRegs);
+ } else if (isArithmeticOp(Opc)) {
+ rewriteArithmetic(*TestMBB, TestPos, TestLoc, MI, CondRegs);
} else {
- // We assume all other instructions that use flags also def them.
- assert(MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr) &&
- "Expected a def of EFLAGS for this instruction!");
-
- // NB!!! Several arithmetic instructions only *partially* update
- // flags. Theoretically, we could generate MI code sequences that
- // would rely on this fact and observe different flags independently.
- // But currently LLVM models all of these instructions as clobbering
- // all the flags in an undef way. We rely on that to simplify the
- // logic.
- FlagsKilled = true;
-
- // Generically handle remaining uses as arithmetic instructions.
- rewriteArithmetic(*TestMBB, TestPos, TestLoc, MI, *FlagUse,
- CondRegs);
+ rewriteMI(*TestMBB, TestPos, TestLoc, MI, CondRegs);
}
// If this was the last use of the flags, we're done.
@@ -702,7 +561,7 @@ bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
else
LastJmpMBB = JmpI->getParent();
- rewriteCondJmp(*TestMBB, TestPos, TestLoc, *JmpI, CondRegs);
+ rewriteMI(*TestMBB, TestPos, TestLoc, *JmpI, CondRegs);
}
// FIXME: Mark the last use of EFLAGS before the copy's def as a kill if
@@ -753,8 +612,8 @@ Register X86FlagsCopyLoweringPass::promoteCondToReg(
MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
const DebugLoc &TestLoc, X86::CondCode Cond) {
Register Reg = MRI->createVirtualRegister(PromoteRC);
- auto SetI = BuildMI(TestMBB, TestPos, TestLoc,
- TII->get(X86::SETCCr), Reg).addImm(Cond);
+ auto SetI = BuildMI(TestMBB, TestPos, TestLoc, TII->get(X86::SETCCr), Reg)
+ .addImm(Cond);
(void)SetI;
LLVM_DEBUG(dbgs() << " save cond: "; SetI->dump());
++NumSetCCsInserted;
@@ -785,43 +644,66 @@ void X86FlagsCopyLoweringPass::insertTest(MachineBasicBlock &MBB,
++NumTestsInserted;
}
-void X86FlagsCopyLoweringPass::rewriteArithmetic(
- MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
- const DebugLoc &TestLoc, MachineInstr &MI, MachineOperand &FlagUse,
- CondRegArray &CondRegs) {
- // Arithmetic is either reading CF or OF. Figure out which condition we need
- // to preserve in a register.
- X86::CondCode Cond = X86::COND_INVALID;
+void X86FlagsCopyLoweringPass::rewriteSetCC(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator Pos,
+ const DebugLoc &Loc,
+ MachineInstr &MI,
+ CondRegArray &CondRegs) {
+ X86::CondCode Cond = X86::getCondFromSETCC(MI);
+ // Note that we can't usefully rewrite this to the inverse without complex
+ // analysis of the users of the setCC. Largely we rely on duplicate setCCs
+ // having already been avoided before we get here.
+ unsigned &CondReg = CondRegs[Cond];
+ if (!CondReg)
+ CondReg = promoteCondToReg(MBB, Pos, Loc, Cond);
- // The addend to use to reset CF or OF when added to the flag value.
- int Addend = 0;
-
- switch (getMnemonicFromOpcode(MI.getOpcode())) {
- case FlagArithMnemonic::ADC:
- case FlagArithMnemonic::RCL:
- case FlagArithMnemonic::RCR:
- case FlagArithMnemonic::SBB:
- case FlagArithMnemonic::SETB:
- Cond = X86::COND_B; // CF == 1
- // Set up an addend that when one is added will need a carry due to not
- // having a higher bit available.
- Addend = 255;
- break;
+ // Rewriting a register def is trivial: we just replace the register and
+ // remove the setcc.
+ if (!MI.mayStore()) {
+ assert(MI.getOperand(0).isReg() &&
+ "Cannot have a non-register defined operand to SETcc!");
+ Register OldReg = MI.getOperand(0).getReg();
+ // Drop Kill flags on the old register before replacing. CondReg may have
+ // a longer live range.
+ MRI->clearKillFlags(OldReg);
+ MRI->replaceRegWith(OldReg, CondReg);
+ MI.eraseFromParent();
+ return;
}
+ // Otherwise, we need to emit a store.
+ auto MIB = BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
+ TII->get(X86::MOV8mr));
+ // Copy the address operands.
+ for (int i = 0; i < X86::AddrNumOperands; ++i)
+ MIB.add(MI.getOperand(i));
+
+ MIB.addReg(CondReg);
+ MIB.setMemRefs(MI.memoperands());
+ MI.eraseFromParent();
+}
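
The store path above copies X86::AddrNumOperands leading operands before appending the value register: an x86 memory reference in MachineInstr form is always five operands in a fixed order. A small reference sketch (enum names illustrative; the constant matches X86::AddrNumOperands):

// The five machine operands of an x86 memory reference, in order.
enum X86MemOperandSlot {
  MemBase = 0,    // base register
  MemScale = 1,   // scale immediate: 1, 2, 4, or 8
  MemIndex = 2,   // index register
  MemDisp = 3,    // displacement (immediate or symbol)
  MemSegment = 4  // segment register
};
constexpr int AddrNumOperands = 5;
// Copying operands [0, AddrNumOperands) from the SETcc memory form and then
// adding CondReg yields the MOV8mr shape: five memory operands, then the
// source register.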
+
+void X86FlagsCopyLoweringPass::rewriteArithmetic(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos,
+ const DebugLoc &Loc, MachineInstr &MI, CondRegArray &CondRegs) {
+ // Arithmetic is either reading CF or OF.
+ X86::CondCode Cond = X86::COND_B; // CF == 1
+ // The addend to use to reset CF or OF when added to the flag value.
+ // Set up an addend that when one is added will need a carry due to not
+ // having a higher bit available.
+ int Addend = 255;
+
// Now get a register that contains the value of the flag input to the
// arithmetic. We require exactly this flag to simplify the arithmetic
// required to materialize it back into the flag.
unsigned &CondReg = CondRegs[Cond];
if (!CondReg)
- CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);
-
- MachineBasicBlock &MBB = *MI.getParent();
+ CondReg = promoteCondToReg(MBB, Pos, Loc, Cond);
// Insert an instruction that will set the flag back to the desired value.
Register TmpReg = MRI->createVirtualRegister(PromoteRC);
auto AddI =
- BuildMI(MBB, MI.getIterator(), MI.getDebugLoc(),
+ BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
TII->get(Subtarget->hasNDD() ? X86::ADD8ri_ND : X86::ADD8ri))
.addDef(TmpReg, RegState::Dead)
.addReg(CondReg)
@@ -829,177 +711,81 @@ void X86FlagsCopyLoweringPass::rewriteArithmetic(
(void)AddI;
LLVM_DEBUG(dbgs() << " add cond: "; AddI->dump());
++NumAddsInserted;
- FlagUse.setIsKill(true);
+ MI.findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr)->setIsKill(true);
}
-void X86FlagsCopyLoweringPass::rewriteCMov(MachineBasicBlock &TestMBB,
- MachineBasicBlock::iterator TestPos,
- const DebugLoc &TestLoc,
- MachineInstr &CMovI,
- MachineOperand &FlagUse,
- CondRegArray &CondRegs) {
- // First get the register containing this specific condition.
- X86::CondCode Cond = X86::getCondFromCMov(CMovI) == X86::COND_INVALID
- ? X86::getCondFromCFCMov(CMovI)
- : X86::getCondFromCMov(CMovI);
- unsigned CondReg;
- bool Inverted;
- std::tie(CondReg, Inverted) =
- getCondOrInverseInReg(TestMBB, TestPos, TestLoc, Cond, CondRegs);
-
- MachineBasicBlock &MBB = *CMovI.getParent();
+static X86::CondCode getImplicitCondFromMI(unsigned Opc) {
+#define FROM_TO(A, B) \
+ case X86::CMOV##A##_Fp32: \
+ case X86::CMOV##A##_Fp64: \
+ case X86::CMOV##A##_Fp80: \
+ return X86::COND_##B;
- // Insert a direct test of the saved register.
- insertTest(MBB, CMovI.getIterator(), CMovI.getDebugLoc(), CondReg);
-
- // Rewrite the CMov to use the !ZF flag from the test, and then kill its use
- // of the flags afterward.
- CMovI.getOperand(CMovI.getDesc().getNumOperands() - 1)
- .setImm(Inverted ? X86::COND_E : X86::COND_NE);
- FlagUse.setIsKill(true);
- LLVM_DEBUG(dbgs() << " fixed cmov: "; CMovI.dump());
+ switch (Opc) {
+ default:
+ return X86::COND_INVALID;
+ FROM_TO(B, B)
+ FROM_TO(E, E)
+ FROM_TO(P, P)
+ FROM_TO(BE, BE)
+ FROM_TO(NB, AE)
+ FROM_TO(NE, NE)
+ FROM_TO(NP, NP)
+ FROM_TO(NBE, A)
+ }
+#undef FROM_TO
}
-void X86FlagsCopyLoweringPass::rewriteFCMov(MachineBasicBlock &TestMBB,
- MachineBasicBlock::iterator TestPos,
- const DebugLoc &TestLoc,
- MachineInstr &CMovI,
- MachineOperand &FlagUse,
- CondRegArray &CondRegs) {
- // First get the register containing this specific condition.
- X86::CondCode Cond = getCondFromFCMOV(CMovI.getOpcode());
- unsigned CondReg;
- bool Inverted;
- std::tie(CondReg, Inverted) =
- getCondOrInverseInReg(TestMBB, TestPos, TestLoc, Cond, CondRegs);
-
- MachineBasicBlock &MBB = *CMovI.getParent();
-
- // Insert a direct test of the saved register.
- insertTest(MBB, CMovI.getIterator(), CMovI.getDebugLoc(), CondReg);
-
- auto getFCMOVOpcode = [](unsigned Opcode, bool Inverted) {
- switch (Opcode) {
- default: llvm_unreachable("Unexpected opcode!");
- case X86::CMOVBE_Fp32: case X86::CMOVNBE_Fp32:
- case X86::CMOVB_Fp32: case X86::CMOVNB_Fp32:
- case X86::CMOVE_Fp32: case X86::CMOVNE_Fp32:
- case X86::CMOVP_Fp32: case X86::CMOVNP_Fp32:
- return Inverted ? X86::CMOVE_Fp32 : X86::CMOVNE_Fp32;
- case X86::CMOVBE_Fp64: case X86::CMOVNBE_Fp64:
- case X86::CMOVB_Fp64: case X86::CMOVNB_Fp64:
- case X86::CMOVE_Fp64: case X86::CMOVNE_Fp64:
- case X86::CMOVP_Fp64: case X86::CMOVNP_Fp64:
- return Inverted ? X86::CMOVE_Fp64 : X86::CMOVNE_Fp64;
- case X86::CMOVBE_Fp80: case X86::CMOVNBE_Fp80:
- case X86::CMOVB_Fp80: case X86::CMOVNB_Fp80:
- case X86::CMOVE_Fp80: case X86::CMOVNE_Fp80:
- case X86::CMOVP_Fp80: case X86::CMOVNP_Fp80:
- return Inverted ? X86::CMOVE_Fp80 : X86::CMOVNE_Fp80;
- }
- };
-
- // Rewrite the CMov to use the !ZF flag from the test.
- CMovI.setDesc(TII->get(getFCMOVOpcode(CMovI.getOpcode(), Inverted)));
- FlagUse.setIsKill(true);
- LLVM_DEBUG(dbgs() << " fixed fcmov: "; CMovI.dump());
+static unsigned getOpcodeWithCC(unsigned Opc, X86::CondCode CC) {
+ assert((CC == X86::COND_E || CC == X86::COND_NE) && "Unexpected CC");
+#define CASE(A) \
+ case X86::CMOVB_##A: \
+ case X86::CMOVE_##A: \
+ case X86::CMOVP_##A: \
+ case X86::CMOVBE_##A: \
+ case X86::CMOVNB_##A: \
+ case X86::CMOVNE_##A: \
+ case X86::CMOVNP_##A: \
+ case X86::CMOVNBE_##A: \
+ return (CC == X86::COND_E) ? X86::CMOVE_##A : X86::CMOVNE_##A;
+ switch (Opc) {
+ default:
+ llvm_unreachable("Unexpected opcode");
+ CASE(Fp32)
+ CASE(Fp64)
+ CASE(Fp80)
+ }
+#undef CASE
}
-void X86FlagsCopyLoweringPass::rewriteCondJmp(
- MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
- const DebugLoc &TestLoc, MachineInstr &JmpI, CondRegArray &CondRegs) {
+void X86FlagsCopyLoweringPass::rewriteMI(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator Pos,
+ const DebugLoc &Loc, MachineInstr &MI,
+ CondRegArray &CondRegs) {
// First get the register containing this specific condition.
- X86::CondCode Cond = X86::getCondFromBranch(JmpI);
+ bool IsImplicitCC = false;
+ X86::CondCode CC = X86::getCondFromMI(MI);
+ if (CC == X86::COND_INVALID) {
+ CC = getImplicitCondFromMI(MI.getOpcode());
+ IsImplicitCC = true;
+ }
+ assert(CC != X86::COND_INVALID && "Unknown EFLAG user!");
unsigned CondReg;
bool Inverted;
std::tie(CondReg, Inverted) =
- getCondOrInverseInReg(TestMBB, TestPos, TestLoc, Cond, CondRegs);
-
- MachineBasicBlock &JmpMBB = *JmpI.getParent();
+ getCondOrInverseInReg(MBB, Pos, Loc, CC, CondRegs);
// Insert a direct test of the saved register.
- insertTest(JmpMBB, JmpI.getIterator(), JmpI.getDebugLoc(), CondReg);
-
- // Rewrite the jump to use the !ZF flag from the test, and kill its use of
- // flags afterward.
- JmpI.getOperand(1).setImm(Inverted ? X86::COND_E : X86::COND_NE);
- JmpI.findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr)->setIsKill(true);
- LLVM_DEBUG(dbgs() << " fixed jCC: "; JmpI.dump());
-}
-
-void X86FlagsCopyLoweringPass::rewriteCopy(MachineInstr &MI,
- MachineOperand &FlagUse,
- MachineInstr &CopyDefI) {
- // Just replace this copy with the original copy def.
- MRI->replaceRegWith(MI.getOperand(0).getReg(),
- CopyDefI.getOperand(0).getReg());
- MI.eraseFromParent();
-}
-
-void X86FlagsCopyLoweringPass::rewriteSetCC(MachineBasicBlock &TestMBB,
- MachineBasicBlock::iterator TestPos,
- const DebugLoc &TestLoc,
- MachineInstr &SetCCI,
- MachineOperand &FlagUse,
- CondRegArray &CondRegs) {
- X86::CondCode Cond = X86::getCondFromSETCC(SetCCI);
- // Note that we can't usefully rewrite this to the inverse without complex
- // analysis of the users of the setCC. Largely we rely on duplicates which
- // could have been avoided already being avoided here.
- unsigned &CondReg = CondRegs[Cond];
- if (!CondReg)
- CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);
-
- // Rewriting a register def is trivial: we just replace the register and
- // remove the setcc.
- if (!SetCCI.mayStore()) {
- assert(SetCCI.getOperand(0).isReg() &&
- "Cannot have a non-register defined operand to SETcc!");
- Register OldReg = SetCCI.getOperand(0).getReg();
- // Drop Kill flags on the old register before replacing. CondReg may have
- // a longer live range.
- MRI->clearKillFlags(OldReg);
- MRI->replaceRegWith(OldReg, CondReg);
- SetCCI.eraseFromParent();
- return;
- }
+ insertTest(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(), CondReg);
- // Otherwise, we need to emit a store.
- auto MIB = BuildMI(*SetCCI.getParent(), SetCCI.getIterator(),
- SetCCI.getDebugLoc(), TII->get(X86::MOV8mr));
- // Copy the address operands.
- for (int i = 0; i < X86::AddrNumOperands; ++i)
- MIB.add(SetCCI.getOperand(i));
-
- MIB.addReg(CondReg);
-
- MIB.setMemRefs(SetCCI.memoperands());
-
- SetCCI.eraseFromParent();
-}
-
-void X86FlagsCopyLoweringPass::rewriteCCMP(MachineBasicBlock &TestMBB,
- MachineBasicBlock::iterator TestPos,
- const DebugLoc &TestLoc,
- MachineInstr &CCMPI,
- MachineOperand &FlagUse,
- CondRegArray &CondRegs) {
- // First get the register containing this specific condition.
- X86::CondCode Cond = X86::getCondFromCCMP(CCMPI);
- unsigned CondReg;
- bool Inverted;
- std::tie(CondReg, Inverted) =
- getCondOrInverseInReg(TestMBB, TestPos, TestLoc, Cond, CondRegs);
-
- MachineBasicBlock &MBB = *CCMPI.getParent();
+ // Rewrite the instruction to use the !ZF flag from the test, and then kill
+ // its use of the flags afterward.
+ X86::CondCode NewCC = Inverted ? X86::COND_E : X86::COND_NE;
+ if (IsImplicitCC)
+ MI.setDesc(TII->get(getOpcodeWithCC(MI.getOpcode(), NewCC)));
+ else
+ MI.getOperand(MI.getDesc().getNumOperands() - 1).setImm(NewCC);
- // Insert a direct test of the saved register.
- insertTest(MBB, CCMPI.getIterator(), CCMPI.getDebugLoc(), CondReg);
-
- // Rewrite the CCMP/CTEST to use the !ZF flag from the test, and then kill its
- // use of the flags afterward.
- CCMPI.getOperand(CCMPI.getDesc().getNumOperands() - 1)
- .setImm(Inverted ? X86::COND_E : X86::COND_NE);
- FlagUse.setIsKill(true);
- LLVM_DEBUG(dbgs() << " fixed ccmp/ctest: "; CCMPI.dump());
+ MI.findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr)->setIsKill(true);
+ LLVM_DEBUG(dbgs() << " fixed instruction: "; MI.dump());
}
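
The ADD8ri rewrite above rematerializes a flag from the saved 0/1 condition byte: with an addend of 255, the 8-bit add carries exactly when the condition byte is 1. A standalone sketch of that carry behavior (plain C++, illustrative only, no LLVM APIs):

    #include <cstdint>
    #include <cstdio>

    // Model of an 8-bit ADD's carry flag: CF is set exactly when the true
    // sum does not fit in 8 bits.
    static bool carryFromAdd8(uint8_t CondByte, uint8_t Addend) {
      unsigned Sum = unsigned(CondByte) + unsigned(Addend);
      return Sum > 0xFF;
    }

    int main() {
      // Condition was true:  1 + 255 = 256 -> carry out, CF = 1.
      // Condition was false: 0 + 255 = 255 -> no carry,  CF = 0.
      printf("CF(cond=1) = %d\n", carryFromAdd8(1, 255));
      printf("CF(cond=0) = %d\n", carryFromAdd8(0, 255));
      return 0;
    }
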
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 7f76324fa570..3227bf75a43f 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -1553,11 +1553,16 @@ void X86DAGToDAGISel::PostprocessISelDAG() {
switch (Opc) {
default:
continue;
- // TESTrr+ANDrr/rm -> TESTrr/TESTmr
+      // ANDrr/rm + TESTrr -> TESTrr/TESTmr
case X86::TEST8rr:
case X86::TEST16rr:
case X86::TEST32rr:
- case X86::TEST64rr: {
+ case X86::TEST64rr:
+ // ANDrr/rm + CTESTrr -> CTESTrr/CTESTmr
+ case X86::CTEST8rr:
+ case X86::CTEST16rr:
+ case X86::CTEST32rr:
+ case X86::CTEST64rr: {
auto &Op0 = N->getOperand(0);
if (Op0 != N->getOperand(1) || !Op0->hasNUsesOfValue(2, Op0.getResNo()) ||
!Op0.isMachineOpcode())
@@ -1575,8 +1580,11 @@ void X86DAGToDAGISel::PostprocessISelDAG() {
CASE_ND(AND64rr) {
if (And->hasAnyUseOfValue(1))
continue;
- MachineSDNode *Test = CurDAG->getMachineNode(
- Opc, SDLoc(N), MVT::i32, And.getOperand(0), And.getOperand(1));
+ SmallVector<SDValue> Ops(N->op_values());
+ Ops[0] = And.getOperand(0);
+ Ops[1] = And.getOperand(1);
+ MachineSDNode *Test =
+ CurDAG->getMachineNode(Opc, SDLoc(N), MVT::i32, Ops);
ReplaceUses(N, Test);
MadeChange = true;
continue;
@@ -1588,8 +1596,9 @@ void X86DAGToDAGISel::PostprocessISelDAG() {
if (And->hasAnyUseOfValue(1))
continue;
unsigned NewOpc;
+ bool IsCTESTCC = X86::isCTESTCC(Opc);
#define FROM_TO(A, B) \
- CASE_ND(A) NewOpc = X86::B; \
+ CASE_ND(A) NewOpc = IsCTESTCC ? X86::C##B : X86::B; \
break;
switch (And.getMachineOpcode()) {
FROM_TO(AND8rm, TEST8mr);
@@ -1600,10 +1609,20 @@ void X86DAGToDAGISel::PostprocessISelDAG() {
#undef FROM_TO
#undef CASE_ND
// Need to swap the memory and register operand.
- SDValue Ops[] = {And.getOperand(1), And.getOperand(2),
- And.getOperand(3), And.getOperand(4),
- And.getOperand(5), And.getOperand(0),
- And.getOperand(6) /* Chain */};
+ SmallVector<SDValue> Ops = {And.getOperand(1), And.getOperand(2),
+ And.getOperand(3), And.getOperand(4),
+ And.getOperand(5), And.getOperand(0)};
+        // Append the CC and CFlags operands.
+ if (IsCTESTCC) {
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ }
+        // Chain of the memory load.
+ Ops.push_back(And.getOperand(6));
+ // Glue
+ if (IsCTESTCC)
+ Ops.push_back(N->getOperand(4));
+
MachineSDNode *Test = CurDAG->getMachineNode(
NewOpc, SDLoc(N), MVT::i32, MVT::Other, Ops);
CurDAG->setNodeMemRefs(
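
The fold above relies on TEST computing flags from the bitwise AND of its operands without writing a register, so a TEST of an AND result against itself is equivalent to a TEST of the AND's inputs. A minimal model of that equivalence (plain C++, illustrative only):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t X = 0x0F0F, Y = 0x00FF;
      uint32_t T = X & Y; // ANDrr result
      // TESTrr on the AND result: flags of T & T, i.e. of T itself.
      bool ZFBefore = ((T & T) == 0);
      // TESTrr/TESTmr directly on the AND inputs after the fold.
      bool ZFAfter = ((X & Y) == 0);
      printf("ZF before/after fold: %d %d\n", ZFBefore, ZFAfter); // identical
      return 0;
    }
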
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 5d0846453685..7df8ffb7d039 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -88,6 +88,12 @@ static cl::opt<int> BrMergingBaseCostThresh(
"to never merge branches."),
cl::Hidden);
+static cl::opt<int> BrMergingCcmpBias(
+ "x86-br-merging-ccmp-bias", cl::init(6),
+ cl::desc("Increases 'x86-br-merging-base-cost' in cases that the target "
+ "supports conditional compare instructions."),
+ cl::Hidden);
+
static cl::opt<int> BrMergingLikelyBias(
"x86-br-merging-likely-bias", cl::init(0),
cl::desc("Increases 'x86-br-merging-base-cost' in cases that it is likely "
@@ -1108,13 +1114,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
}
- setOperationAction(ISD::ABDU, MVT::v16i8, Custom);
- setOperationAction(ISD::ABDS, MVT::v16i8, Custom);
- setOperationAction(ISD::ABDU, MVT::v8i16, Custom);
- setOperationAction(ISD::ABDS, MVT::v8i16, Custom);
- setOperationAction(ISD::ABDU, MVT::v4i32, Custom);
- setOperationAction(ISD::ABDS, MVT::v4i32, Custom);
-
setOperationAction(ISD::UADDSAT, MVT::v16i8, Legal);
setOperationAction(ISD::SADDSAT, MVT::v16i8, Legal);
setOperationAction(ISD::USUBSAT, MVT::v16i8, Legal);
@@ -1132,9 +1131,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
- setOperationAction(ISD::SETCC, VT, Custom);
- setOperationAction(ISD::CTPOP, VT, Custom);
- setOperationAction(ISD::ABS, VT, Custom);
+ setOperationAction(ISD::SETCC, VT, Custom);
+ setOperationAction(ISD::CTPOP, VT, Custom);
+ setOperationAction(ISD::ABS, VT, Custom);
+ setOperationAction(ISD::ABDS, VT, Custom);
+ setOperationAction(ISD::ABDU, VT, Custom);
// The condition codes aren't legal in SSE/AVX and under AVX512 we use
// setcc all the way to isel and prefer SETGT in some isel patterns.
@@ -1336,11 +1337,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::UMIN, MVT::v8i16, Legal);
setOperationAction(ISD::UMIN, MVT::v4i32, Legal);
- for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) {
- setOperationAction(ISD::ABDS, VT, Custom);
- setOperationAction(ISD::ABDU, VT, Custom);
- }
-
setOperationAction(ISD::UADDSAT, MVT::v4i32, Custom);
setOperationAction(ISD::SADDSAT, MVT::v2i64, Custom);
setOperationAction(ISD::SSUBSAT, MVT::v2i64, Custom);
@@ -2032,6 +2028,10 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::ROTL, MVT::v32i16, Custom);
setOperationAction(ISD::ROTR, MVT::v32i16, Custom);
}
+
+ setOperationAction(ISD::FNEG, MVT::v32f16, Custom);
+ setOperationAction(ISD::FABS, MVT::v32f16, Custom);
+ setOperationAction(ISD::FCOPYSIGN, MVT::v32f16, Custom);
}// useAVX512Regs
if (!Subtarget.useSoftFloat() && Subtarget.hasVBMI2()) {
@@ -2108,9 +2108,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
setOperationAction(ISD::CTPOP, VT, Legal);
}
- setOperationAction(ISD::FNEG, MVT::v32f16, Custom);
- setOperationAction(ISD::FABS, MVT::v32f16, Custom);
- setOperationAction(ISD::FCOPYSIGN, MVT::v32f16, Custom);
}
// This block control legalization of v32i1/v64i1 which are available with
@@ -3292,7 +3289,7 @@ bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
if (VT != MVT::i32 && VT != MVT::i64)
return false;
- return !isa<ConstantSDNode>(Y);
+ return !isa<ConstantSDNode>(Y) || cast<ConstantSDNode>(Y)->isOpaque();
}
bool X86TargetLowering::hasAndNot(SDValue Y) const {
@@ -3412,6 +3409,9 @@ X86TargetLowering::getJumpConditionMergingParams(Instruction::BinaryOps Opc,
const Value *Rhs) const {
using namespace llvm::PatternMatch;
int BaseCost = BrMergingBaseCostThresh.getValue();
+ // With CCMP, branches can be merged in a more efficient way.
+ if (BaseCost >= 0 && Subtarget.hasCCMP())
+ BaseCost += BrMergingCcmpBias;
// a == b && a == c is a fast pattern on x86.
ICmpInst::Predicate Pred;
if (BaseCost >= 0 && Opc == Instruction::And &&
@@ -20130,12 +20130,11 @@ SDValue X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
return Res;
}
-static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
+static SDValue LowerAVXExtend(SDValue Op, const SDLoc &dl, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = Op.getSimpleValueType();
SDValue In = Op.getOperand(0);
MVT InVT = In.getSimpleValueType();
- SDLoc dl(Op);
unsigned Opc = Op.getOpcode();
assert(VT.isVector() && InVT.isVector() && "Expected vector type");
@@ -20206,14 +20205,13 @@ static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
}
-static SDValue LowerZERO_EXTEND_Mask(SDValue Op,
- const X86Subtarget &Subtarget,
- SelectionDAG &DAG) {
+static SDValue LowerZERO_EXTEND_Mask(SDValue Op, const SDLoc &DL,
+ const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
MVT InVT = In.getSimpleValueType();
assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
- SDLoc DL(Op);
unsigned NumElts = VT.getVectorNumElements();
// For all vectors, but vXi8 we can just emit a sign_extend and a shift. This
@@ -20268,12 +20266,13 @@ static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue In = Op.getOperand(0);
MVT SVT = In.getSimpleValueType();
+ SDLoc DL(Op);
if (SVT.getVectorElementType() == MVT::i1)
- return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);
+ return LowerZERO_EXTEND_Mask(Op, DL, Subtarget, DAG);
assert(Subtarget.hasAVX() && "Expected AVX support");
- return LowerAVXExtend(Op, DAG, Subtarget);
+ return LowerAVXExtend(Op, DL, DAG, Subtarget);
}
/// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
@@ -24320,7 +24319,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops, Op->getFlags());
}
-static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
+static SDValue LowerSIGN_EXTEND_Mask(SDValue Op, const SDLoc &dl,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op->getSimpleValueType(0);
@@ -24328,8 +24327,6 @@ static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
MVT InVT = In.getSimpleValueType();
assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
MVT VTElt = VT.getVectorElementType();
- SDLoc dl(Op);
-
unsigned NumElts = VT.getVectorNumElements();
// Extend VT if the scalar type is i8/i16 and BWI is not supported.
@@ -24381,12 +24378,13 @@ static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue In = Op->getOperand(0);
MVT InVT = In.getSimpleValueType();
+ SDLoc DL(Op);
if (InVT.getVectorElementType() == MVT::i1)
- return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
+ return LowerSIGN_EXTEND_Mask(Op, DL, Subtarget, DAG);
assert(Subtarget.hasAVX() && "Expected AVX support");
- return LowerAVXExtend(Op, DAG, Subtarget);
+ return LowerAVXExtend(Op, DL, DAG, Subtarget);
}
// Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
@@ -24524,7 +24522,7 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
SDLoc dl(Op);
if (InVT.getVectorElementType() == MVT::i1)
- return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
+ return LowerSIGN_EXTEND_Mask(Op, dl, Subtarget, DAG);
assert(VT.isVector() && InVT.isVector() && "Expected vector type");
assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
@@ -28421,18 +28419,6 @@ static SDValue LowerABD(SDValue Op, const X86Subtarget &Subtarget,
}
}
- // TODO: Move to TargetLowering expandABD().
- if (!Subtarget.hasSSE41() &&
- ((IsSigned && VT == MVT::v16i8) || VT == MVT::v4i32)) {
- SDValue LHS = DAG.getFreeze(Op.getOperand(0));
- SDValue RHS = DAG.getFreeze(Op.getOperand(1));
- ISD::CondCode CC = IsSigned ? ISD::CondCode::SETGT : ISD::CondCode::SETUGT;
- SDValue Cmp = DAG.getSetCC(dl, VT, LHS, RHS, CC);
- SDValue Diff0 = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
- SDValue Diff1 = DAG.getNode(ISD::SUB, dl, VT, RHS, LHS);
- return getBitSelect(dl, VT, Diff0, Diff1, Cmp, DAG);
- }
-
// Default to expand.
return SDValue();
}
@@ -33849,18 +33835,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(ADDSUB)
NODE_NAME_CASE(RCP14)
NODE_NAME_CASE(RCP14S)
- NODE_NAME_CASE(RCP28)
- NODE_NAME_CASE(RCP28_SAE)
- NODE_NAME_CASE(RCP28S)
- NODE_NAME_CASE(RCP28S_SAE)
- NODE_NAME_CASE(EXP2)
- NODE_NAME_CASE(EXP2_SAE)
NODE_NAME_CASE(RSQRT14)
NODE_NAME_CASE(RSQRT14S)
- NODE_NAME_CASE(RSQRT28)
- NODE_NAME_CASE(RSQRT28_SAE)
- NODE_NAME_CASE(RSQRT28S)
- NODE_NAME_CASE(RSQRT28S_SAE)
NODE_NAME_CASE(FADD_RND)
NODE_NAME_CASE(FADDS)
NODE_NAME_CASE(FADDS_RND)
@@ -33970,6 +33946,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(TESTUI)
NODE_NAME_CASE(FP80_ADD)
NODE_NAME_CASE(STRICT_FP80_ADD)
+ NODE_NAME_CASE(CCMP)
+ NODE_NAME_CASE(CTEST)
}
return nullptr;
#undef NODE_NAME_CASE
@@ -35417,7 +35395,7 @@ X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
MachineFunction &MF = *BB->getParent();
// Emit CALLSEQ_START right before the instruction.
- BB->getParent()->getFrameInfo().setAdjustsStack(true);
+ MF.getFrameInfo().setAdjustsStack(true);
unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
MachineInstrBuilder CallseqStart =
BuildMI(MF, MIMD, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
@@ -36400,6 +36378,31 @@ X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
}
MachineBasicBlock *
+X86TargetLowering::emitPatchableEventCall(MachineInstr &MI,
+ MachineBasicBlock *BB) const {
+ // Wrap patchable event calls in CALLSEQ_START/CALLSEQ_END, as tracing
+ // calls may require proper stack alignment.
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
+ const MIMetadata MIMD(MI);
+ MachineFunction &MF = *BB->getParent();
+
+ // Emit CALLSEQ_START right before the instruction.
+ MF.getFrameInfo().setAdjustsStack(true);
+ unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
+ MachineInstrBuilder CallseqStart =
+ BuildMI(MF, MIMD, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
+ BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
+
+ // Emit CALLSEQ_END right after the instruction.
+ unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
+ MachineInstrBuilder CallseqEnd =
+ BuildMI(MF, MIMD, TII.get(AdjStackUp)).addImm(0).addImm(0);
+ BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);
+
+ return BB;
+}
+
+MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
@@ -36629,7 +36632,7 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
case TargetOpcode::PATCHABLE_EVENT_CALL:
case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
- return BB;
+ return emitPatchableEventCall(MI, BB);
case X86::LCMPXCHG8B: {
const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
@@ -42963,7 +42966,6 @@ bool X86TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode(
bool PoisonOnly, unsigned Depth) const {
unsigned NumElts = DemandedElts.getBitWidth();
- // TODO: Add more target shuffles.
switch (Op.getOpcode()) {
case X86ISD::PSHUFD:
case X86ISD::VPERMILPI: {
@@ -42999,8 +43001,12 @@ bool X86TargetLowering::canCreateUndefOrPoisonForTargetNode(
SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const {
- // TODO: Add more target shuffles.
switch (Op.getOpcode()) {
+  // SSE vector shifts handle out-of-bounds shift amounts.
+ case X86ISD::VSHLI:
+ case X86ISD::VSRLI:
+ case X86ISD::VSRAI:
+ return false;
case X86ISD::PSHUFD:
case X86ISD::VPERMILPI:
case X86ISD::UNPCKH:
@@ -43443,7 +43449,11 @@ static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
// the chain.
static SDValue combineBitcastToBoolVector(EVT VT, SDValue V, const SDLoc &DL,
SelectionDAG &DAG,
- const X86Subtarget &Subtarget) {
+ const X86Subtarget &Subtarget,
+ unsigned Depth = 0) {
+ if (Depth >= SelectionDAG::MaxRecursionDepth)
+ return SDValue(); // Limit search depth.
+
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned Opc = V.getOpcode();
switch (Opc) {
@@ -43455,14 +43465,22 @@ static SDValue combineBitcastToBoolVector(EVT VT, SDValue V, const SDLoc &DL,
return DAG.getBitcast(VT, Src);
break;
}
+ case ISD::Constant: {
+ auto *C = cast<ConstantSDNode>(V);
+ if (C->isZero())
+ return DAG.getConstant(0, DL, VT);
+ if (C->isAllOnes())
+ return DAG.getAllOnesConstant(DL, VT);
+ break;
+ }
case ISD::TRUNCATE: {
// If we find a suitable source, a truncated scalar becomes a subvector.
SDValue Src = V.getOperand(0);
EVT NewSrcVT =
EVT::getVectorVT(*DAG.getContext(), MVT::i1, Src.getValueSizeInBits());
if (TLI.isTypeLegal(NewSrcVT))
- if (SDValue N0 =
- combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
+ if (SDValue N0 = combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG,
+ Subtarget, Depth + 1))
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, N0,
DAG.getIntPtrConstant(0, DL));
break;
@@ -43474,20 +43492,22 @@ static SDValue combineBitcastToBoolVector(EVT VT, SDValue V, const SDLoc &DL,
EVT NewSrcVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
Src.getScalarValueSizeInBits());
if (TLI.isTypeLegal(NewSrcVT))
- if (SDValue N0 =
- combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
+ if (SDValue N0 = combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG,
+ Subtarget, Depth + 1))
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
Opc == ISD::ANY_EXTEND ? DAG.getUNDEF(VT)
: DAG.getConstant(0, DL, VT),
N0, DAG.getIntPtrConstant(0, DL));
break;
}
- case ISD::OR: {
- // If we find suitable sources, we can just move an OR to the vector domain.
- SDValue Src0 = V.getOperand(0);
- SDValue Src1 = V.getOperand(1);
- if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
- if (SDValue N1 = combineBitcastToBoolVector(VT, Src1, DL, DAG, Subtarget))
+ case ISD::OR:
+ case ISD::XOR: {
+ // If we find suitable sources, we can just move the op to the vector
+ // domain.
+ if (SDValue N0 = combineBitcastToBoolVector(VT, V.getOperand(0), DL, DAG,
+ Subtarget, Depth + 1))
+ if (SDValue N1 = combineBitcastToBoolVector(VT, V.getOperand(1), DL, DAG,
+ Subtarget, Depth + 1))
return DAG.getNode(Opc, DL, VT, N0, N1);
break;
}
@@ -43499,13 +43519,20 @@ static SDValue combineBitcastToBoolVector(EVT VT, SDValue V, const SDLoc &DL,
break;
if (auto *Amt = dyn_cast<ConstantSDNode>(V.getOperand(1)))
- if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
+ if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget,
+ Depth + 1))
return DAG.getNode(
X86ISD::KSHIFTL, DL, VT, N0,
DAG.getTargetConstant(Amt->getZExtValue(), DL, MVT::i8));
break;
}
}
+
+ // Does the inner bitcast already exist?
+ if (Depth > 0)
+ if (SDNode *Alt = DAG.getNodeIfExists(ISD::BITCAST, DAG.getVTList(VT), {V}))
+ return SDValue(Alt, 0);
+
return SDValue();
}
@@ -43694,14 +43721,14 @@ static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
return combinevXi1ConstantToInteger(N0, DAG);
}
- if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
- VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
- isa<ConstantSDNode>(N0)) {
- auto *C = cast<ConstantSDNode>(N0);
- if (C->isAllOnes())
- return DAG.getConstant(1, SDLoc(N0), VT);
- if (C->isZero())
- return DAG.getConstant(0, SDLoc(N0), VT);
+ if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() && VT.isVector() &&
+ VT.getVectorElementType() == MVT::i1) {
+ if (auto *C = dyn_cast<ConstantSDNode>(N0)) {
+ if (C->isAllOnes())
+ return DAG.getConstant(1, SDLoc(N0), VT);
+ if (C->isZero())
+ return DAG.getConstant(0, SDLoc(N0), VT);
+ }
}
// Look for MOVMSK that is maybe truncated and then bitcasted to vXi1.
@@ -49217,6 +49244,148 @@ static SDValue combineBMILogicOp(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+static SDValue combineX86SubCmpForFlags(SDNode *N, SDValue Flag,
+ SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &ST) {
+ // cmp(setcc(cc, X), 0)
+ // brcond ne
+ // ->
+ // X
+ // brcond cc
+
+ // sub(setcc(cc, X), 1)
+ // brcond ne
+ // ->
+ // X
+ // brcond ~cc
+ //
+ // if only flag has users
+
+ SDValue SetCC = N->getOperand(0);
+
+ if (SetCC.getOpcode() != X86ISD::SETCC || !Flag.hasOneUse())
+ return SDValue();
+
+  // Check that the only user of the flag is `brcond ne`.
+ SDNode *BrCond = *Flag->uses().begin();
+ if (BrCond->getOpcode() != X86ISD::BRCOND)
+ return SDValue();
+ unsigned CondNo = 2;
+ if (static_cast<X86::CondCode>(BrCond->getConstantOperandVal(CondNo)) !=
+ X86::COND_NE)
+ return SDValue();
+
+ SDValue X = SetCC.getOperand(1);
+  // sub has two results while X has only one. The DAG combiner assumes the
+  // value types match.
+ if (N->getOpcode() == X86ISD::SUB)
+ X = DAG.getMergeValues({N->getOperand(0), X}, SDLoc(N));
+
+ SDValue CCN = SetCC.getOperand(0);
+ X86::CondCode CC =
+ static_cast<X86::CondCode>(CCN->getAsAPIntVal().getSExtValue());
+ X86::CondCode OppositeCC = X86::GetOppositeBranchCondition(CC);
+ // Update CC for the consumer of the flag.
+ // The old CC is `ne`. Hence, when comparing the result with 0, we are
+ // checking if the second condition evaluates to true. When comparing the
+  // result with 1, we are checking if the second condition evaluates to false.
+ SmallVector<SDValue> Ops(BrCond->op_values());
+ if (isNullConstant(N->getOperand(1)))
+ Ops[CondNo] = CCN;
+ else if (isOneConstant(N->getOperand(1)))
+ Ops[CondNo] = DAG.getTargetConstant(OppositeCC, SDLoc(BrCond), MVT::i8);
+ else
+ llvm_unreachable("expect constant 0 or 1");
+
+ SDValue NewBrCond =
+ DAG.getNode(X86ISD::BRCOND, SDLoc(BrCond), BrCond->getValueType(0), Ops);
+  // Avoid self-replacement, because CC1 can already be `e/ne`.
+ if (BrCond != NewBrCond.getNode())
+ DCI.CombineTo(BrCond, NewBrCond);
+ return X;
+}
+
+static SDValue combineAndOrForCcmpCtest(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &ST) {
+ // and/or(setcc(cc0, flag0), setcc(cc1, sub (X, Y)))
+ // ->
+ // setcc(cc1, ccmp(X, Y, ~cflags/cflags, cc0/~cc0, flag0))
+
+ // and/or(setcc(cc0, flag0), setcc(cc1, cmp (X, 0)))
+ // ->
+ // setcc(cc1, ctest(X, X, ~cflags/cflags, cc0/~cc0, flag0))
+ //
+ // where cflags is determined by cc1.
+
+ if (!ST.hasCCMP())
+ return SDValue();
+
+ SDValue SetCC0 = N->getOperand(0);
+ SDValue SetCC1 = N->getOperand(1);
+ if (SetCC0.getOpcode() != X86ISD::SETCC ||
+ SetCC1.getOpcode() != X86ISD::SETCC)
+ return SDValue();
+
+ auto GetCombineToOpc = [&](SDValue V) -> unsigned {
+ SDValue Op = V.getOperand(1);
+ unsigned Opc = Op.getOpcode();
+ if (Opc == X86ISD::SUB)
+ return X86ISD::CCMP;
+ if (Opc == X86ISD::CMP && isNullConstant(Op.getOperand(1)))
+ return X86ISD::CTEST;
+ return 0U;
+ };
+
+ unsigned NewOpc = 0;
+
+ // AND/OR is commutable. Canonicalize the operands to make SETCC with SUB/CMP
+ // appear on the right.
+ if (!(NewOpc = GetCombineToOpc(SetCC1))) {
+ std::swap(SetCC0, SetCC1);
+ if (!(NewOpc = GetCombineToOpc(SetCC1)))
+ return SDValue();
+ }
+
+ X86::CondCode CC0 =
+ static_cast<X86::CondCode>(SetCC0.getConstantOperandVal(0));
+ // CCMP/CTEST is not conditional when the source condition is COND_P/COND_NP.
+ if (CC0 == X86::COND_P || CC0 == X86::COND_NP)
+ return SDValue();
+
+ bool IsOR = N->getOpcode() == ISD::OR;
+
+  // CMP/TEST is executed and updates EFLAGS normally only when SrcCC
+  // evaluates to true. So we need to invert CC0 to form SrcCC when the
+  // logic operator is OR. Similarly for CC1.
+ SDValue SrcCC =
+ IsOR ? DAG.getTargetConstant(X86::GetOppositeBranchCondition(CC0),
+ SDLoc(SetCC0.getOperand(0)), MVT::i8)
+ : SetCC0.getOperand(0);
+ SDValue CC1N = SetCC1.getOperand(0);
+ X86::CondCode CC1 =
+ static_cast<X86::CondCode>(CC1N->getAsAPIntVal().getSExtValue());
+ X86::CondCode OppositeCC1 = X86::GetOppositeBranchCondition(CC1);
+ X86::CondCode CFlagsCC = IsOR ? CC1 : OppositeCC1;
+ SDLoc DL(N);
+ SDValue CFlags = DAG.getTargetConstant(
+ X86::getCCMPCondFlagsFromCondCode(CFlagsCC), DL, MVT::i8);
+ SDValue Sub = SetCC1.getOperand(1);
+
+ // Replace any uses of the old flag produced by SUB/CMP with the new one
+ // produced by CCMP/CTEST.
+ SDValue CCMP = (NewOpc == X86ISD::CCMP)
+ ? DAG.getNode(X86ISD::CCMP, DL, MVT::i32,
+ {Sub.getOperand(0), Sub.getOperand(1),
+ CFlags, SrcCC, SetCC0.getOperand(1)})
+ : DAG.getNode(X86ISD::CTEST, DL, MVT::i32,
+ {Sub.getOperand(0), Sub.getOperand(0),
+ CFlags, SrcCC, SetCC0.getOperand(1)});
+
+ return DAG.getNode(X86ISD::SETCC, DL, MVT::i8, {CC1N, CCMP});
+}
+
static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
@@ -49300,6 +49469,9 @@ static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
}
}
+ if (SDValue SetCC = combineAndOrForCcmpCtest(N, DAG, DCI, Subtarget))
+ return SetCC;
+
if (SDValue V = combineScalarAndWithMaskSetcc(N, DAG, Subtarget))
return V;
@@ -50085,6 +50257,9 @@ static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
}
}
+ if (SDValue SetCC = combineAndOrForCcmpCtest(N, DAG, DCI, Subtarget))
+ return SetCC;
+
if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
return R;
@@ -54606,6 +54781,7 @@ static bool onlyZeroFlagUsed(SDValue Flags) {
}
static SDValue combineCMP(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
// Only handle test patterns.
if (!isNullConstant(N->getOperand(1)))
@@ -54620,6 +54796,10 @@ static SDValue combineCMP(SDNode *N, SelectionDAG &DAG,
EVT VT = Op.getValueType();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (SDValue CMP =
+ combineX86SubCmpForFlags(N, SDValue(N, 0), DAG, DCI, Subtarget))
+ return CMP;
+
// If we have a constant logical shift that's only used in a comparison
// against zero turn it into an equivalent AND. This allows turning it into
// a TEST instruction later.
@@ -54748,7 +54928,8 @@ static SDValue combineCMP(SDNode *N, SelectionDAG &DAG,
}
static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI) {
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &ST) {
assert((X86ISD::ADD == N->getOpcode() || X86ISD::SUB == N->getOpcode()) &&
"Expected X86ISD::ADD or X86ISD::SUB");
@@ -54759,6 +54940,10 @@ static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
bool IsSub = X86ISD::SUB == N->getOpcode();
unsigned GenericOpc = IsSub ? ISD::SUB : ISD::ADD;
+ if (IsSub && isOneConstant(N->getOperand(1)) && !N->hasAnyUseOfValue(0))
+ if (SDValue CMP = combineX86SubCmpForFlags(N, SDValue(N, 1), DAG, DCI, ST))
+ return CMP;
+
// If we don't use the flag result, simplify back to a generic ADD/SUB.
if (!N->hasAnyUseOfValue(1)) {
SDValue Res = DAG.getNode(GenericOpc, DL, VT, LHS, RHS);
@@ -57058,11 +57243,11 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case X86ISD::BLENDV: return combineSelect(N, DAG, DCI, Subtarget);
case ISD::BITCAST: return combineBitcast(N, DAG, DCI, Subtarget);
case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
- case X86ISD::CMP: return combineCMP(N, DAG, Subtarget);
+ case X86ISD::CMP: return combineCMP(N, DAG, DCI, Subtarget);
case ISD::ADD: return combineAdd(N, DAG, DCI, Subtarget);
case ISD::SUB: return combineSub(N, DAG, DCI, Subtarget);
case X86ISD::ADD:
- case X86ISD::SUB: return combineX86AddSub(N, DAG, DCI);
+ case X86ISD::SUB: return combineX86AddSub(N, DAG, DCI, Subtarget);
case X86ISD::SBB: return combineSBB(N, DAG);
case X86ISD::ADC: return combineADC(N, DAG, DCI);
case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget);
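
combineX86SubCmpForFlags above removes a round-trip through a materialized condition byte: comparing setcc(cc, X) against 0 and branching on `ne` is the same as branching on `cc` directly, and comparing against 1 is the same as branching on the inverted condition. A plain C++ model of both folds (illustrative only, no LLVM APIs):

    #include <cstdio>

    int main() {
      int A = 5, B = 9;
      int T = (A < B); // setcc(cc, X) materialized to 0/1
      // cmp(T, 0); brcond ne  ==  brcond cc
      bool Taken0Before = (T != 0), Taken0After = (A < B);
      // sub(T, 1); brcond ne  ==  brcond ~cc   (T - 1 != 0 iff T == 0)
      bool Taken1Before = (T - 1 != 0), Taken1After = !(A < B);
      printf("%d==%d  %d==%d\n", Taken0Before, Taken0After, Taken1Before,
             Taken1After);
      return 0;
    }
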
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index ade54f73bff0..b0efa074b197 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -699,18 +699,6 @@ namespace llvm {
// Test if in transactional execution.
XTEST,
- // ERI instructions.
- RSQRT28,
- RSQRT28_SAE,
- RSQRT28S,
- RSQRT28S_SAE,
- RCP28,
- RCP28_SAE,
- RCP28S,
- RCP28S_SAE,
- EXP2,
- EXP2_SAE,
-
// Conversions between float and half-float.
CVTPS2PH,
CVTPS2PH_SAE,
@@ -747,6 +735,10 @@ namespace llvm {
// Perform an FP80 add after changing precision control in FPCW.
FP80_ADD,
+ // Conditional compare instructions
+ CCMP,
+ CTEST,
+
/// X86 strict FP compare instructions.
STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
STRICT_FCMPS,
@@ -1806,6 +1798,9 @@ namespace llvm {
MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr &MI,
MachineBasicBlock *MBB) const;
+ MachineBasicBlock *emitPatchableEventCall(MachineInstr &MI,
+ MachineBasicBlock *MBB) const;
+
/// Emit flags for the given setcc condition and operands. Also returns the
/// corresponding X86 condition code constant in X86CC.
SDValue emitFlagsForSetcc(SDValue Op0, SDValue Op1, ISD::CondCode CC,
diff --git a/llvm/lib/Target/X86/X86Instr3DNow.td b/llvm/lib/Target/X86/X86Instr3DNow.td
index 3be03ab0f433..03612de0fad9 100644
--- a/llvm/lib/Target/X86/X86Instr3DNow.td
+++ b/llvm/lib/Target/X86/X86Instr3DNow.td
@@ -90,8 +90,7 @@ def PREFETCHW : I<0x0D, MRM1m, (outs), (ins i8mem:$addr), "prefetchw\t$addr",
TB, Requires<[HasPrefetchW]>;
def PREFETCHWT1 : I<0x0D, MRM2m, (outs), (ins i8mem:$addr), "prefetchwt1\t$addr",
- [(prefetch addr:$addr, (i32 1), (i32 PrefetchWT1Level), (i32 1))]>,
- TB, Requires<[HasPREFETCHWT1]>;
+ []>, TB;
}
// "3DNowA" instructions
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 0723328d40e3..da690aea43f5 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -9265,6 +9265,37 @@ multiclass avx512_fp28_s<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
}
}
+multiclass avx512_fp28_s_ass<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
+ X86FoldableSchedWrite sched> {
+ let ExeDomain = _.ExeDomain, Uses = [MXCSR], hasSideEffects = 0 in {
+ defm r : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (null_frag)>, Sched<[sched]>, SIMD_EXC;
+ defm rb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
+ "{sae}, $src2, $src1", "$src1, $src2, {sae}",
+ (null_frag)>, EVEX_B, Sched<[sched]>;
+ let mayLoad = 1 in
+ defm m : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.IntScalarMemOp:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (null_frag)>,
+ Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
+ }
+}
+
+multiclass avx512_eri_s_ass<bits<8> opc, string OpcodeStr,
+ X86FoldableSchedWrite sched> {
+ defm SSZ : avx512_fp28_s_ass<opc, OpcodeStr#"ss", f32x_info, sched>,
+ EVEX_CD8<32, CD8VT1>, VEX_LIG, T8, PD, EVEX, VVVV;
+ defm SDZ : avx512_fp28_s_ass<opc, OpcodeStr#"sd", f64x_info, sched>,
+ EVEX_CD8<64, CD8VT1>, VEX_LIG, REX_W, T8, PD, EVEX, VVVV;
+}
+
+defm VRCP28 : avx512_eri_s_ass<0xCB, "vrcp28", SchedWriteFRcp.Scl>;
+defm VRSQRT28 : avx512_eri_s_ass<0xCD, "vrsqrt28", SchedWriteFRsqrt.Scl>;
+
multiclass avx512_eri_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
SDNode OpNodeSAE, X86FoldableSchedWrite sched> {
defm SSZ : avx512_fp28_s<opc, OpcodeStr#"ss", f32x_info, OpNode, OpNodeSAE,
@@ -9280,13 +9311,6 @@ multiclass avx512_vgetexpsh<bits<8> opc, string OpcodeStr, SDNode OpNode,
EVEX_CD8<16, CD8VT1>, T_MAP6, PD, EVEX, VVVV;
}
-let Predicates = [HasERI] in {
- defm VRCP28 : avx512_eri_s<0xCB, "vrcp28", X86rcp28s, X86rcp28SAEs,
- SchedWriteFRcp.Scl>;
- defm VRSQRT28 : avx512_eri_s<0xCD, "vrsqrt28", X86rsqrt28s, X86rsqrt28SAEs,
- SchedWriteFRsqrt.Scl>;
-}
-
defm VGETEXP : avx512_eri_s<0x43, "vgetexp", X86fgetexps, X86fgetexpSAEs,
SchedWriteFRnd.Scl>,
avx512_vgetexpsh<0x43, "vgetexp", X86fgetexps, X86fgetexpSAEs,
@@ -9325,6 +9349,49 @@ multiclass avx512_fp28_p_sae<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
EVEX_B, Sched<[sched]>;
}
+multiclass avx512_fp28_p_ass<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
+ X86FoldableSchedWrite sched> {
+ let ExeDomain = _.ExeDomain, Uses = [MXCSR], mayRaiseFPException = 1,
+ hasSideEffects = 0 in {
+ defm r : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src), OpcodeStr, "$src", "$src",
+ (null_frag)>, Sched<[sched]>;
+ let mayLoad = 1 in
+ defm m : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
+ (null_frag)>,
+ Sched<[sched.Folded, sched.ReadAfterFold]>;
+ let mayLoad = 1 in
+ defm mb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.ScalarMemOp:$src), OpcodeStr,
+ "${src}"#_.BroadcastStr, "${src}"#_.BroadcastStr,
+ (null_frag)>,
+ EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>;
+ }
+}
+multiclass avx512_fp28_p_sae_ass<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
+ X86FoldableSchedWrite sched> {
+ let ExeDomain = _.ExeDomain, Uses = [MXCSR], hasSideEffects = 0 in
+ defm rb : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src), OpcodeStr,
+ "{sae}, $src", "$src, {sae}",
+ (null_frag)>, Sched<[sched]>, EVEX_B;
+}
+
+multiclass avx512_eri_ass<bits<8> opc, string OpcodeStr,
+ X86SchedWriteWidths sched> {
+ defm PSZ : avx512_fp28_p_ass<opc, OpcodeStr#"ps", v16f32_info, sched.ZMM>,
+ avx512_fp28_p_sae_ass<opc, OpcodeStr#"ps", v16f32_info, sched.ZMM>,
+ T8, PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
+ defm PDZ : avx512_fp28_p_ass<opc, OpcodeStr#"pd", v8f64_info, sched.ZMM>,
+ avx512_fp28_p_sae_ass<opc, OpcodeStr#"pd", v8f64_info, sched.ZMM>,
+ T8, PD, EVEX_V512, REX_W, EVEX_CD8<64, CD8VF>;
+}
+
+defm VRSQRT28 : avx512_eri_ass<0xCC, "vrsqrt28", SchedWriteFRsqrt>, EVEX;
+defm VRCP28 : avx512_eri_ass<0xCA, "vrcp28", SchedWriteFRcp>, EVEX;
+defm VEXP2 : avx512_eri_ass<0xC8, "vexp2", SchedWriteFAdd>, EVEX;
+
multiclass avx512_eri<bits<8> opc, string OpcodeStr, SDNode OpNode,
SDNode OpNodeSAE, X86SchedWriteWidths sched> {
defm PSZ : avx512_fp28_p<opc, OpcodeStr#"ps", v16f32_info, OpNode, sched.ZMM>,
@@ -9367,14 +9434,6 @@ multiclass avx512_vgetexp_fp16<bits<8> opc, string OpcodeStr, SDNode OpNode,
EVEX_V256, T_MAP6, PD, EVEX_CD8<16, CD8VF>;
}
}
-let Predicates = [HasERI] in {
- defm VRSQRT28 : avx512_eri<0xCC, "vrsqrt28", X86rsqrt28, X86rsqrt28SAE,
- SchedWriteFRsqrt>, EVEX;
- defm VRCP28 : avx512_eri<0xCA, "vrcp28", X86rcp28, X86rcp28SAE,
- SchedWriteFRcp>, EVEX;
- defm VEXP2 : avx512_eri<0xC8, "vexp2", X86exp2, X86exp2SAE,
- SchedWriteFAdd>, EVEX;
-}
defm VGETEXP : avx512_eri<0x42, "vgetexp", X86fgetexp, X86fgetexpSAE,
SchedWriteFRnd>,
avx512_vgetexp_fp16<0x42, "vgetexp", X86fgetexp, X86fgetexpSAE,
@@ -10308,7 +10367,7 @@ defm VPSCATTER : avx512_scatter_q_pd<0xA0, 0xA1, avx512vl_i64_info, "vpscatter",
// prefetch
multiclass avx512_gather_scatter_prefetch<bits<8> opc, Format F, string OpcodeStr,
RegisterClass KRC, X86MemOperand memop> {
- let Predicates = [HasPFI], mayLoad = 1, mayStore = 1 in
+ let mayLoad = 1, mayStore = 1 in
def m : AVX5128I<opc, F, (outs), (ins KRC:$mask, memop:$src),
!strconcat(OpcodeStr, "\t{$src {${mask}}|{${mask}}, $src}"), []>,
EVEX, EVEX_K, Sched<[WriteLoad]>;
diff --git a/llvm/lib/Target/X86/X86InstrConditionalCompare.td b/llvm/lib/Target/X86/X86InstrConditionalCompare.td
index e5c1143eba87..3d296773103b 100644
--- a/llvm/lib/Target/X86/X86InstrConditionalCompare.td
+++ b/llvm/lib/Target/X86/X86InstrConditionalCompare.td
@@ -78,6 +78,34 @@ let mayLoad = 1 in {
}
}
+def : Pat<(X86ccmp GR8:$src1, GR8:$src2, timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP8rr GR8:$src1, GR8:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ccmp GR16:$src1, GR16:$src2, timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP16rr GR16:$src1, GR16:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ccmp GR32:$src1, GR32:$src2, timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP32rr GR32:$src1, GR32:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ccmp GR64:$src1, GR64:$src2, timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP64rr GR64:$src1, GR64:$src2, timm:$dcf, timm:$cond)>;
+
+def : Pat<(X86ccmp GR8:$src1, (i8 imm:$src2), timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP8ri GR8:$src1, imm:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ccmp GR16:$src1, (i16 imm:$src2), timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP16ri GR16:$src1, imm:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ccmp GR32:$src1, (i32 imm:$src2), timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP32ri GR32:$src1, imm:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ccmp GR64:$src1, (i64 imm:$src2), timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP64ri32 GR64:$src1, imm:$src2, timm:$dcf, timm:$cond)>;
+
+def : Pat<(X86ccmp GR8:$src1, (loadi8 addr:$src2), timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP8rm GR8:$src1, addr:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ccmp GR16:$src1, (loadi16 addr:$src2), timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP16rm GR16:$src1, addr:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ccmp GR32:$src1, (loadi32 addr:$src2), timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP32rm GR32:$src1, addr:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ccmp GR64:$src1, (loadi64 addr:$src2), timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP64rm GR64:$src1, addr:$src2, timm:$dcf, timm:$cond)>;
+
//===----------------------------------------------------------------------===//
// CTEST Instructions
//
@@ -108,3 +136,21 @@ let mayLoad = 1 in {
def CTEST64mr: Ctest<0x85, MRMDestMem, Xi64, i64mem, GR64>;
}
}
+
+def : Pat<(X86ctest GR8:$src1, GR8:$src2, timm:$dcf, timm:$cond, EFLAGS),
+ (CTEST8rr GR8:$src1, GR8:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ctest GR16:$src1, GR16:$src2, timm:$dcf, timm:$cond, EFLAGS),
+ (CTEST16rr GR16:$src1, GR16:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ctest GR32:$src1, GR32:$src2, timm:$dcf, timm:$cond, EFLAGS),
+ (CTEST32rr GR32:$src1, GR32:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ctest GR64:$src1, GR64:$src2, timm:$dcf, timm:$cond, EFLAGS),
+ (CTEST64rr GR64:$src1, GR64:$src2, timm:$dcf, timm:$cond)>;
+
+def : Pat<(X86ctestpat GR8:$src1, imm:$src2, timm:$dcf, timm:$cond),
+ (CTEST8ri GR8:$src1, imm:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ctestpat GR16:$src1, imm:$src2, timm:$dcf, timm:$cond),
+ (CTEST16ri GR16:$src1, imm:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ctestpat GR32:$src1, imm:$src2, timm:$dcf, timm:$cond),
+ (CTEST32ri GR32:$src1, imm:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ctestpat GR64:$src1, imm:$src2, timm:$dcf, timm:$cond),
+ (CTEST64ri32 GR64:$src1, imm:$src2, timm:$dcf, timm:$cond)>;
diff --git a/llvm/lib/Target/X86/X86InstrFragments.td b/llvm/lib/Target/X86/X86InstrFragments.td
index f14c7200af96..162e322712a6 100644
--- a/llvm/lib/Target/X86/X86InstrFragments.td
+++ b/llvm/lib/Target/X86/X86InstrFragments.td
@@ -12,6 +12,9 @@ def SDTX86CmpTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisInt<1>,
def SDTX86FCmp : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisFP<1>,
SDTCisSameAs<1, 2>]>;
+def SDTX86Ccmp : SDTypeProfile<1, 5,
+ [SDTCisVT<3, i8>, SDTCisVT<4, i8>, SDTCisVT<5, i32>]>;
+
def SDTX86Cmov : SDTypeProfile<1, 4,
[SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;
@@ -138,6 +141,9 @@ def X86strict_fcmp : SDNode<"X86ISD::STRICT_FCMP", SDTX86FCmp, [SDNPHasChain]>;
def X86strict_fcmps : SDNode<"X86ISD::STRICT_FCMPS", SDTX86FCmp, [SDNPHasChain]>;
def X86bt : SDNode<"X86ISD::BT", SDTX86CmpTest>;
+def X86ccmp : SDNode<"X86ISD::CCMP", SDTX86Ccmp>;
+def X86ctest : SDNode<"X86ISD::CTEST", SDTX86Ccmp>;
+
def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov>;
def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond,
[SDNPHasChain]>;
@@ -577,6 +583,14 @@ def add_su : binop_oneuse<add>;
def and_su : binop_oneuse<and>;
def srl_su : binop_oneuse<srl>;
+class binop_twouses<SDPatternOperator operator>
+ : PatFrag<(ops node:$A, node:$B),
+ (operator node:$A, node:$B), [{
+ return N->hasNUsesOfValue(2, 0);
+}]>;
+
+def and_du : binop_twouses<and>;
+
// unary op with only one user
class unop_oneuse<SDPatternOperator operator>
: PatFrag<(ops node:$A),
@@ -601,20 +615,17 @@ def X86sub_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
def X86testpat : PatFrag<(ops node:$lhs, node:$rhs),
(X86cmp (and_su node:$lhs, node:$rhs), 0)>;
-
+def X86ctestpat : PatFrag<(ops node:$lhs, node:$rhs, node:$dcf, node:$cond),
+ (X86ctest (and_du node:$lhs, node:$rhs),
+ (and_du node:$lhs, node:$rhs), node:$dcf,
+ node:$cond, EFLAGS)>;
def X86any_fcmp : PatFrags<(ops node:$lhs, node:$rhs),
[(X86strict_fcmp node:$lhs, node:$rhs),
(X86fcmp node:$lhs, node:$rhs)]>;
-// PREFETCHWT1 is supported we want to use it for everything but T0.
def PrefetchWLevel : PatFrag<(ops), (i32 timm), [{
- return N->getSExtValue() == 3 || !Subtarget->hasPREFETCHWT1();
-}]>;
-
-// Use PREFETCHWT1 for NTA, T2, T1.
-def PrefetchWT1Level : TImmLeaf<i32, [{
- return Imm < 3;
+ return N->getSExtValue() <= 3;
}]>;
def X86lock_add_nocf : PatFrag<(ops node:$lhs, node:$rhs),
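
X86ctestpat above references the same AND node as both CTEST operands, which is why `and_du` requires exactly two uses: with any additional user the AND result would still be needed in a register, and folding the AND away would be unprofitable. A rough use-count model (hypothetical types, not TableGen or LLVM API):

    #include <cstdio>

    struct Node {
      int NumUses; // stand-in for SDNode use counting
    };

    // Mirrors the hasNUsesOfValue(2, 0) predicate in binop_twouses/and_du:
    // both CTEST inputs consume the AND, and nothing else may.
    static bool matchesAndDu(const Node &AndNode) {
      return AndNode.NumUses == 2;
    }

    int main() {
      printf("%d %d\n", matchesAndDu({2}), matchesAndDu({3})); // 1 0
      return 0;
    }
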
diff --git a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
index f86e15b3ed5d..dff33a469b97 100644
--- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -600,19 +600,8 @@ def X86Vpdpbusds : SDNode<"X86ISD::VPDPBUSDS", SDTVnni>;
def X86Vpdpwssd : SDNode<"X86ISD::VPDPWSSD", SDTVnni>;
def X86Vpdpwssds : SDNode<"X86ISD::VPDPWSSDS", SDTVnni>;
-def X86rsqrt28 : SDNode<"X86ISD::RSQRT28", SDTFPUnaryOp>;
-def X86rsqrt28SAE: SDNode<"X86ISD::RSQRT28_SAE", SDTFPUnaryOp>;
-def X86rcp28 : SDNode<"X86ISD::RCP28", SDTFPUnaryOp>;
-def X86rcp28SAE : SDNode<"X86ISD::RCP28_SAE", SDTFPUnaryOp>;
-def X86exp2 : SDNode<"X86ISD::EXP2", SDTFPUnaryOp>;
-def X86exp2SAE : SDNode<"X86ISD::EXP2_SAE", SDTFPUnaryOp>;
-
def X86rsqrt14s : SDNode<"X86ISD::RSQRT14S", SDTFPBinOp>;
def X86rcp14s : SDNode<"X86ISD::RCP14S", SDTFPBinOp>;
-def X86rsqrt28s : SDNode<"X86ISD::RSQRT28S", SDTFPBinOp>;
-def X86rsqrt28SAEs : SDNode<"X86ISD::RSQRT28S_SAE", SDTFPBinOp>;
-def X86rcp28s : SDNode<"X86ISD::RCP28S", SDTFPBinOp>;
-def X86rcp28SAEs : SDNode<"X86ISD::RCP28S_SAE", SDTFPBinOp>;
def X86Ranges : SDNode<"X86ISD::VRANGES", SDTFPBinOpImm>;
def X86RndScales : SDNode<"X86ISD::VRNDSCALES", SDTFPBinOpImm>;
def X86Reduces : SDNode<"X86ISD::VREDUCES", SDTFPBinOpImm>;
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 26c68ce3c1a2..7d05f950b6fe 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -3164,6 +3164,63 @@ X86::CondCode X86::getCondFromCCMP(const MachineInstr &MI) {
: X86::COND_INVALID;
}
+int X86::getCCMPCondFlagsFromCondCode(X86::CondCode CC) {
+ // CCMP/CTEST has two conditional operands:
+  // - SCC: source conditional code (same as CMOV)
+  // - DCF: destination conditional flags, which have 4 valid bits
+ //
+ // +----+----+----+----+
+ // | OF | SF | ZF | CF |
+ // +----+----+----+----+
+ //
+  // If SCC (the source conditional code) evaluates to false, CCMP/CTEST
+  // updates the conditional flags as follows:
+ //
+ // OF = DCF.OF
+ // SF = DCF.SF
+ // ZF = DCF.ZF
+ // CF = DCF.CF
+ // PF = DCF.CF
+ // AF = 0 (Auxiliary Carry Flag)
+ //
+ // Otherwise, the CMP or TEST is executed and it updates the
+ // CSPAZO flags normally.
+ //
+ // NOTE:
+ // If SCC = P, then SCC evaluates to true regardless of the CSPAZO value.
+ // If SCC = NP, then SCC evaluates to false regardless of the CSPAZO value.
+
+ enum { CF = 1, ZF = 2, SF = 4, OF = 8, PF = CF };
+
+ switch (CC) {
+ default:
+ llvm_unreachable("Illegal condition code!");
+ case X86::COND_NO:
+ case X86::COND_NE:
+ case X86::COND_GE:
+ case X86::COND_G:
+ case X86::COND_AE:
+ case X86::COND_A:
+ case X86::COND_NS:
+ case X86::COND_NP:
+ return 0;
+ case X86::COND_O:
+ return OF;
+ case X86::COND_B:
+ case X86::COND_BE:
+ return CF;
+ case X86::COND_E:
+ case X86::COND_LE:
+ return ZF;
+ case X86::COND_S:
+ case X86::COND_L:
+ return SF;
+ case X86::COND_P:
+ return PF;
+ }
+}
+
/// Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
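
A way to read the table in getCCMPCondFlagsFromCondCode above: the returned DCF bits are flag settings under which the requested condition code evaluates to true, so installing them when SCC fails steers the downstream consumer deterministically. A small self-check of a few rows (plain C++ sketch with flag semantics per the x86 manual, not an LLVM API):

    #include <cassert>
    #include <string>

    enum { CF = 1, ZF = 2, SF = 4, OF = 8 };

    // Evaluate a handful of x86 condition codes over the four DCF-visible
    // flags packed into F.
    static bool holds(const std::string &CC, int F) {
      bool cf = F & CF, zf = F & ZF, sf = F & SF, of = F & OF;
      if (CC == "E")  return zf;
      if (CC == "NE") return !zf;
      if (CC == "B")  return cf;
      if (CC == "L")  return sf != of;
      if (CC == "LE") return zf || (sf != of);
      if (CC == "GE") return sf == of;
      return false;
    }

    int main() {
      assert(holds("E", ZF));  // COND_E  -> ZF
      assert(holds("B", CF));  // COND_B  -> CF
      assert(holds("L", SF));  // COND_L  -> SF (SF != OF)
      assert(holds("LE", ZF)); // COND_LE -> ZF
      assert(holds("NE", 0));  // COND_NE -> 0 (ZF clear)
      assert(holds("GE", 0));  // COND_GE -> 0 (SF == OF)
      return 0;
    }
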
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index 55deca73b1f3..295fac60c6e4 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -74,6 +74,9 @@ CondCode getCondFromCFCMov(const MachineInstr &MI);
// Turn CCMP instruction into condition code.
CondCode getCondFromCCMP(const MachineInstr &MI);
+// Turn condition code into condition flags for CCMP/CTEST.
+int getCCMPCondFlagsFromCondCode(CondCode CC);
+
/// GetOppositeBranchCondition - Return the inverse of the specified cond,
/// e.g. turning COND_E to COND_NE.
CondCode GetOppositeBranchCondition(CondCode CC);
diff --git a/llvm/lib/Target/X86/X86InstrPredicates.td b/llvm/lib/Target/X86/X86InstrPredicates.td
index 9f2709d6b1a2..419ff9e6f5c0 100644
--- a/llvm/lib/Target/X86/X86InstrPredicates.td
+++ b/llvm/lib/Target/X86/X86InstrPredicates.td
@@ -79,8 +79,6 @@ def UseAVX2 : Predicate<"Subtarget->hasAVX2() && !Subtarget->hasAVX512()">;
def NoAVX512 : Predicate<"!Subtarget->hasAVX512()">;
def HasCDI : Predicate<"Subtarget->hasCDI()">;
def HasVPOPCNTDQ : Predicate<"Subtarget->hasVPOPCNTDQ()">;
-def HasPFI : Predicate<"Subtarget->hasPFI()">;
-def HasERI : Predicate<"Subtarget->hasERI()">;
def HasDQI : Predicate<"Subtarget->hasDQI()">;
def NoDQI : Predicate<"!Subtarget->hasDQI()">;
def HasBWI : Predicate<"Subtarget->hasBWI()">;
@@ -147,7 +145,6 @@ def NoSSEPrefetch : Predicate<"!Subtarget->hasSSEPrefetch()">;
def HasPRFCHW : Predicate<"Subtarget->hasPRFCHW()">;
def HasPREFETCHI : Predicate<"Subtarget->hasPREFETCHI()">;
def HasPrefetchW : Predicate<"Subtarget->hasPrefetchW()">;
-def HasPREFETCHWT1 : Predicate<"Subtarget->hasPREFETCHWT1()">;
def HasLAHFSAHF : Predicate<"Subtarget->hasLAHFSAHF()">;
def HasLAHFSAHF64 : Predicate<"Subtarget->hasLAHFSAHF64()">;
def HasMWAITX : Predicate<"Subtarget->hasMWAITX()">;
diff --git a/llvm/lib/Target/X86/X86IntrinsicsInfo.h b/llvm/lib/Target/X86/X86IntrinsicsInfo.h
index 3bb2f07b5f1a..e3961e0094d3 100644
--- a/llvm/lib/Target/X86/X86IntrinsicsInfo.h
+++ b/llvm/lib/Target/X86/X86IntrinsicsInfo.h
@@ -108,15 +108,6 @@ static const IntrinsicData IntrinsicsWithChain[] = {
X86_INTRINSIC_DATA(avx512_gather3siv8_sf, GATHER, 0, 0),
X86_INTRINSIC_DATA(avx512_gather3siv8_si, GATHER, 0, 0),
- X86_INTRINSIC_DATA(avx512_gatherpf_dpd_512, PREFETCH,
- X86::VGATHERPF0DPDm, X86::VGATHERPF1DPDm),
- X86_INTRINSIC_DATA(avx512_gatherpf_dps_512, PREFETCH,
- X86::VGATHERPF0DPSm, X86::VGATHERPF1DPSm),
- X86_INTRINSIC_DATA(avx512_gatherpf_qpd_512, PREFETCH,
- X86::VGATHERPF0QPDm, X86::VGATHERPF1QPDm),
- X86_INTRINSIC_DATA(avx512_gatherpf_qps_512, PREFETCH,
- X86::VGATHERPF0QPSm, X86::VGATHERPF1QPSm),
-
X86_INTRINSIC_DATA(avx512_mask_gather_dpd_512, GATHER, 0, 0),
X86_INTRINSIC_DATA(avx512_mask_gather_dpi_512, GATHER, 0, 0),
X86_INTRINSIC_DATA(avx512_mask_gather_dpq_512, GATHER, 0, 0),
@@ -292,14 +283,6 @@ static const IntrinsicData IntrinsicsWithChain[] = {
X86_INTRINSIC_DATA(avx512_scatterdiv4_si, SCATTER, 0, 0),
X86_INTRINSIC_DATA(avx512_scatterdiv8_sf, SCATTER, 0, 0),
X86_INTRINSIC_DATA(avx512_scatterdiv8_si, SCATTER, 0, 0),
- X86_INTRINSIC_DATA(avx512_scatterpf_dpd_512, PREFETCH, X86::VSCATTERPF0DPDm,
- X86::VSCATTERPF1DPDm),
- X86_INTRINSIC_DATA(avx512_scatterpf_dps_512, PREFETCH, X86::VSCATTERPF0DPSm,
- X86::VSCATTERPF1DPSm),
- X86_INTRINSIC_DATA(avx512_scatterpf_qpd_512, PREFETCH, X86::VSCATTERPF0QPDm,
- X86::VSCATTERPF1QPDm),
- X86_INTRINSIC_DATA(avx512_scatterpf_qps_512, PREFETCH, X86::VSCATTERPF0QPSm,
- X86::VSCATTERPF1QPSm),
X86_INTRINSIC_DATA(avx512_scattersiv2_df, SCATTER, 0, 0),
X86_INTRINSIC_DATA(avx512_scattersiv2_di, SCATTER, 0, 0),
X86_INTRINSIC_DATA(avx512_scattersiv4_df, SCATTER, 0, 0),
@@ -454,8 +437,6 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_dbpsadbw_512, INTR_TYPE_3OP_IMM8, X86ISD::DBPSADBW, 0),
X86_INTRINSIC_DATA(avx512_div_pd_512, INTR_TYPE_2OP, ISD::FDIV, X86ISD::FDIV_RND),
X86_INTRINSIC_DATA(avx512_div_ps_512, INTR_TYPE_2OP, ISD::FDIV, X86ISD::FDIV_RND),
- X86_INTRINSIC_DATA(avx512_exp2_pd, INTR_TYPE_1OP_MASK_SAE, X86ISD::EXP2, X86ISD::EXP2_SAE),
- X86_INTRINSIC_DATA(avx512_exp2_ps, INTR_TYPE_1OP_MASK_SAE, X86ISD::EXP2, X86ISD::EXP2_SAE),
X86_INTRINSIC_DATA(avx512_fpclass_pd_128, INTR_TYPE_2OP, X86ISD::VFPCLASS, 0),
X86_INTRINSIC_DATA(avx512_fpclass_pd_256, INTR_TYPE_2OP, X86ISD::VFPCLASS, 0),
X86_INTRINSIC_DATA(avx512_fpclass_pd_512, INTR_TYPE_2OP, X86ISD::VFPCLASS, 0),
@@ -908,10 +889,6 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_rcp14_ps_512, INTR_TYPE_1OP_MASK, X86ISD::RCP14, 0),
X86_INTRINSIC_DATA(avx512_rcp14_sd, INTR_TYPE_SCALAR_MASK, X86ISD::RCP14S, 0),
X86_INTRINSIC_DATA(avx512_rcp14_ss, INTR_TYPE_SCALAR_MASK, X86ISD::RCP14S, 0),
- X86_INTRINSIC_DATA(avx512_rcp28_pd, INTR_TYPE_1OP_MASK_SAE, X86ISD::RCP28, X86ISD::RCP28_SAE),
- X86_INTRINSIC_DATA(avx512_rcp28_ps, INTR_TYPE_1OP_MASK_SAE, X86ISD::RCP28, X86ISD::RCP28_SAE),
- X86_INTRINSIC_DATA(avx512_rcp28_sd, INTR_TYPE_SCALAR_MASK_SAE, X86ISD::RCP28S, X86ISD::RCP28S_SAE),
- X86_INTRINSIC_DATA(avx512_rcp28_ss, INTR_TYPE_SCALAR_MASK_SAE, X86ISD::RCP28S, X86ISD::RCP28S_SAE),
X86_INTRINSIC_DATA(avx512_rsqrt14_pd_128, INTR_TYPE_1OP_MASK, X86ISD::RSQRT14, 0),
X86_INTRINSIC_DATA(avx512_rsqrt14_pd_256, INTR_TYPE_1OP_MASK, X86ISD::RSQRT14, 0),
X86_INTRINSIC_DATA(avx512_rsqrt14_pd_512, INTR_TYPE_1OP_MASK, X86ISD::RSQRT14, 0),
@@ -920,10 +897,6 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_rsqrt14_ps_512, INTR_TYPE_1OP_MASK, X86ISD::RSQRT14, 0),
X86_INTRINSIC_DATA(avx512_rsqrt14_sd, INTR_TYPE_SCALAR_MASK, X86ISD::RSQRT14S, 0),
X86_INTRINSIC_DATA(avx512_rsqrt14_ss, INTR_TYPE_SCALAR_MASK, X86ISD::RSQRT14S, 0),
- X86_INTRINSIC_DATA(avx512_rsqrt28_pd, INTR_TYPE_1OP_MASK_SAE,X86ISD::RSQRT28, X86ISD::RSQRT28_SAE),
- X86_INTRINSIC_DATA(avx512_rsqrt28_ps, INTR_TYPE_1OP_MASK_SAE,X86ISD::RSQRT28, X86ISD::RSQRT28_SAE),
- X86_INTRINSIC_DATA(avx512_rsqrt28_sd, INTR_TYPE_SCALAR_MASK_SAE,X86ISD::RSQRT28S, X86ISD::RSQRT28S_SAE),
- X86_INTRINSIC_DATA(avx512_rsqrt28_ss, INTR_TYPE_SCALAR_MASK_SAE,X86ISD::RSQRT28S, X86ISD::RSQRT28S_SAE),
X86_INTRINSIC_DATA(avx512_sitofp_round, INTR_TYPE_1OP, ISD::SINT_TO_FP, X86ISD::SINT_TO_FP_RND),
X86_INTRINSIC_DATA(avx512_sqrt_pd_512, INTR_TYPE_1OP, ISD::FSQRT, X86ISD::FSQRT_RND),
X86_INTRINSIC_DATA(avx512_sqrt_ps_512, INTR_TYPE_1OP, ISD::FSQRT, X86ISD::FSQRT_RND),
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index 1d699b42dc67..8f6fba8ac22c 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -22,6 +22,7 @@
#include "X86RegisterInfo.h"
#include "X86ShuffleDecodeConstantPool.h"
#include "X86Subtarget.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -1362,6 +1363,35 @@ void X86AsmPrinter::LowerPATCHABLE_RET(const MachineInstr &MI,
void X86AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI,
X86MCInstLower &MCIL) {
+ MCInst TC;
+ TC.setOpcode(convertTailJumpOpcode(MI.getOperand(0).getImm()));
+ // Drop the tail jump opcode.
+ auto TCOperands = drop_begin(MI.operands());
+ bool IsConditional = TC.getOpcode() == X86::JCC_1;
+ MCSymbol *FallthroughLabel;
+ if (IsConditional) {
+ // Rewrite:
+ // je target
+ //
+ // To:
+ // jne .fallthrough
+ // .p2align 1, ...
+ // .Lxray_sled_N:
+ // SLED_CODE
+ // jmp target
+ // .fallthrough:
+ FallthroughLabel = OutContext.createTempSymbol();
+ EmitToStreamer(
+ *OutStreamer,
+ MCInstBuilder(X86::JCC_1)
+ .addExpr(MCSymbolRefExpr::create(FallthroughLabel, OutContext))
+ .addImm(X86::GetOppositeBranchCondition(
+ static_cast<X86::CondCode>(MI.getOperand(2).getImm()))));
+ TC.setOpcode(X86::JMP_1);
+ // Drop the condition code.
+ TCOperands = drop_end(TCOperands);
+ }
+
NoAutoPaddingScope NoPadScope(*OutStreamer);
// Like PATCHABLE_RET, we have the actual instruction in the operands to this
@@ -1383,18 +1413,16 @@ void X86AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI,
OutStreamer->emitLabel(Target);
recordSled(CurSled, MI, SledKind::TAIL_CALL, 2);
- unsigned OpCode = MI.getOperand(0).getImm();
- OpCode = convertTailJumpOpcode(OpCode);
- MCInst TC;
- TC.setOpcode(OpCode);
-
// Before emitting the instruction, add a comment to indicate that this is
// indeed a tail call.
OutStreamer->AddComment("TAILCALL");
- for (auto &MO : drop_begin(MI.operands()))
+ for (auto &MO : TCOperands)
if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
TC.addOperand(*MaybeOperand);
OutStreamer->emitInstruction(TC, getSubtargetInfo());
+
+ if (IsConditional)
+ OutStreamer->emitLabel(FallthroughLabel);
}
// Returns instruction preceding MBBI in MachineFunction.
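For context, the conditional-tail-call handling added above inverts the branch so the XRay sled runs only when the original branch would have been taken. A minimal standalone sketch of that equivalence (not LLVM code; Dest and both functions are illustrative):

#include <cassert>

enum class Dest { Target, Fallthrough };

// Semantics of the original "je target".
Dest original(bool ZF) { return ZF ? Dest::Target : Dest::Fallthrough; }

// Semantics of the rewritten "jne .fallthrough; sled; jmp target".
Dest rewritten(bool ZF) {
  if (!ZF)                 // jne .fallthrough (opposite condition)
    return Dest::Fallthrough;
  // .Lxray_sled_N: sled code would run here, then...
  return Dest::Target;     // jmp target
}

int main() {
  for (bool ZF : {false, true})
    assert(original(ZF) == rewritten(ZF));
}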
diff --git a/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp b/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
index 0bff1884933d..e5f07f230fe6 100644
--- a/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -48,26 +48,25 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemset(
SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Val,
SDValue Size, Align Alignment, bool isVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo) const {
- ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
- const X86Subtarget &Subtarget =
- DAG.getMachineFunction().getSubtarget<X86Subtarget>();
+ // If writing to a segment-relative address space, use the default lowering.
+ if (DstPtrInfo.getAddrSpace() >= 256)
+ return SDValue();
-#ifndef NDEBUG
// If the base register might conflict with our physical registers, bail out.
const MCPhysReg ClobberSet[] = {X86::RCX, X86::RAX, X86::RDI,
X86::ECX, X86::EAX, X86::EDI};
- assert(!isBaseRegConflictPossible(DAG, ClobberSet));
-#endif
-
- // If to a segment-relative address space, use the default lowering.
- if (DstPtrInfo.getAddrSpace() >= 256)
+ if (isBaseRegConflictPossible(DAG, ClobberSet))
return SDValue();
+ ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
+ const X86Subtarget &Subtarget =
+ DAG.getMachineFunction().getSubtarget<X86Subtarget>();
+
// If not DWORD aligned or size is more than the threshold, call the library.
// The libc version is likely to be faster for these cases. It can use the
// address value and run time information about the CPU.
if (Alignment < Align(4) || !ConstantSize ||
- ConstantSize->getZExtValue() > Subtarget.getMaxInlineSizeThreshold())
+ ConstantSize->getZExtValue() > Subtarget.getMaxInlineSizeThreshold())
return SDValue();
uint64_t SizeVal = ConstantSize->getZExtValue();
@@ -128,26 +127,29 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemset(
InGlue = Chain.getValue(1);
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue Ops[] = { Chain, DAG.getValueType(AVT), InGlue };
- Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops);
-
- if (BytesLeft) {
- // Handle the last 1 - 7 bytes.
- unsigned Offset = SizeVal - BytesLeft;
- EVT AddrVT = Dst.getValueType();
- EVT SizeVT = Size.getValueType();
-
- Chain =
- DAG.getMemset(Chain, dl,
- DAG.getNode(ISD::ADD, dl, AddrVT, Dst,
- DAG.getConstant(Offset, dl, AddrVT)),
- Val, DAG.getConstant(BytesLeft, dl, SizeVT), Alignment,
- isVolatile, AlwaysInline,
- /* isTailCall */ false, DstPtrInfo.getWithOffset(Offset));
- }
+ SDValue Ops[] = {Chain, DAG.getValueType(AVT), InGlue};
+ SDValue RepStos = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops);
- // TODO: Use a Tokenfactor, as in memcpy, instead of a single chain.
- return Chain;
+ // RepStos can process the whole length.
+ if (BytesLeft == 0)
+ return RepStos;
+
+ // Handle the last 1 - 7 bytes.
+ SmallVector<SDValue, 4> Results;
+ Results.push_back(RepStos);
+ unsigned Offset = SizeVal - BytesLeft;
+ EVT AddrVT = Dst.getValueType();
+ EVT SizeVT = Size.getValueType();
+
+ Results.push_back(
+ DAG.getMemset(Chain, dl,
+ DAG.getNode(ISD::ADD, dl, AddrVT, Dst,
+ DAG.getConstant(Offset, dl, AddrVT)),
+ Val, DAG.getConstant(BytesLeft, dl, SizeVT), Alignment,
+ isVolatile, AlwaysInline,
+ /* isTailCall */ false, DstPtrInfo.getWithOffset(Offset)));
+
+ return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Results);
}
/// Emit a single REP MOVS{B,W,D,Q} instruction.
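The reworked memset lowering above splits the length into a REP STOS portion and a 1-7 byte tail, then joins the two store chains with a TokenFactor instead of threading a single chain. A small sketch of the split arithmetic; element size 8 (REP STOSQ) is an assumption for the example:

#include <cassert>
#include <cstdint>

struct Split { uint64_t RepBytes, TrailOffset, TrailBytes; };

// Mirrors the SizeVal/BytesLeft bookkeeping: REP STOS covers the largest
// multiple of the element size, a plain memset covers the remainder.
Split splitMemset(uint64_t SizeVal, uint64_t EltSize) {
  uint64_t BytesLeft = SizeVal % EltSize;
  return {SizeVal - BytesLeft, SizeVal - BytesLeft, BytesLeft};
}

int main() {
  Split S = splitMemset(29, 8);               // a 29-byte memset
  assert(S.RepBytes == 24 && S.TrailBytes == 5);
  assert(S.TrailOffset + S.TrailBytes == 29); // the two stores tile the range
}

Joining the results with a TokenFactor lets the trailing store hang off the pre-REP_STOS chain rather than being serialized behind the string instruction.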
diff --git a/llvm/lib/Target/X86/X86Subtarget.h b/llvm/lib/Target/X86/X86Subtarget.h
index 4d55a084b730..4532db134fcb 100644
--- a/llvm/lib/Target/X86/X86Subtarget.h
+++ b/llvm/lib/Target/X86/X86Subtarget.h
@@ -213,17 +213,15 @@ public:
bool hasAnyFMA() const { return hasFMA() || hasFMA4(); }
bool hasPrefetchW() const {
// The PREFETCHW instruction was added with 3DNow but later CPUs gave it
- // its own CPUID bit as part of deprecating 3DNow. Intel eventually added
- // it and KNL has another that prefetches to L2 cache. We assume the
+ // its own CPUID bit as part of deprecating 3DNow. We assume the
// L1 version exists if the L2 version does.
- return hasThreeDNow() || hasPRFCHW() || hasPREFETCHWT1();
+ return hasThreeDNow() || hasPRFCHW();
}
bool hasSSEPrefetch() const {
// We implicitly enable these when we have a write prefix supporting cache
// level OR if we have prfchw, but don't already have a read prefetch from
// 3dnow.
- return hasSSE1() || (hasPRFCHW() && !hasThreeDNow()) || hasPREFETCHWT1() ||
- hasPREFETCHI();
+ return hasSSE1() || (hasPRFCHW() && !hasThreeDNow()) || hasPREFETCHI();
}
bool canUseLAHFSAHF() const { return hasLAHFSAHF64() || !is64Bit(); }
// These are generic getters that OR together all of the thunk types
diff --git a/llvm/lib/TargetParser/Host.cpp b/llvm/lib/TargetParser/Host.cpp
index c5156c6cb802..68155acd9e5b 100644
--- a/llvm/lib/TargetParser/Host.cpp
+++ b/llvm/lib/TargetParser/Host.cpp
@@ -1005,8 +1005,6 @@ getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
CPU = "cascadelake";
} else if (testFeature(X86::FEATURE_AVX512VL)) {
CPU = "skylake-avx512";
- } else if (testFeature(X86::FEATURE_AVX512ER)) {
- CPU = "knl";
} else if (testFeature(X86::FEATURE_CLFLUSHOPT)) {
if (testFeature(X86::FEATURE_SHA))
CPU = "goldmont";
@@ -1300,10 +1298,6 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
setFeature(X86::FEATURE_AVX512IFMA);
if (HasLeaf7 && ((EBX >> 23) & 1))
setFeature(X86::FEATURE_CLFLUSHOPT);
- if (HasLeaf7 && ((EBX >> 26) & 1) && HasAVX512Save)
- setFeature(X86::FEATURE_AVX512PF);
- if (HasLeaf7 && ((EBX >> 27) & 1) && HasAVX512Save)
- setFeature(X86::FEATURE_AVX512ER);
if (HasLeaf7 && ((EBX >> 28) & 1) && HasAVX512Save)
setFeature(X86::FEATURE_AVX512CD);
if (HasLeaf7 && ((EBX >> 29) & 1))
@@ -1810,14 +1804,11 @@ bool sys::getHostCPUFeatures(StringMap<bool> &Features) {
Features["avx512ifma"] = HasLeaf7 && ((EBX >> 21) & 1) && HasAVX512Save;
Features["clflushopt"] = HasLeaf7 && ((EBX >> 23) & 1);
Features["clwb"] = HasLeaf7 && ((EBX >> 24) & 1);
- Features["avx512pf"] = HasLeaf7 && ((EBX >> 26) & 1) && HasAVX512Save;
- Features["avx512er"] = HasLeaf7 && ((EBX >> 27) & 1) && HasAVX512Save;
Features["avx512cd"] = HasLeaf7 && ((EBX >> 28) & 1) && HasAVX512Save;
Features["sha"] = HasLeaf7 && ((EBX >> 29) & 1);
Features["avx512bw"] = HasLeaf7 && ((EBX >> 30) & 1) && HasAVX512Save;
Features["avx512vl"] = HasLeaf7 && ((EBX >> 31) & 1) && HasAVX512Save;
- Features["prefetchwt1"] = HasLeaf7 && ((ECX >> 0) & 1);
Features["avx512vbmi"] = HasLeaf7 && ((ECX >> 1) & 1) && HasAVX512Save;
Features["pku"] = HasLeaf7 && ((ECX >> 4) & 1);
Features["waitpkg"] = HasLeaf7 && ((ECX >> 5) & 1);
diff --git a/llvm/lib/TargetParser/RISCVISAInfo.cpp b/llvm/lib/TargetParser/RISCVISAInfo.cpp
index 827bc5b44387..01d0c71c25a9 100644
--- a/llvm/lib/TargetParser/RISCVISAInfo.cpp
+++ b/llvm/lib/TargetParser/RISCVISAInfo.cpp
@@ -880,7 +880,7 @@ void RISCVISAInfo::updateImplication() {
// implied
if (!HasE && !HasI) {
auto Version = findDefaultVersion("i");
- addExtension("i", Version.value());
+ addExtension("i", *Version);
}
if (HasE && HasI)
@@ -906,7 +906,7 @@ void RISCVISAInfo::updateImplication() {
if (Exts.count(ImpliedExt))
return;
auto Version = findDefaultVersion(ImpliedExt);
- addExtension(ImpliedExt, Version.value());
+ addExtension(ImpliedExt, *Version);
WorkList.insert(ImpliedExt);
});
}
@@ -915,7 +915,7 @@ void RISCVISAInfo::updateImplication() {
if (XLen == 32 && Exts.count("zce") && Exts.count("f") &&
!Exts.count("zcf")) {
auto Version = findDefaultVersion("zcf");
- addExtension("zcf", Version.value());
+ addExtension("zcf", *Version);
}
}
@@ -942,7 +942,7 @@ void RISCVISAInfo::updateCombination() {
});
if (HasAllRequiredFeatures) {
auto Version = findDefaultVersion(CombineExt);
- addExtension(CombineExt, Version.value());
+ addExtension(CombineExt, *Version);
MadeChange = true;
}
}
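The `Version.value()` to `*Version` change matters only in how a missing value would fail: `value()` checks and throws `std::bad_optional_access`, while `operator*` assumes presence, which these call sites already guarantee. A one-file illustration:

#include <cassert>
#include <optional>

int main() {
  std::optional<unsigned> Version = 2;  // findDefaultVersion-style result
  assert(Version.has_value());          // the invariant the callers rely on
  assert(*Version == Version.value());  // identical whenever a value exists
}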
diff --git a/llvm/lib/TargetParser/X86TargetParser.cpp b/llvm/lib/TargetParser/X86TargetParser.cpp
index efe392b94545..e3802380d2be 100644
--- a/llvm/lib/TargetParser/X86TargetParser.cpp
+++ b/llvm/lib/TargetParser/X86TargetParser.cpp
@@ -95,9 +95,9 @@ constexpr FeatureBitset FeaturesBroadwell =
// Intel Knights Landing and Knights Mill
// Knights Landing has feature parity with Broadwell.
-constexpr FeatureBitset FeaturesKNL =
- FeaturesBroadwell | FeatureAES | FeatureAVX512F | FeatureEVEX512 |
- FeatureAVX512CD | FeatureAVX512ER | FeatureAVX512PF | FeaturePREFETCHWT1;
+constexpr FeatureBitset FeaturesKNL = FeaturesBroadwell | FeatureAES |
+ FeatureAVX512F | FeatureEVEX512 |
+ FeatureAVX512CD;
constexpr FeatureBitset FeaturesKNM = FeaturesKNL | FeatureAVX512VPOPCNTDQ;
// Intel Skylake processors.
@@ -500,7 +500,6 @@ constexpr FeatureBitset ImpliedFeaturesMOVDIRI = {};
constexpr FeatureBitset ImpliedFeaturesPCONFIG = {};
constexpr FeatureBitset ImpliedFeaturesPOPCNT = {};
constexpr FeatureBitset ImpliedFeaturesPKU = {};
-constexpr FeatureBitset ImpliedFeaturesPREFETCHWT1 = {};
constexpr FeatureBitset ImpliedFeaturesPRFCHW = {};
constexpr FeatureBitset ImpliedFeaturesPTWRITE = {};
constexpr FeatureBitset ImpliedFeaturesRDPID = {};
@@ -569,8 +568,6 @@ constexpr FeatureBitset ImpliedFeaturesSM4 = FeatureAVX2;
constexpr FeatureBitset ImpliedFeaturesAVX512CD = FeatureAVX512F;
constexpr FeatureBitset ImpliedFeaturesAVX512BW = FeatureAVX512F;
constexpr FeatureBitset ImpliedFeaturesAVX512DQ = FeatureAVX512F;
-constexpr FeatureBitset ImpliedFeaturesAVX512ER = FeatureAVX512F;
-constexpr FeatureBitset ImpliedFeaturesAVX512PF = FeatureAVX512F;
constexpr FeatureBitset ImpliedFeaturesAVX512VL = FeatureAVX512F;
constexpr FeatureBitset ImpliedFeaturesAVX512BF16 = FeatureAVX512BW;
@@ -751,13 +748,13 @@ unsigned llvm::X86::getFeaturePriority(ProcessorFeatures Feat) {
#ifndef NDEBUG
// Check that priorities are set properly in the .def file. We expect that
// "compat" features are assigned non-duplicate consecutive priorities
- // starting from one (1, ..., 37) and multiple zeros.
+ // starting from one (1, ..., 35) and multiple zeros.
#define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY) PRIORITY,
unsigned Priorities[] = {
#include "llvm/TargetParser/X86TargetParser.def"
};
std::array<unsigned, std::size(Priorities)> HelperList;
- const size_t MaxPriority = 37;
+ const size_t MaxPriority = 35;
std::iota(HelperList.begin(), HelperList.begin() + MaxPriority + 1, 0);
for (size_t i = MaxPriority + 1; i != std::size(Priorities); ++i)
HelperList[i] = 0;
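The check above expects the compat priorities to be exactly the consecutive run 0..MaxPriority plus any number of zeros, built with std::iota; MaxPriority drops from 37 to 35, presumably matching the two removed KNL-era compat features. A reduced sketch of the expected-prefix construction:

#include <array>
#include <cassert>
#include <numeric>

int main() {
  constexpr size_t MaxPriority = 35;
  std::array<unsigned, MaxPriority + 1> Expected{};
  std::iota(Expected.begin(), Expected.end(), 0u);  // 0, 1, ..., 35
  assert(Expected.front() == 0 && Expected.back() == MaxPriority);
}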
diff --git a/llvm/lib/TextAPI/Utils.cpp b/llvm/lib/TextAPI/Utils.cpp
index 08f14f65177e..01021e3a264d 100644
--- a/llvm/lib/TextAPI/Utils.cpp
+++ b/llvm/lib/TextAPI/Utils.cpp
@@ -184,7 +184,7 @@ llvm::Expected<Regex> llvm::MachO::createRegexFromGlob(StringRef Glob) {
break;
}
default:
- if (RegexMetachars.find(C) != StringRef::npos)
+ if (RegexMetachars.contains(C))
RegexString.push_back('\\');
RegexString.push_back(C);
}
diff --git a/llvm/lib/Transforms/Coroutines/CoroElide.cpp b/llvm/lib/Transforms/Coroutines/CoroElide.cpp
index bb244489e4c2..74b5ccb7b9b7 100644
--- a/llvm/lib/Transforms/Coroutines/CoroElide.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroElide.cpp
@@ -464,13 +464,9 @@ bool CoroIdElider::attemptElide() {
return true;
}
-static bool declaresCoroElideIntrinsics(Module &M) {
- return coro::declaresIntrinsics(M, {"llvm.coro.id", "llvm.coro.id.async"});
-}
-
PreservedAnalyses CoroElidePass::run(Function &F, FunctionAnalysisManager &AM) {
auto &M = *F.getParent();
- if (!declaresCoroElideIntrinsics(M))
+ if (!coro::declaresIntrinsics(M, {"llvm.coro.id"}))
return PreservedAnalyses::all();
FunctionElideInfo FEI{&F};
diff --git a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
index 08a4522e3fac..38b8dab984db 100644
--- a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
@@ -19,6 +19,7 @@
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/StackLifetime.h"
#include "llvm/Config/llvm-config.h"
@@ -1440,17 +1441,22 @@ namespace {
struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
using Base = PtrUseVisitor<AllocaUseVisitor>;
AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
- const CoroBeginInst &CB, const SuspendCrossingInfo &Checker,
+ const coro::Shape &CoroShape,
+ const SuspendCrossingInfo &Checker,
bool ShouldUseLifetimeStartInfo)
- : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker),
- ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {}
+ : PtrUseVisitor(DL), DT(DT), CoroShape(CoroShape), Checker(Checker),
+ ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {
+ for (AnyCoroSuspendInst *SuspendInst : CoroShape.CoroSuspends)
+ CoroSuspendBBs.insert(SuspendInst->getParent());
+ }
void visit(Instruction &I) {
Users.insert(&I);
Base::visit(I);
// If the pointer is escaped prior to CoroBegin, we have to assume it would
// be written into before CoroBegin as well.
- if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
+ if (PI.isEscaped() &&
+ !DT.dominates(CoroShape.CoroBegin, PI.getEscapingInst())) {
MayWriteBeforeCoroBegin = true;
}
}
@@ -1553,10 +1559,19 @@ struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
// When we found the lifetime markers refers to a
// subrange of the original alloca, ignore the lifetime
// markers to avoid misleading the analysis.
- if (II.getIntrinsicID() != Intrinsic::lifetime_start || !IsOffsetKnown ||
- !Offset.isZero())
+ if (!IsOffsetKnown || !Offset.isZero())
+ return Base::visitIntrinsicInst(II);
+ switch (II.getIntrinsicID()) {
+ default:
return Base::visitIntrinsicInst(II);
- LifetimeStarts.insert(&II);
+ case Intrinsic::lifetime_start:
+ LifetimeStarts.insert(&II);
+ LifetimeStartBBs.push_back(II.getParent());
+ break;
+ case Intrinsic::lifetime_end:
+ LifetimeEndBBs.insert(II.getParent());
+ break;
+ }
}
void visitCallBase(CallBase &CB) {
@@ -1586,7 +1601,7 @@ struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
private:
const DominatorTree &DT;
- const CoroBeginInst &CoroBegin;
+ const coro::Shape &CoroShape;
const SuspendCrossingInfo &Checker;
// All alias to the original AllocaInst, created before CoroBegin and used
// after CoroBegin. Each entry contains the instruction and the offset in the
@@ -1594,6 +1609,9 @@ private:
DenseMap<Instruction *, std::optional<APInt>> AliasOffetMap{};
SmallPtrSet<Instruction *, 4> Users{};
SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
+ SmallVector<BasicBlock *> LifetimeStartBBs{};
+ SmallPtrSet<BasicBlock *, 2> LifetimeEndBBs{};
+ SmallPtrSet<const BasicBlock *, 2> CoroSuspendBBs{};
bool MayWriteBeforeCoroBegin{false};
bool ShouldUseLifetimeStartInfo{true};
@@ -1605,10 +1623,19 @@ private:
// every basic block that uses the pointer to see if they cross suspension
// points. The uses cover both direct uses as well as indirect uses.
if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
- for (auto *I : Users)
- for (auto *S : LifetimeStarts)
- if (Checker.isDefinitionAcrossSuspend(*S, I))
- return true;
+ // If there is no explicit lifetime.end, then assume the address can
+ // cross suspension points.
+ if (LifetimeEndBBs.empty())
+ return true;
+
+ // If there is a path from a lifetime.start to a suspend without a
+ // corresponding lifetime.end, then the alloca's lifetime persists
+ // beyond that suspension point and the alloca must go on the frame.
+ llvm::SmallVector<BasicBlock *> Worklist(LifetimeStartBBs);
+ if (isManyPotentiallyReachableFromMany(Worklist, CoroSuspendBBs,
+ &LifetimeEndBBs, &DT))
+ return true;
+
// Addresses are guaranteed to be identical after every lifetime.start so
// we cannot use the local stack if the address escaped and there is a
// suspend point between lifetime markers. This should also cover the
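For intuition about the reachability query above, here is a minimal C++20 coroutine where a local's lifetime crosses a suspend point, so it must live on the frame. The Task type is a bare-bones stand-in, not an LLVM or standard-library API:

#include <coroutine>

struct Task {
  struct promise_type {
    Task get_return_object() {
      return {std::coroutine_handle<promise_type>::from_promise(*this)};
    }
    std::suspend_never initial_suspend() { return {}; }
    std::suspend_always final_suspend() noexcept { return {}; }
    void return_void() {}
    void unhandled_exception() {}
  };
  std::coroutine_handle<promise_type> h;
};

Task demo() {
  int x = 42;                      // lifetime.start before the suspend
  co_await std::suspend_always{};  // a path from the start reaches this suspend
  (void)x;                         // used after resume: x must be on the frame
}

int main() {
  Task t = demo();  // runs up to the co_await, then suspends
  t.h.resume();     // resumes; x is reloaded from the coroutine frame
  t.h.destroy();    // clean up the frame parked at final_suspend
}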
@@ -1646,13 +1673,13 @@ private:
}
void handleMayWrite(const Instruction &I) {
- if (!DT.dominates(&CoroBegin, &I))
+ if (!DT.dominates(CoroShape.CoroBegin, &I))
MayWriteBeforeCoroBegin = true;
}
bool usedAfterCoroBegin(Instruction &I) {
for (auto &U : I.uses())
- if (DT.dominates(&CoroBegin, U))
+ if (DT.dominates(CoroShape.CoroBegin, U))
return true;
return false;
}
@@ -1661,7 +1688,7 @@ private:
// We track all aliases created prior to CoroBegin but used after.
// These aliases may need to be recreated after CoroBegin if the alloca
// need to live on the frame.
- if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
+ if (DT.dominates(CoroShape.CoroBegin, &I) || !usedAfterCoroBegin(I))
return;
if (!IsOffsetKnown) {
@@ -2830,8 +2857,7 @@ static void collectFrameAlloca(AllocaInst *AI, coro::Shape &Shape,
bool ShouldUseLifetimeStartInfo =
(Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
Shape.ABI != coro::ABI::RetconOnce);
- AllocaUseVisitor Visitor{AI->getModule()->getDataLayout(), DT,
- *Shape.CoroBegin, Checker,
+ AllocaUseVisitor Visitor{AI->getModule()->getDataLayout(), DT, Shape, Checker,
ShouldUseLifetimeStartInfo};
Visitor.visitPtr(*AI);
if (!Visitor.getShouldLiveOnFrame())
@@ -2948,10 +2974,12 @@ void coro::salvageDebugInfo(
std::optional<BasicBlock::iterator> InsertPt;
if (auto *I = dyn_cast<Instruction>(Storage)) {
InsertPt = I->getInsertionPointAfterDef();
- // Update DILocation only in O0 since it is easy to get out of sync in
- // optimizations. See https://github.com/llvm/llvm-project/pull/75104 for
- // an example.
- if (!OptimizeFrame && I->getDebugLoc())
+ // Update the DILocation only if the variable was not inlined.
+ DebugLoc ILoc = I->getDebugLoc();
+ DebugLoc DVILoc = DVI.getDebugLoc();
+ if (ILoc && DVILoc &&
+ DVILoc->getScope()->getSubprogram() ==
+ ILoc->getScope()->getSubprogram())
DVI.setDebugLoc(I->getDebugLoc());
} else if (isa<Argument>(Storage))
InsertPt = F->getEntryBlock().begin();
@@ -2988,11 +3016,13 @@ void coro::salvageDebugInfo(
std::optional<BasicBlock::iterator> InsertPt;
if (auto *I = dyn_cast<Instruction>(Storage)) {
InsertPt = I->getInsertionPointAfterDef();
- // Update DILocation only in O0 since it is easy to get out of sync in
- // optimizations. See https://github.com/llvm/llvm-project/pull/75104 for
- // an example.
- if (!OptimizeFrame && I->getDebugLoc())
- DVR.setDebugLoc(I->getDebugLoc());
+ // Update the DILocation only if the variable was not inlined.
+ DebugLoc ILoc = I->getDebugLoc();
+ DebugLoc DVRLoc = DVR.getDebugLoc();
+ if (ILoc && DVRLoc &&
+ DVRLoc->getScope()->getSubprogram() ==
+ ILoc->getScope()->getSubprogram())
+ DVR.setDebugLoc(ILoc);
} else if (isa<Argument>(Storage))
InsertPt = F->getEntryBlock().begin();
if (InsertPt) {
diff --git a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
index 1d9cf185b75a..5a58a99d2879 100644
--- a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
@@ -227,6 +227,7 @@ static void lowerAwaitSuspend(IRBuilder<> &Builder, CoroAwaitSuspendInst *CB,
FunctionType *ResumeTy = FunctionType::get(
Type::getVoidTy(Ctx), PointerType::getUnqual(Ctx), false);
auto *ResumeCall = Builder.CreateCall(ResumeTy, ResumeAddr, {NewCall});
+ ResumeCall->setCallingConv(CallingConv::Fast);
// We can't insert the 'ret' instruction and adjust the cc until the
// function has been split, so remember this for later.
@@ -1088,7 +1089,6 @@ void CoroCloner::create() {
// Turn symmetric transfers into musttail calls.
for (CallInst *ResumeCall : Shape.SymmetricTransfers) {
ResumeCall = cast<CallInst>(VMap[ResumeCall]);
- ResumeCall->setCallingConv(NewF->getCallingConv());
if (TTI.supportsTailCallFor(ResumeCall)) {
// FIXME: Could we support symmetric transfer effectively without
// musttail?
diff --git a/llvm/lib/Transforms/IPO/Attributor.cpp b/llvm/lib/Transforms/IPO/Attributor.cpp
index e3920b9e1d2b..b6866580ccd3 100644
--- a/llvm/lib/Transforms/IPO/Attributor.cpp
+++ b/llvm/lib/Transforms/IPO/Attributor.cpp
@@ -3954,7 +3954,7 @@ static bool runAttributorLightOnFunctions(InformationCache &InfoCache,
// We look at internal functions only on-demand but if any use is not a
// direct call or outside the current set of analyzed functions, we have
// to do it eagerly.
- if (F->hasLocalLinkage()) {
+ if (AC.UseLiveness && F->hasLocalLinkage()) {
if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
const auto *CB = dyn_cast<CallBase>(U.getUser());
return CB && CB->isCallee(&U) &&
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 41b66aafe7d3..1b3bf3c732ed 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -5690,6 +5690,9 @@ bool AANoCapture::isImpliedByIR(Attributor &A, const IRPosition &IRP,
return V.use_empty();
// You cannot "capture" null in the default address space.
+ //
+ // FIXME: This should use NullPointerIsDefined to account for the function
+ // attribute.
if (isa<UndefValue>(V) || (isa<ConstantPointerNull>(V) &&
V.getType()->getPointerAddressSpace() == 0)) {
return true;
@@ -5899,10 +5902,13 @@ ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
const Function *F =
isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
- assert(F && "Expected a function!");
- const IRPosition &FnPos = IRPosition::function(*F);
+
+ // TODO: Is the checkForAllUses below useful for constants?
+ if (!F)
+ return indicatePessimisticFixpoint();
AANoCapture::StateType T;
+ const IRPosition &FnPos = IRPosition::function(*F);
// Readonly means we cannot capture through memory.
bool IsKnown;
diff --git a/llvm/lib/Transforms/IPO/FunctionImport.cpp b/llvm/lib/Transforms/IPO/FunctionImport.cpp
index 68f9799616ae..cb19bf2a4ae1 100644
--- a/llvm/lib/Transforms/IPO/FunctionImport.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionImport.cpp
@@ -140,6 +140,17 @@ static cl::opt<bool>
ImportAllIndex("import-all-index",
cl::desc("Import all external functions in index."));
+/// This is a test-only option.
+/// If this option is enabled, the ThinLTO indexing step will import each
+/// function declaration as a fallback. In a real build this may increase RAM
+/// usage of the indexing step unnecessarily.
+/// TODO: Implement selective import (based on combined summary analysis) to
+/// ensure the imported function has a use case in the postlink pipeline.
+static cl::opt<bool> ImportDeclaration(
+ "import-declaration", cl::init(false), cl::Hidden,
+ cl::desc("If true, import function declaration as fallback if the function "
+ "definition is not imported."));
+
/// Pass a workload description file - an example of workload would be the
/// functions executed to satisfy a RPC request. A workload is defined by a root
/// function and the list of functions that are (frequently) needed to satisfy
@@ -245,8 +256,12 @@ static auto qualifyCalleeCandidates(
}
/// Given a list of possible callee implementation for a call site, select one
-/// that fits the \p Threshold. If none are found, the Reason will give the last
-/// reason for the failure (last, in the order of CalleeSummaryList entries).
+/// that fits the \p Threshold for function definition import. If none are
+/// found, the Reason will give the last reason for the failure (last, in the
+/// order of CalleeSummaryList entries). While looking for a callee definition,
+/// sets \p TooLargeOrNoInlineSummary to the last seen too-large or noinline
+/// candidate; other modules may want to know the function summary or
+/// declaration even if a definition is not needed.
///
/// FIXME: select "best" instead of first that fits. But what is "best"?
/// - The smallest: more likely to be inlined.
@@ -259,24 +274,32 @@ static const GlobalValueSummary *
selectCallee(const ModuleSummaryIndex &Index,
ArrayRef<std::unique_ptr<GlobalValueSummary>> CalleeSummaryList,
unsigned Threshold, StringRef CallerModulePath,
+ const GlobalValueSummary *&TooLargeOrNoInlineSummary,
FunctionImporter::ImportFailureReason &Reason) {
+ // Records the last summary with reason noinline or too-large.
+ TooLargeOrNoInlineSummary = nullptr;
auto QualifiedCandidates =
qualifyCalleeCandidates(Index, CalleeSummaryList, CallerModulePath);
for (auto QualifiedValue : QualifiedCandidates) {
Reason = QualifiedValue.first;
+ // Skip a summary whose import has not been proven legal.
if (Reason != FunctionImporter::ImportFailureReason::None)
continue;
auto *Summary =
cast<FunctionSummary>(QualifiedValue.second->getBaseObject());
+ // Don't bother importing the definition if the chance of inlining it is
+ // not high enough (except under `--force-import-all`).
if ((Summary->instCount() > Threshold) && !Summary->fflags().AlwaysInline &&
!ForceImportAll) {
+ TooLargeOrNoInlineSummary = Summary;
Reason = FunctionImporter::ImportFailureReason::TooLarge;
continue;
}
- // Don't bother importing if we can't inline it anyway.
+ // Don't bother importing the definition if we can't inline it anyway.
if (Summary->fflags().NoInline && !ForceImportAll) {
+ TooLargeOrNoInlineSummary = Summary;
Reason = FunctionImporter::ImportFailureReason::NoInline;
continue;
}
@@ -358,17 +381,27 @@ class GlobalsImporter final {
if (!GVS || !Index.canImportGlobalVar(GVS, /* AnalyzeRefs */ true) ||
LocalNotInModule(GVS))
continue;
- auto ILI = ImportList[RefSummary->modulePath()].insert(VI.getGUID());
+
+ // If there isn't an entry for GUID, insert <GUID, Definition> pair.
+ // Otherwise, definition should take precedence over declaration.
+ auto [Iter, Inserted] =
+ ImportList[RefSummary->modulePath()].try_emplace(
+ VI.getGUID(), GlobalValueSummary::Definition);
// Only update stat and exports if we haven't already imported this
// variable.
- if (!ILI.second)
+ if (!Inserted) {
+ // Set the value to 'std::min(existing-value, new-value)' to make
+ // sure a definition takes precedence over a declaration.
+ Iter->second = std::min(GlobalValueSummary::Definition, Iter->second);
break;
+ }
NumImportedGlobalVarsThinLink++;
// Any references made by this variable will be marked exported
// later, in ComputeCrossModuleImport, after import decisions are
// complete, which is more efficient than adding them here.
if (ExportLists)
- (*ExportLists)[RefSummary->modulePath()].insert(VI);
+ (*ExportLists)[RefSummary->modulePath()][VI] =
+ GlobalValueSummary::Definition;
// If variable is not writeonly we attempt to recursively analyze
// its references in order to import referenced constants.
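The try_emplace-plus-min pattern in this hunk recurs throughout the patch: import kinds merge so that Definition (enum value 0) always beats Declaration (enum value 1). A standalone sanity check of that rule; the map type and GUID value are illustrative only:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <map>

enum ImportKind : uint8_t { Definition = 0, Declaration = 1 };

int main() {
  std::map<uint64_t, ImportKind> ImportList;        // GUID -> import kind
  ImportList.try_emplace(7, Declaration);           // first seen as a decl
  auto [It, Inserted] = ImportList.try_emplace(7, Definition);
  if (!Inserted)                                    // merge with std::min:
    It->second = std::min(It->second, Definition);  // the definition wins
  assert(ImportList.at(7) == Definition);
}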
@@ -545,10 +578,11 @@ class WorkloadImportsManager : public ModuleImportsManager {
LLVM_DEBUG(dbgs() << "[Workload][Including]" << VI.name() << " from "
<< ExportingModule << " : "
<< Function::getGUID(VI.name()) << "\n");
- ImportList[ExportingModule].insert(VI.getGUID());
+ ImportList[ExportingModule][VI.getGUID()] =
+ GlobalValueSummary::Definition;
GVI.onImportingSummary(*GVS);
if (ExportLists)
- (*ExportLists)[ExportingModule].insert(VI);
+ (*ExportLists)[ExportingModule][VI] = GlobalValueSummary::Definition;
}
LLVM_DEBUG(dbgs() << "[Workload] Done\n");
}
@@ -769,9 +803,28 @@ static void computeImportForFunction(
}
FunctionImporter::ImportFailureReason Reason{};
- CalleeSummary = selectCallee(Index, VI.getSummaryList(), NewThreshold,
- Summary.modulePath(), Reason);
+
+ // `SummaryForDeclImport` is a summary eligible for declaration import.
+ const GlobalValueSummary *SummaryForDeclImport = nullptr;
+ CalleeSummary =
+ selectCallee(Index, VI.getSummaryList(), NewThreshold,
+ Summary.modulePath(), SummaryForDeclImport, Reason);
if (!CalleeSummary) {
+ // No callee qualifies for definition import, but one qualifies for
+ // declaration import.
+ if (ImportDeclaration && SummaryForDeclImport) {
+ StringRef DeclSourceModule = SummaryForDeclImport->modulePath();
+
+ // Since a definition takes precedence over a declaration for the same VI,
+ // try to emplace the <VI, declaration> pair without checking the insert
+ // result; if the insert doesn't happen, an entry keyed by VI must already
+ // exist.
+ if (ExportLists)
+ (*ExportLists)[DeclSourceModule].try_emplace(
+ VI, GlobalValueSummary::Declaration);
+ ImportList[DeclSourceModule].try_emplace(
+ VI.getGUID(), GlobalValueSummary::Declaration);
+ }
// Update with new larger threshold if this was a retry (otherwise
// we would have already inserted with NewThreshold above). Also
// update failure info if requested.
@@ -816,11 +869,15 @@ static void computeImportForFunction(
"selectCallee() didn't honor the threshold");
auto ExportModulePath = ResolvedCalleeSummary->modulePath();
- auto ILI = ImportList[ExportModulePath].insert(VI.getGUID());
+
+ // Try to emplace the definition entry, and update stats based on the
+ // insertion status.
+ auto [Iter, Inserted] = ImportList[ExportModulePath].try_emplace(
+ VI.getGUID(), GlobalValueSummary::Definition);
+
// We previously decided to import this GUID definition if it was already
// inserted in the set of imports from the exporting module.
- bool PreviouslyImported = !ILI.second;
- if (!PreviouslyImported) {
+ if (Inserted || Iter->second == GlobalValueSummary::Declaration) {
NumImportedFunctionsThinLink++;
if (IsHotCallsite)
NumImportedHotFunctionsThinLink++;
@@ -828,11 +885,14 @@ static void computeImportForFunction(
NumImportedCriticalFunctionsThinLink++;
}
+ if (Iter->second == GlobalValueSummary::Declaration)
+ Iter->second = GlobalValueSummary::Definition;
+
// Any calls/references made by this function will be marked exported
// later, in ComputeCrossModuleImport, after import decisions are
// complete, which is more efficient than adding them here.
if (ExportLists)
- (*ExportLists)[ExportModulePath].insert(VI);
+ (*ExportLists)[ExportModulePath][VI] = GlobalValueSummary::Definition;
}
auto GetAdjustedThreshold = [](unsigned Threshold, bool IsHotCallsite) {
@@ -939,12 +999,20 @@ static bool isGlobalVarSummary(const ModuleSummaryIndex &Index,
}
template <class T>
-static unsigned numGlobalVarSummaries(const ModuleSummaryIndex &Index,
- T &Cont) {
+static unsigned numGlobalVarSummaries(const ModuleSummaryIndex &Index, T &Cont,
+ unsigned &DefinedGVS,
+ unsigned &DefinedFS) {
unsigned NumGVS = 0;
- for (auto &V : Cont)
- if (isGlobalVarSummary(Index, V))
+ DefinedGVS = 0;
+ DefinedFS = 0;
+ for (auto &[GUID, Type] : Cont) {
+ if (isGlobalVarSummary(Index, GUID)) {
+ if (Type == GlobalValueSummary::Definition)
+ ++DefinedGVS;
++NumGVS;
+ } else if (Type == GlobalValueSummary::Definition)
+ ++DefinedFS;
+ }
return NumGVS;
}
#endif
@@ -954,13 +1022,12 @@ static bool checkVariableImport(
const ModuleSummaryIndex &Index,
DenseMap<StringRef, FunctionImporter::ImportMapTy> &ImportLists,
DenseMap<StringRef, FunctionImporter::ExportSetTy> &ExportLists) {
-
DenseSet<GlobalValue::GUID> FlattenedImports;
for (auto &ImportPerModule : ImportLists)
for (auto &ExportPerModule : ImportPerModule.second)
- FlattenedImports.insert(ExportPerModule.second.begin(),
- ExportPerModule.second.end());
+ for (auto &[GUID, Type] : ExportPerModule.second)
+ FlattenedImports.insert(GUID);
// Checks that all GUIDs of read/writeonly vars we see in export lists
 // are also in the import lists. Otherwise we may face linker undefs,
@@ -979,7 +1046,7 @@ static bool checkVariableImport(
};
for (auto &ExportPerModule : ExportLists)
- for (auto &VI : ExportPerModule.second)
+ for (auto &[VI, Unused] : ExportPerModule.second)
if (!FlattenedImports.count(VI.getGUID()) &&
IsReadOrWriteOnlyVarNeedingImporting(ExportPerModule.first, VI))
return false;
@@ -1015,7 +1082,11 @@ void llvm::ComputeCrossModuleImport(
FunctionImporter::ExportSetTy NewExports;
const auto &DefinedGVSummaries =
ModuleToDefinedGVSummaries.lookup(ELI.first);
- for (auto &EI : ELI.second) {
+ for (auto &[EI, Type] : ELI.second) {
+ // If a variable is exported as a declaration, its 'refs' and 'calls' are
+ // not further exported.
+ if (Type == GlobalValueSummary::Declaration)
+ continue;
// Find the copy defined in the exporting module so that we can mark the
// values it references in that specific definition as exported.
// Below we will add all references and called values, without regard to
@@ -1034,22 +1105,31 @@ void llvm::ComputeCrossModuleImport(
// we convert such variables initializers to "zeroinitializer".
// See processGlobalForThinLTO.
if (!Index.isWriteOnly(GVS))
- for (const auto &VI : GVS->refs())
- NewExports.insert(VI);
+ for (const auto &VI : GVS->refs()) {
+ // Try to emplace the declaration entry. If a definition entry
+ // already exists for key `VI`, this is a no-op.
+ NewExports.try_emplace(VI, GlobalValueSummary::Declaration);
+ }
} else {
auto *FS = cast<FunctionSummary>(S);
- for (const auto &Edge : FS->calls())
- NewExports.insert(Edge.first);
- for (const auto &Ref : FS->refs())
- NewExports.insert(Ref);
+ for (const auto &Edge : FS->calls()) {
+ // Try to emplace the declaration entry. If a definition entry
+ // already exists for key `Edge.first`, this is a no-op.
+ NewExports.try_emplace(Edge.first, GlobalValueSummary::Declaration);
+ }
+ for (const auto &Ref : FS->refs()) {
+ // Try to emplace the declaration entry. If a definition entry
+ // already exists for key `Ref`, this is a no-op.
+ NewExports.try_emplace(Ref, GlobalValueSummary::Declaration);
+ }
}
}
- // Prune list computed above to only include values defined in the exporting
- // module. We do this after the above insertion since we may hit the same
- // ref/call target multiple times in above loop, and it is more efficient to
- // avoid a set lookup each time.
+ // Prune list computed above to only include values defined in the
+ // exporting module. We do this after the above insertion since we may hit
+ // the same ref/call target multiple times in above loop, and it is more
+ // efficient to avoid a set lookup each time.
for (auto EI = NewExports.begin(); EI != NewExports.end();) {
- if (!DefinedGVSummaries.count(EI->getGUID()))
+ if (!DefinedGVSummaries.count(EI->first.getGUID()))
NewExports.erase(EI++);
else
++EI;
@@ -1064,18 +1144,29 @@ void llvm::ComputeCrossModuleImport(
for (auto &ModuleImports : ImportLists) {
auto ModName = ModuleImports.first;
auto &Exports = ExportLists[ModName];
- unsigned NumGVS = numGlobalVarSummaries(Index, Exports);
- LLVM_DEBUG(dbgs() << "* Module " << ModName << " exports "
- << Exports.size() - NumGVS << " functions and " << NumGVS
- << " vars. Imports from " << ModuleImports.second.size()
- << " modules.\n");
+ unsigned DefinedGVS = 0, DefinedFS = 0;
+ unsigned NumGVS =
+ numGlobalVarSummaries(Index, Exports, DefinedGVS, DefinedFS);
+ LLVM_DEBUG(dbgs() << "* Module " << ModName << " exports " << DefinedFS
+                      << " functions as definitions, "
+ << Exports.size() - NumGVS - DefinedFS
+ << " functions as declarations, " << DefinedGVS
+ << " var definitions and " << NumGVS - DefinedGVS
+ << " var declarations. Imports from "
+ << ModuleImports.second.size() << " modules.\n");
for (auto &Src : ModuleImports.second) {
auto SrcModName = Src.first;
- unsigned NumGVSPerMod = numGlobalVarSummaries(Index, Src.second);
- LLVM_DEBUG(dbgs() << " - " << Src.second.size() - NumGVSPerMod
- << " functions imported from " << SrcModName << "\n");
- LLVM_DEBUG(dbgs() << " - " << NumGVSPerMod
- << " global vars imported from " << SrcModName << "\n");
+ unsigned DefinedGVS = 0, DefinedFS = 0;
+ unsigned NumGVSPerMod =
+ numGlobalVarSummaries(Index, Src.second, DefinedGVS, DefinedFS);
+ LLVM_DEBUG(dbgs() << " - " << DefinedFS << " function definitions and "
+ << Src.second.size() - NumGVSPerMod - DefinedFS
+ << " function declarations imported from " << SrcModName
+ << "\n");
+      LLVM_DEBUG(dbgs() << " - " << DefinedGVS << " global var definitions and "
+ << NumGVSPerMod - DefinedGVS
+                        << " global var declarations imported from "
+ << SrcModName << "\n");
}
}
#endif
@@ -1089,11 +1180,17 @@ static void dumpImportListForModule(const ModuleSummaryIndex &Index,
<< ImportList.size() << " modules.\n");
for (auto &Src : ImportList) {
auto SrcModName = Src.first;
- unsigned NumGVSPerMod = numGlobalVarSummaries(Index, Src.second);
- LLVM_DEBUG(dbgs() << " - " << Src.second.size() - NumGVSPerMod
- << " functions imported from " << SrcModName << "\n");
- LLVM_DEBUG(dbgs() << " - " << NumGVSPerMod << " vars imported from "
- << SrcModName << "\n");
+ unsigned DefinedGVS = 0, DefinedFS = 0;
+ unsigned NumGVSPerMod =
+ numGlobalVarSummaries(Index, Src.second, DefinedGVS, DefinedFS);
+ LLVM_DEBUG(dbgs() << " - " << DefinedFS << " function definitions and "
+ << Src.second.size() - DefinedFS - NumGVSPerMod
+ << " function declarations imported from " << SrcModName
+ << "\n");
+ LLVM_DEBUG(dbgs() << " - " << DefinedGVS << " var definitions and "
+ << NumGVSPerMod - DefinedGVS
+ << " var declarations imported from " << SrcModName
+ << "\n");
}
}
#endif
@@ -1149,7 +1246,13 @@ static void ComputeCrossModuleImportForModuleFromIndexForTest(
if (Summary->modulePath() == ModulePath)
continue;
// Add an entry to provoke importing by thinBackend.
- ImportList[Summary->modulePath()].insert(GUID);
+ auto [Iter, Inserted] = ImportList[Summary->modulePath()].try_emplace(
+ GUID, Summary->importType());
+ if (!Inserted) {
+ // Use 'std::min' to make sure definition (with enum value 0) takes
+ // precedence over declaration (with enum value 1).
+ Iter->second = std::min(Iter->second, Summary->importType());
+ }
}
#ifndef NDEBUG
dumpImportListForModule(Index, ModulePath, ImportList);
@@ -1332,20 +1435,25 @@ void llvm::gatherImportedSummariesForModule(
StringRef ModulePath,
const DenseMap<StringRef, GVSummaryMapTy> &ModuleToDefinedGVSummaries,
const FunctionImporter::ImportMapTy &ImportList,
- std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex) {
+ std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex,
+ GVSummaryPtrSet &DecSummaries) {
// Include all summaries from the importing module.
ModuleToSummariesForIndex[std::string(ModulePath)] =
ModuleToDefinedGVSummaries.lookup(ModulePath);
// Include summaries for imports.
for (const auto &ILI : ImportList) {
auto &SummariesForIndex = ModuleToSummariesForIndex[std::string(ILI.first)];
+
const auto &DefinedGVSummaries =
ModuleToDefinedGVSummaries.lookup(ILI.first);
- for (const auto &GI : ILI.second) {
- const auto &DS = DefinedGVSummaries.find(GI);
+ for (const auto &[GUID, Type] : ILI.second) {
+ const auto &DS = DefinedGVSummaries.find(GUID);
assert(DS != DefinedGVSummaries.end() &&
"Expected a defined summary for imported global value");
- SummariesForIndex[GI] = DS->second;
+ if (Type == GlobalValueSummary::Declaration)
+ DecSummaries.insert(DS->second);
+
+ SummariesForIndex[GUID] = DS->second;
}
}
}
@@ -1617,6 +1725,16 @@ Expected<bool> FunctionImporter::importFunctions(
for (const auto &FunctionsToImportPerModule : ImportList) {
ModuleNameOrderedList.insert(FunctionsToImportPerModule.first);
}
+
+ auto getImportType = [&](const FunctionsToImportTy &GUIDToImportType,
+ GlobalValue::GUID GUID)
+ -> std::optional<GlobalValueSummary::ImportKind> {
+ auto Iter = GUIDToImportType.find(GUID);
+ if (Iter == GUIDToImportType.end())
+ return std::nullopt;
+ return Iter->second;
+ };
+
for (const auto &Name : ModuleNameOrderedList) {
// Get the module for the import
const auto &FunctionsToImportPerModule = ImportList.find(Name);
@@ -1634,17 +1752,27 @@ Expected<bool> FunctionImporter::importFunctions(
return std::move(Err);
auto &ImportGUIDs = FunctionsToImportPerModule->second;
+
// Find the globals to import
SetVector<GlobalValue *> GlobalsToImport;
for (Function &F : *SrcModule) {
if (!F.hasName())
continue;
auto GUID = F.getGUID();
- auto Import = ImportGUIDs.count(GUID);
- LLVM_DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing function "
+ auto MaybeImportType = getImportType(ImportGUIDs, GUID);
+
+ bool ImportDefinition =
+ (MaybeImportType &&
+ (*MaybeImportType == GlobalValueSummary::Definition));
+
+ LLVM_DEBUG(dbgs() << (MaybeImportType ? "Is" : "Not")
+ << " importing function"
+ << (ImportDefinition
+ ? " definition "
+ : (MaybeImportType ? " declaration " : " "))
<< GUID << " " << F.getName() << " from "
<< SrcModule->getSourceFileName() << "\n");
- if (Import) {
+ if (ImportDefinition) {
if (Error Err = F.materialize())
return std::move(Err);
// MemProf should match function's definition and summary,
@@ -1670,11 +1798,20 @@ Expected<bool> FunctionImporter::importFunctions(
if (!GV.hasName())
continue;
auto GUID = GV.getGUID();
- auto Import = ImportGUIDs.count(GUID);
- LLVM_DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing global "
+ auto MaybeImportType = getImportType(ImportGUIDs, GUID);
+
+ bool ImportDefinition =
+ (MaybeImportType &&
+ (*MaybeImportType == GlobalValueSummary::Definition));
+
+ LLVM_DEBUG(dbgs() << (MaybeImportType ? "Is" : "Not")
+ << " importing global"
+ << (ImportDefinition
+ ? " definition "
+ : (MaybeImportType ? " declaration " : " "))
<< GUID << " " << GV.getName() << " from "
<< SrcModule->getSourceFileName() << "\n");
- if (Import) {
+ if (ImportDefinition) {
if (Error Err = GV.materialize())
return std::move(Err);
ImportedGVCount += GlobalsToImport.insert(&GV);
@@ -1684,11 +1821,20 @@ Expected<bool> FunctionImporter::importFunctions(
if (!GA.hasName() || isa<GlobalIFunc>(GA.getAliaseeObject()))
continue;
auto GUID = GA.getGUID();
- auto Import = ImportGUIDs.count(GUID);
- LLVM_DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing alias "
+ auto MaybeImportType = getImportType(ImportGUIDs, GUID);
+
+ bool ImportDefinition =
+ (MaybeImportType &&
+ (*MaybeImportType == GlobalValueSummary::Definition));
+
+ LLVM_DEBUG(dbgs() << (MaybeImportType ? "Is" : "Not")
+ << " importing alias"
+ << (ImportDefinition
+ ? " definition "
+ : (MaybeImportType ? " declaration " : " "))
<< GUID << " " << GA.getName() << " from "
<< SrcModule->getSourceFileName() << "\n");
- if (Import) {
+ if (ImportDefinition) {
if (Error Err = GA.materialize())
return std::move(Err);
// Import alias as a copy of its aliasee.
@@ -1754,6 +1900,7 @@ Expected<bool> FunctionImporter::importFunctions(
NumImportedFunctions += (ImportedCount - ImportedGVCount);
NumImportedGlobalVars += ImportedGVCount;
+ // TODO: Print counters for definitions and declarations in the debugging log.
LLVM_DEBUG(dbgs() << "Imported " << ImportedCount - ImportedGVCount
<< " functions for Module "
<< DestModule.getModuleIdentifier() << "\n");
diff --git a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
index b9d84d583f49..c53b9451625c 100644
--- a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
+++ b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
@@ -1889,15 +1889,17 @@ bool ModuleCallsiteContextGraph::findProfiledCalleeThroughTailCalls(
} else if (findProfiledCalleeThroughTailCalls(
ProfiledCallee, CalledFunction, Depth + 1,
FoundCalleeChain, FoundMultipleCalleeChains)) {
- if (FoundMultipleCalleeChains)
- return false;
+ // findProfiledCalleeThroughTailCalls should not have returned
+ // true if FoundMultipleCalleeChains.
+ assert(!FoundMultipleCalleeChains);
if (FoundSingleCalleeChain) {
FoundMultipleCalleeChains = true;
return false;
}
FoundSingleCalleeChain = true;
SaveCallsiteInfo(&I, CalleeFunc);
- }
+ } else if (FoundMultipleCalleeChains)
+ return false;
}
}
@@ -2004,8 +2006,9 @@ bool IndexCallsiteContextGraph::findProfiledCalleeThroughTailCalls(
} else if (findProfiledCalleeThroughTailCalls(
ProfiledCallee, CallEdge.first, Depth + 1,
FoundCalleeChain, FoundMultipleCalleeChains)) {
- if (FoundMultipleCalleeChains)
- return false;
+ // findProfiledCalleeThroughTailCalls should not have returned
+ // true if FoundMultipleCalleeChains.
+ assert(!FoundMultipleCalleeChains);
if (FoundSingleCalleeChain) {
FoundMultipleCalleeChains = true;
return false;
@@ -2015,7 +2018,8 @@ bool IndexCallsiteContextGraph::findProfiledCalleeThroughTailCalls(
// Add FS to FSToVIMap in case it isn't already there.
assert(!FSToVIMap.count(FS) || FSToVIMap[FS] == FSVI);
FSToVIMap[FS] = FSVI;
- }
+ } else if (FoundMultipleCalleeChains)
+ return false;
}
}
diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
index eea9399127e8..e3a4821b8226 100644
--- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
+++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
@@ -4238,7 +4238,7 @@ struct AAKernelInfoFunction : AAKernelInfo {
ORA << "Value has potential side effects preventing SPMD-mode "
"execution";
if (isa<CallBase>(NonCompatibleI)) {
- ORA << ". Add `__attribute__((assume(\"ompx_spmd_amenable\")))` to "
+ ORA << ". Add `[[omp::assume(\"ompx_spmd_amenable\")]]` to "
"the called function to override";
}
return ORA << ".";
@@ -4380,7 +4380,7 @@ struct AAKernelInfoFunction : AAKernelInfo {
continue;
auto Remark = [&](OptimizationRemarkAnalysis ORA) {
return ORA << "Call may contain unknown parallel regions. Use "
- << "`__attribute__((assume(\"omp_no_parallelism\")))` to "
+ << "`[[omp::assume(\"omp_no_parallelism\")]]` to "
"override.";
};
A.emitRemark<OptimizationRemarkAnalysis>(UnknownParallelRegionCB,
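The remark text now suggests the C++11-attribute spelling instead of the GNU one. A sketch of where a user would apply it (the function names are made up; compile with an OpenMP-enabled clang):

// Promises OpenMPOpt that this callee will not break SPMD-mode execution.
[[omp::assume("ompx_spmd_amenable")]] void deviceHelper() {}

void kernelBody() {
  deviceHelper();  // no longer blocks the SPMD-mode deduction above
}

int main() { kernelBody(); }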
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index cdec8c8c7c80..b6f339da31f7 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -2424,17 +2424,6 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
break;
}
case Intrinsic::fmuladd: {
- // Canonicalize fast fmuladd to the separate fmul + fadd.
- if (II->isFast()) {
- BuilderTy::FastMathFlagGuard Guard(Builder);
- Builder.setFastMathFlags(II->getFastMathFlags());
- Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
- II->getArgOperand(1));
- Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2));
- Add->takeName(II);
- return replaceInstUsesWith(*II, Add);
- }
-
// Try to simplify the underlying FMul.
if (Value *V = simplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1),
II->getFastMathFlags(),
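Dropping the early fmuladd split matters because llvm.fmuladd may legally lower either fused or unfused, and the two can round differently. A small demonstration of that double-rounding gap (compile with -ffp-contract=off so the compiler does not re-fuse the split form):

#include <cmath>
#include <cstdio>

int main() {
  double a = 1.0 + 0x1p-27, b = 1.0 - 0x1p-27, c = -1.0;
  double fused = std::fma(a, b, c);  // single rounding: exactly -0x1p-54
  double split = a * b + c;          // the product rounds to 1.0 first: 0.0
  printf("fused=%a split=%a\n", fused, split);
}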
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 11e31877de38..1b4c319032ca 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -2049,16 +2049,28 @@ Instruction *InstCombinerImpl::visitPtrToInt(PtrToIntInst &CI) {
Mask->getType() == Ty)
return BinaryOperator::CreateAnd(Builder.CreatePtrToInt(Ptr, Ty), Mask);
- if (auto *GEP = dyn_cast<GetElementPtrInst>(SrcOp)) {
+ if (auto *GEP = dyn_cast<GEPOperator>(SrcOp)) {
// Fold ptrtoint(gep null, x) to multiply + constant if the GEP has one use.
// While this can increase the number of instructions it doesn't actually
// increase the overall complexity since the arithmetic is just part of
// the GEP otherwise.
if (GEP->hasOneUse() &&
isa<ConstantPointerNull>(GEP->getPointerOperand())) {
- return replaceInstUsesWith(
- CI, Builder.CreateIntCast(EmitGEPOffset(cast<GEPOperator>(GEP)), Ty,
- /*isSigned=*/false));
+ return replaceInstUsesWith(CI,
+ Builder.CreateIntCast(EmitGEPOffset(GEP), Ty,
+ /*isSigned=*/false));
+ }
+
+ // (ptrtoint (gep (inttoptr Base), ...)) -> Base + Offset
+ Value *Base;
+ if (GEP->hasOneUse() &&
+ match(GEP->getPointerOperand(), m_OneUse(m_IntToPtr(m_Value(Base)))) &&
+ Base->getType() == Ty) {
+ Value *Offset = EmitGEPOffset(GEP);
+ auto *NewOp = BinaryOperator::CreateAdd(Base, Offset);
+ if (GEP->isInBounds() && isKnownNonNegative(Offset, SQ))
+ NewOp->setHasNoUnsignedWrap(true);
+ return NewOp;
}
}
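The new ptrtoint(gep (inttoptr Base), ...) fold reduces to plain integer arithmetic. A sketch with concrete numbers; the helper name and the fixed element size are illustrative:

#include <cassert>
#include <cstdint>

// ptrtoint(gep(inttoptr(Base), Idx)) == Base + Idx * sizeof(element);
// the add may carry nuw only for an inbounds gep with non-negative offset.
uint64_t foldPtrToIntOfGEP(uint64_t Base, int64_t Idx, uint64_t EltSize) {
  int64_t Offset = Idx * (int64_t)EltSize;  // what EmitGEPOffset computes
  return Base + (uint64_t)Offset;           // the replacement add
}

int main() {
  // (ptrtoint (gep i32, (inttoptr 0x1000), i64 3)) --> 0x100c
  assert(foldPtrToIntOfGEP(0x1000, 3, 4) == 0x100c);
}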
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 542a1c82b127..430f3e12fa5b 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -214,6 +214,9 @@ Instruction *InstCombinerImpl::foldCmpLoadFromIndexedGlobal(
// Find out if the comparison would be true or false for the i'th element.
Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
CompareRHS, DL, &TLI);
+ if (!C)
+ return nullptr;
+
// If the result is undef for this element, ignore it.
if (isa<UndefValue>(C)) {
// Extend range state machines to cover this element in case there is an
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 4351a55ea1d3..832f89ed0b64 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -332,7 +332,7 @@ bool PointerReplacer::collectUsersRecursive(Instruction &I) {
Worklist.insert(SI);
if (!collectUsersRecursive(*SI))
return false;
- } else if (isa<GetElementPtrInst, BitCastInst>(Inst)) {
+ } else if (isa<GetElementPtrInst>(Inst)) {
Worklist.insert(Inst);
if (!collectUsersRecursive(*Inst))
return false;
@@ -393,15 +393,6 @@ void PointerReplacer::replace(Instruction *I) {
NewI->takeName(GEP);
NewI->setIsInBounds(GEP->isInBounds());
WorkMap[GEP] = NewI;
- } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
- auto *V = getReplacement(BC->getOperand(0));
- assert(V && "Operand not replaced");
- auto *NewT = PointerType::get(BC->getType()->getContext(),
- V->getType()->getPointerAddressSpace());
- auto *NewI = new BitCastInst(V, NewT);
- IC.InsertNewInstWith(NewI, BC->getIterator());
- NewI->takeName(BC);
- WorkMap[BC] = NewI;
} else if (auto *SI = dyn_cast<SelectInst>(I)) {
auto *NewSI = SelectInst::Create(
SI->getCondition(), getReplacement(SI->getTrueValue()),
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index ba297111d945..0f1979fbe0c7 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -1174,7 +1174,11 @@ Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) {
// X & (CC << C)
Value *M = Builder.CreateAnd(X, ConstantInt::get(Ty, CC->shl(*C)),
X->getName() + ".mask");
- return BinaryOperator::Create(Op0BO->getOpcode(), M, YS);
+ auto *NewOp = BinaryOperator::Create(Op0BO->getOpcode(), M, YS);
+ if (auto *Disjoint = dyn_cast<PossiblyDisjointInst>(Op0BO);
+ Disjoint && Disjoint->isDisjoint())
+ cast<PossiblyDisjointInst>(NewOp)->setIsDisjoint(true);
+ return NewOp;
}
}
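Propagating the disjoint flag here is sound because shifting both sides of a disjoint or by the same amount cannot create overlapping bits, and masking one side only removes bits. A quick numeric check with arbitrary values:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t A = 0b0101, B = 0b1010;  // disjoint: A & B == 0
  unsigned C = 3;
  assert(((A << C) & (B << C)) == 0);               // still disjoint
  assert(((A << C) | (B << C)) == ((A | B) << C));  // same combined value
}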
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 99f1f8eb34bb..244f099f0654 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -619,7 +619,7 @@ static bool collectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
"Invalid CollectSingleShuffleElements");
unsigned NumElts = cast<FixedVectorType>(V->getType())->getNumElements();
- if (match(V, m_Undef())) {
+ if (match(V, m_Poison())) {
Mask.assign(NumElts, -1);
return true;
}
@@ -1319,7 +1319,7 @@ static Instruction *foldInsEltIntoSplat(InsertElementInst &InsElt) {
static Instruction *foldInsEltIntoIdentityShuffle(InsertElementInst &InsElt) {
// Check if the vector operand of this insert is an identity shuffle.
auto *Shuf = dyn_cast<ShuffleVectorInst>(InsElt.getOperand(0));
- if (!Shuf || !match(Shuf->getOperand(1), m_Undef()) ||
+ if (!Shuf || !match(Shuf->getOperand(1), m_Poison()) ||
!(Shuf->isIdentityWithExtract() || Shuf->isIdentityWithPadding()))
return nullptr;
@@ -2214,19 +2214,19 @@ static Instruction *canonicalizeInsertSplat(ShuffleVectorInst &Shuf,
uint64_t IndexC;
// Match a shuffle that is a splat to a non-zero element.
- if (!match(Op0, m_OneUse(m_InsertElt(m_Undef(), m_Value(X),
+ if (!match(Op0, m_OneUse(m_InsertElt(m_Poison(), m_Value(X),
m_ConstantInt(IndexC)))) ||
- !match(Op1, m_Undef()) || match(Mask, m_ZeroMask()) || IndexC == 0)
+ !match(Op1, m_Poison()) || match(Mask, m_ZeroMask()) || IndexC == 0)
return nullptr;
// Insert into element 0 of a poison vector.
PoisonValue *PoisonVec = PoisonValue::get(Shuf.getType());
Value *NewIns = Builder.CreateInsertElement(PoisonVec, X, (uint64_t)0);
- // Splat from element 0. Any mask element that is undefined remains undefined.
+ // Splat from element 0. Any mask element that is poison remains poison.
// For example:
- // shuf (inselt undef, X, 2), _, <2,2,undef>
- // --> shuf (inselt undef, X, 0), poison, <0,0,undef>
+ // shuf (inselt poison, X, 2), _, <2,2,undef>
+ // --> shuf (inselt poison, X, 0), poison, <0,0,undef>
unsigned NumMaskElts =
cast<FixedVectorType>(Shuf.getType())->getNumElements();
SmallVector<int, 16> NewMask(NumMaskElts, 0);
@@ -2383,7 +2383,7 @@ static Instruction *foldTruncShuffle(ShuffleVectorInst &Shuf,
Type *DestType = Shuf.getType();
Value *X;
if (!match(Shuf.getOperand(0), m_BitCast(m_Value(X))) ||
- !match(Shuf.getOperand(1), m_Undef()) || !DestType->isIntOrIntVectorTy())
+ !match(Shuf.getOperand(1), m_Poison()) || !DestType->isIntOrIntVectorTy())
return nullptr;
// The source type must have the same number of elements as the shuffle,
@@ -2416,13 +2416,13 @@ static Instruction *foldTruncShuffle(ShuffleVectorInst &Shuf,
}
/// Match a shuffle-select-shuffle pattern where the shuffles are widening and
-/// narrowing (concatenating with undef and extracting back to the original
+/// narrowing (concatenating with poison and extracting back to the original
/// length). This allows replacing the wide select with a narrow select.
static Instruction *narrowVectorSelect(ShuffleVectorInst &Shuf,
InstCombiner::BuilderTy &Builder) {
// This must be a narrowing identity shuffle. It extracts the 1st N elements
// of the 1st vector operand of a shuffle.
- if (!match(Shuf.getOperand(1), m_Undef()) || !Shuf.isIdentityWithExtract())
+ if (!match(Shuf.getOperand(1), m_Poison()) || !Shuf.isIdentityWithExtract())
return nullptr;
// The vector being shuffled must be a vector select that we can eliminate.
@@ -2432,19 +2432,20 @@ static Instruction *narrowVectorSelect(ShuffleVectorInst &Shuf,
m_OneUse(m_Select(m_Value(Cond), m_Value(X), m_Value(Y)))))
return nullptr;
- // We need a narrow condition value. It must be extended with undef elements
+ // We need a narrow condition value. It must be extended with poison elements
// and have the same number of elements as this shuffle.
unsigned NarrowNumElts =
cast<FixedVectorType>(Shuf.getType())->getNumElements();
Value *NarrowCond;
- if (!match(Cond, m_OneUse(m_Shuffle(m_Value(NarrowCond), m_Undef()))) ||
+ if (!match(Cond, m_OneUse(m_Shuffle(m_Value(NarrowCond), m_Poison()))) ||
cast<FixedVectorType>(NarrowCond->getType())->getNumElements() !=
NarrowNumElts ||
!cast<ShuffleVectorInst>(Cond)->isIdentityWithPadding())
return nullptr;
- // shuf (sel (shuf NarrowCond, undef, WideMask), X, Y), undef, NarrowMask) -->
- // sel NarrowCond, (shuf X, undef, NarrowMask), (shuf Y, undef, NarrowMask)
+ // shuf (sel (shuf NarrowCond, poison, WideMask), X, Y), poison, NarrowMask)
+ // -->
+ // sel NarrowCond, (shuf X, poison, NarrowMask), (shuf Y, poison, NarrowMask)
Value *NarrowX = Builder.CreateShuffleVector(X, Shuf.getShuffleMask());
Value *NarrowY = Builder.CreateShuffleVector(Y, Shuf.getShuffleMask());
return SelectInst::Create(NarrowCond, NarrowX, NarrowY);
@@ -2462,7 +2463,7 @@ static Instruction *foldShuffleOfUnaryOps(ShuffleVectorInst &Shuf,
// Match 1-input (unary) shuffle.
// shuffle (fneg/fabs X), Mask --> fneg/fabs (shuffle X, Mask)
- if (S0->hasOneUse() && match(Shuf.getOperand(1), m_Undef())) {
+ if (S0->hasOneUse() && match(Shuf.getOperand(1), m_Poison())) {
Value *NewShuf = Builder.CreateShuffleVector(X, Shuf.getShuffleMask());
if (IsFNeg)
return UnaryOperator::CreateFNegFMF(NewShuf, S0);
@@ -2549,7 +2550,7 @@ static Instruction *foldCastShuffle(ShuffleVectorInst &Shuf,
/// Try to fold an extract subvector operation.
static Instruction *foldIdentityExtractShuffle(ShuffleVectorInst &Shuf) {
Value *Op0 = Shuf.getOperand(0), *Op1 = Shuf.getOperand(1);
- if (!Shuf.isIdentityWithExtract() || !match(Op1, m_Undef()))
+ if (!Shuf.isIdentityWithExtract() || !match(Op1, m_Poison()))
return nullptr;
// Check if we are extracting all bits of an inserted scalar:
@@ -2578,10 +2579,10 @@ static Instruction *foldIdentityExtractShuffle(ShuffleVectorInst &Shuf) {
// not allow arbitrary shuffle mask creation as a target-independent transform
// (because we can't guarantee that will lower efficiently).
//
- // If the extracting shuffle has an undef mask element, it transfers to the
+ // If the extracting shuffle has a poison mask element, it transfers to the
// new shuffle mask. Otherwise, copy the original mask element. Example:
- // shuf (shuf X, Y, <C0, C1, C2, undef, C4>), undef, <0, undef, 2, 3> -->
- // shuf X, Y, <C0, undef, C2, undef>
+ // shuf (shuf X, Y, <C0, C1, C2, poison, C4>), poison, <0, poison, 2, 3> -->
+ // shuf X, Y, <C0, poison, C2, poison>
unsigned NumElts = cast<FixedVectorType>(Shuf.getType())->getNumElements();
SmallVector<int, 16> NewMask(NumElts);
assert(NumElts < Mask.size() &&
@@ -2755,17 +2756,17 @@ static Instruction *foldIdentityPaddedShuffles(ShuffleVectorInst &Shuf) {
// BinOp's operands are the result of a first element splat can be simplified to
// splatting the first element of the result of the BinOp
Instruction *InstCombinerImpl::simplifyBinOpSplats(ShuffleVectorInst &SVI) {
- if (!match(SVI.getOperand(1), m_Undef()) ||
+ if (!match(SVI.getOperand(1), m_Poison()) ||
!match(SVI.getShuffleMask(), m_ZeroMask()) ||
!SVI.getOperand(0)->hasOneUse())
return nullptr;
Value *Op0 = SVI.getOperand(0);
Value *X, *Y;
- if (!match(Op0, m_BinOp(m_Shuffle(m_Value(X), m_Undef(), m_ZeroMask()),
+ if (!match(Op0, m_BinOp(m_Shuffle(m_Value(X), m_Poison(), m_ZeroMask()),
m_Value(Y))) &&
!match(Op0, m_BinOp(m_Value(X),
- m_Shuffle(m_Value(Y), m_Undef(), m_ZeroMask()))))
+ m_Shuffle(m_Value(Y), m_Poison(), m_ZeroMask()))))
return nullptr;
if (X->getType() != Y->getType())
return nullptr;
@@ -2901,7 +2902,7 @@ Instruction *InstCombinerImpl::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
if (Instruction *I = foldIdentityPaddedShuffles(SVI))
return I;
- if (match(RHS, m_Undef()) && canEvaluateShuffled(LHS, Mask)) {
+ if (match(RHS, m_Poison()) && canEvaluateShuffled(LHS, Mask)) {
Value *V = evaluateInDifferentElementOrder(LHS, Mask, Builder);
return replaceInstUsesWith(SVI, V);
}
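
These InstCombineVectorOps hunks tighten shuffle matchers from m_Undef() to m_Poison(). Since PoisonValue is a subclass of UndefValue, m_Undef() also matches poison; m_Poison() matches only poison, restricting the folds to the case where rewriting the unused lanes is unconditionally sound. A minimal sketch of the shape narrowVectorSelect now requires (illustrative, not part of the patch):

#include "llvm/IR/Instructions.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// A narrowing identity shuffle whose unused second operand is poison.
static bool isNarrowingIdentityWithPoisonRHS(ShuffleVectorInst &Shuf) {
  return match(Shuf.getOperand(1), m_Poison()) &&
         Shuf.isIdentityWithExtract();
}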
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 6c25ff215c37..eb48157af009 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -5000,31 +5000,24 @@ bool InstCombinerImpl::run() {
BasicBlock *UserParent = nullptr;
unsigned NumUsers = 0;
- for (auto *U : I->users()) {
- if (U->isDroppable())
+ for (Use &U : I->uses()) {
+ User *User = U.getUser();
+ if (User->isDroppable())
continue;
if (NumUsers > MaxSinkNumUsers)
return std::nullopt;
- Instruction *UserInst = cast<Instruction>(U);
+ Instruction *UserInst = cast<Instruction>(User);
// Special handling for Phi nodes - get the block the use occurs in.
- if (PHINode *PN = dyn_cast<PHINode>(UserInst)) {
- for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
- if (PN->getIncomingValue(i) == I) {
- // Bail out if we have uses in different blocks. We don't do any
- // sophisticated analysis (i.e finding NearestCommonDominator of
- // these use blocks).
- if (UserParent && UserParent != PN->getIncomingBlock(i))
- return std::nullopt;
- UserParent = PN->getIncomingBlock(i);
- }
- }
- assert(UserParent && "expected to find user block!");
- } else {
- if (UserParent && UserParent != UserInst->getParent())
- return std::nullopt;
- UserParent = UserInst->getParent();
- }
+ BasicBlock *UserBB = UserInst->getParent();
+ if (PHINode *PN = dyn_cast<PHINode>(UserInst))
+ UserBB = PN->getIncomingBlock(U);
+ // Bail out if we have uses in different blocks. We don't do any
+ // sophisticated analysis (i.e., finding the NearestCommonDominator of these
+ // use blocks).
+ if (UserParent && UserParent != UserBB)
+ return std::nullopt;
+ UserParent = UserBB;
// Make sure these checks are done only once, naturally we do the checks
// the first time we get the userparent, this will save compile time.
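
Switching from users() to uses() is what makes the PHI case above a one-liner: PHINode::getIncomingBlock has an overload taking the Use itself. A minimal sketch of the helper logic, assuming only that U is a use of the instruction being sunk:

#include "llvm/IR/Instructions.h"
using namespace llvm;

// The block a use "occurs in": for a PHI it is the incoming block of
// that particular operand, not the PHI's own parent block.
static BasicBlock *getUseBlock(Use &U) {
  auto *UserInst = cast<Instruction>(U.getUser());
  if (auto *PN = dyn_cast<PHINode>(UserInst))
    return PN->getIncomingBlock(U);
  return UserInst->getParent();
}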
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index 8d39217992c7..2aa21759d56e 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -1589,6 +1589,14 @@ void HWAddressSanitizer::sanitizeFunction(Function &F,
assert(!ShadowBase);
+ // Remove memory attributes that are about to become invalid.
+ // HWASan checks read from shadow, which invalidates memory(argmem: *).
+ // Short granule checks on function arguments read from the argument memory
+ // (last byte of the granule), which invalidates writeonly.
+ F.removeFnAttr(llvm::Attribute::Memory);
+ for (auto &A : F.args())
+ A.removeAttr(llvm::Attribute::WriteOnly);
+
BasicBlock::iterator InsertPt = F.getEntryBlock().begin();
IRBuilder<> EntryIRB(&F.getEntryBlock(), InsertPt);
emitPrologue(EntryIRB,
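
Restated: instrumentation that loads from shadow makes location-based memory(...) claims unsound, and short-granule checks read the last byte of each argument's granule, so writeonly on pointer arguments must go too. A sketch of the stripping step in isolation (names illustrative):

#include "llvm/IR/Function.h"
using namespace llvm;

static void dropStaleMemoryAttrs(Function &F) {
  F.removeFnAttr(Attribute::Memory);    // shadow loads escape argmem claims
  for (Argument &A : F.args())
    A.removeAttr(Attribute::WriteOnly); // short-granule checks read args
}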
diff --git a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
index 7e48c28176bd..70bfa469193b 100644
--- a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
@@ -554,6 +554,12 @@ static Decomposition decompose(Value *V,
V = Op0;
}
+ if (match(V, m_SExt(m_Value(Op0)))) {
+ V = Op0;
+ Preconditions.emplace_back(CmpInst::ICMP_SGE, Op0,
+ ConstantInt::get(Op0->getType(), 0));
+ }
+
Value *Op1;
ConstantInt *CI;
if (match(V, m_NUWAdd(m_Value(Op0), m_Value(Op1)))) {
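
The recorded precondition is what keeps the new sext case sound: sext and zext agree exactly when the operand is non-negative, so facts decomposed through the extension only hold under Op0 >=s 0. A small APInt demonstration of the boundary (illustrative):

#include "llvm/ADT/APInt.h"
#include <cassert>
using namespace llvm;

void sextPreconditionDemo() {
  APInt Pos(8, 5), Neg(8, -1, /*isSigned=*/true);
  assert(Pos.sext(16) == Pos.zext(16)); // 5 >=s 0: both give 0x0005
  assert(Neg.sext(16) != Neg.zext(16)); // -1: 0xFFFF vs 0x00FF
}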
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index ed4212d29cef..172cce2cfa38 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -2109,10 +2109,12 @@ struct DSEState {
if (auto *MemSetI = dyn_cast<MemSetInst>(UpperInst)) {
if (auto *SI = dyn_cast<StoreInst>(DefInst)) {
// MemSetInst must have a write location.
- MemoryLocation UpperLoc = *getLocForWrite(UpperInst);
+ auto UpperLoc = getLocForWrite(UpperInst);
+ if (!UpperLoc)
+ return false;
int64_t InstWriteOffset = 0;
int64_t DepWriteOffset = 0;
- auto OR = isOverwrite(UpperInst, DefInst, UpperLoc, *MaybeDefLoc,
+ auto OR = isOverwrite(UpperInst, DefInst, *UpperLoc, *MaybeDefLoc,
InstWriteOffset, DepWriteOffset);
Value *StoredByte = isBytewiseValue(SI->getValueOperand(), DL);
return StoredByte && StoredByte == MemSetI->getOperand(1) &&
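
getLocForWrite returns std::optional<MemoryLocation>, and the old code dereferenced it unconditionally; the fix is the usual check-then-use pattern. A generic sketch with hypothetical stand-in names:

#include <optional>

static std::optional<int> tryGetWriteLoc(bool Known) {
  return Known ? std::optional<int>(42) : std::nullopt;
}

static bool isOverwriteDemo(bool Known) {
  auto Loc = tryGetWriteLoc(Known);
  if (!Loc)
    return false; // bail out instead of dereferencing an empty optional
  return *Loc == 42;
}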
diff --git a/llvm/lib/Transforms/Scalar/DivRemPairs.cpp b/llvm/lib/Transforms/Scalar/DivRemPairs.cpp
index 45f36a36b5dd..f7ada9fb8eb8 100644
--- a/llvm/lib/Transforms/Scalar/DivRemPairs.cpp
+++ b/llvm/lib/Transforms/Scalar/DivRemPairs.cpp
@@ -381,8 +381,7 @@ static bool optimizeDivRem(Function &F, const TargetTransformInfo &TTI,
// %mul = mul %div, 1 // %mul = undef
// %rem = sub %x, %mul // %rem = undef - undef = undef
// If X is not frozen, %rem becomes undef after transformation.
- // TODO: We need a undef-specific checking function in ValueTracking
- if (!isGuaranteedNotToBeUndefOrPoison(X, nullptr, DivInst, &DT)) {
+ if (!isGuaranteedNotToBeUndef(X, nullptr, DivInst, &DT)) {
auto *FrX =
new FreezeInst(X, X->getName() + ".frozen", DivInst->getIterator());
DivInst->setOperand(0, FrX);
@@ -390,7 +389,7 @@ static bool optimizeDivRem(Function &F, const TargetTransformInfo &TTI,
}
// Same for Y. If X = 1 and Y = (undef | 1), %rem in src is either 1 or 0,
// but %rem in tgt can be one of many integer values.
- if (!isGuaranteedNotToBeUndefOrPoison(Y, nullptr, DivInst, &DT)) {
+ if (!isGuaranteedNotToBeUndef(Y, nullptr, DivInst, &DT)) {
auto *FrY =
new FreezeInst(Y, Y->getName() + ".frozen", DivInst->getIterator());
DivInst->setOperand(1, FrY);
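
isGuaranteedNotToBeUndef is the narrower query the comments call for: poison needs no freeze here because it propagates uniformly through the rewritten mul/sub chain, whereas undef may take a different value at each use. A sketch of the resulting idiom, mirroring the call sites above:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Freeze only when the value may be undef; poison alone is fine.
static Value *freezeIfMaybeUndef(Value *X, Instruction *InsertPt,
                                 const DominatorTree &DT) {
  if (isGuaranteedNotToBeUndef(X, /*AC=*/nullptr, InsertPt, &DT))
    return X;
  return new FreezeInst(X, X->getName() + ".frozen", InsertPt->getIterator());
}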
diff --git a/llvm/lib/Transforms/Scalar/GVNSink.cpp b/llvm/lib/Transforms/Scalar/GVNSink.cpp
index 7a183e4d3aa8..3dfa2dd9df27 100644
--- a/llvm/lib/Transforms/Scalar/GVNSink.cpp
+++ b/llvm/lib/Transforms/Scalar/GVNSink.cpp
@@ -132,7 +132,7 @@ public:
ActiveBlocks.remove(BB);
continue;
}
- Insts.push_back(BB->getTerminator()->getPrevNode());
+ Insts.push_back(BB->getTerminator()->getPrevNonDebugInstruction());
}
if (Insts.empty())
Fail = true;
@@ -168,7 +168,7 @@ public:
if (Inst == &Inst->getParent()->front())
ActiveBlocks.remove(Inst->getParent());
else
- NewInsts.push_back(Inst->getPrevNode());
+ NewInsts.push_back(Inst->getPrevNonDebugInstruction());
}
if (NewInsts.empty()) {
Fail = true;
@@ -883,7 +883,7 @@ void GVNSink::sinkLastInstruction(ArrayRef<BasicBlock *> Blocks,
BasicBlock *BBEnd) {
SmallVector<Instruction *, 4> Insts;
for (BasicBlock *BB : Blocks)
- Insts.push_back(BB->getTerminator()->getPrevNode());
+ Insts.push_back(BB->getTerminator()->getPrevNonDebugInstruction());
Instruction *I0 = Insts.front();
SmallVector<Value *, 4> NewOperands;
@@ -921,8 +921,10 @@ void GVNSink::sinkLastInstruction(ArrayRef<BasicBlock *> Blocks,
}
for (auto *I : Insts)
- if (I != I0)
+ if (I != I0) {
I->replaceAllUsesWith(I0);
+ I0->applyMergedLocation(I0->getDebugLoc(), I->getDebugLoc());
+ }
foldPointlessPHINodes(BBEnd);
// Finally nuke all instructions apart from the common instruction.
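
Two debug-info hygiene fixes land here at once: candidate selection now skips debug intrinsics, so sinking decisions no longer change under -g, and the surviving instruction takes a merged location rather than keeping one arm's source line. Both pieces in isolation (a sketch, not the pass itself):

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

static Instruction *sinkCandidate(BasicBlock *BB) {
  // getPrevNode() could land on a debug intrinsic; this cannot.
  return BB->getTerminator()->getPrevNonDebugInstruction();
}

static void mergeInto(Instruction *I0, Instruction *I) {
  I->replaceAllUsesWith(I0);
  // Keep a location only where all merged copies agree.
  I0->applyMergedLocation(I0->getDebugLoc(), I->getDebugLoc());
}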
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 7ef5dceffec0..8fe3780bcf1b 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -983,10 +983,8 @@ bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
append_range(srcUseList, U->users());
continue;
}
- if (const auto *G = dyn_cast<GetElementPtrInst>(U)) {
- if (!G->hasAllZeroIndices())
- return false;
-
+ if (const auto *G = dyn_cast<GetElementPtrInst>(U);
+ G && G->hasAllZeroIndices()) {
append_range(srcUseList, U->users());
continue;
}
@@ -994,8 +992,10 @@ bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
if (IT->isLifetimeStartOrEnd())
continue;
- if (U != C && U != cpyLoad)
+ if (U != C && U != cpyLoad) {
+ LLVM_DEBUG(dbgs() << "Call slot: Source accessed by " << *U << "\n");
return false;
+ }
}
// Check whether src is captured by the called function, in which case there
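
The added message follows the usual LLVM_DEBUG discipline: it compiles away in release builds and is otherwise gated behind -debug-only=<DEBUG_TYPE>, so bail-out diagnostics like this are free in production. Minimal shape:

#include "llvm/IR/Value.h"
#include "llvm/Support/Debug.h"
#define DEBUG_TYPE "memcpyopt"
using namespace llvm;

static bool bailWithNote(const Value &U) {
  LLVM_DEBUG(dbgs() << "Call slot: Source accessed by " << U << "\n");
  return false;
}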
diff --git a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
index 308622615332..224cd24915fa 100644
--- a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
+++ b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
@@ -519,6 +519,7 @@ Instruction *NaryReassociatePass::tryReassociatedBinaryOp(const SCEV *LHSExpr,
default:
llvm_unreachable("Unexpected instruction.");
}
+ NewI->setDebugLoc(I->getDebugLoc());
NewI->takeName(I);
return NewI;
}
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 096c6d1b1fad..756daf5bb41f 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -630,7 +630,7 @@ public:
int OldSize = Slices.size();
Slices.append(NewSlices.begin(), NewSlices.end());
auto SliceI = Slices.begin() + OldSize;
- llvm::sort(SliceI, Slices.end());
+ std::stable_sort(SliceI, Slices.end());
std::inplace_merge(Slices.begin(), SliceI, Slices.end());
}
@@ -5122,7 +5122,7 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
}
if (!IsSorted)
- llvm::sort(AS);
+ llvm::stable_sort(AS);
/// Describes the allocas introduced by rewritePartition in order to migrate
/// the debug info.
diff --git a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
index 7ac1f43b7b6a..471c7ca4d735 100644
--- a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
+++ b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -972,14 +972,9 @@ SeparateConstOffsetFromGEP::lowerToArithmetics(GetElementPtrInst *Variadic,
bool SeparateConstOffsetFromGEP::reorderGEP(GetElementPtrInst *GEP,
TargetTransformInfo &TTI) {
- if (GEP->getNumIndices() != 1)
- return false;
-
auto PtrGEP = dyn_cast<GetElementPtrInst>(GEP->getPointerOperand());
if (!PtrGEP)
return false;
- if (PtrGEP->getNumIndices() != 1)
- return false;
bool NestedNeedsExtraction;
int64_t NestedByteOffset =
@@ -997,14 +992,12 @@ bool SeparateConstOffsetFromGEP::reorderGEP(GetElementPtrInst *GEP,
bool PtrGEPInBounds = PtrGEP->isInBounds();
bool IsChainInBounds = GEPInBounds && PtrGEPInBounds;
if (IsChainInBounds) {
- auto GEPIdx = GEP->indices().begin();
- auto KnownGEPIdx = computeKnownBits(GEPIdx->get(), *DL);
- IsChainInBounds &= KnownGEPIdx.isNonNegative();
- if (IsChainInBounds) {
- auto PtrGEPIdx = PtrGEP->indices().begin();
- auto KnownPtrGEPIdx = computeKnownBits(PtrGEPIdx->get(), *DL);
- IsChainInBounds &= KnownPtrGEPIdx.isNonNegative();
- }
+ auto IsKnownNonNegative = [this](Value *V) {
+ return isKnownNonNegative(V, *DL);
+ };
+ IsChainInBounds &= all_of(GEP->indices(), IsKnownNonNegative);
+ if (IsChainInBounds)
+ IsChainInBounds &= all_of(PtrGEP->indices(), IsKnownNonNegative);
}
IRBuilder<> Builder(GEP);
@@ -1109,8 +1102,9 @@ bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
//
// TODO(jingyue): do some range analysis to keep as many inbounds as
// possible. GEPs with inbounds are more friendly to alias analysis.
+ // TODO(gep_nowrap): Preserve nuw at least.
bool GEPWasInBounds = GEP->isInBounds();
- GEP->setIsInBounds(false);
+ GEP->setNoWrapFlags(GEPNoWrapFlags::none());
// Lowers a GEP to either GEPs with a single index or arithmetic operations.
if (LowerGEP) {
@@ -1384,8 +1378,9 @@ void SeparateConstOffsetFromGEP::swapGEPOperand(GetElementPtrInst *First,
uint64_t ObjectSize;
if (!getObjectSize(NewBase, ObjectSize, DAL, TLI) ||
Offset.ugt(ObjectSize)) {
- First->setIsInBounds(false);
- Second->setIsInBounds(false);
+ // TODO(gep_nowrap): Make flag preservation more precise.
+ First->setNoWrapFlags(GEPNoWrapFlags::none());
+ Second->setNoWrapFlags(GEPNoWrapFlags::none());
} else
First->setIsInBounds(true);
}
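
Besides being shorter, the lambda-plus-all_of rewrite generalizes the inbounds check from exactly one index per GEP to all indices, which is what allows dropping the getNumIndices() != 1 bailouts above. The predicate in isolation (a sketch, assuming DL is the module's DataLayout):

#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// The reordered chain stays inbounds only if every index of both GEPs
// is known non-negative.
static bool allIndicesNonNegative(GetElementPtrInst *GEP,
                                  const DataLayout &DL) {
  return all_of(GEP->indices(),
                [&](Value *Idx) { return isKnownNonNegative(Idx, DL); });
}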
diff --git a/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp b/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
index 9ca9aaf9ee9d..dda80d419999 100644
--- a/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
+++ b/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
@@ -12,9 +12,11 @@
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TypeMetadataUtils.h"
#include "llvm/IR/AttributeMask.h"
+#include "llvm/IR/Constant.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
@@ -188,9 +190,9 @@ static void createRetBitCast(CallBase &CB, Type *RetTy, CastInst **RetBitCast) {
/// Predicate and clone the given call site.
///
/// This function creates an if-then-else structure at the location of the call
-/// site. The "if" condition is specified by `Cond`. The original call site is
-/// moved into the "else" block, and a clone of the call site is placed in the
-/// "then" block. The cloned instruction is returned.
+/// site. The "if" condition is specified by `Cond`.
+/// The original call site is moved into the "else" block, and a clone of the
+/// call site is placed in the "then" block. The cloned instruction is returned.
///
/// For example, the call instruction below:
///
@@ -518,7 +520,8 @@ CallBase &llvm::promoteCall(CallBase &CB, Function *Callee,
Type *FormalTy = CalleeType->getParamType(ArgNo);
Type *ActualTy = Arg->getType();
if (FormalTy != ActualTy) {
- auto *Cast = CastInst::CreateBitOrPointerCast(Arg, FormalTy, "", CB.getIterator());
+ auto *Cast =
+ CastInst::CreateBitOrPointerCast(Arg, FormalTy, "", CB.getIterator());
CB.setArgOperand(ArgNo, Cast);
// Remove any incompatible attributes for the argument.
@@ -568,6 +571,27 @@ CallBase &llvm::promoteCallWithIfThenElse(CallBase &CB, Function *Callee,
return promoteCall(NewInst, Callee);
}
+CallBase &llvm::promoteCallWithVTableCmp(CallBase &CB, Instruction *VPtr,
+ Function *Callee,
+ ArrayRef<Constant *> AddressPoints,
+ MDNode *BranchWeights) {
+ assert(!AddressPoints.empty() && "Caller should guarantee");
+ IRBuilder<> Builder(&CB);
+ SmallVector<Value *, 2> ICmps;
+ for (auto &AddressPoint : AddressPoints)
+ ICmps.push_back(Builder.CreateICmpEQ(VPtr, AddressPoint));
+
+ // TODO: Perform tree height reduction if the number of ICmps is high.
+ Value *Cond = Builder.CreateOr(ICmps);
+
+ // Version the indirect call site. If Cond is true, 'NewInst' will be
+ // executed, otherwise the original call site will be executed.
+ CallBase &NewInst = versionCallSiteWithCond(CB, Cond, BranchWeights);
+
+ // Promote 'NewInst' so that it directly calls the desired function.
+ return promoteCall(NewInst, Callee);
+}
+
bool llvm::tryPromoteCall(CallBase &CB) {
assert(!CB.getCalledFunction());
Module *M = CB.getCaller()->getParent();
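
The heart of promoteCallWithVTableCmp is an OR over per-address-point pointer compares; IRBuilder::CreateOr(ArrayRef<Value*>) chains them linearly, which is why the TODO mentions tree-height reduction for large address-point sets. The comparison step on its own (illustrative):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// True if the loaded vptr equals any expected vtable address point.
static Value *anyAddressPointMatches(IRBuilder<> &B, Value *VPtr,
                                     ArrayRef<Constant *> AddressPoints) {
  SmallVector<Value *, 2> Cmps;
  for (Constant *AP : AddressPoints)
    Cmps.push_back(B.CreateICmpEQ(VPtr, AP));
  return B.CreateOr(Cmps); // linear chain; a tree would halve the depth
}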
diff --git a/llvm/lib/Transforms/Utils/CloneFunction.cpp b/llvm/lib/Transforms/Utils/CloneFunction.cpp
index 981183682b8b..1fef8bc46121 100644
--- a/llvm/lib/Transforms/Utils/CloneFunction.cpp
+++ b/llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -825,13 +825,6 @@ void llvm::CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
if (!NewI)
continue;
- // Skip over non-intrinsic callsites, we don't want to remove any nodes
- // from the CGSCC.
- CallBase *CB = dyn_cast<CallBase>(NewI);
- if (CB && CB->getCalledFunction() &&
- !CB->getCalledFunction()->isIntrinsic())
- continue;
-
if (Value *V = simplifyInstruction(NewI, DL)) {
NewI->replaceAllUsesWith(V);
diff --git a/llvm/lib/Transforms/Utils/FunctionComparator.cpp b/llvm/lib/Transforms/Utils/FunctionComparator.cpp
index d95248c84b86..4c80bfa1bf02 100644
--- a/llvm/lib/Transforms/Utils/FunctionComparator.cpp
+++ b/llvm/lib/Transforms/Utils/FunctionComparator.cpp
@@ -436,7 +436,8 @@ int FunctionComparator::cmpConstants(const Constant *L,
if (int Res = cmpTypes(GEPL->getSourceElementType(),
GEPR->getSourceElementType()))
return Res;
- if (int Res = cmpNumbers(GEPL->isInBounds(), GEPR->isInBounds()))
+ if (int Res = cmpNumbers(GEPL->getNoWrapFlags().getRaw(),
+ GEPR->getNoWrapFlags().getRaw()))
return Res;
std::optional<ConstantRange> InRangeL = GEPL->getInRange();
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 82daaedaa0e8..7b846f2d2d72 100644
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -1389,6 +1389,12 @@ static void AddParamAndFnBasicAttributes(const CallBase &CB,
if (!Arg)
continue;
+ if (AL.hasParamAttr(I, Attribute::ByVal))
+ // It's unsound to propagate memory attributes to byval arguments.
+ // Even if CalledFunction doesn't e.g. write to the argument,
+ // the call to NewInnerCB may write to its by-value copy.
+ continue;
+
unsigned ArgNo = Arg->getArgNo();
// If so, propagate its access attributes.
AL = AL.addParamAttributes(Context, I, ValidParamAttrs[ArgNo]);
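
A plain-C++ picture of why byval blocks propagation (a loose analogy, not the pass's mechanics): the inner call operates on a fresh copy, so attributes proven about that copy say nothing about the caller's object.

struct S { int X; };

// f never writes its parameter, but the parameter is a copy.
static int f(S ByValCopy) { return ByValCopy.X; }

static int caller(S &Real) {
  // The call still materializes a copy of Real; "does not write its
  // argument" proven for f's copy must not migrate onto Real.
  return f(Real);
}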
diff --git a/llvm/lib/Transforms/Utils/LowerSwitch.cpp b/llvm/lib/Transforms/Utils/LowerSwitch.cpp
index f5921e5ccb09..f4ef6d02abf0 100644
--- a/llvm/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/llvm/lib/Transforms/Utils/LowerSwitch.cpp
@@ -208,7 +208,7 @@ BasicBlock *NewLeafBlock(CaseRange &Leaf, Value *Val, ConstantInt *LowerBound,
PHINode *PN = cast<PHINode>(I);
// Remove all but one incoming entries from the cluster
APInt Range = Leaf.High->getValue() - Leaf.Low->getValue();
- for (APInt j(Range.getBitWidth(), 0, true); j.slt(Range); ++j) {
+ for (APInt j(Range.getBitWidth(), 0, false); j.ult(Range); ++j) {
PN->removeIncomingValue(OrigBlock);
}
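
The loop bound here is a case-range width, a magnitude, so signed iteration was wrong whenever High - Low had its sign bit set: with slt the body ran zero times and stale PHI entries survived. An APInt demonstration:

#include "llvm/ADT/APInt.h"
#include <cassert>
using namespace llvm;

void rangeWidthDemo() {
  APInt Low(8, -128, /*isSigned=*/true), High(8, 127, /*isSigned=*/true);
  APInt Range = High - Low;   // 255 == 0xFF
  assert(Range.isNegative()); // so j.slt(Range) is false even at j == 0
  APInt J(8, 0);
  assert(J.ult(Range));       // the unsigned view iterates all 255 steps
}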
diff --git a/llvm/lib/Transforms/Utils/SCCPSolver.cpp b/llvm/lib/Transforms/Utils/SCCPSolver.cpp
index ce40e8b31b76..4f36bac11e34 100644
--- a/llvm/lib/Transforms/Utils/SCCPSolver.cpp
+++ b/llvm/lib/Transforms/Utils/SCCPSolver.cpp
@@ -43,7 +43,7 @@ static ValueLatticeElement::MergeOptions getMaxWidenStepsOpts() {
}
static ConstantRange getConstantRange(const ValueLatticeElement &LV, Type *Ty,
- bool UndefAllowed = true) {
+ bool UndefAllowed) {
assert(Ty->isIntOrIntVectorTy() && "Should be int or int vector");
if (LV.isConstantRange(UndefAllowed))
return LV.getConstantRange();
@@ -1297,7 +1297,8 @@ void SCCPInstVisitor::visitCastInst(CastInst &I) {
if (I.getDestTy()->isIntegerTy() && I.getSrcTy()->isIntOrIntVectorTy()) {
auto &LV = getValueState(&I);
- ConstantRange OpRange = getConstantRange(OpSt, I.getSrcTy());
+ ConstantRange OpRange =
+ getConstantRange(OpSt, I.getSrcTy(), /*UndefAllowed=*/false);
Type *DestTy = I.getDestTy();
// Vectors where all elements have the same known constant range are treated
@@ -1329,8 +1330,8 @@ void SCCPInstVisitor::handleExtractOfWithOverflow(ExtractValueInst &EVI,
return; // Wait to resolve.
Type *Ty = LHS->getType();
- ConstantRange LR = getConstantRange(L, Ty);
- ConstantRange RR = getConstantRange(R, Ty);
+ ConstantRange LR = getConstantRange(L, Ty, /*UndefAllowed=*/false);
+ ConstantRange RR = getConstantRange(R, Ty, /*UndefAllowed=*/false);
if (Idx == 0) {
ConstantRange Res = LR.binaryOp(WO->getBinaryOp(), RR);
mergeInValue(&EVI, ValueLatticeElement::getRange(Res));
@@ -1534,8 +1535,10 @@ void SCCPInstVisitor::visitBinaryOperator(Instruction &I) {
return markOverdefined(&I);
// Try to simplify to a constant range.
- ConstantRange A = getConstantRange(V1State, I.getType());
- ConstantRange B = getConstantRange(V2State, I.getType());
+ ConstantRange A =
+ getConstantRange(V1State, I.getType(), /*UndefAllowed=*/false);
+ ConstantRange B =
+ getConstantRange(V2State, I.getType(), /*UndefAllowed=*/false);
auto *BO = cast<BinaryOperator>(&I);
ConstantRange R = ConstantRange::getEmpty(I.getType()->getScalarSizeInBits());
@@ -1818,7 +1821,8 @@ void SCCPInstVisitor::handleCallResult(CallBase &CB) {
// Combine range info for the original value with the new range from the
// condition.
- auto CopyOfCR = getConstantRange(CopyOfVal, CopyOf->getType());
+ auto CopyOfCR = getConstantRange(CopyOfVal, CopyOf->getType(),
+ /*UndefAllowed=*/true);
auto NewCR = ImposedCR.intersectWith(CopyOfCR);
// If the existing information is != x, do not use the information from
// a chained predicate, as the != x information is more likely to be
@@ -1863,7 +1867,8 @@ void SCCPInstVisitor::handleCallResult(CallBase &CB) {
const ValueLatticeElement &State = getValueState(Op);
if (State.isUnknownOrUndef())
return;
- OpRanges.push_back(getConstantRange(State, Op->getType()));
+ OpRanges.push_back(
+ getConstantRange(State, Op->getType(), /*UndefAllowed=*/false));
}
ConstantRange Result =
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 93701b2a7791..fe6ec8819ff9 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -5501,11 +5501,13 @@ static bool CasesAreContiguous(SmallVectorImpl<ConstantInt *> &Cases) {
}
static void createUnreachableSwitchDefault(SwitchInst *Switch,
- DomTreeUpdater *DTU) {
+ DomTreeUpdater *DTU,
+ bool RemoveOrigDefaultBlock = true) {
LLVM_DEBUG(dbgs() << "SimplifyCFG: switch default is dead.\n");
auto *BB = Switch->getParent();
auto *OrigDefaultBlock = Switch->getDefaultDest();
- OrigDefaultBlock->removePredecessor(BB);
+ if (RemoveOrigDefaultBlock)
+ OrigDefaultBlock->removePredecessor(BB);
BasicBlock *NewDefaultBlock = BasicBlock::Create(
BB->getContext(), BB->getName() + ".unreachabledefault", BB->getParent(),
OrigDefaultBlock);
@@ -5514,7 +5516,8 @@ static void createUnreachableSwitchDefault(SwitchInst *Switch,
if (DTU) {
SmallVector<DominatorTree::UpdateType, 2> Updates;
Updates.push_back({DominatorTree::Insert, BB, &*NewDefaultBlock});
- if (!is_contained(successors(BB), OrigDefaultBlock))
+ if (RemoveOrigDefaultBlock &&
+ !is_contained(successors(BB), OrigDefaultBlock))
Updates.push_back({DominatorTree::Delete, BB, &*OrigDefaultBlock});
DTU->applyUpdates(Updates);
}
@@ -5696,10 +5699,33 @@ static bool eliminateDeadSwitchCases(SwitchInst *SI, DomTreeUpdater *DTU,
Known.getBitWidth() - (Known.Zero | Known.One).popcount();
assert(NumUnknownBits <= Known.getBitWidth());
if (HasDefault && DeadCases.empty() &&
- NumUnknownBits < 64 /* avoid overflow */ &&
- SI->getNumCases() == (1ULL << NumUnknownBits)) {
- createUnreachableSwitchDefault(SI, DTU);
- return true;
+ NumUnknownBits < 64 /* avoid overflow */) {
+ uint64_t AllNumCases = 1ULL << NumUnknownBits;
+ if (SI->getNumCases() == AllNumCases) {
+ createUnreachableSwitchDefault(SI, DTU);
+ return true;
+ }
+ // When only one case value is missing, replace default with that case.
+ // Eliminating the default branch will provide more opportunities for
+ // optimization, such as lookup tables.
+ if (SI->getNumCases() == AllNumCases - 1) {
+ assert(NumUnknownBits > 1 && "Should be canonicalized to a branch");
+ IntegerType *CondTy = cast<IntegerType>(Cond->getType());
+ if (CondTy->getIntegerBitWidth() > 64 ||
+ !DL.fitsInLegalInteger(CondTy->getIntegerBitWidth()))
+ return false;
+
+ uint64_t MissingCaseVal = 0;
+ for (const auto &Case : SI->cases())
+ MissingCaseVal ^= Case.getCaseValue()->getValue().getLimitedValue();
+ auto *MissingCase =
+ cast<ConstantInt>(ConstantInt::get(Cond->getType(), MissingCaseVal));
+ SwitchInstProfUpdateWrapper SIW(*SI);
+ SIW.addCase(MissingCase, SI->getDefaultDest(), SIW.getSuccessorWeight(0));
+ createUnreachableSwitchDefault(SI, DTU, /*RemoveOrigDefaultBlock*/ false);
+ SIW.setSuccessorWeight(0, 0);
+ return true;
+ }
}
if (DeadCases.empty())
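
The XOR trick works because with N unknown bits the 2^N feasible values XOR to zero once N > 1 (every bit position is set in 0, 2^(N-1), or 2^N of them, all even counts), so XORing the cases that are present leaves exactly the missing value; that is also why the assert requires NumUnknownBits > 1. A worked instance:

#include <cassert>
#include <cstdint>

void missingCaseDemo() {
  // N = 2 unknown bits: domain {0,1,2,3}; the switch covers {0,1,3}.
  uint64_t Missing = 0;
  for (uint64_t CaseVal : {0, 1, 3})
    Missing ^= CaseVal;
  assert(Missing == 2); // 0^1^2^3 == 0, hence 0^1^3 == 2
}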
diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index c9567b740026..eb1224abf00e 100644
--- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -2087,15 +2087,16 @@ Value *LibCallSimplifier::replacePowWithExp(CallInst *Pow, IRBuilderBase &B) {
AttributeList NoAttrs; // Attributes are only meaningful on the original call
+ const bool UseIntrinsic = Pow->doesNotAccessMemory();
+
// pow(2.0, itofp(x)) -> ldexp(1.0, x)
- // TODO: This does not work for vectors because there is no ldexp intrinsic.
- if (!Ty->isVectorTy() && match(Base, m_SpecificFP(2.0)) &&
+ if ((UseIntrinsic || !Ty->isVectorTy()) && match(Base, m_SpecificFP(2.0)) &&
(isa<SIToFPInst>(Expo) || isa<UIToFPInst>(Expo)) &&
hasFloatFn(M, TLI, Ty, LibFunc_ldexp, LibFunc_ldexpf, LibFunc_ldexpl)) {
if (Value *ExpoI = getIntToFPVal(Expo, B, TLI->getIntSize())) {
Constant *One = ConstantFP::get(Ty, 1.0);
- if (Pow->doesNotAccessMemory()) {
+ if (UseIntrinsic) {
return copyFlags(*Pow, B.CreateIntrinsic(Intrinsic::ldexp,
{Ty, ExpoI->getType()},
{One, ExpoI}, Pow, "exp2"));
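
The fold rests on ldexp(1.0, x) == 2^x == pow(2.0, x) for integer x, computed exactly in binary floating point; hoisting doesNotAccessMemory() into UseIntrinsic is what lets vector types through, since llvm.ldexp, unlike the libm entry point, has vector overloads. A scalar exactness check:

#include <cassert>
#include <cmath>

void pow2ViaLdexpDemo() {
  double P = 1.0;
  for (int X = 0; X <= 8; ++X) {
    assert(std::ldexp(1.0, X) == P); // ldexp scales by 2^X exactly
    P *= 2.0;
  }
}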
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index fd652cb78954..e20073d8de69 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -3384,18 +3384,6 @@ LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
TargetTransformInfo::TCK_RecipThroughput);
}
-static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
- auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
- auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
- return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
-}
-
-static Type *largestIntegerVectorType(Type *T1, Type *T2) {
- auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
- auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
- return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
-}
-
void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State,
VPlan &Plan) {
// Fix widened non-induction PHIs by setting up the PHI operands.
@@ -5824,6 +5812,10 @@ void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
// Remember that BB will remain after vectorization.
PredicatedBBsAfterVectorization[VF].insert(BB);
+ for (auto *Pred : predecessors(BB)) {
+ if (Pred->getSingleSuccessor() == BB)
+ PredicatedBBsAfterVectorization[VF].insert(Pred);
+ }
}
}
}
@@ -7116,26 +7108,12 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
return *RedCost;
Type *SrcScalarTy = I->getOperand(0)->getType();
+ Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
+ if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
+ SrcScalarTy =
+ IntegerType::get(SrcScalarTy->getContext(), MinBWs[Op0AsInstruction]);
Type *SrcVecTy =
VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
- if (canTruncateToMinimalBitwidth(I, VF)) {
- // This cast is going to be shrunk. This may remove the cast or it might
- // turn it into slightly different cast. For example, if MinBW == 16,
- // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
- //
- // Calculate the modified src and dest types.
- Type *MinVecTy = VectorTy;
- if (Opcode == Instruction::Trunc) {
- SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
- VectorTy =
- largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
- } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
- // Leave SrcVecTy unchanged - we only shrink the destination element
- // type.
- VectorTy =
- smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
- }
- }
return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
}
@@ -8153,8 +8131,7 @@ VPRecipeBuilder::tryToWidenMemory(Instruction *I, ArrayRef<VPValue *> Operands,
static VPWidenIntOrFpInductionRecipe *
createWidenInductionRecipes(PHINode *Phi, Instruction *PhiOrTrunc,
VPValue *Start, const InductionDescriptor &IndDesc,
- VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop,
- VFRange &Range) {
+ VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop) {
assert(IndDesc.getStartValue() ==
Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader()));
assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) &&
@@ -8176,7 +8153,7 @@ VPHeaderPHIRecipe *VPRecipeBuilder::tryToOptimizeInductionPHI(
// produces its scalar and vector values.
if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi))
return createWidenInductionRecipes(Phi, Phi, Operands[0], *II, Plan,
- *PSE.getSE(), *OrigLoop, Range);
+ *PSE.getSE(), *OrigLoop);
// Check if this is pointer induction. If so, build the recipe for it.
if (auto *II = Legal->getPointerInductionDescriptor(Phi)) {
@@ -8216,7 +8193,7 @@ VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi);
VPValue *Start = Plan.getOrAddLiveIn(II.getStartValue());
return createWidenInductionRecipes(Phi, I, Start, II, Plan, *PSE.getSE(),
- *OrigLoop, Range);
+ *OrigLoop);
}
return nullptr;
}
@@ -8557,8 +8534,10 @@ void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
*Plan, CM.getMinimalBitwidths(), PSE.getSE()->getContext());
VPlanTransforms::optimize(*Plan, *PSE.getSE());
// TODO: try to put it close to addActiveLaneMask().
- if (CM.foldTailWithEVL())
- VPlanTransforms::addExplicitVectorLength(*Plan);
+ // Discard the plan if it is not EVL-compatible
+ if (CM.foldTailWithEVL() &&
+ !VPlanTransforms::tryAddExplicitVectorLength(*Plan))
+ break;
assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
VPlans.push_back(std::move(Plan));
}
@@ -10390,15 +10369,9 @@ PreservedAnalyses LoopVectorizePass::run(Function &F,
RemoveRedundantDbgInstrs(&BB);
}
- // We currently do not preserve dominator analyses with outer loop
- // vectorization. Until this is addressed, mark these analyses as preserved
- // only for non-VPlan-native path.
- // TODO: Preserve Dominator analysis for VPlan-native path.
- if (!EnableVPlanNativePath) {
- PA.preserve<DominatorTreeAnalysis>();
- PA.preserve<ScalarEvolutionAnalysis>();
- }
PA.preserve<LoopAnalysis>();
+ PA.preserve<DominatorTreeAnalysis>();
+ PA.preserve<ScalarEvolutionAnalysis>();
if (Result.MadeCFGChange) {
// Making CFG changes likely means a loop got vectorized. Indicate that
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index d21b5e1cc041..f044a8cdd2f3 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -255,6 +255,21 @@ static bool isVectorLikeInstWithConstOps(Value *V) {
return isConstant(I->getOperand(2));
}
+/// Returns power-of-2 number of elements in a single register (part), given the
+/// total number of elements \p Size and number of registers (parts) \p
+/// NumParts.
+static unsigned getPartNumElems(unsigned Size, unsigned NumParts) {
+ return PowerOf2Ceil(divideCeil(Size, NumParts));
+}
+
+/// Returns the correct remaining number of elements, considering the total
+/// amount \p Size, the (power-of-2) number of elements in a single register
+/// \p PartNumElems, and the current register (part) \p Part.
+static unsigned getNumElems(unsigned Size, unsigned PartNumElems,
+ unsigned Part) {
+ return std::min<unsigned>(PartNumElems, Size - Part * PartNumElems);
+}
+
#if !defined(NDEBUG)
/// Print a short descriptor of the instruction bundle suitable for debug output.
static std::string shortBundleName(ArrayRef<Value *> VL) {
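
Worked numbers for the two helpers added above: ten scalars over three registers give PowerOf2Ceil(divideCeil(10, 3)) = 4 lanes per register, and the last register then carries only 10 - 2*4 = 2 live elements, which is the Limit the later slice(...) calls now respect.

#include <algorithm>
#include <cassert>

void partSizingDemo() {
  unsigned Size = 10, PartNumElems = 4; // getPartNumElems(10, /*NumParts=*/3)
  assert(std::min(PartNumElems, Size - 0 * PartNumElems) == 4u); // part 0
  assert(std::min(PartNumElems, Size - 1 * PartNumElems) == 4u); // part 1
  assert(std::min(PartNumElems, Size - 2 * PartNumElems) == 2u); // tail part
}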
@@ -502,6 +517,15 @@ isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements();
Value *Vec1 = nullptr;
Value *Vec2 = nullptr;
+ bool HasNonUndefVec = any_of(VL, [](Value *V) {
+ auto *EE = dyn_cast<ExtractElementInst>(V);
+ if (!EE)
+ return false;
+ Value *Vec = EE->getVectorOperand();
+ if (isa<UndefValue>(Vec))
+ return false;
+ return isGuaranteedNotToBePoison(Vec);
+ });
enum ShuffleMode { Unknown, Select, Permute };
ShuffleMode CommonShuffleMode = Unknown;
Mask.assign(VL.size(), PoisonMaskElem);
@@ -514,21 +538,27 @@ isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
return std::nullopt;
auto *Vec = EI->getVectorOperand();
// We can extractelement from undef or poison vector.
- if (isUndefVector(Vec).all())
+ if (isUndefVector</*isPoisonOnly=*/true>(Vec).all())
continue;
// All vector operands must have the same number of vector elements.
- if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size)
- return std::nullopt;
- if (isa<UndefValue>(EI->getIndexOperand()))
- continue;
- auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
- if (!Idx)
- return std::nullopt;
- // Undefined behavior if Idx is negative or >= Size.
- if (Idx->getValue().uge(Size))
+ if (isa<UndefValue>(Vec)) {
+ Mask[I] = I;
+ } else {
+ if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size)
+ return std::nullopt;
+ if (isa<UndefValue>(EI->getIndexOperand()))
+ continue;
+ auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
+ if (!Idx)
+ return std::nullopt;
+ // Undefined behavior if Idx is negative or >= Size.
+ if (Idx->getValue().uge(Size))
+ continue;
+ unsigned IntIdx = Idx->getValue().getZExtValue();
+ Mask[I] = IntIdx;
+ }
+ if (isUndefVector(Vec).all() && HasNonUndefVec)
continue;
- unsigned IntIdx = Idx->getValue().getZExtValue();
- Mask[I] = IntIdx;
// For correct shuffling we have to have at most 2 different vector operands
// in all extractelement instructions.
if (!Vec1 || Vec1 == Vec) {
@@ -543,7 +573,7 @@ isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
continue;
// If the extract index is not the same as the operation number, it is a
// permutation.
- if (IntIdx != I) {
+ if (Mask[I] % Size != I) {
CommonShuffleMode = Permute;
continue;
}
@@ -4066,7 +4096,8 @@ BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) {
const int VF = GetVF(I);
if (VF == 0)
continue;
- MutableArrayRef<unsigned> Slice = CurrentOrder.slice(I * PartSz, PartSz);
+ unsigned Limit = getNumElems(CurrentOrder.size(), PartSz, I);
+ MutableArrayRef<unsigned> Slice = CurrentOrder.slice(I * PartSz, Limit);
// Shuffle of at least 2 vectors - ignore.
if (any_of(Slice, [&](int I) { return I != NumScalars; })) {
std::fill(Slice.begin(), Slice.end(), NumScalars);
@@ -4076,7 +4107,7 @@ BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) {
// Try to include as much elements from the mask as possible.
int FirstMin = INT_MAX;
int SecondVecFound = false;
- for (int K : seq<int>(0, PartSz)) {
+ for (int K : seq<int>(Limit)) {
int Idx = Mask[I * PartSz + K];
if (Idx == PoisonMaskElem) {
Value *V = GatheredScalars[I * PartSz + K];
@@ -4101,7 +4132,7 @@ BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) {
ShuffledSubMasks.set(I);
continue;
}
- for (int K : seq<int>(0, PartSz)) {
+ for (int K : seq<int>(Limit)) {
int Idx = Mask[I * PartSz + K];
if (Idx == PoisonMaskElem)
continue;
@@ -4124,14 +4155,15 @@ BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) {
}
}
};
- int PartSz = NumScalars / NumParts;
+ int PartSz = getPartNumElems(NumScalars, NumParts);
if (!ExtractShuffles.empty())
TransformMaskToOrder(
CurrentOrder, ExtractMask, PartSz, NumParts, [&](unsigned I) {
if (!ExtractShuffles[I])
return 0U;
unsigned VF = 0;
- for (unsigned Idx : seq<unsigned>(0, PartSz)) {
+ unsigned Sz = getNumElems(TE.getVectorFactor(), PartSz, I);
+ for (unsigned Idx : seq<unsigned>(Sz)) {
int K = I * PartSz + Idx;
if (ExtractMask[K] == PoisonMaskElem)
continue;
@@ -4762,12 +4794,13 @@ BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) {
::addMask(ReorderMask, TE.ReuseShuffleIndices);
unsigned VF = ReorderMask.size();
OrdersType ResOrder(VF, VF);
- unsigned NumParts = VF / Sz;
+ unsigned NumParts = divideCeil(VF, Sz);
SmallBitVector UsedVals(NumParts);
for (unsigned I = 0; I < VF; I += Sz) {
int Val = PoisonMaskElem;
unsigned UndefCnt = 0;
- if (any_of(ArrayRef(ReorderMask).slice(I, Sz),
+ unsigned Limit = std::min(Sz, VF - I);
+ if (any_of(ArrayRef(ReorderMask).slice(I, Limit),
[&](int Idx) {
if (Val == PoisonMaskElem && Idx != PoisonMaskElem)
Val = Idx;
@@ -6861,23 +6894,16 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
case Instruction::ExtractElement: {
if (CurrentOrder.empty()) {
LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n");
- newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndices);
- // This is a special case, as it does not gather, but at the same time
- // we are not extending buildTree_rec() towards the operands.
- ValueList Op0;
- Op0.assign(VL.size(), VL0->getOperand(0));
- VectorizableTree.back()->setOperand(0, Op0);
- return;
+ } else {
+ LLVM_DEBUG({
+ dbgs() << "SLP: Reusing or shuffling of reordered extract sequence "
+ "with order";
+ for (unsigned Idx : CurrentOrder)
+ dbgs() << " " << Idx;
+ dbgs() << "\n";
+ });
+ fixupOrderingIndices(CurrentOrder);
}
- LLVM_DEBUG({
- dbgs() << "SLP: Reusing or shuffling of reordered extract sequence "
- "with order";
- for (unsigned Idx : CurrentOrder)
- dbgs() << " " << Idx;
- dbgs() << "\n";
- });
- fixupOrderingIndices(CurrentOrder);
// Insert new order with initial value 0, if it does not exist,
// otherwise return the iterator to the existing one.
newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
@@ -6916,15 +6942,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
std::nullopt, CurrentOrder);
LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n");
- constexpr int NumOps = 2;
- ValueList VectorOperands[NumOps];
- for (int I = 0; I < NumOps; ++I) {
- for (Value *V : VL)
- VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I));
-
- TE->setOperand(I, VectorOperands[I]);
- }
- buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, NumOps - 1});
+ TE->setOperandsInOrder();
+ buildTree_rec(TE->getOperand(1), Depth + 1, {TE, 1});
return;
}
case Instruction::Load: {
@@ -6938,28 +6957,18 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
fixupOrderingIndices(CurrentOrder);
switch (State) {
case TreeEntry::Vectorize:
- if (CurrentOrder.empty()) {
- // Original loads are consecutive and does not require reordering.
- TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndices);
+ TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
+ ReuseShuffleIndices, CurrentOrder);
+ if (CurrentOrder.empty())
LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
- } else {
- // Need to reorder.
- TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndices, CurrentOrder);
+ else
LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
- }
TE->setOperandsInOrder();
break;
case TreeEntry::StridedVectorize:
// Vectorizing non-consecutive loads with `llvm.masked.gather`.
- if (CurrentOrder.empty()) {
- TE = newTreeEntry(VL, TreeEntry::StridedVectorize, Bundle, S,
- UserTreeIdx, ReuseShuffleIndices);
- } else {
- TE = newTreeEntry(VL, TreeEntry::StridedVectorize, Bundle, S,
- UserTreeIdx, ReuseShuffleIndices, CurrentOrder);
- }
+ TE = newTreeEntry(VL, TreeEntry::StridedVectorize, Bundle, S,
+ UserTreeIdx, ReuseShuffleIndices, CurrentOrder);
TE->setOperandsInOrder();
LLVM_DEBUG(dbgs() << "SLP: added a vector of strided loads.\n");
break;
@@ -7024,14 +7033,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n");
TE->setOperandsInOrder();
- for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) {
- ValueList Operands;
- // Prepare the operand vector.
- for (Value *V : VL)
- Operands.push_back(cast<Instruction>(V)->getOperand(I));
-
- buildTree_rec(Operands, Depth + 1, {TE, I});
- }
+ for (unsigned I : seq<unsigned>(0, VL0->getNumOperands()))
+ buildTree_rec(TE->getOperand(I), Depth + 1, {TE, I});
return;
}
case Instruction::ICmp:
@@ -7116,14 +7119,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
}
TE->setOperandsInOrder();
- for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) {
- ValueList Operands;
- // Prepare the operand vector.
- for (Value *V : VL)
- Operands.push_back(cast<Instruction>(V)->getOperand(I));
-
- buildTree_rec(Operands, Depth + 1, {TE, I});
- }
+ for (unsigned I : seq<unsigned>(0, VL0->getNumOperands()))
+ buildTree_rec(TE->getOperand(I), Depth + 1, {TE, I});
return;
}
case Instruction::GetElementPtr: {
@@ -7182,30 +7179,17 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
return;
}
case Instruction::Store: {
- // Check if the stores are consecutive or if we need to swizzle them.
- ValueList Operands(VL.size());
- auto *OIter = Operands.begin();
- for (Value *V : VL) {
- auto *SI = cast<StoreInst>(V);
- *OIter = SI->getValueOperand();
- ++OIter;
- }
- // Check that the sorted pointer operands are consecutive.
- if (CurrentOrder.empty()) {
- // Original stores are consecutive and does not require reordering.
- TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndices);
- TE->setOperandsInOrder();
- buildTree_rec(Operands, Depth + 1, {TE, 0});
- LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
- } else {
+ bool Consecutive = CurrentOrder.empty();
+ if (!Consecutive)
fixupOrderingIndices(CurrentOrder);
- TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
- ReuseShuffleIndices, CurrentOrder);
- TE->setOperandsInOrder();
- buildTree_rec(Operands, Depth + 1, {TE, 0});
+ TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
+ ReuseShuffleIndices, CurrentOrder);
+ TE->setOperandsInOrder();
+ buildTree_rec(TE->getOperand(0), Depth + 1, {TE, 0});
+ if (Consecutive)
+ LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
+ else
LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n");
- }
return;
}
case Instruction::Call: {
@@ -7305,14 +7289,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
}
TE->setOperandsInOrder();
- for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) {
- ValueList Operands;
- // Prepare the operand vector.
- for (Value *V : VL)
- Operands.push_back(cast<Instruction>(V)->getOperand(I));
-
- buildTree_rec(Operands, Depth + 1, {TE, I});
- }
+ for (unsigned I : seq<unsigned>(0, VL0->getNumOperands()))
+ buildTree_rec(TE->getOperand(I), Depth + 1, {TE, I});
return;
}
default:
@@ -8004,6 +7982,10 @@ void BoUpSLP::transformNodes() {
TreeEntry &E = *TE.get();
switch (E.getOpcode()) {
case Instruction::Load: {
+ // No need to reorder masked gather loads; just reorder the scalar
+ // operands.
+ if (E.State != TreeEntry::Vectorize)
+ break;
Type *ScalarTy = E.getMainOp()->getType();
auto *VecTy = FixedVectorType::get(ScalarTy, E.Scalars.size());
Align CommonAlignment = computeCommonAlignment<LoadInst>(E.Scalars);
@@ -8317,19 +8299,18 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
return Sz;
return std::max(Sz, VecTy->getNumElements());
});
- unsigned NumSrcRegs =
- TTI.getNumberOfParts(FixedVectorType::get(ScalarTy, NumElts));
- if (NumSrcRegs == 0)
- NumSrcRegs = 1;
// FIXME: this must be moved to TTI for better estimation.
- unsigned EltsPerVector = PowerOf2Ceil(std::max(
- divideCeil(VL.size(), NumParts), divideCeil(NumElts, NumSrcRegs)));
+ unsigned EltsPerVector = getPartNumElems(VL.size(), NumParts);
auto CheckPerRegistersShuffle =
- [&](MutableArrayRef<int> Mask) -> std::optional<TTI::ShuffleKind> {
+ [&](MutableArrayRef<int> Mask,
+ SmallVector<int> &Indices) -> std::optional<TTI::ShuffleKind> {
+ if (NumElts <= EltsPerVector)
+ return std::nullopt;
DenseSet<int> RegIndices;
// Check that if trying to permute same single/2 input vectors.
TTI::ShuffleKind ShuffleKind = TTI::SK_PermuteSingleSrc;
int FirstRegId = -1;
+ Indices.assign(1, -1);
for (int &I : Mask) {
if (I == PoisonMaskElem)
continue;
@@ -8339,8 +8320,15 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
RegIndices.insert(RegId);
if (RegIndices.size() > 2)
return std::nullopt;
- if (RegIndices.size() == 2)
+ if (RegIndices.size() == 2) {
ShuffleKind = TTI::SK_PermuteTwoSrc;
+ if (Indices.size() == 1)
+ Indices.push_back(-1);
+ }
+ if (RegId == FirstRegId)
+ Indices.front() = I % NumElts;
+ else
+ Indices.back() = I % NumElts;
I = (I % NumElts) % EltsPerVector +
(RegId == FirstRegId ? 0 : EltsPerVector);
}
@@ -8351,22 +8339,23 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
// Process extracts in blocks of EltsPerVector to check if the source vector
// operand can be re-used directly. If not, add the cost of creating a
// shuffle to extract the values into a vector register.
- for (unsigned Part = 0; Part < NumParts; ++Part) {
+ for (unsigned Part : seq<unsigned>(NumParts)) {
if (!ShuffleKinds[Part])
continue;
- ArrayRef<int> MaskSlice =
- Mask.slice(Part * EltsPerVector,
- (Part == NumParts - 1 && Mask.size() % EltsPerVector != 0)
- ? Mask.size() % EltsPerVector
- : EltsPerVector);
+ ArrayRef<int> MaskSlice = Mask.slice(
+ Part * EltsPerVector, getNumElems(Mask.size(), EltsPerVector, Part));
SmallVector<int> SubMask(EltsPerVector, PoisonMaskElem);
copy(MaskSlice, SubMask.begin());
+ SmallVector<int> Indices;
std::optional<TTI::ShuffleKind> RegShuffleKind =
- CheckPerRegistersShuffle(SubMask);
+ CheckPerRegistersShuffle(SubMask, Indices);
if (!RegShuffleKind) {
- Cost += ::getShuffleCost(TTI, *ShuffleKinds[Part],
- FixedVectorType::get(ScalarTy, NumElts),
- MaskSlice);
+ if (*ShuffleKinds[Part] != TTI::SK_PermuteSingleSrc ||
+ !ShuffleVectorInst::isIdentityMask(
+ MaskSlice, std::max<unsigned>(NumElts, MaskSlice.size())))
+ Cost += ::getShuffleCost(TTI, *ShuffleKinds[Part],
+ FixedVectorType::get(ScalarTy, NumElts),
+ MaskSlice);
continue;
}
if (*RegShuffleKind != TTI::SK_PermuteSingleSrc ||
@@ -8375,6 +8364,12 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
FixedVectorType::get(ScalarTy, EltsPerVector),
SubMask);
}
+ for (int Idx : Indices) {
+ Cost += ::getShuffleCost(TTI, TTI::SK_ExtractSubvector,
+ FixedVectorType::get(ScalarTy, NumElts),
+ std::nullopt, CostKind, Idx,
+ FixedVectorType::get(ScalarTy, EltsPerVector));
+ }
}
return Cost;
}
@@ -8402,11 +8397,11 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
InVectors.front().get<const TreeEntry *>() == &E1 &&
InVectors.back().get<const TreeEntry *>() == E2) ||
(!E2 && InVectors.front().get<const TreeEntry *>() == &E1)) {
- assert(all_of(ArrayRef(CommonMask).slice(Part * SliceSize, SliceSize),
+ unsigned Limit = getNumElems(Mask.size(), SliceSize, Part);
+ assert(all_of(ArrayRef(CommonMask).slice(Part * SliceSize, Limit),
[](int Idx) { return Idx == PoisonMaskElem; }) &&
"Expected all poisoned elements.");
- ArrayRef<int> SubMask =
- ArrayRef(Mask).slice(Part * SliceSize, SliceSize);
+ ArrayRef<int> SubMask = ArrayRef(Mask).slice(Part * SliceSize, Limit);
copy(SubMask, std::next(CommonMask.begin(), SliceSize * Part));
return;
}
@@ -8726,10 +8721,11 @@ public:
});
});
SmallPtrSet<Value *, 4> UniqueBases;
- unsigned SliceSize = VL.size() / NumParts;
- for (unsigned Part = 0; Part < NumParts; ++Part) {
- ArrayRef<int> SubMask = Mask.slice(Part * SliceSize, SliceSize);
- for (auto [I, V] : enumerate(VL.slice(Part * SliceSize, SliceSize))) {
+ unsigned SliceSize = getPartNumElems(VL.size(), NumParts);
+ for (unsigned Part : seq<unsigned>(NumParts)) {
+ unsigned Limit = getNumElems(VL.size(), SliceSize, Part);
+ ArrayRef<int> SubMask = Mask.slice(Part * SliceSize, Limit);
+ for (auto [I, V] : enumerate(VL.slice(Part * SliceSize, Limit))) {
// Ignore non-extractelement scalars.
if (isa<UndefValue>(V) ||
(!SubMask.empty() && SubMask[I] == PoisonMaskElem))
@@ -8826,7 +8822,7 @@ public:
unsigned NumParts = TTI.getNumberOfParts(MaskVecTy);
if (NumParts == 0 || NumParts >= Mask.size())
NumParts = 1;
- unsigned SliceSize = Mask.size() / NumParts;
+ unsigned SliceSize = getPartNumElems(Mask.size(), NumParts);
const auto *It =
find_if(Mask, [](int Idx) { return Idx != PoisonMaskElem; });
unsigned Part = std::distance(Mask.begin(), It) / SliceSize;
@@ -8843,7 +8839,7 @@ public:
unsigned NumParts = TTI.getNumberOfParts(MaskVecTy);
if (NumParts == 0 || NumParts >= Mask.size())
NumParts = 1;
- unsigned SliceSize = Mask.size() / NumParts;
+ unsigned SliceSize = getPartNumElems(Mask.size(), NumParts);
const auto *It =
find_if(Mask, [](int Idx) { return Idx != PoisonMaskElem; });
unsigned Part = std::distance(Mask.begin(), It) / SliceSize;
@@ -10700,12 +10696,12 @@ BoUpSLP::tryToGatherExtractElements(SmallVectorImpl<Value *> &VL,
assert(NumParts > 0 && "NumParts expected be greater than or equal to 1.");
SmallVector<std::optional<TTI::ShuffleKind>> ShufflesRes(NumParts);
Mask.assign(VL.size(), PoisonMaskElem);
- unsigned SliceSize = VL.size() / NumParts;
- for (unsigned Part = 0; Part < NumParts; ++Part) {
+ unsigned SliceSize = getPartNumElems(VL.size(), NumParts);
+ for (unsigned Part : seq<unsigned>(NumParts)) {
// Scan list of gathered scalars for extractelements that can be represented
// as shuffles.
- MutableArrayRef<Value *> SubVL =
- MutableArrayRef(VL).slice(Part * SliceSize, SliceSize);
+ MutableArrayRef<Value *> SubVL = MutableArrayRef(VL).slice(
+ Part * SliceSize, getNumElems(VL.size(), SliceSize, Part));
SmallVector<int> SubMask;
std::optional<TTI::ShuffleKind> Res =
tryToGatherSingleRegisterExtractElements(SubVL, SubMask);
@@ -11109,10 +11105,11 @@ BoUpSLP::isGatherShuffledEntry(
"Expected only single user of the gather node.");
assert(VL.size() % NumParts == 0 &&
"Number of scalars must be divisible by NumParts.");
- unsigned SliceSize = VL.size() / NumParts;
+ unsigned SliceSize = getPartNumElems(VL.size(), NumParts);
SmallVector<std::optional<TTI::ShuffleKind>> Res;
- for (unsigned Part = 0; Part < NumParts; ++Part) {
- ArrayRef<Value *> SubVL = VL.slice(Part * SliceSize, SliceSize);
+ for (unsigned Part : seq<unsigned>(NumParts)) {
+ ArrayRef<Value *> SubVL =
+ VL.slice(Part * SliceSize, getNumElems(VL.size(), SliceSize, Part));
SmallVectorImpl<const TreeEntry *> &SubEntries = Entries.emplace_back();
std::optional<TTI::ShuffleKind> SubRes =
isGatherShuffledSingleRegisterEntry(TE, SubVL, Mask, SubEntries, Part,
@@ -11715,11 +11712,12 @@ public:
// into a long virtual vector register, forming the original vector.
Value *Vec = nullptr;
SmallVector<int> VecMask(Mask.size(), PoisonMaskElem);
- unsigned SliceSize = E->Scalars.size() / NumParts;
- for (unsigned Part = 0; Part < NumParts; ++Part) {
+ unsigned SliceSize = getPartNumElems(E->Scalars.size(), NumParts);
+ for (unsigned Part : seq<unsigned>(NumParts)) {
+ unsigned Limit = getNumElems(E->Scalars.size(), SliceSize, Part);
ArrayRef<Value *> VL =
- ArrayRef(E->Scalars).slice(Part * SliceSize, SliceSize);
- MutableArrayRef<int> SubMask = Mask.slice(Part * SliceSize, SliceSize);
+ ArrayRef(E->Scalars).slice(Part * SliceSize, Limit);
+ MutableArrayRef<int> SubMask = Mask.slice(Part * SliceSize, Limit);
constexpr int MaxBases = 2;
SmallVector<Value *, MaxBases> Bases(MaxBases);
#ifndef NDEBUG
@@ -11756,7 +11754,9 @@ public:
assert((Part == 0 || all_of(seq<unsigned>(0, Part),
[&](unsigned P) {
ArrayRef<int> SubMask =
- Mask.slice(P * SliceSize, SliceSize);
+ Mask.slice(P * SliceSize,
+ getNumElems(Mask.size(),
+ SliceSize, P));
return all_of(SubMask, [](int Idx) {
return Idx == PoisonMaskElem;
});
@@ -12140,13 +12140,19 @@ ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Type *ScalarTy,
Idx == 0) ||
(Mask.size() == InputVF &&
ShuffleVectorInst::isIdentityMask(Mask, Mask.size()))) {
- std::iota(std::next(Mask.begin(), I * SliceSize),
- std::next(Mask.begin(), (I + 1) * SliceSize), 0);
+ std::iota(
+ std::next(Mask.begin(), I * SliceSize),
+ std::next(Mask.begin(),
+ I * SliceSize + getNumElems(Mask.size(), SliceSize, I)),
+ 0);
} else {
unsigned IVal =
*find_if_not(Mask, [](int Idx) { return Idx == PoisonMaskElem; });
- std::fill(std::next(Mask.begin(), I * SliceSize),
- std::next(Mask.begin(), (I + 1) * SliceSize), IVal);
+ std::fill(
+ std::next(Mask.begin(), I * SliceSize),
+ std::next(Mask.begin(),
+ I * SliceSize + getNumElems(Mask.size(), SliceSize, I)),
+ IVal);
}
return true;
};
@@ -12406,7 +12412,7 @@ ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Type *ScalarTy,
}
}
if (!GatherShuffles.empty()) {
- unsigned SliceSize = E->Scalars.size() / NumParts;
+ unsigned SliceSize = getPartNumElems(E->Scalars.size(), NumParts);
SmallVector<int> VecMask(Mask.size(), PoisonMaskElem);
for (const auto [I, TEs] : enumerate(Entries)) {
if (TEs.empty()) {
@@ -12416,7 +12422,8 @@ ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Type *ScalarTy,
}
assert((TEs.size() == 1 || TEs.size() == 2) &&
"Expected shuffle of 1 or 2 entries.");
- auto SubMask = ArrayRef(Mask).slice(I * SliceSize, SliceSize);
+ unsigned Limit = getNumElems(Mask.size(), SliceSize, I);
+ auto SubMask = ArrayRef(Mask).slice(I * SliceSize, Limit);
VecMask.assign(VecMask.size(), PoisonMaskElem);
copy(SubMask, std::next(VecMask.begin(), I * SliceSize));
if (TEs.size() == 1) {
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
index 27f8e239b1c0..bf467eb8434f 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -25,6 +25,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
@@ -218,7 +219,7 @@ VPTransformState::VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI,
DominatorTree *DT, IRBuilderBase &Builder,
InnerLoopVectorizer *ILV, VPlan *Plan,
LLVMContext &Ctx)
- : VF(VF), UF(UF), LI(LI), DT(DT), Builder(Builder), ILV(ILV), Plan(Plan),
+ : VF(VF), UF(UF), CFG(DT), LI(LI), Builder(Builder), ILV(ILV), Plan(Plan),
LVer(nullptr),
TypeAnalysis(Plan->getCanonicalIV()->getScalarType(), Ctx) {}
@@ -436,6 +437,7 @@ VPBasicBlock::createEmptyBasicBlock(VPTransformState::CFGState &CFG) {
"Trying to reset an existing successor block.");
TermBr->setSuccessor(idx, NewBB);
}
+ CFG.DTU.applyUpdates({{DominatorTree::Insert, PredBB, NewBB}});
}
return NewBB;
}
@@ -467,6 +469,7 @@ void VPBasicBlock::execute(VPTransformState *State) {
// The Exit block of a loop is always set to be successor 0 of the Exiting
// block.
cast<BranchInst>(ExitingBB->getTerminator())->setSuccessor(0, NewBB);
+ State->CFG.DTU.applyUpdates({{DominatorTree::Insert, ExitingBB, NewBB}});
} else if (PrevVPBB && /* A */
!((SingleHPred = getSingleHierarchicalPredecessor()) &&
SingleHPred->getExitingBasicBlock() == PrevVPBB &&
@@ -829,6 +832,11 @@ void VPlan::execute(VPTransformState *State) {
BasicBlock *VectorPreHeader = State->CFG.PrevBB;
State->Builder.SetInsertPoint(VectorPreHeader->getTerminator());
+ // Disconnect VectorPreHeader from ExitBB in both the CFG and DT.
+ cast<BranchInst>(VectorPreHeader->getTerminator())->setSuccessor(0, nullptr);
+ State->CFG.DTU.applyUpdates(
+ {{DominatorTree::Delete, VectorPreHeader, State->CFG.ExitBB}});
+
// Generate code in the loop pre-header and body.
for (VPBlockBase *Block : vp_depth_first_shallow(Entry))
Block->execute(State);
@@ -891,13 +899,10 @@ void VPlan::execute(VPTransformState *State) {
}
}
- // We do not attempt to preserve DT for outer loop vectorization currently.
- if (!EnableVPlanNativePath) {
- BasicBlock *VectorHeaderBB = State->CFG.VPBB2IRBB[Header];
- State->DT->addNewBlock(VectorHeaderBB, VectorPreHeader);
- updateDominatorTree(State->DT, VectorHeaderBB, VectorLatchBB,
- State->CFG.ExitBB);
- }
+ State->CFG.DTU.flush();
+ assert(State->CFG.DTU.getDomTree().verify(
+ DominatorTree::VerificationLevel::Fast) &&
+ "DT not preserved correctly");
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -995,44 +1000,6 @@ void VPlan::addLiveOut(PHINode *PN, VPValue *V) {
LiveOuts.insert({PN, new VPLiveOut(PN, V)});
}
-void VPlan::updateDominatorTree(DominatorTree *DT, BasicBlock *LoopHeaderBB,
- BasicBlock *LoopLatchBB,
- BasicBlock *LoopExitBB) {
- // The vector body may be more than a single basic-block by this point.
- // Update the dominator tree information inside the vector body by propagating
- // it from header to latch, expecting only triangular control-flow, if any.
- BasicBlock *PostDomSucc = nullptr;
- for (auto *BB = LoopHeaderBB; BB != LoopLatchBB; BB = PostDomSucc) {
- // Get the list of successors of this block.
- std::vector<BasicBlock *> Succs(succ_begin(BB), succ_end(BB));
- assert(Succs.size() <= 2 &&
- "Basic block in vector loop has more than 2 successors.");
- PostDomSucc = Succs[0];
- if (Succs.size() == 1) {
- assert(PostDomSucc->getSinglePredecessor() &&
- "PostDom successor has more than one predecessor.");
- DT->addNewBlock(PostDomSucc, BB);
- continue;
- }
- BasicBlock *InterimSucc = Succs[1];
- if (PostDomSucc->getSingleSuccessor() == InterimSucc) {
- PostDomSucc = Succs[1];
- InterimSucc = Succs[0];
- }
- assert(InterimSucc->getSingleSuccessor() == PostDomSucc &&
- "One successor of a basic block does not lead to the other.");
- assert(InterimSucc->getSinglePredecessor() &&
- "Interim successor has more than one predecessor.");
- assert(PostDomSucc->hasNPredecessors(2) &&
- "PostDom successor has more than two predecessors.");
- DT->addNewBlock(InterimSucc, BB);
- DT->addNewBlock(PostDomSucc, BB);
- }
- // Latch block is a new dominator for the loop exit.
- DT->changeImmediateDominator(LoopExitBB, LoopLatchBB);
- assert(DT->verify(DominatorTree::VerificationLevel::Fast));
-}
-
static void remapOperands(VPBlockBase *Entry, VPBlockBase *NewEntry,
DenseMap<VPValue *, VPValue *> &Old2NewVPValues) {
// Update the operands of all cloned recipes starting at NewEntry. This
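
The rewrite above replaces the hand-rolled `updateDominatorTree` walk with a `DomTreeUpdater` owned by `CFGState`: each CFG edge that is created or removed is queued as it happens, and `flush()` applies the whole batch before verification. A minimal sketch of the same pattern on ordinary IR blocks (the function and parameter names here are illustrative):

```cpp
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include <cassert>

using namespace llvm;

// Queue edge updates as the CFG is rewritten, then apply them in one
// batch. With the Lazy strategy the dominator tree is only recomputed
// when flush() runs, not on every applyUpdates() call.
static void rewireAndFlush(DominatorTree &DT, BasicBlock *Pred,
                           BasicBlock *OldSucc, BasicBlock *NewSucc) {
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
  DTU.applyUpdates({{DominatorTree::Delete, Pred, OldSucc},
                    {DominatorTree::Insert, Pred, NewSucc}});
  // ... further CFG edits, each paired with applyUpdates() calls ...
  DTU.flush(); // apply everything that was queued
  assert(DT.verify(DominatorTree::VerificationLevel::Fast) &&
         "DT not preserved correctly");
}
```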
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 4b3cb15b5e1e..e75a1de548f7 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -35,6 +35,7 @@
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
+#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/VectorUtils.h"
@@ -372,7 +373,11 @@ struct VPTransformState {
/// of replication, maps the BasicBlock of the last replica created.
SmallDenseMap<VPBasicBlock *, BasicBlock *> VPBB2IRBB;
- CFGState() = default;
+ /// Updater for the DominatorTree.
+ DomTreeUpdater DTU;
+
+ CFGState(DominatorTree *DT)
+ : DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy) {}
/// Returns the BasicBlock* mapped to the pre-header of the loop region
/// containing \p R.
@@ -382,9 +387,6 @@ struct VPTransformState {
/// Hold a pointer to LoopInfo to register new basic blocks in the loop.
LoopInfo *LI;
- /// Hold a pointer to Dominator Tree to register new basic blocks in the loop.
- DominatorTree *DT;
-
/// Hold a reference to the IRBuilder used to generate output IR code.
IRBuilderBase &Builder;
@@ -1093,7 +1095,10 @@ public:
I->setIsExact(ExactFlags.IsExact);
break;
case OperationType::GEPOp:
- cast<GetElementPtrInst>(I)->setIsInBounds(GEPFlags.IsInBounds);
+ // TODO(gep_nowrap): Track the full GEPNoWrapFlags in VPlan.
+ cast<GetElementPtrInst>(I)->setNoWrapFlags(
+ GEPFlags.IsInBounds ? GEPNoWrapFlags::inBounds()
+ : GEPNoWrapFlags::none());
break;
case OperationType::FPMathOp:
I->setHasAllowReassoc(FMFs.AllowReassoc);
@@ -3289,13 +3294,6 @@ public:
/// Clone the current VPlan, update all VPValues of the new VPlan and cloned
/// recipes to refer to the clones, and return it.
VPlan *duplicate();
-
-private:
- /// Add to the given dominator tree the header block and every new basic block
- /// that was created between it and the latch block, inclusive.
- static void updateDominatorTree(DominatorTree *DT, BasicBlock *LoopHeaderBB,
- BasicBlock *LoopLatchBB,
- BasicBlock *LoopExitBB);
};
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
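
VPlan still records only a single `IsInBounds` bit, so the `GEPOp` hunk above maps that bit onto the newer `GEPNoWrapFlags` interface when flags are materialized. A small sketch of that mapping in isolation, assuming an LLVM tree that already has `GEPNoWrapFlags` (as this one does):

```cpp
#include "llvm/IR/GEPNoWrapFlags.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Translate the single legacy inbounds bit into the richer no-wrap flag
// set: inBounds() carries the inbounds semantics, none() clears all
// no-wrap information.
static void applyLegacyInBounds(GetElementPtrInst *GEP, bool IsInBounds) {
  GEP->setNoWrapFlags(IsInBounds ? GEPNoWrapFlags::inBounds()
                                 : GEPNoWrapFlags::none());
}
```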
diff --git a/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h b/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h
index 50b08bbb7ebf..058746880743 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h
@@ -157,7 +157,7 @@ using AllUnaryRecipe_match =
UnaryRecipe_match<Op0_t, Opcode, VPWidenRecipe, VPReplicateRecipe,
VPWidenCastRecipe, VPInstruction>;
-template <typename Op0_t, typename Op1_t, unsigned Opcode,
+template <typename Op0_t, typename Op1_t, unsigned Opcode, bool Commutative,
typename... RecipeTys>
struct BinaryRecipe_match {
Op0_t Op0;
@@ -179,18 +179,23 @@ struct BinaryRecipe_match {
return false;
assert(R->getNumOperands() == 2 &&
"recipe with matched opcode does not have 2 operands");
- return Op0.match(R->getOperand(0)) && Op1.match(R->getOperand(1));
+ if (Op0.match(R->getOperand(0)) && Op1.match(R->getOperand(1)))
+ return true;
+ return Commutative && Op0.match(R->getOperand(1)) &&
+ Op1.match(R->getOperand(0));
}
};
template <typename Op0_t, typename Op1_t, unsigned Opcode>
using BinaryVPInstruction_match =
- BinaryRecipe_match<Op0_t, Op1_t, Opcode, VPInstruction>;
+ BinaryRecipe_match<Op0_t, Op1_t, Opcode, /*Commutative*/ false,
+ VPInstruction>;
-template <typename Op0_t, typename Op1_t, unsigned Opcode>
+template <typename Op0_t, typename Op1_t, unsigned Opcode,
+ bool Commutative = false>
using AllBinaryRecipe_match =
- BinaryRecipe_match<Op0_t, Op1_t, Opcode, VPWidenRecipe, VPReplicateRecipe,
- VPWidenCastRecipe, VPInstruction>;
+ BinaryRecipe_match<Op0_t, Op1_t, Opcode, Commutative, VPWidenRecipe,
+ VPReplicateRecipe, VPWidenCastRecipe, VPInstruction>;
template <unsigned Opcode, typename Op0_t>
inline UnaryVPInstruction_match<Op0_t, Opcode>
@@ -256,10 +261,11 @@ m_ZExtOrSExt(const Op0_t &Op0) {
return m_CombineOr(m_ZExt(Op0), m_SExt(Op0));
}
-template <unsigned Opcode, typename Op0_t, typename Op1_t>
-inline AllBinaryRecipe_match<Op0_t, Op1_t, Opcode> m_Binary(const Op0_t &Op0,
- const Op1_t &Op1) {
- return AllBinaryRecipe_match<Op0_t, Op1_t, Opcode>(Op0, Op1);
+template <unsigned Opcode, typename Op0_t, typename Op1_t,
+ bool Commutative = false>
+inline AllBinaryRecipe_match<Op0_t, Op1_t, Opcode, Commutative>
+m_Binary(const Op0_t &Op0, const Op1_t &Op1) {
+ return AllBinaryRecipe_match<Op0_t, Op1_t, Opcode, Commutative>(Op0, Op1);
}
template <typename Op0_t, typename Op1_t>
@@ -268,10 +274,27 @@ m_Mul(const Op0_t &Op0, const Op1_t &Op1) {
return m_Binary<Instruction::Mul, Op0_t, Op1_t>(Op0, Op1);
}
+/// Match a binary OR operation. Note that while conceptually the operands can
+/// be matched commutatively, \p Commutative defaults to false in line with the
+/// IR-based pattern matching infrastructure. Use m_c_BinaryOr for a commutative
+/// version of the matcher.
+template <typename Op0_t, typename Op1_t, bool Commutative = false>
+inline AllBinaryRecipe_match<Op0_t, Op1_t, Instruction::Or, Commutative>
+m_BinaryOr(const Op0_t &Op0, const Op1_t &Op1) {
+ return m_Binary<Instruction::Or, Op0_t, Op1_t, Commutative>(Op0, Op1);
+}
+
+template <typename Op0_t, typename Op1_t>
+inline AllBinaryRecipe_match<Op0_t, Op1_t, Instruction::Or,
+ /*Commutative*/ true>
+m_c_BinaryOr(const Op0_t &Op0, const Op1_t &Op1) {
+ return m_BinaryOr<Op0_t, Op1_t, /*Commutative*/ true>(Op0, Op1);
+}
+
template <typename Op0_t, typename Op1_t>
-inline AllBinaryRecipe_match<Op0_t, Op1_t, Instruction::Or>
-m_Or(const Op0_t &Op0, const Op1_t &Op1) {
- return m_Binary<Instruction::Or, Op0_t, Op1_t>(Op0, Op1);
+inline BinaryVPInstruction_match<Op0_t, Op1_t, VPInstruction::LogicalAnd>
+m_LogicalAnd(const Op0_t &Op0, const Op1_t &Op1) {
+ return m_VPInstruction<VPInstruction::LogicalAnd, Op0_t, Op1_t>(Op0, Op1);
}
} // namespace VPlanPatternMatch
} // namespace llvm
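
`BinaryRecipe_match` now carries a `Commutative` flag: when the straight match fails, the operands are retried in swapped order, mirroring the `m_c_*` matchers in IR-level `PatternMatch`. A sketch of the behavioral difference; the recipe `R` is assumed to compute `Or(B, Not(A))` with `B` not itself a Not, and the include path reflects that this is a private header inside `lib/Transforms/Vectorize`:

```cpp
// VPlanPatternMatch.h is a private header, so this sketch only makes
// sense in a file living next to it in lib/Transforms/Vectorize.
#include "VPlanPatternMatch.h"

using namespace llvm;
using namespace llvm::VPlanPatternMatch;

// The order-sensitive matcher expects the Not as the first operand and
// fails on Or(B, Not(A)); the commutative variant retries with the
// operands swapped and succeeds.
static void demo(VPRecipeBase &R) {
  VPValue *A = nullptr, *B = nullptr;
  bool Plain = match(&R, m_BinaryOr(m_Not(m_VPValue(A)), m_VPValue(B)));
  bool Comm = match(&R, m_c_BinaryOr(m_Not(m_VPValue(A)), m_VPValue(B)));
  (void)Plain; // false: operand order differs
  (void)Comm;  // true: matched after swapping
}
```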
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index c0eb6d710ad3..422579ea8b84 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -935,6 +935,19 @@ static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) {
#endif
}
+ // Simplify (X && Y) || (X && !Y) -> X.
+ // TODO: Split up into simpler, modular combines: (X && Y) || (X && Z) into X
+ // && (Y || Z) and (X || !X) into true. This requires queuing newly created
+ // recipes to be visited during simplification.
+ VPValue *X, *Y, *X1, *Y1;
+ if (match(&R,
+ m_c_BinaryOr(m_LogicalAnd(m_VPValue(X), m_VPValue(Y)),
+ m_LogicalAnd(m_VPValue(X1), m_Not(m_VPValue(Y1))))) &&
+ X == X1 && Y == Y1) {
+ R.getVPSingleValue()->replaceAllUsesWith(X);
+ return;
+ }
+
if (match(&R, m_CombineOr(m_Mul(m_VPValue(A), m_SpecificInt(1)),
m_Mul(m_SpecificInt(1), m_VPValue(A)))))
return R.getVPSingleValue()->replaceAllUsesWith(A);
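
The new combine is the standard Boolean absorption identity, written out with \(\land\) standing for `LogicalAnd` and \(\lor\) for the or:

```latex
\begin{align*}
(X \land Y) \lor (X \land \lnot Y)
  &= X \land (Y \lor \lnot Y) && \text{distributivity} \\
  &= X \land \top             && \text{complement} \\
  &= X                        && \text{identity.}
\end{align*}
```

The `X == X1 && Y == Y1` guard is what ensures both conjunctions really share the same operands before the recipe is replaced by `X`.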
@@ -1305,8 +1318,16 @@ void VPlanTransforms::addActiveLaneMask(
/// %NextEVLIV = add IVSize (cast i32 %VPEVVL to IVSize), %EVLPhi
/// ...
///
-void VPlanTransforms::addExplicitVectorLength(VPlan &Plan) {
+bool VPlanTransforms::tryAddExplicitVectorLength(VPlan &Plan) {
VPBasicBlock *Header = Plan.getVectorLoopRegion()->getEntryBasicBlock();
+ // The transform updates all users of inductions to work based on EVL, instead
+ // of the VF directly. At the moment, widened inductions cannot be updated, so
+ // bail out if the plan contains any.
+ if (any_of(Header->phis(), [](VPRecipeBase &Phi) {
+ return (isa<VPWidenIntOrFpInductionRecipe>(&Phi) ||
+ isa<VPWidenPointerInductionRecipe>(&Phi));
+ }))
+ return false;
auto *CanonicalIVPHI = Plan.getCanonicalIV();
VPValue *StartV = CanonicalIVPHI->getStartValue();
@@ -1364,6 +1385,7 @@ void VPlanTransforms::addExplicitVectorLength(VPlan &Plan) {
CanonicalIVIncrement->setOperand(0, CanonicalIVPHI);
// TODO: support unroll factor > 1.
Plan.setUF(1);
+ return true;
}
void VPlanTransforms::dropPoisonGeneratingRecipes(
@@ -1402,7 +1424,7 @@ void VPlanTransforms::dropPoisonGeneratingRecipes(
// for dependence analysis). Instead, replace it with an equivalent Add.
// This is possible as all users of the disjoint OR only access lanes
// where the operands are disjoint or poison otherwise.
- if (match(RecWithFlags, m_Or(m_VPValue(A), m_VPValue(B))) &&
+ if (match(RecWithFlags, m_BinaryOr(m_VPValue(A), m_VPValue(B))) &&
RecWithFlags->isDisjoint()) {
VPBuilder Builder(RecWithFlags);
VPInstruction *New = Builder.createOverflowingOp(
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
index 0cbc70713d9c..96b8a6639723 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
@@ -104,7 +104,8 @@ struct VPlanTransforms {
/// VPCanonicalIVPHIRecipe with a VPEVLBasedIVPHIRecipe.
/// VPCanonicalIVPHIRecipe is only used to control the loop after
/// this transformation.
- static void addExplicitVectorLength(VPlan &Plan);
+  /// \returns true if the transformation succeeds, false otherwise.
+ static bool tryAddExplicitVectorLength(VPlan &Plan);
};
} // namespace llvm
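
Because the transform can now decline (plans with widened inductions are rejected up front), callers are expected to branch on the result instead of assuming success. A hypothetical caller-side sketch, not the upstream caller logic:

```cpp
// Try the EVL rewrite and keep the plain canonical-IV loop when it
// declines. The transform bails out before modifying anything, so the
// plan is intact on failure.
bool UsesEVL = VPlanTransforms::tryAddExplicitVectorLength(Plan);
if (!UsesEVL) {
  // Fall back to vectorizing without explicit-vector-length control.
}
```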
diff --git a/llvm/lib/Transforms/Vectorize/VPlanValue.h b/llvm/lib/Transforms/Vectorize/VPlanValue.h
index 96d04271850f..8d945f6f2b8e 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanValue.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanValue.h
@@ -261,11 +261,6 @@ public:
New->addUser(*this);
}
- void removeLastOperand() {
- VPValue *Op = Operands.pop_back_val();
- Op->removeUser(*this);
- }
-
typedef SmallVectorImpl<VPValue *>::iterator operand_iterator;
typedef SmallVectorImpl<VPValue *>::const_iterator const_operand_iterator;
typedef iterator_range<operand_iterator> operand_range;
diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
index 9d43fb4ab607..b5a292841172 100644
--- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
+++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
@@ -1668,6 +1668,86 @@ bool VectorCombine::foldShuffleOfShuffles(Instruction &I) {
return true;
}
+using InstLane = std::pair<Value *, int>;
+
+static InstLane lookThroughShuffles(Value *V, int Lane) {
+ while (auto *SV = dyn_cast<ShuffleVectorInst>(V)) {
+ unsigned NumElts =
+ cast<FixedVectorType>(SV->getOperand(0)->getType())->getNumElements();
+ int M = SV->getMaskValue(Lane);
+ if (M < 0)
+ return {nullptr, PoisonMaskElem};
+ if (static_cast<unsigned>(M) < NumElts) {
+ V = SV->getOperand(0);
+ Lane = M;
+ } else {
+ V = SV->getOperand(1);
+ Lane = M - NumElts;
+ }
+ }
+ return InstLane{V, Lane};
+}
+
+static SmallVector<InstLane>
+generateInstLaneVectorFromOperand(ArrayRef<InstLane> Item, int Op) {
+ SmallVector<InstLane> NItem;
+ for (InstLane IL : Item) {
+ auto [V, Lane] = IL;
+ InstLane OpLane =
+ V ? lookThroughShuffles(cast<Instruction>(V)->getOperand(Op), Lane)
+ : InstLane{nullptr, PoisonMaskElem};
+ NItem.emplace_back(OpLane);
+ }
+ return NItem;
+}
+
+static Value *generateNewInstTree(ArrayRef<InstLane> Item, FixedVectorType *Ty,
+ const SmallPtrSet<Value *, 4> &IdentityLeafs,
+ const SmallPtrSet<Value *, 4> &SplatLeafs,
+ IRBuilder<> &Builder) {
+ auto [FrontV, FrontLane] = Item.front();
+
+ if (IdentityLeafs.contains(FrontV) &&
+ all_of(drop_begin(enumerate(Item)), [Item](const auto &E) {
+ Value *FrontV = Item.front().first;
+ auto [V, Lane] = E.value();
+ return !V || (V == FrontV && Lane == (int)E.index());
+ })) {
+ return FrontV;
+ }
+ if (SplatLeafs.contains(FrontV)) {
+ if (auto *ILI = dyn_cast<Instruction>(FrontV))
+ Builder.SetInsertPoint(*ILI->getInsertionPointAfterDef());
+ else if (auto *Arg = dyn_cast<Argument>(FrontV))
+ Builder.SetInsertPointPastAllocas(Arg->getParent());
+ SmallVector<int, 16> Mask(Ty->getNumElements(), FrontLane);
+ return Builder.CreateShuffleVector(FrontV, Mask);
+ }
+
+ auto *I = cast<Instruction>(FrontV);
+ auto *II = dyn_cast<IntrinsicInst>(I);
+ unsigned NumOps = I->getNumOperands() - (II ? 1 : 0);
+ SmallVector<Value *> Ops(NumOps);
+ for (unsigned Idx = 0; Idx < NumOps; Idx++) {
+ if (II && isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(), Idx)) {
+ Ops[Idx] = II->getOperand(Idx);
+ continue;
+ }
+ Ops[Idx] = generateNewInstTree(generateInstLaneVectorFromOperand(Item, Idx),
+ Ty, IdentityLeafs, SplatLeafs, Builder);
+ }
+ Builder.SetInsertPoint(I);
+ Type *DstTy =
+ FixedVectorType::get(I->getType()->getScalarType(), Ty->getNumElements());
+ if (auto *BI = dyn_cast<BinaryOperator>(I))
+ return Builder.CreateBinOp((Instruction::BinaryOps)BI->getOpcode(), Ops[0],
+ Ops[1]);
+ if (II)
+ return Builder.CreateIntrinsic(DstTy, II->getIntrinsicID(), Ops);
+  assert(isa<UnaryInstruction>(I) && "Unexpected instruction type in generateNewInstTree");
+ return Builder.CreateUnOp((Instruction::UnaryOps)I->getOpcode(), Ops[0]);
+}
+
// Starting from a shuffle, look up through operands tracking the shuffled index
// of each lane. If we can simplify away the shuffles to identities then
// do so.
@@ -1677,42 +1757,9 @@ bool VectorCombine::foldShuffleToIdentity(Instruction &I) {
!isa<Instruction>(I.getOperand(1)))
return false;
- using InstLane = std::pair<Value *, int>;
-
- auto LookThroughShuffles = [](Value *V, int Lane) -> InstLane {
- while (auto *SV = dyn_cast<ShuffleVectorInst>(V)) {
- unsigned NumElts =
- cast<FixedVectorType>(SV->getOperand(0)->getType())->getNumElements();
- int M = SV->getMaskValue(Lane);
- if (M < 0)
- return {nullptr, PoisonMaskElem};
- else if (M < (int)NumElts) {
- V = SV->getOperand(0);
- Lane = M;
- } else {
- V = SV->getOperand(1);
- Lane = M - NumElts;
- }
- }
- return InstLane{V, Lane};
- };
-
- auto GenerateInstLaneVectorFromOperand =
- [&LookThroughShuffles](ArrayRef<InstLane> Item, int Op) {
- SmallVector<InstLane> NItem;
- for (InstLane V : Item) {
- NItem.emplace_back(
- !V.first
- ? InstLane{nullptr, PoisonMaskElem}
- : LookThroughShuffles(
- cast<Instruction>(V.first)->getOperand(Op), V.second));
- }
- return NItem;
- };
-
SmallVector<InstLane> Start(Ty->getNumElements());
for (unsigned M = 0, E = Ty->getNumElements(); M < E; ++M)
- Start[M] = LookThroughShuffles(&I, M);
+ Start[M] = lookThroughShuffles(&I, M);
SmallVector<SmallVector<InstLane>> Worklist;
Worklist.push_back(Start);
@@ -1720,125 +1767,92 @@ bool VectorCombine::foldShuffleToIdentity(Instruction &I) {
unsigned NumVisited = 0;
while (!Worklist.empty()) {
- SmallVector<InstLane> Item = Worklist.pop_back_val();
if (++NumVisited > MaxInstrsToScan)
return false;
+ SmallVector<InstLane> Item = Worklist.pop_back_val();
+ auto [FrontV, FrontLane] = Item.front();
+
// If we found an undef first lane then bail out to keep things simple.
- if (!Item[0].first)
+ if (!FrontV)
return false;
// Look for an identity value.
- if (Item[0].second == 0 &&
- cast<FixedVectorType>(Item[0].first->getType())->getNumElements() ==
+ if (!FrontLane &&
+ cast<FixedVectorType>(FrontV->getType())->getNumElements() ==
Ty->getNumElements() &&
- all_of(drop_begin(enumerate(Item)), [&](const auto &E) {
- return !E.value().first || (E.value().first == Item[0].first &&
+ all_of(drop_begin(enumerate(Item)), [Item](const auto &E) {
+ Value *FrontV = Item.front().first;
+ return !E.value().first || (E.value().first == FrontV &&
E.value().second == (int)E.index());
})) {
- IdentityLeafs.insert(Item[0].first);
+ IdentityLeafs.insert(FrontV);
continue;
}
// Look for a splat value.
- if (all_of(drop_begin(Item), [&](InstLane &IL) {
- return !IL.first ||
- (IL.first == Item[0].first && IL.second == Item[0].second);
+ if (all_of(drop_begin(Item), [Item](InstLane &IL) {
+ auto [FrontV, FrontLane] = Item.front();
+ auto [V, Lane] = IL;
+ return !V || (V == FrontV && Lane == FrontLane);
})) {
- SplatLeafs.insert(Item[0].first);
+ SplatLeafs.insert(FrontV);
continue;
}
// We need each element to be the same type of value, and check that each
// element has a single use.
- if (!all_of(drop_begin(Item), [&](InstLane IL) {
- if (!IL.first)
+ if (!all_of(drop_begin(Item), [Item](InstLane IL) {
+ Value *FrontV = Item.front().first;
+ Value *V = IL.first;
+ if (!V)
return true;
- if (auto *I = dyn_cast<Instruction>(IL.first); I && !I->hasOneUse())
+ if (auto *I = dyn_cast<Instruction>(V); I && !I->hasOneUse())
return false;
- if (IL.first->getValueID() != Item[0].first->getValueID())
+ if (V->getValueID() != FrontV->getValueID())
return false;
- if (isa<CallInst>(IL.first) && !isa<IntrinsicInst>(IL.first))
+ if (isa<CallInst>(V) && !isa<IntrinsicInst>(V))
return false;
- auto *II = dyn_cast<IntrinsicInst>(IL.first);
- return !II ||
- (isa<IntrinsicInst>(Item[0].first) &&
- II->getIntrinsicID() ==
- cast<IntrinsicInst>(Item[0].first)->getIntrinsicID());
+ auto *II = dyn_cast<IntrinsicInst>(V);
+ return !II || (isa<IntrinsicInst>(FrontV) &&
+ II->getIntrinsicID() ==
+ cast<IntrinsicInst>(FrontV)->getIntrinsicID());
}))
return false;
// Check the operator is one that we support. We exclude div/rem in case
// they hit UB from poison lanes.
- if (isa<BinaryOperator>(Item[0].first) &&
- !cast<BinaryOperator>(Item[0].first)->isIntDivRem()) {
- Worklist.push_back(GenerateInstLaneVectorFromOperand(Item, 0));
- Worklist.push_back(GenerateInstLaneVectorFromOperand(Item, 1));
- } else if (isa<UnaryOperator>(Item[0].first)) {
- Worklist.push_back(GenerateInstLaneVectorFromOperand(Item, 0));
- } else if (auto *II = dyn_cast<IntrinsicInst>(Item[0].first);
+ if (isa<BinaryOperator>(FrontV) &&
+ !cast<BinaryOperator>(FrontV)->isIntDivRem()) {
+ Worklist.push_back(generateInstLaneVectorFromOperand(Item, 0));
+ Worklist.push_back(generateInstLaneVectorFromOperand(Item, 1));
+ } else if (isa<UnaryOperator>(FrontV)) {
+ Worklist.push_back(generateInstLaneVectorFromOperand(Item, 0));
+ } else if (auto *II = dyn_cast<IntrinsicInst>(FrontV);
II && isTriviallyVectorizable(II->getIntrinsicID())) {
for (unsigned Op = 0, E = II->getNumOperands() - 1; Op < E; Op++) {
if (isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(), Op)) {
- if (!all_of(drop_begin(Item), [&](InstLane &IL) {
- return !IL.first ||
- (cast<Instruction>(IL.first)->getOperand(Op) ==
- cast<Instruction>(Item[0].first)->getOperand(Op));
+ if (!all_of(drop_begin(Item), [Item, Op](InstLane &IL) {
+ Value *FrontV = Item.front().first;
+ Value *V = IL.first;
+ return !V || (cast<Instruction>(V)->getOperand(Op) ==
+ cast<Instruction>(FrontV)->getOperand(Op));
}))
return false;
continue;
}
- Worklist.push_back(GenerateInstLaneVectorFromOperand(Item, Op));
+ Worklist.push_back(generateInstLaneVectorFromOperand(Item, Op));
}
} else {
return false;
}
}
+ if (NumVisited <= 1)
+ return false;
+
// If we got this far, we know the shuffles are superfluous and can be
// removed. Scan through again and generate the new tree of instructions.
- std::function<Value *(ArrayRef<InstLane>)> Generate =
- [&](ArrayRef<InstLane> Item) -> Value * {
- if (IdentityLeafs.contains(Item[0].first) &&
- all_of(drop_begin(enumerate(Item)), [&](const auto &E) {
- return !E.value().first || (E.value().first == Item[0].first &&
- E.value().second == (int)E.index());
- })) {
- return Item[0].first;
- }
- if (SplatLeafs.contains(Item[0].first)) {
- if (auto ILI = dyn_cast<Instruction>(Item[0].first))
- Builder.SetInsertPoint(*ILI->getInsertionPointAfterDef());
- else if (isa<Argument>(Item[0].first))
- Builder.SetInsertPointPastAllocas(I.getParent()->getParent());
- SmallVector<int, 16> Mask(Ty->getNumElements(), Item[0].second);
- return Builder.CreateShuffleVector(Item[0].first, Mask);
- }
-
- auto *I = cast<Instruction>(Item[0].first);
- auto *II = dyn_cast<IntrinsicInst>(I);
- unsigned NumOps = I->getNumOperands() - (II ? 1 : 0);
- SmallVector<Value *> Ops(NumOps);
- for (unsigned Idx = 0; Idx < NumOps; Idx++) {
- if (II && isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(), Idx)) {
- Ops[Idx] = II->getOperand(Idx);
- continue;
- }
- Ops[Idx] = Generate(GenerateInstLaneVectorFromOperand(Item, Idx));
- }
- Builder.SetInsertPoint(I);
- Type *DstTy = FixedVectorType::get(I->getType()->getScalarType(),
- Ty->getNumElements());
- if (auto BI = dyn_cast<BinaryOperator>(I))
- return Builder.CreateBinOp((Instruction::BinaryOps)BI->getOpcode(),
- Ops[0], Ops[1]);
- if (II)
- return Builder.CreateIntrinsic(DstTy, II->getIntrinsicID(), Ops);
- assert(isa<UnaryInstruction>(I) &&
- "Unexpected instruction type in Generate");
- return Builder.CreateUnOp((Instruction::UnaryOps)I->getOpcode(), Ops[0]);
- };
-
- Value *V = Generate(Start);
+ Value *V = generateNewInstTree(Start, Ty, IdentityLeafs, SplatLeafs, Builder);
replaceValue(I, *V);
return true;
}
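
The extracted `lookThroughShuffles` traces one output lane backwards through a chain of `shufflevector`s until it reaches a non-shuffle value, returning the source value and lane that feed it. Below is a self-contained sketch that builds such a chain; the helper itself is file-local to VectorCombine.cpp, so the walk is worked out in the comments rather than called directly.

```cpp
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  auto *VecTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
  auto *FTy = FunctionType::get(Type::getVoidTy(Ctx), {VecTy, VecTy}, false);
  auto *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));

  Value *VA = F->getArg(0), *VB = F->getArg(1);
  // Lane 0 of Inner reads mask element 6; 6 >= NumElts (4), so it comes
  // from operand 1 (VB) at lane 6 - 4 = 2.
  Value *Inner = B.CreateShuffleVector(VA, VB, {6, 1, 2, 3});
  // Lane 0 of Outer reads mask element 0, i.e. Inner's lane 0, and the
  // walk repeats because Inner is itself a shuffle.
  Value *Outer = B.CreateShuffleVector(Inner, PoisonValue::get(VecTy),
                                       {0, 1, 2, 3});
  // Tracing Outer lane 0 therefore ends at {VB, lane 2}; a poison mask
  // element anywhere along the way would instead stop the walk with
  // {nullptr, PoisonMaskElem}.
  (void)Outer;
  B.CreateRetVoid();
  return 0;
}
```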
diff --git a/llvm/lib/WindowsManifest/WindowsManifestMerger.cpp b/llvm/lib/WindowsManifest/WindowsManifestMerger.cpp
index 8f5c53faf91e..b59b666ce04f 100644
--- a/llvm/lib/WindowsManifest/WindowsManifestMerger.cpp
+++ b/llvm/lib/WindowsManifest/WindowsManifestMerger.cpp
@@ -669,7 +669,6 @@ WindowsManifestMerger::WindowsManifestMergerImpl::getMergedManifest() {
xmlDocSetRootElement(OutputDoc.get(), CombinedRoot);
assert(nullptr == xmlDocGetRootElement(CombinedDoc));
- xmlKeepBlanksDefault(0);
xmlChar *Buff = nullptr;
xmlDocDumpFormatMemoryEnc(OutputDoc.get(), &Buff, &BufferSize, "UTF-8", 1);
Buffer.reset(Buff);
diff --git a/llvm/runtimes/CMakeLists.txt b/llvm/runtimes/CMakeLists.txt
index 8a3ec1e3300d..7fd4e61cc46e 100644
--- a/llvm/runtimes/CMakeLists.txt
+++ b/llvm/runtimes/CMakeLists.txt
@@ -93,6 +93,7 @@ function(builtin_default_target compiler_rt_path)
SANITIZER
USE_TOOLCHAIN
TARGET_TRIPLE ${LLVM_TARGET_TRIPLE}
+ FOLDER "Compiler-RT"
${EXTRA_ARGS})
endfunction()
@@ -128,6 +129,7 @@ function(builtin_register_target compiler_rt_path name)
${COMMON_CMAKE_ARGS}
${${name}_extra_args}
USE_TOOLCHAIN
+ FOLDER "Compiler-RT"
${EXTRA_ARGS} ${ARG_EXTRA_ARGS})
endfunction()
@@ -148,6 +150,10 @@ if(compiler_rt_path)
add_custom_target(builtins)
add_custom_target(install-builtins)
add_custom_target(install-builtins-stripped)
+ set_target_properties(
+ builtins install-builtins install-builtins-stripped
+ PROPERTIES FOLDER "Compiler-RT"
+ )
endif()
foreach(target ${LLVM_BUILTIN_TARGETS})
@@ -263,6 +269,7 @@ function(runtime_default_target)
${SUB_INSTALL_TARGETS}
USE_TOOLCHAIN
TARGET_TRIPLE ${LLVM_TARGET_TRIPLE}
+ FOLDER "Runtimes"
${EXTRA_ARGS})
endfunction()
@@ -388,6 +395,7 @@ function(runtime_register_target name)
EXTRA_TARGETS ${${name}_extra_targets}
${${name}_test_targets}
USE_TOOLCHAIN
+ FOLDER "Runtimes"
${EXTRA_ARGS} ${ARG_EXTRA_ARGS})
add_dependencies(runtimes runtimes-${name})
@@ -401,14 +409,17 @@ function(runtime_register_target name)
foreach(runtime_name ${runtime_names})
if(NOT TARGET ${runtime_name})
add_custom_target(${runtime_name})
+ set_target_properties(${runtime_name} PROPERTIES FOLDER "${runtime_name}")
endif()
add_dependencies(${runtime_name} ${runtime_name}-${name})
if(NOT TARGET install-${runtime_name})
add_custom_target(install-${runtime_name})
+ set_target_properties(install-${runtime_name} PROPERTIES FOLDER "${runtime_name}")
endif()
add_dependencies(install-${runtime_name} install-${runtime_name}-${name})
if(NOT TARGET install-${runtime_name}-stripped)
add_custom_target(install-${runtime_name}-stripped)
+      set_target_properties(install-${runtime_name}-stripped PROPERTIES FOLDER "${runtime_name}")
endif()
add_dependencies(install-${runtime_name}-stripped install-${runtime_name}-${name}-stripped)
endforeach()
@@ -507,9 +518,17 @@ if(runtimes)
add_custom_target(runtimes-configure)
add_custom_target(install-runtimes)
add_custom_target(install-runtimes-stripped)
+ set_target_properties(
+ runtimes runtimes-configure install-runtimes install-runtimes-stripped
+ PROPERTIES FOLDER "Runtimes"
+ )
if(LLVM_INCLUDE_TESTS)
add_custom_target(check-runtimes)
add_custom_target(runtimes-test-depends)
+ set_target_properties(
+ check-runtimes runtimes-test-depends
+ PROPERTIES FOLDER "Runtimes"
+ )
set(test_targets "")
endif()
if(LLVM_RUNTIME_DISTRIBUTION_COMPONENTS)
@@ -517,6 +536,10 @@ if(runtimes)
add_custom_target(${component})
add_custom_target(install-${component})
add_custom_target(install-${component}-stripped)
+ set_target_properties(
+ ${component} install-${component} install-${component}-stripped
+ PROPERTIES FOLDER "${component}"
+ )
endforeach()
endif()
endif()
diff --git a/llvm/test/Analysis/CostModel/AArch64/cast.ll b/llvm/test/Analysis/CostModel/AArch64/cast.ll
index 0cd444f84985..fa778864ae97 100644
--- a/llvm/test/Analysis/CostModel/AArch64/cast.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/cast.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64 %s | FileCheck --check-prefixes=CHECK,CHECK-NOFP16 %s
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64 -mattr=+sve -force-streaming-compatible-sve %s | FileCheck --check-prefixes=SVE,SVE128-NO-NEON %s
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64 -mattr=+sve -force-streaming-compatible %s | FileCheck --check-prefixes=SVE,SVE128-NO-NEON %s
; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64 -mattr=+fullfp16 %s | FileCheck --check-prefixes=CHECK,CHECK-FP16 %s
; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64 -mattr=+sve -aarch64-sve-vector-bits-min=256 %s | FileCheck --check-prefixes=SVE,FIXED-MIN-256 %s
; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64 -mattr=+sve -aarch64-sve-vector-bits-min=2048 %s | FileCheck --check-prefixes=SVE,FIXED-MIN-2048 %s
diff --git a/llvm/test/Analysis/CostModel/AArch64/cttz_elts.ll b/llvm/test/Analysis/CostModel/AArch64/cttz_elts.ll
index cc1532ee33dc..e1a9ee114d26 100644
--- a/llvm/test/Analysis/CostModel/AArch64/cttz_elts.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/cttz_elts.ll
@@ -13,15 +13,15 @@ define void @foo_no_vscale_range() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i32.nxv8i1.zip = call i32 @llvm.experimental.cttz.elts.i32.nxv8i1(<vscale x 8 x i1> undef, i1 true)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i32.nxv16i1.zip = call i32 @llvm.experimental.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> undef, i1 true)
; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %res.i32.nxv32i1.zip = call i32 @llvm.experimental.cttz.elts.i32.nxv32i1(<vscale x 32 x i1> undef, i1 true)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %res.i64.v2i1.zip = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> undef, i1 true)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %res.i64.v4i1.zip = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> undef, i1 true)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %res.i64.v8i1.zip = call i64 @llvm.experimental.cttz.elts.i64.v8i1(<8 x i1> undef, i1 true)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %res.i64.v16i1.zip = call i64 @llvm.experimental.cttz.elts.i64.v16i1(<16 x i1> undef, i1 true)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i64.v2i1.zip = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> undef, i1 true)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i64.v4i1.zip = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> undef, i1 true)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i64.v8i1.zip = call i64 @llvm.experimental.cttz.elts.i64.v8i1(<8 x i1> undef, i1 true)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i64.v16i1.zip = call i64 @llvm.experimental.cttz.elts.i64.v16i1(<16 x i1> undef, i1 true)
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %res.i64.v32i1.zip = call i64 @llvm.experimental.cttz.elts.i64.v32i1(<32 x i1> undef, i1 true)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %res.i32.v2i1.zip = call i32 @llvm.experimental.cttz.elts.i32.v2i1(<2 x i1> undef, i1 true)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %res.i32.v4i1.zip = call i32 @llvm.experimental.cttz.elts.i32.v4i1(<4 x i1> undef, i1 true)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %res.i32.v8i1.zip = call i32 @llvm.experimental.cttz.elts.i32.v8i1(<8 x i1> undef, i1 true)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %res.i32.v16i1.zip = call i32 @llvm.experimental.cttz.elts.i32.v16i1(<16 x i1> undef, i1 true)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i32.v2i1.zip = call i32 @llvm.experimental.cttz.elts.i32.v2i1(<2 x i1> undef, i1 true)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i32.v4i1.zip = call i32 @llvm.experimental.cttz.elts.i32.v4i1(<4 x i1> undef, i1 true)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i32.v8i1.zip = call i32 @llvm.experimental.cttz.elts.i32.v8i1(<8 x i1> undef, i1 true)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i32.v16i1.zip = call i32 @llvm.experimental.cttz.elts.i32.v16i1(<16 x i1> undef, i1 true)
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %res.i32.v32i1.zip = call i32 @llvm.experimental.cttz.elts.i32.v32i1(<32 x i1> undef, i1 true)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i64.nxv2i1.nzip = call i64 @llvm.experimental.cttz.elts.i64.nxv2i1(<vscale x 2 x i1> undef, i1 false)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i64.nxv4i1.nzip = call i64 @llvm.experimental.cttz.elts.i64.nxv4i1(<vscale x 4 x i1> undef, i1 false)
@@ -33,15 +33,15 @@ define void @foo_no_vscale_range() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i32.nxv8i1.nzip = call i32 @llvm.experimental.cttz.elts.i32.nxv8i1(<vscale x 8 x i1> undef, i1 false)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i32.nxv16i1.nzip = call i32 @llvm.experimental.cttz.elts.i32.nxv16i1(<vscale x 16 x i1> undef, i1 false)
; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %res.i32.nxv32i1.nzip = call i32 @llvm.experimental.cttz.elts.i32.nxv32i1(<vscale x 32 x i1> undef, i1 false)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %res.i64.v2i1.nzip = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> undef, i1 false)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %res.i64.v4i1.nzip = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> undef, i1 false)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %res.i64.v8i1.nzip = call i64 @llvm.experimental.cttz.elts.i64.v8i1(<8 x i1> undef, i1 false)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %res.i64.v16i1.nzip = call i64 @llvm.experimental.cttz.elts.i64.v16i1(<16 x i1> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i64.v2i1.nzip = call i64 @llvm.experimental.cttz.elts.i64.v2i1(<2 x i1> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i64.v4i1.nzip = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i64.v8i1.nzip = call i64 @llvm.experimental.cttz.elts.i64.v8i1(<8 x i1> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i64.v16i1.nzip = call i64 @llvm.experimental.cttz.elts.i64.v16i1(<16 x i1> undef, i1 false)
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %res.i64.v32i1.nzip = call i64 @llvm.experimental.cttz.elts.i64.v32i1(<32 x i1> undef, i1 false)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %res.i32.v2i1.nzip = call i32 @llvm.experimental.cttz.elts.i32.v2i1(<2 x i1> undef, i1 false)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %res.i32.v4i1.nzip = call i32 @llvm.experimental.cttz.elts.i32.v4i1(<4 x i1> undef, i1 false)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %res.i32.v8i1.nzip = call i32 @llvm.experimental.cttz.elts.i32.v8i1(<8 x i1> undef, i1 false)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %res.i32.v16i1.nzip = call i32 @llvm.experimental.cttz.elts.i32.v16i1(<16 x i1> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i32.v2i1.nzip = call i32 @llvm.experimental.cttz.elts.i32.v2i1(<2 x i1> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i32.v4i1.nzip = call i32 @llvm.experimental.cttz.elts.i32.v4i1(<4 x i1> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i32.v8i1.nzip = call i32 @llvm.experimental.cttz.elts.i32.v8i1(<8 x i1> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res.i32.v16i1.nzip = call i32 @llvm.experimental.cttz.elts.i32.v16i1(<16 x i1> undef, i1 false)
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %res.i32.v32i1.nzip = call i32 @llvm.experimental.cttz.elts.i32.v32i1(<32 x i1> undef, i1 false)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
diff --git a/llvm/test/Analysis/CostModel/AMDGPU/shufflevector.ll b/llvm/test/Analysis/CostModel/AMDGPU/shufflevector.ll
index be5cca0765ed..a18156744a36 100644
--- a/llvm/test/Analysis/CostModel/AMDGPU/shufflevector.ll
+++ b/llvm/test/Analysis/CostModel/AMDGPU/shufflevector.ll
@@ -7,603 +7,1140 @@
; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=fiji -cost-kind=code-size -S | FileCheck -check-prefixes=ALL-SIZE,VI-SIZE %s
; END.
-define amdgpu_kernel void @shufflevector_i16() {
+define amdgpu_kernel void @shufflevector_i16(<2 x i16> %vec1, <2 x i16> %vec2) {
; GFX9-10-LABEL: 'shufflevector_i16'
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> zeroinitializer
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 3>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 3>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 3>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 3>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf000 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> zeroinitializer
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf222 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> zeroinitializer
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 3>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 3>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 3>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 3>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> zeroinitializer
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> zeroinitializer
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 3>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 3>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 3>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 3>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> zeroinitializer
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
; GFX9-10-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void
;
; VI-LABEL: 'shufflevector_i16'
-; VI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf00 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> zeroinitializer
-; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf10 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf11 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf22 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 3>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf33 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 3>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 3>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 3>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf32 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf000 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> zeroinitializer
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf222 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf00 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> zeroinitializer
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf10 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf11 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf22 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 3>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf33 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 3>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 3>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 3>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf32 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> zeroinitializer
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf00_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> zeroinitializer
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf10_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf11_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf22_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 3>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf33_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 3>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 3>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 3>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf32_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> zeroinitializer
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
; VI-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void
;
; GFX9-10-SIZE-LABEL: 'shufflevector_i16'
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> zeroinitializer
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 3>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 3>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 3>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 3>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf000 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> zeroinitializer
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf222 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> zeroinitializer
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 3>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 3>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 3>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 3>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> zeroinitializer
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> zeroinitializer
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 3>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 3>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 3>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 3>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> zeroinitializer
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
; VI-SIZE-LABEL: 'shufflevector_i16'
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf00 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> zeroinitializer
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf10 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf11 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf22 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 3>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf33 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 3>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 3>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 3>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf32 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf000 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> zeroinitializer
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf222 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf00 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> zeroinitializer
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf10 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf11 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf22 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 3>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf33 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 3>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 3>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 3>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf32 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> zeroinitializer
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf00_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> zeroinitializer
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf10_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf11_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf22_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 3>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf33_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 3>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 3>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 3>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf32_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> zeroinitializer
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
- %shuf00 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> zeroinitializer
- %shuf01 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 1>
- %shuf10 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
- %shuf11 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 1>
- %shuf02 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 2>
- %shuf20 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 0>
- %shuf22 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 2>
- %shuf03 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 3>
- %shuf30 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 0>
- %shuf33 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 3>
- %shuf12 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 2>
- %shuf21 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 1>
- %shuf13 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 3>
- %shuf31 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 1>
- %shuf23 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 3>
- %shuf32 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 2>
- %shuf000 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 0>
- %shuf001 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 1>
- %shuf010 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 0>
- %shuf011 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 1>
- %shuf100 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 0>
- %shuf101 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 1>
- %shuf110 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 0>
- %shuf111 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 1>
- %shuf002 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 2>
- %shuf020 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 0>
- %shuf022 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 2>
- %shuf200 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 0>
- %shuf202 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 2>
- %shuf220 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 0>
- %shuf222 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 2>
- %shuf112 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 2>
- %shuf121 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 1>
- %shuf122 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 2>
- %shuf211 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 1>
- %shuf212 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 2>
- %shuf221 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 1>
+ %shuf00 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> zeroinitializer
+ %shuf01 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 1>
+ %shuf10 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 0>
+ %shuf11 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 1>
+ %shuf02 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 2>
+ %shuf20 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 0>
+ %shuf22 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 2>
+ %shuf03 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 3>
+ %shuf30 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 0>
+ %shuf33 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 3>
+ %shuf12 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 2>
+ %shuf21 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 1>
+ %shuf13 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 3>
+ %shuf31 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 1>
+ %shuf23 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 3>
+ %shuf32 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 2>
+ %shuf000 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 0>
+ %shuf001 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+ %shuf010 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+ %shuf011 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+ %shuf100 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+ %shuf101 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+ %shuf110 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+ %shuf111 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+ %shuf002 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+ %shuf020 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+ %shuf022 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+ %shuf200 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+ %shuf202 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+ %shuf220 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+ %shuf222 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+ %shuf112 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+ %shuf121 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+ %shuf122 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+ %shuf211 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+ %shuf212 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+ %shuf221 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+ %shuf00_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> zeroinitializer
+ %shuf01_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 1>
+ %shuf10_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 0>
+ %shuf11_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 1>
+ %shuf02_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 2>
+ %shuf20_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 0>
+ %shuf22_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 2>
+ %shuf03_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 3>
+ %shuf30_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 0>
+ %shuf33_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 3>
+ %shuf12_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 2>
+ %shuf21_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 1>
+ %shuf13_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 3>
+ %shuf31_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 1>
+ %shuf23_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 3>
+ %shuf32_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 2>
+ %shuf000_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 0>
+ %shuf001_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+ %shuf010_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+ %shuf011_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+ %shuf100_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+ %shuf101_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+ %shuf110_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+ %shuf111_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+ %shuf002_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+ %shuf020_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+ %shuf022_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+ %shuf200_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+ %shuf202_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+ %shuf220_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+ %shuf222_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+ %shuf112_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+ %shuf121_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+ %shuf122_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+ %shuf211_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+ %shuf212_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+ %shuf221_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
ret void
}
; Should not assert
-define amdgpu_kernel void @shufflevector_i8() {
+define amdgpu_kernel void @shufflevector_i8(<2 x i8> %vec1, <2 x i8> %vec2) {
; ALL-LABEL: 'shufflevector_i8'
-; ALL-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf00 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> zeroinitializer
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 0, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf10 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf11 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf02 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 0, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf20 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf22 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf03 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 0, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf30 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf33 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf12 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf21 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf13 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf31 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf32 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> zeroinitializer
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf001 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 0, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf010 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 1, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf011 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 1, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf100 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 0, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf101 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 0, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf110 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 1, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf111 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 1, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf002 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 0, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf020 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 2, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf022 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 2, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf200 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 0, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf202 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 0, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf220 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 2, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 2, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf112 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 1, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf121 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 2, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf122 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 2, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf211 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 1, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf212 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 1, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf221 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf00 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> zeroinitializer
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf10 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf11 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf02 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf20 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf22 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf03 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 0, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf30 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf33 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf12 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf21 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf13 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf31 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf32 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf000 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> zeroinitializer
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf001 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf010 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf011 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf100 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf101 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf110 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf111 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf002 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf020 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf022 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf200 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf202 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf220 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf222 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf112 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf121 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf122 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf211 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf212 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf221 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf00_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> zeroinitializer
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf10_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf11_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf02_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf20_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf22_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf03_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 0, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf30_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf33_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf12_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf21_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf13_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf31_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf32_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf000_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> zeroinitializer
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf001_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf010_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf011_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf100_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf101_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf110_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf111_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf002_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf020_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf022_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf200_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf202_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf220_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf222_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf112_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf121_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf122_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf211_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf212_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf221_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
; ALL-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void
;
; ALL-SIZE-LABEL: 'shufflevector_i8'
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf00 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> zeroinitializer
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 0, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf10 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf11 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf02 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 0, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf20 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf22 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf03 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 0, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf30 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf33 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf12 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf21 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf13 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf31 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf32 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> zeroinitializer
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf001 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 0, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf010 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 1, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf011 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 1, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf100 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 0, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf101 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 0, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf110 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 1, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf111 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 1, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf002 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 0, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf020 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 2, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf022 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 2, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf200 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 0, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf202 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 0, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf220 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 2, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 2, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf112 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 1, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf121 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 2, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf122 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 2, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf211 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 1, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf212 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 1, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf221 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf00 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> zeroinitializer
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf10 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf11 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf02 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf20 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf22 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf03 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 0, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf30 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf33 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf12 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf21 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf13 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf31 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf32 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf000 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> zeroinitializer
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf001 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf010 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf011 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf100 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf101 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf110 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf111 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf002 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf020 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf022 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf200 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf202 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf220 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf222 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf112 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf121 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf122 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf211 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf212 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf221 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf00_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> zeroinitializer
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf10_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf11_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf02_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf20_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf22_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf03_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 0, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf30_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf33_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf12_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf21_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf13_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf31_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf32_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf000_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> zeroinitializer
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf001_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf010_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf011_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf100_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf101_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf110_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf111_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf002_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf020_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf022_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf200_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf202_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf220_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf222_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf112_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf121_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf122_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf211_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf212_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf221_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
- %shuf00 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> zeroinitializer
- %shuf01 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 0, i32 1>
- %shuf10 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 0>
- %shuf11 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 1>
- %shuf02 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 0, i32 2>
- %shuf20 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 0>
- %shuf22 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 2>
- %shuf03 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 0, i32 3>
- %shuf30 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 0>
- %shuf33 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 3>
- %shuf12 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 2>
- %shuf21 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 1>
- %shuf13 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 3>
- %shuf31 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 1>
- %shuf23 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 3>
- %shuf32 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 2>
- %shuf000 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 0, i32 0>
- %shuf001 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 0, i32 1>
- %shuf010 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 1, i32 0>
- %shuf011 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 1, i32 1>
- %shuf100 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 0, i32 0>
- %shuf101 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 0, i32 1>
- %shuf110 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 1, i32 0>
- %shuf111 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 1, i32 1>
- %shuf002 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 0, i32 2>
- %shuf020 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 2, i32 0>
- %shuf022 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 2, i32 2>
- %shuf200 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 0, i32 0>
- %shuf202 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 0, i32 2>
- %shuf220 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 2, i32 0>
- %shuf222 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 2, i32 2>
- %shuf112 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 1, i32 2>
- %shuf121 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 2, i32 1>
- %shuf122 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 2, i32 2>
- %shuf211 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 1, i32 1>
- %shuf212 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 1, i32 2>
- %shuf221 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 2, i32 1>
+ %shuf00 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> zeroinitializer
+ %shuf01 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 0, i32 1>
+ %shuf10 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 0>
+ %shuf11 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 1>
+ %shuf02 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 0, i32 2>
+ %shuf20 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 0>
+ %shuf22 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 2>
+ %shuf03 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 0, i32 3>
+ %shuf30 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 0>
+ %shuf33 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 3>
+ %shuf12 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 2>
+ %shuf21 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 1>
+ %shuf13 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 3>
+ %shuf31 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 1>
+ %shuf23 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 3>
+ %shuf32 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 2>
+ %shuf000 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 0, i32 0>
+ %shuf001 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+ %shuf010 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+ %shuf011 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+ %shuf100 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+ %shuf101 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+ %shuf110 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+ %shuf111 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+ %shuf002 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+ %shuf020 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+ %shuf022 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+ %shuf200 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+ %shuf202 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+ %shuf220 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+ %shuf222 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+ %shuf112 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+ %shuf121 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+ %shuf122 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+ %shuf211 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+ %shuf212 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+ %shuf221 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+ %shuf00_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> zeroinitializer
+ %shuf01_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 0, i32 1>
+ %shuf10_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 0>
+ %shuf11_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 1>
+ %shuf02_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 0, i32 2>
+ %shuf20_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 0>
+ %shuf22_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 2>
+ %shuf03_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 0, i32 3>
+ %shuf30_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 0>
+ %shuf33_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 3>
+ %shuf12_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 2>
+ %shuf21_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 1>
+ %shuf13_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 3>
+ %shuf31_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 1>
+ %shuf23_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 3>
+ %shuf32_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 2>
+ %shuf000_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 0, i32 0>
+ %shuf001_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+ %shuf010_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+ %shuf011_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+ %shuf100_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+ %shuf101_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+ %shuf110_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+ %shuf111_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+ %shuf002_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+ %shuf020_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+ %shuf022_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+ %shuf200_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+ %shuf202_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+ %shuf220_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+ %shuf222_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+ %shuf112_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+ %shuf121_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+ %shuf122_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+ %shuf211_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+ %shuf212_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+ %shuf221_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
ret void
}
-define amdgpu_kernel void @shufflevector_i32() {
+define amdgpu_kernel void @shufflevector_i32(<2 x i32> %vec1, <2 x i32> %vec2) {
; ALL-LABEL: 'shufflevector_i32'
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> zeroinitializer
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf02 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf20 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf03 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf30 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf12 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf21 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf13 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf31 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf000 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> zeroinitializer
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf001 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 0, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf010 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 1, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf011 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 1, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf100 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 0, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf101 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 0, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf110 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 1, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf111 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 1, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf002 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 0, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf020 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 2, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf022 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 2, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf200 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 0, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf202 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 0, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf220 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 2, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf222 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 2, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf112 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 1, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf121 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 2, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf122 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 2, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf211 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 1, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf212 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 1, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf221 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> zeroinitializer
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf02 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf20 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf03 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 0, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf30 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf12 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf21 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf13 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf31 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf000 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> zeroinitializer
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf001 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf010 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf011 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf100 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf101 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf110 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf111 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf002 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf020 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf022 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf200 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf202 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf220 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf222 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf112 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf121 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf122 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf211 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf212 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf221 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> zeroinitializer
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf02_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf20_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf03_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 0, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf30_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf12_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf21_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf13_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf31_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf000_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> zeroinitializer
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf001_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf010_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf011_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf100_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf101_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf110_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf111_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf002_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf020_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf022_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf200_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf202_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf220_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf222_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf112_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf121_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf122_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf211_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf212_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf221_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
; ALL-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void
;
; ALL-SIZE-LABEL: 'shufflevector_i32'
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> zeroinitializer
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf02 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf20 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf03 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf30 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf12 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf21 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf13 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf31 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf000 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> zeroinitializer
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf001 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 0, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf010 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 1, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf011 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 1, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf100 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 0, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf101 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 0, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf110 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 1, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf111 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 1, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf002 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 0, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf020 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 2, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf022 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 2, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf200 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 0, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf202 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 0, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf220 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 2, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf222 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 2, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf112 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 1, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf121 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 2, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf122 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 2, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf211 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 1, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf212 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 1, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf221 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> zeroinitializer
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf02 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf20 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf03 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 0, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf30 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf12 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf21 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf13 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf31 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf000 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> zeroinitializer
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf001 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf010 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf011 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf100 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf101 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf110 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf111 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf002 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf020 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf022 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf200 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf202 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf220 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf222 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf112 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf121 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf122 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf211 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf212 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf221 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> zeroinitializer
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf02_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf20_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf03_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 0, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf30_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf12_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf21_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf13_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf31_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf000_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> zeroinitializer
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf001_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf010_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf011_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf100_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf101_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf110_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf111_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf002_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf020_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf022_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf200_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf202_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf220_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf222_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf112_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf121_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf122_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf211_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf212_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf221_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
- %shuf00 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> zeroinitializer
- %shuf01 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 1>
- %shuf10 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
- %shuf11 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %shuf02 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 2>
- %shuf20 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 0>
- %shuf22 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 2>
- %shuf03 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 3>
- %shuf30 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 0>
- %shuf33 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 3>
- %shuf12 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 2>
- %shuf21 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 1>
- %shuf13 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 3>
- %shuf31 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 1>
- %shuf23 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuf32 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 2>
- %shuf000 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 0, i32 0>
- %shuf001 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 0, i32 1>
- %shuf010 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 1, i32 0>
- %shuf011 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 1, i32 1>
- %shuf100 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 0, i32 0>
- %shuf101 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 0, i32 1>
- %shuf110 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 1, i32 0>
- %shuf111 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 1, i32 1>
- %shuf002 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 0, i32 2>
- %shuf020 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 2, i32 0>
- %shuf022 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 2, i32 2>
- %shuf200 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 0, i32 0>
- %shuf202 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 0, i32 2>
- %shuf220 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 2, i32 0>
- %shuf222 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 2, i32 2>
- %shuf112 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 1, i32 2>
- %shuf121 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 2, i32 1>
- %shuf122 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 2, i32 2>
- %shuf211 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 1, i32 1>
- %shuf212 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 1, i32 2>
- %shuf221 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 2, i32 1>
+ %shuf00 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> zeroinitializer
+ %shuf01 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 0, i32 1>
+ %shuf10 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 0>
+ %shuf11 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 1>
+ %shuf02 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 0, i32 2>
+ %shuf20 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 0>
+ %shuf22 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 2>
+ %shuf03 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 0, i32 3>
+ %shuf30 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 0>
+ %shuf33 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 3>
+ %shuf12 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 2>
+ %shuf21 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 1>
+ %shuf13 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 3>
+ %shuf31 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 1>
+ %shuf23 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 3>
+ %shuf32 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 2>
+ %shuf000 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 0, i32 0>
+ %shuf001 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+ %shuf010 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+ %shuf011 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+ %shuf100 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+ %shuf101 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+ %shuf110 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+ %shuf111 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+ %shuf002 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+ %shuf020 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+ %shuf022 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+ %shuf200 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+ %shuf202 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+ %shuf220 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+ %shuf222 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+ %shuf112 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+ %shuf121 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+ %shuf122 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+ %shuf211 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+ %shuf212 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+ %shuf221 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+ %shuf00_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> zeroinitializer
+ %shuf01_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 0, i32 1>
+ %shuf10_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 0>
+ %shuf11_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 1>
+ %shuf02_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 0, i32 2>
+ %shuf20_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 0>
+ %shuf22_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 2>
+ %shuf03_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 0, i32 3>
+ %shuf30_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 0>
+ %shuf33_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 3>
+ %shuf12_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 2>
+ %shuf21_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 1>
+ %shuf13_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 3>
+ %shuf31_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 1>
+ %shuf23_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 3>
+ %shuf32_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 2>
+ %shuf000_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 0, i32 0>
+ %shuf001_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+ %shuf010_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+ %shuf011_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+ %shuf100_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+ %shuf101_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+ %shuf110_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+ %shuf111_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+ %shuf002_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+ %shuf020_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+ %shuf022_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+ %shuf200_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+ %shuf202_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+ %shuf220_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+ %shuf222_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+ %shuf112_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+ %shuf121_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+ %shuf122_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+ %shuf211_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+ %shuf212_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+ %shuf221_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
ret void
}
; Other shuffle cases
-define void @shuffle() {
+define void @shuffle(<2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i8> %i8v4, <4 x i8> %i8v4_2, <6 x i8> %i8v6, <6 x i8> %i8v6_2, <8 x i8> %i8v8, <8 x i8> %i8v8_2, <16 x i8> %i8v16, <16 x i8> %i8v16_2, <2 x i16> %i16v2, <2 x i16> %i16v2_2, <4 x i16> %i16v4, <4 x i16> %i16v4_2, <8 x i16> %i16v8, <8 x i16> %i16v8_2, <2 x i32> %i32v2, <2 x i32> %i32v2_2, <4 x i32> %i32v4, <4 x i32> %i32v4_2, <2 x float> %floatv2, <2 x float> %floatv2_2, <4 x float> %floatv4, <4 x float> %floatv4_2, <2 x i64> %i64v2, <2 x i64> %i64v2_2, <2 x double> %doublev2, <2 x double> %doublev2_2) {
; GFX9-10-LABEL: 'shuffle'
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4 = shufflevector <2 x i8> undef, <2 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4 = shufflevector <4 x i8> undef, <4 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8 = shufflevector <2 x i8> undef, <2 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8 = shufflevector <4 x i8> undef, <4 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8 = shufflevector <6 x i8> undef, <6 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8 = shufflevector <8 x i8> undef, <8 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16 = shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i16_2 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4i16_4 = shufflevector <4 x i16> undef, <4 x i16> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v8i16_8 = shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4 = shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8_2 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8_2 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16_2 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16_2, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i16_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i16_2_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i16_4 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i16_4_2 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_8 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_8_2 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4_2 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4_2 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2_2, <2 x i32> <i32 1, i32 0>
; GFX9-10-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void
;
; VI-LABEL: 'shuffle'
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4 = shufflevector <2 x i8> undef, <2 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4 = shufflevector <4 x i8> undef, <4 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8 = shufflevector <2 x i8> undef, <2 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8 = shufflevector <4 x i8> undef, <4 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8 = shufflevector <6 x i8> undef, <6 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8 = shufflevector <8 x i8> undef, <8 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16 = shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i16_2 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4i16_4 = shufflevector <4 x i16> undef, <4 x i16> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v8i16_8 = shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4 = shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8_2 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8_2 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16_2 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16_2, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i16_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i16_2_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2_2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i16_4 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i16_4_2 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_8 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_8_2 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2_2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4_2 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2_2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4_2 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2_2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2_2, <2 x i32> <i32 1, i32 0>
; VI-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void
;
; GFX9-10-SIZE-LABEL: 'shuffle'
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4 = shufflevector <2 x i8> undef, <2 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4 = shufflevector <4 x i8> undef, <4 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8 = shufflevector <2 x i8> undef, <2 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8 = shufflevector <4 x i8> undef, <4 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8 = shufflevector <6 x i8> undef, <6 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8 = shufflevector <8 x i8> undef, <8 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16 = shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i16_2 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4i16_4 = shufflevector <4 x i16> undef, <4 x i16> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v8i16_8 = shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4 = shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8_2 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8_2 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16_2 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16_2, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i16_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i16_2_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i16_4 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i16_4_2 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_8 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_8_2 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4_2 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4_2 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2_2, <2 x i32> <i32 1, i32 0>
; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
; VI-SIZE-LABEL: 'shuffle'
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4 = shufflevector <2 x i8> undef, <2 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4 = shufflevector <4 x i8> undef, <4 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8 = shufflevector <2 x i8> undef, <2 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8 = shufflevector <4 x i8> undef, <4 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8 = shufflevector <6 x i8> undef, <6 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8 = shufflevector <8 x i8> undef, <8 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16 = shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i16_2 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4i16_4 = shufflevector <4 x i16> undef, <4 x i16> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v8i16_8 = shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4 = shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8_2 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8_2 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16_2 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16_2, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i16_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i16_2_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2_2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i16_4 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i16_4_2 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_8 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_8_2 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2_2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4_2 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2_2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4_2 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2_2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2_2, <2 x i32> <i32 1, i32 0>
; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
- %v2i8_2 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 0>
- %v2i8_4 = shufflevector <2 x i8> undef, <2 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
- %v4i8_4 = shufflevector <4 x i8> undef, <4 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
- %v2i8_8 = shufflevector <2 x i8> undef, <2 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
- %v4i8_8 = shufflevector <4 x i8> undef, <4 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
- %v6i8_8 = shufflevector <6 x i8> undef, <6 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
- %v8i8_8 = shufflevector <8 x i8> undef, <8 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
- %v16i8_16 = shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
- %v2i16_2 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
- %v4i16_4 = shufflevector <4 x i16> undef, <4 x i16> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
- %v8i16_8 = shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
- %v2i32_2 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
- %v4i32_4 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
- %v2f32_2 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
- %v4f32_4 = shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
- %v2i64_2 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
- %v2f64_2 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+ %v2i8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <2 x i32> <i32 1, i32 0>
+ %v2i8_2_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <2 x i32> <i32 1, i32 0>
+ %v2i8_4 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v2i8_4_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v4i8_4 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v4i8_4_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v2i8_8 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v2i8_8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v4i8_8 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v4i8_8_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v6i8_8 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v6i8_8_2 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v8i8_8 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v8i8_8_2 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v16i8_16 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v16i8_16_2 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16_2, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v2i16_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2, <2 x i32> <i32 1, i32 0>
+ %v2i16_2_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2_2, <2 x i32> <i32 1, i32 0>
+ %v4i16_4 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v4i16_4_2 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v8i16_8 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v8i16_8_2 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v2i32_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2, <2 x i32> <i32 1, i32 0>
+ %v2i32_2_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2_2, <2 x i32> <i32 1, i32 0>
+ %v4i32_4 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v4i32_4_2 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v2f32_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2, <2 x i32> <i32 1, i32 0>
+ %v2f32_2_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2_2, <2 x i32> <i32 1, i32 0>
+ %v4f32_4 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v4f32_4_2 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v2i64_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2, <2 x i32> <i32 1, i32 0>
+ %v2i64_2_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2_2, <2 x i32> <i32 1, i32 0>
+ %v2f64_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2, <2 x i32> <i32 1, i32 0>
+ %v2f64_2_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2_2, <2 x i32> <i32 1, i32 0>
ret void
}
-define void @concat() {
+define void @concat(<2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i8> %i8v4, <4 x i8> %i8v4_2, <8 x i8> %i8v8, <8 x i8> %i8v8_2, <2 x half> %halfv2, <2 x half> %halfv2_2, <4 x half> %halfv4, <4 x half> %halfv4_2, <8 x half> %halfv8, <8 x half> %halfv8_2, <2 x i16> %i16v2, <2 x i16> %i16v2_2, <4 x i16> %i16v4, <4 x i16> %i16v4_2, <8 x i16> %i16v8, <8 x i16> %i16v8_2, <2 x i32> %i32v2, <2 x i32> %i32v2_2, <4 x i32> %i32v4, <4 x i32> %i32v4_2, <2 x float> %floatv2, <2 x float> %floatv2_2, <4 x float> %floatv4, <4 x float> %floatv4_2, <2 x i64> %i64v2, <2 x i64> %i64v2_2, <2 x double> %doublev2, <2 x double> %doublev2_2) {
; ALL-LABEL: 'concat'
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i8 = shufflevector <2 x i8> undef, <2 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i8 = shufflevector <4 x i8> undef, <4 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i8 = shufflevector <8 x i8> undef, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; ALL-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i16 = shufflevector <2 x i16> undef, <2 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v8i16 = shufflevector <4 x i16> undef, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v16i16 = shufflevector <8 x i16> undef, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32 = shufflevector <2 x i32> undef, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i32 = shufflevector <4 x i32> undef, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i64 = shufflevector <2 x i64> undef, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4f16 = shufflevector <2 x half> undef, <2 x half> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v8f16 = shufflevector <4 x half> undef, <4 x half> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v16f16 = shufflevector <8 x half> undef, <8 x half> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32 = shufflevector <2 x float> undef, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f32 = shufflevector <4 x float> undef, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f64 = shufflevector <2 x double> undef, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i8 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i8 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i8 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i16 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i16 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16i16 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i32 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i64 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f16 = shufflevector <2 x half> %halfv2, <2 x half> %halfv2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f16 = shufflevector <4 x half> %halfv4, <4 x half> %halfv4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16f16 = shufflevector <8 x half> %halfv8, <8 x half> %halfv8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f32 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f64 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i8_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i8_2 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8_2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i16_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i16_2 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16i16_2 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8_2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i32_2 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i64_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f16_2 = shufflevector <2 x half> %halfv2, <2 x half> %halfv2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f16_2 = shufflevector <4 x half> %halfv4, <4 x half> %halfv4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16f16_2 = shufflevector <8 x half> %halfv8, <8 x half> %halfv8_2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f32_2 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f64_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; ALL-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void
;
; ALL-SIZE-LABEL: 'concat'
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i8 = shufflevector <2 x i8> undef, <2 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i8 = shufflevector <4 x i8> undef, <4 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i8 = shufflevector <8 x i8> undef, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i16 = shufflevector <2 x i16> undef, <2 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v8i16 = shufflevector <4 x i16> undef, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v16i16 = shufflevector <8 x i16> undef, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32 = shufflevector <2 x i32> undef, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i32 = shufflevector <4 x i32> undef, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i64 = shufflevector <2 x i64> undef, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4f16 = shufflevector <2 x half> undef, <2 x half> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v8f16 = shufflevector <4 x half> undef, <4 x half> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v16f16 = shufflevector <8 x half> undef, <8 x half> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32 = shufflevector <2 x float> undef, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f32 = shufflevector <4 x float> undef, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f64 = shufflevector <2 x double> undef, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i8 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i8 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i8 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i16 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i16 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16i16 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i32 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i64 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f16 = shufflevector <2 x half> %halfv2, <2 x half> %halfv2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f16 = shufflevector <4 x half> %halfv4, <4 x half> %halfv4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16f16 = shufflevector <8 x half> %halfv8, <8 x half> %halfv8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f32 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f64 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i8_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i8_2 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8_2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i16_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i16_2 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16i16_2 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8_2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i32_2 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i64_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f16_2 = shufflevector <2 x half> %halfv2, <2 x half> %halfv2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f16_2 = shufflevector <4 x half> %halfv4, <4 x half> %halfv4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16f16_2 = shufflevector <8 x half> %halfv8, <8 x half> %halfv8_2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f32_2 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f64_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
- %v4i8 = shufflevector <2 x i8> undef, <2 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %v8i8 = shufflevector <4 x i8> undef, <4 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- %v16i8 = shufflevector <8 x i8> undef, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %v4i16 = shufflevector <2 x i16> undef, <2 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %v8i16 = shufflevector <4 x i16> undef, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- %v16i16 = shufflevector <8 x i16> undef, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %v4i32 = shufflevector <2 x i32> undef, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %v8i32 = shufflevector <4 x i32> undef, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- %v4i64 = shufflevector <2 x i64> undef, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %v4f16 = shufflevector <2 x half> undef, <2 x half> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %v8f16 = shufflevector <4 x half> undef, <4 x half> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- %v16f16 = shufflevector <8 x half> undef, <8 x half> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %v4f32 = shufflevector <2 x float> undef, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %v8f32 = shufflevector <4 x float> undef, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- %v4f64 = shufflevector <2 x double> undef, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v4i8 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8i8 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v16i8 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %v4i16 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8i16 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v16i16 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %v4i32 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8i32 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v4i64 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v4f16 = shufflevector <2 x half> %halfv2, <2 x half> %halfv2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8f16 = shufflevector <4 x half> %halfv4, <4 x half> %halfv4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v16f16 = shufflevector <8 x half> %halfv8, <8 x half> %halfv8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %v4f32 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8f32 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v4f64 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v4i8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8i8_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v16i8_2 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8_2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %v4i16_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8i16_2 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v16i16_2 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8_2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %v4i32_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8i32_2 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v4i64_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v4f16_2 = shufflevector <2 x half> %halfv2, <2 x half> %halfv2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8f16_2 = shufflevector <4 x half> %halfv4, <4 x half> %halfv4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v16f16_2 = shufflevector <8 x half> %halfv8, <8 x half> %halfv8_2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %v4f32_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8f32_2 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v4f64_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
ret void
}
diff --git a/llvm/test/Analysis/CostModel/RISCV/cmp-select.ll b/llvm/test/Analysis/CostModel/RISCV/cmp-select.ll
new file mode 100644
index 000000000000..dc0810b12869
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/RISCV/cmp-select.ll
@@ -0,0 +1,258 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
+; RUN: opt < %s -mtriple=riscv64 -mattr=+v,+f,+short-forward-branch-opt -passes="print<cost-model>" -cost-kind=throughput 2>&1 -disable-output | FileCheck %s --check-prefixes=SFB64
+; RUN: opt < %s -mtriple=riscv64 -mattr=+v,+f -passes="print<cost-model>" -cost-kind=throughput 2>&1 -disable-output | FileCheck %s --check-prefixes=RV64
+
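+; These tests observe how the +short-forward-branch-opt feature changes the
+; modelled cost of icmp/select pairs: under SFB64 an integer compare whose
+; only uses are integer selects is treated as foldable into the select
+; (cost 0 in the checks below), while plain RV64 costs the compare
+; separately (cost 1).
+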
+define i32 @icmp-iselect(i64 %ca, i64 %cb, i32 %a, i32 %b) {
+; SFB64-LABEL: 'icmp-iselect'
+; SFB64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %cmp1 = icmp slt i64 %ca, %cb
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select1 = select i1 %cmp1, i32 %a, i32 %b
+; SFB64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %select1
+;
+; RV64-LABEL: 'icmp-iselect'
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %cmp1 = icmp slt i64 %ca, %cb
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select1 = select i1 %cmp1, i32 %a, i32 %b
+; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %select1
+;
+ %cmp1 = icmp slt i64 %ca, %cb
+ %select1 = select i1 %cmp1, i32 %a, i32 %b
+ ret i32 %select1
+}
+
+define i32 @icmp-iselects(i64 %ca, i64 %cb, i32 %a, i32 %b, i32 %c) {
+; SFB64-LABEL: 'icmp-iselects'
+; SFB64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %cmp1 = icmp slt i64 %ca, %cb
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select1 = select i1 %cmp1, i32 %a, i32 %b
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select2 = select i1 %cmp1, i32 %a, i32 %c
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %ret = add i32 %select1, %select2
+; SFB64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %ret
+;
+; RV64-LABEL: 'icmp-iselects'
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %cmp1 = icmp slt i64 %ca, %cb
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select1 = select i1 %cmp1, i32 %a, i32 %b
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select2 = select i1 %cmp1, i32 %a, i32 %c
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %ret = add i32 %select1, %select2
+; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %ret
+;
+ %cmp1 = icmp slt i64 %ca, %cb
+ %select1 = select i1 %cmp1, i32 %a, i32 %b
+ %select2 = select i1 %cmp1, i32 %a, i32 %c
+ %ret = add i32 %select1, %select2
+ ret i32 %ret
+}
+
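+; When the compare also feeds a floating-point select, SFB64 no longer
+; treats the icmp as free: both run lines below expect cost 1 for %cmp1.
+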
+define i32 @icmp-ifselects(i64 %ca, i64 %cb, i32 %a, i32 %b, float %c, float %d) {
+; SFB64-LABEL: 'icmp-ifselects'
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %cmp1 = icmp slt i64 %ca, %cb
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select1 = select i1 %cmp1, i32 %a, i32 %b
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select2 = select i1 %cmp1, float %c, float %d
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %selectint = fptosi float %select2 to i32
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %ret = add i32 %select1, %selectint
+; SFB64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %ret
+;
+; RV64-LABEL: 'icmp-ifselects'
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %cmp1 = icmp slt i64 %ca, %cb
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select1 = select i1 %cmp1, i32 %a, i32 %b
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select2 = select i1 %cmp1, float %c, float %d
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %selectint = fptosi float %select2 to i32
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %ret = add i32 %select1, %selectint
+; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %ret
+;
+ %cmp1 = icmp slt i64 %ca, %cb
+ %select1 = select i1 %cmp1, i32 %a, i32 %b
+ %select2 = select i1 %cmp1, float %c, float %d
+ %selectint = fptosi float %select2 to i32
+ %ret = add i32 %select1, %selectint
+ ret i32 %ret
+}
+
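+; A constant select operand defeats the fold here: the immediate presumably
+; has to be materialized before the branch-over-move sequence, so the icmp
+; keeps its cost of 1 even under SFB.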
+define i32 @constant-icmp-iselect(i64 %ca, i64 %cb, i32 %a) {
+; SFB64-LABEL: 'constant-icmp-iselect'
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %cmp1 = icmp slt i64 %ca, %cb
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select1 = select i1 %cmp1, i32 %a, i32 7
+; SFB64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %select1
+;
+; RV64-LABEL: 'constant-icmp-iselect'
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %cmp1 = icmp slt i64 %ca, %cb
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select1 = select i1 %cmp1, i32 %a, i32 7
+; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %select1
+;
+ %cmp1 = icmp slt i64 %ca, %cb
+ %select1 = select i1 %cmp1, i32 %a, i32 7
+ ret i32 %select1
+}
+
+define i32 @fcmp-iselect(float %ca, float %cb, i32 %a, i32 %b) {
+; SFB64-LABEL: 'fcmp-iselect'
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fcmp1 = fcmp ogt float %ca, %cb
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select1 = select i1 %fcmp1, i32 %a, i32 %b
+; SFB64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %select1
+;
+; RV64-LABEL: 'fcmp-iselect'
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fcmp1 = fcmp ogt float %ca, %cb
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select1 = select i1 %fcmp1, i32 %a, i32 %b
+; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %select1
+;
+ %fcmp1 = fcmp ogt float %ca, %cb
+ %select1 = select i1 %fcmp1, i32 %a, i32 %b
+ ret i32 %select1
+}
+
+define float @fcmp-fselect(float %ca, float %cb, float %a, float %b) {
+; SFB64-LABEL: 'fcmp-fselect'
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fcmp1 = fcmp ogt float %ca, %cb
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fselect1 = select i1 %fcmp1, float %a, float %b
+; SFB64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %fselect1
+;
+; RV64-LABEL: 'fcmp-fselect'
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fcmp1 = fcmp ogt float %ca, %cb
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fselect1 = select i1 %fcmp1, float %a, float %b
+; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %fselect1
+;
+ %fcmp1 = fcmp ogt float %ca, %cb
+ %fselect1 = select i1 %fcmp1, float %a, float %b
+ ret float %fselect1
+}
+
+define float @icmp-fselect(i64 %ca, i64 %cb, float %a, float %b) {
+; SFB64-LABEL: 'icmp-fselect'
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %icmp1 = icmp slt i64 %ca, %cb
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fselect1 = select i1 %icmp1, float %a, float %b
+; SFB64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %fselect1
+;
+; RV64-LABEL: 'icmp-fselect'
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %icmp1 = icmp slt i64 %ca, %cb
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fselect1 = select i1 %icmp1, float %a, float %b
+; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float %fselect1
+;
+ %icmp1 = icmp slt i64 %ca, %cb
+ %fselect1 = select i1 %icmp1, float %a, float %b
+ ret float %fselect1
+}
+
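+; Vector compares and vector-valued selects have no short-forward-branch
+; sequence, so SFB64 and RV64 are expected to agree on the costs for the
+; remaining tests.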
+define <2 x i32> @vector-icmp-vector-iselect(<2 x i32> %ca, <2 x i32> %cb, <2 x i32> %a, <2 x i32> %b) {
+; SFB64-LABEL: 'vector-icmp-vector-iselect'
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %icmp = icmp slt <2 x i32> %ca, %cb
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select1 = select <2 x i1> %icmp, <2 x i32> %a, <2 x i32> %b
+; SFB64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x i32> %select1
+;
+; RV64-LABEL: 'vector-icmp-vector-iselect'
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %icmp = icmp slt <2 x i32> %ca, %cb
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select1 = select <2 x i1> %icmp, <2 x i32> %a, <2 x i32> %b
+; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x i32> %select1
+;
+ %icmp = icmp slt <2 x i32> %ca, %cb
+ %select1 = select <2 x i1> %icmp, <2 x i32> %a, <2 x i32> %b
+ ret <2 x i32> %select1
+}
+
+define <2 x i32> @vector-fcmp-vector-iselect(<2 x float> %ca, <2 x float> %cb, <2 x i32> %a, <2 x i32> %b) {
+; SFB64-LABEL: 'vector-fcmp-vector-iselect'
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fcmp1 = fcmp ogt <2 x float> %ca, %cb
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select1 = select <2 x i1> %fcmp1, <2 x i32> %a, <2 x i32> %b
+; SFB64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x i32> %select1
+;
+; RV64-LABEL: 'vector-fcmp-vector-iselect'
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fcmp1 = fcmp ogt <2 x float> %ca, %cb
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select1 = select <2 x i1> %fcmp1, <2 x i32> %a, <2 x i32> %b
+; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x i32> %select1
+;
+ %fcmp1 = fcmp ogt <2 x float> %ca, %cb
+ %select1 = select <2 x i1> %fcmp1, <2 x i32> %a, <2 x i32> %b
+ ret <2 x i32> %select1
+}
+
+define <2 x float> @vector-fcmp-vector-fselect(<2 x float> %ca, <2 x float> %cb, <2 x float> %a, <2 x float> %b) {
+; SFB64-LABEL: 'vector-fcmp-vector-fselect'
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fcmp1 = fcmp ogt <2 x float> %ca, %cb
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select1 = select <2 x i1> %fcmp1, <2 x float> %a, <2 x float> %b
+; SFB64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x float> %select1
+;
+; RV64-LABEL: 'vector-fcmp-vector-fselect'
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fcmp1 = fcmp ogt <2 x float> %ca, %cb
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select1 = select <2 x i1> %fcmp1, <2 x float> %a, <2 x float> %b
+; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x float> %select1
+;
+ %fcmp1 = fcmp ogt <2 x float> %ca, %cb
+ %select1 = select <2 x i1> %fcmp1, <2 x float> %a, <2 x float> %b
+ ret <2 x float> %select1
+}
+
+define <2 x float> @vector-icmp-vector-fselect(<2 x i32> %ca, <2 x i32> %cb, <2 x float> %a, <2 x float> %b) {
+; SFB64-LABEL: 'vector-icmp-vector-fselect'
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %icmp1 = icmp slt <2 x i32> %ca, %cb
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select1 = select <2 x i1> %icmp1, <2 x float> %a, <2 x float> %b
+; SFB64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x float> %select1
+;
+; RV64-LABEL: 'vector-icmp-vector-fselect'
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %icmp1 = icmp slt <2 x i32> %ca, %cb
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %select1 = select <2 x i1> %icmp1, <2 x float> %a, <2 x float> %b
+; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x float> %select1
+;
+ %icmp1 = icmp slt <2 x i32> %ca, %cb
+ %select1 = select <2 x i1> %icmp1, <2 x float> %a, <2 x float> %b
+ ret <2 x float> %select1
+}
+
+define <2 x float> @icmp-vector-fselect(i1 %ca, i1 %cb, <2 x float> %a, <2 x float> %b) {
+; SFB64-LABEL: 'icmp-vector-fselect'
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %icmp1 = icmp slt i1 %ca, %cb
+; SFB64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %select1 = select i1 %icmp1, <2 x float> %a, <2 x float> %b
+; SFB64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x float> %select1
+;
+; RV64-LABEL: 'icmp-vector-fselect'
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %icmp1 = icmp slt i1 %ca, %cb
+; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %select1 = select i1 %icmp1, <2 x float> %a, <2 x float> %b
+; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x float> %select1
+;
+ %icmp1 = icmp slt i1 %ca, %cb
+ %select1 = select i1 %icmp1, <2 x float> %a, <2 x float> %b
+ ret <2 x float> %select1
+}
+
+define <2 x i32> @icmp-vector-iselect(i1 %ca, i1 %cb, <2 x i32> %a, <2 x i32> %b) {
+; SFB64-LABEL: 'icmp-vector-iselect'
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %icmp1 = icmp slt i1 %ca, %cb
+; SFB64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %select1 = select i1 %icmp1, <2 x i32> %a, <2 x i32> %b
+; SFB64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x i32> %select1
+;
+; RV64-LABEL: 'icmp-vector-iselect'
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %icmp1 = icmp slt i1 %ca, %cb
+; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %select1 = select i1 %icmp1, <2 x i32> %a, <2 x i32> %b
+; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x i32> %select1
+;
+ %icmp1 = icmp slt i1 %ca, %cb
+ %select1 = select i1 %icmp1, <2 x i32> %a, <2 x i32> %b
+ ret <2 x i32> %select1
+}
+
+define <2 x float> @fcmp-vector-fselect(float %ca, float %cb, <2 x float> %a, <2 x float> %b) {
+; SFB64-LABEL: 'fcmp-vector-fselect'
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fcmp1 = fcmp ogt float %ca, %cb
+; SFB64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %select1 = select i1 %fcmp1, <2 x float> %a, <2 x float> %b
+; SFB64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x float> %select1
+;
+; RV64-LABEL: 'fcmp-vector-fselect'
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fcmp1 = fcmp ogt float %ca, %cb
+; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %select1 = select i1 %fcmp1, <2 x float> %a, <2 x float> %b
+; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x float> %select1
+;
+ %fcmp1 = fcmp ogt float %ca, %cb
+ %select1 = select i1 %fcmp1, <2 x float> %a, <2 x float> %b
+ ret <2 x float> %select1
+}
+
+define <2 x i32> @fcmp-vector-iselect(float %ca, float %cb, <2 x i32> %a, <2 x i32> %b) {
+; SFB64-LABEL: 'fcmp-vector-iselect'
+; SFB64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fcmp1 = fcmp ogt float %ca, %cb
+; SFB64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %select1 = select i1 %fcmp1, <2 x i32> %a, <2 x i32> %b
+; SFB64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x i32> %select1
+;
+; RV64-LABEL: 'fcmp-vector-iselect'
+; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %fcmp1 = fcmp ogt float %ca, %cb
+; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %select1 = select i1 %fcmp1, <2 x i32> %a, <2 x i32> %b
+; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x i32> %select1
+;
+ %fcmp1 = fcmp ogt float %ca, %cb
+ %select1 = select i1 %fcmp1, <2 x i32> %a, <2 x i32> %b
+ ret <2 x i32> %select1
+}
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll b/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll
index 8c436de4c3f6..809b15b20049 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 4
; RUN: opt -S -disable-output -passes='print<access-info>' < %s 2>&1 | FileCheck %s
@@ -7,7 +8,8 @@
%int_pair = type { i32, i32 }
-; CHECK-LABEL: function 'backdep_type_size_equivalence':
+define void @backdep_type_size_equivalence(ptr nocapture %vec, i64 %n) {
+; CHECK-LABEL: 'backdep_type_size_equivalence'
; CHECK-NEXT: loop:
; CHECK-NEXT: Memory dependences are safe with a maximum safe vector width of 3200 bits
; CHECK-NEXT: Dependences:
@@ -23,10 +25,15 @@
; CHECK-NEXT: store float %val, ptr %gep.iv.min.100, align 8 ->
; CHECK-NEXT: store i32 %indvars.iv.i32, ptr %gep.iv, align 8
; CHECK-EMPTY:
-; CHECK-NEXT: Run-time memory checks:
-; CHECK-NEXT: Grouped accesses:
-
-define void @backdep_type_size_equivalence(ptr nocapture %vec, i64 %n) {
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-NEXT: {(4 + (8 * %n) + %vec),+,8}<%loop> Added Flags: <nusw>
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
entry:
br label %loop
@@ -72,20 +79,25 @@ exit:
; different store size than the i32 type, even though their alloc sizes are
; equivalent. This is a negative test to ensure that they are not analyzed as
; in the tests above.
-;
-; CHECK-LABEL: function 'backdep_type_store_size_equivalence':
+
+define void @backdep_type_store_size_equivalence(ptr nocapture %vec, i64 %n) {
+; CHECK-LABEL: 'backdep_type_store_size_equivalence'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop.
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT: Unknown data dependence.
; CHECK-NEXT: Dependences:
; CHECK-NEXT: Unknown:
; CHECK-NEXT: %ld.f32 = load float, ptr %gep.iv, align 8 ->
; CHECK-NEXT: store i19 %indvars.iv.i19, ptr %gep.iv, align 8
; CHECK-EMPTY:
-; CHECK-NEXT: Run-time memory checks:
-; CHECK-NEXT: Grouped accesses:
-
-define void @backdep_type_store_size_equivalence(ptr nocapture %vec, i64 %n) {
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
entry:
br label %loop
@@ -114,19 +126,12 @@ exit:
; are done as i64 and i32 types. This is a negative test to ensure that they
; are not analyzed as in the tests above.
-; CHECK-LABEL: function 'neg_dist_dep_type_size_equivalence':
+define void @neg_dist_dep_type_size_equivalence(ptr nocapture %vec, i64 %n) {
+; CHECK-LABEL: 'neg_dist_dep_type_size_equivalence'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop.
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT: Backward loop carried data dependence that prevents store-to-load forwarding.
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %ld.f64 = load double, ptr %gep.iv, align 8 ->
-; CHECK-NEXT: store i32 %ld.i64.i32, ptr %gep.iv.n.i64, align 8
-; CHECK-EMPTY:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %ld.i64 = load i64, ptr %gep.iv, align 8 ->
-; CHECK-NEXT: store i32 %ld.i64.i32, ptr %gep.iv.n.i64, align 8
-; CHECK-EMPTY:
; CHECK-NEXT: BackwardVectorizableButPreventsForwarding:
; CHECK-NEXT: %ld.f64 = load double, ptr %gep.iv, align 8 ->
; CHECK-NEXT: store double %val, ptr %gep.iv.101.i64, align 8
@@ -137,12 +142,17 @@ exit:
; CHECK-EMPTY:
; CHECK-NEXT: Unknown:
; CHECK-NEXT: store double %val, ptr %gep.iv.101.i64, align 8 ->
-; CHECK-NEXT: store i32 %ld.i64.i32, ptr %gep.iv.n.i64, align 8
+; CHECK-NEXT: store i32 %ld.i64.i32, ptr %gep.iv.n.i64, align 8
; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
-
-define void @neg_dist_dep_type_size_equivalence(ptr nocapture %vec, i64 %n) {
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-NEXT: {((8 * %n) + %vec),+,8}<%loop> Added Flags: <nusw>
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
entry:
br label %loop
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll b/llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll
new file mode 100644
index 000000000000..0d85f11f06dc
--- /dev/null
+++ b/llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll
@@ -0,0 +1,187 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes='print<access-info>' -disable-output %s 2>&1 | FileCheck %s
+
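+; All loops below have uncountable early exits; LAA currently gives up and
+; reports that it could not determine the number of loop iterations, whether
+; or not the countable exits dominate the latch.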
+define void @all_exits_dominate_latch_countable_exits_at_most_500_iterations(ptr %A, ptr %B) {
+; CHECK-LABEL: 'all_exits_dominate_latch_countable_exits_at_most_500_iterations'
+; CHECK-NEXT: loop.header:
+; CHECK-NEXT: Report: could not determine number of loop iterations
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ br label %loop.header
+
+loop.header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ]
+ %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv
+ %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv
+ %l = load i32, ptr %gep.A, align 4
+ store i32 0, ptr %gep.B, align 4
+ %cntable.c.1 = icmp ult i64 %iv, 1000
+ %iv.next = add nuw nsw i64 %iv, 1
+ br i1 %cntable.c.1, label %b2, label %e.1
+
+b2:
+ %uncntable.c.0 = icmp eq i32 %l, 0
+ br i1 %uncntable.c.0, label %e.2, label %b3
+
+b3:
+ %cntable.c.2 = icmp eq i64 %iv.next, 500
+ br i1 %cntable.c.2, label %cleanup4, label %latch
+
+latch:
+ br label %loop.header
+
+cleanup4:
+ ret void
+
+e.1:
+ ret void
+e.2:
+ ret void
+
+}
+
+
+
+define i32 @all_exits_dominate_latch_countable_exits_at_most_1000_iterations(ptr %A, ptr %B) {
+; CHECK-LABEL: 'all_exits_dominate_latch_countable_exits_at_most_1000_iterations'
+; CHECK-NEXT: loop.header:
+; CHECK-NEXT: Report: could not determine number of loop iterations
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ br label %loop.header
+
+loop.header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ]
+ %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv
+ %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv
+ %l = load i32, ptr %gep.A, align 4
+ store i32 0, ptr %gep.B, align 4
+ %cntable.c.1 = icmp ult i64 %iv, 1000
+ br i1 %cntable.c.1, label %b2, label %e.1
+
+b2:
+ %uncntable.c.0 = icmp eq i32 %l, 0
+ br i1 %uncntable.c.0, label %e.2, label %b3
+
+b3:
+ %iv.next = add nuw nsw i64 %iv, 1
+ %cntable.c.2 = icmp eq i64 %iv.next, 2000
+ br i1 %cntable.c.2, label %e.0, label %latch
+
+latch:
+ br label %loop.header
+
+e.0:
+ ret i32 0
+
+e.1:
+ ret i32 1
+
+e.2:
+ ret i32 2
+}
+
+
+define i32 @not_all_exits_dominate_latch(ptr %A, ptr %B) {
+; CHECK-LABEL: 'not_all_exits_dominate_latch'
+; CHECK-NEXT: loop.header:
+; CHECK-NEXT: Report: could not determine number of loop iterations
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ br label %loop.header
+
+loop.header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ]
+ %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv
+ %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv
+ %l = load i32, ptr %gep.A, align 4
+ store i32 0, ptr %gep.B, align 4
+ %cntable.c.1 = icmp ult i64 %iv, 1000
+ %iv.next = add nuw nsw i64 %iv, 1
+ br i1 %cntable.c.1, label %b2, label %latch
+
+b2:
+ %uncntable.c.0 = icmp eq i32 %l, 0
+ br i1 %uncntable.c.0, label %e.2, label %b3
+
+b3:
+ %cntable.c.2 = icmp eq i64 %iv.next, 2000
+ br i1 %cntable.c.2, label %e.0, label %latch
+
+latch:
+ br label %loop.header
+
+e.0:
+ ret i32 0
+
+e.2:
+ ret i32 1
+}
+
+define i32 @b3_does_not_dominate_latch(ptr %A, ptr %B) {
+; CHECK-LABEL: 'b3_does_not_dominate_latch'
+; CHECK-NEXT: loop.header:
+; CHECK-NEXT: Report: could not determine number of loop iterations
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ br label %loop.header
+
+loop.header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ]
+ %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv
+ %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv
+ %l = load i32, ptr %gep.A, align 4
+ store i32 0, ptr %gep.B, align 4
+ %cntable.c.1 = icmp ult i64 %iv, 1000
+ %iv.next = add nuw nsw i64 %iv, 1
+ br i1 %cntable.c.1, label %b2, label %e.1
+
+b2:
+ %uncntable.c.0 = icmp eq i32 %l, 0
+ br i1 %uncntable.c.0, label %latch, label %b3
+
+b3:
+ %cntable.c.2 = icmp eq i64 %iv.next, 500
+ br i1 %cntable.c.2, label %e.0, label %latch
+
+latch:
+ br label %loop.header
+
+e.0:
+ ret i32 0
+
+e.1:
+ ret i32 1
+}
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-independent.ll b/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-independent.ll
index 42d87edd8b4b..f1ae1a897fff 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-independent.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-independent.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes='print<access-info>' -disable-output < %s 2>&1 | FileCheck %s
; Check that loop-independent forward dependences are discovered properly.
@@ -21,17 +22,31 @@
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
define void @f(ptr noalias %A, ptr noalias %B, ptr noalias %C, i64 %N) {
-
-; CHECK: Dependences:
-; CHECK-NEXT: Forward:
-; CHECK-NEXT: store i32 %b_p1, ptr %Aidx, align 4 ->
-; CHECK-NEXT: %a = load i32, ptr %Aidx, align 4
-; CHECK: ForwardButPreventsForwarding:
-; CHECK-NEXT: store i32 %b_p2, ptr %Aidx_next, align 4 ->
-; CHECK-NEXT: %a = load i32, ptr %Aidx, align 4
-; CHECK: Forward:
-; CHECK-NEXT: store i32 %b_p2, ptr %Aidx_next, align 4 ->
-; CHECK-NEXT: store i32 %b_p1, ptr %Aidx, align 4
+; CHECK-LABEL: 'f'
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT: Forward loop carried data dependence that prevents store-to-load forwarding.
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Forward:
+; CHECK-NEXT: store i32 %b_p1, ptr %Aidx, align 4 ->
+; CHECK-NEXT: %a = load i32, ptr %Aidx, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: ForwardButPreventsForwarding:
+; CHECK-NEXT: store i32 %b_p2, ptr %Aidx_next, align 4 ->
+; CHECK-NEXT: %a = load i32, ptr %Aidx, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: Forward:
+; CHECK-NEXT: store i32 %b_p2, ptr %Aidx_next, align 4 ->
+; CHECK-NEXT: store i32 %b_p1, ptr %Aidx, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
entry:
br label %for.body
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/invariant-dependence-before.ll b/llvm/test/Analysis/LoopAccessAnalysis/invariant-dependence-before.ll
index 2a210a5a445b..2139804753ef 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/invariant-dependence-before.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/invariant-dependence-before.ll
@@ -4,13 +4,8 @@
define void @test_invar_dependence_before_positive_strided_access_1(ptr %a) {
; CHECK-LABEL: 'test_invar_dependence_before_positive_strided_access_1'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %a, align 4 ->
-; CHECK-NEXT: store i32 %l, ptr %gep, align 4
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
@@ -39,13 +34,8 @@ exit:
define void @test_invar_dependence_before_positive_strided_access_2(ptr %a) {
; CHECK-LABEL: 'test_invar_dependence_before_positive_strided_access_2'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %gep, align 4 ->
-; CHECK-NEXT: store i32 %l, ptr %a, align 4
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
@@ -144,13 +134,8 @@ exit:
define void @test_invar_dependence_before_positive_strided_access_1_different_access_sizes(ptr %a) {
; CHECK-LABEL: 'test_invar_dependence_before_positive_strided_access_1_different_access_sizes'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %a, align 4 ->
-; CHECK-NEXT: store i8 %t, ptr %gep, align 1
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
@@ -216,13 +201,8 @@ exit:
define void @test_invar_dependence_before_negative_strided_access_1(ptr %a) {
; CHECK-LABEL: 'test_invar_dependence_before_negative_strided_access_1'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %a, align 4 ->
-; CHECK-NEXT: store i32 %l, ptr %gep, align 4
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
@@ -251,13 +231,8 @@ exit:
define void @test_invar_dependence_before_negative_strided_access_2(ptr %a) {
; CHECK-LABEL: 'test_invar_dependence_before_negative_strided_access_2'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %gep, align 4 ->
-; CHECK-NEXT: store i32 %l, ptr %a, align 4
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
@@ -357,13 +332,8 @@ exit:
define void @test_both_invar_before_1(ptr %a) {
; CHECK-LABEL: 'test_both_invar_before_1'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %a, align 4 ->
-; CHECK-NEXT: store i32 %l, ptr %gep.off, align 4
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
@@ -391,13 +361,8 @@ exit:
define void @test_both_invar_before_2(ptr %a) {
; CHECK-LABEL: 'test_both_invar_before_2'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %gep.off, align 4 ->
-; CHECK-NEXT: store i32 %l, ptr %a, align 4
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
@@ -754,3 +719,68 @@ loop:
exit:
ret void
}
+
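+; A loop-invariant access of known fixed width that ends before the strided
+; range can be proven independent, but a scalable vector load has an unknown
+; size at compile time, so that dependence stays Unknown.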
+define void @test_invar_vector_dependence_before_positive_strided_access_1(ptr %a) {
+; CHECK-LABEL: 'test_invar_vector_dependence_before_positive_strided_access_1'
+; CHECK-NEXT: loop:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ %gep.off = getelementptr i8, ptr %a, i32 4
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep = getelementptr i32, ptr %gep.off, i32 %iv
+ %l = load <4 x i8>, ptr %a
+ store i32 0, ptr %gep
+ %iv.next = add i32 %iv, 1
+ %ec = icmp eq i32 %iv.next, 100
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+define void @test_invar_scalable_dependence_before_positive_strided_access_1(ptr %a) {
+; CHECK-LABEL: 'test_invar_scalable_dependence_before_positive_strided_access_1'
+; CHECK-NEXT: loop:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Unknown:
+; CHECK-NEXT: %l = load <vscale x 4 x i8>, ptr %a, align 4 ->
+; CHECK-NEXT: store i32 0, ptr %gep, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ %gep.off = getelementptr i8, ptr %a, i32 4
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep = getelementptr i32, ptr %gep.off, i32 %iv
+ %l = load <vscale x 4 x i8>, ptr %a
+ store i32 0, ptr %gep
+ %iv.next = add i32 %iv, 1
+ %ec = icmp eq i32 %iv.next, 100
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/loop-invariant-dep-with-backedge-taken-count.ll b/llvm/test/Analysis/LoopAccessAnalysis/loop-invariant-dep-with-backedge-taken-count.ll
index 02285031f628..723d01b38f45 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/loop-invariant-dep-with-backedge-taken-count.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/loop-invariant-dep-with-backedge-taken-count.ll
@@ -7,13 +7,8 @@
define void @test_distance_greater_than_BTC_100(ptr %a) {
; CHECK-LABEL: 'test_distance_greater_than_BTC_100'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %gep.x, align 4 ->
-; CHECK-NEXT: store i32 %l, ptr %gep, align 4
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
@@ -112,13 +107,8 @@ exit:
define void @test_distance_greater_than_BTC_10000(ptr %a) {
; CHECK-LABEL: 'test_distance_greater_than_BTC_10000'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %gep.x, align 4 ->
-; CHECK-NEXT: store i32 %l, ptr %gep, align 4
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/non-constant-strides-backward.ll b/llvm/test/Analysis/LoopAccessAnalysis/non-constant-strides-backward.ll
index 416742a94e0d..845ff078ee0e 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/non-constant-strides-backward.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/non-constant-strides-backward.ll
@@ -45,13 +45,8 @@ exit:
define void @different_non_constant_strides_known_backward_distance_larger_than_trip_count(ptr %A) {
; CHECK-LABEL: 'different_non_constant_strides_known_backward_distance_larger_than_trip_count'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %gep, align 4 ->
-; CHECK-NEXT: store i32 %add, ptr %gep.mul.2, align 4
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/pr64637.ll b/llvm/test/Analysis/LoopAccessAnalysis/pr64637.ll
index 4d4d2bf3eee8..d3e589cf99cf 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/pr64637.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/pr64637.ll
@@ -1,4 +1,5 @@
-; RUN: opt -S -passes='print<access-info>' -pass-remarks-analysis=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=ANALYSIS
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S -passes='print<access-info>' -pass-remarks-analysis=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s
; Test that LoopVectorize doesn't report 'Use #pragma loop distribute(enable) to allow loop distribution'
; when we have already added #pragma clang loop distribute(enable).
@@ -17,8 +18,31 @@
; }
define void @foo(ptr noalias nocapture noundef %y, ptr noalias nocapture noundef readnone %x, ptr noalias nocapture noundef readonly %indices, i32 noundef %n) {
-; ANALYSIS: Report: unsafe dependent memory operations in loop.
-; ANALYSIS: Backward loop carried data dependence that prevents store-to-load forwarding.
+; CHECK-LABEL: 'foo'
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop.
+; CHECK-NEXT: Backward loop carried data dependence that prevents store-to-load forwarding.
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: BackwardVectorizableButPreventsForwarding:
+; CHECK-NEXT: %1 = load i32, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %add8, ptr %arrayidx12, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: BackwardVectorizable:
+; CHECK-NEXT: store i32 %add1, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %add8, ptr %arrayidx12, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: Forward:
+; CHECK-NEXT: %1 = load i32, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %add1, ptr %arrayidx, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
entry:
%cmp22 = icmp sgt i32 %n, 0
br i1 %cmp22, label %for.body.preheader, label %for.cond.cleanup
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/select-dependence.ll b/llvm/test/Analysis/LoopAccessAnalysis/select-dependence.ll
index 07e32f443554..60fe8b4fcbed 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/select-dependence.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/select-dependence.ll
@@ -1,30 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 5
; RUN: opt -passes='print<access-info>' -disable-output 2>&1 < %s | FileCheck %s
-; CHECK: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %t63 = load double, ptr %t62, align 8 ->
-; CHECK-NEXT: store double %t63, ptr %t64, align 8
-
-define i32 @test() {
- %a1 = alloca [128 x double], align 8
- %a2 = alloca [128 x double], align 8
- %a3 = alloca [128 x double], align 8
- %t30 = getelementptr double, ptr %a2, i64 -32
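+; Both the load and the store address go through a select between two
+; pointers. LAA cannot reason about the select-dependent addresses, so the
+; dependence is classified as Unknown even though the underlying objects are
+; noalias.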
+define void @test(ptr noalias %x, ptr noalias %y, ptr noalias %z) {
+; CHECK-LABEL: 'test'
+; CHECK-NEXT: loop:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Unknown:
+; CHECK-NEXT: %load = load double, ptr %gep.sel, align 8 ->
+; CHECK-NEXT: store double %load, ptr %gep.sel2, align 8
+; CHECK-EMPTY:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ %gep.y = getelementptr double, ptr %y, i64 -32
br label %loop
loop:
- %t58 = phi i64 [ %t65, %loop ], [ 0, %0 ]
- %t59 = icmp ule i64 %t58, 32
- %t60 = select i1 %t59, ptr %a1, ptr %t30
- %t62 = getelementptr inbounds double, ptr %t60, i64 %t58
- %t63 = load double, ptr %t62, align 8
- %t61 = select i1 %t59, ptr %a2, ptr %a3
- %t64 = getelementptr inbounds double, ptr %t61, i64 %t58
- store double %t63, ptr %t64, align 8
- %t65 = add nuw nsw i64 %t58, 1
- %t66 = icmp eq i64 %t65, 94
- br i1 %t66, label %exit, label %loop
+ %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
+ %icmp = icmp ule i64 %iv, 32
+ %sel = select i1 %icmp, ptr %x, ptr %gep.y
+ %gep.sel = getelementptr inbounds double, ptr %sel, i64 %iv
+ %load = load double, ptr %gep.sel, align 8
+ %sel2 = select i1 %icmp, ptr %y, ptr %z
+ %gep.sel2 = getelementptr inbounds double, ptr %sel2, i64 %iv
+ store double %load, ptr %gep.sel2, align 8
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exit.cond = icmp eq i64 %iv, 94
+ br i1 %exit.cond, label %exit, label %loop
exit:
- ret i32 0
+ ret void
}
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll b/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll
index bfdd15f170d0..ef19e173b659 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes='print<access-info>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
@@ -10,13 +11,19 @@ target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
; B[i] = A[i] + 1;
; }
-; CHECK: function 'nodep_Read_Write':
-; CHECK-NEXT: for.body:
-; CHECK-NEXT: Memory dependences are safe
-; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Run-time memory checks:
-
define void @nodep_Read_Write(ptr nocapture %A) {
+; CHECK-LABEL: 'nodep_Read_Write'
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
entry:
%add.ptr = getelementptr inbounds i32, ptr %A, i64 1
br label %for.body
@@ -42,17 +49,23 @@ for.body: ; preds = %entry, %for.body
; A[i] = i;
; sum += A[i+3];
; }
-;
+;
; return sum;
; }
-; CHECK: function 'nodep_Write_Read':
-; CHECK-NEXT: for.body:
-; CHECK-NEXT: Memory dependences are safe
-; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Run-time memory checks:
-
define i32 @nodep_Write_Read(ptr nocapture %A) {
+; CHECK-LABEL: 'nodep_Write_Read'
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
entry:
br label %for.body
@@ -81,13 +94,19 @@ for.body: ; preds = %entry, %for.body
; }
; }
-; CHECK: function 'nodep_Write_Write':
-; CHECK-NEXT: for.body:
-; CHECK-NEXT: Memory dependences are safe
-; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Run-time memory checks:
-
define void @nodep_Write_Write(ptr nocapture %A) {
+; CHECK-LABEL: 'nodep_Write_Write'
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
entry:
br label %for.body
@@ -115,16 +134,24 @@ for.body: ; preds = %entry, %for.body
; A[i+3] = A[i] + 1;
; }
-; CHECK: function 'unsafe_Read_Write':
-; CHECK-NEXT: for.body:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop
-; CHECK-NEXT: Backward loop carried data dependence.
-; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Backward:
-; CHECK-NEXT: %0 = load i32, ptr %arrayidx, align 4 ->
-; CHECK-NEXT: store i32 %add, ptr %arrayidx3, align 4
-
define void @unsafe_Read_Write(ptr nocapture %A) {
+; CHECK-LABEL: 'unsafe_Read_Write'
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT: Backward loop carried data dependence.
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Backward:
+; CHECK-NEXT: %0 = load i32, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %add, ptr %arrayidx3, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
entry:
br label %for.body
@@ -155,16 +182,24 @@ for.body: ; preds = %entry, %for.body
; return sum;
; }
-; CHECK: function 'unsafe_Write_Read':
-; CHECK-NEXT: for.body:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop
-; CHECK-NEXT: Backward loop carried data dependence.
-; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Backward:
-; CHECK-NEXT: store i32 %0, ptr %arrayidx, align 4 ->
-; CHECK-NEXT: %1 = load i32, ptr %arrayidx2, align 4
-
define i32 @unsafe_Write_Read(ptr nocapture %A) {
+; CHECK-LABEL: 'unsafe_Write_Read'
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT: Backward loop carried data dependence.
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Backward:
+; CHECK-NEXT: store i32 %0, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: %1 = load i32, ptr %arrayidx2, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
entry:
br label %for.body
@@ -192,16 +227,24 @@ for.body: ; preds = %entry, %for.body
; }
; }
-; CHECK: function 'unsafe_Write_Write':
-; CHECK-NEXT: for.body:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop
-; CHECK-NEXT: Backward loop carried data dependence.
-; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Backward:
-; CHECK-NEXT: store i32 %0, ptr %arrayidx, align 4 ->
-; CHECK-NEXT: store i32 %2, ptr %arrayidx3, align 4
-
define void @unsafe_Write_Write(ptr nocapture %A) {
+; CHECK-LABEL: 'unsafe_Write_Write'
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT: Backward loop carried data dependence.
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Backward:
+; CHECK-NEXT: store i32 %0, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %2, ptr %arrayidx3, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
entry:
br label %for.body
@@ -230,15 +273,23 @@ for.body: ; preds = %entry, %for.body
; B[i] = A[i] + 1;
; }
-; CHECK: function 'vectorizable_Read_Write':
-; CHECK-NEXT: for.body:
-; CHECK-NEXT: Memory dependences are safe
-; CHECK-NEXT: Dependences:
-; CHECK-NEXT: BackwardVectorizable:
-; CHECK-NEXT: %0 = load i32, ptr %arrayidx, align 4 ->
-; CHECK-NEXT: store i32 %add, ptr %arrayidx2, align 4
-
define void @vectorizable_Read_Write(ptr nocapture %A) {
+; CHECK-LABEL: 'vectorizable_Read_Write'
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Memory dependences are safe with a maximum safe vector width of 64 bits
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: BackwardVectorizable:
+; CHECK-NEXT: %0 = load i32, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %add, ptr %arrayidx2, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
entry:
%add.ptr = getelementptr inbounds i32, ptr %A, i64 4
br label %for.body
@@ -265,19 +316,27 @@ for.body: ; preds = %entry, %for.body
; A[i] = i;
; sum += B[i];
; }
-;
+;
; return sum;
; }
-; CHECK: function 'vectorizable_Write_Read':
-; CHECK-NEXT: for.body:
-; CHECK-NEXT: Memory dependences are safe
-; CHECK-NEXT: Dependences:
-; CHECK-NEXT: BackwardVectorizable:
-; CHECK-NEXT: store i32 %0, ptr %arrayidx, align 4 ->
-; CHECK-NEXT: %1 = load i32, ptr %arrayidx2, align 4
-
define i32 @vectorizable_Write_Read(ptr nocapture %A) {
+; CHECK-LABEL: 'vectorizable_Write_Read'
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Memory dependences are safe with a maximum safe vector width of 64 bits
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: BackwardVectorizable:
+; CHECK-NEXT: store i32 %0, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: %1 = load i32, ptr %arrayidx2, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
entry:
%add.ptr = getelementptr inbounds i32, ptr %A, i64 4
br label %for.body
@@ -307,15 +366,23 @@ for.body: ; preds = %entry, %for.body
; }
; }
-; CHECK: function 'vectorizable_Write_Write':
-; CHECK-NEXT: for.body:
-; CHECK-NEXT: Memory dependences are safe
-; CHECK-NEXT: Dependences:
-; CHECK-NEXT: BackwardVectorizable:
-; CHECK-NEXT: store i32 %0, ptr %arrayidx, align 4 ->
-; CHECK-NEXT: store i32 %2, ptr %arrayidx2, align 4
-
define void @vectorizable_Write_Write(ptr nocapture %A) {
+; CHECK-LABEL: 'vectorizable_Write_Write'
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Memory dependences are safe with a maximum safe vector width of 64 bits
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: BackwardVectorizable:
+; CHECK-NEXT: store i32 %0, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %2, ptr %arrayidx2, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
entry:
%add.ptr = getelementptr inbounds i32, ptr %A, i64 4
br label %for.body
@@ -346,16 +413,24 @@ for.body: ; preds = %entry, %for.body
; FIXME: This case looks like the previous case @vectorizable_Read_Write. It should
; be vectorizable.
-; CHECK: function 'vectorizable_unscaled_Read_Write':
-; CHECK-NEXT: for.body:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop
-; CHECK-NEXT: Backward loop carried data dependence that prevents store-to-load forwarding.
-; CHECK-NEXT: Dependences:
-; CHECK-NEXT: BackwardVectorizableButPreventsForwarding:
-; CHECK-NEXT: %0 = load i32, ptr %arrayidx, align 4 ->
-; CHECK-NEXT: store i32 %add, ptr %arrayidx2, align 4
-
define void @vectorizable_unscaled_Read_Write(ptr nocapture %A) {
+; CHECK-LABEL: 'vectorizable_unscaled_Read_Write'
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT: Backward loop carried data dependence that prevents store-to-load forwarding.
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: BackwardVectorizableButPreventsForwarding:
+; CHECK-NEXT: %0 = load i32, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %add, ptr %arrayidx2, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
entry:
%add.ptr = getelementptr inbounds i8, ptr %A, i64 14
br label %for.body
@@ -382,19 +457,27 @@ for.body: ; preds = %entry, %for.body
; A[i] = i;
; sum += B[i];
; }
-;
+;
; return sum;
; }
-; CHECK: function 'vectorizable_unscaled_Write_Read':
-; CHECK-NEXT: for.body:
-; CHECK-NEXT: Memory dependences are safe
-; CHECK-NEXT: Dependences:
-; CHECK-NEXT: BackwardVectorizable:
-; CHECK-NEXT: store i32 %0, ptr %arrayidx, align 4 ->
-; CHECK-NEXT: %1 = load i32, ptr %arrayidx2, align 4
-
define i32 @vectorizable_unscaled_Write_Read(ptr nocapture %A) {
+; CHECK-LABEL: 'vectorizable_unscaled_Write_Read'
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Memory dependences are safe with a maximum safe vector width of 64 bits
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: BackwardVectorizable:
+; CHECK-NEXT: store i32 %0, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: %1 = load i32, ptr %arrayidx2, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
entry:
%add.ptr = getelementptr inbounds i8, ptr %A, i64 17
br label %for.body
@@ -422,16 +505,24 @@ for.body: ; preds = %entry, %for.body
; B[i] = A[i] + 1;
; }
-; CHECK: function 'unsafe_unscaled_Read_Write':
-; CHECK-NEXT: for.body:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop
-; CHECK-NEXT: Backward loop carried data dependence.
-; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Backward:
-; CHECK-NEXT: %0 = load i32, ptr %arrayidx, align 4 ->
-; CHECK-NEXT: store i32 %add, ptr %arrayidx2, align 4
-
define void @unsafe_unscaled_Read_Write(ptr nocapture %A) {
+; CHECK-LABEL: 'unsafe_unscaled_Read_Write'
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT: Backward loop carried data dependence.
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Backward:
+; CHECK-NEXT: %0 = load i32, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %add, ptr %arrayidx2, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
entry:
%add.ptr = getelementptr inbounds i8, ptr %A, i64 11
br label %for.body
@@ -451,15 +542,6 @@ for.body: ; preds = %entry, %for.body
br i1 %cmp, label %for.body, label %for.cond.cleanup
}
-; CHECK: function 'unsafe_unscaled_Read_Write2':
-; CHECK-NEXT: for.body:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop
-; CHECK-NEXT: Backward loop carried data dependence.
-; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Backward:
-; CHECK-NEXT: %0 = load i32, ptr %arrayidx, align 4 ->
-; CHECK-NEXT: store i32 %add, ptr %arrayidx2, align 4
-
; void unsafe_unscaled_Read_Write2(int *A) {
; int *B = (int *)((char *)A + 1);
; for (unsigned i = 0; i < 1024; i+=2)
@@ -467,6 +549,23 @@ for.body: ; preds = %entry, %for.body
; }
define void @unsafe_unscaled_Read_Write2(ptr nocapture %A) {
+; CHECK-LABEL: 'unsafe_unscaled_Read_Write2'
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT: Backward loop carried data dependence.
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Backward:
+; CHECK-NEXT: %0 = load i32, ptr %arrayidx, align 4 ->
+; CHECK-NEXT: store i32 %add, ptr %arrayidx2, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
entry:
%add.ptr = getelementptr inbounds i8, ptr %A, i64 1
br label %for.body
@@ -500,19 +599,28 @@ for.body: ; preds = %entry, %for.body
;
; The access (2) overlaps with (1) and (3).
-; CHECK: function 'interleaved_stores':
-; CHECK-NEXT: for.body:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop
-; CHECK-NEXT: Backward loop carried data dependence.
-; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Backward:
-; CHECK-NEXT: store i32 %2, ptr %arrayidx5, align 4 ->
-; CHECK-NEXT: store i32 %2, ptr %arrayidx9, align 4
-; CHECK: Backward:
-; CHECK-NEXT: store i32 %0, ptr %arrayidx2, align 4 ->
-; CHECK-NEXT: store i32 %2, ptr %arrayidx5, align 4
-
define void @interleaved_stores(ptr nocapture %A) {
+; CHECK-LABEL: 'interleaved_stores'
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT: Backward loop carried data dependence.
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Backward:
+; CHECK-NEXT: store i32 %2, ptr %arrayidx5, align 4 ->
+; CHECK-NEXT: store i32 %2, ptr %arrayidx9, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: Backward:
+; CHECK-NEXT: store i32 %0, ptr %arrayidx2, align 4 ->
+; CHECK-NEXT: store i32 %2, ptr %arrayidx5, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
entry:
%incdec.ptr = getelementptr inbounds i8, ptr %A, i64 1
br label %for.body
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/symbolic-stride.ll b/llvm/test/Analysis/LoopAccessAnalysis/symbolic-stride.ll
index 6cc045d7a681..3da0f543c5c1 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/symbolic-stride.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/symbolic-stride.ll
@@ -95,6 +95,127 @@ exit:
ret void
}
+define void @single_stride_castexpr(i32 %offset, ptr %src, ptr %dst, i1 %cond) {
+; CHECK-LABEL: 'single_stride_castexpr'
+; CHECK-NEXT: inner.loop:
+; CHECK-NEXT: Memory dependences are safe with run-time checks
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Check 0:
+; CHECK-NEXT: Comparing group ([[GRP1:0x[0-9a-f]+]]):
+; CHECK-NEXT: %gep.dst = getelementptr i32, ptr %dst, i64 %iv.2
+; CHECK-NEXT: Against group ([[GRP2:0x[0-9a-f]+]]):
+; CHECK-NEXT: %gep.src = getelementptr inbounds i32, ptr %src, i32 %iv.3
+; CHECK-NEXT: Grouped accesses:
+; CHECK-NEXT: Group [[GRP1]]:
+; CHECK-NEXT: (Low: ((4 * %iv.1) + %dst) High: (804 + (4 * %iv.1) + %dst))
+; CHECK-NEXT: Member: {((4 * %iv.1) + %dst),+,4}<%inner.loop>
+; CHECK-NEXT: Group [[GRP2]]:
+; CHECK-NEXT: (Low: %src High: (804 + %src))
+; CHECK-NEXT: Member: {%src,+,4}<nuw><%inner.loop>
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-NEXT: Equal predicate: %offset == 1
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+; CHECK-NEXT: [PSE] %gep.dst = getelementptr i32, ptr %dst, i64 %iv.2:
+; CHECK-NEXT: {((4 * %iv.1) + %dst),+,(4 * (sext i32 %offset to i64))<nsw>}<%inner.loop>
+; CHECK-NEXT: --> {((4 * %iv.1) + %dst),+,4}<%inner.loop>
+; CHECK-NEXT: outer.header:
+; CHECK-NEXT: Report: loop is not the innermost loop
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ %offset.ext = sext i32 %offset to i64
+ br label %outer.header
+
+outer.header:
+ %iv.1 = phi i64 [ 0, %entry ], [ %iv.2.next, %inner.loop ]
+ br i1 %cond, label %inner.loop, label %exit
+
+inner.loop:
+ %iv.2 = phi i64 [ %iv.1, %outer.header ], [ %iv.2.next, %inner.loop ]
+ %iv.3 = phi i32 [ 0, %outer.header ], [ %iv.3.next, %inner.loop ]
+ %gep.src = getelementptr inbounds i32, ptr %src, i32 %iv.3
+ %load = load i32, ptr %gep.src, align 8
+ %gep.dst = getelementptr i32, ptr %dst, i64 %iv.2
+ store i32 %load, ptr %gep.dst, align 8
+ %iv.2.next = add i64 %iv.2, %offset.ext
+ %iv.3.next = add i32 %iv.3, 1
+ %ec = icmp eq i32 %iv.3, 200
+ br i1 %ec, label %outer.header, label %inner.loop
+
+exit:
+ ret void
+}
+
+define void @single_stride_castexpr_multiuse(i32 %offset, ptr %src, ptr %dst, i1 %cond) {
+; CHECK-LABEL: 'single_stride_castexpr_multiuse'
+; CHECK-NEXT: inner.loop:
+; CHECK-NEXT: Memory dependences are safe with run-time checks
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Check 0:
+; CHECK-NEXT: Comparing group ([[GRP3:0x[0-9a-f]+]]):
+; CHECK-NEXT: %gep.dst = getelementptr i32, ptr %dst, i64 %iv.2
+; CHECK-NEXT: Against group ([[GRP4:0x[0-9a-f]+]]):
+; CHECK-NEXT: %gep.src = getelementptr inbounds i32, ptr %src, i64 %iv.3
+; CHECK-NEXT: Grouped accesses:
+; CHECK-NEXT: Group [[GRP3]]:
+; CHECK-NEXT: (Low: (((4 * %iv.1) + %dst) umin ((4 * %iv.1) + (4 * (sext i32 %offset to i64) * (200 + (-1 * (zext i32 %offset to i64))<nsw>)<nsw>) + %dst)) High: (4 + (((4 * %iv.1) + %dst) umax ((4 * %iv.1) + (4 * (sext i32 %offset to i64) * (200 + (-1 * (zext i32 %offset to i64))<nsw>)<nsw>) + %dst))))
+; CHECK-NEXT: Member: {((4 * %iv.1) + %dst),+,(4 * (sext i32 %offset to i64))<nsw>}<%inner.loop>
+; CHECK-NEXT: Group [[GRP4]]:
+; CHECK-NEXT: (Low: ((4 * (zext i32 %offset to i64))<nuw><nsw> + %src) High: (804 + %src))
+; CHECK-NEXT: Member: {((4 * (zext i32 %offset to i64))<nuw><nsw> + %src),+,4}<%inner.loop>
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+; CHECK-NEXT: outer.header:
+; CHECK-NEXT: Report: loop is not the innermost loop
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ %offset.ext = sext i32 %offset to i64
+ %offset.zext = zext i32 %offset to i64
+ br label %outer.header
+
+outer.header:
+ %iv.1 = phi i64 [ 0, %entry ], [ %iv.2.next, %inner.loop ]
+ br i1 %cond, label %inner.loop, label %exit
+
+inner.loop:
+ %iv.2 = phi i64 [ %iv.1, %outer.header ], [ %iv.2.next, %inner.loop ]
+ %iv.3 = phi i64 [ %offset.zext, %outer.header ], [ %iv.3.next, %inner.loop ]
+ %gep.src = getelementptr inbounds i32, ptr %src, i64 %iv.3
+ %load = load i32, ptr %gep.src, align 8
+ %gep.dst = getelementptr i32, ptr %dst, i64 %iv.2
+ store i32 %load, ptr %gep.dst, align 8
+ %iv.2.next = add i64 %iv.2, %offset.ext
+ %iv.3.next = add i64 %iv.3, 1
+ %ec = icmp eq i64 %iv.3, 200
+ br i1 %ec, label %outer.header, label %inner.loop
+
+exit:
+ ret void
+}
+
; A loop with two symbolic strides.
define void @two_strides(ptr noalias %A, ptr noalias %B, i64 %N, i64 %stride.1, i64 %stride.2) {
; CHECK-LABEL: 'two_strides'
diff --git a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/LoopnestFixedSize.ll b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/LoopnestFixedSize.ll
index e15f06843500..5209d290c83d 100644
--- a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/LoopnestFixedSize.ll
+++ b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/LoopnestFixedSize.ll
@@ -7,7 +7,7 @@ target triple = "powerpc64le-unknown-linux-gnu"
; The IR is copied from llvm/test/Analysis/DependenceAnalysis/SimpleSIVNoValidityCheckFixedSize.ll
; CHECK: Loop 'for.body' has cost = 4186116
-; CHECK-NEXT: Loop 'for.body4' has cost = 128898
+; CHECK-NEXT: Loop 'for.body4' has cost = 130944
;; #define N 1024
;; #define M 2048
@@ -49,7 +49,7 @@ for.end13: ; preds = %for.inc11
; CHECK: Loop 'for.body' has cost = 4186116
-; CHECK-NEXT: Loop 'for.body4' has cost = 128898
+; CHECK-NEXT: Loop 'for.body4' has cost = 130944
define void @t2(ptr %a) {
entry:
@@ -87,7 +87,7 @@ declare ptr @func_with_returned_arg(ptr returned %arg)
; CHECK-NEXT: Loop 'for.body4' has cost = 16762927104000000
; CHECK-NEXT: Loop 'for.body8' has cost = 130960368000000
; CHECK-NEXT: Loop 'for.body12' has cost = 1047682944000
-; CHECK-NEXT: Loop 'for.body16' has cost = 32260032000
+; CHECK-NEXT: Loop 'for.body16' has cost = 32772096000
;; #define N 128
;; #define M 2048
diff --git a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/compute-cost.ll b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/compute-cost.ll
index 87f522c98254..7275d04c92b4 100644
--- a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/compute-cost.ll
+++ b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/compute-cost.ll
@@ -38,7 +38,7 @@ for.end: ; preds = %for.cond
; CHECK: Loop 'for.cond' has cost = 100000000
; CHECK: Loop 'for.cond1' has cost = 1000000
-; CHECK: Loop 'for.cond5' has cost = 30000
+; CHECK: Loop 'for.cond5' has cost = 40000
@data = external dso_local global [2 x [4 x [18 x i32]]], align 1
@@ -118,7 +118,7 @@ for.neg.end: ; preds = %for.neg.cond
; access functions. When this is fixed this testcase should have a cost
; approximately 2x higher.
-; CHECK: Loop 'for.cond2' has cost = 2560
+; CHECK: Loop 'for.cond2' has cost = 2561
define void @Test2(ptr %B) {
entry:
br label %for.cond2
@@ -148,7 +148,7 @@ for.end: ; preds = %for.cond
; for (i = 40960; i > 0; i--)
; C[i] = C[i];
-; CHECK: Loop 'for.cond3' has cost = 2560
+; CHECK: Loop 'for.cond3' has cost = 2561
define void @Test3(ptr %C) {
entry:
br label %for.cond3
@@ -177,7 +177,7 @@ for.end: ; preds = %for.cond
; for (i = 0; i < 40960; i++)
; D[i] = D[i];
-; CHECK: Loop 'for.cond4' has cost = 2560
+; CHECK: Loop 'for.cond4' has cost = 2561
define void @Test4(ptr %D) {
entry:
br label %for.cond4
diff --git a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/loads-store.ll b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/loads-store.ll
index 39fe382a4119..efb1d907605a 100644
--- a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/loads-store.ll
+++ b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/loads-store.ll
@@ -11,8 +11,8 @@ target triple = "powerpc64le-unknown-linux-gnu"
; }
; CHECK: Loop 'for.i' has cost = 3000000
-; CHECK-NEXT: Loop 'for.k' has cost = 2030000
-; CHECK-NEXT: Loop 'for.j' has cost = 1060000
+; CHECK-NEXT: Loop 'for.k' has cost = 2040000
+; CHECK-NEXT: Loop 'for.j' has cost = 1080000
define void @foo(i64 %n, i64 %m, i64 %o, ptr %A, ptr %B, ptr %C) {
entry:
diff --git a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matmul.ll b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matmul.ll
index 9538c3c93538..0e8a25ffb1ca 100644
--- a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matmul.ll
+++ b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matmul.ll
@@ -11,8 +11,8 @@ target triple = "powerpc64le-unknown-linux-gnu"
; }
; CHECK:Loop 'for.i' has cost = 2010000
-; CHECK-NEXT:Loop 'for.k' has cost = 1040000
-; CHECK-NEXT:Loop 'for.j' has cost = 70000
+; CHECK-NEXT:Loop 'for.k' has cost = 1050000
+; CHECK-NEXT:Loop 'for.j' has cost = 90000
define void @matmul(i64 %n, i64 %m, i64 %o, ptr %A, ptr %B, ptr %C) {
entry:
diff --git a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matvecmul.ll b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matvecmul.ll
index 7bbbe43f5a2f..bf5425881ce3 100644
--- a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matvecmul.ll
+++ b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/matvecmul.ll
@@ -17,8 +17,8 @@ target triple = "powerpc64le-unknown-linux-gnu"
; CHECK: Loop 'k_loop' has cost = 10200000000000000
; CHECK-NEXT: Loop 'j_loop' has cost = 102000000000000
; CHECK-NEXT: Loop 'i_loop' has cost = 1020000000000
-; CHECK-NEXT: Loop 'm_loop' has cost = 10700000000
-; CHECK-NEXT: Loop 'l_loop' has cost = 1300000000
+; CHECK-NEXT: Loop 'm_loop' has cost = 10800000000
+; CHECK-NEXT: Loop 'l_loop' has cost = 1500000000
%_elem_type_of_double = type <{ double }>
diff --git a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/multi-store.ll b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/multi-store.ll
index 63425c7ecef4..b6c2497d45b9 100644
--- a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/multi-store.ll
+++ b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/multi-store.ll
@@ -5,7 +5,7 @@ target triple = "powerpc64le-unknown-linux-gnu"
; CHECK: Loop 'for.j' has cost = 201000000
; CHECK-NEXT: Loop 'for.i' has cost = 102000000
-; CHECK-NEXT: Loop 'for.k' has cost = 90000
+; CHECK-NEXT: Loop 'for.k' has cost = 120000
;; Test to make sure when we have multiple conflicting access patterns, the
;; chosen loop configuration favours the majority of those accesses.
diff --git a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/single-store.ll b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/single-store.ll
index f583822579cf..9aa048489bd3 100644
--- a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/single-store.ll
+++ b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/single-store.ll
@@ -12,7 +12,7 @@ target triple = "powerpc64le-unknown-linux-gnu"
; CHECK: Loop 'for.i' has cost = 100000000
; CHECK-NEXT: Loop 'for.j' has cost = 1000000
-; CHECK-NEXT: Loop 'for.k' has cost = 60000
+; CHECK-NEXT: Loop 'for.k' has cost = 70000
define void @foo(i64 %n, i64 %m, i64 %o, ptr %A) {
entry:
@@ -90,7 +90,7 @@ for.end: ; preds = %for.end.loopexit, %
; CHECK: Loop 'for.i' has cost = 100000000
; CHECK-NEXT: Loop 'for.j' has cost = 1000000
-; CHECK-NEXT: Loop 'for.k' has cost = 60000
+; CHECK-NEXT: Loop 'for.k' has cost = 70000
define void @foo2(i64 %n, i64 %m, i64 %o, ptr %A) {
entry:
diff --git a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/stencil.ll b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/stencil.ll
index b79a47aed1ef..a4be5ba5dbf0 100644
--- a/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/stencil.ll
+++ b/llvm/test/Analysis/LoopCacheAnalysis/PowerPC/stencil.ll
@@ -11,8 +11,8 @@ target triple = "powerpc64le-unknown-linux-gnu"
; }
; }
-; CHECK: Loop 'for.i' has cost = 20600
-; CHECK-NEXT: Loop 'for.j' has cost = 800
+; CHECK: Loop 'for.i' has cost = 20800
+; CHECK-NEXT: Loop 'for.j' has cost = 1000
define void @foo(i64 %n, i64 %m, ptr %A, ptr %B, ptr %C) {
entry:
diff --git a/llvm/test/Analysis/LoopCacheAnalysis/compute-cost.ll b/llvm/test/Analysis/LoopCacheAnalysis/compute-cost.ll
index d979645bef57..205cd851fce0 100644
--- a/llvm/test/Analysis/LoopCacheAnalysis/compute-cost.ll
+++ b/llvm/test/Analysis/LoopCacheAnalysis/compute-cost.ll
@@ -8,6 +8,9 @@
; Check IndexedReference::computeRefCost can handle type differences between
; Stride and TripCount
+; Round costs up to the nearest whole number, i.e. in 'for.cond5' the cost is
+; calculated as 12.5, and it makes more sense to say that 13 cache lines are
+; used rather than 12, as sketched below.
+
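+; A hedged numeric sketch of the rounding rule (the figures are illustrative
+; assumptions, not values computed by this test):
+;   raw cost      = bytes touched / cache-line size = 400 / 32 = 12.5
+;   reported cost = ceil(12.5)                      = 13
+; A partially used cache line still occupies a whole line, so rounding up is
+; the more faithful count.
+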
; SMALLER-CACHELINE: Loop 'for.cond' has cost = 256
; LARGER-CACHELINE: Loop 'for.cond' has cost = 32
%struct._Handleitem = type { ptr }
@@ -40,10 +43,10 @@ for.end: ; preds = %for.cond
; SMALLER-CACHELINE: Loop 'for.cond' has cost = 100000000
; SMALLER-CACHELINE: Loop 'for.cond1' has cost = 1000000
-; SMALLER-CACHELINE: Loop 'for.cond5' has cost = 120000
+; SMALLER-CACHELINE: Loop 'for.cond5' has cost = 130000
; LARGER-CACHELINE: Loop 'for.cond' has cost = 100000000
; LARGER-CACHELINE: Loop 'for.cond1' has cost = 1000000
-; LARGER-CACHELINE: Loop 'for.cond5' has cost = 10000
+; LARGER-CACHELINE: Loop 'for.cond5' has cost = 20000
@data = external dso_local global [2 x [4 x [18 x i32]]], align 1
define dso_local void @handle_to_ptr_2(i1 %b0, i1 %b1, i1 %b2) {
@@ -122,8 +125,8 @@ for.neg.end: ; preds = %for.neg.cond
; access functions. When this is fixed this testcase should have a cost
; approximately 2x higher.
-; SMALLER-CACHELINE: Loop 'for.cond2' has cost = 10240
-; LARGER-CACHELINE: Loop 'for.cond2' has cost = 1280
+; SMALLER-CACHELINE: Loop 'for.cond2' has cost = 10241
+; LARGER-CACHELINE: Loop 'for.cond2' has cost = 1281
define void @Test2(ptr %B) {
entry:
br label %for.cond2
@@ -153,8 +156,8 @@ for.end: ; preds = %for.cond
; for (i = 40960; i > 0; i--)
; C[i] = C[i];
-; SMALLER-CACHELINE: Loop 'for.cond3' has cost = 10240
-; LARGER-CACHELINE: Loop 'for.cond3' has cost = 1280
+; SMALLER-CACHELINE: Loop 'for.cond3' has cost = 10241
+; LARGER-CACHELINE: Loop 'for.cond3' has cost = 1281
define void @Test3(ptr %C) {
entry:
br label %for.cond3
@@ -183,8 +186,8 @@ for.end: ; preds = %for.cond
; for (i = 0; i < 40960; i++)
; D[i] = D[i];
-; SMALLER-CACHELINE: Loop 'for.cond4' has cost = 10240
-; LARGER-CACHELINE: Loop 'for.cond4' has cost = 1280
+; SMALLER-CACHELINE: Loop 'for.cond4' has cost = 10241
+; LARGER-CACHELINE: Loop 'for.cond4' has cost = 1281
define void @Test4(ptr %D) {
entry:
br label %for.cond4
diff --git a/llvm/test/Analysis/LoopCacheAnalysis/interchange-cost-beneficial.ll b/llvm/test/Analysis/LoopCacheAnalysis/interchange-cost-beneficial.ll
new file mode 100644
index 000000000000..3086224c5820
--- /dev/null
+++ b/llvm/test/Analysis/LoopCacheAnalysis/interchange-cost-beneficial.ll
@@ -0,0 +1,62 @@
+; RUN: opt < %s -cache-line-size=64 -passes='print<loop-cache-cost>' -disable-output 2>&1 | FileCheck %s
+
+;; This test checks the effect of rounding the cache cost up to 1 when it
+;; evaluates to 0 because at least 1 cache line is accessed by the loopnest.
+;; It does not make sense to report that zero cache lines are used.
+;; The costs of the reference groups for B[j], C[j], D[j] and E[j] were
+;; previously calculated as 0; they are now 1, which makes each loop's cost
+;; more reasonable. A numeric sketch follows the C source below.
+;
+; void test(int n, int m, int o, int A[2][3], int B[2], int C[2], int D[2], int E[2]) {
+;   for (int i = 0; i < 3; i++)
+;     for (int j = 0; j < 2; j++) {
+;       A[j][i] = 1;
+;       B[j] = 1;
+;       C[j] = 1;
+;       D[j] = 1;
+;       E[j] = 1;
+;     }
+; }
+
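+; A hedged numeric sketch of the round-to-1 rule (illustrative assumptions,
+; not values computed for this loopnest):
+;   B[j] touches 2 * 4 bytes = 8 bytes, well under one 64-byte cache line,
+;   so the raw cost 8 / 64 = 0.125 used to truncate to 0;
+;   rounding up reports 1, since at least one line is brought into the cache.
+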
+; CHECK: Loop 'for.j' has cost = 18
+; CHECK-NEXT: Loop 'for.i' has cost = 10
+
+define void @test(ptr %A, ptr %B, ptr %C, ptr %D, ptr %E) {
+
+entry:
+ br label %for.i.preheader.split
+
+for.i.preheader.split:                            ; preds = %entry
+ br label %for.i
+
+for.i: ; preds = %for.inci, %for.i.preheader.split
+ %i = phi i64 [ %inci, %for.inci ], [ 0, %for.i.preheader.split ]
+ br label %for.j
+
+for.j:                                            ; preds = %for.j, %for.i
+ %j = phi i64 [ %incj, %for.j ], [ 0, %for.i ]
+ %mul_j = mul nsw i64 %j, 3
+ %index_j = add i64 %mul_j, %i
+ %arrayidxA = getelementptr inbounds [2 x [ 3 x i32]], ptr %A, i64 %j, i64 %i
+ store i32 1, ptr %arrayidxA, align 4
+ %arrayidxB = getelementptr inbounds i32, ptr %B, i64 %j
+ store i32 1, ptr %arrayidxB, align 4
+ %arrayidxC = getelementptr inbounds i32, ptr %C, i64 %j
+ store i32 1, ptr %arrayidxC, align 4
+ %arrayidxD = getelementptr inbounds i32, ptr %D, i64 %j
+ store i32 1, ptr %arrayidxD, align 4
+ %arrayidxE = getelementptr inbounds i32, ptr %E, i64 %j
+ store i32 1, ptr %arrayidxE, align 4
+ %incj = add nsw i64 %j, 1
+ %exitcond.us = icmp eq i64 %incj, 2
+ br i1 %exitcond.us, label %for.inci, label %for.j
+
+for.inci:                                         ; preds = %for.j
+ %inci = add nsw i64 %i, 1
+ %exitcond55.us = icmp eq i64 %inci, 3
+ br i1 %exitcond55.us, label %for.end.loopexit, label %for.i
+
+for.end.loopexit: ; preds = %for.inci
+ br label %for.end
+
+for.end:                                          ; preds = %for.end.loopexit
+ ret void
+}
diff --git a/llvm/test/Analysis/ScalarEvolution/exhaustive-trip-counts.ll b/llvm/test/Analysis/ScalarEvolution/exhaustive-trip-counts.ll
index 21237f426693..cc08fa5fc7d8 100644
--- a/llvm/test/Analysis/ScalarEvolution/exhaustive-trip-counts.ll
+++ b/llvm/test/Analysis/ScalarEvolution/exhaustive-trip-counts.ll
@@ -27,4 +27,156 @@ for.cond.cleanup:
ret void
}
+; Do not compute exhaustive trip count based on FP libcalls, as their exact
+; return value may not be specified.
+define i64 @test_fp_libcall() {
+; CHECK-LABEL: 'test_fp_libcall'
+; CHECK-NEXT: Determining loop execution counts for: @test_fp_libcall
+; CHECK-NEXT: Loop %loop: Unpredictable backedge-taken count.
+; CHECK-NEXT: Loop %loop: Unpredictable constant max backedge-taken count.
+; CHECK-NEXT: Loop %loop: Unpredictable symbolic max backedge-taken count.
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %fv = phi double [ 1.000000e+00, %entry ], [ %fv.next, %loop ]
+ call void @use(double %fv)
+ %fv.next = call double @llvm.sin.f64(double %fv)
+ %iv.next = add i64 %iv, 1
+ %fcmp = fcmp une double %fv, 0x3FC6BA15EE8460B0
+ br i1 %fcmp, label %loop, label %exit
+
+exit:
+ ret i64 %iv
+}
+
+; Do not compute exhaustive trip count based on FP constant folding resulting
+; in NaN values, as we don't specify which NaN exactly is returned.
+define i64 @test_nan_sign() {
+; CHECK-LABEL: 'test_nan_sign'
+; CHECK-NEXT: Determining loop execution counts for: @test_nan_sign
+; CHECK-NEXT: Loop %loop: Unpredictable backedge-taken count.
+; CHECK-NEXT: Loop %loop: Unpredictable constant max backedge-taken count.
+; CHECK-NEXT: Loop %loop: Unpredictable symbolic max backedge-taken count.
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %fv = phi double [ -1.000000e+00, %entry ], [ %fv.next, %loop ]
+ call void @use(double %fv)
+ %a = fsub double %fv, 0x7F86C16C16C16C16
+ %b = fadd double %a, %a
+ %fv.next = fsub double %b, %a
+ %iv.next = add i64 %iv, 1
+ %fv.bc = bitcast double %fv to i64
+ %icmp = icmp slt i64 %fv.bc, 0
+ br i1 %icmp, label %loop, label %exit
+
+exit:
+ ret i64 %iv
+}
+
+; Do not compute exhaustive trip count based on FP constant folding if the
+; involved operation has nsz or one of the algebraic FMF flags (reassoc, arcp,
+; contract) set. The examples in the following are dummies and don't illustrate
+; real cases where FMF transforms could cause issues.
+
+define i64 @test_fp_nsz() {
+; CHECK-LABEL: 'test_fp_nsz'
+; CHECK-NEXT: Determining loop execution counts for: @test_fp_nsz
+; CHECK-NEXT: Loop %loop: Unpredictable backedge-taken count.
+; CHECK-NEXT: Loop %loop: Unpredictable constant max backedge-taken count.
+; CHECK-NEXT: Loop %loop: Unpredictable symbolic max backedge-taken count.
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %fv = phi double [ 1.000000e+00, %entry ], [ %fv.next, %loop ]
+ call void @use(double %fv)
+ %fv.next = fadd nsz double %fv, 1.0
+ %iv.next = add i64 %iv, 1
+ %fcmp = fcmp une double %fv, 100.0
+ br i1 %fcmp, label %loop, label %exit
+
+exit:
+ ret i64 %iv
+}
+
+define i64 @test_fp_reassoc() {
+; CHECK-LABEL: 'test_fp_reassoc'
+; CHECK-NEXT: Determining loop execution counts for: @test_fp_reassoc
+; CHECK-NEXT: Loop %loop: Unpredictable backedge-taken count.
+; CHECK-NEXT: Loop %loop: Unpredictable constant max backedge-taken count.
+; CHECK-NEXT: Loop %loop: Unpredictable symbolic max backedge-taken count.
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %fv = phi double [ 1.000000e+00, %entry ], [ %fv.next, %loop ]
+ call void @use(double %fv)
+ %fv.next = fadd reassoc double %fv, 1.0
+ %iv.next = add i64 %iv, 1
+ %fcmp = fcmp une double %fv, 100.0
+ br i1 %fcmp, label %loop, label %exit
+
+exit:
+ ret i64 %iv
+}
+
+define i64 @test_fp_arcp() {
+; CHECK-LABEL: 'test_fp_arcp'
+; CHECK-NEXT: Determining loop execution counts for: @test_fp_arcp
+; CHECK-NEXT: Loop %loop: Unpredictable backedge-taken count.
+; CHECK-NEXT: Loop %loop: Unpredictable constant max backedge-taken count.
+; CHECK-NEXT: Loop %loop: Unpredictable symbolic max backedge-taken count.
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %fv = phi double [ 1.000000e+00, %entry ], [ %fv.next, %loop ]
+ call void @use(double %fv)
+ %fv.next = fadd arcp double %fv, 1.0
+ %iv.next = add i64 %iv, 1
+ %fcmp = fcmp une double %fv, 100.0
+ br i1 %fcmp, label %loop, label %exit
+
+exit:
+ ret i64 %iv
+}
+
+define i64 @test_fp_contract() {
+; CHECK-LABEL: 'test_fp_contract'
+; CHECK-NEXT: Determining loop execution counts for: @test_fp_contract
+; CHECK-NEXT: Loop %loop: Unpredictable backedge-taken count.
+; CHECK-NEXT: Loop %loop: Unpredictable constant max backedge-taken count.
+; CHECK-NEXT: Loop %loop: Unpredictable symbolic max backedge-taken count.
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %fv = phi double [ 1.000000e+00, %entry ], [ %fv.next, %loop ]
+ call void @use(double %fv)
+ %fv.next = fadd contract double %fv, 1.0
+ %iv.next = add i64 %iv, 1
+ %fcmp = fcmp une double %fv, 100.0
+ br i1 %fcmp, label %loop, label %exit
+
+exit:
+ ret i64 %iv
+}
+
declare void @dummy()
+declare void @use(double %i)
+declare double @llvm.sin.f64(double)
diff --git a/llvm/test/Analysis/ScalarEvolution/exit-count-non-strict.ll b/llvm/test/Analysis/ScalarEvolution/exit-count-non-strict.ll
index 2117c779f4b3..e9faf98eee44 100644
--- a/llvm/test/Analysis/ScalarEvolution/exit-count-non-strict.ll
+++ b/llvm/test/Analysis/ScalarEvolution/exit-count-non-strict.ll
@@ -4,13 +4,14 @@
define void @ule_from_zero(i32 %M, i32 %N) {
; CHECK-LABEL: 'ule_from_zero'
; CHECK-NEXT: Determining loop execution counts for: @ule_from_zero
-; CHECK-NEXT: Loop %loop: <multiple exits> Unpredictable backedge-taken count.
-; CHECK-NEXT: exit count for loop: ***COULDNOTCOMPUTE***
+; CHECK-NEXT: Loop %loop: <multiple exits> backedge-taken count is ((zext i32 %N to i64) umin (1 + (zext i32 %M to i64))<nuw><nsw>)
+; CHECK-NEXT: exit count for loop: (1 + (zext i32 %M to i64))<nuw><nsw>
; CHECK-NEXT: exit count for latch: %N
-; CHECK-NEXT: Loop %loop: constant max backedge-taken count is i32 -1
-; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is %N
-; CHECK-NEXT: symbolic max exit count for loop: ***COULDNOTCOMPUTE***
+; CHECK-NEXT: Loop %loop: constant max backedge-taken count is i64 4294967295
+; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is ((zext i32 %N to i64) umin (1 + (zext i32 %M to i64))<nuw><nsw>)
+; CHECK-NEXT: symbolic max exit count for loop: (1 + (zext i32 %M to i64))<nuw><nsw>
; CHECK-NEXT: symbolic max exit count for latch: %N
+; CHECK-NEXT: Loop %loop: Trip multiple is 1
;
entry:
br label %loop
@@ -61,13 +62,14 @@ exit:
define void @ule_from_unknown(i32 %M, i32 %N, i32 %S) {
; CHECK-LABEL: 'ule_from_unknown'
; CHECK-NEXT: Determining loop execution counts for: @ule_from_unknown
-; CHECK-NEXT: Loop %loop: <multiple exits> Unpredictable backedge-taken count.
-; CHECK-NEXT: exit count for loop: ***COULDNOTCOMPUTE***
+; CHECK-NEXT: Loop %loop: <multiple exits> backedge-taken count is (((-1 * (zext i32 %S to i64))<nsw> + ((zext i32 %S to i64) umax (1 + (zext i32 %M to i64))<nuw><nsw>)) umin_seq (zext i32 ((-1 * %S) + %N) to i64))
+; CHECK-NEXT: exit count for loop: ((-1 * (zext i32 %S to i64))<nsw> + ((zext i32 %S to i64) umax (1 + (zext i32 %M to i64))<nuw><nsw>))
; CHECK-NEXT: exit count for latch: ((-1 * %S) + %N)
-; CHECK-NEXT: Loop %loop: constant max backedge-taken count is i32 -1
-; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is ((-1 * %S) + %N)
-; CHECK-NEXT: symbolic max exit count for loop: ***COULDNOTCOMPUTE***
+; CHECK-NEXT: Loop %loop: constant max backedge-taken count is i64 4294967295
+; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is (((-1 * (zext i32 %S to i64))<nsw> + ((zext i32 %S to i64) umax (1 + (zext i32 %M to i64))<nuw><nsw>)) umin_seq (zext i32 ((-1 * %S) + %N) to i64))
+; CHECK-NEXT: symbolic max exit count for loop: ((-1 * (zext i32 %S to i64))<nsw> + ((zext i32 %S to i64) umax (1 + (zext i32 %M to i64))<nuw><nsw>))
; CHECK-NEXT: symbolic max exit count for latch: ((-1 * %S) + %N)
+; CHECK-NEXT: Loop %loop: Trip multiple is 1
;
entry:
br label %loop
@@ -96,6 +98,9 @@ define void @ule_from_zero_no_nuw(i32 %M, i32 %N) {
; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is %N
; CHECK-NEXT: symbolic max exit count for loop: ***COULDNOTCOMPUTE***
; CHECK-NEXT: symbolic max exit count for latch: %N
+; CHECK-NEXT: Loop %loop: Predicated backedge-taken count is ((zext i32 %N to i64) umin (1 + (zext i32 %M to i64))<nuw><nsw>)
+; CHECK-NEXT: Predicates:
+; CHECK-NEXT: {0,+,1}<%loop> Added Flags: <nusw>
;
entry:
br label %loop
@@ -117,13 +122,14 @@ exit:
define void @sle_from_int_min(i32 %M, i32 %N) {
; CHECK-LABEL: 'sle_from_int_min'
; CHECK-NEXT: Determining loop execution counts for: @sle_from_int_min
-; CHECK-NEXT: Loop %loop: <multiple exits> Unpredictable backedge-taken count.
-; CHECK-NEXT: exit count for loop: ***COULDNOTCOMPUTE***
+; CHECK-NEXT: Loop %loop: <multiple exits> backedge-taken count is ((zext i32 (-2147483648 + %N) to i64) umin (2147483649 + (sext i32 %M to i64))<nsw>)
+; CHECK-NEXT: exit count for loop: (2147483649 + (sext i32 %M to i64))<nsw>
; CHECK-NEXT: exit count for latch: (-2147483648 + %N)
-; CHECK-NEXT: Loop %loop: constant max backedge-taken count is i32 -1
-; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is (-2147483648 + %N)
-; CHECK-NEXT: symbolic max exit count for loop: ***COULDNOTCOMPUTE***
+; CHECK-NEXT: Loop %loop: constant max backedge-taken count is i64 4294967295
+; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is ((zext i32 (-2147483648 + %N) to i64) umin (2147483649 + (sext i32 %M to i64))<nsw>)
+; CHECK-NEXT: symbolic max exit count for loop: (2147483649 + (sext i32 %M to i64))<nsw>
; CHECK-NEXT: symbolic max exit count for latch: (-2147483648 + %N)
+; CHECK-NEXT: Loop %loop: Trip multiple is 1
;
entry:
br label %loop
@@ -174,13 +180,14 @@ exit:
define void @sle_from_unknown(i32 %M, i32 %N, i32 %S) {
; CHECK-LABEL: 'sle_from_unknown'
; CHECK-NEXT: Determining loop execution counts for: @sle_from_unknown
-; CHECK-NEXT: Loop %loop: <multiple exits> Unpredictable backedge-taken count.
-; CHECK-NEXT: exit count for loop: ***COULDNOTCOMPUTE***
+; CHECK-NEXT: Loop %loop: <multiple exits> backedge-taken count is (((-1 * (sext i32 %S to i64))<nsw> + ((sext i32 %S to i64) smax (1 + (sext i32 %M to i64))<nsw>)) umin_seq (zext i32 ((-1 * %S) + %N) to i64))
+; CHECK-NEXT: exit count for loop: ((-1 * (sext i32 %S to i64))<nsw> + ((sext i32 %S to i64) smax (1 + (sext i32 %M to i64))<nsw>))
; CHECK-NEXT: exit count for latch: ((-1 * %S) + %N)
-; CHECK-NEXT: Loop %loop: constant max backedge-taken count is i32 -1
-; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is ((-1 * %S) + %N)
-; CHECK-NEXT: symbolic max exit count for loop: ***COULDNOTCOMPUTE***
+; CHECK-NEXT: Loop %loop: constant max backedge-taken count is i64 4294967295
+; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is (((-1 * (sext i32 %S to i64))<nsw> + ((sext i32 %S to i64) smax (1 + (sext i32 %M to i64))<nsw>)) umin_seq (zext i32 ((-1 * %S) + %N) to i64))
+; CHECK-NEXT: symbolic max exit count for loop: ((-1 * (sext i32 %S to i64))<nsw> + ((sext i32 %S to i64) smax (1 + (sext i32 %M to i64))<nsw>))
; CHECK-NEXT: symbolic max exit count for latch: ((-1 * %S) + %N)
+; CHECK-NEXT: Loop %loop: Trip multiple is 1
;
entry:
br label %loop
@@ -209,6 +216,9 @@ define void @sle_from_int_min_no_nsw(i32 %M, i32 %N) {
; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is (-2147483648 + %N)
; CHECK-NEXT: symbolic max exit count for loop: ***COULDNOTCOMPUTE***
; CHECK-NEXT: symbolic max exit count for latch: (-2147483648 + %N)
+; CHECK-NEXT: Loop %loop: Predicated backedge-taken count is ((zext i32 (-2147483648 + %N) to i64) umin (2147483649 + (sext i32 %M to i64))<nsw>)
+; CHECK-NEXT: Predicates:
+; CHECK-NEXT: {-2147483648,+,1}<%loop> Added Flags: <nssw>
;
entry:
br label %loop
diff --git a/llvm/test/Assembler/ConstantExprFold.ll b/llvm/test/Assembler/ConstantExprFold.ll
index 4ce44d2e5513..ab7e767d767b 100644
--- a/llvm/test/Assembler/ConstantExprFold.ll
+++ b/llvm/test/Assembler/ConstantExprFold.ll
@@ -37,22 +37,22 @@
; Need a function to make update_test_checks.py work.
;.
-; CHECK: @[[A:[a-zA-Z0-9_$"\\.-]+]] = global i64 0
-; CHECK: @[[ADD:[a-zA-Z0-9_$"\\.-]+]] = global ptr @A
-; CHECK: @[[SUB:[a-zA-Z0-9_$"\\.-]+]] = global ptr @A
-; CHECK: @[[MUL:[a-zA-Z0-9_$"\\.-]+]] = global ptr null
-; CHECK: @[[XOR:[a-zA-Z0-9_$"\\.-]+]] = global ptr @A
-; CHECK: @[[B:[a-zA-Z0-9_$"\\.-]+]] = external global [[TY:%.*]]
-; CHECK: @[[ICMP_ULT1:[a-zA-Z0-9_$"\\.-]+]] = global i1 icmp ugt (ptr getelementptr inbounds (i64, ptr @A, i64 1), ptr @A)
-; CHECK: @[[ICMP_SLT:[a-zA-Z0-9_$"\\.-]+]] = global i1 false
-; CHECK: @[[ICMP_ULT2:[a-zA-Z0-9_$"\\.-]+]] = global i1 icmp ugt (ptr getelementptr inbounds ([[TY:%.*]], ptr @B, i64 0, i32 1), ptr @B)
-; CHECK: @[[CONS:[a-zA-Z0-9_$"\\.-]+]] = weak global i32 0, align 8
-; CHECK: @[[GEP1:[a-zA-Z0-9_$"\\.-]+]] = global <2 x ptr> undef
-; CHECK: @[[GEP2:[a-zA-Z0-9_$"\\.-]+]] = global <2 x ptr> undef
-; CHECK: @[[GEP3:[a-zA-Z0-9_$"\\.-]+]] = global <2 x ptr> zeroinitializer
-; CHECK: @[[GEP4:[a-zA-Z0-9_$"\\.-]+]] = global <2 x ptr> zeroinitializer
-; CHECK: @[[BITCAST1:[a-zA-Z0-9_$"\\.-]+]] = global <2 x i32> <i32 -1, i32 -1>
-; CHECK: @[[BITCAST2:[a-zA-Z0-9_$"\\.-]+]] = global <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>
+; CHECK: @A = global i64 0
+; CHECK: @add = global ptr @A
+; CHECK: @sub = global ptr @A
+; CHECK: @mul = global ptr null
+; CHECK: @xor = global ptr @A
+; CHECK: @B = external global %Ty
+; CHECK: @icmp_ult1 = global i1 icmp ugt (ptr getelementptr inbounds (i64, ptr @A, i64 1), ptr @A)
+; CHECK: @icmp_slt = global i1 false
+; CHECK: @icmp_ult2 = global i1 icmp ugt (ptr getelementptr inbounds (%Ty, ptr @B, i64 0, i32 1), ptr @B)
+; CHECK: @cons = weak global i32 0, align 8
+; CHECK: @gep1 = global <2 x ptr> undef
+; CHECK: @gep2 = global <2 x ptr> undef
+; CHECK: @gep3 = global <2 x ptr> zeroinitializer
+; CHECK: @gep4 = global <2 x ptr> zeroinitializer
+; CHECK: @bitcast1 = global <2 x i32> <i32 -1, i32 -1>
+; CHECK: @bitcast2 = global <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>
;.
define void @dummy() {
; CHECK-LABEL: @dummy(
diff --git a/llvm/test/Assembler/flags.ll b/llvm/test/Assembler/flags.ll
index e0ad8bf000be..7d2aafa005e7 100644
--- a/llvm/test/Assembler/flags.ll
+++ b/llvm/test/Assembler/flags.ll
@@ -2,6 +2,7 @@
; RUN: verify-uselistorder %s
@addr = external global i64
+@addr_as1 = external addrspace(1) global i64
define i64 @add_unsigned(i64 %x, i64 %y) {
; CHECK: %z = add nuw i64 %x, %y
@@ -316,3 +317,104 @@ define <2 x i32> @test_trunc_both_reversed_vector(<2 x i64> %a) {
%res = trunc nsw nuw <2 x i64> %a to <2 x i32>
ret <2 x i32> %res
}
+
+define ptr @gep_nuw(ptr %p, i64 %idx) {
+; CHECK: %gep = getelementptr nuw i8, ptr %p, i64 %idx
+ %gep = getelementptr nuw i8, ptr %p, i64 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_inbounds_nuw(ptr %p, i64 %idx) {
+; CHECK: %gep = getelementptr inbounds nuw i8, ptr %p, i64 %idx
+ %gep = getelementptr inbounds nuw i8, ptr %p, i64 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_nusw(ptr %p, i64 %idx) {
+; CHECK: %gep = getelementptr nusw i8, ptr %p, i64 %idx
+ %gep = getelementptr nusw i8, ptr %p, i64 %idx
+ ret ptr %gep
+}
+
+; inbounds implies nusw, so the flag is not printed back.
+define ptr @gep_inbounds_nusw(ptr %p, i64 %idx) {
+; CHECK: %gep = getelementptr inbounds i8, ptr %p, i64 %idx
+ %gep = getelementptr inbounds nusw i8, ptr %p, i64 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_nusw_nuw(ptr %p, i64 %idx) {
+; CHECK: %gep = getelementptr nusw nuw i8, ptr %p, i64 %idx
+ %gep = getelementptr nusw nuw i8, ptr %p, i64 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_inbounds_nusw_nuw(ptr %p, i64 %idx) {
+; CHECK: %gep = getelementptr inbounds nuw i8, ptr %p, i64 %idx
+ %gep = getelementptr inbounds nusw nuw i8, ptr %p, i64 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_nuw_nusw_inbounds(ptr %p, i64 %idx) {
+; CHECK: %gep = getelementptr inbounds nuw i8, ptr %p, i64 %idx
+ %gep = getelementptr nuw nusw inbounds i8, ptr %p, i64 %idx
+ ret ptr %gep
+}
+
+define ptr addrspace(1) @gep_nusw_nuw_as1(ptr addrspace(1) %p, i64 %idx) {
+; CHECK: %gep = getelementptr nusw nuw i8, ptr addrspace(1) %p, i64 %idx
+ %gep = getelementptr nusw nuw i8, ptr addrspace(1) %p, i64 %idx
+ ret ptr addrspace(1) %gep
+}
+
+define <2 x ptr> @gep_nusw_nuw_vec(<2 x ptr> %p, i64 %idx) {
+; CHECK: %gep = getelementptr nusw nuw i8, <2 x ptr> %p, i64 %idx
+ %gep = getelementptr nusw nuw i8, <2 x ptr> %p, i64 %idx
+ ret <2 x ptr> %gep
+}
+
+define ptr @const_gep_nuw() {
+; CHECK: ret ptr getelementptr nuw (i8, ptr @addr, i64 100)
+ ret ptr getelementptr nuw (i8, ptr @addr, i64 100)
+}
+
+define ptr @const_gep_inbounds_nuw() {
+; CHECK: ret ptr getelementptr inbounds nuw (i8, ptr @addr, i64 100)
+ ret ptr getelementptr inbounds nuw (i8, ptr @addr, i64 100)
+}
+
+define ptr @const_gep_nusw() {
+; CHECK: ret ptr getelementptr nusw (i8, ptr @addr, i64 100)
+ ret ptr getelementptr nusw (i8, ptr @addr, i64 100)
+}
+
+; inbounds implies nusw, so the flag is not printed back.
+define ptr @const_gep_inbounds_nusw() {
+; CHECK: ret ptr getelementptr inbounds (i8, ptr @addr, i64 100)
+ ret ptr getelementptr inbounds nusw (i8, ptr @addr, i64 100)
+}
+
+define ptr @const_gep_nusw_nuw() {
+; CHECK: ret ptr getelementptr nusw nuw (i8, ptr @addr, i64 100)
+ ret ptr getelementptr nusw nuw (i8, ptr @addr, i64 100)
+}
+
+define ptr @const_gep_inbounds_nusw_nuw() {
+; CHECK: ret ptr getelementptr inbounds nuw (i8, ptr @addr, i64 100)
+ ret ptr getelementptr inbounds nusw nuw (i8, ptr @addr, i64 100)
+}
+
+define ptr @const_gep_nuw_nusw_inbounds() {
+; CHECK: ret ptr getelementptr inbounds nuw (i8, ptr @addr, i64 100)
+ ret ptr getelementptr nuw nusw inbounds (i8, ptr @addr, i64 100)
+}
+
+define ptr @const_gep_nuw_inrange() {
+; CHECK: ret ptr getelementptr nuw inrange(-8, 16) (i8, ptr @addr, i64 100)
+ ret ptr getelementptr nuw inrange(-8, 16) (i8, ptr @addr, i64 100)
+}
+
+define ptr addrspace(1) @const_gep_nusw_nuw_as1() {
+; CHECK: ret ptr addrspace(1) getelementptr nusw nuw (i8, ptr addrspace(1) @addr_as1, i64 100)
+ ret ptr addrspace(1) getelementptr nusw nuw (i8, ptr addrspace(1) @addr_as1, i64 100)
+}
diff --git a/llvm/test/CMakeLists.txt b/llvm/test/CMakeLists.txt
index eb4013511416..c942339e4360 100644
--- a/llvm/test/CMakeLists.txt
+++ b/llvm/test/CMakeLists.txt
@@ -230,7 +230,7 @@ if (LLVM_INCLUDE_SPIRV_TOOLS_TESTS)
endif()
add_custom_target(llvm-test-depends DEPENDS ${LLVM_TEST_DEPENDS})
-set_target_properties(llvm-test-depends PROPERTIES FOLDER "Tests")
+set_target_properties(llvm-test-depends PROPERTIES FOLDER "LLVM/Tests")
if(LLVM_BUILD_TOOLS)
set(exclude_from_check_all "")
@@ -243,7 +243,7 @@ add_lit_testsuite(check-llvm "Running the LLVM regression tests"
${exclude_from_check_all}
DEPENDS ${LLVM_TEST_DEPENDS}
)
-set_target_properties(check-llvm PROPERTIES FOLDER "Tests")
+set_target_properties(check-llvm PROPERTIES FOLDER "LLVM/Tests")
add_lit_testsuites(LLVM ${CMAKE_CURRENT_SOURCE_DIR}
${exclude_from_check_all}
@@ -254,4 +254,4 @@ add_lit_testsuites(LLVM ${CMAKE_CURRENT_SOURCE_DIR}
# Setup an alias for 'check-all'.
add_custom_target(check)
add_dependencies(check check-all)
-set_target_properties(check PROPERTIES FOLDER "Tests")
+set_target_properties(check PROPERTIES FOLDER "LLVM/Tests")
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir
new file mode 100644
index 000000000000..be33f9f7b284
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-integer.mir
@@ -0,0 +1,252 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple aarch64 -run-pass=aarch64-prelegalizer-combiner %s -o - | FileCheck %s
+
+
+---
+name: ZeroMinusAPlusB
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: ZeroMinusAPlusB
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %a:_(s32) = COPY $w0
+ ; CHECK-NEXT: %b:_(s32) = COPY $w0
+ ; CHECK-NEXT: %add:_(s32) = G_SUB %b, %a
+ ; CHECK-NEXT: $w0 = COPY %add(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %a:_(s32) = COPY $w0
+ %b:_(s32) = COPY $w0
+ %zero:_(s32) = G_CONSTANT i32 0
+ %sub:_(s32) = G_SUB %zero, %a
+ %add:_(s32) = G_ADD %sub, %b
+ $w0 = COPY %add
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: ZeroMinusAPlusB_multi_use
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: ZeroMinusAPlusB_multi_use
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %a:_(s32) = COPY $w0
+ ; CHECK-NEXT: %b:_(s32) = COPY $w0
+ ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: %sub:_(s32) = G_SUB %zero, %a
+ ; CHECK-NEXT: %add:_(s32) = G_SUB %b, %a
+ ; CHECK-NEXT: $w0 = COPY %add(s32)
+ ; CHECK-NEXT: $w0 = COPY %sub(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %x:_(s32) = COPY $w0
+ %a:_(s32) = COPY $w0
+ %b:_(s32) = COPY $w0
+ %zero:_(s32) = G_CONSTANT i32 0
+ %sub:_(s32) = G_SUB %zero, %a
+ %add:_(s32) = G_ADD %sub, %b
+ $w0 = COPY %add
+ $w0 = COPY %sub
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: APlusZeroMinusB
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+  ; CHECK-LABEL: name: APlusZeroMinusB
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %a:_(s64) = COPY $x1
+ ; CHECK-NEXT: %b:_(s64) = COPY $x2
+ ; CHECK-NEXT: %add:_(s64) = G_SUB %a, %b
+ ; CHECK-NEXT: $x0 = COPY %add(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %a:_(s64) = COPY $x1
+ %b:_(s64) = COPY $x2
+ %zero:_(s64) = G_CONSTANT i64 0
+ %sub:_(s64) = G_SUB %zero, %b
+ %add:_(s64) = G_ADD %a, %sub
+ $x0 = COPY %add
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: APlusBMinusB
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: APlusBMinusB
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %b:_(s64) = COPY $x1
+ ; CHECK-NEXT: $x0 = COPY %b(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %a:_(s64) = COPY $x0
+ %b:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %sub:_(s64) = G_SUB %b, %a
+ %add:_(s64) = G_ADD %a, %sub
+ $x0 = COPY %add
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: BMinusAPlusA
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: BMinusAPlusA
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %b:_(s64) = COPY $x1
+ ; CHECK-NEXT: $x0 = COPY %b(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %a:_(s64) = COPY $x0
+ %b:_(s64) = COPY $x1
+ %zero:_(s64) = G_CONSTANT i64 0
+ %sub:_(s64) = G_SUB %b, %a
+ %add:_(s64) = G_ADD %sub, %a
+ $x0 = COPY %add
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: AMinusBPlusCMinusA
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: AMinusBPlusCMinusA
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %b:_(s64) = COPY $x1
+ ; CHECK-NEXT: %c:_(s64) = COPY $x2
+ ; CHECK-NEXT: %add:_(s64) = G_SUB %c, %b
+ ; CHECK-NEXT: $x0 = COPY %add(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %a:_(s64) = COPY $x0
+ %b:_(s64) = COPY $x1
+ %c:_(s64) = COPY $x2
+ %zero:_(s64) = G_CONSTANT i64 0
+ %sub2:_(s64) = G_SUB %c, %a
+ %sub1:_(s64) = G_SUB %a, %b
+ %add:_(s64) = G_ADD %sub1, %sub2
+ $x0 = COPY %add
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: AMinusBPlusBMinusC
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: AMinusBPlusBMinusC
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %a:_(s64) = COPY $x0
+ ; CHECK-NEXT: %c:_(s64) = COPY $x2
+ ; CHECK-NEXT: %add:_(s64) = G_SUB %a, %c
+ ; CHECK-NEXT: $x0 = COPY %add(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %a:_(s64) = COPY $x0
+ %b:_(s64) = COPY $x1
+ %c:_(s64) = COPY $x2
+ %zero:_(s64) = G_CONSTANT i64 0
+ %sub2:_(s64) = G_SUB %b, %c
+ %sub1:_(s64) = G_SUB %a, %b
+ %add:_(s64) = G_ADD %sub1, %sub2
+ $x0 = COPY %add
+ RET_ReallyLR implicit $x0
+
+
+...
+---
+name: APlusBMinusAplusC
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: APlusBMinusAplusC
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %b:_(s64) = COPY $x1
+ ; CHECK-NEXT: %c:_(s64) = COPY $x2
+ ; CHECK-NEXT: %add:_(s64) = G_SUB %b, %c
+ ; CHECK-NEXT: $x0 = COPY %add(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %a:_(s64) = COPY $x0
+ %b:_(s64) = COPY $x1
+ %c:_(s64) = COPY $x2
+ %zero:_(s64) = G_CONSTANT i64 0
+ %add1:_(s64) = G_ADD %a, %c
+ %sub1:_(s64) = G_SUB %b, %add1
+ %add:_(s64) = G_ADD %a, %sub1
+ $x0 = COPY %add
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: APlusBMinusCPlusA
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: APlusBMinusCPlusA
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %b:_(s64) = COPY $x1
+ ; CHECK-NEXT: %c:_(s64) = COPY $x2
+ ; CHECK-NEXT: %add:_(s64) = G_SUB %b, %c
+ ; CHECK-NEXT: $x0 = COPY %add(s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %a:_(s64) = COPY $x0
+ %b:_(s64) = COPY $x1
+ %c:_(s64) = COPY $x2
+ %zero:_(s64) = G_CONSTANT i64 0
+ %add1:_(s64) = G_ADD %c, %a
+ %sub1:_(s64) = G_SUB %b, %add1
+ %add:_(s64) = G_ADD %a, %sub1
+ $x0 = COPY %add
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: APlusBMinusCPlusA_BV
+body: |
+ bb.0:
+ liveins: $w0, $w1
+
+ ; CHECK-LABEL: name: APlusBMinusCPlusA_BV
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %a1:_(s64) = COPY $x0
+ ; CHECK-NEXT: %b1:_(s64) = COPY $x1
+ ; CHECK-NEXT: %c1:_(s64) = COPY $x2
+ ; CHECK-NEXT: %b:_(<2 x s64>) = G_BUILD_VECTOR %b1(s64), %ba:_(s64)
+ ; CHECK-NEXT: %c:_(<2 x s64>) = G_BUILD_VECTOR %a1(s64), %c1(s64)
+ ; CHECK-NEXT: %add:_(<2 x s64>) = G_SUB %b, %c
+ ; CHECK-NEXT: $q0 = COPY %add(<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+ %a1:_(s64) = COPY $x0
+ %b1:_(s64) = COPY $x1
+ %c1:_(s64) = COPY $x2
+ %a:_(<2 x s64>) = G_BUILD_VECTOR %a1:_(s64), %b1:_(s64)
+ %b:_(<2 x s64>) = G_BUILD_VECTOR %b1:_(s64), %ba:_(s64)
+ %c:_(<2 x s64>) = G_BUILD_VECTOR %a1:_(s64), %c1:_(s64)
+ %zero:_(s64) = G_CONSTANT i64 0
+ %add1:_(<2 x s64>) = G_ADD %c, %a
+ %sub1:_(<2 x s64>) = G_SUB %b, %add1
+ %add:_(<2 x s64>) = G_ADD %a, %sub1
+ $q0 = COPY %add
+ RET_ReallyLR implicit $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-select.mir
index 353c1550d697..074d4ecbd878 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-select.mir
@@ -117,9 +117,9 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x2
; CHECK-NEXT: %c:_(s1) = G_TRUNC [[COPY]](s64)
- ; CHECK-NEXT: %f:_(s1) = G_TRUNC [[COPY1]](s64)
- ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s1) = G_FREEZE %f
- ; CHECK-NEXT: %sel:_(s1) = G_OR %c, [[FREEZE]]
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s64) = G_FREEZE [[COPY1]]
+ ; CHECK-NEXT: %f:_(s1) = G_TRUNC [[FREEZE]](s64)
+ ; CHECK-NEXT: %sel:_(s1) = G_OR %c, %f
; CHECK-NEXT: %ext:_(s32) = G_ANYEXT %sel(s1)
; CHECK-NEXT: $w0 = COPY %ext(s32)
%0:_(s64) = COPY $x0
@@ -144,9 +144,9 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x2
; CHECK-NEXT: %c:_(s1) = G_TRUNC [[COPY]](s64)
- ; CHECK-NEXT: %f:_(s1) = G_TRUNC [[COPY1]](s64)
- ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s1) = G_FREEZE %f
- ; CHECK-NEXT: %sel:_(s1) = G_OR %c, [[FREEZE]]
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s64) = G_FREEZE [[COPY1]]
+ ; CHECK-NEXT: %f:_(s1) = G_TRUNC [[FREEZE]](s64)
+ ; CHECK-NEXT: %sel:_(s1) = G_OR %c, %f
; CHECK-NEXT: %ext:_(s32) = G_ANYEXT %sel(s1)
; CHECK-NEXT: $w0 = COPY %ext(s32)
%0:_(s64) = COPY $x0
@@ -172,9 +172,9 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d2
; CHECK-NEXT: %c:_(<2 x s1>) = G_TRUNC [[COPY]](<2 x s32>)
- ; CHECK-NEXT: %f:_(<2 x s1>) = G_TRUNC [[COPY1]](<2 x s32>)
- ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(<2 x s1>) = G_FREEZE %f
- ; CHECK-NEXT: %sel:_(<2 x s1>) = G_OR %c, [[FREEZE]]
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(<2 x s32>) = G_FREEZE [[COPY1]]
+ ; CHECK-NEXT: %f:_(<2 x s1>) = G_TRUNC [[FREEZE]](<2 x s32>)
+ ; CHECK-NEXT: %sel:_(<2 x s1>) = G_OR %c, %f
; CHECK-NEXT: %ext:_(<2 x s32>) = G_ANYEXT %sel(<2 x s1>)
; CHECK-NEXT: $d0 = COPY %ext(<2 x s32>)
%0:_(<2 x s32>) = COPY $d0
@@ -201,9 +201,9 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
; CHECK-NEXT: %c:_(s1) = G_TRUNC [[COPY]](s64)
- ; CHECK-NEXT: %t:_(s1) = G_TRUNC [[COPY1]](s64)
- ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s1) = G_FREEZE %t
- ; CHECK-NEXT: %sel:_(s1) = G_AND %c, [[FREEZE]]
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s64) = G_FREEZE [[COPY1]]
+ ; CHECK-NEXT: %t:_(s1) = G_TRUNC [[FREEZE]](s64)
+ ; CHECK-NEXT: %sel:_(s1) = G_AND %c, %t
; CHECK-NEXT: %ext:_(s32) = G_ANYEXT %sel(s1)
; CHECK-NEXT: $w0 = COPY %ext(s32)
%0:_(s64) = COPY $x0
@@ -229,9 +229,9 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
; CHECK-NEXT: %c:_(s1) = G_TRUNC [[COPY]](s64)
- ; CHECK-NEXT: %t:_(s1) = G_TRUNC [[COPY1]](s64)
- ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s1) = G_FREEZE %t
- ; CHECK-NEXT: %sel:_(s1) = G_AND %c, [[FREEZE]]
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s64) = G_FREEZE [[COPY1]]
+ ; CHECK-NEXT: %t:_(s1) = G_TRUNC [[FREEZE]](s64)
+ ; CHECK-NEXT: %sel:_(s1) = G_AND %c, %t
; CHECK-NEXT: %ext:_(s32) = G_ANYEXT %sel(s1)
; CHECK-NEXT: $w0 = COPY %ext(s32)
%0:_(s64) = COPY $x0
@@ -257,11 +257,11 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
; CHECK-NEXT: %c:_(s1) = G_TRUNC [[COPY]](s64)
- ; CHECK-NEXT: %t:_(s1) = G_TRUNC [[COPY1]](s64)
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s64) = G_FREEZE [[COPY1]]
+ ; CHECK-NEXT: %t:_(s1) = G_TRUNC [[FREEZE]](s64)
; CHECK-NEXT: %one:_(s1) = G_CONSTANT i1 true
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR %c, %one
- ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s1) = G_FREEZE %t
- ; CHECK-NEXT: %sel:_(s1) = G_OR [[XOR]], [[FREEZE]]
+ ; CHECK-NEXT: %sel:_(s1) = G_OR [[XOR]], %t
; CHECK-NEXT: %ext:_(s32) = G_ANYEXT %sel(s1)
; CHECK-NEXT: $w0 = COPY %ext(s32)
%0:_(s64) = COPY $x0
@@ -287,11 +287,11 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x2
; CHECK-NEXT: %c:_(s1) = G_TRUNC [[COPY]](s64)
- ; CHECK-NEXT: %f:_(s1) = G_TRUNC [[COPY1]](s64)
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s64) = G_FREEZE [[COPY1]]
+ ; CHECK-NEXT: %f:_(s1) = G_TRUNC [[FREEZE]](s64)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR %c, [[C]]
- ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s1) = G_FREEZE %f
- ; CHECK-NEXT: %sel:_(s1) = G_AND [[XOR]], [[FREEZE]]
+ ; CHECK-NEXT: %sel:_(s1) = G_AND [[XOR]], %f
; CHECK-NEXT: %ext:_(s32) = G_ANYEXT %sel(s1)
; CHECK-NEXT: $w0 = COPY %ext(s32)
%0:_(s64) = COPY $x0
diff --git a/llvm/test/CodeGen/AArch64/arm64-vhadd.ll b/llvm/test/CodeGen/AArch64/arm64-vhadd.ll
index e754f01daa2a..a8be8bbd193a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vhadd.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vhadd.ll
@@ -1379,7 +1379,7 @@ define <8 x i8> @sextmask2v8i8(<8 x i16> %src1, <8 x i8> %src2) {
define <8 x i8> @sextmask3v8i8(<8 x i16> %src1, <8 x i8> %src2) {
; CHECK-LABEL: sextmask3v8i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: sshr.8h v0, v0, #7
+; CHECK-NEXT: ushr.8h v0, v0, #7
; CHECK-NEXT: sshll.8h v1, v1, #0
; CHECK-NEXT: shadd.8h v0, v0, v1
; CHECK-NEXT: xtn.8b v0, v0
diff --git a/llvm/test/CodeGen/AArch64/bitfield-insert.ll b/llvm/test/CodeGen/AArch64/bitfield-insert.ll
index 30b5e86c1e6d..14a594e8028d 100644
--- a/llvm/test/CodeGen/AArch64/bitfield-insert.ll
+++ b/llvm/test/CodeGen/AArch64/bitfield-insert.ll
@@ -193,11 +193,10 @@ define void @test_64bit_badmask(ptr %existing, ptr %new) {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr x8, [x0]
; CHECK-NEXT: ldr x9, [x1]
-; CHECK-NEXT: mov w10, #135 // =0x87
-; CHECK-NEXT: mov w11, #664 // =0x298
-; CHECK-NEXT: lsl w9, w9, #3
-; CHECK-NEXT: and x8, x8, x10
-; CHECK-NEXT: and x9, x9, x11
+; CHECK-NEXT: mov w10, #664 // =0x298
+; CHECK-NEXT: mov w11, #135 // =0x87
+; CHECK-NEXT: and x9, x10, x9, lsl #3
+; CHECK-NEXT: and x8, x8, x11
; CHECK-NEXT: orr x8, x8, x9
; CHECK-NEXT: str x8, [x0]
; CHECK-NEXT: ret
@@ -579,7 +578,6 @@ define <2 x i32> @test_complex_type(ptr %addr, i64 %in, ptr %bf ) {
define i64 @test_truncated_shift(i64 %x, i64 %y) {
; CHECK-LABEL: test_truncated_shift:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $w1 killed $w1 killed $x1 def $x1
; CHECK-NEXT: bfi x0, x1, #25, #5
; CHECK-NEXT: ret
entry:
@@ -593,7 +591,6 @@ entry:
define i64 @test_and_extended_shift_with_imm(i64 %0) {
; CHECK-LABEL: test_and_extended_shift_with_imm:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 def $x0
; CHECK-NEXT: ubfiz x0, x0, #7, #8
; CHECK-NEXT: ret
%2 = shl i64 %0, 7
diff --git a/llvm/test/CodeGen/AArch64/exp10-libcall-names.ll b/llvm/test/CodeGen/AArch64/exp10-libcall-names.ll
new file mode 100644
index 000000000000..1220aec447ab
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/exp10-libcall-names.ll
@@ -0,0 +1,39 @@
+; RUN: llc -mtriple=aarch64-linux-gnu < %s | FileCheck -check-prefix=LINUX %s
+; RUN: llc -mtriple=aarch64-apple-macos10.9 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=aarch64-apple-ios7.0 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=aarch64-apple-tvos7.0 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=aarch64-apple-watchos7.0 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=aarch64-apple-xros7.0 < %s | FileCheck -check-prefix=APPLE %s
+
+; RUN: not llc -mtriple=aarch64-apple-macos10.8 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not llc -mtriple=aarch64-apple-ios6.0 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not llc -mtriple=aarch64-apple-tvos6.0 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not llc -mtriple=aarch64-apple-xros6.0 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+
+; Check exp10/exp10f is emitted as __exp10/__exp10f on assorted systems.
+
+; ERR: no libcall available for fexp10
+
+define float @test_exp10_f32(float %x) {
+; LINUX-LABEL: test_exp10_f32:
+; LINUX: // %bb.0:
+; LINUX-NEXT: b exp10f
+;
+; APPLE-LABEL: test_exp10_f32:
+; APPLE: ; %bb.0:
+; APPLE-NEXT: b ___exp10f
+ %ret = call float @llvm.exp10.f32(float %x)
+ ret float %ret
+}
+
+define double @test_exp10_f64(double %x) {
+; LINUX-LABEL: test_exp10_f64:
+; LINUX: // %bb.0:
+; LINUX-NEXT: b exp10
+;
+; APPLE-LABEL: test_exp10_f64:
+; APPLE: ; %bb.0:
+; APPLE-NEXT: b ___exp10
+ %ret = call double @llvm.exp10.f64(double %x)
+ ret double %ret
+}
diff --git a/llvm/test/CodeGen/AArch64/frem-power2.ll b/llvm/test/CodeGen/AArch64/frem-power2.ll
index 402e03c5e265..4192745abd34 100644
--- a/llvm/test/CodeGen/AArch64/frem-power2.ll
+++ b/llvm/test/CodeGen/AArch64/frem-power2.ll
@@ -3,10 +3,22 @@
; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define float @frem2(float %x) {
-; CHECK-LABEL: frem2:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmov s1, #2.00000000
-; CHECK-NEXT: b fmodf
+; CHECK-SD-LABEL: frem2:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov s1, #2.00000000
+; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-SD-NEXT: fdiv s2, s0, s1
+; CHECK-SD-NEXT: frintz s2, s2
+; CHECK-SD-NEXT: fmsub s1, s2, s1, s0
+; CHECK-SD-NEXT: mvni v2.4s, #128, lsl #24
+; CHECK-SD-NEXT: bit v0.16b, v1.16b, v2.16b
+; CHECK-SD-NEXT: // kill: def $s0 killed $s0 killed $q0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: frem2:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fmov s1, #2.00000000
+; CHECK-GI-NEXT: b fmodf
entry:
%fmod = frem float %x, 2.0
ret float %fmod
@@ -311,6 +323,67 @@ entry:
ret float %fmod
}
+define <4 x float> @frem2_vec(<4 x float> %x) {
+; CHECK-SD-LABEL: frem2_vec:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: movi v1.4s, #64, lsl #24
+; CHECK-SD-NEXT: mov v3.16b, v0.16b
+; CHECK-SD-NEXT: fdiv v2.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: frintz v2.4s, v2.4s
+; CHECK-SD-NEXT: fmls v3.4s, v1.4s, v2.4s
+; CHECK-SD-NEXT: mvni v1.4s, #128, lsl #24
+; CHECK-SD-NEXT: bit v0.16b, v3.16b, v1.16b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: frem2_vec:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sub sp, sp, #80
+; CHECK-GI-NEXT: str d10, [sp, #48] // 8-byte Folded Spill
+; CHECK-GI-NEXT: stp d9, d8, [sp, #56] // 16-byte Folded Spill
+; CHECK-GI-NEXT: str x30, [sp, #72] // 8-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 80
+; CHECK-GI-NEXT: .cfi_offset w30, -8
+; CHECK-GI-NEXT: .cfi_offset b8, -16
+; CHECK-GI-NEXT: .cfi_offset b9, -24
+; CHECK-GI-NEXT: .cfi_offset b10, -32
+; CHECK-GI-NEXT: fmov s1, #2.00000000
+; CHECK-GI-NEXT: mov s8, v0.s[1]
+; CHECK-GI-NEXT: mov s9, v0.s[2]
+; CHECK-GI-NEXT: mov s10, v0.s[3]
+; CHECK-GI-NEXT: // kill: def $s0 killed $s0 killed $q0
+; CHECK-GI-NEXT: bl fmodf
+; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-GI-NEXT: str q0, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: fmov s1, #2.00000000
+; CHECK-GI-NEXT: fmov s0, s8
+; CHECK-GI-NEXT: bl fmodf
+; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-GI-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: fmov s1, #2.00000000
+; CHECK-GI-NEXT: fmov s0, s9
+; CHECK-GI-NEXT: bl fmodf
+; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-NEXT: fmov s1, #2.00000000
+; CHECK-GI-NEXT: fmov s0, s10
+; CHECK-GI-NEXT: bl fmodf
+; CHECK-GI-NEXT: ldp q2, q1, [sp, #16] // 32-byte Folded Reload
+; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-GI-NEXT: ldr x30, [sp, #72] // 8-byte Folded Reload
+; CHECK-GI-NEXT: ldp d9, d8, [sp, #56] // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldr d10, [sp, #48] // 8-byte Folded Reload
+; CHECK-GI-NEXT: mov v1.s[1], v2.s[0]
+; CHECK-GI-NEXT: ldr q2, [sp] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v1.s[2], v2.s[0]
+; CHECK-GI-NEXT: mov v1.s[3], v0.s[0]
+; CHECK-GI-NEXT: mov v0.16b, v1.16b
+; CHECK-GI-NEXT: add sp, sp, #80
+; CHECK-GI-NEXT: ret
+entry:
+ %fmod = frem <4 x float> %x, <float 2.0, float 2.0, float 2.0, float 2.0>
+ ret <4 x float> %fmod
+}
+
define <4 x float> @frem2_nsz_vec(<4 x float> %x) {
; CHECK-SD-LABEL: frem2_nsz_vec:
; CHECK-SD: // %bb.0: // %entry
@@ -514,10 +587,15 @@ define float @frem2_constneg_sitofp(float %x, i32 %sa) {
; CHECK-SD-LABEL: frem2_constneg_sitofp:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: mov w8, #1 // =0x1
-; CHECK-SD-NEXT: fmov s0, #-12.50000000
+; CHECK-SD-NEXT: fmov s1, #-12.50000000
; CHECK-SD-NEXT: lsl w8, w8, w0
-; CHECK-SD-NEXT: scvtf s1, w8
-; CHECK-SD-NEXT: b fmodf
+; CHECK-SD-NEXT: scvtf s0, w8
+; CHECK-SD-NEXT: fdiv s2, s1, s0
+; CHECK-SD-NEXT: frintz s2, s2
+; CHECK-SD-NEXT: fmsub s0, s2, s0, s1
+; CHECK-SD-NEXT: fabs s0, s0
+; CHECK-SD-NEXT: fneg s0, s0
+; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: frem2_constneg_sitofp:
; CHECK-GI: // %bb.0: // %entry
diff --git a/llvm/test/CodeGen/AArch64/hadd-combine.ll b/llvm/test/CodeGen/AArch64/hadd-combine.ll
index c0f76784eb37..28f454767c12 100644
--- a/llvm/test/CodeGen/AArch64/hadd-combine.ll
+++ b/llvm/test/CodeGen/AArch64/hadd-combine.ll
@@ -955,6 +955,71 @@ define <8 x i16> @urhadd_demandedelts(<8 x i16> %a0, <8 x i16> %a1) {
ret <8 x i16> %r0
}
+; Remove unnecessary sign_extend_inreg after shadd
+define <2 x i32> @shadd_signbits_v2i32(<2 x i32> %a0, <2 x i32> %a1, ptr %p2) {
+; CHECK-LABEL: shadd_signbits_v2i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sshr v0.2s, v0.2s, #17
+; CHECK-NEXT: sshr v1.2s, v1.2s, #17
+; CHECK-NEXT: shadd v0.2s, v0.2s, v1.2s
+; CHECK-NEXT: str d0, [x0]
+; CHECK-NEXT: ret
+ %x0 = ashr <2 x i32> %a0, <i32 17, i32 17>
+ %x1 = ashr <2 x i32> %a1, <i32 17, i32 17>
+ %m = and <2 x i32> %x0, %x1
+ %s = xor <2 x i32> %x0, %x1
+ %x = ashr <2 x i32> %s, <i32 1, i32 1>
+ %avg = add <2 x i32> %m, %x
+ %avg1 = shl <2 x i32> %avg, <i32 17, i32 17>
+ %avg2 = ashr <2 x i32> %avg1, <i32 17, i32 17>
+ store <2 x i32> %avg, ptr %p2 ; extra use
+ ret <2 x i32> %avg2
+}
+
+; Remove unnecessary sign_extend_inreg after srhadd
+define <2 x i32> @srhadd_signbits_v2i32(<2 x i32> %a0, <2 x i32> %a1, ptr %p2) {
+; CHECK-LABEL: srhadd_signbits_v2i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sshr v0.2s, v0.2s, #17
+; CHECK-NEXT: sshr v1.2s, v1.2s, #17
+; CHECK-NEXT: srhadd v0.2s, v0.2s, v1.2s
+; CHECK-NEXT: str d0, [x0]
+; CHECK-NEXT: ret
+ %x0 = ashr <2 x i32> %a0, <i32 17, i32 17>
+ %x1 = ashr <2 x i32> %a1, <i32 17, i32 17>
+ %m = or <2 x i32> %x0, %x1
+ %s = xor <2 x i32> %x0, %x1
+ %x = ashr <2 x i32> %s, <i32 1, i32 1>
+ %avg = sub <2 x i32> %m, %x
+ %avg1 = shl <2 x i32> %avg, <i32 17, i32 17>
+ %avg2 = ashr <2 x i32> %avg1, <i32 17, i32 17>
+ store <2 x i32> %avg, ptr %p2 ; extra use
+ ret <2 x i32> %avg2
+}
+
+; negative test - not enough signbits to remove sign_extend_inreg after srhadd
+define <2 x i32> @srhadd_signbits_v2i32_negative(<2 x i32> %a0, <2 x i32> %a1, ptr %p2) {
+; CHECK-LABEL: srhadd_signbits_v2i32_negative:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sshr v0.2s, v0.2s, #17
+; CHECK-NEXT: sshr v1.2s, v1.2s, #17
+; CHECK-NEXT: srhadd v1.2s, v0.2s, v1.2s
+; CHECK-NEXT: shl v0.2s, v1.2s, #22
+; CHECK-NEXT: str d1, [x0]
+; CHECK-NEXT: sshr v0.2s, v0.2s, #22
+; CHECK-NEXT: ret
+ %x0 = ashr <2 x i32> %a0, <i32 17, i32 17>
+ %x1 = ashr <2 x i32> %a1, <i32 17, i32 17>
+ %m = or <2 x i32> %x0, %x1
+ %s = xor <2 x i32> %x0, %x1
+ %x = ashr <2 x i32> %s, <i32 1, i32 1>
+ %avg = sub <2 x i32> %m, %x
+ %avg1 = shl <2 x i32> %avg, <i32 22, i32 22>
+ %avg2 = ashr <2 x i32> %avg1, <i32 22, i32 22>
+ store <2 x i32> %avg, ptr %p2 ; extra use
+ ret <2 x i32> %avg2
+}
+
declare <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8>, <8 x i8>)
declare <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16>, <4 x i16>)
declare <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32>, <2 x i32>)
@@ -979,4 +1044,4 @@ declare <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16>, <8 x i16>)
declare <4 x i32> @llvm.aarch64.neon.srhadd.v4i32(<4 x i32>, <4 x i32>)
declare <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8>, <16 x i8>)
declare <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16>, <8 x i16>)
-declare <4 x i32> @llvm.aarch64.neon.urhadd.v4i32(<4 x i32>, <4 x i32>)
\ No newline at end of file
+declare <4 x i32> @llvm.aarch64.neon.urhadd.v4i32(<4 x i32>, <4 x i32>)
diff --git a/llvm/test/CodeGen/AArch64/intrinsic-cttz-elts-sve.ll b/llvm/test/CodeGen/AArch64/intrinsic-cttz-elts-sve.ll
index 211237542a15..9c72afd84fa7 100644
--- a/llvm/test/CodeGen/AArch64/intrinsic-cttz-elts-sve.ll
+++ b/llvm/test/CodeGen/AArch64/intrinsic-cttz-elts-sve.ll
@@ -359,6 +359,152 @@ define i32 @add_i32_ctz_nxv16i1_poison(<vscale x 16 x i1> %a, i32 %b) {
ret i32 %add
}
+; FIXED-WIDTH VECTOR TYPES
+
+define i32 @ctz_v16i1(<16 x i1> %a) {
+; CHECK-LABEL: ctz_v16i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shl v0.16b, v0.16b, #7
+; CHECK-NEXT: ptrue p0.b, vl16
+; CHECK-NEXT: ptrue p1.b
+; CHECK-NEXT: cmlt v0.16b, v0.16b, #0
+; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, #0
+; CHECK-NEXT: brkb p0.b, p1/z, p0.b
+; CHECK-NEXT: cntp x0, p0, p0.b
+; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT: ret
+ %res = call i32 @llvm.experimental.cttz.elts.i32.v16i1(<16 x i1> %a, i1 0)
+ ret i32 %res
+}
+
+define i32 @ctz_v16i1_poison(<16 x i1> %a) {
+; CHECK-LABEL: ctz_v16i1_poison:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shl v0.16b, v0.16b, #7
+; CHECK-NEXT: ptrue p0.b, vl16
+; CHECK-NEXT: ptrue p1.b
+; CHECK-NEXT: cmlt v0.16b, v0.16b, #0
+; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, #0
+; CHECK-NEXT: brkb p0.b, p1/z, p0.b
+; CHECK-NEXT: cntp x0, p0, p0.b
+; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT: ret
+ %res = call i32 @llvm.experimental.cttz.elts.i32.v16i1(<16 x i1> %a, i1 1)
+ ret i32 %res
+}
+
+define i64 @add_i64_ctz_v16i1_poison(<16 x i1> %a, i64 %b) {
+; CHECK-LABEL: add_i64_ctz_v16i1_poison:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shl v0.16b, v0.16b, #7
+; CHECK-NEXT: ptrue p0.b, vl16
+; CHECK-NEXT: ptrue p1.b
+; CHECK-NEXT: cmlt v0.16b, v0.16b, #0
+; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, #0
+; CHECK-NEXT: brkb p0.b, p1/z, p0.b
+; CHECK-NEXT: incp x0, p0.b
+; CHECK-NEXT: ret
+ %res = call i64 @llvm.experimental.cttz.elts.i64.v16i1(<16 x i1> %a, i1 1)
+ %add = add i64 %res, %b
+ ret i64 %add
+}
+
+define i32 @ctz_v8i1(<8 x i1> %a) {
+; CHECK-LABEL: ctz_v8i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shl v0.8b, v0.8b, #7
+; CHECK-NEXT: ptrue p0.b, vl8
+; CHECK-NEXT: ptrue p1.b
+; CHECK-NEXT: cmlt v0.8b, v0.8b, #0
+; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, #0
+; CHECK-NEXT: brkb p0.b, p1/z, p0.b
+; CHECK-NEXT: cntp x0, p0, p0.b
+; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT: ret
+ %res = call i32 @llvm.experimental.cttz.elts.i32.v8i1(<8 x i1> %a, i1 0)
+ ret i32 %res
+}
+
+define i32 @ctz_v8i1_poison(<8 x i1> %a) {
+; CHECK-LABEL: ctz_v8i1_poison:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shl v0.8b, v0.8b, #7
+; CHECK-NEXT: ptrue p0.b, vl8
+; CHECK-NEXT: ptrue p1.b
+; CHECK-NEXT: cmlt v0.8b, v0.8b, #0
+; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, #0
+; CHECK-NEXT: brkb p0.b, p1/z, p0.b
+; CHECK-NEXT: cntp x0, p0, p0.b
+; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT: ret
+ %res = call i32 @llvm.experimental.cttz.elts.i32.v8i1(<8 x i1> %a, i1 1)
+ ret i32 %res
+}
+
+define i32 @ctz_v4i1(<4 x i1> %a) {
+; CHECK-LABEL: ctz_v4i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shl v0.4h, v0.4h, #15
+; CHECK-NEXT: ptrue p0.h, vl4
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: cmlt v0.4h, v0.4h, #0
+; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, #0
+; CHECK-NEXT: brkb p0.b, p1/z, p0.b
+; CHECK-NEXT: cntp x0, p0, p0.h
+; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT: ret
+ %res = call i32 @llvm.experimental.cttz.elts.i32.v4i1(<4 x i1> %a, i1 0)
+ ret i32 %res
+}
+
+define i32 @ctz_v4i1_poison(<4 x i1> %a) {
+; CHECK-LABEL: ctz_v4i1_poison:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shl v0.4h, v0.4h, #15
+; CHECK-NEXT: ptrue p0.h, vl4
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: cmlt v0.4h, v0.4h, #0
+; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, #0
+; CHECK-NEXT: brkb p0.b, p1/z, p0.b
+; CHECK-NEXT: cntp x0, p0, p0.h
+; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT: ret
+ %res = call i32 @llvm.experimental.cttz.elts.i32.v4i1(<4 x i1> %a, i1 1)
+ ret i32 %res
+}
+
+define i32 @ctz_v2i1(<2 x i1> %a) {
+; CHECK-LABEL: ctz_v2i1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shl v0.2s, v0.2s, #31
+; CHECK-NEXT: ptrue p0.s, vl2
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: cmlt v0.2s, v0.2s, #0
+; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, #0
+; CHECK-NEXT: brkb p0.b, p1/z, p0.b
+; CHECK-NEXT: cntp x0, p0, p0.s
+; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT: ret
+ %res = call i32 @llvm.experimental.cttz.elts.i32.v2i1(<2 x i1> %a, i1 0)
+ ret i32 %res
+}
+
+define i32 @ctz_v2i1_poison(<2 x i1> %a) {
+; CHECK-LABEL: ctz_v2i1_poison:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shl v0.2s, v0.2s, #31
+; CHECK-NEXT: ptrue p0.s, vl2
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: cmlt v0.2s, v0.2s, #0
+; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, #0
+; CHECK-NEXT: brkb p0.b, p1/z, p0.b
+; CHECK-NEXT: cntp x0, p0, p0.s
+; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT: ret
+ %res = call i32 @llvm.experimental.cttz.elts.i32.v2i1(<2 x i1> %a, i1 1)
+ ret i32 %res
+}
+
declare i32 @llvm.experimental.cttz.elts.i32.nxv8i1(<vscale x 8 x i1>, i1)
declare i64 @llvm.experimental.cttz.elts.i64.nxv8i1(<vscale x 8 x i1>, i1)
declare i64 @llvm.experimental.cttz.elts.i64.nxv16i1(<vscale x 16 x i1>, i1)
diff --git a/llvm/test/CodeGen/AArch64/movimm-expand-ldst.ll b/llvm/test/CodeGen/AArch64/movimm-expand-ldst.ll
new file mode 100644
index 000000000000..b25ac96f97c7
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/movimm-expand-ldst.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s
+
+define i64 @test0x1234567812345678() {
+; CHECK-LABEL: test0x1234567812345678:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x0, #22136 // =0x5678
+; CHECK-NEXT: movk x0, #4660, lsl #16
+; CHECK-NEXT: orr x0, x0, x0, lsl #32
+; CHECK-NEXT: ret
+ ret i64 u0x1234567812345678
+}
+
+define i64 @test0xff3456ffff3456ff() {
+; CHECK-LABEL: test0xff3456ffff3456ff:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x0, #22271 // =0x56ff
+; CHECK-NEXT: movk x0, #65332, lsl #16
+; CHECK-NEXT: orr x0, x0, x0, lsl #32
+; CHECK-NEXT: ret
+ ret i64 u0xff3456ffff3456ff
+}
+
+define i64 @test0x00345600345600() {
+; CHECK-LABEL: test0x00345600345600:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x0, #22016 // =0x5600
+; CHECK-NEXT: movk x0, #52, lsl #16
+; CHECK-NEXT: movk x0, #13398, lsl #32
+; CHECK-NEXT: ret
+ ret i64 u0x00345600345600
+}
+
+define i64 @test0x5555555555555555() {
+; CHECK-LABEL: test0x5555555555555555:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x0, #6148914691236517205 // =0x5555555555555555
+; CHECK-NEXT: ret
+ ret i64 u0x5555555555555555
+}
+
+define i64 @test0x5055555550555555() {
+; CHECK-LABEL: test0x5055555550555555:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x0, #6148914691236517205 // =0x5555555555555555
+; CHECK-NEXT: and x0, x0, #0xf0fffffff0ffffff
+; CHECK-NEXT: ret
+ ret i64 u0x5055555550555555
+}
+
+define i64 @test0x0000555555555555() {
+; CHECK-LABEL: test0x0000555555555555:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x0, #6148914691236517205 // =0x5555555555555555
+; CHECK-NEXT: movk x0, #0, lsl #48
+; CHECK-NEXT: ret
+ ret i64 u0x0000555555555555
+}
+
+define i64 @test0x0000555500005555() {
+; CHECK-LABEL: test0x0000555500005555:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x0, #21845 // =0x5555
+; CHECK-NEXT: movk x0, #21845, lsl #32
+; CHECK-NEXT: ret
+ ret i64 u0x0000555500005555
+}
+
+define i64 @testu0xffff5555ffff5555() {
+; CHECK-LABEL: testu0xffff5555ffff5555:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x0, #-43691 // =0xffffffffffff5555
+; CHECK-NEXT: movk x0, #21845, lsl #32
+; CHECK-NEXT: ret
+ ret i64 u0xffff5555ffff5555
+}
+
+define i64 @testuu0xfffff555f555f555() {
+; CHECK-LABEL: testuu0xfffff555f555f555:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x0, #-2731 // =0xfffffffffffff555
+; CHECK-NEXT: movk x0, #62805, lsl #16
+; CHECK-NEXT: movk x0, #62805, lsl #32
+; CHECK-NEXT: ret
+ ret i64 u0xfffff555f555f555
+}
+
+define i64 @testuu0xf555f555f555f555() {
+; CHECK-LABEL: testuu0xf555f555f555f555:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x0, #6148914691236517205 // =0x5555555555555555
+; CHECK-NEXT: orr x0, x0, #0xe001e001e001e001
+; CHECK-NEXT: ret
+ ret i64 u0xf555f555f555f555
+}
diff --git a/llvm/test/CodeGen/AArch64/movimm-expand-ldst.mir b/llvm/test/CodeGen/AArch64/movimm-expand-ldst.mir
new file mode 100644
index 000000000000..72529807d5d5
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/movimm-expand-ldst.mir
@@ -0,0 +1,34 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=aarch64 -verify-machineinstrs -run-pass=aarch64-expand-pseudo -run-pass=aarch64-ldst-opt -debug-only=aarch64-ldst-opt %s -o - | FileCheck %s
+# REQUIRES: asserts
+---
+name: test_fold_repeating_constant_load
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $x0
+ ; CHECK-LABEL: name: test_fold_repeating_constant_load
+ ; CHECK: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x0 = MOVZXi 49370, 0
+ ; CHECK-NEXT: renamable $x0 = MOVKXi $x0, 320, 16
+ ; CHECK-NEXT: renamable $x0 = ORRXrs $x0, $x0, 32
+ ; CHECK-NEXT: RET undef $lr, implicit $x0
+ renamable $x0 = MOVi64imm 90284035103834330
+ RET_ReallyLR implicit $x0
+...
+---
+name: test_fold_repeating_constant_load_neg
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $x0
+ ; CHECK-LABEL: name: test_fold_repeating_constant_load_neg
+ ; CHECK: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x0 = MOVZXi 320, 0
+ ; CHECK-NEXT: renamable $x0 = MOVKXi $x0, 49370, 16
+ ; CHECK-NEXT: renamable $x0 = ORRXrs $x0, $x0, 32
+ ; CHECK-NEXT: RET undef $lr, implicit $x0
+ renamable $x0 = MOVi64imm -4550323095879417536
+ RET_ReallyLR implicit $x0
diff --git a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
index 736f66c935e7..40b8a47f92aa 100644
--- a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
+++ b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
@@ -1709,289 +1709,289 @@ define i32 @test_sdot_v33i8_double(<33 x i8> %a, <33 x i8> %b, <33 x i8> %c, <33
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: fmov s4, w0
; CHECK-NEXT: ldr b0, [sp, #80]
; CHECK-NEXT: add x8, sp, #88
-; CHECK-NEXT: ldr b2, [sp, #144]
-; CHECK-NEXT: fmov s4, w0
+; CHECK-NEXT: ldr b1, [sp, #144]
; CHECK-NEXT: add x10, sp, #152
-; CHECK-NEXT: ldr b3, [sp, #16]
+; CHECK-NEXT: ldr b6, [sp, #16]
; CHECK-NEXT: ld1 { v0.b }[1], [x8]
-; CHECK-NEXT: ld1 { v2.b }[1], [x10]
-; CHECK-NEXT: add x10, sp, #24
-; CHECK-NEXT: ldr b1, [sp, #344]
; CHECK-NEXT: add x9, sp, #96
-; CHECK-NEXT: ld1 { v3.b }[1], [x10]
-; CHECK-NEXT: add x10, sp, #352
+; CHECK-NEXT: ldr b2, [sp, #344]
; CHECK-NEXT: mov v4.b[1], w1
+; CHECK-NEXT: ld1 { v1.b }[1], [x10]
+; CHECK-NEXT: add x10, sp, #24
+; CHECK-NEXT: ld1 { v6.b }[1], [x10]
+; CHECK-NEXT: add x10, sp, #352
; CHECK-NEXT: add x8, sp, #104
; CHECK-NEXT: ld1 { v0.b }[2], [x9]
; CHECK-NEXT: add x9, sp, #160
-; CHECK-NEXT: ld1 { v1.b }[1], [x10]
-; CHECK-NEXT: ld1 { v2.b }[2], [x9]
-; CHECK-NEXT: add x9, sp, #32
-; CHECK-NEXT: add x12, sp, #360
-; CHECK-NEXT: ld1 { v3.b }[2], [x9]
+; CHECK-NEXT: ld1 { v2.b }[1], [x10]
+; CHECK-NEXT: ld1 { v1.b }[2], [x9]
+; CHECK-NEXT: add x10, sp, #32
; CHECK-NEXT: add x11, sp, #112
-; CHECK-NEXT: add x10, sp, #120
-; CHECK-NEXT: ld1 { v1.b }[2], [x12]
-; CHECK-NEXT: add x12, sp, #168
-; CHECK-NEXT: ld1 { v0.b }[3], [x8]
; CHECK-NEXT: mov v4.b[2], w2
-; CHECK-NEXT: ld1 { v2.b }[3], [x12]
-; CHECK-NEXT: add x12, sp, #40
-; CHECK-NEXT: ld1 { v3.b }[3], [x12]
-; CHECK-NEXT: add x13, sp, #176
-; CHECK-NEXT: ldr b16, [sp, #216]
-; CHECK-NEXT: ld1 { v0.b }[4], [x11]
-; CHECK-NEXT: add x11, sp, #48
-; CHECK-NEXT: add x12, sp, #368
-; CHECK-NEXT: ld1 { v2.b }[4], [x13]
+; CHECK-NEXT: ld1 { v6.b }[2], [x10]
+; CHECK-NEXT: add x10, sp, #168
+; CHECK-NEXT: ld1 { v0.b }[3], [x8]
+; CHECK-NEXT: ldr b5, [sp, #216]
; CHECK-NEXT: add x13, sp, #224
-; CHECK-NEXT: add x9, sp, #128
+; CHECK-NEXT: ld1 { v1.b }[3], [x10]
+; CHECK-NEXT: add x10, sp, #40
+; CHECK-NEXT: add x12, sp, #120
+; CHECK-NEXT: ld1 { v6.b }[3], [x10]
+; CHECK-NEXT: add x10, sp, #176
+; CHECK-NEXT: ld1 { v5.b }[1], [x13]
; CHECK-NEXT: mov v4.b[3], w3
-; CHECK-NEXT: ld1 { v3.b }[4], [x11]
-; CHECK-NEXT: ld1 { v16.b }[1], [x13]
-; CHECK-NEXT: ld1 { v0.b }[5], [x10]
-; CHECK-NEXT: add x10, sp, #56
-; CHECK-NEXT: ld1 { v1.b }[3], [x12]
-; CHECK-NEXT: add x12, sp, #184
-; CHECK-NEXT: ldr b5, [sp, #280]
-; CHECK-NEXT: add x11, sp, #376
-; CHECK-NEXT: ld1 { v3.b }[5], [x10]
-; CHECK-NEXT: ld1 { v2.b }[5], [x12]
-; CHECK-NEXT: add x10, sp, #232
+; CHECK-NEXT: ld1 { v0.b }[4], [x11]
+; CHECK-NEXT: add x11, sp, #48
+; CHECK-NEXT: add x8, sp, #360
+; CHECK-NEXT: ld1 { v1.b }[4], [x10]
+; CHECK-NEXT: add x13, sp, #56
+; CHECK-NEXT: ld1 { v6.b }[4], [x11]
+; CHECK-NEXT: ldr b7, [sp, #280]
+; CHECK-NEXT: ld1 { v2.b }[2], [x8]
+; CHECK-NEXT: add x15, sp, #232
+; CHECK-NEXT: ld1 { v0.b }[5], [x12]
+; CHECK-NEXT: add x14, sp, #184
; CHECK-NEXT: mov v4.b[4], w4
+; CHECK-NEXT: ld1 { v5.b }[2], [x15]
+; CHECK-NEXT: add x9, sp, #128
+; CHECK-NEXT: ld1 { v6.b }[5], [x13]
+; CHECK-NEXT: add x13, sp, #288
+; CHECK-NEXT: add x10, sp, #368
+; CHECK-NEXT: ld1 { v7.b }[1], [x13]
+; CHECK-NEXT: ld1 { v1.b }[5], [x14]
+; CHECK-NEXT: ld1 { v2.b }[3], [x10]
+; CHECK-NEXT: add x15, sp, #240
; CHECK-NEXT: ld1 { v0.b }[6], [x9]
-; CHECK-NEXT: add x9, sp, #288
-; CHECK-NEXT: add x15, sp, #64
-; CHECK-NEXT: ld1 { v16.b }[2], [x10]
-; CHECK-NEXT: ldr b17, [sp, #408]
-; CHECK-NEXT: ld1 { v5.b }[1], [x9]
-; CHECK-NEXT: add x14, sp, #192
-; CHECK-NEXT: ld1 { v1.b }[4], [x11]
-; CHECK-NEXT: ld1 { v3.b }[6], [x15]
-; CHECK-NEXT: add x15, sp, #416
-; CHECK-NEXT: ld1 { v2.b }[6], [x14]
-; CHECK-NEXT: add x14, sp, #240
-; CHECK-NEXT: ld1 { v17.b }[1], [x15]
; CHECK-NEXT: add x9, sp, #296
-; CHECK-NEXT: add x8, sp, #136
; CHECK-NEXT: mov v4.b[5], w5
-; CHECK-NEXT: add x13, sp, #384
-; CHECK-NEXT: ld1 { v16.b }[3], [x14]
-; CHECK-NEXT: ld1 { v5.b }[2], [x9]
-; CHECK-NEXT: ld1 { v1.b }[5], [x13]
-; CHECK-NEXT: ld1 { v0.b }[7], [x8]
-; CHECK-NEXT: add x8, sp, #424
-; CHECK-NEXT: add x9, sp, #248
-; CHECK-NEXT: ld1 { v17.b }[2], [x8]
-; CHECK-NEXT: add x8, sp, #304
-; CHECK-NEXT: add x10, sp, #392
-; CHECK-NEXT: ld1 { v16.b }[4], [x9]
-; CHECK-NEXT: ld1 { v5.b }[3], [x8]
+; CHECK-NEXT: add x11, sp, #192
+; CHECK-NEXT: ld1 { v5.b }[3], [x15]
+; CHECK-NEXT: ldr b3, [sp, #408]
+; CHECK-NEXT: ld1 { v7.b }[2], [x9]
+; CHECK-NEXT: add x12, sp, #64
+; CHECK-NEXT: add x13, sp, #376
+; CHECK-NEXT: ld1 { v1.b }[6], [x11]
+; CHECK-NEXT: add x11, sp, #416
+; CHECK-NEXT: ld1 { v6.b }[6], [x12]
+; CHECK-NEXT: add x12, sp, #248
+; CHECK-NEXT: ld1 { v3.b }[1], [x11]
; CHECK-NEXT: mov v4.b[6], w6
-; CHECK-NEXT: ld1 { v1.b }[6], [x10]
-; CHECK-NEXT: add x10, sp, #432
-; CHECK-NEXT: add x9, sp, #256
-; CHECK-NEXT: ld1 { v17.b }[3], [x10]
-; CHECK-NEXT: add x10, sp, #312
-; CHECK-NEXT: ldr b22, [sp, #608]
-; CHECK-NEXT: add x8, sp, #400
-; CHECK-NEXT: ld1 { v16.b }[5], [x9]
-; CHECK-NEXT: ld1 { v5.b }[4], [x10]
-; CHECK-NEXT: add x9, sp, #616
-; CHECK-NEXT: ld1 { v1.b }[7], [x8]
-; CHECK-NEXT: add x8, sp, #440
-; CHECK-NEXT: ld1 { v22.b }[1], [x9]
+; CHECK-NEXT: ld1 { v2.b }[4], [x13]
+; CHECK-NEXT: add x11, sp, #304
+; CHECK-NEXT: ld1 { v5.b }[4], [x12]
+; CHECK-NEXT: ld1 { v7.b }[3], [x11]
+; CHECK-NEXT: add x8, sp, #136
+; CHECK-NEXT: add x15, sp, #384
+; CHECK-NEXT: add x9, sp, #424
+; CHECK-NEXT: ld1 { v0.b }[7], [x8]
+; CHECK-NEXT: ld1 { v3.b }[2], [x9]
+; CHECK-NEXT: ld1 { v2.b }[5], [x15]
+; CHECK-NEXT: add x8, sp, #312
; CHECK-NEXT: mov v4.b[7], w7
-; CHECK-NEXT: ld1 { v17.b }[4], [x8]
+; CHECK-NEXT: add x9, sp, #256
+; CHECK-NEXT: add x10, sp, #200
+; CHECK-NEXT: ld1 { v7.b }[4], [x8]
+; CHECK-NEXT: ld1 { v5.b }[5], [x9]
+; CHECK-NEXT: add x14, sp, #72
+; CHECK-NEXT: ld1 { v1.b }[7], [x10]
+; CHECK-NEXT: add x10, sp, #432
+; CHECK-NEXT: add x8, sp, #392
+; CHECK-NEXT: ld1 { v6.b }[7], [x14]
+; CHECK-NEXT: ld1 { v3.b }[3], [x10]
+; CHECK-NEXT: ld1 { v2.b }[6], [x8]
; CHECK-NEXT: add x8, sp, #320
+; CHECK-NEXT: add x9, sp, #264
+; CHECK-NEXT: sshll v21.8h, v4.8b, #0
+; CHECK-NEXT: ldr b4, [sp, #208]
+; CHECK-NEXT: ld1 { v7.b }[5], [x8]
+; CHECK-NEXT: ld1 { v5.b }[6], [x9]
+; CHECK-NEXT: add x10, sp, #440
+; CHECK-NEXT: add x8, sp, #400
+; CHECK-NEXT: sshll v16.8h, v6.8b, #0
+; CHECK-NEXT: sshll v6.8h, v4.8b, #0
+; CHECK-NEXT: ld1 { v3.b }[4], [x10]
+; CHECK-NEXT: ld1 { v2.b }[7], [x8]
+; CHECK-NEXT: add x8, sp, #272
+; CHECK-NEXT: add x9, sp, #328
+; CHECK-NEXT: ldr b4, [sp, #608]
+; CHECK-NEXT: ld1 { v7.b }[6], [x9]
+; CHECK-NEXT: ld1 { v5.b }[7], [x8]
+; CHECK-NEXT: add x8, sp, #616
; CHECK-NEXT: add x10, sp, #448
-; CHECK-NEXT: ldr b6, [sp, #208]
-; CHECK-NEXT: ld1 { v5.b }[5], [x8]
-; CHECK-NEXT: add x8, sp, #624
-; CHECK-NEXT: ldr b7, [sp, #472]
-; CHECK-NEXT: ld1 { v22.b }[2], [x8]
-; CHECK-NEXT: ld1 { v17.b }[5], [x10]
-; CHECK-NEXT: add x10, sp, #328
-; CHECK-NEXT: sshll v20.8h, v4.8b, #0
-; CHECK-NEXT: ldr b4, [sp, #480]
+; CHECK-NEXT: ld1 { v4.b }[1], [x8]
+; CHECK-NEXT: ldr b18, [sp, #480]
+; CHECK-NEXT: ld1 { v3.b }[5], [x10]
+; CHECK-NEXT: add x9, sp, #336
+; CHECK-NEXT: ldr b17, [sp, #472]
+; CHECK-NEXT: add x8, sp, #488
+; CHECK-NEXT: ld1 { v7.b }[7], [x9]
+; CHECK-NEXT: add x9, sp, #624
+; CHECK-NEXT: ld1 { v18.b }[1], [x8]
+; CHECK-NEXT: sshll v22.8h, v5.8b, #0
; CHECK-NEXT: add x8, sp, #456
-; CHECK-NEXT: ld1 { v5.b }[6], [x10]
-; CHECK-NEXT: add x10, sp, #632
-; CHECK-NEXT: sshll v6.8h, v6.8b, #0
-; CHECK-NEXT: ld1 { v22.b }[3], [x10]
-; CHECK-NEXT: add x10, sp, #488
-; CHECK-NEXT: ld1 { v17.b }[6], [x8]
-; CHECK-NEXT: add x8, sp, #336
-; CHECK-NEXT: ld1 { v4.b }[1], [x10]
-; CHECK-NEXT: sshll v7.8h, v7.8b, #0
-; CHECK-NEXT: ld1 { v5.b }[7], [x8]
-; CHECK-NEXT: add x8, sp, #640
-; CHECK-NEXT: add x9, sp, #264
-; CHECK-NEXT: ld1 { v22.b }[4], [x8]
+; CHECK-NEXT: sshll v5.8h, v17.8b, #0
+; CHECK-NEXT: ld1 { v4.b }[2], [x9]
+; CHECK-NEXT: ld1 { v3.b }[6], [x8]
; CHECK-NEXT: add x8, sp, #496
-; CHECK-NEXT: ld1 { v16.b }[6], [x9]
-; CHECK-NEXT: ld1 { v4.b }[2], [x8]
-; CHECK-NEXT: add x8, sp, #648
-; CHECK-NEXT: smull v18.4s, v6.4h, v7.4h
-; CHECK-NEXT: ldr b7, [sp, #544]
-; CHECK-NEXT: add x9, sp, #272
-; CHECK-NEXT: movi v6.2d, #0000000000000000
-; CHECK-NEXT: ld1 { v22.b }[5], [x8]
+; CHECK-NEXT: sshll v17.8h, v7.8b, #0
+; CHECK-NEXT: add x10, sp, #632
+; CHECK-NEXT: ld1 { v18.b }[2], [x8]
+; CHECK-NEXT: add x9, sp, #464
; CHECK-NEXT: add x8, sp, #504
-; CHECK-NEXT: ld1 { v16.b }[7], [x9]
-; CHECK-NEXT: ld1 { v4.b }[3], [x8]
-; CHECK-NEXT: add x8, sp, #552
-; CHECK-NEXT: add x9, sp, #656
-; CHECK-NEXT: ld1 { v7.b }[1], [x8]
+; CHECK-NEXT: smull v19.4s, v6.4h, v5.4h
+; CHECK-NEXT: movi v5.2d, #0000000000000000
+; CHECK-NEXT: ld1 { v4.b }[3], [x10]
+; CHECK-NEXT: ld1 { v3.b }[7], [x9]
+; CHECK-NEXT: smull v6.4s, v16.4h, v17.4h
+; CHECK-NEXT: add x9, sp, #640
+; CHECK-NEXT: ld1 { v18.b }[3], [x8]
+; CHECK-NEXT: smull2 v16.4s, v16.8h, v17.8h
+; CHECK-NEXT: ldr b17, [sp, #672]
+; CHECK-NEXT: ld1 { v4.b }[4], [x9]
+; CHECK-NEXT: add x9, sp, #680
+; CHECK-NEXT: ldr b20, [sp, #544]
+; CHECK-NEXT: mov v5.s[0], v19.s[0]
; CHECK-NEXT: add x8, sp, #512
-; CHECK-NEXT: ldr b21, [sp, #672]
-; CHECK-NEXT: ld1 { v22.b }[6], [x9]
-; CHECK-NEXT: mov v6.s[0], v18.s[0]
-; CHECK-NEXT: add x9, sp, #664
-; CHECK-NEXT: ld1 { v4.b }[4], [x8]
-; CHECK-NEXT: add x8, sp, #560
-; CHECK-NEXT: sshll v23.8h, v16.8b, #0
-; CHECK-NEXT: ld1 { v7.b }[2], [x8]
-; CHECK-NEXT: add x8, sp, #520
-; CHECK-NEXT: movi v19.2d, #0000000000000000
-; CHECK-NEXT: ld1 { v22.b }[7], [x9]
-; CHECK-NEXT: add x9, sp, #528
-; CHECK-NEXT: add x10, sp, #464
-; CHECK-NEXT: ld1 { v4.b }[5], [x8]
-; CHECK-NEXT: add x8, sp, #568
-; CHECK-NEXT: smull2 v18.4s, v20.8h, v23.8h
-; CHECK-NEXT: ld1 { v7.b }[3], [x8]
-; CHECK-NEXT: add x8, sp, #680
-; CHECK-NEXT: smlal v6.4s, v20.4h, v23.4h
-; CHECK-NEXT: ld1 { v21.b }[1], [x8]
-; CHECK-NEXT: sshll v20.8h, v22.8b, #0
-; CHECK-NEXT: ldr b22, [sp, #736]
-; CHECK-NEXT: ld1 { v4.b }[6], [x9]
-; CHECK-NEXT: add x9, sp, #576
-; CHECK-NEXT: ldr b23, [sp, #1000]
-; CHECK-NEXT: ld1 { v7.b }[4], [x9]
-; CHECK-NEXT: add x9, sp, #688
-; CHECK-NEXT: sshll v24.8h, v22.8b, #0
-; CHECK-NEXT: ld1 { v21.b }[2], [x9]
+; CHECK-NEXT: ld1 { v17.b }[1], [x9]
+; CHECK-NEXT: add x11, sp, #552
+; CHECK-NEXT: add x10, sp, #648
+; CHECK-NEXT: ld1 { v18.b }[4], [x8]
+; CHECK-NEXT: ld1 { v20.b }[1], [x11]
+; CHECK-NEXT: ld1 { v4.b }[5], [x10]
+; CHECK-NEXT: add x10, sp, #688
+; CHECK-NEXT: add x9, sp, #520
+; CHECK-NEXT: ld1 { v17.b }[2], [x10]
+; CHECK-NEXT: add x10, sp, #560
+; CHECK-NEXT: smull2 v7.4s, v21.8h, v22.8h
+; CHECK-NEXT: ld1 { v18.b }[5], [x9]
+; CHECK-NEXT: smlal v5.4s, v21.4h, v22.4h
+; CHECK-NEXT: ld1 { v20.b }[2], [x10]
+; CHECK-NEXT: ldr b21, [sp, #736]
+; CHECK-NEXT: ldr b22, [sp, #1000]
+; CHECK-NEXT: add x8, sp, #656
; CHECK-NEXT: add x9, sp, #696
-; CHECK-NEXT: sshll v25.8h, v23.8b, #0
-; CHECK-NEXT: add x8, sp, #536
-; CHECK-NEXT: ldr b22, [sp, #872]
-; CHECK-NEXT: ldr b23, [sp, #936]
-; CHECK-NEXT: ld1 { v4.b }[7], [x8]
-; CHECK-NEXT: add x8, sp, #584
-; CHECK-NEXT: ld1 { v17.b }[7], [x10]
-; CHECK-NEXT: ld1 { v21.b }[3], [x9]
-; CHECK-NEXT: ld1 { v7.b }[5], [x8]
-; CHECK-NEXT: add x8, sp, #880
-; CHECK-NEXT: add x9, sp, #704
-; CHECK-NEXT: smull v25.4s, v24.4h, v25.4h
-; CHECK-NEXT: ldr b24, [sp, #744]
-; CHECK-NEXT: ld1 { v22.b }[1], [x8]
-; CHECK-NEXT: add x8, sp, #944
-; CHECK-NEXT: add x10, sp, #888
-; CHECK-NEXT: ld1 { v21.b }[4], [x9]
-; CHECK-NEXT: add x9, sp, #752
-; CHECK-NEXT: ld1 { v23.b }[1], [x8]
-; CHECK-NEXT: ld1 { v24.b }[1], [x9]
-; CHECK-NEXT: add x8, sp, #712
+; CHECK-NEXT: add x11, sp, #568
+; CHECK-NEXT: ld1 { v4.b }[6], [x8]
+; CHECK-NEXT: add x8, sp, #528
+; CHECK-NEXT: ld1 { v17.b }[3], [x9]
+; CHECK-NEXT: sshll v21.8h, v21.8b, #0
+; CHECK-NEXT: sshll v24.8h, v22.8b, #0
+; CHECK-NEXT: ld1 { v18.b }[6], [x8]
+; CHECK-NEXT: ld1 { v20.b }[3], [x11]
+; CHECK-NEXT: add x10, sp, #704
+; CHECK-NEXT: ldr b23, [sp, #808]
+; CHECK-NEXT: movi v19.2d, #0000000000000000
+; CHECK-NEXT: add x9, sp, #536
+; CHECK-NEXT: ld1 { v17.b }[4], [x10]
+; CHECK-NEXT: add x10, sp, #576
+; CHECK-NEXT: ldr b22, [sp, #744]
+; CHECK-NEXT: add x11, sp, #816
+; CHECK-NEXT: smull v24.4s, v21.4h, v24.4h
+; CHECK-NEXT: ld1 { v18.b }[7], [x9]
+; CHECK-NEXT: ld1 { v20.b }[4], [x10]
+; CHECK-NEXT: add x10, sp, #752
+; CHECK-NEXT: ld1 { v23.b }[1], [x11]
+; CHECK-NEXT: add x9, sp, #712
+; CHECK-NEXT: ld1 { v22.b }[1], [x10]
+; CHECK-NEXT: ld1 { v17.b }[5], [x9]
+; CHECK-NEXT: add x9, sp, #584
+; CHECK-NEXT: add x10, sp, #824
+; CHECK-NEXT: sshll v21.8h, v18.8b, #0
+; CHECK-NEXT: ld1 { v20.b }[5], [x9]
; CHECK-NEXT: add x9, sp, #760
-; CHECK-NEXT: ld1 { v22.b }[2], [x10]
-; CHECK-NEXT: add x10, sp, #952
-; CHECK-NEXT: mov v19.s[0], v25.s[0]
-; CHECK-NEXT: ldr b25, [sp, #808]
+; CHECK-NEXT: ldr b18, [sp, #936]
; CHECK-NEXT: ld1 { v23.b }[2], [x10]
-; CHECK-NEXT: ld1 { v21.b }[5], [x8]
-; CHECK-NEXT: ld1 { v24.b }[2], [x9]
-; CHECK-NEXT: add x8, sp, #816
-; CHECK-NEXT: add x9, sp, #896
-; CHECK-NEXT: ld1 { v25.b }[1], [x8]
-; CHECK-NEXT: add x8, sp, #960
-; CHECK-NEXT: ld1 { v22.b }[3], [x9]
-; CHECK-NEXT: add x9, sp, #768
-; CHECK-NEXT: ld1 { v23.b }[3], [x8]
-; CHECK-NEXT: add x10, sp, #904
-; CHECK-NEXT: ld1 { v24.b }[3], [x9]
-; CHECK-NEXT: add x9, sp, #824
-; CHECK-NEXT: add x8, sp, #720
-; CHECK-NEXT: ld1 { v25.b }[2], [x9]
-; CHECK-NEXT: add x9, sp, #968
-; CHECK-NEXT: ld1 { v22.b }[4], [x10]
-; CHECK-NEXT: add x10, sp, #776
-; CHECK-NEXT: ld1 { v23.b }[4], [x9]
-; CHECK-NEXT: ld1 { v21.b }[6], [x8]
-; CHECK-NEXT: ld1 { v24.b }[4], [x10]
-; CHECK-NEXT: add x8, sp, #832
-; CHECK-NEXT: add x9, sp, #912
-; CHECK-NEXT: ld1 { v25.b }[3], [x8]
-; CHECK-NEXT: add x8, sp, #976
-; CHECK-NEXT: ld1 { v22.b }[5], [x9]
-; CHECK-NEXT: add x9, sp, #784
-; CHECK-NEXT: ld1 { v23.b }[5], [x8]
-; CHECK-NEXT: add x10, sp, #920
-; CHECK-NEXT: ld1 { v24.b }[5], [x9]
-; CHECK-NEXT: add x9, sp, #840
-; CHECK-NEXT: add x8, sp, #728
-; CHECK-NEXT: ld1 { v25.b }[4], [x9]
-; CHECK-NEXT: add x9, sp, #984
-; CHECK-NEXT: ld1 { v22.b }[6], [x10]
-; CHECK-NEXT: add x10, sp, #792
-; CHECK-NEXT: ld1 { v23.b }[6], [x9]
-; CHECK-NEXT: ld1 { v21.b }[7], [x8]
-; CHECK-NEXT: ld1 { v24.b }[6], [x10]
-; CHECK-NEXT: add x8, sp, #848
-; CHECK-NEXT: add x9, sp, #928
-; CHECK-NEXT: ld1 { v25.b }[5], [x8]
-; CHECK-NEXT: add x12, sp, #72
-; CHECK-NEXT: add x8, sp, #992
-; CHECK-NEXT: ld1 { v22.b }[7], [x9]
-; CHECK-NEXT: add x9, sp, #800
-; CHECK-NEXT: ld1 { v3.b }[7], [x12]
-; CHECK-NEXT: ld1 { v23.b }[7], [x8]
-; CHECK-NEXT: add x8, sp, #592
-; CHECK-NEXT: ld1 { v24.b }[7], [x9]
-; CHECK-NEXT: add x9, sp, #856
-; CHECK-NEXT: ld1 { v7.b }[6], [x8]
-; CHECK-NEXT: add x11, sp, #200
-; CHECK-NEXT: ld1 { v25.b }[6], [x9]
-; CHECK-NEXT: sshll v3.8h, v3.8b, #0
-; CHECK-NEXT: sshll v5.8h, v5.8b, #0
-; CHECK-NEXT: sshll v4.8h, v4.8b, #0
-; CHECK-NEXT: sshll v21.8h, v21.8b, #0
+; CHECK-NEXT: mov v19.s[0], v24.s[0]
+; CHECK-NEXT: ldr b24, [sp, #872]
+; CHECK-NEXT: ld1 { v22.b }[2], [x9]
+; CHECK-NEXT: add x9, sp, #944
+; CHECK-NEXT: add x11, sp, #880
+; CHECK-NEXT: add x10, sp, #768
+; CHECK-NEXT: ld1 { v18.b }[1], [x9]
+; CHECK-NEXT: add x9, sp, #832
+; CHECK-NEXT: ld1 { v24.b }[1], [x11]
+; CHECK-NEXT: ld1 { v23.b }[3], [x9]
+; CHECK-NEXT: ld1 { v22.b }[3], [x10]
+; CHECK-NEXT: add x10, sp, #952
+; CHECK-NEXT: add x12, sp, #888
+; CHECK-NEXT: add x9, sp, #592
+; CHECK-NEXT: add x11, sp, #776
+; CHECK-NEXT: ld1 { v18.b }[2], [x10]
+; CHECK-NEXT: add x10, sp, #840
+; CHECK-NEXT: ld1 { v24.b }[2], [x12]
+; CHECK-NEXT: ld1 { v23.b }[4], [x10]
+; CHECK-NEXT: ld1 { v22.b }[4], [x11]
+; CHECK-NEXT: ld1 { v20.b }[6], [x9]
+; CHECK-NEXT: add x9, sp, #960
+; CHECK-NEXT: add x11, sp, #896
+; CHECK-NEXT: add x10, sp, #784
+; CHECK-NEXT: ld1 { v18.b }[3], [x9]
+; CHECK-NEXT: add x9, sp, #848
+; CHECK-NEXT: ld1 { v24.b }[3], [x11]
+; CHECK-NEXT: ld1 { v23.b }[5], [x9]
+; CHECK-NEXT: ld1 { v22.b }[5], [x10]
+; CHECK-NEXT: add x10, sp, #968
+; CHECK-NEXT: add x12, sp, #904
+; CHECK-NEXT: add x9, sp, #600
+; CHECK-NEXT: add x11, sp, #792
+; CHECK-NEXT: ld1 { v18.b }[4], [x10]
+; CHECK-NEXT: add x10, sp, #856
+; CHECK-NEXT: ld1 { v24.b }[4], [x12]
+; CHECK-NEXT: ld1 { v23.b }[6], [x10]
+; CHECK-NEXT: ld1 { v22.b }[6], [x11]
+; CHECK-NEXT: ld1 { v20.b }[7], [x9]
+; CHECK-NEXT: add x9, sp, #976
+; CHECK-NEXT: add x11, sp, #912
+; CHECK-NEXT: add x10, sp, #800
+; CHECK-NEXT: ld1 { v18.b }[5], [x9]
+; CHECK-NEXT: add x9, sp, #864
+; CHECK-NEXT: ld1 { v24.b }[5], [x11]
+; CHECK-NEXT: ld1 { v23.b }[7], [x9]
+; CHECK-NEXT: add x9, sp, #720
+; CHECK-NEXT: ld1 { v22.b }[7], [x10]
+; CHECK-NEXT: add x10, sp, #984
+; CHECK-NEXT: ld1 { v17.b }[6], [x9]
+; CHECK-NEXT: add x9, sp, #920
+; CHECK-NEXT: ld1 { v18.b }[6], [x10]
+; CHECK-NEXT: ld1 { v24.b }[6], [x9]
+; CHECK-NEXT: add x10, sp, #728
+; CHECK-NEXT: add x8, sp, #664
+; CHECK-NEXT: sshll v20.8h, v20.8b, #0
; CHECK-NEXT: sshll v22.8h, v22.8b, #0
; CHECK-NEXT: sshll v23.8h, v23.8b, #0
-; CHECK-NEXT: add x8, sp, #600
-; CHECK-NEXT: sshll v24.8h, v24.8b, #0
-; CHECK-NEXT: add x9, sp, #864
-; CHECK-NEXT: ld1 { v2.b }[7], [x11]
-; CHECK-NEXT: ld1 { v7.b }[7], [x8]
-; CHECK-NEXT: ld1 { v25.b }[7], [x9]
-; CHECK-NEXT: smull v16.4s, v3.4h, v5.4h
-; CHECK-NEXT: smull2 v3.4s, v3.8h, v5.8h
-; CHECK-NEXT: smull v5.4s, v21.4h, v23.4h
-; CHECK-NEXT: smull2 v21.4s, v21.8h, v23.8h
-; CHECK-NEXT: smull2 v23.4s, v20.8h, v22.8h
-; CHECK-NEXT: smlal v19.4s, v4.4h, v24.4h
-; CHECK-NEXT: sshll v2.8h, v2.8b, #0
-; CHECK-NEXT: sshll v17.8h, v17.8b, #0
+; CHECK-NEXT: add x9, sp, #992
+; CHECK-NEXT: ld1 { v17.b }[7], [x10]
+; CHECK-NEXT: add x10, sp, #928
+; CHECK-NEXT: ld1 { v18.b }[7], [x9]
+; CHECK-NEXT: ld1 { v4.b }[7], [x8]
+; CHECK-NEXT: ld1 { v24.b }[7], [x10]
+; CHECK-NEXT: smlal v19.4s, v21.4h, v22.4h
+; CHECK-NEXT: smull2 v21.4s, v21.8h, v22.8h
+; CHECK-NEXT: smull v22.4s, v20.4h, v23.4h
+; CHECK-NEXT: smull2 v20.4s, v20.8h, v23.8h
; CHECK-NEXT: sshll v0.8h, v0.8b, #0
; CHECK-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-NEXT: sshll v7.8h, v7.8b, #0
-; CHECK-NEXT: sshll v25.8h, v25.8b, #0
-; CHECK-NEXT: smlal2 v3.4s, v2.8h, v17.8h
-; CHECK-NEXT: smlal v16.4s, v2.4h, v17.4h
-; CHECK-NEXT: smlal2 v23.4s, v4.8h, v24.8h
-; CHECK-NEXT: smlal2 v18.4s, v0.8h, v1.8h
-; CHECK-NEXT: smlal v6.4s, v0.4h, v1.4h
-; CHECK-NEXT: smlal v19.4s, v20.4h, v22.4h
-; CHECK-NEXT: smlal2 v21.4s, v7.8h, v25.8h
-; CHECK-NEXT: smlal v5.4s, v7.4h, v25.4h
-; CHECK-NEXT: add v0.4s, v18.4s, v3.4s
-; CHECK-NEXT: add v1.4s, v6.4s, v16.4s
-; CHECK-NEXT: add v2.4s, v23.4s, v21.4s
-; CHECK-NEXT: add v3.4s, v19.4s, v5.4s
+; CHECK-NEXT: sshll v3.8h, v3.8b, #0
+; CHECK-NEXT: sshll v2.8h, v2.8b, #0
+; CHECK-NEXT: sshll v17.8h, v17.8b, #0
+; CHECK-NEXT: sshll v18.8h, v18.8b, #0
+; CHECK-NEXT: sshll v4.8h, v4.8b, #0
+; CHECK-NEXT: sshll v23.8h, v24.8b, #0
+; CHECK-NEXT: smlal2 v16.4s, v1.8h, v3.8h
+; CHECK-NEXT: smlal v6.4s, v1.4h, v3.4h
+; CHECK-NEXT: smlal2 v7.4s, v0.8h, v2.8h
+; CHECK-NEXT: smlal v5.4s, v0.4h, v2.4h
+; CHECK-NEXT: smlal2 v20.4s, v17.8h, v18.8h
+; CHECK-NEXT: smlal v22.4s, v17.4h, v18.4h
+; CHECK-NEXT: smlal2 v21.4s, v4.8h, v23.8h
+; CHECK-NEXT: smlal v19.4s, v4.4h, v23.4h
+; CHECK-NEXT: add v0.4s, v7.4s, v16.4s
+; CHECK-NEXT: add v1.4s, v5.4s, v6.4s
+; CHECK-NEXT: add v2.4s, v21.4s, v20.4s
+; CHECK-NEXT: add v3.4s, v19.4s, v22.4s
; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-NEXT: add v1.4s, v3.4s, v2.4s
; CHECK-NEXT: add v0.4s, v0.4s, v1.4s
@@ -2050,10 +2050,10 @@ define i32 @test_sdot_v33i8_double_nomla(<33 x i8> %a, <33 x i8> %b, <33 x i8> %
; CHECK-NEXT: ld1 { v3.b }[2], [x10]
; CHECK-NEXT: ld1 { v5.b }[2], [x8]
; CHECK-NEXT: add x8, sp, #176
-; CHECK-NEXT: ldr b6, [sp, #544]
+; CHECK-NEXT: ldr b6, [sp, #672]
; CHECK-NEXT: ld1 { v0.b }[4], [x12]
-; CHECK-NEXT: add x14, sp, #552
-; CHECK-NEXT: ldr b7, [sp, #672]
+; CHECK-NEXT: add x14, sp, #680
+; CHECK-NEXT: ldr b7, [sp, #544]
; CHECK-NEXT: ld1 { v2.b }[4], [x8]
; CHECK-NEXT: add x13, sp, #40
; CHECK-NEXT: ld1 { v6.b }[1], [x14]
@@ -2061,7 +2061,7 @@ define i32 @test_sdot_v33i8_double_nomla(<33 x i8> %a, <33 x i8> %b, <33 x i8> %
; CHECK-NEXT: add x11, sp, #128
; CHECK-NEXT: ld1 { v3.b }[3], [x13]
; CHECK-NEXT: ld1 { v0.b }[5], [x9]
-; CHECK-NEXT: add x9, sp, #680
+; CHECK-NEXT: add x9, sp, #552
; CHECK-NEXT: add x13, sp, #184
; CHECK-NEXT: ld1 { v7.b }[1], [x9]
; CHECK-NEXT: ld1 { v2.b }[5], [x13]
@@ -2070,26 +2070,26 @@ define i32 @test_sdot_v33i8_double_nomla(<33 x i8> %a, <33 x i8> %b, <33 x i8> %
; CHECK-NEXT: ld1 { v4.b }[2], [x13]
; CHECK-NEXT: add x10, sp, #136
; CHECK-NEXT: ld1 { v0.b }[6], [x11]
-; CHECK-NEXT: add x11, sp, #560
+; CHECK-NEXT: add x11, sp, #688
; CHECK-NEXT: ld1 { v5.b }[3], [x15]
; CHECK-NEXT: ld1 { v6.b }[2], [x11]
-; CHECK-NEXT: add x11, sp, #688
+; CHECK-NEXT: add x11, sp, #560
; CHECK-NEXT: mov v1.b[3], w3
; CHECK-NEXT: ld1 { v7.b }[2], [x11]
; CHECK-NEXT: add x9, sp, #632
; CHECK-NEXT: add x11, sp, #512
; CHECK-NEXT: ld1 { v0.b }[7], [x10]
; CHECK-NEXT: ld1 { v4.b }[3], [x9]
-; CHECK-NEXT: add x9, sp, #568
-; CHECK-NEXT: add x10, sp, #696
+; CHECK-NEXT: add x9, sp, #696
+; CHECK-NEXT: add x10, sp, #568
; CHECK-NEXT: ld1 { v6.b }[3], [x9]
; CHECK-NEXT: ld1 { v5.b }[4], [x11]
; CHECK-NEXT: ld1 { v7.b }[3], [x10]
; CHECK-NEXT: add x9, sp, #640
; CHECK-NEXT: mov v1.b[4], w4
; CHECK-NEXT: ld1 { v4.b }[4], [x9]
-; CHECK-NEXT: add x9, sp, #576
-; CHECK-NEXT: add x10, sp, #704
+; CHECK-NEXT: add x9, sp, #704
+; CHECK-NEXT: add x10, sp, #576
; CHECK-NEXT: add x11, sp, #520
; CHECK-NEXT: ld1 { v6.b }[4], [x9]
; CHECK-NEXT: ldr b18, [sp, #736]
@@ -2101,8 +2101,8 @@ define i32 @test_sdot_v33i8_double_nomla(<33 x i8> %a, <33 x i8> %b, <33 x i8> %
; CHECK-NEXT: add x9, sp, #648
; CHECK-NEXT: ld1 { v3.b }[4], [x8]
; CHECK-NEXT: add x10, sp, #528
-; CHECK-NEXT: add x11, sp, #584
-; CHECK-NEXT: add x12, sp, #712
+; CHECK-NEXT: add x11, sp, #712
+; CHECK-NEXT: add x12, sp, #584
; CHECK-NEXT: sshll v18.8h, v18.8b, #0
; CHECK-NEXT: mov v1.b[5], w5
; CHECK-NEXT: ld1 { v6.b }[5], [x11]
@@ -2114,8 +2114,8 @@ define i32 @test_sdot_v33i8_double_nomla(<33 x i8> %a, <33 x i8> %b, <33 x i8> %
; CHECK-NEXT: ld1 { v3.b }[5], [x14]
; CHECK-NEXT: add x9, sp, #656
; CHECK-NEXT: add x10, sp, #536
-; CHECK-NEXT: add x11, sp, #592
-; CHECK-NEXT: add x12, sp, #720
+; CHECK-NEXT: add x11, sp, #720
+; CHECK-NEXT: add x12, sp, #592
; CHECK-NEXT: sshll v18.4s, v18.4h, #0
; CHECK-NEXT: ldr b16, [sp, #208]
; CHECK-NEXT: ld1 { v6.b }[6], [x11]
@@ -2127,8 +2127,8 @@ define i32 @test_sdot_v33i8_double_nomla(<33 x i8> %a, <33 x i8> %b, <33 x i8> %
; CHECK-NEXT: sshll v16.8h, v16.8b, #0
; CHECK-NEXT: ld1 { v3.b }[6], [x8]
; CHECK-NEXT: add x8, sp, #664
-; CHECK-NEXT: add x9, sp, #600
-; CHECK-NEXT: add x10, sp, #728
+; CHECK-NEXT: add x9, sp, #728
+; CHECK-NEXT: add x10, sp, #600
; CHECK-NEXT: mov v17.s[0], v18.s[0]
; CHECK-NEXT: ld1 { v6.b }[7], [x9]
; CHECK-NEXT: ld1 { v7.b }[7], [x10]
@@ -2151,7 +2151,7 @@ define i32 @test_sdot_v33i8_double_nomla(<33 x i8> %a, <33 x i8> %b, <33 x i8> %
; CHECK-NEXT: sshll v2.8h, v2.8b, #0
; CHECK-NEXT: sshll v3.8h, v3.8b, #0
; CHECK-NEXT: saddl2 v16.4s, v7.8h, v6.8h
-; CHECK-NEXT: saddl2 v5.4s, v4.8h, v5.8h
+; CHECK-NEXT: saddl2 v5.4s, v5.8h, v4.8h
; CHECK-NEXT: saddl v6.4s, v7.4h, v6.4h
; CHECK-NEXT: saddw v4.4s, v17.4s, v4.4h
; CHECK-NEXT: saddl2 v17.4s, v1.8h, v0.8h
diff --git a/llvm/test/CodeGen/AArch64/pr58431.ll b/llvm/test/CodeGen/AArch64/pr58431.ll
index dcd97597ae40..e87d8f7874d6 100644
--- a/llvm/test/CodeGen/AArch64/pr58431.ll
+++ b/llvm/test/CodeGen/AArch64/pr58431.ll
@@ -4,8 +4,8 @@
define i32 @f(i64 %0) {
; CHECK-LABEL: f:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #10
-; CHECK-NEXT: mov w9, w0
+; CHECK-NEXT: mov w8, #10 // =0xa
+; CHECK-NEXT: and x9, x0, #0xffffffff
; CHECK-NEXT: udiv x10, x9, x8
; CHECK-NEXT: msub x0, x10, x8, x9
; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
diff --git a/llvm/test/CodeGen/AArch64/selectopt-not.ll b/llvm/test/CodeGen/AArch64/selectopt-not.ll
index 7a949d11c80d..a7939d651a2c 100644
--- a/llvm/test/CodeGen/AArch64/selectopt-not.ll
+++ b/llvm/test/CodeGen/AArch64/selectopt-not.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -select-optimize -mtriple=aarch64-linux-gnu -mcpu=neoverse-v2 -S < %s | FileCheck %s
+; RUN: opt -select-optimize -mtriple=aarch64-linux-gnu -mcpu=neoverse-v2 -S < %s | FileCheck %s --check-prefixes=CHECK,CHECK-STANDARD
+; RUN: opt -select-optimize -mtriple=aarch64-linux-gnu -mcpu=neoverse-v2 -S -disable-loop-level-heuristics < %s | FileCheck %s --check-prefixes=CHECK,CHECK-FORCED
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
target triple = "aarch64"
@@ -29,10 +30,10 @@ define i32 @minloc1(ptr nocapture readonly %0, ptr nocapture readonly %1, ptr no
; CHECK-NEXT: [[TMP21:%.*]] = sub i64 0, [[TMP7]]
; CHECK-NEXT: br label [[DOTPREHEADER35:%.*]]
; CHECK: .preheader35:
-; CHECK-NEXT: [[TMP22:%.*]] = phi i32 [ 2147483647, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP30:%.*]], [[DOTPREHEADER35]] ]
-; CHECK-NEXT: [[TMP23:%.*]] = phi i64 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[IV_N:%.*]], [[DOTPREHEADER35]] ]
-; CHECK-NEXT: [[DOT045:%.*]] = phi i1 [ false, [[DOTPREHEADER35_LR_PH]] ], [ [[DOT2:%.*]], [[DOTPREHEADER35]] ]
-; CHECK-NEXT: [[DOTLCSSA364144:%.*]] = phi i32 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP29:%.*]], [[DOTPREHEADER35]] ]
+; CHECK-NEXT: [[TMP22:%.*]] = phi i32 [ 2147483647, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP30:%.*]], [[SELECT_END:%.*]] ]
+; CHECK-NEXT: [[TMP23:%.*]] = phi i64 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[IV_N:%.*]], [[SELECT_END]] ]
+; CHECK-NEXT: [[DOT045:%.*]] = phi i1 [ false, [[DOTPREHEADER35_LR_PH]] ], [ [[DOT2:%.*]], [[SELECT_END]] ]
+; CHECK-NEXT: [[DOTLCSSA364144:%.*]] = phi i32 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP29:%.*]], [[SELECT_END]] ]
; CHECK-NEXT: [[TMP24:%.*]] = mul nsw i64 [[TMP23]], [[TMP11]]
; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[TMP19]], i64 [[TMP24]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
@@ -40,15 +41,20 @@ define i32 @minloc1(ptr nocapture readonly %0, ptr nocapture readonly %1, ptr no
; CHECK-NEXT: [[TMP28:%.*]] = icmp sge i32 [[TMP26]], [[TMP22]]
; CHECK-NEXT: [[DOTNOT33:%.*]] = and i1 [[DOT045]], [[TMP28]]
; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[TMP27]], i1 true, i1 [[DOTNOT33]]
-; CHECK-NEXT: [[TMP29]] = select i1 [[OR_COND]], i32 [[DOTLCSSA364144]], i32 1
+; CHECK-NEXT: [[OR_COND_FROZEN:%.*]] = freeze i1 [[OR_COND]]
+; CHECK-NEXT: br i1 [[OR_COND_FROZEN]], label [[SELECT_END]], label [[SELECT_FALSE:%.*]]
+; CHECK: select.false:
+; CHECK-NEXT: br label [[SELECT_END]]
+; CHECK: select.end:
+; CHECK-NEXT: [[TMP29]] = phi i32 [ [[DOTLCSSA364144]], [[DOTPREHEADER35]] ], [ 1, [[SELECT_FALSE]] ]
+; CHECK-NEXT: [[DOT2]] = phi i1 [ [[DOT045]], [[DOTPREHEADER35]] ], [ true, [[SELECT_FALSE]] ]
+; CHECK-NEXT: [[TMP30]] = phi i32 [ [[TMP22]], [[DOTPREHEADER35]] ], [ [[TMP20]], [[SELECT_FALSE]] ]
; CHECK-NEXT: [[NOT_OR_COND:%.*]] = xor i1 [[OR_COND]], true
-; CHECK-NEXT: [[DOT2]] = select i1 [[NOT_OR_COND]], i1 true, i1 [[DOT045]]
-; CHECK-NEXT: [[TMP30]] = select i1 [[OR_COND]], i32 [[TMP22]], i32 [[TMP20]]
; CHECK-NEXT: [[IV_N]] = add nuw nsw i64 [[TMP23]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_N]], [[TMP9]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[DOTPREHEADER]], label [[DOTPREHEADER35]]
; CHECK: .preheader:
-; CHECK-NEXT: [[DOTLCSSA3641_LCSSA:%.*]] = phi i32 [ 0, [[TMP3:%.*]] ], [ [[TMP29]], [[DOTPREHEADER35]] ]
+; CHECK-NEXT: [[DOTLCSSA3641_LCSSA:%.*]] = phi i32 [ 0, [[TMP3:%.*]] ], [ [[TMP29]], [[SELECT_END]] ]
; CHECK-NEXT: ret i32 [[DOTLCSSA3641_LCSSA]]
;
%4 = getelementptr i8, ptr %0, i64 40
@@ -101,53 +107,106 @@ define i32 @minloc1(ptr nocapture readonly %0, ptr nocapture readonly %1, ptr no
}
define i32 @minloc1_otherunusednot(ptr nocapture readonly %0, ptr nocapture readonly %1, ptr nocapture readonly %2) {
-; CHECK-LABEL: @minloc1_otherunusednot(
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[TMP0:%.*]], i64 40
-; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP4]], align 8
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP0]], i64 64
-; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr [[TMP6]], align 8
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP0]], i64 80
-; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP0]], i64 88
-; CHECK-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8
-; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP0]], align 8
-; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP1:%.*]], align 4
-; CHECK-NEXT: [[TMP14:%.*]] = sext i32 [[TMP13]] to i64
-; CHECK-NEXT: [[TMP15:%.*]] = add nsw i64 [[TMP14]], -1
-; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], [[TMP5]]
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP12]], i64 [[TMP16]]
-; CHECK-NEXT: [[TMP18:%.*]] = shl i64 [[TMP7]], 3
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP18]]
-; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP2:%.*]], align 4
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp slt i64 [[TMP9]], 1
-; CHECK-NEXT: br i1 [[DOTNOT]], label [[DOTPREHEADER:%.*]], label [[DOTPREHEADER35_LR_PH:%.*]]
-; CHECK: .preheader35.lr.ph:
-; CHECK-NEXT: [[TMP21:%.*]] = sub i64 0, [[TMP7]]
-; CHECK-NEXT: br label [[DOTPREHEADER35:%.*]]
-; CHECK: .preheader35:
-; CHECK-NEXT: [[TMP22:%.*]] = phi i32 [ 2147483647, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP30:%.*]], [[DOTPREHEADER35]] ]
-; CHECK-NEXT: [[TMP23:%.*]] = phi i64 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[IV_N:%.*]], [[DOTPREHEADER35]] ]
-; CHECK-NEXT: [[DOT045:%.*]] = phi i1 [ false, [[DOTPREHEADER35_LR_PH]] ], [ [[DOT2:%.*]], [[DOTPREHEADER35]] ]
-; CHECK-NEXT: [[DOTLCSSA364144:%.*]] = phi i32 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP29:%.*]], [[DOTPREHEADER35]] ]
-; CHECK-NEXT: [[TMP24:%.*]] = mul nsw i64 [[TMP23]], [[TMP11]]
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[TMP19]], i64 [[TMP24]]
-; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
-; CHECK-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], [[TMP20]]
-; CHECK-NEXT: [[TMP28:%.*]] = icmp sge i32 [[TMP26]], [[TMP22]]
-; CHECK-NEXT: [[DOTNOT33:%.*]] = and i1 [[DOT045]], [[TMP28]]
-; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[TMP27]], i1 true, i1 [[DOTNOT33]]
-; CHECK-NEXT: [[TMP29]] = select i1 [[OR_COND]], i32 [[DOTLCSSA364144]], i32 1
-; CHECK-NEXT: [[DOT2]] = select i1 [[OR_COND]], i1 [[DOT045]], i1 true
-; CHECK-NEXT: [[NOT_OR_COND:%.*]] = xor i1 [[OR_COND]], true
-; CHECK-NEXT: [[TMP30]] = select i1 [[OR_COND]], i32 [[TMP22]], i32 [[TMP20]]
-; CHECK-NEXT: [[IV_N]] = add nuw nsw i64 [[TMP23]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_N]], [[TMP9]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[DOTPREHEADER]], label [[DOTPREHEADER35]]
-; CHECK: .preheader:
-; CHECK-NEXT: [[DOTLCSSA3641_LCSSA:%.*]] = phi i32 [ 0, [[TMP3:%.*]] ], [ [[TMP29]], [[DOTPREHEADER35]] ]
-; CHECK-NEXT: [[P:%.*]] = phi i1 [ false, [[TMP3]] ], [ [[NOT_OR_COND]], [[DOTPREHEADER35]] ]
-; CHECK-NEXT: [[Q:%.*]] = select i1 [[P]], i32 [[DOTLCSSA3641_LCSSA]], i32 1
-; CHECK-NEXT: ret i32 [[Q]]
+; CHECK-STANDARD-LABEL: @minloc1_otherunusednot(
+; CHECK-STANDARD-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[TMP0:%.*]], i64 40
+; CHECK-STANDARD-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP4]], align 8
+; CHECK-STANDARD-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP0]], i64 64
+; CHECK-STANDARD-NEXT: [[TMP7:%.*]] = load i64, ptr [[TMP6]], align 8
+; CHECK-STANDARD-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP0]], i64 80
+; CHECK-STANDARD-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8
+; CHECK-STANDARD-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP0]], i64 88
+; CHECK-STANDARD-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8
+; CHECK-STANDARD-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP0]], align 8
+; CHECK-STANDARD-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP1:%.*]], align 4
+; CHECK-STANDARD-NEXT: [[TMP14:%.*]] = sext i32 [[TMP13]] to i64
+; CHECK-STANDARD-NEXT: [[TMP15:%.*]] = add nsw i64 [[TMP14]], -1
+; CHECK-STANDARD-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], [[TMP5]]
+; CHECK-STANDARD-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP12]], i64 [[TMP16]]
+; CHECK-STANDARD-NEXT: [[TMP18:%.*]] = shl i64 [[TMP7]], 3
+; CHECK-STANDARD-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP18]]
+; CHECK-STANDARD-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP2:%.*]], align 4
+; CHECK-STANDARD-NEXT: [[DOTNOT:%.*]] = icmp slt i64 [[TMP9]], 1
+; CHECK-STANDARD-NEXT: br i1 [[DOTNOT]], label [[DOTPREHEADER:%.*]], label [[DOTPREHEADER35_LR_PH:%.*]]
+; CHECK-STANDARD: .preheader35.lr.ph:
+; CHECK-STANDARD-NEXT: [[TMP21:%.*]] = sub i64 0, [[TMP7]]
+; CHECK-STANDARD-NEXT: br label [[DOTPREHEADER35:%.*]]
+; CHECK-STANDARD: .preheader35:
+; CHECK-STANDARD-NEXT: [[TMP22:%.*]] = phi i32 [ 2147483647, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP30:%.*]], [[DOTPREHEADER35]] ]
+; CHECK-STANDARD-NEXT: [[TMP23:%.*]] = phi i64 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[IV_N:%.*]], [[DOTPREHEADER35]] ]
+; CHECK-STANDARD-NEXT: [[DOT045:%.*]] = phi i1 [ false, [[DOTPREHEADER35_LR_PH]] ], [ [[DOT2:%.*]], [[DOTPREHEADER35]] ]
+; CHECK-STANDARD-NEXT: [[DOTLCSSA364144:%.*]] = phi i32 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP29:%.*]], [[DOTPREHEADER35]] ]
+; CHECK-STANDARD-NEXT: [[TMP24:%.*]] = mul nsw i64 [[TMP23]], [[TMP11]]
+; CHECK-STANDARD-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[TMP19]], i64 [[TMP24]]
+; CHECK-STANDARD-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
+; CHECK-STANDARD-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], [[TMP20]]
+; CHECK-STANDARD-NEXT: [[TMP28:%.*]] = icmp sge i32 [[TMP26]], [[TMP22]]
+; CHECK-STANDARD-NEXT: [[DOTNOT33:%.*]] = and i1 [[DOT045]], [[TMP28]]
+; CHECK-STANDARD-NEXT: [[OR_COND:%.*]] = select i1 [[TMP27]], i1 true, i1 [[DOTNOT33]]
+; CHECK-STANDARD-NEXT: [[TMP29]] = select i1 [[OR_COND]], i32 [[DOTLCSSA364144]], i32 1
+; CHECK-STANDARD-NEXT: [[DOT2]] = select i1 [[OR_COND]], i1 [[DOT045]], i1 true
+; CHECK-STANDARD-NEXT: [[NOT_OR_COND:%.*]] = xor i1 [[OR_COND]], true
+; CHECK-STANDARD-NEXT: [[TMP30]] = select i1 [[OR_COND]], i32 [[TMP22]], i32 [[TMP20]]
+; CHECK-STANDARD-NEXT: [[IV_N]] = add nuw nsw i64 [[TMP23]], 1
+; CHECK-STANDARD-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_N]], [[TMP9]]
+; CHECK-STANDARD-NEXT: br i1 [[EXITCOND_NOT]], label [[DOTPREHEADER]], label [[DOTPREHEADER35]]
+; CHECK-STANDARD: .preheader:
+; CHECK-STANDARD-NEXT: [[DOTLCSSA3641_LCSSA:%.*]] = phi i32 [ 0, [[TMP3:%.*]] ], [ [[TMP29]], [[DOTPREHEADER35]] ]
+; CHECK-STANDARD-NEXT: [[P:%.*]] = phi i1 [ false, [[TMP3]] ], [ [[NOT_OR_COND]], [[DOTPREHEADER35]] ]
+; CHECK-STANDARD-NEXT: [[Q:%.*]] = select i1 [[P]], i32 [[DOTLCSSA3641_LCSSA]], i32 1
+; CHECK-STANDARD-NEXT: ret i32 [[Q]]
+;
+; CHECK-FORCED-LABEL: @minloc1_otherunusednot(
+; CHECK-FORCED-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[TMP0:%.*]], i64 40
+; CHECK-FORCED-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP4]], align 8
+; CHECK-FORCED-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP0]], i64 64
+; CHECK-FORCED-NEXT: [[TMP7:%.*]] = load i64, ptr [[TMP6]], align 8
+; CHECK-FORCED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP0]], i64 80
+; CHECK-FORCED-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8
+; CHECK-FORCED-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP0]], i64 88
+; CHECK-FORCED-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8
+; CHECK-FORCED-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP0]], align 8
+; CHECK-FORCED-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP1:%.*]], align 4
+; CHECK-FORCED-NEXT: [[TMP14:%.*]] = sext i32 [[TMP13]] to i64
+; CHECK-FORCED-NEXT: [[TMP15:%.*]] = add nsw i64 [[TMP14]], -1
+; CHECK-FORCED-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], [[TMP5]]
+; CHECK-FORCED-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP12]], i64 [[TMP16]]
+; CHECK-FORCED-NEXT: [[TMP18:%.*]] = shl i64 [[TMP7]], 3
+; CHECK-FORCED-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP18]]
+; CHECK-FORCED-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP2:%.*]], align 4
+; CHECK-FORCED-NEXT: [[DOTNOT:%.*]] = icmp slt i64 [[TMP9]], 1
+; CHECK-FORCED-NEXT: br i1 [[DOTNOT]], label [[DOTPREHEADER:%.*]], label [[DOTPREHEADER35_LR_PH:%.*]]
+; CHECK-FORCED: .preheader35.lr.ph:
+; CHECK-FORCED-NEXT: [[TMP21:%.*]] = sub i64 0, [[TMP7]]
+; CHECK-FORCED-NEXT: br label [[DOTPREHEADER35:%.*]]
+; CHECK-FORCED: .preheader35:
+; CHECK-FORCED-NEXT: [[TMP22:%.*]] = phi i32 [ 2147483647, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP30:%.*]], [[SELECT_END:%.*]] ]
+; CHECK-FORCED-NEXT: [[TMP23:%.*]] = phi i64 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[IV_N:%.*]], [[SELECT_END]] ]
+; CHECK-FORCED-NEXT: [[DOT045:%.*]] = phi i1 [ false, [[DOTPREHEADER35_LR_PH]] ], [ [[DOT2:%.*]], [[SELECT_END]] ]
+; CHECK-FORCED-NEXT: [[DOTLCSSA364144:%.*]] = phi i32 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP29:%.*]], [[SELECT_END]] ]
+; CHECK-FORCED-NEXT: [[TMP24:%.*]] = mul nsw i64 [[TMP23]], [[TMP11]]
+; CHECK-FORCED-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[TMP19]], i64 [[TMP24]]
+; CHECK-FORCED-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
+; CHECK-FORCED-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], [[TMP20]]
+; CHECK-FORCED-NEXT: [[TMP28:%.*]] = icmp sge i32 [[TMP26]], [[TMP22]]
+; CHECK-FORCED-NEXT: [[DOTNOT33:%.*]] = and i1 [[DOT045]], [[TMP28]]
+; CHECK-FORCED-NEXT: [[OR_COND:%.*]] = select i1 [[TMP27]], i1 true, i1 [[DOTNOT33]]
+; CHECK-FORCED-NEXT: [[OR_COND_FROZEN:%.*]] = freeze i1 [[OR_COND]]
+; CHECK-FORCED-NEXT: br i1 [[OR_COND_FROZEN]], label [[SELECT_END]], label [[SELECT_FALSE:%.*]]
+; CHECK-FORCED: select.false:
+; CHECK-FORCED-NEXT: br label [[SELECT_END]]
+; CHECK-FORCED: select.end:
+; CHECK-FORCED-NEXT: [[TMP29]] = phi i32 [ [[DOTLCSSA364144]], [[DOTPREHEADER35]] ], [ 1, [[SELECT_FALSE]] ]
+; CHECK-FORCED-NEXT: [[DOT2]] = phi i1 [ [[DOT045]], [[DOTPREHEADER35]] ], [ true, [[SELECT_FALSE]] ]
+; CHECK-FORCED-NEXT: [[TMP30]] = phi i32 [ [[TMP22]], [[DOTPREHEADER35]] ], [ [[TMP20]], [[SELECT_FALSE]] ]
+; CHECK-FORCED-NEXT: [[NOT_OR_COND:%.*]] = xor i1 [[OR_COND]], true
+; CHECK-FORCED-NEXT: [[IV_N]] = add nuw nsw i64 [[TMP23]], 1
+; CHECK-FORCED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_N]], [[TMP9]]
+; CHECK-FORCED-NEXT: br i1 [[EXITCOND_NOT]], label [[DOTPREHEADER]], label [[DOTPREHEADER35]]
+; CHECK-FORCED: .preheader:
+; CHECK-FORCED-NEXT: [[DOTLCSSA3641_LCSSA:%.*]] = phi i32 [ 0, [[TMP3:%.*]] ], [ [[TMP29]], [[SELECT_END]] ]
+; CHECK-FORCED-NEXT: [[P:%.*]] = phi i1 [ false, [[TMP3]] ], [ [[NOT_OR_COND]], [[SELECT_END]] ]
+; CHECK-FORCED-NEXT: [[Q:%.*]] = select i1 [[P]], i32 [[DOTLCSSA3641_LCSSA]], i32 1
+; CHECK-FORCED-NEXT: ret i32 [[Q]]
;
%4 = getelementptr i8, ptr %0, i64 40
%5 = load i64, ptr %4, align 8
@@ -225,10 +284,10 @@ define i32 @minloc1_twonot(ptr nocapture readonly %0, ptr nocapture readonly %1,
; CHECK-NEXT: [[TMP21:%.*]] = sub i64 0, [[TMP7]]
; CHECK-NEXT: br label [[DOTPREHEADER35:%.*]]
; CHECK: .preheader35:
-; CHECK-NEXT: [[TMP22:%.*]] = phi i32 [ 2147483647, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP30:%.*]], [[DOTPREHEADER35]] ]
-; CHECK-NEXT: [[TMP23:%.*]] = phi i64 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[IV_N:%.*]], [[DOTPREHEADER35]] ]
-; CHECK-NEXT: [[DOT045:%.*]] = phi i1 [ false, [[DOTPREHEADER35_LR_PH]] ], [ [[DOT3:%.*]], [[DOTPREHEADER35]] ]
-; CHECK-NEXT: [[DOTLCSSA364144:%.*]] = phi i32 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP29:%.*]], [[DOTPREHEADER35]] ]
+; CHECK-NEXT: [[TMP22:%.*]] = phi i32 [ 2147483647, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP30:%.*]], [[SELECT_END:%.*]] ]
+; CHECK-NEXT: [[TMP23:%.*]] = phi i64 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[IV_N:%.*]], [[SELECT_END]] ]
+; CHECK-NEXT: [[DOT045:%.*]] = phi i1 [ false, [[DOTPREHEADER35_LR_PH]] ], [ [[DOT3:%.*]], [[SELECT_END]] ]
+; CHECK-NEXT: [[DOTLCSSA364144:%.*]] = phi i32 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP29:%.*]], [[SELECT_END]] ]
; CHECK-NEXT: [[TMP24:%.*]] = mul nsw i64 [[TMP23]], [[TMP11]]
; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[TMP19]], i64 [[TMP24]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
@@ -236,16 +295,21 @@ define i32 @minloc1_twonot(ptr nocapture readonly %0, ptr nocapture readonly %1,
; CHECK-NEXT: [[TMP28:%.*]] = icmp sge i32 [[TMP26]], [[TMP22]]
; CHECK-NEXT: [[DOTNOT33:%.*]] = and i1 [[DOT045]], [[TMP28]]
; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[TMP27]], i1 true, i1 [[DOTNOT33]]
-; CHECK-NEXT: [[TMP29]] = select i1 [[OR_COND]], i32 [[DOTLCSSA364144]], i32 1
+; CHECK-NEXT: [[OR_COND_FROZEN:%.*]] = freeze i1 [[OR_COND]]
+; CHECK-NEXT: br i1 [[OR_COND_FROZEN]], label [[SELECT_END]], label [[SELECT_FALSE:%.*]]
+; CHECK: select.false:
+; CHECK-NEXT: br label [[SELECT_END]]
+; CHECK: select.end:
+; CHECK-NEXT: [[TMP29]] = phi i32 [ [[DOTLCSSA364144]], [[DOTPREHEADER35]] ], [ 1, [[SELECT_FALSE]] ]
+; CHECK-NEXT: [[DOT2:%.*]] = phi i1 [ [[DOT045]], [[DOTPREHEADER35]] ], [ true, [[SELECT_FALSE]] ]
+; CHECK-NEXT: [[DOT3]] = phi i1 [ [[DOT045]], [[DOTPREHEADER35]] ], [ true, [[SELECT_FALSE]] ]
+; CHECK-NEXT: [[TMP30]] = phi i32 [ [[TMP22]], [[DOTPREHEADER35]] ], [ [[TMP20]], [[SELECT_FALSE]] ]
; CHECK-NEXT: [[NOT_OR_COND:%.*]] = xor i1 [[OR_COND]], true
-; CHECK-NEXT: [[DOT2:%.*]] = select i1 [[NOT_OR_COND]], i1 true, i1 [[DOT045]]
-; CHECK-NEXT: [[DOT3]] = select i1 [[NOT_OR_COND]], i1 true, i1 [[DOT2]]
-; CHECK-NEXT: [[TMP30]] = select i1 [[OR_COND]], i32 [[TMP22]], i32 [[TMP20]]
; CHECK-NEXT: [[IV_N]] = add nuw nsw i64 [[TMP23]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_N]], [[TMP9]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[DOTPREHEADER]], label [[DOTPREHEADER35]]
; CHECK: .preheader:
-; CHECK-NEXT: [[DOTLCSSA3641_LCSSA:%.*]] = phi i32 [ 0, [[TMP3:%.*]] ], [ [[TMP29]], [[DOTPREHEADER35]] ]
+; CHECK-NEXT: [[DOTLCSSA3641_LCSSA:%.*]] = phi i32 [ 0, [[TMP3:%.*]] ], [ [[TMP29]], [[SELECT_END]] ]
; CHECK-NEXT: ret i32 [[DOTLCSSA3641_LCSSA]]
;
%4 = getelementptr i8, ptr %0, i64 40
@@ -323,10 +387,10 @@ define i32 @minloc1_onenotdependent(ptr nocapture readonly %0, ptr nocapture rea
; CHECK-NEXT: [[TMP21:%.*]] = sub i64 0, [[TMP7]]
; CHECK-NEXT: br label [[DOTPREHEADER35:%.*]]
; CHECK: .preheader35:
-; CHECK-NEXT: [[TMP22:%.*]] = phi i32 [ 2147483647, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP30:%.*]], [[DOTPREHEADER35]] ]
-; CHECK-NEXT: [[TMP23:%.*]] = phi i64 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[IV_N:%.*]], [[DOTPREHEADER35]] ]
-; CHECK-NEXT: [[DOT045:%.*]] = phi i1 [ false, [[DOTPREHEADER35_LR_PH]] ], [ [[DOT3:%.*]], [[DOTPREHEADER35]] ]
-; CHECK-NEXT: [[DOTLCSSA364144:%.*]] = phi i32 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP29:%.*]], [[DOTPREHEADER35]] ]
+; CHECK-NEXT: [[TMP22:%.*]] = phi i32 [ 2147483647, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP30:%.*]], [[SELECT_END:%.*]] ]
+; CHECK-NEXT: [[TMP23:%.*]] = phi i64 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[IV_N:%.*]], [[SELECT_END]] ]
+; CHECK-NEXT: [[DOT045:%.*]] = phi i1 [ false, [[DOTPREHEADER35_LR_PH]] ], [ [[DOT3:%.*]], [[SELECT_END]] ]
+; CHECK-NEXT: [[DOTLCSSA364144:%.*]] = phi i32 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP29:%.*]], [[SELECT_END]] ]
; CHECK-NEXT: [[TMP24:%.*]] = mul nsw i64 [[TMP23]], [[TMP11]]
; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[TMP19]], i64 [[TMP24]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
@@ -334,16 +398,21 @@ define i32 @minloc1_onenotdependent(ptr nocapture readonly %0, ptr nocapture rea
; CHECK-NEXT: [[TMP28:%.*]] = icmp sge i32 [[TMP26]], [[TMP22]]
; CHECK-NEXT: [[DOTNOT33:%.*]] = and i1 [[DOT045]], [[TMP28]]
; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[TMP27]], i1 true, i1 [[DOTNOT33]]
-; CHECK-NEXT: [[TMP29]] = select i1 [[OR_COND]], i32 [[DOTLCSSA364144]], i32 1
+; CHECK-NEXT: [[OR_COND_FROZEN:%.*]] = freeze i1 [[OR_COND]]
+; CHECK-NEXT: br i1 [[OR_COND_FROZEN]], label [[SELECT_END]], label [[SELECT_FALSE:%.*]]
+; CHECK: select.false:
+; CHECK-NEXT: br label [[SELECT_END]]
+; CHECK: select.end:
+; CHECK-NEXT: [[TMP29]] = phi i32 [ [[DOTLCSSA364144]], [[DOTPREHEADER35]] ], [ 1, [[SELECT_FALSE]] ]
+; CHECK-NEXT: [[DOT2:%.*]] = phi i1 [ true, [[DOTPREHEADER35]] ], [ [[DOT045]], [[SELECT_FALSE]] ]
+; CHECK-NEXT: [[DOT3]] = phi i1 [ true, [[DOTPREHEADER35]] ], [ true, [[SELECT_FALSE]] ]
+; CHECK-NEXT: [[TMP30]] = phi i32 [ [[TMP22]], [[DOTPREHEADER35]] ], [ [[TMP20]], [[SELECT_FALSE]] ]
; CHECK-NEXT: [[NOT_OR_COND:%.*]] = xor i1 [[OR_COND]], true
-; CHECK-NEXT: [[DOT2:%.*]] = select i1 [[OR_COND]], i1 true, i1 [[DOT045]]
-; CHECK-NEXT: [[DOT3]] = select i1 [[NOT_OR_COND]], i1 true, i1 [[DOT2]]
-; CHECK-NEXT: [[TMP30]] = select i1 [[OR_COND]], i32 [[TMP22]], i32 [[TMP20]]
; CHECK-NEXT: [[IV_N]] = add nuw nsw i64 [[TMP23]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_N]], [[TMP9]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[DOTPREHEADER]], label [[DOTPREHEADER35]]
; CHECK: .preheader:
-; CHECK-NEXT: [[DOTLCSSA3641_LCSSA:%.*]] = phi i32 [ 0, [[TMP3:%.*]] ], [ [[TMP29]], [[DOTPREHEADER35]] ]
+; CHECK-NEXT: [[DOTLCSSA3641_LCSSA:%.*]] = phi i32 [ 0, [[TMP3:%.*]] ], [ [[TMP29]], [[SELECT_END]] ]
; CHECK-NEXT: ret i32 [[DOTLCSSA3641_LCSSA]]
;
%4 = getelementptr i8, ptr %0, i64 40
@@ -429,10 +498,10 @@ define i32 @minloc9(ptr nocapture readonly %0, ptr nocapture readonly %1, ptr no
; CHECK-NEXT: [[DOTNEG55:%.*]] = mul i64 [[TMP7]], -8
; CHECK-NEXT: br label [[DOTPREHEADER35:%.*]]
; CHECK: .preheader35:
-; CHECK-NEXT: [[TMP22:%.*]] = phi i32 [ 2147483647, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP78:%.*]], [[DOTPREHEADER35]] ]
-; CHECK-NEXT: [[TMP23:%.*]] = phi i64 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP79:%.*]], [[DOTPREHEADER35]] ]
-; CHECK-NEXT: [[DOT045:%.*]] = phi i1 [ false, [[DOTPREHEADER35_LR_PH]] ], [ [[DOT2_8:%.*]], [[DOTPREHEADER35]] ]
-; CHECK-NEXT: [[DOTLCSSA364144:%.*]] = phi i32 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP77:%.*]], [[DOTPREHEADER35]] ]
+; CHECK-NEXT: [[TMP22:%.*]] = phi i32 [ 2147483647, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP78:%.*]], [[SELECT_END15:%.*]] ]
+; CHECK-NEXT: [[TMP23:%.*]] = phi i64 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP79:%.*]], [[SELECT_END15]] ]
+; CHECK-NEXT: [[DOT045:%.*]] = phi i1 [ false, [[DOTPREHEADER35_LR_PH]] ], [ [[DOT2_8:%.*]], [[SELECT_END15]] ]
+; CHECK-NEXT: [[DOTLCSSA364144:%.*]] = phi i32 [ 0, [[DOTPREHEADER35_LR_PH]] ], [ [[TMP77:%.*]], [[SELECT_END15]] ]
; CHECK-NEXT: [[TMP24:%.*]] = mul nsw i64 [[TMP23]], [[TMP11]]
; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[TMP19]], i64 [[TMP24]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
@@ -440,95 +509,140 @@ define i32 @minloc9(ptr nocapture readonly %0, ptr nocapture readonly %1, ptr no
; CHECK-NEXT: [[TMP28:%.*]] = icmp sge i32 [[TMP26]], [[TMP22]]
; CHECK-NEXT: [[DOTNOT33:%.*]] = and i1 [[DOT045]], [[TMP28]]
; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[TMP27]], i1 true, i1 [[DOTNOT33]]
-; CHECK-NEXT: [[TMP29:%.*]] = select i1 [[OR_COND]], i32 [[DOTLCSSA364144]], i32 1
+; CHECK-NEXT: [[OR_COND_FROZEN:%.*]] = freeze i1 [[OR_COND]]
+; CHECK-NEXT: br i1 [[OR_COND_FROZEN]], label [[SELECT_END:%.*]], label [[SELECT_FALSE:%.*]]
+; CHECK: select.false:
+; CHECK-NEXT: br label [[SELECT_END]]
+; CHECK: select.end:
+; CHECK-NEXT: [[TMP29:%.*]] = phi i32 [ [[DOTLCSSA364144]], [[DOTPREHEADER35]] ], [ 1, [[SELECT_FALSE]] ]
+; CHECK-NEXT: [[DOT2:%.*]] = phi i1 [ [[DOT045]], [[DOTPREHEADER35]] ], [ true, [[SELECT_FALSE]] ]
+; CHECK-NEXT: [[TMP30:%.*]] = phi i32 [ [[TMP22]], [[DOTPREHEADER35]] ], [ [[TMP20]], [[SELECT_FALSE]] ]
; CHECK-NEXT: [[NOT_OR_COND:%.*]] = xor i1 [[OR_COND]], true
-; CHECK-NEXT: [[DOT2:%.*]] = select i1 [[NOT_OR_COND]], i1 true, i1 [[DOT045]]
-; CHECK-NEXT: [[TMP30:%.*]] = select i1 [[OR_COND]], i32 [[TMP22]], i32 [[TMP20]]
; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i8, ptr [[TMP25]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP31]], align 4
; CHECK-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], [[TMP20]]
; CHECK-NEXT: [[TMP34:%.*]] = icmp sge i32 [[TMP32]], [[TMP30]]
; CHECK-NEXT: [[DOTNOT33_1:%.*]] = and i1 [[DOT2]], [[TMP34]]
; CHECK-NEXT: [[OR_COND_1:%.*]] = select i1 [[TMP33]], i1 true, i1 [[DOTNOT33_1]]
-; CHECK-NEXT: [[TMP35:%.*]] = select i1 [[OR_COND_1]], i32 [[TMP29]], i32 2
+; CHECK-NEXT: [[OR_COND_1_FROZEN:%.*]] = freeze i1 [[OR_COND_1]]
+; CHECK-NEXT: br i1 [[OR_COND_1_FROZEN]], label [[SELECT_END1:%.*]], label [[SELECT_FALSE2:%.*]]
+; CHECK: select.false2:
+; CHECK-NEXT: br label [[SELECT_END1]]
+; CHECK: select.end1:
+; CHECK-NEXT: [[TMP35:%.*]] = phi i32 [ [[TMP29]], [[SELECT_END]] ], [ 2, [[SELECT_FALSE2]] ]
+; CHECK-NEXT: [[DOT2_1:%.*]] = phi i1 [ [[DOT2]], [[SELECT_END]] ], [ true, [[SELECT_FALSE2]] ]
+; CHECK-NEXT: [[TMP36:%.*]] = phi i32 [ [[TMP30]], [[SELECT_END]] ], [ [[TMP20]], [[SELECT_FALSE2]] ]
; CHECK-NEXT: [[NOT_OR_COND_1:%.*]] = xor i1 [[OR_COND_1]], true
-; CHECK-NEXT: [[DOT2_1:%.*]] = select i1 [[NOT_OR_COND_1]], i1 true, i1 [[DOT2]]
-; CHECK-NEXT: [[TMP36:%.*]] = select i1 [[OR_COND_1]], i32 [[TMP30]], i32 [[TMP20]]
; CHECK-NEXT: [[TMP37:%.*]] = getelementptr i8, ptr [[TMP25]], i64 [[DOTNEG]]
; CHECK-NEXT: [[TMP38:%.*]] = load i32, ptr [[TMP37]], align 4
; CHECK-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], [[TMP20]]
; CHECK-NEXT: [[TMP40:%.*]] = icmp sge i32 [[TMP38]], [[TMP36]]
; CHECK-NEXT: [[DOTNOT33_2:%.*]] = and i1 [[DOT2_1]], [[TMP40]]
; CHECK-NEXT: [[OR_COND_2:%.*]] = select i1 [[TMP39]], i1 true, i1 [[DOTNOT33_2]]
-; CHECK-NEXT: [[TMP41:%.*]] = select i1 [[OR_COND_2]], i32 [[TMP35]], i32 3
+; CHECK-NEXT: [[OR_COND_2_FROZEN:%.*]] = freeze i1 [[OR_COND_2]]
+; CHECK-NEXT: br i1 [[OR_COND_2_FROZEN]], label [[SELECT_END3:%.*]], label [[SELECT_FALSE4:%.*]]
+; CHECK: select.false4:
+; CHECK-NEXT: br label [[SELECT_END3]]
+; CHECK: select.end3:
+; CHECK-NEXT: [[TMP41:%.*]] = phi i32 [ [[TMP35]], [[SELECT_END1]] ], [ 3, [[SELECT_FALSE4]] ]
+; CHECK-NEXT: [[DOT2_2:%.*]] = phi i1 [ [[DOT2_1]], [[SELECT_END1]] ], [ true, [[SELECT_FALSE4]] ]
+; CHECK-NEXT: [[TMP42:%.*]] = phi i32 [ [[TMP36]], [[SELECT_END1]] ], [ [[TMP20]], [[SELECT_FALSE4]] ]
; CHECK-NEXT: [[NOT_OR_COND_2:%.*]] = xor i1 [[OR_COND_2]], true
-; CHECK-NEXT: [[DOT2_2:%.*]] = select i1 [[NOT_OR_COND_2]], i1 true, i1 [[DOT2_1]]
-; CHECK-NEXT: [[TMP42:%.*]] = select i1 [[OR_COND_2]], i32 [[TMP36]], i32 [[TMP20]]
; CHECK-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[TMP25]], i64 [[DOTNEG50]]
; CHECK-NEXT: [[TMP44:%.*]] = load i32, ptr [[TMP43]], align 4
; CHECK-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], [[TMP20]]
; CHECK-NEXT: [[TMP46:%.*]] = icmp sge i32 [[TMP44]], [[TMP42]]
; CHECK-NEXT: [[DOTNOT33_3:%.*]] = and i1 [[DOT2_2]], [[TMP46]]
; CHECK-NEXT: [[OR_COND_3:%.*]] = select i1 [[TMP45]], i1 true, i1 [[DOTNOT33_3]]
-; CHECK-NEXT: [[TMP47:%.*]] = select i1 [[OR_COND_3]], i32 [[TMP41]], i32 4
+; CHECK-NEXT: [[OR_COND_3_FROZEN:%.*]] = freeze i1 [[OR_COND_3]]
+; CHECK-NEXT: br i1 [[OR_COND_3_FROZEN]], label [[SELECT_END5:%.*]], label [[SELECT_FALSE6:%.*]]
+; CHECK: select.false6:
+; CHECK-NEXT: br label [[SELECT_END5]]
+; CHECK: select.end5:
+; CHECK-NEXT: [[TMP47:%.*]] = phi i32 [ [[TMP41]], [[SELECT_END3]] ], [ 4, [[SELECT_FALSE6]] ]
+; CHECK-NEXT: [[DOT2_3:%.*]] = phi i1 [ [[DOT2_2]], [[SELECT_END3]] ], [ true, [[SELECT_FALSE6]] ]
+; CHECK-NEXT: [[TMP48:%.*]] = phi i32 [ [[TMP42]], [[SELECT_END3]] ], [ [[TMP20]], [[SELECT_FALSE6]] ]
; CHECK-NEXT: [[NOT_OR_COND_3:%.*]] = xor i1 [[OR_COND_3]], true
-; CHECK-NEXT: [[DOT2_3:%.*]] = select i1 [[NOT_OR_COND_3]], i1 true, i1 [[DOT2_2]]
-; CHECK-NEXT: [[TMP48:%.*]] = select i1 [[OR_COND_3]], i32 [[TMP42]], i32 [[TMP20]]
; CHECK-NEXT: [[TMP49:%.*]] = getelementptr i8, ptr [[TMP25]], i64 [[DOTNEG51]]
; CHECK-NEXT: [[TMP50:%.*]] = load i32, ptr [[TMP49]], align 4
; CHECK-NEXT: [[TMP51:%.*]] = icmp ne i32 [[TMP50]], [[TMP20]]
; CHECK-NEXT: [[TMP52:%.*]] = icmp sge i32 [[TMP50]], [[TMP48]]
; CHECK-NEXT: [[DOTNOT33_4:%.*]] = and i1 [[DOT2_3]], [[TMP52]]
; CHECK-NEXT: [[OR_COND_4:%.*]] = select i1 [[TMP51]], i1 true, i1 [[DOTNOT33_4]]
-; CHECK-NEXT: [[TMP53:%.*]] = select i1 [[OR_COND_4]], i32 [[TMP47]], i32 5
+; CHECK-NEXT: [[OR_COND_4_FROZEN:%.*]] = freeze i1 [[OR_COND_4]]
+; CHECK-NEXT: br i1 [[OR_COND_4_FROZEN]], label [[SELECT_END7:%.*]], label [[SELECT_FALSE8:%.*]]
+; CHECK: select.false8:
+; CHECK-NEXT: br label [[SELECT_END7]]
+; CHECK: select.end7:
+; CHECK-NEXT: [[TMP53:%.*]] = phi i32 [ [[TMP47]], [[SELECT_END5]] ], [ 5, [[SELECT_FALSE8]] ]
+; CHECK-NEXT: [[DOT2_4:%.*]] = phi i1 [ [[DOT2_3]], [[SELECT_END5]] ], [ true, [[SELECT_FALSE8]] ]
+; CHECK-NEXT: [[TMP54:%.*]] = phi i32 [ [[TMP48]], [[SELECT_END5]] ], [ [[TMP20]], [[SELECT_FALSE8]] ]
; CHECK-NEXT: [[NOT_OR_COND_4:%.*]] = xor i1 [[OR_COND_4]], true
-; CHECK-NEXT: [[DOT2_4:%.*]] = select i1 [[NOT_OR_COND_4]], i1 true, i1 [[DOT2_3]]
-; CHECK-NEXT: [[TMP54:%.*]] = select i1 [[OR_COND_4]], i32 [[TMP48]], i32 [[TMP20]]
; CHECK-NEXT: [[TMP55:%.*]] = getelementptr i8, ptr [[TMP25]], i64 [[DOTNEG52]]
; CHECK-NEXT: [[TMP56:%.*]] = load i32, ptr [[TMP55]], align 4
; CHECK-NEXT: [[TMP57:%.*]] = icmp ne i32 [[TMP56]], [[TMP20]]
; CHECK-NEXT: [[TMP58:%.*]] = icmp sge i32 [[TMP56]], [[TMP54]]
; CHECK-NEXT: [[DOTNOT33_5:%.*]] = and i1 [[DOT2_4]], [[TMP58]]
; CHECK-NEXT: [[OR_COND_5:%.*]] = select i1 [[TMP57]], i1 true, i1 [[DOTNOT33_5]]
-; CHECK-NEXT: [[TMP59:%.*]] = select i1 [[OR_COND_5]], i32 [[TMP53]], i32 6
+; CHECK-NEXT: [[OR_COND_5_FROZEN:%.*]] = freeze i1 [[OR_COND_5]]
+; CHECK-NEXT: br i1 [[OR_COND_5_FROZEN]], label [[SELECT_END9:%.*]], label [[SELECT_FALSE10:%.*]]
+; CHECK: select.false10:
+; CHECK-NEXT: br label [[SELECT_END9]]
+; CHECK: select.end9:
+; CHECK-NEXT: [[TMP59:%.*]] = phi i32 [ [[TMP53]], [[SELECT_END7]] ], [ 6, [[SELECT_FALSE10]] ]
+; CHECK-NEXT: [[DOT2_5:%.*]] = phi i1 [ [[DOT2_4]], [[SELECT_END7]] ], [ true, [[SELECT_FALSE10]] ]
+; CHECK-NEXT: [[TMP60:%.*]] = phi i32 [ [[TMP54]], [[SELECT_END7]] ], [ [[TMP20]], [[SELECT_FALSE10]] ]
; CHECK-NEXT: [[NOT_OR_COND_5:%.*]] = xor i1 [[OR_COND_5]], true
-; CHECK-NEXT: [[DOT2_5:%.*]] = select i1 [[NOT_OR_COND_5]], i1 true, i1 [[DOT2_4]]
-; CHECK-NEXT: [[TMP60:%.*]] = select i1 [[OR_COND_5]], i32 [[TMP54]], i32 [[TMP20]]
; CHECK-NEXT: [[TMP61:%.*]] = getelementptr i8, ptr [[TMP25]], i64 [[DOTNEG53]]
; CHECK-NEXT: [[TMP62:%.*]] = load i32, ptr [[TMP61]], align 4
; CHECK-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], [[TMP20]]
; CHECK-NEXT: [[TMP64:%.*]] = icmp sge i32 [[TMP62]], [[TMP60]]
; CHECK-NEXT: [[DOTNOT33_6:%.*]] = and i1 [[DOT2_5]], [[TMP64]]
; CHECK-NEXT: [[OR_COND_6:%.*]] = select i1 [[TMP63]], i1 true, i1 [[DOTNOT33_6]]
-; CHECK-NEXT: [[TMP65:%.*]] = select i1 [[OR_COND_6]], i32 [[TMP59]], i32 7
+; CHECK-NEXT: [[OR_COND_6_FROZEN:%.*]] = freeze i1 [[OR_COND_6]]
+; CHECK-NEXT: br i1 [[OR_COND_6_FROZEN]], label [[SELECT_END11:%.*]], label [[SELECT_FALSE12:%.*]]
+; CHECK: select.false12:
+; CHECK-NEXT: br label [[SELECT_END11]]
+; CHECK: select.end11:
+; CHECK-NEXT: [[TMP65:%.*]] = phi i32 [ [[TMP59]], [[SELECT_END9]] ], [ 7, [[SELECT_FALSE12]] ]
+; CHECK-NEXT: [[DOT2_6:%.*]] = phi i1 [ [[DOT2_5]], [[SELECT_END9]] ], [ true, [[SELECT_FALSE12]] ]
+; CHECK-NEXT: [[TMP66:%.*]] = phi i32 [ [[TMP60]], [[SELECT_END9]] ], [ [[TMP20]], [[SELECT_FALSE12]] ]
; CHECK-NEXT: [[NOT_OR_COND_6:%.*]] = xor i1 [[OR_COND_6]], true
-; CHECK-NEXT: [[DOT2_6:%.*]] = select i1 [[NOT_OR_COND_6]], i1 true, i1 [[DOT2_5]]
-; CHECK-NEXT: [[TMP66:%.*]] = select i1 [[OR_COND_6]], i32 [[TMP60]], i32 [[TMP20]]
; CHECK-NEXT: [[TMP67:%.*]] = getelementptr i8, ptr [[TMP25]], i64 [[DOTNEG54]]
; CHECK-NEXT: [[TMP68:%.*]] = load i32, ptr [[TMP67]], align 4
; CHECK-NEXT: [[TMP69:%.*]] = icmp ne i32 [[TMP68]], [[TMP20]]
; CHECK-NEXT: [[TMP70:%.*]] = icmp sge i32 [[TMP68]], [[TMP66]]
; CHECK-NEXT: [[DOTNOT33_7:%.*]] = and i1 [[DOT2_6]], [[TMP70]]
; CHECK-NEXT: [[OR_COND_7:%.*]] = select i1 [[TMP69]], i1 true, i1 [[DOTNOT33_7]]
-; CHECK-NEXT: [[TMP71:%.*]] = select i1 [[OR_COND_7]], i32 [[TMP65]], i32 8
+; CHECK-NEXT: [[OR_COND_7_FROZEN:%.*]] = freeze i1 [[OR_COND_7]]
+; CHECK-NEXT: br i1 [[OR_COND_7_FROZEN]], label [[SELECT_END13:%.*]], label [[SELECT_FALSE14:%.*]]
+; CHECK: select.false14:
+; CHECK-NEXT: br label [[SELECT_END13]]
+; CHECK: select.end13:
+; CHECK-NEXT: [[TMP71:%.*]] = phi i32 [ [[TMP65]], [[SELECT_END11]] ], [ 8, [[SELECT_FALSE14]] ]
+; CHECK-NEXT: [[DOT2_7:%.*]] = phi i1 [ [[DOT2_6]], [[SELECT_END11]] ], [ true, [[SELECT_FALSE14]] ]
+; CHECK-NEXT: [[TMP72:%.*]] = phi i32 [ [[TMP66]], [[SELECT_END11]] ], [ [[TMP20]], [[SELECT_FALSE14]] ]
; CHECK-NEXT: [[NOT_OR_COND_7:%.*]] = xor i1 [[OR_COND_7]], true
-; CHECK-NEXT: [[DOT2_7:%.*]] = select i1 [[NOT_OR_COND_7]], i1 true, i1 [[DOT2_6]]
-; CHECK-NEXT: [[TMP72:%.*]] = select i1 [[OR_COND_7]], i32 [[TMP66]], i32 [[TMP20]]
; CHECK-NEXT: [[TMP73:%.*]] = getelementptr i8, ptr [[TMP25]], i64 [[DOTNEG55]]
; CHECK-NEXT: [[TMP74:%.*]] = load i32, ptr [[TMP73]], align 4
; CHECK-NEXT: [[TMP75:%.*]] = icmp ne i32 [[TMP74]], [[TMP20]]
; CHECK-NEXT: [[TMP76:%.*]] = icmp sge i32 [[TMP74]], [[TMP72]]
; CHECK-NEXT: [[DOTNOT33_8:%.*]] = and i1 [[DOT2_7]], [[TMP76]]
; CHECK-NEXT: [[OR_COND_8:%.*]] = select i1 [[TMP75]], i1 true, i1 [[DOTNOT33_8]]
-; CHECK-NEXT: [[TMP77]] = select i1 [[OR_COND_8]], i32 [[TMP71]], i32 9
+; CHECK-NEXT: [[OR_COND_8_FROZEN:%.*]] = freeze i1 [[OR_COND_8]]
+; CHECK-NEXT: br i1 [[OR_COND_8_FROZEN]], label [[SELECT_END15]], label [[SELECT_FALSE16:%.*]]
+; CHECK: select.false16:
+; CHECK-NEXT: br label [[SELECT_END15]]
+; CHECK: select.end15:
+; CHECK-NEXT: [[TMP77]] = phi i32 [ [[TMP71]], [[SELECT_END13]] ], [ 9, [[SELECT_FALSE16]] ]
+; CHECK-NEXT: [[DOT2_8]] = phi i1 [ [[DOT2_7]], [[SELECT_END13]] ], [ true, [[SELECT_FALSE16]] ]
+; CHECK-NEXT: [[TMP78]] = phi i32 [ [[TMP72]], [[SELECT_END13]] ], [ [[TMP20]], [[SELECT_FALSE16]] ]
; CHECK-NEXT: [[NOT_OR_COND_8:%.*]] = xor i1 [[OR_COND_8]], true
-; CHECK-NEXT: [[DOT2_8]] = select i1 [[NOT_OR_COND_8]], i1 true, i1 [[DOT2_7]]
-; CHECK-NEXT: [[TMP78]] = select i1 [[OR_COND_8]], i32 [[TMP72]], i32 [[TMP20]]
; CHECK-NEXT: [[TMP79]] = add nuw nsw i64 [[TMP23]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[TMP79]], [[TMP9]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[DOTPREHEADER]], label [[DOTPREHEADER35]]
; CHECK: .preheader:
-; CHECK-NEXT: [[DOTLCSSA3641_LCSSA:%.*]] = phi i32 [ 0, [[TMP3:%.*]] ], [ [[TMP77]], [[DOTPREHEADER35]] ]
+; CHECK-NEXT: [[DOTLCSSA3641_LCSSA:%.*]] = phi i32 [ 0, [[TMP3:%.*]] ], [ [[TMP77]], [[SELECT_END15]] ]
; CHECK-NEXT: ret i32 [[DOTLCSSA3641_LCSSA]]
;
%4 = getelementptr i8, ptr %0, i64 40
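The regenerated checks above all follow the same select-to-branch expansion: the (possibly poison) select condition is frozen, branched on, and every select guarded by that condition becomes a phi in a shared select.end block. A minimal, self-contained sketch of that shape (the function @sketch and its value names are illustrative, not taken from the test):

define i32 @sketch(i1 %cond, i32 %a) {
entry:
  %cond.frozen = freeze i1 %cond        ; never branch directly on a possibly-poison condition
  br i1 %cond.frozen, label %select.end, label %select.false
select.false:                           ; empty block, kept only as a phi predecessor
  br label %select.end
select.end:                             ; each former select becomes a phi here
  %v = phi i32 [ %a, %entry ], [ 1, %select.false ]   ; was: select i1 %cond, i32 %a, i32 1
  ret i32 %v
}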
diff --git a/llvm/test/CodeGen/AArch64/sign-return-address-tailcall.ll b/llvm/test/CodeGen/AArch64/sign-return-address-tailcall.ll
index cf033cb8208c..0cc707298e45 100644
--- a/llvm/test/CodeGen/AArch64/sign-return-address-tailcall.ll
+++ b/llvm/test/CodeGen/AArch64/sign-return-address-tailcall.ll
@@ -129,4 +129,36 @@ define i32 @tailcall_ib_key() "sign-return-address"="all" "sign-return-address-k
ret i32 %call
}
+define i32 @tailcall_two_branches(i1 %0) "sign-return-address"="all" {
+; COMMON-LABEL: tailcall_two_branches:
+; COMMON: tbz w0, #0, .[[ELSE:LBB[_0-9]+]]
+; COMMON: str x30, [sp, #-16]!
+; COMMON: bl callee2
+; COMMON: ldr x30, [sp], #16
+; COMMON-NEXT: [[AUTIASP]]
+; COMMON-NEXT: .[[ELSE]]:
+
+; LDR-NEXT: ldr w16, [x30]
+;
+; BITS-NOTBI-NEXT: eor x16, x30, x30, lsl #1
+; BITS-NOTBI-NEXT: tbnz x16, #62, .[[FAIL:LBB[_0-9]+]]
+;
+; XPAC-NEXT: mov x16, x30
+; XPAC-NEXT: [[XPACLRI]]
+; XPAC-NEXT: cmp x16, x30
+; XPAC-NEXT: b.ne .[[FAIL:LBB[_0-9]+]]
+;
+; COMMON-NEXT: b callee
+; BRK-NEXT: .[[FAIL]]:
+; BRK-NEXT: brk #0xc470
+ br i1 %0, label %2, label %3
+2:
+ call void @callee2()
+ br label %3
+3:
+ %call = tail call i32 @callee()
+ ret i32 %call
+}
+
declare i32 @callee()
+declare void @callee2()
diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
index 8cb8b1c92fa7..8ce24ceb33d7 100644
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
@@ -72,6 +72,203 @@ define aarch64_sve_vector_pcs <vscale x 16 x i1> @caller_with_many_svepred_arg(<
ret <vscale x 16 x i1> %ret
}
+; Test that arg2 is passed indirectly through x0, i.e., x0 = &%arg2, and that the return value is loaded from [x0]:
+; P0 = ldr [x0]
+define aarch64_sve_vector_pcs <vscale x 16 x i1> @callee_with_svepred_arg_4xv16i1_1xv16i1([4 x <vscale x 16 x i1>] %arg1, [1 x <vscale x 16 x i1>] %arg2) {
+; CHECK: name: callee_with_svepred_arg_4xv16i1_1xv16i1
+; CHECK: [[BASE:%[0-9]+]]:gpr64common = COPY $x0
+; CHECK: [[PRED0:%[0-9]+]]:ppr = LDR_PXI [[BASE]], 0 :: (load (<vscale x 1 x s16>))
+; CHECK: $p0 = COPY [[PRED0]]
+; CHECK: RET_ReallyLR implicit $p0
+ %res = extractvalue [1 x <vscale x 16 x i1>] %arg2, 0
+ ret <vscale x 16 x i1> %res
+}
+
+; Test that arg1 is stored to the stack from p0, and that the stack location is passed through x0 to set up the call:
+; str P0, [stack_loc_for_args]
+; x0 = stack_loc_for_args
+define aarch64_sve_vector_pcs <vscale x 16 x i1> @caller_with_svepred_arg_1xv16i1_4xv16i1([1 x <vscale x 16 x i1>] %arg1, [4 x <vscale x 16 x i1>] %arg2) {
+; CHECK: name: caller_with_svepred_arg_1xv16i1_4xv16i1
+; CHECK: stack:
+; CHECK: - { id: 0, name: '', type: default, offset: 0, size: 2, alignment: 2,
+; CHECK-NEXT: stack-id: scalable-vector,
+; CHECK: [[PRED0:%[0-9]+]]:ppr = COPY $p0
+; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+; CHECK: STR_PXI [[PRED0]], %stack.0, 0 :: (store (<vscale x 1 x s16>) into %stack.0)
+; CHECK: [[STACK:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0, 0
+; CHECK: $x0 = COPY [[STACK]]
+; CHECK: BL @callee_with_svepred_arg_4xv16i1_1xv16i1, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit $p0, implicit $p1, implicit $p2, implicit $p3, implicit $x0, implicit-def $sp, implicit-def $p0
+; CHECK: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ %res = call <vscale x 16 x i1> @callee_with_svepred_arg_4xv16i1_1xv16i1([4 x <vscale x 16 x i1>] %arg2, [1 x <vscale x 16 x i1>] %arg1)
+ ret <vscale x 16 x i1> %res
+}
+
+; Test that arg2 is passed indirectly through x0, i.e., x0 = &%arg2, and that the return values are loaded from [x0]:
+; P0 = ldr [x0]
+; P1 = ldr [x0 + sizeof(Px)]
+; P2 = ldr [x0 + 2*sizeof(Px)]
+; P3 = ldr [x0 + 3*sizeof(Px)]
+define aarch64_sve_vector_pcs [4 x <vscale x 16 x i1>] @callee_with_svepred_arg_4xv16i1_4xv16i1([4 x <vscale x 16 x i1>] %arg1, [4 x <vscale x 16 x i1>] %arg2) {
+; CHECK: name: callee_with_svepred_arg_4xv16i1_4xv16i1
+; CHECK: [[BASE:%[0-9]+]]:gpr64common = COPY $x0
+; CHECK: [[OFFSET1:%[0-9]+]]:gpr64 = CNTD_XPiI 31, 1, implicit $vg
+; CHECK: [[ADDR1:%[0-9]+]]:gpr64common = nuw ADDXrr [[BASE]], killed [[OFFSET1]]
+; CHECK: [[PRED1:%[0-9]+]]:ppr = LDR_PXI killed [[ADDR1]], 0 :: (load (<vscale x 1 x s16>))
+; CHECK: [[OFFSET2:%[0-9]+]]:gpr64 = CNTW_XPiI 31, 1, implicit $vg
+; CHECK: [[ADDR2:%[0-9]+]]:gpr64common = ADDXrr [[BASE]], killed [[OFFSET2]]
+; CHECK: [[PRED2:%[0-9]+]]:ppr = LDR_PXI killed [[ADDR2]], 0 :: (load (<vscale x 1 x s16>))
+; CHECK: [[OFFSET3:%[0-9]+]]:gpr64 = CNTD_XPiI 31, 3, implicit $vg
+; CHECK: [[ADDR3:%[0-9]+]]:gpr64common = ADDXrr [[BASE]], killed [[OFFSET3]]
+; CHECK: [[PRED3:%[0-9]+]]:ppr = LDR_PXI killed [[ADDR3]], 0 :: (load (<vscale x 1 x s16>))
+; CHECK: [[PRED0:%[0-9]+]]:ppr = LDR_PXI [[BASE]], 0 :: (load (<vscale x 1 x s16>))
+; CHECK: $p0 = COPY [[PRED0]]
+; CHECK: $p1 = COPY [[PRED1]]
+; CHECK: $p2 = COPY [[PRED2]]
+; CHECK: $p3 = COPY [[PRED3]]
+; CHECK: RET_ReallyLR implicit $p0, implicit $p1, implicit $p2, implicit $p3
+ ret [4 x <vscale x 16 x i1>] %arg2
+}
+
+; Test that arg1 is stored to the stack from p0~p3, and that the stack location is passed through x0 to set up the call:
+; str P0, [stack_loc_for_args]
+; str P1, [stack_loc_for_args + sizeof(Px)]
+; str P2, [stack_loc_for_args + 2*sizeof(Px)]
+; str P3, [stack_loc_for_args + 3*sizeof(Px)]
+; x0 = stack_loc_for_args
+define [4 x <vscale x 16 x i1>] @caller_with_svepred_arg_4xv16i1_4xv16i1([4 x <vscale x 16 x i1>] %arg1, [4 x <vscale x 16 x i1>] %arg2) {
+; CHECK: name: caller_with_svepred_arg_4xv16i1_4xv16i1
+; CHECK: stack:
+; CHECK: - { id: 0, name: '', type: default, offset: 0, size: 8, alignment: 2,
+; CHECK-NEXT: stack-id: scalable-vector,
+; CHECK: [[PRED3:%[0-9]+]]:ppr = COPY $p3
+; CHECK: [[PRED2:%[0-9]+]]:ppr = COPY $p2
+; CHECK: [[PRED1:%[0-9]+]]:ppr = COPY $p1
+; CHECK: [[PRED0:%[0-9]+]]:ppr = COPY $p0
+; CHECK: [[OFFSET1:%[0-9]+]]:gpr64 = CNTD_XPiI 31, 1, implicit $vg
+; CHECK: [[OFFSET2:%[0-9]+]]:gpr64 = CNTW_XPiI 31, 1, implicit $vg
+; CHECK: [[OFFSET3:%[0-9]+]]:gpr64 = CNTD_XPiI 31, 3, implicit $vg
+; CHECK: [[STACK:%[0-9]+]]:gpr64common = ADDXri %stack.0, 0, 0
+; CHECK: [[ADDR3:%[0-9]+]]:gpr64common = ADDXrr [[STACK]], [[OFFSET3]]
+; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+; CHECK: STR_PXI [[PRED3]], killed [[ADDR3]], 0 :: (store (<vscale x 1 x s16>))
+; CHECK: [[ADDR2:%[0-9]+]]:gpr64common = ADDXrr [[STACK]], [[OFFSET2]]
+; CHECK: STR_PXI [[PRED2]], killed [[ADDR2]], 0 :: (store (<vscale x 1 x s16>))
+; CHECK: [[ADDR1:%[0-9]+]]:gpr64common = nuw ADDXrr [[STACK]], [[OFFSET1]]
+; CHECK: STR_PXI [[PRED1]], killed [[ADDR1]], 0 :: (store (<vscale x 1 x s16>))
+; CHECK: STR_PXI [[PRED0]], %stack.0, 0 :: (store (<vscale x 1 x s16>) into %stack.0)
+; CHECK: $x0 = COPY [[STACK]]
+; CHECK: BL @callee_with_svepred_arg_4xv16i1_4xv16i1, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit $p0, implicit $p1, implicit $p2, implicit $p3, implicit $x0, implicit-def $sp, implicit-def $p0, implicit-def $p1, implicit-def $p2, implicit-def $p3
+; CHECK: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ %res = call [4 x <vscale x 16 x i1>] @callee_with_svepred_arg_4xv16i1_4xv16i1([4 x <vscale x 16 x i1>] %arg2, [4 x <vscale x 16 x i1>] %arg1)
+ ret [4 x <vscale x 16 x i1>] %res
+}
+
+; Test that arg2 is passed indirectly through x0, i.e., x0 = &%arg2, and that the return values are loaded from [x0]:
+; P0 = ldr [x0]
+; P1 = ldr [x0 + sizeof(Px)]
+; P2 = ldr [x0 + 2*sizeof(Px)]
+; P3 = ldr [x0 + 3*sizeof(Px)]
+define aarch64_sve_vector_pcs [2 x <vscale x 32 x i1>] @callee_with_svepred_arg_1xv16i1_2xv32i1([1 x <vscale x 16 x i1>] %arg1, [2 x <vscale x 32 x i1>] %arg2) {
+; CHECK: name: callee_with_svepred_arg_1xv16i1_2xv32i1
+; CHECK: [[BASE:%[0-9]+]]:gpr64common = COPY $x0
+; CHECK: [[OFFSET1:%[0-9]+]]:gpr64 = CNTD_XPiI 31, 1, implicit $vg
+; CHECK: [[ADDR1:%[0-9]+]]:gpr64common = nuw ADDXrr [[BASE]], killed [[OFFSET1]]
+; CHECK: [[PRED1:%[0-9]+]]:ppr = LDR_PXI killed [[ADDR1]], 0 :: (load (<vscale x 1 x s16>))
+; CHECK: [[OFFSET2:%[0-9]+]]:gpr64 = CNTW_XPiI 31, 1, implicit $vg
+; CHECK: [[ADDR2:%[0-9]+]]:gpr64common = ADDXrr [[BASE]], killed [[OFFSET2]]
+; CHECK: [[PRED2:%[0-9]+]]:ppr = LDR_PXI killed [[ADDR2]], 0 :: (load (<vscale x 1 x s16>))
+; CHECK: [[OFFSET3:%[0-9]+]]:gpr64 = CNTD_XPiI 31, 3, implicit $vg
+; CHECK: [[ADDR3:%[0-9]+]]:gpr64common = ADDXrr [[BASE]], killed [[OFFSET3]]
+; CHECK: [[PRED3:%[0-9]+]]:ppr = LDR_PXI killed [[ADDR3]], 0 :: (load (<vscale x 1 x s16>))
+; CHECK: [[PRED0:%[0-9]+]]:ppr = LDR_PXI [[BASE]], 0 :: (load (<vscale x 1 x s16>))
+; CHECK: $p0 = COPY [[PRED0]]
+; CHECK: $p1 = COPY [[PRED1]]
+; CHECK: $p2 = COPY [[PRED2]]
+; CHECK: $p3 = COPY [[PRED3]]
+; CHECK: RET_ReallyLR implicit $p0, implicit $p1, implicit $p2, implicit $p3
+ ret [2 x <vscale x 32 x i1>] %arg2
+}
+
+; Test that arg1 is stored to the stack from p0~p3, and that the stack location is passed through x0 to set up the call:
+; str P0, [stack_loc_for_args]
+; str P1, [stack_loc_for_args + sizeof(Px)]
+; str P2, [stack_loc_for_args + 2*sizeof(Px)]
+; str P3, [stack_loc_for_args + 3*sizeof(Px)]
+; x0 = stack_loc_for_args
+define [2 x <vscale x 32 x i1>] @caller_with_svepred_arg_2xv32i1_1xv16i1([2 x <vscale x 32 x i1>] %arg1, [1 x <vscale x 16 x i1>] %arg2) {
+; CHECK: name: caller_with_svepred_arg_2xv32i1_1xv16i1
+; CHECK: stack:
+; CHECK: - { id: 0, name: '', type: default, offset: 0, size: 8, alignment: 2,
+; CHECK-NEXT: stack-id: scalable-vector,
+; CHECK: [[PRED3:%[0-9]+]]:ppr = COPY $p3
+; CHECK: [[PRED2:%[0-9]+]]:ppr = COPY $p2
+; CHECK: [[PRED1:%[0-9]+]]:ppr = COPY $p1
+; CHECK: [[PRED0:%[0-9]+]]:ppr = COPY $p0
+; CHECK: [[OFFSET3:%[0-9]+]]:gpr64 = CNTD_XPiI 31, 3, implicit $vg
+; CHECK: [[STACK:%[0-9]+]]:gpr64common = ADDXri %stack.0, 0, 0
+; CHECK: [[ADDR3:%[0-9]+]]:gpr64common = ADDXrr [[STACK]], killed [[OFFSET3]]
+; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+; CHECK: STR_PXI [[PRED3]], killed [[ADDR3]], 0 :: (store (<vscale x 1 x s16>))
+; CHECK: [[OFFSET2:%[0-9]+]]:gpr64 = CNTW_XPiI 31, 1, implicit $vg
+; CHECK: [[ADDR2:%[0-9]+]]:gpr64common = ADDXrr [[STACK]], killed [[OFFSET2]]
+; CHECK: STR_PXI [[PRED2]], killed [[ADDR2]], 0 :: (store (<vscale x 1 x s16>))
+; CHECK: [[OFFSET1:%[0-9]+]]:gpr64 = CNTD_XPiI 31, 1, implicit $vg
+; CHECK: [[ADDR1:%[0-9]+]]:gpr64common = nuw ADDXrr [[STACK]], killed [[OFFSET1]]
+; CHECK: STR_PXI [[PRED1]], killed [[ADDR1]], 0 :: (store (<vscale x 1 x s16>))
+; CHECK: STR_PXI [[PRED0]], %stack.0, 0 :: (store (<vscale x 1 x s16>) into %stack.0)
+; CHECK: $x0 = COPY [[STACK]]
+; CHECK: BL @callee_with_svepred_arg_1xv16i1_2xv32i1, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit $p0, implicit $x0, implicit-def $sp, implicit-def $p0, implicit-def $p1, implicit-def $p2, implicit-def $p3
+; CHECK: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ %res = call [2 x <vscale x 32 x i1>] @callee_with_svepred_arg_1xv16i1_2xv32i1([1 x <vscale x 16 x i1>] %arg2, [2 x <vscale x 32 x i1>] %arg1)
+ ret [2 x <vscale x 32 x i1>] %res
+}
+
+; Test that arg1 and arg3 are passed via P0~P3, while arg2 is passed indirectly through an address on the stack in x0
+define aarch64_sve_vector_pcs [4 x <vscale x 16 x i1>] @callee_with_svepred_arg_2xv16i1_4xv16i1_2xv16i1([2 x <vscale x 16 x i1>] %arg1, [4 x <vscale x 16 x i1>] %arg2, [2 x <vscale x 16 x i1>] %arg3) nounwind {
+; CHECK: name: callee_with_svepred_arg_2xv16i1_4xv16i1_2xv16i1
+; CHECK: [[P3:%[0-9]+]]:ppr = COPY $p3
+; CHECK: [[P2:%[0-9]+]]:ppr = COPY $p2
+; CHECK: [[X0:%[0-9]+]]:gpr64common = COPY $x0
+; CHECK: [[P1:%[0-9]+]]:ppr = COPY $p1
+; CHECK: [[P0:%[0-9]+]]:ppr = COPY $p0
+; CHECK: [[OFFSET3:%[0-9]+]]:gpr64 = CNTD_XPiI 31, 3, implicit $vg
+; CHECK: [[ADDR3:%[0-9]+]]:gpr64common = ADDXrr [[X0]], killed [[OFFSET3]]
+; CHECK: [[P7:%[0-9]+]]:ppr = LDR_PXI killed [[ADDR3]], 0 :: (load (<vscale x 1 x s16>))
+; CHECK: [[OFFSET2:%[0-9]+]]:gpr64 = CNTW_XPiI 31, 1, implicit $vg
+; CHECK: [[ADDR2:%[0-9]+]]:gpr64common = ADDXrr [[X0]], killed [[OFFSET2]]
+; CHECK: [[P6:%[0-9]+]]:ppr = LDR_PXI killed [[ADDR2]], 0 :: (load (<vscale x 1 x s16>))
+; CHECK: [[OFFSET1:%[0-9]+]]:gpr64 = CNTD_XPiI 31, 1, implicit $vg
+; CHECK: [[ADDR1:%[0-9]+]]:gpr64common = nuw ADDXrr [[X0]], killed [[OFFSET1]]
+; CHECK: [[P5:%[0-9]+]]:ppr = LDR_PXI killed [[ADDR1]], 0 :: (load (<vscale x 1 x s16>))
+; CHECK: [[P4:%[0-9]+]]:ppr = LDR_PXI [[X0]], 0 :: (load (<vscale x 1 x s16>))
+; CHECK: [[RES0:%[0-9]+]]:ppr = AND_PPzPP [[P0]], [[P0]], killed [[P4]]
+; CHECK: [[RES1:%[0-9]+]]:ppr = AND_PPzPP [[P1]], [[P1]], killed [[P5]]
+; CHECK: [[RES2:%[0-9]+]]:ppr = AND_PPzPP [[P2]], [[P2]], killed [[P6]]
+; CHECK: [[RES3:%[0-9]+]]:ppr = AND_PPzPP [[P3]], [[P3]], killed [[P7]]
+; CHECK: $p0 = COPY [[RES0]]
+; CHECK: $p1 = COPY [[RES1]]
+; CHECK: $p2 = COPY [[RES2]]
+; CHECK: $p3 = COPY [[RES3]]
+; CHECK: RET_ReallyLR implicit $p0, implicit $p1, implicit $p2, implicit $p3
+ %p0 = extractvalue [2 x <vscale x 16 x i1>] %arg1, 0
+ %p1 = extractvalue [2 x <vscale x 16 x i1>] %arg1, 1
+ %p2 = extractvalue [2 x <vscale x 16 x i1>] %arg3, 0
+ %p3 = extractvalue [2 x <vscale x 16 x i1>] %arg3, 1
+ %p4 = extractvalue [4 x <vscale x 16 x i1>] %arg2, 0
+ %p5 = extractvalue [4 x <vscale x 16 x i1>] %arg2, 1
+ %p6 = extractvalue [4 x <vscale x 16 x i1>] %arg2, 2
+ %p7 = extractvalue [4 x <vscale x 16 x i1>] %arg2, 3
+ %r0 = and <vscale x 16 x i1> %p0, %p4
+ %r1 = and <vscale x 16 x i1> %p1, %p5
+ %r2 = and <vscale x 16 x i1> %p2, %p6
+ %r3 = and <vscale x 16 x i1> %p3, %p7
+ %1 = insertvalue [4 x <vscale x 16 x i1>] undef, <vscale x 16 x i1> %r0, 0
+ %2 = insertvalue [4 x <vscale x 16 x i1>] %1, <vscale x 16 x i1> %r1, 1
+ %3 = insertvalue [4 x <vscale x 16 x i1>] %2, <vscale x 16 x i1> %r2, 2
+ %4 = insertvalue [4 x <vscale x 16 x i1>] %3, <vscale x 16 x i1> %r3, 3
+ ret [4 x <vscale x 16 x i1>] %4
+}
+
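Taken together, the tests above exercise one rule: predicate arguments are assigned to p0~p3 in order, and an aggregate that can no longer be fully assigned is instead spilled by the caller to a scalable stack slot whose address travels in x0, from which the callee reloads the elements at CNT*-scaled offsets. A minimal sketch of a signature that takes the indirect path (@sketch and its names are illustrative):

define <vscale x 16 x i1> @sketch(<vscale x 16 x i1> %p, [4 x <vscale x 16 x i1>] %agg) {
  ; %p lands in p0; the 4-element aggregate no longer fits in p1-p3,
  ; so the caller passes &%agg in x0 and the callee loads elements from it
  %elt = extractvalue [4 x <vscale x 16 x i1>] %agg, 0
  ret <vscale x 16 x i1> %elt
}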
; Test that z8 and z9, passed by reference, are loaded from a location that is passed on the stack.
; i.e. x0 = %x0
; :
diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention.ll
index 0a45244f12be..bfb750517cbf 100644
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention.ll
@@ -128,6 +128,52 @@ define <vscale x 4 x i1> @sve_signature_pred(<vscale x 4 x i1> %arg1, <vscale x
ret <vscale x 4 x i1> %arg2
}
+; Test that a scalable predicate argument of type [1 x <vscale x 4 x i1>] is properly assigned to a P register.
+; CHECK-LABEL: name: sve_signature_pred_1xv4i1
+; CHECK: [[RES:%[0-9]+]]:ppr = COPY $p1
+; CHECK: $p0 = COPY [[RES]]
+; CHECK: RET_ReallyLR implicit $p0
+define [1 x <vscale x 4 x i1>] @sve_signature_pred_1xv4i1([1 x <vscale x 4 x i1>] %arg1, [1 x <vscale x 4 x i1>] %arg2) nounwind {
+ ret [1 x <vscale x 4 x i1>] %arg2
+}
+
+; Test that up to two scalable predicate arguments of type [2 x <vscale x 4 x i1>] can be assigned to P registers.
+; CHECK-LABEL: name: sve_signature_pred_2xv4i1
+; CHECK: [[RES1:%[0-9]+]]:ppr = COPY $p3
+; CHECK: [[RES0:%[0-9]+]]:ppr = COPY $p2
+; CHECK: $p0 = COPY [[RES0]]
+; CHECK: $p1 = COPY [[RES1]]
+; CHECK: RET_ReallyLR implicit $p0, implicit $p1
+define [2 x <vscale x 4 x i1>] @sve_signature_pred_2xv4i1([2 x <vscale x 4 x i1>] %arg1, [2 x <vscale x 4 x i1>] %arg2) nounwind {
+ ret [2 x <vscale x 4 x i1>] %arg2
+}
+
+; Test that a scalable predicate argument of type [1 x <vscale x 32 x i1>] is assigned to two P registers.
+; CHECK-LABEL: name: sve_signature_pred_1xv32i1
+; CHECK: [[RES1:%[0-9]+]]:ppr = COPY $p3
+; CHECK: [[RES0:%[0-9]+]]:ppr = COPY $p2
+; CHECK: $p0 = COPY [[RES0]]
+; CHECK: $p1 = COPY [[RES1]]
+; CHECK: RET_ReallyLR implicit $p0, implicit $p1
+define [1 x <vscale x 32 x i1>] @sve_signature_pred_1xv32i1([1 x <vscale x 32 x i1>] %arg1, [1 x <vscale x 32 x i1>] %arg2) nounwind {
+ ret [1 x <vscale x 32 x i1>] %arg2
+}
+
+; Test that a scalable predicate argument of type [2 x <vscale x 32 x i1>] is assigned to four P registers.
+; CHECK-LABEL: name: sve_signature_pred_2xv32i1
+; CHECK: [[RES3:%[0-9]+]]:ppr = COPY $p3
+; CHECK: [[RES2:%[0-9]+]]:ppr = COPY $p2
+; CHECK: [[RES1:%[0-9]+]]:ppr = COPY $p1
+; CHECK: [[RES0:%[0-9]+]]:ppr = COPY $p0
+; CHECK: $p0 = COPY [[RES0]]
+; CHECK: $p1 = COPY [[RES1]]
+; CHECK: $p2 = COPY [[RES2]]
+; CHECK: $p3 = COPY [[RES3]]
+; CHECK: RET_ReallyLR implicit $p0, implicit $p1, implicit $p2, implicit $p3
+define [2 x <vscale x 32 x i1>] @sve_signature_pred_2xv32i1([2 x <vscale x 32 x i1>] %arg1) nounwind {
+ ret [2 x <vscale x 32 x i1>] %arg1
+}
+
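A <vscale x 32 x i1> value is wider than a single predicate register, so it is split into two <vscale x 16 x i1> halves, each occupying its own P register; [2 x <vscale x 32 x i1>] therefore consumes all of p0~p3. An illustrative sketch (@sketch_wide is a made-up name, not part of the test):

define <vscale x 32 x i1> @sketch_wide(<vscale x 32 x i1> %p) {
  ; %p arrives split across p0 and p1, and the return travels back the same way
  ret <vscale x 32 x i1> %p
}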
; CHECK-LABEL: name: sve_signature_vec_caller
; CHECK-DAG: [[ARG2:%[0-9]+]]:zpr = COPY $z1
; CHECK-DAG: [[ARG1:%[0-9]+]]:zpr = COPY $z0
@@ -156,6 +202,84 @@ define <vscale x 4 x i1> @sve_signature_pred_caller(<vscale x 4 x i1> %arg1, <vs
ret <vscale x 4 x i1> %res
}
+; CHECK-LABEL: name: sve_signature_pred_1xv4i1_caller
+; CHECK-DAG: [[ARG2:%[0-9]+]]:ppr = COPY $p1
+; CHECK-DAG: [[ARG1:%[0-9]+]]:ppr = COPY $p0
+; CHECK-DAG: $p0 = COPY [[ARG2]]
+; CHECK-DAG: $p1 = COPY [[ARG1]]
+; CHECK-NEXT: BL @sve_signature_pred_1xv4i1, csr_aarch64_sve_aapcs
+; CHECK: [[RES:%[0-9]+]]:ppr = COPY $p0
+; CHECK: $p0 = COPY [[RES]]
+; CHECK: RET_ReallyLR implicit $p0
+define [1 x <vscale x 4 x i1>] @sve_signature_pred_1xv4i1_caller([1 x <vscale x 4 x i1>] %arg1, [1 x <vscale x 4 x i1>] %arg2) nounwind {
+ %res = call [1 x <vscale x 4 x i1>] @sve_signature_pred_1xv4i1([1 x <vscale x 4 x i1>] %arg2, [1 x <vscale x 4 x i1>] %arg1)
+ ret [1 x <vscale x 4 x i1>] %res
+}
+
+; CHECK-LABEL: name: sve_signature_pred_2xv4i1_caller
+; CHECK-DAG: [[ARG2_2:%[0-9]+]]:ppr = COPY $p3
+; CHECK-DAG: [[ARG2_1:%[0-9]+]]:ppr = COPY $p2
+; CHECK-DAG: [[ARG1_2:%[0-9]+]]:ppr = COPY $p1
+; CHECK-DAG: [[ARG1_1:%[0-9]+]]:ppr = COPY $p0
+; CHECK-DAG: $p0 = COPY [[ARG2_1]]
+; CHECK-DAG: $p1 = COPY [[ARG2_2]]
+; CHECK-DAG: $p2 = COPY [[ARG1_1]]
+; CHECK-DAG: $p3 = COPY [[ARG1_2]]
+; CHECK-NEXT: BL @sve_signature_pred_2xv4i1, csr_aarch64_sve_aapcs
+; CHECK: [[RES0:%[0-9]+]]:ppr = COPY $p0
+; CHECK: [[RES1:%[0-9]+]]:ppr = COPY $p1
+; CHECK: $p0 = COPY [[RES0]]
+; CHECK: $p1 = COPY [[RES1]]
+; CHECK: RET_ReallyLR implicit $p0, implicit $p1
+define [2 x <vscale x 4 x i1>] @sve_signature_pred_2xv4i1_caller([2 x <vscale x 4 x i1>] %arg1, [2 x <vscale x 4 x i1>] %arg2) nounwind {
+ %res = call [2 x <vscale x 4 x i1>] @sve_signature_pred_2xv4i1([2 x <vscale x 4 x i1>] %arg2, [2 x <vscale x 4 x i1>] %arg1)
+ ret [2 x <vscale x 4 x i1>] %res
+}
+
+; CHECK-LABEL: name: sve_signature_pred_1xv32i1_caller
+; CHECK-DAG: [[ARG2_2:%[0-9]+]]:ppr = COPY $p3
+; CHECK-DAG: [[ARG2_1:%[0-9]+]]:ppr = COPY $p2
+; CHECK-DAG: [[ARG1_2:%[0-9]+]]:ppr = COPY $p1
+; CHECK-DAG: [[ARG1_1:%[0-9]+]]:ppr = COPY $p0
+; CHECK-DAG: $p0 = COPY [[ARG2_1]]
+; CHECK-DAG: $p1 = COPY [[ARG2_2]]
+; CHECK-DAG: $p2 = COPY [[ARG1_1]]
+; CHECK-DAG: $p3 = COPY [[ARG1_2]]
+; CHECK-NEXT: BL @sve_signature_pred_1xv32i1, csr_aarch64_sve_aapcs
+; CHECK: [[RES0:%[0-9]+]]:ppr = COPY $p0
+; CHECK: [[RES1:%[0-9]+]]:ppr = COPY $p1
+; CHECK: $p0 = COPY [[RES0]]
+; CHECK: $p1 = COPY [[RES1]]
+; CHECK: RET_ReallyLR implicit $p0, implicit $p1
+define [1 x <vscale x 32 x i1>] @sve_signature_pred_1xv32i1_caller([1 x <vscale x 32 x i1>] %arg1, [1 x <vscale x 32 x i1>] %arg2) nounwind {
+ %res = call [1 x <vscale x 32 x i1>] @sve_signature_pred_1xv32i1([1 x <vscale x 32 x i1>] %arg2, [1 x <vscale x 32 x i1>] %arg1)
+ ret [1 x <vscale x 32 x i1>] %res
+}
+
+; CHECK-LABEL: name: sve_signature_pred_2xv32i1_caller
+; CHECK-DAG: [[ARG3:%[0-9]+]]:ppr = COPY $p3
+; CHECK-DAG: [[ARG2:%[0-9]+]]:ppr = COPY $p2
+; CHECK-DAG: [[ARG1:%[0-9]+]]:ppr = COPY $p1
+; CHECK-DAG: [[ARG0:%[0-9]+]]:ppr = COPY $p0
+; CHECK-DAG: $p0 = COPY [[ARG0]]
+; CHECK-DAG: $p1 = COPY [[ARG1]]
+; CHECK-DAG: $p2 = COPY [[ARG2]]
+; CHECK-DAG: $p3 = COPY [[ARG3]]
+; CHECK-NEXT: BL @sve_signature_pred_2xv32i1, csr_aarch64_sve_aapcs
+; CHECK: [[RES0:%[0-9]+]]:ppr = COPY $p0
+; CHECK: [[RES1:%[0-9]+]]:ppr = COPY $p1
+; CHECK: [[RES2:%[0-9]+]]:ppr = COPY $p2
+; CHECK: [[RES3:%[0-9]+]]:ppr = COPY $p3
+; CHECK: $p0 = COPY [[RES0]]
+; CHECK: $p1 = COPY [[RES1]]
+; CHECK: $p2 = COPY [[RES2]]
+; CHECK: $p3 = COPY [[RES3]]
+; CHECK: RET_ReallyLR implicit $p0, implicit $p1, implicit $p2, implicit $p3
+define [2 x <vscale x 32 x i1>] @sve_signature_pred_2xv32i1_caller([2 x <vscale x 32 x i1>] %arg1) {
+ %res = call [2 x <vscale x 32 x i1>] @sve_signature_pred_2xv32i1([2 x <vscale x 32 x i1>] %arg1)
+ ret [2 x <vscale x 32 x i1>] %res
+}
+
; Test that functions returning or taking SVE arguments use the correct
; callee-saved set when using the default C calling convention (as opposed
; to aarch64_sve_vector_pcs)
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll
index 28094c7b68e7..276f23703df3 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle-tbl.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve2 -force-streaming-compatible-sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s -check-prefixes=CHECK,SVE2_128
-; RUN: llc -mattr=+sve2 -force-streaming-compatible-sve -aarch64-sve-vector-bits-min=128 < %s | FileCheck %s -check-prefixes=CHECK,SVE2_128_NOMAX
-; RUN: llc -mattr=+sve2 -force-streaming-compatible-sve < %s | FileCheck %s -check-prefixes=CHECK,SVE2_NOMIN_NOMAX
-; RUN: llc -mattr=+sve2 -force-streaming-compatible-sve -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s -check-prefixes=CHECK,SVE2_MIN_256_NOMAX
+; RUN: llc -mattr=+sve2 -force-streaming-compatible -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s -check-prefixes=CHECK,SVE2_128
+; RUN: llc -mattr=+sve2 -force-streaming-compatible -aarch64-sve-vector-bits-min=128 < %s | FileCheck %s -check-prefixes=CHECK,SVE2_128_NOMAX
+; RUN: llc -mattr=+sve2 -force-streaming-compatible < %s | FileCheck %s -check-prefixes=CHECK,SVE2_NOMIN_NOMAX
+; RUN: llc -mattr=+sve2 -force-streaming-compatible -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s -check-prefixes=CHECK,SVE2_MIN_256_NOMAX
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll b/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll
index 1a2ab8d4253a..b0b6a6a530dd 100644
--- a/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll
@@ -2,7 +2,7 @@
; RUN: llc -mattr=+sve < %s | FileCheck %s
; Streaming-compatible SVE doesn't include FADDA, so this shouldn't compile!
-; RUN: not --crash llc -mattr=+sve -force-streaming-compatible-sve < %s
+; RUN: not --crash llc -mattr=+sve -force-streaming-compatible < %s
target triple = "aarch64-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-pr92779.ll b/llvm/test/CodeGen/AArch64/sve-pr92779.ll
new file mode 100644
index 000000000000..e25794817add
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-pr92779.ll
@@ -0,0 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64 -mattr=+sve2 < %s | FileCheck %s
+
+define void @main(ptr %0) {
+; CHECK-LABEL: main:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov z0.d, #0 // =0x0
+; CHECK-NEXT: ptrue p0.d, vl1
+; CHECK-NEXT: mov z1.d, z0.d
+; CHECK-NEXT: ext z1.b, z1.b, z0.b, #8
+; CHECK-NEXT: uzp1 v1.2s, v0.2s, v1.2s
+; CHECK-NEXT: neg v1.2s, v1.2s
+; CHECK-NEXT: smov x8, v1.s[0]
+; CHECK-NEXT: smov x9, v1.s[1]
+; CHECK-NEXT: mov z0.d, p0/m, x8
+; CHECK-NEXT: mov z0.d, p0/m, x9
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: st1d { z0.d }, p0, [x0]
+; CHECK-NEXT: ret
+"entry":
+ %1 = bitcast <vscale x 2 x i64> zeroinitializer to <vscale x 4 x i32>
+ %a = extractelement <vscale x 4 x i32> %1, i64 0
+ %b = insertelement <2 x i32> zeroinitializer, i32 %a, i64 0
+ %2 = bitcast <vscale x 2 x i64> zeroinitializer to <vscale x 4 x i32>
+ %c = extractelement <vscale x 4 x i32> %2, i64 2
+ %d = insertelement <2 x i32> %b, i32 %c, i64 1
+ %e = sub <2 x i32> zeroinitializer, %d
+ %f = extractelement <2 x i32> %e, i64 0
+ %g = sext i32 %f to i64
+ %h = insertelement <vscale x 2 x i64> zeroinitializer, i64 %g, i64 0
+ %i = extractelement <2 x i32> %e, i64 1
+ %j = sext i32 %i to i64
+ %k = insertelement <vscale x 2 x i64> %h, i64 %j, i64 0
+ store <vscale x 2 x i64> %k, ptr %0, align 16
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll
index d81f725eaefc..e843537c10a3 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll
index d547f99a0230..aa42d5c2a8c1 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll
index e3cc74f766ee..260ad16581f1 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll
index 74a4aab15597..9a07bd8bd5ac 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll
index 0c490a662a79..aec434b4819d 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll
index 86494c4be501..82e75d6efda3 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
index 0aefba2d4c6a..040e5861e981 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll
index 25ecd7a8d7e3..45a804becbc5 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll
index a752e119b2fb..9c3b5e14289d 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
index f017eead92cf..21ce689f68e2 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE
-; RUN: llc -mattr=+sve2 -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s --check-prefixes=CHECK,SVE
+; RUN: llc -mattr=+sve2 -force-streaming-compatible < %s | FileCheck %s --check-prefixes=CHECK,SVE2
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s --check-prefixes=CHECK,SVE2
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll
index c2d6ed4e9ccf..b0a82e699939 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-compares.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-compares.ll
index 465cc179a3b9..cbd0ad66fba7 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-compares.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-compares.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll
index 9bdde14e8d83..57d072a7bcd6 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll
index 244a40510173..6a2dc3c71825 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll
index cbe71d715a8f..153a04f48657 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll
index 94a74763aa0e..6945a6102c05 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce-fa64.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce-fa64.ll
index b56e67d95ba0..e239ff5e35fd 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce-fa64.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce-fa64.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sme-fa64 -force-streaming-compatible-sve < %s | FileCheck %s -check-prefix=FA64
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s -check-prefix=NO-FA64
+; RUN: llc -mattr=+sme-fa64 -force-streaming-compatible < %s | FileCheck %s -check-prefix=FA64
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s -check-prefix=NO-FA64
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
index df9613a30e40..78ae7bb6cf30 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll
index 7ddc641f366c..412c27cb82f1 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-select.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-select.ll
index 7d36925fdc57..89697cde848b 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-select.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-select.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll
index bf8a335a8503..5840ffb20994 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
index 30a4f04a3d2b..c1c7b5c05f5d 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll
index 4aa965777c74..ff38db8c10c0 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll
index 8baa87c6d686..ee1706bc7c35 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE
-; RUN: llc -mattr=+sve2 -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s --check-prefixes=CHECK,SVE
+; RUN: llc -mattr=+sve2 -force-streaming-compatible < %s | FileCheck %s --check-prefixes=CHECK,SVE2
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s --check-prefixes=CHECK,SVE2
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compares.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compares.ll
index 73c1eac99dd3..c2f3bbfb51dd 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compares.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compares.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll
index 5158dda37a8b..e6fd775b4cfb 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE
-; RUN: llc -mattr=+sve2 -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s --check-prefixes=CHECK,SVE
+; RUN: llc -mattr=+sve2 -force-streaming-compatible < %s | FileCheck %s --check-prefixes=CHECK,SVE2
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s --check-prefixes=CHECK,SVE2
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll
index c7a89612d278..e40668a8696e 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE
-; RUN: llc -mattr=+sve2 -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s --check-prefixes=CHECK,SVE
+; RUN: llc -mattr=+sve2 -force-streaming-compatible < %s | FileCheck %s --check-prefixes=CHECK,SVE2
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s --check-prefixes=CHECK,SVE2
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-immediates.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-immediates.ll
index f028b3eeca25..54276bb4ba01 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-immediates.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-immediates.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-log.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-log.ll
index 4d70c1dd1c91..40824ba9ae9c 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-log.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-log.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll
index 50cf9b73d9a7..74ee5482a60c 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mla-neon-fa64.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mla-neon-fa64.ll
index 149ad6d1e267..3ff6983210a0 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mla-neon-fa64.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mla-neon-fa64.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sme-fa64 -force-streaming-compatible-sve < %s | FileCheck %s -check-prefix=FA64
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s -check-prefix=NO-FA64
+; RUN: llc -mattr=+sme-fa64 -force-streaming-compatible < %s | FileCheck %s -check-prefix=FA64
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s -check-prefix=NO-FA64
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll
index cb7fa53eac51..8917f43002da 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE
-; RUN: llc -mattr=+sve2 -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s --check-prefixes=CHECK,SVE2
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s --check-prefixes=CHECK,SVE
+; RUN: llc -mattr=+sve2 -force-streaming-compatible < %s | FileCheck %s --check-prefixes=CHECK,SVE2
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s --check-prefixes=CHECK,SVE2
; This test only tests the legal types for a given vector width, as mulh nodes
; do not get generated for non-legal types.
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll
index 751f43768a51..1123907f3389 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
index d373a9063f85..4ae7586fca16 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-select.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-select.ll
index 906112f7ac39..bfffe4b6315d 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-select.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-select.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll
index 9ed52e321d9a..9319bd69c25f 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
index a9b52c93006d..27dbfc9a23a8 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll
index 81bbaa92d4b4..3775a64a89a0 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll
index 318285ded5a8..0b6152340f65 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -force-streaming-compatible < %s | FileCheck %s --check-prefix=NONEON-NOSVE
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-limit-duplane.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-limit-duplane.ll
index 885030861469..918f0ccc0cf6 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-limit-duplane.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-limit-duplane.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll
index 8ca8e6980913..8c69d5b0bb37 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-log-reduce.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-log-reduce.ll
index c4aeb4465c53..ef52eadc5d3b 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-log-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-log-reduce.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll
index ca58099244cf..4f8f8c2e4b24 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll
index f2b3f9b12ea7..bd6b96889b4c 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll
index b5adea594242..aef446a90df6 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-rev.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-rev.ll
index 00413302798c..6d91253caae5 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-rev.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-rev.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll
index cb73030306b0..8808ad9a23d7 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll
index ab7c42b3e9e3..8039bd096bcb 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll
index 362612518787..9741147b332e 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll
index bfa931044bc5..726fd28c90ae 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll
index 9dd42e7831e0..c022bf85e67e 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll
index 6f82c97f3b87..38aaf860b729 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s --check-prefix=NONEON-NOSVE
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -force-streaming-compatible < %s | FileCheck %s --check-prefix=NONEON-NOSVE
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll
index 323d5278592f..649b13fa8a1e 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll
index 06709ca3685c..c7435bdbec94 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll
index 838db0ce8185..9e04fc236836 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
; Test we can code generate patterns of the form:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll
index 7e3a175c40d2..b34fe438a063 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll
index 70219dd30f76..9e56462df388 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll
index 175731480407..304823c9e641 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-test-register-mov.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-test-register-mov.ll
index 337a2134de5b..6c9c05560566 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-test-register-mov.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-test-register-mov.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming-compatible-sve < %s | FileCheck %s
+; RUN: llc -mattr=+sve -force-streaming-compatible < %s | FileCheck %s
+; RUN: llc -mattr=+sme -force-streaming-compatible < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/trunc-to-tbl.ll b/llvm/test/CodeGen/AArch64/trunc-to-tbl.ll
index 18cd4cc2111a..c4a58ba12dc6 100644
--- a/llvm/test/CodeGen/AArch64/trunc-to-tbl.ll
+++ b/llvm/test/CodeGen/AArch64/trunc-to-tbl.ll
@@ -571,29 +571,27 @@ define void @trunc_v8i19_to_v8i8_in_loop(ptr %A, ptr %dst) {
; CHECK-NEXT: mov x8, xzr
; CHECK-NEXT: LBB5_1: ; %loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ldp x10, x9, [x0]
-; CHECK-NEXT: ldrb w13, [x0, #18]
-; CHECK-NEXT: ldrh w14, [x0, #16]
+; CHECK-NEXT: ldp x9, x10, [x0]
+; CHECK-NEXT: ldrb w14, [x0, #18]
+; CHECK-NEXT: ldrh w15, [x0, #16]
; CHECK-NEXT: add x0, x0, #32
-; CHECK-NEXT: ubfx x12, x9, #12, #20
-; CHECK-NEXT: fmov s0, w10
-; CHECK-NEXT: lsr x11, x10, #19
-; CHECK-NEXT: lsr x15, x9, #31
-; CHECK-NEXT: fmov s1, w12
-; CHECK-NEXT: lsr x12, x9, #50
-; CHECK-NEXT: mov.s v0[1], w11
-; CHECK-NEXT: orr w11, w14, w13, lsl #16
-; CHECK-NEXT: lsr x13, x10, #38
-; CHECK-NEXT: lsr x10, x10, #57
-; CHECK-NEXT: mov.s v1[1], w15
-; CHECK-NEXT: orr w12, w12, w11, lsl #14
-; CHECK-NEXT: orr w9, w10, w9, lsl #7
-; CHECK-NEXT: lsr w10, w11, #5
-; CHECK-NEXT: mov.s v0[2], w13
+; CHECK-NEXT: ubfx x12, x10, #12, #20
+; CHECK-NEXT: fmov s1, w9
+; CHECK-NEXT: lsr x11, x9, #19
+; CHECK-NEXT: lsr x13, x10, #31
+; CHECK-NEXT: fmov s0, w12
+; CHECK-NEXT: lsr x12, x9, #38
+; CHECK-NEXT: extr x9, x10, x9, #57
+; CHECK-NEXT: mov.s v1[1], w11
+; CHECK-NEXT: orr x11, x15, x14, lsl #16
+; CHECK-NEXT: mov.s v0[1], w13
+; CHECK-NEXT: extr x13, x11, x10, #50
+; CHECK-NEXT: ubfx x10, x11, #5, #27
; CHECK-NEXT: mov.s v1[2], w12
-; CHECK-NEXT: mov.s v0[3], w9
-; CHECK-NEXT: mov.s v1[3], w10
-; CHECK-NEXT: uzp1.8h v0, v0, v1
+; CHECK-NEXT: mov.s v0[2], w13
+; CHECK-NEXT: mov.s v1[3], w9
+; CHECK-NEXT: mov.s v0[3], w10
+; CHECK-NEXT: uzp1.8h v0, v1, v0
; CHECK-NEXT: xtn.8b v0, v0
; CHECK-NEXT: str d0, [x1, x8, lsl #3]
; CHECK-NEXT: add x8, x8, #1
@@ -608,35 +606,34 @@ define void @trunc_v8i19_to_v8i8_in_loop(ptr %A, ptr %dst) {
; CHECK-BE-NEXT: .LBB5_1: // %loop
; CHECK-BE-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-BE-NEXT: ldp x10, x9, [x0]
-; CHECK-BE-NEXT: ldrb w16, [x0, #18]
-; CHECK-BE-NEXT: lsr x11, x9, #40
-; CHECK-BE-NEXT: ubfx x12, x9, #33, #7
-; CHECK-BE-NEXT: lsr x15, x10, #45
-; CHECK-BE-NEXT: lsr x13, x10, #40
-; CHECK-BE-NEXT: ubfx x14, x10, #26, #14
-; CHECK-BE-NEXT: orr w11, w12, w11, lsl #7
-; CHECK-BE-NEXT: ldrh w12, [x0, #16]
-; CHECK-BE-NEXT: fmov s0, w15
-; CHECK-BE-NEXT: orr w13, w14, w13, lsl #14
-; CHECK-BE-NEXT: ubfx x14, x9, #14, #18
+; CHECK-BE-NEXT: ldrh w16, [x0, #16]
+; CHECK-BE-NEXT: ldrb w17, [x0, #18]
; CHECK-BE-NEXT: add x0, x0, #32
-; CHECK-BE-NEXT: fmov s1, w11
-; CHECK-BE-NEXT: orr w11, w16, w12, lsl #8
-; CHECK-BE-NEXT: lsl x12, x9, #24
-; CHECK-BE-NEXT: mov v0.s[1], w13
+; CHECK-BE-NEXT: lsl x11, x9, #24
+; CHECK-BE-NEXT: lsr x12, x9, #40
+; CHECK-BE-NEXT: lsr x13, x10, #45
+; CHECK-BE-NEXT: lsl x14, x10, #24
+; CHECK-BE-NEXT: lsr x15, x10, #40
+; CHECK-BE-NEXT: extr x12, x12, x11, #57
+; CHECK-BE-NEXT: fmov s0, w13
; CHECK-BE-NEXT: ubfx x13, x10, #7, #25
+; CHECK-BE-NEXT: extr x14, x15, x14, #50
+; CHECK-BE-NEXT: ubfx x15, x9, #14, #18
; CHECK-BE-NEXT: extr x9, x10, x9, #40
-; CHECK-BE-NEXT: orr w12, w11, w12
-; CHECK-BE-NEXT: mov v1.s[1], w14
-; CHECK-BE-NEXT: lsr w12, w12, #19
+; CHECK-BE-NEXT: fmov s1, w12
+; CHECK-BE-NEXT: orr w12, w17, w16, lsl #8
+; CHECK-BE-NEXT: mov v0.s[1], w14
; CHECK-BE-NEXT: ubfx x9, x9, #12, #20
+; CHECK-BE-NEXT: orr w11, w12, w11
+; CHECK-BE-NEXT: mov v1.s[1], w15
+; CHECK-BE-NEXT: lsr w11, w11, #19
; CHECK-BE-NEXT: mov v0.s[2], w13
-; CHECK-BE-NEXT: mov v1.s[2], w12
+; CHECK-BE-NEXT: mov v1.s[2], w11
; CHECK-BE-NEXT: mov v0.s[3], w9
; CHECK-BE-NEXT: add x9, x1, x8, lsl #3
; CHECK-BE-NEXT: add x8, x8, #1
; CHECK-BE-NEXT: cmp x8, #1000
-; CHECK-BE-NEXT: mov v1.s[3], w11
+; CHECK-BE-NEXT: mov v1.s[3], w12
; CHECK-BE-NEXT: uzp1 v0.8h, v0.8h, v1.8h
; CHECK-BE-NEXT: xtn v0.8b, v0.8h
; CHECK-BE-NEXT: st1 { v0.8b }, [x9]
@@ -650,35 +647,34 @@ define void @trunc_v8i19_to_v8i8_in_loop(ptr %A, ptr %dst) {
; CHECK-DISABLE-NEXT: .LBB5_1: // %loop
; CHECK-DISABLE-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-DISABLE-NEXT: ldp x10, x9, [x0]
-; CHECK-DISABLE-NEXT: ldrb w16, [x0, #18]
-; CHECK-DISABLE-NEXT: lsr x11, x9, #40
-; CHECK-DISABLE-NEXT: ubfx x12, x9, #33, #7
-; CHECK-DISABLE-NEXT: lsr x15, x10, #45
-; CHECK-DISABLE-NEXT: lsr x13, x10, #40
-; CHECK-DISABLE-NEXT: ubfx x14, x10, #26, #14
-; CHECK-DISABLE-NEXT: orr w11, w12, w11, lsl #7
-; CHECK-DISABLE-NEXT: ldrh w12, [x0, #16]
-; CHECK-DISABLE-NEXT: fmov s0, w15
-; CHECK-DISABLE-NEXT: orr w13, w14, w13, lsl #14
-; CHECK-DISABLE-NEXT: ubfx x14, x9, #14, #18
+; CHECK-DISABLE-NEXT: ldrh w16, [x0, #16]
+; CHECK-DISABLE-NEXT: ldrb w17, [x0, #18]
; CHECK-DISABLE-NEXT: add x0, x0, #32
-; CHECK-DISABLE-NEXT: fmov s1, w11
-; CHECK-DISABLE-NEXT: orr w11, w16, w12, lsl #8
-; CHECK-DISABLE-NEXT: lsl x12, x9, #24
-; CHECK-DISABLE-NEXT: mov v0.s[1], w13
+; CHECK-DISABLE-NEXT: lsl x11, x9, #24
+; CHECK-DISABLE-NEXT: lsr x12, x9, #40
+; CHECK-DISABLE-NEXT: lsr x13, x10, #45
+; CHECK-DISABLE-NEXT: lsl x14, x10, #24
+; CHECK-DISABLE-NEXT: lsr x15, x10, #40
+; CHECK-DISABLE-NEXT: extr x12, x12, x11, #57
+; CHECK-DISABLE-NEXT: fmov s0, w13
; CHECK-DISABLE-NEXT: ubfx x13, x10, #7, #25
+; CHECK-DISABLE-NEXT: extr x14, x15, x14, #50
+; CHECK-DISABLE-NEXT: ubfx x15, x9, #14, #18
; CHECK-DISABLE-NEXT: extr x9, x10, x9, #40
-; CHECK-DISABLE-NEXT: orr w12, w11, w12
-; CHECK-DISABLE-NEXT: mov v1.s[1], w14
-; CHECK-DISABLE-NEXT: lsr w12, w12, #19
+; CHECK-DISABLE-NEXT: fmov s1, w12
+; CHECK-DISABLE-NEXT: orr w12, w17, w16, lsl #8
+; CHECK-DISABLE-NEXT: mov v0.s[1], w14
; CHECK-DISABLE-NEXT: ubfx x9, x9, #12, #20
+; CHECK-DISABLE-NEXT: orr w11, w12, w11
+; CHECK-DISABLE-NEXT: mov v1.s[1], w15
+; CHECK-DISABLE-NEXT: lsr w11, w11, #19
; CHECK-DISABLE-NEXT: mov v0.s[2], w13
-; CHECK-DISABLE-NEXT: mov v1.s[2], w12
+; CHECK-DISABLE-NEXT: mov v1.s[2], w11
; CHECK-DISABLE-NEXT: mov v0.s[3], w9
; CHECK-DISABLE-NEXT: add x9, x1, x8, lsl #3
; CHECK-DISABLE-NEXT: add x8, x8, #1
; CHECK-DISABLE-NEXT: cmp x8, #1000
-; CHECK-DISABLE-NEXT: mov v1.s[3], w11
+; CHECK-DISABLE-NEXT: mov v1.s[3], w12
; CHECK-DISABLE-NEXT: uzp1 v0.8h, v0.8h, v1.8h
; CHECK-DISABLE-NEXT: xtn v0.8b, v0.8h
; CHECK-DISABLE-NEXT: st1 { v0.8b }, [x9]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir
index fed277d7d10d..85cfb9b320f1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir
@@ -81,14 +81,12 @@ body: |
; CHECK: liveins: $vgpr0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
- ; CHECK-NEXT: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[AND]](s32)
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[CTLZ_ZERO_UNDEF]], [[C1]]
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
- ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
- ; CHECK-NEXT: $vgpr0 = COPY [[AND1]](s32)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[AMDGPU_FFBH_U32:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[SHL]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[AMDGPU_FFBH_U32]], [[C1]]
+ ; CHECK-NEXT: $vgpr0 = COPY [[AND]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s16) = G_TRUNC %0
%2:_(s16) = G_CTLZ_ZERO_UNDEF %1
@@ -149,18 +147,15 @@ body: |
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[BITCAST]], [[C]](s32)
+ ; CHECK-NEXT: [[AMDGPU_FFBH_U32:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[SHL]](s32)
+ ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LSHR]], [[C]](s32)
+ ; CHECK-NEXT: [[AMDGPU_FFBH_U321:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[SHL2]](s32)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[BITCAST]], [[C1]]
- ; CHECK-NEXT: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[AND]](s32)
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[CTLZ_ZERO_UNDEF]], [[C]]
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
- ; CHECK-NEXT: [[CTLZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[LSHR]](s32)
- ; CHECK-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[CTLZ_ZERO_UNDEF1]], [[C]]
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SUB1]](s32)
- ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
- ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C1]]
- ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C]](s32)
- ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND1]], [[SHL]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[AMDGPU_FFBH_U32]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AMDGPU_FFBH_U321]], [[C1]]
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
; CHECK-NEXT: $vgpr0 = COPY [[BITCAST1]](<2 x s16>)
%0:_(<2 x s16>) = COPY $vgpr0
@@ -179,14 +174,12 @@ body: |
; CHECK: liveins: $vgpr0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
- ; CHECK-NEXT: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[AND]](s32)
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 25
- ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[CTLZ_ZERO_UNDEF]], [[C1]]
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
- ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
- ; CHECK-NEXT: $vgpr0 = COPY [[AND1]](s32)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 25
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[FFBH:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[SHL]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FFBH]], [[C1]]
+ ; CHECK-NEXT: $vgpr0 = COPY [[AND]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s7) = G_TRUNC %0
%2:_(s7) = G_CTLZ_ZERO_UNDEF %1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap-gfx11.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap-gfx11.mir
index ac98dca00be3..e3d31c702482 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap-gfx11.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap-gfx11.mir
@@ -1,18 +1,28 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
-# RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx1100 -o - -run-pass=legalizer %s | FileCheck -check-prefix=GFX1100 %s
-# RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx11-generic --amdhsa-code-object-version=6 -o - -run-pass=legalizer %s | FileCheck -check-prefix=GFX1100 %s
-# RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx1150 -o - -run-pass=legalizer %s | FileCheck -check-prefix=GFX1150 %s
+# RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx1100 -o - -run-pass=legalizer %s -verify-machineinstrs | FileCheck -check-prefix=GFX1100 %s
+# RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx11-generic --amdhsa-code-object-version=6 -o - -run-pass=legalizer %s -verify-machineinstrs | FileCheck -check-prefix=GFX1100 %s
+# RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx1150 -o - -run-pass=legalizer %s -verify-machineinstrs | FileCheck -check-prefix=GFX1150 %s
---
name: test_trap
body: |
bb.0:
; GFX1100-LABEL: name: test_trap
- ; GFX1100: successors: %bb.2(0x80000000)
+ ; GFX1100: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; GFX1100-NEXT: {{ $}}
; GFX1100-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX1100-NEXT: [[C1:%[0-9]+]]:_(p1) = G_CONSTANT i64 0
; GFX1100-NEXT: G_STORE [[C]](s32), [[C1]](p1) :: (store (s8), addrspace 1)
+ ; GFX1100-NEXT: S_CBRANCH_EXECNZ %bb.2, implicit $exec
+ ; GFX1100-NEXT: {{ $}}
+ ; GFX1100-NEXT: .1:
+ ; GFX1100-NEXT: successors:
+ ; GFX1100-NEXT: {{ $}}
+ ; GFX1100-NEXT: G_STORE [[C]](s32), [[C1]](p1) :: (store (s8), addrspace 1)
+ ; GFX1100-NEXT: {{ $}}
+ ; GFX1100-NEXT: .2:
+ ; GFX1100-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1100-NEXT: {{ $}}
; GFX1100-NEXT: S_TRAP 2
; GFX1100-NEXT: [[S_SENDMSG_RTN_B32_:%[0-9]+]]:sreg_32 = S_SENDMSG_RTN_B32 128
; GFX1100-NEXT: $ttmp2 = S_MOV_B32 $m0
@@ -21,18 +31,13 @@ body: |
; GFX1100-NEXT: $m0 = S_MOV_B32 [[S_OR_B32_]]
; GFX1100-NEXT: S_SENDMSG 1, implicit $exec, implicit $m0
; GFX1100-NEXT: $m0 = S_MOV_B32 $ttmp2
- ; GFX1100-NEXT: S_BRANCH %bb.2
- ; GFX1100-NEXT: {{ $}}
- ; GFX1100-NEXT: .1:
- ; GFX1100-NEXT: successors:
+ ; GFX1100-NEXT: S_BRANCH %bb.3
; GFX1100-NEXT: {{ $}}
- ; GFX1100-NEXT: G_STORE [[C]](s32), [[C1]](p1) :: (store (s8), addrspace 1)
- ; GFX1100-NEXT: {{ $}}
- ; GFX1100-NEXT: .2:
- ; GFX1100-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1100-NEXT: .3:
+ ; GFX1100-NEXT: successors: %bb.3(0x80000000)
; GFX1100-NEXT: {{ $}}
; GFX1100-NEXT: S_SETHALT 5
- ; GFX1100-NEXT: S_BRANCH %bb.2
+ ; GFX1100-NEXT: S_BRANCH %bb.3
;
; GFX1150-LABEL: name: test_trap
; GFX1150: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
@@ -45,5 +50,63 @@ body: |
G_STORE %0, %1 :: (store 1, addrspace 1)
G_TRAP
G_STORE %0, %1 :: (store 1, addrspace 1)
+...
+
+---
+name: test_fallthrough_trap
+body: |
+ ; GFX1100-LABEL: name: test_fallthrough_trap
+ ; GFX1100: bb.0:
+ ; GFX1100-NEXT: successors: %bb.1(0x80000000), %bb.2(0x00000000)
+ ; GFX1100-NEXT: {{ $}}
+ ; GFX1100-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX1100-NEXT: [[C1:%[0-9]+]]:_(p1) = G_CONSTANT i64 0
+ ; GFX1100-NEXT: G_STORE [[C]](s32), [[C1]](p1) :: (store (s8), addrspace 1)
+ ; GFX1100-NEXT: S_CBRANCH_EXECNZ %bb.2, implicit $exec
+ ; GFX1100-NEXT: {{ $}}
+ ; GFX1100-NEXT: bb.1:
+ ; GFX1100-NEXT: successors:
+ ; GFX1100-NEXT: {{ $}}
+ ; GFX1100-NEXT: G_STORE [[C]](s32), [[C1]](p1) :: (store (s8), addrspace 1)
+ ; GFX1100-NEXT: {{ $}}
+ ; GFX1100-NEXT: bb.2:
+ ; GFX1100-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1100-NEXT: {{ $}}
+ ; GFX1100-NEXT: S_TRAP 2
+ ; GFX1100-NEXT: [[S_SENDMSG_RTN_B32_:%[0-9]+]]:sreg_32 = S_SENDMSG_RTN_B32 128
+ ; GFX1100-NEXT: $ttmp2 = S_MOV_B32 $m0
+ ; GFX1100-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_SENDMSG_RTN_B32_]], 1023, implicit-def $scc
+ ; GFX1100-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[S_AND_B32_]], 1024, implicit-def $scc
+ ; GFX1100-NEXT: $m0 = S_MOV_B32 [[S_OR_B32_]]
+ ; GFX1100-NEXT: S_SENDMSG 1, implicit $exec, implicit $m0
+ ; GFX1100-NEXT: $m0 = S_MOV_B32 $ttmp2
+ ; GFX1100-NEXT: S_BRANCH %bb.3
+ ; GFX1100-NEXT: {{ $}}
+ ; GFX1100-NEXT: bb.3:
+ ; GFX1100-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1100-NEXT: {{ $}}
+ ; GFX1100-NEXT: S_SETHALT 5
+ ; GFX1100-NEXT: S_BRANCH %bb.3
+ ;
+ ; GFX1150-LABEL: name: test_fallthrough_trap
+ ; GFX1150: bb.0:
+ ; GFX1150-NEXT: successors: %bb.1(0x80000000)
+ ; GFX1150-NEXT: {{ $}}
+ ; GFX1150-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX1150-NEXT: [[C1:%[0-9]+]]:_(p1) = G_CONSTANT i64 0
+ ; GFX1150-NEXT: G_STORE [[C]](s32), [[C1]](p1) :: (store (s8), addrspace 1)
+ ; GFX1150-NEXT: S_TRAP 2
+ ; GFX1150-NEXT: {{ $}}
+ ; GFX1150-NEXT: bb.1:
+ ; GFX1150-NEXT: G_STORE [[C]](s32), [[C1]](p1) :: (store (s8), addrspace 1)
+ bb.0:
+ successors: %bb.1
+
+ %0:_(s8) = G_CONSTANT i8 0
+ %1:_(p1) = G_CONSTANT i64 0
+ G_STORE %0, %1 :: (store 1, addrspace 1)
+ G_TRAP
+ bb.1:
+ G_STORE %0, %1 :: (store 1, addrspace 1)
...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll
new file mode 100644
index 000000000000..8eb05bb9565f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll
@@ -0,0 +1,1515 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -global-isel -mcpu=tahiti -mtriple=amdgcn-- -stop-after=instruction-select -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GFX67,GFX6
+; RUN: llc -global-isel -mcpu=hawaii -mtriple=amdgcn-- -stop-after=instruction-select -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GFX67,GFX7
+; RUN: llc -global-isel -mcpu=fiji -mtriple=amdgcn-- -stop-after=instruction-select -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX8
+; RUN: llc -global-isel -mcpu=gfx900 -mtriple=amdgcn-- -stop-after=instruction-select -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX910
+; RUN: llc -global-isel -mcpu=gfx1010 -mtriple=amdgcn-- -stop-after=instruction-select -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX910
+; RUN: llc -global-isel -mcpu=gfx1100 -mtriple=amdgcn-- -stop-after=instruction-select -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX11
+; RUN: llc -global-isel -mcpu=gfx1200 -mtriple=amdgcn-- -stop-after=instruction-select -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX12
+
+define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX67-LABEL: name: raw_buffer_load_i8_tfe
+ ; GFX67: bb.1 (%ir-block.0):
+ ; GFX67-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX67-NEXT: {{ $}}
+ ; GFX67-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX67-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX67-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX67-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX67-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX67-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX67-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX67-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX67-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[BUFFER_LOAD_UBYTE_TFE_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX67-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_OFFSET]].sub0
+ ; GFX67-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_OFFSET]].sub1
+ ; GFX67-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE3]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_BYTE_ADDR64 [[COPY8]], [[REG_SEQUENCE1]], [[REG_SEQUENCE4]], 0, 0, 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
+ ; GFX67-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE5]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY9]], [[REG_SEQUENCE2]], [[REG_SEQUENCE6]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX67-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_i8_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[BUFFER_LOAD_UBYTE_TFE_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_OFFSET]].sub0
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_OFFSET]].sub1
+ ; GFX8-NEXT: FLAT_STORE_BYTE [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s8) into %ir.data_addr, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_i8_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[BUFFER_LOAD_UBYTE_TFE_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_OFFSET]].sub0
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_OFFSET]].sub1
+ ; GFX910-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_i8_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[BUFFER_LOAD_UBYTE_TFE_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_OFFSET]].sub0
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_OFFSET]].sub1
+ ; GFX11-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_i8_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX12-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { i8, i32 } @llvm.amdgcn.raw.buffer.load.sl_i8i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { i8, i32 } %res, 0
+ store i8 %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { i8, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
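+; i16 TFE load: the data short lands in sub0 and the status dword in sub1 of
+; the vreg_64 result, and the two are stored through separate pointers.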
+define amdgpu_ps void @raw_buffer_load_i16_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX67-LABEL: name: raw_buffer_load_i16_tfe
+ ; GFX67: bb.1 (%ir-block.0):
+ ; GFX67-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX67-NEXT: {{ $}}
+ ; GFX67-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX67-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX67-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX67-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX67-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX67-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX67-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX67-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX67-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[BUFFER_LOAD_USHORT_TFE_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX67-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_OFFSET]].sub0
+ ; GFX67-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_OFFSET]].sub1
+ ; GFX67-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE3]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_SHORT_ADDR64 [[COPY8]], [[REG_SEQUENCE1]], [[REG_SEQUENCE4]], 0, 0, 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX67-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE5]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY9]], [[REG_SEQUENCE2]], [[REG_SEQUENCE6]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX67-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_i16_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[BUFFER_LOAD_USHORT_TFE_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_OFFSET]].sub0
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_OFFSET]].sub1
+ ; GFX8-NEXT: FLAT_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_i16_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[BUFFER_LOAD_USHORT_TFE_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_OFFSET]].sub0
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_OFFSET]].sub1
+ ; GFX910-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_i16_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[BUFFER_LOAD_USHORT_TFE_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_OFFSET]].sub0
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_OFFSET]].sub1
+ ; GFX11-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_i16_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { i16, i32 } @llvm.amdgcn.raw.buffer.load.sl_i16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { i16, i32 } %res, 0
+ store i16 %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { i16, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
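+; f16 TFE load: selects the same BUFFER_LOAD_USHORT_TFE_* instructions as the
+; i16 case; only the IR element type differs.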
+define amdgpu_ps void @raw_buffer_load_f16_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX67-LABEL: name: raw_buffer_load_f16_tfe
+ ; GFX67: bb.1 (%ir-block.0):
+ ; GFX67-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX67-NEXT: {{ $}}
+ ; GFX67-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX67-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX67-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX67-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX67-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX67-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX67-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX67-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX67-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[BUFFER_LOAD_USHORT_TFE_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX67-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_OFFSET]].sub0
+ ; GFX67-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_OFFSET]].sub1
+ ; GFX67-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE3]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_SHORT_ADDR64 [[COPY8]], [[REG_SEQUENCE1]], [[REG_SEQUENCE4]], 0, 0, 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX67-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE5]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY9]], [[REG_SEQUENCE2]], [[REG_SEQUENCE6]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX67-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_f16_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[BUFFER_LOAD_USHORT_TFE_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_OFFSET]].sub0
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_OFFSET]].sub1
+ ; GFX8-NEXT: FLAT_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_f16_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[BUFFER_LOAD_USHORT_TFE_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_OFFSET]].sub0
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_OFFSET]].sub1
+ ; GFX910-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_f16_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[BUFFER_LOAD_USHORT_TFE_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_OFFSET]].sub0
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_OFFSET]].sub1
+ ; GFX11-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_f16_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { half, i32 } @llvm.amdgcn.raw.buffer.load.sl_f16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { half, i32 } %res, 0
+ store half %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { half, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
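+; i32 TFE load: BUFFER_LOAD_DWORD_TFE_* returns a vreg_64 holding the data
+; dword plus the status dword.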
+define amdgpu_ps void @raw_buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX67-LABEL: name: raw_buffer_load_i32_tfe
+ ; GFX67: bb.1 (%ir-block.0):
+ ; GFX67-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX67-NEXT: {{ $}}
+ ; GFX67-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX67-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX67-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX67-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX67-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX67-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX67-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX67-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX67-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[BUFFER_LOAD_DWORD_TFE_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX67-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_OFFSET]].sub0
+ ; GFX67-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_OFFSET]].sub1
+ ; GFX67-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE3]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY8]], [[REG_SEQUENCE1]], [[REG_SEQUENCE4]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
+ ; GFX67-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE5]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY9]], [[REG_SEQUENCE2]], [[REG_SEQUENCE6]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX67-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_i32_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[BUFFER_LOAD_DWORD_TFE_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_OFFSET]].sub0
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_OFFSET]].sub1
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.data_addr, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_i32_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[BUFFER_LOAD_DWORD_TFE_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_OFFSET]].sub0
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_OFFSET]].sub1
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_i32_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[BUFFER_LOAD_DWORD_TFE_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_OFFSET]].sub0
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_OFFSET]].sub1
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_i32_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { i32, i32 } @llvm.amdgcn.raw.buffer.load.sl_i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { i32, i32 } %res, 0
+ store i32 %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { i32, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
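+; v2i32 TFE load: BUFFER_LOAD_DWORDX2_TFE_* returns a vreg_96 (two data
+; dwords plus the status dword).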
+define amdgpu_ps void @raw_buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX67-LABEL: name: raw_buffer_load_v2i32_tfe
+ ; GFX67: bb.1 (%ir-block.0):
+ ; GFX67-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX67-NEXT: {{ $}}
+ ; GFX67-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX67-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX67-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX67-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX67-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX67-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX67-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX67-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX67-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX67-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub0
+ ; GFX67-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub1
+ ; GFX67-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub2
+ ; GFX67-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE4]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORDX2_ADDR64 [[REG_SEQUENCE3]], [[REG_SEQUENCE1]], [[REG_SEQUENCE5]], 0, 0, 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX67-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE7:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE6]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY10]], [[REG_SEQUENCE2]], [[REG_SEQUENCE7]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX67-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_v2i32_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub0
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub1
+ ; GFX8-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub2
+ ; GFX8-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX8-NEXT: FLAT_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec, implicit $flat_scr :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_v2i32_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub0
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub1
+ ; GFX910-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub2
+ ; GFX910-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX910-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_v2i32_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub0
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub1
+ ; GFX11-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub2
+ ; GFX11-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX11-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_v2i32_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { <2 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v2i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { <2 x i32>, i32 } %res, 0
+ store <2 x i32> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <2 x i32>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
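+; v2f32 TFE load: same selection as the v2i32 case.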
+define amdgpu_ps void @raw_buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX67-LABEL: name: raw_buffer_load_v2f32_tfe
+ ; GFX67: bb.1 (%ir-block.0):
+ ; GFX67-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX67-NEXT: {{ $}}
+ ; GFX67-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX67-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX67-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX67-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX67-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX67-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX67-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX67-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX67-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX67-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub0
+ ; GFX67-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub1
+ ; GFX67-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub2
+ ; GFX67-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE4]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORDX2_ADDR64 [[REG_SEQUENCE3]], [[REG_SEQUENCE1]], [[REG_SEQUENCE5]], 0, 0, 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX67-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE7:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE6]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY10]], [[REG_SEQUENCE2]], [[REG_SEQUENCE7]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX67-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_v2f32_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub0
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub1
+ ; GFX8-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub2
+ ; GFX8-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX8-NEXT: FLAT_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec, implicit $flat_scr :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_v2f32_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub0
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub1
+ ; GFX910-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub2
+ ; GFX910-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX910-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_v2f32_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub0
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub1
+ ; GFX11-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_OFFSET]].sub2
+ ; GFX11-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX11-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_v2f32_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { <2 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v2f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { <2 x float>, i32 } %res, 0
+ store <2 x float> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <2 x float>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
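+; v3i32 TFE load: BUFFER_LOAD_DWORDX3_TFE_* returns a vreg_128; on GFX6 the
+; <3 x i32> data store is split into a dwordx2 store plus a dword store at
+; offset 8.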
+define amdgpu_ps void @raw_buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX6-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX6: bb.1 (%ir-block.0):
+ ; GFX6-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX6-NEXT: {{ $}}
+ ; GFX6-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX6-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX6-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX6-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX6-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX6-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX6-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX6-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX6-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX6-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX6-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX6-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX6-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX6-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub0
+ ; GFX6-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub1
+ ; GFX6-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub2
+ ; GFX6-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub3
+ ; GFX6-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub0_sub1
+ ; GFX6-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub2_sub3
+ ; GFX6-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX6-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX6-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX6-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX6-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE3]], %subreg.sub2_sub3
+ ; GFX6-NEXT: BUFFER_STORE_DWORDX2_ADDR64 [[COPY12]], [[REG_SEQUENCE1]], [[REG_SEQUENCE4]], 0, 0, 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX6-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX6-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX6-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX6-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX6-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE5]], %subreg.sub2_sub3
+ ; GFX6-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY10]], [[REG_SEQUENCE1]], [[REG_SEQUENCE6]], 0, 8, 0, 0, implicit $exec :: (store (s32) into %ir.data_addr + 8, align 8, basealign 16, addrspace 1)
+ ; GFX6-NEXT: [[S_MOV_B32_5:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX6-NEXT: [[S_MOV_B32_6:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX6-NEXT: [[REG_SEQUENCE7:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_5]], %subreg.sub0, [[S_MOV_B32_6]], %subreg.sub1
+ ; GFX6-NEXT: [[S_MOV_B64_2:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX6-NEXT: [[REG_SEQUENCE8:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_2]], %subreg.sub0_sub1, [[REG_SEQUENCE7]], %subreg.sub2_sub3
+ ; GFX6-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY11]], [[REG_SEQUENCE2]], [[REG_SEQUENCE8]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX6-NEXT: S_ENDPGM 0
+ ;
+ ; GFX7-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX7: bb.1 (%ir-block.0):
+ ; GFX7-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX7-NEXT: {{ $}}
+ ; GFX7-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX7-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX7-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX7-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX7-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX7-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX7-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX7-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX7-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX7-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX7-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX7-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX7-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX7-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub0
+ ; GFX7-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub1
+ ; GFX7-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub2
+ ; GFX7-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub3
+ ; GFX7-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
+ ; GFX7-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX7-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX7-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX7-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX7-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE4]], %subreg.sub2_sub3
+ ; GFX7-NEXT: BUFFER_STORE_DWORDX3_ADDR64 [[REG_SEQUENCE3]], [[REG_SEQUENCE1]], [[REG_SEQUENCE5]], 0, 0, 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX7-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX7-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX7-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX7-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX7-NEXT: [[REG_SEQUENCE7:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE6]], %subreg.sub2_sub3
+ ; GFX7-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY11]], [[REG_SEQUENCE2]], [[REG_SEQUENCE7]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX7-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub0
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub1
+ ; GFX8-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub2
+ ; GFX8-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub3
+ ; GFX8-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
+ ; GFX8-NEXT: FLAT_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec, implicit $flat_scr :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub0
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub1
+ ; GFX910-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub2
+ ; GFX910-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub3
+ ; GFX910-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
+ ; GFX910-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub0
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub1
+ ; GFX11-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub2
+ ; GFX11-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub3
+ ; GFX11-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
+ ; GFX11-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
+ ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { <3 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v3i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { <3 x i32>, i32 } %res, 0
+ store <3 x i32> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <3 x i32>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @raw_buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX6-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX6: bb.1 (%ir-block.0):
+ ; GFX6-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX6-NEXT: {{ $}}
+ ; GFX6-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX6-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX6-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX6-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX6-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX6-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX6-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX6-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX6-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX6-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX6-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX6-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX6-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX6-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub0
+ ; GFX6-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub1
+ ; GFX6-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub2
+ ; GFX6-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub3
+ ; GFX6-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub0_sub1
+ ; GFX6-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub2_sub3
+ ; GFX6-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX6-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX6-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX6-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX6-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE3]], %subreg.sub2_sub3
+ ; GFX6-NEXT: BUFFER_STORE_DWORDX2_ADDR64 [[COPY12]], [[REG_SEQUENCE1]], [[REG_SEQUENCE4]], 0, 0, 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX6-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX6-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX6-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX6-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX6-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE5]], %subreg.sub2_sub3
+ ; GFX6-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY10]], [[REG_SEQUENCE1]], [[REG_SEQUENCE6]], 0, 8, 0, 0, implicit $exec :: (store (s32) into %ir.data_addr + 8, align 8, basealign 16, addrspace 1)
+ ; GFX6-NEXT: [[S_MOV_B32_5:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX6-NEXT: [[S_MOV_B32_6:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX6-NEXT: [[REG_SEQUENCE7:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_5]], %subreg.sub0, [[S_MOV_B32_6]], %subreg.sub1
+ ; GFX6-NEXT: [[S_MOV_B64_2:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX6-NEXT: [[REG_SEQUENCE8:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_2]], %subreg.sub0_sub1, [[REG_SEQUENCE7]], %subreg.sub2_sub3
+ ; GFX6-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY11]], [[REG_SEQUENCE2]], [[REG_SEQUENCE8]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX6-NEXT: S_ENDPGM 0
+ ;
+ ; GFX7-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX7: bb.1 (%ir-block.0):
+ ; GFX7-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX7-NEXT: {{ $}}
+ ; GFX7-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX7-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX7-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX7-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX7-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX7-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX7-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX7-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX7-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX7-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX7-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX7-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX7-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX7-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub0
+ ; GFX7-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub1
+ ; GFX7-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub2
+ ; GFX7-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub3
+ ; GFX7-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
+ ; GFX7-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX7-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX7-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX7-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX7-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE4]], %subreg.sub2_sub3
+ ; GFX7-NEXT: BUFFER_STORE_DWORDX3_ADDR64 [[REG_SEQUENCE3]], [[REG_SEQUENCE1]], [[REG_SEQUENCE5]], 0, 0, 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX7-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX7-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX7-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX7-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX7-NEXT: [[REG_SEQUENCE7:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE6]], %subreg.sub2_sub3
+ ; GFX7-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY11]], [[REG_SEQUENCE2]], [[REG_SEQUENCE7]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX7-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub0
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub1
+ ; GFX8-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub2
+ ; GFX8-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub3
+ ; GFX8-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
+ ; GFX8-NEXT: FLAT_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec, implicit $flat_scr :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub0
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub1
+ ; GFX910-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub2
+ ; GFX910-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub3
+ ; GFX910-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
+ ; GFX910-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub0
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub1
+ ; GFX11-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub2
+ ; GFX11-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_OFFSET]].sub3
+ ; GFX11-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
+ ; GFX11-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
+ ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { <3 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v3f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { <3 x float>, i32 } %res, 0
+ store <3 x float> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <3 x float>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @raw_buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX67-LABEL: name: raw_buffer_load_v4i32_tfe
+ ; GFX67: bb.1 (%ir-block.0):
+ ; GFX67-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX67-NEXT: {{ $}}
+ ; GFX67-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX67-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX67-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX67-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX67-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX67-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX67-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX67-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX67-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX67-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub0
+ ; GFX67-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub1
+ ; GFX67-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub2
+ ; GFX67-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub3
+ ; GFX67-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub4
+ ; GFX67-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
+ ; GFX67-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE4]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORDX4_ADDR64 [[REG_SEQUENCE3]], [[REG_SEQUENCE1]], [[REG_SEQUENCE5]], 0, 0, 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX67-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE7:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE6]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY12]], [[REG_SEQUENCE2]], [[REG_SEQUENCE7]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX67-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_v4i32_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub0
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub1
+ ; GFX8-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub2
+ ; GFX8-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub3
+ ; GFX8-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub4
+ ; GFX8-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
+ ; GFX8-NEXT: FLAT_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec, implicit $flat_scr :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_v4i32_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub0
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub1
+ ; GFX910-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub2
+ ; GFX910-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub3
+ ; GFX910-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub4
+ ; GFX910-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
+ ; GFX910-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_v4i32_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub0
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub1
+ ; GFX11-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub2
+ ; GFX11-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub3
+ ; GFX11-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub4
+ ; GFX11-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
+ ; GFX11-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_v4i32_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4
+ ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
+ ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { <4 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v4i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { <4 x i32>, i32 } %res, 0
+ store <4 x i32> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <4 x i32>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @raw_buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX67-LABEL: name: raw_buffer_load_v4f32_tfe
+ ; GFX67: bb.1 (%ir-block.0):
+ ; GFX67-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX67-NEXT: {{ $}}
+ ; GFX67-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX67-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX67-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX67-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX67-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX67-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX67-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX67-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX67-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX67-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub0
+ ; GFX67-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub1
+ ; GFX67-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub2
+ ; GFX67-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub3
+ ; GFX67-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub4
+ ; GFX67-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
+ ; GFX67-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE4]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORDX4_ADDR64 [[REG_SEQUENCE3]], [[REG_SEQUENCE1]], [[REG_SEQUENCE5]], 0, 0, 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX67-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE7:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE6]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY12]], [[REG_SEQUENCE2]], [[REG_SEQUENCE7]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX67-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_v4f32_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub0
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub1
+ ; GFX8-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub2
+ ; GFX8-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub3
+ ; GFX8-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub4
+ ; GFX8-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
+ ; GFX8-NEXT: FLAT_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec, implicit $flat_scr :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_v4f32_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub0
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub1
+ ; GFX910-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub2
+ ; GFX910-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub3
+ ; GFX910-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub4
+ ; GFX910-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
+ ; GFX910-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_v4f32_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub0
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub1
+ ; GFX11-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub2
+ ; GFX11-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub3
+ ; GFX11-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_OFFSET]].sub4
+ ; GFX11-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
+ ; GFX11-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_v4f32_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4
+ ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
+ ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { <4 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v4f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { <4 x float>, i32 } %res, 0
+ store <4 x float> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <4 x float>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+declare { i8, i32 } @llvm.amdgcn.raw.buffer.load.sl_i8i32s(<4 x i32>, i32, i32, i32)
+declare { i16, i32 } @llvm.amdgcn.raw.buffer.load.sl_i16i32s(<4 x i32>, i32, i32, i32)
+declare { half, i32 } @llvm.amdgcn.raw.buffer.load.sl_f16i32s(<4 x i32>, i32, i32, i32)
+declare { i32, i32 } @llvm.amdgcn.raw.buffer.load.sl_i32i32s(<4 x i32>, i32, i32, i32)
+declare { <2 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v2i32i32s(<4 x i32>, i32, i32, i32)
+declare { <2 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v2f32i32s(<4 x i32>, i32, i32, i32)
+declare { <3 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v3i32i32s(<4 x i32>, i32, i32, i32)
+declare { <3 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v3f32i32s(<4 x i32>, i32, i32, i32)
+declare { <4 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v4i32i32s(<4 x i32>, i32, i32, i32)
+declare { <4 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v4f32i32s(<4 x i32>, i32, i32, i32)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll
new file mode 100644
index 000000000000..62254af0a593
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll
@@ -0,0 +1,1577 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -global-isel -mcpu=tahiti -mtriple=amdgcn-- -stop-after=instruction-select -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GFX67,GFX6
+; RUN: llc -global-isel -mcpu=hawaii -mtriple=amdgcn-- -stop-after=instruction-select -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GFX67,GFX7
+; RUN: llc -global-isel -mcpu=fiji -mtriple=amdgcn-- -stop-after=instruction-select -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX8
+; RUN: llc -global-isel -mcpu=gfx900 -mtriple=amdgcn-- -stop-after=instruction-select -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX910
+; RUN: llc -global-isel -mcpu=gfx1010 -mtriple=amdgcn-- -stop-after=instruction-select -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX910
+; RUN: llc -global-isel -mcpu=gfx1100 -mtriple=amdgcn-- -stop-after=instruction-select -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX11
+; RUN: llc -global-isel -mcpu=gfx1200 -mtriple=amdgcn-- -stop-after=instruction-select -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX12
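+
+; Each function below exercises the TFE (texture fail enable) form of a struct
+; buffer load: the intrinsic returns { <data>, i32 }, and the test stores the
+; data and the i32 status word through separate addrspace(1) pointers so that
+; both results stay live through instruction selection. The function names keep
+; the raw_buffer_load_* prefix of the raw variant of this test, but the calls
+; are to the struct intrinsics, which take an extra vindex operand.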
+
+define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX67-LABEL: name: raw_buffer_load_i8_tfe
+ ; GFX67: bb.1 (%ir-block.0):
+ ; GFX67-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX67-NEXT: {{ $}}
+ ; GFX67-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX67-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX67-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX67-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX67-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX67-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX67-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX67-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX67-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX67-NEXT: [[BUFFER_LOAD_UBYTE_TFE_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX67-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_IDXEN]].sub0
+ ; GFX67-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_IDXEN]].sub1
+ ; GFX67-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE3]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_BYTE_ADDR64 [[COPY9]], [[REG_SEQUENCE1]], [[REG_SEQUENCE4]], 0, 0, 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
+ ; GFX67-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE5]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY10]], [[REG_SEQUENCE2]], [[REG_SEQUENCE6]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX67-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_i8_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX8-NEXT: [[BUFFER_LOAD_UBYTE_TFE_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_IDXEN]].sub0
+ ; GFX8-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_IDXEN]].sub1
+ ; GFX8-NEXT: FLAT_STORE_BYTE [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s8) into %ir.data_addr, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_i8_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX910-NEXT: [[BUFFER_LOAD_UBYTE_TFE_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_IDXEN]].sub0
+ ; GFX910-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_IDXEN]].sub1
+ ; GFX910-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_i8_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX11-NEXT: [[BUFFER_LOAD_UBYTE_TFE_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_IDXEN]].sub0
+ ; GFX11-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_IDXEN]].sub1
+ ; GFX11-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_i8_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX12-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { i8, i32 } @llvm.amdgcn.struct.buffer.load.sl_i8i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { i8, i32 } %res, 0
+ store i8 %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { i8, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
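+; Note the per-target lowering above: GFX6/7 write the results with
+; BUFFER_STORE_*_ADDR64 through a resource descriptor built from immediates,
+; GFX8 uses FLAT stores, GFX9 through GFX11 use GLOBAL stores, and GFX12
+; selects the *_VBUFFER_* load encoding with $sgpr_null as the scalar offset.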
+
+define amdgpu_ps void @raw_buffer_load_i16_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX67-LABEL: name: raw_buffer_load_i16_tfe
+ ; GFX67: bb.1 (%ir-block.0):
+ ; GFX67-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX67-NEXT: {{ $}}
+ ; GFX67-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX67-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX67-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX67-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX67-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX67-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX67-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX67-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX67-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX67-NEXT: [[BUFFER_LOAD_USHORT_TFE_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX67-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_IDXEN]].sub0
+ ; GFX67-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_IDXEN]].sub1
+ ; GFX67-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE3]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_SHORT_ADDR64 [[COPY9]], [[REG_SEQUENCE1]], [[REG_SEQUENCE4]], 0, 0, 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX67-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE5]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY10]], [[REG_SEQUENCE2]], [[REG_SEQUENCE6]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX67-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_i16_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX8-NEXT: [[BUFFER_LOAD_USHORT_TFE_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_IDXEN]].sub0
+ ; GFX8-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_IDXEN]].sub1
+ ; GFX8-NEXT: FLAT_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_i16_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX910-NEXT: [[BUFFER_LOAD_USHORT_TFE_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_IDXEN]].sub0
+ ; GFX910-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_IDXEN]].sub1
+ ; GFX910-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_i16_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX11-NEXT: [[BUFFER_LOAD_USHORT_TFE_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_IDXEN]].sub0
+ ; GFX11-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_IDXEN]].sub1
+ ; GFX11-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_i16_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { i16, i32 } @llvm.amdgcn.struct.buffer.load.sl_i16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { i16, i32 } %res, 0
+ store i16 %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { i16, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @raw_buffer_load_f16_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX67-LABEL: name: raw_buffer_load_f16_tfe
+ ; GFX67: bb.1 (%ir-block.0):
+ ; GFX67-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX67-NEXT: {{ $}}
+ ; GFX67-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX67-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX67-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX67-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX67-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX67-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX67-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX67-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX67-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX67-NEXT: [[BUFFER_LOAD_USHORT_TFE_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX67-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_IDXEN]].sub0
+ ; GFX67-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_IDXEN]].sub1
+ ; GFX67-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE3]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_SHORT_ADDR64 [[COPY9]], [[REG_SEQUENCE1]], [[REG_SEQUENCE4]], 0, 0, 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX67-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE5]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY10]], [[REG_SEQUENCE2]], [[REG_SEQUENCE6]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX67-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_f16_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX8-NEXT: [[BUFFER_LOAD_USHORT_TFE_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_IDXEN]].sub0
+ ; GFX8-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_IDXEN]].sub1
+ ; GFX8-NEXT: FLAT_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_f16_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX910-NEXT: [[BUFFER_LOAD_USHORT_TFE_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_IDXEN]].sub0
+ ; GFX910-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_IDXEN]].sub1
+ ; GFX910-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_f16_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX11-NEXT: [[BUFFER_LOAD_USHORT_TFE_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_IDXEN]].sub0
+ ; GFX11-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_IDXEN]].sub1
+ ; GFX11-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_f16_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { half, i32 } @llvm.amdgcn.struct.buffer.load.sl_f16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { half, i32 } %res, 0
+ store half %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { half, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @raw_buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX67-LABEL: name: raw_buffer_load_i32_tfe
+ ; GFX67: bb.1 (%ir-block.0):
+ ; GFX67-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX67-NEXT: {{ $}}
+ ; GFX67-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX67-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX67-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX67-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX67-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX67-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX67-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX67-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX67-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX67-NEXT: [[BUFFER_LOAD_DWORD_TFE_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX67-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_IDXEN]].sub0
+ ; GFX67-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_IDXEN]].sub1
+ ; GFX67-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE3]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY9]], [[REG_SEQUENCE1]], [[REG_SEQUENCE4]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
+ ; GFX67-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE5]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY10]], [[REG_SEQUENCE2]], [[REG_SEQUENCE6]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX67-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_i32_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX8-NEXT: [[BUFFER_LOAD_DWORD_TFE_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_IDXEN]].sub0
+ ; GFX8-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_IDXEN]].sub1
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.data_addr, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_i32_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX910-NEXT: [[BUFFER_LOAD_DWORD_TFE_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_IDXEN]].sub0
+ ; GFX910-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_IDXEN]].sub1
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_i32_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX11-NEXT: [[BUFFER_LOAD_DWORD_TFE_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_IDXEN]].sub0
+ ; GFX11-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_IDXEN]].sub1
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_i32_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { i32, i32 } @llvm.amdgcn.struct.buffer.load.sl_i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { i32, i32 } %res, 0
+ store i32 %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { i32, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @raw_buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX67-LABEL: name: raw_buffer_load_v2i32_tfe
+ ; GFX67: bb.1 (%ir-block.0):
+ ; GFX67-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX67-NEXT: {{ $}}
+ ; GFX67-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX67-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX67-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX67-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX67-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX67-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX67-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX67-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX67-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX67-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX67-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub0
+ ; GFX67-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub1
+ ; GFX67-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub2
+ ; GFX67-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE4]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORDX2_ADDR64 [[REG_SEQUENCE3]], [[REG_SEQUENCE1]], [[REG_SEQUENCE5]], 0, 0, 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX67-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE7:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE6]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY11]], [[REG_SEQUENCE2]], [[REG_SEQUENCE7]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX67-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_v2i32_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX8-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub0
+ ; GFX8-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub1
+ ; GFX8-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub2
+ ; GFX8-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX8-NEXT: FLAT_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec, implicit $flat_scr :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_v2i32_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX910-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub0
+ ; GFX910-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub1
+ ; GFX910-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub2
+ ; GFX910-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX910-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_v2i32_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX11-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub0
+ ; GFX11-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub1
+ ; GFX11-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub2
+ ; GFX11-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX11-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_v2i32_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { <2 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v2i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { <2 x i32>, i32 } %res, 0
+ store <2 x i32> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <2 x i32>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
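+; For vector payloads the TFE result register is one dword wider than the
+; data: the <2 x s32> load above selects a vreg_96 destination with the status
+; in sub2, extending the vreg_64/sub1 pattern of the scalar cases.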
+
+define amdgpu_ps void @raw_buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX67-LABEL: name: raw_buffer_load_v2f32_tfe
+ ; GFX67: bb.1 (%ir-block.0):
+ ; GFX67-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX67-NEXT: {{ $}}
+ ; GFX67-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX67-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX67-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX67-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX67-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX67-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX67-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX67-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX67-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX67-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX67-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub0
+ ; GFX67-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub1
+ ; GFX67-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub2
+ ; GFX67-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE4]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORDX2_ADDR64 [[REG_SEQUENCE3]], [[REG_SEQUENCE1]], [[REG_SEQUENCE5]], 0, 0, 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX67-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE7:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE6]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY11]], [[REG_SEQUENCE2]], [[REG_SEQUENCE7]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX67-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_v2f32_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX8-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub0
+ ; GFX8-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub1
+ ; GFX8-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub2
+ ; GFX8-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX8-NEXT: FLAT_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec, implicit $flat_scr :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_v2f32_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX910-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub0
+ ; GFX910-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub1
+ ; GFX910-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub2
+ ; GFX910-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX910-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_v2f32_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX11-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub0
+ ; GFX11-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub1
+ ; GFX11-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_IDXEN]].sub2
+ ; GFX11-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX11-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_v2f32_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { <2 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v2f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { <2 x float>, i32 } %res, 0
+ store <2 x float> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <2 x float>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
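+; <3 x i32> with TFE: the destination widens to vreg_128 and the load
+; selects to BUFFER_LOAD_DWORDX3_TFE_*_IDXEN, with the status word in
+; sub3. GFX6 gets its own checks here, presumably because it lacks a
+; DWORDX3 buffer store: the data store is split into a DWORDX2 store plus
+; a DWORD store at offset 8.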
+define amdgpu_ps void @raw_buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX6-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX6: bb.1 (%ir-block.0):
+ ; GFX6-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX6-NEXT: {{ $}}
+ ; GFX6-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX6-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX6-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX6-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX6-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX6-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX6-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX6-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX6-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX6-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX6-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX6-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX6-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX6-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX6-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub0
+ ; GFX6-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub1
+ ; GFX6-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub2
+ ; GFX6-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub3
+ ; GFX6-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub0_sub1
+ ; GFX6-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub2_sub3
+ ; GFX6-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX6-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX6-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX6-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX6-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE3]], %subreg.sub2_sub3
+ ; GFX6-NEXT: BUFFER_STORE_DWORDX2_ADDR64 [[COPY13]], [[REG_SEQUENCE1]], [[REG_SEQUENCE4]], 0, 0, 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX6-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX6-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX6-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX6-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX6-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE5]], %subreg.sub2_sub3
+ ; GFX6-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY11]], [[REG_SEQUENCE1]], [[REG_SEQUENCE6]], 0, 8, 0, 0, implicit $exec :: (store (s32) into %ir.data_addr + 8, align 8, basealign 16, addrspace 1)
+ ; GFX6-NEXT: [[S_MOV_B32_5:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX6-NEXT: [[S_MOV_B32_6:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX6-NEXT: [[REG_SEQUENCE7:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_5]], %subreg.sub0, [[S_MOV_B32_6]], %subreg.sub1
+ ; GFX6-NEXT: [[S_MOV_B64_2:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX6-NEXT: [[REG_SEQUENCE8:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_2]], %subreg.sub0_sub1, [[REG_SEQUENCE7]], %subreg.sub2_sub3
+ ; GFX6-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY12]], [[REG_SEQUENCE2]], [[REG_SEQUENCE8]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX6-NEXT: S_ENDPGM 0
+ ;
+ ; GFX7-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX7: bb.1 (%ir-block.0):
+ ; GFX7-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX7-NEXT: {{ $}}
+ ; GFX7-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX7-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX7-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX7-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX7-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX7-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX7-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX7-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX7-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX7-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX7-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX7-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX7-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX7-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX7-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub0
+ ; GFX7-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub1
+ ; GFX7-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub2
+ ; GFX7-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub3
+ ; GFX7-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
+ ; GFX7-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX7-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX7-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX7-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX7-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE4]], %subreg.sub2_sub3
+ ; GFX7-NEXT: BUFFER_STORE_DWORDX3_ADDR64 [[REG_SEQUENCE3]], [[REG_SEQUENCE1]], [[REG_SEQUENCE5]], 0, 0, 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX7-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX7-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX7-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX7-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX7-NEXT: [[REG_SEQUENCE7:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE6]], %subreg.sub2_sub3
+ ; GFX7-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY12]], [[REG_SEQUENCE2]], [[REG_SEQUENCE7]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX7-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX8-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub0
+ ; GFX8-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub1
+ ; GFX8-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub2
+ ; GFX8-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub3
+ ; GFX8-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
+ ; GFX8-NEXT: FLAT_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec, implicit $flat_scr :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX910-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub0
+ ; GFX910-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub1
+ ; GFX910-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub2
+ ; GFX910-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub3
+ ; GFX910-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
+ ; GFX910-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX11-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub0
+ ; GFX11-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub1
+ ; GFX11-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub2
+ ; GFX11-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub3
+ ; GFX11-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
+ ; GFX11-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
+ ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { <3 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v3i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { <3 x i32>, i32 } %res, 0
+ store <3 x i32> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <3 x i32>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
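+; Float variant of the v3i32 test above; the generated checks match the
+; integer case modulo value names, since the memory operand is still
+; (<3 x s32>).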
+define amdgpu_ps void @raw_buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX6-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX6: bb.1 (%ir-block.0):
+ ; GFX6-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX6-NEXT: {{ $}}
+ ; GFX6-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX6-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX6-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX6-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX6-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX6-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX6-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX6-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX6-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX6-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX6-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX6-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX6-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX6-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX6-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub0
+ ; GFX6-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub1
+ ; GFX6-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub2
+ ; GFX6-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub3
+ ; GFX6-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub0_sub1
+ ; GFX6-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub2_sub3
+ ; GFX6-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX6-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX6-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX6-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX6-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE3]], %subreg.sub2_sub3
+ ; GFX6-NEXT: BUFFER_STORE_DWORDX2_ADDR64 [[COPY13]], [[REG_SEQUENCE1]], [[REG_SEQUENCE4]], 0, 0, 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX6-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX6-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX6-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX6-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX6-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE5]], %subreg.sub2_sub3
+ ; GFX6-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY11]], [[REG_SEQUENCE1]], [[REG_SEQUENCE6]], 0, 8, 0, 0, implicit $exec :: (store (s32) into %ir.data_addr + 8, align 8, basealign 16, addrspace 1)
+ ; GFX6-NEXT: [[S_MOV_B32_5:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX6-NEXT: [[S_MOV_B32_6:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX6-NEXT: [[REG_SEQUENCE7:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_5]], %subreg.sub0, [[S_MOV_B32_6]], %subreg.sub1
+ ; GFX6-NEXT: [[S_MOV_B64_2:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX6-NEXT: [[REG_SEQUENCE8:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_2]], %subreg.sub0_sub1, [[REG_SEQUENCE7]], %subreg.sub2_sub3
+ ; GFX6-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY12]], [[REG_SEQUENCE2]], [[REG_SEQUENCE8]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX6-NEXT: S_ENDPGM 0
+ ;
+ ; GFX7-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX7: bb.1 (%ir-block.0):
+ ; GFX7-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX7-NEXT: {{ $}}
+ ; GFX7-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX7-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX7-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX7-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX7-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX7-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX7-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX7-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX7-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX7-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX7-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX7-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX7-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX7-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX7-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub0
+ ; GFX7-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub1
+ ; GFX7-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub2
+ ; GFX7-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub3
+ ; GFX7-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
+ ; GFX7-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX7-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX7-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX7-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX7-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE4]], %subreg.sub2_sub3
+ ; GFX7-NEXT: BUFFER_STORE_DWORDX3_ADDR64 [[REG_SEQUENCE3]], [[REG_SEQUENCE1]], [[REG_SEQUENCE5]], 0, 0, 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX7-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX7-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX7-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX7-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX7-NEXT: [[REG_SEQUENCE7:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE6]], %subreg.sub2_sub3
+ ; GFX7-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY12]], [[REG_SEQUENCE2]], [[REG_SEQUENCE7]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX7-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX8-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub0
+ ; GFX8-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub1
+ ; GFX8-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub2
+ ; GFX8-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub3
+ ; GFX8-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
+ ; GFX8-NEXT: FLAT_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec, implicit $flat_scr :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX910-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub0
+ ; GFX910-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub1
+ ; GFX910-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub2
+ ; GFX910-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub3
+ ; GFX910-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
+ ; GFX910-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX11-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub0
+ ; GFX11-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub1
+ ; GFX11-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub2
+ ; GFX11-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_IDXEN]].sub3
+ ; GFX11-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
+ ; GFX11-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
+ ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { <3 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v3f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { <3 x float>, i32 } %res, 0
+ store <3 x float> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <3 x float>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
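+; <4 x i32> with TFE: vreg_160 destination, BUFFER_LOAD_DWORDX4_TFE_*_IDXEN,
+; status word in sub4. GFX6 and GFX7 share a prefix (GFX67) again since
+; both can use BUFFER_STORE_DWORDX4_ADDR64 for the data store.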
+define amdgpu_ps void @raw_buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX67-LABEL: name: raw_buffer_load_v4i32_tfe
+ ; GFX67: bb.1 (%ir-block.0):
+ ; GFX67-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX67-NEXT: {{ $}}
+ ; GFX67-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX67-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX67-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX67-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX67-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX67-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX67-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX67-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX67-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX67-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX67-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub0
+ ; GFX67-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub1
+ ; GFX67-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub2
+ ; GFX67-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub3
+ ; GFX67-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub4
+ ; GFX67-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
+ ; GFX67-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE4]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORDX4_ADDR64 [[REG_SEQUENCE3]], [[REG_SEQUENCE1]], [[REG_SEQUENCE5]], 0, 0, 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX67-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE7:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE6]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY13]], [[REG_SEQUENCE2]], [[REG_SEQUENCE7]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX67-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_v4i32_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX8-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub0
+ ; GFX8-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub1
+ ; GFX8-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub2
+ ; GFX8-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub3
+ ; GFX8-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub4
+ ; GFX8-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
+ ; GFX8-NEXT: FLAT_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec, implicit $flat_scr :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_v4i32_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX910-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub0
+ ; GFX910-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub1
+ ; GFX910-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub2
+ ; GFX910-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub3
+ ; GFX910-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub4
+ ; GFX910-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
+ ; GFX910-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_v4i32_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX11-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub0
+ ; GFX11-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub1
+ ; GFX11-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub2
+ ; GFX11-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub3
+ ; GFX11-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub4
+ ; GFX11-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
+ ; GFX11-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_v4i32_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4
+ ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
+ ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { <4 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v4i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { <4 x i32>, i32 } %res, 0
+ store <4 x i32> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <4 x i32>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
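+; Float variant of the v4i32 test; selection is identical apart from
+; value names.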
+define amdgpu_ps void @raw_buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+ ; GFX67-LABEL: name: raw_buffer_load_v4f32_tfe
+ ; GFX67: bb.1 (%ir-block.0):
+ ; GFX67-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX67-NEXT: {{ $}}
+ ; GFX67-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX67-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX67-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX67-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX67-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX67-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX67-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX67-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX67-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX67-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX67-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX67-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub0
+ ; GFX67-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub1
+ ; GFX67-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub2
+ ; GFX67-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub3
+ ; GFX67-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub4
+ ; GFX67-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
+ ; GFX67-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_]], %subreg.sub0_sub1, [[REG_SEQUENCE4]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORDX4_ADDR64 [[REG_SEQUENCE3]], [[REG_SEQUENCE1]], [[REG_SEQUENCE5]], 0, 0, 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX67-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX67-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
+ ; GFX67-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_3]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1
+ ; GFX67-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; GFX67-NEXT: [[REG_SEQUENCE7:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B64_1]], %subreg.sub0_sub1, [[REG_SEQUENCE6]], %subreg.sub2_sub3
+ ; GFX67-NEXT: BUFFER_STORE_DWORD_ADDR64 [[COPY13]], [[REG_SEQUENCE2]], [[REG_SEQUENCE7]], 0, 0, 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX67-NEXT: S_ENDPGM 0
+ ;
+ ; GFX8-LABEL: name: raw_buffer_load_v4f32_tfe
+ ; GFX8: bb.1 (%ir-block.0):
+ ; GFX8-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX8-NEXT: {{ $}}
+ ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX8-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX8-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX8-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX8-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX8-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX8-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX8-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX8-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX8-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX8-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX8-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub0
+ ; GFX8-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub1
+ ; GFX8-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub2
+ ; GFX8-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub3
+ ; GFX8-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub4
+ ; GFX8-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
+ ; GFX8-NEXT: FLAT_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec, implicit $flat_scr :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX8-NEXT: FLAT_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX8-NEXT: S_ENDPGM 0
+ ;
+ ; GFX910-LABEL: name: raw_buffer_load_v4f32_tfe
+ ; GFX910: bb.1 (%ir-block.0):
+ ; GFX910-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX910-NEXT: {{ $}}
+ ; GFX910-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX910-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX910-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX910-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX910-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX910-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX910-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX910-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX910-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX910-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX910-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX910-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX910-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX910-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub0
+ ; GFX910-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub1
+ ; GFX910-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub2
+ ; GFX910-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub3
+ ; GFX910-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub4
+ ; GFX910-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
+ ; GFX910-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX910-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX910-NEXT: S_ENDPGM 0
+ ;
+ ; GFX11-LABEL: name: raw_buffer_load_v4f32_tfe
+ ; GFX11: bb.1 (%ir-block.0):
+ ; GFX11-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX11-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX11-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX11-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_IDXEN [[COPY8]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub0
+ ; GFX11-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub1
+ ; GFX11-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub2
+ ; GFX11-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub3
+ ; GFX11-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_IDXEN]].sub4
+ ; GFX11-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
+ ; GFX11-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX11-NEXT: S_ENDPGM 0
+ ;
+ ; GFX12-LABEL: name: raw_buffer_load_v4f32_tfe
+ ; GFX12: bb.1 (%ir-block.0):
+ ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4
+ ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
+ ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX12-NEXT: S_ENDPGM 0
+ %res = call { <4 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v4f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { <4 x float>, i32 } %res, 0
+ store <4 x float> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <4 x float>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+declare { i8, i32 } @llvm.amdgcn.struct.buffer.load.sl_i8i32s(<4 x i32>, i32, i32, i32, i32)
+declare { i16, i32 } @llvm.amdgcn.struct.buffer.load.sl_i16i32s(<4 x i32>, i32, i32, i32, i32)
+declare { half, i32 } @llvm.amdgcn.struct.buffer.load.sl_f16i32s(<4 x i32>, i32, i32, i32, i32)
+declare { i32, i32 } @llvm.amdgcn.struct.buffer.load.sl_i32i32s(<4 x i32>, i32, i32, i32, i32)
+declare { <2 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v2i32i32s(<4 x i32>, i32, i32, i32, i32)
+declare { <2 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v2f32i32s(<4 x i32>, i32, i32, i32, i32)
+declare { <3 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v3i32i32s(<4 x i32>, i32, i32, i32, i32)
+declare { <3 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v3f32i32s(<4 x i32>, i32, i32, i32, i32)
+declare { <4 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v4i32i32s(<4 x i32>, i32, i32, i32, i32)
+declare { <4 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v4f32i32s(<4 x i32>, i32, i32, i32, i32)
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fdiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fdiv.ll
index 6bda962d1b9c..b69afa3ab1f3 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fdiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fdiv.ll
@@ -2151,7 +2151,7 @@ define amdgpu_kernel void @rsq_f32_vector_fpmath(ptr addrspace(1) %out, <2 x flo
; IEEE-GOODFREXP-NEXT: [[TMP29:%.*]] = extractvalue { float, i32 } [[TMP28]], 0
; IEEE-GOODFREXP-NEXT: [[TMP30:%.*]] = extractvalue { float, i32 } [[TMP28]], 1
; IEEE-GOODFREXP-NEXT: [[TMP31:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP29]])
-; IEEE-GOODFREXP-NEXT: [[TMP32:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float undef)
+; IEEE-GOODFREXP-NEXT: [[TMP32:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float poison)
; IEEE-GOODFREXP-NEXT: [[TMP33:%.*]] = extractvalue { float, i32 } [[TMP32]], 0
; IEEE-GOODFREXP-NEXT: [[TMP34:%.*]] = extractvalue { float, i32 } [[TMP32]], 1
; IEEE-GOODFREXP-NEXT: [[TMP35:%.*]] = fmul contract float [[TMP33]], [[TMP31]]
@@ -2222,9 +2222,9 @@ define amdgpu_kernel void @rsq_f32_vector_fpmath(ptr addrspace(1) %out, <2 x flo
; IEEE-BADFREXP-NEXT: [[TMP29:%.*]] = extractvalue { float, i32 } [[TMP28]], 0
; IEEE-BADFREXP-NEXT: [[TMP30:%.*]] = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float [[TMP19]])
; IEEE-BADFREXP-NEXT: [[TMP31:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP29]])
-; IEEE-BADFREXP-NEXT: [[TMP32:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float undef)
+; IEEE-BADFREXP-NEXT: [[TMP32:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float poison)
; IEEE-BADFREXP-NEXT: [[TMP33:%.*]] = extractvalue { float, i32 } [[TMP32]], 0
-; IEEE-BADFREXP-NEXT: [[TMP34:%.*]] = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float undef)
+; IEEE-BADFREXP-NEXT: [[TMP34:%.*]] = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float poison)
; IEEE-BADFREXP-NEXT: [[TMP35:%.*]] = fmul contract float [[TMP33]], [[TMP31]]
; IEEE-BADFREXP-NEXT: [[TMP36:%.*]] = sub i32 [[TMP34]], [[TMP30]]
; IEEE-BADFREXP-NEXT: [[TMP37:%.*]] = call contract float @llvm.ldexp.f32.i32(float [[TMP35]], i32 [[TMP36]])
@@ -2281,7 +2281,7 @@ define amdgpu_kernel void @rsq_f32_vector_fpmath(ptr addrspace(1) %out, <2 x flo
; DAZ-NEXT: [[TMP17:%.*]] = extractvalue { float, i32 } [[TMP16]], 0
; DAZ-NEXT: [[TMP18:%.*]] = extractvalue { float, i32 } [[TMP16]], 1
; DAZ-NEXT: [[TMP19:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP17]])
-; DAZ-NEXT: [[TMP20:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float undef)
+; DAZ-NEXT: [[TMP20:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float poison)
; DAZ-NEXT: [[TMP21:%.*]] = extractvalue { float, i32 } [[TMP20]], 0
; DAZ-NEXT: [[TMP22:%.*]] = extractvalue { float, i32 } [[TMP20]], 1
; DAZ-NEXT: [[TMP23:%.*]] = fmul contract float [[TMP21]], [[TMP19]]
@@ -2313,7 +2313,7 @@ define amdgpu_kernel void @rsq_f32_vector_fpmath(ptr addrspace(1) %out, <2 x flo
; Matches the rsq instruction accuracy
%sqrt.md.1ulp.undef = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %x), !fpmath !2
- %md.1ulp.undef = fdiv contract <2 x float> <float 1.0, float undef>, %sqrt.md.1ulp.undef, !fpmath !2
+ %md.1ulp.undef = fdiv contract <2 x float> <float 1.0, float poison>, %sqrt.md.1ulp.undef, !fpmath !2
store volatile <2 x float> %md.1ulp.undef, ptr addrspace(1) %out, align 4
; Test mismatched metadata/flags between the sqrt and fdiv
@@ -3121,7 +3121,7 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator(<4 x float> %arg) {
; IEEE-GOODFREXP-NEXT: [[TMP32:%.*]] = extractvalue { float, i32 } [[TMP31]], 0
; IEEE-GOODFREXP-NEXT: [[TMP33:%.*]] = extractvalue { float, i32 } [[TMP31]], 1
; IEEE-GOODFREXP-NEXT: [[TMP34:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP32]])
-; IEEE-GOODFREXP-NEXT: [[TMP35:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float undef)
+; IEEE-GOODFREXP-NEXT: [[TMP35:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float poison)
; IEEE-GOODFREXP-NEXT: [[TMP36:%.*]] = extractvalue { float, i32 } [[TMP35]], 0
; IEEE-GOODFREXP-NEXT: [[TMP37:%.*]] = extractvalue { float, i32 } [[TMP35]], 1
; IEEE-GOODFREXP-NEXT: [[TMP38:%.*]] = fmul contract float [[TMP36]], [[TMP34]]
@@ -3170,9 +3170,9 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator(<4 x float> %arg) {
; IEEE-BADFREXP-NEXT: [[TMP32:%.*]] = extractvalue { float, i32 } [[TMP31]], 0
; IEEE-BADFREXP-NEXT: [[TMP33:%.*]] = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float [[TMP4]])
; IEEE-BADFREXP-NEXT: [[TMP34:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP32]])
-; IEEE-BADFREXP-NEXT: [[TMP35:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float undef)
+; IEEE-BADFREXP-NEXT: [[TMP35:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float poison)
; IEEE-BADFREXP-NEXT: [[TMP36:%.*]] = extractvalue { float, i32 } [[TMP35]], 0
-; IEEE-BADFREXP-NEXT: [[TMP37:%.*]] = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float undef)
+; IEEE-BADFREXP-NEXT: [[TMP37:%.*]] = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float poison)
; IEEE-BADFREXP-NEXT: [[TMP38:%.*]] = fmul contract float [[TMP36]], [[TMP34]]
; IEEE-BADFREXP-NEXT: [[TMP39:%.*]] = sub i32 [[TMP37]], [[TMP33]]
; IEEE-BADFREXP-NEXT: [[TMP40:%.*]] = call contract float @llvm.ldexp.f32.i32(float [[TMP38]], i32 [[TMP39]])
@@ -3217,7 +3217,7 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator(<4 x float> %arg) {
; DAZ-NEXT: [[TMP30:%.*]] = extractvalue { float, i32 } [[TMP29]], 0
; DAZ-NEXT: [[TMP31:%.*]] = extractvalue { float, i32 } [[TMP29]], 1
; DAZ-NEXT: [[TMP32:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP30]])
-; DAZ-NEXT: [[TMP33:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float undef)
+; DAZ-NEXT: [[TMP33:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float poison)
; DAZ-NEXT: [[TMP34:%.*]] = extractvalue { float, i32 } [[TMP33]], 0
; DAZ-NEXT: [[TMP35:%.*]] = extractvalue { float, i32 } [[TMP33]], 1
; DAZ-NEXT: [[TMP36:%.*]] = fmul contract float [[TMP34]], [[TMP32]]
@@ -3230,7 +3230,7 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator(<4 x float> %arg) {
; DAZ-NEXT: ret <4 x float> [[PARTIAL_RSQ]]
;
%denom = call contract <4 x float> @llvm.sqrt.v4f32(<4 x float> %arg), !fpmath !2
- %partial.rsq = fdiv contract <4 x float> <float 1.0, float -1.0, float 4.0, float undef>, %denom, !fpmath !2
+ %partial.rsq = fdiv contract <4 x float> <float 1.0, float -1.0, float 4.0, float poison>, %denom, !fpmath !2
ret <4 x float> %partial.rsq
}
@@ -3272,7 +3272,7 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator_afn_sqrt(<4 x float>
; IEEE-GOODFREXP-NEXT: [[TMP32:%.*]] = extractvalue { float, i32 } [[TMP31]], 0
; IEEE-GOODFREXP-NEXT: [[TMP33:%.*]] = extractvalue { float, i32 } [[TMP31]], 1
; IEEE-GOODFREXP-NEXT: [[TMP34:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP32]])
-; IEEE-GOODFREXP-NEXT: [[TMP35:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float undef)
+; IEEE-GOODFREXP-NEXT: [[TMP35:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float poison)
; IEEE-GOODFREXP-NEXT: [[TMP36:%.*]] = extractvalue { float, i32 } [[TMP35]], 0
; IEEE-GOODFREXP-NEXT: [[TMP37:%.*]] = extractvalue { float, i32 } [[TMP35]], 1
; IEEE-GOODFREXP-NEXT: [[TMP38:%.*]] = fmul contract float [[TMP36]], [[TMP34]]
@@ -3321,9 +3321,9 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator_afn_sqrt(<4 x float>
; IEEE-BADFREXP-NEXT: [[TMP32:%.*]] = extractvalue { float, i32 } [[TMP31]], 0
; IEEE-BADFREXP-NEXT: [[TMP33:%.*]] = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float [[TMP4]])
; IEEE-BADFREXP-NEXT: [[TMP34:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP32]])
-; IEEE-BADFREXP-NEXT: [[TMP35:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float undef)
+; IEEE-BADFREXP-NEXT: [[TMP35:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float poison)
; IEEE-BADFREXP-NEXT: [[TMP36:%.*]] = extractvalue { float, i32 } [[TMP35]], 0
-; IEEE-BADFREXP-NEXT: [[TMP37:%.*]] = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float undef)
+; IEEE-BADFREXP-NEXT: [[TMP37:%.*]] = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float poison)
; IEEE-BADFREXP-NEXT: [[TMP38:%.*]] = fmul contract float [[TMP36]], [[TMP34]]
; IEEE-BADFREXP-NEXT: [[TMP39:%.*]] = sub i32 [[TMP37]], [[TMP33]]
; IEEE-BADFREXP-NEXT: [[TMP40:%.*]] = call contract float @llvm.ldexp.f32.i32(float [[TMP38]], i32 [[TMP39]])
@@ -3361,7 +3361,7 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator_afn_sqrt(<4 x float>
; DAZ-NEXT: [[TMP23:%.*]] = extractvalue { float, i32 } [[TMP22]], 0
; DAZ-NEXT: [[TMP24:%.*]] = extractvalue { float, i32 } [[TMP22]], 1
; DAZ-NEXT: [[TMP25:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP23]])
-; DAZ-NEXT: [[TMP26:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float undef)
+; DAZ-NEXT: [[TMP26:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float poison)
; DAZ-NEXT: [[TMP27:%.*]] = extractvalue { float, i32 } [[TMP26]], 0
; DAZ-NEXT: [[TMP28:%.*]] = extractvalue { float, i32 } [[TMP26]], 1
; DAZ-NEXT: [[TMP29:%.*]] = fmul contract float [[TMP27]], [[TMP25]]
@@ -3374,7 +3374,7 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator_afn_sqrt(<4 x float>
; DAZ-NEXT: ret <4 x float> [[PARTIAL_RSQ]]
;
%denom = call contract afn <4 x float> @llvm.sqrt.v4f32(<4 x float> %arg)
- %partial.rsq = fdiv contract <4 x float> <float 1.0, float -1.0, float 4.0, float undef>, %denom, !fpmath !2
+ %partial.rsq = fdiv contract <4 x float> <float 1.0, float -1.0, float 4.0, float poison>, %denom, !fpmath !2
ret <4 x float> %partial.rsq
}
@@ -3382,7 +3382,7 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator_afn_div(<4 x float>
; IEEE-LABEL: define <4 x float> @rsq_f32_vector_mixed_constant_numerator_afn_div(
; IEEE-SAME: <4 x float> [[ARG:%.*]]) #[[ATTR1]] {
; IEEE-NEXT: [[DENOM:%.*]] = call contract <4 x float> @llvm.sqrt.v4f32(<4 x float> [[ARG]]), !fpmath [[META2:![0-9]+]]
-; IEEE-NEXT: [[PARTIAL_RSQ:%.*]] = fdiv contract afn <4 x float> <float 1.000000e+00, float -1.000000e+00, float 4.000000e+00, float undef>, [[DENOM]]
+; IEEE-NEXT: [[PARTIAL_RSQ:%.*]] = fdiv contract afn <4 x float> <float 1.000000e+00, float -1.000000e+00, float 4.000000e+00, float poison>, [[DENOM]]
; IEEE-NEXT: ret <4 x float> [[PARTIAL_RSQ]]
;
; DAZ-LABEL: define <4 x float> @rsq_f32_vector_mixed_constant_numerator_afn_div(
@@ -3399,11 +3399,11 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator_afn_div(<4 x float>
; DAZ-NEXT: [[TMP10:%.*]] = insertelement <4 x float> [[TMP9]], float [[TMP6]], i64 1
; DAZ-NEXT: [[TMP11:%.*]] = insertelement <4 x float> [[TMP10]], float [[TMP7]], i64 2
; DAZ-NEXT: [[DENOM:%.*]] = insertelement <4 x float> [[TMP11]], float [[TMP8]], i64 3
-; DAZ-NEXT: [[PARTIAL_RSQ:%.*]] = fdiv contract afn <4 x float> <float 1.000000e+00, float -1.000000e+00, float 4.000000e+00, float undef>, [[DENOM]]
+; DAZ-NEXT: [[PARTIAL_RSQ:%.*]] = fdiv contract afn <4 x float> <float 1.000000e+00, float -1.000000e+00, float 4.000000e+00, float poison>, [[DENOM]]
; DAZ-NEXT: ret <4 x float> [[PARTIAL_RSQ]]
;
%denom = call contract <4 x float> @llvm.sqrt.v4f32(<4 x float> %arg), !fpmath !2
- %partial.rsq = fdiv contract afn <4 x float> <float 1.0, float -1.0, float 4.0, float undef>, %denom
+ %partial.rsq = fdiv contract afn <4 x float> <float 1.0, float -1.0, float 4.0, float poison>, %denom
ret <4 x float> %partial.rsq
}
@@ -3411,7 +3411,7 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator_correct_fdiv(<4 x fl
; IEEE-LABEL: define <4 x float> @rsq_f32_vector_mixed_constant_numerator_correct_fdiv(
; IEEE-SAME: <4 x float> [[ARG:%.*]]) #[[ATTR1]] {
; IEEE-NEXT: [[DENOM:%.*]] = call contract <4 x float> @llvm.sqrt.v4f32(<4 x float> [[ARG]]), !fpmath [[META2]]
-; IEEE-NEXT: [[PARTIAL_RSQ:%.*]] = fdiv contract <4 x float> <float 1.000000e+00, float -1.000000e+00, float 4.000000e+00, float undef>, [[DENOM]]
+; IEEE-NEXT: [[PARTIAL_RSQ:%.*]] = fdiv contract <4 x float> <float 1.000000e+00, float -1.000000e+00, float 4.000000e+00, float poison>, [[DENOM]]
; IEEE-NEXT: ret <4 x float> [[PARTIAL_RSQ]]
;
; DAZ-LABEL: define <4 x float> @rsq_f32_vector_mixed_constant_numerator_correct_fdiv(
@@ -3428,11 +3428,11 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator_correct_fdiv(<4 x fl
; DAZ-NEXT: [[TMP10:%.*]] = insertelement <4 x float> [[TMP9]], float [[TMP6]], i64 1
; DAZ-NEXT: [[TMP11:%.*]] = insertelement <4 x float> [[TMP10]], float [[TMP7]], i64 2
; DAZ-NEXT: [[DENOM:%.*]] = insertelement <4 x float> [[TMP11]], float [[TMP8]], i64 3
-; DAZ-NEXT: [[PARTIAL_RSQ:%.*]] = fdiv contract <4 x float> <float 1.000000e+00, float -1.000000e+00, float 4.000000e+00, float undef>, [[DENOM]]
+; DAZ-NEXT: [[PARTIAL_RSQ:%.*]] = fdiv contract <4 x float> <float 1.000000e+00, float -1.000000e+00, float 4.000000e+00, float poison>, [[DENOM]]
; DAZ-NEXT: ret <4 x float> [[PARTIAL_RSQ]]
;
%denom = call contract <4 x float> @llvm.sqrt.v4f32(<4 x float> %arg), !fpmath !2
- %partial.rsq = fdiv contract <4 x float> <float 1.0, float -1.0, float 4.0, float undef>, %denom
+ %partial.rsq = fdiv contract <4 x float> <float 1.0, float -1.0, float 4.0, float poison>, %denom
ret <4 x float> %partial.rsq
}
@@ -3471,7 +3471,7 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator_correct_sqrt(<4 x fl
; IEEE-GOODFREXP-NEXT: [[TMP29:%.*]] = extractvalue { float, i32 } [[TMP28]], 0
; IEEE-GOODFREXP-NEXT: [[TMP30:%.*]] = extractvalue { float, i32 } [[TMP28]], 1
; IEEE-GOODFREXP-NEXT: [[TMP31:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP29]])
-; IEEE-GOODFREXP-NEXT: [[TMP32:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float undef)
+; IEEE-GOODFREXP-NEXT: [[TMP32:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float poison)
; IEEE-GOODFREXP-NEXT: [[TMP33:%.*]] = extractvalue { float, i32 } [[TMP32]], 0
; IEEE-GOODFREXP-NEXT: [[TMP34:%.*]] = extractvalue { float, i32 } [[TMP32]], 1
; IEEE-GOODFREXP-NEXT: [[TMP35:%.*]] = fmul contract float [[TMP33]], [[TMP31]]
@@ -3517,9 +3517,9 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator_correct_sqrt(<4 x fl
; IEEE-BADFREXP-NEXT: [[TMP29:%.*]] = extractvalue { float, i32 } [[TMP28]], 0
; IEEE-BADFREXP-NEXT: [[TMP30:%.*]] = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float [[TMP4]])
; IEEE-BADFREXP-NEXT: [[TMP31:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP29]])
-; IEEE-BADFREXP-NEXT: [[TMP32:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float undef)
+; IEEE-BADFREXP-NEXT: [[TMP32:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float poison)
; IEEE-BADFREXP-NEXT: [[TMP33:%.*]] = extractvalue { float, i32 } [[TMP32]], 0
-; IEEE-BADFREXP-NEXT: [[TMP34:%.*]] = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float undef)
+; IEEE-BADFREXP-NEXT: [[TMP34:%.*]] = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float poison)
; IEEE-BADFREXP-NEXT: [[TMP35:%.*]] = fmul contract float [[TMP33]], [[TMP31]]
; IEEE-BADFREXP-NEXT: [[TMP36:%.*]] = sub i32 [[TMP34]], [[TMP30]]
; IEEE-BADFREXP-NEXT: [[TMP37:%.*]] = call contract float @llvm.ldexp.f32.i32(float [[TMP35]], i32 [[TMP36]])
@@ -3553,7 +3553,7 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator_correct_sqrt(<4 x fl
; DAZ-NEXT: [[TMP19:%.*]] = extractvalue { float, i32 } [[TMP18]], 0
; DAZ-NEXT: [[TMP20:%.*]] = extractvalue { float, i32 } [[TMP18]], 1
; DAZ-NEXT: [[TMP21:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP19]])
-; DAZ-NEXT: [[TMP22:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float undef)
+; DAZ-NEXT: [[TMP22:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float poison)
; DAZ-NEXT: [[TMP23:%.*]] = extractvalue { float, i32 } [[TMP22]], 0
; DAZ-NEXT: [[TMP24:%.*]] = extractvalue { float, i32 } [[TMP22]], 1
; DAZ-NEXT: [[TMP25:%.*]] = fmul contract float [[TMP23]], [[TMP21]]
@@ -3566,7 +3566,7 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator_correct_sqrt(<4 x fl
; DAZ-NEXT: ret <4 x float> [[PARTIAL_RSQ]]
;
%denom = call contract <4 x float> @llvm.sqrt.v4f32(<4 x float> %arg)
- %partial.rsq = fdiv contract <4 x float> <float 1.0, float -1.0, float 4.0, float undef>, %denom, !fpmath !2
+ %partial.rsq = fdiv contract <4 x float> <float 1.0, float -1.0, float 4.0, float poison>, %denom, !fpmath !2
ret <4 x float> %partial.rsq
}
@@ -3607,7 +3607,7 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator_arcp(<4 x float> %ar
; IEEE-GOODFREXP-NEXT: [[TMP31:%.*]] = sub i32 0, [[TMP30]]
; IEEE-GOODFREXP-NEXT: [[TMP32:%.*]] = call arcp contract float @llvm.amdgcn.rcp.f32(float [[TMP29]])
; IEEE-GOODFREXP-NEXT: [[TMP33:%.*]] = call arcp contract float @llvm.ldexp.f32.i32(float [[TMP32]], i32 [[TMP31]])
-; IEEE-GOODFREXP-NEXT: [[TMP34:%.*]] = fmul arcp contract float undef, [[TMP33]]
+; IEEE-GOODFREXP-NEXT: [[TMP34:%.*]] = fmul arcp contract float poison, [[TMP33]]
; IEEE-GOODFREXP-NEXT: [[TMP35:%.*]] = insertelement <4 x float> poison, float [[TMP14]], i64 0
; IEEE-GOODFREXP-NEXT: [[TMP36:%.*]] = insertelement <4 x float> [[TMP35]], float [[TMP20]], i64 1
; IEEE-GOODFREXP-NEXT: [[TMP37:%.*]] = insertelement <4 x float> [[TMP36]], float [[TMP27]], i64 2
@@ -3650,7 +3650,7 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator_arcp(<4 x float> %ar
; IEEE-BADFREXP-NEXT: [[TMP31:%.*]] = sub i32 0, [[TMP30]]
; IEEE-BADFREXP-NEXT: [[TMP32:%.*]] = call arcp contract float @llvm.amdgcn.rcp.f32(float [[TMP29]])
; IEEE-BADFREXP-NEXT: [[TMP33:%.*]] = call arcp contract float @llvm.ldexp.f32.i32(float [[TMP32]], i32 [[TMP31]])
-; IEEE-BADFREXP-NEXT: [[TMP34:%.*]] = fmul arcp contract float undef, [[TMP33]]
+; IEEE-BADFREXP-NEXT: [[TMP34:%.*]] = fmul arcp contract float poison, [[TMP33]]
; IEEE-BADFREXP-NEXT: [[TMP35:%.*]] = insertelement <4 x float> poison, float [[TMP14]], i64 0
; IEEE-BADFREXP-NEXT: [[TMP36:%.*]] = insertelement <4 x float> [[TMP35]], float [[TMP20]], i64 1
; IEEE-BADFREXP-NEXT: [[TMP37:%.*]] = insertelement <4 x float> [[TMP36]], float [[TMP27]], i64 2
@@ -3681,7 +3681,7 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator_arcp(<4 x float> %ar
; DAZ-NEXT: [[TMP19:%.*]] = call arcp contract float @llvm.amdgcn.rcp.f32(float [[TMP14]])
; DAZ-NEXT: [[TMP20:%.*]] = fmul arcp contract float 4.000000e+00, [[TMP19]]
; DAZ-NEXT: [[TMP21:%.*]] = call arcp contract float @llvm.amdgcn.rcp.f32(float [[TMP15]])
-; DAZ-NEXT: [[TMP22:%.*]] = fmul arcp contract float undef, [[TMP21]]
+; DAZ-NEXT: [[TMP22:%.*]] = fmul arcp contract float poison, [[TMP21]]
; DAZ-NEXT: [[TMP23:%.*]] = insertelement <4 x float> poison, float [[TMP16]], i64 0
; DAZ-NEXT: [[TMP24:%.*]] = insertelement <4 x float> [[TMP23]], float [[TMP18]], i64 1
; DAZ-NEXT: [[TMP25:%.*]] = insertelement <4 x float> [[TMP24]], float [[TMP20]], i64 2
@@ -3689,7 +3689,7 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator_arcp(<4 x float> %ar
; DAZ-NEXT: ret <4 x float> [[PARTIAL_RSQ]]
;
%denom = call contract <4 x float> @llvm.sqrt.v4f32(<4 x float> %arg), !fpmath !2
- %partial.rsq = fdiv contract arcp <4 x float> <float 1.0, float -1.0, float 4.0, float undef>, %denom, !fpmath !2
+ %partial.rsq = fdiv contract arcp <4 x float> <float 1.0, float -1.0, float 4.0, float poison>, %denom, !fpmath !2
ret <4 x float> %partial.rsq
}
@@ -3697,7 +3697,7 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator_arcp_correct(<4 x fl
; IEEE-LABEL: define <4 x float> @rsq_f32_vector_mixed_constant_numerator_arcp_correct(
; IEEE-SAME: <4 x float> [[ARG:%.*]]) #[[ATTR1]] {
; IEEE-NEXT: [[DENOM:%.*]] = call contract <4 x float> @llvm.sqrt.v4f32(<4 x float> [[ARG]]), !fpmath [[META2]]
-; IEEE-NEXT: [[PARTIAL_RSQ:%.*]] = fdiv arcp contract <4 x float> <float 1.000000e+00, float -1.000000e+00, float 4.000000e+00, float undef>, [[DENOM]]
+; IEEE-NEXT: [[PARTIAL_RSQ:%.*]] = fdiv arcp contract <4 x float> <float 1.000000e+00, float -1.000000e+00, float 4.000000e+00, float poison>, [[DENOM]]
; IEEE-NEXT: ret <4 x float> [[PARTIAL_RSQ]]
;
; DAZ-LABEL: define <4 x float> @rsq_f32_vector_mixed_constant_numerator_arcp_correct(
@@ -3714,11 +3714,11 @@ define <4 x float> @rsq_f32_vector_mixed_constant_numerator_arcp_correct(<4 x fl
; DAZ-NEXT: [[TMP10:%.*]] = insertelement <4 x float> [[TMP9]], float [[TMP6]], i64 1
; DAZ-NEXT: [[TMP11:%.*]] = insertelement <4 x float> [[TMP10]], float [[TMP7]], i64 2
; DAZ-NEXT: [[DENOM:%.*]] = insertelement <4 x float> [[TMP11]], float [[TMP8]], i64 3
-; DAZ-NEXT: [[PARTIAL_RSQ:%.*]] = fdiv arcp contract <4 x float> <float 1.000000e+00, float -1.000000e+00, float 4.000000e+00, float undef>, [[DENOM]]
+; DAZ-NEXT: [[PARTIAL_RSQ:%.*]] = fdiv arcp contract <4 x float> <float 1.000000e+00, float -1.000000e+00, float 4.000000e+00, float poison>, [[DENOM]]
; DAZ-NEXT: ret <4 x float> [[PARTIAL_RSQ]]
;
%denom = call contract <4 x float> @llvm.sqrt.v4f32(<4 x float> %arg), !fpmath !2
- %partial.rsq = fdiv contract arcp <4 x float> <float 1.0, float -1.0, float 4.0, float undef>, %denom
+ %partial.rsq = fdiv contract arcp <4 x float> <float 1.0, float -1.0, float 4.0, float poison>, %denom
ret <4 x float> %partial.rsq
}
@@ -3755,7 +3755,7 @@ define <4 x float> @rcp_f32_vector_mixed_constant_numerator_arcp(<4 x float> %ar
; IEEE-GOODFREXP-NEXT: [[TMP28:%.*]] = sub i32 0, [[TMP27]]
; IEEE-GOODFREXP-NEXT: [[TMP29:%.*]] = call arcp float @llvm.amdgcn.rcp.f32(float [[TMP26]])
; IEEE-GOODFREXP-NEXT: [[TMP30:%.*]] = call arcp float @llvm.ldexp.f32.i32(float [[TMP29]], i32 [[TMP28]])
-; IEEE-GOODFREXP-NEXT: [[TMP31:%.*]] = fmul arcp float undef, [[TMP30]]
+; IEEE-GOODFREXP-NEXT: [[TMP31:%.*]] = fmul arcp float poison, [[TMP30]]
; IEEE-GOODFREXP-NEXT: [[TMP32:%.*]] = insertelement <4 x float> poison, float [[TMP10]], i64 0
; IEEE-GOODFREXP-NEXT: [[TMP33:%.*]] = insertelement <4 x float> [[TMP32]], float [[TMP17]], i64 1
; IEEE-GOODFREXP-NEXT: [[TMP34:%.*]] = insertelement <4 x float> [[TMP33]], float [[TMP24]], i64 2
@@ -3794,7 +3794,7 @@ define <4 x float> @rcp_f32_vector_mixed_constant_numerator_arcp(<4 x float> %ar
; IEEE-BADFREXP-NEXT: [[TMP28:%.*]] = sub i32 0, [[TMP27]]
; IEEE-BADFREXP-NEXT: [[TMP29:%.*]] = call arcp float @llvm.amdgcn.rcp.f32(float [[TMP26]])
; IEEE-BADFREXP-NEXT: [[TMP30:%.*]] = call arcp float @llvm.ldexp.f32.i32(float [[TMP29]], i32 [[TMP28]])
-; IEEE-BADFREXP-NEXT: [[TMP31:%.*]] = fmul arcp float undef, [[TMP30]]
+; IEEE-BADFREXP-NEXT: [[TMP31:%.*]] = fmul arcp float poison, [[TMP30]]
; IEEE-BADFREXP-NEXT: [[TMP32:%.*]] = insertelement <4 x float> poison, float [[TMP10]], i64 0
; IEEE-BADFREXP-NEXT: [[TMP33:%.*]] = insertelement <4 x float> [[TMP32]], float [[TMP17]], i64 1
; IEEE-BADFREXP-NEXT: [[TMP34:%.*]] = insertelement <4 x float> [[TMP33]], float [[TMP24]], i64 2
@@ -3813,24 +3813,24 @@ define <4 x float> @rcp_f32_vector_mixed_constant_numerator_arcp(<4 x float> %ar
; DAZ-NEXT: [[TMP8:%.*]] = call arcp float @llvm.amdgcn.rcp.f32(float [[TMP3]])
; DAZ-NEXT: [[TMP9:%.*]] = fmul arcp float 4.000000e+00, [[TMP8]]
; DAZ-NEXT: [[TMP10:%.*]] = call arcp float @llvm.amdgcn.rcp.f32(float [[TMP4]])
-; DAZ-NEXT: [[TMP11:%.*]] = fmul arcp float undef, [[TMP10]]
+; DAZ-NEXT: [[TMP11:%.*]] = fmul arcp float poison, [[TMP10]]
; DAZ-NEXT: [[TMP12:%.*]] = insertelement <4 x float> poison, float [[TMP5]], i64 0
; DAZ-NEXT: [[TMP13:%.*]] = insertelement <4 x float> [[TMP12]], float [[TMP7]], i64 1
; DAZ-NEXT: [[TMP14:%.*]] = insertelement <4 x float> [[TMP13]], float [[TMP9]], i64 2
; DAZ-NEXT: [[PARTIAL_RCP:%.*]] = insertelement <4 x float> [[TMP14]], float [[TMP11]], i64 3
; DAZ-NEXT: ret <4 x float> [[PARTIAL_RCP]]
;
- %partial.rcp = fdiv arcp <4 x float> <float 1.0, float -1.0, float 4.0, float undef>, %arg, !fpmath !2
+ %partial.rcp = fdiv arcp <4 x float> <float 1.0, float -1.0, float 4.0, float poison>, %arg, !fpmath !2
ret <4 x float> %partial.rcp
}
define <4 x float> @rcp_f32_vector_mixed_constant_numerator_arcp_correct(<4 x float> %arg) {
; CHECK-LABEL: define <4 x float> @rcp_f32_vector_mixed_constant_numerator_arcp_correct(
; CHECK-SAME: <4 x float> [[ARG:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: [[PARTIAL_RCP:%.*]] = fdiv arcp <4 x float> <float 1.000000e+00, float -1.000000e+00, float 4.000000e+00, float undef>, [[ARG]]
+; CHECK-NEXT: [[PARTIAL_RCP:%.*]] = fdiv arcp <4 x float> <float 1.000000e+00, float -1.000000e+00, float 4.000000e+00, float poison>, [[ARG]]
; CHECK-NEXT: ret <4 x float> [[PARTIAL_RCP]]
;
- %partial.rcp = fdiv arcp <4 x float> <float 1.0, float -1.0, float 4.0, float undef>, %arg
+ %partial.rcp = fdiv arcp <4 x float> <float 1.0, float -1.0, float 4.0, float poison>, %arg
ret <4 x float> %partial.rcp
}
@@ -3841,7 +3841,7 @@ define <4 x float> @rsq_f32_vector_const_denom(ptr addrspace(1) %out, <2 x float
; IEEE-GOODFREXP-NEXT: [[TMP1:%.*]] = call float @llvm.amdgcn.sqrt.f32(float 4.000000e+00)
; IEEE-GOODFREXP-NEXT: [[TMP2:%.*]] = call float @llvm.amdgcn.sqrt.f32(float 2.000000e+00)
; IEEE-GOODFREXP-NEXT: [[TMP3:%.*]] = call float @llvm.amdgcn.sqrt.f32(float 8.000000e+00)
-; IEEE-GOODFREXP-NEXT: [[TMP4:%.*]] = call float @llvm.amdgcn.sqrt.f32(float undef)
+; IEEE-GOODFREXP-NEXT: [[TMP4:%.*]] = call float @llvm.amdgcn.sqrt.f32(float poison)
; IEEE-GOODFREXP-NEXT: [[TMP5:%.*]] = insertelement <4 x float> poison, float [[TMP1]], i64 0
; IEEE-GOODFREXP-NEXT: [[TMP6:%.*]] = insertelement <4 x float> [[TMP5]], float [[TMP2]], i64 1
; IEEE-GOODFREXP-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[TMP6]], float [[TMP3]], i64 2
@@ -3857,21 +3857,21 @@ define <4 x float> @rsq_f32_vector_const_denom(ptr addrspace(1) %out, <2 x float
; IEEE-GOODFREXP-NEXT: [[TMP16:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP13]])
; IEEE-GOODFREXP-NEXT: [[TMP17:%.*]] = call contract float @llvm.ldexp.f32.i32(float [[TMP16]], i32 [[TMP15]])
; IEEE-GOODFREXP-NEXT: [[TMP18:%.*]] = fneg contract float [[TMP9]]
-; IEEE-GOODFREXP-NEXT: [[TMP25:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float [[TMP18]])
-; IEEE-GOODFREXP-NEXT: [[TMP26:%.*]] = extractvalue { float, i32 } [[TMP25]], 0
-; IEEE-GOODFREXP-NEXT: [[TMP27:%.*]] = extractvalue { float, i32 } [[TMP25]], 1
-; IEEE-GOODFREXP-NEXT: [[TMP22:%.*]] = sub i32 0, [[TMP27]]
-; IEEE-GOODFREXP-NEXT: [[TMP28:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP26]])
-; IEEE-GOODFREXP-NEXT: [[TMP24:%.*]] = call contract float @llvm.ldexp.f32.i32(float [[TMP28]], i32 [[TMP22]])
-; IEEE-GOODFREXP-NEXT: [[TMP48:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float [[TMP10]])
+; IEEE-GOODFREXP-NEXT: [[TMP48:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float [[TMP18]])
; IEEE-GOODFREXP-NEXT: [[TMP49:%.*]] = extractvalue { float, i32 } [[TMP48]], 0
; IEEE-GOODFREXP-NEXT: [[TMP50:%.*]] = extractvalue { float, i32 } [[TMP48]], 1
+; IEEE-GOODFREXP-NEXT: [[TMP22:%.*]] = sub i32 0, [[TMP50]]
; IEEE-GOODFREXP-NEXT: [[TMP51:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP49]])
-; IEEE-GOODFREXP-NEXT: [[TMP29:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float undef)
+; IEEE-GOODFREXP-NEXT: [[TMP24:%.*]] = call contract float @llvm.ldexp.f32.i32(float [[TMP51]], i32 [[TMP22]])
+; IEEE-GOODFREXP-NEXT: [[TMP29:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float [[TMP10]])
; IEEE-GOODFREXP-NEXT: [[TMP30:%.*]] = extractvalue { float, i32 } [[TMP29]], 0
; IEEE-GOODFREXP-NEXT: [[TMP31:%.*]] = extractvalue { float, i32 } [[TMP29]], 1
-; IEEE-GOODFREXP-NEXT: [[TMP32:%.*]] = fmul contract float [[TMP30]], [[TMP51]]
-; IEEE-GOODFREXP-NEXT: [[TMP33:%.*]] = sub i32 [[TMP31]], [[TMP50]]
+; IEEE-GOODFREXP-NEXT: [[TMP28:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP30]])
+; IEEE-GOODFREXP-NEXT: [[TMP52:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float poison)
+; IEEE-GOODFREXP-NEXT: [[TMP53:%.*]] = extractvalue { float, i32 } [[TMP52]], 0
+; IEEE-GOODFREXP-NEXT: [[TMP54:%.*]] = extractvalue { float, i32 } [[TMP52]], 1
+; IEEE-GOODFREXP-NEXT: [[TMP32:%.*]] = fmul contract float [[TMP53]], [[TMP28]]
+; IEEE-GOODFREXP-NEXT: [[TMP33:%.*]] = sub i32 [[TMP54]], [[TMP31]]
; IEEE-GOODFREXP-NEXT: [[TMP34:%.*]] = call contract float @llvm.ldexp.f32.i32(float [[TMP32]], i32 [[TMP33]])
; IEEE-GOODFREXP-NEXT: [[TMP35:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float [[TMP11]])
; IEEE-GOODFREXP-NEXT: [[TMP36:%.*]] = extractvalue { float, i32 } [[TMP35]], 0
@@ -3894,7 +3894,7 @@ define <4 x float> @rsq_f32_vector_const_denom(ptr addrspace(1) %out, <2 x float
; IEEE-BADFREXP-NEXT: [[TMP1:%.*]] = call float @llvm.amdgcn.sqrt.f32(float 4.000000e+00)
; IEEE-BADFREXP-NEXT: [[TMP2:%.*]] = call float @llvm.amdgcn.sqrt.f32(float 2.000000e+00)
; IEEE-BADFREXP-NEXT: [[TMP3:%.*]] = call float @llvm.amdgcn.sqrt.f32(float 8.000000e+00)
-; IEEE-BADFREXP-NEXT: [[TMP4:%.*]] = call float @llvm.amdgcn.sqrt.f32(float undef)
+; IEEE-BADFREXP-NEXT: [[TMP4:%.*]] = call float @llvm.amdgcn.sqrt.f32(float poison)
; IEEE-BADFREXP-NEXT: [[TMP5:%.*]] = insertelement <4 x float> poison, float [[TMP1]], i64 0
; IEEE-BADFREXP-NEXT: [[TMP6:%.*]] = insertelement <4 x float> [[TMP5]], float [[TMP2]], i64 1
; IEEE-BADFREXP-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[TMP6]], float [[TMP3]], i64 2
@@ -3910,20 +3910,20 @@ define <4 x float> @rsq_f32_vector_const_denom(ptr addrspace(1) %out, <2 x float
; IEEE-BADFREXP-NEXT: [[TMP16:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP13]])
; IEEE-BADFREXP-NEXT: [[TMP17:%.*]] = call contract float @llvm.ldexp.f32.i32(float [[TMP16]], i32 [[TMP15]])
; IEEE-BADFREXP-NEXT: [[TMP18:%.*]] = fneg contract float [[TMP9]]
-; IEEE-BADFREXP-NEXT: [[TMP25:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float [[TMP18]])
-; IEEE-BADFREXP-NEXT: [[TMP26:%.*]] = extractvalue { float, i32 } [[TMP25]], 0
+; IEEE-BADFREXP-NEXT: [[TMP48:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float [[TMP18]])
+; IEEE-BADFREXP-NEXT: [[TMP49:%.*]] = extractvalue { float, i32 } [[TMP48]], 0
; IEEE-BADFREXP-NEXT: [[TMP21:%.*]] = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float [[TMP18]])
; IEEE-BADFREXP-NEXT: [[TMP22:%.*]] = sub i32 0, [[TMP21]]
-; IEEE-BADFREXP-NEXT: [[TMP28:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP26]])
-; IEEE-BADFREXP-NEXT: [[TMP24:%.*]] = call contract float @llvm.ldexp.f32.i32(float [[TMP28]], i32 [[TMP22]])
-; IEEE-BADFREXP-NEXT: [[TMP48:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float [[TMP10]])
-; IEEE-BADFREXP-NEXT: [[TMP49:%.*]] = extractvalue { float, i32 } [[TMP48]], 0
-; IEEE-BADFREXP-NEXT: [[TMP27:%.*]] = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float [[TMP10]])
; IEEE-BADFREXP-NEXT: [[TMP50:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP49]])
-; IEEE-BADFREXP-NEXT: [[TMP29:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float undef)
+; IEEE-BADFREXP-NEXT: [[TMP24:%.*]] = call contract float @llvm.ldexp.f32.i32(float [[TMP50]], i32 [[TMP22]])
+; IEEE-BADFREXP-NEXT: [[TMP29:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float [[TMP10]])
; IEEE-BADFREXP-NEXT: [[TMP30:%.*]] = extractvalue { float, i32 } [[TMP29]], 0
-; IEEE-BADFREXP-NEXT: [[TMP31:%.*]] = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float undef)
-; IEEE-BADFREXP-NEXT: [[TMP32:%.*]] = fmul contract float [[TMP30]], [[TMP50]]
+; IEEE-BADFREXP-NEXT: [[TMP27:%.*]] = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float [[TMP10]])
+; IEEE-BADFREXP-NEXT: [[TMP28:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP30]])
+; IEEE-BADFREXP-NEXT: [[TMP51:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float poison)
+; IEEE-BADFREXP-NEXT: [[TMP52:%.*]] = extractvalue { float, i32 } [[TMP51]], 0
+; IEEE-BADFREXP-NEXT: [[TMP31:%.*]] = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float poison)
+; IEEE-BADFREXP-NEXT: [[TMP32:%.*]] = fmul contract float [[TMP52]], [[TMP28]]
; IEEE-BADFREXP-NEXT: [[TMP33:%.*]] = sub i32 [[TMP31]], [[TMP27]]
; IEEE-BADFREXP-NEXT: [[TMP34:%.*]] = call contract float @llvm.ldexp.f32.i32(float [[TMP32]], i32 [[TMP33]])
; IEEE-BADFREXP-NEXT: [[TMP35:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float [[TMP11]])
@@ -3947,7 +3947,7 @@ define <4 x float> @rsq_f32_vector_const_denom(ptr addrspace(1) %out, <2 x float
; DAZ-NEXT: [[TMP1:%.*]] = call float @llvm.amdgcn.sqrt.f32(float 4.000000e+00)
; DAZ-NEXT: [[TMP2:%.*]] = call float @llvm.amdgcn.sqrt.f32(float 2.000000e+00)
; DAZ-NEXT: [[TMP3:%.*]] = call float @llvm.amdgcn.sqrt.f32(float 8.000000e+00)
-; DAZ-NEXT: [[TMP4:%.*]] = call float @llvm.amdgcn.sqrt.f32(float undef)
+; DAZ-NEXT: [[TMP4:%.*]] = call float @llvm.amdgcn.sqrt.f32(float poison)
; DAZ-NEXT: [[TMP5:%.*]] = insertelement <4 x float> poison, float [[TMP1]], i64 0
; DAZ-NEXT: [[TMP6:%.*]] = insertelement <4 x float> [[TMP5]], float [[TMP2]], i64 1
; DAZ-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[TMP6]], float [[TMP3]], i64 2
@@ -3963,7 +3963,7 @@ define <4 x float> @rsq_f32_vector_const_denom(ptr addrspace(1) %out, <2 x float
; DAZ-NEXT: [[TMP16:%.*]] = extractvalue { float, i32 } [[TMP15]], 0
; DAZ-NEXT: [[TMP17:%.*]] = extractvalue { float, i32 } [[TMP15]], 1
; DAZ-NEXT: [[TMP18:%.*]] = call contract float @llvm.amdgcn.rcp.f32(float [[TMP16]])
-; DAZ-NEXT: [[TMP19:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float undef)
+; DAZ-NEXT: [[TMP19:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float poison)
; DAZ-NEXT: [[TMP20:%.*]] = extractvalue { float, i32 } [[TMP19]], 0
; DAZ-NEXT: [[TMP21:%.*]] = extractvalue { float, i32 } [[TMP19]], 1
; DAZ-NEXT: [[TMP22:%.*]] = fmul contract float [[TMP20]], [[TMP18]]
@@ -3985,8 +3985,8 @@ define <4 x float> @rsq_f32_vector_const_denom(ptr addrspace(1) %out, <2 x float
; DAZ-NEXT: [[PARTIAL_RSQ:%.*]] = insertelement <4 x float> [[TMP37]], float [[TMP34]], i64 3
; DAZ-NEXT: ret <4 x float> [[PARTIAL_RSQ]]
;
- %sqrt = call contract <4 x float> @llvm.sqrt.v4f32(<4 x float> <float 4.0, float 2.0, float 8.0, float undef>), !fpmath !2
- %partial.rsq = fdiv contract <4 x float> <float 1.0, float -1.0, float undef, float 2.0>, %sqrt, !fpmath !2
+ %sqrt = call contract <4 x float> @llvm.sqrt.v4f32(<4 x float> <float 4.0, float 2.0, float 8.0, float poison>), !fpmath !2
+ %partial.rsq = fdiv contract <4 x float> <float 1.0, float -1.0, float poison, float 2.0>, %sqrt, !fpmath !2
ret <4 x float> %partial.rsq
}
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll
index 2e64a3456c24..7932f8d1fc5b 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll
@@ -272,8 +272,8 @@ define half @test_rootn_f16_1(half %x) {
define half @test_rootn_f16_2(half %x) {
; CHECK-LABEL: define half @test_rootn_f16_2(
; CHECK-SAME: half [[X:%.*]]) {
-; CHECK-NEXT: [[__ROOTN2SQRT:%.*]] = call half @_Z4sqrtDh(half [[X]])
-; CHECK-NEXT: ret half [[__ROOTN2SQRT]]
+; CHECK-NEXT: [[CALL:%.*]] = call half @llvm.sqrt.f16(half [[X]]), !fpmath [[META0:![0-9]+]]
+; CHECK-NEXT: ret half [[CALL]]
;
%call = tail call half @_Z5rootnDhi(half %x, i32 2)
ret half %call
@@ -302,7 +302,8 @@ define half @test_rootn_f16_neg1(half %x) {
define half @test_rootn_f16_neg2(half %x) {
; CHECK-LABEL: define half @test_rootn_f16_neg2(
; CHECK-SAME: half [[X:%.*]]) {
-; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = call half @_Z5rsqrtDh(half [[X]])
+; CHECK-NEXT: [[TMP1:%.*]] = call contract half @llvm.sqrt.f16(half [[X]])
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = fdiv contract half 0xH3C00, [[TMP1]], !fpmath [[META0]]
; CHECK-NEXT: ret half [[__ROOTN2RSQRT]]
;
%call = tail call half @_Z5rootnDhi(half %x, i32 -2)
@@ -342,8 +343,7 @@ define <2 x half> @test_rootn_v2f16_0(<2 x half> %x) {
define <2 x half> @test_rootn_v2f16_1(<2 x half> %x) {
; CHECK-LABEL: define <2 x half> @test_rootn_v2f16_1(
; CHECK-SAME: <2 x half> [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x half> @_Z5rootnDv2_DhDv2_i(<2 x half> [[X]], <2 x i32> <i32 1, i32 1>)
-; CHECK-NEXT: ret <2 x half> [[CALL]]
+; CHECK-NEXT: ret <2 x half> [[X]]
;
%call = tail call <2 x half> @_Z5rootnDv2_DhDv2_i(<2 x half> %x, <2 x i32> <i32 1, i32 1>)
ret <2 x half> %call
@@ -352,7 +352,7 @@ define <2 x half> @test_rootn_v2f16_1(<2 x half> %x) {
define <2 x half> @test_rootn_v2f16_2(<2 x half> %x) {
; CHECK-LABEL: define <2 x half> @test_rootn_v2f16_2(
; CHECK-SAME: <2 x half> [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x half> @_Z5rootnDv2_DhDv2_i(<2 x half> [[X]], <2 x i32> <i32 2, i32 2>)
+; CHECK-NEXT: [[CALL:%.*]] = call <2 x half> @llvm.sqrt.v2f16(<2 x half> [[X]]), !fpmath [[META0]]
; CHECK-NEXT: ret <2 x half> [[CALL]]
;
%call = tail call <2 x half> @_Z5rootnDv2_DhDv2_i(<2 x half> %x, <2 x i32> <i32 2, i32 2>)
@@ -362,8 +362,8 @@ define <2 x half> @test_rootn_v2f16_2(<2 x half> %x) {
define <2 x half> @test_rootn_v2f16_neg1(<2 x half> %x) {
; CHECK-LABEL: define <2 x half> @test_rootn_v2f16_neg1(
; CHECK-SAME: <2 x half> [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x half> @_Z5rootnDv2_DhDv2_i(<2 x half> [[X]], <2 x i32> <i32 -1, i32 -1>)
-; CHECK-NEXT: ret <2 x half> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2DIV:%.*]] = fdiv <2 x half> <half 0xH3C00, half 0xH3C00>, [[X]]
+; CHECK-NEXT: ret <2 x half> [[__ROOTN2DIV]]
;
%call = tail call <2 x half> @_Z5rootnDv2_DhDv2_i(<2 x half> %x, <2 x i32> <i32 -1, i32 -1>)
ret <2 x half> %call
@@ -372,8 +372,9 @@ define <2 x half> @test_rootn_v2f16_neg1(<2 x half> %x) {
define <2 x half> @test_rootn_v2f16_neg2(<2 x half> %x) {
; CHECK-LABEL: define <2 x half> @test_rootn_v2f16_neg2(
; CHECK-SAME: <2 x half> [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x half> @_Z5rootnDv2_DhDv2_i(<2 x half> [[X]], <2 x i32> <i32 -2, i32 -2>)
-; CHECK-NEXT: ret <2 x half> [[CALL]]
+; CHECK-NEXT: [[TMP1:%.*]] = call contract <2 x half> @llvm.sqrt.v2f16(<2 x half> [[X]])
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = fdiv contract <2 x half> <half 0xH3C00, half 0xH3C00>, [[TMP1]], !fpmath [[META0]]
+; CHECK-NEXT: ret <2 x half> [[__ROOTN2RSQRT]]
;
%call = tail call <2 x half> @_Z5rootnDv2_DhDv2_i(<2 x half> %x, <2 x i32> <i32 -2, i32 -2>)
ret <2 x half> %call
@@ -512,7 +513,8 @@ define float @test_rootn_f32__y_1__strictfp(float %x) #1 {
; CHECK-LABEL: define float @test_rootn_f32__y_1__strictfp(
; CHECK-SAME: float [[X:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: ret float [[X]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 1) #[[ATTR0]]
+; CHECK-NEXT: ret float [[CALL]]
;
entry:
%call = tail call float @_Z5rootnfi(float %x, i32 1) #1
@@ -523,8 +525,7 @@ define <2 x float> @test_rootn_v2f32__y_1(<2 x float> %x) {
; CHECK-LABEL: define <2 x float> @test_rootn_v2f32__y_1(
; CHECK-SAME: <2 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 1, i32 1>)
-; CHECK-NEXT: ret <2 x float> [[CALL]]
+; CHECK-NEXT: ret <2 x float> [[X]]
;
entry:
%call = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> %x, <2 x i32> <i32 1, i32 1>)
@@ -547,8 +548,7 @@ define <2 x float> @test_rootn_v2f32__y_1_undef(<2 x float> %x) {
; CHECK-LABEL: define <2 x float> @test_rootn_v2f32__y_1_undef(
; CHECK-SAME: <2 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 1, i32 poison>)
-; CHECK-NEXT: ret <2 x float> [[CALL]]
+; CHECK-NEXT: ret <2 x float> [[X]]
;
entry:
%call = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> %x, <2 x i32> <i32 1, i32 poison>)
@@ -559,8 +559,7 @@ define <3 x float> @test_rootn_v3f32__y_1(<3 x float> %x) {
; CHECK-LABEL: define <3 x float> @test_rootn_v3f32__y_1(
; CHECK-SAME: <3 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> [[X]], <3 x i32> <i32 1, i32 1, i32 1>)
-; CHECK-NEXT: ret <3 x float> [[CALL]]
+; CHECK-NEXT: ret <3 x float> [[X]]
;
entry:
%call = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> %x, <3 x i32> <i32 1, i32 1, i32 1>)
@@ -571,8 +570,7 @@ define <3 x float> @test_rootn_v3f32__y_1_undef(<3 x float> %x) {
; CHECK-LABEL: define <3 x float> @test_rootn_v3f32__y_1_undef(
; CHECK-SAME: <3 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> [[X]], <3 x i32> <i32 1, i32 1, i32 poison>)
-; CHECK-NEXT: ret <3 x float> [[CALL]]
+; CHECK-NEXT: ret <3 x float> [[X]]
;
entry:
%call = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> %x, <3 x i32> <i32 1, i32 1, i32 poison>)
@@ -583,8 +581,7 @@ define <4 x float> @test_rootn_v4f32__y_1(<4 x float> %x) {
; CHECK-LABEL: define <4 x float> @test_rootn_v4f32__y_1(
; CHECK-SAME: <4 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <4 x float> @_Z5rootnDv4_fDv4_i(<4 x float> [[X]], <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
-; CHECK-NEXT: ret <4 x float> [[CALL]]
+; CHECK-NEXT: ret <4 x float> [[X]]
;
entry:
%call = tail call <4 x float> @_Z5rootnDv4_fDv4_i(<4 x float> %x, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
@@ -595,8 +592,7 @@ define <8 x float> @test_rootn_v8f32__y_1(<8 x float> %x) {
; CHECK-LABEL: define <8 x float> @test_rootn_v8f32__y_1(
; CHECK-SAME: <8 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <8 x float> @_Z5rootnDv8_fDv8_i(<8 x float> [[X]], <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
-; CHECK-NEXT: ret <8 x float> [[CALL]]
+; CHECK-NEXT: ret <8 x float> [[X]]
;
entry:
%call = tail call <8 x float> @_Z5rootnDv8_fDv8_i(<8 x float> %x, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
@@ -607,8 +603,7 @@ define <16 x float> @test_rootn_v16f32__y_1(<16 x float> %x) {
; CHECK-LABEL: define <16 x float> @test_rootn_v16f32__y_1(
; CHECK-SAME: <16 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <16 x float> @_Z5rootnDv16_fDv16_i(<16 x float> [[X]], <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
-; CHECK-NEXT: ret <16 x float> [[CALL]]
+; CHECK-NEXT: ret <16 x float> [[X]]
;
entry:
%call = tail call <16 x float> @_Z5rootnDv16_fDv16_i(<16 x float> %x, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
@@ -619,8 +614,8 @@ define float @test_rootn_f32__y_2(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_2(
; CHECK-SAME: float [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__ROOTN2SQRT:%.*]] = call float @_Z4sqrtf(float [[X]])
-; CHECK-NEXT: ret float [[__ROOTN2SQRT]]
+; CHECK-NEXT: [[CALL:%.*]] = call float @llvm.sqrt.f32(float [[X]]), !fpmath [[META0]]
+; CHECK-NEXT: ret float [[CALL]]
;
entry:
%call = tail call float @_Z5rootnfi(float %x, i32 2)
@@ -631,8 +626,8 @@ define float @test_rootn_f32__y_2_flags(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_2_flags(
; CHECK-SAME: float [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__ROOTN2SQRT:%.*]] = call nnan nsz float @_Z4sqrtf(float [[X]])
-; CHECK-NEXT: ret float [[__ROOTN2SQRT]]
+; CHECK-NEXT: [[CALL:%.*]] = call nnan nsz float @llvm.sqrt.f32(float [[X]]), !fpmath [[META0]]
+; CHECK-NEXT: ret float [[CALL]]
;
entry:
%call = tail call nnan nsz float @_Z5rootnfi(float %x, i32 2)
@@ -644,8 +639,8 @@ define float @test_rootn_f32__y_2_fpmath_3(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_2_fpmath_3(
; CHECK-SAME: float [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__ROOTN2SQRT:%.*]] = call nnan nsz float @_Z4sqrtf(float [[X]])
-; CHECK-NEXT: ret float [[__ROOTN2SQRT]]
+; CHECK-NEXT: [[CALL:%.*]] = call nnan nsz float @llvm.sqrt.f32(float [[X]]), !fpmath [[META1:![0-9]+]]
+; CHECK-NEXT: ret float [[CALL]]
;
entry:
%call = tail call nnan nsz float @_Z5rootnfi(float %x, i32 2), !fpmath !0
@@ -656,7 +651,7 @@ define <2 x float> @test_rootn_v2f32__y_2_flags(<2 x float> %x) {
; CHECK-LABEL: define <2 x float> @test_rootn_v2f32__y_2_flags(
; CHECK-SAME: <2 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call nnan nsz <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 2, i32 2>)
+; CHECK-NEXT: [[CALL:%.*]] = call nnan nsz <2 x float> @llvm.sqrt.v2f32(<2 x float> [[X]]), !fpmath [[META0]]
; CHECK-NEXT: ret <2 x float> [[CALL]]
;
entry:
@@ -668,7 +663,7 @@ define <3 x float> @test_rootn_v3f32__y_2(<3 x float> %x) {
; CHECK-LABEL: define <3 x float> @test_rootn_v3f32__y_2(
; CHECK-SAME: <3 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> [[X]], <3 x i32> <i32 2, i32 2, i32 2>)
+; CHECK-NEXT: [[CALL:%.*]] = call <3 x float> @llvm.sqrt.v3f32(<3 x float> [[X]]), !fpmath [[META0]]
; CHECK-NEXT: ret <3 x float> [[CALL]]
;
entry:
@@ -680,7 +675,7 @@ define <3 x float> @test_rootn_v3f32__y_2_undef(<3 x float> %x) {
; CHECK-LABEL: define <3 x float> @test_rootn_v3f32__y_2_undef(
; CHECK-SAME: <3 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> [[X]], <3 x i32> <i32 2, i32 poison, i32 2>)
+; CHECK-NEXT: [[CALL:%.*]] = call <3 x float> @llvm.sqrt.v3f32(<3 x float> [[X]]), !fpmath [[META0]]
; CHECK-NEXT: ret <3 x float> [[CALL]]
;
entry:
@@ -692,7 +687,7 @@ define <4 x float> @test_rootn_v4f32__y_2(<4 x float> %x) {
; CHECK-LABEL: define <4 x float> @test_rootn_v4f32__y_2(
; CHECK-SAME: <4 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <4 x float> @_Z5rootnDv4_fDv4_i(<4 x float> [[X]], <4 x i32> <i32 2, i32 2, i32 2, i32 2>)
+; CHECK-NEXT: [[CALL:%.*]] = call <4 x float> @llvm.sqrt.v4f32(<4 x float> [[X]]), !fpmath [[META0]]
; CHECK-NEXT: ret <4 x float> [[CALL]]
;
entry:
@@ -704,7 +699,7 @@ define <8 x float> @test_rootn_v8f32__y_2(<8 x float> %x) {
; CHECK-LABEL: define <8 x float> @test_rootn_v8f32__y_2(
; CHECK-SAME: <8 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <8 x float> @_Z5rootnDv8_fDv8_i(<8 x float> [[X]], <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>)
+; CHECK-NEXT: [[CALL:%.*]] = call <8 x float> @llvm.sqrt.v8f32(<8 x float> [[X]]), !fpmath [[META0]]
; CHECK-NEXT: ret <8 x float> [[CALL]]
;
entry:
@@ -716,7 +711,7 @@ define <16 x float> @test_rootn_v16f32__y_2(<16 x float> %x) {
; CHECK-LABEL: define <16 x float> @test_rootn_v16f32__y_2(
; CHECK-SAME: <16 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <16 x float> @_Z5rootnDv16_fDv16_i(<16 x float> [[X]], <16 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>)
+; CHECK-NEXT: [[CALL:%.*]] = call <16 x float> @llvm.sqrt.v16f32(<16 x float> [[X]]), !fpmath [[META0]]
; CHECK-NEXT: ret <16 x float> [[CALL]]
;
entry:
@@ -740,8 +735,8 @@ define <2 x float> @test_rootn_v2f32__y_3(<2 x float> %x) {
; CHECK-LABEL: define <2 x float> @test_rootn_v2f32__y_3(
; CHECK-SAME: <2 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 3, i32 3>)
-; CHECK-NEXT: ret <2 x float> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2CBRT:%.*]] = call <2 x float> @_Z4cbrtDv2_f(<2 x float> [[X]])
+; CHECK-NEXT: ret <2 x float> [[__ROOTN2CBRT]]
;
entry:
%call = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> %x, <2 x i32> <i32 3, i32 3>)
@@ -764,7 +759,7 @@ define <2 x float> @test_rootn_v2f32__y_nonsplat_2_poison(<2 x float> %x) {
; CHECK-LABEL: define <2 x float> @test_rootn_v2f32__y_nonsplat_2_poison(
; CHECK-SAME: <2 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 2, i32 poison>)
+; CHECK-NEXT: [[CALL:%.*]] = call <2 x float> @llvm.sqrt.v2f32(<2 x float> [[X]]), !fpmath [[META0]]
; CHECK-NEXT: ret <2 x float> [[CALL]]
;
entry:
@@ -800,8 +795,8 @@ define <2 x float> @test_rootn_v2f32__y_neg1(<2 x float> %x) {
; CHECK-LABEL: define <2 x float> @test_rootn_v2f32__y_neg1(
; CHECK-SAME: <2 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 -1, i32 -1>)
-; CHECK-NEXT: ret <2 x float> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2DIV:%.*]] = fdiv <2 x float> <float 1.000000e+00, float 1.000000e+00>, [[X]]
+; CHECK-NEXT: ret <2 x float> [[__ROOTN2DIV]]
;
entry:
%call = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> %x, <2 x i32> <i32 -1, i32 -1>)
@@ -812,8 +807,8 @@ define <3 x float> @test_rootn_v3f32__y_neg1(<3 x float> %x) {
; CHECK-LABEL: define <3 x float> @test_rootn_v3f32__y_neg1(
; CHECK-SAME: <3 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> [[X]], <3 x i32> <i32 -1, i32 -1, i32 -1>)
-; CHECK-NEXT: ret <3 x float> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2DIV:%.*]] = fdiv <3 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, [[X]]
+; CHECK-NEXT: ret <3 x float> [[__ROOTN2DIV]]
;
entry:
%call = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> %x, <3 x i32> <i32 -1, i32 -1, i32 -1>)
@@ -824,8 +819,8 @@ define <3 x float> @test_rootn_v3f32__y_neg1_undef(<3 x float> %x) {
; CHECK-LABEL: define <3 x float> @test_rootn_v3f32__y_neg1_undef(
; CHECK-SAME: <3 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> [[X]], <3 x i32> <i32 -1, i32 -1, i32 poison>)
-; CHECK-NEXT: ret <3 x float> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2DIV:%.*]] = fdiv <3 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, [[X]]
+; CHECK-NEXT: ret <3 x float> [[__ROOTN2DIV]]
;
entry:
%call = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> %x, <3 x i32> <i32 -1, i32 -1, i32 poison>)
@@ -836,8 +831,8 @@ define <4 x float> @test_rootn_v4f32__y_neg1(<4 x float> %x) {
; CHECK-LABEL: define <4 x float> @test_rootn_v4f32__y_neg1(
; CHECK-SAME: <4 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <4 x float> @_Z5rootnDv4_fDv4_i(<4 x float> [[X]], <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
-; CHECK-NEXT: ret <4 x float> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2DIV:%.*]] = fdiv <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, [[X]]
+; CHECK-NEXT: ret <4 x float> [[__ROOTN2DIV]]
;
entry:
%call = tail call <4 x float> @_Z5rootnDv4_fDv4_i(<4 x float> %x, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
@@ -848,8 +843,8 @@ define <8 x float> @test_rootn_v8f32__y_neg1(<8 x float> %x) {
; CHECK-LABEL: define <8 x float> @test_rootn_v8f32__y_neg1(
; CHECK-SAME: <8 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <8 x float> @_Z5rootnDv8_fDv8_i(<8 x float> [[X]], <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>)
-; CHECK-NEXT: ret <8 x float> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2DIV:%.*]] = fdiv <8 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, [[X]]
+; CHECK-NEXT: ret <8 x float> [[__ROOTN2DIV]]
;
entry:
%call = tail call <8 x float> @_Z5rootnDv8_fDv8_i(<8 x float> %x, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>)
@@ -860,8 +855,8 @@ define <16 x float> @test_rootn_v16f32__y_neg1(<16 x float> %x) {
; CHECK-LABEL: define <16 x float> @test_rootn_v16f32__y_neg1(
; CHECK-SAME: <16 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <16 x float> @_Z5rootnDv16_fDv16_i(<16 x float> [[X]], <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>)
-; CHECK-NEXT: ret <16 x float> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2DIV:%.*]] = fdiv <16 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, [[X]]
+; CHECK-NEXT: ret <16 x float> [[__ROOTN2DIV]]
;
entry:
%call = tail call <16 x float> @_Z5rootnDv16_fDv16_i(<16 x float> %x, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>)
@@ -872,7 +867,8 @@ define float @test_rootn_f32__y_neg2(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_neg2(
; CHECK-SAME: float [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = call float @_Z5rsqrtf(float [[X]])
+; CHECK-NEXT: [[TMP0:%.*]] = call contract float @llvm.sqrt.f32(float [[X]])
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = fdiv contract float 1.000000e+00, [[TMP0]], !fpmath [[META0]]
; CHECK-NEXT: ret float [[__ROOTN2RSQRT]]
;
entry:
@@ -884,7 +880,8 @@ define float @test_rootn_f32__y_neg2__flags(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_neg2__flags(
; CHECK-SAME: float [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = call nnan nsz float @_Z5rsqrtf(float [[X]])
+; CHECK-NEXT: [[TMP0:%.*]] = call nnan nsz contract float @llvm.sqrt.f32(float [[X]])
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = fdiv nnan nsz contract float 1.000000e+00, [[TMP0]], !fpmath [[META0]]
; CHECK-NEXT: ret float [[__ROOTN2RSQRT]]
;
entry:
@@ -896,7 +893,7 @@ define float @test_rootn_f32__y_neg2__strictfp(float %x) #1 {
; CHECK-LABEL: define float @test_rootn_f32__y_neg2__strictfp(
; CHECK-SAME: float [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = call float @_Z5rsqrtf(float [[X]]) #[[ATTR0]]
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 -2) #[[ATTR0]]
; CHECK-NEXT: ret float [[__ROOTN2RSQRT]]
;
entry:
@@ -908,7 +905,7 @@ define float @test_rootn_f32__y_neg2__noinline(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_neg2__noinline(
; CHECK-SAME: float [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = call float @_Z5rsqrtf(float [[X]])
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 -2) #[[ATTR3:[0-9]+]]
; CHECK-NEXT: ret float [[__ROOTN2RSQRT]]
;
entry:
@@ -920,7 +917,7 @@ define float @test_rootn_f32__y_neg2__nobuiltin(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_neg2__nobuiltin(
; CHECK-SAME: float [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 -2) #[[ATTR2:[0-9]+]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 -2) #[[ATTR4:[0-9]+]]
; CHECK-NEXT: ret float [[CALL]]
;
entry:
@@ -932,8 +929,9 @@ define <2 x float> @test_rootn_v2f32__y_neg2(<2 x float> %x) {
; CHECK-LABEL: define <2 x float> @test_rootn_v2f32__y_neg2(
; CHECK-SAME: <2 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 -2, i32 -2>)
-; CHECK-NEXT: ret <2 x float> [[CALL]]
+; CHECK-NEXT: [[TMP0:%.*]] = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> [[X]])
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = fdiv contract <2 x float> <float 1.000000e+00, float 1.000000e+00>, [[TMP0]], !fpmath [[META0]]
+; CHECK-NEXT: ret <2 x float> [[__ROOTN2RSQRT]]
;
entry:
%call = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> %x, <2 x i32> <i32 -2, i32 -2>)
@@ -944,8 +942,9 @@ define <2 x float> @test_rootn_v2f32__y_neg2__flags(<2 x float> %x) {
; CHECK-LABEL: define <2 x float> @test_rootn_v2f32__y_neg2__flags(
; CHECK-SAME: <2 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call nnan nsz <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 -2, i32 -2>)
-; CHECK-NEXT: ret <2 x float> [[CALL]]
+; CHECK-NEXT: [[TMP0:%.*]] = call nnan nsz contract <2 x float> @llvm.sqrt.v2f32(<2 x float> [[X]])
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = fdiv nnan nsz contract <2 x float> <float 1.000000e+00, float 1.000000e+00>, [[TMP0]], !fpmath [[META0]]
+; CHECK-NEXT: ret <2 x float> [[__ROOTN2RSQRT]]
;
entry:
%call = tail call nsz nnan <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> %x, <2 x i32> <i32 -2, i32 -2>)
@@ -956,8 +955,8 @@ define <2 x float> @test_rootn_v2f32__y_neg2__strictfp(<2 x float> %x) #1 {
; CHECK-LABEL: define <2 x float> @test_rootn_v2f32__y_neg2__strictfp(
; CHECK-SAME: <2 x float> [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 -2, i32 -2>) #[[ATTR0]]
-; CHECK-NEXT: ret <2 x float> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 -2, i32 -2>) #[[ATTR0]]
+; CHECK-NEXT: ret <2 x float> [[__ROOTN2RSQRT]]
;
entry:
%call = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> %x, <2 x i32> <i32 -2, i32 -2>) #1
@@ -1132,7 +1131,7 @@ define float @test_rootn_fast_f32_nobuiltin(float %x, i32 %y) {
; CHECK-LABEL: define float @test_rootn_fast_f32_nobuiltin(
; CHECK-SAME: float [[X:%.*]], i32 [[Y:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call fast float @_Z5rootnfi(float [[X]], i32 [[Y]]) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call fast float @_Z5rootnfi(float [[X]], i32 [[Y]]) #[[ATTR4]]
; CHECK-NEXT: ret float [[CALL]]
;
entry:
@@ -1266,8 +1265,8 @@ define <2 x float> @test_rootn_afn_nnan_ninf_v2f32__y_3(<2 x float> %x) {
; CHECK-LABEL: define <2 x float> @test_rootn_afn_nnan_ninf_v2f32__y_3(
; CHECK-SAME: <2 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call nnan ninf afn <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 3, i32 3>)
-; CHECK-NEXT: ret <2 x float> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2CBRT:%.*]] = call nnan ninf afn <2 x float> @_Z4cbrtDv2_f(<2 x float> [[X]])
+; CHECK-NEXT: ret <2 x float> [[__ROOTN2CBRT]]
;
entry:
%call = tail call afn nnan ninf <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> %x, <2 x i32> <i32 3, i32 3>)
@@ -1427,7 +1426,7 @@ entry:
define float @test_rootn_f32__y_0_nobuiltin(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_0_nobuiltin(
; CHECK-SAME: float [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 0) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 0) #[[ATTR4]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = tail call float @_Z5rootnfi(float %x, i32 0) #0
@@ -1437,7 +1436,7 @@ define float @test_rootn_f32__y_0_nobuiltin(float %x) {
define float @test_rootn_f32__y_1_nobuiltin(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_1_nobuiltin(
; CHECK-SAME: float [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 1) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 1) #[[ATTR4]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = tail call float @_Z5rootnfi(float %x, i32 1) #0
@@ -1447,7 +1446,7 @@ define float @test_rootn_f32__y_1_nobuiltin(float %x) {
define float @test_rootn_f32__y_2_nobuiltin(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_2_nobuiltin(
; CHECK-SAME: float [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 2) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 2) #[[ATTR4]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = tail call float @_Z5rootnfi(float %x, i32 2) #0
@@ -1457,7 +1456,7 @@ define float @test_rootn_f32__y_2_nobuiltin(float %x) {
define float @test_rootn_f32__y_3_nobuiltin(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_3_nobuiltin(
; CHECK-SAME: float [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 3) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 3) #[[ATTR4]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = tail call float @_Z5rootnfi(float %x, i32 3) #0
@@ -1467,7 +1466,7 @@ define float @test_rootn_f32__y_3_nobuiltin(float %x) {
define float @test_rootn_f32__y_neg1_nobuiltin(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_neg1_nobuiltin(
; CHECK-SAME: float [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 -1) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 -1) #[[ATTR4]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = tail call float @_Z5rootnfi(float %x, i32 -1) #0
@@ -1477,7 +1476,7 @@ define float @test_rootn_f32__y_neg1_nobuiltin(float %x) {
define float @test_rootn_f32__y_neg2_nobuiltin(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_neg2_nobuiltin(
; CHECK-SAME: float [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 -2) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 -2) #[[ATTR4]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = tail call float @_Z5rootnfi(float %x, i32 -2) #0
@@ -1492,6 +1491,11 @@ attributes #2 = { noinline }
!0 = !{float 3.0}
;.
; CHECK: attributes #[[ATTR0]] = { strictfp }
-; CHECK: attributes #[[ATTR1:[0-9]+]] = { nounwind memory(read) }
-; CHECK: attributes #[[ATTR2]] = { nobuiltin }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { nounwind memory(read) }
+; CHECK: attributes #[[ATTR3]] = { noinline }
+; CHECK: attributes #[[ATTR4]] = { nobuiltin }
+;.
+; CHECK: [[META0]] = !{float 2.000000e+00}
+; CHECK: [[META1]] = !{float 3.000000e+00}
;.
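; Summary of the constant-exponent rootn folds exercised above (a sketch,
; assuming the OCML name mangling this test already uses):
;
;   rootn(x,  1) -> x
;   rootn(x,  2) -> call @llvm.sqrt.f32(x), !fpmath !{float 2.0}  ; 2.0 ulp
;   rootn(x,  3) -> call @_Z4cbrtf(x)
;   rootn(x, -1) -> fdiv 1.0, x
;   rootn(x, -2) -> fdiv contract 1.0, sqrt(x)                    ; rsqrt
;
; strictfp, noinline, and nobuiltin call sites are deliberately left as
; calls to @_Z5rootnfi, which is why only their attribute numbers change.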
diff --git a/llvm/test/CodeGen/AMDGPU/call-defs-mode-register.ll b/llvm/test/CodeGen/AMDGPU/call-defs-mode-register.ll
new file mode 100644
index 000000000000..0c4974f347a8
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/call-defs-mode-register.ll
@@ -0,0 +1,57 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -simplify-mir -stop-after=finalize-isel < %s | FileCheck %s
+
+; Check that call / asm get an implicit-def $mode added to them in
+; strictfp functions.
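+;
+; A minimal sketch of the hazard this models (hypothetical schedule, not
+; llc output): $mode holds the FP rounding/denormal state, so a call or
+; asm that may change it must be treated as a def. Otherwise nothing
+; orders the mode-reading add after the call:
+;
+;   SI_CALL @maybe_defs_mode, ..., implicit-def $mode  ; may write $mode
+;   V_ADD_F32_e64 ..., implicit $mode                  ; must observe the
+;                                                      ; post-call mode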
+
+declare protected void @maybe_defs_mode() #0
+
+define float @call_changes_mode(float %x, float %y) #0 {
+ ; CHECK-LABEL: name: call_changes_mode
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
+ ; CHECK-NEXT: [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @maybe_defs_mode, target-flags(amdgpu-rel32-hi) @maybe_defs_mode, implicit-def dead $scc
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]]
+ ; CHECK-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[SI_PC_ADD_REL_OFFSET]], @maybe_defs_mode, csr_amdgpu, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit-def $mode
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
+ ; CHECK-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: $vgpr0 = COPY [[V_ADD_F32_e64_]]
+ ; CHECK-NEXT: SI_RETURN implicit $vgpr0
+ call void @maybe_defs_mode()
+ %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret float %val
+}
+
+define void @tail_call_changes_mode() #0 {
+ ; CHECK-LABEL: name: tail_call_changes_mode
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:ccr_sgpr_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @maybe_defs_mode, target-flags(amdgpu-rel32-hi) @maybe_defs_mode, implicit-def dead $scc
+ ; CHECK-NEXT: SI_TCRETURN killed [[SI_PC_ADD_REL_OFFSET]], @maybe_defs_mode, 0, csr_amdgpu, implicit-def $mode
+ tail call void @maybe_defs_mode()
+ ret void
+}
+
+define float @asm_changes_mode(float %x, float %y) #0 {
+ ; CHECK-LABEL: name: asm_changes_mode
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK-NEXT: INLINEASM &"; maybe defs mode", 1 /* sideeffect attdialect */, implicit-def $mode
+ ; CHECK-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: $vgpr0 = COPY [[V_ADD_F32_e64_]]
+ ; CHECK-NEXT: SI_RETURN implicit $vgpr0
+ call void asm sideeffect "; maybe defs mode", ""()
+ %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret float %val
+}
+
+declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
+
+attributes #0 = { strictfp "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" }
diff --git a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
index 54adde38d6d2..756b81909968 100644
--- a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
+++ b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
@@ -322,9 +322,8 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i8_with_select(ptr addrspace(1) noa
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_and_b32 s2, s2, 0xff
-; SI-NEXT: s_flbit_i32_b32 s2, s2
-; SI-NEXT: s_sub_i32 s4, s2, 24
+; SI-NEXT: s_lshl_b32 s2, s2, 24
+; SI-NEXT: s_flbit_i32_b32 s4, s2
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_byte v0, off, s[0:3], 0
@@ -335,9 +334,8 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i8_with_select(ptr addrspace(1) noa
; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_and_b32 s2, s2, 0xff
+; VI-NEXT: s_lshl_b32 s2, s2, 24
; VI-NEXT: s_flbit_i32_b32 s2, s2
-; VI-NEXT: s_sub_i32 s2, s2, 24
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
@@ -357,13 +355,13 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i8_with_select(ptr addrspace(1) noa
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, 0.0,
; EG-NEXT: ALU clause starting at 9:
-; EG-NEXT: FFBH_UINT T0.W, T0.X,
+; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
+; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00)
+; EG-NEXT: FFBH_UINT T0.W, PV.W,
; EG-NEXT: AND_INT * T1.W, KC0[2].Y, literal.x,
; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
-; EG-NEXT: ADD_INT * T0.W, PV.W, literal.x,
-; EG-NEXT: -24(nan), 0(0.000000e+00)
; EG-NEXT: AND_INT T0.W, PV.W, literal.x,
-; EG-NEXT: LSHL * T1.W, T1.W, literal.y,
+; EG-NEXT: LSHL * T1.W, PS, literal.y,
; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
; EG-NEXT: LSHL T0.X, PV.W, PS,
; EG-NEXT: LSHL * T0.W, literal.x, PS,
@@ -379,9 +377,8 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i8_with_select(ptr addrspace(1) noa
; GFX9-GISEL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-GISEL-NEXT: s_and_b32 s0, s4, 0xff
+; GFX9-GISEL-NEXT: s_lshl_b32 s0, s4, 24
; GFX9-GISEL-NEXT: s_flbit_i32_b32 s0, s0
-; GFX9-GISEL-NEXT: s_sub_i32 s0, s0, 24
; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s0
; GFX9-GISEL-NEXT: global_store_byte v1, v0, s[2:3]
; GFX9-GISEL-NEXT: s_endpgm
@@ -399,9 +396,8 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i16_with_select(ptr addrspace(1) no
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_and_b32 s2, s2, 0xffff
-; SI-NEXT: s_flbit_i32_b32 s2, s2
-; SI-NEXT: s_add_i32 s4, s2, -16
+; SI-NEXT: s_lshl_b32 s2, s2, 16
+; SI-NEXT: s_flbit_i32_b32 s4, s2
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
@@ -434,13 +430,13 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i16_with_select(ptr addrspace(1) no
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, 0.0,
; EG-NEXT: ALU clause starting at 9:
-; EG-NEXT: FFBH_UINT T0.W, T0.X,
+; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
+; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT: FFBH_UINT T0.W, PV.W,
; EG-NEXT: AND_INT * T1.W, KC0[2].Y, literal.x,
; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
-; EG-NEXT: ADD_INT * T0.W, PV.W, literal.x,
-; EG-NEXT: -16(nan), 0(0.000000e+00)
; EG-NEXT: AND_INT T0.W, PV.W, literal.x,
-; EG-NEXT: LSHL * T1.W, T1.W, literal.y,
+; EG-NEXT: LSHL * T1.W, PS, literal.y,
; EG-NEXT: 65535(9.183409e-41), 3(4.203895e-45)
; EG-NEXT: LSHL T0.X, PV.W, PS,
; EG-NEXT: LSHL * T0.W, literal.x, PS,
@@ -456,9 +452,8 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i16_with_select(ptr addrspace(1) no
; GFX9-GISEL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-GISEL-NEXT: s_and_b32 s0, s4, 0xffff
+; GFX9-GISEL-NEXT: s_lshl_b32 s0, s4, 16
; GFX9-GISEL-NEXT: s_flbit_i32_b32 s0, s0
-; GFX9-GISEL-NEXT: s_sub_i32 s0, s0, 16
; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s0
; GFX9-GISEL-NEXT: global_store_short v1, v0, s[2:3]
; GFX9-GISEL-NEXT: s_endpgm
@@ -598,8 +593,8 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8_with_select(ptr addrspace(1) noa
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_ffbh_u32_e32 v1, v0
-; SI-NEXT: v_subrev_i32_e32 v1, vcc, 24, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v0
+; SI-NEXT: v_ffbh_u32_e32 v1, v1
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_cndmask_b32_e32 v0, 32, v1, vcc
; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
@@ -613,8 +608,8 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8_with_select(ptr addrspace(1) noa
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_load_ubyte v0, v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_ffbh_u32_sdwa v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
-; VI-NEXT: v_subrev_u32_e32 v1, vcc, 24, v1
+; VI-NEXT: v_lshlrev_b32_e32 v1, 24, v0
+; VI-NEXT: v_ffbh_u32_e32 v1, v1
; VI-NEXT: v_cmp_ne_u16_e32 vcc, 0, v0
; VI-NEXT: v_cndmask_b32_e32 v2, 32, v1, vcc
; VI-NEXT: v_mov_b32_e32 v0, s0
@@ -626,7 +621,7 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8_with_select(ptr addrspace(1) noa
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 0 @6
-; EG-NEXT: ALU 15, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: ALU 16, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
; EG-NEXT: CF_END
; EG-NEXT: PAD
@@ -635,10 +630,11 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8_with_select(ptr addrspace(1) noa
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 9:
-; EG-NEXT: FFBH_UINT * T0.W, T0.X,
-; EG-NEXT: ADD_INT T0.W, PV.W, literal.x,
-; EG-NEXT: AND_INT * T1.W, KC0[2].Y, literal.y,
-; EG-NEXT: -24(nan), 3(4.203895e-45)
+; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
+; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00)
+; EG-NEXT: FFBH_UINT T0.W, PV.W,
+; EG-NEXT: AND_INT * T1.W, KC0[2].Y, literal.x,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
; EG-NEXT: CNDE_INT * T0.W, T0.X, literal.x, PV.W,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: AND_INT T0.W, PV.W, literal.x,
@@ -659,8 +655,8 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8_with_select(ptr addrspace(1) noa
; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-GISEL-NEXT: global_load_ubyte v1, v0, s[2:3]
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
-; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v2, v1
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v2, 24, v2
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v2, 24, v1
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v2, v2
; GFX9-GISEL-NEXT: v_and_b32_e32 v2, 0xff, v2
; GFX9-GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, 32, v2, vcc
@@ -693,8 +689,8 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i16_with_select(ptr addrspace(1) no
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_ffbh_u32_e32 v1, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, -16, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; SI-NEXT: v_ffbh_u32_e32 v1, v1
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_cndmask_b32_e32 v0, 32, v1, vcc
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
@@ -729,7 +725,7 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i16_with_select(ptr addrspace(1) no
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 0 @6
-; EG-NEXT: ALU 15, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: ALU 16, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
; EG-NEXT: CF_END
; EG-NEXT: PAD
@@ -738,10 +734,11 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i16_with_select(ptr addrspace(1) no
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 9:
-; EG-NEXT: FFBH_UINT * T0.W, T0.X,
-; EG-NEXT: ADD_INT T0.W, PV.W, literal.x,
-; EG-NEXT: AND_INT * T1.W, KC0[2].Y, literal.y,
-; EG-NEXT: -16(nan), 3(4.203895e-45)
+; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
+; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT: FFBH_UINT T0.W, PV.W,
+; EG-NEXT: AND_INT * T1.W, KC0[2].Y, literal.x,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
; EG-NEXT: CNDE_INT * T0.W, T0.X, literal.x, PV.W,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: AND_INT T0.W, PV.W, literal.x,
@@ -764,8 +761,8 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i16_with_select(ptr addrspace(1) no
; GFX9-GISEL-NEXT: global_load_ubyte v2, v0, s[2:3] offset:1
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX9-GISEL-NEXT: v_lshl_or_b32 v1, v2, 8, v1
-; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v2, v1
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v2, 16, v2
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v2, v2
; GFX9-GISEL-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX9-GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, 32, v2, vcc
@@ -1110,8 +1107,8 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8(ptr addrspace(1) noalias %out, p
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
; SI-NEXT: v_ffbh_u32_e32 v0, v0
-; SI-NEXT: v_subrev_i32_e32 v0, vcc, 24, v0
; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
@@ -1124,8 +1121,8 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8(ptr addrspace(1) noalias %out, p
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_ubyte v0, v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_ffbh_u32_e32 v0, v0
-; VI-NEXT: v_subrev_u32_e32 v2, vcc, 24, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; VI-NEXT: v_ffbh_u32_e32 v2, v0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_byte v[0:1], v2
@@ -1144,13 +1141,13 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8(ptr addrspace(1) noalias %out, p
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: ADD_INT * T0.X, KC0[2].Z, T0.X,
; EG-NEXT: ALU clause starting at 9:
-; EG-NEXT: FFBH_UINT T0.W, T0.X,
+; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
+; EG-NEXT: 24(3.363116e-44), 0(0.000000e+00)
+; EG-NEXT: FFBH_UINT T0.W, PV.W,
; EG-NEXT: AND_INT * T1.W, KC0[2].Y, literal.x,
; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
-; EG-NEXT: ADD_INT * T0.W, PV.W, literal.x,
-; EG-NEXT: -24(nan), 0(0.000000e+00)
; EG-NEXT: AND_INT T0.W, PV.W, literal.x,
-; EG-NEXT: LSHL * T1.W, T1.W, literal.y,
+; EG-NEXT: LSHL * T1.W, PS, literal.y,
; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
; EG-NEXT: LSHL T0.X, PV.W, PS,
; EG-NEXT: LSHL * T0.W, literal.x, PS,
@@ -1172,8 +1169,8 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8(ptr addrspace(1) noalias %out, p
; GFX9-GISEL-NEXT: global_load_ubyte v0, v[0:1], off
; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 24, v0
; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v0
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v0, 24, v0
; GFX9-GISEL-NEXT: global_store_byte v1, v0, s[0:1]
; GFX9-GISEL-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -1709,12 +1706,12 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8_sel_eq_neg1(ptr addrspace(1) noa
; GFX9-GISEL-NEXT: v_add_co_u32_e32 v0, vcc, v1, v0
; GFX9-GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, v2, v3, vcc
; GFX9-GISEL-NEXT: global_load_ubyte v0, v[0:1], off
-; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
-; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v1, v0
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v1, 24, v1
-; GFX9-GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v0, v1, -1, vcc
; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v2, 24, v0
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v2, v2
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_sdwa s[2:3], v0, v1 src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, -1, s[2:3]
; GFX9-GISEL-NEXT: global_store_byte v1, v0, s[0:1]
; GFX9-GISEL-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -2193,9 +2190,8 @@ define i7 @v_ctlz_zero_undef_i7(i7 %val) {
; GFX9-GISEL-LABEL: v_ctlz_zero_undef_i7:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0x7f, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 25, v0
; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v0
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v0, 25, v0
; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
%ctlz = call i7 @llvm.ctlz.i7(i7 %val, i1 true)
ret i7 %ctlz
@@ -2286,9 +2282,8 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i18(ptr addrspace(1) noalias %out,
; GFX9-GISEL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, 0
; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-GISEL-NEXT: s_and_b32 s0, s4, 0x3ffff
+; GFX9-GISEL-NEXT: s_lshl_b32 s0, s4, 14
; GFX9-GISEL-NEXT: s_flbit_i32_b32 s0, s0
-; GFX9-GISEL-NEXT: s_sub_i32 s0, s0, 14
; GFX9-GISEL-NEXT: s_and_b32 s0, s0, 0x3ffff
; GFX9-GISEL-NEXT: s_lshr_b32 s1, s0, 16
; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s0
@@ -2326,9 +2321,8 @@ define i18 @v_ctlz_zero_undef_i18(i18 %val) {
; GFX9-GISEL-LABEL: v_ctlz_zero_undef_i18:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0x3ffff, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 14, v0
; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v0
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v0, 14, v0
; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
%ctlz = call i18 @llvm.ctlz.i18(i18 %val, i1 true)
ret i18 %ctlz
@@ -2365,12 +2359,10 @@ define <2 x i18> @v_ctlz_zero_undef_v2i18(<2 x i18> %val) {
; GFX9-GISEL-LABEL: v_ctlz_zero_undef_v2i18:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0x3ffff, v0
-; GFX9-GISEL-NEXT: v_and_b32_e32 v1, 0x3ffff, v1
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 14, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v1, 14, v1
; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v0
; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v1, v1
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v0, 14, v0
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v1, 14, v1
; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
%ctlz = call <2 x i18> @llvm.ctlz.v2i18(<2 x i18> %val, i1 true)
ret <2 x i18> %ctlz
@@ -2380,16 +2372,12 @@ define <2 x i16> @v_ctlz_zero_undef_v2i16(<2 x i16> %val) {
; SI-LABEL: v_ctlz_zero_undef_v2i16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_ffbh_u32_e32 v1, v1
-; SI-NEXT: v_ffbh_u32_e32 v0, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, -16, v1
-; SI-NEXT: v_add_i32_e32 v0, vcc, -16, v0
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_ffbh_u32_e32 v0, v0
; SI-NEXT: v_or_b32_e32 v0, v0, v2
-; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: v_ctlz_zero_undef_v2i16:
@@ -2410,12 +2398,13 @@ define <2 x i16> @v_ctlz_zero_undef_v2i16(<2 x i16> %val) {
; GFX9-GISEL-LABEL: v_ctlz_zero_undef_v2i16:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v1, 16, v1
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v0, 16, v0
-; GFX9-GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v0, 16, v1
+; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v1, v1
+; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
%ctlz = call <2 x i16> @llvm.ctlz.v2i16(<2 x i16> %val, i1 true)
ret <2 x i16> %ctlz
@@ -2425,20 +2414,15 @@ define <3 x i16> @v_ctlz_zero_undef_v3i16(<3 x i16> %val) {
; SI-LABEL: v_ctlz_zero_undef_v3i16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_ffbh_u32_e32 v1, v1
; SI-NEXT: v_ffbh_u32_e32 v0, v0
-; SI-NEXT: v_ffbh_u32_e32 v2, v2
+; SI-NEXT: v_ffbh_u32_e32 v3, v2
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_add_i32_e32 v0, vcc, -16, v0
-; SI-NEXT: v_add_i32_e32 v3, vcc, -16, v2
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 0xfff00000, v0
-; SI-NEXT: v_or_b32_e32 v2, 0x100000, v2
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_or_b32_e32 v2, 0x200000, v3
; SI-NEXT: v_alignbit_b32 v1, v3, v0, 16
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -2462,14 +2446,15 @@ define <3 x i16> @v_ctlz_zero_undef_v3i16(<3 x i16> %val) {
; GFX9-GISEL-LABEL: v_ctlz_zero_undef_v3i16:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v2, 16, v2
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v0, 16, v0
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
-; GFX9-GISEL-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v1, 16, v1
-; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v0, 16, v2
+; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v2, v2
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v1, v1
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v2, 16, v0
; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
%ctlz = call <3 x i16> @llvm.ctlz.v3i16(<3 x i16> %val, i1 true)
ret <3 x i16> %ctlz
@@ -2479,24 +2464,18 @@ define <4 x i16> @v_ctlz_zero_undef_v4i16(<4 x i16> %val) {
; SI-LABEL: v_ctlz_zero_undef_v4i16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
-; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_ffbh_u32_e32 v3, v3
; SI-NEXT: v_ffbh_u32_e32 v2, v2
; SI-NEXT: v_ffbh_u32_e32 v1, v1
; SI-NEXT: v_ffbh_u32_e32 v0, v0
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_add_i32_e32 v2, vcc, -16, v2
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_add_i32_e32 v0, vcc, -16, v0
-; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v2, v3, v2
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v2, vcc, 0xfff00000, v2
-; SI-NEXT: v_add_i32_e32 v0, vcc, 0xfff00000, v0
+; SI-NEXT: v_or_b32_e32 v2, v2, v3
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -2524,18 +2503,20 @@ define <4 x i16> @v_ctlz_zero_undef_v4i16(<4 x i16> %val) {
; GFX9-GISEL-LABEL: v_ctlz_zero_undef_v4i16:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v2, 16, v2
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v0, 16, v0
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v3, 16, v3
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX9-GISEL-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v1, 16, v1
-; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v0, 16, v2
-; GFX9-GISEL-NEXT: v_and_b32_e32 v2, 0xffff, v3
-; GFX9-GISEL-NEXT: v_lshl_or_b32 v1, v1, 16, v2
+; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v1, v1
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v2, v2
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v3, v3
+; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v2, 16, v0
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v1, v3, 16, v1
; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
%ctlz = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %val, i1 true)
ret <4 x i16> %ctlz
@@ -2545,27 +2526,24 @@ define <2 x i8> @v_ctlz_zero_undef_v2i8(<2 x i8> %val) {
; SI-LABEL: v_ctlz_zero_undef_v2i8:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
; SI-NEXT: v_ffbh_u32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v1
; SI-NEXT: v_ffbh_u32_e32 v0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; SI-NEXT: v_subrev_i32_e32 v0, vcc, 24, v0
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 0xffffe800, v0
-; SI-NEXT: v_bfe_u32 v1, v0, 8, 8
+; SI-NEXT: v_or_b32_e32 v0, v0, v2
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: v_ctlz_zero_undef_v2i8:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_ffbh_u32_sdwa v1, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0
-; VI-NEXT: v_ffbh_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
-; VI-NEXT: v_add_u16_e32 v1, 0xe800, v1
-; VI-NEXT: v_subrev_u16_e32 v0, 24, v0
-; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_lshrrev_b16_e32 v1, 8, v1
+; VI-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; VI-NEXT: v_ffbh_u32_e32 v1, v1
+; VI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v1
+; VI-NEXT: v_ffbh_u32_e32 v0, v0
+; VI-NEXT: v_or_b32_e32 v0, v0, v2
+; VI-NEXT: v_and_b32_e32 v1, 0xff, v1
; VI-NEXT: s_setpc_b64 s[30:31]
;
; EG-LABEL: v_ctlz_zero_undef_v2i8:
@@ -2576,10 +2554,10 @@ define <2 x i8> @v_ctlz_zero_undef_v2i8(<2 x i8> %val) {
; GFX9-GISEL-LABEL: v_ctlz_zero_undef_v2i8:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v0, 24, v0
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v1, 24, v1
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v0
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v1, v1
; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
%ctlz = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> %val, i1 true)
ret <2 x i8> %ctlz
@@ -2621,12 +2599,10 @@ define <2 x i7> @v_ctlz_zero_undef_v2i7(<2 x i7> %val) {
; GFX9-GISEL-LABEL: v_ctlz_zero_undef_v2i7:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0x7f, v0
-; GFX9-GISEL-NEXT: v_and_b32_e32 v1, 0x7f, v1
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 25, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v1, 25, v1
; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v0
; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v1, v1
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v0, 25, v0
-; GFX9-GISEL-NEXT: v_subrev_u32_e32 v1, 25, v1
; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
%ctlz = call <2 x i7> @llvm.ctlz.v2i7(<2 x i7> %val, i1 true)
ret <2 x i7> %ctlz
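; Worked example of the rewrite above, for i8 with value 0x10 (v_ffbh_u32
; counts leading zeros of a 32-bit value): the old sequence computed
; ffbh_u32(zext 0x10) - 24 = 27 - 24 = 3, while the new one moves the
; narrow value to the top bits first, so ffbh_u32(0x10 << 24 = 0x10000000)
; = 3 directly -- the same ctlz_zero_undef result, one instruction fewer.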
diff --git a/llvm/test/CodeGen/AMDGPU/dpp_combine.ll b/llvm/test/CodeGen/AMDGPU/dpp_combine.ll
index cfc166ec798f..5162092f78ac 100644
--- a/llvm/test/CodeGen/AMDGPU/dpp_combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/dpp_combine.ll
@@ -47,6 +47,21 @@ define amdgpu_kernel void @dpp_fadd(ptr addrspace(1) %arg) {
ret void
}
+; Fails to combine because v_mul_lo_u32 has no e32 or dpp form.
+; GCN-LABEL: {{^}}dpp_mul:
+; GCN: global_load_{{dword|b32}} [[V:v[0-9]+]],
+; GCN: v_mov_b32_e32 [[V2:v[0-9]+]], [[V]]
+; GCN: v_mov_b32_dpp [[V2]], [[V2]] quad_perm:[1,0,0,0] row_mask:0xf bank_mask:0xf bound_ctrl:1{{$}}
+; GCN: v_mul_lo_u32 [[V]], [[V2]], [[V]]{{$}}
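+; For contrast, a sketch of the combine when the opcode does have a DPP
+; form (illustrative only, not checked here; see dpp_fadd above):
+;
+;   v_mov_b32_dpp v1, v0 quad_perm:[1,0,0,0] ...
+;   v_add_f32_e32 v0, v1, v0
+; is folded into
+;   v_add_f32_dpp v0, v0, v0 quad_perm:[1,0,0,0] ...
+;
+; Since v_mul_lo_u32 is VOP3-only, the mov must stay separate.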
+define amdgpu_kernel void @dpp_mul(ptr addrspace(1) %arg) {
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds i32, ptr addrspace(1) %arg, i32 %id
+ %load = load i32, ptr addrspace(1) %gep
+ %tmp0 = call i32 @llvm.amdgcn.update.dpp.i32(i32 %load, i32 %load, i32 1, i32 15, i32 15, i1 1)
+ %mul = mul i32 %tmp0, %load
+ store i32 %mul, ptr addrspace(1) %gep
+ ret void
+}
declare i32 @llvm.amdgcn.workitem.id.x()
declare i32 @llvm.amdgcn.update.dpp.i32(i32, i32, i32, i32, i32, i1) #0
diff --git a/llvm/test/CodeGen/AMDGPU/fmaximum3.ll b/llvm/test/CodeGen/AMDGPU/fmaximum3.ll
index 6e45084dc4b8..9690e126dfcf 100644
--- a/llvm/test/CodeGen/AMDGPU/fmaximum3.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmaximum3.ll
@@ -1,98 +1,3251 @@
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN %s
-
-; GCN-LABEL: {{^}}test_fmaximum3_olt_0_f32:
-; GCN: buffer_load_b32 [[REGC:v[0-9]+]]
-; GCN: buffer_load_b32 [[REGB:v[0-9]+]]
-; GCN: buffer_load_b32 [[REGA:v[0-9]+]]
-; GCN: v_maximum3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
-; GCN: buffer_store_b32 [[RESULT]],
-define amdgpu_kernel void @test_fmaximum3_olt_0_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
- %a = load volatile float, ptr addrspace(1) %aptr, align 4
- %b = load volatile float, ptr addrspace(1) %bptr, align 4
- %c = load volatile float, ptr addrspace(1) %cptr, align 4
- %f0 = call float @llvm.maximum.f32(float %a, float %b)
- %f1 = call float @llvm.maximum.f32(float %f0, float %c)
- store float %f1, ptr addrspace(1) %out, align 4
- ret void
-}
-
-; Commute operand of second fmaximum
-; GCN-LABEL: {{^}}test_fmaximum3_olt_1_f32:
-; GCN: buffer_load_b32 [[REGB:v[0-9]+]]
-; GCN: buffer_load_b32 [[REGA:v[0-9]+]]
-; GCN: buffer_load_b32 [[REGC:v[0-9]+]]
-; GCN: v_maximum3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
-; GCN: buffer_store_b32 [[RESULT]],
-define amdgpu_kernel void @test_fmaximum3_olt_1_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
- %a = load volatile float, ptr addrspace(1) %aptr, align 4
- %b = load volatile float, ptr addrspace(1) %bptr, align 4
- %c = load volatile float, ptr addrspace(1) %cptr, align 4
- %f0 = call float @llvm.maximum.f32(float %a, float %b)
- %f1 = call float @llvm.maximum.f32(float %c, float %f0)
- store float %f1, ptr addrspace(1) %out, align 4
- ret void
-}
-
-; GCN-LABEL: {{^}}test_fmaximum3_olt_0_f16:
-; GCN: buffer_load_u16 [[REGC:v[0-9]+]]
-; GCN: buffer_load_u16 [[REGB:v[0-9]+]]
-; GCN: buffer_load_u16 [[REGA:v[0-9]+]]
-; GCN: v_maximum3_f16 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
-; GCN: buffer_store_b16 [[RESULT]],
-define amdgpu_kernel void @test_fmaximum3_olt_0_f16(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
- %a = load volatile half, ptr addrspace(1) %aptr, align 2
- %b = load volatile half, ptr addrspace(1) %bptr, align 2
- %c = load volatile half, ptr addrspace(1) %cptr, align 2
- %f0 = call half @llvm.maximum.f16(half %a, half %b)
- %f1 = call half @llvm.maximum.f16(half %f0, half %c)
- store half %f1, ptr addrspace(1) %out, align 2
- ret void
-}
-
-; GCN-LABEL: {{^}}test_fmaximum3_olt_1_f16:
-; GCN: buffer_load_u16 [[REGA:v[0-9]+]]
-; GCN: buffer_load_u16 [[REGB:v[0-9]+]]
-; GCN: buffer_load_u16 [[REGC:v[0-9]+]]
-; GCN: v_maximum3_f16 [[RESULT:v[0-9]+]], [[REGC]], [[REGA]], [[REGB]]
-; GCN: buffer_store_b16 [[RESULT]],
-define amdgpu_kernel void @test_fmaximum3_olt_1_f16(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
- %a = load volatile half, ptr addrspace(1) %aptr, align 2
- %b = load volatile half, ptr addrspace(1) %bptr, align 2
- %c = load volatile half, ptr addrspace(1) %cptr, align 2
- %f0 = call half @llvm.maximum.f16(half %a, half %b)
- %f1 = call half @llvm.maximum.f16(half %c, half %f0)
- store half %f1, ptr addrspace(1) %out, align 2
- ret void
-}
-
-; Checks whether the test passes; performMinMaxCombine() should not optimize vector patterns of maximum3
-; since there are no pack instructions for fmaximum3.
-; GCN-LABEL: {{^}}no_fmaximum3_v2f16:
-; GCN: v_pk_maximum_f16 v0, v0, v1
-; GCN: v_pk_maximum_f16 v0, v2, v0
-; GCN: v_pk_maximum_f16 v0, v0, v3
-; GCN-NEXT: s_setpc_b64
-define <2 x half> @no_fmaximum3_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x half> %d) {
-entry:
- %max = call <2 x half> @llvm.maximum.v2f16(<2 x half> %a, <2 x half> %b)
- %max1 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %c, <2 x half> %max)
- %res = call <2 x half> @llvm.maximum.v2f16(<2 x half> %max1, <2 x half> %d)
- ret <2 x half> %res
-}
-
-; GCN-LABEL: {{^}}no_fmaximum3_olt_0_f64:
-; GCN-COUNT-2: v_maximum_f64
-define amdgpu_kernel void @no_fmaximum3_olt_0_f64(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
- %a = load volatile double, ptr addrspace(1) %aptr, align 4
- %b = load volatile double, ptr addrspace(1) %bptr, align 4
- %c = load volatile double, ptr addrspace(1) %cptr, align 4
- %f0 = call double @llvm.maximum.f64(double %a, double %b)
- %f1 = call double @llvm.maximum.f64(double %f0, double %c)
- store double %f1, ptr addrspace(1) %out, align 4
- ret void
-}
-
-declare double @llvm.maximum.f64(double, double)
-declare float @llvm.maximum.f32(float, float)
-declare half @llvm.maximum.f16(half, half)
-declare <2 x half> @llvm.maximum.v2f16(<2 x half>, <2 x half>)
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
+
+define float @v_fmaximum3_f32(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fmaximum3_f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f32_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call float @llvm.maximum.f32(float %a, float %b)
+ %max1 = call float @llvm.maximum.f32(float %max0, float %c)
+ ret float %max1
+}
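+;
+; gfx9 has no maximum3 instruction, so each IEEE maximum above is
+; open-coded: v_max_f32 computes the candidate, v_cmp_o_f32 checks that
+; the inputs are ordered (neither is NaN), and v_cndmask substitutes
+; 0x7fc00000 (canonical quiet NaN) when they are not. gfx12 folds the
+; whole chain into a single v_maximum3_f32.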
+
+define float @v_fmaximum3_f32_commute(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fmaximum3_f32_commute:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, v2, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f32_commute:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f32_e32 v1, v2, v0
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call float @llvm.maximum.f32(float %a, float %b)
+ %max1 = call float @llvm.maximum.f32(float %c, float %max0)
+ ret float %max1
+}
+
+define amdgpu_ps i32 @s_fmaximum3_f32(float inreg %a, float inreg %b, float inreg %c) {
+; GFX12-LABEL: s_fmaximum3_f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v0, s2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_maximum3_f32 v0, s0, s1, v0
+; GFX12-NEXT: v_readfirstlane_b32 s0, v0
+; GFX12-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_fmaximum3_f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v0, s1
+; GFX9-NEXT: v_max_f32_e32 v1, s0, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, s0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: v_max_f32_e32 v1, s2, v0
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, s2, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+ %max0 = call float @llvm.maximum.f32(float %a, float %b)
+ %max1 = call float @llvm.maximum.f32(float %max0, float %c)
+ %cast = bitcast float %max1 to i32
+ %readfirstlane = call i32 @llvm.amdgcn.readfirstlane(i32 %cast)
+ ret i32 %readfirstlane
+}
+
+define float @v_fmaximum3_f32_fabs0(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fmaximum3_f32_fabs0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, |v0|, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f32_fabs0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e64 v3, |v0|, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, |v0|, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f32_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call float @llvm.fabs.f32(float %a)
+ %max0 = call float @llvm.maximum.f32(float %a.fabs, float %b)
+ %max1 = call float @llvm.maximum.f32(float %max0, float %c)
+ ret float %max1
+}
+
+define float @v_fmaximum3_f32_fabs1(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fmaximum3_f32_fabs1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, v0, |v1|, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f32_fabs1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e64 v3, v0, |v1|
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, |v1|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f32_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %b.fabs = call float @llvm.fabs.f32(float %b)
+ %max0 = call float @llvm.maximum.f32(float %a, float %b.fabs)
+ %max1 = call float @llvm.maximum.f32(float %max0, float %c)
+ ret float %max1
+}
+
+define float @v_fmaximum3_f32_fabs2(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fmaximum3_f32_fabs2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, v0, v1, |v2|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f32_fabs2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f32_e64 v1, v0, |v2|
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, |v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %c.fabs = call float @llvm.fabs.f32(float %c)
+ %max0 = call float @llvm.maximum.f32(float %a, float %b)
+ %max1 = call float @llvm.maximum.f32(float %max0, float %c.fabs)
+ ret float %max1
+}
+
+define float @v_fmaximum3_f32_fabs_all(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fmaximum3_f32_fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, |v0|, |v1|, |v2|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f32_fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e64 v3, |v0|, |v1|
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, |v0|, |v1|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f32_e64 v1, v0, |v2|
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, |v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call float @llvm.fabs.f32(float %a)
+ %b.fabs = call float @llvm.fabs.f32(float %b)
+ %c.fabs = call float @llvm.fabs.f32(float %c)
+ %max0 = call float @llvm.maximum.f32(float %a.fabs, float %b.fabs)
+ %max1 = call float @llvm.maximum.f32(float %max0, float %c.fabs)
+ ret float %max1
+}
+
+define float @v_fmaximum3_f32_fneg_all(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fmaximum3_f32_fneg_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, -v0, -v1, -v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f32_fneg_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e64 v3, -v0, -v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, -v0, -v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f32_e64 v1, v0, -v2
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, -v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg float %a
+ %b.fneg = fneg float %b
+ %c.fneg = fneg float %c
+ %max0 = call float @llvm.maximum.f32(float %a.fneg, float %b.fneg)
+ %max1 = call float @llvm.maximum.f32(float %max0, float %c.fneg)
+ ret float %max1
+}
+
+define float @v_fmaximum3_f32_fneg_fabs_all(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fmaximum3_f32_fneg_fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, -|v0|, -|v1|, -|v2|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f32_fneg_fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e64 v3, -|v0|, -|v1|
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, -|v0|, -|v1|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f32_e64 v1, v0, -|v2|
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, -|v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call float @llvm.fabs.f32(float %a)
+ %b.fabs = call float @llvm.fabs.f32(float %b)
+ %c.fabs = call float @llvm.fabs.f32(float %c)
+ %a.fneg.fabs = fneg float %a.fabs
+ %b.fneg.fabs = fneg float %b.fabs
+ %c.fneg.fabs = fneg float %c.fabs
+ %max0 = call float @llvm.maximum.f32(float %a.fneg.fabs, float %b.fneg.fabs)
+ %max1 = call float @llvm.maximum.f32(float %max0, float %c.fneg.fabs)
+ ret float %max1
+}
+
+define float @v_fmaximum3_f32_fneg0(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fmaximum3_f32_fneg0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, -v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f32_fneg0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e64 v3, -v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, -v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f32_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg float %a
+ %max0 = call float @llvm.maximum.f32(float %a.fneg, float %b)
+ %max1 = call float @llvm.maximum.f32(float %max0, float %c)
+ ret float %max1
+}
+
+define float @v_fmaximum3_f32_fneg1(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fmaximum3_f32_fneg1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, v0, -v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f32_fneg1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e64 v3, v0, -v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, -v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f32_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %b.fneg = fneg float %b
+ %max0 = call float @llvm.maximum.f32(float %a, float %b.fneg)
+ %max1 = call float @llvm.maximum.f32(float %max0, float %c)
+ ret float %max1
+}
+
+define float @v_fmaximum3_f32_fneg2(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fmaximum3_f32_fneg2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, v0, v1, -v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f32_fneg2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f32_e64 v1, v0, -v2
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, -v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %c.fneg = fneg float %c
+ %max0 = call float @llvm.maximum.f32(float %a, float %b)
+ %max1 = call float @llvm.maximum.f32(float %max0, float %c.fneg)
+ ret float %max1
+}
+
+define float @v_fmaximum3_f32_const0(float %b, float %c) {
+; GFX12-LABEL: v_fmaximum3_f32_const0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, v0, 0x41000000, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f32_const0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v2, 0x41000000, v0
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call float @llvm.maximum.f32(float 8.0, float %b)
+ %max1 = call float @llvm.maximum.f32(float %max0, float %c)
+ ret float %max1
+}
+
+define float @v_fmaximum3_f32__const2(float %a, float %b) {
+; GFX12-LABEL: v_fmaximum3_f32__const2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, v0, v1, 0x41000000
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f32__const2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: v_max_f32_e32 v1, 0x41000000, v0
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call float @llvm.maximum.f32(float %a, float %b)
+ %max1 = call float @llvm.maximum.f32(float %max0, float 8.0)
+ ret float %max1
+}
+
+define float @v_fmaximum3_f32_inlineimm0(float %b, float %c) {
+; GFX12-LABEL: v_fmaximum3_f32_inlineimm0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, v0, 4.0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f32_inlineimm0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v2, 4.0, v0
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call float @llvm.maximum.f32(float 4.0, float %b)
+ %max1 = call float @llvm.maximum.f32(float %max0, float %c)
+ ret float %max1
+}
+
+define float @v_fmaximum3_f32__inlineimm(float %a, float %b) {
+; GFX12-LABEL: v_fmaximum3_f32__inlineimm:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, v0, v1, 4.0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f32__inlineimm:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: v_max_f32_e32 v1, 4.0, v0
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call float @llvm.maximum.f32(float %a, float %b)
+ %max1 = call float @llvm.maximum.f32(float %max0, float 4.0)
+ ret float %max1
+}
+
+define float @v_fmaximum3_f32_const1_const2(float %a) {
+; GFX12-LABEL: v_fmaximum3_f32_const1_const2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_mov_b32 s0, 0x41000000
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: v_maximum3_f32 v0, v0, s0, 0x41800000
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f32_const1_const2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v1, 0x41000000, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: v_max_f32_e32 v1, 0x41800000, v0
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call float @llvm.maximum.f32(float %a, float 8.0)
+ %max1 = call float @llvm.maximum.f32(float %max0, float 16.0)
+ ret float %max1
+}
+
+define <2 x float> @v_fmaximum3_v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+; GFX12-LABEL: v_fmaximum3_v2f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, v4, v0, v2
+; GFX12-NEXT: v_maximum3_f32 v1, v5, v1, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v2f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v3, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v6, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v3, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v1, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v2, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v4, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v0, v4, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v4, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v4, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v2, v4, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v5, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v1, v5, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v5, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v5, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v2, v5, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <2 x float> @llvm.maximum.v2f32(<2 x float> %a, <2 x float> %b)
+ %max1 = call <2 x float> @llvm.maximum.v2f32(<2 x float> %c, <2 x float> %max0)
+ ret <2 x float> %max1
+}
+
+define <2 x float> @v_fmaximum3_v2f32_commute(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+; GFX12-LABEL: v_fmaximum3_v2f32_commute:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, v0, v2, v4
+; GFX12-NEXT: v_maximum3_f32 v1, v1, v3, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v2f32_commute:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v3, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v6, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v3, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v1, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v2, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v4, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v1, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v5, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v5, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <2 x float> @llvm.maximum.v2f32(<2 x float> %a, <2 x float> %b)
+ %max1 = call <2 x float> @llvm.maximum.v2f32(<2 x float> %max0, <2 x float> %c)
+ ret <2 x float> %max1
+}
+
+define <2 x float> @v_fmaximum3_v2f32__fabs_all(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+; GFX12-LABEL: v_fmaximum3_v2f32__fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, |v0|, |v2|, |v4|
+; GFX12-NEXT: v_maximum3_f32 v1, |v1|, |v3|, |v5|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v2f32__fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_f32_e64 vcc, |v1|, |v3|
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v3, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], |v1|, |v3|
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v7, |v6|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v1|, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v6, |v1|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v3|, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, |v3|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v1, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e64 vcc, |v0|, |v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v2, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], |v0|, |v2|
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v7, |v3|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v0|, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v3, |v0|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v2|, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, |v2|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e64 s[4:5], v0, |v4|
+; GFX9-NEXT: v_cndmask_b32_e64 v2, |v4|, v0, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, |v4|
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v4|, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, |v4|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cmp_gt_f32_e64 s[4:5], v1, |v5|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, |v5|, v1, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v1, |v5|
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v5|, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, |v5|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %a)
+ %b.fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %b)
+ %c.fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %c)
+ %max0 = call <2 x float> @llvm.maximum.v2f32(<2 x float> %a.fabs, <2 x float> %b.fabs)
+ %max1 = call <2 x float> @llvm.maximum.v2f32(<2 x float> %max0, <2 x float> %c.fabs)
+ ret <2 x float> %max1
+}
+
+define <2 x float> @v_fmaximum3_v2f32__fneg_all(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+; GFX12-LABEL: v_fmaximum3_v2f32__fneg_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, -v0, -v2, -v4
+; GFX12-NEXT: v_maximum3_f32 v1, -v1, -v3, -v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v2f32__fneg_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_f32_e64 vcc, -v1, -v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v3, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], -v1, -v3
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v7, -v6, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v1, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v6, -v1, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v3, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, -v3, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v1, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e64 vcc, -v0, -v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v2, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], -v0, -v2
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v7, -v3, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v0, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v3, -v0, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v2, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, -v2, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e64 s[4:5], v0, -v4
+; GFX9-NEXT: v_cndmask_b32_e64 v2, -v4, v0, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, -v4
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v4, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, -v4, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cmp_gt_f32_e64 s[4:5], v1, -v5
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, -v5, v1, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v1, -v5
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v5, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, -v5, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg <2 x float> %a
+ %b.fneg = fneg <2 x float> %b
+ %c.fneg = fneg <2 x float> %c
+ %max0 = call <2 x float> @llvm.maximum.v2f32(<2 x float> %a.fneg, <2 x float> %b.fneg)
+ %max1 = call <2 x float> @llvm.maximum.v2f32(<2 x float> %max0, <2 x float> %c.fneg)
+ ret <2 x float> %max1
+}
+
+define <2 x float> @v_fmaximum3_v2f32__inlineimm1(<2 x float> %a, <2 x float> %c) {
+; GFX12-LABEL: v_fmaximum3_v2f32__inlineimm1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, v0, 2.0, v2
+; GFX12-NEXT: v_maximum3_f32 v1, v1, 2.0, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v2f32__inlineimm1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, 2.0, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v5, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v4, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v4, 2.0, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v4, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v3, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <2 x float> @llvm.maximum.v2f32(<2 x float> %a, <2 x float> <float 2.0, float 2.0>)
+ %max1 = call <2 x float> @llvm.maximum.v2f32(<2 x float> %max0, <2 x float> %c)
+ ret <2 x float> %max1
+}
+
+define <2 x float> @v_fmaximum3_v2f32__inlineimm2(<2 x float> %a, <2 x float> %b) {
+; GFX12-LABEL: v_fmaximum3_v2f32__inlineimm2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, v0, v2, 4.0
+; GFX12-NEXT: v_maximum3_f32 v1, v1, v3, 4.0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v2f32__inlineimm2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v5, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v3, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v2, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, 4.0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, 4.0, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, 4.0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, 4.0, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <2 x float> @llvm.maximum.v2f32(<2 x float> %a, <2 x float> %b)
+ %max1 = call <2 x float> @llvm.maximum.v2f32(<2 x float> %max0, <2 x float> <float 4.0, float 4.0>)
+ ret <2 x float> %max1
+}
+
+define <3 x float> @v_fmaximum3_v3f32(<3 x float> %a, <3 x float> %b, <3 x float> %c) {
+; GFX12-LABEL: v_fmaximum3_v3f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, v6, v0, v3
+; GFX12-NEXT: v_maximum3_f32 v1, v7, v1, v4
+; GFX12-NEXT: v_maximum3_f32 v2, v8, v2, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v3f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v5, v2, vcc
+; GFX9-NEXT: v_mov_b32_e32 v10, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v10, v9, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v9, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v5, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v9
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v9, v2, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v1, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v10, v5, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v4, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v10, v4, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v3, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v6, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v0, v6, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v6, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v6, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v6, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v7, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v7, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v7, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v7, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v7, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v8, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v2, v8, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v8, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v8, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v8, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <3 x float> @llvm.maximum.v3f32(<3 x float> %a, <3 x float> %b)
+ %max1 = call <3 x float> @llvm.maximum.v3f32(<3 x float> %c, <3 x float> %max0)
+ ret <3 x float> %max1
+}
+
+define <3 x float> @v_fmaximum3_v3f32_commute(<3 x float> %a, <3 x float> %b, <3 x float> %c) {
+; GFX12-LABEL: v_fmaximum3_v3f32_commute:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, v0, v3, v6
+; GFX12-NEXT: v_maximum3_f32 v1, v1, v4, v7
+; GFX12-NEXT: v_maximum3_f32 v2, v2, v5, v8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v3f32_commute:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v5, v2, vcc
+; GFX9-NEXT: v_mov_b32_e32 v10, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v10, v9, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v9, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v5, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v9
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v9, v2, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v1, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v10, v5, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v4, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v10, v4, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v3, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v6, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v6, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v1, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v7, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v2, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v8, v2, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v8, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <3 x float> @llvm.maximum.v3f32(<3 x float> %a, <3 x float> %b)
+ %max1 = call <3 x float> @llvm.maximum.v3f32(<3 x float> %max0, <3 x float> %c)
+ ret <3 x float> %max1
+}
+
+define <3 x float> @v_fmaximum3_v3f32__fabs_all(<3 x float> %a, <3 x float> %b, <3 x float> %c) {
+; GFX12-LABEL: v_fmaximum3_v3f32__fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, |v0|, |v3|, |v6|
+; GFX12-NEXT: v_maximum3_f32 v1, |v1|, |v4|, |v7|
+; GFX12-NEXT: v_maximum3_f32 v2, |v2|, |v5|, |v8|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v3f32__fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_f32_e64 vcc, |v2|, |v5|
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v5, v2, vcc
+; GFX9-NEXT: v_mov_b32_e32 v10, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], |v2|, |v5|
+; GFX9-NEXT: v_cndmask_b32_e64 v9, v10, |v9|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v2|, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v9, |v2|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v5|, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, |v5|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v9
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v9, v2, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e64 vcc, |v1|, |v4|
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], |v1|, |v4|
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v10, |v5|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v1|, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v5, |v1|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v4|, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, |v4|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e64 vcc, |v0|, |v3|
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], |v0|, |v3|
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v10, |v4|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v0|, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, |v0|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v3|, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, |v3|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e64 s[4:5], v0, |v6|
+; GFX9-NEXT: v_cndmask_b32_e64 v3, |v6|, v0, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, |v6|
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v6|, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, |v6|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cmp_gt_f32_e64 s[4:5], v1, |v7|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, |v7|, v1, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v1, |v7|
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v7|, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, |v7|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cmp_gt_f32_e64 s[4:5], v2, |v8|
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, |v8|, v2, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v2, |v8|
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v8|, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, |v8|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call <3 x float> @llvm.fabs.v3f32(<3 x float> %a)
+ %b.fabs = call <3 x float> @llvm.fabs.v3f32(<3 x float> %b)
+ %c.fabs = call <3 x float> @llvm.fabs.v3f32(<3 x float> %c)
+ %max0 = call <3 x float> @llvm.maximum.v3f32(<3 x float> %a.fabs, <3 x float> %b.fabs)
+ %max1 = call <3 x float> @llvm.maximum.v3f32(<3 x float> %max0, <3 x float> %c.fabs)
+ ret <3 x float> %max1
+}
+
+define <3 x float> @v_fmaximum3_v3f32__fneg_all(<3 x float> %a, <3 x float> %b, <3 x float> %c) {
+; GFX12-LABEL: v_fmaximum3_v3f32__fneg_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, -v0, -v3, -v6
+; GFX12-NEXT: v_maximum3_f32 v1, -v1, -v4, -v7
+; GFX12-NEXT: v_maximum3_f32 v2, -v2, -v5, -v8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v3f32__fneg_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_f32_e64 vcc, -v2, -v5
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v5, v2, vcc
+; GFX9-NEXT: v_mov_b32_e32 v10, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], -v2, -v5
+; GFX9-NEXT: v_cndmask_b32_e64 v9, v10, -v9, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v2, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v9, -v2, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v5, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, -v5, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v9
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v9, v2, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e64 vcc, -v1, -v4
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], -v1, -v4
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v10, -v5, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v1, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v5, -v1, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v4, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, -v4, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e64 vcc, -v0, -v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], -v0, -v3
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v10, -v4, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v0, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, -v0, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v3, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, -v3, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e64 s[4:5], v0, -v6
+; GFX9-NEXT: v_cndmask_b32_e64 v3, -v6, v0, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, -v6
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v6, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, -v6, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cmp_gt_f32_e64 s[4:5], v1, -v7
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, -v7, v1, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v1, -v7
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v7, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, -v7, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cmp_gt_f32_e64 s[4:5], v2, -v8
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, -v8, v2, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v2, -v8
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v8, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, -v8, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg <3 x float> %a
+ %b.fneg = fneg <3 x float> %b
+ %c.fneg = fneg <3 x float> %c
+ %max0 = call <3 x float> @llvm.maximum.v3f32(<3 x float> %a.fneg, <3 x float> %b.fneg)
+ %max1 = call <3 x float> @llvm.maximum.v3f32(<3 x float> %max0, <3 x float> %c.fneg)
+ ret <3 x float> %max1
+}
+
+define <3 x float> @v_fmaximum3_v3f32__inlineimm1(<3 x float> %a, <3 x float> %c) {
+; GFX12-LABEL: v_fmaximum3_v3f32__inlineimm1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, v0, 2.0, v3
+; GFX12-NEXT: v_maximum3_f32 v1, v1, 2.0, v4
+; GFX12-NEXT: v_maximum3_f32 v2, v2, 2.0, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v3f32__inlineimm1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v6, 2.0, v2, vcc
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v6, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v6, 2.0, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v6, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v6, 2.0, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v6, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v3, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v6, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v3, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v0, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v1, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v4, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v2, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v5, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <3 x float> @llvm.maximum.v3f32(<3 x float> %a, <3 x float> <float 2.0, float 2.0, float 2.0>)
+ %max1 = call <3 x float> @llvm.maximum.v3f32(<3 x float> %max0, <3 x float> %c)
+ ret <3 x float> %max1
+}
+
+define <3 x float> @v_fmaximum3_v3f32__inlineimm2(<3 x float> %a, <3 x float> %b) {
+; GFX12-LABEL: v_fmaximum3_v3f32__inlineimm2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f32 v0, v0, v3, 4.0
+; GFX12-NEXT: v_maximum3_f32 v1, v1, v4, 4.0
+; GFX12-NEXT: v_maximum3_f32 v2, v2, v5, 4.0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v3f32__inlineimm2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v5, v2, vcc
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v6, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v5, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v1, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v7, v5, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v4, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, v0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v7, v4, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v3, 64
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, 4.0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, 4.0, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v3, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, 4.0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, 4.0, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v3, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, 4.0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, 4.0, v2, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <3 x float> @llvm.maximum.v3f32(<3 x float> %a, <3 x float> %b)
+ %max1 = call <3 x float> @llvm.maximum.v3f32(<3 x float> %max0, <3 x float> <float 4.0, float 4.0, float 4.0>)
+ ret <3 x float> %max1
+}
+
+define half @v_fmaximum3_f16(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fmaximum3_f16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f16 v0, v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f16_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call half @llvm.maximum.f16(half %a, half %b)
+ %max1 = call half @llvm.maximum.f16(half %max0, half %c)
+ ret half %max1
+}
+
+define half @v_fmaximum3_f16_commute(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fmaximum3_f16_commute:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f16 v0, v2, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f16_commute:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f16_e32 v1, v2, v0
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v2, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call half @llvm.maximum.f16(half %a, half %b)
+ %max1 = call half @llvm.maximum.f16(half %c, half %max0)
+ ret half %max1
+}
+
+define amdgpu_ps i32 @s_fmaximum3_f16(half inreg %a, half inreg %b, half inreg %c) {
+; GFX12-LABEL: s_fmaximum3_f16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v0, s2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_maximum3_f16 v0, s0, s1, v0
+; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_readfirstlane_b32 s0, v0
+; GFX12-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_fmaximum3_f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v0, s1
+; GFX9-NEXT: v_max_f16_e32 v1, s0, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, s0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: v_max_f16_e32 v1, s2, v0
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, s2, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_readfirstlane_b32 s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+ %max0 = call half @llvm.maximum.f16(half %a, half %b)
+ %max1 = call half @llvm.maximum.f16(half %max0, half %c)
+ %cast = bitcast half %max1 to i16
+ %zext = zext i16 %cast to i32
+ %readfirstlane = call i32 @llvm.amdgcn.readfirstlane(i32 %zext)
+ ret i32 %readfirstlane
+}
+
+define half @v_fmaximum3_f16_fabs0(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fmaximum3_f16_fabs0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f16 v0, |v0|, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f16_fabs0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e64 v3, |v0|, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, |v0|, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f16_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call half @llvm.fabs.f16(half %a)
+ %max0 = call half @llvm.maximum.f16(half %a.fabs, half %b)
+ %max1 = call half @llvm.maximum.f16(half %max0, half %c)
+ ret half %max1
+}
+
+define half @v_fmaximum3_f16_fabs1(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fmaximum3_f16_fabs1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f16 v0, v0, |v1|, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f16_fabs1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e64 v3, v0, |v1|
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, |v1|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f16_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %b.fabs = call half @llvm.fabs.f16(half %b)
+ %max0 = call half @llvm.maximum.f16(half %a, half %b.fabs)
+ %max1 = call half @llvm.maximum.f16(half %max0, half %c)
+ ret half %max1
+}
+
+define half @v_fmaximum3_f16_fabs2(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fmaximum3_f16_fabs2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f16 v0, v0, v1, |v2|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f16_fabs2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f16_e64 v1, v0, |v2|
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, |v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %c.fabs = call half @llvm.fabs.f16(half %c)
+ %max0 = call half @llvm.maximum.f16(half %a, half %b)
+ %max1 = call half @llvm.maximum.f16(half %max0, half %c.fabs)
+ ret half %max1
+}
+
+define half @v_fmaximum3_f16_fabs_all(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fmaximum3_f16_fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f16 v0, |v0|, |v1|, |v2|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f16_fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e64 v3, |v0|, |v1|
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, |v0|, |v1|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f16_e64 v1, v0, |v2|
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, |v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call half @llvm.fabs.f16(half %a)
+ %b.fabs = call half @llvm.fabs.f16(half %b)
+ %c.fabs = call half @llvm.fabs.f16(half %c)
+ %max0 = call half @llvm.maximum.f16(half %a.fabs, half %b.fabs)
+ %max1 = call half @llvm.maximum.f16(half %max0, half %c.fabs)
+ ret half %max1
+}
+
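+; fneg on all operands folds into negate source modifiers (-v0, -v1, -v2) in
+; both the GFX12 v_maximum3_f16 and the GFX9 v_max_f16 expansion.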
+define half @v_fmaximum3_f16_fneg_all(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fmaximum3_f16_fneg_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f16 v0, -v0, -v1, -v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f16_fneg_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e64 v3, -v0, -v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -v0, -v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f16_e64 v1, v0, -v2
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, -v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg half %a
+ %b.fneg = fneg half %b
+ %c.fneg = fneg half %c
+ %max0 = call half @llvm.maximum.f16(half %a.fneg, half %b.fneg)
+ %max1 = call half @llvm.maximum.f16(half %max0, half %c.fneg)
+ ret half %max1
+}
+
+define half @v_fmaximum3_f16_fneg_fabs_all(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fmaximum3_f16_fneg_fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f16 v0, -|v0|, -|v1|, -|v2|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f16_fneg_fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e64 v3, -|v0|, -|v1|
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -|v0|, -|v1|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f16_e64 v1, v0, -|v2|
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, -|v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call half @llvm.fabs.f16(half %a)
+ %b.fabs = call half @llvm.fabs.f16(half %b)
+ %c.fabs = call half @llvm.fabs.f16(half %c)
+ %a.fneg.fabs = fneg half %a.fabs
+ %b.fneg.fabs = fneg half %b.fabs
+ %c.fneg.fabs = fneg half %c.fabs
+ %max0 = call half @llvm.maximum.f16(half %a.fneg.fabs, half %b.fneg.fabs)
+ %max1 = call half @llvm.maximum.f16(half %max0, half %c.fneg.fabs)
+ ret half %max1
+}
+
+define half @v_fmaximum3_f16_fneg0(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fmaximum3_f16_fneg0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f16 v0, -v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f16_fneg0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e64 v3, -v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f16_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg half %a
+ %max0 = call half @llvm.maximum.f16(half %a.fneg, half %b)
+ %max1 = call half @llvm.maximum.f16(half %max0, half %c)
+ ret half %max1
+}
+
+define half @v_fmaximum3_f16_fneg1(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fmaximum3_f16_fneg1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f16 v0, v0, -v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f16_fneg1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e64 v3, v0, -v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, -v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f16_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %b.fneg = fneg half %b
+ %max0 = call half @llvm.maximum.f16(half %a, half %b.fneg)
+ %max1 = call half @llvm.maximum.f16(half %max0, half %c)
+ ret half %max1
+}
+
+define half @v_fmaximum3_f16_fneg2(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fmaximum3_f16_fneg2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f16 v0, v0, v1, -v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f16_fneg2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_max_f16_e64 v1, v0, -v2
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, -v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %c.fneg = fneg half %c
+ %max0 = call half @llvm.maximum.f16(half %a, half %b)
+ %max1 = call half @llvm.maximum.f16(half %max0, half %c.fneg)
+ ret half %max1
+}
+
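+; 8.0 is not an f16 inline immediate, so it is emitted as the literal 0x4800;
+; GFX12 still folds the literal straight into v_maximum3_f16.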
+define half @v_fmaximum3_f16_const0(half %b, half %c) {
+; GFX12-LABEL: v_fmaximum3_f16_const0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f16 v0, v0, 0x4800, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f16_const0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v2, 0x4800, v0
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call half @llvm.maximum.f16(half 8.0, half %b)
+ %max1 = call half @llvm.maximum.f16(half %max0, half %c)
+ ret half %max1
+}
+
+define half @v_fmaximum3_f16__const2(half %a, half %b) {
+; GFX12-LABEL: v_fmaximum3_f16__const2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f16 v0, v0, v1, 0x4800
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f16__const2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: v_max_f16_e32 v1, 0x4800, v0
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call half @llvm.maximum.f16(half %a, half %b)
+ %max1 = call half @llvm.maximum.f16(half %max0, half 8.0)
+ ret half %max1
+}
+
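+; 4.0 is a hardware inline immediate, so no literal constant is needed in
+; either the GFX12 v_maximum3_f16 or the GFX9 expansion.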
+define half @v_fmaximum3_f16_inlineimm0(half %b, half %c) {
+; GFX12-LABEL: v_fmaximum3_f16_inlineimm0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f16 v0, v0, 4.0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f16_inlineimm0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v2, 4.0, v0
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call half @llvm.maximum.f16(half 4.0, half %b)
+ %max1 = call half @llvm.maximum.f16(half %max0, half %c)
+ ret half %max1
+}
+
+define half @v_fmaximum3_f16__inlineimm(half %a, half %b) {
+; GFX12-LABEL: v_fmaximum3_f16__inlineimm:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum3_f16 v0, v0, v1, 4.0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f16__inlineimm:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: v_max_f16_e32 v1, 4.0, v0
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call half @llvm.maximum.f16(half %a, half %b)
+ %max1 = call half @llvm.maximum.f16(half %max0, half 4.0)
+ ret half %max1
+}
+
+define half @v_fmaximum3_f16_const1_const2(half %a) {
+; GFX12-LABEL: v_fmaximum3_f16_const1_const2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_movk_i32 s0, 0x4800
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: v_maximum3_f16 v0, v0, s0, 0x4c00
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f16_const1_const2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v1, 0x4800, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: v_max_f16_e32 v1, 0x4c00, v0
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call half @llvm.maximum.f16(half %a, half 8.0)
+ %max1 = call half @llvm.maximum.f16(half %max0, half 16.0)
+ ret half %max1
+}
+
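+; Packed cases: no packed three-operand form is selected, so GFX12 chains two
+; v_pk_maximum_f16 ops; GFX9 scalarizes the halves with SDWA compares and
+; repacks the results via v_perm_b32.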
+define <2 x half> @v_fmaximum3_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c) {
+; GFX12-LABEL: v_fmaximum3_v2f16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v2, v0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v2f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v3, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v1 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v1, v0, v5, s4
+; GFX9-NEXT: v_pk_max_f16 v1, v2, v1
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v1, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v2, v0 src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %a, <2 x half> %b)
+ %max1 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %c, <2 x half> %max0)
+ ret <2 x half> %max1
+}
+
+define <2 x half> @v_fmaximum3_v2f16_commute(<2 x half> %a, <2 x half> %b, <2 x half> %c) {
+; GFX12-LABEL: v_fmaximum3_v2f16_commute:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v2f16_commute:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v3, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v1 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v1, v0, v5, s4
+; GFX9-NEXT: v_pk_max_f16 v1, v1, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v5, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v1, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %a, <2 x half> %b)
+ %max1 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %max0, <2 x half> %c)
+ ret <2 x half> %max1
+}
+
+define <2 x half> @v_fmaximum3_v2f16__fabs_all(<2 x half> %a, <2 x half> %b, <2 x half> %c) {
+; GFX12-LABEL: v_fmaximum3_v2f16__fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, 0x7fff7fff, v0
+; GFX12-NEXT: v_and_b32_e32 v1, 0x7fff7fff, v1
+; GFX12-NEXT: v_and_b32_e32 v2, 0x7fff7fff, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v1
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v2f16__fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff7fff, v0
+; GFX9-NEXT: v_and_b32_e32 v4, 0x7fff7fff, v1
+; GFX9-NEXT: v_pk_max_f16 v3, v3, v4
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX9-NEXT: v_mov_b32_e32 v6, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, |v0|, |v1| src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, |v0|, |v1|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v3, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_and_b32_e32 v5, 0x7fff7fff, v2
+; GFX9-NEXT: v_perm_b32 v1, v4, v0, s4
+; GFX9-NEXT: v_pk_max_f16 v1, v1, v5
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v4, |v2| src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v6, v3, vcc
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, |v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v1, vcc
+; GFX9-NEXT: v_perm_b32 v0, v3, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %a)
+ %b.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %b)
+ %c.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %c)
+ %max0 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %a.fabs, <2 x half> %b.fabs)
+ %max1 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %max0, <2 x half> %c.fabs)
+ ret <2 x half> %max1
+}
+
+define <2 x half> @v_fmaximum3_v2f16__fneg_all(<2 x half> %a, <2 x half> %b, <2 x half> %c) {
+; GFX12-LABEL: v_fmaximum3_v2f16__fneg_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v1 neg_lo:[1,1] neg_hi:[1,1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v2 neg_lo:[0,1] neg_hi:[0,1]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v2f16__fneg_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v3, v0, v1 neg_lo:[1,1] neg_hi:[1,1]
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -v0, -v1
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v3, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, -v0, -v1 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v1, v0, v5, s4
+; GFX9-NEXT: v_pk_max_f16 v1, v1, v2 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v5, -v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v1, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, -v2 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg <2 x half> %a
+ %b.fneg = fneg <2 x half> %b
+ %c.fneg = fneg <2 x half> %c
+ %max0 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %a.fneg, <2 x half> %b.fneg)
+ %max1 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %max0, <2 x half> %c.fneg)
+ ret <2 x half> %max1
+}
+
+define <2 x half> @v_fmaximum3_v2f16__inlineimm1(<2 x half> %a, <2 x half> %c) {
+; GFX12-LABEL: v_fmaximum3_v2f16__inlineimm1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, 2.0 op_sel_hi:[1,0]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v2f16__inlineimm1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v2, v0, 2.0 op_sel_hi:[1,0]
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v0 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v2, v3, v0, s4
+; GFX9-NEXT: v_pk_max_f16 v2, v2, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v3, v1 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v3, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %a, <2 x half> <half 2.0, half 2.0>)
+ %max1 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %max0, <2 x half> %c)
+ ret <2 x half> %max1
+}
+
+define <2 x half> @v_fmaximum3_v2f16__inlineimm2(<2 x half> %a, <2 x half> %b) {
+; GFX12-LABEL: v_fmaximum3_v2f16__inlineimm2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, 4.0 op_sel_hi:[1,0]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v2f16__inlineimm2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v1 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v1, v0, v4, s4
+; GFX9-NEXT: v_pk_max_f16 v1, v1, 4.0 op_sel_hi:[1,0]
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v4, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v1, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v2, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %a, <2 x half> %b)
+ %max1 = call <2 x half> @llvm.maximum.v2f16(<2 x half> %max0, <2 x half> <half 4.0, half 4.0>)
+ ret <2 x half> %max1
+}
+
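+; v3f16 is legalized as two v2f16 registers, so the lowering mirrors the
+; v2f16 cases across the v0/v1 (and v2/v3, v4/v5) register pairs.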
+define <3 x half> @v_fmaximum3_v3f16(<3 x half> %a, <3 x half> %b, <3 x half> %c) {
+; GFX12-LABEL: v_fmaximum3_v3f16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v4, v0
+; GFX12-NEXT: v_pk_maximum_f16 v1, v5, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v3f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v6, v0, v2
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v7, v6, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v6, vcc
+; GFX9-NEXT: v_pk_max_f16 v2, v1, v3
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v1, v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v1, v1, v6, s4
+; GFX9-NEXT: v_pk_max_f16 v1, v5, v1
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v5, v6
+; GFX9-NEXT: v_perm_b32 v2, v0, v8, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v1, vcc
+; GFX9-NEXT: v_pk_max_f16 v2, v4, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v4, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v4, v0 src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %a, <3 x half> %b)
+ %max1 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %c, <3 x half> %max0)
+ ret <3 x half> %max1
+}
+
+define <3 x half> @v_fmaximum3_v3f16_commute(<3 x half> %a, <3 x half> %b, <3 x half> %c) {
+; GFX12-LABEL: v_fmaximum3_v3f16_commute:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v4
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v3f16_commute:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v6, v0, v2
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v7, v6, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v6, vcc
+; GFX9-NEXT: v_pk_max_f16 v2, v1, v3
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v1, v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v1, v1, v6, s4
+; GFX9-NEXT: v_pk_max_f16 v1, v1, v5
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v6, v5
+; GFX9-NEXT: v_perm_b32 v2, v0, v8, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v1, vcc
+; GFX9-NEXT: v_pk_max_f16 v2, v2, v4
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v8, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v4 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %a, <3 x half> %b)
+ %max1 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %max0, <3 x half> %c)
+ ret <3 x half> %max1
+}
+
+define <3 x half> @v_fmaximum3_v3f16__fabs_all(<3 x half> %a, <3 x half> %b, <3 x half> %c) {
+; GFX12-LABEL: v_fmaximum3_v3f16__fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, 0x7fff7fff, v0
+; GFX12-NEXT: v_and_b32_e32 v1, 0x7fff7fff, v1
+; GFX12-NEXT: v_and_b32_e32 v2, 0x7fff7fff, v2
+; GFX12-NEXT: v_and_b32_e32 v3, 0x7fff7fff, v3
+; GFX12-NEXT: v_and_b32_e32 v5, 0x7fff7fff, v5
+; GFX12-NEXT: v_and_b32_e32 v4, 0x7fff7fff, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v4
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v3f16__fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v7, 0x7fff7fff, v1
+; GFX9-NEXT: v_and_b32_e32 v9, 0x7fff7fff, v3
+; GFX9-NEXT: v_and_b32_e32 v6, 0x7fff7fff, v0
+; GFX9-NEXT: v_and_b32_e32 v8, 0x7fff7fff, v2
+; GFX9-NEXT: v_pk_max_f16 v7, v7, v9
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v7
+; GFX9-NEXT: v_mov_b32_e32 v12, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, |v1|, |v3| src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_pk_max_f16 v6, v6, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v12, v9, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v6
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, |v0|, |v2| src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v12, v8, vcc
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, |v1|, |v3|
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v12, v7, vcc
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, |v0|, |v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v12, v6, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_and_b32_e32 v11, 0x7fff7fff, v4
+; GFX9-NEXT: v_perm_b32 v2, v8, v0, s4
+; GFX9-NEXT: v_pk_max_f16 v2, v2, v11
+; GFX9-NEXT: v_and_b32_e32 v10, 0x7fff7fff, v5
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v8, |v4| src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_perm_b32 v6, v9, v1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v12, v3, vcc
+; GFX9-NEXT: v_pk_max_f16 v6, v6, v10
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v1, |v5|
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v12, v6, vcc
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, |v4|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v12, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v3, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call <3 x half> @llvm.fabs.v3f16(<3 x half> %a)
+ %b.fabs = call <3 x half> @llvm.fabs.v3f16(<3 x half> %b)
+ %c.fabs = call <3 x half> @llvm.fabs.v3f16(<3 x half> %c)
+ %max0 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %a.fabs, <3 x half> %b.fabs)
+ %max1 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %max0, <3 x half> %c.fabs)
+ ret <3 x half> %max1
+}
+
+define <3 x half> @v_fmaximum3_v3f16__fneg_all(<3 x half> %a, <3 x half> %b, <3 x half> %c) {
+; GFX12-LABEL: v_fmaximum3_v3f16__fneg_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v2 neg_lo:[1,1] neg_hi:[1,1]
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, v3 neg_lo:[1,1] neg_hi:[1,1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v4 neg_lo:[0,1] neg_hi:[0,1]
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, v5 neg_lo:[0,1] neg_hi:[0,1]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v3f16__fneg_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v6, v0, v2 neg_lo:[1,1] neg_hi:[1,1]
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -v0, -v2
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v7, v6, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, -v0, -v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v6, vcc
+; GFX9-NEXT: v_pk_max_f16 v2, v1, v3 neg_lo:[1,1] neg_hi:[1,1]
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -v1, -v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, -v1, -v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v1, v1, v6, s4
+; GFX9-NEXT: v_pk_max_f16 v1, v1, v5 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v6, -v5
+; GFX9-NEXT: v_perm_b32 v2, v0, v8, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v1, vcc
+; GFX9-NEXT: v_pk_max_f16 v2, v2, v4 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v8, -v4
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, -v4 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg <3 x half> %a
+ %b.fneg = fneg <3 x half> %b
+ %c.fneg = fneg <3 x half> %c
+ %max0 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %a.fneg, <3 x half> %b.fneg)
+ %max1 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %max0, <3 x half> %c.fneg)
+ ret <3 x half> %max1
+}
+
+define <3 x half> @v_fmaximum3_v3f16__inlineimm1(<3 x half> %a, <3 x half> %c) {
+; GFX12-LABEL: v_fmaximum3_v3f16__inlineimm1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, 2.0 op_sel_hi:[1,0]
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, 2.0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v3f16__inlineimm1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v4, v0, 2.0 op_sel_hi:[1,0]
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v4
+; GFX9-NEXT: v_mov_b32_e32 v6, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v0 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
+; GFX9-NEXT: v_pk_max_f16 v7, v1, 2.0
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v7, vcc
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v4, vcc
+; GFX9-NEXT: s_mov_b32 s5, 0x5040100
+; GFX9-NEXT: v_perm_b32 v4, v5, v0, s5
+; GFX9-NEXT: v_pk_max_f16 v4, v4, v2
+; GFX9-NEXT: s_movk_i32 s4, 0x7e00
+; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v4
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v5, v2 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc
+; GFX9-NEXT: v_pack_b32_f16 v7, v1, s4
+; GFX9-NEXT: v_pk_max_f16 v7, v7, v3
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v7, vcc
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v4, vcc
+; GFX9-NEXT: v_perm_b32 v0, v5, v0, s5
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %a, <3 x half> <half 2.0, half 2.0, half 2.0>)
+ %max1 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %max0, <3 x half> %c)
+ ret <3 x half> %max1
+}
+
+define <3 x half> @v_fmaximum3_v3f16__inlineimm2(<3 x half> %a, <3 x half> %b) {
+; GFX12-LABEL: v_fmaximum3_v3f16__inlineimm2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, 4.0 op_sel_hi:[1,0]
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, 4.0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v3f16__inlineimm2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v4, v0, v2
+; GFX9-NEXT: v_mov_b32_e32 v5, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v5, v4, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v4, vcc
+; GFX9-NEXT: v_pk_max_f16 v2, v1, v3
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v1, v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v1, v1, v4, s4
+; GFX9-NEXT: v_pk_max_f16 v1, v1, 4.0
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v4, v4
+; GFX9-NEXT: v_perm_b32 v2, v0, v6, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_pk_max_f16 v2, v2, 4.0 op_sel_hi:[1,0]
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v6, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %a, <3 x half> %b)
+ %max1 = call <3 x half> @llvm.maximum.v3f16(<3 x half> %max0, <3 x half> <half 4.0, half 4.0, half 4.0>)
+ ret <3 x half> %max1
+}
+
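+; v4f16 splits into two v2f16 halves and repeats the v2f16 lowering per half.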
+define <4 x half> @v_fmaximum3_v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+; GFX12-LABEL: v_fmaximum3_v4f16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v4, v0
+; GFX12-NEXT: v_pk_maximum_f16 v1, v5, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v4f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v6, v0, v2
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v7, v6, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v6, vcc
+; GFX9-NEXT: v_pk_max_f16 v2, v1, v3
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v1, v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v2, v1, v6, s4
+; GFX9-NEXT: v_pk_max_f16 v2, v5, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v5, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v5, v1 src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
+; GFX9-NEXT: v_perm_b32 v2, v0, v8, s4
+; GFX9-NEXT: v_pk_max_f16 v2, v4, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v4, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v4, v0 src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v5, s4
+; GFX9-NEXT: v_perm_b32 v1, v1, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %a, <4 x half> %b)
+ %max1 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %c, <4 x half> %max0)
+ ret <4 x half> %max1
+}
+
+define <4 x half> @v_fmaximum3_v4f16_commute(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+; GFX12-LABEL: v_fmaximum3_v4f16_commute:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v4
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v4f16_commute:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v6, v0, v2
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v7, v6, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v6, vcc
+; GFX9-NEXT: v_pk_max_f16 v2, v1, v3
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v1, v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v2, v1, v6, s4
+; GFX9-NEXT: v_pk_max_f16 v2, v2, v5
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v6, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v1, v5 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
+; GFX9-NEXT: v_perm_b32 v2, v0, v8, s4
+; GFX9-NEXT: v_pk_max_f16 v2, v2, v4
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v8, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v4 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v5, s4
+; GFX9-NEXT: v_perm_b32 v1, v1, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %a, <4 x half> %b)
+ %max1 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %max0, <4 x half> %c)
+ ret <4 x half> %max1
+}
+
+define <4 x half> @v_fmaximum3_v4f16__fabs_all(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+; GFX12-LABEL: v_fmaximum3_v4f16__fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, 0x7fff7fff, v0
+; GFX12-NEXT: v_and_b32_e32 v1, 0x7fff7fff, v1
+; GFX12-NEXT: v_and_b32_e32 v2, 0x7fff7fff, v2
+; GFX12-NEXT: v_and_b32_e32 v3, 0x7fff7fff, v3
+; GFX12-NEXT: v_and_b32_e32 v5, 0x7fff7fff, v5
+; GFX12-NEXT: v_and_b32_e32 v4, 0x7fff7fff, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v4
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v4f16__fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v7, 0x7fff7fff, v0
+; GFX9-NEXT: v_and_b32_e32 v9, 0x7fff7fff, v2
+; GFX9-NEXT: v_and_b32_e32 v6, 0x7fff7fff, v1
+; GFX9-NEXT: v_and_b32_e32 v8, 0x7fff7fff, v3
+; GFX9-NEXT: v_pk_max_f16 v7, v7, v9
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v7
+; GFX9-NEXT: v_mov_b32_e32 v12, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, |v0|, |v2| src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_pk_max_f16 v6, v6, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v12, v9, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v6
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, |v1|, |v3| src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v12, v8, vcc
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, |v0|, |v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v12, v7, vcc
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, |v1|, |v3|
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v12, v6, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_and_b32_e32 v11, 0x7fff7fff, v5
+; GFX9-NEXT: v_perm_b32 v2, v8, v1, s4
+; GFX9-NEXT: v_and_b32_e32 v10, 0x7fff7fff, v4
+; GFX9-NEXT: v_pk_max_f16 v2, v2, v11
+; GFX9-NEXT: v_perm_b32 v6, v9, v0, s4
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v8, |v5| src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_pk_max_f16 v6, v6, v10
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v12, v3, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v6
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v9, |v4| src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v12, v7, vcc
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v1, |v5|
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v12, v2, vcc
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, |v4|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v12, v6, vcc
+; GFX9-NEXT: v_perm_b32 v0, v7, v0, s4
+; GFX9-NEXT: v_perm_b32 v1, v3, v1, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call <4 x half> @llvm.fabs.v4f16(<4 x half> %a)
+ %b.fabs = call <4 x half> @llvm.fabs.v4f16(<4 x half> %b)
+ %c.fabs = call <4 x half> @llvm.fabs.v4f16(<4 x half> %c)
+ %max0 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %a.fabs, <4 x half> %b.fabs)
+ %max1 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %max0, <4 x half> %c.fabs)
+ ret <4 x half> %max1
+}
+
+define <4 x half> @v_fmaximum3_v4f16__fneg_all(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+; GFX12-LABEL: v_fmaximum3_v4f16__fneg_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v2 neg_lo:[1,1] neg_hi:[1,1]
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, v3 neg_lo:[1,1] neg_hi:[1,1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v4 neg_lo:[0,1] neg_hi:[0,1]
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, v5 neg_lo:[0,1] neg_hi:[0,1]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v4f16__fneg_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v6, v0, v2 neg_lo:[1,1] neg_hi:[1,1]
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -v0, -v2
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v7, v6, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, -v0, -v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v6, vcc
+; GFX9-NEXT: v_pk_max_f16 v2, v1, v3 neg_lo:[1,1] neg_hi:[1,1]
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -v1, -v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, -v1, -v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v2, v1, v6, s4
+; GFX9-NEXT: v_pk_max_f16 v2, v2, v5 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v6, -v5
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v1, -v5 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
+; GFX9-NEXT: v_perm_b32 v2, v0, v8, s4
+; GFX9-NEXT: v_pk_max_f16 v2, v2, v4 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v8, -v4
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, -v4 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v5, s4
+; GFX9-NEXT: v_perm_b32 v1, v1, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg <4 x half> %a
+ %b.fneg = fneg <4 x half> %b
+ %c.fneg = fneg <4 x half> %c
+ %max0 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %a.fneg, <4 x half> %b.fneg)
+ %max1 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %max0, <4 x half> %c.fneg)
+ ret <4 x half> %max1
+}
+
+define <4 x half> @v_fmaximum3_v4f16__inlineimm1(<4 x half> %a, <4 x half> %c) {
+; GFX12-LABEL: v_fmaximum3_v4f16__inlineimm1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, 2.0 op_sel_hi:[1,0]
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, 2.0 op_sel_hi:[1,0]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v4f16__inlineimm1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v4, v0, 2.0 op_sel_hi:[1,0]
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v4
+; GFX9-NEXT: v_mov_b32_e32 v6, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v0 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_pk_max_f16 v7, v1, 2.0 op_sel_hi:[1,0]
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v7
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v1, v1 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v6, v8, vcc
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v4, vcc
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v7, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v4, v8, v1, s4
+; GFX9-NEXT: v_pk_max_f16 v4, v4, v3
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v8, v3 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_perm_b32 v8, v5, v0, s4
+; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v4
+; GFX9-NEXT: v_pk_max_f16 v8, v8, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v6, v7, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v8
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v5, v2 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v6, v9, vcc
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v4, vcc
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v8, vcc
+; GFX9-NEXT: v_perm_b32 v0, v5, v0, s4
+; GFX9-NEXT: v_perm_b32 v1, v7, v1, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %a, <4 x half> <half 2.0, half 2.0, half 2.0, half 2.0>)
+ %max1 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %max0, <4 x half> %c)
+ ret <4 x half> %max1
+}
+
+define <4 x half> @v_fmaximum3_v4f16__inlineimm2(<4 x half> %a, <4 x half> %b) {
+; GFX12-LABEL: v_fmaximum3_v4f16__inlineimm2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, 4.0 op_sel_hi:[1,0]
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, 4.0 op_sel_hi:[1,0]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_v4f16__inlineimm2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v4, v0, v2
+; GFX9-NEXT: v_mov_b32_e32 v5, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v5, v4, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v4, vcc
+; GFX9-NEXT: v_pk_max_f16 v2, v1, v3
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v1, v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v2, v1, v4, s4
+; GFX9-NEXT: v_pk_max_f16 v2, v2, 4.0 op_sel_hi:[1,0]
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v4, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
+; GFX9-NEXT: v_perm_b32 v2, v0, v6, s4
+; GFX9-NEXT: v_pk_max_f16 v2, v2, 4.0 op_sel_hi:[1,0]
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v6, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v4, s4
+; GFX9-NEXT: v_perm_b32 v1, v1, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %a, <4 x half> %b)
+ %max1 = call <4 x half> @llvm.maximum.v4f16(<4 x half> %max0, <4 x half> <half 4.0, half 4.0, half 4.0, half 4.0>)
+ ret <4 x half> %max1
+}
+
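+; f64: no three-operand maximum exists for doubles, so GFX12 chains two
+; v_maximum_f64; GFX9 expands each with v_max_f64 plus an unordered-compare
+; select of the f64 quiet NaN (high word 0x7ff80000, low word 0).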
+define double @v_fmaximum3_f64(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fmaximum3_f64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[6:7], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_max_f64 v[2:3], v[0:1], v[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call double @llvm.maximum.f64(double %a, double %b)
+ %max1 = call double @llvm.maximum.f64(double %max0, double %c)
+ ret double %max1
+}
+
+define double @v_fmaximum3_f64_commute(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fmaximum3_f64_commute:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[4:5], v[0:1]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f64_commute:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[6:7], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_max_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call double @llvm.maximum.f64(double %a, double %b)
+ %max1 = call double @llvm.maximum.f64(double %c, double %max0)
+ ret double %max1
+}
+
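+; Scalar (inreg) inputs are still computed in the VALU; the result is copied
+; back to SGPRs with v_readfirstlane_b32.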
+define amdgpu_ps <2 x i32> @s_fmaximum3_f64(double inreg %a, double inreg %b, double inreg %c) {
+; GFX12-LABEL: s_fmaximum3_f64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_maximum_f64 v[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], s[4:5]
+; GFX12-NEXT: v_readfirstlane_b32 s0, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_readfirstlane_b32 s1, v1
+; GFX12-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_fmaximum3_f64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: v_max_f64 v[2:3], s[0:1], v[0:1]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_max_f64 v[2:3], v[0:1], s[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, s[4:5], v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v2, 0, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v1
+; GFX9-NEXT: v_readfirstlane_b32 s1, v0
+; GFX9-NEXT: ; return to shader part epilog
+ %max0 = call double @llvm.maximum.f64(double %a, double %b)
+ %max1 = call double @llvm.maximum.f64(double %max0, double %c)
+ %cast = bitcast double %max1 to <2 x i32>
+ %elt0 = extractelement <2 x i32> %cast, i32 0
+ %elt1 = extractelement <2 x i32> %cast, i32 1
+ %readlane0 = call i32 @llvm.amdgcn.readfirstlane(i32 %elt0)
+ %readlane1 = call i32 @llvm.amdgcn.readfirstlane(i32 %elt1)
+ %insert.0 = insertelement <2 x i32> poison, i32 %readlane0, i32 0
+ %insert.1 = insertelement <2 x i32> %insert.0, i32 %readlane1, i32 1
+ ret <2 x i32> %insert.1
+}
+
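+; fabs and fneg on f64 operands likewise fold into |v[...]| and -v[...]
+; source modifiers on both targets.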
+define double @v_fmaximum3_f64_fabs0(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fmaximum3_f64_fabs0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], |v[0:1]|, v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f64_fabs0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[6:7], |v[0:1]|, v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, |v[0:1]|, v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_max_f64 v[2:3], v[0:1], v[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call double @llvm.fabs.f64(double %a)
+ %max0 = call double @llvm.maximum.f64(double %a.fabs, double %b)
+ %max1 = call double @llvm.maximum.f64(double %max0, double %c)
+ ret double %max1
+}
+
+define double @v_fmaximum3_f64_fabs1(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fmaximum3_f64_fabs1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], |v[2:3]|
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f64_fabs1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[6:7], v[0:1], |v[2:3]|
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, v[0:1], |v[2:3]|
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_max_f64 v[2:3], v[0:1], v[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %b.fabs = call double @llvm.fabs.f64(double %b)
+ %max0 = call double @llvm.maximum.f64(double %a, double %b.fabs)
+ %max1 = call double @llvm.maximum.f64(double %max0, double %c)
+ ret double %max1
+}
+
+define double @v_fmaximum3_f64_fabs2(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fmaximum3_f64_fabs2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], |v[4:5]|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f64_fabs2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[6:7], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_max_f64 v[2:3], v[0:1], |v[4:5]|
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, v[0:1], |v[4:5]|
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %c.fabs = call double @llvm.fabs.f64(double %c)
+ %max0 = call double @llvm.maximum.f64(double %a, double %b)
+ %max1 = call double @llvm.maximum.f64(double %max0, double %c.fabs)
+ ret double %max1
+}
+
+define double @v_fmaximum3_f64_fabs_all(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fmaximum3_f64_fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], |v[0:1]|, |v[2:3]|
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], |v[4:5]|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f64_fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[6:7], |v[0:1]|, |v[2:3]|
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, |v[0:1]|, |v[2:3]|
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_max_f64 v[2:3], v[0:1], |v[4:5]|
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, v[0:1], |v[4:5]|
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call double @llvm.fabs.f64(double %a)
+ %b.fabs = call double @llvm.fabs.f64(double %b)
+ %c.fabs = call double @llvm.fabs.f64(double %c)
+ %max0 = call double @llvm.maximum.f64(double %a.fabs, double %b.fabs)
+ %max1 = call double @llvm.maximum.f64(double %max0, double %c.fabs)
+ ret double %max1
+}
+
+define double @v_fmaximum3_f64_fneg_all(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fmaximum3_f64_fneg_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], -v[0:1], -v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], -v[4:5]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f64_fneg_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[6:7], -v[0:1], -v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, -v[0:1], -v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_max_f64 v[2:3], v[0:1], -v[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, v[0:1], -v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg double %a
+ %b.fneg = fneg double %b
+ %c.fneg = fneg double %c
+ %max0 = call double @llvm.maximum.f64(double %a.fneg, double %b.fneg)
+ %max1 = call double @llvm.maximum.f64(double %max0, double %c.fneg)
+ ret double %max1
+}
+
+define double @v_fmaximum3_f64_fneg_fabs_all(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fmaximum3_f64_fneg_fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], -|v[0:1]|, -|v[2:3]|
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], -|v[4:5]|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f64_fneg_fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[6:7], -|v[0:1]|, -|v[2:3]|
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, -|v[0:1]|, -|v[2:3]|
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_max_f64 v[2:3], v[0:1], -|v[4:5]|
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, v[0:1], -|v[4:5]|
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call double @llvm.fabs.f64(double %a)
+ %b.fabs = call double @llvm.fabs.f64(double %b)
+ %c.fabs = call double @llvm.fabs.f64(double %c)
+ %a.fneg.fabs = fneg double %a.fabs
+ %b.fneg.fabs = fneg double %b.fabs
+ %c.fneg.fabs = fneg double %c.fabs
+ %max0 = call double @llvm.maximum.f64(double %a.fneg.fabs, double %b.fneg.fabs)
+ %max1 = call double @llvm.maximum.f64(double %max0, double %c.fneg.fabs)
+ ret double %max1
+}
+
+define double @v_fmaximum3_f64_fneg0(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fmaximum3_f64_fneg0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], -v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f64_fneg0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[6:7], -v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, -v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_max_f64 v[2:3], v[0:1], v[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg double %a
+ %max0 = call double @llvm.maximum.f64(double %a.fneg, double %b)
+ %max1 = call double @llvm.maximum.f64(double %max0, double %c)
+ ret double %max1
+}
+
+define double @v_fmaximum3_f64_fneg1(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fmaximum3_f64_fneg1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], -v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f64_fneg1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[6:7], v[0:1], -v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, v[0:1], -v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_max_f64 v[2:3], v[0:1], v[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %b.fneg = fneg double %b
+ %max0 = call double @llvm.maximum.f64(double %a, double %b.fneg)
+ %max1 = call double @llvm.maximum.f64(double %max0, double %c)
+ ret double %max1
+}
+
+define double @v_fmaximum3_f64_fneg2(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fmaximum3_f64_fneg2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], -v[4:5]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f64_fneg2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[6:7], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_max_f64 v[2:3], v[0:1], -v[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, v[0:1], -v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %c.fneg = fneg double %c
+ %max0 = call double @llvm.maximum.f64(double %a, double %b)
+ %max1 = call double @llvm.maximum.f64(double %max0, double %c.fneg)
+ ret double %max1
+}
+
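+; Non-inline f64 constants cannot be encoded as VOP literals on GFX9, so 8.0
+; is materialized in an SGPR pair with s_mov_b32 before v_max_f64; the inline
+; immediate 4.0 further below is encoded directly on both targets.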
+define double @v_fmaximum3_f64_const0(double %b, double %c) {
+; GFX12-LABEL: v_fmaximum3_f64_const0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], 0x40200000, v[0:1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f64_const0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 0
+; GFX9-NEXT: s_mov_b32 s5, 0x40200000
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], s[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v6, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call double @llvm.maximum.f64(double 8.0, double %b)
+ %max1 = call double @llvm.maximum.f64(double %max0, double %c)
+ ret double %max1
+}
+
+define double @v_fmaximum3_f64__const2(double %a, double %b) {
+; GFX12-LABEL: v_fmaximum3_f64__const2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], 0x40200000, v[0:1]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f64__const2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, 0x7ff80000
+; GFX9-NEXT: s_mov_b32 s4, 0
+; GFX9-NEXT: s_mov_b32 s5, 0x40200000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_max_f64 v[2:3], v[0:1], s[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v6, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call double @llvm.maximum.f64(double %a, double %b)
+ %max1 = call double @llvm.maximum.f64(double %max0, double 8.0)
+ ret double %max1
+}
+
+define double @v_fmaximum3_f64_inlineimm0(double %b, double %c) {
+; GFX12-LABEL: v_fmaximum3_f64_inlineimm0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], 4.0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f64_inlineimm0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], 4.0
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v6, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call double @llvm.maximum.f64(double 4.0, double %b)
+ %max1 = call double @llvm.maximum.f64(double %max0, double %c)
+ ret double %max1
+}
+
+define double @v_fmaximum3_f64__inlineimm(double %a, double %b) {
+; GFX12-LABEL: v_fmaximum3_f64__inlineimm:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], 4.0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f64__inlineimm:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_max_f64 v[2:3], v[0:1], 4.0
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v6, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call double @llvm.maximum.f64(double %a, double %b)
+ %max1 = call double @llvm.maximum.f64(double %max0, double 4.0)
+ ret double %max1
+}
+
+define double @v_fmaximum3_f64_const1_const2(double %a) {
+; GFX12-LABEL: v_fmaximum3_f64_const1_const2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maximum_f64 v[0:1], 0x40200000, v[0:1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_maximum_f64 v[0:1], 0x40300000, v[0:1]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fmaximum3_f64_const1_const2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 0
+; GFX9-NEXT: s_mov_b32 s5, 0x40200000
+; GFX9-NEXT: v_max_f64 v[2:3], v[0:1], s[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7ff80000
+; GFX9-NEXT: s_mov_b32 s4, 0
+; GFX9-NEXT: s_mov_b32 s5, 0x40300000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_max_f64 v[2:3], v[0:1], s[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call double @llvm.maximum.f64(double %a, double 8.0)
+ %max1 = call double @llvm.maximum.f64(double %max0, double 16.0)
+ ret double %max1
+}
diff --git a/llvm/test/CodeGen/AMDGPU/fminimum3.ll b/llvm/test/CodeGen/AMDGPU/fminimum3.ll
index eef271e69a38..7481fff251d8 100644
--- a/llvm/test/CodeGen/AMDGPU/fminimum3.ll
+++ b/llvm/test/CodeGen/AMDGPU/fminimum3.ll
@@ -1,98 +1,3251 @@
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN %s
-
-; GCN-LABEL: {{^}}test_fminimum3_olt_0_f32:
-; GCN: buffer_load_b32 [[REGC:v[0-9]+]]
-; GCN: buffer_load_b32 [[REGB:v[0-9]+]]
-; GCN: buffer_load_b32 [[REGA:v[0-9]+]]
-; GCN: v_minimum3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
-; GCN: buffer_store_b32 [[RESULT]],
-define amdgpu_kernel void @test_fminimum3_olt_0_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
- %a = load volatile float, ptr addrspace(1) %aptr, align 4
- %b = load volatile float, ptr addrspace(1) %bptr, align 4
- %c = load volatile float, ptr addrspace(1) %cptr, align 4
- %f0 = call float @llvm.minimum.f32(float %a, float %b)
- %f1 = call float @llvm.minimum.f32(float %f0, float %c)
- store float %f1, ptr addrspace(1) %out, align 4
- ret void
-}
-
-; Commute operand of second fminimum
-; GCN-LABEL: {{^}}test_fminimum3_olt_1_f32:
-; GCN: buffer_load_b32 [[REGB:v[0-9]+]]
-; GCN: buffer_load_b32 [[REGA:v[0-9]+]]
-; GCN: buffer_load_b32 [[REGC:v[0-9]+]]
-; GCN: v_minimum3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
-; GCN: buffer_store_b32 [[RESULT]],
-define amdgpu_kernel void @test_fminimum3_olt_1_f32(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
- %a = load volatile float, ptr addrspace(1) %aptr, align 4
- %b = load volatile float, ptr addrspace(1) %bptr, align 4
- %c = load volatile float, ptr addrspace(1) %cptr, align 4
- %f0 = call float @llvm.minimum.f32(float %a, float %b)
- %f1 = call float @llvm.minimum.f32(float %c, float %f0)
- store float %f1, ptr addrspace(1) %out, align 4
- ret void
-}
-
-; GCN-LABEL: {{^}}test_fminimum3_olt_0_f16:
-; GCN: buffer_load_u16 [[REGC:v[0-9]+]]
-; GCN: buffer_load_u16 [[REGB:v[0-9]+]]
-; GCN: buffer_load_u16 [[REGA:v[0-9]+]]
-; GCN: v_minimum3_f16 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
-; GCN: buffer_store_b16 [[RESULT]],
-define amdgpu_kernel void @test_fminimum3_olt_0_f16(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
- %a = load volatile half, ptr addrspace(1) %aptr, align 2
- %b = load volatile half, ptr addrspace(1) %bptr, align 2
- %c = load volatile half, ptr addrspace(1) %cptr, align 2
- %f0 = call half @llvm.minimum.f16(half %a, half %b)
- %f1 = call half @llvm.minimum.f16(half %f0, half %c)
- store half %f1, ptr addrspace(1) %out, align 2
- ret void
-}
-
-; GCN-LABEL: {{^}}test_fminimum3_olt_1_f16:
-; GCN: buffer_load_u16 [[REGA:v[0-9]+]]
-; GCN: buffer_load_u16 [[REGB:v[0-9]+]]
-; GCN: buffer_load_u16 [[REGC:v[0-9]+]]
-; GCN: v_minimum3_f16 [[RESULT:v[0-9]+]], [[REGC]], [[REGA]], [[REGB]]
-; GCN: buffer_store_b16 [[RESULT]],
-define amdgpu_kernel void @test_fminimum3_olt_1_f16(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
- %a = load volatile half, ptr addrspace(1) %aptr, align 2
- %b = load volatile half, ptr addrspace(1) %bptr, align 2
- %c = load volatile half, ptr addrspace(1) %cptr, align 2
- %f0 = call half @llvm.minimum.f16(half %a, half %b)
- %f1 = call half @llvm.minimum.f16(half %c, half %f0)
- store half %f1, ptr addrspace(1) %out, align 2
- ret void
-}
-
-; performMinMaxCombine() should not form minimum3 for vector patterns, since
-; there are no packed fminimum3 instructions.
-; GCN-LABEL: {{^}}no_fminimum3_v2f16:
-; GCN: v_pk_minimum_f16 v0, v0, v1
-; GCN: v_pk_minimum_f16 v0, v2, v0
-; GCN: v_pk_minimum_f16 v0, v0, v3
-; GCN-NEXT: s_setpc_b64
-define <2 x half> @no_fminimum3_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x half> %d) {
-entry:
- %min = call <2 x half> @llvm.minimum.v2f16(<2 x half> %a, <2 x half> %b)
- %min1 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %c, <2 x half> %min)
- %res = call <2 x half> @llvm.minimum.v2f16(<2 x half> %min1, <2 x half> %d)
- ret <2 x half> %res
-}
-
-; GCN-LABEL: {{^}}no_fminimum3_olt_0_f64:
-; GCN-COUNT-2: v_minimum_f64
-define amdgpu_kernel void @no_fminimum3_olt_0_f64(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr, ptr addrspace(1) %cptr) {
- %a = load volatile double, ptr addrspace(1) %aptr, align 4
- %b = load volatile double, ptr addrspace(1) %bptr, align 4
- %c = load volatile double, ptr addrspace(1) %cptr, align 4
- %f0 = call double @llvm.minimum.f64(double %a, double %b)
- %f1 = call double @llvm.minimum.f64(double %f0, double %c)
- store double %f1, ptr addrspace(1) %out, align 4
- ret void
-}
-
-declare double @llvm.minimum.f64(double, double)
-declare float @llvm.minimum.f32(float, float)
-declare half @llvm.minimum.f16(half, half)
-declare <2 x half> @llvm.minimum.v2f16(<2 x half>, <2 x half>)
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
+
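+; On GFX12, two chained llvm.minimum.f32 calls combine into a single
+; v_minimum3_f32. GFX9 has no IEEE-754-2019 minimum instruction and expands
+; each step into v_min_f32 plus an ordered compare that substitutes the
+; canonical quiet NaN (0x7fc00000) for unordered inputs.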
+define float @v_fminimum3_f32(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fminimum3_f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e32 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f32_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call float @llvm.minimum.f32(float %a, float %b)
+ %min1 = call float @llvm.minimum.f32(float %min0, float %c)
+ ret float %min1
+}
+
+define float @v_fminimum3_f32_commute(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fminimum3_f32_commute:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, v2, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f32_commute:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e32 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f32_e32 v1, v2, v0
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call float @llvm.minimum.f32(float %a, float %b)
+ %min1 = call float @llvm.minimum.f32(float %c, float %min0)
+ ret float %min1
+}
+
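+; Only some operands of a VALU op may come from SGPRs, so one scalar input is
+; copied into a VGPR first; the uniform result is moved back to an SGPR with
+; v_readfirstlane_b32.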
+define amdgpu_ps i32 @s_fminimum3_f32(float inreg %a, float inreg %b, float inreg %c) {
+; GFX12-LABEL: s_fminimum3_f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v0, s2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_minimum3_f32 v0, s0, s1, v0
+; GFX12-NEXT: v_readfirstlane_b32 s0, v0
+; GFX12-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_fminimum3_f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v0, s1
+; GFX9-NEXT: v_min_f32_e32 v1, s0, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, s0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: v_min_f32_e32 v1, s2, v0
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, s2, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+ %min0 = call float @llvm.minimum.f32(float %a, float %b)
+ %min1 = call float @llvm.minimum.f32(float %min0, float %c)
+ %cast = bitcast float %min1 to i32
+ %readfirstlane = call i32 @llvm.amdgcn.readfirstlane(i32 %cast)
+ ret i32 %readfirstlane
+}
+
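+; fabs and fneg on any operand fold into source modifiers (|v|, -v) on both
+; the min and the compare instead of being emitted as separate instructions.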
+define float @v_fminimum3_f32_fabs0(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fminimum3_f32_fabs0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, |v0|, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f32_fabs0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e64 v3, |v0|, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, |v0|, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f32_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call float @llvm.fabs.f32(float %a)
+ %min0 = call float @llvm.minimum.f32(float %a.fabs, float %b)
+ %min1 = call float @llvm.minimum.f32(float %min0, float %c)
+ ret float %min1
+}
+
+define float @v_fminimum3_f32_fabs1(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fminimum3_f32_fabs1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, v0, |v1|, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f32_fabs1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e64 v3, v0, |v1|
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, |v1|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f32_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %b.fabs = call float @llvm.fabs.f32(float %b)
+ %min0 = call float @llvm.minimum.f32(float %a, float %b.fabs)
+ %min1 = call float @llvm.minimum.f32(float %min0, float %c)
+ ret float %min1
+}
+
+define float @v_fminimum3_f32_fabs2(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fminimum3_f32_fabs2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, v0, v1, |v2|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f32_fabs2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e32 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f32_e64 v1, v0, |v2|
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, |v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %c.fabs = call float @llvm.fabs.f32(float %c)
+ %min0 = call float @llvm.minimum.f32(float %a, float %b)
+ %min1 = call float @llvm.minimum.f32(float %min0, float %c.fabs)
+ ret float %min1
+}
+
+define float @v_fminimum3_f32_fabs_all(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fminimum3_f32_fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, |v0|, |v1|, |v2|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f32_fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e64 v3, |v0|, |v1|
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, |v0|, |v1|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f32_e64 v1, v0, |v2|
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, |v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call float @llvm.fabs.f32(float %a)
+ %b.fabs = call float @llvm.fabs.f32(float %b)
+ %c.fabs = call float @llvm.fabs.f32(float %c)
+ %min0 = call float @llvm.minimum.f32(float %a.fabs, float %b.fabs)
+ %min1 = call float @llvm.minimum.f32(float %min0, float %c.fabs)
+ ret float %min1
+}
+
+define float @v_fminimum3_f32_fneg_all(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fminimum3_f32_fneg_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, -v0, -v1, -v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f32_fneg_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e64 v3, -v0, -v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, -v0, -v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f32_e64 v1, v0, -v2
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, -v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg float %a
+ %b.fneg = fneg float %b
+ %c.fneg = fneg float %c
+ %min0 = call float @llvm.minimum.f32(float %a.fneg, float %b.fneg)
+ %min1 = call float @llvm.minimum.f32(float %min0, float %c.fneg)
+ ret float %min1
+}
+
+define float @v_fminimum3_f32_fneg_fabs_all(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fminimum3_f32_fneg_fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, -|v0|, -|v1|, -|v2|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f32_fneg_fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e64 v3, -|v0|, -|v1|
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, -|v0|, -|v1|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f32_e64 v1, v0, -|v2|
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, -|v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call float @llvm.fabs.f32(float %a)
+ %b.fabs = call float @llvm.fabs.f32(float %b)
+ %c.fabs = call float @llvm.fabs.f32(float %c)
+ %a.fneg.fabs = fneg float %a.fabs
+ %b.fneg.fabs = fneg float %b.fabs
+ %c.fneg.fabs = fneg float %c.fabs
+ %min0 = call float @llvm.minimum.f32(float %a.fneg.fabs, float %b.fneg.fabs)
+ %min1 = call float @llvm.minimum.f32(float %min0, float %c.fneg.fabs)
+ ret float %min1
+}
+
+define float @v_fminimum3_f32_fneg0(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fminimum3_f32_fneg0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, -v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f32_fneg0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e64 v3, -v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, -v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f32_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg float %a
+ %min0 = call float @llvm.minimum.f32(float %a.fneg, float %b)
+ %min1 = call float @llvm.minimum.f32(float %min0, float %c)
+ ret float %min1
+}
+
+define float @v_fminimum3_f32_fneg1(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fminimum3_f32_fneg1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, v0, -v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f32_fneg1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e64 v3, v0, -v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, -v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f32_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %b.fneg = fneg float %b
+ %min0 = call float @llvm.minimum.f32(float %a, float %b.fneg)
+ %min1 = call float @llvm.minimum.f32(float %min0, float %c)
+ ret float %min1
+}
+
+define float @v_fminimum3_f32_fneg2(float %a, float %b, float %c) {
+; GFX12-LABEL: v_fminimum3_f32_fneg2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, v0, v1, -v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f32_fneg2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e32 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f32_e64 v1, v0, -v2
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, -v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %c.fneg = fneg float %c
+ %min0 = call float @llvm.minimum.f32(float %a, float %b)
+ %min1 = call float @llvm.minimum.f32(float %min0, float %c.fneg)
+ ret float %min1
+}
+
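+; The non-inline constant 8.0 (0x41000000) is encoded as a 32-bit literal; the
+; inline immediate 4.0 in the tests below needs no literal dword.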
+define float @v_fminimum3_f32_const0(float %b, float %c) {
+; GFX12-LABEL: v_fminimum3_f32_const0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, v0, 0x41000000, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f32_const0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e32 v2, 0x41000000, v0
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call float @llvm.minimum.f32(float 8.0, float %b)
+ %min1 = call float @llvm.minimum.f32(float %min0, float %c)
+ ret float %min1
+}
+
+define float @v_fminimum3_f32__const2(float %a, float %b) {
+; GFX12-LABEL: v_fminimum3_f32__const2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, v0, v1, 0x41000000
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f32__const2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: v_min_f32_e32 v1, 0x41000000, v0
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call float @llvm.minimum.f32(float %a, float %b)
+ %min1 = call float @llvm.minimum.f32(float %min0, float 8.0)
+ ret float %min1
+}
+
+define float @v_fminimum3_f32_inlineimm0(float %b, float %c) {
+; GFX12-LABEL: v_fminimum3_f32_inlineimm0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, v0, 4.0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f32_inlineimm0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e32 v2, 4.0, v0
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call float @llvm.minimum.f32(float 4.0, float %b)
+ %min1 = call float @llvm.minimum.f32(float %min0, float %c)
+ ret float %min1
+}
+
+define float @v_fminimum3_f32__inlineimm(float %a, float %b) {
+; GFX12-LABEL: v_fminimum3_f32__inlineimm:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, v0, v1, 4.0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f32__inlineimm:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: v_min_f32_e32 v1, 4.0, v0
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call float @llvm.minimum.f32(float %a, float %b)
+ %min1 = call float @llvm.minimum.f32(float %min0, float 4.0)
+ ret float %min1
+}
+
+define float @v_fminimum3_f32_const1_const2(float %a) {
+; GFX12-LABEL: v_fminimum3_f32_const1_const2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_mov_b32 s0, 0x41000000
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: v_minimum3_f32 v0, v0, s0, 0x41800000
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f32_const1_const2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f32_e32 v1, 0x41000000, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: v_min_f32_e32 v1, 0x41800000, v0
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call float @llvm.minimum.f32(float %a, float 8.0)
+ %min1 = call float @llvm.minimum.f32(float %min0, float 16.0)
+ ret float %min1
+}
+
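+; There is no packed minimum3, so vectors are scalarized: one v_minimum3_f32
+; per element on GFX12. The generic GFX9 expansion also has to get signed
+; zero right, hence the v_cmp_class checks with mask 32 (negative zero).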
+define <2 x float> @v_fminimum3_v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+; GFX12-LABEL: v_fminimum3_v2f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, v4, v0, v2
+; GFX12-NEXT: v_minimum3_f32 v1, v5, v1, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v2f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v3, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v6, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v3, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v1, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v2, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v4, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v0, v4, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v4, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v4, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v2, v4, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v5, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v1, v5, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v5, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v5, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v2, v5, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call <2 x float> @llvm.minimum.v2f32(<2 x float> %a, <2 x float> %b)
+ %min1 = call <2 x float> @llvm.minimum.v2f32(<2 x float> %c, <2 x float> %min0)
+ ret <2 x float> %min1
+}
+
+define <2 x float> @v_fminimum3_v2f32_commute(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+; GFX12-LABEL: v_fminimum3_v2f32_commute:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, v0, v2, v4
+; GFX12-NEXT: v_minimum3_f32 v1, v1, v3, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v2f32_commute:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v3, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v6, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v3, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v1, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v2, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v4, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v1, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v5, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v5, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call <2 x float> @llvm.minimum.v2f32(<2 x float> %a, <2 x float> %b)
+ %min1 = call <2 x float> @llvm.minimum.v2f32(<2 x float> %min0, <2 x float> %c)
+ ret <2 x float> %min1
+}
+
+define <2 x float> @v_fminimum3_v2f32__fabs_all(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+; GFX12-LABEL: v_fminimum3_v2f32__fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, |v0|, |v2|, |v4|
+; GFX12-NEXT: v_minimum3_f32 v1, |v1|, |v3|, |v5|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v2f32__fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_f32_e64 vcc, |v1|, |v3|
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v3, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], |v1|, |v3|
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v7, |v6|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v1|, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v6, |v1|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v3|, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, |v3|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v1, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e64 vcc, |v0|, |v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v2, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], |v0|, |v2|
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v7, |v3|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v0|, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v3, |v0|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v2|, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, |v2|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e64 s[4:5], v0, |v4|
+; GFX9-NEXT: v_cndmask_b32_e64 v2, |v4|, v0, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, |v4|
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v4|, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, |v4|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cmp_lt_f32_e64 s[4:5], v1, |v5|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, |v5|, v1, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v1, |v5|
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v5|, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, |v5|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %a)
+ %b.fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %b)
+ %c.fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %c)
+ %min0 = call <2 x float> @llvm.minimum.v2f32(<2 x float> %a.fabs, <2 x float> %b.fabs)
+ %min1 = call <2 x float> @llvm.minimum.v2f32(<2 x float> %min0, <2 x float> %c.fabs)
+ ret <2 x float> %min1
+}
+
+define <2 x float> @v_fminimum3_v2f32__fneg_all(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+; GFX12-LABEL: v_fminimum3_v2f32__fneg_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, -v0, -v2, -v4
+; GFX12-NEXT: v_minimum3_f32 v1, -v1, -v3, -v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v2f32__fneg_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_f32_e64 vcc, -v1, -v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v3, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], -v1, -v3
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v7, -v6, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v1, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v6, -v1, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v3, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, -v3, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v1, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e64 vcc, -v0, -v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v2, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], -v0, -v2
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v7, -v3, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v0, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v3, -v0, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v2, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, -v2, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e64 s[4:5], v0, -v4
+; GFX9-NEXT: v_cndmask_b32_e64 v2, -v4, v0, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, -v4
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v4, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, -v4, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cmp_lt_f32_e64 s[4:5], v1, -v5
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, -v5, v1, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v1, -v5
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v5, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, -v5, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg <2 x float> %a
+ %b.fneg = fneg <2 x float> %b
+ %c.fneg = fneg <2 x float> %c
+ %min0 = call <2 x float> @llvm.minimum.v2f32(<2 x float> %a.fneg, <2 x float> %b.fneg)
+ %min1 = call <2 x float> @llvm.minimum.v2f32(<2 x float> %min0, <2 x float> %c.fneg)
+ ret <2 x float> %min1
+}
+
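+; A splat vector constant <2.0, 2.0> still uses the scalar inline immediate
+; in each per-element op.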
+define <2 x float> @v_fminimum3_v2f32__inlineimm1(<2 x float> %a, <2 x float> %c) {
+; GFX12-LABEL: v_fminimum3_v2f32__inlineimm1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, v0, 2.0, v2
+; GFX12-NEXT: v_minimum3_f32 v1, v1, 2.0, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v2f32__inlineimm1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, 2.0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, 2.0, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v5, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v4, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, 2.0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v4, 2.0, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v4, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v3, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call <2 x float> @llvm.minimum.v2f32(<2 x float> %a, <2 x float> <float 2.0, float 2.0>)
+ %min1 = call <2 x float> @llvm.minimum.v2f32(<2 x float> %min0, <2 x float> %c)
+ ret <2 x float> %min1
+}
+
+define <2 x float> @v_fminimum3_v2f32__inlineimm2(<2 x float> %a, <2 x float> %b) {
+; GFX12-LABEL: v_fminimum3_v2f32__inlineimm2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, v0, v2, 4.0
+; GFX12-NEXT: v_minimum3_f32 v1, v1, v3, 4.0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v2f32__inlineimm2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v5, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v3, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v2, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, 4.0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, 4.0, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, 4.0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v2, 4.0, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call <2 x float> @llvm.minimum.v2f32(<2 x float> %a, <2 x float> %b)
+ %min1 = call <2 x float> @llvm.minimum.v2f32(<2 x float> %min0, <2 x float> <float 4.0, float 4.0>)
+ ret <2 x float> %min1
+}
+
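+; <3 x float> scalarizes the same way, to three v_minimum3_f32 ops on GFX12.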
+define <3 x float> @v_fminimum3_v3f32(<3 x float> %a, <3 x float> %b, <3 x float> %c) {
+; GFX12-LABEL: v_fminimum3_v3f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, v6, v0, v3
+; GFX12-NEXT: v_minimum3_f32 v1, v7, v1, v4
+; GFX12-NEXT: v_minimum3_f32 v2, v8, v2, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v3f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v5, v2, vcc
+; GFX9-NEXT: v_mov_b32_e32 v10, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v10, v9, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v9, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v5, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v9
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v9, v2, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v1, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v10, v5, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v4, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v10, v4, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v3, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v6, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v0, v6, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v6, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v6, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v6, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v7, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v7, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v7, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v7, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v7, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v8, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v2, v8, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v8, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v8, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v8, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <3 x float> @llvm.minimum.v3f32(<3 x float> %a, <3 x float> %b)
+ %max1 = call <3 x float> @llvm.minimum.v3f32(<3 x float> %c, <3 x float> %max0)
+ ret <3 x float> %max1
+}
+
+define <3 x float> @v_fminimum3_v3f32_commute(<3 x float> %a, <3 x float> %b, <3 x float> %c) {
+; GFX12-LABEL: v_fminimum3_v3f32_commute:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, v0, v3, v6
+; GFX12-NEXT: v_minimum3_f32 v1, v1, v4, v7
+; GFX12-NEXT: v_minimum3_f32 v2, v2, v5, v8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v3f32_commute:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v5, v2, vcc
+; GFX9-NEXT: v_mov_b32_e32 v10, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v10, v9, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v9, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v5, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v9
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v9, v2, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v1, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v10, v5, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v4, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v10, v4, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v3, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v6, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v6, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v1, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v7, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v2, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v8, v2, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v8, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <3 x float> @llvm.minimum.v3f32(<3 x float> %a, <3 x float> %b)
+ %max1 = call <3 x float> @llvm.minimum.v3f32(<3 x float> %max0, <3 x float> %c)
+ ret <3 x float> %max1
+}
+
+define <3 x float> @v_fminimum3_v3f32__fabs_all(<3 x float> %a, <3 x float> %b, <3 x float> %c) {
+; GFX12-LABEL: v_fminimum3_v3f32__fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, |v0|, |v3|, |v6|
+; GFX12-NEXT: v_minimum3_f32 v1, |v1|, |v4|, |v7|
+; GFX12-NEXT: v_minimum3_f32 v2, |v2|, |v5|, |v8|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v3f32__fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_f32_e64 vcc, |v2|, |v5|
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v5, v2, vcc
+; GFX9-NEXT: v_mov_b32_e32 v10, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], |v2|, |v5|
+; GFX9-NEXT: v_cndmask_b32_e64 v9, v10, |v9|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v2|, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v9, |v2|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v5|, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, |v5|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v9
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v9, v2, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e64 vcc, |v1|, |v4|
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], |v1|, |v4|
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v10, |v5|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v1|, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v5, |v1|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v4|, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, |v4|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e64 vcc, |v0|, |v3|
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], |v0|, |v3|
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v10, |v4|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v0|, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, |v0|, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v3|, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, |v3|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e64 s[4:5], v0, |v6|
+; GFX9-NEXT: v_cndmask_b32_e64 v3, |v6|, v0, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, |v6|
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v6|, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, |v6|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cmp_lt_f32_e64 s[4:5], v1, |v7|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, |v7|, v1, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v1, |v7|
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v7|, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, |v7|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cmp_lt_f32_e64 s[4:5], v2, |v8|
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, |v8|, v2, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v2, |v8|
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], |v8|, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, |v8|, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call <3 x float> @llvm.fabs.v3f32(<3 x float> %a)
+ %b.fabs = call <3 x float> @llvm.fabs.v3f32(<3 x float> %b)
+ %c.fabs = call <3 x float> @llvm.fabs.v3f32(<3 x float> %c)
+ %max0 = call <3 x float> @llvm.minimum.v3f32(<3 x float> %a.fabs, <3 x float> %b.fabs)
+ %max1 = call <3 x float> @llvm.minimum.v3f32(<3 x float> %max0, <3 x float> %c.fabs)
+ ret <3 x float> %max1
+}
+
+define <3 x float> @v_fminimum3_v3f32__fneg_all(<3 x float> %a, <3 x float> %b, <3 x float> %c) {
+; GFX12-LABEL: v_fminimum3_v3f32__fneg_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, -v0, -v3, -v6
+; GFX12-NEXT: v_minimum3_f32 v1, -v1, -v4, -v7
+; GFX12-NEXT: v_minimum3_f32 v2, -v2, -v5, -v8
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v3f32__fneg_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_f32_e64 vcc, -v2, -v5
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v5, v2, vcc
+; GFX9-NEXT: v_mov_b32_e32 v10, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], -v2, -v5
+; GFX9-NEXT: v_cndmask_b32_e64 v9, v10, -v9, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v2, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v9, -v2, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v5, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, -v5, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v9
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v9, v2, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e64 vcc, -v1, -v4
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], -v1, -v4
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v10, -v5, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v1, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v5, -v1, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v4, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, -v4, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e64 vcc, -v0, -v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], -v0, -v3
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v10, -v4, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v0, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, -v0, s[4:5]
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v3, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, -v3, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e64 s[4:5], v0, -v6
+; GFX9-NEXT: v_cndmask_b32_e64 v3, -v6, v0, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v0, -v6
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v6, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, -v6, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cmp_lt_f32_e64 s[4:5], v1, -v7
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, -v7, v1, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v1, -v7
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v7, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, -v7, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cmp_lt_f32_e64 s[4:5], v2, -v8
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, -v8, v2, s[4:5]
+; GFX9-NEXT: v_cmp_o_f32_e64 vcc, v2, -v8
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 s[4:5], -v8, 32
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, -v8, s[4:5]
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg <3 x float> %a
+ %b.fneg = fneg <3 x float> %b
+ %c.fneg = fneg <3 x float> %c
+ %max0 = call <3 x float> @llvm.minimum.v3f32(<3 x float> %a.fneg, <3 x float> %b.fneg)
+ %max1 = call <3 x float> @llvm.minimum.v3f32(<3 x float> %max0, <3 x float> %c.fneg)
+ ret <3 x float> %max1
+}
+
+define <3 x float> @v_fminimum3_v3f32__inlineimm1(<3 x float> %a, <3 x float> %c) {
+; GFX12-LABEL: v_fminimum3_v3f32__inlineimm1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, v0, 2.0, v3
+; GFX12-NEXT: v_minimum3_f32 v1, v1, 2.0, v4
+; GFX12-NEXT: v_minimum3_f32 v2, v2, 2.0, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v3f32__inlineimm1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, 2.0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v6, 2.0, v2, vcc
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v6, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, 2.0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v6, 2.0, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v6, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, 2.0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v6, 2.0, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v6, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v3, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v6, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v3, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v0, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v1, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v4, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v2, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v5, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <3 x float> @llvm.minimum.v3f32(<3 x float> %a, <3 x float> <float 2.0, float 2.0, float 2.0>)
+ %max1 = call <3 x float> @llvm.minimum.v3f32(<3 x float> %max0, <3 x float> %c)
+ ret <3 x float> %max1
+}
+
+define <3 x float> @v_fminimum3_v3f32__inlineimm2(<3 x float> %a, <3 x float> %b) {
+; GFX12-LABEL: v_fminimum3_v3f32__inlineimm2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f32 v0, v0, v3, 4.0
+; GFX12-NEXT: v_minimum3_f32 v1, v1, v4, 4.0
+; GFX12-NEXT: v_minimum3_f32 v2, v2, v5, 4.0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v3f32__inlineimm2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v5, v2, vcc
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v6, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v2, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v5, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v1, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v7, v5, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v1, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v4, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_lt_f32_e32 vcc, v0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v7, v4, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v0, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_class_f32_e64 vcc, v3, 32
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
+; GFX9-NEXT: v_cmp_eq_f32_e32 vcc, 0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, 4.0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, 4.0, v0, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v3, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, 4.0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, 4.0, v1, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v3, vcc
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, 4.0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, 4.0, v2, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <3 x float> @llvm.minimum.v3f32(<3 x float> %a, <3 x float> %b)
+ %max1 = call <3 x float> @llvm.minimum.v3f32(<3 x float> %max0, <3 x float> <float 4.0, float 4.0, float 4.0>)
+ ret <3 x float> %max1
+}
+
+define half @v_fminimum3_f16(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fminimum3_f16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f16 v0, v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_e32 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f16_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call half @llvm.minimum.f16(half %a, half %b)
+ %max1 = call half @llvm.minimum.f16(half %max0, half %c)
+ ret half %max1
+}
+
+define half @v_fminimum3_f16_commute(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fminimum3_f16_commute:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f16 v0, v2, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f16_commute:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_e32 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f16_e32 v1, v2, v0
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v2, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call half @llvm.minimum.f16(half %a, half %b)
+ %max1 = call half @llvm.minimum.f16(half %c, half %max0)
+ ret half %max1
+}
+
+define amdgpu_ps i32 @s_fminimum3_f16(half inreg %a, half inreg %b, half inreg %c) {
+; GFX12-LABEL: s_fminimum3_f16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v0, s2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_minimum3_f16 v0, s0, s1, v0
+; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_readfirstlane_b32 s0, v0
+; GFX12-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_fminimum3_f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v0, s1
+; GFX9-NEXT: v_min_f16_e32 v1, s0, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, s0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: v_min_f16_e32 v1, s2, v0
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, s2, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_readfirstlane_b32 s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+ %max0 = call half @llvm.minimum.f16(half %a, half %b)
+ %max1 = call half @llvm.minimum.f16(half %max0, half %c)
+ %cast = bitcast half %max1 to i16
+ %zext = zext i16 %cast to i32
+ %readfirstlane = call i32 @llvm.amdgcn.readfirstlane(i32 %zext)
+ ret i32 %readfirstlane
+}
+
+define half @v_fminimum3_f16_fabs0(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fminimum3_f16_fabs0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f16 v0, |v0|, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f16_fabs0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_e64 v3, |v0|, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, |v0|, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f16_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call half @llvm.fabs.f16(half %a)
+ %max0 = call half @llvm.minimum.f16(half %a.fabs, half %b)
+ %max1 = call half @llvm.minimum.f16(half %max0, half %c)
+ ret half %max1
+}
+
+define half @v_fminimum3_f16_fabs1(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fminimum3_f16_fabs1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f16 v0, v0, |v1|, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f16_fabs1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_e64 v3, v0, |v1|
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, |v1|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f16_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %b.fabs = call half @llvm.fabs.f16(half %b)
+ %max0 = call half @llvm.minimum.f16(half %a, half %b.fabs)
+ %max1 = call half @llvm.minimum.f16(half %max0, half %c)
+ ret half %max1
+}
+
+define half @v_fminimum3_f16_fabs2(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fminimum3_f16_fabs2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f16 v0, v0, v1, |v2|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f16_fabs2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_e32 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f16_e64 v1, v0, |v2|
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, |v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %c.fabs = call half @llvm.fabs.f16(half %c)
+ %max0 = call half @llvm.minimum.f16(half %a, half %b)
+ %max1 = call half @llvm.minimum.f16(half %max0, half %c.fabs)
+ ret half %max1
+}
+
+define half @v_fminimum3_f16_fabs_all(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fminimum3_f16_fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f16 v0, |v0|, |v1|, |v2|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f16_fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_e64 v3, |v0|, |v1|
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, |v0|, |v1|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f16_e64 v1, v0, |v2|
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, |v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call half @llvm.fabs.f16(half %a)
+ %b.fabs = call half @llvm.fabs.f16(half %b)
+ %c.fabs = call half @llvm.fabs.f16(half %c)
+ %max0 = call half @llvm.minimum.f16(half %a.fabs, half %b.fabs)
+ %max1 = call half @llvm.minimum.f16(half %max0, half %c.fabs)
+ ret half %max1
+}
+
+define half @v_fminimum3_f16_fneg_all(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fminimum3_f16_fneg_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f16 v0, -v0, -v1, -v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f16_fneg_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_e64 v3, -v0, -v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -v0, -v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f16_e64 v1, v0, -v2
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, -v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg half %a
+ %b.fneg = fneg half %b
+ %c.fneg = fneg half %c
+ %max0 = call half @llvm.minimum.f16(half %a.fneg, half %b.fneg)
+ %max1 = call half @llvm.minimum.f16(half %max0, half %c.fneg)
+ ret half %max1
+}
+
+define half @v_fminimum3_f16_fneg_fabs_all(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fminimum3_f16_fneg_fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f16 v0, -|v0|, -|v1|, -|v2|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f16_fneg_fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_e64 v3, -|v0|, -|v1|
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -|v0|, -|v1|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f16_e64 v1, v0, -|v2|
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, -|v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call half @llvm.fabs.f16(half %a)
+ %b.fabs = call half @llvm.fabs.f16(half %b)
+ %c.fabs = call half @llvm.fabs.f16(half %c)
+ %a.fneg.fabs = fneg half %a.fabs
+ %b.fneg.fabs = fneg half %b.fabs
+ %c.fneg.fabs = fneg half %c.fabs
+ %max0 = call half @llvm.minimum.f16(half %a.fneg.fabs, half %b.fneg.fabs)
+ %max1 = call half @llvm.minimum.f16(half %max0, half %c.fneg.fabs)
+ ret half %max1
+}
+
+define half @v_fminimum3_f16_fneg0(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fminimum3_f16_fneg0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f16 v0, -v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f16_fneg0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_e64 v3, -v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f16_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg half %a
+ %max0 = call half @llvm.minimum.f16(half %a.fneg, half %b)
+ %max1 = call half @llvm.minimum.f16(half %max0, half %c)
+ ret half %max1
+}
+
+define half @v_fminimum3_f16_fneg1(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fminimum3_f16_fneg1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f16 v0, v0, -v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f16_fneg1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_e64 v3, v0, -v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, -v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f16_e32 v1, v0, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %b.fneg = fneg half %b
+ %max0 = call half @llvm.minimum.f16(half %a, half %b.fneg)
+ %max1 = call half @llvm.minimum.f16(half %max0, half %c)
+ ret half %max1
+}
+
+define half @v_fminimum3_f16_fneg2(half %a, half %b, half %c) {
+; GFX12-LABEL: v_fminimum3_f16_fneg2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f16 v0, v0, v1, -v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f16_fneg2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_e32 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: v_min_f16_e64 v1, v0, -v2
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, -v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %c.fneg = fneg half %c
+ %max0 = call half @llvm.minimum.f16(half %a, half %b)
+ %max1 = call half @llvm.minimum.f16(half %max0, half %c.fneg)
+ ret half %max1
+}
+
+define half @v_fminimum3_f16_const0(half %b, half %c) {
+; GFX12-LABEL: v_fminimum3_f16_const0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f16 v0, v0, 0x4800, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f16_const0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_e32 v2, 0x4800, v0
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call half @llvm.minimum.f16(half 8.0, half %b)
+ %max1 = call half @llvm.minimum.f16(half %max0, half %c)
+ ret half %max1
+}
+
+define half @v_fminimum3_f16__const2(half %a, half %b) {
+; GFX12-LABEL: v_fminimum3_f16__const2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f16 v0, v0, v1, 0x4800
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f16__const2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: v_min_f16_e32 v1, 0x4800, v0
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call half @llvm.minimum.f16(half %a, half %b)
+ %max1 = call half @llvm.minimum.f16(half %max0, half 8.0)
+ ret half %max1
+}
+
+define half @v_fminimum3_f16_inlineimm0(half %b, half %c) {
+; GFX12-LABEL: v_fminimum3_f16_inlineimm0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f16 v0, v0, 4.0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f16_inlineimm0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_e32 v2, 4.0, v0
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call half @llvm.minimum.f16(half 4.0, half %b)
+ %max1 = call half @llvm.minimum.f16(half %max0, half %c)
+ ret half %max1
+}
+
+define half @v_fminimum3_f16__inlineimm(half %a, half %b) {
+; GFX12-LABEL: v_fminimum3_f16__inlineimm:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum3_f16 v0, v0, v1, 4.0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f16__inlineimm:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: v_min_f16_e32 v1, 4.0, v0
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call half @llvm.minimum.f16(half %a, half %b)
+ %max1 = call half @llvm.minimum.f16(half %max0, half 4.0)
+ ret half %max1
+}
+
+define half @v_fminimum3_f16_const1_const2(half %a) {
+; GFX12-LABEL: v_fminimum3_f16_const1_const2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_movk_i32 s0, 0x4800
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: v_minimum3_f16 v0, v0, s0, 0x4c00
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f16_const1_const2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_e32 v1, 0x4800, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: v_min_f16_e32 v1, 0x4c00, v0
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call half @llvm.minimum.f16(half %a, half 8.0)
+ %max1 = call half @llvm.minimum.f16(half %max0, half 16.0)
+ ret half %max1
+}
+
+define <2 x half> @v_fminimum3_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c) {
+; GFX12-LABEL: v_fminimum3_v2f16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v2, v0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v2f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_f16 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v3, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v1 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v1, v0, v5, s4
+; GFX9-NEXT: v_pk_min_f16 v1, v2, v1
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v1, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v2, v0 src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %a, <2 x half> %b)
+ %max1 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %c, <2 x half> %max0)
+ ret <2 x half> %max1
+}
+
+define <2 x half> @v_fminimum3_v2f16_commute(<2 x half> %a, <2 x half> %b, <2 x half> %c) {
+; GFX12-LABEL: v_fminimum3_v2f16_commute:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v2f16_commute:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_f16 v3, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v3, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v1 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v1, v0, v5, s4
+; GFX9-NEXT: v_pk_min_f16 v1, v1, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v5, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v1, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %a, <2 x half> %b)
+ %max1 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %max0, <2 x half> %c)
+ ret <2 x half> %max1
+}
+
+define <2 x half> @v_fminimum3_v2f16__fabs_all(<2 x half> %a, <2 x half> %b, <2 x half> %c) {
+; GFX12-LABEL: v_fminimum3_v2f16__fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, 0x7fff7fff, v0
+; GFX12-NEXT: v_and_b32_e32 v1, 0x7fff7fff, v1
+; GFX12-NEXT: v_and_b32_e32 v2, 0x7fff7fff, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v1
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v2f16__fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff7fff, v0
+; GFX9-NEXT: v_and_b32_e32 v4, 0x7fff7fff, v1
+; GFX9-NEXT: v_pk_min_f16 v3, v3, v4
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX9-NEXT: v_mov_b32_e32 v6, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, |v0|, |v1| src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, |v0|, |v1|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v3, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_and_b32_e32 v5, 0x7fff7fff, v2
+; GFX9-NEXT: v_perm_b32 v1, v4, v0, s4
+; GFX9-NEXT: v_pk_min_f16 v1, v1, v5
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v4, |v2| src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v6, v3, vcc
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, |v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v1, vcc
+; GFX9-NEXT: v_perm_b32 v0, v3, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %a)
+ %b.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %b)
+ %c.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %c)
+ %max0 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %a.fabs, <2 x half> %b.fabs)
+ %max1 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %max0, <2 x half> %c.fabs)
+ ret <2 x half> %max1
+}
+
+define <2 x half> @v_fminimum3_v2f16__fneg_all(<2 x half> %a, <2 x half> %b, <2 x half> %c) {
+; GFX12-LABEL: v_fminimum3_v2f16__fneg_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v1 neg_lo:[1,1] neg_hi:[1,1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v2 neg_lo:[0,1] neg_hi:[0,1]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v2f16__fneg_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_f16 v3, v0, v1 neg_lo:[1,1] neg_hi:[1,1]
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -v0, -v1
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v3, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, -v0, -v1 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v1, v0, v5, s4
+; GFX9-NEXT: v_pk_min_f16 v1, v1, v2 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v5, -v2
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v1, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, -v2 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg <2 x half> %a
+ %b.fneg = fneg <2 x half> %b
+ %c.fneg = fneg <2 x half> %c
+ %max0 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %a.fneg, <2 x half> %b.fneg)
+ %max1 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %max0, <2 x half> %c.fneg)
+ ret <2 x half> %max1
+}
+
+define <2 x half> @v_fminimum3_v2f16__inlineimm1(<2 x half> %a, <2 x half> %c) {
+; GFX12-LABEL: v_fminimum3_v2f16__inlineimm1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, 2.0 op_sel_hi:[1,0]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v2f16__inlineimm1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_f16 v2, v0, 2.0 op_sel_hi:[1,0]
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v0 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v2, v3, v0, s4
+; GFX9-NEXT: v_pk_min_f16 v2, v2, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v3, v1 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v3, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %a, <2 x half> <half 2.0, half 2.0>)
+ %max1 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %max0, <2 x half> %c)
+ ret <2 x half> %max1
+}
+
+define <2 x half> @v_fminimum3_v2f16__inlineimm2(<2 x half> %a, <2 x half> %b) {
+; GFX12-LABEL: v_fminimum3_v2f16__inlineimm2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, 4.0 op_sel_hi:[1,0]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v2f16__inlineimm2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_f16 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v3, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v1 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v1, v0, v4, s4
+; GFX9-NEXT: v_pk_min_f16 v1, v1, 4.0 op_sel_hi:[1,0]
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v4, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v1, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v2, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %a, <2 x half> %b)
+ %max1 = call <2 x half> @llvm.minimum.v2f16(<2 x half> %max0, <2 x half> <half 4.0, half 4.0>)
+ ret <2 x half> %max1
+}
+
+define <3 x half> @v_fminimum3_v3f16(<3 x half> %a, <3 x half> %b, <3 x half> %c) {
+; GFX12-LABEL: v_fminimum3_v3f16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v4, v0
+; GFX12-NEXT: v_pk_minimum_f16 v1, v5, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v3f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_f16 v6, v0, v2
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v7, v6, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v6, vcc
+; GFX9-NEXT: v_pk_min_f16 v2, v1, v3
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v1, v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v1, v1, v6, s4
+; GFX9-NEXT: v_pk_min_f16 v1, v5, v1
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v5, v6
+; GFX9-NEXT: v_perm_b32 v2, v0, v8, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v1, vcc
+; GFX9-NEXT: v_pk_min_f16 v2, v4, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v4, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v4, v0 src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %a, <3 x half> %b)
+ %max1 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %c, <3 x half> %max0)
+ ret <3 x half> %max1
+}
+
+define <3 x half> @v_fminimum3_v3f16_commute(<3 x half> %a, <3 x half> %b, <3 x half> %c) {
+; GFX12-LABEL: v_fminimum3_v3f16_commute:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v4
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v3f16_commute:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_f16 v6, v0, v2
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v7, v6, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v6, vcc
+; GFX9-NEXT: v_pk_min_f16 v2, v1, v3
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v1, v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v1, v1, v6, s4
+; GFX9-NEXT: v_pk_min_f16 v1, v1, v5
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v6, v5
+; GFX9-NEXT: v_perm_b32 v2, v0, v8, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v1, vcc
+; GFX9-NEXT: v_pk_min_f16 v2, v2, v4
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v8, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v4 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %max0 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %a, <3 x half> %b)
+ %max1 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %max0, <3 x half> %c)
+ ret <3 x half> %max1
+}
+
+define <3 x half> @v_fminimum3_v3f16__fabs_all(<3 x half> %a, <3 x half> %b, <3 x half> %c) {
+; GFX12-LABEL: v_fminimum3_v3f16__fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, 0x7fff7fff, v0
+; GFX12-NEXT: v_and_b32_e32 v1, 0x7fff7fff, v1
+; GFX12-NEXT: v_and_b32_e32 v2, 0x7fff7fff, v2
+; GFX12-NEXT: v_and_b32_e32 v3, 0x7fff7fff, v3
+; GFX12-NEXT: v_and_b32_e32 v5, 0x7fff7fff, v5
+; GFX12-NEXT: v_and_b32_e32 v4, 0x7fff7fff, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v4
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v3f16__fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v7, 0x7fff7fff, v1
+; GFX9-NEXT: v_and_b32_e32 v9, 0x7fff7fff, v3
+; GFX9-NEXT: v_and_b32_e32 v6, 0x7fff7fff, v0
+; GFX9-NEXT: v_and_b32_e32 v8, 0x7fff7fff, v2
+; GFX9-NEXT: v_pk_min_f16 v7, v7, v9
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v7
+; GFX9-NEXT: v_mov_b32_e32 v12, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, |v1|, |v3| src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_pk_min_f16 v6, v6, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v12, v9, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v6
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, |v0|, |v2| src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v12, v8, vcc
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, |v1|, |v3|
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v12, v7, vcc
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, |v0|, |v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v12, v6, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_and_b32_e32 v11, 0x7fff7fff, v4
+; GFX9-NEXT: v_perm_b32 v2, v8, v0, s4
+; GFX9-NEXT: v_pk_min_f16 v2, v2, v11
+; GFX9-NEXT: v_and_b32_e32 v10, 0x7fff7fff, v5
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v8, |v4| src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_perm_b32 v6, v9, v1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v12, v3, vcc
+; GFX9-NEXT: v_pk_min_f16 v6, v6, v10
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v1, |v5|
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v12, v6, vcc
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, |v4|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v12, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v3, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call <3 x half> @llvm.fabs.v3f16(<3 x half> %a)
+ %b.fabs = call <3 x half> @llvm.fabs.v3f16(<3 x half> %b)
+ %c.fabs = call <3 x half> @llvm.fabs.v3f16(<3 x half> %c)
+ %max0 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %a.fabs, <3 x half> %b.fabs)
+ %max1 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %max0, <3 x half> %c.fabs)
+ ret <3 x half> %max1
+}
+
+define <3 x half> @v_fminimum3_v3f16__fneg_all(<3 x half> %a, <3 x half> %b, <3 x half> %c) {
+; GFX12-LABEL: v_fminimum3_v3f16__fneg_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v2 neg_lo:[1,1] neg_hi:[1,1]
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, v3 neg_lo:[1,1] neg_hi:[1,1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v4 neg_lo:[0,1] neg_hi:[0,1]
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, v5 neg_lo:[0,1] neg_hi:[0,1]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v3f16__fneg_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_f16 v6, v0, v2 neg_lo:[1,1] neg_hi:[1,1]
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -v0, -v2
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v7, v6, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, -v0, -v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v6, vcc
+; GFX9-NEXT: v_pk_min_f16 v2, v1, v3 neg_lo:[1,1] neg_hi:[1,1]
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -v1, -v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, -v1, -v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v1, v1, v6, s4
+; GFX9-NEXT: v_pk_min_f16 v1, v1, v5 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v6, -v5
+; GFX9-NEXT: v_perm_b32 v2, v0, v8, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v1, vcc
+; GFX9-NEXT: v_pk_min_f16 v2, v2, v4 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v8, -v4
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, -v4 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg <3 x half> %a
+ %b.fneg = fneg <3 x half> %b
+ %c.fneg = fneg <3 x half> %c
+ %max0 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %a.fneg, <3 x half> %b.fneg)
+ %max1 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %max0, <3 x half> %c.fneg)
+ ret <3 x half> %max1
+}
+
+define <3 x half> @v_fminimum3_v3f16__inlineimm1(<3 x half> %a, <3 x half> %c) {
+; GFX12-LABEL: v_fminimum3_v3f16__inlineimm1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, 2.0 op_sel_hi:[1,0]
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, 2.0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v3f16__inlineimm1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_f16 v4, v0, 2.0 op_sel_hi:[1,0]
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v4
+; GFX9-NEXT: v_mov_b32_e32 v6, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v0 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
+; GFX9-NEXT: v_pk_min_f16 v7, v1, 2.0
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v7, vcc
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v4, vcc
+; GFX9-NEXT: s_mov_b32 s5, 0x5040100
+; GFX9-NEXT: v_perm_b32 v4, v5, v0, s5
+; GFX9-NEXT: v_pk_min_f16 v4, v4, v2
+; GFX9-NEXT: s_movk_i32 s4, 0x7e00
+; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v4
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v5, v2 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc
+; GFX9-NEXT: v_pack_b32_f16 v7, v1, s4
+; GFX9-NEXT: v_pk_min_f16 v7, v7, v3
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v7, vcc
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v4, vcc
+; GFX9-NEXT: v_perm_b32 v0, v5, v0, s5
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %a, <3 x half> <half 2.0, half 2.0, half 2.0>)
+ %min1 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %min0, <3 x half> %c)
+ ret <3 x half> %min1
+}
+
+define <3 x half> @v_fminimum3_v3f16__inlineimm2(<3 x half> %a, <3 x half> %b) {
+; GFX12-LABEL: v_fminimum3_v3f16__inlineimm2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, 4.0 op_sel_hi:[1,0]
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, 4.0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v3f16__inlineimm2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_f16 v4, v0, v2
+; GFX9-NEXT: v_mov_b32_e32 v5, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v5, v4, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v4, vcc
+; GFX9-NEXT: v_pk_min_f16 v2, v1, v3
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v1, v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v1, v1, v4, s4
+; GFX9-NEXT: v_pk_min_f16 v1, v1, 4.0
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v4, v4
+; GFX9-NEXT: v_perm_b32 v2, v0, v6, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_pk_min_f16 v2, v2, 4.0 op_sel_hi:[1,0]
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v6, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %a, <3 x half> %b)
+ %min1 = call <3 x half> @llvm.minimum.v3f16(<3 x half> %min0, <3 x half> <half 4.0, half 4.0, half 4.0>)
+ ret <3 x half> %min1
+}
+
+define <4 x half> @v_fminimum3_v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+; GFX12-LABEL: v_fminimum3_v4f16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v4, v0
+; GFX12-NEXT: v_pk_minimum_f16 v1, v5, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v4f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_f16 v6, v0, v2
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v7, v6, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v6, vcc
+; GFX9-NEXT: v_pk_min_f16 v2, v1, v3
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v1, v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v2, v1, v6, s4
+; GFX9-NEXT: v_pk_min_f16 v2, v5, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v5, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v5, v1 src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
+; GFX9-NEXT: v_perm_b32 v2, v0, v8, s4
+; GFX9-NEXT: v_pk_min_f16 v2, v4, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v4, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v4, v0 src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v5, s4
+; GFX9-NEXT: v_perm_b32 v1, v1, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %a, <4 x half> %b)
+ %min1 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %c, <4 x half> %min0)
+ ret <4 x half> %min1
+}
+
+define <4 x half> @v_fminimum3_v4f16_commute(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+; GFX12-LABEL: v_fminimum3_v4f16_commute:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v4
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v4f16_commute:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_f16 v6, v0, v2
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v7, v6, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v6, vcc
+; GFX9-NEXT: v_pk_min_f16 v2, v1, v3
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v1, v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v2, v1, v6, s4
+; GFX9-NEXT: v_pk_min_f16 v2, v2, v5
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v6, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v1, v5 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
+; GFX9-NEXT: v_perm_b32 v2, v0, v8, s4
+; GFX9-NEXT: v_pk_min_f16 v2, v2, v4
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v8, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v4 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v5, s4
+; GFX9-NEXT: v_perm_b32 v1, v1, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %a, <4 x half> %b)
+ %min1 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %min0, <4 x half> %c)
+ ret <4 x half> %min1
+}
+
+define <4 x half> @v_fminimum3_v4f16__fabs_all(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+; GFX12-LABEL: v_fminimum3_v4f16__fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, 0x7fff7fff, v0
+; GFX12-NEXT: v_and_b32_e32 v1, 0x7fff7fff, v1
+; GFX12-NEXT: v_and_b32_e32 v2, 0x7fff7fff, v2
+; GFX12-NEXT: v_and_b32_e32 v3, 0x7fff7fff, v3
+; GFX12-NEXT: v_and_b32_e32 v5, 0x7fff7fff, v5
+; GFX12-NEXT: v_and_b32_e32 v4, 0x7fff7fff, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v4
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v4f16__fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v7, 0x7fff7fff, v0
+; GFX9-NEXT: v_and_b32_e32 v9, 0x7fff7fff, v2
+; GFX9-NEXT: v_and_b32_e32 v6, 0x7fff7fff, v1
+; GFX9-NEXT: v_and_b32_e32 v8, 0x7fff7fff, v3
+; GFX9-NEXT: v_pk_min_f16 v7, v7, v9
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v7
+; GFX9-NEXT: v_mov_b32_e32 v12, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, |v0|, |v2| src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_pk_min_f16 v6, v6, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v12, v9, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v6
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, |v1|, |v3| src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v12, v8, vcc
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, |v0|, |v2|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v12, v7, vcc
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, |v1|, |v3|
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v12, v6, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_and_b32_e32 v11, 0x7fff7fff, v5
+; GFX9-NEXT: v_perm_b32 v2, v8, v1, s4
+; GFX9-NEXT: v_and_b32_e32 v10, 0x7fff7fff, v4
+; GFX9-NEXT: v_pk_min_f16 v2, v2, v11
+; GFX9-NEXT: v_perm_b32 v6, v9, v0, s4
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v8, |v5| src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_pk_min_f16 v6, v6, v10
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v12, v3, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v6
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v9, |v4| src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v12, v7, vcc
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v1, |v5|
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v12, v2, vcc
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v0, |v4|
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v12, v6, vcc
+; GFX9-NEXT: v_perm_b32 v0, v7, v0, s4
+; GFX9-NEXT: v_perm_b32 v1, v3, v1, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call <4 x half> @llvm.fabs.v4f16(<4 x half> %a)
+ %b.fabs = call <4 x half> @llvm.fabs.v4f16(<4 x half> %b)
+ %c.fabs = call <4 x half> @llvm.fabs.v4f16(<4 x half> %c)
+ %min0 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %a.fabs, <4 x half> %b.fabs)
+ %min1 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %min0, <4 x half> %c.fabs)
+ ret <4 x half> %min1
+}
+
+define <4 x half> @v_fminimum3_v4f16__fneg_all(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+; GFX12-LABEL: v_fminimum3_v4f16__fneg_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v2 neg_lo:[1,1] neg_hi:[1,1]
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, v3 neg_lo:[1,1] neg_hi:[1,1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v4 neg_lo:[0,1] neg_hi:[0,1]
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, v5 neg_lo:[0,1] neg_hi:[0,1]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v4f16__fneg_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_f16 v6, v0, v2 neg_lo:[1,1] neg_hi:[1,1]
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -v0, -v2
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v7, v6, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, -v0, -v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v6, vcc
+; GFX9-NEXT: v_pk_min_f16 v2, v1, v3 neg_lo:[1,1] neg_hi:[1,1]
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, -v1, -v3
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, -v1, -v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v2, v1, v6, s4
+; GFX9-NEXT: v_pk_min_f16 v2, v2, v5 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v6, -v5
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v1, -v5 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
+; GFX9-NEXT: v_perm_b32 v2, v0, v8, s4
+; GFX9-NEXT: v_pk_min_f16 v2, v2, v4 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-NEXT: v_cmp_o_f16_e64 vcc, v8, -v4
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v7, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, -v4 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v5, s4
+; GFX9-NEXT: v_perm_b32 v1, v1, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg <4 x half> %a
+ %b.fneg = fneg <4 x half> %b
+ %c.fneg = fneg <4 x half> %c
+ %min0 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %a.fneg, <4 x half> %b.fneg)
+ %min1 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %min0, <4 x half> %c.fneg)
+ ret <4 x half> %min1
+}
+
+define <4 x half> @v_fminimum3_v4f16__inlineimm1(<4 x half> %a, <4 x half> %c) {
+; GFX12-LABEL: v_fminimum3_v4f16__inlineimm1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, 2.0 op_sel_hi:[1,0]
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, 2.0 op_sel_hi:[1,0]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v4f16__inlineimm1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_f16 v4, v0, 2.0 op_sel_hi:[1,0]
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v4
+; GFX9-NEXT: v_mov_b32_e32 v6, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v0 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_pk_min_f16 v7, v1, 2.0 op_sel_hi:[1,0]
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v7
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v1, v1 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v6, v8, vcc
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v4, vcc
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v7, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v4, v8, v1, s4
+; GFX9-NEXT: v_pk_min_f16 v4, v4, v3
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v8, v3 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_perm_b32 v8, v5, v0, s4
+; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v4
+; GFX9-NEXT: v_pk_min_f16 v8, v8, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v6, v7, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v8
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v5, v2 src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v6, v9, vcc
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v4, vcc
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v8, vcc
+; GFX9-NEXT: v_perm_b32 v0, v5, v0, s4
+; GFX9-NEXT: v_perm_b32 v1, v7, v1, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %a, <4 x half> <half 2.0, half 2.0, half 2.0, half 2.0>)
+ %min1 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %min0, <4 x half> %c)
+ ret <4 x half> %min1
+}
+
+define <4 x half> @v_fminimum3_v4f16__inlineimm2(<4 x half> %a, <4 x half> %b) {
+; GFX12-LABEL: v_fminimum3_v4f16__inlineimm2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, 4.0 op_sel_hi:[1,0]
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, 4.0 op_sel_hi:[1,0]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_v4f16__inlineimm2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_f16 v4, v0, v2
+; GFX9-NEXT: v_mov_b32_e32 v5, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v5, v4, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v4, vcc
+; GFX9-NEXT: v_pk_min_f16 v2, v1, v3
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_sdwa vcc, v1, v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v2, v1, v4, s4
+; GFX9-NEXT: v_pk_min_f16 v2, v2, 4.0 op_sel_hi:[1,0]
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v4, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
+; GFX9-NEXT: v_perm_b32 v2, v0, v6, s4
+; GFX9-NEXT: v_pk_min_f16 v2, v2, 4.0 op_sel_hi:[1,0]
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v6, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: v_perm_b32 v0, v0, v4, s4
+; GFX9-NEXT: v_perm_b32 v1, v1, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %a, <4 x half> %b)
+ %min1 = call <4 x half> @llvm.minimum.v4f16(<4 x half> %min0, <4 x half> <half 4.0, half 4.0, half 4.0, half 4.0>)
+ ret <4 x half> %min1
+}
+
+define double @v_fminimum3_f64(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fminimum3_f64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f64 v[6:7], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_min_f64 v[2:3], v[0:1], v[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call double @llvm.minimum.f64(double %a, double %b)
+ %min1 = call double @llvm.minimum.f64(double %min0, double %c)
+ ret double %min1
+}
+
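Note on the GFX9 sequence above: GFX9 has no native IEEE-754-2019 minimum instruction for f64, so llvm.minimum.f64 is expanded into v_min_f64, an unordered compare (v_cmp_u_f64), and two v_cndmask_b32 selects that substitute the canonical quiet NaN (high word 0x7ff80000, low word 0) whenever either input is NaN. A rough IR-level equivalent, assuming v_min_f64 behaves like llvm.minnum on ordered inputs:

  %m   = call double @llvm.minnum.f64(double %a, double %b)
  %uno = fcmp uno double %a, %b   ; true iff either operand is NaN
  %r   = select i1 %uno, double 0x7FF8000000000000, double %m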
+define double @v_fminimum3_f64_commute(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fminimum3_f64_commute:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[4:5], v[0:1]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f64_commute:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f64 v[6:7], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_min_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[4:5], v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call double @llvm.minimum.f64(double %a, double %b)
+ %min1 = call double @llvm.minimum.f64(double %c, double %min0)
+ ret double %min1
+}
+
+define amdgpu_ps <2 x i32> @s_fminimum3_f64(double inreg %a, double inreg %b, double inreg %c) {
+; GFX12-LABEL: s_fminimum3_f64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_minimum_f64 v[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], s[4:5]
+; GFX12-NEXT: v_readfirstlane_b32 s0, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_readfirstlane_b32 s1, v1
+; GFX12-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_fminimum3_f64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: v_min_f64 v[2:3], s[0:1], v[0:1]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_min_f64 v[2:3], v[0:1], s[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, s[4:5], v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v2, 0, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v1
+; GFX9-NEXT: v_readfirstlane_b32 s1, v0
+; GFX9-NEXT: ; return to shader part epilog
+ %min0 = call double @llvm.minimum.f64(double %a, double %b)
+ %min1 = call double @llvm.minimum.f64(double %min0, double %c)
+ %cast = bitcast double %min1 to <2 x i32>
+ %elt0 = extractelement <2 x i32> %cast, i32 0
+ %elt1 = extractelement <2 x i32> %cast, i32 1
+ %readlane0 = call i32 @llvm.amdgcn.readfirstlane(i32 %elt0)
+ %readlane1 = call i32 @llvm.amdgcn.readfirstlane(i32 %elt1)
+ %insert.0 = insertelement <2 x i32> poison, i32 %readlane0, i32 0
+ %insert.1 = insertelement <2 x i32> %insert.0, i32 %readlane1, i32 1
+ ret <2 x i32> %insert.1
+}
+
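Note on the amdgpu_ps test above: the operands arrive in SGPRs (inreg), but there is no scalar f64 min instruction, so the computation runs on the VALU and the uniform result is copied back to SGPRs with v_readfirstlane_b32. A trimmed sketch of the same bitcast/extract/readfirstlane idiom (function name hypothetical), keeping only the low half of the result:

  define amdgpu_ps i32 @uniform_min_lo(double inreg %a, double inreg %b) {
    %m = call double @llvm.minimum.f64(double %a, double %b) ; VALU-only result
    %cast = bitcast double %m to <2 x i32>
    %lo = extractelement <2 x i32> %cast, i32 0
    %s = call i32 @llvm.amdgcn.readfirstlane(i32 %lo)        ; VGPR -> SGPR copy
    ret i32 %s
  }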
+define double @v_fminimum3_f64_fabs0(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fminimum3_f64_fabs0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], |v[0:1]|, v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f64_fabs0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f64 v[6:7], |v[0:1]|, v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, |v[0:1]|, v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_min_f64 v[2:3], v[0:1], v[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call double @llvm.fabs.f64(double %a)
+ %min0 = call double @llvm.minimum.f64(double %a.fabs, double %b)
+ %min1 = call double @llvm.minimum.f64(double %min0, double %c)
+ ret double %min1
+}
+
+define double @v_fminimum3_f64_fabs1(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fminimum3_f64_fabs1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], |v[2:3]|
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f64_fabs1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f64 v[6:7], v[0:1], |v[2:3]|
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, v[0:1], |v[2:3]|
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_min_f64 v[2:3], v[0:1], v[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %b.fabs = call double @llvm.fabs.f64(double %b)
+ %min0 = call double @llvm.minimum.f64(double %a, double %b.fabs)
+ %min1 = call double @llvm.minimum.f64(double %min0, double %c)
+ ret double %min1
+}
+
+define double @v_fminimum3_f64_fabs2(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fminimum3_f64_fabs2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], |v[4:5]|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f64_fabs2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f64 v[6:7], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_min_f64 v[2:3], v[0:1], |v[4:5]|
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, v[0:1], |v[4:5]|
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %c.fabs = call double @llvm.fabs.f64(double %c)
+ %min0 = call double @llvm.minimum.f64(double %a, double %b)
+ %min1 = call double @llvm.minimum.f64(double %min0, double %c.fabs)
+ ret double %min1
+}
+
+define double @v_fminimum3_f64_fabs_all(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fminimum3_f64_fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], |v[0:1]|, |v[2:3]|
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], |v[4:5]|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f64_fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f64 v[6:7], |v[0:1]|, |v[2:3]|
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, |v[0:1]|, |v[2:3]|
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_min_f64 v[2:3], v[0:1], |v[4:5]|
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, v[0:1], |v[4:5]|
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call double @llvm.fabs.f64(double %a)
+ %b.fabs = call double @llvm.fabs.f64(double %b)
+ %c.fabs = call double @llvm.fabs.f64(double %c)
+ %min0 = call double @llvm.minimum.f64(double %a.fabs, double %b.fabs)
+ %min1 = call double @llvm.minimum.f64(double %min0, double %c.fabs)
+ ret double %min1
+}
+
+define double @v_fminimum3_f64_fneg_all(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fminimum3_f64_fneg_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], -v[0:1], -v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], -v[4:5]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f64_fneg_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f64 v[6:7], -v[0:1], -v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, -v[0:1], -v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_min_f64 v[2:3], v[0:1], -v[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, v[0:1], -v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg double %a
+ %b.fneg = fneg double %b
+ %c.fneg = fneg double %c
+ %min0 = call double @llvm.minimum.f64(double %a.fneg, double %b.fneg)
+ %min1 = call double @llvm.minimum.f64(double %min0, double %c.fneg)
+ ret double %min1
+}
+
+define double @v_fminimum3_f64_fneg_fabs_all(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fminimum3_f64_fneg_fabs_all:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], -|v[0:1]|, -|v[2:3]|
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], -|v[4:5]|
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f64_fneg_fabs_all:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f64 v[6:7], -|v[0:1]|, -|v[2:3]|
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, -|v[0:1]|, -|v[2:3]|
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_min_f64 v[2:3], v[0:1], -|v[4:5]|
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, v[0:1], -|v[4:5]|
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fabs = call double @llvm.fabs.f64(double %a)
+ %b.fabs = call double @llvm.fabs.f64(double %b)
+ %c.fabs = call double @llvm.fabs.f64(double %c)
+ %a.fneg.fabs = fneg double %a.fabs
+ %b.fneg.fabs = fneg double %b.fabs
+ %c.fneg.fabs = fneg double %c.fabs
+ %min0 = call double @llvm.minimum.f64(double %a.fneg.fabs, double %b.fneg.fabs)
+ %min1 = call double @llvm.minimum.f64(double %min0, double %c.fneg.fabs)
+ ret double %min1
+}
+
+define double @v_fminimum3_f64_fneg0(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fminimum3_f64_fneg0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], -v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f64_fneg0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f64 v[6:7], -v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, -v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_min_f64 v[2:3], v[0:1], v[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %a.fneg = fneg double %a
+ %min0 = call double @llvm.minimum.f64(double %a.fneg, double %b)
+ %min1 = call double @llvm.minimum.f64(double %min0, double %c)
+ ret double %min1
+}
+
+define double @v_fminimum3_f64_fneg1(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fminimum3_f64_fneg1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], -v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[4:5]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f64_fneg1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f64 v[6:7], v[0:1], -v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, v[0:1], -v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_min_f64 v[2:3], v[0:1], v[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %b.fneg = fneg double %b
+ %min0 = call double @llvm.minimum.f64(double %a, double %b.fneg)
+ %min1 = call double @llvm.minimum.f64(double %min0, double %c)
+ ret double %min1
+}
+
+define double @v_fminimum3_f64_fneg2(double %a, double %b, double %c) {
+; GFX12-LABEL: v_fminimum3_f64_fneg2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], -v[4:5]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f64_fneg2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f64 v[6:7], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v8, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v6, 0, vcc
+; GFX9-NEXT: v_min_f64 v[2:3], v[0:1], -v[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e64 vcc, v[0:1], -v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %c.fneg = fneg double %c
+ %min0 = call double @llvm.minimum.f64(double %a, double %b)
+ %min1 = call double @llvm.minimum.f64(double %min0, double %c.fneg)
+ ret double %min1
+}
+
+define double @v_fminimum3_f64_const0(double %b, double %c) {
+; GFX12-LABEL: v_fminimum3_f64_const0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], 0x40200000, v[0:1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f64_const0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 0
+; GFX9-NEXT: s_mov_b32 s5, 0x40200000
+; GFX9-NEXT: v_min_f64 v[4:5], v[0:1], s[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v6, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_min_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call double @llvm.minimum.f64(double 8.0, double %b)
+ %min1 = call double @llvm.minimum.f64(double %min0, double %c)
+ ret double %min1
+}
+
+define double @v_fminimum3_f64__const2(double %a, double %b) {
+; GFX12-LABEL: v_fminimum3_f64__const2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], 0x40200000, v[0:1]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f64__const2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, 0x7ff80000
+; GFX9-NEXT: s_mov_b32 s4, 0
+; GFX9-NEXT: s_mov_b32 s5, 0x40200000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_min_f64 v[2:3], v[0:1], s[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v6, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call double @llvm.minimum.f64(double %a, double %b)
+ %min1 = call double @llvm.minimum.f64(double %min0, double 8.0)
+ ret double %min1
+}
+
+define double @v_fminimum3_f64_inlineimm0(double %b, double %c) {
+; GFX12-LABEL: v_fminimum3_f64_inlineimm0:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], 4.0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f64_inlineimm0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f64 v[4:5], v[0:1], 4.0
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v6, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_min_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call double @llvm.minimum.f64(double 4.0, double %b)
+ %min1 = call double @llvm.minimum.f64(double %min0, double %c)
+ ret double %min1
+}
+
+define double @v_fminimum3_f64__inlineimm(double %a, double %b) {
+; GFX12-LABEL: v_fminimum3_f64__inlineimm:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], 4.0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f64__inlineimm:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v6, 0x7ff80000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_min_f64 v[2:3], v[0:1], 4.0
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v6, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call double @llvm.minimum.f64(double %a, double %b)
+ %min1 = call double @llvm.minimum.f64(double %min0, double 4.0)
+ ret double %min1
+}
+
+define double @v_fminimum3_f64_const1_const2(double %a) {
+; GFX12-LABEL: v_fminimum3_f64_const1_const2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minimum_f64 v[0:1], 0x40200000, v[0:1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_minimum_f64 v[0:1], 0x40300000, v[0:1]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_fminimum3_f64_const1_const2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 0
+; GFX9-NEXT: s_mov_b32 s5, 0x40200000
+; GFX9-NEXT: v_min_f64 v[2:3], v[0:1], s[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x7ff80000
+; GFX9-NEXT: s_mov_b32 s4, 0
+; GFX9-NEXT: s_mov_b32 s5, 0x40300000
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_min_f64 v[2:3], v[0:1], s[4:5]
+; GFX9-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %min0 = call double @llvm.minimum.f64(double %a, double 8.0)
+ %min1 = call double @llvm.minimum.f64(double %min0, double 16.0)
+ ret double %min1
+}
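Note on the constant tests: 4.0 is encodable as an inline immediate on both targets (see the inlineimm tests above), while 8.0 and 16.0 are not, so GFX9 materializes them into an SGPR pair (s_mov_b32 s4, 0 / s_mov_b32 s5, 0x40200000) and GFX12 appears to encode the high 32 bits as a literal with the low word implicitly zero. The hex constants follow directly from the IEEE-754 binary64 encoding:

  8.0  = 1.0 * 2^3 -> biased exponent 1023 + 3 = 1026 = 0x402 -> high word 0x40200000
  16.0 = 1.0 * 2^4 -> biased exponent 1023 + 4 = 1027 = 0x403 -> high word 0x40300000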
diff --git a/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll b/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll
index 64063f65e288..04ef30bd26aa 100644
--- a/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll
+++ b/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll
@@ -253,25 +253,25 @@ define amdgpu_kernel void @fp_to_sint_i64 (ptr addrspace(1) %out, float %in) {
; EG-NEXT: ADD_INT * T2.W, PV.W, literal.y,
; EG-NEXT: 8388608(1.175494e-38), -150(nan)
; EG-NEXT: ADD_INT T0.X, T0.W, literal.x,
-; EG-NEXT: SUB_INT T0.Y, literal.y, T0.W,
-; EG-NEXT: AND_INT T0.Z, PS, literal.z,
+; EG-NEXT: AND_INT T0.Y, PS, literal.y,
+; EG-NEXT: SUB_INT T0.Z, literal.z, T0.W,
; EG-NEXT: NOT_INT T0.W, PS,
; EG-NEXT: LSHR * T3.W, PV.W, 1,
-; EG-NEXT: -127(nan), 150(2.101948e-43)
-; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: -127(nan), 31(4.344025e-44)
+; EG-NEXT: 150(2.101948e-43), 0(0.000000e+00)
; EG-NEXT: BIT_ALIGN_INT T1.X, 0.0, PS, PV.W,
-; EG-NEXT: LSHL T1.Y, T1.W, PV.Z,
-; EG-NEXT: AND_INT T0.Z, T2.W, literal.x, BS:VEC_120/SCL_212
-; EG-NEXT: BIT_ALIGN_INT T0.W, 0.0, T1.W, PV.Y, BS:VEC_021/SCL_122
-; EG-NEXT: AND_INT * T1.W, PV.Y, literal.x,
+; EG-NEXT: AND_INT T1.Y, PV.Z, literal.x,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, 0.0, T1.W, PV.Z,
+; EG-NEXT: LSHL T0.W, T1.W, PV.Y,
+; EG-NEXT: AND_INT * T1.W, T2.W, literal.x,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: CNDE_INT T0.Y, PS, PV.W, 0.0,
-; EG-NEXT: CNDE_INT T1.Z, PV.Z, PV.Y, 0.0,
-; EG-NEXT: CNDE_INT T0.W, PV.Z, PV.X, PV.Y,
+; EG-NEXT: CNDE_INT T0.Z, PV.Y, PV.Z, 0.0,
+; EG-NEXT: CNDE_INT T0.W, PS, PV.X, PV.W,
; EG-NEXT: SETGT_INT * T1.W, T0.X, literal.x,
; EG-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; EG-NEXT: CNDE_INT T0.Z, PS, 0.0, PV.W,
-; EG-NEXT: CNDE_INT T0.W, PS, PV.Y, PV.Z,
+; EG-NEXT: CNDE_INT T1.Z, PS, 0.0, PV.W,
+; EG-NEXT: CNDE_INT T0.W, PS, PV.Z, PV.Y,
; EG-NEXT: ASHR * T1.W, KC0[2].Z, literal.x,
; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
; EG-NEXT: XOR_INT T0.W, PV.W, PS,
@@ -364,79 +364,78 @@ define amdgpu_kernel void @fp_to_sint_v2i64(ptr addrspace(1) %out, <2 x float> %
;
; EG-LABEL: fp_to_sint_v2i64:
; EG: ; %bb.0:
-; EG-NEXT: ALU 75, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT: ALU 74, @4, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.XYZW, T0.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: ALU clause starting at 4:
; EG-NEXT: MOV * T0.W, literal.x,
; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
-; EG-NEXT: BFE_UINT * T1.W, KC0[2].W, literal.x, PV.W,
-; EG-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; EG-NEXT: AND_INT T0.Z, KC0[2].W, literal.x,
-; EG-NEXT: BFE_UINT T0.W, KC0[3].X, literal.y, T0.W,
-; EG-NEXT: ADD_INT * T2.W, PV.W, literal.z,
-; EG-NEXT: 8388607(1.175494e-38), 23(3.222986e-44)
+; EG-NEXT: BFE_UINT T0.Z, KC0[3].X, literal.x, PV.W,
+; EG-NEXT: BFE_UINT T0.W, KC0[2].W, literal.x, PV.W,
+; EG-NEXT: AND_INT * T1.Z, KC0[2].W, literal.y,
+; EG-NEXT: 23(3.222986e-44), 8388607(1.175494e-38)
+; EG-NEXT: ADD_INT T1.W, PV.W, literal.x,
+; EG-NEXT: ADD_INT * T2.W, PV.Z, literal.x,
; EG-NEXT: -150(nan), 0(0.000000e+00)
-; EG-NEXT: SUB_INT T0.X, literal.x, PV.W,
-; EG-NEXT: SUB_INT T0.Y, literal.x, T1.W,
-; EG-NEXT: AND_INT T1.Z, PS, literal.y,
-; EG-NEXT: OR_INT T3.W, PV.Z, literal.z,
+; EG-NEXT: AND_INT T0.X, PS, literal.x,
+; EG-NEXT: AND_INT T0.Y, PV.W, literal.x,
+; EG-NEXT: OR_INT T1.Z, T1.Z, literal.y,
+; EG-NEXT: SUB_INT T3.W, literal.z, T0.W,
; EG-NEXT: AND_INT * T4.W, KC0[3].X, literal.w,
-; EG-NEXT: 150(2.101948e-43), 31(4.344025e-44)
-; EG-NEXT: 8388608(1.175494e-38), 8388607(1.175494e-38)
+; EG-NEXT: 31(4.344025e-44), 8388608(1.175494e-38)
+; EG-NEXT: 150(2.101948e-43), 8388607(1.175494e-38)
; EG-NEXT: OR_INT T1.X, PS, literal.x,
-; EG-NEXT: LSHL T1.Y, PV.W, PV.Z,
-; EG-NEXT: AND_INT T0.Z, T2.W, literal.y,
-; EG-NEXT: BIT_ALIGN_INT T4.W, 0.0, PV.W, PV.Y,
-; EG-NEXT: AND_INT * T5.W, PV.Y, literal.y,
+; EG-NEXT: AND_INT T1.Y, PV.W, literal.y,
+; EG-NEXT: BIT_ALIGN_INT T2.Z, 0.0, PV.Z, PV.W,
+; EG-NEXT: LSHL T3.W, PV.Z, PV.Y,
+; EG-NEXT: AND_INT * T4.W, T1.W, literal.y,
; EG-NEXT: 8388608(1.175494e-38), 32(4.484155e-44)
-; EG-NEXT: CNDE_INT T2.X, PS, PV.W, 0.0,
-; EG-NEXT: CNDE_INT T0.Y, PV.Z, PV.Y, 0.0,
-; EG-NEXT: ADD_INT T1.Z, T0.W, literal.x,
-; EG-NEXT: BIT_ALIGN_INT T4.W, 0.0, PV.X, T0.X,
-; EG-NEXT: AND_INT * T5.W, T0.X, literal.y,
-; EG-NEXT: -150(nan), 32(4.484155e-44)
+; EG-NEXT: CNDE_INT T0.Y, PS, PV.W, 0.0,
+; EG-NEXT: CNDE_INT T2.Z, PV.Y, PV.Z, 0.0,
+; EG-NEXT: LSHL T5.W, PV.X, T0.X,
+; EG-NEXT: AND_INT * T6.W, T2.W, literal.x,
+; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: CNDE_INT T0.X, PS, PV.W, 0.0,
-; EG-NEXT: NOT_INT T2.Y, T2.W,
-; EG-NEXT: AND_INT T2.Z, PV.Z, literal.x,
-; EG-NEXT: NOT_INT T2.W, PV.Z,
-; EG-NEXT: LSHR * T4.W, T1.X, 1,
-; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: LSHR T3.X, T3.W, 1,
-; EG-NEXT: ADD_INT T3.Y, T0.W, literal.x, BS:VEC_120/SCL_212
-; EG-NEXT: BIT_ALIGN_INT T3.Z, 0.0, PS, PV.W,
-; EG-NEXT: LSHL T0.W, T1.X, PV.Z,
-; EG-NEXT: AND_INT * T2.W, T1.Z, literal.y,
+; EG-NEXT: NOT_INT T1.Y, T1.W,
+; EG-NEXT: SUB_INT T3.Z, literal.x, T0.Z,
+; EG-NEXT: NOT_INT T1.W, T2.W, BS:VEC_120/SCL_212
+; EG-NEXT: LSHR * T2.W, T1.X, 1,
+; EG-NEXT: 150(2.101948e-43), 0(0.000000e+00)
+; EG-NEXT: LSHR T2.X, T1.Z, 1,
+; EG-NEXT: ADD_INT T2.Y, T0.Z, literal.x, BS:VEC_120/SCL_212
+; EG-NEXT: BIT_ALIGN_INT T0.Z, 0.0, PS, PV.W,
+; EG-NEXT: BIT_ALIGN_INT T1.W, 0.0, T1.X, PV.Z,
+; EG-NEXT: AND_INT * T2.W, PV.Z, literal.y,
; EG-NEXT: -127(nan), 32(4.484155e-44)
; EG-NEXT: CNDE_INT T1.X, PS, PV.W, 0.0,
-; EG-NEXT: CNDE_INT T4.Y, PS, PV.Z, PV.W,
-; EG-NEXT: SETGT_INT T1.Z, PV.Y, literal.x,
-; EG-NEXT: BIT_ALIGN_INT T0.W, 0.0, PV.X, T2.Y,
-; EG-NEXT: ADD_INT * T1.W, T1.W, literal.y,
+; EG-NEXT: CNDE_INT T3.Y, T6.W, PV.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGT_INT T0.Z, PV.Y, literal.x,
+; EG-NEXT: BIT_ALIGN_INT T1.W, 0.0, PV.X, T1.Y,
+; EG-NEXT: ADD_INT * T0.W, T0.W, literal.y,
; EG-NEXT: 23(3.222986e-44), -127(nan)
-; EG-NEXT: CNDE_INT T3.X, T0.Z, PV.W, T1.Y,
+; EG-NEXT: CNDE_INT T2.X, T4.W, PV.W, T3.W,
; EG-NEXT: SETGT_INT T1.Y, PS, literal.x,
-; EG-NEXT: CNDE_INT T0.Z, PV.Z, 0.0, PV.Y,
-; EG-NEXT: CNDE_INT T0.W, PV.Z, T0.X, PV.X,
+; EG-NEXT: CNDE_INT T1.Z, PV.Z, 0.0, PV.Y,
+; EG-NEXT: CNDE_INT T1.W, PV.Z, PV.X, T0.X,
; EG-NEXT: ASHR * T2.W, KC0[3].X, literal.y,
; EG-NEXT: 23(3.222986e-44), 31(4.344025e-44)
; EG-NEXT: XOR_INT T0.X, PV.W, PS,
-; EG-NEXT: XOR_INT T2.Y, PV.Z, PS,
+; EG-NEXT: XOR_INT T3.Y, PV.Z, PS,
; EG-NEXT: CNDE_INT T0.Z, PV.Y, 0.0, PV.X,
-; EG-NEXT: CNDE_INT T0.W, PV.Y, T2.X, T0.Y,
+; EG-NEXT: CNDE_INT T1.W, PV.Y, T2.Z, T0.Y,
; EG-NEXT: ASHR * T3.W, KC0[2].W, literal.x,
; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
; EG-NEXT: XOR_INT T0.Y, PV.W, PS,
; EG-NEXT: XOR_INT T0.Z, PV.Z, PS,
-; EG-NEXT: SUB_INT T0.W, PV.Y, T2.W,
+; EG-NEXT: SUB_INT T1.W, PV.Y, T2.W,
; EG-NEXT: SUBB_UINT * T4.W, PV.X, T2.W,
; EG-NEXT: SUB_INT T1.Y, PV.W, PS,
-; EG-NEXT: SETGT_INT T1.Z, 0.0, T3.Y,
-; EG-NEXT: SUB_INT T0.W, PV.Z, T3.W,
+; EG-NEXT: SETGT_INT T1.Z, 0.0, T2.Y,
+; EG-NEXT: SUB_INT T1.W, PV.Z, T3.W,
; EG-NEXT: SUBB_UINT * T4.W, PV.Y, T3.W,
; EG-NEXT: SUB_INT T0.Z, PV.W, PS,
-; EG-NEXT: SETGT_INT T0.W, 0.0, T1.W,
+; EG-NEXT: SETGT_INT T0.W, 0.0, T0.W,
; EG-NEXT: CNDE_INT * T1.W, PV.Z, PV.Y, 0.0,
; EG-NEXT: CNDE_INT T1.Y, PV.W, PV.Z, 0.0,
; EG-NEXT: SUB_INT * T2.W, T0.X, T2.W,
@@ -567,170 +566,168 @@ define amdgpu_kernel void @fp_to_sint_v4i64(ptr addrspace(1) %out, <4 x float> %
;
; EG-LABEL: fp_to_sint_v4i64:
; EG: ; %bb.0:
-; EG-NEXT: ALU 101, @6, KC0[CB0:0-32], KC1[]
-; EG-NEXT: ALU 54, @108, KC0[CB0:0-32], KC1[]
-; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T4.XYZW, T0.X, 0
-; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T6.XYZW, T2.X, 1
+; EG-NEXT: ALU 99, @6, KC0[CB0:0-32], KC1[]
+; EG-NEXT: ALU 54, @106, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.XYZW, T2.X, 0
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T6.XYZW, T0.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: ALU clause starting at 6:
; EG-NEXT: MOV * T0.W, literal.x,
; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
-; EG-NEXT: BFE_UINT T1.W, KC0[4].X, literal.x, PV.W,
-; EG-NEXT: AND_INT * T2.W, KC0[4].X, literal.y,
+; EG-NEXT: BFE_UINT T1.W, KC0[3].Z, literal.x, PV.W,
+; EG-NEXT: AND_INT * T2.W, KC0[3].Z, literal.y,
; EG-NEXT: 23(3.222986e-44), 8388607(1.175494e-38)
-; EG-NEXT: OR_INT T0.Z, PS, literal.x,
-; EG-NEXT: BFE_UINT T2.W, KC0[3].Z, literal.y, T0.W,
-; EG-NEXT: ADD_INT * T3.W, PV.W, literal.z,
-; EG-NEXT: 8388608(1.175494e-38), 23(3.222986e-44)
-; EG-NEXT: -150(nan), 0(0.000000e+00)
-; EG-NEXT: ADD_INT T0.Y, PV.W, literal.x,
-; EG-NEXT: AND_INT T1.Z, PS, literal.y,
-; EG-NEXT: NOT_INT T4.W, PS,
-; EG-NEXT: LSHR * T5.W, PV.Z, 1,
-; EG-NEXT: -127(nan), 31(4.344025e-44)
+; EG-NEXT: OR_INT T2.W, PS, literal.x,
+; EG-NEXT: ADD_INT * T3.W, PV.W, literal.y,
+; EG-NEXT: 8388608(1.175494e-38), -150(nan)
; EG-NEXT: ADD_INT T0.X, T1.W, literal.x,
-; EG-NEXT: BIT_ALIGN_INT T1.Y, 0.0, PS, PV.W,
-; EG-NEXT: AND_INT T2.Z, T3.W, literal.y, BS:VEC_201
-; EG-NEXT: LSHL T3.W, T0.Z, PV.Z,
-; EG-NEXT: SUB_INT * T1.W, literal.z, T1.W,
-; EG-NEXT: -127(nan), 32(4.484155e-44)
-; EG-NEXT: 150(2.101948e-43), 0(0.000000e+00)
-; EG-NEXT: AND_INT T1.X, PS, literal.x,
-; EG-NEXT: BIT_ALIGN_INT T2.Y, 0.0, T0.Z, PS,
-; EG-NEXT: AND_INT T0.Z, KC0[3].Z, literal.y,
-; EG-NEXT: CNDE_INT T1.W, PV.Z, PV.Y, PV.W,
-; EG-NEXT: SETGT_INT * T4.W, PV.X, literal.z,
+; EG-NEXT: BFE_UINT T0.Y, KC0[4].X, literal.y, T0.W,
+; EG-NEXT: AND_INT T0.Z, PS, literal.z,
+; EG-NEXT: NOT_INT T4.W, PS,
+; EG-NEXT: LSHR * T5.W, PV.W, 1,
+; EG-NEXT: -127(nan), 23(3.222986e-44)
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T1.X, 0.0, PS, PV.W,
+; EG-NEXT: AND_INT T1.Y, T3.W, literal.x,
+; EG-NEXT: LSHL T0.Z, T2.W, PV.Z, BS:VEC_120/SCL_212
+; EG-NEXT: AND_INT T3.W, KC0[4].X, literal.y,
+; EG-NEXT: ADD_INT * T4.W, PV.Y, literal.z,
; EG-NEXT: 32(4.484155e-44), 8388607(1.175494e-38)
+; EG-NEXT: -150(nan), 0(0.000000e+00)
+; EG-NEXT: AND_INT T2.Y, PS, literal.x,
+; EG-NEXT: OR_INT T1.Z, PV.W, literal.y,
+; EG-NEXT: CNDE_INT T3.W, PV.Y, PV.X, PV.Z,
+; EG-NEXT: SETGT_INT * T5.W, T0.X, literal.z,
+; EG-NEXT: 31(4.344025e-44), 8388608(1.175494e-38)
; EG-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; EG-NEXT: CNDE_INT T2.X, PS, 0.0, PV.W,
-; EG-NEXT: OR_INT T1.Y, PV.Z, literal.x,
-; EG-NEXT: ADD_INT T0.Z, T2.W, literal.y,
-; EG-NEXT: CNDE_INT T1.W, PV.X, PV.Y, 0.0,
-; EG-NEXT: CNDE_INT * T3.W, T2.Z, T3.W, 0.0,
-; EG-NEXT: 8388608(1.175494e-38), -150(nan)
-; EG-NEXT: CNDE_INT T1.X, T4.W, PV.W, PS,
-; EG-NEXT: ASHR T2.Y, KC0[4].X, literal.x,
-; EG-NEXT: AND_INT T1.Z, PV.Z, literal.x,
-; EG-NEXT: NOT_INT T1.W, PV.Z,
-; EG-NEXT: LSHR * T3.W, PV.Y, 1,
-; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: BIT_ALIGN_INT T3.X, 0.0, PS, PV.W,
-; EG-NEXT: LSHL T3.Y, T1.Y, PV.Z,
-; EG-NEXT: XOR_INT T1.Z, PV.X, PV.Y,
-; EG-NEXT: XOR_INT T1.W, T2.X, PV.Y,
-; EG-NEXT: SUB_INT * T2.W, literal.x, T2.W,
-; EG-NEXT: 150(2.101948e-43), 0(0.000000e+00)
-; EG-NEXT: AND_INT T1.X, T0.Z, literal.x,
-; EG-NEXT: AND_INT T4.Y, PS, literal.x,
-; EG-NEXT: BIT_ALIGN_INT T0.Z, 0.0, T1.Y, PS, BS:VEC_021/SCL_122
-; EG-NEXT: SUB_INT T1.W, PV.W, T2.Y,
-; EG-NEXT: SUBB_UINT * T2.W, PV.Z, T2.Y,
-; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
-; EG-NEXT: SUB_INT T2.X, PV.W, PS,
-; EG-NEXT: CNDE_INT T1.Y, PV.Y, PV.Z, 0.0,
-; EG-NEXT: CNDE_INT T0.Z, PV.X, T3.Y, 0.0,
-; EG-NEXT: CNDE_INT T1.W, PV.X, T3.X, T3.Y, BS:VEC_021/SCL_122
-; EG-NEXT: SETGT_INT * T2.W, T0.Y, literal.x,
+; EG-NEXT: CNDE_INT T3.Y, PS, 0.0, PV.W,
+; EG-NEXT: SUB_INT T2.Z, literal.x, T1.W,
+; EG-NEXT: LSHL T1.W, PV.Z, PV.Y,
+; EG-NEXT: AND_INT * T3.W, T4.W, literal.y,
+; EG-NEXT: 150(2.101948e-43), 32(4.484155e-44)
+; EG-NEXT: CNDE_INT T1.X, PS, PV.W, 0.0,
+; EG-NEXT: AND_INT T2.Y, PV.Z, literal.x,
+; EG-NEXT: SUB_INT T3.Z, literal.y, T0.Y,
+; EG-NEXT: NOT_INT T4.W, T4.W,
+; EG-NEXT: LSHR * T6.W, T1.Z, 1,
+; EG-NEXT: 32(4.484155e-44), 150(2.101948e-43)
+; EG-NEXT: BIT_ALIGN_INT T2.X, 0.0, T2.W, T2.Z,
+; EG-NEXT: ADD_INT T0.Y, T0.Y, literal.x,
+; EG-NEXT: BIT_ALIGN_INT T2.Z, 0.0, PS, PV.W,
+; EG-NEXT: BIT_ALIGN_INT T2.W, 0.0, T1.Z, PV.Z,
+; EG-NEXT: AND_INT * T4.W, PV.Z, literal.y,
+; EG-NEXT: -127(nan), 32(4.484155e-44)
+; EG-NEXT: CNDE_INT T3.X, PS, PV.W, 0.0,
+; EG-NEXT: CNDE_INT T4.Y, T3.W, PV.Z, T1.W,
+; EG-NEXT: SETGT_INT T1.Z, PV.Y, literal.x,
+; EG-NEXT: CNDE_INT T1.W, T1.Y, T0.Z, 0.0,
+; EG-NEXT: CNDE_INT * T2.W, T2.Y, PV.X, 0.0,
; EG-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; EG-NEXT: BFE_UINT T1.X, KC0[3].W, literal.x, T0.W,
-; EG-NEXT: AND_INT T3.Y, KC0[3].W, literal.y,
-; EG-NEXT: CNDE_INT T2.Z, PS, 0.0, PV.W,
-; EG-NEXT: CNDE_INT T1.W, PS, PV.Y, PV.Z,
-; EG-NEXT: ASHR * T2.W, KC0[3].Z, literal.z,
-; EG-NEXT: 23(3.222986e-44), 8388607(1.175494e-38)
+; EG-NEXT: CNDE_INT T2.X, T5.W, PS, PV.W,
+; EG-NEXT: ASHR T1.Y, KC0[3].Z, literal.x,
+; EG-NEXT: CNDE_INT T0.Z, PV.Z, 0.0, PV.Y,
+; EG-NEXT: CNDE_INT T1.W, PV.Z, PV.X, T1.X,
+; EG-NEXT: ASHR * T2.W, KC0[4].X, literal.x,
; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: BFE_UINT T3.X, KC0[3].Y, literal.x, T0.W,
-; EG-NEXT: XOR_INT T1.Y, PV.W, PS,
+; EG-NEXT: XOR_INT T2.Y, PV.W, PS,
; EG-NEXT: XOR_INT T0.Z, PV.Z, PS,
-; EG-NEXT: OR_INT T0.W, PV.Y, literal.y,
-; EG-NEXT: SUB_INT * T1.W, literal.z, PV.X,
-; EG-NEXT: 23(3.222986e-44), 8388608(1.175494e-38)
+; EG-NEXT: XOR_INT T1.W, PV.X, PV.Y,
+; EG-NEXT: XOR_INT * T3.W, T3.Y, PV.Y,
+; EG-NEXT: SUB_INT T3.Y, PS, T1.Y,
+; EG-NEXT: SUBB_UINT T1.Z, PV.W, T1.Y,
+; EG-NEXT: SUB_INT T3.W, PV.Z, T2.W,
+; EG-NEXT: SUBB_UINT * T4.W, PV.Y, T2.W,
+; EG-NEXT: SUB_INT T4.Y, PV.W, PS,
+; EG-NEXT: SUB_INT T0.Z, PV.Y, PV.Z,
+; EG-NEXT: BFE_UINT T3.W, KC0[3].Y, literal.x, T0.W,
+; EG-NEXT: AND_INT * T4.W, KC0[3].Y, literal.y,
+; EG-NEXT: 23(3.222986e-44), 8388607(1.175494e-38)
+; EG-NEXT: SETGT_INT T0.X, 0.0, T0.X,
+; EG-NEXT: ADD_INT T3.Y, PV.W, literal.x,
+; EG-NEXT: OR_INT T1.Z, PS, literal.y,
+; EG-NEXT: BFE_UINT T0.W, KC0[3].W, literal.z, T0.W,
+; EG-NEXT: ADD_INT * T4.W, PV.W, literal.w,
+; EG-NEXT: -127(nan), 8388608(1.175494e-38)
+; EG-NEXT: 23(3.222986e-44), -150(nan)
+; EG-NEXT: AND_INT T1.X, KC0[3].W, literal.x,
+; EG-NEXT: ADD_INT T5.Y, PV.W, literal.y,
+; EG-NEXT: SUB_INT T2.Z, literal.z, T3.W,
+; EG-NEXT: NOT_INT T3.W, PS,
+; EG-NEXT: LSHR * T5.W, PV.Z, 1,
+; EG-NEXT: 8388607(1.175494e-38), -150(nan)
; EG-NEXT: 150(2.101948e-43), 0(0.000000e+00)
-; EG-NEXT: AND_INT T4.X, KC0[3].Y, literal.x,
-; EG-NEXT: AND_INT T3.Y, PS, literal.y,
-; EG-NEXT: BIT_ALIGN_INT T2.Z, 0.0, PV.W, PS,
-; EG-NEXT: SUB_INT T1.W, PV.Z, T2.W,
-; EG-NEXT: SUBB_UINT * T3.W, PV.Y, T2.W,
-; EG-NEXT: 8388607(1.175494e-38), 32(4.484155e-44)
-; EG-NEXT: SUB_INT T5.X, PV.W, PS,
-; EG-NEXT: SETGT_INT T0.Y, 0.0, T0.Y,
-; EG-NEXT: CNDE_INT T0.Z, PV.Y, PV.Z, 0.0,
-; EG-NEXT: OR_INT T1.W, PV.X, literal.x,
-; EG-NEXT: ADD_INT * T3.W, T3.X, literal.y,
-; EG-NEXT: 8388608(1.175494e-38), -150(nan)
-; EG-NEXT: ADD_INT T4.X, T3.X, literal.x,
-; EG-NEXT: SUB_INT T3.Y, literal.y, T3.X,
-; EG-NEXT: AND_INT T2.Z, PS, literal.z,
-; EG-NEXT: NOT_INT T4.W, PS,
-; EG-NEXT: LSHR * T5.W, PV.W, 1,
-; EG-NEXT: -127(nan), 150(2.101948e-43)
-; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: BIT_ALIGN_INT T3.X, 0.0, PS, PV.W,
-; EG-NEXT: LSHL T4.Y, T1.W, PV.Z,
-; EG-NEXT: AND_INT T2.Z, T3.W, literal.x, BS:VEC_120/SCL_212
-; EG-NEXT: BIT_ALIGN_INT T1.W, 0.0, T1.W, PV.Y, BS:VEC_021/SCL_122
-; EG-NEXT: AND_INT * T3.W, PV.Y, literal.x,
+; EG-NEXT: BIT_ALIGN_INT T2.X, 0.0, PS, PV.W,
+; EG-NEXT: AND_INT T6.Y, PV.Z, literal.x,
+; EG-NEXT: AND_INT T3.Z, PV.Y, literal.y,
+; EG-NEXT: OR_INT T3.W, PV.X, literal.z,
+; EG-NEXT: AND_INT * T5.W, T4.W, literal.y,
+; EG-NEXT: 32(4.484155e-44), 31(4.344025e-44)
+; EG-NEXT: 8388608(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T1.X, 0.0, T1.Z, T2.Z,
+; EG-NEXT: LSHL T7.Y, T1.Z, PS,
+; EG-NEXT: AND_INT T1.Z, T4.W, literal.x,
+; EG-NEXT: LSHL T4.W, PV.W, PV.Z,
+; EG-NEXT: AND_INT * T5.W, T5.Y, literal.x,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
-; EG-NEXT: ADD_INT T6.X, T1.X, literal.x,
-; EG-NEXT: CNDE_INT T3.Y, PS, PV.W, 0.0,
-; EG-NEXT: CNDE_INT * T3.Z, PV.Z, PV.Y, 0.0,
-; EG-NEXT: -150(nan), 0(0.000000e+00)
-; EG-NEXT: ALU clause starting at 108:
-; EG-NEXT: CNDE_INT T1.W, T2.Z, T3.X, T4.Y,
-; EG-NEXT: SETGT_INT * T3.W, T4.X, literal.x,
+; EG-NEXT: CNDE_INT T3.X, PS, PV.W, 0.0,
+; EG-NEXT: CNDE_INT T8.Y, PV.Z, PV.Y, 0.0,
+; EG-NEXT: CNDE_INT * T2.Z, T6.Y, PV.X, 0.0,
+; EG-NEXT: ALU clause starting at 106:
+; EG-NEXT: CNDE_INT T6.W, T1.Z, T2.X, T7.Y, BS:VEC_021/SCL_122
+; EG-NEXT: SETGT_INT * T7.W, T3.Y, literal.x,
; EG-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; EG-NEXT: CNDE_INT T3.X, PS, 0.0, PV.W,
-; EG-NEXT: CNDE_INT T3.Y, PS, T3.Y, T3.Z,
-; EG-NEXT: AND_INT T2.Z, T6.X, literal.x,
-; EG-NEXT: NOT_INT T1.W, T6.X,
-; EG-NEXT: LSHR * T3.W, T0.W, 1,
-; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: ASHR T7.X, KC0[3].Y, literal.x,
-; EG-NEXT: ADD_INT T4.Y, T1.X, literal.y,
-; EG-NEXT: BIT_ALIGN_INT T3.Z, 0.0, PS, PV.W,
-; EG-NEXT: LSHL T0.W, T0.W, PV.Z,
-; EG-NEXT: AND_INT * T1.W, T6.X, literal.z,
+; EG-NEXT: CNDE_INT T1.X, PS, 0.0, PV.W,
+; EG-NEXT: CNDE_INT T6.Y, PS, T2.Z, T8.Y,
+; EG-NEXT: SUB_INT T1.Z, literal.x, T0.W,
+; EG-NEXT: NOT_INT T6.W, T5.Y,
+; EG-NEXT: LSHR * T7.W, T3.W, 1,
+; EG-NEXT: 150(2.101948e-43), 0(0.000000e+00)
+; EG-NEXT: ASHR T2.X, KC0[3].Y, literal.x,
+; EG-NEXT: ADD_INT T5.Y, T0.W, literal.y,
+; EG-NEXT: BIT_ALIGN_INT T2.Z, 0.0, PS, PV.W,
+; EG-NEXT: BIT_ALIGN_INT T0.W, 0.0, T3.W, PV.Z,
+; EG-NEXT: AND_INT * T3.W, PV.Z, literal.z,
; EG-NEXT: 31(4.344025e-44), -127(nan)
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
-; EG-NEXT: CNDE_INT T1.X, PS, PV.W, 0.0,
-; EG-NEXT: CNDE_INT T5.Y, PS, PV.Z, PV.W,
-; EG-NEXT: SETGT_INT T2.Z, PV.Y, literal.x,
-; EG-NEXT: XOR_INT T0.W, T3.Y, PV.X,
-; EG-NEXT: XOR_INT * T1.W, T3.X, PV.X,
+; EG-NEXT: CNDE_INT T4.X, PS, PV.W, 0.0,
+; EG-NEXT: CNDE_INT T7.Y, T5.W, PV.Z, T4.W,
+; EG-NEXT: SETGT_INT T1.Z, PV.Y, literal.x,
+; EG-NEXT: XOR_INT T0.W, T6.Y, PV.X,
+; EG-NEXT: XOR_INT * T3.W, T1.X, PV.X,
; EG-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; EG-NEXT: SUB_INT T3.X, PS, T7.X,
-; EG-NEXT: SUBB_UINT T3.Y, PV.W, T7.X,
-; EG-NEXT: CNDE_INT T3.Z, PV.Z, 0.0, PV.Y,
-; EG-NEXT: CNDE_INT T1.W, PV.Z, T0.Z, PV.X,
-; EG-NEXT: ASHR * T3.W, KC0[3].W, literal.x,
+; EG-NEXT: SUB_INT T1.X, PS, T2.X,
+; EG-NEXT: SUBB_UINT T6.Y, PV.W, T2.X,
+; EG-NEXT: CNDE_INT T2.Z, PV.Z, 0.0, PV.Y,
+; EG-NEXT: CNDE_INT T3.W, PV.Z, PV.X, T3.X,
+; EG-NEXT: ASHR * T4.W, KC0[3].W, literal.x,
; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: XOR_INT T1.X, PV.W, PS,
-; EG-NEXT: XOR_INT T5.Y, PV.Z, PS,
-; EG-NEXT: SUB_INT T0.Z, PV.X, PV.Y,
-; EG-NEXT: SETGT_INT T1.W, 0.0, T4.X, BS:VEC_021/SCL_122
-; EG-NEXT: CNDE_INT * T6.W, T0.Y, T5.X, 0.0,
-; EG-NEXT: SETGT_INT T0.X, 0.0, T0.X,
+; EG-NEXT: XOR_INT T3.X, PV.W, PS,
+; EG-NEXT: XOR_INT T7.Y, PV.Z, PS,
+; EG-NEXT: SUB_INT T1.Z, PV.X, PV.Y,
+; EG-NEXT: SETGT_INT T3.W, 0.0, T3.Y,
+; EG-NEXT: CNDE_INT * T6.W, T0.X, T0.Z, 0.0,
+; EG-NEXT: SETGT_INT T1.X, 0.0, T0.Y,
; EG-NEXT: CNDE_INT T6.Y, PV.W, PV.Z, 0.0,
-; EG-NEXT: SUB_INT T0.Z, T1.Y, T2.W, BS:VEC_021/SCL_122
-; EG-NEXT: SUB_INT T2.W, PV.Y, T3.W,
-; EG-NEXT: SUBB_UINT * T4.W, PV.X, T3.W,
-; EG-NEXT: SUB_INT T3.X, PV.W, PS,
-; EG-NEXT: SETGT_INT T1.Y, 0.0, T4.Y,
-; EG-NEXT: CNDE_INT T6.Z, T0.Y, PV.Z, 0.0,
-; EG-NEXT: SUB_INT T0.W, T0.W, T7.X, BS:VEC_021/SCL_122
-; EG-NEXT: CNDE_INT * T4.W, PV.X, T2.X, 0.0,
-; EG-NEXT: CNDE_INT T6.X, T1.W, PV.W, 0.0,
-; EG-NEXT: CNDE_INT T4.Y, PV.Y, PV.X, 0.0,
-; EG-NEXT: SUB_INT T0.W, T1.Z, T2.Y,
-; EG-NEXT: LSHR * T2.X, KC0[2].Y, literal.x,
+; EG-NEXT: SUB_INT T0.Z, T1.W, T1.Y, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T1.W, PV.Y, T4.W,
+; EG-NEXT: SUBB_UINT * T5.W, PV.X, T4.W,
+; EG-NEXT: SUB_INT T4.X, PV.W, PS,
+; EG-NEXT: SETGT_INT T0.Y, 0.0, T5.Y, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T6.Z, T0.X, PV.Z, 0.0,
+; EG-NEXT: SUB_INT T0.W, T0.W, T2.X,
+; EG-NEXT: CNDE_INT * T1.W, PV.X, T4.Y, 0.0,
+; EG-NEXT: CNDE_INT T6.X, T3.W, PV.W, 0.0,
+; EG-NEXT: CNDE_INT T1.Y, PV.Y, PV.X, 0.0,
+; EG-NEXT: SUB_INT T0.W, T2.Y, T2.W,
+; EG-NEXT: LSHR * T0.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
-; EG-NEXT: CNDE_INT T4.Z, T0.X, PV.W, 0.0,
-; EG-NEXT: SUB_INT * T0.W, T1.X, T3.W, BS:VEC_120/SCL_212
-; EG-NEXT: CNDE_INT T4.X, T1.Y, PV.W, 0.0,
+; EG-NEXT: CNDE_INT T1.Z, T1.X, PV.W, 0.0,
+; EG-NEXT: SUB_INT * T0.W, T3.X, T4.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T1.X, T0.Y, PV.W, 0.0,
; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.x,
; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
-; EG-NEXT: LSHR * T0.X, PV.W, literal.x,
+; EG-NEXT: LSHR * T2.X, PV.W, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%conv = fptosi <4 x float> %x to <4 x i64>
store <4 x i64> %conv, ptr addrspace(1) %out
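
For reference, the regenerated EG check lines above all exercise the same per-lane operation: a float-to-signed-i64 conversion, which the R600/Evergreen backend expands in software from the float's exponent and mantissa fields (the BFE_UINT of bit 23, the AND with 8388607, and the OR with 8388608 visible in the checks). A minimal scalar sketch of that operation in LLVM IR is below; the function name @fptosi_f32_to_i64_example is hypothetical and chosen only for illustration, not taken from the test file.

; Scalar form of the conversion the vector test above performs per lane.
; Hypothetical example function, not part of this patch.
define void @fptosi_f32_to_i64_example(ptr addrspace(1) %out, float %x) {
  %conv = fptosi float %x to i64            ; one lane of the <4 x float> test
  store i64 %conv, ptr addrspace(1) %out    ; write the 64-bit result
  ret void
}

The fp_to_uint.ll file that follows differs only in using fptoui, which is why its regenerated EG sequences have a near-identical shape.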
diff --git a/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll b/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll
index 5170f9c76db2..5abf82aa1aab 100644
--- a/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll
+++ b/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll
@@ -200,25 +200,25 @@ define amdgpu_kernel void @fp_to_uint_f32_to_i64(ptr addrspace(1) %out, float %x
; EG-NEXT: ADD_INT * T2.W, PV.W, literal.y,
; EG-NEXT: 8388608(1.175494e-38), -150(nan)
; EG-NEXT: ADD_INT T0.X, T0.W, literal.x,
-; EG-NEXT: SUB_INT T0.Y, literal.y, T0.W,
-; EG-NEXT: AND_INT T0.Z, PS, literal.z,
+; EG-NEXT: AND_INT T0.Y, PS, literal.y,
+; EG-NEXT: SUB_INT T0.Z, literal.z, T0.W,
; EG-NEXT: NOT_INT T0.W, PS,
; EG-NEXT: LSHR * T3.W, PV.W, 1,
-; EG-NEXT: -127(nan), 150(2.101948e-43)
-; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: -127(nan), 31(4.344025e-44)
+; EG-NEXT: 150(2.101948e-43), 0(0.000000e+00)
; EG-NEXT: BIT_ALIGN_INT T1.X, 0.0, PS, PV.W,
-; EG-NEXT: LSHL T1.Y, T1.W, PV.Z,
-; EG-NEXT: AND_INT T0.Z, T2.W, literal.x, BS:VEC_120/SCL_212
-; EG-NEXT: BIT_ALIGN_INT T0.W, 0.0, T1.W, PV.Y, BS:VEC_021/SCL_122
-; EG-NEXT: AND_INT * T1.W, PV.Y, literal.x,
+; EG-NEXT: AND_INT T1.Y, PV.Z, literal.x,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, 0.0, T1.W, PV.Z,
+; EG-NEXT: LSHL T0.W, T1.W, PV.Y,
+; EG-NEXT: AND_INT * T1.W, T2.W, literal.x,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: CNDE_INT T0.Y, PS, PV.W, 0.0,
-; EG-NEXT: CNDE_INT T1.Z, PV.Z, PV.Y, 0.0,
-; EG-NEXT: CNDE_INT T0.W, PV.Z, PV.X, PV.Y,
+; EG-NEXT: CNDE_INT T0.Z, PV.Y, PV.Z, 0.0,
+; EG-NEXT: CNDE_INT T0.W, PS, PV.X, PV.W,
; EG-NEXT: SETGT_INT * T1.W, T0.X, literal.x,
; EG-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; EG-NEXT: CNDE_INT T0.Z, PS, 0.0, PV.W,
-; EG-NEXT: CNDE_INT T0.W, PS, PV.Y, PV.Z,
+; EG-NEXT: CNDE_INT T1.Z, PS, 0.0, PV.W,
+; EG-NEXT: CNDE_INT T0.W, PS, PV.Z, PV.Y,
; EG-NEXT: ASHR * T1.W, KC0[2].Z, literal.x,
; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
; EG-NEXT: XOR_INT T0.W, PV.W, PS,
@@ -288,79 +288,78 @@ define amdgpu_kernel void @fp_to_uint_v2f32_to_v2i64(ptr addrspace(1) %out, <2 x
;
; EG-LABEL: fp_to_uint_v2f32_to_v2i64:
; EG: ; %bb.0:
-; EG-NEXT: ALU 75, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT: ALU 74, @4, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.XYZW, T0.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: ALU clause starting at 4:
; EG-NEXT: MOV * T0.W, literal.x,
; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
-; EG-NEXT: BFE_UINT * T1.W, KC0[2].W, literal.x, PV.W,
-; EG-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; EG-NEXT: AND_INT T0.Z, KC0[2].W, literal.x,
-; EG-NEXT: BFE_UINT T0.W, KC0[3].X, literal.y, T0.W,
-; EG-NEXT: ADD_INT * T2.W, PV.W, literal.z,
-; EG-NEXT: 8388607(1.175494e-38), 23(3.222986e-44)
+; EG-NEXT: BFE_UINT T0.Z, KC0[3].X, literal.x, PV.W,
+; EG-NEXT: BFE_UINT T0.W, KC0[2].W, literal.x, PV.W,
+; EG-NEXT: AND_INT * T1.Z, KC0[2].W, literal.y,
+; EG-NEXT: 23(3.222986e-44), 8388607(1.175494e-38)
+; EG-NEXT: ADD_INT T1.W, PV.W, literal.x,
+; EG-NEXT: ADD_INT * T2.W, PV.Z, literal.x,
; EG-NEXT: -150(nan), 0(0.000000e+00)
-; EG-NEXT: SUB_INT T0.X, literal.x, PV.W,
-; EG-NEXT: SUB_INT T0.Y, literal.x, T1.W,
-; EG-NEXT: AND_INT T1.Z, PS, literal.y,
-; EG-NEXT: OR_INT T3.W, PV.Z, literal.z,
+; EG-NEXT: AND_INT T0.X, PS, literal.x,
+; EG-NEXT: AND_INT T0.Y, PV.W, literal.x,
+; EG-NEXT: OR_INT T1.Z, T1.Z, literal.y,
+; EG-NEXT: SUB_INT T3.W, literal.z, T0.W,
; EG-NEXT: AND_INT * T4.W, KC0[3].X, literal.w,
-; EG-NEXT: 150(2.101948e-43), 31(4.344025e-44)
-; EG-NEXT: 8388608(1.175494e-38), 8388607(1.175494e-38)
+; EG-NEXT: 31(4.344025e-44), 8388608(1.175494e-38)
+; EG-NEXT: 150(2.101948e-43), 8388607(1.175494e-38)
; EG-NEXT: OR_INT T1.X, PS, literal.x,
-; EG-NEXT: LSHL T1.Y, PV.W, PV.Z,
-; EG-NEXT: AND_INT T0.Z, T2.W, literal.y,
-; EG-NEXT: BIT_ALIGN_INT T4.W, 0.0, PV.W, PV.Y,
-; EG-NEXT: AND_INT * T5.W, PV.Y, literal.y,
+; EG-NEXT: AND_INT T1.Y, PV.W, literal.y,
+; EG-NEXT: BIT_ALIGN_INT T2.Z, 0.0, PV.Z, PV.W,
+; EG-NEXT: LSHL T3.W, PV.Z, PV.Y,
+; EG-NEXT: AND_INT * T4.W, T1.W, literal.y,
; EG-NEXT: 8388608(1.175494e-38), 32(4.484155e-44)
-; EG-NEXT: CNDE_INT T2.X, PS, PV.W, 0.0,
-; EG-NEXT: CNDE_INT T0.Y, PV.Z, PV.Y, 0.0,
-; EG-NEXT: ADD_INT T1.Z, T0.W, literal.x,
-; EG-NEXT: BIT_ALIGN_INT T4.W, 0.0, PV.X, T0.X,
-; EG-NEXT: AND_INT * T5.W, T0.X, literal.y,
-; EG-NEXT: -150(nan), 32(4.484155e-44)
+; EG-NEXT: CNDE_INT T0.Y, PS, PV.W, 0.0,
+; EG-NEXT: CNDE_INT T2.Z, PV.Y, PV.Z, 0.0,
+; EG-NEXT: LSHL T5.W, PV.X, T0.X,
+; EG-NEXT: AND_INT * T6.W, T2.W, literal.x,
+; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: CNDE_INT T0.X, PS, PV.W, 0.0,
-; EG-NEXT: NOT_INT T2.Y, T2.W,
-; EG-NEXT: AND_INT T2.Z, PV.Z, literal.x,
-; EG-NEXT: NOT_INT T2.W, PV.Z,
-; EG-NEXT: LSHR * T4.W, T1.X, 1,
-; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: LSHR T3.X, T3.W, 1,
-; EG-NEXT: ADD_INT T3.Y, T0.W, literal.x, BS:VEC_120/SCL_212
-; EG-NEXT: BIT_ALIGN_INT T3.Z, 0.0, PS, PV.W,
-; EG-NEXT: LSHL T0.W, T1.X, PV.Z,
-; EG-NEXT: AND_INT * T2.W, T1.Z, literal.y,
+; EG-NEXT: NOT_INT T1.Y, T1.W,
+; EG-NEXT: SUB_INT T3.Z, literal.x, T0.Z,
+; EG-NEXT: NOT_INT T1.W, T2.W, BS:VEC_120/SCL_212
+; EG-NEXT: LSHR * T2.W, T1.X, 1,
+; EG-NEXT: 150(2.101948e-43), 0(0.000000e+00)
+; EG-NEXT: LSHR T2.X, T1.Z, 1,
+; EG-NEXT: ADD_INT T2.Y, T0.Z, literal.x, BS:VEC_120/SCL_212
+; EG-NEXT: BIT_ALIGN_INT T0.Z, 0.0, PS, PV.W,
+; EG-NEXT: BIT_ALIGN_INT T1.W, 0.0, T1.X, PV.Z,
+; EG-NEXT: AND_INT * T2.W, PV.Z, literal.y,
; EG-NEXT: -127(nan), 32(4.484155e-44)
; EG-NEXT: CNDE_INT T1.X, PS, PV.W, 0.0,
-; EG-NEXT: CNDE_INT T4.Y, PS, PV.Z, PV.W,
-; EG-NEXT: SETGT_INT T1.Z, PV.Y, literal.x,
-; EG-NEXT: BIT_ALIGN_INT T0.W, 0.0, PV.X, T2.Y,
-; EG-NEXT: ADD_INT * T1.W, T1.W, literal.y,
+; EG-NEXT: CNDE_INT T3.Y, T6.W, PV.Z, T5.W, BS:VEC_021/SCL_122
+; EG-NEXT: SETGT_INT T0.Z, PV.Y, literal.x,
+; EG-NEXT: BIT_ALIGN_INT T1.W, 0.0, PV.X, T1.Y,
+; EG-NEXT: ADD_INT * T0.W, T0.W, literal.y,
; EG-NEXT: 23(3.222986e-44), -127(nan)
-; EG-NEXT: CNDE_INT T3.X, T0.Z, PV.W, T1.Y,
+; EG-NEXT: CNDE_INT T2.X, T4.W, PV.W, T3.W,
; EG-NEXT: SETGT_INT T1.Y, PS, literal.x,
-; EG-NEXT: CNDE_INT T0.Z, PV.Z, 0.0, PV.Y,
-; EG-NEXT: CNDE_INT T0.W, PV.Z, T0.X, PV.X,
+; EG-NEXT: CNDE_INT T1.Z, PV.Z, 0.0, PV.Y,
+; EG-NEXT: CNDE_INT T1.W, PV.Z, PV.X, T0.X,
; EG-NEXT: ASHR * T2.W, KC0[3].X, literal.y,
; EG-NEXT: 23(3.222986e-44), 31(4.344025e-44)
; EG-NEXT: XOR_INT T0.X, PV.W, PS,
-; EG-NEXT: XOR_INT T2.Y, PV.Z, PS,
+; EG-NEXT: XOR_INT T3.Y, PV.Z, PS,
; EG-NEXT: CNDE_INT T0.Z, PV.Y, 0.0, PV.X,
-; EG-NEXT: CNDE_INT T0.W, PV.Y, T2.X, T0.Y,
+; EG-NEXT: CNDE_INT T1.W, PV.Y, T2.Z, T0.Y,
; EG-NEXT: ASHR * T3.W, KC0[2].W, literal.x,
; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
; EG-NEXT: XOR_INT T0.Y, PV.W, PS,
; EG-NEXT: XOR_INT T0.Z, PV.Z, PS,
-; EG-NEXT: SUB_INT T0.W, PV.Y, T2.W,
+; EG-NEXT: SUB_INT T1.W, PV.Y, T2.W,
; EG-NEXT: SUBB_UINT * T4.W, PV.X, T2.W,
; EG-NEXT: SUB_INT T1.Y, PV.W, PS,
-; EG-NEXT: SETGT_INT T1.Z, 0.0, T3.Y,
-; EG-NEXT: SUB_INT T0.W, PV.Z, T3.W,
+; EG-NEXT: SETGT_INT T1.Z, 0.0, T2.Y,
+; EG-NEXT: SUB_INT T1.W, PV.Z, T3.W,
; EG-NEXT: SUBB_UINT * T4.W, PV.Y, T3.W,
; EG-NEXT: SUB_INT T0.Z, PV.W, PS,
-; EG-NEXT: SETGT_INT T0.W, 0.0, T1.W,
+; EG-NEXT: SETGT_INT T0.W, 0.0, T0.W,
; EG-NEXT: CNDE_INT * T1.W, PV.Z, PV.Y, 0.0,
; EG-NEXT: CNDE_INT T1.Y, PV.W, PV.Z, 0.0,
; EG-NEXT: SUB_INT * T2.W, T0.X, T2.W,
@@ -449,170 +448,168 @@ define amdgpu_kernel void @fp_to_uint_v4f32_to_v4i64(ptr addrspace(1) %out, <4 x
;
; EG-LABEL: fp_to_uint_v4f32_to_v4i64:
; EG: ; %bb.0:
-; EG-NEXT: ALU 101, @6, KC0[CB0:0-32], KC1[]
-; EG-NEXT: ALU 54, @108, KC0[CB0:0-32], KC1[]
-; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T4.XYZW, T0.X, 0
-; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T6.XYZW, T2.X, 1
+; EG-NEXT: ALU 99, @6, KC0[CB0:0-32], KC1[]
+; EG-NEXT: ALU 54, @106, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.XYZW, T2.X, 0
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T6.XYZW, T0.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: ALU clause starting at 6:
; EG-NEXT: MOV * T0.W, literal.x,
; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
-; EG-NEXT: BFE_UINT T1.W, KC0[4].X, literal.x, PV.W,
-; EG-NEXT: AND_INT * T2.W, KC0[4].X, literal.y,
+; EG-NEXT: BFE_UINT T1.W, KC0[3].Z, literal.x, PV.W,
+; EG-NEXT: AND_INT * T2.W, KC0[3].Z, literal.y,
; EG-NEXT: 23(3.222986e-44), 8388607(1.175494e-38)
-; EG-NEXT: OR_INT T0.Z, PS, literal.x,
-; EG-NEXT: BFE_UINT T2.W, KC0[3].Z, literal.y, T0.W,
-; EG-NEXT: ADD_INT * T3.W, PV.W, literal.z,
-; EG-NEXT: 8388608(1.175494e-38), 23(3.222986e-44)
-; EG-NEXT: -150(nan), 0(0.000000e+00)
-; EG-NEXT: ADD_INT T0.Y, PV.W, literal.x,
-; EG-NEXT: AND_INT T1.Z, PS, literal.y,
-; EG-NEXT: NOT_INT T4.W, PS,
-; EG-NEXT: LSHR * T5.W, PV.Z, 1,
-; EG-NEXT: -127(nan), 31(4.344025e-44)
+; EG-NEXT: OR_INT T2.W, PS, literal.x,
+; EG-NEXT: ADD_INT * T3.W, PV.W, literal.y,
+; EG-NEXT: 8388608(1.175494e-38), -150(nan)
; EG-NEXT: ADD_INT T0.X, T1.W, literal.x,
-; EG-NEXT: BIT_ALIGN_INT T1.Y, 0.0, PS, PV.W,
-; EG-NEXT: AND_INT T2.Z, T3.W, literal.y, BS:VEC_201
-; EG-NEXT: LSHL T3.W, T0.Z, PV.Z,
-; EG-NEXT: SUB_INT * T1.W, literal.z, T1.W,
-; EG-NEXT: -127(nan), 32(4.484155e-44)
-; EG-NEXT: 150(2.101948e-43), 0(0.000000e+00)
-; EG-NEXT: AND_INT T1.X, PS, literal.x,
-; EG-NEXT: BIT_ALIGN_INT T2.Y, 0.0, T0.Z, PS,
-; EG-NEXT: AND_INT T0.Z, KC0[3].Z, literal.y,
-; EG-NEXT: CNDE_INT T1.W, PV.Z, PV.Y, PV.W,
-; EG-NEXT: SETGT_INT * T4.W, PV.X, literal.z,
+; EG-NEXT: BFE_UINT T0.Y, KC0[4].X, literal.y, T0.W,
+; EG-NEXT: AND_INT T0.Z, PS, literal.z,
+; EG-NEXT: NOT_INT T4.W, PS,
+; EG-NEXT: LSHR * T5.W, PV.W, 1,
+; EG-NEXT: -127(nan), 23(3.222986e-44)
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T1.X, 0.0, PS, PV.W,
+; EG-NEXT: AND_INT T1.Y, T3.W, literal.x,
+; EG-NEXT: LSHL T0.Z, T2.W, PV.Z, BS:VEC_120/SCL_212
+; EG-NEXT: AND_INT T3.W, KC0[4].X, literal.y,
+; EG-NEXT: ADD_INT * T4.W, PV.Y, literal.z,
; EG-NEXT: 32(4.484155e-44), 8388607(1.175494e-38)
+; EG-NEXT: -150(nan), 0(0.000000e+00)
+; EG-NEXT: AND_INT T2.Y, PS, literal.x,
+; EG-NEXT: OR_INT T1.Z, PV.W, literal.y,
+; EG-NEXT: CNDE_INT T3.W, PV.Y, PV.X, PV.Z,
+; EG-NEXT: SETGT_INT * T5.W, T0.X, literal.z,
+; EG-NEXT: 31(4.344025e-44), 8388608(1.175494e-38)
; EG-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; EG-NEXT: CNDE_INT T2.X, PS, 0.0, PV.W,
-; EG-NEXT: OR_INT T1.Y, PV.Z, literal.x,
-; EG-NEXT: ADD_INT T0.Z, T2.W, literal.y,
-; EG-NEXT: CNDE_INT T1.W, PV.X, PV.Y, 0.0,
-; EG-NEXT: CNDE_INT * T3.W, T2.Z, T3.W, 0.0,
-; EG-NEXT: 8388608(1.175494e-38), -150(nan)
-; EG-NEXT: CNDE_INT T1.X, T4.W, PV.W, PS,
-; EG-NEXT: ASHR T2.Y, KC0[4].X, literal.x,
-; EG-NEXT: AND_INT T1.Z, PV.Z, literal.x,
-; EG-NEXT: NOT_INT T1.W, PV.Z,
-; EG-NEXT: LSHR * T3.W, PV.Y, 1,
-; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: BIT_ALIGN_INT T3.X, 0.0, PS, PV.W,
-; EG-NEXT: LSHL T3.Y, T1.Y, PV.Z,
-; EG-NEXT: XOR_INT T1.Z, PV.X, PV.Y,
-; EG-NEXT: XOR_INT T1.W, T2.X, PV.Y,
-; EG-NEXT: SUB_INT * T2.W, literal.x, T2.W,
-; EG-NEXT: 150(2.101948e-43), 0(0.000000e+00)
-; EG-NEXT: AND_INT T1.X, T0.Z, literal.x,
-; EG-NEXT: AND_INT T4.Y, PS, literal.x,
-; EG-NEXT: BIT_ALIGN_INT T0.Z, 0.0, T1.Y, PS, BS:VEC_021/SCL_122
-; EG-NEXT: SUB_INT T1.W, PV.W, T2.Y,
-; EG-NEXT: SUBB_UINT * T2.W, PV.Z, T2.Y,
-; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
-; EG-NEXT: SUB_INT T2.X, PV.W, PS,
-; EG-NEXT: CNDE_INT T1.Y, PV.Y, PV.Z, 0.0,
-; EG-NEXT: CNDE_INT T0.Z, PV.X, T3.Y, 0.0,
-; EG-NEXT: CNDE_INT T1.W, PV.X, T3.X, T3.Y, BS:VEC_021/SCL_122
-; EG-NEXT: SETGT_INT * T2.W, T0.Y, literal.x,
+; EG-NEXT: CNDE_INT T3.Y, PS, 0.0, PV.W,
+; EG-NEXT: SUB_INT T2.Z, literal.x, T1.W,
+; EG-NEXT: LSHL T1.W, PV.Z, PV.Y,
+; EG-NEXT: AND_INT * T3.W, T4.W, literal.y,
+; EG-NEXT: 150(2.101948e-43), 32(4.484155e-44)
+; EG-NEXT: CNDE_INT T1.X, PS, PV.W, 0.0,
+; EG-NEXT: AND_INT T2.Y, PV.Z, literal.x,
+; EG-NEXT: SUB_INT T3.Z, literal.y, T0.Y,
+; EG-NEXT: NOT_INT T4.W, T4.W,
+; EG-NEXT: LSHR * T6.W, T1.Z, 1,
+; EG-NEXT: 32(4.484155e-44), 150(2.101948e-43)
+; EG-NEXT: BIT_ALIGN_INT T2.X, 0.0, T2.W, T2.Z,
+; EG-NEXT: ADD_INT T0.Y, T0.Y, literal.x,
+; EG-NEXT: BIT_ALIGN_INT T2.Z, 0.0, PS, PV.W,
+; EG-NEXT: BIT_ALIGN_INT T2.W, 0.0, T1.Z, PV.Z,
+; EG-NEXT: AND_INT * T4.W, PV.Z, literal.y,
+; EG-NEXT: -127(nan), 32(4.484155e-44)
+; EG-NEXT: CNDE_INT T3.X, PS, PV.W, 0.0,
+; EG-NEXT: CNDE_INT T4.Y, T3.W, PV.Z, T1.W,
+; EG-NEXT: SETGT_INT T1.Z, PV.Y, literal.x,
+; EG-NEXT: CNDE_INT T1.W, T1.Y, T0.Z, 0.0,
+; EG-NEXT: CNDE_INT * T2.W, T2.Y, PV.X, 0.0,
; EG-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; EG-NEXT: BFE_UINT T1.X, KC0[3].W, literal.x, T0.W,
-; EG-NEXT: AND_INT T3.Y, KC0[3].W, literal.y,
-; EG-NEXT: CNDE_INT T2.Z, PS, 0.0, PV.W,
-; EG-NEXT: CNDE_INT T1.W, PS, PV.Y, PV.Z,
-; EG-NEXT: ASHR * T2.W, KC0[3].Z, literal.z,
-; EG-NEXT: 23(3.222986e-44), 8388607(1.175494e-38)
+; EG-NEXT: CNDE_INT T2.X, T5.W, PS, PV.W,
+; EG-NEXT: ASHR T1.Y, KC0[3].Z, literal.x,
+; EG-NEXT: CNDE_INT T0.Z, PV.Z, 0.0, PV.Y,
+; EG-NEXT: CNDE_INT T1.W, PV.Z, PV.X, T1.X,
+; EG-NEXT: ASHR * T2.W, KC0[4].X, literal.x,
; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: BFE_UINT T3.X, KC0[3].Y, literal.x, T0.W,
-; EG-NEXT: XOR_INT T1.Y, PV.W, PS,
+; EG-NEXT: XOR_INT T2.Y, PV.W, PS,
; EG-NEXT: XOR_INT T0.Z, PV.Z, PS,
-; EG-NEXT: OR_INT T0.W, PV.Y, literal.y,
-; EG-NEXT: SUB_INT * T1.W, literal.z, PV.X,
-; EG-NEXT: 23(3.222986e-44), 8388608(1.175494e-38)
+; EG-NEXT: XOR_INT T1.W, PV.X, PV.Y,
+; EG-NEXT: XOR_INT * T3.W, T3.Y, PV.Y,
+; EG-NEXT: SUB_INT T3.Y, PS, T1.Y,
+; EG-NEXT: SUBB_UINT T1.Z, PV.W, T1.Y,
+; EG-NEXT: SUB_INT T3.W, PV.Z, T2.W,
+; EG-NEXT: SUBB_UINT * T4.W, PV.Y, T2.W,
+; EG-NEXT: SUB_INT T4.Y, PV.W, PS,
+; EG-NEXT: SUB_INT T0.Z, PV.Y, PV.Z,
+; EG-NEXT: BFE_UINT T3.W, KC0[3].Y, literal.x, T0.W,
+; EG-NEXT: AND_INT * T4.W, KC0[3].Y, literal.y,
+; EG-NEXT: 23(3.222986e-44), 8388607(1.175494e-38)
+; EG-NEXT: SETGT_INT T0.X, 0.0, T0.X,
+; EG-NEXT: ADD_INT T3.Y, PV.W, literal.x,
+; EG-NEXT: OR_INT T1.Z, PS, literal.y,
+; EG-NEXT: BFE_UINT T0.W, KC0[3].W, literal.z, T0.W,
+; EG-NEXT: ADD_INT * T4.W, PV.W, literal.w,
+; EG-NEXT: -127(nan), 8388608(1.175494e-38)
+; EG-NEXT: 23(3.222986e-44), -150(nan)
+; EG-NEXT: AND_INT T1.X, KC0[3].W, literal.x,
+; EG-NEXT: ADD_INT T5.Y, PV.W, literal.y,
+; EG-NEXT: SUB_INT T2.Z, literal.z, T3.W,
+; EG-NEXT: NOT_INT T3.W, PS,
+; EG-NEXT: LSHR * T5.W, PV.Z, 1,
+; EG-NEXT: 8388607(1.175494e-38), -150(nan)
; EG-NEXT: 150(2.101948e-43), 0(0.000000e+00)
-; EG-NEXT: AND_INT T4.X, KC0[3].Y, literal.x,
-; EG-NEXT: AND_INT T3.Y, PS, literal.y,
-; EG-NEXT: BIT_ALIGN_INT T2.Z, 0.0, PV.W, PS,
-; EG-NEXT: SUB_INT T1.W, PV.Z, T2.W,
-; EG-NEXT: SUBB_UINT * T3.W, PV.Y, T2.W,
-; EG-NEXT: 8388607(1.175494e-38), 32(4.484155e-44)
-; EG-NEXT: SUB_INT T5.X, PV.W, PS,
-; EG-NEXT: SETGT_INT T0.Y, 0.0, T0.Y,
-; EG-NEXT: CNDE_INT T0.Z, PV.Y, PV.Z, 0.0,
-; EG-NEXT: OR_INT T1.W, PV.X, literal.x,
-; EG-NEXT: ADD_INT * T3.W, T3.X, literal.y,
-; EG-NEXT: 8388608(1.175494e-38), -150(nan)
-; EG-NEXT: ADD_INT T4.X, T3.X, literal.x,
-; EG-NEXT: SUB_INT T3.Y, literal.y, T3.X,
-; EG-NEXT: AND_INT T2.Z, PS, literal.z,
-; EG-NEXT: NOT_INT T4.W, PS,
-; EG-NEXT: LSHR * T5.W, PV.W, 1,
-; EG-NEXT: -127(nan), 150(2.101948e-43)
-; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: BIT_ALIGN_INT T3.X, 0.0, PS, PV.W,
-; EG-NEXT: LSHL T4.Y, T1.W, PV.Z,
-; EG-NEXT: AND_INT T2.Z, T3.W, literal.x, BS:VEC_120/SCL_212
-; EG-NEXT: BIT_ALIGN_INT T1.W, 0.0, T1.W, PV.Y, BS:VEC_021/SCL_122
-; EG-NEXT: AND_INT * T3.W, PV.Y, literal.x,
+; EG-NEXT: BIT_ALIGN_INT T2.X, 0.0, PS, PV.W,
+; EG-NEXT: AND_INT T6.Y, PV.Z, literal.x,
+; EG-NEXT: AND_INT T3.Z, PV.Y, literal.y,
+; EG-NEXT: OR_INT T3.W, PV.X, literal.z,
+; EG-NEXT: AND_INT * T5.W, T4.W, literal.y,
+; EG-NEXT: 32(4.484155e-44), 31(4.344025e-44)
+; EG-NEXT: 8388608(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T1.X, 0.0, T1.Z, T2.Z,
+; EG-NEXT: LSHL T7.Y, T1.Z, PS,
+; EG-NEXT: AND_INT T1.Z, T4.W, literal.x,
+; EG-NEXT: LSHL T4.W, PV.W, PV.Z,
+; EG-NEXT: AND_INT * T5.W, T5.Y, literal.x,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
-; EG-NEXT: ADD_INT T6.X, T1.X, literal.x,
-; EG-NEXT: CNDE_INT T3.Y, PS, PV.W, 0.0,
-; EG-NEXT: CNDE_INT * T3.Z, PV.Z, PV.Y, 0.0,
-; EG-NEXT: -150(nan), 0(0.000000e+00)
-; EG-NEXT: ALU clause starting at 108:
-; EG-NEXT: CNDE_INT T1.W, T2.Z, T3.X, T4.Y,
-; EG-NEXT: SETGT_INT * T3.W, T4.X, literal.x,
+; EG-NEXT: CNDE_INT T3.X, PS, PV.W, 0.0,
+; EG-NEXT: CNDE_INT T8.Y, PV.Z, PV.Y, 0.0,
+; EG-NEXT: CNDE_INT * T2.Z, T6.Y, PV.X, 0.0,
+; EG-NEXT: ALU clause starting at 106:
+; EG-NEXT: CNDE_INT T6.W, T1.Z, T2.X, T7.Y, BS:VEC_021/SCL_122
+; EG-NEXT: SETGT_INT * T7.W, T3.Y, literal.x,
; EG-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; EG-NEXT: CNDE_INT T3.X, PS, 0.0, PV.W,
-; EG-NEXT: CNDE_INT T3.Y, PS, T3.Y, T3.Z,
-; EG-NEXT: AND_INT T2.Z, T6.X, literal.x,
-; EG-NEXT: NOT_INT T1.W, T6.X,
-; EG-NEXT: LSHR * T3.W, T0.W, 1,
-; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: ASHR T7.X, KC0[3].Y, literal.x,
-; EG-NEXT: ADD_INT T4.Y, T1.X, literal.y,
-; EG-NEXT: BIT_ALIGN_INT T3.Z, 0.0, PS, PV.W,
-; EG-NEXT: LSHL T0.W, T0.W, PV.Z,
-; EG-NEXT: AND_INT * T1.W, T6.X, literal.z,
+; EG-NEXT: CNDE_INT T1.X, PS, 0.0, PV.W,
+; EG-NEXT: CNDE_INT T6.Y, PS, T2.Z, T8.Y,
+; EG-NEXT: SUB_INT T1.Z, literal.x, T0.W,
+; EG-NEXT: NOT_INT T6.W, T5.Y,
+; EG-NEXT: LSHR * T7.W, T3.W, 1,
+; EG-NEXT: 150(2.101948e-43), 0(0.000000e+00)
+; EG-NEXT: ASHR T2.X, KC0[3].Y, literal.x,
+; EG-NEXT: ADD_INT T5.Y, T0.W, literal.y,
+; EG-NEXT: BIT_ALIGN_INT T2.Z, 0.0, PS, PV.W,
+; EG-NEXT: BIT_ALIGN_INT T0.W, 0.0, T3.W, PV.Z,
+; EG-NEXT: AND_INT * T3.W, PV.Z, literal.z,
; EG-NEXT: 31(4.344025e-44), -127(nan)
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
-; EG-NEXT: CNDE_INT T1.X, PS, PV.W, 0.0,
-; EG-NEXT: CNDE_INT T5.Y, PS, PV.Z, PV.W,
-; EG-NEXT: SETGT_INT T2.Z, PV.Y, literal.x,
-; EG-NEXT: XOR_INT T0.W, T3.Y, PV.X,
-; EG-NEXT: XOR_INT * T1.W, T3.X, PV.X,
+; EG-NEXT: CNDE_INT T4.X, PS, PV.W, 0.0,
+; EG-NEXT: CNDE_INT T7.Y, T5.W, PV.Z, T4.W,
+; EG-NEXT: SETGT_INT T1.Z, PV.Y, literal.x,
+; EG-NEXT: XOR_INT T0.W, T6.Y, PV.X,
+; EG-NEXT: XOR_INT * T3.W, T1.X, PV.X,
; EG-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; EG-NEXT: SUB_INT T3.X, PS, T7.X,
-; EG-NEXT: SUBB_UINT T3.Y, PV.W, T7.X,
-; EG-NEXT: CNDE_INT T3.Z, PV.Z, 0.0, PV.Y,
-; EG-NEXT: CNDE_INT T1.W, PV.Z, T0.Z, PV.X,
-; EG-NEXT: ASHR * T3.W, KC0[3].W, literal.x,
+; EG-NEXT: SUB_INT T1.X, PS, T2.X,
+; EG-NEXT: SUBB_UINT T6.Y, PV.W, T2.X,
+; EG-NEXT: CNDE_INT T2.Z, PV.Z, 0.0, PV.Y,
+; EG-NEXT: CNDE_INT T3.W, PV.Z, PV.X, T3.X,
+; EG-NEXT: ASHR * T4.W, KC0[3].W, literal.x,
; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: XOR_INT T1.X, PV.W, PS,
-; EG-NEXT: XOR_INT T5.Y, PV.Z, PS,
-; EG-NEXT: SUB_INT T0.Z, PV.X, PV.Y,
-; EG-NEXT: SETGT_INT T1.W, 0.0, T4.X, BS:VEC_021/SCL_122
-; EG-NEXT: CNDE_INT * T6.W, T0.Y, T5.X, 0.0,
-; EG-NEXT: SETGT_INT T0.X, 0.0, T0.X,
+; EG-NEXT: XOR_INT T3.X, PV.W, PS,
+; EG-NEXT: XOR_INT T7.Y, PV.Z, PS,
+; EG-NEXT: SUB_INT T1.Z, PV.X, PV.Y,
+; EG-NEXT: SETGT_INT T3.W, 0.0, T3.Y,
+; EG-NEXT: CNDE_INT * T6.W, T0.X, T0.Z, 0.0,
+; EG-NEXT: SETGT_INT T1.X, 0.0, T0.Y,
; EG-NEXT: CNDE_INT T6.Y, PV.W, PV.Z, 0.0,
-; EG-NEXT: SUB_INT T0.Z, T1.Y, T2.W, BS:VEC_021/SCL_122
-; EG-NEXT: SUB_INT T2.W, PV.Y, T3.W,
-; EG-NEXT: SUBB_UINT * T4.W, PV.X, T3.W,
-; EG-NEXT: SUB_INT T3.X, PV.W, PS,
-; EG-NEXT: SETGT_INT T1.Y, 0.0, T4.Y,
-; EG-NEXT: CNDE_INT T6.Z, T0.Y, PV.Z, 0.0,
-; EG-NEXT: SUB_INT T0.W, T0.W, T7.X, BS:VEC_021/SCL_122
-; EG-NEXT: CNDE_INT * T4.W, PV.X, T2.X, 0.0,
-; EG-NEXT: CNDE_INT T6.X, T1.W, PV.W, 0.0,
-; EG-NEXT: CNDE_INT T4.Y, PV.Y, PV.X, 0.0,
-; EG-NEXT: SUB_INT T0.W, T1.Z, T2.Y,
-; EG-NEXT: LSHR * T2.X, KC0[2].Y, literal.x,
+; EG-NEXT: SUB_INT T0.Z, T1.W, T1.Y, BS:VEC_021/SCL_122
+; EG-NEXT: SUB_INT T1.W, PV.Y, T4.W,
+; EG-NEXT: SUBB_UINT * T5.W, PV.X, T4.W,
+; EG-NEXT: SUB_INT T4.X, PV.W, PS,
+; EG-NEXT: SETGT_INT T0.Y, 0.0, T5.Y, BS:VEC_021/SCL_122
+; EG-NEXT: CNDE_INT T6.Z, T0.X, PV.Z, 0.0,
+; EG-NEXT: SUB_INT T0.W, T0.W, T2.X,
+; EG-NEXT: CNDE_INT * T1.W, PV.X, T4.Y, 0.0,
+; EG-NEXT: CNDE_INT T6.X, T3.W, PV.W, 0.0,
+; EG-NEXT: CNDE_INT T1.Y, PV.Y, PV.X, 0.0,
+; EG-NEXT: SUB_INT T0.W, T2.Y, T2.W,
+; EG-NEXT: LSHR * T0.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
-; EG-NEXT: CNDE_INT T4.Z, T0.X, PV.W, 0.0,
-; EG-NEXT: SUB_INT * T0.W, T1.X, T3.W, BS:VEC_120/SCL_212
-; EG-NEXT: CNDE_INT T4.X, T1.Y, PV.W, 0.0,
+; EG-NEXT: CNDE_INT T1.Z, T1.X, PV.W, 0.0,
+; EG-NEXT: SUB_INT * T0.W, T3.X, T4.W, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T1.X, T0.Y, PV.W, 0.0,
; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.x,
; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
-; EG-NEXT: LSHR * T0.X, PV.W, literal.x,
+; EG-NEXT: LSHR * T2.X, PV.W, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%conv = fptoui <4 x float> %x to <4 x i64>
store <4 x i64> %conv, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll b/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
index b71728096093..03434caee233 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
@@ -144,8 +144,8 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_scope_agent_sco
ret float %result
}
-define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7:[0-9]+]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
; IR-ITERATIVE: 2:
@@ -177,7 +177,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_un
; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
; IR-ITERATIVE-NEXT: ret float [[TMP25]]
;
-; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8:[0-9]+]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
; IR-DPP: 2:
@@ -213,8 +213,8 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_un
ret float %result
}
-define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, float %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float %val) #1 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP18:%.*]]
; IR-ITERATIVE: 2:
@@ -262,7 +262,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_one_as_scope_un
; IR-ITERATIVE-NEXT: [[TMP34:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-ITERATIVE-NEXT: br i1 [[TMP34]], label [[TMP10]], label [[TMP12]]
;
-; IR-DPP-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP40:%.*]]
; IR-DPP: 2:
@@ -618,8 +618,8 @@ define amdgpu_ps float @global_atomic_fmin_uni_address_div_value_agent_scope_uns
ret float %result
}
-define amdgpu_ps float @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_structfp(
+define amdgpu_ps float @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1{
+; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP20:%.*]]
; IR-ITERATIVE: 2:
@@ -647,7 +647,7 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_uni_value_agent_scope_uns
; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP19]], [[TMP12]] ]
; IR-ITERATIVE-NEXT: ret float [[TMP21]]
;
-; IR-DPP-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP20:%.*]]
; IR-DPP: 2:
@@ -679,8 +679,8 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_uni_value_agent_scope_uns
ret float %result
}
-define amdgpu_ps float @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, float %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_structfp(
+define amdgpu_ps float @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float %val) #1{
+; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP18:%.*]]
; IR-ITERATIVE: 2:
@@ -728,7 +728,7 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_div_value_agent_scope_uns
; IR-ITERATIVE-NEXT: [[TMP34:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-ITERATIVE-NEXT: br i1 [[TMP34]], label [[TMP10]], label [[TMP12]]
;
-; IR-DPP-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP40:%.*]]
; IR-DPP: 2:
@@ -968,8 +968,8 @@ define amdgpu_ps float @global_atomic_fadd_div_address_div_value_agent_scope_uns
ret float %result
}
-define amdgpu_ps float @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, float inreg %val) #1 {
-; IR-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps float @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr, float inreg %val) #1 {
+; IR-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
; IR-NEXT: ret float [[RESULT]]
;
@@ -977,8 +977,8 @@ define amdgpu_ps float @global_atomic_fadd_div_address_uni_value_one_as_scope_un
ret float %result
}
-define amdgpu_ps float @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, float %val) #1 {
-; IR-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps float @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr, float %val) #1 {
+; IR-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
; IR-NEXT: ret float [[RESULT]]
;
@@ -1022,8 +1022,8 @@ define amdgpu_ps float @global_atomic_fmin_div_address_div_value_agent_scope(ptr
ret float %result
}
-define amdgpu_ps float @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, float inreg %val) #1{
-; IR-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(
+define amdgpu_ps float @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr, float inreg %val) #1{
+; IR-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
; IR-NEXT: ret float [[RESULT]]
;
@@ -1031,8 +1031,8 @@ define amdgpu_ps float @global_atomic_fmax_div_address_uni_value_agent_scope_uns
ret float %result
}
-define amdgpu_ps float @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, float %val) #1{
-; IR-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(
+define amdgpu_ps float @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr, float %val) #1{
+; IR-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
; IR-NEXT: ret float [[RESULT]]
;
@@ -1110,8 +1110,8 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_scope_a
ret double %result
}
-define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
; IR-ITERATIVE: 2:
@@ -1149,7 +1149,7 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_
; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
; IR-ITERATIVE-NEXT: ret double [[TMP31]]
;
-; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
; IR-DPP: 2:
@@ -1191,8 +1191,8 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_
ret double %result
}
-define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1 {
-; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
; IR-NEXT: ret double [[RESULT]]
;
@@ -1338,8 +1338,8 @@ define amdgpu_ps double @global_atomic_fmin_double_uni_address_div_value_agent_s
ret double %result
}
-define amdgpu_ps double @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+define amdgpu_ps double @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1{
+; IR-ITERATIVE-LABEL: @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP26:%.*]]
; IR-ITERATIVE: 2:
@@ -1373,7 +1373,7 @@ define amdgpu_ps double @global_atomic__fmax_double_uni_address_uni_value_agent_
; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP25]], [[TMP12]] ]
; IR-ITERATIVE-NEXT: ret double [[TMP27]]
;
-; IR-DPP-LABEL: @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP26:%.*]]
; IR-DPP: 2:
@@ -1411,8 +1411,8 @@ define amdgpu_ps double @global_atomic__fmax_double_uni_address_uni_value_agent_
ret double %result
}
-define amdgpu_ps double @global_atomic__fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1{
-; IR-LABEL: @global_atomic__fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(
+define amdgpu_ps double @global_atomic__fmax_double_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double %val) #1{
+; IR-LABEL: @global_atomic__fmax_double_uni_address_div_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
; IR-NEXT: ret double [[RESULT]]
;
@@ -1528,8 +1528,8 @@ define amdgpu_ps double @global_atomic_fadd_double_div_address_div_value_agent_s
ret double %result
}
-define amdgpu_ps double @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, double inreg %val) #1 {
-; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps double @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double inreg %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
; IR-NEXT: ret double [[RESULT]]
;
@@ -1537,8 +1537,8 @@ define amdgpu_ps double @global_atomic_fadd_double_div_address_uni_value_one_as_
ret double %result
}
-define amdgpu_ps double @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, double %val) #1 {
-; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps double @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
; IR-NEXT: ret double [[RESULT]]
;
@@ -1582,8 +1582,8 @@ define amdgpu_ps double @global_atomic_fmin_double_div_address_div_value_agent_s
ret double %result
}
-define amdgpu_ps double @global_atomic__fmax_double_div_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, double inreg %val) #1{
-; IR-LABEL: @global_atomic__fmax_double_div_address_uni_value_agent_scope_unsafe_structfp(
+define amdgpu_ps double @global_atomic__fmax_double_div_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double inreg %val) #1{
+; IR-LABEL: @global_atomic__fmax_double_div_address_uni_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
; IR-NEXT: ret double [[RESULT]]
;
@@ -1591,8 +1591,8 @@ define amdgpu_ps double @global_atomic__fmax_double_div_address_uni_value_agent_
ret double %result
}
-define amdgpu_ps double @global_atomic__fmax_double_div_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, double %val) #1{
-; IR-LABEL: @global_atomic__fmax_double_div_address_div_value_agent_scope_unsafe_structfp(
+define amdgpu_ps double @global_atomic__fmax_double_div_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double %val) #1{
+; IR-LABEL: @global_atomic__fmax_double_div_address_div_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
; IR-NEXT: ret double [[RESULT]]
;
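
For reference, the renames in this file (and in the _no_rtn file below) fix a typo in the test names: "structfp" becomes "strictfp", presumably matching a strictfp attribute carried by attribute group #1, whose definition is outside the hunks shown here. A minimal sketch of such a function, with a hypothetical name and the attribute spelled out directly, might look like:

define float @fadd_one_as_strictfp_example(ptr addrspace(1) %ptr, float %val) strictfp {
  ; strictfp functions may not assume the default FP environment; the
  ; atomicrmw form below matches the IR checked in these tests.
  %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic, align 4
  ret float %result
}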
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
index b9234f47df19..239fe274d523 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
@@ -117,8 +117,8 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_scope_agent_scop
ret void
}
-define amdgpu_ps void @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7:[0-9]+]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-ITERATIVE: 2:
@@ -142,7 +142,7 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_uni_value_one_as_scope_uns
; IR-ITERATIVE: 17:
; IR-ITERATIVE-NEXT: ret void
;
-; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8:[0-9]+]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-DPP: 2:
@@ -170,8 +170,8 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_uni_value_one_as_scope_uns
ret void
}
-define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, float %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float %val) #1 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-ITERATIVE: 2:
@@ -208,7 +208,7 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_one_as_scope_uns
; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-ITERATIVE-NEXT: br i1 [[TMP24]], label [[TMP10:%.*]], label [[TMP12]]
;
-; IR-DPP-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP33:%.*]]
; IR-DPP: 2:
@@ -494,8 +494,8 @@ define amdgpu_ps void @global_atomic_fmin_uni_address_div_value_agent_scope_unsa
ret void
}
-define amdgpu_ps void @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1{
+; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-ITERATIVE: 2:
@@ -515,7 +515,7 @@ define amdgpu_ps void @global_atomic_fmax_uni_address_uni_value_agent_scope_unsa
; IR-ITERATIVE: 13:
; IR-ITERATIVE-NEXT: ret void
;
-; IR-DPP-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-DPP: 2:
@@ -539,8 +539,8 @@ define amdgpu_ps void @global_atomic_fmax_uni_address_uni_value_agent_scope_unsa
ret void
}
-define amdgpu_ps void @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, float %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float %val) #1{
+; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-ITERATIVE: 2:
@@ -577,7 +577,7 @@ define amdgpu_ps void @global_atomic_fmax_uni_address_div_value_agent_scope_unsa
; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-ITERATIVE-NEXT: br i1 [[TMP24]], label [[TMP10:%.*]], label [[TMP12]]
;
-; IR-DPP-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP33:%.*]]
; IR-DPP: 2:
@@ -774,8 +774,8 @@ define amdgpu_ps void @global_atomic_fadd_div_address_div_value_agent_scope_unsa
ret void
}
-define amdgpu_ps void @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, float inreg %val) #1 {
-; IR-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr, float inreg %val) #1 {
+; IR-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
; IR-NEXT: ret void
;
@@ -783,8 +783,8 @@ define amdgpu_ps void @global_atomic_fadd_div_address_uni_value_one_as_scope_uns
ret void
}
-define amdgpu_ps void @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, float %val) #1 {
-; IR-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr, float %val) #1 {
+; IR-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
; IR-NEXT: ret void
;
@@ -828,8 +828,8 @@ define amdgpu_ps void @global_atomic_fmin_div_address_div_value_agent_scope(ptr
ret void
}
-define amdgpu_ps void @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, float inreg %val) #1{
-; IR-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr, float inreg %val) #1{
+; IR-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
; IR-NEXT: ret void
;
@@ -837,8 +837,8 @@ define amdgpu_ps void @global_atomic_fmax_div_address_uni_value_agent_scope_unsa
ret void
}
-define amdgpu_ps void @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, float %val) #1{
-; IR-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr, float %val) #1{
+; IR-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
; IR-NEXT: ret void
;
@@ -902,8 +902,8 @@ define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_scope_age
ret void
}
-define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-ITERATIVE: 2:
@@ -927,7 +927,7 @@ define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_one_as_sc
; IR-ITERATIVE: 17:
; IR-ITERATIVE-NEXT: ret void
;
-; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-DPP: 2:
@@ -955,8 +955,8 @@ define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_one_as_sc
ret void
}
-define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1 {
-; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
; IR-NEXT: ret void
;
@@ -1060,8 +1060,8 @@ define amdgpu_ps void @global_atomic_fmin_double_uni_address_div_value_agent_sco
ret void
}
-define amdgpu_ps void @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1{
+; IR-ITERATIVE-LABEL: @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-ITERATIVE: 2:
@@ -1081,7 +1081,7 @@ define amdgpu_ps void @global_atomic_fmax_double_uni_address_uni_value_agent_sco
; IR-ITERATIVE: 13:
; IR-ITERATIVE-NEXT: ret void
;
-; IR-DPP-LABEL: @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-DPP: 2:
@@ -1105,8 +1105,8 @@ define amdgpu_ps void @global_atomic_fmax_double_uni_address_uni_value_agent_sco
ret void
}
-define amdgpu_ps void @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1{
-; IR-LABEL: @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double %val) #1{
+; IR-LABEL: @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
; IR-NEXT: ret void
;
@@ -1194,8 +1194,8 @@ define amdgpu_ps void @global_atomic_fadd_double_div_address_div_value_agent_sco
ret void
}
-define amdgpu_ps void @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, double inreg %val) #1 {
-; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double inreg %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
; IR-NEXT: ret void
;
@@ -1203,8 +1203,8 @@ define amdgpu_ps void @global_atomic_fadd_double_div_address_uni_value_one_as_sc
ret void
}
-define amdgpu_ps void @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, double %val) #1 {
-; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
; IR-NEXT: ret void
;
@@ -1248,8 +1248,8 @@ define amdgpu_ps void @global_atomic_fmin_double_div_address_div_value_agent_sco
ret void
}
-define amdgpu_ps void @global_atomic_fmax_double_div_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, double inreg %val) #1{
-; IR-LABEL: @global_atomic_fmax_double_div_address_uni_value_agent_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fmax_double_div_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double inreg %val) #1{
+; IR-LABEL: @global_atomic_fmax_double_div_address_uni_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
; IR-NEXT: ret void
;
@@ -1257,8 +1257,8 @@ define amdgpu_ps void @global_atomic_fmax_double_div_address_uni_value_agent_sco
ret void
}
-define amdgpu_ps void @global_atomic_fmax_double_div_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, double %val) #1{
-; IR-LABEL: @global_atomic_fmax_double_div_address_div_value_agent_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fmax_double_div_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double %val) #1{
+; IR-LABEL: @global_atomic_fmax_double_div_address_div_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
; IR-NEXT: ret void
;
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
index d7773f746c6a..6555ceb3ed33 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
@@ -1052,8 +1052,8 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
ret void
}
-define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX7LESS-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1099,7 +1099,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX7LESS-NEXT: .LBB2_3:
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1141,7 +1141,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX9-NEXT: .LBB2_3:
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1181,7 +1181,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX1064-NEXT: .LBB2_3:
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1220,7 +1220,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX1032-NEXT: .LBB2_3:
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
@@ -1263,7 +1263,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX1164-NEXT: .LBB2_3:
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -1303,7 +1303,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX1132-NEXT: .LBB2_3:
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1345,7 +1345,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX9-DPP-NEXT: .LBB2_3:
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1385,7 +1385,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX1064-DPP-NEXT: .LBB2_3:
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1424,7 +1424,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX1032-DPP-NEXT: .LBB2_3:
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
@@ -1467,7 +1467,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX1164-DPP-NEXT: .LBB2_3:
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -1511,8 +1511,8 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
}
-define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s32, 0
; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
@@ -1562,7 +1562,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -1628,7 +1628,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX9-NEXT: .LBB3_5:
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -1694,7 +1694,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX1064-NEXT: .LBB3_5:
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -1759,7 +1759,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX1032-NEXT: .LBB3_5:
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1164-NEXT: s_mov_b32 s14, s8
@@ -1820,7 +1820,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX1164-NEXT: .LBB3_5:
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1132-NEXT: v_mov_b32_e32 v31, v0
@@ -1880,7 +1880,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX1132-NEXT: .LBB3_5:
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -1964,7 +1964,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX9-DPP-NEXT: .LBB3_3:
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -2046,7 +2046,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX1064-DPP-NEXT: .LBB3_3:
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -2122,7 +2122,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX1032-DPP-NEXT: .LBB3_3:
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
@@ -2204,7 +2204,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX1164-DPP-NEXT: .LBB3_3:
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
@@ -3461,8 +3461,8 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
}
-define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s32, 0
; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
@@ -3512,7 +3512,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -3578,7 +3578,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX9-NEXT: .LBB6_5:
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -3644,7 +3644,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1064-NEXT: .LBB6_5:
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -3709,7 +3709,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1032-NEXT: .LBB6_5:
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1164-NEXT: s_mov_b32 s14, s8
@@ -3757,7 +3757,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1164-NEXT: .LBB6_4:
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1132-NEXT: v_mov_b32_e32 v31, v0
@@ -3804,7 +3804,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1132-NEXT: .LBB6_4:
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -3888,7 +3888,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX9-DPP-NEXT: .LBB6_3:
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -3970,7 +3970,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1064-DPP-NEXT: .LBB6_3:
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -4046,7 +4046,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1032-DPP-NEXT: .LBB6_3:
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
@@ -4115,7 +4115,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1164-DPP-NEXT: .LBB6_2:
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
@@ -5412,1589 +5412,875 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_default_scop
define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s42, -1
-; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
-; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s2
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
-; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
-; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX7LESS-NEXT: s_cbranch_execz .LBB9_3
; GFX7LESS-NEXT: ; %bb.1:
-; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
-; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
-; GFX7LESS-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX7LESS-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s7
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
; GFX7LESS-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[41:42]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
-; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX7LESS-NEXT: s_waitcnt expcnt(2)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b32 s12, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_2
; GFX7LESS-NEXT: .LBB9_3:
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s42, -1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX9-NEXT: s_mov_b64 s[0:1], exec
-; GFX9-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
-; GFX9-NEXT: s_add_u32 s40, s40, s3
-; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_cbranch_execz .LBB9_3
; GFX9-NEXT: ; %bb.1:
-; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
-; GFX9-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-NEXT: s_mov_b64 s[38:39], 0
-; GFX9-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX9-NEXT: s_add_u32 s8, s34, 44
-; GFX9-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-NEXT: s_mov_b32 s12, s33
-; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB9_2
; GFX9-NEXT: .LBB9_3:
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s42, -1
-; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-NEXT: s_mov_b32 s33, s2
; GFX1064-NEXT: s_mov_b64 s[2:3], exec
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1064-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX1064-NEXT: s_cbranch_execz .LBB9_3
; GFX1064-NEXT: ; %bb.1:
-; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1064-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-NEXT: s_mov_b32 s12, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-NEXT: s_cbranch_execnz .LBB9_2
; GFX1064-NEXT: .LBB9_3:
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s33, s2
-; GFX1032-NEXT: s_mov_b32 s2, exec_lo
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s42, -1
-; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-NEXT: s_mov_b32 s38, 0
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB9_3
; GFX1032-NEXT: ; %bb.1:
-; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_cvt_f64_u32_e32 v[0:1], s3
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1032-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-NEXT: s_mov_b32 s12, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-NEXT: s_cbranch_execnz .LBB9_2
; GFX1032-NEXT: .LBB9_3:
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b32 s33, s2
; GFX1164-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], exec
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: s_mov_b64 s[0:1], exec
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-NEXT: s_cbranch_execz .LBB9_3
; GFX1164-NEXT: ; %bb.1:
-; GFX1164-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b32 s12, s33
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-NEXT: s_cbranch_execnz .LBB9_2
; GFX1164-NEXT: .LBB9_3:
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132-NEXT: v_mov_b32_e32 v40, v0
-; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-NEXT: s_mov_b32 s38, 0
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1132-NEXT: s_mov_b32 s4, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-NEXT: s_cbranch_execz .LBB9_3
; GFX1132-NEXT: ; %bb.1:
-; GFX1132-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_cvt_f64_u32_e32 v[0:1], s3
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b32 s12, s33
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-NEXT: s_cbranch_execnz .LBB9_2
; GFX1132-NEXT: .LBB9_3:
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s42, -1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
-; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
-; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
-; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX9-DPP-NEXT: ; %bb.1:
-; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
-; GFX9-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
-; GFX9-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX9-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: s_mov_b32 s12, s33
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX9-DPP-NEXT: .LBB9_3:
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX1064-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX1064-DPP-NEXT: ; %bb.1:
-; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX1064-DPP-NEXT: .LBB9_3:
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX1032-DPP-NEXT: ; %bb.1:
-; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s3
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX1032-DPP-NEXT: .LBB9_3:
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], exec
; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX1164-DPP-NEXT: ; %bb.1:
-; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX1164-DPP-NEXT: .LBB9_3:
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s4, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX1132-DPP-NEXT: ; %bb.1:
-; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s3
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX1132-DPP-NEXT: .LBB9_3:
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
- %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 8
ret void
}
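; NOTE (editorial sketch, not part of the test): the updated checks above no
; longer call __atomic_compare_exchange through s_swappc_b64; once the
; atomicrmw is marked align 8 (see the IR change just above), the backend
; expands it inline to a compare-and-swap loop (the .LBB9_2 / .LBB10_1 blocks).
; A minimal IR sketch of that loop shape, with hypothetical names, assuming the
; standard AtomicExpand-style FP lowering through an integer cmpxchg:
;
; define void @cas_loop_sketch(ptr addrspace(1) %ptr, double %val) {
; entry:
;   %orig = load double, ptr addrspace(1) %ptr, align 8
;   br label %loop
; loop:
;   %loaded = phi double [ %orig, %entry ], [ %prev, %loop ]
;   %sum = fadd double %loaded, %val          ; the v_add_f64 above
;   %sum.i = bitcast double %sum to i64
;   %loaded.i = bitcast double %loaded to i64
;   %pair = cmpxchg ptr addrspace(1) %ptr, i64 %loaded.i, i64 %sum.i syncscope("agent") monotonic monotonic, align 8
;   %prev.i = extractvalue { i64, i1 } %pair, 0   ; cmpswap returns the old value
;   %ok = extractvalue { i64, i1 } %pair, 1
;   %prev = bitcast i64 %prev.i to double
;   br i1 %ok, label %done, label %loop
; done:
;   ret void
; }
;
; In the generated asm the br is realized divergence-safely: v_cmp_eq_u64
; accumulates finished lanes into an SGPR mask, s_and_not1 masks them out of
; exec, and s_cbranch_execnz loops while any lane is still live.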
define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s50, -1
-; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
-; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s8
-; GFX7LESS-NEXT: s_mov_b32 s40, s7
-; GFX7LESS-NEXT: s_mov_b32 s41, s6
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
-; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
-; GFX7LESS-NEXT: s_mov_b32 s46, -1
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
-; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
-; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
-; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
; GFX7LESS-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[40:41]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX7LESS-NEXT: s_waitcnt expcnt(2)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_1
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s50, -1
-; GFX9-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-NEXT: s_add_u32 s48, s48, s9
-; GFX9-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-NEXT: s_mov_b32 s33, s8
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX9-NEXT: s_mov_b32 s40, s7
-; GFX9-NEXT: s_mov_b32 s41, s6
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v41, v1
-; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
; GFX9-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_cbranch_execnz .LBB10_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s50, -1
-; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-NEXT: s_mov_b32 s33, s8
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1064-NEXT: s_mov_b32 s40, s7
-; GFX1064-NEXT: s_mov_b32 s41, s6
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: v_mov_b32_e32 v41, v1
-; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
-; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-NEXT: s_cbranch_execnz .LBB10_1
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s50, -1
-; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-NEXT: s_mov_b32 s33, s8
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1032-NEXT: s_mov_b32 s40, s7
-; GFX1032-NEXT: s_mov_b32 s41, s6
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: v_mov_b32_e32 v41, v1
-; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
; GFX1032-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-NEXT: s_cbranch_execnz .LBB10_1
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-NEXT: s_mov_b32 s33, s8
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-NEXT: s_mov_b32 s12, s6
; GFX1164-NEXT: s_mov_b32 s13, s7
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: v_mov_b32_e32 v42, v0
-; GFX1164-NEXT: s_mov_b32 s40, s7
-; GFX1164-NEXT: s_mov_b32 s41, s6
-; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: v_mov_b32_e32 v41, v1
-; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v42
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-NEXT: s_mov_b32 s12, s41
-; GFX1164-NEXT: s_mov_b32 s13, s40
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-NEXT: s_cbranch_execnz .LBB10_1
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-NEXT: s_mov_b32 s40, s14
-; GFX1132-NEXT: s_mov_b32 s41, s13
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-NEXT: s_mov_b32 s13, s14
; GFX1132-NEXT: s_mov_b32 s14, s15
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s33, s15
-; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_mov_b32 s32, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
-; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1132-NEXT: s_mov_b32 s44, 0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
; GFX1132-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-NEXT: s_mov_b32 s12, s41
-; GFX1132-NEXT: s_mov_b32 s13, s40
-; GFX1132-NEXT: s_mov_b32 s14, s33
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
-; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-NEXT: s_cbranch_execnz .LBB10_1
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s50, -1
-; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-DPP-NEXT: s_mov_b32 s33, s8
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_mov_b32 s40, s7
-; GFX9-DPP-NEXT: s_mov_b32 s41, s6
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX9-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
; GFX1032-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
-; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
-; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
-; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
; GFX1132-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
%divValue = call double @div.float.value()
- %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 8
ret void
}
-define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
@@ -7043,7 +6329,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX7LESS-NEXT: .LBB11_3:
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7086,7 +6372,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX9-NEXT: .LBB11_3:
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7127,7 +6413,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX1064-NEXT: .LBB11_3:
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7167,7 +6453,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX1032-NEXT: .LBB11_3:
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
@@ -7211,7 +6497,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX1164-NEXT: .LBB11_3:
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -7251,7 +6537,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX1132-NEXT: .LBB11_3:
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7294,7 +6580,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX9-DPP-NEXT: .LBB11_3:
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7335,7 +6621,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX1064-DPP-NEXT: .LBB11_3:
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7375,7 +6661,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX1032-DPP-NEXT: .LBB11_3:
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
@@ -7419,7 +6705,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX1164-DPP-NEXT: .LBB11_3:
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -7462,8 +6748,8 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
ret void
}
-define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s32, 0
; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
@@ -7516,7 +6802,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7562,7 +6848,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7608,7 +6894,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7654,7 +6940,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_mov_b32 s14, s8
; GFX1164-NEXT: s_add_u32 s8, s2, 44
@@ -7691,7 +6977,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_add_u32 s8, s2, 44
; GFX1132-NEXT: s_addc_u32 s9, s3, 0
@@ -7726,7 +7012,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7772,7 +7058,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7818,7 +7104,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7864,7 +7150,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
@@ -7901,7 +7187,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
@@ -8887,8 +8173,8 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
ret void
}
-define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s32, 0
; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
@@ -8941,7 +8227,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -8987,7 +8273,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9033,7 +8319,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9079,7 +8365,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_mov_b32 s14, s8
; GFX1164-NEXT: s_add_u32 s8, s2, 44
@@ -9116,7 +8402,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_add_u32 s8, s2, 44
; GFX1132-NEXT: s_addc_u32 s9, s3, 0
@@ -9151,7 +8437,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9197,7 +8483,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9243,7 +8529,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9289,7 +8575,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
@@ -9326,7 +8612,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
@@ -9368,1621 +8654,947 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp(ptr addrspace(1) %ptr) #2 {
; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s42, -1
-; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
-; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s2
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
-; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
-; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s14, -1
+; GFX7LESS-NEXT: s_mov_b32 s15, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s12, s12, s3
+; GFX7LESS-NEXT: s_addc_u32 s13, s13, 0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX7LESS-NEXT: s_cbranch_execz .LBB16_3
; GFX7LESS-NEXT: ; %bb.1:
-; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
-; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
-; GFX7LESS-NEXT: s_mov_b32 s1, 0x43300000
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX7LESS-NEXT: s_mov_b32 s7, 0x43300000
; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
-; GFX7LESS-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
-; GFX7LESS-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[6:7], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s8
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s9
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
; GFX7LESS-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[41:42]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
-; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX7LESS-NEXT: s_waitcnt expcnt(2)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b32 s12, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB16_2
; GFX7LESS-NEXT: .LBB16_3:
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s42, -1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX9-NEXT: s_mov_b64 s[0:1], exec
-; GFX9-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
-; GFX9-NEXT: s_add_u32 s40, s40, s3
-; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s10, -1
+; GFX9-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-NEXT: s_add_u32 s8, s8, s3
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-NEXT: s_addc_u32 s9, s9, 0
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_cbranch_execz .LBB16_3
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: v_mov_b32_e32 v0, 0
-; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
-; GFX9-NEXT: s_mov_b32 s1, 0x43300000
-; GFX9-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
-; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX9-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX9-NEXT: s_add_u32 s8, s34, 44
-; GFX9-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-NEXT: s_mov_b32 s12, s33
-; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB16_2
; GFX9-NEXT: .LBB16_3:
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s42, -1
-; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s10, -1
+; GFX1064-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s8, s8, s3
; GFX1064-NEXT: s_mov_b64 s[2:3], exec
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: s_addc_u32 s9, s9, 0
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1064-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX1064-NEXT: s_cbranch_execz .LBB16_3
; GFX1064-NEXT: ; %bb.1:
-; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1064-NEXT: s_mov_b32 s1, 0x43300000
-; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
-; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1064-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-NEXT: s_mov_b32 s12, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-NEXT: s_cbranch_execnz .LBB16_2
; GFX1064-NEXT: .LBB16_3:
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s33, s2
-; GFX1032-NEXT: s_mov_b32 s2, exec_lo
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s42, -1
-; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s10, -1
+; GFX1032-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-NEXT: s_mov_b32 s38, 0
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB16_3
; GFX1032-NEXT: ; %bb.1:
-; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1032-NEXT: s_mov_b32 s1, 0x43300000
-; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1032-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-NEXT: s_mov_b32 s12, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-NEXT: s_cbranch_execnz .LBB16_2
; GFX1032-NEXT: .LBB16_3:
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-NEXT: s_bcnt1_i32_b64 s0, exec
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: v_mov_b32_e32 v1, s2
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:20
-; GFX1164-NEXT: scratch_store_b32 off, v1, off offset:16
-; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off
; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: s_mov_b64 s[0:1], exec
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1164-NEXT: s_cbranch_execz .LBB16_3
; GFX1164-NEXT: ; %bb.1:
; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
-; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-NEXT: s_mov_b32 s33, s2
-; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b32 s12, s33
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-NEXT: s_cbranch_execnz .LBB16_2
; GFX1164-NEXT: .LBB16_3:
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:20
-; GFX1132-NEXT: scratch_store_b32 off, v1, off offset:16
-; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off offset:16
-; GFX1132-NEXT: s_mov_b32 s38, 0
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off
; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1132-NEXT: s_cbranch_execz .LBB16_3
; GFX1132-NEXT: ; %bb.1:
; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
-; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b32 s12, s33
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-NEXT: s_cbranch_execnz .LBB16_2
; GFX1132-NEXT: .LBB16_3:
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s42, -1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
-; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
-; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
-; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s10, -1
+; GFX9-DPP-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-DPP-NEXT: s_addc_u32 s9, s9, 0
; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX9-DPP-NEXT: ; %bb.1:
; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
-; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
-; GFX9-DPP-NEXT: s_mov_b32 s1, 0x43300000
-; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
-; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX9-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX9-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: s_mov_b32 s12, s33
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX9-DPP-NEXT: .LBB16_3:
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s8, s8, s3
; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s9, 0
; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX1064-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX1064-DPP-NEXT: ; %bb.1:
-; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1064-DPP-NEXT: s_mov_b32 s1, 0x43300000
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX1064-DPP-NEXT: .LBB16_3:
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX1032-DPP-NEXT: ; %bb.1:
-; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1032-DPP-NEXT: s_mov_b32 s1, 0x43300000
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX1032-DPP-NEXT: .LBB16_3:
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, exec
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s2
; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
-; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
-; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off
; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1164-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX1164-DPP-NEXT: ; %bb.1:
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
-; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX1164-DPP-NEXT: .LBB16_3:
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
-; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
-; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
-; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off
; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1132-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX1132-DPP-NEXT: ; %bb.1:
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
-; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX1132-DPP-NEXT: .LBB16_3:
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
- %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 monotonic, align 8
ret void
}
define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp(ptr addrspace(1) %ptr) #2 {
; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s50, -1
-; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
-; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s8
-; GFX7LESS-NEXT: s_mov_b32 s40, s7
-; GFX7LESS-NEXT: s_mov_b32 s41, s6
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
-; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
-; GFX7LESS-NEXT: s_mov_b32 s46, -1
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
-; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
-; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
-; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
; GFX7LESS-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[40:41]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX7LESS-NEXT: s_waitcnt expcnt(2)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB17_1
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s50, -1
-; GFX9-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-NEXT: s_add_u32 s48, s48, s9
-; GFX9-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-NEXT: s_mov_b32 s33, s8
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX9-NEXT: s_mov_b32 s40, s7
-; GFX9-NEXT: s_mov_b32 s41, s6
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v41, v1
-; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
; GFX9-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_cbranch_execnz .LBB17_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s50, -1
-; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-NEXT: s_mov_b32 s33, s8
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1064-NEXT: s_mov_b32 s40, s7
-; GFX1064-NEXT: s_mov_b32 s41, s6
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: v_mov_b32_e32 v41, v1
-; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
-; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-NEXT: s_cbranch_execnz .LBB17_1
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s50, -1
-; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-NEXT: s_mov_b32 s33, s8
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1032-NEXT: s_mov_b32 s40, s7
-; GFX1032-NEXT: s_mov_b32 s41, s6
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: v_mov_b32_e32 v41, v1
-; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
; GFX1032-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-NEXT: s_cbranch_execnz .LBB17_1
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-NEXT: s_mov_b32 s33, s8
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-NEXT: s_mov_b32 s12, s6
; GFX1164-NEXT: s_mov_b32 s13, s7
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: v_mov_b32_e32 v42, v0
-; GFX1164-NEXT: s_mov_b32 s40, s7
-; GFX1164-NEXT: s_mov_b32 s41, s6
-; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: v_mov_b32_e32 v41, v1
-; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v42
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-NEXT: s_mov_b32 s12, s41
-; GFX1164-NEXT: s_mov_b32 s13, s40
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-NEXT: s_cbranch_execnz .LBB17_1
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-NEXT: s_mov_b32 s40, s14
-; GFX1132-NEXT: s_mov_b32 s41, s13
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-NEXT: s_mov_b32 s13, s14
; GFX1132-NEXT: s_mov_b32 s14, s15
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s33, s15
-; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_mov_b32 s32, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
-; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1132-NEXT: s_mov_b32 s44, 0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
; GFX1132-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-NEXT: s_mov_b32 s12, s41
-; GFX1132-NEXT: s_mov_b32 s13, s40
-; GFX1132-NEXT: s_mov_b32 s14, s33
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
-; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-NEXT: s_cbranch_execnz .LBB17_1
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s50, -1
-; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-DPP-NEXT: s_mov_b32 s33, s8
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_mov_b32 s40, s7
-; GFX9-DPP-NEXT: s_mov_b32 s41, s6
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX9-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
; GFX1032-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
-; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
-; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
-; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
; GFX1132-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
%divValue = call double @div.float.value() strictfp
- %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue monotonic, align 8
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
index 98c09dfaa2d5..6548792180a0 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
@@ -3554,1550 +3554,859 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_default_scop
define amdgpu_kernel void @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s42, -1
-; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
-; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX7LESS-NEXT: s_cbranch_execz .LBB6_3
; GFX7LESS-NEXT: ; %bb.1:
-; GFX7LESS-NEXT: s_mov_b32 s33, s2
-; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s7
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
; GFX7LESS-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
-; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], 4.0
-; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
-; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b32 s12, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v5
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB6_2
; GFX7LESS-NEXT: .LBB6_3:
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s42, -1
-; GFX9-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX9-NEXT: s_add_u32 s40, s40, s3
; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9-NEXT: s_cbranch_execz .LBB6_3
; GFX9-NEXT: ; %bb.1:
-; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-NEXT: s_mov_b32 s33, s2
-; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX9-NEXT: s_add_u32 s8, s34, 44
-; GFX9-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-NEXT: s_mov_b32 s12, s33
-; GFX9-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB6_2
; GFX9-NEXT: .LBB6_3:
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s42, -1
-; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX1064-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064-NEXT: s_cbranch_execz .LBB6_3
; GFX1064-NEXT: ; %bb.1:
-; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-NEXT: s_mov_b32 s33, s2
-; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-NEXT: s_mov_b32 s12, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-NEXT: s_cbranch_execnz .LBB6_2
; GFX1064-NEXT: .LBB6_3:
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s42, -1
-; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: s_mov_b32 s2, 0
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-NEXT: s_mov_b32 s38, 0
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB6_3
; GFX1032-NEXT: ; %bb.1:
-; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-NEXT: s_mov_b32 s12, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-NEXT: s_cbranch_execnz .LBB6_2
; GFX1032-NEXT: .LBB6_3:
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-NEXT: s_cbranch_execz .LBB6_3
; GFX1164-NEXT: ; %bb.1:
-; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-NEXT: s_mov_b32 s33, s2
-; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b32 s12, s33
-; GFX1164-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-NEXT: s_cbranch_execnz .LBB6_2
; GFX1164-NEXT: .LBB6_3:
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: v_mov_b32_e32 v40, v0
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-NEXT: s_mov_b32 s38, 0
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-NEXT: s_cbranch_execz .LBB6_3
; GFX1132-NEXT: ; %bb.1:
-; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b32 s12, s33
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX1132-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-NEXT: s_cbranch_execnz .LBB6_2
; GFX1132-NEXT: .LBB6_3:
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s42, -1
-; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX9-DPP-NEXT: ; %bb.1:
-; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-DPP-NEXT: s_mov_b32 s33, s2
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX9-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: s_mov_b32 s12, s33
-; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX9-DPP-NEXT: .LBB6_3:
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX1064-DPP-NEXT: ; %bb.1:
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX1064-DPP-NEXT: .LBB6_3:
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX1032-DPP-NEXT: ; %bb.1:
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX1032-DPP-NEXT: .LBB6_3:
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX1164-DPP-NEXT: ; %bb.1:
-; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX1164-DPP-NEXT: .LBB6_3:
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX1132-DPP-NEXT: ; %bb.1:
-; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX1132-DPP-NEXT: .LBB6_3:
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
- %result = atomicrmw fmax ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 8
ret void
}
define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s50, -1
-; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
-; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s8
-; GFX7LESS-NEXT: s_mov_b32 s40, s7
-; GFX7LESS-NEXT: s_mov_b32 s41, s6
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
-; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
-; GFX7LESS-NEXT: s_mov_b32 s46, -1
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
-; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
-; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
; GFX7LESS-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB7_1
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s50, -1
-; GFX9-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-NEXT: s_add_u32 s48, s48, s9
-; GFX9-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-NEXT: s_mov_b32 s33, s8
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX9-NEXT: s_mov_b32 s40, s7
-; GFX9-NEXT: s_mov_b32 s41, s6
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
; GFX9-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_cbranch_execnz .LBB7_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s50, -1
-; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-NEXT: s_mov_b32 s33, s8
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1064-NEXT: s_mov_b32 s40, s7
-; GFX1064-NEXT: s_mov_b32 s41, s6
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_waitcnt vmcnt(0)
; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-NEXT: s_cbranch_execnz .LBB7_1
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s50, -1
-; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-NEXT: s_mov_b32 s33, s8
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1032-NEXT: s_mov_b32 s40, s7
-; GFX1032-NEXT: s_mov_b32 s41, s6
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s0, 0
; GFX1032-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-NEXT: s_waitcnt vmcnt(0)
; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-NEXT: s_cbranch_execnz .LBB7_1
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-NEXT: s_mov_b32 s33, s8
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-NEXT: s_mov_b32 s12, s6
; GFX1164-NEXT: s_mov_b32 s13, s7
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-NEXT: s_mov_b32 s40, s7
-; GFX1164-NEXT: s_mov_b32 s41, s6
-; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1164-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-NEXT: s_mov_b32 s12, s41
-; GFX1164-NEXT: s_mov_b32 s13, s40
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-NEXT: s_cbranch_execnz .LBB7_1
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-NEXT: s_mov_b32 s40, s14
-; GFX1132-NEXT: s_mov_b32 s41, s13
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-NEXT: s_mov_b32 s13, s14
; GFX1132-NEXT: s_mov_b32 s14, s15
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s33, s15
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_mov_b32 s32, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1132-NEXT: s_mov_b32 s44, 0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s0, 0
; GFX1132-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v40
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-NEXT: s_mov_b32 s12, s41
-; GFX1132-NEXT: s_mov_b32 s13, s40
-; GFX1132-NEXT: s_mov_b32 s14, s33
-; GFX1132-NEXT: v_mov_b32_e32 v4, 0
-; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
-; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-NEXT: s_cbranch_execnz .LBB7_1
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s50, -1
-; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-DPP-NEXT: s_mov_b32 s33, s8
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_mov_b32 s40, s7
-; GFX9-DPP-NEXT: s_mov_b32 s41, s6
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX9-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
; GFX1032-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
-; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
; GFX1132-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
%divValue = call double @div.double.value()
- %result = atomicrmw fmax ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 8
ret void
}
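
The new check lines in this hunk follow the compare-and-swap expansion of the
atomicrmw fmax (visible in the %atomicrmw.start / %atomicrmw.end block
comments): instead of spilling the operands to scratch and calling
__atomic_compare_exchange, the canonicalized value is clamped with v_max_f64
and retried with an inline global_atomic_cmpswap until the compare succeeds.
A minimal LLVM IR sketch of that loop, assuming the standard AtomicExpand
lowering; the value names below are illustrative, not taken from the test:

    ; Sketch only: fmax is retried via an integer cmpxchg on the bitcast bits.
    entry:
      %init = load double, ptr addrspace(1) %ptr, align 8
      br label %atomicrmw.start
    atomicrmw.start:
      %loaded = phi double [ %init, %entry ], [ %new, %atomicrmw.start ]
      %max = call double @llvm.maxnum.f64(double %loaded, double %divValue)
      %old.i = bitcast double %loaded to i64
      %max.i = bitcast double %max to i64
      %pair = cmpxchg ptr addrspace(1) %ptr, i64 %old.i, i64 %max.i syncscope("agent") monotonic monotonic, align 8
      %new.i = extractvalue { i64, i1 } %pair, 0
      %ok = extractvalue { i64, i1 } %pair, 1
      %new = bitcast i64 %new.i to double
      br i1 %ok, label %atomicrmw.end, label %atomicrmw.start
    atomicrmw.end:
      ret void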
@@ -5963,1550 +5272,859 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_one_a
define amdgpu_kernel void @global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s42, -1
-; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
-; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX7LESS-NEXT: s_cbranch_execz .LBB10_3
; GFX7LESS-NEXT: ; %bb.1:
-; GFX7LESS-NEXT: s_mov_b32 s33, s2
-; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s7
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
; GFX7LESS-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
-; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], 4.0
-; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
-; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b32 s12, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v5
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_2
; GFX7LESS-NEXT: .LBB10_3:
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s42, -1
-; GFX9-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX9-NEXT: s_add_u32 s40, s40, s3
; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9-NEXT: s_cbranch_execz .LBB10_3
; GFX9-NEXT: ; %bb.1:
-; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-NEXT: s_mov_b32 s33, s2
-; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX9-NEXT: s_add_u32 s8, s34, 44
-; GFX9-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-NEXT: s_mov_b32 s12, s33
-; GFX9-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB10_2
; GFX9-NEXT: .LBB10_3:
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s42, -1
-; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX1064-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064-NEXT: s_cbranch_execz .LBB10_3
; GFX1064-NEXT: ; %bb.1:
-; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-NEXT: s_mov_b32 s33, s2
-; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-NEXT: s_mov_b32 s12, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-NEXT: s_cbranch_execnz .LBB10_2
; GFX1064-NEXT: .LBB10_3:
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s42, -1
-; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: s_mov_b32 s2, 0
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-NEXT: s_mov_b32 s38, 0
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB10_3
; GFX1032-NEXT: ; %bb.1:
-; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-NEXT: s_mov_b32 s12, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-NEXT: s_cbranch_execnz .LBB10_2
; GFX1032-NEXT: .LBB10_3:
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-NEXT: s_cbranch_execz .LBB10_3
; GFX1164-NEXT: ; %bb.1:
-; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-NEXT: s_mov_b32 s33, s2
-; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b32 s12, s33
-; GFX1164-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-NEXT: s_cbranch_execnz .LBB10_2
; GFX1164-NEXT: .LBB10_3:
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: v_mov_b32_e32 v40, v0
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-NEXT: s_mov_b32 s38, 0
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-NEXT: s_cbranch_execz .LBB10_3
; GFX1132-NEXT: ; %bb.1:
-; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b32 s12, s33
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX1132-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-NEXT: s_cbranch_execnz .LBB10_2
; GFX1132-NEXT: .LBB10_3:
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s42, -1
-; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX9-DPP-NEXT: ; %bb.1:
-; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-DPP-NEXT: s_mov_b32 s33, s2
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX9-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: s_mov_b32 s12, s33
-; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX9-DPP-NEXT: .LBB10_3:
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX1064-DPP-NEXT: ; %bb.1:
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX1064-DPP-NEXT: .LBB10_3:
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX1032-DPP-NEXT: ; %bb.1:
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX1032-DPP-NEXT: .LBB10_3:
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX1164-DPP-NEXT: ; %bb.1:
-; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX1164-DPP-NEXT: .LBB10_3:
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX1132-DPP-NEXT: ; %bb.1:
-; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX1132-DPP-NEXT: .LBB10_3:
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
- %result = atomicrmw fmax ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double 4.0 monotonic, align 8
ret void
}
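
The driver for every hunk above is the one-line IR change at the end of the test body: the atomicrmw alignment is bumped from 4 to 8. A double is 8 bytes, so with `align 4` the operation was under-aligned and had to be expanded to a `__atomic_compare_exchange` libcall — which is what all the removed scratch-buffer setup and `s_getpc_b64`/`s_swappc_b64` call sequences implemented. With `align 8` the backend emits the inline cmpxchg loop the added lines check for (`global_atomic_cmpswap_x2` on GFX9/GFX10, `global_atomic_cmpswap_b64` on GFX11, with `glc` returning the old value for the `v_cmp_eq_u64` convergence test); the GFX11 `s_set_inst_prefetch_distance`/`.p2align 6` loop tuning also disappears now that the loop body is only a handful of instructions. A minimal standalone sketch of the two cases, using hypothetical function names but the same operation as the test:

; Under-aligned (8-byte double, 4-byte alignment): expanded to a
; __atomic_compare_exchange libcall loop, as in the removed lines.
define amdgpu_kernel void @fmax_underaligned(ptr addrspace(1) %ptr) {
  %r = atomicrmw fmax ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
  ret void
}

; Naturally aligned: expanded to an inline cmpswap loop, as in the
; added lines.
define amdgpu_kernel void @fmax_aligned(ptr addrspace(1) %ptr) {
  %r = atomicrmw fmax ptr addrspace(1) %ptr, double 4.0 monotonic, align 8
  ret void
}
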
define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s50, -1
-; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
-; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s8
-; GFX7LESS-NEXT: s_mov_b32 s40, s7
-; GFX7LESS-NEXT: s_mov_b32 s41, s6
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
-; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
-; GFX7LESS-NEXT: s_mov_b32 s46, -1
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
-; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
-; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
; GFX7LESS-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB11_1
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s50, -1
-; GFX9-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-NEXT: s_add_u32 s48, s48, s9
-; GFX9-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-NEXT: s_mov_b32 s33, s8
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX9-NEXT: s_mov_b32 s40, s7
-; GFX9-NEXT: s_mov_b32 s41, s6
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
; GFX9-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_cbranch_execnz .LBB11_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s50, -1
-; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-NEXT: s_mov_b32 s33, s8
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1064-NEXT: s_mov_b32 s40, s7
-; GFX1064-NEXT: s_mov_b32 s41, s6
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_waitcnt vmcnt(0)
; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-NEXT: s_cbranch_execnz .LBB11_1
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s50, -1
-; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-NEXT: s_mov_b32 s33, s8
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1032-NEXT: s_mov_b32 s40, s7
-; GFX1032-NEXT: s_mov_b32 s41, s6
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s0, 0
; GFX1032-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-NEXT: s_waitcnt vmcnt(0)
; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-NEXT: s_cbranch_execnz .LBB11_1
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-NEXT: s_mov_b32 s33, s8
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-NEXT: s_mov_b32 s12, s6
; GFX1164-NEXT: s_mov_b32 s13, s7
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-NEXT: s_mov_b32 s40, s7
-; GFX1164-NEXT: s_mov_b32 s41, s6
-; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1164-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-NEXT: s_mov_b32 s12, s41
-; GFX1164-NEXT: s_mov_b32 s13, s40
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-NEXT: s_cbranch_execnz .LBB11_1
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-NEXT: s_mov_b32 s40, s14
-; GFX1132-NEXT: s_mov_b32 s41, s13
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-NEXT: s_mov_b32 s13, s14
; GFX1132-NEXT: s_mov_b32 s14, s15
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s33, s15
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_mov_b32 s32, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1132-NEXT: s_mov_b32 s44, 0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s0, 0
; GFX1132-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v40
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-NEXT: s_mov_b32 s12, s41
-; GFX1132-NEXT: s_mov_b32 s13, s40
-; GFX1132-NEXT: s_mov_b32 s14, s33
-; GFX1132-NEXT: v_mov_b32_e32 v4, 0
-; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
-; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-NEXT: s_cbranch_execnz .LBB11_1
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s50, -1
-; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-DPP-NEXT: s_mov_b32 s33, s8
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_mov_b32 s40, s7
-; GFX9-DPP-NEXT: s_mov_b32 s41, s6
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX9-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
; GFX1032-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
-; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
; GFX1132-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
%divValue = call double @div.double.value()
- %result = atomicrmw fmax ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %divValue monotonic, align 8
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
index 1fb0db0e1f0d..6936cdc4d379 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
@@ -3554,1550 +3554,859 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_default_scop
define amdgpu_kernel void @global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s42, -1
-; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
-; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX7LESS-NEXT: s_cbranch_execz .LBB6_3
; GFX7LESS-NEXT: ; %bb.1:
-; GFX7LESS-NEXT: s_mov_b32 s33, s2
-; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s7
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
; GFX7LESS-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
-; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_min_f64 v[0:1], v[2:3], 4.0
-; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
-; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b32 s12, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v5
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB6_2
; GFX7LESS-NEXT: .LBB6_3:
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s42, -1
-; GFX9-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX9-NEXT: s_add_u32 s40, s40, s3
; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9-NEXT: s_cbranch_execz .LBB6_3
; GFX9-NEXT: ; %bb.1:
-; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-NEXT: s_mov_b32 s33, s2
-; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX9-NEXT: s_add_u32 s8, s34, 44
-; GFX9-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-NEXT: s_mov_b32 s12, s33
-; GFX9-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB6_2
; GFX9-NEXT: .LBB6_3:
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s42, -1
-; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX1064-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064-NEXT: s_cbranch_execz .LBB6_3
; GFX1064-NEXT: ; %bb.1:
-; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-NEXT: s_mov_b32 s33, s2
-; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-NEXT: s_mov_b32 s12, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-NEXT: s_cbranch_execnz .LBB6_2
; GFX1064-NEXT: .LBB6_3:
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s42, -1
-; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: s_mov_b32 s2, 0
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-NEXT: s_mov_b32 s38, 0
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB6_3
; GFX1032-NEXT: ; %bb.1:
-; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-NEXT: s_mov_b32 s12, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-NEXT: s_cbranch_execnz .LBB6_2
; GFX1032-NEXT: .LBB6_3:
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-NEXT: s_cbranch_execz .LBB6_3
; GFX1164-NEXT: ; %bb.1:
-; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-NEXT: s_mov_b32 s33, s2
-; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b32 s12, s33
-; GFX1164-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-NEXT: s_cbranch_execnz .LBB6_2
; GFX1164-NEXT: .LBB6_3:
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: v_mov_b32_e32 v40, v0
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-NEXT: s_mov_b32 s38, 0
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-NEXT: s_cbranch_execz .LBB6_3
; GFX1132-NEXT: ; %bb.1:
-; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b32 s12, s33
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX1132-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-NEXT: s_cbranch_execnz .LBB6_2
; GFX1132-NEXT: .LBB6_3:
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s42, -1
-; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX9-DPP-NEXT: ; %bb.1:
-; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-DPP-NEXT: s_mov_b32 s33, s2
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX9-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: s_mov_b32 s12, s33
-; GFX9-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX9-DPP-NEXT: .LBB6_3:
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX1064-DPP-NEXT: ; %bb.1:
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX1064-DPP-NEXT: .LBB6_3:
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX1032-DPP-NEXT: ; %bb.1:
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX1032-DPP-NEXT: .LBB6_3:
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX1164-DPP-NEXT: ; %bb.1:
-; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1164-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX1164-DPP-NEXT: .LBB6_3:
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX1132-DPP-NEXT: ; %bb.1:
-; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX1132-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX1132-DPP-NEXT: .LBB6_3:
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
- %result = atomicrmw fmin ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 8
ret void
}
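; NOTE: The `align 4` -> `align 8` change on the `atomicrmw` above is what drives
; this hunk: a 64-bit floating-point `atomicrmw` that is under-aligned (4-byte
; alignment for an 8-byte access) is lowered through `__atomic_compare_exchange`
; libcalls (the removed `s_swappc_b64` call sequences and their scratch spills),
; while the naturally aligned form expands inline to a
; `global_atomic_cmpswap`/`buffer_atomic_cmpswap_x2` retry loop. A minimal sketch
; of that inline expansion in LLVM IR, assuming `@llvm.minnum.f64` semantics for
; `fmin` (illustrative only; names and the kernel shape are not from this test):

define amdgpu_kernel void @fmin_expansion_sketch(ptr addrspace(1) %ptr) {
entry:
  ; Load the current value once, then retry with cmpxchg until it sticks.
  %init = load i64, ptr addrspace(1) %ptr, align 8
  br label %atomicrmw.start

atomicrmw.start:
  %loaded = phi i64 [ %init, %entry ], [ %newloaded, %atomicrmw.start ]
  ; FP atomics are expanded through an integer cmpxchg via bitcasts; the
  ; canonicalize+min pair matches the v_max_f64/v_min_f64 sequence above.
  %ld.f = bitcast i64 %loaded to double
  %min = call double @llvm.minnum.f64(double %ld.f, double 4.000000e+00)
  %min.i = bitcast double %min to i64
  %pair = cmpxchg ptr addrspace(1) %ptr, i64 %loaded, i64 %min.i syncscope("agent") monotonic monotonic, align 8
  %newloaded = extractvalue { i64, i1 } %pair, 0
  %success = extractvalue { i64, i1 } %pair, 1
  br i1 %success, label %atomicrmw.end, label %atomicrmw.start

atomicrmw.end:
  ret void
}

declare double @llvm.minnum.f64(double, double)

; The `.LBB6_2`/`.LBB7_1` loops in the added check lines are this structure after
; instruction selection: compare the cmpswap result against the previous value,
; accumulate the "done" lanes into an exec mask, and branch back while any lane
; still needs to retry.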
define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s50, -1
-; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
-; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s8
-; GFX7LESS-NEXT: s_mov_b32 s40, s7
-; GFX7LESS-NEXT: s_mov_b32 s41, s6
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
-; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
-; GFX7LESS-NEXT: s_mov_b32 s46, -1
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
-; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
-; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
; GFX7LESS-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB7_1
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s50, -1
-; GFX9-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-NEXT: s_add_u32 s48, s48, s9
-; GFX9-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-NEXT: s_mov_b32 s33, s8
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX9-NEXT: s_mov_b32 s40, s7
-; GFX9-NEXT: s_mov_b32 s41, s6
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
; GFX9-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_cbranch_execnz .LBB7_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s50, -1
-; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-NEXT: s_mov_b32 s33, s8
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1064-NEXT: s_mov_b32 s40, s7
-; GFX1064-NEXT: s_mov_b32 s41, s6
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_waitcnt vmcnt(0)
; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-NEXT: s_cbranch_execnz .LBB7_1
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s50, -1
-; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-NEXT: s_mov_b32 s33, s8
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1032-NEXT: s_mov_b32 s40, s7
-; GFX1032-NEXT: s_mov_b32 s41, s6
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s0, 0
; GFX1032-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-NEXT: s_waitcnt vmcnt(0)
; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-NEXT: s_cbranch_execnz .LBB7_1
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-NEXT: s_mov_b32 s33, s8
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-NEXT: s_mov_b32 s12, s6
; GFX1164-NEXT: s_mov_b32 s13, s7
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-NEXT: s_mov_b32 s40, s7
-; GFX1164-NEXT: s_mov_b32 s41, s6
-; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1164-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-NEXT: s_mov_b32 s12, s41
-; GFX1164-NEXT: s_mov_b32 s13, s40
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-NEXT: s_cbranch_execnz .LBB7_1
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-NEXT: s_mov_b32 s40, s14
-; GFX1132-NEXT: s_mov_b32 s41, s13
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-NEXT: s_mov_b32 s13, s14
; GFX1132-NEXT: s_mov_b32 s14, s15
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s33, s15
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_mov_b32 s32, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1132-NEXT: s_mov_b32 s44, 0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s0, 0
; GFX1132-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v40
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-NEXT: s_mov_b32 s12, s41
-; GFX1132-NEXT: s_mov_b32 s13, s40
-; GFX1132-NEXT: s_mov_b32 s14, s33
-; GFX1132-NEXT: v_mov_b32_e32 v4, 0
-; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
-; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-NEXT: s_cbranch_execnz .LBB7_1
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s50, -1
-; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-DPP-NEXT: s_mov_b32 s33, s8
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_mov_b32 s40, s7
-; GFX9-DPP-NEXT: s_mov_b32 s41, s6
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX9-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
; GFX1032-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
-; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
; GFX1132-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
%divValue = call double @div.double.value()
- %result = atomicrmw fmin ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 8
ret void
}
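; NOTE: the only IR change in this test body is `align 4` -> `align 8`. With
; natural 8-byte alignment the backend no longer routes the read-modify-write
; through the `__atomic_compare_exchange` libcall (the removed s_getpc_b64 /
; s_swappc_b64 sequences); it can expand the atomicrmw inline to a
; compare-and-swap loop, which ISel selects as the global_atomic_cmpswap
; loops in the checks above. A minimal sketch of that expansion follows; the
; function name is hypothetical and llvm.minnum stands in for the
; v_max_f64/v_min_f64 canonicalize-then-min pair, so treat it as an
; illustration rather than the pass's exact output.
declare double @llvm.minnum.f64(double, double)

define void @fmin_cas_sketch(ptr addrspace(1) %ptr, double %val) {
entry:
  %init = load double, ptr addrspace(1) %ptr, align 8
  br label %atomicrmw.start

atomicrmw.start:
  %loaded = phi double [ %init, %entry ], [ %snapshot, %atomicrmw.start ]
  %min = call double @llvm.minnum.f64(double %loaded, double %val)
  %loaded.i = bitcast double %loaded to i64
  %min.i = bitcast double %min to i64
  ; this cmpxchg is what the global_atomic_cmpswap_x2/_b64 checks implement
  %pair = cmpxchg ptr addrspace(1) %ptr, i64 %loaded.i, i64 %min.i syncscope("agent") monotonic monotonic, align 8
  %old = extractvalue { i64, i1 } %pair, 0
  %success = extractvalue { i64, i1 } %pair, 1
  %snapshot = bitcast i64 %old to double
  br i1 %success, label %atomicrmw.end, label %atomicrmw.start

atomicrmw.end:
  ret void
}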
@@ -5963,1550 +5272,859 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_one_a
define amdgpu_kernel void @global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s42, -1
-; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
-; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX7LESS-NEXT: s_cbranch_execz .LBB10_3
; GFX7LESS-NEXT: ; %bb.1:
-; GFX7LESS-NEXT: s_mov_b32 s33, s2
-; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s7
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
; GFX7LESS-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
-; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_min_f64 v[0:1], v[2:3], 4.0
-; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
-; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b32 s12, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v5
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_2
; GFX7LESS-NEXT: .LBB10_3:
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s42, -1
-; GFX9-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX9-NEXT: s_add_u32 s40, s40, s3
; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9-NEXT: s_cbranch_execz .LBB10_3
; GFX9-NEXT: ; %bb.1:
-; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-NEXT: s_mov_b32 s33, s2
-; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX9-NEXT: s_add_u32 s8, s34, 44
-; GFX9-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-NEXT: s_mov_b32 s12, s33
-; GFX9-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB10_2
; GFX9-NEXT: .LBB10_3:
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s42, -1
-; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX1064-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064-NEXT: s_cbranch_execz .LBB10_3
; GFX1064-NEXT: ; %bb.1:
-; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-NEXT: s_mov_b32 s33, s2
-; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-NEXT: s_mov_b32 s12, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-NEXT: s_cbranch_execnz .LBB10_2
; GFX1064-NEXT: .LBB10_3:
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s42, -1
-; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: s_mov_b32 s2, 0
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-NEXT: s_mov_b32 s38, 0
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB10_3
; GFX1032-NEXT: ; %bb.1:
-; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-NEXT: s_mov_b32 s12, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-NEXT: s_cbranch_execnz .LBB10_2
; GFX1032-NEXT: .LBB10_3:
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-NEXT: s_cbranch_execz .LBB10_3
; GFX1164-NEXT: ; %bb.1:
-; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-NEXT: s_mov_b32 s33, s2
-; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b32 s12, s33
-; GFX1164-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-NEXT: s_cbranch_execnz .LBB10_2
; GFX1164-NEXT: .LBB10_3:
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: v_mov_b32_e32 v40, v0
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-NEXT: s_mov_b32 s38, 0
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-NEXT: s_cbranch_execz .LBB10_3
; GFX1132-NEXT: ; %bb.1:
-; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b32 s12, s33
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX1132-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-NEXT: s_cbranch_execnz .LBB10_2
; GFX1132-NEXT: .LBB10_3:
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s42, -1
-; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX9-DPP-NEXT: ; %bb.1:
-; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-DPP-NEXT: s_mov_b32 s33, s2
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX9-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: s_mov_b32 s12, s33
-; GFX9-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX9-DPP-NEXT: .LBB10_3:
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX1064-DPP-NEXT: ; %bb.1:
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX1064-DPP-NEXT: .LBB10_3:
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX1032-DPP-NEXT: ; %bb.1:
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX1032-DPP-NEXT: .LBB10_3:
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX1164-DPP-NEXT: ; %bb.1:
-; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1164-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX1164-DPP-NEXT: .LBB10_3:
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX1132-DPP-NEXT: ; %bb.1:
-; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX1132-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX1132-DPP-NEXT: .LBB10_3:
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
- %result = atomicrmw fmin ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double 4.0 monotonic, align 8
ret void
}
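; NOTE: the uni_value kernels above (the .LBB10_* checks) show one more
; pattern in their v_mbcnt_lo/v_mbcnt_hi + s_and_saveexec preamble: because
; every active lane would store the identical fmin result, only the first
; active lane executes the CAS loop. A hedged sketch of that election; the
; function name is hypothetical, and the real lowering masks mbcnt with the
; exec mask rather than the -1 constant used here for simplicity.
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)

define amdgpu_kernel void @uni_value_election_sketch(ptr addrspace(1) %ptr) {
entry:
  ; lane index = count of active lanes below this one (v_mbcnt_lo/hi above)
  %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
  %lane = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
  %first = icmp eq i32 %lane, 0
  br i1 %first, label %one.lane, label %exit

one.lane:
  ; a single lane performs the update on behalf of the whole wave
  %r = atomicrmw fmin ptr addrspace(1) %ptr, double 4.0 monotonic, align 8
  br label %exit

exit:
  ret void
}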
define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s50, -1
-; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
-; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s8
-; GFX7LESS-NEXT: s_mov_b32 s40, s7
-; GFX7LESS-NEXT: s_mov_b32 s41, s6
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
-; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
-; GFX7LESS-NEXT: s_mov_b32 s46, -1
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
-; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
-; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
; GFX7LESS-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB11_1
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s50, -1
-; GFX9-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-NEXT: s_add_u32 s48, s48, s9
-; GFX9-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-NEXT: s_mov_b32 s33, s8
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX9-NEXT: s_mov_b32 s40, s7
-; GFX9-NEXT: s_mov_b32 s41, s6
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
; GFX9-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_cbranch_execnz .LBB11_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s50, -1
-; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-NEXT: s_mov_b32 s33, s8
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1064-NEXT: s_mov_b32 s40, s7
-; GFX1064-NEXT: s_mov_b32 s41, s6
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_waitcnt vmcnt(0)
; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-NEXT: s_cbranch_execnz .LBB11_1
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s50, -1
-; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-NEXT: s_mov_b32 s33, s8
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1032-NEXT: s_mov_b32 s40, s7
-; GFX1032-NEXT: s_mov_b32 s41, s6
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s0, 0
; GFX1032-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-NEXT: s_waitcnt vmcnt(0)
; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-NEXT: s_cbranch_execnz .LBB11_1
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-NEXT: s_mov_b32 s33, s8
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-NEXT: s_mov_b32 s12, s6
; GFX1164-NEXT: s_mov_b32 s13, s7
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-NEXT: s_mov_b32 s40, s7
-; GFX1164-NEXT: s_mov_b32 s41, s6
-; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1164-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-NEXT: s_mov_b32 s12, s41
-; GFX1164-NEXT: s_mov_b32 s13, s40
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-NEXT: s_cbranch_execnz .LBB11_1
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-NEXT: s_mov_b32 s40, s14
-; GFX1132-NEXT: s_mov_b32 s41, s13
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-NEXT: s_mov_b32 s13, s14
; GFX1132-NEXT: s_mov_b32 s14, s15
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s33, s15
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_mov_b32 s32, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1132-NEXT: s_mov_b32 s44, 0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s0, 0
; GFX1132-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v40
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-NEXT: s_mov_b32 s12, s41
-; GFX1132-NEXT: s_mov_b32 s13, s40
-; GFX1132-NEXT: s_mov_b32 s14, s33
-; GFX1132-NEXT: v_mov_b32_e32 v4, 0
-; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
-; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-NEXT: s_cbranch_execnz .LBB11_1
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s50, -1
-; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-DPP-NEXT: s_mov_b32 s33, s8
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_mov_b32 s40, s7
-; GFX9-DPP-NEXT: s_mov_b32 s41, s6
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX9-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
; GFX1032-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
-; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
; GFX1132-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
%divValue = call double @div.double.value()
- %result = atomicrmw fmin ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %divValue monotonic, align 8
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
index c5f7980d1e3a..5cb57703c01d 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
@@ -1156,8 +1156,8 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
ret void
}
-define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX7LESS-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1203,7 +1203,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX7LESS-NEXT: .LBB2_3:
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1245,7 +1245,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX9-NEXT: .LBB2_3:
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1285,7 +1285,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX1064-NEXT: .LBB2_3:
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1324,7 +1324,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX1032-NEXT: .LBB2_3:
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
@@ -1367,7 +1367,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX1164-NEXT: .LBB2_3:
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -1407,7 +1407,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX1132-NEXT: .LBB2_3:
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1449,7 +1449,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX9-DPP-NEXT: .LBB2_3:
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1489,7 +1489,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX1064-DPP-NEXT: .LBB2_3:
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1528,7 +1528,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX1032-DPP-NEXT: .LBB2_3:
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
@@ -1571,7 +1571,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX1164-DPP-NEXT: .LBB2_3:
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -1615,8 +1615,8 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
}
-define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s32, 0
; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
@@ -1666,7 +1666,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -1732,7 +1732,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX9-NEXT: .LBB3_5:
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -1798,7 +1798,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX1064-NEXT: .LBB3_5:
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -1863,7 +1863,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX1032-NEXT: .LBB3_5:
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1164-NEXT: s_mov_b32 s14, s8
@@ -1924,7 +1924,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX1164-NEXT: .LBB3_5:
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1132-NEXT: v_mov_b32_e32 v31, v0
@@ -1984,7 +1984,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX1132-NEXT: .LBB3_5:
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -2068,7 +2068,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX9-DPP-NEXT: .LBB3_3:
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -2150,7 +2150,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX1064-DPP-NEXT: .LBB3_3:
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -2226,7 +2226,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX1032-DPP-NEXT: .LBB3_3:
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
@@ -2308,7 +2308,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX1164-DPP-NEXT: .LBB3_3:
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
@@ -3617,8 +3617,8 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
}
-define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s32, 0
; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
@@ -3668,7 +3668,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -3734,7 +3734,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX9-NEXT: .LBB6_5:
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -3800,7 +3800,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1064-NEXT: .LBB6_5:
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -3865,7 +3865,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1032-NEXT: .LBB6_5:
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1164-NEXT: s_mov_b32 s14, s8
@@ -3926,7 +3926,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1164-NEXT: .LBB6_5:
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1132-NEXT: v_mov_b32_e32 v31, v0
@@ -3986,7 +3986,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1132-NEXT: .LBB6_5:
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -4070,7 +4070,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX9-DPP-NEXT: .LBB6_3:
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -4152,7 +4152,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1064-DPP-NEXT: .LBB6_3:
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -4228,7 +4228,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1032-DPP-NEXT: .LBB6_3:
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
@@ -4310,7 +4310,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1164-DPP-NEXT: .LBB6_3:
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
@@ -5620,1589 +5620,875 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_default_scop
define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s42, -1
-; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
-; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s2
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
-; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
-; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX7LESS-NEXT: s_cbranch_execz .LBB9_3
; GFX7LESS-NEXT: ; %bb.1:
-; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
-; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
-; GFX7LESS-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX7LESS-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s7
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
; GFX7LESS-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[41:42]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
-; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX7LESS-NEXT: s_waitcnt expcnt(2)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b32 s12, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_2
; GFX7LESS-NEXT: .LBB9_3:
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s42, -1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX9-NEXT: s_mov_b64 s[0:1], exec
-; GFX9-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
-; GFX9-NEXT: s_add_u32 s40, s40, s3
-; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_cbranch_execz .LBB9_3
; GFX9-NEXT: ; %bb.1:
-; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
-; GFX9-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-NEXT: s_mov_b64 s[38:39], 0
-; GFX9-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX9-NEXT: s_add_u32 s8, s34, 44
-; GFX9-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-NEXT: s_mov_b32 s12, s33
-; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB9_2
; GFX9-NEXT: .LBB9_3:
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s42, -1
-; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-NEXT: s_mov_b32 s33, s2
; GFX1064-NEXT: s_mov_b64 s[2:3], exec
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1064-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX1064-NEXT: s_cbranch_execz .LBB9_3
; GFX1064-NEXT: ; %bb.1:
-; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1064-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-NEXT: s_mov_b32 s12, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-NEXT: s_cbranch_execnz .LBB9_2
; GFX1064-NEXT: .LBB9_3:
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s33, s2
-; GFX1032-NEXT: s_mov_b32 s2, exec_lo
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s42, -1
-; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-NEXT: s_mov_b32 s38, 0
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB9_3
; GFX1032-NEXT: ; %bb.1:
-; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_cvt_f64_u32_e32 v[0:1], s3
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1032-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-NEXT: s_mov_b32 s12, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-NEXT: s_cbranch_execnz .LBB9_2
; GFX1032-NEXT: .LBB9_3:
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b32 s33, s2
; GFX1164-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], exec
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: s_mov_b64 s[0:1], exec
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-NEXT: s_cbranch_execz .LBB9_3
; GFX1164-NEXT: ; %bb.1:
-; GFX1164-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b32 s12, s33
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-NEXT: s_cbranch_execnz .LBB9_2
; GFX1164-NEXT: .LBB9_3:
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132-NEXT: v_mov_b32_e32 v40, v0
-; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-NEXT: s_mov_b32 s38, 0
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1132-NEXT: s_mov_b32 s4, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-NEXT: s_cbranch_execz .LBB9_3
; GFX1132-NEXT: ; %bb.1:
-; GFX1132-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_cvt_f64_u32_e32 v[0:1], s3
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b32 s12, s33
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-NEXT: s_cbranch_execnz .LBB9_2
; GFX1132-NEXT: .LBB9_3:
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s42, -1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
-; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
-; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
-; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX9-DPP-NEXT: ; %bb.1:
-; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
-; GFX9-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
-; GFX9-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX9-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: s_mov_b32 s12, s33
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX9-DPP-NEXT: .LBB9_3:
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX1064-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX1064-DPP-NEXT: ; %bb.1:
-; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX1064-DPP-NEXT: .LBB9_3:
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX1032-DPP-NEXT: ; %bb.1:
-; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s3
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX1032-DPP-NEXT: .LBB9_3:
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], exec
; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX1164-DPP-NEXT: ; %bb.1:
-; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX1164-DPP-NEXT: .LBB9_3:
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s4, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX1132-DPP-NEXT: ; %bb.1:
-; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s3
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX1132-DPP-NEXT: .LBB9_3:
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
- %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 8
ret void
}
define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s50, -1
-; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
-; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s8
-; GFX7LESS-NEXT: s_mov_b32 s40, s7
-; GFX7LESS-NEXT: s_mov_b32 s41, s6
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
-; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
-; GFX7LESS-NEXT: s_mov_b32 s46, -1
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
-; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
-; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
-; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
; GFX7LESS-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[40:41]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX7LESS-NEXT: s_waitcnt expcnt(2)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_1
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s50, -1
-; GFX9-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-NEXT: s_add_u32 s48, s48, s9
-; GFX9-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-NEXT: s_mov_b32 s33, s8
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX9-NEXT: s_mov_b32 s40, s7
-; GFX9-NEXT: s_mov_b32 s41, s6
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v41, v1
-; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
; GFX9-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_cbranch_execnz .LBB10_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s50, -1
-; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-NEXT: s_mov_b32 s33, s8
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1064-NEXT: s_mov_b32 s40, s7
-; GFX1064-NEXT: s_mov_b32 s41, s6
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: v_mov_b32_e32 v41, v1
-; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
-; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-NEXT: s_cbranch_execnz .LBB10_1
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s50, -1
-; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-NEXT: s_mov_b32 s33, s8
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1032-NEXT: s_mov_b32 s40, s7
-; GFX1032-NEXT: s_mov_b32 s41, s6
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: v_mov_b32_e32 v41, v1
-; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
; GFX1032-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-NEXT: s_cbranch_execnz .LBB10_1
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-NEXT: s_mov_b32 s33, s8
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-NEXT: s_mov_b32 s12, s6
; GFX1164-NEXT: s_mov_b32 s13, s7
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: v_mov_b32_e32 v42, v0
-; GFX1164-NEXT: s_mov_b32 s40, s7
-; GFX1164-NEXT: s_mov_b32 s41, s6
-; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: v_mov_b32_e32 v41, v1
-; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v42
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-NEXT: s_mov_b32 s12, s41
-; GFX1164-NEXT: s_mov_b32 s13, s40
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-NEXT: s_cbranch_execnz .LBB10_1
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-NEXT: s_mov_b32 s40, s14
-; GFX1132-NEXT: s_mov_b32 s41, s13
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-NEXT: s_mov_b32 s13, s14
; GFX1132-NEXT: s_mov_b32 s14, s15
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s33, s15
-; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_mov_b32 s32, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
-; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1132-NEXT: s_mov_b32 s44, 0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
; GFX1132-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-NEXT: s_mov_b32 s12, s41
-; GFX1132-NEXT: s_mov_b32 s13, s40
-; GFX1132-NEXT: s_mov_b32 s14, s33
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
-; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-NEXT: s_cbranch_execnz .LBB10_1
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s50, -1
-; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-DPP-NEXT: s_mov_b32 s33, s8
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_mov_b32 s40, s7
-; GFX9-DPP-NEXT: s_mov_b32 s41, s6
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX9-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
; GFX1032-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
-; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
-; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
-; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
; GFX1132-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
%divValue = call double @div.float.value()
- %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 8
ret void
}
-define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
@@ -7251,7 +6537,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX7LESS-NEXT: .LBB11_3:
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7294,7 +6580,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX9-NEXT: .LBB11_3:
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7335,7 +6621,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX1064-NEXT: .LBB11_3:
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7375,7 +6661,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX1032-NEXT: .LBB11_3:
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
@@ -7419,7 +6705,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX1164-NEXT: .LBB11_3:
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -7459,7 +6745,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX1132-NEXT: .LBB11_3:
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7502,7 +6788,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX9-DPP-NEXT: .LBB11_3:
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7543,7 +6829,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX1064-DPP-NEXT: .LBB11_3:
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7583,7 +6869,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX1032-DPP-NEXT: .LBB11_3:
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
@@ -7627,7 +6913,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX1164-DPP-NEXT: .LBB11_3:
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -7669,8 +6955,8 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
%result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") monotonic
ret void
}
-define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s32, 0
; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
@@ -7723,7 +7009,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7769,7 +7055,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7815,7 +7101,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7861,7 +7147,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_mov_b32 s14, s8
; GFX1164-NEXT: s_add_u32 s8, s2, 44
@@ -7898,7 +7184,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_add_u32 s8, s2, 44
; GFX1132-NEXT: s_addc_u32 s9, s3, 0
@@ -7933,7 +7219,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7979,7 +7265,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -8025,7 +7311,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -8071,7 +7357,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
@@ -8108,7 +7394,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
@@ -9094,8 +8380,8 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
ret void
}
-define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s32, 0
; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
@@ -9148,7 +8434,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9194,7 +8480,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9240,7 +8526,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9286,7 +8572,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_mov_b32 s14, s8
; GFX1164-NEXT: s_add_u32 s8, s2, 44
@@ -9323,7 +8609,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_add_u32 s8, s2, 44
; GFX1132-NEXT: s_addc_u32 s9, s3, 0
@@ -9358,7 +8644,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9404,7 +8690,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9450,7 +8736,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9496,7 +8782,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
@@ -9533,7 +8819,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
@@ -9574,1621 +8860,947 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp(ptr addrspace(1) %ptr) #2 {
; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s42, -1
-; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
-; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s2
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
-; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
-; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s14, -1
+; GFX7LESS-NEXT: s_mov_b32 s15, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s12, s12, s3
+; GFX7LESS-NEXT: s_addc_u32 s13, s13, 0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX7LESS-NEXT: s_cbranch_execz .LBB16_3
; GFX7LESS-NEXT: ; %bb.1:
-; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
-; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
-; GFX7LESS-NEXT: s_mov_b32 s1, 0x43300000
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX7LESS-NEXT: s_mov_b32 s7, 0x43300000
; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
-; GFX7LESS-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
-; GFX7LESS-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[6:7], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s8
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s9
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
; GFX7LESS-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[41:42]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
-; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX7LESS-NEXT: s_waitcnt expcnt(2)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b32 s12, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB16_2
; GFX7LESS-NEXT: .LBB16_3:
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s42, -1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX9-NEXT: s_mov_b64 s[0:1], exec
-; GFX9-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
-; GFX9-NEXT: s_add_u32 s40, s40, s3
-; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s10, -1
+; GFX9-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-NEXT: s_add_u32 s8, s8, s3
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-NEXT: s_addc_u32 s9, s9, 0
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_cbranch_execz .LBB16_3
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: v_mov_b32_e32 v0, 0
-; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
-; GFX9-NEXT: s_mov_b32 s1, 0x43300000
-; GFX9-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
-; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX9-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX9-NEXT: s_add_u32 s8, s34, 44
-; GFX9-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-NEXT: s_mov_b32 s12, s33
-; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB16_2
; GFX9-NEXT: .LBB16_3:
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s42, -1
-; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s10, -1
+; GFX1064-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s8, s8, s3
; GFX1064-NEXT: s_mov_b64 s[2:3], exec
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: s_addc_u32 s9, s9, 0
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1064-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX1064-NEXT: s_cbranch_execz .LBB16_3
; GFX1064-NEXT: ; %bb.1:
-; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1064-NEXT: s_mov_b32 s1, 0x43300000
-; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
-; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1064-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-NEXT: s_mov_b32 s12, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-NEXT: s_cbranch_execnz .LBB16_2
; GFX1064-NEXT: .LBB16_3:
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s33, s2
-; GFX1032-NEXT: s_mov_b32 s2, exec_lo
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s42, -1
-; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s10, -1
+; GFX1032-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-NEXT: s_mov_b32 s38, 0
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB16_3
; GFX1032-NEXT: ; %bb.1:
-; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1032-NEXT: s_mov_b32 s1, 0x43300000
-; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1032-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-NEXT: s_mov_b32 s12, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-NEXT: s_cbranch_execnz .LBB16_2
; GFX1032-NEXT: .LBB16_3:
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-NEXT: s_bcnt1_i32_b64 s0, exec
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: v_mov_b32_e32 v1, s2
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:20
-; GFX1164-NEXT: scratch_store_b32 off, v1, off offset:16
-; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off
; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: s_mov_b64 s[0:1], exec
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1164-NEXT: s_cbranch_execz .LBB16_3
; GFX1164-NEXT: ; %bb.1:
; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
-; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-NEXT: s_mov_b32 s33, s2
-; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b32 s12, s33
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-NEXT: s_cbranch_execnz .LBB16_2
; GFX1164-NEXT: .LBB16_3:
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:20
-; GFX1132-NEXT: scratch_store_b32 off, v1, off offset:16
-; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off offset:16
-; GFX1132-NEXT: s_mov_b32 s38, 0
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off
; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1132-NEXT: s_cbranch_execz .LBB16_3
; GFX1132-NEXT: ; %bb.1:
; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
-; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b32 s12, s33
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-NEXT: s_cbranch_execnz .LBB16_2
; GFX1132-NEXT: .LBB16_3:
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s42, -1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
-; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
-; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
-; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s10, -1
+; GFX9-DPP-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-DPP-NEXT: s_addc_u32 s9, s9, 0
; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX9-DPP-NEXT: ; %bb.1:
; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
-; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
-; GFX9-DPP-NEXT: s_mov_b32 s1, 0x43300000
-; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
-; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX9-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX9-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: s_mov_b32 s12, s33
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX9-DPP-NEXT: .LBB16_3:
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s8, s8, s3
; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s9, 0
; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX1064-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX1064-DPP-NEXT: ; %bb.1:
-; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1064-DPP-NEXT: s_mov_b32 s1, 0x43300000
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX1064-DPP-NEXT: .LBB16_3:
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX1032-DPP-NEXT: ; %bb.1:
-; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1032-DPP-NEXT: s_mov_b32 s1, 0x43300000
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX1032-DPP-NEXT: .LBB16_3:
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, exec
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s2
; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
-; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
-; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off
; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1164-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX1164-DPP-NEXT: ; %bb.1:
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
-; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX1164-DPP-NEXT: .LBB16_3:
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
-; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
-; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
-; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off
; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1132-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX1132-DPP-NEXT: ; %bb.1:
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
-; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX1132-DPP-NEXT: .LBB16_3:
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
- %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 monotonic, align 8
ret void
}
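; NOTE (editorial, not part of this commit): the only source-level change in the
; test above is the alignment on the atomicrmw (align 4 -> align 8); every
; assembly delta in these hunks follows from it. With natural 8-byte alignment
; the AMDGPU backend can expand the double fsub into an inline compare-and-swap
; loop (buffer_atomic_cmpswap_x2 / global_atomic_cmpswap_b64), where the
; underaligned form had to spill to scratch and call the
; __atomic_compare_exchange libcall. A minimal standalone sketch of the
; naturally aligned form, as a hypothetical reduction for illustration only
; (this file is not part of the diff):
;
;   define amdgpu_kernel void @fsub_align8(ptr addrspace(1) %ptr) {
;     ; align 8 is the natural alignment of double, so this can lower to a
;     ; native 64-bit cmpxchg loop instead of an __atomic_compare_exchange call
;     %r = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 monotonic, align 8
;     ret void
;   }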
define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp(ptr addrspace(1) %ptr) #2 {
; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s50, -1
-; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
-; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s8
-; GFX7LESS-NEXT: s_mov_b32 s40, s7
-; GFX7LESS-NEXT: s_mov_b32 s41, s6
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
-; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
-; GFX7LESS-NEXT: s_mov_b32 s46, -1
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
-; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
-; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
-; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
; GFX7LESS-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[40:41]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX7LESS-NEXT: s_waitcnt expcnt(2)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB17_1
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s50, -1
-; GFX9-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-NEXT: s_add_u32 s48, s48, s9
-; GFX9-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-NEXT: s_mov_b32 s33, s8
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX9-NEXT: s_mov_b32 s40, s7
-; GFX9-NEXT: s_mov_b32 s41, s6
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v41, v1
-; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
; GFX9-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_cbranch_execnz .LBB17_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s50, -1
-; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-NEXT: s_mov_b32 s33, s8
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1064-NEXT: s_mov_b32 s40, s7
-; GFX1064-NEXT: s_mov_b32 s41, s6
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: v_mov_b32_e32 v41, v1
-; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
-; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-NEXT: s_cbranch_execnz .LBB17_1
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s50, -1
-; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-NEXT: s_mov_b32 s33, s8
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1032-NEXT: s_mov_b32 s40, s7
-; GFX1032-NEXT: s_mov_b32 s41, s6
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: v_mov_b32_e32 v41, v1
-; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
; GFX1032-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-NEXT: s_cbranch_execnz .LBB17_1
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-NEXT: s_mov_b32 s33, s8
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-NEXT: s_mov_b32 s12, s6
; GFX1164-NEXT: s_mov_b32 s13, s7
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: v_mov_b32_e32 v42, v0
-; GFX1164-NEXT: s_mov_b32 s40, s7
-; GFX1164-NEXT: s_mov_b32 s41, s6
-; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: v_mov_b32_e32 v41, v1
-; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v42
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-NEXT: s_mov_b32 s12, s41
-; GFX1164-NEXT: s_mov_b32 s13, s40
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-NEXT: s_cbranch_execnz .LBB17_1
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-NEXT: s_mov_b32 s40, s14
-; GFX1132-NEXT: s_mov_b32 s41, s13
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-NEXT: s_mov_b32 s13, s14
; GFX1132-NEXT: s_mov_b32 s14, s15
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s33, s15
-; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_mov_b32 s32, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
-; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1132-NEXT: s_mov_b32 s44, 0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
; GFX1132-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-NEXT: s_mov_b32 s12, s41
-; GFX1132-NEXT: s_mov_b32 s13, s40
-; GFX1132-NEXT: s_mov_b32 s14, s33
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
-; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-NEXT: s_cbranch_execnz .LBB17_1
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s50, -1
-; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-DPP-NEXT: s_mov_b32 s33, s8
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_mov_b32 s40, s7
-; GFX9-DPP-NEXT: s_mov_b32 s41, s6
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX9-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
; GFX1032-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
-; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
-; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
-; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
; GFX1132-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
%divValue = call double @div.float.value() strictfp
- %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue monotonic, align 8
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/kernel_code_t_recurse.ll b/llvm/test/CodeGen/AMDGPU/kernel_code_t_recurse.ll
new file mode 100644
index 000000000000..cdd6e88dd103
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/kernel_code_t_recurse.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d < %s | FileCheck %s
+
+; CHECK-LABEL: non_kernel_recursion:
+define void @non_kernel_recursion(i32 %val) #2 {
+ %cmp = icmp eq i32 %val, 0
+ br i1 %cmp, label %ret, label %call
+
+call:
+ %val.sub1 = sub i32 %val, 1
+ call void @non_kernel_recursion(i32 %val.sub1)
+ br label %ret
+
+ret:
+ ret void
+}
+
+; CHECK-LABEL: kernel_caller_recursion:
+; CHECK: .amd_kernel_code_t
+; CHECK: is_dynamic_callstack = 1
+; CHECK: .end_amd_kernel_code_t
+define amdgpu_kernel void @kernel_caller_recursion(i32 %n) #0 {
+ call void @non_kernel_recursion(i32 %n)
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.pops.exiting.wave.id.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.pops.exiting.wave.id.ll
new file mode 100644
index 000000000000..4927c2ffcdf3
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.pops.exiting.wave.id.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck %s -check-prefix=SDAG
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck %s -check-prefix=GFX9-GISEL
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck %s -check-prefix=SDAG
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck %s -check-prefix=GFX10-GISEL
+
+define amdgpu_ps void @test(ptr addrspace(1) inreg %ptr) {
+; SDAG-LABEL: test:
+; SDAG: ; %bb.0:
+; SDAG-NEXT: s_mov_b32 s2, src_pops_exiting_wave_id
+; SDAG-NEXT: v_mov_b32_e32 v0, 0
+; SDAG-NEXT: v_mov_b32_e32 v1, s2
+; SDAG-NEXT: global_store_dword v0, v1, s[0:1]
+; SDAG-NEXT: s_endpgm
+;
+; GFX9-GISEL-LABEL: test:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_mov_b32 s2, src_pops_exiting_wave_id
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-GISEL-NEXT: global_store_dword v1, v0, s[0:1]
+; GFX9-GISEL-NEXT: s_endpgm
+;
+; GFX10-GISEL-LABEL: test:
+; GFX10-GISEL: ; %bb.0:
+; GFX10-GISEL-NEXT: s_mov_b32 s2, src_pops_exiting_wave_id
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v1, 0
+; GFX10-GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX10-GISEL-NEXT: global_store_dword v1, v0, s[0:1]
+; GFX10-GISEL-NEXT: s_endpgm
+ %id = call i32 @llvm.amdgcn.pops.exiting.wave.id()
+ store i32 %id, ptr addrspace(1) %ptr
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll
new file mode 100644
index 000000000000..af8023788d2e
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll
@@ -0,0 +1,820 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mcpu=tahiti -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GFX67,GFX6
+; RUN: llc -mcpu=hawaii -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GFX67,GFX7
+; RUN: llc -mcpu=fiji -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX8
+; RUN: llc -mcpu=gfx900 -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GFX910,GFX9
+; RUN: llc -mcpu=gfx1010 -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GFX910,GFX10
+; RUN: llc -mcpu=gfx1100 -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX11
+; RUN: llc -mcpu=gfx1200 -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX12
+
+define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX67-LABEL: raw_buffer_load_i8_tfe:
+; GFX67: ; %bb.0:
+; GFX67-NEXT: v_mov_b32_e32 v4, 0
+; GFX67-NEXT: v_mov_b32_e32 v5, v4
+; GFX67-NEXT: buffer_load_ubyte v[4:5], off, s[0:3], 0 tfe
+; GFX67-NEXT: s_mov_b32 s2, 0
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s0, s2
+; GFX67-NEXT: s_mov_b32 s1, s2
+; GFX67-NEXT: s_waitcnt vmcnt(0)
+; GFX67-NEXT: buffer_store_byte v4, v[0:1], s[0:3], 0 addr64
+; GFX67-NEXT: buffer_store_dword v5, v[2:3], s[0:3], 0 addr64
+; GFX67-NEXT: s_endpgm
+;
+; GFX8-LABEL: raw_buffer_load_i8_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: buffer_load_ubyte v[4:5], off, s[0:3], 0 tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_byte v[0:1], v4
+; GFX8-NEXT: flat_store_dword v[2:3], v5
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: raw_buffer_load_i8_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: buffer_load_ubyte v[4:5], off, s[0:3], 0 tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_byte v[0:1], v4, off
+; GFX910-NEXT: global_store_dword v[2:3], v5, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: raw_buffer_load_i8_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: buffer_load_u8 v[4:5], off, s[0:3], 0 tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b8 v[0:1], v4, off
+; GFX11-NEXT: global_store_b32 v[2:3], v5, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_buffer_load_i8_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mov_b32_e32 v5, v4
+; GFX12-NEXT: buffer_load_u8 v[4:5], off, s[0:3], null tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b8 v[0:1], v4, off
+; GFX12-NEXT: global_store_b32 v[2:3], v5, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { i8, i32 } @llvm.amdgcn.raw.buffer.load.sl_i8i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { i8, i32 } %res, 0
+ store i8 %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { i8, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @raw_buffer_load_i16_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX67-LABEL: raw_buffer_load_i16_tfe:
+; GFX67: ; %bb.0:
+; GFX67-NEXT: v_mov_b32_e32 v4, 0
+; GFX67-NEXT: v_mov_b32_e32 v5, v4
+; GFX67-NEXT: buffer_load_ushort v[4:5], off, s[0:3], 0 tfe
+; GFX67-NEXT: s_mov_b32 s2, 0
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s0, s2
+; GFX67-NEXT: s_mov_b32 s1, s2
+; GFX67-NEXT: s_waitcnt vmcnt(0)
+; GFX67-NEXT: buffer_store_short v4, v[0:1], s[0:3], 0 addr64
+; GFX67-NEXT: buffer_store_dword v5, v[2:3], s[0:3], 0 addr64
+; GFX67-NEXT: s_endpgm
+;
+; GFX8-LABEL: raw_buffer_load_i16_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: buffer_load_ushort v[4:5], off, s[0:3], 0 tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_short v[0:1], v4
+; GFX8-NEXT: flat_store_dword v[2:3], v5
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: raw_buffer_load_i16_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: buffer_load_ushort v[4:5], off, s[0:3], 0 tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_short v[0:1], v4, off
+; GFX910-NEXT: global_store_dword v[2:3], v5, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: raw_buffer_load_i16_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: buffer_load_u16 v[4:5], off, s[0:3], 0 tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b16 v[0:1], v4, off
+; GFX11-NEXT: global_store_b32 v[2:3], v5, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_buffer_load_i16_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mov_b32_e32 v5, v4
+; GFX12-NEXT: buffer_load_u16 v[4:5], off, s[0:3], null tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b16 v[0:1], v4, off
+; GFX12-NEXT: global_store_b32 v[2:3], v5, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { i16, i32 } @llvm.amdgcn.raw.buffer.load.sl_i16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { i16, i32 } %res, 0
+ store i16 %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { i16, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @raw_buffer_load_f16_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX67-LABEL: raw_buffer_load_f16_tfe:
+; GFX67: ; %bb.0:
+; GFX67-NEXT: v_mov_b32_e32 v4, 0
+; GFX67-NEXT: v_mov_b32_e32 v5, v4
+; GFX67-NEXT: buffer_load_ushort v[4:5], off, s[0:3], 0 tfe
+; GFX67-NEXT: s_mov_b32 s2, 0
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s0, s2
+; GFX67-NEXT: s_mov_b32 s1, s2
+; GFX67-NEXT: s_waitcnt vmcnt(0)
+; GFX67-NEXT: buffer_store_short v4, v[0:1], s[0:3], 0 addr64
+; GFX67-NEXT: buffer_store_dword v5, v[2:3], s[0:3], 0 addr64
+; GFX67-NEXT: s_endpgm
+;
+; GFX8-LABEL: raw_buffer_load_f16_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: buffer_load_ushort v[4:5], off, s[0:3], 0 tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_short v[0:1], v4
+; GFX8-NEXT: flat_store_dword v[2:3], v5
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: raw_buffer_load_f16_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: buffer_load_ushort v[4:5], off, s[0:3], 0 tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_short v[0:1], v4, off
+; GFX910-NEXT: global_store_dword v[2:3], v5, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: raw_buffer_load_f16_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: buffer_load_u16 v[4:5], off, s[0:3], 0 tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b16 v[0:1], v4, off
+; GFX11-NEXT: global_store_b32 v[2:3], v5, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_buffer_load_f16_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mov_b32_e32 v5, v4
+; GFX12-NEXT: buffer_load_u16 v[4:5], off, s[0:3], null tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b16 v[0:1], v4, off
+; GFX12-NEXT: global_store_b32 v[2:3], v5, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { half, i32 } @llvm.amdgcn.raw.buffer.load.sl_f16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { half, i32 } %res, 0
+ store half %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { half, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @raw_buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX67-LABEL: raw_buffer_load_i32_tfe:
+; GFX67: ; %bb.0:
+; GFX67-NEXT: v_mov_b32_e32 v4, 0
+; GFX67-NEXT: v_mov_b32_e32 v5, v4
+; GFX67-NEXT: buffer_load_dword v[4:5], off, s[0:3], 0 tfe
+; GFX67-NEXT: s_mov_b32 s2, 0
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s0, s2
+; GFX67-NEXT: s_mov_b32 s1, s2
+; GFX67-NEXT: s_waitcnt vmcnt(0)
+; GFX67-NEXT: buffer_store_dword v4, v[0:1], s[0:3], 0 addr64
+; GFX67-NEXT: buffer_store_dword v5, v[2:3], s[0:3], 0 addr64
+; GFX67-NEXT: s_endpgm
+;
+; GFX8-LABEL: raw_buffer_load_i32_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: buffer_load_dword v[4:5], off, s[0:3], 0 tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_dword v[0:1], v4
+; GFX8-NEXT: flat_store_dword v[2:3], v5
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: raw_buffer_load_i32_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: buffer_load_dword v[4:5], off, s[0:3], 0 tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_dword v[0:1], v4, off
+; GFX910-NEXT: global_store_dword v[2:3], v5, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: raw_buffer_load_i32_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: buffer_load_b32 v[4:5], off, s[0:3], 0 tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b32 v[0:1], v4, off
+; GFX11-NEXT: global_store_b32 v[2:3], v5, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_buffer_load_i32_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mov_b32_e32 v5, v4
+; GFX12-NEXT: buffer_load_b32 v[4:5], off, s[0:3], null tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b32 v[0:1], v4, off
+; GFX12-NEXT: global_store_b32 v[2:3], v5, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { i32, i32 } @llvm.amdgcn.raw.buffer.load.sl_i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { i32, i32 } %res, 0
+ store i32 %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { i32, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @raw_buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX6-LABEL: raw_buffer_load_v2i32_tfe:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v4, 0
+; GFX6-NEXT: v_mov_b32_e32 v5, v4
+; GFX6-NEXT: v_mov_b32_e32 v6, v4
+; GFX6-NEXT: v_mov_b32_e32 v7, v4
+; GFX6-NEXT: buffer_load_dwordx3 v[4:7], off, s[0:3], 0 tfe
+; GFX6-NEXT: s_mov_b32 s2, 0
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s0, s2
+; GFX6-NEXT: s_mov_b32 s1, s2
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_store_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64
+; GFX6-NEXT: buffer_store_dword v6, v[2:3], s[0:3], 0 addr64
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: raw_buffer_load_v2i32_tfe:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: v_mov_b32_e32 v4, 0
+; GFX7-NEXT: v_mov_b32_e32 v5, v4
+; GFX7-NEXT: v_mov_b32_e32 v6, v4
+; GFX7-NEXT: buffer_load_dwordx2 v[4:6], off, s[0:3], 0 tfe
+; GFX7-NEXT: s_mov_b32 s2, 0
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s0, s2
+; GFX7-NEXT: s_mov_b32 s1, s2
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_store_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64
+; GFX7-NEXT: buffer_store_dword v6, v[2:3], s[0:3], 0 addr64
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: raw_buffer_load_v2i32_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: buffer_load_dwordx2 v[4:6], off, s[0:3], 0 tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
+; GFX8-NEXT: flat_store_dword v[2:3], v6
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: raw_buffer_load_v2i32_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: v_mov_b32_e32 v6, v4
+; GFX910-NEXT: buffer_load_dwordx2 v[4:6], off, s[0:3], 0 tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_dwordx2 v[0:1], v[4:5], off
+; GFX910-NEXT: global_store_dword v[2:3], v6, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: raw_buffer_load_v2i32_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: buffer_load_b64 v[4:6], off, s[0:3], 0 tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b64 v[0:1], v[4:5], off
+; GFX11-NEXT: global_store_b32 v[2:3], v6, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_buffer_load_v2i32_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v6, v4
+; GFX12-NEXT: buffer_load_b64 v[4:6], off, s[0:3], null tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b64 v[0:1], v[4:5], off
+; GFX12-NEXT: global_store_b32 v[2:3], v6, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { <2 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v2i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { <2 x i32>, i32 } %res, 0
+ store <2 x i32> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <2 x i32>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @raw_buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX6-LABEL: raw_buffer_load_v2f32_tfe:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v4, 0
+; GFX6-NEXT: v_mov_b32_e32 v5, v4
+; GFX6-NEXT: v_mov_b32_e32 v6, v4
+; GFX6-NEXT: v_mov_b32_e32 v7, v4
+; GFX6-NEXT: buffer_load_dwordx3 v[4:7], off, s[0:3], 0 tfe
+; GFX6-NEXT: s_mov_b32 s2, 0
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s0, s2
+; GFX6-NEXT: s_mov_b32 s1, s2
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_store_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64
+; GFX6-NEXT: buffer_store_dword v6, v[2:3], s[0:3], 0 addr64
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: raw_buffer_load_v2f32_tfe:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: v_mov_b32_e32 v4, 0
+; GFX7-NEXT: v_mov_b32_e32 v5, v4
+; GFX7-NEXT: v_mov_b32_e32 v6, v4
+; GFX7-NEXT: buffer_load_dwordx2 v[4:6], off, s[0:3], 0 tfe
+; GFX7-NEXT: s_mov_b32 s2, 0
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s0, s2
+; GFX7-NEXT: s_mov_b32 s1, s2
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_store_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64
+; GFX7-NEXT: buffer_store_dword v6, v[2:3], s[0:3], 0 addr64
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: raw_buffer_load_v2f32_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: buffer_load_dwordx2 v[4:6], off, s[0:3], 0 tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
+; GFX8-NEXT: flat_store_dword v[2:3], v6
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: raw_buffer_load_v2f32_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: v_mov_b32_e32 v6, v4
+; GFX910-NEXT: buffer_load_dwordx2 v[4:6], off, s[0:3], 0 tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_dwordx2 v[0:1], v[4:5], off
+; GFX910-NEXT: global_store_dword v[2:3], v6, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: raw_buffer_load_v2f32_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: buffer_load_b64 v[4:6], off, s[0:3], 0 tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b64 v[0:1], v[4:5], off
+; GFX11-NEXT: global_store_b32 v[2:3], v6, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_buffer_load_v2f32_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v6, v4
+; GFX12-NEXT: buffer_load_b64 v[4:6], off, s[0:3], null tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b64 v[0:1], v[4:5], off
+; GFX12-NEXT: global_store_b32 v[2:3], v6, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { <2 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v2f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { <2 x float>, i32 } %res, 0
+ store <2 x float> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <2 x float>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @raw_buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX6-LABEL: raw_buffer_load_v3i32_tfe:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v4, 0
+; GFX6-NEXT: v_mov_b32_e32 v5, v4
+; GFX6-NEXT: v_mov_b32_e32 v6, v4
+; GFX6-NEXT: v_mov_b32_e32 v7, v4
+; GFX6-NEXT: buffer_load_dwordx3 v[4:7], off, s[0:3], 0 tfe
+; GFX6-NEXT: s_mov_b32 s2, 0
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s0, s2
+; GFX6-NEXT: s_mov_b32 s1, s2
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_store_dword v6, v[0:1], s[0:3], 0 addr64 offset:8
+; GFX6-NEXT: buffer_store_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64
+; GFX6-NEXT: buffer_store_dword v7, v[2:3], s[0:3], 0 addr64
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: raw_buffer_load_v3i32_tfe:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: v_mov_b32_e32 v4, 0
+; GFX7-NEXT: v_mov_b32_e32 v5, v4
+; GFX7-NEXT: v_mov_b32_e32 v6, v4
+; GFX7-NEXT: v_mov_b32_e32 v7, v4
+; GFX7-NEXT: buffer_load_dwordx3 v[4:7], off, s[0:3], 0 tfe
+; GFX7-NEXT: s_mov_b32 s2, 0
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s0, s2
+; GFX7-NEXT: s_mov_b32 s1, s2
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_store_dwordx3 v[4:6], v[0:1], s[0:3], 0 addr64
+; GFX7-NEXT: buffer_store_dword v7, v[2:3], s[0:3], 0 addr64
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: raw_buffer_load_v3i32_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v4
+; GFX8-NEXT: buffer_load_dwordx3 v[4:7], off, s[0:3], 0 tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_dwordx3 v[0:1], v[4:6]
+; GFX8-NEXT: flat_store_dword v[2:3], v7
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: raw_buffer_load_v3i32_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: v_mov_b32_e32 v6, v4
+; GFX910-NEXT: v_mov_b32_e32 v7, v4
+; GFX910-NEXT: buffer_load_dwordx3 v[4:7], off, s[0:3], 0 tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_dwordx3 v[0:1], v[4:6], off
+; GFX910-NEXT: global_store_dword v[2:3], v7, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: raw_buffer_load_v3i32_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_mov_b32_e32 v7, v4
+; GFX11-NEXT: buffer_load_b96 v[4:7], off, s[0:3], 0 tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b96 v[0:1], v[4:6], off
+; GFX11-NEXT: global_store_b32 v[2:3], v7, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_buffer_load_v3i32_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v6, v4
+; GFX12-NEXT: v_mov_b32_e32 v7, v4
+; GFX12-NEXT: buffer_load_b96 v[4:7], off, s[0:3], null tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b96 v[0:1], v[4:6], off
+; GFX12-NEXT: global_store_b32 v[2:3], v7, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { <3 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v3i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { <3 x i32>, i32 } %res, 0
+ store <3 x i32> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <3 x i32>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @raw_buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX6-LABEL: raw_buffer_load_v3f32_tfe:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v4, 0
+; GFX6-NEXT: v_mov_b32_e32 v5, v4
+; GFX6-NEXT: v_mov_b32_e32 v6, v4
+; GFX6-NEXT: v_mov_b32_e32 v7, v4
+; GFX6-NEXT: buffer_load_dwordx3 v[4:7], off, s[0:3], 0 tfe
+; GFX6-NEXT: s_mov_b32 s2, 0
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s0, s2
+; GFX6-NEXT: s_mov_b32 s1, s2
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_store_dword v6, v[0:1], s[0:3], 0 addr64 offset:8
+; GFX6-NEXT: buffer_store_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64
+; GFX6-NEXT: buffer_store_dword v7, v[2:3], s[0:3], 0 addr64
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: raw_buffer_load_v3f32_tfe:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: v_mov_b32_e32 v4, 0
+; GFX7-NEXT: v_mov_b32_e32 v5, v4
+; GFX7-NEXT: v_mov_b32_e32 v6, v4
+; GFX7-NEXT: v_mov_b32_e32 v7, v4
+; GFX7-NEXT: buffer_load_dwordx3 v[4:7], off, s[0:3], 0 tfe
+; GFX7-NEXT: s_mov_b32 s2, 0
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s0, s2
+; GFX7-NEXT: s_mov_b32 s1, s2
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_store_dwordx3 v[4:6], v[0:1], s[0:3], 0 addr64
+; GFX7-NEXT: buffer_store_dword v7, v[2:3], s[0:3], 0 addr64
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: raw_buffer_load_v3f32_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v4
+; GFX8-NEXT: buffer_load_dwordx3 v[4:7], off, s[0:3], 0 tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_dwordx3 v[0:1], v[4:6]
+; GFX8-NEXT: flat_store_dword v[2:3], v7
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: raw_buffer_load_v3f32_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: v_mov_b32_e32 v6, v4
+; GFX910-NEXT: v_mov_b32_e32 v7, v4
+; GFX910-NEXT: buffer_load_dwordx3 v[4:7], off, s[0:3], 0 tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_dwordx3 v[0:1], v[4:6], off
+; GFX910-NEXT: global_store_dword v[2:3], v7, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: raw_buffer_load_v3f32_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_mov_b32_e32 v7, v4
+; GFX11-NEXT: buffer_load_b96 v[4:7], off, s[0:3], 0 tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b96 v[0:1], v[4:6], off
+; GFX11-NEXT: global_store_b32 v[2:3], v7, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_buffer_load_v3f32_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v6, v4
+; GFX12-NEXT: v_mov_b32_e32 v7, v4
+; GFX12-NEXT: buffer_load_b96 v[4:7], off, s[0:3], null tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b96 v[0:1], v[4:6], off
+; GFX12-NEXT: global_store_b32 v[2:3], v7, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { <3 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v3f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { <3 x float>, i32 } %res, 0
+ store <3 x float> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <3 x float>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @raw_buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX67-LABEL: raw_buffer_load_v4i32_tfe:
+; GFX67: ; %bb.0:
+; GFX67-NEXT: v_mov_b32_e32 v4, 0
+; GFX67-NEXT: v_mov_b32_e32 v5, v4
+; GFX67-NEXT: v_mov_b32_e32 v6, v4
+; GFX67-NEXT: v_mov_b32_e32 v7, v4
+; GFX67-NEXT: v_mov_b32_e32 v8, v4
+; GFX67-NEXT: buffer_load_dwordx4 v[4:8], off, s[0:3], 0 tfe
+; GFX67-NEXT: s_mov_b32 s2, 0
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s0, s2
+; GFX67-NEXT: s_mov_b32 s1, s2
+; GFX67-NEXT: s_waitcnt vmcnt(0)
+; GFX67-NEXT: buffer_store_dwordx4 v[4:7], v[0:1], s[0:3], 0 addr64
+; GFX67-NEXT: buffer_store_dword v8, v[2:3], s[0:3], 0 addr64
+; GFX67-NEXT: s_endpgm
+;
+; GFX8-LABEL: raw_buffer_load_v4i32_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v4
+; GFX8-NEXT: v_mov_b32_e32 v8, v4
+; GFX8-NEXT: buffer_load_dwordx4 v[4:8], off, s[0:3], 0 tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
+; GFX8-NEXT: flat_store_dword v[2:3], v8
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: raw_buffer_load_v4i32_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: v_mov_b32_e32 v6, v4
+; GFX910-NEXT: v_mov_b32_e32 v7, v4
+; GFX910-NEXT: v_mov_b32_e32 v8, v4
+; GFX910-NEXT: buffer_load_dwordx4 v[4:8], off, s[0:3], 0 tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
+; GFX910-NEXT: global_store_dword v[2:3], v8, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: raw_buffer_load_v4i32_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_mov_b32_e32 v7, v4
+; GFX11-NEXT: v_mov_b32_e32 v8, v4
+; GFX11-NEXT: buffer_load_b128 v[4:8], off, s[0:3], 0 tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b128 v[0:1], v[4:7], off
+; GFX11-NEXT: global_store_b32 v[2:3], v8, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_buffer_load_v4i32_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v6, v4
+; GFX12-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_mov_b32 v8, v4
+; GFX12-NEXT: buffer_load_b128 v[4:8], off, s[0:3], null tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b128 v[0:1], v[4:7], off
+; GFX12-NEXT: global_store_b32 v[2:3], v8, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { <4 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v4i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { <4 x i32>, i32 } %res, 0
+ store <4 x i32> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <4 x i32>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @raw_buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX67-LABEL: raw_buffer_load_v4f32_tfe:
+; GFX67: ; %bb.0:
+; GFX67-NEXT: v_mov_b32_e32 v4, 0
+; GFX67-NEXT: v_mov_b32_e32 v5, v4
+; GFX67-NEXT: v_mov_b32_e32 v6, v4
+; GFX67-NEXT: v_mov_b32_e32 v7, v4
+; GFX67-NEXT: v_mov_b32_e32 v8, v4
+; GFX67-NEXT: buffer_load_dwordx4 v[4:8], off, s[0:3], 0 tfe
+; GFX67-NEXT: s_mov_b32 s2, 0
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s0, s2
+; GFX67-NEXT: s_mov_b32 s1, s2
+; GFX67-NEXT: s_waitcnt vmcnt(0)
+; GFX67-NEXT: buffer_store_dwordx4 v[4:7], v[0:1], s[0:3], 0 addr64
+; GFX67-NEXT: buffer_store_dword v8, v[2:3], s[0:3], 0 addr64
+; GFX67-NEXT: s_endpgm
+;
+; GFX8-LABEL: raw_buffer_load_v4f32_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v4
+; GFX8-NEXT: v_mov_b32_e32 v8, v4
+; GFX8-NEXT: buffer_load_dwordx4 v[4:8], off, s[0:3], 0 tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
+; GFX8-NEXT: flat_store_dword v[2:3], v8
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: raw_buffer_load_v4f32_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: v_mov_b32_e32 v6, v4
+; GFX910-NEXT: v_mov_b32_e32 v7, v4
+; GFX910-NEXT: v_mov_b32_e32 v8, v4
+; GFX910-NEXT: buffer_load_dwordx4 v[4:8], off, s[0:3], 0 tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
+; GFX910-NEXT: global_store_dword v[2:3], v8, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: raw_buffer_load_v4f32_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_mov_b32_e32 v7, v4
+; GFX11-NEXT: v_mov_b32_e32 v8, v4
+; GFX11-NEXT: buffer_load_b128 v[4:8], off, s[0:3], 0 tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b128 v[0:1], v[4:7], off
+; GFX11-NEXT: global_store_b32 v[2:3], v8, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_buffer_load_v4f32_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v6, v4
+; GFX12-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_mov_b32 v8, v4
+; GFX12-NEXT: buffer_load_b128 v[4:8], off, s[0:3], null tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b128 v[0:1], v[4:7], off
+; GFX12-NEXT: global_store_b32 v[2:3], v8, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { <4 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v4f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
+ %data = extractvalue { <4 x float>, i32 } %res, 0
+ store <4 x float> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <4 x float>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+declare { i8, i32 } @llvm.amdgcn.raw.buffer.load.sl_i8i32s(<4 x i32>, i32, i32, i32)
+declare { i16, i32 } @llvm.amdgcn.raw.buffer.load.sl_i16i32s(<4 x i32>, i32, i32, i32)
+declare { half, i32 } @llvm.amdgcn.raw.buffer.load.sl_f16i32s(<4 x i32>, i32, i32, i32)
+declare { i32, i32 } @llvm.amdgcn.raw.buffer.load.sl_i32i32s(<4 x i32>, i32, i32, i32)
+declare { <2 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v2i32i32s(<4 x i32>, i32, i32, i32)
+declare { <2 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v2f32i32s(<4 x i32>, i32, i32, i32)
+declare { <3 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v3i32i32s(<4 x i32>, i32, i32, i32)
+declare { <3 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v3f32i32s(<4 x i32>, i32, i32, i32)
+declare { <4 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v4i32i32s(<4 x i32>, i32, i32, i32)
+declare { <4 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v4f32i32s(<4 x i32>, i32, i32, i32)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX10: {{.*}}
+; GFX9: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll
new file mode 100644
index 000000000000..c99a082afe2d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll
@@ -0,0 +1,820 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mcpu=tahiti -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GFX67,GFX6
+; RUN: llc -mcpu=hawaii -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GFX67,GFX7
+; RUN: llc -mcpu=fiji -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX8
+; RUN: llc -mcpu=gfx900 -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GFX910,GFX9
+; RUN: llc -mcpu=gfx1010 -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GFX910,GFX10
+; RUN: llc -mcpu=gfx1100 -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX11
+; RUN: llc -mcpu=gfx1200 -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck %s -check-prefix=GFX12
+
+define amdgpu_ps void @struct_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX67-LABEL: struct_buffer_load_i8_tfe:
+; GFX67: ; %bb.0:
+; GFX67-NEXT: v_mov_b32_e32 v4, 0
+; GFX67-NEXT: v_mov_b32_e32 v5, v4
+; GFX67-NEXT: buffer_load_ubyte v[4:5], v4, s[0:3], 0 idxen tfe
+; GFX67-NEXT: s_mov_b32 s2, 0
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s0, s2
+; GFX67-NEXT: s_mov_b32 s1, s2
+; GFX67-NEXT: s_waitcnt vmcnt(0)
+; GFX67-NEXT: buffer_store_byte v4, v[0:1], s[0:3], 0 addr64
+; GFX67-NEXT: buffer_store_dword v5, v[2:3], s[0:3], 0 addr64
+; GFX67-NEXT: s_endpgm
+;
+; GFX8-LABEL: struct_buffer_load_i8_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: buffer_load_ubyte v[4:5], v4, s[0:3], 0 idxen tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_byte v[0:1], v4
+; GFX8-NEXT: flat_store_dword v[2:3], v5
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: struct_buffer_load_i8_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: buffer_load_ubyte v[4:5], v4, s[0:3], 0 idxen tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_byte v[0:1], v4, off
+; GFX910-NEXT: global_store_dword v[2:3], v5, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: struct_buffer_load_i8_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: buffer_load_u8 v[4:5], v4, s[0:3], 0 idxen tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b8 v[0:1], v4, off
+; GFX11-NEXT: global_store_b32 v[2:3], v5, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_load_i8_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mov_b32_e32 v5, v4
+; GFX12-NEXT: buffer_load_u8 v[4:5], v4, s[0:3], null idxen tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b8 v[0:1], v4, off
+; GFX12-NEXT: global_store_b32 v[2:3], v5, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { i8, i32 } @llvm.amdgcn.struct.buffer.load.sl_i8i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { i8, i32 } %res, 0
+ store i8 %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { i8, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @struct_buffer_load_i16_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX67-LABEL: struct_buffer_load_i16_tfe:
+; GFX67: ; %bb.0:
+; GFX67-NEXT: v_mov_b32_e32 v4, 0
+; GFX67-NEXT: v_mov_b32_e32 v5, v4
+; GFX67-NEXT: buffer_load_ushort v[4:5], v4, s[0:3], 0 idxen tfe
+; GFX67-NEXT: s_mov_b32 s2, 0
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s0, s2
+; GFX67-NEXT: s_mov_b32 s1, s2
+; GFX67-NEXT: s_waitcnt vmcnt(0)
+; GFX67-NEXT: buffer_store_short v4, v[0:1], s[0:3], 0 addr64
+; GFX67-NEXT: buffer_store_dword v5, v[2:3], s[0:3], 0 addr64
+; GFX67-NEXT: s_endpgm
+;
+; GFX8-LABEL: struct_buffer_load_i16_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: buffer_load_ushort v[4:5], v4, s[0:3], 0 idxen tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_short v[0:1], v4
+; GFX8-NEXT: flat_store_dword v[2:3], v5
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: struct_buffer_load_i16_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: buffer_load_ushort v[4:5], v4, s[0:3], 0 idxen tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_short v[0:1], v4, off
+; GFX910-NEXT: global_store_dword v[2:3], v5, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: struct_buffer_load_i16_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: buffer_load_u16 v[4:5], v4, s[0:3], 0 idxen tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b16 v[0:1], v4, off
+; GFX11-NEXT: global_store_b32 v[2:3], v5, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_load_i16_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mov_b32_e32 v5, v4
+; GFX12-NEXT: buffer_load_u16 v[4:5], v4, s[0:3], null idxen tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b16 v[0:1], v4, off
+; GFX12-NEXT: global_store_b32 v[2:3], v5, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { i16, i32 } @llvm.amdgcn.struct.buffer.load.sl_i16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { i16, i32 } %res, 0
+ store i16 %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { i16, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @struct_buffer_load_f16_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX67-LABEL: struct_buffer_load_f16_tfe:
+; GFX67: ; %bb.0:
+; GFX67-NEXT: v_mov_b32_e32 v4, 0
+; GFX67-NEXT: v_mov_b32_e32 v5, v4
+; GFX67-NEXT: buffer_load_ushort v[4:5], v4, s[0:3], 0 idxen tfe
+; GFX67-NEXT: s_mov_b32 s2, 0
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s0, s2
+; GFX67-NEXT: s_mov_b32 s1, s2
+; GFX67-NEXT: s_waitcnt vmcnt(0)
+; GFX67-NEXT: buffer_store_short v4, v[0:1], s[0:3], 0 addr64
+; GFX67-NEXT: buffer_store_dword v5, v[2:3], s[0:3], 0 addr64
+; GFX67-NEXT: s_endpgm
+;
+; GFX8-LABEL: struct_buffer_load_f16_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: buffer_load_ushort v[4:5], v4, s[0:3], 0 idxen tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_short v[0:1], v4
+; GFX8-NEXT: flat_store_dword v[2:3], v5
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: struct_buffer_load_f16_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: buffer_load_ushort v[4:5], v4, s[0:3], 0 idxen tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_short v[0:1], v4, off
+; GFX910-NEXT: global_store_dword v[2:3], v5, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: struct_buffer_load_f16_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: buffer_load_u16 v[4:5], v4, s[0:3], 0 idxen tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b16 v[0:1], v4, off
+; GFX11-NEXT: global_store_b32 v[2:3], v5, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_load_f16_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mov_b32_e32 v5, v4
+; GFX12-NEXT: buffer_load_u16 v[4:5], v4, s[0:3], null idxen tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b16 v[0:1], v4, off
+; GFX12-NEXT: global_store_b32 v[2:3], v5, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { half, i32 } @llvm.amdgcn.struct.buffer.load.sl_f16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { half, i32 } %res, 0
+ store half %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { half, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @struct_buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX67-LABEL: struct_buffer_load_i32_tfe:
+; GFX67: ; %bb.0:
+; GFX67-NEXT: v_mov_b32_e32 v4, 0
+; GFX67-NEXT: v_mov_b32_e32 v5, v4
+; GFX67-NEXT: buffer_load_dword v[4:5], v4, s[0:3], 0 idxen tfe
+; GFX67-NEXT: s_mov_b32 s2, 0
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s0, s2
+; GFX67-NEXT: s_mov_b32 s1, s2
+; GFX67-NEXT: s_waitcnt vmcnt(0)
+; GFX67-NEXT: buffer_store_dword v4, v[0:1], s[0:3], 0 addr64
+; GFX67-NEXT: buffer_store_dword v5, v[2:3], s[0:3], 0 addr64
+; GFX67-NEXT: s_endpgm
+;
+; GFX8-LABEL: struct_buffer_load_i32_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: buffer_load_dword v[4:5], v4, s[0:3], 0 idxen tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_dword v[0:1], v4
+; GFX8-NEXT: flat_store_dword v[2:3], v5
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: struct_buffer_load_i32_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: buffer_load_dword v[4:5], v4, s[0:3], 0 idxen tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_dword v[0:1], v4, off
+; GFX910-NEXT: global_store_dword v[2:3], v5, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: struct_buffer_load_i32_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: buffer_load_b32 v[4:5], v4, s[0:3], 0 idxen tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b32 v[0:1], v4, off
+; GFX11-NEXT: global_store_b32 v[2:3], v5, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_load_i32_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mov_b32_e32 v5, v4
+; GFX12-NEXT: buffer_load_b32 v[4:5], v4, s[0:3], null idxen tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b32 v[0:1], v4, off
+; GFX12-NEXT: global_store_b32 v[2:3], v5, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { i32, i32 } @llvm.amdgcn.struct.buffer.load.sl_i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { i32, i32 } %res, 0
+ store i32 %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { i32, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @struct_buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX6-LABEL: struct_buffer_load_v2i32_tfe:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v4, 0
+; GFX6-NEXT: v_mov_b32_e32 v5, v4
+; GFX6-NEXT: v_mov_b32_e32 v6, v4
+; GFX6-NEXT: v_mov_b32_e32 v7, v4
+; GFX6-NEXT: buffer_load_dwordx3 v[4:7], v4, s[0:3], 0 idxen tfe
+; GFX6-NEXT: s_mov_b32 s2, 0
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s0, s2
+; GFX6-NEXT: s_mov_b32 s1, s2
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_store_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64
+; GFX6-NEXT: buffer_store_dword v6, v[2:3], s[0:3], 0 addr64
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: struct_buffer_load_v2i32_tfe:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: v_mov_b32_e32 v4, 0
+; GFX7-NEXT: v_mov_b32_e32 v5, v4
+; GFX7-NEXT: v_mov_b32_e32 v6, v4
+; GFX7-NEXT: buffer_load_dwordx2 v[4:6], v4, s[0:3], 0 idxen tfe
+; GFX7-NEXT: s_mov_b32 s2, 0
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s0, s2
+; GFX7-NEXT: s_mov_b32 s1, s2
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_store_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64
+; GFX7-NEXT: buffer_store_dword v6, v[2:3], s[0:3], 0 addr64
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: struct_buffer_load_v2i32_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: buffer_load_dwordx2 v[4:6], v4, s[0:3], 0 idxen tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
+; GFX8-NEXT: flat_store_dword v[2:3], v6
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: struct_buffer_load_v2i32_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: v_mov_b32_e32 v6, v4
+; GFX910-NEXT: buffer_load_dwordx2 v[4:6], v4, s[0:3], 0 idxen tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_dwordx2 v[0:1], v[4:5], off
+; GFX910-NEXT: global_store_dword v[2:3], v6, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: struct_buffer_load_v2i32_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: buffer_load_b64 v[4:6], v4, s[0:3], 0 idxen tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b64 v[0:1], v[4:5], off
+; GFX11-NEXT: global_store_b32 v[2:3], v6, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_load_v2i32_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v6, v4
+; GFX12-NEXT: buffer_load_b64 v[4:6], v4, s[0:3], null idxen tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b64 v[0:1], v[4:5], off
+; GFX12-NEXT: global_store_b32 v[2:3], v6, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { <2 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v2i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { <2 x i32>, i32 } %res, 0
+ store <2 x i32> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <2 x i32>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @struct_buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX6-LABEL: struct_buffer_load_v2f32_tfe:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v4, 0
+; GFX6-NEXT: v_mov_b32_e32 v5, v4
+; GFX6-NEXT: v_mov_b32_e32 v6, v4
+; GFX6-NEXT: v_mov_b32_e32 v7, v4
+; GFX6-NEXT: buffer_load_dwordx3 v[4:7], v4, s[0:3], 0 idxen tfe
+; GFX6-NEXT: s_mov_b32 s2, 0
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s0, s2
+; GFX6-NEXT: s_mov_b32 s1, s2
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_store_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64
+; GFX6-NEXT: buffer_store_dword v6, v[2:3], s[0:3], 0 addr64
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: struct_buffer_load_v2f32_tfe:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: v_mov_b32_e32 v4, 0
+; GFX7-NEXT: v_mov_b32_e32 v5, v4
+; GFX7-NEXT: v_mov_b32_e32 v6, v4
+; GFX7-NEXT: buffer_load_dwordx2 v[4:6], v4, s[0:3], 0 idxen tfe
+; GFX7-NEXT: s_mov_b32 s2, 0
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s0, s2
+; GFX7-NEXT: s_mov_b32 s1, s2
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_store_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64
+; GFX7-NEXT: buffer_store_dword v6, v[2:3], s[0:3], 0 addr64
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: struct_buffer_load_v2f32_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: buffer_load_dwordx2 v[4:6], v4, s[0:3], 0 idxen tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
+; GFX8-NEXT: flat_store_dword v[2:3], v6
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: struct_buffer_load_v2f32_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: v_mov_b32_e32 v6, v4
+; GFX910-NEXT: buffer_load_dwordx2 v[4:6], v4, s[0:3], 0 idxen tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_dwordx2 v[0:1], v[4:5], off
+; GFX910-NEXT: global_store_dword v[2:3], v6, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: struct_buffer_load_v2f32_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: buffer_load_b64 v[4:6], v4, s[0:3], 0 idxen tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b64 v[0:1], v[4:5], off
+; GFX11-NEXT: global_store_b32 v[2:3], v6, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_load_v2f32_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v6, v4
+; GFX12-NEXT: buffer_load_b64 v[4:6], v4, s[0:3], null idxen tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b64 v[0:1], v[4:5], off
+; GFX12-NEXT: global_store_b32 v[2:3], v6, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { <2 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v2f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { <2 x float>, i32 } %res, 0
+ store <2 x float> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <2 x float>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @struct_buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX6-LABEL: struct_buffer_load_v3i32_tfe:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v4, 0
+; GFX6-NEXT: v_mov_b32_e32 v5, v4
+; GFX6-NEXT: v_mov_b32_e32 v6, v4
+; GFX6-NEXT: v_mov_b32_e32 v7, v4
+; GFX6-NEXT: buffer_load_dwordx3 v[4:7], v4, s[0:3], 0 idxen tfe
+; GFX6-NEXT: s_mov_b32 s2, 0
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s0, s2
+; GFX6-NEXT: s_mov_b32 s1, s2
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_store_dword v6, v[0:1], s[0:3], 0 addr64 offset:8
+; GFX6-NEXT: buffer_store_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64
+; GFX6-NEXT: buffer_store_dword v7, v[2:3], s[0:3], 0 addr64
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: struct_buffer_load_v3i32_tfe:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: v_mov_b32_e32 v4, 0
+; GFX7-NEXT: v_mov_b32_e32 v5, v4
+; GFX7-NEXT: v_mov_b32_e32 v6, v4
+; GFX7-NEXT: v_mov_b32_e32 v7, v4
+; GFX7-NEXT: buffer_load_dwordx3 v[4:7], v4, s[0:3], 0 idxen tfe
+; GFX7-NEXT: s_mov_b32 s2, 0
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s0, s2
+; GFX7-NEXT: s_mov_b32 s1, s2
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_store_dwordx3 v[4:6], v[0:1], s[0:3], 0 addr64
+; GFX7-NEXT: buffer_store_dword v7, v[2:3], s[0:3], 0 addr64
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: struct_buffer_load_v3i32_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v4
+; GFX8-NEXT: buffer_load_dwordx3 v[4:7], v4, s[0:3], 0 idxen tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_dwordx3 v[0:1], v[4:6]
+; GFX8-NEXT: flat_store_dword v[2:3], v7
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: struct_buffer_load_v3i32_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: v_mov_b32_e32 v6, v4
+; GFX910-NEXT: v_mov_b32_e32 v7, v4
+; GFX910-NEXT: buffer_load_dwordx3 v[4:7], v4, s[0:3], 0 idxen tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_dwordx3 v[0:1], v[4:6], off
+; GFX910-NEXT: global_store_dword v[2:3], v7, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: struct_buffer_load_v3i32_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_mov_b32_e32 v7, v4
+; GFX11-NEXT: buffer_load_b96 v[4:7], v4, s[0:3], 0 idxen tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b96 v[0:1], v[4:6], off
+; GFX11-NEXT: global_store_b32 v[2:3], v7, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_load_v3i32_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v6, v4
+; GFX12-NEXT: v_mov_b32_e32 v7, v4
+; GFX12-NEXT: buffer_load_b96 v[4:7], v4, s[0:3], null idxen tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b96 v[0:1], v[4:6], off
+; GFX12-NEXT: global_store_b32 v[2:3], v7, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { <3 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v3i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { <3 x i32>, i32 } %res, 0
+ store <3 x i32> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <3 x i32>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @struct_buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX6-LABEL: struct_buffer_load_v3f32_tfe:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v4, 0
+; GFX6-NEXT: v_mov_b32_e32 v5, v4
+; GFX6-NEXT: v_mov_b32_e32 v6, v4
+; GFX6-NEXT: v_mov_b32_e32 v7, v4
+; GFX6-NEXT: buffer_load_dwordx3 v[4:7], v4, s[0:3], 0 idxen tfe
+; GFX6-NEXT: s_mov_b32 s2, 0
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s0, s2
+; GFX6-NEXT: s_mov_b32 s1, s2
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_store_dword v6, v[0:1], s[0:3], 0 addr64 offset:8
+; GFX6-NEXT: buffer_store_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64
+; GFX6-NEXT: buffer_store_dword v7, v[2:3], s[0:3], 0 addr64
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: struct_buffer_load_v3f32_tfe:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: v_mov_b32_e32 v4, 0
+; GFX7-NEXT: v_mov_b32_e32 v5, v4
+; GFX7-NEXT: v_mov_b32_e32 v6, v4
+; GFX7-NEXT: v_mov_b32_e32 v7, v4
+; GFX7-NEXT: buffer_load_dwordx3 v[4:7], v4, s[0:3], 0 idxen tfe
+; GFX7-NEXT: s_mov_b32 s2, 0
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s0, s2
+; GFX7-NEXT: s_mov_b32 s1, s2
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_store_dwordx3 v[4:6], v[0:1], s[0:3], 0 addr64
+; GFX7-NEXT: buffer_store_dword v7, v[2:3], s[0:3], 0 addr64
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: struct_buffer_load_v3f32_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v4
+; GFX8-NEXT: buffer_load_dwordx3 v[4:7], v4, s[0:3], 0 idxen tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_dwordx3 v[0:1], v[4:6]
+; GFX8-NEXT: flat_store_dword v[2:3], v7
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: struct_buffer_load_v3f32_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: v_mov_b32_e32 v6, v4
+; GFX910-NEXT: v_mov_b32_e32 v7, v4
+; GFX910-NEXT: buffer_load_dwordx3 v[4:7], v4, s[0:3], 0 idxen tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_dwordx3 v[0:1], v[4:6], off
+; GFX910-NEXT: global_store_dword v[2:3], v7, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: struct_buffer_load_v3f32_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_mov_b32_e32 v7, v4
+; GFX11-NEXT: buffer_load_b96 v[4:7], v4, s[0:3], 0 idxen tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b96 v[0:1], v[4:6], off
+; GFX11-NEXT: global_store_b32 v[2:3], v7, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_load_v3f32_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v6, v4
+; GFX12-NEXT: v_mov_b32_e32 v7, v4
+; GFX12-NEXT: buffer_load_b96 v[4:7], v4, s[0:3], null idxen tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b96 v[0:1], v[4:6], off
+; GFX12-NEXT: global_store_b32 v[2:3], v7, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { <3 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v3f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { <3 x float>, i32 } %res, 0
+ store <3 x float> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <3 x float>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @struct_buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX67-LABEL: struct_buffer_load_v4i32_tfe:
+; GFX67: ; %bb.0:
+; GFX67-NEXT: v_mov_b32_e32 v4, 0
+; GFX67-NEXT: v_mov_b32_e32 v5, v4
+; GFX67-NEXT: v_mov_b32_e32 v6, v4
+; GFX67-NEXT: v_mov_b32_e32 v7, v4
+; GFX67-NEXT: v_mov_b32_e32 v8, v4
+; GFX67-NEXT: buffer_load_dwordx4 v[4:8], v4, s[0:3], 0 idxen tfe
+; GFX67-NEXT: s_mov_b32 s2, 0
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s0, s2
+; GFX67-NEXT: s_mov_b32 s1, s2
+; GFX67-NEXT: s_waitcnt vmcnt(0)
+; GFX67-NEXT: buffer_store_dwordx4 v[4:7], v[0:1], s[0:3], 0 addr64
+; GFX67-NEXT: buffer_store_dword v8, v[2:3], s[0:3], 0 addr64
+; GFX67-NEXT: s_endpgm
+;
+; GFX8-LABEL: struct_buffer_load_v4i32_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v4
+; GFX8-NEXT: v_mov_b32_e32 v8, v4
+; GFX8-NEXT: buffer_load_dwordx4 v[4:8], v4, s[0:3], 0 idxen tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
+; GFX8-NEXT: flat_store_dword v[2:3], v8
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: struct_buffer_load_v4i32_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: v_mov_b32_e32 v6, v4
+; GFX910-NEXT: v_mov_b32_e32 v7, v4
+; GFX910-NEXT: v_mov_b32_e32 v8, v4
+; GFX910-NEXT: buffer_load_dwordx4 v[4:8], v4, s[0:3], 0 idxen tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
+; GFX910-NEXT: global_store_dword v[2:3], v8, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: struct_buffer_load_v4i32_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_mov_b32_e32 v7, v4
+; GFX11-NEXT: v_mov_b32_e32 v8, v4
+; GFX11-NEXT: buffer_load_b128 v[4:8], v4, s[0:3], 0 idxen tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b128 v[0:1], v[4:7], off
+; GFX11-NEXT: global_store_b32 v[2:3], v8, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_load_v4i32_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v6, v4
+; GFX12-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_mov_b32 v8, v4
+; GFX12-NEXT: buffer_load_b128 v[4:8], v4, s[0:3], null idxen tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b128 v[0:1], v[4:7], off
+; GFX12-NEXT: global_store_b32 v[2:3], v8, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { <4 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v4i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { <4 x i32>, i32 } %res, 0
+ store <4 x i32> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <4 x i32>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+define amdgpu_ps void @struct_buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
+; GFX67-LABEL: struct_buffer_load_v4f32_tfe:
+; GFX67: ; %bb.0:
+; GFX67-NEXT: v_mov_b32_e32 v4, 0
+; GFX67-NEXT: v_mov_b32_e32 v5, v4
+; GFX67-NEXT: v_mov_b32_e32 v6, v4
+; GFX67-NEXT: v_mov_b32_e32 v7, v4
+; GFX67-NEXT: v_mov_b32_e32 v8, v4
+; GFX67-NEXT: buffer_load_dwordx4 v[4:8], v4, s[0:3], 0 idxen tfe
+; GFX67-NEXT: s_mov_b32 s2, 0
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s0, s2
+; GFX67-NEXT: s_mov_b32 s1, s2
+; GFX67-NEXT: s_waitcnt vmcnt(0)
+; GFX67-NEXT: buffer_store_dwordx4 v[4:7], v[0:1], s[0:3], 0 addr64
+; GFX67-NEXT: buffer_store_dword v8, v[2:3], s[0:3], 0 addr64
+; GFX67-NEXT: s_endpgm
+;
+; GFX8-LABEL: struct_buffer_load_v4f32_tfe:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, v4
+; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v4
+; GFX8-NEXT: v_mov_b32_e32 v8, v4
+; GFX8-NEXT: buffer_load_dwordx4 v[4:8], v4, s[0:3], 0 idxen tfe
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
+; GFX8-NEXT: flat_store_dword v[2:3], v8
+; GFX8-NEXT: s_endpgm
+;
+; GFX910-LABEL: struct_buffer_load_v4f32_tfe:
+; GFX910: ; %bb.0:
+; GFX910-NEXT: v_mov_b32_e32 v4, 0
+; GFX910-NEXT: v_mov_b32_e32 v5, v4
+; GFX910-NEXT: v_mov_b32_e32 v6, v4
+; GFX910-NEXT: v_mov_b32_e32 v7, v4
+; GFX910-NEXT: v_mov_b32_e32 v8, v4
+; GFX910-NEXT: buffer_load_dwordx4 v[4:8], v4, s[0:3], 0 idxen tfe
+; GFX910-NEXT: s_waitcnt vmcnt(0)
+; GFX910-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
+; GFX910-NEXT: global_store_dword v[2:3], v8, off
+; GFX910-NEXT: s_endpgm
+;
+; GFX11-LABEL: struct_buffer_load_v4f32_tfe:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v4
+; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_mov_b32_e32 v7, v4
+; GFX11-NEXT: v_mov_b32_e32 v8, v4
+; GFX11-NEXT: buffer_load_b128 v[4:8], v4, s[0:3], 0 idxen tfe
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_store_b128 v[0:1], v[4:7], off
+; GFX11-NEXT: global_store_b32 v[2:3], v8, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_load_v4f32_tfe:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v6, v4
+; GFX12-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_mov_b32 v8, v4
+; GFX12-NEXT: buffer_load_b128 v[4:8], v4, s[0:3], null idxen tfe
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b128 v[0:1], v[4:7], off
+; GFX12-NEXT: global_store_b32 v[2:3], v8, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %res = call { <4 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v4f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %data = extractvalue { <4 x float>, i32 } %res, 0
+ store <4 x float> %data, ptr addrspace(1) %data_addr
+ %tfe = extractvalue { <4 x float>, i32 } %res, 1
+ store i32 %tfe, ptr addrspace(1) %tfe_addr
+ ret void
+}
+
+declare { i8, i32 } @llvm.amdgcn.struct.buffer.load.sl_i8i32s(<4 x i32>, i32, i32, i32, i32)
+declare { i16, i32 } @llvm.amdgcn.struct.buffer.load.sl_i16i32s(<4 x i32>, i32, i32, i32, i32)
+declare { half, i32 } @llvm.amdgcn.struct.buffer.load.sl_f16i32s(<4 x i32>, i32, i32, i32, i32)
+declare { i32, i32 } @llvm.amdgcn.struct.buffer.load.sl_i32i32s(<4 x i32>, i32, i32, i32, i32)
+declare { <2 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v2i32i32s(<4 x i32>, i32, i32, i32, i32)
+declare { <2 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v2f32i32s(<4 x i32>, i32, i32, i32, i32)
+declare { <3 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v3i32i32s(<4 x i32>, i32, i32, i32, i32)
+declare { <3 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v3f32i32s(<4 x i32>, i32, i32, i32, i32)
+declare { <4 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v4i32i32s(<4 x i32>, i32, i32, i32, i32)
+declare { <4 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v4f32i32s(<4 x i32>, i32, i32, i32, i32)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX10: {{.*}}
+; GFX9: {{.*}}
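
The tests in the file above all share one shape: the tfe form of llvm.amdgcn.struct.buffer.load returns the loaded payload together with an extra i32 status dword, which is why each variant pre-zeroes the full destination range, including the extra status register, before issuing the load, and why every function stores both extractvalue results. A typical consumer would branch or select on that status dword. The sketch below is a minimal illustration and is not part of the test file: the function name @load_or_default is hypothetical, and the reading of a nonzero status dword as a faulting access is an assumption about hardware semantics that these codegen tests do not check; only the intrinsic signature is taken from the declarations above.

    ; Hypothetical sketch: fall back to zero when the tfe status dword reports a fault.
    ; Only the intrinsic signature comes from the declarations above; the
    ; nonzero-on-fault interpretation of %tfe is an assumption.
    define amdgpu_ps <2 x i32> @load_or_default(<4 x i32> inreg %rsrc, i32 %idx) {
      %r = call { <2 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v2i32i32s(<4 x i32> %rsrc, i32 %idx, i32 0, i32 0, i32 0)
      %v = extractvalue { <2 x i32>, i32 } %r, 0   ; loaded payload
      %tfe = extractvalue { <2 x i32>, i32 } %r, 1 ; status dword written by the load
      %faulted = icmp ne i32 %tfe, 0
      %out = select i1 %faulted, <2 x i32> zeroinitializer, <2 x i32> %v
      ret <2 x i32> %out
    }
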
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
index 7a0450761e1f..3a867879bb80 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
@@ -228,23 +228,23 @@ define amdgpu_kernel void @s_exp_f32(ptr addrspace(1) %out, float %in) {
; R600-NEXT: MUL_IEEE * T2.W, PS, literal.z,
; R600-NEXT: -127(nan), 254(3.559298e-43)
; R600-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T3.X, T1.X, literal.x,
-; R600-NEXT: MUL_IEEE T0.Y, PS, literal.y,
+; R600-NEXT: MUL_IEEE T3.X, PS, literal.x,
+; R600-NEXT: MUL_IEEE T0.Y, T1.X, literal.y,
; R600-NEXT: CNDE_INT T1.Z, PV.W, PV.Z, T0.Z,
; R600-NEXT: CNDE_INT T3.W, PV.Y, PV.X, T0.X,
; R600-NEXT: SETGT_INT * T4.W, T0.Z, literal.z,
-; R600-NEXT: 2130706432(1.701412e+38), 209715200(1.972152e-31)
+; R600-NEXT: 209715200(1.972152e-31), 2130706432(1.701412e+38)
; R600-NEXT: 127(1.779649e-43), 0(0.000000e+00)
; R600-NEXT: CNDE_INT T0.Z, PS, PV.Z, PV.W,
-; R600-NEXT: CNDE_INT T0.W, T0.W, PV.Y, T2.W,
-; R600-NEXT: MUL_IEEE * T2.W, PV.X, literal.x,
+; R600-NEXT: MUL_IEEE T3.W, PV.Y, literal.x,
+; R600-NEXT: CNDE_INT * T0.W, T0.W, PV.X, T2.W,
; R600-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; R600-NEXT: CNDE_INT T1.Z, T1.Y, T3.X, PS,
-; R600-NEXT: CNDE_INT T0.W, T1.W, PV.W, T1.X,
+; R600-NEXT: CNDE_INT T1.Z, T1.W, PS, T1.X,
+; R600-NEXT: CNDE_INT T0.W, T1.Y, T0.Y, PV.W,
; R600-NEXT: LSHL * T1.W, PV.Z, literal.x,
; R600-NEXT: 23(3.222986e-44), 0(0.000000e+00)
; R600-NEXT: ADD_INT T1.W, PS, literal.x,
-; R600-NEXT: CNDE_INT * T0.W, T4.W, PV.W, PV.Z,
+; R600-NEXT: CNDE_INT * T0.W, T4.W, PV.Z, PV.W,
; R600-NEXT: 1065353216(1.000000e+00), 0(0.000000e+00)
; R600-NEXT: MUL_IEEE T0.W, PS, PV.W,
; R600-NEXT: SETGT * T1.W, literal.x, KC0[2].Z,
@@ -258,65 +258,63 @@ define amdgpu_kernel void @s_exp_f32(ptr addrspace(1) %out, float %in) {
;
; CM-LABEL: s_exp_f32:
; CM: ; %bb.0:
-; CM-NEXT: ALU 64, @4, KC0[CB0:0-32], KC1[]
+; CM-NEXT: ALU 62, @4, KC0[CB0:0-32], KC1[]
; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X
; CM-NEXT: CF_END
; CM-NEXT: PAD
; CM-NEXT: ALU clause starting at 4:
; CM-NEXT: AND_INT * T0.W, KC0[2].Z, literal.x,
; CM-NEXT: -4096(nan), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T0.Z, PV.W, literal.x,
; CM-NEXT: ADD * T1.W, KC0[2].Z, -PV.W,
-; CM-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T1.Z, PV.W, literal.x,
-; CM-NEXT: RNDNE * T2.W, PV.Z,
-; CM-NEXT: 967029397(3.122284e-04), 0(0.000000e+00)
-; CM-NEXT: TRUNC T2.Z, PV.W,
+; CM-NEXT: MUL_IEEE T0.Z, PV.W, literal.x,
+; CM-NEXT: MUL_IEEE * T2.W, T0.W, literal.y,
+; CM-NEXT: 967029397(3.122284e-04), 1069064192(1.442383e+00)
+; CM-NEXT: RNDNE T1.Z, PV.W,
; CM-NEXT: MULADD_IEEE * T1.W, T1.W, literal.x, PV.Z,
; CM-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
-; CM-NEXT: MULADD_IEEE T0.Y, T0.W, literal.x, PV.W,
-; CM-NEXT: ADD T0.Z, T0.Z, -T2.W,
-; CM-NEXT: FLT_TO_INT * T0.W, PV.Z,
+; CM-NEXT: MULADD_IEEE T0.Z, T0.W, literal.x, PV.W,
+; CM-NEXT: ADD * T0.W, T2.W, -PV.Z, BS:VEC_120/SCL_212
; CM-NEXT: 967029397(3.122284e-04), 0(0.000000e+00)
-; CM-NEXT: MIN_INT T1.Z, PV.W, literal.x,
-; CM-NEXT: ADD * T1.W, PV.Z, PV.Y,
+; CM-NEXT: TRUNC T1.Z, T1.Z,
+; CM-NEXT: ADD * T0.W, PV.W, PV.Z,
+; CM-NEXT: EXP_IEEE T0.X, T0.W,
+; CM-NEXT: EXP_IEEE T0.Y (MASKED), T0.W,
+; CM-NEXT: EXP_IEEE T0.Z (MASKED), T0.W,
+; CM-NEXT: EXP_IEEE * T0.W (MASKED), T0.W,
+; CM-NEXT: FLT_TO_INT T0.Z, T1.Z,
+; CM-NEXT: MUL_IEEE * T0.W, PV.X, literal.x,
+; CM-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
+; CM-NEXT: MUL_IEEE T0.Y, PV.W, literal.x,
+; CM-NEXT: MAX_INT T1.Z, PV.Z, literal.y,
+; CM-NEXT: MIN_INT * T1.W, PV.Z, literal.z,
+; CM-NEXT: 209715200(1.972152e-31), -330(nan)
; CM-NEXT: 381(5.338947e-43), 0(0.000000e+00)
-; CM-NEXT: EXP_IEEE T0.X, T1.W,
-; CM-NEXT: EXP_IEEE T0.Y (MASKED), T1.W,
-; CM-NEXT: EXP_IEEE T0.Z (MASKED), T1.W,
-; CM-NEXT: EXP_IEEE * T0.W (MASKED), T1.W,
-; CM-NEXT: MUL_IEEE T0.Y, PV.X, literal.x,
-; CM-NEXT: ADD_INT T0.Z, T1.Z, literal.y,
-; CM-NEXT: MAX_INT * T1.W, T0.W, literal.z,
-; CM-NEXT: 2130706432(1.701412e+38), -254(nan)
-; CM-NEXT: -330(nan), 0(0.000000e+00)
-; CM-NEXT: ADD_INT T1.X, T0.W, literal.x,
-; CM-NEXT: ADD_INT T1.Y, PV.W, literal.y,
-; CM-NEXT: ADD_INT T1.Z, T0.W, literal.z,
-; CM-NEXT: SETGT_UINT * T1.W, T0.W, literal.w,
-; CM-NEXT: -127(nan), 204(2.858649e-43)
+; CM-NEXT: ADD_INT T1.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T1.Y, PV.Z, literal.y,
+; CM-NEXT: ADD_INT T1.Z, T0.Z, literal.z,
+; CM-NEXT: SETGT_UINT * T1.W, T0.Z, literal.w,
+; CM-NEXT: -254(nan), 204(2.858649e-43)
; CM-NEXT: 102(1.429324e-43), -229(nan)
-; CM-NEXT: SETGT_UINT T2.X, T0.W, literal.x,
-; CM-NEXT: CNDE_INT T1.Y, PV.W, PV.Y, PV.Z,
-; CM-NEXT: SETGT_INT T1.Z, T0.W, literal.y,
-; CM-NEXT: MUL_IEEE * T2.W, T0.X, literal.z,
-; CM-NEXT: 254(3.559298e-43), -127(nan)
-; CM-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T3.X, PV.W, literal.x,
-; CM-NEXT: CNDE_INT T1.Y, PV.Z, PV.Y, T0.W,
-; CM-NEXT: CNDE_INT T0.Z, PV.X, T1.X, T0.Z,
-; CM-NEXT: SETGT_INT * T0.W, T0.W, literal.y,
-; CM-NEXT: 209715200(1.972152e-31), 127(1.779649e-43)
+; CM-NEXT: ADD_INT T2.X, T0.Z, literal.x,
+; CM-NEXT: SETGT_UINT T2.Y, T0.Z, literal.y,
+; CM-NEXT: CNDE_INT T1.Z, PV.W, PV.Y, PV.Z,
+; CM-NEXT: SETGT_INT * T2.W, T0.Z, literal.x,
+; CM-NEXT: -127(nan), 254(3.559298e-43)
+; CM-NEXT: MUL_IEEE T3.X, T0.X, literal.x,
+; CM-NEXT: CNDE_INT T1.Y, PV.W, PV.Z, T0.Z,
+; CM-NEXT: CNDE_INT T1.Z, PV.Y, PV.X, T1.X,
+; CM-NEXT: SETGT_INT * T3.W, T0.Z, literal.y,
+; CM-NEXT: 2130706432(1.701412e+38), 127(1.779649e-43)
; CM-NEXT: CNDE_INT T1.Y, PV.W, PV.Y, PV.Z,
-; CM-NEXT: CNDE_INT T0.Z, T1.W, PV.X, T2.W,
-; CM-NEXT: MUL_IEEE * T1.W, T0.Y, literal.x,
+; CM-NEXT: MUL_IEEE T0.Z, PV.X, literal.x,
+; CM-NEXT: CNDE_INT * T0.W, T1.W, T0.Y, T0.W,
; CM-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T0.Y, T2.X, T0.Y, PV.W,
-; CM-NEXT: CNDE_INT T0.Z, T1.Z, PV.Z, T0.X,
-; CM-NEXT: LSHL * T1.W, PV.Y, literal.x,
+; CM-NEXT: CNDE_INT T0.Y, T2.W, PV.W, T0.X,
+; CM-NEXT: CNDE_INT T0.Z, T2.Y, T3.X, PV.Z,
+; CM-NEXT: LSHL * T0.W, PV.Y, literal.x,
; CM-NEXT: 23(3.222986e-44), 0(0.000000e+00)
; CM-NEXT: ADD_INT T1.Z, PV.W, literal.x,
-; CM-NEXT: CNDE_INT * T0.W, T0.W, PV.Z, PV.Y,
+; CM-NEXT: CNDE_INT * T0.W, T3.W, PV.Y, PV.Z,
; CM-NEXT: 1065353216(1.000000e+00), 0(0.000000e+00)
; CM-NEXT: MUL_IEEE T0.Z, PV.W, PV.Z,
; CM-NEXT: SETGT * T0.W, literal.x, KC0[2].Z,
@@ -610,105 +608,105 @@ define amdgpu_kernel void @s_exp_v2f32(ptr addrspace(1) %out, <2 x float> %in) {
; R600-NEXT: AND_INT * T0.W, KC0[3].X, literal.x,
; R600-NEXT: -4096(nan), 0(0.000000e+00)
; R600-NEXT: ADD * T1.W, KC0[3].X, -PV.W,
-; R600-NEXT: AND_INT T0.Z, KC0[2].W, literal.x,
-; R600-NEXT: MUL_IEEE T2.W, PV.W, literal.y,
-; R600-NEXT: MUL_IEEE * T3.W, T0.W, literal.z,
-; R600-NEXT: -4096(nan), 967029397(3.122284e-04)
-; R600-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
-; R600-NEXT: RNDNE T1.Z, PS,
+; R600-NEXT: MUL_IEEE T2.W, PV.W, literal.x,
+; R600-NEXT: MUL_IEEE * T3.W, T0.W, literal.y,
+; R600-NEXT: 967029397(3.122284e-04), 1069064192(1.442383e+00)
+; R600-NEXT: RNDNE T0.Z, PS,
; R600-NEXT: MULADD_IEEE T1.W, T1.W, literal.x, PV.W,
-; R600-NEXT: ADD * T2.W, KC0[2].W, -PV.Z,
-; R600-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T0.Y, PS, literal.x,
-; R600-NEXT: MUL_IEEE T2.Z, T0.Z, literal.y,
+; R600-NEXT: AND_INT * T2.W, KC0[2].W, literal.y,
+; R600-NEXT: 1069064192(1.442383e+00), -4096(nan)
+; R600-NEXT: ADD T1.Z, KC0[2].W, -PS,
; R600-NEXT: MULADD_IEEE T0.W, T0.W, literal.x, PV.W,
; R600-NEXT: ADD * T1.W, T3.W, -PV.Z,
+; R600-NEXT: 967029397(3.122284e-04), 0(0.000000e+00)
+; R600-NEXT: ADD T2.Z, PS, PV.W,
+; R600-NEXT: MUL_IEEE T0.W, PV.Z, literal.x,
+; R600-NEXT: MUL_IEEE * T1.W, T2.W, literal.y,
; R600-NEXT: 967029397(3.122284e-04), 1069064192(1.442383e+00)
-; R600-NEXT: ADD T3.Z, PS, PV.W,
-; R600-NEXT: RNDNE T0.W, PV.Z,
-; R600-NEXT: MULADD_IEEE * T1.W, T2.W, literal.x, PV.Y, BS:VEC_021/SCL_122
-; R600-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
-; R600-NEXT: TRUNC T0.Y, T1.Z,
-; R600-NEXT: MULADD_IEEE T0.Z, T0.Z, literal.x, PS, BS:VEC_120/SCL_212
-; R600-NEXT: ADD T1.W, T2.Z, -PV.W, BS:VEC_201
+; R600-NEXT: RNDNE T0.Y, PS,
+; R600-NEXT: MULADD_IEEE T1.Z, T1.Z, literal.x, PV.W,
+; R600-NEXT: TRUNC T0.W, T0.Z, BS:VEC_120/SCL_212
; R600-NEXT: EXP_IEEE * T0.X, PV.Z,
-; R600-NEXT: 967029397(3.122284e-04), 0(0.000000e+00)
-; R600-NEXT: ADD T0.Z, PV.W, PV.Z,
-; R600-NEXT: FLT_TO_INT T1.W, PV.Y,
-; R600-NEXT: MUL_IEEE * T2.W, PS, literal.x,
-; R600-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T1.Z, PS, literal.x,
-; R600-NEXT: SETGT_UINT T3.W, PV.W, literal.y,
-; R600-NEXT: EXP_IEEE * T0.Y, PV.Z,
-; R600-NEXT: 2130706432(1.701412e+38), 254(3.559298e-43)
-; R600-NEXT: CNDE_INT T1.X, PV.W, T2.W, PV.Z,
-; R600-NEXT: MUL_IEEE T1.Y, PS, literal.x,
-; R600-NEXT: MAX_INT T0.Z, T1.W, literal.y,
-; R600-NEXT: MIN_INT T2.W, T1.W, literal.z,
-; R600-NEXT: TRUNC * T0.W, T0.W,
+; R600-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
+; R600-NEXT: FLT_TO_INT T1.Y, PV.W,
+; R600-NEXT: MUL_IEEE T0.Z, PS, literal.x,
+; R600-NEXT: MULADD_IEEE T0.W, T2.W, literal.y, PV.Z,
+; R600-NEXT: ADD * T1.W, T1.W, -PV.Y,
+; R600-NEXT: 209715200(1.972152e-31), 967029397(3.122284e-04)
+; R600-NEXT: ADD T1.Z, PS, PV.W,
+; R600-NEXT: MUL_IEEE T0.W, PV.Z, literal.x,
+; R600-NEXT: SETGT_UINT * T1.W, PV.Y, literal.y,
+; R600-NEXT: 209715200(1.972152e-31), -229(nan)
+; R600-NEXT: CNDE_INT T0.Z, PS, PV.W, T0.Z,
+; R600-NEXT: SETGT_INT T0.W, T1.Y, literal.x,
+; R600-NEXT: EXP_IEEE * T1.X, PV.Z,
+; R600-NEXT: -127(nan), 0(0.000000e+00)
+; R600-NEXT: CNDE_INT T0.Z, PV.W, PV.Z, T0.X,
+; R600-NEXT: MAX_INT T2.W, T1.Y, literal.x,
+; R600-NEXT: MUL_IEEE * T3.W, PS, literal.y,
+; R600-NEXT: -330(nan), 209715200(1.972152e-31)
+; R600-NEXT: MUL_IEEE T2.X, PS, literal.x,
+; R600-NEXT: ADD_INT T2.Y, PV.W, literal.y,
+; R600-NEXT: ADD_INT T1.Z, T1.Y, literal.z,
+; R600-NEXT: MIN_INT T2.W, T1.Y, literal.w,
+; R600-NEXT: TRUNC * T4.W, T0.Y,
+; R600-NEXT: 209715200(1.972152e-31), 204(2.858649e-43)
+; R600-NEXT: 102(1.429324e-43), 381(5.338947e-43)
+; R600-NEXT: FLT_TO_INT T3.X, PS,
+; R600-NEXT: ADD_INT T0.Y, PV.W, literal.x,
+; R600-NEXT: ADD_INT T2.Z, T1.Y, literal.y,
+; R600-NEXT: SETGT_UINT T2.W, T1.Y, literal.z,
+; R600-NEXT: CNDE_INT * T1.W, T1.W, PV.Y, PV.Z,
+; R600-NEXT: -254(nan), -127(nan)
+; R600-NEXT: 254(3.559298e-43), 0(0.000000e+00)
+; R600-NEXT: MUL_IEEE T4.X, T1.X, literal.x,
+; R600-NEXT: MUL_IEEE T2.Y, T0.X, literal.x, BS:VEC_120/SCL_212
+; R600-NEXT: CNDE_INT T1.Z, T0.W, PS, T1.Y,
+; R600-NEXT: CNDE_INT T0.W, PV.W, PV.Z, PV.Y,
+; R600-NEXT: MAX_INT * T1.W, PV.X, literal.y,
; R600-NEXT: 2130706432(1.701412e+38), -330(nan)
-; R600-NEXT: 381(5.338947e-43), 0(0.000000e+00)
-; R600-NEXT: FLT_TO_INT T2.X, PS,
-; R600-NEXT: ADD_INT T2.Y, PV.W, literal.x,
-; R600-NEXT: ADD_INT T0.Z, PV.Z, literal.y,
-; R600-NEXT: ADD_INT T0.W, T1.W, literal.z,
-; R600-NEXT: SETGT_UINT * T2.W, T1.W, literal.w,
-; R600-NEXT: -254(nan), 204(2.858649e-43)
-; R600-NEXT: 102(1.429324e-43), -229(nan)
-; R600-NEXT: ADD_INT T3.X, T1.W, literal.x,
-; R600-NEXT: CNDE_INT T3.Y, PS, PV.Z, PV.W,
-; R600-NEXT: SETGT_INT T0.Z, T1.W, literal.x,
-; R600-NEXT: MUL_IEEE T0.W, T0.X, literal.y,
-; R600-NEXT: MUL_IEEE * T4.W, T0.Y, literal.y,
-; R600-NEXT: -127(nan), 209715200(1.972152e-31)
-; R600-NEXT: MUL_IEEE T4.X, PS, literal.x,
-; R600-NEXT: MUL_IEEE T4.Y, PV.W, literal.x,
-; R600-NEXT: CNDE_INT T1.Z, PV.Z, PV.Y, T1.W,
-; R600-NEXT: CNDE_INT T3.W, T3.W, PV.X, T2.Y,
-; R600-NEXT: MAX_INT * T5.W, T2.X, literal.y,
-; R600-NEXT: 209715200(1.972152e-31), -330(nan)
-; R600-NEXT: SETGT_INT T3.X, T1.W, literal.x,
-; R600-NEXT: ADD_INT T2.Y, PS, literal.y,
-; R600-NEXT: ADD_INT T2.Z, T2.X, literal.z,
-; R600-NEXT: SETGT_UINT * T1.W, T2.X, literal.w,
+; R600-NEXT: SETGT_INT T0.X, T1.Y, literal.x,
+; R600-NEXT: ADD_INT T0.Y, PS, literal.y,
+; R600-NEXT: ADD_INT T2.Z, T3.X, literal.z,
+; R600-NEXT: SETGT_UINT * T1.W, T3.X, literal.w,
; R600-NEXT: 127(1.779649e-43), 204(2.858649e-43)
; R600-NEXT: 102(1.429324e-43), -229(nan)
-; R600-NEXT: MIN_INT * T5.W, T2.X, literal.x,
+; R600-NEXT: MIN_INT * T4.W, T3.X, literal.x,
; R600-NEXT: 381(5.338947e-43), 0(0.000000e+00)
; R600-NEXT: ADD_INT T5.X, PV.W, literal.x,
-; R600-NEXT: ADD_INT T3.Y, T2.X, literal.y,
-; R600-NEXT: SETGT_UINT T3.Z, T2.X, literal.z,
-; R600-NEXT: CNDE_INT T5.W, T1.W, T2.Y, T2.Z,
-; R600-NEXT: SETGT_INT * T6.W, T2.X, literal.y,
+; R600-NEXT: ADD_INT T1.Y, T3.X, literal.y,
+; R600-NEXT: SETGT_UINT T3.Z, T3.X, literal.z,
+; R600-NEXT: CNDE_INT T4.W, T1.W, T0.Y, T2.Z,
+; R600-NEXT: SETGT_INT * T5.W, T3.X, literal.y,
; R600-NEXT: -254(nan), -127(nan)
; R600-NEXT: 254(3.559298e-43), 0(0.000000e+00)
-; R600-NEXT: CNDE_INT T6.X, PS, PV.W, T2.X,
-; R600-NEXT: CNDE_INT T2.Y, PV.Z, PV.Y, PV.X,
-; R600-NEXT: SETGT_INT T2.Z, T2.X, literal.x, BS:VEC_120/SCL_212
-; R600-NEXT: CNDE_INT T3.W, T3.X, T1.Z, T3.W, BS:VEC_021/SCL_122
-; R600-NEXT: CNDE_INT * T0.W, T2.W, T4.Y, T0.W,
-; R600-NEXT: 127(1.779649e-43), 0(0.000000e+00)
-; R600-NEXT: CNDE_INT T0.X, T0.Z, PS, T0.X,
-; R600-NEXT: LSHL T3.Y, PV.W, literal.x,
-; R600-NEXT: CNDE_INT T0.Z, PV.Z, PV.X, PV.Y,
-; R600-NEXT: CNDE_INT T0.W, T1.W, T4.X, T4.W,
-; R600-NEXT: MUL_IEEE * T1.W, T1.Y, literal.y,
+; R600-NEXT: CNDE_INT T6.X, PS, PV.W, T3.X,
+; R600-NEXT: CNDE_INT T0.Y, PV.Z, PV.Y, PV.X,
+; R600-NEXT: SETGT_INT T2.Z, T3.X, literal.x,
+; R600-NEXT: CNDE_INT T0.W, T0.X, T1.Z, T0.W, BS:VEC_120/SCL_212
+; R600-NEXT: MUL_IEEE * T4.W, T2.Y, literal.y,
+; R600-NEXT: 127(1.779649e-43), 2130706432(1.701412e+38)
+; R600-NEXT: CNDE_INT T3.X, T2.W, T2.Y, PS, BS:VEC_120/SCL_212
+; R600-NEXT: LSHL T1.Y, PV.W, literal.x,
+; R600-NEXT: CNDE_INT T1.Z, PV.Z, PV.X, PV.Y,
+; R600-NEXT: MUL_IEEE T0.W, T4.X, literal.y,
+; R600-NEXT: CNDE_INT * T1.W, T1.W, T2.X, T3.W,
; R600-NEXT: 23(3.222986e-44), 2130706432(1.701412e+38)
-; R600-NEXT: CNDE_INT T2.X, T3.Z, T1.Y, PS,
-; R600-NEXT: CNDE_INT T0.Y, T6.W, PV.W, T0.Y,
-; R600-NEXT: LSHL T0.Z, PV.Z, literal.x,
+; R600-NEXT: CNDE_INT T1.X, T5.W, PS, T1.X, BS:VEC_021/SCL_122
+; R600-NEXT: CNDE_INT T0.Y, T3.Z, T4.X, PV.W, BS:VEC_201
+; R600-NEXT: LSHL T1.Z, PV.Z, literal.x,
; R600-NEXT: ADD_INT T0.W, PV.Y, literal.y,
-; R600-NEXT: CNDE_INT * T1.W, T3.X, PV.X, T1.X,
+; R600-NEXT: CNDE_INT * T1.W, T0.X, T0.Z, PV.X,
; R600-NEXT: 23(3.222986e-44), 1065353216(1.000000e+00)
; R600-NEXT: MUL_IEEE T1.Y, PS, PV.W,
-; R600-NEXT: SETGT T1.Z, literal.x, KC0[3].X,
+; R600-NEXT: SETGT T0.Z, literal.x, KC0[3].X,
; R600-NEXT: ADD_INT * T0.W, PV.Z, literal.y,
; R600-NEXT: -1026650416(-1.032789e+02), 1065353216(1.000000e+00)
; R600-NEXT: ALU clause starting at 101:
-; R600-NEXT: CNDE_INT * T1.W, T2.Z, T0.Y, T2.X,
+; R600-NEXT: CNDE_INT * T1.W, T2.Z, T1.X, T0.Y,
; R600-NEXT: MUL_IEEE T0.Y, PV.W, T0.W,
-; R600-NEXT: SETGT T0.Z, literal.x, KC0[2].W,
-; R600-NEXT: CNDE T0.W, T1.Z, T1.Y, 0.0,
+; R600-NEXT: SETGT T1.Z, literal.x, KC0[2].W,
+; R600-NEXT: CNDE T0.W, T0.Z, T1.Y, 0.0,
; R600-NEXT: SETGT * T1.W, KC0[3].X, literal.y,
; R600-NEXT: -1026650416(-1.032789e+02), 1118925336(8.872284e+01)
; R600-NEXT: CNDE T1.Y, PS, PV.W, literal.x,
@@ -721,118 +719,116 @@ define amdgpu_kernel void @s_exp_v2f32(ptr addrspace(1) %out, <2 x float> %in) {
;
; CM-LABEL: s_exp_v2f32:
; CM: ; %bb.0:
-; CM-NEXT: ALU 100, @4, KC0[CB0:0-32], KC1[]
-; CM-NEXT: ALU 18, @105, KC0[CB0:0-32], KC1[]
+; CM-NEXT: ALU 98, @4, KC0[CB0:0-32], KC1[]
+; CM-NEXT: ALU 18, @103, KC0[CB0:0-32], KC1[]
; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0, T1.X
; CM-NEXT: CF_END
; CM-NEXT: ALU clause starting at 4:
; CM-NEXT: AND_INT * T0.W, KC0[2].W, literal.x,
; CM-NEXT: -4096(nan), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T0.Z, PV.W, literal.x,
; CM-NEXT: ADD * T1.W, KC0[2].W, -PV.W,
+; CM-NEXT: MUL_IEEE T0.Y, PV.W, literal.x,
+; CM-NEXT: MUL_IEEE T0.Z, T0.W, literal.y,
+; CM-NEXT: AND_INT * T2.W, KC0[3].X, literal.z,
+; CM-NEXT: 967029397(3.122284e-04), 1069064192(1.442383e+00)
+; CM-NEXT: -4096(nan), 0(0.000000e+00)
+; CM-NEXT: ADD T1.Y, KC0[3].X, -PV.W,
+; CM-NEXT: RNDNE T1.Z, PV.Z,
+; CM-NEXT: MULADD_IEEE * T1.W, T1.W, literal.x, PV.Y,
; CM-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T1.Z, PV.W, literal.x,
-; CM-NEXT: RNDNE * T2.W, PV.Z,
-; CM-NEXT: 967029397(3.122284e-04), 0(0.000000e+00)
-; CM-NEXT: TRUNC T0.Y, PV.W,
-; CM-NEXT: AND_INT T2.Z, KC0[3].X, literal.x,
-; CM-NEXT: MULADD_IEEE * T1.W, T1.W, literal.y, PV.Z,
-; CM-NEXT: -4096(nan), 1069064192(1.442383e+00)
; CM-NEXT: MULADD_IEEE T0.X, T0.W, literal.x, PV.W,
-; CM-NEXT: MUL_IEEE T1.Y, PV.Z, literal.y,
-; CM-NEXT: FLT_TO_INT T1.Z, PV.Y,
-; CM-NEXT: ADD * T0.W, KC0[3].X, -PV.Z,
+; CM-NEXT: ADD T0.Y, T0.Z, -PV.Z,
+; CM-NEXT: MUL_IEEE T0.Z, PV.Y, literal.x,
+; CM-NEXT: MUL_IEEE * T0.W, T2.W, literal.y, BS:VEC_120/SCL_212
; CM-NEXT: 967029397(3.122284e-04), 1069064192(1.442383e+00)
-; CM-NEXT: ADD T1.X, T0.Z, -T2.W,
-; CM-NEXT: MUL_IEEE T0.Y, PV.W, literal.x,
-; CM-NEXT: MAX_INT T0.Z, PV.Z, literal.y,
-; CM-NEXT: RNDNE * T1.W, PV.Y,
-; CM-NEXT: 967029397(3.122284e-04), -330(nan)
-; CM-NEXT: TRUNC T2.X, PV.W,
-; CM-NEXT: ADD_INT T2.Y, PV.Z, literal.x,
-; CM-NEXT: MULADD_IEEE T0.Z, T0.W, literal.y, PV.Y,
-; CM-NEXT: ADD * T0.W, PV.X, T0.X,
-; CM-NEXT: 204(2.858649e-43), 1069064192(1.442383e+00)
-; CM-NEXT: EXP_IEEE T0.X, T0.W,
-; CM-NEXT: EXP_IEEE T0.Y (MASKED), T0.W,
-; CM-NEXT: EXP_IEEE T0.Z (MASKED), T0.W,
-; CM-NEXT: EXP_IEEE * T0.W (MASKED), T0.W,
-; CM-NEXT: ADD_INT T1.X, T1.Z, literal.x,
-; CM-NEXT: MULADD_IEEE T0.Y, T2.Z, literal.y, T0.Z, BS:VEC_102/SCL_221
-; CM-NEXT: ADD T0.Z, T1.Y, -T1.W,
-; CM-NEXT: MUL_IEEE * T0.W, PV.X, literal.z,
-; CM-NEXT: 102(1.429324e-43), 967029397(3.122284e-04)
-; CM-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; CM-NEXT: SETGT_UINT T3.X, T1.Z, literal.x,
-; CM-NEXT: MUL_IEEE T1.Y, PV.W, literal.y,
-; CM-NEXT: SETGT_UINT T2.Z, T1.Z, literal.z,
-; CM-NEXT: ADD * T1.W, PV.Z, PV.Y,
-; CM-NEXT: -229(nan), 2130706432(1.701412e+38)
-; CM-NEXT: 254(3.559298e-43), 0(0.000000e+00)
+; CM-NEXT: TRUNC T1.X, T1.Z,
+; CM-NEXT: RNDNE T2.Y, PV.W,
+; CM-NEXT: MULADD_IEEE T0.Z, T1.Y, literal.x, PV.Z,
+; CM-NEXT: ADD * T1.W, PV.Y, PV.X,
+; CM-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
+; CM-NEXT: EXP_IEEE T0.X, T1.W,
+; CM-NEXT: EXP_IEEE T0.Y (MASKED), T1.W,
+; CM-NEXT: EXP_IEEE T0.Z (MASKED), T1.W,
+; CM-NEXT: EXP_IEEE * T0.W (MASKED), T1.W,
+; CM-NEXT: MULADD_IEEE T2.X, T2.W, literal.x, T0.Z,
+; CM-NEXT: ADD T0.Y, T0.W, -T2.Y, BS:VEC_120/SCL_212
+; CM-NEXT: FLT_TO_INT T0.Z, T1.X,
+; CM-NEXT: MUL_IEEE * T0.W, PV.X, literal.y,
+; CM-NEXT: 967029397(3.122284e-04), 209715200(1.972152e-31)
+; CM-NEXT: MUL_IEEE T1.X, PV.W, literal.x,
+; CM-NEXT: SETGT_UINT T1.Y, PV.Z, literal.y,
+; CM-NEXT: TRUNC T1.Z, T2.Y,
+; CM-NEXT: ADD * T1.W, PV.Y, PV.X,
+; CM-NEXT: 209715200(1.972152e-31), -229(nan)
; CM-NEXT: EXP_IEEE T0.X (MASKED), T1.W,
; CM-NEXT: EXP_IEEE T0.Y, T1.W,
; CM-NEXT: EXP_IEEE T0.Z (MASKED), T1.W,
; CM-NEXT: EXP_IEEE * T0.W (MASKED), T1.W,
-; CM-NEXT: CNDE_INT T4.X, T2.Z, T0.W, T1.Y,
-; CM-NEXT: CNDE_INT T1.Y, T3.X, T2.Y, T1.X,
-; CM-NEXT: FLT_TO_INT T0.Z, T2.X, BS:VEC_120/SCL_212
-; CM-NEXT: MUL_IEEE * T0.W, PV.Y, literal.x,
-; CM-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; CM-NEXT: SETGT_INT T1.X, T1.Z, literal.x,
-; CM-NEXT: MUL_IEEE T2.Y, T0.X, literal.y,
-; CM-NEXT: MUL_IEEE T3.Z, PV.W, literal.z,
-; CM-NEXT: SETGT_UINT * T1.W, PV.Z, literal.w,
-; CM-NEXT: -127(nan), 209715200(1.972152e-31)
-; CM-NEXT: 2130706432(1.701412e+38), 254(3.559298e-43)
-; CM-NEXT: CNDE_INT T2.X, PV.W, T0.W, PV.Z,
+; CM-NEXT: FLT_TO_INT T2.X, T1.Z,
+; CM-NEXT: MUL_IEEE T2.Y, PV.Y, literal.x,
+; CM-NEXT: CNDE_INT T1.Z, T1.Y, T1.X, T0.W,
+; CM-NEXT: SETGT_INT * T0.W, T0.Z, literal.y, BS:VEC_120/SCL_212
+; CM-NEXT: 209715200(1.972152e-31), -127(nan)
+; CM-NEXT: CNDE_INT T1.X, PV.W, PV.Z, T0.X,
; CM-NEXT: MUL_IEEE T3.Y, PV.Y, literal.x,
-; CM-NEXT: CNDE_INT T3.Z, PV.X, T1.Y, T1.Z,
-; CM-NEXT: MAX_INT * T0.W, T0.Z, literal.y,
-; CM-NEXT: 209715200(1.972152e-31), -330(nan)
-; CM-NEXT: ADD_INT T5.X, PV.W, literal.x,
-; CM-NEXT: ADD_INT T1.Y, T0.Z, literal.y,
-; CM-NEXT: SETGT_UINT T4.Z, T0.Z, literal.z,
-; CM-NEXT: MUL_IEEE * T0.W, T0.Y, literal.w,
+; CM-NEXT: SETGT_UINT T1.Z, PV.X, literal.y,
+; CM-NEXT: MAX_INT * T1.W, T0.Z, literal.z,
+; CM-NEXT: 209715200(1.972152e-31), -229(nan)
+; CM-NEXT: -330(nan), 0(0.000000e+00)
+; CM-NEXT: ADD_INT T3.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T4.Y, T0.Z, literal.y,
+; CM-NEXT: CNDE_INT T2.Z, PV.Z, PV.Y, T2.Y,
+; CM-NEXT: SETGT_INT * T1.W, T2.X, literal.z,
; CM-NEXT: 204(2.858649e-43), 102(1.429324e-43)
-; CM-NEXT: -229(nan), 209715200(1.972152e-31)
-; CM-NEXT: MUL_IEEE T6.X, PV.W, literal.x,
-; CM-NEXT: MIN_INT T4.Y, T0.Z, literal.y,
-; CM-NEXT: CNDE_INT T5.Z, PV.Z, PV.X, PV.Y,
-; CM-NEXT: SETGT_INT * T2.W, T0.Z, literal.z,
-; CM-NEXT: 209715200(1.972152e-31), 381(5.338947e-43)
-; CM-NEXT: -127(nan), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T5.X, PV.W, PV.Z, T0.Z,
-; CM-NEXT: MIN_INT T1.Y, T1.Z, literal.x,
-; CM-NEXT: ADD_INT T5.Z, PV.Y, literal.y,
-; CM-NEXT: ADD_INT * T3.W, T0.Z, literal.z, BS:VEC_120/SCL_212
-; CM-NEXT: 381(5.338947e-43), -254(nan)
; CM-NEXT: -127(nan), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T7.X, T1.W, PV.W, PV.Z,
-; CM-NEXT: SETGT_INT T4.Y, T0.Z, literal.x,
-; CM-NEXT: ADD_INT T0.Z, PV.Y, literal.y,
-; CM-NEXT: ADD_INT * T1.W, T1.Z, literal.z, BS:VEC_120/SCL_212
+; CM-NEXT: CNDE_INT T4.X, PV.W, PV.Z, T0.Y,
+; CM-NEXT: MUL_IEEE T2.Y, T0.X, literal.x,
+; CM-NEXT: MAX_INT T2.Z, T2.X, literal.y, BS:VEC_120/SCL_212
+; CM-NEXT: CNDE_INT * T2.W, T1.Y, PV.X, PV.Y,
+; CM-NEXT: 2130706432(1.701412e+38), -330(nan)
+; CM-NEXT: CNDE_INT T0.X, T0.W, PV.W, T0.Z,
+; CM-NEXT: ADD_INT T1.Y, PV.Z, literal.x,
+; CM-NEXT: ADD_INT T2.Z, T2.X, literal.y,
+; CM-NEXT: MIN_INT * T0.W, T2.X, literal.z,
+; CM-NEXT: 204(2.858649e-43), 102(1.429324e-43)
+; CM-NEXT: 381(5.338947e-43), 0(0.000000e+00)
+; CM-NEXT: ADD_INT T3.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T3.Y, T2.X, literal.y,
+; CM-NEXT: SETGT_UINT T3.Z, T2.X, literal.z,
+; CM-NEXT: CNDE_INT * T0.W, T1.Z, PV.Y, PV.Z,
+; CM-NEXT: -254(nan), -127(nan)
+; CM-NEXT: 254(3.559298e-43), 0(0.000000e+00)
+; CM-NEXT: MUL_IEEE T5.X, T0.Y, literal.x,
+; CM-NEXT: CNDE_INT T0.Y, T1.W, PV.W, T2.X,
+; CM-NEXT: CNDE_INT T1.Z, PV.Z, PV.Y, PV.X,
+; CM-NEXT: MIN_INT * T0.W, T0.Z, literal.y,
+; CM-NEXT: 2130706432(1.701412e+38), 381(5.338947e-43)
+; CM-NEXT: SETGT_INT T2.X, T2.X, literal.x,
+; CM-NEXT: ADD_INT T1.Y, PV.W, literal.y,
+; CM-NEXT: ADD_INT T2.Z, T0.Z, literal.z,
+; CM-NEXT: SETGT_UINT * T0.W, T0.Z, literal.w,
; CM-NEXT: 127(1.779649e-43), -254(nan)
-; CM-NEXT: -127(nan), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T8.X, T2.Z, PV.W, PV.Z,
-; CM-NEXT: SETGT_INT T1.Y, T1.Z, literal.x, BS:VEC_120/SCL_212
-; CM-NEXT: CNDE_INT T0.Z, PV.Y, T5.X, PV.X,
-; CM-NEXT: CNDE_INT * T0.W, T4.Z, T6.X, T0.W, BS:VEC_201
-; CM-NEXT: 127(1.779649e-43), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T5.X, T2.W, PV.W, T0.Y,
+; CM-NEXT: -127(nan), 254(3.559298e-43)
+; CM-NEXT: CNDE_INT T3.X, PV.W, PV.Z, PV.Y,
+; CM-NEXT: SETGT_INT T1.Y, T0.Z, literal.x,
+; CM-NEXT: CNDE_INT T0.Z, PV.X, T0.Y, T1.Z,
+; CM-NEXT: MUL_IEEE * T1.W, T5.X, literal.y,
+; CM-NEXT: 127(1.779649e-43), 2130706432(1.701412e+38)
+; CM-NEXT: CNDE_INT T5.X, T3.Z, T5.X, PV.W,
; CM-NEXT: LSHL T0.Y, PV.Z, literal.x,
-; CM-NEXT: CNDE_INT T0.Z, PV.Y, T3.Z, PV.X,
-; CM-NEXT: CNDE_INT * T0.W, T3.X, T3.Y, T2.Y, BS:VEC_201
-; CM-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T0.X, T1.X, PV.W, T0.X,
+; CM-NEXT: CNDE_INT T0.Z, PV.Y, T0.X, PV.X, BS:VEC_021/SCL_122
+; CM-NEXT: MUL_IEEE * T1.W, T2.Y, literal.y,
+; CM-NEXT: 23(3.222986e-44), 2130706432(1.701412e+38)
+; CM-NEXT: CNDE_INT T0.X, T0.W, T2.Y, PV.W,
; CM-NEXT: LSHL T2.Y, PV.Z, literal.x,
; CM-NEXT: ADD_INT * T0.Z, PV.Y, literal.y,
; CM-NEXT: 23(3.222986e-44), 1065353216(1.000000e+00)
-; CM-NEXT: ALU clause starting at 105:
-; CM-NEXT: CNDE_INT * T0.W, T4.Y, T5.X, T2.X,
-; CM-NEXT: MUL_IEEE T1.X, PV.W, T0.Z,
+; CM-NEXT: ALU clause starting at 103:
+; CM-NEXT: CNDE_INT * T0.W, T2.X, T4.X, T5.X,
+; CM-NEXT: MUL_IEEE T2.X, PV.W, T0.Z,
; CM-NEXT: SETGT T0.Y, literal.x, KC0[3].X,
; CM-NEXT: ADD_INT T0.Z, T2.Y, literal.y,
-; CM-NEXT: CNDE_INT * T0.W, T1.Y, T0.X, T4.X, BS:VEC_120/SCL_212
+; CM-NEXT: CNDE_INT * T0.W, T1.Y, T1.X, T0.X, BS:VEC_120/SCL_212
; CM-NEXT: -1026650416(-1.032789e+02), 1065353216(1.000000e+00)
; CM-NEXT: MUL_IEEE T0.X, PV.W, PV.Z,
; CM-NEXT: SETGT T1.Y, literal.x, KC0[2].W,
@@ -1215,8 +1211,8 @@ define amdgpu_kernel void @s_exp_v3f32(ptr addrspace(1) %out, <3 x float> %in) {
;
; R600-LABEL: s_exp_v3f32:
; R600: ; %bb.0:
-; R600-NEXT: ALU 100, @6, KC0[CB0:0-32], KC1[]
-; R600-NEXT: ALU 69, @107, KC0[CB0:0-32], KC1[]
+; R600-NEXT: ALU 99, @6, KC0[CB0:0-32], KC1[]
+; R600-NEXT: ALU 69, @106, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T2.X, T3.X, 0
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
; R600-NEXT: CF_END
@@ -1224,69 +1220,68 @@ define amdgpu_kernel void @s_exp_v3f32(ptr addrspace(1) %out, <3 x float> %in) {
; R600-NEXT: ALU clause starting at 6:
; R600-NEXT: AND_INT * T0.W, KC0[3].Y, literal.x,
; R600-NEXT: -4096(nan), 0(0.000000e+00)
-; R600-NEXT: ADD T1.W, KC0[3].Y, -PV.W,
-; R600-NEXT: MUL_IEEE * T2.W, PV.W, literal.x,
+; R600-NEXT: MUL_IEEE T1.W, PV.W, literal.x,
+; R600-NEXT: ADD * T2.W, KC0[3].Y, -PV.W,
; R600-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
-; R600-NEXT: RNDNE T3.W, PS,
-; R600-NEXT: MUL_IEEE * T4.W, PV.W, literal.x,
+; R600-NEXT: RNDNE * T3.W, PV.W,
+; R600-NEXT: TRUNC T4.W, PV.W,
+; R600-NEXT: MUL_IEEE * T5.W, T2.W, literal.x,
; R600-NEXT: 967029397(3.122284e-04), 0(0.000000e+00)
-; R600-NEXT: MULADD_IEEE T1.W, T1.W, literal.x, PS,
-; R600-NEXT: TRUNC * T4.W, PV.W,
+; R600-NEXT: MULADD_IEEE T2.W, T2.W, literal.x, PS,
+; R600-NEXT: FLT_TO_INT * T4.W, PV.W,
; R600-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
-; R600-NEXT: FLT_TO_INT T0.Z, PS,
-; R600-NEXT: MULADD_IEEE T0.W, T0.W, literal.x, PV.W,
-; R600-NEXT: ADD * T1.W, T2.W, -T3.W,
-; R600-NEXT: 967029397(3.122284e-04), 0(0.000000e+00)
-; R600-NEXT: ADD T0.W, PS, PV.W,
-; R600-NEXT: MAX_INT * T1.W, PV.Z, literal.x,
-; R600-NEXT: -330(nan), 0(0.000000e+00)
-; R600-NEXT: ADD_INT T0.Y, PS, literal.x,
-; R600-NEXT: ADD_INT T1.Z, T0.Z, literal.y,
-; R600-NEXT: SETGT_UINT T1.W, T0.Z, literal.z,
-; R600-NEXT: EXP_IEEE * T0.X, PV.W,
+; R600-NEXT: MAX_INT T0.Z, PS, literal.x,
+; R600-NEXT: MULADD_IEEE T0.W, T0.W, literal.y, PV.W,
+; R600-NEXT: ADD * T1.W, T1.W, -T3.W,
+; R600-NEXT: -330(nan), 967029397(3.122284e-04)
+; R600-NEXT: ADD T0.Y, PS, PV.W,
+; R600-NEXT: ADD_INT T0.Z, PV.Z, literal.x,
+; R600-NEXT: ADD_INT T0.W, T4.W, literal.y,
+; R600-NEXT: SETGT_UINT * T1.W, T4.W, literal.z,
; R600-NEXT: 204(2.858649e-43), 102(1.429324e-43)
; R600-NEXT: -229(nan), 0(0.000000e+00)
-; R600-NEXT: CNDE_INT T1.Z, PV.W, PV.Y, PV.Z,
-; R600-NEXT: SETGT_INT T0.W, T0.Z, literal.x,
-; R600-NEXT: MUL_IEEE * T2.W, PS, literal.y,
-; R600-NEXT: -127(nan), 209715200(1.972152e-31)
-; R600-NEXT: MUL_IEEE T0.Y, PS, literal.x,
-; R600-NEXT: CNDE_INT T1.Z, PV.W, PV.Z, T0.Z,
-; R600-NEXT: MIN_INT T3.W, T0.Z, literal.y,
-; R600-NEXT: AND_INT * T4.W, KC0[3].W, literal.z,
-; R600-NEXT: 209715200(1.972152e-31), 381(5.338947e-43)
-; R600-NEXT: -4096(nan), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T1.X, T0.X, literal.x,
-; R600-NEXT: ADD T1.Y, KC0[3].W, -PS,
-; R600-NEXT: ADD_INT T2.Z, PV.W, literal.y,
-; R600-NEXT: ADD_INT T3.W, T0.Z, literal.z,
-; R600-NEXT: SETGT_UINT * T5.W, T0.Z, literal.w,
-; R600-NEXT: 2130706432(1.701412e+38), -254(nan)
+; R600-NEXT: CNDE_INT T0.Z, PS, PV.Z, PV.W,
+; R600-NEXT: SETGT_INT T0.W, T4.W, literal.x,
+; R600-NEXT: EXP_IEEE * T0.X, PV.Y,
+; R600-NEXT: -127(nan), 0(0.000000e+00)
+; R600-NEXT: MUL_IEEE T1.X, PS, literal.x,
+; R600-NEXT: CNDE_INT T0.Y, PV.W, PV.Z, T4.W,
+; R600-NEXT: MIN_INT T0.Z, T4.W, literal.y,
+; R600-NEXT: AND_INT T2.W, KC0[3].W, literal.z,
+; R600-NEXT: MUL_IEEE * T3.W, PS, literal.w,
+; R600-NEXT: 2130706432(1.701412e+38), 381(5.338947e-43)
+; R600-NEXT: -4096(nan), 209715200(1.972152e-31)
+; R600-NEXT: MUL_IEEE T2.X, PS, literal.x,
+; R600-NEXT: ADD T1.Y, KC0[3].W, -PV.W,
+; R600-NEXT: ADD_INT T0.Z, PV.Z, literal.y,
+; R600-NEXT: ADD_INT T5.W, T4.W, literal.z,
+; R600-NEXT: SETGT_UINT * T6.W, T4.W, literal.w,
+; R600-NEXT: 209715200(1.972152e-31), -254(nan)
; R600-NEXT: -127(nan), 254(3.559298e-43)
-; R600-NEXT: CNDE_INT T2.X, PS, PV.W, PV.Z,
-; R600-NEXT: SETGT_INT T2.Y, T0.Z, literal.x,
+; R600-NEXT: CNDE_INT T3.X, PS, PV.W, PV.Z,
+; R600-NEXT: SETGT_INT T2.Y, T4.W, literal.x,
; R600-NEXT: MUL_IEEE T0.Z, PV.Y, literal.y,
-; R600-NEXT: MUL_IEEE T3.W, T4.W, literal.z,
-; R600-NEXT: MUL_IEEE * T6.W, PV.X, literal.w,
+; R600-NEXT: MUL_IEEE * T4.W, T2.W, literal.z, BS:VEC_120/SCL_212
; R600-NEXT: 127(1.779649e-43), 967029397(3.122284e-04)
-; R600-NEXT: 1069064192(1.442383e+00), 2130706432(1.701412e+38)
-; R600-NEXT: CNDE_INT T1.X, T5.W, T1.X, PS, BS:VEC_120/SCL_212
-; R600-NEXT: RNDNE T3.Y, PV.W,
-; R600-NEXT: MULADD_IEEE T0.Z, T1.Y, literal.x, PV.Z,
-; R600-NEXT: CNDE_INT T5.W, PV.Y, T1.Z, PV.X,
-; R600-NEXT: CNDE_INT * T1.W, T1.W, T0.Y, T2.W,
; R600-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
-; R600-NEXT: CNDE_INT T0.X, T0.W, PS, T0.X,
+; R600-NEXT: CNDE_INT * T1.W, T1.W, T2.X, T3.W,
+; R600-NEXT: CNDE_INT T0.X, T0.W, PV.W, T0.X, BS:VEC_021/SCL_122
+; R600-NEXT: RNDNE T3.Y, T4.W, BS:VEC_120/SCL_212
+; R600-NEXT: MULADD_IEEE T0.Z, T1.Y, literal.x, T0.Z,
+; R600-NEXT: CNDE_INT T0.W, T2.Y, T0.Y, T3.X, BS:VEC_120/SCL_212
+; R600-NEXT: MUL_IEEE * T1.W, T1.X, literal.y,
+; R600-NEXT: 1069064192(1.442383e+00), 2130706432(1.701412e+38)
+; R600-NEXT: CNDE_INT T1.X, T6.W, T1.X, PS,
; R600-NEXT: LSHL T0.Y, PV.W, literal.x,
; R600-NEXT: AND_INT T1.Z, KC0[3].Z, literal.y,
-; R600-NEXT: MULADD_IEEE T0.W, T4.W, literal.z, PV.Z, BS:VEC_120/SCL_212
-; R600-NEXT: ADD * T1.W, T3.W, -PV.Y,
+; R600-NEXT: MULADD_IEEE T0.W, T2.W, literal.z, PV.Z, BS:VEC_120/SCL_212
+; R600-NEXT: ADD * T1.W, T4.W, -PV.Y,
; R600-NEXT: 23(3.222986e-44), -4096(nan)
; R600-NEXT: 967029397(3.122284e-04), 0(0.000000e+00)
; R600-NEXT: ADD T1.Y, PS, PV.W,
; R600-NEXT: MUL_IEEE T0.Z, PV.Z, literal.x,
; R600-NEXT: ADD_INT T0.W, PV.Y, literal.y,
-; R600-NEXT: CNDE_INT * T1.W, T2.Y, PV.X, T1.X,
+; R600-NEXT: CNDE_INT * T1.W, T2.Y, T0.X, PV.X,
; R600-NEXT: 1069064192(1.442383e+00), 1065353216(1.000000e+00)
; R600-NEXT: MUL_IEEE T0.X, PS, PV.W,
; R600-NEXT: ADD T0.Y, KC0[3].Z, -T1.Z,
@@ -1300,12 +1295,12 @@ define amdgpu_kernel void @s_exp_v3f32(ptr addrspace(1) %out, <3 x float> %in) {
; R600-NEXT: MUL_IEEE * T1.W, PS, literal.z,
; R600-NEXT: -1026650416(-1.032789e+02), 967029397(3.122284e-04)
; R600-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T3.X, T1.X, literal.x,
-; R600-NEXT: MUL_IEEE T2.Y, PS, literal.y,
+; R600-NEXT: MUL_IEEE T3.X, PS, literal.x,
+; R600-NEXT: MUL_IEEE T2.Y, T1.X, literal.y,
; R600-NEXT: MULADD_IEEE T4.Z, T0.Y, literal.z, PV.W,
; R600-NEXT: FLT_TO_INT T0.W, PV.Z,
; R600-NEXT: MIN_INT * T2.W, PV.Y, literal.w,
-; R600-NEXT: 2130706432(1.701412e+38), 209715200(1.972152e-31)
+; R600-NEXT: 209715200(1.972152e-31), 2130706432(1.701412e+38)
; R600-NEXT: 1069064192(1.442383e+00), 381(5.338947e-43)
; R600-NEXT: ADD_INT T4.X, PS, literal.x,
; R600-NEXT: MAX_INT T0.Y, PV.W, literal.y,
@@ -1323,7 +1318,7 @@ define amdgpu_kernel void @s_exp_v3f32(ptr addrspace(1) %out, <3 x float> %in) {
; R600-NEXT: 102(1.429324e-43), -229(nan)
; R600-NEXT: ADD_INT * T6.X, T0.W, literal.x,
; R600-NEXT: -127(nan), 0(0.000000e+00)
-; R600-NEXT: ALU clause starting at 107:
+; R600-NEXT: ALU clause starting at 106:
; R600-NEXT: SETGT_UINT T0.Y, T0.W, literal.x,
; R600-NEXT: CNDE_INT T0.Z, T3.W, T0.Z, T2.W, BS:VEC_102/SCL_221
; R600-NEXT: SETGT_INT T2.W, T0.W, literal.y,
@@ -1339,25 +1334,25 @@ define amdgpu_kernel void @s_exp_v3f32(ptr addrspace(1) %out, <3 x float> %in) {
; R600-NEXT: SETGT_UINT T5.X, T1.Y, literal.x,
; R600-NEXT: CNDE_INT T4.Y, PS, PV.Z, PV.W,
; R600-NEXT: MAX_INT T0.Z, T1.Y, literal.y,
-; R600-NEXT: MUL_IEEE T4.W, T1.Z, literal.z,
-; R600-NEXT: MUL_IEEE * T5.W, PV.Y, literal.w,
+; R600-NEXT: MUL_IEEE T4.W, PV.Y, literal.z,
+; R600-NEXT: MUL_IEEE * T5.W, T1.Z, literal.w,
; R600-NEXT: 254(3.559298e-43), -330(nan)
-; R600-NEXT: 2130706432(1.701412e+38), 209715200(1.972152e-31)
-; R600-NEXT: CNDE_INT T6.X, T3.W, PS, T3.Y, BS:VEC_021/SCL_122
-; R600-NEXT: MUL_IEEE T3.Y, PV.W, literal.x,
+; R600-NEXT: 209715200(1.972152e-31), 2130706432(1.701412e+38)
+; R600-NEXT: MUL_IEEE T6.X, PS, literal.x,
+; R600-NEXT: CNDE_INT T3.Y, T3.W, PV.W, T3.Y, BS:VEC_021/SCL_122
; R600-NEXT: ADD_INT T0.Z, PV.Z, literal.y,
; R600-NEXT: ADD_INT T3.W, T1.Y, literal.z,
-; R600-NEXT: SETGT_UINT * T5.W, T1.Y, literal.w,
+; R600-NEXT: SETGT_UINT * T4.W, T1.Y, literal.w,
; R600-NEXT: 2130706432(1.701412e+38), 204(2.858649e-43)
; R600-NEXT: 102(1.429324e-43), -229(nan)
; R600-NEXT: CNDE_INT T8.X, PS, PV.Z, PV.W,
; R600-NEXT: SETGT_INT T5.Y, T1.Y, literal.x,
-; R600-NEXT: CNDE_INT T0.Z, T0.Y, T4.W, PV.Y, BS:VEC_120/SCL_212
-; R600-NEXT: CNDE_INT T2.W, T2.W, PV.X, T1.Z,
+; R600-NEXT: CNDE_INT T0.Z, T2.W, PV.Y, T1.Z,
+; R600-NEXT: CNDE_INT T2.W, T0.Y, T5.W, PV.X, BS:VEC_120/SCL_212
; R600-NEXT: LSHL * T3.W, T4.Y, literal.y,
; R600-NEXT: -127(nan), 23(3.222986e-44)
; R600-NEXT: ADD_INT T6.X, PS, literal.x,
-; R600-NEXT: CNDE_INT T0.Y, T0.W, PV.W, PV.Z,
+; R600-NEXT: CNDE_INT T0.Y, T0.W, PV.Z, PV.W,
; R600-NEXT: CNDE_INT T0.Z, PV.Y, PV.X, T1.Y,
; R600-NEXT: CNDE_INT T0.W, T5.X, T7.X, T4.X,
; R600-NEXT: SETGT_INT * T2.W, T1.Y, literal.y,
@@ -1365,18 +1360,18 @@ define amdgpu_kernel void @s_exp_v3f32(ptr addrspace(1) %out, <3 x float> %in) {
; R600-NEXT: CNDE_INT T4.X, PS, PV.Z, PV.W,
; R600-NEXT: MUL_IEEE T0.Y, PV.Y, PV.X,
; R600-NEXT: SETGT T0.Z, literal.x, KC0[3].Z,
-; R600-NEXT: CNDE_INT T0.W, T5.W, T2.Y, T1.W,
-; R600-NEXT: MUL_IEEE * T1.W, T3.X, literal.y,
+; R600-NEXT: MUL_IEEE T0.W, T2.Y, literal.y,
+; R600-NEXT: CNDE_INT * T1.W, T4.W, T3.X, T1.W,
; R600-NEXT: -1026650416(-1.032789e+02), 2130706432(1.701412e+38)
-; R600-NEXT: CNDE_INT T3.X, T5.X, T3.X, PS,
-; R600-NEXT: CNDE_INT T1.Y, T5.Y, PV.W, T1.X,
+; R600-NEXT: CNDE_INT T1.X, T5.Y, PS, T1.X,
+; R600-NEXT: CNDE_INT T1.Y, T5.X, T2.Y, PV.W,
; R600-NEXT: CNDE T0.Z, PV.Z, PV.Y, 0.0,
; R600-NEXT: SETGT T0.W, KC0[3].Z, literal.x,
; R600-NEXT: LSHL * T1.W, PV.X, literal.y,
; R600-NEXT: 1118925336(8.872284e+01), 23(3.222986e-44)
-; R600-NEXT: ADD_INT T1.X, PS, literal.x,
+; R600-NEXT: ADD_INT T3.X, PS, literal.x,
; R600-NEXT: CNDE T0.Y, PV.W, PV.Z, literal.y,
-; R600-NEXT: CNDE_INT T0.Z, T2.W, PV.Y, PV.X,
+; R600-NEXT: CNDE_INT T0.Z, T2.W, PV.X, PV.Y,
; R600-NEXT: CNDE T0.W, T2.X, T0.X, 0.0,
; R600-NEXT: SETGT * T1.W, KC0[3].Y, literal.z,
; R600-NEXT: 1065353216(1.000000e+00), 2139095040(INF)
@@ -1397,197 +1392,193 @@ define amdgpu_kernel void @s_exp_v3f32(ptr addrspace(1) %out, <3 x float> %in) {
;
; CM-LABEL: s_exp_v3f32:
; CM: ; %bb.0:
-; CM-NEXT: ALU 102, @6, KC0[CB0:0-32], KC1[]
-; CM-NEXT: ALU 80, @109, KC0[CB0:0-32], KC1[]
-; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T1, T3.X
-; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T2.X, T0.X
+; CM-NEXT: ALU 101, @6, KC0[CB0:0-32], KC1[]
+; CM-NEXT: ALU 77, @108, KC0[CB0:0-32], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0, T1.X
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T2.X, T3.X
; CM-NEXT: CF_END
; CM-NEXT: PAD
; CM-NEXT: ALU clause starting at 6:
; CM-NEXT: AND_INT * T0.W, KC0[3].Y, literal.x,
; CM-NEXT: -4096(nan), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T0.Z, PV.W, literal.x,
; CM-NEXT: ADD * T1.W, KC0[3].Y, -PV.W,
-; CM-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T1.Z, PV.W, literal.x,
-; CM-NEXT: RNDNE * T2.W, PV.Z,
-; CM-NEXT: 967029397(3.122284e-04), 0(0.000000e+00)
-; CM-NEXT: TRUNC T2.Z, PV.W,
+; CM-NEXT: MUL_IEEE T0.Z, PV.W, literal.x,
+; CM-NEXT: MUL_IEEE * T2.W, T0.W, literal.y,
+; CM-NEXT: 967029397(3.122284e-04), 1069064192(1.442383e+00)
+; CM-NEXT: RNDNE T1.Z, PV.W,
; CM-NEXT: MULADD_IEEE * T1.W, T1.W, literal.x, PV.Z,
; CM-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
-; CM-NEXT: MULADD_IEEE T0.Y, T0.W, literal.x, PV.W,
-; CM-NEXT: ADD T0.Z, T0.Z, -T2.W,
-; CM-NEXT: FLT_TO_INT * T0.W, PV.Z,
+; CM-NEXT: MULADD_IEEE T0.Z, T0.W, literal.x, PV.W,
+; CM-NEXT: ADD * T0.W, T2.W, -PV.Z, BS:VEC_120/SCL_212
; CM-NEXT: 967029397(3.122284e-04), 0(0.000000e+00)
-; CM-NEXT: MIN_INT T1.Z, PV.W, literal.x,
-; CM-NEXT: ADD * T1.W, PV.Z, PV.Y,
+; CM-NEXT: TRUNC T1.Z, T1.Z,
+; CM-NEXT: ADD * T0.W, PV.W, PV.Z,
+; CM-NEXT: EXP_IEEE T0.X, T0.W,
+; CM-NEXT: EXP_IEEE T0.Y (MASKED), T0.W,
+; CM-NEXT: EXP_IEEE T0.Z (MASKED), T0.W,
+; CM-NEXT: EXP_IEEE * T0.W (MASKED), T0.W,
+; CM-NEXT: FLT_TO_INT T0.Z, T1.Z,
+; CM-NEXT: MUL_IEEE * T0.W, PV.X, literal.x,
+; CM-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
+; CM-NEXT: MUL_IEEE T0.Y, PV.W, literal.x,
+; CM-NEXT: MAX_INT T1.Z, PV.Z, literal.y,
+; CM-NEXT: MIN_INT * T1.W, PV.Z, literal.z,
+; CM-NEXT: 209715200(1.972152e-31), -330(nan)
; CM-NEXT: 381(5.338947e-43), 0(0.000000e+00)
-; CM-NEXT: EXP_IEEE T0.X, T1.W,
-; CM-NEXT: EXP_IEEE T0.Y (MASKED), T1.W,
-; CM-NEXT: EXP_IEEE T0.Z (MASKED), T1.W,
-; CM-NEXT: EXP_IEEE * T0.W (MASKED), T1.W,
-; CM-NEXT: MUL_IEEE T0.Y, PV.X, literal.x,
-; CM-NEXT: ADD_INT T0.Z, T1.Z, literal.y,
-; CM-NEXT: MAX_INT * T1.W, T0.W, literal.z,
-; CM-NEXT: 2130706432(1.701412e+38), -254(nan)
-; CM-NEXT: -330(nan), 0(0.000000e+00)
-; CM-NEXT: ADD_INT T1.X, T0.W, literal.x,
-; CM-NEXT: ADD_INT T1.Y, PV.W, literal.y,
-; CM-NEXT: ADD_INT T1.Z, T0.W, literal.z,
-; CM-NEXT: SETGT_UINT * T1.W, T0.W, literal.w,
-; CM-NEXT: -127(nan), 204(2.858649e-43)
+; CM-NEXT: ADD_INT T1.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T1.Y, PV.Z, literal.y,
+; CM-NEXT: ADD_INT T1.Z, T0.Z, literal.z,
+; CM-NEXT: SETGT_UINT * T1.W, T0.Z, literal.w,
+; CM-NEXT: -254(nan), 204(2.858649e-43)
; CM-NEXT: 102(1.429324e-43), -229(nan)
-; CM-NEXT: SETGT_UINT T2.X, T0.W, literal.x,
-; CM-NEXT: CNDE_INT T1.Y, PV.W, PV.Y, PV.Z,
-; CM-NEXT: SETGT_INT T1.Z, T0.W, literal.y,
-; CM-NEXT: MUL_IEEE * T2.W, T0.X, literal.z,
-; CM-NEXT: 254(3.559298e-43), -127(nan)
-; CM-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T3.X, PV.W, literal.x,
-; CM-NEXT: CNDE_INT T1.Y, PV.Z, PV.Y, T0.W,
-; CM-NEXT: CNDE_INT T0.Z, PV.X, T1.X, T0.Z,
-; CM-NEXT: SETGT_INT * T0.W, T0.W, literal.y,
-; CM-NEXT: 209715200(1.972152e-31), 127(1.779649e-43)
+; CM-NEXT: ADD_INT T2.X, T0.Z, literal.x,
+; CM-NEXT: SETGT_UINT T2.Y, T0.Z, literal.y,
+; CM-NEXT: CNDE_INT T1.Z, PV.W, PV.Y, PV.Z,
+; CM-NEXT: SETGT_INT * T2.W, T0.Z, literal.x,
+; CM-NEXT: -127(nan), 254(3.559298e-43)
+; CM-NEXT: MUL_IEEE T3.X, T0.X, literal.x,
+; CM-NEXT: CNDE_INT T1.Y, PV.W, PV.Z, T0.Z,
+; CM-NEXT: CNDE_INT T1.Z, PV.Y, PV.X, T1.X,
+; CM-NEXT: SETGT_INT * T3.W, T0.Z, literal.y,
+; CM-NEXT: 2130706432(1.701412e+38), 127(1.779649e-43)
; CM-NEXT: CNDE_INT T1.Y, PV.W, PV.Y, PV.Z,
-; CM-NEXT: CNDE_INT T0.Z, T1.W, PV.X, T2.W,
-; CM-NEXT: MUL_IEEE * T1.W, T0.Y, literal.x,
+; CM-NEXT: MUL_IEEE T0.Z, PV.X, literal.x,
+; CM-NEXT: CNDE_INT * T0.W, T1.W, T0.Y, T0.W,
; CM-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T1.X, T2.X, T0.Y, PV.W,
-; CM-NEXT: CNDE_INT T0.Y, T1.Z, PV.Z, T0.X,
+; CM-NEXT: CNDE_INT T0.X, T2.W, PV.W, T0.X,
+; CM-NEXT: CNDE_INT T0.Y, T2.Y, T3.X, PV.Z,
; CM-NEXT: LSHL T0.Z, PV.Y, literal.x,
-; CM-NEXT: AND_INT * T1.W, KC0[3].Z, literal.y,
+; CM-NEXT: AND_INT * T0.W, KC0[3].Z, literal.y,
; CM-NEXT: 23(3.222986e-44), -4096(nan)
-; CM-NEXT: MUL_IEEE T0.X, PV.W, literal.x,
; CM-NEXT: ADD T1.Y, KC0[3].Z, -PV.W,
-; CM-NEXT: ADD_INT T0.Z, PV.Z, literal.y,
-; CM-NEXT: CNDE_INT * T0.W, T0.W, PV.Y, PV.X,
-; CM-NEXT: 1069064192(1.442383e+00), 1065353216(1.000000e+00)
-; CM-NEXT: MUL_IEEE T0.Y, PV.W, PV.Z,
-; CM-NEXT: MUL_IEEE T0.Z, PV.Y, literal.x,
-; CM-NEXT: RNDNE * T0.W, PV.X,
-; CM-NEXT: 967029397(3.122284e-04), 0(0.000000e+00)
+; CM-NEXT: ADD_INT T0.Z, PV.Z, literal.x,
+; CM-NEXT: CNDE_INT * T1.W, T3.W, PV.X, PV.Y,
+; CM-NEXT: 1065353216(1.000000e+00), 0(0.000000e+00)
+; CM-NEXT: MUL_IEEE T0.X, PV.W, PV.Z,
+; CM-NEXT: MUL_IEEE T0.Y, PV.Y, literal.x,
+; CM-NEXT: MUL_IEEE T0.Z, T0.W, literal.y,
+; CM-NEXT: AND_INT * T1.W, KC0[3].W, literal.z,
+; CM-NEXT: 967029397(3.122284e-04), 1069064192(1.442383e+00)
+; CM-NEXT: -4096(nan), 0(0.000000e+00)
; CM-NEXT: SETGT T1.X, literal.x, KC0[3].Y,
-; CM-NEXT: TRUNC T2.Y, PV.W,
-; CM-NEXT: AND_INT T1.Z, KC0[3].W, literal.y,
-; CM-NEXT: MULADD_IEEE * T2.W, T1.Y, literal.z, PV.Z,
-; CM-NEXT: -1026650416(-1.032789e+02), -4096(nan)
-; CM-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
-; CM-NEXT: MULADD_IEEE T2.X, T1.W, literal.x, PV.W,
-; CM-NEXT: MUL_IEEE T1.Y, PV.Z, literal.y,
-; CM-NEXT: FLT_TO_INT T0.Z, PV.Y,
-; CM-NEXT: ADD * T1.W, KC0[3].W, -PV.Z,
+; CM-NEXT: ADD T2.Y, KC0[3].W, -PV.W,
+; CM-NEXT: RNDNE T1.Z, PV.Z,
+; CM-NEXT: MULADD_IEEE * T2.W, T1.Y, literal.y, PV.Y,
+; CM-NEXT: -1026650416(-1.032789e+02), 1069064192(1.442383e+00)
+; CM-NEXT: MULADD_IEEE T2.X, T0.W, literal.x, PV.W,
+; CM-NEXT: ADD T0.Y, T0.Z, -PV.Z,
+; CM-NEXT: MUL_IEEE T0.Z, PV.Y, literal.x,
+; CM-NEXT: MUL_IEEE * T0.W, T1.W, literal.y, BS:VEC_120/SCL_212
; CM-NEXT: 967029397(3.122284e-04), 1069064192(1.442383e+00)
-; CM-NEXT: ADD T0.X, T0.X, -T0.W,
-; CM-NEXT: MUL_IEEE T2.Y, PV.W, literal.x,
-; CM-NEXT: MAX_INT T2.Z, PV.Z, literal.y,
-; CM-NEXT: RNDNE * T0.W, PV.Y,
-; CM-NEXT: 967029397(3.122284e-04), -330(nan)
-; CM-NEXT: TRUNC T3.X, PV.W,
-; CM-NEXT: ADD_INT T3.Y, PV.Z, literal.x,
-; CM-NEXT: MULADD_IEEE T2.Z, T1.W, literal.y, PV.Y,
-; CM-NEXT: ADD * T1.W, PV.X, T2.X,
-; CM-NEXT: 204(2.858649e-43), 1069064192(1.442383e+00)
-; CM-NEXT: EXP_IEEE T0.X, T1.W,
-; CM-NEXT: EXP_IEEE T0.Y (MASKED), T1.W,
-; CM-NEXT: EXP_IEEE T0.Z (MASKED), T1.W,
-; CM-NEXT: EXP_IEEE * T0.W (MASKED), T1.W,
-; CM-NEXT: ADD_INT T2.X, T0.Z, literal.x,
-; CM-NEXT: MULADD_IEEE T2.Y, T1.Z, literal.y, T2.Z, BS:VEC_102/SCL_221
-; CM-NEXT: ADD T1.Z, T1.Y, -T0.W,
-; CM-NEXT: MUL_IEEE * T0.W, PV.X, literal.z,
-; CM-NEXT: 102(1.429324e-43), 967029397(3.122284e-04)
-; CM-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; CM-NEXT: SETGT_UINT T4.X, T0.Z, literal.x,
-; CM-NEXT: MUL_IEEE T1.Y, PV.W, literal.y,
-; CM-NEXT: SETGT_UINT T2.Z, T0.Z, literal.z,
-; CM-NEXT: ADD * T1.W, PV.Z, PV.Y,
-; CM-NEXT: -229(nan), 2130706432(1.701412e+38)
-; CM-NEXT: 254(3.559298e-43), 0(0.000000e+00)
+; CM-NEXT: TRUNC T3.X, T1.Z,
+; CM-NEXT: RNDNE T1.Y, PV.W,
+; CM-NEXT: MULADD_IEEE T0.Z, T2.Y, literal.x, PV.Z,
+; CM-NEXT: ADD * T2.W, PV.Y, PV.X,
+; CM-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
+; CM-NEXT: EXP_IEEE T0.X (MASKED), T2.W,
+; CM-NEXT: EXP_IEEE T0.Y, T2.W,
+; CM-NEXT: EXP_IEEE T0.Z (MASKED), T2.W,
+; CM-NEXT: EXP_IEEE * T0.W (MASKED), T2.W,
+; CM-NEXT: MULADD_IEEE T2.X, T1.W, literal.x, T0.Z,
+; CM-NEXT: ADD T2.Y, T0.W, -T1.Y, BS:VEC_120/SCL_212
+; CM-NEXT: FLT_TO_INT T0.Z, T3.X,
+; CM-NEXT: MUL_IEEE * T0.W, PV.Y, literal.y,
+; CM-NEXT: 967029397(3.122284e-04), 209715200(1.972152e-31)
+; CM-NEXT: MUL_IEEE T3.X, PV.W, literal.x,
+; CM-NEXT: SETGT_UINT T3.Y, PV.Z, literal.y,
+; CM-NEXT: TRUNC T1.Z, T1.Y,
+; CM-NEXT: ADD * T1.W, PV.Y, PV.X,
+; CM-NEXT: 209715200(1.972152e-31), -229(nan)
; CM-NEXT: EXP_IEEE T1.X (MASKED), T1.W,
-; CM-NEXT: EXP_IEEE T1.Y (MASKED), T1.W,
-; CM-NEXT: EXP_IEEE T1.Z, T1.W,
+; CM-NEXT: EXP_IEEE T1.Y, T1.W,
+; CM-NEXT: EXP_IEEE T1.Z (MASKED), T1.W,
; CM-NEXT: EXP_IEEE * T1.W (MASKED), T1.W,
-; CM-NEXT: ALU clause starting at 109:
-; CM-NEXT: CNDE_INT T5.X, T2.Z, T0.W, T1.Y,
-; CM-NEXT: CNDE_INT T1.Y, T4.X, T3.Y, T2.X,
-; CM-NEXT: FLT_TO_INT T3.Z, T3.X, BS:VEC_120/SCL_212
-; CM-NEXT: MUL_IEEE * T0.W, T1.Z, literal.x, BS:VEC_120/SCL_212
-; CM-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; CM-NEXT: SETGT_INT T2.X, T0.Z, literal.x,
-; CM-NEXT: MUL_IEEE T2.Y, T0.X, literal.y,
-; CM-NEXT: MUL_IEEE T4.Z, PV.W, literal.z,
-; CM-NEXT: SETGT_UINT * T1.W, PV.Z, literal.w,
-; CM-NEXT: -127(nan), 209715200(1.972152e-31)
-; CM-NEXT: 2130706432(1.701412e+38), 254(3.559298e-43)
-; CM-NEXT: CNDE_INT T3.X, PV.W, T0.W, PV.Z,
-; CM-NEXT: MUL_IEEE T3.Y, PV.Y, literal.x,
-; CM-NEXT: CNDE_INT T4.Z, PV.X, T1.Y, T0.Z,
-; CM-NEXT: MAX_INT * T0.W, T3.Z, literal.y,
-; CM-NEXT: 209715200(1.972152e-31), -330(nan)
-; CM-NEXT: ADD_INT T6.X, PV.W, literal.x,
-; CM-NEXT: ADD_INT T1.Y, T3.Z, literal.y,
-; CM-NEXT: SETGT_UINT T5.Z, T3.Z, literal.z,
-; CM-NEXT: MUL_IEEE * T0.W, T1.Z, literal.w, BS:VEC_120/SCL_212
+; CM-NEXT: FLT_TO_INT T2.X, T1.Z,
+; CM-NEXT: MUL_IEEE T2.Y, PV.Y, literal.x,
+; CM-NEXT: CNDE_INT T1.Z, T3.Y, T3.X, T0.W,
+; CM-NEXT: SETGT_INT * T0.W, T0.Z, literal.y, BS:VEC_120/SCL_212
+; CM-NEXT: 209715200(1.972152e-31), -127(nan)
+; CM-NEXT: CNDE_INT T3.X, PV.W, PV.Z, T0.Y,
+; CM-NEXT: MUL_IEEE * T4.Y, PV.Y, literal.x,
+; CM-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
+; CM-NEXT: ALU clause starting at 108:
+; CM-NEXT: SETGT_UINT T1.Z, T2.X, literal.x,
+; CM-NEXT: MAX_INT * T1.W, T0.Z, literal.y,
+; CM-NEXT: -229(nan), -330(nan)
+; CM-NEXT: ADD_INT T4.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T5.Y, T0.Z, literal.y,
+; CM-NEXT: CNDE_INT T2.Z, PV.Z, T4.Y, T2.Y,
+; CM-NEXT: SETGT_INT * T1.W, T2.X, literal.z,
; CM-NEXT: 204(2.858649e-43), 102(1.429324e-43)
-; CM-NEXT: -229(nan), 209715200(1.972152e-31)
-; CM-NEXT: MUL_IEEE T7.X, PV.W, literal.x,
-; CM-NEXT: MIN_INT T4.Y, T3.Z, literal.y,
-; CM-NEXT: CNDE_INT T6.Z, PV.Z, PV.X, PV.Y,
-; CM-NEXT: SETGT_INT * T2.W, T3.Z, literal.z,
-; CM-NEXT: 209715200(1.972152e-31), 381(5.338947e-43)
; CM-NEXT: -127(nan), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T6.X, PV.W, PV.Z, T3.Z,
-; CM-NEXT: MIN_INT T1.Y, T0.Z, literal.x,
-; CM-NEXT: ADD_INT T6.Z, PV.Y, literal.y,
-; CM-NEXT: ADD_INT * T3.W, T3.Z, literal.z, BS:VEC_120/SCL_212
-; CM-NEXT: 381(5.338947e-43), -254(nan)
-; CM-NEXT: -127(nan), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T8.X, T1.W, PV.W, PV.Z,
-; CM-NEXT: SETGT_INT T4.Y, T3.Z, literal.x,
-; CM-NEXT: ADD_INT T3.Z, PV.Y, literal.y,
-; CM-NEXT: ADD_INT * T1.W, T0.Z, literal.z, BS:VEC_120/SCL_212
+; CM-NEXT: CNDE_INT T5.X, PV.W, PV.Z, T1.Y,
+; CM-NEXT: MUL_IEEE T0.Y, T0.Y, literal.x,
+; CM-NEXT: MAX_INT T2.Z, T2.X, literal.y,
+; CM-NEXT: CNDE_INT * T2.W, T3.Y, PV.X, PV.Y, BS:VEC_120/SCL_212
+; CM-NEXT: 2130706432(1.701412e+38), -330(nan)
+; CM-NEXT: CNDE_INT T4.X, T0.W, PV.W, T0.Z,
+; CM-NEXT: ADD_INT T2.Y, PV.Z, literal.x,
+; CM-NEXT: ADD_INT T2.Z, T2.X, literal.y,
+; CM-NEXT: MIN_INT * T0.W, T2.X, literal.z,
+; CM-NEXT: 204(2.858649e-43), 102(1.429324e-43)
+; CM-NEXT: 381(5.338947e-43), 0(0.000000e+00)
+; CM-NEXT: ADD_INT T6.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T3.Y, T2.X, literal.y,
+; CM-NEXT: SETGT_UINT T3.Z, T2.X, literal.z,
+; CM-NEXT: CNDE_INT * T0.W, T1.Z, PV.Y, PV.Z,
+; CM-NEXT: -254(nan), -127(nan)
+; CM-NEXT: 254(3.559298e-43), 0(0.000000e+00)
+; CM-NEXT: MUL_IEEE T7.X, T1.Y, literal.x,
+; CM-NEXT: CNDE_INT T1.Y, T1.W, PV.W, T2.X,
+; CM-NEXT: CNDE_INT T1.Z, PV.Z, PV.Y, PV.X,
+; CM-NEXT: MIN_INT * T0.W, T0.Z, literal.y,
+; CM-NEXT: 2130706432(1.701412e+38), 381(5.338947e-43)
+; CM-NEXT: SETGT_INT T2.X, T2.X, literal.x,
+; CM-NEXT: ADD_INT T2.Y, PV.W, literal.y,
+; CM-NEXT: ADD_INT T2.Z, T0.Z, literal.z,
+; CM-NEXT: SETGT_UINT * T0.W, T0.Z, literal.w,
; CM-NEXT: 127(1.779649e-43), -254(nan)
-; CM-NEXT: -127(nan), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T9.X, T2.Z, PV.W, PV.Z,
-; CM-NEXT: SETGT_INT T1.Y, T0.Z, literal.x, BS:VEC_120/SCL_212
-; CM-NEXT: CNDE_INT T0.Z, PV.Y, T6.X, PV.X,
-; CM-NEXT: CNDE_INT * T0.W, T5.Z, T7.X, T0.W, BS:VEC_201
-; CM-NEXT: 127(1.779649e-43), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T6.X, T2.W, PV.W, T1.Z,
-; CM-NEXT: LSHL T5.Y, PV.Z, literal.x,
-; CM-NEXT: CNDE_INT T0.Z, PV.Y, T4.Z, PV.X,
-; CM-NEXT: CNDE_INT * T0.W, T4.X, T3.Y, T2.Y,
-; CM-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T0.X, T2.X, PV.W, T0.X,
-; CM-NEXT: LSHL T2.Y, PV.Z, literal.x,
+; CM-NEXT: -127(nan), 254(3.559298e-43)
+; CM-NEXT: CNDE_INT T6.X, PV.W, PV.Z, PV.Y,
+; CM-NEXT: SETGT_INT T2.Y, T0.Z, literal.x,
+; CM-NEXT: CNDE_INT T0.Z, PV.X, T1.Y, T1.Z,
+; CM-NEXT: MUL_IEEE * T1.W, T7.X, literal.y,
+; CM-NEXT: 127(1.779649e-43), 2130706432(1.701412e+38)
+; CM-NEXT: CNDE_INT T7.X, T3.Z, T7.X, PV.W,
+; CM-NEXT: LSHL T1.Y, PV.Z, literal.x,
+; CM-NEXT: CNDE_INT T0.Z, PV.Y, T4.X, PV.X, BS:VEC_021/SCL_122
+; CM-NEXT: MUL_IEEE * T1.W, T0.Y, literal.y,
+; CM-NEXT: 23(3.222986e-44), 2130706432(1.701412e+38)
+; CM-NEXT: CNDE_INT T4.X, T0.W, T0.Y, PV.W,
+; CM-NEXT: LSHL T0.Y, PV.Z, literal.x,
; CM-NEXT: ADD_INT T0.Z, PV.Y, literal.y,
-; CM-NEXT: CNDE_INT * T0.W, T4.Y, PV.X, T3.X, BS:VEC_021/SCL_122
+; CM-NEXT: CNDE_INT * T0.W, T2.X, T5.X, PV.X,
; CM-NEXT: 23(3.222986e-44), 1065353216(1.000000e+00)
; CM-NEXT: MUL_IEEE T2.X, PV.W, PV.Z,
-; CM-NEXT: SETGT T3.Y, literal.x, KC0[3].W,
+; CM-NEXT: SETGT T1.Y, literal.x, KC0[3].W,
; CM-NEXT: ADD_INT T0.Z, PV.Y, literal.y,
-; CM-NEXT: CNDE_INT * T0.W, T1.Y, PV.X, T5.X,
+; CM-NEXT: CNDE_INT * T0.W, T2.Y, T3.X, PV.X,
; CM-NEXT: -1026650416(-1.032789e+02), 1065353216(1.000000e+00)
-; CM-NEXT: MUL_IEEE T0.X, PV.W, PV.Z,
-; CM-NEXT: SETGT T1.Y, literal.x, KC0[3].Z,
+; CM-NEXT: MUL_IEEE T3.X, PV.W, PV.Z,
+; CM-NEXT: SETGT T0.Y, literal.x, KC0[3].Z,
; CM-NEXT: CNDE T0.Z, PV.Y, PV.X, 0.0,
; CM-NEXT: SETGT * T0.W, KC0[3].W, literal.y,
; CM-NEXT: -1026650416(-1.032789e+02), 1118925336(8.872284e+01)
; CM-NEXT: CNDE T2.X, PV.W, PV.Z, literal.x,
-; CM-NEXT: CNDE T1.Y, PV.Y, PV.X, 0.0,
+; CM-NEXT: CNDE T0.Y, PV.Y, PV.X, 0.0,
; CM-NEXT: SETGT T0.Z, KC0[3].Z, literal.y,
; CM-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.z,
; CM-NEXT: 2139095040(INF), 1118925336(8.872284e+01)
; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
-; CM-NEXT: LSHR T0.X, PV.W, literal.x,
-; CM-NEXT: CNDE T1.Y, PV.Z, PV.Y, literal.y,
-; CM-NEXT: CNDE T0.Z, T1.X, T0.Y, 0.0,
+; CM-NEXT: LSHR T3.X, PV.W, literal.x,
+; CM-NEXT: CNDE T0.Y, PV.Z, PV.Y, literal.y,
+; CM-NEXT: CNDE T0.Z, T1.X, T0.X, 0.0,
; CM-NEXT: SETGT * T0.W, KC0[3].Y, literal.z,
; CM-NEXT: 2(2.802597e-45), 2139095040(INF)
; CM-NEXT: 1118925336(8.872284e+01), 0(0.000000e+00)
-; CM-NEXT: CNDE * T1.X, PV.W, PV.Z, literal.x,
+; CM-NEXT: CNDE * T0.X, PV.W, PV.Z, literal.x,
; CM-NEXT: 2139095040(INF), 0(0.000000e+00)
-; CM-NEXT: LSHR * T3.X, KC0[2].Y, literal.x,
+; CM-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%result = call <3 x float> @llvm.exp.v3f32(<3 x float> %in)
store <3 x float> %result, ptr addrspace(1) %out
@@ -2050,227 +2041,224 @@ define amdgpu_kernel void @s_exp_v4f32(ptr addrspace(1) %out, <4 x float> %in) {
; R600-LABEL: s_exp_v4f32:
; R600: ; %bb.0:
; R600-NEXT: ALU 98, @6, KC0[CB0:0-32], KC1[]
-; R600-NEXT: ALU 98, @105, KC0[CB0:0-32], KC1[]
-; R600-NEXT: ALU 24, @204, KC0[CB0:0-32], KC1[]
+; R600-NEXT: ALU 95, @105, KC0[CB0:0-32], KC1[]
+; R600-NEXT: ALU 24, @201, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.XYZW, T0.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 6:
; R600-NEXT: AND_INT * T0.W, KC0[3].Z, literal.x,
; R600-NEXT: -4096(nan), 0(0.000000e+00)
-; R600-NEXT: ADD T1.W, KC0[3].Z, -PV.W,
-; R600-NEXT: MUL_IEEE * T2.W, PV.W, literal.x,
+; R600-NEXT: ADD * T1.W, KC0[3].Z, -PV.W,
+; R600-NEXT: MUL_IEEE T2.W, PV.W, literal.x,
+; R600-NEXT: MUL_IEEE * T3.W, T0.W, literal.y,
+; R600-NEXT: 967029397(3.122284e-04), 1069064192(1.442383e+00)
+; R600-NEXT: RNDNE T4.W, PS,
+; R600-NEXT: MULADD_IEEE * T1.W, T1.W, literal.x, PV.W, BS:VEC_021/SCL_122
; R600-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
-; R600-NEXT: RNDNE T3.W, PS,
-; R600-NEXT: MUL_IEEE * T4.W, PV.W, literal.x,
+; R600-NEXT: MULADD_IEEE T0.W, T0.W, literal.x, PS,
+; R600-NEXT: ADD * T1.W, T3.W, -PV.W,
; R600-NEXT: 967029397(3.122284e-04), 0(0.000000e+00)
-; R600-NEXT: MULADD_IEEE T1.W, T1.W, literal.x, PS,
-; R600-NEXT: TRUNC * T4.W, PV.W,
-; R600-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
-; R600-NEXT: FLT_TO_INT T0.Z, PS,
-; R600-NEXT: MULADD_IEEE T0.W, T0.W, literal.x, PV.W,
-; R600-NEXT: ADD * T1.W, T2.W, -T3.W,
-; R600-NEXT: 967029397(3.122284e-04), 0(0.000000e+00)
-; R600-NEXT: ADD T1.Z, PS, PV.W,
-; R600-NEXT: MAX_INT T0.W, PV.Z, literal.x,
-; R600-NEXT: MIN_INT * T1.W, PV.Z, literal.y,
-; R600-NEXT: -330(nan), 381(5.338947e-43)
-; R600-NEXT: ADD_INT T0.X, PS, literal.x,
-; R600-NEXT: ADD_INT T0.Y, PV.W, literal.y,
-; R600-NEXT: ADD_INT T2.Z, T0.Z, literal.z,
-; R600-NEXT: SETGT_UINT T0.W, T0.Z, literal.w,
-; R600-NEXT: EXP_IEEE * T1.X, PV.Z,
-; R600-NEXT: -254(nan), 204(2.858649e-43)
-; R600-NEXT: 102(1.429324e-43), -229(nan)
-; R600-NEXT: ADD_INT T2.X, T0.Z, literal.x,
-; R600-NEXT: SETGT_UINT T1.Y, T0.Z, literal.y,
-; R600-NEXT: CNDE_INT T1.Z, PV.W, PV.Y, PV.Z,
-; R600-NEXT: SETGT_INT T1.W, T0.Z, literal.x,
-; R600-NEXT: MUL_IEEE * T2.W, PS, literal.z,
-; R600-NEXT: -127(nan), 254(3.559298e-43)
-; R600-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T3.X, T1.X, literal.x,
-; R600-NEXT: MUL_IEEE T0.Y, PS, literal.y,
-; R600-NEXT: CNDE_INT T1.Z, PV.W, PV.Z, T0.Z,
-; R600-NEXT: CNDE_INT T3.W, PV.Y, PV.X, T0.X,
-; R600-NEXT: SETGT_INT * T4.W, T0.Z, literal.z,
-; R600-NEXT: 2130706432(1.701412e+38), 209715200(1.972152e-31)
-; R600-NEXT: 127(1.779649e-43), 0(0.000000e+00)
-; R600-NEXT: AND_INT T2.Y, KC0[4].X, literal.x,
-; R600-NEXT: CNDE_INT T0.Z, PS, PV.Z, PV.W,
-; R600-NEXT: CNDE_INT T0.W, T0.W, PV.Y, T2.W,
-; R600-NEXT: MUL_IEEE * T2.W, PV.X, literal.y,
-; R600-NEXT: -4096(nan), 2130706432(1.701412e+38)
-; R600-NEXT: CNDE_INT T0.X, T1.Y, T3.X, PS,
-; R600-NEXT: CNDE_INT T0.Y, T1.W, PV.W, T1.X,
-; R600-NEXT: LSHL T0.Z, PV.Z, literal.x,
-; R600-NEXT: ADD T0.W, KC0[4].X, -PV.Y,
-; R600-NEXT: MUL_IEEE * T1.W, PV.Y, literal.y,
-; R600-NEXT: 23(3.222986e-44), 1069064192(1.442383e+00)
-; R600-NEXT: RNDNE T1.Y, PS,
-; R600-NEXT: MUL_IEEE T1.Z, PV.W, literal.x,
-; R600-NEXT: ADD_INT T2.W, PV.Z, literal.y,
-; R600-NEXT: CNDE_INT * T3.W, T4.W, PV.Y, PV.X,
-; R600-NEXT: 967029397(3.122284e-04), 1065353216(1.000000e+00)
-; R600-NEXT: MUL_IEEE T0.Y, PS, PV.W,
-; R600-NEXT: AND_INT T0.Z, KC0[3].W, literal.x,
-; R600-NEXT: MULADD_IEEE T0.W, T0.W, literal.y, PV.Z,
-; R600-NEXT: TRUNC * T2.W, PV.Y,
-; R600-NEXT: -4096(nan), 1069064192(1.442383e+00)
-; R600-NEXT: SETGT T0.X, literal.x, KC0[3].Z,
-; R600-NEXT: FLT_TO_INT T3.Y, PS,
-; R600-NEXT: MULADD_IEEE T1.Z, T2.Y, literal.y, PV.W,
-; R600-NEXT: ADD T0.W, T1.W, -T1.Y,
-; R600-NEXT: MUL_IEEE * T1.W, PV.Z, literal.z,
-; R600-NEXT: -1026650416(-1.032789e+02), 967029397(3.122284e-04)
-; R600-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
-; R600-NEXT: RNDNE T1.X, PS,
-; R600-NEXT: AND_INT T1.Y, KC0[3].Y, literal.x,
-; R600-NEXT: ADD T1.Z, PV.W, PV.Z,
-; R600-NEXT: MAX_INT T0.W, PV.Y, literal.y,
-; R600-NEXT: MIN_INT * T2.W, PV.Y, literal.z,
-; R600-NEXT: -4096(nan), -330(nan)
+; R600-NEXT: ADD T0.W, PS, PV.W,
+; R600-NEXT: TRUNC * T1.W, T4.W,
+; R600-NEXT: FLT_TO_INT T1.W, PS,
+; R600-NEXT: EXP_IEEE * T0.X, PV.W,
+; R600-NEXT: MUL_IEEE T0.Z, PS, literal.x,
+; R600-NEXT: MAX_INT T0.W, PV.W, literal.y,
+; R600-NEXT: MIN_INT * T2.W, PV.W, literal.z,
+; R600-NEXT: 209715200(1.972152e-31), -330(nan)
; R600-NEXT: 381(5.338947e-43), 0(0.000000e+00)
-; R600-NEXT: ADD_INT T2.X, PS, literal.x,
-; R600-NEXT: ADD_INT T2.Y, PV.W, literal.y,
-; R600-NEXT: ADD_INT T2.Z, T3.Y, literal.z,
-; R600-NEXT: SETGT_UINT T0.W, T3.Y, literal.w,
-; R600-NEXT: EXP_IEEE * T1.Z, PV.Z,
-; R600-NEXT: -254(nan), 204(2.858649e-43)
-; R600-NEXT: 102(1.429324e-43), -229(nan)
-; R600-NEXT: ADD_INT T3.X, T3.Y, literal.x,
-; R600-NEXT: SETGT_UINT T4.Y, T3.Y, literal.y,
-; R600-NEXT: CNDE_INT T2.Z, PV.W, PV.Y, PV.Z,
-; R600-NEXT: SETGT_INT T2.W, T3.Y, literal.x,
-; R600-NEXT: MUL_IEEE * T3.W, PS, literal.z,
+; R600-NEXT: ADD_INT T1.X, PS, literal.x,
+; R600-NEXT: AND_INT T0.Y, KC0[4].X, literal.y,
+; R600-NEXT: ADD_INT T1.Z, PV.W, literal.z,
+; R600-NEXT: ADD_INT * T0.W, T1.W, literal.w,
+; R600-NEXT: -254(nan), -4096(nan)
+; R600-NEXT: 204(2.858649e-43), 102(1.429324e-43)
+; R600-NEXT: SETGT_UINT * T2.W, T1.W, literal.x,
+; R600-NEXT: -229(nan), 0(0.000000e+00)
+; R600-NEXT: ADD_INT T2.X, T1.W, literal.x,
+; R600-NEXT: SETGT_UINT T1.Y, T1.W, literal.y,
+; R600-NEXT: CNDE_INT T1.Z, PV.W, T1.Z, T0.W,
+; R600-NEXT: SETGT_INT T0.W, T1.W, literal.x,
+; R600-NEXT: ADD * T3.W, KC0[4].X, -T0.Y,
; R600-NEXT: -127(nan), 254(3.559298e-43)
-; R600-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T4.X, T1.Z, literal.x,
-; R600-NEXT: MUL_IEEE T2.Y, PS, literal.y,
-; R600-NEXT: CNDE_INT T2.Z, PV.W, PV.Z, T3.Y,
-; R600-NEXT: CNDE_INT T4.W, PV.Y, PV.X, T2.X,
-; R600-NEXT: SETGT_INT * T5.W, T3.Y, literal.z,
-; R600-NEXT: 2130706432(1.701412e+38), 209715200(1.972152e-31)
+; R600-NEXT: MUL_IEEE T3.X, PS, literal.x,
+; R600-NEXT: MUL_IEEE T2.Y, T0.Y, literal.y,
+; R600-NEXT: CNDE_INT T1.Z, PV.W, PV.Z, T1.W,
+; R600-NEXT: CNDE_INT T4.W, PV.Y, PV.X, T1.X,
+; R600-NEXT: SETGT_INT * T1.W, T1.W, literal.z,
+; R600-NEXT: 967029397(3.122284e-04), 1069064192(1.442383e+00)
; R600-NEXT: 127(1.779649e-43), 0(0.000000e+00)
-; R600-NEXT: ADD T2.X, KC0[3].W, -T0.Z,
-; R600-NEXT: CNDE_INT T3.Y, PS, PV.Z, PV.W,
-; R600-NEXT: CNDE_INT * T2.Z, T0.W, PV.Y, T3.W,
-; R600-NEXT: ALU clause starting at 105:
-; R600-NEXT: MUL_IEEE T0.W, T4.X, literal.x,
-; R600-NEXT: ADD * T3.W, KC0[3].Y, -T1.Y,
+; R600-NEXT: CNDE_INT T1.X, PS, PV.Z, PV.W,
+; R600-NEXT: RNDNE T3.Y, PV.Y,
+; R600-NEXT: MULADD_IEEE T1.Z, T3.W, literal.x, PV.X,
+; R600-NEXT: MUL_IEEE T3.W, T0.Z, literal.y,
+; R600-NEXT: MUL_IEEE * T4.W, T0.X, literal.z,
+; R600-NEXT: 1069064192(1.442383e+00), 209715200(1.972152e-31)
; R600-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
+; R600-NEXT: MUL_IEEE T2.X, PS, literal.x,
+; R600-NEXT: CNDE_INT T4.Y, T2.W, PV.W, T0.Z,
+; R600-NEXT: MULADD_IEEE T0.Z, T0.Y, literal.y, PV.Z,
+; R600-NEXT: ADD T2.W, T2.Y, -PV.Y, BS:VEC_120/SCL_212
+; R600-NEXT: AND_INT * T3.W, KC0[3].Y, literal.z,
+; R600-NEXT: 2130706432(1.701412e+38), 967029397(3.122284e-04)
+; R600-NEXT: -4096(nan), 0(0.000000e+00)
; R600-NEXT: MUL_IEEE T3.X, PS, literal.x,
-; R600-NEXT: MUL_IEEE T2.Y, T1.Y, literal.y,
-; R600-NEXT: CNDE_INT T3.Z, T4.Y, T4.X, PV.W, BS:VEC_120/SCL_212
-; R600-NEXT: CNDE_INT T0.W, T2.W, T2.Z, T1.Z,
-; R600-NEXT: LSHL * T2.W, T3.Y, literal.z,
-; R600-NEXT: 967029397(3.122284e-04), 1069064192(1.442383e+00)
-; R600-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; R600-NEXT: ADD_INT T4.X, PS, literal.x,
-; R600-NEXT: CNDE_INT T3.Y, T5.W, PV.W, PV.Z,
-; R600-NEXT: RNDNE T1.Z, PV.Y,
-; R600-NEXT: MULADD_IEEE T0.W, T3.W, literal.y, PV.X, BS:VEC_120/SCL_212
-; R600-NEXT: MUL_IEEE * T2.W, T2.X, literal.z,
+; R600-NEXT: ADD T0.Y, PV.W, PV.Z,
+; R600-NEXT: CNDE_INT T0.Z, T0.W, PV.Y, T0.X, BS:VEC_021/SCL_122
+; R600-NEXT: CNDE_INT T0.W, T1.Y, T4.W, PV.X,
+; R600-NEXT: LSHL * T2.W, T1.X, literal.y,
+; R600-NEXT: 1069064192(1.442383e+00), 23(3.222986e-44)
+; R600-NEXT: AND_INT T0.X, KC0[3].W, literal.x,
+; R600-NEXT: TRUNC T1.Y, T3.Y,
+; R600-NEXT: ADD_INT T1.Z, PS, literal.y,
+; R600-NEXT: CNDE_INT T0.W, T1.W, PV.Z, PV.W,
+; R600-NEXT: EXP_IEEE * T0.Y, PV.Y,
+; R600-NEXT: -4096(nan), 1065353216(1.000000e+00)
+; R600-NEXT: MUL_IEEE T1.X, PV.W, PV.Z,
+; R600-NEXT: FLT_TO_INT T1.Y, PV.Y,
+; R600-NEXT: MUL_IEEE T0.Z, PS, literal.x,
+; R600-NEXT: ADD T0.W, KC0[3].W, -PV.X,
+; R600-NEXT: RNDNE * T1.W, T3.X,
+; R600-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
+; R600-NEXT: SETGT T2.X, literal.x, KC0[3].Z,
+; R600-NEXT: TRUNC T2.Y, PS,
+; R600-NEXT: MUL_IEEE T1.Z, PV.W, literal.y,
+; R600-NEXT: MUL_IEEE T2.W, PV.Z, literal.z,
+; R600-NEXT: MAX_INT * T4.W, PV.Y, literal.w,
+; R600-NEXT: -1026650416(-1.032789e+02), 967029397(3.122284e-04)
+; R600-NEXT: 209715200(1.972152e-31), -330(nan)
+; R600-NEXT: ADD T4.X, KC0[3].Y, -T3.W,
+; R600-NEXT: ADD_INT T3.Y, PS, literal.x,
+; R600-NEXT: ADD_INT T2.Z, T1.Y, literal.y,
+; R600-NEXT: SETGT_UINT T4.W, T1.Y, literal.z,
+; R600-NEXT: MIN_INT * T5.W, T1.Y, literal.w,
+; R600-NEXT: 204(2.858649e-43), 102(1.429324e-43)
+; R600-NEXT: -229(nan), 381(5.338947e-43)
+; R600-NEXT: ADD_INT T5.X, PS, literal.x,
+; R600-NEXT: ADD_INT T4.Y, T1.Y, literal.y,
+; R600-NEXT: SETGT_UINT T3.Z, T1.Y, literal.z,
+; R600-NEXT: CNDE_INT T5.W, PV.W, PV.Y, PV.Z,
+; R600-NEXT: SETGT_INT * T6.W, T1.Y, literal.y,
+; R600-NEXT: -254(nan), -127(nan)
+; R600-NEXT: 254(3.559298e-43), 0(0.000000e+00)
+; R600-NEXT: MUL_IEEE T6.X, T0.Y, literal.x,
+; R600-NEXT: CNDE_INT T3.Y, PS, PV.W, T1.Y,
+; R600-NEXT: CNDE_INT * T2.Z, PV.Z, PV.Y, PV.X,
+; R600-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
+; R600-NEXT: ALU clause starting at 105:
+; R600-NEXT: SETGT_INT T5.W, T1.Y, literal.x,
+; R600-NEXT: MUL_IEEE * T7.W, T4.X, literal.y,
+; R600-NEXT: 127(1.779649e-43), 967029397(3.122284e-04)
+; R600-NEXT: MUL_IEEE T5.X, T0.X, literal.x,
+; R600-NEXT: MULADD_IEEE T1.Y, T4.X, literal.x, PS, BS:VEC_120/SCL_212
+; R600-NEXT: CNDE_INT T2.Z, PV.W, T3.Y, T2.Z,
+; R600-NEXT: MUL_IEEE T7.W, T6.X, literal.y, BS:VEC_201
+; R600-NEXT: CNDE_INT * T2.W, T4.W, T2.W, T0.Z,
+; R600-NEXT: 1069064192(1.442383e+00), 2130706432(1.701412e+38)
+; R600-NEXT: CNDE_INT T4.X, T6.W, PS, T0.Y,
+; R600-NEXT: CNDE_INT T0.Y, T3.Z, T6.X, PV.W,
+; R600-NEXT: LSHL T0.Z, PV.Z, literal.x,
+; R600-NEXT: MULADD_IEEE T2.W, T3.W, literal.y, PV.Y, BS:VEC_201
+; R600-NEXT: ADD * T1.W, T3.X, -T1.W,
+; R600-NEXT: 23(3.222986e-44), 967029397(3.122284e-04)
+; R600-NEXT: ADD T3.X, PS, PV.W,
+; R600-NEXT: ADD_INT T1.Y, PV.Z, literal.x,
+; R600-NEXT: CNDE_INT T0.Z, T5.W, PV.X, PV.Y,
+; R600-NEXT: RNDNE T1.W, T5.X,
+; R600-NEXT: MULADD_IEEE * T0.W, T0.W, literal.y, T1.Z, BS:VEC_021/SCL_122
; R600-NEXT: 1065353216(1.000000e+00), 1069064192(1.442383e+00)
-; R600-NEXT: 967029397(3.122284e-04), 0(0.000000e+00)
-; R600-NEXT: MULADD_IEEE T2.X, T2.X, literal.x, PS,
-; R600-NEXT: MULADD_IEEE T1.Y, T1.Y, literal.y, PV.W,
-; R600-NEXT: ADD T2.Z, T2.Y, -PV.Z, BS:VEC_120/SCL_212
-; R600-NEXT: MUL_IEEE T0.W, PV.Y, PV.X,
-; R600-NEXT: SETGT * T2.W, literal.z, KC0[4].X,
-; R600-NEXT: 1069064192(1.442383e+00), 967029397(3.122284e-04)
-; R600-NEXT: -1026650416(-1.032789e+02), 0(0.000000e+00)
-; R600-NEXT: CNDE T3.X, PS, PV.W, 0.0,
-; R600-NEXT: ADD T1.Y, PV.Z, PV.Y,
-; R600-NEXT: TRUNC T1.Z, T1.Z,
-; R600-NEXT: MULADD_IEEE T0.W, T0.Z, literal.x, PV.X, BS:VEC_120/SCL_212
-; R600-NEXT: ADD * T1.W, T1.W, -T1.X,
-; R600-NEXT: 967029397(3.122284e-04), 0(0.000000e+00)
-; R600-NEXT: SETGT T2.X, KC0[4].X, literal.x,
-; R600-NEXT: ADD T2.Y, PS, PV.W,
-; R600-NEXT: FLT_TO_INT T0.Z, PV.Z,
-; R600-NEXT: TRUNC T0.W, T1.X,
-; R600-NEXT: EXP_IEEE * T1.X, PV.Y,
-; R600-NEXT: 1118925336(8.872284e+01), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T4.X, PS, literal.x,
-; R600-NEXT: FLT_TO_INT T1.Y, PV.W,
-; R600-NEXT: MAX_INT T1.Z, PV.Z, literal.y,
-; R600-NEXT: MUL_IEEE T0.W, PS, literal.z,
-; R600-NEXT: EXP_IEEE * T1.W, PV.Y,
-; R600-NEXT: 2130706432(1.701412e+38), -330(nan)
+; R600-NEXT: MULADD_IEEE T0.X, T0.X, literal.x, PS,
+; R600-NEXT: ADD T0.Y, T5.X, -PV.W, BS:VEC_120/SCL_212
+; R600-NEXT: MUL_IEEE T0.Z, PV.Z, PV.Y,
+; R600-NEXT: SETGT T0.W, literal.y, KC0[4].X,
+; R600-NEXT: EXP_IEEE * T1.Y, PV.X,
+; R600-NEXT: 967029397(3.122284e-04), -1026650416(-1.032789e+02)
+; R600-NEXT: CNDE T3.X, PV.W, PV.Z, 0.0,
+; R600-NEXT: ADD T0.Y, PV.Y, PV.X,
+; R600-NEXT: FLT_TO_INT T0.Z, T2.Y,
+; R600-NEXT: TRUNC T0.W, T1.W,
+; R600-NEXT: MUL_IEEE * T1.W, PS, literal.x,
; R600-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T5.X, PV.W, literal.x,
-; R600-NEXT: MUL_IEEE T2.Y, PS, literal.x,
-; R600-NEXT: ADD_INT T1.Z, PV.Z, literal.y,
-; R600-NEXT: ADD_INT T2.W, T0.Z, literal.z,
-; R600-NEXT: MAX_INT * T3.W, PV.Y, literal.w,
-; R600-NEXT: 209715200(1.972152e-31), 204(2.858649e-43)
-; R600-NEXT: 102(1.429324e-43), -330(nan)
-; R600-NEXT: SETGT_UINT T6.X, T0.Z, literal.x,
-; R600-NEXT: ADD_INT T3.Y, PS, literal.y,
-; R600-NEXT: ADD_INT T2.Z, T1.Y, literal.z,
-; R600-NEXT: SETGT_UINT T3.W, T1.Y, literal.x,
-; R600-NEXT: MIN_INT * T4.W, T1.Y, literal.w,
+; R600-NEXT: SETGT T0.X, KC0[4].X, literal.x,
+; R600-NEXT: MUL_IEEE T2.Y, PS, literal.y,
+; R600-NEXT: FLT_TO_INT T1.Z, PV.W,
+; R600-NEXT: MAX_INT T0.W, PV.Z, literal.z,
+; R600-NEXT: EXP_IEEE * T0.Y, PV.Y,
+; R600-NEXT: 1118925336(8.872284e+01), 209715200(1.972152e-31)
+; R600-NEXT: -330(nan), 0(0.000000e+00)
+; R600-NEXT: MUL_IEEE T4.X, T1.Y, literal.x,
+; R600-NEXT: MUL_IEEE T3.Y, PS, literal.y,
+; R600-NEXT: ADD_INT T2.Z, PV.W, literal.z,
+; R600-NEXT: ADD_INT * T0.W, T0.Z, literal.w,
+; R600-NEXT: 2130706432(1.701412e+38), 209715200(1.972152e-31)
+; R600-NEXT: 204(2.858649e-43), 102(1.429324e-43)
+; R600-NEXT: MAX_INT * T2.W, T1.Z, literal.x,
+; R600-NEXT: -330(nan), 0(0.000000e+00)
+; R600-NEXT: SETGT_UINT T5.X, T0.Z, literal.x,
+; R600-NEXT: ADD_INT T4.Y, PV.W, literal.y,
+; R600-NEXT: ADD_INT T3.Z, T1.Z, literal.z, BS:VEC_120/SCL_212
+; R600-NEXT: SETGT_UINT T2.W, T1.Z, literal.x, BS:VEC_120/SCL_212
+; R600-NEXT: MIN_INT * T3.W, T1.Z, literal.w,
; R600-NEXT: -229(nan), 204(2.858649e-43)
; R600-NEXT: 102(1.429324e-43), 381(5.338947e-43)
-; R600-NEXT: ADD_INT T7.X, PS, literal.x,
-; R600-NEXT: ADD_INT T4.Y, T1.Y, literal.y,
-; R600-NEXT: SETGT_UINT T3.Z, T1.Y, literal.z,
-; R600-NEXT: CNDE_INT T4.W, PV.W, PV.Y, PV.Z,
-; R600-NEXT: SETGT_INT * T5.W, T1.Y, literal.y,
+; R600-NEXT: ADD_INT T6.X, PS, literal.x,
+; R600-NEXT: ADD_INT T5.Y, T1.Z, literal.y,
+; R600-NEXT: SETGT_UINT T4.Z, T1.Z, literal.z,
+; R600-NEXT: CNDE_INT T3.W, PV.W, PV.Y, PV.Z,
+; R600-NEXT: SETGT_INT * T4.W, T1.Z, literal.y,
; R600-NEXT: -254(nan), -127(nan)
; R600-NEXT: 254(3.559298e-43), 0(0.000000e+00)
-; R600-NEXT: CNDE_INT T8.X, PS, PV.W, T1.Y,
-; R600-NEXT: CNDE_INT T3.Y, PV.Z, PV.Y, PV.X,
-; R600-NEXT: SETGT_INT T2.Z, T1.Y, literal.x,
-; R600-NEXT: CNDE_INT T2.W, T6.X, T1.Z, T2.W,
-; R600-NEXT: SETGT_INT * T4.W, T0.Z, literal.y,
+; R600-NEXT: CNDE_INT T7.X, PS, PV.W, T1.Z, BS:VEC_021/SCL_122
+; R600-NEXT: CNDE_INT T4.Y, PV.Z, PV.Y, PV.X,
+; R600-NEXT: SETGT_INT T1.Z, T1.Z, literal.x, BS:VEC_120/SCL_212
+; R600-NEXT: CNDE_INT T0.W, T5.X, T2.Z, T0.W, BS:VEC_102/SCL_221
+; R600-NEXT: SETGT_INT * T3.W, T0.Z, literal.y,
; R600-NEXT: 127(1.779649e-43), -127(nan)
-; R600-NEXT: CNDE_INT T7.X, PS, PV.W, T0.Z,
-; R600-NEXT: CNDE_INT T1.Y, PV.Z, PV.X, PV.Y,
-; R600-NEXT: MIN_INT T1.Z, T0.Z, literal.x,
-; R600-NEXT: MUL_IEEE T2.W, T1.W, literal.y,
-; R600-NEXT: MUL_IEEE * T6.W, T2.Y, literal.z,
-; R600-NEXT: 381(5.338947e-43), 2130706432(1.701412e+38)
-; R600-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; R600-NEXT: CNDE_INT T8.X, T3.W, PS, T2.Y,
-; R600-NEXT: MUL_IEEE T2.Y, PV.W, literal.x,
-; R600-NEXT: ADD_INT T1.Z, PV.Z, literal.y,
-; R600-NEXT: ADD_INT T3.W, T0.Z, literal.z,
-; R600-NEXT: SETGT_UINT * T6.W, T0.Z, literal.w,
+; R600-NEXT: CNDE_INT T6.X, PS, PV.W, T0.Z,
+; R600-NEXT: CNDE_INT T4.Y, PV.Z, PV.X, PV.Y,
+; R600-NEXT: MIN_INT T2.Z, T0.Z, literal.x,
+; R600-NEXT: MUL_IEEE T0.W, T3.Y, literal.y,
+; R600-NEXT: MUL_IEEE * T5.W, T0.Y, literal.z,
+; R600-NEXT: 381(5.338947e-43), 209715200(1.972152e-31)
+; R600-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
+; R600-NEXT: MUL_IEEE T7.X, PS, literal.x,
+; R600-NEXT: CNDE_INT T3.Y, T2.W, PV.W, T3.Y,
+; R600-NEXT: ADD_INT T2.Z, PV.Z, literal.y,
+; R600-NEXT: ADD_INT T0.W, T0.Z, literal.z,
+; R600-NEXT: SETGT_UINT * T2.W, T0.Z, literal.w,
; R600-NEXT: 2130706432(1.701412e+38), -254(nan)
; R600-NEXT: -127(nan), 254(3.559298e-43)
-; R600-NEXT: CNDE_INT T9.X, PS, PV.W, PV.Z,
-; R600-NEXT: SETGT_INT T3.Y, T0.Z, literal.x,
-; R600-NEXT: CNDE_INT T0.Z, T3.Z, T2.W, PV.Y, BS:VEC_120/SCL_212
-; R600-NEXT: CNDE_INT T1.W, T5.W, PV.X, T1.W, BS:VEC_021/SCL_122
-; R600-NEXT: LSHL * T2.W, T1.Y, literal.y,
+; R600-NEXT: CNDE_INT T8.X, PS, PV.W, PV.Z,
+; R600-NEXT: SETGT_INT T5.Y, T0.Z, literal.x,
+; R600-NEXT: CNDE_INT T0.Z, T4.W, PV.Y, T0.Y, BS:VEC_021/SCL_122
+; R600-NEXT: CNDE_INT T0.W, T4.Z, T5.W, PV.X, BS:VEC_120/SCL_212
+; R600-NEXT: LSHL * T4.W, T4.Y, literal.y,
; R600-NEXT: 127(1.779649e-43), 23(3.222986e-44)
-; R600-NEXT: ADD_INT T8.X, PS, literal.x,
-; R600-NEXT: CNDE_INT T1.Y, T2.Z, PV.W, PV.Z,
-; R600-NEXT: CNDE_INT T0.Z, PV.Y, T7.X, PV.X,
-; R600-NEXT: CNDE_INT * T0.W, T6.X, T5.X, T0.W, BS:VEC_021/SCL_122
-; R600-NEXT: 1065353216(1.000000e+00), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE * T1.W, T4.X, literal.x,
-; R600-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; R600-NEXT: CNDE_INT T4.X, T6.W, T4.X, PV.W,
-; R600-NEXT: CNDE_INT * T2.Y, T4.W, T0.W, T1.X, BS:VEC_120/SCL_212
-; R600-NEXT: ALU clause starting at 204:
+; R600-NEXT: ADD_INT T7.X, PS, literal.x,
+; R600-NEXT: CNDE_INT T0.Y, T1.Z, PV.Z, PV.W,
+; R600-NEXT: CNDE_INT T0.Z, PV.Y, T6.X, PV.X,
+; R600-NEXT: MUL_IEEE T0.W, T4.X, literal.y,
+; R600-NEXT: CNDE_INT * T1.W, T5.X, T2.Y, T1.W,
+; R600-NEXT: 1065353216(1.000000e+00), 2130706432(1.701412e+38)
+; R600-NEXT: CNDE_INT T5.X, T3.W, PS, T1.Y,
+; R600-NEXT: CNDE_INT * T1.Y, T2.W, T4.X, PV.W, BS:VEC_120/SCL_212
+; R600-NEXT: ALU clause starting at 201:
; R600-NEXT: LSHL T0.Z, T0.Z, literal.x,
-; R600-NEXT: MUL_IEEE T0.W, T1.Y, T8.X,
+; R600-NEXT: MUL_IEEE T0.W, T0.Y, T7.X,
; R600-NEXT: SETGT * T1.W, literal.y, KC0[3].W,
; R600-NEXT: 23(3.222986e-44), -1026650416(-1.032789e+02)
-; R600-NEXT: CNDE T1.X, PS, PV.W, 0.0,
-; R600-NEXT: SETGT T1.Y, KC0[3].W, literal.x,
+; R600-NEXT: CNDE T4.X, PS, PV.W, 0.0,
+; R600-NEXT: SETGT T0.Y, KC0[3].W, literal.x,
; R600-NEXT: ADD_INT T0.Z, PV.Z, literal.y,
-; R600-NEXT: CNDE_INT T0.W, T3.Y, T2.Y, T4.X, BS:VEC_120/SCL_212
-; R600-NEXT: CNDE * T1.W, T2.X, T3.X, literal.z,
+; R600-NEXT: CNDE_INT T0.W, T5.Y, T5.X, T1.Y, BS:VEC_102/SCL_221
+; R600-NEXT: CNDE * T1.W, T0.X, T3.X, literal.z,
; R600-NEXT: 1118925336(8.872284e+01), 1065353216(1.000000e+00)
; R600-NEXT: 2139095040(INF), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T2.X, PV.W, PV.Z,
+; R600-NEXT: MUL_IEEE T0.X, PV.W, PV.Z,
; R600-NEXT: SETGT T2.Y, literal.x, KC0[3].Y,
; R600-NEXT: CNDE T1.Z, PV.Y, PV.X, literal.y,
-; R600-NEXT: CNDE T0.W, T0.X, T0.Y, 0.0,
+; R600-NEXT: CNDE T0.W, T2.X, T1.X, 0.0,
; R600-NEXT: SETGT * T2.W, KC0[3].Z, literal.z,
; R600-NEXT: -1026650416(-1.032789e+02), 2139095040(INF)
; R600-NEXT: 1118925336(8.872284e+01), 0(0.000000e+00)
@@ -2285,8 +2273,8 @@ define amdgpu_kernel void @s_exp_v4f32(ptr addrspace(1) %out, <4 x float> %in) {
; CM-LABEL: s_exp_v4f32:
; CM: ; %bb.0:
; CM-NEXT: ALU 97, @6, KC0[CB0:0-32], KC1[]
-; CM-NEXT: ALU 100, @104, KC0[CB0:0-32], KC1[]
-; CM-NEXT: ALU 36, @205, KC0[CB0:0-32], KC1[]
+; CM-NEXT: ALU 97, @104, KC0[CB0:0-32], KC1[]
+; CM-NEXT: ALU 35, @202, KC0[CB0:0-32], KC1[]
; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0, T1.X
; CM-NEXT: CF_END
; CM-NEXT: PAD
@@ -2305,224 +2293,220 @@ define amdgpu_kernel void @s_exp_v4f32(ptr addrspace(1) %out, <4 x float> %in) {
; CM-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
; CM-NEXT: MULADD_IEEE T0.X, T0.W, literal.x, PV.W,
; CM-NEXT: ADD T0.Y, T0.Z, -PV.Z,
-; CM-NEXT: MUL_IEEE T0.Z, PV.Y, literal.x,
-; CM-NEXT: MUL_IEEE * T0.W, T2.W, literal.y, BS:VEC_120/SCL_212
+; CM-NEXT: MUL_IEEE T0.Z, T2.W, literal.y, BS:VEC_120/SCL_212
+; CM-NEXT: MUL_IEEE * T0.W, PV.Y, literal.x,
; CM-NEXT: 967029397(3.122284e-04), 1069064192(1.442383e+00)
; CM-NEXT: TRUNC T1.X, T1.Z,
-; CM-NEXT: RNDNE T2.Y, PV.W,
-; CM-NEXT: MULADD_IEEE T0.Z, T1.Y, literal.x, PV.Z,
-; CM-NEXT: ADD * T1.W, PV.Y, PV.X,
+; CM-NEXT: MULADD_IEEE T1.Y, T1.Y, literal.x, PV.W,
+; CM-NEXT: RNDNE T1.Z, PV.Z,
+; CM-NEXT: ADD * T0.W, PV.Y, PV.X,
; CM-NEXT: 1069064192(1.442383e+00), 0(0.000000e+00)
+; CM-NEXT: EXP_IEEE T0.X, T0.W,
+; CM-NEXT: EXP_IEEE T0.Y (MASKED), T0.W,
+; CM-NEXT: EXP_IEEE T0.Z (MASKED), T0.W,
+; CM-NEXT: EXP_IEEE * T0.W (MASKED), T0.W,
+; CM-NEXT: TRUNC T2.X, T1.Z,
+; CM-NEXT: MULADD_IEEE T0.Y, T2.W, literal.x, T1.Y,
+; CM-NEXT: FLT_TO_INT T2.Z, T1.X,
+; CM-NEXT: MUL_IEEE * T0.W, PV.X, literal.y,
+; CM-NEXT: 967029397(3.122284e-04), 209715200(1.972152e-31)
+; CM-NEXT: ADD T1.X, T0.Z, -T1.Z,
+; CM-NEXT: MUL_IEEE T1.Y, PV.W, literal.x,
+; CM-NEXT: MAX_INT T0.Z, PV.Z, literal.y,
+; CM-NEXT: MIN_INT * T1.W, PV.Z, literal.z,
+; CM-NEXT: 209715200(1.972152e-31), -330(nan)
+; CM-NEXT: 381(5.338947e-43), 0(0.000000e+00)
+; CM-NEXT: ADD_INT T3.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T2.Y, PV.Z, literal.y,
+; CM-NEXT: ADD_INT T0.Z, T2.Z, literal.z,
+; CM-NEXT: SETGT_UINT * T1.W, T2.Z, literal.w,
+; CM-NEXT: -254(nan), 204(2.858649e-43)
+; CM-NEXT: 102(1.429324e-43), -229(nan)
+; CM-NEXT: ADD_INT T4.X, T2.Z, literal.x,
+; CM-NEXT: SETGT_UINT T3.Y, T2.Z, literal.y,
+; CM-NEXT: CNDE_INT T0.Z, PV.W, PV.Y, PV.Z,
+; CM-NEXT: SETGT_INT * T2.W, T2.Z, literal.x,
+; CM-NEXT: -127(nan), 254(3.559298e-43)
+; CM-NEXT: MUL_IEEE T5.X, T0.X, literal.x,
+; CM-NEXT: CNDE_INT T2.Y, PV.W, PV.Z, T2.Z,
+; CM-NEXT: CNDE_INT T0.Z, PV.Y, PV.X, T3.X,
+; CM-NEXT: SETGT_INT * T3.W, T2.Z, literal.y,
+; CM-NEXT: 2130706432(1.701412e+38), 127(1.779649e-43)
+; CM-NEXT: AND_INT T3.X, KC0[3].Z, literal.x,
+; CM-NEXT: CNDE_INT T2.Y, PV.W, PV.Y, PV.Z,
+; CM-NEXT: MUL_IEEE T0.Z, PV.X, literal.y,
+; CM-NEXT: CNDE_INT * T0.W, T1.W, T1.Y, T0.W,
+; CM-NEXT: -4096(nan), 2130706432(1.701412e+38)
+; CM-NEXT: CNDE_INT T0.X, T2.W, PV.W, T0.X,
+; CM-NEXT: CNDE_INT T1.Y, T3.Y, T5.X, PV.Z,
+; CM-NEXT: LSHL T0.Z, PV.Y, literal.x,
+; CM-NEXT: MUL_IEEE * T0.W, PV.X, literal.y,
+; CM-NEXT: 23(3.222986e-44), 1069064192(1.442383e+00)
+; CM-NEXT: RNDNE T4.X, PV.W,
+; CM-NEXT: ADD_INT T2.Y, PV.Z, literal.x,
+; CM-NEXT: CNDE_INT T0.Z, T3.W, PV.X, PV.Y,
+; CM-NEXT: ADD * T1.W, T1.X, T0.Y,
+; CM-NEXT: 1065353216(1.000000e+00), 0(0.000000e+00)
; CM-NEXT: EXP_IEEE T0.X, T1.W,
; CM-NEXT: EXP_IEEE T0.Y (MASKED), T1.W,
; CM-NEXT: EXP_IEEE T0.Z (MASKED), T1.W,
; CM-NEXT: EXP_IEEE * T0.W (MASKED), T1.W,
-; CM-NEXT: MULADD_IEEE T2.X, T2.W, literal.x, T0.Z,
-; CM-NEXT: ADD T0.Y, T0.W, -T2.Y, BS:VEC_120/SCL_212
-; CM-NEXT: FLT_TO_INT T0.Z, T1.X,
-; CM-NEXT: MUL_IEEE * T0.W, PV.X, literal.y,
-; CM-NEXT: 967029397(3.122284e-04), 209715200(1.972152e-31)
-; CM-NEXT: MUL_IEEE T1.X, PV.W, literal.x,
+; CM-NEXT: MUL_IEEE T1.X, T0.Z, T2.Y,
+; CM-NEXT: TRUNC T0.Y, T4.X,
+; CM-NEXT: FLT_TO_INT T0.Z, T2.X, BS:VEC_120/SCL_212
+; CM-NEXT: MUL_IEEE * T1.W, PV.X, literal.x,
+; CM-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
+; CM-NEXT: MUL_IEEE T2.X, PV.W, literal.x,
; CM-NEXT: MUL_IEEE T1.Y, T0.X, literal.y,
; CM-NEXT: MAX_INT T1.Z, PV.Z, literal.z,
-; CM-NEXT: MIN_INT * T1.W, PV.Z, literal.w,
+; CM-NEXT: MIN_INT * T2.W, PV.Z, literal.w,
; CM-NEXT: 209715200(1.972152e-31), 2130706432(1.701412e+38)
; CM-NEXT: -330(nan), 381(5.338947e-43)
-; CM-NEXT: ADD_INT T3.X, PV.W, literal.x,
-; CM-NEXT: ADD_INT T3.Y, PV.Z, literal.y,
+; CM-NEXT: ADD_INT T5.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T2.Y, PV.Z, literal.y,
; CM-NEXT: ADD_INT T1.Z, T0.Z, literal.z,
-; CM-NEXT: SETGT_UINT * T1.W, T0.Z, literal.w,
+; CM-NEXT: SETGT_UINT * T2.W, T0.Z, literal.w,
; CM-NEXT: -254(nan), 204(2.858649e-43)
; CM-NEXT: 102(1.429324e-43), -229(nan)
-; CM-NEXT: ADD_INT T4.X, T0.Z, literal.x,
-; CM-NEXT: SETGT_UINT T4.Y, T0.Z, literal.y,
+; CM-NEXT: ADD_INT T6.X, T0.Z, literal.x,
+; CM-NEXT: SETGT_UINT T3.Y, T0.Z, literal.y,
; CM-NEXT: CNDE_INT T1.Z, PV.W, PV.Y, PV.Z,
-; CM-NEXT: SETGT_INT * T2.W, T0.Z, literal.x,
+; CM-NEXT: SETGT_INT * T3.W, T0.Z, literal.x,
; CM-NEXT: -127(nan), 254(3.559298e-43)
-; CM-NEXT: CNDE_INT T5.X, PV.W, PV.Z, T0.Z,
-; CM-NEXT: CNDE_INT T3.Y, PV.Y, PV.X, T3.X,
-; CM-NEXT: SETGT_INT T0.Z, T0.Z, literal.x,
-; CM-NEXT: MUL_IEEE * T3.W, T1.Y, literal.y,
-; CM-NEXT: 127(1.779649e-43), 2130706432(1.701412e+38)
-; CM-NEXT: CNDE_INT T3.X, T4.Y, T1.Y, PV.W,
-; CM-NEXT: AND_INT T1.Y, KC0[3].Z, literal.x,
-; CM-NEXT: CNDE_INT T1.Z, PV.Z, PV.X, PV.Y,
-; CM-NEXT: CNDE_INT * T0.W, T1.W, T1.X, T0.W,
-; CM-NEXT: -4096(nan), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T0.X, T2.W, PV.W, T0.X,
-; CM-NEXT: LSHL T3.Y, PV.Z, literal.x,
-; CM-NEXT: TRUNC T1.Z, T2.Y,
-; CM-NEXT: ADD * T0.W, KC0[3].Z, -PV.Y,
-; CM-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T1.X, PV.W, literal.x,
-; CM-NEXT: FLT_TO_INT T2.Y, PV.Z,
-; CM-NEXT: ADD_INT T1.Z, PV.Y, literal.y,
-; CM-NEXT: CNDE_INT * T1.W, T0.Z, PV.X, T3.X,
-; CM-NEXT: 967029397(3.122284e-04), 1065353216(1.000000e+00)
-; CM-NEXT: MUL_IEEE T0.X, PV.W, PV.Z,
-; CM-NEXT: MIN_INT T3.Y, PV.Y, literal.x,
-; CM-NEXT: MULADD_IEEE T0.Z, T0.W, literal.y, PV.X,
-; CM-NEXT: ADD * T0.W, T0.Y, T2.X,
-; CM-NEXT: 381(5.338947e-43), 1069064192(1.442383e+00)
-; CM-NEXT: EXP_IEEE T0.X (MASKED), T0.W,
-; CM-NEXT: EXP_IEEE T0.Y, T0.W,
-; CM-NEXT: EXP_IEEE T0.Z (MASKED), T0.W,
-; CM-NEXT: EXP_IEEE * T0.W (MASKED), T0.W,
-; CM-NEXT: MULADD_IEEE T1.X, T1.Y, literal.x, T0.Z,
-; CM-NEXT: MUL_IEEE T4.Y, PV.Y, literal.y,
-; CM-NEXT: ADD_INT T0.Z, T3.Y, literal.z, BS:VEC_120/SCL_212
-; CM-NEXT: MAX_INT * T0.W, T2.Y, literal.w, BS:VEC_201
-; CM-NEXT: 967029397(3.122284e-04), 2130706432(1.701412e+38)
-; CM-NEXT: -254(nan), -330(nan)
-; CM-NEXT: ADD_INT T2.X, T2.Y, literal.x,
-; CM-NEXT: ADD_INT T3.Y, PV.W, literal.y,
-; CM-NEXT: ADD_INT T1.Z, T2.Y, literal.z,
-; CM-NEXT: SETGT_UINT * T0.W, T2.Y, literal.w,
-; CM-NEXT: -127(nan), 204(2.858649e-43)
-; CM-NEXT: 102(1.429324e-43), -229(nan)
-; CM-NEXT: SETGT_UINT T3.X, T2.Y, literal.x,
-; CM-NEXT: CNDE_INT T3.Y, PV.W, PV.Y, PV.Z,
-; CM-NEXT: SETGT_INT T1.Z, T2.Y, literal.y,
-; CM-NEXT: MUL_IEEE * T1.W, T0.Y, literal.z, BS:VEC_120/SCL_212
-; CM-NEXT: 254(3.559298e-43), -127(nan)
-; CM-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T4.X, PV.W, literal.x,
-; CM-NEXT: CNDE_INT * T3.Y, PV.Z, PV.Y, T2.Y,
-; CM-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; CM-NEXT: ALU clause starting at 104:
-; CM-NEXT: CNDE_INT T0.Z, T3.X, T2.X, T0.Z,
-; CM-NEXT: SETGT_INT * T2.W, T2.Y, literal.x,
+; CM-NEXT: CNDE_INT T7.X, PV.W, PV.Z, T0.Z,
+; CM-NEXT: CNDE_INT T2.Y, PV.Y, PV.X, T5.X,
+; CM-NEXT: SETGT_INT * T0.Z, T0.Z, literal.x,
; CM-NEXT: 127(1.779649e-43), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T2.X, T1.Y, literal.x,
-; CM-NEXT: CNDE_INT T1.Y, PV.W, T3.Y, PV.Z,
-; CM-NEXT: CNDE_INT T0.Z, T0.W, T4.X, T1.W,
-; CM-NEXT: MUL_IEEE * T0.W, T4.Y, literal.y, BS:VEC_201
-; CM-NEXT: 1069064192(1.442383e+00), 2130706432(1.701412e+38)
-; CM-NEXT: AND_INT T4.X, KC0[4].X, literal.x,
-; CM-NEXT: CNDE_INT T2.Y, T3.X, T4.Y, PV.W,
-; CM-NEXT: CNDE_INT T0.Z, T1.Z, PV.Z, T0.Y,
-; CM-NEXT: LSHL * T0.W, PV.Y, literal.y,
-; CM-NEXT: -4096(nan), 23(3.222986e-44)
-; CM-NEXT: ADD_INT T3.X, PV.W, literal.x,
-; CM-NEXT: CNDE_INT T0.Y, T2.W, PV.Z, PV.Y,
-; CM-NEXT: MUL_IEEE T0.Z, PV.X, literal.y,
-; CM-NEXT: RNDNE * T0.W, T2.X,
-; CM-NEXT: 1065353216(1.000000e+00), 1069064192(1.442383e+00)
-; CM-NEXT: ADD T2.X, T2.X, -PV.W,
-; CM-NEXT: RNDNE T1.Y, PV.Z,
-; CM-NEXT: MUL_IEEE T1.Z, PV.Y, PV.X,
-; CM-NEXT: SETGT * T1.W, literal.x, KC0[3].W,
-; CM-NEXT: -1026650416(-1.032789e+02), 0(0.000000e+00)
-; CM-NEXT: CNDE T3.X, PV.W, PV.Z, 0.0,
-; CM-NEXT: TRUNC T0.Y, T0.W,
-; CM-NEXT: TRUNC T1.Z, PV.Y,
-; CM-NEXT: ADD * T0.W, PV.X, T1.X,
+; CM-NEXT: ALU clause starting at 104:
+; CM-NEXT: ADD * T4.W, KC0[3].Z, -T3.X,
+; CM-NEXT: MUL_IEEE T5.X, PV.W, literal.x,
+; CM-NEXT: CNDE_INT T2.Y, T0.Z, T7.X, T2.Y,
+; CM-NEXT: MUL_IEEE T1.Z, T1.Y, literal.y,
+; CM-NEXT: CNDE_INT * T1.W, T2.W, T2.X, T1.W, BS:VEC_021/SCL_122
+; CM-NEXT: 967029397(3.122284e-04), 2130706432(1.701412e+38)
+; CM-NEXT: CNDE_INT T0.X, T3.W, PV.W, T0.X,
+; CM-NEXT: CNDE_INT T1.Y, T3.Y, T1.Y, PV.Z,
+; CM-NEXT: LSHL T1.Z, PV.Y, literal.x,
+; CM-NEXT: MULADD_IEEE * T1.W, T4.W, literal.y, PV.X, BS:VEC_120/SCL_212
+; CM-NEXT: 23(3.222986e-44), 1069064192(1.442383e+00)
+; CM-NEXT: MULADD_IEEE T2.X, T3.X, literal.x, PV.W,
+; CM-NEXT: ADD T2.Y, T0.W, -T4.X,
+; CM-NEXT: ADD_INT T1.Z, PV.Z, literal.y,
+; CM-NEXT: CNDE_INT * T0.W, T0.Z, PV.X, PV.Y,
+; CM-NEXT: 967029397(3.122284e-04), 1065353216(1.000000e+00)
+; CM-NEXT: AND_INT T0.X, KC0[4].X, literal.x,
+; CM-NEXT: MUL_IEEE T1.Y, PV.W, PV.Z,
+; CM-NEXT: SETGT T0.Z, literal.y, KC0[3].W,
+; CM-NEXT: ADD * T0.W, PV.Y, PV.X,
+; CM-NEXT: -4096(nan), -1026650416(-1.032789e+02)
; CM-NEXT: EXP_IEEE T0.X (MASKED), T0.W,
; CM-NEXT: EXP_IEEE T0.Y (MASKED), T0.W,
; CM-NEXT: EXP_IEEE T0.Z (MASKED), T0.W,
; CM-NEXT: EXP_IEEE * T0.W, T0.W,
-; CM-NEXT: FLT_TO_INT T1.X, T1.Z,
-; CM-NEXT: FLT_TO_INT T0.Y, T0.Y,
-; CM-NEXT: MUL_IEEE T1.Z, PV.W, literal.x,
-; CM-NEXT: ADD * T1.W, KC0[4].X, -T4.X,
-; CM-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T2.X, PV.W, literal.x,
-; CM-NEXT: MUL_IEEE T2.Y, T0.W, literal.y,
-; CM-NEXT: MUL_IEEE T2.Z, PV.Z, literal.z,
-; CM-NEXT: SETGT_UINT * T2.W, PV.Y, literal.w,
-; CM-NEXT: 967029397(3.122284e-04), 209715200(1.972152e-31)
-; CM-NEXT: 2130706432(1.701412e+38), 254(3.559298e-43)
-; CM-NEXT: CNDE_INT T5.X, PV.W, T1.Z, PV.Z,
-; CM-NEXT: MUL_IEEE T3.Y, PV.Y, literal.x,
-; CM-NEXT: MULADD_IEEE T1.Z, T1.W, literal.y, PV.X,
-; CM-NEXT: MAX_INT * T1.W, T1.X, literal.z,
-; CM-NEXT: 209715200(1.972152e-31), 1069064192(1.442383e+00)
-; CM-NEXT: -330(nan), 0(0.000000e+00)
-; CM-NEXT: ADD_INT T2.X, PV.W, literal.x,
-; CM-NEXT: ADD_INT T4.Y, T1.X, literal.y,
-; CM-NEXT: MULADD_IEEE T1.Z, T4.X, literal.z, PV.Z, BS:VEC_120/SCL_212
-; CM-NEXT: MAX_INT * T1.W, T0.Y, literal.w,
-; CM-NEXT: 204(2.858649e-43), 102(1.429324e-43)
+; CM-NEXT: CNDE T2.X, T0.Z, T1.Y, 0.0,
+; CM-NEXT: ADD T1.Y, KC0[4].X, -T0.X,
+; CM-NEXT: FLT_TO_INT T0.Z, T0.Y,
+; CM-NEXT: MUL_IEEE * T1.W, PV.W, literal.x,
+; CM-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
+; CM-NEXT: MUL_IEEE T3.X, PV.W, literal.x,
+; CM-NEXT: SETGT_UINT T0.Y, PV.Z, literal.y,
+; CM-NEXT: MUL_IEEE T1.Z, PV.Y, literal.z,
+; CM-NEXT: MUL_IEEE * T2.W, T0.X, literal.w,
+; CM-NEXT: 209715200(1.972152e-31), -229(nan)
+; CM-NEXT: 967029397(3.122284e-04), 1069064192(1.442383e+00)
+; CM-NEXT: RNDNE T4.X, PV.W,
+; CM-NEXT: MULADD_IEEE T1.Y, T1.Y, literal.x, PV.Z,
+; CM-NEXT: CNDE_INT T1.Z, PV.Y, PV.X, T1.W,
+; CM-NEXT: SETGT_INT * T1.W, T0.Z, literal.y,
+; CM-NEXT: 1069064192(1.442383e+00), -127(nan)
+; CM-NEXT: CNDE_INT T3.X, PV.W, PV.Z, T0.W,
+; CM-NEXT: MULADD_IEEE T1.Y, T0.X, literal.x, PV.Y,
+; CM-NEXT: ADD T1.Z, T2.W, -PV.X,
+; CM-NEXT: MAX_INT * T2.W, T0.Z, literal.y,
; CM-NEXT: 967029397(3.122284e-04), -330(nan)
-; CM-NEXT: ADD T4.X, T0.Z, -T1.Y,
-; CM-NEXT: ADD_INT T1.Y, PV.W, literal.x,
-; CM-NEXT: ADD_INT T0.Z, T0.Y, literal.y,
-; CM-NEXT: SETGT_UINT * T1.W, T0.Y, literal.z,
+; CM-NEXT: ADD_INT T0.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T2.Y, T0.Z, literal.y,
+; CM-NEXT: TRUNC T2.Z, T4.X,
+; CM-NEXT: ADD * T2.W, PV.Z, PV.Y,
; CM-NEXT: 204(2.858649e-43), 102(1.429324e-43)
-; CM-NEXT: -229(nan), 0(0.000000e+00)
-; CM-NEXT: SETGT_UINT T6.X, T1.X, literal.x,
-; CM-NEXT: CNDE_INT T1.Y, PV.W, PV.Y, PV.Z,
-; CM-NEXT: SETGT_INT T0.Z, T0.Y, literal.y,
-; CM-NEXT: ADD * T3.W, PV.X, T1.Z,
-; CM-NEXT: -229(nan), -127(nan)
-; CM-NEXT: EXP_IEEE T1.X (MASKED), T3.W,
-; CM-NEXT: EXP_IEEE T1.Y (MASKED), T3.W,
-; CM-NEXT: EXP_IEEE T1.Z, T3.W,
-; CM-NEXT: EXP_IEEE * T1.W (MASKED), T3.W,
-; CM-NEXT: CNDE_INT T4.X, T0.Z, T1.Y, T0.Y,
-; CM-NEXT: CNDE_INT T1.Y, T6.X, T2.X, T4.Y, BS:VEC_120/SCL_212
-; CM-NEXT: SETGT_INT T2.Z, T1.X, literal.x,
-; CM-NEXT: MUL_IEEE * T3.W, PV.Z, literal.y,
-; CM-NEXT: -127(nan), 209715200(1.972152e-31)
-; CM-NEXT: MUL_IEEE T2.X, T1.Z, literal.x,
-; CM-NEXT: MUL_IEEE T4.Y, PV.W, literal.y,
-; CM-NEXT: CNDE_INT T3.Z, PV.Z, PV.Y, T1.X,
-; CM-NEXT: MIN_INT * T4.W, T1.X, literal.z,
+; CM-NEXT: EXP_IEEE T1.X (MASKED), T2.W,
+; CM-NEXT: EXP_IEEE T1.Y, T2.W,
+; CM-NEXT: EXP_IEEE T1.Z (MASKED), T2.W,
+; CM-NEXT: EXP_IEEE * T1.W (MASKED), T2.W,
+; CM-NEXT: MUL_IEEE T4.X, T0.W, literal.x,
+; CM-NEXT: FLT_TO_INT T3.Y, T2.Z,
+; CM-NEXT: MUL_IEEE T1.Z, PV.Y, literal.y,
+; CM-NEXT: CNDE_INT * T0.W, T0.Y, T0.X, T2.Y,
; CM-NEXT: 2130706432(1.701412e+38), 209715200(1.972152e-31)
+; CM-NEXT: CNDE_INT T0.X, T1.W, PV.W, T0.Z,
+; CM-NEXT: MUL_IEEE T0.Y, PV.Z, literal.x,
+; CM-NEXT: MAX_INT T2.Z, PV.Y, literal.y,
+; CM-NEXT: MIN_INT * T0.W, PV.Y, literal.z,
+; CM-NEXT: 209715200(1.972152e-31), -330(nan)
; CM-NEXT: 381(5.338947e-43), 0(0.000000e+00)
-; CM-NEXT: MIN_INT T7.X, T0.Y, literal.x,
-; CM-NEXT: ADD_INT T1.Y, PV.W, literal.y,
-; CM-NEXT: ADD_INT T4.Z, T1.X, literal.z,
-; CM-NEXT: SETGT_UINT * T4.W, T1.X, literal.w,
-; CM-NEXT: 381(5.338947e-43), -254(nan)
+; CM-NEXT: ADD_INT T5.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T2.Y, PV.Z, literal.y,
+; CM-NEXT: ADD_INT T2.Z, T3.Y, literal.z,
+; CM-NEXT: SETGT_UINT * T0.W, T3.Y, literal.w,
+; CM-NEXT: -254(nan), 204(2.858649e-43)
+; CM-NEXT: 102(1.429324e-43), -229(nan)
+; CM-NEXT: ADD_INT T6.X, T3.Y, literal.x,
+; CM-NEXT: SETGT_UINT T4.Y, T3.Y, literal.y,
+; CM-NEXT: CNDE_INT T2.Z, PV.W, PV.Y, PV.Z,
+; CM-NEXT: SETGT_INT * T1.W, T3.Y, literal.x,
; CM-NEXT: -127(nan), 254(3.559298e-43)
-; CM-NEXT: CNDE_INT T8.X, PV.W, PV.Z, PV.Y,
-; CM-NEXT: SETGT_INT T1.Y, T1.X, literal.x,
-; CM-NEXT: ADD_INT T4.Z, PV.X, literal.y,
-; CM-NEXT: ADD_INT * T5.W, T0.Y, literal.z,
+; CM-NEXT: MUL_IEEE T7.X, T1.Y, literal.x,
+; CM-NEXT: CNDE_INT T2.Y, PV.W, PV.Z, T3.Y,
+; CM-NEXT: CNDE_INT T2.Z, PV.Y, PV.X, T5.X,
+; CM-NEXT: MIN_INT * T2.W, T0.Z, literal.y,
+; CM-NEXT: 2130706432(1.701412e+38), 381(5.338947e-43)
+; CM-NEXT: SETGT_INT T5.X, T3.Y, literal.x,
+; CM-NEXT: ADD_INT T3.Y, PV.W, literal.y,
+; CM-NEXT: ADD_INT T3.Z, T0.Z, literal.z,
+; CM-NEXT: SETGT_UINT * T2.W, T0.Z, literal.w,
; CM-NEXT: 127(1.779649e-43), -254(nan)
-; CM-NEXT: -127(nan), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T1.X, T2.W, PV.W, PV.Z,
-; CM-NEXT: CNDE_INT T5.Y, PV.Y, T3.Z, PV.X,
-; CM-NEXT: CNDE_INT T3.Z, T6.X, T4.Y, T3.W,
-; CM-NEXT: MUL_IEEE * T2.W, T2.X, literal.x, BS:VEC_120/SCL_212
+; CM-NEXT: -127(nan), 254(3.559298e-43)
+; CM-NEXT: CNDE_INT T6.X, PV.W, PV.Z, PV.Y,
+; CM-NEXT: CNDE_INT T2.Y, PV.X, T2.Y, T2.Z,
+; CM-NEXT: MUL_IEEE T2.Z, T7.X, literal.x,
+; CM-NEXT: CNDE_INT * T0.W, T0.W, T0.Y, T1.Z, BS:VEC_021/SCL_122
; CM-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; CM-NEXT: SETGT_INT T6.X, T0.Y, literal.x,
-; CM-NEXT: CNDE_INT T0.Y, T4.W, T2.X, PV.W,
-; CM-NEXT: CNDE_INT * T1.Z, T2.Z, PV.Z, T1.Z,
-; CM-NEXT: 127(1.779649e-43), 0(0.000000e+00)
-; CM-NEXT: ALU clause starting at 205:
-; CM-NEXT: LSHL * T2.W, T5.Y, literal.x,
-; CM-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; CM-NEXT: ADD_INT T2.X, PV.W, literal.x,
-; CM-NEXT: CNDE_INT T0.Y, T1.Y, T1.Z, T0.Y,
-; CM-NEXT: CNDE_INT * T1.Z, T6.X, T4.X, T1.X,
+; CM-NEXT: SETGT_INT T8.X, T0.Z, literal.x,
+; CM-NEXT: CNDE_INT T0.Y, T1.W, PV.W, T1.Y,
+; CM-NEXT: CNDE_INT T0.Z, T4.Y, T7.X, PV.Z,
+; CM-NEXT: LSHL * T0.W, PV.Y, literal.y,
+; CM-NEXT: 127(1.779649e-43), 23(3.222986e-44)
+; CM-NEXT: ALU clause starting at 202:
+; CM-NEXT: ADD_INT T7.X, T0.W, literal.x,
+; CM-NEXT: CNDE_INT * T0.Y, T5.X, T0.Y, T0.Z,
; CM-NEXT: 1065353216(1.000000e+00), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT * T1.W, T1.W, T3.Y, T2.Y,
-; CM-NEXT: CNDE_INT T1.X, T0.Z, PV.W, T0.W,
-; CM-NEXT: LSHL T1.Y, T1.Z, literal.x, BS:VEC_120/SCL_212
-; CM-NEXT: MUL_IEEE T0.Z, T0.Y, T2.X,
+; CM-NEXT: CNDE_INT * T0.Z, T8.X, T0.X, T6.X,
+; CM-NEXT: MUL_IEEE * T0.W, T4.X, literal.x,
+; CM-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
+; CM-NEXT: CNDE_INT T0.X, T2.W, T4.X, PV.W,
+; CM-NEXT: LSHL T1.Y, T0.Z, literal.x,
+; CM-NEXT: MUL_IEEE T0.Z, T0.Y, T7.X, BS:VEC_021/SCL_122
; CM-NEXT: SETGT * T0.W, literal.y, KC0[4].X,
; CM-NEXT: 23(3.222986e-44), -1026650416(-1.032789e+02)
-; CM-NEXT: CNDE T2.X, PV.W, PV.Z, 0.0,
+; CM-NEXT: CNDE T4.X, PV.W, PV.Z, 0.0,
; CM-NEXT: SETGT T0.Y, KC0[4].X, literal.x,
; CM-NEXT: ADD_INT T0.Z, PV.Y, literal.y,
-; CM-NEXT: CNDE_INT * T0.W, T6.X, PV.X, T5.X,
+; CM-NEXT: CNDE_INT * T0.W, T8.X, T3.X, PV.X,
; CM-NEXT: 1118925336(8.872284e+01), 1065353216(1.000000e+00)
-; CM-NEXT: SETGT T1.X, KC0[3].W, literal.x,
+; CM-NEXT: SETGT T0.X, KC0[3].W, literal.x,
; CM-NEXT: MUL_IEEE T1.Y, PV.W, PV.Z,
; CM-NEXT: SETGT T0.Z, literal.y, KC0[3].Z,
; CM-NEXT: CNDE * T0.W, PV.Y, PV.X, literal.z,
; CM-NEXT: 1118925336(8.872284e+01), -1026650416(-1.032789e+02)
; CM-NEXT: 2139095040(INF), 0(0.000000e+00)
-; CM-NEXT: SETGT T2.X, literal.x, KC0[3].Y,
+; CM-NEXT: SETGT T3.X, literal.x, KC0[3].Y,
; CM-NEXT: CNDE T0.Y, PV.Z, PV.Y, 0.0,
-; CM-NEXT: CNDE T0.Z, PV.X, T3.X, literal.y,
+; CM-NEXT: CNDE T0.Z, PV.X, T2.X, literal.y,
; CM-NEXT: SETGT * T1.W, KC0[3].Z, literal.z,
; CM-NEXT: -1026650416(-1.032789e+02), 2139095040(INF)
; CM-NEXT: 1118925336(8.872284e+01), 0(0.000000e+00)
; CM-NEXT: CNDE T0.Y, PV.W, PV.Y, literal.x,
-; CM-NEXT: CNDE T1.Z, PV.X, T0.X, 0.0,
+; CM-NEXT: CNDE T1.Z, PV.X, T1.X, 0.0,
; CM-NEXT: SETGT * T1.W, KC0[3].Y, literal.y,
; CM-NEXT: 2139095040(INF), 1118925336(8.872284e+01)
; CM-NEXT: CNDE * T0.X, PV.W, PV.Z, literal.x,
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
index 544c1de6c7bb..a16294958748 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
@@ -230,23 +230,23 @@ define amdgpu_kernel void @s_exp10_f32(ptr addrspace(1) %out, float %in) {
; R600-NEXT: MUL_IEEE * T2.W, PS, literal.z,
; R600-NEXT: -127(nan), 254(3.559298e-43)
; R600-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T3.X, T1.X, literal.x,
-; R600-NEXT: MUL_IEEE T0.Y, PS, literal.y,
+; R600-NEXT: MUL_IEEE T3.X, PS, literal.x,
+; R600-NEXT: MUL_IEEE T0.Y, T1.X, literal.y,
; R600-NEXT: CNDE_INT T1.Z, PV.W, PV.Z, T0.Z,
; R600-NEXT: CNDE_INT T3.W, PV.Y, PV.X, T0.X,
; R600-NEXT: SETGT_INT * T4.W, T0.Z, literal.z,
-; R600-NEXT: 2130706432(1.701412e+38), 209715200(1.972152e-31)
+; R600-NEXT: 209715200(1.972152e-31), 2130706432(1.701412e+38)
; R600-NEXT: 127(1.779649e-43), 0(0.000000e+00)
; R600-NEXT: CNDE_INT T0.Z, PS, PV.Z, PV.W,
-; R600-NEXT: CNDE_INT T0.W, T0.W, PV.Y, T2.W,
-; R600-NEXT: MUL_IEEE * T2.W, PV.X, literal.x,
+; R600-NEXT: MUL_IEEE T3.W, PV.Y, literal.x,
+; R600-NEXT: CNDE_INT * T0.W, T0.W, PV.X, T2.W,
; R600-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; R600-NEXT: CNDE_INT T1.Z, T1.Y, T3.X, PS,
-; R600-NEXT: CNDE_INT T0.W, T1.W, PV.W, T1.X,
+; R600-NEXT: CNDE_INT T1.Z, T1.W, PS, T1.X,
+; R600-NEXT: CNDE_INT T0.W, T1.Y, T0.Y, PV.W,
; R600-NEXT: LSHL * T1.W, PV.Z, literal.x,
; R600-NEXT: 23(3.222986e-44), 0(0.000000e+00)
; R600-NEXT: ADD_INT T1.W, PS, literal.x,
-; R600-NEXT: CNDE_INT * T0.W, T4.W, PV.W, PV.Z,
+; R600-NEXT: CNDE_INT * T0.W, T4.W, PV.Z, PV.W,
; R600-NEXT: 1065353216(1.000000e+00), 0(0.000000e+00)
; R600-NEXT: MUL_IEEE T0.W, PS, PV.W,
; R600-NEXT: SETGT * T1.W, literal.x, KC0[2].Z,
@@ -260,65 +260,63 @@ define amdgpu_kernel void @s_exp10_f32(ptr addrspace(1) %out, float %in) {
;
; CM-LABEL: s_exp10_f32:
; CM: ; %bb.0:
-; CM-NEXT: ALU 64, @4, KC0[CB0:0-32], KC1[]
+; CM-NEXT: ALU 62, @4, KC0[CB0:0-32], KC1[]
; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X
; CM-NEXT: CF_END
; CM-NEXT: PAD
; CM-NEXT: ALU clause starting at 4:
; CM-NEXT: AND_INT * T0.W, KC0[2].Z, literal.x,
; CM-NEXT: -4096(nan), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T0.Z, PV.W, literal.x,
; CM-NEXT: ADD * T1.W, KC0[2].Z, -PV.W,
-; CM-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T1.Z, PV.W, literal.x,
-; CM-NEXT: RNDNE * T2.W, PV.Z,
-; CM-NEXT: 975668412(6.390323e-04), 0(0.000000e+00)
-; CM-NEXT: TRUNC T2.Z, PV.W,
+; CM-NEXT: MUL_IEEE T0.Z, PV.W, literal.x,
+; CM-NEXT: MUL_IEEE * T2.W, T0.W, literal.y,
+; CM-NEXT: 975668412(6.390323e-04), 1079283712(3.321289e+00)
+; CM-NEXT: RNDNE T1.Z, PV.W,
; CM-NEXT: MULADD_IEEE * T1.W, T1.W, literal.x, PV.Z,
; CM-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
-; CM-NEXT: MULADD_IEEE T0.Y, T0.W, literal.x, PV.W,
-; CM-NEXT: ADD T0.Z, T0.Z, -T2.W,
-; CM-NEXT: FLT_TO_INT * T0.W, PV.Z,
+; CM-NEXT: MULADD_IEEE T0.Z, T0.W, literal.x, PV.W,
+; CM-NEXT: ADD * T0.W, T2.W, -PV.Z, BS:VEC_120/SCL_212
; CM-NEXT: 975668412(6.390323e-04), 0(0.000000e+00)
-; CM-NEXT: MIN_INT T1.Z, PV.W, literal.x,
-; CM-NEXT: ADD * T1.W, PV.Z, PV.Y,
+; CM-NEXT: TRUNC T1.Z, T1.Z,
+; CM-NEXT: ADD * T0.W, PV.W, PV.Z,
+; CM-NEXT: EXP_IEEE T0.X, T0.W,
+; CM-NEXT: EXP_IEEE T0.Y (MASKED), T0.W,
+; CM-NEXT: EXP_IEEE T0.Z (MASKED), T0.W,
+; CM-NEXT: EXP_IEEE * T0.W (MASKED), T0.W,
+; CM-NEXT: FLT_TO_INT T0.Z, T1.Z,
+; CM-NEXT: MUL_IEEE * T0.W, PV.X, literal.x,
+; CM-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
+; CM-NEXT: MUL_IEEE T0.Y, PV.W, literal.x,
+; CM-NEXT: MAX_INT T1.Z, PV.Z, literal.y,
+; CM-NEXT: MIN_INT * T1.W, PV.Z, literal.z,
+; CM-NEXT: 209715200(1.972152e-31), -330(nan)
; CM-NEXT: 381(5.338947e-43), 0(0.000000e+00)
-; CM-NEXT: EXP_IEEE T0.X, T1.W,
-; CM-NEXT: EXP_IEEE T0.Y (MASKED), T1.W,
-; CM-NEXT: EXP_IEEE T0.Z (MASKED), T1.W,
-; CM-NEXT: EXP_IEEE * T0.W (MASKED), T1.W,
-; CM-NEXT: MUL_IEEE T0.Y, PV.X, literal.x,
-; CM-NEXT: ADD_INT T0.Z, T1.Z, literal.y,
-; CM-NEXT: MAX_INT * T1.W, T0.W, literal.z,
-; CM-NEXT: 2130706432(1.701412e+38), -254(nan)
-; CM-NEXT: -330(nan), 0(0.000000e+00)
-; CM-NEXT: ADD_INT T1.X, T0.W, literal.x,
-; CM-NEXT: ADD_INT T1.Y, PV.W, literal.y,
-; CM-NEXT: ADD_INT T1.Z, T0.W, literal.z,
-; CM-NEXT: SETGT_UINT * T1.W, T0.W, literal.w,
-; CM-NEXT: -127(nan), 204(2.858649e-43)
+; CM-NEXT: ADD_INT T1.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T1.Y, PV.Z, literal.y,
+; CM-NEXT: ADD_INT T1.Z, T0.Z, literal.z,
+; CM-NEXT: SETGT_UINT * T1.W, T0.Z, literal.w,
+; CM-NEXT: -254(nan), 204(2.858649e-43)
; CM-NEXT: 102(1.429324e-43), -229(nan)
-; CM-NEXT: SETGT_UINT T2.X, T0.W, literal.x,
-; CM-NEXT: CNDE_INT T1.Y, PV.W, PV.Y, PV.Z,
-; CM-NEXT: SETGT_INT T1.Z, T0.W, literal.y,
-; CM-NEXT: MUL_IEEE * T2.W, T0.X, literal.z,
-; CM-NEXT: 254(3.559298e-43), -127(nan)
-; CM-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T3.X, PV.W, literal.x,
-; CM-NEXT: CNDE_INT T1.Y, PV.Z, PV.Y, T0.W,
-; CM-NEXT: CNDE_INT T0.Z, PV.X, T1.X, T0.Z,
-; CM-NEXT: SETGT_INT * T0.W, T0.W, literal.y,
-; CM-NEXT: 209715200(1.972152e-31), 127(1.779649e-43)
+; CM-NEXT: ADD_INT T2.X, T0.Z, literal.x,
+; CM-NEXT: SETGT_UINT T2.Y, T0.Z, literal.y,
+; CM-NEXT: CNDE_INT T1.Z, PV.W, PV.Y, PV.Z,
+; CM-NEXT: SETGT_INT * T2.W, T0.Z, literal.x,
+; CM-NEXT: -127(nan), 254(3.559298e-43)
+; CM-NEXT: MUL_IEEE T3.X, T0.X, literal.x,
+; CM-NEXT: CNDE_INT T1.Y, PV.W, PV.Z, T0.Z,
+; CM-NEXT: CNDE_INT T1.Z, PV.Y, PV.X, T1.X,
+; CM-NEXT: SETGT_INT * T3.W, T0.Z, literal.y,
+; CM-NEXT: 2130706432(1.701412e+38), 127(1.779649e-43)
; CM-NEXT: CNDE_INT T1.Y, PV.W, PV.Y, PV.Z,
-; CM-NEXT: CNDE_INT T0.Z, T1.W, PV.X, T2.W,
-; CM-NEXT: MUL_IEEE * T1.W, T0.Y, literal.x,
+; CM-NEXT: MUL_IEEE T0.Z, PV.X, literal.x,
+; CM-NEXT: CNDE_INT * T0.W, T1.W, T0.Y, T0.W,
; CM-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T0.Y, T2.X, T0.Y, PV.W,
-; CM-NEXT: CNDE_INT T0.Z, T1.Z, PV.Z, T0.X,
-; CM-NEXT: LSHL * T1.W, PV.Y, literal.x,
+; CM-NEXT: CNDE_INT T0.Y, T2.W, PV.W, T0.X,
+; CM-NEXT: CNDE_INT T0.Z, T2.Y, T3.X, PV.Z,
+; CM-NEXT: LSHL * T0.W, PV.Y, literal.x,
; CM-NEXT: 23(3.222986e-44), 0(0.000000e+00)
; CM-NEXT: ADD_INT T1.Z, PV.W, literal.x,
-; CM-NEXT: CNDE_INT * T0.W, T0.W, PV.Z, PV.Y,
+; CM-NEXT: CNDE_INT * T0.W, T3.W, PV.Y, PV.Z,
; CM-NEXT: 1065353216(1.000000e+00), 0(0.000000e+00)
; CM-NEXT: MUL_IEEE T0.Z, PV.W, PV.Z,
; CM-NEXT: SETGT * T0.W, literal.x, KC0[2].Z,
@@ -612,105 +610,105 @@ define amdgpu_kernel void @s_exp10_v2f32(ptr addrspace(1) %out, <2 x float> %in)
; R600-NEXT: AND_INT * T0.W, KC0[3].X, literal.x,
; R600-NEXT: -4096(nan), 0(0.000000e+00)
; R600-NEXT: ADD * T1.W, KC0[3].X, -PV.W,
-; R600-NEXT: AND_INT T0.Z, KC0[2].W, literal.x,
-; R600-NEXT: MUL_IEEE T2.W, PV.W, literal.y,
-; R600-NEXT: MUL_IEEE * T3.W, T0.W, literal.z,
-; R600-NEXT: -4096(nan), 975668412(6.390323e-04)
-; R600-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
-; R600-NEXT: RNDNE T1.Z, PS,
+; R600-NEXT: MUL_IEEE T2.W, PV.W, literal.x,
+; R600-NEXT: MUL_IEEE * T3.W, T0.W, literal.y,
+; R600-NEXT: 975668412(6.390323e-04), 1079283712(3.321289e+00)
+; R600-NEXT: RNDNE T0.Z, PS,
; R600-NEXT: MULADD_IEEE T1.W, T1.W, literal.x, PV.W,
-; R600-NEXT: ADD * T2.W, KC0[2].W, -PV.Z,
-; R600-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T0.Y, PS, literal.x,
-; R600-NEXT: MUL_IEEE T2.Z, T0.Z, literal.y,
+; R600-NEXT: AND_INT * T2.W, KC0[2].W, literal.y,
+; R600-NEXT: 1079283712(3.321289e+00), -4096(nan)
+; R600-NEXT: ADD T1.Z, KC0[2].W, -PS,
; R600-NEXT: MULADD_IEEE T0.W, T0.W, literal.x, PV.W,
; R600-NEXT: ADD * T1.W, T3.W, -PV.Z,
+; R600-NEXT: 975668412(6.390323e-04), 0(0.000000e+00)
+; R600-NEXT: ADD T2.Z, PS, PV.W,
+; R600-NEXT: MUL_IEEE T0.W, PV.Z, literal.x,
+; R600-NEXT: MUL_IEEE * T1.W, T2.W, literal.y,
; R600-NEXT: 975668412(6.390323e-04), 1079283712(3.321289e+00)
-; R600-NEXT: ADD T3.Z, PS, PV.W,
-; R600-NEXT: RNDNE T0.W, PV.Z,
-; R600-NEXT: MULADD_IEEE * T1.W, T2.W, literal.x, PV.Y, BS:VEC_021/SCL_122
-; R600-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
-; R600-NEXT: TRUNC T0.Y, T1.Z,
-; R600-NEXT: MULADD_IEEE T0.Z, T0.Z, literal.x, PS, BS:VEC_120/SCL_212
-; R600-NEXT: ADD T1.W, T2.Z, -PV.W, BS:VEC_201
+; R600-NEXT: RNDNE T0.Y, PS,
+; R600-NEXT: MULADD_IEEE T1.Z, T1.Z, literal.x, PV.W,
+; R600-NEXT: TRUNC T0.W, T0.Z, BS:VEC_120/SCL_212
; R600-NEXT: EXP_IEEE * T0.X, PV.Z,
-; R600-NEXT: 975668412(6.390323e-04), 0(0.000000e+00)
-; R600-NEXT: ADD T0.Z, PV.W, PV.Z,
-; R600-NEXT: FLT_TO_INT T1.W, PV.Y,
-; R600-NEXT: MUL_IEEE * T2.W, PS, literal.x,
-; R600-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T1.Z, PS, literal.x,
-; R600-NEXT: SETGT_UINT T3.W, PV.W, literal.y,
-; R600-NEXT: EXP_IEEE * T0.Y, PV.Z,
-; R600-NEXT: 2130706432(1.701412e+38), 254(3.559298e-43)
-; R600-NEXT: CNDE_INT T1.X, PV.W, T2.W, PV.Z,
-; R600-NEXT: MUL_IEEE T1.Y, PS, literal.x,
-; R600-NEXT: MAX_INT T0.Z, T1.W, literal.y,
-; R600-NEXT: MIN_INT T2.W, T1.W, literal.z,
-; R600-NEXT: TRUNC * T0.W, T0.W,
+; R600-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
+; R600-NEXT: FLT_TO_INT T1.Y, PV.W,
+; R600-NEXT: MUL_IEEE T0.Z, PS, literal.x,
+; R600-NEXT: MULADD_IEEE T0.W, T2.W, literal.y, PV.Z,
+; R600-NEXT: ADD * T1.W, T1.W, -PV.Y,
+; R600-NEXT: 209715200(1.972152e-31), 975668412(6.390323e-04)
+; R600-NEXT: ADD T1.Z, PS, PV.W,
+; R600-NEXT: MUL_IEEE T0.W, PV.Z, literal.x,
+; R600-NEXT: SETGT_UINT * T1.W, PV.Y, literal.y,
+; R600-NEXT: 209715200(1.972152e-31), -229(nan)
+; R600-NEXT: CNDE_INT T0.Z, PS, PV.W, T0.Z,
+; R600-NEXT: SETGT_INT T0.W, T1.Y, literal.x,
+; R600-NEXT: EXP_IEEE * T1.X, PV.Z,
+; R600-NEXT: -127(nan), 0(0.000000e+00)
+; R600-NEXT: CNDE_INT T0.Z, PV.W, PV.Z, T0.X,
+; R600-NEXT: MAX_INT T2.W, T1.Y, literal.x,
+; R600-NEXT: MUL_IEEE * T3.W, PS, literal.y,
+; R600-NEXT: -330(nan), 209715200(1.972152e-31)
+; R600-NEXT: MUL_IEEE T2.X, PS, literal.x,
+; R600-NEXT: ADD_INT T2.Y, PV.W, literal.y,
+; R600-NEXT: ADD_INT T1.Z, T1.Y, literal.z,
+; R600-NEXT: MIN_INT T2.W, T1.Y, literal.w,
+; R600-NEXT: TRUNC * T4.W, T0.Y,
+; R600-NEXT: 209715200(1.972152e-31), 204(2.858649e-43)
+; R600-NEXT: 102(1.429324e-43), 381(5.338947e-43)
+; R600-NEXT: FLT_TO_INT T3.X, PS,
+; R600-NEXT: ADD_INT T0.Y, PV.W, literal.x,
+; R600-NEXT: ADD_INT T2.Z, T1.Y, literal.y,
+; R600-NEXT: SETGT_UINT T2.W, T1.Y, literal.z,
+; R600-NEXT: CNDE_INT * T1.W, T1.W, PV.Y, PV.Z,
+; R600-NEXT: -254(nan), -127(nan)
+; R600-NEXT: 254(3.559298e-43), 0(0.000000e+00)
+; R600-NEXT: MUL_IEEE T4.X, T1.X, literal.x,
+; R600-NEXT: MUL_IEEE T2.Y, T0.X, literal.x, BS:VEC_120/SCL_212
+; R600-NEXT: CNDE_INT T1.Z, T0.W, PS, T1.Y,
+; R600-NEXT: CNDE_INT T0.W, PV.W, PV.Z, PV.Y,
+; R600-NEXT: MAX_INT * T1.W, PV.X, literal.y,
; R600-NEXT: 2130706432(1.701412e+38), -330(nan)
-; R600-NEXT: 381(5.338947e-43), 0(0.000000e+00)
-; R600-NEXT: FLT_TO_INT T2.X, PS,
-; R600-NEXT: ADD_INT T2.Y, PV.W, literal.x,
-; R600-NEXT: ADD_INT T0.Z, PV.Z, literal.y,
-; R600-NEXT: ADD_INT T0.W, T1.W, literal.z,
-; R600-NEXT: SETGT_UINT * T2.W, T1.W, literal.w,
-; R600-NEXT: -254(nan), 204(2.858649e-43)
-; R600-NEXT: 102(1.429324e-43), -229(nan)
-; R600-NEXT: ADD_INT T3.X, T1.W, literal.x,
-; R600-NEXT: CNDE_INT T3.Y, PS, PV.Z, PV.W,
-; R600-NEXT: SETGT_INT T0.Z, T1.W, literal.x,
-; R600-NEXT: MUL_IEEE T0.W, T0.X, literal.y,
-; R600-NEXT: MUL_IEEE * T4.W, T0.Y, literal.y,
-; R600-NEXT: -127(nan), 209715200(1.972152e-31)
-; R600-NEXT: MUL_IEEE T4.X, PS, literal.x,
-; R600-NEXT: MUL_IEEE T4.Y, PV.W, literal.x,
-; R600-NEXT: CNDE_INT T1.Z, PV.Z, PV.Y, T1.W,
-; R600-NEXT: CNDE_INT T3.W, T3.W, PV.X, T2.Y,
-; R600-NEXT: MAX_INT * T5.W, T2.X, literal.y,
-; R600-NEXT: 209715200(1.972152e-31), -330(nan)
-; R600-NEXT: SETGT_INT T3.X, T1.W, literal.x,
-; R600-NEXT: ADD_INT T2.Y, PS, literal.y,
-; R600-NEXT: ADD_INT T2.Z, T2.X, literal.z,
-; R600-NEXT: SETGT_UINT * T1.W, T2.X, literal.w,
+; R600-NEXT: SETGT_INT T0.X, T1.Y, literal.x,
+; R600-NEXT: ADD_INT T0.Y, PS, literal.y,
+; R600-NEXT: ADD_INT T2.Z, T3.X, literal.z,
+; R600-NEXT: SETGT_UINT * T1.W, T3.X, literal.w,
; R600-NEXT: 127(1.779649e-43), 204(2.858649e-43)
; R600-NEXT: 102(1.429324e-43), -229(nan)
-; R600-NEXT: MIN_INT * T5.W, T2.X, literal.x,
+; R600-NEXT: MIN_INT * T4.W, T3.X, literal.x,
; R600-NEXT: 381(5.338947e-43), 0(0.000000e+00)
; R600-NEXT: ADD_INT T5.X, PV.W, literal.x,
-; R600-NEXT: ADD_INT T3.Y, T2.X, literal.y,
-; R600-NEXT: SETGT_UINT T3.Z, T2.X, literal.z,
-; R600-NEXT: CNDE_INT T5.W, T1.W, T2.Y, T2.Z,
-; R600-NEXT: SETGT_INT * T6.W, T2.X, literal.y,
+; R600-NEXT: ADD_INT T1.Y, T3.X, literal.y,
+; R600-NEXT: SETGT_UINT T3.Z, T3.X, literal.z,
+; R600-NEXT: CNDE_INT T4.W, T1.W, T0.Y, T2.Z,
+; R600-NEXT: SETGT_INT * T5.W, T3.X, literal.y,
; R600-NEXT: -254(nan), -127(nan)
; R600-NEXT: 254(3.559298e-43), 0(0.000000e+00)
-; R600-NEXT: CNDE_INT T6.X, PS, PV.W, T2.X,
-; R600-NEXT: CNDE_INT T2.Y, PV.Z, PV.Y, PV.X,
-; R600-NEXT: SETGT_INT T2.Z, T2.X, literal.x, BS:VEC_120/SCL_212
-; R600-NEXT: CNDE_INT T3.W, T3.X, T1.Z, T3.W, BS:VEC_021/SCL_122
-; R600-NEXT: CNDE_INT * T0.W, T2.W, T4.Y, T0.W,
-; R600-NEXT: 127(1.779649e-43), 0(0.000000e+00)
-; R600-NEXT: CNDE_INT T0.X, T0.Z, PS, T0.X,
-; R600-NEXT: LSHL T3.Y, PV.W, literal.x,
-; R600-NEXT: CNDE_INT T0.Z, PV.Z, PV.X, PV.Y,
-; R600-NEXT: CNDE_INT T0.W, T1.W, T4.X, T4.W,
-; R600-NEXT: MUL_IEEE * T1.W, T1.Y, literal.y,
+; R600-NEXT: CNDE_INT T6.X, PS, PV.W, T3.X,
+; R600-NEXT: CNDE_INT T0.Y, PV.Z, PV.Y, PV.X,
+; R600-NEXT: SETGT_INT T2.Z, T3.X, literal.x,
+; R600-NEXT: CNDE_INT T0.W, T0.X, T1.Z, T0.W, BS:VEC_120/SCL_212
+; R600-NEXT: MUL_IEEE * T4.W, T2.Y, literal.y,
+; R600-NEXT: 127(1.779649e-43), 2130706432(1.701412e+38)
+; R600-NEXT: CNDE_INT T3.X, T2.W, T2.Y, PS, BS:VEC_120/SCL_212
+; R600-NEXT: LSHL T1.Y, PV.W, literal.x,
+; R600-NEXT: CNDE_INT T1.Z, PV.Z, PV.X, PV.Y,
+; R600-NEXT: MUL_IEEE T0.W, T4.X, literal.y,
+; R600-NEXT: CNDE_INT * T1.W, T1.W, T2.X, T3.W,
; R600-NEXT: 23(3.222986e-44), 2130706432(1.701412e+38)
-; R600-NEXT: CNDE_INT T2.X, T3.Z, T1.Y, PS,
-; R600-NEXT: CNDE_INT T0.Y, T6.W, PV.W, T0.Y,
-; R600-NEXT: LSHL T0.Z, PV.Z, literal.x,
+; R600-NEXT: CNDE_INT T1.X, T5.W, PS, T1.X, BS:VEC_021/SCL_122
+; R600-NEXT: CNDE_INT T0.Y, T3.Z, T4.X, PV.W, BS:VEC_201
+; R600-NEXT: LSHL T1.Z, PV.Z, literal.x,
; R600-NEXT: ADD_INT T0.W, PV.Y, literal.y,
-; R600-NEXT: CNDE_INT * T1.W, T3.X, PV.X, T1.X,
+; R600-NEXT: CNDE_INT * T1.W, T0.X, T0.Z, PV.X,
; R600-NEXT: 23(3.222986e-44), 1065353216(1.000000e+00)
; R600-NEXT: MUL_IEEE T1.Y, PS, PV.W,
-; R600-NEXT: SETGT T1.Z, literal.x, KC0[3].X,
+; R600-NEXT: SETGT T0.Z, literal.x, KC0[3].X,
; R600-NEXT: ADD_INT * T0.W, PV.Z, literal.y,
; R600-NEXT: -1036817932(-4.485347e+01), 1065353216(1.000000e+00)
; R600-NEXT: ALU clause starting at 101:
-; R600-NEXT: CNDE_INT * T1.W, T2.Z, T0.Y, T2.X,
+; R600-NEXT: CNDE_INT * T1.W, T2.Z, T1.X, T0.Y,
; R600-NEXT: MUL_IEEE T0.Y, PV.W, T0.W,
-; R600-NEXT: SETGT T0.Z, literal.x, KC0[2].W,
-; R600-NEXT: CNDE T0.W, T1.Z, T1.Y, 0.0,
+; R600-NEXT: SETGT T1.Z, literal.x, KC0[2].W,
+; R600-NEXT: CNDE T0.W, T0.Z, T1.Y, 0.0,
; R600-NEXT: SETGT * T1.W, KC0[3].X, literal.y,
; R600-NEXT: -1036817932(-4.485347e+01), 1109008539(3.853184e+01)
; R600-NEXT: CNDE T1.Y, PS, PV.W, literal.x,
@@ -723,118 +721,116 @@ define amdgpu_kernel void @s_exp10_v2f32(ptr addrspace(1) %out, <2 x float> %in)
;
; CM-LABEL: s_exp10_v2f32:
; CM: ; %bb.0:
-; CM-NEXT: ALU 100, @4, KC0[CB0:0-32], KC1[]
-; CM-NEXT: ALU 18, @105, KC0[CB0:0-32], KC1[]
+; CM-NEXT: ALU 98, @4, KC0[CB0:0-32], KC1[]
+; CM-NEXT: ALU 18, @103, KC0[CB0:0-32], KC1[]
; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0, T1.X
; CM-NEXT: CF_END
; CM-NEXT: ALU clause starting at 4:
; CM-NEXT: AND_INT * T0.W, KC0[2].W, literal.x,
; CM-NEXT: -4096(nan), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T0.Z, PV.W, literal.x,
; CM-NEXT: ADD * T1.W, KC0[2].W, -PV.W,
+; CM-NEXT: MUL_IEEE T0.Y, PV.W, literal.x,
+; CM-NEXT: MUL_IEEE T0.Z, T0.W, literal.y,
+; CM-NEXT: AND_INT * T2.W, KC0[3].X, literal.z,
+; CM-NEXT: 975668412(6.390323e-04), 1079283712(3.321289e+00)
+; CM-NEXT: -4096(nan), 0(0.000000e+00)
+; CM-NEXT: ADD T1.Y, KC0[3].X, -PV.W,
+; CM-NEXT: RNDNE T1.Z, PV.Z,
+; CM-NEXT: MULADD_IEEE * T1.W, T1.W, literal.x, PV.Y,
; CM-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T1.Z, PV.W, literal.x,
-; CM-NEXT: RNDNE * T2.W, PV.Z,
-; CM-NEXT: 975668412(6.390323e-04), 0(0.000000e+00)
-; CM-NEXT: TRUNC T0.Y, PV.W,
-; CM-NEXT: AND_INT T2.Z, KC0[3].X, literal.x,
-; CM-NEXT: MULADD_IEEE * T1.W, T1.W, literal.y, PV.Z,
-; CM-NEXT: -4096(nan), 1079283712(3.321289e+00)
; CM-NEXT: MULADD_IEEE T0.X, T0.W, literal.x, PV.W,
-; CM-NEXT: MUL_IEEE T1.Y, PV.Z, literal.y,
-; CM-NEXT: FLT_TO_INT T1.Z, PV.Y,
-; CM-NEXT: ADD * T0.W, KC0[3].X, -PV.Z,
+; CM-NEXT: ADD T0.Y, T0.Z, -PV.Z,
+; CM-NEXT: MUL_IEEE T0.Z, PV.Y, literal.x,
+; CM-NEXT: MUL_IEEE * T0.W, T2.W, literal.y, BS:VEC_120/SCL_212
; CM-NEXT: 975668412(6.390323e-04), 1079283712(3.321289e+00)
-; CM-NEXT: ADD T1.X, T0.Z, -T2.W,
-; CM-NEXT: MUL_IEEE T0.Y, PV.W, literal.x,
-; CM-NEXT: MAX_INT T0.Z, PV.Z, literal.y,
-; CM-NEXT: RNDNE * T1.W, PV.Y,
-; CM-NEXT: 975668412(6.390323e-04), -330(nan)
-; CM-NEXT: TRUNC T2.X, PV.W,
-; CM-NEXT: ADD_INT T2.Y, PV.Z, literal.x,
-; CM-NEXT: MULADD_IEEE T0.Z, T0.W, literal.y, PV.Y,
-; CM-NEXT: ADD * T0.W, PV.X, T0.X,
-; CM-NEXT: 204(2.858649e-43), 1079283712(3.321289e+00)
-; CM-NEXT: EXP_IEEE T0.X, T0.W,
-; CM-NEXT: EXP_IEEE T0.Y (MASKED), T0.W,
-; CM-NEXT: EXP_IEEE T0.Z (MASKED), T0.W,
-; CM-NEXT: EXP_IEEE * T0.W (MASKED), T0.W,
-; CM-NEXT: ADD_INT T1.X, T1.Z, literal.x,
-; CM-NEXT: MULADD_IEEE T0.Y, T2.Z, literal.y, T0.Z, BS:VEC_102/SCL_221
-; CM-NEXT: ADD T0.Z, T1.Y, -T1.W,
-; CM-NEXT: MUL_IEEE * T0.W, PV.X, literal.z,
-; CM-NEXT: 102(1.429324e-43), 975668412(6.390323e-04)
-; CM-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; CM-NEXT: SETGT_UINT T3.X, T1.Z, literal.x,
-; CM-NEXT: MUL_IEEE T1.Y, PV.W, literal.y,
-; CM-NEXT: SETGT_UINT T2.Z, T1.Z, literal.z,
-; CM-NEXT: ADD * T1.W, PV.Z, PV.Y,
-; CM-NEXT: -229(nan), 2130706432(1.701412e+38)
-; CM-NEXT: 254(3.559298e-43), 0(0.000000e+00)
+; CM-NEXT: TRUNC T1.X, T1.Z,
+; CM-NEXT: RNDNE T2.Y, PV.W,
+; CM-NEXT: MULADD_IEEE T0.Z, T1.Y, literal.x, PV.Z,
+; CM-NEXT: ADD * T1.W, PV.Y, PV.X,
+; CM-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
+; CM-NEXT: EXP_IEEE T0.X, T1.W,
+; CM-NEXT: EXP_IEEE T0.Y (MASKED), T1.W,
+; CM-NEXT: EXP_IEEE T0.Z (MASKED), T1.W,
+; CM-NEXT: EXP_IEEE * T0.W (MASKED), T1.W,
+; CM-NEXT: MULADD_IEEE T2.X, T2.W, literal.x, T0.Z,
+; CM-NEXT: ADD T0.Y, T0.W, -T2.Y, BS:VEC_120/SCL_212
+; CM-NEXT: FLT_TO_INT T0.Z, T1.X,
+; CM-NEXT: MUL_IEEE * T0.W, PV.X, literal.y,
+; CM-NEXT: 975668412(6.390323e-04), 209715200(1.972152e-31)
+; CM-NEXT: MUL_IEEE T1.X, PV.W, literal.x,
+; CM-NEXT: SETGT_UINT T1.Y, PV.Z, literal.y,
+; CM-NEXT: TRUNC T1.Z, T2.Y,
+; CM-NEXT: ADD * T1.W, PV.Y, PV.X,
+; CM-NEXT: 209715200(1.972152e-31), -229(nan)
; CM-NEXT: EXP_IEEE T0.X (MASKED), T1.W,
; CM-NEXT: EXP_IEEE T0.Y, T1.W,
; CM-NEXT: EXP_IEEE T0.Z (MASKED), T1.W,
; CM-NEXT: EXP_IEEE * T0.W (MASKED), T1.W,
-; CM-NEXT: CNDE_INT T4.X, T2.Z, T0.W, T1.Y,
-; CM-NEXT: CNDE_INT T1.Y, T3.X, T2.Y, T1.X,
-; CM-NEXT: FLT_TO_INT T0.Z, T2.X, BS:VEC_120/SCL_212
-; CM-NEXT: MUL_IEEE * T0.W, PV.Y, literal.x,
-; CM-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; CM-NEXT: SETGT_INT T1.X, T1.Z, literal.x,
-; CM-NEXT: MUL_IEEE T2.Y, T0.X, literal.y,
-; CM-NEXT: MUL_IEEE T3.Z, PV.W, literal.z,
-; CM-NEXT: SETGT_UINT * T1.W, PV.Z, literal.w,
-; CM-NEXT: -127(nan), 209715200(1.972152e-31)
-; CM-NEXT: 2130706432(1.701412e+38), 254(3.559298e-43)
-; CM-NEXT: CNDE_INT T2.X, PV.W, T0.W, PV.Z,
+; CM-NEXT: FLT_TO_INT T2.X, T1.Z,
+; CM-NEXT: MUL_IEEE T2.Y, PV.Y, literal.x,
+; CM-NEXT: CNDE_INT T1.Z, T1.Y, T1.X, T0.W,
+; CM-NEXT: SETGT_INT * T0.W, T0.Z, literal.y, BS:VEC_120/SCL_212
+; CM-NEXT: 209715200(1.972152e-31), -127(nan)
+; CM-NEXT: CNDE_INT T1.X, PV.W, PV.Z, T0.X,
; CM-NEXT: MUL_IEEE T3.Y, PV.Y, literal.x,
-; CM-NEXT: CNDE_INT T3.Z, PV.X, T1.Y, T1.Z,
-; CM-NEXT: MAX_INT * T0.W, T0.Z, literal.y,
-; CM-NEXT: 209715200(1.972152e-31), -330(nan)
-; CM-NEXT: ADD_INT T5.X, PV.W, literal.x,
-; CM-NEXT: ADD_INT T1.Y, T0.Z, literal.y,
-; CM-NEXT: SETGT_UINT T4.Z, T0.Z, literal.z,
-; CM-NEXT: MUL_IEEE * T0.W, T0.Y, literal.w,
+; CM-NEXT: SETGT_UINT T1.Z, PV.X, literal.y,
+; CM-NEXT: MAX_INT * T1.W, T0.Z, literal.z,
+; CM-NEXT: 209715200(1.972152e-31), -229(nan)
+; CM-NEXT: -330(nan), 0(0.000000e+00)
+; CM-NEXT: ADD_INT T3.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T4.Y, T0.Z, literal.y,
+; CM-NEXT: CNDE_INT T2.Z, PV.Z, PV.Y, T2.Y,
+; CM-NEXT: SETGT_INT * T1.W, T2.X, literal.z,
; CM-NEXT: 204(2.858649e-43), 102(1.429324e-43)
-; CM-NEXT: -229(nan), 209715200(1.972152e-31)
-; CM-NEXT: MUL_IEEE T6.X, PV.W, literal.x,
-; CM-NEXT: MIN_INT T4.Y, T0.Z, literal.y,
-; CM-NEXT: CNDE_INT T5.Z, PV.Z, PV.X, PV.Y,
-; CM-NEXT: SETGT_INT * T2.W, T0.Z, literal.z,
-; CM-NEXT: 209715200(1.972152e-31), 381(5.338947e-43)
-; CM-NEXT: -127(nan), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T5.X, PV.W, PV.Z, T0.Z,
-; CM-NEXT: MIN_INT T1.Y, T1.Z, literal.x,
-; CM-NEXT: ADD_INT T5.Z, PV.Y, literal.y,
-; CM-NEXT: ADD_INT * T3.W, T0.Z, literal.z, BS:VEC_120/SCL_212
-; CM-NEXT: 381(5.338947e-43), -254(nan)
; CM-NEXT: -127(nan), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T7.X, T1.W, PV.W, PV.Z,
-; CM-NEXT: SETGT_INT T4.Y, T0.Z, literal.x,
-; CM-NEXT: ADD_INT T0.Z, PV.Y, literal.y,
-; CM-NEXT: ADD_INT * T1.W, T1.Z, literal.z, BS:VEC_120/SCL_212
+; CM-NEXT: CNDE_INT T4.X, PV.W, PV.Z, T0.Y,
+; CM-NEXT: MUL_IEEE T2.Y, T0.X, literal.x,
+; CM-NEXT: MAX_INT T2.Z, T2.X, literal.y, BS:VEC_120/SCL_212
+; CM-NEXT: CNDE_INT * T2.W, T1.Y, PV.X, PV.Y,
+; CM-NEXT: 2130706432(1.701412e+38), -330(nan)
+; CM-NEXT: CNDE_INT T0.X, T0.W, PV.W, T0.Z,
+; CM-NEXT: ADD_INT T1.Y, PV.Z, literal.x,
+; CM-NEXT: ADD_INT T2.Z, T2.X, literal.y,
+; CM-NEXT: MIN_INT * T0.W, T2.X, literal.z,
+; CM-NEXT: 204(2.858649e-43), 102(1.429324e-43)
+; CM-NEXT: 381(5.338947e-43), 0(0.000000e+00)
+; CM-NEXT: ADD_INT T3.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T3.Y, T2.X, literal.y,
+; CM-NEXT: SETGT_UINT T3.Z, T2.X, literal.z,
+; CM-NEXT: CNDE_INT * T0.W, T1.Z, PV.Y, PV.Z,
+; CM-NEXT: -254(nan), -127(nan)
+; CM-NEXT: 254(3.559298e-43), 0(0.000000e+00)
+; CM-NEXT: MUL_IEEE T5.X, T0.Y, literal.x,
+; CM-NEXT: CNDE_INT T0.Y, T1.W, PV.W, T2.X,
+; CM-NEXT: CNDE_INT T1.Z, PV.Z, PV.Y, PV.X,
+; CM-NEXT: MIN_INT * T0.W, T0.Z, literal.y,
+; CM-NEXT: 2130706432(1.701412e+38), 381(5.338947e-43)
+; CM-NEXT: SETGT_INT T2.X, T2.X, literal.x,
+; CM-NEXT: ADD_INT T1.Y, PV.W, literal.y,
+; CM-NEXT: ADD_INT T2.Z, T0.Z, literal.z,
+; CM-NEXT: SETGT_UINT * T0.W, T0.Z, literal.w,
; CM-NEXT: 127(1.779649e-43), -254(nan)
-; CM-NEXT: -127(nan), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T8.X, T2.Z, PV.W, PV.Z,
-; CM-NEXT: SETGT_INT T1.Y, T1.Z, literal.x, BS:VEC_120/SCL_212
-; CM-NEXT: CNDE_INT T0.Z, PV.Y, T5.X, PV.X,
-; CM-NEXT: CNDE_INT * T0.W, T4.Z, T6.X, T0.W, BS:VEC_201
-; CM-NEXT: 127(1.779649e-43), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T5.X, T2.W, PV.W, T0.Y,
+; CM-NEXT: -127(nan), 254(3.559298e-43)
+; CM-NEXT: CNDE_INT T3.X, PV.W, PV.Z, PV.Y,
+; CM-NEXT: SETGT_INT T1.Y, T0.Z, literal.x,
+; CM-NEXT: CNDE_INT T0.Z, PV.X, T0.Y, T1.Z,
+; CM-NEXT: MUL_IEEE * T1.W, T5.X, literal.y,
+; CM-NEXT: 127(1.779649e-43), 2130706432(1.701412e+38)
+; CM-NEXT: CNDE_INT T5.X, T3.Z, T5.X, PV.W,
; CM-NEXT: LSHL T0.Y, PV.Z, literal.x,
-; CM-NEXT: CNDE_INT T0.Z, PV.Y, T3.Z, PV.X,
-; CM-NEXT: CNDE_INT * T0.W, T3.X, T3.Y, T2.Y, BS:VEC_201
-; CM-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T0.X, T1.X, PV.W, T0.X,
+; CM-NEXT: CNDE_INT T0.Z, PV.Y, T0.X, PV.X, BS:VEC_021/SCL_122
+; CM-NEXT: MUL_IEEE * T1.W, T2.Y, literal.y,
+; CM-NEXT: 23(3.222986e-44), 2130706432(1.701412e+38)
+; CM-NEXT: CNDE_INT T0.X, T0.W, T2.Y, PV.W,
; CM-NEXT: LSHL T2.Y, PV.Z, literal.x,
; CM-NEXT: ADD_INT * T0.Z, PV.Y, literal.y,
; CM-NEXT: 23(3.222986e-44), 1065353216(1.000000e+00)
-; CM-NEXT: ALU clause starting at 105:
-; CM-NEXT: CNDE_INT * T0.W, T4.Y, T5.X, T2.X,
-; CM-NEXT: MUL_IEEE T1.X, PV.W, T0.Z,
+; CM-NEXT: ALU clause starting at 103:
+; CM-NEXT: CNDE_INT * T0.W, T2.X, T4.X, T5.X,
+; CM-NEXT: MUL_IEEE T2.X, PV.W, T0.Z,
; CM-NEXT: SETGT T0.Y, literal.x, KC0[3].X,
; CM-NEXT: ADD_INT T0.Z, T2.Y, literal.y,
-; CM-NEXT: CNDE_INT * T0.W, T1.Y, T0.X, T4.X, BS:VEC_120/SCL_212
+; CM-NEXT: CNDE_INT * T0.W, T1.Y, T1.X, T0.X, BS:VEC_120/SCL_212
; CM-NEXT: -1036817932(-4.485347e+01), 1065353216(1.000000e+00)
; CM-NEXT: MUL_IEEE T0.X, PV.W, PV.Z,
; CM-NEXT: SETGT T1.Y, literal.x, KC0[2].W,
@@ -1217,8 +1213,8 @@ define amdgpu_kernel void @s_exp10_v3f32(ptr addrspace(1) %out, <3 x float> %in)
;
; R600-LABEL: s_exp10_v3f32:
; R600: ; %bb.0:
-; R600-NEXT: ALU 100, @6, KC0[CB0:0-32], KC1[]
-; R600-NEXT: ALU 69, @107, KC0[CB0:0-32], KC1[]
+; R600-NEXT: ALU 99, @6, KC0[CB0:0-32], KC1[]
+; R600-NEXT: ALU 69, @106, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T2.X, T3.X, 0
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
; R600-NEXT: CF_END
@@ -1226,69 +1222,68 @@ define amdgpu_kernel void @s_exp10_v3f32(ptr addrspace(1) %out, <3 x float> %in)
; R600-NEXT: ALU clause starting at 6:
; R600-NEXT: AND_INT * T0.W, KC0[3].Y, literal.x,
; R600-NEXT: -4096(nan), 0(0.000000e+00)
-; R600-NEXT: ADD T1.W, KC0[3].Y, -PV.W,
-; R600-NEXT: MUL_IEEE * T2.W, PV.W, literal.x,
+; R600-NEXT: MUL_IEEE T1.W, PV.W, literal.x,
+; R600-NEXT: ADD * T2.W, KC0[3].Y, -PV.W,
; R600-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
-; R600-NEXT: RNDNE T3.W, PS,
-; R600-NEXT: MUL_IEEE * T4.W, PV.W, literal.x,
+; R600-NEXT: RNDNE * T3.W, PV.W,
+; R600-NEXT: TRUNC T4.W, PV.W,
+; R600-NEXT: MUL_IEEE * T5.W, T2.W, literal.x,
; R600-NEXT: 975668412(6.390323e-04), 0(0.000000e+00)
-; R600-NEXT: MULADD_IEEE T1.W, T1.W, literal.x, PS,
-; R600-NEXT: TRUNC * T4.W, PV.W,
+; R600-NEXT: MULADD_IEEE T2.W, T2.W, literal.x, PS,
+; R600-NEXT: FLT_TO_INT * T4.W, PV.W,
; R600-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
-; R600-NEXT: FLT_TO_INT T0.Z, PS,
-; R600-NEXT: MULADD_IEEE T0.W, T0.W, literal.x, PV.W,
-; R600-NEXT: ADD * T1.W, T2.W, -T3.W,
-; R600-NEXT: 975668412(6.390323e-04), 0(0.000000e+00)
-; R600-NEXT: ADD T0.W, PS, PV.W,
-; R600-NEXT: MAX_INT * T1.W, PV.Z, literal.x,
-; R600-NEXT: -330(nan), 0(0.000000e+00)
-; R600-NEXT: ADD_INT T0.Y, PS, literal.x,
-; R600-NEXT: ADD_INT T1.Z, T0.Z, literal.y,
-; R600-NEXT: SETGT_UINT T1.W, T0.Z, literal.z,
-; R600-NEXT: EXP_IEEE * T0.X, PV.W,
+; R600-NEXT: MAX_INT T0.Z, PS, literal.x,
+; R600-NEXT: MULADD_IEEE T0.W, T0.W, literal.y, PV.W,
+; R600-NEXT: ADD * T1.W, T1.W, -T3.W,
+; R600-NEXT: -330(nan), 975668412(6.390323e-04)
+; R600-NEXT: ADD T0.Y, PS, PV.W,
+; R600-NEXT: ADD_INT T0.Z, PV.Z, literal.x,
+; R600-NEXT: ADD_INT T0.W, T4.W, literal.y,
+; R600-NEXT: SETGT_UINT * T1.W, T4.W, literal.z,
; R600-NEXT: 204(2.858649e-43), 102(1.429324e-43)
; R600-NEXT: -229(nan), 0(0.000000e+00)
-; R600-NEXT: CNDE_INT T1.Z, PV.W, PV.Y, PV.Z,
-; R600-NEXT: SETGT_INT T0.W, T0.Z, literal.x,
-; R600-NEXT: MUL_IEEE * T2.W, PS, literal.y,
-; R600-NEXT: -127(nan), 209715200(1.972152e-31)
-; R600-NEXT: MUL_IEEE T0.Y, PS, literal.x,
-; R600-NEXT: CNDE_INT T1.Z, PV.W, PV.Z, T0.Z,
-; R600-NEXT: MIN_INT T3.W, T0.Z, literal.y,
-; R600-NEXT: AND_INT * T4.W, KC0[3].W, literal.z,
-; R600-NEXT: 209715200(1.972152e-31), 381(5.338947e-43)
-; R600-NEXT: -4096(nan), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T1.X, T0.X, literal.x,
-; R600-NEXT: ADD T1.Y, KC0[3].W, -PS,
-; R600-NEXT: ADD_INT T2.Z, PV.W, literal.y,
-; R600-NEXT: ADD_INT T3.W, T0.Z, literal.z,
-; R600-NEXT: SETGT_UINT * T5.W, T0.Z, literal.w,
-; R600-NEXT: 2130706432(1.701412e+38), -254(nan)
+; R600-NEXT: CNDE_INT T0.Z, PS, PV.Z, PV.W,
+; R600-NEXT: SETGT_INT T0.W, T4.W, literal.x,
+; R600-NEXT: EXP_IEEE * T0.X, PV.Y,
+; R600-NEXT: -127(nan), 0(0.000000e+00)
+; R600-NEXT: MUL_IEEE T1.X, PS, literal.x,
+; R600-NEXT: CNDE_INT T0.Y, PV.W, PV.Z, T4.W,
+; R600-NEXT: MIN_INT T0.Z, T4.W, literal.y,
+; R600-NEXT: AND_INT T2.W, KC0[3].W, literal.z,
+; R600-NEXT: MUL_IEEE * T3.W, PS, literal.w,
+; R600-NEXT: 2130706432(1.701412e+38), 381(5.338947e-43)
+; R600-NEXT: -4096(nan), 209715200(1.972152e-31)
+; R600-NEXT: MUL_IEEE T2.X, PS, literal.x,
+; R600-NEXT: ADD T1.Y, KC0[3].W, -PV.W,
+; R600-NEXT: ADD_INT T0.Z, PV.Z, literal.y,
+; R600-NEXT: ADD_INT T5.W, T4.W, literal.z,
+; R600-NEXT: SETGT_UINT * T6.W, T4.W, literal.w,
+; R600-NEXT: 209715200(1.972152e-31), -254(nan)
; R600-NEXT: -127(nan), 254(3.559298e-43)
-; R600-NEXT: CNDE_INT T2.X, PS, PV.W, PV.Z,
-; R600-NEXT: SETGT_INT T2.Y, T0.Z, literal.x,
+; R600-NEXT: CNDE_INT T3.X, PS, PV.W, PV.Z,
+; R600-NEXT: SETGT_INT T2.Y, T4.W, literal.x,
; R600-NEXT: MUL_IEEE T0.Z, PV.Y, literal.y,
-; R600-NEXT: MUL_IEEE T3.W, T4.W, literal.z,
-; R600-NEXT: MUL_IEEE * T6.W, PV.X, literal.w,
+; R600-NEXT: MUL_IEEE * T4.W, T2.W, literal.z, BS:VEC_120/SCL_212
; R600-NEXT: 127(1.779649e-43), 975668412(6.390323e-04)
-; R600-NEXT: 1079283712(3.321289e+00), 2130706432(1.701412e+38)
-; R600-NEXT: CNDE_INT T1.X, T5.W, T1.X, PS, BS:VEC_120/SCL_212
-; R600-NEXT: RNDNE T3.Y, PV.W,
-; R600-NEXT: MULADD_IEEE T0.Z, T1.Y, literal.x, PV.Z,
-; R600-NEXT: CNDE_INT T5.W, PV.Y, T1.Z, PV.X,
-; R600-NEXT: CNDE_INT * T1.W, T1.W, T0.Y, T2.W,
; R600-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
-; R600-NEXT: CNDE_INT T0.X, T0.W, PS, T0.X,
+; R600-NEXT: CNDE_INT * T1.W, T1.W, T2.X, T3.W,
+; R600-NEXT: CNDE_INT T0.X, T0.W, PV.W, T0.X, BS:VEC_021/SCL_122
+; R600-NEXT: RNDNE T3.Y, T4.W, BS:VEC_120/SCL_212
+; R600-NEXT: MULADD_IEEE T0.Z, T1.Y, literal.x, T0.Z,
+; R600-NEXT: CNDE_INT T0.W, T2.Y, T0.Y, T3.X, BS:VEC_120/SCL_212
+; R600-NEXT: MUL_IEEE * T1.W, T1.X, literal.y,
+; R600-NEXT: 1079283712(3.321289e+00), 2130706432(1.701412e+38)
+; R600-NEXT: CNDE_INT T1.X, T6.W, T1.X, PS,
; R600-NEXT: LSHL T0.Y, PV.W, literal.x,
; R600-NEXT: AND_INT T1.Z, KC0[3].Z, literal.y,
-; R600-NEXT: MULADD_IEEE T0.W, T4.W, literal.z, PV.Z, BS:VEC_120/SCL_212
-; R600-NEXT: ADD * T1.W, T3.W, -PV.Y,
+; R600-NEXT: MULADD_IEEE T0.W, T2.W, literal.z, PV.Z, BS:VEC_120/SCL_212
+; R600-NEXT: ADD * T1.W, T4.W, -PV.Y,
; R600-NEXT: 23(3.222986e-44), -4096(nan)
; R600-NEXT: 975668412(6.390323e-04), 0(0.000000e+00)
; R600-NEXT: ADD T1.Y, PS, PV.W,
; R600-NEXT: MUL_IEEE T0.Z, PV.Z, literal.x,
; R600-NEXT: ADD_INT T0.W, PV.Y, literal.y,
-; R600-NEXT: CNDE_INT * T1.W, T2.Y, PV.X, T1.X,
+; R600-NEXT: CNDE_INT * T1.W, T2.Y, T0.X, PV.X,
; R600-NEXT: 1079283712(3.321289e+00), 1065353216(1.000000e+00)
; R600-NEXT: MUL_IEEE T0.X, PS, PV.W,
; R600-NEXT: ADD T0.Y, KC0[3].Z, -T1.Z,
@@ -1302,12 +1297,12 @@ define amdgpu_kernel void @s_exp10_v3f32(ptr addrspace(1) %out, <3 x float> %in)
; R600-NEXT: MUL_IEEE * T1.W, PS, literal.z,
; R600-NEXT: -1036817932(-4.485347e+01), 975668412(6.390323e-04)
; R600-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T3.X, T1.X, literal.x,
-; R600-NEXT: MUL_IEEE T2.Y, PS, literal.y,
+; R600-NEXT: MUL_IEEE T3.X, PS, literal.x,
+; R600-NEXT: MUL_IEEE T2.Y, T1.X, literal.y,
; R600-NEXT: MULADD_IEEE T4.Z, T0.Y, literal.z, PV.W,
; R600-NEXT: FLT_TO_INT T0.W, PV.Z,
; R600-NEXT: MIN_INT * T2.W, PV.Y, literal.w,
-; R600-NEXT: 2130706432(1.701412e+38), 209715200(1.972152e-31)
+; R600-NEXT: 209715200(1.972152e-31), 2130706432(1.701412e+38)
; R600-NEXT: 1079283712(3.321289e+00), 381(5.338947e-43)
; R600-NEXT: ADD_INT T4.X, PS, literal.x,
; R600-NEXT: MAX_INT T0.Y, PV.W, literal.y,
@@ -1325,7 +1320,7 @@ define amdgpu_kernel void @s_exp10_v3f32(ptr addrspace(1) %out, <3 x float> %in)
; R600-NEXT: 102(1.429324e-43), -229(nan)
; R600-NEXT: ADD_INT * T6.X, T0.W, literal.x,
; R600-NEXT: -127(nan), 0(0.000000e+00)
-; R600-NEXT: ALU clause starting at 107:
+; R600-NEXT: ALU clause starting at 106:
; R600-NEXT: SETGT_UINT T0.Y, T0.W, literal.x,
; R600-NEXT: CNDE_INT T0.Z, T3.W, T0.Z, T2.W, BS:VEC_102/SCL_221
; R600-NEXT: SETGT_INT T2.W, T0.W, literal.y,
@@ -1341,25 +1336,25 @@ define amdgpu_kernel void @s_exp10_v3f32(ptr addrspace(1) %out, <3 x float> %in)
; R600-NEXT: SETGT_UINT T5.X, T1.Y, literal.x,
; R600-NEXT: CNDE_INT T4.Y, PS, PV.Z, PV.W,
; R600-NEXT: MAX_INT T0.Z, T1.Y, literal.y,
-; R600-NEXT: MUL_IEEE T4.W, T1.Z, literal.z,
-; R600-NEXT: MUL_IEEE * T5.W, PV.Y, literal.w,
+; R600-NEXT: MUL_IEEE T4.W, PV.Y, literal.z,
+; R600-NEXT: MUL_IEEE * T5.W, T1.Z, literal.w,
; R600-NEXT: 254(3.559298e-43), -330(nan)
-; R600-NEXT: 2130706432(1.701412e+38), 209715200(1.972152e-31)
-; R600-NEXT: CNDE_INT T6.X, T3.W, PS, T3.Y, BS:VEC_021/SCL_122
-; R600-NEXT: MUL_IEEE T3.Y, PV.W, literal.x,
+; R600-NEXT: 209715200(1.972152e-31), 2130706432(1.701412e+38)
+; R600-NEXT: MUL_IEEE T6.X, PS, literal.x,
+; R600-NEXT: CNDE_INT T3.Y, T3.W, PV.W, T3.Y, BS:VEC_021/SCL_122
; R600-NEXT: ADD_INT T0.Z, PV.Z, literal.y,
; R600-NEXT: ADD_INT T3.W, T1.Y, literal.z,
-; R600-NEXT: SETGT_UINT * T5.W, T1.Y, literal.w,
+; R600-NEXT: SETGT_UINT * T4.W, T1.Y, literal.w,
; R600-NEXT: 2130706432(1.701412e+38), 204(2.858649e-43)
; R600-NEXT: 102(1.429324e-43), -229(nan)
; R600-NEXT: CNDE_INT T8.X, PS, PV.Z, PV.W,
; R600-NEXT: SETGT_INT T5.Y, T1.Y, literal.x,
-; R600-NEXT: CNDE_INT T0.Z, T0.Y, T4.W, PV.Y, BS:VEC_120/SCL_212
-; R600-NEXT: CNDE_INT T2.W, T2.W, PV.X, T1.Z,
+; R600-NEXT: CNDE_INT T0.Z, T2.W, PV.Y, T1.Z,
+; R600-NEXT: CNDE_INT T2.W, T0.Y, T5.W, PV.X, BS:VEC_120/SCL_212
; R600-NEXT: LSHL * T3.W, T4.Y, literal.y,
; R600-NEXT: -127(nan), 23(3.222986e-44)
; R600-NEXT: ADD_INT T6.X, PS, literal.x,
-; R600-NEXT: CNDE_INT T0.Y, T0.W, PV.W, PV.Z,
+; R600-NEXT: CNDE_INT T0.Y, T0.W, PV.Z, PV.W,
; R600-NEXT: CNDE_INT T0.Z, PV.Y, PV.X, T1.Y,
; R600-NEXT: CNDE_INT T0.W, T5.X, T7.X, T4.X,
; R600-NEXT: SETGT_INT * T2.W, T1.Y, literal.y,
@@ -1367,18 +1362,18 @@ define amdgpu_kernel void @s_exp10_v3f32(ptr addrspace(1) %out, <3 x float> %in)
; R600-NEXT: CNDE_INT T4.X, PS, PV.Z, PV.W,
; R600-NEXT: MUL_IEEE T0.Y, PV.Y, PV.X,
; R600-NEXT: SETGT T0.Z, literal.x, KC0[3].Z,
-; R600-NEXT: CNDE_INT T0.W, T5.W, T2.Y, T1.W,
-; R600-NEXT: MUL_IEEE * T1.W, T3.X, literal.y,
+; R600-NEXT: MUL_IEEE T0.W, T2.Y, literal.y,
+; R600-NEXT: CNDE_INT * T1.W, T4.W, T3.X, T1.W,
; R600-NEXT: -1036817932(-4.485347e+01), 2130706432(1.701412e+38)
-; R600-NEXT: CNDE_INT T3.X, T5.X, T3.X, PS,
-; R600-NEXT: CNDE_INT T1.Y, T5.Y, PV.W, T1.X,
+; R600-NEXT: CNDE_INT T1.X, T5.Y, PS, T1.X,
+; R600-NEXT: CNDE_INT T1.Y, T5.X, T2.Y, PV.W,
; R600-NEXT: CNDE T0.Z, PV.Z, PV.Y, 0.0,
; R600-NEXT: SETGT T0.W, KC0[3].Z, literal.x,
; R600-NEXT: LSHL * T1.W, PV.X, literal.y,
; R600-NEXT: 1109008539(3.853184e+01), 23(3.222986e-44)
-; R600-NEXT: ADD_INT T1.X, PS, literal.x,
+; R600-NEXT: ADD_INT T3.X, PS, literal.x,
; R600-NEXT: CNDE T0.Y, PV.W, PV.Z, literal.y,
-; R600-NEXT: CNDE_INT T0.Z, T2.W, PV.Y, PV.X,
+; R600-NEXT: CNDE_INT T0.Z, T2.W, PV.X, PV.Y,
; R600-NEXT: CNDE T0.W, T2.X, T0.X, 0.0,
; R600-NEXT: SETGT * T1.W, KC0[3].Y, literal.z,
; R600-NEXT: 1065353216(1.000000e+00), 2139095040(INF)
@@ -1399,197 +1394,193 @@ define amdgpu_kernel void @s_exp10_v3f32(ptr addrspace(1) %out, <3 x float> %in)
;
; CM-LABEL: s_exp10_v3f32:
; CM: ; %bb.0:
-; CM-NEXT: ALU 102, @6, KC0[CB0:0-32], KC1[]
-; CM-NEXT: ALU 80, @109, KC0[CB0:0-32], KC1[]
-; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T1, T3.X
-; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T2.X, T0.X
+; CM-NEXT: ALU 101, @6, KC0[CB0:0-32], KC1[]
+; CM-NEXT: ALU 77, @108, KC0[CB0:0-32], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0, T1.X
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T2.X, T3.X
; CM-NEXT: CF_END
; CM-NEXT: PAD
; CM-NEXT: ALU clause starting at 6:
; CM-NEXT: AND_INT * T0.W, KC0[3].Y, literal.x,
; CM-NEXT: -4096(nan), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T0.Z, PV.W, literal.x,
; CM-NEXT: ADD * T1.W, KC0[3].Y, -PV.W,
-; CM-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T1.Z, PV.W, literal.x,
-; CM-NEXT: RNDNE * T2.W, PV.Z,
-; CM-NEXT: 975668412(6.390323e-04), 0(0.000000e+00)
-; CM-NEXT: TRUNC T2.Z, PV.W,
+; CM-NEXT: MUL_IEEE T0.Z, PV.W, literal.x,
+; CM-NEXT: MUL_IEEE * T2.W, T0.W, literal.y,
+; CM-NEXT: 975668412(6.390323e-04), 1079283712(3.321289e+00)
+; CM-NEXT: RNDNE T1.Z, PV.W,
; CM-NEXT: MULADD_IEEE * T1.W, T1.W, literal.x, PV.Z,
; CM-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
-; CM-NEXT: MULADD_IEEE T0.Y, T0.W, literal.x, PV.W,
-; CM-NEXT: ADD T0.Z, T0.Z, -T2.W,
-; CM-NEXT: FLT_TO_INT * T0.W, PV.Z,
+; CM-NEXT: MULADD_IEEE T0.Z, T0.W, literal.x, PV.W,
+; CM-NEXT: ADD * T0.W, T2.W, -PV.Z, BS:VEC_120/SCL_212
; CM-NEXT: 975668412(6.390323e-04), 0(0.000000e+00)
-; CM-NEXT: MIN_INT T1.Z, PV.W, literal.x,
-; CM-NEXT: ADD * T1.W, PV.Z, PV.Y,
+; CM-NEXT: TRUNC T1.Z, T1.Z,
+; CM-NEXT: ADD * T0.W, PV.W, PV.Z,
+; CM-NEXT: EXP_IEEE T0.X, T0.W,
+; CM-NEXT: EXP_IEEE T0.Y (MASKED), T0.W,
+; CM-NEXT: EXP_IEEE T0.Z (MASKED), T0.W,
+; CM-NEXT: EXP_IEEE * T0.W (MASKED), T0.W,
+; CM-NEXT: FLT_TO_INT T0.Z, T1.Z,
+; CM-NEXT: MUL_IEEE * T0.W, PV.X, literal.x,
+; CM-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
+; CM-NEXT: MUL_IEEE T0.Y, PV.W, literal.x,
+; CM-NEXT: MAX_INT T1.Z, PV.Z, literal.y,
+; CM-NEXT: MIN_INT * T1.W, PV.Z, literal.z,
+; CM-NEXT: 209715200(1.972152e-31), -330(nan)
; CM-NEXT: 381(5.338947e-43), 0(0.000000e+00)
-; CM-NEXT: EXP_IEEE T0.X, T1.W,
-; CM-NEXT: EXP_IEEE T0.Y (MASKED), T1.W,
-; CM-NEXT: EXP_IEEE T0.Z (MASKED), T1.W,
-; CM-NEXT: EXP_IEEE * T0.W (MASKED), T1.W,
-; CM-NEXT: MUL_IEEE T0.Y, PV.X, literal.x,
-; CM-NEXT: ADD_INT T0.Z, T1.Z, literal.y,
-; CM-NEXT: MAX_INT * T1.W, T0.W, literal.z,
-; CM-NEXT: 2130706432(1.701412e+38), -254(nan)
-; CM-NEXT: -330(nan), 0(0.000000e+00)
-; CM-NEXT: ADD_INT T1.X, T0.W, literal.x,
-; CM-NEXT: ADD_INT T1.Y, PV.W, literal.y,
-; CM-NEXT: ADD_INT T1.Z, T0.W, literal.z,
-; CM-NEXT: SETGT_UINT * T1.W, T0.W, literal.w,
-; CM-NEXT: -127(nan), 204(2.858649e-43)
+; CM-NEXT: ADD_INT T1.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T1.Y, PV.Z, literal.y,
+; CM-NEXT: ADD_INT T1.Z, T0.Z, literal.z,
+; CM-NEXT: SETGT_UINT * T1.W, T0.Z, literal.w,
+; CM-NEXT: -254(nan), 204(2.858649e-43)
; CM-NEXT: 102(1.429324e-43), -229(nan)
-; CM-NEXT: SETGT_UINT T2.X, T0.W, literal.x,
-; CM-NEXT: CNDE_INT T1.Y, PV.W, PV.Y, PV.Z,
-; CM-NEXT: SETGT_INT T1.Z, T0.W, literal.y,
-; CM-NEXT: MUL_IEEE * T2.W, T0.X, literal.z,
-; CM-NEXT: 254(3.559298e-43), -127(nan)
-; CM-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T3.X, PV.W, literal.x,
-; CM-NEXT: CNDE_INT T1.Y, PV.Z, PV.Y, T0.W,
-; CM-NEXT: CNDE_INT T0.Z, PV.X, T1.X, T0.Z,
-; CM-NEXT: SETGT_INT * T0.W, T0.W, literal.y,
-; CM-NEXT: 209715200(1.972152e-31), 127(1.779649e-43)
+; CM-NEXT: ADD_INT T2.X, T0.Z, literal.x,
+; CM-NEXT: SETGT_UINT T2.Y, T0.Z, literal.y,
+; CM-NEXT: CNDE_INT T1.Z, PV.W, PV.Y, PV.Z,
+; CM-NEXT: SETGT_INT * T2.W, T0.Z, literal.x,
+; CM-NEXT: -127(nan), 254(3.559298e-43)
+; CM-NEXT: MUL_IEEE T3.X, T0.X, literal.x,
+; CM-NEXT: CNDE_INT T1.Y, PV.W, PV.Z, T0.Z,
+; CM-NEXT: CNDE_INT T1.Z, PV.Y, PV.X, T1.X,
+; CM-NEXT: SETGT_INT * T3.W, T0.Z, literal.y,
+; CM-NEXT: 2130706432(1.701412e+38), 127(1.779649e-43)
; CM-NEXT: CNDE_INT T1.Y, PV.W, PV.Y, PV.Z,
-; CM-NEXT: CNDE_INT T0.Z, T1.W, PV.X, T2.W,
-; CM-NEXT: MUL_IEEE * T1.W, T0.Y, literal.x,
+; CM-NEXT: MUL_IEEE T0.Z, PV.X, literal.x,
+; CM-NEXT: CNDE_INT * T0.W, T1.W, T0.Y, T0.W,
; CM-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T1.X, T2.X, T0.Y, PV.W,
-; CM-NEXT: CNDE_INT T0.Y, T1.Z, PV.Z, T0.X,
+; CM-NEXT: CNDE_INT T0.X, T2.W, PV.W, T0.X,
+; CM-NEXT: CNDE_INT T0.Y, T2.Y, T3.X, PV.Z,
; CM-NEXT: LSHL T0.Z, PV.Y, literal.x,
-; CM-NEXT: AND_INT * T1.W, KC0[3].Z, literal.y,
+; CM-NEXT: AND_INT * T0.W, KC0[3].Z, literal.y,
; CM-NEXT: 23(3.222986e-44), -4096(nan)
-; CM-NEXT: MUL_IEEE T0.X, PV.W, literal.x,
; CM-NEXT: ADD T1.Y, KC0[3].Z, -PV.W,
-; CM-NEXT: ADD_INT T0.Z, PV.Z, literal.y,
-; CM-NEXT: CNDE_INT * T0.W, T0.W, PV.Y, PV.X,
-; CM-NEXT: 1079283712(3.321289e+00), 1065353216(1.000000e+00)
-; CM-NEXT: MUL_IEEE T0.Y, PV.W, PV.Z,
-; CM-NEXT: MUL_IEEE T0.Z, PV.Y, literal.x,
-; CM-NEXT: RNDNE * T0.W, PV.X,
-; CM-NEXT: 975668412(6.390323e-04), 0(0.000000e+00)
+; CM-NEXT: ADD_INT T0.Z, PV.Z, literal.x,
+; CM-NEXT: CNDE_INT * T1.W, T3.W, PV.X, PV.Y,
+; CM-NEXT: 1065353216(1.000000e+00), 0(0.000000e+00)
+; CM-NEXT: MUL_IEEE T0.X, PV.W, PV.Z,
+; CM-NEXT: MUL_IEEE T0.Y, PV.Y, literal.x,
+; CM-NEXT: MUL_IEEE T0.Z, T0.W, literal.y,
+; CM-NEXT: AND_INT * T1.W, KC0[3].W, literal.z,
+; CM-NEXT: 975668412(6.390323e-04), 1079283712(3.321289e+00)
+; CM-NEXT: -4096(nan), 0(0.000000e+00)
; CM-NEXT: SETGT T1.X, literal.x, KC0[3].Y,
-; CM-NEXT: TRUNC T2.Y, PV.W,
-; CM-NEXT: AND_INT T1.Z, KC0[3].W, literal.y,
-; CM-NEXT: MULADD_IEEE * T2.W, T1.Y, literal.z, PV.Z,
-; CM-NEXT: -1036817932(-4.485347e+01), -4096(nan)
-; CM-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
-; CM-NEXT: MULADD_IEEE T2.X, T1.W, literal.x, PV.W,
-; CM-NEXT: MUL_IEEE T1.Y, PV.Z, literal.y,
-; CM-NEXT: FLT_TO_INT T0.Z, PV.Y,
-; CM-NEXT: ADD * T1.W, KC0[3].W, -PV.Z,
+; CM-NEXT: ADD T2.Y, KC0[3].W, -PV.W,
+; CM-NEXT: RNDNE T1.Z, PV.Z,
+; CM-NEXT: MULADD_IEEE * T2.W, T1.Y, literal.y, PV.Y,
+; CM-NEXT: -1036817932(-4.485347e+01), 1079283712(3.321289e+00)
+; CM-NEXT: MULADD_IEEE T2.X, T0.W, literal.x, PV.W,
+; CM-NEXT: ADD T0.Y, T0.Z, -PV.Z,
+; CM-NEXT: MUL_IEEE T0.Z, PV.Y, literal.x,
+; CM-NEXT: MUL_IEEE * T0.W, T1.W, literal.y, BS:VEC_120/SCL_212
; CM-NEXT: 975668412(6.390323e-04), 1079283712(3.321289e+00)
-; CM-NEXT: ADD T0.X, T0.X, -T0.W,
-; CM-NEXT: MUL_IEEE T2.Y, PV.W, literal.x,
-; CM-NEXT: MAX_INT T2.Z, PV.Z, literal.y,
-; CM-NEXT: RNDNE * T0.W, PV.Y,
-; CM-NEXT: 975668412(6.390323e-04), -330(nan)
-; CM-NEXT: TRUNC T3.X, PV.W,
-; CM-NEXT: ADD_INT T3.Y, PV.Z, literal.x,
-; CM-NEXT: MULADD_IEEE T2.Z, T1.W, literal.y, PV.Y,
-; CM-NEXT: ADD * T1.W, PV.X, T2.X,
-; CM-NEXT: 204(2.858649e-43), 1079283712(3.321289e+00)
-; CM-NEXT: EXP_IEEE T0.X, T1.W,
-; CM-NEXT: EXP_IEEE T0.Y (MASKED), T1.W,
-; CM-NEXT: EXP_IEEE T0.Z (MASKED), T1.W,
-; CM-NEXT: EXP_IEEE * T0.W (MASKED), T1.W,
-; CM-NEXT: ADD_INT T2.X, T0.Z, literal.x,
-; CM-NEXT: MULADD_IEEE T2.Y, T1.Z, literal.y, T2.Z, BS:VEC_102/SCL_221
-; CM-NEXT: ADD T1.Z, T1.Y, -T0.W,
-; CM-NEXT: MUL_IEEE * T0.W, PV.X, literal.z,
-; CM-NEXT: 102(1.429324e-43), 975668412(6.390323e-04)
-; CM-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; CM-NEXT: SETGT_UINT T4.X, T0.Z, literal.x,
-; CM-NEXT: MUL_IEEE T1.Y, PV.W, literal.y,
-; CM-NEXT: SETGT_UINT T2.Z, T0.Z, literal.z,
-; CM-NEXT: ADD * T1.W, PV.Z, PV.Y,
-; CM-NEXT: -229(nan), 2130706432(1.701412e+38)
-; CM-NEXT: 254(3.559298e-43), 0(0.000000e+00)
+; CM-NEXT: TRUNC T3.X, T1.Z,
+; CM-NEXT: RNDNE T1.Y, PV.W,
+; CM-NEXT: MULADD_IEEE T0.Z, T2.Y, literal.x, PV.Z,
+; CM-NEXT: ADD * T2.W, PV.Y, PV.X,
+; CM-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
+; CM-NEXT: EXP_IEEE T0.X (MASKED), T2.W,
+; CM-NEXT: EXP_IEEE T0.Y, T2.W,
+; CM-NEXT: EXP_IEEE T0.Z (MASKED), T2.W,
+; CM-NEXT: EXP_IEEE * T0.W (MASKED), T2.W,
+; CM-NEXT: MULADD_IEEE T2.X, T1.W, literal.x, T0.Z,
+; CM-NEXT: ADD T2.Y, T0.W, -T1.Y, BS:VEC_120/SCL_212
+; CM-NEXT: FLT_TO_INT T0.Z, T3.X,
+; CM-NEXT: MUL_IEEE * T0.W, PV.Y, literal.y,
+; CM-NEXT: 975668412(6.390323e-04), 209715200(1.972152e-31)
+; CM-NEXT: MUL_IEEE T3.X, PV.W, literal.x,
+; CM-NEXT: SETGT_UINT T3.Y, PV.Z, literal.y,
+; CM-NEXT: TRUNC T1.Z, T1.Y,
+; CM-NEXT: ADD * T1.W, PV.Y, PV.X,
+; CM-NEXT: 209715200(1.972152e-31), -229(nan)
; CM-NEXT: EXP_IEEE T1.X (MASKED), T1.W,
-; CM-NEXT: EXP_IEEE T1.Y (MASKED), T1.W,
-; CM-NEXT: EXP_IEEE T1.Z, T1.W,
+; CM-NEXT: EXP_IEEE T1.Y, T1.W,
+; CM-NEXT: EXP_IEEE T1.Z (MASKED), T1.W,
; CM-NEXT: EXP_IEEE * T1.W (MASKED), T1.W,
-; CM-NEXT: ALU clause starting at 109:
-; CM-NEXT: CNDE_INT T5.X, T2.Z, T0.W, T1.Y,
-; CM-NEXT: CNDE_INT T1.Y, T4.X, T3.Y, T2.X,
-; CM-NEXT: FLT_TO_INT T3.Z, T3.X, BS:VEC_120/SCL_212
-; CM-NEXT: MUL_IEEE * T0.W, T1.Z, literal.x, BS:VEC_120/SCL_212
-; CM-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; CM-NEXT: SETGT_INT T2.X, T0.Z, literal.x,
-; CM-NEXT: MUL_IEEE T2.Y, T0.X, literal.y,
-; CM-NEXT: MUL_IEEE T4.Z, PV.W, literal.z,
-; CM-NEXT: SETGT_UINT * T1.W, PV.Z, literal.w,
-; CM-NEXT: -127(nan), 209715200(1.972152e-31)
-; CM-NEXT: 2130706432(1.701412e+38), 254(3.559298e-43)
-; CM-NEXT: CNDE_INT T3.X, PV.W, T0.W, PV.Z,
-; CM-NEXT: MUL_IEEE T3.Y, PV.Y, literal.x,
-; CM-NEXT: CNDE_INT T4.Z, PV.X, T1.Y, T0.Z,
-; CM-NEXT: MAX_INT * T0.W, T3.Z, literal.y,
-; CM-NEXT: 209715200(1.972152e-31), -330(nan)
-; CM-NEXT: ADD_INT T6.X, PV.W, literal.x,
-; CM-NEXT: ADD_INT T1.Y, T3.Z, literal.y,
-; CM-NEXT: SETGT_UINT T5.Z, T3.Z, literal.z,
-; CM-NEXT: MUL_IEEE * T0.W, T1.Z, literal.w, BS:VEC_120/SCL_212
+; CM-NEXT: FLT_TO_INT T2.X, T1.Z,
+; CM-NEXT: MUL_IEEE T2.Y, PV.Y, literal.x,
+; CM-NEXT: CNDE_INT T1.Z, T3.Y, T3.X, T0.W,
+; CM-NEXT: SETGT_INT * T0.W, T0.Z, literal.y, BS:VEC_120/SCL_212
+; CM-NEXT: 209715200(1.972152e-31), -127(nan)
+; CM-NEXT: CNDE_INT T3.X, PV.W, PV.Z, T0.Y,
+; CM-NEXT: MUL_IEEE * T4.Y, PV.Y, literal.x,
+; CM-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
+; CM-NEXT: ALU clause starting at 108:
+; CM-NEXT: SETGT_UINT T1.Z, T2.X, literal.x,
+; CM-NEXT: MAX_INT * T1.W, T0.Z, literal.y,
+; CM-NEXT: -229(nan), -330(nan)
+; CM-NEXT: ADD_INT T4.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T5.Y, T0.Z, literal.y,
+; CM-NEXT: CNDE_INT T2.Z, PV.Z, T4.Y, T2.Y,
+; CM-NEXT: SETGT_INT * T1.W, T2.X, literal.z,
; CM-NEXT: 204(2.858649e-43), 102(1.429324e-43)
-; CM-NEXT: -229(nan), 209715200(1.972152e-31)
-; CM-NEXT: MUL_IEEE T7.X, PV.W, literal.x,
-; CM-NEXT: MIN_INT T4.Y, T3.Z, literal.y,
-; CM-NEXT: CNDE_INT T6.Z, PV.Z, PV.X, PV.Y,
-; CM-NEXT: SETGT_INT * T2.W, T3.Z, literal.z,
-; CM-NEXT: 209715200(1.972152e-31), 381(5.338947e-43)
; CM-NEXT: -127(nan), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T6.X, PV.W, PV.Z, T3.Z,
-; CM-NEXT: MIN_INT T1.Y, T0.Z, literal.x,
-; CM-NEXT: ADD_INT T6.Z, PV.Y, literal.y,
-; CM-NEXT: ADD_INT * T3.W, T3.Z, literal.z, BS:VEC_120/SCL_212
-; CM-NEXT: 381(5.338947e-43), -254(nan)
-; CM-NEXT: -127(nan), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T8.X, T1.W, PV.W, PV.Z,
-; CM-NEXT: SETGT_INT T4.Y, T3.Z, literal.x,
-; CM-NEXT: ADD_INT T3.Z, PV.Y, literal.y,
-; CM-NEXT: ADD_INT * T1.W, T0.Z, literal.z, BS:VEC_120/SCL_212
+; CM-NEXT: CNDE_INT T5.X, PV.W, PV.Z, T1.Y,
+; CM-NEXT: MUL_IEEE T0.Y, T0.Y, literal.x,
+; CM-NEXT: MAX_INT T2.Z, T2.X, literal.y,
+; CM-NEXT: CNDE_INT * T2.W, T3.Y, PV.X, PV.Y, BS:VEC_120/SCL_212
+; CM-NEXT: 2130706432(1.701412e+38), -330(nan)
+; CM-NEXT: CNDE_INT T4.X, T0.W, PV.W, T0.Z,
+; CM-NEXT: ADD_INT T2.Y, PV.Z, literal.x,
+; CM-NEXT: ADD_INT T2.Z, T2.X, literal.y,
+; CM-NEXT: MIN_INT * T0.W, T2.X, literal.z,
+; CM-NEXT: 204(2.858649e-43), 102(1.429324e-43)
+; CM-NEXT: 381(5.338947e-43), 0(0.000000e+00)
+; CM-NEXT: ADD_INT T6.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T3.Y, T2.X, literal.y,
+; CM-NEXT: SETGT_UINT T3.Z, T2.X, literal.z,
+; CM-NEXT: CNDE_INT * T0.W, T1.Z, PV.Y, PV.Z,
+; CM-NEXT: -254(nan), -127(nan)
+; CM-NEXT: 254(3.559298e-43), 0(0.000000e+00)
+; CM-NEXT: MUL_IEEE T7.X, T1.Y, literal.x,
+; CM-NEXT: CNDE_INT T1.Y, T1.W, PV.W, T2.X,
+; CM-NEXT: CNDE_INT T1.Z, PV.Z, PV.Y, PV.X,
+; CM-NEXT: MIN_INT * T0.W, T0.Z, literal.y,
+; CM-NEXT: 2130706432(1.701412e+38), 381(5.338947e-43)
+; CM-NEXT: SETGT_INT T2.X, T2.X, literal.x,
+; CM-NEXT: ADD_INT T2.Y, PV.W, literal.y,
+; CM-NEXT: ADD_INT T2.Z, T0.Z, literal.z,
+; CM-NEXT: SETGT_UINT * T0.W, T0.Z, literal.w,
; CM-NEXT: 127(1.779649e-43), -254(nan)
-; CM-NEXT: -127(nan), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T9.X, T2.Z, PV.W, PV.Z,
-; CM-NEXT: SETGT_INT T1.Y, T0.Z, literal.x, BS:VEC_120/SCL_212
-; CM-NEXT: CNDE_INT T0.Z, PV.Y, T6.X, PV.X,
-; CM-NEXT: CNDE_INT * T0.W, T5.Z, T7.X, T0.W, BS:VEC_201
-; CM-NEXT: 127(1.779649e-43), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T6.X, T2.W, PV.W, T1.Z,
-; CM-NEXT: LSHL T5.Y, PV.Z, literal.x,
-; CM-NEXT: CNDE_INT T0.Z, PV.Y, T4.Z, PV.X,
-; CM-NEXT: CNDE_INT * T0.W, T4.X, T3.Y, T2.Y,
-; CM-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T0.X, T2.X, PV.W, T0.X,
-; CM-NEXT: LSHL T2.Y, PV.Z, literal.x,
+; CM-NEXT: -127(nan), 254(3.559298e-43)
+; CM-NEXT: CNDE_INT T6.X, PV.W, PV.Z, PV.Y,
+; CM-NEXT: SETGT_INT T2.Y, T0.Z, literal.x,
+; CM-NEXT: CNDE_INT T0.Z, PV.X, T1.Y, T1.Z,
+; CM-NEXT: MUL_IEEE * T1.W, T7.X, literal.y,
+; CM-NEXT: 127(1.779649e-43), 2130706432(1.701412e+38)
+; CM-NEXT: CNDE_INT T7.X, T3.Z, T7.X, PV.W,
+; CM-NEXT: LSHL T1.Y, PV.Z, literal.x,
+; CM-NEXT: CNDE_INT T0.Z, PV.Y, T4.X, PV.X, BS:VEC_021/SCL_122
+; CM-NEXT: MUL_IEEE * T1.W, T0.Y, literal.y,
+; CM-NEXT: 23(3.222986e-44), 2130706432(1.701412e+38)
+; CM-NEXT: CNDE_INT T4.X, T0.W, T0.Y, PV.W,
+; CM-NEXT: LSHL T0.Y, PV.Z, literal.x,
; CM-NEXT: ADD_INT T0.Z, PV.Y, literal.y,
-; CM-NEXT: CNDE_INT * T0.W, T4.Y, PV.X, T3.X, BS:VEC_021/SCL_122
+; CM-NEXT: CNDE_INT * T0.W, T2.X, T5.X, PV.X,
; CM-NEXT: 23(3.222986e-44), 1065353216(1.000000e+00)
; CM-NEXT: MUL_IEEE T2.X, PV.W, PV.Z,
-; CM-NEXT: SETGT T3.Y, literal.x, KC0[3].W,
+; CM-NEXT: SETGT T1.Y, literal.x, KC0[3].W,
; CM-NEXT: ADD_INT T0.Z, PV.Y, literal.y,
-; CM-NEXT: CNDE_INT * T0.W, T1.Y, PV.X, T5.X,
+; CM-NEXT: CNDE_INT * T0.W, T2.Y, T3.X, PV.X,
; CM-NEXT: -1036817932(-4.485347e+01), 1065353216(1.000000e+00)
-; CM-NEXT: MUL_IEEE T0.X, PV.W, PV.Z,
-; CM-NEXT: SETGT T1.Y, literal.x, KC0[3].Z,
+; CM-NEXT: MUL_IEEE T3.X, PV.W, PV.Z,
+; CM-NEXT: SETGT T0.Y, literal.x, KC0[3].Z,
; CM-NEXT: CNDE T0.Z, PV.Y, PV.X, 0.0,
; CM-NEXT: SETGT * T0.W, KC0[3].W, literal.y,
; CM-NEXT: -1036817932(-4.485347e+01), 1109008539(3.853184e+01)
; CM-NEXT: CNDE T2.X, PV.W, PV.Z, literal.x,
-; CM-NEXT: CNDE T1.Y, PV.Y, PV.X, 0.0,
+; CM-NEXT: CNDE T0.Y, PV.Y, PV.X, 0.0,
; CM-NEXT: SETGT T0.Z, KC0[3].Z, literal.y,
; CM-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.z,
; CM-NEXT: 2139095040(INF), 1109008539(3.853184e+01)
; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
-; CM-NEXT: LSHR T0.X, PV.W, literal.x,
-; CM-NEXT: CNDE T1.Y, PV.Z, PV.Y, literal.y,
-; CM-NEXT: CNDE T0.Z, T1.X, T0.Y, 0.0,
+; CM-NEXT: LSHR T3.X, PV.W, literal.x,
+; CM-NEXT: CNDE T0.Y, PV.Z, PV.Y, literal.y,
+; CM-NEXT: CNDE T0.Z, T1.X, T0.X, 0.0,
; CM-NEXT: SETGT * T0.W, KC0[3].Y, literal.z,
; CM-NEXT: 2(2.802597e-45), 2139095040(INF)
; CM-NEXT: 1109008539(3.853184e+01), 0(0.000000e+00)
-; CM-NEXT: CNDE * T1.X, PV.W, PV.Z, literal.x,
+; CM-NEXT: CNDE * T0.X, PV.W, PV.Z, literal.x,
; CM-NEXT: 2139095040(INF), 0(0.000000e+00)
-; CM-NEXT: LSHR * T3.X, KC0[2].Y, literal.x,
+; CM-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%result = call <3 x float> @llvm.exp10.v3f32(<3 x float> %in)
store <3 x float> %result, ptr addrspace(1) %out
@@ -2052,227 +2043,224 @@ define amdgpu_kernel void @s_exp10_v4f32(ptr addrspace(1) %out, <4 x float> %in)
; R600-LABEL: s_exp10_v4f32:
; R600: ; %bb.0:
; R600-NEXT: ALU 98, @6, KC0[CB0:0-32], KC1[]
-; R600-NEXT: ALU 98, @105, KC0[CB0:0-32], KC1[]
-; R600-NEXT: ALU 24, @204, KC0[CB0:0-32], KC1[]
+; R600-NEXT: ALU 95, @105, KC0[CB0:0-32], KC1[]
+; R600-NEXT: ALU 24, @201, KC0[CB0:0-32], KC1[]
; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.XYZW, T0.X, 1
; R600-NEXT: CF_END
; R600-NEXT: PAD
; R600-NEXT: ALU clause starting at 6:
; R600-NEXT: AND_INT * T0.W, KC0[3].Z, literal.x,
; R600-NEXT: -4096(nan), 0(0.000000e+00)
-; R600-NEXT: ADD T1.W, KC0[3].Z, -PV.W,
-; R600-NEXT: MUL_IEEE * T2.W, PV.W, literal.x,
+; R600-NEXT: ADD * T1.W, KC0[3].Z, -PV.W,
+; R600-NEXT: MUL_IEEE T2.W, PV.W, literal.x,
+; R600-NEXT: MUL_IEEE * T3.W, T0.W, literal.y,
+; R600-NEXT: 975668412(6.390323e-04), 1079283712(3.321289e+00)
+; R600-NEXT: RNDNE T4.W, PS,
+; R600-NEXT: MULADD_IEEE * T1.W, T1.W, literal.x, PV.W, BS:VEC_021/SCL_122
; R600-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
-; R600-NEXT: RNDNE T3.W, PS,
-; R600-NEXT: MUL_IEEE * T4.W, PV.W, literal.x,
+; R600-NEXT: MULADD_IEEE T0.W, T0.W, literal.x, PS,
+; R600-NEXT: ADD * T1.W, T3.W, -PV.W,
; R600-NEXT: 975668412(6.390323e-04), 0(0.000000e+00)
-; R600-NEXT: MULADD_IEEE T1.W, T1.W, literal.x, PS,
-; R600-NEXT: TRUNC * T4.W, PV.W,
-; R600-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
-; R600-NEXT: FLT_TO_INT T0.Z, PS,
-; R600-NEXT: MULADD_IEEE T0.W, T0.W, literal.x, PV.W,
-; R600-NEXT: ADD * T1.W, T2.W, -T3.W,
-; R600-NEXT: 975668412(6.390323e-04), 0(0.000000e+00)
-; R600-NEXT: ADD T1.Z, PS, PV.W,
-; R600-NEXT: MAX_INT T0.W, PV.Z, literal.x,
-; R600-NEXT: MIN_INT * T1.W, PV.Z, literal.y,
-; R600-NEXT: -330(nan), 381(5.338947e-43)
-; R600-NEXT: ADD_INT T0.X, PS, literal.x,
-; R600-NEXT: ADD_INT T0.Y, PV.W, literal.y,
-; R600-NEXT: ADD_INT T2.Z, T0.Z, literal.z,
-; R600-NEXT: SETGT_UINT T0.W, T0.Z, literal.w,
-; R600-NEXT: EXP_IEEE * T1.X, PV.Z,
-; R600-NEXT: -254(nan), 204(2.858649e-43)
-; R600-NEXT: 102(1.429324e-43), -229(nan)
-; R600-NEXT: ADD_INT T2.X, T0.Z, literal.x,
-; R600-NEXT: SETGT_UINT T1.Y, T0.Z, literal.y,
-; R600-NEXT: CNDE_INT T1.Z, PV.W, PV.Y, PV.Z,
-; R600-NEXT: SETGT_INT T1.W, T0.Z, literal.x,
-; R600-NEXT: MUL_IEEE * T2.W, PS, literal.z,
-; R600-NEXT: -127(nan), 254(3.559298e-43)
-; R600-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T3.X, T1.X, literal.x,
-; R600-NEXT: MUL_IEEE T0.Y, PS, literal.y,
-; R600-NEXT: CNDE_INT T1.Z, PV.W, PV.Z, T0.Z,
-; R600-NEXT: CNDE_INT T3.W, PV.Y, PV.X, T0.X,
-; R600-NEXT: SETGT_INT * T4.W, T0.Z, literal.z,
-; R600-NEXT: 2130706432(1.701412e+38), 209715200(1.972152e-31)
-; R600-NEXT: 127(1.779649e-43), 0(0.000000e+00)
-; R600-NEXT: AND_INT T2.Y, KC0[4].X, literal.x,
-; R600-NEXT: CNDE_INT T0.Z, PS, PV.Z, PV.W,
-; R600-NEXT: CNDE_INT T0.W, T0.W, PV.Y, T2.W,
-; R600-NEXT: MUL_IEEE * T2.W, PV.X, literal.y,
-; R600-NEXT: -4096(nan), 2130706432(1.701412e+38)
-; R600-NEXT: CNDE_INT T0.X, T1.Y, T3.X, PS,
-; R600-NEXT: CNDE_INT T0.Y, T1.W, PV.W, T1.X,
-; R600-NEXT: LSHL T0.Z, PV.Z, literal.x,
-; R600-NEXT: ADD T0.W, KC0[4].X, -PV.Y,
-; R600-NEXT: MUL_IEEE * T1.W, PV.Y, literal.y,
-; R600-NEXT: 23(3.222986e-44), 1079283712(3.321289e+00)
-; R600-NEXT: RNDNE T1.Y, PS,
-; R600-NEXT: MUL_IEEE T1.Z, PV.W, literal.x,
-; R600-NEXT: ADD_INT T2.W, PV.Z, literal.y,
-; R600-NEXT: CNDE_INT * T3.W, T4.W, PV.Y, PV.X,
-; R600-NEXT: 975668412(6.390323e-04), 1065353216(1.000000e+00)
-; R600-NEXT: MUL_IEEE T0.Y, PS, PV.W,
-; R600-NEXT: AND_INT T0.Z, KC0[3].W, literal.x,
-; R600-NEXT: MULADD_IEEE T0.W, T0.W, literal.y, PV.Z,
-; R600-NEXT: TRUNC * T2.W, PV.Y,
-; R600-NEXT: -4096(nan), 1079283712(3.321289e+00)
-; R600-NEXT: SETGT T0.X, literal.x, KC0[3].Z,
-; R600-NEXT: FLT_TO_INT T3.Y, PS,
-; R600-NEXT: MULADD_IEEE T1.Z, T2.Y, literal.y, PV.W,
-; R600-NEXT: ADD T0.W, T1.W, -T1.Y,
-; R600-NEXT: MUL_IEEE * T1.W, PV.Z, literal.z,
-; R600-NEXT: -1036817932(-4.485347e+01), 975668412(6.390323e-04)
-; R600-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
-; R600-NEXT: RNDNE T1.X, PS,
-; R600-NEXT: AND_INT T1.Y, KC0[3].Y, literal.x,
-; R600-NEXT: ADD T1.Z, PV.W, PV.Z,
-; R600-NEXT: MAX_INT T0.W, PV.Y, literal.y,
-; R600-NEXT: MIN_INT * T2.W, PV.Y, literal.z,
-; R600-NEXT: -4096(nan), -330(nan)
+; R600-NEXT: ADD T0.W, PS, PV.W,
+; R600-NEXT: TRUNC * T1.W, T4.W,
+; R600-NEXT: FLT_TO_INT T1.W, PS,
+; R600-NEXT: EXP_IEEE * T0.X, PV.W,
+; R600-NEXT: MUL_IEEE T0.Z, PS, literal.x,
+; R600-NEXT: MAX_INT T0.W, PV.W, literal.y,
+; R600-NEXT: MIN_INT * T2.W, PV.W, literal.z,
+; R600-NEXT: 209715200(1.972152e-31), -330(nan)
; R600-NEXT: 381(5.338947e-43), 0(0.000000e+00)
-; R600-NEXT: ADD_INT T2.X, PS, literal.x,
-; R600-NEXT: ADD_INT T2.Y, PV.W, literal.y,
-; R600-NEXT: ADD_INT T2.Z, T3.Y, literal.z,
-; R600-NEXT: SETGT_UINT T0.W, T3.Y, literal.w,
-; R600-NEXT: EXP_IEEE * T1.Z, PV.Z,
-; R600-NEXT: -254(nan), 204(2.858649e-43)
-; R600-NEXT: 102(1.429324e-43), -229(nan)
-; R600-NEXT: ADD_INT T3.X, T3.Y, literal.x,
-; R600-NEXT: SETGT_UINT T4.Y, T3.Y, literal.y,
-; R600-NEXT: CNDE_INT T2.Z, PV.W, PV.Y, PV.Z,
-; R600-NEXT: SETGT_INT T2.W, T3.Y, literal.x,
-; R600-NEXT: MUL_IEEE * T3.W, PS, literal.z,
+; R600-NEXT: ADD_INT T1.X, PS, literal.x,
+; R600-NEXT: AND_INT T0.Y, KC0[4].X, literal.y,
+; R600-NEXT: ADD_INT T1.Z, PV.W, literal.z,
+; R600-NEXT: ADD_INT * T0.W, T1.W, literal.w,
+; R600-NEXT: -254(nan), -4096(nan)
+; R600-NEXT: 204(2.858649e-43), 102(1.429324e-43)
+; R600-NEXT: SETGT_UINT * T2.W, T1.W, literal.x,
+; R600-NEXT: -229(nan), 0(0.000000e+00)
+; R600-NEXT: ADD_INT T2.X, T1.W, literal.x,
+; R600-NEXT: SETGT_UINT T1.Y, T1.W, literal.y,
+; R600-NEXT: CNDE_INT T1.Z, PV.W, T1.Z, T0.W,
+; R600-NEXT: SETGT_INT T0.W, T1.W, literal.x,
+; R600-NEXT: ADD * T3.W, KC0[4].X, -T0.Y,
; R600-NEXT: -127(nan), 254(3.559298e-43)
-; R600-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T4.X, T1.Z, literal.x,
-; R600-NEXT: MUL_IEEE T2.Y, PS, literal.y,
-; R600-NEXT: CNDE_INT T2.Z, PV.W, PV.Z, T3.Y,
-; R600-NEXT: CNDE_INT T4.W, PV.Y, PV.X, T2.X,
-; R600-NEXT: SETGT_INT * T5.W, T3.Y, literal.z,
-; R600-NEXT: 2130706432(1.701412e+38), 209715200(1.972152e-31)
+; R600-NEXT: MUL_IEEE T3.X, PS, literal.x,
+; R600-NEXT: MUL_IEEE T2.Y, T0.Y, literal.y,
+; R600-NEXT: CNDE_INT T1.Z, PV.W, PV.Z, T1.W,
+; R600-NEXT: CNDE_INT T4.W, PV.Y, PV.X, T1.X,
+; R600-NEXT: SETGT_INT * T1.W, T1.W, literal.z,
+; R600-NEXT: 975668412(6.390323e-04), 1079283712(3.321289e+00)
; R600-NEXT: 127(1.779649e-43), 0(0.000000e+00)
-; R600-NEXT: ADD T2.X, KC0[3].W, -T0.Z,
-; R600-NEXT: CNDE_INT T3.Y, PS, PV.Z, PV.W,
-; R600-NEXT: CNDE_INT * T2.Z, T0.W, PV.Y, T3.W,
-; R600-NEXT: ALU clause starting at 105:
-; R600-NEXT: MUL_IEEE T0.W, T4.X, literal.x,
-; R600-NEXT: ADD * T3.W, KC0[3].Y, -T1.Y,
+; R600-NEXT: CNDE_INT T1.X, PS, PV.Z, PV.W,
+; R600-NEXT: RNDNE T3.Y, PV.Y,
+; R600-NEXT: MULADD_IEEE T1.Z, T3.W, literal.x, PV.X,
+; R600-NEXT: MUL_IEEE T3.W, T0.Z, literal.y,
+; R600-NEXT: MUL_IEEE * T4.W, T0.X, literal.z,
+; R600-NEXT: 1079283712(3.321289e+00), 209715200(1.972152e-31)
; R600-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
+; R600-NEXT: MUL_IEEE T2.X, PS, literal.x,
+; R600-NEXT: CNDE_INT T4.Y, T2.W, PV.W, T0.Z,
+; R600-NEXT: MULADD_IEEE T0.Z, T0.Y, literal.y, PV.Z,
+; R600-NEXT: ADD T2.W, T2.Y, -PV.Y, BS:VEC_120/SCL_212
+; R600-NEXT: AND_INT * T3.W, KC0[3].Y, literal.z,
+; R600-NEXT: 2130706432(1.701412e+38), 975668412(6.390323e-04)
+; R600-NEXT: -4096(nan), 0(0.000000e+00)
; R600-NEXT: MUL_IEEE T3.X, PS, literal.x,
-; R600-NEXT: MUL_IEEE T2.Y, T1.Y, literal.y,
-; R600-NEXT: CNDE_INT T3.Z, T4.Y, T4.X, PV.W, BS:VEC_120/SCL_212
-; R600-NEXT: CNDE_INT T0.W, T2.W, T2.Z, T1.Z,
-; R600-NEXT: LSHL * T2.W, T3.Y, literal.z,
-; R600-NEXT: 975668412(6.390323e-04), 1079283712(3.321289e+00)
-; R600-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; R600-NEXT: ADD_INT T4.X, PS, literal.x,
-; R600-NEXT: CNDE_INT T3.Y, T5.W, PV.W, PV.Z,
-; R600-NEXT: RNDNE T1.Z, PV.Y,
-; R600-NEXT: MULADD_IEEE T0.W, T3.W, literal.y, PV.X, BS:VEC_120/SCL_212
-; R600-NEXT: MUL_IEEE * T2.W, T2.X, literal.z,
+; R600-NEXT: ADD T0.Y, PV.W, PV.Z,
+; R600-NEXT: CNDE_INT T0.Z, T0.W, PV.Y, T0.X, BS:VEC_021/SCL_122
+; R600-NEXT: CNDE_INT T0.W, T1.Y, T4.W, PV.X,
+; R600-NEXT: LSHL * T2.W, T1.X, literal.y,
+; R600-NEXT: 1079283712(3.321289e+00), 23(3.222986e-44)
+; R600-NEXT: AND_INT T0.X, KC0[3].W, literal.x,
+; R600-NEXT: TRUNC T1.Y, T3.Y,
+; R600-NEXT: ADD_INT T1.Z, PS, literal.y,
+; R600-NEXT: CNDE_INT T0.W, T1.W, PV.Z, PV.W,
+; R600-NEXT: EXP_IEEE * T0.Y, PV.Y,
+; R600-NEXT: -4096(nan), 1065353216(1.000000e+00)
+; R600-NEXT: MUL_IEEE T1.X, PV.W, PV.Z,
+; R600-NEXT: FLT_TO_INT T1.Y, PV.Y,
+; R600-NEXT: MUL_IEEE T0.Z, PS, literal.x,
+; R600-NEXT: ADD T0.W, KC0[3].W, -PV.X,
+; R600-NEXT: RNDNE * T1.W, T3.X,
+; R600-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
+; R600-NEXT: SETGT T2.X, literal.x, KC0[3].Z,
+; R600-NEXT: TRUNC T2.Y, PS,
+; R600-NEXT: MUL_IEEE T1.Z, PV.W, literal.y,
+; R600-NEXT: MUL_IEEE T2.W, PV.Z, literal.z,
+; R600-NEXT: MAX_INT * T4.W, PV.Y, literal.w,
+; R600-NEXT: -1036817932(-4.485347e+01), 975668412(6.390323e-04)
+; R600-NEXT: 209715200(1.972152e-31), -330(nan)
+; R600-NEXT: ADD T4.X, KC0[3].Y, -T3.W,
+; R600-NEXT: ADD_INT T3.Y, PS, literal.x,
+; R600-NEXT: ADD_INT T2.Z, T1.Y, literal.y,
+; R600-NEXT: SETGT_UINT T4.W, T1.Y, literal.z,
+; R600-NEXT: MIN_INT * T5.W, T1.Y, literal.w,
+; R600-NEXT: 204(2.858649e-43), 102(1.429324e-43)
+; R600-NEXT: -229(nan), 381(5.338947e-43)
+; R600-NEXT: ADD_INT T5.X, PS, literal.x,
+; R600-NEXT: ADD_INT T4.Y, T1.Y, literal.y,
+; R600-NEXT: SETGT_UINT T3.Z, T1.Y, literal.z,
+; R600-NEXT: CNDE_INT T5.W, PV.W, PV.Y, PV.Z,
+; R600-NEXT: SETGT_INT * T6.W, T1.Y, literal.y,
+; R600-NEXT: -254(nan), -127(nan)
+; R600-NEXT: 254(3.559298e-43), 0(0.000000e+00)
+; R600-NEXT: MUL_IEEE T6.X, T0.Y, literal.x,
+; R600-NEXT: CNDE_INT T3.Y, PS, PV.W, T1.Y,
+; R600-NEXT: CNDE_INT * T2.Z, PV.Z, PV.Y, PV.X,
+; R600-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
+; R600-NEXT: ALU clause starting at 105:
+; R600-NEXT: SETGT_INT T5.W, T1.Y, literal.x,
+; R600-NEXT: MUL_IEEE * T7.W, T4.X, literal.y,
+; R600-NEXT: 127(1.779649e-43), 975668412(6.390323e-04)
+; R600-NEXT: MUL_IEEE T5.X, T0.X, literal.x,
+; R600-NEXT: MULADD_IEEE T1.Y, T4.X, literal.x, PS, BS:VEC_120/SCL_212
+; R600-NEXT: CNDE_INT T2.Z, PV.W, T3.Y, T2.Z,
+; R600-NEXT: MUL_IEEE T7.W, T6.X, literal.y, BS:VEC_201
+; R600-NEXT: CNDE_INT * T2.W, T4.W, T2.W, T0.Z,
+; R600-NEXT: 1079283712(3.321289e+00), 2130706432(1.701412e+38)
+; R600-NEXT: CNDE_INT T4.X, T6.W, PS, T0.Y,
+; R600-NEXT: CNDE_INT T0.Y, T3.Z, T6.X, PV.W,
+; R600-NEXT: LSHL T0.Z, PV.Z, literal.x,
+; R600-NEXT: MULADD_IEEE T2.W, T3.W, literal.y, PV.Y, BS:VEC_201
+; R600-NEXT: ADD * T1.W, T3.X, -T1.W,
+; R600-NEXT: 23(3.222986e-44), 975668412(6.390323e-04)
+; R600-NEXT: ADD T3.X, PS, PV.W,
+; R600-NEXT: ADD_INT T1.Y, PV.Z, literal.x,
+; R600-NEXT: CNDE_INT T0.Z, T5.W, PV.X, PV.Y,
+; R600-NEXT: RNDNE T1.W, T5.X,
+; R600-NEXT: MULADD_IEEE * T0.W, T0.W, literal.y, T1.Z, BS:VEC_021/SCL_122
; R600-NEXT: 1065353216(1.000000e+00), 1079283712(3.321289e+00)
-; R600-NEXT: 975668412(6.390323e-04), 0(0.000000e+00)
-; R600-NEXT: MULADD_IEEE T2.X, T2.X, literal.x, PS,
-; R600-NEXT: MULADD_IEEE T1.Y, T1.Y, literal.y, PV.W,
-; R600-NEXT: ADD T2.Z, T2.Y, -PV.Z, BS:VEC_120/SCL_212
-; R600-NEXT: MUL_IEEE T0.W, PV.Y, PV.X,
-; R600-NEXT: SETGT * T2.W, literal.z, KC0[4].X,
-; R600-NEXT: 1079283712(3.321289e+00), 975668412(6.390323e-04)
-; R600-NEXT: -1036817932(-4.485347e+01), 0(0.000000e+00)
-; R600-NEXT: CNDE T3.X, PS, PV.W, 0.0,
-; R600-NEXT: ADD T1.Y, PV.Z, PV.Y,
-; R600-NEXT: TRUNC T1.Z, T1.Z,
-; R600-NEXT: MULADD_IEEE T0.W, T0.Z, literal.x, PV.X, BS:VEC_120/SCL_212
-; R600-NEXT: ADD * T1.W, T1.W, -T1.X,
-; R600-NEXT: 975668412(6.390323e-04), 0(0.000000e+00)
-; R600-NEXT: SETGT T2.X, KC0[4].X, literal.x,
-; R600-NEXT: ADD T2.Y, PS, PV.W,
-; R600-NEXT: FLT_TO_INT T0.Z, PV.Z,
-; R600-NEXT: TRUNC T0.W, T1.X,
-; R600-NEXT: EXP_IEEE * T1.X, PV.Y,
-; R600-NEXT: 1109008539(3.853184e+01), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T4.X, PS, literal.x,
-; R600-NEXT: FLT_TO_INT T1.Y, PV.W,
-; R600-NEXT: MAX_INT T1.Z, PV.Z, literal.y,
-; R600-NEXT: MUL_IEEE T0.W, PS, literal.z,
-; R600-NEXT: EXP_IEEE * T1.W, PV.Y,
-; R600-NEXT: 2130706432(1.701412e+38), -330(nan)
+; R600-NEXT: MULADD_IEEE T0.X, T0.X, literal.x, PS,
+; R600-NEXT: ADD T0.Y, T5.X, -PV.W, BS:VEC_120/SCL_212
+; R600-NEXT: MUL_IEEE T0.Z, PV.Z, PV.Y,
+; R600-NEXT: SETGT T0.W, literal.y, KC0[4].X,
+; R600-NEXT: EXP_IEEE * T1.Y, PV.X,
+; R600-NEXT: 975668412(6.390323e-04), -1036817932(-4.485347e+01)
+; R600-NEXT: CNDE T3.X, PV.W, PV.Z, 0.0,
+; R600-NEXT: ADD T0.Y, PV.Y, PV.X,
+; R600-NEXT: FLT_TO_INT T0.Z, T2.Y,
+; R600-NEXT: TRUNC T0.W, T1.W,
+; R600-NEXT: MUL_IEEE * T1.W, PS, literal.x,
; R600-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T5.X, PV.W, literal.x,
-; R600-NEXT: MUL_IEEE T2.Y, PS, literal.x,
-; R600-NEXT: ADD_INT T1.Z, PV.Z, literal.y,
-; R600-NEXT: ADD_INT T2.W, T0.Z, literal.z,
-; R600-NEXT: MAX_INT * T3.W, PV.Y, literal.w,
-; R600-NEXT: 209715200(1.972152e-31), 204(2.858649e-43)
-; R600-NEXT: 102(1.429324e-43), -330(nan)
-; R600-NEXT: SETGT_UINT T6.X, T0.Z, literal.x,
-; R600-NEXT: ADD_INT T3.Y, PS, literal.y,
-; R600-NEXT: ADD_INT T2.Z, T1.Y, literal.z,
-; R600-NEXT: SETGT_UINT T3.W, T1.Y, literal.x,
-; R600-NEXT: MIN_INT * T4.W, T1.Y, literal.w,
+; R600-NEXT: SETGT T0.X, KC0[4].X, literal.x,
+; R600-NEXT: MUL_IEEE T2.Y, PS, literal.y,
+; R600-NEXT: FLT_TO_INT T1.Z, PV.W,
+; R600-NEXT: MAX_INT T0.W, PV.Z, literal.z,
+; R600-NEXT: EXP_IEEE * T0.Y, PV.Y,
+; R600-NEXT: 1109008539(3.853184e+01), 209715200(1.972152e-31)
+; R600-NEXT: -330(nan), 0(0.000000e+00)
+; R600-NEXT: MUL_IEEE T4.X, T1.Y, literal.x,
+; R600-NEXT: MUL_IEEE T3.Y, PS, literal.y,
+; R600-NEXT: ADD_INT T2.Z, PV.W, literal.z,
+; R600-NEXT: ADD_INT * T0.W, T0.Z, literal.w,
+; R600-NEXT: 2130706432(1.701412e+38), 209715200(1.972152e-31)
+; R600-NEXT: 204(2.858649e-43), 102(1.429324e-43)
+; R600-NEXT: MAX_INT * T2.W, T1.Z, literal.x,
+; R600-NEXT: -330(nan), 0(0.000000e+00)
+; R600-NEXT: SETGT_UINT T5.X, T0.Z, literal.x,
+; R600-NEXT: ADD_INT T4.Y, PV.W, literal.y,
+; R600-NEXT: ADD_INT T3.Z, T1.Z, literal.z, BS:VEC_120/SCL_212
+; R600-NEXT: SETGT_UINT T2.W, T1.Z, literal.x, BS:VEC_120/SCL_212
+; R600-NEXT: MIN_INT * T3.W, T1.Z, literal.w,
; R600-NEXT: -229(nan), 204(2.858649e-43)
; R600-NEXT: 102(1.429324e-43), 381(5.338947e-43)
-; R600-NEXT: ADD_INT T7.X, PS, literal.x,
-; R600-NEXT: ADD_INT T4.Y, T1.Y, literal.y,
-; R600-NEXT: SETGT_UINT T3.Z, T1.Y, literal.z,
-; R600-NEXT: CNDE_INT T4.W, PV.W, PV.Y, PV.Z,
-; R600-NEXT: SETGT_INT * T5.W, T1.Y, literal.y,
+; R600-NEXT: ADD_INT T6.X, PS, literal.x,
+; R600-NEXT: ADD_INT T5.Y, T1.Z, literal.y,
+; R600-NEXT: SETGT_UINT T4.Z, T1.Z, literal.z,
+; R600-NEXT: CNDE_INT T3.W, PV.W, PV.Y, PV.Z,
+; R600-NEXT: SETGT_INT * T4.W, T1.Z, literal.y,
; R600-NEXT: -254(nan), -127(nan)
; R600-NEXT: 254(3.559298e-43), 0(0.000000e+00)
-; R600-NEXT: CNDE_INT T8.X, PS, PV.W, T1.Y,
-; R600-NEXT: CNDE_INT T3.Y, PV.Z, PV.Y, PV.X,
-; R600-NEXT: SETGT_INT T2.Z, T1.Y, literal.x,
-; R600-NEXT: CNDE_INT T2.W, T6.X, T1.Z, T2.W,
-; R600-NEXT: SETGT_INT * T4.W, T0.Z, literal.y,
+; R600-NEXT: CNDE_INT T7.X, PS, PV.W, T1.Z, BS:VEC_021/SCL_122
+; R600-NEXT: CNDE_INT T4.Y, PV.Z, PV.Y, PV.X,
+; R600-NEXT: SETGT_INT T1.Z, T1.Z, literal.x, BS:VEC_120/SCL_212
+; R600-NEXT: CNDE_INT T0.W, T5.X, T2.Z, T0.W, BS:VEC_102/SCL_221
+; R600-NEXT: SETGT_INT * T3.W, T0.Z, literal.y,
; R600-NEXT: 127(1.779649e-43), -127(nan)
-; R600-NEXT: CNDE_INT T7.X, PS, PV.W, T0.Z,
-; R600-NEXT: CNDE_INT T1.Y, PV.Z, PV.X, PV.Y,
-; R600-NEXT: MIN_INT T1.Z, T0.Z, literal.x,
-; R600-NEXT: MUL_IEEE T2.W, T1.W, literal.y,
-; R600-NEXT: MUL_IEEE * T6.W, T2.Y, literal.z,
-; R600-NEXT: 381(5.338947e-43), 2130706432(1.701412e+38)
-; R600-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; R600-NEXT: CNDE_INT T8.X, T3.W, PS, T2.Y,
-; R600-NEXT: MUL_IEEE T2.Y, PV.W, literal.x,
-; R600-NEXT: ADD_INT T1.Z, PV.Z, literal.y,
-; R600-NEXT: ADD_INT T3.W, T0.Z, literal.z,
-; R600-NEXT: SETGT_UINT * T6.W, T0.Z, literal.w,
+; R600-NEXT: CNDE_INT T6.X, PS, PV.W, T0.Z,
+; R600-NEXT: CNDE_INT T4.Y, PV.Z, PV.X, PV.Y,
+; R600-NEXT: MIN_INT T2.Z, T0.Z, literal.x,
+; R600-NEXT: MUL_IEEE T0.W, T3.Y, literal.y,
+; R600-NEXT: MUL_IEEE * T5.W, T0.Y, literal.z,
+; R600-NEXT: 381(5.338947e-43), 209715200(1.972152e-31)
+; R600-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
+; R600-NEXT: MUL_IEEE T7.X, PS, literal.x,
+; R600-NEXT: CNDE_INT T3.Y, T2.W, PV.W, T3.Y,
+; R600-NEXT: ADD_INT T2.Z, PV.Z, literal.y,
+; R600-NEXT: ADD_INT T0.W, T0.Z, literal.z,
+; R600-NEXT: SETGT_UINT * T2.W, T0.Z, literal.w,
; R600-NEXT: 2130706432(1.701412e+38), -254(nan)
; R600-NEXT: -127(nan), 254(3.559298e-43)
-; R600-NEXT: CNDE_INT T9.X, PS, PV.W, PV.Z,
-; R600-NEXT: SETGT_INT T3.Y, T0.Z, literal.x,
-; R600-NEXT: CNDE_INT T0.Z, T3.Z, T2.W, PV.Y, BS:VEC_120/SCL_212
-; R600-NEXT: CNDE_INT T1.W, T5.W, PV.X, T1.W, BS:VEC_021/SCL_122
-; R600-NEXT: LSHL * T2.W, T1.Y, literal.y,
+; R600-NEXT: CNDE_INT T8.X, PS, PV.W, PV.Z,
+; R600-NEXT: SETGT_INT T5.Y, T0.Z, literal.x,
+; R600-NEXT: CNDE_INT T0.Z, T4.W, PV.Y, T0.Y, BS:VEC_021/SCL_122
+; R600-NEXT: CNDE_INT T0.W, T4.Z, T5.W, PV.X, BS:VEC_120/SCL_212
+; R600-NEXT: LSHL * T4.W, T4.Y, literal.y,
; R600-NEXT: 127(1.779649e-43), 23(3.222986e-44)
-; R600-NEXT: ADD_INT T8.X, PS, literal.x,
-; R600-NEXT: CNDE_INT T1.Y, T2.Z, PV.W, PV.Z,
-; R600-NEXT: CNDE_INT T0.Z, PV.Y, T7.X, PV.X,
-; R600-NEXT: CNDE_INT * T0.W, T6.X, T5.X, T0.W, BS:VEC_021/SCL_122
-; R600-NEXT: 1065353216(1.000000e+00), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE * T1.W, T4.X, literal.x,
-; R600-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; R600-NEXT: CNDE_INT T4.X, T6.W, T4.X, PV.W,
-; R600-NEXT: CNDE_INT * T2.Y, T4.W, T0.W, T1.X, BS:VEC_120/SCL_212
-; R600-NEXT: ALU clause starting at 204:
+; R600-NEXT: ADD_INT T7.X, PS, literal.x,
+; R600-NEXT: CNDE_INT T0.Y, T1.Z, PV.Z, PV.W,
+; R600-NEXT: CNDE_INT T0.Z, PV.Y, T6.X, PV.X,
+; R600-NEXT: MUL_IEEE T0.W, T4.X, literal.y,
+; R600-NEXT: CNDE_INT * T1.W, T5.X, T2.Y, T1.W,
+; R600-NEXT: 1065353216(1.000000e+00), 2130706432(1.701412e+38)
+; R600-NEXT: CNDE_INT T5.X, T3.W, PS, T1.Y,
+; R600-NEXT: CNDE_INT * T1.Y, T2.W, T4.X, PV.W, BS:VEC_120/SCL_212
+; R600-NEXT: ALU clause starting at 201:
; R600-NEXT: LSHL T0.Z, T0.Z, literal.x,
-; R600-NEXT: MUL_IEEE T0.W, T1.Y, T8.X,
+; R600-NEXT: MUL_IEEE T0.W, T0.Y, T7.X,
; R600-NEXT: SETGT * T1.W, literal.y, KC0[3].W,
; R600-NEXT: 23(3.222986e-44), -1036817932(-4.485347e+01)
-; R600-NEXT: CNDE T1.X, PS, PV.W, 0.0,
-; R600-NEXT: SETGT T1.Y, KC0[3].W, literal.x,
+; R600-NEXT: CNDE T4.X, PS, PV.W, 0.0,
+; R600-NEXT: SETGT T0.Y, KC0[3].W, literal.x,
; R600-NEXT: ADD_INT T0.Z, PV.Z, literal.y,
-; R600-NEXT: CNDE_INT T0.W, T3.Y, T2.Y, T4.X, BS:VEC_120/SCL_212
-; R600-NEXT: CNDE * T1.W, T2.X, T3.X, literal.z,
+; R600-NEXT: CNDE_INT T0.W, T5.Y, T5.X, T1.Y, BS:VEC_102/SCL_221
+; R600-NEXT: CNDE * T1.W, T0.X, T3.X, literal.z,
; R600-NEXT: 1109008539(3.853184e+01), 1065353216(1.000000e+00)
; R600-NEXT: 2139095040(INF), 0(0.000000e+00)
-; R600-NEXT: MUL_IEEE T2.X, PV.W, PV.Z,
+; R600-NEXT: MUL_IEEE T0.X, PV.W, PV.Z,
; R600-NEXT: SETGT T2.Y, literal.x, KC0[3].Y,
; R600-NEXT: CNDE T1.Z, PV.Y, PV.X, literal.y,
-; R600-NEXT: CNDE T0.W, T0.X, T0.Y, 0.0,
+; R600-NEXT: CNDE T0.W, T2.X, T1.X, 0.0,
; R600-NEXT: SETGT * T2.W, KC0[3].Z, literal.z,
; R600-NEXT: -1036817932(-4.485347e+01), 2139095040(INF)
; R600-NEXT: 1109008539(3.853184e+01), 0(0.000000e+00)
@@ -2287,8 +2275,8 @@ define amdgpu_kernel void @s_exp10_v4f32(ptr addrspace(1) %out, <4 x float> %in)
; CM-LABEL: s_exp10_v4f32:
; CM: ; %bb.0:
; CM-NEXT: ALU 97, @6, KC0[CB0:0-32], KC1[]
-; CM-NEXT: ALU 100, @104, KC0[CB0:0-32], KC1[]
-; CM-NEXT: ALU 36, @205, KC0[CB0:0-32], KC1[]
+; CM-NEXT: ALU 97, @104, KC0[CB0:0-32], KC1[]
+; CM-NEXT: ALU 35, @202, KC0[CB0:0-32], KC1[]
; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0, T1.X
; CM-NEXT: CF_END
; CM-NEXT: PAD
@@ -2307,224 +2295,220 @@ define amdgpu_kernel void @s_exp10_v4f32(ptr addrspace(1) %out, <4 x float> %in)
; CM-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
; CM-NEXT: MULADD_IEEE T0.X, T0.W, literal.x, PV.W,
; CM-NEXT: ADD T0.Y, T0.Z, -PV.Z,
-; CM-NEXT: MUL_IEEE T0.Z, PV.Y, literal.x,
-; CM-NEXT: MUL_IEEE * T0.W, T2.W, literal.y, BS:VEC_120/SCL_212
+; CM-NEXT: MUL_IEEE T0.Z, T2.W, literal.y, BS:VEC_120/SCL_212
+; CM-NEXT: MUL_IEEE * T0.W, PV.Y, literal.x,
; CM-NEXT: 975668412(6.390323e-04), 1079283712(3.321289e+00)
; CM-NEXT: TRUNC T1.X, T1.Z,
-; CM-NEXT: RNDNE T2.Y, PV.W,
-; CM-NEXT: MULADD_IEEE T0.Z, T1.Y, literal.x, PV.Z,
-; CM-NEXT: ADD * T1.W, PV.Y, PV.X,
+; CM-NEXT: MULADD_IEEE T1.Y, T1.Y, literal.x, PV.W,
+; CM-NEXT: RNDNE T1.Z, PV.Z,
+; CM-NEXT: ADD * T0.W, PV.Y, PV.X,
; CM-NEXT: 1079283712(3.321289e+00), 0(0.000000e+00)
+; CM-NEXT: EXP_IEEE T0.X, T0.W,
+; CM-NEXT: EXP_IEEE T0.Y (MASKED), T0.W,
+; CM-NEXT: EXP_IEEE T0.Z (MASKED), T0.W,
+; CM-NEXT: EXP_IEEE * T0.W (MASKED), T0.W,
+; CM-NEXT: TRUNC T2.X, T1.Z,
+; CM-NEXT: MULADD_IEEE T0.Y, T2.W, literal.x, T1.Y,
+; CM-NEXT: FLT_TO_INT T2.Z, T1.X,
+; CM-NEXT: MUL_IEEE * T0.W, PV.X, literal.y,
+; CM-NEXT: 975668412(6.390323e-04), 209715200(1.972152e-31)
+; CM-NEXT: ADD T1.X, T0.Z, -T1.Z,
+; CM-NEXT: MUL_IEEE T1.Y, PV.W, literal.x,
+; CM-NEXT: MAX_INT T0.Z, PV.Z, literal.y,
+; CM-NEXT: MIN_INT * T1.W, PV.Z, literal.z,
+; CM-NEXT: 209715200(1.972152e-31), -330(nan)
+; CM-NEXT: 381(5.338947e-43), 0(0.000000e+00)
+; CM-NEXT: ADD_INT T3.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T2.Y, PV.Z, literal.y,
+; CM-NEXT: ADD_INT T0.Z, T2.Z, literal.z,
+; CM-NEXT: SETGT_UINT * T1.W, T2.Z, literal.w,
+; CM-NEXT: -254(nan), 204(2.858649e-43)
+; CM-NEXT: 102(1.429324e-43), -229(nan)
+; CM-NEXT: ADD_INT T4.X, T2.Z, literal.x,
+; CM-NEXT: SETGT_UINT T3.Y, T2.Z, literal.y,
+; CM-NEXT: CNDE_INT T0.Z, PV.W, PV.Y, PV.Z,
+; CM-NEXT: SETGT_INT * T2.W, T2.Z, literal.x,
+; CM-NEXT: -127(nan), 254(3.559298e-43)
+; CM-NEXT: MUL_IEEE T5.X, T0.X, literal.x,
+; CM-NEXT: CNDE_INT T2.Y, PV.W, PV.Z, T2.Z,
+; CM-NEXT: CNDE_INT T0.Z, PV.Y, PV.X, T3.X,
+; CM-NEXT: SETGT_INT * T3.W, T2.Z, literal.y,
+; CM-NEXT: 2130706432(1.701412e+38), 127(1.779649e-43)
+; CM-NEXT: AND_INT T3.X, KC0[3].Z, literal.x,
+; CM-NEXT: CNDE_INT T2.Y, PV.W, PV.Y, PV.Z,
+; CM-NEXT: MUL_IEEE T0.Z, PV.X, literal.y,
+; CM-NEXT: CNDE_INT * T0.W, T1.W, T1.Y, T0.W,
+; CM-NEXT: -4096(nan), 2130706432(1.701412e+38)
+; CM-NEXT: CNDE_INT T0.X, T2.W, PV.W, T0.X,
+; CM-NEXT: CNDE_INT T1.Y, T3.Y, T5.X, PV.Z,
+; CM-NEXT: LSHL T0.Z, PV.Y, literal.x,
+; CM-NEXT: MUL_IEEE * T0.W, PV.X, literal.y,
+; CM-NEXT: 23(3.222986e-44), 1079283712(3.321289e+00)
+; CM-NEXT: RNDNE T4.X, PV.W,
+; CM-NEXT: ADD_INT T2.Y, PV.Z, literal.x,
+; CM-NEXT: CNDE_INT T0.Z, T3.W, PV.X, PV.Y,
+; CM-NEXT: ADD * T1.W, T1.X, T0.Y,
+; CM-NEXT: 1065353216(1.000000e+00), 0(0.000000e+00)
; CM-NEXT: EXP_IEEE T0.X, T1.W,
; CM-NEXT: EXP_IEEE T0.Y (MASKED), T1.W,
; CM-NEXT: EXP_IEEE T0.Z (MASKED), T1.W,
; CM-NEXT: EXP_IEEE * T0.W (MASKED), T1.W,
-; CM-NEXT: MULADD_IEEE T2.X, T2.W, literal.x, T0.Z,
-; CM-NEXT: ADD T0.Y, T0.W, -T2.Y, BS:VEC_120/SCL_212
-; CM-NEXT: FLT_TO_INT T0.Z, T1.X,
-; CM-NEXT: MUL_IEEE * T0.W, PV.X, literal.y,
-; CM-NEXT: 975668412(6.390323e-04), 209715200(1.972152e-31)
-; CM-NEXT: MUL_IEEE T1.X, PV.W, literal.x,
+; CM-NEXT: MUL_IEEE T1.X, T0.Z, T2.Y,
+; CM-NEXT: TRUNC T0.Y, T4.X,
+; CM-NEXT: FLT_TO_INT T0.Z, T2.X, BS:VEC_120/SCL_212
+; CM-NEXT: MUL_IEEE * T1.W, PV.X, literal.x,
+; CM-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
+; CM-NEXT: MUL_IEEE T2.X, PV.W, literal.x,
; CM-NEXT: MUL_IEEE T1.Y, T0.X, literal.y,
; CM-NEXT: MAX_INT T1.Z, PV.Z, literal.z,
-; CM-NEXT: MIN_INT * T1.W, PV.Z, literal.w,
+; CM-NEXT: MIN_INT * T2.W, PV.Z, literal.w,
; CM-NEXT: 209715200(1.972152e-31), 2130706432(1.701412e+38)
; CM-NEXT: -330(nan), 381(5.338947e-43)
-; CM-NEXT: ADD_INT T3.X, PV.W, literal.x,
-; CM-NEXT: ADD_INT T3.Y, PV.Z, literal.y,
+; CM-NEXT: ADD_INT T5.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T2.Y, PV.Z, literal.y,
; CM-NEXT: ADD_INT T1.Z, T0.Z, literal.z,
-; CM-NEXT: SETGT_UINT * T1.W, T0.Z, literal.w,
+; CM-NEXT: SETGT_UINT * T2.W, T0.Z, literal.w,
; CM-NEXT: -254(nan), 204(2.858649e-43)
; CM-NEXT: 102(1.429324e-43), -229(nan)
-; CM-NEXT: ADD_INT T4.X, T0.Z, literal.x,
-; CM-NEXT: SETGT_UINT T4.Y, T0.Z, literal.y,
+; CM-NEXT: ADD_INT T6.X, T0.Z, literal.x,
+; CM-NEXT: SETGT_UINT T3.Y, T0.Z, literal.y,
; CM-NEXT: CNDE_INT T1.Z, PV.W, PV.Y, PV.Z,
-; CM-NEXT: SETGT_INT * T2.W, T0.Z, literal.x,
+; CM-NEXT: SETGT_INT * T3.W, T0.Z, literal.x,
; CM-NEXT: -127(nan), 254(3.559298e-43)
-; CM-NEXT: CNDE_INT T5.X, PV.W, PV.Z, T0.Z,
-; CM-NEXT: CNDE_INT T3.Y, PV.Y, PV.X, T3.X,
-; CM-NEXT: SETGT_INT T0.Z, T0.Z, literal.x,
-; CM-NEXT: MUL_IEEE * T3.W, T1.Y, literal.y,
-; CM-NEXT: 127(1.779649e-43), 2130706432(1.701412e+38)
-; CM-NEXT: CNDE_INT T3.X, T4.Y, T1.Y, PV.W,
-; CM-NEXT: AND_INT T1.Y, KC0[3].Z, literal.x,
-; CM-NEXT: CNDE_INT T1.Z, PV.Z, PV.X, PV.Y,
-; CM-NEXT: CNDE_INT * T0.W, T1.W, T1.X, T0.W,
-; CM-NEXT: -4096(nan), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T0.X, T2.W, PV.W, T0.X,
-; CM-NEXT: LSHL T3.Y, PV.Z, literal.x,
-; CM-NEXT: TRUNC T1.Z, T2.Y,
-; CM-NEXT: ADD * T0.W, KC0[3].Z, -PV.Y,
-; CM-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T1.X, PV.W, literal.x,
-; CM-NEXT: FLT_TO_INT T2.Y, PV.Z,
-; CM-NEXT: ADD_INT T1.Z, PV.Y, literal.y,
-; CM-NEXT: CNDE_INT * T1.W, T0.Z, PV.X, T3.X,
-; CM-NEXT: 975668412(6.390323e-04), 1065353216(1.000000e+00)
-; CM-NEXT: MUL_IEEE T0.X, PV.W, PV.Z,
-; CM-NEXT: MIN_INT T3.Y, PV.Y, literal.x,
-; CM-NEXT: MULADD_IEEE T0.Z, T0.W, literal.y, PV.X,
-; CM-NEXT: ADD * T0.W, T0.Y, T2.X,
-; CM-NEXT: 381(5.338947e-43), 1079283712(3.321289e+00)
-; CM-NEXT: EXP_IEEE T0.X (MASKED), T0.W,
-; CM-NEXT: EXP_IEEE T0.Y, T0.W,
-; CM-NEXT: EXP_IEEE T0.Z (MASKED), T0.W,
-; CM-NEXT: EXP_IEEE * T0.W (MASKED), T0.W,
-; CM-NEXT: MULADD_IEEE T1.X, T1.Y, literal.x, T0.Z,
-; CM-NEXT: MUL_IEEE T4.Y, PV.Y, literal.y,
-; CM-NEXT: ADD_INT T0.Z, T3.Y, literal.z, BS:VEC_120/SCL_212
-; CM-NEXT: MAX_INT * T0.W, T2.Y, literal.w, BS:VEC_201
-; CM-NEXT: 975668412(6.390323e-04), 2130706432(1.701412e+38)
-; CM-NEXT: -254(nan), -330(nan)
-; CM-NEXT: ADD_INT T2.X, T2.Y, literal.x,
-; CM-NEXT: ADD_INT T3.Y, PV.W, literal.y,
-; CM-NEXT: ADD_INT T1.Z, T2.Y, literal.z,
-; CM-NEXT: SETGT_UINT * T0.W, T2.Y, literal.w,
-; CM-NEXT: -127(nan), 204(2.858649e-43)
-; CM-NEXT: 102(1.429324e-43), -229(nan)
-; CM-NEXT: SETGT_UINT T3.X, T2.Y, literal.x,
-; CM-NEXT: CNDE_INT T3.Y, PV.W, PV.Y, PV.Z,
-; CM-NEXT: SETGT_INT T1.Z, T2.Y, literal.y,
-; CM-NEXT: MUL_IEEE * T1.W, T0.Y, literal.z, BS:VEC_120/SCL_212
-; CM-NEXT: 254(3.559298e-43), -127(nan)
-; CM-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T4.X, PV.W, literal.x,
-; CM-NEXT: CNDE_INT * T3.Y, PV.Z, PV.Y, T2.Y,
-; CM-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
-; CM-NEXT: ALU clause starting at 104:
-; CM-NEXT: CNDE_INT T0.Z, T3.X, T2.X, T0.Z,
-; CM-NEXT: SETGT_INT * T2.W, T2.Y, literal.x,
+; CM-NEXT: CNDE_INT T7.X, PV.W, PV.Z, T0.Z,
+; CM-NEXT: CNDE_INT T2.Y, PV.Y, PV.X, T5.X,
+; CM-NEXT: SETGT_INT * T0.Z, T0.Z, literal.x,
; CM-NEXT: 127(1.779649e-43), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T2.X, T1.Y, literal.x,
-; CM-NEXT: CNDE_INT T1.Y, PV.W, T3.Y, PV.Z,
-; CM-NEXT: CNDE_INT T0.Z, T0.W, T4.X, T1.W,
-; CM-NEXT: MUL_IEEE * T0.W, T4.Y, literal.y, BS:VEC_201
-; CM-NEXT: 1079283712(3.321289e+00), 2130706432(1.701412e+38)
-; CM-NEXT: AND_INT T4.X, KC0[4].X, literal.x,
-; CM-NEXT: CNDE_INT T2.Y, T3.X, T4.Y, PV.W,
-; CM-NEXT: CNDE_INT T0.Z, T1.Z, PV.Z, T0.Y,
-; CM-NEXT: LSHL * T0.W, PV.Y, literal.y,
-; CM-NEXT: -4096(nan), 23(3.222986e-44)
-; CM-NEXT: ADD_INT T3.X, PV.W, literal.x,
-; CM-NEXT: CNDE_INT T0.Y, T2.W, PV.Z, PV.Y,
-; CM-NEXT: MUL_IEEE T0.Z, PV.X, literal.y,
-; CM-NEXT: RNDNE * T0.W, T2.X,
-; CM-NEXT: 1065353216(1.000000e+00), 1079283712(3.321289e+00)
-; CM-NEXT: ADD T2.X, T2.X, -PV.W,
-; CM-NEXT: RNDNE T1.Y, PV.Z,
-; CM-NEXT: MUL_IEEE T1.Z, PV.Y, PV.X,
-; CM-NEXT: SETGT * T1.W, literal.x, KC0[3].W,
-; CM-NEXT: -1036817932(-4.485347e+01), 0(0.000000e+00)
-; CM-NEXT: CNDE T3.X, PV.W, PV.Z, 0.0,
-; CM-NEXT: TRUNC T0.Y, T0.W,
-; CM-NEXT: TRUNC T1.Z, PV.Y,
-; CM-NEXT: ADD * T0.W, PV.X, T1.X,
+; CM-NEXT: ALU clause starting at 104:
+; CM-NEXT: ADD * T4.W, KC0[3].Z, -T3.X,
+; CM-NEXT: MUL_IEEE T5.X, PV.W, literal.x,
+; CM-NEXT: CNDE_INT T2.Y, T0.Z, T7.X, T2.Y,
+; CM-NEXT: MUL_IEEE T1.Z, T1.Y, literal.y,
+; CM-NEXT: CNDE_INT * T1.W, T2.W, T2.X, T1.W, BS:VEC_021/SCL_122
+; CM-NEXT: 975668412(6.390323e-04), 2130706432(1.701412e+38)
+; CM-NEXT: CNDE_INT T0.X, T3.W, PV.W, T0.X,
+; CM-NEXT: CNDE_INT T1.Y, T3.Y, T1.Y, PV.Z,
+; CM-NEXT: LSHL T1.Z, PV.Y, literal.x,
+; CM-NEXT: MULADD_IEEE * T1.W, T4.W, literal.y, PV.X, BS:VEC_120/SCL_212
+; CM-NEXT: 23(3.222986e-44), 1079283712(3.321289e+00)
+; CM-NEXT: MULADD_IEEE T2.X, T3.X, literal.x, PV.W,
+; CM-NEXT: ADD T2.Y, T0.W, -T4.X,
+; CM-NEXT: ADD_INT T1.Z, PV.Z, literal.y,
+; CM-NEXT: CNDE_INT * T0.W, T0.Z, PV.X, PV.Y,
+; CM-NEXT: 975668412(6.390323e-04), 1065353216(1.000000e+00)
+; CM-NEXT: AND_INT T0.X, KC0[4].X, literal.x,
+; CM-NEXT: MUL_IEEE T1.Y, PV.W, PV.Z,
+; CM-NEXT: SETGT T0.Z, literal.y, KC0[3].W,
+; CM-NEXT: ADD * T0.W, PV.Y, PV.X,
+; CM-NEXT: -4096(nan), -1036817932(-4.485347e+01)
; CM-NEXT: EXP_IEEE T0.X (MASKED), T0.W,
; CM-NEXT: EXP_IEEE T0.Y (MASKED), T0.W,
; CM-NEXT: EXP_IEEE T0.Z (MASKED), T0.W,
; CM-NEXT: EXP_IEEE * T0.W, T0.W,
-; CM-NEXT: FLT_TO_INT T1.X, T1.Z,
-; CM-NEXT: FLT_TO_INT T0.Y, T0.Y,
-; CM-NEXT: MUL_IEEE T1.Z, PV.W, literal.x,
-; CM-NEXT: ADD * T1.W, KC0[4].X, -T4.X,
-; CM-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; CM-NEXT: MUL_IEEE T2.X, PV.W, literal.x,
-; CM-NEXT: MUL_IEEE T2.Y, T0.W, literal.y,
-; CM-NEXT: MUL_IEEE T2.Z, PV.Z, literal.z,
-; CM-NEXT: SETGT_UINT * T2.W, PV.Y, literal.w,
-; CM-NEXT: 975668412(6.390323e-04), 209715200(1.972152e-31)
-; CM-NEXT: 2130706432(1.701412e+38), 254(3.559298e-43)
-; CM-NEXT: CNDE_INT T5.X, PV.W, T1.Z, PV.Z,
-; CM-NEXT: MUL_IEEE T3.Y, PV.Y, literal.x,
-; CM-NEXT: MULADD_IEEE T1.Z, T1.W, literal.y, PV.X,
-; CM-NEXT: MAX_INT * T1.W, T1.X, literal.z,
-; CM-NEXT: 209715200(1.972152e-31), 1079283712(3.321289e+00)
-; CM-NEXT: -330(nan), 0(0.000000e+00)
-; CM-NEXT: ADD_INT T2.X, PV.W, literal.x,
-; CM-NEXT: ADD_INT T4.Y, T1.X, literal.y,
-; CM-NEXT: MULADD_IEEE T1.Z, T4.X, literal.z, PV.Z, BS:VEC_120/SCL_212
-; CM-NEXT: MAX_INT * T1.W, T0.Y, literal.w,
-; CM-NEXT: 204(2.858649e-43), 102(1.429324e-43)
+; CM-NEXT: CNDE T2.X, T0.Z, T1.Y, 0.0,
+; CM-NEXT: ADD T1.Y, KC0[4].X, -T0.X,
+; CM-NEXT: FLT_TO_INT T0.Z, T0.Y,
+; CM-NEXT: MUL_IEEE * T1.W, PV.W, literal.x,
+; CM-NEXT: 209715200(1.972152e-31), 0(0.000000e+00)
+; CM-NEXT: MUL_IEEE T3.X, PV.W, literal.x,
+; CM-NEXT: SETGT_UINT T0.Y, PV.Z, literal.y,
+; CM-NEXT: MUL_IEEE T1.Z, PV.Y, literal.z,
+; CM-NEXT: MUL_IEEE * T2.W, T0.X, literal.w,
+; CM-NEXT: 209715200(1.972152e-31), -229(nan)
+; CM-NEXT: 975668412(6.390323e-04), 1079283712(3.321289e+00)
+; CM-NEXT: RNDNE T4.X, PV.W,
+; CM-NEXT: MULADD_IEEE T1.Y, T1.Y, literal.x, PV.Z,
+; CM-NEXT: CNDE_INT T1.Z, PV.Y, PV.X, T1.W,
+; CM-NEXT: SETGT_INT * T1.W, T0.Z, literal.y,
+; CM-NEXT: 1079283712(3.321289e+00), -127(nan)
+; CM-NEXT: CNDE_INT T3.X, PV.W, PV.Z, T0.W,
+; CM-NEXT: MULADD_IEEE T1.Y, T0.X, literal.x, PV.Y,
+; CM-NEXT: ADD T1.Z, T2.W, -PV.X,
+; CM-NEXT: MAX_INT * T2.W, T0.Z, literal.y,
; CM-NEXT: 975668412(6.390323e-04), -330(nan)
-; CM-NEXT: ADD T4.X, T0.Z, -T1.Y,
-; CM-NEXT: ADD_INT T1.Y, PV.W, literal.x,
-; CM-NEXT: ADD_INT T0.Z, T0.Y, literal.y,
-; CM-NEXT: SETGT_UINT * T1.W, T0.Y, literal.z,
+; CM-NEXT: ADD_INT T0.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T2.Y, T0.Z, literal.y,
+; CM-NEXT: TRUNC T2.Z, T4.X,
+; CM-NEXT: ADD * T2.W, PV.Z, PV.Y,
; CM-NEXT: 204(2.858649e-43), 102(1.429324e-43)
-; CM-NEXT: -229(nan), 0(0.000000e+00)
-; CM-NEXT: SETGT_UINT T6.X, T1.X, literal.x,
-; CM-NEXT: CNDE_INT T1.Y, PV.W, PV.Y, PV.Z,
-; CM-NEXT: SETGT_INT T0.Z, T0.Y, literal.y,
-; CM-NEXT: ADD * T3.W, PV.X, T1.Z,
-; CM-NEXT: -229(nan), -127(nan)
-; CM-NEXT: EXP_IEEE T1.X (MASKED), T3.W,
-; CM-NEXT: EXP_IEEE T1.Y (MASKED), T3.W,
-; CM-NEXT: EXP_IEEE T1.Z, T3.W,
-; CM-NEXT: EXP_IEEE * T1.W (MASKED), T3.W,
-; CM-NEXT: CNDE_INT T4.X, T0.Z, T1.Y, T0.Y,
-; CM-NEXT: CNDE_INT T1.Y, T6.X, T2.X, T4.Y, BS:VEC_120/SCL_212
-; CM-NEXT: SETGT_INT T2.Z, T1.X, literal.x,
-; CM-NEXT: MUL_IEEE * T3.W, PV.Z, literal.y,
-; CM-NEXT: -127(nan), 209715200(1.972152e-31)
-; CM-NEXT: MUL_IEEE T2.X, T1.Z, literal.x,
-; CM-NEXT: MUL_IEEE T4.Y, PV.W, literal.y,
-; CM-NEXT: CNDE_INT T3.Z, PV.Z, PV.Y, T1.X,
-; CM-NEXT: MIN_INT * T4.W, T1.X, literal.z,
+; CM-NEXT: EXP_IEEE T1.X (MASKED), T2.W,
+; CM-NEXT: EXP_IEEE T1.Y, T2.W,
+; CM-NEXT: EXP_IEEE T1.Z (MASKED), T2.W,
+; CM-NEXT: EXP_IEEE * T1.W (MASKED), T2.W,
+; CM-NEXT: MUL_IEEE T4.X, T0.W, literal.x,
+; CM-NEXT: FLT_TO_INT T3.Y, T2.Z,
+; CM-NEXT: MUL_IEEE T1.Z, PV.Y, literal.y,
+; CM-NEXT: CNDE_INT * T0.W, T0.Y, T0.X, T2.Y,
; CM-NEXT: 2130706432(1.701412e+38), 209715200(1.972152e-31)
+; CM-NEXT: CNDE_INT T0.X, T1.W, PV.W, T0.Z,
+; CM-NEXT: MUL_IEEE T0.Y, PV.Z, literal.x,
+; CM-NEXT: MAX_INT T2.Z, PV.Y, literal.y,
+; CM-NEXT: MIN_INT * T0.W, PV.Y, literal.z,
+; CM-NEXT: 209715200(1.972152e-31), -330(nan)
; CM-NEXT: 381(5.338947e-43), 0(0.000000e+00)
-; CM-NEXT: MIN_INT T7.X, T0.Y, literal.x,
-; CM-NEXT: ADD_INT T1.Y, PV.W, literal.y,
-; CM-NEXT: ADD_INT T4.Z, T1.X, literal.z,
-; CM-NEXT: SETGT_UINT * T4.W, T1.X, literal.w,
-; CM-NEXT: 381(5.338947e-43), -254(nan)
+; CM-NEXT: ADD_INT T5.X, PV.W, literal.x,
+; CM-NEXT: ADD_INT T2.Y, PV.Z, literal.y,
+; CM-NEXT: ADD_INT T2.Z, T3.Y, literal.z,
+; CM-NEXT: SETGT_UINT * T0.W, T3.Y, literal.w,
+; CM-NEXT: -254(nan), 204(2.858649e-43)
+; CM-NEXT: 102(1.429324e-43), -229(nan)
+; CM-NEXT: ADD_INT T6.X, T3.Y, literal.x,
+; CM-NEXT: SETGT_UINT T4.Y, T3.Y, literal.y,
+; CM-NEXT: CNDE_INT T2.Z, PV.W, PV.Y, PV.Z,
+; CM-NEXT: SETGT_INT * T1.W, T3.Y, literal.x,
; CM-NEXT: -127(nan), 254(3.559298e-43)
-; CM-NEXT: CNDE_INT T8.X, PV.W, PV.Z, PV.Y,
-; CM-NEXT: SETGT_INT T1.Y, T1.X, literal.x,
-; CM-NEXT: ADD_INT T4.Z, PV.X, literal.y,
-; CM-NEXT: ADD_INT * T5.W, T0.Y, literal.z,
+; CM-NEXT: MUL_IEEE T7.X, T1.Y, literal.x,
+; CM-NEXT: CNDE_INT T2.Y, PV.W, PV.Z, T3.Y,
+; CM-NEXT: CNDE_INT T2.Z, PV.Y, PV.X, T5.X,
+; CM-NEXT: MIN_INT * T2.W, T0.Z, literal.y,
+; CM-NEXT: 2130706432(1.701412e+38), 381(5.338947e-43)
+; CM-NEXT: SETGT_INT T5.X, T3.Y, literal.x,
+; CM-NEXT: ADD_INT T3.Y, PV.W, literal.y,
+; CM-NEXT: ADD_INT T3.Z, T0.Z, literal.z,
+; CM-NEXT: SETGT_UINT * T2.W, T0.Z, literal.w,
; CM-NEXT: 127(1.779649e-43), -254(nan)
-; CM-NEXT: -127(nan), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT T1.X, T2.W, PV.W, PV.Z,
-; CM-NEXT: CNDE_INT T5.Y, PV.Y, T3.Z, PV.X,
-; CM-NEXT: CNDE_INT T3.Z, T6.X, T4.Y, T3.W,
-; CM-NEXT: MUL_IEEE * T2.W, T2.X, literal.x, BS:VEC_120/SCL_212
+; CM-NEXT: -127(nan), 254(3.559298e-43)
+; CM-NEXT: CNDE_INT T6.X, PV.W, PV.Z, PV.Y,
+; CM-NEXT: CNDE_INT T2.Y, PV.X, T2.Y, T2.Z,
+; CM-NEXT: MUL_IEEE T2.Z, T7.X, literal.x,
+; CM-NEXT: CNDE_INT * T0.W, T0.W, T0.Y, T1.Z, BS:VEC_021/SCL_122
; CM-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
-; CM-NEXT: SETGT_INT T6.X, T0.Y, literal.x,
-; CM-NEXT: CNDE_INT T0.Y, T4.W, T2.X, PV.W,
-; CM-NEXT: CNDE_INT * T1.Z, T2.Z, PV.Z, T1.Z,
-; CM-NEXT: 127(1.779649e-43), 0(0.000000e+00)
-; CM-NEXT: ALU clause starting at 205:
-; CM-NEXT: LSHL * T2.W, T5.Y, literal.x,
-; CM-NEXT: 23(3.222986e-44), 0(0.000000e+00)
-; CM-NEXT: ADD_INT T2.X, PV.W, literal.x,
-; CM-NEXT: CNDE_INT T0.Y, T1.Y, T1.Z, T0.Y,
-; CM-NEXT: CNDE_INT * T1.Z, T6.X, T4.X, T1.X,
+; CM-NEXT: SETGT_INT T8.X, T0.Z, literal.x,
+; CM-NEXT: CNDE_INT T0.Y, T1.W, PV.W, T1.Y,
+; CM-NEXT: CNDE_INT T0.Z, T4.Y, T7.X, PV.Z,
+; CM-NEXT: LSHL * T0.W, PV.Y, literal.y,
+; CM-NEXT: 127(1.779649e-43), 23(3.222986e-44)
+; CM-NEXT: ALU clause starting at 202:
+; CM-NEXT: ADD_INT T7.X, T0.W, literal.x,
+; CM-NEXT: CNDE_INT * T0.Y, T5.X, T0.Y, T0.Z,
; CM-NEXT: 1065353216(1.000000e+00), 0(0.000000e+00)
-; CM-NEXT: CNDE_INT * T1.W, T1.W, T3.Y, T2.Y,
-; CM-NEXT: CNDE_INT T1.X, T0.Z, PV.W, T0.W,
-; CM-NEXT: LSHL T1.Y, T1.Z, literal.x, BS:VEC_120/SCL_212
-; CM-NEXT: MUL_IEEE T0.Z, T0.Y, T2.X,
+; CM-NEXT: CNDE_INT * T0.Z, T8.X, T0.X, T6.X,
+; CM-NEXT: MUL_IEEE * T0.W, T4.X, literal.x,
+; CM-NEXT: 2130706432(1.701412e+38), 0(0.000000e+00)
+; CM-NEXT: CNDE_INT T0.X, T2.W, T4.X, PV.W,
+; CM-NEXT: LSHL T1.Y, T0.Z, literal.x,
+; CM-NEXT: MUL_IEEE T0.Z, T0.Y, T7.X, BS:VEC_021/SCL_122
; CM-NEXT: SETGT * T0.W, literal.y, KC0[4].X,
; CM-NEXT: 23(3.222986e-44), -1036817932(-4.485347e+01)
-; CM-NEXT: CNDE T2.X, PV.W, PV.Z, 0.0,
+; CM-NEXT: CNDE T4.X, PV.W, PV.Z, 0.0,
; CM-NEXT: SETGT T0.Y, KC0[4].X, literal.x,
; CM-NEXT: ADD_INT T0.Z, PV.Y, literal.y,
-; CM-NEXT: CNDE_INT * T0.W, T6.X, PV.X, T5.X,
+; CM-NEXT: CNDE_INT * T0.W, T8.X, T3.X, PV.X,
; CM-NEXT: 1109008539(3.853184e+01), 1065353216(1.000000e+00)
-; CM-NEXT: SETGT T1.X, KC0[3].W, literal.x,
+; CM-NEXT: SETGT T0.X, KC0[3].W, literal.x,
; CM-NEXT: MUL_IEEE T1.Y, PV.W, PV.Z,
; CM-NEXT: SETGT T0.Z, literal.y, KC0[3].Z,
; CM-NEXT: CNDE * T0.W, PV.Y, PV.X, literal.z,
; CM-NEXT: 1109008539(3.853184e+01), -1036817932(-4.485347e+01)
; CM-NEXT: 2139095040(INF), 0(0.000000e+00)
-; CM-NEXT: SETGT T2.X, literal.x, KC0[3].Y,
+; CM-NEXT: SETGT T3.X, literal.x, KC0[3].Y,
; CM-NEXT: CNDE T0.Y, PV.Z, PV.Y, 0.0,
-; CM-NEXT: CNDE T0.Z, PV.X, T3.X, literal.y,
+; CM-NEXT: CNDE T0.Z, PV.X, T2.X, literal.y,
; CM-NEXT: SETGT * T1.W, KC0[3].Z, literal.z,
; CM-NEXT: -1036817932(-4.485347e+01), 2139095040(INF)
; CM-NEXT: 1109008539(3.853184e+01), 0(0.000000e+00)
; CM-NEXT: CNDE T0.Y, PV.W, PV.Y, literal.x,
-; CM-NEXT: CNDE T1.Z, PV.X, T0.X, 0.0,
+; CM-NEXT: CNDE T1.Z, PV.X, T1.X, 0.0,
; CM-NEXT: SETGT * T1.W, KC0[3].Y, literal.y,
; CM-NEXT: 2139095040(INF), 1109008539(3.853184e+01)
; CM-NEXT: CNDE * T0.X, PV.W, PV.Z, literal.x,
diff --git a/llvm/test/CodeGen/AMDGPU/lower-ctor-dtor-constexpr-alias.ll b/llvm/test/CodeGen/AMDGPU/lower-ctor-dtor-constexpr-alias.ll
index a883db1fa61f..95fc47469b51 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-ctor-dtor-constexpr-alias.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-ctor-dtor-constexpr-alias.ll
@@ -26,14 +26,14 @@ define void @bar() addrspace(1) {
}
;.
-; CHECK: @[[LLVM_GLOBAL_CTORS:[a-zA-Z0-9_$"\\.-]+]] = appending addrspace(1) global [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @foo.alias, ptr null }, { i32, ptr, ptr } { i32 1, ptr inttoptr (i64 4096 to ptr), ptr null }]
-; CHECK: @[[LLVM_GLOBAL_DTORS:[a-zA-Z0-9_$"\\.-]+]] = appending addrspace(1) global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr addrspacecast (ptr addrspace(1) @bar to ptr), ptr null }]
-; CHECK: @[[__INIT_ARRAY_START:[a-zA-Z0-9_$"\\.-]+]] = external addrspace(1) constant [0 x ptr addrspace(1)]
-; CHECK: @[[__INIT_ARRAY_END:[a-zA-Z0-9_$"\\.-]+]] = external addrspace(1) constant [0 x ptr addrspace(1)]
-; CHECK: @[[__FINI_ARRAY_START:[a-zA-Z0-9_$"\\.-]+]] = external addrspace(1) constant [0 x ptr addrspace(1)]
-; CHECK: @[[__FINI_ARRAY_END:[a-zA-Z0-9_$"\\.-]+]] = external addrspace(1) constant [0 x ptr addrspace(1)]
-; CHECK: @[[LLVM_USED:[a-zA-Z0-9_$"\\.-]+]] = appending addrspace(1) global [2 x ptr] [ptr @amdgcn.device.init, ptr @amdgcn.device.fini], section "llvm.metadata"
-; CHECK: @[[FOO_ALIAS:[a-zA-Z0-9_$"\\.-]+]] = hidden alias void (), ptr @foo
+; CHECK: @llvm.global_ctors = appending addrspace(1) global [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @foo.alias, ptr null }, { i32, ptr, ptr } { i32 1, ptr inttoptr (i64 4096 to ptr), ptr null }]
+; CHECK: @llvm.global_dtors = appending addrspace(1) global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr addrspacecast (ptr addrspace(1) @bar to ptr), ptr null }]
+; CHECK: @__init_array_start = external addrspace(1) constant [0 x ptr addrspace(1)]
+; CHECK: @__init_array_end = external addrspace(1) constant [0 x ptr addrspace(1)]
+; CHECK: @__fini_array_start = external addrspace(1) constant [0 x ptr addrspace(1)]
+; CHECK: @__fini_array_end = external addrspace(1) constant [0 x ptr addrspace(1)]
+; CHECK: @llvm.used = appending addrspace(1) global [2 x ptr] [ptr @amdgcn.device.init, ptr @amdgcn.device.fini], section "llvm.metadata"
+; CHECK: @foo.alias = hidden alias void (), ptr @foo
;.
; CHECK-LABEL: define void @foo(
; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
diff --git a/llvm/test/CodeGen/AMDGPU/lower-ctor-dtor.ll b/llvm/test/CodeGen/AMDGPU/lower-ctor-dtor.ll
index 58e1589d0483..c4f0821caacd 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-ctor-dtor.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-ctor-dtor.ll
@@ -44,13 +44,13 @@ define internal void @bar() {
}
;.
-; CHECK: @[[LLVM_GLOBAL_CTORS:[a-zA-Z0-9_$"\\.-]+]] = appending addrspace(1) global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @foo, ptr null }]
-; CHECK: @[[LLVM_GLOBAL_DTORS:[a-zA-Z0-9_$"\\.-]+]] = appending addrspace(1) global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @bar, ptr null }]
-; CHECK: @[[__INIT_ARRAY_START:[a-zA-Z0-9_$"\\.-]+]] = external addrspace(1) constant [0 x ptr addrspace(1)]
-; CHECK: @[[__INIT_ARRAY_END:[a-zA-Z0-9_$"\\.-]+]] = external addrspace(1) constant [0 x ptr addrspace(1)]
-; CHECK: @[[__FINI_ARRAY_START:[a-zA-Z0-9_$"\\.-]+]] = external addrspace(1) constant [0 x ptr addrspace(1)]
-; CHECK: @[[__FINI_ARRAY_END:[a-zA-Z0-9_$"\\.-]+]] = external addrspace(1) constant [0 x ptr addrspace(1)]
-; CHECK: @[[LLVM_USED:[a-zA-Z0-9_$"\\.-]+]] = appending addrspace(1) global [2 x ptr] [ptr @amdgcn.device.init, ptr @amdgcn.device.fini], section "llvm.metadata"
+; CHECK: @llvm.global_ctors = appending addrspace(1) global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @foo, ptr null }]
+; CHECK: @llvm.global_dtors = appending addrspace(1) global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @bar, ptr null }]
+; CHECK: @__init_array_start = external addrspace(1) constant [0 x ptr addrspace(1)]
+; CHECK: @__init_array_end = external addrspace(1) constant [0 x ptr addrspace(1)]
+; CHECK: @__fini_array_start = external addrspace(1) constant [0 x ptr addrspace(1)]
+; CHECK: @__fini_array_end = external addrspace(1) constant [0 x ptr addrspace(1)]
+; CHECK: @llvm.used = appending addrspace(1) global [2 x ptr] [ptr @amdgcn.device.init, ptr @amdgcn.device.fini], section "llvm.metadata"
;.
; CHECK-LABEL: define internal void @foo() {
; CHECK-NEXT: ret void
diff --git a/llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll b/llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll
new file mode 100644
index 000000000000..c7a831185b83
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll
@@ -0,0 +1,109 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 %s -o - | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 %s -o - | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 %s -o - | FileCheck -check-prefix=GFX11 %s
+
+define amdgpu_kernel void @test(ptr addrspace(1) %src, ptr addrspace(1) %dst) {
+; GFX9-LABEL: test:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dword s7, s[4:5], 0x1c
+; GFX9-NEXT: s_load_dword s8, s[4:5], 0x38
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_and_b32 s4, s7, 0xffff
+; GFX9-NEXT: s_mul_i32 s6, s6, s4
+; GFX9-NEXT: s_add_i32 s8, s8, s6
+; GFX9-NEXT: v_add_u32_e32 v0, s8, v0
+; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; GFX9-NEXT: v_lshlrev_b64 v[4:5], 4, v[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v4
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
+; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off
+; GFX9-NEXT: v_mov_b32_e32 v6, s3
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, s2, v4
+; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v6, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_not_b32_e32 v3, v3
+; GFX9-NEXT: v_not_b32_e32 v2, v2
+; GFX9-NEXT: v_not_b32_e32 v1, v1
+; GFX9-NEXT: v_not_b32_e32 v0, v0
+; GFX9-NEXT: global_store_dwordx4 v[4:5], v[0:3], off
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: test:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_clause 0x2
+; GFX10-NEXT: s_load_dword s7, s[4:5], 0x1c
+; GFX10-NEXT: s_load_dword s8, s[4:5], 0x38
+; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_and_b32 s4, s7, 0xffff
+; GFX10-NEXT: s_mul_i32 s6, s6, s4
+; GFX10-NEXT: v_add3_u32 v0, s8, s6, v0
+; GFX10-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; GFX10-NEXT: v_lshlrev_b64 v[4:5], 4, v[0:1]
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, s0, v4
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, s1, v5, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v4, vcc_lo, s2, v4
+; GFX10-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo
+; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
+; GFX10-NEXT: v_not_b32_e32 v1, v1
+; GFX10-NEXT: v_not_b32_e32 v0, v0
+; GFX10-NEXT: global_store_dwordx4 v[4:5], v[0:3], off
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: test:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_clause 0x2
+; GFX11-NEXT: s_load_b32 s4, s[0:1], 0x1c
+; GFX11-NEXT: s_load_b32 s5, s[0:1], 0x38
+; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_mul_i32 s15, s15, s4
+; GFX11-NEXT: v_add3_u32 v0, s5, s15, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; GFX11-NEXT: v_lshlrev_b64 v[4:5], 4, v[0:1]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, s0, v4
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, s1, v5, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, s2, v4
+; GFX11-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo
+; GFX11-NEXT: global_load_b128 v[0:3], v[0:1], off
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_not_b32_e32 v3, v3
+; GFX11-NEXT: v_not_b32_e32 v2, v2
+; GFX11-NEXT: v_not_b32_e32 v1, v1
+; GFX11-NEXT: v_not_b32_e32 v0, v0
+; GFX11-NEXT: global_store_b128 v[4:5], v[0:3], off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+entry:
+ %implicitarg.ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+ %arg.1.ptr = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 40
+ %arg.1 = load i64, ptr addrspace(4) %arg.1.ptr, align 8
+ %workgroup.id.x = tail call i32 @llvm.amdgcn.workgroup.id.x()
+ %arg.2.ptr = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 12
+ %arg.2 = load i16, ptr addrspace(4) %arg.2.ptr, align 4
+ %arg.2.ext = zext i16 %arg.2 to i32
+ %mul = mul i32 %workgroup.id.x, %arg.2.ext
+ %workitem.id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %add = add i32 %mul, %workitem.id.x
+ %add.ext = zext i32 %add to i64
+ %add.1 = add i64 %arg.1, %add.ext
+ %sext = shl i64 %add.1, 32
+ %idxprom = ashr exact i64 %sext, 32
+ %arrayidx = getelementptr inbounds <16 x i8>, ptr addrspace(1) %src, i64 %idxprom
+ %arrayval = load <16 x i8>, ptr addrspace(1) %arrayidx, align 16
+ %not = xor <16 x i8> %arrayval, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %arrayidx2 = getelementptr inbounds <16 x i8>, ptr addrspace(1) %dst, i64 %idxprom
+ store <16 x i8> %not, ptr addrspace(1) %arrayidx2, align 16
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence-mmra-global.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence-mmra-global.ll
new file mode 100644
index 000000000000..da9bc6b33113
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence-mmra-global.ll
@@ -0,0 +1,1716 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx600 < %s | FileCheck --check-prefixes=GFX6 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx700 < %s | FileCheck --check-prefixes=GFX7 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1010 < %s | FileCheck --check-prefixes=GFX10-WGP %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1010 -mattr=+cumode < %s | FileCheck --check-prefixes=GFX10-CU %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -O0 -mcpu=gfx700 -amdgcn-skip-cache-invalidations < %s | FileCheck --check-prefixes=SKIP-CACHE-INV %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx90a < %s | FileCheck -check-prefixes=GFX90A-NOTTGSPLIT %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx90a -mattr=+tgsplit < %s | FileCheck -check-prefixes=GFX90A-TGSPLIT %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx940 < %s | FileCheck -check-prefixes=GFX940-NOTTGSPLIT %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx940 -mattr=+tgsplit < %s | FileCheck -check-prefixes=GFX940-TGSPLIT %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1100 < %s | FileCheck --check-prefixes=GFX11-WGP %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1100 -mattr=+cumode < %s | FileCheck --check-prefixes=GFX11-CU %s
+
+define amdgpu_kernel void @workgroup_acquire_fence() {
+; GFX6-LABEL: workgroup_acquire_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: workgroup_acquire_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: workgroup_acquire_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: buffer_gl0_inv
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: workgroup_acquire_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: workgroup_acquire_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: workgroup_acquire_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: workgroup_acquire_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: workgroup_acquire_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: workgroup_acquire_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: buffer_inv sc0
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: workgroup_acquire_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: buffer_gl0_inv
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: workgroup_acquire_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("workgroup") acquire, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @workgroup_release_fence() {
+; GFX6-LABEL: workgroup_release_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: workgroup_release_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: workgroup_release_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: workgroup_release_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: workgroup_release_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: workgroup_release_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: workgroup_release_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: workgroup_release_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: workgroup_release_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: workgroup_release_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: workgroup_release_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("workgroup") release, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @workgroup_acq_rel_fence() {
+; GFX6-LABEL: workgroup_acq_rel_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: workgroup_acq_rel_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: workgroup_acq_rel_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: buffer_gl0_inv
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: workgroup_acq_rel_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: workgroup_acq_rel_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: workgroup_acq_rel_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: workgroup_acq_rel_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: workgroup_acq_rel_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: workgroup_acq_rel_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: buffer_inv sc0
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: workgroup_acq_rel_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: buffer_gl0_inv
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: workgroup_acq_rel_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("workgroup") acq_rel, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @workgroup_seq_cst_fence() {
+; GFX6-LABEL: workgroup_seq_cst_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: workgroup_seq_cst_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: workgroup_seq_cst_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: buffer_gl0_inv
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: workgroup_seq_cst_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: workgroup_seq_cst_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: workgroup_seq_cst_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: workgroup_seq_cst_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: workgroup_seq_cst_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: workgroup_seq_cst_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: buffer_inv sc0
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: workgroup_seq_cst_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: buffer_gl0_inv
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: workgroup_seq_cst_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("workgroup") seq_cst, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @workgroup_one_as_acquire_fence() {
+; GFX6-LABEL: workgroup_one_as_acquire_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: workgroup_one_as_acquire_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: workgroup_one_as_acquire_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: buffer_gl0_inv
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: workgroup_one_as_acquire_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: workgroup_one_as_acquire_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: workgroup_one_as_acquire_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: workgroup_one_as_acquire_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: workgroup_one_as_acquire_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: workgroup_one_as_acquire_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: buffer_inv sc0
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: workgroup_one_as_acquire_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: buffer_gl0_inv
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: workgroup_one_as_acquire_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("workgroup-one-as") acquire, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @workgroup_one_as_release_fence() {
+; GFX6-LABEL: workgroup_one_as_release_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: workgroup_one_as_release_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: workgroup_one_as_release_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: workgroup_one_as_release_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: workgroup_one_as_release_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: workgroup_one_as_release_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: workgroup_one_as_release_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: workgroup_one_as_release_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: workgroup_one_as_release_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: workgroup_one_as_release_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: workgroup_one_as_release_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("workgroup-one-as") release, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @workgroup_one_as_acq_rel_fence() {
+; GFX6-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: buffer_gl0_inv
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: workgroup_one_as_acq_rel_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: buffer_inv sc0
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: buffer_gl0_inv
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("workgroup-one-as") acq_rel, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @workgroup_one_as_seq_cst_fence() {
+; GFX6-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: buffer_gl0_inv
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: workgroup_one_as_seq_cst_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: buffer_inv sc0
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: buffer_gl0_inv
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("workgroup-one-as") seq_cst, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @agent_acquire_fence() {
+; GFX6-LABEL: agent_acquire_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_wbinvl1
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: agent_acquire_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: agent_acquire_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: agent_acquire_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: agent_acquire_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt vmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: agent_acquire_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: agent_acquire_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: agent_acquire_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: buffer_inv sc1
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: agent_acquire_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: buffer_inv sc1
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: agent_acquire_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: agent_acquire_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("agent") acquire, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @agent_release_fence() {
+; GFX6-LABEL: agent_release_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: agent_release_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: agent_release_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: agent_release_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: agent_release_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt vmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: agent_release_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: agent_release_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: agent_release_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: buffer_wbl2 sc1
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: agent_release_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: buffer_wbl2 sc1
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: agent_release_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: agent_release_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("agent") release, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @agent_acq_rel_fence() {
+; GFX6-LABEL: agent_acq_rel_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_wbinvl1
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: agent_acq_rel_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: agent_acq_rel_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: agent_acq_rel_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: agent_acq_rel_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt vmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: agent_acq_rel_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: agent_acq_rel_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: agent_acq_rel_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: buffer_wbl2 sc1
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: buffer_inv sc1
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: agent_acq_rel_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: buffer_wbl2 sc1
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: buffer_inv sc1
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: agent_acq_rel_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: agent_acq_rel_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("agent") acq_rel, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @agent_seq_cst_fence() {
+; GFX6-LABEL: agent_seq_cst_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_wbinvl1
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: agent_seq_cst_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: agent_seq_cst_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: agent_seq_cst_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: agent_seq_cst_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt vmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: agent_seq_cst_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: agent_seq_cst_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: agent_seq_cst_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: buffer_wbl2 sc1
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: buffer_inv sc1
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: agent_seq_cst_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: buffer_wbl2 sc1
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: buffer_inv sc1
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: agent_seq_cst_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: agent_seq_cst_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("agent") seq_cst, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @agent_one_as_acquire_fence() {
+; GFX6-LABEL: agent_one_as_acquire_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_wbinvl1
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: agent_one_as_acquire_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: agent_one_as_acquire_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: agent_one_as_acquire_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: agent_one_as_acquire_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt vmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: agent_one_as_acquire_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: agent_one_as_acquire_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: agent_one_as_acquire_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: buffer_inv sc1
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: agent_one_as_acquire_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: buffer_inv sc1
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: agent_one_as_acquire_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: agent_one_as_acquire_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("agent-one-as") acquire, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @agent_one_as_release_fence() {
+; GFX6-LABEL: agent_one_as_release_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: agent_one_as_release_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: agent_one_as_release_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: agent_one_as_release_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: agent_one_as_release_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt vmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: agent_one_as_release_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: agent_one_as_release_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: agent_one_as_release_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: buffer_wbl2 sc1
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: agent_one_as_release_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: buffer_wbl2 sc1
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: agent_one_as_release_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: agent_one_as_release_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("agent-one-as") release, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @agent_one_as_acq_rel_fence() {
+; GFX6-LABEL: agent_one_as_acq_rel_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_wbinvl1
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: agent_one_as_acq_rel_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: agent_one_as_acq_rel_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: agent_one_as_acq_rel_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: agent_one_as_acq_rel_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt vmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: agent_one_as_acq_rel_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: agent_one_as_acq_rel_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: agent_one_as_acq_rel_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: buffer_wbl2 sc1
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: buffer_inv sc1
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: agent_one_as_acq_rel_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: buffer_wbl2 sc1
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: buffer_inv sc1
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: agent_one_as_acq_rel_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: agent_one_as_acq_rel_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("agent-one-as") acq_rel, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @agent_one_as_seq_cst_fence() {
+; GFX6-LABEL: agent_one_as_seq_cst_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_wbinvl1
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: agent_one_as_seq_cst_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: agent_one_as_seq_cst_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: agent_one_as_seq_cst_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: agent_one_as_seq_cst_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt vmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: agent_one_as_seq_cst_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: agent_one_as_seq_cst_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: agent_one_as_seq_cst_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: buffer_wbl2 sc1
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: buffer_inv sc1
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: agent_one_as_seq_cst_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: buffer_wbl2 sc1
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: buffer_inv sc1
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: agent_one_as_seq_cst_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: agent_one_as_seq_cst_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("agent-one-as") seq_cst, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @system_acquire_fence() {
+; GFX6-LABEL: system_acquire_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_wbinvl1
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: system_acquire_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: system_acquire_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: system_acquire_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: system_acquire_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt vmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: system_acquire_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: buffer_invl2
+; GFX90A-NOTTGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: system_acquire_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: buffer_invl2
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: system_acquire_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: buffer_inv sc0 sc1
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: system_acquire_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: buffer_inv sc0 sc1
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: system_acquire_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: system_acquire_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence acquire, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @system_release_fence() {
+; GFX6-LABEL: system_release_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: system_release_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: system_release_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: system_release_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: system_release_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt vmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: system_release_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: buffer_wbl2
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: system_release_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: buffer_wbl2
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: system_release_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: buffer_wbl2 sc0 sc1
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: system_release_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: buffer_wbl2 sc0 sc1
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: system_release_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: system_release_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence release, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @system_acq_rel_fence() {
+; GFX6-LABEL: system_acq_rel_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_wbinvl1
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: system_acq_rel_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: system_acq_rel_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: system_acq_rel_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: system_acq_rel_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt vmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: system_acq_rel_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: buffer_wbl2
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: buffer_invl2
+; GFX90A-NOTTGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: system_acq_rel_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: buffer_wbl2
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: buffer_invl2
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: system_acq_rel_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: buffer_wbl2 sc0 sc1
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: buffer_inv sc0 sc1
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: system_acq_rel_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: buffer_wbl2 sc0 sc1
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: buffer_inv sc0 sc1
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: system_acq_rel_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: system_acq_rel_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence acq_rel, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @system_seq_cst_fence() {
+; GFX6-LABEL: system_seq_cst_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_wbinvl1
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: system_seq_cst_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: system_seq_cst_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: system_seq_cst_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: system_seq_cst_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt vmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: system_seq_cst_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: buffer_wbl2
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: buffer_invl2
+; GFX90A-NOTTGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: system_seq_cst_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: buffer_wbl2
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: buffer_invl2
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: system_seq_cst_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: buffer_wbl2 sc0 sc1
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: buffer_inv sc0 sc1
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: system_seq_cst_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: buffer_wbl2 sc0 sc1
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: buffer_inv sc0 sc1
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: system_seq_cst_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: system_seq_cst_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence seq_cst, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @system_one_as_acquire_fence() {
+; GFX6-LABEL: system_one_as_acquire_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_wbinvl1
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: system_one_as_acquire_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: system_one_as_acquire_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: system_one_as_acquire_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: system_one_as_acquire_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt vmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: system_one_as_acquire_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: buffer_invl2
+; GFX90A-NOTTGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: system_one_as_acquire_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: buffer_invl2
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: system_one_as_acquire_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: buffer_inv sc0 sc1
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: system_one_as_acquire_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: buffer_inv sc0 sc1
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: system_one_as_acquire_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: system_one_as_acquire_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("one-as") acquire, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @system_one_as_release_fence() {
+; GFX6-LABEL: system_one_as_release_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: system_one_as_release_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: system_one_as_release_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: system_one_as_release_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: system_one_as_release_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt vmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: system_one_as_release_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: buffer_wbl2
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: system_one_as_release_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: buffer_wbl2
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: system_one_as_release_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: buffer_wbl2 sc0 sc1
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: system_one_as_release_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: buffer_wbl2 sc0 sc1
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: system_one_as_release_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: system_one_as_release_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("one-as") release, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @system_one_as_acq_rel_fence() {
+; GFX6-LABEL: system_one_as_acq_rel_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_wbinvl1
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: system_one_as_acq_rel_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: system_one_as_acq_rel_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: system_one_as_acq_rel_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: system_one_as_acq_rel_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt vmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: system_one_as_acq_rel_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: buffer_wbl2
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: buffer_invl2
+; GFX90A-NOTTGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: system_one_as_acq_rel_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: buffer_wbl2
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: buffer_invl2
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: system_one_as_acq_rel_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: buffer_wbl2 sc0 sc1
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: buffer_inv sc0 sc1
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: system_one_as_acq_rel_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: buffer_wbl2 sc0 sc1
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: buffer_inv sc0 sc1
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: system_one_as_acq_rel_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: system_one_as_acq_rel_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("one-as") acq_rel, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
+
+define amdgpu_kernel void @system_one_as_seq_cst_fence() {
+; GFX6-LABEL: system_one_as_seq_cst_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_wbinvl1
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: system_one_as_seq_cst_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: system_one_as_seq_cst_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT: buffer_gl1_inv
+; GFX10-WGP-NEXT: buffer_gl0_inv
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: system_one_as_seq_cst_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-CU-NEXT: buffer_gl1_inv
+; GFX10-CU-NEXT: buffer_gl0_inv
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: system_one_as_seq_cst_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt vmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: system_one_as_seq_cst_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: buffer_wbl2
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: buffer_invl2
+; GFX90A-NOTTGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: system_one_as_seq_cst_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: buffer_wbl2
+; GFX90A-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX90A-TGSPLIT-NEXT: buffer_invl2
+; GFX90A-TGSPLIT-NEXT: buffer_wbinvl1_vol
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: system_one_as_seq_cst_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: buffer_wbl2 sc0 sc1
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: buffer_inv sc0 sc1
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: system_one_as_seq_cst_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: buffer_wbl2 sc0 sc1
+; GFX940-TGSPLIT-NEXT: s_waitcnt vmcnt(0)
+; GFX940-TGSPLIT-NEXT: buffer_inv sc0 sc1
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: system_one_as_seq_cst_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT: buffer_gl1_inv
+; GFX11-WGP-NEXT: buffer_gl0_inv
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: system_one_as_seq_cst_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-CU-NEXT: buffer_gl1_inv
+; GFX11-CU-NEXT: buffer_gl0_inv
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("one-as") seq_cst, !mmra !{!"amdgpu-as", !"global"}
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence-mmra-local.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence-mmra-local.ll
new file mode 100644
index 000000000000..601a6a60fe7b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence-mmra-local.ll
@@ -0,0 +1,1296 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx600 < %s | FileCheck --check-prefixes=GFX6 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx700 < %s | FileCheck --check-prefixes=GFX7 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1010 < %s | FileCheck --check-prefixes=GFX10-WGP %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1010 -mattr=+cumode < %s | FileCheck --check-prefixes=GFX10-CU %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -O0 -mcpu=gfx700 -amdgcn-skip-cache-invalidations < %s | FileCheck --check-prefixes=SKIP-CACHE-INV %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx90a < %s | FileCheck -check-prefixes=GFX90A-NOTTGSPLIT %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx90a -mattr=+tgsplit < %s | FileCheck -check-prefixes=GFX90A-TGSPLIT %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx940 < %s | FileCheck -check-prefixes=GFX940-NOTTGSPLIT %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx940 -mattr=+tgsplit < %s | FileCheck -check-prefixes=GFX940-TGSPLIT %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1100 < %s | FileCheck --check-prefixes=GFX11-WGP %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1100 -mattr=+cumode < %s | FileCheck --check-prefixes=GFX11-CU %s
+
+define amdgpu_kernel void @workgroup_acquire_fence() {
+; GFX6-LABEL: workgroup_acquire_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: workgroup_acquire_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: workgroup_acquire_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: workgroup_acquire_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: workgroup_acquire_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt lgkmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: workgroup_acquire_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: workgroup_acquire_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: workgroup_acquire_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: workgroup_acquire_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: workgroup_acquire_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: workgroup_acquire_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("workgroup") acquire, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @workgroup_release_fence() {
+; GFX6-LABEL: workgroup_release_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: workgroup_release_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: workgroup_release_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: workgroup_release_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: workgroup_release_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt lgkmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: workgroup_release_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: workgroup_release_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: workgroup_release_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: workgroup_release_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: workgroup_release_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: workgroup_release_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("workgroup") release, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @workgroup_acq_rel_fence() {
+; GFX6-LABEL: workgroup_acq_rel_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: workgroup_acq_rel_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: workgroup_acq_rel_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: workgroup_acq_rel_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: workgroup_acq_rel_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt lgkmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: workgroup_acq_rel_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: workgroup_acq_rel_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: workgroup_acq_rel_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: workgroup_acq_rel_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: workgroup_acq_rel_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: workgroup_acq_rel_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("workgroup") acq_rel, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @workgroup_seq_cst_fence() {
+; GFX6-LABEL: workgroup_seq_cst_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: workgroup_seq_cst_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: workgroup_seq_cst_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: workgroup_seq_cst_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: workgroup_seq_cst_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt lgkmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: workgroup_seq_cst_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: workgroup_seq_cst_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: workgroup_seq_cst_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: workgroup_seq_cst_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: workgroup_seq_cst_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: workgroup_seq_cst_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("workgroup") seq_cst, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @workgroup_one_as_acquire_fence() {
+; GFX6-LABEL: workgroup_one_as_acquire_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: workgroup_one_as_acquire_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: workgroup_one_as_acquire_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: workgroup_one_as_acquire_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: workgroup_one_as_acquire_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: workgroup_one_as_acquire_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: workgroup_one_as_acquire_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: workgroup_one_as_acquire_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: workgroup_one_as_acquire_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: workgroup_one_as_acquire_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: workgroup_one_as_acquire_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("workgroup-one-as") acquire, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @workgroup_one_as_release_fence() {
+; GFX6-LABEL: workgroup_one_as_release_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: workgroup_one_as_release_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: workgroup_one_as_release_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: workgroup_one_as_release_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: workgroup_one_as_release_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: workgroup_one_as_release_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: workgroup_one_as_release_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: workgroup_one_as_release_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: workgroup_one_as_release_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: workgroup_one_as_release_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: workgroup_one_as_release_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("workgroup-one-as") release, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @workgroup_one_as_acq_rel_fence() {
+; GFX6-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: workgroup_one_as_acq_rel_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: workgroup_one_as_acq_rel_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("workgroup-one-as") acq_rel, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @workgroup_one_as_seq_cst_fence() {
+; GFX6-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: workgroup_one_as_seq_cst_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: workgroup_one_as_seq_cst_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("workgroup-one-as") seq_cst, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @agent_acquire_fence() {
+; GFX6-LABEL: agent_acquire_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: agent_acquire_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: agent_acquire_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: agent_acquire_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: agent_acquire_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt lgkmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: agent_acquire_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: agent_acquire_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: agent_acquire_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: agent_acquire_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: agent_acquire_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: agent_acquire_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("agent") acquire, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @agent_release_fence() {
+; GFX6-LABEL: agent_release_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: agent_release_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: agent_release_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: agent_release_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: agent_release_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt lgkmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: agent_release_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: agent_release_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: agent_release_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: agent_release_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: agent_release_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: agent_release_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("agent") release, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @agent_acq_rel_fence() {
+; GFX6-LABEL: agent_acq_rel_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: agent_acq_rel_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: agent_acq_rel_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: agent_acq_rel_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: agent_acq_rel_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt lgkmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: agent_acq_rel_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: agent_acq_rel_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: agent_acq_rel_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: agent_acq_rel_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: agent_acq_rel_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: agent_acq_rel_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("agent") acq_rel, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @agent_seq_cst_fence() {
+; GFX6-LABEL: agent_seq_cst_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: agent_seq_cst_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: agent_seq_cst_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: agent_seq_cst_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: agent_seq_cst_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt lgkmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: agent_seq_cst_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: agent_seq_cst_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: agent_seq_cst_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: agent_seq_cst_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: agent_seq_cst_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: agent_seq_cst_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("agent") seq_cst, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @agent_one_as_acquire_fence() {
+; GFX6-LABEL: agent_one_as_acquire_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: agent_one_as_acquire_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: agent_one_as_acquire_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: agent_one_as_acquire_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: agent_one_as_acquire_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: agent_one_as_acquire_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: agent_one_as_acquire_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: agent_one_as_acquire_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: agent_one_as_acquire_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: agent_one_as_acquire_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: agent_one_as_acquire_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("agent-one-as") acquire, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @agent_one_as_release_fence() {
+; GFX6-LABEL: agent_one_as_release_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: agent_one_as_release_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: agent_one_as_release_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: agent_one_as_release_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: agent_one_as_release_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: agent_one_as_release_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: agent_one_as_release_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: agent_one_as_release_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: agent_one_as_release_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: agent_one_as_release_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: agent_one_as_release_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("agent-one-as") release, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @agent_one_as_acq_rel_fence() {
+; GFX6-LABEL: agent_one_as_acq_rel_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: agent_one_as_acq_rel_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: agent_one_as_acq_rel_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: agent_one_as_acq_rel_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: agent_one_as_acq_rel_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: agent_one_as_acq_rel_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: agent_one_as_acq_rel_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: agent_one_as_acq_rel_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: agent_one_as_acq_rel_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: agent_one_as_acq_rel_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: agent_one_as_acq_rel_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("agent-one-as") acq_rel, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @agent_one_as_seq_cst_fence() {
+; GFX6-LABEL: agent_one_as_seq_cst_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: agent_one_as_seq_cst_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: agent_one_as_seq_cst_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: agent_one_as_seq_cst_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: agent_one_as_seq_cst_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: agent_one_as_seq_cst_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: agent_one_as_seq_cst_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: agent_one_as_seq_cst_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: agent_one_as_seq_cst_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: agent_one_as_seq_cst_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: agent_one_as_seq_cst_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("agent-one-as") seq_cst, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @system_acquire_fence() {
+; GFX6-LABEL: system_acquire_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: system_acquire_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: system_acquire_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: system_acquire_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: system_acquire_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt lgkmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: system_acquire_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: system_acquire_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: system_acquire_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: system_acquire_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: system_acquire_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: system_acquire_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence acquire, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @system_release_fence() {
+; GFX6-LABEL: system_release_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: system_release_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: system_release_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: system_release_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: system_release_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt lgkmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: system_release_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: system_release_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: system_release_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: system_release_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: system_release_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: system_release_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence release, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @system_acq_rel_fence() {
+; GFX6-LABEL: system_acq_rel_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: system_acq_rel_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: system_acq_rel_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: system_acq_rel_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: system_acq_rel_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt lgkmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: system_acq_rel_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: system_acq_rel_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: system_acq_rel_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: system_acq_rel_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: system_acq_rel_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: system_acq_rel_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence acq_rel, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @system_seq_cst_fence() {
+; GFX6-LABEL: system_seq_cst_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: system_seq_cst_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: system_seq_cst_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: system_seq_cst_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: system_seq_cst_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_waitcnt lgkmcnt(0)
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: system_seq_cst_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: system_seq_cst_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: system_seq_cst_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: system_seq_cst_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: system_seq_cst_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: system_seq_cst_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence seq_cst, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @system_one_as_acquire_fence() {
+; GFX6-LABEL: system_one_as_acquire_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: system_one_as_acquire_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: system_one_as_acquire_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: system_one_as_acquire_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: system_one_as_acquire_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: system_one_as_acquire_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: system_one_as_acquire_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: system_one_as_acquire_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: system_one_as_acquire_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: system_one_as_acquire_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: system_one_as_acquire_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("one-as") acquire, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @system_one_as_release_fence() {
+; GFX6-LABEL: system_one_as_release_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: system_one_as_release_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: system_one_as_release_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: system_one_as_release_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: system_one_as_release_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: system_one_as_release_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: system_one_as_release_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: system_one_as_release_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: system_one_as_release_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: system_one_as_release_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: system_one_as_release_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("one-as") release, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @system_one_as_acq_rel_fence() {
+; GFX6-LABEL: system_one_as_acq_rel_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: system_one_as_acq_rel_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: system_one_as_acq_rel_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: system_one_as_acq_rel_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: system_one_as_acq_rel_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: system_one_as_acq_rel_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: system_one_as_acq_rel_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: system_one_as_acq_rel_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: system_one_as_acq_rel_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: system_one_as_acq_rel_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: system_one_as_acq_rel_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("one-as") acq_rel, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
+
+define amdgpu_kernel void @system_one_as_seq_cst_fence() {
+; GFX6-LABEL: system_one_as_seq_cst_fence:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_endpgm
+;
+; GFX7-LABEL: system_one_as_seq_cst_fence:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_endpgm
+;
+; GFX10-WGP-LABEL: system_one_as_seq_cst_fence:
+; GFX10-WGP: ; %bb.0: ; %entry
+; GFX10-WGP-NEXT: s_endpgm
+;
+; GFX10-CU-LABEL: system_one_as_seq_cst_fence:
+; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_endpgm
+;
+; SKIP-CACHE-INV-LABEL: system_one_as_seq_cst_fence:
+; SKIP-CACHE-INV: ; %bb.0: ; %entry
+; SKIP-CACHE-INV-NEXT: s_endpgm
+;
+; GFX90A-NOTTGSPLIT-LABEL: system_one_as_seq_cst_fence:
+; GFX90A-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX90A-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX90A-TGSPLIT-LABEL: system_one_as_seq_cst_fence:
+; GFX90A-TGSPLIT: ; %bb.0: ; %entry
+; GFX90A-TGSPLIT-NEXT: s_endpgm
+;
+; GFX940-NOTTGSPLIT-LABEL: system_one_as_seq_cst_fence:
+; GFX940-NOTTGSPLIT: ; %bb.0: ; %entry
+; GFX940-NOTTGSPLIT-NEXT: s_endpgm
+;
+; GFX940-TGSPLIT-LABEL: system_one_as_seq_cst_fence:
+; GFX940-TGSPLIT: ; %bb.0: ; %entry
+; GFX940-TGSPLIT-NEXT: s_endpgm
+;
+; GFX11-WGP-LABEL: system_one_as_seq_cst_fence:
+; GFX11-WGP: ; %bb.0: ; %entry
+; GFX11-WGP-NEXT: s_endpgm
+;
+; GFX11-CU-LABEL: system_one_as_seq_cst_fence:
+; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_endpgm
+entry:
+ fence syncscope("one-as") seq_cst, !mmra !{!"amdgpu-as", !"local"}
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/permute_i8.ll b/llvm/test/CodeGen/AMDGPU/permute_i8.ll
index 8ac332197215..7ca9ae359a49 100644
--- a/llvm/test/CodeGen/AMDGPU/permute_i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/permute_i8.ll
@@ -3816,13 +3816,15 @@ define hidden void @extract_v13i64(ptr addrspace(1) %in0, ptr addrspace(1) %in1,
; GFX10-LABEL: extract_v13i64:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: s_clause 0x1
-; GFX10-NEXT: global_load_dwordx4 v[8:11], v[0:1], off
-; GFX10-NEXT: global_load_dwordx4 v[12:15], v[0:1], off offset:16
+; GFX10-NEXT: s_clause 0x2
+; GFX10-NEXT: global_load_dwordx4 v[8:11], v[0:1], off offset:48
+; GFX10-NEXT: global_load_dwordx4 v[11:14], v[0:1], off
+; GFX10-NEXT: global_load_dwordx4 v[14:17], v[0:1], off offset:64
+; GFX10-NEXT: ; kill: killed $vgpr0 killed $vgpr1
; GFX10-NEXT: s_waitcnt vmcnt(1)
-; GFX10-NEXT: v_perm_b32 v0, v9, v8, 0x3020504
+; GFX10-NEXT: v_perm_b32 v0, v12, v13, 0x1000504
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_perm_b32 v1, v11, v12, 0x1000706
+; GFX10-NEXT: v_perm_b32 v1, v10, v14, 0x1000504
; GFX10-NEXT: global_store_dword v[4:5], v0, off
; GFX10-NEXT: global_store_dword v[6:7], v1, off
; GFX10-NEXT: s_setpc_b64 s[30:31]
@@ -3830,14 +3832,15 @@ define hidden void @extract_v13i64(ptr addrspace(1) %in0, ptr addrspace(1) %in1,
; GFX9-LABEL: extract_v13i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx4 v[8:11], v[0:1], off
-; GFX9-NEXT: global_load_dwordx4 v[12:15], v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b32 s4, 0x3020504
-; GFX9-NEXT: s_mov_b32 s5, 0x1000706
+; GFX9-NEXT: global_load_dwordx4 v[8:11], v[0:1], off offset:48
+; GFX9-NEXT: global_load_dwordx4 v[11:14], v[0:1], off
+; GFX9-NEXT: global_load_dwordx4 v[14:17], v[0:1], off offset:64
+; GFX9-NEXT: s_mov_b32 s4, 0x1000504
+; GFX9-NEXT: ; kill: killed $vgpr0 killed $vgpr1
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_perm_b32 v0, v9, v8, s4
+; GFX9-NEXT: v_perm_b32 v0, v12, v13, s4
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v1, v11, v12, s5
+; GFX9-NEXT: v_perm_b32 v1, v10, v14, s4
; GFX9-NEXT: global_store_dword v[4:5], v0, off
; GFX9-NEXT: global_store_dword v[6:7], v1, off
; GFX9-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
index f0e709b5a172..857bb897ead2 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
@@ -1,18 +1,14 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx940 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX940-NO-PRELOAD %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX940-PRELOAD-1 %s
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=2 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX940-PRELOAD-2 %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=4 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX940-PRELOAD-4 %s
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=8 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX940-PRELOAD-8 %s
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx90a -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX90a-NO-PRELOAD %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx90a -amdgpu-kernarg-preload-count=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX90a-PRELOAD-1 %s
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx90a -amdgpu-kernarg-preload-count=2 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX90a-PRELOAD-2 %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx90a -amdgpu-kernarg-preload-count=4 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX90a-PRELOAD-4 %s
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx90a -amdgpu-kernarg-preload-count=8 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX90a-PRELOAD-8 %s
-define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
-; GFX940-NO-PRELOAD-LABEL: ptr1_i8:
+define amdgpu_kernel void @ptr1_i8_kernel_preload_arg(ptr addrspace(1) %out, i8 %arg0) {
+; GFX940-NO-PRELOAD-LABEL: ptr1_i8_kernel_preload_arg:
; GFX940-NO-PRELOAD: ; %bb.0:
; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
@@ -23,19 +19,7 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX940-PRELOAD-1-LABEL: ptr1_i8:
-; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-1-NEXT: ; %bb.0:
-; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xff
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-2-LABEL: ptr1_i8:
+; GFX940-PRELOAD-2-LABEL: ptr1_i8_kernel_preload_arg:
; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
@@ -45,17 +29,7 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX940-PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
-; GFX940-PRELOAD-4-LABEL: ptr1_i8:
-; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-4-NEXT: ; %bb.0:
-; GFX940-PRELOAD-4-NEXT: s_and_b32 s0, s4, 0xff
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
-; GFX940-PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-8-LABEL: ptr1_i8:
+; GFX940-PRELOAD-8-LABEL: ptr1_i8_kernel_preload_arg:
; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
@@ -65,7 +39,7 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX940-PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
; GFX940-PRELOAD-8-NEXT: s_endpgm
;
-; GFX90a-NO-PRELOAD-LABEL: ptr1_i8:
+; GFX90a-NO-PRELOAD-LABEL: ptr1_i8_kernel_preload_arg:
; GFX90a-NO-PRELOAD: ; %bb.0:
; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x8
; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
@@ -76,19 +50,7 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[0:1]
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-1-LABEL: ptr1_i8:
-; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xff
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v1, s[6:7]
-; GFX90a-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-2-LABEL: ptr1_i8:
+; GFX90a-PRELOAD-2-LABEL: ptr1_i8_kernel_preload_arg:
; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
@@ -98,17 +60,7 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX90a-PRELOAD-2-NEXT: global_store_dword v0, v1, s[6:7]
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-4-LABEL: ptr1_i8:
-; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-4-NEXT: s_and_b32 s0, s8, 0xff
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
-; GFX90a-PRELOAD-4-NEXT: global_store_dword v0, v1, s[6:7]
-; GFX90a-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-8-LABEL: ptr1_i8:
+; GFX90a-PRELOAD-8-LABEL: ptr1_i8_kernel_preload_arg:
; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
@@ -122,8 +74,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
ret void
}
-define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %arg0) {
-; GFX940-NO-PRELOAD-LABEL: ptr1_i8_zext_arg:
+define amdgpu_kernel void @ptr1_i8_zext_kernel_preload_arg(ptr addrspace(1) %out, i8 zeroext %arg0) {
+; GFX940-NO-PRELOAD-LABEL: ptr1_i8_zext_kernel_preload_arg:
; GFX940-NO-PRELOAD: ; %bb.0:
; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
@@ -134,19 +86,7 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX940-PRELOAD-1-LABEL: ptr1_i8_zext_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-1-NEXT: ; %bb.0:
-; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xff
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-2-LABEL: ptr1_i8_zext_arg:
+; GFX940-PRELOAD-2-LABEL: ptr1_i8_zext_kernel_preload_arg:
; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
@@ -157,18 +97,7 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX940-PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
-; GFX940-PRELOAD-4-LABEL: ptr1_i8_zext_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-4-NEXT: ; %bb.0:
-; GFX940-PRELOAD-4-NEXT: s_mov_b32 s0, 0xffff
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s4
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; GFX940-PRELOAD-4-NEXT: v_and_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX940-PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-8-LABEL: ptr1_i8_zext_arg:
+; GFX940-PRELOAD-8-LABEL: ptr1_i8_zext_kernel_preload_arg:
; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
@@ -179,7 +108,7 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX940-PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
; GFX940-PRELOAD-8-NEXT: s_endpgm
;
-; GFX90a-NO-PRELOAD-LABEL: ptr1_i8_zext_arg:
+; GFX90a-NO-PRELOAD-LABEL: ptr1_i8_zext_kernel_preload_arg:
; GFX90a-NO-PRELOAD: ; %bb.0:
; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x8
; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
@@ -190,19 +119,7 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[0:1]
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-1-LABEL: ptr1_i8_zext_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xff
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v1, s[6:7]
-; GFX90a-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-2-LABEL: ptr1_i8_zext_arg:
+; GFX90a-PRELOAD-2-LABEL: ptr1_i8_zext_kernel_preload_arg:
; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
@@ -213,18 +130,7 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX90a-PRELOAD-2-NEXT: global_store_dword v0, v1, s[6:7]
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-4-LABEL: ptr1_i8_zext_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-4-NEXT: s_mov_b32 s0, 0xffff
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s8
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; GFX90a-PRELOAD-4-NEXT: v_and_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX90a-PRELOAD-4-NEXT: global_store_dword v0, v1, s[6:7]
-; GFX90a-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-8-LABEL: ptr1_i8_zext_arg:
+; GFX90a-PRELOAD-8-LABEL: ptr1_i8_zext_kernel_preload_arg:
; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
@@ -239,8 +145,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
ret void
}
-define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0) {
-; GFX940-NO-PRELOAD-LABEL: ptr1_i16_preload_arg:
+define amdgpu_kernel void @ptr1_i16_kernel_preload_arg(ptr addrspace(1) %out, i16 %arg0) {
+; GFX940-NO-PRELOAD-LABEL: ptr1_i16_kernel_preload_arg:
; GFX940-NO-PRELOAD: ; %bb.0:
; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
@@ -251,19 +157,7 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX940-PRELOAD-1-LABEL: ptr1_i16_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-1-NEXT: ; %bb.0:
-; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xffff
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-2-LABEL: ptr1_i16_preload_arg:
+; GFX940-PRELOAD-2-LABEL: ptr1_i16_kernel_preload_arg:
; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
@@ -273,17 +167,7 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX940-PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
-; GFX940-PRELOAD-4-LABEL: ptr1_i16_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-4-NEXT: ; %bb.0:
-; GFX940-PRELOAD-4-NEXT: s_and_b32 s0, s4, 0xffff
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
-; GFX940-PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-8-LABEL: ptr1_i16_preload_arg:
+; GFX940-PRELOAD-8-LABEL: ptr1_i16_kernel_preload_arg:
; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
@@ -293,7 +177,7 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX940-PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
; GFX940-PRELOAD-8-NEXT: s_endpgm
;
-; GFX90a-NO-PRELOAD-LABEL: ptr1_i16_preload_arg:
+; GFX90a-NO-PRELOAD-LABEL: ptr1_i16_kernel_preload_arg:
; GFX90a-NO-PRELOAD: ; %bb.0:
; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x8
; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
@@ -304,19 +188,7 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[0:1]
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-1-LABEL: ptr1_i16_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xffff
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v1, s[6:7]
-; GFX90a-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-2-LABEL: ptr1_i16_preload_arg:
+; GFX90a-PRELOAD-2-LABEL: ptr1_i16_kernel_preload_arg:
; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
@@ -326,17 +198,7 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX90a-PRELOAD-2-NEXT: global_store_dword v0, v1, s[6:7]
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-4-LABEL: ptr1_i16_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-4-NEXT: s_and_b32 s0, s8, 0xffff
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
-; GFX90a-PRELOAD-4-NEXT: global_store_dword v0, v1, s[6:7]
-; GFX90a-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-8-LABEL: ptr1_i16_preload_arg:
+; GFX90a-PRELOAD-8-LABEL: ptr1_i16_kernel_preload_arg:
; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
@@ -350,8 +212,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
ret void
}
-define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0) {
-; GFX940-NO-PRELOAD-LABEL: ptr1_i32_preload_arg:
+define amdgpu_kernel void @ptr1_i32_kernel_preload_arg(ptr addrspace(1) %out, i32 %arg0) {
+; GFX940-NO-PRELOAD-LABEL: ptr1_i32_kernel_preload_arg:
; GFX940-NO-PRELOAD: ; %bb.0:
; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
@@ -361,18 +223,7 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX940-PRELOAD-1-LABEL: ptr1_i32_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-1-NEXT: ; %bb.0:
-; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-2-LABEL: ptr1_i32_preload_arg:
+; GFX940-PRELOAD-2-LABEL: ptr1_i32_kernel_preload_arg:
; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
@@ -381,16 +232,7 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX940-PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
-; GFX940-PRELOAD-4-LABEL: ptr1_i32_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-4-NEXT: ; %bb.0:
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s4
-; GFX940-PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-8-LABEL: ptr1_i32_preload_arg:
+; GFX940-PRELOAD-8-LABEL: ptr1_i32_kernel_preload_arg:
; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
@@ -399,7 +241,7 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX940-PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
; GFX940-PRELOAD-8-NEXT: s_endpgm
;
-; GFX90a-NO-PRELOAD-LABEL: ptr1_i32_preload_arg:
+; GFX90a-NO-PRELOAD-LABEL: ptr1_i32_kernel_preload_arg:
; GFX90a-NO-PRELOAD: ; %bb.0:
; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x8
; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
@@ -409,18 +251,7 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[0:1]
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-1-LABEL: ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v1, s[6:7]
-; GFX90a-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-2-LABEL: ptr1_i32_preload_arg:
+; GFX90a-PRELOAD-2-LABEL: ptr1_i32_kernel_preload_arg:
; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
@@ -429,16 +260,7 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX90a-PRELOAD-2-NEXT: global_store_dword v0, v1, s[6:7]
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-4-LABEL: ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s8
-; GFX90a-PRELOAD-4-NEXT: global_store_dword v0, v1, s[6:7]
-; GFX90a-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-8-LABEL: ptr1_i32_preload_arg:
+; GFX90a-PRELOAD-8-LABEL: ptr1_i32_kernel_preload_arg:
; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
@@ -451,8 +273,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
}
-define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1) %out, i32 %arg1) {
-; GFX940-NO-PRELOAD-LABEL: i32_ptr1_i32_preload_arg:
+define amdgpu_kernel void @i32_ptr1_i32_kernel_preload_arg(i32 %arg0, ptr addrspace(1) %out, i32 %arg1) {
+; GFX940-NO-PRELOAD-LABEL: i32_ptr1_i32_kernel_preload_arg:
; GFX940-NO-PRELOAD: ; %bb.0:
; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x10
; GFX940-NO-PRELOAD-NEXT: s_load_dword s5, s[0:1], 0x0
@@ -464,20 +286,7 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX940-PRELOAD-1-LABEL: i32_ptr1_i32_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-1-NEXT: ; %bb.0:
-; GFX940-PRELOAD-1-NEXT: s_load_dword s3, s[0:1], 0x10
-; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x8
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-1-NEXT: s_add_i32 s0, s2, s3
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v1, s[4:5] sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-2-LABEL: i32_ptr1_i32_preload_arg:
+; GFX940-PRELOAD-2-LABEL: i32_ptr1_i32_kernel_preload_arg:
; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
@@ -489,17 +298,7 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX940-PRELOAD-2-NEXT: global_store_dword v0, v1, s[4:5] sc0 sc1
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
-; GFX940-PRELOAD-4-LABEL: i32_ptr1_i32_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-4-NEXT: ; %bb.0:
-; GFX940-PRELOAD-4-NEXT: s_add_i32 s0, s2, s6
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
-; GFX940-PRELOAD-4-NEXT: global_store_dword v0, v1, s[4:5] sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-8-LABEL: i32_ptr1_i32_preload_arg:
+; GFX940-PRELOAD-8-LABEL: i32_ptr1_i32_kernel_preload_arg:
; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
@@ -509,7 +308,7 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX940-PRELOAD-8-NEXT: global_store_dword v0, v1, s[4:5] sc0 sc1
; GFX940-PRELOAD-8-NEXT: s_endpgm
;
-; GFX90a-NO-PRELOAD-LABEL: i32_ptr1_i32_preload_arg:
+; GFX90a-NO-PRELOAD-LABEL: i32_ptr1_i32_kernel_preload_arg:
; GFX90a-NO-PRELOAD: ; %bb.0:
; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x10
; GFX90a-NO-PRELOAD-NEXT: s_load_dword s3, s[4:5], 0x0
@@ -521,20 +320,7 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[0:1]
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-1-LABEL: i32_ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-1-NEXT: s_load_dword s2, s[4:5], 0x10
-; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-1-NEXT: s_add_i32 s2, s6, s2
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s2
-; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v1, s[0:1]
-; GFX90a-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-2-LABEL: i32_ptr1_i32_preload_arg:
+; GFX90a-PRELOAD-2-LABEL: i32_ptr1_i32_kernel_preload_arg:
; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
@@ -546,17 +332,7 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX90a-PRELOAD-2-NEXT: global_store_dword v0, v1, s[8:9]
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-4-LABEL: i32_ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-4-NEXT: s_add_i32 s0, s6, s10
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
-; GFX90a-PRELOAD-4-NEXT: global_store_dword v0, v1, s[8:9]
-; GFX90a-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-8-LABEL: i32_ptr1_i32_preload_arg:
+; GFX90a-PRELOAD-8-LABEL: i32_ptr1_i32_kernel_preload_arg:
; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
@@ -570,8 +346,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
ret void
}
-define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0, i16 %arg1) {
-; GFX940-NO-PRELOAD-LABEL: ptr1_i16_i16_preload_arg:
+define amdgpu_kernel void @ptr1_i16_i16_kernel_preload_arg(ptr addrspace(1) %out, i16 %arg0, i16 %arg1) {
+; GFX940-NO-PRELOAD-LABEL: ptr1_i16_i16_kernel_preload_arg:
; GFX940-NO-PRELOAD: ; %bb.0:
; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
@@ -584,21 +360,7 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX940-PRELOAD-1-LABEL: ptr1_i16_i16_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-1-NEXT: ; %bb.0:
-; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-1-NEXT: s_lshr_b32 s1, s0, 16
-; GFX940-PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xffff
-; GFX940-PRELOAD-1-NEXT: s_add_i32 s0, s0, s1
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-2-LABEL: ptr1_i16_i16_preload_arg:
+; GFX940-PRELOAD-2-LABEL: ptr1_i16_i16_kernel_preload_arg:
; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
@@ -612,19 +374,7 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX940-PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
-; GFX940-PRELOAD-4-LABEL: ptr1_i16_i16_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-4-NEXT: ; %bb.0:
-; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 16
-; GFX940-PRELOAD-4-NEXT: s_and_b32 s1, s4, 0xffff
-; GFX940-PRELOAD-4-NEXT: s_add_i32 s0, s1, s0
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
-; GFX940-PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-8-LABEL: ptr1_i16_i16_preload_arg:
+; GFX940-PRELOAD-8-LABEL: ptr1_i16_i16_kernel_preload_arg:
; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
@@ -636,7 +386,7 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX940-PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
; GFX940-PRELOAD-8-NEXT: s_endpgm
;
-; GFX90a-NO-PRELOAD-LABEL: ptr1_i16_i16_preload_arg:
+; GFX90a-NO-PRELOAD-LABEL: ptr1_i16_i16_kernel_preload_arg:
; GFX90a-NO-PRELOAD: ; %bb.0:
; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x8
; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
@@ -649,21 +399,7 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[0:1]
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-1-LABEL: ptr1_i16_i16_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-1-NEXT: s_lshr_b32 s1, s0, 16
-; GFX90a-PRELOAD-1-NEXT: s_and_b32 s0, s0, 0xffff
-; GFX90a-PRELOAD-1-NEXT: s_add_i32 s0, s0, s1
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v1, s[6:7]
-; GFX90a-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-2-LABEL: ptr1_i16_i16_preload_arg:
+; GFX90a-PRELOAD-2-LABEL: ptr1_i16_i16_kernel_preload_arg:
; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
@@ -677,19 +413,7 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX90a-PRELOAD-2-NEXT: global_store_dword v0, v1, s[6:7]
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-4-LABEL: ptr1_i16_i16_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 16
-; GFX90a-PRELOAD-4-NEXT: s_and_b32 s1, s8, 0xffff
-; GFX90a-PRELOAD-4-NEXT: s_add_i32 s0, s1, s0
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
-; GFX90a-PRELOAD-4-NEXT: global_store_dword v0, v1, s[6:7]
-; GFX90a-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-8-LABEL: ptr1_i16_i16_preload_arg:
+; GFX90a-PRELOAD-8-LABEL: ptr1_i16_i16_kernel_preload_arg:
; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
@@ -707,8 +431,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
ret void
}
-define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8> %in) {
-; GFX940-NO-PRELOAD-LABEL: ptr1_v2i8_preload_arg:
+define amdgpu_kernel void @ptr1_v2i8_kernel_preload_arg(ptr addrspace(1) %out, <2 x i8> %in) {
+; GFX940-NO-PRELOAD-LABEL: ptr1_v2i8_kernel_preload_arg:
; GFX940-NO-PRELOAD: ; %bb.0:
; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
@@ -718,18 +442,7 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX940-NO-PRELOAD-NEXT: global_store_short v0, v1, s[2:3] sc0 sc1
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX940-PRELOAD-1-LABEL: ptr1_v2i8_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-1-NEXT: ; %bb.0:
-; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; GFX940-PRELOAD-1-NEXT: global_store_short v0, v1, s[2:3] sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-2-LABEL: ptr1_v2i8_preload_arg:
+; GFX940-PRELOAD-2-LABEL: ptr1_v2i8_kernel_preload_arg:
; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
@@ -740,18 +453,7 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX940-PRELOAD-2-NEXT: global_store_short v1, v0, s[2:3] sc0 sc1
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
-; GFX940-PRELOAD-4-LABEL: ptr1_v2i8_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-4-NEXT: ; %bb.0:
-; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 8
-; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, 0
-; GFX940-PRELOAD-4-NEXT: global_store_short v1, v0, s[2:3] sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-8-LABEL: ptr1_v2i8_preload_arg:
+; GFX940-PRELOAD-8-LABEL: ptr1_v2i8_kernel_preload_arg:
; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
@@ -762,7 +464,7 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX940-PRELOAD-8-NEXT: global_store_short v1, v0, s[2:3] sc0 sc1
; GFX940-PRELOAD-8-NEXT: s_endpgm
;
-; GFX90a-NO-PRELOAD-LABEL: ptr1_v2i8_preload_arg:
+; GFX90a-NO-PRELOAD-LABEL: ptr1_v2i8_kernel_preload_arg:
; GFX90a-NO-PRELOAD: ; %bb.0:
; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x8
; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
@@ -772,18 +474,7 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX90a-NO-PRELOAD-NEXT: global_store_short v0, v1, s[0:1]
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-1-LABEL: ptr1_v2i8_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; GFX90a-PRELOAD-1-NEXT: global_store_short v0, v1, s[6:7]
-; GFX90a-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-2-LABEL: ptr1_v2i8_preload_arg:
+; GFX90a-PRELOAD-2-LABEL: ptr1_v2i8_kernel_preload_arg:
; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
@@ -794,18 +485,7 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX90a-PRELOAD-2-NEXT: global_store_short v1, v0, s[6:7]
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-4-LABEL: ptr1_v2i8_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 8
-; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v0, s8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, 0
-; GFX90a-PRELOAD-4-NEXT: global_store_short v1, v0, s[6:7]
-; GFX90a-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-8-LABEL: ptr1_v2i8_preload_arg:
+; GFX90a-PRELOAD-8-LABEL: ptr1_v2i8_kernel_preload_arg:
; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
@@ -820,8 +500,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
}
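; A worked check on the preload header asserted throughout this file: the
; one-dword s_trap 2 plus ".fill 63, 4, 0xbf800000" (63 dwords, each the
; s_nop 0 encoding per the inline comment) total 64 dwords, i.e. 64 * 4 =
; 256 bytes. Firmware that cannot preload kernel arguments starts at the
; header and hits the trap; firmware that supports preloading is expected
; to begin execution past the 256-byte prologue.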
-define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspace(4) byref(i32) align(256) %in.byref, i32 %after.offset) {
-; GFX940-NO-PRELOAD-LABEL: byref_preload_arg:
+define amdgpu_kernel void @byref_kernel_preload_arg(ptr addrspace(1) %out, ptr addrspace(4) byref(i32) align(256) %in.byref, i32 %after.offset) {
+; GFX940-NO-PRELOAD-LABEL: byref_kernel_preload_arg:
; GFX940-NO-PRELOAD: ; %bb.0:
; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x100
; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
@@ -835,22 +515,7 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX940-NO-PRELOAD-NEXT: s_waitcnt vmcnt(0)
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX940-PRELOAD-1-LABEL: byref_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-1-NEXT: ; %bb.0:
-; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s1
-; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_waitcnt vmcnt(0)
-; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v2, s[2:3] sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_waitcnt vmcnt(0)
-; GFX940-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-2-LABEL: byref_preload_arg:
+; GFX940-PRELOAD-2-LABEL: byref_kernel_preload_arg:
; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
@@ -865,22 +530,7 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX940-PRELOAD-2-NEXT: s_waitcnt vmcnt(0)
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
-; GFX940-PRELOAD-4-LABEL: byref_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-4-NEXT: ; %bb.0:
-; GFX940-PRELOAD-4-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; GFX940-PRELOAD-4-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s1
-; GFX940-PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_waitcnt vmcnt(0)
-; GFX940-PRELOAD-4-NEXT: global_store_dword v0, v2, s[2:3] sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_waitcnt vmcnt(0)
-; GFX940-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-8-LABEL: byref_preload_arg:
+; GFX940-PRELOAD-8-LABEL: byref_kernel_preload_arg:
; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
@@ -895,7 +545,7 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX940-PRELOAD-8-NEXT: s_waitcnt vmcnt(0)
; GFX940-PRELOAD-8-NEXT: s_endpgm
;
-; GFX90a-NO-PRELOAD-LABEL: byref_preload_arg:
+; GFX90a-NO-PRELOAD-LABEL: byref_kernel_preload_arg:
; GFX90a-NO-PRELOAD: ; %bb.0:
; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x0
@@ -909,22 +559,7 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX90a-NO-PRELOAD-NEXT: s_waitcnt vmcnt(0)
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-1-LABEL: byref_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s0
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s1
-; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v1, s[6:7]
-; GFX90a-PRELOAD-1-NEXT: s_waitcnt vmcnt(0)
-; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v2, s[6:7]
-; GFX90a-PRELOAD-1-NEXT: s_waitcnt vmcnt(0)
-; GFX90a-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-2-LABEL: byref_preload_arg:
+; GFX90a-PRELOAD-2-LABEL: byref_kernel_preload_arg:
; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
@@ -939,22 +574,7 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX90a-PRELOAD-2-NEXT: s_waitcnt vmcnt(0)
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-4-LABEL: byref_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-4-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; GFX90a-PRELOAD-4-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s0
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s1
-; GFX90a-PRELOAD-4-NEXT: global_store_dword v0, v1, s[6:7]
-; GFX90a-PRELOAD-4-NEXT: s_waitcnt vmcnt(0)
-; GFX90a-PRELOAD-4-NEXT: global_store_dword v0, v2, s[6:7]
-; GFX90a-PRELOAD-4-NEXT: s_waitcnt vmcnt(0)
-; GFX90a-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-8-LABEL: byref_preload_arg:
+; GFX90a-PRELOAD-8-LABEL: byref_kernel_preload_arg:
; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
@@ -975,8 +595,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
}
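; A minimal sketch of the body this diff elides, inferred from the checks
; above (a kernarg load at offset 0x100 followed by two volatile dword
; stores); illustrative only, not necessarily the verbatim test body:
;   %in = load i32, ptr addrspace(4) %in.byref
;   store volatile i32 %in, ptr addrspace(1) %out
;   store volatile i32 %after.offset, ptr addrspace(1) %out
;   ret void
; Note that the PRELOAD-1 and PRELOAD-4 bodies deleted above still issue
; s_load_dwordx2 from offset 0x100: a byref argument stays in kernarg
; memory (here at its align(256) offset) rather than being preloaded
; into SGPRs.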
-define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32> %in) nounwind {
-; GFX940-NO-PRELOAD-LABEL: v8i32_arg:
+define amdgpu_kernel void @v8i32_kernel_preload_arg(ptr addrspace(1) nocapture %out, <8 x i32> %in) nounwind {
+; GFX940-NO-PRELOAD-LABEL: v8i32_kernel_preload_arg:
; GFX940-NO-PRELOAD: ; %bb.0:
; GFX940-NO-PRELOAD-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v4, 0
@@ -995,27 +615,7 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX940-NO-PRELOAD-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] sc0 sc1
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX940-PRELOAD-1-LABEL: v8i32_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-1-NEXT: ; %bb.0:
-; GFX940-PRELOAD-1-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v4, 0
-; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s8
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s9
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s10
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v3, s11
-; GFX940-PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_nop 1
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s4
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s5
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s6
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v3, s7
-; GFX940-PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-2-LABEL: v8i32_arg:
+; GFX940-PRELOAD-2-LABEL: v8i32_kernel_preload_arg:
; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
@@ -1035,27 +635,7 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX940-PRELOAD-2-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
-; GFX940-PRELOAD-4-LABEL: v8i32_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-4-NEXT: ; %bb.0:
-; GFX940-PRELOAD-4-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v4, 0
-; GFX940-PRELOAD-4-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s8
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s9
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s10
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v3, s11
-; GFX940-PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_nop 1
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s4
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s5
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s6
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v3, s7
-; GFX940-PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-8-LABEL: v8i32_arg:
+; GFX940-PRELOAD-8-LABEL: v8i32_kernel_preload_arg:
; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
@@ -1075,7 +655,7 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX940-PRELOAD-8-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
; GFX940-PRELOAD-8-NEXT: s_endpgm
;
-; GFX90a-NO-PRELOAD-LABEL: v8i32_arg:
+; GFX90a-NO-PRELOAD-LABEL: v8i32_kernel_preload_arg:
; GFX90a-NO-PRELOAD: ; %bb.0:
; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
@@ -1094,27 +674,7 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-1-LABEL: v8i32_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-1-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v4, 0
-; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s12
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s13
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s14
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v3, s15
-; GFX90a-PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7] offset:16
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s8
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s9
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s10
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v3, s11
-; GFX90a-PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
-; GFX90a-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-2-LABEL: v8i32_arg:
+; GFX90a-PRELOAD-2-LABEL: v8i32_kernel_preload_arg:
; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
@@ -1134,27 +694,7 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX90a-PRELOAD-2-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-4-LABEL: v8i32_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-4-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v4, 0
-; GFX90a-PRELOAD-4-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s12
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s13
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s14
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v3, s15
-; GFX90a-PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7] offset:16
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s8
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s9
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s10
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v3, s11
-; GFX90a-PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
-; GFX90a-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-8-LABEL: v8i32_arg:
+; GFX90a-PRELOAD-8-LABEL: v8i32_kernel_preload_arg:
; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
@@ -1177,8 +717,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
ret void
}
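; Assumed body for this test, sketched from the dwordx8 load and the pair
; of dwordx4 stores checked above (hypothetical reconstruction; the diff
; elides the IR):
;   store <8 x i32> %in, ptr addrspace(1) %out
;   ret void
; The deleted PRELOAD-1 and PRELOAD-4 runs above still fetch %in with
; s_load_dwordx8 from offset 0x20, so at those settings the eight-dword
; vector was not preloaded.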
-define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3 x i16> %in) nounwind {
-; GFX940-NO-PRELOAD-LABEL: v3i16_preload_arg:
+define amdgpu_kernel void @v3i16_kernel_preload_arg(ptr addrspace(1) nocapture %out, <3 x i16> %in) nounwind {
+; GFX940-NO-PRELOAD-LABEL: v3i16_kernel_preload_arg:
; GFX940-NO-PRELOAD: ; %bb.0:
; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
@@ -1189,20 +729,7 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v2, s[0:1] sc0 sc1
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX940-PRELOAD-1-LABEL: v3i16_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-1-NEXT: ; %bb.0:
-; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s1
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s0
-; GFX940-PRELOAD-1-NEXT: global_store_short v0, v1, s[2:3] offset:4 sc0 sc1
-; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v2, s[2:3] sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-2-LABEL: v3i16_preload_arg:
+; GFX940-PRELOAD-2-LABEL: v3i16_kernel_preload_arg:
; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
@@ -1213,18 +740,7 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
-; GFX940-PRELOAD-4-LABEL: v3i16_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-4-NEXT: ; %bb.0:
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s5
-; GFX940-PRELOAD-4-NEXT: global_store_short v0, v1, s[2:3] offset:4 sc0 sc1
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s4
-; GFX940-PRELOAD-4-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-8-LABEL: v3i16_preload_arg:
+; GFX940-PRELOAD-8-LABEL: v3i16_kernel_preload_arg:
; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
@@ -1235,7 +751,7 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
; GFX940-PRELOAD-8-NEXT: s_endpgm
;
-; GFX90a-NO-PRELOAD-LABEL: v3i16_preload_arg:
+; GFX90a-NO-PRELOAD-LABEL: v3i16_kernel_preload_arg:
; GFX90a-NO-PRELOAD: ; %bb.0:
; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
@@ -1246,20 +762,7 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v2, s[0:1]
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-1-LABEL: v3i16_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s1
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s0
-; GFX90a-PRELOAD-1-NEXT: global_store_short v0, v1, s[6:7] offset:4
-; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v2, s[6:7]
-; GFX90a-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-2-LABEL: v3i16_preload_arg:
+; GFX90a-PRELOAD-2-LABEL: v3i16_kernel_preload_arg:
; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
@@ -1270,18 +773,7 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-2-NEXT: global_store_dword v0, v1, s[6:7]
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-4-LABEL: v3i16_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s9
-; GFX90a-PRELOAD-4-NEXT: global_store_short v0, v1, s[6:7] offset:4
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s8
-; GFX90a-PRELOAD-4-NEXT: global_store_dword v0, v1, s[6:7]
-; GFX90a-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-8-LABEL: v3i16_preload_arg:
+; GFX90a-PRELOAD-8-LABEL: v3i16_kernel_preload_arg:
; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
@@ -1295,8 +787,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
ret void
}
-define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3 x i32> %in) nounwind {
-; GFX940-NO-PRELOAD-LABEL: v3i32_preload_arg:
+define amdgpu_kernel void @v3i32_kernel_preload_arg(ptr addrspace(1) nocapture %out, <3 x i32> %in) nounwind {
+; GFX940-NO-PRELOAD-LABEL: v3i32_kernel_preload_arg:
; GFX940-NO-PRELOAD: ; %bb.0:
; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
@@ -1308,20 +800,7 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-NO-PRELOAD-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX940-PRELOAD-1-LABEL: v3i32_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-1-NEXT: ; %bb.0:
-; GFX940-PRELOAD-1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
-; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s4
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s5
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s6
-; GFX940-PRELOAD-1-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-2-LABEL: v3i32_preload_arg:
+; GFX940-PRELOAD-2-LABEL: v3i32_kernel_preload_arg:
; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
@@ -1332,18 +811,7 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-2-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
-; GFX940-PRELOAD-4-LABEL: v3i32_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-4-NEXT: ; %bb.0:
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s6
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s7
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s8
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v3, 0
-; GFX940-PRELOAD-4-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-8-LABEL: v3i32_preload_arg:
+; GFX940-PRELOAD-8-LABEL: v3i32_kernel_preload_arg:
; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
@@ -1354,7 +822,7 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-8-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
; GFX940-PRELOAD-8-NEXT: s_endpgm
;
-; GFX90a-NO-PRELOAD-LABEL: v3i32_preload_arg:
+; GFX90a-NO-PRELOAD-LABEL: v3i32_kernel_preload_arg:
; GFX90a-NO-PRELOAD: ; %bb.0:
; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
@@ -1366,20 +834,7 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-1-LABEL: v3i32_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-1-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
-; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s0
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s1
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s2
-; GFX90a-PRELOAD-1-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
-; GFX90a-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-2-LABEL: v3i32_preload_arg:
+; GFX90a-PRELOAD-2-LABEL: v3i32_kernel_preload_arg:
; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
@@ -1390,18 +845,7 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-2-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-4-LABEL: v3i32_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s10
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s11
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s12
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v3, 0
-; GFX90a-PRELOAD-4-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
-; GFX90a-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-8-LABEL: v3i32_preload_arg:
+; GFX90a-PRELOAD-8-LABEL: v3i32_kernel_preload_arg:
; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
@@ -1415,8 +859,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
ret void
}
-define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3 x float> %in) nounwind {
-; GFX940-NO-PRELOAD-LABEL: v3f32_preload_arg:
+define amdgpu_kernel void @v3f32_kernel_preload_arg(ptr addrspace(1) nocapture %out, <3 x float> %in) nounwind {
+; GFX940-NO-PRELOAD-LABEL: v3f32_kernel_preload_arg:
; GFX940-NO-PRELOAD: ; %bb.0:
; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
@@ -1428,20 +872,7 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-NO-PRELOAD-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX940-PRELOAD-1-LABEL: v3f32_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-1-NEXT: ; %bb.0:
-; GFX940-PRELOAD-1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
-; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s4
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s5
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s6
-; GFX940-PRELOAD-1-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-2-LABEL: v3f32_preload_arg:
+; GFX940-PRELOAD-2-LABEL: v3f32_kernel_preload_arg:
; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
@@ -1452,18 +883,7 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-2-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
-; GFX940-PRELOAD-4-LABEL: v3f32_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-4-NEXT: ; %bb.0:
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v3, 0
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s6
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s7
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s8
-; GFX940-PRELOAD-4-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-8-LABEL: v3f32_preload_arg:
+; GFX940-PRELOAD-8-LABEL: v3f32_kernel_preload_arg:
; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
@@ -1474,7 +894,7 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-8-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
; GFX940-PRELOAD-8-NEXT: s_endpgm
;
-; GFX90a-NO-PRELOAD-LABEL: v3f32_preload_arg:
+; GFX90a-NO-PRELOAD-LABEL: v3f32_kernel_preload_arg:
; GFX90a-NO-PRELOAD: ; %bb.0:
; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
@@ -1486,20 +906,7 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-1-LABEL: v3f32_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-1-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
-; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s0
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s1
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s2
-; GFX90a-PRELOAD-1-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
-; GFX90a-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-2-LABEL: v3f32_preload_arg:
+; GFX90a-PRELOAD-2-LABEL: v3f32_kernel_preload_arg:
; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
@@ -1510,18 +917,7 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-2-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-4-LABEL: v3f32_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v3, 0
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s10
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s11
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s12
-; GFX90a-PRELOAD-4-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
-; GFX90a-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-8-LABEL: v3f32_preload_arg:
+; GFX90a-PRELOAD-8-LABEL: v3f32_kernel_preload_arg:
; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
@@ -1535,8 +931,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
ret void
}
-define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5 x i8> %in) nounwind {
-; GFX940-NO-PRELOAD-LABEL: v5i8_preload_arg:
+define amdgpu_kernel void @v5i8_kernel_preload_arg(ptr addrspace(1) nocapture %out, <5 x i8> %in) nounwind {
+; GFX940-NO-PRELOAD-LABEL: v5i8_kernel_preload_arg:
; GFX940-NO-PRELOAD: ; %bb.0:
; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
@@ -1547,20 +943,7 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v2, s[0:1] sc0 sc1
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX940-PRELOAD-1-LABEL: v5i8_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-1-NEXT: ; %bb.0:
-; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s1
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s0
-; GFX940-PRELOAD-1-NEXT: global_store_byte v0, v1, s[2:3] offset:4 sc0 sc1
-; GFX940-PRELOAD-1-NEXT: global_store_dword v0, v2, s[2:3] sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-2-LABEL: v5i8_preload_arg:
+; GFX940-PRELOAD-2-LABEL: v5i8_kernel_preload_arg:
; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
@@ -1578,25 +961,7 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX940-PRELOAD-2-NEXT: global_store_dword v1, v0, s[2:3] sc0 sc1
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
-; GFX940-PRELOAD-4-LABEL: v5i8_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-4-NEXT: ; %bb.0:
-; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 8
-; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 24
-; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v1, 8, s0
-; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 16
-; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s5
-; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, 0
-; GFX940-PRELOAD-4-NEXT: global_store_byte v1, v2, s[2:3] offset:4 sc0 sc1
-; GFX940-PRELOAD-4-NEXT: global_store_dword v1, v0, s[2:3] sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-8-LABEL: v5i8_preload_arg:
+; GFX940-PRELOAD-8-LABEL: v5i8_kernel_preload_arg:
; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
@@ -1614,7 +979,7 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX940-PRELOAD-8-NEXT: global_store_dword v1, v0, s[2:3] sc0 sc1
; GFX940-PRELOAD-8-NEXT: s_endpgm
;
-; GFX90a-NO-PRELOAD-LABEL: v5i8_preload_arg:
+; GFX90a-NO-PRELOAD-LABEL: v5i8_kernel_preload_arg:
; GFX90a-NO-PRELOAD: ; %bb.0:
; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
@@ -1625,20 +990,7 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v2, s[0:1]
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-1-LABEL: v5i8_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
-; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s1
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s0
-; GFX90a-PRELOAD-1-NEXT: global_store_byte v0, v1, s[6:7] offset:4
-; GFX90a-PRELOAD-1-NEXT: global_store_dword v0, v2, s[6:7]
-; GFX90a-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-2-LABEL: v5i8_preload_arg:
+; GFX90a-PRELOAD-2-LABEL: v5i8_kernel_preload_arg:
; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
@@ -1656,25 +1008,7 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX90a-PRELOAD-2-NEXT: global_store_dword v1, v0, s[6:7]
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-4-LABEL: v5i8_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 8
-; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 24
-; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v1, 8, s0
-; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 16
-; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v0, s8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, 0
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s9
-; GFX90a-PRELOAD-4-NEXT: global_store_byte v1, v2, s[6:7] offset:4
-; GFX90a-PRELOAD-4-NEXT: global_store_dword v1, v0, s[6:7]
-; GFX90a-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-8-LABEL: v5i8_preload_arg:
+; GFX90a-PRELOAD-8-LABEL: v5i8_kernel_preload_arg:
; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
@@ -1695,8 +1029,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
ret void
}
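; The deleted PRELOAD-4 checks above show how a preloaded sub-dword vector
; is reassembled: <5 x i8> arrives as one 32-bit word (s4 or s8) plus a
; trailing byte in the next SGPR, and the backend re-packs the low dword
; with s_lshr_b32, v_lshlrev_b16, and v_or_b32_sdwa before issuing the
; dword and byte stores.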
-define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x double> %in) nounwind {
-; GFX940-NO-PRELOAD-LABEL: v5f64_arg:
+define amdgpu_kernel void @v5f64_kernel_preload_arg(ptr addrspace(1) nocapture %out, <5 x double> %in) nounwind {
+; GFX940-NO-PRELOAD-LABEL: v5f64_kernel_preload_arg:
; GFX940-NO-PRELOAD: ; %bb.0:
; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x60
; GFX940-NO-PRELOAD-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
@@ -1718,30 +1052,7 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX940-NO-PRELOAD-NEXT: global_store_dwordx4 v4, v[0:3], s[12:13] sc0 sc1
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX940-PRELOAD-1-LABEL: v5f64_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-1-NEXT: ; %bb.0:
-; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
-; GFX940-PRELOAD-1-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v4, 0
-; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-1-NEXT: v_mov_b64_e32 v[2:3], s[12:13]
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s8
-; GFX940-PRELOAD-1-NEXT: global_store_dwordx2 v4, v[2:3], s[2:3] offset:32 sc0 sc1
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s9
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s10
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v3, s11
-; GFX940-PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_nop 1
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s4
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s5
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s6
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v3, s7
-; GFX940-PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-2-LABEL: v5f64_arg:
+; GFX940-PRELOAD-2-LABEL: v5f64_kernel_preload_arg:
; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
@@ -1764,30 +1075,7 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX940-PRELOAD-2-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
-; GFX940-PRELOAD-4-LABEL: v5f64_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-4-NEXT: ; %bb.0:
-; GFX940-PRELOAD-4-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
-; GFX940-PRELOAD-4-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v4, 0
-; GFX940-PRELOAD-4-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-4-NEXT: v_mov_b64_e32 v[2:3], s[12:13]
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s8
-; GFX940-PRELOAD-4-NEXT: global_store_dwordx2 v4, v[2:3], s[2:3] offset:32 sc0 sc1
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s9
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s10
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v3, s11
-; GFX940-PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] offset:16 sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_nop 1
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s4
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s5
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s6
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v3, s7
-; GFX940-PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-8-LABEL: v5f64_arg:
+; GFX940-PRELOAD-8-LABEL: v5f64_kernel_preload_arg:
; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
@@ -1810,7 +1098,7 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX940-PRELOAD-8-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
; GFX940-PRELOAD-8-NEXT: s_endpgm
;
-; GFX90a-NO-PRELOAD-LABEL: v5f64_arg:
+; GFX90a-NO-PRELOAD-LABEL: v5f64_kernel_preload_arg:
; GFX90a-NO-PRELOAD: ; %bb.0:
; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
@@ -1832,30 +1120,7 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3]
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-1-LABEL: v5f64_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
-; GFX90a-PRELOAD-1-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v4, 0
-; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-1-NEXT: v_pk_mov_b32 v[2:3], s[0:1], s[0:1] op_sel:[0,1]
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s12
-; GFX90a-PRELOAD-1-NEXT: global_store_dwordx2 v4, v[2:3], s[6:7] offset:32
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s13
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s14
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v3, s15
-; GFX90a-PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7] offset:16
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, s8
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v1, s9
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, s10
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v3, s11
-; GFX90a-PRELOAD-1-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
-; GFX90a-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-2-LABEL: v5f64_arg:
+; GFX90a-PRELOAD-2-LABEL: v5f64_kernel_preload_arg:
; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
@@ -1878,30 +1143,7 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX90a-PRELOAD-2-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-4-LABEL: v5f64_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-4-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
-; GFX90a-PRELOAD-4-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v4, 0
-; GFX90a-PRELOAD-4-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-4-NEXT: v_pk_mov_b32 v[2:3], s[0:1], s[0:1] op_sel:[0,1]
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s12
-; GFX90a-PRELOAD-4-NEXT: global_store_dwordx2 v4, v[2:3], s[6:7] offset:32
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s13
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s14
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v3, s15
-; GFX90a-PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7] offset:16
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s8
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s9
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, s10
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v3, s11
-; GFX90a-PRELOAD-4-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
-; GFX90a-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-8-LABEL: v5f64_arg:
+; GFX90a-PRELOAD-8-LABEL: v5f64_kernel_preload_arg:
; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
@@ -1927,8 +1169,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
ret void
}
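; Worth noting about the deleted v5f64 PRELOAD-1 and PRELOAD-4 checks:
; apart from the trap-and-nop header, their bodies essentially repeat the
; NO-PRELOAD sequence (s_load_dwordx8 plus s_load_dwordx2 of the vector),
; so a <5 x double> argument was not preloaded at those settings even
; though the header was still emitted.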
-define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in) {
-; GFX940-NO-PRELOAD-LABEL: v8i8_preload_arg:
+define amdgpu_kernel void @v8i8_kernel_preload_arg(ptr addrspace(1) %out, <8 x i8> %in) {
+; GFX940-NO-PRELOAD-LABEL: v8i8_kernel_preload_arg:
; GFX940-NO-PRELOAD: ; %bb.0:
; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, 0
@@ -1937,18 +1179,7 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX940-NO-PRELOAD-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] sc0 sc1
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX940-PRELOAD-1-LABEL: v8i8_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-1-NEXT: ; %bb.0:
-; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-1-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
-; GFX940-PRELOAD-1-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-2-LABEL: v8i8_preload_arg:
+; GFX940-PRELOAD-2-LABEL: v8i8_kernel_preload_arg:
; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
@@ -1973,32 +1204,7 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX940-PRELOAD-2-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
-; GFX940-PRELOAD-4-LABEL: v8i8_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-4-NEXT: ; %bb.0:
-; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s5, 8
-; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s5, 24
-; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v1, 8, s0
-; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s5, 16
-; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v0, s5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 8
-; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 24
-; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v2, 8, s0
-; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 16
-; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v2, s0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-PRELOAD-4-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX940-PRELOAD-8-LABEL: v8i8_preload_arg:
+; GFX940-PRELOAD-8-LABEL: v8i8_kernel_preload_arg:
; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
@@ -2023,7 +1229,7 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX940-PRELOAD-8-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
; GFX940-PRELOAD-8-NEXT: s_endpgm
;
-; GFX90a-NO-PRELOAD-LABEL: v8i8_preload_arg:
+; GFX90a-NO-PRELOAD-LABEL: v8i8_kernel_preload_arg:
; GFX90a-NO-PRELOAD: ; %bb.0:
; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, 0
@@ -2032,18 +1238,7 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-1-LABEL: v8i8_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
-; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-1-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
-; GFX90a-PRELOAD-1-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
-; GFX90a-PRELOAD-1-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-2-LABEL: v8i8_preload_arg:
+; GFX90a-PRELOAD-2-LABEL: v8i8_kernel_preload_arg:
; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
@@ -2067,31 +1262,7 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX90a-PRELOAD-2-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-4-LABEL: v8i8_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s9, 8
-; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s9, 24
-; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v1, 8, s0
-; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s9, 16
-; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v0, s9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 8
-; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
-; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 24
-; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v2, 8, s0
-; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 16
-; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v0, s8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v2, s0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX90a-PRELOAD-4-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
-; GFX90a-PRELOAD-4-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
-; GFX90a-PRELOAD-4-NEXT: s_endpgm
-;
-; GFX90a-PRELOAD-8-LABEL: v8i8_preload_arg:
+; GFX90a-PRELOAD-8-LABEL: v8i8_kernel_preload_arg:
; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
@@ -2129,17 +1300,6 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX940-NO-PRELOAD-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] sc0 sc1
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX940-PRELOAD-1-LABEL: i64_kernel_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-1-NEXT: ; %bb.0:
-; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-1-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
-; GFX940-PRELOAD-1-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_endpgm
-;
; GFX940-PRELOAD-2-LABEL: i64_kernel_preload_arg:
; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
@@ -2149,15 +1309,6 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX940-PRELOAD-2-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
-; GFX940-PRELOAD-4-LABEL: i64_kernel_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-4-NEXT: ; %bb.0:
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-PRELOAD-4-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
-; GFX940-PRELOAD-4-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_endpgm
-;
; GFX940-PRELOAD-8-LABEL: i64_kernel_preload_arg:
; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
@@ -2177,17 +1328,6 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-1-LABEL: i64_kernel_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
-; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-1-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
-; GFX90a-PRELOAD-1-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
-; GFX90a-PRELOAD-1-NEXT: s_endpgm
-;
; GFX90a-PRELOAD-2-LABEL: i64_kernel_preload_arg:
; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
@@ -2197,15 +1337,6 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX90a-PRELOAD-2-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-4-LABEL: i64_kernel_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
-; GFX90a-PRELOAD-4-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
-; GFX90a-PRELOAD-4-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
-; GFX90a-PRELOAD-4-NEXT: s_endpgm
-;
; GFX90a-PRELOAD-8-LABEL: i64_kernel_preload_arg:
; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
@@ -2229,17 +1360,6 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX940-NO-PRELOAD-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] sc0 sc1
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX940-PRELOAD-1-LABEL: f64_kernel_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-1-NEXT: ; %bb.0:
-; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
-; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-PRELOAD-1-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
-; GFX940-PRELOAD-1-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
-; GFX940-PRELOAD-1-NEXT: s_endpgm
-;
; GFX940-PRELOAD-2-LABEL: f64_kernel_preload_arg:
; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
@@ -2249,15 +1369,6 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX940-PRELOAD-2-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
-; GFX940-PRELOAD-4-LABEL: f64_kernel_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX940-PRELOAD-4-NEXT: ; %bb.0:
-; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-PRELOAD-4-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
-; GFX940-PRELOAD-4-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] sc0 sc1
-; GFX940-PRELOAD-4-NEXT: s_endpgm
-;
; GFX940-PRELOAD-8-LABEL: f64_kernel_preload_arg:
; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
@@ -2277,17 +1388,6 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-1-LABEL: f64_kernel_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
-; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
-; GFX90a-PRELOAD-1-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90a-PRELOAD-1-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
-; GFX90a-PRELOAD-1-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
-; GFX90a-PRELOAD-1-NEXT: s_endpgm
-;
; GFX90a-PRELOAD-2-LABEL: f64_kernel_preload_arg:
; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
@@ -2297,15 +1397,6 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX90a-PRELOAD-2-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
-; GFX90a-PRELOAD-4-LABEL: f64_kernel_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
-; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
-; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
-; GFX90a-PRELOAD-4-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
-; GFX90a-PRELOAD-4-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
-; GFX90a-PRELOAD-4-NEXT: s_endpgm
-;
; GFX90a-PRELOAD-8-LABEL: f64_kernel_preload_arg:
; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
@@ -2317,3 +1408,1115 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
store double %in, ptr addrspace(1) %out
ret void
}
+
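+; With kernarg preloading, the half value arrives in an SGPR (s4 on gfx940, s8 on gfx90a),
+; so the PRELOAD variants below skip the s_load/s_waitcnt pair used by the NO-PRELOAD path.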
+define amdgpu_kernel void @half_kernel_preload_arg(ptr addrspace(1) %out, half %in) {
+; GFX940-NO-PRELOAD-LABEL: half_kernel_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-NO-PRELOAD-NEXT: global_store_short v0, v1, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: half_kernel_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-2-NEXT: global_store_short v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: half_kernel_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-8-NEXT: global_store_short v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: half_kernel_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x8
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_short v0, v1, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: half_kernel_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-2-NEXT: global_store_short v0, v1, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: half_kernel_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-8-NEXT: global_store_short v0, v1, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
+ store half %in, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @bfloat_kernel_preload_arg(ptr addrspace(1) %out, bfloat %in) {
+; GFX940-NO-PRELOAD-LABEL: bfloat_kernel_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-NO-PRELOAD-NEXT: global_store_short v0, v1, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: bfloat_kernel_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-2-NEXT: global_store_short v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: bfloat_kernel_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-8-NEXT: global_store_short v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: bfloat_kernel_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x8
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_short v0, v1, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: bfloat_kernel_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-2-NEXT: global_store_short v0, v1, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: bfloat_kernel_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-8-NEXT: global_store_short v0, v1, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
+ store bfloat %in, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @v2bfloat_kernel_preload_arg(ptr addrspace(1) %out, <2 x bfloat> %in) {
+; GFX940-NO-PRELOAD-LABEL: v2bfloat_kernel_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: v2bfloat_kernel_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: v2bfloat_kernel_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: v2bfloat_kernel_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x8
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: v2bfloat_kernel_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-2-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: v2bfloat_kernel_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-8-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
+ store <2 x bfloat> %in, ptr addrspace(1) %out
+ ret void
+}
+
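+; The 48-bit <3 x bfloat> store is split into a short store at offset 4 and a dword store at
+; offset 0, read directly from preloaded SGPRs in the PRELOAD variants.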
+define amdgpu_kernel void @v3bfloat_kernel_preload_arg(ptr addrspace(1) %out, <3 x bfloat> %in) {
+; GFX940-NO-PRELOAD-LABEL: v3bfloat_kernel_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s3
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s2
+; GFX940-NO-PRELOAD-NEXT: global_store_short v0, v1, s[0:1] offset:4 sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v2, s[0:1] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: v3bfloat_kernel_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-2-NEXT: global_store_short v0, v1, s[2:3] offset:4 sc0 sc1
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-2-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: v3bfloat_kernel_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-8-NEXT: global_store_short v0, v1, s[2:3] offset:4 sc0 sc1
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-8-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: v3bfloat_kernel_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s3
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_short v0, v1, s[0:1] offset:4
+; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v2, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: v3bfloat_kernel_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s9
+; GFX90a-PRELOAD-2-NEXT: global_store_short v0, v1, s[6:7] offset:4
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-2-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: v3bfloat_kernel_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s9
+; GFX90a-PRELOAD-8-NEXT: global_store_short v0, v1, s[6:7] offset:4
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-8-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
+ store <3 x bfloat> %in, ptr addrspace(1) %out
+ ret void
+}
+
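+; The 96-bit <6 x bfloat> value is stored with a single dwordx3 built from three consecutive
+; preloaded SGPRs.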
+define amdgpu_kernel void @v6bfloat_kernel_preload_arg(ptr addrspace(1) %out, <6 x bfloat> %in) {
+; GFX940-NO-PRELOAD-LABEL: v6bfloat_kernel_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-NO-PRELOAD-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: v6bfloat_kernel_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s6
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s7
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s8
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-PRELOAD-2-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: v6bfloat_kernel_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s6
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s7
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s8
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-PRELOAD-8-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: v6bfloat_kernel_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s1
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: v6bfloat_kernel_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s10
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s11
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s12
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-PRELOAD-2-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: v6bfloat_kernel_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s10
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s11
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s12
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-PRELOAD-8-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
+ store <6 x bfloat> %in, ptr addrspace(1) %out
+ ret void
+}
+
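+; Only the leading arguments fit in the preload window at low counts: PRELOAD-2 still fetches
+; the <7 x bfloat> payload and the second pointer with s_load, while larger preload counts
+; fold more of the argument list into SGPRs.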
+define amdgpu_kernel void @half_v7bfloat_kernel_preload_arg(ptr addrspace(1) %out, half %in, <7 x bfloat> %in2, ptr addrspace(1) %out2) {
+; GFX940-NO-PRELOAD-LABEL: half_v7bfloat_kernel_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s10, s[0:1], 0x8
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x20
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s10
+; GFX940-NO-PRELOAD-NEXT: global_store_short v3, v0, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s7
+; GFX940-NO-PRELOAD-NEXT: global_store_short v3, v0, s[8:9] offset:12 sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-NO-PRELOAD-NEXT: global_store_dwordx3 v3, v[0:2], s[8:9] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: half_v7bfloat_kernel_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0x10
+; GFX940-PRELOAD-2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x20
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-PRELOAD-2-NEXT: global_store_short v3, v0, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s11
+; GFX940-PRELOAD-2-NEXT: global_store_short v3, v0, s[6:7] offset:12 sc0 sc1
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s10
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s8
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s9
+; GFX940-PRELOAD-2-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: half_v7bfloat_kernel_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-PRELOAD-8-NEXT: global_store_short v3, v0, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s9
+; GFX940-PRELOAD-8-NEXT: global_store_short v3, v0, s[10:11] offset:12 sc0 sc1
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s8
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s6
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s7
+; GFX940-PRELOAD-8-NEXT: global_store_dwordx3 v3, v[0:2], s[10:11] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: half_v7bfloat_kernel_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dword s10, s[4:5], 0x8
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x20
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s10
+; GFX90a-NO-PRELOAD-NEXT: global_store_short v3, v0, s[6:7]
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s3
+; GFX90a-NO-PRELOAD-NEXT: global_store_short v3, v0, s[8:9] offset:12
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s2
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s1
+; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx3 v3, v[0:2], s[8:9]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: half_v7bfloat_kernel_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
+; GFX90a-PRELOAD-2-NEXT: s_load_dwordx2 s[10:11], s[4:5], 0x20
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s8
+; GFX90a-PRELOAD-2-NEXT: global_store_short v3, v0, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s3
+; GFX90a-PRELOAD-2-NEXT: global_store_short v3, v0, s[10:11] offset:12
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s2
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s1
+; GFX90a-PRELOAD-2-NEXT: global_store_dwordx3 v3, v[0:2], s[10:11]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: half_v7bfloat_kernel_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x20
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s8
+; GFX90a-PRELOAD-8-NEXT: global_store_short v3, v0, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s13
+; GFX90a-PRELOAD-8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-8-NEXT: global_store_short v3, v0, s[0:1] offset:12
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s12
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s10
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s11
+; GFX90a-PRELOAD-8-NEXT: global_store_dwordx3 v3, v[0:2], s[0:1]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
+ store half %in, ptr addrspace(1) %out
+ store <7 x bfloat> %in2, ptr addrspace(1) %out2
+ ret void
+}
+
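+; The preloaded i1 occupies a full SGPR and is masked to its low bit with s_and_b32 before
+; the byte store.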
+define amdgpu_kernel void @i1_kernel_preload_arg(ptr addrspace(1) %out, i1 %in) {
+; GFX940-NO-PRELOAD-LABEL: i1_kernel_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: s_and_b32 s0, s4, 1
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-NO-PRELOAD-NEXT: global_store_byte v0, v1, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: i1_kernel_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: s_and_b32 s0, s4, 1
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-2-NEXT: global_store_byte v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: i1_kernel_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: s_and_b32 s0, s4, 1
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-PRELOAD-8-NEXT: global_store_byte v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: i1_kernel_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x8
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: s_and_b32 s2, s2, 1
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_byte v0, v1, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: i1_kernel_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: s_and_b32 s0, s8, 1
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-2-NEXT: global_store_byte v0, v1, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: i1_kernel_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: s_and_b32 s0, s8, 1
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s0
+; GFX90a-PRELOAD-8-NEXT: global_store_byte v0, v1, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
+ store i1 %in, ptr addrspace(1) %out
+ ret void
+}
+
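+; All four dwords of the fp128 value come from consecutive preloaded SGPRs (s6-s9 on gfx940,
+; s10-s13 on gfx90a) and are written with one dwordx4 store.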
+define amdgpu_kernel void @fp128_kernel_preload_arg(ptr addrspace(1) %out, fp128 %in) {
+; GFX940-NO-PRELOAD-LABEL: fp128_kernel_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v4, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
+; GFX940-NO-PRELOAD-NEXT: v_mov_b64_e32 v[2:3], s[6:7]
+; GFX940-NO-PRELOAD-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: fp128_kernel_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v4, 0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s6
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s7
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s8
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v3, s9
+; GFX940-PRELOAD-2-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: fp128_kernel_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v4, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s6
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s7
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s8
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v3, s9
+; GFX940-PRELOAD-8-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: fp128_kernel_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v4, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
+; GFX90a-NO-PRELOAD-NEXT: v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1]
+; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: fp128_kernel_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v4, 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s10
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s11
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s12
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v3, s13
+; GFX90a-PRELOAD-2-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: fp128_kernel_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v4, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s10
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s11
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s12
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v3, s13
+; GFX90a-PRELOAD-8-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
+ store fp128 %in, ptr addrspace(1) %out
+ ret void
+}
+
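+; Preloading does not remove the sub-dword repacking: the <7 x i8> bytes are reassembled from
+; SGPRs with s_lshr_b32/v_lshlrev_b16/v_or_b32_sdwa before the byte, short, and dword stores.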
+define amdgpu_kernel void @v7i8_kernel_preload_arg(ptr addrspace(1) %out, <7 x i8> %in) {
+; GFX940-NO-PRELOAD-LABEL: v7i8_kernel_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s3
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s2
+; GFX940-NO-PRELOAD-NEXT: global_store_byte_d16_hi v0, v1, s[0:1] offset:6 sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: global_store_short v0, v1, s[0:1] offset:4 sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v2, s[0:1] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: v7i8_kernel_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 8
+; GFX940-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 24
+; GFX940-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 16
+; GFX940-PRELOAD-2-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-2-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s5, 8
+; GFX940-PRELOAD-2-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX940-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v3, s5
+; GFX940-PRELOAD-2-NEXT: v_or_b32_sdwa v1, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-2-NEXT: global_store_byte_d16_hi v2, v3, s[2:3] offset:6 sc0 sc1
+; GFX940-PRELOAD-2-NEXT: global_store_short v2, v1, s[2:3] offset:4 sc0 sc1
+; GFX940-PRELOAD-2-NEXT: global_store_dword v2, v0, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: v7i8_kernel_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 8
+; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 24
+; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 16
+; GFX940-PRELOAD-8-NEXT: v_or_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-8-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s5, 8
+; GFX940-PRELOAD-8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v3, s5
+; GFX940-PRELOAD-8-NEXT: v_or_b32_sdwa v1, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-8-NEXT: global_store_byte_d16_hi v2, v3, s[2:3] offset:6 sc0 sc1
+; GFX940-PRELOAD-8-NEXT: global_store_short v2, v1, s[2:3] offset:4 sc0 sc1
+; GFX940-PRELOAD-8-NEXT: global_store_dword v2, v0, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: v7i8_kernel_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s3
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_byte_d16_hi v0, v1, s[0:1] offset:6
+; GFX90a-NO-PRELOAD-NEXT: global_store_short v0, v1, s[0:1] offset:4
+; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v2, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: v7i8_kernel_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s8, 8
+; GFX90a-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s8, 24
+; GFX90a-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s8, 16
+; GFX90a-PRELOAD-2-NEXT: v_or_b32_sdwa v0, s8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-2-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s9, 8
+; GFX90a-PRELOAD-2-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX90a-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v3, s9
+; GFX90a-PRELOAD-2-NEXT: v_or_b32_sdwa v1, s9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-2-NEXT: global_store_byte_d16_hi v2, v3, s[6:7] offset:6
+; GFX90a-PRELOAD-2-NEXT: global_store_short v2, v1, s[6:7] offset:4
+; GFX90a-PRELOAD-2-NEXT: global_store_dword v2, v0, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: v7i8_kernel_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 8
+; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 24
+; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 16
+; GFX90a-PRELOAD-8-NEXT: v_or_b32_sdwa v0, s8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-8-NEXT: v_or_b32_sdwa v1, s0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s9, 8
+; GFX90a-PRELOAD-8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v1, 8, s0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v3, s9
+; GFX90a-PRELOAD-8-NEXT: v_or_b32_sdwa v1, s9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-8-NEXT: global_store_byte_d16_hi v2, v3, s[6:7] offset:6
+; GFX90a-PRELOAD-8-NEXT: global_store_short v2, v1, s[6:7] offset:4
+; GFX90a-PRELOAD-8-NEXT: global_store_dword v2, v0, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
+ store <7 x i8> %in, ptr addrspace(1) %out
+ ret void
+}
+
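+; <7 x half> is stored as a short at offset 12 plus a dwordx3, taken straight from the
+; preloaded SGPR block.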
+define amdgpu_kernel void @v7half_kernel_preload_arg(ptr addrspace(1) %out, <7 x half> %in) {
+; GFX940-NO-PRELOAD-LABEL: v7half_kernel_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s7
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-NO-PRELOAD-NEXT: global_store_short v3, v1, s[2:3] offset:12 sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-NO-PRELOAD-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: v7half_kernel_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s9
+; GFX940-PRELOAD-2-NEXT: global_store_short v3, v0, s[2:3] offset:12 sc0 sc1
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s8
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s6
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s7
+; GFX940-PRELOAD-2-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: v7half_kernel_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s9
+; GFX940-PRELOAD-8-NEXT: global_store_short v3, v0, s[2:3] offset:12 sc0 sc1
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s8
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s6
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s7
+; GFX940-PRELOAD-8-NEXT: global_store_dwordx3 v3, v[0:2], s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: v7half_kernel_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s3
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s2
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s0
+; GFX90a-NO-PRELOAD-NEXT: global_store_short v3, v1, s[6:7] offset:12
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s1
+; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: v7half_kernel_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s13
+; GFX90a-PRELOAD-2-NEXT: global_store_short v3, v0, s[6:7] offset:12
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s12
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s10
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s11
+; GFX90a-PRELOAD-2-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: v7half_kernel_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s13
+; GFX90a-PRELOAD-8-NEXT: global_store_short v3, v0, s[6:7] offset:12
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s12
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s10
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s11
+; GFX90a-PRELOAD-8-NEXT: global_store_dwordx3 v3, v[0:2], s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
+ store <7 x half> %in, ptr addrspace(1) %out
+ ret void
+}
+
+; Test the case where the previous argument is not dword aligned.
+define amdgpu_kernel void @i16_i32_kernel_preload_arg(ptr addrspace(1) %out, i16 %in, i32 %in2, ptr addrspace(1) %out2) {
+; GFX940-NO-PRELOAD-LABEL: i16_i32_kernel_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x10
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s6
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s7
+; GFX940-NO-PRELOAD-NEXT: global_store_short v0, v1, s[4:5] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v2, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: i16_i32_kernel_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: s_load_dword s5, s[0:1], 0xc
+; GFX940-PRELOAD-2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x10
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-2-NEXT: global_store_short v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-2-NEXT: global_store_dword v0, v1, s[6:7] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: i16_i32_kernel_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-8-NEXT: global_store_short v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-8-NEXT: global_store_dword v0, v1, s[6:7] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: i16_i32_kernel_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x10
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s2
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s3
+; GFX90a-NO-PRELOAD-NEXT: global_store_short v0, v1, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v2, s[6:7]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: i16_i32_kernel_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: s_load_dword s2, s[4:5], 0xc
+; GFX90a-PRELOAD-2-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x10
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-2-NEXT: global_store_short v0, v1, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s2
+; GFX90a-PRELOAD-2-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: i16_i32_kernel_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-8-NEXT: global_store_short v0, v1, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s9
+; GFX90a-PRELOAD-8-NEXT: global_store_dword v0, v1, s[10:11]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
+ store i16 %in, ptr addrspace(1) %out
+ store i32 %in2, ptr addrspace(1) %out2
+ ret void
+}
+
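+; The <3 x i32> argument is aligned to offset 0x10, so PRELOAD-2 still fetches it (and the
+; trailing pointer) with s_load, while PRELOAD-8 receives the vector in SGPRs.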
+define amdgpu_kernel void @i16_v3i32_kernel_preload_arg(ptr addrspace(1) %out, i16 %in, <3 x i32> %in2, ptr addrspace(1) %out2) {
+; GFX940-NO-PRELOAD-LABEL: i16_v3i32_kernel_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s7, s[0:1], 0x8
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x20
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v4, s7
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s6
+; GFX940-NO-PRELOAD-NEXT: global_store_short v3, v4, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: global_store_dwordx3 v3, v[0:2], s[8:9] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: i16_v3i32_kernel_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0x10
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-PRELOAD-2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x20
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v4, s4
+; GFX940-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s8
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s9
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s10
+; GFX940-PRELOAD-2-NEXT: global_store_short v3, v4, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: global_store_dwordx3 v3, v[0:2], s[0:1] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: i16_v3i32_kernel_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v4, s4
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s6
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s7
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s8
+; GFX940-PRELOAD-8-NEXT: global_store_short v3, v4, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: global_store_dwordx3 v3, v[0:2], s[10:11] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: i16_v3i32_kernel_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: s_load_dword s3, s[4:5], 0x8
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x20
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v4, s3
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, s0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s1
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v2, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_short v3, v4, s[6:7]
+; GFX90a-NO-PRELOAD-NEXT: global_store_dwordx3 v3, v[0:2], s[8:9]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: i16_v3i32_kernel_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-PRELOAD-2-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x20
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v4, s8
+; GFX90a-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s1
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, s2
+; GFX90a-PRELOAD-2-NEXT: global_store_short v3, v4, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: global_store_dwordx3 v3, v[0:2], s[4:5]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: i16_v3i32_kernel_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x20
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v4, s8
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s10
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s11
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s12
+; GFX90a-PRELOAD-8-NEXT: global_store_short v3, v4, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-8-NEXT: global_store_dwordx3 v3, v[0:2], s[0:1]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
+ store i16 %in, ptr addrspace(1) %out
+ store <3 x i32> %in2, ptr addrspace(1) %out2
+ ret void
+}
+
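+; Two i16 arguments share one kernarg dword; the second is stored from the dword's high half
+; via global_store_short_d16_hi.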
+define amdgpu_kernel void @i16_i16_kernel_preload_arg(ptr addrspace(1) %out, i16 %in, i16 %in2, ptr addrspace(1) %out2) {
+; GFX940-NO-PRELOAD-LABEL: i16_i16_kernel_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s6, s[0:1], 0x8
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x10
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s6
+; GFX940-NO-PRELOAD-NEXT: global_store_short v0, v1, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: global_store_short_d16_hi v0, v1, s[4:5] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: i16_i16_kernel_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: s_load_dword s5, s[0:1], 0x8
+; GFX940-PRELOAD-2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x10
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-2-NEXT: global_store_short v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-2-NEXT: global_store_short_d16_hi v0, v1, s[6:7] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: i16_i16_kernel_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-8-NEXT: global_store_short v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: global_store_short_d16_hi v0, v1, s[6:7] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: i16_i16_kernel_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dword s6, s[4:5], 0x8
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x10
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s6
+; GFX90a-NO-PRELOAD-NEXT: global_store_short v0, v1, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: global_store_short_d16_hi v0, v1, s[2:3]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: i16_i16_kernel_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: s_load_dword s2, s[4:5], 0x8
+; GFX90a-PRELOAD-2-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x10
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-2-NEXT: global_store_short v0, v1, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s2
+; GFX90a-PRELOAD-2-NEXT: global_store_short_d16_hi v0, v1, s[0:1]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: i16_i16_kernel_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-8-NEXT: global_store_short v0, v1, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: global_store_short_d16_hi v0, v1, s[10:11]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
+ store i16 %in, ptr addrspace(1) %out
+ store i16 %in2, ptr addrspace(1) %out2
+ ret void
+}
+
+define amdgpu_kernel void @i16_v2i8_kernel_preload_arg(ptr addrspace(1) %out, i16 %in, <2 x i8> %in2, ptr addrspace(1) %out2) {
+; GFX940-NO-PRELOAD-LABEL: i16_v2i8_kernel_preload_arg:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s6, s[0:1], 0x8
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x10
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s6
+; GFX940-NO-PRELOAD-NEXT: global_store_short v0, v1, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: global_store_short_d16_hi v0, v1, s[4:5] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-2-LABEL: i16_v2i8_kernel_preload_arg:
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-2-NEXT: ; %bb.0:
+; GFX940-PRELOAD-2-NEXT: s_load_dword s5, s[0:1], 0x8
+; GFX940-PRELOAD-2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x10
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-2-NEXT: global_store_short v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-2-NEXT: global_store_short_d16_hi v0, v1, s[6:7] sc0 sc1
+; GFX940-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-8-LABEL: i16_v2i8_kernel_preload_arg:
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-8-NEXT: ; %bb.0:
+; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 24
+; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 16
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, 0
+; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s4
+; GFX940-PRELOAD-8-NEXT: v_or_b32_sdwa v0, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX940-PRELOAD-8-NEXT: global_store_short v1, v2, s[2:3] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: global_store_short v1, v0, s[6:7] sc0 sc1
+; GFX940-PRELOAD-8-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: i16_v2i8_kernel_preload_arg:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dword s6, s[4:5], 0x8
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x10
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s6
+; GFX90a-NO-PRELOAD-NEXT: global_store_short v0, v1, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: global_store_short_d16_hi v0, v1, s[2:3]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-2-LABEL: i16_v2i8_kernel_preload_arg:
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-2-NEXT: s_load_dword s2, s[4:5], 0x8
+; GFX90a-PRELOAD-2-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x10
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-2-NEXT: global_store_short v0, v1, s[6:7]
+; GFX90a-PRELOAD-2-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s2
+; GFX90a-PRELOAD-2-NEXT: global_store_short_d16_hi v0, v1, s[0:1]
+; GFX90a-PRELOAD-2-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-8-LABEL: i16_v2i8_kernel_preload_arg:
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 24
+; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
+; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 16
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, 0
+; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, s8
+; GFX90a-PRELOAD-8-NEXT: v_or_b32_sdwa v0, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX90a-PRELOAD-8-NEXT: global_store_short v1, v2, s[6:7]
+; GFX90a-PRELOAD-8-NEXT: global_store_short v1, v0, s[10:11]
+; GFX90a-PRELOAD-8-NEXT: s_endpgm
+ store i16 %in, ptr addrspace(1) %out
+ store <2 x i8> %in2, ptr addrspace(1) %out2
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/sad.ll b/llvm/test/CodeGen/AMDGPU/sad.ll
index 1b0306559295..0492c5663e66 100644
--- a/llvm/test/CodeGen/AMDGPU/sad.ll
+++ b/llvm/test/CodeGen/AMDGPU/sad.ll
@@ -1,8 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=kaveri -earlycse-debug-hash -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
-; GCN-LABEL: {{^}}v_sad_u32_pat1:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_sad_u32_pat1(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: v_sad_u32_pat1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v0, s1
+; GCN-NEXT: v_mov_b32_e32 v1, s2
+; GCN-NEXT: v_sad_u32 v2, s0, v0, v1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: v_mov_b32_e32 v1, s5
+; GCN-NEXT: flat_store_dword v[0:1], v2
+; GCN-NEXT: s_endpgm
%icmp0 = icmp ugt i32 %a, %b
%t0 = select i1 %icmp0, i32 %a, i32 %b
@@ -16,9 +27,18 @@ define amdgpu_kernel void @v_sad_u32_pat1(ptr addrspace(1) %out, i32 %a, i32 %b,
ret void
}
-; GCN-LABEL: {{^}}v_sad_u32_constant_pat1:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, 20
define amdgpu_kernel void @v_sad_u32_constant_pat1(ptr addrspace(1) %out, i32 %a) {
+; GCN-LABEL: v_sad_u32_constant_pat1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dword s2, s[4:5], 0x2
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GCN-NEXT: v_mov_b32_e32 v0, 0x5a
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: v_sad_u32 v2, s2, v0, 20
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_mov_b32_e32 v1, s1
+; GCN-NEXT: flat_store_dword v[0:1], v2
+; GCN-NEXT: s_endpgm
%icmp0 = icmp ugt i32 %a, 90
%t0 = select i1 %icmp0, i32 %a, i32 90
@@ -32,9 +52,19 @@ define amdgpu_kernel void @v_sad_u32_constant_pat1(ptr addrspace(1) %out, i32 %a
ret void
}
-; GCN-LABEL: {{^}}v_sad_u32_pat2:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_sad_u32_pat2(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: v_sad_u32_pat2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v0, s1
+; GCN-NEXT: v_mov_b32_e32 v1, s2
+; GCN-NEXT: v_sad_u32 v2, s0, v0, v1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: v_mov_b32_e32 v1, s5
+; GCN-NEXT: flat_store_dword v[0:1], v2
+; GCN-NEXT: s_endpgm
%icmp0 = icmp ugt i32 %a, %b
%sub0 = sub i32 %a, %b
%sub1 = sub i32 %b, %a
@@ -46,12 +76,28 @@ define amdgpu_kernel void @v_sad_u32_pat2(ptr addrspace(1) %out, i32 %a, i32 %b,
ret void
}
-; GCN-LABEL: {{^}}v_sad_u32_multi_use_sub_pat1:
-; GCN: s_max_u32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_min_u32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
define amdgpu_kernel void @v_sad_u32_multi_use_sub_pat1(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: v_sad_u32_multi_use_sub_pat1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_mov_b64 s[10:11], s[2:3]
+; GCN-NEXT: s_mov_b64 s[8:9], s[0:1]
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT: s_add_u32 s8, s8, s7
+; GCN-NEXT: s_addc_u32 s9, s9, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_max_u32 s3, s0, s1
+; GCN-NEXT: s_min_u32 s0, s0, s1
+; GCN-NEXT: s_sub_i32 s0, s3, s0
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: v_mov_b32_e32 v2, s0
+; GCN-NEXT: s_add_i32 s0, s0, s2
+; GCN-NEXT: v_mov_b32_e32 v1, s5
+; GCN-NEXT: buffer_store_dword v2, v0, s[8:11], 0 offen
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v2, s0
+; GCN-NEXT: flat_store_dword v[0:1], v2
+; GCN-NEXT: s_endpgm
%icmp0 = icmp ugt i32 %a, %b
%t0 = select i1 %icmp0, i32 %a, i32 %b
@@ -66,9 +112,25 @@ define amdgpu_kernel void @v_sad_u32_multi_use_sub_pat1(ptr addrspace(1) %out, i
ret void
}
-; GCN-LABEL: {{^}}v_sad_u32_multi_use_add_pat1:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_sad_u32_multi_use_add_pat1(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: v_sad_u32_multi_use_add_pat1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_mov_b64 s[10:11], s[2:3]
+; GCN-NEXT: s_mov_b64 s[8:9], s[0:1]
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT: s_add_u32 s8, s8, s7
+; GCN-NEXT: s_addc_u32 s9, s9, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v2, s1
+; GCN-NEXT: v_mov_b32_e32 v3, s2
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: v_mov_b32_e32 v1, s5
+; GCN-NEXT: v_sad_u32 v2, s0, v2, v3
+; GCN-NEXT: buffer_store_dword v2, v0, s[8:11], 0 offen
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: flat_store_dword v[0:1], v2
+; GCN-NEXT: s_endpgm
%icmp0 = icmp ugt i32 %a, %b
%t0 = select i1 %icmp0, i32 %a, i32 %b
@@ -82,9 +144,27 @@ define amdgpu_kernel void @v_sad_u32_multi_use_add_pat1(ptr addrspace(1) %out, i
ret void
}
-; GCN-LABEL: {{^}}v_sad_u32_multi_use_max_pat1:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_sad_u32_multi_use_max_pat1(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: v_sad_u32_multi_use_max_pat1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_mov_b64 s[10:11], s[2:3]
+; GCN-NEXT: s_mov_b64 s[8:9], s[0:1]
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT: s_add_u32 s8, s8, s7
+; GCN-NEXT: s_addc_u32 s9, s9, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_max_u32 s3, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s1
+; GCN-NEXT: v_mov_b32_e32 v1, s2
+; GCN-NEXT: v_mov_b32_e32 v2, s3
+; GCN-NEXT: v_sad_u32 v3, s0, v0, v1
+; GCN-NEXT: buffer_store_dword v2, v0, s[8:11], 0 offen
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: v_mov_b32_e32 v1, s5
+; GCN-NEXT: flat_store_dword v[0:1], v3
+; GCN-NEXT: s_endpgm
%icmp0 = icmp ugt i32 %a, %b
%t0 = select i1 %icmp0, i32 %a, i32 %b
store volatile i32 %t0, ptr addrspace(5) undef
@@ -99,9 +179,27 @@ define amdgpu_kernel void @v_sad_u32_multi_use_max_pat1(ptr addrspace(1) %out, i
ret void
}
-; GCN-LABEL: {{^}}v_sad_u32_multi_use_min_pat1:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_sad_u32_multi_use_min_pat1(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: v_sad_u32_multi_use_min_pat1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_mov_b64 s[10:11], s[2:3]
+; GCN-NEXT: s_mov_b64 s[8:9], s[0:1]
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT: s_add_u32 s8, s8, s7
+; GCN-NEXT: s_addc_u32 s9, s9, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_min_u32 s3, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s1
+; GCN-NEXT: v_mov_b32_e32 v1, s2
+; GCN-NEXT: v_mov_b32_e32 v2, s3
+; GCN-NEXT: v_sad_u32 v3, s0, v0, v1
+; GCN-NEXT: buffer_store_dword v2, v0, s[8:11], 0 offen
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: v_mov_b32_e32 v1, s5
+; GCN-NEXT: flat_store_dword v[0:1], v3
+; GCN-NEXT: s_endpgm
%icmp0 = icmp ugt i32 %a, %b
%t0 = select i1 %icmp0, i32 %a, i32 %b
@@ -117,9 +215,27 @@ define amdgpu_kernel void @v_sad_u32_multi_use_min_pat1(ptr addrspace(1) %out, i
ret void
}
-; GCN-LABEL: {{^}}v_sad_u32_multi_use_sub_pat2:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_sad_u32_multi_use_sub_pat2(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: v_sad_u32_multi_use_sub_pat2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_mov_b64 s[10:11], s[2:3]
+; GCN-NEXT: s_mov_b64 s[8:9], s[0:1]
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT: s_add_u32 s8, s8, s7
+; GCN-NEXT: s_addc_u32 s9, s9, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_sub_i32 s3, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s1
+; GCN-NEXT: v_mov_b32_e32 v1, s2
+; GCN-NEXT: v_mov_b32_e32 v2, s3
+; GCN-NEXT: v_sad_u32 v3, s0, v0, v1
+; GCN-NEXT: buffer_store_dword v2, v0, s[8:11], 0 offen
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: v_mov_b32_e32 v1, s5
+; GCN-NEXT: flat_store_dword v[0:1], v3
+; GCN-NEXT: s_endpgm
%icmp0 = icmp ugt i32 %a, %b
%sub0 = sub i32 %a, %b
store volatile i32 %sub0, ptr addrspace(5) undef
@@ -132,11 +248,29 @@ define amdgpu_kernel void @v_sad_u32_multi_use_sub_pat2(ptr addrspace(1) %out, i
ret void
}
-; GCN-LABEL: {{^}}v_sad_u32_multi_use_select_pat2:
-; GCN-DAG: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: s_cmp_gt_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
define amdgpu_kernel void @v_sad_u32_multi_use_select_pat2(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: v_sad_u32_multi_use_select_pat2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_mov_b64 s[10:11], s[2:3]
+; GCN-NEXT: s_mov_b64 s[8:9], s[0:1]
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT: s_add_u32 s8, s8, s7
+; GCN-NEXT: s_addc_u32 s9, s9, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_sub_i32 s3, s0, s1
+; GCN-NEXT: s_sub_i32 s6, s1, s0
+; GCN-NEXT: s_cmp_gt_u32 s0, s1
+; GCN-NEXT: s_cselect_b32 s0, s3, s6
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: v_mov_b32_e32 v2, s0
+; GCN-NEXT: s_add_i32 s0, s0, s2
+; GCN-NEXT: v_mov_b32_e32 v1, s5
+; GCN-NEXT: buffer_store_dword v2, v0, s[8:11], 0 offen
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v2, s0
+; GCN-NEXT: flat_store_dword v[0:1], v2
+; GCN-NEXT: s_endpgm
%icmp0 = icmp ugt i32 %a, %b
%sub0 = sub i32 %a, %b
%sub1 = sub i32 %b, %a
@@ -149,12 +283,29 @@ define amdgpu_kernel void @v_sad_u32_multi_use_select_pat2(ptr addrspace(1) %out
ret void
}
-; GCN-LABEL: {{^}}v_sad_u32_vector_pat1:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_sad_u32_vector_pat1(ptr addrspace(1) %out, <4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+; GCN-LABEL: v_sad_u32_vector_pat1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x4
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0xc
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v0, s15
+; GCN-NEXT: v_mov_b32_e32 v1, s3
+; GCN-NEXT: v_mov_b32_e32 v2, s14
+; GCN-NEXT: v_sad_u32 v3, s11, v0, v1
+; GCN-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NEXT: v_sad_u32 v2, s10, v2, v0
+; GCN-NEXT: v_mov_b32_e32 v0, s13
+; GCN-NEXT: v_mov_b32_e32 v1, s1
+; GCN-NEXT: v_sad_u32 v1, s9, v0, v1
+; GCN-NEXT: v_mov_b32_e32 v0, s12
+; GCN-NEXT: v_mov_b32_e32 v4, s0
+; GCN-NEXT: v_sad_u32 v0, s8, v0, v4
+; GCN-NEXT: v_mov_b32_e32 v4, s4
+; GCN-NEXT: v_mov_b32_e32 v5, s5
+; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-NEXT: s_endpgm
%icmp0 = icmp ugt <4 x i32> %a, %b
%t0 = select <4 x i1> %icmp0, <4 x i32> %a, <4 x i32> %b
@@ -168,12 +319,29 @@ define amdgpu_kernel void @v_sad_u32_vector_pat1(ptr addrspace(1) %out, <4 x i32
ret void
}
-; GCN-LABEL: {{^}}v_sad_u32_vector_pat2:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_sad_u32_vector_pat2(ptr addrspace(1) %out, <4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+; GCN-LABEL: v_sad_u32_vector_pat2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x4
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0xc
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v0, s15
+; GCN-NEXT: v_mov_b32_e32 v1, s3
+; GCN-NEXT: v_mov_b32_e32 v2, s14
+; GCN-NEXT: v_sad_u32 v3, s11, v0, v1
+; GCN-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NEXT: v_sad_u32 v2, s10, v2, v0
+; GCN-NEXT: v_mov_b32_e32 v0, s13
+; GCN-NEXT: v_mov_b32_e32 v1, s1
+; GCN-NEXT: v_sad_u32 v1, s9, v0, v1
+; GCN-NEXT: v_mov_b32_e32 v0, s12
+; GCN-NEXT: v_mov_b32_e32 v4, s0
+; GCN-NEXT: v_sad_u32 v0, s8, v0, v4
+; GCN-NEXT: v_mov_b32_e32 v4, s4
+; GCN-NEXT: v_mov_b32_e32 v5, s5
+; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-NEXT: s_endpgm
%icmp0 = icmp ugt <4 x i32> %a, %b
%sub0 = sub <4 x i32> %a, %b
%sub1 = sub <4 x i32> %b, %a
@@ -185,10 +353,22 @@ define amdgpu_kernel void @v_sad_u32_vector_pat2(ptr addrspace(1) %out, <4 x i32
ret void
}
-; GCN-LABEL: {{^}}v_sad_u32_i16_pat1:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_sad_u32_i16_pat1(ptr addrspace(1) %out, i16 %a, i16 %b, i16 %c) {
-
+; GCN-LABEL: v_sad_u32_i16_pat1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dword s6, s[4:5], 0x2
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2
+; GCN-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s4, s6, 0xffff
+; GCN-NEXT: s_lshr_b32 s0, s0, 16
+; GCN-NEXT: v_mov_b32_e32 v0, s1
+; GCN-NEXT: v_mov_b32_e32 v1, s0
+; GCN-NEXT: v_sad_u32 v2, s4, v1, v0
+; GCN-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NEXT: v_mov_b32_e32 v1, s3
+; GCN-NEXT: flat_store_short v[0:1], v2
+; GCN-NEXT: s_endpgm
%icmp0 = icmp ugt i16 %a, %b
%t0 = select i1 %icmp0, i16 %a, i16 %b
@@ -202,9 +382,22 @@ define amdgpu_kernel void @v_sad_u32_i16_pat1(ptr addrspace(1) %out, i16 %a, i16
ret void
}
-; GCN-LABEL: {{^}}v_sad_u32_i16_pat2:
-; GCN: v_sad_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_sad_u32_i16_pat2(ptr addrspace(1) %out) {
+; GCN-LABEL: v_sad_u32_i16_pat2:
+; GCN: ; %bb.0:
+; GCN-NEXT: flat_load_ushort v0, v[0:1] glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GCN-NEXT: flat_load_ushort v1, v[0:1] glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: flat_load_ushort v2, v[0:1] glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_sad_u32 v2, v0, v1, v2
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_mov_b32_e32 v1, s1
+; GCN-NEXT: flat_store_short v[0:1], v2
+; GCN-NEXT: s_endpgm
%a = load volatile i16, ptr addrspace(1) undef
%b = load volatile i16, ptr addrspace(1) undef
%c = load volatile i16, ptr addrspace(1) undef
@@ -219,9 +412,22 @@ define amdgpu_kernel void @v_sad_u32_i16_pat2(ptr addrspace(1) %out) {
ret void
}
-; GCN-LABEL: {{^}}v_sad_u32_i8_pat1:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_sad_u32_i8_pat1(ptr addrspace(1) %out, i8 %a, i8 %b, i8 %c) {
+; GCN-LABEL: v_sad_u32_i8_pat1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dword s2, s[4:5], 0x2
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s3, s2, 0xff
+; GCN-NEXT: s_bfe_u32 s4, s2, 0x80008
+; GCN-NEXT: s_lshr_b32 s2, s2, 16
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: v_mov_b32_e32 v1, s2
+; GCN-NEXT: v_sad_u32 v2, s3, v0, v1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_mov_b32_e32 v1, s1
+; GCN-NEXT: flat_store_byte v[0:1], v2
+; GCN-NEXT: s_endpgm
%icmp0 = icmp ugt i8 %a, %b
%t0 = select i1 %icmp0, i8 %a, i8 %b
@@ -235,9 +441,22 @@ define amdgpu_kernel void @v_sad_u32_i8_pat1(ptr addrspace(1) %out, i8 %a, i8 %b
ret void
}
-; GCN-LABEL: {{^}}v_sad_u32_i8_pat2:
-; GCN: v_sad_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_sad_u32_i8_pat2(ptr addrspace(1) %out) {
+; GCN-LABEL: v_sad_u32_i8_pat2:
+; GCN: ; %bb.0:
+; GCN-NEXT: flat_load_ubyte v0, v[0:1] glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GCN-NEXT: flat_load_ubyte v1, v[0:1] glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: flat_load_ubyte v2, v[0:1] glc
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_sad_u32 v2, v0, v1, v2
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_mov_b32_e32 v1, s1
+; GCN-NEXT: flat_store_byte v[0:1], v2
+; GCN-NEXT: s_endpgm
%a = load volatile i8, ptr addrspace(1) undef
%b = load volatile i8, ptr addrspace(1) undef
%c = load volatile i8, ptr addrspace(1) undef
@@ -252,15 +471,26 @@ define amdgpu_kernel void @v_sad_u32_i8_pat2(ptr addrspace(1) %out) {
ret void
}
-; GCN-LABEL: {{^}}s_sad_u32_i8_pat2:
-; GCN: s_load_dword
-; GCN-DAG: s_bfe_u32
-; GCN-DAG: s_sub_i32
-; GCN-DAG: s_and_b32
-; GCN-DAG: s_sub_i32
-; GCN-DAG: s_lshr_b32
-; GCN: s_add_i32
define amdgpu_kernel void @s_sad_u32_i8_pat2(ptr addrspace(1) %out, i8 zeroext %a, i8 zeroext %b, i8 zeroext %c) {
+; GCN-LABEL: s_sad_u32_i8_pat2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dword s2, s[4:5], 0x2
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_lshr_b32 s4, s2, 8
+; GCN-NEXT: s_and_b32 s3, s2, 0xff
+; GCN-NEXT: s_bfe_u32 s5, s2, 0x80008
+; GCN-NEXT: s_lshr_b32 s6, s2, 16
+; GCN-NEXT: s_sub_i32 s7, s2, s4
+; GCN-NEXT: s_sub_i32 s2, s4, s2
+; GCN-NEXT: s_cmp_gt_u32 s3, s5
+; GCN-NEXT: s_cselect_b32 s2, s7, s2
+; GCN-NEXT: s_add_i32 s2, s2, s6
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_mov_b32_e32 v1, s1
+; GCN-NEXT: v_mov_b32_e32 v2, s2
+; GCN-NEXT: flat_store_byte v[0:1], v2
+; GCN-NEXT: s_endpgm
%icmp0 = icmp ugt i8 %a, %b
%sub0 = sub i8 %a, %b
%sub1 = sub i8 %b, %a
@@ -272,12 +502,22 @@ define amdgpu_kernel void @s_sad_u32_i8_pat2(ptr addrspace(1) %out, i8 zeroext %
ret void
}
-; GCN-LABEL: {{^}}v_sad_u32_mismatched_operands_pat1:
-; GCN-DAG: s_cmp_le_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: s_max_u32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
define amdgpu_kernel void @v_sad_u32_mismatched_operands_pat1(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c, i32 %d) {
+; GCN-LABEL: v_sad_u32_mismatched_operands_pat1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_max_u32 s6, s0, s1
+; GCN-NEXT: s_cmp_le_u32 s0, s1
+; GCN-NEXT: s_cselect_b32 s0, s0, s3
+; GCN-NEXT: s_sub_i32 s0, s6, s0
+; GCN-NEXT: s_add_i32 s0, s0, s2
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: v_mov_b32_e32 v1, s5
+; GCN-NEXT: v_mov_b32_e32 v2, s0
+; GCN-NEXT: flat_store_dword v[0:1], v2
+; GCN-NEXT: s_endpgm
%icmp0 = icmp ugt i32 %a, %b
%t0 = select i1 %icmp0, i32 %a, i32 %b
@@ -291,11 +531,22 @@ define amdgpu_kernel void @v_sad_u32_mismatched_operands_pat1(ptr addrspace(1) %
ret void
}
-; GCN-LABEL: {{^}}v_sad_u32_mismatched_operands_pat2:
-; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
define amdgpu_kernel void @v_sad_u32_mismatched_operands_pat2(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c, i32 %d) {
+; GCN-LABEL: v_sad_u32_mismatched_operands_pat2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_sub_i32 s3, s0, s3
+; GCN-NEXT: s_sub_i32 s6, s1, s0
+; GCN-NEXT: s_cmp_lt_u32 s1, s0
+; GCN-NEXT: s_cselect_b32 s0, s3, s6
+; GCN-NEXT: s_add_i32 s0, s0, s2
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: v_mov_b32_e32 v1, s5
+; GCN-NEXT: v_mov_b32_e32 v2, s0
+; GCN-NEXT: flat_store_dword v[0:1], v2
+; GCN-NEXT: s_endpgm
%icmp0 = icmp ugt i32 %a, %b
%sub0 = sub i32 %a, %d
%sub1 = sub i32 %b, %a
diff --git a/llvm/test/CodeGen/AMDGPU/shl.ll b/llvm/test/CodeGen/AMDGPU/shl.ll
index b1a82daa8e7d..b3f4790df4d4 100644
--- a/llvm/test/CodeGen/AMDGPU/shl.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl.ll
@@ -795,17 +795,17 @@ define amdgpu_kernel void @shl_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 9:
-; EG-NEXT: AND_INT T1.Y, T0.Z, literal.x,
-; EG-NEXT: LSHR T1.Z, T0.Y, 1,
+; EG-NEXT: LSHR T1.Y, T0.Y, 1,
+; EG-NEXT: NOT_INT T1.Z, T0.Z,
; EG-NEXT: BIT_ALIGN_INT T0.W, T0.Y, T0.X, 1,
-; EG-NEXT: NOT_INT * T1.W, T0.Z,
+; EG-NEXT: AND_INT * T1.W, T0.Z, literal.x,
; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: BIT_ALIGN_INT T1.Z, PV.Z, PV.W, PS,
-; EG-NEXT: LSHL T0.W, T0.X, PV.Y,
+; EG-NEXT: LSHL T2.Z, T0.X, PS,
+; EG-NEXT: BIT_ALIGN_INT T0.W, PV.Y, PV.W, PV.Z,
; EG-NEXT: AND_INT * T1.W, T0.Z, literal.x,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
-; EG-NEXT: CNDE_INT * T0.Y, PS, PV.Z, PV.W,
-; EG-NEXT: CNDE_INT T0.X, T1.W, T0.W, 0.0,
+; EG-NEXT: CNDE_INT * T0.Y, PS, PV.W, PV.Z,
+; EG-NEXT: CNDE_INT T0.X, T1.W, T2.Z, 0.0,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%b_ptr = getelementptr i64, ptr addrspace(1) %in, i64 1
@@ -858,8 +858,8 @@ define amdgpu_kernel void @shl_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %in
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 1 @6
-; EG-NEXT: ALU 22, @11, KC0[CB0:0-32], KC1[]
-; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T3.XYZW, T0.X, 1
+; EG-NEXT: ALU 23, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T2.XYZW, T0.X, 1
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: Fetch clause starting at 6:
@@ -868,27 +868,28 @@ define amdgpu_kernel void @shl_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %in
; EG-NEXT: ALU clause starting at 10:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 11:
-; EG-NEXT: AND_INT T1.Y, T1.Z, literal.x,
+; EG-NEXT: AND_INT * T1.W, T1.Z, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: LSHL T2.X, T0.Z, PV.W,
+; EG-NEXT: AND_INT T1.Y, T1.Z, literal.x, BS:VEC_120/SCL_212
; EG-NEXT: LSHR T2.Z, T0.W, 1,
-; EG-NEXT: BIT_ALIGN_INT T0.W, T0.W, T0.Z, 1,
+; EG-NEXT: BIT_ALIGN_INT T0.W, T0.W, T0.Z, 1, BS:VEC_102/SCL_221
; EG-NEXT: NOT_INT * T1.W, T1.Z,
+; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T3.X, PV.Z, PV.W, PS,
+; EG-NEXT: LSHR T2.Y, T0.Y, 1,
+; EG-NEXT: NOT_INT T0.Z, T1.X,
+; EG-NEXT: BIT_ALIGN_INT T0.W, T0.Y, T0.X, 1,
+; EG-NEXT: AND_INT * T1.W, T1.X, literal.x,
; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: BIT_ALIGN_INT T0.W, PV.Z, PV.W, PS,
-; EG-NEXT: LSHL * T1.W, T0.Z, PV.Y,
-; EG-NEXT: AND_INT T2.X, T1.Z, literal.x,
-; EG-NEXT: AND_INT T1.Y, T1.X, literal.y,
-; EG-NEXT: LSHR T0.Z, T0.Y, 1,
-; EG-NEXT: BIT_ALIGN_INT T2.W, T0.Y, T0.X, 1,
-; EG-NEXT: NOT_INT * T3.W, T1.X,
-; EG-NEXT: 32(4.484155e-44), 31(4.344025e-44)
-; EG-NEXT: BIT_ALIGN_INT T0.Y, PV.Z, PV.W, PS,
-; EG-NEXT: LSHL T0.Z, T0.X, PV.Y,
-; EG-NEXT: AND_INT T2.W, T1.X, literal.x, BS:VEC_120/SCL_212
-; EG-NEXT: CNDE_INT * T3.W, PV.X, T0.W, T1.W,
+; EG-NEXT: LSHL T0.Y, T0.X, PS, BS:VEC_120/SCL_212
+; EG-NEXT: AND_INT T1.Z, T1.X, literal.x, BS:VEC_201
+; EG-NEXT: BIT_ALIGN_INT T0.W, PV.Y, PV.W, PV.Z,
+; EG-NEXT: CNDE_INT * T2.W, T1.Y, PV.X, T2.X,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
-; EG-NEXT: CNDE_INT T3.Y, PV.W, PV.Y, PV.Z,
-; EG-NEXT: CNDE_INT * T3.Z, T2.X, T1.W, 0.0,
-; EG-NEXT: CNDE_INT T3.X, T2.W, T0.Z, 0.0,
+; EG-NEXT: CNDE_INT T2.Y, PV.Z, PV.W, PV.Y,
+; EG-NEXT: CNDE_INT * T2.Z, T1.Y, T2.X, 0.0,
+; EG-NEXT: CNDE_INT T2.X, T1.Z, T0.Y, 0.0,
; EG-NEXT: LSHR * T0.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%b_ptr = getelementptr <2 x i64>, ptr addrspace(1) %in, i64 1
@@ -955,65 +956,66 @@ define amdgpu_kernel void @shl_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %in
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @14, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 3 @6
-; EG-NEXT: ALU 47, @15, KC0[CB0:0-32], KC1[]
-; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.XYZW, T2.X, 0
-; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T4.XYZW, T0.X, 1
+; EG-NEXT: ALU 48, @15, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T2.XYZW, T0.X, 0
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T3.XYZW, T1.X, 1
; EG-NEXT: CF_END
; EG-NEXT: Fetch clause starting at 6:
-; EG-NEXT: VTX_READ_128 T1.XYZW, T0.X, 48, #1
-; EG-NEXT: VTX_READ_128 T2.XYZW, T0.X, 0, #1
-; EG-NEXT: VTX_READ_128 T3.XYZW, T0.X, 32, #1
-; EG-NEXT: VTX_READ_128 T0.XYZW, T0.X, 16, #1
+; EG-NEXT: VTX_READ_128 T1.XYZW, T0.X, 32, #1
+; EG-NEXT: VTX_READ_128 T2.XYZW, T0.X, 48, #1
+; EG-NEXT: VTX_READ_128 T3.XYZW, T0.X, 16, #1
+; EG-NEXT: VTX_READ_128 T0.XYZW, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 14:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 15:
-; EG-NEXT: AND_INT T4.Z, T1.Z, literal.x,
-; EG-NEXT: LSHR T1.W, T0.W, 1,
-; EG-NEXT: NOT_INT * T3.W, T1.Z,
+; EG-NEXT: AND_INT * T1.W, T1.Z, literal.x,
; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: BIT_ALIGN_INT T4.X, T0.W, T0.Z, 1,
-; EG-NEXT: AND_INT T1.Y, T3.Z, literal.x, BS:VEC_201
-; EG-NEXT: LSHR T5.Z, T2.W, 1, BS:VEC_120/SCL_212
-; EG-NEXT: BIT_ALIGN_INT T0.W, T2.W, T2.Z, 1, BS:VEC_102/SCL_221
-; EG-NEXT: NOT_INT * T2.W, T3.Z,
-; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: BIT_ALIGN_INT T3.Y, PV.Z, PV.W, PS,
-; EG-NEXT: LSHL T2.Z, T2.Z, PV.Y,
-; EG-NEXT: BIT_ALIGN_INT T0.W, T1.W, PV.X, T3.W,
-; EG-NEXT: LSHL * T1.W, T0.Z, T4.Z,
+; EG-NEXT: LSHL * T1.W, T0.Z, PV.W,
; EG-NEXT: AND_INT T4.X, T1.Z, literal.x,
-; EG-NEXT: AND_INT T1.Y, T1.X, literal.y,
-; EG-NEXT: LSHR T0.Z, T0.Y, 1,
-; EG-NEXT: BIT_ALIGN_INT T2.W, T0.Y, T0.X, 1,
-; EG-NEXT: NOT_INT * T3.W, T1.X,
+; EG-NEXT: LSHR T1.Y, T3.W, 1,
+; EG-NEXT: NOT_INT T4.Z, T2.Z, BS:VEC_201
+; EG-NEXT: BIT_ALIGN_INT T2.W, T3.W, T3.Z, 1,
+; EG-NEXT: AND_INT * T3.W, T2.Z, literal.y,
; EG-NEXT: 32(4.484155e-44), 31(4.344025e-44)
-; EG-NEXT: AND_INT T5.X, T3.Z, literal.x,
-; EG-NEXT: BIT_ALIGN_INT T0.Y, PV.Z, PV.W, PS,
-; EG-NEXT: LSHL T0.Z, T0.X, PV.Y,
-; EG-NEXT: AND_INT T2.W, T1.X, literal.x, BS:VEC_120/SCL_212
-; EG-NEXT: CNDE_INT * T4.W, PV.X, T0.W, T1.W,
+; EG-NEXT: LSHL T5.X, T3.Z, PS,
+; EG-NEXT: AND_INT T2.Y, T2.Z, literal.x, BS:VEC_120/SCL_212
+; EG-NEXT: BIT_ALIGN_INT T2.Z, PV.Y, PV.W, PV.Z,
+; EG-NEXT: LSHR T2.W, T3.Y, 1,
+; EG-NEXT: NOT_INT * T3.W, T2.X,
+; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T6.X, T3.Y, T3.X, 1,
+; EG-NEXT: AND_INT T1.Y, T2.X, literal.x,
+; EG-NEXT: LSHR T3.Z, T0.W, 1,
+; EG-NEXT: BIT_ALIGN_INT T0.W, T0.W, T0.Z, 1,
+; EG-NEXT: NOT_INT * T4.W, T1.Z,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+; EG-NEXT: BIT_ALIGN_INT T7.X, PV.Z, PV.W, PS,
+; EG-NEXT: LSHL T1.Y, T3.X, PV.Y, BS:VEC_120/SCL_212
+; EG-NEXT: AND_INT T0.Z, T2.X, literal.x, BS:VEC_201
+; EG-NEXT: BIT_ALIGN_INT T0.W, T2.W, PV.X, T3.W,
+; EG-NEXT: CNDE_INT * T3.W, T2.Y, T2.Z, T5.X,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
-; EG-NEXT: AND_INT T0.X, T3.X, literal.x,
-; EG-NEXT: CNDE_INT T4.Y, PV.W, PV.Y, PV.Z,
-; EG-NEXT: LSHR T1.Z, T2.Y, 1,
-; EG-NEXT: BIT_ALIGN_INT T0.W, T2.Y, T2.X, 1,
-; EG-NEXT: NOT_INT * T3.W, T3.X,
+; EG-NEXT: LSHR T2.X, T0.Y, 1,
+; EG-NEXT: CNDE_INT T3.Y, PV.Z, PV.W, PV.Y,
+; EG-NEXT: NOT_INT T1.Z, T1.X,
+; EG-NEXT: BIT_ALIGN_INT T0.W, T0.Y, T0.X, 1,
+; EG-NEXT: AND_INT * T2.W, T1.X, literal.x,
; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: BIT_ALIGN_INT T1.X, PV.Z, PV.W, PS,
-; EG-NEXT: LSHL T0.Y, T2.X, PV.X,
-; EG-NEXT: CNDE_INT T4.Z, T4.X, T1.W, 0.0, BS:VEC_120/SCL_212
-; EG-NEXT: AND_INT * T0.W, T3.X, literal.x, BS:VEC_201
+; EG-NEXT: LSHL T0.X, T0.X, PS,
+; EG-NEXT: AND_INT T0.Y, T1.X, literal.x, BS:VEC_120/SCL_212
+; EG-NEXT: CNDE_INT T3.Z, T2.Y, T5.X, 0.0, BS:VEC_021/SCL_122
+; EG-NEXT: BIT_ALIGN_INT * T0.W, PV.X, PV.W, PV.Z,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
-; EG-NEXT: CNDE_INT * T1.W, T5.X, T3.Y, T2.Z,
-; EG-NEXT: CNDE_INT T4.X, T2.W, T0.Z, 0.0,
-; EG-NEXT: CNDE_INT T1.Y, T0.W, T1.X, T0.Y, BS:VEC_120/SCL_212
-; EG-NEXT: ADD_INT * T2.W, KC0[2].Y, literal.x,
+; EG-NEXT: CNDE_INT * T2.W, T4.X, T7.X, T1.W,
+; EG-NEXT: CNDE_INT T3.X, T0.Z, T1.Y, 0.0,
+; EG-NEXT: CNDE_INT T2.Y, T0.Y, T0.W, T0.X,
+; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, literal.x,
; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
-; EG-NEXT: LSHR T0.X, PV.W, literal.x,
-; EG-NEXT: CNDE_INT T1.Z, T5.X, T2.Z, 0.0,
-; EG-NEXT: CNDE_INT * T1.X, T0.W, T0.Y, 0.0,
+; EG-NEXT: LSHR T1.X, PV.W, literal.x,
+; EG-NEXT: CNDE_INT T2.Z, T4.X, T1.W, 0.0,
+; EG-NEXT: CNDE_INT * T2.X, T0.Y, T0.X, 0.0,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
-; EG-NEXT: LSHR * T2.X, KC0[2].Y, literal.x,
+; EG-NEXT: LSHR * T0.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%b_ptr = getelementptr <4 x i64>, ptr addrspace(1) %in, i64 1
%a = load <4 x i64>, ptr addrspace(1) %in
@@ -1172,17 +1174,17 @@ define amdgpu_kernel void @s_shl_constant_i64(ptr addrspace(1) %out, i64 %a) {
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: ALU clause starting at 4:
-; EG-NEXT: AND_INT T0.Z, KC0[2].W, literal.x,
-; EG-NEXT: MOV T0.W, literal.y,
-; EG-NEXT: NOT_INT * T1.W, KC0[2].W,
-; EG-NEXT: 31(4.344025e-44), -1(nan)
-; EG-NEXT: BIT_ALIGN_INT T1.Z, literal.x, PV.W, PS,
-; EG-NEXT: LSHL T0.W, literal.y, PV.Z,
+; EG-NEXT: MOV T0.Z, literal.x,
+; EG-NEXT: NOT_INT T0.W, KC0[2].W,
+; EG-NEXT: AND_INT * T1.W, KC0[2].W, literal.y,
+; EG-NEXT: -1(nan), 31(4.344025e-44)
+; EG-NEXT: LSHL T1.Z, literal.x, PS,
+; EG-NEXT: BIT_ALIGN_INT T0.W, literal.y, PV.Z, PV.W,
; EG-NEXT: AND_INT * T1.W, KC0[2].W, literal.z,
-; EG-NEXT: 32767(4.591635e-41), -1(nan)
+; EG-NEXT: -1(nan), 32767(4.591635e-41)
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
-; EG-NEXT: CNDE_INT * T0.Y, PS, PV.Z, PV.W,
-; EG-NEXT: CNDE_INT T0.X, T1.W, T0.W, 0.0,
+; EG-NEXT: CNDE_INT * T0.Y, PS, PV.W, PV.Z,
+; EG-NEXT: CNDE_INT T0.X, T1.W, T1.Z, 0.0,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%shl = shl i64 281474976710655, %a
@@ -1423,15 +1425,15 @@ define amdgpu_kernel void @s_shl_inline_imm_64_i64(ptr addrspace(1) %out, ptr ad
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: ALU clause starting at 4:
-; EG-NEXT: NOT_INT T0.W, KC0[2].W,
-; EG-NEXT: AND_INT * T1.W, KC0[2].W, literal.x,
+; EG-NEXT: AND_INT T0.W, KC0[2].W, literal.x,
+; EG-NEXT: NOT_INT * T1.W, KC0[2].W,
; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: LSHL T0.Z, literal.x, PS,
-; EG-NEXT: BIT_ALIGN_INT T0.W, 0.0, literal.y, PV.W,
-; EG-NEXT: AND_INT * T1.W, KC0[2].W, literal.y,
-; EG-NEXT: 64(8.968310e-44), 32(4.484155e-44)
-; EG-NEXT: CNDE_INT * T0.Y, PS, PV.W, PV.Z,
-; EG-NEXT: CNDE_INT T0.X, T1.W, T0.Z, 0.0,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, 0.0, literal.x, PS,
+; EG-NEXT: AND_INT T1.W, KC0[2].W, literal.x,
+; EG-NEXT: LSHL * T0.W, literal.y, PV.W,
+; EG-NEXT: 32(4.484155e-44), 64(8.968310e-44)
+; EG-NEXT: CNDE_INT * T0.Y, PV.W, PV.Z, PS,
+; EG-NEXT: CNDE_INT T0.X, T1.W, T0.W, 0.0,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%shl = shl i64 64, %a
@@ -1903,16 +1905,16 @@ define amdgpu_kernel void @s_shl_inline_imm_f32_4_0_i64(ptr addrspace(1) %out, p
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: ALU clause starting at 4:
-; EG-NEXT: NOT_INT T0.W, KC0[2].W,
-; EG-NEXT: AND_INT * T1.W, KC0[2].W, literal.x,
+; EG-NEXT: AND_INT T0.W, KC0[2].W, literal.x,
+; EG-NEXT: NOT_INT * T1.W, KC0[2].W,
; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT: LSHL T0.Z, literal.x, PS,
-; EG-NEXT: BIT_ALIGN_INT T0.W, 0.0, literal.y, PV.W,
-; EG-NEXT: AND_INT * T1.W, KC0[2].W, literal.z,
-; EG-NEXT: 1082130432(4.000000e+00), 541065216(1.626303e-19)
-; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
-; EG-NEXT: CNDE_INT * T0.Y, PS, PV.W, PV.Z,
-; EG-NEXT: CNDE_INT T0.X, T1.W, T0.Z, 0.0,
+; EG-NEXT: BIT_ALIGN_INT T0.Z, 0.0, literal.x, PS,
+; EG-NEXT: AND_INT T1.W, KC0[2].W, literal.y,
+; EG-NEXT: LSHL * T0.W, literal.z, PV.W,
+; EG-NEXT: 541065216(1.626303e-19), 32(4.484155e-44)
+; EG-NEXT: 1082130432(4.000000e+00), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT * T0.Y, PV.W, PV.Z, PS,
+; EG-NEXT: CNDE_INT T0.X, T1.W, T0.W, 0.0,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%shl = shl i64 1082130432, %a
@@ -1959,17 +1961,17 @@ define amdgpu_kernel void @s_shl_inline_imm_f32_neg_4_0_i64(ptr addrspace(1) %ou
; EG-NEXT: CF_END
; EG-NEXT: PAD
; EG-NEXT: ALU clause starting at 4:
-; EG-NEXT: AND_INT T0.Z, KC0[2].W, literal.x,
-; EG-NEXT: MOV T0.W, literal.y,
-; EG-NEXT: NOT_INT * T1.W, KC0[2].W,
-; EG-NEXT: 31(4.344025e-44), -532676608(-5.534023e+19)
-; EG-NEXT: BIT_ALIGN_INT T1.Z, literal.x, PV.W, PS,
-; EG-NEXT: LSHL T0.W, literal.y, PV.Z,
+; EG-NEXT: MOV T0.Z, literal.x,
+; EG-NEXT: NOT_INT T0.W, KC0[2].W,
+; EG-NEXT: AND_INT * T1.W, KC0[2].W, literal.y,
+; EG-NEXT: -532676608(-5.534023e+19), 31(4.344025e-44)
+; EG-NEXT: LSHL T1.Z, literal.x, PS,
+; EG-NEXT: BIT_ALIGN_INT T0.W, literal.y, PV.Z, PV.W,
; EG-NEXT: AND_INT * T1.W, KC0[2].W, literal.z,
-; EG-NEXT: 2147483647(nan), -1065353216(-4.000000e+00)
+; EG-NEXT: -1065353216(-4.000000e+00), 2147483647(nan)
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
-; EG-NEXT: CNDE_INT * T0.Y, PS, PV.Z, PV.W,
-; EG-NEXT: CNDE_INT T0.X, T1.W, T0.W, 0.0,
+; EG-NEXT: CNDE_INT * T0.Y, PS, PV.W, PV.Z,
+; EG-NEXT: CNDE_INT T0.X, T1.W, T1.Z, 0.0,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%shl = shl i64 -1065353216, %a
diff --git a/llvm/test/CodeGen/AMDGPU/simplify-libcalls.ll b/llvm/test/CodeGen/AMDGPU/simplify-libcalls.ll
index 54ca33401ccf..5a241f85b2e2 100644
--- a/llvm/test/CodeGen/AMDGPU/simplify-libcalls.ll
+++ b/llvm/test/CodeGen/AMDGPU/simplify-libcalls.ll
@@ -475,8 +475,7 @@ entry:
declare float @_Z5rootnfi(float, i32)
; GCN-LABEL: {{^}}define amdgpu_kernel void @test_rootn_2
-; GCN-POSTLINK: call fast float @_Z5rootnfi(float %tmp, i32 2)
-; GCN-PRELINK: %__rootn2sqrt = tail call fast float @llvm.sqrt.f32(float %tmp)
+; GCN: call fast float @llvm.sqrt.f32(float %tmp)
define amdgpu_kernel void @test_rootn_2(ptr addrspace(1) nocapture %a) {
entry:
%tmp = load float, ptr addrspace(1) %a, align 4
@@ -507,8 +506,8 @@ entry:
}
; GCN-LABEL: {{^}}define amdgpu_kernel void @test_rootn_m2
-; GCN-POSTLINK: call fast float @_Z5rootnfi(float %tmp, i32 -2)
-; GCN-PRELINK: %__rootn2rsqrt = tail call fast float @_Z5rsqrtf(float %tmp)
+; GCN: [[SQRT:%.+]] = tail call fast float @llvm.sqrt.f32(float %tmp)
+; GCN-NEXT: fdiv fast float 1.000000e+00, [[SQRT]]
define amdgpu_kernel void @test_rootn_m2(ptr addrspace(1) nocapture %a) {
entry:
%tmp = load float, ptr addrspace(1) %a, align 4
diff --git a/llvm/test/CodeGen/AMDGPU/trap-abis.ll b/llvm/test/CodeGen/AMDGPU/trap-abis.ll
index dcc5fbd142c4..7dce633e9186 100644
--- a/llvm/test/CodeGen/AMDGPU/trap-abis.ll
+++ b/llvm/test/CodeGen/AMDGPU/trap-abis.ll
@@ -264,6 +264,142 @@ ret:
ret void
}
+define amdgpu_kernel void @trap_with_use_after(ptr addrspace(1) %arg0, ptr addrspace(1) %arg1) {
+; NOHSA-TRAP-GFX900-LABEL: trap_with_use_after:
+; NOHSA-TRAP-GFX900: ; %bb.0:
+; NOHSA-TRAP-GFX900-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; NOHSA-TRAP-GFX900-NEXT: v_mov_b32_e32 v0, 0
+; NOHSA-TRAP-GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; NOHSA-TRAP-GFX900-NEXT: global_load_dword v1, v0, s[0:1] glc
+; NOHSA-TRAP-GFX900-NEXT: s_waitcnt vmcnt(0)
+; NOHSA-TRAP-GFX900-NEXT: s_cbranch_execnz .LBB2_2
+; NOHSA-TRAP-GFX900-NEXT: ; %bb.1:
+; NOHSA-TRAP-GFX900-NEXT: global_store_dword v0, v1, s[2:3]
+; NOHSA-TRAP-GFX900-NEXT: s_waitcnt vmcnt(0)
+; NOHSA-TRAP-GFX900-NEXT: .LBB2_2:
+; NOHSA-TRAP-GFX900-NEXT: s_endpgm
+;
+; HSA-TRAP-GFX803-LABEL: trap_with_use_after:
+; HSA-TRAP-GFX803: ; %bb.0:
+; HSA-TRAP-GFX803-NEXT: s_mov_b64 s[0:1], s[4:5]
+; HSA-TRAP-GFX803-NEXT: s_load_dwordx4 s[4:7], s[6:7], 0x0
+; HSA-TRAP-GFX803-NEXT: s_waitcnt lgkmcnt(0)
+; HSA-TRAP-GFX803-NEXT: v_mov_b32_e32 v0, s4
+; HSA-TRAP-GFX803-NEXT: v_mov_b32_e32 v1, s5
+; HSA-TRAP-GFX803-NEXT: flat_load_dword v2, v[0:1] glc
+; HSA-TRAP-GFX803-NEXT: s_waitcnt vmcnt(0)
+; HSA-TRAP-GFX803-NEXT: v_mov_b32_e32 v0, s6
+; HSA-TRAP-GFX803-NEXT: v_mov_b32_e32 v1, s7
+; HSA-TRAP-GFX803-NEXT: s_trap 2
+; HSA-TRAP-GFX803-NEXT: flat_store_dword v[0:1], v2
+; HSA-TRAP-GFX803-NEXT: s_waitcnt vmcnt(0)
+; HSA-TRAP-GFX803-NEXT: s_endpgm
+;
+; HSA-TRAP-GFX900-LABEL: trap_with_use_after:
+; HSA-TRAP-GFX900: ; %bb.0:
+; HSA-TRAP-GFX900-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; HSA-TRAP-GFX900-NEXT: v_mov_b32_e32 v0, 0
+; HSA-TRAP-GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; HSA-TRAP-GFX900-NEXT: global_load_dword v1, v0, s[0:1] glc
+; HSA-TRAP-GFX900-NEXT: s_waitcnt vmcnt(0)
+; HSA-TRAP-GFX900-NEXT: s_trap 2
+; HSA-TRAP-GFX900-NEXT: global_store_dword v0, v1, s[2:3]
+; HSA-TRAP-GFX900-NEXT: s_waitcnt vmcnt(0)
+; HSA-TRAP-GFX900-NEXT: s_endpgm
+;
+; HSA-NOTRAP-GFX900-LABEL: trap_with_use_after:
+; HSA-NOTRAP-GFX900: ; %bb.0:
+; HSA-NOTRAP-GFX900-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; HSA-NOTRAP-GFX900-NEXT: v_mov_b32_e32 v0, 0
+; HSA-NOTRAP-GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; HSA-NOTRAP-GFX900-NEXT: global_load_dword v1, v0, s[0:1] glc
+; HSA-NOTRAP-GFX900-NEXT: s_waitcnt vmcnt(0)
+; HSA-NOTRAP-GFX900-NEXT: s_cbranch_execnz .LBB2_2
+; HSA-NOTRAP-GFX900-NEXT: ; %bb.1:
+; HSA-NOTRAP-GFX900-NEXT: global_store_dword v0, v1, s[2:3]
+; HSA-NOTRAP-GFX900-NEXT: s_waitcnt vmcnt(0)
+; HSA-NOTRAP-GFX900-NEXT: .LBB2_2:
+; HSA-NOTRAP-GFX900-NEXT: s_endpgm
+;
+; HSA-TRAP-GFX1100-LABEL: trap_with_use_after:
+; HSA-TRAP-GFX1100: ; %bb.0:
+; HSA-TRAP-GFX1100-NEXT: s_load_b128 s[0:3], s[0:1], 0x0
+; HSA-TRAP-GFX1100-NEXT: v_mov_b32_e32 v0, 0
+; HSA-TRAP-GFX1100-NEXT: s_waitcnt lgkmcnt(0)
+; HSA-TRAP-GFX1100-NEXT: global_load_b32 v1, v0, s[0:1] glc dlc
+; HSA-TRAP-GFX1100-NEXT: s_waitcnt vmcnt(0)
+; HSA-TRAP-GFX1100-NEXT: s_cbranch_execnz .LBB2_2
+; HSA-TRAP-GFX1100-NEXT: ; %bb.1:
+; HSA-TRAP-GFX1100-NEXT: global_store_b32 v0, v1, s[2:3] dlc
+; HSA-TRAP-GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; HSA-TRAP-GFX1100-NEXT: s_nop 0
+; HSA-TRAP-GFX1100-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; HSA-TRAP-GFX1100-NEXT: s_endpgm
+; HSA-TRAP-GFX1100-NEXT: .LBB2_2:
+; HSA-TRAP-GFX1100-NEXT: s_trap 2
+; HSA-TRAP-GFX1100-NEXT: s_sendmsg_rtn_b32 s0, sendmsg(MSG_RTN_GET_DOORBELL)
+; HSA-TRAP-GFX1100-NEXT: s_mov_b32 ttmp2, m0
+; HSA-TRAP-GFX1100-NEXT: s_waitcnt lgkmcnt(0)
+; HSA-TRAP-GFX1100-NEXT: s_and_b32 s0, s0, 0x3ff
+; HSA-TRAP-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; HSA-TRAP-GFX1100-NEXT: s_bitset1_b32 s0, 10
+; HSA-TRAP-GFX1100-NEXT: s_mov_b32 m0, s0
+; HSA-TRAP-GFX1100-NEXT: s_sendmsg sendmsg(MSG_INTERRUPT)
+; HSA-TRAP-GFX1100-NEXT: s_mov_b32 m0, ttmp2
+; HSA-TRAP-GFX1100-NEXT: .LBB2_3: ; =>This Inner Loop Header: Depth=1
+; HSA-TRAP-GFX1100-NEXT: s_sethalt 5
+; HSA-TRAP-GFX1100-NEXT: s_branch .LBB2_3
+;
+; HSA-TRAP-GFX1100-O0-LABEL: trap_with_use_after:
+; HSA-TRAP-GFX1100-O0: ; %bb.0:
+; HSA-TRAP-GFX1100-O0-NEXT: ; implicit-def: $vgpr1 : SGPR spill to VGPR lane
+; HSA-TRAP-GFX1100-O0-NEXT: v_mov_b32_e32 v0, 0
+; HSA-TRAP-GFX1100-O0-NEXT: scratch_store_b32 off, v0, off offset:8 ; 4-byte Folded Spill
+; HSA-TRAP-GFX1100-O0-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; HSA-TRAP-GFX1100-O0-NEXT: s_load_b64 s[2:3], s[4:5], 0x8
+; HSA-TRAP-GFX1100-O0-NEXT: s_waitcnt lgkmcnt(0)
+; HSA-TRAP-GFX1100-O0-NEXT: v_writelane_b32 v1, s2, 0
+; HSA-TRAP-GFX1100-O0-NEXT: v_writelane_b32 v1, s3, 1
+; HSA-TRAP-GFX1100-O0-NEXT: s_or_saveexec_b32 s6, -1
+; HSA-TRAP-GFX1100-O0-NEXT: scratch_store_b32 off, v1, off offset:4 ; 4-byte Folded Spill
+; HSA-TRAP-GFX1100-O0-NEXT: s_mov_b32 exec_lo, s6
+; HSA-TRAP-GFX1100-O0-NEXT: global_load_b32 v0, v0, s[0:1] glc dlc
+; HSA-TRAP-GFX1100-O0-NEXT: s_waitcnt vmcnt(0)
+; HSA-TRAP-GFX1100-O0-NEXT: scratch_store_b32 off, v0, off ; 4-byte Folded Spill
+; HSA-TRAP-GFX1100-O0-NEXT: s_cbranch_execnz .LBB2_2
+; HSA-TRAP-GFX1100-O0-NEXT: ; %bb.1:
+; HSA-TRAP-GFX1100-O0-NEXT: s_or_saveexec_b32 s6, -1
+; HSA-TRAP-GFX1100-O0-NEXT: scratch_load_b32 v0, off, off offset:4 ; 4-byte Folded Reload
+; HSA-TRAP-GFX1100-O0-NEXT: s_mov_b32 exec_lo, s6
+; HSA-TRAP-GFX1100-O0-NEXT: s_waitcnt vmcnt(0)
+; HSA-TRAP-GFX1100-O0-NEXT: v_readlane_b32 s0, v0, 0
+; HSA-TRAP-GFX1100-O0-NEXT: v_readlane_b32 s1, v0, 1
+; HSA-TRAP-GFX1100-O0-NEXT: scratch_load_b32 v1, off, off offset:8 ; 4-byte Folded Reload
+; HSA-TRAP-GFX1100-O0-NEXT: scratch_load_b32 v2, off, off ; 4-byte Folded Reload
+; HSA-TRAP-GFX1100-O0-NEXT: s_waitcnt vmcnt(0)
+; HSA-TRAP-GFX1100-O0-NEXT: global_store_b32 v1, v2, s[0:1] dlc
+; HSA-TRAP-GFX1100-O0-NEXT: s_waitcnt_vscnt null, 0x0
+; HSA-TRAP-GFX1100-O0-NEXT: ; kill: killed $vgpr0
+; HSA-TRAP-GFX1100-O0-NEXT: s_endpgm
+; HSA-TRAP-GFX1100-O0-NEXT: .LBB2_2:
+; HSA-TRAP-GFX1100-O0-NEXT: s_trap 2
+; HSA-TRAP-GFX1100-O0-NEXT: s_sendmsg_rtn_b32 s0, sendmsg(MSG_RTN_GET_DOORBELL)
+; HSA-TRAP-GFX1100-O0-NEXT: s_mov_b32 ttmp2, m0
+; HSA-TRAP-GFX1100-O0-NEXT: s_waitcnt lgkmcnt(0)
+; HSA-TRAP-GFX1100-O0-NEXT: s_and_b32 s0, s0, 0x3ff
+; HSA-TRAP-GFX1100-O0-NEXT: s_or_b32 s0, s0, 0x400
+; HSA-TRAP-GFX1100-O0-NEXT: s_mov_b32 m0, s0
+; HSA-TRAP-GFX1100-O0-NEXT: s_sendmsg sendmsg(MSG_INTERRUPT)
+; HSA-TRAP-GFX1100-O0-NEXT: s_mov_b32 m0, ttmp2
+; HSA-TRAP-GFX1100-O0-NEXT: .LBB2_3: ; =>This Inner Loop Header: Depth=1
+; HSA-TRAP-GFX1100-O0-NEXT: s_sethalt 5
+; HSA-TRAP-GFX1100-O0-NEXT: s_branch .LBB2_3
+ %tmp = load volatile i32, ptr addrspace(1) %arg0
+ call void @llvm.trap()
+ store volatile i32 %tmp, ptr addrspace(1) %arg1
+ ret void
+}
+
define amdgpu_kernel void @debugtrap(ptr addrspace(1) nocapture readonly %arg0) {
; NOHSA-TRAP-GFX900-LABEL: debugtrap:
; NOHSA-TRAP-GFX900: ; %bb.0:
@@ -334,6 +470,20 @@ define amdgpu_kernel void @debugtrap(ptr addrspace(1) nocapture readonly %arg0)
; HSA-TRAP-GFX1100-NEXT: s_nop 0
; HSA-TRAP-GFX1100-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; HSA-TRAP-GFX1100-NEXT: s_endpgm
+;
+; HSA-TRAP-GFX1100-O0-LABEL: debugtrap:
+; HSA-TRAP-GFX1100-O0: ; %bb.0:
+; HSA-TRAP-GFX1100-O0-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; HSA-TRAP-GFX1100-O0-NEXT: v_mov_b32_e32 v0, 0
+; HSA-TRAP-GFX1100-O0-NEXT: v_mov_b32_e32 v1, 1
+; HSA-TRAP-GFX1100-O0-NEXT: s_waitcnt lgkmcnt(0)
+; HSA-TRAP-GFX1100-O0-NEXT: global_store_b32 v0, v1, s[0:1] dlc
+; HSA-TRAP-GFX1100-O0-NEXT: s_waitcnt_vscnt null, 0x0
+; HSA-TRAP-GFX1100-O0-NEXT: s_trap 3
+; HSA-TRAP-GFX1100-O0-NEXT: v_mov_b32_e32 v1, 2
+; HSA-TRAP-GFX1100-O0-NEXT: global_store_b32 v0, v1, s[0:1] dlc
+; HSA-TRAP-GFX1100-O0-NEXT: s_waitcnt_vscnt null, 0x0
+; HSA-TRAP-GFX1100-O0-NEXT: s_endpgm
store volatile i32 1, ptr addrspace(1) %arg0
call void @llvm.debugtrap()
store volatile i32 2, ptr addrspace(1) %arg0
diff --git a/llvm/test/CodeGen/ARM/exp10-libcall-names.ll b/llvm/test/CodeGen/ARM/exp10-libcall-names.ll
new file mode 100644
index 000000000000..0ac68b3e8c46
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/exp10-libcall-names.ll
@@ -0,0 +1,39 @@
+; RUN: llc -mtriple=armv7-linux-gnu < %s | FileCheck -check-prefix=LINUX %s
+; RUN: llc -mtriple=armv7-apple-macos10.9 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=armv7-apple-ios7.0 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=armv7-apple-tvos7.0 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=armv7-apple-watchos7.0 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=armv7-apple-xros7.0 < %s | FileCheck -check-prefix=APPLE %s
+
+; RUN: not llc -mtriple=armv7-apple-macos10.8 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not llc -mtriple=armv7-apple-ios6.0 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not llc -mtriple=armv7-apple-tvos6.0 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not llc -mtriple=armv7-apple-xros6.0 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+
+; Check exp10/exp10f is emitted as __exp10/__exp10f on assorted systems.
+
+; ERR: no libcall available for fexp10
+
+define float @test_exp10_f32(float %x) {
+; LINUX-LABEL: test_exp10_f32:
+; LINUX: @ %bb.0:
+; LINUX-NEXT: b exp10f
+;
+; APPLE-LABEL: test_exp10_f32:
+; APPLE: @ %bb.0:
+; APPLE-NEXT: b ___exp10f
+ %ret = call float @llvm.exp10.f32(float %x)
+ ret float %ret
+}
+
+define double @test_exp10_f64(double %x) {
+; LINUX-LABEL: test_exp10_f64:
+; LINUX: @ %bb.0:
+; LINUX-NEXT: b exp10
+;
+; APPLE-LABEL: test_exp10_f64:
+; APPLE: @ %bb.0:
+; APPLE-NEXT: b ___exp10
+ %ret = call double @llvm.exp10.f64(double %x)
+ ret double %ret
+}
diff --git a/llvm/test/CodeGen/ARM/frem-power2.ll b/llvm/test/CodeGen/ARM/frem-power2.ll
index 7f52943175ac..71c2c09c0105 100644
--- a/llvm/test/CodeGen/ARM/frem-power2.ll
+++ b/llvm/test/CodeGen/ARM/frem-power2.ll
@@ -14,13 +14,29 @@ define float @frem4(float %x) {
;
; CHECK-FP-LABEL: frem4:
; CHECK-FP: @ %bb.0: @ %entry
-; CHECK-FP-NEXT: mov.w r1, #1082130432
-; CHECK-FP-NEXT: b fmodf
+; CHECK-FP-NEXT: vmov.f32 s0, #4.000000e+00
+; CHECK-FP-NEXT: vmov s2, r0
+; CHECK-FP-NEXT: lsrs r0, r0, #31
+; CHECK-FP-NEXT: vdiv.f32 s4, s2, s0
+; CHECK-FP-NEXT: vrintz.f32 s4, s4
+; CHECK-FP-NEXT: vfms.f32 s2, s4, s0
+; CHECK-FP-NEXT: vmov r1, s2
+; CHECK-FP-NEXT: bfi r1, r0, #31, #1
+; CHECK-FP-NEXT: mov r0, r1
+; CHECK-FP-NEXT: bx lr
;
; CHECK-M33-LABEL: frem4:
; CHECK-M33: @ %bb.0: @ %entry
-; CHECK-M33-NEXT: mov.w r1, #1082130432
-; CHECK-M33-NEXT: b fmodf
+; CHECK-M33-NEXT: vmov.f32 s0, #4.000000e+00
+; CHECK-M33-NEXT: vmov s2, r0
+; CHECK-M33-NEXT: lsrs r0, r0, #31
+; CHECK-M33-NEXT: vdiv.f32 s4, s2, s0
+; CHECK-M33-NEXT: vrintz.f32 s4, s4
+; CHECK-M33-NEXT: vmls.f32 s2, s4, s0
+; CHECK-M33-NEXT: vmov r1, s2
+; CHECK-M33-NEXT: bfi r1, r0, #31, #1
+; CHECK-M33-NEXT: mov r0, r1
+; CHECK-M33-NEXT: bx lr
entry:
%fmod = frem float %x, 4.0
ret float %fmod
diff --git a/llvm/test/CodeGen/BPF/xadd.ll b/llvm/test/CodeGen/BPF/xadd.ll
index 4901d9380ac4..5aeeb9baf7b8 100644
--- a/llvm/test/CodeGen/BPF/xadd.ll
+++ b/llvm/test/CodeGen/BPF/xadd.ll
@@ -22,7 +22,7 @@ entry:
call void @llvm.dbg.value(metadata ptr %ptr, metadata !13, metadata !DIExpression()), !dbg !15
%0 = atomicrmw add ptr %ptr, i32 4 seq_cst, !dbg !16
%1 = atomicrmw add ptr %ptr, i32 6 seq_cst, !dbg !17
-; CHECK: line 4: Invalid usage of the XADD return value
+; CHECK: in function test i32 (ptr): Invalid usage of the XADD return value
call void @llvm.dbg.value(metadata i32 %1, metadata !14, metadata !DIExpression()), !dbg !18
ret i32 %1, !dbg !19
}
diff --git a/llvm/test/CodeGen/Hexagon/readsteadycounter.ll b/llvm/test/CodeGen/Hexagon/readsteadycounter.ll
new file mode 100644
index 000000000000..5a78552117d9
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/readsteadycounter.ll
@@ -0,0 +1,11 @@
+
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; CHECK-LABEL: test_readsteadycounter
+; CHECK: r1:0 = c31:30
+define i64 @test_readsteadycounter() nounwind {
+ %t0 = call i64 @llvm.readsteadycounter()
+ ret i64 %t0
+}
+
+declare i64 @llvm.readsteadycounter()
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll
index 2064c398948f..ab3eec240db3 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll
@@ -191,8 +191,7 @@ define signext i32 @sdiv_si32_ui32_ui32(i32 %a, i32 %b) {
; LA64: # %bb.0: # %entry
; LA64-NEXT: addi.w $a1, $a1, 0
; LA64-NEXT: addi.w $a0, $a0, 0
-; LA64-NEXT: div.d $a0, $a0, $a1
-; LA64-NEXT: addi.w $a0, $a0, 0
+; LA64-NEXT: div.w $a0, $a0, $a1
; LA64-NEXT: ret
;
; LA32-TRAP-LABEL: sdiv_si32_ui32_ui32:
@@ -208,12 +207,11 @@ define signext i32 @sdiv_si32_ui32_ui32(i32 %a, i32 %b) {
; LA64-TRAP: # %bb.0: # %entry
; LA64-TRAP-NEXT: addi.w $a1, $a1, 0
; LA64-TRAP-NEXT: addi.w $a0, $a0, 0
-; LA64-TRAP-NEXT: div.d $a0, $a0, $a1
+; LA64-TRAP-NEXT: div.w $a0, $a0, $a1
; LA64-TRAP-NEXT: bnez $a1, .LBB5_2
; LA64-TRAP-NEXT: # %bb.1: # %entry
; LA64-TRAP-NEXT: break 7
; LA64-TRAP-NEXT: .LBB5_2: # %entry
-; LA64-TRAP-NEXT: addi.w $a0, $a0, 0
; LA64-TRAP-NEXT: ret
entry:
%r = sdiv i32 %a, %b
@@ -228,8 +226,7 @@ define signext i32 @sdiv_si32_si32_si32(i32 signext %a, i32 signext %b) {
;
; LA64-LABEL: sdiv_si32_si32_si32:
; LA64: # %bb.0: # %entry
-; LA64-NEXT: div.d $a0, $a0, $a1
-; LA64-NEXT: addi.w $a0, $a0, 0
+; LA64-NEXT: div.w $a0, $a0, $a1
; LA64-NEXT: ret
;
; LA32-TRAP-LABEL: sdiv_si32_si32_si32:
@@ -243,12 +240,11 @@ define signext i32 @sdiv_si32_si32_si32(i32 signext %a, i32 signext %b) {
;
; LA64-TRAP-LABEL: sdiv_si32_si32_si32:
; LA64-TRAP: # %bb.0: # %entry
-; LA64-TRAP-NEXT: div.d $a0, $a0, $a1
+; LA64-TRAP-NEXT: div.w $a0, $a0, $a1
; LA64-TRAP-NEXT: bnez $a1, .LBB6_2
; LA64-TRAP-NEXT: # %bb.1: # %entry
; LA64-TRAP-NEXT: break 7
; LA64-TRAP-NEXT: .LBB6_2: # %entry
-; LA64-TRAP-NEXT: addi.w $a0, $a0, 0
; LA64-TRAP-NEXT: ret
entry:
%r = sdiv i32 %a, %b
@@ -407,9 +403,9 @@ define i32 @udiv_i32(i32 %a, i32 %b) {
;
; LA64-LABEL: udiv_i32:
; LA64: # %bb.0: # %entry
-; LA64-NEXT: bstrpick.d $a1, $a1, 31, 0
-; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
-; LA64-NEXT: div.du $a0, $a0, $a1
+; LA64-NEXT: addi.w $a1, $a1, 0
+; LA64-NEXT: addi.w $a0, $a0, 0
+; LA64-NEXT: div.wu $a0, $a0, $a1
; LA64-NEXT: ret
;
; LA32-TRAP-LABEL: udiv_i32:
@@ -423,9 +419,9 @@ define i32 @udiv_i32(i32 %a, i32 %b) {
;
; LA64-TRAP-LABEL: udiv_i32:
; LA64-TRAP: # %bb.0: # %entry
-; LA64-TRAP-NEXT: bstrpick.d $a1, $a1, 31, 0
-; LA64-TRAP-NEXT: bstrpick.d $a0, $a0, 31, 0
-; LA64-TRAP-NEXT: div.du $a0, $a0, $a1
+; LA64-TRAP-NEXT: addi.w $a1, $a1, 0
+; LA64-TRAP-NEXT: addi.w $a0, $a0, 0
+; LA64-TRAP-NEXT: div.wu $a0, $a0, $a1
; LA64-TRAP-NEXT: bnez $a1, .LBB11_2
; LA64-TRAP-NEXT: # %bb.1: # %entry
; LA64-TRAP-NEXT: break 7
@@ -444,9 +440,7 @@ define i32 @udiv_ui32_si32_si32(i32 signext %a, i32 signext %b) {
;
; LA64-LABEL: udiv_ui32_si32_si32:
; LA64: # %bb.0: # %entry
-; LA64-NEXT: bstrpick.d $a1, $a1, 31, 0
-; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
-; LA64-NEXT: div.du $a0, $a0, $a1
+; LA64-NEXT: div.wu $a0, $a0, $a1
; LA64-NEXT: ret
;
; LA32-TRAP-LABEL: udiv_ui32_si32_si32:
@@ -460,9 +454,7 @@ define i32 @udiv_ui32_si32_si32(i32 signext %a, i32 signext %b) {
;
; LA64-TRAP-LABEL: udiv_ui32_si32_si32:
; LA64-TRAP: # %bb.0: # %entry
-; LA64-TRAP-NEXT: bstrpick.d $a1, $a1, 31, 0
-; LA64-TRAP-NEXT: bstrpick.d $a0, $a0, 31, 0
-; LA64-TRAP-NEXT: div.du $a0, $a0, $a1
+; LA64-TRAP-NEXT: div.wu $a0, $a0, $a1
; LA64-TRAP-NEXT: bnez $a1, .LBB12_2
; LA64-TRAP-NEXT: # %bb.1: # %entry
; LA64-TRAP-NEXT: break 7
@@ -481,10 +473,9 @@ define signext i32 @udiv_si32_ui32_ui32(i32 %a, i32 %b) {
;
; LA64-LABEL: udiv_si32_ui32_ui32:
; LA64: # %bb.0: # %entry
-; LA64-NEXT: bstrpick.d $a1, $a1, 31, 0
-; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
-; LA64-NEXT: div.du $a0, $a0, $a1
+; LA64-NEXT: addi.w $a1, $a1, 0
; LA64-NEXT: addi.w $a0, $a0, 0
+; LA64-NEXT: div.wu $a0, $a0, $a1
; LA64-NEXT: ret
;
; LA32-TRAP-LABEL: udiv_si32_ui32_ui32:
@@ -498,14 +489,13 @@ define signext i32 @udiv_si32_ui32_ui32(i32 %a, i32 %b) {
;
; LA64-TRAP-LABEL: udiv_si32_ui32_ui32:
; LA64-TRAP: # %bb.0: # %entry
-; LA64-TRAP-NEXT: bstrpick.d $a1, $a1, 31, 0
-; LA64-TRAP-NEXT: bstrpick.d $a0, $a0, 31, 0
-; LA64-TRAP-NEXT: div.du $a0, $a0, $a1
+; LA64-TRAP-NEXT: addi.w $a1, $a1, 0
+; LA64-TRAP-NEXT: addi.w $a0, $a0, 0
+; LA64-TRAP-NEXT: div.wu $a0, $a0, $a1
; LA64-TRAP-NEXT: bnez $a1, .LBB13_2
; LA64-TRAP-NEXT: # %bb.1: # %entry
; LA64-TRAP-NEXT: break 7
; LA64-TRAP-NEXT: .LBB13_2: # %entry
-; LA64-TRAP-NEXT: addi.w $a0, $a0, 0
; LA64-TRAP-NEXT: ret
entry:
%r = udiv i32 %a, %b
@@ -520,10 +510,7 @@ define signext i32 @udiv_si32_si32_si32(i32 signext %a, i32 signext %b) {
;
; LA64-LABEL: udiv_si32_si32_si32:
; LA64: # %bb.0: # %entry
-; LA64-NEXT: bstrpick.d $a1, $a1, 31, 0
-; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
-; LA64-NEXT: div.du $a0, $a0, $a1
-; LA64-NEXT: addi.w $a0, $a0, 0
+; LA64-NEXT: div.wu $a0, $a0, $a1
; LA64-NEXT: ret
;
; LA32-TRAP-LABEL: udiv_si32_si32_si32:
@@ -537,14 +524,11 @@ define signext i32 @udiv_si32_si32_si32(i32 signext %a, i32 signext %b) {
;
; LA64-TRAP-LABEL: udiv_si32_si32_si32:
; LA64-TRAP: # %bb.0: # %entry
-; LA64-TRAP-NEXT: bstrpick.d $a1, $a1, 31, 0
-; LA64-TRAP-NEXT: bstrpick.d $a0, $a0, 31, 0
-; LA64-TRAP-NEXT: div.du $a0, $a0, $a1
+; LA64-TRAP-NEXT: div.wu $a0, $a0, $a1
; LA64-TRAP-NEXT: bnez $a1, .LBB14_2
; LA64-TRAP-NEXT: # %bb.1: # %entry
; LA64-TRAP-NEXT: break 7
; LA64-TRAP-NEXT: .LBB14_2: # %entry
-; LA64-TRAP-NEXT: addi.w $a0, $a0, 0
; LA64-TRAP-NEXT: ret
entry:
%r = udiv i32 %a, %b
@@ -995,9 +979,9 @@ define i32 @urem_i32(i32 %a, i32 %b) {
;
; LA64-LABEL: urem_i32:
; LA64: # %bb.0: # %entry
-; LA64-NEXT: bstrpick.d $a1, $a1, 31, 0
-; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
-; LA64-NEXT: mod.du $a0, $a0, $a1
+; LA64-NEXT: addi.w $a1, $a1, 0
+; LA64-NEXT: addi.w $a0, $a0, 0
+; LA64-NEXT: mod.wu $a0, $a0, $a1
; LA64-NEXT: ret
;
; LA32-TRAP-LABEL: urem_i32:
@@ -1011,9 +995,9 @@ define i32 @urem_i32(i32 %a, i32 %b) {
;
; LA64-TRAP-LABEL: urem_i32:
; LA64-TRAP: # %bb.0: # %entry
-; LA64-TRAP-NEXT: bstrpick.d $a1, $a1, 31, 0
-; LA64-TRAP-NEXT: bstrpick.d $a0, $a0, 31, 0
-; LA64-TRAP-NEXT: mod.du $a0, $a0, $a1
+; LA64-TRAP-NEXT: addi.w $a1, $a1, 0
+; LA64-TRAP-NEXT: addi.w $a0, $a0, 0
+; LA64-TRAP-NEXT: mod.wu $a0, $a0, $a1
; LA64-TRAP-NEXT: bnez $a1, .LBB27_2
; LA64-TRAP-NEXT: # %bb.1: # %entry
; LA64-TRAP-NEXT: break 7
@@ -1032,9 +1016,7 @@ define i32 @urem_ui32_si32_si32(i32 signext %a, i32 signext %b) {
;
; LA64-LABEL: urem_ui32_si32_si32:
; LA64: # %bb.0: # %entry
-; LA64-NEXT: bstrpick.d $a1, $a1, 31, 0
-; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
-; LA64-NEXT: mod.du $a0, $a0, $a1
+; LA64-NEXT: mod.wu $a0, $a0, $a1
; LA64-NEXT: ret
;
; LA32-TRAP-LABEL: urem_ui32_si32_si32:
@@ -1048,9 +1030,7 @@ define i32 @urem_ui32_si32_si32(i32 signext %a, i32 signext %b) {
;
; LA64-TRAP-LABEL: urem_ui32_si32_si32:
; LA64-TRAP: # %bb.0: # %entry
-; LA64-TRAP-NEXT: bstrpick.d $a1, $a1, 31, 0
-; LA64-TRAP-NEXT: bstrpick.d $a0, $a0, 31, 0
-; LA64-TRAP-NEXT: mod.du $a0, $a0, $a1
+; LA64-TRAP-NEXT: mod.wu $a0, $a0, $a1
; LA64-TRAP-NEXT: bnez $a1, .LBB28_2
; LA64-TRAP-NEXT: # %bb.1: # %entry
; LA64-TRAP-NEXT: break 7
@@ -1069,10 +1049,9 @@ define signext i32 @urem_si32_ui32_ui32(i32 %a, i32 %b) {
;
; LA64-LABEL: urem_si32_ui32_ui32:
; LA64: # %bb.0: # %entry
-; LA64-NEXT: bstrpick.d $a1, $a1, 31, 0
-; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
-; LA64-NEXT: mod.du $a0, $a0, $a1
+; LA64-NEXT: addi.w $a1, $a1, 0
; LA64-NEXT: addi.w $a0, $a0, 0
+; LA64-NEXT: mod.wu $a0, $a0, $a1
; LA64-NEXT: ret
;
; LA32-TRAP-LABEL: urem_si32_ui32_ui32:
@@ -1086,14 +1065,13 @@ define signext i32 @urem_si32_ui32_ui32(i32 %a, i32 %b) {
;
; LA64-TRAP-LABEL: urem_si32_ui32_ui32:
; LA64-TRAP: # %bb.0: # %entry
-; LA64-TRAP-NEXT: bstrpick.d $a1, $a1, 31, 0
-; LA64-TRAP-NEXT: bstrpick.d $a0, $a0, 31, 0
-; LA64-TRAP-NEXT: mod.du $a0, $a0, $a1
+; LA64-TRAP-NEXT: addi.w $a1, $a1, 0
+; LA64-TRAP-NEXT: addi.w $a0, $a0, 0
+; LA64-TRAP-NEXT: mod.wu $a0, $a0, $a1
; LA64-TRAP-NEXT: bnez $a1, .LBB29_2
; LA64-TRAP-NEXT: # %bb.1: # %entry
; LA64-TRAP-NEXT: break 7
; LA64-TRAP-NEXT: .LBB29_2: # %entry
-; LA64-TRAP-NEXT: addi.w $a0, $a0, 0
; LA64-TRAP-NEXT: ret
entry:
%r = urem i32 %a, %b
@@ -1108,10 +1086,7 @@ define signext i32 @urem_si32_si32_si32(i32 signext %a, i32 signext %b) {
;
; LA64-LABEL: urem_si32_si32_si32:
; LA64: # %bb.0: # %entry
-; LA64-NEXT: bstrpick.d $a1, $a1, 31, 0
-; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
-; LA64-NEXT: mod.du $a0, $a0, $a1
-; LA64-NEXT: addi.w $a0, $a0, 0
+; LA64-NEXT: mod.wu $a0, $a0, $a1
; LA64-NEXT: ret
;
; LA32-TRAP-LABEL: urem_si32_si32_si32:
@@ -1125,14 +1100,11 @@ define signext i32 @urem_si32_si32_si32(i32 signext %a, i32 signext %b) {
;
; LA64-TRAP-LABEL: urem_si32_si32_si32:
; LA64-TRAP: # %bb.0: # %entry
-; LA64-TRAP-NEXT: bstrpick.d $a1, $a1, 31, 0
-; LA64-TRAP-NEXT: bstrpick.d $a0, $a0, 31, 0
-; LA64-TRAP-NEXT: mod.du $a0, $a0, $a1
+; LA64-TRAP-NEXT: mod.wu $a0, $a0, $a1
; LA64-TRAP-NEXT: bnez $a1, .LBB30_2
; LA64-TRAP-NEXT: # %bb.1: # %entry
; LA64-TRAP-NEXT: break 7
; LA64-TRAP-NEXT: .LBB30_2: # %entry
-; LA64-TRAP-NEXT: addi.w $a0, $a0, 0
; LA64-TRAP-NEXT: ret
entry:
%r = urem i32 %a, %b
diff --git a/llvm/test/CodeGen/Mips/mipsr6-minmaxnum.ll b/llvm/test/CodeGen/Mips/mipsr6-minmaxnum.ll
index e14e89916e6d..2a0ad07474c0 100644
--- a/llvm/test/CodeGen/Mips/mipsr6-minmaxnum.ll
+++ b/llvm/test/CodeGen/Mips/mipsr6-minmaxnum.ll
@@ -6,13 +6,17 @@
define float @mins(float %x, float %y) {
; MIPS32R6EL-LABEL: mins
; MIPS32R6EL: # %bb.0:
+; MIPS32R6EL-NEXT: min.s $f0, $f14, $f14
+; MIPS32R6EL-NEXT: min.s $f1, $f12, $f12
; MIPS32R6EL-NEXT: jr $ra
-; MIPS32R6EL-NEXT: min.s $f0, $f12, $f14
+; MIPS32R6EL-NEXT: min.s $f0, $f1, $f0
;
; MIPS64R6EL-LABEL: mins
; MIPS64R6EL: # %bb.0:
+; MIPS64R6EL-NEXT: min.s $f0, $f13, $f13
+; MIPS64R6EL-NEXT: min.s $f1, $f12, $f12
; MIPS64R6EL-NEXT: jr $ra
-; MIPS64R6EL-NEXT: min.s $f0, $f12, $f13
+; MIPS64R6EL-NEXT: min.s $f0, $f1, $f0
%r = tail call float @llvm.minnum.f32(float %x, float %y)
 ret float %r
}
@@ -21,13 +25,17 @@ define float @mins(float %x, float %y) {
define float @maxs(float %x, float %y) {
; MIPS32R6EL-LABEL: maxs
; MIPS32R6EL: # %bb.0:
+; MIPS32R6EL-NEXT: min.s $f0, $f14, $f14
+; MIPS32R6EL-NEXT: min.s $f1, $f12, $f12
; MIPS32R6EL-NEXT: jr $ra
-; MIPS32R6EL-NEXT: max.s $f0, $f12, $f14
+; MIPS32R6EL-NEXT: max.s $f0, $f1, $f0
;
; MIPS64R6EL-LABEL: maxs
; MIPS64R6EL: # %bb.0:
+; MIPS64R6EL-NEXT: min.s $f0, $f13, $f13
+; MIPS64R6EL-NEXT: min.s $f1, $f12, $f12
; MIPS64R6EL-NEXT: jr $ra
-; MIPS64R6EL-NEXT: max.s $f0, $f12, $f13
+; MIPS64R6EL-NEXT: max.s $f0, $f1, $f0
%r = tail call float @llvm.maxnum.f32(float %x, float %y)
 ret float %r
}
@@ -36,13 +44,17 @@ define float @maxs(float %x, float %y) {
define double @mind(double %x, double %y) {
; MIPS32R6EL-LABEL: mind
; MIPS32R6EL: # %bb.0:
+; MIPS32R6EL-NEXT: min.d $f0, $f14, $f14
+; MIPS32R6EL-NEXT: min.d $f1, $f12, $f12
; MIPS32R6EL-NEXT: jr $ra
-; MIPS32R6EL-NEXT: min.d $f0, $f12, $f14
+; MIPS32R6EL-NEXT: min.d $f0, $f1, $f0
;
; MIPS64R6EL-LABEL: mind
; MIPS64R6EL: # %bb.0:
+; MIPS64R6EL-NEXT: min.d $f0, $f13, $f13
+; MIPS64R6EL-NEXT: min.d $f1, $f12, $f12
; MIPS64R6EL-NEXT: jr $ra
-; MIPS64R6EL-NEXT: min.d $f0, $f12, $f13
+; MIPS64R6EL-NEXT: min.d $f0, $f1, $f0
%r = tail call double @llvm.minnum.f64(double %x, double %y)
 ret double %r
}
@@ -51,13 +63,17 @@ define double @mind(double %x, double %y) {
define double @maxd(double %x, double %y) {
; MIPS32R6EL-LABEL: maxd
; MIPS32R6EL: # %bb.0:
+; MIPS32R6EL-NEXT: min.d $f0, $f14, $f14
+; MIPS32R6EL-NEXT: min.d $f1, $f12, $f12
; MIPS32R6EL-NEXT: jr $ra
-; MIPS32R6EL-NEXT: max.d $f0, $f12, $f14
+; MIPS32R6EL-NEXT: max.d $f0, $f1, $f0
;
; MIPS64R6EL-LABEL: maxd
; MIPS64R6EL: # %bb.0:
+; MIPS64R6EL-NEXT: min.d $f0, $f13, $f13
+; MIPS64R6EL-NEXT: min.d $f1, $f12, $f12
; MIPS64R6EL-NEXT: jr $ra
-; MIPS64R6EL-NEXT: max.d $f0, $f12, $f13
+; MIPS64R6EL-NEXT: max.d $f0, $f1, $f0
%r = tail call double @llvm.maxnum.f64(double %x, double %y)
 ret double %r
}
diff --git a/llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll b/llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll
index fe68bee408fc..42b0f69181ab 100644
--- a/llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll
+++ b/llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll
@@ -2466,13 +2466,14 @@ define void @fminnum(float %b) {
; MIPSR6-O32-NEXT: lui $2, %hi(_gp_disp)
; MIPSR6-O32-NEXT: addiu $2, $2, %lo(_gp_disp)
; MIPSR6-O32-NEXT: addu $1, $2, $25
+; MIPSR6-O32-NEXT: min.s $f0, $f12, $f12
; MIPSR6-O32-NEXT: lw $1, %got(g)($1)
; MIPSR6-O32-NEXT: lh $2, 0($1)
-; MIPSR6-O32-NEXT: fill.h $w0, $2
-; MIPSR6-O32-NEXT: fexupr.w $w0, $w0
-; MIPSR6-O32-NEXT: copy_s.w $2, $w0[0]
-; MIPSR6-O32-NEXT: mtc1 $2, $f0
-; MIPSR6-O32-NEXT: min.s $f0, $f0, $f12
+; MIPSR6-O32-NEXT: fill.h $w1, $2
+; MIPSR6-O32-NEXT: fexupr.w $w1, $w1
+; MIPSR6-O32-NEXT: copy_s.w $2, $w1[0]
+; MIPSR6-O32-NEXT: mtc1 $2, $f1
+; MIPSR6-O32-NEXT: min.s $f0, $f1, $f0
; MIPSR6-O32-NEXT: mfc1 $2, $f0
; MIPSR6-O32-NEXT: fill.w $w0, $2
; MIPSR6-O32-NEXT: fexdo.h $w0, $w0, $w0
@@ -2485,13 +2486,14 @@ define void @fminnum(float %b) {
; MIPSR6-N32-NEXT: lui $1, %hi(%neg(%gp_rel(fminnum)))
; MIPSR6-N32-NEXT: addu $1, $1, $25
; MIPSR6-N32-NEXT: addiu $1, $1, %lo(%neg(%gp_rel(fminnum)))
+; MIPSR6-N32-NEXT: min.s $f0, $f12, $f12
; MIPSR6-N32-NEXT: lw $1, %got_disp(g)($1)
; MIPSR6-N32-NEXT: lh $2, 0($1)
-; MIPSR6-N32-NEXT: fill.h $w0, $2
-; MIPSR6-N32-NEXT: fexupr.w $w0, $w0
-; MIPSR6-N32-NEXT: copy_s.w $2, $w0[0]
-; MIPSR6-N32-NEXT: mtc1 $2, $f0
-; MIPSR6-N32-NEXT: min.s $f0, $f0, $f12
+; MIPSR6-N32-NEXT: fill.h $w1, $2
+; MIPSR6-N32-NEXT: fexupr.w $w1, $w1
+; MIPSR6-N32-NEXT: copy_s.w $2, $w1[0]
+; MIPSR6-N32-NEXT: mtc1 $2, $f1
+; MIPSR6-N32-NEXT: min.s $f0, $f1, $f0
; MIPSR6-N32-NEXT: mfc1 $2, $f0
; MIPSR6-N32-NEXT: fill.w $w0, $2
; MIPSR6-N32-NEXT: fexdo.h $w0, $w0, $w0
@@ -2504,20 +2506,20 @@ define void @fminnum(float %b) {
; MIPSR6-N64-NEXT: lui $1, %hi(%neg(%gp_rel(fminnum)))
; MIPSR6-N64-NEXT: daddu $1, $1, $25
; MIPSR6-N64-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(fminnum)))
+; MIPSR6-N64-NEXT: min.s $f0, $f12, $f12
; MIPSR6-N64-NEXT: ld $1, %got_disp(g)($1)
; MIPSR6-N64-NEXT: lh $2, 0($1)
-; MIPSR6-N64-NEXT: fill.h $w0, $2
-; MIPSR6-N64-NEXT: fexupr.w $w0, $w0
-; MIPSR6-N64-NEXT: copy_s.w $2, $w0[0]
-; MIPSR6-N64-NEXT: mtc1 $2, $f0
-; MIPSR6-N64-NEXT: min.s $f0, $f0, $f12
+; MIPSR6-N64-NEXT: fill.h $w1, $2
+; MIPSR6-N64-NEXT: fexupr.w $w1, $w1
+; MIPSR6-N64-NEXT: copy_s.w $2, $w1[0]
+; MIPSR6-N64-NEXT: mtc1 $2, $f1
+; MIPSR6-N64-NEXT: min.s $f0, $f1, $f0
; MIPSR6-N64-NEXT: mfc1 $2, $f0
; MIPSR6-N64-NEXT: fill.w $w0, $2
; MIPSR6-N64-NEXT: fexdo.h $w0, $w0, $w0
; MIPSR6-N64-NEXT: copy_u.h $2, $w0[0]
; MIPSR6-N64-NEXT: jr $ra
; MIPSR6-N64-NEXT: sh $2, 0($1)
-;
entry:
%0 = load i16, ptr @g, align 2
%1 = call float @llvm.convert.from.fp16.f32(i16 %0)
@@ -2632,17 +2634,18 @@ define void @fmaxnum(float %b) {
; MIPS64R5-N64-NEXT: daddiu $sp, $sp, 32
;
; MIPSR6-O32-LABEL: fmaxnum:
-; MIPSR6-O32: # %bb.0:
+; MIPSR6-O32: # %bb.0: # %entry
; MIPSR6-O32-NEXT: lui $2, %hi(_gp_disp)
; MIPSR6-O32-NEXT: addiu $2, $2, %lo(_gp_disp)
; MIPSR6-O32-NEXT: addu $1, $2, $25
+; MIPSR6-O32-NEXT: min.s $f0, $f12, $f12
; MIPSR6-O32-NEXT: lw $1, %got(g)($1)
; MIPSR6-O32-NEXT: lh $2, 0($1)
-; MIPSR6-O32-NEXT: fill.h $w0, $2
-; MIPSR6-O32-NEXT: fexupr.w $w0, $w0
-; MIPSR6-O32-NEXT: copy_s.w $2, $w0[0]
-; MIPSR6-O32-NEXT: mtc1 $2, $f0
-; MIPSR6-O32-NEXT: max.s $f0, $f0, $f12
+; MIPSR6-O32-NEXT: fill.h $w1, $2
+; MIPSR6-O32-NEXT: fexupr.w $w1, $w1
+; MIPSR6-O32-NEXT: copy_s.w $2, $w1[0]
+; MIPSR6-O32-NEXT: mtc1 $2, $f1
+; MIPSR6-O32-NEXT: max.s $f0, $f1, $f0
; MIPSR6-O32-NEXT: mfc1 $2, $f0
; MIPSR6-O32-NEXT: fill.w $w0, $2
; MIPSR6-O32-NEXT: fexdo.h $w0, $w0, $w0
@@ -2651,17 +2654,18 @@ define void @fmaxnum(float %b) {
; MIPSR6-O32-NEXT: sh $2, 0($1)
;
; MIPSR6-N32-LABEL: fmaxnum:
-; MIPSR6-N32: # %bb.0:
+; MIPSR6-N32: # %bb.0: # %entry
; MIPSR6-N32-NEXT: lui $1, %hi(%neg(%gp_rel(fmaxnum)))
; MIPSR6-N32-NEXT: addu $1, $1, $25
; MIPSR6-N32-NEXT: addiu $1, $1, %lo(%neg(%gp_rel(fmaxnum)))
+; MIPSR6-N32-NEXT: min.s $f0, $f12, $f12
; MIPSR6-N32-NEXT: lw $1, %got_disp(g)($1)
; MIPSR6-N32-NEXT: lh $2, 0($1)
-; MIPSR6-N32-NEXT: fill.h $w0, $2
-; MIPSR6-N32-NEXT: fexupr.w $w0, $w0
-; MIPSR6-N32-NEXT: copy_s.w $2, $w0[0]
-; MIPSR6-N32-NEXT: mtc1 $2, $f0
-; MIPSR6-N32-NEXT: max.s $f0, $f0, $f12
+; MIPSR6-N32-NEXT: fill.h $w1, $2
+; MIPSR6-N32-NEXT: fexupr.w $w1, $w1
+; MIPSR6-N32-NEXT: copy_s.w $2, $w1[0]
+; MIPSR6-N32-NEXT: mtc1 $2, $f1
+; MIPSR6-N32-NEXT: max.s $f0, $f1, $f0
; MIPSR6-N32-NEXT: mfc1 $2, $f0
; MIPSR6-N32-NEXT: fill.w $w0, $2
; MIPSR6-N32-NEXT: fexdo.h $w0, $w0, $w0
@@ -2670,17 +2674,18 @@ define void @fmaxnum(float %b) {
; MIPSR6-N32-NEXT: sh $2, 0($1)
;
; MIPSR6-N64-LABEL: fmaxnum:
-; MIPSR6-N64: # %bb.0:
+; MIPSR6-N64: # %bb.0: # %entry
; MIPSR6-N64-NEXT: lui $1, %hi(%neg(%gp_rel(fmaxnum)))
; MIPSR6-N64-NEXT: daddu $1, $1, $25
; MIPSR6-N64-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(fmaxnum)))
+; MIPSR6-N64-NEXT: min.s $f0, $f12, $f12
; MIPSR6-N64-NEXT: ld $1, %got_disp(g)($1)
; MIPSR6-N64-NEXT: lh $2, 0($1)
-; MIPSR6-N64-NEXT: fill.h $w0, $2
-; MIPSR6-N64-NEXT: fexupr.w $w0, $w0
-; MIPSR6-N64-NEXT: copy_s.w $2, $w0[0]
-; MIPSR6-N64-NEXT: mtc1 $2, $f0
-; MIPSR6-N64-NEXT: max.s $f0, $f0, $f12
+; MIPSR6-N64-NEXT: fill.h $w1, $2
+; MIPSR6-N64-NEXT: fexupr.w $w1, $w1
+; MIPSR6-N64-NEXT: copy_s.w $2, $w1[0]
+; MIPSR6-N64-NEXT: mtc1 $2, $f1
+; MIPSR6-N64-NEXT: max.s $f0, $f1, $f0
; MIPSR6-N64-NEXT: mfc1 $2, $f0
; MIPSR6-N64-NEXT: fill.w $w0, $2
; MIPSR6-N64-NEXT: fexdo.h $w0, $w0, $w0
diff --git a/llvm/test/CodeGen/Mips/msa/inline-asm.ll b/llvm/test/CodeGen/Mips/msa/inline-asm.ll
index 57cd78a25647..f84b11e05387 100644
--- a/llvm/test/CodeGen/Mips/msa/inline-asm.ll
+++ b/llvm/test/CodeGen/Mips/msa/inline-asm.ll
@@ -32,3 +32,19 @@ entry:
store <4 x i32> %1, ptr @v4i32_r
ret void
}
+
+define dso_local double @test4(double noundef %a, double noundef %b, double noundef %c) {
+entry:
+ ; CHECK-LABEL: test4:
+ %0 = tail call double asm sideeffect "fmadd.d ${0:w}, ${1:w}, ${2:w}", "=f,f,f,0,~{$1}"(double %b, double %c, double %a)
+ ; CHECK: fmadd.d $w{{([0-9]|[1-3][0-9])}}, $w{{([0-9]|[1-3][0-9])}}, $w{{([0-9]|[1-3][0-9])}}
+ ret double %0
+}
+
+define dso_local float @test5(float noundef %a, float noundef %b, float noundef %c) {
+entry:
+ ; CHECK-LABEL: test5:
+ %0 = tail call float asm sideeffect "fmadd.w ${0:w}, ${1:w}, ${2:w}", "=f,f,f,0,~{$1}"(float %b, float %c, float %a)
+ ; CHECK: fmadd.w $w{{([0-9]|[1-3][0-9])}}, $w{{([0-9]|[1-3][0-9])}}, $w{{([0-9]|[1-3][0-9])}}
+ ret float %0
+}
diff --git a/llvm/test/CodeGen/NVPTX/param-overalign.ll b/llvm/test/CodeGen/NVPTX/param-overalign.ll
index 63e706982f39..5c09bb8e1a5d 100644
--- a/llvm/test/CodeGen/NVPTX/param-overalign.ll
+++ b/llvm/test/CodeGen/NVPTX/param-overalign.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -march=nvptx | FileCheck %s
-; RUN: %if ptxas %{ llc < %s -march=nvptx -verify-machineinstrs | %ptxas-verify %}
+; RUN: %if ptxas && !ptxas-12.0 %{ llc < %s -march=nvptx -verify-machineinstrs | %ptxas-verify %}

target triple = "nvptx64-nvidia-cuda"

diff --git a/llvm/test/CodeGen/NVPTX/st-param-imm.ll b/llvm/test/CodeGen/NVPTX/st-param-imm.ll
new file mode 100644
index 000000000000..29f27c1ba6cd
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/st-param-imm.ll
@@ -0,0 +1,2002 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -march=nvptx64 | FileCheck %s
+; RUN: llc < %s -march=nvptx | FileCheck %s
+; RUN: %if ptxas && !ptxas-12.0 %{ llc < %s -march=nvptx -verify-machineinstrs | %ptxas-verify %}
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -verify-machineinstrs | %ptxas-verify %}
+
+target triple = "nvptx64-nvidia-cuda"
+
+%struct.A = type { i8, i16 }
+%struct.char2 = type { i8, i8 }
+%struct.char4 = type { i8, i8, i8, i8 }
+%struct.short2 = type { i16, i16 }
+%struct.short4 = type { i16, i16, i16, i16 }
+%struct.int2 = type { i32, i32 }
+%struct.int4 = type { i32, i32, i32, i32 }
+%struct.longlong2 = type { i64, i64 }
+%struct.float2 = type { float, float }
+%struct.float4 = type { float, float, float, float }
+%struct.double2 = type { double, double }
+
+define void @st_param_i8_i16() {
+; CHECK-LABEL: st_param_i8_i16(
+; CHECK: {
+; CHECK-EMPTY:
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: { // callseq 0, 0
+; CHECK-NEXT: .param .align 2 .b8 param0[4];
+; CHECK-NEXT: st.param.b8 [param0+0], 1;
+; CHECK-NEXT: st.param.b16 [param0+2], 2;
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_i8_i16,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 0
+; CHECK-NEXT: ret;
+ call void @call_i8_i16(%struct.A { i8 1, i16 2 })
+ ret void
+}
+
+define void @st_param_i32() {
+; CHECK-LABEL: st_param_i32(
+; CHECK: {
+; CHECK-EMPTY:
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: { // callseq 1, 0
+; CHECK-NEXT: .param .b32 param0;
+; CHECK-NEXT: st.param.b32 [param0+0], 3;
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_i32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 1
+; CHECK-NEXT: ret;
+ call void @call_i32(i32 3)
+ ret void
+}
+
+define void @st_param_i64() {
+; CHECK-LABEL: st_param_i64(
+; CHECK: {
+; CHECK-EMPTY:
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: { // callseq 2, 0
+; CHECK-NEXT: .param .b64 param0;
+; CHECK-NEXT: st.param.b64 [param0+0], 4;
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_i64,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 2
+; CHECK-NEXT: ret;
+ call void @call_i64(i64 4)
+ ret void
+}
+
+define void @st_param_f32() {
+; CHECK-LABEL: st_param_f32(
+; CHECK: {
+; CHECK-EMPTY:
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: { // callseq 3, 0
+; CHECK-NEXT: .param .b32 param0;
+; CHECK-NEXT: st.param.f32 [param0+0], 0f40A00000;
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_f32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 3
+; CHECK-NEXT: ret;
+ call void @call_f32(float 5.0)
+ ret void
+}
+
+define void @st_param_f64() {
+; CHECK-LABEL: st_param_f64(
+; CHECK: {
+; CHECK-EMPTY:
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: { // callseq 4, 0
+; CHECK-NEXT: .param .b64 param0;
+; CHECK-NEXT: st.param.f64 [param0+0], 0d4018000000000000;
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_f64,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 4
+; CHECK-NEXT: ret;
+ call void @call_f64(double 6.0)
+ ret void
+}
+
+declare void @call_i8_i16(%struct.A)
+declare void @call_i32(i32)
+declare void @call_i64(i64)
+declare void @call_f32(float)
+declare void @call_f64(double)
+
+define void @st_param_v2_i8_ii() {
+; CHECK-LABEL: st_param_v2_i8_ii(
+; CHECK: {
+; CHECK-EMPTY:
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: { // callseq 5, 0
+; CHECK-NEXT: .param .align 2 .b8 param0[2];
+; CHECK-NEXT: st.param.v2.b8 [param0+0], {1, 2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v2_i8,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 5
+; CHECK-NEXT: ret;
+ call void @call_v2_i8(%struct.char2 { i8 1, i8 2 })
+ ret void
+}
+define void @st_param_v2_i8_ir(i8 %val) {
+; CHECK-LABEL: st_param_v2_i8_ir(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v2_i8_ir_param_0];
+; CHECK-NEXT: { // callseq 6, 0
+; CHECK-NEXT: .param .align 2 .b8 param0[2];
+; CHECK-NEXT: st.param.v2.b8 [param0+0], {1, %rs1};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v2_i8,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 6
+; CHECK-NEXT: ret;
+ %struct.ir0 = insertvalue %struct.char2 poison, i8 1, 0
+ %struct.ir1 = insertvalue %struct.char2 %struct.ir0, i8 %val, 1
+ call void @call_v2_i8(%struct.char2 %struct.ir1)
+ ret void
+}
+define void @st_param_v2_i8_ri(i8 %val) {
+; CHECK-LABEL: st_param_v2_i8_ri(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v2_i8_ri_param_0];
+; CHECK-NEXT: { // callseq 7, 0
+; CHECK-NEXT: .param .align 2 .b8 param0[2];
+; CHECK-NEXT: st.param.v2.b8 [param0+0], {%rs1, 2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v2_i8,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 7
+; CHECK-NEXT: ret;
+ %struct.ri0 = insertvalue %struct.char2 poison, i8 %val, 0
+ %struct.ri1 = insertvalue %struct.char2 %struct.ri0, i8 2, 1
+ call void @call_v2_i8(%struct.char2 %struct.ri1)
+ ret void
+}
+
+define void @st_param_v2_i16_ii() {
+; CHECK-LABEL: st_param_v2_i16_ii(
+; CHECK: {
+; CHECK-EMPTY:
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: { // callseq 8, 0
+; CHECK-NEXT: .param .align 4 .b8 param0[4];
+; CHECK-NEXT: st.param.v2.b16 [param0+0], {1, 2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v2_i16,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 8
+; CHECK-NEXT: ret;
+ call void @call_v2_i16(%struct.short2 { i16 1, i16 2 })
+ ret void
+}
+define void @st_param_v2_i16_ir(i16 %val) {
+; CHECK-LABEL: st_param_v2_i16_ir(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v2_i16_ir_param_0];
+; CHECK-NEXT: { // callseq 9, 0
+; CHECK-NEXT: .param .align 4 .b8 param0[4];
+; CHECK-NEXT: st.param.v2.b16 [param0+0], {1, %rs1};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v2_i16,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 9
+; CHECK-NEXT: ret;
+ %struct.ir0 = insertvalue %struct.short2 poison, i16 1, 0
+ %struct.ir1 = insertvalue %struct.short2 %struct.ir0, i16 %val, 1
+ call void @call_v2_i16(%struct.short2 %struct.ir1)
+ ret void
+}
+define void @st_param_v2_i16_ri(i16 %val) {
+; CHECK-LABEL: st_param_v2_i16_ri(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v2_i16_ri_param_0];
+; CHECK-NEXT: { // callseq 10, 0
+; CHECK-NEXT: .param .align 4 .b8 param0[4];
+; CHECK-NEXT: st.param.v2.b16 [param0+0], {%rs1, 2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v2_i16,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 10
+; CHECK-NEXT: ret;
+ %struct.ri0 = insertvalue %struct.short2 poison, i16 %val, 0
+ %struct.ri1 = insertvalue %struct.short2 %struct.ri0, i16 2, 1
+ call void @call_v2_i16(%struct.short2 %struct.ri1)
+ ret void
+}
+
+define void @st_param_v2_i32_ii() {
+; CHECK-LABEL: st_param_v2_i32_ii(
+; CHECK: {
+; CHECK-EMPTY:
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: { // callseq 11, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v2.b32 [param0+0], {1, 2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v2_i32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 11
+; CHECK-NEXT: ret;
+ call void @call_v2_i32(%struct.int2 { i32 1, i32 2 })
+ ret void
+}
+define void @st_param_v2_i32_ir(i32 %val) {
+; CHECK-LABEL: st_param_v2_i32_ir(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [st_param_v2_i32_ir_param_0];
+; CHECK-NEXT: { // callseq 12, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v2.b32 [param0+0], {1, %r1};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v2_i32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 12
+; CHECK-NEXT: ret;
+ %struct.ir0 = insertvalue %struct.int2 poison, i32 1, 0
+ %struct.ir1 = insertvalue %struct.int2 %struct.ir0, i32 %val, 1
+ call void @call_v2_i32(%struct.int2 %struct.ir1)
+ ret void
+}
+define void @st_param_v2_i32_ri(i32 %val) {
+; CHECK-LABEL: st_param_v2_i32_ri(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [st_param_v2_i32_ri_param_0];
+; CHECK-NEXT: { // callseq 13, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v2.b32 [param0+0], {%r1, 2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v2_i32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 13
+; CHECK-NEXT: ret;
+ %struct.ri0 = insertvalue %struct.int2 poison, i32 %val, 0
+ %struct.ri1 = insertvalue %struct.int2 %struct.ri0, i32 2, 1
+ call void @call_v2_i32(%struct.int2 %struct.ri1)
+ ret void
+}
+
+define void @st_param_v2_i64_ii() {
+; CHECK-LABEL: st_param_v2_i64_ii(
+; CHECK: {
+; CHECK-EMPTY:
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: { // callseq 14, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v2.b64 [param0+0], {1, 2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v2_i64,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 14
+; CHECK-NEXT: ret;
+ call void @call_v2_i64(%struct.longlong2 { i64 1, i64 2 })
+ ret void
+}
+define void @st_param_v2_i64_ir(i64 %val) {
+; CHECK-LABEL: st_param_v2_i64_ir(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd1, [st_param_v2_i64_ir_param_0];
+; CHECK-NEXT: { // callseq 15, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v2.b64 [param0+0], {1, %rd1};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v2_i64,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 15
+; CHECK-NEXT: ret;
+ %struct.ir0 = insertvalue %struct.longlong2 poison, i64 1, 0
+ %struct.ir1 = insertvalue %struct.longlong2 %struct.ir0, i64 %val, 1
+ call void @call_v2_i64(%struct.longlong2 %struct.ir1)
+ ret void
+}
+define void @st_param_v2_i64_ri(i64 %val) {
+; CHECK-LABEL: st_param_v2_i64_ri(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd1, [st_param_v2_i64_ri_param_0];
+; CHECK-NEXT: { // callseq 16, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v2.b64 [param0+0], {%rd1, 2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v2_i64,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 16
+; CHECK-NEXT: ret;
+ %struct.ri0 = insertvalue %struct.longlong2 poison, i64 %val, 0
+ %struct.ri1 = insertvalue %struct.longlong2 %struct.ri0, i64 2, 1
+ call void @call_v2_i64(%struct.longlong2 %struct.ri1)
+ ret void
+}
+
+define void @st_param_v2_f32_ii(float %val) {
+; CHECK-LABEL: st_param_v2_f32_ii(
+; CHECK: {
+; CHECK-EMPTY:
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: { // callseq 17, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v2.f32 [param0+0], {0f3F800000, 0f40000000};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v2_f32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 17
+; CHECK-NEXT: ret;
+ call void @call_v2_f32(%struct.float2 { float 1.0, float 2.0 })
+ ret void
+}
+define void @st_param_v2_f32_ir(float %val) {
+; CHECK-LABEL: st_param_v2_f32_ir(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f32 %f1, [st_param_v2_f32_ir_param_0];
+; CHECK-NEXT: { // callseq 18, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v2.f32 [param0+0], {0f3F800000, %f1};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v2_f32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 18
+; CHECK-NEXT: ret;
+ %struct.ir0 = insertvalue %struct.float2 poison, float 1.0, 0
+ %struct.ir1 = insertvalue %struct.float2 %struct.ir0, float %val, 1
+ call void @call_v2_f32(%struct.float2 %struct.ir1)
+ ret void
+}
+define void @st_param_v2_f32_ri(float %val) {
+; CHECK-LABEL: st_param_v2_f32_ri(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f32 %f1, [st_param_v2_f32_ri_param_0];
+; CHECK-NEXT: { // callseq 19, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v2.f32 [param0+0], {%f1, 0f40000000};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v2_f32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 19
+; CHECK-NEXT: ret;
+ %struct.ri0 = insertvalue %struct.float2 poison, float %val, 0
+ %struct.ri1 = insertvalue %struct.float2 %struct.ri0, float 2.0, 1
+ call void @call_v2_f32(%struct.float2 %struct.ri1)
+ ret void
+}
+
+define void @st_param_v2_f64_ii(double %val) {
+; CHECK-LABEL: st_param_v2_f64_ii(
+; CHECK: {
+; CHECK-EMPTY:
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: { // callseq 20, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v2.f64 [param0+0], {0d3FF0000000000000, 0d4000000000000000};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v2_f64,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 20
+; CHECK-NEXT: ret;
+ call void @call_v2_f64(%struct.double2 { double 1.0, double 2.0 })
+ ret void
+}
+define void @st_param_v2_f64_ir(double %val) {
+; CHECK-LABEL: st_param_v2_f64_ir(
+; CHECK: {
+; CHECK-NEXT: .reg .f64 %fd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f64 %fd1, [st_param_v2_f64_ir_param_0];
+; CHECK-NEXT: { // callseq 21, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v2.f64 [param0+0], {0d3FF0000000000000, %fd1};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v2_f64,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 21
+; CHECK-NEXT: ret;
+ %struct.ir0 = insertvalue %struct.double2 poison, double 1.0, 0
+ %struct.ir1 = insertvalue %struct.double2 %struct.ir0, double %val, 1
+ call void @call_v2_f64(%struct.double2 %struct.ir1)
+ ret void
+}
+define void @st_param_v2_f64_ri(double %val) {
+; CHECK-LABEL: st_param_v2_f64_ri(
+; CHECK: {
+; CHECK-NEXT: .reg .f64 %fd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f64 %fd1, [st_param_v2_f64_ri_param_0];
+; CHECK-NEXT: { // callseq 22, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v2.f64 [param0+0], {%fd1, 0d4000000000000000};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v2_f64,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 22
+; CHECK-NEXT: ret;
+ %struct.ri0 = insertvalue %struct.double2 poison, double %val, 0
+ %struct.ri1 = insertvalue %struct.double2 %struct.ri0, double 2.0, 1
+ call void @call_v2_f64(%struct.double2 %struct.ri1)
+ ret void
+}
+
+declare void @call_v2_i8(%struct.char2 alignstack(2))
+declare void @call_v2_i16(%struct.short2 alignstack(4))
+declare void @call_v2_i32(%struct.int2 alignstack(8))
+declare void @call_v2_i64(%struct.longlong2 alignstack(16))
+declare void @call_v2_f32(%struct.float2 alignstack(8))
+declare void @call_v2_f64(%struct.double2 alignstack(16))
+
+define void @st_param_v4_i8_iiii() {
+; CHECK-LABEL: st_param_v4_i8_iiii(
+; CHECK: {
+; CHECK-EMPTY:
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: { // callseq 23, 0
+; CHECK-NEXT: .param .align 4 .b8 param0[4];
+; CHECK-NEXT: st.param.v4.b8 [param0+0], {1, 2, 3, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i8,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 23
+; CHECK-NEXT: ret;
+ call void @call_v4_i8(%struct.char4 { i8 1, i8 2, i8 3, i8 4 })
+ ret void
+}
+define void @st_param_v4_i8_irrr(i8 %b, i8 %c, i8 %d) {
+; CHECK-LABEL: st_param_v4_i8_irrr(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v4_i8_irrr_param_0];
+; CHECK-NEXT: ld.param.u8 %rs2, [st_param_v4_i8_irrr_param_1];
+; CHECK-NEXT: ld.param.u8 %rs3, [st_param_v4_i8_irrr_param_2];
+; CHECK-NEXT: { // callseq 24, 0
+; CHECK-NEXT: .param .align 4 .b8 param0[4];
+; CHECK-NEXT: st.param.v4.b8 [param0+0], {1, %rs1, %rs2, %rs3};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i8,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 24
+; CHECK-NEXT: ret;
+ %struct.irrr0 = insertvalue %struct.char4 poison, i8 1, 0
+ %struct.irrr1 = insertvalue %struct.char4 %struct.irrr0, i8 %b, 1
+ %struct.irrr2 = insertvalue %struct.char4 %struct.irrr1, i8 %c, 2
+ %struct.irrr3 = insertvalue %struct.char4 %struct.irrr2, i8 %d, 3
+ call void @call_v4_i8(%struct.char4 %struct.irrr3)
+ ret void
+}
+define void @st_param_v4_i8_rirr(i8 %a, i8 %c, i8 %d) {
+; CHECK-LABEL: st_param_v4_i8_rirr(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v4_i8_rirr_param_0];
+; CHECK-NEXT: ld.param.u8 %rs2, [st_param_v4_i8_rirr_param_1];
+; CHECK-NEXT: ld.param.u8 %rs3, [st_param_v4_i8_rirr_param_2];
+; CHECK-NEXT: { // callseq 25, 0
+; CHECK-NEXT: .param .align 4 .b8 param0[4];
+; CHECK-NEXT: st.param.v4.b8 [param0+0], {%rs1, 2, %rs2, %rs3};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i8,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 25
+; CHECK-NEXT: ret;
+ %struct.rirr0 = insertvalue %struct.char4 poison, i8 %a, 0
+ %struct.rirr1 = insertvalue %struct.char4 %struct.rirr0, i8 2, 1
+ %struct.rirr2 = insertvalue %struct.char4 %struct.rirr1, i8 %c, 2
+ %struct.rirr3 = insertvalue %struct.char4 %struct.rirr2, i8 %d, 3
+ call void @call_v4_i8(%struct.char4 %struct.rirr3)
+ ret void
+}
+define void @st_param_v4_i8_rrir(i8 %a, i8 %b, i8 %d) {
+; CHECK-LABEL: st_param_v4_i8_rrir(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v4_i8_rrir_param_0];
+; CHECK-NEXT: ld.param.u8 %rs2, [st_param_v4_i8_rrir_param_1];
+; CHECK-NEXT: ld.param.u8 %rs3, [st_param_v4_i8_rrir_param_2];
+; CHECK-NEXT: { // callseq 26, 0
+; CHECK-NEXT: .param .align 4 .b8 param0[4];
+; CHECK-NEXT: st.param.v4.b8 [param0+0], {%rs1, %rs2, 3, %rs3};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i8,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 26
+; CHECK-NEXT: ret;
+ %struct.rrir0 = insertvalue %struct.char4 poison, i8 %a, 0
+ %struct.rrir1 = insertvalue %struct.char4 %struct.rrir0, i8 %b, 1
+ %struct.rrir2 = insertvalue %struct.char4 %struct.rrir1, i8 3, 2
+ %struct.rrir3 = insertvalue %struct.char4 %struct.rrir2, i8 %d, 3
+ call void @call_v4_i8(%struct.char4 %struct.rrir3)
+ ret void
+}
+define void @st_param_v4_i8_rrri(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: st_param_v4_i8_rrri(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v4_i8_rrri_param_0];
+; CHECK-NEXT: ld.param.u8 %rs2, [st_param_v4_i8_rrri_param_1];
+; CHECK-NEXT: ld.param.u8 %rs3, [st_param_v4_i8_rrri_param_2];
+; CHECK-NEXT: { // callseq 27, 0
+; CHECK-NEXT: .param .align 4 .b8 param0[4];
+; CHECK-NEXT: st.param.v4.b8 [param0+0], {%rs1, %rs2, %rs3, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i8,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 27
+; CHECK-NEXT: ret;
+ %struct.rrri0 = insertvalue %struct.char4 poison, i8 %a, 0
+ %struct.rrri1 = insertvalue %struct.char4 %struct.rrri0, i8 %b, 1
+ %struct.rrri2 = insertvalue %struct.char4 %struct.rrri1, i8 %c, 2
+ %struct.rrri3 = insertvalue %struct.char4 %struct.rrri2, i8 4, 3
+ call void @call_v4_i8(%struct.char4 %struct.rrri3)
+ ret void
+}
+define void @st_param_v4_i8_iirr(i8 %c, i8 %d) {
+; CHECK-LABEL: st_param_v4_i8_iirr(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v4_i8_iirr_param_0];
+; CHECK-NEXT: ld.param.u8 %rs2, [st_param_v4_i8_iirr_param_1];
+; CHECK-NEXT: { // callseq 28, 0
+; CHECK-NEXT: .param .align 4 .b8 param0[4];
+; CHECK-NEXT: st.param.v4.b8 [param0+0], {1, 2, %rs1, %rs2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i8,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 28
+; CHECK-NEXT: ret;
+ %struct.iirr0 = insertvalue %struct.char4 poison, i8 1, 0
+ %struct.iirr1 = insertvalue %struct.char4 %struct.iirr0, i8 2, 1
+ %struct.iirr2 = insertvalue %struct.char4 %struct.iirr1, i8 %c, 2
+ %struct.iirr3 = insertvalue %struct.char4 %struct.iirr2, i8 %d, 3
+ call void @call_v4_i8(%struct.char4 %struct.iirr3)
+ ret void
+}
+define void @st_param_v4_i8_irir(i8 %b, i8 %d) {
+; CHECK-LABEL: st_param_v4_i8_irir(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v4_i8_irir_param_0];
+; CHECK-NEXT: ld.param.u8 %rs2, [st_param_v4_i8_irir_param_1];
+; CHECK-NEXT: { // callseq 29, 0
+; CHECK-NEXT: .param .align 4 .b8 param0[4];
+; CHECK-NEXT: st.param.v4.b8 [param0+0], {1, %rs1, 3, %rs2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i8,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 29
+; CHECK-NEXT: ret;
+ %struct.irir0 = insertvalue %struct.char4 poison, i8 1, 0
+ %struct.irir1 = insertvalue %struct.char4 %struct.irir0, i8 %b, 1
+ %struct.irir2 = insertvalue %struct.char4 %struct.irir1, i8 3, 2
+ %struct.irir3 = insertvalue %struct.char4 %struct.irir2, i8 %d, 3
+ call void @call_v4_i8(%struct.char4 %struct.irir3)
+ ret void
+}
+define void @st_param_v4_i8_irri(i8 %b, i8 %c) {
+; CHECK-LABEL: st_param_v4_i8_irri(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v4_i8_irri_param_0];
+; CHECK-NEXT: ld.param.u8 %rs2, [st_param_v4_i8_irri_param_1];
+; CHECK-NEXT: { // callseq 30, 0
+; CHECK-NEXT: .param .align 4 .b8 param0[4];
+; CHECK-NEXT: st.param.v4.b8 [param0+0], {1, %rs1, %rs2, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i8,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 30
+; CHECK-NEXT: ret;
+ %struct.irri0 = insertvalue %struct.char4 poison, i8 1, 0
+ %struct.irri1 = insertvalue %struct.char4 %struct.irri0, i8 %b, 1
+ %struct.irri2 = insertvalue %struct.char4 %struct.irri1, i8 %c, 2
+ %struct.irri3 = insertvalue %struct.char4 %struct.irri2, i8 4, 3
+ call void @call_v4_i8(%struct.char4 %struct.irri3)
+ ret void
+}
+define void @st_param_v4_i8_riir(i8 %a, i8 %d) {
+; CHECK-LABEL: st_param_v4_i8_riir(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v4_i8_riir_param_0];
+; CHECK-NEXT: ld.param.u8 %rs2, [st_param_v4_i8_riir_param_1];
+; CHECK-NEXT: { // callseq 31, 0
+; CHECK-NEXT: .param .align 4 .b8 param0[4];
+; CHECK-NEXT: st.param.v4.b8 [param0+0], {%rs1, 2, 3, %rs2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i8,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 31
+; CHECK-NEXT: ret;
+ %struct.riir0 = insertvalue %struct.char4 poison, i8 %a, 0
+ %struct.riir1 = insertvalue %struct.char4 %struct.riir0, i8 2, 1
+ %struct.riir2 = insertvalue %struct.char4 %struct.riir1, i8 3, 2
+ %struct.riir3 = insertvalue %struct.char4 %struct.riir2, i8 %d, 3
+ call void @call_v4_i8(%struct.char4 %struct.riir3)
+ ret void
+}
+define void @st_param_v4_i8_riri(i8 %a, i8 %c) {
+; CHECK-LABEL: st_param_v4_i8_riri(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v4_i8_riri_param_0];
+; CHECK-NEXT: ld.param.u8 %rs2, [st_param_v4_i8_riri_param_1];
+; CHECK-NEXT: { // callseq 32, 0
+; CHECK-NEXT: .param .align 4 .b8 param0[4];
+; CHECK-NEXT: st.param.v4.b8 [param0+0], {%rs1, 2, %rs2, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i8,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 32
+; CHECK-NEXT: ret;
+ %struct.riri0 = insertvalue %struct.char4 poison, i8 %a, 0
+ %struct.riri1 = insertvalue %struct.char4 %struct.riri0, i8 2, 1
+ %struct.riri2 = insertvalue %struct.char4 %struct.riri1, i8 %c, 2
+ %struct.riri3 = insertvalue %struct.char4 %struct.riri2, i8 4, 3
+ call void @call_v4_i8(%struct.char4 %struct.riri3)
+ ret void
+}
+define void @st_param_v4_i8_rrii(i8 %a, i8 %b) {
+; CHECK-LABEL: st_param_v4_i8_rrii(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v4_i8_rrii_param_0];
+; CHECK-NEXT: ld.param.u8 %rs2, [st_param_v4_i8_rrii_param_1];
+; CHECK-NEXT: { // callseq 33, 0
+; CHECK-NEXT: .param .align 4 .b8 param0[4];
+; CHECK-NEXT: st.param.v4.b8 [param0+0], {%rs1, %rs2, 3, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i8,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 33
+; CHECK-NEXT: ret;
+ %struct.rrii0 = insertvalue %struct.char4 poison, i8 %a, 0
+ %struct.rrii1 = insertvalue %struct.char4 %struct.rrii0, i8 %b, 1
+ %struct.rrii2 = insertvalue %struct.char4 %struct.rrii1, i8 3, 2
+ %struct.rrii3 = insertvalue %struct.char4 %struct.rrii2, i8 4, 3
+ call void @call_v4_i8(%struct.char4 %struct.rrii3)
+ ret void
+}
+define void @st_param_v4_i8_iiir(i8 %d) {
+; CHECK-LABEL: st_param_v4_i8_iiir(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v4_i8_iiir_param_0];
+; CHECK-NEXT: { // callseq 34, 0
+; CHECK-NEXT: .param .align 4 .b8 param0[4];
+; CHECK-NEXT: st.param.v4.b8 [param0+0], {1, 2, 3, %rs1};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i8,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 34
+; CHECK-NEXT: ret;
+ %struct.iiir0 = insertvalue %struct.char4 poison, i8 1, 0
+ %struct.iiir1 = insertvalue %struct.char4 %struct.iiir0, i8 2, 1
+ %struct.iiir2 = insertvalue %struct.char4 %struct.iiir1, i8 3, 2
+ %struct.iiir3 = insertvalue %struct.char4 %struct.iiir2, i8 %d, 3
+ call void @call_v4_i8(%struct.char4 %struct.iiir3)
+ ret void
+}
+define void @st_param_v4_i8_iiri(i8 %c) {
+; CHECK-LABEL: st_param_v4_i8_iiri(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v4_i8_iiri_param_0];
+; CHECK-NEXT: { // callseq 35, 0
+; CHECK-NEXT: .param .align 4 .b8 param0[4];
+; CHECK-NEXT: st.param.v4.b8 [param0+0], {1, 2, %rs1, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i8,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 35
+; CHECK-NEXT: ret;
+ %struct.iiri0 = insertvalue %struct.char4 poison, i8 1, 0
+ %struct.iiri1 = insertvalue %struct.char4 %struct.iiri0, i8 2, 1
+ %struct.iiri2 = insertvalue %struct.char4 %struct.iiri1, i8 %c, 2
+ %struct.iiri3 = insertvalue %struct.char4 %struct.iiri2, i8 4, 3
+ call void @call_v4_i8(%struct.char4 %struct.iiri3)
+ ret void
+}
+define void @st_param_v4_i8_irii(i8 %b) {
+; CHECK-LABEL: st_param_v4_i8_irii(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v4_i8_irii_param_0];
+; CHECK-NEXT: { // callseq 36, 0
+; CHECK-NEXT: .param .align 4 .b8 param0[4];
+; CHECK-NEXT: st.param.v4.b8 [param0+0], {1, %rs1, 3, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i8,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 36
+; CHECK-NEXT: ret;
+ %struct.irii0 = insertvalue %struct.char4 poison, i8 1, 0
+ %struct.irii1 = insertvalue %struct.char4 %struct.irii0, i8 %b, 1
+ %struct.irii2 = insertvalue %struct.char4 %struct.irii1, i8 3, 2
+ %struct.irii3 = insertvalue %struct.char4 %struct.irii2, i8 4, 3
+ call void @call_v4_i8(%struct.char4 %struct.irii3)
+ ret void
+}
+define void @st_param_v4_i8_riii(i8 %a) {
+; CHECK-LABEL: st_param_v4_i8_riii(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v4_i8_riii_param_0];
+; CHECK-NEXT: { // callseq 37, 0
+; CHECK-NEXT: .param .align 4 .b8 param0[4];
+; CHECK-NEXT: st.param.v4.b8 [param0+0], {%rs1, 2, 3, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i8,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 37
+; CHECK-NEXT: ret;
+ %struct.riii0 = insertvalue %struct.char4 poison, i8 %a, 0
+ %struct.riii1 = insertvalue %struct.char4 %struct.riii0, i8 2, 1
+ %struct.riii2 = insertvalue %struct.char4 %struct.riii1, i8 3, 2
+ %struct.riii3 = insertvalue %struct.char4 %struct.riii2, i8 4, 3
+ call void @call_v4_i8(%struct.char4 %struct.riii3)
+ ret void
+}
+
+define void @st_param_v4_i16_iiii() {
+; CHECK-LABEL: st_param_v4_i16_iiii(
+; CHECK: {
+; CHECK-EMPTY:
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: { // callseq 38, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v4.b16 [param0+0], {1, 2, 3, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i16,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 38
+; CHECK-NEXT: ret;
+ call void @call_v4_i16(%struct.short4 { i16 1, i16 2, i16 3, i16 4 })
+ ret void
+}
+define void @st_param_v4_i16_irrr(i16 %b, i16 %c, i16 %d) {
+; CHECK-LABEL: st_param_v4_i16_irrr(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v4_i16_irrr_param_0];
+; CHECK-NEXT: ld.param.u16 %rs2, [st_param_v4_i16_irrr_param_1];
+; CHECK-NEXT: ld.param.u16 %rs3, [st_param_v4_i16_irrr_param_2];
+; CHECK-NEXT: { // callseq 39, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v4.b16 [param0+0], {1, %rs1, %rs2, %rs3};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i16,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 39
+; CHECK-NEXT: ret;
+ %struct.irrr0 = insertvalue %struct.short4 poison, i16 1, 0
+ %struct.irrr1 = insertvalue %struct.short4 %struct.irrr0, i16 %b, 1
+ %struct.irrr2 = insertvalue %struct.short4 %struct.irrr1, i16 %c, 2
+ %struct.irrr3 = insertvalue %struct.short4 %struct.irrr2, i16 %d, 3
+ call void @call_v4_i16(%struct.short4 %struct.irrr3)
+ ret void
+}
+define void @st_param_v4_i16_rirr(i16 %a, i16 %c, i16 %d) {
+; CHECK-LABEL: st_param_v4_i16_rirr(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v4_i16_rirr_param_0];
+; CHECK-NEXT: ld.param.u16 %rs2, [st_param_v4_i16_rirr_param_1];
+; CHECK-NEXT: ld.param.u16 %rs3, [st_param_v4_i16_rirr_param_2];
+; CHECK-NEXT: { // callseq 40, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v4.b16 [param0+0], {%rs1, 2, %rs2, %rs3};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i16,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 40
+; CHECK-NEXT: ret;
+ %struct.rirr0 = insertvalue %struct.short4 poison, i16 %a, 0
+ %struct.rirr1 = insertvalue %struct.short4 %struct.rirr0, i16 2, 1
+ %struct.rirr2 = insertvalue %struct.short4 %struct.rirr1, i16 %c, 2
+ %struct.rirr3 = insertvalue %struct.short4 %struct.rirr2, i16 %d, 3
+ call void @call_v4_i16(%struct.short4 %struct.rirr3)
+ ret void
+}
+define void @st_param_v4_i16_rrir(i16 %a, i16 %b, i16 %d) {
+; CHECK-LABEL: st_param_v4_i16_rrir(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v4_i16_rrir_param_0];
+; CHECK-NEXT: ld.param.u16 %rs2, [st_param_v4_i16_rrir_param_1];
+; CHECK-NEXT: ld.param.u16 %rs3, [st_param_v4_i16_rrir_param_2];
+; CHECK-NEXT: { // callseq 41, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v4.b16 [param0+0], {%rs1, %rs2, 3, %rs3};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i16,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 41
+; CHECK-NEXT: ret;
+ %struct.rrir0 = insertvalue %struct.short4 poison, i16 %a, 0
+ %struct.rrir1 = insertvalue %struct.short4 %struct.rrir0, i16 %b, 1
+ %struct.rrir2 = insertvalue %struct.short4 %struct.rrir1, i16 3, 2
+ %struct.rrir3 = insertvalue %struct.short4 %struct.rrir2, i16 %d, 3
+ call void @call_v4_i16(%struct.short4 %struct.rrir3)
+ ret void
+}
+define void @st_param_v4_i16_rrri(i16 %a, i16 %b, i16 %c) {
+; CHECK-LABEL: st_param_v4_i16_rrri(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v4_i16_rrri_param_0];
+; CHECK-NEXT: ld.param.u16 %rs2, [st_param_v4_i16_rrri_param_1];
+; CHECK-NEXT: ld.param.u16 %rs3, [st_param_v4_i16_rrri_param_2];
+; CHECK-NEXT: { // callseq 42, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v4.b16 [param0+0], {%rs1, %rs2, %rs3, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i16,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 42
+; CHECK-NEXT: ret;
+ %struct.rrri0 = insertvalue %struct.short4 poison, i16 %a, 0
+ %struct.rrri1 = insertvalue %struct.short4 %struct.rrri0, i16 %b, 1
+ %struct.rrri2 = insertvalue %struct.short4 %struct.rrri1, i16 %c, 2
+ %struct.rrri3 = insertvalue %struct.short4 %struct.rrri2, i16 4, 3
+ call void @call_v4_i16(%struct.short4 %struct.rrri3)
+ ret void
+}
+define void @st_param_v4_i16_iirr(i16 %c, i16 %d) {
+; CHECK-LABEL: st_param_v4_i16_iirr(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v4_i16_iirr_param_0];
+; CHECK-NEXT: ld.param.u16 %rs2, [st_param_v4_i16_iirr_param_1];
+; CHECK-NEXT: { // callseq 43, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v4.b16 [param0+0], {1, 2, %rs1, %rs2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i16,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 43
+; CHECK-NEXT: ret;
+ %struct.iirr0 = insertvalue %struct.short4 poison, i16 1, 0
+ %struct.iirr1 = insertvalue %struct.short4 %struct.iirr0, i16 2, 1
+ %struct.iirr2 = insertvalue %struct.short4 %struct.iirr1, i16 %c, 2
+ %struct.iirr3 = insertvalue %struct.short4 %struct.iirr2, i16 %d, 3
+ call void @call_v4_i16(%struct.short4 %struct.iirr3)
+ ret void
+}
+define void @st_param_v4_i16_irir(i16 %b, i16 %d) {
+; CHECK-LABEL: st_param_v4_i16_irir(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v4_i16_irir_param_0];
+; CHECK-NEXT: ld.param.u16 %rs2, [st_param_v4_i16_irir_param_1];
+; CHECK-NEXT: { // callseq 44, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v4.b16 [param0+0], {1, %rs1, 3, %rs2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i16,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 44
+; CHECK-NEXT: ret;
+ %struct.irir0 = insertvalue %struct.short4 poison, i16 1, 0
+ %struct.irir1 = insertvalue %struct.short4 %struct.irir0, i16 %b, 1
+ %struct.irir2 = insertvalue %struct.short4 %struct.irir1, i16 3, 2
+ %struct.irir3 = insertvalue %struct.short4 %struct.irir2, i16 %d, 3
+ call void @call_v4_i16(%struct.short4 %struct.irir3)
+ ret void
+}
+define void @st_param_v4_i16_irri(i16 %b, i16 %c) {
+; CHECK-LABEL: st_param_v4_i16_irri(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v4_i16_irri_param_0];
+; CHECK-NEXT: ld.param.u16 %rs2, [st_param_v4_i16_irri_param_1];
+; CHECK-NEXT: { // callseq 45, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v4.b16 [param0+0], {1, %rs1, %rs2, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i16,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 45
+; CHECK-NEXT: ret;
+ %struct.irri0 = insertvalue %struct.short4 poison, i16 1, 0
+ %struct.irri1 = insertvalue %struct.short4 %struct.irri0, i16 %b, 1
+ %struct.irri2 = insertvalue %struct.short4 %struct.irri1, i16 %c, 2
+ %struct.irri3 = insertvalue %struct.short4 %struct.irri2, i16 4, 3
+ call void @call_v4_i16(%struct.short4 %struct.irri3)
+ ret void
+}
+define void @st_param_v4_i16_riir(i16 %a, i16 %d) {
+; CHECK-LABEL: st_param_v4_i16_riir(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v4_i16_riir_param_0];
+; CHECK-NEXT: ld.param.u16 %rs2, [st_param_v4_i16_riir_param_1];
+; CHECK-NEXT: { // callseq 46, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v4.b16 [param0+0], {%rs1, 2, 3, %rs2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i16,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 46
+; CHECK-NEXT: ret;
+ %struct.riir0 = insertvalue %struct.short4 poison, i16 %a, 0
+ %struct.riir1 = insertvalue %struct.short4 %struct.riir0, i16 2, 1
+ %struct.riir2 = insertvalue %struct.short4 %struct.riir1, i16 3, 2
+ %struct.riir3 = insertvalue %struct.short4 %struct.riir2, i16 %d, 3
+ call void @call_v4_i16(%struct.short4 %struct.riir3)
+ ret void
+}
+define void @st_param_v4_i16_riri(i16 %a, i16 %c) {
+; CHECK-LABEL: st_param_v4_i16_riri(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v4_i16_riri_param_0];
+; CHECK-NEXT: ld.param.u16 %rs2, [st_param_v4_i16_riri_param_1];
+; CHECK-NEXT: { // callseq 47, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v4.b16 [param0+0], {%rs1, 2, %rs2, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i16,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 47
+; CHECK-NEXT: ret;
+ %struct.riri0 = insertvalue %struct.short4 poison, i16 %a, 0
+ %struct.riri1 = insertvalue %struct.short4 %struct.riri0, i16 2, 1
+ %struct.riri2 = insertvalue %struct.short4 %struct.riri1, i16 %c, 2
+ %struct.riri3 = insertvalue %struct.short4 %struct.riri2, i16 4, 3
+ call void @call_v4_i16(%struct.short4 %struct.riri3)
+ ret void
+}
+define void @st_param_v4_i16_rrii(i16 %a, i16 %b) {
+; CHECK-LABEL: st_param_v4_i16_rrii(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v4_i16_rrii_param_0];
+; CHECK-NEXT: ld.param.u16 %rs2, [st_param_v4_i16_rrii_param_1];
+; CHECK-NEXT: { // callseq 48, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v4.b16 [param0+0], {%rs1, %rs2, 3, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i16,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 48
+; CHECK-NEXT: ret;
+ %struct.rrii0 = insertvalue %struct.short4 poison, i16 %a, 0
+ %struct.rrii1 = insertvalue %struct.short4 %struct.rrii0, i16 %b, 1
+ %struct.rrii2 = insertvalue %struct.short4 %struct.rrii1, i16 3, 2
+ %struct.rrii3 = insertvalue %struct.short4 %struct.rrii2, i16 4, 3
+ call void @call_v4_i16(%struct.short4 %struct.rrii3)
+ ret void
+}
+define void @st_param_v4_i16_iiir(i16 %d) {
+; CHECK-LABEL: st_param_v4_i16_iiir(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v4_i16_iiir_param_0];
+; CHECK-NEXT: { // callseq 49, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v4.b16 [param0+0], {1, 2, 3, %rs1};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i16,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 49
+; CHECK-NEXT: ret;
+ %struct.iiir0 = insertvalue %struct.short4 poison, i16 1, 0
+ %struct.iiir1 = insertvalue %struct.short4 %struct.iiir0, i16 2, 1
+ %struct.iiir2 = insertvalue %struct.short4 %struct.iiir1, i16 3, 2
+ %struct.iiir3 = insertvalue %struct.short4 %struct.iiir2, i16 %d, 3
+ call void @call_v4_i16(%struct.short4 %struct.iiir3)
+ ret void
+}
+define void @st_param_v4_i16_iiri(i16 %c) {
+; CHECK-LABEL: st_param_v4_i16_iiri(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v4_i16_iiri_param_0];
+; CHECK-NEXT: { // callseq 50, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v4.b16 [param0+0], {1, 2, %rs1, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i16,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 50
+; CHECK-NEXT: ret;
+ %struct.iiri0 = insertvalue %struct.short4 poison, i16 1, 0
+ %struct.iiri1 = insertvalue %struct.short4 %struct.iiri0, i16 2, 1
+ %struct.iiri2 = insertvalue %struct.short4 %struct.iiri1, i16 %c, 2
+ %struct.iiri3 = insertvalue %struct.short4 %struct.iiri2, i16 4, 3
+ call void @call_v4_i16(%struct.short4 %struct.iiri3)
+ ret void
+}
+define void @st_param_v4_i16_irii(i16 %b) {
+; CHECK-LABEL: st_param_v4_i16_irii(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v4_i16_irii_param_0];
+; CHECK-NEXT: { // callseq 51, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v4.b16 [param0+0], {1, %rs1, 3, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i16,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 51
+; CHECK-NEXT: ret;
+ %struct.irii0 = insertvalue %struct.short4 poison, i16 1, 0
+ %struct.irii1 = insertvalue %struct.short4 %struct.irii0, i16 %b, 1
+ %struct.irii2 = insertvalue %struct.short4 %struct.irii1, i16 3, 2
+ %struct.irii3 = insertvalue %struct.short4 %struct.irii2, i16 4, 3
+ call void @call_v4_i16(%struct.short4 %struct.irii3)
+ ret void
+}
+define void @st_param_v4_i16_riii(i16 %a) {
+; CHECK-LABEL: st_param_v4_i16_riii(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v4_i16_riii_param_0];
+; CHECK-NEXT: { // callseq 52, 0
+; CHECK-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NEXT: st.param.v4.b16 [param0+0], {%rs1, 2, 3, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i16,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 52
+; CHECK-NEXT: ret;
+ %struct.riii0 = insertvalue %struct.short4 poison, i16 %a, 0
+ %struct.riii1 = insertvalue %struct.short4 %struct.riii0, i16 2, 1
+ %struct.riii2 = insertvalue %struct.short4 %struct.riii1, i16 3, 2
+ %struct.riii3 = insertvalue %struct.short4 %struct.riii2, i16 4, 3
+ call void @call_v4_i16(%struct.short4 %struct.riii3)
+ ret void
+}
+
+define void @st_param_v4_i32_iiii() {
+; CHECK-LABEL: st_param_v4_i32_iiii(
+; CHECK: {
+; CHECK-EMPTY:
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: { // callseq 53, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.b32 [param0+0], {1, 2, 3, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 53
+; CHECK-NEXT: ret;
+ call void @call_v4_i32(%struct.int4 { i32 1, i32 2, i32 3, i32 4 })
+ ret void
+}
+define void @st_param_v4_i32_irrr(i32 %b, i32 %c, i32 %d) {
+; CHECK-LABEL: st_param_v4_i32_irrr(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [st_param_v4_i32_irrr_param_0];
+; CHECK-NEXT: ld.param.u32 %r2, [st_param_v4_i32_irrr_param_1];
+; CHECK-NEXT: ld.param.u32 %r3, [st_param_v4_i32_irrr_param_2];
+; CHECK-NEXT: { // callseq 54, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.b32 [param0+0], {1, %r1, %r2, %r3};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 54
+; CHECK-NEXT: ret;
+ %struct.irrr0 = insertvalue %struct.int4 poison, i32 1, 0
+ %struct.irrr1 = insertvalue %struct.int4 %struct.irrr0, i32 %b, 1
+ %struct.irrr2 = insertvalue %struct.int4 %struct.irrr1, i32 %c, 2
+ %struct.irrr3 = insertvalue %struct.int4 %struct.irrr2, i32 %d, 3
+ call void @call_v4_i32(%struct.int4 %struct.irrr3)
+ ret void
+}
+define void @st_param_v4_i32_rirr(i32 %a, i32 %c, i32 %d) {
+; CHECK-LABEL: st_param_v4_i32_rirr(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [st_param_v4_i32_rirr_param_0];
+; CHECK-NEXT: ld.param.u32 %r2, [st_param_v4_i32_rirr_param_1];
+; CHECK-NEXT: ld.param.u32 %r3, [st_param_v4_i32_rirr_param_2];
+; CHECK-NEXT: { // callseq 55, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.b32 [param0+0], {%r1, 2, %r2, %r3};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 55
+; CHECK-NEXT: ret;
+ %struct.rirr0 = insertvalue %struct.int4 poison, i32 %a, 0
+ %struct.rirr1 = insertvalue %struct.int4 %struct.rirr0, i32 2, 1
+ %struct.rirr2 = insertvalue %struct.int4 %struct.rirr1, i32 %c, 2
+ %struct.rirr3 = insertvalue %struct.int4 %struct.rirr2, i32 %d, 3
+ call void @call_v4_i32(%struct.int4 %struct.rirr3)
+ ret void
+}
+define void @st_param_v4_i32_rrir(i32 %a, i32 %b, i32 %d) {
+; CHECK-LABEL: st_param_v4_i32_rrir(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [st_param_v4_i32_rrir_param_0];
+; CHECK-NEXT: ld.param.u32 %r2, [st_param_v4_i32_rrir_param_1];
+; CHECK-NEXT: ld.param.u32 %r3, [st_param_v4_i32_rrir_param_2];
+; CHECK-NEXT: { // callseq 56, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.b32 [param0+0], {%r1, %r2, 3, %r3};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 56
+; CHECK-NEXT: ret;
+ %struct.rrir0 = insertvalue %struct.int4 poison, i32 %a, 0
+ %struct.rrir1 = insertvalue %struct.int4 %struct.rrir0, i32 %b, 1
+ %struct.rrir2 = insertvalue %struct.int4 %struct.rrir1, i32 3, 2
+ %struct.rrir3 = insertvalue %struct.int4 %struct.rrir2, i32 %d, 3
+ call void @call_v4_i32(%struct.int4 %struct.rrir3)
+ ret void
+}
+define void @st_param_v4_i32_rrri(i32 %a, i32 %b, i32 %c) {
+; CHECK-LABEL: st_param_v4_i32_rrri(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [st_param_v4_i32_rrri_param_0];
+; CHECK-NEXT: ld.param.u32 %r2, [st_param_v4_i32_rrri_param_1];
+; CHECK-NEXT: ld.param.u32 %r3, [st_param_v4_i32_rrri_param_2];
+; CHECK-NEXT: { // callseq 57, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.b32 [param0+0], {%r1, %r2, %r3, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 57
+; CHECK-NEXT: ret;
+ %struct.rrri0 = insertvalue %struct.int4 poison, i32 %a, 0
+ %struct.rrri1 = insertvalue %struct.int4 %struct.rrri0, i32 %b, 1
+ %struct.rrri2 = insertvalue %struct.int4 %struct.rrri1, i32 %c, 2
+ %struct.rrri3 = insertvalue %struct.int4 %struct.rrri2, i32 4, 3
+ call void @call_v4_i32(%struct.int4 %struct.rrri3)
+ ret void
+}
+define void @st_param_v4_i32_iirr(i32 %c, i32 %d) {
+; CHECK-LABEL: st_param_v4_i32_iirr(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [st_param_v4_i32_iirr_param_0];
+; CHECK-NEXT: ld.param.u32 %r2, [st_param_v4_i32_iirr_param_1];
+; CHECK-NEXT: { // callseq 58, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.b32 [param0+0], {1, 2, %r1, %r2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 58
+; CHECK-NEXT: ret;
+ %struct.iirr0 = insertvalue %struct.int4 poison, i32 1, 0
+ %struct.iirr1 = insertvalue %struct.int4 %struct.iirr0, i32 2, 1
+ %struct.iirr2 = insertvalue %struct.int4 %struct.iirr1, i32 %c, 2
+ %struct.iirr3 = insertvalue %struct.int4 %struct.iirr2, i32 %d, 3
+ call void @call_v4_i32(%struct.int4 %struct.iirr3)
+ ret void
+}
+define void @st_param_v4_i32_irir(i32 %b, i32 %d) {
+; CHECK-LABEL: st_param_v4_i32_irir(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [st_param_v4_i32_irir_param_0];
+; CHECK-NEXT: ld.param.u32 %r2, [st_param_v4_i32_irir_param_1];
+; CHECK-NEXT: { // callseq 59, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.b32 [param0+0], {1, %r1, 3, %r2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 59
+; CHECK-NEXT: ret;
+ %struct.irir0 = insertvalue %struct.int4 poison, i32 1, 0
+ %struct.irir1 = insertvalue %struct.int4 %struct.irir0, i32 %b, 1
+ %struct.irir2 = insertvalue %struct.int4 %struct.irir1, i32 3, 2
+ %struct.irir3 = insertvalue %struct.int4 %struct.irir2, i32 %d, 3
+ call void @call_v4_i32(%struct.int4 %struct.irir3)
+ ret void
+}
+define void @st_param_v4_i32_irri(i32 %b, i32 %c) {
+; CHECK-LABEL: st_param_v4_i32_irri(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [st_param_v4_i32_irri_param_0];
+; CHECK-NEXT: ld.param.u32 %r2, [st_param_v4_i32_irri_param_1];
+; CHECK-NEXT: { // callseq 60, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.b32 [param0+0], {1, %r1, %r2, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 60
+; CHECK-NEXT: ret;
+ %struct.irri0 = insertvalue %struct.int4 poison, i32 1, 0
+ %struct.irri1 = insertvalue %struct.int4 %struct.irri0, i32 %b, 1
+ %struct.irri2 = insertvalue %struct.int4 %struct.irri1, i32 %c, 2
+ %struct.irri3 = insertvalue %struct.int4 %struct.irri2, i32 4, 3
+ call void @call_v4_i32(%struct.int4 %struct.irri3)
+ ret void
+}
+define void @st_param_v4_i32_riir(i32 %a, i32 %d) {
+; CHECK-LABEL: st_param_v4_i32_riir(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [st_param_v4_i32_riir_param_0];
+; CHECK-NEXT: ld.param.u32 %r2, [st_param_v4_i32_riir_param_1];
+; CHECK-NEXT: { // callseq 61, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.b32 [param0+0], {%r1, 2, 3, %r2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 61
+; CHECK-NEXT: ret;
+ %struct.riir0 = insertvalue %struct.int4 poison, i32 %a, 0
+ %struct.riir1 = insertvalue %struct.int4 %struct.riir0, i32 2, 1
+ %struct.riir2 = insertvalue %struct.int4 %struct.riir1, i32 3, 2
+ %struct.riir3 = insertvalue %struct.int4 %struct.riir2, i32 %d, 3
+ call void @call_v4_i32(%struct.int4 %struct.riir3)
+ ret void
+}
+define void @st_param_v4_i32_riri(i32 %a, i32 %c) {
+; CHECK-LABEL: st_param_v4_i32_riri(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [st_param_v4_i32_riri_param_0];
+; CHECK-NEXT: ld.param.u32 %r2, [st_param_v4_i32_riri_param_1];
+; CHECK-NEXT: { // callseq 62, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.b32 [param0+0], {%r1, 2, %r2, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 62
+; CHECK-NEXT: ret;
+ %struct.riri0 = insertvalue %struct.int4 poison, i32 %a, 0
+ %struct.riri1 = insertvalue %struct.int4 %struct.riri0, i32 2, 1
+ %struct.riri2 = insertvalue %struct.int4 %struct.riri1, i32 %c, 2
+ %struct.riri3 = insertvalue %struct.int4 %struct.riri2, i32 4, 3
+ call void @call_v4_i32(%struct.int4 %struct.riri3)
+ ret void
+}
+define void @st_param_v4_i32_rrii(i32 %a, i32 %b) {
+; CHECK-LABEL: st_param_v4_i32_rrii(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [st_param_v4_i32_rrii_param_0];
+; CHECK-NEXT: ld.param.u32 %r2, [st_param_v4_i32_rrii_param_1];
+; CHECK-NEXT: { // callseq 63, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.b32 [param0+0], {%r1, %r2, 3, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 63
+; CHECK-NEXT: ret;
+ %struct.rrii0 = insertvalue %struct.int4 poison, i32 %a, 0
+ %struct.rrii1 = insertvalue %struct.int4 %struct.rrii0, i32 %b, 1
+ %struct.rrii2 = insertvalue %struct.int4 %struct.rrii1, i32 3, 2
+ %struct.rrii3 = insertvalue %struct.int4 %struct.rrii2, i32 4, 3
+ call void @call_v4_i32(%struct.int4 %struct.rrii3)
+ ret void
+}
+define void @st_param_v4_i32_iiir(i32 %d) {
+; CHECK-LABEL: st_param_v4_i32_iiir(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [st_param_v4_i32_iiir_param_0];
+; CHECK-NEXT: { // callseq 64, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.b32 [param0+0], {1, 2, 3, %r1};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 64
+; CHECK-NEXT: ret;
+ %struct.iiir0 = insertvalue %struct.int4 poison, i32 1, 0
+ %struct.iiir1 = insertvalue %struct.int4 %struct.iiir0, i32 2, 1
+ %struct.iiir2 = insertvalue %struct.int4 %struct.iiir1, i32 3, 2
+ %struct.iiir3 = insertvalue %struct.int4 %struct.iiir2, i32 %d, 3
+ call void @call_v4_i32(%struct.int4 %struct.iiir3)
+ ret void
+}
+define void @st_param_v4_i32_iiri(i32 %c) {
+; CHECK-LABEL: st_param_v4_i32_iiri(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [st_param_v4_i32_iiri_param_0];
+; CHECK-NEXT: { // callseq 65, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.b32 [param0+0], {1, 2, %r1, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 65
+; CHECK-NEXT: ret;
+ %struct.iiri0 = insertvalue %struct.int4 poison, i32 1, 0
+ %struct.iiri1 = insertvalue %struct.int4 %struct.iiri0, i32 2, 1
+ %struct.iiri2 = insertvalue %struct.int4 %struct.iiri1, i32 %c, 2
+ %struct.iiri3 = insertvalue %struct.int4 %struct.iiri2, i32 4, 3
+ call void @call_v4_i32(%struct.int4 %struct.iiri3)
+ ret void
+}
+define void @st_param_v4_i32_irii(i32 %b) {
+; CHECK-LABEL: st_param_v4_i32_irii(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [st_param_v4_i32_irii_param_0];
+; CHECK-NEXT: { // callseq 66, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.b32 [param0+0], {1, %r1, 3, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 66
+; CHECK-NEXT: ret;
+ %struct.irii0 = insertvalue %struct.int4 poison, i32 1, 0
+ %struct.irii1 = insertvalue %struct.int4 %struct.irii0, i32 %b, 1
+ %struct.irii2 = insertvalue %struct.int4 %struct.irii1, i32 3, 2
+ %struct.irii3 = insertvalue %struct.int4 %struct.irii2, i32 4, 3
+ call void @call_v4_i32(%struct.int4 %struct.irii3)
+ ret void
+}
+define void @st_param_v4_i32_riii(i32 %a) {
+; CHECK-LABEL: st_param_v4_i32_riii(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u32 %r1, [st_param_v4_i32_riii_param_0];
+; CHECK-NEXT: { // callseq 67, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.b32 [param0+0], {%r1, 2, 3, 4};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_i32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 67
+; CHECK-NEXT: ret;
+ %struct.riii0 = insertvalue %struct.int4 poison, i32 %a, 0
+ %struct.riii1 = insertvalue %struct.int4 %struct.riii0, i32 2, 1
+ %struct.riii2 = insertvalue %struct.int4 %struct.riii1, i32 3, 2
+ %struct.riii3 = insertvalue %struct.int4 %struct.riii2, i32 4, 3
+ call void @call_v4_i32(%struct.int4 %struct.riii3)
+ ret void
+}
+
+define void @st_param_v4_f32_iiii() {
+; CHECK-LABEL: st_param_v4_f32_iiii(
+; CHECK: {
+; CHECK-EMPTY:
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: { // callseq 68, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.f32 [param0+0], {0f3F800000, 0f40000000, 0f40400000, 0f40800000};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_f32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 68
+; CHECK-NEXT: ret;
+ call void @call_v4_f32(%struct.float4 { float 1.0, float 2.0, float 3.0, float 4.0 })
+ ret void
+}
+define void @st_param_v4_f32_irrr(float %b, float %c, float %d) {
+; CHECK-LABEL: st_param_v4_f32_irrr(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_irrr_param_0];
+; CHECK-NEXT: ld.param.f32 %f2, [st_param_v4_f32_irrr_param_1];
+; CHECK-NEXT: ld.param.f32 %f3, [st_param_v4_f32_irrr_param_2];
+; CHECK-NEXT: { // callseq 69, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.f32 [param0+0], {0f3F800000, %f1, %f2, %f3};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_f32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 69
+; CHECK-NEXT: ret;
+ %struct.irrr0 = insertvalue %struct.float4 poison, float 1.0, 0
+ %struct.irrr1 = insertvalue %struct.float4 %struct.irrr0, float %b, 1
+ %struct.irrr2 = insertvalue %struct.float4 %struct.irrr1, float %c, 2
+ %struct.irrr3 = insertvalue %struct.float4 %struct.irrr2, float %d, 3
+ call void @call_v4_f32(%struct.float4 %struct.irrr3)
+ ret void
+}
+define void @st_param_v4_f32_rirr(float %a, float %c, float %d) {
+; CHECK-LABEL: st_param_v4_f32_rirr(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_rirr_param_0];
+; CHECK-NEXT: ld.param.f32 %f2, [st_param_v4_f32_rirr_param_1];
+; CHECK-NEXT: ld.param.f32 %f3, [st_param_v4_f32_rirr_param_2];
+; CHECK-NEXT: { // callseq 70, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.f32 [param0+0], {%f1, 0f40000000, %f2, %f3};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_f32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 70
+; CHECK-NEXT: ret;
+ %struct.rirr0 = insertvalue %struct.float4 poison, float %a, 0
+ %struct.rirr1 = insertvalue %struct.float4 %struct.rirr0, float 2.0, 1
+ %struct.rirr2 = insertvalue %struct.float4 %struct.rirr1, float %c, 2
+ %struct.rirr3 = insertvalue %struct.float4 %struct.rirr2, float %d, 3
+ call void @call_v4_f32(%struct.float4 %struct.rirr3)
+ ret void
+}
+define void @st_param_v4_f32_rrir(float %a, float %b, float %d) {
+; CHECK-LABEL: st_param_v4_f32_rrir(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_rrir_param_0];
+; CHECK-NEXT: ld.param.f32 %f2, [st_param_v4_f32_rrir_param_1];
+; CHECK-NEXT: ld.param.f32 %f3, [st_param_v4_f32_rrir_param_2];
+; CHECK-NEXT: { // callseq 71, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.f32 [param0+0], {%f1, %f2, 0f40400000, %f3};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_f32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 71
+; CHECK-NEXT: ret;
+ %struct.rrir0 = insertvalue %struct.float4 poison, float %a, 0
+ %struct.rrir1 = insertvalue %struct.float4 %struct.rrir0, float %b, 1
+ %struct.rrir2 = insertvalue %struct.float4 %struct.rrir1, float 3.0, 2
+ %struct.rrir3 = insertvalue %struct.float4 %struct.rrir2, float %d, 3
+ call void @call_v4_f32(%struct.float4 %struct.rrir3)
+ ret void
+}
+define void @st_param_v4_f32_rrri(float %a, float %b, float %c) {
+; CHECK-LABEL: st_param_v4_f32_rrri(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_rrri_param_0];
+; CHECK-NEXT: ld.param.f32 %f2, [st_param_v4_f32_rrri_param_1];
+; CHECK-NEXT: ld.param.f32 %f3, [st_param_v4_f32_rrri_param_2];
+; CHECK-NEXT: { // callseq 72, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.f32 [param0+0], {%f1, %f2, %f3, 0f40800000};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_f32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 72
+; CHECK-NEXT: ret;
+ %struct.rrri0 = insertvalue %struct.float4 poison, float %a, 0
+ %struct.rrri1 = insertvalue %struct.float4 %struct.rrri0, float %b, 1
+ %struct.rrri2 = insertvalue %struct.float4 %struct.rrri1, float %c, 2
+ %struct.rrri3 = insertvalue %struct.float4 %struct.rrri2, float 4.0, 3
+ call void @call_v4_f32(%struct.float4 %struct.rrri3)
+ ret void
+}
+define void @st_param_v4_f32_iirr(float %c, float %d) {
+; CHECK-LABEL: st_param_v4_f32_iirr(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_iirr_param_0];
+; CHECK-NEXT: ld.param.f32 %f2, [st_param_v4_f32_iirr_param_1];
+; CHECK-NEXT: { // callseq 73, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.f32 [param0+0], {0f3F800000, 0f40000000, %f1, %f2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_f32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 73
+; CHECK-NEXT: ret;
+ %struct.iirr0 = insertvalue %struct.float4 poison, float 1.0, 0
+ %struct.iirr1 = insertvalue %struct.float4 %struct.iirr0, float 2.0, 1
+ %struct.iirr2 = insertvalue %struct.float4 %struct.iirr1, float %c, 2
+ %struct.iirr3 = insertvalue %struct.float4 %struct.iirr2, float %d, 3
+ call void @call_v4_f32(%struct.float4 %struct.iirr3)
+ ret void
+}
+define void @st_param_v4_f32_irir(float %b, float %d) {
+; CHECK-LABEL: st_param_v4_f32_irir(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_irir_param_0];
+; CHECK-NEXT: ld.param.f32 %f2, [st_param_v4_f32_irir_param_1];
+; CHECK-NEXT: { // callseq 74, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.f32 [param0+0], {0f3F800000, %f1, 0f40400000, %f2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_f32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 74
+; CHECK-NEXT: ret;
+ %struct.irir0 = insertvalue %struct.float4 poison, float 1.0, 0
+ %struct.irir1 = insertvalue %struct.float4 %struct.irir0, float %b, 1
+ %struct.irir2 = insertvalue %struct.float4 %struct.irir1, float 3.0, 2
+ %struct.irir3 = insertvalue %struct.float4 %struct.irir2, float %d, 3
+ call void @call_v4_f32(%struct.float4 %struct.irir3)
+ ret void
+}
+define void @st_param_v4_f32_irri(float %b, float %c) {
+; CHECK-LABEL: st_param_v4_f32_irri(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_irri_param_0];
+; CHECK-NEXT: ld.param.f32 %f2, [st_param_v4_f32_irri_param_1];
+; CHECK-NEXT: { // callseq 75, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.f32 [param0+0], {0f3F800000, %f1, %f2, 0f40800000};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_f32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 75
+; CHECK-NEXT: ret;
+ %struct.irri0 = insertvalue %struct.float4 poison, float 1.0, 0
+ %struct.irri1 = insertvalue %struct.float4 %struct.irri0, float %b, 1
+ %struct.irri2 = insertvalue %struct.float4 %struct.irri1, float %c, 2
+ %struct.irri3 = insertvalue %struct.float4 %struct.irri2, float 4.0, 3
+ call void @call_v4_f32(%struct.float4 %struct.irri3)
+ ret void
+}
+define void @st_param_v4_f32_riir(float %a, float %d) {
+; CHECK-LABEL: st_param_v4_f32_riir(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_riir_param_0];
+; CHECK-NEXT: ld.param.f32 %f2, [st_param_v4_f32_riir_param_1];
+; CHECK-NEXT: { // callseq 76, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.f32 [param0+0], {%f1, 0f40000000, 0f40400000, %f2};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_f32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 76
+; CHECK-NEXT: ret;
+ %struct.riir0 = insertvalue %struct.float4 poison, float %a, 0
+ %struct.riir1 = insertvalue %struct.float4 %struct.riir0, float 2.0, 1
+ %struct.riir2 = insertvalue %struct.float4 %struct.riir1, float 3.0, 2
+ %struct.riir3 = insertvalue %struct.float4 %struct.riir2, float %d, 3
+ call void @call_v4_f32(%struct.float4 %struct.riir3)
+ ret void
+}
+define void @st_param_v4_f32_riri(float %a, float %c) {
+; CHECK-LABEL: st_param_v4_f32_riri(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_riri_param_0];
+; CHECK-NEXT: ld.param.f32 %f2, [st_param_v4_f32_riri_param_1];
+; CHECK-NEXT: { // callseq 77, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.f32 [param0+0], {%f1, 0f40000000, %f2, 0f40800000};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_f32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 77
+; CHECK-NEXT: ret;
+ %struct.riri0 = insertvalue %struct.float4 poison, float %a, 0
+ %struct.riri1 = insertvalue %struct.float4 %struct.riri0, float 2.0, 1
+ %struct.riri2 = insertvalue %struct.float4 %struct.riri1, float %c, 2
+ %struct.riri3 = insertvalue %struct.float4 %struct.riri2, float 4.0, 3
+ call void @call_v4_f32(%struct.float4 %struct.riri3)
+ ret void
+}
+define void @st_param_v4_f32_rrii(float %a, float %b) {
+; CHECK-LABEL: st_param_v4_f32_rrii(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_rrii_param_0];
+; CHECK-NEXT: ld.param.f32 %f2, [st_param_v4_f32_rrii_param_1];
+; CHECK-NEXT: { // callseq 78, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.f32 [param0+0], {%f1, %f2, 0f40400000, 0f40800000};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_f32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 78
+; CHECK-NEXT: ret;
+ %struct.rrii0 = insertvalue %struct.float4 poison, float %a, 0
+ %struct.rrii1 = insertvalue %struct.float4 %struct.rrii0, float %b, 1
+ %struct.rrii2 = insertvalue %struct.float4 %struct.rrii1, float 3.0, 2
+ %struct.rrii3 = insertvalue %struct.float4 %struct.rrii2, float 4.0, 3
+ call void @call_v4_f32(%struct.float4 %struct.rrii3)
+ ret void
+}
+define void @st_param_v4_f32_iiir(float %d) {
+; CHECK-LABEL: st_param_v4_f32_iiir(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_iiir_param_0];
+; CHECK-NEXT: { // callseq 79, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.f32 [param0+0], {0f3F800000, 0f40000000, 0f40400000, %f1};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_f32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 79
+; CHECK-NEXT: ret;
+ %struct.iiir0 = insertvalue %struct.float4 poison, float 1.0, 0
+ %struct.iiir1 = insertvalue %struct.float4 %struct.iiir0, float 2.0, 1
+ %struct.iiir2 = insertvalue %struct.float4 %struct.iiir1, float 3.0, 2
+ %struct.iiir3 = insertvalue %struct.float4 %struct.iiir2, float %d, 3
+ call void @call_v4_f32(%struct.float4 %struct.iiir3)
+ ret void
+}
+define void @st_param_v4_f32_iiri(float %c) {
+; CHECK-LABEL: st_param_v4_f32_iiri(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_iiri_param_0];
+; CHECK-NEXT: { // callseq 80, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.f32 [param0+0], {0f3F800000, 0f40000000, %f1, 0f40800000};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_f32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 80
+; CHECK-NEXT: ret;
+ %struct.iiri0 = insertvalue %struct.float4 poison, float 1.0, 0
+ %struct.iiri1 = insertvalue %struct.float4 %struct.iiri0, float 2.0, 1
+ %struct.iiri2 = insertvalue %struct.float4 %struct.iiri1, float %c, 2
+ %struct.iiri3 = insertvalue %struct.float4 %struct.iiri2, float 4.0, 3
+ call void @call_v4_f32(%struct.float4 %struct.iiri3)
+ ret void
+}
+define void @st_param_v4_f32_irii(float %b) {
+; CHECK-LABEL: st_param_v4_f32_irii(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_irii_param_0];
+; CHECK-NEXT: { // callseq 81, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.f32 [param0+0], {0f3F800000, %f1, 0f40400000, 0f40800000};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_f32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 81
+; CHECK-NEXT: ret;
+ %struct.irii0 = insertvalue %struct.float4 poison, float 1.0, 0
+ %struct.irii1 = insertvalue %struct.float4 %struct.irii0, float %b, 1
+ %struct.irii2 = insertvalue %struct.float4 %struct.irii1, float 3.0, 2
+ %struct.irii3 = insertvalue %struct.float4 %struct.irii2, float 4.0, 3
+ call void @call_v4_f32(%struct.float4 %struct.irii3)
+ ret void
+}
+define void @st_param_v4_f32_riii(float %a) {
+; CHECK-LABEL: st_param_v4_f32_riii(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_riii_param_0];
+; CHECK-NEXT: { // callseq 82, 0
+; CHECK-NEXT: .param .align 16 .b8 param0[16];
+; CHECK-NEXT: st.param.v4.f32 [param0+0], {%f1, 0f40000000, 0f40400000, 0f40800000};
+; CHECK-NEXT: call.uni
+; CHECK-NEXT: call_v4_f32,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-NEXT: } // callseq 82
+; CHECK-NEXT: ret;
+ %struct.riii0 = insertvalue %struct.float4 poison, float %a, 0
+ %struct.riii1 = insertvalue %struct.float4 %struct.riii0, float 2.0, 1
+ %struct.riii2 = insertvalue %struct.float4 %struct.riii1, float 3.0, 2
+ %struct.riii3 = insertvalue %struct.float4 %struct.riii2, float 4.0, 3
+ call void @call_v4_f32(%struct.float4 %struct.riii3)
+ ret void
+}
+
+declare void @call_v4_i8(%struct.char4 alignstack(4))
+declare void @call_v4_i16(%struct.short4 alignstack(8))
+declare void @call_v4_i32(%struct.int4 alignstack(16))
+declare void @call_v4_f32(%struct.float4 alignstack(16))
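Aside (illustrative, not part of the patch): in the st_param_v4_* tests above, each name suffix encodes, element by element, whether the struct member is an immediate (i) or arrives in a register (r), and the autogenerated CHECK lines verify that the immediates are folded straight into the operand braces of the st.param.v4 store instead of being materialized into registers first. A minimal sketch of one riii-style case, reusing the file's conventions (the callee name here is hypothetical):

; Only %a should need a register; 2, 3, 4 should appear as literals
; inside the st.param.v4.b32 operand list.
%struct.int4 = type { i32, i32, i32, i32 }
declare void @callee(%struct.int4 alignstack(16))
define void @demo(i32 %a) {
  %s0 = insertvalue %struct.int4 poison, i32 %a, 0
  %s1 = insertvalue %struct.int4 %s0, i32 2, 1
  %s2 = insertvalue %struct.int4 %s1, i32 3, 2
  %s3 = insertvalue %struct.int4 %s2, i32 4, 3
  call void @callee(%struct.int4 %s3)
  ret void
}

As the NOTE lines in the neighboring tests indicate, assertions of this kind are regenerated with utils/update_llc_test_checks.py rather than written by hand.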
diff --git a/llvm/test/CodeGen/PowerPC/aix-tocdata-fastisel.ll b/llvm/test/CodeGen/PowerPC/aix-tocdata-fastisel.ll
index 5a7fcd1d0ddd..65338919f631 100644
--- a/llvm/test/CodeGen/PowerPC/aix-tocdata-fastisel.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-tocdata-fastisel.ll
@@ -1,9 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc < %s -mtriple=powerpc64-ibm-aix-xcoff -fast-isel -verify-machineinstrs \
; RUN: -code-model=small | FileCheck %s --check-prefix=SMALL
-
-;; FIXME: when toc data for 64 big large code model is supported,
-;; add a run line for large code model too.
+; RUN: llc < %s -mtriple=powerpc64-ibm-aix-xcoff -fast-isel -verify-machineinstrs \
+; RUN: -code-model=large | FileCheck %s --check-prefix=LARGE
@a = global i32 0, align 4 #0
@@ -11,9 +10,15 @@ define signext i32 @foo() #1 {
; SMALL-LABEL: foo:
; SMALL: # %bb.0: # %entry
; SMALL-NEXT: la 3, a[TD](2)
-; SMALL-NEXT: lwz 3, 0(3)
-; SMALL-NEXT: extsw 3, 3
+; SMALL-NEXT: lwa 3, 0(3)
; SMALL-NEXT: blr
+;
+; LARGE-LABEL: foo:
+; LARGE: # %bb.0: # %entry
+; LARGE-NEXT: addis 3, a[TD]@u(2)
+; LARGE-NEXT: la 3, a[TD]@l(3)
+; LARGE-NEXT: lwa 3, 0(3)
+; LARGE-NEXT: blr
entry:
%0 = load i32, ptr @a, align 4
ret i32 %0
diff --git a/llvm/test/CodeGen/PowerPC/ctrloop-le.ll b/llvm/test/CodeGen/PowerPC/ctrloop-le.ll
index 599e540e898a..08ecd8970d83 100644
--- a/llvm/test/CodeGen/PowerPC/ctrloop-le.ll
+++ b/llvm/test/CodeGen/PowerPC/ctrloop-le.ll
@@ -293,8 +293,7 @@ for.end: ; preds = %for.body, %entry
; CHECK: test_pos1_rr_sle
-; FIXME: Support this loop!
-; CHECK-NOT: bdnz
+; CHECK: bdnz
; a < b
define void @test_pos1_rr_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
@@ -323,8 +322,7 @@ for.end: ; preds = %for.body, %entry
; CHECK: test_pos2_rr_sle
-; FIXME: Support this loop!
-; CHECK-NOT: bdnz
+; CHECK: bdnz
; a < b
define void @test_pos2_rr_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
@@ -353,8 +351,7 @@ for.end: ; preds = %for.body, %entry
; CHECK: test_pos4_rr_sle
-; FIXME: Support this loop!
-; CHECK-NOT: bdnz
+; CHECK: bdnz
; a < b
define void @test_pos4_rr_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
@@ -383,8 +380,7 @@ for.end: ; preds = %for.body, %entry
; CHECK: test_pos8_rr_sle
-; FIXME: Support this loop!
-; CHECK-NOT: bdnz
+; CHECK: bdnz
; a < b
define void @test_pos8_rr_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
@@ -413,8 +409,7 @@ for.end: ; preds = %for.body, %entry
; CHECK: test_pos16_rr_sle
-; FIXME: Support this loop!
-; CHECK-NOT: bdnz
+; CHECK: bdnz
; a < b
define void @test_pos16_rr_sle(ptr nocapture %p, i32 %a, i32 %b) nounwind {
entry:
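Aside (illustrative, not part of the patch): the flipped expectations above record that the PowerPC CTR-loop transformation now turns these signed a <= b trip counts into counted loops (bdnz); the deleted FIXMEs had marked exactly this shape as unsupported. Sketched under the same assumptions as the tests, the loop in question looks like:

define void @demo(ptr %p, i32 %a, i32 %b) nounwind {
entry:
  %cmp = icmp sle i32 %a, %b        ; signed a <= b guard
  br i1 %cmp, label %loop, label %exit
loop:
  %i = phi i32 [ %a, %entry ], [ %inc, %loop ]
  %gep = getelementptr inbounds i32, ptr %p, i32 %i
  store i32 0, ptr %gep
  %inc = add nsw i32 %i, 1
  %done = icmp sgt i32 %inc, %b     ; runs while i <= b
  br i1 %done, label %exit, label %loop
exit:
  ret void
}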
diff --git a/llvm/test/CodeGen/PowerPC/mergeable-string-pool-pr92991.ll b/llvm/test/CodeGen/PowerPC/mergeable-string-pool-pr92991.ll
new file mode 100644
index 000000000000..4e9c69e5fe4c
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/mergeable-string-pool-pr92991.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
+
+@g = private constant [4 x i32] [i32 122, i32 67, i32 35, i32 56]
+@g2 = private constant [1 x i64] [i64 1], align 8
+
+define void @test(ptr %p, ptr %p2) {
+; CHECK-LABEL: test:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addis 5, 2, .L__ModuleStringPool@toc@ha
+; CHECK-NEXT: addi 5, 5, .L__ModuleStringPool@toc@l
+; CHECK-NEXT: addi 6, 5, 12
+; CHECK-NEXT: std 6, 0(3)
+; CHECK-NEXT: addi 3, 5, 16
+; CHECK-NEXT: std 3, 0(4)
+; CHECK-NEXT: blr
+ store ptr getelementptr inbounds ([4 x i32], ptr @g, i64 0, i64 1), ptr %p
+ store ptr getelementptr inbounds ([4 x i32], ptr @g, i64 0, i64 2), ptr %p2
+ ret void
+}
diff --git a/llvm/test/CodeGen/PowerPC/pr92233.ll b/llvm/test/CodeGen/PowerPC/pr92233.ll
new file mode 100644
index 000000000000..858d665909fe
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/pr92233.ll
@@ -0,0 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mcpu=pwr9 -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu | FileCheck %s
+
+define internal fp128 @f(i128 %v) nounwind {
+; CHECK-LABEL: f:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: stwu 1, -32(1)
+; CHECK-NEXT: stw 6, 28(1)
+; CHECK-NEXT: stw 5, 24(1)
+; CHECK-NEXT: stw 4, 20(1)
+; CHECK-NEXT: stw 3, 16(1)
+; CHECK-NEXT: lxv 34, 16(1)
+; CHECK-NEXT: addi 1, 1, 32
+; CHECK-NEXT: blr
+entry:
+ %cast = bitcast i128 %v to fp128
+ ret fp128 %cast
+}
+
diff --git a/llvm/test/CodeGen/PowerPC/toc-data-no-data-sections.ll b/llvm/test/CodeGen/PowerPC/toc-data-no-data-sections.ll
new file mode 100644
index 000000000000..77851fb83025
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/toc-data-no-data-sections.ll
@@ -0,0 +1,18 @@
+; RUN: llc -mtriple powerpc64-ibm-aix-xcoff -data-sections=false -verify-machineinstrs < %s | FileCheck %s
+
+@a1 = global i32 0, align 4 #0
+
+define void @foo() {
+entry:
+ store i32 1, ptr @a1, align 4
+ ret void
+}
+
+attributes #0 = { "toc-data" }
+
+; CHECK: .toc
+; CHECK-NEXT: .csect a1[TD],2
+; CHECK-NEXT: .globl a1[TD]
+; CHECK-NEXT: .align 2
+; CHECK-NOT: a1[TD]:
+; CHECK-NEXT: .vbyte 4, 0
diff --git a/llvm/test/CodeGen/PowerPC/toc-data.ll b/llvm/test/CodeGen/PowerPC/toc-data.ll
index 7f7afe76cfcd..12286657488d 100644
--- a/llvm/test/CodeGen/PowerPC/toc-data.ll
+++ b/llvm/test/CodeGen/PowerPC/toc-data.ll
@@ -16,6 +16,10 @@
; RUN: -stop-before=ppc-vsx-copy | FileCheck %s --check-prefix CHECK32LARGE
; RUN: llc -mtriple powerpc-ibm-aix-xcoff -code-model=large -verify-machineinstrs < %s | FileCheck %s --check-prefix TEST32LARGE
+; RUN: llc -mtriple powerpc64-ibm-aix-xcoff -code-model=large -verify-machineinstrs < %s \
+; RUN: -stop-before=ppc-vsx-copy | FileCheck %s --check-prefix CHECK64LARGE
+; RUN: llc -mtriple powerpc64-ibm-aix-xcoff -code-model=large -verify-machineinstrs < %s | FileCheck %s --check-prefix TEST64LARGE
+
; Global variables i and f have the toc-data attribute.
; In the following functions, those writing to or reading from
; variables i and f should use the toc-data access pattern.
@@ -45,8 +49,8 @@ define dso_local void @write_int(i32 signext %in) {
; CHECK64-NOOPT: name: write_int
; CHECK64-NOOPT: %[[SUBREG:[0-9]+]]:gprc = COPY %{{[0-9]}}.sub_32
-; CHECK64-NOOPT: %[[ADDR:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDItoc8 @i, $x2 :: (load (s64) from got)
-; CHECK64-NOOPT: STW %[[SUBREG]], 0, killed %[[ADDR]] :: (store (s32) into @i)
+; CHECK64-NOOPT: %[[ADDR:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDItoc8 @i, $x2
+; CHECK64-NOOPT: STW %[[SUBREG]], 0, %[[ADDR]]
; TEST64: .write_int:
; TEST64: la 4, i[TD](2)
@@ -63,6 +67,17 @@ define dso_local void @write_int(i32 signext %in) {
; TEST32LARGE-NEXT: la 4, i[TD]@l(4)
; TEST32LARGE-NEXT: stw 3, 0(4)
+
+; CHECK64LARGE: name: write_int
+; CHECK64LARGE: %[[SCRATCH1:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDIStocHA8 $x2, @i
+; CHECK64LARGE-NEXT: %[[SCRATCH2:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDItocL8 killed %[[SCRATCH1]], @i
+; CHECK64LARGE-NEXT: STW8 %{{[0-9]+}}, 0, killed %[[SCRATCH2]] :: (store (s32) into @i)
+
+; TEST64LARGE: .write_int:
+; TEST64LARGE: addis 4, i[TD]@u(2)
+; TEST64LARGE-NEXT: la 4, i[TD]@l(4)
+; TEST64LARGE-NEXT: stw 3, 0(4)
+
define dso_local i64 @read_ll() {
entry:
%0 = load i64, ptr @ll, align 8
@@ -98,6 +113,15 @@ define dso_local i64 @read_ll() {
; TEST32LARGE-NEXT: lwz 3, 0(4)
; TEST32LARGE-NEXT: lwz 4, 4(4)
+; CHECK64LARGE: name: read_ll
+; CHECK64LARGE: %[[SCRATCH1:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDIStocHA8 $x2, @ll
+; CHECK64LARGE: LDtocL @ll, killed %[[SCRATCH1]] :: (load (s64) from got)
+
+; TEST64LARGE: .read_ll:
+; TEST64LARGE: addis 3, L..C0@u(2)
+; TEST64LARGE-NEXT: ld 3, L..C0@l(3)
+; TEST64LARGE-NEXT: ld 3, 0(3)
+
define dso_local float @read_float() {
entry:
%0 = load float, ptr @f, align 4
@@ -117,7 +141,7 @@ define dso_local float @read_float() {
; CHECK64-NOOPT: name: read_float
; CHECK64-NOOPT: %[[SCRATCH:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDItoc8 @f, $x2
-; CHECK64-NOOPT: %{{[0-9]+}}:f4rc = LFS 0, killed %[[SCRATCH]]
+; CHECK64-NOOPT: %{{[0-9]+}}:f4rc = LFS 0, %[[SCRATCH]]
; TEST64: .read_float:
; TEST64: la 3, f[TD](2)
@@ -134,6 +158,18 @@ define dso_local float @read_float() {
; TEST32LARGE-NEXT: la 3, f[TD]@l(3)
; TEST32LARGE-NEXT: lfs 1, 0(3)
+
+; CHECK64LARGE: name: read_float
+; CHECK64LARGE: %[[SCRATCH1:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDIStocHA8 $x2, @f
+; CHECK64LARGE-NEXT: %[[SCRATCH2:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDItocL8 killed %[[SCRATCH1]], @f
+; CHECK64LARGE-NEXT: LFS 0, killed %[[SCRATCH2]] :: (dereferenceable load (s32) from @f)
+
+
+; TEST64LARGE: .read_float:
+; TEST64LARGE: addis 3, f[TD]@u(2)
+; TEST64LARGE-NEXT: la 3, f[TD]@l(3)
+; TEST64LARGE-NEXT: lfs 1, 0(3)
+
define dso_local void @write_double(double %in) {
entry:
store double %in, ptr @d, align 8
@@ -167,6 +203,15 @@ define dso_local void @write_double(double %in) {
; TEST32LARGE-NEXT: lwz 3, L..C1@l(3)
; TEST32LARGE-NEXT: stfd 1, 0(3)
+; CHECK64LARGE: name: write_double
+; CHECK64LARGE: %[[SCRATCH1:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDIStocHA8 $x2, @d
+; CHECK64LARGE: LDtocL @d, killed %[[SCRATCH1]] :: (load (s64) from got)
+
+; TEST64LARGE: .write_double:
+; TEST64LARGE: addis 3, L..C1@u(2)
+; TEST64LARGE-NEXT: ld 3, L..C1@l(3)
+; TEST64LARGE-NEXT: stfd 1, 0(3)
+
define dso_local nonnull ptr @addr() {
entry:
ret ptr @i
@@ -183,7 +228,7 @@ define dso_local nonnull ptr @addr() {
; CHECK64-NEXT: $x3 = COPY %[[SCRATCH]]
; CHECK64-NOOPT: name: addr
-; CHECK64-NOOPT: %[[SCRATCH:[0-9]+]]:g8rc = ADDItoc8 @i, $x2
+; CHECK64-NOOPT: %[[SCRATCH:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDItoc8 @i, $x2
; CHECK64-NOOPT: $x3 = COPY %[[SCRATCH]]
; TEST64: .addr
@@ -237,4 +282,26 @@ define dso_local nonnull ptr @addr() {
; TEST32LARGE-NEXT: .globl f[TD]
; TEST32LARGE-NOT: .tc f[TE],f[RW]
+; CHECK64LARGE: name: addr
+; CHECK64LARGE: %[[SCRATCH1:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDIStocHA8 $x2, @i
+; CHECK64LARGE-NEXT: %[[SCRATCH2:[0-9]+]]:g8rc = ADDItocL8 killed %[[SCRATCH1]], @i
+; CHECK64LARGE-NEXT: $x3 = COPY %[[SCRATCH2]]
+
+; TEST64LARGE: .addr:
+; TEST64LARGE: addis 3, i[TD]@u(2)
+; TEST64LARGE: la 3, i[TD]@l(3)
+
+; TEST64LARGE: .toc
+; TEST64LARGE: .tc ll[TE],ll[RW]
+; TEST64LARGE-NOT: .csect ll[TD]
+; TEST64LARGE: .tc d[TE],d[RW]
+; TEST64LARGE-NOT: .csect d[TD],2
+; TEST64LARGE: .csect i[TD],2
+; TEST64LARGE-NEXT: .globl i[TD]
+; TEST64LARGE-NEXT: .align 2
+; TEST64LARGE-NOT: .tc i[TE],i[RW]
+; TEST64LARGE: .csect f[TD],2
+; TEST64LARGE-NEXT: .globl f[TD]
+; TEST64LARGE-NOT: .tc f[TE],f[RW]
+
attributes #0 = { "toc-data" }
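Aside (illustrative, not part of the patch): with -code-model=large on AIX, a variable carrying the "toc-data" attribute is placed directly in the TOC as a [TD] csect, so its address is formed in two instructions: addis adds the upper 16 bits of the TOC offset (the @u relocation, adjusted for the sign of the low half) to the TOC pointer in r2, and la adds the low 16 bits (@l); no pointer is loaded from a [TE] entry. Variables without the attribute (ll and d above) keep the conventional pattern of loading the address from a TOC entry (L..C0, L..C1) before the access. A minimal sketch:

; Expected large-code-model access, per the TEST64LARGE checks above:
;   addis 3, v[TD]@u(2)   ; upper half of the TOC offset, base r2
;   la    3, v[TD]@l(3)   ; plus the lower half
@v = global i32 0, align 4 #0
define i32 @get() {
entry:
  %0 = load i32, ptr @v, align 4
  ret i32 %0
}
attributes #0 = { "toc-data" }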
diff --git a/llvm/test/CodeGen/PowerPC/vec_shuffle.ll b/llvm/test/CodeGen/PowerPC/vec_shuffle.ll
index e698ab1e15a9..22b5ff0d21cb 100644
--- a/llvm/test/CodeGen/PowerPC/vec_shuffle.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_shuffle.ll
@@ -32,7 +32,7 @@ entry:
%tmp15 = extractelement <16 x i8> %tmp2.upgrd.2, i32 2 ; <i8> [#uses=1]
%tmp16 = extractelement <16 x i8> %tmp2.upgrd.2, i32 3 ; <i8> [#uses=1]
%tmp17 = extractelement <16 x i8> %tmp2.upgrd.2, i32 4 ; <i8> [#uses=1]
- %tmp18 = insertelement <16 x i8> undef, i8 %tmp.upgrd.3, i32 0 ; <<16 x i8>> [#uses=1]
+ %tmp18 = insertelement <16 x i8> poison, i8 %tmp.upgrd.3, i32 0 ; <<16 x i8>> [#uses=1]
%tmp19 = insertelement <16 x i8> %tmp18, i8 %tmp3, i32 1 ; <<16 x i8>> [#uses=1]
%tmp20 = insertelement <16 x i8> %tmp19, i8 %tmp4, i32 2 ; <<16 x i8>> [#uses=1]
%tmp21 = insertelement <16 x i8> %tmp20, i8 %tmp5, i32 3 ; <<16 x i8>> [#uses=1]
@@ -80,7 +80,7 @@ define void @VSLDOI_xx(ptr %A, ptr %B) {
%tmp15 = extractelement <16 x i8> %tmp2.upgrd.6, i32 2 ; <i8> [#uses=1]
%tmp16 = extractelement <16 x i8> %tmp2.upgrd.6, i32 3 ; <i8> [#uses=1]
%tmp17 = extractelement <16 x i8> %tmp2.upgrd.6, i32 4 ; <i8> [#uses=1]
- %tmp18 = insertelement <16 x i8> undef, i8 %tmp.upgrd.7, i32 0 ; <<16 x i8>> [#uses=1]
+ %tmp18 = insertelement <16 x i8> poison, i8 %tmp.upgrd.7, i32 0 ; <<16 x i8>> [#uses=1]
%tmp19 = insertelement <16 x i8> %tmp18, i8 %tmp3, i32 1 ; <<16 x i8>> [#uses=1]
%tmp20 = insertelement <16 x i8> %tmp19, i8 %tmp4, i32 2 ; <<16 x i8>> [#uses=1]
%tmp21 = insertelement <16 x i8> %tmp20, i8 %tmp5, i32 3 ; <<16 x i8>> [#uses=1]
@@ -150,7 +150,7 @@ entry:
%tmp15 = extractelement <16 x i8> %tmp2, i32 14 ; <i8> [#uses=1]
%tmp16 = extractelement <16 x i8> %tmp, i32 15 ; <i8> [#uses=1]
%tmp17 = extractelement <16 x i8> %tmp2, i32 15 ; <i8> [#uses=1]
- %tmp18 = insertelement <16 x i8> undef, i8 %tmp.upgrd.12, i32 0 ; <<16 x i8>> [#uses=1]
+ %tmp18 = insertelement <16 x i8> poison, i8 %tmp.upgrd.12, i32 0 ; <<16 x i8>> [#uses=1]
%tmp19 = insertelement <16 x i8> %tmp18, i8 %tmp3, i32 1 ; <<16 x i8>> [#uses=1]
%tmp20 = insertelement <16 x i8> %tmp19, i8 %tmp4, i32 2 ; <<16 x i8>> [#uses=1]
%tmp21 = insertelement <16 x i8> %tmp20, i8 %tmp5, i32 3 ; <<16 x i8>> [#uses=1]
@@ -189,7 +189,7 @@ entry:
%tmp7 = extractelement <8 x i16> %tmp2, i32 6 ; <i16> [#uses=1]
%tmp8 = extractelement <8 x i16> %tmp, i32 7 ; <i16> [#uses=1]
%tmp9 = extractelement <8 x i16> %tmp2, i32 7 ; <i16> [#uses=1]
- %tmp10 = insertelement <8 x i16> undef, i16 %tmp.upgrd.13, i32 0 ; <<8 x i16>> [#uses=1]
+ %tmp10 = insertelement <8 x i16> poison, i16 %tmp.upgrd.13, i32 0 ; <<8 x i16>> [#uses=1]
%tmp11 = insertelement <8 x i16> %tmp10, i16 %tmp3, i32 1 ; <<8 x i16>> [#uses=1]
%tmp12 = insertelement <8 x i16> %tmp11, i16 %tmp4, i32 2 ; <<8 x i16>> [#uses=1]
%tmp13 = insertelement <8 x i16> %tmp12, i16 %tmp5, i32 3 ; <<8 x i16>> [#uses=1]
@@ -216,7 +216,7 @@ entry:
%tmp3 = extractelement <4 x i32> %tmp2, i32 2 ; <i32> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp, i32 3 ; <i32> [#uses=1]
%tmp5 = extractelement <4 x i32> %tmp2, i32 3 ; <i32> [#uses=1]
- %tmp6 = insertelement <4 x i32> undef, i32 %tmp.upgrd.14, i32 0 ; <<4 x i32>> [#uses=1]
+ %tmp6 = insertelement <4 x i32> poison, i32 %tmp.upgrd.14, i32 0 ; <<4 x i32>> [#uses=1]
%tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 1 ; <<4 x i32>> [#uses=1]
%tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 2 ; <<4 x i32>> [#uses=1]
%tmp9 = insertelement <4 x i32> %tmp8, i32 %tmp5, i32 3 ; <<4 x i32>> [#uses=1]
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/constbarrier-rv32.ll b/llvm/test/CodeGen/RISCV/GlobalISel/constbarrier-rv32.ll
new file mode 100644
index 000000000000..70d1b25309c8
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/constbarrier-rv32.ll
@@ -0,0 +1,60 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -global-isel -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=RV32
+
+define i16 @constant_fold_barrier_i16(i16 %x, i16 %y) {
+; RV32-LABEL: constant_fold_barrier_i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: li a1, 1
+; RV32-NEXT: slli a1, a1, 11
+; RV32-NEXT: and a0, a0, a1
+; RV32-NEXT: addi a1, a1, 289
+; RV32-NEXT: or a0, a0, a1
+; RV32-NEXT: ret
+entry:
+ %and = and i16 %x, 2048
+ %or = or i16 %and, 2337
+ ret i16 %or
+}
+
+define void @constant_fold_barrier_i128(ptr %p) {
+; RV32-LABEL: constant_fold_barrier_i128:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: li a1, 1
+; RV32-NEXT: slli a1, a1, 11
+; RV32-NEXT: lw a2, 0(a0)
+; RV32-NEXT: lw a3, 4(a0)
+; RV32-NEXT: lw a4, 8(a0)
+; RV32-NEXT: lw a5, 12(a0)
+; RV32-NEXT: and a2, a2, a1
+; RV32-NEXT: and a3, a3, zero
+; RV32-NEXT: and a4, a4, zero
+; RV32-NEXT: and a5, a5, zero
+; RV32-NEXT: add a2, a2, a1
+; RV32-NEXT: sltu a1, a2, a1
+; RV32-NEXT: add a6, a3, zero
+; RV32-NEXT: sltu a3, a6, a3
+; RV32-NEXT: add a6, a6, a1
+; RV32-NEXT: seqz a7, a6
+; RV32-NEXT: and a1, a7, a1
+; RV32-NEXT: or a1, a3, a1
+; RV32-NEXT: add a3, a4, zero
+; RV32-NEXT: sltu a4, a3, a4
+; RV32-NEXT: add a3, a3, a1
+; RV32-NEXT: seqz a7, a3
+; RV32-NEXT: and a1, a7, a1
+; RV32-NEXT: or a1, a4, a1
+; RV32-NEXT: add a5, a5, zero
+; RV32-NEXT: add a1, a5, a1
+; RV32-NEXT: sw a2, 0(a0)
+; RV32-NEXT: sw a6, 4(a0)
+; RV32-NEXT: sw a3, 8(a0)
+; RV32-NEXT: sw a1, 12(a0)
+; RV32-NEXT: ret
+entry:
+ %x = load i128, ptr %p
+ %and = and i128 %x, 2048
+ %add = add i128 %and, 2048
+ store i128 %add, ptr %p
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/constbarrier-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/constbarrier-rv64.ll
new file mode 100644
index 000000000000..21d7b1d70714
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/constbarrier-rv64.ll
@@ -0,0 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv64 -global-isel -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=RV64
+
+define i16 @constant_fold_barrier_i16(i16 %x, i16 %y) {
+; RV64-LABEL: constant_fold_barrier_i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a1, 1
+; RV64-NEXT: slli a1, a1, 11
+; RV64-NEXT: and a0, a0, a1
+; RV64-NEXT: addiw a1, a1, 289
+; RV64-NEXT: or a0, a0, a1
+; RV64-NEXT: ret
+entry:
+ %and = and i16 %x, 2048
+ %or = or i16 %and, 2337
+ ret i16 %or
+}
+
+define i128 @constant_fold_barrier_i128(i128 %x) {
+; RV64-LABEL: constant_fold_barrier_i128:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: li a2, 1
+; RV64-NEXT: slli a2, a2, 11
+; RV64-NEXT: and a0, a0, a2
+; RV64-NEXT: and a1, a1, zero
+; RV64-NEXT: add a0, a0, a2
+; RV64-NEXT: sltu a2, a0, a2
+; RV64-NEXT: add a1, a1, zero
+; RV64-NEXT: add a1, a1, a2
+; RV64-NEXT: ret
+entry:
+ %and = and i128 %x, 2048
+ %add = add i128 %and, 2048
+ ret i128 %add
+}
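Aside (illustrative, not part of the patch): G_CONSTANT_FOLD_BARRIER is an opaque pass-through that keeps later combines from re-folding a constant that was deliberately materialized for reuse. The i16 cases above show the payoff: 2048 = 1 << 11 is built once (li + slli) and serves both as the AND mask and as the base for the OR immediate, since 2337 = 2048 + 289 (the addi). The i128 cases show the legalizer treating the barrier like any other wide value, splitting it into register-sized parts (the all-zero parts become and/add against the zero register) and rebuilding the carry chain with sltu/seqz.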
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/freeze.ll b/llvm/test/CodeGen/RISCV/GlobalISel/freeze.ll
new file mode 100644
index 000000000000..fad9effdd403
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/freeze.ll
@@ -0,0 +1,201 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -mattr=+f,+d,+zfh,+m,+v -global-isel -global-isel-abort=1 -verify-machineinstrs < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+f,+d,+zfh,+m,+v -global-isel -global-isel-abort=1 -verify-machineinstrs < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,RV64
+
+define i32 @freeze_int(i32 %x) {
+; RV32-LABEL: freeze_int:
+; RV32: # %bb.0:
+; RV32-NEXT: mul a0, a0, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: freeze_int:
+; RV64: # %bb.0:
+; RV64-NEXT: mulw a0, a0, a0
+; RV64-NEXT: ret
+ %y1 = freeze i32 %x
+ %t1 = mul i32 %y1, %y1
+ ret i32 %t1
+}
+
+define i5 @freeze_int2(i5 %x) {
+; RV32-LABEL: freeze_int2:
+; RV32: # %bb.0:
+; RV32-NEXT: mul a0, a0, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: freeze_int2:
+; RV64: # %bb.0:
+; RV64-NEXT: mulw a0, a0, a0
+; RV64-NEXT: ret
+ %y1 = freeze i5 %x
+ %t1 = mul i5 %y1, %y1
+ ret i5 %t1
+}
+
+define float @freeze_float(float %x) {
+; CHECK-LABEL: freeze_float:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fadd.s fa0, fa0, fa0
+; CHECK-NEXT: ret
+ %y1 = freeze float %x
+ %t1 = fadd float %y1, %y1
+ ret float %t1
+}
+
+define double @freeze_double(double %x) nounwind {
+; RV32-LABEL: freeze_double:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: fsd fa0, 8(sp)
+; RV32-NEXT: lw a0, 8(sp)
+; RV32-NEXT: lw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: fld fa5, 8(sp)
+; RV32-NEXT: fadd.d fa0, fa5, fa5
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: freeze_double:
+; RV64: # %bb.0:
+; RV64-NEXT: fadd.d fa0, fa0, fa0
+; RV64-NEXT: ret
+ %y1 = freeze double %x
+ %t1 = fadd double %y1, %y1
+ ret double %t1
+}
+
+define void @freeze_half(ptr %p) {
+; CHECK-LABEL: freeze_half:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lh a1, 0(a0)
+; CHECK-NEXT: sh a1, 0(a0)
+; CHECK-NEXT: ret
+ %x = load half, ptr %p
+ %y1 = freeze half %x
+ store half %y1, ptr %p
+ ret void
+}
+
+define <vscale x 2 x i32> @freeze_ivec(<vscale x 2 x i32> %x) {
+; CHECK-LABEL: freeze_ivec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ret
+ %y = freeze <vscale x 2 x i32> %x
+ ret <vscale x 2 x i32> %y
+}
+
+define <vscale x 2 x float> @freeze_fvec(<vscale x 2 x float> %x) {
+; CHECK-LABEL: freeze_fvec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ret
+ %y = freeze <vscale x 2 x float> %x
+ ret <vscale x 2 x float> %y
+}
+
+define ptr @freeze_ptr(ptr %x) {
+; CHECK-LABEL: freeze_ptr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, 4
+; CHECK-NEXT: ret
+ %y1 = freeze ptr %x
+ %t1 = getelementptr i8, ptr %y1, i64 4
+ ret ptr %t1
+}
+
+%struct.T = type { i32, i32 }
+
+define i32 @freeze_struct(ptr %p) {
+; RV32-LABEL: freeze_struct:
+; RV32: # %bb.0:
+; RV32-NEXT: lw a1, 0(a0)
+; RV32-NEXT: lw a0, 4(a0)
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: freeze_struct:
+; RV64: # %bb.0:
+; RV64-NEXT: lw a1, 0(a0)
+; RV64-NEXT: lw a0, 4(a0)
+; RV64-NEXT: addw a0, a1, a0
+; RV64-NEXT: ret
+ %s = load %struct.T, ptr %p
+ %y1 = freeze %struct.T %s
+ %v1 = extractvalue %struct.T %y1, 0
+ %v2 = extractvalue %struct.T %y1, 1
+ %t1 = add i32 %v1, %v2
+ ret i32 %t1
+}
+
+define i32 @freeze_anonstruct(ptr %p) {
+; RV32-LABEL: freeze_anonstruct:
+; RV32: # %bb.0:
+; RV32-NEXT: lw a1, 0(a0)
+; RV32-NEXT: lw a0, 4(a0)
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: freeze_anonstruct:
+; RV64: # %bb.0:
+; RV64-NEXT: lw a1, 0(a0)
+; RV64-NEXT: lw a0, 4(a0)
+; RV64-NEXT: addw a0, a1, a0
+; RV64-NEXT: ret
+ %s = load {i32, i32}, ptr %p
+ %y1 = freeze {i32, i32} %s
+ %v1 = extractvalue {i32, i32} %y1, 0
+ %v2 = extractvalue {i32, i32} %y1, 1
+ %t1 = add i32 %v1, %v2
+ ret i32 %t1
+}
+
+define i32 @freeze_anonstruct2(ptr %p) {
+; RV32-LABEL: freeze_anonstruct2:
+; RV32: # %bb.0:
+; RV32-NEXT: lh a1, 4(a0)
+; RV32-NEXT: lw a0, 0(a0)
+; RV32-NEXT: lui a2, 16
+; RV32-NEXT: addi a2, a2, -1
+; RV32-NEXT: and a1, a1, a2
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: ret
+;
+; RV64-LABEL: freeze_anonstruct2:
+; RV64: # %bb.0:
+; RV64-NEXT: lh a1, 4(a0)
+; RV64-NEXT: lw a0, 0(a0)
+; RV64-NEXT: lui a2, 16
+; RV64-NEXT: addi a2, a2, -1
+; RV64-NEXT: and a1, a1, a2
+; RV64-NEXT: addw a0, a0, a1
+; RV64-NEXT: ret
+ %s = load {i32, i16}, ptr %p
+ %y1 = freeze {i32, i16} %s
+ %v1 = extractvalue {i32, i16} %y1, 0
+ %v2 = extractvalue {i32, i16} %y1, 1
+ %z2 = zext i16 %v2 to i32
+ %t1 = add i32 %v1, %z2
+ ret i32 %t1
+}
+
+define i32 @freeze_array(ptr %p) nounwind {
+; RV32-LABEL: freeze_array:
+; RV32: # %bb.0:
+; RV32-NEXT: lw a1, 0(a0)
+; RV32-NEXT: lw a0, 4(a0)
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: freeze_array:
+; RV64: # %bb.0:
+; RV64-NEXT: lw a1, 0(a0)
+; RV64-NEXT: lw a0, 4(a0)
+; RV64-NEXT: addw a0, a1, a0
+; RV64-NEXT: ret
+ %s = load [2 x i32], ptr %p
+ %y1 = freeze [2 x i32] %s
+ %v1 = extractvalue [2 x i32] %y1, 0
+ %v2 = extractvalue [2 x i32] %y1, 1
+ %t1 = add i32 %v1, %v2
+ ret i32 %t1
+}
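Aside (illustrative, not part of the patch): freeze pins a possibly undef or poison input to one arbitrary but consistent value, which is what lets the multiplies and adds above treat both operands as equal. Most cases lower to nothing extra; the RV32 freeze_double stack round-trip appears because the 64-bit value is split into two 32-bit GPR halves through memory (there is no direct double-width FPR<->GPR move on riscv32) so the freeze can apply per half before the double is reassembled. The core guarantee in miniature:

; Without freeze, each use of a poison %x may act as a different value;
; with it, both mul operands are guaranteed identical, so %sq is a
; genuine square.
define i32 @square(i32 %x) {
  %f = freeze i32 %x
  %sq = mul i32 %f, %f
  ret i32 %sq
}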
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv32.mir
new file mode 100644
index 000000000000..bbe8ef4b092d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv32.mir
@@ -0,0 +1,83 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+---
+name: constbarrier_i32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: constbarrier_i32
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16368
+ ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(s32) = G_CONSTANT_FOLD_BARRIER [[C]]
+ ; CHECK-NEXT: $x10 = COPY [[CONSTANT_FOLD_BARRIER]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s32) = G_CONSTANT i32 16368
+ %2:_(s32) = G_CONSTANT_FOLD_BARRIER %1
+ $x10 = COPY %2(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: constbarrier_i16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: constbarrier_i16
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2048
+ ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(s32) = G_CONSTANT_FOLD_BARRIER [[C]]
+ ; CHECK-NEXT: $x10 = COPY [[CONSTANT_FOLD_BARRIER]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s16) = G_CONSTANT i16 2048
+ %2:_(s16) = G_CONSTANT_FOLD_BARRIER %1
+ %3:_(s32) = G_ANYEXT %2(s16)
+ $x10 = COPY %3(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: constbarrier_i128
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: constbarrier_i128
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2048
+ ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(s32) = G_CONSTANT_FOLD_BARRIER [[C]]
+ ; CHECK-NEXT: $x10 = COPY [[CONSTANT_FOLD_BARRIER]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s128) = G_CONSTANT i128 2048
+ %2:_(s128) = G_CONSTANT_FOLD_BARRIER %1
+ %3:_(s32) = G_TRUNC %2(s128)
+ $x10 = COPY %3(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: constbarrier_nxv2i1
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: constbarrier_nxv2i1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(<vscale x 2 x s1>) = G_CONSTANT_FOLD_BARRIER [[VMCLR_VL]]
+ ; CHECK-NEXT: $v8 = COPY [[CONSTANT_FOLD_BARRIER]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(s1) = G_CONSTANT i1 0
+ %2:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %1(s1)
+ %3:_(<vscale x 2 x s1>) = G_CONSTANT_FOLD_BARRIER %2
+ $v8 = COPY %3(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: constbarrier_nxv2i32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: constbarrier_nxv2i32
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(<vscale x 2 x s32>) = G_CONSTANT_FOLD_BARRIER [[SPLAT_VECTOR]]
+ ; CHECK-NEXT: $v8 = COPY [[CONSTANT_FOLD_BARRIER]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR %1(s32)
+ %3:_(<vscale x 2 x s32>) = G_CONSTANT_FOLD_BARRIER %2
+ $v8 = COPY %3(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv64.mir
new file mode 100644
index 000000000000..96b1aa53d46e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv64.mir
@@ -0,0 +1,123 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
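+# Same coverage as the rv32 variant: the barrier's constant operand must
+# not be folded away, with s16 widened to s32, s128 narrowed to s64, and
+# scalable vector splats legalized underneath the barrier.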
+---
+name: constbarrier_i32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: constbarrier_i32
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16368
+ ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(s32) = G_CONSTANT_FOLD_BARRIER [[C]]
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[CONSTANT_FOLD_BARRIER]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s32) = G_CONSTANT i32 16368
+ %2:_(s32) = G_CONSTANT_FOLD_BARRIER %1
+ %3:_(s64) = G_ANYEXT %2(s32)
+ $x10 = COPY %3(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: constbarrier_i64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: constbarrier_i64
+ ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16368
+ ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(s64) = G_CONSTANT_FOLD_BARRIER [[C]]
+ ; CHECK-NEXT: $x10 = COPY [[CONSTANT_FOLD_BARRIER]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s64) = G_CONSTANT i64 16368
+ %2:_(s64) = G_CONSTANT_FOLD_BARRIER %1
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: constbarrier_i16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: constbarrier_i16
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2048
+ ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(s32) = G_CONSTANT_FOLD_BARRIER [[C]]
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[CONSTANT_FOLD_BARRIER]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s16) = G_CONSTANT i16 2048
+ %2:_(s16) = G_CONSTANT_FOLD_BARRIER %1
+ %3:_(s64) = G_ANYEXT %2(s16)
+ $x10 = COPY %3(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: constbarrier_i128
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: constbarrier_i128
+ ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2048
+ ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(s64) = G_CONSTANT_FOLD_BARRIER [[C]]
+ ; CHECK-NEXT: $x10 = COPY [[CONSTANT_FOLD_BARRIER]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s128) = G_CONSTANT i128 2048
+ %2:_(s128) = G_CONSTANT_FOLD_BARRIER %1
+ %3:_(s64) = G_TRUNC %2(s128)
+ $x10 = COPY %3(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: constbarrier_nxv2i1
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: constbarrier_nxv2i1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(<vscale x 2 x s1>) = G_CONSTANT_FOLD_BARRIER [[VMCLR_VL]]
+ ; CHECK-NEXT: $v8 = COPY [[CONSTANT_FOLD_BARRIER]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(s1) = G_CONSTANT i1 0
+ %2:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %1(s1)
+ %3:_(<vscale x 2 x s1>) = G_CONSTANT_FOLD_BARRIER %2
+ $v8 = COPY %3(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: constbarrier_nxv2i32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: constbarrier_nxv2i32
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(<vscale x 2 x s32>) = G_CONSTANT_FOLD_BARRIER [[SPLAT_VECTOR]]
+ ; CHECK-NEXT: $v8 = COPY [[CONSTANT_FOLD_BARRIER]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR %1(s32)
+ %3:_(<vscale x 2 x s32>) = G_CONSTANT_FOLD_BARRIER %2
+ $v8 = COPY %3(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: constbarrier_nxv2i64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: constbarrier_nxv2i64
+ ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(<vscale x 2 x s64>) = G_CONSTANT_FOLD_BARRIER [[SPLAT_VECTOR]]
+ ; CHECK-NEXT: $v8m2 = COPY [[CONSTANT_FOLD_BARRIER]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ %1:_(s64) = G_CONSTANT i64 0
+ %2:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR %1(s64)
+ %3:_(<vscale x 2 x s64>) = G_CONSTANT_FOLD_BARRIER %2(<vscale x 2 x s64>)
+ $v8m2 = COPY %3(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv32.mir
index 4177a40e3826..26d8785afb47 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv32.mir
@@ -555,3 +555,96 @@
PseudoRET implicit $x10, implicit $x11
...
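+# G_UDIVREM/G_SDIVREM have no direct lowering: the legalizer splits them
+# into separate divide and remainder operations, which become G_UDIV/G_UREM
+# (or G_SDIV/G_SREM) with M, and __udivsi3/__umodsi3-style libcalls without.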
+---
+name: udivrem_i32
+body: |
+ bb.1.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-I-LABEL: name: udivrem_i32
+ ; CHECK-I: liveins: $x10, $x11
+ ; CHECK-I-NEXT: {{ $}}
+ ; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-I-NEXT: $x10 = COPY [[COPY]](s32)
+ ; CHECK-I-NEXT: $x11 = COPY [[COPY1]](s32)
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__udivsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-I-NEXT: $x10 = COPY [[COPY]](s32)
+ ; CHECK-I-NEXT: $x11 = COPY [[COPY1]](s32)
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__umodsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-I-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]]
+ ; CHECK-I-NEXT: $x10 = COPY [[ADD]](s32)
+ ; CHECK-I-NEXT: PseudoRET implicit $x10
+ ;
+ ; CHECK-M-LABEL: name: udivrem_i32
+ ; CHECK-M: liveins: $x10, $x11
+ ; CHECK-M-NEXT: {{ $}}
+ ; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; CHECK-M-NEXT: [[UDIV:%[0-9]+]]:_(s32) = G_UDIV [[COPY]], [[COPY1]]
+ ; CHECK-M-NEXT: [[UREM:%[0-9]+]]:_(s32) = G_UREM [[COPY]], [[COPY1]]
+ ; CHECK-M-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UDIV]], [[UREM]]
+ ; CHECK-M-NEXT: $x10 = COPY [[ADD]](s32)
+ ; CHECK-M-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = COPY $x10
+ %1:_(s32) = COPY $x11
+ %2:_(s32), %3:_(s32) = G_UDIVREM %0, %1
+ %4:_(s32) = G_ADD %2, %3
+ $x10 = COPY %4(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: sdivrem_i32
+body: |
+ bb.1.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-I-LABEL: name: sdivrem_i32
+ ; CHECK-I: liveins: $x10, $x11
+ ; CHECK-I-NEXT: {{ $}}
+ ; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-I-NEXT: $x10 = COPY [[COPY]](s32)
+ ; CHECK-I-NEXT: $x11 = COPY [[COPY1]](s32)
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__divsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-I-NEXT: $x10 = COPY [[COPY]](s32)
+ ; CHECK-I-NEXT: $x11 = COPY [[COPY1]](s32)
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__modsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-I-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]]
+ ; CHECK-I-NEXT: $x10 = COPY [[ADD]](s32)
+ ; CHECK-I-NEXT: PseudoRET implicit $x10
+ ;
+ ; CHECK-M-LABEL: name: sdivrem_i32
+ ; CHECK-M: liveins: $x10, $x11
+ ; CHECK-M-NEXT: {{ $}}
+ ; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; CHECK-M-NEXT: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[COPY]], [[COPY1]]
+ ; CHECK-M-NEXT: [[SREM:%[0-9]+]]:_(s32) = G_SREM [[COPY]], [[COPY1]]
+ ; CHECK-M-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SDIV]], [[SREM]]
+ ; CHECK-M-NEXT: $x10 = COPY [[ADD]](s32)
+ ; CHECK-M-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = COPY $x10
+ %1:_(s32) = COPY $x11
+ %2:_(s32), %3:_(s32) = G_SDIVREM %0, %1
+ %4:_(s32) = G_ADD %2, %3
+ $x10 = COPY %4(s32)
+ PseudoRET implicit $x10
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv64.mir
index 492f9530997c..bbbe38f695d2 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv64.mir
@@ -655,3 +655,96 @@
PseudoRET implicit $x10, implicit $x11
...
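+# As on rv32, G_UDIVREM/G_SDIVREM are split into a separate divide and
+# remainder: native G_UDIV/G_UREM (or G_SDIV/G_SREM) when M is available,
+# and __udivdi3/__umoddi3-style libcalls otherwise.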
+---
+name: udivrem_i64
+body: |
+ bb.1.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-I-LABEL: name: udivrem_i64
+ ; CHECK-I: liveins: $x10, $x11
+ ; CHECK-I-NEXT: {{ $}}
+ ; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-I-NEXT: $x10 = COPY [[COPY]](s64)
+ ; CHECK-I-NEXT: $x11 = COPY [[COPY1]](s64)
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__udivdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-I-NEXT: $x10 = COPY [[COPY]](s64)
+ ; CHECK-I-NEXT: $x11 = COPY [[COPY1]](s64)
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__umoddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-I-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]]
+ ; CHECK-I-NEXT: $x10 = COPY [[ADD]](s64)
+ ; CHECK-I-NEXT: PseudoRET implicit $x10
+ ;
+ ; CHECK-M-LABEL: name: udivrem_i64
+ ; CHECK-M: liveins: $x10, $x11
+ ; CHECK-M-NEXT: {{ $}}
+ ; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-M-NEXT: [[UDIV:%[0-9]+]]:_(s64) = G_UDIV [[COPY]], [[COPY1]]
+ ; CHECK-M-NEXT: [[UREM:%[0-9]+]]:_(s64) = G_UREM [[COPY]], [[COPY1]]
+ ; CHECK-M-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[UDIV]], [[UREM]]
+ ; CHECK-M-NEXT: $x10 = COPY [[ADD]](s64)
+ ; CHECK-M-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = COPY $x10
+ %1:_(s64) = COPY $x11
+ %2:_(s64), %3:_(s64) = G_UDIVREM %0, %1
+ %4:_(s64) = G_ADD %2, %3
+ $x10 = COPY %4(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: sdivrem_i64
+body: |
+ bb.1.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-I-LABEL: name: sdivrem_i64
+ ; CHECK-I: liveins: $x10, $x11
+ ; CHECK-I-NEXT: {{ $}}
+ ; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-I-NEXT: $x10 = COPY [[COPY]](s64)
+ ; CHECK-I-NEXT: $x11 = COPY [[COPY1]](s64)
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__divdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-I-NEXT: $x10 = COPY [[COPY]](s64)
+ ; CHECK-I-NEXT: $x11 = COPY [[COPY1]](s64)
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__moddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-I-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]]
+ ; CHECK-I-NEXT: $x10 = COPY [[ADD]](s64)
+ ; CHECK-I-NEXT: PseudoRET implicit $x10
+ ;
+ ; CHECK-M-LABEL: name: sdivrem_i64
+ ; CHECK-M: liveins: $x10, $x11
+ ; CHECK-M-NEXT: {{ $}}
+ ; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-M-NEXT: [[SDIV:%[0-9]+]]:_(s64) = G_SDIV [[COPY]], [[COPY1]]
+ ; CHECK-M-NEXT: [[SREM:%[0-9]+]]:_(s64) = G_SREM [[COPY]], [[COPY1]]
+ ; CHECK-M-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[SDIV]], [[SREM]]
+ ; CHECK-M-NEXT: $x10 = COPY [[ADD]](s64)
+ ; CHECK-M-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = COPY $x10
+ %1:_(s64) = COPY $x11
+ %2:_(s64), %3:_(s64) = G_SDIVREM %0, %1
+ %4:_(s64) = G_ADD %2, %3
+ $x10 = COPY %4(s64)
+ PseudoRET implicit $x10
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-freeze-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-freeze-rv32.mir
new file mode 100644
index 000000000000..4217910dc506
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-freeze-rv32.mir
@@ -0,0 +1,65 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=riscv32 -mattr=+f,+v -run-pass=legalizer %s -o - | FileCheck %s
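+# G_FREEZE is legal for any already-legal type, so s32, f32 (held as s32
+# in $f10_f) and scalable vector freezes are all expected to pass through
+# the legalizer unchanged.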
+---
+name: freeze_i32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s32) = G_FREEZE [[COPY]]
+ ; CHECK-NEXT: $x10 = COPY [[FREEZE]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s32) = COPY $x10
+ %2:_(s32) = G_FREEZE %1
+ $x10 = COPY %2(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: freeze_f32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_f32
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s32) = G_FREEZE [[COPY]]
+ ; CHECK-NEXT: $f10_f = COPY [[FREEZE]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $f10_f
+ %1:_(s32) = COPY $f10_f
+ %2:_(s32) = G_FREEZE %1
+ $f10_f = COPY %2(s32)
+ PseudoRET implicit $f10_f
+
+...
+---
+name: freeze_nxv2i1
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_nxv2i1
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(<vscale x 2 x s1>) = G_FREEZE [[COPY]]
+ ; CHECK-NEXT: $v8 = COPY [[FREEZE]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = COPY $v8
+ %2:_(<vscale x 2 x s1>) = G_FREEZE %1
+ $v8 = COPY %2(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: freeze_nxv2i32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_nxv2i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(<vscale x 2 x s32>) = G_FREEZE [[COPY]]
+ ; CHECK-NEXT: $v8 = COPY [[FREEZE]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ %2:_(<vscale x 2 x s32>) = G_FREEZE %1
+ $v8 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-freeze-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-freeze-rv64.mir
new file mode 100644
index 000000000000..355e22591588
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-freeze-rv64.mir
@@ -0,0 +1,99 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=riscv64 -mattr=+f,+v -run-pass=legalizer %s -o - | FileCheck %s
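+# On rv64 an s32 G_FREEZE is narrower than the register width, so it is
+# kept at s32 between a G_TRUNC/G_ANYEXT pair, while s64 and scalable
+# vector freezes pass through unchanged.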
+---
+name: freeze_i32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s32) = G_FREEZE [[TRUNC]]
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[FREEZE]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s64) = COPY $x10
+ %2:_(s32) = G_TRUNC %1(s64)
+ %3:_(s32) = G_FREEZE %2
+ %4:_(s64) = G_ANYEXT %3(s32)
+ $x10 = COPY %4(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: freeze_f32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_f32
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s32) = G_FREEZE [[COPY]]
+ ; CHECK-NEXT: $f10_f = COPY [[FREEZE]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $f10_f
+ %1:_(s32) = COPY $f10_f
+ %2:_(s32) = G_FREEZE %1
+ $f10_f = COPY %2(s32)
+ PseudoRET implicit $f10_f
+
+...
+---
+name: freeze_i64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s64) = G_FREEZE [[COPY]]
+ ; CHECK-NEXT: $x10 = COPY [[FREEZE]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s64) = COPY $x10
+ %2:_(s64) = G_FREEZE %1
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: freeze_nxv2i1
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_nxv2i1
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(<vscale x 2 x s1>) = G_FREEZE [[COPY]]
+ ; CHECK-NEXT: $v8 = COPY [[FREEZE]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = COPY $v8
+ %2:_(<vscale x 2 x s1>) = G_FREEZE %1
+ $v8 = COPY %2(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: freeze_nxv2i32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_nxv2i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(<vscale x 2 x s32>) = G_FREEZE [[COPY]]
+ ; CHECK-NEXT: $v8 = COPY [[FREEZE]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ %2:_(<vscale x 2 x s32>) = G_FREEZE %1
+ $v8 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: freeze_nxv2i64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_nxv2i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(<vscale x 2 x s64>) = G_FREEZE [[COPY]]
+ ; CHECK-NEXT: $v8m2 = COPY [[FREEZE]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s64>) = COPY $v8
+ %2:_(<vscale x 2 x s64>) = G_FREEZE %1
+ $v8m2 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-frem-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-frem-rv32.mir
new file mode 100644
index 000000000000..adf3f450af7d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-frem-rv32.mir
@@ -0,0 +1,133 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=riscv32 -run-pass=legalizer -global-isel-abort=0 %s -o - | FileCheck %s
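+# There is no RISC-V lowering for G_FREM, so the legalizer emits
+# fmodf/fmod libcalls; f16 is promoted to f32 around the call and
+# <2 x s32> is scalarized into two calls.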
+---
+name: frem_f32
+body: |
+ bb.1.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: frem_f32
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s32)
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &fmodf, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = COPY $x10
+ %1:_(s32) = COPY $x11
+ %2:_(s32) = G_FREM %0, %1
+ $x10 = COPY %2(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: frem_f64
+body: |
+ bb.1.entry:
+ liveins: $x10, $x11, $x12, $x13
+
+ ; CHECK-LABEL: name: frem_f64
+ ; CHECK: liveins: $x10, $x11, $x12, $x13
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s32)
+ ; CHECK-NEXT: $x12 = COPY [[COPY2]](s32)
+ ; CHECK-NEXT: $x13 = COPY [[COPY3]](s32)
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &fmod, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x11
+ ; CHECK-NEXT: $x10 = COPY [[COPY4]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY5]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
+ %2:_(s32) = COPY $x10
+ %3:_(s32) = COPY $x11
+ %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+ %4:_(s32) = COPY $x12
+ %5:_(s32) = COPY $x13
+ %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32)
+ %6:_(s64) = G_FREM %0, %1
+ %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64)
+ $x10 = COPY %7(s32)
+ $x11 = COPY %8(s32)
+ PseudoRET implicit $x10, implicit $x11
+
+...
+---
+name: frem_f16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: frem_f16
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; CHECK-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: $x10 = COPY [[FPEXT]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[FPEXT1]](s32)
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &fmodf, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY2]](s32)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16)
+ ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = COPY $x10
+ %1:_(s32) = COPY $x11
+ %2:_(s16) = G_TRUNC %0(s32)
+ %3:_(s16) = G_TRUNC %1(s32)
+ %4:_(s16) = G_FREM %2, %3
+ %5:_(s32) = G_ANYEXT %4(s16)
+ $x10 = COPY %5(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: frem_v2f32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: frem_v2f32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+ ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: $x10 = COPY [[UV]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[UV2]](s32)
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &fmodf, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: $x10 = COPY [[UV1]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[UV3]](s32)
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &fmodf, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
+ ; CHECK-NEXT: $v8 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<2 x s32>) = COPY $v8
+ %1:_(<2 x s32>) = COPY $v9
+ %2:_(<2 x s32>) = G_FREM %0, %1
+ $v8 = COPY %2(<2 x s32>)
+ PseudoRET implicit $v8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-frem-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-frem-rv64.mir
new file mode 100644
index 000000000000..5db66bbf8e52
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-frem-rv64.mir
@@ -0,0 +1,133 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=riscv64 -run-pass=legalizer -global-isel-abort=0 %s -o - | FileCheck %s
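+# As on rv32, G_FREM always becomes an fmodf/fmod libcall; the f16 and
+# vector cases additionally show the s32 call arguments and results being
+# extended to and truncated from the 64-bit GPR width.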
+---
+name: frem_f32
+body: |
+ bb.1.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: frem_f32
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s64)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s64)
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &fmodf, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %2:_(s64) = COPY $x10
+ %0:_(s32) = G_TRUNC %2(s64)
+ %3:_(s64) = COPY $x11
+ %1:_(s32) = G_TRUNC %3(s64)
+ %4:_(s32) = G_FREM %0, %1
+ %5:_(s64) = G_ANYEXT %4(s32)
+ $x10 = COPY %5(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: frem_f64
+body: |
+ bb.1.entry:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: frem_f64
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s64)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s64)
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &fmod, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = COPY $x10
+ %1:_(s64) = COPY $x11
+ %2:_(s64) = G_FREM %0, %1
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: frem_f16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: frem_f16
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s64)
+ ; CHECK-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; CHECK-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[FPEXT]](s32)
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[FPEXT1]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; CHECK-NEXT: $x11 = COPY [[ANYEXT1]](s64)
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &fmodf, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
+ ; CHECK-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[TRUNC2]](s32)
+ ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[FPTRUNC]](s16)
+ ; CHECK-NEXT: $x10 = COPY [[ANYEXT2]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = COPY $x10
+ %1:_(s64) = COPY $x11
+ %2:_(s16) = G_TRUNC %0(s64)
+ %3:_(s16) = G_TRUNC %1(s64)
+ %4:_(s16) = G_FREM %2, %3
+ %5:_(s64) = G_ANYEXT %4(s16)
+ $x10 = COPY %5(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: frem_v2f32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: frem_v2f32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $v9
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+ ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UV]](s32)
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[UV2]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; CHECK-NEXT: $x11 = COPY [[ANYEXT1]](s64)
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &fmodf, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[UV1]](s32)
+ ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[UV3]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[ANYEXT2]](s64)
+ ; CHECK-NEXT: $x11 = COPY [[ANYEXT3]](s64)
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &fmodf, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY3]](s64)
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[TRUNC]](s32), [[TRUNC1]](s32)
+ ; CHECK-NEXT: $v8 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<2 x s32>) = COPY $v8
+ %1:_(<2 x s32>) = COPY $v9
+ %2:_(<2 x s32>) = G_FREM %0, %1
+ $v8 = COPY %2(<2 x s32>)
+ PseudoRET implicit $v8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-lshr-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-lshr-rv64.mir
index 8cbae0fa0173..43318118f09c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-lshr-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-lshr-rv64.mir
@@ -336,3 +336,32 @@
PseudoRET implicit $x10
...
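+# An s48 shift checks odd-sized type handling: the value is narrowed so
+# the shift is performed as a legal s32 G_LSHR (with the constant amount
+# rebuilt at s64) and the result extended back to s64.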
+---
+name: lshr_i32_i48
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: lshr_i32_i48
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[TRUNC]], [[C]](s64)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LSHR]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s64) = COPY $x10
+ %0:_(s48) = G_TRUNC %1(s64)
+ %2:_(s48) = G_CONSTANT i48 16
+ %6:_(s32) = G_TRUNC %0(s48)
+ %7:_(s32) = G_LSHR %6, %2(s48)
+ %5:_(s64) = G_ANYEXT %7(s32)
+ $x10 = COPY %5(s64)
+ PseudoRET implicit $x10
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv32.mir
new file mode 100644
index 000000000000..08aa92e0207b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv32.mir
@@ -0,0 +1,407 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=riscv32 -run-pass=legalizer %s -o - \
+# RUN: | FileCheck %s --check-prefixes=CHECK,RV32I
+# RUN: llc -mtriple=riscv32 -mattr=+zbb -run-pass=legalizer %s -o - \
+# RUN: | FileCheck %s --check-prefixes=CHECK,RV32ZBB
+
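+# None of the saturating add/sub opcodes are legal here, so the base ISA
+# lowers them to add/sub plus a compare-and-select of the saturated value,
+# while Zbb clamps with G_UMIN/G_SMAX/G_SMIN; i64 is narrowed to s32 pairs.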
+---
+name: uaddsat_i32
+body: |
+ bb.1:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: uaddsat_i32
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY1]]
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[C]], [[COPY2]]
+ ; RV32I-NEXT: $x10 = COPY [[SELECT]](s32)
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV32ZBB-LABEL: name: uaddsat_i32
+ ; RV32ZBB: liveins: $x10, $x11
+ ; RV32ZBB-NEXT: {{ $}}
+ ; RV32ZBB-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32ZBB-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32ZBB-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32ZBB-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[C]]
+ ; RV32ZBB-NEXT: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[XOR]], [[COPY1]]
+ ; RV32ZBB-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[UMIN]]
+ ; RV32ZBB-NEXT: $x10 = COPY [[ADD]](s32)
+ ; RV32ZBB-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = COPY $x10
+ %1:_(s32) = COPY $x11
+ %2:_(s32) = G_UADDSAT %0, %1(s32)
+ $x10 = COPY %2(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: uaddsat_i64
+body: |
+ bb.1:
+    liveins: $x10, $x11, $x12, $x13
+    ; CHECK-LABEL: name: uaddsat_i64
+    ; CHECK: liveins: $x10, $x11, $x12, $x13
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY2]]
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY2]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY3]]
+ ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY5]](s32), [[COPY3]]
+ ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY5]](s32), [[COPY3]]
+ ; CHECK-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY4]](s32), [[COPY2]]
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s32), [[ICMP3]], [[ICMP1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; CHECK-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[SELECT]](s32), [[C]], [[COPY4]]
+ ; CHECK-NEXT: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[SELECT]](s32), [[C1]], [[COPY5]]
+ ; CHECK-NEXT: $x10 = COPY [[SELECT1]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[SELECT2]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
+ %2:_(s32) = COPY $x10
+ %3:_(s32) = COPY $x11
+ %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+ %4:_(s32) = COPY $x12
+ %5:_(s32) = COPY $x13
+ %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32)
+ %6:_(s64) = G_UADDSAT %0, %1(s64)
+ %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64)
+ $x10 = COPY %7(s32)
+ $x11 = COPY %8(s32)
+ PseudoRET implicit $x10, implicit $x11
+
+...
+---
+name: saddsat_i32
+body: |
+ bb.1:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: saddsat_i32
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[ADD]](s32), [[COPY]]
+ ; RV32I-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[COPY1]](s32), [[C]]
+ ; RV32I-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ICMP1]], [[ICMP]]
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; RV32I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; RV32I-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY2]], [[C1]](s32)
+ ; RV32I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; RV32I-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ASHR]], [[C2]]
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[XOR]](s32), [[ADD1]], [[COPY2]]
+ ; RV32I-NEXT: $x10 = COPY [[SELECT]](s32)
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV32ZBB-LABEL: name: saddsat_i32
+ ; RV32ZBB: liveins: $x10, $x11
+ ; RV32ZBB-NEXT: {{ $}}
+ ; RV32ZBB-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32ZBB-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32ZBB-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; RV32ZBB-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; RV32ZBB-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32ZBB-NEXT: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[COPY]], [[C2]]
+ ; RV32ZBB-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SMAX]]
+ ; RV32ZBB-NEXT: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[COPY]], [[C2]]
+ ; RV32ZBB-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMIN]]
+ ; RV32ZBB-NEXT: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB1]], [[COPY1]]
+ ; RV32ZBB-NEXT: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB]]
+ ; RV32ZBB-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[SMIN1]]
+ ; RV32ZBB-NEXT: $x10 = COPY [[ADD]](s32)
+ ; RV32ZBB-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = COPY $x10
+ %1:_(s32) = COPY $x11
+ %2:_(s32) = G_SADDSAT %0, %1(s32)
+ $x10 = COPY %2(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: saddsat_i64
+body: |
+ bb.1:
+    liveins: $x10, $x11, $x12, $x13
+    ; CHECK-LABEL: name: saddsat_i64
+    ; CHECK: liveins: $x10, $x11, $x12, $x13
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY2]]
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY2]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY3]]
+ ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[COPY5]](s32), [[COPY1]]
+ ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY5]](s32), [[COPY1]]
+ ; CHECK-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY4]](s32), [[COPY]]
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s32), [[ICMP3]], [[ICMP1]]
+ ; CHECK-NEXT: [[ICMP4:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[COPY3]](s32), [[C1]]
+ ; CHECK-NEXT: [[ICMP5:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY3]](s32), [[C1]]
+ ; CHECK-NEXT: [[ICMP6:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY2]](s32), [[C]]
+ ; CHECK-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s32), [[ICMP6]], [[ICMP4]]
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[SELECT1]], [[SELECT]]
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY5]], [[C2]](s32)
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[COPY5]], [[C3]](s32)
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[ASHR]], [[C4]]
+ ; CHECK-NEXT: [[ICMP7:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD3]](s32), [[C4]]
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ADD3]](s32)
+ ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ASHR1]], [[C5]]
+ ; CHECK-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[ICMP7]]
+ ; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ADD5]](s32)
+ ; CHECK-NEXT: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[XOR]](s32), [[COPY6]], [[COPY4]]
+ ; CHECK-NEXT: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[XOR]](s32), [[COPY7]], [[COPY5]]
+ ; CHECK-NEXT: $x10 = COPY [[SELECT2]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[SELECT3]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
+ %2:_(s32) = COPY $x10
+ %3:_(s32) = COPY $x11
+ %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+ %4:_(s32) = COPY $x12
+ %5:_(s32) = COPY $x13
+ %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32)
+ %6:_(s64) = G_SADDSAT %0, %1(s64)
+ %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64)
+ $x10 = COPY %7(s32)
+ $x11 = COPY %8(s32)
+ PseudoRET implicit $x10, implicit $x11
+
+...
+---
+name: usubsat_i32
+body: |
+ bb.1:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: usubsat_i32
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY]](s32), [[COPY1]]
+ ; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[C]], [[SUB]]
+ ; RV32I-NEXT: $x10 = COPY [[SELECT]](s32)
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV32ZBB-LABEL: name: usubsat_i32
+ ; RV32ZBB: liveins: $x10, $x11
+ ; RV32ZBB-NEXT: {{ $}}
+ ; RV32ZBB-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32ZBB-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32ZBB-NEXT: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[COPY]], [[COPY1]]
+ ; RV32ZBB-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[UMIN]]
+ ; RV32ZBB-NEXT: $x10 = COPY [[SUB]](s32)
+ ; RV32ZBB-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = COPY $x10
+ %1:_(s32) = COPY $x11
+ %2:_(s32) = G_USUBSAT %0, %1(s32)
+ $x10 = COPY %2(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: usubsat_i64
+body: |
+ bb.1:
+    liveins: $x10, $x11, $x12, $x13
+    ; CHECK-LABEL: name: usubsat_i64
+    ; CHECK: liveins: $x10, $x11, $x12, $x13
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[COPY2]]
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY]](s32), [[COPY2]]
+ ; CHECK-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[COPY3]]
+ ; CHECK-NEXT: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[ICMP]]
+ ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY1]](s32), [[COPY3]]
+ ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY1]](s32), [[COPY3]]
+ ; CHECK-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY]](s32), [[COPY2]]
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s32), [[ICMP3]], [[ICMP1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[SELECT]](s32), [[C]], [[SUB]]
+ ; CHECK-NEXT: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[SELECT]](s32), [[C1]], [[SUB2]]
+ ; CHECK-NEXT: $x10 = COPY [[SELECT1]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[SELECT2]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
+ %2:_(s32) = COPY $x10
+ %3:_(s32) = COPY $x11
+ %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+ %4:_(s32) = COPY $x12
+ %5:_(s32) = COPY $x13
+ %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32)
+ %6:_(s64) = G_USUBSAT %0, %1(s64)
+ %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64)
+ $x10 = COPY %7(s32)
+ $x11 = COPY %8(s32)
+ PseudoRET implicit $x10, implicit $x11
+
+...
+---
+name: ssubsat_i32
+body: |
+ bb.1:
+ liveins: $x10, $x11
+
+ ; RV32I-LABEL: name: ssubsat_i32
+ ; RV32I: liveins: $x10, $x11
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32I-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[COPY1]]
+ ; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[SUB]](s32), [[COPY]]
+ ; RV32I-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(sgt), [[COPY1]](s32), [[C]]
+ ; RV32I-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ICMP1]], [[ICMP]]
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
+ ; RV32I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; RV32I-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY2]], [[C1]](s32)
+ ; RV32I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ASHR]], [[C2]]
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[XOR]](s32), [[ADD]], [[COPY2]]
+ ; RV32I-NEXT: $x10 = COPY [[SELECT]](s32)
+ ; RV32I-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV32ZBB-LABEL: name: ssubsat_i32
+ ; RV32ZBB: liveins: $x10, $x11
+ ; RV32ZBB-NEXT: {{ $}}
+ ; RV32ZBB-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32ZBB-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32ZBB-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; RV32ZBB-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; RV32ZBB-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32ZBB-NEXT: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[COPY]], [[C2]]
+ ; RV32ZBB-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SMAX]], [[C]]
+ ; RV32ZBB-NEXT: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[COPY]], [[C2]]
+ ; RV32ZBB-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SMIN]], [[C1]]
+ ; RV32ZBB-NEXT: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB]], [[COPY1]]
+ ; RV32ZBB-NEXT: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; RV32ZBB-NEXT: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[SMIN1]]
+ ; RV32ZBB-NEXT: $x10 = COPY [[SUB2]](s32)
+ ; RV32ZBB-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = COPY $x10
+ %1:_(s32) = COPY $x11
+ %2:_(s32) = G_SSUBSAT %0, %1(s32)
+ $x10 = COPY %2(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: ssubsat_i64
+body: |
+ bb.1:
+    liveins: $x10, $x11, $x12, $x13
+    ; CHECK-LABEL: name: ssubsat_i64
+    ; CHECK: liveins: $x10, $x11, $x12, $x13
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[COPY2]]
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY]](s32), [[COPY2]]
+ ; CHECK-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[COPY3]]
+ ; CHECK-NEXT: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[ICMP]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[SUB2]](s32), [[COPY1]]
+ ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[SUB2]](s32), [[COPY1]]
+ ; CHECK-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[SUB]](s32), [[COPY]]
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s32), [[ICMP3]], [[ICMP1]]
+ ; CHECK-NEXT: [[ICMP4:%[0-9]+]]:_(s32) = G_ICMP intpred(sgt), [[COPY3]](s32), [[C1]]
+ ; CHECK-NEXT: [[ICMP5:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY3]](s32), [[C1]]
+ ; CHECK-NEXT: [[ICMP6:%[0-9]+]]:_(s32) = G_ICMP intpred(ugt), [[COPY2]](s32), [[C]]
+ ; CHECK-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s32), [[ICMP6]], [[ICMP4]]
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[SELECT1]], [[SELECT]]
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SUB2]], [[C2]](s32)
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SUB2]], [[C3]](s32)
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ASHR]], [[C4]]
+ ; CHECK-NEXT: [[ICMP7:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[C4]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ASHR1]], [[C5]]
+ ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP7]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[XOR]](s32), [[COPY4]], [[SUB]]
+ ; CHECK-NEXT: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[XOR]](s32), [[COPY5]], [[SUB2]]
+ ; CHECK-NEXT: $x10 = COPY [[SELECT2]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[SELECT3]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
+ %2:_(s32) = COPY $x10
+ %3:_(s32) = COPY $x11
+ %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+ %4:_(s32) = COPY $x12
+ %5:_(s32) = COPY $x13
+ %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32)
+ %6:_(s64) = G_SSUBSAT %0, %1(s64)
+ %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64)
+ $x10 = COPY %7(s32)
+ $x11 = COPY %8(s32)
+ PseudoRET implicit $x10, implicit $x11
+
+...
+---
+name: uaddsat_i8
+body: |
+ bb.1:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: uaddsat_i8
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C]]
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[AND]](s32), [[AND1]]
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[C2]], [[ADD]]
+ ; CHECK-NEXT: $x10 = COPY [[SELECT]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %2:_(s32) = COPY $x10
+ %0:_(s8) = G_TRUNC %2(s32)
+ %3:_(s32) = COPY $x11
+ %1:_(s8) = G_TRUNC %3(s32)
+ %4:_(s8) = G_UADDSAT %0, %1(s8)
+ %5:_(s32) = G_ANYEXT %4(s8)
+ $x10 = COPY %5(s32)
+ PseudoRET implicit $x10
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv64.mir
new file mode 100644
index 000000000000..5eaf8b37fe98
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv64.mir
@@ -0,0 +1,361 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=riscv64 -run-pass=legalizer %s -o - \
+# RUN: | FileCheck %s --check-prefixes=CHECK,RV64I
+# RUN: llc -mtriple=riscv64 -mattr=+zbb -run-pass=legalizer %s -o - \
+# RUN: | FileCheck %s --check-prefixes=CHECK,RV64ZBB
+
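+# Same lowering as rv32, with i64 now the native width: the base ISA
+# expands the saturating ops to add/sub plus compare-and-select, Zbb
+# clamps with min/max, and the i32 overflow checks are widened to s64.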
+---
+name: uaddsat_i32
+body: |
+ bb.1:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: uaddsat_i32
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[TRUNC]], [[TRUNC1]]
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ADD]](s32)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ZEXT]](s64), [[AND]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[C1]], [[COPY2]]
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SELECT]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %2:_(s64) = COPY $x10
+ %0:_(s32) = G_TRUNC %2(s64)
+ %3:_(s64) = COPY $x11
+ %1:_(s32) = G_TRUNC %3(s64)
+ %4:_(s32) = G_UADDSAT %0, %1(s32)
+ %5:_(s64) = G_ANYEXT %4(s32)
+ $x10 = COPY %5(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: uaddsat_i64
+body: |
+ bb.1:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: uaddsat_i64
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), [[COPY1]]
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+ ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s64), [[C]], [[COPY2]]
+ ; RV64I-NEXT: $x10 = COPY [[SELECT]](s64)
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV64ZBB-LABEL: name: uaddsat_i64
+ ; RV64ZBB: liveins: $x10, $x11
+ ; RV64ZBB-NEXT: {{ $}}
+ ; RV64ZBB-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64ZBB-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV64ZBB-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[C]]
+ ; RV64ZBB-NEXT: [[UMIN:%[0-9]+]]:_(s64) = G_UMIN [[XOR]], [[COPY1]]
+ ; RV64ZBB-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[UMIN]]
+ ; RV64ZBB-NEXT: $x10 = COPY [[ADD]](s64)
+ ; RV64ZBB-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = COPY $x10
+ %1:_(s64) = COPY $x11
+ %2:_(s64) = G_UADDSAT %0, %1(s64)
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: saddsat_i32
+body: |
+ bb.1:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: saddsat_i32
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY]], 32
+ ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[SEXT_INREG]], [[SEXT_INREG1]]
+ ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD]], 32
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ne), [[ADD]](s64), [[SEXT_INREG2]]
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ADD]](s64)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[TRUNC]], [[C]](s64)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ASHR]], [[C1]]
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[ADD1]], [[TRUNC]]
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SELECT]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %2:_(s64) = COPY $x10
+ %0:_(s32) = G_TRUNC %2(s64)
+ %3:_(s64) = COPY $x11
+ %1:_(s32) = G_TRUNC %3(s64)
+ %4:_(s32) = G_SADDSAT %0, %1(s32)
+ %5:_(s64) = G_ANYEXT %4(s32)
+ $x10 = COPY %5(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: saddsat_i64
+body: |
+ bb.1:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: saddsat_i64
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(slt), [[ADD]](s64), [[COPY]]
+ ; RV64I-NEXT: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(slt), [[COPY1]](s64), [[C]]
+ ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
+ ; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
+ ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[TRUNC]], [[TRUNC1]]
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+ ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+ ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s64)
+ ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+ ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C2]]
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[XOR]](s32)
+ ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C3]]
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[AND]](s64), [[ADD1]], [[COPY2]]
+ ; RV64I-NEXT: $x10 = COPY [[SELECT]](s64)
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV64ZBB-LABEL: name: saddsat_i64
+ ; RV64ZBB: liveins: $x10, $x11
+ ; RV64ZBB-NEXT: {{ $}}
+ ; RV64ZBB-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64ZBB-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
+ ; RV64ZBB-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+ ; RV64ZBB-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64ZBB-NEXT: [[SMAX:%[0-9]+]]:_(s64) = G_SMAX [[COPY]], [[C2]]
+ ; RV64ZBB-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[SMAX]]
+ ; RV64ZBB-NEXT: [[SMIN:%[0-9]+]]:_(s64) = G_SMIN [[COPY]], [[C2]]
+ ; RV64ZBB-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C1]], [[SMIN]]
+ ; RV64ZBB-NEXT: [[SMAX1:%[0-9]+]]:_(s64) = G_SMAX [[SUB1]], [[COPY1]]
+ ; RV64ZBB-NEXT: [[SMIN1:%[0-9]+]]:_(s64) = G_SMIN [[SMAX1]], [[SUB]]
+ ; RV64ZBB-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[SMIN1]]
+ ; RV64ZBB-NEXT: $x10 = COPY [[ADD]](s64)
+ ; RV64ZBB-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = COPY $x10
+ %1:_(s64) = COPY $x11
+ %2:_(s64) = G_SADDSAT %0, %1(s64)
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: usubsat_i32
+body: |
+ bb.1:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: usubsat_i32
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[TRUNC1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C1]]
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[AND]](s64), [[AND1]]
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[C2]], [[SUB]]
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SELECT]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %2:_(s64) = COPY $x10
+ %0:_(s32) = G_TRUNC %2(s64)
+ %3:_(s64) = COPY $x11
+ %1:_(s32) = G_TRUNC %3(s64)
+ %4:_(s32) = G_USUBSAT %0, %1(s32)
+ %5:_(s64) = G_ANYEXT %4(s32)
+ $x10 = COPY %5(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: usubsat_i64
+body: |
+ bb.1:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: usubsat_i64
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[COPY]](s64), [[COPY1]]
+ ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s64), [[C]], [[SUB]]
+ ; RV64I-NEXT: $x10 = COPY [[SELECT]](s64)
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV64ZBB-LABEL: name: usubsat_i64
+ ; RV64ZBB: liveins: $x10, $x11
+ ; RV64ZBB-NEXT: {{ $}}
+ ; RV64ZBB-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64ZBB-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64ZBB-NEXT: [[UMIN:%[0-9]+]]:_(s64) = G_UMIN [[COPY]], [[COPY1]]
+ ; RV64ZBB-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[UMIN]]
+ ; RV64ZBB-NEXT: $x10 = COPY [[SUB]](s64)
+ ; RV64ZBB-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = COPY $x10
+ %1:_(s64) = COPY $x11
+ %2:_(s64) = G_USUBSAT %0, %1(s64)
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: ssubsat_i32
+body: |
+ bb.1:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: ssubsat_i32
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY]], 32
+ ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[SEXT_INREG]], [[SEXT_INREG1]]
+ ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB]], 32
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ne), [[SUB]](s64), [[SEXT_INREG2]]
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[SUB]](s64)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[TRUNC]], [[C]](s64)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ASHR]], [[C1]]
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[ADD]], [[TRUNC]]
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SELECT]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %2:_(s64) = COPY $x10
+ %0:_(s32) = G_TRUNC %2(s64)
+ %3:_(s64) = COPY $x11
+ %1:_(s32) = G_TRUNC %3(s64)
+ %4:_(s32) = G_SSUBSAT %0, %1(s32)
+ %5:_(s64) = G_ANYEXT %4(s32)
+ $x10 = COPY %5(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: ssubsat_i64
+body: |
+ bb.1:
+ liveins: $x10, $x11
+
+ ; RV64I-LABEL: name: ssubsat_i64
+ ; RV64I: liveins: $x10, $x11
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY1]]
+ ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(slt), [[SUB]](s64), [[COPY]]
+ ; RV64I-NEXT: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(sgt), [[COPY1]](s64), [[C]]
+ ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
+ ; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
+ ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[TRUNC]], [[TRUNC1]]
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[SUB]](s64)
+ ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+ ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s64)
+ ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C2]]
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[XOR]](s32)
+ ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C3]]
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[AND]](s64), [[ADD]], [[COPY2]]
+ ; RV64I-NEXT: $x10 = COPY [[SELECT]](s64)
+ ; RV64I-NEXT: PseudoRET implicit $x10
+ ;
+ ; RV64ZBB-LABEL: name: ssubsat_i64
+ ; RV64ZBB: liveins: $x10, $x11
+ ; RV64ZBB-NEXT: {{ $}}
+ ; RV64ZBB-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64ZBB-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
+ ; RV64ZBB-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+ ; RV64ZBB-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV64ZBB-NEXT: [[SMAX:%[0-9]+]]:_(s64) = G_SMAX [[COPY]], [[C2]]
+ ; RV64ZBB-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[SMAX]], [[C]]
+ ; RV64ZBB-NEXT: [[SMIN:%[0-9]+]]:_(s64) = G_SMIN [[COPY]], [[C2]]
+ ; RV64ZBB-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[SMIN]], [[C1]]
+ ; RV64ZBB-NEXT: [[SMAX1:%[0-9]+]]:_(s64) = G_SMAX [[SUB]], [[COPY1]]
+ ; RV64ZBB-NEXT: [[SMIN1:%[0-9]+]]:_(s64) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; RV64ZBB-NEXT: [[SUB2:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[SMIN1]]
+ ; RV64ZBB-NEXT: $x10 = COPY [[SUB2]](s64)
+ ; RV64ZBB-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = COPY $x10
+ %1:_(s64) = COPY $x11
+ %2:_(s64) = G_SSUBSAT %0, %1(s64)
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: uaddsat_i8
+body: |
+ bb.1:
+ liveins: $x10, $x11
+
+ ; CHECK-LABEL: name: uaddsat_i8
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[TRUNC]], [[TRUNC1]]
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD]](s32)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C]]
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C1]]
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[AND]](s64), [[AND1]]
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[C2]], [[ADD]]
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[SELECT]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[ANYEXT1]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %2:_(s64) = COPY $x10
+ %0:_(s8) = G_TRUNC %2(s64)
+ %3:_(s64) = COPY $x11
+ %1:_(s8) = G_TRUNC %3(s64)
+ %4:_(s8) = G_UADDSAT %0, %1(s8)
+ %5:_(s64) = G_ANYEXT %4(s8)
+ $x10 = COPY %5(s64)
+ PseudoRET implicit $x10
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/libcalls.ll b/llvm/test/CodeGen/RISCV/GlobalISel/libcalls.ll
new file mode 100644
index 000000000000..aaef8d98c812
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/libcalls.ll
@@ -0,0 +1,51 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -global-isel -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefix=RV32
+; RUN: llc -mtriple=riscv64 -global-isel -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefix=RV64
+
+define float @test_f32(float %x, float %y) nounwind {
+; RV32-LABEL: test_f32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: call fmodf
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: test_f32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: call fmodf
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+entry:
+ %z = frem float %x, %y
+ ret float %z
+}
+
+define double @test_f64(double %x, double %y) nounwind {
+; RV32-LABEL: test_f64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: call fmod
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: test_f64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: call fmod
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+entry:
+ %z = frem double %x, %y
+ ret double %z
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/shift.ll b/llvm/test/CodeGen/RISCV/GlobalISel/shift.ll
new file mode 100644
index 000000000000..b75cbf8e871a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/shift.ll
@@ -0,0 +1,48 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -global-isel -global-isel-abort=1 -verify-machineinstrs < %s 2>&1 | FileCheck %s --check-prefixes=RV32
+; RUN: llc -mtriple=riscv64 -global-isel -global-isel-abort=1 -verify-machineinstrs < %s 2>&1 | FileCheck %s --check-prefixes=RV64
+
+define i16 @test_lshr_i48(i48 %x) {
+; RV32-LABEL: test_lshr_i48:
+; RV32: # %bb.0:
+; RV32-NEXT: srli a0, a0, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: test_lshr_i48:
+; RV64: # %bb.0:
+; RV64-NEXT: srliw a0, a0, 16
+; RV64-NEXT: ret
+ %lshr = lshr i48 %x, 16
+ %trunc = trunc i48 %lshr to i16
+ ret i16 %trunc
+}
+
+define i16 @test_ashr_i48(i48 %x) {
+; RV32-LABEL: test_ashr_i48:
+; RV32: # %bb.0:
+; RV32-NEXT: srai a0, a0, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: test_ashr_i48:
+; RV64: # %bb.0:
+; RV64-NEXT: sraiw a0, a0, 16
+; RV64-NEXT: ret
+ %ashr = ashr i48 %x, 16
+ %trunc = trunc i48 %ashr to i16
+ ret i16 %trunc
+}
+
+define i16 @test_shl_i48(i48 %x) {
+; RV32-LABEL: test_shl_i48:
+; RV32: # %bb.0:
+; RV32-NEXT: slli a0, a0, 8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: test_shl_i48:
+; RV64: # %bb.0:
+; RV64-NEXT: slliw a0, a0, 8
+; RV64-NEXT: ret
+ %shl = shl i48 %x, 8
+ %trunc = trunc i48 %shl to i16
+ ret i16 %trunc
+}
diff --git a/llvm/test/CodeGen/RISCV/O0-pipeline.ll b/llvm/test/CodeGen/RISCV/O0-pipeline.ll
index 3aaa5dc03a7d..e4abc93d1a8a 100644
--- a/llvm/test/CodeGen/RISCV/O0-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O0-pipeline.ll
@@ -44,14 +44,12 @@
; CHECK-NEXT: RISC-V Insert Write VXRM Pass
; CHECK-NEXT: Init Undef Pass
; CHECK-NEXT: Eliminate PHI nodes for register allocation
-; CHECK-NEXT: MachineDominator Tree Construction
-; CHECK-NEXT: Slot index numbering
-; CHECK-NEXT: Live Interval Analysis
-; CHECK-NEXT: RISC-V Insert VSETVLI pass
; CHECK-NEXT: Two-Address instruction pass
; CHECK-NEXT: Fast Register Allocator
+; CHECK-NEXT: MachineDominator Tree Construction
; CHECK-NEXT: Slot index numbering
; CHECK-NEXT: Live Interval Analysis
+; CHECK-NEXT: RISC-V Insert VSETVLI pass
; CHECK-NEXT: RISC-V Coalesce VSETVLI pass
; CHECK-NEXT: Fast Register Allocator
; CHECK-NEXT: Remove Redundant DEBUG_VALUE analysis
diff --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
index 52634b2a8162..0528b00d408b 100644
--- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
@@ -128,7 +128,6 @@
; CHECK-NEXT: Slot index numbering
; CHECK-NEXT: Live Interval Analysis
; CHECK-NEXT: Register Coalescer
-; CHECK-NEXT: RISC-V Insert VSETVLI pass
; CHECK-NEXT: Rename Disconnected Subregister Components
; CHECK-NEXT: Machine Instruction Scheduler
; CHECK-NEXT: Machine Block Frequency Analysis
@@ -142,6 +141,7 @@
; CHECK-NEXT: Machine Optimization Remark Emitter
; CHECK-NEXT: Greedy Register Allocator
; CHECK-NEXT: Virtual Register Rewriter
+; CHECK-NEXT: RISC-V Insert VSETVLI pass
; CHECK-NEXT: RISC-V Coalesce VSETVLI pass
; CHECK-NEXT: RISC-V Dead register definitions
; CHECK-NEXT: Virtual Register Map
diff --git a/llvm/test/CodeGen/RISCV/attributes.ll b/llvm/test/CodeGen/RISCV/attributes.ll
index 953ed5ee3795..c90bb031e082 100644
--- a/llvm/test/CodeGen/RISCV/attributes.ll
+++ b/llvm/test/CodeGen/RISCV/attributes.ll
@@ -75,6 +75,8 @@
; RUN: llc -mtriple=riscv32 -mattr=+xtheadmemidx %s -o - | FileCheck --check-prefix=RV32XTHEADMEMIDX %s
; RUN: llc -mtriple=riscv32 -mattr=+xtheadmempair %s -o - | FileCheck --check-prefix=RV32XTHEADMEMPAIR %s
; RUN: llc -mtriple=riscv32 -mattr=+xtheadsync %s -o - | FileCheck --check-prefix=RV32XTHEADSYNC %s
+; RUN: llc -mtriple=riscv32 -mattr=+zaamo %s -o - | FileCheck --check-prefix=RV32ZAAMO %s
+; RUN: llc -mtriple=riscv32 -mattr=+zalrsc %s -o - | FileCheck --check-prefix=RV32ZALRSC %s
; RUN: llc -mtriple=riscv32 -mattr=+zca %s -o - | FileCheck --check-prefixes=CHECK,RV32ZCA %s
; RUN: llc -mtriple=riscv32 -mattr=+zcb %s -o - | FileCheck --check-prefixes=CHECK,RV32ZCB %s
; RUN: llc -mtriple=riscv32 -mattr=+zcd %s -o - | FileCheck --check-prefixes=CHECK,RV32ZCD %s
@@ -112,10 +114,8 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zfbfmin %s -o - | FileCheck --check-prefixes=CHECK,RV32ZFBFMIN %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zvfbfmin %s -o - | FileCheck --check-prefixes=CHECK,RV32ZVFBFMIN %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zvfbfwma %s -o - | FileCheck --check-prefixes=CHECK,RV32ZVFBFWMA %s
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-zaamo %s -o - | FileCheck --check-prefix=RV32ZAAMO %s
; RUN: llc -mtriple=riscv32 -mattr=+a,zacas %s -o - | FileCheck --check-prefix=RV32ZACAS %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zalasr %s -o - | FileCheck --check-prefix=RV32ZALASR %s
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-zalrsc %s -o - | FileCheck --check-prefix=RV32ZALRSC %s
; RUN: llc -mtriple=riscv32 -mattr=+zama16b %s -o - | FileCheck --check-prefixes=CHECK,RV32ZAMA16B %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zicfilp %s -o - | FileCheck --check-prefix=RV32ZICFILP %s
; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-zabha %s -o - | FileCheck --check-prefix=RV32ZABHA %s
@@ -205,6 +205,8 @@
; RUN: llc -mtriple=riscv64 -mattr=+zama16b %s -o - | FileCheck --check-prefixes=CHECK,RV64ZAMA16B %s
; RUN: llc -mtriple=riscv64 -mattr=+zawrs %s -o - | FileCheck --check-prefixes=CHECK,RV64ZAWRS %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-ztso %s -o - | FileCheck --check-prefixes=CHECK,RV64ZTSO %s
+; RUN: llc -mtriple=riscv64 -mattr=+zaamo %s -o - | FileCheck --check-prefix=RV64ZAAMO %s
+; RUN: llc -mtriple=riscv64 -mattr=+zalrsc %s -o - | FileCheck --check-prefix=RV64ZALRSC %s
; RUN: llc -mtriple=riscv64 -mattr=+zca %s -o - | FileCheck --check-prefixes=CHECK,RV64ZCA %s
; RUN: llc -mtriple=riscv64 -mattr=+zcb %s -o - | FileCheck --check-prefixes=CHECK,RV64ZCB %s
; RUN: llc -mtriple=riscv64 -mattr=+zcd %s -o - | FileCheck --check-prefixes=CHECK,RV64ZCD %s
@@ -245,10 +247,8 @@
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zfbfmin %s -o - | FileCheck --check-prefixes=CHECK,RV64ZFBFMIN %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zvfbfmin %s -o - | FileCheck --check-prefixes=CHECK,RV64ZVFBFMIN %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zvfbfwma %s -o - | FileCheck --check-prefixes=CHECK,RV64ZVFBFWMA %s
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-zaamo %s -o - | FileCheck --check-prefix=RV64ZAAMO %s
; RUN: llc -mtriple=riscv64 -mattr=+a,zacas %s -o - | FileCheck --check-prefix=RV64ZACAS %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zalasr %s -o - | FileCheck --check-prefix=RV64ZALASR %s
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-zalrsc %s -o - | FileCheck --check-prefix=RV64ZALRSC %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zicfilp %s -o - | FileCheck --check-prefix=RV64ZICFILP %s
; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zabha %s -o - | FileCheck --check-prefix=RV64ZABHA %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-ssnpm %s -o - | FileCheck --check-prefix=RV64SSNPM %s
@@ -347,6 +347,8 @@
; RV32XTHEADMEMIDX: .attribute 5, "rv32i2p1_xtheadmemidx1p0"
; RV32XTHEADMEMPAIR: .attribute 5, "rv32i2p1_xtheadmempair1p0"
; RV32XTHEADSYNC: .attribute 5, "rv32i2p1_xtheadsync1p0"
+; RV32ZAAMO: .attribute 5, "rv32i2p1_zaamo1p0"
+; RV32ZALRSC: .attribute 5, "rv32i2p1_zalrsc1p0"
; RV32ZCA: .attribute 5, "rv32i2p1_zca1p0"
; RV32ZCB: .attribute 5, "rv32i2p1_zca1p0_zcb1p0"
; RV32ZCD: .attribute 5, "rv32i2p1_f2p2_d2p2_zicsr2p0_zca1p0_zcd1p0"
@@ -384,12 +386,10 @@
; RV32ZFBFMIN: .attribute 5, "rv32i2p1_f2p2_zicsr2p0_zfbfmin1p0"
; RV32ZVFBFMIN: .attribute 5, "rv32i2p1_f2p2_zicsr2p0_zve32f1p0_zve32x1p0_zvfbfmin1p0_zvl32b1p0"
; RV32ZVFBFWMA: .attribute 5, "rv32i2p1_f2p2_zicsr2p0_zfbfmin1p0_zve32f1p0_zve32x1p0_zvfbfmin1p0_zvfbfwma1p0_zvl32b1p0"
-; RV32ZAAMO: .attribute 5, "rv32i2p1_zaamo0p2"
; RV32ZACAS: .attribute 5, "rv32i2p1_a2p1_zacas1p0"
; RV32ZALASR: .attribute 5, "rv32i2p1_zalasr0p1"
-; RV32ZALRSC: .attribute 5, "rv32i2p1_zalrsc0p2"
; RV32ZAMA16B: .attribute 5, "rv32i2p1_zama16b1p0"
-; RV32ZICFILP: .attribute 5, "rv32i2p1_zicfilp0p4"
+; RV32ZICFILP: .attribute 5, "rv32i2p1_zicfilp0p4_zicsr2p0"
; RV32ZABHA: .attribute 5, "rv32i2p1_a2p1_zabha1p0"
; RV32SSNPM: .attribute 5, "rv32i2p1_ssnpm0p8"
; RV32SMNPM: .attribute 5, "rv32i2p1_smnpm0p8"
@@ -476,6 +476,8 @@
; RV64XTHEADSYNC: .attribute 5, "rv64i2p1_xtheadsync1p0"
; RV64XTHEADVDOT: .attribute 5, "rv64i2p1_f2p2_d2p2_v1p0_zicsr2p0_zve32f1p0_zve32x1p0_zve64d1p0_zve64f1p0_zve64x1p0_zvl128b1p0_zvl32b1p0_zvl64b1p0_xtheadvdot1p0"
; RV64ZTSO: .attribute 5, "rv64i2p1_ztso0p1"
+; RV64ZAAMO: .attribute 5, "rv64i2p1_zaamo1p0"
+; RV64ZALRSC: .attribute 5, "rv64i2p1_zalrsc1p0"
; RV64ZCA: .attribute 5, "rv64i2p1_zca1p0"
; RV64ZCB: .attribute 5, "rv64i2p1_zca1p0_zcb1p0"
; RV64ZCD: .attribute 5, "rv64i2p1_f2p2_d2p2_zicsr2p0_zca1p0_zcd1p0"
@@ -516,11 +518,9 @@
; RV64ZFBFMIN: .attribute 5, "rv64i2p1_f2p2_zicsr2p0_zfbfmin1p0"
; RV64ZVFBFMIN: .attribute 5, "rv64i2p1_f2p2_zicsr2p0_zve32f1p0_zve32x1p0_zvfbfmin1p0_zvl32b1p0"
; RV64ZVFBFWMA: .attribute 5, "rv64i2p1_f2p2_zicsr2p0_zfbfmin1p0_zve32f1p0_zve32x1p0_zvfbfmin1p0_zvfbfwma1p0_zvl32b1p0"
-; RV64ZAAMO: .attribute 5, "rv64i2p1_zaamo0p2"
; RV64ZACAS: .attribute 5, "rv64i2p1_a2p1_zacas1p0"
; RV64ZALASR: .attribute 5, "rv64i2p1_zalasr0p1"
-; RV64ZALRSC: .attribute 5, "rv64i2p1_zalrsc0p2"
-; RV64ZICFILP: .attribute 5, "rv64i2p1_zicfilp0p4"
+; RV64ZICFILP: .attribute 5, "rv64i2p1_zicfilp0p4_zicsr2p0"
; RV64ZABHA: .attribute 5, "rv64i2p1_a2p1_zabha1p0"
; RV64SSNPM: .attribute 5, "rv64i2p1_ssnpm0p8"
; RV64SMNPM: .attribute 5, "rv64i2p1_smnpm0p8"
diff --git a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
index 83a4f63add33..eb6ac985287a 100644
--- a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
@@ -24,36 +24,36 @@ define void @_Z3foov() {
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_49)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_49)
; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a0)
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_48)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_48)
-; CHECK-NEXT: vle8.v v10, (a0)
+; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs1r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_46)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_46)
-; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vle16.v v12, (a0)
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_45)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_45)
-; CHECK-NEXT: vle16.v v12, (a0)
+; CHECK-NEXT: vle16.v v14, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: vs2r.v v14, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vs2r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
-; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, ma
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_40)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_40)
+; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_44)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_44)
@@ -71,12 +71,12 @@ define void @_Z3foov() {
; CHECK-NEXT: lui a0, 1048572
; CHECK-NEXT: addi a0, a0, 928
; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu
; CHECK-NEXT: vsext.vf2 v10, v8, v0.t
; CHECK-NEXT: lui a0, %hi(var_47)
; CHECK-NEXT: addi a0, a0, %lo(var_47)
diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
index ea8feef33298..6009a6c7e138 100644
--- a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
+++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
@@ -75,18 +75,18 @@ define i64 @ctz_nxv8i1_no_range(<vscale x 8 x i16> %a) {
; RV32-NEXT: sw a0, 16(sp)
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vsetvli a3, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a2), zero
-; RV32-NEXT: vid.v v8
+; RV32-NEXT: vlse64.v v8, (a2), zero
+; RV32-NEXT: vid.v v16
; RV32-NEXT: li a2, -1
-; RV32-NEXT: vmadd.vx v8, a2, v16
-; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; RV32-NEXT: vmadd.vx v16, a2, v8
; RV32-NEXT: addi a2, sp, 32
-; RV32-NEXT: vl2r.v v16, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vmsne.vi v0, v16, 0
+; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; RV32-NEXT: vmsne.vi v0, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vmv.v.i v16, 0
-; RV32-NEXT: vmerge.vim v16, v16, -1, v0
-; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmv.v.i v8, 0
+; RV32-NEXT: vmerge.vim v8, v8, -1, v0
+; RV32-NEXT: vand.vv v8, v16, v8
; RV32-NEXT: vredmaxu.vs v8, v8, v8
; RV32-NEXT: vmv.x.s a2, v8
; RV32-NEXT: sltu a3, a0, a2
@@ -108,15 +108,15 @@ define i64 @ctz_nxv8i1_no_range(<vscale x 8 x i16> %a) {
; RV64: # %bb.0:
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vmv.v.x v24, a0
-; RV64-NEXT: vid.v v16
+; RV64-NEXT: vmv.v.x v16, a0
+; RV64-NEXT: vid.v v24
; RV64-NEXT: li a1, -1
-; RV64-NEXT: vmadd.vx v16, a1, v24
+; RV64-NEXT: vmadd.vx v24, a1, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; RV64-NEXT: vmsne.vi v0, v8, 0
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vmv.v.i v8, 0
-; RV64-NEXT: vmerge.vvm v8, v8, v16, v0
+; RV64-NEXT: vmerge.vvm v8, v8, v24, v0
; RV64-NEXT: vredmaxu.vs v8, v8, v8
; RV64-NEXT: vmv.x.s a1, v8
; RV64-NEXT: sub a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/loop-strength-reduce-loop-invar.ll b/llvm/test/CodeGen/RISCV/loop-strength-reduce-loop-invar.ll
index 8b22046cb624..8693283e8371 100644
--- a/llvm/test/CodeGen/RISCV/loop-strength-reduce-loop-invar.ll
+++ b/llvm/test/CodeGen/RISCV/loop-strength-reduce-loop-invar.ll
@@ -53,26 +53,24 @@ define void @test(i32 signext %row, i32 signext %N.in) nounwind {
; RV64: # %bb.0: # %entry
; RV64-NEXT: blez a1, .LBB0_3
; RV64-NEXT: # %bb.1: # %cond_true.preheader
-; RV64-NEXT: negw a1, a1
; RV64-NEXT: slli a0, a0, 6
; RV64-NEXT: lui a2, %hi(A)
; RV64-NEXT: addi a2, a2, %lo(A)
; RV64-NEXT: add a0, a0, a2
; RV64-NEXT: addi a2, a0, 4
+; RV64-NEXT: addiw a1, a1, 2
; RV64-NEXT: li a3, 2
; RV64-NEXT: li a4, 4
; RV64-NEXT: li a5, 5
-; RV64-NEXT: li a6, 2
; RV64-NEXT: .LBB0_2: # %cond_true
; RV64-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-NEXT: sw a4, 0(a2)
-; RV64-NEXT: slli a7, a6, 2
-; RV64-NEXT: add a7, a0, a7
-; RV64-NEXT: sw a5, 0(a7)
-; RV64-NEXT: addiw a6, a6, 1
-; RV64-NEXT: addw a7, a1, a6
+; RV64-NEXT: slli a6, a3, 2
+; RV64-NEXT: add a6, a0, a6
+; RV64-NEXT: sw a5, 0(a6)
+; RV64-NEXT: addiw a3, a3, 1
; RV64-NEXT: addi a2, a2, 4
-; RV64-NEXT: bne a7, a3, .LBB0_2
+; RV64-NEXT: bne a3, a1, .LBB0_2
; RV64-NEXT: .LBB0_3: # %return
; RV64-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
index 364e8c7b38da..42ea425f99c0 100644
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -1843,3 +1843,152 @@ define i8 @mulsub_demand_2(i8 %x, i8 %y) nounwind {
%r = or i8 %a, 240
ret i8 %r
}
+
+define i64 @muland_demand(i64 %x) nounwind {
+; RV32I-LABEL: muland_demand:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: andi a0, a0, -8
+; RV32I-NEXT: slli a1, a1, 2
+; RV32I-NEXT: srli a1, a1, 2
+; RV32I-NEXT: li a2, 12
+; RV32I-NEXT: li a3, 0
+; RV32I-NEXT: call __muldi3
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IM-LABEL: muland_demand:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: andi a0, a0, -8
+; RV32IM-NEXT: li a2, 12
+; RV32IM-NEXT: mul a1, a1, a2
+; RV32IM-NEXT: mulhu a3, a0, a2
+; RV32IM-NEXT: add a1, a3, a1
+; RV32IM-NEXT: mul a0, a0, a2
+; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: muland_demand:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a1, -29
+; RV64I-NEXT: srli a1, a1, 2
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: li a1, 12
+; RV64I-NEXT: tail __muldi3
+;
+; RV64IM-LABEL: muland_demand:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: andi a0, a0, -8
+; RV64IM-NEXT: li a1, 12
+; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: ret
+ %and = and i64 %x, 4611686018427387896
+ %mul = mul i64 %and, 12
+ ret i64 %mul
+}
+
+define i64 @mulzext_demand(i32 signext %x) nounwind {
+; RV32I-LABEL: mulzext_demand:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: li a3, 3
+; RV32I-NEXT: li a2, 0
+; RV32I-NEXT: call __muldi3
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IM-LABEL: mulzext_demand:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: slli a1, a0, 1
+; RV32IM-NEXT: add a1, a1, a0
+; RV32IM-NEXT: li a0, 0
+; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: mulzext_demand:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a1, 3
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: tail __muldi3
+;
+; RV64IM-LABEL: mulzext_demand:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: li a1, 3
+; RV64IM-NEXT: slli a1, a1, 32
+; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: ret
+ %ext = zext i32 %x to i64
+ %mul = mul i64 %ext, 12884901888
+ ret i64 %mul
+}
+
+define i32 @mulfshl_demand(i32 signext %x) nounwind {
+; RV32I-LABEL: mulfshl_demand:
+; RV32I: # %bb.0:
+; RV32I-NEXT: srli a0, a0, 11
+; RV32I-NEXT: lui a1, 92808
+; RV32I-NEXT: tail __mulsi3
+;
+; RV32IM-LABEL: mulfshl_demand:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: srli a0, a0, 11
+; RV32IM-NEXT: lui a1, 92808
+; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: mulfshl_demand:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: srliw a0, a0, 11
+; RV64I-NEXT: lui a1, 92808
+; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: mulfshl_demand:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: srliw a0, a0, 11
+; RV64IM-NEXT: lui a1, 92808
+; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: ret
+ %fshl = tail call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 21)
+ %mul = mul i32 %fshl, 380141568
+ ret i32 %mul
+}
+
+define i32 @mulor_demand(i32 signext %x, i32 signext %y) nounwind {
+; RV32I-LABEL: mulor_demand:
+; RV32I: # %bb.0:
+; RV32I-NEXT: lui a1, 92808
+; RV32I-NEXT: tail __mulsi3
+;
+; RV32IM-LABEL: mulor_demand:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: lui a1, 92808
+; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: mulor_demand:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lui a1, 92808
+; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: mulor_demand:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: lui a1, 92808
+; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: ret
+ %mul1 = mul i32 %y, 10485760
+ %or = or disjoint i32 %mul1, %x
+ %mul2 = mul i32 %or, 380141568
+ ret i32 %mul2
+}
diff --git a/llvm/test/CodeGen/RISCV/pr69586.ll b/llvm/test/CodeGen/RISCV/pr69586.ll
index 15daf2c57790..7084c04805be 100644
--- a/llvm/test/CodeGen/RISCV/pr69586.ll
+++ b/llvm/test/CodeGen/RISCV/pr69586.ll
@@ -927,258 +927,258 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v14, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v10, v12
-; REMAT-NEXT: vle32.v v12, (a2)
+; REMAT-NEXT: vle32.v v10, (a2)
; REMAT-NEXT: li a2, 11
; REMAT-NEXT: slli a2, a2, 10
; REMAT-NEXT: add a2, a0, a2
-; REMAT-NEXT: vle32.v v16, (a2)
+; REMAT-NEXT: vle32.v v26, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v8, v14
-; REMAT-NEXT: vle32.v v10, (a2)
+; REMAT-NEXT: vle32.v v12, (a2)
; REMAT-NEXT: li a2, 23
; REMAT-NEXT: slli a2, a2, 9
; REMAT-NEXT: add a2, a0, a2
-; REMAT-NEXT: vle32.v v26, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v12, v16
-; REMAT-NEXT: vle32.v v12, (a2)
-; REMAT-NEXT: lui a2, 3
-; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v28, (a2)
; REMAT-NEXT: vle32.v v14, (a2)
-; REMAT-NEXT: li a2, 25
-; REMAT-NEXT: slli a2, a2, 9
+; REMAT-NEXT: lui a2, 3
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v30, (a2)
; REMAT-NEXT: vle32.v v16, (a2)
-; REMAT-NEXT: li a2, 13
-; REMAT-NEXT: slli a2, a2, 10
+; REMAT-NEXT: li a2, 25
+; REMAT-NEXT: slli a2, a2, 9
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v6, (a2)
; REMAT-NEXT: vle32.v v18, (a2)
-; REMAT-NEXT: li a2, 27
-; REMAT-NEXT: slli a2, a2, 9
+; REMAT-NEXT: li a2, 13
+; REMAT-NEXT: slli a2, a2, 10
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v4, (a2)
; REMAT-NEXT: vle32.v v20, (a2)
-; REMAT-NEXT: li a2, 7
-; REMAT-NEXT: slli a2, a2, 11
+; REMAT-NEXT: li a2, 27
+; REMAT-NEXT: slli a2, a2, 9
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v2, (a2)
; REMAT-NEXT: vle32.v v22, (a2)
-; REMAT-NEXT: li a2, 29
-; REMAT-NEXT: slli a2, a2, 9
+; REMAT-NEXT: li a2, 7
+; REMAT-NEXT: slli a2, a2, 11
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v24, (a2)
; REMAT-NEXT: vle32.v v8, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v10, v26
-; REMAT-NEXT: li a2, 15
-; REMAT-NEXT: slli a2, a2, 10
+; REMAT-NEXT: li a2, 29
+; REMAT-NEXT: slli a2, a2, 9
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v26, (a2)
; REMAT-NEXT: vle32.v v10, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v12, v28
-; REMAT-NEXT: li a2, 31
-; REMAT-NEXT: slli a2, a2, 9
+; REMAT-NEXT: li a2, 15
+; REMAT-NEXT: slli a2, a2, 10
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v28, (a2)
; REMAT-NEXT: vle32.v v12, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v14, v30
-; REMAT-NEXT: lui a2, 4
+; REMAT-NEXT: li a2, 31
+; REMAT-NEXT: slli a2, a2, 9
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v30, (a2)
; REMAT-NEXT: vle32.v v14, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v16, v6
; REMAT-NEXT: lui a2, 4
-; REMAT-NEXT: addiw a2, a2, 512
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v6, (a2)
; REMAT-NEXT: vle32.v v16, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v18, v4
-; REMAT-NEXT: li a2, 17
-; REMAT-NEXT: slli a2, a2, 10
+; REMAT-NEXT: lui a2, 4
+; REMAT-NEXT: addiw a2, a2, 512
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v4, (a2)
; REMAT-NEXT: vle32.v v18, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v20, v2
-; REMAT-NEXT: lui a2, 4
-; REMAT-NEXT: addiw a2, a2, 1536
+; REMAT-NEXT: li a2, 17
+; REMAT-NEXT: slli a2, a2, 10
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v2, (a2)
; REMAT-NEXT: vle32.v v20, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v22, v24
-; REMAT-NEXT: li a2, 9
-; REMAT-NEXT: slli a2, a2, 11
+; REMAT-NEXT: lui a2, 4
+; REMAT-NEXT: addiw a2, a2, 1536
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v24, (a2)
; REMAT-NEXT: vle32.v v22, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v8, v26
-; REMAT-NEXT: lui a2, 5
-; REMAT-NEXT: addiw a2, a2, -1536
+; REMAT-NEXT: li a2, 9
+; REMAT-NEXT: slli a2, a2, 11
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v26, (a2)
; REMAT-NEXT: vle32.v v8, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v10, v28
-; REMAT-NEXT: li a2, 19
-; REMAT-NEXT: slli a2, a2, 10
+; REMAT-NEXT: lui a2, 5
+; REMAT-NEXT: addiw a2, a2, -1536
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v28, (a2)
; REMAT-NEXT: vle32.v v10, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v12, v30
-; REMAT-NEXT: lui ra, 5
-; REMAT-NEXT: addiw ra, ra, -512
-; REMAT-NEXT: add a2, a0, ra
+; REMAT-NEXT: li a2, 19
+; REMAT-NEXT: slli a2, a2, 10
+; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v30, (a2)
; REMAT-NEXT: vle32.v v12, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v14, v6
-; REMAT-NEXT: lui s11, 5
-; REMAT-NEXT: add a2, a0, s11
+; REMAT-NEXT: lui ra, 5
+; REMAT-NEXT: addiw ra, ra, -512
+; REMAT-NEXT: add a2, a0, ra
; REMAT-NEXT: vle32.v v6, (a2)
; REMAT-NEXT: vle32.v v14, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v16, v4
-; REMAT-NEXT: lui s10, 5
-; REMAT-NEXT: addiw s10, s10, 512
-; REMAT-NEXT: add a2, a0, s10
+; REMAT-NEXT: lui s11, 5
+; REMAT-NEXT: add a2, a0, s11
; REMAT-NEXT: vle32.v v4, (a2)
; REMAT-NEXT: vle32.v v16, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v18, v2
-; REMAT-NEXT: li s9, 21
-; REMAT-NEXT: slli s9, s9, 10
-; REMAT-NEXT: add a2, a0, s9
+; REMAT-NEXT: lui s10, 5
+; REMAT-NEXT: addiw s10, s10, 512
+; REMAT-NEXT: add a2, a0, s10
; REMAT-NEXT: vle32.v v2, (a2)
; REMAT-NEXT: vle32.v v18, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v20, v24
-; REMAT-NEXT: lui s8, 5
-; REMAT-NEXT: addiw s8, s8, 1536
-; REMAT-NEXT: add a2, a0, s8
+; REMAT-NEXT: li s9, 21
+; REMAT-NEXT: slli s9, s9, 10
+; REMAT-NEXT: add a2, a0, s9
; REMAT-NEXT: vle32.v v24, (a2)
; REMAT-NEXT: vle32.v v20, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v22, v26
-; REMAT-NEXT: li s7, 11
-; REMAT-NEXT: slli s7, s7, 11
-; REMAT-NEXT: add a2, a0, s7
+; REMAT-NEXT: lui s8, 5
+; REMAT-NEXT: addiw s8, s8, 1536
+; REMAT-NEXT: add a2, a0, s8
; REMAT-NEXT: vle32.v v26, (a2)
; REMAT-NEXT: vle32.v v22, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v8, v28
-; REMAT-NEXT: lui s6, 6
-; REMAT-NEXT: addiw s6, s6, -1536
-; REMAT-NEXT: add a2, a0, s6
+; REMAT-NEXT: li s7, 11
+; REMAT-NEXT: slli s7, s7, 11
+; REMAT-NEXT: add a2, a0, s7
; REMAT-NEXT: vle32.v v28, (a2)
; REMAT-NEXT: vle32.v v8, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v10, v30
-; REMAT-NEXT: li s5, 23
-; REMAT-NEXT: slli s5, s5, 10
-; REMAT-NEXT: add a2, a0, s5
+; REMAT-NEXT: lui s6, 6
+; REMAT-NEXT: addiw s6, s6, -1536
+; REMAT-NEXT: add a2, a0, s6
; REMAT-NEXT: vle32.v v30, (a2)
; REMAT-NEXT: vle32.v v10, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v12, v6
-; REMAT-NEXT: lui s4, 6
-; REMAT-NEXT: addiw s4, s4, -512
-; REMAT-NEXT: add a2, a0, s4
+; REMAT-NEXT: li s5, 23
+; REMAT-NEXT: slli s5, s5, 10
+; REMAT-NEXT: add a2, a0, s5
; REMAT-NEXT: vle32.v v6, (a2)
; REMAT-NEXT: vle32.v v12, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v14, v4
-; REMAT-NEXT: lui s3, 6
-; REMAT-NEXT: add a2, a0, s3
+; REMAT-NEXT: lui s4, 6
+; REMAT-NEXT: addiw s4, s4, -512
+; REMAT-NEXT: add a2, a0, s4
; REMAT-NEXT: vle32.v v4, (a2)
; REMAT-NEXT: vle32.v v14, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v16, v2
-; REMAT-NEXT: lui s2, 6
-; REMAT-NEXT: addiw s2, s2, 512
-; REMAT-NEXT: add a2, a0, s2
+; REMAT-NEXT: lui s3, 6
+; REMAT-NEXT: add a2, a0, s3
; REMAT-NEXT: vle32.v v2, (a2)
; REMAT-NEXT: vle32.v v16, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v18, v24
-; REMAT-NEXT: li s1, 25
-; REMAT-NEXT: slli s1, s1, 10
-; REMAT-NEXT: add a2, a0, s1
+; REMAT-NEXT: lui s2, 6
+; REMAT-NEXT: addiw s2, s2, 512
+; REMAT-NEXT: add a2, a0, s2
; REMAT-NEXT: vle32.v v0, (a2)
; REMAT-NEXT: vle32.v v18, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v20, v26
-; REMAT-NEXT: lui s0, 6
-; REMAT-NEXT: addiw s0, s0, 1536
-; REMAT-NEXT: add a2, a0, s0
+; REMAT-NEXT: li s1, 25
+; REMAT-NEXT: slli s1, s1, 10
+; REMAT-NEXT: add a2, a0, s1
; REMAT-NEXT: vle32.v v26, (a2)
; REMAT-NEXT: vle32.v v20, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v22, v28
-; REMAT-NEXT: li t6, 13
-; REMAT-NEXT: slli t6, t6, 11
-; REMAT-NEXT: add a2, a0, t6
+; REMAT-NEXT: lui s0, 6
+; REMAT-NEXT: addiw s0, s0, 1536
+; REMAT-NEXT: add a2, a0, s0
; REMAT-NEXT: vle32.v v28, (a2)
; REMAT-NEXT: vle32.v v22, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v8, v30
-; REMAT-NEXT: lui t5, 7
-; REMAT-NEXT: addiw t5, t5, -1536
-; REMAT-NEXT: add a2, a0, t5
+; REMAT-NEXT: li t6, 13
+; REMAT-NEXT: slli t6, t6, 11
+; REMAT-NEXT: add a2, a0, t6
; REMAT-NEXT: vle32.v v30, (a2)
; REMAT-NEXT: vle32.v v24, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v10, v6
-; REMAT-NEXT: li t4, 27
-; REMAT-NEXT: slli t4, t4, 10
-; REMAT-NEXT: add a2, a0, t4
+; REMAT-NEXT: lui t5, 7
+; REMAT-NEXT: addiw t5, t5, -1536
+; REMAT-NEXT: add a2, a0, t5
; REMAT-NEXT: vle32.v v6, (a2)
; REMAT-NEXT: vle32.v v10, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v12, v4
-; REMAT-NEXT: lui t3, 7
-; REMAT-NEXT: addiw t3, t3, -512
-; REMAT-NEXT: add a2, a0, t3
+; REMAT-NEXT: li t4, 27
+; REMAT-NEXT: slli t4, t4, 10
+; REMAT-NEXT: add a2, a0, t4
; REMAT-NEXT: vle32.v v4, (a2)
; REMAT-NEXT: vle32.v v12, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v14, v2
+; REMAT-NEXT: lui t3, 7
+; REMAT-NEXT: addiw t3, t3, -512
+; REMAT-NEXT: add a2, a0, t3
+; REMAT-NEXT: vle32.v v2, (a2)
+; REMAT-NEXT: vle32.v v14, (a2)
+; REMAT-NEXT: sf.vc.vv 3, 0, v16, v0
; REMAT-NEXT: lui t2, 7
; REMAT-NEXT: add a2, a0, t2
-; REMAT-NEXT: vle32.v v2, (a2)
+; REMAT-NEXT: vle32.v v0, (a2)
; REMAT-NEXT: vle32.v v8, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v16, v0
+; REMAT-NEXT: sf.vc.vv 3, 0, v18, v26
; REMAT-NEXT: lui t1, 7
; REMAT-NEXT: addiw t1, t1, 512
; REMAT-NEXT: add a2, a0, t1
-; REMAT-NEXT: vle32.v v14, (a2)
; REMAT-NEXT: vle32.v v16, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v18, v26
+; REMAT-NEXT: vle32.v v18, (a2)
+; REMAT-NEXT: sf.vc.vv 3, 0, v20, v28
; REMAT-NEXT: li t0, 29
; REMAT-NEXT: slli t0, t0, 10
; REMAT-NEXT: add a2, a0, t0
-; REMAT-NEXT: vle32.v v18, (a2)
+; REMAT-NEXT: vle32.v v20, (a2)
; REMAT-NEXT: vle32.v v26, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v20, v28
+; REMAT-NEXT: sf.vc.vv 3, 0, v22, v30
; REMAT-NEXT: lui a7, 7
; REMAT-NEXT: addiw a7, a7, 1536
; REMAT-NEXT: add a2, a0, a7
-; REMAT-NEXT: vle32.v v20, (a2)
+; REMAT-NEXT: vle32.v v22, (a2)
; REMAT-NEXT: vle32.v v28, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v22, v30
+; REMAT-NEXT: sf.vc.vv 3, 0, v24, v6
; REMAT-NEXT: li a6, 15
; REMAT-NEXT: slli a6, a6, 11
; REMAT-NEXT: add a2, a0, a6
-; REMAT-NEXT: vle32.v v22, (a2)
+; REMAT-NEXT: vle32.v v24, (a2)
; REMAT-NEXT: vle32.v v30, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v24, v6
+; REMAT-NEXT: sf.vc.vv 3, 0, v10, v4
; REMAT-NEXT: lui a5, 8
; REMAT-NEXT: addiw a5, a5, -1536
; REMAT-NEXT: add a2, a0, a5
-; REMAT-NEXT: vle32.v v24, (a2)
+; REMAT-NEXT: vle32.v v10, (a2)
; REMAT-NEXT: vle32.v v6, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v10, v4
+; REMAT-NEXT: sf.vc.vv 3, 0, v12, v2
; REMAT-NEXT: li a4, 31
; REMAT-NEXT: slli a4, a4, 10
; REMAT-NEXT: add a2, a0, a4
-; REMAT-NEXT: vle32.v v10, (a2)
+; REMAT-NEXT: vle32.v v12, (a2)
; REMAT-NEXT: vle32.v v4, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v12, v2
+; REMAT-NEXT: sf.vc.vv 3, 0, v14, v0
; REMAT-NEXT: lui a3, 8
; REMAT-NEXT: addiw a3, a3, -512
; REMAT-NEXT: add a2, a0, a3
-; REMAT-NEXT: vle32.v v12, (a2)
+; REMAT-NEXT: vle32.v v14, (a2)
; REMAT-NEXT: vle32.v v2, (a2)
; REMAT-NEXT: lui a2, 8
; REMAT-NEXT: add a0, a0, a2
; REMAT-NEXT: vle32.v v0, (a0)
-; REMAT-NEXT: sf.vc.vv 3, 0, v8, v14
-; REMAT-NEXT: sf.vc.vv 3, 0, v16, v18
-; REMAT-NEXT: sf.vc.vv 3, 0, v26, v20
-; REMAT-NEXT: sf.vc.vv 3, 0, v28, v22
-; REMAT-NEXT: sf.vc.vv 3, 0, v30, v24
-; REMAT-NEXT: sf.vc.vv 3, 0, v6, v10
-; REMAT-NEXT: sf.vc.vv 3, 0, v4, v12
+; REMAT-NEXT: sf.vc.vv 3, 0, v8, v16
+; REMAT-NEXT: sf.vc.vv 3, 0, v18, v20
+; REMAT-NEXT: sf.vc.vv 3, 0, v26, v22
+; REMAT-NEXT: sf.vc.vv 3, 0, v28, v24
+; REMAT-NEXT: sf.vc.vv 3, 0, v30, v10
+; REMAT-NEXT: sf.vc.vv 3, 0, v6, v12
+; REMAT-NEXT: sf.vc.vv 3, 0, v4, v14
; REMAT-NEXT: sf.vc.vv 3, 0, v2, v0
; REMAT-NEXT: sf.vc.v.i 2, 0, v8, 0
; REMAT-NEXT: addi a0, a1, 1024
diff --git a/llvm/test/CodeGen/RISCV/pr90730.ll b/llvm/test/CodeGen/RISCV/pr90730.ll
new file mode 100644
index 000000000000..7c3f4b43089c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/pr90730.ll
@@ -0,0 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=riscv64 -mattr=+zbb | FileCheck %s
+
+define i32 @pr90730(i32 %x, i1 %y, ptr %p) {
+; CHECK-LABEL: pr90730:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addiw a1, a1, -960
+; CHECK-NEXT: andn a0, a1, a0
+; CHECK-NEXT: sw zero, 0(a2)
+; CHECK-NEXT: ret
+entry:
+ %ext = zext i1 %y to i32
+ %xor1 = xor i32 %ext, 31817
+ %and1 = and i32 %xor1, %x
+ store i32 %and1, ptr %p, align 4
+ %v = load i32, ptr %p, align 4
+ %and2 = and i32 %v, 31808
+ %xor2 = xor i32 %and2, 31808
+ store i32 0, ptr %p, align 4
+ ret i32 %xor2
+}
diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
index 81ef6072449e..c92ba98dcc33 100644
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -43,7 +43,6 @@ define void @last_chance_recoloring_failure() {
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: call func
-; CHECK-NEXT: vsetvli zero, s0, e16, m4, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
@@ -55,6 +54,7 @@ define void @last_chance_recoloring_failure() {
; CHECK-NEXT: vl4r.v v20, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, s0, e16, m4, ta, ma
; CHECK-NEXT: vfwsub.wv v8, v24, v16
; CHECK-NEXT: vsetvli zero, zero, e32, m8, tu, mu
; CHECK-NEXT: vfdiv.vv v8, v24, v8, v0.t
@@ -99,7 +99,6 @@ define void @last_chance_recoloring_failure() {
; SUBREGLIVENESS-NEXT: addi a0, sp, 16
; SUBREGLIVENESS-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; SUBREGLIVENESS-NEXT: call func
-; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, ta, ma
; SUBREGLIVENESS-NEXT: csrr a0, vlenb
; SUBREGLIVENESS-NEXT: slli a0, a0, 3
; SUBREGLIVENESS-NEXT: add a0, sp, a0
@@ -111,6 +110,7 @@ define void @last_chance_recoloring_failure() {
; SUBREGLIVENESS-NEXT: vl4r.v v20, (a0) # Unknown-size Folded Reload
; SUBREGLIVENESS-NEXT: addi a0, sp, 16
; SUBREGLIVENESS-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, ta, ma
; SUBREGLIVENESS-NEXT: vfwsub.wv v8, v24, v16
; SUBREGLIVENESS-NEXT: vsetvli zero, zero, e32, m8, tu, mu
; SUBREGLIVENESS-NEXT: vfdiv.vv v8, v24, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zba.ll
index 2db0d40b0ce5..cf7be57ccc90 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zba.ll
@@ -637,8 +637,6 @@ define i64 @zext_mul288(i32 signext %a) {
define i64 @zext_mul12884901888(i32 signext %a) {
; RV64I-LABEL: zext_mul12884901888:
; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: li a1, 3
; RV64I-NEXT: slli a1, a1, 32
; RV64I-NEXT: mul a0, a0, a1
@@ -646,8 +644,8 @@ define i64 @zext_mul12884901888(i32 signext %a) {
;
; RV64ZBA-LABEL: zext_mul12884901888:
; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli a0, a0, 32
; RV64ZBA-NEXT: sh1add a0, a0, a0
+; RV64ZBA-NEXT: slli a0, a0, 32
; RV64ZBA-NEXT: ret
%b = zext i32 %a to i64
%c = mul i64 %b, 12884901888
@@ -658,8 +656,6 @@ define i64 @zext_mul12884901888(i32 signext %a) {
define i64 @zext_mul21474836480(i32 signext %a) {
; RV64I-LABEL: zext_mul21474836480:
; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: li a1, 5
; RV64I-NEXT: slli a1, a1, 32
; RV64I-NEXT: mul a0, a0, a1
@@ -667,8 +663,8 @@ define i64 @zext_mul21474836480(i32 signext %a) {
;
; RV64ZBA-LABEL: zext_mul21474836480:
; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli a0, a0, 32
; RV64ZBA-NEXT: sh2add a0, a0, a0
+; RV64ZBA-NEXT: slli a0, a0, 32
; RV64ZBA-NEXT: ret
%b = zext i32 %a to i64
%c = mul i64 %b, 21474836480
@@ -679,8 +675,6 @@ define i64 @zext_mul21474836480(i32 signext %a) {
define i64 @zext_mul38654705664(i32 signext %a) {
; RV64I-LABEL: zext_mul38654705664:
; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: li a1, 9
; RV64I-NEXT: slli a1, a1, 32
; RV64I-NEXT: mul a0, a0, a1
@@ -688,8 +682,8 @@ define i64 @zext_mul38654705664(i32 signext %a) {
;
; RV64ZBA-LABEL: zext_mul38654705664:
; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli a0, a0, 32
; RV64ZBA-NEXT: sh3add a0, a0, a0
+; RV64ZBA-NEXT: slli a0, a0, 32
; RV64ZBA-NEXT: ret
%b = zext i32 %a to i64
%c = mul i64 %b, 38654705664
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index dc93c0215a25..4a568fb2b25c 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -856,8 +856,6 @@ define i64 @zext_mul288(i32 signext %a) {
define i64 @zext_mul12884901888(i32 signext %a) {
; RV64I-LABEL: zext_mul12884901888:
; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: li a1, 3
; RV64I-NEXT: slli a1, a1, 32
; RV64I-NEXT: mul a0, a0, a1
@@ -865,8 +863,8 @@ define i64 @zext_mul12884901888(i32 signext %a) {
;
; RV64ZBA-LABEL: zext_mul12884901888:
; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli a0, a0, 32
; RV64ZBA-NEXT: sh1add a0, a0, a0
+; RV64ZBA-NEXT: slli a0, a0, 32
; RV64ZBA-NEXT: ret
%b = zext i32 %a to i64
%c = mul i64 %b, 12884901888
@@ -877,8 +875,6 @@ define i64 @zext_mul12884901888(i32 signext %a) {
define i64 @zext_mul21474836480(i32 signext %a) {
; RV64I-LABEL: zext_mul21474836480:
; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: li a1, 5
; RV64I-NEXT: slli a1, a1, 32
; RV64I-NEXT: mul a0, a0, a1
@@ -886,8 +882,8 @@ define i64 @zext_mul21474836480(i32 signext %a) {
;
; RV64ZBA-LABEL: zext_mul21474836480:
; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli a0, a0, 32
; RV64ZBA-NEXT: sh2add a0, a0, a0
+; RV64ZBA-NEXT: slli a0, a0, 32
; RV64ZBA-NEXT: ret
%b = zext i32 %a to i64
%c = mul i64 %b, 21474836480
@@ -898,8 +894,6 @@ define i64 @zext_mul21474836480(i32 signext %a) {
define i64 @zext_mul38654705664(i32 signext %a) {
; RV64I-LABEL: zext_mul38654705664:
; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: li a1, 9
; RV64I-NEXT: slli a1, a1, 32
; RV64I-NEXT: mul a0, a0, a1
@@ -907,8 +901,8 @@ define i64 @zext_mul38654705664(i32 signext %a) {
;
; RV64ZBA-LABEL: zext_mul38654705664:
; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli a0, a0, 32
; RV64ZBA-NEXT: sh3add a0, a0, a0
+; RV64ZBA-NEXT: slli a0, a0, 32
; RV64ZBA-NEXT: ret
%b = zext i32 %a to i64
%c = mul i64 %b, 38654705664
diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
index eb74e2d302f1..05d6716e4719 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
@@ -590,13 +590,12 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64(<vscale x 16 x i64> %va, <vscale x 1
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB46_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: addi a0, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
index e578aada5a9c..91f700ef9680 100644
--- a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
@@ -39,9 +39,9 @@ define <vscale x 1 x i64> @access_fixed_and_vector_objects(ptr %val) {
; RV64IV-NEXT: addi a0, sp, 8
; RV64IV-NEXT: vl1re64.v v8, (a0)
; RV64IV-NEXT: addi a0, sp, 528
-; RV64IV-NEXT: ld a1, 520(sp)
; RV64IV-NEXT: vl1re64.v v9, (a0)
-; RV64IV-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64IV-NEXT: ld a0, 520(sp)
+; RV64IV-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; RV64IV-NEXT: vadd.vv v8, v8, v9
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: slli a0, a0, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
index 139579b3d2a3..9cb3991f31f9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
@@ -103,9 +103,9 @@ define <8 x i1> @fv8(ptr %p, i64 %index, i64 %tc) {
define <32 x i1> @fv32(ptr %p, i64 %index, i64 %tc) {
; CHECK-LABEL: fv32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: lui a0, %hi(.LCPI8_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI8_0)
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vid.v v16
; CHECK-NEXT: vsaddu.vx v16, v16, a1
@@ -124,31 +124,30 @@ define <64 x i1> @fv64(ptr %p, i64 %index, i64 %tc) {
; CHECK-LABEL: fv64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: vsaddu.vx v8, v8, a1
+; CHECK-NEXT: vmsltu.vx v0, v8, a2
; CHECK-NEXT: lui a0, %hi(.LCPI9_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI9_0)
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vid.v v16
-; CHECK-NEXT: vsaddu.vx v16, v16, a1
-; CHECK-NEXT: vmsltu.vx v0, v16, a2
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsaddu.vx v8, v16, a1
-; CHECK-NEXT: vmsltu.vx v16, v8, a2
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v0, v16, 2
; CHECK-NEXT: lui a0, %hi(.LCPI9_1)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI9_1)
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v9, (a0)
; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsaddu.vx v8, v16, a1
-; CHECK-NEXT: vmsltu.vx v16, v8, a2
-; CHECK-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v0, v16, 4
+; CHECK-NEXT: vsaddu.vx v16, v16, a1
+; CHECK-NEXT: vmsltu.vx v8, v16, a2
+; CHECK-NEXT: vsext.vf8 v16, v9
+; CHECK-NEXT: vsaddu.vx v16, v16, a1
; CHECK-NEXT: lui a0, %hi(.LCPI9_2)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI9_2)
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vmsltu.vx v10, v16, a2
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vi v0, v8, 2
+; CHECK-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vi v0, v10, 4
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsext.vf8 v16, v8
+; CHECK-NEXT: vsext.vf8 v16, v9
; CHECK-NEXT: vsaddu.vx v8, v16, a1
; CHECK-NEXT: vmsltu.vx v16, v8, a2
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
@@ -174,51 +173,48 @@ define <128 x i1> @fv128(ptr %p, i64 %index, i64 %tc) {
; CHECK-NEXT: vsext.vf8 v16, v9
; CHECK-NEXT: vsaddu.vx v16, v16, a1
; CHECK-NEXT: vmsltu.vx v8, v16, a2
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 2
; CHECK-NEXT: lui a0, %hi(.LCPI10_2)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_2)
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle8.v v9, (a0)
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vsaddu.vx v16, v16, a1
-; CHECK-NEXT: vmsltu.vx v9, v16, a2
-; CHECK-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 4
; CHECK-NEXT: lui a0, %hi(.LCPI10_3)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_3)
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vle8.v v11, (a0)
; CHECK-NEXT: vsext.vf8 v16, v9
; CHECK-NEXT: vsaddu.vx v16, v16, a1
; CHECK-NEXT: vmsltu.vx v9, v16, a2
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 6
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: lui a0, %hi(.LCPI10_4)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_4)
-; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vsext.vf8 v16, v11
+; CHECK-NEXT: vsaddu.vx v16, v16, a1
+; CHECK-NEXT: vmsltu.vx v11, v16, a2
; CHECK-NEXT: vid.v v16
; CHECK-NEXT: vsaddu.vx v16, v16, a1
; CHECK-NEXT: vmsltu.vx v0, v16, a2
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vsaddu.vx v16, v16, a1
-; CHECK-NEXT: vmsltu.vx v9, v16, a2
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v0, v9, 2
+; CHECK-NEXT: lui a0, %hi(.LCPI10_4)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_4)
+; CHECK-NEXT: vle8.v v12, (a0)
; CHECK-NEXT: lui a0, %hi(.LCPI10_5)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_5)
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle8.v v9, (a0)
-; CHECK-NEXT: vsext.vf8 v16, v9
+; CHECK-NEXT: vle8.v v13, (a0)
+; CHECK-NEXT: vsext.vf8 v16, v12
; CHECK-NEXT: vsaddu.vx v16, v16, a1
-; CHECK-NEXT: vmsltu.vx v9, v16, a2
+; CHECK-NEXT: vmsltu.vx v12, v16, a2
+; CHECK-NEXT: vsext.vf8 v16, v13
+; CHECK-NEXT: vsaddu.vx v16, v16, a1
+; CHECK-NEXT: vmsltu.vx v13, v16, a2
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vi v8, v10, 2
; CHECK-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v0, v9, 4
+; CHECK-NEXT: vslideup.vi v8, v9, 4
; CHECK-NEXT: lui a0, %hi(.LCPI10_6)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_6)
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v11, 6
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vi v0, v12, 2
+; CHECK-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vi v0, v13, 4
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vsext.vf8 v16, v9
; CHECK-NEXT: vsaddu.vx v16, v16, a1
; CHECK-NEXT: vmsltu.vx v9, v16, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
index 879dff4a6e49..5217148ba4f4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
@@ -1449,27 +1449,27 @@ define <vscale x 1 x i64> @vp_bitreverse_nxv1i64(<vscale x 1 x i64> %va, <vscale
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v10, v10, a3, v0.t
; RV32-NEXT: vor.vv v9, v9, v10, v0.t
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v10, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v10, v10, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v11, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT: vand.vv v12, v8, v11, v0.t
+; RV32-NEXT: vand.vx v11, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v11, v11, 24, v0.t
+; RV32-NEXT: vand.vv v12, v8, v10, v0.t
; RV32-NEXT: vsll.vi v12, v12, 8, v0.t
-; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: vor.vv v9, v9, v10, v0.t
-; RV32-NEXT: vsrl.vx v10, v8, a1, v0.t
+; RV32-NEXT: vor.vv v11, v11, v12, v0.t
+; RV32-NEXT: vor.vv v9, v9, v11, v0.t
+; RV32-NEXT: vsrl.vx v11, v8, a1, v0.t
; RV32-NEXT: vsrl.vx v12, v8, a3, v0.t
; RV32-NEXT: vand.vx v12, v12, a2, v0.t
-; RV32-NEXT: vor.vv v10, v12, v10, v0.t
+; RV32-NEXT: vor.vv v11, v12, v11, v0.t
; RV32-NEXT: vsrl.vi v12, v8, 24, v0.t
; RV32-NEXT: vand.vx v12, v12, a4, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v11, v0.t
+; RV32-NEXT: vand.vv v8, v8, v10, v0.t
; RV32-NEXT: vor.vv v8, v8, v12, v0.t
-; RV32-NEXT: vor.vv v8, v8, v10, v0.t
+; RV32-NEXT: vor.vv v8, v8, v11, v0.t
; RV32-NEXT: vor.vv v8, v9, v8, v0.t
; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
; RV32-NEXT: lui a1, 61681
@@ -1590,27 +1590,27 @@ define <vscale x 1 x i64> @vp_bitreverse_nxv1i64_unmasked(<vscale x 1 x i64> %va
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v10, v10, a3
; RV32-NEXT: vor.vv v9, v9, v10
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v10, v8, a4
-; RV32-NEXT: vsll.vi v10, v10, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v11, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT: vand.vv v12, v8, v11
+; RV32-NEXT: vand.vx v11, v8, a4
+; RV32-NEXT: vsll.vi v11, v11, 24
+; RV32-NEXT: vand.vv v12, v8, v10
; RV32-NEXT: vsll.vi v12, v12, 8
-; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: vor.vv v9, v9, v10
-; RV32-NEXT: vsrl.vx v10, v8, a1
+; RV32-NEXT: vor.vv v11, v11, v12
+; RV32-NEXT: vor.vv v9, v9, v11
+; RV32-NEXT: vsrl.vx v11, v8, a1
; RV32-NEXT: vsrl.vx v12, v8, a3
; RV32-NEXT: vand.vx v12, v12, a2
-; RV32-NEXT: vor.vv v10, v12, v10
+; RV32-NEXT: vor.vv v11, v12, v11
; RV32-NEXT: vsrl.vi v12, v8, 24
; RV32-NEXT: vand.vx v12, v12, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v11
+; RV32-NEXT: vand.vv v8, v8, v10
; RV32-NEXT: vor.vv v8, v8, v12
-; RV32-NEXT: vor.vv v8, v8, v10
+; RV32-NEXT: vor.vv v8, v8, v11
; RV32-NEXT: vor.vv v8, v9, v8
; RV32-NEXT: vsrl.vi v9, v8, 4
; RV32-NEXT: lui a1, 61681
@@ -1733,27 +1733,27 @@ define <vscale x 2 x i64> @vp_bitreverse_nxv2i64(<vscale x 2 x i64> %va, <vscale
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v12, v12, a3, v0.t
; RV32-NEXT: vor.vv v10, v10, v12, v0.t
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v12, v12, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v14, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v14, v0.t
+; RV32-NEXT: vand.vx v14, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v14, v14, 24, v0.t
+; RV32-NEXT: vand.vv v16, v8, v12, v0.t
; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
-; RV32-NEXT: vor.vv v12, v12, v16, v0.t
-; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: vsrl.vx v12, v8, a1, v0.t
+; RV32-NEXT: vor.vv v14, v14, v16, v0.t
+; RV32-NEXT: vor.vv v10, v10, v14, v0.t
+; RV32-NEXT: vsrl.vx v14, v8, a1, v0.t
; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v12, v16, v12, v0.t
+; RV32-NEXT: vor.vv v14, v16, v14, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 24, v0.t
; RV32-NEXT: vand.vx v16, v16, a4, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v14, v0.t
+; RV32-NEXT: vand.vv v8, v8, v12, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
-; RV32-NEXT: vor.vv v8, v8, v12, v0.t
+; RV32-NEXT: vor.vv v8, v8, v14, v0.t
; RV32-NEXT: vor.vv v8, v10, v8, v0.t
; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t
; RV32-NEXT: lui a1, 61681
@@ -1874,27 +1874,27 @@ define <vscale x 2 x i64> @vp_bitreverse_nxv2i64_unmasked(<vscale x 2 x i64> %va
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v12, v12, a3
; RV32-NEXT: vor.vv v10, v10, v12
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4
-; RV32-NEXT: vsll.vi v12, v12, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v14, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v14
+; RV32-NEXT: vand.vx v14, v8, a4
+; RV32-NEXT: vsll.vi v14, v14, 24
+; RV32-NEXT: vand.vv v16, v8, v12
; RV32-NEXT: vsll.vi v16, v16, 8
-; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: vsrl.vx v12, v8, a1
+; RV32-NEXT: vor.vv v14, v14, v16
+; RV32-NEXT: vor.vv v10, v10, v14
+; RV32-NEXT: vsrl.vx v14, v8, a1
; RV32-NEXT: vsrl.vx v16, v8, a3
; RV32-NEXT: vand.vx v16, v16, a2
-; RV32-NEXT: vor.vv v12, v16, v12
+; RV32-NEXT: vor.vv v14, v16, v14
; RV32-NEXT: vsrl.vi v16, v8, 24
; RV32-NEXT: vand.vx v16, v16, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v14
+; RV32-NEXT: vand.vv v8, v8, v12
; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vor.vv v8, v8, v12
+; RV32-NEXT: vor.vv v8, v8, v14
; RV32-NEXT: vor.vv v8, v10, v8
; RV32-NEXT: vsrl.vi v10, v8, 4
; RV32-NEXT: lui a1, 61681
@@ -2017,13 +2017,13 @@ define <vscale x 4 x i64> @vp_bitreverse_nxv4i64(<vscale x 4 x i64> %va, <vscale
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v16, v16, a3, v0.t
; RV32-NEXT: vor.vv v16, v12, v16, v0.t
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v20, v12, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vx v20, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v20, v20, 24, v0.t
; RV32-NEXT: vand.vv v24, v8, v12, v0.t
; RV32-NEXT: vsll.vi v24, v24, 8, v0.t
; RV32-NEXT: vor.vv v20, v20, v24, v0.t
@@ -2158,27 +2158,27 @@ define <vscale x 4 x i64> @vp_bitreverse_nxv4i64_unmasked(<vscale x 4 x i64> %va
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v16, v16, a3
; RV32-NEXT: vor.vv v12, v12, v16
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v16, v16, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v20, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v20
+; RV32-NEXT: vand.vx v20, v8, a4
+; RV32-NEXT: vsll.vi v20, v20, 24
+; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
-; RV32-NEXT: vor.vv v16, v16, v24
-; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: vsrl.vx v16, v8, a1
+; RV32-NEXT: vor.vv v20, v20, v24
+; RV32-NEXT: vor.vv v12, v12, v20
+; RV32-NEXT: vsrl.vx v20, v8, a1
; RV32-NEXT: vsrl.vx v24, v8, a3
; RV32-NEXT: vand.vx v24, v24, a2
-; RV32-NEXT: vor.vv v16, v24, v16
+; RV32-NEXT: vor.vv v20, v24, v20
; RV32-NEXT: vsrl.vi v24, v8, 24
; RV32-NEXT: vand.vx v24, v24, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v20
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: vor.vv v8, v8, v16
+; RV32-NEXT: vor.vv v8, v8, v20
; RV32-NEXT: vor.vv v8, v12, v8
; RV32-NEXT: vsrl.vi v12, v8, 4
; RV32-NEXT: lui a1, 61681
@@ -2311,20 +2311,23 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64(<vscale x 7 x i64> %va, <vscale
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v24, v16, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
-; RV32-NEXT: csrr a5, vlenb
-; RV32-NEXT: slli a5, a5, 3
-; RV32-NEXT: add a5, sp, a5
-; RV32-NEXT: addi a5, a5, 16
-; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
-; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT: vand.vx v24, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT: addi a5, sp, 16
+; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
+; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a5, vlenb
; RV32-NEXT: slli a5, a5, 4
@@ -2337,10 +2340,10 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64(<vscale x 7 x i64> %va, <vscale
; RV32-NEXT: add a5, sp, a5
; RV32-NEXT: addi a5, a5, 16
; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vx v24, v8, a1, v0.t
-; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
-; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
+; RV32-NEXT: vsrl.vx v24, v8, a3, v0.t
+; RV32-NEXT: vand.vx v24, v24, a2, v0.t
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t
@@ -2508,13 +2511,13 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64_unmasked(<vscale x 7 x i64> %va
; RV32-NEXT: vor.vv v16, v16, v24
; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v0, v16, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vx v0, v8, a4
+; RV32-NEXT: vsll.vi v0, v0, 24
; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
; RV32-NEXT: vor.vv v24, v0, v24
@@ -2669,20 +2672,23 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64(<vscale x 8 x i64> %va, <vscale
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v24, v16, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
-; RV32-NEXT: csrr a5, vlenb
-; RV32-NEXT: slli a5, a5, 3
-; RV32-NEXT: add a5, sp, a5
-; RV32-NEXT: addi a5, a5, 16
-; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
-; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT: vand.vx v24, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT: addi a5, sp, 16
+; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
+; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a5, vlenb
; RV32-NEXT: slli a5, a5, 4
@@ -2695,10 +2701,10 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64(<vscale x 8 x i64> %va, <vscale
; RV32-NEXT: add a5, sp, a5
; RV32-NEXT: addi a5, a5, 16
; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vx v24, v8, a1, v0.t
-; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
-; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
+; RV32-NEXT: vsrl.vx v24, v8, a3, v0.t
+; RV32-NEXT: vand.vx v24, v24, a2, v0.t
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t
@@ -2866,13 +2872,13 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64_unmasked(<vscale x 8 x i64> %va
; RV32-NEXT: vor.vv v16, v16, v24
; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v0, v16, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vx v0, v8, a4
+; RV32-NEXT: vsll.vi v0, v0, 24
; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
; RV32-NEXT: vor.vv v24, v0, v24
@@ -3056,13 +3062,13 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: .LBB46_2:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsrl.vi v16, v8, 8, v0.t
; CHECK-NEXT: vsll.vi v8, v8, 8, v0.t
; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
@@ -3107,8 +3113,8 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB46_2:
-; CHECK-ZVBB-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
+; CHECK-ZVBB-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVBB-NEXT: vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT: ret
%v = call <vscale x 64 x i16> @llvm.vp.bitreverse.nxv64i16(<vscale x 64 x i16> %va, <vscale x 64 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
index 800d06c5a78f..aadd9852af11 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
@@ -525,27 +525,27 @@ define <vscale x 1 x i64> @vp_bswap_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v10, v10, a3, v0.t
; RV32-NEXT: vor.vv v9, v9, v10, v0.t
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v10, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v10, v10, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v11, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT: vand.vv v12, v8, v11, v0.t
+; RV32-NEXT: vand.vx v11, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v11, v11, 24, v0.t
+; RV32-NEXT: vand.vv v12, v8, v10, v0.t
; RV32-NEXT: vsll.vi v12, v12, 8, v0.t
-; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: vor.vv v9, v9, v10, v0.t
-; RV32-NEXT: vsrl.vx v10, v8, a1, v0.t
+; RV32-NEXT: vor.vv v11, v11, v12, v0.t
+; RV32-NEXT: vor.vv v9, v9, v11, v0.t
+; RV32-NEXT: vsrl.vx v11, v8, a1, v0.t
; RV32-NEXT: vsrl.vx v12, v8, a3, v0.t
; RV32-NEXT: vand.vx v12, v12, a2, v0.t
-; RV32-NEXT: vor.vv v10, v12, v10, v0.t
+; RV32-NEXT: vor.vv v11, v12, v11, v0.t
; RV32-NEXT: vsrl.vi v12, v8, 24, v0.t
; RV32-NEXT: vand.vx v12, v12, a4, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v11, v0.t
+; RV32-NEXT: vand.vv v8, v8, v10, v0.t
; RV32-NEXT: vor.vv v8, v8, v12, v0.t
-; RV32-NEXT: vor.vv v8, v8, v10, v0.t
+; RV32-NEXT: vor.vv v8, v8, v11, v0.t
; RV32-NEXT: vor.vv v8, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -609,27 +609,27 @@ define <vscale x 1 x i64> @vp_bswap_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v10, v10, a3
; RV32-NEXT: vor.vv v9, v9, v10
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v10, v8, a4
-; RV32-NEXT: vsll.vi v10, v10, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v11, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT: vand.vv v12, v8, v11
+; RV32-NEXT: vand.vx v11, v8, a4
+; RV32-NEXT: vsll.vi v11, v11, 24
+; RV32-NEXT: vand.vv v12, v8, v10
; RV32-NEXT: vsll.vi v12, v12, 8
-; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: vor.vv v9, v9, v10
-; RV32-NEXT: vsrl.vx v10, v8, a1
+; RV32-NEXT: vor.vv v11, v11, v12
+; RV32-NEXT: vor.vv v9, v9, v11
+; RV32-NEXT: vsrl.vx v11, v8, a1
; RV32-NEXT: vsrl.vx v12, v8, a3
; RV32-NEXT: vand.vx v12, v12, a2
-; RV32-NEXT: vor.vv v10, v12, v10
+; RV32-NEXT: vor.vv v11, v12, v11
; RV32-NEXT: vsrl.vi v12, v8, 24
; RV32-NEXT: vand.vx v12, v12, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v11
+; RV32-NEXT: vand.vv v8, v8, v10
; RV32-NEXT: vor.vv v8, v8, v12
-; RV32-NEXT: vor.vv v8, v8, v10
+; RV32-NEXT: vor.vv v8, v8, v11
; RV32-NEXT: vor.vv v8, v9, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -695,27 +695,27 @@ define <vscale x 2 x i64> @vp_bswap_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v12, v12, a3, v0.t
; RV32-NEXT: vor.vv v10, v10, v12, v0.t
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v12, v12, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v14, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v14, v0.t
+; RV32-NEXT: vand.vx v14, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v14, v14, 24, v0.t
+; RV32-NEXT: vand.vv v16, v8, v12, v0.t
; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
-; RV32-NEXT: vor.vv v12, v12, v16, v0.t
-; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: vsrl.vx v12, v8, a1, v0.t
+; RV32-NEXT: vor.vv v14, v14, v16, v0.t
+; RV32-NEXT: vor.vv v10, v10, v14, v0.t
+; RV32-NEXT: vsrl.vx v14, v8, a1, v0.t
; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v12, v16, v12, v0.t
+; RV32-NEXT: vor.vv v14, v16, v14, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 24, v0.t
; RV32-NEXT: vand.vx v16, v16, a4, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v14, v0.t
+; RV32-NEXT: vand.vv v8, v8, v12, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
-; RV32-NEXT: vor.vv v8, v8, v12, v0.t
+; RV32-NEXT: vor.vv v8, v8, v14, v0.t
; RV32-NEXT: vor.vv v8, v10, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -779,27 +779,27 @@ define <vscale x 2 x i64> @vp_bswap_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v12, v12, a3
; RV32-NEXT: vor.vv v10, v10, v12
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4
-; RV32-NEXT: vsll.vi v12, v12, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v14, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v14
+; RV32-NEXT: vand.vx v14, v8, a4
+; RV32-NEXT: vsll.vi v14, v14, 24
+; RV32-NEXT: vand.vv v16, v8, v12
; RV32-NEXT: vsll.vi v16, v16, 8
-; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: vsrl.vx v12, v8, a1
+; RV32-NEXT: vor.vv v14, v14, v16
+; RV32-NEXT: vor.vv v10, v10, v14
+; RV32-NEXT: vsrl.vx v14, v8, a1
; RV32-NEXT: vsrl.vx v16, v8, a3
; RV32-NEXT: vand.vx v16, v16, a2
-; RV32-NEXT: vor.vv v12, v16, v12
+; RV32-NEXT: vor.vv v14, v16, v14
; RV32-NEXT: vsrl.vi v16, v8, 24
; RV32-NEXT: vand.vx v16, v16, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v14
+; RV32-NEXT: vand.vv v8, v8, v12
; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vor.vv v8, v8, v12
+; RV32-NEXT: vor.vv v8, v8, v14
; RV32-NEXT: vor.vv v8, v10, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -865,13 +865,13 @@ define <vscale x 4 x i64> @vp_bswap_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v16, v16, a3, v0.t
; RV32-NEXT: vor.vv v16, v12, v16, v0.t
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v20, v12, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vx v20, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v20, v20, 24, v0.t
; RV32-NEXT: vand.vv v24, v8, v12, v0.t
; RV32-NEXT: vsll.vi v24, v24, 8, v0.t
; RV32-NEXT: vor.vv v20, v20, v24, v0.t
@@ -949,27 +949,27 @@ define <vscale x 4 x i64> @vp_bswap_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v16, v16, a3
; RV32-NEXT: vor.vv v12, v12, v16
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v16, v16, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v20, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v20
+; RV32-NEXT: vand.vx v20, v8, a4
+; RV32-NEXT: vsll.vi v20, v20, 24
+; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
-; RV32-NEXT: vor.vv v16, v16, v24
-; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: vsrl.vx v16, v8, a1
+; RV32-NEXT: vor.vv v20, v20, v24
+; RV32-NEXT: vor.vv v12, v12, v20
+; RV32-NEXT: vsrl.vx v20, v8, a1
; RV32-NEXT: vsrl.vx v24, v8, a3
; RV32-NEXT: vand.vx v24, v24, a2
-; RV32-NEXT: vor.vv v16, v24, v16
+; RV32-NEXT: vor.vv v20, v24, v20
; RV32-NEXT: vsrl.vi v24, v8, 24
; RV32-NEXT: vand.vx v24, v24, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v20
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: vor.vv v8, v8, v16
+; RV32-NEXT: vor.vv v8, v8, v20
; RV32-NEXT: vor.vv v8, v12, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -1045,20 +1045,23 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v24, v16, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
-; RV32-NEXT: csrr a5, vlenb
-; RV32-NEXT: slli a5, a5, 3
-; RV32-NEXT: add a5, sp, a5
-; RV32-NEXT: addi a5, a5, 16
-; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
-; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT: vand.vx v24, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
@@ -1071,10 +1074,10 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vx v24, v8, a1, v0.t
-; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
-; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
+; RV32-NEXT: vsrl.vx v24, v8, a3, v0.t
+; RV32-NEXT: vand.vx v24, v24, a2, v0.t
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t
@@ -1185,13 +1188,13 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32
; RV32-NEXT: vor.vv v16, v16, v24
; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v0, v16, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vx v0, v8, a4
+; RV32-NEXT: vsll.vi v0, v0, 24
; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
; RV32-NEXT: vor.vv v24, v0, v24
@@ -1288,20 +1291,23 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v24, v16, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
-; RV32-NEXT: csrr a5, vlenb
-; RV32-NEXT: slli a5, a5, 3
-; RV32-NEXT: add a5, sp, a5
-; RV32-NEXT: addi a5, a5, 16
-; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
-; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT: vand.vx v24, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
@@ -1314,10 +1320,10 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vx v24, v8, a1, v0.t
-; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
-; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
+; RV32-NEXT: vsrl.vx v24, v8, a3, v0.t
+; RV32-NEXT: vand.vx v24, v24, a2, v0.t
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t
@@ -1428,13 +1434,13 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
; RV32-NEXT: vor.vv v16, v16, v24
; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v0, v16, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vx v0, v8, a4
+; RV32-NEXT: vsll.vi v0, v0, 24
; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
; RV32-NEXT: vor.vv v24, v0, v24
@@ -1539,13 +1545,13 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsrl.vi v16, v8, 8, v0.t
; CHECK-NEXT: vsll.vi v8, v8, 8, v0.t
; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
@@ -1575,8 +1581,8 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
; CHECK-ZVKB-NEXT: # %bb.1:
; CHECK-ZVKB-NEXT: mv a0, a1
; CHECK-ZVKB-NEXT: .LBB32_2:
-; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVKB-NEXT: vmv1r.v v0, v24
+; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVKB-NEXT: vrev8.v v8, v8, v0.t
; CHECK-ZVKB-NEXT: ret
%v = call <vscale x 64 x i16> @llvm.vp.bswap.nxv64i16(<vscale x 64 x i16> %va, <vscale x 64 x i1> %m, i32 %evl)
@@ -1646,27 +1652,27 @@ define <vscale x 1 x i48> @vp_bswap_nxv1i48(<vscale x 1 x i48> %va, <vscale x 1
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v10, v10, a3, v0.t
; RV32-NEXT: vor.vv v9, v9, v10, v0.t
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v10, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v10, v10, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v11, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT: vand.vv v12, v8, v11, v0.t
+; RV32-NEXT: vand.vx v11, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v11, v11, 24, v0.t
+; RV32-NEXT: vand.vv v12, v8, v10, v0.t
; RV32-NEXT: vsll.vi v12, v12, 8, v0.t
-; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: vor.vv v9, v9, v10, v0.t
-; RV32-NEXT: vsrl.vx v10, v8, a1, v0.t
+; RV32-NEXT: vor.vv v11, v11, v12, v0.t
+; RV32-NEXT: vor.vv v9, v9, v11, v0.t
+; RV32-NEXT: vsrl.vx v11, v8, a1, v0.t
; RV32-NEXT: vsrl.vx v12, v8, a3, v0.t
; RV32-NEXT: vand.vx v12, v12, a2, v0.t
-; RV32-NEXT: vor.vv v10, v12, v10, v0.t
+; RV32-NEXT: vor.vv v11, v12, v11, v0.t
; RV32-NEXT: vsrl.vi v12, v8, 24, v0.t
; RV32-NEXT: vand.vx v12, v12, a4, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v11, v0.t
+; RV32-NEXT: vand.vv v8, v8, v10, v0.t
; RV32-NEXT: vor.vv v8, v8, v12, v0.t
-; RV32-NEXT: vor.vv v8, v8, v10, v0.t
+; RV32-NEXT: vor.vv v8, v8, v11, v0.t
; RV32-NEXT: vor.vv v8, v9, v8, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 16, v0.t
; RV32-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
index 0a7fa38b0c8a..2f0d5bb6e19c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -236,53 +236,40 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_param_nxv32i32_nxv32i32_nxv32i32
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: li a3, 24
-; CHECK-NEXT: mul a1, a1, a3
+; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a3, a2, a1
; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: vl8re32.v v8, (a1)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vl8re32.v v0, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT: vl8re32.v v8, (a3)
+; CHECK-NEXT: vl8re32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v0, (a1)
+; CHECK-NEXT: vl8re32.v v16, (a3)
; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vl8re32.v v16, (a2)
-; CHECK-NEXT: vadd.vv v0, v24, v0
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT: vadd.vv v24, v8, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vadd.vv v24, v24, v8
+; CHECK-NEXT: vadd.vv v0, v8, v0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vadd.vv v8, v24, v8
-; CHECK-NEXT: vadd.vv v24, v0, v16
+; CHECK-NEXT: vadd.vv v8, v0, v8
+; CHECK-NEXT: vadd.vv v24, v24, v16
; CHECK-NEXT: vadd.vx v16, v8, a4
; CHECK-NEXT: vadd.vx v8, v24, a4
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 24
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
index aa11e012af20..dec67721514d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
@@ -135,16 +135,16 @@ declare <vscale x 8 x half> @llvm.vp.ceil.nxv8f16(<vscale x 8 x half>, <vscale x
define <vscale x 8 x half> @vp_ceil_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -179,16 +179,16 @@ declare <vscale x 16 x half> @llvm.vp.ceil.nxv16f16(<vscale x 16 x half>, <vscal
define <vscale x 16 x half> @vp_ceil_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI8_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI8_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -223,16 +223,16 @@ declare <vscale x 32 x half> @llvm.vp.ceil.nxv32f16(<vscale x 32 x half>, <vscal
define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI10_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI10_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -359,8 +359,8 @@ define <vscale x 4 x float> @vp_ceil_vv_nxv4f32(<vscale x 4 x float> %va, <vscal
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -403,8 +403,8 @@ define <vscale x 8 x float> @vp_ceil_vv_nxv8f32(<vscale x 8 x float> %va, <vscal
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -447,8 +447,8 @@ define <vscale x 16 x float> @vp_ceil_vv_nxv16f32(<vscale x 16 x float> %va, <vs
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -525,16 +525,16 @@ declare <vscale x 2 x double> @llvm.vp.ceil.nxv2f64(<vscale x 2 x double>, <vsca
define <vscale x 2 x double> @vp_ceil_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -569,16 +569,16 @@ declare <vscale x 4 x double> @llvm.vp.ceil.nxv4f64(<vscale x 4 x double>, <vsca
define <vscale x 4 x double> @vp_ceil_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -613,16 +613,16 @@ declare <vscale x 7 x double> @llvm.vp.ceil.nxv7f64(<vscale x 7 x double>, <vsca
define <vscale x 7 x double> @vp_ceil_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI28_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -657,16 +657,16 @@ declare <vscale x 8 x double> @llvm.vp.ceil.nxv8f64(<vscale x 8 x double>, <vsca
define <vscale x 8 x double> @vp_ceil_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI30_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -705,66 +705,56 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64(<vscale x 16 x double> %va, <
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v25, v0, a2
+; CHECK-NEXT: vslidedown.vx v6, v0, a2
; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a2, 3
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB32_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll b/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll
index ed434deea1a8..482cf83d540c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll
@@ -10,19 +10,19 @@ define void @test(ptr %ref_array, ptr %sad_array) {
; RV32-NEXT: th.lwd a2, a3, (a0), 0, 3
; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV32-NEXT: vle8.v v8, (a2)
-; RV32-NEXT: vmv.v.i v9, 0
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vzext.vf4 v12, v8
; RV32-NEXT: vmv.s.x v8, zero
-; RV32-NEXT: vredsum.vs v10, v12, v8
-; RV32-NEXT: vmv.x.s a0, v10
+; RV32-NEXT: vredsum.vs v9, v12, v8
+; RV32-NEXT: vmv.x.s a0, v9
; RV32-NEXT: th.swia a0, (a1), 4, 0
; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; RV32-NEXT: vle8.v v10, (a3)
+; RV32-NEXT: vle8.v v9, (a3)
+; RV32-NEXT: vmv.v.i v10, 0
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV32-NEXT: vslideup.vi v10, v9, 4
+; RV32-NEXT: vslideup.vi v9, v10, 4
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf4 v12, v10
+; RV32-NEXT: vzext.vf4 v12, v9
; RV32-NEXT: vredsum.vs v8, v12, v8
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vse32.v v8, (a1)
@@ -33,19 +33,19 @@ define void @test(ptr %ref_array, ptr %sad_array) {
; RV64-NEXT: th.ldd a2, a3, (a0), 0, 4
; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64-NEXT: vle8.v v8, (a2)
-; RV64-NEXT: vmv.v.i v9, 0
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT: vzext.vf4 v12, v8
; RV64-NEXT: vmv.s.x v8, zero
-; RV64-NEXT: vredsum.vs v10, v12, v8
-; RV64-NEXT: vmv.x.s a0, v10
+; RV64-NEXT: vredsum.vs v9, v12, v8
+; RV64-NEXT: vmv.x.s a0, v9
; RV64-NEXT: th.swia a0, (a1), 4, 0
; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; RV64-NEXT: vle8.v v10, (a3)
+; RV64-NEXT: vle8.v v9, (a3)
+; RV64-NEXT: vmv.v.i v10, 0
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV64-NEXT: vslideup.vi v10, v9, 4
+; RV64-NEXT: vslideup.vi v9, v10, 4
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf4 v12, v10
+; RV64-NEXT: vzext.vf4 v12, v9
; RV64-NEXT: vredsum.vs v8, v12, v8
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vse32.v v8, (a1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
index 673008d9c0b3..52811133c53f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
@@ -197,28 +197,51 @@ entry:
define void @test_compresstore_v256i8(ptr %p, <256 x i1> %mask, <256 x i8> %data) {
; RV64-LABEL: test_compresstore_v256i8:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vmv1r.v v7, v8
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 4
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 3
+; RV64-NEXT: add a2, sp, a2
+; RV64-NEXT: addi a2, a2, 16
+; RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; RV64-NEXT: li a2, 128
; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; RV64-NEXT: vle8.v v24, (a1)
+; RV64-NEXT: vle8.v v16, (a1)
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vslidedown.vi v9, v0, 1
; RV64-NEXT: vmv.x.s a1, v9
; RV64-NEXT: vmv.x.s a3, v0
+; RV64-NEXT: csrr a4, vlenb
+; RV64-NEXT: slli a4, a4, 3
+; RV64-NEXT: add a4, sp, a4
+; RV64-NEXT: addi a4, a4, 16
+; RV64-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; RV64-NEXT: vcompress.vm v8, v16, v0
+; RV64-NEXT: vcompress.vm v16, v24, v0
; RV64-NEXT: vcpop.m a4, v0
; RV64-NEXT: vsetvli zero, a4, e8, m8, ta, ma
-; RV64-NEXT: vse8.v v8, (a0)
+; RV64-NEXT: vse8.v v16, (a0)
+; RV64-NEXT: addi a4, sp, 16
+; RV64-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; RV64-NEXT: vcompress.vm v8, v24, v7
-; RV64-NEXT: vcpop.m a2, v7
+; RV64-NEXT: vcompress.vm v16, v24, v8
+; RV64-NEXT: vcpop.m a2, v8
; RV64-NEXT: cpop a3, a3
; RV64-NEXT: cpop a1, a1
; RV64-NEXT: add a0, a0, a3
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; RV64-NEXT: vse8.v v8, (a0)
+; RV64-NEXT: vse8.v v16, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
;
; RV32-LABEL: test_compresstore_v256i8:
@@ -796,18 +819,18 @@ define void @test_compresstore_v32i64(ptr %p, <32 x i1> %mask, <32 x i64> %data)
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vse64.v v24, (a0)
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV64-NEXT: vslidedown.vi v24, v0, 2
+; RV64-NEXT: vslidedown.vi v8, v0, 2
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vcompress.vm v8, v16, v24
+; RV64-NEXT: vcompress.vm v24, v16, v8
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; RV64-NEXT: vmv.x.s a1, v0
; RV64-NEXT: zext.h a1, a1
; RV64-NEXT: cpopw a1, a1
; RV64-NEXT: slli a1, a1, 3
; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: vcpop.m a1, v24
+; RV64-NEXT: vcpop.m a1, v8
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
+; RV64-NEXT: vse64.v v24, (a0)
; RV64-NEXT: ret
;
; RV32-LABEL: test_compresstore_v32i64:
@@ -818,18 +841,18 @@ define void @test_compresstore_v32i64(ptr %p, <32 x i1> %mask, <32 x i64> %data)
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vse64.v v24, (a0)
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v0, 2
+; RV32-NEXT: vslidedown.vi v8, v0, 2
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vcompress.vm v8, v16, v24
+; RV32-NEXT: vcompress.vm v24, v16, v8
; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; RV32-NEXT: vmv.x.s a1, v0
; RV32-NEXT: zext.h a1, a1
; RV32-NEXT: cpop a1, a1
; RV32-NEXT: slli a1, a1, 3
; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: vcpop.m a1, v24
+; RV32-NEXT: vcpop.m a1, v8
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vse64.v v8, (a0)
+; RV32-NEXT: vse64.v v24, (a0)
; RV32-NEXT: ret
entry:
tail call void @llvm.masked.compressstore.v32i64(<32 x i64> %data, ptr align 8 %p, <32 x i1> %mask)
diff --git a/llvm/test/CodeGen/RISCV/rvv/concat-vector-insert-elt.ll b/llvm/test/CodeGen/RISCV/rvv/concat-vector-insert-elt.ll
index bd65ed52be68..1343b64b876d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/concat-vector-insert-elt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/concat-vector-insert-elt.ll
@@ -189,16 +189,16 @@ define void @v4xi64_concat_vector_insert_idx3(ptr %a, ptr %b, i64 %x) {
; RV32-LABEL: v4xi64_concat_vector_insert_idx3:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: vle64.v v10, (a1)
+; RV32-NEXT: vle64.v v8, (a1)
+; RV32-NEXT: vle64.v v10, (a0)
; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV32-NEXT: vslide1down.vx v9, v8, a2
; RV32-NEXT: vslide1down.vx v9, v9, a3
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vslideup.vi v10, v9, 1
+; RV32-NEXT: vslideup.vi v8, v9, 1
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT: vslideup.vi v8, v10, 2
-; RV32-NEXT: vse64.v v8, (a0)
+; RV32-NEXT: vslideup.vi v10, v8, 2
+; RV32-NEXT: vse64.v v10, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: v4xi64_concat_vector_insert_idx3:
diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
index 113154c0f985..7839b602706d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
@@ -19,18 +19,19 @@ define void @constant_folding_crash(ptr %v54, <4 x ptr> %lanes.a, <4 x ptr> %lan
; RV32-LABEL: constant_folding_crash:
; RV32: # %bb.0: # %entry
; RV32-NEXT: lw a0, 8(a0)
-; RV32-NEXT: vmv1r.v v10, v0
; RV32-NEXT: andi a0, a0, 1
; RV32-NEXT: seqz a0, a0
; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; RV32-NEXT: vmv.v.x v11, a0
-; RV32-NEXT: vmsne.vi v0, v11, 0
+; RV32-NEXT: vmv.v.x v10, a0
+; RV32-NEXT: vmsne.vi v10, v10, 0
+; RV32-NEXT: vmv1r.v v11, v0
+; RV32-NEXT: vmv1r.v v0, v10
; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32-NEXT: vmerge.vvm v8, v9, v8, v0
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; RV32-NEXT: vmv.v.i v8, 0
-; RV32-NEXT: vmv1r.v v0, v10
+; RV32-NEXT: vmv1r.v v0, v11
; RV32-NEXT: vmerge.vim v8, v8, 1, v0
; RV32-NEXT: vrgather.vi v9, v8, 0
; RV32-NEXT: vmsne.vi v0, v9, 0
@@ -42,18 +43,19 @@ define void @constant_folding_crash(ptr %v54, <4 x ptr> %lanes.a, <4 x ptr> %lan
; RV64-LABEL: constant_folding_crash:
; RV64: # %bb.0: # %entry
; RV64-NEXT: ld a0, 8(a0)
-; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: andi a0, a0, 1
; RV64-NEXT: seqz a0, a0
; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; RV64-NEXT: vmv.v.x v13, a0
-; RV64-NEXT: vmsne.vi v0, v13, 0
+; RV64-NEXT: vmv.v.x v12, a0
+; RV64-NEXT: vmsne.vi v12, v12, 0
+; RV64-NEXT: vmv1r.v v13, v0
+; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT: vmerge.vvm v8, v10, v8, v0
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; RV64-NEXT: vmv.v.i v8, 0
-; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vmv1r.v v0, v13
; RV64-NEXT: vmerge.vim v8, v8, 1, v0
; RV64-NEXT: vrgather.vi v9, v8, 0
; RV64-NEXT: vmsne.vi v0, v9, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
index 41ec102c34ef..6e538f3dfb38 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
@@ -3341,16 +3341,16 @@ define <vscale x 8 x i64> @ctlz_zero_undef_nxv8i64(<vscale x 8 x i64> %va) {
;
; RV32F-LABEL: ctlz_zero_undef_nxv8i64:
; RV32F: # %bb.0:
-; RV32F-NEXT: vmv8r.v v16, v8
; RV32F-NEXT: li a0, 190
; RV32F-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32F-NEXT: vmv.v.x v8, a0
+; RV32F-NEXT: vmv.v.x v16, a0
; RV32F-NEXT: fsrmi a0, 1
; RV32F-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV32F-NEXT: vfncvt.f.xu.w v24, v16
-; RV32F-NEXT: vsrl.vi v16, v24, 23
-; RV32F-NEXT: vwsubu.wv v8, v8, v16
+; RV32F-NEXT: vfncvt.f.xu.w v24, v8
+; RV32F-NEXT: vsrl.vi v8, v24, 23
+; RV32F-NEXT: vwsubu.wv v16, v16, v8
; RV32F-NEXT: fsrm a0
+; RV32F-NEXT: vmv8r.v v8, v16
; RV32F-NEXT: ret
;
; RV64F-LABEL: ctlz_zero_undef_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
index 86086f5dc88f..fff280c005b5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
@@ -1259,8 +1259,8 @@ define <vscale x 16 x i64> @vp_ctlz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB46_2:
; CHECK-NEXT: fsrmi a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t
; CHECK-NEXT: vsrl.vx v8, v8, a2, v0.t
; CHECK-NEXT: vrsub.vx v8, v8, a3, v0.t
@@ -1285,8 +1285,8 @@ define <vscale x 16 x i64> @vp_ctlz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB46_2:
-; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
+; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vclz.v v8, v8, v0.t
; CHECK-ZVBB-NEXT: ret
%v = call <vscale x 16 x i64> @llvm.vp.ctlz.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
@@ -2487,8 +2487,8 @@ define <vscale x 16 x i64> @vp_ctlz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB94_2:
; CHECK-NEXT: fsrmi a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t
; CHECK-NEXT: vsrl.vx v8, v8, a2, v0.t
; CHECK-NEXT: vrsub.vx v8, v8, a3, v0.t
@@ -2512,8 +2512,8 @@ define <vscale x 16 x i64> @vp_ctlz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB94_2:
-; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
+; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vclz.v v8, v8, v0.t
; CHECK-ZVBB-NEXT: ret
%v = call <vscale x 16 x i64> @llvm.vp.ctlz.nxv16i64(<vscale x 16 x i64> %va, i1 true, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
index 883f68aec1f4..e3c53212e91b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
@@ -2024,8 +2024,7 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 56 * vlenb
; RV32-NEXT: vmv1r.v v24, v0
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 40
-; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: slli a1, a1, 5
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -2044,30 +2043,35 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: addi a3, a3, -1
; RV32-NEXT: and a2, a3, a2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT: lui a3, 349525
; RV32-NEXT: addi a3, a3, 1365
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a3
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a3
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vv v8, v16, v8, v0.t
+; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 16
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vsub.vv v8, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
@@ -2078,64 +2082,64 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: lui a3, 209715
; RV32-NEXT: addi a3, a3, 819
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a3
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a3
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: lui a3, 61681
; RV32-NEXT: addi a3, a3, -241
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a3
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a3
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v8, v16, v0.t
; RV32-NEXT: lui a3, 4112
; RV32-NEXT: addi a3, a3, 257
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v8, a3
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 16
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 4
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 16
-; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vmul.vv v8, v16, v8, v0.t
+; RV32-NEXT: vmul.vv v16, v16, v8, v0.t
; RV32-NEXT: li a2, 56
-; RV32-NEXT: vsrl.vx v8, v8, a2, v0.t
+; RV32-NEXT: vsrl.vx v8, v16, a2, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
@@ -2145,14 +2149,13 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB46_2:
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 48
-; RV32-NEXT: mul a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: li a3, 48
+; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
@@ -2161,17 +2164,17 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v8, v16, v8, v0.t
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v16, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsub.vv v8, v16, v8, v0.t
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
; RV32-NEXT: mul a0, a0, a1
@@ -2183,41 +2186,41 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 40
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
@@ -2303,13 +2306,13 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 3
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV64-NEXT: vand.vx v16, v16, a2, v0.t
; RV64-NEXT: vsub.vv v16, v8, v16, v0.t
@@ -2347,8 +2350,8 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB46_2:
-; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
+; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vcpop.v v8, v8, v0.t
; CHECK-ZVBB-NEXT: ret
%v = call <vscale x 16 x i64> @llvm.vp.ctpop.nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i1> %m, i32 %evl)
@@ -2375,13 +2378,13 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64_unmasked(<vscale x 16 x i64> %va,
; RV32-NEXT: addi a3, a3, 1365
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v0, a3
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
; RV32-NEXT: vs8r.v v0, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vand.vv v24, v24, v0
; RV32-NEXT: vsub.vv v24, v16, v24
; RV32-NEXT: lui a3, 209715
@@ -2404,20 +2407,20 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64_unmasked(<vscale x 16 x i64> %va,
; RV32-NEXT: addi a3, a3, -241
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v24, a3
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vand.vv v16, v16, v24
; RV32-NEXT: lui a3, 4112
; RV32-NEXT: addi a3, a3, 257
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v24, a3
+; RV32-NEXT: addi a3, sp, 16
+; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; RV32-NEXT: vmul.vv v16, v16, v24
; RV32-NEXT: li a2, 56
; RV32-NEXT: vsrl.vx v16, v16, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
index 4a001662ce2c..0ef0a431dabc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
@@ -2282,7 +2282,6 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: addi a4, a4, 1365
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v8, a4
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: li a5, 24
; RV32-NEXT: mul a4, a4, a5
@@ -2295,6 +2294,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vand.vv v8, v16, v8, v0.t
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: li a5, 40
@@ -2312,82 +2312,81 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: lui a4, 209715
; RV32-NEXT: addi a4, a4, 819
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a4
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a4
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: li a5, 40
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: slli a4, a4, 4
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: li a5, 40
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload
-; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
+; RV32-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: li a5, 48
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: slli a4, a4, 4
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: lui a4, 61681
; RV32-NEXT: addi a4, a4, -241
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a4
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a4
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: slli a4, a4, 4
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v8, v16, v0.t
; RV32-NEXT: lui a4, 4112
; RV32-NEXT: addi a4, a4, 257
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v8, a4
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 3
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vmul.vv v8, v16, v8, v0.t
+; RV32-NEXT: vmul.vv v16, v16, v8, v0.t
; RV32-NEXT: li a3, 56
-; RV32-NEXT: vsrl.vx v8, v8, a3, v0.t
+; RV32-NEXT: vsrl.vx v8, v16, a3, v0.t
; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
; RV32-NEXT: bltu a0, a1, .LBB46_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB46_2:
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsub.vx v8, v16, a2, v0.t
-; RV32-NEXT: vnot.v v16, v16, v0.t
-; RV32-NEXT: vand.vv v8, v16, v8, v0.t
+; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsub.vx v16, v8, a2, v0.t
+; RV32-NEXT: vnot.v v8, v8, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add a0, sp, a0
@@ -2549,13 +2548,12 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB46_2:
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: slli a1, a1, 3
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vsub.vx v16, v8, a2, v0.t
; RV64-NEXT: vnot.v v8, v8, v0.t
; RV64-NEXT: vand.vv v8, v8, v16, v0.t
@@ -2596,8 +2594,8 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB46_2:
-; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
+; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vctz.v v8, v8, v0.t
; CHECK-ZVBB-NEXT: ret
%v = call <vscale x 16 x i64> @llvm.vp.cttz.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
@@ -2628,98 +2626,97 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64_unmasked(<vscale x 16 x i64> %va, i
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vsub.vx v8, v16, a2
; RV32-NEXT: vnot.v v16, v16
-; RV32-NEXT: vand.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v24, v8, 1
+; RV32-NEXT: vand.vv v16, v16, v8
+; RV32-NEXT: vsrl.vi v24, v16, 1
; RV32-NEXT: lui a4, 349525
; RV32-NEXT: addi a4, a4, 1365
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v16, a4
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV32-NEXT: vmv.v.x v8, a4
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: li a5, 24
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vv v24, v24, v16
-; RV32-NEXT: vsub.vv v8, v8, v24
+; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v24, v24, v8
+; RV32-NEXT: vsub.vv v16, v16, v24
; RV32-NEXT: lui a4, 209715
; RV32-NEXT: addi a4, a4, 819
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v0, a4
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v0
-; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v24, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v24
+; RV32-NEXT: vand.vv v24, v16, v0
+; RV32-NEXT: vsrl.vi v16, v16, 2
+; RV32-NEXT: vand.vv v16, v16, v0
+; RV32-NEXT: vadd.vv v16, v24, v16
+; RV32-NEXT: vsrl.vi v24, v16, 4
+; RV32-NEXT: vadd.vv v16, v16, v24
; RV32-NEXT: lui a4, 61681
; RV32-NEXT: addi a4, a4, -241
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v16, a4
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV32-NEXT: vmv.v.x v8, a4
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: slli a4, a4, 4
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vv v16, v8, v16
+; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v16, v8
; RV32-NEXT: lui a4, 4112
; RV32-NEXT: addi a4, a4, 257
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a4
+; RV32-NEXT: vmv.v.x v16, a4
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 3
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vmul.vv v16, v16, v8
+; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: li a3, 56
-; RV32-NEXT: vsrl.vx v8, v16, a3
+; RV32-NEXT: vsrl.vx v8, v8, a3
; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
; RV32-NEXT: bltu a0, a1, .LBB47_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB47_2:
+; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsub.vx v16, v24, a2
+; RV32-NEXT: vsub.vx v8, v24, a2
; RV32-NEXT: vnot.v v24, v24
-; RV32-NEXT: vand.vv v16, v24, v16
-; RV32-NEXT: vsrl.vi v24, v16, 1
+; RV32-NEXT: vand.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 1
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v24, v24, v8
-; RV32-NEXT: vsub.vv v16, v16, v24
-; RV32-NEXT: vand.vv v24, v16, v0
-; RV32-NEXT: vsrl.vi v16, v16, 2
-; RV32-NEXT: vand.vv v16, v16, v0
-; RV32-NEXT: vadd.vv v16, v24, v16
-; RV32-NEXT: vsrl.vi v24, v16, 4
-; RV32-NEXT: vadd.vv v16, v16, v24
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vsub.vv v8, v8, v24
+; RV32-NEXT: vand.vv v24, v8, v0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v0
+; RV32-NEXT: vadd.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v24
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v16, v8
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmul.vv v8, v16, v8
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: vsrl.vx v8, v8, a3
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
@@ -4038,13 +4035,12 @@ define <vscale x 16 x i64> @vp_cttz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB94_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT: vand.vv v8, v8, v16, v0.t
; CHECK-NEXT: fsrmi a0, 1
@@ -4077,8 +4073,8 @@ define <vscale x 16 x i64> @vp_cttz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB94_2:
-; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
+; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vctz.v v8, v8, v0.t
; CHECK-ZVBB-NEXT: ret
%v = call <vscale x 16 x i64> @llvm.vp.cttz.nxv16i64(<vscale x 16 x i64> %va, i1 true, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
index e69b4789a09a..14719e190a69 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
@@ -78,7 +78,6 @@ define i1 @extractelt_nxv16i1(ptr %x, i64 %idx) nounwind {
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -96,7 +95,6 @@ define i1 @extractelt_nxv32i1(ptr %x, i64 %idx) nounwind {
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -114,7 +112,6 @@ define i1 @extractelt_nxv64i1(ptr %x, i64 %idx) nounwind {
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -142,22 +139,22 @@ define i1 @extractelt_nxv128i1(ptr %x, i64 %idx) nounwind {
; RV32-NEXT: slli a3, a3, 4
; RV32-NEXT: sub sp, sp, a3
; RV32-NEXT: andi sp, sp, -64
-; RV32-NEXT: addi a3, sp, 64
; RV32-NEXT: slli a2, a2, 3
-; RV32-NEXT: add a4, a0, a2
-; RV32-NEXT: vl8r.v v16, (a4)
+; RV32-NEXT: add a3, a0, a2
+; RV32-NEXT: vl8r.v v16, (a3)
; RV32-NEXT: vl8r.v v24, (a0)
-; RV32-NEXT: add a1, a3, a1
-; RV32-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; RV32-NEXT: addi a0, sp, 64
+; RV32-NEXT: add a1, a0, a1
+; RV32-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; RV32-NEXT: vmseq.vi v8, v16, 0
; RV32-NEXT: vmseq.vi v0, v24, 0
; RV32-NEXT: vmv.v.i v16, 0
; RV32-NEXT: vmerge.vim v24, v16, 1, v0
-; RV32-NEXT: vs8r.v v24, (a3)
-; RV32-NEXT: add a2, a3, a2
+; RV32-NEXT: vs8r.v v24, (a0)
+; RV32-NEXT: add a0, a0, a2
; RV32-NEXT: vmv1r.v v0, v8
; RV32-NEXT: vmerge.vim v8, v16, 1, v0
-; RV32-NEXT: vs8r.v v8, (a2)
+; RV32-NEXT: vs8r.v v8, (a0)
; RV32-NEXT: lbu a0, 0(a1)
; RV32-NEXT: addi sp, s0, -80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
@@ -182,22 +179,22 @@ define i1 @extractelt_nxv128i1(ptr %x, i64 %idx) nounwind {
; RV64-NEXT: slli a3, a3, 4
; RV64-NEXT: sub sp, sp, a3
; RV64-NEXT: andi sp, sp, -64
-; RV64-NEXT: addi a3, sp, 64
; RV64-NEXT: slli a2, a2, 3
-; RV64-NEXT: add a4, a0, a2
-; RV64-NEXT: vl8r.v v16, (a4)
+; RV64-NEXT: add a3, a0, a2
+; RV64-NEXT: vl8r.v v16, (a3)
; RV64-NEXT: vl8r.v v24, (a0)
-; RV64-NEXT: add a1, a3, a1
-; RV64-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; RV64-NEXT: addi a0, sp, 64
+; RV64-NEXT: add a1, a0, a1
+; RV64-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; RV64-NEXT: vmseq.vi v8, v16, 0
; RV64-NEXT: vmseq.vi v0, v24, 0
; RV64-NEXT: vmv.v.i v16, 0
; RV64-NEXT: vmerge.vim v24, v16, 1, v0
-; RV64-NEXT: vs8r.v v24, (a3)
-; RV64-NEXT: add a2, a3, a2
+; RV64-NEXT: vs8r.v v24, (a0)
+; RV64-NEXT: add a0, a0, a2
; RV64-NEXT: vmv1r.v v0, v8
; RV64-NEXT: vmerge.vim v8, v16, 1, v0
-; RV64-NEXT: vs8r.v v8, (a2)
+; RV64-NEXT: vs8r.v v8, (a0)
; RV64-NEXT: lbu a0, 0(a1)
; RV64-NEXT: addi sp, s0, -80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
index 875f4f239028..6b8d778bc324 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
@@ -35,10 +35,10 @@ define <512 x i8> @single_source(<512 x i8> %a) {
; CHECK-NEXT: vslidedown.vi v16, v16, 4
; CHECK-NEXT: li a0, 466
; CHECK-NEXT: li a1, 465
+; CHECK-NEXT: lbu a2, 1012(sp)
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
-; CHECK-NEXT: lbu a0, 1012(sp)
; CHECK-NEXT: vslideup.vx v8, v16, a1
-; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: vmv.s.x v16, a2
; CHECK-NEXT: li a0, 501
; CHECK-NEXT: li a1, 500
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
@@ -118,16 +118,16 @@ define <512 x i8> @two_source(<512 x i8> %a, <512 x i8> %b) {
; CHECK-NEXT: vslidedown.vi v24, v24, 4
; CHECK-NEXT: li a1, 466
; CHECK-NEXT: li a2, 465
+; CHECK-NEXT: lbu a3, 985(sp)
; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
-; CHECK-NEXT: lbu a1, 985(sp)
; CHECK-NEXT: vslideup.vx v8, v24, a2
-; CHECK-NEXT: vmv.s.x v24, a1
+; CHECK-NEXT: vmv.s.x v24, a3
; CHECK-NEXT: li a1, 478
; CHECK-NEXT: li a2, 477
+; CHECK-NEXT: lbu a3, 1012(sp)
; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
-; CHECK-NEXT: lbu a1, 1012(sp)
; CHECK-NEXT: vslideup.vx v8, v24, a2
-; CHECK-NEXT: vmv.s.x v24, a1
+; CHECK-NEXT: vmv.s.x v24, a3
; CHECK-NEXT: li a1, 501
; CHECK-NEXT: li a2, 500
; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
@@ -137,21 +137,21 @@ define <512 x i8> @two_source(<512 x i8> %a, <512 x i8> %b) {
; CHECK-NEXT: addi a1, a1, 501
; CHECK-NEXT: slli a1, a1, 13
; CHECK-NEXT: addi a1, a1, 512
+; CHECK-NEXT: vsetivli zero, 8, e64, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v24, 0
+; CHECK-NEXT: lui a2, 1047552
+; CHECK-NEXT: addiw a2, a2, 1
+; CHECK-NEXT: slli a2, a2, 23
+; CHECK-NEXT: addi a2, a2, 1
+; CHECK-NEXT: slli a2, a2, 18
+; CHECK-NEXT: vslide1down.vx v0, v24, a2
+; CHECK-NEXT: lui a2, 4
+; CHECK-NEXT: vmv.s.x v24, a2
; CHECK-NEXT: li a2, 64
+; CHECK-NEXT: vsetivli zero, 7, e64, m1, tu, ma
+; CHECK-NEXT: vslideup.vi v0, v24, 6
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv.v.x v24, a1
-; CHECK-NEXT: vsetivli zero, 8, e64, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v7, 0
-; CHECK-NEXT: lui a1, 1047552
-; CHECK-NEXT: addiw a1, a1, 1
-; CHECK-NEXT: slli a1, a1, 23
-; CHECK-NEXT: addi a1, a1, 1
-; CHECK-NEXT: slli a1, a1, 18
-; CHECK-NEXT: vslide1down.vx v0, v7, a1
-; CHECK-NEXT: lui a1, 4
-; CHECK-NEXT: vmv.s.x v7, a1
-; CHECK-NEXT: vsetivli zero, 7, e64, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v0, v7, 6
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, s0, -1536
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
index c0d366760d07..f3e823562888 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
@@ -417,8 +417,8 @@ declare <32 x i64> @llvm.vp.abs.v32i64(<32 x i64>, i1 immarg, <32 x i1>, i32)
define <32 x i64> @vp_abs_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v32i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB34_2
@@ -432,8 +432,8 @@ define <32 x i64> @vp_abs_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl)
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vrsub.vi v24, v16, 0, v0.t
; CHECK-NEXT: vmax.vv v16, v16, v24, v0.t
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
index 943fc58d637a..068c25b82100 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
@@ -847,27 +847,27 @@ define <2 x i64> @vp_bitreverse_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %e
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v10, v10, a3, v0.t
; RV32-NEXT: vor.vv v9, v9, v10, v0.t
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v10, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v10, v10, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v11, (a5), zero
+; RV32-NEXT: vlse64.v v10, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT: vand.vv v12, v8, v11, v0.t
+; RV32-NEXT: vand.vx v11, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v11, v11, 24, v0.t
+; RV32-NEXT: vand.vv v12, v8, v10, v0.t
; RV32-NEXT: vsll.vi v12, v12, 8, v0.t
-; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: vor.vv v9, v9, v10, v0.t
-; RV32-NEXT: vsrl.vx v10, v8, a1, v0.t
+; RV32-NEXT: vor.vv v11, v11, v12, v0.t
+; RV32-NEXT: vor.vv v9, v9, v11, v0.t
+; RV32-NEXT: vsrl.vx v11, v8, a1, v0.t
; RV32-NEXT: vsrl.vx v12, v8, a3, v0.t
; RV32-NEXT: vand.vx v12, v12, a2, v0.t
-; RV32-NEXT: vor.vv v10, v12, v10, v0.t
+; RV32-NEXT: vor.vv v11, v12, v11, v0.t
; RV32-NEXT: vsrl.vi v12, v8, 24, v0.t
; RV32-NEXT: vand.vx v12, v12, a4, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v11, v0.t
+; RV32-NEXT: vand.vv v8, v8, v10, v0.t
; RV32-NEXT: vor.vv v8, v8, v12, v0.t
-; RV32-NEXT: vor.vv v8, v8, v10, v0.t
+; RV32-NEXT: vor.vv v8, v8, v11, v0.t
; RV32-NEXT: vor.vv v8, v9, v8, v0.t
; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
; RV32-NEXT: lui a1, 61681
@@ -982,27 +982,27 @@ define <2 x i64> @vp_bitreverse_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl)
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v10, v10, a3
; RV32-NEXT: vor.vv v9, v9, v10
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v10, v8, a4
-; RV32-NEXT: vsll.vi v10, v10, 24
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v11, (a5), zero
+; RV32-NEXT: vlse64.v v10, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT: vand.vv v12, v8, v11
+; RV32-NEXT: vand.vx v11, v8, a4
+; RV32-NEXT: vsll.vi v11, v11, 24
+; RV32-NEXT: vand.vv v12, v8, v10
; RV32-NEXT: vsll.vi v12, v12, 8
-; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: vor.vv v9, v9, v10
-; RV32-NEXT: vsrl.vx v10, v8, a1
+; RV32-NEXT: vor.vv v11, v11, v12
+; RV32-NEXT: vor.vv v9, v9, v11
+; RV32-NEXT: vsrl.vx v11, v8, a1
; RV32-NEXT: vsrl.vx v12, v8, a3
; RV32-NEXT: vand.vx v12, v12, a2
-; RV32-NEXT: vor.vv v10, v12, v10
+; RV32-NEXT: vor.vv v11, v12, v11
; RV32-NEXT: vsrl.vi v12, v8, 24
; RV32-NEXT: vand.vx v12, v12, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v11
+; RV32-NEXT: vand.vv v8, v8, v10
; RV32-NEXT: vor.vv v8, v8, v12
-; RV32-NEXT: vor.vv v8, v8, v10
+; RV32-NEXT: vor.vv v8, v8, v11
; RV32-NEXT: vor.vv v8, v9, v8
; RV32-NEXT: vsrl.vi v9, v8, 4
; RV32-NEXT: lui a1, 61681
@@ -1119,27 +1119,27 @@ define <4 x i64> @vp_bitreverse_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %e
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v12, v12, a3, v0.t
; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v12, v12, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v14, (a5), zero
+; RV32-NEXT: vlse64.v v12, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v14, v0.t
+; RV32-NEXT: vand.vx v14, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v14, v14, 24, v0.t
+; RV32-NEXT: vand.vv v16, v8, v12, v0.t
; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
-; RV32-NEXT: vor.vv v12, v12, v16, v0.t
-; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: vsrl.vx v12, v8, a1, v0.t
+; RV32-NEXT: vor.vv v14, v14, v16, v0.t
+; RV32-NEXT: vor.vv v10, v10, v14, v0.t
+; RV32-NEXT: vsrl.vx v14, v8, a1, v0.t
; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v12, v16, v12, v0.t
+; RV32-NEXT: vor.vv v14, v16, v14, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 24, v0.t
; RV32-NEXT: vand.vx v16, v16, a4, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v14, v0.t
+; RV32-NEXT: vand.vv v8, v8, v12, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
-; RV32-NEXT: vor.vv v8, v8, v12, v0.t
+; RV32-NEXT: vor.vv v8, v8, v14, v0.t
; RV32-NEXT: vor.vv v8, v10, v8, v0.t
; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t
; RV32-NEXT: lui a1, 61681
@@ -1254,27 +1254,27 @@ define <4 x i64> @vp_bitreverse_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl)
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v12, v12, a3
; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4
-; RV32-NEXT: vsll.vi v12, v12, 24
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v14, (a5), zero
+; RV32-NEXT: vlse64.v v12, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v14
+; RV32-NEXT: vand.vx v14, v8, a4
+; RV32-NEXT: vsll.vi v14, v14, 24
+; RV32-NEXT: vand.vv v16, v8, v12
; RV32-NEXT: vsll.vi v16, v16, 8
-; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: vsrl.vx v12, v8, a1
+; RV32-NEXT: vor.vv v14, v14, v16
+; RV32-NEXT: vor.vv v10, v10, v14
+; RV32-NEXT: vsrl.vx v14, v8, a1
; RV32-NEXT: vsrl.vx v16, v8, a3
; RV32-NEXT: vand.vx v16, v16, a2
-; RV32-NEXT: vor.vv v12, v16, v12
+; RV32-NEXT: vor.vv v14, v16, v14
; RV32-NEXT: vsrl.vi v16, v8, 24
; RV32-NEXT: vand.vx v16, v16, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v14
+; RV32-NEXT: vand.vv v8, v8, v12
; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vor.vv v8, v8, v12
+; RV32-NEXT: vor.vv v8, v8, v14
; RV32-NEXT: vor.vv v8, v10, v8
; RV32-NEXT: vsrl.vi v10, v8, 4
; RV32-NEXT: lui a1, 61681
@@ -1391,13 +1391,13 @@ define <8 x i64> @vp_bitreverse_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %e
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v16, v16, a3, v0.t
; RV32-NEXT: vor.vv v16, v12, v16, v0.t
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v20, v12, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a5), zero
+; RV32-NEXT: vlse64.v v12, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vx v20, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v20, v20, 24, v0.t
; RV32-NEXT: vand.vv v24, v8, v12, v0.t
; RV32-NEXT: vsll.vi v24, v24, 8, v0.t
; RV32-NEXT: vor.vv v20, v20, v24, v0.t
@@ -1526,27 +1526,27 @@ define <8 x i64> @vp_bitreverse_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl)
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v16, v16, a3
; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v16, v16, 24
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v20, (a5), zero
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v20
+; RV32-NEXT: vand.vx v20, v8, a4
+; RV32-NEXT: vsll.vi v20, v20, 24
+; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
-; RV32-NEXT: vor.vv v16, v16, v24
-; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: vsrl.vx v16, v8, a1
+; RV32-NEXT: vor.vv v20, v20, v24
+; RV32-NEXT: vor.vv v12, v12, v20
+; RV32-NEXT: vsrl.vx v20, v8, a1
; RV32-NEXT: vsrl.vx v24, v8, a3
; RV32-NEXT: vand.vx v24, v24, a2
-; RV32-NEXT: vor.vv v16, v24, v16
+; RV32-NEXT: vor.vv v20, v24, v20
; RV32-NEXT: vsrl.vi v24, v8, 24
; RV32-NEXT: vand.vx v24, v24, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v20
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: vor.vv v8, v8, v16
+; RV32-NEXT: vor.vv v8, v8, v20
; RV32-NEXT: vor.vv v8, v12, v8
; RV32-NEXT: vsrl.vi v12, v8, 4
; RV32-NEXT: lui a1, 61681
@@ -1685,20 +1685,23 @@ define <15 x i64> @vp_bitreverse_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroex
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 48
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v24, v16, 24, v0.t
-; RV32-NEXT: addi a5, sp, 16
+; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
-; RV32-NEXT: csrr a5, vlenb
-; RV32-NEXT: slli a5, a5, 3
-; RV32-NEXT: add a5, sp, a5
-; RV32-NEXT: addi a5, a5, 48
-; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 48
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
-; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT: vand.vx v24, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT: addi a5, sp, 48
+; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
+; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a5, vlenb
; RV32-NEXT: slli a5, a5, 4
@@ -1711,10 +1714,10 @@ define <15 x i64> @vp_bitreverse_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroex
; RV32-NEXT: add a5, sp, a5
; RV32-NEXT: addi a5, a5, 48
; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vx v24, v8, a1, v0.t
-; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
-; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
+; RV32-NEXT: vsrl.vx v24, v8, a3, v0.t
+; RV32-NEXT: vand.vx v24, v24, a2, v0.t
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a1, sp, 48
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t
@@ -1727,38 +1730,38 @@ define <15 x i64> @vp_bitreverse_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroex
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vor.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 40
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: addi a1, sp, 48
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 48
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v8, v16, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: addi a1, sp, 40
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vor.vv v16, v16, v8, v0.t
+; RV32-NEXT: vsrl.vi v8, v16, 4, v0.t
; RV32-NEXT: vand.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsll.vi v8, v8, 4, v0.t
-; RV32-NEXT: vor.vv v8, v16, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t
+; RV32-NEXT: vand.vv v16, v16, v24, v0.t
; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsll.vi v16, v16, 4, v0.t
+; RV32-NEXT: vor.vv v16, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v8, v16, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsll.vi v8, v8, 2, v0.t
-; RV32-NEXT: vor.vv v8, v16, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: vand.vv v16, v16, v24, v0.t
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsll.vi v16, v16, 2, v0.t
+; RV32-NEXT: vor.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT: vand.vv v16, v16, v24, v0.t
; RV32-NEXT: vand.vv v8, v8, v24, v0.t
; RV32-NEXT: vsll.vi v8, v8, 1, v0.t
@@ -1885,60 +1888,60 @@ define <15 x i64> @vp_bitreverse_v15i64_unmasked(<15 x i64> %va, i32 zeroext %ev
; RV32-NEXT: vor.vv v16, v16, v24
; RV32-NEXT: addi a4, sp, 48
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v0, v16, 24
-; RV32-NEXT: addi a5, sp, 16
+; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
+; RV32-NEXT: vlse64.v v24, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
-; RV32-NEXT: vsll.vi v24, v24, 8
-; RV32-NEXT: vor.vv v24, v0, v24
+; RV32-NEXT: vand.vx v0, v8, a4
+; RV32-NEXT: vsll.vi v0, v0, 24
+; RV32-NEXT: vand.vv v16, v8, v24
+; RV32-NEXT: vsll.vi v16, v16, 8
+; RV32-NEXT: vor.vv v16, v0, v16
; RV32-NEXT: addi a5, sp, 48
; RV32-NEXT: vl8r.v v0, (a5) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v24, v0, v24
-; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vor.vv v16, v0, v16
+; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vx v0, v8, a3
; RV32-NEXT: vand.vx v0, v0, a2
-; RV32-NEXT: vsrl.vx v24, v8, a1
-; RV32-NEXT: vor.vv v24, v0, v24
-; RV32-NEXT: vsrl.vi v0, v8, 8
-; RV32-NEXT: vand.vv v16, v0, v16
+; RV32-NEXT: vsrl.vx v16, v8, a1
+; RV32-NEXT: vor.vv v0, v0, v16
+; RV32-NEXT: vsrl.vi v16, v8, 8
+; RV32-NEXT: vand.vv v16, v16, v24
; RV32-NEXT: vsrl.vi v8, v8, 24
; RV32-NEXT: vand.vx v8, v8, a4
; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: addi a1, sp, 48
-; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
+; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
-; RV32-NEXT: vand.vv v8, v8, v24
-; RV32-NEXT: vsll.vi v8, v8, 4
-; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 2
+; RV32-NEXT: vor.vv v8, v8, v0
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vor.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 4
+; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
+; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
-; RV32-NEXT: vand.vv v8, v8, v24
-; RV32-NEXT: vsll.vi v8, v8, 2
-; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 1
+; RV32-NEXT: vsll.vi v8, v8, 4
+; RV32-NEXT: vor.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 2
+; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
+; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
-; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vsll.vi v8, v8, 2
+; RV32-NEXT: vor.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 1
+; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: vadd.vv v8, v8, v8
-; RV32-NEXT: vor.vv v8, v16, v8
+; RV32-NEXT: vor.vv v8, v24, v8
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
@@ -2049,20 +2052,23 @@ define <16 x i64> @vp_bitreverse_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroex
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 48
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v24, v16, 24, v0.t
-; RV32-NEXT: addi a5, sp, 16
+; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
-; RV32-NEXT: csrr a5, vlenb
-; RV32-NEXT: slli a5, a5, 3
-; RV32-NEXT: add a5, sp, a5
-; RV32-NEXT: addi a5, a5, 48
-; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 48
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
-; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT: vand.vx v24, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT: addi a5, sp, 48
+; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
+; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a5, vlenb
; RV32-NEXT: slli a5, a5, 4
@@ -2075,10 +2081,10 @@ define <16 x i64> @vp_bitreverse_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroex
; RV32-NEXT: add a5, sp, a5
; RV32-NEXT: addi a5, a5, 48
; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vx v24, v8, a1, v0.t
-; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
-; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
+; RV32-NEXT: vsrl.vx v24, v8, a3, v0.t
+; RV32-NEXT: vand.vx v24, v24, a2, v0.t
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a1, sp, 48
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t
@@ -2091,38 +2097,38 @@ define <16 x i64> @vp_bitreverse_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroex
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vor.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 40
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: addi a1, sp, 48
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 48
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v8, v16, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: addi a1, sp, 40
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vor.vv v16, v16, v8, v0.t
+; RV32-NEXT: vsrl.vi v8, v16, 4, v0.t
; RV32-NEXT: vand.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsll.vi v8, v8, 4, v0.t
-; RV32-NEXT: vor.vv v8, v16, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t
+; RV32-NEXT: vand.vv v16, v16, v24, v0.t
; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsll.vi v16, v16, 4, v0.t
+; RV32-NEXT: vor.vv v16, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v8, v16, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsll.vi v8, v8, 2, v0.t
-; RV32-NEXT: vor.vv v8, v16, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: vand.vv v16, v16, v24, v0.t
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsll.vi v16, v16, 2, v0.t
+; RV32-NEXT: vor.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT: vand.vv v16, v16, v24, v0.t
; RV32-NEXT: vand.vv v8, v8, v24, v0.t
; RV32-NEXT: vsll.vi v8, v8, 1, v0.t
@@ -2249,60 +2255,60 @@ define <16 x i64> @vp_bitreverse_v16i64_unmasked(<16 x i64> %va, i32 zeroext %ev
; RV32-NEXT: vor.vv v16, v16, v24
; RV32-NEXT: addi a4, sp, 48
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v0, v16, 24
-; RV32-NEXT: addi a5, sp, 16
+; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
+; RV32-NEXT: vlse64.v v24, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
-; RV32-NEXT: vsll.vi v24, v24, 8
-; RV32-NEXT: vor.vv v24, v0, v24
+; RV32-NEXT: vand.vx v0, v8, a4
+; RV32-NEXT: vsll.vi v0, v0, 24
+; RV32-NEXT: vand.vv v16, v8, v24
+; RV32-NEXT: vsll.vi v16, v16, 8
+; RV32-NEXT: vor.vv v16, v0, v16
; RV32-NEXT: addi a5, sp, 48
; RV32-NEXT: vl8r.v v0, (a5) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v24, v0, v24
-; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vor.vv v16, v0, v16
+; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vx v0, v8, a3
; RV32-NEXT: vand.vx v0, v0, a2
-; RV32-NEXT: vsrl.vx v24, v8, a1
-; RV32-NEXT: vor.vv v24, v0, v24
-; RV32-NEXT: vsrl.vi v0, v8, 8
-; RV32-NEXT: vand.vv v16, v0, v16
+; RV32-NEXT: vsrl.vx v16, v8, a1
+; RV32-NEXT: vor.vv v0, v0, v16
+; RV32-NEXT: vsrl.vi v16, v8, 8
+; RV32-NEXT: vand.vv v16, v16, v24
; RV32-NEXT: vsrl.vi v8, v8, 24
; RV32-NEXT: vand.vx v8, v8, a4
; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: addi a1, sp, 48
-; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
+; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
-; RV32-NEXT: vand.vv v8, v8, v24
-; RV32-NEXT: vsll.vi v8, v8, 4
-; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 2
+; RV32-NEXT: vor.vv v8, v8, v0
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vor.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 4
+; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
+; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
-; RV32-NEXT: vand.vv v8, v8, v24
-; RV32-NEXT: vsll.vi v8, v8, 2
-; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 1
+; RV32-NEXT: vsll.vi v8, v8, 4
+; RV32-NEXT: vor.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 2
+; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
+; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
-; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vsll.vi v8, v8, 2
+; RV32-NEXT: vor.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 1
+; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: vadd.vv v8, v8, v8
-; RV32-NEXT: vor.vv v8, v16, v8
+; RV32-NEXT: vor.vv v8, v24, v8
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
@@ -2388,8 +2394,8 @@ define <128 x i16> @vp_bitreverse_v128i16(<128 x i16> %va, <128 x i1> %m, i32 ze
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; CHECK-NEXT: li a2, 64
+; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 8
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB34_2
@@ -2427,13 +2433,13 @@ define <128 x i16> @vp_bitreverse_v128i16(<128 x i16> %va, <128 x i1> %m, i32 ze
; CHECK-NEXT: sltu a0, a0, a4
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a4
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: slli a4, a4, 3
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsrl.vi v16, v8, 8, v0.t
; CHECK-NEXT: vsll.vi v8, v8, 8, v0.t
; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
index f80d4e5c0d7c..149073868732 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
@@ -295,27 +295,27 @@ define <2 x i64> @vp_bswap_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v10, v10, a3, v0.t
; RV32-NEXT: vor.vv v9, v9, v10, v0.t
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v10, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v10, v10, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v11, (a5), zero
+; RV32-NEXT: vlse64.v v10, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT: vand.vv v12, v8, v11, v0.t
+; RV32-NEXT: vand.vx v11, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v11, v11, 24, v0.t
+; RV32-NEXT: vand.vv v12, v8, v10, v0.t
; RV32-NEXT: vsll.vi v12, v12, 8, v0.t
-; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: vor.vv v9, v9, v10, v0.t
-; RV32-NEXT: vsrl.vx v10, v8, a1, v0.t
+; RV32-NEXT: vor.vv v11, v11, v12, v0.t
+; RV32-NEXT: vor.vv v9, v9, v11, v0.t
+; RV32-NEXT: vsrl.vx v11, v8, a1, v0.t
; RV32-NEXT: vsrl.vx v12, v8, a3, v0.t
; RV32-NEXT: vand.vx v12, v12, a2, v0.t
-; RV32-NEXT: vor.vv v10, v12, v10, v0.t
+; RV32-NEXT: vor.vv v11, v12, v11, v0.t
; RV32-NEXT: vsrl.vi v12, v8, 24, v0.t
; RV32-NEXT: vand.vx v12, v12, a4, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v11, v0.t
+; RV32-NEXT: vand.vv v8, v8, v10, v0.t
; RV32-NEXT: vor.vv v8, v8, v12, v0.t
-; RV32-NEXT: vor.vv v8, v8, v10, v0.t
+; RV32-NEXT: vor.vv v8, v8, v11, v0.t
; RV32-NEXT: vor.vv v8, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -373,27 +373,27 @@ define <2 x i64> @vp_bswap_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v10, v10, a3
; RV32-NEXT: vor.vv v9, v9, v10
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v10, v8, a4
-; RV32-NEXT: vsll.vi v10, v10, 24
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v11, (a5), zero
+; RV32-NEXT: vlse64.v v10, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT: vand.vv v12, v8, v11
+; RV32-NEXT: vand.vx v11, v8, a4
+; RV32-NEXT: vsll.vi v11, v11, 24
+; RV32-NEXT: vand.vv v12, v8, v10
; RV32-NEXT: vsll.vi v12, v12, 8
-; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: vor.vv v9, v9, v10
-; RV32-NEXT: vsrl.vx v10, v8, a1
+; RV32-NEXT: vor.vv v11, v11, v12
+; RV32-NEXT: vor.vv v9, v9, v11
+; RV32-NEXT: vsrl.vx v11, v8, a1
; RV32-NEXT: vsrl.vx v12, v8, a3
; RV32-NEXT: vand.vx v12, v12, a2
-; RV32-NEXT: vor.vv v10, v12, v10
+; RV32-NEXT: vor.vv v11, v12, v11
; RV32-NEXT: vsrl.vi v12, v8, 24
; RV32-NEXT: vand.vx v12, v12, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v11
+; RV32-NEXT: vand.vv v8, v8, v10
; RV32-NEXT: vor.vv v8, v8, v12
-; RV32-NEXT: vor.vv v8, v8, v10
+; RV32-NEXT: vor.vv v8, v8, v11
; RV32-NEXT: vor.vv v8, v9, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -453,27 +453,27 @@ define <4 x i64> @vp_bswap_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v12, v12, a3, v0.t
; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v12, v12, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v14, (a5), zero
+; RV32-NEXT: vlse64.v v12, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v14, v0.t
+; RV32-NEXT: vand.vx v14, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v14, v14, 24, v0.t
+; RV32-NEXT: vand.vv v16, v8, v12, v0.t
; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
-; RV32-NEXT: vor.vv v12, v12, v16, v0.t
-; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: vsrl.vx v12, v8, a1, v0.t
+; RV32-NEXT: vor.vv v14, v14, v16, v0.t
+; RV32-NEXT: vor.vv v10, v10, v14, v0.t
+; RV32-NEXT: vsrl.vx v14, v8, a1, v0.t
; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v12, v16, v12, v0.t
+; RV32-NEXT: vor.vv v14, v16, v14, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 24, v0.t
; RV32-NEXT: vand.vx v16, v16, a4, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v14, v0.t
+; RV32-NEXT: vand.vv v8, v8, v12, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
-; RV32-NEXT: vor.vv v8, v8, v12, v0.t
+; RV32-NEXT: vor.vv v8, v8, v14, v0.t
; RV32-NEXT: vor.vv v8, v10, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -531,27 +531,27 @@ define <4 x i64> @vp_bswap_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v12, v12, a3
; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4
-; RV32-NEXT: vsll.vi v12, v12, 24
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v14, (a5), zero
+; RV32-NEXT: vlse64.v v12, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v14
+; RV32-NEXT: vand.vx v14, v8, a4
+; RV32-NEXT: vsll.vi v14, v14, 24
+; RV32-NEXT: vand.vv v16, v8, v12
; RV32-NEXT: vsll.vi v16, v16, 8
-; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: vsrl.vx v12, v8, a1
+; RV32-NEXT: vor.vv v14, v14, v16
+; RV32-NEXT: vor.vv v10, v10, v14
+; RV32-NEXT: vsrl.vx v14, v8, a1
; RV32-NEXT: vsrl.vx v16, v8, a3
; RV32-NEXT: vand.vx v16, v16, a2
-; RV32-NEXT: vor.vv v12, v16, v12
+; RV32-NEXT: vor.vv v14, v16, v14
; RV32-NEXT: vsrl.vi v16, v8, 24
; RV32-NEXT: vand.vx v16, v16, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v14
+; RV32-NEXT: vand.vv v8, v8, v12
; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vor.vv v8, v8, v12
+; RV32-NEXT: vor.vv v8, v8, v14
; RV32-NEXT: vor.vv v8, v10, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -611,13 +611,13 @@ define <8 x i64> @vp_bswap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v16, v16, a3, v0.t
; RV32-NEXT: vor.vv v16, v12, v16, v0.t
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v20, v12, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a5), zero
+; RV32-NEXT: vlse64.v v12, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vx v20, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v20, v20, 24, v0.t
; RV32-NEXT: vand.vv v24, v8, v12, v0.t
; RV32-NEXT: vsll.vi v24, v24, 8, v0.t
; RV32-NEXT: vor.vv v20, v20, v24, v0.t
@@ -689,27 +689,27 @@ define <8 x i64> @vp_bswap_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v16, v16, a3
; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v16, v16, 24
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v20, (a5), zero
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v20
+; RV32-NEXT: vand.vx v20, v8, a4
+; RV32-NEXT: vsll.vi v20, v20, 24
+; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
-; RV32-NEXT: vor.vv v16, v16, v24
-; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: vsrl.vx v16, v8, a1
+; RV32-NEXT: vor.vv v20, v20, v24
+; RV32-NEXT: vor.vv v12, v12, v20
+; RV32-NEXT: vsrl.vx v20, v8, a1
; RV32-NEXT: vsrl.vx v24, v8, a3
; RV32-NEXT: vand.vx v24, v24, a2
-; RV32-NEXT: vor.vv v16, v24, v16
+; RV32-NEXT: vor.vv v20, v24, v20
; RV32-NEXT: vsrl.vi v24, v8, 24
; RV32-NEXT: vand.vx v24, v24, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v20
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: vor.vv v8, v8, v16
+; RV32-NEXT: vor.vv v8, v8, v20
; RV32-NEXT: vor.vv v8, v12, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -779,20 +779,23 @@ define <15 x i64> @vp_bswap_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %ev
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v24, v16, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
-; RV32-NEXT: csrr a5, vlenb
-; RV32-NEXT: slli a5, a5, 3
-; RV32-NEXT: add a5, sp, a5
-; RV32-NEXT: addi a5, a5, 16
-; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
-; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT: vand.vx v24, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
@@ -805,10 +808,10 @@ define <15 x i64> @vp_bswap_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %ev
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vx v24, v8, a1, v0.t
-; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
-; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
+; RV32-NEXT: vsrl.vx v24, v8, a3, v0.t
+; RV32-NEXT: vand.vx v24, v24, a2, v0.t
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t
@@ -913,13 +916,13 @@ define <15 x i64> @vp_bswap_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: vor.vv v16, v16, v24
; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v0, v16, 24
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vx v0, v8, a4
+; RV32-NEXT: vsll.vi v0, v0, 24
; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
; RV32-NEXT: vor.vv v24, v0, v24
@@ -1010,20 +1013,23 @@ define <16 x i64> @vp_bswap_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %ev
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v24, v16, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
-; RV32-NEXT: csrr a5, vlenb
-; RV32-NEXT: slli a5, a5, 3
-; RV32-NEXT: add a5, sp, a5
-; RV32-NEXT: addi a5, a5, 16
-; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
-; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT: vand.vx v24, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
@@ -1036,10 +1042,10 @@ define <16 x i64> @vp_bswap_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %ev
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vx v24, v8, a1, v0.t
-; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
-; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
+; RV32-NEXT: vsrl.vx v24, v8, a3, v0.t
+; RV32-NEXT: vand.vx v24, v24, a2, v0.t
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t
@@ -1144,13 +1150,13 @@ define <16 x i64> @vp_bswap_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: vor.vv v16, v16, v24
; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v0, v16, 24
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vx v0, v8, a4
+; RV32-NEXT: vsll.vi v0, v0, 24
; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
; RV32-NEXT: vor.vv v24, v0, v24
@@ -1228,8 +1234,8 @@ define <128 x i16> @vp_bswap_v128i16(<128 x i16> %va, <128 x i1> %m, i32 zeroext
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; CHECK-NEXT: li a2, 64
+; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 8
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
@@ -1246,13 +1252,13 @@ define <128 x i16> @vp_bswap_v128i16(<128 x i16> %va, <128 x i1> %m, i32 zeroext
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsrl.vi v16, v8, 8, v0.t
; CHECK-NEXT: vsll.vi v8, v8, 8, v0.t
; CHECK-NEXT: vor.vv v16, v8, v16, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll
index af7d7f7ae755..65a1035fd815 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll
@@ -567,13 +567,14 @@ define <8 x i32> @add_constant_rhs_8xi32_partial(<8 x i32> %vin, i32 %a, i32 %b,
; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; CHECK-NEXT: vslideup.vi v8, v10, 5
; CHECK-NEXT: vmv.s.x v10, a2
+; CHECK-NEXT: lui a0, %hi(.LCPI19_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI19_0)
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v12, (a0)
; CHECK-NEXT: vsetivli zero, 7, e32, m2, tu, ma
; CHECK-NEXT: vslideup.vi v8, v10, 6
; CHECK-NEXT: vmv.s.x v10, a3
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: lui a0, %hi(.LCPI19_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI19_0)
-; CHECK-NEXT: vle32.v v12, (a0)
; CHECK-NEXT: vslideup.vi v8, v10, 7
; CHECK-NEXT: vadd.vv v8, v8, v12
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
index 3e2af7e8267b..befbfb88550b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
@@ -204,8 +204,8 @@ define <8 x half> @vp_ceil_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 3
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -261,16 +261,16 @@ declare <16 x half> @llvm.vp.ceil.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 3
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -290,8 +290,8 @@ define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 3
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -439,8 +439,8 @@ define <8 x float> @vp_ceil_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -483,8 +483,8 @@ define <16 x float> @vp_ceil_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -561,16 +561,16 @@ declare <4 x double> @llvm.vp.ceil.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_ceil_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -605,16 +605,16 @@ declare <8 x double> @llvm.vp.ceil.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_ceil_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -649,16 +649,16 @@ declare <15 x double> @llvm.vp.ceil.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_ceil_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -693,16 +693,16 @@ declare <16 x double> @llvm.vp.ceil.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_ceil_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -737,69 +737,59 @@ declare <32 x double> @llvm.vp.ceil.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v24, v0, 2
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a1, 3
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll
index 2f4539d5038c..b42fb8c68616 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll
@@ -1503,24 +1503,28 @@ declare <15 x i64> @llvm.vp.ctlz.v15i64(<15 x i64>, i1 immarg, <15 x i1>, i32)
define <15 x i64> @vp_ctlz_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_ctlz_v15i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
@@ -1535,37 +1539,60 @@ define <15 x i64> @vp_ctlz_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
-; RV32-NEXT: vnot.v v8, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vnot.v v24, v8, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v24, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v24, v8, v0.t
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_ctlz_v15i64:
@@ -1655,33 +1682,29 @@ define <15 x i64> @vp_ctlz_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: vsrl.vx v16, v8, a1
; RV32-NEXT: vor.vv v8, v8, v16
; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -1743,24 +1766,28 @@ declare <16 x i64> @llvm.vp.ctlz.v16i64(<16 x i64>, i1 immarg, <16 x i1>, i32)
define <16 x i64> @vp_ctlz_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_ctlz_v16i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
@@ -1775,37 +1802,60 @@ define <16 x i64> @vp_ctlz_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
-; RV32-NEXT: vnot.v v8, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vnot.v v24, v8, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v24, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v24, v8, v0.t
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_ctlz_v16i64:
@@ -1895,33 +1945,29 @@ define <16 x i64> @vp_ctlz_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: vsrl.vx v16, v8, a1
; RV32-NEXT: vor.vv v8, v8, v16
; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -1991,7 +2037,7 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 56 * vlenb
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 48
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -2035,111 +2081,145 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
; RV32-NEXT: vnot.v v8, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: addi a3, sp, 40
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
+; RV32-NEXT: addi a3, sp, 32
+; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 48
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, sp, 40
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 48
+; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
+; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 48
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: li a4, 48
+; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT: addi a3, sp, 24
+; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: li a5, 24
+; RV32-NEXT: mul a3, a3, a5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v8, (a4), zero
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v16, v8, v0.t
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: addi a2, sp, 48
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a3, 24
+; RV32-NEXT: mul a2, a2, a3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vmul.vv v8, v16, v8, v0.t
; RV32-NEXT: li a2, 56
; RV32-NEXT: vsrl.vx v8, v8, a2, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
@@ -2147,13 +2227,13 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: sltu a0, a0, a3
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t
; RV32-NEXT: vor.vv v8, v16, v8, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t
@@ -2171,18 +2251,18 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 24
+; RV32-NEXT: li a1, 40
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -2191,17 +2271,35 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
@@ -2211,7 +2309,7 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
@@ -2219,21 +2317,21 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 40
+; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -2257,8 +2355,8 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB34_2
@@ -2315,13 +2413,13 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV64-NEXT: sltu a0, a0, a7
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a7
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a7, vlenb
+; RV64-NEXT: slli a7, a7, 3
+; RV64-NEXT: add a7, sp, a7
+; RV64-NEXT: addi a7, a7, 16
+; RV64-NEXT: vl8r.v v8, (a7) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV64-NEXT: vor.vv v16, v8, v16, v0.t
; RV64-NEXT: vsrl.vi v8, v16, 2, v0.t
@@ -2364,10 +2462,14 @@ define <32 x i64> @vp_ctlz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: addi sp, sp, -48
; RV32-NEXT: .cfi_def_cfa_offset 48
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 32 * vlenb
-; RV32-NEXT: vmv8r.v v24, v16
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
; RV32-NEXT: sw a1, 44(sp)
@@ -2391,74 +2493,8 @@ define <32 x i64> @vp_ctlz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: li a1, 16
; RV32-NEXT: .LBB35_2:
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v16, v8, 1
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 2
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 8
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 16
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v16, v8, a2
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vsrl.vi v16, v8, 1
-; RV32-NEXT: addi a3, sp, 40
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v0, (a3), zero
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 24
-; RV32-NEXT: mul a3, a3, a4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v0, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v0
-; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v0, (a3), zero
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v0
-; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
-; RV32-NEXT: vadd.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
-; RV32-NEXT: addi a3, sp, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: li a1, 56
-; RV32-NEXT: vsrl.vx v8, v8, a1
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 3
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, a0, -16
-; RV32-NEXT: sltu a0, a0, a3
-; RV32-NEXT: addi a0, a0, -1
-; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v8, v24, 1
-; RV32-NEXT: vor.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 1
+; RV32-NEXT: vor.vv v8, v8, v24
; RV32-NEXT: vsrl.vi v24, v8, 2
; RV32-NEXT: vor.vv v8, v8, v24
; RV32-NEXT: vsrl.vi v24, v8, 4
@@ -2467,41 +2503,84 @@ define <32 x i64> @vp_ctlz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: vor.vv v8, v8, v24
; RV32-NEXT: vsrl.vi v24, v8, 16
; RV32-NEXT: vor.vv v8, v8, v24
+; RV32-NEXT: li a2, 32
; RV32-NEXT: vsrl.vx v24, v8, a2
; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vsrl.vi v24, v8, 1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a2, 24
-; RV32-NEXT: mul a0, a0, a2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vnot.v v0, v8
+; RV32-NEXT: addi a3, sp, 40
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a3), zero
+; RV32-NEXT: addi a3, sp, 32
+; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v24, v0, 1
; RV32-NEXT: vand.vv v24, v24, v16
-; RV32-NEXT: vsub.vv v8, v8, v24
-; RV32-NEXT: vand.vv v24, v8, v0
-; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
+; RV32-NEXT: vsub.vv v24, v0, v24
+; RV32-NEXT: vand.vv v0, v24, v8
+; RV32-NEXT: vsrl.vi v24, v24, 2
+; RV32-NEXT: vand.vv v24, v24, v8
+; RV32-NEXT: vadd.vv v24, v0, v24
+; RV32-NEXT: vsrl.vi v0, v24, 4
+; RV32-NEXT: vadd.vv v24, v24, v0
+; RV32-NEXT: addi a3, sp, 48
+; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: addi a3, a0, -16
+; RV32-NEXT: sltu a0, a0, a3
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v0, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v24, v0, 1
+; RV32-NEXT: vor.vv v24, v0, v24
+; RV32-NEXT: vsrl.vi v0, v24, 2
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vsrl.vi v0, v24, 4
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vsrl.vi v0, v24, 8
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vsrl.vi v0, v24, 16
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vsrl.vx v0, v24, a2
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vnot.v v24, v24
+; RV32-NEXT: vsrl.vi v0, v24, 1
+; RV32-NEXT: vand.vv v16, v0, v16
+; RV32-NEXT: addi a2, sp, 24
+; RV32-NEXT: vsub.vv v16, v24, v16
+; RV32-NEXT: vand.vv v24, v16, v8
+; RV32-NEXT: vsrl.vi v16, v16, 2
+; RV32-NEXT: vand.vv v8, v16, v8
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a2), zero
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v24, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a2), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: addi a2, sp, 48
+; RV32-NEXT: vl8r.v v0, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v0, v0, v16
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: addi a0, sp, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: vsrl.vx v16, v8, a1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v16, v0, v24
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v24, v8, v24
+; RV32-NEXT: li a2, 56
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v8, v16, a2
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v16, v24, a2
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
@@ -4060,24 +4139,28 @@ define <8 x i64> @vp_ctlz_zero_undef_v8i64_unmasked(<8 x i64> %va, i32 zeroext %
define <15 x i64> @vp_ctlz_zero_undef_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_ctlz_zero_undef_v15i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
@@ -4092,37 +4175,60 @@ define <15 x i64> @vp_ctlz_zero_undef_v15i64(<15 x i64> %va, <15 x i1> %m, i32 z
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
-; RV32-NEXT: vnot.v v8, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vnot.v v24, v8, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v24, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v24, v8, v0.t
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_ctlz_zero_undef_v15i64:
@@ -4212,33 +4318,29 @@ define <15 x i64> @vp_ctlz_zero_undef_v15i64_unmasked(<15 x i64> %va, i32 zeroex
; RV32-NEXT: vsrl.vx v16, v8, a1
; RV32-NEXT: vor.vv v8, v8, v16
; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -4298,24 +4400,28 @@ define <15 x i64> @vp_ctlz_zero_undef_v15i64_unmasked(<15 x i64> %va, i32 zeroex
define <16 x i64> @vp_ctlz_zero_undef_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_ctlz_zero_undef_v16i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
@@ -4330,37 +4436,60 @@ define <16 x i64> @vp_ctlz_zero_undef_v16i64(<16 x i64> %va, <16 x i1> %m, i32 z
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
-; RV32-NEXT: vnot.v v8, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vnot.v v24, v8, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v24, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v24, v8, v0.t
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_ctlz_zero_undef_v16i64:
@@ -4450,33 +4579,29 @@ define <16 x i64> @vp_ctlz_zero_undef_v16i64_unmasked(<16 x i64> %va, i32 zeroex
; RV32-NEXT: vsrl.vx v16, v8, a1
; RV32-NEXT: vor.vv v8, v8, v16
; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -4544,7 +4669,7 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 56 * vlenb
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 48
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -4588,111 +4713,145 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
; RV32-NEXT: vnot.v v8, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: addi a3, sp, 40
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
+; RV32-NEXT: addi a3, sp, 32
+; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 48
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, sp, 40
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 48
+; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
+; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 48
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: li a4, 48
+; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT: addi a3, sp, 24
+; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: li a5, 24
+; RV32-NEXT: mul a3, a3, a5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v8, (a4), zero
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v16, v8, v0.t
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: addi a2, sp, 48
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a3, 24
+; RV32-NEXT: mul a2, a2, a3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vmul.vv v8, v16, v8, v0.t
; RV32-NEXT: li a2, 56
; RV32-NEXT: vsrl.vx v8, v8, a2, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
@@ -4700,13 +4859,13 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: sltu a0, a0, a3
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t
; RV32-NEXT: vor.vv v8, v16, v8, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t
@@ -4724,18 +4883,18 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 24
+; RV32-NEXT: li a1, 40
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -4744,17 +4903,35 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
@@ -4764,7 +4941,7 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
@@ -4772,21 +4949,21 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 40
+; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -4810,8 +4987,8 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB70_2
@@ -4868,13 +5045,13 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV64-NEXT: sltu a0, a0, a7
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a7
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a7, vlenb
+; RV64-NEXT: slli a7, a7, 3
+; RV64-NEXT: add a7, sp, a7
+; RV64-NEXT: addi a7, a7, 16
+; RV64-NEXT: vl8r.v v8, (a7) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV64-NEXT: vor.vv v16, v8, v16, v0.t
; RV64-NEXT: vsrl.vi v8, v16, 2, v0.t
@@ -4917,10 +5094,14 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex
; RV32-NEXT: addi sp, sp, -48
; RV32-NEXT: .cfi_def_cfa_offset 48
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 32 * vlenb
-; RV32-NEXT: vmv8r.v v24, v16
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
; RV32-NEXT: sw a1, 44(sp)
@@ -4944,74 +5125,8 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex
; RV32-NEXT: li a1, 16
; RV32-NEXT: .LBB71_2:
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v16, v8, 1
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 2
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 8
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 16
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v16, v8, a2
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vsrl.vi v16, v8, 1
-; RV32-NEXT: addi a3, sp, 40
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v0, (a3), zero
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 24
-; RV32-NEXT: mul a3, a3, a4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v0, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v0
-; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v0, (a3), zero
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v0
-; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
-; RV32-NEXT: vadd.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
-; RV32-NEXT: addi a3, sp, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: li a1, 56
-; RV32-NEXT: vsrl.vx v8, v8, a1
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 3
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, a0, -16
-; RV32-NEXT: sltu a0, a0, a3
-; RV32-NEXT: addi a0, a0, -1
-; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v8, v24, 1
-; RV32-NEXT: vor.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 1
+; RV32-NEXT: vor.vv v8, v8, v24
; RV32-NEXT: vsrl.vi v24, v8, 2
; RV32-NEXT: vor.vv v8, v8, v24
; RV32-NEXT: vsrl.vi v24, v8, 4
@@ -5020,41 +5135,84 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex
; RV32-NEXT: vor.vv v8, v8, v24
; RV32-NEXT: vsrl.vi v24, v8, 16
; RV32-NEXT: vor.vv v8, v8, v24
+; RV32-NEXT: li a2, 32
; RV32-NEXT: vsrl.vx v24, v8, a2
; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vsrl.vi v24, v8, 1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a2, 24
-; RV32-NEXT: mul a0, a0, a2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vnot.v v0, v8
+; RV32-NEXT: addi a3, sp, 40
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a3), zero
+; RV32-NEXT: addi a3, sp, 32
+; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v24, v0, 1
; RV32-NEXT: vand.vv v24, v24, v16
-; RV32-NEXT: vsub.vv v8, v8, v24
-; RV32-NEXT: vand.vv v24, v8, v0
-; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
+; RV32-NEXT: vsub.vv v24, v0, v24
+; RV32-NEXT: vand.vv v0, v24, v8
+; RV32-NEXT: vsrl.vi v24, v24, 2
+; RV32-NEXT: vand.vv v24, v24, v8
+; RV32-NEXT: vadd.vv v24, v0, v24
+; RV32-NEXT: vsrl.vi v0, v24, 4
+; RV32-NEXT: vadd.vv v24, v24, v0
+; RV32-NEXT: addi a3, sp, 48
+; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: addi a3, a0, -16
+; RV32-NEXT: sltu a0, a0, a3
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v0, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v24, v0, 1
+; RV32-NEXT: vor.vv v24, v0, v24
+; RV32-NEXT: vsrl.vi v0, v24, 2
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vsrl.vi v0, v24, 4
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vsrl.vi v0, v24, 8
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vsrl.vi v0, v24, 16
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vsrl.vx v0, v24, a2
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vnot.v v24, v24
+; RV32-NEXT: vsrl.vi v0, v24, 1
+; RV32-NEXT: vand.vv v16, v0, v16
+; RV32-NEXT: addi a2, sp, 24
+; RV32-NEXT: vsub.vv v16, v24, v16
+; RV32-NEXT: vand.vv v24, v16, v8
+; RV32-NEXT: vsrl.vi v16, v16, 2
+; RV32-NEXT: vand.vv v8, v16, v8
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a2), zero
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v24, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a2), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: addi a2, sp, 48
+; RV32-NEXT: vl8r.v v0, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v0, v0, v16
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: addi a0, sp, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: vsrl.vx v16, v8, a1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v16, v0, v24
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v24, v8, v24
+; RV32-NEXT: li a2, 56
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v8, v16, a2
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v16, v24, a2
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
index 0b6d8b33394d..5fceab869ab8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
@@ -1119,55 +1119,93 @@ declare <15 x i64> @llvm.vp.ctpop.v15i64(<15 x i64>, <15 x i1>, i32)
define <15 x i64> @vp_ctpop_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_ctpop_v15i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 24
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 24 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v24, v16, v24, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_ctpop_v15i64:
@@ -1228,34 +1266,29 @@ define <15 x i64> @vp_ctpop_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: addi a1, a1, 257
; RV32-NEXT: sw a1, 4(sp)
; RV32-NEXT: sw a1, 0(sp)
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -1303,55 +1336,93 @@ declare <16 x i64> @llvm.vp.ctpop.v16i64(<16 x i64>, <16 x i1>, i32)
define <16 x i64> @vp_ctpop_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_ctpop_v16i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 24
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 24 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v24, v16, v24, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_ctpop_v16i64:
@@ -1412,34 +1483,29 @@ define <16 x i64> @vp_ctpop_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: addi a1, a1, 257
; RV32-NEXT: sw a1, 4(sp)
; RV32-NEXT: sw a1, 0(sp)
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -1495,11 +1561,16 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 48 * vlenb
; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a2, 40
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 48
-; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: lui a1, 349525
@@ -1524,74 +1595,93 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a1, 16
; RV32-NEXT: .LBB34_2:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT: addi a2, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 24
-; RV32-NEXT: mul a3, a3, a4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT: vlse64.v v8, (a2), zero
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: slli a2, a2, 5
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 48
; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: addi a2, sp, 32
+; RV32-NEXT: vlse64.v v16, (a2), zero
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: li a3, 24
; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 48
-; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vsub.vv v24, v8, v16, v0.t
-; RV32-NEXT: addi a2, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a2), zero
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: li a3, 24
+; RV32-NEXT: li a3, 40
; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 48
-; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v24, v8, v0.t
+; RV32-NEXT: vsrl.vi v24, v8, 1, v0.t
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 4
+; RV32-NEXT: slli a2, a2, 5
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 48
-; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vi v16, v24, 2, v0.t
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v24, v24, v8, v0.t
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 4
+; RV32-NEXT: li a3, 40
+; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 48
; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
-; RV32-NEXT: vsrl.vi v8, v16, 4, v0.t
-; RV32-NEXT: vadd.vv v16, v16, v8, v0.t
-; RV32-NEXT: addi a2, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a2), zero
+; RV32-NEXT: vsub.vv v24, v8, v24, v0.t
+; RV32-NEXT: vand.vv v8, v24, v16, v0.t
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 4
+; RV32-NEXT: li a3, 40
+; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 48
; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v16, v8, v0.t
-; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vsrl.vi v8, v24, 2, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a3, 40
+; RV32-NEXT: mul a2, a2, a3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: addi a2, sp, 24
+; RV32-NEXT: addi a3, sp, 16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a2), zero
; RV32-NEXT: addi a2, sp, 48
; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a3, 40
+; RV32-NEXT: mul a2, a2, a3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v24, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v24, v0.t
+; RV32-NEXT: vand.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vmul.vv v8, v16, v8, v0.t
; RV32-NEXT: li a1, 56
; RV32-NEXT: vsrl.vx v8, v8, a1, v0.t
; RV32-NEXT: csrr a2, vlenb
@@ -1603,14 +1693,13 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: sltu a0, a0, a2
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a2
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a2, 40
-; RV32-NEXT: mul a0, a0, a2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 4
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v24, v16, 1, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
@@ -1625,20 +1714,37 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vand.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
-; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: li a2, 24
+; RV32-NEXT: mul a0, a0, a2
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: addi a0, sp, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: mul a0, a0, a2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
; RV32-NEXT: csrr a0, vlenb
@@ -1666,8 +1772,8 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB34_2
@@ -1710,13 +1816,13 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV64-NEXT: sltu a0, a0, a6
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a6
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a6, vlenb
+; RV64-NEXT: slli a6, a6, 3
+; RV64-NEXT: add a6, sp, a6
+; RV64-NEXT: addi a6, a6, 16
+; RV64-NEXT: vl8r.v v8, (a6) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV64-NEXT: vand.vx v16, v16, a1, v0.t
; RV64-NEXT: vsub.vv v16, v8, v16, v0.t
@@ -1746,12 +1852,11 @@ define <32 x i64> @vp_ctpop_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: addi sp, sp, -48
; RV32-NEXT: .cfi_def_cfa_offset 48
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 40
-; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 40 * vlenb
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 48
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -1777,97 +1882,67 @@ define <32 x i64> @vp_ctpop_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a1, 16
; RV32-NEXT: .LBB35_2:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a2, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a2), zero
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: li a3, 24
-; RV32-NEXT: mul a2, a2, a3
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 48
-; RV32-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
-; RV32-NEXT: vsub.vv v8, v8, v16
+; RV32-NEXT: vlse64.v v16, (a2), zero
; RV32-NEXT: addi a2, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v0, (a2), zero
+; RV32-NEXT: vlse64.v v24, (a2), zero
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v0
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v0, v0, v16
+; RV32-NEXT: vsub.vv v8, v8, v0
+; RV32-NEXT: vand.vv v0, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
-; RV32-NEXT: vadd.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
-; RV32-NEXT: addi a2, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a2), zero
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 4
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 48
-; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a2), zero
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v0, v8
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
; RV32-NEXT: addi a2, sp, 48
; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v16, v16, v8
-; RV32-NEXT: li a1, 56
-; RV32-NEXT: vsrl.vx v8, v16, a1
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 3
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 48
-; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; RV32-NEXT: addi a2, a0, -16
; RV32-NEXT: sltu a0, a0, a2
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a2
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsrl.vi v16, v8, 1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a2, 24
-; RV32-NEXT: mul a0, a0, a2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
+; RV32-NEXT: addi a2, sp, 24
; RV32-NEXT: vsub.vv v16, v8, v16
-; RV32-NEXT: vand.vv v8, v16, v0
+; RV32-NEXT: vand.vv v0, v16, v24
; RV32-NEXT: vsrl.vi v16, v16, 2
-; RV32-NEXT: vand.vv v16, v16, v0
-; RV32-NEXT: vadd.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a2), zero
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vadd.vv v16, v0, v16
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v0, (a2), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v16, 4
+; RV32-NEXT: vadd.vv v8, v16, v8
+; RV32-NEXT: addi a2, sp, 48
+; RV32-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v16, v16, v0
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v24, v8, v0
+; RV32-NEXT: li a2, 56
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v8, v16, a2
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v16, v24, a2
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: addi a0, sp, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: vsrl.vx v16, v8, a1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 40
-; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
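Both test files in this hunk range exercise the same expansion: lacking an element-wise population-count instruction in the base V extension, the RV32 backend lowers llvm.vp.ctpop and llvm.vp.cttz to the classic bitwise SWAR sequence. The lui/addi pairs stored to the stack and reloaded with vlse64.v splat the masks 0x55555555, 0x33333333, 0x0f0f0f0f and the multiplier 0x01010101 across 64-bit lanes; the final vmul.vv/vsrl.vx by 56 is the byte-accumulation step. A minimal scalar sketch of that algorithm, for reference only (the vector code in the checks above is the authoritative output; these helper names are illustrative, not from the source):

#include <stdint.h>

/* Scalar form of the SWAR popcount the RV32 vector code expands.
   The constants match the splatted lui/addi values in the checks. */
static uint64_t popcount64(uint64_t x) {
  x = x - ((x >> 1) & 0x5555555555555555ULL);            /* count bit pairs   */
  x = (x & 0x3333333333333333ULL) +
      ((x >> 2) & 0x3333333333333333ULL);                /* sum into nibbles  */
  x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0fULL;            /* sum into bytes    */
  return (x * 0x0101010101010101ULL) >> 56;              /* fold bytes, >>56  */
}

/* cttz is lowered the same way: vsub.vx/vnot.v/vand.vv build the
   trailing-zero mask ~x & (x - 1), then popcount counts its bits. */
static uint64_t cttz64(uint64_t x) {
  return popcount64(~x & (x - 1));
}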
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll
index f2926fa91e5c..e7736e7f360f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll
@@ -1263,59 +1263,86 @@ declare <15 x i64> @llvm.vp.cttz.v15i64(<15 x i64>, i1 immarg, <15 x i1>, i32)
define <15 x i64> @vp_cttz_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_cttz_v15i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
; RV32-NEXT: li a1, 1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsub.vx v16, v8, a1, v0.t
; RV32-NEXT: vnot.v v8, v8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v24, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v24, v8, v0.t
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_cttz_v15i64:
@@ -1385,33 +1412,29 @@ define <15 x i64> @vp_cttz_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: vsub.vx v16, v8, a1
; RV32-NEXT: vnot.v v8, v8
; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -1463,59 +1486,86 @@ declare <16 x i64> @llvm.vp.cttz.v16i64(<16 x i64>, i1 immarg, <16 x i1>, i32)
define <16 x i64> @vp_cttz_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_cttz_v16i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
; RV32-NEXT: li a1, 1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsub.vx v16, v8, a1, v0.t
; RV32-NEXT: vnot.v v8, v8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v24, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v24, v8, v0.t
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_cttz_v16i64:
@@ -1585,33 +1635,29 @@ define <16 x i64> @vp_cttz_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: vsub.vx v16, v8, a1
; RV32-NEXT: vnot.v v8, v8
; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -1671,7 +1717,7 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 56 * vlenb
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 48
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -1705,111 +1751,145 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vnot.v v8, v8, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: addi a3, sp, 40
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
+; RV32-NEXT: addi a3, sp, 32
+; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 48
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, sp, 40
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 48
+; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
+; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 48
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: li a4, 48
+; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT: addi a3, sp, 24
+; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: li a5, 24
+; RV32-NEXT: mul a3, a3, a5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v8, (a4), zero
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v16, v8, v0.t
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: addi a2, sp, 48
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a3, 24
+; RV32-NEXT: mul a2, a2, a3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vmul.vv v8, v16, v8, v0.t
; RV32-NEXT: li a2, 56
; RV32-NEXT: vsrl.vx v8, v8, a2, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
@@ -1817,13 +1897,13 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: sltu a0, a0, a3
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsub.vx v8, v16, a1, v0.t
; RV32-NEXT: vnot.v v16, v16, v0.t
; RV32-NEXT: vand.vv v8, v16, v8, v0.t
@@ -1831,18 +1911,18 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 24
+; RV32-NEXT: li a1, 40
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -1851,17 +1931,35 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
@@ -1871,7 +1969,7 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
@@ -1879,21 +1977,21 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 40
+; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -1917,8 +2015,8 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a1, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a2, a0
; RV64-NEXT: bltu a0, a1, .LBB34_2
@@ -1965,13 +2063,13 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV64-NEXT: sltu a0, a0, a7
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a7
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a7, vlenb
+; RV64-NEXT: slli a7, a7, 3
+; RV64-NEXT: add a7, sp, a7
+; RV64-NEXT: addi a7, a7, 16
+; RV64-NEXT: vl8r.v v8, (a7) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vsub.vx v16, v8, a1, v0.t
; RV64-NEXT: vnot.v v8, v8, v0.t
; RV64-NEXT: vand.vv v8, v8, v16, v0.t
@@ -2004,10 +2102,14 @@ define <32 x i64> @vp_cttz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: addi sp, sp, -48
; RV32-NEXT: .cfi_def_cfa_offset 48
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 32 * vlenb
-; RV32-NEXT: vmv8r.v v24, v16
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
; RV32-NEXT: sw a1, 44(sp)
@@ -2032,96 +2134,73 @@ define <32 x i64> @vp_cttz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: .LBB35_2:
; RV32-NEXT: li a2, 1
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vsub.vx v16, v8, a2
+; RV32-NEXT: vsub.vx v24, v8, a2
; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 1
+; RV32-NEXT: vand.vv v0, v8, v24
; RV32-NEXT: addi a3, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v0, (a3), zero
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 24
-; RV32-NEXT: mul a3, a3, a4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v0, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v0
-; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v0, (a3), zero
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v0
-; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
-; RV32-NEXT: vadd.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a3), zero
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: addi a3, sp, 32
+; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
+; RV32-NEXT: vsrl.vi v24, v0, 1
+; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vsub.vv v24, v0, v24
+; RV32-NEXT: vand.vv v0, v24, v8
+; RV32-NEXT: vsrl.vi v24, v24, 2
+; RV32-NEXT: vand.vv v24, v24, v8
+; RV32-NEXT: vadd.vv v24, v0, v24
+; RV32-NEXT: vsrl.vi v0, v24, 4
+; RV32-NEXT: vadd.vv v24, v24, v0
; RV32-NEXT: addi a3, sp, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: li a1, 56
-; RV32-NEXT: vsrl.vx v8, v8, a1
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 3
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; RV32-NEXT: addi a3, a0, -16
; RV32-NEXT: sltu a0, a0, a3
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v0, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsub.vx v24, v0, a2
+; RV32-NEXT: vnot.v v0, v0
+; RV32-NEXT: vand.vv v24, v0, v24
+; RV32-NEXT: vsrl.vi v0, v24, 1
+; RV32-NEXT: vand.vv v16, v0, v16
+; RV32-NEXT: addi a2, sp, 24
+; RV32-NEXT: vsub.vv v16, v24, v16
+; RV32-NEXT: vand.vv v24, v16, v8
+; RV32-NEXT: vsrl.vi v16, v16, 2
+; RV32-NEXT: vand.vv v8, v16, v8
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a2), zero
+; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vsub.vx v8, v24, a2
-; RV32-NEXT: vnot.v v24, v24
-; RV32-NEXT: vand.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v24, v8, 1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a2, 24
-; RV32-NEXT: mul a0, a0, a2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v24, v24, v16
-; RV32-NEXT: vsub.vv v8, v8, v24
-; RV32-NEXT: vand.vv v24, v8, v0
-; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v24, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a2), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: addi a2, sp, 48
+; RV32-NEXT: vl8r.v v0, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v0, v0, v16
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: addi a0, sp, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: vsrl.vx v16, v8, a1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v16, v0, v24
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v24, v8, v24
+; RV32-NEXT: li a2, 56
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v8, v16, a2
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v16, v24, a2
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
@@ -3420,59 +3499,86 @@ define <8 x i64> @vp_cttz_zero_undef_v8i64_unmasked(<8 x i64> %va, i32 zeroext %
define <15 x i64> @vp_cttz_zero_undef_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_cttz_zero_undef_v15i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
; RV32-NEXT: li a1, 1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsub.vx v16, v8, a1, v0.t
; RV32-NEXT: vnot.v v8, v8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v24, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v24, v8, v0.t
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_cttz_zero_undef_v15i64:
@@ -3542,33 +3648,29 @@ define <15 x i64> @vp_cttz_zero_undef_v15i64_unmasked(<15 x i64> %va, i32 zeroex
; RV32-NEXT: vsub.vx v16, v8, a1
; RV32-NEXT: vnot.v v8, v8
; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -3618,59 +3720,86 @@ define <15 x i64> @vp_cttz_zero_undef_v15i64_unmasked(<15 x i64> %va, i32 zeroex
define <16 x i64> @vp_cttz_zero_undef_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_cttz_zero_undef_v16i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
; RV32-NEXT: li a1, 1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsub.vx v16, v8, a1, v0.t
; RV32-NEXT: vnot.v v8, v8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v24, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v24, v8, v0.t
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_cttz_zero_undef_v16i64:
@@ -3740,33 +3869,29 @@ define <16 x i64> @vp_cttz_zero_undef_v16i64_unmasked(<16 x i64> %va, i32 zeroex
; RV32-NEXT: vsub.vx v16, v8, a1
; RV32-NEXT: vnot.v v8, v8
; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -3824,7 +3949,7 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 56 * vlenb
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 48
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -3858,111 +3983,145 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vnot.v v8, v8, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: addi a3, sp, 40
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
+; RV32-NEXT: addi a3, sp, 32
+; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 48
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, sp, 40
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 48
+; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
+; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 48
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: li a4, 48
+; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT: addi a3, sp, 24
+; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: li a5, 24
+; RV32-NEXT: mul a3, a3, a5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v8, (a4), zero
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v16, v8, v0.t
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: addi a2, sp, 48
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a3, 24
+; RV32-NEXT: mul a2, a2, a3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vmul.vv v8, v16, v8, v0.t
; RV32-NEXT: li a2, 56
; RV32-NEXT: vsrl.vx v8, v8, a2, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
@@ -3970,13 +4129,13 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: sltu a0, a0, a3
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsub.vx v8, v16, a1, v0.t
; RV32-NEXT: vnot.v v16, v16, v0.t
; RV32-NEXT: vand.vv v8, v16, v8, v0.t
@@ -3984,18 +4143,18 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 24
+; RV32-NEXT: li a1, 40
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -4004,17 +4163,35 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
@@ -4024,7 +4201,7 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
@@ -4032,21 +4209,21 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 40
+; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -4070,8 +4247,8 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a1, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a2, a0
; RV64-NEXT: bltu a0, a1, .LBB70_2
@@ -4118,13 +4295,13 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV64-NEXT: sltu a0, a0, a7
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a7
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a7, vlenb
+; RV64-NEXT: slli a7, a7, 3
+; RV64-NEXT: add a7, sp, a7
+; RV64-NEXT: addi a7, a7, 16
+; RV64-NEXT: vl8r.v v8, (a7) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vsub.vx v16, v8, a1, v0.t
; RV64-NEXT: vnot.v v8, v8, v0.t
; RV64-NEXT: vand.vv v8, v8, v16, v0.t
@@ -4157,10 +4334,14 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex
; RV32-NEXT: addi sp, sp, -48
; RV32-NEXT: .cfi_def_cfa_offset 48
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 32 * vlenb
-; RV32-NEXT: vmv8r.v v24, v16
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
; RV32-NEXT: sw a1, 44(sp)
@@ -4185,96 +4366,73 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex
; RV32-NEXT: .LBB71_2:
; RV32-NEXT: li a2, 1
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vsub.vx v16, v8, a2
+; RV32-NEXT: vsub.vx v24, v8, a2
; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 1
+; RV32-NEXT: vand.vv v0, v8, v24
; RV32-NEXT: addi a3, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v0, (a3), zero
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 24
-; RV32-NEXT: mul a3, a3, a4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v0, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v0
-; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v0, (a3), zero
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v0
-; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
-; RV32-NEXT: vadd.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a3), zero
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: addi a3, sp, 32
+; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
+; RV32-NEXT: vsrl.vi v24, v0, 1
+; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vsub.vv v24, v0, v24
+; RV32-NEXT: vand.vv v0, v24, v8
+; RV32-NEXT: vsrl.vi v24, v24, 2
+; RV32-NEXT: vand.vv v24, v24, v8
+; RV32-NEXT: vadd.vv v24, v0, v24
+; RV32-NEXT: vsrl.vi v0, v24, 4
+; RV32-NEXT: vadd.vv v24, v24, v0
; RV32-NEXT: addi a3, sp, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: li a1, 56
-; RV32-NEXT: vsrl.vx v8, v8, a1
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 3
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; RV32-NEXT: addi a3, a0, -16
; RV32-NEXT: sltu a0, a0, a3
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v0, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsub.vx v24, v0, a2
+; RV32-NEXT: vnot.v v0, v0
+; RV32-NEXT: vand.vv v24, v0, v24
+; RV32-NEXT: vsrl.vi v0, v24, 1
+; RV32-NEXT: vand.vv v16, v0, v16
+; RV32-NEXT: addi a2, sp, 24
+; RV32-NEXT: vsub.vv v16, v24, v16
+; RV32-NEXT: vand.vv v24, v16, v8
+; RV32-NEXT: vsrl.vi v16, v16, 2
+; RV32-NEXT: vand.vv v8, v16, v8
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a2), zero
+; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vsub.vx v8, v24, a2
-; RV32-NEXT: vnot.v v24, v24
-; RV32-NEXT: vand.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v24, v8, 1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a2, 24
-; RV32-NEXT: mul a0, a0, a2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v24, v24, v16
-; RV32-NEXT: vsub.vv v8, v8, v24
-; RV32-NEXT: vand.vv v24, v8, v0
-; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v24, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a2), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: addi a2, sp, 48
+; RV32-NEXT: vl8r.v v0, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v0, v0, v16
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: addi a0, sp, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: vsrl.vx v16, v8, a1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v16, v0, v24
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v24, v8, v24
+; RV32-NEXT: li a2, 56
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v8, v16, a2
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v16, v24, a2
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll
index 1587f770f87c..9f8de22b25c2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll
@@ -15,16 +15,16 @@ define {<16 x i1>, <16 x i1>} @vector_deinterleave_load_v16i1_v32i1(ptr %p) {
; CHECK-NEXT: vmerge.vim v10, v8, 1, v0
; CHECK-NEXT: vid.v v9
; CHECK-NEXT: vadd.vv v11, v9, v9
-; CHECK-NEXT: vrgather.vv v9, v10, v11
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vrgather.vv v9, v10, v11
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vadd.vi v12, v11, -16
; CHECK-NEXT: li a0, -256
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vadd.vi v12, v11, -16
; CHECK-NEXT: vrgather.vv v9, v8, v12, v0.t
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vadd.vi v12, v11, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
index dccb62877af3..386c71cf665c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
@@ -326,9 +326,9 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV32-NEXT: andi sp, sp, -128
; RV32-NEXT: andi a1, a1, 255
; RV32-NEXT: li a2, 128
+; RV32-NEXT: addi a3, a0, 128
; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; RV32-NEXT: addi a2, a0, 128
-; RV32-NEXT: vle8.v v16, (a2)
+; RV32-NEXT: vle8.v v16, (a3)
; RV32-NEXT: vle8.v v24, (a0)
; RV32-NEXT: mv a0, sp
; RV32-NEXT: add a1, a0, a1
@@ -357,9 +357,9 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV64-NEXT: andi sp, sp, -128
; RV64-NEXT: andi a1, a1, 255
; RV64-NEXT: li a2, 128
+; RV64-NEXT: addi a3, a0, 128
; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; RV64-NEXT: addi a2, a0, 128
-; RV64-NEXT: vle8.v v16, (a2)
+; RV64-NEXT: vle8.v v16, (a3)
; RV64-NEXT: vle8.v v24, (a0)
; RV64-NEXT: mv a0, sp
; RV64-NEXT: add a1, a0, a1
@@ -388,9 +388,9 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV32ZBS-NEXT: andi sp, sp, -128
; RV32ZBS-NEXT: andi a1, a1, 255
; RV32ZBS-NEXT: li a2, 128
+; RV32ZBS-NEXT: addi a3, a0, 128
; RV32ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; RV32ZBS-NEXT: addi a2, a0, 128
-; RV32ZBS-NEXT: vle8.v v16, (a2)
+; RV32ZBS-NEXT: vle8.v v16, (a3)
; RV32ZBS-NEXT: vle8.v v24, (a0)
; RV32ZBS-NEXT: mv a0, sp
; RV32ZBS-NEXT: add a1, a0, a1
@@ -419,9 +419,9 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV64ZBS-NEXT: andi sp, sp, -128
; RV64ZBS-NEXT: andi a1, a1, 255
; RV64ZBS-NEXT: li a2, 128
+; RV64ZBS-NEXT: addi a3, a0, 128
; RV64ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; RV64ZBS-NEXT: addi a2, a0, 128
-; RV64ZBS-NEXT: vle8.v v16, (a2)
+; RV64ZBS-NEXT: vle8.v v16, (a3)
; RV64ZBS-NEXT: vle8.v v24, (a0)
; RV64ZBS-NEXT: mv a0, sp
; RV64ZBS-NEXT: add a1, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
index b9c611bf3e54..33cd00c9f6af 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
@@ -73,7 +73,6 @@ define void @extract_v1i32_v8i32_4(ptr %x, ptr %y) {
; VLA: # %bb.0:
; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT: vle32.v v8, (a0)
-; VLA-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; VLA-NEXT: vslidedown.vi v8, v8, 4
; VLA-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; VLA-NEXT: vse32.v v8, (a1)
@@ -96,7 +95,6 @@ define void @extract_v1i32_v8i32_5(ptr %x, ptr %y) {
; VLA: # %bb.0:
; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT: vle32.v v8, (a0)
-; VLA-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; VLA-NEXT: vslidedown.vi v8, v8, 5
; VLA-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; VLA-NEXT: vse32.v v8, (a1)
@@ -391,9 +389,8 @@ define void @extract_v8i1_v64i1_8(ptr %x, ptr %y) {
; VLA-NEXT: li a2, 64
; VLA-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; VLA-NEXT: vlm.v v8, (a0)
-; VLA-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; VLA-NEXT: vslidedown.vi v8, v8, 1
; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vslidedown.vi v8, v8, 1
; VLA-NEXT: vsm.v v8, (a1)
; VLA-NEXT: ret
;
@@ -401,9 +398,8 @@ define void @extract_v8i1_v64i1_8(ptr %x, ptr %y) {
; VLS: # %bb.0:
; VLS-NEXT: vsetvli a2, zero, e8, m4, ta, ma
; VLS-NEXT: vlm.v v8, (a0)
-; VLS-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; VLS-NEXT: vslidedown.vi v8, v8, 1
; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vslidedown.vi v8, v8, 1
; VLS-NEXT: vsm.v v8, (a1)
; VLS-NEXT: ret
%a = load <64 x i1>, ptr %x
@@ -418,9 +414,8 @@ define void @extract_v8i1_v64i1_48(ptr %x, ptr %y) {
; VLA-NEXT: li a2, 64
; VLA-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; VLA-NEXT: vlm.v v8, (a0)
-; VLA-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; VLA-NEXT: vslidedown.vi v8, v8, 6
; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vslidedown.vi v8, v8, 6
; VLA-NEXT: vsm.v v8, (a1)
; VLA-NEXT: ret
;
@@ -428,9 +423,8 @@ define void @extract_v8i1_v64i1_48(ptr %x, ptr %y) {
; VLS: # %bb.0:
; VLS-NEXT: vsetvli a2, zero, e8, m4, ta, ma
; VLS-NEXT: vlm.v v8, (a0)
-; VLS-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; VLS-NEXT: vslidedown.vi v8, v8, 6
; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vslidedown.vi v8, v8, 6
; VLS-NEXT: vsm.v v8, (a1)
; VLS-NEXT: ret
%a = load <64 x i1>, ptr %x
@@ -853,9 +847,8 @@ define void @extract_v2i1_nxv32i1_26(<vscale x 32 x i1> %x, ptr %y) {
define void @extract_v8i1_nxv32i1_16(<vscale x 32 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_nxv32i1_16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v0, 2
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v0, 2
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%c = call <8 x i1> @llvm.vector.extract.v8i1.nxv32i1(<vscale x 32 x i1> %x, i64 16)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
index e969da6fd45b..d309da6df7dc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -138,7 +138,6 @@ define i32 @extractelt_v8i32(ptr %x) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 6
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -152,9 +151,9 @@ define i64 @extractelt_v4i64(ptr %x) nounwind {
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: li a0, 32
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vsrl.vx v10, v8, a0
; RV32-NEXT: vmv.x.s a1, v10
; RV32-NEXT: vmv.x.s a0, v8
@@ -164,7 +163,6 @@ define i64 @extractelt_v4i64(ptr %x) nounwind {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
@@ -233,7 +231,6 @@ define i64 @extractelt_v3i64(ptr %x) nounwind {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 3, e64, m2, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 2
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
@@ -452,7 +449,6 @@ define i8 @extractelt_v32i8_idx(ptr %x, i32 zeroext %idx) nounwind {
; CHECK-NEXT: li a2, 32
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -466,7 +462,6 @@ define i16 @extractelt_v16i16_idx(ptr %x, i32 zeroext %idx) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -481,7 +476,6 @@ define i32 @extractelt_v8i32_idx(ptr %x, i32 zeroext %idx) nounwind {
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vadd.vv v8, v8, v8
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -497,10 +491,10 @@ define i64 @extractelt_v4i64_idx(ptr %x, i32 zeroext %idx) nounwind {
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: vadd.vv v8, v8, v8
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vsrl.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
@@ -510,7 +504,6 @@ define i64 @extractelt_v4i64_idx(ptr %x, i32 zeroext %idx) nounwind {
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vadd.vv v8, v8, v8
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vx v8, v8, a1
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
@@ -526,7 +519,6 @@ define half @extractelt_v16f16_idx(ptr %x, i32 zeroext %idx) nounwind {
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vfadd.vv v8, v8, v8
-; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
@@ -542,7 +534,6 @@ define float @extractelt_v8f32_idx(ptr %x, i32 zeroext %idx) nounwind {
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vfadd.vv v8, v8, v8
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
@@ -558,7 +549,6 @@ define double @extractelt_v4f64_idx(ptr %x, i32 zeroext %idx) nounwind {
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vfadd.vv v8, v8, v8
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
@@ -594,7 +584,6 @@ define i64 @extractelt_v3i64_idx(ptr %x, i32 zeroext %idx) nounwind {
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vadd.vv v8, v8, v8
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vx v8, v8, a1
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
@@ -721,9 +710,9 @@ define i32 @extractelt_v64i32_idx(ptr %x, i32 zeroext %idx) nounwind {
; RV32-NEXT: andi a1, a1, 63
; RV32-NEXT: slli a1, a1, 2
; RV32-NEXT: li a2, 32
+; RV32-NEXT: addi a3, a0, 128
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT: addi a2, a0, 128
-; RV32-NEXT: vle32.v v8, (a2)
+; RV32-NEXT: vle32.v v8, (a3)
; RV32-NEXT: vle32.v v16, (a0)
; RV32-NEXT: mv a0, sp
; RV32-NEXT: add a1, a0, a1
@@ -749,9 +738,9 @@ define i32 @extractelt_v64i32_idx(ptr %x, i32 zeroext %idx) nounwind {
; RV64-NEXT: andi a1, a1, 63
; RV64-NEXT: slli a1, a1, 2
; RV64-NEXT: li a2, 32
+; RV64-NEXT: addi a3, a0, 128
; RV64-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; RV64-NEXT: addi a2, a0, 128
-; RV64-NEXT: vle32.v v8, (a2)
+; RV64-NEXT: vle32.v v8, (a3)
; RV64-NEXT: vle32.v v16, (a0)
; RV64-NEXT: mv a0, sp
; RV64-NEXT: add a1, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
index 287dd510674d..c1b4c5fda6c6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
@@ -204,8 +204,8 @@ define <8 x half> @vp_floor_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 2
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -261,16 +261,16 @@ declare <16 x half> @llvm.vp.floor.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 2
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -290,8 +290,8 @@ define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 2
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -439,8 +439,8 @@ define <8 x float> @vp_floor_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %ev
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -483,8 +483,8 @@ define <16 x float> @vp_floor_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -561,16 +561,16 @@ declare <4 x double> @llvm.vp.floor.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_floor_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -605,16 +605,16 @@ declare <8 x double> @llvm.vp.floor.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_floor_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -649,16 +649,16 @@ declare <15 x double> @llvm.vp.floor.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_floor_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -693,16 +693,16 @@ declare <16 x double> @llvm.vp.floor.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_floor_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -737,69 +737,59 @@ declare <32 x double> @llvm.vp.floor.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v24, v0, 2
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a1, 2
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
index edb33158e32e..51eb63f5f922 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
@@ -177,8 +177,8 @@ define <8 x half> @vfmax_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i
; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v12, v0.t
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v16, v12, v14, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vmfeq.vv v8, v14, v14, v0.t
@@ -253,8 +253,8 @@ define <16 x half> @vfmax_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1>
; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v16, v0.t
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v24, v16, v20, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: vmfeq.vv v8, v20, v20, v0.t
@@ -608,7 +608,6 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
@@ -618,28 +617,28 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a1, .LBB24_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB24_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v6
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v26, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: csrr a0, vlenb
@@ -666,13 +665,13 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: csrr a0, vlenb
@@ -759,9 +758,9 @@ define <32 x double> @vfmax_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmfeq.vv v0, v16, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
index 48649c43f782..03e0ac42c442 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
@@ -177,8 +177,8 @@ define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i
; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v12, v0.t
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v16, v12, v14, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vmfeq.vv v8, v14, v14, v0.t
@@ -253,8 +253,8 @@ define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1>
; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v16, v0.t
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v24, v16, v20, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: vmfeq.vv v8, v20, v20, v0.t
@@ -608,7 +608,6 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
@@ -618,28 +617,28 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a1, .LBB24_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB24_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v6
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v26, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: csrr a0, vlenb
@@ -666,13 +665,13 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: csrr a0, vlenb
@@ -759,9 +758,9 @@ define <32 x double> @vfmin_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmfeq.vv v0, v16, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
index 9e83efd35195..379a51f4eee3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
@@ -39,9 +39,9 @@ define <4 x float> @hang_when_merging_stores_after_legalization(<8 x float> %x,
; CHECK-NEXT: vmul.vx v14, v12, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vrgatherei16.vv v12, v8, v14
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vadd.vi v8, v14, -14
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.i v0, 12
+; CHECK-NEXT: vadd.vi v8, v14, -14
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vrgatherei16.vv v12, v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v8, v12
@@ -1407,8 +1407,8 @@ define <8 x float> @buildvec_v8f32_zvl256(float %e0, float %e1, float %e2, float
; CHECK-NEXT: vfmv.v.f v8, fa4
; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: vfslide1down.vf v8, v8, fa6
-; CHECK-NEXT: vfslide1down.vf v8, v8, fa7
; CHECK-NEXT: vmv.v.i v0, 15
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa7
; CHECK-NEXT: vslidedown.vi v8, v9, 4, v0.t
; CHECK-NEXT: ret
%v0 = insertelement <8 x float> poison, float %e0, i64 0
@@ -1458,8 +1458,8 @@ define <8 x double> @buildvec_v8f64_zvl512(double %e0, double %e1, double %e2, d
; CHECK-NEXT: vfmv.v.f v8, fa4
; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: vfslide1down.vf v8, v8, fa6
-; CHECK-NEXT: vfslide1down.vf v8, v8, fa7
; CHECK-NEXT: vmv.v.i v0, 15
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa7
; CHECK-NEXT: vslidedown.vi v8, v9, 4, v0.t
; CHECK-NEXT: ret
%v0 = insertelement <8 x double> poison, double %e0, i64 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
index ed152e64a91e..f3b124aa34dc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
@@ -56,9 +56,9 @@ define <4 x double> @interleave_v2f64(<2 x double> %x, <2 x double> %y) {
; RV32-V512-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
; RV32-V512-NEXT: vid.v v10
; RV32-V512-NEXT: vsrl.vi v11, v10, 1
+; RV32-V512-NEXT: vmv.v.i v0, 10
; RV32-V512-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV32-V512-NEXT: vrgatherei16.vv v10, v8, v11
-; RV32-V512-NEXT: vmv.v.i v0, 10
; RV32-V512-NEXT: vrgatherei16.vv v10, v9, v11, v0.t
; RV32-V512-NEXT: vmv.v.v v8, v10
; RV32-V512-NEXT: ret
@@ -68,8 +68,8 @@ define <4 x double> @interleave_v2f64(<2 x double> %x, <2 x double> %y) {
; RV64-V512-NEXT: vsetivli zero, 4, e64, m1, ta, mu
; RV64-V512-NEXT: vid.v v10
; RV64-V512-NEXT: vsrl.vi v11, v10, 1
-; RV64-V512-NEXT: vrgather.vv v10, v8, v11
; RV64-V512-NEXT: vmv.v.i v0, 10
+; RV64-V512-NEXT: vrgather.vv v10, v8, v11
; RV64-V512-NEXT: vrgather.vv v10, v9, v11, v0.t
; RV64-V512-NEXT: vmv.v.v v8, v10
; RV64-V512-NEXT: ret
@@ -261,13 +261,13 @@ define <64 x float> @interleave_v32f32(<32 x float> %x, <32 x float> %y) {
; V128-NEXT: vwmaccu.vx v8, a0, v16
; V128-NEXT: lui a1, 699051
; V128-NEXT: addi a1, a1, -1366
-; V128-NEXT: li a2, 32
; V128-NEXT: vmv.s.x v0, a1
-; V128-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; V128-NEXT: li a1, 32
+; V128-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; V128-NEXT: vmerge.vvm v24, v8, v24, v0
-; V128-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; V128-NEXT: addi a1, sp, 16
; V128-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; V128-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; V128-NEXT: vwaddu.vv v0, v16, v8
; V128-NEXT: vwmaccu.vx v0, a0, v8
; V128-NEXT: vmv8r.v v8, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
index 8dc32d13e4a3..45c0a22b1939 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
@@ -5,9 +5,8 @@
define <4 x half> @shuffle_v4f16(<4 x half> %x, <4 x half> %y) {
; CHECK-LABEL: shuffle_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 11
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v0, 11
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: ret
%s = shufflevector <4 x half> %x, <4 x half> %y, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
@@ -93,12 +92,11 @@ define <4 x double> @vrgather_shuffle_vv_v4f64(<4 x double> %x, <4 x double> %y)
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI6_0)
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v14, (a0)
-; CHECK-NEXT: vrgatherei16.vv v12, v8, v14
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.i v0, 8
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; CHECK-NEXT: vrgatherei16.vv v12, v8, v14
; CHECK-NEXT: vrgather.vi v12, v10, 1, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
@@ -109,13 +107,13 @@ define <4 x double> @vrgather_shuffle_vv_v4f64(<4 x double> %x, <4 x double> %y)
define <4 x double> @vrgather_shuffle_xv_v4f64(<4 x double> %x) {
; CHECK-LABEL: vrgather_shuffle_xv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT: vid.v v12
; CHECK-NEXT: lui a0, %hi(.LCPI7_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI7_0)
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vrsub.vi v12, v12, 4
+; CHECK-NEXT: vid.v v12
; CHECK-NEXT: vmv.v.i v0, 12
+; CHECK-NEXT: vrsub.vi v12, v12, 4
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vrgatherei16.vv v10, v8, v12, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
@@ -129,12 +127,12 @@ define <4 x double> @vrgather_shuffle_vx_v4f64(<4 x double> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vid.v v12
+; CHECK-NEXT: lui a0, %hi(.LCPI8_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI8_0)
+; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: li a0, 3
-; CHECK-NEXT: lui a1, %hi(.LCPI8_0)
-; CHECK-NEXT: addi a1, a1, %lo(.LCPI8_0)
-; CHECK-NEXT: vlse64.v v10, (a1), zero
-; CHECK-NEXT: vmul.vx v12, v12, a0
; CHECK-NEXT: vmv.v.i v0, 3
+; CHECK-NEXT: vmul.vx v12, v12, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vrgatherei16.vv v10, v8, v12, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
index 0f003d7af610..d25312268ada 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -1199,7 +1199,7 @@ declare <4 x half> @llvm.copysign.v4f16(<4 x half>, <4 x half>)
define void @copysign_neg_trunc_v3f16_v3f32(ptr %x, ptr %y) {
; ZVFH-LABEL: copysign_neg_trunc_v3f16_v3f32:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
+; ZVFH-NEXT: vsetivli zero, 3, e32, m1, ta, ma
; ZVFH-NEXT: vle32.v v8, (a1)
; ZVFH-NEXT: vle16.v v9, (a0)
; ZVFH-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
index 6320b07125bb..bc46e7d264bc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
@@ -351,25 +351,23 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV32-NEXT: fmin.d fa3, fa3, fa4
; RV32-NEXT: fcvt.w.d a2, fa3, rtz
; RV32-NEXT: and a0, a0, a2
-; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV32-NEXT: vslide1down.vx v10, v10, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa3, v8
-; RV32-NEXT: feq.d a0, fa3, fa3
+; RV32-NEXT: feq.d a2, fa3, fa3
; RV32-NEXT: fmax.d fa3, fa3, fa5
; RV32-NEXT: fmin.d fa3, fa3, fa4
-; RV32-NEXT: fcvt.w.d a2, fa3, rtz
+; RV32-NEXT: fcvt.w.d a3, fa3, rtz
; RV32-NEXT: fld fa3, 40(sp)
-; RV32-NEXT: neg a0, a0
-; RV32-NEXT: and a0, a0, a2
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; RV32-NEXT: vslide1down.vx v8, v10, a0
+; RV32-NEXT: neg a0, a2
+; RV32-NEXT: and a0, a0, a3
; RV32-NEXT: feq.d a2, fa3, fa3
; RV32-NEXT: fmax.d fa3, fa3, fa5
; RV32-NEXT: fmin.d fa3, fa3, fa4
; RV32-NEXT: fcvt.w.d a3, fa3, rtz
; RV32-NEXT: fld fa3, 32(sp)
-; RV32-NEXT: vslide1down.vx v8, v10, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: neg a0, a2
; RV32-NEXT: and a0, a0, a3
; RV32-NEXT: feq.d a2, fa3, fa3
@@ -395,8 +393,8 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV32-NEXT: fmin.d fa5, fa5, fa4
; RV32-NEXT: fcvt.w.d a2, fa5, rtz
; RV32-NEXT: and a0, a0, a2
-; RV32-NEXT: vslide1down.vx v9, v9, a0
; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: vslide1down.vx v9, v9, a0
; RV32-NEXT: vslidedown.vi v9, v8, 4, v0.t
; RV32-NEXT: vse8.v v9, (a1)
; RV32-NEXT: addi sp, s0, -128
@@ -452,25 +450,23 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV64-NEXT: fmin.d fa3, fa3, fa4
; RV64-NEXT: fcvt.l.d a2, fa3, rtz
; RV64-NEXT: and a0, a0, a2
-; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vfmv.f.s fa3, v8
-; RV64-NEXT: feq.d a0, fa3, fa3
+; RV64-NEXT: feq.d a2, fa3, fa3
; RV64-NEXT: fmax.d fa3, fa3, fa5
; RV64-NEXT: fmin.d fa3, fa3, fa4
-; RV64-NEXT: fcvt.l.d a2, fa3, rtz
+; RV64-NEXT: fcvt.l.d a3, fa3, rtz
; RV64-NEXT: fld fa3, 40(sp)
-; RV64-NEXT: neg a0, a0
-; RV64-NEXT: and a0, a0, a2
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; RV64-NEXT: vslide1down.vx v8, v10, a0
+; RV64-NEXT: neg a0, a2
+; RV64-NEXT: and a0, a0, a3
; RV64-NEXT: feq.d a2, fa3, fa3
; RV64-NEXT: fmax.d fa3, fa3, fa5
; RV64-NEXT: fmin.d fa3, fa3, fa4
; RV64-NEXT: fcvt.l.d a3, fa3, rtz
; RV64-NEXT: fld fa3, 32(sp)
-; RV64-NEXT: vslide1down.vx v8, v10, a0
+; RV64-NEXT: vslide1down.vx v8, v8, a0
; RV64-NEXT: neg a0, a2
; RV64-NEXT: and a0, a0, a3
; RV64-NEXT: feq.d a2, fa3, fa3
@@ -496,8 +492,8 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV64-NEXT: fmin.d fa5, fa5, fa4
; RV64-NEXT: fcvt.l.d a2, fa5, rtz
; RV64-NEXT: and a0, a0, a2
-; RV64-NEXT: vslide1down.vx v9, v9, a0
; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: vslide1down.vx v9, v9, a0
; RV64-NEXT: vslidedown.vi v9, v8, 4, v0.t
; RV64-NEXT: vse8.v v9, (a1)
; RV64-NEXT: addi sp, s0, -128
@@ -542,46 +538,43 @@ define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
; RV32-NEXT: fmax.d fa4, fa4, fa3
; RV32-NEXT: fmin.d fa4, fa4, fa5
; RV32-NEXT: fcvt.wu.d a2, fa4, rtz
-; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV32-NEXT: vmv.v.x v10, a2
-; RV32-NEXT: vslide1down.vx v10, v10, a0
; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 2
-; RV32-NEXT: vfmv.f.s fa4, v12
+; RV32-NEXT: vslidedown.vi v10, v8, 2
+; RV32-NEXT: vfmv.f.s fa4, v10
; RV32-NEXT: fmax.d fa4, fa4, fa3
; RV32-NEXT: fmin.d fa4, fa4, fa5
-; RV32-NEXT: fcvt.wu.d a0, fa4, rtz
-; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV32-NEXT: vslide1down.vx v10, v10, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: fcvt.wu.d a3, fa4, rtz
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa4, v8
-; RV32-NEXT: fmax.d fa4, fa4, fa3
; RV32-NEXT: fld fa2, 40(sp)
-; RV32-NEXT: fmin.d fa4, fa4, fa5
-; RV32-NEXT: fcvt.wu.d a0, fa4, rtz
-; RV32-NEXT: fld fa4, 32(sp)
-; RV32-NEXT: fmax.d fa2, fa2, fa3
-; RV32-NEXT: fmin.d fa2, fa2, fa5
-; RV32-NEXT: fcvt.wu.d a2, fa2, rtz
; RV32-NEXT: fmax.d fa4, fa4, fa3
; RV32-NEXT: fmin.d fa4, fa4, fa5
-; RV32-NEXT: fld fa2, 48(sp)
-; RV32-NEXT: fcvt.wu.d a3, fa4, rtz
+; RV32-NEXT: fcvt.wu.d a4, fa4, rtz
+; RV32-NEXT: fmax.d fa4, fa2, fa3
+; RV32-NEXT: fld fa2, 32(sp)
+; RV32-NEXT: fmin.d fa4, fa4, fa5
+; RV32-NEXT: fcvt.wu.d a5, fa4, rtz
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; RV32-NEXT: vslide1down.vx v8, v10, a0
+; RV32-NEXT: vmv.v.x v8, a2
; RV32-NEXT: fmax.d fa4, fa2, fa3
; RV32-NEXT: fmin.d fa4, fa4, fa5
+; RV32-NEXT: fcvt.wu.d a2, fa4, rtz
+; RV32-NEXT: fld fa4, 48(sp)
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a3
+; RV32-NEXT: vslide1down.vx v8, v8, a4
+; RV32-NEXT: fmax.d fa4, fa4, fa3
+; RV32-NEXT: fmin.d fa4, fa4, fa5
; RV32-NEXT: fcvt.wu.d a0, fa4, rtz
; RV32-NEXT: fld fa4, 56(sp)
-; RV32-NEXT: vmv.v.x v9, a3
-; RV32-NEXT: vslide1down.vx v9, v9, a2
+; RV32-NEXT: vmv.v.x v9, a2
+; RV32-NEXT: vslide1down.vx v9, v9, a5
; RV32-NEXT: vslide1down.vx v9, v9, a0
; RV32-NEXT: fmax.d fa4, fa4, fa3
; RV32-NEXT: fmin.d fa5, fa4, fa5
; RV32-NEXT: fcvt.wu.d a0, fa5, rtz
-; RV32-NEXT: vslide1down.vx v9, v9, a0
; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: vslide1down.vx v9, v9, a0
; RV32-NEXT: vslidedown.vi v9, v8, 4, v0.t
; RV32-NEXT: vse8.v v9, (a1)
; RV32-NEXT: addi sp, s0, -128
@@ -618,46 +611,43 @@ define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
; RV64-NEXT: fmax.d fa4, fa4, fa3
; RV64-NEXT: fmin.d fa4, fa4, fa5
; RV64-NEXT: fcvt.lu.d a2, fa4, rtz
-; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV64-NEXT: vmv.v.x v10, a2
-; RV64-NEXT: vslide1down.vx v10, v10, a0
; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v12, v8, 2
-; RV64-NEXT: vfmv.f.s fa4, v12
+; RV64-NEXT: vslidedown.vi v10, v8, 2
+; RV64-NEXT: vfmv.f.s fa4, v10
; RV64-NEXT: fmax.d fa4, fa4, fa3
; RV64-NEXT: fmin.d fa4, fa4, fa5
-; RV64-NEXT: fcvt.lu.d a0, fa4, rtz
-; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT: fcvt.lu.d a3, fa4, rtz
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vfmv.f.s fa4, v8
-; RV64-NEXT: fmax.d fa4, fa4, fa3
; RV64-NEXT: fld fa2, 40(sp)
-; RV64-NEXT: fmin.d fa4, fa4, fa5
-; RV64-NEXT: fcvt.lu.d a0, fa4, rtz
-; RV64-NEXT: fld fa4, 32(sp)
-; RV64-NEXT: fmax.d fa2, fa2, fa3
-; RV64-NEXT: fmin.d fa2, fa2, fa5
-; RV64-NEXT: fcvt.lu.d a2, fa2, rtz
; RV64-NEXT: fmax.d fa4, fa4, fa3
; RV64-NEXT: fmin.d fa4, fa4, fa5
-; RV64-NEXT: fld fa2, 48(sp)
-; RV64-NEXT: fcvt.lu.d a3, fa4, rtz
+; RV64-NEXT: fcvt.lu.d a4, fa4, rtz
+; RV64-NEXT: fmax.d fa4, fa2, fa3
+; RV64-NEXT: fld fa2, 32(sp)
+; RV64-NEXT: fmin.d fa4, fa4, fa5
+; RV64-NEXT: fcvt.lu.d a5, fa4, rtz
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; RV64-NEXT: vslide1down.vx v8, v10, a0
+; RV64-NEXT: vmv.v.x v8, a2
; RV64-NEXT: fmax.d fa4, fa2, fa3
; RV64-NEXT: fmin.d fa4, fa4, fa5
+; RV64-NEXT: fcvt.lu.d a2, fa4, rtz
+; RV64-NEXT: fld fa4, 48(sp)
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, a3
+; RV64-NEXT: vslide1down.vx v8, v8, a4
+; RV64-NEXT: fmax.d fa4, fa4, fa3
+; RV64-NEXT: fmin.d fa4, fa4, fa5
; RV64-NEXT: fcvt.lu.d a0, fa4, rtz
; RV64-NEXT: fld fa4, 56(sp)
-; RV64-NEXT: vmv.v.x v9, a3
-; RV64-NEXT: vslide1down.vx v9, v9, a2
+; RV64-NEXT: vmv.v.x v9, a2
+; RV64-NEXT: vslide1down.vx v9, v9, a5
; RV64-NEXT: vslide1down.vx v9, v9, a0
; RV64-NEXT: fmax.d fa4, fa4, fa3
; RV64-NEXT: fmin.d fa5, fa4, fa5
; RV64-NEXT: fcvt.lu.d a0, fa5, rtz
-; RV64-NEXT: vslide1down.vx v9, v9, a0
; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: vslide1down.vx v9, v9, a0
; RV64-NEXT: vslidedown.vi v9, v8, 4, v0.t
; RV64-NEXT: vse8.v v9, (a1)
; RV64-NEXT: addi sp, s0, -128
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll
index 48cc3f17a626..f195eeadf027 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll
@@ -96,8 +96,8 @@ declare <32 x double> @llvm.vp.fpext.v32f64.v32f32(<32 x float>, <32 x i1>, i32)
define <32 x double> @vfpext_v32f32_v32f64(<32 x float> %a, <32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vfpext_v32f32_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB7_2
@@ -112,8 +112,8 @@ define <32 x double> @vfpext_v32f32_v32f64(<32 x float> %a, <32 x i1> %m, i32 ze
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 16
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v16, v8, v0.t
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
index 49a1b19b58a2..a4050b716e78 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
@@ -394,8 +394,8 @@ declare <32 x i64> @llvm.vp.fptosi.v32i64.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x i64> @vfptosi_v32i64_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_v32i64_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB25_2
@@ -408,8 +408,8 @@ define <32 x i64> @vfptosi_v32i64_v32f64(<32 x double> %va, <32 x i1> %m, i32 ze
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16, v0.t
; CHECK-NEXT: ret
%v = call <32 x i64> @llvm.vp.fptosi.v32i64.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll
index d44efa2f6133..b652cdd88c7c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll
@@ -394,8 +394,8 @@ declare <32 x i64> @llvm.vp.fptoui.v32i64.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x i64> @vfptoui_v32i64_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_v32i64_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB25_2
@@ -408,8 +408,8 @@ define <32 x i64> @vfptoui_v32i64_v32f64(<32 x double> %va, <32 x i1> %m, i32 ze
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v16, v16, v0.t
; CHECK-NEXT: ret
%v = call <32 x i64> @llvm.vp.fptoui.v32i64.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll
index d890bf5412f9..920eed322363 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll
@@ -98,8 +98,8 @@ define <32 x float> @vfptrunc_v32f32_v32f64(<32 x double> %a, <32 x i1> %m, i32
; CHECK-LABEL: vfptrunc_v32f32_v32f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv8r.v v24, v8
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v12, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB7_2
@@ -112,8 +112,8 @@ define <32 x float> @vfptrunc_v32f32_v32f64(<32 x double> %a, <32 x i1> %m, i32
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v24, v16, v0.t
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-costrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll
index f189354237ee..f189354237ee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-costrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index 53de1a875535..e81f686a2830 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -277,14 +277,14 @@ define void @insert_v8i32_v2i32_0(ptr %vp, ptr %svp) {
define void @insert_v8i32_v2i32_2(ptr %vp, ptr %svp) {
; VLA-LABEL: insert_v8i32_v2i32_2:
; VLA: # %bb.0:
-; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; VLA-NEXT: vle32.v v8, (a1)
; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; VLA-NEXT: vle32.v v10, (a0)
+; VLA-NEXT: vle32.v v8, (a0)
+; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLA-NEXT: vle32.v v10, (a1)
; VLA-NEXT: vsetivli zero, 4, e32, m2, tu, ma
-; VLA-NEXT: vslideup.vi v10, v8, 2
+; VLA-NEXT: vslideup.vi v8, v10, 2
; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; VLA-NEXT: vse32.v v10, (a0)
+; VLA-NEXT: vse32.v v8, (a0)
; VLA-NEXT: ret
;
; VLS-LABEL: insert_v8i32_v2i32_2:
@@ -306,12 +306,13 @@ define void @insert_v8i32_v2i32_2(ptr %vp, ptr %svp) {
define void @insert_v8i32_v2i32_6(ptr %vp, ptr %svp) {
; VLA-LABEL: insert_v8i32_v2i32_6:
; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLA-NEXT: vle32.v v8, (a0)
; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; VLA-NEXT: vle32.v v8, (a1)
+; VLA-NEXT: vle32.v v10, (a1)
; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; VLA-NEXT: vle32.v v10, (a0)
-; VLA-NEXT: vslideup.vi v10, v8, 6
-; VLA-NEXT: vse32.v v10, (a0)
+; VLA-NEXT: vslideup.vi v8, v10, 6
+; VLA-NEXT: vse32.v v8, (a0)
; VLA-NEXT: ret
;
; VLS-LABEL: insert_v8i32_v2i32_6:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
index 4954827876c1..776a1e9bab6b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -533,11 +533,11 @@ define void @insertelt_c6_v8i64_0_add(ptr %x, ptr %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: li a2, 6
+; CHECK-NEXT: vle64.v v12, (a1)
+; CHECK-NEXT: li a1, 6
; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, ma
-; CHECK-NEXT: vmv.s.x v8, a2
+; CHECK-NEXT: vmv.s.x v8, a1
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vle64.v v12, (a1)
; CHECK-NEXT: vadd.vv v8, v8, v12
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
index 4f4f0a09de74..4a5d37b2a85a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -669,13 +669,14 @@ define void @buildvec_seq_v9i8(ptr %x) {
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 3
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.v.i v9, 3
; CHECK-NEXT: li a1, 146
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a1
+; CHECK-NEXT: vmv.s.x v8, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 2, v0
+; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v9, 2, v0
; CHECK-NEXT: vsetivli zero, 9, e8, m1, ta, ma
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
@@ -1214,11 +1215,11 @@ define <16 x i8> @buildvec_v16i8_loads_contigous(ptr %p) {
; CHECK-NEXT: vslide1down.vx v8, v8, t4
; CHECK-NEXT: vslide1down.vx v8, v8, t5
; CHECK-NEXT: vslide1down.vx v8, v8, t6
-; CHECK-NEXT: vslide1down.vx v8, v8, a0
-; CHECK-NEXT: li a0, 255
+; CHECK-NEXT: li a1, 255
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vmv.s.x v0, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vslide1down.vx v8, v8, a0
; CHECK-NEXT: vslidedown.vi v8, v10, 8, v0.t
; CHECK-NEXT: ret
%p2 = getelementptr i8, ptr %p, i32 1
@@ -1308,11 +1309,11 @@ define <16 x i8> @buildvec_v16i8_loads_gather(ptr %p) {
; CHECK-NEXT: vslide1down.vx v8, v8, t4
; CHECK-NEXT: vslide1down.vx v8, v8, t5
; CHECK-NEXT: vslide1down.vx v8, v8, t6
-; CHECK-NEXT: vslide1down.vx v8, v8, a0
-; CHECK-NEXT: li a0, 255
+; CHECK-NEXT: li a1, 255
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vmv.s.x v0, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vslide1down.vx v8, v8, a0
; CHECK-NEXT: vslidedown.vi v8, v10, 8, v0.t
; CHECK-NEXT: ret
%p2 = getelementptr i8, ptr %p, i32 1
@@ -1488,11 +1489,11 @@ define <16 x i8> @buildvec_v16i8_undef_edges(ptr %p) {
; CHECK-NEXT: vslide1down.vx v8, v9, a1
; CHECK-NEXT: vslide1down.vx v8, v8, a7
; CHECK-NEXT: vslide1down.vx v8, v8, a0
-; CHECK-NEXT: vslidedown.vi v8, v8, 4
; CHECK-NEXT: li a0, 255
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vslidedown.vi v8, v8, 4
; CHECK-NEXT: vslidedown.vi v8, v10, 8, v0.t
; CHECK-NEXT: ret
%p4 = getelementptr i8, ptr %p, i32 31
@@ -1553,11 +1554,11 @@ define <16 x i8> @buildvec_v16i8_loads_undef_scattered(ptr %p) {
; CHECK-NEXT: vslide1down.vx v8, v8, a7
; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vslide1down.vx v8, v8, t0
-; CHECK-NEXT: vslide1down.vx v8, v8, a0
-; CHECK-NEXT: li a0, 255
+; CHECK-NEXT: li a1, 255
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vmv.s.x v0, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vslide1down.vx v8, v8, a0
; CHECK-NEXT: vslidedown.vi v8, v10, 8, v0.t
; CHECK-NEXT: ret
%p2 = getelementptr i8, ptr %p, i32 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll
index 4509642fdef1..e0c676788dcc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll
@@ -828,112 +828,104 @@ define i64 @explode_8xi64(<8 x i64> %v) {
define i64 @explode_16xi64(<16 x i64> %v) {
; RV32-LABEL: explode_16xi64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -64
-; RV32-NEXT: .cfi_def_cfa_offset 64
-; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s3, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s4, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s6, 32(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s7, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s8, 24(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s9, 20(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s10, 16(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s11, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: .cfi_offset s3, -20
-; RV32-NEXT: .cfi_offset s4, -24
-; RV32-NEXT: .cfi_offset s5, -28
-; RV32-NEXT: .cfi_offset s6, -32
-; RV32-NEXT: .cfi_offset s7, -36
-; RV32-NEXT: .cfi_offset s8, -40
-; RV32-NEXT: .cfi_offset s9, -44
-; RV32-NEXT: .cfi_offset s10, -48
-; RV32-NEXT: .cfi_offset s11, -52
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw s0, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s3, 32(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s4, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s5, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s6, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s7, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s8, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s9, 8(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s10, 4(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s11, 0(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset s0, -4
+; RV32-NEXT: .cfi_offset s1, -8
+; RV32-NEXT: .cfi_offset s2, -12
+; RV32-NEXT: .cfi_offset s3, -16
+; RV32-NEXT: .cfi_offset s4, -20
+; RV32-NEXT: .cfi_offset s5, -24
+; RV32-NEXT: .cfi_offset s6, -28
+; RV32-NEXT: .cfi_offset s7, -32
+; RV32-NEXT: .cfi_offset s8, -36
+; RV32-NEXT: .cfi_offset s9, -40
+; RV32-NEXT: .cfi_offset s10, -44
+; RV32-NEXT: .cfi_offset s11, -48
; RV32-NEXT: vsetivli zero, 1, e64, m8, ta, ma
; RV32-NEXT: vslidedown.vi v16, v8, 2
-; RV32-NEXT: li a3, 32
-; RV32-NEXT: vsrl.vx v24, v16, a3
-; RV32-NEXT: vmv.x.s a0, v24
-; RV32-NEXT: vmv.x.s a1, v16
-; RV32-NEXT: sw a1, 8(sp) # 4-byte Folded Spill
-; RV32-NEXT: vslidedown.vi v16, v8, 3
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: li a0, 32
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s a1, v24
-; RV32-NEXT: sw a1, 4(sp) # 4-byte Folded Spill
+; RV32-NEXT: vmv.x.s a2, v16
+; RV32-NEXT: vslidedown.vi v16, v8, 3
+; RV32-NEXT: vsrl.vx v24, v16, a0
+; RV32-NEXT: vmv.x.s a3, v24
; RV32-NEXT: vmv.x.s a4, v16
; RV32-NEXT: vslidedown.vi v16, v8, 4
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s a5, v24
; RV32-NEXT: vmv.x.s a6, v16
; RV32-NEXT: vslidedown.vi v16, v8, 5
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s a7, v24
; RV32-NEXT: vmv.x.s t0, v16
; RV32-NEXT: vslidedown.vi v16, v8, 6
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s t1, v24
; RV32-NEXT: vmv.x.s t2, v16
; RV32-NEXT: vslidedown.vi v16, v8, 7
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s t3, v24
; RV32-NEXT: vmv.x.s t4, v16
; RV32-NEXT: vslidedown.vi v16, v8, 8
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s t5, v24
; RV32-NEXT: vmv.x.s t6, v16
; RV32-NEXT: vslidedown.vi v16, v8, 9
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s s0, v24
; RV32-NEXT: vmv.x.s s1, v16
; RV32-NEXT: vslidedown.vi v16, v8, 10
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s s2, v24
; RV32-NEXT: vmv.x.s s3, v16
; RV32-NEXT: vslidedown.vi v16, v8, 11
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s s4, v24
; RV32-NEXT: vmv.x.s s5, v16
; RV32-NEXT: vslidedown.vi v16, v8, 12
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s s6, v24
; RV32-NEXT: vmv.x.s s7, v16
; RV32-NEXT: vslidedown.vi v16, v8, 13
-; RV32-NEXT: vsrl.vx v24, v16, a3
-; RV32-NEXT: vmv.x.s s8, v24
-; RV32-NEXT: vmv.x.s s9, v16
+; RV32-NEXT: vsrl.vx v24, v16, a0
+; RV32-NEXT: vmv.x.s s9, v24
+; RV32-NEXT: vmv.x.s s8, v16
; RV32-NEXT: vslidedown.vi v16, v8, 14
-; RV32-NEXT: vsrl.vx v24, v16, a3
-; RV32-NEXT: vmv.x.s s10, v24
-; RV32-NEXT: vmv.x.s s11, v16
-; RV32-NEXT: vslidedown.vi v16, v8, 15
-; RV32-NEXT: vsrl.vx v24, v16, a3
-; RV32-NEXT: vmv.x.s ra, v24
-; RV32-NEXT: vmv.s.x v9, zero
-; RV32-NEXT: vmv.x.s a2, v16
+; RV32-NEXT: vsrl.vx v24, v16, a0
+; RV32-NEXT: vmv.s.x v17, zero
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vredxor.vs v8, v8, v9
+; RV32-NEXT: vredxor.vs v17, v8, v17
+; RV32-NEXT: vsetivli zero, 1, e64, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 15
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vsrl.vx v9, v8, a3
-; RV32-NEXT: vmv.x.s a3, v9
-; RV32-NEXT: add a3, a3, a0
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: sltu a1, a0, a1
-; RV32-NEXT: add a1, a3, a1
-; RV32-NEXT: lw a3, 4(sp) # 4-byte Folded Reload
-; RV32-NEXT: add a1, a1, a3
-; RV32-NEXT: add a4, a0, a4
-; RV32-NEXT: sltu a0, a4, a0
-; RV32-NEXT: add a0, a0, a5
+; RV32-NEXT: vsrl.vx v18, v17, a0
+; RV32-NEXT: vmv.x.s s10, v18
+; RV32-NEXT: vmv.x.s s11, v17
+; RV32-NEXT: vsetivli zero, 1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v0, v8, a0
+; RV32-NEXT: add a1, s10, a1
+; RV32-NEXT: add a2, s11, a2
+; RV32-NEXT: sltu a0, a2, s11
; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, a0, a3
+; RV32-NEXT: add a4, a2, a4
+; RV32-NEXT: sltu a1, a4, a2
+; RV32-NEXT: add a1, a1, a5
+; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a6, a4, a6
; RV32-NEXT: sltu a1, a6, a4
; RV32-NEXT: add a1, a1, a7
@@ -968,33 +960,36 @@ define i64 @explode_16xi64(<16 x i64> %v) {
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add s7, s5, s7
; RV32-NEXT: sltu a1, s7, s5
-; RV32-NEXT: add a1, a1, s8
+; RV32-NEXT: add a1, a1, s9
; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add s9, s7, s9
-; RV32-NEXT: sltu a1, s9, s7
-; RV32-NEXT: add a1, a1, s10
+; RV32-NEXT: vmv.x.s a1, v24
+; RV32-NEXT: add s8, s7, s8
+; RV32-NEXT: sltu a2, s8, s7
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: vmv.x.s a2, v16
; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add s11, s9, s11
-; RV32-NEXT: sltu a1, s11, s9
-; RV32-NEXT: add a1, a1, ra
+; RV32-NEXT: vmv.x.s a1, v0
+; RV32-NEXT: add a2, s8, a2
+; RV32-NEXT: sltu a3, a2, s8
+; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: add a1, a0, a1
-; RV32-NEXT: add a0, s11, a2
-; RV32-NEXT: sltu a2, a0, s11
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: add a0, a2, a0
+; RV32-NEXT: sltu a2, a0, a2
; RV32-NEXT: add a1, a1, a2
-; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s3, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s4, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s5, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s6, 32(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s7, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s8, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s9, 20(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s10, 16(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s11, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 64
+; RV32-NEXT: lw s0, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s3, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s4, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s5, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s6, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s7, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s8, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s9, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s10, 4(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s11, 0(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: explode_16xi64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
index 40ff8b50d99d..2ea90203b210 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
@@ -69,9 +69,9 @@ define <4 x i64> @interleave_v2i64(<2 x i64> %x, <2 x i64> %y) {
; RV32-V512-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
; RV32-V512-NEXT: vid.v v10
; RV32-V512-NEXT: vsrl.vi v11, v10, 1
+; RV32-V512-NEXT: vmv.v.i v0, 10
; RV32-V512-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV32-V512-NEXT: vrgatherei16.vv v10, v8, v11
-; RV32-V512-NEXT: vmv.v.i v0, 10
; RV32-V512-NEXT: vrgatherei16.vv v10, v9, v11, v0.t
; RV32-V512-NEXT: vmv.v.v v8, v10
; RV32-V512-NEXT: ret
@@ -81,8 +81,8 @@ define <4 x i64> @interleave_v2i64(<2 x i64> %x, <2 x i64> %y) {
; RV64-V512-NEXT: vsetivli zero, 4, e64, m1, ta, mu
; RV64-V512-NEXT: vid.v v10
; RV64-V512-NEXT: vsrl.vi v11, v10, 1
-; RV64-V512-NEXT: vrgather.vv v10, v8, v11
; RV64-V512-NEXT: vmv.v.i v0, 10
+; RV64-V512-NEXT: vrgather.vv v10, v8, v11
; RV64-V512-NEXT: vrgather.vv v10, v9, v11, v0.t
; RV64-V512-NEXT: vmv.v.v v8, v10
; RV64-V512-NEXT: ret
@@ -195,8 +195,8 @@ define <4 x i32> @interleave_v4i32_offset_1(<4 x i32> %x, <4 x i32> %y) {
; V128-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; V128-NEXT: vid.v v8
; V128-NEXT: vsrl.vi v8, v8, 1
-; V128-NEXT: vadd.vi v8, v8, 1
; V128-NEXT: vmv.v.i v0, 10
+; V128-NEXT: vadd.vi v8, v8, 1
; V128-NEXT: vrgather.vv v10, v9, v8, v0.t
; V128-NEXT: vmv.v.v v8, v10
; V128-NEXT: ret
@@ -210,8 +210,8 @@ define <4 x i32> @interleave_v4i32_offset_1(<4 x i32> %x, <4 x i32> %y) {
; V512-NEXT: vsetivli zero, 4, e32, mf2, ta, mu
; V512-NEXT: vid.v v8
; V512-NEXT: vsrl.vi v8, v8, 1
-; V512-NEXT: vadd.vi v8, v8, 1
; V512-NEXT: vmv.v.i v0, 10
+; V512-NEXT: vadd.vi v8, v8, 1
; V512-NEXT: vrgather.vv v10, v9, v8, v0.t
; V512-NEXT: vmv1r.v v8, v10
; V512-NEXT: ret
@@ -426,13 +426,13 @@ define <64 x i32> @interleave_v32i32(<32 x i32> %x, <32 x i32> %y) {
; V128-NEXT: vwmaccu.vx v8, a0, v16
; V128-NEXT: lui a1, 699051
; V128-NEXT: addi a1, a1, -1366
-; V128-NEXT: li a2, 32
; V128-NEXT: vmv.s.x v0, a1
-; V128-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; V128-NEXT: li a1, 32
+; V128-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; V128-NEXT: vmerge.vvm v24, v8, v24, v0
-; V128-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; V128-NEXT: addi a1, sp, 16
; V128-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; V128-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; V128-NEXT: vwaddu.vv v0, v16, v8
; V128-NEXT: vwmaccu.vx v0, a0, v8
; V128-NEXT: vmv8r.v v8, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
index aba69dc84620..32782f1c6045 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
@@ -5,9 +5,8 @@
define <4 x i16> @shuffle_v4i16(<4 x i16> %x, <4 x i16> %y) {
; CHECK-LABEL: shuffle_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 11
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v0, 11
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: ret
%s = shufflevector <4 x i16> %x, <4 x i16> %y, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
@@ -29,9 +28,8 @@ define <8 x i32> @shuffle_v8i32(<8 x i32> %x, <8 x i32> %y) {
define <4 x i16> @shuffle_xv_v4i16(<4 x i16> %x) {
; CHECK-LABEL: shuffle_xv_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 9
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v0, 9
; CHECK-NEXT: vmerge.vim v8, v8, 5, v0
; CHECK-NEXT: ret
%s = shufflevector <4 x i16> <i16 5, i16 5, i16 5, i16 5>, <4 x i16> %x, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
@@ -41,9 +39,8 @@ define <4 x i16> @shuffle_xv_v4i16(<4 x i16> %x) {
define <4 x i16> @shuffle_vx_v4i16(<4 x i16> %x) {
; CHECK-LABEL: shuffle_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 6
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v0, 6
; CHECK-NEXT: vmerge.vim v8, v8, 5, v0
; CHECK-NEXT: ret
%s = shufflevector <4 x i16> %x, <4 x i16> <i16 5, i16 5, i16 5, i16 5>, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
@@ -89,8 +86,8 @@ define <4 x i16> @vrgather_shuffle_vv_v4i16(<4 x i16> %x, <4 x i16> %y) {
; CHECK-NEXT: addi a0, a0, %lo(.LCPI6_0)
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vle16.v v11, (a0)
-; CHECK-NEXT: vrgather.vv v10, v8, v11
; CHECK-NEXT: vmv.v.i v0, 8
+; CHECK-NEXT: vrgather.vv v10, v8, v11
; CHECK-NEXT: vrgather.vi v10, v9, 1, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
@@ -162,22 +159,21 @@ define <8 x i64> @vrgather_shuffle_vv_v8i64(<8 x i64> %x, <8 x i64> %y) {
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV32-NEXT: vmv.v.i v16, 2
-; RV32-NEXT: lui a0, %hi(.LCPI11_0)
-; RV32-NEXT: addi a0, a0, %lo(.LCPI11_0)
-; RV32-NEXT: vle16.v v20, (a0)
; RV32-NEXT: li a0, 5
+; RV32-NEXT: lui a1, %hi(.LCPI11_0)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI11_0)
+; RV32-NEXT: vle16.v v20, (a1)
; RV32-NEXT: vslide1down.vx v21, v16, a0
-; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; RV32-NEXT: vrgatherei16.vv v16, v8, v20
; RV32-NEXT: li a0, 164
; RV32-NEXT: vmv.s.x v0, a0
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vrgatherei16.vv v16, v8, v20
; RV32-NEXT: vrgatherei16.vv v16, v12, v21, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: vrgather_shuffle_vv_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vmv4r.v v16, v8
; RV64-NEXT: lui a0, 327683
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: addi a0, a0, 1
@@ -186,7 +182,7 @@ define <8 x i64> @vrgather_shuffle_vv_v8i64(<8 x i64> %x, <8 x i64> %y) {
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vmv.v.x v20, a0
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vrgatherei16.vv v8, v16, v20
+; RV64-NEXT: vrgatherei16.vv v16, v8, v20
; RV64-NEXT: li a0, 164
; RV64-NEXT: vmv.s.x v0, a0
; RV64-NEXT: lui a0, 163841
@@ -194,9 +190,10 @@ define <8 x i64> @vrgather_shuffle_vv_v8i64(<8 x i64> %x, <8 x i64> %y) {
; RV64-NEXT: addi a0, a0, 1
; RV64-NEXT: slli a0, a0, 17
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT: vmv.v.x v16, a0
+; RV64-NEXT: vmv.v.x v8, a0
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
-; RV64-NEXT: vrgatherei16.vv v8, v12, v16, v0.t
+; RV64-NEXT: vrgatherei16.vv v16, v12, v8, v0.t
+; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%s = shufflevector <8 x i64> %x, <8 x i64> %y, <8 x i32> <i32 1, i32 2, i32 10, i32 5, i32 1, i32 10, i32 3, i32 13>
ret <8 x i64> %s
@@ -210,13 +207,13 @@ define <8 x i64> @vrgather_shuffle_xv_v8i64(<8 x i64> %x) {
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vle16.v v16, (a0)
; RV32-NEXT: vmv.v.i v20, -1
-; RV32-NEXT: vrgatherei16.vv v12, v20, v16
; RV32-NEXT: lui a0, %hi(.LCPI12_1)
; RV32-NEXT: addi a0, a0, %lo(.LCPI12_1)
-; RV32-NEXT: vle16.v v16, (a0)
+; RV32-NEXT: vle16.v v17, (a0)
; RV32-NEXT: li a0, 113
; RV32-NEXT: vmv.s.x v0, a0
-; RV32-NEXT: vrgatherei16.vv v12, v8, v16, v0.t
+; RV32-NEXT: vrgatherei16.vv v12, v20, v16
+; RV32-NEXT: vrgatherei16.vv v12, v8, v17, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
@@ -367,10 +364,10 @@ define <8 x i8> @splat_ve4_ins_i1ve3(<8 x i8> %v) {
define <8 x i8> @splat_ve2_we0(<8 x i8> %v, <8 x i8> %w) {
; CHECK-LABEL: splat_ve2_we0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vrgather.vi v10, v8, 2
; CHECK-NEXT: li a0, 66
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vrgather.vi v10, v8, 2
; CHECK-NEXT: vrgather.vi v10, v9, 0, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
@@ -386,10 +383,10 @@ define <8 x i8> @splat_ve2_we0_ins_i0ve4(<8 x i8> %v, <8 x i8> %w) {
; CHECK-NEXT: li a0, 4
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, ma
; CHECK-NEXT: vmv.s.x v11, a0
-; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vrgather.vv v10, v8, v11
; CHECK-NEXT: li a0, 66
; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
+; CHECK-NEXT: vrgather.vv v10, v8, v11
; CHECK-NEXT: vrgather.vi v10, v9, 0, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
@@ -402,10 +399,10 @@ define <8 x i8> @splat_ve2_we0_ins_i0we4(<8 x i8> %v, <8 x i8> %w) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vrgather.vi v10, v8, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 4
; CHECK-NEXT: li a0, 67
; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 4
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
@@ -421,10 +418,10 @@ define <8 x i8> @splat_ve2_we0_ins_i2ve4(<8 x i8> %v, <8 x i8> %w) {
; CHECK-NEXT: addi a0, a0, 514
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v11, a0
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vrgather.vv v10, v8, v11
; CHECK-NEXT: li a0, 66
; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vrgather.vv v10, v8, v11
; CHECK-NEXT: vrgather.vi v10, v9, 0, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
@@ -440,10 +437,10 @@ define <8 x i8> @splat_ve2_we0_ins_i2we4(<8 x i8> %v, <8 x i8> %w) {
; CHECK-NEXT: vmv.v.i v11, 0
; CHECK-NEXT: vsetivli zero, 3, e8, mf2, tu, ma
; CHECK-NEXT: vslideup.vi v11, v10, 2
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vrgather.vi v10, v8, 2
; CHECK-NEXT: li a0, 70
; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vrgather.vi v10, v8, 2
; CHECK-NEXT: vrgather.vv v10, v9, v11, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
@@ -463,10 +460,10 @@ define <8 x i8> @splat_ve2_we0_ins_i2ve4_i5we6(<8 x i8> %v, <8 x i8> %w) {
; CHECK-NEXT: addi a0, a0, 2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vrgather.vv v10, v8, v12
; CHECK-NEXT: li a0, 98
; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vrgather.vv v10, v8, v12
; CHECK-NEXT: vrgather.vv v10, v9, v11, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
@@ -666,8 +663,8 @@ define <8 x i8> @merge_start_into_start(<8 x i8> %v, <8 x i8> %w) {
define <8 x i8> @merge_slidedown(<8 x i8> %v, <8 x i8> %w) {
; CHECK-LABEL: merge_slidedown:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: li a0, 195
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -680,10 +677,10 @@ define <8 x i8> @merge_slidedown(<8 x i8> %v, <8 x i8> %w) {
define <8 x i8> @merge_non_contiguous_slideup_slidedown(<8 x i8> %v, <8 x i8> %w) {
; CHECK-LABEL: merge_non_contiguous_slideup_slidedown:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: li a0, -22
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vslideup.vi v8, v9, 1, v0.t
; CHECK-NEXT: ret
%res = shufflevector <8 x i8> %v, <8 x i8> %w, <8 x i32> <i32 2, i32 8, i32 4, i32 10, i32 6, i32 12, i32 13, i32 14>
@@ -694,13 +691,13 @@ define <8 x i8> @merge_non_contiguous_slideup_slidedown(<8 x i8> %v, <8 x i8> %w
define <8 x i8> @unmergable(<8 x i8> %v, <8 x i8> %w) {
; CHECK-LABEL: unmergable:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: lui a0, %hi(.LCPI46_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI46_0)
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vle8.v v10, (a0)
; CHECK-NEXT: li a0, -22
; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
%res = shufflevector <8 x i8> %v, <8 x i8> %w, <8 x i32> <i32 2, i32 9, i32 4, i32 11, i32 6, i32 13, i32 8, i32 15>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index 635869904832..79c36a629465 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -1100,46 +1100,46 @@ define void @mulhu_v16i8(ptr %x) {
; CHECK-LABEL: mulhu_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v9, (a0)
; CHECK-NEXT: lui a1, 3
; CHECK-NEXT: addi a1, a1, -2044
; CHECK-NEXT: vmv.s.x v0, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: li a1, -128
-; CHECK-NEXT: vmerge.vxm v10, v9, a1, v0
+; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: lui a1, 1
; CHECK-NEXT: addi a2, a1, 32
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a2
-; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmv.s.x v8, a2
; CHECK-NEXT: lui a2, %hi(.LCPI65_0)
; CHECK-NEXT: addi a2, a2, %lo(.LCPI65_0)
; CHECK-NEXT: vle8.v v11, (a2)
-; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsrl.vv v9, v8, v9
-; CHECK-NEXT: vmulhu.vv v9, v9, v11
-; CHECK-NEXT: vsub.vv v8, v8, v9
-; CHECK-NEXT: vmulhu.vv v8, v8, v10
-; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: li a2, -128
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmerge.vxm v12, v10, a2, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsrl.vv v8, v9, v8
+; CHECK-NEXT: vmulhu.vv v8, v8, v11
+; CHECK-NEXT: vsub.vv v9, v9, v8
+; CHECK-NEXT: vmulhu.vv v9, v9, v12
+; CHECK-NEXT: vadd.vv v9, v9, v8
; CHECK-NEXT: li a2, 513
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a2
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v9, 4
-; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
+; CHECK-NEXT: vmv.v.i v8, 4
+; CHECK-NEXT: vmerge.vim v10, v8, 1, v0
; CHECK-NEXT: addi a1, a1, 78
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
-; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmerge.vim v9, v9, 3, v0
; CHECK-NEXT: lui a1, 8
; CHECK-NEXT: addi a1, a1, 304
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a1
+; CHECK-NEXT: vmv.s.x v8, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmerge.vim v9, v9, 2, v0
-; CHECK-NEXT: vsrl.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v10, v10, 3, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 2, v0
+; CHECK-NEXT: vsrl.vv v8, v9, v8
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%a = load <16 x i8>, ptr %x
@@ -1158,16 +1158,16 @@ define void @mulhu_v8i16(ptr %x) {
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, ma
; CHECK-NEXT: vmv.s.x v10, a1
+; CHECK-NEXT: lui a1, %hi(.LCPI66_0)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI66_0)
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v11, 1
+; CHECK-NEXT: vle16.v v11, (a1)
+; CHECK-NEXT: vmv.v.i v12, 1
; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v9, v11, 6
+; CHECK-NEXT: vslideup.vi v9, v12, 6
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: lui a1, %hi(.LCPI66_0)
-; CHECK-NEXT: addi a1, a1, %lo(.LCPI66_0)
-; CHECK-NEXT: vle16.v v12, (a1)
; CHECK-NEXT: vsrl.vv v9, v8, v9
-; CHECK-NEXT: vmulhu.vv v9, v9, v12
+; CHECK-NEXT: vmulhu.vv v9, v9, v11
; CHECK-NEXT: vsub.vv v8, v8, v9
; CHECK-NEXT: vmulhu.vv v8, v8, v10
; CHECK-NEXT: vadd.vv v8, v8, v9
@@ -1176,7 +1176,7 @@ define void @mulhu_v8i16(ptr %x) {
; CHECK-NEXT: vmv.v.i v9, 3
; CHECK-NEXT: vmerge.vim v9, v9, 2, v0
; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v9, v11, 6
+; CHECK-NEXT: vslideup.vi v9, v12, 6
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9
; CHECK-NEXT: vse16.v v8, (a0)
@@ -1222,18 +1222,18 @@ define void @mulhu_v4i32(ptr %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: lui a1, 524288
-; CHECK-NEXT: vmv.s.x v9, a1
-; CHECK-NEXT: vmv.v.i v10, 0
-; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v9, 2
; CHECK-NEXT: lui a1, %hi(.LCPI68_0)
; CHECK-NEXT: addi a1, a1, %lo(.LCPI68_0)
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v9, (a1)
+; CHECK-NEXT: lui a1, 524288
+; CHECK-NEXT: vmv.s.x v10, a1
+; CHECK-NEXT: vmv.v.i v11, 0
+; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, ma
+; CHECK-NEXT: vslideup.vi v11, v10, 2
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vmulhu.vv v9, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v9
-; CHECK-NEXT: vmulhu.vv v8, v8, v10
+; CHECK-NEXT: vmulhu.vv v8, v8, v11
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: lui a1, 4128
; CHECK-NEXT: addi a1, a1, 514
@@ -1455,13 +1455,13 @@ define void @mulhs_v2i64(ptr %x) {
; RV64-LABEL: mulhs_v2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: lui a1, 349525
; RV64-NEXT: addiw a1, a1, 1365
-; RV64-NEXT: slli a2, a1, 32
-; RV64-NEXT: add a1, a1, a2
; RV64-NEXT: lui a2, %hi(.LCPI74_0)
; RV64-NEXT: ld a2, %lo(.LCPI74_0)(a2)
-; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: slli a3, a1, 32
+; RV64-NEXT: add a1, a1, a3
; RV64-NEXT: vmv.v.x v9, a1
; RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma
; RV64-NEXT: vmv.s.x v9, a2
@@ -3260,49 +3260,47 @@ define void @mulhu_v32i8(ptr %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vle8.v v10, (a0)
+; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: lui a1, 163907
; CHECK-NEXT: addi a1, a1, -2044
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
-; CHECK-NEXT: li a1, -128
-; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vxm v12, v10, a1, v0
; CHECK-NEXT: lui a1, 66049
; CHECK-NEXT: addi a1, a1, 32
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a1
-; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv.s.x v8, a1
; CHECK-NEXT: lui a1, %hi(.LCPI181_0)
; CHECK-NEXT: addi a1, a1, %lo(.LCPI181_0)
; CHECK-NEXT: vle8.v v14, (a1)
-; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
-; CHECK-NEXT: vsrl.vv v10, v8, v10
-; CHECK-NEXT: vmulhu.vv v10, v10, v14
-; CHECK-NEXT: vsub.vv v8, v8, v10
-; CHECK-NEXT: vmulhu.vv v8, v8, v12
-; CHECK-NEXT: vadd.vv v8, v8, v10
-; CHECK-NEXT: vmv.v.i v10, 4
+; CHECK-NEXT: li a1, -128
+; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vxm v16, v12, a1, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v12, 1, v0
+; CHECK-NEXT: vsrl.vv v8, v10, v8
+; CHECK-NEXT: vmulhu.vv v8, v8, v14
+; CHECK-NEXT: vsub.vv v10, v10, v8
+; CHECK-NEXT: vmulhu.vv v10, v10, v16
+; CHECK-NEXT: vadd.vv v10, v10, v8
; CHECK-NEXT: lui a1, 8208
; CHECK-NEXT: addi a1, a1, 513
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
+; CHECK-NEXT: vmv.v.i v8, 4
+; CHECK-NEXT: vmerge.vim v12, v8, 1, v0
; CHECK-NEXT: lui a1, 66785
; CHECK-NEXT: addi a1, a1, 78
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
-; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v10, v10, 3, v0
; CHECK-NEXT: lui a1, 529160
; CHECK-NEXT: addi a1, a1, 304
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a1
+; CHECK-NEXT: vmv.s.x v8, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v10, v10, 2, v0
-; CHECK-NEXT: vsrl.vv v8, v8, v10
+; CHECK-NEXT: vmerge.vim v12, v12, 3, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v12, 2, v0
+; CHECK-NEXT: vsrl.vv v8, v10, v8
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
@@ -3326,12 +3324,12 @@ define void @mulhu_v16i16(ptr %x) {
; RV32-NEXT: vmv.s.x v8, a1
; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV32-NEXT: vmv.v.i v9, 0
-; RV32-NEXT: vmv1r.v v0, v8
-; RV32-NEXT: vmerge.vim v9, v9, 1, v0
-; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; RV32-NEXT: lui a1, %hi(.LCPI182_0)
; RV32-NEXT: addi a1, a1, %lo(.LCPI182_0)
; RV32-NEXT: vle16.v v14, (a1)
+; RV32-NEXT: vmv1r.v v0, v8
+; RV32-NEXT: vmerge.vim v9, v9, 1, v0
+; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; RV32-NEXT: vsext.vf2 v16, v9
; RV32-NEXT: vsrl.vv v16, v10, v16
; RV32-NEXT: vmulhu.vv v14, v16, v14
@@ -3361,27 +3359,27 @@ define void @mulhu_v16i16(ptr %x) {
; RV64-NEXT: vmv.v.i v10, 0
; RV64-NEXT: lui a1, 1048568
; RV64-NEXT: vmerge.vxm v10, v10, a1, v0
+; RV64-NEXT: lui a1, %hi(.LCPI182_0)
+; RV64-NEXT: addi a1, a1, %lo(.LCPI182_0)
+; RV64-NEXT: vle16.v v12, (a1)
; RV64-NEXT: li a1, 1
; RV64-NEXT: slli a1, a1, 48
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT: vmv.v.x v12, a1
+; RV64-NEXT: vmv.v.x v14, a1
; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: lui a1, %hi(.LCPI182_0)
-; RV64-NEXT: addi a1, a1, %lo(.LCPI182_0)
-; RV64-NEXT: vle16.v v14, (a1)
-; RV64-NEXT: vsext.vf2 v16, v12
-; RV64-NEXT: vsrl.vv v12, v8, v16
-; RV64-NEXT: vmulhu.vv v12, v12, v14
-; RV64-NEXT: vsub.vv v8, v8, v12
-; RV64-NEXT: vmulhu.vv v8, v8, v10
-; RV64-NEXT: vadd.vv v8, v8, v12
+; RV64-NEXT: vsext.vf2 v16, v14
+; RV64-NEXT: vsrl.vv v14, v8, v16
+; RV64-NEXT: vmulhu.vv v12, v14, v12
; RV64-NEXT: lui a1, %hi(.LCPI182_1)
; RV64-NEXT: addi a1, a1, %lo(.LCPI182_1)
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT: vlse64.v v10, (a1), zero
+; RV64-NEXT: vlse64.v v14, (a1), zero
; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vsext.vf2 v12, v10
-; RV64-NEXT: vsrl.vv v8, v8, v12
+; RV64-NEXT: vsub.vv v8, v8, v12
+; RV64-NEXT: vmulhu.vv v8, v8, v10
+; RV64-NEXT: vadd.vv v8, v8, v12
+; RV64-NEXT: vsext.vf2 v10, v14
+; RV64-NEXT: vsrl.vv v8, v8, v10
; RV64-NEXT: vse16.v v8, (a0)
; RV64-NEXT: ret
%a = load <16 x i16>, ptr %x
@@ -3433,23 +3431,24 @@ define void @mulhu_v4i64(ptr %x) {
; RV32-NEXT: vle32.v v10, (a1)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vmulhu.vv v10, v8, v10
-; RV32-NEXT: vsub.vv v8, v8, v10
; RV32-NEXT: lui a1, 524288
; RV32-NEXT: vmv.s.x v12, a1
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vmv.v.i v14, 0
; RV32-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV32-NEXT: vslideup.vi v14, v12, 5
+; RV32-NEXT: lui a1, %hi(.LCPI184_1)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI184_1)
+; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV32-NEXT: vle8.v v12, (a1)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vsub.vv v8, v8, v10
; RV32-NEXT: vmulhu.vv v8, v8, v14
; RV32-NEXT: vadd.vv v8, v8, v10
-; RV32-NEXT: lui a1, %hi(.LCPI184_1)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI184_1)
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT: vle8.v v10, (a1)
-; RV32-NEXT: vsext.vf4 v12, v10
+; RV32-NEXT: vsext.vf4 v10, v12
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT: vsrl.vv v8, v8, v12
+; RV32-NEXT: vsrl.vv v8, v8, v10
; RV32-NEXT: vse64.v v8, (a0)
; RV32-NEXT: ret
;
@@ -3457,19 +3456,19 @@ define void @mulhu_v4i64(ptr %x) {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: lui a1, %hi(.LCPI184_0)
+; RV64-NEXT: addi a1, a1, %lo(.LCPI184_0)
+; RV64-NEXT: vle64.v v10, (a1)
; RV64-NEXT: li a1, -1
; RV64-NEXT: slli a1, a1, 63
-; RV64-NEXT: vmv.s.x v10, a1
-; RV64-NEXT: vmv.v.i v12, 0
+; RV64-NEXT: vmv.s.x v12, a1
+; RV64-NEXT: vmv.v.i v14, 0
; RV64-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; RV64-NEXT: vslideup.vi v12, v10, 2
-; RV64-NEXT: lui a1, %hi(.LCPI184_0)
-; RV64-NEXT: addi a1, a1, %lo(.LCPI184_0)
+; RV64-NEXT: vslideup.vi v14, v12, 2
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT: vle64.v v10, (a1)
; RV64-NEXT: vmulhu.vv v10, v8, v10
; RV64-NEXT: vsub.vv v8, v8, v10
-; RV64-NEXT: vmulhu.vv v8, v8, v12
+; RV64-NEXT: vmulhu.vv v8, v8, v14
; RV64-NEXT: vadd.vv v8, v8, v10
; RV64-NEXT: lui a1, 12320
; RV64-NEXT: addi a1, a1, 513
@@ -3488,14 +3487,13 @@ define void @mulhs_v32i8(ptr %x) {
; CHECK-LABEL: mulhs_v32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vmv.v.i v10, 7
; CHECK-NEXT: lui a1, 304453
; CHECK-NEXT: addi a1, a1, -1452
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 7
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
; CHECK-NEXT: li a1, -123
; CHECK-NEXT: vmv.v.x v12, a1
@@ -3615,19 +3613,19 @@ define void @mulhs_v4i64(ptr %x) {
;
; RV64-LABEL: mulhs_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: lui a1, 349525
; RV64-NEXT: addiw a1, a1, 1365
; RV64-NEXT: slli a2, a1, 32
; RV64-NEXT: add a1, a1, a2
-; RV64-NEXT: vmv.v.x v10, a1
-; RV64-NEXT: lui a1, %hi(.LCPI188_0)
-; RV64-NEXT: ld a1, %lo(.LCPI188_0)(a1)
+; RV64-NEXT: lui a2, %hi(.LCPI188_0)
+; RV64-NEXT: ld a2, %lo(.LCPI188_0)(a2)
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; RV64-NEXT: vmv.v.i v0, 5
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT: vmerge.vxm v10, v10, a1, v0
+; RV64-NEXT: vmv.v.x v10, a1
+; RV64-NEXT: vmerge.vxm v10, v10, a2, v0
; RV64-NEXT: vmulh.vv v10, v8, v10
; RV64-NEXT: lui a1, 1044496
; RV64-NEXT: addi a1, a1, -256
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access-zve32x.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access-zve32x.ll
index eb95d86e3404..82e0760d593c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access-zve32x.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access-zve32x.ll
@@ -7,57 +7,63 @@
define <4 x i1> @load_large_vector(ptr %p) {
; ZVE32X-LABEL: load_large_vector:
; ZVE32X: # %bb.0:
-; ZVE32X-NEXT: ld a1, 56(a0)
-; ZVE32X-NEXT: ld a2, 32(a0)
-; ZVE32X-NEXT: ld a3, 24(a0)
-; ZVE32X-NEXT: ld a4, 48(a0)
-; ZVE32X-NEXT: ld a5, 8(a0)
-; ZVE32X-NEXT: ld a6, 0(a0)
-; ZVE32X-NEXT: xor a2, a3, a2
-; ZVE32X-NEXT: snez a2, a2
+; ZVE32X-NEXT: ld a1, 80(a0)
+; ZVE32X-NEXT: ld a2, 72(a0)
+; ZVE32X-NEXT: ld a3, 56(a0)
+; ZVE32X-NEXT: ld a4, 32(a0)
+; ZVE32X-NEXT: ld a5, 24(a0)
+; ZVE32X-NEXT: ld a6, 48(a0)
+; ZVE32X-NEXT: ld a7, 8(a0)
+; ZVE32X-NEXT: ld a0, 0(a0)
+; ZVE32X-NEXT: xor a4, a5, a4
+; ZVE32X-NEXT: snez a4, a4
; ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; ZVE32X-NEXT: vmv.s.x v8, a2
+; ZVE32X-NEXT: vmv.s.x v8, a4
; ZVE32X-NEXT: vand.vi v8, v8, 1
; ZVE32X-NEXT: vmsne.vi v0, v8, 0
-; ZVE32X-NEXT: vmv.s.x v8, zero
-; ZVE32X-NEXT: vmerge.vim v9, v8, 1, v0
-; ZVE32X-NEXT: xor a2, a6, a5
-; ZVE32X-NEXT: snez a2, a2
-; ZVE32X-NEXT: vmv.s.x v10, a2
+; ZVE32X-NEXT: vmv.s.x v9, zero
+; ZVE32X-NEXT: vmerge.vim v8, v9, 1, v0
+; ZVE32X-NEXT: xor a0, a0, a7
+; ZVE32X-NEXT: snez a0, a0
+; ZVE32X-NEXT: vmv.s.x v10, a0
; ZVE32X-NEXT: vand.vi v10, v10, 1
; ZVE32X-NEXT: vmsne.vi v0, v10, 0
; ZVE32X-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; ZVE32X-NEXT: vmv.v.i v10, 0
; ZVE32X-NEXT: vmerge.vim v11, v10, 1, v0
; ZVE32X-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
-; ZVE32X-NEXT: vslideup.vi v11, v9, 1
+; ZVE32X-NEXT: vslideup.vi v11, v8, 1
; ZVE32X-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; ZVE32X-NEXT: vmsne.vi v0, v11, 0
-; ZVE32X-NEXT: ld a2, 80(a0)
-; ZVE32X-NEXT: vmerge.vim v9, v10, 1, v0
-; ZVE32X-NEXT: xor a1, a4, a1
-; ZVE32X-NEXT: snez a1, a1
-; ZVE32X-NEXT: vmv.s.x v11, a1
+; ZVE32X-NEXT: xor a0, a6, a3
+; ZVE32X-NEXT: snez a0, a0
+; ZVE32X-NEXT: vmv.s.x v8, a0
; ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; ZVE32X-NEXT: vand.vi v11, v11, 1
-; ZVE32X-NEXT: vmsne.vi v0, v11, 0
-; ZVE32X-NEXT: ld a0, 72(a0)
-; ZVE32X-NEXT: vmerge.vim v11, v8, 1, v0
+; ZVE32X-NEXT: vand.vi v8, v8, 1
+; ZVE32X-NEXT: vmsne.vi v8, v8, 0
+; ZVE32X-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; ZVE32X-NEXT: vmerge.vim v11, v10, 1, v0
+; ZVE32X-NEXT: vmv1r.v v0, v8
+; ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; ZVE32X-NEXT: vmerge.vim v8, v9, 1, v0
; ZVE32X-NEXT: vsetivli zero, 3, e8, mf4, tu, ma
-; ZVE32X-NEXT: vslideup.vi v9, v11, 2
+; ZVE32X-NEXT: vslideup.vi v11, v8, 2
; ZVE32X-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; ZVE32X-NEXT: vmsne.vi v0, v9, 0
-; ZVE32X-NEXT: vmerge.vim v9, v10, 1, v0
-; ZVE32X-NEXT: xor a0, a0, a2
-; ZVE32X-NEXT: snez a0, a0
-; ZVE32X-NEXT: vmv.s.x v10, a0
+; ZVE32X-NEXT: vmsne.vi v0, v11, 0
+; ZVE32X-NEXT: xor a1, a2, a1
+; ZVE32X-NEXT: snez a0, a1
+; ZVE32X-NEXT: vmv.s.x v8, a0
; ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; ZVE32X-NEXT: vand.vi v10, v10, 1
-; ZVE32X-NEXT: vmsne.vi v0, v10, 0
-; ZVE32X-NEXT: vmerge.vim v8, v8, 1, v0
+; ZVE32X-NEXT: vand.vi v8, v8, 1
+; ZVE32X-NEXT: vmsne.vi v8, v8, 0
+; ZVE32X-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; ZVE32X-NEXT: vmerge.vim v10, v10, 1, v0
+; ZVE32X-NEXT: vmv1r.v v0, v8
+; ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; ZVE32X-NEXT: vmerge.vim v8, v9, 1, v0
; ZVE32X-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; ZVE32X-NEXT: vslideup.vi v9, v8, 3
-; ZVE32X-NEXT: vmsne.vi v0, v9, 0
+; ZVE32X-NEXT: vslideup.vi v10, v8, 3
+; ZVE32X-NEXT: vmsne.vi v0, v10, 0
; ZVE32X-NEXT: ret
;
; ZVE64X-LABEL: load_large_vector:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
index 99364264de82..178a920169ad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
@@ -159,16 +159,16 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: li a3, 54
+; RV32-NEXT: li a3, 82
; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x36, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 54 * vlenb
+; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd2, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 82 * vlenb
; RV32-NEXT: addi a3, a1, 256
; RV32-NEXT: li a2, 32
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vle32.v v16, (a3)
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 21
+; RV32-NEXT: li a4, 57
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
@@ -177,30 +177,27 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vslideup.vi v8, v16, 4
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: slli a5, a4, 3
-; RV32-NEXT: add a4, a5, a4
+; RV32-NEXT: li a5, 41
+; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs4r.v v8, (a4) # Unknown-size Folded Spill
; RV32-NEXT: lui a4, 12
-; RV32-NEXT: vmv.s.x v0, a4
-; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: add a4, sp, a4
-; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vs1r.v v0, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vmv.s.x v1, a4
; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV32-NEXT: vslidedown.vi v16, v16, 16
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: li a5, 37
-; RV32-NEXT: mul a4, a4, a5
+; RV32-NEXT: slli a5, a4, 6
+; RV32-NEXT: add a4, a5, a4
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; RV32-NEXT: vslideup.vi v8, v16, 10, v0.t
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: slli a5, a4, 4
-; RV32-NEXT: add a4, a5, a4
+; RV32-NEXT: li a5, 45
+; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs4r.v v8, (a4) # Unknown-size Folded Spill
@@ -209,391 +206,429 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
; RV32-NEXT: vle16.v v8, (a4)
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: li a5, 13
-; RV32-NEXT: mul a4, a4, a5
+; RV32-NEXT: slli a5, a4, 5
+; RV32-NEXT: add a4, a5, a4
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs4r.v v8, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: lui a4, %hi(.LCPI6_1)
+; RV32-NEXT: addi a4, a4, %lo(.LCPI6_1)
+; RV32-NEXT: lui a5, 1
+; RV32-NEXT: vle16.v v8, (a4)
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: li a6, 25
+; RV32-NEXT: mul a4, a4, a6
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs4r.v v8, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: vle32.v v24, (a1)
+; RV32-NEXT: vle32.v v8, (a1)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a4, 45
+; RV32-NEXT: li a4, 73
; RV32-NEXT: mul a1, a1, a4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: lui a1, %hi(.LCPI6_1)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI6_1)
-; RV32-NEXT: lui a4, 1
-; RV32-NEXT: addi a4, a4, -64
-; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vle32.v v24, (a3)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a5, a1, 2
-; RV32-NEXT: add a1, a5, a1
+; RV32-NEXT: li a3, 49
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: vle32.v v16, (a3)
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, a5, -64
+; RV32-NEXT: vmv.s.x v0, a1
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 29
+; RV32-NEXT: li a3, 37
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: vmv.s.x v2, a4
+; RV32-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 13
-; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: slli a3, a1, 5
+; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl4r.v v4, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v24, v4
-; RV32-NEXT: vmv1r.v v0, v2
+; RV32-NEXT: vrgatherei16.vv v16, v8, v4
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 2
-; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: li a3, 25
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl4r.v v24, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v16, v24, v0.t
-; RV32-NEXT: vsetivli zero, 12, e32, m4, tu, ma
+; RV32-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vrgatherei16.vv v16, v24, v8, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 4
-; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: li a3, 45
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl4r.v v12, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vmv.v.v v12, v8
+; RV32-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 12, e32, m4, tu, ma
+; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 4
-; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: li a3, 45
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; RV32-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 21
+; RV32-NEXT: li a3, 57
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vmv4r.v v16, v8
-; RV32-NEXT: vslideup.vi v8, v16, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; RV32-NEXT: vslideup.vi v12, v8, 2
; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a3, 21
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v3, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vmv1r.v v0, v3
+; RV32-NEXT: vs1r.v v1, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 37
-; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: slli a3, a1, 6
+; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslideup.vi v8, v16, 8, v0.t
-; RV32-NEXT: vmv.v.v v20, v8
+; RV32-NEXT: vslideup.vi v12, v16, 8, v0.t
+; RV32-NEXT: vmv.v.v v20, v12
; RV32-NEXT: lui a1, %hi(.LCPI6_2)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_2)
-; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
-; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: lui a3, %hi(.LCPI6_3)
+; RV32-NEXT: addi a3, a3, %lo(.LCPI6_3)
+; RV32-NEXT: lui a4, %hi(.LCPI6_4)
+; RV32-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; RV32-NEXT: vle16.v v4, (a1)
+; RV32-NEXT: vle16.v v16, (a3)
+; RV32-NEXT: addi a1, a4, %lo(.LCPI6_4)
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vle16.v v2, (a1)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 13
+; RV32-NEXT: li a3, 73
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: lui a1, %hi(.LCPI6_3)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI6_3)
-; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
+; RV32-NEXT: vrgatherei16.vv v24, v8, v4
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 2
-; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: li a3, 37
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 45
+; RV32-NEXT: li a3, 49
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vrgatherei16.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsetivli zero, 12, e32, m4, tu, ma
+; RV32-NEXT: vmv.v.v v20, v24
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 13
+; RV32-NEXT: li a3, 37
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl4r.v v4, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v24, v4
-; RV32-NEXT: vmv1r.v v0, v2
+; RV32-NEXT: vs4r.v v20, (a1) # Unknown-size Folded Spill
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 29
+; RV32-NEXT: li a3, 57
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; RV32-NEXT: vrgatherei16.vv v16, v24, v2
+; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 2
+; RV32-NEXT: slli a3, a1, 6
; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl4r.v v4, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v24, v4, v0.t
-; RV32-NEXT: vsetivli zero, 12, e32, m4, tu, ma
-; RV32-NEXT: vmv.v.v v20, v8
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslideup.vi v16, v8, 6, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 2
+; RV32-NEXT: slli a3, a1, 5
; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v20, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: lui a1, %hi(.LCPI6_4)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI6_4)
-; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 21
-; RV32-NEXT: mul a1, a1, a3
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v12, v24, v8
-; RV32-NEXT: vmv1r.v v0, v3
-; RV32-NEXT: vslideup.vi v12, v16, 6, v0.t
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 13
-; RV32-NEXT: mul a1, a1, a3
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, %hi(.LCPI6_5)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_5)
+; RV32-NEXT: lui a3, %hi(.LCPI6_6)
+; RV32-NEXT: addi a3, a3, %lo(.LCPI6_6)
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
-; RV32-NEXT: vle16.v v24, (a1)
-; RV32-NEXT: lui a1, %hi(.LCPI6_6)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI6_6)
-; RV32-NEXT: li a3, 960
-; RV32-NEXT: vle16.v v4, (a1)
-; RV32-NEXT: vmv.s.x v0, a3
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vle16.v v16, (a1)
+; RV32-NEXT: vle16.v v4, (a3)
+; RV32-NEXT: li a1, 960
+; RV32-NEXT: vmv.s.x v0, a1
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 45
+; RV32-NEXT: li a3, 13
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v16, v24
+; RV32-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 29
+; RV32-NEXT: li a3, 73
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v24, v4, v0.t
-; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
+; RV32-NEXT: vrgatherei16.vv v8, v24, v16
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 13
+; RV32-NEXT: li a3, 49
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl4r.v v12, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vmv.v.v v12, v8
+; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vrgatherei16.vv v8, v16, v4, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 13
+; RV32-NEXT: li a3, 25
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, %hi(.LCPI6_7)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_7)
-; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; RV32-NEXT: lui a3, %hi(.LCPI6_8)
+; RV32-NEXT: addi a3, a3, %lo(.LCPI6_8)
+; RV32-NEXT: lui a4, %hi(.LCPI6_9)
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: addi a1, a4, %lo(.LCPI6_9)
+; RV32-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; RV32-NEXT: vle16.v v24, (a3)
+; RV32-NEXT: vle16.v v28, (a1)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 21
+; RV32-NEXT: li a3, 57
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v4, v16, v8
-; RV32-NEXT: vmv1r.v v0, v3
+; RV32-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; RV32-NEXT: vrgatherei16.vv v4, v0, v8
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 37
+; RV32-NEXT: li a3, 21
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslideup.vi v4, v8, 4, v0.t
-; RV32-NEXT: lui a1, %hi(.LCPI6_8)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI6_8)
-; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
-; RV32-NEXT: vle16.v v0, (a1)
-; RV32-NEXT: lui a1, %hi(.LCPI6_9)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI6_9)
-; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a3, a1, 6
+; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslideup.vi v4, v8, 4, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 45
+; RV32-NEXT: li a3, 21
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v16, v0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vs4r.v v4, (a1) # Unknown-size Folded Spill
; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a3, 73
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl4r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v24, v16, v0.t
-; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
-; RV32-NEXT: vmv.v.v v4, v8
+; RV32-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
+; RV32-NEXT: vrgatherei16.vv v8, v0, v24
; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a3, 13
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v4, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; RV32-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vrgatherei16.vv v8, v16, v28, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 21
+; RV32-NEXT: li a3, 13
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslideup.vi v12, v8, 6
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, %hi(.LCPI6_10)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_10)
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; RV32-NEXT: vle16.v v8, (a1)
; RV32-NEXT: lui a1, 15
-; RV32-NEXT: vmv.s.x v24, a1
-; RV32-NEXT: vmv1r.v v0, v24
+; RV32-NEXT: vmv.s.x v3, a1
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 37
+; RV32-NEXT: li a3, 57
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslideup.vi v12, v16, 6
+; RV32-NEXT: vmv1r.v v0, v3
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a3, a1, 6
+; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vrgatherei16.vv v12, v16, v8, v0.t
-; RV32-NEXT: vmv.v.v v28, v12
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a3, 57
+; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, %hi(.LCPI6_11)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_11)
+; RV32-NEXT: lui a3, %hi(.LCPI6_12)
+; RV32-NEXT: addi a3, a3, %lo(.LCPI6_12)
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
-; RV32-NEXT: vle16.v v0, (a1)
-; RV32-NEXT: lui a1, %hi(.LCPI6_12)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI6_12)
-; RV32-NEXT: li a3, 1008
-; RV32-NEXT: vle16.v v4, (a1)
-; RV32-NEXT: vmv.s.x v25, a3
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vs1r.v v25, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: vle16.v v12, (a3)
+; RV32-NEXT: li a1, 1008
+; RV32-NEXT: vmv.s.x v0, a1
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 45
+; RV32-NEXT: slli a1, a1, 2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a3, 73
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v16, v0
-; RV32-NEXT: vmv1r.v v0, v25
+; RV32-NEXT: vrgatherei16.vv v24, v16, v8
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 29
+; RV32-NEXT: li a3, 49
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v16, v4, v0.t
-; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
-; RV32-NEXT: vmv.v.v v28, v8
+; RV32-NEXT: vrgatherei16.vv v24, v16, v12, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 21
-; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: slli a3, a1, 2
+; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v28, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, %hi(.LCPI6_13)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_13)
-; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; RV32-NEXT: lui a3, %hi(.LCPI6_14)
+; RV32-NEXT: addi a3, a3, %lo(.LCPI6_14)
+; RV32-NEXT: lui a4, %hi(.LCPI6_15)
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vle16.v v20, (a1)
+; RV32-NEXT: addi a1, a4, %lo(.LCPI6_15)
+; RV32-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; RV32-NEXT: vle16.v v24, (a3)
; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: vmv1r.v v0, v24
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vmv1r.v v0, v3
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a3, 41
+; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl4r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 3
+; RV32-NEXT: slli a3, a1, 6
; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl4r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; RV32-NEXT: vrgatherei16.vv v16, v8, v20, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 37
+; RV32-NEXT: slli a3, a1, 5
+; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl4r.v v20, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a3, 25
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v24, v16, v8, v0.t
-; RV32-NEXT: lui a1, %hi(.LCPI6_14)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI6_14)
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
+; RV32-NEXT: vmv.v.v v20, v8
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a3, 73
+; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
-; RV32-NEXT: vle16.v v16, (a1)
-; RV32-NEXT: lui a1, %hi(.LCPI6_15)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI6_15)
-; RV32-NEXT: vle16.v v28, (a1)
+; RV32-NEXT: vrgatherei16.vv v8, v0, v24
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 45
+; RV32-NEXT: slli a1, a1, 2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 49
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v0, v16
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vl4r.v v4, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vrgatherei16.vv v8, v24, v4, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 29
+; RV32-NEXT: li a2, 21
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v16, v28, v0.t
+; RV32-NEXT: vl4r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 13
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
-; RV32-NEXT: vmv.v.v v24, v8
+; RV32-NEXT: vmv.v.v v24, v0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 57
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl4r.v v28, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 2
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vmv.v.v v28, v0
+; RV32-NEXT: vmv.v.v v16, v8
; RV32-NEXT: addi a1, a0, 320
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT: vse32.v v24, (a1)
+; RV32-NEXT: vse32.v v16, (a1)
; RV32-NEXT: addi a1, a0, 256
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: li a3, 21
-; RV32-NEXT: mul a2, a2, a3
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 16
-; RV32-NEXT: vl4r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vse32.v v8, (a1)
+; RV32-NEXT: vse32.v v28, (a1)
; RV32-NEXT: addi a1, a0, 192
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 16
-; RV32-NEXT: vl4r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vse32.v v8, (a1)
+; RV32-NEXT: vse32.v v24, (a1)
; RV32-NEXT: addi a1, a0, 128
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: li a3, 13
-; RV32-NEXT: mul a2, a2, a3
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 16
-; RV32-NEXT: vl4r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vse32.v v8, (a1)
+; RV32-NEXT: vse32.v v20, (a1)
; RV32-NEXT: addi a1, a0, 64
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a3, a2, 2
-; RV32-NEXT: add a2, a3, a2
+; RV32-NEXT: li a3, 37
+; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 16
; RV32-NEXT: vl4r.v v8, (a2) # Unknown-size Folded Reload
; RV32-NEXT: vse32.v v8, (a1)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 4
-; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: li a2, 45
+; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vse32.v v8, (a0)
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 54
+; RV32-NEXT: li a1, 82
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: addi sp, sp, 16
@@ -604,372 +639,422 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: li a3, 56
+; RV64-NEXT: li a3, 74
; RV64-NEXT: mul a2, a2, a3
; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 56 * vlenb
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0xca, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 74 * vlenb
; RV64-NEXT: addi a2, a1, 256
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v16, (a2)
; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 5
+; RV64-NEXT: li a3, 25
+; RV64-NEXT: mul a2, a2, a3
; RV64-NEXT: add a2, sp, a2
; RV64-NEXT: addi a2, a2, 16
; RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; RV64-NEXT: addi a2, a1, 128
-; RV64-NEXT: vle64.v v8, (a2)
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: li a3, 40
-; RV64-NEXT: mul a2, a2, a3
-; RV64-NEXT: add a2, sp, a2
-; RV64-NEXT: addi a2, a2, 16
-; RV64-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; RV64-NEXT: vle64.v v24, (a1)
+; RV64-NEXT: vle64.v v8, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a3, a1, 6
+; RV64-NEXT: add a1, a3, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vrgather.vi v8, v16, 4
+; RV64-NEXT: vrgather.vi v12, v16, 4
; RV64-NEXT: li a1, 128
-; RV64-NEXT: vmv.s.x v4, a1
+; RV64-NEXT: vmv.s.x v8, a1
; RV64-NEXT: vsetivli zero, 8, e64, m8, ta, ma
; RV64-NEXT: vslidedown.vi v16, v16, 8
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 24
-; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: li a3, 49
+; RV64-NEXT: mul a1, a1, a3
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vmv1r.v v0, v8
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
-; RV64-NEXT: vmv1r.v v0, v4
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 20
-; RV64-NEXT: mul a1, a1, a2
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs1r.v v4, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vrgather.vi v8, v16, 2, v0.t
-; RV64-NEXT: vmv.v.v v20, v8
+; RV64-NEXT: vrgather.vi v12, v16, 2, v0.t
; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vid.v v10
; RV64-NEXT: li a1, 6
-; RV64-NEXT: vid.v v8
-; RV64-NEXT: vmul.vx v6, v8, a1
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vrgatherei16.vv v8, v24, v6
+; RV64-NEXT: vmul.vx v2, v10, a1
+; RV64-NEXT: li a1, 56
+; RV64-NEXT: vle64.v v16, (a2)
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: li a3, 57
+; RV64-NEXT: mul a2, a2, a3
+; RV64-NEXT: add a2, sp, a2
+; RV64-NEXT: addi a2, a2, 16
+; RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV64-NEXT: vmv.s.x v7, a1
+; RV64-NEXT: vadd.vi v10, v2, -16
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 48
-; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: slli a2, a1, 6
+; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; RV64-NEXT: li a1, 56
-; RV64-NEXT: vmv.s.x v5, a1
-; RV64-NEXT: vadd.vi v16, v6, -16
+; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; RV64-NEXT: vmv1r.v v0, v5
+; RV64-NEXT: vrgatherei16.vv v16, v24, v2
+; RV64-NEXT: vmv1r.v v0, v7
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 40
+; RV64-NEXT: li a2, 57
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v24, v16, v0.t
+; RV64-NEXT: vrgatherei16.vv v16, v24, v10, v0.t
; RV64-NEXT: vsetivli zero, 6, e64, m4, tu, ma
-; RV64-NEXT: vmv.v.v v20, v8
+; RV64-NEXT: vmv.v.v v12, v16
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 4
+; RV64-NEXT: li a2, 21
+; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs4r.v v20, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 5
+; RV64-NEXT: li a2, 25
+; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgather.vi v24, v16, 5
-; RV64-NEXT: vmv1r.v v0, v4
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vrgather.vi v12, v16, 5
+; RV64-NEXT: vmv1r.v v0, v8
+; RV64-NEXT: vmv1r.v v6, v8
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 24
+; RV64-NEXT: li a2, 49
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgather.vi v24, v16, 3, v0.t
+; RV64-NEXT: vrgather.vi v12, v16, 3, v0.t
+; RV64-NEXT: vmv.v.v v28, v12
; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vadd.vi v28, v6, 1
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vadd.vi v24, v2, 1
+; RV64-NEXT: vadd.vi v26, v2, -15
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 48
-; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: slli a2, a1, 6
+; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v16, v28
-; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; RV64-NEXT: vadd.vi v28, v6, -15
+; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; RV64-NEXT: vmv1r.v v0, v5
+; RV64-NEXT: vrgatherei16.vv v16, v8, v24
+; RV64-NEXT: vmv1r.v v0, v7
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 40
+; RV64-NEXT: li a2, 57
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v16, v28, v0.t
+; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vrgatherei16.vv v16, v8, v26, v0.t
; RV64-NEXT: vsetivli zero, 6, e64, m4, tu, ma
-; RV64-NEXT: vmv.v.v v24, v8
+; RV64-NEXT: vmv.v.v v28, v16
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 12
+; RV64-NEXT: slli a2, a1, 4
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs4r.v v28, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: lui a1, 16
+; RV64-NEXT: addi a1, a1, 7
+; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT: vmv.v.i v9, 6
+; RV64-NEXT: vmv.v.x v10, a1
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: li a2, 25
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs4r.v v24, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vmv2r.v v26, v6
-; RV64-NEXT: vadd.vi v24, v6, 2
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT: vrgatherei16.vv v12, v16, v9
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 48
+; RV64-NEXT: li a2, 45
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v0, v24
-; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; RV64-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vrgatherei16.vv v12, v16, v10
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: li a2, 41
+; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vmv4r.v v8, v16
+; RV64-NEXT: vrgather.vi v12, v16, 2
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: li a2, 37
+; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vrgather.vi v12, v16, 3
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 5
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
; RV64-NEXT: li a1, 24
-; RV64-NEXT: vmv.s.x v0, a1
+; RV64-NEXT: vmv.s.x v1, a1
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vadd.vi v24, v2, 2
+; RV64-NEXT: vadd.vi v4, v2, -14
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: slli a2, a1, 6
+; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vadd.vi v24, v26, -14
-; RV64-NEXT: vmv2r.v v6, v26
+; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; RV64-NEXT: vrgatherei16.vv v8, v16, v24, v0.t
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vmv.v.i v12, 6
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vrgatherei16.vv v8, v16, v24
+; RV64-NEXT: vmv1r.v v0, v1
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 5
+; RV64-NEXT: li a2, 57
+; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v20, v24, v12
+; RV64-NEXT: vrgatherei16.vv v8, v24, v4, v0.t
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 20
+; RV64-NEXT: li a2, 25
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vmv1r.v v0, v6
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 24
+; RV64-NEXT: li a2, 49
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgather.vi v20, v24, 4, v0.t
-; RV64-NEXT: vsetivli zero, 5, e64, m4, tu, ma
-; RV64-NEXT: vmv.v.v v20, v8
+; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 3
+; RV64-NEXT: li a2, 45
+; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs4r.v v20, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vmv2r.v v10, v6
+; RV64-NEXT: vl4r.v v20, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vrgather.vi v20, v16, 4, v0.t
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 6
+; RV64-NEXT: li a2, 45
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs2r.v v6, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vadd.vi v8, v6, 3
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vs4r.v v20, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vadd.vi v4, v2, 3
+; RV64-NEXT: vadd.vi v8, v2, -13
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 48
-; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v16, v0, v8
-; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; RV64-NEXT: vadd.vi v28, v10, -13
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vs2r.v v8, (a1) # Unknown-size Folded Spill
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: slli a2, a1, 6
+; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vrgatherei16.vv v8, v16, v4
+; RV64-NEXT: vmv1r.v v0, v1
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 40
-; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v16, v8, v28, v0.t
-; RV64-NEXT: lui a1, 16
-; RV64-NEXT: addi a1, a1, 7
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vmv.v.x v12, a1
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vl2r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vrgatherei16.vv v8, v24, v16, v0.t
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 5
+; RV64-NEXT: slli a2, a1, 3
+; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vmv4r.v v8, v0
-; RV64-NEXT: vrgatherei16.vv v20, v0, v12
+; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vmv1r.v v0, v6
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 20
+; RV64-NEXT: li a2, 49
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgather.vi v20, v24, 5, v0.t
-; RV64-NEXT: vsetivli zero, 5, e64, m4, tu, ma
-; RV64-NEXT: vmv.v.v v20, v16
+; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 20
+; RV64-NEXT: li a2, 41
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs4r.v v20, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: lui a1, 96
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vmv.v.x v12, a1
+; RV64-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
-; RV64-NEXT: li a1, 192
-; RV64-NEXT: vmv.s.x v0, a1
+; RV64-NEXT: vrgather.vi v8, v24, 5, v0.t
; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: li a2, 41
+; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vrgather.vi v28, v8, 2
-; RV64-NEXT: vrgatherei16.vv v28, v24, v12, v0.t
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: lui a1, 96
+; RV64-NEXT: li a2, 192
+; RV64-NEXT: vmv.s.x v28, a2
+; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a1
+; RV64-NEXT: vmv1r.v v0, v28
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 6
+; RV64-NEXT: li a2, 37
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl2r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vadd.vi v16, v24, 4
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vl4r.v v12, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vrgatherei16.vv v12, v24, v8, v0.t
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 48
+; RV64-NEXT: li a2, 37
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v0, v16
-; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; RV64-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
; RV64-NEXT: li a1, 28
; RV64-NEXT: vmv.s.x v0, a1
; RV64-NEXT: addi a1, sp, 16
; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vadd.vi v26, v24, -12
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vadd.vi v30, v2, 4
+; RV64-NEXT: vadd.vi v6, v2, -12
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 6
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vrgatherei16.vv v16, v8, v30
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 40
+; RV64-NEXT: li a2, 57
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v16, v26, v0.t
-; RV64-NEXT: vsetivli zero, 5, e64, m4, tu, ma
-; RV64-NEXT: vmv.v.v v28, v8
+; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vrgatherei16.vv v16, v8, v6, v0.t
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs4r.v v28, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV64-NEXT: lui a1, 112
; RV64-NEXT: addi a1, a1, 1
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vmv.v.x v12, a1
+; RV64-NEXT: vmv1r.v v0, v28
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 5
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl4r.v v16, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vrgatherei16.vv v16, v24, v12, v0.t
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 5
+; RV64-NEXT: slli a2, a1, 5
+; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgather.vi v8, v16, 3
+; RV64-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill
; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: li a2, 45
+; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vl4r.v v16, (a1) # Unknown-size Folded Reload
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 24
+; RV64-NEXT: li a2, 25
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v16, v12, v0.t
+; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 5, e64, m4, tu, ma
+; RV64-NEXT: vmv.v.v v16, v24
+; RV64-NEXT: vmv2r.v v8, v2
; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vadd.vi v12, v24, 5
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vadd.vi v12, v2, 5
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 48
-; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: slli a2, a1, 6
+; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v16, v0, v12
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vrgatherei16.vv v24, v0, v12
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; RV64-NEXT: vadd.vi v12, v24, -11
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vadd.vi v2, v8, -11
; RV64-NEXT: addi a1, sp, 16
; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 40
+; RV64-NEXT: li a2, 57
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v16, v24, v12, v0.t
+; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vrgatherei16.vv v24, v8, v2, v0.t
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: li a2, 41
+; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl4r.v v12, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 3
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsetivli zero, 5, e64, m4, tu, ma
-; RV64-NEXT: vmv.v.v v8, v16
+; RV64-NEXT: vmv.v.v v12, v0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: li a2, 37
+; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl4r.v v20, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vmv.v.v v20, v0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 5
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vmv.v.v v8, v24
; RV64-NEXT: addi a1, a0, 320
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vse64.v v8, (a1)
; RV64-NEXT: addi a1, a0, 256
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 1
-; RV64-NEXT: add a2, sp, a2
-; RV64-NEXT: addi a2, a2, 16
-; RV64-NEXT: vl4r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vse64.v v8, (a1)
+; RV64-NEXT: vse64.v v20, (a1)
; RV64-NEXT: addi a1, a0, 192
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: li a3, 20
-; RV64-NEXT: mul a2, a2, a3
-; RV64-NEXT: add a2, sp, a2
-; RV64-NEXT: addi a2, a2, 16
-; RV64-NEXT: vl4r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vse64.v v8, (a1)
+; RV64-NEXT: vse64.v v12, (a1)
; RV64-NEXT: addi a1, a0, 128
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 3
-; RV64-NEXT: add a2, sp, a2
-; RV64-NEXT: addi a2, a2, 16
-; RV64-NEXT: vl4r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vse64.v v8, (a1)
+; RV64-NEXT: vse64.v v16, (a1)
; RV64-NEXT: addi a1, a0, 64
; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: li a3, 12
-; RV64-NEXT: mul a2, a2, a3
+; RV64-NEXT: slli a3, a2, 4
+; RV64-NEXT: add a2, a3, a2
; RV64-NEXT: add a2, sp, a2
; RV64-NEXT: addi a2, a2, 16
; RV64-NEXT: vl4r.v v8, (a2) # Unknown-size Folded Reload
; RV64-NEXT: vse64.v v8, (a1)
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 4
+; RV64-NEXT: li a2, 21
+; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vse64.v v8, (a0)
; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: li a1, 56
+; RV64-NEXT: li a1, 74
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
index d55683e653d2..9463267d0b0e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
@@ -57,17 +57,17 @@ define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 16
; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: csrr a0, vlenb
@@ -118,50 +118,50 @@ define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x) {
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: csrr a0, vlenb
@@ -182,17 +182,17 @@ define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x) {
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vmv.v.x v10, a1
; RV64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v9, v8, 2
; RV64-NEXT: vfmv.f.s fa5, v9
; RV64-NEXT: fcvt.l.s a0, fa5
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vfmv.f.s fa5, v8
; RV64-NEXT: fcvt.l.s a0, fa5
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT: vslide1down.vx v8, v10, a0
; RV64-NEXT: ret
%a = call <3 x i64> @llvm.llrint.v3i64.v3f32(<3 x float> %x)
@@ -224,50 +224,50 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: csrr a0, vlenb
@@ -288,17 +288,17 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vmv.v.x v10, a1
; RV64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v9, v8, 2
; RV64-NEXT: vfmv.f.s fa5, v9
; RV64-NEXT: fcvt.l.s a0, fa5
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vfmv.f.s fa5, v8
; RV64-NEXT: fcvt.l.s a0, fa5
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT: vslide1down.vx v8, v10, a0
; RV64-NEXT: ret
%a = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> %x)
@@ -328,57 +328,57 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 68(sp)
; RV32-NEXT: sw a0, 64(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: addi a0, sp, 192
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 7
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 124(sp)
; RV32-NEXT: sw a0, 120(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: addi a0, sp, 192
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 6
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 116(sp)
; RV32-NEXT: sw a0, 112(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: addi a0, sp, 192
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 5
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 108(sp)
; RV32-NEXT: sw a0, 104(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: addi a0, sp, 192
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 4
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 100(sp)
; RV32-NEXT: sw a0, 96(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: addi a0, sp, 192
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 92(sp)
; RV32-NEXT: sw a0, 88(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: addi a0, sp, 192
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 84(sp)
; RV32-NEXT: sw a0, 80(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: addi a0, sp, 192
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
@@ -502,64 +502,64 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 196(sp)
; RV32-NEXT: sw a0, 192(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: addi a0, sp, 384
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 132(sp)
; RV32-NEXT: sw a0, 128(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: addi a0, sp, 384
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 156(sp)
; RV32-NEXT: sw a0, 152(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: addi a0, sp, 384
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 148(sp)
; RV32-NEXT: sw a0, 144(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: addi a0, sp, 384
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 140(sp)
; RV32-NEXT: sw a0, 136(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: addi a0, sp, 384
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 7
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 188(sp)
; RV32-NEXT: sw a0, 184(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: addi a0, sp, 384
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 6
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 180(sp)
; RV32-NEXT: sw a0, 176(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: addi a0, sp, 384
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 5
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 172(sp)
; RV32-NEXT: sw a0, 168(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: addi a0, sp, 384
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 4
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
@@ -711,17 +711,17 @@ define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrint
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 16
; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: csrr a0, vlenb
@@ -733,13 +733,12 @@ define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
;
; RV64-LABEL: llrint_v2i64_v2f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vslidedown.vi v9, v8, 1
; RV64-NEXT: vfmv.f.s fa5, v9
; RV64-NEXT: fcvt.l.d a0, fa5
; RV64-NEXT: vfmv.f.s fa5, v8
; RV64-NEXT: fcvt.l.d a1, fa5
-; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vmv.v.x v8, a1
; RV64-NEXT: vslide1down.vx v8, v8, a0
; RV64-NEXT: ret
@@ -772,50 +771,50 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrint
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrint
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrint
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: csrr a0, vlenb
@@ -836,17 +835,13 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vmv.v.x v10, a1
; RV64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vi v12, v8, 2
; RV64-NEXT: vfmv.f.s fa5, v12
; RV64-NEXT: fcvt.l.d a0, fa5
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vfmv.f.s fa5, v8
; RV64-NEXT: fcvt.l.d a0, fa5
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vslide1down.vx v8, v10, a0
; RV64-NEXT: ret
%a = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> %x)
@@ -890,32 +885,32 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
; RV32-NEXT: call llrint
; RV32-NEXT: sw a1, 164(sp)
; RV32-NEXT: sw a0, 160(sp)
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrint
; RV32-NEXT: sw a1, 132(sp)
; RV32-NEXT: sw a0, 128(sp)
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrint
; RV32-NEXT: sw a1, 140(sp)
; RV32-NEXT: sw a0, 136(sp)
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrint
; RV32-NEXT: sw a1, 156(sp)
; RV32-NEXT: sw a0, 152(sp)
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrint
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll
index e2075e074179..9b0944e7e2f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll
@@ -39,26 +39,24 @@ declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float>)
define <2 x iXLen> @lrint_v2f32(<2 x float> %x) {
; RV32-LABEL: lrint_v2f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vslidedown.vi v9, v8, 1
; RV32-NEXT: vfmv.f.s fa5, v9
; RV32-NEXT: fcvt.w.s a0, fa5
; RV32-NEXT: vfmv.f.s fa5, v8
; RV32-NEXT: fcvt.w.s a1, fa5
-; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vmv.v.x v8, a1
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: ret
;
; RV64-i32-LABEL: lrint_v2f32:
; RV64-i32: # %bb.0:
-; RV64-i32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-i32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-i32-NEXT: vslidedown.vi v9, v8, 1
; RV64-i32-NEXT: vfmv.f.s fa5, v9
; RV64-i32-NEXT: fcvt.l.s a0, fa5
; RV64-i32-NEXT: vfmv.f.s fa5, v8
; RV64-i32-NEXT: fcvt.l.s a1, fa5
-; RV64-i32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-i32-NEXT: vmv.v.x v8, a1
; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
; RV64-i32-NEXT: ret
@@ -83,13 +81,12 @@ declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float>)
define <3 x iXLen> @lrint_v3f32(<3 x float> %x) {
; RV32-LABEL: lrint_v3f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v9, v8, 1
; RV32-NEXT: vfmv.f.s fa5, v9
; RV32-NEXT: fcvt.w.s a0, fa5
; RV32-NEXT: vfmv.f.s fa5, v8
; RV32-NEXT: fcvt.w.s a1, fa5
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vmv.v.x v9, a1
; RV32-NEXT: vslide1down.vx v9, v9, a0
; RV32-NEXT: vslidedown.vi v10, v8, 2
@@ -104,13 +101,12 @@ define <3 x iXLen> @lrint_v3f32(<3 x float> %x) {
;
; RV64-i32-LABEL: lrint_v3f32:
; RV64-i32: # %bb.0:
-; RV64-i32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-i32-NEXT: vslidedown.vi v9, v8, 1
; RV64-i32-NEXT: vfmv.f.s fa5, v9
; RV64-i32-NEXT: fcvt.l.s a0, fa5
; RV64-i32-NEXT: vfmv.f.s fa5, v8
; RV64-i32-NEXT: fcvt.l.s a1, fa5
-; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-i32-NEXT: vmv.v.x v9, a1
; RV64-i32-NEXT: vslide1down.vx v9, v9, a0
; RV64-i32-NEXT: vslidedown.vi v10, v8, 2
@@ -134,17 +130,17 @@ define <3 x iXLen> @lrint_v3f32(<3 x float> %x) {
; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-i64-NEXT: vmv.v.x v10, a1
; RV64-i64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-i64-NEXT: vslidedown.vi v9, v8, 2
; RV64-i64-NEXT: vfmv.f.s fa5, v9
; RV64-i64-NEXT: fcvt.l.s a0, fa5
-; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-i64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-i64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-i64-NEXT: vslidedown.vi v8, v8, 3
; RV64-i64-NEXT: vfmv.f.s fa5, v8
; RV64-i64-NEXT: fcvt.l.s a0, fa5
-; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-i64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-i64-NEXT: vslide1down.vx v8, v10, a0
; RV64-i64-NEXT: ret
%a = call <3 x iXLen> @llvm.lrint.v3iXLen.v3f32(<3 x float> %x)
@@ -155,13 +151,12 @@ declare <3 x iXLen> @llvm.lrint.v3iXLen.v3f32(<3 x float>)
define <4 x iXLen> @lrint_v4f32(<4 x float> %x) {
; RV32-LABEL: lrint_v4f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v9, v8, 1
; RV32-NEXT: vfmv.f.s fa5, v9
; RV32-NEXT: fcvt.w.s a0, fa5
; RV32-NEXT: vfmv.f.s fa5, v8
; RV32-NEXT: fcvt.w.s a1, fa5
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vmv.v.x v9, a1
; RV32-NEXT: vslide1down.vx v9, v9, a0
; RV32-NEXT: vslidedown.vi v10, v8, 2
@@ -176,13 +171,12 @@ define <4 x iXLen> @lrint_v4f32(<4 x float> %x) {
;
; RV64-i32-LABEL: lrint_v4f32:
; RV64-i32: # %bb.0:
-; RV64-i32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-i32-NEXT: vslidedown.vi v9, v8, 1
; RV64-i32-NEXT: vfmv.f.s fa5, v9
; RV64-i32-NEXT: fcvt.l.s a0, fa5
; RV64-i32-NEXT: vfmv.f.s fa5, v8
; RV64-i32-NEXT: fcvt.l.s a1, fa5
-; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-i32-NEXT: vmv.v.x v9, a1
; RV64-i32-NEXT: vslide1down.vx v9, v9, a0
; RV64-i32-NEXT: vslidedown.vi v10, v8, 2
@@ -206,17 +200,17 @@ define <4 x iXLen> @lrint_v4f32(<4 x float> %x) {
; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-i64-NEXT: vmv.v.x v10, a1
; RV64-i64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-i64-NEXT: vslidedown.vi v9, v8, 2
; RV64-i64-NEXT: vfmv.f.s fa5, v9
; RV64-i64-NEXT: fcvt.l.s a0, fa5
-; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-i64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-i64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-i64-NEXT: vslidedown.vi v8, v8, 3
; RV64-i64-NEXT: vfmv.f.s fa5, v8
; RV64-i64-NEXT: fcvt.l.s a0, fa5
-; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-i64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-i64-NEXT: vslide1down.vx v8, v10, a0
; RV64-i64-NEXT: ret
%a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float> %x)
@@ -248,29 +242,21 @@ define <8 x iXLen> @lrint_v8f32(<8 x float> %x) {
; RV32-NEXT: fcvt.w.s a0, fa5
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v10, v10, a0
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v12, v8, 4
; RV32-NEXT: vfmv.f.s fa5, v12
; RV32-NEXT: fcvt.w.s a0, fa5
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v10, v10, a0
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v12, v8, 5
; RV32-NEXT: vfmv.f.s fa5, v12
; RV32-NEXT: fcvt.w.s a0, fa5
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v10, v10, a0
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v12, v8, 6
; RV32-NEXT: vfmv.f.s fa5, v12
; RV32-NEXT: fcvt.w.s a0, fa5
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v10, v10, a0
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 7
; RV32-NEXT: vfmv.f.s fa5, v8
; RV32-NEXT: fcvt.w.s a0, fa5
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v10, a0
; RV32-NEXT: ret
;
@@ -297,29 +283,21 @@ define <8 x iXLen> @lrint_v8f32(<8 x float> %x) {
; RV64-i32-NEXT: fcvt.l.s a0, fa5
; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-i32-NEXT: vslidedown.vi v12, v8, 4
; RV64-i32-NEXT: vfmv.f.s fa5, v12
; RV64-i32-NEXT: fcvt.l.s a0, fa5
-; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-i32-NEXT: vslidedown.vi v12, v8, 5
; RV64-i32-NEXT: vfmv.f.s fa5, v12
; RV64-i32-NEXT: fcvt.l.s a0, fa5
-; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-i32-NEXT: vslidedown.vi v12, v8, 6
; RV64-i32-NEXT: vfmv.f.s fa5, v12
; RV64-i32-NEXT: fcvt.l.s a0, fa5
-; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-i32-NEXT: vslidedown.vi v8, v8, 7
; RV64-i32-NEXT: vfmv.f.s fa5, v8
; RV64-i32-NEXT: fcvt.l.s a0, fa5
-; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT: vslide1down.vx v8, v10, a0
; RV64-i32-NEXT: ret
;
@@ -685,13 +663,12 @@ define <2 x iXLen> @lrint_v2f64(<2 x double> %x) {
;
; RV64-i64-LABEL: lrint_v2f64:
; RV64-i64: # %bb.0:
-; RV64-i64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-i64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-i64-NEXT: vslidedown.vi v9, v8, 1
; RV64-i64-NEXT: vfmv.f.s fa5, v9
; RV64-i64-NEXT: fcvt.l.d a0, fa5
; RV64-i64-NEXT: vfmv.f.s fa5, v8
; RV64-i64-NEXT: fcvt.l.d a1, fa5
-; RV64-i64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-i64-NEXT: vmv.v.x v8, a1
; RV64-i64-NEXT: vslide1down.vx v8, v8, a0
; RV64-i64-NEXT: ret
@@ -712,17 +689,17 @@ define <4 x iXLen> @lrint_v4f64(<4 x double> %x) {
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vmv.v.x v10, a1
; RV32-NEXT: vslide1down.vx v10, v10, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vi v12, v8, 2
; RV32-NEXT: vfmv.f.s fa5, v12
; RV32-NEXT: fcvt.w.d a0, fa5
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32-NEXT: vslide1down.vx v10, v10, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa5, v8
; RV32-NEXT: fcvt.w.d a0, fa5
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32-NEXT: vslide1down.vx v8, v10, a0
; RV32-NEXT: ret
;
@@ -737,17 +714,17 @@ define <4 x iXLen> @lrint_v4f64(<4 x double> %x) {
; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-i32-NEXT: vmv.v.x v10, a1
; RV64-i32-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-i32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-i32-NEXT: vslidedown.vi v12, v8, 2
; RV64-i32-NEXT: vfmv.f.s fa5, v12
; RV64-i32-NEXT: fcvt.l.d a0, fa5
-; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-i32-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-i32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-i32-NEXT: vslidedown.vi v8, v8, 3
; RV64-i32-NEXT: vfmv.f.s fa5, v8
; RV64-i32-NEXT: fcvt.l.d a0, fa5
-; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-i32-NEXT: vslide1down.vx v8, v10, a0
; RV64-i32-NEXT: ret
;
@@ -762,17 +739,13 @@ define <4 x iXLen> @lrint_v4f64(<4 x double> %x) {
; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-i64-NEXT: vmv.v.x v10, a1
; RV64-i64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-i64-NEXT: vslidedown.vi v12, v8, 2
; RV64-i64-NEXT: vfmv.f.s fa5, v12
; RV64-i64-NEXT: fcvt.l.d a0, fa5
-; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-i64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-i64-NEXT: vslidedown.vi v8, v8, 3
; RV64-i64-NEXT: vfmv.f.s fa5, v8
; RV64-i64-NEXT: fcvt.l.d a0, fa5
-; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-i64-NEXT: vslide1down.vx v8, v10, a0
; RV64-i64-NEXT: ret
%a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double> %x)
@@ -801,30 +774,27 @@ define <8 x iXLen> @lrint_v8f64(<8 x double> %x) {
; RV32-NEXT: fcvt.w.d a0, fa5
; RV32-NEXT: vfmv.f.s fa5, v8
; RV32-NEXT: fcvt.w.d a1, fa5
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT: vmv.v.x v10, a1
-; RV32-NEXT: vslide1down.vx v10, v10, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 2
-; RV32-NEXT: vfmv.f.s fa5, v12
-; RV32-NEXT: fcvt.w.d a0, fa5
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v10, v10, a0
; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v10, v8, 2
+; RV32-NEXT: vfmv.f.s fa5, v10
+; RV32-NEXT: fcvt.w.d a2, fa5
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: fld fa5, 32(sp)
; RV32-NEXT: vfmv.f.s fa4, v8
; RV32-NEXT: fld fa3, 40(sp)
-; RV32-NEXT: fcvt.w.d a0, fa4
-; RV32-NEXT: fcvt.w.d a1, fa5
-; RV32-NEXT: fld fa5, 48(sp)
-; RV32-NEXT: fcvt.w.d a2, fa3
+; RV32-NEXT: fcvt.w.d a3, fa4
+; RV32-NEXT: fcvt.w.d a4, fa5
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v10, a0
+; RV32-NEXT: vmv.v.x v8, a1
+; RV32-NEXT: fcvt.w.d a1, fa3
+; RV32-NEXT: fld fa5, 48(sp)
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a2
+; RV32-NEXT: vslide1down.vx v8, v8, a3
; RV32-NEXT: fcvt.w.d a0, fa5
; RV32-NEXT: fld fa5, 56(sp)
+; RV32-NEXT: vslide1down.vx v8, v8, a4
; RV32-NEXT: vslide1down.vx v8, v8, a1
-; RV32-NEXT: vslide1down.vx v8, v8, a2
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: fcvt.w.d a0, fa5
; RV32-NEXT: vslide1down.vx v8, v8, a0
@@ -854,30 +824,27 @@ define <8 x iXLen> @lrint_v8f64(<8 x double> %x) {
; RV64-i32-NEXT: fcvt.l.d a0, fa5
; RV64-i32-NEXT: vfmv.f.s fa5, v8
; RV64-i32-NEXT: fcvt.l.d a1, fa5
-; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64-i32-NEXT: vmv.v.x v10, a1
-; RV64-i32-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-i32-NEXT: vslidedown.vi v12, v8, 2
-; RV64-i32-NEXT: vfmv.f.s fa5, v12
-; RV64-i32-NEXT: fcvt.l.d a0, fa5
-; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64-i32-NEXT: vslide1down.vx v10, v10, a0
; RV64-i32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-i32-NEXT: vslidedown.vi v10, v8, 2
+; RV64-i32-NEXT: vfmv.f.s fa5, v10
+; RV64-i32-NEXT: fcvt.l.d a2, fa5
; RV64-i32-NEXT: vslidedown.vi v8, v8, 3
; RV64-i32-NEXT: fld fa5, 32(sp)
; RV64-i32-NEXT: vfmv.f.s fa4, v8
; RV64-i32-NEXT: fld fa3, 40(sp)
-; RV64-i32-NEXT: fcvt.l.d a0, fa4
-; RV64-i32-NEXT: fcvt.l.d a1, fa5
-; RV64-i32-NEXT: fld fa5, 48(sp)
-; RV64-i32-NEXT: fcvt.l.d a2, fa3
+; RV64-i32-NEXT: fcvt.l.d a3, fa4
+; RV64-i32-NEXT: fcvt.l.d a4, fa5
; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64-i32-NEXT: vslide1down.vx v8, v10, a0
+; RV64-i32-NEXT: vmv.v.x v8, a1
+; RV64-i32-NEXT: fcvt.l.d a1, fa3
+; RV64-i32-NEXT: fld fa5, 48(sp)
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a2
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a3
; RV64-i32-NEXT: fcvt.l.d a0, fa5
; RV64-i32-NEXT: fld fa5, 56(sp)
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a4
; RV64-i32-NEXT: vslide1down.vx v8, v8, a1
-; RV64-i32-NEXT: vslide1down.vx v8, v8, a2
; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
; RV64-i32-NEXT: fcvt.l.d a0, fa5
; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
index 023d707f07bf..174831518693 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
@@ -245,8 +245,8 @@ define <8 x i1> @buildvec_mask_v8i1() {
define <8 x i1> @buildvec_mask_nonconst_v8i1(i1 %x, i1 %y) {
; CHECK-LABEL: buildvec_mask_nonconst_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: li a2, 19
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a2
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
@@ -256,8 +256,8 @@ define <8 x i1> @buildvec_mask_nonconst_v8i1(i1 %x, i1 %y) {
;
; ZVE32F-LABEL: buildvec_mask_nonconst_v8i1:
; ZVE32F: # %bb.0:
-; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; ZVE32F-NEXT: li a2, 19
+; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; ZVE32F-NEXT: vmv.s.x v0, a2
; ZVE32F-NEXT: vmv.v.x v8, a1
; ZVE32F-NEXT: vmerge.vxm v8, v8, a0, v0
@@ -286,8 +286,8 @@ define <8 x i1> @buildvec_mask_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %w) {
; CHECK-NEXT: vslide1down.vx v9, v9, a1
; CHECK-NEXT: vslide1down.vx v8, v8, a3
; CHECK-NEXT: vslide1down.vx v8, v8, zero
-; CHECK-NEXT: vslide1down.vx v8, v8, a2
; CHECK-NEXT: vmv.v.i v0, 15
+; CHECK-NEXT: vslide1down.vx v8, v8, a2
; CHECK-NEXT: vslidedown.vi v8, v9, 4, v0.t
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -303,8 +303,8 @@ define <8 x i1> @buildvec_mask_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %w) {
; ZVE32F-NEXT: vslide1down.vx v9, v9, a1
; ZVE32F-NEXT: vslide1down.vx v8, v8, a3
; ZVE32F-NEXT: vslide1down.vx v8, v8, zero
-; ZVE32F-NEXT: vslide1down.vx v8, v8, a2
; ZVE32F-NEXT: vmv.v.i v0, 15
+; ZVE32F-NEXT: vslide1down.vx v8, v8, a2
; ZVE32F-NEXT: vslidedown.vi v8, v9, 4, v0.t
; ZVE32F-NEXT: vand.vi v8, v8, 1
; ZVE32F-NEXT: vmsne.vi v0, v8, 0
@@ -331,8 +331,8 @@ define <8 x i1> @buildvec_mask_optsize_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %
; CHECK-NEXT: vslide1down.vx v9, v9, a1
; CHECK-NEXT: vslide1down.vx v8, v8, a3
; CHECK-NEXT: vslide1down.vx v8, v8, zero
-; CHECK-NEXT: vslide1down.vx v8, v8, a2
; CHECK-NEXT: vmv.v.i v0, 15
+; CHECK-NEXT: vslide1down.vx v8, v8, a2
; CHECK-NEXT: vslidedown.vi v8, v9, 4, v0.t
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -348,8 +348,8 @@ define <8 x i1> @buildvec_mask_optsize_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %
; ZVE32F-NEXT: vslide1down.vx v9, v9, a1
; ZVE32F-NEXT: vslide1down.vx v8, v8, a3
; ZVE32F-NEXT: vslide1down.vx v8, v8, zero
-; ZVE32F-NEXT: vslide1down.vx v8, v8, a2
; ZVE32F-NEXT: vmv.v.i v0, 15
+; ZVE32F-NEXT: vslide1down.vx v8, v8, a2
; ZVE32F-NEXT: vslidedown.vi v8, v9, 4, v0.t
; ZVE32F-NEXT: vand.vi v8, v8, 1
; ZVE32F-NEXT: vmsne.vi v0, v8, 0
@@ -375,8 +375,8 @@ define <8 x i1> @buildvec_mask_optsize_nonconst_v8i1(i1 %x, i1 %y) optsize {
; CHECK-NEXT: vslide1down.vx v9, v9, a1
; CHECK-NEXT: vslide1down.vx v8, v8, a1
; CHECK-NEXT: vslide1down.vx v8, v8, a1
-; CHECK-NEXT: vslide1down.vx v8, v8, a1
; CHECK-NEXT: vmv.v.i v0, 15
+; CHECK-NEXT: vslide1down.vx v8, v8, a1
; CHECK-NEXT: vslidedown.vi v8, v9, 4, v0.t
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -391,8 +391,8 @@ define <8 x i1> @buildvec_mask_optsize_nonconst_v8i1(i1 %x, i1 %y) optsize {
; ZVE32F-NEXT: vslide1down.vx v9, v9, a1
; ZVE32F-NEXT: vslide1down.vx v8, v8, a1
; ZVE32F-NEXT: vslide1down.vx v8, v8, a1
-; ZVE32F-NEXT: vslide1down.vx v8, v8, a1
; ZVE32F-NEXT: vmv.v.i v0, 15
+; ZVE32F-NEXT: vslide1down.vx v8, v8, a1
; ZVE32F-NEXT: vslidedown.vi v8, v9, 4, v0.t
; ZVE32F-NEXT: vand.vi v8, v8, 1
; ZVE32F-NEXT: vmsne.vi v0, v8, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
index 7fc442c88d10..979785dd2c02 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
@@ -24,11 +24,11 @@ define void @splat_zeros_v2i1(ptr %x) {
define void @splat_v1i1(ptr %x, i1 %y) {
; CHECK-LABEL: splat_v1i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.s.x v8, a1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: vmv.s.x v8, zero
-; CHECK-NEXT: andi a1, a1, 1
-; CHECK-NEXT: vmv.s.x v9, a1
-; CHECK-NEXT: vmsne.vi v0, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 08cad29ab1b8..db0969c85a8e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -520,16 +520,16 @@ define <4 x i8> @mgather_truemask_v4i8(<4 x ptr> %ptrs, <4 x i8> %passthru) {
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: ld a1, 8(a0)
; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 24(a0)
-; RV64ZVE32F-NEXT: ld a0, 16(a0)
+; RV64ZVE32F-NEXT: ld a3, 16(a0)
+; RV64ZVE32F-NEXT: ld a0, 24(a0)
; RV64ZVE32F-NEXT: lbu a1, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vlse8.v v8, (a2), zero
-; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: lbu a2, 0(a3)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a1
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: ret
%v = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %ptrs, i32 1, <4 x i1> splat (i1 1), <4 x i8> %passthru)
ret <4 x i8> %v
@@ -711,8 +711,8 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8
; RV64ZVE32F-NEXT: .LBB12_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB12_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -734,8 +734,8 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8
; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 5
; RV64ZVE32F-NEXT: .LBB12_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB12_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -1209,16 +1209,16 @@ define <4 x i16> @mgather_truemask_v4i16(<4 x ptr> %ptrs, <4 x i16> %passthru) {
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: ld a1, 8(a0)
; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 24(a0)
-; RV64ZVE32F-NEXT: ld a0, 16(a0)
+; RV64ZVE32F-NEXT: ld a3, 16(a0)
+; RV64ZVE32F-NEXT: ld a0, 24(a0)
; RV64ZVE32F-NEXT: lh a1, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vlse16.v v8, (a2), zero
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
; RV64ZVE32F-NEXT: lh a2, 0(a3)
+; RV64ZVE32F-NEXT: lh a0, 0(a0)
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a1
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: ret
%v = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> splat (i1 1), <4 x i16> %passthru)
ret <4 x i16> %v
@@ -1405,8 +1405,8 @@ define <8 x i16> @mgather_baseidx_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: .LBB23_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB23_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -1430,8 +1430,8 @@ define <8 x i16> @mgather_baseidx_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 5
; RV64ZVE32F-NEXT: .LBB23_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB23_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -1556,8 +1556,8 @@ define <8 x i16> @mgather_baseidx_sext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB24_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB24_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -1581,8 +1581,8 @@ define <8 x i16> @mgather_baseidx_sext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 5
; RV64ZVE32F-NEXT: .LBB24_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB24_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -1708,8 +1708,8 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB25_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB25_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -1734,8 +1734,8 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 5
; RV64ZVE32F-NEXT: .LBB25_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB25_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -1863,8 +1863,8 @@ define <8 x i16> @mgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: .LBB26_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB26_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -1887,8 +1887,8 @@ define <8 x i16> @mgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 5
; RV64ZVE32F-NEXT: .LBB26_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB26_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -2258,16 +2258,16 @@ define <4 x i32> @mgather_truemask_v4i32(<4 x ptr> %ptrs, <4 x i32> %passthru) {
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: ld a1, 8(a0)
; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 24(a0)
-; RV64ZVE32F-NEXT: ld a0, 16(a0)
+; RV64ZVE32F-NEXT: ld a3, 16(a0)
+; RV64ZVE32F-NEXT: ld a0, 24(a0)
; RV64ZVE32F-NEXT: lw a1, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vlse32.v v8, (a2), zero
-; RV64ZVE32F-NEXT: lw a0, 0(a0)
; RV64ZVE32F-NEXT: lw a2, 0(a3)
+; RV64ZVE32F-NEXT: lw a0, 0(a0)
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a1
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: ret
%v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 1), <4 x i32> %passthru)
ret <4 x i32> %v
@@ -2453,8 +2453,8 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: .LBB35_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB35_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -2478,8 +2478,8 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB35_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB35_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -2603,8 +2603,8 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB36_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB36_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -2628,8 +2628,8 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB36_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB36_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -2757,8 +2757,8 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB37_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB37_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -2783,8 +2783,8 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB37_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB37_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -2915,8 +2915,8 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i
; RV64ZVE32F-NEXT: .LBB38_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB38_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -2940,8 +2940,8 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB38_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB38_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -3066,8 +3066,8 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: .LBB39_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB39_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -3091,8 +3091,8 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB39_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB39_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -3221,8 +3221,8 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: .LBB40_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB40_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -3247,8 +3247,8 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB40_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB40_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -3364,20 +3364,19 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB41_4
; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lw a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1
; RV64ZVE32F-NEXT: .LBB41_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB41_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -3400,8 +3399,8 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB41_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v12, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB41_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -3421,14 +3420,13 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB41_6
; RV64ZVE32F-NEXT: .LBB41_13: # %cond.load7
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lw a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v8, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB41_7
@@ -4090,13 +4088,13 @@ define <8 x i64> @mgather_baseidx_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB48_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a3, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a3, .LBB48_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -4253,8 +4251,8 @@ define <8 x i64> @mgather_baseidx_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: .LBB48_5: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a6, a5, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: beqz a6, .LBB48_10
; RV64ZVE32F-NEXT: # %bb.6: # %cond.load4
@@ -4364,13 +4362,13 @@ define <8 x i64> @mgather_baseidx_sext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB49_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a3, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a3, .LBB49_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -4527,8 +4525,8 @@ define <8 x i64> @mgather_baseidx_sext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB49_5: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a6, a5, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: beqz a6, .LBB49_10
; RV64ZVE32F-NEXT: # %bb.6: # %cond.load4
@@ -4640,13 +4638,13 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vzext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB50_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a3, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a3, .LBB50_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -4805,8 +4803,8 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB50_5: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a6, a5, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: beqz a6, .LBB50_10
; RV64ZVE32F-NEXT: # %bb.6: # %cond.load4
@@ -4923,13 +4921,13 @@ define <8 x i64> @mgather_baseidx_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <8 x i
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB51_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a3, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a3, .LBB51_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -5087,8 +5085,8 @@ define <8 x i64> @mgather_baseidx_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <8 x i
; RV64ZVE32F-NEXT: .LBB51_5: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a6, a5, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: beqz a6, .LBB51_10
; RV64ZVE32F-NEXT: # %bb.6: # %cond.load4
@@ -5198,13 +5196,13 @@ define <8 x i64> @mgather_baseidx_sext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB52_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a3, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a3, .LBB52_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -5362,8 +5360,8 @@ define <8 x i64> @mgather_baseidx_sext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: .LBB52_5: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a6, a5, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: beqz a6, .LBB52_10
; RV64ZVE32F-NEXT: # %bb.6: # %cond.load4
@@ -5475,13 +5473,13 @@ define <8 x i64> @mgather_baseidx_zext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vzext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB53_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a3, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a3, .LBB53_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -5643,8 +5641,8 @@ define <8 x i64> @mgather_baseidx_zext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: .LBB53_5: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a7, a6, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: beqz a7, .LBB53_10
; RV64ZVE32F-NEXT: # %bb.6: # %cond.load4
@@ -5759,13 +5757,13 @@ define <8 x i64> @mgather_baseidx_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <8 x i
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB54_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a3, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a3, .LBB54_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -5923,8 +5921,8 @@ define <8 x i64> @mgather_baseidx_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <8 x i
; RV64ZVE32F-NEXT: .LBB54_5: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a6, a5, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: beqz a6, .LBB54_10
; RV64ZVE32F-NEXT: # %bb.6: # %cond.load4
@@ -6032,13 +6030,13 @@ define <8 x i64> @mgather_baseidx_sext_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB55_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a3, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a3, .LBB55_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -6196,8 +6194,8 @@ define <8 x i64> @mgather_baseidx_sext_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <
; RV64ZVE32F-NEXT: .LBB55_5: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a6, a5, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: beqz a6, .LBB55_10
; RV64ZVE32F-NEXT: # %bb.6: # %cond.load4
@@ -6306,13 +6304,13 @@ define <8 x i64> @mgather_baseidx_zext_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB56_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a3, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a3, .LBB56_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -6472,8 +6470,8 @@ define <8 x i64> @mgather_baseidx_zext_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <
; RV64ZVE32F-NEXT: .LBB56_5: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a6, a5, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: beqz a6, .LBB56_10
; RV64ZVE32F-NEXT: # %bb.6: # %cond.load4
@@ -6603,13 +6601,13 @@ define <8 x i64> @mgather_baseidx_v8i64(ptr %base, <8 x i64> %idxs, <8 x i1> %m,
; RV32ZVE32F-NEXT: vslide1down.vx v8, v8, a5
; RV32ZVE32F-NEXT: vslide1down.vx v8, v8, a4
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB57_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a2, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a2, .LBB57_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a2, v8
; RV32ZVE32F-NEXT: lw a1, 4(a2)
; RV32ZVE32F-NEXT: lw a2, 0(a2)
@@ -7018,13 +7016,13 @@ define <4 x half> @mgather_truemask_v4f16(<4 x ptr> %ptrs, <4 x half> %passthru)
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: ld a1, 8(a0)
; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 24(a0)
-; RV64ZVE32F-NEXT: ld a0, 16(a0)
+; RV64ZVE32F-NEXT: ld a3, 16(a0)
+; RV64ZVE32F-NEXT: ld a0, 24(a0)
; RV64ZVE32F-NEXT: flh fa5, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vlse16.v v8, (a2), zero
-; RV64ZVE32F-NEXT: flh fa4, 0(a0)
-; RV64ZVE32F-NEXT: flh fa3, 0(a3)
+; RV64ZVE32F-NEXT: flh fa4, 0(a3)
+; RV64ZVE32F-NEXT: flh fa3, 0(a0)
; RV64ZVE32F-NEXT: vfslide1down.vf v8, v8, fa5
; RV64ZVE32F-NEXT: vfslide1down.vf v8, v8, fa4
; RV64ZVE32F-NEXT: vfslide1down.vf v8, v8, fa3
@@ -7214,8 +7212,8 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
; RV64ZVE32F-NEXT: .LBB64_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB64_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -7239,8 +7237,8 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 5
; RV64ZVE32F-NEXT: .LBB64_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB64_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -7365,8 +7363,8 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB65_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB65_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -7390,8 +7388,8 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 5
; RV64ZVE32F-NEXT: .LBB65_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB65_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -7517,8 +7515,8 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB66_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB66_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -7543,8 +7541,8 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 5
; RV64ZVE32F-NEXT: .LBB66_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB66_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -7672,8 +7670,8 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
; RV64ZVE32F-NEXT: .LBB67_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB67_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -7696,8 +7694,8 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 5
; RV64ZVE32F-NEXT: .LBB67_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB67_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -7941,13 +7939,13 @@ define <4 x float> @mgather_truemask_v4f32(<4 x ptr> %ptrs, <4 x float> %passthr
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: ld a1, 8(a0)
; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 24(a0)
-; RV64ZVE32F-NEXT: ld a0, 16(a0)
+; RV64ZVE32F-NEXT: ld a3, 16(a0)
+; RV64ZVE32F-NEXT: ld a0, 24(a0)
; RV64ZVE32F-NEXT: flw fa5, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vlse32.v v8, (a2), zero
-; RV64ZVE32F-NEXT: flw fa4, 0(a0)
-; RV64ZVE32F-NEXT: flw fa3, 0(a3)
+; RV64ZVE32F-NEXT: flw fa4, 0(a3)
+; RV64ZVE32F-NEXT: flw fa3, 0(a0)
; RV64ZVE32F-NEXT: vfslide1down.vf v8, v8, fa5
; RV64ZVE32F-NEXT: vfslide1down.vf v8, v8, fa4
; RV64ZVE32F-NEXT: vfslide1down.vf v8, v8, fa3
@@ -8136,8 +8134,8 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
; RV64ZVE32F-NEXT: .LBB74_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB74_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -8161,8 +8159,8 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB74_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB74_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -8286,8 +8284,8 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: .LBB75_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB75_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -8311,8 +8309,8 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB75_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB75_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -8440,8 +8438,8 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: .LBB76_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB76_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -8466,8 +8464,8 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB76_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB76_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -8598,8 +8596,8 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
; RV64ZVE32F-NEXT: .LBB77_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB77_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -8623,8 +8621,8 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB77_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB77_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -8749,8 +8747,8 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: .LBB78_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB78_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -8774,8 +8772,8 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB78_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB78_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -8904,8 +8902,8 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: .LBB79_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB79_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -8930,8 +8928,8 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB79_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB79_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -9047,20 +9045,19 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB80_4
; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: flw fa5, 0(a2)
; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1
; RV64ZVE32F-NEXT: .LBB80_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB80_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -9083,8 +9080,8 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB80_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v12, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB80_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -9104,14 +9101,13 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB80_6
; RV64ZVE32F-NEXT: .LBB80_13: # %cond.load7
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: flw fa5, 0(a2)
; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
-; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB80_7
@@ -9664,31 +9660,32 @@ define <8 x double> @mgather_baseidx_v8i8_v8f64(ptr %base, <8 x i8> %idxs, <8 x
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB87_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB87_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB87_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB87_11
; RV32ZVE32F-NEXT: .LBB87_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB87_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB87_12
; RV32ZVE32F-NEXT: .LBB87_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB87_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB87_13
; RV32ZVE32F-NEXT: .LBB87_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB87_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB87_14
; RV32ZVE32F-NEXT: .LBB87_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB87_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB87_15
; RV32ZVE32F-NEXT: .LBB87_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB87_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB87_16
; RV32ZVE32F-NEXT: .LBB87_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB87_9
; RV32ZVE32F-NEXT: .LBB87_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -9706,52 +9703,51 @@ define <8 x double> @mgather_baseidx_v8i8_v8f64(ptr %base, <8 x i8> %idxs, <8 x
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB87_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB87_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB87_2
; RV32ZVE32F-NEXT: .LBB87_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB87_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB87_3
; RV32ZVE32F-NEXT: .LBB87_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB87_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB87_4
; RV32ZVE32F-NEXT: .LBB87_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB87_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB87_5
; RV32ZVE32F-NEXT: .LBB87_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB87_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB87_6
; RV32ZVE32F-NEXT: .LBB87_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB87_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB87_7
; RV32ZVE32F-NEXT: .LBB87_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB87_8
; RV32ZVE32F-NEXT: j .LBB87_9
;
@@ -9779,8 +9775,8 @@ define <8 x double> @mgather_baseidx_v8i8_v8f64(ptr %base, <8 x i8> %idxs, <8 x
; RV64ZVE32F-NEXT: .LBB87_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB87_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -9879,31 +9875,32 @@ define <8 x double> @mgather_baseidx_sext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB88_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB88_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB88_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB88_11
; RV32ZVE32F-NEXT: .LBB88_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB88_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB88_12
; RV32ZVE32F-NEXT: .LBB88_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB88_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB88_13
; RV32ZVE32F-NEXT: .LBB88_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB88_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB88_14
; RV32ZVE32F-NEXT: .LBB88_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB88_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB88_15
; RV32ZVE32F-NEXT: .LBB88_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB88_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB88_16
; RV32ZVE32F-NEXT: .LBB88_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB88_9
; RV32ZVE32F-NEXT: .LBB88_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -9921,52 +9918,51 @@ define <8 x double> @mgather_baseidx_sext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB88_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB88_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB88_2
; RV32ZVE32F-NEXT: .LBB88_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB88_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB88_3
; RV32ZVE32F-NEXT: .LBB88_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB88_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB88_4
; RV32ZVE32F-NEXT: .LBB88_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB88_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB88_5
; RV32ZVE32F-NEXT: .LBB88_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB88_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB88_6
; RV32ZVE32F-NEXT: .LBB88_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB88_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB88_7
; RV32ZVE32F-NEXT: .LBB88_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB88_8
; RV32ZVE32F-NEXT: j .LBB88_9
;
@@ -9994,8 +9990,8 @@ define <8 x double> @mgather_baseidx_sext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB88_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB88_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -10096,31 +10092,32 @@ define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vzext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB89_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB89_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB89_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB89_11
; RV32ZVE32F-NEXT: .LBB89_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB89_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB89_12
; RV32ZVE32F-NEXT: .LBB89_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB89_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB89_13
; RV32ZVE32F-NEXT: .LBB89_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB89_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB89_14
; RV32ZVE32F-NEXT: .LBB89_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB89_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB89_15
; RV32ZVE32F-NEXT: .LBB89_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB89_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB89_16
; RV32ZVE32F-NEXT: .LBB89_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB89_9
; RV32ZVE32F-NEXT: .LBB89_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -10138,52 +10135,51 @@ define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB89_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB89_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB89_2
; RV32ZVE32F-NEXT: .LBB89_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB89_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB89_3
; RV32ZVE32F-NEXT: .LBB89_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB89_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB89_4
; RV32ZVE32F-NEXT: .LBB89_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB89_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB89_5
; RV32ZVE32F-NEXT: .LBB89_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB89_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB89_6
; RV32ZVE32F-NEXT: .LBB89_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB89_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB89_7
; RV32ZVE32F-NEXT: .LBB89_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB89_8
; RV32ZVE32F-NEXT: j .LBB89_9
;
@@ -10213,8 +10209,8 @@ define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB89_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB89_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -10320,31 +10316,32 @@ define <8 x double> @mgather_baseidx_v8i16_v8f64(ptr %base, <8 x i16> %idxs, <8
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB90_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB90_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB90_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB90_11
; RV32ZVE32F-NEXT: .LBB90_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB90_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB90_12
; RV32ZVE32F-NEXT: .LBB90_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB90_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB90_13
; RV32ZVE32F-NEXT: .LBB90_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB90_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB90_14
; RV32ZVE32F-NEXT: .LBB90_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB90_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB90_15
; RV32ZVE32F-NEXT: .LBB90_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB90_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB90_16
; RV32ZVE32F-NEXT: .LBB90_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB90_9
; RV32ZVE32F-NEXT: .LBB90_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -10362,52 +10359,51 @@ define <8 x double> @mgather_baseidx_v8i16_v8f64(ptr %base, <8 x i16> %idxs, <8
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB90_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB90_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB90_2
; RV32ZVE32F-NEXT: .LBB90_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB90_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB90_3
; RV32ZVE32F-NEXT: .LBB90_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB90_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB90_4
; RV32ZVE32F-NEXT: .LBB90_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB90_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB90_5
; RV32ZVE32F-NEXT: .LBB90_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB90_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB90_6
; RV32ZVE32F-NEXT: .LBB90_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB90_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB90_7
; RV32ZVE32F-NEXT: .LBB90_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB90_8
; RV32ZVE32F-NEXT: j .LBB90_9
;
@@ -10436,8 +10432,8 @@ define <8 x double> @mgather_baseidx_v8i16_v8f64(ptr %base, <8 x i16> %idxs, <8
; RV64ZVE32F-NEXT: .LBB90_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB90_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -10536,31 +10532,32 @@ define <8 x double> @mgather_baseidx_sext_v8i16_v8f64(ptr %base, <8 x i16> %idxs
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB91_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB91_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB91_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB91_11
; RV32ZVE32F-NEXT: .LBB91_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB91_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB91_12
; RV32ZVE32F-NEXT: .LBB91_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB91_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB91_13
; RV32ZVE32F-NEXT: .LBB91_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB91_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB91_14
; RV32ZVE32F-NEXT: .LBB91_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB91_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB91_15
; RV32ZVE32F-NEXT: .LBB91_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB91_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB91_16
; RV32ZVE32F-NEXT: .LBB91_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB91_9
; RV32ZVE32F-NEXT: .LBB91_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -10578,52 +10575,51 @@ define <8 x double> @mgather_baseidx_sext_v8i16_v8f64(ptr %base, <8 x i16> %idxs
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB91_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB91_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB91_2
; RV32ZVE32F-NEXT: .LBB91_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB91_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB91_3
; RV32ZVE32F-NEXT: .LBB91_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB91_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB91_4
; RV32ZVE32F-NEXT: .LBB91_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB91_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB91_5
; RV32ZVE32F-NEXT: .LBB91_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB91_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB91_6
; RV32ZVE32F-NEXT: .LBB91_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB91_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB91_7
; RV32ZVE32F-NEXT: .LBB91_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB91_8
; RV32ZVE32F-NEXT: j .LBB91_9
;
@@ -10652,8 +10648,8 @@ define <8 x double> @mgather_baseidx_sext_v8i16_v8f64(ptr %base, <8 x i16> %idxs
; RV64ZVE32F-NEXT: .LBB91_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB91_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -10754,31 +10750,32 @@ define <8 x double> @mgather_baseidx_zext_v8i16_v8f64(ptr %base, <8 x i16> %idxs
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vzext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB92_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB92_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB92_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB92_11
; RV32ZVE32F-NEXT: .LBB92_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB92_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB92_12
; RV32ZVE32F-NEXT: .LBB92_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB92_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB92_13
; RV32ZVE32F-NEXT: .LBB92_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB92_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB92_14
; RV32ZVE32F-NEXT: .LBB92_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB92_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB92_15
; RV32ZVE32F-NEXT: .LBB92_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB92_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB92_16
; RV32ZVE32F-NEXT: .LBB92_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB92_9
; RV32ZVE32F-NEXT: .LBB92_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -10796,52 +10793,51 @@ define <8 x double> @mgather_baseidx_zext_v8i16_v8f64(ptr %base, <8 x i16> %idxs
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB92_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB92_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB92_2
; RV32ZVE32F-NEXT: .LBB92_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB92_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB92_3
; RV32ZVE32F-NEXT: .LBB92_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB92_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB92_4
; RV32ZVE32F-NEXT: .LBB92_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB92_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB92_5
; RV32ZVE32F-NEXT: .LBB92_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB92_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB92_6
; RV32ZVE32F-NEXT: .LBB92_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB92_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB92_7
; RV32ZVE32F-NEXT: .LBB92_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB92_8
; RV32ZVE32F-NEXT: j .LBB92_9
;
@@ -10874,8 +10870,8 @@ define <8 x double> @mgather_baseidx_zext_v8i16_v8f64(ptr %base, <8 x i16> %idxs
; RV64ZVE32F-NEXT: .LBB92_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a4, a3, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a4, .LBB92_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -10979,31 +10975,32 @@ define <8 x double> @mgather_baseidx_v8i32_v8f64(ptr %base, <8 x i32> %idxs, <8
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB93_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB93_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB93_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB93_11
; RV32ZVE32F-NEXT: .LBB93_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB93_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB93_12
; RV32ZVE32F-NEXT: .LBB93_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB93_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB93_13
; RV32ZVE32F-NEXT: .LBB93_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB93_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB93_14
; RV32ZVE32F-NEXT: .LBB93_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB93_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB93_15
; RV32ZVE32F-NEXT: .LBB93_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB93_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB93_16
; RV32ZVE32F-NEXT: .LBB93_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB93_9
; RV32ZVE32F-NEXT: .LBB93_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -11021,52 +11018,51 @@ define <8 x double> @mgather_baseidx_v8i32_v8f64(ptr %base, <8 x i32> %idxs, <8
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB93_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB93_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB93_2
; RV32ZVE32F-NEXT: .LBB93_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB93_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB93_3
; RV32ZVE32F-NEXT: .LBB93_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB93_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB93_4
; RV32ZVE32F-NEXT: .LBB93_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB93_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB93_5
; RV32ZVE32F-NEXT: .LBB93_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB93_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB93_6
; RV32ZVE32F-NEXT: .LBB93_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB93_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB93_7
; RV32ZVE32F-NEXT: .LBB93_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB93_8
; RV32ZVE32F-NEXT: j .LBB93_9
;
@@ -11095,8 +11091,8 @@ define <8 x double> @mgather_baseidx_v8i32_v8f64(ptr %base, <8 x i32> %idxs, <8
; RV64ZVE32F-NEXT: .LBB93_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB93_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -11193,31 +11189,32 @@ define <8 x double> @mgather_baseidx_sext_v8i32_v8f64(ptr %base, <8 x i32> %idxs
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB94_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB94_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB94_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB94_11
; RV32ZVE32F-NEXT: .LBB94_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB94_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB94_12
; RV32ZVE32F-NEXT: .LBB94_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB94_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB94_13
; RV32ZVE32F-NEXT: .LBB94_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB94_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB94_14
; RV32ZVE32F-NEXT: .LBB94_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB94_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB94_15
; RV32ZVE32F-NEXT: .LBB94_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB94_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB94_16
; RV32ZVE32F-NEXT: .LBB94_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB94_9
; RV32ZVE32F-NEXT: .LBB94_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -11235,52 +11232,51 @@ define <8 x double> @mgather_baseidx_sext_v8i32_v8f64(ptr %base, <8 x i32> %idxs
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB94_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB94_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB94_2
; RV32ZVE32F-NEXT: .LBB94_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB94_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB94_3
; RV32ZVE32F-NEXT: .LBB94_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB94_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB94_4
; RV32ZVE32F-NEXT: .LBB94_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB94_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB94_5
; RV32ZVE32F-NEXT: .LBB94_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB94_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB94_6
; RV32ZVE32F-NEXT: .LBB94_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB94_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB94_7
; RV32ZVE32F-NEXT: .LBB94_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB94_8
; RV32ZVE32F-NEXT: j .LBB94_9
;
@@ -11309,8 +11305,8 @@ define <8 x double> @mgather_baseidx_sext_v8i32_v8f64(ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: .LBB94_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB94_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -11408,31 +11404,32 @@ define <8 x double> @mgather_baseidx_zext_v8i32_v8f64(ptr %base, <8 x i32> %idxs
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB95_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB95_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB95_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB95_11
; RV32ZVE32F-NEXT: .LBB95_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB95_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB95_12
; RV32ZVE32F-NEXT: .LBB95_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB95_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB95_13
; RV32ZVE32F-NEXT: .LBB95_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB95_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB95_14
; RV32ZVE32F-NEXT: .LBB95_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB95_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB95_15
; RV32ZVE32F-NEXT: .LBB95_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB95_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB95_16
; RV32ZVE32F-NEXT: .LBB95_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB95_9
; RV32ZVE32F-NEXT: .LBB95_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -11450,52 +11447,51 @@ define <8 x double> @mgather_baseidx_zext_v8i32_v8f64(ptr %base, <8 x i32> %idxs
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB95_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB95_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB95_2
; RV32ZVE32F-NEXT: .LBB95_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB95_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB95_3
; RV32ZVE32F-NEXT: .LBB95_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB95_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB95_4
; RV32ZVE32F-NEXT: .LBB95_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB95_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB95_5
; RV32ZVE32F-NEXT: .LBB95_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB95_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB95_6
; RV32ZVE32F-NEXT: .LBB95_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB95_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB95_7
; RV32ZVE32F-NEXT: .LBB95_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB95_8
; RV32ZVE32F-NEXT: j .LBB95_9
;
@@ -11526,8 +11522,8 @@ define <8 x double> @mgather_baseidx_zext_v8i32_v8f64(ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: .LBB95_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB95_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -11646,31 +11642,32 @@ define <8 x double> @mgather_baseidx_v8f64(ptr %base, <8 x i64> %idxs, <8 x i1>
; RV32ZVE32F-NEXT: vslide1down.vx v8, v8, a4
; RV32ZVE32F-NEXT: vslide1down.vx v8, v8, a3
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB96_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB96_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB96_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB96_11
; RV32ZVE32F-NEXT: .LBB96_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB96_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB96_12
; RV32ZVE32F-NEXT: .LBB96_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB96_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB96_13
; RV32ZVE32F-NEXT: .LBB96_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB96_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB96_14
; RV32ZVE32F-NEXT: .LBB96_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB96_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB96_15
; RV32ZVE32F-NEXT: .LBB96_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB96_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB96_16
; RV32ZVE32F-NEXT: .LBB96_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB96_9
; RV32ZVE32F-NEXT: .LBB96_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -11688,52 +11685,51 @@ define <8 x double> @mgather_baseidx_v8f64(ptr %base, <8 x i64> %idxs, <8 x i1>
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB96_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB96_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB96_2
; RV32ZVE32F-NEXT: .LBB96_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB96_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB96_3
; RV32ZVE32F-NEXT: .LBB96_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB96_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB96_4
; RV32ZVE32F-NEXT: .LBB96_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB96_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB96_5
; RV32ZVE32F-NEXT: .LBB96_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB96_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB96_6
; RV32ZVE32F-NEXT: .LBB96_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB96_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB96_7
; RV32ZVE32F-NEXT: .LBB96_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB96_8
; RV32ZVE32F-NEXT: j .LBB96_9
;
@@ -11882,8 +11878,8 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: .LBB97_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB97_25
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -11900,8 +11896,8 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vmv.s.x v11, a2
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 4
; RV64ZVE32F-NEXT: .LBB97_8: # %else11
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 32
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB97_10
; RV64ZVE32F-NEXT: # %bb.9: # %cond.load13
@@ -11914,8 +11910,8 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 5
; RV64ZVE32F-NEXT: .LBB97_10: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB97_27
; RV64ZVE32F-NEXT: # %bb.11: # %else17
@@ -11939,8 +11935,8 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: .LBB97_15: # %else26
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 1024
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB97_30
; RV64ZVE32F-NEXT: # %bb.16: # %else29
@@ -11962,8 +11958,8 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 14, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 13
; RV64ZVE32F-NEXT: .LBB97_20: # %else38
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 49
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bgez a2, .LBB97_22
; RV64ZVE32F-NEXT: # %bb.21: # %cond.load40
@@ -12092,22 +12088,22 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64V: # %bb.0:
; RV64V-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64V-NEXT: vsext.vf8 v16, v8
-; RV64V-NEXT: vmv1r.v v12, v10
-; RV64V-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; RV64V-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64V-NEXT: vsetivli zero, 16, e8, m2, ta, ma
-; RV64V-NEXT: vslidedown.vi v10, v10, 16
+; RV64V-NEXT: vslidedown.vi v12, v10, 16
+; RV64V-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; RV64V-NEXT: vluxei64.v v10, (a0), v16, v0.t
+; RV64V-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; RV64V-NEXT: vslidedown.vi v8, v8, 16
-; RV64V-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64V-NEXT: vsext.vf8 v16, v8
; RV64V-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64V-NEXT: vslidedown.vi v0, v0, 2
-; RV64V-NEXT: vsetivli zero, 16, e8, m1, ta, mu
-; RV64V-NEXT: vluxei64.v v10, (a0), v16, v0.t
+; RV64V-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64V-NEXT: vsext.vf8 v16, v8
+; RV64V-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; RV64V-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64V-NEXT: li a0, 32
; RV64V-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; RV64V-NEXT: vslideup.vi v12, v10, 16
-; RV64V-NEXT: vmv.v.v v8, v12
+; RV64V-NEXT: vslideup.vi v10, v12, 16
+; RV64V-NEXT: vmv.v.v v8, v10
; RV64V-NEXT: ret
;
; RV64ZVE32F-LABEL: mgather_baseidx_v32i8:
@@ -12139,8 +12135,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: .LBB98_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v13, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB98_49
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -12157,8 +12153,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
; RV64ZVE32F-NEXT: .LBB98_8: # %else11
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 32
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB98_10
; RV64ZVE32F-NEXT: # %bb.9: # %cond.load13
@@ -12171,8 +12167,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 5
; RV64ZVE32F-NEXT: .LBB98_10: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB98_51
; RV64ZVE32F-NEXT: # %bb.11: # %else17
@@ -12196,8 +12192,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: .LBB98_15: # %else26
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 1024
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB98_17
; RV64ZVE32F-NEXT: # %bb.16: # %cond.load28
@@ -12220,8 +12216,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 12, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 11
; RV64ZVE32F-NEXT: .LBB98_19: # %else32
-; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 51
+; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 16
; RV64ZVE32F-NEXT: bgez a2, .LBB98_21
; RV64ZVE32F-NEXT: # %bb.20: # %cond.load34
@@ -12244,8 +12240,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 14, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v9, 13
; RV64ZVE32F-NEXT: .LBB98_23: # %else38
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 49
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v13, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB98_54
; RV64ZVE32F-NEXT: # %bb.24: # %else41
@@ -12269,8 +12265,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: .LBB98_28: # %else50
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 45
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB98_57
; RV64ZVE32F-NEXT: # %bb.29: # %else53
@@ -12287,8 +12283,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 20
; RV64ZVE32F-NEXT: .LBB98_32: # %else59
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 42
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 8
; RV64ZVE32F-NEXT: bgez a2, .LBB98_34
; RV64ZVE32F-NEXT: # %bb.33: # %cond.load61
@@ -12301,8 +12297,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 22, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 21
; RV64ZVE32F-NEXT: .LBB98_34: # %else62
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 41
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB98_59
; RV64ZVE32F-NEXT: # %bb.35: # %else65
@@ -12326,8 +12322,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: .LBB98_39: # %else74
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 37
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB98_62
; RV64ZVE32F-NEXT: # %bb.40: # %else77
@@ -12349,8 +12345,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 30, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 29
; RV64ZVE32F-NEXT: .LBB98_44: # %else86
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 33
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bgez a2, .LBB98_46
; RV64ZVE32F-NEXT: # %bb.45: # %cond.load88
@@ -12640,11 +12636,10 @@ define <4 x i32> @mgather_narrow_edge_case(ptr %base) {
; RV64V-LABEL: mgather_narrow_edge_case:
; RV64V: # %bb.0:
; RV64V-NEXT: li a1, -512
-; RV64V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64V-NEXT: vmv.v.x v8, a1
; RV64V-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; RV64V-NEXT: vmv.v.i v0, 5
; RV64V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64V-NEXT: vmv.v.x v8, a1
; RV64V-NEXT: vmerge.vim v10, v8, 0, v0
; RV64V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64V-NEXT: vluxei64.v v8, (a0), v10
@@ -12728,8 +12723,8 @@ define <8 x i16> @mgather_strided_unaligned(ptr %base) {
; RV32-NEXT: vmv.v.x v8, a3
; RV32-NEXT: vslide1down.vx v8, v8, a5
; RV32-NEXT: vslide1down.vx v8, v8, a6
-; RV32-NEXT: vslide1down.vx v8, v8, a7
; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: vslide1down.vx v8, v8, a7
; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
; RV32-NEXT: ret
;
@@ -12803,8 +12798,8 @@ define <8 x i16> @mgather_strided_unaligned(ptr %base) {
; RV64V-NEXT: vmv.v.x v8, a3
; RV64V-NEXT: vslide1down.vx v8, v8, a5
; RV64V-NEXT: vslide1down.vx v8, v8, a6
-; RV64V-NEXT: vslide1down.vx v8, v8, a7
; RV64V-NEXT: vmv.v.i v0, 15
+; RV64V-NEXT: vslide1down.vx v8, v8, a7
; RV64V-NEXT: vslidedown.vi v8, v9, 4, v0.t
; RV64V-NEXT: addi sp, s0, -128
; RV64V-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
@@ -12854,8 +12849,8 @@ define <8 x i16> @mgather_strided_unaligned(ptr %base) {
; RV64ZVE32F-NEXT: vmv.v.x v8, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a6
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a7
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -12896,8 +12891,8 @@ define <8 x i16> @mgather_strided_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a4
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13>
@@ -12941,8 +12936,8 @@ define <8 x i16> @mgather_strided_2xSEW_with_offset(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a6
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i64> <i64 2, i64 3, i64 6, i64 7, i64 10, i64 11, i64 14, i64 15>
@@ -12986,8 +12981,8 @@ define <8 x i16> @mgather_reverse_unit_strided_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a6
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i64> <i64 14, i64 15, i64 12, i64 13, i64 10, i64 11, i64 8, i64 9>
@@ -13031,8 +13026,8 @@ define <8 x i16> @mgather_reverse_strided_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a6
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i64> <i64 14, i64 15, i64 10, i64 11, i64 6, i64 7, i64 2, i64 3>
@@ -13074,8 +13069,8 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a4
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 2, i32 3>
@@ -13120,8 +13115,8 @@ define <8 x i16> @mgather_gather_2xSEW_unaligned(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a4
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 0, i32 1, i32 9, i32 10, i32 4, i32 5, i32 2, i32 3>
@@ -13167,8 +13162,8 @@ define <8 x i16> @mgather_gather_2xSEW_unaligned2(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a2
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a3
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 1, i32 2, i32 9, i32 10, i32 4, i32 5, i32 2, i32 3>
@@ -13217,8 +13212,8 @@ define <8 x i16> @mgather_gather_4xSEW(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a4
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
@@ -13264,8 +13259,8 @@ define <8 x i16> @mgather_gather_4xSEW_partial_align(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a4
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
@@ -13320,8 +13315,8 @@ define <8 x i16> @mgather_shuffle_rotate(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslide1down.vx v9, v9, a2
; RV64ZVE32F-NEXT: vslide1down.vx v9, v9, a3
-; RV64ZVE32F-NEXT: vslide1down.vx v9, v9, a4
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v9, v9, a4
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i64> <i64 4, i64 5, i64 6, i64 7, i64 0, i64 1, i64 2, i64 3>
@@ -13367,8 +13362,8 @@ define <8 x i16> @mgather_shuffle_vrgather(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a4
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i64> <i64 0, i64 2, i64 3, i64 1, i64 4, i64 5, i64 6, i64 7>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
index 4bbda2152a6f..ad075e4b4e19 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
@@ -401,14 +401,14 @@ define void @masked_load_v32i64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; RV32: # %bb.0:
; RV32-NEXT: addi a3, a1, 128
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vle64.v v16, (a3)
; RV32-NEXT: vle64.v v0, (a1)
+; RV32-NEXT: vle64.v v24, (a3)
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.i v24, 0
+; RV32-NEXT: vmv.v.i v16, 0
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vmseq.vv v8, v0, v24
-; RV32-NEXT: vmseq.vv v0, v16, v24
+; RV32-NEXT: vmseq.vv v8, v0, v16
+; RV32-NEXT: vmseq.vv v0, v24, v16
; RV32-NEXT: addi a1, a0, 128
; RV32-NEXT: vle64.v v16, (a1), v0.t
; RV32-NEXT: vmv1r.v v0, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index 42e52436a7da..e6852c1b5751 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -335,18 +335,18 @@ define void @mscatter_truemask_v4i8(<4 x i8> %val, <4 x ptr> %ptrs) {
;
; RV64ZVE32F-LABEL: mscatter_truemask_v4i8:
; RV64ZVE32F: # %bb.0:
-; RV64ZVE32F-NEXT: ld a1, 24(a0)
-; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 16(a0)
-; RV64ZVE32F-NEXT: ld a0, 8(a0)
+; RV64ZVE32F-NEXT: ld a1, 0(a0)
+; RV64ZVE32F-NEXT: ld a2, 24(a0)
+; RV64ZVE32F-NEXT: ld a3, 8(a0)
+; RV64ZVE32F-NEXT: ld a0, 16(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vse8.v v8, (a2)
+; RV64ZVE32F-NEXT: vse8.v v8, (a1)
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT: vse8.v v9, (a0)
-; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
; RV64ZVE32F-NEXT: vse8.v v9, (a3)
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
+; RV64ZVE32F-NEXT: vse8.v v9, (a0)
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3
-; RV64ZVE32F-NEXT: vse8.v v8, (a1)
+; RV64ZVE32F-NEXT: vse8.v v8, (a2)
; RV64ZVE32F-NEXT: ret
call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> %val, <4 x ptr> %ptrs, i32 1, <4 x i1> splat (i1 1))
ret void
@@ -504,8 +504,8 @@ define void @mscatter_baseidx_v8i8(<8 x i8> %val, ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB9_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB9_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -526,8 +526,8 @@ define void @mscatter_baseidx_v8i8(<8 x i8> %val, ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse8.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB9_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB9_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -689,11 +689,11 @@ define void @mscatter_v2i32_truncstore_v2i16(<2 x i32> %val, <2 x ptr> %ptrs, <2
;
; RV64ZVE32F-LABEL: mscatter_v2i32_truncstore_v2i16:
; RV64ZVE32F: # %bb.0:
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vnsrl.wi v8, v8, 0
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v0
; RV64ZVE32F-NEXT: andi a3, a2, 1
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vnsrl.wi v8, v8, 0
; RV64ZVE32F-NEXT: bnez a3, .LBB12_3
; RV64ZVE32F-NEXT: # %bb.1: # %else
; RV64ZVE32F-NEXT: andi a2, a2, 2
@@ -747,13 +747,14 @@ define void @mscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x ptr> %ptrs, <2
;
; RV64ZVE32F-LABEL: mscatter_v2i64_truncstore_v2i16:
; RV64ZVE32F: # %bb.0:
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a1
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a0, v0
; RV64ZVE32F-NEXT: andi a1, a0, 1
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
; RV64ZVE32F-NEXT: bnez a1, .LBB13_3
; RV64ZVE32F-NEXT: # %bb.1: # %else
; RV64ZVE32F-NEXT: andi a0, a0, 2
@@ -852,18 +853,18 @@ define void @mscatter_truemask_v4i16(<4 x i16> %val, <4 x ptr> %ptrs) {
;
; RV64ZVE32F-LABEL: mscatter_truemask_v4i16:
; RV64ZVE32F: # %bb.0:
-; RV64ZVE32F-NEXT: ld a1, 24(a0)
-; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 16(a0)
-; RV64ZVE32F-NEXT: ld a0, 8(a0)
+; RV64ZVE32F-NEXT: ld a1, 0(a0)
+; RV64ZVE32F-NEXT: ld a2, 24(a0)
+; RV64ZVE32F-NEXT: ld a3, 8(a0)
+; RV64ZVE32F-NEXT: ld a0, 16(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vse16.v v8, (a2)
+; RV64ZVE32F-NEXT: vse16.v v8, (a1)
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT: vse16.v v9, (a0)
-; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
; RV64ZVE32F-NEXT: vse16.v v9, (a3)
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
+; RV64ZVE32F-NEXT: vse16.v v9, (a0)
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3
-; RV64ZVE32F-NEXT: vse16.v v8, (a1)
+; RV64ZVE32F-NEXT: vse16.v v8, (a2)
; RV64ZVE32F-NEXT: ret
call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %val, <4 x ptr> %ptrs, i32 2, <4 x i1> splat (i1 1))
ret void
@@ -1025,8 +1026,8 @@ define void @mscatter_baseidx_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: .LBB18_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB18_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -1048,8 +1049,8 @@ define void @mscatter_baseidx_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB18_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB18_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -1158,8 +1159,8 @@ define void @mscatter_baseidx_sext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: .LBB19_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB19_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -1181,8 +1182,8 @@ define void @mscatter_baseidx_sext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB19_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB19_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -1292,8 +1293,8 @@ define void @mscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: .LBB20_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB20_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -1316,8 +1317,8 @@ define void @mscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB20_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB20_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -1430,8 +1431,8 @@ define void @mscatter_baseidx_v8i16(<8 x i16> %val, ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: .LBB21_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB21_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -1453,8 +1454,8 @@ define void @mscatter_baseidx_v8i16(<8 x i16> %val, ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB21_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB21_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -1625,11 +1626,12 @@ define void @mscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x ptr> %ptrs, <2
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.v.x v8, a0
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a1
; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a0, v0
-; RV64ZVE32F-NEXT: andi a1, a0, 1
-; RV64ZVE32F-NEXT: bnez a1, .LBB24_3
+; RV64ZVE32F-NEXT: andi a4, a0, 1
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a1
+; RV64ZVE32F-NEXT: bnez a4, .LBB24_3
; RV64ZVE32F-NEXT: # %bb.1: # %else
; RV64ZVE32F-NEXT: andi a0, a0, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB24_4
@@ -1727,18 +1729,18 @@ define void @mscatter_truemask_v4i32(<4 x i32> %val, <4 x ptr> %ptrs) {
;
; RV64ZVE32F-LABEL: mscatter_truemask_v4i32:
; RV64ZVE32F: # %bb.0:
-; RV64ZVE32F-NEXT: ld a1, 24(a0)
-; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 16(a0)
-; RV64ZVE32F-NEXT: ld a0, 8(a0)
+; RV64ZVE32F-NEXT: ld a1, 0(a0)
+; RV64ZVE32F-NEXT: ld a2, 24(a0)
+; RV64ZVE32F-NEXT: ld a3, 8(a0)
+; RV64ZVE32F-NEXT: ld a0, 16(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vse32.v v8, (a2)
+; RV64ZVE32F-NEXT: vse32.v v8, (a1)
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT: vse32.v v9, (a0)
-; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
; RV64ZVE32F-NEXT: vse32.v v9, (a3)
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
+; RV64ZVE32F-NEXT: vse32.v v9, (a0)
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3
-; RV64ZVE32F-NEXT: vse32.v v8, (a1)
+; RV64ZVE32F-NEXT: vse32.v v8, (a2)
; RV64ZVE32F-NEXT: ret
call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %val, <4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 1))
ret void
@@ -1903,8 +1905,8 @@ define void @mscatter_baseidx_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: .LBB29_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB29_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -1927,8 +1929,8 @@ define void @mscatter_baseidx_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB29_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB29_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -1940,8 +1942,9 @@ define void @mscatter_baseidx_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB29_6
@@ -2039,8 +2042,8 @@ define void @mscatter_baseidx_sext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: .LBB30_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB30_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -2063,8 +2066,8 @@ define void @mscatter_baseidx_sext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB30_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB30_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -2076,8 +2079,9 @@ define void @mscatter_baseidx_sext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB30_6
@@ -2179,8 +2183,8 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: .LBB31_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB31_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -2204,8 +2208,8 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB31_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB31_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -2218,8 +2222,9 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB31_6
@@ -2323,8 +2328,8 @@ define void @mscatter_baseidx_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i16> %
; RV64ZVE32F-NEXT: .LBB32_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB32_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -2347,8 +2352,8 @@ define void @mscatter_baseidx_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i16> %
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB32_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB32_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -2360,8 +2365,9 @@ define void @mscatter_baseidx_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i16> %
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB32_6
@@ -2460,8 +2466,8 @@ define void @mscatter_baseidx_sext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: .LBB33_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB33_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -2484,8 +2490,8 @@ define void @mscatter_baseidx_sext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB33_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB33_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -2497,8 +2503,9 @@ define void @mscatter_baseidx_sext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB33_6
@@ -2601,8 +2608,8 @@ define void @mscatter_baseidx_zext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: .LBB34_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB34_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -2626,8 +2633,8 @@ define void @mscatter_baseidx_zext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a3)
; RV64ZVE32F-NEXT: .LBB34_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB34_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -2640,8 +2647,9 @@ define void @mscatter_baseidx_zext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: and a3, a3, a1
; RV64ZVE32F-NEXT: slli a3, a3, 2
; RV64ZVE32F-NEXT: add a3, a0, a3
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a3)
; RV64ZVE32F-NEXT: andi a3, a2, 8
; RV64ZVE32F-NEXT: beqz a3, .LBB34_6
@@ -2742,8 +2750,8 @@ define void @mscatter_baseidx_v8i32(<8 x i32> %val, ptr %base, <8 x i32> %idxs,
; RV64ZVE32F-NEXT: .LBB35_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB35_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -2766,8 +2774,8 @@ define void @mscatter_baseidx_v8i32(<8 x i32> %val, ptr %base, <8 x i32> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: .LBB35_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v12, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB35_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -3074,17 +3082,17 @@ define void @mscatter_truemask_v4i64(<4 x i64> %val, <4 x ptr> %ptrs) {
; RV32ZVE32F-NEXT: lw a3, 20(a0)
; RV32ZVE32F-NEXT: lw a4, 16(a0)
; RV32ZVE32F-NEXT: lw a5, 12(a0)
-; RV32ZVE32F-NEXT: lw a6, 8(a0)
-; RV32ZVE32F-NEXT: lw a7, 0(a0)
-; RV32ZVE32F-NEXT: lw a0, 4(a0)
+; RV32ZVE32F-NEXT: lw a6, 0(a0)
+; RV32ZVE32F-NEXT: lw a7, 4(a0)
+; RV32ZVE32F-NEXT: lw a0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v8
-; RV32ZVE32F-NEXT: sw a7, 0(t0)
-; RV32ZVE32F-NEXT: sw a0, 4(t0)
+; RV32ZVE32F-NEXT: sw a6, 0(t0)
+; RV32ZVE32F-NEXT: sw a7, 4(t0)
; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a0, v9
-; RV32ZVE32F-NEXT: sw a6, 0(a0)
-; RV32ZVE32F-NEXT: sw a5, 4(a0)
+; RV32ZVE32F-NEXT: vmv.x.s a6, v9
+; RV32ZVE32F-NEXT: sw a0, 0(a6)
+; RV32ZVE32F-NEXT: sw a5, 4(a6)
; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v9
; RV32ZVE32F-NEXT: sw a4, 0(a0)
@@ -3383,42 +3391,43 @@ define void @mscatter_baseidx_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8> %id
; RV32ZVE32F-NEXT: lw a5, 48(a0)
; RV32ZVE32F-NEXT: lw a6, 44(a0)
; RV32ZVE32F-NEXT: lw a7, 40(a0)
-; RV32ZVE32F-NEXT: lw t0, 36(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 28(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 20(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw s0, 12(a0)
-; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t2, 32(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t4, 24(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
+; RV32ZVE32F-NEXT: lw t6, 16(a0)
+; RV32ZVE32F-NEXT: lw s1, 12(a0)
+; RV32ZVE32F-NEXT: lw s0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi s1, a1, 1
-; RV32ZVE32F-NEXT: bnez s1, .LBB42_10
+; RV32ZVE32F-NEXT: vmv.x.s t0, v0
+; RV32ZVE32F-NEXT: andi s2, t0, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB42_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB42_11
; RV32ZVE32F-NEXT: .LBB42_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB42_12
; RV32ZVE32F-NEXT: .LBB42_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB42_13
; RV32ZVE32F-NEXT: .LBB42_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB42_14
; RV32ZVE32F-NEXT: .LBB42_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB42_15
; RV32ZVE32F-NEXT: .LBB42_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB42_16
; RV32ZVE32F-NEXT: .LBB42_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB42_9
; RV32ZVE32F-NEXT: .LBB42_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -3433,45 +3442,44 @@ define void @mscatter_baseidx_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8> %id
; RV32ZVE32F-NEXT: addi sp, sp, 16
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB42_10: # %cond.store
-; RV32ZVE32F-NEXT: lw s1, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw s1, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB42_2
; RV32ZVE32F-NEXT: .LBB42_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 4(a0)
-; RV32ZVE32F-NEXT: sw t6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: sw s1, 4(a0)
+; RV32ZVE32F-NEXT: sw s0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB42_3
; RV32ZVE32F-NEXT: .LBB42_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB42_4
; RV32ZVE32F-NEXT: .LBB42_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB42_5
; RV32ZVE32F-NEXT: .LBB42_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t0, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB42_6
; RV32ZVE32F-NEXT: .LBB42_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -3479,7 +3487,7 @@ define void @mscatter_baseidx_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8> %id
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a7, 0(a0)
; RV32ZVE32F-NEXT: sw a6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB42_7
; RV32ZVE32F-NEXT: .LBB42_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -3487,7 +3495,7 @@ define void @mscatter_baseidx_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8> %id
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a5, 0(a0)
; RV32ZVE32F-NEXT: sw a4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB42_8
; RV32ZVE32F-NEXT: j .LBB42_9
;
@@ -3523,8 +3531,8 @@ define void @mscatter_baseidx_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: .LBB42_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a0, a4, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB42_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -3627,42 +3635,43 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: lw a5, 48(a0)
; RV32ZVE32F-NEXT: lw a6, 44(a0)
; RV32ZVE32F-NEXT: lw a7, 40(a0)
-; RV32ZVE32F-NEXT: lw t0, 36(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 28(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 20(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw s0, 12(a0)
-; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t2, 32(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t4, 24(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
+; RV32ZVE32F-NEXT: lw t6, 16(a0)
+; RV32ZVE32F-NEXT: lw s1, 12(a0)
+; RV32ZVE32F-NEXT: lw s0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi s1, a1, 1
-; RV32ZVE32F-NEXT: bnez s1, .LBB43_10
+; RV32ZVE32F-NEXT: vmv.x.s t0, v0
+; RV32ZVE32F-NEXT: andi s2, t0, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB43_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB43_11
; RV32ZVE32F-NEXT: .LBB43_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB43_12
; RV32ZVE32F-NEXT: .LBB43_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB43_13
; RV32ZVE32F-NEXT: .LBB43_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB43_14
; RV32ZVE32F-NEXT: .LBB43_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB43_15
; RV32ZVE32F-NEXT: .LBB43_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB43_16
; RV32ZVE32F-NEXT: .LBB43_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB43_9
; RV32ZVE32F-NEXT: .LBB43_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -3677,45 +3686,44 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: addi sp, sp, 16
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB43_10: # %cond.store
-; RV32ZVE32F-NEXT: lw s1, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw s1, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB43_2
; RV32ZVE32F-NEXT: .LBB43_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 4(a0)
-; RV32ZVE32F-NEXT: sw t6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: sw s1, 4(a0)
+; RV32ZVE32F-NEXT: sw s0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB43_3
; RV32ZVE32F-NEXT: .LBB43_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB43_4
; RV32ZVE32F-NEXT: .LBB43_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB43_5
; RV32ZVE32F-NEXT: .LBB43_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t0, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB43_6
; RV32ZVE32F-NEXT: .LBB43_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -3723,7 +3731,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a7, 0(a0)
; RV32ZVE32F-NEXT: sw a6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB43_7
; RV32ZVE32F-NEXT: .LBB43_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -3731,7 +3739,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a5, 0(a0)
; RV32ZVE32F-NEXT: sw a4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB43_8
; RV32ZVE32F-NEXT: j .LBB43_9
;
@@ -3767,8 +3775,8 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: .LBB43_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a0, a4, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB43_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -3873,42 +3881,43 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: lw a5, 48(a0)
; RV32ZVE32F-NEXT: lw a6, 44(a0)
; RV32ZVE32F-NEXT: lw a7, 40(a0)
-; RV32ZVE32F-NEXT: lw t0, 36(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 28(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 20(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw s0, 12(a0)
-; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t2, 32(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t4, 24(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
+; RV32ZVE32F-NEXT: lw t6, 16(a0)
+; RV32ZVE32F-NEXT: lw s1, 12(a0)
+; RV32ZVE32F-NEXT: lw s0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vzext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi s1, a1, 1
-; RV32ZVE32F-NEXT: bnez s1, .LBB44_10
+; RV32ZVE32F-NEXT: vmv.x.s t0, v0
+; RV32ZVE32F-NEXT: andi s2, t0, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB44_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB44_11
; RV32ZVE32F-NEXT: .LBB44_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB44_12
; RV32ZVE32F-NEXT: .LBB44_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB44_13
; RV32ZVE32F-NEXT: .LBB44_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB44_14
; RV32ZVE32F-NEXT: .LBB44_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB44_15
; RV32ZVE32F-NEXT: .LBB44_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB44_16
; RV32ZVE32F-NEXT: .LBB44_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB44_9
; RV32ZVE32F-NEXT: .LBB44_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -3923,45 +3932,44 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: addi sp, sp, 16
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB44_10: # %cond.store
-; RV32ZVE32F-NEXT: lw s1, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw s1, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB44_2
; RV32ZVE32F-NEXT: .LBB44_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 4(a0)
-; RV32ZVE32F-NEXT: sw t6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: sw s1, 4(a0)
+; RV32ZVE32F-NEXT: sw s0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB44_3
; RV32ZVE32F-NEXT: .LBB44_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB44_4
; RV32ZVE32F-NEXT: .LBB44_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB44_5
; RV32ZVE32F-NEXT: .LBB44_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t0, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB44_6
; RV32ZVE32F-NEXT: .LBB44_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -3969,7 +3977,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a7, 0(a0)
; RV32ZVE32F-NEXT: sw a6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB44_7
; RV32ZVE32F-NEXT: .LBB44_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -3977,7 +3985,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a5, 0(a0)
; RV32ZVE32F-NEXT: sw a4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB44_8
; RV32ZVE32F-NEXT: j .LBB44_9
;
@@ -4015,8 +4023,8 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: .LBB44_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a0, a4, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB44_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -4126,42 +4134,43 @@ define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i16> %
; RV32ZVE32F-NEXT: lw a5, 48(a0)
; RV32ZVE32F-NEXT: lw a6, 44(a0)
; RV32ZVE32F-NEXT: lw a7, 40(a0)
-; RV32ZVE32F-NEXT: lw t0, 36(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 28(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 20(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw s0, 12(a0)
-; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t2, 32(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t4, 24(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
+; RV32ZVE32F-NEXT: lw t6, 16(a0)
+; RV32ZVE32F-NEXT: lw s1, 12(a0)
+; RV32ZVE32F-NEXT: lw s0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi s1, a1, 1
-; RV32ZVE32F-NEXT: bnez s1, .LBB45_10
+; RV32ZVE32F-NEXT: vmv.x.s t0, v0
+; RV32ZVE32F-NEXT: andi s2, t0, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB45_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB45_11
; RV32ZVE32F-NEXT: .LBB45_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB45_12
; RV32ZVE32F-NEXT: .LBB45_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB45_13
; RV32ZVE32F-NEXT: .LBB45_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB45_14
; RV32ZVE32F-NEXT: .LBB45_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB45_15
; RV32ZVE32F-NEXT: .LBB45_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB45_16
; RV32ZVE32F-NEXT: .LBB45_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB45_9
; RV32ZVE32F-NEXT: .LBB45_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4176,45 +4185,44 @@ define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i16> %
; RV32ZVE32F-NEXT: addi sp, sp, 16
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB45_10: # %cond.store
-; RV32ZVE32F-NEXT: lw s1, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw s1, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB45_2
; RV32ZVE32F-NEXT: .LBB45_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 4(a0)
-; RV32ZVE32F-NEXT: sw t6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: sw s1, 4(a0)
+; RV32ZVE32F-NEXT: sw s0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB45_3
; RV32ZVE32F-NEXT: .LBB45_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB45_4
; RV32ZVE32F-NEXT: .LBB45_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB45_5
; RV32ZVE32F-NEXT: .LBB45_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t0, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB45_6
; RV32ZVE32F-NEXT: .LBB45_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4222,7 +4230,7 @@ define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i16> %
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a7, 0(a0)
; RV32ZVE32F-NEXT: sw a6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB45_7
; RV32ZVE32F-NEXT: .LBB45_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4230,7 +4238,7 @@ define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i16> %
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a5, 0(a0)
; RV32ZVE32F-NEXT: sw a4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB45_8
; RV32ZVE32F-NEXT: j .LBB45_9
;
@@ -4267,8 +4275,8 @@ define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i16> %
; RV64ZVE32F-NEXT: .LBB45_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a0, a4, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB45_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -4371,42 +4379,43 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: lw a5, 48(a0)
; RV32ZVE32F-NEXT: lw a6, 44(a0)
; RV32ZVE32F-NEXT: lw a7, 40(a0)
-; RV32ZVE32F-NEXT: lw t0, 36(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 28(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 20(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw s0, 12(a0)
-; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t2, 32(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t4, 24(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
+; RV32ZVE32F-NEXT: lw t6, 16(a0)
+; RV32ZVE32F-NEXT: lw s1, 12(a0)
+; RV32ZVE32F-NEXT: lw s0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi s1, a1, 1
-; RV32ZVE32F-NEXT: bnez s1, .LBB46_10
+; RV32ZVE32F-NEXT: vmv.x.s t0, v0
+; RV32ZVE32F-NEXT: andi s2, t0, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB46_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB46_11
; RV32ZVE32F-NEXT: .LBB46_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB46_12
; RV32ZVE32F-NEXT: .LBB46_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB46_13
; RV32ZVE32F-NEXT: .LBB46_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB46_14
; RV32ZVE32F-NEXT: .LBB46_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB46_15
; RV32ZVE32F-NEXT: .LBB46_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB46_16
; RV32ZVE32F-NEXT: .LBB46_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB46_9
; RV32ZVE32F-NEXT: .LBB46_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4421,45 +4430,44 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: addi sp, sp, 16
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB46_10: # %cond.store
-; RV32ZVE32F-NEXT: lw s1, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw s1, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB46_2
; RV32ZVE32F-NEXT: .LBB46_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 4(a0)
-; RV32ZVE32F-NEXT: sw t6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: sw s1, 4(a0)
+; RV32ZVE32F-NEXT: sw s0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB46_3
; RV32ZVE32F-NEXT: .LBB46_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB46_4
; RV32ZVE32F-NEXT: .LBB46_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB46_5
; RV32ZVE32F-NEXT: .LBB46_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t0, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB46_6
; RV32ZVE32F-NEXT: .LBB46_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4467,7 +4475,7 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a7, 0(a0)
; RV32ZVE32F-NEXT: sw a6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB46_7
; RV32ZVE32F-NEXT: .LBB46_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4475,7 +4483,7 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a5, 0(a0)
; RV32ZVE32F-NEXT: sw a4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB46_8
; RV32ZVE32F-NEXT: j .LBB46_9
;
@@ -4512,8 +4520,8 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: .LBB46_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a0, a4, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB46_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -4618,42 +4626,43 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: lw a5, 48(a0)
; RV32ZVE32F-NEXT: lw a6, 44(a0)
; RV32ZVE32F-NEXT: lw a7, 40(a0)
-; RV32ZVE32F-NEXT: lw t0, 36(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 28(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 20(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw s0, 12(a0)
-; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t2, 32(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t4, 24(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
+; RV32ZVE32F-NEXT: lw t6, 16(a0)
+; RV32ZVE32F-NEXT: lw s1, 12(a0)
+; RV32ZVE32F-NEXT: lw s0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vzext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi s1, a1, 1
-; RV32ZVE32F-NEXT: bnez s1, .LBB47_10
+; RV32ZVE32F-NEXT: vmv.x.s t0, v0
+; RV32ZVE32F-NEXT: andi s2, t0, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB47_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB47_11
; RV32ZVE32F-NEXT: .LBB47_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB47_12
; RV32ZVE32F-NEXT: .LBB47_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB47_13
; RV32ZVE32F-NEXT: .LBB47_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB47_14
; RV32ZVE32F-NEXT: .LBB47_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB47_15
; RV32ZVE32F-NEXT: .LBB47_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB47_16
; RV32ZVE32F-NEXT: .LBB47_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB47_9
; RV32ZVE32F-NEXT: .LBB47_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4668,45 +4677,44 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: addi sp, sp, 16
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB47_10: # %cond.store
-; RV32ZVE32F-NEXT: lw s1, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw s1, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB47_2
; RV32ZVE32F-NEXT: .LBB47_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 4(a0)
-; RV32ZVE32F-NEXT: sw t6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: sw s1, 4(a0)
+; RV32ZVE32F-NEXT: sw s0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB47_3
; RV32ZVE32F-NEXT: .LBB47_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB47_4
; RV32ZVE32F-NEXT: .LBB47_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB47_5
; RV32ZVE32F-NEXT: .LBB47_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t0, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB47_6
; RV32ZVE32F-NEXT: .LBB47_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4714,7 +4722,7 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a7, 0(a0)
; RV32ZVE32F-NEXT: sw a6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB47_7
; RV32ZVE32F-NEXT: .LBB47_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4722,7 +4730,7 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a5, 0(a0)
; RV32ZVE32F-NEXT: sw a4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB47_8
; RV32ZVE32F-NEXT: j .LBB47_9
;
@@ -4763,8 +4771,8 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: .LBB47_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a0, a5, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB47_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -4872,42 +4880,43 @@ define void @mscatter_baseidx_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i32> %
; RV32ZVE32F-NEXT: lw a4, 52(a0)
; RV32ZVE32F-NEXT: lw a5, 48(a0)
; RV32ZVE32F-NEXT: lw a6, 44(a0)
-; RV32ZVE32F-NEXT: lw a7, 40(a0)
-; RV32ZVE32F-NEXT: lw t0, 36(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 28(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 20(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw s0, 12(a0)
-; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw t0, 40(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t2, 32(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t4, 24(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
+; RV32ZVE32F-NEXT: lw t6, 16(a0)
+; RV32ZVE32F-NEXT: lw s1, 12(a0)
+; RV32ZVE32F-NEXT: lw s0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi s1, a1, 1
-; RV32ZVE32F-NEXT: bnez s1, .LBB48_10
+; RV32ZVE32F-NEXT: vmv.x.s a7, v0
+; RV32ZVE32F-NEXT: andi s2, a7, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB48_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, a7, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB48_11
; RV32ZVE32F-NEXT: .LBB48_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, a7, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB48_12
; RV32ZVE32F-NEXT: .LBB48_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, a7, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB48_13
; RV32ZVE32F-NEXT: .LBB48_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, a7, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB48_14
; RV32ZVE32F-NEXT: .LBB48_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, a7, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB48_15
; RV32ZVE32F-NEXT: .LBB48_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, a7, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB48_16
; RV32ZVE32F-NEXT: .LBB48_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, a7, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB48_9
; RV32ZVE32F-NEXT: .LBB48_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4922,53 +4931,52 @@ define void @mscatter_baseidx_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i32> %
; RV32ZVE32F-NEXT: addi sp, sp, 16
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB48_10: # %cond.store
-; RV32ZVE32F-NEXT: lw s1, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw s1, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, a7, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB48_2
; RV32ZVE32F-NEXT: .LBB48_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 4(a0)
-; RV32ZVE32F-NEXT: sw t6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: sw s1, 4(a0)
+; RV32ZVE32F-NEXT: sw s0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB48_3
; RV32ZVE32F-NEXT: .LBB48_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB48_4
; RV32ZVE32F-NEXT: .LBB48_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB48_5
; RV32ZVE32F-NEXT: .LBB48_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t0, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB48_6
; RV32ZVE32F-NEXT: .LBB48_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw a7, 0(a0)
+; RV32ZVE32F-NEXT: sw t0, 0(a0)
; RV32ZVE32F-NEXT: sw a6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, a7, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB48_7
; RV32ZVE32F-NEXT: .LBB48_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4976,7 +4984,7 @@ define void @mscatter_baseidx_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i32> %
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a5, 0(a0)
; RV32ZVE32F-NEXT: sw a4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, a7, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB48_8
; RV32ZVE32F-NEXT: j .LBB48_9
;
@@ -5013,8 +5021,8 @@ define void @mscatter_baseidx_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i32> %
; RV64ZVE32F-NEXT: .LBB48_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a0, a4, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB48_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -5115,42 +5123,43 @@ define void @mscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: lw a4, 52(a0)
; RV32ZVE32F-NEXT: lw a5, 48(a0)
; RV32ZVE32F-NEXT: lw a6, 44(a0)
-; RV32ZVE32F-NEXT: lw a7, 40(a0)
-; RV32ZVE32F-NEXT: lw t0, 36(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 28(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 20(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw s0, 12(a0)
-; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw t0, 40(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t2, 32(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t4, 24(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
+; RV32ZVE32F-NEXT: lw t6, 16(a0)
+; RV32ZVE32F-NEXT: lw s1, 12(a0)
+; RV32ZVE32F-NEXT: lw s0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi s1, a1, 1
-; RV32ZVE32F-NEXT: bnez s1, .LBB49_10
+; RV32ZVE32F-NEXT: vmv.x.s a7, v0
+; RV32ZVE32F-NEXT: andi s2, a7, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB49_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, a7, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB49_11
; RV32ZVE32F-NEXT: .LBB49_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, a7, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB49_12
; RV32ZVE32F-NEXT: .LBB49_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, a7, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB49_13
; RV32ZVE32F-NEXT: .LBB49_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, a7, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB49_14
; RV32ZVE32F-NEXT: .LBB49_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, a7, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB49_15
; RV32ZVE32F-NEXT: .LBB49_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, a7, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB49_16
; RV32ZVE32F-NEXT: .LBB49_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, a7, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB49_9
; RV32ZVE32F-NEXT: .LBB49_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -5165,53 +5174,52 @@ define void @mscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: addi sp, sp, 16
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB49_10: # %cond.store
-; RV32ZVE32F-NEXT: lw s1, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw s1, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, a7, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB49_2
; RV32ZVE32F-NEXT: .LBB49_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 4(a0)
-; RV32ZVE32F-NEXT: sw t6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: sw s1, 4(a0)
+; RV32ZVE32F-NEXT: sw s0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB49_3
; RV32ZVE32F-NEXT: .LBB49_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB49_4
; RV32ZVE32F-NEXT: .LBB49_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB49_5
; RV32ZVE32F-NEXT: .LBB49_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t0, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB49_6
; RV32ZVE32F-NEXT: .LBB49_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw a7, 0(a0)
+; RV32ZVE32F-NEXT: sw t0, 0(a0)
; RV32ZVE32F-NEXT: sw a6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, a7, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB49_7
; RV32ZVE32F-NEXT: .LBB49_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -5219,7 +5227,7 @@ define void @mscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a5, 0(a0)
; RV32ZVE32F-NEXT: sw a4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, a7, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB49_8
; RV32ZVE32F-NEXT: j .LBB49_9
;
@@ -5256,8 +5264,8 @@ define void @mscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: .LBB49_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a0, a4, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB49_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -5359,42 +5367,43 @@ define void @mscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: lw a4, 52(a0)
; RV32ZVE32F-NEXT: lw a5, 48(a0)
; RV32ZVE32F-NEXT: lw a6, 44(a0)
-; RV32ZVE32F-NEXT: lw a7, 40(a0)
-; RV32ZVE32F-NEXT: lw t0, 36(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 28(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 20(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw s0, 12(a0)
-; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw t0, 40(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t2, 32(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t4, 24(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
+; RV32ZVE32F-NEXT: lw t6, 16(a0)
+; RV32ZVE32F-NEXT: lw s1, 12(a0)
+; RV32ZVE32F-NEXT: lw s0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi s1, a1, 1
-; RV32ZVE32F-NEXT: bnez s1, .LBB50_10
+; RV32ZVE32F-NEXT: vmv.x.s a7, v0
+; RV32ZVE32F-NEXT: andi s2, a7, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB50_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, a7, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB50_11
; RV32ZVE32F-NEXT: .LBB50_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, a7, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB50_12
; RV32ZVE32F-NEXT: .LBB50_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, a7, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB50_13
; RV32ZVE32F-NEXT: .LBB50_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, a7, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB50_14
; RV32ZVE32F-NEXT: .LBB50_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, a7, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB50_15
; RV32ZVE32F-NEXT: .LBB50_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, a7, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB50_16
; RV32ZVE32F-NEXT: .LBB50_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, a7, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB50_9
; RV32ZVE32F-NEXT: .LBB50_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -5409,53 +5418,52 @@ define void @mscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: addi sp, sp, 16
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB50_10: # %cond.store
-; RV32ZVE32F-NEXT: lw s1, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw s1, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, a7, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB50_2
; RV32ZVE32F-NEXT: .LBB50_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 4(a0)
-; RV32ZVE32F-NEXT: sw t6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: sw s1, 4(a0)
+; RV32ZVE32F-NEXT: sw s0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB50_3
; RV32ZVE32F-NEXT: .LBB50_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB50_4
; RV32ZVE32F-NEXT: .LBB50_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB50_5
; RV32ZVE32F-NEXT: .LBB50_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t0, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB50_6
; RV32ZVE32F-NEXT: .LBB50_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw a7, 0(a0)
+; RV32ZVE32F-NEXT: sw t0, 0(a0)
; RV32ZVE32F-NEXT: sw a6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, a7, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB50_7
; RV32ZVE32F-NEXT: .LBB50_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -5463,7 +5471,7 @@ define void @mscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a5, 0(a0)
; RV32ZVE32F-NEXT: sw a4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, a7, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB50_8
; RV32ZVE32F-NEXT: j .LBB50_9
;
@@ -5502,8 +5510,8 @@ define void @mscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: .LBB50_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a0, a4, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB50_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -5647,31 +5655,32 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
; RV32ZVE32F-NEXT: vslide1down.vx v8, v8, s3
; RV32ZVE32F-NEXT: vslide1down.vx v8, v8, s2
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB51_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi s2, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB51_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, a2, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB51_11
; RV32ZVE32F-NEXT: .LBB51_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, a2, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB51_12
; RV32ZVE32F-NEXT: .LBB51_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, a2, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB51_13
; RV32ZVE32F-NEXT: .LBB51_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, a2, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB51_14
; RV32ZVE32F-NEXT: .LBB51_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, a2, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB51_15
; RV32ZVE32F-NEXT: .LBB51_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, a2, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB51_16
; RV32ZVE32F-NEXT: .LBB51_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, a2, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB51_9
; RV32ZVE32F-NEXT: .LBB51_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -5691,13 +5700,12 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
; RV32ZVE32F-NEXT: addi sp, sp, 32
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB51_10: # %cond.store
-; RV32ZVE32F-NEXT: lw a2, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw a2, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, a2, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB51_2
; RV32ZVE32F-NEXT: .LBB51_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
@@ -5705,7 +5713,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw s1, 4(a0)
; RV32ZVE32F-NEXT: sw s0, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, a2, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB51_3
; RV32ZVE32F-NEXT: .LBB51_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
@@ -5713,7 +5721,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t6, 0(a0)
; RV32ZVE32F-NEXT: sw t5, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, a2, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB51_4
; RV32ZVE32F-NEXT: .LBB51_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
@@ -5721,7 +5729,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t4, 0(a0)
; RV32ZVE32F-NEXT: sw t3, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, a2, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB51_5
; RV32ZVE32F-NEXT: .LBB51_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -5729,7 +5737,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t2, 0(a0)
; RV32ZVE32F-NEXT: sw t1, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, a2, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB51_6
; RV32ZVE32F-NEXT: .LBB51_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -5737,7 +5745,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t0, 0(a0)
; RV32ZVE32F-NEXT: sw a7, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, a2, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB51_7
; RV32ZVE32F-NEXT: .LBB51_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -5745,7 +5753,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a6, 0(a0)
; RV32ZVE32F-NEXT: sw a5, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, a2, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB51_8
; RV32ZVE32F-NEXT: j .LBB51_9
;
@@ -6019,18 +6027,18 @@ define void @mscatter_truemask_v4f16(<4 x half> %val, <4 x ptr> %ptrs) {
;
; RV64ZVE32F-LABEL: mscatter_truemask_v4f16:
; RV64ZVE32F: # %bb.0:
-; RV64ZVE32F-NEXT: ld a1, 24(a0)
-; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 16(a0)
-; RV64ZVE32F-NEXT: ld a0, 8(a0)
+; RV64ZVE32F-NEXT: ld a1, 0(a0)
+; RV64ZVE32F-NEXT: ld a2, 24(a0)
+; RV64ZVE32F-NEXT: ld a3, 8(a0)
+; RV64ZVE32F-NEXT: ld a0, 16(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vse16.v v8, (a2)
+; RV64ZVE32F-NEXT: vse16.v v8, (a1)
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT: vse16.v v9, (a0)
-; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
; RV64ZVE32F-NEXT: vse16.v v9, (a3)
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
+; RV64ZVE32F-NEXT: vse16.v v9, (a0)
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3
-; RV64ZVE32F-NEXT: vse16.v v8, (a1)
+; RV64ZVE32F-NEXT: vse16.v v8, (a2)
; RV64ZVE32F-NEXT: ret
call void @llvm.masked.scatter.v4f16.v4p0(<4 x half> %val, <4 x ptr> %ptrs, i32 2, <4 x i1> splat (i1 1))
ret void
@@ -6192,8 +6200,8 @@ define void @mscatter_baseidx_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i8> %i
; RV64ZVE32F-NEXT: .LBB58_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB58_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -6215,8 +6223,8 @@ define void @mscatter_baseidx_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i8> %i
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB58_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB58_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -6325,8 +6333,8 @@ define void @mscatter_baseidx_sext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: .LBB59_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB59_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -6348,8 +6356,8 @@ define void @mscatter_baseidx_sext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB59_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB59_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -6459,8 +6467,8 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: .LBB60_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB60_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -6483,8 +6491,8 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB60_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB60_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -6597,8 +6605,8 @@ define void @mscatter_baseidx_v8f16(<8 x half> %val, ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: .LBB61_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB61_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -6620,8 +6628,8 @@ define void @mscatter_baseidx_v8f16(<8 x half> %val, ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB61_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB61_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -6841,18 +6849,18 @@ define void @mscatter_truemask_v4f32(<4 x float> %val, <4 x ptr> %ptrs) {
;
; RV64ZVE32F-LABEL: mscatter_truemask_v4f32:
; RV64ZVE32F: # %bb.0:
-; RV64ZVE32F-NEXT: ld a1, 24(a0)
-; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 16(a0)
-; RV64ZVE32F-NEXT: ld a0, 8(a0)
+; RV64ZVE32F-NEXT: ld a1, 0(a0)
+; RV64ZVE32F-NEXT: ld a2, 24(a0)
+; RV64ZVE32F-NEXT: ld a3, 8(a0)
+; RV64ZVE32F-NEXT: ld a0, 16(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vse32.v v8, (a2)
+; RV64ZVE32F-NEXT: vse32.v v8, (a1)
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT: vse32.v v9, (a0)
-; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
; RV64ZVE32F-NEXT: vse32.v v9, (a3)
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
+; RV64ZVE32F-NEXT: vse32.v v9, (a0)
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3
-; RV64ZVE32F-NEXT: vse32.v v8, (a1)
+; RV64ZVE32F-NEXT: vse32.v v8, (a2)
; RV64ZVE32F-NEXT: ret
call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %val, <4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 1))
ret void
@@ -7017,8 +7025,8 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x i8> %
; RV64ZVE32F-NEXT: .LBB68_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB68_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -7041,8 +7049,8 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x i8> %
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB68_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB68_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -7054,8 +7062,9 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x i8> %
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB68_6
@@ -7153,8 +7162,8 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: .LBB69_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB69_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -7177,8 +7186,8 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB69_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB69_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -7190,8 +7199,9 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB69_6
@@ -7293,8 +7303,8 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: .LBB70_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB70_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -7318,8 +7328,8 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB70_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB70_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -7332,8 +7342,9 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB70_6
@@ -7437,8 +7448,8 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x i16>
; RV64ZVE32F-NEXT: .LBB71_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB71_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -7461,8 +7472,8 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x i16>
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB71_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB71_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -7474,8 +7485,9 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x i16>
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB71_6
@@ -7574,8 +7586,8 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: .LBB72_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB72_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -7598,8 +7610,8 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB72_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB72_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -7611,8 +7623,9 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB72_6
@@ -7715,8 +7728,8 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: .LBB73_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB73_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -7740,8 +7753,8 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a3)
; RV64ZVE32F-NEXT: .LBB73_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB73_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -7754,8 +7767,9 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: and a3, a3, a1
; RV64ZVE32F-NEXT: slli a3, a3, 2
; RV64ZVE32F-NEXT: add a3, a0, a3
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a3)
; RV64ZVE32F-NEXT: andi a3, a2, 8
; RV64ZVE32F-NEXT: beqz a3, .LBB73_6
@@ -7856,8 +7870,8 @@ define void @mscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: .LBB74_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB74_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -7880,8 +7894,8 @@ define void @mscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: .LBB74_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v12, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB74_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -8391,81 +8405,81 @@ define void @mscatter_baseidx_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x i8>
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB81_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB81_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB81_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB81_10
; RV32ZVE32F-NEXT: .LBB81_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB81_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB81_11
; RV32ZVE32F-NEXT: .LBB81_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB81_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB81_12
; RV32ZVE32F-NEXT: .LBB81_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB81_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB81_13
; RV32ZVE32F-NEXT: .LBB81_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB81_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB81_14
; RV32ZVE32F-NEXT: .LBB81_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB81_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB81_15
; RV32ZVE32F-NEXT: .LBB81_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB81_16
; RV32ZVE32F-NEXT: .LBB81_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB81_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB81_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB81_2
; RV32ZVE32F-NEXT: .LBB81_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB81_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB81_3
; RV32ZVE32F-NEXT: .LBB81_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB81_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB81_4
; RV32ZVE32F-NEXT: .LBB81_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB81_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB81_5
; RV32ZVE32F-NEXT: .LBB81_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB81_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB81_6
; RV32ZVE32F-NEXT: .LBB81_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB81_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB81_7
; RV32ZVE32F-NEXT: .LBB81_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB81_8
; RV32ZVE32F-NEXT: .LBB81_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -8498,8 +8512,8 @@ define void @mscatter_baseidx_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x i8>
; RV64ZVE32F-NEXT: .LBB81_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB81_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -8591,81 +8605,81 @@ define void @mscatter_baseidx_sext_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB82_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB82_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB82_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB82_10
; RV32ZVE32F-NEXT: .LBB82_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB82_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB82_11
; RV32ZVE32F-NEXT: .LBB82_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB82_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB82_12
; RV32ZVE32F-NEXT: .LBB82_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB82_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB82_13
; RV32ZVE32F-NEXT: .LBB82_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB82_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB82_14
; RV32ZVE32F-NEXT: .LBB82_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB82_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB82_15
; RV32ZVE32F-NEXT: .LBB82_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB82_16
; RV32ZVE32F-NEXT: .LBB82_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB82_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB82_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB82_2
; RV32ZVE32F-NEXT: .LBB82_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB82_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB82_3
; RV32ZVE32F-NEXT: .LBB82_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB82_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB82_4
; RV32ZVE32F-NEXT: .LBB82_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB82_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB82_5
; RV32ZVE32F-NEXT: .LBB82_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB82_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB82_6
; RV32ZVE32F-NEXT: .LBB82_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB82_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB82_7
; RV32ZVE32F-NEXT: .LBB82_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB82_8
; RV32ZVE32F-NEXT: .LBB82_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -8698,8 +8712,8 @@ define void @mscatter_baseidx_sext_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: .LBB82_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB82_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -8793,81 +8807,81 @@ define void @mscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vzext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB83_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB83_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB83_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB83_10
; RV32ZVE32F-NEXT: .LBB83_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB83_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB83_11
; RV32ZVE32F-NEXT: .LBB83_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB83_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB83_12
; RV32ZVE32F-NEXT: .LBB83_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB83_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB83_13
; RV32ZVE32F-NEXT: .LBB83_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB83_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB83_14
; RV32ZVE32F-NEXT: .LBB83_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB83_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB83_15
; RV32ZVE32F-NEXT: .LBB83_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB83_16
; RV32ZVE32F-NEXT: .LBB83_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB83_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB83_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB83_2
; RV32ZVE32F-NEXT: .LBB83_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB83_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB83_3
; RV32ZVE32F-NEXT: .LBB83_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB83_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB83_4
; RV32ZVE32F-NEXT: .LBB83_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB83_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB83_5
; RV32ZVE32F-NEXT: .LBB83_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB83_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB83_6
; RV32ZVE32F-NEXT: .LBB83_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB83_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB83_7
; RV32ZVE32F-NEXT: .LBB83_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB83_8
; RV32ZVE32F-NEXT: .LBB83_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -8902,8 +8916,8 @@ define void @mscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: .LBB83_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB83_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -9002,81 +9016,81 @@ define void @mscatter_baseidx_v8i16_v8f64(<8 x double> %val, ptr %base, <8 x i16
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB84_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB84_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB84_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB84_10
; RV32ZVE32F-NEXT: .LBB84_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB84_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB84_11
; RV32ZVE32F-NEXT: .LBB84_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB84_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB84_12
; RV32ZVE32F-NEXT: .LBB84_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB84_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB84_13
; RV32ZVE32F-NEXT: .LBB84_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB84_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB84_14
; RV32ZVE32F-NEXT: .LBB84_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB84_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB84_15
; RV32ZVE32F-NEXT: .LBB84_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB84_16
; RV32ZVE32F-NEXT: .LBB84_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB84_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB84_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB84_2
; RV32ZVE32F-NEXT: .LBB84_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB84_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB84_3
; RV32ZVE32F-NEXT: .LBB84_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB84_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB84_4
; RV32ZVE32F-NEXT: .LBB84_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB84_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB84_5
; RV32ZVE32F-NEXT: .LBB84_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB84_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB84_6
; RV32ZVE32F-NEXT: .LBB84_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB84_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB84_7
; RV32ZVE32F-NEXT: .LBB84_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB84_8
; RV32ZVE32F-NEXT: .LBB84_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -9110,8 +9124,8 @@ define void @mscatter_baseidx_v8i16_v8f64(<8 x double> %val, ptr %base, <8 x i16
; RV64ZVE32F-NEXT: .LBB84_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB84_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -9203,81 +9217,81 @@ define void @mscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, ptr %base, <8
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB85_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB85_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB85_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB85_10
; RV32ZVE32F-NEXT: .LBB85_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB85_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB85_11
; RV32ZVE32F-NEXT: .LBB85_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB85_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB85_12
; RV32ZVE32F-NEXT: .LBB85_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB85_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB85_13
; RV32ZVE32F-NEXT: .LBB85_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB85_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB85_14
; RV32ZVE32F-NEXT: .LBB85_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB85_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB85_15
; RV32ZVE32F-NEXT: .LBB85_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB85_16
; RV32ZVE32F-NEXT: .LBB85_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB85_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB85_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB85_2
; RV32ZVE32F-NEXT: .LBB85_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB85_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB85_3
; RV32ZVE32F-NEXT: .LBB85_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB85_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB85_4
; RV32ZVE32F-NEXT: .LBB85_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB85_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB85_5
; RV32ZVE32F-NEXT: .LBB85_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB85_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB85_6
; RV32ZVE32F-NEXT: .LBB85_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB85_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB85_7
; RV32ZVE32F-NEXT: .LBB85_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB85_8
; RV32ZVE32F-NEXT: .LBB85_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -9311,8 +9325,8 @@ define void @mscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, ptr %base, <8
; RV64ZVE32F-NEXT: .LBB85_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB85_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -9406,81 +9420,81 @@ define void @mscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, ptr %base, <8
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vzext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB86_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB86_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB86_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB86_10
; RV32ZVE32F-NEXT: .LBB86_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB86_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB86_11
; RV32ZVE32F-NEXT: .LBB86_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB86_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB86_12
; RV32ZVE32F-NEXT: .LBB86_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB86_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB86_13
; RV32ZVE32F-NEXT: .LBB86_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB86_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB86_14
; RV32ZVE32F-NEXT: .LBB86_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB86_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB86_15
; RV32ZVE32F-NEXT: .LBB86_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB86_16
; RV32ZVE32F-NEXT: .LBB86_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB86_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB86_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB86_2
; RV32ZVE32F-NEXT: .LBB86_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB86_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB86_3
; RV32ZVE32F-NEXT: .LBB86_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB86_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB86_4
; RV32ZVE32F-NEXT: .LBB86_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB86_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB86_5
; RV32ZVE32F-NEXT: .LBB86_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB86_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB86_6
; RV32ZVE32F-NEXT: .LBB86_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB86_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB86_7
; RV32ZVE32F-NEXT: .LBB86_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB86_8
; RV32ZVE32F-NEXT: .LBB86_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -9518,8 +9532,8 @@ define void @mscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, ptr %base, <8
; RV64ZVE32F-NEXT: .LBB86_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB86_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -9616,81 +9630,81 @@ define void @mscatter_baseidx_v8i32_v8f64(<8 x double> %val, ptr %base, <8 x i32
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB87_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB87_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB87_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB87_10
; RV32ZVE32F-NEXT: .LBB87_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB87_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB87_11
; RV32ZVE32F-NEXT: .LBB87_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB87_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB87_12
; RV32ZVE32F-NEXT: .LBB87_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB87_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB87_13
; RV32ZVE32F-NEXT: .LBB87_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB87_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB87_14
; RV32ZVE32F-NEXT: .LBB87_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB87_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB87_15
; RV32ZVE32F-NEXT: .LBB87_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB87_16
; RV32ZVE32F-NEXT: .LBB87_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB87_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB87_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB87_2
; RV32ZVE32F-NEXT: .LBB87_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB87_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB87_3
; RV32ZVE32F-NEXT: .LBB87_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB87_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB87_4
; RV32ZVE32F-NEXT: .LBB87_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB87_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB87_5
; RV32ZVE32F-NEXT: .LBB87_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB87_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB87_6
; RV32ZVE32F-NEXT: .LBB87_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB87_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB87_7
; RV32ZVE32F-NEXT: .LBB87_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB87_8
; RV32ZVE32F-NEXT: .LBB87_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -9724,8 +9738,8 @@ define void @mscatter_baseidx_v8i32_v8f64(<8 x double> %val, ptr %base, <8 x i32
; RV64ZVE32F-NEXT: .LBB87_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB87_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -9815,81 +9829,81 @@ define void @mscatter_baseidx_sext_v8i32_v8f64(<8 x double> %val, ptr %base, <8
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB88_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB88_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB88_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB88_10
; RV32ZVE32F-NEXT: .LBB88_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB88_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB88_11
; RV32ZVE32F-NEXT: .LBB88_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB88_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB88_12
; RV32ZVE32F-NEXT: .LBB88_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB88_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB88_13
; RV32ZVE32F-NEXT: .LBB88_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB88_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB88_14
; RV32ZVE32F-NEXT: .LBB88_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB88_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB88_15
; RV32ZVE32F-NEXT: .LBB88_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB88_16
; RV32ZVE32F-NEXT: .LBB88_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB88_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB88_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB88_2
; RV32ZVE32F-NEXT: .LBB88_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB88_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB88_3
; RV32ZVE32F-NEXT: .LBB88_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB88_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB88_4
; RV32ZVE32F-NEXT: .LBB88_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB88_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB88_5
; RV32ZVE32F-NEXT: .LBB88_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB88_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB88_6
; RV32ZVE32F-NEXT: .LBB88_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB88_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB88_7
; RV32ZVE32F-NEXT: .LBB88_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB88_8
; RV32ZVE32F-NEXT: .LBB88_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -9923,8 +9937,8 @@ define void @mscatter_baseidx_sext_v8i32_v8f64(<8 x double> %val, ptr %base, <8
; RV64ZVE32F-NEXT: .LBB88_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB88_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -10015,81 +10029,81 @@ define void @mscatter_baseidx_zext_v8i32_v8f64(<8 x double> %val, ptr %base, <8
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB89_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB89_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB89_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB89_10
; RV32ZVE32F-NEXT: .LBB89_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB89_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB89_11
; RV32ZVE32F-NEXT: .LBB89_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB89_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB89_12
; RV32ZVE32F-NEXT: .LBB89_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB89_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB89_13
; RV32ZVE32F-NEXT: .LBB89_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB89_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB89_14
; RV32ZVE32F-NEXT: .LBB89_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB89_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB89_15
; RV32ZVE32F-NEXT: .LBB89_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB89_16
; RV32ZVE32F-NEXT: .LBB89_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB89_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB89_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB89_2
; RV32ZVE32F-NEXT: .LBB89_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB89_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB89_3
; RV32ZVE32F-NEXT: .LBB89_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB89_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB89_4
; RV32ZVE32F-NEXT: .LBB89_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB89_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB89_5
; RV32ZVE32F-NEXT: .LBB89_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB89_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB89_6
; RV32ZVE32F-NEXT: .LBB89_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB89_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB89_7
; RV32ZVE32F-NEXT: .LBB89_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB89_8
; RV32ZVE32F-NEXT: .LBB89_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -10125,8 +10139,8 @@ define void @mscatter_baseidx_zext_v8i32_v8f64(<8 x double> %val, ptr %base, <8
; RV64ZVE32F-NEXT: .LBB89_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB89_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -10238,81 +10252,81 @@ define void @mscatter_baseidx_v8f64(<8 x double> %val, ptr %base, <8 x i64> %idx
; RV32ZVE32F-NEXT: vslide1down.vx v8, v8, a3
; RV32ZVE32F-NEXT: vslide1down.vx v8, v8, a2
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB90_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB90_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB90_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB90_10
; RV32ZVE32F-NEXT: .LBB90_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB90_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB90_11
; RV32ZVE32F-NEXT: .LBB90_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB90_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB90_12
; RV32ZVE32F-NEXT: .LBB90_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB90_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB90_13
; RV32ZVE32F-NEXT: .LBB90_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB90_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB90_14
; RV32ZVE32F-NEXT: .LBB90_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB90_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB90_15
; RV32ZVE32F-NEXT: .LBB90_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB90_16
; RV32ZVE32F-NEXT: .LBB90_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB90_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB90_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB90_2
; RV32ZVE32F-NEXT: .LBB90_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB90_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB90_3
; RV32ZVE32F-NEXT: .LBB90_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB90_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB90_4
; RV32ZVE32F-NEXT: .LBB90_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB90_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB90_5
; RV32ZVE32F-NEXT: .LBB90_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB90_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB90_6
; RV32ZVE32F-NEXT: .LBB90_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB90_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB90_7
; RV32ZVE32F-NEXT: .LBB90_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB90_8
; RV32ZVE32F-NEXT: .LBB90_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -10454,8 +10468,8 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB91_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB91_25
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -10471,8 +10485,8 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 4
; RV64ZVE32F-NEXT: vse8.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB91_8: # %else8
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 32
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB91_10
; RV64ZVE32F-NEXT: # %bb.9: # %cond.store9
@@ -10484,8 +10498,8 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 5
; RV64ZVE32F-NEXT: vse8.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB91_10: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB91_27
; RV64ZVE32F-NEXT: # %bb.11: # %else12
@@ -10508,8 +10522,8 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB91_15: # %else18
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 1024
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB91_30
; RV64ZVE32F-NEXT: # %bb.16: # %else20
@@ -10530,8 +10544,8 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 13
; RV64ZVE32F-NEXT: vse8.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB91_20: # %else26
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 49
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bgez a2, .LBB91_22
; RV64ZVE32F-NEXT: # %bb.21: # %cond.store27
@@ -10653,11 +10667,11 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 16
; RV64-NEXT: vslidedown.vi v10, v10, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf8 v16, v10
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
-; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vsext.vf8 v16, v10
+; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
;
@@ -10686,8 +10700,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB92_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v13, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB92_49
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -10703,8 +10717,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_8: # %else8
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 32
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB92_10
; RV64ZVE32F-NEXT: # %bb.9: # %cond.store9
@@ -10716,8 +10730,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 5
; RV64ZVE32F-NEXT: vse8.v v14, (a2)
; RV64ZVE32F-NEXT: .LBB92_10: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB92_51
; RV64ZVE32F-NEXT: # %bb.11: # %else12
@@ -10740,8 +10754,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB92_15: # %else18
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 1024
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB92_17
; RV64ZVE32F-NEXT: # %bb.16: # %cond.store19
@@ -10762,8 +10776,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 11
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_19: # %else22
-; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 51
+; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 16
; RV64ZVE32F-NEXT: bgez a2, .LBB92_21
; RV64ZVE32F-NEXT: # %bb.20: # %cond.store23
@@ -10784,8 +10798,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 13
; RV64ZVE32F-NEXT: vse8.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB92_23: # %else26
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 49
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v13, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB92_54
; RV64ZVE32F-NEXT: # %bb.24: # %else28
@@ -10809,8 +10823,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB92_28: # %else34
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 45
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB92_57
; RV64ZVE32F-NEXT: # %bb.29: # %else36
@@ -10827,8 +10841,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_32: # %else40
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 42
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 8
; RV64ZVE32F-NEXT: bgez a2, .LBB92_34
; RV64ZVE32F-NEXT: # %bb.33: # %cond.store41
@@ -10841,8 +10855,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_34: # %else42
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 41
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB92_59
; RV64ZVE32F-NEXT: # %bb.35: # %else44
@@ -10866,8 +10880,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB92_39: # %else50
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 37
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB92_62
; RV64ZVE32F-NEXT: # %bb.40: # %else52
@@ -10889,8 +10903,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_44: # %else58
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 33
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bgez a2, .LBB92_46
; RV64ZVE32F-NEXT: # %bb.45: # %cond.store59
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll
index b3011d0f01ca..86c28247e97e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll
@@ -401,54 +401,41 @@ define void @masked_store_v32i64(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 18
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 4
; RV32-NEXT: sub sp, sp, a3
; RV32-NEXT: addi a3, a2, 128
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vle64.v v24, (a2)
; RV32-NEXT: vle64.v v8, (a3)
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a4, a3, 3
-; RV32-NEXT: add a3, a4, a3
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vle64.v v0, (a2)
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.i v24, 0
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vmseq.vv v8, v0, v24
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: slli a2, a2, 3
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 16
-; RV32-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: li a2, 32
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.i v8, 0
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vmseq.vv v7, v24, v8
; RV32-NEXT: addi a2, a0, 128
-; RV32-NEXT: vle64.v v8, (a2)
+; RV32-NEXT: vle64.v v24, (a2)
; RV32-NEXT: vle64.v v16, (a0)
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a2, a0, 3
-; RV32-NEXT: add a0, a2, a0
+; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmseq.vv v0, v16, v24
+; RV32-NEXT: vmseq.vv v0, v16, v8
; RV32-NEXT: addi a0, a1, 128
-; RV32-NEXT: vse64.v v8, (a0), v0.t
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v0, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vse64.v v24, (a0), v0.t
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vse64.v v8, (a1), v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 18
-; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
index 7be015e26b09..93b4f7d2a9c9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
@@ -135,16 +135,16 @@ declare <16 x half> @llvm.vp.nearbyint.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_nearbyint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
@@ -271,8 +271,8 @@ define <8 x float> @vp_nearbyint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
@@ -315,8 +315,8 @@ define <16 x float> @vp_nearbyint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zer
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
@@ -393,16 +393,16 @@ declare <4 x double> @llvm.vp.nearbyint.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_nearbyint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
@@ -437,16 +437,16 @@ declare <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_nearbyint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
@@ -481,16 +481,16 @@ declare <15 x double> @llvm.vp.nearbyint.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_nearbyint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
@@ -525,16 +525,16 @@ declare <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_nearbyint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
@@ -569,17 +569,9 @@ declare <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
@@ -588,43 +580,36 @@ define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
; CHECK-NEXT: .LBB26_2:
; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: frflags a1
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: fsflags a1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v16, v24, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v7, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vfcvt.x.f.v v16, v24, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v24, v16, v24, v0.t
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: vmv.v.v v16, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
ret <32 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
index 03624113a826..c0bd49cc9c5c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
@@ -163,12 +163,12 @@ define i32 @reduce_sum_16xi32_prefix5(ptr %p) {
; CHECK-NEXT: li a1, -32
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
-; CHECK-NEXT: vmv.v.i v8, -1
-; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmv.v.i v10, -1
+; CHECK-NEXT: vmerge.vim v10, v10, 0, v0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vand.vv v8, v10, v12
+; CHECK-NEXT: vsext.vf4 v12, v10
+; CHECK-NEXT: vand.vv v8, v8, v12
; CHECK-NEXT: vmv.s.x v10, zero
; CHECK-NEXT: vredsum.vs v8, v8, v10
; CHECK-NEXT: vmv.x.s a0, v8
@@ -192,12 +192,12 @@ define i32 @reduce_sum_16xi32_prefix6(ptr %p) {
; CHECK-NEXT: li a1, 192
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
-; CHECK-NEXT: vmv.v.i v8, -1
-; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmv.v.i v10, -1
+; CHECK-NEXT: vmerge.vim v10, v10, 0, v0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vand.vv v8, v10, v12
+; CHECK-NEXT: vsext.vf4 v12, v10
+; CHECK-NEXT: vand.vv v8, v8, v12
; CHECK-NEXT: vmv.s.x v10, zero
; CHECK-NEXT: vredsum.vs v8, v8, v10
; CHECK-NEXT: vmv.x.s a0, v8
@@ -221,10 +221,10 @@ define i32 @reduce_sum_16xi32_prefix7(ptr %p) {
; CHECK-LABEL: reduce_sum_16xi32_prefix7:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v8, zero
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vslideup.vi v10, v8, 7
-; CHECK-NEXT: vredsum.vs v8, v10, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmv.s.x v10, zero
+; CHECK-NEXT: vslideup.vi v8, v10, 7
+; CHECK-NEXT: vredsum.vs v8, v8, v10
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%v = load <16 x i32>, ptr %p, align 256
@@ -248,9 +248,9 @@ define i32 @reduce_sum_16xi32_prefix8(ptr %p) {
; CHECK-LABEL: reduce_sum_16xi32_prefix8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v8, zero
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vredsum.vs v8, v10, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmv.s.x v10, zero
+; CHECK-NEXT: vredsum.vs v8, v8, v10
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%v = load <16 x i32>, ptr %p, align 256
@@ -535,12 +535,12 @@ define i32 @reduce_xor_16xi32_prefix5(ptr %p) {
; CHECK-NEXT: li a1, -32
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
-; CHECK-NEXT: vmv.v.i v8, -1
-; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmv.v.i v10, -1
+; CHECK-NEXT: vmerge.vim v10, v10, 0, v0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vand.vv v8, v10, v12
+; CHECK-NEXT: vsext.vf4 v12, v10
+; CHECK-NEXT: vand.vv v8, v8, v12
; CHECK-NEXT: vmv.s.x v10, zero
; CHECK-NEXT: vredxor.vs v8, v8, v10
; CHECK-NEXT: vmv.x.s a0, v8
@@ -576,17 +576,17 @@ define i32 @reduce_and_16xi32_prefix2(ptr %p) {
define i32 @reduce_and_16xi32_prefix5(ptr %p) {
; CHECK-LABEL: reduce_and_16xi32_prefix5:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v8, -1
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v10, -1
; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 5
+; CHECK-NEXT: vslideup.vi v8, v10, 5
; CHECK-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 6
+; CHECK-NEXT: vslideup.vi v8, v10, 6
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 7
-; CHECK-NEXT: vredand.vs v8, v10, v10
+; CHECK-NEXT: vslideup.vi v8, v10, 7
+; CHECK-NEXT: vredand.vs v8, v8, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%v = load <16 x i32>, ptr %p, align 256
@@ -623,12 +623,12 @@ define i32 @reduce_or_16xi32_prefix5(ptr %p) {
; CHECK-NEXT: li a1, -32
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
-; CHECK-NEXT: vmv.v.i v8, -1
-; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmv.v.i v10, -1
+; CHECK-NEXT: vmerge.vim v10, v10, 0, v0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vand.vv v8, v10, v12
+; CHECK-NEXT: vsext.vf4 v12, v10
+; CHECK-NEXT: vand.vv v8, v8, v12
; CHECK-NEXT: vredor.vs v8, v8, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -668,17 +668,17 @@ define i32 @reduce_smax_16xi32_prefix2(ptr %p) {
define i32 @reduce_smax_16xi32_prefix5(ptr %p) {
; CHECK-LABEL: reduce_smax_16xi32_prefix5:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, 524288
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v8, a1
-; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: lui a0, 524288
+; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 5
+; CHECK-NEXT: vslideup.vi v8, v10, 5
; CHECK-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 6
+; CHECK-NEXT: vslideup.vi v8, v10, 6
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 7
-; CHECK-NEXT: vredmax.vs v8, v10, v10
+; CHECK-NEXT: vslideup.vi v8, v10, 7
+; CHECK-NEXT: vredmax.vs v8, v8, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%v = load <16 x i32>, ptr %p, align 256
@@ -713,17 +713,17 @@ define i32 @reduce_smin_16xi32_prefix5(ptr %p) {
; CHECK-LABEL: reduce_smin_16xi32_prefix5:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, 524288
-; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v8, a1
-; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: vmv.s.x v10, a1
; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 5
+; CHECK-NEXT: vslideup.vi v8, v10, 5
; CHECK-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 6
+; CHECK-NEXT: vslideup.vi v8, v10, 6
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 7
-; CHECK-NEXT: vredmin.vs v8, v10, v10
+; CHECK-NEXT: vslideup.vi v8, v10, 7
+; CHECK-NEXT: vredmin.vs v8, v8, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%v = load <16 x i32>, ptr %p, align 256
@@ -760,12 +760,12 @@ define i32 @reduce_umax_16xi32_prefix5(ptr %p) {
; CHECK-NEXT: li a1, -32
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
-; CHECK-NEXT: vmv.v.i v8, -1
-; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmv.v.i v10, -1
+; CHECK-NEXT: vmerge.vim v10, v10, 0, v0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vand.vv v8, v10, v12
+; CHECK-NEXT: vsext.vf4 v12, v10
+; CHECK-NEXT: vand.vv v8, v8, v12
; CHECK-NEXT: vredmaxu.vs v8, v8, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -800,17 +800,17 @@ define i32 @reduce_umin_16xi32_prefix2(ptr %p) {
define i32 @reduce_umin_16xi32_prefix5(ptr %p) {
; CHECK-LABEL: reduce_umin_16xi32_prefix5:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v8, -1
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v10, -1
; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 5
+; CHECK-NEXT: vslideup.vi v8, v10, 5
; CHECK-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 6
+; CHECK-NEXT: vslideup.vi v8, v10, 6
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 7
-; CHECK-NEXT: vredminu.vs v8, v10, v10
+; CHECK-NEXT: vslideup.vi v8, v10, 7
+; CHECK-NEXT: vredminu.vs v8, v8, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%v = load <16 x i32>, ptr %p, align 256
@@ -830,9 +830,9 @@ define float @reduce_fadd_16xf32_prefix2(ptr %p) {
; CHECK-LABEL: reduce_fadd_16xf32_prefix2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmv.s.x v8, zero
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vfredusum.vs v8, v9, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmv.s.x v9, zero
+; CHECK-NEXT: vfredusum.vs v8, v8, v9
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x float>, ptr %p, align 256
@@ -845,17 +845,17 @@ define float @reduce_fadd_16xf32_prefix2(ptr %p) {
define float @reduce_fadd_16xi32_prefix5(ptr %p) {
; CHECK-LABEL: reduce_fadd_16xi32_prefix5:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, 524288
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v8, a1
-; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: lui a0, 524288
+; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 5
+; CHECK-NEXT: vslideup.vi v8, v10, 5
; CHECK-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 6
+; CHECK-NEXT: vslideup.vi v8, v10, 6
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 7
-; CHECK-NEXT: vfredusum.vs v8, v10, v8
+; CHECK-NEXT: vslideup.vi v8, v10, 7
+; CHECK-NEXT: vfredusum.vs v8, v8, v10
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x float>, ptr %p, align 256
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
index 9df160bf30f0..7adaaa05f9dd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
@@ -177,8 +177,8 @@ declare float @llvm.vp.reduce.fadd.v64f32(float, <64 x float>, <64 x i1>, i32)
define float @vpreduce_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_fadd_v64f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 4
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB8_2
@@ -193,8 +193,8 @@ define float @vpreduce_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m, i32
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfredusum.vs v25, v16, v25, v0.t
; CHECK-NEXT: vfmv.f.s fa0, v25
; CHECK-NEXT: ret
@@ -205,8 +205,8 @@ define float @vpreduce_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m, i32
define float @vpreduce_ord_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_ord_fadd_v64f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 4
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB9_2
@@ -221,8 +221,8 @@ define float @vpreduce_ord_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m,
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfredosum.vs v25, v16, v25, v0.t
; CHECK-NEXT: vfmv.f.s fa0, v25
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
index 7dcfb247d37c..a6763fa22822 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -1853,9 +1853,9 @@ define float @vreduce_fminimum_v128f32(ptr %x) {
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: addi a2, a0, 128
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: addi a1, a0, 128
-; CHECK-NEXT: vle32.v v16, (a1)
+; CHECK-NEXT: vle32.v v16, (a2)
; CHECK-NEXT: addi a1, a0, 384
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: addi a1, a0, 256
@@ -2188,8 +2188,8 @@ define double @vreduce_fminimum_v64f64(ptr %x) {
; CHECK-NEXT: add a1, a1, a2
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: addi a1, a0, 128
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a1)
; CHECK-NEXT: addi a1, a0, 384
; CHECK-NEXT: vle64.v v8, (a1)
@@ -2286,9 +2286,9 @@ define double @vreduce_fminimum_v64f64_nonans(ptr %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: addi a1, a0, 384
-; CHECK-NEXT: vle64.v v16, (a1)
; CHECK-NEXT: addi a1, a0, 256
+; CHECK-NEXT: addi a2, a0, 384
+; CHECK-NEXT: vle64.v v16, (a2)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: vle64.v v0, (a1)
@@ -2563,9 +2563,9 @@ define float @vreduce_fmaximum_v128f32(ptr %x) {
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: addi a2, a0, 128
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: addi a1, a0, 128
-; CHECK-NEXT: vle32.v v16, (a1)
+; CHECK-NEXT: vle32.v v16, (a2)
; CHECK-NEXT: addi a1, a0, 384
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: addi a1, a0, 256
@@ -2898,8 +2898,8 @@ define double @vreduce_fmaximum_v64f64(ptr %x) {
; CHECK-NEXT: add a1, a1, a2
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: addi a1, a0, 128
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a1)
; CHECK-NEXT: addi a1, a0, 384
; CHECK-NEXT: vle64.v v8, (a1)
@@ -2996,9 +2996,9 @@ define double @vreduce_fmaximum_v64f64_nonans(ptr %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: addi a1, a0, 384
-; CHECK-NEXT: vle64.v v16, (a1)
; CHECK-NEXT: addi a1, a0, 256
+; CHECK-NEXT: addi a2, a0, 384
+; CHECK-NEXT: vle64.v v16, (a2)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: vle64.v v0, (a1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
index 02a989a96996..016f95bfef7e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
@@ -801,8 +801,8 @@ declare i32 @llvm.vp.reduce.xor.v64i32(i32, <64 x i32>, <64 x i1>, i32)
define signext i32 @vpreduce_xor_v64i32(i32 signext %s, <64 x i32> %v, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v64i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: li a3, 32
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 4
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: bltu a1, a3, .LBB49_2
@@ -817,8 +817,8 @@ define signext i32 @vpreduce_xor_v64i32(i32 signext %s, <64 x i32> %v, <64 x i1>
; CHECK-NEXT: sltu a1, a1, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredxor.vs v25, v16, v25, v0.t
; CHECK-NEXT: vmv.x.s a0, v25
; CHECK-NEXT: ret
@@ -1750,9 +1750,9 @@ define signext i8 @vpreduce_mul_v64i8(i8 signext %s, <64 x i8> %v, <64 x i1> %m,
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: li a3, 32
-; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV32-NEXT: lui a2, %hi(.LCPI72_0)
; RV32-NEXT: addi a2, a2, %lo(.LCPI72_0)
+; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV32-NEXT: vle8.v v12, (a2)
; RV32-NEXT: mv a2, a0
; RV32-NEXT: vid.v v16
@@ -1794,9 +1794,9 @@ define signext i8 @vpreduce_mul_v64i8(i8 signext %s, <64 x i8> %v, <64 x i1> %m,
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: li a3, 32
-; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV64-NEXT: lui a2, %hi(.LCPI72_0)
; RV64-NEXT: addi a2, a2, %lo(.LCPI72_0)
+; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV64-NEXT: vle8.v v12, (a2)
; RV64-NEXT: mv a2, a0
; RV64-NEXT: vid.v v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
index 6c75c9b9c294..28ce6a12c4c8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
@@ -1540,22 +1540,21 @@ define i64 @vwreduce_add_v64i64(ptr %x) {
; RV32-NEXT: vslidedown.vi v24, v8, 16
; RV32-NEXT: vslidedown.vi v0, v16, 16
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT: vmv4r.v v8, v0
-; RV32-NEXT: vwadd.vv v0, v24, v8
+; RV32-NEXT: vwadd.vv v8, v24, v0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vwadd.vv v0, v8, v16
-; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v8, v0, v8
; RV32-NEXT: vmv.s.x v16, zero
; RV32-NEXT: vredsum.vs v8, v8, v16
@@ -1588,22 +1587,21 @@ define i64 @vwreduce_add_v64i64(ptr %x) {
; RV64-NEXT: vslidedown.vi v24, v8, 16
; RV64-NEXT: vslidedown.vi v0, v16, 16
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV64-NEXT: vmv4r.v v8, v0
-; RV64-NEXT: vwadd.vv v0, v24, v8
+; RV64-NEXT: vwadd.vv v8, v24, v0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV64-NEXT: addi a0, sp, 16
; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT: vwadd.vv v0, v8, v16
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 16
; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vadd.vv v8, v0, v8
; RV64-NEXT: vmv.s.x v16, zero
; RV64-NEXT: vredsum.vs v8, v8, v16
@@ -1639,22 +1637,21 @@ define i64 @vwreduce_uadd_v64i64(ptr %x) {
; RV32-NEXT: vslidedown.vi v24, v8, 16
; RV32-NEXT: vslidedown.vi v0, v16, 16
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT: vmv4r.v v8, v0
-; RV32-NEXT: vwaddu.vv v0, v24, v8
+; RV32-NEXT: vwaddu.vv v8, v24, v0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vwaddu.vv v0, v8, v16
-; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v8, v0, v8
; RV32-NEXT: vmv.s.x v16, zero
; RV32-NEXT: vredsum.vs v8, v8, v16
@@ -1687,22 +1684,21 @@ define i64 @vwreduce_uadd_v64i64(ptr %x) {
; RV64-NEXT: vslidedown.vi v24, v8, 16
; RV64-NEXT: vslidedown.vi v0, v16, 16
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV64-NEXT: vmv4r.v v8, v0
-; RV64-NEXT: vwaddu.vv v0, v24, v8
+; RV64-NEXT: vwaddu.vv v8, v24, v0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV64-NEXT: addi a0, sp, 16
; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT: vwaddu.vv v0, v8, v16
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 16
; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vadd.vv v8, v0, v8
; RV64-NEXT: vmv.s.x v16, zero
; RV64-NEXT: vredsum.vs v8, v8, v16
@@ -2286,9 +2282,9 @@ define i64 @vreduce_and_v64i64(ptr %x) nounwind {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: addi a1, a0, 384
-; RV64-NEXT: vle64.v v16, (a1)
; RV64-NEXT: addi a1, a0, 256
+; RV64-NEXT: addi a2, a0, 384
+; RV64-NEXT: vle64.v v16, (a2)
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v24, (a0)
; RV64-NEXT: vle64.v v0, (a1)
@@ -2871,9 +2867,9 @@ define i64 @vreduce_or_v64i64(ptr %x) nounwind {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: addi a1, a0, 384
-; RV64-NEXT: vle64.v v16, (a1)
; RV64-NEXT: addi a1, a0, 256
+; RV64-NEXT: addi a2, a0, 384
+; RV64-NEXT: vle64.v v16, (a2)
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v24, (a0)
; RV64-NEXT: vle64.v v0, (a1)
@@ -4074,9 +4070,9 @@ define i64 @vreduce_smin_v64i64(ptr %x) nounwind {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: addi a1, a0, 384
-; RV64-NEXT: vle64.v v16, (a1)
; RV64-NEXT: addi a1, a0, 256
+; RV64-NEXT: addi a2, a0, 384
+; RV64-NEXT: vle64.v v16, (a2)
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v24, (a0)
; RV64-NEXT: vle64.v v0, (a1)
@@ -4659,9 +4655,9 @@ define i64 @vreduce_smax_v64i64(ptr %x) nounwind {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: addi a1, a0, 384
-; RV64-NEXT: vle64.v v16, (a1)
; RV64-NEXT: addi a1, a0, 256
+; RV64-NEXT: addi a2, a0, 384
+; RV64-NEXT: vle64.v v16, (a2)
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v24, (a0)
; RV64-NEXT: vle64.v v0, (a1)
@@ -5244,9 +5240,9 @@ define i64 @vreduce_umin_v64i64(ptr %x) nounwind {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: addi a1, a0, 384
-; RV64-NEXT: vle64.v v16, (a1)
; RV64-NEXT: addi a1, a0, 256
+; RV64-NEXT: addi a2, a0, 384
+; RV64-NEXT: vle64.v v16, (a2)
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v24, (a0)
; RV64-NEXT: vle64.v v0, (a1)
@@ -5829,9 +5825,9 @@ define i64 @vreduce_umax_v64i64(ptr %x) nounwind {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: addi a1, a0, 384
-; RV64-NEXT: vle64.v v16, (a1)
; RV64-NEXT: addi a1, a0, 256
+; RV64-NEXT: addi a2, a0, 384
+; RV64-NEXT: vle64.v v16, (a2)
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v24, (a0)
; RV64-NEXT: vle64.v v0, (a1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
index a1f010f98ab4..dc0f4e743055 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
@@ -24,8 +24,8 @@ define zeroext i1 @vpreduce_or_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32
; CHECK-LABEL: vpreduce_or_v1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -40,8 +40,8 @@ define zeroext i1 @vpreduce_xor_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i3
; CHECK-LABEL: vpreduce_xor_v1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -72,8 +72,8 @@ define zeroext i1 @vpreduce_or_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32
; CHECK-LABEL: vpreduce_or_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -88,8 +88,8 @@ define zeroext i1 @vpreduce_xor_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i3
; CHECK-LABEL: vpreduce_xor_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -120,8 +120,8 @@ define zeroext i1 @vpreduce_or_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32
; CHECK-LABEL: vpreduce_or_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -136,8 +136,8 @@ define zeroext i1 @vpreduce_xor_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i3
; CHECK-LABEL: vpreduce_xor_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -168,8 +168,8 @@ define zeroext i1 @vpreduce_or_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32
; CHECK-LABEL: vpreduce_or_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -184,8 +184,8 @@ define zeroext i1 @vpreduce_xor_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i3
; CHECK-LABEL: vpreduce_xor_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -264,8 +264,8 @@ define zeroext i1 @vpreduce_or_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m,
; CHECK-LABEL: vpreduce_or_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -280,8 +280,8 @@ define zeroext i1 @vpreduce_xor_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m,
; CHECK-LABEL: vpreduce_xor_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -296,8 +296,8 @@ define zeroext i1 @vpreduce_add_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i3
; CHECK-LABEL: vpreduce_add_v1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -312,8 +312,8 @@ define zeroext i1 @vpreduce_add_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i3
; CHECK-LABEL: vpreduce_add_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -328,8 +328,8 @@ define zeroext i1 @vpreduce_add_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i3
; CHECK-LABEL: vpreduce_add_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -344,8 +344,8 @@ define zeroext i1 @vpreduce_add_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i3
; CHECK-LABEL: vpreduce_add_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -360,8 +360,8 @@ define zeroext i1 @vpreduce_add_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m,
; CHECK-LABEL: vpreduce_add_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -488,8 +488,8 @@ define zeroext i1 @vpreduce_smin_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i
; CHECK-LABEL: vpreduce_smin_v1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -504,8 +504,8 @@ define zeroext i1 @vpreduce_smin_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i
; CHECK-LABEL: vpreduce_smin_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -520,8 +520,8 @@ define zeroext i1 @vpreduce_smin_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i
; CHECK-LABEL: vpreduce_smin_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -536,8 +536,8 @@ define zeroext i1 @vpreduce_smin_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i
; CHECK-LABEL: vpreduce_smin_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -552,8 +552,8 @@ define zeroext i1 @vpreduce_smin_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m
; CHECK-LABEL: vpreduce_smin_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -568,8 +568,8 @@ define zeroext i1 @vpreduce_smin_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m
; CHECK-LABEL: vpreduce_smin_v32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -584,8 +584,8 @@ define zeroext i1 @vpreduce_smin_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m
; CHECK-LABEL: vpreduce_smin_v64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -600,8 +600,8 @@ define zeroext i1 @vpreduce_umax_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i
; CHECK-LABEL: vpreduce_umax_v1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -616,8 +616,8 @@ define zeroext i1 @vpreduce_umax_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i
; CHECK-LABEL: vpreduce_umax_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -632,8 +632,8 @@ define zeroext i1 @vpreduce_umax_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i
; CHECK-LABEL: vpreduce_umax_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -648,8 +648,8 @@ define zeroext i1 @vpreduce_umax_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i
; CHECK-LABEL: vpreduce_umax_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -664,8 +664,8 @@ define zeroext i1 @vpreduce_umax_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m
; CHECK-LABEL: vpreduce_umax_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -680,8 +680,8 @@ define zeroext i1 @vpreduce_umax_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m
; CHECK-LABEL: vpreduce_umax_v32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -696,8 +696,8 @@ define zeroext i1 @vpreduce_umax_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m
; CHECK-LABEL: vpreduce_umax_v64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
index 920d0d5fe7ba..1f856d04ca89 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
@@ -123,15 +123,15 @@ declare <16 x half> @llvm.vp.rint.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_rint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
@@ -246,8 +246,8 @@ define <8 x float> @vp_rint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
@@ -286,8 +286,8 @@ define <16 x float> @vp_rint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
@@ -357,15 +357,15 @@ declare <4 x double> @llvm.vp.rint.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_rint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
@@ -397,15 +397,15 @@ declare <8 x double> @llvm.vp.rint.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_rint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
@@ -437,15 +437,15 @@ declare <15 x double> @llvm.vp.rint.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_rint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
@@ -477,15 +477,15 @@ declare <16 x double> @llvm.vp.rint.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_rint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
@@ -517,65 +517,54 @@ declare <32 x double> @llvm.vp.rint.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v24, v0, 2
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -597,17 +586,20 @@ define <32 x double> @vp_rint_v32f64_unmasked(<32 x double> %va, i32 zeroext %ev
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8
; CHECK-NEXT: vmflt.vf v0, v24, fa5
+; CHECK-NEXT: addi a2, a0, -16
+; CHECK-NEXT: sltu a0, a0, a2
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: and a0, a0, a2
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vfabs.v v24, v16
+; CHECK-NEXT: vmflt.vf v7, v24, fa5
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: addi a1, a0, -16
-; CHECK-NEXT: sltu a0, a0, a1
-; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
index 716cf7b0f46f..0f587232680d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
@@ -204,8 +204,8 @@ define <8 x half> @vp_round_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -261,16 +261,16 @@ declare <16 x half> @llvm.vp.round.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 4
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -290,8 +290,8 @@ define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -439,8 +439,8 @@ define <8 x float> @vp_round_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %ev
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -483,8 +483,8 @@ define <16 x float> @vp_round_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -561,16 +561,16 @@ declare <4 x double> @llvm.vp.round.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_round_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -605,16 +605,16 @@ declare <8 x double> @llvm.vp.round.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_round_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -649,16 +649,16 @@ declare <15 x double> @llvm.vp.round.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_round_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -693,16 +693,16 @@ declare <16 x double> @llvm.vp.round.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_round_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -737,69 +737,59 @@ declare <32 x double> @llvm.vp.round.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v24, v0, 2
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a1, 4
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
index 603f9397dc90..0fb7e6a7de56 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
@@ -204,8 +204,8 @@ define <8 x half> @vp_roundeven_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -261,16 +261,16 @@ declare <16 x half> @llvm.vp.roundeven.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 0
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -290,8 +290,8 @@ define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -439,8 +439,8 @@ define <8 x float> @vp_roundeven_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -483,8 +483,8 @@ define <16 x float> @vp_roundeven_v16f32(<16 x float> %va, <16 x i1> %m, i32 zer
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -561,16 +561,16 @@ declare <4 x double> @llvm.vp.roundeven.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_roundeven_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -605,16 +605,16 @@ declare <8 x double> @llvm.vp.roundeven.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_roundeven_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -649,16 +649,16 @@ declare <15 x double> @llvm.vp.roundeven.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_roundeven_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -693,16 +693,16 @@ declare <16 x double> @llvm.vp.roundeven.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_roundeven_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -737,69 +737,59 @@ declare <32 x double> @llvm.vp.roundeven.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v24, v0, 2
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
index a5adfc36887a..927f96b64422 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
@@ -204,8 +204,8 @@ define <8 x half> @vp_roundtozero_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 1
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -261,16 +261,16 @@ declare <16 x half> @llvm.vp.roundtozero.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 1
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -290,8 +290,8 @@ define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zer
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 1
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -439,8 +439,8 @@ define <8 x float> @vp_roundtozero_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroe
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -483,8 +483,8 @@ define <16 x float> @vp_roundtozero_v16f32(<16 x float> %va, <16 x i1> %m, i32 z
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -561,16 +561,16 @@ declare <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_roundtozero_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -605,16 +605,16 @@ declare <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_roundtozero_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -649,16 +649,16 @@ declare <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_roundtozero_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -693,16 +693,16 @@ declare <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_roundtozero_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -737,69 +737,59 @@ declare <32 x double> @llvm.vp.roundtozero.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v24, v0, 2
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a1, 1
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
index a4ab67f41595..80561be0ca2f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
@@ -115,17 +115,17 @@ define signext i32 @sad_2block_16xi8_as_i32(ptr %a, ptr %b, i32 signext %stridea
; CHECK-NEXT: vwaddu.vv v10, v9, v8
; CHECK-NEXT: vminu.vv v8, v12, v13
; CHECK-NEXT: vmaxu.vv v9, v12, v13
-; CHECK-NEXT: vsub.vv v8, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: add a1, a1, a3
-; CHECK-NEXT: vle8.v v9, (a0)
-; CHECK-NEXT: vle8.v v12, (a1)
+; CHECK-NEXT: vle8.v v12, (a0)
+; CHECK-NEXT: vle8.v v13, (a1)
+; CHECK-NEXT: vsub.vv v8, v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v14, v8
; CHECK-NEXT: vwaddu.vv v16, v14, v10
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vminu.vv v8, v9, v12
-; CHECK-NEXT: vmaxu.vv v9, v9, v12
+; CHECK-NEXT: vminu.vv v8, v12, v13
+; CHECK-NEXT: vmaxu.vv v9, v12, v13
; CHECK-NEXT: vsub.vv v8, v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v10, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll
index 4598bf67a236..33e9cde4c30a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll
@@ -1163,31 +1163,31 @@ define <128 x i1> @fcmp_oeq_vv_v128f16(<128 x half> %va, <128 x half> %vb, <128
; ZVFH-NEXT: addi a0, sp, 16
; ZVFH-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFH-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; ZVFH-NEXT: vslidedown.vi v7, v0, 8
+; ZVFH-NEXT: vslidedown.vi v6, v0, 8
; ZVFH-NEXT: mv a0, a2
; ZVFH-NEXT: bltu a2, a3, .LBB43_2
; ZVFH-NEXT: # %bb.1:
; ZVFH-NEXT: li a0, 64
; ZVFH-NEXT: .LBB43_2:
+; ZVFH-NEXT: addi a1, sp, 16
+; ZVFH-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH-NEXT: addi a0, sp, 16
-; ZVFH-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFH-NEXT: vmfeq.vv v6, v8, v24, v0.t
+; ZVFH-NEXT: vmfeq.vv v7, v8, v24, v0.t
; ZVFH-NEXT: addi a0, a2, -64
; ZVFH-NEXT: sltu a1, a2, a0
; ZVFH-NEXT: addi a1, a1, -1
; ZVFH-NEXT: and a0, a1, a0
+; ZVFH-NEXT: vmv1r.v v0, v6
+; ZVFH-NEXT: csrr a1, vlenb
+; ZVFH-NEXT: slli a1, a1, 3
+; ZVFH-NEXT: add a1, sp, a1
+; ZVFH-NEXT: addi a1, a1, 16
+; ZVFH-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH-NEXT: vmv1r.v v0, v7
-; ZVFH-NEXT: csrr a0, vlenb
-; ZVFH-NEXT: slli a0, a0, 3
-; ZVFH-NEXT: add a0, sp, a0
-; ZVFH-NEXT: addi a0, a0, 16
-; ZVFH-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFH-NEXT: vmfeq.vv v24, v16, v8, v0.t
+; ZVFH-NEXT: vmfeq.vv v8, v16, v24, v0.t
; ZVFH-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; ZVFH-NEXT: vslideup.vi v6, v24, 8
-; ZVFH-NEXT: vmv.v.v v0, v6
+; ZVFH-NEXT: vslideup.vi v7, v8, 8
+; ZVFH-NEXT: vmv.v.v v0, v7
; ZVFH-NEXT: csrr a0, vlenb
; ZVFH-NEXT: slli a0, a0, 4
; ZVFH-NEXT: add sp, sp, a0
@@ -2865,37 +2865,36 @@ define <32 x i1> @fcmp_oeq_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v6, v0, 2
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a1, .LBB87_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB87_2:
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v6, v8, v24, v0.t
+; CHECK-NEXT: vmfeq.vv v7, v8, v24, v0.t
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: vmv1r.v v0, v6
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v24, v16, v8, v0.t
+; CHECK-NEXT: vmfeq.vv v8, v16, v24, v0.t
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT: vslideup.vi v6, v24, 2
-; CHECK-NEXT: vmv1r.v v0, v6
+; CHECK-NEXT: vslideup.vi v7, v8, 2
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
index 21bbca00921d..5f3847e08505 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
@@ -611,10 +611,10 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
; CHECK-NEXT: vle8.v v8, (a2)
; CHECK-NEXT: addi a2, a3, -128
; CHECK-NEXT: sltu a4, a3, a2
-; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: and a2, a4, a2
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vv v6, v16, v8, v0.t
@@ -622,7 +622,6 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: .LBB51_2:
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -631,6 +630,7 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vv v16, v8, v24, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vmv1r.v v8, v6
@@ -660,8 +660,8 @@ define <256 x i1> @icmp_eq_vx_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 z
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB52_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vmv1r.v v8, v25
@@ -689,8 +689,8 @@ define <256 x i1> @icmp_eq_vx_swap_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB53_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vmv1r.v v8, v25
@@ -1264,31 +1264,31 @@ define <64 x i1> @icmp_eq_vv_v64i32(<64 x i32> %va, <64 x i32> %vb, <64 x i1> %m
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v7, v0, 4
+; CHECK-NEXT: vslidedown.vi v6, v0, 4
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a3, .LBB99_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: .LBB99_2:
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmseq.vv v6, v8, v24, v0.t
+; CHECK-NEXT: vmseq.vv v7, v8, v24, v0.t
; CHECK-NEXT: addi a0, a2, -32
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: vmv1r.v v0, v6
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmseq.vv v24, v16, v8, v0.t
+; CHECK-NEXT: vmseq.vv v8, v16, v24, v0.t
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v6, v24, 4
-; CHECK-NEXT: vmv1r.v v0, v6
+; CHECK-NEXT: vslideup.vi v7, v8, 4
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
@@ -1301,8 +1301,8 @@ define <64 x i1> @icmp_eq_vv_v64i32(<64 x i32> %va, <64 x i32> %vb, <64 x i1> %m
define <64 x i1> @icmp_eq_vx_v64i32(<64 x i32> %va, i32 %b, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_v64i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: li a3, 32
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 4
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: bltu a1, a3, .LBB100_2
@@ -1315,8 +1315,8 @@ define <64 x i1> @icmp_eq_vx_v64i32(<64 x i32> %va, i32 %b, <64 x i1> %m, i32 ze
; CHECK-NEXT: sltu a1, a1, a2
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a1, a1, a2
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmseq.vx v8, v16, a0, v0.t
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vslideup.vi v25, v8, 4
@@ -1331,8 +1331,8 @@ define <64 x i1> @icmp_eq_vx_v64i32(<64 x i32> %va, i32 %b, <64 x i1> %m, i32 ze
define <64 x i1> @icmp_eq_vx_swap_v64i32(<64 x i32> %va, i32 %b, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_swap_v64i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: li a3, 32
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 4
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: bltu a1, a3, .LBB101_2
@@ -1345,8 +1345,8 @@ define <64 x i1> @icmp_eq_vx_swap_v64i32(<64 x i32> %va, i32 %b, <64 x i1> %m, i
; CHECK-NEXT: sltu a1, a1, a2
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a1, a1, a2
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmseq.vx v8, v16, a0, v0.t
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vslideup.vi v25, v8, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll
index 52596d889241..d1980ee3b0a6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll
@@ -151,8 +151,8 @@ declare <32 x i64> @llvm.vp.sext.v32i64.v32i32(<32 x i32>, <32 x i1>, i32)
define <32 x i64> @vsext_v32i64_v32i32(<32 x i32> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsext_v32i64_v32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB12_2
@@ -167,8 +167,8 @@ define <32 x i64> @vsext_v32i64_v32i32(<32 x i32> %va, <32 x i1> %m, i32 zeroext
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 16
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsext.vf2 v16, v8, v0.t
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
index 609b4e982489..925366e8b1d5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
@@ -33,8 +33,8 @@ define <8 x i32> @concat_4xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x
; VLS-LABEL: concat_4xv2i32:
; VLS: # %bb.0:
; VLS-NEXT: vmv1r.v v13, v10
-; VLS-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; VLS-NEXT: vmv1r.v v12, v8
+; VLS-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; VLS-NEXT: vslideup.vi v13, v11, 2
; VLS-NEXT: vslideup.vi v12, v9, 2
; VLS-NEXT: vmv2r.v v8, v12
@@ -147,8 +147,8 @@ define <16 x i32> @concat_8xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x
; VLS-NEXT: vmv1r.v v19, v14
; VLS-NEXT: vmv1r.v v18, v12
; VLS-NEXT: vmv1r.v v17, v10
-; VLS-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; VLS-NEXT: vmv1r.v v16, v8
+; VLS-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; VLS-NEXT: vslideup.vi v19, v15, 2
; VLS-NEXT: vslideup.vi v18, v13, 2
; VLS-NEXT: vslideup.vi v17, v11, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
index 8499086994bc..d461fa8378cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
@@ -164,11 +164,10 @@ define <4 x i64> @m2_splat_into_slide_two_source_v2_lo(<4 x i64> %v1, <4 x i64>
define <4 x i64> @m2_splat_into_slide_two_source(<4 x i64> %v1, <4 x i64> %v2) vscale_range(2,2) {
; CHECK-LABEL: m2_splat_into_slide_two_source:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vrgather.vi v12, v8, 0
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.i v0, 12
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT: vrgather.vi v12, v8, 0
; CHECK-NEXT: vslideup.vi v12, v10, 1, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
index 47d7baade8b4..d70ed2fb0e26 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
@@ -101,10 +101,10 @@ define <32 x i1> @reverse_v32i1(<32 x i1> %a) {
; NO-ZVBB-LABEL: reverse_v32i1:
; NO-ZVBB: # %bb.0:
; NO-ZVBB-NEXT: li a0, 32
+; NO-ZVBB-NEXT: lui a1, %hi(.LCPI4_0)
+; NO-ZVBB-NEXT: addi a1, a1, %lo(.LCPI4_0)
; NO-ZVBB-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; NO-ZVBB-NEXT: lui a0, %hi(.LCPI4_0)
-; NO-ZVBB-NEXT: addi a0, a0, %lo(.LCPI4_0)
-; NO-ZVBB-NEXT: vle8.v v8, (a0)
+; NO-ZVBB-NEXT: vle8.v v8, (a1)
; NO-ZVBB-NEXT: vmv.v.i v10, 0
; NO-ZVBB-NEXT: vmerge.vim v10, v10, 1, v0
; NO-ZVBB-NEXT: vrgather.vv v12, v10, v8
@@ -124,10 +124,10 @@ define <64 x i1> @reverse_v64i1(<64 x i1> %a) {
; NO-ZVBB-LABEL: reverse_v64i1:
; NO-ZVBB: # %bb.0:
; NO-ZVBB-NEXT: li a0, 64
+; NO-ZVBB-NEXT: lui a1, %hi(.LCPI5_0)
+; NO-ZVBB-NEXT: addi a1, a1, %lo(.LCPI5_0)
; NO-ZVBB-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; NO-ZVBB-NEXT: lui a0, %hi(.LCPI5_0)
-; NO-ZVBB-NEXT: addi a0, a0, %lo(.LCPI5_0)
-; NO-ZVBB-NEXT: vle8.v v8, (a0)
+; NO-ZVBB-NEXT: vle8.v v8, (a1)
; NO-ZVBB-NEXT: vmv.v.i v12, 0
; NO-ZVBB-NEXT: vmerge.vim v12, v12, 1, v0
; NO-ZVBB-NEXT: vrgather.vv v16, v12, v8
@@ -147,10 +147,10 @@ define <128 x i1> @reverse_v128i1(<128 x i1> %a) {
; CHECK-LABEL: reverse_v128i1:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 128
+; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI6_0)
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI6_0)
-; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
; CHECK-NEXT: vrgather.vv v24, v16, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-transpose.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-transpose.ll
index 038fead011d8..82c57a9d90a0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-transpose.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-transpose.ll
@@ -19,8 +19,8 @@ define <8 x i8> @trn1.v8i8(<8 x i8> %v0, <8 x i8> %v1) {
define <8 x i8> @trn2.v8i8(<8 x i8> %v0, <8 x i8> %v1) {
; CHECK-LABEL: trn2.v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: li a0, 170
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -46,13 +46,12 @@ define <16 x i8> @trn1.v16i8(<16 x i8> %v0, <16 x i8> %v1) {
define <16 x i8> @trn2.v16i8(<16 x i8> %v0, <16 x i8> %v1) {
; CHECK-LABEL: trn2.v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: lui a0, 11
; CHECK-NEXT: addi a0, a0, -1366
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
-; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: ret
%tmp0 = shufflevector <16 x i8> %v0, <16 x i8> %v1, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
@@ -62,9 +61,8 @@ define <16 x i8> @trn2.v16i8(<16 x i8> %v0, <16 x i8> %v1) {
define <4 x i16> @trn1.v4i16(<4 x i16> %v0, <4 x i16> %v1) {
; CHECK-LABEL: trn1.v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 10
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vmv.v.i v0, 10
; CHECK-NEXT: vslideup.vi v8, v9, 1, v0.t
; CHECK-NEXT: ret
%tmp0 = shufflevector <4 x i16> %v0, <4 x i16> %v1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -98,8 +96,8 @@ define <8 x i16> @trn1.v8i16(<8 x i16> %v0, <8 x i16> %v1) {
define <8 x i16> @trn2.v8i16(<8 x i16> %v0, <8 x i16> %v1) {
; CHECK-LABEL: trn2.v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: li a0, 170
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -133,9 +131,8 @@ define <2 x i32> @trn2.v2i32(<2 x i32> %v0, <2 x i32> %v1) {
define <4 x i32> @trn1.v4i32(<4 x i32> %v0, <4 x i32> %v1) {
; CHECK-LABEL: trn1.v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 10
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vmv.v.i v0, 10
; CHECK-NEXT: vslideup.vi v8, v9, 1, v0.t
; CHECK-NEXT: ret
%tmp0 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -201,9 +198,8 @@ define <2 x float> @trn2.v2f32(<2 x float> %v0, <2 x float> %v1) {
define <4 x float> @trn1.v4f32(<4 x float> %v0, <4 x float> %v1) {
; CHECK-LABEL: trn1.v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 10
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vmv.v.i v0, 10
; CHECK-NEXT: vslideup.vi v8, v9, 1, v0.t
; CHECK-NEXT: ret
%tmp0 = shufflevector <4 x float> %v0, <4 x float> %v1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -247,9 +243,8 @@ define <2 x double> @trn2.v2f64(<2 x double> %v0, <2 x double> %v1) {
define <4 x half> @trn1.v4f16(<4 x half> %v0, <4 x half> %v1) {
; CHECK-LABEL: trn1.v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 10
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vmv.v.i v0, 10
; CHECK-NEXT: vslideup.vi v8, v9, 1, v0.t
; CHECK-NEXT: ret
%tmp0 = shufflevector <4 x half> %v0, <4 x half> %v1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -283,8 +278,8 @@ define <8 x half> @trn1.v8f16(<8 x half> %v0, <8 x half> %v1) {
define <8 x half> @trn2.v8f16(<8 x half> %v0, <8 x half> %v1) {
; CHECK-LABEL: trn2.v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: li a0, 170
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll
index 5e93fdfc7a65..bf0eab77d0ac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll
@@ -390,8 +390,8 @@ declare <32 x double> @llvm.vp.sitofp.v32f64.v32i64(<32 x i64>, <32 x i1>, i32)
define <32 x double> @vsitofp_v32f64_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_v32f64_v32i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB25_2
@@ -404,8 +404,8 @@ define <32 x double> @vsitofp_v32f64_v32i64(<32 x i64> %va, <32 x i1> %m, i32 ze
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.sitofp.v32f64.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
index 0e6b03bf1632..0e1105848440 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
@@ -75,9 +75,9 @@ define void @widen_4xv4i16_unaligned(ptr %x, ptr %z) {
; CHECK-NO-MISALIGN: # %bb.0:
; CHECK-NO-MISALIGN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NO-MISALIGN-NEXT: vle8.v v8, (a0)
-; CHECK-NO-MISALIGN-NEXT: addi a2, a0, 16
-; CHECK-NO-MISALIGN-NEXT: vle8.v v10, (a2)
; CHECK-NO-MISALIGN-NEXT: addi a2, a0, 8
+; CHECK-NO-MISALIGN-NEXT: addi a3, a0, 16
+; CHECK-NO-MISALIGN-NEXT: vle8.v v10, (a3)
; CHECK-NO-MISALIGN-NEXT: addi a0, a0, 24
; CHECK-NO-MISALIGN-NEXT: vle8.v v9, (a0)
; CHECK-NO-MISALIGN-NEXT: vle8.v v11, (a2)
@@ -186,9 +186,9 @@ define void @strided_constant_mismatch_4xv4i16(ptr %x, ptr %z) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: addi a2, a0, 6
-; CHECK-NEXT: vle16.v v10, (a2)
; CHECK-NEXT: addi a2, a0, 2
+; CHECK-NEXT: addi a3, a0, 6
+; CHECK-NEXT: vle16.v v10, (a3)
; CHECK-NEXT: addi a0, a0, 8
; CHECK-NEXT: vle16.v v9, (a0)
; CHECK-NEXT: vle16.v v11, (a2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
index 6a8d2008de74..5e64e9fbc1a2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
@@ -480,14 +480,14 @@ define <32 x double> @strided_vpload_v32f64(ptr %ptr, i32 signext %stride, <32 x
; CHECK-NEXT: addi a5, a2, -16
; CHECK-NEXT: sltu a2, a2, a5
; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a5
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v9, 2
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT: and a2, a2, a5
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vlse64.v v16, (a4), a1, v0.t
-; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <32 x double> @llvm.experimental.vp.strided.load.v32f64.p0.i32(ptr %ptr, i32 %stride, <32 x i1> %m, i32 %evl)
@@ -555,13 +555,13 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
; CHECK-RV32-NEXT: li a4, 16
; CHECK-RV32-NEXT: .LBB42_6:
; CHECK-RV32-NEXT: mul a5, a4, a2
-; CHECK-RV32-NEXT: add a5, a1, a5
; CHECK-RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 2
+; CHECK-RV32-NEXT: add a5, a1, a5
; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v24, (a5), a2, v0.t
-; CHECK-RV32-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-RV32-NEXT: vmv1r.v v0, v8
+; CHECK-RV32-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a1), a2, v0.t
; CHECK-RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-RV32-NEXT: vse64.v v8, (a0)
@@ -605,13 +605,13 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
; CHECK-RV64-NEXT: li a3, 16
; CHECK-RV64-NEXT: .LBB42_6:
; CHECK-RV64-NEXT: mul a5, a3, a2
-; CHECK-RV64-NEXT: add a5, a1, a5
; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 2
+; CHECK-RV64-NEXT: add a5, a1, a5
; CHECK-RV64-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v24, (a5), a2, v0.t
-; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV64-NEXT: vmv1r.v v0, v8
+; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a1), a2, v0.t
; CHECK-RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-RV64-NEXT: vse64.v v8, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
index dee422a4c17d..35f123f1157f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
@@ -420,9 +420,9 @@ define void @strided_store_v32f64(<32 x double> %v, ptr %ptr, i32 signext %strid
; CHECK-NEXT: addi a3, a2, -16
; CHECK-NEXT: sltu a2, a2, a3
; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a3
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
+; CHECK-NEXT: and a2, a2, a3
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vsse64.v v16, (a0), a1, v0.t
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
index 9fa8ab39723f..7513d31b54bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
@@ -54,8 +54,8 @@ define <128 x i7> @vtrunc_v128i7_v128i16(<128 x i16> %a, <128 x i1> %m, i32 zero
; CHECK-LABEL: vtrunc_v128i7_v128i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv8r.v v24, v8
-; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; CHECK-NEXT: li a1, 64
+; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v12, v0, 8
; CHECK-NEXT: mv a2, a0
; CHECK-NEXT: bltu a0, a1, .LBB4_2
@@ -68,8 +68,8 @@ define <128 x i7> @vtrunc_v128i7_v128i16(<128 x i16> %a, <128 x i1> %m, i32 zero
; CHECK-NEXT: sltu a0, a0, a2
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a2
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t
; CHECK-NEXT: li a0, 128
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
@@ -243,75 +243,67 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v5, v0, 8
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v26, v0, 4
+; CHECK-NEXT: vslidedown.vi v25, v0, 8
; CHECK-NEXT: addi a2, a1, 512
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v8, (a2)
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: li a3, 48
-; CHECK-NEXT: mul a2, a2, a3
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v27, v5, 4
-; CHECK-NEXT: addi a2, a1, 640
+; CHECK-NEXT: vslidedown.vi v27, v25, 4
+; CHECK-NEXT: addi a3, a1, 640
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v16, (a2)
+; CHECK-NEXT: vle64.v v8, (a3)
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: addi a2, a7, -64
-; CHECK-NEXT: sltu a3, a7, a2
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a4, a3, a2
-; CHECK-NEXT: addi a2, a4, -32
-; CHECK-NEXT: sltu a3, a4, a2
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a3, a3, a2
-; CHECK-NEXT: addi a2, a3, -16
-; CHECK-NEXT: sltu a5, a3, a2
-; CHECK-NEXT: addi a5, a5, -1
-; CHECK-NEXT: and a2, a5, a2
; CHECK-NEXT: vslidedown.vi v0, v27, 2
-; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
-; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: li a5, 24
-; CHECK-NEXT: mul a2, a2, a5
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: addi a3, a7, -64
+; CHECK-NEXT: sltu a4, a7, a3
+; CHECK-NEXT: addi a4, a4, -1
+; CHECK-NEXT: and a4, a4, a3
+; CHECK-NEXT: addi a3, a4, -32
+; CHECK-NEXT: sltu a5, a4, a3
+; CHECK-NEXT: addi a5, a5, -1
+; CHECK-NEXT: and a3, a5, a3
+; CHECK-NEXT: addi a5, a3, -16
+; CHECK-NEXT: sltu a6, a3, a5
+; CHECK-NEXT: addi a6, a6, -1
+; CHECK-NEXT: and a5, a6, a5
+; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma
+; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
+; CHECK-NEXT: csrr a5, vlenb
+; CHECK-NEXT: li a6, 24
+; CHECK-NEXT: mul a5, a5, a6
+; CHECK-NEXT: add a5, sp, a5
+; CHECK-NEXT: addi a5, a5, 16
+; CHECK-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT: vle64.v v8, (a2)
; CHECK-NEXT: addi a5, a1, 128
+; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vi v26, v7, 4
; CHECK-NEXT: bltu a3, a2, .LBB16_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a3, 16
; CHECK-NEXT: .LBB16_2:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v28, v26, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v8, (a5)
-; CHECK-NEXT: addi a5, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, a3, e32, m4, ta, ma
-; CHECK-NEXT: li a3, 64
-; CHECK-NEXT: vmv1r.v v0, v27
+; CHECK-NEXT: vle64.v v16, (a5)
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: li a6, 48
; CHECK-NEXT: mul a5, a5, a6
; CHECK-NEXT: add a5, sp, a5
; CHECK-NEXT: addi a5, a5, 16
-; CHECK-NEXT: vl8r.v v8, (a5) # Unknown-size Folded Reload
+; CHECK-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v28, v26, 2
+; CHECK-NEXT: li a5, 64
+; CHECK-NEXT: vmv1r.v v0, v27
+; CHECK-NEXT: vsetvli zero, a3, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
-; CHECK-NEXT: csrr a5, vlenb
+; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: li a6, 56
-; CHECK-NEXT: mul a5, a5, a6
-; CHECK-NEXT: add a5, sp, a5
-; CHECK-NEXT: addi a5, a5, 16
-; CHECK-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
+; CHECK-NEXT: mul a3, a3, a6
+; CHECK-NEXT: add a3, sp, a3
+; CHECK-NEXT: addi a3, a3, 16
+; CHECK-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: mv a6, a7
-; CHECK-NEXT: bltu a7, a3, .LBB16_4
+; CHECK-NEXT: bltu a7, a5, .LBB16_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a6, 64
; CHECK-NEXT: .LBB16_4:
@@ -332,10 +324,14 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: sltu t1, a6, t0
; CHECK-NEXT: addi t1, t1, -1
; CHECK-NEXT: and t0, t1, t0
-; CHECK-NEXT: vsetvli zero, t0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v28
-; CHECK-NEXT: addi t0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (t0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr t1, vlenb
+; CHECK-NEXT: li t2, 48
+; CHECK-NEXT: mul t1, t1, t2
+; CHECK-NEXT: add t1, sp, t1
+; CHECK-NEXT: addi t1, t1, 16
+; CHECK-NEXT: vl8r.v v16, (t1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, t0, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t
; CHECK-NEXT: csrr t0, vlenb
; CHECK-NEXT: slli t0, t0, 4
@@ -346,19 +342,21 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: li a6, 16
; CHECK-NEXT: .LBB16_6:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v20, v5, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a5)
+; CHECK-NEXT: addi a5, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
; CHECK-NEXT: addi a1, a1, 256
-; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v27, v25, 2
; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: slli a5, a5, 3
; CHECK-NEXT: add a5, sp, a5
; CHECK-NEXT: addi a5, a5, 16
-; CHECK-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
-; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t
+; CHECK-NEXT: vl8r.v v8, (a5) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma
+; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: li a6, 48
; CHECK-NEXT: mul a5, a5, a6
@@ -371,13 +369,20 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: li a5, 32
; CHECK-NEXT: .LBB16_8:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v24, (a1)
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: addi a1, a5, -16
; CHECK-NEXT: sltu a5, a5, a1
; CHECK-NEXT: addi a5, a5, -1
; CHECK-NEXT: and a1, a5, a1
+; CHECK-NEXT: vmv1r.v v0, v27
+; CHECK-NEXT: addi a5, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a5) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
; CHECK-NEXT: bltu a4, a2, .LBB16_10
; CHECK-NEXT: # %bb.9:
@@ -385,8 +390,13 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: .LBB16_10:
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v6, v7, 2
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a4, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v5
; CHECK-NEXT: vnsrl.wi v8, v24, 0, v0.t
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: mv a1, a7
@@ -401,13 +411,13 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: addi a4, a4, 16
; CHECK-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: csrr a4, vlenb
; CHECK-NEXT: li a5, 56
; CHECK-NEXT: mul a4, a4, a5
; CHECK-NEXT: add a4, sp, a4
; CHECK-NEXT: addi a4, a4, 16
; CHECK-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vslideup.vi v8, v24, 16
; CHECK-NEXT: csrr a4, vlenb
; CHECK-NEXT: li a5, 56
@@ -446,19 +456,18 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: sltu a1, a1, a4
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a1, a1, a4
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 5
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: slli a4, a4, 5
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t
; CHECK-NEXT: bltu a7, a2, .LBB16_14
; CHECK-NEXT: # %bb.13:
; CHECK-NEXT: li a7, 16
; CHECK-NEXT: .LBB16_14:
-; CHECK-NEXT: vsetvli zero, a7, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a2, 40
@@ -466,6 +475,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a7, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vslideup.vi v16, v8, 16
@@ -509,8 +519,8 @@ define <32 x i32> @vtrunc_v32i32_v32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext
; CHECK-LABEL: vtrunc_v32i32_v32i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv8r.v v24, v8
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v12, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB17_2
@@ -523,8 +533,8 @@ define <32 x i32> @vtrunc_v32i32_v32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll
index 698c48bc5565..e28d55f46abc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll
@@ -390,8 +390,8 @@ declare <32 x double> @llvm.vp.uitofp.v32f64.v32i64(<32 x i64>, <32 x i1>, i32)
define <32 x double> @vuitofp_v32f64_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_v32f64_v32i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB25_2
@@ -404,8 +404,8 @@ define <32 x double> @vuitofp_v32f64_v32i64(<32 x i64> %va, <32 x i1> %m, i32 ze
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v16, v16, v0.t
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.uitofp.v32f64.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
index 36c36a13964c..805b548b0cd1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
@@ -227,7 +227,7 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %
; RV64-SLOW-NEXT: andi a0, a0, 2
; RV64-SLOW-NEXT: beqz a0, .LBB5_2
; RV64-SLOW-NEXT: .LBB5_4: # %cond.load1
-; RV64-SLOW-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-SLOW-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-SLOW-NEXT: vslidedown.vi v8, v8, 1
; RV64-SLOW-NEXT: vmv.x.s a0, v8
; RV64-SLOW-NEXT: lwu a1, 4(a0)
@@ -235,7 +235,6 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %
; RV64-SLOW-NEXT: slli a1, a1, 32
; RV64-SLOW-NEXT: or a0, a1, a0
; RV64-SLOW-NEXT: vmv.s.x v8, a0
-; RV64-SLOW-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-SLOW-NEXT: vslideup.vi v9, v8, 1
; RV64-SLOW-NEXT: vmv1r.v v8, v9
; RV64-SLOW-NEXT: ret
@@ -612,7 +611,7 @@ define void @masked_store_v2i32_align2(<2 x i32> %val, ptr %a, <2 x i32> %m) nou
; SLOW-NEXT: andi a1, a1, 2
; SLOW-NEXT: beqz a1, .LBB9_2
; SLOW-NEXT: .LBB9_4: # %cond.store1
-; SLOW-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; SLOW-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; SLOW-NEXT: vslidedown.vi v8, v8, 1
; SLOW-NEXT: vmv.x.s a1, v8
; SLOW-NEXT: sh a1, 4(a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll
index 600290a62515..ea7f6beb22a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll
@@ -38,9 +38,9 @@ define <8 x i8> @vaaddu_vx_v8i8_floor(<8 x i8> %x, i8 %y) {
define <8 x i8> @vaaddu_vv_v8i8_floor_sexti16(<8 x i8> %x, <8 x i8> %y) {
; CHECK-LABEL: vaaddu_vv_v8i8_floor_sexti16:
; CHECK: # %bb.0:
+; CHECK-NEXT: csrwi vxrm, 2
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vwadd.vv v10, v8, v9
-; CHECK-NEXT: vnsrl.wi v8, v10, 1
+; CHECK-NEXT: vaadd.vv v8, v8, v9
; CHECK-NEXT: ret
%xzv = sext <8 x i8> %x to <8 x i16>
%yzv = sext <8 x i8> %y to <8 x i16>
@@ -248,12 +248,9 @@ define <8 x i8> @vaaddu_vx_v8i8_ceil(<8 x i8> %x, i8 %y) {
define <8 x i8> @vaaddu_vv_v8i8_ceil_sexti16(<8 x i8> %x, <8 x i8> %y) {
; CHECK-LABEL: vaaddu_vv_v8i8_ceil_sexti16:
; CHECK: # %bb.0:
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vwadd.vv v10, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vadd.vi v8, v10, 1
-; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vnsrl.wi v8, v8, 1
+; CHECK-NEXT: vaadd.vv v8, v8, v9
; CHECK-NEXT: ret
%xzv = sext <8 x i8> %x to <8 x i16>
%yzv = sext <8 x i8> %y to <8 x i16>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
index 2c62cbd583d0..5601bd5ee7a3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
@@ -377,8 +377,8 @@ define <256 x i8> @vadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %ev
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
@@ -416,8 +416,8 @@ define <256 x i8> @vadd_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a0)
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
@@ -1348,8 +1348,8 @@ declare <32 x i64> @llvm.vp.add.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vadd_vx_v32i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: li a2, 16
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB108_2
@@ -1365,15 +1365,15 @@ define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_v32i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB108_2
@@ -1386,8 +1386,8 @@ define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vadd.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
@@ -1468,8 +1468,8 @@ define <32 x i64> @vadd_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1479,8 +1479,8 @@ define <32 x i64> @vadd_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vadd.vi v8, v8, -1, v0.t
-; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vadd.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
index 507cf5cc6b80..d414be76672a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
@@ -1140,15 +1140,16 @@ define <11 x i64> @vand_vx_v11i64(<11 x i64> %va, i64 %b, <11 x i1> %m, i32 zero
; RV32-LABEL: vand_vx_v11i64:
; RV32: # %bb.0:
; RV32-NEXT: vmv1r.v v16, v0
-; RV32-NEXT: li a3, 32
-; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV32-NEXT: lui a3, 341
; RV32-NEXT: addi a3, a3, 1365
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vmv.s.x v0, a3
+; RV32-NEXT: li a3, 32
+; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v24, a1
; RV32-NEXT: vmerge.vxm v24, v24, a0, v0
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v16
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vand.vv v8, v8, v24, v0.t
; RV32-NEXT: ret
;
@@ -1167,10 +1168,11 @@ define <11 x i64> @vand_vx_v11i64_unmasked(<11 x i64> %va, i64 %b, i32 zeroext %
; RV32-LABEL: vand_vx_v11i64_unmasked:
; RV32: # %bb.0:
; RV32-NEXT: li a3, 32
+; RV32-NEXT: lui a4, 341
+; RV32-NEXT: addi a4, a4, 1365
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vmv.s.x v0, a4
; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; RV32-NEXT: lui a3, 341
-; RV32-NEXT: addi a3, a3, 1365
-; RV32-NEXT: vmv.s.x v0, a3
; RV32-NEXT: vmv.v.x v16, a1
; RV32-NEXT: vmerge.vxm v16, v16, a0, v0
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
index 01b07b4081e6..77a095303675 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
@@ -298,37 +298,46 @@ define <32 x double> @vfsgnj_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
-; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a1, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v24, v0.t
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsgnj.vv v16, v16, v24, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll
index f32e2bbf3794..ae3dce497c6d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll
@@ -379,8 +379,8 @@ declare <32 x double> @llvm.vp.fabs.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vfabs_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
@@ -393,8 +393,8 @@ define <32 x double> @vfabs_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v16, v0.t
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.fabs.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
index 0574773fb2fd..e2e48cee3eac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
@@ -862,51 +862,51 @@ define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a2)
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vle64.v v24, (a0)
-; CHECK-NEXT: li a1, 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: li a1, 16
; CHECK-NEXT: mv a0, a4
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: bltu a4, a1, .LBB50_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB50_2:
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v24, v8, v16, v0.t
+; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, a4, -16
; CHECK-NEXT: sltu a1, a4, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 24
-; CHECK-NEXT: mul a0, a0, a1
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a2, 24
+; CHECK-NEXT: mul a1, a1, a2
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v16, v24, v8, v0.t
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -954,25 +954,25 @@ define <32 x double> @vfma_vv_v32f64_unmasked(<32 x double> %va, <32 x double> %
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB51_2:
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v0, v8, v24
; CHECK-NEXT: addi a0, a4, -16
; CHECK-NEXT: sltu a1, a4, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v24, v16, v8
; CHECK-NEXT: vmv8r.v v8, v0
; CHECK-NEXT: vmv.v.v v16, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
index ffa88e28d7dc..c83a298cb501 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
@@ -390,37 +390,46 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
-; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a1, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v24, v0.t
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmax.vv v16, v16, v24, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
index 17f851e172f8..60dbededb90a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
@@ -390,37 +390,46 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
-; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a1, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v24, v0.t
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmin.vv v16, v16, v24, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll
index 288efb0f1fc2..6c695b43d271 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll
@@ -626,51 +626,51 @@ define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a2)
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vle64.v v24, (a0)
-; CHECK-NEXT: li a1, 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: li a1, 16
; CHECK-NEXT: mv a0, a4
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: bltu a4, a1, .LBB50_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB50_2:
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v24, v8, v16, v0.t
+; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, a4, -16
; CHECK-NEXT: sltu a1, a4, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 24
-; CHECK-NEXT: mul a0, a0, a1
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a2, 24
+; CHECK-NEXT: mul a1, a1, a2
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v16, v24, v8, v0.t
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -718,25 +718,25 @@ define <32 x double> @vfma_vv_v32f64_unmasked(<32 x double> %va, <32 x double> %
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB51_2:
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v0, v8, v24
; CHECK-NEXT: addi a0, a4, -16
; CHECK-NEXT: sltu a1, a4, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v24, v16, v8
; CHECK-NEXT: vmv8r.v v8, v0
; CHECK-NEXT: vmv.v.v v16, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
index c36ec25c04f9..fbc4c56a9113 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
@@ -379,8 +379,8 @@ declare <32 x double> @llvm.vp.fneg.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vfneg_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
@@ -393,8 +393,8 @@ define <32 x double> @vfneg_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfneg.v v16, v16, v0.t
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.fneg.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll
index 6004eb4fe217..988b200ae536 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll
@@ -379,8 +379,8 @@ declare <32 x double> @llvm.vp.sqrt.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vfsqrt_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsqrt_vv_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
@@ -393,8 +393,8 @@ define <32 x double> @vfsqrt_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zero
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsqrt.v v16, v16, v0.t
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.sqrt.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
index dd3a50cfd773..05c7bd990642 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
@@ -105,13 +105,12 @@ define <64 x float> @vfwadd_v64f16(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vfwadd.vv v8, v16, v24
+; CHECK-NEXT: vfwadd.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfwadd.vv v8, v16, v0
@@ -216,13 +215,12 @@ define <32 x double> @vfwadd_v32f32(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vfwadd.vv v8, v16, v24
+; CHECK-NEXT: vfwadd.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfwadd.vv v8, v16, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
index 7eaa1856ce22..5a57801d33b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
@@ -105,13 +105,12 @@ define <64 x float> @vfwmul_v64f16(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vfwmul.vv v8, v16, v24
+; CHECK-NEXT: vfwmul.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfwmul.vv v8, v16, v0
@@ -216,13 +215,12 @@ define <32 x double> @vfwmul_v32f32(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vfwmul.vv v8, v16, v24
+; CHECK-NEXT: vfwmul.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfwmul.vv v8, v16, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
index 8cf7c5f17586..2c706cad9742 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
@@ -105,13 +105,12 @@ define <64 x float> @vfwsub_v64f16(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vfwsub.vv v8, v16, v24
+; CHECK-NEXT: vfwsub.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfwsub.vv v8, v16, v0
@@ -216,13 +215,12 @@ define <32 x double> @vfwsub_v32f32(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vfwsub.vv v8, v16, v24
+; CHECK-NEXT: vfwsub.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfwsub.vv v8, v16, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
index 3db44e87109b..9789afda9344 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
@@ -282,8 +282,8 @@ define <256 x i8> @vmax_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zero
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
@@ -325,8 +325,8 @@ define <256 x i8> @vmax_vx_v258i8_evl129(<256 x i8> %va, i8 %b, <256 x i1> %m) {
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
@@ -1021,8 +1021,8 @@ declare <32 x i64> @llvm.vp.smax.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
define <32 x i64> @vmax_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmax_vx_v32i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: li a2, 16
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB74_2
@@ -1038,15 +1038,15 @@ define <32 x i64> @vmax_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmax.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vmax_vx_v32i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB74_2
@@ -1060,8 +1060,8 @@ define <32 x i64> @vmax_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmax.vx v16, v16, a2, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.smax.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
index c97c2232715f..36b0a4642b61 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
@@ -281,8 +281,8 @@ define <256 x i8> @vmaxu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zer
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
@@ -324,8 +324,8 @@ define <256 x i8> @vmaxu_vx_v258i8_evl129(<256 x i8> %va, i8 %b, <256 x i1> %m)
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
@@ -1020,8 +1020,8 @@ declare <32 x i64> @llvm.vp.umax.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
define <32 x i64> @vmaxu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmaxu_vx_v32i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: li a2, 16
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB74_2
@@ -1037,15 +1037,15 @@ define <32 x i64> @vmaxu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmaxu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vmaxu_vx_v32i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB74_2
@@ -1059,8 +1059,8 @@ define <32 x i64> @vmaxu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmaxu.vx v16, v16, a2, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.umax.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
index eaa19110a2a2..adb0a30f34d3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
@@ -282,8 +282,8 @@ define <256 x i8> @vmin_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zero
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
@@ -325,8 +325,8 @@ define <256 x i8> @vmin_vx_v258i8_evl129(<256 x i8> %va, i8 %b, <256 x i1> %m) {
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
@@ -1021,8 +1021,8 @@ declare <32 x i64> @llvm.vp.smin.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
define <32 x i64> @vmin_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmin_vx_v32i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: li a2, 16
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB74_2
@@ -1038,15 +1038,15 @@ define <32 x i64> @vmin_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmin.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vmin_vx_v32i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB74_2
@@ -1060,8 +1060,8 @@ define <32 x i64> @vmin_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmin.vx v16, v16, a2, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.smin.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
index 48175e5b905b..671ce82d4ae7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
@@ -281,8 +281,8 @@ define <256 x i8> @vminu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zer
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
@@ -324,8 +324,8 @@ define <256 x i8> @vminu_vx_v258i8_evl129(<256 x i8> %va, i8 %b, <256 x i1> %m)
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vminu.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
@@ -1020,8 +1020,8 @@ declare <32 x i64> @llvm.vp.umin.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
define <32 x i64> @vminu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vminu_vx_v32i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: li a2, 16
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB74_2
@@ -1037,15 +1037,15 @@ define <32 x i64> @vminu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vminu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vminu_vx_v32i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB74_2
@@ -1059,8 +1059,8 @@ define <32 x i64> @vminu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vminu.vx v16, v16, a2, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.umin.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
index a13f1eed8efb..028fb9a626f0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -297,10 +297,10 @@ define <32 x i8> @vpgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %
; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: li a0, 32
@@ -1882,10 +1882,10 @@ define <32 x double> @vpgather_v32f64(<32 x ptr> %ptrs, <32 x i1> %m, i32 zeroex
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 16
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t
; RV32-NEXT: vmv8r.v v8, v24
@@ -1904,9 +1904,9 @@ define <32 x double> @vpgather_v32f64(<32 x ptr> %ptrs, <32 x i1> %m, i32 zeroex
; RV64-NEXT: addi a1, a0, -16
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
-; RV64-NEXT: and a0, a0, a1
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a0, a0, a1
; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (zero), v16, v0.t
; RV64-NEXT: ret
@@ -1933,10 +1933,10 @@ define <32 x double> @vpgather_baseidx_v32i8_v32f64(ptr %base, <32 x i8> %idxs,
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1961,9 +1961,9 @@ define <32 x double> @vpgather_baseidx_v32i8_v32f64(ptr %base, <32 x i8> %idxs,
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
-; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -1991,10 +1991,10 @@ define <32 x double> @vpgather_baseidx_sext_v32i8_v32f64(ptr %base, <32 x i8> %i
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2020,9 +2020,9 @@ define <32 x double> @vpgather_baseidx_sext_v32i8_v32f64(ptr %base, <32 x i8> %i
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
-; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2051,10 +2051,10 @@ define <32 x double> @vpgather_baseidx_zext_v32i8_v32f64(ptr %base, <32 x i8> %i
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e16, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e16, m4, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei16.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2077,10 +2077,10 @@ define <32 x double> @vpgather_baseidx_zext_v32i8_v32f64(ptr %base, <32 x i8> %i
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
; RV64-NEXT: and a1, a1, a2
-; RV64-NEXT: vsetivli zero, 16, e16, m4, ta, ma
-; RV64-NEXT: vslidedown.vi v24, v16, 16
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: vsetivli zero, 16, e16, m4, ta, ma
+; RV64-NEXT: vslidedown.vi v24, v16, 16
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei16.v v16, (a0), v24, v0.t
; RV64-NEXT: ret
@@ -2109,10 +2109,10 @@ define <32 x double> @vpgather_baseidx_v32i16_v32f64(ptr %base, <32 x i16> %idxs
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2137,9 +2137,9 @@ define <32 x double> @vpgather_baseidx_v32i16_v32f64(ptr %base, <32 x i16> %idxs
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
-; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2167,10 +2167,10 @@ define <32 x double> @vpgather_baseidx_sext_v32i16_v32f64(ptr %base, <32 x i16>
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2196,9 +2196,9 @@ define <32 x double> @vpgather_baseidx_sext_v32i16_v32f64(ptr %base, <32 x i16>
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
-; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2227,10 +2227,10 @@ define <32 x double> @vpgather_baseidx_zext_v32i16_v32f64(ptr %base, <32 x i16>
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2253,10 +2253,10 @@ define <32 x double> @vpgather_baseidx_zext_v32i16_v32f64(ptr %base, <32 x i16>
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
; RV64-NEXT: and a1, a1, a2
-; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV64-NEXT: vslidedown.vi v24, v16, 16
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV64-NEXT: vslidedown.vi v24, v16, 16
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV64-NEXT: ret
@@ -2270,8 +2270,8 @@ define <32 x double> @vpgather_baseidx_v32i32_v32f64(ptr %base, <32 x i32> %idxs
; RV32-LABEL: vpgather_baseidx_v32i32_v32f64:
; RV32: # %bb.0:
; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: li a3, 16
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vsll.vi v16, v8, 3
; RV32-NEXT: mv a2, a1
; RV32-NEXT: bltu a1, a3, .LBB93_2
@@ -2284,10 +2284,10 @@ define <32 x double> @vpgather_baseidx_v32i32_v32f64(ptr %base, <32 x i32> %idxs
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2312,9 +2312,9 @@ define <32 x double> @vpgather_baseidx_v32i32_v32f64(ptr %base, <32 x i32> %idxs
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
-; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2327,8 +2327,8 @@ define <32 x double> @vpgather_baseidx_sext_v32i32_v32f64(ptr %base, <32 x i32>
; RV32-LABEL: vpgather_baseidx_sext_v32i32_v32f64:
; RV32: # %bb.0:
; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: li a3, 16
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vsll.vi v16, v8, 3
; RV32-NEXT: mv a2, a1
; RV32-NEXT: bltu a1, a3, .LBB94_2
@@ -2341,10 +2341,10 @@ define <32 x double> @vpgather_baseidx_sext_v32i32_v32f64(ptr %base, <32 x i32>
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2370,9 +2370,9 @@ define <32 x double> @vpgather_baseidx_sext_v32i32_v32f64(ptr %base, <32 x i32>
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
-; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2386,8 +2386,8 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(ptr %base, <32 x i32>
; RV32-LABEL: vpgather_baseidx_zext_v32i32_v32f64:
; RV32: # %bb.0:
; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: li a3, 16
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vsll.vi v16, v8, 3
; RV32-NEXT: mv a2, a1
; RV32-NEXT: bltu a1, a3, .LBB95_2
@@ -2400,10 +2400,10 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(ptr %base, <32 x i32>
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2429,9 +2429,9 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(ptr %base, <32 x i32>
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
-; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2457,9 +2457,9 @@ define <32 x double> @vpgather_baseidx_v32f64(ptr %base, <32 x i64> %idxs, <32 x
; RV32-NEXT: addi a2, a1, -16
; RV32-NEXT: sltu a3, a1, a2
; RV32-NEXT: addi a3, a3, -1
-; RV32-NEXT: and a2, a3, a2
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: and a2, a3, a2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: li a2, 16
@@ -2467,8 +2467,8 @@ define <32 x double> @vpgather_baseidx_v32f64(ptr %base, <32 x i64> %idxs, <32 x
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a1, 16
; RV32-NEXT: .LBB96_2:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
;
@@ -2488,9 +2488,9 @@ define <32 x double> @vpgather_baseidx_v32f64(ptr %base, <32 x i64> %idxs, <32 x
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
-; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
index 9ef89352e65e..f204d812c14f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
@@ -377,9 +377,9 @@ define <32 x double> @vpload_v32f64(ptr %ptr, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: sltu a1, a1, a2
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a1, a1, a2
-; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
+; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a0), v0.t
; CHECK-NEXT: ret
@@ -405,9 +405,9 @@ define <33 x double> @vpload_v33f64(ptr %ptr, <33 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: sltu a3, a3, a4
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a3, a3, a4
-; CHECK-NEXT: addi a4, a1, 128
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v8, 2
+; CHECK-NEXT: addi a4, a1, 128
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a4), v0.t
; CHECK-NEXT: addi a3, a2, -32
@@ -419,17 +419,17 @@ define <33 x double> @vpload_v33f64(ptr %ptr, <33 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a4, 16
; CHECK-NEXT: .LBB32_4:
-; CHECK-NEXT: addi a5, a1, 256
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v8, 4
+; CHECK-NEXT: addi a5, a1, 256
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a5), v0.t
; CHECK-NEXT: bltu a2, a3, .LBB32_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: .LBB32_6:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a1), v0.t
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vse64.v v8, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
index 466448a7a05a..9f0561b394b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
@@ -1193,17 +1193,17 @@ define <32 x double> @vpmerge_vv_v32f64(<32 x double> %va, <32 x double> %vb, <3
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
-; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
+; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmerge.vvm v16, v16, v8, v0
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: csrr a0, vlenb
@@ -1229,9 +1229,9 @@ define <32 x double> @vpmerge_vf_v32f64(double %a, <32 x double> %vb, <32 x i1>
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
+; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT: vfmerge.vfm v16, v16, fa0, v0
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
index cd9a38d5167d..0c180cd148b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
@@ -1685,10 +1685,10 @@ define void @vpscatter_v32f64(<32 x double> %val, <32 x ptr> %ptrs, <32 x i1> %m
; RV32-NEXT: sltu a1, a1, a0
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a0, a1, a0
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v24, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v24, 16
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (zero), v8, v0.t
; RV32-NEXT: ret
@@ -1718,12 +1718,12 @@ define void @vpscatter_v32f64(<32 x double> %val, <32 x ptr> %ptrs, <32 x i1> %m
; RV64-NEXT: addi a0, a2, -16
; RV64-NEXT: sltu a1, a2, a0
; RV64-NEXT: addi a1, a1, -1
-; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a0, a1, a0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v16, (zero), v8, v0.t
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
@@ -1753,10 +1753,10 @@ define void @vpscatter_baseidx_v32i32_v32f64(<32 x double> %val, ptr %base, <32
; RV32-NEXT: sltu a2, a2, a1
; RV32-NEXT: addi a2, a2, -1
; RV32-NEXT: and a1, a2, a1
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v24, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v24, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: ret
@@ -1766,51 +1766,44 @@ define void @vpscatter_baseidx_v32i32_v32f64(<32 x double> %val, ptr %base, <32
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: csrr a3, vlenb
-; RV64-NEXT: li a4, 10
-; RV64-NEXT: mul a3, a3, a4
+; RV64-NEXT: slli a3, a3, 3
; RV64-NEXT: sub sp, sp, a3
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x0a, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 10 * vlenb
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; RV64-NEXT: li a3, 32
; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV64-NEXT: vle32.v v24, (a1)
+; RV64-NEXT: vmv1r.v v7, v0
; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV64-NEXT: vslidedown.vi v0, v24, 16
+; RV64-NEXT: vslidedown.vi v16, v24, 16
+; RV64-NEXT: vmv4r.v v0, v24
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v0
-; RV64-NEXT: vsll.vi v16, v16, 3
-; RV64-NEXT: vsext.vf2 v0, v24
+; RV64-NEXT: vsext.vf2 v24, v16
+; RV64-NEXT: vsll.vi v16, v24, 3
+; RV64-NEXT: vsext.vf2 v24, v0
; RV64-NEXT: li a3, 16
-; RV64-NEXT: vsll.vi v24, v0, 3
+; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: mv a1, a2
; RV64-NEXT: bltu a2, a3, .LBB80_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB80_2:
+; RV64-NEXT: vmv1r.v v0, v7
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: addi a1, a2, -16
; RV64-NEXT: sltu a2, a2, a1
; RV64-NEXT: addi a2, a2, -1
-; RV64-NEXT: and a1, a2, a1
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: vslidedown.vi v0, v7, 2
+; RV64-NEXT: and a1, a2, a1
+; RV64-NEXT: addi a2, sp, 16
+; RV64-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: li a1, 10
-; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
@@ -1838,10 +1831,10 @@ define void @vpscatter_baseidx_sext_v32i32_v32f64(<32 x double> %val, ptr %base,
; RV32-NEXT: sltu a2, a2, a1
; RV32-NEXT: addi a2, a2, -1
; RV32-NEXT: and a1, a2, a1
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v24, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v24, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: ret
@@ -1878,21 +1871,21 @@ define void @vpscatter_baseidx_sext_v32i32_v32f64(<32 x double> %val, ptr %base,
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB81_2:
+; RV64-NEXT: addi a3, sp, 16
+; RV64-NEXT: vl1r.v v0, (a3) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: addi a1, a2, -16
; RV64-NEXT: sltu a2, a2, a1
; RV64-NEXT: addi a2, a2, -1
-; RV64-NEXT: and a1, a2, a1
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a2, a1
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: add a2, sp, a2
+; RV64-NEXT: addi a2, a2, 16
+; RV64-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 10
@@ -1925,10 +1918,10 @@ define void @vpscatter_baseidx_zext_v32i32_v32f64(<32 x double> %val, ptr %base,
; RV32-NEXT: sltu a2, a2, a1
; RV32-NEXT: addi a2, a2, -1
; RV32-NEXT: and a1, a2, a1
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v24, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v24, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: ret
@@ -1965,21 +1958,21 @@ define void @vpscatter_baseidx_zext_v32i32_v32f64(<32 x double> %val, ptr %base,
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB82_2:
+; RV64-NEXT: addi a3, sp, 16
+; RV64-NEXT: vl1r.v v0, (a3) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: addi a1, a2, -16
; RV64-NEXT: sltu a2, a2, a1
; RV64-NEXT: addi a2, a2, -1
-; RV64-NEXT: and a1, a2, a1
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a2, a1
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: add a2, sp, a2
+; RV64-NEXT: addi a2, a2, 16
+; RV64-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 10
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll
index c0aa735614b2..f396790f4f17 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll
@@ -295,9 +295,9 @@ define void @vpstore_v32f64(<32 x double> %val, ptr %ptr, <32 x i1> %m, i32 zero
; CHECK-NEXT: sltu a1, a1, a2
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a1, a1, a2
-; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
+; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vse64.v v16, (a0), v0.t
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
index 291629de6dcf..df2c83028e5d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
@@ -386,8 +386,8 @@ define <256 x i8> @vsadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %e
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
@@ -425,8 +425,8 @@ define <256 x i8> @vsadd_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a0)
; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vsadd.vi v16, v16, -1, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
@@ -442,8 +442,8 @@ define <256 x i8> @vsadd_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a0)
; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vsadd.vi v16, v16, -1, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
@@ -1361,8 +1361,8 @@ declare <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i
define <32 x i64> @vsadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsadd_vx_v32i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: li a2, 16
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB108_2
@@ -1378,15 +1378,15 @@ define <32 x i64> @vsadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsadd.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsadd_vx_v32i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB108_2
@@ -1399,8 +1399,8 @@ define <32 x i64> @vsadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vsadd.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
@@ -1462,8 +1462,8 @@ define <32 x i64> @vsadd_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV32-NEXT: vsadd.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV32-NEXT: vsadd.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1473,8 +1473,8 @@ define <32 x i64> @vsadd_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV64-NEXT: vsadd.vi v8, v8, -1, v0.t
-; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV64-NEXT: vsadd.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
@@ -1491,8 +1491,8 @@ define <32 x i64> @vsadd_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vsadd.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vsadd.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1502,8 +1502,8 @@ define <32 x i64> @vsadd_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vsadd.vi v8, v8, -1, v0.t
-; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vsadd.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
index d38ee1148e89..f50dadf01991 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
@@ -382,8 +382,8 @@ define <256 x i8> @vsaddu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
@@ -421,8 +421,8 @@ define <256 x i8> @vsaddu_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a0)
; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vsaddu.vi v16, v16, -1, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
@@ -438,8 +438,8 @@ define <256 x i8> @vsaddu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a0)
; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vsaddu.vi v16, v16, -1, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
@@ -1357,8 +1357,8 @@ declare <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i
define <32 x i64> @vsaddu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsaddu_vx_v32i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: li a2, 16
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB108_2
@@ -1374,15 +1374,15 @@ define <32 x i64> @vsaddu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsaddu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsaddu_vx_v32i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB108_2
@@ -1395,8 +1395,8 @@ define <32 x i64> @vsaddu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vsaddu.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
@@ -1458,8 +1458,8 @@ define <32 x i64> @vsaddu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV32-NEXT: vsaddu.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV32-NEXT: vsaddu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1469,8 +1469,8 @@ define <32 x i64> @vsaddu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV64-NEXT: vsaddu.vi v8, v8, -1, v0.t
-; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV64-NEXT: vsaddu.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
@@ -1487,8 +1487,8 @@ define <32 x i64> @vsaddu_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vsaddu.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vsaddu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1498,8 +1498,8 @@ define <32 x i64> @vsaddu_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vsaddu.vi v8, v8, -1, v0.t
-; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vsaddu.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll
index 12d96fbfb88d..4f533f2055bf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll
@@ -24,17 +24,17 @@ define <512 x i8> @vadd_v512i8_zvl128(<512 x i8> %a, <512 x i8> %b) #0 {
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: li a2, 128
+; CHECK-NEXT: addi a4, a3, 128
+; CHECK-NEXT: addi a5, a3, 384
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; CHECK-NEXT: addi a2, a3, 128
-; CHECK-NEXT: addi a4, a3, 384
-; CHECK-NEXT: vle8.v v8, (a4)
-; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: vle8.v v8, (a5)
+; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: li a5, 24
-; CHECK-NEXT: mul a4, a4, a5
-; CHECK-NEXT: add a4, sp, a4
-; CHECK-NEXT: addi a4, a4, 16
-; CHECK-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
-; CHECK-NEXT: addi a4, a1, 128
+; CHECK-NEXT: mul a2, a2, a5
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: addi a2, a1, 128
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
@@ -48,10 +48,10 @@ define <512 x i8> @vadd_v512i8_zvl128(<512 x i8> %a, <512 x i8> %b) #0 {
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vle8.v v8, (a4)
+; CHECK-NEXT: vle8.v v8, (a2)
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vle8.v v24, (a2)
+; CHECK-NEXT: vle8.v v24, (a4)
; CHECK-NEXT: vle8.v v0, (a3)
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
index d05f580ea7d2..0a2ed3eb1ffb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
@@ -175,19 +175,18 @@ define <256 x i8> @select_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i3
; CHECK-NEXT: vle8.v v16, (a0)
; CHECK-NEXT: addi a0, a3, -128
; CHECK-NEXT: sltu a4, a3, a0
-; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: vle8.v v0, (a1)
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v0, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: and a0, a4, a0
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v24, v16, v24, v0
; CHECK-NEXT: bltu a3, a2, .LBB11_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: .LBB11_2:
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -196,6 +195,7 @@ define <256 x i8> @select_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i3
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: vmv8r.v v16, v24
; CHECK-NEXT: csrr a0, vlenb
@@ -221,39 +221,39 @@ define <256 x i8> @select_evl_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, a1, 128
; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vle8.v v24, (a1)
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vle8.v v16, (a1)
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmerge.vvm v24, v24, v16, v0
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
+; CHECK-NEXT: vmerge.vvm v24, v8, v24, v0
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT: vmv8r.v v16, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 24
@@ -437,12 +437,12 @@ define <32 x i64> @select_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
-; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
+; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -456,15 +456,41 @@ define <32 x i64> @select_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32
define <32 x i64> @select_evl_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c) {
; CHECK-LABEL: select_evl_v32i64:
; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
-; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
-; CHECK-NEXT: addi a0, a0, 128
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vle64.v v24, (a1)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v0, v0, 2
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
+; CHECK-NEXT: vmv1r.v v0, v7
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x i64> @llvm.vp.select.v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32 17)
ret <32 x i64> %v
@@ -594,12 +620,12 @@ define <64 x float> @select_v64f32(<64 x i1> %a, <64 x float> %b, <64 x float> %
; CHECK-NEXT: addi a0, a2, -32
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
-; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 4
+; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
index 2caa2ff41a7d..b82ca70477ba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
@@ -399,8 +399,8 @@ define <256 x i8> @vssub_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %e
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
@@ -440,8 +440,8 @@ define <256 x i8> @vssub_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
; CHECK-NEXT: vlm.v v24, (a0)
; CHECK-NEXT: li a0, -1
; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vssub.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
@@ -458,8 +458,8 @@ define <256 x i8> @vssub_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
; CHECK-NEXT: vlm.v v24, (a0)
; CHECK-NEXT: li a0, -1
; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vssub.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
@@ -1401,8 +1401,8 @@ declare <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i
define <32 x i64> @vssub_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vssub_vx_v32i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: li a2, 16
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB108_2
@@ -1418,15 +1418,15 @@ define <32 x i64> @vssub_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vssub.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vssub_vx_v32i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB108_2
@@ -1440,8 +1440,8 @@ define <32 x i64> @vssub_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vssub.vx v16, v16, a2, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
@@ -1504,8 +1504,8 @@ define <32 x i64> @vssub_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV32-NEXT: vssub.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV32-NEXT: vssub.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1516,8 +1516,8 @@ define <32 x i64> @vssub_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV64-NEXT: li a0, -1
; RV64-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV64-NEXT: vssub.vx v8, v8, a0, v0.t
-; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV64-NEXT: vssub.vx v16, v16, a0, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
@@ -1534,8 +1534,8 @@ define <32 x i64> @vssub_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vssub.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vssub.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1546,8 +1546,8 @@ define <32 x i64> @vssub_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV64-NEXT: li a0, -1
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vssub.vx v8, v8, a0, v0.t
-; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vssub.vx v16, v16, a0, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
index 6313f31bc1a6..6d8ed563f02b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
@@ -394,8 +394,8 @@ define <256 x i8> @vssubu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
@@ -435,8 +435,8 @@ define <256 x i8> @vssubu_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
; CHECK-NEXT: vlm.v v24, (a0)
; CHECK-NEXT: li a0, -1
; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vssubu.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
@@ -453,8 +453,8 @@ define <256 x i8> @vssubu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
; CHECK-NEXT: vlm.v v24, (a0)
; CHECK-NEXT: li a0, -1
; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vssubu.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
@@ -1396,8 +1396,8 @@ declare <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i
define <32 x i64> @vssubu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vssubu_vx_v32i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: li a2, 16
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB108_2
@@ -1413,15 +1413,15 @@ define <32 x i64> @vssubu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vssubu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vssubu_vx_v32i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB108_2
@@ -1435,8 +1435,8 @@ define <32 x i64> @vssubu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vssubu.vx v16, v16, a2, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
@@ -1499,8 +1499,8 @@ define <32 x i64> @vssubu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV32-NEXT: vssubu.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV32-NEXT: vssubu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1511,8 +1511,8 @@ define <32 x i64> @vssubu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV64-NEXT: li a0, -1
; RV64-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV64-NEXT: vssubu.vx v8, v8, a0, v0.t
-; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV64-NEXT: vssubu.vx v16, v16, a0, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
@@ -1529,8 +1529,8 @@ define <32 x i64> @vssubu_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vssubu.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vssubu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1541,8 +1541,8 @@ define <32 x i64> @vssubu_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV64-NEXT: li a0, -1
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vssubu.vx v8, v8, a0, v0.t
-; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vssubu.vx v16, v16, a0, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
index b1726be941e3..d6ca6c5a4b83 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
@@ -263,13 +263,12 @@ define <128 x i16> @vwadd_v128i16(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwadd.vv v8, v16, v24
+; CHECK-NEXT: vwadd.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwadd.vv v8, v16, v0
@@ -309,13 +308,12 @@ define <64 x i32> @vwadd_v64i32(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwadd.vv v8, v16, v24
+; CHECK-NEXT: vwadd.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwadd.vv v8, v16, v0
@@ -354,13 +352,12 @@ define <32 x i64> @vwadd_v32i64(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwadd.vv v8, v16, v24
+; CHECK-NEXT: vwadd.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwadd.vv v8, v16, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
index f6d9695c5149..61378a424ecb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
@@ -263,13 +263,12 @@ define <128 x i16> @vwaddu_v128i16(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwaddu.vv v8, v16, v24
+; CHECK-NEXT: vwaddu.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwaddu.vv v8, v16, v0
@@ -309,13 +308,12 @@ define <64 x i32> @vwaddu_v64i32(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwaddu.vv v8, v16, v24
+; CHECK-NEXT: vwaddu.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwaddu.vv v8, v16, v0
@@ -354,13 +352,12 @@ define <32 x i64> @vwaddu_v32i64(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwaddu.vv v8, v16, v24
+; CHECK-NEXT: vwaddu.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwaddu.vv v8, v16, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
index c87584ab6351..93927e10e607 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
@@ -289,13 +289,12 @@ define <128 x i16> @vwmul_v128i16(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwmul.vv v8, v16, v24
+; CHECK-NEXT: vwmul.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmul.vv v8, v16, v0
@@ -337,13 +336,12 @@ define <64 x i32> @vwmul_v64i32(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwmul.vv v8, v16, v24
+; CHECK-NEXT: vwmul.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmul.vv v8, v16, v0
@@ -384,13 +382,12 @@ define <32 x i64> @vwmul_v32i64(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwmul.vv v8, v16, v24
+; CHECK-NEXT: vwmul.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmul.vv v8, v16, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
index a56984577ea7..ee114350a432 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
@@ -281,13 +281,12 @@ define <128 x i16> @vwmulsu_v128i16(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwmulsu.vv v8, v24, v16
+; CHECK-NEXT: vwmulsu.vv v24, v8, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmulsu.vv v8, v0, v16
@@ -329,13 +328,12 @@ define <64 x i32> @vwmulsu_v64i32(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwmulsu.vv v8, v24, v16
+; CHECK-NEXT: vwmulsu.vv v24, v8, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmulsu.vv v8, v0, v16
@@ -376,13 +374,12 @@ define <32 x i64> @vwmulsu_v32i64(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwmulsu.vv v8, v24, v16
+; CHECK-NEXT: vwmulsu.vv v24, v8, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmulsu.vv v8, v0, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
index b97c9654ad3c..17a76ae5e7f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
@@ -265,13 +265,12 @@ define <128 x i16> @vwmulu_v128i16(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwmulu.vv v8, v16, v24
+; CHECK-NEXT: vwmulu.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmulu.vv v8, v16, v0
@@ -313,13 +312,12 @@ define <64 x i32> @vwmulu_v64i32(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwmulu.vv v8, v16, v24
+; CHECK-NEXT: vwmulu.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmulu.vv v8, v16, v0
@@ -360,13 +358,12 @@ define <32 x i64> @vwmulu_v32i64(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwmulu.vv v8, v16, v24
+; CHECK-NEXT: vwmulu.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmulu.vv v8, v16, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
index 2782a5fbb1ea..a2675d59ade9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
@@ -263,13 +263,12 @@ define <128 x i16> @vwsub_v128i16(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwsub.vv v8, v16, v24
+; CHECK-NEXT: vwsub.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwsub.vv v8, v16, v0
@@ -309,13 +308,12 @@ define <64 x i32> @vwsub_v64i32(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwsub.vv v8, v16, v24
+; CHECK-NEXT: vwsub.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwsub.vv v8, v16, v0
@@ -354,13 +352,12 @@ define <32 x i64> @vwsub_v32i64(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwsub.vv v8, v16, v24
+; CHECK-NEXT: vwsub.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwsub.vv v8, v16, v0
@@ -715,8 +712,8 @@ define <8 x i16> @vwsub_vx_v8i16_i16(ptr %x, ptr %y) {
define <4 x i32> @vwsub_vx_v4i32_i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwsub_vx_v4i32_i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: lb a1, 0(a1)
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v9, (a0)
; CHECK-NEXT: vmv.v.x v10, a1
; CHECK-NEXT: vwsub.vv v8, v10, v9
@@ -779,8 +776,8 @@ define <2 x i64> @vwsub_vx_v2i64_i8(ptr %x, ptr %y) nounwind {
;
; RV64-LABEL: vwsub_vx_v2i64_i8:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: lb a1, 0(a1)
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: vle32.v v9, (a0)
; RV64-NEXT: vmv.v.x v10, a1
; RV64-NEXT: vwsub.vv v8, v10, v9
@@ -808,8 +805,8 @@ define <2 x i64> @vwsub_vx_v2i64_i16(ptr %x, ptr %y) nounwind {
;
; RV64-LABEL: vwsub_vx_v2i64_i16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: lh a1, 0(a1)
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: vle32.v v9, (a0)
; RV64-NEXT: vmv.v.x v10, a1
; RV64-NEXT: vwsub.vv v8, v10, v9
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
index ccbc26c84d80..1a9e3aac0034 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
@@ -263,13 +263,12 @@ define <128 x i16> @vwsubu_v128i16(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwsubu.vv v8, v16, v24
+; CHECK-NEXT: vwsubu.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwsubu.vv v8, v16, v0
@@ -309,13 +308,12 @@ define <64 x i32> @vwsubu_v64i32(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwsubu.vv v8, v16, v24
+; CHECK-NEXT: vwsubu.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwsubu.vv v8, v16, v0
@@ -354,13 +352,12 @@ define <32 x i64> @vwsubu_v32i64(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwsubu.vv v8, v16, v24
+; CHECK-NEXT: vwsubu.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwsubu.vv v8, v16, v0
@@ -715,8 +712,8 @@ define <8 x i16> @vwsubu_vx_v8i16_i16(ptr %x, ptr %y) {
define <4 x i32> @vwsubu_vx_v4i32_i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_vx_v4i32_i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: lbu a1, 0(a1)
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v9, (a0)
; CHECK-NEXT: vmv.v.x v10, a1
; CHECK-NEXT: vwsubu.vv v8, v10, v9
@@ -783,8 +780,8 @@ define <2 x i64> @vwsubu_vx_v2i64_i8(ptr %x, ptr %y) nounwind {
;
; RV64-LABEL: vwsubu_vx_v2i64_i8:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: lbu a1, 0(a1)
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: vle32.v v9, (a0)
; RV64-NEXT: vmv.v.x v10, a1
; RV64-NEXT: vwsubu.vv v8, v10, v9
@@ -816,8 +813,8 @@ define <2 x i64> @vwsubu_vx_v2i64_i16(ptr %x, ptr %y) nounwind {
;
; RV64-LABEL: vwsubu_vx_v2i64_i16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: lhu a1, 0(a1)
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: vle32.v v9, (a0)
; RV64-NEXT: vmv.v.x v10, a1
; RV64-NEXT: vwsubu.vv v8, v10, v9
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll
index f4d679cd57ca..df90dae379c0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll
@@ -151,8 +151,8 @@ declare <32 x i64> @llvm.vp.zext.v32i64.v32i32(<32 x i32>, <32 x i1>, i32)
define <32 x i64> @vzext_v32i64_v32i32(<32 x i32> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vzext_v32i64_v32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB12_2
@@ -167,8 +167,8 @@ define <32 x i64> @vzext_v32i64_v32i32(<32 x i32> %va, <32 x i1> %m, i32 zeroext
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 16
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vzext.vf2 v16, v8, v0.t
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
index d464b491bbbe..26a3e053bf7a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
@@ -135,16 +135,16 @@ declare <vscale x 8 x half> @llvm.vp.floor.nxv8f16(<vscale x 8 x half>, <vscale
define <vscale x 8 x half> @vp_floor_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -179,16 +179,16 @@ declare <vscale x 16 x half> @llvm.vp.floor.nxv16f16(<vscale x 16 x half>, <vsca
define <vscale x 16 x half> @vp_floor_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI8_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI8_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -223,16 +223,16 @@ declare <vscale x 32 x half> @llvm.vp.floor.nxv32f16(<vscale x 32 x half>, <vsca
define <vscale x 32 x half> @vp_floor_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI10_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI10_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -359,8 +359,8 @@ define <vscale x 4 x float> @vp_floor_nxv4f32(<vscale x 4 x float> %va, <vscale
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -403,8 +403,8 @@ define <vscale x 8 x float> @vp_floor_nxv8f32(<vscale x 8 x float> %va, <vscale
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -447,8 +447,8 @@ define <vscale x 16 x float> @vp_floor_nxv16f32(<vscale x 16 x float> %va, <vsca
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -525,16 +525,16 @@ declare <vscale x 2 x double> @llvm.vp.floor.nxv2f64(<vscale x 2 x double>, <vsc
define <vscale x 2 x double> @vp_floor_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -569,16 +569,16 @@ declare <vscale x 4 x double> @llvm.vp.floor.nxv4f64(<vscale x 4 x double>, <vsc
define <vscale x 4 x double> @vp_floor_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -613,16 +613,16 @@ declare <vscale x 7 x double> @llvm.vp.floor.nxv7f64(<vscale x 7 x double>, <vsc
define <vscale x 7 x double> @vp_floor_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI28_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -657,16 +657,16 @@ declare <vscale x 8 x double> @llvm.vp.floor.nxv8f64(<vscale x 8 x double>, <vsc
define <vscale x 8 x double> @vp_floor_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI30_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -705,66 +705,56 @@ define <vscale x 16 x double> @vp_floor_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v25, v0, a2
+; CHECK-NEXT: vslidedown.vx v6, v0, a2
; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a2, 2
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB32_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
index 386f23f68c35..05896d8ef6ff 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
@@ -214,10 +214,7 @@ define <vscale x 32 x half> @vfmax_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vmv8r.v v0, v8
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
@@ -229,38 +226,31 @@ define <vscale x 32 x half> @vfmax_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: vmerge.vvm v16, v8, v24, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v8, v0
-; ZVFHMIN-NEXT: vfmax.vv v24, v8, v16
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfmax.vv v8, v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v16, v16
; ZVFHMIN-NEXT: vmfeq.vv v7, v8, v8
; ZVFHMIN-NEXT: vmerge.vvm v24, v16, v8, v0
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vmv1r.v v0, v7
-; ZVFHMIN-NEXT: vmerge.vvm v16, v8, v16, v0
+; ZVFHMIN-NEXT: vmerge.vvm v8, v8, v16, v0
+; ZVFHMIN-NEXT: vfmax.vv v16, v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmax.vv v16, v16, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
index 02cfd3de6b4d..ab07fff59b21 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
@@ -177,8 +177,8 @@ define <vscale x 4 x half> @vfmax_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v12, v0.t
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v16, v12, v14, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vmfeq.vv v8, v14, v14, v0.t
@@ -253,8 +253,8 @@ define <vscale x 8 x half> @vfmax_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v24, v16, v20, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: vmfeq.vv v8, v20, v20, v0.t
@@ -335,8 +335,8 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: vmfeq.vv v8, v24, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
@@ -444,13 +444,8 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x22, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 34 * vlenb
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 25
-; ZVFHMIN-NEXT: mul a1, a1, a2
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv8r.v v16, v8
+; ZVFHMIN-NEXT: vmv1r.v v24, v0
+; ZVFHMIN-NEXT: vmv8r.v v0, v8
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -458,35 +453,36 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: addi a4, a4, -1
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
-; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: csrr a4, vlenb
-; ZVFHMIN-NEXT: li a5, 24
-; ZVFHMIN-NEXT: mul a4, a4, a5
+; ZVFHMIN-NEXT: slli a4, a4, 5
; ZVFHMIN-NEXT: add a4, sp, a4
; ZVFHMIN-NEXT: addi a4, a4, 16
-; ZVFHMIN-NEXT: vs1r.v v0, (a4) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
+; ZVFHMIN-NEXT: vs1r.v v24, (a4) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v8, v24, a2
; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vs1r.v v0, (a2) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 4
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFHMIN-NEXT: vs8r.v v0, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v12, v24, v24, v0.t
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: li a4, 25
+; ZVFHMIN-NEXT: li a4, 24
; ZVFHMIN-NEXT: mul a2, a2, a4
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
@@ -523,45 +519,34 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 24
-; ZVFHMIN-NEXT: mul a1, a1, a2
+; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v24, v24, v0.t
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 4
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 25
+; ZVFHMIN-NEXT: li a2, 24
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl1r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vmerge.vvm v24, v24, v16, v0
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 25
+; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 24
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
@@ -573,18 +558,18 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vmerge.vvm v16, v16, v24, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v9
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 25
+; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfmax.vv v16, v16, v24, v0.t
-; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: li a1, 34
@@ -627,19 +612,18 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v7, v24, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 4
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v12, v24, v24, v0.t
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vmv4r.v v8, v16
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: li a4, 24
@@ -647,11 +631,12 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v8, v16, v24, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
@@ -683,13 +668,13 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: li a2, 24
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v3, v16, v16
@@ -1027,13 +1012,13 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi a3, a3, 16
; CHECK-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: srli a3, a1, 3
-; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; CHECK-NEXT: csrr a4, vlenb
; CHECK-NEXT: li a5, 18
; CHECK-NEXT: mul a4, a4, a5
; CHECK-NEXT: add a4, sp, a4
; CHECK-NEXT: addi a4, a4, 16
; CHECK-NEXT: vs1r.v v0, (a4) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v7, v0, a3
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: add a3, sp, a3
@@ -1043,13 +1028,13 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: sltu a4, a2, a3
; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: and a3, a4, a3
-; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: csrr a3, vlenb
-; CHECK-NEXT: slli a3, a3, 1
-; CHECK-NEXT: add a3, sp, a3
-; CHECK-NEXT: addi a3, a3, 16
-; CHECK-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: slli a4, a4, 1
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v26, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: csrr a3, vlenb
@@ -1112,7 +1097,6 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB28_2:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 18
; CHECK-NEXT: mul a0, a0, a1
@@ -1120,6 +1104,7 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl1r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: csrr a0, vlenb
@@ -1221,12 +1206,12 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB29_2:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v16, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
index 48baa12aa2e5..e94259392498 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
@@ -214,10 +214,7 @@ define <vscale x 32 x half> @vfmin_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vmv8r.v v0, v8
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
@@ -229,38 +226,31 @@ define <vscale x 32 x half> @vfmin_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: vmerge.vvm v16, v8, v24, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v8, v0
-; ZVFHMIN-NEXT: vfmin.vv v24, v8, v16
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfmin.vv v8, v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v16, v16
; ZVFHMIN-NEXT: vmfeq.vv v7, v8, v8
; ZVFHMIN-NEXT: vmerge.vvm v24, v16, v8, v0
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vmv1r.v v0, v7
-; ZVFHMIN-NEXT: vmerge.vvm v16, v8, v16, v0
+; ZVFHMIN-NEXT: vmerge.vvm v8, v8, v16, v0
+; ZVFHMIN-NEXT: vfmin.vv v16, v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmin.vv v16, v16, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
index 72a47ca2a605..fc5b11284dab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
@@ -177,8 +177,8 @@ define <vscale x 4 x half> @vfmin_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v12, v0.t
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v16, v12, v14, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vmfeq.vv v8, v14, v14, v0.t
@@ -253,8 +253,8 @@ define <vscale x 8 x half> @vfmin_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v24, v16, v20, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: vmfeq.vv v8, v20, v20, v0.t
@@ -335,8 +335,8 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: vmfeq.vv v8, v24, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
@@ -444,13 +444,8 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x22, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 34 * vlenb
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 25
-; ZVFHMIN-NEXT: mul a1, a1, a2
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv8r.v v16, v8
+; ZVFHMIN-NEXT: vmv1r.v v24, v0
+; ZVFHMIN-NEXT: vmv8r.v v0, v8
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -458,35 +453,36 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: addi a4, a4, -1
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
-; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: csrr a4, vlenb
-; ZVFHMIN-NEXT: li a5, 24
-; ZVFHMIN-NEXT: mul a4, a4, a5
+; ZVFHMIN-NEXT: slli a4, a4, 5
; ZVFHMIN-NEXT: add a4, sp, a4
; ZVFHMIN-NEXT: addi a4, a4, 16
-; ZVFHMIN-NEXT: vs1r.v v0, (a4) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
+; ZVFHMIN-NEXT: vs1r.v v24, (a4) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v8, v24, a2
; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vs1r.v v0, (a2) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 4
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFHMIN-NEXT: vs8r.v v0, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v12, v24, v24, v0.t
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: li a4, 25
+; ZVFHMIN-NEXT: li a4, 24
; ZVFHMIN-NEXT: mul a2, a2, a4
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
@@ -523,45 +519,34 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 24
-; ZVFHMIN-NEXT: mul a1, a1, a2
+; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v24, v24, v0.t
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 4
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 25
+; ZVFHMIN-NEXT: li a2, 24
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl1r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vmerge.vvm v24, v24, v16, v0
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 25
+; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 24
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
@@ -573,18 +558,18 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vmerge.vvm v16, v16, v24, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v9
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 25
+; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfmin.vv v16, v16, v24, v0.t
-; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: li a1, 34
@@ -627,19 +612,18 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v7, v24, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 4
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v12, v24, v24, v0.t
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vmv4r.v v8, v16
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: li a4, 24
@@ -647,11 +631,12 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v8, v16, v24, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
@@ -683,13 +668,13 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: li a2, 24
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v3, v16, v16
@@ -1027,13 +1012,13 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi a3, a3, 16
; CHECK-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: srli a3, a1, 3
-; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; CHECK-NEXT: csrr a4, vlenb
; CHECK-NEXT: li a5, 18
; CHECK-NEXT: mul a4, a4, a5
; CHECK-NEXT: add a4, sp, a4
; CHECK-NEXT: addi a4, a4, 16
; CHECK-NEXT: vs1r.v v0, (a4) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v7, v0, a3
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: add a3, sp, a3
@@ -1043,13 +1028,13 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: sltu a4, a2, a3
; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: and a3, a4, a3
-; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: csrr a3, vlenb
-; CHECK-NEXT: slli a3, a3, 1
-; CHECK-NEXT: add a3, sp, a3
-; CHECK-NEXT: addi a3, a3, 16
-; CHECK-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: slli a4, a4, 1
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v26, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: csrr a3, vlenb
@@ -1112,7 +1097,6 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB28_2:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 18
; CHECK-NEXT: mul a0, a0, a1
@@ -1120,6 +1104,7 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl1r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: csrr a0, vlenb
@@ -1221,12 +1206,12 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB29_2:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v16, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll b/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
index c8bed2de754b..9da4d7ec9f2d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
@@ -8,23 +8,25 @@ define i32 @test(i32 %size, ptr %add.ptr, i64 %const) {
; RV32-LABEL: test:
; RV32: # %bb.0: # %entry
; RV32-NEXT: th.lbib a3, (a1), -1, 0
-; RV32-NEXT: th.lrb a0, a1, a0, 0
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT: vmv.v.x v8, a3
-; RV32-NEXT: addi a1, a2, 1
+; RV32-NEXT: addi a3, a2, 1
+; RV32-NEXT: addi a4, a0, 1
; RV32-NEXT: .LBB0_1: # %for.body
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32-NEXT: th.lrb a0, a1, a0, 0
; RV32-NEXT: vmv.s.x v9, zero
; RV32-NEXT: vmv1r.v v10, v8
-; RV32-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
+; RV32-NEXT: vsetvli zero, a3, e8, mf2, tu, ma
; RV32-NEXT: vslideup.vx v10, v9, a2
; RV32-NEXT: vsetivli zero, 8, e8, mf2, tu, ma
; RV32-NEXT: vmv.s.x v10, a0
; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32-NEXT: vmseq.vi v9, v10, 0
-; RV32-NEXT: vmv.x.s a3, v9
-; RV32-NEXT: andi a3, a3, 255
-; RV32-NEXT: bnez a3, .LBB0_1
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: andi a5, a0, 255
+; RV32-NEXT: mv a0, a4
+; RV32-NEXT: bnez a5, .LBB0_1
; RV32-NEXT: # %bb.2: # %if.then381
; RV32-NEXT: li a0, 0
; RV32-NEXT: ret
@@ -32,24 +34,26 @@ define i32 @test(i32 %size, ptr %add.ptr, i64 %const) {
; RV64-LABEL: test:
; RV64: # %bb.0: # %entry
; RV64-NEXT: th.lbib a3, (a1), -1, 0
-; RV64-NEXT: sext.w a0, a0
-; RV64-NEXT: th.lrb a0, a1, a0, 0
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT: vmv.v.x v8, a3
-; RV64-NEXT: addi a1, a2, 1
+; RV64-NEXT: addi a3, a2, 1
+; RV64-NEXT: addi a4, a0, 1
; RV64-NEXT: .LBB0_1: # %for.body
; RV64-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64-NEXT: sext.w a0, a0
+; RV64-NEXT: th.lrb a0, a1, a0, 0
; RV64-NEXT: vmv.s.x v9, zero
; RV64-NEXT: vmv1r.v v10, v8
-; RV64-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
+; RV64-NEXT: vsetvli zero, a3, e8, mf2, tu, ma
; RV64-NEXT: vslideup.vx v10, v9, a2
; RV64-NEXT: vsetivli zero, 8, e8, mf2, tu, ma
; RV64-NEXT: vmv.s.x v10, a0
; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV64-NEXT: vmseq.vi v9, v10, 0
-; RV64-NEXT: vmv.x.s a3, v9
-; RV64-NEXT: andi a3, a3, 255
-; RV64-NEXT: bnez a3, .LBB0_1
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: andi a5, a0, 255
+; RV64-NEXT: mv a0, a4
+; RV64-NEXT: bnez a5, .LBB0_1
; RV64-NEXT: # %bb.2: # %if.then381
; RV64-NEXT: li a0, 0
; RV64-NEXT: ret
@@ -57,8 +61,9 @@ entry:
br label %for.body
for.body: ; preds = %for.body, %entry
+ %size.actual = phi i32 [%size, %entry], [%size.inc, %for.body]
%add.ptr1 = getelementptr i8, ptr %add.ptr, i32 -1
- %add.ptr2 = getelementptr i8, ptr %add.ptr1, i32 %size
+ %add.ptr2 = getelementptr i8, ptr %add.ptr1, i32 %size.actual
%0 = load i8, ptr %add.ptr1, align 1
%1 = load i8, ptr %add.ptr2, align 1
%2 = insertelement <8 x i8> poison, i8 %0, i64 0
@@ -68,6 +73,7 @@ for.body: ; preds = %for.body, %entry
%6 = bitcast <8 x i1> %5 to i8
%7 = zext i8 %6 to i32
%cond = icmp eq i32 %7, 0
+ %size.inc = add i32 %size, 1
br i1 %cond, label %if.then381, label %for.body
if.then381: ; preds = %for.body
diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
index bb28ff5c6dc4..aa845bd8bb0b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
@@ -448,8 +448,8 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -467,8 +467,8 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -609,8 +609,8 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -628,8 +628,8 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -780,8 +780,8 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -799,8 +799,8 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -1397,8 +1397,8 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s6
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -1422,8 +1422,8 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s4
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -1440,11 +1440,11 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 2
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -1454,8 +1454,8 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s3
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -1473,8 +1473,8 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -1485,18 +1485,18 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v10, v8, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: vslideup.vi v10, v8, 4
; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-V-NEXT: vnclip.wi v8, v10, 0
@@ -1710,8 +1710,8 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s6
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -1735,8 +1735,8 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s4
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -1753,11 +1753,11 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 2
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -1767,8 +1767,8 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s3
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -1786,8 +1786,8 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -1798,18 +1798,18 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v10, v8, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: vslideup.vi v10, v8, 4
; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-V-NEXT: vnclipu.wi v8, v10, 0
@@ -2045,8 +2045,8 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s6
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -2070,8 +2070,8 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s4
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -2088,11 +2088,11 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 2
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -2102,8 +2102,8 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s3
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -2121,8 +2121,8 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -2133,18 +2133,18 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 2
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v10, 4
; CHECK-V-NEXT: lui a0, 16
; CHECK-V-NEXT: addi a0, a0, -1
@@ -2279,9 +2279,9 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: li a2, -1
@@ -2412,9 +2412,9 @@ define <2 x i64> @utest_f64i64(<2 x double> %x) {
; CHECK-V-NEXT: call __fixunsdfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixunsdfti
; CHECK-V-NEXT: snez a1, a1
@@ -2524,9 +2524,9 @@ define <2 x i64> @ustest_f64i64(<2 x double> %x) {
; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: mv a2, s1
@@ -2686,9 +2686,9 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: li a2, -1
@@ -2819,9 +2819,9 @@ define <2 x i64> @utest_f32i64(<2 x float> %x) {
; CHECK-V-NEXT: call __fixunssfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixunssfti
; CHECK-V-NEXT: snez a1, a1
@@ -2931,9 +2931,9 @@ define <2 x i64> @ustest_f32i64(<2 x float> %x) {
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv a2, s1
@@ -3819,8 +3819,8 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -3838,8 +3838,8 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -3978,8 +3978,8 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -3997,8 +3997,8 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -4148,8 +4148,8 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -4167,8 +4167,8 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -4753,8 +4753,8 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s6
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -4778,8 +4778,8 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s4
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -4796,11 +4796,11 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 2
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -4810,8 +4810,8 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s3
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -4829,8 +4829,8 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -4841,18 +4841,18 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v10, v8, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: vslideup.vi v10, v8, 4
; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-V-NEXT: vnclip.wi v8, v10, 0
@@ -5064,8 +5064,8 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s6
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -5089,8 +5089,8 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s4
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -5107,11 +5107,11 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 2
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -5121,8 +5121,8 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s3
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -5140,8 +5140,8 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -5152,18 +5152,18 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v10, v8, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: vslideup.vi v10, v8, 4
; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-V-NEXT: vnclipu.wi v8, v10, 0
@@ -5398,8 +5398,8 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s6
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -5423,8 +5423,8 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s4
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -5441,11 +5441,11 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 2
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -5455,8 +5455,8 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s3
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -5474,8 +5474,8 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -5486,18 +5486,18 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 2
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v10, 4
; CHECK-V-NEXT: lui a0, 16
; CHECK-V-NEXT: addi a0, a0, -1
@@ -5633,9 +5633,9 @@ define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: li a2, -1
@@ -5766,9 +5766,9 @@ define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
; CHECK-V-NEXT: call __fixunsdfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vslidedown.vi v8, v8, 1
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixunsdfti
@@ -5867,9 +5867,9 @@ define <2 x i64> @ustest_f64i64_mm(<2 x double> %x) {
; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: mv a2, a1
@@ -6019,9 +6019,9 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: li a2, -1
@@ -6152,9 +6152,9 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
; CHECK-V-NEXT: call __fixunssfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vslidedown.vi v8, v8, 1
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixunssfti
@@ -6253,9 +6253,9 @@ define <2 x i64> @ustest_f32i64_mm(<2 x float> %x) {
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv a2, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll
index 8f36aad81727..c45af61ced94 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll
@@ -163,12 +163,11 @@ define <vscale x 4 x i16> @test_signed_v4f64_v4i16(<vscale x 4 x double> %f) {
; CHECK-NEXT: vfmin.vf v12, v12, fa4
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v12
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vnsrl.wi v12, v16, 0
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmfne.vv v0, v8, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v12, 0, v0
+; CHECK-NEXT: vnsrl.wi v8, v16, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%x = call <vscale x 4 x i16> @llvm.fptosi.sat.nxv4f64.nxv4i16(<vscale x 4 x double> %f)
ret <vscale x 4 x i16> %x
@@ -186,12 +185,11 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) {
; CHECK-NEXT: vfmin.vf v16, v16, fa4
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v24, v16
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vnsrl.wi v16, v24, 0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmfne.vv v0, v8, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v16, 0, v0
+; CHECK-NEXT: vnsrl.wi v8, v24, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%x = call <vscale x 8 x i16> @llvm.fptosi.sat.nxv8f64.nxv8i16(<vscale x 8 x double> %f)
ret <vscale x 8 x i16> %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-costrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll
index 3276f481f30e..3276f481f30e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fround-costrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll
diff --git a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
index 249f765971b0..bc5617957d7d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
@@ -960,141 +960,158 @@ define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: li a3, 40
+; CHECK-NEXT: li a3, 48
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: li a3, 24
-; CHECK-NEXT: mul a1, a1, a3
+; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: csrr a3, vlenb
-; CHECK-NEXT: slli a1, a3, 3
-; CHECK-NEXT: add a5, a0, a1
-; CHECK-NEXT: srli a6, a3, 3
-; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma
-; CHECK-NEXT: add a1, a2, a1
-; CHECK-NEXT: vl8re64.v v16, (a1)
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: sub a1, a4, a3
-; CHECK-NEXT: sltu a7, a4, a1
-; CHECK-NEXT: addi a7, a7, -1
-; CHECK-NEXT: and a7, a7, a1
-; CHECK-NEXT: li a1, 63
+; CHECK-NEXT: slli a3, a1, 3
+; CHECK-NEXT: add a5, a0, a3
+; CHECK-NEXT: add a3, a2, a3
+; CHECK-NEXT: vl8re64.v v16, (a3)
+; CHECK-NEXT: csrr a3, vlenb
+; CHECK-NEXT: slli a3, a3, 4
+; CHECK-NEXT: add a3, sp, a3
+; CHECK-NEXT: addi a3, a3, 16
+; CHECK-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; CHECK-NEXT: sub a3, a4, a1
+; CHECK-NEXT: sltu a6, a4, a3
+; CHECK-NEXT: addi a6, a6, -1
+; CHECK-NEXT: and a6, a6, a3
+; CHECK-NEXT: srli a3, a1, 3
; CHECK-NEXT: vl8re64.v v8, (a5)
; CHECK-NEXT: csrr a5, vlenb
-; CHECK-NEXT: slli a5, a5, 5
+; CHECK-NEXT: li a7, 40
+; CHECK-NEXT: mul a5, a5, a7
; CHECK-NEXT: add a5, sp, a5
; CHECK-NEXT: addi a5, a5, 16
; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
-; CHECK-NEXT: vslidedown.vx v0, v0, a6
-; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, ma
-; CHECK-NEXT: vand.vx v8, v16, a1, v0.t
+; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a3
+; CHECK-NEXT: li a3, 63
+; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
+; CHECK-NEXT: vand.vx v8, v16, a3, v0.t
; CHECK-NEXT: csrr a5, vlenb
-; CHECK-NEXT: slli a5, a5, 5
+; CHECK-NEXT: li a6, 40
+; CHECK-NEXT: mul a5, a5, a6
; CHECK-NEXT: add a5, sp, a5
; CHECK-NEXT: addi a5, a5, 16
; CHECK-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload
; CHECK-NEXT: vsrl.vv v8, v16, v8, v0.t
-; CHECK-NEXT: addi a5, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: slli a5, a5, 3
; CHECK-NEXT: add a5, sp, a5
; CHECK-NEXT: addi a5, a5, 16
+; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a5, vlenb
+; CHECK-NEXT: slli a5, a5, 4
+; CHECK-NEXT: add a5, sp, a5
+; CHECK-NEXT: addi a5, a5, 16
; CHECK-NEXT: vl8r.v v8, (a5) # Unknown-size Folded Reload
; CHECK-NEXT: vnot.v v8, v8, v0.t
-; CHECK-NEXT: vand.vx v16, v8, a1, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a3, v0.t
+; CHECK-NEXT: addi a5, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
; CHECK-NEXT: vl8re64.v v8, (a0)
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vl8re64.v v8, (a2)
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 5
+; CHECK-NEXT: li a2, 40
+; CHECK-NEXT: mul a0, a0, a2
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: li a2, 24
+; CHECK-NEXT: mul a0, a0, a2
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
-; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
+; CHECK-NEXT: vsll.vi v16, v8, 1, v0.t
; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsll.vv v16, v16, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vor.vv v8, v16, v8, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a2, 24
+; CHECK-NEXT: mul a0, a0, a2
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: bltu a4, a3, .LBB46_2
+; CHECK-NEXT: bltu a4, a1, .LBB46_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a4, a3
+; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB46_2:
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 5
+; CHECK-NEXT: li a1, 40
+; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vand.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
+; CHECK-NEXT: vand.vx v8, v16, a3, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsrl.vv v8, v16, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 5
+; CHECK-NEXT: li a1, 40
+; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vnot.v v16, v8, v0.t
-; CHECK-NEXT: vand.vx v16, v16, a1, v0.t
+; CHECK-NEXT: vand.vx v16, v16, a3, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 24
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: li a1, 24
+; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 40
+; CHECK-NEXT: li a1, 48
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
@@ -1150,12 +1167,12 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: add a6, sp, a6
; CHECK-NEXT: addi a6, a6, 16
; CHECK-NEXT: vl8r.v v16, (a6) # Unknown-size Folded Reload
-; CHECK-NEXT: vsll.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vsll.vv v16, v16, v8, v0.t
; CHECK-NEXT: csrr a6, vlenb
; CHECK-NEXT: slli a6, a6, 3
; CHECK-NEXT: add a6, sp, a6
; CHECK-NEXT: addi a6, a6, 16
-; CHECK-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v16, (a6) # Unknown-size Folded Spill
; CHECK-NEXT: add a5, a0, a5
; CHECK-NEXT: csrr a6, vlenb
; CHECK-NEXT: slli a6, a6, 4
@@ -1198,13 +1215,13 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a3
; CHECK-NEXT: .LBB47_2:
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a1, v0.t
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
@@ -1318,10 +1335,8 @@ define <vscale x 1 x i8> @fshr_v1i4(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b,
; CHECK-NEXT: li a1, 4
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vremu.vx v10, v10, a1, v0.t
+; CHECK-NEXT: vand.vi v9, v9, 15, v0.t
; CHECK-NEXT: vsll.vi v8, v8, 4, v0.t
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vand.vi v9, v9, 15
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t
; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
@@ -1343,10 +1358,8 @@ define <vscale x 1 x i8> @fshl_v1i4(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b,
; CHECK-NEXT: li a1, 4
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vremu.vx v10, v10, a1, v0.t
+; CHECK-NEXT: vand.vi v9, v9, 15, v0.t
; CHECK-NEXT: vsll.vi v8, v8, 4, v0.t
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vand.vi v9, v9, 15
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll
index 6d8763d34ec1..e34b4a81b631 100644
--- a/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll
@@ -70,8 +70,8 @@ define <vscale x 16 x i64> @llrint_nxv16i64_nxv16f32(<vscale x 16 x float> %x, <
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB4_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfwcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll
index 9fa8807ed4ad..c9f91bf9def2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll
@@ -132,8 +132,8 @@ define <vscale x 16 x iXLen> @lrint_nxv16f32(<vscale x 16 x float> %x, <vscale x
; RV64-i64-NEXT: # %bb.1:
; RV64-i64-NEXT: mv a0, a1
; RV64-i64-NEXT: .LBB4_2:
-; RV64-i64-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; RV64-i64-NEXT: vmv1r.v v0, v24
+; RV64-i64-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; RV64-i64-NEXT: vfwcvt.x.f.v v24, v8, v0.t
; RV64-i64-NEXT: vmv8r.v v8, v24
; RV64-i64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
index f87fa3ec6f16..3aca3130cc54 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
@@ -1289,8 +1289,8 @@ define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: viota.m v8, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -1313,8 +1313,8 @@ define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmsbf.m v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
@@ -1444,8 +1444,8 @@ define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmsbf.m v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
index e260ae5344e4..be37be06f0e7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -1200,13 +1200,13 @@ define void @mgather_nxv16i64(<vscale x 8 x ptr> %ptrs0, <vscale x 8 x ptr> %ptr
; RV32-LABEL: mgather_nxv16i64:
; RV32: # %bb.0:
; RV32-NEXT: vl8re64.v v24, (a0)
-; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
-; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: srli a2, a0, 3
; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vx v0, v0, a2
+; RV32-NEXT: vslidedown.vx v7, v0, a2
; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vluxei32.v v24, (zero), v12, v0.t
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, a1, a0
@@ -1216,20 +1216,35 @@ define void @mgather_nxv16i64(<vscale x 8 x ptr> %ptrs0, <vscale x 8 x ptr> %ptr
;
; RV64-LABEL: mgather_nxv16i64:
; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: csrr a3, vlenb
+; RV64-NEXT: slli a3, a3, 3
+; RV64-NEXT: sub sp, sp, a3
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; RV64-NEXT: addi a3, sp, 16
+; RV64-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV64-NEXT: vmv8r.v v16, v8
; RV64-NEXT: vl8re64.v v24, (a0)
-; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu
-; RV64-NEXT: vluxei64.v v24, (zero), v8, v0.t
-; RV64-NEXT: vl8re64.v v8, (a1)
; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: vl8re64.v v8, (a1)
; RV64-NEXT: srli a1, a0, 3
; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
-; RV64-NEXT: vslidedown.vx v0, v0, a1
+; RV64-NEXT: vslidedown.vx v7, v0, a1
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
+; RV64-NEXT: vluxei64.v v24, (zero), v16, v0.t
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vluxei64.v v8, (zero), v16, v0.t
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add a0, a2, a0
; RV64-NEXT: vs8r.v v8, (a0)
; RV64-NEXT: vs8r.v v24, (a2)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%p0 = call <vscale x 16 x ptr> @llvm.vector.insert.nxv8p0.nxv16p0(<vscale x 16 x ptr> undef, <vscale x 8 x ptr> %ptrs0, i64 0)
%p1 = call <vscale x 16 x ptr> @llvm.vector.insert.nxv8p0.nxv16p0(<vscale x 16 x ptr> %p0, <vscale x 8 x ptr> %ptrs1, i64 8)
@@ -2116,8 +2131,8 @@ define <vscale x 32 x i8> @mgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8>
; RV64-NEXT: vluxei64.v v15, (a0), v16, v0.t
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v10
-; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v8
+; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v14, (a0), v16, v0.t
; RV64-NEXT: vmv4r.v v8, v12
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
index fc8fdf4aaafe..9bfa0f31dc3a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -1691,15 +1691,15 @@ declare <vscale x 16 x ptr> @llvm.vector.insert.nxv8p0.nxv16p0(<vscale x 16 x pt
define void @mscatter_nxv16f64(<vscale x 8 x double> %val0, <vscale x 8 x double> %val1, <vscale x 8 x ptr> %ptrs0, <vscale x 8 x ptr> %ptrs1, <vscale x 16 x i1> %m) {
; RV32-LABEL: mscatter_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: vl4re32.v v24, (a0)
; RV32-NEXT: vl4re32.v v28, (a1)
-; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (zero), v24, v0.t
+; RV32-NEXT: vl4re32.v v4, (a0)
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: srli a0, a0, 3
; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vx v0, v0, a0
+; RV32-NEXT: vslidedown.vx v24, v0, a0
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (zero), v4, v0.t
+; RV32-NEXT: vmv1r.v v0, v24
; RV32-NEXT: vsoxei32.v v16, (zero), v28, v0.t
; RV32-NEXT: ret
;
@@ -1708,25 +1708,36 @@ define void @mscatter_nxv16f64(<vscale x 8 x double> %val0, <vscale x 8 x double
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 3
+; RV64-NEXT: slli a2, a2, 4
; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; RV64-NEXT: vl8re64.v v24, (a0)
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; RV64-NEXT: vl8re64.v v16, (a1)
-; RV64-NEXT: vsoxei64.v v8, (zero), v24, v0.t
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 3
+; RV64-NEXT: add a2, sp, a2
+; RV64-NEXT: addi a2, a2, 16
+; RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV64-NEXT: vmv8r.v v16, v8
+; RV64-NEXT: vl8re64.v v8, (a1)
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vl8re64.v v8, (a0)
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: srli a0, a0, 3
; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; RV64-NEXT: vslidedown.vx v0, v0, a0
+; RV64-NEXT: vslidedown.vx v24, v0, a0
; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vsoxei64.v v16, (zero), v8, v0.t
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t
; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: slli a0, a0, 4
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
@@ -1745,13 +1756,13 @@ define void @mscatter_baseidx_nxv16i8_nxv16f64(<vscale x 8 x double> %val0, <vsc
; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf4 v24, v6
; RV32-NEXT: vsll.vi v24, v24, 3
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: srli a1, a1, 3
; RV32-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vx v0, v0, a1
+; RV32-NEXT: vslidedown.vx v7, v0, a1
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: ret
;
@@ -1763,12 +1774,12 @@ define void @mscatter_baseidx_nxv16i8_nxv16f64(<vscale x 8 x double> %val0, <vsc
; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: vsext.vf8 v8, v7
-; RV64-NEXT: vsll.vi v8, v8, 3
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: srli a1, a1, 3
; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a1
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV64-NEXT: vsll.vi v8, v8, 3
; RV64-NEXT: vsoxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 16 x i8> %idxs
@@ -1785,13 +1796,13 @@ define void @mscatter_baseidx_nxv16i16_nxv16f64(<vscale x 8 x double> %val0, <vs
; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf2 v24, v4
; RV32-NEXT: vsll.vi v24, v24, 3
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: srli a1, a1, 3
; RV32-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vx v0, v0, a1
+; RV32-NEXT: vslidedown.vx v7, v0, a1
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: ret
;
@@ -1803,12 +1814,12 @@ define void @mscatter_baseidx_nxv16i16_nxv16f64(<vscale x 8 x double> %val0, <vs
; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: vsext.vf4 v8, v6
-; RV64-NEXT: vsll.vi v8, v8, 3
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: srli a1, a1, 3
; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a1
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV64-NEXT: vsll.vi v8, v8, 3
; RV64-NEXT: vsoxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 16 x i16> %idxs
diff --git a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
index ebe89817630d..a3ea462b6a73 100644
--- a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
@@ -204,8 +204,8 @@ define <vscale x 4 x half> @vp_nearbyint_nxv4f16(<vscale x 4 x half> %va, <vscal
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: frflags a0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
@@ -261,16 +261,16 @@ declare <vscale x 8 x half> @llvm.vp.nearbyint.nxv8f16(<vscale x 8 x half>, <vsc
define <vscale x 8 x half> @vp_nearbyint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: frflags a0
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
@@ -290,8 +290,8 @@ define <vscale x 8 x half> @vp_nearbyint_nxv8f16(<vscale x 8 x half> %va, <vscal
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: frflags a0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
@@ -347,16 +347,16 @@ declare <vscale x 16 x half> @llvm.vp.nearbyint.nxv16f16(<vscale x 16 x half>, <
define <vscale x 16 x half> @vp_nearbyint_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI8_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a1)
+; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
; ZVFH-NEXT: frflags a0
-; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v12
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
@@ -376,8 +376,8 @@ define <vscale x 16 x half> @vp_nearbyint_nxv16f16(<vscale x 16 x half> %va, <vs
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v12, v24, fa5, v0.t
; ZVFHMIN-NEXT: frflags a0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
@@ -433,16 +433,16 @@ declare <vscale x 32 x half> @llvm.vp.nearbyint.nxv32f16(<vscale x 32 x half>, <
define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI10_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a1)
+; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
; ZVFH-NEXT: frflags a0
-; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v16
+; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
@@ -458,7 +458,7 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vmv1r.v v16, v0
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -468,50 +468,49 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v17, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v17, v8, fa5, v0.t
; ZVFHMIN-NEXT: frflags a2
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT: fsflags a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: bltu a0, a1, .LBB10_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB10_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
+; ZVFHMIN-NEXT: vfabs.v v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
+; ZVFHMIN-NEXT: vmflt.vf v7, v16, fa5, v0.t
; ZVFHMIN-NEXT: frflags a0
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
-; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
+; ZVFHMIN-NEXT: vfsgnj.vv v24, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: fsflags a0
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
@@ -556,20 +555,20 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16_unmasked(<vscale x 32 x half>
; ZVFHMIN-NEXT: vmset.m v16
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v16, v16, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
; ZVFHMIN-NEXT: frflags a2
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT: fsflags a2
@@ -701,8 +700,8 @@ define <vscale x 4 x float> @vp_nearbyint_nxv4f32(<vscale x 4 x float> %va, <vsc
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
@@ -745,8 +744,8 @@ define <vscale x 8 x float> @vp_nearbyint_nxv8f32(<vscale x 8 x float> %va, <vsc
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
@@ -789,8 +788,8 @@ define <vscale x 16 x float> @vp_nearbyint_nxv16f32(<vscale x 16 x float> %va, <
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
@@ -867,16 +866,16 @@ declare <vscale x 2 x double> @llvm.vp.nearbyint.nxv2f64(<vscale x 2 x double>,
define <vscale x 2 x double> @vp_nearbyint_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
@@ -911,16 +910,16 @@ declare <vscale x 4 x double> @llvm.vp.nearbyint.nxv4f64(<vscale x 4 x double>,
define <vscale x 4 x double> @vp_nearbyint_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
@@ -955,16 +954,16 @@ declare <vscale x 7 x double> @llvm.vp.nearbyint.nxv7f64(<vscale x 7 x double>,
define <vscale x 7 x double> @vp_nearbyint_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI28_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
@@ -999,16 +998,16 @@ declare <vscale x 8 x double> @llvm.vp.nearbyint.nxv8f64(<vscale x 8 x double>,
define <vscale x 8 x double> @vp_nearbyint_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI30_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
@@ -1044,62 +1043,47 @@ declare <vscale x 16 x double> @llvm.vp.nearbyint.nxv16f64(<vscale x 16 x double
define <vscale x 16 x double> @vp_nearbyint_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: vmv1r.v v7, v0
-; CHECK-NEXT: vmv8r.v v24, v16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v6, v0, a2
; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vfabs.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v6, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: frflags a2
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vfcvt.x.f.v v16, v24, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: fsflags a2
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB32_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v7, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.nearbyint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr63596.ll b/llvm/test/CodeGen/RISCV/rvv/pr63596.ll
index d13d67fd0a88..8bb62eaa8e9e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr63596.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr63596.ll
@@ -27,20 +27,18 @@ define <4 x float> @foo(ptr %0) nounwind {
; CHECK-NEXT: fsw fa0, 0(sp)
; CHECK-NEXT: addi a0, sp, 4
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: addi a0, sp, 12
; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v9, v8, 1
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: addi a0, sp, 12
; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vle32.v v11, (a0)
; CHECK-NEXT: mv a0, sp
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 1
+; CHECK-NEXT: vslideup.vi v10, v9, 1
+; CHECK-NEXT: vslideup.vi v8, v11, 1
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 2
+; CHECK-NEXT: vslideup.vi v8, v10, 2
; CHECK-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
index f934127f978d..88bd92c6ec16 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
@@ -185,8 +185,8 @@ define <vscale x 4 x half> @vp_rint_nxv4f16(<vscale x 4 x half> %va, <vscale x 4
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
@@ -237,15 +237,15 @@ declare <vscale x 8 x half> @llvm.vp.rint.nxv8f16(<vscale x 8 x half>, <vscale x
define <vscale x 8 x half> @vp_rint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
@@ -263,8 +263,8 @@ define <vscale x 8 x half> @vp_rint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
@@ -315,15 +315,15 @@ declare <vscale x 16 x half> @llvm.vp.rint.nxv16f16(<vscale x 16 x half>, <vscal
define <vscale x 16 x half> @vp_rint_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI8_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a1)
+; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v12
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
@@ -341,8 +341,8 @@ define <vscale x 16 x half> @vp_rint_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v12, v24, fa5, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
@@ -393,15 +393,15 @@ declare <vscale x 32 x half> @llvm.vp.rint.nxv32f16(<vscale x 32 x half>, <vscal
define <vscale x 32 x half> @vp_rint_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI10_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a1)
+; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v16
+; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
@@ -426,46 +426,50 @@ define <vscale x 32 x half> @vp_rint_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v17, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v17, v8, fa5, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: bltu a0, a1, .LBB10_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB10_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
+; ZVFHMIN-NEXT: vmv1r.v v8, v16
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v16, v24, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
+; ZVFHMIN-NEXT: vmflt.vf v8, v16, fa5, v0.t
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
-; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
+; ZVFHMIN-NEXT: vfsgnj.vv v24, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
@@ -508,19 +512,19 @@ define <vscale x 32 x half> @vp_rint_nxv32f16_unmasked(<vscale x 32 x half> %va,
; ZVFHMIN-NEXT: vmset.m v16
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v16, v16, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
@@ -640,8 +644,8 @@ define <vscale x 4 x float> @vp_rint_nxv4f32(<vscale x 4 x float> %va, <vscale x
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
@@ -680,8 +684,8 @@ define <vscale x 8 x float> @vp_rint_nxv8f32(<vscale x 8 x float> %va, <vscale x
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
@@ -720,8 +724,8 @@ define <vscale x 16 x float> @vp_rint_nxv16f32(<vscale x 16 x float> %va, <vscal
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
@@ -791,15 +795,15 @@ declare <vscale x 2 x double> @llvm.vp.rint.nxv2f64(<vscale x 2 x double>, <vsca
define <vscale x 2 x double> @vp_rint_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
@@ -831,15 +835,15 @@ declare <vscale x 4 x double> @llvm.vp.rint.nxv4f64(<vscale x 4 x double>, <vsca
define <vscale x 4 x double> @vp_rint_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
@@ -871,15 +875,15 @@ declare <vscale x 7 x double> @llvm.vp.rint.nxv7f64(<vscale x 7 x double>, <vsca
define <vscale x 7 x double> @vp_rint_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI28_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
@@ -911,15 +915,15 @@ declare <vscale x 8 x double> @llvm.vp.rint.nxv8f64(<vscale x 8 x double>, <vsca
define <vscale x 8 x double> @vp_rint_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI30_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
@@ -955,62 +959,51 @@ define <vscale x 16 x double> @vp_rint_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v25, v0, a2
+; CHECK-NEXT: vslidedown.vx v6, v0, a2
; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB32_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
index edeac1acf3b0..1ddadcc49373 100644
--- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
@@ -204,8 +204,8 @@ define <vscale x 4 x half> @vp_round_nxv4f16(<vscale x 4 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -261,16 +261,16 @@ declare <vscale x 8 x half> @llvm.vp.round.nxv8f16(<vscale x 8 x half>, <vscale
define <vscale x 8 x half> @vp_round_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 4
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -290,8 +290,8 @@ define <vscale x 8 x half> @vp_round_nxv8f16(<vscale x 8 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -347,16 +347,16 @@ declare <vscale x 16 x half> @llvm.vp.round.nxv16f16(<vscale x 16 x half>, <vsca
define <vscale x 16 x half> @vp_round_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI8_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a1)
+; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 4
-; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v12
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -376,8 +376,8 @@ define <vscale x 16 x half> @vp_round_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v12, v24, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -433,16 +433,16 @@ declare <vscale x 32 x half> @llvm.vp.round.nxv32f16(<vscale x 32 x half>, <vsca
define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI10_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a1)
+; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 4
-; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v16
+; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -458,7 +458,6 @@ define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vmv1r.v v16, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -466,52 +465,60 @@ define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: addi a4, a4, -1
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
+; ZVFHMIN-NEXT: vmv1r.v v16, v0
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v17, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v17, v8, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a2, 4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a2
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: bltu a0, a1, .LBB10_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB10_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFHMIN-NEXT: vmv1r.v v8, v16
; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v16, v24, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
+; ZVFHMIN-NEXT: vmflt.vf v8, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 4
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
+; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: fsrm a0
-; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
+; ZVFHMIN-NEXT: vfsgnj.vv v24, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
@@ -556,20 +563,20 @@ define <vscale x 32 x half> @vp_round_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vmset.m v16
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v16, v16, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a2, 4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a2
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
@@ -701,8 +708,8 @@ define <vscale x 4 x float> @vp_round_nxv4f32(<vscale x 4 x float> %va, <vscale
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -745,8 +752,8 @@ define <vscale x 8 x float> @vp_round_nxv8f32(<vscale x 8 x float> %va, <vscale
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -789,8 +796,8 @@ define <vscale x 16 x float> @vp_round_nxv16f32(<vscale x 16 x float> %va, <vsca
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -867,16 +874,16 @@ declare <vscale x 2 x double> @llvm.vp.round.nxv2f64(<vscale x 2 x double>, <vsc
define <vscale x 2 x double> @vp_round_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -911,16 +918,16 @@ declare <vscale x 4 x double> @llvm.vp.round.nxv4f64(<vscale x 4 x double>, <vsc
define <vscale x 4 x double> @vp_round_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -955,16 +962,16 @@ declare <vscale x 7 x double> @llvm.vp.round.nxv7f64(<vscale x 7 x double>, <vsc
define <vscale x 7 x double> @vp_round_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI28_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -999,16 +1006,16 @@ declare <vscale x 8 x double> @llvm.vp.round.nxv8f64(<vscale x 8 x double>, <vsc
define <vscale x 8 x double> @vp_round_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI30_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -1047,66 +1054,56 @@ define <vscale x 16 x double> @vp_round_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v25, v0, a2
+; CHECK-NEXT: vslidedown.vx v6, v0, a2
; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a2, 4
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB32_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
index a77c58ba9ec5..8c5a7bb2dea6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
@@ -204,8 +204,8 @@ define <vscale x 4 x half> @vp_roundeven_nxv4f16(<vscale x 4 x half> %va, <vscal
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -261,16 +261,16 @@ declare <vscale x 8 x half> @llvm.vp.roundeven.nxv8f16(<vscale x 8 x half>, <vsc
define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 0
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -290,8 +290,8 @@ define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscal
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -347,16 +347,16 @@ declare <vscale x 16 x half> @llvm.vp.roundeven.nxv16f16(<vscale x 16 x half>, <
define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI8_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a1)
+; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 0
-; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v12
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -376,8 +376,8 @@ define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vs
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v12, v24, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -433,16 +433,16 @@ declare <vscale x 32 x half> @llvm.vp.roundeven.nxv32f16(<vscale x 32 x half>, <
define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI10_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a1)
+; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 0
-; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v16
+; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -458,7 +458,6 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vmv1r.v v16, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -466,52 +465,60 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFHMIN-NEXT: addi a4, a4, -1
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
+; ZVFHMIN-NEXT: vmv1r.v v16, v0
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v17, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v17, v8, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a2, 0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a2
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: bltu a0, a1, .LBB10_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB10_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFHMIN-NEXT: vmv1r.v v8, v16
; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v16, v24, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
+; ZVFHMIN-NEXT: vmflt.vf v8, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 0
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
+; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: fsrm a0
-; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
+; ZVFHMIN-NEXT: vfsgnj.vv v24, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
@@ -556,20 +563,20 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16_unmasked(<vscale x 32 x half>
; ZVFHMIN-NEXT: vmset.m v16
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v16, v16, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a2, 0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a2
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
@@ -701,8 +708,8 @@ define <vscale x 4 x float> @vp_roundeven_nxv4f32(<vscale x 4 x float> %va, <vsc
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -745,8 +752,8 @@ define <vscale x 8 x float> @vp_roundeven_nxv8f32(<vscale x 8 x float> %va, <vsc
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -789,8 +796,8 @@ define <vscale x 16 x float> @vp_roundeven_nxv16f32(<vscale x 16 x float> %va, <
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -867,16 +874,16 @@ declare <vscale x 2 x double> @llvm.vp.roundeven.nxv2f64(<vscale x 2 x double>,
define <vscale x 2 x double> @vp_roundeven_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -911,16 +918,16 @@ declare <vscale x 4 x double> @llvm.vp.roundeven.nxv4f64(<vscale x 4 x double>,
define <vscale x 4 x double> @vp_roundeven_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -955,16 +962,16 @@ declare <vscale x 7 x double> @llvm.vp.roundeven.nxv7f64(<vscale x 7 x double>,
define <vscale x 7 x double> @vp_roundeven_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI28_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -999,16 +1006,16 @@ declare <vscale x 8 x double> @llvm.vp.roundeven.nxv8f64(<vscale x 8 x double>,
define <vscale x 8 x double> @vp_roundeven_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI30_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -1047,66 +1054,56 @@ define <vscale x 16 x double> @vp_roundeven_nxv16f64(<vscale x 16 x double> %va,
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v25, v0, a2
+; CHECK-NEXT: vslidedown.vx v6, v0, a2
; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a2, 0
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB32_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
index 71a53c525551..1227e73a0243 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
@@ -204,8 +204,8 @@ define <vscale x 4 x half> @vp_roundtozero_nxv4f16(<vscale x 4 x half> %va, <vsc
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 1
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -261,16 +261,16 @@ declare <vscale x 8 x half> @llvm.vp.roundtozero.nxv8f16(<vscale x 8 x half>, <v
define <vscale x 8 x half> @vp_roundtozero_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 1
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -290,8 +290,8 @@ define <vscale x 8 x half> @vp_roundtozero_nxv8f16(<vscale x 8 x half> %va, <vsc
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 1
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -347,16 +347,16 @@ declare <vscale x 16 x half> @llvm.vp.roundtozero.nxv16f16(<vscale x 16 x half>,
define <vscale x 16 x half> @vp_roundtozero_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI8_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a1)
+; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 1
-; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v12
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -376,8 +376,8 @@ define <vscale x 16 x half> @vp_roundtozero_nxv16f16(<vscale x 16 x half> %va, <
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v12, v24, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 1
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -433,16 +433,16 @@ declare <vscale x 32 x half> @llvm.vp.roundtozero.nxv32f16(<vscale x 32 x half>,
define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI10_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a1)
+; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 1
-; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v16
+; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -458,7 +458,6 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vmv1r.v v16, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -466,52 +465,60 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <
; ZVFHMIN-NEXT: addi a4, a4, -1
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
+; ZVFHMIN-NEXT: vmv1r.v v16, v0
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v17, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v17, v8, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a2, 1
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a2
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: bltu a0, a1, .LBB10_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB10_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFHMIN-NEXT: vmv1r.v v8, v16
; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v16, v24, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
+; ZVFHMIN-NEXT: vmflt.vf v8, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 1
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
+; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: fsrm a0
-; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
+; ZVFHMIN-NEXT: vfsgnj.vv v24, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
@@ -556,20 +563,20 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16_unmasked(<vscale x 32 x hal
; ZVFHMIN-NEXT: vmset.m v16
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v16, v16, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a2, 1
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a2
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
@@ -701,8 +708,8 @@ define <vscale x 4 x float> @vp_roundtozero_nxv4f32(<vscale x 4 x float> %va, <v
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -745,8 +752,8 @@ define <vscale x 8 x float> @vp_roundtozero_nxv8f32(<vscale x 8 x float> %va, <v
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -789,8 +796,8 @@ define <vscale x 16 x float> @vp_roundtozero_nxv16f32(<vscale x 16 x float> %va,
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -867,16 +874,16 @@ declare <vscale x 2 x double> @llvm.vp.roundtozero.nxv2f64(<vscale x 2 x double>
define <vscale x 2 x double> @vp_roundtozero_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -911,16 +918,16 @@ declare <vscale x 4 x double> @llvm.vp.roundtozero.nxv4f64(<vscale x 4 x double>
define <vscale x 4 x double> @vp_roundtozero_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -955,16 +962,16 @@ declare <vscale x 7 x double> @llvm.vp.roundtozero.nxv7f64(<vscale x 7 x double>
define <vscale x 7 x double> @vp_roundtozero_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI28_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -999,16 +1006,16 @@ declare <vscale x 8 x double> @llvm.vp.roundtozero.nxv8f64(<vscale x 8 x double>
define <vscale x 8 x double> @vp_roundtozero_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI30_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -1047,66 +1054,56 @@ define <vscale x 16 x double> @vp_roundtozero_nxv16f64(<vscale x 16 x double> %v
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v25, v0, a2
+; CHECK-NEXT: vslidedown.vx v6, v0, a2
; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a2, 1
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB32_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
index e73415ac0085..8210ea22a6ee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
@@ -67,13 +67,13 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O2-NEXT: lui a0, %hi(.L.str)
; SPILL-O2-NEXT: addi a0, a0, %lo(.L.str)
; SPILL-O2-NEXT: call puts
-; SPILL-O2-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: add a0, sp, a0
; SPILL-O2-NEXT: addi a0, a0, 16
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; SPILL-O2-NEXT: vfadd.vv v8, v9, v8
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
index 483f689cf633..352362908898 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
@@ -70,13 +70,13 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O2-NEXT: lui a0, %hi(.L.str)
; SPILL-O2-NEXT: addi a0, a0, %lo(.L.str)
; SPILL-O2-NEXT: call puts
-; SPILL-O2-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: add a0, sp, a0
; SPILL-O2-NEXT: addi a0, a0, 16
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; SPILL-O2-NEXT: vfadd.vv v8, v9, v8
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
@@ -102,11 +102,11 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O2-VLEN128-NEXT: lui a0, %hi(.L.str)
; SPILL-O2-VLEN128-NEXT: addi a0, a0, %lo(.L.str)
; SPILL-O2-VLEN128-NEXT: call puts
-; SPILL-O2-VLEN128-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 32
; SPILL-O2-VLEN128-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; SPILL-O2-VLEN128-NEXT: vfadd.vv v8, v9, v8
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 32
; SPILL-O2-VLEN128-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
index 743016a7cbcd..b9ede8d68e3c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -893,8 +893,8 @@ define void @test_dag_loop() {
; CHECK-LABEL: test_dag_loop:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmclr.m v0
+; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vsetivli zero, 0, e8, m4, tu, mu
; CHECK-NEXT: vssubu.vx v12, v8, zero, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
index cc967396153b..3dba88136306 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
@@ -2212,19 +2212,18 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFH-NEXT: sub a4, a2, a3
; ZVFH-NEXT: sltu a5, a2, a4
; ZVFH-NEXT: addi a5, a5, -1
-; ZVFH-NEXT: and a4, a5, a4
-; ZVFH-NEXT: vsetvli a5, zero, e8, m1, ta, ma
; ZVFH-NEXT: vl8re16.v v0, (a0)
; ZVFH-NEXT: addi a0, sp, 16
; ZVFH-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
+; ZVFH-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; ZVFH-NEXT: vslidedown.vx v0, v24, a1
+; ZVFH-NEXT: and a4, a5, a4
; ZVFH-NEXT: vsetvli zero, a4, e16, m8, ta, ma
; ZVFH-NEXT: vmfeq.vv v7, v16, v8, v0.t
; ZVFH-NEXT: bltu a2, a3, .LBB85_2
; ZVFH-NEXT: # %bb.1:
; ZVFH-NEXT: mv a2, a3
; ZVFH-NEXT: .LBB85_2:
-; ZVFH-NEXT: vsetvli zero, a2, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v24
; ZVFH-NEXT: csrr a0, vlenb
; ZVFH-NEXT: slli a0, a0, 3
@@ -2233,6 +2232,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFH-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFH-NEXT: addi a0, sp, 16
; ZVFH-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFH-NEXT: vsetvli zero, a2, e16, m8, ta, ma
; ZVFH-NEXT: vmfeq.vv v16, v8, v24, v0.t
; ZVFH-NEXT: add a0, a1, a1
; ZVFH-NEXT: vsetvli zero, a0, e8, m1, ta, ma
@@ -2249,133 +2249,152 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 5
+; ZVFHMIN-NEXT: li a3, 34
+; ZVFHMIN-NEXT: mul a1, a1, a3
; ZVFHMIN-NEXT: sub sp, sp, a1
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 4
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x22, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 34 * vlenb
+; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
+; ZVFHMIN-NEXT: li a3, 18
+; ZVFHMIN-NEXT: mul a1, a1, a3
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: srli a1, a3, 1
-; ZVFHMIN-NEXT: vsetvli a4, zero, e8, m1, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v17, v0
-; ZVFHMIN-NEXT: vslidedown.vx v18, v0, a1
-; ZVFHMIN-NEXT: slli a4, a3, 3
-; ZVFHMIN-NEXT: add a4, a0, a4
-; ZVFHMIN-NEXT: vl8re16.v v0, (a4)
+; ZVFHMIN-NEXT: slli a1, a3, 3
+; ZVFHMIN-NEXT: add a1, a0, a1
+; ZVFHMIN-NEXT: vl8re16.v v16, (a1)
; ZVFHMIN-NEXT: slli a5, a3, 2
-; ZVFHMIN-NEXT: sub a4, a2, a5
-; ZVFHMIN-NEXT: sltu a6, a2, a4
-; ZVFHMIN-NEXT: addi a6, a6, -1
-; ZVFHMIN-NEXT: and a6, a6, a4
+; ZVFHMIN-NEXT: sub a1, a2, a5
+; ZVFHMIN-NEXT: sltu a4, a2, a1
+; ZVFHMIN-NEXT: addi a4, a4, -1
+; ZVFHMIN-NEXT: and a6, a4, a1
; ZVFHMIN-NEXT: slli a4, a3, 1
-; ZVFHMIN-NEXT: sub a7, a6, a4
-; ZVFHMIN-NEXT: sltu t0, a6, a7
-; ZVFHMIN-NEXT: addi t0, t0, -1
-; ZVFHMIN-NEXT: and a7, t0, a7
+; ZVFHMIN-NEXT: sub a1, a6, a4
+; ZVFHMIN-NEXT: sltu a7, a6, a1
+; ZVFHMIN-NEXT: addi a7, a7, -1
+; ZVFHMIN-NEXT: and a7, a7, a1
+; ZVFHMIN-NEXT: srli a1, a3, 1
+; ZVFHMIN-NEXT: csrr t0, vlenb
+; ZVFHMIN-NEXT: add t0, sp, t0
+; ZVFHMIN-NEXT: addi t0, t0, 16
+; ZVFHMIN-NEXT: vs1r.v v0, (t0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli t0, zero, e8, m1, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v8, v0, a1
; ZVFHMIN-NEXT: srli a3, a3, 2
+; ZVFHMIN-NEXT: addi t0, sp, 16
+; ZVFHMIN-NEXT: vs1r.v v8, (t0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli t0, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v16, v18, a3
-; ZVFHMIN-NEXT: vsetvli t0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v0, v8, a3
; ZVFHMIN-NEXT: vl8re16.v v8, (a0)
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li t0, 24
+; ZVFHMIN-NEXT: li t0, 26
; ZVFHMIN-NEXT: mul a0, a0, t0
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: slli a0, a0, 1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
+; ZVFHMIN-NEXT: vmv4r.v v16, v24
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li t0, 10
+; ZVFHMIN-NEXT: mul a0, a0, t0
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28
; ZVFHMIN-NEXT: vsetvli zero, a7, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vmfeq.vv v20, v8, v24, v0.t
+; ZVFHMIN-NEXT: vmfeq.vv v26, v16, v8, v0.t
; ZVFHMIN-NEXT: bltu a6, a4, .LBB85_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a6, a4
; ZVFHMIN-NEXT: .LBB85_2:
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: li a7, 10
+; ZVFHMIN-NEXT: mul a0, a0, a7
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl1r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, a6, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v18
-; ZVFHMIN-NEXT: vmfeq.vv v6, v24, v8, v0.t
+; ZVFHMIN-NEXT: vmfeq.vv v6, v16, v8, v0.t
; ZVFHMIN-NEXT: add a0, a3, a3
; ZVFHMIN-NEXT: bltu a2, a5, .LBB85_4
; ZVFHMIN-NEXT: # %bb.3:
; ZVFHMIN-NEXT: mv a2, a5
; ZVFHMIN-NEXT: .LBB85_4:
; ZVFHMIN-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslideup.vx v6, v20, a3
+; ZVFHMIN-NEXT: vslideup.vx v6, v26, a3
; ZVFHMIN-NEXT: sub a5, a2, a4
; ZVFHMIN-NEXT: sltu a6, a2, a5
; ZVFHMIN-NEXT: addi a6, a6, -1
; ZVFHMIN-NEXT: and a5, a6, a5
+; ZVFHMIN-NEXT: csrr a6, vlenb
+; ZVFHMIN-NEXT: add a6, sp, a6
+; ZVFHMIN-NEXT: addi a6, a6, 16
+; ZVFHMIN-NEXT: vl1r.v v8, (a6) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vmv1r.v v7, v8
; ZVFHMIN-NEXT: vsetvli a6, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v7, v17
-; ZVFHMIN-NEXT: vslidedown.vx v0, v17, a3
-; ZVFHMIN-NEXT: vsetvli a6, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v0, v8, a3
; ZVFHMIN-NEXT: csrr a6, vlenb
-; ZVFHMIN-NEXT: slli a6, a6, 3
+; ZVFHMIN-NEXT: li a7, 18
+; ZVFHMIN-NEXT: mul a6, a6, a7
; ZVFHMIN-NEXT: add a6, sp, a6
; ZVFHMIN-NEXT: addi a6, a6, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a6) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
+; ZVFHMIN-NEXT: vl8r.v v24, (a6) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli a6, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
; ZVFHMIN-NEXT: csrr a6, vlenb
-; ZVFHMIN-NEXT: slli a6, a6, 4
+; ZVFHMIN-NEXT: li a7, 10
+; ZVFHMIN-NEXT: mul a6, a6, a7
; ZVFHMIN-NEXT: add a6, sp, a6
; ZVFHMIN-NEXT: addi a6, a6, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a6, vlenb
-; ZVFHMIN-NEXT: li a7, 24
+; ZVFHMIN-NEXT: li a7, 26
; ZVFHMIN-NEXT: mul a6, a6, a7
; ZVFHMIN-NEXT: add a6, sp, a6
; ZVFHMIN-NEXT: addi a6, a6, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a6) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
+; ZVFHMIN-NEXT: vl8r.v v16, (a6) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
+; ZVFHMIN-NEXT: csrr a6, vlenb
+; ZVFHMIN-NEXT: li a7, 10
+; ZVFHMIN-NEXT: mul a6, a6, a7
+; ZVFHMIN-NEXT: add a6, sp, a6
+; ZVFHMIN-NEXT: addi a6, a6, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a6) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, a5, e32, m8, ta, ma
-; ZVFHMIN-NEXT: csrr a5, vlenb
-; ZVFHMIN-NEXT: slli a5, a5, 4
-; ZVFHMIN-NEXT: add a5, sp, a5
-; ZVFHMIN-NEXT: addi a5, a5, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vmfeq.vv v5, v24, v8, v0.t
+; ZVFHMIN-NEXT: vmfeq.vv v5, v16, v8, v0.t
; ZVFHMIN-NEXT: bltu a2, a4, .LBB85_6
; ZVFHMIN-NEXT: # %bb.5:
; ZVFHMIN-NEXT: mv a2, a4
; ZVFHMIN-NEXT: .LBB85_6:
; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: csrr a4, vlenb
-; ZVFHMIN-NEXT: li a5, 24
+; ZVFHMIN-NEXT: li a5, 26
; ZVFHMIN-NEXT: mul a4, a4, a5
; ZVFHMIN-NEXT: add a4, sp, a4
; ZVFHMIN-NEXT: addi a4, a4, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vmv1r.v v0, v7
-; ZVFHMIN-NEXT: vmfeq.vv v8, v24, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslideup.vx v8, v5, a3
; ZVFHMIN-NEXT: add a0, a1, a1
@@ -2383,7 +2402,8 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: vslideup.vx v8, v6, a1
; ZVFHMIN-NEXT: vmv.v.v v0, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: li a1, 34
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
@@ -3474,130 +3494,154 @@ define <vscale x 32 x i1> @fcmp_oeq_vv_nxv32f64(<vscale x 32 x double> %va, <vsc
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: li a3, 48
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: li a3, 40
+; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: csrr a3, vlenb
+; CHECK-NEXT: csrr a4, vlenb
; CHECK-NEXT: li a1, 24
-; CHECK-NEXT: mul t2, a3, a1
-; CHECK-NEXT: slli a7, a3, 3
-; CHECK-NEXT: srli a4, a3, 2
-; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v7, v0, a4
-; CHECK-NEXT: srli a1, a3, 3
-; CHECK-NEXT: slli t0, a3, 4
-; CHECK-NEXT: add a5, a2, a7
-; CHECK-NEXT: vl8re64.v v16, (a5)
-; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
-; CHECK-NEXT: slli a5, a3, 1
-; CHECK-NEXT: vslidedown.vx v0, v0, a1
-; CHECK-NEXT: mv t1, a6
+; CHECK-NEXT: mul t0, a4, a1
+; CHECK-NEXT: slli t1, a4, 3
+; CHECK-NEXT: srli a1, a4, 2
+; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v7, v0, a1
+; CHECK-NEXT: srli a3, a4, 3
+; CHECK-NEXT: add a5, a2, t1
+; CHECK-NEXT: vl8re64.v v8, (a5)
+; CHECK-NEXT: slli t3, a4, 4
+; CHECK-NEXT: slli a5, a4, 1
+; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a3
+; CHECK-NEXT: mv a7, a6
; CHECK-NEXT: bltu a6, a5, .LBB171_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv t1, a5
+; CHECK-NEXT: mv a7, a5
; CHECK-NEXT: .LBB171_2:
-; CHECK-NEXT: add t2, a2, t2
-; CHECK-NEXT: add a7, a0, a7
-; CHECK-NEXT: add t0, a2, t0
-; CHECK-NEXT: vl8re64.v v8, (a2)
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: sub a2, t1, a3
-; CHECK-NEXT: sltu t3, t1, a2
-; CHECK-NEXT: addi t3, t3, -1
-; CHECK-NEXT: and a2, t3, a2
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT: add t2, a2, t0
+; CHECK-NEXT: add t1, a0, t1
+; CHECK-NEXT: add t0, a2, t3
+; CHECK-NEXT: vl8re64.v v16, (a2)
; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: slli a2, a2, 4
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v6, v8, v16, v0.t
-; CHECK-NEXT: bltu t1, a3, .LBB171_4
+; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: sub a2, a7, a4
+; CHECK-NEXT: sltu t3, a7, a2
+; CHECK-NEXT: addi t3, t3, -1
+; CHECK-NEXT: and a2, t3, a2
+; CHECK-NEXT: csrr t3, vlenb
+; CHECK-NEXT: slli t3, t3, 5
+; CHECK-NEXT: add t3, sp, t3
+; CHECK-NEXT: addi t3, t3, 16
+; CHECK-NEXT: vl8r.v v16, (t3) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT: vmfeq.vv v6, v16, v8, v0.t
+; CHECK-NEXT: bltu a7, a4, .LBB171_4
; CHECK-NEXT: # %bb.3:
-; CHECK-NEXT: mv t1, a3
+; CHECK-NEXT: mv a7, a4
; CHECK-NEXT: .LBB171_4:
-; CHECK-NEXT: vl8re64.v v16, (t2)
+; CHECK-NEXT: vl8re64.v v8, (t2)
; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: slli a2, a2, 5
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v18, v7, a1
-; CHECK-NEXT: vsetvli zero, t1, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8re64.v v8, (t1)
; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 4
+; CHECK-NEXT: li t1, 24
+; CHECK-NEXT: mul a2, a2, t1
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v17, v24, v8, v0.t
-; CHECK-NEXT: vl8re64.v v8, (a7)
+; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vx v18, v7, a3
+; CHECK-NEXT: vl8re64.v v8, (t0)
; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 4
+; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: vl8re64.v v8, (t0)
-; CHECK-NEXT: add a2, a1, a1
+; CHECK-NEXT: vl8re64.v v8, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a2, 40
+; CHECK-NEXT: mul a0, a0, a2
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, ma
+; CHECK-NEXT: vmfeq.vv v17, v24, v8, v0.t
+; CHECK-NEXT: add a2, a3, a3
+; CHECK-NEXT: sub a0, a6, a5
+; CHECK-NEXT: sltu a5, a6, a0
+; CHECK-NEXT: addi a5, a5, -1
+; CHECK-NEXT: and a0, a5, a0
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, tu, ma
-; CHECK-NEXT: sub a2, a6, a5
-; CHECK-NEXT: sltu a5, a6, a2
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: addi a0, a5, -1
-; CHECK-NEXT: and a0, a0, a2
-; CHECK-NEXT: vslideup.vx v17, v6, a1
+; CHECK-NEXT: vslideup.vx v17, v6, a3
; CHECK-NEXT: mv a2, a0
-; CHECK-NEXT: bltu a0, a3, .LBB171_6
+; CHECK-NEXT: bltu a0, a4, .LBB171_6
; CHECK-NEXT: # %bb.5:
-; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: mv a2, a4
; CHECK-NEXT: .LBB171_6:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
+; CHECK-NEXT: csrr a5, vlenb
+; CHECK-NEXT: slli a5, a5, 3
+; CHECK-NEXT: add a5, sp, a5
+; CHECK-NEXT: addi a5, a5, 16
+; CHECK-NEXT: vl8r.v v8, (a5) # Unknown-size Folded Reload
+; CHECK-NEXT: addi a5, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v16, v24, v8, v0.t
-; CHECK-NEXT: add a2, a4, a1
-; CHECK-NEXT: vsetvli zero, a2, e8, mf2, tu, ma
-; CHECK-NEXT: vslideup.vx v17, v16, a4
-; CHECK-NEXT: sub a2, a0, a3
+; CHECK-NEXT: sub a2, a0, a4
; CHECK-NEXT: sltu a0, a0, a2
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a2
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v18
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v16, v8, v24, v0.t
-; CHECK-NEXT: slli a0, a1, 1
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vslideup.vx v17, v16, a0
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 5
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: li a4, 24
+; CHECK-NEXT: mul a2, a2, a4
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vmfeq.vv v18, v8, v24, v0.t
+; CHECK-NEXT: add a0, a1, a3
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vx v17, v16, a1
+; CHECK-NEXT: slli a0, a3, 1
+; CHECK-NEXT: add a0, a0, a3
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetvli zero, a3, e8, mf2, ta, ma
+; CHECK-NEXT: vslideup.vx v17, v18, a0
; CHECK-NEXT: vmv1r.v v0, v17
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 24
+; CHECK-NEXT: li a1, 48
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
index 85f5ffd784e9..eb8c58d2d377 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
@@ -1106,10 +1106,10 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: vlm.v v0, (a2)
; CHECK-NEXT: sub a2, a3, a1
; CHECK-NEXT: sltu a4, a3, a2
-; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: vl8r.v v24, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: and a2, a4, a2
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vv v6, v16, v8, v0.t
@@ -1117,7 +1117,6 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a3, a1
; CHECK-NEXT: .LBB96_2:
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -1126,6 +1125,7 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vv v16, v8, v24, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vmv1r.v v8, v6
@@ -1156,8 +1156,8 @@ define <vscale x 128 x i1> @icmp_eq_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB97_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vmv1r.v v8, v25
@@ -1186,8 +1186,8 @@ define <vscale x 128 x i1> @icmp_eq_vx_swap_nxv128i8(<vscale x 128 x i8> %va, i8
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB98_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vmv1r.v v8, v25
@@ -2257,19 +2257,18 @@ define <vscale x 32 x i1> @icmp_eq_vv_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-NEXT: sub a4, a2, a3
; CHECK-NEXT: sltu a5, a2, a4
; CHECK-NEXT: addi a5, a5, -1
-; CHECK-NEXT: and a4, a5, a4
-; CHECK-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
; CHECK-NEXT: vl8re32.v v0, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v24, a1
+; CHECK-NEXT: and a4, a5, a4
; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma
; CHECK-NEXT: vmseq.vv v7, v16, v8, v0.t
; CHECK-NEXT: bltu a2, a3, .LBB189_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB189_2:
-; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -2278,6 +2277,7 @@ define <vscale x 32 x i1> @icmp_eq_vv_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vmseq.vv v16, v8, v24, v0.t
; CHECK-NEXT: add a0, a1, a1
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
@@ -2311,8 +2311,8 @@ define <vscale x 32 x i1> @icmp_eq_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: .LBB190_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
; CHECK-NEXT: add a0, a2, a2
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
@@ -2344,8 +2344,8 @@ define <vscale x 32 x i1> @icmp_eq_vx_swap_nxv32i32(<vscale x 32 x i32> %va, i32
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: .LBB191_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
; CHECK-NEXT: add a0, a2, a2
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll
index ab7da9e0faf2..6e327457bebf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll
@@ -49,8 +49,8 @@ define <8 x i8> @v4i8_2(<4 x i8> %a, <4 x i8> %b) {
; CHECK-NEXT: vid.v v11
; CHECK-NEXT: vrsub.vi v12, v11, 7
; CHECK-NEXT: vrgather.vv v10, v8, v12
-; CHECK-NEXT: vrsub.vi v8, v11, 3
; CHECK-NEXT: vmv.v.i v0, 15
+; CHECK-NEXT: vrsub.vi v8, v11, 3
; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
@@ -78,11 +78,11 @@ define <16 x i8> @v8i8_2(<8 x i8> %a, <8 x i8> %b) {
; CHECK-NEXT: vid.v v11
; CHECK-NEXT: vrsub.vi v12, v11, 15
; CHECK-NEXT: vrgather.vv v10, v8, v12
-; CHECK-NEXT: vrsub.vi v8, v11, 7
; CHECK-NEXT: li a0, 255
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vrsub.vi v8, v11, 7
; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
@@ -174,8 +174,8 @@ define <8 x i16> @v4i16_2(<4 x i16> %a, <4 x i16> %b) {
; CHECK-NEXT: vid.v v11
; CHECK-NEXT: vrsub.vi v12, v11, 7
; CHECK-NEXT: vrgather.vv v10, v8, v12
-; CHECK-NEXT: vrsub.vi v8, v11, 3
; CHECK-NEXT: vmv.v.i v0, 15
+; CHECK-NEXT: vrsub.vi v8, v11, 3
; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
@@ -234,10 +234,10 @@ define <32 x i16> @v16i16_2(<16 x i16> %a, <16 x i16> %b) {
; CHECK-NEXT: addi a0, a0, %lo(.LCPI15_0)
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vle16.v v20, (a0)
-; CHECK-NEXT: vmv2r.v v16, v10
+; CHECK-NEXT: vle16.v v16, (a0)
+; CHECK-NEXT: vmv2r.v v20, v10
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vrgather.vv v8, v12, v20
+; CHECK-NEXT: vrgather.vv v8, v12, v16
; CHECK-NEXT: vid.v v12
; CHECK-NEXT: vrsub.vi v12, v12, 15
; CHECK-NEXT: lui a0, 16
@@ -245,7 +245,7 @@ define <32 x i16> @v16i16_2(<16 x i16> %a, <16 x i16> %b) {
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; CHECK-NEXT: vrgather.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vrgather.vv v8, v20, v12, v0.t
; CHECK-NEXT: ret
%v32i16 = shufflevector <16 x i16> %a, <16 x i16> %b, <32 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
ret <32 x i16> %v32i16
@@ -329,18 +329,18 @@ define <16 x i32> @v8i32_2(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: v8i32_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vid.v v14
-; CHECK-NEXT: vrsub.vi v18, v14, 15
+; CHECK-NEXT: vid.v v10
+; CHECK-NEXT: vrsub.vi v18, v10, 15
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v8, v12, v18
+; CHECK-NEXT: vrgatherei16.vv v12, v8, v18
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vrsub.vi v12, v14, 7
+; CHECK-NEXT: vrsub.vi v8, v10, 7
; CHECK-NEXT: li a0, 255
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT: vrgatherei16.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vrgatherei16.vv v12, v16, v8, v0.t
+; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%v16i32 = shufflevector <8 x i32> %a, <8 x i32> %b, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
ret <16 x i32> %v16i32
@@ -492,8 +492,8 @@ define <8 x half> @v4f16_2(<4 x half> %a, <4 x half> %b) {
; CHECK-NEXT: vid.v v11
; CHECK-NEXT: vrsub.vi v12, v11, 7
; CHECK-NEXT: vrgather.vv v10, v8, v12
-; CHECK-NEXT: vrsub.vi v8, v11, 3
; CHECK-NEXT: vmv.v.i v0, 15
+; CHECK-NEXT: vrsub.vi v8, v11, 3
; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
@@ -638,18 +638,18 @@ define <16 x float> @v8f32_2(<8 x float> %a, <8 x float> %b) {
; CHECK-LABEL: v8f32_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vid.v v14
-; CHECK-NEXT: vrsub.vi v18, v14, 15
+; CHECK-NEXT: vid.v v10
+; CHECK-NEXT: vrsub.vi v18, v10, 15
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v8, v12, v18
+; CHECK-NEXT: vrgatherei16.vv v12, v8, v18
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vrsub.vi v12, v14, 7
+; CHECK-NEXT: vrsub.vi v8, v10, 7
; CHECK-NEXT: li a0, 255
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT: vrgatherei16.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vrgatherei16.vv v12, v16, v8, v0.t
+; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%v16f32 = shufflevector <8 x float> %a, <8 x float> %b, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
ret <16 x float> %v16f32
diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index 8a297db7a3b8..d1c98f828e76 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -4873,8 +4873,8 @@ define void @sink_splat_vp_icmp(ptr nocapture %x, i32 signext %y, <4 x i1> %m, i
; CHECK-NEXT: .LBB102_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vmseq.vx v0, v10, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v9, (a0), v0.t
@@ -4914,8 +4914,8 @@ define void @sink_splat_vp_fcmp(ptr nocapture %x, float %y, <4 x i1> %m, i32 zer
; CHECK-NEXT: .LBB103_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmfeq.vf v0, v10, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v9, (a0), v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll
index 5d5a2a3b898b..28583efccdbc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll
@@ -76,7 +76,6 @@ define i32 @splat_vector_split_i64() {
; CHECK-NEXT: vand.vv v8, v8, v12
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: vor.vv v8, v10, v8
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 3
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll
index b7fe722958bf..9d0234d2ec2f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll
@@ -33,13 +33,13 @@ define <4 x i32> @vec_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: lui a0, 524288
; CHECK-NEXT: addi a1, a0, -1
-; CHECK-NEXT: vsll.vv v10, v8, v9
-; CHECK-NEXT: vsra.vv v9, v10, v9
+; CHECK-NEXT: vmv.v.x v10, a1
+; CHECK-NEXT: vsll.vv v11, v8, v9
+; CHECK-NEXT: vsra.vv v9, v11, v9
; CHECK-NEXT: vmsne.vv v8, v8, v9
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vmerge.vxm v9, v9, a0, v0
+; CHECK-NEXT: vmerge.vxm v9, v10, a0, v0
; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v9, v0
+; CHECK-NEXT: vmerge.vvm v8, v11, v9, v0
; CHECK-NEXT: ret
%tmp = call <4 x i32> @llvm.sshl.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
ret <4 x i32> %tmp
@@ -52,13 +52,13 @@ define <8 x i16> @vec_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: lui a0, 8
; CHECK-NEXT: addi a1, a0, -1
-; CHECK-NEXT: vsll.vv v10, v8, v9
-; CHECK-NEXT: vsra.vv v9, v10, v9
+; CHECK-NEXT: vmv.v.x v10, a1
+; CHECK-NEXT: vsll.vv v11, v8, v9
+; CHECK-NEXT: vsra.vv v9, v11, v9
; CHECK-NEXT: vmsne.vv v8, v8, v9
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vmerge.vxm v9, v9, a0, v0
+; CHECK-NEXT: vmerge.vxm v9, v10, a0, v0
; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v9, v0
+; CHECK-NEXT: vmerge.vvm v8, v11, v9, v0
; CHECK-NEXT: ret
%tmp = call <8 x i16> @llvm.sshl.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
ret <8 x i16> %tmp
@@ -70,14 +70,14 @@ define <16 x i8> @vec_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: li a0, 127
-; CHECK-NEXT: vsll.vv v10, v8, v9
-; CHECK-NEXT: vsra.vv v9, v10, v9
-; CHECK-NEXT: vmsne.vv v8, v8, v9
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.v.x v10, a0
; CHECK-NEXT: li a0, 128
-; CHECK-NEXT: vmerge.vxm v9, v9, a0, v0
+; CHECK-NEXT: vsll.vv v11, v8, v9
+; CHECK-NEXT: vsra.vv v9, v11, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vxm v9, v10, a0, v0
; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v9, v0
+; CHECK-NEXT: vmerge.vvm v8, v11, v9, v0
; CHECK-NEXT: ret
%tmp = call <16 x i8> @llvm.sshl.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
ret <16 x i8> %tmp
@@ -115,13 +115,13 @@ define <vscale x 4 x i32> @vec_nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32>
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: lui a0, 524288
; CHECK-NEXT: addi a1, a0, -1
-; CHECK-NEXT: vsll.vv v12, v8, v10
-; CHECK-NEXT: vsra.vv v14, v12, v10
-; CHECK-NEXT: vmsne.vv v10, v8, v14
-; CHECK-NEXT: vmv.v.x v8, a1
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
+; CHECK-NEXT: vmv.v.x v12, a1
+; CHECK-NEXT: vsll.vv v14, v8, v10
+; CHECK-NEXT: vsra.vv v16, v14, v10
+; CHECK-NEXT: vmsne.vv v10, v8, v16
+; CHECK-NEXT: vmerge.vxm v8, v12, a0, v0
; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT: vmerge.vvm v8, v14, v8, v0
; CHECK-NEXT: ret
%tmp = call <vscale x 4 x i32> @llvm.sshl.sat.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y)
ret <vscale x 4 x i32> %tmp
@@ -134,13 +134,13 @@ define <vscale x 8 x i16> @vec_nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16>
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: lui a0, 8
; CHECK-NEXT: addi a1, a0, -1
-; CHECK-NEXT: vsll.vv v12, v8, v10
-; CHECK-NEXT: vsra.vv v14, v12, v10
-; CHECK-NEXT: vmsne.vv v10, v8, v14
-; CHECK-NEXT: vmv.v.x v8, a1
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
+; CHECK-NEXT: vmv.v.x v12, a1
+; CHECK-NEXT: vsll.vv v14, v8, v10
+; CHECK-NEXT: vsra.vv v16, v14, v10
+; CHECK-NEXT: vmsne.vv v10, v8, v16
+; CHECK-NEXT: vmerge.vxm v8, v12, a0, v0
; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT: vmerge.vvm v8, v14, v8, v0
; CHECK-NEXT: ret
%tmp = call <vscale x 8 x i16> @llvm.sshl.sat.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y)
ret <vscale x 8 x i16> %tmp
@@ -152,14 +152,14 @@ define <vscale x 16 x i8> @vec_nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8>
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: li a0, 127
-; CHECK-NEXT: vsll.vv v12, v8, v10
-; CHECK-NEXT: vsra.vv v14, v12, v10
-; CHECK-NEXT: vmsne.vv v10, v8, v14
-; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: li a0, 128
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
+; CHECK-NEXT: vsll.vv v14, v8, v10
+; CHECK-NEXT: vsra.vv v16, v14, v10
+; CHECK-NEXT: vmsne.vv v10, v8, v16
+; CHECK-NEXT: vmerge.vxm v8, v12, a0, v0
; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT: vmerge.vvm v8, v14, v8, v0
; CHECK-NEXT: ret
%tmp = call <vscale x 16 x i8> @llvm.sshl.sat.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y)
ret <vscale x 16 x i8> %tmp
diff --git a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
index eff8c26d4d06..b3150ecea6c0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
@@ -562,8 +562,8 @@ define <vscale x 16 x i64> @add_stepvector_nxv16i64() {
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vid.v v8
; RV32-NEXT: vadd.vv v8, v8, v8
@@ -597,8 +597,8 @@ define <vscale x 16 x i64> @mul_stepvector_nxv16i64() {
; RV32-NEXT: slli a1, a0, 1
; RV32-NEXT: add a0, a1, a0
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vid.v v8
; RV32-NEXT: li a0, 3
@@ -686,8 +686,8 @@ define <vscale x 16 x i64> @shl_stepvector_nxv16i64() {
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 2
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vid.v v8
; RV32-NEXT: vsll.vi v8, v8, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
index 0e2105d5cba8..4d3bced0bcb5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
@@ -588,15 +588,15 @@ define <vscale x 16 x double> @strided_load_nxv16f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV32-NEXT: mv a3, a4
; CHECK-RV32-NEXT: .LBB49_2:
; CHECK-RV32-NEXT: mul a5, a3, a1
-; CHECK-RV32-NEXT: add a5, a0, a5
; CHECK-RV32-NEXT: srli a4, a4, 3
; CHECK-RV32-NEXT: vsetvli a6, zero, e8, mf4, ta, ma
; CHECK-RV32-NEXT: vslidedown.vx v8, v9, a4
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-RV32-NEXT: add a5, a0, a5
; CHECK-RV32-NEXT: vmv1r.v v0, v8
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v16, (a5), a1, v0.t
-; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV32-NEXT: vmv1r.v v0, v9
+; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
@@ -613,15 +613,15 @@ define <vscale x 16 x double> @strided_load_nxv16f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV64-NEXT: mv a2, a4
; CHECK-RV64-NEXT: .LBB49_2:
; CHECK-RV64-NEXT: mul a5, a2, a1
-; CHECK-RV64-NEXT: add a5, a0, a5
; CHECK-RV64-NEXT: srli a4, a4, 3
; CHECK-RV64-NEXT: vsetvli a6, zero, e8, mf4, ta, ma
; CHECK-RV64-NEXT: vslidedown.vx v8, v9, a4
-; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; CHECK-RV64-NEXT: add a5, a0, a5
; CHECK-RV64-NEXT: vmv1r.v v0, v8
+; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v16, (a5), a1, v0.t
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV64-NEXT: vmv1r.v v0, v9
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%v = call <vscale x 16 x double> @llvm.experimental.vp.strided.load.nxv16f64.p0.i64(ptr %ptr, i64 %stride, <vscale x 16 x i1> %mask, i32 %evl)
@@ -697,10 +697,10 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV32-NEXT: mv a5, a2
; CHECK-RV32-NEXT: .LBB51_4:
; CHECK-RV32-NEXT: mul t1, a5, a1
-; CHECK-RV32-NEXT: add t1, a0, t1
; CHECK-RV32-NEXT: srli t2, a2, 3
; CHECK-RV32-NEXT: vsetvli t3, zero, e8, mf4, ta, ma
; CHECK-RV32-NEXT: vslidedown.vx v0, v8, t2
+; CHECK-RV32-NEXT: add t1, a0, t1
; CHECK-RV32-NEXT: vsetvli zero, t0, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v16, (t1), a1, v0.t
; CHECK-RV32-NEXT: sub a7, a3, a7
@@ -712,14 +712,14 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV32-NEXT: mv a3, a2
; CHECK-RV32-NEXT: .LBB51_6:
; CHECK-RV32-NEXT: mul a6, a6, a1
-; CHECK-RV32-NEXT: add a6, a0, a6
; CHECK-RV32-NEXT: srli a2, a2, 2
; CHECK-RV32-NEXT: vsetvli a7, zero, e8, mf2, ta, ma
; CHECK-RV32-NEXT: vslidedown.vx v0, v8, a2
+; CHECK-RV32-NEXT: add a6, a0, a6
; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v24, (a6), a1, v0.t
-; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, ma
; CHECK-RV32-NEXT: vmv1r.v v0, v8
+; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: vs1r.v v24, (a4)
; CHECK-RV32-NEXT: ret
@@ -744,10 +744,10 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV64-NEXT: mv a5, a4
; CHECK-RV64-NEXT: .LBB51_4:
; CHECK-RV64-NEXT: mul t1, a5, a1
-; CHECK-RV64-NEXT: add t1, a0, t1
; CHECK-RV64-NEXT: srli t2, a4, 3
; CHECK-RV64-NEXT: vsetvli t3, zero, e8, mf4, ta, ma
; CHECK-RV64-NEXT: vslidedown.vx v0, v8, t2
+; CHECK-RV64-NEXT: add t1, a0, t1
; CHECK-RV64-NEXT: vsetvli zero, t0, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v16, (t1), a1, v0.t
; CHECK-RV64-NEXT: sub a7, a2, a7
@@ -759,14 +759,14 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV64-NEXT: mv a2, a4
; CHECK-RV64-NEXT: .LBB51_6:
; CHECK-RV64-NEXT: mul a6, a6, a1
-; CHECK-RV64-NEXT: add a6, a0, a6
; CHECK-RV64-NEXT: srli a4, a4, 2
; CHECK-RV64-NEXT: vsetvli a7, zero, e8, mf2, ta, ma
; CHECK-RV64-NEXT: vslidedown.vx v0, v8, a4
+; CHECK-RV64-NEXT: add a6, a0, a6
; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v24, (a6), a1, v0.t
-; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, ma
; CHECK-RV64-NEXT: vmv1r.v v0, v8
+; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: vs1r.v v24, (a3)
; CHECK-RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
index 9378bb3d3ca6..e8704b35f31f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
@@ -504,10 +504,10 @@ define void @strided_store_nxv16f64(<vscale x 16 x double> %v, ptr %ptr, i32 sig
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: and a2, a2, a5
; CHECK-NEXT: mul a4, a4, a1
-; CHECK-NEXT: add a0, a0, a4
; CHECK-NEXT: srli a3, a3, 3
-; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a3
+; CHECK-NEXT: add a0, a0, a4
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vsse64.v v16, (a0), a1, v0.t
; CHECK-NEXT: ret
@@ -567,36 +567,36 @@ define void @strided_store_nxv17f64(<vscale x 17 x double> %v, ptr %ptr, i32 sig
; CHECK-NEXT: vl8re64.v v0, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, ma
; CHECK-NEXT: vsse64.v v8, (a1), a2, v0.t
; CHECK-NEXT: sub a0, a5, a4
; CHECK-NEXT: sltu t0, a5, a0
; CHECK-NEXT: addi t0, t0, -1
-; CHECK-NEXT: and a0, t0, a0
-; CHECK-NEXT: mul a7, a7, a2
-; CHECK-NEXT: add a7, a1, a7
-; CHECK-NEXT: srli t0, a4, 3
+; CHECK-NEXT: and t0, t0, a0
+; CHECK-NEXT: mul a0, a7, a2
+; CHECK-NEXT: add a7, a1, a0
+; CHECK-NEXT: srli a0, a4, 3
; CHECK-NEXT: vsetvli t1, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v24, t0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v24, a0
; CHECK-NEXT: sub a0, a3, a6
; CHECK-NEXT: sltu a3, a3, a0
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a0, a3, a0
+; CHECK-NEXT: vsetvli zero, t0, e64, m8, ta, ma
; CHECK-NEXT: vsse64.v v16, (a7), a2, v0.t
; CHECK-NEXT: bltu a0, a4, .LBB43_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a0, a4
; CHECK-NEXT: .LBB43_6:
; CHECK-NEXT: mul a3, a5, a2
-; CHECK-NEXT: add a1, a1, a3
; CHECK-NEXT: srli a4, a4, 2
-; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v24, a4
+; CHECK-NEXT: add a1, a1, a3
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsse64.v v8, (a1), a2, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll
index dd2c14b037ee..cd9edca1d4c4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll
@@ -37,9 +37,9 @@ define <vscale x 8 x i8> @vaaddu_vx_nxv8i8_floor(<vscale x 8 x i8> %x, i8 %y) {
define <vscale x 8 x i8> @vaaddu_vv_nxv8i8_floor_sexti16(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y) {
; CHECK-LABEL: vaaddu_vv_nxv8i8_floor_sexti16:
; CHECK: # %bb.0:
+; CHECK-NEXT: csrwi vxrm, 2
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT: vwadd.vv v10, v8, v9
-; CHECK-NEXT: vnsrl.wi v8, v10, 1
+; CHECK-NEXT: vaadd.vv v8, v8, v9
; CHECK-NEXT: ret
%xzv = sext <vscale x 8 x i8> %x to <vscale x 8 x i16>
%yzv = sext <vscale x 8 x i8> %y to <vscale x 8 x i16>
@@ -226,12 +226,9 @@ define <vscale x 8 x i8> @vaaddu_vx_nxv8i8_ceil(<vscale x 8 x i8> %x, i8 %y) {
define <vscale x 8 x i8> @vaaddu_vv_nxv8i8_ceil_sexti16(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y) {
; CHECK-LABEL: vaaddu_vv_nxv8i8_ceil_sexti16:
; CHECK: # %bb.0:
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT: vwadd.vv v10, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vadd.vi v10, v10, 1
-; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vnsrl.wi v8, v10, 1
+; CHECK-NEXT: vaadd.vv v8, v8, v9
; CHECK-NEXT: ret
%xzv = sext <vscale x 8 x i8> %x to <vscale x 8 x i16>
%yzv = sext <vscale x 8 x i8> %y to <vscale x 8 x i16>
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
index 4b5e737d22eb..ede395f4df8e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
@@ -580,8 +580,8 @@ define <vscale x 128 x i8> @vadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB50_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 128 x i8> @llvm.vp.add.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
@@ -1359,8 +1359,8 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl)
@@ -1415,8 +1415,8 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB120_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%evl = call i32 @llvm.vscale.i32()
@@ -1451,8 +1451,8 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, <
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV64-NEXT: vadd.vi v8, v8, -1, v0.t
-; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vadd.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
%evl = call i32 @llvm.vscale.i32()
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
index 939a45e15c10..6e34d59a2d98 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
@@ -255,9 +255,9 @@ define <vscale x 32 x half> @vfsgnj_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -272,8 +272,8 @@ define <vscale x 32 x half> @vfsgnj_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -312,9 +312,9 @@ define <vscale x 32 x half> @vfsgnj_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll
index 6f06d8e570de..e59a9174b03d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll
@@ -44,8 +44,8 @@ define iXLen @intrinsic_vcpop_mask_m_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -98,8 +98,8 @@ define iXLen @intrinsic_vcpop_mask_m_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -138,8 +138,8 @@ define iXLen @intrinsic_vcpop_mask_m_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -178,8 +178,8 @@ define iXLen @intrinsic_vcpop_mask_m_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -218,8 +218,8 @@ define iXLen @intrinsic_vcpop_mask_m_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -258,8 +258,8 @@ define iXLen @intrinsic_vcpop_mask_m_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -298,8 +298,8 @@ define iXLen @intrinsic_vcpop_mask_m_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
index d02fe5b205f7..0ef7572890ef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
@@ -12,16 +12,16 @@ define {<16 x i1>, <16 x i1>} @vector_deinterleave_v16i1_v32i1(<32 x i1> %vec) {
; CHECK-NEXT: vmerge.vim v10, v8, 1, v0
; CHECK-NEXT: vid.v v9
; CHECK-NEXT: vadd.vv v11, v9, v9
-; CHECK-NEXT: vrgather.vv v9, v10, v11
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vrgather.vv v9, v10, v11
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vadd.vi v12, v11, -16
; CHECK-NEXT: li a0, -256
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vadd.vi v12, v11, -16
; CHECK-NEXT: vrgather.vv v9, v8, v12, v0.t
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vadd.vi v12, v11, 1
@@ -80,9 +80,8 @@ define {<2 x i64>, <2 x i64>} @vector_deinterleave_v2i64_v4i64(<4 x i64> %vec) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v8, 2
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 2
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v0, 2
; CHECK-NEXT: vrgather.vi v9, v8, 1
; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
; CHECK-NEXT: vslideup.vi v8, v10, 1
@@ -167,9 +166,8 @@ define {<2 x double>, <2 x double>} @vector_deinterleave_v2f64_v4f64(<4 x double
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v8, 2
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 2
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v0, 2
; CHECK-NEXT: vrgather.vi v9, v8, 1
; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
; CHECK-NEXT: vslideup.vi v8, v10, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
index 8f4ff37fffb0..f0f847c61f3b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
@@ -110,23 +110,22 @@ define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_load_nxv8i6
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: vl8re64.v v8, (a1)
-; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: vl8re64.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a2, 24
-; CHECK-NEXT: mul a1, a1, a2
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vl8re64.v v0, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: vadd.vv v16, v8, v8
-; CHECK-NEXT: vrgather.vv v8, v0, v16
+; CHECK-NEXT: mul a0, a0, a2
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8re64.v v0, (a1)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: vadd.vv v16, v8, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
@@ -134,34 +133,47 @@ define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_load_nxv8i6
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vrgather.vv v24, v8, v16
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vrgather.vv v8, v0, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vadd.vi v8, v16, 1
-; CHECK-NEXT: vrgather.vv v16, v0, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vrgather.vv v16, v0, v8
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vrgather.vv v24, v0, v8
-; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 24
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vmv4r.v v28, v8
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: li a1, 24
+; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmv4r.v v28, v8
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmv4r.v v20, v8
; CHECK-NEXT: vmv8r.v v8, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
index 7797577362c9..bcb008857ad3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
@@ -8,14 +8,15 @@ define {<vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv
; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v12, v8, 1, v0
+; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v0, a0
+; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v14, v8, 1, v0
+; CHECK-NEXT: vmerge.vim v12, v10, 1, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v14, v10, 1, v0
; CHECK-NEXT: vnsrl.wi v10, v12, 0
; CHECK-NEXT: vmsne.vi v8, v10, 0
; CHECK-NEXT: vnsrl.wi v10, v12, 8
@@ -90,25 +91,38 @@ declare {<vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.vector.deinterleave2.nxv4
define {<vscale x 64 x i1>, <vscale x 64 x i1>} @vector_deinterleave_nxv64i1_nxv128i1(<vscale x 128 x i1> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv64i1_nxv128i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v28, v8
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT: vmv.v.i v24, 0
+; CHECK-NEXT: vmerge.vim v16, v24, 1, v0
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT: vnsrl.wi v24, v16, 0
+; CHECK-NEXT: vnsrl.wi v8, v16, 0
+; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v28
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmerge.vim v24, v24, 1, v0
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT: vnsrl.wi v28, v8, 0
+; CHECK-NEXT: vnsrl.wi v12, v24, 0
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmsne.vi v7, v24, 0
+; CHECK-NEXT: vmsne.vi v7, v8, 0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs1r.v v7, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT: vnsrl.wi v24, v16, 8
-; CHECK-NEXT: vnsrl.wi v28, v8, 8
+; CHECK-NEXT: vnsrl.wi v0, v16, 8
+; CHECK-NEXT: vnsrl.wi v4, v24, 8
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmsne.vi v9, v24, 0
-; CHECK-NEXT: vmv1r.v v8, v7
+; CHECK-NEXT: vmsne.vi v9, v0, 0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%retval = call {<vscale x 64 x i1>, <vscale x 64 x i1>} @llvm.vector.deinterleave2.nxv128i1(<vscale x 128 x i1> %vec)
ret {<vscale x 64 x i1>, <vscale x 64 x i1>} %retval
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
index 7ade47e60bc6..5ebf63f0a441 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
@@ -107,14 +107,14 @@ define void @vector_interleave_store_nxv16i64_nxv8i64(<vscale x 8 x i64> %a, <vs
; CHECK-NEXT: srli a2, a1, 1
; CHECK-NEXT: vsetvli a3, zero, e16, m2, ta, mu
; CHECK-NEXT: vid.v v24
-; CHECK-NEXT: vsrl.vi v26, v24, 1
-; CHECK-NEXT: vand.vi v24, v24, 1
-; CHECK-NEXT: vmsne.vi v28, v24, 0
+; CHECK-NEXT: vand.vi v26, v24, 1
+; CHECK-NEXT: vmsne.vi v28, v26, 0
+; CHECK-NEXT: vsrl.vi v24, v24, 1
; CHECK-NEXT: vmv1r.v v0, v28
-; CHECK-NEXT: vadd.vx v26, v26, a2, v0.t
+; CHECK-NEXT: vadd.vx v24, v24, a2, v0.t
; CHECK-NEXT: vmv4r.v v12, v16
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v0, v8, v26
+; CHECK-NEXT: vrgatherei16.vv v0, v8, v24
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: add a2, sp, a2
@@ -123,7 +123,7 @@ define void @vector_interleave_store_nxv16i64_nxv8i64(<vscale x 8 x i64> %a, <vs
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vmv4r.v v16, v12
-; CHECK-NEXT: vrgatherei16.vv v8, v16, v26
+; CHECK-NEXT: vrgatherei16.vv v8, v16, v24
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: vs8r.v v8, (a1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
index a7e0ad6ee5f4..2e9f62e2f552 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -122,9 +122,9 @@ define <vscale x 4 x i64> @vector_interleave_nxv4i64_nxv2i64(<vscale x 2 x i64>
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vid.v v12
+; CHECK-NEXT: vand.vi v13, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v13, 0
; CHECK-NEXT: vsrl.vi v16, v12, 1
-; CHECK-NEXT: vand.vi v12, v12, 1
-; CHECK-NEXT: vmsne.vi v0, v12, 0
; CHECK-NEXT: vadd.vx v16, v16, a0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vrgatherei16.vv v12, v8, v16
@@ -137,9 +137,9 @@ define <vscale x 4 x i64> @vector_interleave_nxv4i64_nxv2i64(<vscale x 2 x i64>
; ZVBB-NEXT: srli a0, a0, 2
; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; ZVBB-NEXT: vid.v v12
+; ZVBB-NEXT: vand.vi v13, v12, 1
+; ZVBB-NEXT: vmsne.vi v0, v13, 0
; ZVBB-NEXT: vsrl.vi v16, v12, 1
-; ZVBB-NEXT: vand.vi v12, v12, 1
-; ZVBB-NEXT: vmsne.vi v0, v12, 0
; ZVBB-NEXT: vadd.vx v16, v16, a0, v0.t
; ZVBB-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; ZVBB-NEXT: vrgatherei16.vv v12, v8, v16
@@ -269,13 +269,13 @@ define <vscale x 32 x i32> @vector_interleave_nxv32i32_nxv16i32(<vscale x 16 x i
;
; ZVBB-LABEL: vector_interleave_nxv32i32_nxv16i32:
; ZVBB: # %bb.0:
+; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: li a0, 32
; ZVBB-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; ZVBB-NEXT: vwsll.vx v24, v16, a0
-; ZVBB-NEXT: vwaddu.wv v24, v24, v8
+; ZVBB-NEXT: vwsll.vx v8, v16, a0
+; ZVBB-NEXT: vwaddu.wv v8, v8, v24
; ZVBB-NEXT: vwsll.vx v0, v20, a0
-; ZVBB-NEXT: vwaddu.wv v0, v0, v12
-; ZVBB-NEXT: vmv8r.v v8, v24
+; ZVBB-NEXT: vwaddu.wv v0, v0, v28
; ZVBB-NEXT: vmv8r.v v16, v0
; ZVBB-NEXT: ret
%res = call <vscale x 32 x i32> @llvm.vector.interleave2.nxv32i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b)
@@ -288,44 +288,32 @@ define <vscale x 16 x i64> @vector_interleave_nxv16i64_nxv8i64(<vscale x 8 x i64
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv8r.v v0, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vid.v v24
-; CHECK-NEXT: vsrl.vi v6, v24, 1
-; CHECK-NEXT: vand.vi v8, v24, 1
-; CHECK-NEXT: vmsne.vi v0, v8, 0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT: vadd.vx v6, v6, a0, v0.t
+; CHECK-NEXT: vand.vi v26, v24, 1
+; CHECK-NEXT: vmsne.vi v10, v26, 0
+; CHECK-NEXT: vsrl.vi v8, v24, 1
+; CHECK-NEXT: vmv8r.v v24, v0
+; CHECK-NEXT: vmv4r.v v12, v4
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: vmv4r.v v28, v16
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v8, v24, v6
+; CHECK-NEXT: vrgatherei16.vv v0, v24, v8
; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vmv4r.v v16, v12
-; CHECK-NEXT: vrgatherei16.vv v24, v16, v6
-; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vrgatherei16.vv v24, v16, v8
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmv.v.v v16, v24
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -335,44 +323,32 @@ define <vscale x 16 x i64> @vector_interleave_nxv16i64_nxv8i64(<vscale x 8 x i64
; ZVBB-NEXT: addi sp, sp, -16
; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: slli a0, a0, 4
-; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a0, a0, 3
-; ZVBB-NEXT: add a0, sp, a0
-; ZVBB-NEXT: addi a0, a0, 16
-; ZVBB-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVBB-NEXT: vmv8r.v v0, v8
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: srli a0, a0, 1
; ZVBB-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; ZVBB-NEXT: vid.v v24
-; ZVBB-NEXT: vsrl.vi v6, v24, 1
-; ZVBB-NEXT: vand.vi v8, v24, 1
-; ZVBB-NEXT: vmsne.vi v0, v8, 0
-; ZVBB-NEXT: csrr a1, vlenb
-; ZVBB-NEXT: slli a1, a1, 3
-; ZVBB-NEXT: add a1, sp, a1
-; ZVBB-NEXT: addi a1, a1, 16
-; ZVBB-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; ZVBB-NEXT: vadd.vx v6, v6, a0, v0.t
+; ZVBB-NEXT: vand.vi v26, v24, 1
+; ZVBB-NEXT: vmsne.vi v10, v26, 0
+; ZVBB-NEXT: vsrl.vi v8, v24, 1
+; ZVBB-NEXT: vmv8r.v v24, v0
+; ZVBB-NEXT: vmv4r.v v12, v4
+; ZVBB-NEXT: vmv1r.v v0, v10
+; ZVBB-NEXT: vadd.vx v8, v8, a0, v0.t
; ZVBB-NEXT: vmv4r.v v28, v16
; ZVBB-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; ZVBB-NEXT: vrgatherei16.vv v8, v24, v6
+; ZVBB-NEXT: vrgatherei16.vv v0, v24, v8
; ZVBB-NEXT: addi a0, sp, 16
-; ZVBB-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: slli a0, a0, 3
-; ZVBB-NEXT: add a0, sp, a0
-; ZVBB-NEXT: addi a0, a0, 16
-; ZVBB-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVBB-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
; ZVBB-NEXT: vmv4r.v v16, v12
-; ZVBB-NEXT: vrgatherei16.vv v24, v16, v6
-; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: vrgatherei16.vv v24, v16, v8
; ZVBB-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVBB-NEXT: vmv.v.v v16, v24
; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: slli a0, a0, 4
+; ZVBB-NEXT: slli a0, a0, 3
; ZVBB-NEXT: add sp, sp, a0
; ZVBB-NEXT: addi sp, sp, 16
; ZVBB-NEXT: ret
@@ -516,9 +492,9 @@ define <vscale x 4 x double> @vector_interleave_nxv4f64_nxv2f64(<vscale x 2 x do
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vid.v v12
+; CHECK-NEXT: vand.vi v13, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v13, 0
; CHECK-NEXT: vsrl.vi v16, v12, 1
-; CHECK-NEXT: vand.vi v12, v12, 1
-; CHECK-NEXT: vmsne.vi v0, v12, 0
; CHECK-NEXT: vadd.vx v16, v16, a0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vrgatherei16.vv v12, v8, v16
@@ -531,9 +507,9 @@ define <vscale x 4 x double> @vector_interleave_nxv4f64_nxv2f64(<vscale x 2 x do
; ZVBB-NEXT: srli a0, a0, 2
; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; ZVBB-NEXT: vid.v v12
+; ZVBB-NEXT: vand.vi v13, v12, 1
+; ZVBB-NEXT: vmsne.vi v0, v13, 0
; ZVBB-NEXT: vsrl.vi v16, v12, 1
-; ZVBB-NEXT: vand.vi v12, v12, 1
-; ZVBB-NEXT: vmsne.vi v0, v12, 0
; ZVBB-NEXT: vadd.vx v16, v16, a0, v0.t
; ZVBB-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; ZVBB-NEXT: vrgatherei16.vv v12, v8, v16
@@ -593,13 +569,13 @@ define <vscale x 32 x float> @vector_interleave_nxv32f32_nxv16f32(<vscale x 16 x
;
; ZVBB-LABEL: vector_interleave_nxv32f32_nxv16f32:
; ZVBB: # %bb.0:
+; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: li a0, 32
; ZVBB-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; ZVBB-NEXT: vwsll.vx v24, v16, a0
-; ZVBB-NEXT: vwaddu.wv v24, v24, v8
+; ZVBB-NEXT: vwsll.vx v8, v16, a0
+; ZVBB-NEXT: vwaddu.wv v8, v8, v24
; ZVBB-NEXT: vwsll.vx v0, v20, a0
-; ZVBB-NEXT: vwaddu.wv v0, v0, v12
-; ZVBB-NEXT: vmv8r.v v8, v24
+; ZVBB-NEXT: vwaddu.wv v0, v0, v28
; ZVBB-NEXT: vmv8r.v v16, v0
; ZVBB-NEXT: ret
%res = call <vscale x 32 x float> @llvm.vector.interleave2.nxv32f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b)
@@ -612,44 +588,32 @@ define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv8r.v v0, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vid.v v24
-; CHECK-NEXT: vsrl.vi v6, v24, 1
-; CHECK-NEXT: vand.vi v8, v24, 1
-; CHECK-NEXT: vmsne.vi v0, v8, 0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT: vadd.vx v6, v6, a0, v0.t
+; CHECK-NEXT: vand.vi v26, v24, 1
+; CHECK-NEXT: vmsne.vi v10, v26, 0
+; CHECK-NEXT: vsrl.vi v8, v24, 1
+; CHECK-NEXT: vmv8r.v v24, v0
+; CHECK-NEXT: vmv4r.v v12, v4
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: vmv4r.v v28, v16
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v8, v24, v6
+; CHECK-NEXT: vrgatherei16.vv v0, v24, v8
; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vmv4r.v v16, v12
-; CHECK-NEXT: vrgatherei16.vv v24, v16, v6
-; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vrgatherei16.vv v24, v16, v8
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmv.v.v v16, v24
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -659,44 +623,32 @@ define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x
; ZVBB-NEXT: addi sp, sp, -16
; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: slli a0, a0, 4
-; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a0, a0, 3
-; ZVBB-NEXT: add a0, sp, a0
-; ZVBB-NEXT: addi a0, a0, 16
-; ZVBB-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVBB-NEXT: vmv8r.v v0, v8
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: srli a0, a0, 1
; ZVBB-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; ZVBB-NEXT: vid.v v24
-; ZVBB-NEXT: vsrl.vi v6, v24, 1
-; ZVBB-NEXT: vand.vi v8, v24, 1
-; ZVBB-NEXT: vmsne.vi v0, v8, 0
-; ZVBB-NEXT: csrr a1, vlenb
-; ZVBB-NEXT: slli a1, a1, 3
-; ZVBB-NEXT: add a1, sp, a1
-; ZVBB-NEXT: addi a1, a1, 16
-; ZVBB-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; ZVBB-NEXT: vadd.vx v6, v6, a0, v0.t
+; ZVBB-NEXT: vand.vi v26, v24, 1
+; ZVBB-NEXT: vmsne.vi v10, v26, 0
+; ZVBB-NEXT: vsrl.vi v8, v24, 1
+; ZVBB-NEXT: vmv8r.v v24, v0
+; ZVBB-NEXT: vmv4r.v v12, v4
+; ZVBB-NEXT: vmv1r.v v0, v10
+; ZVBB-NEXT: vadd.vx v8, v8, a0, v0.t
; ZVBB-NEXT: vmv4r.v v28, v16
; ZVBB-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; ZVBB-NEXT: vrgatherei16.vv v8, v24, v6
+; ZVBB-NEXT: vrgatherei16.vv v0, v24, v8
; ZVBB-NEXT: addi a0, sp, 16
-; ZVBB-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: slli a0, a0, 3
-; ZVBB-NEXT: add a0, sp, a0
-; ZVBB-NEXT: addi a0, a0, 16
-; ZVBB-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVBB-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
; ZVBB-NEXT: vmv4r.v v16, v12
-; ZVBB-NEXT: vrgatherei16.vv v24, v16, v6
-; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: vrgatherei16.vv v24, v16, v8
; ZVBB-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVBB-NEXT: vmv.v.v v16, v24
; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: slli a0, a0, 4
+; ZVBB-NEXT: slli a0, a0, 3
; ZVBB-NEXT: add sp, sp, a0
; ZVBB-NEXT: addi sp, sp, 16
; ZVBB-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
index be56db52e349..8cb6fed2f588 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
@@ -209,9 +209,7 @@ define <vscale x 16 x i1> @splice_nxv16i1_offset_negone(<vscale x 16 x i1> %a, <
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vslideup.vi v8, v12, 1
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -259,9 +257,7 @@ define <vscale x 32 x i1> @splice_nxv32i1_offset_negone(<vscale x 32 x i1> %a, <
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vslideup.vi v8, v16, 1
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -282,8 +278,8 @@ define <vscale x 32 x i1> @splice_nxv32i1_offset_max(<vscale x 32 x i1> %a, <vsc
; CHECK-NEXT: li a1, 63
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vslidedown.vx v16, v16, a1
-; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT: vmerge.vim v8, v12, 1, v0
; CHECK-NEXT: vslideup.vx v16, v8, a0
; CHECK-NEXT: vand.vi v8, v16, 1
@@ -308,9 +304,7 @@ define <vscale x 64 x i1> @splice_nxv64i1_offset_negone(<vscale x 64 x i1> %a, <
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT: vslideup.vi v8, v16, 1
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -331,8 +325,8 @@ define <vscale x 64 x i1> @splice_nxv64i1_offset_max(<vscale x 64 x i1> %a, <vsc
; CHECK-NEXT: li a1, 127
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vslidedown.vx v24, v24, a1
-; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT: vmerge.vim v8, v16, 1, v0
; CHECK-NEXT: vslideup.vx v24, v8, a0
; CHECK-NEXT: vand.vi v8, v24, 1
@@ -358,9 +352,8 @@ define <vscale x 1 x i8> @splice_nxv1i8_offset_negone(<vscale x 1 x i8> %a, <vsc
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x i8> @llvm.vector.splice.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 -1)
@@ -413,9 +406,8 @@ define <vscale x 2 x i8> @splice_nxv2i8_offset_negone(<vscale x 2 x i8> %a, <vsc
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 2 x i8> @llvm.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 -1)
@@ -468,9 +460,8 @@ define <vscale x 4 x i8> @splice_nxv4i8_offset_negone(<vscale x 4 x i8> %a, <vsc
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 4 x i8> @llvm.vector.splice.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, i32 -1)
@@ -522,9 +513,8 @@ define <vscale x 8 x i8> @splice_nxv8i8_offset_negone(<vscale x 8 x i8> %a, <vsc
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 8 x i8> @llvm.vector.splice.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, i32 -1)
@@ -745,9 +735,8 @@ define <vscale x 1 x i16> @splice_nxv1i16_offset_negone(<vscale x 1 x i16> %a, <
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x i16> @llvm.vector.splice.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, i32 -1)
@@ -800,9 +789,8 @@ define <vscale x 2 x i16> @splice_nxv2i16_offset_negone(<vscale x 2 x i16> %a, <
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 2 x i16> @llvm.vector.splice.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, i32 -1)
@@ -855,9 +843,8 @@ define <vscale x 4 x i16> @splice_nxv4i16_offset_negone(<vscale x 4 x i16> %a, <
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 4 x i16> @llvm.vector.splice.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, i32 -1)
@@ -1075,9 +1062,8 @@ define <vscale x 1 x i32> @splice_nxv1i32_offset_negone(<vscale x 1 x i32> %a, <
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x i32> @llvm.vector.splice.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, i32 -1)
@@ -1130,9 +1116,8 @@ define <vscale x 2 x i32> @splice_nxv2i32_offset_negone(<vscale x 2 x i32> %a, <
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 2 x i32> @llvm.vector.splice.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i32 -1)
@@ -1348,9 +1333,8 @@ define <vscale x 1 x i64> @splice_nxv1i64_offset_negone(<vscale x 1 x i64> %a, <
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, i32 -1)
@@ -1565,9 +1549,8 @@ define <vscale x 1 x half> @splice_nxv1f16_offset_negone(<vscale x 1 x half> %a,
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x half> @llvm.vector.splice.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, i32 -1)
@@ -1620,9 +1603,8 @@ define <vscale x 2 x half> @splice_nxv2f16_offset_negone(<vscale x 2 x half> %a,
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 2 x half> @llvm.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 -1)
@@ -1675,9 +1657,8 @@ define <vscale x 4 x half> @splice_nxv4f16_offset_negone(<vscale x 4 x half> %a,
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 4 x half> @llvm.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 -1)
@@ -1895,9 +1876,8 @@ define <vscale x 1 x float> @splice_nxv1f32_offset_negone(<vscale x 1 x float> %
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x float> @llvm.vector.splice.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, i32 -1)
@@ -1950,9 +1930,8 @@ define <vscale x 2 x float> @splice_nxv2f32_offset_negone(<vscale x 2 x float> %
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 2 x float> @llvm.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 -1)
@@ -2168,9 +2147,8 @@ define <vscale x 1 x double> @splice_nxv1f64_offset_negone(<vscale x 1 x double>
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x double> @llvm.vector.splice.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 -1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
index df2bc523cd7a..0f7e3f1e0ea5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
@@ -250,8 +250,8 @@ define <vscale x 32 x half> @vfabs_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB10_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -541,8 +541,8 @@ define <vscale x 16 x double> @vfabs_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.fabs.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
index c69a7bc5cece..ad7fb63fec2f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
@@ -579,9 +579,9 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -596,8 +596,8 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -636,9 +636,9 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -675,12 +675,20 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
;
; ZVFHMIN-LABEL: vfadd_vf_nxv32f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vmv1r.v v3, v0
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 2
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -692,7 +700,9 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vl4r.v v12, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
@@ -702,11 +712,15 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB24_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 2
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
index 3ad17e85570a..81d844d1950a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
@@ -525,9 +525,9 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -542,8 +542,8 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -582,9 +582,9 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -621,12 +621,20 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b
;
; ZVFHMIN-LABEL: vfdiv_vf_nxv32f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vmv1r.v v3, v0
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 2
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -638,7 +646,9 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vl4r.v v12, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
@@ -648,11 +658,15 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB22_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vfdiv.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 2
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll
index 3be3f835f3d1..eafd605c6110 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll
@@ -44,8 +44,8 @@ define iXLen @intrinsic_vfirst_mask_m_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vfirst.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -98,8 +98,8 @@ define iXLen @intrinsic_vfirst_mask_m_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vfirst.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -138,8 +138,8 @@ define iXLen @intrinsic_vfirst_mask_m_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vfirst.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -178,8 +178,8 @@ define iXLen @intrinsic_vfirst_mask_m_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vfirst.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -218,8 +218,8 @@ define iXLen @intrinsic_vfirst_mask_m_nxv16i1(<vscale x 16 x i1> %0, <vscale x 1
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vfirst.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -258,8 +258,8 @@ define iXLen @intrinsic_vfirst_mask_m_nxv32i1(<vscale x 32 x i1> %0, <vscale x 3
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vfirst.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -298,8 +298,8 @@ define iXLen @intrinsic_vfirst_mask_m_nxv64i1(<vscale x 64 x i1> %0, <vscale x 6
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vfirst.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
index 0f47236d6600..8201f18175e8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
@@ -1134,12 +1134,10 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: add a6, sp, a6
; CHECK-NEXT: addi a6, a6, 16
; CHECK-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill
+; CHECK-NEXT: add a5, a0, a5
; CHECK-NEXT: sub a6, a4, a1
; CHECK-NEXT: sltu a7, a4, a6
; CHECK-NEXT: addi a7, a7, -1
-; CHECK-NEXT: and a6, a7, a6
-; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma
-; CHECK-NEXT: add a5, a0, a5
; CHECK-NEXT: vl8re64.v v8, (a5)
; CHECK-NEXT: vl8re64.v v16, (a2)
; CHECK-NEXT: csrr a2, vlenb
@@ -1150,19 +1148,21 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: vl8re64.v v16, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a3
-; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a2, 24
-; CHECK-NEXT: mul a0, a0, a2
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: and a0, a7, a6
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a2, a2, a3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 4
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a2, 24
@@ -1174,7 +1174,6 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB92_2:
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
@@ -1188,6 +1187,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v16, v24, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: csrr a0, vlenb
@@ -1234,24 +1234,23 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: vl8re64.v v16, (a3)
; CHECK-NEXT: sub a3, a4, a1
; CHECK-NEXT: sltu a5, a4, a3
-; CHECK-NEXT: addi a5, a5, -1
; CHECK-NEXT: vl8re64.v v8, (a2)
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: vl8re64.v v0, (a0)
+; CHECK-NEXT: addi a5, a5, -1
; CHECK-NEXT: and a3, a5, a3
-; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v16, v8, v24
; CHECK-NEXT: bltu a4, a1, .LBB93_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB93_2:
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
@@ -1259,6 +1258,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v0, v24, v8
; CHECK-NEXT: vmv.v.v v8, v0
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
index d4ba0f8c9073..c15b875e8f0c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
@@ -239,15 +239,15 @@ define <vscale x 16 x half> @vfmadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscal
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs4r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v16, v0, v24
+; ZVFHMIN-NEXT: vfmadd.vv v24, v0, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 2
; ZVFHMIN-NEXT: add sp, sp, a0
@@ -315,87 +315,88 @@ define <vscale x 32 x half> @vfmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 40
-; ZVFHMIN-NEXT: mul a1, a1, a2
+; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv8r.v v0, v8
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 24
-; ZVFHMIN-NEXT: mul a1, a1, a2
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN-NEXT: vl8re16.v v0, (a0)
+; ZVFHMIN-NEXT: vmv8r.v v24, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN-NEXT: vmv8r.v v8, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v24
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v0, v16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v8, v24, v16
+; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v8
-; ZVFHMIN-NEXT: vmv8r.v v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 40
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
@@ -415,77 +416,68 @@ define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
-; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vmv8r.v v0, v16
-; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: sub sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0
+; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
+; ZVFHMIN-NEXT: vmv.v.v v8, v4
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v24
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v0
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v28
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 24
-; ZVFHMIN-NEXT: mul a0, a0, a1
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v28
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v24
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v0
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
index 8a72b2ddafac..4ce556efb44d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
@@ -323,87 +323,88 @@ define <vscale x 32 x half> @vfmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 40
-; ZVFHMIN-NEXT: mul a1, a1, a2
+; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv8r.v v0, v8
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 24
-; ZVFHMIN-NEXT: mul a1, a1, a2
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN-NEXT: vl8re16.v v0, (a0)
+; ZVFHMIN-NEXT: vmv8r.v v24, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN-NEXT: vmv8r.v v8, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v24
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v0, v16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v8, v24, v16
+; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v8
-; ZVFHMIN-NEXT: vmv8r.v v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 40
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
@@ -423,10 +424,14 @@ define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a0, zero, e32, m8, ta, ma
@@ -435,33 +440,50 @@ define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmadd.vv v0, v8, v24
-; ZVFHMIN-NEXT: vmv8r.v v24, v8
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v0
-; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv4r.v v12, v20
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v8, v24, v0
+; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v8
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
index 7556b3ace5c6..7ab999ea4fa7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
@@ -255,9 +255,9 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -272,8 +272,8 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmax.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -312,9 +312,9 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
index 755c66537612..e928df85b5bb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
@@ -255,9 +255,9 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -272,8 +272,8 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmin.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -312,9 +312,9 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll
index 5114f0a8d1d6..c835dc72268b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll
@@ -347,75 +347,64 @@ define <vscale x 32 x half> @vfmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 28
+; ZVFHMIN-NEXT: li a2, 24
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x1c, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 28 * vlenb
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
+; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
+; ZVFHMIN-NEXT: vl8re16.v v16, (a0)
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 20
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFHMIN-NEXT: vmv4r.v v20, v12
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfneg.v v0, v0
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v0
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v0, v0
+; ZVFHMIN-NEXT: vfmacc.vv v0, v8, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v24, v0
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 20
-; ZVFHMIN-NEXT: mul a0, a0, a1
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmacc.vv v8, v0, v16
+; ZVFHMIN-NEXT: vfneg.v v8, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v8, v0, v24
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v8
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 28
+; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
@@ -437,55 +426,93 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: li a1, 28
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT: vmv8r.v v24, v16
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x1c, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 28 * vlenb
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: li a1, 12
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv8r.v v24, v8
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v0, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v0
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfneg.v v0, v0
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v0
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 20
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 20
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v0, v0
+; ZVFHMIN-NEXT: vfmacc.vv v0, v24, v16
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmacc.vv v24, v0, v8
+; ZVFHMIN-NEXT: vfneg.v v8, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 20
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl4r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 12
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 20
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v16, v0, v24
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: li a1, 28
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
index 30d5919238cf..61f3e63f246c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
@@ -525,9 +525,9 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -542,8 +542,8 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -582,9 +582,9 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -621,12 +621,20 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b
;
; ZVFHMIN-LABEL: vfmul_vf_nxv32f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vmv1r.v v3, v0
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 2
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -638,7 +646,9 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vl4r.v v12, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
@@ -648,11 +658,15 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB22_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vfmul.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 2
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
index bacf9bae83ed..abda6750e5a8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
@@ -1134,12 +1134,10 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: add a6, sp, a6
; CHECK-NEXT: addi a6, a6, 16
; CHECK-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill
+; CHECK-NEXT: add a5, a0, a5
; CHECK-NEXT: sub a6, a4, a1
; CHECK-NEXT: sltu a7, a4, a6
; CHECK-NEXT: addi a7, a7, -1
-; CHECK-NEXT: and a6, a7, a6
-; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma
-; CHECK-NEXT: add a5, a0, a5
; CHECK-NEXT: vl8re64.v v8, (a5)
; CHECK-NEXT: vl8re64.v v16, (a2)
; CHECK-NEXT: csrr a2, vlenb
@@ -1150,19 +1148,21 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: vl8re64.v v16, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a3
-; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a2, 24
-; CHECK-NEXT: mul a0, a0, a2
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: and a0, a7, a6
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a2, a2, a3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 4
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a2, 24
@@ -1174,7 +1174,6 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB92_2:
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
@@ -1188,6 +1187,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v16, v24, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: csrr a0, vlenb
@@ -1234,24 +1234,23 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: vl8re64.v v16, (a3)
; CHECK-NEXT: sub a3, a4, a1
; CHECK-NEXT: sltu a5, a4, a3
-; CHECK-NEXT: addi a5, a5, -1
; CHECK-NEXT: vl8re64.v v8, (a2)
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: vl8re64.v v0, (a0)
+; CHECK-NEXT: addi a5, a5, -1
; CHECK-NEXT: and a3, a5, a3
-; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v16, v8, v24
; CHECK-NEXT: bltu a4, a1, .LBB93_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB93_2:
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
@@ -1259,6 +1258,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v0, v24, v8
; CHECK-NEXT: vmv.v.v v8, v0
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
index 1db5fa1720a2..69ea7ce33cf6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
@@ -250,8 +250,8 @@ define <vscale x 32 x half> @vfneg_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB10_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfneg.v v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -541,8 +541,8 @@ define <vscale x 16 x double> @vfneg_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfneg.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
index 785f60ad1d39..b54590cd9d84 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
@@ -412,85 +412,85 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; ZVFHMIN-NEXT: vl8re16.v v0, (a0)
+; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 12
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v24, v24
+; ZVFHMIN-NEXT: vfneg.v v0, v0
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v24, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfneg.v v0, v0
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 20
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v24, v24
+; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v8
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v24, v24
+; ZVFHMIN-NEXT: vfneg.v v8, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN-NEXT: vs4r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v24, v24
+; ZVFHMIN-NEXT: vfneg.v v8, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl4r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 12
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v8
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 20
-; ZVFHMIN-NEXT: mul a0, a0, a1
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl4r.v v20, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v20
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v0, v8, v24
+; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v0
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
@@ -515,16 +515,16 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 12
+; ZVFHMIN-NEXT: li a1, 20
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x0c, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 12 * vlenb
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x14, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 20 * vlenb
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 2
+; ZVFHMIN-NEXT: li a1, 12
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
@@ -534,48 +534,61 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfneg.v v0, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v20
+; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v0
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs4r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfneg.v v0, v0
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 12
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v0, v0
+; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v24
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfneg.v v16, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 2
+; ZVFHMIN-NEXT: li a1, 12
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vmv.v.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v16, v0, v24
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl4r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 2
+; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
+; ZVFHMIN-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v16
+; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 12
+; ZVFHMIN-NEXT: li a1, 20
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
index 1a2da051c962..2f7e693a8a6f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
@@ -376,85 +376,76 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 28
+; ZVFHMIN-NEXT: li a2, 24
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x1c, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 28 * vlenb
-; ZVFHMIN-NEXT: vl8re16.v v0, (a0)
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v24, v24
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 24
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v24, v24
+; ZVFHMIN-NEXT: vfneg.v v0, v0
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v0, v24
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v24, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFHMIN-NEXT: vmv4r.v v20, v12
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v0
+; ZVFHMIN-NEXT: vfneg.v v8, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v8
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v28
+; ZVFHMIN-NEXT: vs4r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 24
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl4r.v v20, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v8, v0, v24
+; ZVFHMIN-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v8
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 28
+; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
@@ -480,7 +471,6 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; ZVFHMIN-NEXT: vmv8r.v v24, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
@@ -488,58 +478,59 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a0, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmv.v.f v8, fa5
+; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v8
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4
+; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v0, v8
+; ZVFHMIN-NEXT: vfneg.v v24, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v28, v0
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v28
-; ZVFHMIN-NEXT: vmv.v.v v16, v28
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
+; ZVFHMIN-NEXT: vmv.v.v v8, v4
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v0, v8, v24
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v24, v0, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v28
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v20
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v16
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v16, v0, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
index aaaf4ad46071..28dc3e765dc3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
@@ -112,8 +112,8 @@ define <vscale x 32 x float> @vfpext_nxv32f16_nxv32f32(<vscale x 32 x half> %a,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB7_2:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v24, v8, v0.t
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
index 15c4bf255e6d..f3544589407d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
@@ -394,7 +394,15 @@ declare <vscale x 32 x i16> @llvm.vp.fptosi.nxv32i16.nxv32f32(<vscale x 32 x flo
define <vscale x 32 x i16> @vfptosi_nxv32i16_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv32i16_nxv32f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
@@ -404,16 +412,22 @@ define <vscale x 32 x i16> @vfptosi_nxv32i16_nxv32f32(<vscale x 32 x float> %va,
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vfncvt.rtz.x.f.w v28, v16, v0.t
+; CHECK-NEXT: vfncvt.rtz.x.f.w v20, v24, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB25_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB25_2:
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfncvt.rtz.x.f.w v24, v8, v0.t
-; CHECK-NEXT: vmv8r.v v8, v24
+; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.fptosi.nxv32i16.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x i16> %v
@@ -440,8 +454,8 @@ define <vscale x 32 x i32> @vfptosi_nxv32i32_nxv32f32(<vscale x 32 x float> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.fptosi.nxv32i32.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
index a2591e7dc35f..9fd2d8edb220 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
@@ -394,7 +394,15 @@ declare <vscale x 32 x i16> @llvm.vp.fptoui.nxv32i16.nxv32f32(<vscale x 32 x flo
define <vscale x 32 x i16> @vfptoui_nxv32i16_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv32i16_nxv32f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
@@ -404,16 +412,22 @@ define <vscale x 32 x i16> @vfptoui_nxv32i16_nxv32f32(<vscale x 32 x float> %va,
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vfncvt.rtz.xu.f.w v28, v16, v0.t
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v20, v24, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB25_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB25_2:
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfncvt.rtz.xu.f.w v24, v8, v0.t
-; CHECK-NEXT: vmv8r.v v8, v24
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.fptoui.nxv32i16.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x i16> %v
@@ -440,8 +454,8 @@ define <vscale x 32 x i32> @vfptoui_nxv32i32_nxv32f32(<vscale x 32 x float> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.fptoui.nxv32i32.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
index 0c3abe37af27..d2219cf96359 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
@@ -113,16 +113,16 @@ define <vscale x 16 x float> @vfptrunc_nxv16f32_nxv16f64(<vscale x 16 x double>
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vfncvt.f.f.w v20, v24, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB7_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB7_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v16, v8, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: csrr a0, vlenb
@@ -169,11 +169,11 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: sub a6, a5, a1
; CHECK-NEXT: sltu a7, a5, a6
; CHECK-NEXT: addi a7, a7, -1
-; CHECK-NEXT: and a6, a7, a6
-; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma
; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v16, a3
-; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma
+; CHECK-NEXT: and a0, a7, a6
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v20, v8, v0.t
; CHECK-NEXT: bltu a5, a1, .LBB8_2
; CHECK-NEXT: # %bb.1:
@@ -181,8 +181,8 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: .LBB8_2:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v6, v7, a3
-; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v16, v24, v0.t
; CHECK-NEXT: bltu a2, a4, .LBB8_4
; CHECK-NEXT: # %bb.3:
@@ -192,22 +192,22 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: sltu a3, a2, a0
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a0, a3, a0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v28, v8, v0.t
; CHECK-NEXT: bltu a2, a1, .LBB8_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB8_6:
-; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v24, v8, v0.t
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
index d6caad15e40a..bd229e0220a4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
@@ -250,8 +250,8 @@ define <vscale x 32 x half> @vfsqrt_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB10_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsqrt.v v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -541,8 +541,8 @@ define <vscale x 16 x double> @vfsqrt_vv_nxv16f64(<vscale x 16 x double> %va, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.sqrt.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
index 2eae18d7cc49..c833f8048fe3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
@@ -525,9 +525,9 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -542,8 +542,8 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -582,9 +582,9 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -621,12 +621,20 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b
;
; ZVFHMIN-LABEL: vfsub_vf_nxv32f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vmv1r.v v3, v0
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 2
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -638,7 +646,9 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vl4r.v v12, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
@@ -648,11 +658,15 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB22_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vfsub.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 2
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll
index 78f3792dbaf0..f9d992a40299 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll
@@ -671,9 +671,9 @@ define <vscale x 16 x float> @vfmacc_vv_nxv16f32(<vscale x 16 x half> %a, <vscal
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8, v0.t
; ZVFHMIN-NEXT: vmv.v.v v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll
index ca0bbfd65ca2..c11867d55ba0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll
@@ -625,9 +625,9 @@ define <vscale x 16 x float> @vfnmacc_vv_nxv16f32(<vscale x 16 x half> %a, <vsca
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfnmadd.vv v24, v16, v8, v0.t
; ZVFHMIN-NEXT: vmv.v.v v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
@@ -710,17 +710,17 @@ define <vscale x 16 x float> @vfnmacc_vf_nxv16f32_commute(<vscale x 16 x half> %
;
; ZVFHMIN-LABEL: vfnmacc_vf_nxv16f32_commute:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vmv4r.v v24, v8
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmv.v.f v8, fa5
+; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v8
+; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24, v0.t
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4, v0.t
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8, v0.t
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfnmadd.vv v8, v24, v16, v0.t
+; ZVFHMIN-NEXT: vfnmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN-NEXT: vmv.v.v v8, v24
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll
index 2797ca2eb316..0ad7be47bcc8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll
@@ -601,9 +601,9 @@ define <vscale x 16 x float> @vfnmsac_vv_nxv16f32(<vscale x 16 x half> %a, <vsca
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfnmsub.vv v24, v16, v8, v0.t
; ZVFHMIN-NEXT: vmv.v.v v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
@@ -682,17 +682,17 @@ define <vscale x 16 x float> @vfnmsac_vf_nxv16f32_commute(<vscale x 16 x half> %
;
; ZVFHMIN-LABEL: vfnmsac_vf_nxv16f32_commute:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vmv4r.v v24, v8
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmv.v.f v8, fa5
+; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v8
+; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24, v0.t
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4, v0.t
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8, v0.t
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfnmsub.vv v8, v24, v16, v0.t
+; ZVFHMIN-NEXT: vfnmsub.vv v24, v8, v16, v0.t
+; ZVFHMIN-NEXT: vmv.v.v v8, v24
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll
index 5d0172430d15..77ef0a340270 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll
@@ -449,12 +449,12 @@ define <vscale x 32 x half> @vsitofp_nxv32i1_nxv32f16(<vscale x 32 x i1> %va) {
; ZVFHMIN-NEXT: vmv.v.i v12, 0
; ZVFHMIN-NEXT: vmerge.vim v8, v12, -1, v0
; ZVFHMIN-NEXT: vfwcvt.f.x.v v16, v8
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: srli a0, a0, 2
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a0
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: vmerge.vim v12, v12, -1, v0
; ZVFHMIN-NEXT: vfwcvt.f.x.v v16, v12
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -478,12 +478,12 @@ define <vscale x 32 x half> @vuitofp_nxv32i1_nxv32f16(<vscale x 32 x i1> %va) {
; ZVFHMIN-NEXT: vmv.v.i v12, 0
; ZVFHMIN-NEXT: vmerge.vim v8, v12, 1, v0
; ZVFHMIN-NEXT: vfwcvt.f.xu.v v16, v8
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: srli a0, a0, 2
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a0
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: vmerge.vim v12, v12, 1, v0
; ZVFHMIN-NEXT: vfwcvt.f.xu.v v16, v12
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
index a35fc874065a..8a7646798662 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
@@ -423,8 +423,8 @@ define <vscale x 128 x i8> @vmax_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB34_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
@@ -986,8 +986,8 @@ define <vscale x 32 x i32> @vmax_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
@@ -1046,8 +1046,8 @@ define <vscale x 32 x i32> @vmax_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i3
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
@@ -1084,8 +1084,8 @@ define <vscale x 32 x i32> @vmax_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, i
; RV64-NEXT: slli a1, a1, 1
; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV64-NEXT: vmax.vx v8, v8, a0, v0.t
-; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vmax.vx v16, v16, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
index 1f620a44dbbc..1c74887c1b20 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
@@ -425,8 +425,8 @@ define <vscale x 128 x i8> @vmaxu_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB34_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
@@ -988,8 +988,8 @@ define <vscale x 32 x i32> @vmaxu_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
@@ -1048,8 +1048,8 @@ define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
@@ -1086,8 +1086,8 @@ define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va,
; RV64-NEXT: slli a1, a1, 1
; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV64-NEXT: vmaxu.vx v8, v8, a0, v0.t
-; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vmaxu.vx v16, v16, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll
index e7184921d87a..2e5b67c93fce 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16(
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: vmfeq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16(
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: vmfeq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16(
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: vmfeq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -187,11 +190,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16(
define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v10
-; CHECK-NEXT: vmfeq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmfeq.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfeq.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16(
define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v12
-; CHECK-NEXT: vmfeq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmfeq.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfeq.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
@@ -289,9 +294,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32(
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: vmfeq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -340,9 +346,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32(
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: vmfeq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -391,11 +398,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32(
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v10
-; CHECK-NEXT: vmfeq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmfeq.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfeq.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
@@ -442,11 +450,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32(
define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v12
-; CHECK-NEXT: vmfeq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmfeq.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfeq.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
@@ -493,9 +502,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64(
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: vmfeq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -544,11 +554,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64(
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v10
-; CHECK-NEXT: vmfeq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmfeq.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfeq.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
@@ -595,11 +606,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64(
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v12
-; CHECK-NEXT: vmfeq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmfeq.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfeq.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
@@ -647,8 +659,8 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -694,8 +706,8 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -741,8 +753,8 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -788,8 +800,8 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmfeq.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -835,8 +847,8 @@ define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vf_nxv16f16_f16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmfeq.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -882,8 +894,8 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -929,8 +941,8 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -976,8 +988,8 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmfeq.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1023,8 +1035,8 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmfeq.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1070,8 +1082,8 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1117,8 +1129,8 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmfeq.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1164,8 +1176,8 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmfeq.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge.ll
index a6dad9eaa4f3..b5ca47707c8a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfge.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16(
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: vmfle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16(
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: vmfle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16(
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: vmfle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -187,11 +190,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16(
define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v10, v8
-; CHECK-NEXT: vmfle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmfle.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfle.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16(
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16(
define <vscale x 16 x i1> @intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v12, v8
-; CHECK-NEXT: vmfle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmfle.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfle.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16(
@@ -289,9 +294,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32(
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: vmfle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -340,9 +346,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32(
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: vmfle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -391,11 +398,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32(
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v10, v8
-; CHECK-NEXT: vmfle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmfle.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfle.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32(
@@ -442,11 +450,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32(
define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v12, v8
-; CHECK-NEXT: vmfle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmfle.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfle.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32(
@@ -493,9 +502,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64(
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: vmfle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -544,11 +554,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64(
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v10, v8
-; CHECK-NEXT: vmfle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmfle.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfle.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64(
@@ -595,11 +606,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64(
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v12, v8
-; CHECK-NEXT: vmfle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmfle.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfle.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64(
@@ -647,8 +659,8 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -694,8 +706,8 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -741,8 +753,8 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -788,8 +800,8 @@ define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmfge.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -835,8 +847,8 @@ define <vscale x 16 x i1> @intrinsic_vmfge_mask_vf_nxv16f16_f16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmfge.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -882,8 +894,8 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -929,8 +941,8 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -976,8 +988,8 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmfge.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1023,8 +1035,8 @@ define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmfge.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1070,8 +1082,8 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1117,8 +1129,8 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmfge.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1164,8 +1176,8 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmfge.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll
index f643a4036381..971249d38d1b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16(
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: vmflt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16(
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: vmflt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16(
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: vmflt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -187,11 +190,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16(
define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v10, v8
-; CHECK-NEXT: vmflt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmflt.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmflt.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16(
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16(
define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v12, v8
-; CHECK-NEXT: vmflt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmflt.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmflt.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16(
@@ -289,9 +294,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32(
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: vmflt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -340,9 +346,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32(
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: vmflt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -391,11 +398,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32(
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v10, v8
-; CHECK-NEXT: vmflt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmflt.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmflt.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32(
@@ -442,11 +450,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32(
define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v12, v8
-; CHECK-NEXT: vmflt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmflt.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmflt.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32(
@@ -493,9 +502,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64(
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: vmflt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -544,11 +554,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64(
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v10, v8
-; CHECK-NEXT: vmflt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmflt.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmflt.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64(
@@ -595,11 +606,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64(
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v12, v8
-; CHECK-NEXT: vmflt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmflt.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmflt.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64(
@@ -647,8 +659,8 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -694,8 +706,8 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -741,8 +753,8 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -788,8 +800,8 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmfgt.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -835,8 +847,8 @@ define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vf_nxv16f16_f16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmfgt.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -882,8 +894,8 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -929,8 +941,8 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -976,8 +988,8 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmfgt.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1023,8 +1035,8 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmfgt.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1070,8 +1082,8 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1117,8 +1129,8 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmfgt.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1164,8 +1176,8 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmfgt.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle.ll
index 6c52364c1fbd..f19a181a365a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfle.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfle.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16(
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: vmfle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16(
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: vmfle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16(
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: vmfle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -187,11 +190,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16(
define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v10
-; CHECK-NEXT: vmfle.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmfle.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfle.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16(
define <vscale x 16 x i1> @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v12
-; CHECK-NEXT: vmfle.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmfle.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfle.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
@@ -289,9 +294,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32(
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: vmfle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -340,9 +346,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32(
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: vmfle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -391,11 +398,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32(
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v10
-; CHECK-NEXT: vmfle.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmfle.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfle.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
@@ -442,11 +450,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32(
define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v12
-; CHECK-NEXT: vmfle.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmfle.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfle.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
@@ -493,9 +502,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64(
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: vmfle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -544,11 +554,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64(
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v10
-; CHECK-NEXT: vmfle.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmfle.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfle.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
@@ -595,11 +606,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64(
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v12
-; CHECK-NEXT: vmfle.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmfle.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfle.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
@@ -647,8 +659,8 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -694,8 +706,8 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -741,8 +753,8 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -788,8 +800,8 @@ define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmfle.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -835,8 +847,8 @@ define <vscale x 16 x i1> @intrinsic_vmfle_mask_vf_nxv16f16_f16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmfle.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -882,8 +894,8 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -929,8 +941,8 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -976,8 +988,8 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmfle.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1023,8 +1035,8 @@ define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmfle.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1070,8 +1082,8 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1117,8 +1129,8 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmfle.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1164,8 +1176,8 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmfle.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt.ll
index 37a9c6b081a1..0a0464221933 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmflt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmflt.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16(
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: vmflt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16(
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: vmflt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16(
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: vmflt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -187,11 +190,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16(
define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v10
-; CHECK-NEXT: vmflt.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmflt.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmflt.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16(
define <vscale x 16 x i1> @intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v12
-; CHECK-NEXT: vmflt.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmflt.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmflt.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
@@ -289,9 +294,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32(
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: vmflt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -340,9 +346,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32(
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: vmflt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -391,11 +398,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32(
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v10
-; CHECK-NEXT: vmflt.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmflt.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmflt.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
@@ -442,11 +450,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32(
define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v12
-; CHECK-NEXT: vmflt.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmflt.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmflt.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
@@ -493,9 +502,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64(
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: vmflt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -544,11 +554,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64(
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v10
-; CHECK-NEXT: vmflt.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmflt.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmflt.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
@@ -595,11 +606,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64(
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v12
-; CHECK-NEXT: vmflt.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmflt.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmflt.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
@@ -647,8 +659,8 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -694,8 +706,8 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -741,8 +753,8 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -788,8 +800,8 @@ define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -835,8 +847,8 @@ define <vscale x 16 x i1> @intrinsic_vmflt_mask_vf_nxv16f16_f16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmflt.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -882,8 +894,8 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -929,8 +941,8 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -976,8 +988,8 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1023,8 +1035,8 @@ define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1070,8 +1082,8 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1117,8 +1129,8 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1164,8 +1176,8 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne.ll
index 5defce42091e..520099247e0f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfne.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfne.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16(
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: vmfne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16(
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: vmfne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16(
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: vmfne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -187,11 +190,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16(
define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v10
-; CHECK-NEXT: vmfne.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmfne.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfne.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16(
define <vscale x 16 x i1> @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v12
-; CHECK-NEXT: vmfne.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmfne.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfne.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
@@ -289,9 +294,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32(
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: vmfne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -340,9 +346,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32(
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: vmfne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -391,11 +398,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32(
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v10
-; CHECK-NEXT: vmfne.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmfne.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfne.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
@@ -442,11 +450,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32(
define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v12
-; CHECK-NEXT: vmfne.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmfne.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfne.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
@@ -493,9 +502,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64(
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: vmfne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -544,11 +554,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64(
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v10
-; CHECK-NEXT: vmfne.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmfne.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfne.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
@@ -595,11 +606,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64(
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v12
-; CHECK-NEXT: vmfne.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmfne.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfne.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
@@ -647,8 +659,8 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -694,8 +706,8 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -741,8 +753,8 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -788,8 +800,8 @@ define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmfne.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -835,8 +847,8 @@ define <vscale x 16 x i1> @intrinsic_vmfne_mask_vf_nxv16f16_f16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmfne.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -882,8 +894,8 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -929,8 +941,8 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -976,8 +988,8 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmfne.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1023,8 +1035,8 @@ define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmfne.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1070,8 +1082,8 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1117,8 +1129,8 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmfne.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1164,8 +1176,8 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmfne.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
index 8fabf93356ae..1c71242c3c7d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
@@ -423,8 +423,8 @@ define <vscale x 128 x i8> @vmin_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB34_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
@@ -986,8 +986,8 @@ define <vscale x 32 x i32> @vmin_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
@@ -1046,8 +1046,8 @@ define <vscale x 32 x i32> @vmin_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i3
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
@@ -1084,8 +1084,8 @@ define <vscale x 32 x i32> @vmin_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, i
; RV64-NEXT: slli a1, a1, 1
; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV64-NEXT: vmin.vx v8, v8, a0, v0.t
-; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vmin.vx v16, v16, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
index 8ec85e545a0f..6d89a9777cf9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
@@ -425,8 +425,8 @@ define <vscale x 128 x i8> @vminu_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB34_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
@@ -988,8 +988,8 @@ define <vscale x 32 x i32> @vminu_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
@@ -1048,8 +1048,8 @@ define <vscale x 32 x i32> @vminu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
@@ -1086,8 +1086,8 @@ define <vscale x 32 x i32> @vminu_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va,
; RV64-NEXT: slli a1, a1, 1
; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV64-NEXT: vminu.vx v8, v8, a0, v0.t
-; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vminu.vx v16, v16, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll
index 2d6e958fcd0b..14a1f084c398 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll
@@ -32,8 +32,8 @@ define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -74,8 +74,8 @@ define <vscale x 2 x i1> @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -116,8 +116,8 @@ define <vscale x 4 x i1> @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -158,8 +158,8 @@ define <vscale x 8 x i1> @intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -200,8 +200,8 @@ define <vscale x 16 x i1> @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -242,8 +242,8 @@ define <vscale x 32 x i1> @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1(<vscale x 32 x
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -284,8 +284,8 @@ define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll
index cc6c1f585bb7..9f181f7a30eb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmseq.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v10
-; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmseq.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v12
-; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmseq.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v10
-; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmseq.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v12
-; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmseq.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v10
-; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmseq.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v12
-; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmseq.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v10
-; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmseq.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v12
-; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmseq.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
@@ -953,8 +971,8 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1000,8 +1018,8 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1047,8 +1065,8 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1094,8 +1112,8 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1141,8 +1159,8 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmseq.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1188,8 +1206,8 @@ define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmseq.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1235,8 +1253,8 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1282,8 +1300,8 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1329,8 +1347,8 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1376,8 +1394,8 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmseq.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1423,8 +1441,8 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmseq.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1470,8 +1488,8 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1517,8 +1535,8 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1564,8 +1582,8 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmseq.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1611,8 +1629,8 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmseq.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1685,8 +1703,8 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmseq.vx v10, v8, a0, v0.t
; RV64-NEXT: vmv.v.v v0, v10
; RV64-NEXT: ret
@@ -1759,8 +1777,8 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmseq.vx v11, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v11
; RV64-NEXT: ret
@@ -1833,8 +1851,8 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmseq.vx v13, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v13
; RV64-NEXT: ret
@@ -1868,8 +1886,8 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1903,8 +1921,8 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1938,8 +1956,8 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1973,8 +1991,8 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2008,8 +2026,8 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2043,8 +2061,8 @@ define <vscale x 32 x i1> @intrinsic_vmseq_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2078,8 +2096,8 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2113,8 +2131,8 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2148,8 +2166,8 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2183,8 +2201,8 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2218,8 +2236,8 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2253,8 +2271,8 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2288,8 +2306,8 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2323,8 +2341,8 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2358,8 +2376,8 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2393,8 +2411,8 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2428,8 +2446,8 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2463,8 +2481,8 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll
index c8f9b60a3f2d..75fc407abbc2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
@@ -954,8 +972,8 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1002,8 +1020,8 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1050,8 +1068,8 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1098,8 +1116,8 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1146,8 +1164,8 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v11, v10
; CHECK-NEXT: ret
@@ -1194,8 +1212,8 @@ define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v13, v12
; CHECK-NEXT: ret
@@ -1242,8 +1260,8 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1290,8 +1308,8 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1338,8 +1356,8 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1386,8 +1404,8 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v11, v10
; CHECK-NEXT: ret
@@ -1434,8 +1452,8 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v13, v12
; CHECK-NEXT: ret
@@ -1482,8 +1500,8 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1530,8 +1548,8 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1578,8 +1596,8 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v11, v10
; CHECK-NEXT: ret
@@ -1626,8 +1644,8 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v13, v12
; CHECK-NEXT: ret
@@ -1701,8 +1719,8 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmslt.vx v10, v8, a0, v0.t
; RV64-NEXT: vmxor.mm v0, v10, v9
; RV64-NEXT: ret
@@ -1776,8 +1794,8 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmslt.vx v11, v8, a0, v0.t
; RV64-NEXT: vmxor.mm v0, v11, v10
; RV64-NEXT: ret
@@ -1851,8 +1869,8 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmslt.vx v13, v8, a0, v0.t
; RV64-NEXT: vmxor.mm v0, v13, v12
; RV64-NEXT: ret
@@ -1886,8 +1904,8 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, -15, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1921,8 +1939,8 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, -13, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1956,8 +1974,8 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, -11, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1991,8 +2009,8 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, -9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2026,8 +2044,8 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsgt.vi v11, v8, -7, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2061,8 +2079,8 @@ define <vscale x 32 x i1> @intrinsic_vmsge_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsgt.vi v13, v8, -5, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2096,8 +2114,8 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, -3, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2131,8 +2149,8 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, -1, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2166,8 +2184,8 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2201,8 +2219,8 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsgt.vi v11, v8, 2, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2236,8 +2254,8 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsgt.vi v13, v8, 4, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2271,8 +2289,8 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 6, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2306,8 +2324,8 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 8, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2341,8 +2359,8 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsgt.vi v11, v8, 10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2376,8 +2394,8 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsgt.vi v13, v8, 12, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2411,8 +2429,8 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 8, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2446,8 +2464,8 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsgt.vi v11, v8, 8, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2481,8 +2499,8 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsgt.vi v13, v8, 8, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
index b6c6d9e90f61..5568c1e9b1cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
@@ -954,8 +972,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1002,8 +1020,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1050,8 +1068,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1098,8 +1116,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1146,8 +1164,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v11, v10
; CHECK-NEXT: ret
@@ -1194,8 +1212,8 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v13, v12
; CHECK-NEXT: ret
@@ -1242,8 +1260,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1290,8 +1308,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1338,8 +1356,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1386,8 +1404,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v11, v10
; CHECK-NEXT: ret
@@ -1434,8 +1452,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v13, v12
; CHECK-NEXT: ret
@@ -1482,8 +1500,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1530,8 +1548,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1578,8 +1596,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v11, v10
; CHECK-NEXT: ret
@@ -1626,8 +1644,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v13, v12
; CHECK-NEXT: ret
@@ -1701,8 +1719,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmsltu.vx v10, v8, a0, v0.t
; RV64-NEXT: vmxor.mm v0, v10, v9
; RV64-NEXT: ret
@@ -1776,8 +1794,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmsltu.vx v11, v8, a0, v0.t
; RV64-NEXT: vmxor.mm v0, v11, v10
; RV64-NEXT: ret
@@ -1851,8 +1869,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmsltu.vx v13, v8, a0, v0.t
; RV64-NEXT: vmxor.mm v0, v13, v12
; RV64-NEXT: ret
@@ -1886,8 +1904,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, -15, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1921,8 +1939,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, -13, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1956,8 +1974,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, -11, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1991,8 +2009,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, -9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2026,8 +2044,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsgtu.vi v11, v8, -7, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2061,8 +2079,8 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsgtu.vi v13, v8, -5, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2096,8 +2114,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, -3, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2178,8 +2196,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2213,8 +2231,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsgtu.vi v11, v8, 2, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2248,8 +2266,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i16_i16(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsgtu.vi v13, v8, 4, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2283,8 +2301,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 6, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2318,8 +2336,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 8, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2353,8 +2371,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsgtu.vi v11, v8, 10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2388,8 +2406,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsgtu.vi v13, v8, 12, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2423,8 +2441,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 14, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2458,8 +2476,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsgtu.vi v11, v8, -16, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2493,8 +2511,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsgtu.vi v13, v8, -14, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
index dfd7096a65eb..f1fa6484d976 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmslt.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmslt.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmslt.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmslt.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmslt.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmslt.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmslt.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmslt.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
@@ -953,8 +971,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1000,8 +1018,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1047,8 +1065,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1094,8 +1112,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1141,8 +1159,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsgt.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1188,8 +1206,8 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsgt.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1235,8 +1253,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1282,8 +1300,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1329,8 +1347,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1376,8 +1394,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsgt.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1423,8 +1441,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsgt.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1470,8 +1488,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1517,8 +1535,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1564,8 +1582,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsgt.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1611,8 +1629,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsgt.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1685,8 +1703,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmsgt.vx v10, v8, a0, v0.t
; RV64-NEXT: vmv.v.v v0, v10
; RV64-NEXT: ret
@@ -1759,8 +1777,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmsgt.vx v11, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v11
; RV64-NEXT: ret
@@ -1833,8 +1851,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmsgt.vx v13, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v13
; RV64-NEXT: ret
@@ -1868,8 +1886,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1903,8 +1921,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1938,8 +1956,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1973,8 +1991,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2008,8 +2026,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2043,8 +2061,8 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2078,8 +2096,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2113,8 +2131,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2148,8 +2166,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2183,8 +2201,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2218,8 +2236,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2253,8 +2271,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2288,8 +2306,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2323,8 +2341,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2358,8 +2376,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2393,8 +2411,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2428,8 +2446,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2463,8 +2481,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
index 8826be03bbeb..de7a0ad87be2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v10, v8
-; CHECK-NEXT: vmsltu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v12, v8
-; CHECK-NEXT: vmsltu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v10, v8
-; CHECK-NEXT: vmsltu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v12, v8
-; CHECK-NEXT: vmsltu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v10, v8
-; CHECK-NEXT: vmsltu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v12, v8
-; CHECK-NEXT: vmsltu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v10, v8
-; CHECK-NEXT: vmsltu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v12, v8
-; CHECK-NEXT: vmsltu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
@@ -953,8 +971,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1000,8 +1018,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1047,8 +1065,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1094,8 +1112,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1141,8 +1159,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsgtu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1188,8 +1206,8 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsgtu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1235,8 +1253,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1282,8 +1300,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1329,8 +1347,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1376,8 +1394,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsgtu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1423,8 +1441,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsgtu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1470,8 +1488,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1517,8 +1535,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1564,8 +1582,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsgtu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1611,8 +1629,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsgtu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1685,8 +1703,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; RV64-NEXT: vmv.v.v v0, v10
; RV64-NEXT: ret
@@ -1759,8 +1777,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmsgtu.vx v11, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v11
; RV64-NEXT: ret
@@ -1833,8 +1851,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmsgtu.vx v13, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v13
; RV64-NEXT: ret
@@ -1868,8 +1886,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1903,8 +1921,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1938,8 +1956,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1973,8 +1991,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2008,8 +2026,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2043,8 +2061,8 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2078,8 +2096,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2113,8 +2131,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2148,8 +2166,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2183,8 +2201,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2218,8 +2236,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2253,8 +2271,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2288,8 +2306,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2323,8 +2341,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2358,8 +2376,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2393,8 +2411,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2428,8 +2446,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2463,8 +2481,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll
index 8ce9a3020b7a..05d402afc934 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsif.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll
@@ -32,8 +32,8 @@ define <vscale x 1 x i1> @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -74,8 +74,8 @@ define <vscale x 2 x i1> @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -116,8 +116,8 @@ define <vscale x 4 x i1> @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -158,8 +158,8 @@ define <vscale x 8 x i1> @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -200,8 +200,8 @@ define <vscale x 16 x i1> @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -242,8 +242,8 @@ define <vscale x 32 x i1> @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1(<vscale x 32 x
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -284,8 +284,8 @@ define <vscale x 64 x i1> @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll
index 5d5a28edbfe1..f54aef3ed405 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsle.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v10
-; CHECK-NEXT: vmsle.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsle.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v12
-; CHECK-NEXT: vmsle.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsle.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v10
-; CHECK-NEXT: vmsle.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsle.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v12
-; CHECK-NEXT: vmsle.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsle.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v10
-; CHECK-NEXT: vmsle.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsle.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v12
-; CHECK-NEXT: vmsle.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsle.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v10
-; CHECK-NEXT: vmsle.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsle.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v12
-; CHECK-NEXT: vmsle.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsle.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
@@ -953,8 +971,8 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1000,8 +1018,8 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1047,8 +1065,8 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1094,8 +1112,8 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1141,8 +1159,8 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsle.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1188,8 +1206,8 @@ define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsle.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1235,8 +1253,8 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1282,8 +1300,8 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1329,8 +1347,8 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1376,8 +1394,8 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsle.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1423,8 +1441,8 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsle.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1470,8 +1488,8 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1517,8 +1535,8 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1564,8 +1582,8 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsle.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1611,8 +1629,8 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsle.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1685,8 +1703,8 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmsle.vx v10, v8, a0, v0.t
; RV64-NEXT: vmv.v.v v0, v10
; RV64-NEXT: ret
@@ -1759,8 +1777,8 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmsle.vx v11, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v11
; RV64-NEXT: ret
@@ -1833,8 +1851,8 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmsle.vx v13, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v13
; RV64-NEXT: ret
@@ -1868,8 +1886,8 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1903,8 +1921,8 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1938,8 +1956,8 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1973,8 +1991,8 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2008,8 +2026,8 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2043,8 +2061,8 @@ define <vscale x 32 x i1> @intrinsic_vmsle_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2078,8 +2096,8 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2113,8 +2131,8 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2148,8 +2166,8 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2183,8 +2201,8 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2218,8 +2236,8 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2253,8 +2271,8 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2288,8 +2306,8 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2323,8 +2341,8 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2358,8 +2376,8 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2393,8 +2411,8 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2428,8 +2446,8 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2463,8 +2481,8 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
index c58ac2d07183..540577247484 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v10
-; CHECK-NEXT: vmsleu.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v12
-; CHECK-NEXT: vmsleu.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v10
-; CHECK-NEXT: vmsleu.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v12
-; CHECK-NEXT: vmsleu.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v10
-; CHECK-NEXT: vmsleu.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v12
-; CHECK-NEXT: vmsleu.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v10
-; CHECK-NEXT: vmsleu.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v12
-; CHECK-NEXT: vmsleu.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
@@ -953,8 +971,8 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1000,8 +1018,8 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1047,8 +1065,8 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1094,8 +1112,8 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1141,8 +1159,8 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsleu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1188,8 +1206,8 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsleu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1235,8 +1253,8 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1282,8 +1300,8 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1329,8 +1347,8 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1376,8 +1394,8 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsleu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1423,8 +1441,8 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsleu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1470,8 +1488,8 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1517,8 +1535,8 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1564,8 +1582,8 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsleu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1611,8 +1629,8 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsleu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1685,8 +1703,8 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmsleu.vx v10, v8, a0, v0.t
; RV64-NEXT: vmv.v.v v0, v10
; RV64-NEXT: ret
@@ -1759,8 +1777,8 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmsleu.vx v11, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v11
; RV64-NEXT: ret
@@ -1833,8 +1851,8 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmsleu.vx v13, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v13
; RV64-NEXT: ret
@@ -1868,8 +1886,8 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1903,8 +1921,8 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1938,8 +1956,8 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1973,8 +1991,8 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2008,8 +2026,8 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2043,8 +2061,8 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2078,8 +2096,8 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2113,8 +2131,8 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2148,8 +2166,8 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2183,8 +2201,8 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2218,8 +2236,8 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i16_i16(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2253,8 +2271,8 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2288,8 +2306,8 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2323,8 +2341,8 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2358,8 +2376,8 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2393,8 +2411,8 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2428,8 +2446,8 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2463,8 +2481,8 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll
index 6c6e580b043d..554d25172d4f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v10
-; CHECK-NEXT: vmslt.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmslt.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v12
-; CHECK-NEXT: vmslt.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmslt.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v10
-; CHECK-NEXT: vmslt.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmslt.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v12
-; CHECK-NEXT: vmslt.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmslt.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v10
-; CHECK-NEXT: vmslt.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmslt.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v12
-; CHECK-NEXT: vmslt.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmslt.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v10
-; CHECK-NEXT: vmslt.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmslt.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v12
-; CHECK-NEXT: vmslt.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmslt.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
@@ -953,8 +971,8 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1000,8 +1018,8 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1047,8 +1065,8 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1094,8 +1112,8 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1141,8 +1159,8 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1188,8 +1206,8 @@ define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1235,8 +1253,8 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1282,8 +1300,8 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1329,8 +1347,8 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1376,8 +1394,8 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1423,8 +1441,8 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1470,8 +1488,8 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1517,8 +1535,8 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1564,8 +1582,8 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1611,8 +1629,8 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1685,8 +1703,8 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmslt.vx v10, v8, a0, v0.t
; RV64-NEXT: vmv.v.v v0, v10
; RV64-NEXT: ret
@@ -1759,8 +1777,8 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmslt.vx v11, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v11
; RV64-NEXT: ret
@@ -1833,8 +1851,8 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmslt.vx v13, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v13
; RV64-NEXT: ret
@@ -1868,8 +1886,8 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, -15, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1903,8 +1921,8 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, -13, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1938,8 +1956,8 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, -11, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1973,8 +1991,8 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, -9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2008,8 +2026,8 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsle.vi v11, v8, -7, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2043,8 +2061,8 @@ define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsle.vi v13, v8, -5, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2078,8 +2096,8 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, -3, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2113,8 +2131,8 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, zero, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2148,8 +2166,8 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2183,8 +2201,8 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsle.vi v11, v8, 2, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2218,8 +2236,8 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsle.vi v13, v8, 4, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2253,8 +2271,8 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 6, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2288,8 +2306,8 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 8, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2323,8 +2341,8 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsle.vi v11, v8, 10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2358,8 +2376,8 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsle.vi v13, v8, 12, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2393,8 +2411,8 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 8, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2428,8 +2446,8 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsle.vi v11, v8, 8, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2463,8 +2481,8 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsle.vi v13, v8, 8, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll
index 76f3e449ab58..7a8efa6c80fb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v10
-; CHECK-NEXT: vmsltu.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsltu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v12
-; CHECK-NEXT: vmsltu.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsltu.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsltu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v10
-; CHECK-NEXT: vmsltu.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsltu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v12
-; CHECK-NEXT: vmsltu.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsltu.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsltu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v10
-; CHECK-NEXT: vmsltu.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsltu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v12
-; CHECK-NEXT: vmsltu.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsltu.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsltu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v10
-; CHECK-NEXT: vmsltu.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsltu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v12
-; CHECK-NEXT: vmsltu.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsltu.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsltu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
@@ -953,8 +971,8 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1000,8 +1018,8 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1047,8 +1065,8 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1094,8 +1112,8 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1141,8 +1159,8 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1188,8 +1206,8 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1235,8 +1253,8 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1282,8 +1300,8 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1329,8 +1347,8 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1376,8 +1394,8 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1423,8 +1441,8 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1470,8 +1488,8 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1517,8 +1535,8 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1564,8 +1582,8 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1611,8 +1629,8 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1685,8 +1703,8 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmsltu.vx v10, v8, a0, v0.t
; RV64-NEXT: vmv.v.v v0, v10
; RV64-NEXT: ret
@@ -1759,8 +1777,8 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmsltu.vx v11, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v11
; RV64-NEXT: ret
@@ -1833,8 +1851,8 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmsltu.vx v13, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v13
; RV64-NEXT: ret
@@ -1868,8 +1886,8 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, -15, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1903,8 +1921,8 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, -13, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1938,8 +1956,8 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, -11, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1973,8 +1991,8 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, -9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2008,8 +2026,8 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsleu.vi v11, v8, -7, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2043,8 +2061,8 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsleu.vi v13, v8, -5, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2078,8 +2096,8 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, -3, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2113,8 +2131,8 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, zero, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2148,8 +2166,8 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2183,8 +2201,8 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsleu.vi v11, v8, 2, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2218,8 +2236,8 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i16_i16(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsleu.vi v13, v8, 4, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2253,8 +2271,8 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 6, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2288,8 +2306,8 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 8, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2323,8 +2341,8 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsleu.vi v11, v8, 10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2358,8 +2376,8 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsleu.vi v13, v8, 12, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2393,8 +2411,8 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 14, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2428,8 +2446,8 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsleu.vi v11, v8, -16, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2463,8 +2481,8 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsleu.vi v13, v8, -14, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll
index 161c1bc4314f..bd6bd8a804bc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsne.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: vmsne.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsne.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsne.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: vmsne.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsne.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsne.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: vmsne.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsne.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsne.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: vmsne.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsne.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsne.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: vmsne.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsne.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsne.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: vmsne.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsne.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsne.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: vmsne.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsne.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsne.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: vmsne.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsne.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsne.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
@@ -953,8 +971,8 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1000,8 +1018,8 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1047,8 +1065,8 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1094,8 +1112,8 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1141,8 +1159,8 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsne.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1188,8 +1206,8 @@ define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsne.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1235,8 +1253,8 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1282,8 +1300,8 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1329,8 +1347,8 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1376,8 +1394,8 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsne.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1423,8 +1441,8 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsne.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1470,8 +1488,8 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1517,8 +1535,8 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1564,8 +1582,8 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsne.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1611,8 +1629,8 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsne.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1685,8 +1703,8 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmsne.vx v10, v8, a0, v0.t
; RV64-NEXT: vmv.v.v v0, v10
; RV64-NEXT: ret
@@ -1759,8 +1777,8 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmsne.vx v11, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v11
; RV64-NEXT: ret
@@ -1833,8 +1851,8 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmsne.vx v13, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v13
; RV64-NEXT: ret
@@ -1868,8 +1886,8 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1903,8 +1921,8 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1938,8 +1956,8 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1973,8 +1991,8 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2008,8 +2026,8 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2043,8 +2061,8 @@ define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2078,8 +2096,8 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2113,8 +2131,8 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2148,8 +2166,8 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2183,8 +2201,8 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2218,8 +2236,8 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2253,8 +2271,8 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2288,8 +2306,8 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2323,8 +2341,8 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2358,8 +2376,8 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2393,8 +2411,8 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2428,8 +2446,8 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2463,8 +2481,8 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll
index f6f90eddcd8c..0c60681ea8de 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsof.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll
@@ -32,8 +32,8 @@ define <vscale x 1 x i1> @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -74,8 +74,8 @@ define <vscale x 2 x i1> @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -116,8 +116,8 @@ define <vscale x 4 x i1> @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -158,8 +158,8 @@ define <vscale x 8 x i1> @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -200,8 +200,8 @@ define <vscale x 16 x i1> @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -242,8 +242,8 @@ define <vscale x 32 x i1> @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1(<vscale x 32 x
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -284,8 +284,8 @@ define <vscale x 64 x i1> @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll b/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll
index 8b368bfaab08..f7ca65801dc8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll
@@ -6,8 +6,8 @@ define iXLen @bool_vec(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m, i32 %evl) {
; RV32-LABEL: bool_vec:
; RV32: # %bb.0:
; RV32-NEXT: vmv1r.v v9, v0
-; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV32-NEXT: vmv1r.v v0, v8
+; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV32-NEXT: vfirst.m a1, v9, v0.t
; RV32-NEXT: bltz a1, .LBB0_2
; RV32-NEXT: # %bb.1:
@@ -20,8 +20,8 @@ define iXLen @bool_vec(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m, i32 %evl) {
; RV64-NEXT: vmv1r.v v9, v0
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: srli a0, a0, 32
-; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV64-NEXT: vmv1r.v v0, v8
+; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV64-NEXT: vfirst.m a1, v9, v0.t
; RV64-NEXT: bltz a1, .LBB0_2
; RV64-NEXT: # %bb.1:
@@ -36,8 +36,8 @@ define iXLen @bool_vec_zero_poison(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m,
; RV32-LABEL: bool_vec_zero_poison:
; RV32: # %bb.0:
; RV32-NEXT: vmv1r.v v9, v0
-; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV32-NEXT: vmv1r.v v0, v8
+; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV32-NEXT: vfirst.m a0, v9, v0.t
; RV32-NEXT: ret
;
@@ -46,8 +46,8 @@ define iXLen @bool_vec_zero_poison(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m,
; RV64-NEXT: vmv1r.v v9, v0
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: srli a0, a0, 32
-; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV64-NEXT: vmv1r.v v0, v8
+; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV64-NEXT: vfirst.m a0, v9, v0.t
; RV64-NEXT: ret
%r = call iXLen @llvm.vp.cttz.elts.iXLen.nxv2i1(<vscale x 2 x i1> %src, i1 1, <vscale x 2 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll
index 8b1660283cb7..d0f2ce1ca800 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll
@@ -450,14 +450,14 @@ define <vscale x 64 x i8> @test_vp_reverse_nxv64i8_masked(<vscale x 64 x i8> %sr
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: vsetvli a3, zero, e16, m8, ta, ma
; CHECK-NEXT: vid.v v16
-; CHECK-NEXT: vrsub.vx v24, v16, a2
+; CHECK-NEXT: vrsub.vx v16, v16, a2
; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v20, v8, v24
-; CHECK-NEXT: vrgatherei16.vv v16, v12, v24
+; CHECK-NEXT: vrgatherei16.vv v28, v8, v16
+; CHECK-NEXT: vrgatherei16.vv v24, v12, v16
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub a1, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vslidedown.vx v8, v16, a1, v0.t
+; CHECK-NEXT: vslidedown.vx v8, v24, a1, v0.t
; CHECK-NEXT: ret
%dst = call <vscale x 64 x i8> @llvm.experimental.vp.reverse.nxv64i8(<vscale x 64 x i8> %src, <vscale x 64 x i1> %mask, i32 %evl)
ret <vscale x 64 x i8> %dst
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll
index a30ebf2d33b5..7f81b99eb033 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll
@@ -8,8 +8,8 @@ define <2 x i1> @test_vp_reverse_v2i1_masked(<2 x i1> %src, <2 x i1> %mask, i32
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
@@ -45,8 +45,8 @@ define <4 x i1> @test_vp_reverse_v4i1_masked(<4 x i1> %src, <4 x i1> %mask, i32
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
@@ -82,8 +82,8 @@ define <8 x i1> @test_vp_reverse_v8i1_masked(<8 x i1> %src, <8 x i1> %mask, i32
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
@@ -119,8 +119,8 @@ define <16 x i1> @test_vp_reverse_v16i1_masked(<16 x i1> %src, <16 x i1> %mask,
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll
index ceb6a164e20d..acf7d16bda98 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll
@@ -7,8 +7,8 @@ define <vscale x 1 x i1> @test_vp_reverse_nxv1i1_masked(<vscale x 1 x i1> %src,
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
@@ -44,8 +44,8 @@ define <vscale x 2 x i1> @test_vp_reverse_nxv2i1_masked(<vscale x 2 x i1> %src,
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
@@ -81,8 +81,8 @@ define <vscale x 4 x i1> @test_vp_reverse_nxv4i1_masked(<vscale x 4 x i1> %src,
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
@@ -118,8 +118,8 @@ define <vscale x 8 x i1> @test_vp_reverse_nxv8i1_masked(<vscale x 8 x i1> %src,
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
@@ -155,8 +155,8 @@ define <vscale x 16 x i1> @test_vp_reverse_nxv16i1_masked(<vscale x 16 x i1> %sr
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT: vid.v v12, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v12, v12, a0, v0.t
@@ -193,8 +193,8 @@ define <vscale x 32 x i1> @test_vp_reverse_nxv32i1_masked(<vscale x 32 x i1> %sr
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT: vid.v v16, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v16, v16, a0, v0.t
@@ -242,8 +242,8 @@ define <vscale x 64 x i1> @test_vp_reverse_nxv64i1_masked(<vscale x 64 x i1> %sr
; CHECK-NEXT: vrgatherei16.vv v16, v28, v0
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub a1, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vslidedown.vx v16, v16, a1, v0.t
; CHECK-NEXT: vmsne.vi v8, v16, 0, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
@@ -263,14 +263,14 @@ define <vscale x 64 x i1> @test_vp_reverse_nxv64i1(<vscale x 64 x i1> %src, i32
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: vsetvli a3, zero, e16, m8, ta, ma
; CHECK-NEXT: vid.v v16
-; CHECK-NEXT: vrsub.vx v24, v16, a2
+; CHECK-NEXT: vrsub.vx v16, v16, a2
; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v20, v8, v24
-; CHECK-NEXT: vrgatherei16.vv v16, v12, v24
+; CHECK-NEXT: vrgatherei16.vv v28, v8, v16
+; CHECK-NEXT: vrgatherei16.vv v24, v12, v16
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub a1, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vslidedown.vx v8, v16, a1
+; CHECK-NEXT: vslidedown.vx v8, v24, a1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
index ce0ae2022885..9496cd82947d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
@@ -68,8 +68,8 @@ define <2 x i1> @test_vp_splice_v2i1_masked(<2 x i1> %va, <2 x i1> %vb, <2 x i1>
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
@@ -141,8 +141,8 @@ define <4 x i1> @test_vp_splice_v4i1_masked(<4 x i1> %va, <4 x i1> %vb, <4 x i1>
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
@@ -214,8 +214,8 @@ define <8 x i1> @test_vp_splice_v8i1_masked(<8 x i1> %va, <8 x i1> %vb, <8 x i1>
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
@@ -287,8 +287,8 @@ define <16 x i1> @test_vp_splice_v16i1_masked(<16 x i1> %va, <16 x i1> %vb, <16
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
index 668cff234293..902763082522 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
@@ -71,8 +71,8 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <v
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
@@ -144,8 +144,8 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <v
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
@@ -217,8 +217,8 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <v
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
@@ -290,8 +290,8 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <v
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
@@ -363,8 +363,8 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va,
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v14, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vslideup.vx v10, v12, a0, v0.t
@@ -437,8 +437,8 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va,
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v16, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vslideup.vx v16, v12, a0, v0.t
@@ -511,8 +511,8 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1_masked(<vscale x 64 x i1> %va,
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v24, v24, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v24, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vslideup.vx v24, v16, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
index c86fee630593..c0d7ecf74956 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
@@ -274,8 +274,8 @@ define <vscale x 32 x i8> @vpgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8
; RV32-NEXT: .LBB12_2:
; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf4 v16, v8
-; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
@@ -308,8 +308,8 @@ define <vscale x 32 x i8> @vpgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8
; RV64-NEXT: .LBB12_2:
; RV64-NEXT: vsetvli a6, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v10
-; RV64-NEXT: vsetvli zero, a5, e8, m1, ta, ma
; RV64-NEXT: vmv1r.v v0, v13
+; RV64-NEXT: vsetvli zero, a5, e8, m1, ta, ma
; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT: bltu a1, a3, .LBB12_4
; RV64-NEXT: # %bb.3:
@@ -331,8 +331,8 @@ define <vscale x 32 x i8> @vpgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8
; RV64-NEXT: .LBB12_6:
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
-; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i8, ptr %base, <vscale x 32 x i8> %idxs
@@ -2269,18 +2269,18 @@ define <vscale x 16 x double> @vpgather_nxv16f64(<vscale x 16 x ptr> %ptrs, <vsc
; RV32-NEXT: sub a2, a0, a1
; RV32-NEXT: sltu a3, a0, a2
; RV32-NEXT: addi a3, a3, -1
+; RV32-NEXT: srli a4, a1, 3
+; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vx v0, v0, a4
; RV32-NEXT: and a2, a3, a2
-; RV32-NEXT: srli a3, a1, 3
-; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vx v0, v0, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (zero), v12, v0.t
; RV32-NEXT: bltu a0, a1, .LBB102_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB102_2:
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v24
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v24, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v24
; RV32-NEXT: ret
@@ -2292,18 +2292,18 @@ define <vscale x 16 x double> @vpgather_nxv16f64(<vscale x 16 x ptr> %ptrs, <vsc
; RV64-NEXT: sub a2, a0, a1
; RV64-NEXT: sltu a3, a0, a2
; RV64-NEXT: addi a3, a3, -1
+; RV64-NEXT: srli a4, a1, 3
+; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vx v0, v0, a4
; RV64-NEXT: and a2, a3, a2
-; RV64-NEXT: srli a3, a1, 3
-; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
-; RV64-NEXT: vslidedown.vx v0, v0, a3
; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (zero), v16, v0.t
; RV64-NEXT: bltu a0, a1, .LBB102_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB102_2:
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.gather.nxv16f64.nxv16p0(<vscale x 16 x ptr> %ptrs, <vscale x 16 x i1> %m, i32 %evl)
@@ -2319,20 +2319,20 @@ define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vs
; RV32-NEXT: vsll.vi v24, v16, 3
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: sub a3, a1, a2
-; RV32-NEXT: sltu a4, a1, a3
-; RV32-NEXT: addi a4, a4, -1
-; RV32-NEXT: and a3, a4, a3
; RV32-NEXT: srli a4, a2, 3
; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vx v0, v0, a4
+; RV32-NEXT: sltu a4, a1, a3
+; RV32-NEXT: addi a4, a4, -1
+; RV32-NEXT: and a3, a4, a3
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: bltu a1, a2, .LBB103_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB103_2:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
;
@@ -2340,26 +2340,27 @@ define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vs
; RV64: # %bb.0:
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf4 v16, v8
-; RV64-NEXT: vsll.vi v24, v16, 3
; RV64-NEXT: vsext.vf4 v16, v10
; RV64-NEXT: vsll.vi v16, v16, 3
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: sub a3, a1, a2
-; RV64-NEXT: sltu a4, a1, a3
-; RV64-NEXT: addi a4, a4, -1
-; RV64-NEXT: and a3, a4, a3
; RV64-NEXT: srli a4, a2, 3
; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a4
+; RV64-NEXT: sltu a4, a1, a3
+; RV64-NEXT: addi a4, a4, -1
+; RV64-NEXT: and a3, a4, a3
; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
+; RV64-NEXT: vsetvli a3, zero, e64, m8, ta, ma
+; RV64-NEXT: vsext.vf4 v24, v8
+; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: bltu a1, a2, .LBB103_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB103_2:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 16 x i16> %idxs
@@ -2376,20 +2377,20 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base
; RV32-NEXT: vsll.vi v24, v16, 3
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: sub a3, a1, a2
-; RV32-NEXT: sltu a4, a1, a3
-; RV32-NEXT: addi a4, a4, -1
-; RV32-NEXT: and a3, a4, a3
; RV32-NEXT: srli a4, a2, 3
; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vx v0, v0, a4
+; RV32-NEXT: sltu a4, a1, a3
+; RV32-NEXT: addi a4, a4, -1
+; RV32-NEXT: and a3, a4, a3
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: bltu a1, a2, .LBB104_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB104_2:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
;
@@ -2398,25 +2399,26 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v10
-; RV64-NEXT: vsext.vf4 v24, v8
-; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: vsll.vi v16, v16, 3
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: sub a3, a1, a2
-; RV64-NEXT: sltu a4, a1, a3
-; RV64-NEXT: addi a4, a4, -1
-; RV64-NEXT: and a3, a4, a3
; RV64-NEXT: srli a4, a2, 3
; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a4
+; RV64-NEXT: sltu a4, a1, a3
+; RV64-NEXT: addi a4, a4, -1
+; RV64-NEXT: and a3, a4, a3
; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
+; RV64-NEXT: vsetvli a3, zero, e64, m8, ta, ma
+; RV64-NEXT: vsext.vf4 v24, v8
+; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: bltu a1, a2, .LBB104_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB104_2:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 16 x i16> %idxs to <vscale x 16 x i64>
@@ -2434,20 +2436,20 @@ define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(ptr %base
; RV32-NEXT: vsll.vi v24, v16, 3
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: sub a3, a1, a2
-; RV32-NEXT: sltu a4, a1, a3
-; RV32-NEXT: addi a4, a4, -1
-; RV32-NEXT: and a3, a4, a3
; RV32-NEXT: srli a4, a2, 3
; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vx v0, v0, a4
+; RV32-NEXT: sltu a4, a1, a3
+; RV32-NEXT: addi a4, a4, -1
+; RV32-NEXT: and a3, a4, a3
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: bltu a1, a2, .LBB105_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB105_2:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
;
@@ -2459,20 +2461,20 @@ define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(ptr %base
; RV64-NEXT: vsll.vi v24, v16, 3
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: sub a3, a1, a2
-; RV64-NEXT: sltu a4, a1, a3
-; RV64-NEXT: addi a4, a4, -1
-; RV64-NEXT: and a3, a4, a3
; RV64-NEXT: srli a4, a2, 3
; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a4
+; RV64-NEXT: sltu a4, a1, a3
+; RV64-NEXT: addi a4, a4, -1
+; RV64-NEXT: and a3, a4, a3
; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV64-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV64-NEXT: bltu a1, a2, .LBB105_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB105_2:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei32.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 16 x i16> %idxs to <vscale x 16 x i64>
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpload.ll b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
index f07c16476c56..1b1e9153a2fd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
@@ -444,18 +444,18 @@ define <vscale x 16 x double> @vpload_nxv16f64(ptr %ptr, <vscale x 16 x i1> %m,
; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: and a3, a4, a3
; CHECK-NEXT: slli a4, a2, 3
-; CHECK-NEXT: add a4, a0, a4
; CHECK-NEXT: srli a5, a2, 3
; CHECK-NEXT: vsetvli a6, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a5
+; CHECK-NEXT: add a4, a0, a4
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a4), v0.t
; CHECK-NEXT: bltu a1, a2, .LBB37_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB37_2:
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 16 x double> @llvm.vp.load.nxv16f64.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl)
@@ -489,10 +489,10 @@ define <vscale x 16 x double> @vpload_nxv17f64(ptr %ptr, ptr %out, <vscale x 17
; CHECK-NEXT: addi a7, a7, -1
; CHECK-NEXT: and a6, a7, a6
; CHECK-NEXT: slli a7, a3, 3
-; CHECK-NEXT: add a7, a0, a7
; CHECK-NEXT: srli t0, a3, 3
; CHECK-NEXT: vsetvli t1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v8, t0
+; CHECK-NEXT: add a7, a0, a7
; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a7), v0.t
; CHECK-NEXT: sub a5, a2, a5
@@ -504,18 +504,18 @@ define <vscale x 16 x double> @vpload_nxv17f64(ptr %ptr, ptr %out, <vscale x 17
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB38_4:
; CHECK-NEXT: slli a5, a3, 4
-; CHECK-NEXT: add a5, a0, a5
; CHECK-NEXT: srli a6, a3, 2
; CHECK-NEXT: vsetvli a7, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v8, a6
+; CHECK-NEXT: add a5, a0, a5
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a5), v0.t
; CHECK-NEXT: bltu a4, a3, .LBB38_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a4, a3
; CHECK-NEXT: .LBB38_6:
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: vs1r.v v24, (a1)
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
index 26f7c56f05ce..094e6c9cc754 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
@@ -373,8 +373,8 @@ define <vscale x 128 x i8> @vpmerge_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: vlm.v v0, (a2)
; CHECK-NEXT: sub a2, a3, a1
; CHECK-NEXT: sltu a4, a3, a2
-; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: vl8r.v v8, (a0)
+; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: and a2, a4, a2
; CHECK-NEXT: vsetvli zero, a2, e8, m8, tu, ma
; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0
@@ -382,10 +382,10 @@ define <vscale x 128 x i8> @vpmerge_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a3, a1
; CHECK-NEXT: .LBB28_2:
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, tu, ma
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v24, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -414,8 +414,8 @@ define <vscale x 128 x i8> @vpmerge_vx_nxv128i8(i8 %a, <vscale x 128 x i8> %vb,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB29_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, tu, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, tu, ma
; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 128 x i8> poison, i8 %a, i32 0
@@ -442,8 +442,8 @@ define <vscale x 128 x i8> @vpmerge_vi_nxv128i8(<vscale x 128 x i8> %vb, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB30_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
; CHECK-NEXT: vmerge.vim v8, v8, 2, v0
; CHECK-NEXT: ret
%v = call <vscale x 128 x i8> @llvm.vp.merge.nxv128i8(<vscale x 128 x i1> %m, <vscale x 128 x i8> splat (i8 2), <vscale x 128 x i8> %vb, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
index 351fc500145e..59662db42898 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
@@ -2124,10 +2124,10 @@ define void @vpscatter_nxv16f64(<vscale x 16 x double> %val, <vscale x 16 x ptr>
; RV32-NEXT: sub a2, a1, a0
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
-; RV32-NEXT: and a1, a1, a2
; RV32-NEXT: srli a0, a0, 3
-; RV32-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
+; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vx v0, v0, a0
+; RV32-NEXT: and a1, a1, a2
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (zero), v28, v0.t
; RV32-NEXT: ret
@@ -2157,13 +2157,13 @@ define void @vpscatter_nxv16f64(<vscale x 16 x double> %val, <vscale x 16 x ptr>
; RV64-NEXT: sub a0, a2, a1
; RV64-NEXT: sltu a2, a2, a0
; RV64-NEXT: addi a2, a2, -1
-; RV64-NEXT: and a0, a2, a0
; RV64-NEXT: srli a1, a1, 3
-; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
+; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a1
+; RV64-NEXT: and a0, a2, a0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v16, (zero), v8, v0.t
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
@@ -2192,10 +2192,10 @@ define void @vpscatter_baseidx_nxv16i16_nxv16f64(<vscale x 16 x double> %val, pt
; RV32-NEXT: sub a3, a2, a1
; RV32-NEXT: sltu a2, a2, a3
; RV32-NEXT: addi a2, a2, -1
-; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: srli a1, a1, 3
-; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
+; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vx v0, v0, a1
+; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: ret
@@ -2232,11 +2232,10 @@ define void @vpscatter_baseidx_nxv16i16_nxv16f64(<vscale x 16 x double> %val, pt
; RV64-NEXT: sub a3, a2, a1
; RV64-NEXT: sltu a2, a2, a3
; RV64-NEXT: addi a2, a2, -1
-; RV64-NEXT: and a2, a2, a3
; RV64-NEXT: srli a1, a1, 3
-; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
+; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a1
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64-NEXT: and a2, a2, a3
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 3
; RV64-NEXT: add a1, sp, a1
@@ -2244,6 +2243,7 @@ define void @vpscatter_baseidx_nxv16i16_nxv16f64(<vscale x 16 x double> %val, pt
; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: addi a1, sp, 16
; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 4
@@ -2273,10 +2273,10 @@ define void @vpscatter_baseidx_sext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
; RV32-NEXT: sub a3, a2, a1
; RV32-NEXT: sltu a2, a2, a3
; RV32-NEXT: addi a2, a2, -1
-; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: srli a1, a1, 3
-; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
+; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vx v0, v0, a1
+; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: ret
@@ -2308,22 +2308,22 @@ define void @vpscatter_baseidx_sext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a3, a1
; RV64-NEXT: .LBB101_2:
+; RV64-NEXT: addi a4, sp, 16
+; RV64-NEXT: vl1r.v v0, (a4) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV64-NEXT: addi a3, sp, 16
-; RV64-NEXT: vl1r.v v0, (a3) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: sub a3, a2, a1
; RV64-NEXT: sltu a2, a2, a3
; RV64-NEXT: addi a2, a2, -1
-; RV64-NEXT: and a2, a2, a3
; RV64-NEXT: srli a1, a1, 3
-; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
+; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a1
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64-NEXT: and a2, a2, a3
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 10
@@ -2355,10 +2355,10 @@ define void @vpscatter_baseidx_zext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
; RV32-NEXT: sub a3, a2, a1
; RV32-NEXT: sltu a2, a2, a3
; RV32-NEXT: addi a2, a2, -1
-; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: srli a1, a1, 3
-; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
+; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vx v0, v0, a1
+; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: ret
@@ -2380,10 +2380,10 @@ define void @vpscatter_baseidx_zext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
; RV64-NEXT: sub a3, a2, a1
; RV64-NEXT: sltu a2, a2, a3
; RV64-NEXT: addi a2, a2, -1
-; RV64-NEXT: and a2, a2, a3
; RV64-NEXT: srli a1, a1, 3
-; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
+; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a1
+; RV64-NEXT: and a2, a2, a3
; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vsoxei32.v v16, (a0), v28, v0.t
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
index c12fc0497742..ce0ee38bc704 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
@@ -380,10 +380,10 @@ define void @vpstore_nxv16f64(<vscale x 16 x double> %val, ptr %ptr, <vscale x 1
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a1, a1, a3
; CHECK-NEXT: slli a3, a2, 3
-; CHECK-NEXT: add a0, a0, a3
; CHECK-NEXT: srli a2, a2, 3
-; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a2
+; CHECK-NEXT: add a0, a0, a3
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vse64.v v16, (a0), v0.t
; CHECK-NEXT: ret
@@ -420,36 +420,36 @@ define void @vpstore_nxv17f64(<vscale x 17 x double> %val, ptr %ptr, <vscale x 1
; CHECK-NEXT: vl8re64.v v0, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
; CHECK-NEXT: vse64.v v8, (a1), v0.t
; CHECK-NEXT: sub a0, a5, a3
; CHECK-NEXT: sltu a5, a5, a0
; CHECK-NEXT: addi a5, a5, -1
-; CHECK-NEXT: and a0, a5, a0
-; CHECK-NEXT: slli a5, a3, 3
-; CHECK-NEXT: add a5, a1, a5
-; CHECK-NEXT: srli a6, a3, 3
+; CHECK-NEXT: and a5, a5, a0
+; CHECK-NEXT: slli a0, a3, 3
+; CHECK-NEXT: add a6, a1, a0
+; CHECK-NEXT: srli a0, a3, 3
; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v24, a6
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v24, a0
; CHECK-NEXT: sub a0, a2, a4
; CHECK-NEXT: sltu a2, a2, a0
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: and a0, a2, a0
-; CHECK-NEXT: vse64.v v16, (a5), v0.t
+; CHECK-NEXT: vsetvli zero, a5, e64, m8, ta, ma
+; CHECK-NEXT: vse64.v v16, (a6), v0.t
; CHECK-NEXT: bltu a0, a3, .LBB31_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a0, a3
; CHECK-NEXT: .LBB31_6:
; CHECK-NEXT: slli a2, a3, 4
-; CHECK-NEXT: add a1, a1, a2
; CHECK-NEXT: srli a3, a3, 2
-; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v24, a3
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vse64.v v8, (a1), v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
index 6d42b15273cf..fa7830133834 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
@@ -1018,11 +1018,11 @@ declare half @llvm.vector.reduce.fmin.nxv10f16(<vscale x 10 x half>)
define half @vreduce_fmin_nxv10f16(<vscale x 10 x half> %v) {
; CHECK-LABEL: vreduce_fmin_nxv10f16:
; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, %hi(.LCPI73_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI73_0)
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vlse16.v v12, (a0), zero
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: lui a1, %hi(.LCPI73_0)
-; CHECK-NEXT: addi a1, a1, %lo(.LCPI73_0)
-; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
-; CHECK-NEXT: vlse16.v v12, (a1), zero
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a0
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
index 46560fc501c6..f21b42e9519b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
@@ -192,8 +192,8 @@ define half @vpreduce_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscale x
; ZVFH-NEXT: vfmv.s.f v25, fa0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfredusum.vs v25, v8, v25, v0.t
-; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v24
+; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; ZVFH-NEXT: vfredusum.vs v25, v16, v25, v0.t
; ZVFH-NEXT: vfmv.f.s fa0, v25
; ZVFH-NEXT: ret
@@ -247,8 +247,8 @@ define half @vpreduce_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscale x
; ZVFHMIN-NEXT: vfmv.s.f v8, fa5
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a5, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v6
+; ZVFHMIN-NEXT: vsetvli zero, a5, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfredusum.vs v8, v24, v8, v0.t
; ZVFHMIN-NEXT: vfmv.f.s fa5, v8
; ZVFHMIN-NEXT: fcvt.h.s fa5, fa5
@@ -257,8 +257,8 @@ define half @vpreduce_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscale x
; ZVFHMIN-NEXT: vfmv.s.f v8, fa5
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfredusum.vs v8, v24, v8, v0.t
; ZVFHMIN-NEXT: vfmv.f.s fa5, v8
; ZVFHMIN-NEXT: fcvt.h.s fa5, fa5
@@ -298,8 +298,8 @@ define half @vpreduce_ord_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscal
; ZVFH-NEXT: vfmv.s.f v25, fa0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfredosum.vs v25, v8, v25, v0.t
-; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v24
+; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; ZVFH-NEXT: vfredosum.vs v25, v16, v25, v0.t
; ZVFH-NEXT: vfmv.f.s fa0, v25
; ZVFH-NEXT: ret
@@ -353,8 +353,8 @@ define half @vpreduce_ord_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscal
; ZVFHMIN-NEXT: vfmv.s.f v8, fa5
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a5, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v6
+; ZVFHMIN-NEXT: vsetvli zero, a5, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfredosum.vs v8, v24, v8, v0.t
; ZVFHMIN-NEXT: vfmv.f.s fa5, v8
; ZVFHMIN-NEXT: fcvt.h.s fa5, fa5
@@ -363,8 +363,8 @@ define half @vpreduce_ord_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscal
; ZVFHMIN-NEXT: vfmv.s.f v8, fa5
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfredosum.vs v8, v24, v8, v0.t
; ZVFHMIN-NEXT: vfmv.f.s fa5, v8
; ZVFHMIN-NEXT: fcvt.h.s fa5, fa5
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
index 7bcf37b1af3c..c41ddaee75a8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
@@ -1115,8 +1115,8 @@ define signext i32 @vpreduce_umax_nxv32i32(i32 signext %s, <vscale x 32 x i32> %
; CHECK-NEXT: vmv.s.x v25, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vredmaxu.vs v25, v8, v25, v0.t
-; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vredmaxu.vs v25, v16, v25, v0.t
; CHECK-NEXT: vmv.x.s a0, v25
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
index 94ed7e568a01..39666bb6119a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
@@ -24,8 +24,8 @@ define zeroext i1 @vpreduce_or_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vsca
; CHECK-LABEL: vpreduce_or_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -40,8 +40,8 @@ define zeroext i1 @vpreduce_xor_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_xor_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -72,8 +72,8 @@ define zeroext i1 @vpreduce_or_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vsca
; CHECK-LABEL: vpreduce_or_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -88,8 +88,8 @@ define zeroext i1 @vpreduce_xor_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_xor_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -120,8 +120,8 @@ define zeroext i1 @vpreduce_or_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vsca
; CHECK-LABEL: vpreduce_or_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -136,8 +136,8 @@ define zeroext i1 @vpreduce_xor_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_xor_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -168,8 +168,8 @@ define zeroext i1 @vpreduce_or_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vsca
; CHECK-LABEL: vpreduce_or_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -184,8 +184,8 @@ define zeroext i1 @vpreduce_xor_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_xor_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -216,8 +216,8 @@ define zeroext i1 @vpreduce_or_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <vs
; CHECK-LABEL: vpreduce_or_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -232,8 +232,8 @@ define zeroext i1 @vpreduce_xor_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <v
; CHECK-LABEL: vpreduce_xor_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -264,8 +264,8 @@ define zeroext i1 @vpreduce_or_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <vs
; CHECK-LABEL: vpreduce_or_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -280,8 +280,8 @@ define zeroext i1 @vpreduce_xor_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <v
; CHECK-LABEL: vpreduce_xor_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -296,8 +296,8 @@ define zeroext i1 @vpreduce_or_nxv40i1(i1 zeroext %s, <vscale x 40 x i1> %v, <vs
; CHECK-LABEL: vpreduce_or_nxv40i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -328,8 +328,8 @@ define zeroext i1 @vpreduce_or_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <vs
; CHECK-LABEL: vpreduce_or_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -344,8 +344,8 @@ define zeroext i1 @vpreduce_xor_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <v
; CHECK-LABEL: vpreduce_xor_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -366,16 +366,16 @@ define zeroext i1 @vpreduce_or_nxv128i1(i1 zeroext %s, <vscale x 128 x i1> %v, <
; CHECK-NEXT: sltu a4, a1, a3
; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: and a3, a4, a3
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a3, v8, v0.t
; CHECK-NEXT: snez a3, a3
; CHECK-NEXT: bltu a1, a2, .LBB22_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a1, v11, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -391,8 +391,8 @@ define zeroext i1 @vpreduce_add_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_add_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -407,8 +407,8 @@ define zeroext i1 @vpreduce_add_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_add_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -423,8 +423,8 @@ define zeroext i1 @vpreduce_add_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_add_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -439,8 +439,8 @@ define zeroext i1 @vpreduce_add_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_add_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -455,8 +455,8 @@ define zeroext i1 @vpreduce_add_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <v
; CHECK-LABEL: vpreduce_add_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -471,8 +471,8 @@ define zeroext i1 @vpreduce_add_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <v
; CHECK-LABEL: vpreduce_add_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -487,8 +487,8 @@ define zeroext i1 @vpreduce_add_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <v
; CHECK-LABEL: vpreduce_add_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -616,8 +616,8 @@ define zeroext i1 @vpreduce_smin_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vs
; CHECK-LABEL: vpreduce_smin_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -632,8 +632,8 @@ define zeroext i1 @vpreduce_smin_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vs
; CHECK-LABEL: vpreduce_smin_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -648,8 +648,8 @@ define zeroext i1 @vpreduce_smin_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vs
; CHECK-LABEL: vpreduce_smin_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -664,8 +664,8 @@ define zeroext i1 @vpreduce_smin_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vs
; CHECK-LABEL: vpreduce_smin_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -680,8 +680,8 @@ define zeroext i1 @vpreduce_smin_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <
; CHECK-LABEL: vpreduce_smin_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -696,8 +696,8 @@ define zeroext i1 @vpreduce_smin_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <
; CHECK-LABEL: vpreduce_smin_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -712,8 +712,8 @@ define zeroext i1 @vpreduce_smin_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <
; CHECK-LABEL: vpreduce_smin_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -728,8 +728,8 @@ define zeroext i1 @vpreduce_umax_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vs
; CHECK-LABEL: vpreduce_umax_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -744,8 +744,8 @@ define zeroext i1 @vpreduce_umax_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vs
; CHECK-LABEL: vpreduce_umax_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -760,8 +760,8 @@ define zeroext i1 @vpreduce_umax_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vs
; CHECK-LABEL: vpreduce_umax_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -776,8 +776,8 @@ define zeroext i1 @vpreduce_umax_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vs
; CHECK-LABEL: vpreduce_umax_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -792,8 +792,8 @@ define zeroext i1 @vpreduce_umax_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <
; CHECK-LABEL: vpreduce_umax_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -808,8 +808,8 @@ define zeroext i1 @vpreduce_umax_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <
; CHECK-LABEL: vpreduce_umax_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -824,8 +824,8 @@ define zeroext i1 @vpreduce_umax_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <
; CHECK-LABEL: vpreduce_umax_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
index 462d49991ae4..e95e9fabe934 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
@@ -12,11 +12,11 @@
define internal void @foo(<vscale x 1 x i16> %v15, <vscale x 1 x i16> %0, <vscale x 1 x i16> %vs12.i.i.i, <vscale x 1 x i16> %1, <vscale x 8 x i8> %v37) {
; NOSUBREG-LABEL: foo:
; NOSUBREG: # %bb.0: # %loopIR.preheader.i.i
-; NOSUBREG-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; NOSUBREG-NEXT: vmv.v.i v14, 0
-; NOSUBREG-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; NOSUBREG-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; NOSUBREG-NEXT: vmv.v.i v9, 0
-; NOSUBREG-NEXT: vmv.v.i v8, 0
+; NOSUBREG-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; NOSUBREG-NEXT: vmv.v.i v14, 0
+; NOSUBREG-NEXT: vmv1r.v v8, v9
; NOSUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; NOSUBREG-NEXT: vrgatherei16.vv v8, v9, v14
; NOSUBREG-NEXT: .LBB0_1: # %loopIR3.i.i
@@ -32,11 +32,11 @@ define internal void @foo(<vscale x 1 x i16> %v15, <vscale x 1 x i16> %0, <vscal
;
; SUBREG-LABEL: foo:
; SUBREG: # %bb.0: # %loopIR.preheader.i.i
-; SUBREG-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; SUBREG-NEXT: vmv.v.i v14, 0
-; SUBREG-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; SUBREG-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; SUBREG-NEXT: vmv.v.i v9, 0
-; SUBREG-NEXT: vmv.v.i v8, 0
+; SUBREG-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; SUBREG-NEXT: vmv.v.i v14, 0
+; SUBREG-NEXT: vmv1r.v v8, v9
; SUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; SUBREG-NEXT: vrgatherei16.vv v8, v9, v14
; SUBREG-NEXT: .LBB0_1: # %loopIR3.i.i
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
index f9ea5143cfcb..71b91f56e89a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
@@ -587,8 +587,8 @@ define <vscale x 128 x i8> @vsadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB50_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 128 x i8> @llvm.vp.sadd.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
@@ -1366,8 +1366,8 @@ define <vscale x 32 x i32> @vsadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.sadd.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
index 745b93b25708..454a4ebab04a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
@@ -586,8 +586,8 @@ define <vscale x 128 x i8> @vsaddu_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB50_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 128 x i8> @llvm.vp.uadd.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
@@ -1365,8 +1365,8 @@ define <vscale x 32 x i32> @vsaddu_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.uadd.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
index 4457c1002acc..53b8e4a78b75 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
@@ -211,12 +211,12 @@ define <vscale x 32 x half> @vfmerge_fv_nxv32f16(<vscale x 32 x half> %va, half
; CHECK-ZVFHMIN: # %bb.0:
; CHECK-ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; CHECK-ZVFHMIN-NEXT: vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-ZVFHMIN-NEXT: vfmv.v.f v24, fa5
+; CHECK-ZVFHMIN-NEXT: vfmv.v.f v16, fa5
; CHECK-ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
-; CHECK-ZVFHMIN-NEXT: vmv.v.v v20, v16
+; CHECK-ZVFHMIN-NEXT: vfncvt.f.f.w v24, v16
+; CHECK-ZVFHMIN-NEXT: vmv.v.v v28, v24
; CHECK-ZVFHMIN-NEXT: vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-ZVFHMIN-NEXT: vmerge.vvm v8, v8, v16, v0
+; CHECK-ZVFHMIN-NEXT: vmerge.vvm v8, v8, v24, v0
; CHECK-ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
index 312378d39373..ee0617c93148 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
@@ -354,11 +354,17 @@ define <vscale x 32 x i32> @select_nxv32i32(<vscale x 32 x i1> %a, <vscale x 32
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
@@ -367,35 +373,51 @@ define <vscale x 32 x i32> @select_nxv32i32(<vscale x 32 x i1> %a, <vscale x 32
; CHECK-NEXT: slli a1, a3, 3
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: vl8re32.v v8, (a1)
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: slli a1, a3, 1
; CHECK-NEXT: sub a4, a2, a1
; CHECK-NEXT: sltu a5, a2, a4
; CHECK-NEXT: addi a5, a5, -1
-; CHECK-NEXT: and a4, a5, a4
; CHECK-NEXT: srli a3, a3, 2
-; CHECK-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vl8re32.v v0, (a0)
+; CHECK-NEXT: vl8re32.v v8, (a0)
; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vslidedown.vx v0, v24, a3
+; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a3
+; CHECK-NEXT: and a4, a5, a4
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0
; CHECK-NEXT: bltu a2, a1, .LBB27_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB27_2:
-; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: li a1, 24
+; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v8, v24, v0
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -411,11 +433,17 @@ define <vscale x 32 x i32> @select_evl_nxv32i32(<vscale x 32 x i1> %a, <vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a2, 24
+; CHECK-NEXT: mul a1, a1, a2
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
@@ -424,35 +452,51 @@ define <vscale x 32 x i32> @select_evl_nxv32i32(<vscale x 32 x i1> %a, <vscale x
; CHECK-NEXT: slli a2, a1, 3
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vl8re32.v v8, (a2)
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: slli a2, a1, 1
; CHECK-NEXT: sub a3, a1, a2
; CHECK-NEXT: sltu a4, a1, a3
; CHECK-NEXT: addi a4, a4, -1
-; CHECK-NEXT: and a3, a4, a3
-; CHECK-NEXT: srli a4, a1, 2
-; CHECK-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vl8re32.v v0, (a0)
+; CHECK-NEXT: srli a5, a1, 2
+; CHECK-NEXT: vl8re32.v v8, (a0)
; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vslidedown.vx v0, v24, a4
+; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a5
+; CHECK-NEXT: and a3, a4, a3
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0
; CHECK-NEXT: bltu a1, a2, .LBB28_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB28_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: li a2, 24
+; CHECK-NEXT: mul a0, a0, a2
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v8, v24, v0
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -714,20 +758,19 @@ define <vscale x 16 x double> @select_nxv16f64(<vscale x 16 x i1> %a, <vscale x
; CHECK-NEXT: sub a3, a2, a1
; CHECK-NEXT: sltu a4, a2, a3
; CHECK-NEXT: addi a4, a4, -1
-; CHECK-NEXT: and a3, a4, a3
-; CHECK-NEXT: srli a4, a1, 3
-; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
+; CHECK-NEXT: srli a5, a1, 3
; CHECK-NEXT: vl8re64.v v0, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vslidedown.vx v0, v24, a4
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v24, a5
+; CHECK-NEXT: and a3, a4, a3
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0
; CHECK-NEXT: bltu a2, a1, .LBB48_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB48_2:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -736,6 +779,7 @@ define <vscale x 16 x double> @select_nxv16f64(<vscale x 16 x i1> %a, <vscale x
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index 9d5ff00fd597..7eb6cacf1ca4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -341,13 +341,13 @@ define <vscale x 1 x double> @test8(i64 %avl, i8 zeroext %cond, <vscale x 1 x do
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: call foo
-; CHECK-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: add a0, a0, sp
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; CHECK-NEXT: vfsub.vv v8, v9, v8
; CHECK-NEXT: .LBB6_3: # %if.then
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
index 834e7dd85aea..9b5a1a54ad5d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
@@ -167,8 +167,8 @@ define <vscale x 32 x i32> @vsext_nxv32i8_nxv32i32(<vscale x 32 x i8> %a, <vscal
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB12_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsext.vf4 v24, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v24
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
index 706876dc3854..ca44a9a64de4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
@@ -386,7 +386,15 @@ declare <vscale x 32 x half> @llvm.vp.sitofp.nxv32f16.nxv32i32(<vscale x 32 x i3
define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vsitofp_nxv32f16_nxv32i32:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v24, v0
+; ZVFH-NEXT: addi sp, sp, -16
+; ZVFH-NEXT: .cfi_def_cfa_offset 16
+; ZVFH-NEXT: csrr a1, vlenb
+; ZVFH-NEXT: slli a1, a1, 3
+; ZVFH-NEXT: sub sp, sp, a1
+; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFH-NEXT: vmv1r.v v7, v0
+; ZVFH-NEXT: addi a1, sp, 16
+; ZVFH-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; ZVFH-NEXT: csrr a1, vlenb
; ZVFH-NEXT: srli a2, a1, 2
; ZVFH-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
@@ -396,16 +404,22 @@ define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
; ZVFH-NEXT: sltu a3, a0, a2
; ZVFH-NEXT: addi a3, a3, -1
; ZVFH-NEXT: and a2, a3, a2
+; ZVFH-NEXT: addi a3, sp, 16
+; ZVFH-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; ZVFH-NEXT: vfncvt.f.x.w v28, v16, v0.t
+; ZVFH-NEXT: vfncvt.f.x.w v20, v24, v0.t
; ZVFH-NEXT: bltu a0, a1, .LBB25_2
; ZVFH-NEXT: # %bb.1:
; ZVFH-NEXT: mv a0, a1
; ZVFH-NEXT: .LBB25_2:
+; ZVFH-NEXT: vmv1r.v v0, v7
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; ZVFH-NEXT: vmv1r.v v0, v24
-; ZVFH-NEXT: vfncvt.f.x.w v24, v8, v0.t
-; ZVFH-NEXT: vmv8r.v v8, v24
+; ZVFH-NEXT: vfncvt.f.x.w v16, v8, v0.t
+; ZVFH-NEXT: vmv8r.v v8, v16
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: add sp, sp, a0
+; ZVFH-NEXT: addi sp, sp, 16
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vsitofp_nxv32f16_nxv32i32:
@@ -428,8 +442,8 @@ define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB25_2:
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v8
@@ -460,8 +474,8 @@ define <vscale x 32 x float> @vsitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x float> @llvm.vp.sitofp.nxv32f32.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
index b56a0f40176c..613b58b0f1b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
@@ -593,22 +593,22 @@ define <vscale x 128 x i8> @vssub_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: sub a0, a1, a2
-; CHECK-NEXT: sltu a3, a1, a0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub a2, a1, a0
+; CHECK-NEXT: sltu a3, a1, a2
; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a3, a3, a0
-; CHECK-NEXT: li a0, -1
+; CHECK-NEXT: and a3, a3, a2
+; CHECK-NEXT: li a2, -1
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
-; CHECK-NEXT: vssub.vx v16, v16, a0, v0.t
-; CHECK-NEXT: bltu a1, a2, .LBB50_2
+; CHECK-NEXT: vssub.vx v16, v16, a2, v0.t
+; CHECK-NEXT: bltu a1, a0, .LBB50_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a1, a2
+; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB50_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a2, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 128 x i8> @llvm.vp.ssub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
ret <vscale x 128 x i8> %v
@@ -1393,25 +1393,25 @@ define <vscale x 32 x i32> @vssub_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x
; CHECK-LABEL: vssub_vi_nxv32i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: srli a1, a2, 2
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v0, a1
-; CHECK-NEXT: slli a2, a2, 1
-; CHECK-NEXT: sub a1, a0, a2
-; CHECK-NEXT: sltu a3, a0, a1
+; CHECK-NEXT: vslidedown.vx v0, v0, a2
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a3, a3, a1
-; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: and a3, a3, a2
+; CHECK-NEXT: li a2, -1
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: vssub.vx v16, v16, a1, v0.t
-; CHECK-NEXT: bltu a0, a2, .LBB118_2
+; CHECK-NEXT: vssub.vx v16, v16, a2, v0.t
+; CHECK-NEXT: bltu a0, a1, .LBB118_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a2
+; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a2, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.ssub.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x i32> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
index 8275c3081c7c..8c729d7d9bfb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
@@ -591,22 +591,22 @@ define <vscale x 128 x i8> @vssubu_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: sub a0, a1, a2
-; CHECK-NEXT: sltu a3, a1, a0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub a2, a1, a0
+; CHECK-NEXT: sltu a3, a1, a2
; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a3, a3, a0
-; CHECK-NEXT: li a0, -1
+; CHECK-NEXT: and a3, a3, a2
+; CHECK-NEXT: li a2, -1
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
-; CHECK-NEXT: vssubu.vx v16, v16, a0, v0.t
-; CHECK-NEXT: bltu a1, a2, .LBB50_2
+; CHECK-NEXT: vssubu.vx v16, v16, a2, v0.t
+; CHECK-NEXT: bltu a1, a0, .LBB50_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a1, a2
+; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB50_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a2, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 128 x i8> @llvm.vp.usub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
ret <vscale x 128 x i8> %v
@@ -1391,25 +1391,25 @@ define <vscale x 32 x i32> @vssubu_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-LABEL: vssubu_vi_nxv32i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: srli a1, a2, 2
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v0, a1
-; CHECK-NEXT: slli a2, a2, 1
-; CHECK-NEXT: sub a1, a0, a2
-; CHECK-NEXT: sltu a3, a0, a1
+; CHECK-NEXT: vslidedown.vx v0, v0, a2
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a3, a3, a1
-; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: and a3, a3, a2
+; CHECK-NEXT: li a2, -1
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: vssubu.vx v16, v16, a1, v0.t
-; CHECK-NEXT: bltu a0, a2, .LBB118_2
+; CHECK-NEXT: vssubu.vx v16, v16, a2, v0.t
+; CHECK-NEXT: bltu a0, a1, .LBB118_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a2
+; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a2, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.usub.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x i32> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
index 4857810e7a17..27755c166cc5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
@@ -174,8 +174,8 @@ define <vscale x 15 x i16> @vtrunc_nxv15i16_nxv15i64(<vscale x 15 x i64> %a, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB12_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vnsrl.wi v16, v20, 0, v0.t
@@ -232,8 +232,8 @@ define <vscale x 32 x i7> @vtrunc_nxv32i7_nxv32i32(<vscale x 32 x i32> %a, <vsca
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB15_2:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT: vnsrl.wi v16, v20, 0, v0.t
@@ -266,8 +266,8 @@ define <vscale x 32 x i8> @vtrunc_nxv32i8_nxv32i32(<vscale x 32 x i32> %a, <vsca
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB16_2:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT: vnsrl.wi v16, v20, 0, v0.t
@@ -312,11 +312,11 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: sub a6, a5, a1
; CHECK-NEXT: sltu a7, a5, a6
; CHECK-NEXT: addi a7, a7, -1
-; CHECK-NEXT: and a6, a7, a6
-; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma
; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v16, a3
-; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma
+; CHECK-NEXT: and a0, a7, a6
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT: bltu a5, a1, .LBB17_2
; CHECK-NEXT: # %bb.1:
@@ -324,8 +324,8 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: .LBB17_2:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v6, v7, a3
-; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t
; CHECK-NEXT: bltu a2, a4, .LBB17_4
; CHECK-NEXT: # %bb.3:
@@ -335,22 +335,22 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: sltu a3, a2, a0
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a0, a3, a0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v28, v8, 0, v0.t
; CHECK-NEXT: bltu a2, a1, .LBB17_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB17_6:
-; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v24, v8, 0, v0.t
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
index e083d594db25..e5941dc7b5ab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
@@ -386,7 +386,15 @@ declare <vscale x 32 x half> @llvm.vp.uitofp.nxv32f16.nxv32i32(<vscale x 32 x i3
define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vuitofp_nxv32f16_nxv32i32:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v24, v0
+; ZVFH-NEXT: addi sp, sp, -16
+; ZVFH-NEXT: .cfi_def_cfa_offset 16
+; ZVFH-NEXT: csrr a1, vlenb
+; ZVFH-NEXT: slli a1, a1, 3
+; ZVFH-NEXT: sub sp, sp, a1
+; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFH-NEXT: vmv1r.v v7, v0
+; ZVFH-NEXT: addi a1, sp, 16
+; ZVFH-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; ZVFH-NEXT: csrr a1, vlenb
; ZVFH-NEXT: srli a2, a1, 2
; ZVFH-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
@@ -396,16 +404,22 @@ define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
; ZVFH-NEXT: sltu a3, a0, a2
; ZVFH-NEXT: addi a3, a3, -1
; ZVFH-NEXT: and a2, a3, a2
+; ZVFH-NEXT: addi a3, sp, 16
+; ZVFH-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; ZVFH-NEXT: vfncvt.f.xu.w v28, v16, v0.t
+; ZVFH-NEXT: vfncvt.f.xu.w v20, v24, v0.t
; ZVFH-NEXT: bltu a0, a1, .LBB25_2
; ZVFH-NEXT: # %bb.1:
; ZVFH-NEXT: mv a0, a1
; ZVFH-NEXT: .LBB25_2:
+; ZVFH-NEXT: vmv1r.v v0, v7
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; ZVFH-NEXT: vmv1r.v v0, v24
-; ZVFH-NEXT: vfncvt.f.xu.w v24, v8, v0.t
-; ZVFH-NEXT: vmv8r.v v8, v24
+; ZVFH-NEXT: vfncvt.f.xu.w v16, v8, v0.t
+; ZVFH-NEXT: vmv8r.v v8, v16
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: add sp, sp, a0
+; ZVFH-NEXT: addi sp, sp, 16
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vuitofp_nxv32f16_nxv32i32:
@@ -428,8 +442,8 @@ define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB25_2:
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.f.xu.v v8, v8, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v8
@@ -460,8 +474,8 @@ define <vscale x 32 x float> @vuitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x float> @llvm.vp.uitofp.nxv32f32.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
index c5f34eee3118..a869b433a495 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
@@ -85,9 +85,9 @@ define <vscale x 1 x i8> @test3(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vsc
; RV32-NEXT: vaadd.vv v8, v8, v9
; RV32-NEXT: call foo
; RV32-NEXT: csrwi vxrm, 0
-; RV32-NEXT: vsetvli zero, s0, e8, mf8, ta, ma
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, s0, e8, mf8, ta, ma
; RV32-NEXT: vaadd.vv v8, v8, v9
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
@@ -113,9 +113,9 @@ define <vscale x 1 x i8> @test3(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vsc
; RV64-NEXT: vaadd.vv v8, v8, v9
; RV64-NEXT: call foo
; RV64-NEXT: csrwi vxrm, 0
-; RV64-NEXT: vsetvli zero, s0, e8, mf8, ta, ma
; RV64-NEXT: addi a0, sp, 16
; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, s0, e8, mf8, ta, ma
; RV64-NEXT: vaadd.vv v8, v8, v9
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
index 400f89b1ef77..3b5541c1a244 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
@@ -167,8 +167,8 @@ define <vscale x 32 x i32> @vzext_nxv32i8_nxv32i32(<vscale x 32 x i8> %a, <vscal
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB12_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vzext.vf4 v24, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v24
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/sextw-removal.ll b/llvm/test/CodeGen/RISCV/sextw-removal.ll
index f707cb31e3ec..8cf78551d28f 100644
--- a/llvm/test/CodeGen/RISCV/sextw-removal.ll
+++ b/llvm/test/CodeGen/RISCV/sextw-removal.ll
@@ -1047,25 +1047,25 @@ define signext i32 @bug(i32 signext %x) {
; CHECK-NEXT: seqz a2, a2
; CHECK-NEXT: slli a3, a2, 3
; CHECK-NEXT: sllw a1, a1, a3
-; CHECK-NEXT: neg a2, a2
+; CHECK-NEXT: negw a2, a2
; CHECK-NEXT: andi a2, a2, -8
; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: srliw a2, a1, 28
; CHECK-NEXT: seqz a2, a2
; CHECK-NEXT: slli a3, a2, 2
; CHECK-NEXT: sllw a1, a1, a3
-; CHECK-NEXT: neg a2, a2
+; CHECK-NEXT: negw a2, a2
; CHECK-NEXT: andi a2, a2, -4
; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: srliw a2, a1, 30
; CHECK-NEXT: seqz a2, a2
; CHECK-NEXT: slli a3, a2, 1
; CHECK-NEXT: sllw a1, a1, a3
-; CHECK-NEXT: neg a2, a2
+; CHECK-NEXT: negw a2, a2
; CHECK-NEXT: andi a2, a2, -2
; CHECK-NEXT: add a0, a0, a2
-; CHECK-NEXT: srai a1, a1, 31
; CHECK-NEXT: not a1, a1
+; CHECK-NEXT: srli a1, a1, 31
; CHECK-NEXT: addw a0, a0, a1
; CHECK-NEXT: .LBB18_4: # %cleanup
; CHECK-NEXT: ret
@@ -1087,28 +1087,27 @@ define signext i32 @bug(i32 signext %x) {
; NOREMOVAL-NEXT: seqz a2, a2
; NOREMOVAL-NEXT: slli a3, a2, 3
; NOREMOVAL-NEXT: sllw a1, a1, a3
-; NOREMOVAL-NEXT: neg a2, a2
+; NOREMOVAL-NEXT: negw a2, a2
; NOREMOVAL-NEXT: andi a2, a2, -8
; NOREMOVAL-NEXT: add a0, a0, a2
; NOREMOVAL-NEXT: srliw a2, a1, 28
; NOREMOVAL-NEXT: seqz a2, a2
; NOREMOVAL-NEXT: slli a3, a2, 2
; NOREMOVAL-NEXT: sllw a1, a1, a3
-; NOREMOVAL-NEXT: neg a2, a2
+; NOREMOVAL-NEXT: negw a2, a2
; NOREMOVAL-NEXT: andi a2, a2, -4
; NOREMOVAL-NEXT: add a0, a0, a2
; NOREMOVAL-NEXT: srliw a2, a1, 30
; NOREMOVAL-NEXT: seqz a2, a2
; NOREMOVAL-NEXT: slli a3, a2, 1
; NOREMOVAL-NEXT: sllw a1, a1, a3
-; NOREMOVAL-NEXT: neg a2, a2
+; NOREMOVAL-NEXT: negw a2, a2
; NOREMOVAL-NEXT: andi a2, a2, -2
; NOREMOVAL-NEXT: add a0, a0, a2
-; NOREMOVAL-NEXT: srai a1, a1, 31
; NOREMOVAL-NEXT: not a1, a1
-; NOREMOVAL-NEXT: add a0, a0, a1
+; NOREMOVAL-NEXT: srli a1, a1, 31
+; NOREMOVAL-NEXT: addw a0, a0, a1
; NOREMOVAL-NEXT: .LBB18_4: # %cleanup
-; NOREMOVAL-NEXT: sext.w a0, a0
; NOREMOVAL-NEXT: ret
entry:
%tobool.not = icmp eq i32 %x, 0
diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index 9ecfa5017831..56c1ad3527aa 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -643,9 +643,9 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV32MV-NEXT: mv a1, s4
; RV32MV-NEXT: li a3, 0
; RV32MV-NEXT: call __moddi3
-; RV32MV-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32MV-NEXT: addi a2, sp, 16
; RV32MV-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32MV-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32MV-NEXT: vslide1down.vx v8, v8, a0
; RV32MV-NEXT: vslide1down.vx v8, v8, a1
; RV32MV-NEXT: addi a0, sp, 16
@@ -655,9 +655,9 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV32MV-NEXT: mv a0, s1
; RV32MV-NEXT: mv a1, s3
; RV32MV-NEXT: call __moddi3
-; RV32MV-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32MV-NEXT: addi a2, sp, 16
; RV32MV-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32MV-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32MV-NEXT: vslide1down.vx v8, v8, a0
; RV32MV-NEXT: vslide1down.vx v8, v8, a1
; RV32MV-NEXT: vslidedown.vi v8, v8, 2
@@ -779,7 +779,6 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV64MV-NEXT: vmsne.vv v0, v8, v12
; RV64MV-NEXT: vmv.v.i v8, 0
; RV64MV-NEXT: vmerge.vim v8, v8, -1, v0
-; RV64MV-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64MV-NEXT: vslidedown.vi v10, v8, 2
; RV64MV-NEXT: vmv.x.s a2, v10
; RV64MV-NEXT: slli a3, a2, 31
diff --git a/llvm/test/CodeGen/SPIRV/execution-mode-reqd_work_group_size.ll b/llvm/test/CodeGen/SPIRV/execution-mode-reqd_work_group_size.ll
new file mode 100644
index 000000000000..6e36b0bd5b9d
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/execution-mode-reqd_work_group_size.ll
@@ -0,0 +1,35 @@
+; From Khronos Translator's test case: test/reqd_work_group_size_md.ll
+
+; The purpose of this test is to check that the reqd_work_group_size metadata
+; is correctly converted to the LocalSize execution mode for the kernels it is
+; applied to.
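+; For reference, a frontend typically emits this metadata for an OpenCL C
+; kernel declared with the reqd_work_group_size attribute, e.g.:
+;   __kernel __attribute__((reqd_work_group_size(1, 2, 3))) void test1(void) {}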
+
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpMemoryModel
+; CHECK-DAG: OpEntryPoint Kernel %[[#ENTRY1:]] "test1"
+; CHECK-DAG: OpEntryPoint Kernel %[[#ENTRY2:]] "test2"
+; CHECK-DAG: OpEntryPoint Kernel %[[#ENTRY3:]] "test3"
+; CHECK-DAG: OpExecutionMode %[[#ENTRY1]] LocalSize 1 2 3
+; CHECK-DAG: OpExecutionMode %[[#ENTRY2]] LocalSize 2 3 1
+; CHECK-DAG: OpExecutionMode %[[#ENTRY3]] LocalSize 3 1 1
+
+define spir_kernel void @test1() !reqd_work_group_size !1 {
+entry:
+ ret void
+}
+
+define spir_kernel void @test2() !reqd_work_group_size !2 {
+entry:
+ ret void
+}
+
+define spir_kernel void @test3() !reqd_work_group_size !3 {
+entry:
+ ret void
+}
+
+!1 = !{i32 1, i32 2, i32 3}
+!2 = !{i32 2, i32 3}
+!3 = !{i32 3}
diff --git a/llvm/test/CodeGen/SPIRV/execution-mode-work_group_size_hint.ll b/llvm/test/CodeGen/SPIRV/execution-mode-work_group_size_hint.ll
new file mode 100644
index 000000000000..f2c43d3748af
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/execution-mode-work_group_size_hint.ll
@@ -0,0 +1,34 @@
+; From Khronos Translator's test case: test/reqd_work_group_size_md.ll
+
+; The purpose of this test is to check that the work_group_size_hint metadata
+; is correctly converted to the LocalSizeHint execution mode.
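+; For reference, a frontend typically emits this metadata for an OpenCL C
+; kernel declared with the work_group_size_hint attribute, e.g.:
+;   __kernel __attribute__((work_group_size_hint(1, 2, 3))) void test1(void) {}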
+
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpMemoryModel
+; CHECK-DAG: OpEntryPoint Kernel %[[#ENTRY1:]] "test1"
+; CHECK-DAG: OpEntryPoint Kernel %[[#ENTRY2:]] "test2"
+; CHECK-DAG: OpEntryPoint Kernel %[[#ENTRY3:]] "test3"
+; CHECK-DAG: OpExecutionMode %[[#ENTRY1]] LocalSizeHint 1 2 3
+; CHECK-DAG: OpExecutionMode %[[#ENTRY2]] LocalSizeHint 2 3 1
+; CHECK-DAG: OpExecutionMode %[[#ENTRY3]] LocalSizeHint 3 1 1
+
+define spir_kernel void @test1() !work_group_size_hint !1 {
+entry:
+ ret void
+}
+
+define spir_kernel void @test2() !work_group_size_hint !2 {
+entry:
+ ret void
+}
+
+define spir_kernel void @test3() !work_group_size_hint !3 {
+entry:
+ ret void
+}
+
+!1 = !{i32 1, i32 2, i32 3}
+!2 = !{i32 2, i32 3}
+!3 = !{i32 3}
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_inline_assembly/inline_asm.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_inline_assembly/inline_asm.ll
new file mode 100644
index 000000000000..449dd7195450
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_inline_assembly/inline_asm.ll
@@ -0,0 +1,93 @@
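+; Check that LLVM inline assembly is translated to the SPV_INTEL_inline_assembly
+; constructs: a single OpAsmTargetINTEL for the module, an OpAsmINTEL per asm
+; snippet, and an OpAsmCallINTEL at each call site.
+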
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s --spirv-ext=+SPV_INTEL_inline_assembly -o - | FileCheck %s
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s --spirv-ext=+SPV_INTEL_inline_assembly -o - -filetype=obj | spirv-val %}
+
+; RUN: not llc -O0 -mtriple=spirv64-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+; CHECK-ERROR: Inline assembly instructions require the following SPIR-V extension: SPV_INTEL_inline_assembly
+
+; CHECK: OpCapability AsmINTEL
+; CHECK: OpExtension "SPV_INTEL_inline_assembly"
+
+; CHECK-COUNT-8: OpDecorate %[[#]] SideEffectsINTEL
+
+; CHECK-DAG: %[[#VoidTy:]] = OpTypeVoid
+; CHECK-DAG: %[[#Int8Ty:]] = OpTypeInt 8 0
+; CHECK-DAG: %[[#Int32Ty:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#Int64Ty:]] = OpTypeInt 64 0
+; CHECK-DAG: %[[#HalfTy:]] = OpTypeFloat 16
+; CHECK-DAG: %[[#FloatTy:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#DoubleTy:]] = OpTypeFloat 64
+
+; CHECK-DAG: OpTypeFunction %[[#VoidTy]] %[[#]] %[[#]] %[[#]] %[[#Int64Ty]]
+; CHECK-DAG: %[[#Fun1Ty:]] = OpTypeFunction %[[#VoidTy]]
+; CHECK-DAG: %[[#Fun2Ty:]] = OpTypeFunction %[[#Int32Ty]]
+; CHECK-DAG: %[[#Fun3Ty:]] = OpTypeFunction %[[#Int32Ty]] %[[#Int32Ty]]
+; CHECK-DAG: %[[#Fun4Ty:]] = OpTypeFunction %[[#FloatTy]] %[[#FloatTy]]
+; CHECK-DAG: %[[#Fun5Ty:]] = OpTypeFunction %[[#HalfTy]] %[[#FloatTy]] %[[#FloatTy]]
+; CHECK-DAG: %[[#Fun6Ty:]] = OpTypeFunction %[[#Int8Ty]] %[[#FloatTy]] %[[#Int32Ty]] %[[#Int8Ty]]
+; CHECK-DAG: %[[#Fun7Ty:]] = OpTypeFunction %[[#Int64Ty]] %[[#Int64Ty]] %[[#Int32Ty]] %[[#Int8Ty]]
+; CHECK-DAG: %[[#Fun8Ty:]] = OpTypeFunction %[[#VoidTy]] %[[#Int32Ty]] %[[#DoubleTy]]
+
+; CHECK-DAG: %[[#Const2:]] = OpConstant %[[#FloatTy]] 2
+; CHECK-DAG: %[[#Const123:]] = OpConstant %[[#Int32Ty]] 123
+; CHECK-DAG: %[[#Const42:]] = OpConstant %[[#DoubleTy:]] 42
+
+; CHECK: %[[#Dialect:]] = OpAsmTargetINTEL "spirv64-unknown-unknown"
+; CHECK-NOT: OpAsmTargetINTEL
+
+; CHECK: %[[#Asm1:]] = OpAsmINTEL %[[#VoidTy]] %[[#Fun1Ty]] %[[#Dialect]] "" ""
+; CHECK: %[[#Asm2:]] = OpAsmINTEL %[[#VoidTy]] %[[#Fun1Ty]] %[[#Dialect]] "nop" ""
+; CHECK: %[[#Asm3:]] = OpAsmINTEL %[[#VoidTy]] %[[#Fun1Ty]] %[[#Dialect]] "" "~{cc},~{memory}"
+; CHECK: %[[#Asm4:]] = OpAsmINTEL %[[#Int32Ty]] %[[#Fun2Ty:]] %[[#Dialect]] "clobber_out $0" "=&r"
+; CHECK: %[[#Asm5:]] = OpAsmINTEL %[[#Int32Ty]] %[[#Fun3Ty]] %[[#Dialect]] "icmd $0 $1" "=r,r"
+; CHECK: %[[#Asm6:]] = OpAsmINTEL %[[#FloatTy]] %[[#Fun4Ty]] %[[#Dialect]] "fcmd $0 $1" "=r,r"
+; CHECK: %[[#Asm7:]] = OpAsmINTEL %[[#HalfTy]] %[[#Fun5Ty]] %[[#Dialect]] "fcmdext $0 $1 $2" "=r,r,r"
+; CHECK: %[[#Asm8:]] = OpAsmINTEL %[[#Int8Ty]] %[[#Fun6Ty]] %[[#Dialect]] "cmdext $0 $3 $1 $2" "=r,r,r,r"
+; CHECK: %[[#Asm9:]] = OpAsmINTEL %[[#Int64Ty]] %[[#Fun7Ty]] %[[#Dialect]] "icmdext $0 $3 $1 $2" "=r,r,r,r"
+; CHECK: %[[#Asm10:]] = OpAsmINTEL %[[#VoidTy]] %[[#Fun8Ty]] %[[#Dialect]] "constcmd $0 $1" "r,r"
+; CHECK: %[[#Asm11:]] = OpAsmINTEL %[[#VoidTy]] %[[#Fun8Ty]] %[[#Dialect]] "constcmd $0 $1" "i,i"
+; CHECK-NOT: OpAsmINTEL
+
+; CHECK: OpFunction
+; CHECK: OpAsmCallINTEL %[[#VoidTy]] %[[#Asm1]]
+; CHECK: OpAsmCallINTEL %[[#VoidTy]] %[[#Asm2]]
+; CHECK: OpAsmCallINTEL %[[#VoidTy]] %[[#Asm3]]
+; CHECK: OpAsmCallINTEL %[[#Int32Ty]] %[[#Asm4]]
+; CHECK: OpAsmCallINTEL %[[#Int32Ty]] %[[#Asm5]] %[[#]]
+; CHECK: OpAsmCallINTEL %[[#FloatTy]] %[[#Asm6]] %[[#]]
+; CHECK: OpAsmCallINTEL %[[#HalfTy]] %[[#Asm7]] %[[#Const2]] %[[#]]
+; CHECK: OpAsmCallINTEL %[[#Int8Ty]] %[[#Asm8]] %[[#]] %[[#Const123]] %[[#]]
+; CHECK: OpAsmCallINTEL %[[#Int64Ty]] %[[#Asm9]] %[[#]] %[[#]] %[[#]]
+; CHECK: OpAsmCallINTEL %[[#VoidTy]] %[[#Asm10]] %[[#Const123]] %[[#Const42]]
+; CHECK: OpAsmCallINTEL %[[#VoidTy]] %[[#Asm11]] %[[#Const123]] %[[#Const42]]
+; CHECK-NOT: OpAsmCallINTEL
+
+define spir_kernel void @foo(ptr addrspace(1) %_arg_int, ptr addrspace(1) %_arg_float, ptr addrspace(1) %_arg_half, i64 %_lng) {
+ %i1 = load i32, ptr addrspace(1) %_arg_int
+ %i2 = load i8, ptr addrspace(1) %_arg_int
+ %f1 = load float, ptr addrspace(1) %_arg_float
+ %h1 = load half, ptr addrspace(1) %_arg_half
+ ; inline asm
+ call void asm sideeffect "", ""()
+ call void asm sideeffect "nop", ""()
+ call void asm sideeffect "", "~{cc},~{memory}"()
+ %res_i0 = call i32 asm "clobber_out $0", "=&r"()
+ store i32 %res_i0, ptr addrspace(1) %_arg_int
+ ; inline asm: integer
+ %res_i1 = call i32 asm sideeffect "icmd $0 $1", "=r,r"(i32 %i1)
+ store i32 %res_i1, ptr addrspace(1) %_arg_int
+ ; inline asm: float
+ %res_f1 = call float asm sideeffect "fcmd $0 $1", "=r,r"(float %f1)
+ store float %res_f1, ptr addrspace(1) %_arg_float
+ ; inline asm: mixed floats
+ %res_f2 = call half asm sideeffect "fcmdext $0 $1 $2", "=r,r,r"(float 2.0, float %f1)
+ store half %res_f2, ptr addrspace(1) %_arg_half
+ ; inline asm: mixed operands of different types
+ call i8 asm sideeffect "cmdext $0 $3 $1 $2", "=r,r,r,r"(float %f1, i32 123, i8 %i2)
+ ; inline asm: mixed integers
+ %res_i2 = call i64 asm sideeffect "icmdext $0 $3 $1 $2", "=r,r,r,r"(i64 %_lng, i32 %i1, i8 %i2)
+ store i64 %res_i2, ptr addrspace(1) %_arg_int
+ ; inline asm: constant arguments, misc constraints
+ call void asm "constcmd $0 $1", "r,r"(i32 123, double 42.0)
+ call void asm "constcmd $0 $1", "i,i"(i32 123, double 42.0)
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_shader_clock/shader_clock.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_shader_clock/shader_clock.ll
new file mode 100644
index 000000000000..e219f61b5c6e
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_shader_clock/shader_clock.ll
@@ -0,0 +1,59 @@
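+; Check that the OpenCL clock_read_* builtins are lowered to OpReadClockKHR
+; with the matching scope operand (Device=1, Workgroup=2, Subgroup=3).
+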
+; RUN: not llc -O0 -mtriple=spirv64-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_shader_clock %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_shader_clock %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-ERROR: LLVM ERROR: clock_read_device: the builtin requires the following SPIR-V extension: SPV_KHR_shader_clock
+
+; CHECK: OpCapability ShaderClockKHR
+; CHECK: OpExtension "SPV_KHR_shader_clock"
+; CHECK-DAG: [[uint:%[a-z0-9_]+]] = OpTypeInt 32
+; CHECK-DAG: [[ulong:%[a-z0-9_]+]] = OpTypeInt 64
+; CHECK-DAG: [[v2uint:%[a-z0-9_]+]] = OpTypeVector [[uint]] 2
+; CHECK-DAG: [[uint_1:%[a-z0-9_]+]] = OpConstant [[uint]] 1
+; CHECK-DAG: [[uint_2:%[a-z0-9_]+]] = OpConstant [[uint]] 2
+; CHECK-DAG: [[uint_3:%[a-z0-9_]+]] = OpConstant [[uint]] 3
+; CHECK: OpReadClockKHR [[ulong]] [[uint_1]]
+; CHECK: OpReadClockKHR [[ulong]] [[uint_2]]
+; CHECK: OpReadClockKHR [[ulong]] [[uint_3]]
+; CHECK: OpReadClockKHR [[v2uint]] [[uint_1]]
+; CHECK: OpReadClockKHR [[v2uint]] [[uint_2]]
+; CHECK: OpReadClockKHR [[v2uint]] [[uint_3]]
+
+define dso_local spir_kernel void @test_clocks(ptr addrspace(1) nocapture noundef writeonly align 8 %out64, ptr addrspace(1) nocapture noundef writeonly align 8 %outv2) {
+entry:
+ %call = tail call spir_func i64 @_Z17clock_read_devicev()
+ store i64 %call, ptr addrspace(1) %out64, align 8
+ %call1 = tail call spir_func i64 @_Z21clock_read_work_groupv()
+ %arrayidx2 = getelementptr inbounds i8, ptr addrspace(1) %out64, i32 8
+ store i64 %call1, ptr addrspace(1) %arrayidx2, align 8
+ %call3 = tail call spir_func i64 @_Z20clock_read_sub_groupv()
+ %arrayidx4 = getelementptr inbounds i8, ptr addrspace(1) %out64, i32 16
+ store i64 %call3, ptr addrspace(1) %arrayidx4, align 8
+ %call5 = tail call spir_func <2 x i32> @_Z22clock_read_hilo_devicev()
+ store <2 x i32> %call5, ptr addrspace(1) %outv2, align 8
+ %call7 = tail call spir_func <2 x i32> @_Z26clock_read_hilo_work_groupv()
+ %arrayidx8 = getelementptr inbounds i8, ptr addrspace(1) %outv2, i32 8
+ store <2 x i32> %call7, ptr addrspace(1) %arrayidx8, align 8
+ %call9 = tail call spir_func <2 x i32> @_Z25clock_read_hilo_sub_groupv()
+ %arrayidx10 = getelementptr inbounds i8, ptr addrspace(1) %outv2, i32 16
+ store <2 x i32> %call9, ptr addrspace(1) %arrayidx10, align 8
+ ret void
+}
+
+; Function Attrs: convergent nounwind
+declare spir_func i64 @_Z17clock_read_devicev() local_unnamed_addr
+
+; Function Attrs: convergent nounwind
+declare spir_func i64 @_Z21clock_read_work_groupv() local_unnamed_addr
+
+; Function Attrs: convergent nounwind
+declare spir_func i64 @_Z20clock_read_sub_groupv() local_unnamed_addr
+
+; Function Attrs: convergent nounwind
+declare spir_func <2 x i32> @_Z22clock_read_hilo_devicev() local_unnamed_addr
+
+; Function Attrs: convergent nounwind
+declare spir_func <2 x i32> @_Z26clock_read_hilo_work_groupv() local_unnamed_addr
+
+; Function Attrs: convergent nounwind
+declare spir_func <2 x i32> @_Z25clock_read_hilo_sub_groupv() local_unnamed_addr
diff --git a/llvm/test/CodeGen/SPIRV/phi-ptrcast-dominate.ll b/llvm/test/CodeGen/SPIRV/phi-ptrcast-dominate.ll
new file mode 100644
index 000000000000..2cd321b05a40
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/phi-ptrcast-dominate.ll
@@ -0,0 +1,94 @@
+; The goal of this test is to check that the newly inserted `ptrcast` internal
+; intrinsic functions for PHI operands are placed at the correct positions,
+; without violating instruction-domination rules or the requirement that PHI
+; nodes be grouped at the top of a basic block.
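+; (LLVM requires all PHI nodes to appear before any non-PHI instruction in a
+; basic block, and every PHI operand must dominate the end of its incoming
+; predecessor, so a cast materialized for a PHI operand has to be emitted in
+; the corresponding predecessor block rather than next to the PHI itself.)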
+
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpName %[[#Case1:]] "case1"
+; CHECK-DAG: OpName %[[#Case2:]] "case2"
+; CHECK-DAG: OpName %[[#Case3:]] "case3"
+; CHECK: %[[#Case1]] = OpFunction
+; CHECK: OpBranchConditional
+; CHECK: OpPhi
+; CHECK: OpBranch
+; CHECK-COUNT-2: OpBranchConditional
+; CHECK: OpFunctionEnd
+; CHECK: %[[#Case2]] = OpFunction
+; CHECK: OpBranchConditional
+; CHECK: OpPhi
+; CHECK: OpBranch
+; CHECK-COUNT-2: OpBranchConditional
+; CHECK: OpFunctionEnd
+; CHECK: %[[#Case3]] = OpFunction
+; CHECK: OpBranchConditional
+; CHECK: OpPhi
+; CHECK: OpBranch
+; CHECK: OpInBoundsPtrAccessChain
+; CHECK: OpBranchConditional
+; CHECK: OpInBoundsPtrAccessChain
+; CHECK: OpBranchConditional
+; CHECK: OpFunctionEnd
+
+%struct1 = type { i64 }
+%struct2 = type { i64, i64 }
+
+@.str.1 = private unnamed_addr addrspace(1) constant [3 x i8] c"OK\00", align 1
+@.str.2 = private unnamed_addr addrspace(1) constant [6 x i8] c"WRONG\00", align 1
+
+define spir_func void @case1(i1 %b1, i1 %b2, i1 %b3) {
+entry:
+ br i1 %b1, label %l1, label %l2
+
+l1:
+ %str = phi ptr addrspace(1) [ @.str.1, %entry ], [ @.str.2, %l2 ], [ @.str.2, %l3 ]
+ br label %exit
+
+l2:
+ br i1 %b2, label %l1, label %l3
+
+l3:
+ br i1 %b3, label %l1, label %exit
+
+exit:
+ ret void
+}
+
+define spir_func void @case2(i1 %b1, i1 %b2, i1 %b3, ptr addrspace(1) byval(%struct1) %str1, ptr addrspace(1) byval(%struct2) %str2) {
+entry:
+ br i1 %b1, label %l1, label %l2
+
+l1:
+ %str = phi ptr addrspace(1) [ %str1, %entry ], [ %str2, %l2 ], [ %str2, %l3 ]
+ br label %exit
+
+l2:
+ br i1 %b2, label %l1, label %l3
+
+l3:
+ br i1 %b3, label %l1, label %exit
+
+exit:
+ ret void
+}
+
+define spir_func void @case3(i1 %b1, i1 %b2, i1 %b3, ptr addrspace(1) byval(%struct1) %_arg_str1, ptr addrspace(1) byval(%struct2) %_arg_str2) {
+entry:
+ br i1 %b1, label %l1, label %l2
+
+l1:
+ %str = phi ptr addrspace(1) [ %_arg_str1, %entry ], [ %str2, %l2 ], [ %str3, %l3 ]
+ br label %exit
+
+l2:
+ %str2 = getelementptr inbounds %struct2, ptr addrspace(1) %_arg_str2, i32 1
+ br i1 %b2, label %l1, label %l3
+
+l3:
+ %str3 = getelementptr inbounds %struct2, ptr addrspace(1) %_arg_str2, i32 2
+ br i1 %b3, label %l1, label %exit
+
+exit:
+ ret void
+}
diff --git a/llvm/test/CodeGen/Thumb/shift-and.ll b/llvm/test/CodeGen/Thumb/shift-and.ll
index e5fee86343b0..a8a09dbe1b73 100644
--- a/llvm/test/CodeGen/Thumb/shift-and.ll
+++ b/llvm/test/CodeGen/Thumb/shift-and.ll
@@ -70,9 +70,8 @@ define i32 @test6(i32 %x) {
; CHECK-LABEL: test6:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: movs r1, #5
-; CHECK-NEXT: lsls r1, r1, #29
-; CHECK-NEXT: lsls r0, r0, #29
-; CHECK-NEXT: ands r0, r1
+; CHECK-NEXT: ands r1, r0
+; CHECK-NEXT: lsls r0, r1, #29
; CHECK-NEXT: bx lr
entry:
%0 = shl i32 %x, 29
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/add_reduce.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/add_reduce.mir
index 84ff0d78993d..870ee341a4fc 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/add_reduce.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/add_reduce.mir
@@ -147,53 +147,59 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: max_min_add_reduce
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x40000000), %bb.3(0x40000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8
- ; CHECK: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $r8, killed $lr
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 24
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r8, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -20
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -24
- ; CHECK: renamable $r12 = t2LDRi12 $sp, 48, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.6, align 8)
- ; CHECK: renamable $r5 = t2ADDri renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r7, dead $cpsr = tLSRri killed renamable $r5, 2, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = t2WLS renamable $r7, %bb.3
- ; CHECK: bb.1.for.body.lr.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r7, $r12
- ; CHECK: $r6, $r5 = t2LDRDi8 $sp, 40, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.4, align 8), (load (s32) from %fixed-stack.5)
- ; CHECK: $r4 = tMOVr killed $r7, 14 /* CC::al */, $noreg
- ; CHECK: $r7, $r8 = t2LDRDi8 $sp, 24, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8), (load (s32) from %fixed-stack.1)
- ; CHECK: renamable $q0 = MVE_VDUP32 killed renamable $r5, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $q1 = MVE_VDUP32 killed renamable $r6, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $r5, dead $cpsr = tSUBi3 killed renamable $r7, 4, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.for.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $q0, $q1, $r0, $r1, $r2, $r3, $r4, $r5, $r8, $r12
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r1, renamable $q2 = MVE_VLDRWU32_post killed renamable $r1, 4, 1, renamable $vpr, $noreg :: (load (s128) from %ir.input_2_cast, align 4)
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0, renamable $q3 = MVE_VLDRWU32_post killed renamable $r0, 4, 1, renamable $vpr, $noreg :: (load (s128) from %ir.input_1_cast, align 4)
- ; CHECK: renamable $q2 = MVE_VADD_qr_i32 killed renamable $q2, renamable $r3, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: renamable $q3 = MVE_VADD_qr_i32 killed renamable $q3, renamable $r2, 0, $noreg, $noreg, undef renamable $q3
- ; CHECK: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q2 = MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: renamable $r4, dead $cpsr = tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q2 = MVE_VADD_qr_i32 killed renamable $q2, renamable $r8, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $q2 = MVE_VMAXu32 killed renamable $q2, renamable $q1, 1, renamable $vpr, $noreg, undef renamable $q2
- ; CHECK: renamable $q2 = MVE_VMINu32 killed renamable $q2, renamable $q0, 1, killed renamable $vpr, $noreg, undef renamable $q2
- ; CHECK: renamable $r6 = MVE_VADDVu32no_acc killed renamable $q2, 0, $noreg, $noreg
- ; CHECK: early-clobber renamable $r5 = t2STR_PRE killed renamable $r6, killed renamable $r5, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep2)
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: $sp = t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.3(0x40000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $r8, killed $lr
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 24
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r8, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r6, -16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -20
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -24
+ ; CHECK-NEXT: renamable $r12 = t2LDRi12 $sp, 48, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.6, align 8)
+ ; CHECK-NEXT: renamable $r5 = t2ADDri renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r7, dead $cpsr = tLSRri killed renamable $r5, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = t2WLS renamable $r7, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.for.body.lr.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r7, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r6, $r5 = t2LDRDi8 $sp, 40, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.4, align 8), (load (s32) from %fixed-stack.5)
+ ; CHECK-NEXT: $r4 = tMOVr killed $r7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r7, $r8 = t2LDRDi8 $sp, 24, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8), (load (s32) from %fixed-stack.1)
+ ; CHECK-NEXT: renamable $q0 = MVE_VDUP32 killed renamable $r5, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $q1 = MVE_VDUP32 killed renamable $r6, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $r5, dead $cpsr = tSUBi3 killed renamable $r7, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.for.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $q0, $q1, $r0, $r1, $r2, $r3, $r4, $r5, $r8, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r1, renamable $q2 = MVE_VLDRWU32_post killed renamable $r1, 4, 1, renamable $vpr, $noreg :: (load (s128) from %ir.input_2_cast, align 4)
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q3 = MVE_VLDRWU32_post killed renamable $r0, 4, 1, renamable $vpr, $noreg :: (load (s128) from %ir.input_1_cast, align 4)
+ ; CHECK-NEXT: renamable $q2 = MVE_VADD_qr_i32 killed renamable $q2, renamable $r3, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $q3 = MVE_VADD_qr_i32 killed renamable $q3, renamable $r2, 0, $noreg, $noreg, undef renamable $q3
+ ; CHECK-NEXT: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q2 = MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q2 = MVE_VADD_qr_i32 killed renamable $q2, renamable $r8, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $q2 = MVE_VMAXu32 killed renamable $q2, renamable $q1, 1, renamable $vpr, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $q2 = MVE_VMINu32 killed renamable $q2, renamable $q0, 1, killed renamable $vpr, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $r6 = MVE_VADDVu32no_acc killed renamable $q2, 0, $noreg, $noreg
+ ; CHECK-NEXT: early-clobber renamable $r5 = t2STR_PRE killed renamable $r6, killed renamable $r5, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep2)
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $sp = t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x40000000), %bb.3(0x40000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/begin-vpt-without-inst.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/begin-vpt-without-inst.mir
index 372fc6108129..c5f9d3253668 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/begin-vpt-without-inst.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/begin-vpt-without-inst.mir
@@ -57,30 +57,37 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: foo
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $r0
- ; CHECK: tCBZ $r0, %bb.3
- ; CHECK: bb.1.vector.ph.preheader:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0
- ; CHECK: renamable $r1 = tLEApcrel %const.0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 3, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $q1 = MVE_VLDRWU32 killed renamable $r1, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool, align 8)
- ; CHECK: $r1 = t2MOVi16 target-flags(arm-lo16) @arr, 14 /* CC::al */, $noreg
- ; CHECK: $r1 = t2MOVTi16 killed $r1, target-flags(arm-hi16) @arr, 14 /* CC::al */, $noreg
- ; CHECK: renamable $vpr = MVE_VCMPu32 killed renamable $q0, killed renamable $q1, 8, 0, $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 2, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: bb.2.vector.ph:
- ; CHECK: successors: %bb.3(0x04000000), %bb.2(0x7c000000)
- ; CHECK: liveins: $vpr, $q0, $r0, $r1
- ; CHECK: renamable $r0, $cpsr = tADDi8 killed renamable $r0, 1, 14 /* CC::al */, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: MVE_VSTRWU32 renamable $q0, renamable $r1, 0, 1, renamable $vpr, $noreg :: (store (s128) into @arr, align 4)
- ; CHECK: tBcc %bb.2, 3 /* CC::lo */, killed $cpsr
- ; CHECK: bb.3.for.end5:
- ; CHECK: tBX_RET 14 /* CC::al */, $noreg
- ; CHECK: bb.4 (align 8):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 16
+ ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCBZ $r0, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph.preheader:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1 = tLEApcrel %const.0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 3, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $q1 = MVE_VLDRWU32 killed renamable $r1, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool, align 8)
+ ; CHECK-NEXT: $r1 = t2MOVi16 target-flags(arm-lo16) @arr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r1 = t2MOVTi16 killed $r1, target-flags(arm-hi16) @arr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $vpr = MVE_VCMPu32 killed renamable $q0, killed renamable $q1, 8, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 2, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.ph:
+ ; CHECK-NEXT: successors: %bb.3(0x04000000), %bb.2(0x7c000000)
+ ; CHECK-NEXT: liveins: $vpr, $q0, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, $cpsr = tADDi8 killed renamable $r0, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: MVE_VSTRWU32 renamable $q0, renamable $r1, 0, 1, renamable $vpr, $noreg :: (store (s128) into @arr, align 4)
+ ; CHECK-NEXT: tBcc %bb.2, 3 /* CC::lo */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.end5:
+ ; CHECK-NEXT: tBX_RET 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4 (align 8):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 16
bb.0.entry:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/biquad-cascade-default.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/biquad-cascade-default.mir
index 40231e135597..647270bc0aad 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/biquad-cascade-default.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/biquad-cascade-default.mir
@@ -200,96 +200,108 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: arm_biquad_cascade_df1_q31
; CHECK: bb.0.bb:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11, $lr
- ; CHECK: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $r8, killed $r9, killed $r10, killed $r11, killed $lr
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 36
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r11, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r10, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r9, -16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r8, -20
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -24
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -28
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -32
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -36
- ; CHECK: $sp = frame-setup tSUBspi $sp, 10, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 76
- ; CHECK: $r6, $r5 = t2LDRDi8 $r0, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i7), (load (s32) from %ir.i10)
- ; CHECK: $r8 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: $r3, $r7 = t2LDRDi8 killed $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i), (load (s32) from %ir.i5)
- ; CHECK: renamable $r0 = t2RSBri killed renamable $r6, 31, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: t2STMIA $sp, 14 /* CC::al */, $noreg, killed $r0, $r2, $r8 :: (store (s32) into %stack.9), (store (s32) into %stack.8), (store (s32) into %stack.7)
- ; CHECK: $r12 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = tLDRspi $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.9)
- ; CHECK: tB %bb.2, 14 /* CC::al */, $noreg
- ; CHECK: bb.1.bb74 (align 4):
- ; CHECK: successors: %bb.6(0x04000000), %bb.2(0x7c000000)
- ; CHECK: liveins: $r0, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r12, $r2
- ; CHECK: renamable $r7, dead $cpsr = nuw tADDi8 killed renamable $r7, 20, 14 /* CC::al */, $noreg
- ; CHECK: t2STRDi8 killed $r9, killed $r4, $r3, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.i14), (store (s32) into %ir.i81)
- ; CHECK: t2STRDi8 killed $r6, killed $r0, $r3, 8, 14 /* CC::al */, $noreg :: (store (s32) into %ir.i84), (store (s32) into %ir.i88)
- ; CHECK: renamable $r3, dead $cpsr = nuw tADDi8 killed renamable $r3, 16, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r5, $cpsr = tSUBi8 killed renamable $r5, 1, 14 /* CC::al */, $noreg
- ; CHECK: $r1 = tMOVr $r12, 14 /* CC::al */, $noreg
- ; CHECK: tBcc %bb.6, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.2.bb12:
- ; CHECK: successors: %bb.3(0x40000000), %bb.1(0x40000000)
- ; CHECK: liveins: $r1, $r2, $r3, $r5, $r7, $r8, $r12
- ; CHECK: $r9, $r4 = t2LDRDi8 $r3, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i14), (load (s32) from %ir.i20)
- ; CHECK: $r6, $r0 = t2LDRDi8 $r3, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i22), (load (s32) from %ir.i24)
- ; CHECK: dead $lr = t2SUBri renamable $r8, 0, 14 /* CC::al */, $noreg, def $cpsr
- ; CHECK: tBcc %bb.1, 0 /* CC::eq */, killed $cpsr
- ; CHECK: tB %bb.3, 14 /* CC::al */, $noreg
- ; CHECK: bb.3.bb27:
- ; CHECK: successors: %bb.4(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r12
- ; CHECK: t2STRDi8 killed $r3, killed $r5, $sp, 12, 14 /* CC::al */, $noreg :: (store (s32) into %stack.6), (store (s32) into %stack.5)
- ; CHECK: renamable $r3 = tLDRi renamable $r7, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i13)
- ; CHECK: tSTRspi killed renamable $r3, $sp, 9, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
- ; CHECK: renamable $r3 = tLDRi renamable $r7, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i34)
- ; CHECK: tSTRspi killed renamable $r3, $sp, 8, 14 /* CC::al */, $noreg :: (store (s32) into %stack.1)
- ; CHECK: renamable $r3 = tLDRi renamable $r7, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i32)
- ; CHECK: tSTRspi killed renamable $r3, $sp, 7, 14 /* CC::al */, $noreg :: (store (s32) into %stack.2)
- ; CHECK: renamable $r3 = tLDRi renamable $r7, 3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i30)
- ; CHECK: t2STRDi8 $r7, killed $r3, $sp, 20, 14 /* CC::al */, $noreg :: (store (s32) into %stack.4), (store (s32) into %stack.3)
- ; CHECK: renamable $r10 = t2LDRi12 killed renamable $r7, 16, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i28)
- ; CHECK: bb.4.bb37 (align 4):
- ; CHECK: successors: %bb.4(0x7c000000), %bb.5(0x04000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r4, $r6, $r8, $r9, $r10, $r12
- ; CHECK: $r7 = tMOVr killed $r6, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6 = tLDRspi $sp, 8, 14 /* CC::al */, $noreg :: (load (s32) from %stack.1)
- ; CHECK: renamable $r3 = tLDRspi $sp, 7, 14 /* CC::al */, $noreg :: (load (s32) from %stack.2)
- ; CHECK: renamable $r6, renamable $r11 = t2SMULL $r9, killed renamable $r6, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6, renamable $r11 = t2SMLAL killed renamable $r4, killed renamable $r3, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = tLDRspi $sp, 6, 14 /* CC::al */, $noreg :: (load (s32) from %stack.3)
- ; CHECK: $r5 = tMOVr killed $r9, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6, renamable $r11 = t2SMLAL renamable $r7, killed renamable $r3, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r9, renamable $r1 = t2LDR_POST killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i38)
- ; CHECK: renamable $r6, renamable $r11 = t2SMLAL killed renamable $r0, renamable $r10, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0 = tLDRspi $sp, 9, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
- ; CHECK: $lr = tMOVr $r8, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6, renamable $r11 = t2SMLAL renamable $r9, killed renamable $r0, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
- ; CHECK: early-clobber renamable $r6, dead early-clobber renamable $r11 = MVE_ASRLr killed renamable $r6, killed renamable $r11, renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: early-clobber renamable $r12 = t2STR_POST renamable $r6, killed renamable $r12, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.i39)
- ; CHECK: dead renamable $lr = t2SUBri killed renamable $lr, 1, 14 /* CC::al */, $noreg, def $cpsr
- ; CHECK: renamable $r8 = t2SUBri killed renamable $r8, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $r0 = tMOVr $r7, 14 /* CC::al */, $noreg
- ; CHECK: $r4 = tMOVr $r5, 14 /* CC::al */, $noreg
- ; CHECK: tBcc %bb.4, 1 /* CC::ne */, killed $cpsr
- ; CHECK: tB %bb.5, 14 /* CC::al */, $noreg
- ; CHECK: bb.5.bb72:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $r2, $r5, $r6, $r7, $r9
- ; CHECK: $r0 = tMOVr killed $r7, 14 /* CC::al */, $noreg
- ; CHECK: $r7 = tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
- ; CHECK: $r4 = tMOVr killed $r5, 14 /* CC::al */, $noreg
- ; CHECK: $r12, $r8 = t2LDRDi8 $sp, 4, 14 /* CC::al */, $noreg :: (load (s32) from %stack.8), (load (s32) from %stack.7)
- ; CHECK: tLDMIA killed $r7, 14 /* CC::al */, $noreg, def $r3, def $r5, def $r7 :: (load (s32) from %stack.6), (load (s32) from %stack.5), (load (s32) from %stack.4)
- ; CHECK: tB %bb.1, 14 /* CC::al */, $noreg
- ; CHECK: bb.6.bb91:
- ; CHECK: $sp = frame-destroy tADDspi $sp, 10, 14 /* CC::al */, $noreg
- ; CHECK: $sp = frame-destroy t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11, def $pc
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11, $lr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $r8, killed $r9, killed $r10, killed $r11, killed $lr
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 36
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r11, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r10, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r9, -16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r8, -20
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -24
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r6, -28
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -32
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -36
+ ; CHECK-NEXT: $sp = frame-setup tSUBspi $sp, 10, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 76
+ ; CHECK-NEXT: $r6, $r5 = t2LDRDi8 $r0, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i7), (load (s32) from %ir.i10)
+ ; CHECK-NEXT: $r8 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r3, $r7 = t2LDRDi8 killed $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i), (load (s32) from %ir.i5)
+ ; CHECK-NEXT: renamable $r0 = t2RSBri killed renamable $r6, 31, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: t2STMIA $sp, 14 /* CC::al */, $noreg, killed $r0, $r2, $r8 :: (store (s32) into %stack.9), (store (s32) into %stack.8), (store (s32) into %stack.7)
+ ; CHECK-NEXT: $r12 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = tLDRspi $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.9)
+ ; CHECK-NEXT: tB %bb.2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.bb74 (align 4):
+ ; CHECK-NEXT: successors: %bb.6(0x04000000), %bb.2(0x7c000000)
+ ; CHECK-NEXT: liveins: $r0, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r12, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r7, dead $cpsr = nuw tADDi8 killed renamable $r7, 20, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2STRDi8 killed $r9, killed $r4, $r3, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.i14), (store (s32) into %ir.i81)
+ ; CHECK-NEXT: t2STRDi8 killed $r6, killed $r0, $r3, 8, 14 /* CC::al */, $noreg :: (store (s32) into %ir.i84), (store (s32) into %ir.i88)
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nuw tADDi8 killed renamable $r3, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r5, $cpsr = tSUBi8 killed renamable $r5, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r1 = tMOVr $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tBcc %bb.6, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.bb12:
+ ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $r1, $r2, $r3, $r5, $r7, $r8, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r9, $r4 = t2LDRDi8 $r3, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i14), (load (s32) from %ir.i20)
+ ; CHECK-NEXT: $r6, $r0 = t2LDRDi8 $r3, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i22), (load (s32) from %ir.i24)
+ ; CHECK-NEXT: dead $lr = t2SUBri renamable $r8, 0, 14 /* CC::al */, $noreg, def $cpsr
+ ; CHECK-NEXT: tBcc %bb.1, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: tB %bb.3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.bb27:
+ ; CHECK-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: t2STRDi8 killed $r3, killed $r5, $sp, 12, 14 /* CC::al */, $noreg :: (store (s32) into %stack.6), (store (s32) into %stack.5)
+ ; CHECK-NEXT: renamable $r3 = tLDRi renamable $r7, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i13)
+ ; CHECK-NEXT: tSTRspi killed renamable $r3, $sp, 9, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
+ ; CHECK-NEXT: renamable $r3 = tLDRi renamable $r7, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i34)
+ ; CHECK-NEXT: tSTRspi killed renamable $r3, $sp, 8, 14 /* CC::al */, $noreg :: (store (s32) into %stack.1)
+ ; CHECK-NEXT: renamable $r3 = tLDRi renamable $r7, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i32)
+ ; CHECK-NEXT: tSTRspi killed renamable $r3, $sp, 7, 14 /* CC::al */, $noreg :: (store (s32) into %stack.2)
+ ; CHECK-NEXT: renamable $r3 = tLDRi renamable $r7, 3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i30)
+ ; CHECK-NEXT: t2STRDi8 $r7, killed $r3, $sp, 20, 14 /* CC::al */, $noreg :: (store (s32) into %stack.4), (store (s32) into %stack.3)
+ ; CHECK-NEXT: renamable $r10 = t2LDRi12 killed renamable $r7, 16, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i28)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.bb37 (align 4):
+ ; CHECK-NEXT: successors: %bb.4(0x7c000000), %bb.5(0x04000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r4, $r6, $r8, $r9, $r10, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r7 = tMOVr killed $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6 = tLDRspi $sp, 8, 14 /* CC::al */, $noreg :: (load (s32) from %stack.1)
+ ; CHECK-NEXT: renamable $r3 = tLDRspi $sp, 7, 14 /* CC::al */, $noreg :: (load (s32) from %stack.2)
+ ; CHECK-NEXT: renamable $r6, renamable $r11 = t2SMULL $r9, killed renamable $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6, renamable $r11 = t2SMLAL killed renamable $r4, killed renamable $r3, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = tLDRspi $sp, 6, 14 /* CC::al */, $noreg :: (load (s32) from %stack.3)
+ ; CHECK-NEXT: $r5 = tMOVr killed $r9, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6, renamable $r11 = t2SMLAL renamable $r7, killed renamable $r3, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r9, renamable $r1 = t2LDR_POST killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i38)
+ ; CHECK-NEXT: renamable $r6, renamable $r11 = t2SMLAL killed renamable $r0, renamable $r10, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0 = tLDRspi $sp, 9, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: $lr = tMOVr $r8, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6, renamable $r11 = t2SMLAL renamable $r9, killed renamable $r0, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: early-clobber renamable $r6, dead early-clobber renamable $r11 = MVE_ASRLr killed renamable $r6, killed renamable $r11, renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: early-clobber renamable $r12 = t2STR_POST renamable $r6, killed renamable $r12, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.i39)
+ ; CHECK-NEXT: dead renamable $lr = t2SUBri killed renamable $lr, 1, 14 /* CC::al */, $noreg, def $cpsr
+ ; CHECK-NEXT: renamable $r8 = t2SUBri killed renamable $r8, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr $r7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r4 = tMOVr $r5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tBcc %bb.4, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NEXT: tB %bb.5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5.bb72:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $r2, $r5, $r6, $r7, $r9
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $r7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r7 = tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r4 = tMOVr killed $r5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r12, $r8 = t2LDRDi8 $sp, 4, 14 /* CC::al */, $noreg :: (load (s32) from %stack.8), (load (s32) from %stack.7)
+ ; CHECK-NEXT: tLDMIA killed $r7, 14 /* CC::al */, $noreg, def $r3, def $r5, def $r7 :: (load (s32) from %stack.6), (load (s32) from %stack.5), (load (s32) from %stack.4)
+ ; CHECK-NEXT: tB %bb.1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6.bb91:
+ ; CHECK-NEXT: $sp = frame-destroy tADDspi $sp, 10, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $sp = frame-destroy t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11, def $pc
bb.0.bb:
successors: %bb.2(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/biquad-cascade-optsize-strd-lr.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/biquad-cascade-optsize-strd-lr.mir
index 789429abf778..d4bc80dde254 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/biquad-cascade-optsize-strd-lr.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/biquad-cascade-optsize-strd-lr.mir
@@ -3,7 +3,6 @@
--- |
%struct.arm_biquad_casd_df1_inst_q31 = type { ptr, ptr, i32, i32 }
- ; Function Attrs: optsize
define hidden void @arm_biquad_cascade_df1_q31(ptr nocapture readonly %arg, ptr nocapture readonly %arg1, ptr nocapture %arg2, i32 %arg3) #0 {
bb:
%i = bitcast ptr %arg to ptr
@@ -198,83 +197,95 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: arm_biquad_cascade_df1_q31
; CHECK: bb.0.bb:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11, $lr
- ; CHECK: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $r8, killed $r9, killed $r10, killed $r11, killed $lr
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 36
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r11, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r10, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r9, -16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r8, -20
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -24
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -28
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -32
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -36
- ; CHECK: $sp = frame-setup tSUBspi $sp, 8, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 68
- ; CHECK: $r6, $r4 = t2LDRDi8 $r0, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i7), (load (s32) from %ir.i10)
- ; CHECK: $r7, $r5 = t2LDRDi8 killed $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i), (load (s32) from %ir.i5)
- ; CHECK: renamable $r0 = t2RSBri killed renamable $r6, 31, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: t2STMIA $sp, 14 /* CC::al */, $noreg, killed $r0, $r2, $r3 :: (store (s32) into %stack.7), (store (s32) into %stack.6), (store (s32) into %stack.5)
- ; CHECK: $r12 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = tLDRspi $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.7)
- ; CHECK: bb.1.bb12 (align 4):
- ; CHECK: successors: %bb.2(0x40000000), %bb.5(0x40000000)
- ; CHECK: liveins: $r1, $r2, $r3, $r4, $r5, $r7, $r12
- ; CHECK: $r10, $r0 = t2LDRDi8 $r7, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i14), (load (s32) from %ir.i20)
- ; CHECK: $r6, $r8 = t2LDRDi8 $r7, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i22), (load (s32) from %ir.i24)
- ; CHECK: $lr = t2WLS renamable $r3, %bb.5
- ; CHECK: bb.2.bb27:
- ; CHECK: successors: %bb.3(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r4, $r5, $r6, $r7, $r8, $r10, $r12
- ; CHECK: renamable $r3 = tLDRi renamable $r5, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i13)
- ; CHECK: t2STRDi8 killed $r7, killed $r4, $sp, 12, 14 /* CC::al */, $noreg :: (store (s32) into %stack.4), (store (s32) into %stack.3)
- ; CHECK: tSTRspi killed renamable $r3, $sp, 7, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
- ; CHECK: renamable $r3 = tLDRi renamable $r5, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i34)
- ; CHECK: renamable $r4 = tLDRi renamable $r5, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i28)
- ; CHECK: tSTRspi killed renamable $r3, $sp, 6, 14 /* CC::al */, $noreg :: (store (s32) into %stack.1)
- ; CHECK: $r9, $r3 = t2LDRDi8 $r5, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i32), (load (s32) from %ir.i30)
- ; CHECK: tSTRspi killed renamable $r5, $sp, 5, 14 /* CC::al */, $noreg :: (store (s32) into %stack.2)
- ; CHECK: bb.3.bb37 (align 4):
- ; CHECK: successors: %bb.3(0x7c000000), %bb.4(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r6, $r8, $r9, $r10, $r12
- ; CHECK: $r7 = tMOVr killed $r6, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6 = tLDRspi $sp, 6, 14 /* CC::al */, $noreg :: (load (s32) from %stack.1)
- ; CHECK: $r5 = tMOVr $r10, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6, renamable $r11 = t2SMULL killed $r10, killed renamable $r6, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6, renamable $r11 = t2SMLAL killed renamable $r0, renamable $r9, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r10, renamable $r1 = t2LDR_POST killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i38)
- ; CHECK: renamable $r6, renamable $r11 = t2SMLAL renamable $r7, renamable $r3, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0 = tLDRspi $sp, 7, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
- ; CHECK: renamable $r6, renamable $r11 = t2SMLAL killed renamable $r8, renamable $r4, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6, renamable $r11 = t2SMLAL renamable $r10, killed renamable $r0, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
- ; CHECK: early-clobber renamable $r6, dead early-clobber renamable $r11 = MVE_ASRLr killed renamable $r6, killed renamable $r11, renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: early-clobber renamable $r12 = t2STR_POST renamable $r6, killed renamable $r12, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.i39)
- ; CHECK: $r8 = tMOVr $r7, 14 /* CC::al */, $noreg
- ; CHECK: $r0 = tMOVr $r5, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.3
- ; CHECK: bb.4.bb72:
- ; CHECK: successors: %bb.5(0x80000000)
- ; CHECK: liveins: $r2, $r5, $r6, $r7, $r10
- ; CHECK: $r0 = tMOVr killed $r5, 14 /* CC::al */, $noreg
- ; CHECK: $r8 = tMOVr killed $r7, 14 /* CC::al */, $noreg
- ; CHECK: $r12, $r3 = t2LDRDi8 $sp, 4, 14 /* CC::al */, $noreg :: (load (s32) from %stack.6), (load (s32) from %stack.5)
- ; CHECK: renamable $r5 = tLDRspi $sp, 5, 14 /* CC::al */, $noreg :: (load (s32) from %stack.2)
- ; CHECK: $r7, $r4 = t2LDRDi8 $sp, 12, 14 /* CC::al */, $noreg :: (load (s32) from %stack.4), (load (s32) from %stack.3)
- ; CHECK: bb.5.bb74:
- ; CHECK: successors: %bb.6(0x04000000), %bb.1(0x7c000000)
- ; CHECK: liveins: $r0, $r3, $r4, $r5, $r6, $r7, $r8, $r10, $r12, $r2
- ; CHECK: renamable $r5, dead $cpsr = nuw tADDi8 killed renamable $r5, 20, 14 /* CC::al */, $noreg
- ; CHECK: t2STRDi8 killed $r10, killed $r0, $r7, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.i14), (store (s32) into %ir.i81)
- ; CHECK: t2STRDi8 killed $r6, killed $r8, $r7, 8, 14 /* CC::al */, $noreg :: (store (s32) into %ir.i84), (store (s32) into %ir.i88)
- ; CHECK: renamable $r7, dead $cpsr = nuw tADDi8 killed renamable $r7, 16, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4, $cpsr = tSUBi8 killed renamable $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: $r1 = tMOVr $r12, 14 /* CC::al */, $noreg
- ; CHECK: tBcc %bb.1, 1 /* CC::ne */, killed $cpsr
- ; CHECK: bb.6.bb91:
- ; CHECK: $sp = frame-destroy tADDspi $sp, 8, 14 /* CC::al */, $noreg
- ; CHECK: $sp = frame-destroy t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11, $lr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $r8, killed $r9, killed $r10, killed $r11, killed $lr
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 36
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r11, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r10, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r9, -16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r8, -20
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -24
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r6, -28
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -32
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -36
+ ; CHECK-NEXT: $sp = frame-setup tSUBspi $sp, 8, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 68
+ ; CHECK-NEXT: $r6, $r4 = t2LDRDi8 $r0, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i7), (load (s32) from %ir.i10)
+ ; CHECK-NEXT: $r7, $r5 = t2LDRDi8 killed $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i), (load (s32) from %ir.i5)
+ ; CHECK-NEXT: renamable $r0 = t2RSBri killed renamable $r6, 31, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: t2STMIA $sp, 14 /* CC::al */, $noreg, killed $r0, $r2, $r3 :: (store (s32) into %stack.7), (store (s32) into %stack.6), (store (s32) into %stack.5)
+ ; CHECK-NEXT: $r12 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = tLDRspi $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.7)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.bb12 (align 4):
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.5(0x40000000)
+ ; CHECK-NEXT: liveins: $r1, $r2, $r3, $r4, $r5, $r7, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r10, $r0 = t2LDRDi8 $r7, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i14), (load (s32) from %ir.i20)
+ ; CHECK-NEXT: $r6, $r8 = t2LDRDi8 $r7, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i22), (load (s32) from %ir.i24)
+ ; CHECK-NEXT: $lr = t2WLS renamable $r3, %bb.5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.bb27:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r4, $r5, $r6, $r7, $r8, $r10, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3 = tLDRi renamable $r5, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i13)
+ ; CHECK-NEXT: t2STRDi8 killed $r7, killed $r4, $sp, 12, 14 /* CC::al */, $noreg :: (store (s32) into %stack.4), (store (s32) into %stack.3)
+ ; CHECK-NEXT: tSTRspi killed renamable $r3, $sp, 7, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
+ ; CHECK-NEXT: renamable $r3 = tLDRi renamable $r5, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i34)
+ ; CHECK-NEXT: renamable $r4 = tLDRi renamable $r5, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i28)
+ ; CHECK-NEXT: tSTRspi killed renamable $r3, $sp, 6, 14 /* CC::al */, $noreg :: (store (s32) into %stack.1)
+ ; CHECK-NEXT: $r9, $r3 = t2LDRDi8 $r5, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i32), (load (s32) from %ir.i30)
+ ; CHECK-NEXT: tSTRspi killed renamable $r5, $sp, 5, 14 /* CC::al */, $noreg :: (store (s32) into %stack.2)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.bb37 (align 4):
+ ; CHECK-NEXT: successors: %bb.3(0x7c000000), %bb.4(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r6, $r8, $r9, $r10, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r7 = tMOVr killed $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6 = tLDRspi $sp, 6, 14 /* CC::al */, $noreg :: (load (s32) from %stack.1)
+ ; CHECK-NEXT: $r5 = tMOVr $r10, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6, renamable $r11 = t2SMULL killed $r10, killed renamable $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6, renamable $r11 = t2SMLAL killed renamable $r0, renamable $r9, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r10, renamable $r1 = t2LDR_POST killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i38)
+ ; CHECK-NEXT: renamable $r6, renamable $r11 = t2SMLAL renamable $r7, renamable $r3, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0 = tLDRspi $sp, 7, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: renamable $r6, renamable $r11 = t2SMLAL killed renamable $r8, renamable $r4, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6, renamable $r11 = t2SMLAL renamable $r10, killed renamable $r0, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: early-clobber renamable $r6, dead early-clobber renamable $r11 = MVE_ASRLr killed renamable $r6, killed renamable $r11, renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: early-clobber renamable $r12 = t2STR_POST renamable $r6, killed renamable $r12, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.i39)
+ ; CHECK-NEXT: $r8 = tMOVr $r7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr $r5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.bb72:
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
+ ; CHECK-NEXT: liveins: $r2, $r5, $r6, $r7, $r10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $r5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r8 = tMOVr killed $r7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r12, $r3 = t2LDRDi8 $sp, 4, 14 /* CC::al */, $noreg :: (load (s32) from %stack.6), (load (s32) from %stack.5)
+ ; CHECK-NEXT: renamable $r5 = tLDRspi $sp, 5, 14 /* CC::al */, $noreg :: (load (s32) from %stack.2)
+ ; CHECK-NEXT: $r7, $r4 = t2LDRDi8 $sp, 12, 14 /* CC::al */, $noreg :: (load (s32) from %stack.4), (load (s32) from %stack.3)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5.bb74:
+ ; CHECK-NEXT: successors: %bb.6(0x04000000), %bb.1(0x7c000000)
+ ; CHECK-NEXT: liveins: $r0, $r3, $r4, $r5, $r6, $r7, $r8, $r10, $r12, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r5, dead $cpsr = nuw tADDi8 killed renamable $r5, 20, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2STRDi8 killed $r10, killed $r0, $r7, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.i14), (store (s32) into %ir.i81)
+ ; CHECK-NEXT: t2STRDi8 killed $r6, killed $r8, $r7, 8, 14 /* CC::al */, $noreg :: (store (s32) into %ir.i84), (store (s32) into %ir.i88)
+ ; CHECK-NEXT: renamable $r7, dead $cpsr = nuw tADDi8 killed renamable $r7, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4, $cpsr = tSUBi8 killed renamable $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r1 = tMOVr $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tBcc %bb.1, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6.bb91:
+ ; CHECK-NEXT: $sp = frame-destroy tADDspi $sp, 8, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $sp = frame-destroy t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11, def $pc
bb.0.bb:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/biquad-cascade-optsize.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/biquad-cascade-optsize.mir
index 3a8765f48cc5..e7f64cac75b3 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/biquad-cascade-optsize.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/biquad-cascade-optsize.mir
@@ -4,7 +4,6 @@
--- |
%struct.arm_biquad_casd_df1_inst_q31 = type { ptr, ptr, i32, i32 }
- ; Function Attrs: optsize
define hidden void @arm_biquad_cascade_df1_q31(ptr nocapture readonly %arg, ptr nocapture readonly %arg1, ptr nocapture %arg2, i32 %arg3) #0 {
bb:
%i = bitcast ptr %arg to ptr
@@ -116,10 +115,8 @@
ret void
}
- ; Function Attrs: noduplicate nounwind
declare i1 @llvm.test.set.loop.iterations.i32(i32) #1
- ; Function Attrs: noduplicate nounwind
declare i32 @llvm.loop.decrement.reg.i32(i32, i32) #1
attributes #0 = { optsize "target-cpu"="cortex-m55" }
@@ -208,90 +205,102 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: arm_biquad_cascade_df1_q31
; CHECK: bb.0.bb:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11, $lr
- ; CHECK: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $r8, killed $r9, killed $r10, killed $r11, killed $lr
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 36
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r11, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r10, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r9, -16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r8, -20
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -24
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -28
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -32
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -36
- ; CHECK: $sp = frame-setup tSUBspi $sp, 10, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 76
- ; CHECK: $r6, $r5 = t2LDRDi8 $r0, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i7), (load (s32) from %ir.i10)
- ; CHECK: $r8 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: $r3, $r7 = t2LDRDi8 killed $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i), (load (s32) from %ir.i5)
- ; CHECK: renamable $r0 = t2RSBri killed renamable $r6, 31, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: t2STMIA $sp, 14 /* CC::al */, $noreg, killed $r0, $r2, $r8 :: (store (s32) into %stack.9), (store (s32) into %stack.8), (store (s32) into %stack.7)
- ; CHECK: $r12 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = tLDRspi $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.9)
- ; CHECK: bb.1.bb12 (align 4):
- ; CHECK: successors: %bb.2(0x40000000), %bb.5(0x40000000)
- ; CHECK: liveins: $r1, $r2, $r3, $r5, $r7, $r8, $r12
- ; CHECK: $r9, $r4 = t2LDRDi8 $r3, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i14), (load (s32) from %ir.i20)
- ; CHECK: $r6, $r0 = t2LDRDi8 $r3, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i22), (load (s32) from %ir.i24)
- ; CHECK: dead $lr = t2WLS renamable $r8, %bb.5
- ; CHECK: bb.2.bb27:
- ; CHECK: successors: %bb.3(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r12
- ; CHECK: t2STRDi8 killed $r3, killed $r5, $sp, 12, 14 /* CC::al */, $noreg :: (store (s32) into %stack.6), (store (s32) into %stack.5)
- ; CHECK: renamable $r3 = tLDRi renamable $r7, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i13)
- ; CHECK: tSTRspi killed renamable $r3, $sp, 9, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
- ; CHECK: renamable $r3 = tLDRi renamable $r7, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i34)
- ; CHECK: tSTRspi killed renamable $r3, $sp, 8, 14 /* CC::al */, $noreg :: (store (s32) into %stack.1)
- ; CHECK: renamable $r3 = tLDRi renamable $r7, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i32)
- ; CHECK: tSTRspi killed renamable $r3, $sp, 7, 14 /* CC::al */, $noreg :: (store (s32) into %stack.2)
- ; CHECK: renamable $r3 = tLDRi renamable $r7, 3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i30)
- ; CHECK: t2STRDi8 $r7, killed $r3, $sp, 20, 14 /* CC::al */, $noreg :: (store (s32) into %stack.4), (store (s32) into %stack.3)
- ; CHECK: renamable $r10 = t2LDRi12 killed renamable $r7, 16, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i28)
- ; CHECK: bb.3.bb37 (align 4):
- ; CHECK: successors: %bb.3(0x7c000000), %bb.4(0x04000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r4, $r6, $r8, $r9, $r10, $r12
- ; CHECK: $r7 = tMOVr killed $r6, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6 = tLDRspi $sp, 8, 14 /* CC::al */, $noreg :: (load (s32) from %stack.1)
- ; CHECK: renamable $r3 = tLDRspi $sp, 7, 14 /* CC::al */, $noreg :: (load (s32) from %stack.2)
- ; CHECK: renamable $r6, renamable $r11 = t2SMULL $r9, killed renamable $r6, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6, renamable $r11 = t2SMLAL killed renamable $r4, killed renamable $r3, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = tLDRspi $sp, 6, 14 /* CC::al */, $noreg :: (load (s32) from %stack.3)
- ; CHECK: $r5 = tMOVr killed $r9, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6, renamable $r11 = t2SMLAL renamable $r7, killed renamable $r3, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r9, renamable $r1 = t2LDR_POST killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i38)
- ; CHECK: renamable $r6, renamable $r11 = t2SMLAL killed renamable $r0, renamable $r10, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0 = tLDRspi $sp, 9, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
- ; CHECK: $lr = tMOVr $r8, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6, renamable $r11 = t2SMLAL renamable $r9, killed renamable $r0, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
- ; CHECK: early-clobber renamable $r6, dead early-clobber renamable $r11 = MVE_ASRLr killed renamable $r6, killed renamable $r11, renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: early-clobber renamable $r12 = t2STR_POST renamable $r6, killed renamable $r12, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.i39)
- ; CHECK: renamable $r8 = t2SUBri killed renamable $r8, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $r0 = tMOVr $r7, 14 /* CC::al */, $noreg
- ; CHECK: $r4 = tMOVr $r5, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.3
- ; CHECK: bb.4.bb72:
- ; CHECK: successors: %bb.5(0x80000000)
- ; CHECK: liveins: $r2, $r5, $r6, $r7, $r9
- ; CHECK: $r0 = tMOVr killed $r7, 14 /* CC::al */, $noreg
- ; CHECK: $r7 = tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
- ; CHECK: $r4 = tMOVr killed $r5, 14 /* CC::al */, $noreg
- ; CHECK: $r12, $r8 = t2LDRDi8 $sp, 4, 14 /* CC::al */, $noreg :: (load (s32) from %stack.8), (load (s32) from %stack.7)
- ; CHECK: tLDMIA killed $r7, 14 /* CC::al */, $noreg, def $r3, def $r5, def $r7 :: (load (s32) from %stack.6), (load (s32) from %stack.5), (load (s32) from %stack.4)
- ; CHECK: bb.5.bb74:
- ; CHECK: successors: %bb.6(0x04000000), %bb.1(0x7c000000)
- ; CHECK: liveins: $r0, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r12, $r2
- ; CHECK: renamable $r7, dead $cpsr = nuw tADDi8 killed renamable $r7, 20, 14 /* CC::al */, $noreg
- ; CHECK: t2STRDi8 killed $r9, killed $r4, $r3, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.i14), (store (s32) into %ir.i81)
- ; CHECK: t2STRDi8 killed $r6, killed $r0, $r3, 8, 14 /* CC::al */, $noreg :: (store (s32) into %ir.i84), (store (s32) into %ir.i88)
- ; CHECK: renamable $r3, dead $cpsr = nuw tADDi8 killed renamable $r3, 16, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r5, $cpsr = tSUBi8 killed renamable $r5, 1, 14 /* CC::al */, $noreg
- ; CHECK: $r1 = tMOVr $r12, 14 /* CC::al */, $noreg
- ; CHECK: tBcc %bb.1, 1 /* CC::ne */, killed $cpsr
- ; CHECK: bb.6.bb91:
- ; CHECK: $sp = frame-destroy tADDspi $sp, 10, 14 /* CC::al */, $noreg
- ; CHECK: $sp = frame-destroy t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11, $lr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $r8, killed $r9, killed $r10, killed $r11, killed $lr
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 36
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r11, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r10, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r9, -16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r8, -20
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -24
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r6, -28
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -32
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -36
+ ; CHECK-NEXT: $sp = frame-setup tSUBspi $sp, 10, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 76
+ ; CHECK-NEXT: $r6, $r5 = t2LDRDi8 $r0, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i7), (load (s32) from %ir.i10)
+ ; CHECK-NEXT: $r8 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r3, $r7 = t2LDRDi8 killed $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i), (load (s32) from %ir.i5)
+ ; CHECK-NEXT: renamable $r0 = t2RSBri killed renamable $r6, 31, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: t2STMIA $sp, 14 /* CC::al */, $noreg, killed $r0, $r2, $r8 :: (store (s32) into %stack.9), (store (s32) into %stack.8), (store (s32) into %stack.7)
+ ; CHECK-NEXT: $r12 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = tLDRspi $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.9)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.bb12 (align 4):
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.5(0x40000000)
+ ; CHECK-NEXT: liveins: $r1, $r2, $r3, $r5, $r7, $r8, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r9, $r4 = t2LDRDi8 $r3, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i14), (load (s32) from %ir.i20)
+ ; CHECK-NEXT: $r6, $r0 = t2LDRDi8 $r3, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i22), (load (s32) from %ir.i24)
+ ; CHECK-NEXT: dead $lr = t2WLS renamable $r8, %bb.5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.bb27:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: t2STRDi8 killed $r3, killed $r5, $sp, 12, 14 /* CC::al */, $noreg :: (store (s32) into %stack.6), (store (s32) into %stack.5)
+ ; CHECK-NEXT: renamable $r3 = tLDRi renamable $r7, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i13)
+ ; CHECK-NEXT: tSTRspi killed renamable $r3, $sp, 9, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
+ ; CHECK-NEXT: renamable $r3 = tLDRi renamable $r7, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i34)
+ ; CHECK-NEXT: tSTRspi killed renamable $r3, $sp, 8, 14 /* CC::al */, $noreg :: (store (s32) into %stack.1)
+ ; CHECK-NEXT: renamable $r3 = tLDRi renamable $r7, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i32)
+ ; CHECK-NEXT: tSTRspi killed renamable $r3, $sp, 7, 14 /* CC::al */, $noreg :: (store (s32) into %stack.2)
+ ; CHECK-NEXT: renamable $r3 = tLDRi renamable $r7, 3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i30)
+ ; CHECK-NEXT: t2STRDi8 $r7, killed $r3, $sp, 20, 14 /* CC::al */, $noreg :: (store (s32) into %stack.4), (store (s32) into %stack.3)
+ ; CHECK-NEXT: renamable $r10 = t2LDRi12 killed renamable $r7, 16, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i28)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.bb37 (align 4):
+ ; CHECK-NEXT: successors: %bb.3(0x7c000000), %bb.4(0x04000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r4, $r6, $r8, $r9, $r10, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r7 = tMOVr killed $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6 = tLDRspi $sp, 8, 14 /* CC::al */, $noreg :: (load (s32) from %stack.1)
+ ; CHECK-NEXT: renamable $r3 = tLDRspi $sp, 7, 14 /* CC::al */, $noreg :: (load (s32) from %stack.2)
+ ; CHECK-NEXT: renamable $r6, renamable $r11 = t2SMULL $r9, killed renamable $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6, renamable $r11 = t2SMLAL killed renamable $r4, killed renamable $r3, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = tLDRspi $sp, 6, 14 /* CC::al */, $noreg :: (load (s32) from %stack.3)
+ ; CHECK-NEXT: $r5 = tMOVr killed $r9, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6, renamable $r11 = t2SMLAL renamable $r7, killed renamable $r3, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r9, renamable $r1 = t2LDR_POST killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.i38)
+ ; CHECK-NEXT: renamable $r6, renamable $r11 = t2SMLAL killed renamable $r0, renamable $r10, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0 = tLDRspi $sp, 9, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: $lr = tMOVr $r8, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6, renamable $r11 = t2SMLAL renamable $r9, killed renamable $r0, killed renamable $r6, killed renamable $r11, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: early-clobber renamable $r6, dead early-clobber renamable $r11 = MVE_ASRLr killed renamable $r6, killed renamable $r11, renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: early-clobber renamable $r12 = t2STR_POST renamable $r6, killed renamable $r12, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.i39)
+ ; CHECK-NEXT: renamable $r8 = t2SUBri killed renamable $r8, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr $r7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r4 = tMOVr $r5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.bb72:
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
+ ; CHECK-NEXT: liveins: $r2, $r5, $r6, $r7, $r9
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $r7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r7 = tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r4 = tMOVr killed $r5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r12, $r8 = t2LDRDi8 $sp, 4, 14 /* CC::al */, $noreg :: (load (s32) from %stack.8), (load (s32) from %stack.7)
+ ; CHECK-NEXT: tLDMIA killed $r7, 14 /* CC::al */, $noreg, def $r3, def $r5, def $r7 :: (load (s32) from %stack.6), (load (s32) from %stack.5), (load (s32) from %stack.4)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5.bb74:
+ ; CHECK-NEXT: successors: %bb.6(0x04000000), %bb.1(0x7c000000)
+ ; CHECK-NEXT: liveins: $r0, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r12, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r7, dead $cpsr = nuw tADDi8 killed renamable $r7, 20, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2STRDi8 killed $r9, killed $r4, $r3, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.i14), (store (s32) into %ir.i81)
+ ; CHECK-NEXT: t2STRDi8 killed $r6, killed $r0, $r3, 8, 14 /* CC::al */, $noreg :: (store (s32) into %ir.i84), (store (s32) into %ir.i88)
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nuw tADDi8 killed renamable $r3, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r5, $cpsr = tSUBi8 killed renamable $r5, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r1 = tMOVr $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tBcc %bb.1, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6.bb91:
+ ; CHECK-NEXT: $sp = frame-destroy tADDspi $sp, 10, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $sp = frame-destroy t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11, def $pc
bb.0.bb:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cmplx_cong.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cmplx_cong.mir
index ef037b8a5c46..cc98419f47ef 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cmplx_cong.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cmplx_cong.mir
@@ -5,7 +5,6 @@
@arm_cmplx_conj_f32_mve.cmplx_conj_sign = internal constant [4 x float] [float 1.000000e+00, float -1.000000e+00, float 1.000000e+00, float -1.000000e+00], align 4
- ; Function Attrs: nounwind
define hidden void @arm_cmplx_conj_f32_mve(ptr %pSrc, ptr %pDst, i32 %blockSize) local_unnamed_addr #0 {
entry:
ret void
@@ -37,28 +36,32 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: arm_cmplx_conj_f32_mve
; CHECK: bb.0:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: renamable $r3, dead $cpsr = tLSLri killed renamable $r2, 1, 14 /* CC::al */, $noreg
- ; CHECK: $r4 = t2MOVi16 target-flags(arm-lo16) @arm_cmplx_conj_f32_mve.cmplx_conj_sign, 14 /* CC::al */, $noreg
- ; CHECK: $r4 = t2MOVTi16 killed $r4, target-flags(arm-hi16) @arm_cmplx_conj_f32_mve.cmplx_conj_sign, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = nnan ninf nsz MVE_VLDRWU32 killed renamable $r4, 0, 0, $noreg, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3
- ; CHECK: bb.1 (align 4):
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1
- ; CHECK: renamable $q1 = nnan ninf nsz MVE_VLDRWU32 renamable $r0, 0, 0, $noreg, $noreg
- ; CHECK: renamable $q1 = nnan ninf nsz MVE_VMULf32 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: MVE_VSTRWU32 killed renamable $q1, renamable $r1, 0, 0, killed $noreg, $noreg
- ; CHECK: renamable $r1, dead $cpsr = nuw tADDi8 killed renamable $r1, 16, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0, dead $cpsr = nuw tADDi8 killed renamable $r0, 16, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.1
- ; CHECK: bb.2:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tLSLri killed renamable $r2, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r4 = t2MOVi16 target-flags(arm-lo16) @arm_cmplx_conj_f32_mve.cmplx_conj_sign, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r4 = t2MOVTi16 killed $r4, target-flags(arm-hi16) @arm_cmplx_conj_f32_mve.cmplx_conj_sign, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = nnan ninf nsz MVE_VLDRWU32 killed renamable $r4, 0, 0, $noreg, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1 (align 4):
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q1 = nnan ninf nsz MVE_VLDRWU32 renamable $r0, 0, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q1 = nnan ninf nsz MVE_VMULf32 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q1, renamable $r1, 0, 0, killed $noreg, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = nuw tADDi8 killed renamable $r1, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = nuw tADDi8 killed renamable $r0, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
bb.0:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r4, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-mov.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-mov.mir
index d124063f6a84..615dd3f47654 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-mov.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-mov.mir
@@ -85,25 +85,29 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: do_copy
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $lr = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2DLS killed $r0
- ; CHECK: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi3 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: bb.1.while.body:
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1
- ; CHECK: renamable $r2, renamable $r1 = t2LDR_PRE killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep6)
- ; CHECK: early-clobber renamable $r0 = t2STR_PRE killed renamable $r2, killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep2)
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.1
- ; CHECK: bb.2.while.end:
- ; CHECK: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $lr = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2DLS killed $r0
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi3 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.while.body:
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r2, renamable $r1 = t2LDR_PRE killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep6)
+ ; CHECK-NEXT: early-clobber renamable $r0 = t2STR_PRE killed renamable $r2, killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep2)
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.while.end:
+ ; CHECK-NEXT: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/count_dominates_start.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/count_dominates_start.mir
index 6307db7f5503..fe50e0606b87 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/count_dominates_start.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/count_dominates_start.mir
@@ -103,52 +103,60 @@ liveins:
body: |
; CHECK-LABEL: name: test
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.2(0x50000000), %bb.1(0x30000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: [[COPY:%[0-9]+]]:gprnopc = COPY $r2
- ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY $r1
- ; CHECK: [[COPY2:%[0-9]+]]:gpr = COPY $r0
- ; CHECK: t2CMPri [[COPY]], 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2Bcc %bb.2, 10 /* CC::ge */, $cpsr
- ; CHECK: bb.1:
- ; CHECK: successors: %bb.4(0x80000000)
- ; CHECK: [[t2MOVi:%[0-9]+]]:rgpr = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: [[COPY3:%[0-9]+]]:gpr = COPY [[t2MOVi]]
- ; CHECK: t2B %bb.4, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.ph:
- ; CHECK: successors: %bb.3(0x80000000)
- ; CHECK: [[t2ADDri:%[0-9]+]]:rgpr = t2ADDri [[COPY]], 7, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: [[t2BICri:%[0-9]+]]:rgpr = t2BICri [[t2ADDri]], 7, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: [[t2SUBri:%[0-9]+]]:rgpr = t2SUBri [[t2BICri]], 8, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: [[t2MOVi1:%[0-9]+]]:rgpr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: [[t2ADDrs:%[0-9]+]]:gprnopc = nuw nsw t2ADDrs [[t2MOVi1]], [[t2SUBri]], 27, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: [[COPY4:%[0-9]+]]:rgpr = COPY [[t2ADDrs]]
- ; CHECK: [[t2MOVi2:%[0-9]+]]:rgpr = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: [[COPY5:%[0-9]+]]:gpr = COPY [[t2MOVi2]]
- ; CHECK: [[COPY6:%[0-9]+]]:rgpr = COPY [[COPY]]
- ; CHECK: [[t2DoLoopStartTP:%[0-9]+]]:gprlr = t2DoLoopStartTP [[COPY4]], [[COPY6]]
- ; CHECK: bb.3.vector.body:
- ; CHECK: successors: %bb.3(0x7c000000), %bb.4(0x04000000)
- ; CHECK: [[PHI:%[0-9]+]]:rgpr = PHI [[COPY2]], %bb.2, %10, %bb.3
- ; CHECK: [[PHI1:%[0-9]+]]:rgpr = PHI [[COPY1]], %bb.2, %9, %bb.3
- ; CHECK: [[PHI2:%[0-9]+]]:tgpreven = PHI [[COPY5]], %bb.2, %8, %bb.3
- ; CHECK: [[PHI3:%[0-9]+]]:gprlr = PHI [[t2DoLoopStartTP]], %bb.2, %33, %bb.3
- ; CHECK: [[PHI4:%[0-9]+]]:rgpr = PHI [[COPY6]], %bb.2, %7, %bb.3
- ; CHECK: [[MVE_VCTP16_:%[0-9]+]]:vccr = MVE_VCTP16 [[PHI4]], 0, $noreg, $noreg
- ; CHECK: [[t2SUBri1:%[0-9]+]]:rgpr = t2SUBri [[PHI4]], 8, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: [[COPY7:%[0-9]+]]:gpr = COPY [[t2SUBri1]]
- ; CHECK: [[MVE_VLDRHU16_post:%[0-9]+]]:rgpr, [[MVE_VLDRHU16_post1:%[0-9]+]]:mqpr = MVE_VLDRHU16_post [[PHI]], 16, 1, [[MVE_VCTP16_]], [[PHI3]] :: (load (s128) from %ir.lsr.iv35, align 2)
- ; CHECK: [[MVE_VLDRHU16_post2:%[0-9]+]]:rgpr, [[MVE_VLDRHU16_post3:%[0-9]+]]:mqpr = MVE_VLDRHU16_post [[PHI1]], 16, 1, [[MVE_VCTP16_]], [[PHI3]] :: (load (s128) from %ir.lsr.iv12, align 2)
- ; CHECK: [[MVE_VMLADAVas16_:%[0-9]+]]:tgpreven = MVE_VMLADAVas16 [[PHI2]], killed [[MVE_VLDRHU16_post3]], killed [[MVE_VLDRHU16_post1]], 1, [[MVE_VCTP16_]], [[PHI3]]
- ; CHECK: [[COPY8:%[0-9]+]]:gpr = COPY [[MVE_VMLADAVas16_]]
- ; CHECK: [[COPY9:%[0-9]+]]:gpr = COPY [[MVE_VLDRHU16_post2]]
- ; CHECK: [[COPY10:%[0-9]+]]:gpr = COPY [[MVE_VLDRHU16_post]]
- ; CHECK: [[t2LoopEndDec:%[0-9]+]]:gprlr = t2LoopEndDec [[PHI3]], %bb.3, implicit-def $cpsr
- ; CHECK: t2B %bb.4, 14 /* CC::al */, $noreg
- ; CHECK: bb.4.for.cond.cleanup:
- ; CHECK: [[PHI5:%[0-9]+]]:gpr = PHI [[COPY3]], %bb.1, [[COPY8]], %bb.3
- ; CHECK: $r0 = COPY [[PHI5]]
- ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit $r0
+ ; CHECK-NEXT: successors: %bb.2(0x50000000), %bb.1(0x30000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnopc = COPY $r2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $r1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $r0
+ ; CHECK-NEXT: t2CMPri [[COPY]], 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2Bcc %bb.2, 10 /* CC::ge */, $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[t2MOVi:%[0-9]+]]:rgpr = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY [[t2MOVi]]
+ ; CHECK-NEXT: t2B %bb.4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.ph:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[t2ADDri:%[0-9]+]]:rgpr = t2ADDri [[COPY]], 7, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: [[t2BICri:%[0-9]+]]:rgpr = t2BICri [[t2ADDri]], 7, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: [[t2SUBri:%[0-9]+]]:rgpr = t2SUBri [[t2BICri]], 8, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: [[t2MOVi1:%[0-9]+]]:rgpr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: [[t2ADDrs:%[0-9]+]]:gprnopc = nuw nsw t2ADDrs [[t2MOVi1]], [[t2SUBri]], 27, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:rgpr = COPY [[t2ADDrs]]
+ ; CHECK-NEXT: [[t2MOVi2:%[0-9]+]]:rgpr = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gpr = COPY [[t2MOVi2]]
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:rgpr = COPY [[COPY]]
+ ; CHECK-NEXT: [[t2DoLoopStartTP:%[0-9]+]]:gprlr = t2DoLoopStartTP [[COPY4]], [[COPY6]]
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.vector.body:
+ ; CHECK-NEXT: successors: %bb.3(0x7c000000), %bb.4(0x04000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:rgpr = PHI [[COPY2]], %bb.2, %10, %bb.3
+ ; CHECK-NEXT: [[PHI1:%[0-9]+]]:rgpr = PHI [[COPY1]], %bb.2, %9, %bb.3
+ ; CHECK-NEXT: [[PHI2:%[0-9]+]]:tgpreven = PHI [[COPY5]], %bb.2, %8, %bb.3
+ ; CHECK-NEXT: [[PHI3:%[0-9]+]]:gprlr = PHI [[t2DoLoopStartTP]], %bb.2, %33, %bb.3
+ ; CHECK-NEXT: [[PHI4:%[0-9]+]]:rgpr = PHI [[COPY6]], %bb.2, %7, %bb.3
+ ; CHECK-NEXT: [[MVE_VCTP16_:%[0-9]+]]:vccr = MVE_VCTP16 [[PHI4]], 0, $noreg, $noreg
+ ; CHECK-NEXT: [[t2SUBri1:%[0-9]+]]:rgpr = t2SUBri [[PHI4]], 8, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: [[COPY7:%[0-9]+]]:gpr = COPY [[t2SUBri1]]
+ ; CHECK-NEXT: [[MVE_VLDRHU16_post:%[0-9]+]]:rgpr, [[MVE_VLDRHU16_post1:%[0-9]+]]:mqpr = MVE_VLDRHU16_post [[PHI]], 16, 1, [[MVE_VCTP16_]], [[PHI3]] :: (load (s128) from %ir.lsr.iv35, align 2)
+ ; CHECK-NEXT: [[MVE_VLDRHU16_post2:%[0-9]+]]:rgpr, [[MVE_VLDRHU16_post3:%[0-9]+]]:mqpr = MVE_VLDRHU16_post [[PHI1]], 16, 1, [[MVE_VCTP16_]], [[PHI3]] :: (load (s128) from %ir.lsr.iv12, align 2)
+ ; CHECK-NEXT: [[MVE_VMLADAVas16_:%[0-9]+]]:tgpreven = MVE_VMLADAVas16 [[PHI2]], killed [[MVE_VLDRHU16_post3]], killed [[MVE_VLDRHU16_post1]], 1, [[MVE_VCTP16_]], [[PHI3]]
+ ; CHECK-NEXT: [[COPY8:%[0-9]+]]:gpr = COPY [[MVE_VMLADAVas16_]]
+ ; CHECK-NEXT: [[COPY9:%[0-9]+]]:gpr = COPY [[MVE_VLDRHU16_post2]]
+ ; CHECK-NEXT: [[COPY10:%[0-9]+]]:gpr = COPY [[MVE_VLDRHU16_post]]
+ ; CHECK-NEXT: [[t2LoopEndDec:%[0-9]+]]:gprlr = t2LoopEndDec [[PHI3]], %bb.3, implicit-def $cpsr
+ ; CHECK-NEXT: t2B %bb.4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.for.cond.cleanup:
+ ; CHECK-NEXT: [[PHI5:%[0-9]+]]:gpr = PHI [[COPY3]], %bb.1, [[COPY8]], %bb.3
+ ; CHECK-NEXT: $r0 = COPY [[PHI5]]
+ ; CHECK-NEXT: tBX_RET 14 /* CC::al */, $noreg, implicit $r0
bb.0.entry:
successors: %bb.1(0x50000000), %bb.4(0x30000000)
liveins: $r0, $r1, $r2
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/ctlz-non-zeros.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/ctlz-non-zeros.mir
index 588fe4cfcdb9..e04e6e8cdc3d 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/ctlz-non-zeros.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/ctlz-non-zeros.mir
@@ -158,38 +158,44 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test_ctlz_i8
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
- ; CHECK: bb.1.loop.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $lr = t2LDRi12 $sp, 8, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
- ; CHECK: $r4 = tMOVr killed $lr, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.loop.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r4
- ; CHECK: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $vpr = MVE_VCTP16 renamable $r3, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRHU16_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.b, align 2)
- ; CHECK: renamable $q1 = MVE_VLDRHU16 killed renamable $r0, 0, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.a, align 2)
- ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 8, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4, dead $cpsr = tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = MVE_VCLZs8 killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: $r0 = tMOVr $r1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = MVE_VQSHRUNs16th killed renamable $q1, killed renamable $q0, 1, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r2 = MVE_VSTRHU16_post killed renamable $q1, killed renamable $r2, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.addr.c, align 2)
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.loop.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $lr = t2LDRi12 $sp, 8, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: $r4 = tMOVr killed $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.loop.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP16 renamable $r3, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRHU16_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.b, align 2)
+ ; CHECK-NEXT: renamable $q1 = MVE_VLDRHU16 killed renamable $r0, 0, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.a, align 2)
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 8, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VCLZs8 killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: $r0 = tMOVr $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VQSHRUNs16th killed renamable $q1, killed renamable $q0, 1, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r2 = MVE_VSTRHU16_post killed renamable $q1, killed renamable $r2, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.addr.c, align 2)
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r4, $lr
@@ -265,38 +271,45 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test_ctlz_i16
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def dead $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.loop.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r4
- ; CHECK: renamable $lr = t2LDRi12 $sp, 8, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
- ; CHECK: $r12 = tMOVr killed $lr, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.loop.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r4, $r12
- ; CHECK: $lr = tMOVr $r12, 14 /* CC::al */, $noreg
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.b, align 4)
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.a, align 4)
- ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q1 = MVE_VCLZs16 killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $q1 = MVE_VQSHRUNs32th killed renamable $q1, killed renamable $q0, 3, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r2 = MVE_VSTRWU32_post killed renamable $q1, killed renamable $r2, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.addr.c, align 4)
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: liveins: $r4
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def dead $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def dead $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.loop.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $lr = t2LDRi12 $sp, 8, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: $r12 = tMOVr killed $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.loop.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r4, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $lr = tMOVr $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.b, align 4)
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.a, align 4)
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VCLZs16 killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $q1 = MVE_VQSHRUNs32th killed renamable $q1, killed renamable $q0, 3, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r2 = MVE_VSTRWU32_post killed renamable $q1, killed renamable $r2, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.addr.c, align 4)
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: liveins: $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def dead $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r7, $lr
@@ -371,38 +384,45 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test_ctlz_i32
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def dead $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.loop.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r4
- ; CHECK: renamable $lr = t2LDRi12 $sp, 8, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
- ; CHECK: $r12 = tMOVr killed $lr, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.loop.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r4, $r12
- ; CHECK: $lr = tMOVr $r12, 14 /* CC::al */, $noreg
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.a, align 4)
- ; CHECK: renamable $r1, renamable $q1 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.b, align 4)
- ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q1 = MVE_VCLZs32 killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $q0 = MVE_VQSHRUNs32th killed renamable $q0, killed renamable $q1, 3, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r2 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r2, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.addr.c, align 4)
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: liveins: $r4
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def dead $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def dead $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.loop.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $lr = t2LDRi12 $sp, 8, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: $r12 = tMOVr killed $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.loop.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r4, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $lr = tMOVr $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.a, align 4)
+ ; CHECK-NEXT: renamable $r1, renamable $q1 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.b, align 4)
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VCLZs32 killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $q0 = MVE_VQSHRUNs32th killed renamable $q0, killed renamable $q1, 3, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r2 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r2, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.addr.c, align 4)
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: liveins: $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def dead $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/disjoint-vcmp.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/disjoint-vcmp.mir
index d5781155fe0e..9c2434da4b79 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/disjoint-vcmp.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/disjoint-vcmp.mir
@@ -120,51 +120,57 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test
; CHECK: bb.0.bb:
- ; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -16
- ; CHECK: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 20
- ; CHECK: tCBZ $r2, %bb.3
- ; CHECK: bb.1.bb3:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: $r12 = t2MOVi16 target-flags(arm-lo16) @mask, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
- ; CHECK: $r12 = t2MOVTi16 killed $r12, target-flags(arm-hi16) @mask, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4 = t2BICri killed renamable $r4, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r5 = t2LDRHi12 killed renamable $r12, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s16) from %ir.mask.gep9)
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r4, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: $vpr = VMSR_P0 $r5, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r4, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 16, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: VSTR_P0_off killed renamable $vpr, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
- ; CHECK: renamable $q0 = MVE_VDUP32 killed renamable $r5, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.bb9:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r3, $r12
- ; CHECK: renamable $vpr = VLDR_P0_off $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
- ; CHECK: MVE_VPST 2, implicit $vpr
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 1, killed renamable $vpr, $noreg
- ; CHECK: renamable $r1, renamable $q1 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv24, align 4)
- ; CHECK: renamable $r3, renamable $q2 = MVE_VLDRWU32_post killed renamable $r3, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1, align 4)
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $r12, renamable $q2 = MVE_VLDRWU32_pre killed renamable $r12, 16, 0, $noreg, $noreg :: (load (s128) from %ir.scevgep2, align 8)
- ; CHECK: MVE_VPTv4u32 8, renamable $q0, killed renamable $q2, 2, implicit-def $vpr
- ; CHECK: MVE_VSTRWU32 killed renamable $q1, killed renamable $r0, 0, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1, align 4)
- ; CHECK: $r0 = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.bb27:
- ; CHECK: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -16
+ ; CHECK-NEXT: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 20
+ ; CHECK-NEXT: tCBZ $r2, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.bb3:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r12 = t2MOVi16 target-flags(arm-lo16) @mask, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r12 = t2MOVTi16 killed $r12, target-flags(arm-hi16) @mask, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4 = t2BICri killed renamable $r4, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r5 = t2LDRHi12 killed renamable $r12, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s16) from %ir.mask.gep9)
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r4, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $vpr = VMSR_P0 $r5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $r4, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r3, 16, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: VSTR_P0_off killed renamable $vpr, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
+ ; CHECK-NEXT: renamable $q0 = MVE_VDUP32 killed renamable $r5, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.bb9:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1, $r2, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = VLDR_P0_off $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: MVE_VPST 2, implicit $vpr
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r2, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r1, renamable $q1 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv24, align 4)
+ ; CHECK-NEXT: renamable $r3, renamable $q2 = MVE_VLDRWU32_post killed renamable $r3, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1, align 4)
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $r12, renamable $q2 = MVE_VLDRWU32_pre killed renamable $r12, 16, 0, $noreg, $noreg :: (load (s128) from %ir.scevgep2, align 8)
+ ; CHECK-NEXT: MVE_VPTv4u32 8, renamable $q0, killed renamable $q2, 2, implicit-def $vpr
+ ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q1, killed renamable $r0, 0, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1, align 4)
+ ; CHECK-NEXT: $r0 = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.bb27:
+ ; CHECK-NEXT: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc
bb.0.bb:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/dont-ignore-vctp.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/dont-ignore-vctp.mir
index f32242417039..59f5c8e4f14a 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/dont-ignore-vctp.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/dont-ignore-vctp.mir
@@ -92,29 +92,34 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: dont_ignore_vctp
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: renamable $r3, dead $cpsr = tLSLri killed renamable $r2, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = tLEApcrel %const.0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r2, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool)
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3
- ; CHECK: bb.1.do.body (align 4):
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1
- ; CHECK: renamable $q1 = nnan ninf nsz MVE_VLDRWU32 renamable $r0, 0, 0, $noreg, $lr
- ; CHECK: renamable $q1 = nnan ninf nsz MVE_VMULf32 killed renamable $q1, renamable $q0, 0, $noreg, $lr, undef renamable $q1
- ; CHECK: MVE_VSTRWU32 killed renamable $q1, renamable $r1, 0, 0, killed $noreg, $lr
- ; CHECK: renamable $r0, dead $cpsr = nuw tADDi8 killed renamable $r0, 16, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, dead $cpsr = nuw tADDi8 killed renamable $r1, 16, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.1
- ; CHECK: bb.2.do.end:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ; CHECK: bb.3 (align 16):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 16
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tLSLri killed renamable $r2, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = tLEApcrel %const.0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 killed renamable $r2, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool)
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.do.body (align 4):
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q1 = nnan ninf nsz MVE_VLDRWU32 renamable $r0, 0, 0, $noreg, $lr
+ ; CHECK-NEXT: renamable $q1 = nnan ninf nsz MVE_VMULf32 killed renamable $q1, renamable $q0, 0, $noreg, $lr, undef renamable $q1
+ ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q1, renamable $r1, 0, 0, killed $noreg, $lr
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = nuw tADDi8 killed renamable $r0, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = nuw tADDi8 killed renamable $r1, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.do.end:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3 (align 16):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 16
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/dont-remove-loop-update.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/dont-remove-loop-update.mir
index d54023503e29..18fc66eeb262 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/dont-remove-loop-update.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/dont-remove-loop-update.mir
@@ -104,40 +104,46 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: use_before_def
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
- ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r12 = t2ADDri renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $lr :: (load (s128) from %ir.lsr.iv13, align 4)
- ; CHECK: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 1, renamable $vpr, $lr :: (load (s128) from %ir.lsr.iv1416, align 4)
- ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = nsw MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $lr, undef renamable $q0
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $lr :: (store (s128) into %ir.lsr.iv1719, align 4)
- ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_register $r7
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r12 = t2ADDri renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $lr :: (load (s128) from %ir.lsr.iv13, align 4)
+ ; CHECK-NEXT: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 1, renamable $vpr, $lr :: (load (s128) from %ir.lsr.iv1416, align 4)
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $lr, undef renamable $q0
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $lr :: (store (s128) into %ir.lsr.iv1719, align 4)
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/end-positive-offset.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/end-positive-offset.mir
index 4f667a549f3f..9ebb714bc4ee 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/end-positive-offset.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/end-positive-offset.mir
@@ -39,13 +39,10 @@
br label %for.body
}
- ; Function Attrs: nounwind
declare i32 @llvm.arm.space(i32 immarg, i32) #0
- ; Function Attrs: noduplicate nounwind
declare i32 @llvm.start.loop.iterations.i32(i32) #1
- ; Function Attrs: noduplicate nounwind
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #1
attributes #0 = { nounwind }
@@ -125,56 +122,62 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: size_limit
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.3(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: $sp = frame-setup tSUBspi $sp, 8, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 40
- ; CHECK: dead $lr = tMOVr renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0, dead $cpsr = tSUBi8 killed renamable $r0, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
- ; CHECK: tSTRspi killed $r1, $sp, 7, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
- ; CHECK: tSTRspi killed $r2, $sp, 6, 14 /* CC::al */, $noreg :: (store (s32) into %stack.1)
- ; CHECK: tSTRspi killed $r0, $sp, 5, 14 /* CC::al */, $noreg :: (store (s32) into %stack.2)
- ; CHECK: tSTRspi killed $r3, $sp, 4, 14 /* CC::al */, $noreg :: (store (s32) into %stack.3)
- ; CHECK: tB %bb.3, 14 /* CC::al */, $noreg
- ; CHECK: bb.1.for.body:
- ; CHECK: successors: %bb.3(0x40000000), %bb.2(0x40000000)
- ; CHECK: $r0 = tLDRspi $sp, 3, 14 /* CC::al */, $noreg :: (load (s32) from %stack.4)
- ; CHECK: renamable $r1, renamable $r0 = t2LDR_PRE killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep11)
- ; CHECK: $r2 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load (s32) from %stack.5)
- ; CHECK: renamable $r3, renamable $r2 = t2LDR_PRE killed renamable $r2, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
- ; CHECK: renamable $r1, dead $cpsr = nsw tMUL killed renamable $r3, killed renamable $r1, 14 /* CC::al */, $noreg
- ; CHECK: $r3 = tLDRspi $sp, 1, 14 /* CC::al */, $noreg :: (load (s32) from %stack.6)
- ; CHECK: early-clobber renamable $r3 = t2STR_PRE killed renamable $r1, killed renamable $r3, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep3)
- ; CHECK: $r1 = tLDRspi $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.7)
- ; CHECK: $lr = tMOVr killed $r1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = t2SUBri killed renamable $lr, 1, 14 /* CC::al */, $noreg, def $cpsr
- ; CHECK: $r12 = tMOVr killed $lr, 14 /* CC::al */, $noreg
- ; CHECK: tSTRspi killed $r0, $sp, 7, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
- ; CHECK: tSTRspi killed $r2, $sp, 6, 14 /* CC::al */, $noreg :: (store (s32) into %stack.1)
- ; CHECK: tSTRspi killed $r3, $sp, 5, 14 /* CC::al */, $noreg :: (store (s32) into %stack.2)
- ; CHECK: t2STRi12 killed $r12, $sp, 16, 14 /* CC::al */, $noreg :: (store (s32) into %stack.3)
- ; CHECK: tBcc %bb.3, 1 /* CC::ne */, killed $cpsr
- ; CHECK: tB %bb.2, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.for.cond.cleanup:
- ; CHECK: $sp = tADDspi $sp, 8, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ; CHECK: bb.3.for.header:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: $r0 = tLDRspi $sp, 4, 14 /* CC::al */, $noreg :: (load (s32) from %stack.3)
- ; CHECK: $r1 = tLDRspi $sp, 5, 14 /* CC::al */, $noreg :: (load (s32) from %stack.2)
- ; CHECK: $r2 = tLDRspi $sp, 6, 14 /* CC::al */, $noreg :: (load (s32) from %stack.1)
- ; CHECK: $r3 = tLDRspi $sp, 7, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
- ; CHECK: tSTRspi killed $r0, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.7)
- ; CHECK: tSTRspi killed $r1, $sp, 1, 14 /* CC::al */, $noreg :: (store (s32) into %stack.6)
- ; CHECK: tSTRspi killed $r2, $sp, 2, 14 /* CC::al */, $noreg :: (store (s32) into %stack.5)
- ; CHECK: tSTRspi killed $r3, $sp, 3, 14 /* CC::al */, $noreg :: (store (s32) into %stack.4)
- ; CHECK: tB %bb.1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: $sp = frame-setup tSUBspi $sp, 8, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 40
+ ; CHECK-NEXT: dead $lr = tMOVr renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tSUBi8 killed renamable $r0, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRspi killed $r1, $sp, 7, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
+ ; CHECK-NEXT: tSTRspi killed $r2, $sp, 6, 14 /* CC::al */, $noreg :: (store (s32) into %stack.1)
+ ; CHECK-NEXT: tSTRspi killed $r0, $sp, 5, 14 /* CC::al */, $noreg :: (store (s32) into %stack.2)
+ ; CHECK-NEXT: tSTRspi killed $r3, $sp, 4, 14 /* CC::al */, $noreg :: (store (s32) into %stack.3)
+ ; CHECK-NEXT: tB %bb.3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.for.body:
+ ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tLDRspi $sp, 3, 14 /* CC::al */, $noreg :: (load (s32) from %stack.4)
+ ; CHECK-NEXT: renamable $r1, renamable $r0 = t2LDR_PRE killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep11)
+ ; CHECK-NEXT: $r2 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load (s32) from %stack.5)
+ ; CHECK-NEXT: renamable $r3, renamable $r2 = t2LDR_PRE killed renamable $r2, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = nsw tMUL killed renamable $r3, killed renamable $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r3 = tLDRspi $sp, 1, 14 /* CC::al */, $noreg :: (load (s32) from %stack.6)
+ ; CHECK-NEXT: early-clobber renamable $r3 = t2STR_PRE killed renamable $r1, killed renamable $r3, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep3)
+ ; CHECK-NEXT: $r1 = tLDRspi $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.7)
+ ; CHECK-NEXT: $lr = tMOVr killed $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = t2SUBri killed renamable $lr, 1, 14 /* CC::al */, $noreg, def $cpsr
+ ; CHECK-NEXT: $r12 = tMOVr killed $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRspi killed $r0, $sp, 7, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
+ ; CHECK-NEXT: tSTRspi killed $r2, $sp, 6, 14 /* CC::al */, $noreg :: (store (s32) into %stack.1)
+ ; CHECK-NEXT: tSTRspi killed $r3, $sp, 5, 14 /* CC::al */, $noreg :: (store (s32) into %stack.2)
+ ; CHECK-NEXT: t2STRi12 killed $r12, $sp, 16, 14 /* CC::al */, $noreg :: (store (s32) into %stack.3)
+ ; CHECK-NEXT: tBcc %bb.3, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NEXT: tB %bb.2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.for.cond.cleanup:
+ ; CHECK-NEXT: $sp = tADDspi $sp, 8, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.header:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tLDRspi $sp, 4, 14 /* CC::al */, $noreg :: (load (s32) from %stack.3)
+ ; CHECK-NEXT: $r1 = tLDRspi $sp, 5, 14 /* CC::al */, $noreg :: (load (s32) from %stack.2)
+ ; CHECK-NEXT: $r2 = tLDRspi $sp, 6, 14 /* CC::al */, $noreg :: (load (s32) from %stack.1)
+ ; CHECK-NEXT: $r3 = tLDRspi $sp, 7, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: tSTRspi killed $r0, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.7)
+ ; CHECK-NEXT: tSTRspi killed $r1, $sp, 1, 14 /* CC::al */, $noreg :: (store (s32) into %stack.6)
+ ; CHECK-NEXT: tSTRspi killed $r2, $sp, 2, 14 /* CC::al */, $noreg :: (store (s32) into %stack.5)
+ ; CHECK-NEXT: tSTRspi killed $r3, $sp, 3, 14 /* CC::al */, $noreg :: (store (s32) into %stack.4)
+ ; CHECK-NEXT: tB %bb.1, 14 /* CC::al */, $noreg
bb.0.entry:
successors: %bb.3(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/extract-element.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/extract-element.mir
index fe156fe9dab7..cd5292310f16 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/extract-element.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/extract-element.mir
@@ -105,33 +105,40 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: no_vpsel_liveout
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 4, implicit-def $itstate
- ; CHECK: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
- ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r2
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 0, killed $noreg, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
- ; CHECK: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.middle.block:
- ; CHECK: liveins: $q0
- ; CHECK: $r0 = VMOVRS killed $s3, 14 /* CC::al */, $noreg, implicit killed $q0
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 4, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
+ ; CHECK-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 0, killed $noreg, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
+ ; CHECK-NEXT: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.middle.block:
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = VMOVRS killed $s3, 14 /* CC::al */, $noreg, implicit killed $q0
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $lr, $r7
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/incorrect-sub-16.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/incorrect-sub-16.mir
index e529a691545f..d95db905283c 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/incorrect-sub-16.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/incorrect-sub-16.mir
@@ -96,37 +96,43 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: incorrect_sub_16
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r12 = t2ADDri renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = MVE_VCTP16 renamable $r3, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRHU16_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv13, align 4)
- ; CHECK: renamable $r2, renamable $q1 = MVE_VLDRHU16_post killed renamable $r2, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1416, align 4)
- ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 7, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = nsw MVE_VADDi16 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0 = MVE_VSTRHU16_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1719, align 4)
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r12 = t2ADDri renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP16 renamable $r3, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRHU16_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv13, align 4)
+ ; CHECK-NEXT: renamable $r2, renamable $q1 = MVE_VLDRHU16_post killed renamable $r2, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1416, align 4)
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VADDi16 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRHU16_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1719, align 4)
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/incorrect-sub-32.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/incorrect-sub-32.mir
index 51844a7cf692..35effea71350 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/incorrect-sub-32.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/incorrect-sub-32.mir
@@ -104,37 +104,43 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: incorrect_sub_32
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r12 = t2ADDri renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv13, align 4)
- ; CHECK: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1416, align 4)
- ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 5, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = nsw MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1719, align 4)
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r12 = t2ADDri renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv13, align 4)
+ ; CHECK-NEXT: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1416, align 4)
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1719, align 4)
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/incorrect-sub-8.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/incorrect-sub-8.mir
index 305c31b33b21..71f8f20a37f7 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/incorrect-sub-8.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/incorrect-sub-8.mir
@@ -97,37 +97,43 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: incorrect_sub_8
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r12 = t2ADDri renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = MVE_VCTP8 renamable $r3, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRBU8_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv13, align 4)
- ; CHECK: renamable $r2, renamable $q1 = MVE_VLDRBU8_post killed renamable $r2, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1416, align 4)
- ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 15, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = nsw MVE_VADDi8 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0 = MVE_VSTRBU8_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1719, align 4)
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r12 = t2ADDri renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP8 renamable $r3, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRBU8_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv13, align 4)
+ ; CHECK-NEXT: renamable $r2, renamable $q1 = MVE_VLDRBU8_post killed renamable $r2, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1416, align 4)
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 15, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VADDi8 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRBU8_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1719, align 4)
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-1.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-1.mir
index 6ef6ba1046dd..40b557a5e6de 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-1.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-1.mir
@@ -130,51 +130,57 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: inloop_vpnot
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -16
- ; CHECK: renamable $r12 = t2LDRi12 $sp, 20, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.1)
- ; CHECK: t2CMPri renamable $r12, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.3, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r12
- ; CHECK: renamable $lr = t2ADDri renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = t2BICri killed renamable $lr, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r5 = tLDRspi $sp, 4, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
- ; CHECK: renamable $lr = t2SUBri killed renamable $lr, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $r4 = tMOVr killed $lr, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $q0, $r0, $r1, $r2, $r3, $r4, $r5, $r12
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r3, renamable $q1 = MVE_VLDRHS32_post killed renamable $r3, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17.d, align 2)
- ; CHECK: renamable $r2, renamable $q2 = MVE_VLDRHS32_post killed renamable $r2, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820.c, align 2)
- ; CHECK: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
- ; CHECK: renamable $q2 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r5 = MVE_VSTRWU32_post renamable $q0, killed renamable $r5, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.cast.e, align 4)
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -16
+ ; CHECK-NEXT: renamable $r12 = t2LDRi12 $sp, 20, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.1)
+ ; CHECK-NEXT: t2CMPri renamable $r12, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.3, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $lr = t2ADDri renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = t2BICri killed renamable $lr, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r5 = tLDRspi $sp, 4, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: renamable $lr = t2SUBri killed renamable $lr, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $r4 = tMOVr killed $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $q0, $r0, $r1, $r2, $r3, $r4, $r5, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r3, renamable $q1 = MVE_VLDRHS32_post killed renamable $r3, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17.d, align 2)
+ ; CHECK-NEXT: renamable $r2, renamable $q2 = MVE_VLDRHS32_post killed renamable $r2, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820.c, align 2)
+ ; CHECK-NEXT: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
+ ; CHECK-NEXT: renamable $q2 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r5 = MVE_VSTRWU32_post renamable $q0, killed renamable $r5, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.cast.e, align 4)
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc
bb.0.entry:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-2.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-2.mir
index 6681e0a82071..380c6957c3b1 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-2.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-2.mir
@@ -130,51 +130,57 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: inloop_vpnot
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -16
- ; CHECK: renamable $r12 = t2LDRi12 $sp, 20, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.1)
- ; CHECK: t2CMPri renamable $r12, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.3, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r12
- ; CHECK: renamable $lr = t2ADDri renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = t2BICri killed renamable $lr, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r5 = tLDRspi $sp, 4, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
- ; CHECK: renamable $lr = t2SUBri killed renamable $lr, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $r4 = tMOVr killed $lr, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $q0, $r0, $r1, $r2, $r3, $r4, $r5, $r12
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r3, renamable $q1 = MVE_VLDRHS32_post killed renamable $r3, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17.d, align 2)
- ; CHECK: renamable $r2, renamable $q2 = MVE_VLDRHS32_post killed renamable $r2, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820.c, align 2)
- ; CHECK: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
- ; CHECK: renamable $q2 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, killed renamable $vpr, $noreg
- ; CHECK: renamable $r5 = MVE_VSTRWU32_post renamable $q0, killed renamable $r5, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.cast.e, align 4)
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -16
+ ; CHECK-NEXT: renamable $r12 = t2LDRi12 $sp, 20, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.1)
+ ; CHECK-NEXT: t2CMPri renamable $r12, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.3, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $lr = t2ADDri renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = t2BICri killed renamable $lr, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r5 = tLDRspi $sp, 4, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: renamable $lr = t2SUBri killed renamable $lr, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $r4 = tMOVr killed $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $q0, $r0, $r1, $r2, $r3, $r4, $r5, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r3, renamable $q1 = MVE_VLDRHS32_post killed renamable $r3, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17.d, align 2)
+ ; CHECK-NEXT: renamable $r2, renamable $q2 = MVE_VLDRHS32_post killed renamable $r2, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820.c, align 2)
+ ; CHECK-NEXT: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
+ ; CHECK-NEXT: renamable $q2 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r5 = MVE_VSTRWU32_post renamable $q0, killed renamable $r5, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.cast.e, align 4)
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc
bb.0.entry:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-3.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-3.mir
index d0716d872495..a81f2a557180 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-3.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpnot-3.mir
@@ -130,51 +130,57 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: inloop_vpnot
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -16
- ; CHECK: renamable $r12 = t2LDRi12 $sp, 20, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.1)
- ; CHECK: t2CMPri renamable $r12, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.3, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r12
- ; CHECK: renamable $lr = t2ADDri renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = t2BICri killed renamable $lr, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r5 = tLDRspi $sp, 4, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
- ; CHECK: renamable $lr = t2SUBri killed renamable $lr, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $r4 = tMOVr killed $lr, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $q0, $r0, $r1, $r2, $r3, $r4, $r5, $r12
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r3, renamable $q1 = MVE_VLDRHS32_post killed renamable $r3, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17.d, align 2)
- ; CHECK: renamable $r2, renamable $q2 = MVE_VLDRHS32_post killed renamable $r2, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820.c, align 2)
- ; CHECK: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
- ; CHECK: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: MVE_VPST 2, implicit $vpr
- ; CHECK: renamable $q2 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, renamable $vpr, $noreg, undef renamable $q1
- ; CHECK: renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, renamable $vpr, $noreg, undef renamable $q0
- ; CHECK: renamable $r5 = MVE_VSTRWU32_post renamable $q0, killed renamable $r5, 16, 1, renamable $vpr, $noreg :: (store (s128) into %ir.lsr.cast.e, align 4)
- ; CHECK: dead renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg, $noreg
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -16
+ ; CHECK-NEXT: renamable $r12 = t2LDRi12 $sp, 20, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.1)
+ ; CHECK-NEXT: t2CMPri renamable $r12, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.3, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $lr = t2ADDri renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = t2BICri killed renamable $lr, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r5 = tLDRspi $sp, 4, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: renamable $lr = t2SUBri killed renamable $lr, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $r4 = tMOVr killed $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $q0, $r0, $r1, $r2, $r3, $r4, $r5, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r3, renamable $q1 = MVE_VLDRHS32_post killed renamable $r3, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17.d, align 2)
+ ; CHECK-NEXT: renamable $r2, renamable $q2 = MVE_VLDRHS32_post killed renamable $r2, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820.c, align 2)
+ ; CHECK-NEXT: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
+ ; CHECK-NEXT: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 2, implicit $vpr
+ ; CHECK-NEXT: renamable $q2 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, renamable $vpr, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, renamable $vpr, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r5 = MVE_VSTRWU32_post renamable $q0, killed renamable $r5, 16, 1, renamable $vpr, $noreg :: (store (s128) into %ir.lsr.cast.e, align 4)
+ ; CHECK-NEXT: dead renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc
bb.0.entry:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-1.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-1.mir
index 2608276a423a..45f0371660a5 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-1.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-1.mir
@@ -128,54 +128,62 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: vpsel_after_vpt
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.4(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -16
- ; CHECK: renamable $r12 = t2LDRi12 $sp, 16, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
- ; CHECK: t2CMPri renamable $r12, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.4, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r12
- ; CHECK: renamable $lr = t2ADDri renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = t2BICri killed renamable $lr, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $lr = t2SUBri killed renamable $lr, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r5 = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: dead $lr = t2DLS renamable $r5
- ; CHECK: $r4 = tMOVr killed $r5, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $q0, $r0, $r1, $r2, $r3, $r4, $r12
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r3, renamable $q1 = MVE_VLDRHS32_post killed renamable $r3, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17.d, align 2)
- ; CHECK: renamable $r2, renamable $q2 = MVE_VLDRHS32_post killed renamable $r2, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820.c, align 2)
- ; CHECK: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
- ; CHECK: renamable $q2 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = MVE_VADDi32 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VPSEL killed renamable $q1, killed renamable $q0, 0, killed renamable $vpr, $noreg
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.middle.block:
- ; CHECK: liveins: $q0
- ; CHECK: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc, implicit killed $r0
- ; CHECK: bb.4:
- ; CHECK: renamable $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.4(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -16
+ ; CHECK-NEXT: renamable $r12 = t2LDRi12 $sp, 16, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: t2CMPri renamable $r12, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.4, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $lr = t2ADDri renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = t2BICri killed renamable $lr, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $lr = t2SUBri killed renamable $lr, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r5 = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r5
+ ; CHECK-NEXT: $r4 = tMOVr killed $r5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $q0, $r0, $r1, $r2, $r3, $r4, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r3, renamable $q1 = MVE_VLDRHS32_post killed renamable $r3, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17.d, align 2)
+ ; CHECK-NEXT: renamable $r2, renamable $q2 = MVE_VLDRHS32_post killed renamable $r2, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820.c, align 2)
+ ; CHECK-NEXT: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
+ ; CHECK-NEXT: renamable $q2 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VADDi32 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VPSEL killed renamable $q1, killed renamable $q0, 0, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.middle.block:
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.4(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-2.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-2.mir
index a28abae7c763..78a05c38b46f 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-2.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-2.mir
@@ -130,53 +130,61 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: vpsel_after_vpt
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.4(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -16
- ; CHECK: renamable $r12 = t2LDRi12 $sp, 16, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
- ; CHECK: t2CMPri renamable $r12, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.4, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r12
- ; CHECK: renamable $lr = t2ADDri renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = t2BICri killed renamable $lr, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $lr = t2SUBri killed renamable $lr, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r5 = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: dead $lr = t2DLS renamable $r5
- ; CHECK: $r4 = tMOVr killed $r5, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $q0, $r0, $r1, $r2, $r3, $r4, $r12
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 2, implicit $vpr
- ; CHECK: renamable $r3, renamable $q1 = MVE_VLDRHS32_post killed renamable $r3, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17.d, align 2)
- ; CHECK: renamable $r2, renamable $q2 = MVE_VLDRHS32_post killed renamable $r2, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820.c, align 2)
- ; CHECK: renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
- ; CHECK: renamable $r0, renamable $q4 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $q2 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q4, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = MVE_VADDi32 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VPSEL killed renamable $q1, killed renamable $q0, 0, killed renamable $vpr, $noreg
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.middle.block:
- ; CHECK: liveins: $q0
- ; CHECK: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc, implicit killed $r0
- ; CHECK: bb.4:
- ; CHECK: renamable $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.4(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -16
+ ; CHECK-NEXT: renamable $r12 = t2LDRi12 $sp, 16, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: t2CMPri renamable $r12, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.4, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $lr = t2ADDri renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = t2BICri killed renamable $lr, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $lr = t2SUBri killed renamable $lr, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r5 = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r5
+ ; CHECK-NEXT: $r4 = tMOVr killed $r5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $q0, $r0, $r1, $r2, $r3, $r4, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 2, implicit $vpr
+ ; CHECK-NEXT: renamable $r3, renamable $q1 = MVE_VLDRHS32_post killed renamable $r3, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17.d, align 2)
+ ; CHECK-NEXT: renamable $r2, renamable $q2 = MVE_VLDRHS32_post killed renamable $r2, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820.c, align 2)
+ ; CHECK-NEXT: renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
+ ; CHECK-NEXT: renamable $r0, renamable $q4 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $q2 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q4, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VADDi32 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VPSEL killed renamable $q1, killed renamable $q0, 0, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.middle.block:
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.4(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/invariant-qreg.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/invariant-qreg.mir
index f4d6ce629706..2ac2c822a8c6 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/invariant-qreg.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/invariant-qreg.mir
@@ -155,33 +155,40 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: invariant_use_store
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: renamable $r3 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
- ; CHECK: tCBZ $r2, %bb.3
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $q0, $r0, $r1, $r2
- ; CHECK: renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r2
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $q1, $r0, $r1
- ; CHECK: renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $q2 = nsw MVE_VMULi32 renamable $q0, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $r1 = MVE_VSTRWU32_post renamable $q1, killed renamable $r1, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.store, align 4)
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: liveins: $q0
- ; CHECK: renamable $r0, renamable $r1 = VMOVRRD renamable $d0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, renamable $r3 = VMOVRRD killed renamable $d1, 14 /* CC::al */, $noreg, implicit killed $q0
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0, implicit killed $r1, implicit killed $r2, implicit killed $r3
+ ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: renamable $r3 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: tCBZ $r2, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $q0, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $q1, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $q2 = nsw MVE_VMULi32 renamable $q0, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $r1 = MVE_VSTRWU32_post renamable $q1, killed renamable $r1, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.store, align 4)
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $r1 = VMOVRRD renamable $d0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, renamable $r3 = VMOVRRD killed renamable $d1, 14 /* CC::al */, $noreg, implicit killed $q0
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0, implicit killed $r1, implicit killed $r2, implicit killed $r3
bb.0.entry:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r7, $lr
@@ -262,44 +269,52 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: invariant_mul_use_reduce
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.4(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCBZ $r2, %bb.4
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r2
- ; CHECK: renamable $r1, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1 = t2BICri killed renamable $r1, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r1, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r1 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r1, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
- ; CHECK: dead $lr = t2DLS renamable $r3
- ; CHECK: $r1 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $q0, $r0, $r1, $r2
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
- ; CHECK: $lr = tMOVr $r1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, dead $cpsr = nsw tSUBi8 killed $r1, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $r12 = MVE_VMLADAVu32 renamable $q0, killed renamable $q1, 0, $noreg, $noreg
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: liveins: $r12
- ; CHECK: $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
- ; CHECK: bb.4:
- ; CHECK: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.4(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCBZ $r2, %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1 = t2BICri killed renamable $r1, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r1, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r1 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 killed renamable $r1, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r3
+ ; CHECK-NEXT: $r1 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $q0, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
+ ; CHECK-NEXT: $lr = tMOVr $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = nsw tSUBi8 killed $r1, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $r12 = MVE_VMLADAVu32 renamable $q0, killed renamable $q1, 0, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: liveins: $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.4(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r2, $r7, $lr
@@ -380,45 +395,53 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: invariant_add_use_reduce
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.4(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCBZ $r2, %bb.4
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r2
- ; CHECK: renamable $r1, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1 = t2BICri killed renamable $r1, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r1, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r1 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r1, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
- ; CHECK: dead $lr = t2DLS renamable $r3
- ; CHECK: $r1 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $q0, $r0, $r1, $r2
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
- ; CHECK: $lr = tMOVr $r1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = nsw MVE_VADDi32 renamable $q0, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $r1, dead $cpsr = nsw tSUBi8 killed $r1, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = MVE_VADDVu32no_acc killed renamable $q1, 0, $noreg, $noreg
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: liveins: $r12
- ; CHECK: $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
- ; CHECK: bb.4:
- ; CHECK: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.4(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCBZ $r2, %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1 = t2BICri killed renamable $r1, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r1, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r1 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 killed renamable $r1, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r3
+ ; CHECK-NEXT: $r1 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $q0, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: $lr = tMOVr $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = nsw MVE_VADDi32 renamable $q0, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = nsw tSUBi8 killed $r1, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = MVE_VADDVu32no_acc killed renamable $q1, 0, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: liveins: $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.4(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r2, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-chain-store.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-chain-store.mir
index b1749fdaad5a..0c6c3330991d 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-chain-store.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-chain-store.mir
@@ -127,35 +127,39 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: it_block_store_count_before_start
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: renamable $lr = t2MOVi 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2LSLri renamable $r2, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: t2CMPri renamable $r12, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: $lr = t2LSLri renamable $r2, 1, 11 /* CC::lt */, killed $cpsr, $noreg, implicit killed renamable $lr, implicit killed $itstate
- ; CHECK: renamable $r2 = t2RSBrs killed renamable $lr, killed renamable $r2, 10, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = t2ADDri killed renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r2, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: t2STRi12 killed renamable $lr, killed renamable $r3, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.iter.addr)
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r12
- ; CHECK: $r2 = tMOVr killed $lr, 14 /* CC::al */, $noreg
- ; CHECK: bb.1.do.body:
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: $lr = tMOVr $r2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = nsw tSUBi8 killed $r2, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg :: (load (s128) from %ir.pSrc.addr.02, align 4)
- ; CHECK: renamable $q0 = MVE_VMULf32 killed renamable $q0, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r1 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r1, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.pDst.addr.01, align 4)
- ; CHECK: dead $lr = MVE_LETP killed renamable $lr, %bb.1
- ; CHECK: bb.2.do.end:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: renamable $lr = t2MOVi 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2LSLri renamable $r2, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: t2CMPri renamable $r12, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: $lr = t2LSLri renamable $r2, 1, 11 /* CC::lt */, killed $cpsr, $noreg, implicit killed renamable $lr, implicit killed $itstate
+ ; CHECK-NEXT: renamable $r2 = t2RSBrs killed renamable $lr, killed renamable $r2, 10, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = t2ADDri killed renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $r2, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: t2STRi12 killed renamable $lr, killed renamable $r3, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.iter.addr)
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r12
+ ; CHECK-NEXT: $r2 = tMOVr killed $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.do.body:
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $lr = tMOVr $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = nsw tSUBi8 killed $r2, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg :: (load (s128) from %ir.pSrc.addr.02, align 4)
+ ; CHECK-NEXT: renamable $q0 = MVE_VMULf32 killed renamable $q0, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r1 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r1, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.pDst.addr.01, align 4)
+ ; CHECK-NEXT: dead $lr = MVE_LETP killed renamable $lr, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.do.end:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r7, $lr
@@ -228,35 +232,39 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: it_block_store_count_after_start
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: renamable $lr = t2MOVi 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2LSLri renamable $r2, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: t2CMPri renamable $r12, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: $lr = t2LSLri renamable $r2, 1, 11 /* CC::lt */, killed $cpsr, $noreg, implicit killed renamable $lr, implicit killed $itstate
- ; CHECK: renamable $r2 = t2RSBrs killed renamable $lr, killed renamable $r2, 10, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = t2ADDri killed renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r2, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: t2STRi12 killed renamable $lr, killed renamable $r3, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.iter.addr)
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r12
- ; CHECK: $r2 = tMOVr killed $lr, 14 /* CC::al */, $noreg
- ; CHECK: bb.1.do.body:
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: $lr = tMOVr $r2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = nsw tSUBi8 killed $r2, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg :: (load (s128) from %ir.pSrc.addr.02, align 4)
- ; CHECK: renamable $q0 = MVE_VMULf32 killed renamable $q0, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r1 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r1, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.pDst.addr.01, align 4)
- ; CHECK: dead $lr = MVE_LETP killed renamable $lr, %bb.1
- ; CHECK: bb.2.do.end:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: renamable $lr = t2MOVi 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2LSLri renamable $r2, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: t2CMPri renamable $r12, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: $lr = t2LSLri renamable $r2, 1, 11 /* CC::lt */, killed $cpsr, $noreg, implicit killed renamable $lr, implicit killed $itstate
+ ; CHECK-NEXT: renamable $r2 = t2RSBrs killed renamable $lr, killed renamable $r2, 10, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = t2ADDri killed renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $r2, killed renamable $lr, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: t2STRi12 killed renamable $lr, killed renamable $r3, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.iter.addr)
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r12
+ ; CHECK-NEXT: $r2 = tMOVr killed $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.do.body:
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $lr = tMOVr $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = nsw tSUBi8 killed $r2, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg :: (load (s128) from %ir.pSrc.addr.02, align 4)
+ ; CHECK-NEXT: renamable $q0 = MVE_VMULf32 killed renamable $q0, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r1 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r1, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.pDst.addr.01, align 4)
+ ; CHECK-NEXT: dead $lr = MVE_LETP killed renamable $lr, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.do.end:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-chain.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-chain.mir
index f27777bc3403..6f021dae228b 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-chain.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-chain.mir
@@ -94,38 +94,43 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: it_block_2_stmts
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: renamable $r3, dead $cpsr = tLSLri renamable $r2, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2MOVi 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tCMPi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: $r1 = t2ADDri renamable $r0, 3, 11 /* CC::lt */, $noreg, $noreg, implicit $itstate
- ; CHECK: $r3 = t2LSLri renamable $r2, 1, 11 /* CC::lt */, $cpsr, $noreg, implicit renamable $r12, implicit $itstate
- ; CHECK: $r12 = t2LSLri renamable $r3, 1, 11 /* CC::lt */, killed $cpsr, $noreg, implicit killed renamable $r12, implicit killed $itstate
- ; CHECK: renamable $r2 = t2RSBrs killed renamable $r12, killed renamable $r2, 10, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: dead renamable $r12 = t2ADDri killed renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: dead renamable $r2, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = tLEApcrel %const.0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r2, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool)
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3
- ; CHECK: bb.1.do.body (align 4):
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1
- ; CHECK: renamable $q1 = nnan ninf nsz MVE_VLDRWU32 renamable $r0, 0, 0, $noreg, $noreg
- ; CHECK: renamable $q1 = nnan ninf nsz MVE_VMULf32 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: MVE_VSTRWU32 killed renamable $q1, renamable $r1, 0, 0, killed $noreg, $noreg
- ; CHECK: renamable $r0, dead $cpsr = nuw tADDi8 killed renamable $r0, 16, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, dead $cpsr = nuw tADDi8 killed renamable $r1, 16, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.1
- ; CHECK: bb.2.do.end:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ; CHECK: bb.3 (align 16):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 16
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tLSLri renamable $r2, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2MOVi 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tCMPi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: $r1 = t2ADDri renamable $r0, 3, 11 /* CC::lt */, $noreg, $noreg, implicit $itstate
+ ; CHECK-NEXT: $r3 = t2LSLri renamable $r2, 1, 11 /* CC::lt */, $cpsr, $noreg, implicit renamable $r12, implicit $itstate
+ ; CHECK-NEXT: $r12 = t2LSLri renamable $r3, 1, 11 /* CC::lt */, killed $cpsr, $noreg, implicit killed renamable $r12, implicit killed $itstate
+ ; CHECK-NEXT: renamable $r2 = t2RSBrs killed renamable $r12, killed renamable $r2, 10, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: dead renamable $r12 = t2ADDri killed renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: dead renamable $r2, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = tLEApcrel %const.0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 killed renamable $r2, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool)
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.do.body (align 4):
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q1 = nnan ninf nsz MVE_VLDRWU32 renamable $r0, 0, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q1 = nnan ninf nsz MVE_VMULf32 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q1, renamable $r1, 0, 0, killed $noreg, $noreg
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = nuw tADDi8 killed renamable $r0, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = nuw tADDi8 killed renamable $r1, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.do.end:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3 (align 16):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 16
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-itercount.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-itercount.mir
index fc216474db45..1e1e2a4dda3e 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-itercount.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-itercount.mir
@@ -95,29 +95,34 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: it_block_2_stmts
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: renamable $r3, dead $cpsr = tLSLri killed renamable $r2, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = tLEApcrel %const.0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r2, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool)
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3
- ; CHECK: bb.1.do.body (align 4):
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1
- ; CHECK: renamable $q1 = nnan ninf nsz MVE_VLDRWU32 renamable $r0, 0, 0, $noreg, $noreg
- ; CHECK: renamable $q1 = nnan ninf nsz MVE_VMULf32 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: MVE_VSTRWU32 killed renamable $q1, renamable $r1, 0, 0, killed $noreg, $noreg
- ; CHECK: renamable $r0, dead $cpsr = nuw tADDi8 killed renamable $r0, 16, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, dead $cpsr = nuw tADDi8 killed renamable $r1, 16, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.1
- ; CHECK: bb.2.do.end:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ; CHECK: bb.3 (align 16):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 16
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tLSLri killed renamable $r2, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = tLEApcrel %const.0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 killed renamable $r2, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool)
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.do.body (align 4):
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q1 = nnan ninf nsz MVE_VLDRWU32 renamable $r0, 0, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q1 = nnan ninf nsz MVE_VMULf32 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q1, renamable $r1, 0, 0, killed $noreg, $noreg
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = nuw tADDi8 killed renamable $r0, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = nuw tADDi8 killed renamable $r1, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.do.end:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3 (align 16):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 16
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-mov.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-mov.mir
index 938ae829db4e..31e88ea49a1a 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-mov.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-mov.mir
@@ -2,7 +2,6 @@
# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -run-pass=arm-low-overhead-loops %s -o - --verify-machineinstrs | FileCheck %s
--- |
- ; Function Attrs: nounwind
define hidden arm_aapcs_vfpcc void @cond_trip_count(ptr %0, i32 %1, ptr nocapture %2) local_unnamed_addr #1 {
ret void
}
@@ -39,79 +38,93 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: cond_trip_count
; CHECK: bb.0:
- ; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: tCMPi8 renamable $r1, 2, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r12 = t2MOVi 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tBcc %bb.2, 2 /* CC::hs */, killed $cpsr
- ; CHECK: bb.1:
- ; CHECK: liveins: $r2
- ; CHECK: renamable $s0 = VLDRS %const.0, 0, 14 /* CC::al */, $noreg
- ; CHECK: VSTRS killed renamable $s0, killed renamable $r2, 0, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
- ; CHECK: bb.2:
- ; CHECK: successors: %bb.3(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r12
- ; CHECK: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: tCMPi8 renamable $r1, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: $r12 = tMOVr renamable $r1, 11 /* CC::lt */, killed $cpsr, implicit killed renamable $r12, implicit killed $itstate
- ; CHECK: renamable $r3 = t2SUBrr renamable $r1, killed renamable $r12, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 3, 14 /* CC::al */, $noreg
- ; CHECK: $r12 = tMOVr $r1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4 = nuw nsw t2ADDrs killed renamable $r4, killed renamable $r3, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r12
- ; CHECK: bb.3:
- ; CHECK: successors: %bb.3(0x7c000000), %bb.4(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r3, $r4
- ; CHECK: renamable $q1 = nnan ninf nsz MVE_VLDRWU32 renamable $r3, 0, 0, $noreg, $noreg
- ; CHECK: renamable $q0 = nnan ninf nsz MVE_VADDf32 killed renamable $q0, killed renamable $q1, 0, killed $noreg, $noreg, killed renamable $q0
- ; CHECK: renamable $r3, dead $cpsr = nuw tADDi8 killed renamable $r3, 16, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.3
- ; CHECK: bb.4:
- ; CHECK: successors: %bb.5(0x80000000)
- ; CHECK: liveins: $q0, $r0, $r1, $r2, $r4
- ; CHECK: renamable $s4 = nnan ninf nsz VADDS renamable $s0, renamable $s1, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: $r3 = tMOVr $r1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $s4 = nnan ninf nsz VADDS renamable $s2, killed renamable $s4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $s0 = nnan ninf nsz VADDS killed renamable $s3, killed renamable $s4, 14 /* CC::al */, $noreg, implicit killed $q0
- ; CHECK: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $s2 = VUITOS killed renamable $s2, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2DLS killed $r4
- ; CHECK: renamable $s4 = nnan ninf nsz VDIVS killed renamable $s0, killed renamable $s2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: bb.5:
- ; CHECK: successors: %bb.5(0x7c000000), %bb.6(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r3, $s4
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg
- ; CHECK: $r4 = VMOVRS $s4, 14 /* CC::al */, $noreg
- ; CHECK: MVE_VPST 2, implicit $vpr
- ; CHECK: renamable $q2 = nnan ninf nsz MVE_VLDRWU32 renamable $r0, 0, 1, renamable $vpr, $noreg
- ; CHECK: renamable $q2 = nnan ninf nsz MVE_VSUB_qr_f32 killed renamable $q2, killed renamable $r4, 1, renamable $vpr, $noreg, undef renamable $q2
- ; CHECK: renamable $q0 = nnan ninf nsz MVE_VFMAf32 killed renamable $q0, killed renamable $q2, killed renamable $q2, 1, killed renamable $vpr, $noreg
- ; CHECK: renamable $r3, dead $cpsr = nsw tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0, dead $cpsr = nuw tADDi8 killed renamable $r0, 16, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.5
- ; CHECK: bb.6:
- ; CHECK: liveins: $q0, $r1, $r2
- ; CHECK: renamable $s4 = nnan ninf nsz VADDS renamable $s0, renamable $s1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $s4 = nnan ninf nsz VADDS renamable $s2, killed renamable $s4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $s0 = nnan ninf nsz VADDS killed renamable $s3, killed renamable $s4, 14 /* CC::al */, $noreg, implicit killed $q0
- ; CHECK: $s2 = VMOVSR killed $r0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $s2 = VUITOS killed renamable $s2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $s0 = nnan ninf nsz VDIVS killed renamable $s0, killed renamable $s2, 14 /* CC::al */, $noreg
- ; CHECK: VSTRS killed renamable $s0, killed renamable $r2, 0, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
- ; CHECK: bb.7 (align 4):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 4
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r1, 2, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r12 = t2MOVi 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tBcc %bb.2, 2 /* CC::hs */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: liveins: $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $s0 = VLDRS %const.0, 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: VSTRS killed renamable $s0, killed renamable $r2, 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tCMPi8 renamable $r1, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: $r12 = tMOVr renamable $r1, 11 /* CC::lt */, killed $cpsr, implicit killed renamable $r12, implicit killed $itstate
+ ; CHECK-NEXT: renamable $r3 = t2SUBrr renamable $r1, killed renamable $r12, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r12 = tMOVr $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4 = nuw nsw t2ADDrs killed renamable $r4, killed renamable $r3, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: successors: %bb.3(0x7c000000), %bb.4(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1, $r2, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q1 = nnan ninf nsz MVE_VLDRWU32 renamable $r3, 0, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = nnan ninf nsz MVE_VADDf32 killed renamable $q0, killed renamable $q1, 0, killed $noreg, $noreg, killed renamable $q0
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nuw tADDi8 killed renamable $r3, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
+ ; CHECK-NEXT: liveins: $q0, $r0, $r1, $r2, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $s4 = nnan ninf nsz VADDS renamable $s0, renamable $s1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r3 = tMOVr $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $s4 = nnan ninf nsz VADDS renamable $s2, killed renamable $s4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $s0 = nnan ninf nsz VADDS killed renamable $s3, killed renamable $s4, 14 /* CC::al */, $noreg, implicit killed $q0
+ ; CHECK-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $s2 = VUITOS killed renamable $s2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2DLS killed $r4
+ ; CHECK-NEXT: renamable $s4 = nnan ninf nsz VDIVS killed renamable $s0, killed renamable $s2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5:
+ ; CHECK-NEXT: successors: %bb.5(0x7c000000), %bb.6(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1, $r2, $r3, $s4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg
+ ; CHECK-NEXT: $r4 = VMOVRS $s4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: MVE_VPST 2, implicit $vpr
+ ; CHECK-NEXT: renamable $q2 = nnan ninf nsz MVE_VLDRWU32 renamable $r0, 0, 1, renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $q2 = nnan ninf nsz MVE_VSUB_qr_f32 killed renamable $q2, killed renamable $r4, 1, renamable $vpr, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $q0 = nnan ninf nsz MVE_VFMAf32 killed renamable $q0, killed renamable $q2, killed renamable $q2, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nsw tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = nuw tADDi8 killed renamable $r0, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6:
+ ; CHECK-NEXT: liveins: $q0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $s4 = nnan ninf nsz VADDS renamable $s0, renamable $s1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $s4 = nnan ninf nsz VADDS renamable $s2, killed renamable $s4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $s0 = nnan ninf nsz VADDS killed renamable $s3, killed renamable $s4, 14 /* CC::al */, $noreg, implicit killed $q0
+ ; CHECK-NEXT: $s2 = VMOVSR killed $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $s2 = VUITOS killed renamable $s2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $s0 = nnan ninf nsz VDIVS killed renamable $s0, killed renamable $s2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: VSTRS killed renamable $s0, killed renamable $r2, 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.7 (align 4):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 4
bb.0:
successors: %bb.1(0x40000000), %bb.2(0x40000000)
liveins: $r0, $r1, $r2, $r4, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-random.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-random.mir
index 1fac3e7e90ae..ea8a8a5b2ab0 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-random.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-random.mir
@@ -96,37 +96,42 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: it_block_2_stmts
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: renamable $r3, dead $cpsr = tLSLri renamable $r2, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2MOVi 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tCMPi8 renamable $r3, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: $r12 = t2LSLri renamable $r2, 1, 11 /* CC::lt */, $cpsr, $noreg, implicit killed renamable $r12, implicit $itstate
- ; CHECK: $r0 = t2ADDri killed renamable $r0, 42, 11 /* CC::lt */, killed $cpsr, $noreg, implicit killed renamable $r0, implicit killed $itstate
- ; CHECK: renamable $r2 = t2RSBrs killed renamable $r12, killed renamable $r2, 10, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: dead renamable $r12 = t2ADDri killed renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: dead renamable $r2, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = tLEApcrel %const.0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r2, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool)
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3
- ; CHECK: bb.1.do.body (align 4):
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1
- ; CHECK: renamable $q1 = nnan ninf nsz MVE_VLDRWU32 renamable $r0, 0, 0, $noreg, $noreg
- ; CHECK: renamable $q1 = nnan ninf nsz MVE_VMULf32 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: MVE_VSTRWU32 killed renamable $q1, renamable $r1, 0, 0, killed $noreg, $noreg
- ; CHECK: renamable $r0, dead $cpsr = nuw tADDi8 killed renamable $r0, 16, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, dead $cpsr = nuw tADDi8 killed renamable $r1, 16, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.1
- ; CHECK: bb.2.do.end:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ; CHECK: bb.3 (align 16):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 16
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tLSLri renamable $r2, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2MOVi 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: $r12 = t2LSLri renamable $r2, 1, 11 /* CC::lt */, $cpsr, $noreg, implicit killed renamable $r12, implicit $itstate
+ ; CHECK-NEXT: $r0 = t2ADDri killed renamable $r0, 42, 11 /* CC::lt */, killed $cpsr, $noreg, implicit killed renamable $r0, implicit killed $itstate
+ ; CHECK-NEXT: renamable $r2 = t2RSBrs killed renamable $r12, killed renamable $r2, 10, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: dead renamable $r12 = t2ADDri killed renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: dead renamable $r2, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = tLEApcrel %const.0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 killed renamable $r2, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool)
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.do.body (align 4):
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q1 = nnan ninf nsz MVE_VLDRWU32 renamable $r0, 0, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q1 = nnan ninf nsz MVE_VMULf32 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q1, renamable $r1, 0, 0, killed $noreg, $noreg
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = nuw tADDi8 killed renamable $r0, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = nuw tADDi8 killed renamable $r1, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.do.end:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3 (align 16):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 16
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/iv-two-vcmp-reordered.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/iv-two-vcmp-reordered.mir
index f7154763e778..29d9b3e1dd61 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/iv-two-vcmp-reordered.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/iv-two-vcmp-reordered.mir
@@ -97,52 +97,59 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $d8, $d9, $r0, $r1, $r2, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: $sp = frame-setup VSTMDDB_UPD $sp, 14 /* CC::al */, $noreg, killed $d8, killed $d9
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 24
- ; CHECK: frame-setup CFI_INSTRUCTION offset $d9, -16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $d8, -24
- ; CHECK: tCBZ $r2, %bb.3
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q2 = MVE_VMOVimmi32 1, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q3 = MVE_VMOVimmi32 4, 0, $noreg, $noreg, undef renamable $q3
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: dead $lr = t2DLS renamable $r3
- ; CHECK: $r4 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = tLEApcrel %const.0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool)
- ; CHECK: renamable $r3, dead $cpsr = tLSRri renamable $r2, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $q0, $q1, $q2, $q3, $r0, $r1, $r2, $r4
- ; CHECK: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $vpr = MVE_VCMPu32 renamable $q1, renamable $q0, 8, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 1, implicit $vpr
- ; CHECK: renamable $vpr = MVE_VCMPu32 renamable $q0, renamable $q2, 2, 1, killed renamable $vpr, $noreg
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 1, killed renamable $vpr, $noreg
- ; CHECK: renamable $r1, renamable $q4 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv35, align 4)
- ; CHECK: renamable $r0 = MVE_VSTRWU32_post killed renamable $q4, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv12, align 4)
- ; CHECK: renamable $q0 = MVE_VADDi32 killed renamable $q0, renamable $q3, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: $sp = frame-destroy VLDMDIA_UPD $sp, 14 /* CC::al */, $noreg, def $d8, def $d9
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
- ; CHECK: bb.4 (align 16):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 16
+ ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $d8, $d9, $r0, $r1, $r2, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: $sp = frame-setup VSTMDDB_UPD $sp, 14 /* CC::al */, $noreg, killed $d8, killed $d9
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 24
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $d9, -16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $d8, -24
+ ; CHECK-NEXT: tCBZ $r2, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q2 = MVE_VMOVimmi32 1, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q3 = MVE_VMOVimmi32 4, 0, $noreg, $noreg, undef renamable $q3
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r3
+ ; CHECK-NEXT: $r4 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = tLEApcrel %const.0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool)
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tLSRri renamable $r2, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $q0, $q1, $q2, $q3, $r0, $r1, $r2, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $vpr = MVE_VCMPu32 renamable $q1, renamable $q0, 8, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 1, implicit $vpr
+ ; CHECK-NEXT: renamable $vpr = MVE_VCMPu32 renamable $q0, renamable $q2, 2, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r2, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r1, renamable $q4 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv35, align 4)
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRWU32_post killed renamable $q4, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv12, align 4)
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi32 killed renamable $q0, renamable $q3, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: $sp = frame-destroy VLDMDIA_UPD $sp, 14 /* CC::al */, $noreg, def $d8, def $d9
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4 (align 16):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 16
bb.0.entry:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r4, $lr, $d8, $d9
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/iv-two-vcmp.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/iv-two-vcmp.mir
index 551cf31f8a9d..ff186bb01150 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/iv-two-vcmp.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/iv-two-vcmp.mir
@@ -94,52 +94,59 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $d8, $d9, $r0, $r1, $r2, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: $sp = frame-setup VSTMDDB_UPD $sp, 14 /* CC::al */, $noreg, killed $d8, killed $d9
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 24
- ; CHECK: frame-setup CFI_INSTRUCTION offset $d9, -16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $d8, -24
- ; CHECK: tCBZ $r2, %bb.3
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q2 = MVE_VMOVimmi32 1, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q3 = MVE_VMOVimmi32 4, 0, $noreg, $noreg, undef renamable $q3
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: dead $lr = t2DLS renamable $r3
- ; CHECK: $r4 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = tLEApcrel %const.0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool)
- ; CHECK: renamable $r3, dead $cpsr = tLSRri renamable $r2, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $q0, $q1, $q2, $q3, $r0, $r1, $r2, $r4
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
- ; CHECK: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: MVE_VPST 1, implicit $vpr
- ; CHECK: renamable $vpr = MVE_VCMPu32 renamable $q1, renamable $q0, 8, 1, killed renamable $vpr, $noreg
- ; CHECK: renamable $vpr = MVE_VCMPu32 renamable $q0, renamable $q2, 2, 1, killed renamable $vpr, $noreg
- ; CHECK: renamable $r1, renamable $q4 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv35, align 4)
- ; CHECK: renamable $r0 = MVE_VSTRWU32_post killed renamable $q4, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv12, align 4)
- ; CHECK: renamable $q0 = MVE_VADDi32 killed renamable $q0, renamable $q3, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: $sp = frame-destroy VLDMDIA_UPD $sp, 14 /* CC::al */, $noreg, def $d8, def $d9
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
- ; CHECK: bb.4 (align 16):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 16
+ ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $d8, $d9, $r0, $r1, $r2, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: $sp = frame-setup VSTMDDB_UPD $sp, 14 /* CC::al */, $noreg, killed $d8, killed $d9
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 24
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $d9, -16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $d8, -24
+ ; CHECK-NEXT: tCBZ $r2, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q2 = MVE_VMOVimmi32 1, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q3 = MVE_VMOVimmi32 4, 0, $noreg, $noreg, undef renamable $q3
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r3
+ ; CHECK-NEXT: $r4 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = tLEApcrel %const.0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool)
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tLSRri renamable $r2, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $q0, $q1, $q2, $q3, $r0, $r1, $r2, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
+ ; CHECK-NEXT: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: MVE_VPST 1, implicit $vpr
+ ; CHECK-NEXT: renamable $vpr = MVE_VCMPu32 renamable $q1, renamable $q0, 8, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $vpr = MVE_VCMPu32 renamable $q0, renamable $q2, 2, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r1, renamable $q4 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv35, align 4)
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRWU32_post killed renamable $q4, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv12, align 4)
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi32 killed renamable $q0, renamable $q3, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: $sp = frame-destroy VLDMDIA_UPD $sp, 14 /* CC::al */, $noreg, def $d8, def $d9
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4 (align 16):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 16
bb.0.entry:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r4, $lr, $d8, $d9
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/iv-vcmp.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/iv-vcmp.mir
index 5ede970f7984..a8a84073483d 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/iv-vcmp.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/iv-vcmp.mir
@@ -2,7 +2,6 @@
# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -verify-machineinstrs -o - | FileCheck %s
--- |
- ; Function Attrs: nofree norecurse nounwind
define dso_local arm_aapcs_vfpcc void @test(ptr noalias nocapture %a, ptr nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
entry:
%cmp9 = icmp eq i32 %N, 0
@@ -87,38 +86,45 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: frame-destroy tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $q2 = MVE_VMOVimmi32 4, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: renamable $r3 = tLEApcrel %const.0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool)
- ; CHECK: renamable $r3, dead $cpsr = tLSRri renamable $r2, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r2
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $q1, $q2, $r0, $r1
- ; CHECK: MVE_VPTv4u32 4, renamable $q1, renamable $q0, 8, implicit-def $vpr
- ; CHECK: renamable $r1, renamable $q3 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv35, align 4)
- ; CHECK: renamable $r0 = MVE_VSTRWU32_post killed renamable $q3, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv12, align 4)
- ; CHECK: renamable $q0 = MVE_VADDi32 killed renamable $q0, renamable $q2, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ; CHECK: bb.4 (align 16):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 16
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_register $r7
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: frame-destroy tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q2 = MVE_VMOVimmi32 4, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $r3 = tLEApcrel %const.0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool)
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tLSRri renamable $r2, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $q1, $q2, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: MVE_VPTv4u32 4, renamable $q1, renamable $q0, 8, implicit-def $vpr
+ ; CHECK-NEXT: renamable $r1, renamable $q3 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv35, align 4)
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRWU32_post killed renamable $q3, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv12, align 4)
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi32 killed renamable $q0, renamable $q2, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4 (align 16):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 16
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/livereg-no-loop-def.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/livereg-no-loop-def.mir
index 1e0c546b81f3..cee07ca5c19e 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/livereg-no-loop-def.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/livereg-no-loop-def.mir
@@ -86,34 +86,41 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: exit_liveout
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: renamable $r12 = t2ADDri $sp, 8, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r12, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
- ; CHECK: tCBZ $r3, %bb.3
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $q0, $r0, $r1, $r2, $r3
- ; CHECK: renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $q1, $r0, $r1, $r2
- ; CHECK: renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
- ; CHECK: renamable $q2 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $r2 = MVE_VSTRWU32_post renamable $q1, killed renamable $r2, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.store, align 4)
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: liveins: $q0
- ; CHECK: renamable $r0, renamable $r1 = VMOVRRD renamable $d0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, renamable $r3 = VMOVRRD killed renamable $d1, 14 /* CC::al */, $noreg, implicit killed $q0
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc, implicit killed $r0, implicit killed $r1, implicit killed $r2, implicit killed $r3
+ ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: renamable $r12 = t2ADDri $sp, 8, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 killed renamable $r12, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: tCBZ $r3, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $q0, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $q1, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
+ ; CHECK-NEXT: renamable $q2 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $r2 = MVE_VSTRWU32_post renamable $q1, killed renamable $r2, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.store, align 4)
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $r1 = VMOVRRD renamable $d0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, renamable $r3 = VMOVRRD killed renamable $d1, 14 /* CC::al */, $noreg, implicit killed $q0
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc, implicit killed $r0, implicit killed $r1, implicit killed $r2, implicit killed $r3
bb.0.entry:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-dec-copy-chain.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-dec-copy-chain.mir
index 4278cfc01057..9f027f916475 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-dec-copy-chain.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-dec-copy-chain.mir
@@ -222,126 +222,144 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test1
; CHECK: bb.0.bb:
- ; CHECK: successors: %bb.8(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11
- ; CHECK: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $r8, killed $r9, killed $r10, killed $r11, killed $lr
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 36
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r11, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r10, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r9, -16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r8, -20
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -24
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -28
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -32
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -36
- ; CHECK: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 40
- ; CHECK: tCMPi8 renamable $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.8, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.1.bb4:
- ; CHECK: successors: %bb.2(0x40000000), %bb.3(0x40000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r4, dead $cpsr = tSUBi3 renamable $r3, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r7 = t2ANDri renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tCMPi8 killed renamable $r4, 3, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tSTRspi killed renamable $r7, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
- ; CHECK: tBcc %bb.3, 2 /* CC::hs */, killed $cpsr
- ; CHECK: bb.2:
- ; CHECK: successors: %bb.5(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r8 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tB %bb.5, 14 /* CC::al */, $noreg
- ; CHECK: bb.3.bb12:
- ; CHECK: successors: %bb.4(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r8 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3 = nuw nsw t2ADDrs killed renamable $r4, killed renamable $r3, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: dead $lr = tMOVr renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: $r12 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: bb.4.bb28:
- ; CHECK: successors: %bb.4(0x7c000000), %bb.5(0x04000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r8, $r12
- ; CHECK: renamable $r5 = tLDRr renamable $r1, $r3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep617)
- ; CHECK: renamable $r7, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6 = tLDRr renamable $r2, $r3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep418)
- ; CHECK: $lr = tMOVr killed $r12, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r8 = nuw t2ADDri killed renamable $r8, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r5, dead $cpsr = tEOR killed renamable $r5, killed renamable $r6, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6 = tLDRr renamable $r0, $r3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep219)
- ; CHECK: renamable $lr = t2SUBri killed renamable $lr, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $r12 = tMOVr $lr, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r5 = nsw tADDhirr killed renamable $r5, killed renamable $r6, 14 /* CC::al */, $noreg
- ; CHECK: tSTRr killed renamable $r5, renamable $r0, $r3, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep219)
- ; CHECK: renamable $r5, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4 = tLDRi renamable $r7, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep11)
- ; CHECK: renamable $r6 = tLDRi renamable $r5, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep14)
- ; CHECK: renamable $r9 = t2EORrr killed renamable $r4, killed renamable $r6, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r6, dead $cpsr = tADDrr renamable $r0, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: $r11 = t2ADDri $r6, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 16, 14 /* CC::al */, $noreg
- ; CHECK: t2LDMIA killed $r11, 14 /* CC::al */, $noreg, def $r4, def $r10, def $r11 :: (load (s32) from %ir.scevgep9), (load (s32) from %ir.scevgep8), (load (s32) from %ir.scevgep1)
- ; CHECK: renamable $r4 = nsw tADDhirr killed renamable $r4, killed renamable $r9, 14 /* CC::al */, $noreg
- ; CHECK: tSTRi killed renamable $r4, renamable $r6, 1, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep9)
- ; CHECK: renamable $r9 = t2LDRi12 renamable $r5, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep12)
- ; CHECK: renamable $r4 = tLDRi renamable $r7, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep10)
- ; CHECK: renamable $r4 = t2EORrr killed renamable $r4, killed renamable $r9, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r4 = nsw tADDhirr killed renamable $r4, killed renamable $r10, 14 /* CC::al */, $noreg
- ; CHECK: tSTRi killed renamable $r4, renamable $r6, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep8)
- ; CHECK: renamable $r4 = tLDRi killed renamable $r5, 3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep5)
- ; CHECK: renamable $r5 = tLDRi killed renamable $r7, 3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
- ; CHECK: renamable $r4, dead $cpsr = tEOR killed renamable $r4, killed renamable $r5, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4 = nsw tADDhirr killed renamable $r4, killed renamable $r11, 14 /* CC::al */, $noreg
- ; CHECK: tSTRi killed renamable $r4, killed renamable $r6, 3, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep1)
- ; CHECK: t2CMPri killed renamable $lr, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.4, 1 /* CC::ne */, killed $cpsr
- ; CHECK: tB %bb.5, 14 /* CC::al */, $noreg
- ; CHECK: bb.5.bb13:
- ; CHECK: successors: %bb.8(0x30000000), %bb.6(0x50000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r8
- ; CHECK: renamable $r5 = tLDRspi $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
- ; CHECK: tCBZ $r5, %bb.8
- ; CHECK: bb.6.bb16:
- ; CHECK: successors: %bb.8(0x40000000), %bb.7(0x40000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r5, $r8
- ; CHECK: renamable $lr = t2LDRs renamable $r1, renamable $r8, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp17)
- ; CHECK: tCMPi8 renamable $r5, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r3 = t2LDRs renamable $r2, renamable $r8, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp19)
- ; CHECK: renamable $lr = t2EORrr killed renamable $lr, killed renamable $r3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3 = t2LDRs renamable $r0, renamable $r8, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp22)
- ; CHECK: renamable $r3 = nsw tADDhirr killed renamable $r3, killed renamable $lr, 14 /* CC::al */, $noreg
- ; CHECK: t2STRs killed renamable $r3, renamable $r0, renamable $r8, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.tmp22)
- ; CHECK: tBcc %bb.8, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.7.bb57:
- ; CHECK: successors: %bb.8(0x40000000), %bb.9(0x40000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r5, $r8
- ; CHECK: renamable $r3 = nuw t2ADDri renamable $r8, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tCMPi8 killed renamable $r5, 2, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r7 = t2LDRs renamable $r1, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp58)
- ; CHECK: renamable $r6 = t2LDRs renamable $r2, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp60)
- ; CHECK: renamable $r7 = t2EORrr killed renamable $r7, killed renamable $r6, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r6 = t2LDRs renamable $r0, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp63)
- ; CHECK: renamable $r7 = nsw tADDhirr killed renamable $r7, killed renamable $r6, 14 /* CC::al */, $noreg
- ; CHECK: t2STRs killed renamable $r7, renamable $r0, killed renamable $r3, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.tmp63)
- ; CHECK: tBcc %bb.9, 1 /* CC::ne */, killed $cpsr
- ; CHECK: bb.8.bb27:
- ; CHECK: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: $sp = t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11, def $pc
- ; CHECK: bb.9.bb68:
- ; CHECK: liveins: $r0, $r1, $r2, $r8
- ; CHECK: renamable $r3 = nuw t2ADDri killed renamable $r8, 2, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r1 = t2LDRs killed renamable $r1, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp69)
- ; CHECK: renamable $r2 = t2LDRs killed renamable $r2, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp71)
- ; CHECK: renamable $r1, dead $cpsr = tEOR killed renamable $r1, killed renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = t2LDRs renamable $r0, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp74)
- ; CHECK: renamable $r1 = nsw tADDhirr killed renamable $r1, killed renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: t2STRs killed renamable $r1, killed renamable $r0, killed renamable $r3, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.tmp74)
- ; CHECK: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: $sp = t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11, def $pc
+ ; CHECK-NEXT: successors: %bb.8(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $r8, killed $r9, killed $r10, killed $r11, killed $lr
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 36
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r11, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r10, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r9, -16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r8, -20
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -24
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r6, -28
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -32
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -36
+ ; CHECK-NEXT: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 40
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.8, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.bb4:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.3(0x40000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tSUBi3 renamable $r3, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r7 = t2ANDri renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tCMPi8 killed renamable $r4, 3, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tSTRspi killed renamable $r7, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
+ ; CHECK-NEXT: tBcc %bb.3, 2 /* CC::hs */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r8 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tB %bb.5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.bb12:
+ ; CHECK-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r8 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3 = nuw nsw t2ADDrs killed renamable $r4, killed renamable $r3, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = tMOVr renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r12 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.bb28:
+ ; CHECK-NEXT: successors: %bb.4(0x7c000000), %bb.5(0x04000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r8, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r5 = tLDRr renamable $r1, $r3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep617)
+ ; CHECK-NEXT: renamable $r7, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6 = tLDRr renamable $r2, $r3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep418)
+ ; CHECK-NEXT: $lr = tMOVr killed $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r8 = nuw t2ADDri killed renamable $r8, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r5, dead $cpsr = tEOR killed renamable $r5, killed renamable $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6 = tLDRr renamable $r0, $r3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep219)
+ ; CHECK-NEXT: renamable $lr = t2SUBri killed renamable $lr, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $r12 = tMOVr $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r5 = nsw tADDhirr killed renamable $r5, killed renamable $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRr killed renamable $r5, renamable $r0, $r3, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep219)
+ ; CHECK-NEXT: renamable $r5, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4 = tLDRi renamable $r7, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep11)
+ ; CHECK-NEXT: renamable $r6 = tLDRi renamable $r5, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep14)
+ ; CHECK-NEXT: renamable $r9 = t2EORrr killed renamable $r4, killed renamable $r6, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r6, dead $cpsr = tADDrr renamable $r0, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r11 = t2ADDri $r6, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2LDMIA killed $r11, 14 /* CC::al */, $noreg, def $r4, def $r10, def $r11 :: (load (s32) from %ir.scevgep9), (load (s32) from %ir.scevgep8), (load (s32) from %ir.scevgep1)
+ ; CHECK-NEXT: renamable $r4 = nsw tADDhirr killed renamable $r4, killed renamable $r9, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRi killed renamable $r4, renamable $r6, 1, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep9)
+ ; CHECK-NEXT: renamable $r9 = t2LDRi12 renamable $r5, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep12)
+ ; CHECK-NEXT: renamable $r4 = tLDRi renamable $r7, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep10)
+ ; CHECK-NEXT: renamable $r4 = t2EORrr killed renamable $r4, killed renamable $r9, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4 = nsw tADDhirr killed renamable $r4, killed renamable $r10, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRi killed renamable $r4, renamable $r6, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep8)
+ ; CHECK-NEXT: renamable $r4 = tLDRi killed renamable $r5, 3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep5)
+ ; CHECK-NEXT: renamable $r5 = tLDRi killed renamable $r7, 3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tEOR killed renamable $r4, killed renamable $r5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4 = nsw tADDhirr killed renamable $r4, killed renamable $r11, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRi killed renamable $r4, killed renamable $r6, 3, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep1)
+ ; CHECK-NEXT: t2CMPri killed renamable $lr, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.4, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NEXT: tB %bb.5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5.bb13:
+ ; CHECK-NEXT: successors: %bb.8(0x30000000), %bb.6(0x50000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r5 = tLDRspi $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: tCBZ $r5, %bb.8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6.bb16:
+ ; CHECK-NEXT: successors: %bb.8(0x40000000), %bb.7(0x40000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r5, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $lr = t2LDRs renamable $r1, renamable $r8, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp17)
+ ; CHECK-NEXT: tCMPi8 renamable $r5, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r3 = t2LDRs renamable $r2, renamable $r8, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp19)
+ ; CHECK-NEXT: renamable $lr = t2EORrr killed renamable $lr, killed renamable $r3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2LDRs renamable $r0, renamable $r8, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp22)
+ ; CHECK-NEXT: renamable $r3 = nsw tADDhirr killed renamable $r3, killed renamable $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2STRs killed renamable $r3, renamable $r0, renamable $r8, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.tmp22)
+ ; CHECK-NEXT: tBcc %bb.8, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.7.bb57:
+ ; CHECK-NEXT: successors: %bb.8(0x40000000), %bb.9(0x40000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r5, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3 = nuw t2ADDri renamable $r8, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tCMPi8 killed renamable $r5, 2, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r7 = t2LDRs renamable $r1, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp58)
+ ; CHECK-NEXT: renamable $r6 = t2LDRs renamable $r2, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp60)
+ ; CHECK-NEXT: renamable $r7 = t2EORrr killed renamable $r7, killed renamable $r6, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r6 = t2LDRs renamable $r0, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp63)
+ ; CHECK-NEXT: renamable $r7 = nsw tADDhirr killed renamable $r7, killed renamable $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2STRs killed renamable $r7, renamable $r0, killed renamable $r3, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.tmp63)
+ ; CHECK-NEXT: tBcc %bb.9, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.8.bb27:
+ ; CHECK-NEXT: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $sp = t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.9.bb68:
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3 = nuw t2ADDri killed renamable $r8, 2, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r1 = t2LDRs killed renamable $r1, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp69)
+ ; CHECK-NEXT: renamable $r2 = t2LDRs killed renamable $r2, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp71)
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tEOR killed renamable $r1, killed renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = t2LDRs renamable $r0, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp74)
+ ; CHECK-NEXT: renamable $r1 = nsw tADDhirr killed renamable $r1, killed renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2STRs killed renamable $r1, killed renamable $r0, killed renamable $r3, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.tmp74)
+ ; CHECK-NEXT: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $sp = t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11, def $pc
bb.0.bb:
successors: %bb.8(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-dec-copy-prev-iteration.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-dec-copy-prev-iteration.mir
index 14c383f95f89..e0fd23130baf 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-dec-copy-prev-iteration.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-dec-copy-prev-iteration.mir
@@ -223,136 +223,161 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test1
; CHECK: bb.0.bb:
- ; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11
- ; CHECK: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $r8, killed $r9, killed $r10, killed $r11, killed $lr
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 36
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r11, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r10, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r9, -16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r8, -20
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -24
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -28
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -32
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -36
- ; CHECK: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 40
- ; CHECK: tCBZ $r3, %bb.3
- ; CHECK: bb.1.bb4:
- ; CHECK: successors: %bb.2(0x40000000), %bb.4(0x40000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r4, dead $cpsr = tSUBi3 renamable $r3, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r7 = t2ANDri renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tCMPi8 killed renamable $r4, 3, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tSTRspi killed renamable $r7, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
- ; CHECK: tBcc %bb.4, 2 /* CC::hs */, killed $cpsr
- ; CHECK: bb.2:
- ; CHECK: successors: %bb.6(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r8 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tB %bb.6, 14 /* CC::al */, $noreg
- ; CHECK: bb.3:
- ; CHECK: successors: %bb.12(0x80000000)
- ; CHECK: renamable $lr = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tB %bb.12, 14 /* CC::al */, $noreg
- ; CHECK: bb.4.bb12:
- ; CHECK: successors: %bb.5(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r8 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3 = nuw nsw t2ADDrs killed renamable $r4, killed renamable $r3, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: dead $lr = t2DLS renamable $r3
- ; CHECK: $lr = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: bb.5.bb28:
- ; CHECK: successors: %bb.5(0x7c000000), %bb.6(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r8
- ; CHECK: renamable $r5 = tLDRr renamable $r1, $r3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep617)
- ; CHECK: renamable $r7, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6 = tLDRr renamable $r2, $r3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep418)
- ; CHECK: dead $r12 = tMOVr $lr, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r8 = nuw t2ADDri killed renamable $r8, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r5, dead $cpsr = tEOR killed renamable $r5, killed renamable $r6, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6 = tLDRr renamable $r0, $r3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep219)
- ; CHECK: renamable $r5 = nsw tADDhirr killed renamable $r5, killed renamable $r6, 14 /* CC::al */, $noreg
- ; CHECK: tSTRr killed renamable $r5, renamable $r0, $r3, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep219)
- ; CHECK: renamable $r5, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4 = tLDRi renamable $r7, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep11)
- ; CHECK: renamable $r6 = tLDRi renamable $r5, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep14)
- ; CHECK: renamable $r9 = t2EORrr killed renamable $r4, killed renamable $r6, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r6, dead $cpsr = tADDrr renamable $r0, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: $r11 = t2ADDri $r6, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 16, 14 /* CC::al */, $noreg
- ; CHECK: t2LDMIA killed $r11, 14 /* CC::al */, $noreg, def $r4, def $r10, def $r11 :: (load (s32) from %ir.scevgep9), (load (s32) from %ir.scevgep8), (load (s32) from %ir.scevgep1)
- ; CHECK: renamable $r4 = nsw tADDhirr killed renamable $r4, killed renamable $r9, 14 /* CC::al */, $noreg
- ; CHECK: tSTRi killed renamable $r4, renamable $r6, 1, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep9)
- ; CHECK: renamable $r9 = t2LDRi12 renamable $r5, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep12)
- ; CHECK: renamable $r4 = tLDRi renamable $r7, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep10)
- ; CHECK: renamable $r4 = t2EORrr killed renamable $r4, killed renamable $r9, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r4 = nsw tADDhirr killed renamable $r4, killed renamable $r10, 14 /* CC::al */, $noreg
- ; CHECK: tSTRi killed renamable $r4, renamable $r6, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep8)
- ; CHECK: renamable $r4 = tLDRi killed renamable $r5, 3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep5)
- ; CHECK: renamable $r5 = tLDRi killed renamable $r7, 3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
- ; CHECK: renamable $r4, dead $cpsr = tEOR killed renamable $r4, killed renamable $r5, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4 = nsw tADDhirr killed renamable $r4, killed renamable $r11, 14 /* CC::al */, $noreg
- ; CHECK: tSTRi killed renamable $r4, killed renamable $r6, 3, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep1)
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.5
- ; CHECK: bb.6.bb13:
- ; CHECK: successors: %bb.12(0x30000000), %bb.7(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r8
- ; CHECK: renamable $r5 = tLDRspi $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
- ; CHECK: tCBZ $r5, %bb.12
- ; CHECK: bb.7.bb16:
- ; CHECK: successors: %bb.8(0x40000000), %bb.9(0x40000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r5, $r8
- ; CHECK: renamable $lr = t2LDRs renamable $r1, renamable $r8, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp17)
- ; CHECK: tCMPi8 renamable $r5, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r3 = t2LDRs renamable $r2, renamable $r8, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp19)
- ; CHECK: renamable $lr = t2EORrr killed renamable $lr, killed renamable $r3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3 = t2LDRs renamable $r0, renamable $r8, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp22)
- ; CHECK: renamable $r3 = nsw tADDhirr killed renamable $r3, killed renamable $lr, 14 /* CC::al */, $noreg
- ; CHECK: t2STRs killed renamable $r3, renamable $r0, renamable $r8, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.tmp22)
- ; CHECK: tBcc %bb.9, 1 /* CC::ne */, killed $cpsr
- ; CHECK: bb.8:
- ; CHECK: successors: %bb.12(0x80000000)
- ; CHECK: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tB %bb.12, 14 /* CC::al */, $noreg
- ; CHECK: bb.9.bb57:
- ; CHECK: successors: %bb.10(0x40000000), %bb.11(0x40000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r5, $r8
- ; CHECK: renamable $r3 = nuw t2ADDri renamable $r8, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tCMPi8 killed renamable $r5, 2, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r7 = t2LDRs renamable $r1, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp58)
- ; CHECK: renamable $r6 = t2LDRs renamable $r2, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp60)
- ; CHECK: renamable $r7 = t2EORrr killed renamable $r7, killed renamable $r6, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r6 = t2LDRs renamable $r0, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp63)
- ; CHECK: renamable $r7 = nsw tADDhirr killed renamable $r7, killed renamable $r6, 14 /* CC::al */, $noreg
- ; CHECK: t2STRs killed renamable $r7, renamable $r0, killed renamable $r3, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.tmp63)
- ; CHECK: tBcc %bb.11, 1 /* CC::ne */, killed $cpsr
- ; CHECK: bb.10:
- ; CHECK: successors: %bb.12(0x80000000)
- ; CHECK: renamable $lr = t2MOVi 2, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tB %bb.12, 14 /* CC::al */, $noreg
- ; CHECK: bb.11.bb68:
- ; CHECK: successors: %bb.12(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r8
- ; CHECK: renamable $r3 = nuw t2ADDri killed renamable $r8, 2, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = t2MOVi 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r1 = t2LDRs killed renamable $r1, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp69)
- ; CHECK: renamable $r2 = t2LDRs killed renamable $r2, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp71)
- ; CHECK: renamable $r1, dead $cpsr = tEOR killed renamable $r1, killed renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = t2LDRs renamable $r0, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp74)
- ; CHECK: renamable $r1 = nsw tADDhirr killed renamable $r1, killed renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: t2STRs killed renamable $r1, killed renamable $r0, killed renamable $r3, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.tmp74)
- ; CHECK: bb.12.bb27:
- ; CHECK: liveins: $lr
- ; CHECK: $r0 = tMOVr killed $lr, 14 /* CC::al */, $noreg
- ; CHECK: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: $sp = t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $r8, killed $r9, killed $r10, killed $r11, killed $lr
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 36
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r11, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r10, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r9, -16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r8, -20
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -24
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r6, -28
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -32
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -36
+ ; CHECK-NEXT: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 40
+ ; CHECK-NEXT: tCBZ $r3, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.bb4:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.4(0x40000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tSUBi3 renamable $r3, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r7 = t2ANDri renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tCMPi8 killed renamable $r4, 3, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tSTRspi killed renamable $r7, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
+ ; CHECK-NEXT: tBcc %bb.4, 2 /* CC::hs */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.6(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r8 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tB %bb.6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: successors: %bb.12(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $lr = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tB %bb.12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.bb12:
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r8 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3 = nuw nsw t2ADDrs killed renamable $r4, killed renamable $r3, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r3
+ ; CHECK-NEXT: $lr = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5.bb28:
+ ; CHECK-NEXT: successors: %bb.5(0x7c000000), %bb.6(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r5 = tLDRr renamable $r1, $r3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep617)
+ ; CHECK-NEXT: renamable $r7, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6 = tLDRr renamable $r2, $r3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep418)
+ ; CHECK-NEXT: dead $r12 = tMOVr $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r8 = nuw t2ADDri killed renamable $r8, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r5, dead $cpsr = tEOR killed renamable $r5, killed renamable $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6 = tLDRr renamable $r0, $r3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep219)
+ ; CHECK-NEXT: renamable $r5 = nsw tADDhirr killed renamable $r5, killed renamable $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRr killed renamable $r5, renamable $r0, $r3, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep219)
+ ; CHECK-NEXT: renamable $r5, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4 = tLDRi renamable $r7, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep11)
+ ; CHECK-NEXT: renamable $r6 = tLDRi renamable $r5, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep14)
+ ; CHECK-NEXT: renamable $r9 = t2EORrr killed renamable $r4, killed renamable $r6, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r6, dead $cpsr = tADDrr renamable $r0, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r11 = t2ADDri $r6, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2LDMIA killed $r11, 14 /* CC::al */, $noreg, def $r4, def $r10, def $r11 :: (load (s32) from %ir.scevgep9), (load (s32) from %ir.scevgep8), (load (s32) from %ir.scevgep1)
+ ; CHECK-NEXT: renamable $r4 = nsw tADDhirr killed renamable $r4, killed renamable $r9, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRi killed renamable $r4, renamable $r6, 1, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep9)
+ ; CHECK-NEXT: renamable $r9 = t2LDRi12 renamable $r5, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep12)
+ ; CHECK-NEXT: renamable $r4 = tLDRi renamable $r7, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep10)
+ ; CHECK-NEXT: renamable $r4 = t2EORrr killed renamable $r4, killed renamable $r9, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4 = nsw tADDhirr killed renamable $r4, killed renamable $r10, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRi killed renamable $r4, renamable $r6, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep8)
+ ; CHECK-NEXT: renamable $r4 = tLDRi killed renamable $r5, 3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep5)
+ ; CHECK-NEXT: renamable $r5 = tLDRi killed renamable $r7, 3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tEOR killed renamable $r4, killed renamable $r5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4 = nsw tADDhirr killed renamable $r4, killed renamable $r11, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRi killed renamable $r4, killed renamable $r6, 3, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep1)
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6.bb13:
+ ; CHECK-NEXT: successors: %bb.12(0x30000000), %bb.7(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r5 = tLDRspi $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: tCBZ $r5, %bb.12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.7.bb16:
+ ; CHECK-NEXT: successors: %bb.8(0x40000000), %bb.9(0x40000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r5, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $lr = t2LDRs renamable $r1, renamable $r8, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp17)
+ ; CHECK-NEXT: tCMPi8 renamable $r5, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r3 = t2LDRs renamable $r2, renamable $r8, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp19)
+ ; CHECK-NEXT: renamable $lr = t2EORrr killed renamable $lr, killed renamable $r3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2LDRs renamable $r0, renamable $r8, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp22)
+ ; CHECK-NEXT: renamable $r3 = nsw tADDhirr killed renamable $r3, killed renamable $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2STRs killed renamable $r3, renamable $r0, renamable $r8, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.tmp22)
+ ; CHECK-NEXT: tBcc %bb.9, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.8:
+ ; CHECK-NEXT: successors: %bb.12(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tB %bb.12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.9.bb57:
+ ; CHECK-NEXT: successors: %bb.10(0x40000000), %bb.11(0x40000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r5, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3 = nuw t2ADDri renamable $r8, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tCMPi8 killed renamable $r5, 2, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r7 = t2LDRs renamable $r1, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp58)
+ ; CHECK-NEXT: renamable $r6 = t2LDRs renamable $r2, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp60)
+ ; CHECK-NEXT: renamable $r7 = t2EORrr killed renamable $r7, killed renamable $r6, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r6 = t2LDRs renamable $r0, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp63)
+ ; CHECK-NEXT: renamable $r7 = nsw tADDhirr killed renamable $r7, killed renamable $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2STRs killed renamable $r7, renamable $r0, killed renamable $r3, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.tmp63)
+ ; CHECK-NEXT: tBcc %bb.11, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.10:
+ ; CHECK-NEXT: successors: %bb.12(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $lr = t2MOVi 2, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tB %bb.12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.11.bb68:
+ ; CHECK-NEXT: successors: %bb.12(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3 = nuw t2ADDri killed renamable $r8, 2, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = t2MOVi 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r1 = t2LDRs killed renamable $r1, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp69)
+ ; CHECK-NEXT: renamable $r2 = t2LDRs killed renamable $r2, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp71)
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tEOR killed renamable $r1, killed renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = t2LDRs renamable $r0, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp74)
+ ; CHECK-NEXT: renamable $r1 = nsw tADDhirr killed renamable $r1, killed renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2STRs killed renamable $r1, killed renamable $r0, killed renamable $r3, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.tmp74)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.12.bb27:
+ ; CHECK-NEXT: liveins: $lr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $sp = t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11, def $pc, implicit killed $r0
bb.0.bb:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-dec-liveout.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-dec-liveout.mir
index 98f6dbc486d4..6b29018d28d6 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-dec-liveout.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/loop-dec-liveout.mir
@@ -223,135 +223,160 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test1
; CHECK: bb.0.bb:
- ; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11
- ; CHECK: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $r8, killed $r9, killed $r10, killed $r11, killed $lr
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 36
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r11, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r10, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r9, -16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r8, -20
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -24
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -28
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -32
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -36
- ; CHECK: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 40
- ; CHECK: tCBZ $r3, %bb.3
- ; CHECK: bb.1.bb4:
- ; CHECK: successors: %bb.2(0x40000000), %bb.4(0x40000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r4, dead $cpsr = tSUBi3 renamable $r3, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r7 = t2ANDri renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tCMPi8 killed renamable $r4, 3, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tSTRspi killed renamable $r7, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
- ; CHECK: tBcc %bb.4, 2 /* CC::hs */, killed $cpsr
- ; CHECK: bb.2:
- ; CHECK: successors: %bb.6(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r8 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tB %bb.6, 14 /* CC::al */, $noreg
- ; CHECK: bb.3:
- ; CHECK: successors: %bb.12(0x80000000)
- ; CHECK: renamable $lr = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tB %bb.12, 14 /* CC::al */, $noreg
- ; CHECK: bb.4.bb12:
- ; CHECK: successors: %bb.5(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r8 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3 = nuw nsw t2ADDrs killed renamable $r4, killed renamable $r3, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: dead $lr = t2DLS renamable $r3
- ; CHECK: $lr = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: bb.5.bb28:
- ; CHECK: successors: %bb.5(0x7c000000), %bb.6(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r8
- ; CHECK: renamable $r5 = tLDRr renamable $r1, $r3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep617)
- ; CHECK: renamable $r7, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6 = tLDRr renamable $r2, $r3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep418)
- ; CHECK: renamable $r8 = nuw t2ADDri killed renamable $r8, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r5, dead $cpsr = tEOR killed renamable $r5, killed renamable $r6, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6 = tLDRr renamable $r0, $r3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep219)
- ; CHECK: renamable $r5 = nsw tADDhirr killed renamable $r5, killed renamable $r6, 14 /* CC::al */, $noreg
- ; CHECK: tSTRr killed renamable $r5, renamable $r0, $r3, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep219)
- ; CHECK: renamable $r5, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4 = tLDRi renamable $r7, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep11)
- ; CHECK: renamable $r6 = tLDRi renamable $r5, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep14)
- ; CHECK: renamable $r9 = t2EORrr killed renamable $r4, killed renamable $r6, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r6, dead $cpsr = tADDrr renamable $r0, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: $r11 = t2ADDri $r6, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 16, 14 /* CC::al */, $noreg
- ; CHECK: t2LDMIA killed $r11, 14 /* CC::al */, $noreg, def $r4, def $r10, def $r11 :: (load (s32) from %ir.scevgep9), (load (s32) from %ir.scevgep8), (load (s32) from %ir.scevgep1)
- ; CHECK: renamable $r4 = nsw tADDhirr killed renamable $r4, killed renamable $r9, 14 /* CC::al */, $noreg
- ; CHECK: tSTRi killed renamable $r4, renamable $r6, 1, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep9)
- ; CHECK: renamable $r9 = t2LDRi12 renamable $r5, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep12)
- ; CHECK: renamable $r4 = tLDRi renamable $r7, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep10)
- ; CHECK: renamable $r4 = t2EORrr killed renamable $r4, killed renamable $r9, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r4 = nsw tADDhirr killed renamable $r4, killed renamable $r10, 14 /* CC::al */, $noreg
- ; CHECK: tSTRi killed renamable $r4, renamable $r6, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep8)
- ; CHECK: renamable $r4 = tLDRi killed renamable $r5, 3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep5)
- ; CHECK: renamable $r5 = tLDRi killed renamable $r7, 3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
- ; CHECK: renamable $r4, dead $cpsr = tEOR killed renamable $r4, killed renamable $r5, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4 = nsw tADDhirr killed renamable $r4, killed renamable $r11, 14 /* CC::al */, $noreg
- ; CHECK: tSTRi killed renamable $r4, killed renamable $r6, 3, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep1)
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.5
- ; CHECK: bb.6.bb13:
- ; CHECK: successors: %bb.12(0x30000000), %bb.7(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r8
- ; CHECK: renamable $r5 = tLDRspi $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
- ; CHECK: tCBZ $r5, %bb.12
- ; CHECK: bb.7.bb16:
- ; CHECK: successors: %bb.8(0x40000000), %bb.9(0x40000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r5, $r8
- ; CHECK: renamable $lr = t2LDRs renamable $r1, renamable $r8, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp17)
- ; CHECK: tCMPi8 renamable $r5, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r3 = t2LDRs renamable $r2, renamable $r8, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp19)
- ; CHECK: renamable $lr = t2EORrr killed renamable $lr, killed renamable $r3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3 = t2LDRs renamable $r0, renamable $r8, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp22)
- ; CHECK: renamable $r3 = nsw tADDhirr killed renamable $r3, killed renamable $lr, 14 /* CC::al */, $noreg
- ; CHECK: t2STRs killed renamable $r3, renamable $r0, renamable $r8, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.tmp22)
- ; CHECK: tBcc %bb.9, 1 /* CC::ne */, killed $cpsr
- ; CHECK: bb.8:
- ; CHECK: successors: %bb.12(0x80000000)
- ; CHECK: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tB %bb.12, 14 /* CC::al */, $noreg
- ; CHECK: bb.9.bb57:
- ; CHECK: successors: %bb.10(0x40000000), %bb.11(0x40000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r5, $r8
- ; CHECK: renamable $r3 = nuw t2ADDri renamable $r8, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tCMPi8 killed renamable $r5, 2, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r7 = t2LDRs renamable $r1, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp58)
- ; CHECK: renamable $r6 = t2LDRs renamable $r2, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp60)
- ; CHECK: renamable $r7 = t2EORrr killed renamable $r7, killed renamable $r6, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r6 = t2LDRs renamable $r0, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp63)
- ; CHECK: renamable $r7 = nsw tADDhirr killed renamable $r7, killed renamable $r6, 14 /* CC::al */, $noreg
- ; CHECK: t2STRs killed renamable $r7, renamable $r0, killed renamable $r3, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.tmp63)
- ; CHECK: tBcc %bb.11, 1 /* CC::ne */, killed $cpsr
- ; CHECK: bb.10:
- ; CHECK: successors: %bb.12(0x80000000)
- ; CHECK: renamable $lr = t2MOVi 2, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tB %bb.12, 14 /* CC::al */, $noreg
- ; CHECK: bb.11.bb68:
- ; CHECK: successors: %bb.12(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r8
- ; CHECK: renamable $r3 = nuw t2ADDri killed renamable $r8, 2, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = t2MOVi 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r1 = t2LDRs killed renamable $r1, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp69)
- ; CHECK: renamable $r2 = t2LDRs killed renamable $r2, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp71)
- ; CHECK: renamable $r1, dead $cpsr = tEOR killed renamable $r1, killed renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = t2LDRs renamable $r0, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp74)
- ; CHECK: renamable $r1 = nsw tADDhirr killed renamable $r1, killed renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: t2STRs killed renamable $r1, killed renamable $r0, killed renamable $r3, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.tmp74)
- ; CHECK: bb.12.bb27:
- ; CHECK: liveins: $lr
- ; CHECK: $r0 = tMOVr killed $lr, 14 /* CC::al */, $noreg
- ; CHECK: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: $sp = t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $r8, killed $r9, killed $r10, killed $r11, killed $lr
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 36
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r11, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r10, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r9, -16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r8, -20
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -24
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r6, -28
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -32
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -36
+ ; CHECK-NEXT: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 40
+ ; CHECK-NEXT: tCBZ $r3, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.bb4:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.4(0x40000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tSUBi3 renamable $r3, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r7 = t2ANDri renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tCMPi8 killed renamable $r4, 3, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tSTRspi killed renamable $r7, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
+ ; CHECK-NEXT: tBcc %bb.4, 2 /* CC::hs */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.6(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r8 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tB %bb.6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: successors: %bb.12(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $lr = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tB %bb.12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.bb12:
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r8 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3 = nuw nsw t2ADDrs killed renamable $r4, killed renamable $r3, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r3
+ ; CHECK-NEXT: $lr = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5.bb28:
+ ; CHECK-NEXT: successors: %bb.5(0x7c000000), %bb.6(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r5 = tLDRr renamable $r1, $r3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep617)
+ ; CHECK-NEXT: renamable $r7, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6 = tLDRr renamable $r2, $r3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep418)
+ ; CHECK-NEXT: renamable $r8 = nuw t2ADDri killed renamable $r8, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r5, dead $cpsr = tEOR killed renamable $r5, killed renamable $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6 = tLDRr renamable $r0, $r3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep219)
+ ; CHECK-NEXT: renamable $r5 = nsw tADDhirr killed renamable $r5, killed renamable $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRr killed renamable $r5, renamable $r0, $r3, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep219)
+ ; CHECK-NEXT: renamable $r5, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4 = tLDRi renamable $r7, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep11)
+ ; CHECK-NEXT: renamable $r6 = tLDRi renamable $r5, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep14)
+ ; CHECK-NEXT: renamable $r9 = t2EORrr killed renamable $r4, killed renamable $r6, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r6, dead $cpsr = tADDrr renamable $r0, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r11 = t2ADDri $r6, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2LDMIA killed $r11, 14 /* CC::al */, $noreg, def $r4, def $r10, def $r11 :: (load (s32) from %ir.scevgep9), (load (s32) from %ir.scevgep8), (load (s32) from %ir.scevgep1)
+ ; CHECK-NEXT: renamable $r4 = nsw tADDhirr killed renamable $r4, killed renamable $r9, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRi killed renamable $r4, renamable $r6, 1, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep9)
+ ; CHECK-NEXT: renamable $r9 = t2LDRi12 renamable $r5, 8, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep12)
+ ; CHECK-NEXT: renamable $r4 = tLDRi renamable $r7, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep10)
+ ; CHECK-NEXT: renamable $r4 = t2EORrr killed renamable $r4, killed renamable $r9, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4 = nsw tADDhirr killed renamable $r4, killed renamable $r10, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRi killed renamable $r4, renamable $r6, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep8)
+ ; CHECK-NEXT: renamable $r4 = tLDRi killed renamable $r5, 3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep5)
+ ; CHECK-NEXT: renamable $r5 = tLDRi killed renamable $r7, 3, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tEOR killed renamable $r4, killed renamable $r5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4 = nsw tADDhirr killed renamable $r4, killed renamable $r11, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRi killed renamable $r4, killed renamable $r6, 3, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep1)
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6.bb13:
+ ; CHECK-NEXT: successors: %bb.12(0x30000000), %bb.7(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r5 = tLDRspi $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: tCBZ $r5, %bb.12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.7.bb16:
+ ; CHECK-NEXT: successors: %bb.8(0x40000000), %bb.9(0x40000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r5, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $lr = t2LDRs renamable $r1, renamable $r8, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp17)
+ ; CHECK-NEXT: tCMPi8 renamable $r5, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r3 = t2LDRs renamable $r2, renamable $r8, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp19)
+ ; CHECK-NEXT: renamable $lr = t2EORrr killed renamable $lr, killed renamable $r3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2LDRs renamable $r0, renamable $r8, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp22)
+ ; CHECK-NEXT: renamable $r3 = nsw tADDhirr killed renamable $r3, killed renamable $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2STRs killed renamable $r3, renamable $r0, renamable $r8, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.tmp22)
+ ; CHECK-NEXT: tBcc %bb.9, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.8:
+ ; CHECK-NEXT: successors: %bb.12(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tB %bb.12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.9.bb57:
+ ; CHECK-NEXT: successors: %bb.10(0x40000000), %bb.11(0x40000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r5, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3 = nuw t2ADDri renamable $r8, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tCMPi8 killed renamable $r5, 2, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r7 = t2LDRs renamable $r1, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp58)
+ ; CHECK-NEXT: renamable $r6 = t2LDRs renamable $r2, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp60)
+ ; CHECK-NEXT: renamable $r7 = t2EORrr killed renamable $r7, killed renamable $r6, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r6 = t2LDRs renamable $r0, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp63)
+ ; CHECK-NEXT: renamable $r7 = nsw tADDhirr killed renamable $r7, killed renamable $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2STRs killed renamable $r7, renamable $r0, killed renamable $r3, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.tmp63)
+ ; CHECK-NEXT: tBcc %bb.11, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.10:
+ ; CHECK-NEXT: successors: %bb.12(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $lr = t2MOVi 2, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tB %bb.12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.11.bb68:
+ ; CHECK-NEXT: successors: %bb.12(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3 = nuw t2ADDri killed renamable $r8, 2, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = t2MOVi 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r1 = t2LDRs killed renamable $r1, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp69)
+ ; CHECK-NEXT: renamable $r2 = t2LDRs killed renamable $r2, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp71)
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tEOR killed renamable $r1, killed renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = t2LDRs renamable $r0, renamable $r3, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.tmp74)
+ ; CHECK-NEXT: renamable $r1 = nsw tADDhirr killed renamable $r1, killed renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2STRs killed renamable $r1, killed renamable $r0, killed renamable $r3, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.tmp74)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.12.bb27:
+ ; CHECK-NEXT: liveins: $lr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $sp = t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11, def $pc, implicit killed $r0
bb.0.bb:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/lstp-insertion-position.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/lstp-insertion-position.mir
index d1374679f320..6c67084dd02d 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/lstp-insertion-position.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/lstp-insertion-position.mir
@@ -48,7 +48,6 @@
ret float %res.0.lcssa
}
- ; Function Attrs: norecurse nounwind readonly
define dso_local arm_aapcs_vfpcc float @insert_after_vdup_2(ptr nocapture readonly %a, ptr nocapture readonly %b, float %init, i32 %N) local_unnamed_addr #0 {
entry:
%shr = lshr i32 %N, 2
@@ -139,47 +138,55 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: insert_after_vdup_1
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7, $s0
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $s0, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $s0
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
- ; CHECK: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3 = tLDRpci %const.0, 14 /* CC::al */, $noreg :: (load (s32) from constant-pool)
- ; CHECK: renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: $s4 = VMOVS killed $s0, 14 /* CC::al */, $noreg, implicit killed $q1, implicit-def $q1
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q1, $r0, $r1, $r2
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: MVE_VPST 2, implicit $vpr
- ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv12, align 4)
- ; CHECK: renamable $r1, renamable $q2 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1315, align 4)
- ; CHECK: renamable $q1 = MVE_VFMAf32 killed renamable $q1, killed renamable $q2, killed renamable $q0, 1, killed renamable $vpr, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.middle.block:
- ; CHECK: liveins: $q1
- ; CHECK: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s6, renamable $s7, 14 /* CC::al */, $noreg
- ; CHECK: renamable $s2 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, killed renamable $s5, 14 /* CC::al */, $noreg, implicit killed $q1
- ; CHECK: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s2, killed renamable $s0, 14 /* CC::al */, $noreg
- ; CHECK: $sp = frame-destroy t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r7, def $lr
- ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit killed $s0
- ; CHECK: bb.4 (align 4):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 4
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7, $s0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $s0, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $s0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_register $r7
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3 = tLDRpci %const.0, 14 /* CC::al */, $noreg :: (load (s32) from constant-pool)
+ ; CHECK-NEXT: renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: $s4 = VMOVS killed $s0, 14 /* CC::al */, $noreg, implicit killed $q1, implicit-def $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q1, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: MVE_VPST 2, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv12, align 4)
+ ; CHECK-NEXT: renamable $r1, renamable $q2 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1315, align 4)
+ ; CHECK-NEXT: renamable $q1 = MVE_VFMAf32 killed renamable $q1, killed renamable $q2, killed renamable $q0, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.middle.block:
+ ; CHECK-NEXT: liveins: $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s6, renamable $s7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $s2 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, killed renamable $s5, 14 /* CC::al */, $noreg, implicit killed $q1
+ ; CHECK-NEXT: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s2, killed renamable $s0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $sp = frame-destroy t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r7, def $lr
+ ; CHECK-NEXT: tBX_RET 14 /* CC::al */, $noreg, implicit killed $s0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4 (align 4):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 4
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $s0, $lr
@@ -269,50 +276,58 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: insert_after_vdup_2
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7, $s0
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: t2CMPrs killed renamable $r3, renamable $r2, 19, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $s0, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $s0
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, renamable $r2, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3 = tLDRpci %const.0, 14 /* CC::al */, $noreg :: (load (s32) from constant-pool)
- ; CHECK: renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $r2, dead $cpsr = tLSRri killed renamable $r2, 2, 14 /* CC::al */, $noreg
- ; CHECK: $s4 = VMOVS killed $s0, 14 /* CC::al */, $noreg, implicit killed $q1, implicit-def $q1
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q1, $r0, $r1, $r2
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: MVE_VPST 2, implicit $vpr
- ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv13, align 4)
- ; CHECK: renamable $r1, renamable $q2 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1416, align 4)
- ; CHECK: renamable $q1 = MVE_VFMAf32 killed renamable $q1, killed renamable $q2, killed renamable $q0, 1, killed renamable $vpr, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.middle.block:
- ; CHECK: liveins: $q1
- ; CHECK: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s6, renamable $s7, 14 /* CC::al */, $noreg
- ; CHECK: renamable $s2 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, killed renamable $s5, 14 /* CC::al */, $noreg, implicit killed $q1
- ; CHECK: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s2, killed renamable $s0, 14 /* CC::al */, $noreg
- ; CHECK: $sp = frame-destroy t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r7, def $lr
- ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit killed $s0
- ; CHECK: bb.4 (align 4):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 4
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7, $s0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2CMPrs killed renamable $r3, renamable $r2, 19, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $s0, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $s0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_register $r7
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, renamable $r2, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3 = tLDRpci %const.0, 14 /* CC::al */, $noreg :: (load (s32) from constant-pool)
+ ; CHECK-NEXT: renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tLSRri killed renamable $r2, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $s4 = VMOVS killed $s0, 14 /* CC::al */, $noreg, implicit killed $q1, implicit-def $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q1, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: MVE_VPST 2, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv13, align 4)
+ ; CHECK-NEXT: renamable $r1, renamable $q2 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1416, align 4)
+ ; CHECK-NEXT: renamable $q1 = MVE_VFMAf32 killed renamable $q1, killed renamable $q2, killed renamable $q0, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.middle.block:
+ ; CHECK-NEXT: liveins: $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s6, renamable $s7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $s2 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, killed renamable $s5, 14 /* CC::al */, $noreg, implicit killed $q1
+ ; CHECK-NEXT: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s2, killed renamable $s0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $sp = frame-destroy t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r7, def $lr
+ ; CHECK-NEXT: tBX_RET 14 /* CC::al */, $noreg, implicit killed $s0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4 (align 4):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 4
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $s0, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/massive.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/massive.mir
index 2fb744e8e762..9448a1ab00d3 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/massive.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/massive.mir
@@ -2,7 +2,6 @@
# RUN: llc -mtriple=armv8.1m.main -mattr=+lob -run-pass=arm-low-overhead-loops --verify-machineinstrs %s -o - | FileCheck %s
--- |
- ; ModuleID = 'massive.ll'
source_filename = "massive.ll"
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "thumbv8.1m.main"
@@ -43,16 +42,12 @@
br i1 %4, label %for.body, label %for.cond.cleanup
}
- ; Function Attrs: nounwind
declare i32 @llvm.arm.space(i32 immarg, i32) #0
- ; Function Attrs: noduplicate nounwind
declare i32 @llvm.start.loop.iterations.i32(i32) #1
- ; Function Attrs: noduplicate nounwind
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #1
- ; Function Attrs: nounwind
declare void @llvm.stackprotector(ptr, ptr) #0
attributes #0 = { nounwind }
@@ -108,36 +103,42 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: massive
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.for.body.preheader:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0, dead $cpsr = tSUBi8 killed renamable $r0, 4, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: $lr = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.for.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: dead renamable $r3 = SPACE 4096, undef renamable $r0
- ; CHECK: renamable $r12, renamable $r1 = t2LDR_PRE killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
- ; CHECK: renamable $r3, renamable $r2 = t2LDR_PRE killed renamable $r2, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
- ; CHECK: renamable $r3 = nsw t2MUL killed renamable $r3, killed renamable $r12, 14 /* CC::al */, $noreg
- ; CHECK: early-clobber renamable $r0 = t2STR_PRE killed renamable $r3, killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep11)
- ; CHECK: renamable $lr = t2SUBri killed renamable $lr, 1, 14 /* CC::al */, $noreg, def $cpsr
- ; CHECK: t2Bcc %bb.2, 1 /* CC::ne */, killed $cpsr
- ; CHECK: tB %bb.3, 14 /* CC::al */, $noreg
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.for.body.preheader:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tSUBi8 killed renamable $r0, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.for.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: dead renamable $r3 = SPACE 4096, undef renamable $r0
+ ; CHECK-NEXT: renamable $r12, renamable $r1 = t2LDR_PRE killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
+ ; CHECK-NEXT: renamable $r3, renamable $r2 = t2LDR_PRE killed renamable $r2, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
+ ; CHECK-NEXT: renamable $r3 = nsw t2MUL killed renamable $r3, killed renamable $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: early-clobber renamable $r0 = t2STR_PRE killed renamable $r3, killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep11)
+ ; CHECK-NEXT: renamable $lr = t2SUBri killed renamable $lr, 1, 14 /* CC::al */, $noreg, def $cpsr
+ ; CHECK-NEXT: t2Bcc %bb.2, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NEXT: tB %bb.3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/matrix-debug.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/matrix-debug.mir
index 8cf8589041e3..2dfaf1732694 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/matrix-debug.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/matrix-debug.mir
@@ -202,76 +202,86 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test_debug
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x50000000), %bb.5(0x30000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $lr, $r8, $r9, $r10
- ; CHECK: DBG_VALUE $r0, $noreg, !23, !DIExpression(), debug-location !32
- ; CHECK: DBG_VALUE $r1, $noreg, !24, !DIExpression(), debug-location !32
- ; CHECK: DBG_VALUE $r2, $noreg, !25, !DIExpression(), debug-location !32
- ; CHECK: DBG_VALUE $r3, $noreg, !26, !DIExpression(), debug-location !32
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 20
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -20
- ; CHECK: $r7 = frame-setup tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa $r7, 8
- ; CHECK: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r8, killed $r9, killed $r10
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r10, -24
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r9, -28
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r8, -32
- ; CHECK: $r5 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: DBG_VALUE $r5, $noreg, !25, !DIExpression(), debug-location !32
- ; CHECK: $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg, debug-location !33
- ; CHECK: $r8 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: DBG_VALUE $r8, $noreg, !26, !DIExpression(), debug-location !32
- ; CHECK: $r9 = tMOVr $r1, 14 /* CC::al */, $noreg
- ; CHECK: DBG_VALUE $r9, $noreg, !24, !DIExpression(), debug-location !32
- ; CHECK: $r10 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: DBG_VALUE 0, $noreg, !29, !DIExpression(), debug-location !32
- ; CHECK: DBG_VALUE $r10, $noreg, !23, !DIExpression(), debug-location !32
- ; CHECK: tBL 14 /* CC::al */, $noreg, @get_input, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit killed $r0, implicit killed $r1, implicit killed $r2, implicit-def $sp, implicit-def dead $r0, debug-location !33
- ; CHECK: DBG_VALUE 0, $noreg, !30, !DIExpression(), debug-location !32
- ; CHECK: DBG_VALUE $noreg, $noreg, !28, !DIExpression(), debug-location !32
- ; CHECK: t2CMPri renamable $r10, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr, debug-location !37
- ; CHECK: tBcc %bb.5, 11 /* CC::lt */, killed $cpsr, debug-location !37
- ; CHECK: bb.1.for.cond1.preheader.us.preheader:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r5, $r8, $r9, $r10
- ; CHECK: renamable $r12 = t2LSLri renamable $r10, 1, 14 /* CC::al */, $noreg, $noreg, debug-location !37
- ; CHECK: renamable $r1, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.for.cond1.preheader.us:
- ; CHECK: successors: %bb.3(0x80000000)
- ; CHECK: liveins: $r1, $r5, $r8, $r9, $r10, $r12
- ; CHECK: DBG_VALUE $r1, $noreg, !30, !DIExpression(), debug-location !32
- ; CHECK: DBG_VALUE 0, $noreg, !31, !DIExpression(), debug-location !32
- ; CHECK: renamable $r2 = t2LDRs renamable $r9, renamable $r1, 2, 14 /* CC::al */, $noreg, debug-location !41 :: (load (s32) from %ir.arrayidx7.us)
- ; CHECK: $r3 = tMOVr $r5, 14 /* CC::al */, $noreg, debug-location !32
- ; CHECK: $r0 = tMOVr $r8, 14 /* CC::al */, $noreg, debug-location !32
- ; CHECK: dead $lr = tMOVr $r10, 14 /* CC::al */, $noreg, debug-location !32
- ; CHECK: $lr = t2DLS renamable $r10, debug-location !42
- ; CHECK: bb.3.for.body3.us:
- ; CHECK: successors: %bb.3(0x7c000000), %bb.4(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r5, $r8, $r9, $r10, $r12
- ; CHECK: DBG_VALUE $noreg, $noreg, !31, !DIExpression(), debug-location !32
- ; CHECK: renamable $r6, renamable $r3 = t2LDRSH_POST killed renamable $r3, 2, 14 /* CC::al */, $noreg, debug-location !43 :: (load (s16) from %ir.lsr.iv5)
- ; CHECK: renamable $r4, renamable $r0 = t2LDRSH_POST killed renamable $r0, 2, 14 /* CC::al */, $noreg, debug-location !44 :: (load (s16) from %ir.lsr.iv1)
- ; CHECK: renamable $r2 = nsw t2SMLABB killed renamable $r4, killed renamable $r6, killed renamable $r2, 14 /* CC::al */, $noreg, debug-location !41
- ; CHECK: DBG_VALUE $noreg, $noreg, !31, !DIExpression(DW_OP_plus_uconst, 1, DW_OP_stack_value), debug-location !32
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.3, debug-location !42
- ; CHECK: bb.4.for.cond1.for.inc9_crit_edge.us:
- ; CHECK: successors: %bb.5(0x04000000), %bb.2(0x7c000000)
- ; CHECK: liveins: $r1, $r2, $r5, $r8, $r9, $r10, $r12
- ; CHECK: t2STRs killed renamable $r2, renamable $r9, renamable $r1, 2, 14 /* CC::al */, $noreg, debug-location !41 :: (store (s32) into %ir.8)
- ; CHECK: renamable $r1, dead $cpsr = nuw nsw tADDi8 killed renamable $r1, 1, 14 /* CC::al */, $noreg, debug-location !49
- ; CHECK: DBG_VALUE $r1, $noreg, !30, !DIExpression(), debug-location !32
- ; CHECK: renamable $r5 = tADDhirr killed renamable $r5, renamable $r12, 14 /* CC::al */, $noreg, debug-location !37
- ; CHECK: tCMPhir renamable $r1, renamable $r10, 14 /* CC::al */, $noreg, implicit-def $cpsr, debug-location !37
- ; CHECK: tBcc %bb.2, 1 /* CC::ne */, killed $cpsr, debug-location !37
- ; CHECK: bb.5.for.end11:
- ; CHECK: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r8, def $r9, def $r10, debug-location !52
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc, debug-location !52
+ ; CHECK-NEXT: successors: %bb.1(0x50000000), %bb.5(0x30000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $lr, $r8, $r9, $r10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: DBG_VALUE $r0, $noreg, !23, !DIExpression(), debug-location !32
+ ; CHECK-NEXT: DBG_VALUE $r1, $noreg, !24, !DIExpression(), debug-location !32
+ ; CHECK-NEXT: DBG_VALUE $r2, $noreg, !25, !DIExpression(), debug-location !32
+ ; CHECK-NEXT: DBG_VALUE $r3, $noreg, !26, !DIExpression(), debug-location !32
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 20
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r6, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -20
+ ; CHECK-NEXT: $r7 = frame-setup tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa $r7, 8
+ ; CHECK-NEXT: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r8, killed $r9, killed $r10
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r10, -24
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r9, -28
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r8, -32
+ ; CHECK-NEXT: $r5 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: DBG_VALUE $r5, $noreg, !25, !DIExpression(), debug-location !32
+ ; CHECK-NEXT: $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg, debug-location !33
+ ; CHECK-NEXT: $r8 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: DBG_VALUE $r8, $noreg, !26, !DIExpression(), debug-location !32
+ ; CHECK-NEXT: $r9 = tMOVr $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: DBG_VALUE $r9, $noreg, !24, !DIExpression(), debug-location !32
+ ; CHECK-NEXT: $r10 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: DBG_VALUE 0, $noreg, !29, !DIExpression(), debug-location !32
+ ; CHECK-NEXT: DBG_VALUE $r10, $noreg, !23, !DIExpression(), debug-location !32
+ ; CHECK-NEXT: tBL 14 /* CC::al */, $noreg, @get_input, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit killed $r0, implicit killed $r1, implicit killed $r2, implicit-def $sp, implicit-def dead $r0, debug-location !33
+ ; CHECK-NEXT: DBG_VALUE 0, $noreg, !30, !DIExpression(), debug-location !32
+ ; CHECK-NEXT: DBG_VALUE $noreg, $noreg, !28, !DIExpression(), debug-location !32
+ ; CHECK-NEXT: t2CMPri renamable $r10, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr, debug-location !37
+ ; CHECK-NEXT: tBcc %bb.5, 11 /* CC::lt */, killed $cpsr, debug-location !37
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.for.cond1.preheader.us.preheader:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r5, $r8, $r9, $r10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r12 = t2LSLri renamable $r10, 1, 14 /* CC::al */, $noreg, $noreg, debug-location !37
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.for.cond1.preheader.us:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: liveins: $r1, $r5, $r8, $r9, $r10, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: DBG_VALUE $r1, $noreg, !30, !DIExpression(), debug-location !32
+ ; CHECK-NEXT: DBG_VALUE 0, $noreg, !31, !DIExpression(), debug-location !32
+ ; CHECK-NEXT: renamable $r2 = t2LDRs renamable $r9, renamable $r1, 2, 14 /* CC::al */, $noreg, debug-location !41 :: (load (s32) from %ir.arrayidx7.us)
+ ; CHECK-NEXT: $r3 = tMOVr $r5, 14 /* CC::al */, $noreg, debug-location !32
+ ; CHECK-NEXT: $r0 = tMOVr $r8, 14 /* CC::al */, $noreg, debug-location !32
+ ; CHECK-NEXT: dead $lr = tMOVr $r10, 14 /* CC::al */, $noreg, debug-location !32
+ ; CHECK-NEXT: $lr = t2DLS renamable $r10, debug-location !42
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.body3.us:
+ ; CHECK-NEXT: successors: %bb.3(0x7c000000), %bb.4(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r5, $r8, $r9, $r10, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: DBG_VALUE $noreg, $noreg, !31, !DIExpression(), debug-location !32
+ ; CHECK-NEXT: renamable $r6, renamable $r3 = t2LDRSH_POST killed renamable $r3, 2, 14 /* CC::al */, $noreg, debug-location !43 :: (load (s16) from %ir.lsr.iv5)
+ ; CHECK-NEXT: renamable $r4, renamable $r0 = t2LDRSH_POST killed renamable $r0, 2, 14 /* CC::al */, $noreg, debug-location !44 :: (load (s16) from %ir.lsr.iv1)
+ ; CHECK-NEXT: renamable $r2 = nsw t2SMLABB killed renamable $r4, killed renamable $r6, killed renamable $r2, 14 /* CC::al */, $noreg, debug-location !41
+ ; CHECK-NEXT: DBG_VALUE $noreg, $noreg, !31, !DIExpression(DW_OP_plus_uconst, 1, DW_OP_stack_value), debug-location !32
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.3, debug-location !42
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.for.cond1.for.inc9_crit_edge.us:
+ ; CHECK-NEXT: successors: %bb.5(0x04000000), %bb.2(0x7c000000)
+ ; CHECK-NEXT: liveins: $r1, $r2, $r5, $r8, $r9, $r10, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: t2STRs killed renamable $r2, renamable $r9, renamable $r1, 2, 14 /* CC::al */, $noreg, debug-location !41 :: (store (s32) into %ir.8)
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = nuw nsw tADDi8 killed renamable $r1, 1, 14 /* CC::al */, $noreg, debug-location !49
+ ; CHECK-NEXT: DBG_VALUE $r1, $noreg, !30, !DIExpression(), debug-location !32
+ ; CHECK-NEXT: renamable $r5 = tADDhirr killed renamable $r5, renamable $r12, 14 /* CC::al */, $noreg, debug-location !37
+ ; CHECK-NEXT: tCMPhir renamable $r1, renamable $r10, 14 /* CC::al */, $noreg, implicit-def $cpsr, debug-location !37
+ ; CHECK-NEXT: tBcc %bb.2, 1 /* CC::ne */, killed $cpsr, debug-location !37
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5.for.end11:
+ ; CHECK-NEXT: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r8, def $r9, def $r10, debug-location !52
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc, debug-location !52
bb.0.entry:
successors: %bb.1(0x50000000), %bb.5(0x30000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $lr, $r8, $r9, $r10
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/matrix.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/matrix.mir
index 4d3f2e2b10c6..cbed22ff10ef 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/matrix.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/matrix.mir
@@ -214,150 +214,179 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: matrix_test
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x50000000), %bb.12(0x30000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10
- ; CHECK: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $r8, killed $r9, killed $r10, killed $lr
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 32
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r10, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r9, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r8, -16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -20
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -24
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -28
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -32
- ; CHECK: tCMPi8 renamable $r0, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2Bcc %bb.12, 11 /* CC::lt */, killed $cpsr
- ; CHECK: bb.1.for.body.i.preheader:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: $r5 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: $r8 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: $r4 = tMOVr $r1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = IMPLICIT_DEF
- ; CHECK: $r10 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2DLS killed renamable $r0
- ; CHECK: bb.2.for.body.i:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r1, $r2, $r4, $r5, $r6, $r8, $r10
- ; CHECK: renamable $r3, renamable $r1 = t2LDR_POST killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.lsr.iv15)
- ; CHECK: renamable $r2 = nsw tADDhirr killed renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r7 = t2CSINC $zr, $zr, 13, implicit killed $cpsr
- ; CHECK: tCMPi8 killed renamable $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r3 = t2CSINC $zr, $zr, 13, implicit killed $cpsr
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r3 = t2ANDrr killed renamable $r3, killed renamable $r7, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: t2IT 12, 8, implicit-def $itstate
- ; CHECK: $r2 = tMOVi8 $noreg, 0, 12 /* CC::gt */, killed $cpsr, implicit killed renamable $r2, implicit killed $itstate
- ; CHECK: renamable $r6 = tADDhirr killed renamable $r6, killed renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.c.exit:
- ; CHECK: successors: %bb.4(0x50000000), %bb.14(0x30000000)
- ; CHECK: liveins: $r4, $r5, $r6, $r8, $r10
- ; CHECK: renamable $r0 = tSXTH killed renamable $r6, 14 /* CC::al */, $noreg
- ; CHECK: tBL 14 /* CC::al */, $noreg, @crc16, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit killed $r0, implicit-def $sp, implicit-def $r0
- ; CHECK: $r12 = tMOVr killed $r0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r7, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: t2CMPri $r10, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.14, 11 /* CC::lt */, killed $cpsr
- ; CHECK: bb.4.for.cond4.preheader.us.preheader:
- ; CHECK: successors: %bb.5(0x80000000)
- ; CHECK: liveins: $r4, $r5, $r7, $r8, $r10, $r12
- ; CHECK: renamable $r0 = t2ADDri $r10, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $lr = tMOVr $r10, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0 = t2BICri killed renamable $r0, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3 = t2LSLri $r10, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi3 killed renamable $r0, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VDUP32 renamable $r7, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r0 = nuw nsw t2ADDrs killed renamable $r0, renamable $r1, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tLSRri killed renamable $r1, 2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r9 = t2SUBrs $r10, killed renamable $r1, 18, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: bb.5.for.cond4.preheader.us:
- ; CHECK: successors: %bb.6(0x80000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r3, $r4, $r5, $r7, $r8, $r9, $r10, $r12
- ; CHECK: renamable $r1 = t2LDRs renamable $r4, renamable $r7, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.arrayidx12.us)
- ; CHECK: $q1 = MVE_VORR $q0, $q0, 0, $noreg, $noreg, undef $q1
- ; CHECK: $r2 = tMOVr killed $lr, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = MVE_VMOV_to_lane_32 killed renamable $q1, killed renamable $r1, 0, 14 /* CC::al */, $noreg
- ; CHECK: $r6 = tMOVr $r5, 14 /* CC::al */, $noreg
- ; CHECK: $r1 = tMOVr $r8, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2DLS renamable $r0
- ; CHECK: bb.6.vector.body:
- ; CHECK: successors: %bb.6(0x7c000000), %bb.7(0x04000000)
- ; CHECK: liveins: $lr, $q0, $q1, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r12
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
- ; CHECK: $q2 = MVE_VORR killed $q1, killed $q1, 0, $noreg, $noreg, undef $q2
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r6, renamable $q1 = MVE_VLDRHS32_post killed renamable $r6, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1012, align 2)
- ; CHECK: renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv46, align 2)
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $q1 = MVE_VADDi32 killed renamable $q1, renamable $q2, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.6
- ; CHECK: bb.7.middle.block:
- ; CHECK: successors: %bb.8(0x04000000), %bb.5(0x7c000000)
- ; CHECK: liveins: $q0, $q1, $q2, $r0, $r3, $r4, $r5, $r7, $r8, $r9, $r10, $r12
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r9, 0, $noreg, $noreg
- ; CHECK: renamable $r5 = tADDhirr killed renamable $r5, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = MVE_VPSEL killed renamable $q1, killed renamable $q2, 0, killed renamable $vpr, $noreg
- ; CHECK: $lr = tMOVr $r10, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = MVE_VADDVu32no_acc killed renamable $q1, 0, $noreg, $noreg
- ; CHECK: t2STRs killed renamable $r2, renamable $r4, renamable $r7, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.27)
- ; CHECK: renamable $r7, dead $cpsr = nuw nsw tADDi8 killed renamable $r7, 1, 14 /* CC::al */, $noreg
- ; CHECK: tCMPhir renamable $r7, $r10, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.5, 1 /* CC::ne */, killed $cpsr
- ; CHECK: bb.8.for.end16:
- ; CHECK: successors: %bb.9(0x50000000), %bb.13(0x30000000)
- ; CHECK: liveins: $lr, $r4, $r12
- ; CHECK: t2CMPri renamable $lr, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.13, 11 /* CC::lt */, killed $cpsr
- ; CHECK: bb.9.for.body.i57.preheader:
- ; CHECK: successors: %bb.10(0x80000000)
- ; CHECK: liveins: $lr, $r4, $r12
- ; CHECK: renamable $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1 = IMPLICIT_DEF
- ; CHECK: bb.10.for.body.i57:
- ; CHECK: successors: %bb.10(0x7c000000), %bb.11(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r4, $r12
- ; CHECK: renamable $r2, renamable $r4 = t2LDR_POST killed renamable $r4, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.lsr.iv1)
- ; CHECK: renamable $r1 = nsw tADDhirr killed renamable $r1, renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: tCMPi8 renamable $r1, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r3 = t2CSINC $zr, $zr, 13, implicit killed $cpsr
- ; CHECK: tCMPi8 killed renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r2 = t2CSINC $zr, $zr, 13, implicit killed $cpsr
- ; CHECK: tCMPi8 renamable $r1, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r2 = t2ANDrr killed renamable $r2, killed renamable $r3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: t2IT 12, 8, implicit-def $itstate
- ; CHECK: $r1 = tMOVi8 $noreg, 0, 12 /* CC::gt */, killed $cpsr, implicit killed renamable $r1, implicit killed $itstate
- ; CHECK: renamable $r0 = tADDhirr killed renamable $r0, killed renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.10
- ; CHECK: bb.11.c.exit59.loopexit:
- ; CHECK: successors: %bb.14(0x80000000)
- ; CHECK: liveins: $r0, $r12
- ; CHECK: renamable $r7 = tSXTH killed renamable $r0, 14 /* CC::al */, $noreg
- ; CHECK: tB %bb.14, 14 /* CC::al */, $noreg
- ; CHECK: bb.12.c.exit.thread:
- ; CHECK: successors: %bb.14(0x80000000)
- ; CHECK: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r7, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: tBL 14 /* CC::al */, $noreg, @crc16, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit killed $r0, implicit-def $sp, implicit-def $r0
- ; CHECK: $r12 = tMOVr killed $r0, 14 /* CC::al */, $noreg
- ; CHECK: tB %bb.14, 14 /* CC::al */, $noreg
- ; CHECK: bb.13:
- ; CHECK: successors: %bb.14(0x80000000)
- ; CHECK: liveins: $r12
- ; CHECK: renamable $r7, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: bb.14.c.exit59:
- ; CHECK: liveins: $r7, $r12
- ; CHECK: $r0 = tMOVr killed $r7, 14 /* CC::al */, $noreg
- ; CHECK: $r1 = tMOVr killed $r12, 14 /* CC::al */, $noreg
- ; CHECK: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $lr
- ; CHECK: tTAILJMPdND @crc16, 14 /* CC::al */, $noreg, implicit $sp, implicit $sp, implicit killed $r0, implicit killed $r1
+ ; CHECK-NEXT: successors: %bb.1(0x50000000), %bb.12(0x30000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $r8, killed $r9, killed $r10, killed $lr
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 32
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r10, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r9, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r8, -16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -20
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r6, -24
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -28
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -32
+ ; CHECK-NEXT: tCMPi8 renamable $r0, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2Bcc %bb.12, 11 /* CC::lt */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.for.body.i.preheader:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r5 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r8 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r4 = tMOVr $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = IMPLICIT_DEF
+ ; CHECK-NEXT: $r10 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2DLS killed renamable $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.for.body.i:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r1, $r2, $r4, $r5, $r6, $r8, $r10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, renamable $r1 = t2LDR_POST killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.lsr.iv15)
+ ; CHECK-NEXT: renamable $r2 = nsw tADDhirr killed renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r7 = t2CSINC $zr, $zr, 13, implicit killed $cpsr
+ ; CHECK-NEXT: tCMPi8 killed renamable $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r3 = t2CSINC $zr, $zr, 13, implicit killed $cpsr
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r3 = t2ANDrr killed renamable $r3, killed renamable $r7, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: t2IT 12, 8, implicit-def $itstate
+ ; CHECK-NEXT: $r2 = tMOVi8 $noreg, 0, 12 /* CC::gt */, killed $cpsr, implicit killed renamable $r2, implicit killed $itstate
+ ; CHECK-NEXT: renamable $r6 = tADDhirr killed renamable $r6, killed renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.c.exit:
+ ; CHECK-NEXT: successors: %bb.4(0x50000000), %bb.14(0x30000000)
+ ; CHECK-NEXT: liveins: $r4, $r5, $r6, $r8, $r10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0 = tSXTH killed renamable $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tBL 14 /* CC::al */, $noreg, @crc16, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit killed $r0, implicit-def $sp, implicit-def $r0
+ ; CHECK-NEXT: $r12 = tMOVr killed $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r7, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2CMPri $r10, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.14, 11 /* CC::lt */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.for.cond4.preheader.us.preheader:
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
+ ; CHECK-NEXT: liveins: $r4, $r5, $r7, $r8, $r10, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0 = t2ADDri $r10, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $lr = tMOVr $r10, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0 = t2BICri killed renamable $r0, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2LSLri $r10, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi3 killed renamable $r0, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VDUP32 renamable $r7, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r0 = nuw nsw t2ADDrs killed renamable $r0, renamable $r1, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tLSRri killed renamable $r1, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r9 = t2SUBrs $r10, killed renamable $r1, 18, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5.for.cond4.preheader.us:
+ ; CHECK-NEXT: successors: %bb.6(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r3, $r4, $r5, $r7, $r8, $r9, $r10, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1 = t2LDRs renamable $r4, renamable $r7, 2, 14 /* CC::al */, $noreg :: (load (s32) from %ir.arrayidx12.us)
+ ; CHECK-NEXT: $q1 = MVE_VORR $q0, $q0, 0, $noreg, $noreg, undef $q1
+ ; CHECK-NEXT: $r2 = tMOVr killed $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VMOV_to_lane_32 killed renamable $q1, killed renamable $r1, 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r6 = tMOVr $r5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r1 = tMOVr $r8, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2DLS renamable $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6.vector.body:
+ ; CHECK-NEXT: successors: %bb.6(0x7c000000), %bb.7(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $q1, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
+ ; CHECK-NEXT: $q2 = MVE_VORR killed $q1, killed $q1, 0, $noreg, $noreg, undef $q2
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r6, renamable $q1 = MVE_VLDRHS32_post killed renamable $r6, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1012, align 2)
+ ; CHECK-NEXT: renamable $r1, renamable $q3 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv46, align 2)
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = nsw MVE_VMULi32 killed renamable $q3, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $q1 = MVE_VADDi32 killed renamable $q1, renamable $q2, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.6
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.7.middle.block:
+ ; CHECK-NEXT: successors: %bb.8(0x04000000), %bb.5(0x7c000000)
+ ; CHECK-NEXT: liveins: $q0, $q1, $q2, $r0, $r3, $r4, $r5, $r7, $r8, $r9, $r10, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r9, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r5 = tADDhirr killed renamable $r5, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VPSEL killed renamable $q1, killed renamable $q2, 0, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: $lr = tMOVr $r10, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = MVE_VADDVu32no_acc killed renamable $q1, 0, $noreg, $noreg
+ ; CHECK-NEXT: t2STRs killed renamable $r2, renamable $r4, renamable $r7, 2, 14 /* CC::al */, $noreg :: (store (s32) into %ir.27)
+ ; CHECK-NEXT: renamable $r7, dead $cpsr = nuw nsw tADDi8 killed renamable $r7, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tCMPhir renamable $r7, $r10, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.5, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.8.for.end16:
+ ; CHECK-NEXT: successors: %bb.9(0x50000000), %bb.13(0x30000000)
+ ; CHECK-NEXT: liveins: $lr, $r4, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: t2CMPri renamable $lr, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.13, 11 /* CC::lt */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.9.for.body.i57.preheader:
+ ; CHECK-NEXT: successors: %bb.10(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r4, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1 = IMPLICIT_DEF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.10.for.body.i57:
+ ; CHECK-NEXT: successors: %bb.10(0x7c000000), %bb.11(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r4, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r2, renamable $r4 = t2LDR_POST killed renamable $r4, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.lsr.iv1)
+ ; CHECK-NEXT: renamable $r1 = nsw tADDhirr killed renamable $r1, renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tCMPi8 renamable $r1, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r3 = t2CSINC $zr, $zr, 13, implicit killed $cpsr
+ ; CHECK-NEXT: tCMPi8 killed renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r2 = t2CSINC $zr, $zr, 13, implicit killed $cpsr
+ ; CHECK-NEXT: tCMPi8 renamable $r1, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r2 = t2ANDrr killed renamable $r2, killed renamable $r3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: t2IT 12, 8, implicit-def $itstate
+ ; CHECK-NEXT: $r1 = tMOVi8 $noreg, 0, 12 /* CC::gt */, killed $cpsr, implicit killed renamable $r1, implicit killed $itstate
+ ; CHECK-NEXT: renamable $r0 = tADDhirr killed renamable $r0, killed renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.11.c.exit59.loopexit:
+ ; CHECK-NEXT: successors: %bb.14(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r7 = tSXTH killed renamable $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tB %bb.14, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.12.c.exit.thread:
+ ; CHECK-NEXT: successors: %bb.14(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r7, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tBL 14 /* CC::al */, $noreg, @crc16, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit killed $r0, implicit-def $sp, implicit-def $r0
+ ; CHECK-NEXT: $r12 = tMOVr killed $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tB %bb.14, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.13:
+ ; CHECK-NEXT: successors: %bb.14(0x80000000)
+ ; CHECK-NEXT: liveins: $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r7, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.14.c.exit59:
+ ; CHECK-NEXT: liveins: $r7, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $r7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r1 = tMOVr killed $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $lr
+ ; CHECK-NEXT: tTAILJMPdND @crc16, 14 /* CC::al */, $noreg, implicit $sp, implicit $sp, implicit killed $r0, implicit killed $r1
bb.0.entry:
successors: %bb.1(0x50000000), %bb.12(0x30000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dls.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dls.mir
index 1b6737c9073e..29406adc596f 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dls.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dls.mir
@@ -85,25 +85,29 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: do_copy
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $lr = t2DLS $r0
- ; CHECK: $lr = tMOVr killed $r0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi3 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: bb.1.while.body:
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1
- ; CHECK: renamable $r2, renamable $r1 = t2LDR_PRE killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep6)
- ; CHECK: early-clobber renamable $r0 = t2STR_PRE killed renamable $r2, killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep2)
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.1
- ; CHECK: bb.2.while.end:
- ; CHECK: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $lr = t2DLS $r0
+ ; CHECK-NEXT: $lr = tMOVr killed $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi3 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.while.body:
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r2, renamable $r1 = t2LDR_PRE killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep6)
+ ; CHECK-NEXT: early-clobber renamable $r0 = t2STR_PRE killed renamable $r2, killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep2)
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.while.end:
+ ; CHECK-NEXT: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dlstp.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dlstp.mir
index 0580a9725a26..cc39f9850d95 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dlstp.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dlstp.mir
@@ -73,25 +73,18 @@
ret void
}
- ; Function Attrs: nounwind readnone
declare <4 x float> @llvm.arm.mve.sub.predicated.v4f32.v4i1(<4 x float>, <4 x float>, <4 x i1>, <4 x float>) #1
- ; Function Attrs: nounwind readnone
declare <4 x float> @llvm.arm.mve.fma.predicated.v4f32.v4i1(<4 x float>, <4 x float>, <4 x float>, <4 x i1>) #1
- ; Function Attrs: nounwind readnone
declare <4 x i1> @llvm.arm.mve.vctp32(i32) #1
- ; Function Attrs: argmemonly nounwind readonly willreturn
declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>) #2
- ; Function Attrs: nounwind readnone
declare <4 x float> @llvm.arm.mve.add.predicated.v4f32.v4i1(<4 x float>, <4 x float>, <4 x i1>, <4 x float>) #1
- ; Function Attrs: noduplicate nounwind
declare i32 @llvm.start.loop.iterations.i32(i32) #3
- ; Function Attrs: noduplicate nounwind
declare i32 @llvm.loop.decrement.reg.i32(i32, i32) #3
attributes #0 = { "target-features"="+mve.fp" }
@@ -148,55 +141,64 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: arm_var_f32_mve
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $r3 = tMOVr $r1, 14 /* CC::al */, $noreg
- ; CHECK: $r12 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3
- ; CHECK: $r4 = tMOVr $lr, 14 /* CC::al */, $noreg
- ; CHECK: bb.1.do.body.i:
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r4, $r12
- ; CHECK: renamable $r12, renamable $q1 = MVE_VLDRWU32_post killed renamable $r12, 16, 0, $noreg, $noreg :: (load (s128) from %ir.pSrc.addr.0.i2, align 4)
- ; CHECK: renamable $q0 = nnan ninf nsz arcp contract afn reassoc MVE_VADDf32 killed renamable $q0, killed renamable $q1, 0, killed $noreg, $noreg, killed renamable $q0
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.1
- ; CHECK: bb.2.arm_mean_f32_mve.exit:
- ; CHECK: successors: %bb.3(0x80000000)
- ; CHECK: liveins: $q0, $r0, $r1, $r2, $r4
- ; CHECK: $s4 = VMOVSR $r1, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s3, killed renamable $s3, 14 /* CC::al */, $noreg, implicit killed $q0
- ; CHECK: $lr = t2DLS killed $r4
- ; CHECK: renamable $s4 = VUITOS killed renamable $s4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VDIVS killed renamable $s0, killed renamable $s4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = VMOVRS killed renamable $s0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: $r3 = tMOVr $r1, 14 /* CC::al */, $noreg
- ; CHECK: bb.3.do.body:
- ; CHECK: successors: %bb.3(0x7c000000), %bb.4(0x04000000)
- ; CHECK: liveins: $lr, $q0, $q1, $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
- ; CHECK: MVE_VPST 2, implicit $vpr
- ; CHECK: renamable $r0, renamable $q2 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.pSrc.addr.01, align 4)
- ; CHECK: renamable $q2 = nnan ninf nsz arcp contract afn reassoc MVE_VSUBf32 killed renamable $q2, renamable $q1, 1, renamable $vpr, $noreg, undef renamable $q2
- ; CHECK: renamable $q0 = nnan ninf nsz arcp contract afn reassoc MVE_VFMAf32 killed renamable $q0, killed renamable $q2, killed renamable $q2, 1, killed renamable $vpr, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.3
- ; CHECK: bb.4.do.end:
- ; CHECK: liveins: $q0, $r1, $r2
- ; CHECK: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s3, killed renamable $s3, 14 /* CC::al */, $noreg, implicit killed $q0
- ; CHECK: $s2 = VMOVSR killed $r0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $s2 = VUITOS killed renamable $s2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VDIVS killed renamable $s0, killed renamable $s2, 14 /* CC::al */, $noreg
- ; CHECK: VSTRS killed renamable $s0, killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.pResult)
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $r3 = tMOVr $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r12 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r3
+ ; CHECK-NEXT: $r4 = tMOVr $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.do.body.i:
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1, $r2, $r4, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r12, renamable $q1 = MVE_VLDRWU32_post killed renamable $r12, 16, 0, $noreg, $noreg :: (load (s128) from %ir.pSrc.addr.0.i2, align 4)
+ ; CHECK-NEXT: renamable $q0 = nnan ninf nsz arcp contract afn reassoc MVE_VADDf32 killed renamable $q0, killed renamable $q1, 0, killed $noreg, $noreg, killed renamable $q0
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.arm_mean_f32_mve.exit:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: liveins: $q0, $r0, $r1, $r2, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $s4 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s3, killed renamable $s3, 14 /* CC::al */, $noreg, implicit killed $q0
+ ; CHECK-NEXT: $lr = t2DLS killed $r4
+ ; CHECK-NEXT: renamable $s4 = VUITOS killed renamable $s4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VDIVS killed renamable $s0, killed renamable $s4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = VMOVRS killed renamable $s0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: $r3 = tMOVr $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.do.body:
+ ; CHECK-NEXT: successors: %bb.3(0x7c000000), %bb.4(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $q1, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: MVE_VPST 2, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q2 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.pSrc.addr.01, align 4)
+ ; CHECK-NEXT: renamable $q2 = nnan ninf nsz arcp contract afn reassoc MVE_VSUBf32 killed renamable $q2, renamable $q1, 1, renamable $vpr, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $q0 = nnan ninf nsz arcp contract afn reassoc MVE_VFMAf32 killed renamable $q0, killed renamable $q2, killed renamable $q2, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.do.end:
+ ; CHECK-NEXT: liveins: $q0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s3, killed renamable $s3, 14 /* CC::al */, $noreg, implicit killed $q0
+ ; CHECK-NEXT: $s2 = VMOVSR killed $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $s2 = VUITOS killed renamable $s2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VDIVS killed renamable $s0, killed renamable $s2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: VSTRS killed renamable $s0, killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.pResult)
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r4, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-lr-terminator.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-lr-terminator.mir
index ae8acddcd14b..4f96e04f0fa8 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-lr-terminator.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-lr-terminator.mir
@@ -99,35 +99,41 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: start_before_elems
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: t2CMPrs killed renamable $r12, renamable $r3, 11, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r12 = t2LSRri killed renamable $r3, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r12
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: renamable $r4, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VLDRBU32 killed renamable $r4, 0, 0, $noreg, $noreg :: (load (s32) from %ir.scevgep45, align 1)
- ; CHECK: renamable $r4, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = MVE_VLDRBU32 killed renamable $r4, 0, 0, $noreg, $noreg :: (load (s32) from %ir.scevgep23, align 1)
- ; CHECK: renamable $q0 = nuw nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv1, align 4)
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: t2CMPrs killed renamable $r12, renamable $r3, 11, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r12 = t2LSRri killed renamable $r3, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRBU32 killed renamable $r4, 0, 0, $noreg, $noreg :: (load (s32) from %ir.scevgep45, align 1)
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VLDRBU32 killed renamable $r4, 0, 0, $noreg, $noreg :: (load (s32) from %ir.scevgep23, align 1)
+ ; CHECK-NEXT: renamable $q0 = nuw nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv1, align 4)
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r4, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/move-def-before-start.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/move-def-before-start.mir
index e9f0dbe1901f..80dc97b52d79 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/move-def-before-start.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/move-def-before-start.mir
@@ -104,46 +104,52 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: start_before_elems
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: t2CMPrs killed renamable $r12, renamable $r3, 11, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r12 = t2MOVi 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = nuw t2ADDrs killed renamable $r12, renamable $r3, 11, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $r12 = t2MOVr killed $r3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2LSRri killed renamable $r12, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r12
- ; CHECK: renamable $r4, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $q0 = MVE_VLDRBU32 killed renamable $r4, 0, 1, renamable $vpr, $noreg :: (load (s32) from %ir.scevgep45, align 1)
- ; CHECK: renamable $r4, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $q1 = MVE_VLDRBU32 killed renamable $r4, 0, 1, renamable $vpr, $noreg :: (load (s32) from %ir.scevgep23, align 1)
- ; CHECK: renamable $q0 = nuw nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1, align 4)
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: t2CMPrs killed renamable $r12, renamable $r3, 11, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r12 = t2MOVi 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = nuw t2ADDrs killed renamable $r12, renamable $r3, 11, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $r12 = t2MOVr killed $r3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2LSRri killed renamable $r12, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRBU32 killed renamable $r4, 0, 1, renamable $vpr, $noreg :: (load (s32) from %ir.scevgep45, align 1)
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $q1 = MVE_VLDRBU32 killed renamable $r4, 0, 1, renamable $vpr, $noreg :: (load (s32) from %ir.scevgep23, align 1)
+ ; CHECK-NEXT: renamable $q0 = nuw nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1, align 4)
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r4, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/move-start-after-def.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/move-start-after-def.mir
index fef4daf5b6bb..9d9170f1e6ad 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/move-start-after-def.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/move-start-after-def.mir
@@ -104,46 +104,52 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: start_before_elems
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: t2CMPrs killed renamable $r12, renamable $r3, 11, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r12 = t2MOVi 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = nuw t2ADDrs killed renamable $r12, renamable $r3, 11, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $r12 = t2MOVr killed $r3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2LSRri killed renamable $r12, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r12
- ; CHECK: renamable $r4, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $q0 = MVE_VLDRBU32 killed renamable $r4, 0, 1, renamable $vpr, $noreg :: (load (s32) from %ir.scevgep45, align 1)
- ; CHECK: renamable $r4, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $q1 = MVE_VLDRBU32 killed renamable $r4, 0, 1, renamable $vpr, $noreg :: (load (s32) from %ir.scevgep23, align 1)
- ; CHECK: renamable $q0 = nuw nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1, align 4)
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: t2CMPrs killed renamable $r12, renamable $r3, 11, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r12 = t2MOVi 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = nuw t2ADDrs killed renamable $r12, renamable $r3, 11, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $r12 = t2MOVr killed $r3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2LSRri killed renamable $r12, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRBU32 killed renamable $r4, 0, 1, renamable $vpr, $noreg :: (load (s32) from %ir.scevgep45, align 1)
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $q1 = MVE_VLDRBU32 killed renamable $r4, 0, 1, renamable $vpr, $noreg :: (load (s32) from %ir.scevgep23, align 1)
+ ; CHECK-NEXT: renamable $q0 = nuw nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1, align 4)
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r4, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multi-block-cond-iter-count.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multi-block-cond-iter-count.mir
index 6b16683ba646..945f9d8172e0 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multi-block-cond-iter-count.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multi-block-cond-iter-count.mir
@@ -192,118 +192,138 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: multi_cond_iter_count
; CHECK: bb.0 (%ir-block.4):
- ; CHECK: successors: %bb.4(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8, $r9, $r10
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 20
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -20
- ; CHECK: dead $r7 = frame-setup tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa $r7, 8
- ; CHECK: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r8, killed $r9, killed $r10
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r10, -24
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r9, -28
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r8, -32
- ; CHECK: tCMPi8 renamable $r3, 2, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: $r12 = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: t2IT 1, 8, implicit-def $itstate
- ; CHECK: $r12 = t2MOVi 4, 1 /* CC::ne */, killed $cpsr, $noreg, implicit killed renamable $r12, implicit killed $itstate
- ; CHECK: tCMPi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: $r12 = t2MOVi 1, 0 /* CC::eq */, killed $cpsr, $noreg, implicit killed renamable $r12, implicit killed $itstate
- ; CHECK: renamable $r3 = t2LSLrr killed renamable $r2, killed renamable $r12, 14 /* CC::al */, $noreg, def $cpsr
- ; CHECK: tBcc %bb.4, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.1 (%ir-block.11):
- ; CHECK: successors: %bb.2(0x55555555), %bb.5(0x2aaaaaab)
- ; CHECK: liveins: $r0, $r1, $r3
- ; CHECK: renamable $r2 = t2ADDrs renamable $r1, renamable $r3, 18, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tCMPr killed renamable $r2, renamable $r0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 8, 4, implicit-def $itstate
- ; CHECK: renamable $r2 = t2ADDrs renamable $r0, renamable $r3, 18, 8 /* CC::hi */, $cpsr, $noreg, implicit $itstate
- ; CHECK: tCMPr killed renamable $r2, renamable $r1, 8 /* CC::hi */, killed $cpsr, implicit-def $cpsr, implicit killed $itstate
- ; CHECK: tBcc %bb.5, 8 /* CC::hi */, killed $cpsr
- ; CHECK: bb.2 (%ir-block.32):
- ; CHECK: successors: %bb.3(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r3
- ; CHECK: $r2 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3
- ; CHECK: bb.3 (%ir-block.33):
- ; CHECK: successors: %bb.3(0x7c000000), %bb.4(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg
- ; CHECK: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg, $noreg
- ; CHECK: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 0, killed $noreg, $noreg
- ; CHECK: $r0 = tMOVr $r2, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.3
- ; CHECK: bb.4 (%ir-block.64):
- ; CHECK: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r8, def $r9, def $r10
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
- ; CHECK: bb.5 (%ir-block.23):
- ; CHECK: successors: %bb.6(0x40000000), %bb.7(0x40000000)
- ; CHECK: liveins: $r0, $r1, $r3
- ; CHECK: renamable $r2, dead $cpsr = tSUBi3 renamable $r3, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2ANDri renamable $r3, 2, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tCMPi8 killed renamable $r2, 3, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.7, 2 /* CC::hs */, killed $cpsr
- ; CHECK: bb.6:
- ; CHECK: successors: %bb.9(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r12
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: tB %bb.9, 14 /* CC::al */, $noreg
- ; CHECK: bb.7 (%ir-block.31):
- ; CHECK: successors: %bb.8(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r3, $r12
- ; CHECK: renamable $r2 = t2BICri killed renamable $r3, 2, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: bb.8 (%ir-block.65):
- ; CHECK: successors: %bb.8(0x7c000000), %bb.9(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r12
- ; CHECK: renamable $r4 = tLDRr renamable $r1, $r2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3, dead $cpsr = nuw tADDi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r5 = tLDRr renamable $r0, $r2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4, dead $cpsr = nsw tMUL killed renamable $r5, killed renamable $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r5, dead $cpsr = tADDrr renamable $r0, renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: $r10, $r8 = t2LDRDi8 $r5, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r9 = t2LDRi12 renamable $r5, 12, 14 /* CC::al */, $noreg
- ; CHECK: tSTRr killed renamable $r4, renamable $r0, $r2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4, dead $cpsr = tADDrr renamable $r1, renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tADDi8 killed renamable $r2, 16, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6 = tLDRi renamable $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6 = nsw t2MUL killed renamable $r10, killed renamable $r6, 14 /* CC::al */, $noreg
- ; CHECK: tSTRi killed renamable $r6, renamable $r5, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6 = tLDRi renamable $r4, 2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r6 = nsw t2MUL killed renamable $r8, killed renamable $r6, 14 /* CC::al */, $noreg
- ; CHECK: tSTRi killed renamable $r6, renamable $r5, 2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4 = tLDRi killed renamable $r4, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4 = nsw t2MUL killed renamable $r9, killed renamable $r4, 14 /* CC::al */, $noreg
- ; CHECK: tSTRi killed renamable $r4, killed renamable $r5, 3, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.8
- ; CHECK: bb.9 (%ir-block.49):
- ; CHECK: successors: %bb.4(0x40000000), %bb.10(0x40000000)
- ; CHECK: liveins: $r0, $r1, $r3, $r12
- ; CHECK: t2CMPri killed renamable $r12, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.4, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.10 (%ir-block.52):
- ; CHECK: liveins: $r0, $r1, $r3
- ; CHECK: renamable $r12 = t2LDRs renamable $r1, renamable $r3, 2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = t2LDRs renamable $r0, renamable $r3, 2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = nsw t2MUL killed renamable $r2, killed renamable $r12, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tADDi3 renamable $r3, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = t2LDRs renamable $r0, renamable $r2, 2, 14 /* CC::al */, $noreg
- ; CHECK: t2STRs killed renamable $r12, renamable $r0, killed renamable $r3, 2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1 = t2LDRs killed renamable $r1, renamable $r2, 2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1 = nsw t2MUL killed renamable $lr, killed renamable $r1, 14 /* CC::al */, $noreg
- ; CHECK: t2STRs killed renamable $r1, killed renamable $r0, killed renamable $r2, 2, 14 /* CC::al */, $noreg
- ; CHECK: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r8, def $r9, def $r10
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.4(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8, $r9, $r10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 20
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r6, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -20
+ ; CHECK-NEXT: dead $r7 = frame-setup tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa $r7, 8
+ ; CHECK-NEXT: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r8, killed $r9, killed $r10
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r10, -24
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r9, -28
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r8, -32
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 2, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: $r12 = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2IT 1, 8, implicit-def $itstate
+ ; CHECK-NEXT: $r12 = t2MOVi 4, 1 /* CC::ne */, killed $cpsr, $noreg, implicit killed renamable $r12, implicit killed $itstate
+ ; CHECK-NEXT: tCMPi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: $r12 = t2MOVi 1, 0 /* CC::eq */, killed $cpsr, $noreg, implicit killed renamable $r12, implicit killed $itstate
+ ; CHECK-NEXT: renamable $r3 = t2LSLrr killed renamable $r2, killed renamable $r12, 14 /* CC::al */, $noreg, def $cpsr
+ ; CHECK-NEXT: tBcc %bb.4, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1 (%ir-block.11):
+ ; CHECK-NEXT: successors: %bb.2(0x55555555), %bb.5(0x2aaaaaab)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r2 = t2ADDrs renamable $r1, renamable $r3, 18, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tCMPr killed renamable $r2, renamable $r0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 8, 4, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r2 = t2ADDrs renamable $r0, renamable $r3, 18, 8 /* CC::hi */, $cpsr, $noreg, implicit $itstate
+ ; CHECK-NEXT: tCMPr killed renamable $r2, renamable $r1, 8 /* CC::hi */, killed $cpsr, implicit-def $cpsr, implicit killed $itstate
+ ; CHECK-NEXT: tBcc %bb.5, 8 /* CC::hi */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2 (%ir-block.32):
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r2 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3 (%ir-block.33):
+ ; CHECK-NEXT: successors: %bb.3(0x7c000000), %bb.4(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 0, killed $noreg, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4 (%ir-block.64):
+ ; CHECK-NEXT: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r8, def $r9, def $r10
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5 (%ir-block.23):
+ ; CHECK-NEXT: successors: %bb.6(0x40000000), %bb.7(0x40000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi3 renamable $r3, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2ANDri renamable $r3, 2, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tCMPi8 killed renamable $r2, 3, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.7, 2 /* CC::hs */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6:
+ ; CHECK-NEXT: successors: %bb.9(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tB %bb.9, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.7 (%ir-block.31):
+ ; CHECK-NEXT: successors: %bb.8(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r2 = t2BICri killed renamable $r3, 2, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.8 (%ir-block.65):
+ ; CHECK-NEXT: successors: %bb.8(0x7c000000), %bb.9(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r4 = tLDRr renamable $r1, $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nuw tADDi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r5 = tLDRr renamable $r0, $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = nsw tMUL killed renamable $r5, killed renamable $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r5, dead $cpsr = tADDrr renamable $r0, renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r10, $r8 = t2LDRDi8 $r5, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r9 = t2LDRi12 renamable $r5, 12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRr killed renamable $r4, renamable $r0, $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tADDrr renamable $r1, renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tADDi8 killed renamable $r2, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6 = tLDRi renamable $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6 = nsw t2MUL killed renamable $r10, killed renamable $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRi killed renamable $r6, renamable $r5, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6 = tLDRi renamable $r4, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r6 = nsw t2MUL killed renamable $r8, killed renamable $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRi killed renamable $r6, renamable $r5, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4 = tLDRi killed renamable $r4, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4 = nsw t2MUL killed renamable $r9, killed renamable $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRi killed renamable $r4, killed renamable $r5, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.9 (%ir-block.49):
+ ; CHECK-NEXT: successors: %bb.4(0x40000000), %bb.10(0x40000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: t2CMPri killed renamable $r12, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.4, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.10 (%ir-block.52):
+ ; CHECK-NEXT: liveins: $r0, $r1, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r12 = t2LDRs renamable $r1, renamable $r3, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = t2LDRs renamable $r0, renamable $r3, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = nsw t2MUL killed renamable $r2, killed renamable $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tADDi3 renamable $r3, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = t2LDRs renamable $r0, renamable $r2, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2STRs killed renamable $r12, renamable $r0, killed renamable $r3, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1 = t2LDRs killed renamable $r1, renamable $r2, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1 = nsw t2MUL killed renamable $lr, killed renamable $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2STRs killed renamable $r1, killed renamable $r0, killed renamable $r2, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r8, def $r9, def $r10
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
bb.0 (%ir-block.4):
successors: %bb.4(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $lr, $r8, $r9, $r10
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multi-cond-iter-count.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multi-cond-iter-count.mir
index a530c1ec5557..9206be68f981 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multi-cond-iter-count.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multi-cond-iter-count.mir
@@ -78,40 +78,46 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: multi_cond_iter_count
; CHECK: bb.0 (%ir-block.4):
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
- ; CHECK: tCMPi8 renamable $r3, 2, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: $r12 = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: t2IT 1, 8, implicit-def $itstate
- ; CHECK: $r12 = t2MOVi 4, 1 /* CC::ne */, killed $cpsr, $noreg, implicit killed renamable $r12, implicit killed $itstate
- ; CHECK: tCMPi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: $r12 = t2MOVi 1, 0 /* CC::eq */, killed $cpsr, $noreg, implicit killed renamable $r12, implicit killed $itstate
- ; CHECK: renamable $r2 = t2LSLrr killed renamable $r2, killed renamable $r12, 14 /* CC::al */, $noreg, def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1 (%ir-block.17):
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r2
- ; CHECK: bb.2 (%ir-block.18):
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r3
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg
- ; CHECK: renamable $r3, renamable $q1 = MVE_VLDRWU32_post killed renamable $r3, 16, 0, $noreg, $noreg
- ; CHECK: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 0, killed $noreg, $noreg
- ; CHECK: $r0 = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3 (%ir-block.34):
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_register $r7
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 2, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: $r12 = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2IT 1, 8, implicit-def $itstate
+ ; CHECK-NEXT: $r12 = t2MOVi 4, 1 /* CC::ne */, killed $cpsr, $noreg, implicit killed renamable $r12, implicit killed $itstate
+ ; CHECK-NEXT: tCMPi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: $r12 = t2MOVi 1, 0 /* CC::eq */, killed $cpsr, $noreg, implicit killed renamable $r12, implicit killed $itstate
+ ; CHECK-NEXT: renamable $r2 = t2LSLrr killed renamable $r2, killed renamable $r12, 14 /* CC::al */, $noreg, def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1 (%ir-block.17):
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2 (%ir-block.18):
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, renamable $q1 = MVE_VLDRWU32_post killed renamable $r3, 16, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 0, killed $noreg, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3 (%ir-block.34):
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0 (%ir-block.4):
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiblock-massive.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiblock-massive.mir
index f4377a399612..06dae765c8e3 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiblock-massive.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiblock-massive.mir
@@ -42,16 +42,12 @@
br i1 %exitcond, label %for.cond.cleanup, label %for.body
}
- ; Function Attrs: nounwind
declare i32 @llvm.arm.space(i32 immarg, i32) #0
- ; Function Attrs: noduplicate nounwind
declare i32 @llvm.start.loop.iterations.i32(i32) #1
- ; Function Attrs: noduplicate nounwind
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #1
- ; Function Attrs: nounwind
declare void @llvm.stackprotector(ptr, ptr) #0
attributes #0 = { nounwind }
@@ -107,49 +103,59 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: size_limit
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: tCMPi8 $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
- ; CHECK: bb.1.for.body.preheader:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: dead $lr = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: $lr = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: tB %bb.2, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.for.end:
- ; CHECK: successors: %bb.5(0x04000000), %bb.3(0x7c000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: renamable $r1, dead $cpsr = tADDi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tADDi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0, dead $cpsr = tADDi8 killed renamable $r0, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = t2SUBri killed renamable $lr, 1, 14 /* CC::al */, $noreg, def $cpsr
- ; CHECK: tBcc %bb.3, 1 /* CC::ne */, killed $cpsr
- ; CHECK: t2B %bb.5, 14 /* CC::al */, $noreg
- ; CHECK: bb.3.for.body:
- ; CHECK: successors: %bb.4(0x50000000), %bb.2(0x30000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: dead renamable $r3 = SPACE 3072, undef renamable $r0
- ; CHECK: renamable $r3 = tLDRi renamable $r1, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.lsr.iv4)
- ; CHECK: renamable $r12 = t2LDRi12 renamable $r2, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.lsr.iv2)
- ; CHECK: tCMPi8 renamable $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r4 = nsw t2MUL renamable $r12, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: tSTRi killed renamable $r4, renamable $r0, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.lsr.iv1)
- ; CHECK: t2Bcc %bb.2, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.4.middle.block:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r12
- ; CHECK: renamable $r3 = t2UDIV killed renamable $r12, killed renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: tSTRi killed renamable $r3, renamable $r0, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.lsr.iv1)
- ; CHECK: dead renamable $r3 = SPACE 1024, undef renamable $r0
- ; CHECK: t2B %bb.2, 14 /* CC::al */, $noreg
- ; CHECK: bb.5.for.cond.cleanup:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: tCMPi8 $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.for.body.preheader:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: dead $lr = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tB %bb.2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.for.end:
+ ; CHECK-NEXT: successors: %bb.5(0x04000000), %bb.3(0x7c000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tADDi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tADDi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tADDi8 killed renamable $r0, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = t2SUBri killed renamable $lr, 1, 14 /* CC::al */, $noreg, def $cpsr
+ ; CHECK-NEXT: tBcc %bb.3, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NEXT: t2B %bb.5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.body:
+ ; CHECK-NEXT: successors: %bb.4(0x50000000), %bb.2(0x30000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: dead renamable $r3 = SPACE 3072, undef renamable $r0
+ ; CHECK-NEXT: renamable $r3 = tLDRi renamable $r1, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.lsr.iv4)
+ ; CHECK-NEXT: renamable $r12 = t2LDRi12 renamable $r2, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.lsr.iv2)
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r4 = nsw t2MUL renamable $r12, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRi killed renamable $r4, renamable $r0, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.lsr.iv1)
+ ; CHECK-NEXT: t2Bcc %bb.2, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.middle.block:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3 = t2UDIV killed renamable $r12, killed renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRi killed renamable $r3, renamable $r0, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.lsr.iv1)
+ ; CHECK-NEXT: dead renamable $r3 = SPACE 1024, undef renamable $r0
+ ; CHECK-NEXT: t2B %bb.2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5.for.cond.cleanup:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r4, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiple-do-loops.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiple-do-loops.mir
index c2f8cc04bffe..8e5172cadc32 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiple-do-loops.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiple-do-loops.mir
@@ -80,7 +80,6 @@
for.cond.cleanup6: ; preds = %vector.body38, %entry, %for.cond4.preheader
ret void
}
- ; Function Attrs: nofree norecurse nounwind
define dso_local arm_aapcs_vfpcc void @test2(ptr noalias nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) local_unnamed_addr {
entry:
%div = lshr i32 %N, 1
@@ -160,7 +159,6 @@
for.cond.cleanup6: ; preds = %vector.body38, %for.cond4.preheader
ret void
}
- ; Function Attrs: nofree norecurse nounwind
define dso_local arm_aapcs_vfpcc void @test3(ptr noalias nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) local_unnamed_addr {
entry:
%cmp54 = icmp eq i32 %N, 0
@@ -346,60 +344,72 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test1
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.6(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 20
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -20
- ; CHECK: dead $r7 = frame-setup tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa $r7, 8
- ; CHECK: early-clobber $sp = frame-setup t2STR_PRE killed $r8, $sp, -4, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r8, -24
- ; CHECK: tCMPi8 renamable $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.6, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: $r8 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: $r5 = tMOVr $r2, 14 /* CC::al */, $noreg
- ; CHECK: $r4 = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: $r6 = tMOVr $r1, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r4
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r5, $r6, $r8
- ; CHECK: renamable $r6, renamable $q0 = MVE_VLDRWU32_post killed renamable $r6, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv6264, align 4)
- ; CHECK: renamable $r5, renamable $q1 = MVE_VLDRWU32_post killed renamable $r5, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv6567, align 4)
- ; CHECK: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r8 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r8, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv6870, align 4)
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond4.preheader:
- ; CHECK: successors: %bb.6(0x30000000), %bb.4(0x50000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: tCBZ $r3, %bb.6
- ; CHECK: bb.4.vector.ph39:
- ; CHECK: successors: %bb.5(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: $r12 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3
- ; CHECK: bb.5.vector.body38:
- ; CHECK: successors: %bb.5(0x7c000000), %bb.6(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r12
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv55, align 4)
- ; CHECK: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv5658, align 4)
- ; CHECK: renamable $r12, renamable $q2 = MVE_VLDRWU32_post killed renamable $r12, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv5961, align 4)
- ; CHECK: renamable $q0 = MVE_VEOR killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $q0 = nsw MVE_VADDi32 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv5961, align 4)
- ; CHECK: $r0 = tMOVr $r12, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.5
- ; CHECK: bb.6.for.cond.cleanup6:
- ; CHECK: $r8, $sp = t2LDR_POST $sp, 4, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.6(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 20
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r6, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -20
+ ; CHECK-NEXT: dead $r7 = frame-setup tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa $r7, 8
+ ; CHECK-NEXT: early-clobber $sp = frame-setup t2STR_PRE killed $r8, $sp, -4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r8, -24
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.6, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r8 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r5 = tMOVr $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r4 = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r6 = tMOVr $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r5, $r6, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r6, renamable $q0 = MVE_VLDRWU32_post killed renamable $r6, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv6264, align 4)
+ ; CHECK-NEXT: renamable $r5, renamable $q1 = MVE_VLDRWU32_post killed renamable $r5, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv6567, align 4)
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r8 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r8, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv6870, align 4)
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond4.preheader:
+ ; CHECK-NEXT: successors: %bb.6(0x30000000), %bb.4(0x50000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCBZ $r3, %bb.6
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.vector.ph39:
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r12 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5.vector.body38:
+ ; CHECK-NEXT: successors: %bb.5(0x7c000000), %bb.6(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv55, align 4)
+ ; CHECK-NEXT: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv5658, align 4)
+ ; CHECK-NEXT: renamable $r12, renamable $q2 = MVE_VLDRWU32_post killed renamable $r12, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv5961, align 4)
+ ; CHECK-NEXT: renamable $q0 = MVE_VEOR killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VADDi32 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv5961, align 4)
+ ; CHECK-NEXT: $r0 = tMOVr $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6.for.cond.cleanup6:
+ ; CHECK-NEXT: $r8, $sp = t2LDR_POST $sp, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
bb.0.entry:
successors: %bb.6(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $lr, $r8
@@ -549,61 +559,73 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test2
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 20
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -20
- ; CHECK: dead $r7 = frame-setup tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa $r7, 8
- ; CHECK: early-clobber $sp = frame-setup t2STR_PRE killed $r8, $sp, -4, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r8, -24
- ; CHECK: renamable $r6, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: t2CMPrs killed renamable $r6, renamable $r3, 11, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.3, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r4, dead $cpsr = tLSRri renamable $r3, 1, 14 /* CC::al */, $noreg
- ; CHECK: $r8 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: $r5 = tMOVr $r1, 14 /* CC::al */, $noreg
- ; CHECK: $r6 = tMOVr $r2, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r4
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r5, $r6, $r8
- ; CHECK: renamable $r5, renamable $q0 = MVE_VLDRWU32_post killed renamable $r5, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv6264, align 4)
- ; CHECK: renamable $r6, renamable $q1 = MVE_VLDRWU32_post killed renamable $r6, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv6567, align 4)
- ; CHECK: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r8 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r8, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv6870, align 4)
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond4.preheader:
- ; CHECK: successors: %bb.6(0x30000000), %bb.4(0x50000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: tCBZ $r3, %bb.6
- ; CHECK: bb.4.vector.ph39:
- ; CHECK: successors: %bb.5(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: $r4 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3
- ; CHECK: bb.5.vector.body38:
- ; CHECK: successors: %bb.5(0x7c000000), %bb.6(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r4
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv55, align 4)
- ; CHECK: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv5658, align 4)
- ; CHECK: renamable $r4, renamable $q2 = MVE_VLDRWU32_post killed renamable $r4, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv5961, align 4)
- ; CHECK: renamable $q0 = MVE_VEOR killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $q0 = nsw MVE_VADDi32 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv5961, align 4)
- ; CHECK: $r0 = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.5
- ; CHECK: bb.6.for.cond.cleanup6:
- ; CHECK: $r8, $sp = t2LDR_POST $sp, 4, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 20
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r6, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -20
+ ; CHECK-NEXT: dead $r7 = frame-setup tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa $r7, 8
+ ; CHECK-NEXT: early-clobber $sp = frame-setup t2STR_PRE killed $r8, $sp, -4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r8, -24
+ ; CHECK-NEXT: renamable $r6, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2CMPrs killed renamable $r6, renamable $r3, 11, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.3, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tLSRri renamable $r3, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r8 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r5 = tMOVr $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r6 = tMOVr $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r5, $r6, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r5, renamable $q0 = MVE_VLDRWU32_post killed renamable $r5, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv6264, align 4)
+ ; CHECK-NEXT: renamable $r6, renamable $q1 = MVE_VLDRWU32_post killed renamable $r6, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv6567, align 4)
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r8 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r8, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv6870, align 4)
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond4.preheader:
+ ; CHECK-NEXT: successors: %bb.6(0x30000000), %bb.4(0x50000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCBZ $r3, %bb.6
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.vector.ph39:
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r4 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5.vector.body38:
+ ; CHECK-NEXT: successors: %bb.5(0x7c000000), %bb.6(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv55, align 4)
+ ; CHECK-NEXT: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv5658, align 4)
+ ; CHECK-NEXT: renamable $r4, renamable $q2 = MVE_VLDRWU32_post killed renamable $r4, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv5961, align 4)
+ ; CHECK-NEXT: renamable $q0 = MVE_VEOR killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VADDi32 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv5961, align 4)
+ ; CHECK-NEXT: $r0 = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6.for.cond.cleanup6:
+ ; CHECK-NEXT: $r8, $sp = t2LDR_POST $sp, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
bb.0.entry:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $lr, $r8
@@ -763,88 +785,106 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test3
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.9(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8, $r9, $r10
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 20
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -20
- ; CHECK: dead $r7 = frame-setup tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa $r7, 8
- ; CHECK: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r8, killed $r9, killed $r10
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r10, -24
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r9, -28
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r8, -32
- ; CHECK: tCMPi8 renamable $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.9, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: $r8 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: $r5 = tMOVr $r2, 14 /* CC::al */, $noreg
- ; CHECK: $r4 = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: $r6 = tMOVr $r1, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r4
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r5, $r6, $r8
- ; CHECK: renamable $r6, renamable $q0 = MVE_VLDRWU32_post killed renamable $r6, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv117119, align 4)
- ; CHECK: renamable $r5, renamable $q1 = MVE_VLDRWU32_post killed renamable $r5, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv120122, align 4)
- ; CHECK: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r8 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r8, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv123125, align 4)
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond4.preheader:
- ; CHECK: successors: %bb.6(0x30000000), %bb.4(0x50000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r6, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: t2CMPrs killed renamable $r6, renamable $r3, 11, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.6, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.4.vector.ph66:
- ; CHECK: successors: %bb.5(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r5, dead $cpsr = tLSRri renamable $r3, 1, 14 /* CC::al */, $noreg
- ; CHECK: $r10 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: $r9 = tMOVr $r2, 14 /* CC::al */, $noreg
- ; CHECK: $r4 = tMOVr $r1, 14 /* CC::al */, $noreg
- ; CHECK: $r6 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r5
- ; CHECK: bb.5.vector.body65:
- ; CHECK: successors: %bb.5(0x7c000000), %bb.6(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r6, $r9, $r10
- ; CHECK: renamable $r4, renamable $q0 = MVE_VLDRWU32_post killed renamable $r4, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv108110, align 4)
- ; CHECK: renamable $r9, renamable $q1 = MVE_VLDRWU32_post killed renamable $r9, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv111113, align 4)
- ; CHECK: renamable $r6, renamable $q2 = MVE_VLDRWU32_post killed renamable $r6, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv114116, align 4)
- ; CHECK: renamable $q0 = MVE_VEOR killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $q0 = nsw MVE_VADDi32 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: MVE_VSTRWU32 killed renamable $q0, killed renamable $r10, 0, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv114116, align 4)
- ; CHECK: $r10 = tMOVr $r6, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.5
- ; CHECK: bb.6.for.cond15.preheader:
- ; CHECK: successors: %bb.9(0x30000000), %bb.7(0x50000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: tCBZ $r3, %bb.9
- ; CHECK: bb.7.vector.ph85:
- ; CHECK: successors: %bb.8(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: $r5 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3
- ; CHECK: bb.8.vector.body84:
- ; CHECK: successors: %bb.8(0x7c000000), %bb.9(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r5
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv101, align 4)
- ; CHECK: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv102104, align 4)
- ; CHECK: renamable $r5, renamable $q2 = MVE_VLDRWU32_post killed renamable $r5, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv105107, align 4)
- ; CHECK: renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $q0 = MVE_VSUBi32 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv105107, align 4)
- ; CHECK: $r0 = tMOVr $r5, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.8
- ; CHECK: bb.9.for.cond.cleanup17:
- ; CHECK: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r8, def $r9, def $r10
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.9(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8, $r9, $r10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 20
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r6, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -20
+ ; CHECK-NEXT: dead $r7 = frame-setup tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa $r7, 8
+ ; CHECK-NEXT: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r8, killed $r9, killed $r10
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r10, -24
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r9, -28
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r8, -32
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.9, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r8 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r5 = tMOVr $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r4 = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r6 = tMOVr $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r5, $r6, $r8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r6, renamable $q0 = MVE_VLDRWU32_post killed renamable $r6, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv117119, align 4)
+ ; CHECK-NEXT: renamable $r5, renamable $q1 = MVE_VLDRWU32_post killed renamable $r5, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv120122, align 4)
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r8 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r8, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv123125, align 4)
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond4.preheader:
+ ; CHECK-NEXT: successors: %bb.6(0x30000000), %bb.4(0x50000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r6, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2CMPrs killed renamable $r6, renamable $r3, 11, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.6, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.vector.ph66:
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r5, dead $cpsr = tLSRri renamable $r3, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r10 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r9 = tMOVr $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r4 = tMOVr $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r6 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5.vector.body65:
+ ; CHECK-NEXT: successors: %bb.5(0x7c000000), %bb.6(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r6, $r9, $r10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r4, renamable $q0 = MVE_VLDRWU32_post killed renamable $r4, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv108110, align 4)
+ ; CHECK-NEXT: renamable $r9, renamable $q1 = MVE_VLDRWU32_post killed renamable $r9, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv111113, align 4)
+ ; CHECK-NEXT: renamable $r6, renamable $q2 = MVE_VLDRWU32_post killed renamable $r6, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv114116, align 4)
+ ; CHECK-NEXT: renamable $q0 = MVE_VEOR killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VADDi32 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q0, killed renamable $r10, 0, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv114116, align 4)
+ ; CHECK-NEXT: $r10 = tMOVr $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6.for.cond15.preheader:
+ ; CHECK-NEXT: successors: %bb.9(0x30000000), %bb.7(0x50000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCBZ $r3, %bb.9
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.7.vector.ph85:
+ ; CHECK-NEXT: successors: %bb.8(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r5 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.8.vector.body84:
+ ; CHECK-NEXT: successors: %bb.8(0x7c000000), %bb.9(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv101, align 4)
+ ; CHECK-NEXT: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv102104, align 4)
+ ; CHECK-NEXT: renamable $r5, renamable $q2 = MVE_VLDRWU32_post killed renamable $r5, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv105107, align 4)
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $q0 = MVE_VSUBi32 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv105107, align 4)
+ ; CHECK-NEXT: $r0 = tMOVr $r5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.9.for.cond.cleanup17:
+ ; CHECK-NEXT: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r8, def $r9, def $r10
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
bb.0.entry:
successors: %bb.9(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $lr, $r8, $r9, $r10
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec-cbnz.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec-cbnz.mir
index 0c50a954ddfd..15719baece36 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec-cbnz.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec-cbnz.mir
@@ -109,117 +109,160 @@ machineFunctionInfo: {}
body: |
; CHECK-LOB-LABEL: name: search
; CHECK-LOB: bb.0.entry:
- ; CHECK-LOB: successors: %bb.1(0x50000000), %bb.5(0x30000000)
- ; CHECK-LOB: liveins: $r0, $r1
- ; CHECK-LOB: renamable $r2 = t2LDRSHi12 renamable $r1, 2, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx)
- ; CHECK-LOB: t2CMPri renamable $r2, -1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK-LOB: tBcc %bb.5, 13 /* CC::le */, killed $cpsr
- ; CHECK-LOB: bb.1.while.cond.preheader:
- ; CHECK-LOB: successors: %bb.9(0x30000000), %bb.2(0x50000000)
- ; CHECK-LOB: liveins: $r0, $r2
- ; CHECK-LOB: tCBZ renamable $r0, %bb.9
- ; CHECK-LOB: bb.2.land.rhs.preheader:
- ; CHECK-LOB: successors: %bb.3(0x80000000)
- ; CHECK-LOB: liveins: $r0, $r2
- ; CHECK-LOB: renamable $r1 = tUXTH killed renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK-LOB: bb.3.land.rhs:
- ; CHECK-LOB: successors: %bb.4(0x80000000)
- ; CHECK-LOB: liveins: $r0, $r1
- ; CHECK-LOB: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info2)
- ; CHECK-LOB: renamable $r2 = tLDRHi killed renamable $r2, 1, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx3)
- ; CHECK-LOB: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK-LOB: t2IT 0, 8, implicit-def $itstate
- ; CHECK-LOB: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK-LOB: bb.4.while.body:
- ; CHECK-LOB: successors: %bb.9(0x04000000), %bb.3(0x7c000000)
- ; CHECK-LOB: liveins: $r0, $r1
- ; CHECK-LOB: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next4)
- ; CHECK-LOB: tCBNZ renamable $r0, %bb.9
- ; CHECK-LOB: t2LE %bb.3
- ; CHECK-LOB: bb.5.while.cond9.preheader:
- ; CHECK-LOB: successors: %bb.9(0x30000000), %bb.6(0x50000000)
- ; CHECK-LOB: liveins: $r0, $r1
- ; CHECK-LOB: tCBZ renamable $r0, %bb.9
- ; CHECK-LOB: bb.6.land.rhs11.lr.ph:
- ; CHECK-LOB: successors: %bb.7(0x80000000)
- ; CHECK-LOB: liveins: $r0, $r1
- ; CHECK-LOB: renamable $r1 = t2LDRSHi12 killed renamable $r1, 0, 14 /* CC::al */, $noreg :: (load (s16) from %ir.data16143)
- ; CHECK-LOB: bb.7.land.rhs11:
- ; CHECK-LOB: successors: %bb.10(0x04000000), %bb.8(0x7c000000)
- ; CHECK-LOB: liveins: $r0, $r1
- ; CHECK-LOB: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info12)
- ; CHECK-LOB: renamable $r2 = tLDRBi killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (load (s8) from %ir.data165, align 2)
- ; CHECK-LOB: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK-LOB: tBcc %bb.10, 0 /* CC::eq */, killed $cpsr
- ; CHECK-LOB: bb.8.while.body19:
- ; CHECK-LOB: successors: %bb.9(0x04000000), %bb.7(0x7c000000)
- ; CHECK-LOB: liveins: $r0, $r1
- ; CHECK-LOB: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next206)
- ; CHECK-LOB: tCBZ renamable $r0, %bb.9
- ; CHECK-LOB: t2LE %bb.7
- ; CHECK-LOB: bb.9:
- ; CHECK-LOB: successors: %bb.10(0x80000000)
- ; CHECK-LOB: renamable $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK-LOB: bb.10.return:
- ; CHECK-LOB: liveins: $r0
- ; CHECK-LOB: tBX_RET 14 /* CC::al */, $noreg, implicit killed $r0
+ ; CHECK-LOB-NEXT: successors: %bb.1(0x50000000), %bb.5(0x30000000)
+ ; CHECK-LOB-NEXT: liveins: $r0, $r1
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: renamable $r2 = t2LDRSHi12 renamable $r1, 2, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx)
+ ; CHECK-LOB-NEXT: t2CMPri renamable $r2, -1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-LOB-NEXT: tBcc %bb.5, 13 /* CC::le */, killed $cpsr
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.1.while.cond.preheader:
+ ; CHECK-LOB-NEXT: successors: %bb.9(0x30000000), %bb.2(0x50000000)
+ ; CHECK-LOB-NEXT: liveins: $r0, $r2
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: tCBZ renamable $r0, %bb.9
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.2.land.rhs.preheader:
+ ; CHECK-LOB-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-LOB-NEXT: liveins: $r0, $r2
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: renamable $r1 = tUXTH killed renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.3.land.rhs:
+ ; CHECK-LOB-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-LOB-NEXT: liveins: $r0, $r1
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info2)
+ ; CHECK-LOB-NEXT: renamable $r2 = tLDRHi killed renamable $r2, 1, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx3)
+ ; CHECK-LOB-NEXT: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-LOB-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-LOB-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.4.while.body:
+ ; CHECK-LOB-NEXT: successors: %bb.9(0x04000000), %bb.3(0x7c000000)
+ ; CHECK-LOB-NEXT: liveins: $r0, $r1
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next4)
+ ; CHECK-LOB-NEXT: tCBNZ renamable $r0, %bb.9
+ ; CHECK-LOB-NEXT: t2LE %bb.3
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.5.while.cond9.preheader:
+ ; CHECK-LOB-NEXT: successors: %bb.9(0x30000000), %bb.6(0x50000000)
+ ; CHECK-LOB-NEXT: liveins: $r0, $r1
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: tCBZ renamable $r0, %bb.9
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.6.land.rhs11.lr.ph:
+ ; CHECK-LOB-NEXT: successors: %bb.7(0x80000000)
+ ; CHECK-LOB-NEXT: liveins: $r0, $r1
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: renamable $r1 = t2LDRSHi12 killed renamable $r1, 0, 14 /* CC::al */, $noreg :: (load (s16) from %ir.data16143)
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.7.land.rhs11:
+ ; CHECK-LOB-NEXT: successors: %bb.10(0x04000000), %bb.8(0x7c000000)
+ ; CHECK-LOB-NEXT: liveins: $r0, $r1
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info12)
+ ; CHECK-LOB-NEXT: renamable $r2 = tLDRBi killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (load (s8) from %ir.data165, align 2)
+ ; CHECK-LOB-NEXT: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-LOB-NEXT: tBcc %bb.10, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.8.while.body19:
+ ; CHECK-LOB-NEXT: successors: %bb.9(0x04000000), %bb.7(0x7c000000)
+ ; CHECK-LOB-NEXT: liveins: $r0, $r1
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next206)
+ ; CHECK-LOB-NEXT: tCBZ renamable $r0, %bb.9
+ ; CHECK-LOB-NEXT: t2LE %bb.7
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.9:
+ ; CHECK-LOB-NEXT: successors: %bb.10(0x80000000)
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: renamable $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.10.return:
+ ; CHECK-LOB-NEXT: liveins: $r0
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: tBX_RET 14 /* CC::al */, $noreg, implicit killed $r0
+ ;
; CHECK-NOLOB-LABEL: name: search
; CHECK-NOLOB: bb.0.entry:
- ; CHECK-NOLOB: successors: %bb.1(0x50000000), %bb.5(0x30000000)
- ; CHECK-NOLOB: liveins: $r0, $r1
- ; CHECK-NOLOB: renamable $r2 = t2LDRSHi12 renamable $r1, 2, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx)
- ; CHECK-NOLOB: t2CMPri renamable $r2, -1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK-NOLOB: tBcc %bb.5, 13 /* CC::le */, killed $cpsr
- ; CHECK-NOLOB: bb.1.while.cond.preheader:
- ; CHECK-NOLOB: successors: %bb.9(0x30000000), %bb.2(0x50000000)
- ; CHECK-NOLOB: liveins: $r0, $r2
- ; CHECK-NOLOB: tCBZ renamable $r0, %bb.9
- ; CHECK-NOLOB: bb.2.land.rhs.preheader:
- ; CHECK-NOLOB: successors: %bb.3(0x80000000)
- ; CHECK-NOLOB: liveins: $r0, $r2
- ; CHECK-NOLOB: renamable $r1 = tUXTH killed renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK-NOLOB: bb.3.land.rhs:
- ; CHECK-NOLOB: successors: %bb.4(0x80000000)
- ; CHECK-NOLOB: liveins: $r0, $r1
- ; CHECK-NOLOB: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info2)
- ; CHECK-NOLOB: renamable $r2 = tLDRHi killed renamable $r2, 1, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx3)
- ; CHECK-NOLOB: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK-NOLOB: t2IT 0, 8, implicit-def $itstate
- ; CHECK-NOLOB: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK-NOLOB: bb.4.while.body:
- ; CHECK-NOLOB: successors: %bb.9(0x04000000), %bb.3(0x7c000000)
- ; CHECK-NOLOB: liveins: $r0, $r1
- ; CHECK-NOLOB: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next4)
- ; CHECK-NOLOB: tCMPi8 renamable $r0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK-NOLOB: tBcc %bb.3, 0 /* CC::eq */, killed $cpsr
- ; CHECK-NOLOB: tB %bb.9, 14 /* CC::al */, $noreg
- ; CHECK-NOLOB: bb.5.while.cond9.preheader:
- ; CHECK-NOLOB: successors: %bb.9(0x30000000), %bb.6(0x50000000)
- ; CHECK-NOLOB: liveins: $r0, $r1
- ; CHECK-NOLOB: tCBZ renamable $r0, %bb.9
- ; CHECK-NOLOB: bb.6.land.rhs11.lr.ph:
- ; CHECK-NOLOB: successors: %bb.7(0x80000000)
- ; CHECK-NOLOB: liveins: $r0, $r1
- ; CHECK-NOLOB: renamable $r1 = t2LDRSHi12 killed renamable $r1, 0, 14 /* CC::al */, $noreg :: (load (s16) from %ir.data16143)
- ; CHECK-NOLOB: bb.7.land.rhs11:
- ; CHECK-NOLOB: successors: %bb.10(0x04000000), %bb.8(0x7c000000)
- ; CHECK-NOLOB: liveins: $r0, $r1
- ; CHECK-NOLOB: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info12)
- ; CHECK-NOLOB: renamable $r2 = tLDRBi killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (load (s8) from %ir.data165, align 2)
- ; CHECK-NOLOB: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK-NOLOB: tBcc %bb.10, 0 /* CC::eq */, killed $cpsr
- ; CHECK-NOLOB: bb.8.while.body19:
- ; CHECK-NOLOB: successors: %bb.9(0x04000000), %bb.7(0x7c000000)
- ; CHECK-NOLOB: liveins: $r0, $r1
- ; CHECK-NOLOB: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next206)
- ; CHECK-NOLOB: tCMPi8 renamable $r0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK-NOLOB: tBcc %bb.7, 1 /* CC::ne */, killed $cpsr
- ; CHECK-NOLOB: bb.9:
- ; CHECK-NOLOB: successors: %bb.10(0x80000000)
- ; CHECK-NOLOB: renamable $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK-NOLOB: bb.10.return:
- ; CHECK-NOLOB: liveins: $r0
- ; CHECK-NOLOB: tBX_RET 14 /* CC::al */, $noreg, implicit killed $r0
+ ; CHECK-NOLOB-NEXT: successors: %bb.1(0x50000000), %bb.5(0x30000000)
+ ; CHECK-NOLOB-NEXT: liveins: $r0, $r1
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: renamable $r2 = t2LDRSHi12 renamable $r1, 2, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx)
+ ; CHECK-NOLOB-NEXT: t2CMPri renamable $r2, -1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NOLOB-NEXT: tBcc %bb.5, 13 /* CC::le */, killed $cpsr
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.1.while.cond.preheader:
+ ; CHECK-NOLOB-NEXT: successors: %bb.9(0x30000000), %bb.2(0x50000000)
+ ; CHECK-NOLOB-NEXT: liveins: $r0, $r2
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: tCBZ renamable $r0, %bb.9
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.2.land.rhs.preheader:
+ ; CHECK-NOLOB-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NOLOB-NEXT: liveins: $r0, $r2
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: renamable $r1 = tUXTH killed renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.3.land.rhs:
+ ; CHECK-NOLOB-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NOLOB-NEXT: liveins: $r0, $r1
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info2)
+ ; CHECK-NOLOB-NEXT: renamable $r2 = tLDRHi killed renamable $r2, 1, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx3)
+ ; CHECK-NOLOB-NEXT: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NOLOB-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NOLOB-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.4.while.body:
+ ; CHECK-NOLOB-NEXT: successors: %bb.9(0x04000000), %bb.3(0x7c000000)
+ ; CHECK-NOLOB-NEXT: liveins: $r0, $r1
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next4)
+ ; CHECK-NOLOB-NEXT: tCMPi8 renamable $r0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NOLOB-NEXT: tBcc %bb.3, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NOLOB-NEXT: tB %bb.9, 14 /* CC::al */, $noreg
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.5.while.cond9.preheader:
+ ; CHECK-NOLOB-NEXT: successors: %bb.9(0x30000000), %bb.6(0x50000000)
+ ; CHECK-NOLOB-NEXT: liveins: $r0, $r1
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: tCBZ renamable $r0, %bb.9
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.6.land.rhs11.lr.ph:
+ ; CHECK-NOLOB-NEXT: successors: %bb.7(0x80000000)
+ ; CHECK-NOLOB-NEXT: liveins: $r0, $r1
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: renamable $r1 = t2LDRSHi12 killed renamable $r1, 0, 14 /* CC::al */, $noreg :: (load (s16) from %ir.data16143)
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.7.land.rhs11:
+ ; CHECK-NOLOB-NEXT: successors: %bb.10(0x04000000), %bb.8(0x7c000000)
+ ; CHECK-NOLOB-NEXT: liveins: $r0, $r1
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info12)
+ ; CHECK-NOLOB-NEXT: renamable $r2 = tLDRBi killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (load (s8) from %ir.data165, align 2)
+ ; CHECK-NOLOB-NEXT: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NOLOB-NEXT: tBcc %bb.10, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.8.while.body19:
+ ; CHECK-NOLOB-NEXT: successors: %bb.9(0x04000000), %bb.7(0x7c000000)
+ ; CHECK-NOLOB-NEXT: liveins: $r0, $r1
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next206)
+ ; CHECK-NOLOB-NEXT: tCMPi8 renamable $r0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NOLOB-NEXT: tBcc %bb.7, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.9:
+ ; CHECK-NOLOB-NEXT: successors: %bb.10(0x80000000)
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: renamable $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.10.return:
+ ; CHECK-NOLOB-NEXT: liveins: $r0
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: tBX_RET 14 /* CC::al */, $noreg, implicit killed $r0
bb.0.entry:
successors: %bb.5(0x50000000), %bb.1(0x30000000)
liveins: $r0, $r1
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec-reorder.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec-reorder.mir
index 0ea3b26903f3..fc88475399a7 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec-reorder.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec-reorder.mir
@@ -108,72 +108,91 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: search
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x50000000), %bb.6(0x30000000)
- ; CHECK: liveins: $r0, $r1
- ; CHECK: renamable $r2 = t2LDRSHi12 renamable $r1, 2, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx)
- ; CHECK: t2CMPri renamable $r2, -1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.6, 13 /* CC::le */, killed $cpsr
- ; CHECK: bb.1.while.cond.preheader:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r2
- ; CHECK: tCMPi8 renamable $r0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 4, implicit-def $itstate
- ; CHECK: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
- ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK: tB %bb.2, 14 /* CC::al */, $noreg
- ; CHECK: bb.2:
- ; CHECK: successors: %bb.3(0x80000000)
- ; CHECK: liveins: $r0, $r2
- ; CHECK: renamable $r1 = tUXTH killed renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: bb.3.land.rhs:
- ; CHECK: successors: %bb.5(0x04000000), %bb.4(0x7c000000)
- ; CHECK: liveins: $r0, $r1
- ; CHECK: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info2)
- ; CHECK: renamable $r2 = tLDRHi killed renamable $r2, 1, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx3)
- ; CHECK: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.5, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.4.while.body:
- ; CHECK: successors: %bb.3(0x80000000)
- ; CHECK: liveins: $r0, $r1
- ; CHECK: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next4)
- ; CHECK: tCMPi8 renamable $r0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 4, implicit-def $itstate
- ; CHECK: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
- ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK: tB %bb.3, 14 /* CC::al */, $noreg
- ; CHECK: bb.5.return:
- ; CHECK: liveins: $r0
- ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit killed $r0
- ; CHECK: bb.6.while.cond9.preheader:
- ; CHECK: successors: %bb.7(0x80000000)
- ; CHECK: liveins: $r0, $r1
- ; CHECK: tCMPi8 renamable $r0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 4, implicit-def $itstate
- ; CHECK: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
- ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK: tB %bb.7, 14 /* CC::al */, $noreg
- ; CHECK: bb.7.land.rhs11.lr.ph:
- ; CHECK: successors: %bb.8(0x80000000)
- ; CHECK: liveins: $r0, $r1
- ; CHECK: renamable $r1 = t2LDRSHi12 killed renamable $r1, 0, 14 /* CC::al */, $noreg :: (load (s16) from %ir.data16143)
- ; CHECK: bb.8.land.rhs11:
- ; CHECK: successors: %bb.9(0x80000000)
- ; CHECK: liveins: $r0, $r1
- ; CHECK: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info12)
- ; CHECK: renamable $r2 = tLDRBi killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (load (s8) from %ir.data166, align 2)
- ; CHECK: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK: tB %bb.9, 14 /* CC::al */, $noreg
- ; CHECK: bb.9.while.body19:
- ; CHECK: successors: %bb.8(0x80000000)
- ; CHECK: liveins: $r0, $r1
- ; CHECK: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next205)
- ; CHECK: tCMPi8 renamable $r0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 4, implicit-def $itstate
- ; CHECK: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
- ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK: tB %bb.8, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: successors: %bb.1(0x50000000), %bb.6(0x30000000)
+ ; CHECK-NEXT: liveins: $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r2 = t2LDRSHi12 renamable $r1, 2, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx)
+ ; CHECK-NEXT: t2CMPri renamable $r2, -1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.6, 13 /* CC::le */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.while.cond.preheader:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCMPi8 renamable $r0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 4, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
+ ; CHECK-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-NEXT: tB %bb.2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1 = tUXTH killed renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.land.rhs:
+ ; CHECK-NEXT: successors: %bb.5(0x04000000), %bb.4(0x7c000000)
+ ; CHECK-NEXT: liveins: $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info2)
+ ; CHECK-NEXT: renamable $r2 = tLDRHi killed renamable $r2, 1, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx3)
+ ; CHECK-NEXT: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.5, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.while.body:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next4)
+ ; CHECK-NEXT: tCMPi8 renamable $r0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 4, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
+ ; CHECK-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-NEXT: tB %bb.3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5.return:
+ ; CHECK-NEXT: liveins: $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tBX_RET 14 /* CC::al */, $noreg, implicit killed $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6.while.cond9.preheader:
+ ; CHECK-NEXT: successors: %bb.7(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCMPi8 renamable $r0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 4, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
+ ; CHECK-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-NEXT: tB %bb.7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.7.land.rhs11.lr.ph:
+ ; CHECK-NEXT: successors: %bb.8(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1 = t2LDRSHi12 killed renamable $r1, 0, 14 /* CC::al */, $noreg :: (load (s16) from %ir.data16143)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.8.land.rhs11:
+ ; CHECK-NEXT: successors: %bb.9(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info12)
+ ; CHECK-NEXT: renamable $r2 = tLDRBi killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (load (s8) from %ir.data166, align 2)
+ ; CHECK-NEXT: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-NEXT: tB %bb.9, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.9.while.body19:
+ ; CHECK-NEXT: successors: %bb.8(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next205)
+ ; CHECK-NEXT: tCMPi8 renamable $r0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 4, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
+ ; CHECK-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-NEXT: tB %bb.8, 14 /* CC::al */, $noreg
bb.0.entry:
successors: %bb.2(0x50000000), %bb.1(0x30000000)
liveins: $r0, $r1
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec.mir
index 87694e3c3932..a80f4e9ffae6 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-dec.mir
@@ -9,7 +9,6 @@
%struct.head_s = type { ptr, ptr }
%struct.data_s = type { i16, i16 }
- ; Function Attrs: norecurse nounwind readonly
define dso_local arm_aapcscc ptr @search(ptr readonly %list, ptr nocapture readonly %info) local_unnamed_addr {
entry:
%idx = getelementptr inbounds %struct.data_s, ptr %info, i32 0, i32 1
@@ -113,117 +112,160 @@ machineFunctionInfo: {}
body: |
; CHECK-LOB-LABEL: name: search
; CHECK-LOB: bb.0.entry:
- ; CHECK-LOB: successors: %bb.1(0x50000000), %bb.5(0x30000000)
- ; CHECK-LOB: liveins: $r0, $r1
- ; CHECK-LOB: renamable $r2 = t2LDRSHi12 renamable $r1, 2, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx)
- ; CHECK-LOB: t2CMPri renamable $r2, -1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK-LOB: tBcc %bb.5, 13 /* CC::le */, killed $cpsr
- ; CHECK-LOB: bb.1.while.cond.preheader:
- ; CHECK-LOB: successors: %bb.9(0x30000000), %bb.2(0x50000000)
- ; CHECK-LOB: liveins: $r0, $r2
- ; CHECK-LOB: tCBZ renamable $r0, %bb.9
- ; CHECK-LOB: bb.2.land.rhs.preheader:
- ; CHECK-LOB: successors: %bb.3(0x80000000)
- ; CHECK-LOB: liveins: $r0, $r2
- ; CHECK-LOB: renamable $r1 = tUXTH killed renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK-LOB: bb.3.land.rhs:
- ; CHECK-LOB: successors: %bb.4(0x80000000)
- ; CHECK-LOB: liveins: $r0, $r1
- ; CHECK-LOB: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info2)
- ; CHECK-LOB: renamable $r2 = tLDRHi killed renamable $r2, 1, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx3)
- ; CHECK-LOB: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK-LOB: t2IT 0, 8, implicit-def $itstate
- ; CHECK-LOB: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK-LOB: bb.4.while.body:
- ; CHECK-LOB: successors: %bb.9(0x04000000), %bb.3(0x7c000000)
- ; CHECK-LOB: liveins: $r0, $r1
- ; CHECK-LOB: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next4)
- ; CHECK-LOB: tCBZ renamable $r0, %bb.9
- ; CHECK-LOB: t2LE %bb.3
- ; CHECK-LOB: bb.5.while.cond9.preheader:
- ; CHECK-LOB: successors: %bb.9(0x30000000), %bb.6(0x50000000)
- ; CHECK-LOB: liveins: $r0, $r1
- ; CHECK-LOB: tCBZ renamable $r0, %bb.9
- ; CHECK-LOB: bb.6.land.rhs11.lr.ph:
- ; CHECK-LOB: successors: %bb.7(0x80000000)
- ; CHECK-LOB: liveins: $r0, $r1
- ; CHECK-LOB: renamable $r1 = t2LDRSHi12 killed renamable $r1, 0, 14 /* CC::al */, $noreg :: (load (s16) from %ir.data16143)
- ; CHECK-LOB: bb.7.land.rhs11:
- ; CHECK-LOB: successors: %bb.10(0x04000000), %bb.8(0x7c000000)
- ; CHECK-LOB: liveins: $r0, $r1
- ; CHECK-LOB: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info12)
- ; CHECK-LOB: renamable $r2 = tLDRBi killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (load (s8) from %ir.data165, align 2)
- ; CHECK-LOB: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK-LOB: tBcc %bb.10, 0 /* CC::eq */, killed $cpsr
- ; CHECK-LOB: bb.8.while.body19:
- ; CHECK-LOB: successors: %bb.9(0x04000000), %bb.7(0x7c000000)
- ; CHECK-LOB: liveins: $r0, $r1
- ; CHECK-LOB: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next206)
- ; CHECK-LOB: tCBZ renamable $r0, %bb.9
- ; CHECK-LOB: t2LE %bb.7
- ; CHECK-LOB: bb.9:
- ; CHECK-LOB: successors: %bb.10(0x80000000)
- ; CHECK-LOB: renamable $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK-LOB: bb.10.return:
- ; CHECK-LOB: liveins: $r0
- ; CHECK-LOB: tBX_RET 14 /* CC::al */, $noreg, implicit killed $r0
+ ; CHECK-LOB-NEXT: successors: %bb.1(0x50000000), %bb.5(0x30000000)
+ ; CHECK-LOB-NEXT: liveins: $r0, $r1
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: renamable $r2 = t2LDRSHi12 renamable $r1, 2, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx)
+ ; CHECK-LOB-NEXT: t2CMPri renamable $r2, -1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-LOB-NEXT: tBcc %bb.5, 13 /* CC::le */, killed $cpsr
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.1.while.cond.preheader:
+ ; CHECK-LOB-NEXT: successors: %bb.9(0x30000000), %bb.2(0x50000000)
+ ; CHECK-LOB-NEXT: liveins: $r0, $r2
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: tCBZ renamable $r0, %bb.9
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.2.land.rhs.preheader:
+ ; CHECK-LOB-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-LOB-NEXT: liveins: $r0, $r2
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: renamable $r1 = tUXTH killed renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.3.land.rhs:
+ ; CHECK-LOB-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-LOB-NEXT: liveins: $r0, $r1
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info2)
+ ; CHECK-LOB-NEXT: renamable $r2 = tLDRHi killed renamable $r2, 1, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx3)
+ ; CHECK-LOB-NEXT: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-LOB-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-LOB-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.4.while.body:
+ ; CHECK-LOB-NEXT: successors: %bb.9(0x04000000), %bb.3(0x7c000000)
+ ; CHECK-LOB-NEXT: liveins: $r0, $r1
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next4)
+ ; CHECK-LOB-NEXT: tCBZ renamable $r0, %bb.9
+ ; CHECK-LOB-NEXT: t2LE %bb.3
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.5.while.cond9.preheader:
+ ; CHECK-LOB-NEXT: successors: %bb.9(0x30000000), %bb.6(0x50000000)
+ ; CHECK-LOB-NEXT: liveins: $r0, $r1
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: tCBZ renamable $r0, %bb.9
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.6.land.rhs11.lr.ph:
+ ; CHECK-LOB-NEXT: successors: %bb.7(0x80000000)
+ ; CHECK-LOB-NEXT: liveins: $r0, $r1
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: renamable $r1 = t2LDRSHi12 killed renamable $r1, 0, 14 /* CC::al */, $noreg :: (load (s16) from %ir.data16143)
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.7.land.rhs11:
+ ; CHECK-LOB-NEXT: successors: %bb.10(0x04000000), %bb.8(0x7c000000)
+ ; CHECK-LOB-NEXT: liveins: $r0, $r1
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info12)
+ ; CHECK-LOB-NEXT: renamable $r2 = tLDRBi killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (load (s8) from %ir.data165, align 2)
+ ; CHECK-LOB-NEXT: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-LOB-NEXT: tBcc %bb.10, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.8.while.body19:
+ ; CHECK-LOB-NEXT: successors: %bb.9(0x04000000), %bb.7(0x7c000000)
+ ; CHECK-LOB-NEXT: liveins: $r0, $r1
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next206)
+ ; CHECK-LOB-NEXT: tCBZ renamable $r0, %bb.9
+ ; CHECK-LOB-NEXT: t2LE %bb.7
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.9:
+ ; CHECK-LOB-NEXT: successors: %bb.10(0x80000000)
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: renamable $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: bb.10.return:
+ ; CHECK-LOB-NEXT: liveins: $r0
+ ; CHECK-LOB-NEXT: {{ $}}
+ ; CHECK-LOB-NEXT: tBX_RET 14 /* CC::al */, $noreg, implicit killed $r0
+ ;
; CHECK-NOLOB-LABEL: name: search
; CHECK-NOLOB: bb.0.entry:
- ; CHECK-NOLOB: successors: %bb.1(0x50000000), %bb.5(0x30000000)
- ; CHECK-NOLOB: liveins: $r0, $r1
- ; CHECK-NOLOB: renamable $r2 = t2LDRSHi12 renamable $r1, 2, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx)
- ; CHECK-NOLOB: t2CMPri renamable $r2, -1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK-NOLOB: tBcc %bb.5, 13 /* CC::le */, killed $cpsr
- ; CHECK-NOLOB: bb.1.while.cond.preheader:
- ; CHECK-NOLOB: successors: %bb.9(0x30000000), %bb.2(0x50000000)
- ; CHECK-NOLOB: liveins: $r0, $r2
- ; CHECK-NOLOB: tCBZ renamable $r0, %bb.9
- ; CHECK-NOLOB: bb.2.land.rhs.preheader:
- ; CHECK-NOLOB: successors: %bb.3(0x80000000)
- ; CHECK-NOLOB: liveins: $r0, $r2
- ; CHECK-NOLOB: renamable $r1 = tUXTH killed renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK-NOLOB: bb.3.land.rhs:
- ; CHECK-NOLOB: successors: %bb.4(0x80000000)
- ; CHECK-NOLOB: liveins: $r0, $r1
- ; CHECK-NOLOB: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info2)
- ; CHECK-NOLOB: renamable $r2 = tLDRHi killed renamable $r2, 1, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx3)
- ; CHECK-NOLOB: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK-NOLOB: t2IT 0, 8, implicit-def $itstate
- ; CHECK-NOLOB: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK-NOLOB: bb.4.while.body:
- ; CHECK-NOLOB: successors: %bb.9(0x04000000), %bb.3(0x7c000000)
- ; CHECK-NOLOB: liveins: $r0, $r1
- ; CHECK-NOLOB: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next4)
- ; CHECK-NOLOB: tCMPi8 renamable $r0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK-NOLOB: tBcc %bb.3, 1 /* CC::ne */, killed $cpsr
- ; CHECK-NOLOB: tB %bb.9, 14 /* CC::al */, $noreg
- ; CHECK-NOLOB: bb.5.while.cond9.preheader:
- ; CHECK-NOLOB: successors: %bb.9(0x30000000), %bb.6(0x50000000)
- ; CHECK-NOLOB: liveins: $r0, $r1
- ; CHECK-NOLOB: tCBZ renamable $r0, %bb.9
- ; CHECK-NOLOB: bb.6.land.rhs11.lr.ph:
- ; CHECK-NOLOB: successors: %bb.7(0x80000000)
- ; CHECK-NOLOB: liveins: $r0, $r1
- ; CHECK-NOLOB: renamable $r1 = t2LDRSHi12 killed renamable $r1, 0, 14 /* CC::al */, $noreg :: (load (s16) from %ir.data16143)
- ; CHECK-NOLOB: bb.7.land.rhs11:
- ; CHECK-NOLOB: successors: %bb.10(0x04000000), %bb.8(0x7c000000)
- ; CHECK-NOLOB: liveins: $r0, $r1
- ; CHECK-NOLOB: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info12)
- ; CHECK-NOLOB: renamable $r2 = tLDRBi killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (load (s8) from %ir.data165, align 2)
- ; CHECK-NOLOB: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK-NOLOB: tBcc %bb.10, 0 /* CC::eq */, killed $cpsr
- ; CHECK-NOLOB: bb.8.while.body19:
- ; CHECK-NOLOB: successors: %bb.9(0x04000000), %bb.7(0x7c000000)
- ; CHECK-NOLOB: liveins: $r0, $r1
- ; CHECK-NOLOB: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next206)
- ; CHECK-NOLOB: tCMPi8 renamable $r0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK-NOLOB: tBcc %bb.7, 1 /* CC::ne */, killed $cpsr
- ; CHECK-NOLOB: bb.9:
- ; CHECK-NOLOB: successors: %bb.10(0x80000000)
- ; CHECK-NOLOB: renamable $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK-NOLOB: bb.10.return:
- ; CHECK-NOLOB: liveins: $r0
- ; CHECK-NOLOB: tBX_RET 14 /* CC::al */, $noreg, implicit killed $r0
+ ; CHECK-NOLOB-NEXT: successors: %bb.1(0x50000000), %bb.5(0x30000000)
+ ; CHECK-NOLOB-NEXT: liveins: $r0, $r1
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: renamable $r2 = t2LDRSHi12 renamable $r1, 2, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx)
+ ; CHECK-NOLOB-NEXT: t2CMPri renamable $r2, -1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NOLOB-NEXT: tBcc %bb.5, 13 /* CC::le */, killed $cpsr
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.1.while.cond.preheader:
+ ; CHECK-NOLOB-NEXT: successors: %bb.9(0x30000000), %bb.2(0x50000000)
+ ; CHECK-NOLOB-NEXT: liveins: $r0, $r2
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: tCBZ renamable $r0, %bb.9
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.2.land.rhs.preheader:
+ ; CHECK-NOLOB-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NOLOB-NEXT: liveins: $r0, $r2
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: renamable $r1 = tUXTH killed renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.3.land.rhs:
+ ; CHECK-NOLOB-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NOLOB-NEXT: liveins: $r0, $r1
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info2)
+ ; CHECK-NOLOB-NEXT: renamable $r2 = tLDRHi killed renamable $r2, 1, 14 /* CC::al */, $noreg :: (load (s16) from %ir.idx3)
+ ; CHECK-NOLOB-NEXT: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NOLOB-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NOLOB-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.4.while.body:
+ ; CHECK-NOLOB-NEXT: successors: %bb.9(0x04000000), %bb.3(0x7c000000)
+ ; CHECK-NOLOB-NEXT: liveins: $r0, $r1
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next4)
+ ; CHECK-NOLOB-NEXT: tCMPi8 renamable $r0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NOLOB-NEXT: tBcc %bb.3, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NOLOB-NEXT: tB %bb.9, 14 /* CC::al */, $noreg
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.5.while.cond9.preheader:
+ ; CHECK-NOLOB-NEXT: successors: %bb.9(0x30000000), %bb.6(0x50000000)
+ ; CHECK-NOLOB-NEXT: liveins: $r0, $r1
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: tCBZ renamable $r0, %bb.9
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.6.land.rhs11.lr.ph:
+ ; CHECK-NOLOB-NEXT: successors: %bb.7(0x80000000)
+ ; CHECK-NOLOB-NEXT: liveins: $r0, $r1
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: renamable $r1 = t2LDRSHi12 killed renamable $r1, 0, 14 /* CC::al */, $noreg :: (load (s16) from %ir.data16143)
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.7.land.rhs11:
+ ; CHECK-NOLOB-NEXT: successors: %bb.10(0x04000000), %bb.8(0x7c000000)
+ ; CHECK-NOLOB-NEXT: liveins: $r0, $r1
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: renamable $r2 = tLDRi renamable $r0, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.info12)
+ ; CHECK-NOLOB-NEXT: renamable $r2 = tLDRBi killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (load (s8) from %ir.data165, align 2)
+ ; CHECK-NOLOB-NEXT: tCMPr killed renamable $r2, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NOLOB-NEXT: tBcc %bb.10, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.8.while.body19:
+ ; CHECK-NOLOB-NEXT: successors: %bb.9(0x04000000), %bb.7(0x7c000000)
+ ; CHECK-NOLOB-NEXT: liveins: $r0, $r1
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.next206)
+ ; CHECK-NOLOB-NEXT: tCMPi8 renamable $r0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NOLOB-NEXT: tBcc %bb.7, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.9:
+ ; CHECK-NOLOB-NEXT: successors: %bb.10(0x80000000)
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: renamable $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: bb.10.return:
+ ; CHECK-NOLOB-NEXT: liveins: $r0
+ ; CHECK-NOLOB-NEXT: {{ $}}
+ ; CHECK-NOLOB-NEXT: tBX_RET 14 /* CC::al */, $noreg, implicit killed $r0
bb.0.entry:
successors: %bb.5(0x50000000), %bb.1(0x30000000)
liveins: $r0, $r1
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-vpsel-liveout.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-vpsel-liveout.mir
index e445598f1a31..ab47e5a181eb 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-vpsel-liveout.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-vpsel-liveout.mir
@@ -102,33 +102,40 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: no_vpsel_liveout
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 4, implicit-def $itstate
- ; CHECK: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
- ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r2
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 0, killed $noreg, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
- ; CHECK: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.middle.block:
- ; CHECK: liveins: $q0
- ; CHECK: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 4, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
+ ; CHECK-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 0, killed $noreg, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
+ ; CHECK-NEXT: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.middle.block:
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $lr, $r7
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-load.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-load.mir
index 14019372e0e8..5279f13bfc87 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-load.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-load.mir
@@ -107,50 +107,57 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: non_masked_load
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 2, implicit-def $itstate
- ; CHECK: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
- ; CHECK: renamable $r0 = tUXTB killed renamable $r0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
- ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
- ; CHECK: renamable $r3 = t2ADDri renamable $r2, 15, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 15, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 16, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r3, renamable $r12, 35, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3 = t2LSRri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3 = t2SUBrs renamable $r2, killed renamable $r3, 34, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = MVE_VCTP8 renamable $r2, 0, $noreg, $noreg
- ; CHECK: $q1 = MVE_VORR killed $q0, killed $q0, 0, $noreg, $noreg, undef $q1
- ; CHECK: MVE_VPST 2, implicit $vpr
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRBU8_post killed renamable $r1, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv2022, align 1)
- ; CHECK: renamable $r0, renamable $q2 = MVE_VLDRBU8_post killed renamable $r0, 16, 1, $noreg, $noreg :: (load (s128) from %ir.lsr.iv19, align 1)
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 16, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q2 = MVE_VADDi8 killed renamable $q2, renamable $q1, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: renamable $q0 = MVE_VADDi8 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.middle.block:
- ; CHECK: liveins: $q0, $q1, $r3
- ; CHECK: renamable $vpr = MVE_VCTP8 killed renamable $r3, 0, $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr, $noreg
- ; CHECK: renamable $r0 = MVE_VADDVu8no_acc killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r7, def $lr
- ; CHECK: renamable $r0 = tUXTB killed renamable $r0, 14 /* CC::al */, $noreg
- ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 2, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
+ ; CHECK-NEXT: renamable $r0 = tUXTB killed renamable $r0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
+ ; CHECK-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_register $r7
+ ; CHECK-NEXT: renamable $r3 = t2ADDri renamable $r2, 15, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 15, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r3, 16, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $r3, renamable $r12, 35, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2LSRri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2SUBrs renamable $r2, killed renamable $r3, 34, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP8 renamable $r2, 0, $noreg, $noreg
+ ; CHECK-NEXT: $q1 = MVE_VORR killed $q0, killed $q0, 0, $noreg, $noreg, undef $q1
+ ; CHECK-NEXT: MVE_VPST 2, implicit $vpr
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRBU8_post killed renamable $r1, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv2022, align 1)
+ ; CHECK-NEXT: renamable $r0, renamable $q2 = MVE_VLDRBU8_post killed renamable $r0, 16, 1, $noreg, $noreg :: (load (s128) from %ir.lsr.iv19, align 1)
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q2 = MVE_VADDi8 killed renamable $q2, renamable $q1, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi8 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.middle.block:
+ ; CHECK-NEXT: liveins: $q0, $q1, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP8 killed renamable $r3, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r0 = MVE_VADDVu8no_acc killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r7, def $lr
+ ; CHECK-NEXT: renamable $r0 = tUXTB killed renamable $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tBX_RET 14 /* CC::al */, $noreg, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-store.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-store.mir
index d0959c0d491f..1b7fec49238a 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-store.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-store.mir
@@ -99,38 +99,44 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: non_masked_store
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
- ; CHECK: tCMPi8 renamable $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r12 = t2ADDri renamable $r3, 15, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2BICri killed renamable $r12, 15, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 16, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 35, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = MVE_VCTP8 renamable $r3, 0, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 16, 14 /* CC::al */, $noreg
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRBU8_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv15, align 1)
- ; CHECK: renamable $r2, renamable $q1 = MVE_VLDRBU8_post killed renamable $r2, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1618, align 1)
- ; CHECK: renamable $q0 = MVE_VADDi8 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r0 = MVE_VSTRBU8_post killed renamable $q0, killed renamable $r0, 16, 1, $noreg, $noreg :: (store (s128) into %ir.lsr.iv1921, align 1)
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_register $r7
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r12 = t2ADDri renamable $r3, 15, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2BICri killed renamable $r12, 15, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 16, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 35, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP8 renamable $r3, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRBU8_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv15, align 1)
+ ; CHECK-NEXT: renamable $r2, renamable $q1 = MVE_VLDRBU8_post killed renamable $r2, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1618, align 1)
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi8 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRBU8_post killed renamable $q0, killed renamable $r0, 16, 1, $noreg, $noreg :: (store (s128) into %ir.lsr.iv1921, align 1)
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/out-of-range-cbz.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/out-of-range-cbz.mir
index 5815b149859b..eb10cd7692cc 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/out-of-range-cbz.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/out-of-range-cbz.mir
@@ -160,137 +160,171 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: f
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.5(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $r0, $r1, $r7, $lr
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead renamable $r1, $cpsr = tORR killed renamable $r1, renamable $r0, 14 /* CC::al */, $noreg
- ; CHECK: tBcc %bb.5, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.1.entry.split:
- ; CHECK: successors: %bb.15(0x30000000), %bb.2(0x50000000)
- ; CHECK: liveins: $r0
- ; CHECK: tCMPi8 renamable $r0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.15, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.2.j.preheader:
- ; CHECK: successors: %bb.3(0x80000000)
- ; CHECK: liveins: $r0
- ; CHECK: $r1 = t2MOVi16 target-flags(arm-lo16) @a, 14 /* CC::al */, $noreg
- ; CHECK: $r1 = t2MOVTi16 killed $r1, target-flags(arm-hi16) @a, 14 /* CC::al */, $noreg
- ; CHECK: tCMPr killed renamable $r0, killed renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: $r1 = t2MOVi16 target-flags(arm-lo16) @d, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0 = t2CSINC $zr, $zr, 10, implicit killed $cpsr
- ; CHECK: $r1 = t2MOVTi16 killed $r1, target-flags(arm-hi16) @d, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = tLDRi killed renamable $r1, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
- ; CHECK: $r1 = t2MOVi16 target-flags(arm-lo16) @e, 14 /* CC::al */, $noreg
- ; CHECK: $r1 = t2MOVTi16 killed $r1, target-flags(arm-hi16) @e, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = tLDRi renamable $r1, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @e)
- ; CHECK: bb.3.j (align 4):
- ; CHECK: successors: %bb.4(0x04000000), %bb.3(0x7c000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r3, dead $cpsr = tAND killed renamable $r3, renamable $r0, 14 /* CC::al */, $noreg
- ; CHECK: tCBZ renamable $r2, %bb.4
- ; CHECK: t2LE %bb.3
- ; CHECK: bb.4.if.end:
- ; CHECK: liveins: $r1, $r3
- ; CHECK: tSTRi killed renamable $r3, killed renamable $r1, 0, 14 /* CC::al */, $noreg :: (store (s32) into @e)
- ; CHECK: INLINEASM &"", 1 /* sideeffect attdialect */
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ; CHECK: bb.5.j.us.us.preheader:
- ; CHECK: successors: %bb.6(0x80000000)
- ; CHECK: $r12 = t2MOVi16 target-flags(arm-lo16) @d, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2MOVi16 target-flags(arm-lo16) @a, 14 /* CC::al */, $noreg
- ; CHECK: $r12 = t2MOVTi16 killed $r12, target-flags(arm-hi16) @d, 14 /* CC::al */, $noreg
- ; CHECK: $r2 = t2MOVi16 target-flags(arm-lo16) @e, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2LDRi12 renamable $r12, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
- ; CHECK: $lr = t2MOVTi16 killed $lr, target-flags(arm-hi16) @a, 14 /* CC::al */, $noreg
- ; CHECK: $r2 = t2MOVTi16 killed $r2, target-flags(arm-hi16) @e, 14 /* CC::al */, $noreg
- ; CHECK: bb.6.j.us.us (align 4):
- ; CHECK: successors: %bb.7(0x40000000), %bb.6(0x40000000)
- ; CHECK: liveins: $lr, $r2, $r3, $r12
- ; CHECK: tCMPhir renamable $r3, renamable $lr, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r1 = tLDRi renamable $r2, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @e)
- ; CHECK: renamable $r0 = t2CSINC $zr, $zr, 10, implicit killed $cpsr
- ; CHECK: renamable $r0 = t2ANDrr killed renamable $r0, killed renamable $r1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tSTRi killed renamable $r0, renamable $r2, 0, 14 /* CC::al */, $noreg :: (store (s32) into @e)
- ; CHECK: tCBZ renamable $r3, %bb.7
- ; CHECK: t2LE %bb.6
- ; CHECK: bb.7.if.end.us.us.us:
- ; CHECK: successors: %bb.8(0x40000000), %bb.6(0x40000000)
- ; CHECK: liveins: $lr, $r2, $r12
- ; CHECK: INLINEASM &"", 1 /* sideeffect attdialect */
- ; CHECK: renamable $r3 = t2LDRi12 renamable $r12, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
- ; CHECK: tCBZ renamable $r3, %bb.8
- ; CHECK: t2LE %bb.6
- ; CHECK: bb.8.if.end.us.us.us.1:
- ; CHECK: successors: %bb.9(0x40000000), %bb.6(0x40000000)
- ; CHECK: liveins: $lr, $r2, $r12
- ; CHECK: INLINEASM &"", 1 /* sideeffect attdialect */
- ; CHECK: renamable $r3 = t2LDRi12 renamable $r12, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
- ; CHECK: tCBZ renamable $r3, %bb.9
- ; CHECK: t2LE %bb.6
- ; CHECK: bb.9.if.end.us.us.us.2:
- ; CHECK: successors: %bb.10(0x40000000), %bb.6(0x40000000)
- ; CHECK: liveins: $lr, $r2, $r12
- ; CHECK: INLINEASM &"", 1 /* sideeffect attdialect */
- ; CHECK: renamable $r3 = t2LDRi12 renamable $r12, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
- ; CHECK: tCBZ renamable $r3, %bb.10
- ; CHECK: t2LE %bb.6
- ; CHECK: bb.10.if.end.us.us.us.3:
- ; CHECK: successors: %bb.11(0x40000000), %bb.6(0x40000000)
- ; CHECK: liveins: $lr, $r2, $r12
- ; CHECK: INLINEASM &"", 1 /* sideeffect attdialect */
- ; CHECK: renamable $r3 = t2LDRi12 renamable $r12, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
- ; CHECK: tCBZ renamable $r3, %bb.11
- ; CHECK: t2LE %bb.6
- ; CHECK: bb.11.if.end.us.us.us.4:
- ; CHECK: successors: %bb.12(0x40000000), %bb.6(0x40000000)
- ; CHECK: liveins: $lr, $r2, $r12
- ; CHECK: INLINEASM &"", 1 /* sideeffect attdialect */
- ; CHECK: renamable $r3 = t2LDRi12 renamable $r12, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
- ; CHECK: tCBZ renamable $r3, %bb.12
- ; CHECK: t2LE %bb.6
- ; CHECK: bb.12.if.end.us.us.us.5:
- ; CHECK: successors: %bb.13(0x40000000), %bb.6(0x40000000)
- ; CHECK: liveins: $lr, $r2, $r12
- ; CHECK: INLINEASM &"", 1 /* sideeffect attdialect */
- ; CHECK: renamable $r3 = t2LDRi12 renamable $r12, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
- ; CHECK: tCBZ renamable $r3, %bb.13
- ; CHECK: t2LE %bb.6
- ; CHECK: bb.13.if.end.us.us.us.6:
- ; CHECK: successors: %bb.14(0x04000000), %bb.6(0x7c000000)
- ; CHECK: liveins: $lr, $r2, $r12
- ; CHECK: INLINEASM &"", 1 /* sideeffect attdialect */
- ; CHECK: renamable $r3 = t2LDRi12 renamable $r12, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
- ; CHECK: tCBZ renamable $r3, %bb.14
- ; CHECK: t2LE %bb.6
- ; CHECK: bb.14.if.end.us.us.us.7:
- ; CHECK: INLINEASM &"", 1 /* sideeffect attdialect */
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ; CHECK: bb.15.j.us27.preheader:
- ; CHECK: successors: %bb.16(0x80000000)
- ; CHECK: $r0 = t2MOVi16 target-flags(arm-lo16) @d, 14 /* CC::al */, $noreg
- ; CHECK: $r1 = t2MOVi16 target-flags(arm-lo16) @a, 14 /* CC::al */, $noreg
- ; CHECK: $r0 = t2MOVTi16 killed $r0, target-flags(arm-hi16) @d, 14 /* CC::al */, $noreg
- ; CHECK: $r1 = t2MOVTi16 killed $r1, target-flags(arm-hi16) @a, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
- ; CHECK: tCMPr renamable $r0, killed renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: $r1 = t2MOVi16 target-flags(arm-lo16) @e, 14 /* CC::al */, $noreg
- ; CHECK: $r1 = t2MOVTi16 killed $r1, target-flags(arm-hi16) @e, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = t2CSINC $zr, $zr, 10, implicit killed $cpsr
- ; CHECK: renamable $r3 = tLDRi renamable $r1, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @e)
- ; CHECK: bb.16.j.us27 (align 4):
- ; CHECK: successors: %bb.17(0x04000000), %bb.16(0x7c000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r3, dead $cpsr = tAND killed renamable $r3, renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: tCBZ renamable $r0, %bb.17
- ; CHECK: t2LE %bb.16
- ; CHECK: bb.17.if.end.us38:
- ; CHECK: liveins: $r1, $r3
- ; CHECK: tSTRi killed renamable $r3, killed renamable $r1, 0, 14 /* CC::al */, $noreg :: (store (s32) into @e)
- ; CHECK: INLINEASM &"", 1 /* sideeffect attdialect */
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.5(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r7, $lr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead renamable $r1, $cpsr = tORR killed renamable $r1, renamable $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tBcc %bb.5, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.entry.split:
+ ; CHECK-NEXT: successors: %bb.15(0x30000000), %bb.2(0x50000000)
+ ; CHECK-NEXT: liveins: $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCMPi8 renamable $r0, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.15, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.j.preheader:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: liveins: $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r1 = t2MOVi16 target-flags(arm-lo16) @a, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r1 = t2MOVTi16 killed $r1, target-flags(arm-hi16) @a, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tCMPr killed renamable $r0, killed renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: $r1 = t2MOVi16 target-flags(arm-lo16) @d, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0 = t2CSINC $zr, $zr, 10, implicit killed $cpsr
+ ; CHECK-NEXT: $r1 = t2MOVTi16 killed $r1, target-flags(arm-hi16) @d, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = tLDRi killed renamable $r1, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
+ ; CHECK-NEXT: $r1 = t2MOVi16 target-flags(arm-lo16) @e, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r1 = t2MOVTi16 killed $r1, target-flags(arm-hi16) @e, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = tLDRi renamable $r1, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @e)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.j (align 4):
+ ; CHECK-NEXT: successors: %bb.4(0x04000000), %bb.3(0x7c000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tAND killed renamable $r3, renamable $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tCBZ renamable $r2, %bb.4
+ ; CHECK-NEXT: t2LE %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.if.end:
+ ; CHECK-NEXT: liveins: $r1, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tSTRi killed renamable $r3, killed renamable $r1, 0, 14 /* CC::al */, $noreg :: (store (s32) into @e)
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5.j.us.us.preheader:
+ ; CHECK-NEXT: successors: %bb.6(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r12 = t2MOVi16 target-flags(arm-lo16) @d, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2MOVi16 target-flags(arm-lo16) @a, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r12 = t2MOVTi16 killed $r12, target-flags(arm-hi16) @d, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r2 = t2MOVi16 target-flags(arm-lo16) @e, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2LDRi12 renamable $r12, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
+ ; CHECK-NEXT: $lr = t2MOVTi16 killed $lr, target-flags(arm-hi16) @a, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r2 = t2MOVTi16 killed $r2, target-flags(arm-hi16) @e, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6.j.us.us (align 4):
+ ; CHECK-NEXT: successors: %bb.7(0x40000000), %bb.6(0x40000000)
+ ; CHECK-NEXT: liveins: $lr, $r2, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCMPhir renamable $r3, renamable $lr, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r1 = tLDRi renamable $r2, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @e)
+ ; CHECK-NEXT: renamable $r0 = t2CSINC $zr, $zr, 10, implicit killed $cpsr
+ ; CHECK-NEXT: renamable $r0 = t2ANDrr killed renamable $r0, killed renamable $r1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tSTRi killed renamable $r0, renamable $r2, 0, 14 /* CC::al */, $noreg :: (store (s32) into @e)
+ ; CHECK-NEXT: tCBZ renamable $r3, %bb.7
+ ; CHECK-NEXT: t2LE %bb.6
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.7.if.end.us.us.us:
+ ; CHECK-NEXT: successors: %bb.8(0x40000000), %bb.6(0x40000000)
+ ; CHECK-NEXT: liveins: $lr, $r2, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */
+ ; CHECK-NEXT: renamable $r3 = t2LDRi12 renamable $r12, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
+ ; CHECK-NEXT: tCBZ renamable $r3, %bb.8
+ ; CHECK-NEXT: t2LE %bb.6
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.8.if.end.us.us.us.1:
+ ; CHECK-NEXT: successors: %bb.9(0x40000000), %bb.6(0x40000000)
+ ; CHECK-NEXT: liveins: $lr, $r2, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */
+ ; CHECK-NEXT: renamable $r3 = t2LDRi12 renamable $r12, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
+ ; CHECK-NEXT: tCBZ renamable $r3, %bb.9
+ ; CHECK-NEXT: t2LE %bb.6
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.9.if.end.us.us.us.2:
+ ; CHECK-NEXT: successors: %bb.10(0x40000000), %bb.6(0x40000000)
+ ; CHECK-NEXT: liveins: $lr, $r2, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */
+ ; CHECK-NEXT: renamable $r3 = t2LDRi12 renamable $r12, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
+ ; CHECK-NEXT: tCBZ renamable $r3, %bb.10
+ ; CHECK-NEXT: t2LE %bb.6
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.10.if.end.us.us.us.3:
+ ; CHECK-NEXT: successors: %bb.11(0x40000000), %bb.6(0x40000000)
+ ; CHECK-NEXT: liveins: $lr, $r2, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */
+ ; CHECK-NEXT: renamable $r3 = t2LDRi12 renamable $r12, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
+ ; CHECK-NEXT: tCBZ renamable $r3, %bb.11
+ ; CHECK-NEXT: t2LE %bb.6
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.11.if.end.us.us.us.4:
+ ; CHECK-NEXT: successors: %bb.12(0x40000000), %bb.6(0x40000000)
+ ; CHECK-NEXT: liveins: $lr, $r2, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */
+ ; CHECK-NEXT: renamable $r3 = t2LDRi12 renamable $r12, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
+ ; CHECK-NEXT: tCBZ renamable $r3, %bb.12
+ ; CHECK-NEXT: t2LE %bb.6
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.12.if.end.us.us.us.5:
+ ; CHECK-NEXT: successors: %bb.13(0x40000000), %bb.6(0x40000000)
+ ; CHECK-NEXT: liveins: $lr, $r2, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */
+ ; CHECK-NEXT: renamable $r3 = t2LDRi12 renamable $r12, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
+ ; CHECK-NEXT: tCBZ renamable $r3, %bb.13
+ ; CHECK-NEXT: t2LE %bb.6
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.13.if.end.us.us.us.6:
+ ; CHECK-NEXT: successors: %bb.14(0x04000000), %bb.6(0x7c000000)
+ ; CHECK-NEXT: liveins: $lr, $r2, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */
+ ; CHECK-NEXT: renamable $r3 = t2LDRi12 renamable $r12, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
+ ; CHECK-NEXT: tCBZ renamable $r3, %bb.14
+ ; CHECK-NEXT: t2LE %bb.6
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.14.if.end.us.us.us.7:
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.15.j.us27.preheader:
+ ; CHECK-NEXT: successors: %bb.16(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = t2MOVi16 target-flags(arm-lo16) @d, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r1 = t2MOVi16 target-flags(arm-lo16) @a, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r0 = t2MOVTi16 killed $r0, target-flags(arm-hi16) @d, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r1 = t2MOVTi16 killed $r1, target-flags(arm-hi16) @a, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0 = tLDRi killed renamable $r0, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
+ ; CHECK-NEXT: tCMPr renamable $r0, killed renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: $r1 = t2MOVi16 target-flags(arm-lo16) @e, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r1 = t2MOVTi16 killed $r1, target-flags(arm-hi16) @e, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = t2CSINC $zr, $zr, 10, implicit killed $cpsr
+ ; CHECK-NEXT: renamable $r3 = tLDRi renamable $r1, 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @e)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.16.j.us27 (align 4):
+ ; CHECK-NEXT: successors: %bb.17(0x04000000), %bb.16(0x7c000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tAND killed renamable $r3, renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tCBZ renamable $r0, %bb.17
+ ; CHECK-NEXT: t2LE %bb.16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.17.if.end.us38:
+ ; CHECK-NEXT: liveins: $r1, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tSTRi killed renamable $r3, killed renamable $r1, 0, 14 /* CC::al */, $noreg :: (store (s32) into @e)
+ ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x30000000), %bb.11(0x50000000)
liveins: $r0, $r1, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/predicated-invariant.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/predicated-invariant.mir
index 911e1d607a9d..67cfe707e671 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/predicated-invariant.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/predicated-invariant.mir
@@ -74,34 +74,43 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: invariant_predicated_add_use
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCBZ $r2, %bb.3
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r2
- ; CHECK: renamable $r1 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r1, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r2
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.4(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $q1 = MVE_VADDi32 renamable $q0, killed renamable $q1, 0, killed $noreg, $noreg, undef renamable $q1
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: tB %bb.4, 14 /* CC::al */, $noreg
- ; CHECK: bb.3:
- ; CHECK: successors: %bb.4(0x80000000)
- ; CHECK: renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: bb.4.exit:
- ; CHECK: liveins: $q1
- ; CHECK: renamable $r0, renamable $r1 = VMOVRRD renamable $d2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, renamable $r3 = VMOVRRD killed renamable $d3, 14 /* CC::al */, $noreg, implicit killed $q1
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0, implicit killed $r1, implicit killed $r2, implicit killed $r3
+ ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCBZ $r2, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 killed renamable $r1, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.4(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $q1 = MVE_VADDi32 renamable $q0, killed renamable $q1, 0, killed $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: tB %bb.4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.exit:
+ ; CHECK-NEXT: liveins: $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $r1 = VMOVRRD renamable $d2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, renamable $r3 = VMOVRRD killed renamable $d3, 14 /* CC::al */, $noreg, implicit killed $q1
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0, implicit killed $r1, implicit killed $r2, implicit killed $r3
bb.0.entry:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r2, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/predicated-liveout.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/predicated-liveout.mir
index 35cc7dd73180..8d2bfb3af6e2 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/predicated-liveout.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/predicated-liveout.mir
@@ -78,35 +78,44 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: predicated_livout
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x40000000), %bb.4(0x40000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: $lr = MVE_WLSTP_16 killed renamable $r2, %bb.4
- ; CHECK: bb.1.for.body.preheader:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.for.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1, $r3
- ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed $r3, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, renamable $q1 = MVE_VLDRBU16_post killed renamable $r1, 8, 0, $noreg, $noreg :: (load (s64) from %ir.input_2_cast, align 1)
- ; CHECK: renamable $r0, renamable $q2 = MVE_VLDRBU16_post killed renamable $r0, 8, 0, $noreg, $noreg :: (load (s64) from %ir.input_1_cast, align 1)
- ; CHECK: renamable $q1 = MVE_VADDi16 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $q0 = MVE_VADDi16 killed renamable $q1, killed renamable $q0, 0, killed $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.middle.block:
- ; CHECK: liveins: $q0
- ; CHECK: renamable $r0 = MVE_VADDVu16no_acc killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
- ; CHECK: bb.4.for.cond.cleanup:
- ; CHECK: liveins: $lr
- ; CHECK: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.4(0x40000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: $lr = MVE_WLSTP_16 killed renamable $r2, %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.for.body.preheader:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.for.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tSUBi8 killed $r3, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, renamable $q1 = MVE_VLDRBU16_post killed renamable $r1, 8, 0, $noreg, $noreg :: (load (s64) from %ir.input_2_cast, align 1)
+ ; CHECK-NEXT: renamable $r0, renamable $q2 = MVE_VLDRBU16_post killed renamable $r0, 8, 0, $noreg, $noreg :: (load (s64) from %ir.input_1_cast, align 1)
+ ; CHECK-NEXT: renamable $q1 = MVE_VADDi16 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi16 killed renamable $q1, killed renamable $q0, 0, killed $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.middle.block:
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0 = MVE_VADDVu16no_acc killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.for.cond.cleanup:
+ ; CHECK-NEXT: liveins: $lr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tBX_RET 14 /* CC::al */, $noreg, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x40000000), %bb.4(0x40000000)
liveins: $r0, $r1, $r2, $lr, $r7
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions-vpt-liveout.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions-vpt-liveout.mir
index 3ef1569829ca..02232a8b5a0a 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions-vpt-liveout.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions-vpt-liveout.mir
@@ -190,7 +190,6 @@
ret i32 %res.0.lcssa
}
- ; Function Attrs: norecurse nounwind readonly
define dso_local arm_aapcs_vfpcc i32 @mul_var_i32(ptr nocapture readonly %a, ptr nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
entry:
%cmp8.not = icmp eq i32 %N, 0
@@ -236,7 +235,6 @@
ret i32 %res.0.lcssa
}
- ; Function Attrs: norecurse nounwind readonly
define dso_local arm_aapcs_vfpcc i32 @add_var_i32(ptr nocapture readonly %a, ptr nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
entry:
%cmp9.not = icmp eq i32 %N, 0
@@ -318,35 +316,42 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: mul_var_i8
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 4, implicit-def $itstate
- ; CHECK: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
- ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r2
- ; CHECK: bb.2.vector.body (align 4):
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRBU32_post killed renamable $r0, 4, 0, $noreg, $noreg :: (load (s32) from %ir.lsr.iv13, align 1)
- ; CHECK: renamable $r1, renamable $q2 = MVE_VLDRBU32_post killed renamable $r1, 4, 0, $noreg, $noreg :: (load (s32) from %ir.lsr.iv1416, align 1)
- ; CHECK: renamable $q1 = nuw nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $q0 = MVE_VADDi32 killed renamable $q0, killed renamable $q1, 0, killed $noreg, $noreg, killed renamable $q0
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.middle.block:
- ; CHECK: liveins: $q0
- ; CHECK: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 4, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
+ ; CHECK-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_register $r7
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body (align 4):
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRBU32_post killed renamable $r0, 4, 0, $noreg, $noreg :: (load (s32) from %ir.lsr.iv13, align 1)
+ ; CHECK-NEXT: renamable $r1, renamable $q2 = MVE_VLDRBU32_post killed renamable $r1, 4, 0, $noreg, $noreg :: (load (s32) from %ir.lsr.iv1416, align 1)
+ ; CHECK-NEXT: renamable $q1 = nuw nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi32 killed renamable $q0, killed renamable $q1, 0, killed $noreg, $noreg, killed renamable $q0
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.middle.block:
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $lr
@@ -424,35 +429,42 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: add_var_i8
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 4, implicit-def $itstate
- ; CHECK: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
- ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r2
- ; CHECK: bb.2.vector.body (align 4):
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRBU32_post killed renamable $r0, 4, 0, $noreg, $noreg :: (load (s32) from %ir.lsr.iv14, align 1)
- ; CHECK: renamable $r1, renamable $q2 = MVE_VLDRBU32_post killed renamable $r1, 4, 0, $noreg, $noreg :: (load (s32) from %ir.lsr.iv1517, align 1)
- ; CHECK: renamable $q1 = MVE_VADDi32 renamable $q0, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q2, 0, killed $noreg, $noreg, killed renamable $q0
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.middle.block:
- ; CHECK: liveins: $q0
- ; CHECK: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 4, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
+ ; CHECK-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_register $r7
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body (align 4):
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRBU32_post killed renamable $r0, 4, 0, $noreg, $noreg :: (load (s32) from %ir.lsr.iv14, align 1)
+ ; CHECK-NEXT: renamable $r1, renamable $q2 = MVE_VLDRBU32_post killed renamable $r1, 4, 0, $noreg, $noreg :: (load (s32) from %ir.lsr.iv1517, align 1)
+ ; CHECK-NEXT: renamable $q1 = MVE_VADDi32 renamable $q0, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q2, 0, killed $noreg, $noreg, killed renamable $q0
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.middle.block:
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $lr
@@ -531,35 +543,42 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: mul_var_i16
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 4, implicit-def $itstate
- ; CHECK: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
- ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r2
- ; CHECK: bb.2.vector.body (align 4):
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv13, align 2)
- ; CHECK: renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv1416, align 2)
- ; CHECK: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $q0 = MVE_VADDi32 killed renamable $q0, killed renamable $q1, 0, killed $noreg, $noreg, killed renamable $q0
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.middle.block:
- ; CHECK: liveins: $q0
- ; CHECK: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 4, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
+ ; CHECK-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_register $r7
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body (align 4):
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv13, align 2)
+ ; CHECK-NEXT: renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv1416, align 2)
+ ; CHECK-NEXT: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi32 killed renamable $q0, killed renamable $q1, 0, killed $noreg, $noreg, killed renamable $q0
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.middle.block:
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $lr
@@ -637,35 +656,42 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: add_var_i16
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 4, implicit-def $itstate
- ; CHECK: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
- ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r2
- ; CHECK: bb.2.vector.body (align 4):
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv14, align 2)
- ; CHECK: renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv1517, align 2)
- ; CHECK: renamable $q1 = MVE_VADDi32 renamable $q0, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q2, 0, killed $noreg, $noreg, killed renamable $q0
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.middle.block:
- ; CHECK: liveins: $q0
- ; CHECK: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 4, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
+ ; CHECK-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_register $r7
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body (align 4):
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv14, align 2)
+ ; CHECK-NEXT: renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv1517, align 2)
+ ; CHECK-NEXT: renamable $q1 = MVE_VADDi32 renamable $q0, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q2, 0, killed $noreg, $noreg, killed renamable $q0
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.middle.block:
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $lr
@@ -743,35 +769,42 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: mul_var_i32
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 4, implicit-def $itstate
- ; CHECK: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
- ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r2
- ; CHECK: bb.2.vector.body (align 4):
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv12, align 4)
- ; CHECK: renamable $r1, renamable $q2 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv1315, align 4)
- ; CHECK: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $q0 = MVE_VADDi32 killed renamable $q0, killed renamable $q1, 0, killed $noreg, $noreg, killed renamable $q0
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.middle.block:
- ; CHECK: liveins: $q0
- ; CHECK: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 4, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
+ ; CHECK-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_register $r7
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body (align 4):
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv12, align 4)
+ ; CHECK-NEXT: renamable $r1, renamable $q2 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv1315, align 4)
+ ; CHECK-NEXT: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi32 killed renamable $q0, killed renamable $q1, 0, killed $noreg, $noreg, killed renamable $q0
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.middle.block:
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $lr
@@ -849,35 +882,42 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: add_var_i32
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 4, implicit-def $itstate
- ; CHECK: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
- ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r2
- ; CHECK: bb.2.vector.body (align 4):
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv13, align 4)
- ; CHECK: renamable $r1, renamable $q2 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv1416, align 4)
- ; CHECK: renamable $q1 = MVE_VADDi32 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q2, 0, killed $noreg, $noreg, killed renamable $q0
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.middle.block:
- ; CHECK: liveins: $q0
- ; CHECK: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 4, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
+ ; CHECK-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_register $r7
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body (align 4):
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv13, align 4)
+ ; CHECK-NEXT: renamable $r1, renamable $q2 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv1416, align 4)
+ ; CHECK-NEXT: renamable $q1 = MVE_VADDi32 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q2, 0, killed $noreg, $noreg, killed renamable $q0
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.middle.block:
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/remove-elem-moves.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/remove-elem-moves.mir
index 26336836c370..345fec361c69 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/remove-elem-moves.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/remove-elem-moves.mir
@@ -140,85 +140,103 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: remove_mov_lr_chain
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.9(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r4, $r5, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -16
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.9, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.1.while.body.preheader:
- ; CHECK: successors: %bb.6(0x40000000), %bb.2(0x40000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: tCMPi8 renamable $r2, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.6, 3 /* CC::lo */, killed $cpsr
- ; CHECK: bb.2.vector.memcheck:
- ; CHECK: successors: %bb.3(0x40000000), %bb.6(0x40000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r3 = t2ADDrs renamable $r0, renamable $r2, 18, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tCMPr killed renamable $r3, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 8, 4, implicit-def $itstate
- ; CHECK: renamable $r3 = t2ADDrs renamable $r1, renamable $r2, 18, 8 /* CC::hi */, $cpsr, $noreg, implicit $itstate
- ; CHECK: tCMPr killed renamable $r3, renamable $r0, 8 /* CC::hi */, killed $cpsr, implicit-def $cpsr, implicit killed $itstate
- ; CHECK: tBcc %bb.6, 8 /* CC::hi */, killed $cpsr
- ; CHECK: bb.3.vector.ph:
- ; CHECK: successors: %bb.4(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r4 = t2BICri renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2SUBri renamable $r4, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r7, dead $cpsr = tSUBrr renamable $r2, renamable $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2ADDrs renamable $r0, renamable $r4, 18, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: dead $lr = tMOVr renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0, dead $cpsr = tSUBi8 killed renamable $r0, 16, 14 /* CC::al */, $noreg
- ; CHECK: $r5 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2ADDrs renamable $r1, renamable $r4, 18, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 16, 14 /* CC::al */, $noreg
- ; CHECK: bb.4.vector.body:
- ; CHECK: successors: %bb.4(0x7c000000), %bb.5(0x04000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r7, $r12
- ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRWU32_pre killed renamable $r0, 16, 0, $noreg, $noreg :: (load (s128) from %ir.scevgep18, align 4)
- ; CHECK: $lr = tMOVr killed $r5, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = nnan ninf nsz arcp contract afn reassoc MVE_VABSf32 killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r1 = MVE_VSTRBU8_pre killed renamable $q0, killed renamable $r1, 16, 0, $noreg, $noreg :: (store (s128) into %ir.scevgep13, align 4)
- ; CHECK: renamable $lr = t2SUBri killed renamable $lr, 1, 14 /* CC::al */, $noreg, def $cpsr
- ; CHECK: $r5 = tMOVr killed $lr, 14 /* CC::al */, $noreg
- ; CHECK: tBcc %bb.4, 1 /* CC::ne */, killed $cpsr
- ; CHECK: tB %bb.5, 14 /* CC::al */, $noreg
- ; CHECK: bb.5.middle.block:
- ; CHECK: successors: %bb.7(0x80000000)
- ; CHECK: liveins: $r2, $r3, $r4, $r7, $r12
- ; CHECK: tCMPr killed renamable $r4, killed renamable $r2, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: $lr = tMOVr killed $r7, 14 /* CC::al */, $noreg
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r4, def $r5, def $r7, def $pc, implicit killed $itstate
- ; CHECK: tB %bb.7, 14 /* CC::al */, $noreg
- ; CHECK: bb.6:
- ; CHECK: successors: %bb.7(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: $lr = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: $r12 = tMOVr killed $r0, 14 /* CC::al */, $noreg
- ; CHECK: $r3 = tMOVr killed $r1, 14 /* CC::al */, $noreg
- ; CHECK: bb.7.while.body.preheader19:
- ; CHECK: successors: %bb.8(0x80000000)
- ; CHECK: liveins: $lr, $r3, $r12
- ; CHECK: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r3, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: bb.8.while.body:
- ; CHECK: successors: %bb.8(0x7c000000), %bb.9(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1
- ; CHECK: renamable $s0 = VLDRS renamable $r1, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
- ; CHECK: renamable $r1, dead $cpsr = tADDi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VABSS killed renamable $s0, 14 /* CC::al */, $noreg
- ; CHECK: VSTRS killed renamable $s0, renamable $r0, 1, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep7)
- ; CHECK: renamable $r0, dead $cpsr = tADDi8 killed renamable $r0, 4, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.8
- ; CHECK: bb.9.while.end:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.9(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r4, $r5, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -16
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.9, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.while.body.preheader:
+ ; CHECK-NEXT: successors: %bb.6(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.6, 3 /* CC::lo */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.memcheck:
+ ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.6(0x40000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3 = t2ADDrs renamable $r0, renamable $r2, 18, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tCMPr killed renamable $r3, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 8, 4, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r3 = t2ADDrs renamable $r1, renamable $r2, 18, 8 /* CC::hi */, $cpsr, $noreg, implicit $itstate
+ ; CHECK-NEXT: tCMPr killed renamable $r3, renamable $r0, 8 /* CC::hi */, killed $cpsr, implicit-def $cpsr, implicit killed $itstate
+ ; CHECK-NEXT: tBcc %bb.6, 8 /* CC::hi */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.vector.ph:
+ ; CHECK-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r4 = t2BICri renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri renamable $r4, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r7, dead $cpsr = tSUBrr renamable $r2, renamable $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2ADDrs renamable $r0, renamable $r4, 18, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = tMOVr renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tSUBi8 killed renamable $r0, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r5 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2ADDrs renamable $r1, renamable $r4, 18, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.vector.body:
+ ; CHECK-NEXT: successors: %bb.4(0x7c000000), %bb.5(0x04000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r7, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q0 = MVE_VLDRWU32_pre killed renamable $r0, 16, 0, $noreg, $noreg :: (load (s128) from %ir.scevgep18, align 4)
+ ; CHECK-NEXT: $lr = tMOVr killed $r5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = nnan ninf nsz arcp contract afn reassoc MVE_VABSf32 killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r1 = MVE_VSTRBU8_pre killed renamable $q0, killed renamable $r1, 16, 0, $noreg, $noreg :: (store (s128) into %ir.scevgep13, align 4)
+ ; CHECK-NEXT: renamable $lr = t2SUBri killed renamable $lr, 1, 14 /* CC::al */, $noreg, def $cpsr
+ ; CHECK-NEXT: $r5 = tMOVr killed $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tBcc %bb.4, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NEXT: tB %bb.5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5.middle.block:
+ ; CHECK-NEXT: successors: %bb.7(0x80000000)
+ ; CHECK-NEXT: liveins: $r2, $r3, $r4, $r7, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCMPr killed renamable $r4, killed renamable $r2, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: $lr = tMOVr killed $r7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r4, def $r5, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: tB %bb.7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6:
+ ; CHECK-NEXT: successors: %bb.7(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $lr = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r12 = tMOVr killed $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r3 = tMOVr killed $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.7.while.body.preheader19:
+ ; CHECK-NEXT: successors: %bb.8(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.8.while.body:
+ ; CHECK-NEXT: successors: %bb.8(0x7c000000), %bb.9(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $s0 = VLDRS renamable $r1, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tADDi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VABSS killed renamable $s0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: VSTRS killed renamable $s0, renamable $r0, 1, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep7)
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tADDi8 killed renamable $r0, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.9.while.end:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r7, def $pc
bb.0.entry:
successors: %bb.9(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r4, $r5, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-while.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-while.mir
index 5dd75d94d319..3a55b4905ec5 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-while.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-while.mir
@@ -34,13 +34,10 @@
ret void
}
- ; Function Attrs: nounwind
declare i32 @llvm.arm.space(i32 immarg, i32) #1
- ; Function Attrs: noduplicate nounwind
declare i1 @llvm.test.set.loop.iterations.i32(i32) #2
- ; Function Attrs: noduplicate nounwind
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #2
attributes #0 = { "target-features"="+lob" }
@@ -96,32 +93,38 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: ne_trip_count
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x40000000), %bb.3(0x40000000)
- ; CHECK: liveins: $lr, $r1, $r2, $r3, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $lr = t2SUBri $r3, 0, 14 /* CC::al */, $noreg, def $cpsr
- ; CHECK: t2Bcc %bb.3, 0 /* CC::eq */, killed $cpsr
- ; CHECK: tB %bb.1, 14 /* CC::al */, $noreg
- ; CHECK: bb.1.do.body.preheader:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r1, $r2, $r3
- ; CHECK: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
- ; CHECK: $lr = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.do.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1
- ; CHECK: dead renamable $r2 = SPACE 4096, undef renamable $r0
- ; CHECK: renamable $r2, renamable $r0 = t2LDR_PRE killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep)
- ; CHECK: early-clobber renamable $r1 = t2STR_PRE killed renamable $r2, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep1)
- ; CHECK: renamable $lr = t2SUBri killed renamable $lr, 1, 14 /* CC::al */, $noreg, def $cpsr
- ; CHECK: t2Bcc %bb.2, 1 /* CC::ne */, killed $cpsr
- ; CHECK: tB %bb.3, 14 /* CC::al */, $noreg
- ; CHECK: bb.3.if.end:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.3(0x40000000)
+ ; CHECK-NEXT: liveins: $lr, $r1, $r2, $r3, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $lr = t2SUBri $r3, 0, 14 /* CC::al */, $noreg, def $cpsr
+ ; CHECK-NEXT: t2Bcc %bb.3, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: tB %bb.1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.do.body.preheader:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.do.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: dead renamable $r2 = SPACE 4096, undef renamable $r0
+ ; CHECK-NEXT: renamable $r2, renamable $r0 = t2LDR_PRE killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep)
+ ; CHECK-NEXT: early-clobber renamable $r1 = t2STR_PRE killed renamable $r2, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep1)
+ ; CHECK-NEXT: renamable $lr = t2SUBri killed renamable $lr, 1, 14 /* CC::al */, $noreg, def $cpsr
+ ; CHECK-NEXT: t2Bcc %bb.2, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NEXT: tB %bb.3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.if.end:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x40000000), %bb.3(0x40000000)
liveins: $r1, $r2, $r3, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revertcallearly.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revertcallearly.mir
index 2f9019ddeca8..f94b78feb53d 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revertcallearly.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revertcallearly.mir
@@ -65,40 +65,48 @@ liveins: []
body: |
; CHECK-LABEL: name: e
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x50000000), %bb.4(0x30000000)
- ; CHECK: [[t2MOVi32imm:%[0-9]+]]:rgpr = t2MOVi32imm @d
- ; CHECK: [[t2LDRi12_:%[0-9]+]]:gprnopc = t2LDRi12 [[t2MOVi32imm]], 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
- ; CHECK: t2CMPri [[t2LDRi12_]], 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2Bcc %bb.4, 4 /* CC::mi */, $cpsr
- ; CHECK: t2B %bb.1, 14 /* CC::al */, $noreg
- ; CHECK: bb.1.for.cond1.preheader.preheader:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: [[t2ADDri:%[0-9]+]]:rgpr = t2ADDri [[t2LDRi12_]], 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: [[tMOVr:%[0-9]+]]:gprlr = tMOVr killed [[t2ADDri]], 14 /* CC::al */, $noreg
- ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY [[tMOVr]]
- ; CHECK: [[t2MOVi32imm1:%[0-9]+]]:rgpr = t2MOVi32imm @c
- ; CHECK: [[t2MOVi:%[0-9]+]]:rgpr = t2MOVi 24, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: bb.2.for.cond1.preheader:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: [[PHI:%[0-9]+]]:gprlr = PHI [[COPY]], %bb.1, %3, %bb.2
- ; CHECK: ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
- ; CHECK: $r0 = COPY [[t2MOVi32imm1]]
- ; CHECK: $r1 = COPY [[t2MOVi]]
- ; CHECK: tBL 14 /* CC::al */, $noreg, &__aeabi_memclr4, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit $r1, implicit-def $sp
- ; CHECK: ADJCALLSTACKUP 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
- ; CHECK: [[t2SUBri:%[0-9]+]]:gprlr = t2SUBri [[PHI]], 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY [[t2SUBri]]
- ; CHECK: t2CMPri [[t2SUBri]], 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2Bcc %bb.2, 1 /* CC::ne */, $cpsr
- ; CHECK: t2B %bb.3, 14 /* CC::al */, $noreg
- ; CHECK: bb.3.for.cond.for.end9_crit_edge:
- ; CHECK: successors: %bb.4(0x80000000)
- ; CHECK: [[t2MOVi1:%[0-9]+]]:rgpr = t2MOVi -1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: t2STRi12 killed [[t2MOVi1]], [[t2MOVi32imm]], 0, 14 /* CC::al */, $noreg :: (store (s32) into @d)
- ; CHECK: bb.4.for.end9:
- ; CHECK: [[DEF:%[0-9]+]]:gpr = IMPLICIT_DEF
- ; CHECK: $r0 = COPY [[DEF]]
- ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit $r0
+ ; CHECK-NEXT: successors: %bb.1(0x50000000), %bb.4(0x30000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[t2MOVi32imm:%[0-9]+]]:rgpr = t2MOVi32imm @d
+ ; CHECK-NEXT: [[t2LDRi12_:%[0-9]+]]:gprnopc = t2LDRi12 [[t2MOVi32imm]], 0, 14 /* CC::al */, $noreg :: (dereferenceable load (s32) from @d)
+ ; CHECK-NEXT: t2CMPri [[t2LDRi12_]], 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2Bcc %bb.4, 4 /* CC::mi */, $cpsr
+ ; CHECK-NEXT: t2B %bb.1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.for.cond1.preheader.preheader:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[t2ADDri:%[0-9]+]]:rgpr = t2ADDri [[t2LDRi12_]], 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: [[tMOVr:%[0-9]+]]:gprlr = tMOVr killed [[t2ADDri]], 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY [[tMOVr]]
+ ; CHECK-NEXT: [[t2MOVi32imm1:%[0-9]+]]:rgpr = t2MOVi32imm @c
+ ; CHECK-NEXT: [[t2MOVi:%[0-9]+]]:rgpr = t2MOVi 24, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.for.cond1.preheader:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:gprlr = PHI [[COPY]], %bb.1, %3, %bb.2
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: $r0 = COPY [[t2MOVi32imm1]]
+ ; CHECK-NEXT: $r1 = COPY [[t2MOVi]]
+ ; CHECK-NEXT: tBL 14 /* CC::al */, $noreg, &__aeabi_memclr4, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit $r1, implicit-def $sp
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
+ ; CHECK-NEXT: [[t2SUBri:%[0-9]+]]:gprlr = t2SUBri [[PHI]], 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[t2SUBri]]
+ ; CHECK-NEXT: t2CMPri [[t2SUBri]], 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2Bcc %bb.2, 1 /* CC::ne */, $cpsr
+ ; CHECK-NEXT: t2B %bb.3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.for.end9_crit_edge:
+ ; CHECK-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[t2MOVi1:%[0-9]+]]:rgpr = t2MOVi -1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: t2STRi12 killed [[t2MOVi1]], [[t2MOVi32imm]], 0, 14 /* CC::al */, $noreg :: (store (s32) into @d)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.for.end9:
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr = IMPLICIT_DEF
+ ; CHECK-NEXT: $r0 = COPY [[DEF]]
+ ; CHECK-NEXT: tBX_RET 14 /* CC::al */, $noreg, implicit $r0
bb.0.entry:
successors: %bb.1(0x50000000), %bb.4(0x30000000)
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-def-no-mov.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-def-no-mov.mir
index f7bac9107f55..4ee131d1d46e 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-def-no-mov.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-def-no-mov.mir
@@ -90,28 +90,34 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: do_copy
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $lr = t2DLS killed $r0
- ; CHECK: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi3 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: bb.1.preheader:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1
- ; CHECK: $lr = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.while.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1
- ; CHECK: renamable $r2, renamable $r1 = t2LDR_PRE killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep6)
- ; CHECK: early-clobber renamable $r0 = t2STR_PRE killed renamable $r2, killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep2)
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.while.end:
- ; CHECK: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $lr = t2DLS killed $r0
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi3 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.preheader:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $lr = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.while.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r2, renamable $r1 = t2LDR_PRE killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep6)
+ ; CHECK-NEXT: early-clobber renamable $r0 = t2STR_PRE killed renamable $r2, killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep2)
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.while.end:
+ ; CHECK-NEXT: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-retaining.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-retaining.mir
index 4e4923a3cead..8e172f1553fc 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-retaining.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-retaining.mir
@@ -116,33 +116,39 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test_vqrshruntq_n_s32
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
- ; CHECK: bb.1.loop.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r4 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
- ; CHECK: dead $lr = MVE_DLSTP_32 killed renamable $r3
- ; CHECK: $r12 = tMOVr killed $r4, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.loop.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r12
- ; CHECK: $lr = tMOVr $r12, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.addr.b, align 4)
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg :: (load (s128) from %ir.addr.a, align 4)
- ; CHECK: renamable $q1 = MVE_VQSHRUNs32th killed renamable $q1, killed renamable $q0, 3, 0, $noreg, $noreg
- ; CHECK: renamable $r2 = MVE_VSTRWU32_post killed renamable $q1, killed renamable $r2, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.addr.c, align 4)
- ; CHECK: dead $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.loop.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r4 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: dead $lr = MVE_DLSTP_32 killed renamable $r3
+ ; CHECK-NEXT: $r12 = tMOVr killed $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.loop.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $lr = tMOVr $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.addr.b, align 4)
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg :: (load (s128) from %ir.addr.a, align 4)
+ ; CHECK-NEXT: renamable $q1 = MVE_VQSHRUNs32th killed renamable $q1, killed renamable $q0, 3, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r2 = MVE_VSTRWU32_post killed renamable $q1, killed renamable $r2, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.addr.c, align 4)
+ ; CHECK-NEXT: dead $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r4, $lr
@@ -216,34 +222,40 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test_vqrshruntq_n_s16
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
- ; CHECK: bb.1.loop.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r12 = t2LDRi12 $sp, 8, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
- ; CHECK: dead $lr = MVE_DLSTP_16 killed renamable $r3
- ; CHECK: $r4 = tMOVr killed $r12, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.loop.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r4
- ; CHECK: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4, dead $cpsr = tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRHU16_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.addr.b, align 2)
- ; CHECK: renamable $q1 = MVE_VLDRHU16 killed renamable $r0, 0, 0, $noreg, $noreg :: (load (s128) from %ir.addr.a, align 2)
- ; CHECK: $r0 = tMOVr $r1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = MVE_VQSHRUNs16th killed renamable $q1, killed renamable $q0, 1, 0, $noreg, $noreg
- ; CHECK: renamable $r2 = MVE_VSTRHU16_post killed renamable $q1, killed renamable $r2, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.addr.c, align 2)
- ; CHECK: dead $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.loop.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r12 = t2LDRi12 $sp, 8, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: dead $lr = MVE_DLSTP_16 killed renamable $r3
+ ; CHECK-NEXT: $r4 = tMOVr killed $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.loop.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRHU16_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.addr.b, align 2)
+ ; CHECK-NEXT: renamable $q1 = MVE_VLDRHU16 killed renamable $r0, 0, 0, $noreg, $noreg :: (load (s128) from %ir.addr.a, align 2)
+ ; CHECK-NEXT: $r0 = tMOVr $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VQSHRUNs16th killed renamable $q1, killed renamable $q0, 1, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r2 = MVE_VSTRHU16_post killed renamable $q1, killed renamable $r2, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.addr.c, align 2)
+ ; CHECK-NEXT: dead $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r4, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/size-limit.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/size-limit.mir
index c87c0bb1e2fd..8406720f0292 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/size-limit.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/size-limit.mir
@@ -2,7 +2,6 @@
# RUN: llc -mtriple=armv8.1m.main -run-pass=arm-low-overhead-loops --verify-machineinstrs %s -o - | FileCheck %s
--- |
- ; ModuleID = 'size-limit.ll'
source_filename = "size-limit.ll"
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "thumbv8.1m.main"
@@ -43,16 +42,12 @@
br i1 %4, label %for.body, label %for.cond.cleanup
}
- ; Function Attrs: nounwind
declare i32 @llvm.arm.space(i32 immarg, i32) #0
- ; Function Attrs: noduplicate nounwind
declare i32 @llvm.start.loop.iterations.i32(i32) #1
- ; Function Attrs: noduplicate nounwind
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #1
- ; Function Attrs: nounwind
declare void @llvm.stackprotector(ptr, ptr) #0
attributes #0 = { nounwind }
@@ -108,34 +103,40 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: size_limit
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.for.body.preheader:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0, dead $cpsr = tSUBi8 killed renamable $r0, 4, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2DLS killed $r3
- ; CHECK: bb.2.for.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: dead renamable $r3 = SPACE 4070, undef renamable $r0
- ; CHECK: renamable $r12, renamable $r1 = t2LDR_PRE killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
- ; CHECK: renamable $r3, renamable $r2 = t2LDR_PRE killed renamable $r2, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
- ; CHECK: renamable $r3 = nsw t2MUL killed renamable $r3, killed renamable $r12, 14 /* CC::al */, $noreg
- ; CHECK: early-clobber renamable $r0 = t2STR_PRE killed renamable $r3, killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep11)
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.for.body.preheader:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tSUBi8 killed renamable $r0, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2DLS killed $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.for.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: dead renamable $r3 = SPACE 4070, undef renamable $r0
+ ; CHECK-NEXT: renamable $r12, renamable $r1 = t2LDR_PRE killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
+ ; CHECK-NEXT: renamable $r3, renamable $r2 = t2LDR_PRE killed renamable $r2, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
+ ; CHECK-NEXT: renamable $r3 = nsw t2MUL killed renamable $r3, killed renamable $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: early-clobber renamable $r0 = t2STR_PRE killed renamable $r3, killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep11)
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/skip-debug.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/skip-debug.mir
index 12bc89481460..43287b3c5f03 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/skip-debug.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/skip-debug.mir
@@ -167,62 +167,71 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: skip_debug
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.4(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r4, $r6
- ; CHECK: DBG_VALUE $r0, $noreg, !17, !DIExpression(), debug-location !23
- ; CHECK: DBG_VALUE $r0, $noreg, !17, !DIExpression(), debug-location !23
- ; CHECK: DBG_VALUE $r1, $noreg, !18, !DIExpression(), debug-location !23
- ; CHECK: DBG_VALUE $r1, $noreg, !18, !DIExpression(), debug-location !23
- ; CHECK: DBG_VALUE $r2, $noreg, !19, !DIExpression(), debug-location !23
- ; CHECK: DBG_VALUE $r2, $noreg, !19, !DIExpression(), debug-location !23
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r6, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -16
- ; CHECK: dead $r7 = frame-setup tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa $r7, 8
- ; CHECK: renamable $r12 = t2LDRi12 renamable $r0, 0, 14 /* CC::al */, $noreg, debug-location !24 :: (load (s32) from %ir.a)
- ; CHECK: DBG_VALUE 0, $noreg, !21, !DIExpression(), debug-location !25
- ; CHECK: DBG_VALUE $r12, $noreg, !20, !DIExpression(), debug-location !23
- ; CHECK: tCBZ $r2, %bb.4, debug-location !28
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r12
- ; CHECK: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg, debug-location !28
- ; CHECK: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg, debug-location !28
- ; CHECK: renamable $r4, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg, debug-location !28
- ; CHECK: renamable $q0 = MVE_VDUP32 killed renamable $r4, 0, $noreg, $noreg, undef renamable $q0, debug-location !28
- ; CHECK: renamable $q0 = MVE_VMOV_to_lane_32 killed renamable $q0, killed renamable $r12, 0, 14 /* CC::al */, $noreg, debug-location !28
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, renamable $r3, 19, 14 /* CC::al */, $noreg, $noreg, debug-location !28
- ; CHECK: renamable $r3, dead $cpsr = tLSRri killed renamable $r3, 2, 14 /* CC::al */, $noreg, debug-location !28
- ; CHECK: renamable $r3 = t2SUBrs renamable $r2, killed renamable $r3, 18, 14 /* CC::al */, $noreg, $noreg, debug-location !28
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg, debug-location !30
- ; CHECK: DBG_VALUE $vpr, $noreg, !17, !DIExpression(), debug-location !30
- ; CHECK: $q1 = MVE_VORR killed $q0, killed $q0, 0, $noreg, $noreg, undef $q1
- ; CHECK: MVE_VPST 8, implicit $vpr, debug-location !30
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRHU32_post killed renamable $r1, 8, 1, killed renamable $vpr, $noreg, debug-location !30 :: (load (s64) from %ir.lsr.iv14, align 2)
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg, debug-location !30
- ; CHECK: renamable $q0 = MVE_VMOVLs16bh killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0, debug-location !30
- ; CHECK: renamable $q0 = MVE_VSUBi32 renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0, debug-location !32
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2, debug-location !29
- ; CHECK: bb.3.middle.block:
- ; CHECK: successors: %bb.4(0x80000000)
- ; CHECK: liveins: $q0, $q1, $r0, $r3
- ; CHECK: renamable $vpr = MVE_VCTP32 killed renamable $r3, 0, $noreg, $noreg, debug-location !30
- ; CHECK: renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr, $noreg, debug-location !32
- ; CHECK: renamable $r12 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg, debug-location !28
- ; CHECK: bb.4.for.cond.cleanup:
- ; CHECK: liveins: $r0, $r12
- ; CHECK: DBG_VALUE $r12, $noreg, !20, !DIExpression(), debug-location !23
- ; CHECK: t2STRi12 killed renamable $r12, killed renamable $r0, 0, 14 /* CC::al */, $noreg, debug-location !33 :: (store (s32) into %ir.a)
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r6, def $r7, def $pc, debug-location !34
+ ; CHECK-NEXT: successors: %bb.4(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r4, $r6
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: DBG_VALUE $r0, $noreg, !17, !DIExpression(), debug-location !23
+ ; CHECK-NEXT: DBG_VALUE $r0, $noreg, !17, !DIExpression(), debug-location !23
+ ; CHECK-NEXT: DBG_VALUE $r1, $noreg, !18, !DIExpression(), debug-location !23
+ ; CHECK-NEXT: DBG_VALUE $r1, $noreg, !18, !DIExpression(), debug-location !23
+ ; CHECK-NEXT: DBG_VALUE $r2, $noreg, !19, !DIExpression(), debug-location !23
+ ; CHECK-NEXT: DBG_VALUE $r2, $noreg, !19, !DIExpression(), debug-location !23
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r6, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r6, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -16
+ ; CHECK-NEXT: dead $r7 = frame-setup tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa $r7, 8
+ ; CHECK-NEXT: renamable $r12 = t2LDRi12 renamable $r0, 0, 14 /* CC::al */, $noreg, debug-location !24 :: (load (s32) from %ir.a)
+ ; CHECK-NEXT: DBG_VALUE 0, $noreg, !21, !DIExpression(), debug-location !25
+ ; CHECK-NEXT: DBG_VALUE $r12, $noreg, !20, !DIExpression(), debug-location !23
+ ; CHECK-NEXT: tCBZ $r2, %bb.4, debug-location !28
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg, debug-location !28
+ ; CHECK-NEXT: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg, debug-location !28
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg, debug-location !28
+ ; CHECK-NEXT: renamable $q0 = MVE_VDUP32 killed renamable $r4, 0, $noreg, $noreg, undef renamable $q0, debug-location !28
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOV_to_lane_32 killed renamable $q0, killed renamable $r12, 0, 14 /* CC::al */, $noreg, debug-location !28
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, renamable $r3, 19, 14 /* CC::al */, $noreg, $noreg, debug-location !28
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tLSRri killed renamable $r3, 2, 14 /* CC::al */, $noreg, debug-location !28
+ ; CHECK-NEXT: renamable $r3 = t2SUBrs renamable $r2, killed renamable $r3, 18, 14 /* CC::al */, $noreg, $noreg, debug-location !28
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg, debug-location !30
+ ; CHECK-NEXT: DBG_VALUE $vpr, $noreg, !17, !DIExpression(), debug-location !30
+ ; CHECK-NEXT: $q1 = MVE_VORR killed $q0, killed $q0, 0, $noreg, $noreg, undef $q1
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr, debug-location !30
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRHU32_post killed renamable $r1, 8, 1, killed renamable $vpr, $noreg, debug-location !30 :: (load (s64) from %ir.lsr.iv14, align 2)
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg, debug-location !30
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVLs16bh killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0, debug-location !30
+ ; CHECK-NEXT: renamable $q0 = MVE_VSUBi32 renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0, debug-location !32
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2, debug-location !29
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.middle.block:
+ ; CHECK-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NEXT: liveins: $q0, $q1, $r0, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 killed renamable $r3, 0, $noreg, $noreg, debug-location !30
+ ; CHECK-NEXT: renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr, $noreg, debug-location !32
+ ; CHECK-NEXT: renamable $r12 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg, debug-location !28
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.for.cond.cleanup:
+ ; CHECK-NEXT: liveins: $r0, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: DBG_VALUE $r12, $noreg, !20, !DIExpression(), debug-location !23
+ ; CHECK-NEXT: t2STRi12 killed renamable $r12, killed renamable $r0, 0, 14 /* CC::al */, $noreg, debug-location !33 :: (store (s32) into %ir.a)
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r6, def $r7, def $pc, debug-location !34
bb.0.entry:
successors: %bb.4(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r4, $r6, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/skip-vpt-debug.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/skip-vpt-debug.mir
index 6c9cd153e03c..a11d5e70876a 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/skip-vpt-debug.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/skip-vpt-debug.mir
@@ -1,12 +1,10 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - --verify-machineinstrs | FileCheck %s
--- |
- ; ModuleID = 'skip-vpt-debug.ll'
source_filename = "skip-vpt-debug.c"
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "thumbv8.1m.main-arm-none-eabihf"
- ; Function Attrs: nofree norecurse nounwind optsize
define hidden void @arm_max_no_idx_f32(ptr nocapture readonly %pSrc, i32 %blockSize, ptr nocapture %pResult) local_unnamed_addr #0 !dbg !13 {
entry:
call void @llvm.dbg.value(metadata ptr %pSrc, metadata !24, metadata !DIExpression()), !dbg !29
@@ -52,25 +50,18 @@
ret void, !dbg !46
}
- ; Function Attrs: nofree nosync nounwind readnone speculatable willreturn
declare void @llvm.dbg.value(metadata, metadata, metadata) #1
- ; Function Attrs: nofree nosync nounwind readnone willreturn
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32) #2
- ; Function Attrs: argmemonly nofree nosync nounwind readonly willreturn
declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>) #3
- ; Function Attrs: nofree nosync nounwind readnone willreturn
declare float @llvm.vector.reduce.fmax.v4f32(<4 x float>) #2
- ; Function Attrs: noduplicate nofree nosync nounwind willreturn
declare i32 @llvm.start.loop.iterations.i32(i32) #4
- ; Function Attrs: noduplicate nofree nosync nounwind willreturn
declare i32 @llvm.loop.decrement.reg.i32(i32, i32) #4
- ; Function Attrs: nounwind readnone
declare <4 x i1> @llvm.arm.mve.vctp32(i32) #5
attributes #0 = { nofree norecurse nounwind optsize "denormal-fp-math"="preserve-sign,preserve-sign" "denormal-fp-math-f32"="ieee,ieee" "frame-pointer"="none" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m55" "target-features"="+armv8.1-m.main,+dsp,+fp-armv8d16,+fp-armv8d16sp,+fp16,+fp64,+fullfp16,+hwdiv,+lob,+mve,+mve.fp,+ras,+thumb-mode,+vfp2,+vfp2sp,+vfp3d16,+vfp3d16sp,+vfp4d16,+vfp4d16sp,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-dotprod,-fp16fml,-hwdiv-arm,-i8mm,-sb,-sha2" }
@@ -186,65 +177,77 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: arm_max_no_idx_f32
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.4(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: DBG_VALUE $r0, $noreg, !24, !DIExpression(), debug-location !29
- ; CHECK: DBG_VALUE $r1, $noreg, !25, !DIExpression(), debug-location !29
- ; CHECK: DBG_VALUE $r1, $noreg, !25, !DIExpression(), debug-location !29
- ; CHECK: DBG_VALUE $r2, $noreg, !26, !DIExpression(), debug-location !29
- ; CHECK: DBG_VALUE $r2, $noreg, !26, !DIExpression(), debug-location !29
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: DBG_VALUE float 0x3810000000000000, $noreg, !27, !DIExpression(), debug-location !29
- ; CHECK: DBG_VALUE $r0, $noreg, !24, !DIExpression(), debug-location !29
- ; CHECK: tCBZ renamable $r1, %bb.4, debug-location !31
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: DBG_VALUE float 0x3810000000000000, $noreg, !27, !DIExpression(), debug-location !29
- ; CHECK: DBG_VALUE $r2, $noreg, !26, !DIExpression(), debug-location !29
- ; CHECK: DBG_VALUE $r1, $noreg, !25, !DIExpression(), debug-location !29
- ; CHECK: DBG_VALUE $r0, $noreg, !24, !DIExpression(), debug-location !29
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 1152, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r1, debug-location !31
- ; CHECK: bb.2.vector.body (align 4):
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r2
- ; CHECK: DBG_VALUE float 0x3810000000000000, $noreg, !27, !DIExpression(), debug-location !29
- ; CHECK: DBG_VALUE $r2, $noreg, !26, !DIExpression(), debug-location !29
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg, debug-location !32 :: (load (s128) from %ir.lsr.iv12, align 4, !tbaa !34)
- ; CHECK: DBG_VALUE $r0, $noreg, !24, !DIExpression(DW_OP_LLVM_entry_value, 1), debug-location !29
- ; CHECK: MVE_VPTv4f32 8, renamable $q1, renamable $q0, 12, implicit-def $vpr, debug-location !40
- ; CHECK: renamable $q0 = MVE_VORR killed renamable $q1, killed renamable $q1, 1, killed renamable $vpr, $noreg, killed renamable $q0, debug-location !40
- ; CHECK: DBG_VALUE $r1, $noreg, !25, !DIExpression(DW_OP_LLVM_entry_value, 1), debug-location !29
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.middle.block:
- ; CHECK: successors: %bb.5(0x80000000)
- ; CHECK: liveins: $q0, $r2
- ; CHECK: DBG_VALUE float 0x3810000000000000, $noreg, !27, !DIExpression(), debug-location !29
- ; CHECK: DBG_VALUE $r2, $noreg, !26, !DIExpression(), debug-location !29
- ; CHECK: renamable $s4 = nnan ninf nsz VFP_VMAXNMS renamable $s2, renamable $s3, debug-location !31
- ; CHECK: renamable $s0 = nnan ninf nsz VFP_VMAXNMS killed renamable $s0, killed renamable $s1, implicit killed $q0, debug-location !31
- ; CHECK: renamable $s0 = nnan ninf nsz VFP_VMAXNMS killed renamable $s0, killed renamable $s4, debug-location !31
- ; CHECK: tB %bb.5, 14 /* CC::al */, $noreg
- ; CHECK: bb.4:
- ; CHECK: successors: %bb.5(0x80000000)
- ; CHECK: liveins: $r2
- ; CHECK: DBG_VALUE float 0x3810000000000000, $noreg, !27, !DIExpression(), debug-location !29
- ; CHECK: DBG_VALUE $r2, $noreg, !26, !DIExpression(), debug-location !29
- ; CHECK: DBG_VALUE $r1, $noreg, !25, !DIExpression(), debug-location !29
- ; CHECK: DBG_VALUE $r0, $noreg, !24, !DIExpression(), debug-location !29
- ; CHECK: renamable $s0 = VLDRS %const.0, 0, 14 /* CC::al */, $noreg :: (load (s32) from constant-pool)
- ; CHECK: bb.5.while.end:
- ; CHECK: liveins: $r2, $s0
- ; CHECK: DBG_VALUE float 0x3810000000000000, $noreg, !27, !DIExpression(), debug-location !29
- ; CHECK: DBG_VALUE $r2, $noreg, !26, !DIExpression(), debug-location !29
- ; CHECK: VSTRS killed renamable $s0, killed renamable $r2, 0, 14 /* CC::al */, $noreg, debug-location !45 :: (store (s32) into %ir.pResult, !tbaa !34)
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, debug-location !46
- ; CHECK: bb.6 (align 4):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 4
+ ; CHECK-NEXT: successors: %bb.4(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: DBG_VALUE $r0, $noreg, !24, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: DBG_VALUE $r1, $noreg, !25, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: DBG_VALUE $r1, $noreg, !25, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: DBG_VALUE $r2, $noreg, !26, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: DBG_VALUE $r2, $noreg, !26, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: DBG_VALUE float 0x3810000000000000, $noreg, !27, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: DBG_VALUE $r0, $noreg, !24, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: tCBZ renamable $r1, %bb.4, debug-location !31
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: DBG_VALUE float 0x3810000000000000, $noreg, !27, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: DBG_VALUE $r2, $noreg, !26, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: DBG_VALUE $r1, $noreg, !25, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: DBG_VALUE $r0, $noreg, !24, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 1152, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r1, debug-location !31
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body (align 4):
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: DBG_VALUE float 0x3810000000000000, $noreg, !27, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: DBG_VALUE $r2, $noreg, !26, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg, debug-location !32 :: (load (s128) from %ir.lsr.iv12, align 4, !tbaa !34)
+ ; CHECK-NEXT: DBG_VALUE $r0, $noreg, !24, !DIExpression(DW_OP_LLVM_entry_value, 1), debug-location !29
+ ; CHECK-NEXT: MVE_VPTv4f32 8, renamable $q1, renamable $q0, 12, implicit-def $vpr, debug-location !40
+ ; CHECK-NEXT: renamable $q0 = MVE_VORR killed renamable $q1, killed renamable $q1, 1, killed renamable $vpr, $noreg, killed renamable $q0, debug-location !40
+ ; CHECK-NEXT: DBG_VALUE $r1, $noreg, !25, !DIExpression(DW_OP_LLVM_entry_value, 1), debug-location !29
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.middle.block:
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
+ ; CHECK-NEXT: liveins: $q0, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: DBG_VALUE float 0x3810000000000000, $noreg, !27, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: DBG_VALUE $r2, $noreg, !26, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: renamable $s4 = nnan ninf nsz VFP_VMAXNMS renamable $s2, renamable $s3, debug-location !31
+ ; CHECK-NEXT: renamable $s0 = nnan ninf nsz VFP_VMAXNMS killed renamable $s0, killed renamable $s1, implicit killed $q0, debug-location !31
+ ; CHECK-NEXT: renamable $s0 = nnan ninf nsz VFP_VMAXNMS killed renamable $s0, killed renamable $s4, debug-location !31
+ ; CHECK-NEXT: tB %bb.5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
+ ; CHECK-NEXT: liveins: $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: DBG_VALUE float 0x3810000000000000, $noreg, !27, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: DBG_VALUE $r2, $noreg, !26, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: DBG_VALUE $r1, $noreg, !25, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: DBG_VALUE $r0, $noreg, !24, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: renamable $s0 = VLDRS %const.0, 0, 14 /* CC::al */, $noreg :: (load (s32) from constant-pool)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5.while.end:
+ ; CHECK-NEXT: liveins: $r2, $s0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: DBG_VALUE float 0x3810000000000000, $noreg, !27, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: DBG_VALUE $r2, $noreg, !26, !DIExpression(), debug-location !29
+ ; CHECK-NEXT: VSTRS killed renamable $s0, killed renamable $r2, 0, 14 /* CC::al */, $noreg, debug-location !45 :: (store (s32) into %ir.pResult, !tbaa !34)
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, debug-location !46
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6 (align 4):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 4
bb.0.entry:
successors: %bb.4(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/subreg-liveness.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/subreg-liveness.mir
index 046b5bf2f349..898e6b15e18f 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/subreg-liveness.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/subreg-liveness.mir
@@ -69,45 +69,54 @@ stack:
body: |
; CHECK-LABEL: name: test
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x50000000), %bb.3(0x30000000)
- ; CHECK: liveins: $lr, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r2, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.3, 11 /* CC::lt */, killed $cpsr
- ; CHECK: bb.1.while.body.preheader:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r1, $r2
- ; CHECK: $r0 = tMOVr $r2, 14 /* CC::al */, $noreg
- ; CHECK: tCMPi8 renamable $r2, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 10, 8, implicit-def $itstate
- ; CHECK: renamable $r0 = tMOVi8 $noreg, 4, 10 /* CC::ge */, killed $cpsr, implicit killed renamable $r0, implicit killed $itstate
- ; CHECK: renamable $r0, dead $cpsr = tSUBrr renamable $r2, killed renamable $r0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0, dead $cpsr = tADDi8 killed renamable $r0, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r0, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 1, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = t2DLS killed renamable $r0
- ; CHECK: bb.2.while.body (align 4):
- ; CHECK: successors: %bb.2(0x7c000000), %bb.4(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r1, $r2
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r1, renamable $q1 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, killed renamable $vpr, $lr :: (load (s128) from %ir.y.addr.0161, align 4)
- ; CHECK: renamable $q0 = MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $lr, undef renamable $q0
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: tB %bb.4, 14 /* CC::al */, $noreg
- ; CHECK: bb.3:
- ; CHECK: successors: %bb.4(0x80000000)
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 1, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: bb.4.while.end:
- ; CHECK: liveins: $d0
- ; CHECK: renamable $r0, renamable $r1 = VMOVRRD killed renamable $d0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0 = nsw tADDhirr killed renamable $r0, killed renamable $r1, 14 /* CC::al */, $noreg
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x50000000), %bb.3(0x30000000)
+ ; CHECK-NEXT: liveins: $lr, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.3, 11 /* CC::lt */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.while.body.preheader:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 10, 8, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r0 = tMOVi8 $noreg, 4, 10 /* CC::ge */, killed $cpsr, implicit killed renamable $r0, implicit killed $itstate
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tSUBrr renamable $r2, killed renamable $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tADDi8 killed renamable $r0, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r0, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 1, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = t2DLS killed renamable $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.while.body (align 4):
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.4(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r1, renamable $q1 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, killed renamable $vpr, $lr :: (load (s128) from %ir.y.addr.0161, align 4)
+ ; CHECK-NEXT: renamable $q0 = MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $lr, undef renamable $q0
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: tB %bb.4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 1, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.while.end:
+ ; CHECK-NEXT: liveins: $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $r1 = VMOVRRD killed renamable $d0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0 = nsw tADDhirr killed renamable $r0, killed renamable $r1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x50000000), %bb.3(0x30000000)
liveins: $r1, $r2, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unpredicated-max.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unpredicated-max.mir
index 9afdce1d9280..c2536942d982 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unpredicated-max.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unpredicated-max.mir
@@ -73,42 +73,48 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: variant_max_use
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r5
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r5, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -8
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r5, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $r12 = t2MOVi16 32768, 14 /* CC::al */, $noreg
- ; CHECK: $r12 = t2MOVTi16 killed $r12, 65535, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = t2DLS renamable $r3
- ; CHECK: $r5 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r5, $r12
- ; CHECK: $r3 = tMOVr $r12, 14 /* CC::al */, $noreg
- ; CHECK: renamable $vpr = MVE_VCTP16 renamable $r2, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRHU16_post killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $r3 = MVE_VMAXVs16 killed renamable $r3, killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: $lr = tMOVr $r5, 14 /* CC::al */, $noreg
- ; CHECK: early-clobber renamable $r1 = t2STRH_POST killed renamable $r3, killed renamable $r1, 2, 14 /* CC::al */, $noreg :: (store (s16) into %ir.lsr.iv.2)
- ; CHECK: renamable $r5, dead $cpsr = nsw tSUBi8 killed $r5, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 8, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r5, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r5, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r5, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $r12 = t2MOVi16 32768, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r12 = t2MOVTi16 killed $r12, 65535, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r3
+ ; CHECK-NEXT: $r5 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r5, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r3 = tMOVr $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP16 renamable $r2, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q0 = MVE_VLDRHU16_post killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $r3 = MVE_VMAXVs16 killed renamable $r3, killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: $lr = tMOVr $r5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: early-clobber renamable $r1 = t2STRH_POST killed renamable $r3, killed renamable $r1, 2, 14 /* CC::al */, $noreg :: (store (s16) into %ir.lsr.iv.2)
+ ; CHECK-NEXT: renamable $r5, dead $cpsr = nsw tSUBi8 killed $r5, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 8, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r5, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r5, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unrolled-and-vector.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unrolled-and-vector.mir
index 482a87ee6fb5..0b50c2df4c65 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unrolled-and-vector.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unrolled-and-vector.mir
@@ -231,136 +231,160 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: unrolled_and_vector
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.11(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8, $r9, $r11
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 20
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -20
- ; CHECK: dead $r7 = frame-setup tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa $r7, 8
- ; CHECK: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r8, killed $r9, killed $r11
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r11, -24
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r9, -28
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r8, -32
- ; CHECK: tCMPi8 renamable $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.11, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.1.vector.memcheck:
- ; CHECK: successors: %bb.2(0x40000000), %bb.4(0x40000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r4, dead $cpsr = tADDrr renamable $r0, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r5, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: tCMPr renamable $r4, renamable $r2, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2CSINC $zr, $zr, 9, implicit killed $cpsr
- ; CHECK: tCMPr killed renamable $r5, renamable $r0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r6 = t2CSINC $zr, $zr, 9, implicit killed $cpsr
- ; CHECK: tCMPr killed renamable $r4, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r5 = t2ADDrr renamable $r1, renamable $r3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r4 = t2CSINC $zr, $zr, 9, implicit killed $cpsr
- ; CHECK: tCMPr killed renamable $r5, renamable $r0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r5 = t2CSINC $zr, $zr, 9, implicit killed $cpsr
- ; CHECK: renamable $r5, dead $cpsr = tAND killed renamable $r5, killed renamable $r4, 14 /* CC::al */, $noreg
- ; CHECK: dead renamable $r5, $cpsr = tLSLri killed renamable $r5, 31, 14 /* CC::al */, $noreg
- ; CHECK: t2IT 0, 4, implicit-def $itstate
- ; CHECK: renamable $r6 = t2ANDrr killed renamable $r6, killed renamable $r12, 0 /* CC::eq */, $cpsr, $noreg, implicit killed $r6, implicit $itstate
- ; CHECK: dead renamable $r6 = t2LSLri killed renamable $r6, 31, 0 /* CC::eq */, killed $cpsr, def $cpsr, implicit killed $r6, implicit killed $itstate
- ; CHECK: tBcc %bb.4, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.2.for.body.preheader:
- ; CHECK: successors: %bb.3(0x40000000), %bb.6(0x40000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: renamable $r4, dead $cpsr = tSUBi3 renamable $r3, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2ANDri renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: tCMPi8 killed renamable $r4, 3, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.6, 2 /* CC::hs */, killed $cpsr
- ; CHECK: bb.3:
- ; CHECK: successors: %bb.8(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r12
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: tB %bb.8, 14 /* CC::al */, $noreg
- ; CHECK: bb.4.vector.ph:
- ; CHECK: successors: %bb.5(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: $lr = MVE_DLSTP_8 killed renamable $r3
- ; CHECK: bb.5.vector.body:
- ; CHECK: successors: %bb.5(0x7c000000), %bb.11(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRBU8_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv46, align 1)
- ; CHECK: renamable $r2, renamable $q1 = MVE_VLDRBU8_post killed renamable $r2, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv4749, align 1)
- ; CHECK: renamable $q0 = MVE_VADDi8 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r0 = MVE_VSTRBU8_post killed renamable $q0, killed renamable $r0, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv5052, align 1)
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.5
- ; CHECK: tB %bb.11, 14 /* CC::al */, $noreg
- ; CHECK: bb.6.for.body.preheader.new:
- ; CHECK: successors: %bb.7(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r12
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r3, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: bb.7.for.body:
- ; CHECK: successors: %bb.7(0x7c000000), %bb.8(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r12
- ; CHECK: renamable $r4 = tLDRBr renamable $r1, $r3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep2453)
- ; CHECK: renamable $r9 = t2ADDrr renamable $r1, renamable $r3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r5 = tLDRBr renamable $r2, $r3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep2854)
- ; CHECK: renamable $r6, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r4 = tADDhirr killed renamable $r4, killed renamable $r5, 14 /* CC::al */, $noreg
- ; CHECK: tSTRBr killed renamable $r4, renamable $r0, $r3, 14 /* CC::al */, $noreg :: (store (s8) into %ir.scevgep3255)
- ; CHECK: renamable $r8 = t2LDRBi12 renamable $r9, 1, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep40)
- ; CHECK: renamable $r5 = tLDRBi renamable $r6, 1, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep42)
- ; CHECK: renamable $r8 = tADDhirr killed renamable $r8, killed renamable $r5, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r5, dead $cpsr = tADDrr renamable $r0, renamable $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3, dead $cpsr = nuw tADDi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
- ; CHECK: t2STRBi12 killed renamable $r8, renamable $r5, 1, 14 /* CC::al */, $noreg :: (store (s8) into %ir.scevgep44)
- ; CHECK: renamable $r8 = t2LDRBi12 renamable $r9, 2, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep34)
- ; CHECK: renamable $r4 = tLDRBi renamable $r6, 2, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep36)
- ; CHECK: renamable $r4 = tADDhirr killed renamable $r4, killed renamable $r8, 14 /* CC::al */, $noreg
- ; CHECK: tSTRBi killed renamable $r4, renamable $r5, 2, 14 /* CC::al */, $noreg :: (store (s8) into %ir.scevgep38)
- ; CHECK: renamable $r4 = t2LDRBi12 killed renamable $r9, 3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep22)
- ; CHECK: renamable $r6 = tLDRBi killed renamable $r6, 3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep26)
- ; CHECK: renamable $r4 = tADDhirr killed renamable $r4, killed renamable $r6, 14 /* CC::al */, $noreg
- ; CHECK: tSTRBi killed renamable $r4, killed renamable $r5, 3, 14 /* CC::al */, $noreg :: (store (s8) into %ir.scevgep30)
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.7
- ; CHECK: bb.8.for.cond.cleanup.loopexit.unr-lcssa:
- ; CHECK: successors: %bb.11(0x30000000), %bb.9(0x50000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r12
- ; CHECK: t2CMPri renamable $r12, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.11, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.9.for.body.epil:
- ; CHECK: successors: %bb.11(0x40000000), %bb.10(0x40000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r12
- ; CHECK: renamable $r6 = tLDRBr renamable $r1, $r3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.arrayidx.epil)
- ; CHECK: t2CMPri renamable $r12, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r5 = tLDRBr renamable $r2, $r3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.arrayidx1.epil)
- ; CHECK: renamable $r6 = tADDhirr killed renamable $r6, killed renamable $r5, 14 /* CC::al */, $noreg
- ; CHECK: tSTRBr killed renamable $r6, renamable $r0, $r3, 14 /* CC::al */, $noreg :: (store (s8) into %ir.arrayidx4.epil)
- ; CHECK: tBcc %bb.11, 0 /* CC::eq */, killed $cpsr
- ; CHECK: bb.10.for.body.epil.1:
- ; CHECK: successors: %bb.11(0x40000000), %bb.12(0x40000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r12
- ; CHECK: renamable $r6, dead $cpsr = nuw tADDi3 renamable $r3, 1, 14 /* CC::al */, $noreg
- ; CHECK: t2CMPri killed renamable $r12, 2, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r5 = tLDRBr renamable $r1, $r6, 14 /* CC::al */, $noreg :: (load (s8) from %ir.arrayidx.epil.1)
- ; CHECK: renamable $r4 = tLDRBr renamable $r2, $r6, 14 /* CC::al */, $noreg :: (load (s8) from %ir.arrayidx1.epil.1)
- ; CHECK: renamable $r5 = tADDhirr killed renamable $r5, killed renamable $r4, 14 /* CC::al */, $noreg
- ; CHECK: tSTRBr killed renamable $r5, renamable $r0, killed $r6, 14 /* CC::al */, $noreg :: (store (s8) into %ir.arrayidx4.epil.1)
- ; CHECK: tBcc %bb.12, 1 /* CC::ne */, killed $cpsr
- ; CHECK: bb.11.for.cond.cleanup:
- ; CHECK: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r8, def $r9, def $r11
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
- ; CHECK: bb.12.for.body.epil.2:
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r3, dead $cpsr = nuw tADDi8 killed renamable $r3, 2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1 = tLDRBr killed renamable $r1, $r3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.arrayidx.epil.2)
- ; CHECK: renamable $r2 = tLDRBr killed renamable $r2, $r3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.arrayidx1.epil.2)
- ; CHECK: renamable $r1 = tADDhirr killed renamable $r1, killed renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: tSTRBr killed renamable $r1, killed renamable $r0, killed $r3, 14 /* CC::al */, $noreg :: (store (s8) into %ir.arrayidx4.epil.2)
- ; CHECK: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r8, def $r9, def $r11
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.11(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8, $r9, $r11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 20
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r6, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -20
+ ; CHECK-NEXT: dead $r7 = frame-setup tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa $r7, 8
+ ; CHECK-NEXT: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r8, killed $r9, killed $r11
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r11, -24
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r9, -28
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r8, -32
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.11, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.memcheck:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.4(0x40000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tADDrr renamable $r0, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r5, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tCMPr renamable $r4, renamable $r2, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2CSINC $zr, $zr, 9, implicit killed $cpsr
+ ; CHECK-NEXT: tCMPr killed renamable $r5, renamable $r0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r6 = t2CSINC $zr, $zr, 9, implicit killed $cpsr
+ ; CHECK-NEXT: tCMPr killed renamable $r4, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r5 = t2ADDrr renamable $r1, renamable $r3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4 = t2CSINC $zr, $zr, 9, implicit killed $cpsr
+ ; CHECK-NEXT: tCMPr killed renamable $r5, renamable $r0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r5 = t2CSINC $zr, $zr, 9, implicit killed $cpsr
+ ; CHECK-NEXT: renamable $r5, dead $cpsr = tAND killed renamable $r5, killed renamable $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead renamable $r5, $cpsr = tLSLri killed renamable $r5, 31, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2IT 0, 4, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r6 = t2ANDrr killed renamable $r6, killed renamable $r12, 0 /* CC::eq */, $cpsr, $noreg, implicit killed $r6, implicit $itstate
+ ; CHECK-NEXT: dead renamable $r6 = t2LSLri killed renamable $r6, 31, 0 /* CC::eq */, killed $cpsr, def $cpsr, implicit killed $r6, implicit killed $itstate
+ ; CHECK-NEXT: tBcc %bb.4, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.for.body.preheader:
+ ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.6(0x40000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = tSUBi3 renamable $r3, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2ANDri renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: tCMPi8 killed renamable $r4, 3, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.6, 2 /* CC::hs */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: successors: %bb.8(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tB %bb.8, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.vector.ph:
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $lr = MVE_DLSTP_8 killed renamable $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5.vector.body:
+ ; CHECK-NEXT: successors: %bb.5(0x7c000000), %bb.11(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRBU8_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv46, align 1)
+ ; CHECK-NEXT: renamable $r2, renamable $q1 = MVE_VLDRBU8_post killed renamable $r2, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv4749, align 1)
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi8 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRBU8_post killed renamable $q0, killed renamable $r0, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv5052, align 1)
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.5
+ ; CHECK-NEXT: tB %bb.11, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6.for.body.preheader.new:
+ ; CHECK-NEXT: successors: %bb.7(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r3, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.7.for.body:
+ ; CHECK-NEXT: successors: %bb.7(0x7c000000), %bb.8(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r4 = tLDRBr renamable $r1, $r3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep2453)
+ ; CHECK-NEXT: renamable $r9 = t2ADDrr renamable $r1, renamable $r3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r5 = tLDRBr renamable $r2, $r3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep2854)
+ ; CHECK-NEXT: renamable $r6, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r4 = tADDhirr killed renamable $r4, killed renamable $r5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRBr killed renamable $r4, renamable $r0, $r3, 14 /* CC::al */, $noreg :: (store (s8) into %ir.scevgep3255)
+ ; CHECK-NEXT: renamable $r8 = t2LDRBi12 renamable $r9, 1, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep40)
+ ; CHECK-NEXT: renamable $r5 = tLDRBi renamable $r6, 1, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep42)
+ ; CHECK-NEXT: renamable $r8 = tADDhirr killed renamable $r8, killed renamable $r5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r5, dead $cpsr = tADDrr renamable $r0, renamable $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nuw tADDi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2STRBi12 killed renamable $r8, renamable $r5, 1, 14 /* CC::al */, $noreg :: (store (s8) into %ir.scevgep44)
+ ; CHECK-NEXT: renamable $r8 = t2LDRBi12 renamable $r9, 2, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep34)
+ ; CHECK-NEXT: renamable $r4 = tLDRBi renamable $r6, 2, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep36)
+ ; CHECK-NEXT: renamable $r4 = tADDhirr killed renamable $r4, killed renamable $r8, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRBi killed renamable $r4, renamable $r5, 2, 14 /* CC::al */, $noreg :: (store (s8) into %ir.scevgep38)
+ ; CHECK-NEXT: renamable $r4 = t2LDRBi12 killed renamable $r9, 3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep22)
+ ; CHECK-NEXT: renamable $r6 = tLDRBi killed renamable $r6, 3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep26)
+ ; CHECK-NEXT: renamable $r4 = tADDhirr killed renamable $r4, killed renamable $r6, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRBi killed renamable $r4, killed renamable $r5, 3, 14 /* CC::al */, $noreg :: (store (s8) into %ir.scevgep30)
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.8.for.cond.cleanup.loopexit.unr-lcssa:
+ ; CHECK-NEXT: successors: %bb.11(0x30000000), %bb.9(0x50000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: t2CMPri renamable $r12, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.11, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.9.for.body.epil:
+ ; CHECK-NEXT: successors: %bb.11(0x40000000), %bb.10(0x40000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r6 = tLDRBr renamable $r1, $r3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.arrayidx.epil)
+ ; CHECK-NEXT: t2CMPri renamable $r12, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r5 = tLDRBr renamable $r2, $r3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.arrayidx1.epil)
+ ; CHECK-NEXT: renamable $r6 = tADDhirr killed renamable $r6, killed renamable $r5, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRBr killed renamable $r6, renamable $r0, $r3, 14 /* CC::al */, $noreg :: (store (s8) into %ir.arrayidx4.epil)
+ ; CHECK-NEXT: tBcc %bb.11, 0 /* CC::eq */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.10.for.body.epil.1:
+ ; CHECK-NEXT: successors: %bb.11(0x40000000), %bb.12(0x40000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r6, dead $cpsr = nuw tADDi3 renamable $r3, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2CMPri killed renamable $r12, 2, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r5 = tLDRBr renamable $r1, $r6, 14 /* CC::al */, $noreg :: (load (s8) from %ir.arrayidx.epil.1)
+ ; CHECK-NEXT: renamable $r4 = tLDRBr renamable $r2, $r6, 14 /* CC::al */, $noreg :: (load (s8) from %ir.arrayidx1.epil.1)
+ ; CHECK-NEXT: renamable $r5 = tADDhirr killed renamable $r5, killed renamable $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRBr killed renamable $r5, renamable $r0, killed $r6, 14 /* CC::al */, $noreg :: (store (s8) into %ir.arrayidx4.epil.1)
+ ; CHECK-NEXT: tBcc %bb.12, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.11.for.cond.cleanup:
+ ; CHECK-NEXT: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r8, def $r9, def $r11
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.12.for.body.epil.2:
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nuw tADDi8 killed renamable $r3, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1 = tLDRBr killed renamable $r1, $r3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.arrayidx.epil.2)
+ ; CHECK-NEXT: renamable $r2 = tLDRBr killed renamable $r2, $r3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.arrayidx1.epil.2)
+ ; CHECK-NEXT: renamable $r1 = tADDhirr killed renamable $r1, killed renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tSTRBr killed renamable $r1, killed renamable $r0, killed $r3, 14 /* CC::al */, $noreg :: (store (s8) into %ir.arrayidx4.epil.2)
+ ; CHECK-NEXT: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r8, def $r9, def $r11
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
bb.0.entry:
successors: %bb.11(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $lr, $r8, $r9, $r11
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-def.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-def.mir
index 14a64e9946fd..f2eef49c02f9 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-def.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-def.mir
@@ -32,13 +32,10 @@
ret i32 0
}
- ; Function Attrs: noduplicate nounwind
declare i32 @llvm.start.loop.iterations.i32(i32) #0
- ; Function Attrs: noduplicate nounwind
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #0
- ; Function Attrs: nounwind
declare void @llvm.stackprotector(ptr, ptr) #1
attributes #0 = { noduplicate nounwind }
@@ -93,32 +90,36 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: do_copy
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r7, $lr
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: $lr = tMOVr killed $r0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi3 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = t2LSRri renamable $lr, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $lr = tMOVr renamable $lr, 14 /* CC::al */, $noreg
- ; CHECK: bb.1.while.body:
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: renamable $r3, renamable $r1 = t2LDR_PRE killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
- ; CHECK: tCMPhir renamable $lr, renamable $r2, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $lr = t2SUBri killed renamable $lr, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: t2IT 2, 8, implicit-def $itstate
- ; CHECK: renamable $r3 = tLSRri $noreg, killed renamable $r3, 1, 2 /* CC::hs */, killed $cpsr, implicit renamable $r3, implicit killed $itstate
- ; CHECK: early-clobber renamable $r0 = t2STR_PRE killed renamable $r3, killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep4)
- ; CHECK: t2CMPri renamable $lr, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.1, 4 /* CC::mi */, killed $cpsr
- ; CHECK: tB %bb.2, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.while.end:
- ; CHECK: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r7, $lr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: $lr = tMOVr killed $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi3 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = t2LSRri renamable $lr, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $lr = tMOVr renamable $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.while.body:
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, renamable $r1 = t2LDR_PRE killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
+ ; CHECK-NEXT: tCMPhir renamable $lr, renamable $r2, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $lr = t2SUBri killed renamable $lr, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: t2IT 2, 8, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r3 = tLSRri $noreg, killed renamable $r3, 1, 2 /* CC::hs */, killed $cpsr, implicit renamable $r3, implicit killed $itstate
+ ; CHECK-NEXT: early-clobber renamable $r0 = t2STR_PRE killed renamable $r3, killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep4)
+ ; CHECK-NEXT: t2CMPri renamable $lr, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.1, 4 /* CC::mi */, killed $cpsr
+ ; CHECK-NEXT: tB %bb.2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.while.end:
+ ; CHECK-NEXT: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-use.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-use.mir
index d64e975b7749..f847e6afd69a 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-use.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-use.mir
@@ -32,13 +32,10 @@
ret i32 0
}
- ; Function Attrs: noduplicate nounwind
declare i32 @llvm.start.loop.iterations.i32(i32) #0
- ; Function Attrs: noduplicate nounwind
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #0
- ; Function Attrs: nounwind
declare void @llvm.stackprotector(ptr, ptr) #1
attributes #0 = { noduplicate nounwind }
@@ -93,33 +90,37 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: do_copy
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi3 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = tMOVr renamable $r0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = t2LSRri renamable $r0, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $lr = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: bb.1.while.body:
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: renamable $r3, renamable $r1 = t2LDR_PRE killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
- ; CHECK: tCMPhir renamable $lr, renamable $r2, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $lr = t2SUBri killed renamable $lr, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: t2IT 2, 8, implicit-def $itstate
- ; CHECK: renamable $r3 = tLSRri $noreg, killed renamable $r3, 1, 2 /* CC::hs */, killed $cpsr, implicit killed renamable $r3, implicit killed $itstate
- ; CHECK: early-clobber renamable $r0 = t2STR_PRE killed renamable $r3, killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep4)
- ; CHECK: renamable $lr = tMOVr killed $lr, 14 /* CC::al */, $noreg
- ; CHECK: t2CMPri renamable $lr, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.1, 1 /* CC::ne */, killed $cpsr
- ; CHECK: tB %bb.2, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.while.end:
- ; CHECK: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi3 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = tMOVr renamable $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = t2LSRri renamable $r0, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $lr = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.while.body:
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, renamable $r1 = t2LDR_PRE killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
+ ; CHECK-NEXT: tCMPhir renamable $lr, renamable $r2, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $lr = t2SUBri killed renamable $lr, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: t2IT 2, 8, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r3 = tLSRri $noreg, killed renamable $r3, 1, 2 /* CC::hs */, killed $cpsr, implicit killed renamable $r3, implicit killed $itstate
+ ; CHECK-NEXT: early-clobber renamable $r0 = t2STR_PRE killed renamable $r3, killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep4)
+ ; CHECK-NEXT: renamable $lr = tMOVr killed $lr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2CMPri renamable $lr, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.1, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NEXT: tB %bb.2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.while.end:
+ ; CHECK-NEXT: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-retaining.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-retaining.mir
index 6642c1ad9779..8d6125e3459d 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-retaining.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-retaining.mir
@@ -114,38 +114,44 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test_vmvn
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
- ; CHECK: bb.1.loop.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r4 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
- ; CHECK: dead $lr = t2DLS renamable $r4
- ; CHECK: $r12 = tMOVr killed $r4, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.loop.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r12
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg
- ; CHECK: $lr = tMOVr $r12, 14 /* CC::al */, $noreg
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.a, align 4)
- ; CHECK: renamable $r1, renamable $q1 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.b, align 4)
- ; CHECK: renamable $r12 = t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = MVE_VMVN killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $q0 = MVE_VQSHRNbhs32 killed renamable $q0, killed renamable $q1, 15, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r2 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r2, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.addr.c, align 4)
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.loop.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r4 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r4
+ ; CHECK-NEXT: $r12 = tMOVr killed $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.loop.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg
+ ; CHECK-NEXT: $lr = tMOVr $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.a, align 4)
+ ; CHECK-NEXT: renamable $r1, renamable $q1 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.b, align 4)
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VMVN killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $q0 = MVE_VQSHRNbhs32 killed renamable $q0, killed renamable $q1, 15, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r2 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r2, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.addr.c, align 4)
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r4, $lr
@@ -220,38 +226,44 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test_vorn
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
- ; CHECK: bb.1.loop.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r4 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
- ; CHECK: dead $lr = t2DLS renamable $r4
- ; CHECK: $r12 = tMOVr killed $r4, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.loop.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r12
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.b, align 4)
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.a, align 4)
- ; CHECK: $lr = tMOVr $r12, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VORN renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $q1 = MVE_VQSHRUNs32th killed renamable $q1, killed renamable $q0, 3, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r2 = MVE_VSTRWU32_post killed renamable $q1, killed renamable $r2, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.addr.c, align 4)
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.loop.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r4 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r4
+ ; CHECK-NEXT: $r12 = tMOVr killed $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.loop.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.b, align 4)
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.addr.a, align 4)
+ ; CHECK-NEXT: $lr = tMOVr $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VORN renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $q1 = MVE_VQSHRUNs32th killed renamable $q1, killed renamable $q0, 3, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r2 = MVE_VSTRWU32_post killed renamable $q1, killed renamable $r2, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.addr.c, align 4)
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r4, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-use-after.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-use-after.mir
index 2c8caa162db7..ebae1717fcf8 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-use-after.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-use-after.mir
@@ -88,28 +88,34 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: do_copy
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $r0, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: $lr = t2DLS killed $r0
- ; CHECK: renamable $r0 = t2SUBri killed renamable $lr, 4, 14 /* CC::al */, $noreg, def dead $cpsr
- ; CHECK: renamable $r1, dead $cpsr = tSUBi3 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: bb.1.preheader:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1
- ; CHECK: $lr = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.while.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1
- ; CHECK: renamable $r2, renamable $r1 = t2LDR_PRE killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep6)
- ; CHECK: early-clobber renamable $r0 = t2STR_PRE killed renamable $r2, killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep2)
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.while.end:
- ; CHECK: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: $lr = t2DLS killed $r0
+ ; CHECK-NEXT: renamable $r0 = t2SUBri killed renamable $lr, 4, 14 /* CC::al */, $noreg, def dead $cpsr
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi3 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.preheader:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $lr = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.while.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r2, renamable $r1 = t2LDR_PRE killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep6)
+ ; CHECK-NEXT: early-clobber renamable $r0 = t2STR_PRE killed renamable $r2, killed renamable $r0, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.scevgep2)
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.while.end:
+ ; CHECK-NEXT: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vaddv.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vaddv.mir
index ec17e1e6721e..9acbeb2c1ea0 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vaddv.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vaddv.mir
@@ -844,28 +844,34 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: legal_vaddv_s32
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r2
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1
- ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, killed $noreg, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $r12 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: early-clobber renamable $r1 = t2STR_POST killed renamable $r12, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.store.addr)
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q0 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, killed $noreg, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $r12 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: early-clobber renamable $r1 = t2STR_POST killed renamable $r12, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.store.addr)
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
@@ -943,28 +949,34 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: legal_vaddv_s16
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: $lr = MVE_DLSTP_16 killed renamable $r2
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1
- ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRHU16_post killed renamable $r0, 16, 0, killed $noreg, $noreg :: (load (s128) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $r12 = MVE_VADDVs16no_acc killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: early-clobber renamable $r1 = t2STR_POST killed renamable $r12, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.store.addr)
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $lr = MVE_DLSTP_16 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q0 = MVE_VLDRHU16_post killed renamable $r0, 16, 0, killed $noreg, $noreg :: (load (s128) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $r12 = MVE_VADDVs16no_acc killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: early-clobber renamable $r1 = t2STR_POST killed renamable $r12, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.store.addr)
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
@@ -1042,28 +1054,34 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: legal_vaddv_s8
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: $lr = MVE_DLSTP_8 killed renamable $r2
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1
- ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRBU8_post killed renamable $r0, 16, 0, killed $noreg, $noreg :: (load (s128) from %ir.lsr.iv17, align 1)
- ; CHECK: renamable $r12 = MVE_VADDVs8no_acc killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: early-clobber renamable $r1 = t2STR_POST killed renamable $r12, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.store.addr)
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $lr = MVE_DLSTP_8 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q0 = MVE_VLDRBU8_post killed renamable $r0, 16, 0, killed $noreg, $noreg :: (load (s128) from %ir.lsr.iv17, align 1)
+ ; CHECK-NEXT: renamable $r12 = MVE_VADDVs8no_acc killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: early-clobber renamable $r1 = t2STR_POST killed renamable $r12, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.store.addr)
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
@@ -1140,32 +1158,40 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: legal_vaddva_s32
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.4(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCBZ $r1, %bb.4
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r1
- ; CHECK: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r2
- ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, killed $noreg, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $r2 = MVE_VADDVu32acc killed renamable $r2, killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: liveins: $r2
- ; CHECK: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
- ; CHECK: bb.4:
- ; CHECK: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.4(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCBZ $r1, %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r1
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q0 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, killed $noreg, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $r2 = MVE_VADDVu32acc killed renamable $r2, killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: liveins: $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.4(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r7, $lr
@@ -1249,40 +1275,46 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: illegal_vaddv_s32
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: dead $lr = t2DLS renamable $r12
- ; CHECK: $r3 = tMOVr killed $r12, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $q0 = MVE_VMVN killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = nsw tSUBi8 killed $r3, 1, 14 /* CC::al */, $noreg
- ; CHECK: early-clobber renamable $r1 = t2STR_POST killed renamable $r12, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.store.addr)
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r12
+ ; CHECK-NEXT: $r3 = tMOVr killed $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q0 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $q0 = MVE_VMVN killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nsw tSUBi8 killed $r3, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: early-clobber renamable $r1 = t2STR_POST killed renamable $r12, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.store.addr)
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
@@ -1360,44 +1392,52 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: illegal_vaddva_s32
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.4(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCBZ $r1, %bb.4
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1
- ; CHECK: renamable $r2, dead $cpsr = tADDi3 renamable $r1, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = t2BICri killed renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: dead $lr = t2DLS renamable $r2
- ; CHECK: $r3 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r1, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
- ; CHECK: $lr = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VMVN killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r3, dead $cpsr = nsw tSUBi8 killed $r3, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = MVE_VADDVu32acc killed renamable $r2, killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: liveins: $r2
- ; CHECK: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
- ; CHECK: bb.4:
- ; CHECK: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.4(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCBZ $r1, %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tADDi3 renamable $r1, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = t2BICri killed renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r2
+ ; CHECK-NEXT: $r3 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r1, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q0 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: $lr = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VMVN killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nsw tSUBi8 killed $r3, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = MVE_VADDVu32acc killed renamable $r2, killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: liveins: $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.4(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r7, $lr
@@ -1482,40 +1522,46 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: illegal_vaddv_u32
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: dead $lr = t2DLS renamable $r12
- ; CHECK: $r3 = tMOVr killed $r12, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRHU32_post killed renamable $r0, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $q0 = MVE_VMVN killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = nsw tSUBi8 killed $r3, 1, 14 /* CC::al */, $noreg
- ; CHECK: early-clobber renamable $r1 = t2STR_POST killed renamable $r12, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.store.addr)
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r12
+ ; CHECK-NEXT: $r3 = tMOVr killed $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q0 = MVE_VLDRHU32_post killed renamable $r0, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $q0 = MVE_VMVN killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nsw tSUBi8 killed $r3, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: early-clobber renamable $r1 = t2STR_POST killed renamable $r12, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.store.addr)
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
@@ -1593,44 +1639,52 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: illegal_vaddva_u32
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.4(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCBZ $r1, %bb.4
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1
- ; CHECK: renamable $r2, dead $cpsr = tADDi3 renamable $r1, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = t2BICri killed renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: dead $lr = t2DLS renamable $r2
- ; CHECK: $r3 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r1, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRHU32_post killed renamable $r0, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
- ; CHECK: $lr = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VMVN killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r3, dead $cpsr = nsw tSUBi8 killed $r3, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = MVE_VADDVu32acc killed renamable $r2, killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: liveins: $r2
- ; CHECK: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
- ; CHECK: bb.4:
- ; CHECK: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.4(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCBZ $r1, %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tADDi3 renamable $r1, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = t2BICri killed renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r2
+ ; CHECK-NEXT: $r3 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r1, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q0 = MVE_VLDRHU32_post killed renamable $r0, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: $lr = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VMVN killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nsw tSUBi8 killed $r3, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = MVE_VADDVu32acc killed renamable $r2, killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: liveins: $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.4(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r7, $lr
@@ -1718,43 +1772,49 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: illegal_vaddv_s16
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
- ; CHECK: dead $lr = t2DLS renamable $r12
- ; CHECK: $r4 = tMOVr killed $r12, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $q0, $r0, $r1, $r2, $r4
- ; CHECK: renamable $vpr = MVE_VCTP16 renamable $r2, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRBS16_post killed renamable $r0, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 1)
- ; CHECK: renamable $q1 = MVE_VSUBi16 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = MVE_VADDVu16no_acc killed renamable $q1, 0, $noreg, $noreg
- ; CHECK: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2SXTH killed renamable $r12, 0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 8, 14 /* CC::al */, $noreg
- ; CHECK: early-clobber renamable $r1 = t2STR_POST killed renamable $r3, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.store.addr)
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r12
+ ; CHECK-NEXT: $r4 = tMOVr killed $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $q0, $r0, $r1, $r2, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP16 renamable $r2, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRBS16_post killed renamable $r0, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 1)
+ ; CHECK-NEXT: renamable $q1 = MVE_VSUBi16 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = MVE_VADDVu16no_acc killed renamable $q1, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2SXTH killed renamable $r12, 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 8, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: early-clobber renamable $r1 = t2STR_POST killed renamable $r3, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.store.addr)
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
bb.0.entry:
    successors: %bb.1(0x80000000)
    liveins: $r0, $r1, $r2, $r4, $lr
@@ -1841,47 +1901,55 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: illegal_vaddva_s16
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.4(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: tCBZ $r1, %bb.4
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $d0 = VMOVDRR killed renamable $r2, killed renamable $r3, 14 /* CC::al */, $noreg, implicit-def $q0
- ; CHECK: renamable $r2, dead $cpsr = tADDi3 renamable $r1, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = t2BICri killed renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $d1 = VLDRD $sp, 2, 14 /* CC::al */, $noreg, implicit killed $q0, implicit-def $q0 :: (load (s64) from %fixed-stack.0)
- ; CHECK: renamable $r2 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = t2DLS renamable $r2
- ; CHECK: $r4 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $q0, $r0, $r1, $r3, $r4
- ; CHECK: renamable $vpr = MVE_VCTP16 renamable $r1, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRBS16_post killed renamable $r0, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 1)
- ; CHECK: renamable $q1 = MVE_VSUBi16 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = MVE_VADDVu16no_acc killed renamable $q1, 0, $noreg, $noreg
- ; CHECK: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2SXTAH killed renamable $r3, killed renamable $r2, 0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 8, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: liveins: $r3
- ; CHECK: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc, implicit killed $r0
- ; CHECK: bb.4:
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.4(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: tCBZ $r1, %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $d0 = VMOVDRR killed renamable $r2, killed renamable $r3, 14 /* CC::al */, $noreg, implicit-def $q0
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tADDi3 renamable $r1, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = t2BICri killed renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $d1 = VLDRD $sp, 2, 14 /* CC::al */, $noreg, implicit killed $q0, implicit-def $q0 :: (load (s64) from %fixed-stack.0)
+ ; CHECK-NEXT: renamable $r2 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r2
+ ; CHECK-NEXT: $r4 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $q0, $r0, $r1, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP16 renamable $r1, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRBS16_post killed renamable $r0, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 1)
+ ; CHECK-NEXT: renamable $q1 = MVE_VSUBi16 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = MVE_VADDVu16no_acc killed renamable $q1, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2SXTAH killed renamable $r3, killed renamable $r2, 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 8, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: liveins: $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc, implicit killed $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.4(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $lr
@@ -1972,43 +2040,49 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: illegal_vaddv_u16
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
- ; CHECK: dead $lr = t2DLS renamable $r12
- ; CHECK: $r4 = tMOVr killed $r12, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $q0, $r0, $r1, $r2, $r4
- ; CHECK: renamable $vpr = MVE_VCTP16 renamable $r2, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRHU16_post killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $q1 = MVE_VSUBi16 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = MVE_VADDVu16no_acc killed renamable $q1, 0, $noreg, $noreg
- ; CHECK: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2UXTH killed renamable $r12, 0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 8, 14 /* CC::al */, $noreg
- ; CHECK: early-clobber renamable $r1 = t2STR_POST killed renamable $r3, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.store.addr)
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r12
+ ; CHECK-NEXT: $r4 = tMOVr killed $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $q0, $r0, $r1, $r2, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP16 renamable $r2, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRHU16_post killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $q1 = MVE_VSUBi16 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = MVE_VADDVu16no_acc killed renamable $q1, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2UXTH killed renamable $r12, 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 8, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: early-clobber renamable $r1 = t2STR_POST killed renamable $r3, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.store.addr)
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r4, $lr
@@ -2094,47 +2168,55 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: illegal_vaddva_u16
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.4(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: tCBZ $r1, %bb.4
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $d0 = VMOVDRR killed renamable $r2, killed renamable $r3, 14 /* CC::al */, $noreg, implicit-def $q0
- ; CHECK: renamable $r2, dead $cpsr = tADDi3 renamable $r1, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = t2BICri killed renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $d1 = VLDRD $sp, 2, 14 /* CC::al */, $noreg, implicit killed $q0, implicit-def $q0 :: (load (s64) from %fixed-stack.0)
- ; CHECK: renamable $r2 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = t2DLS renamable $r2
- ; CHECK: $r4 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $q0, $r0, $r1, $r3, $r4
- ; CHECK: renamable $vpr = MVE_VCTP16 renamable $r1, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRHU16_post killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $q1 = MVE_VSUBi16 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = MVE_VADDVu16no_acc killed renamable $q1, 0, $noreg, $noreg
- ; CHECK: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2UXTAH killed renamable $r3, killed renamable $r2, 0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 8, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: liveins: $r3
- ; CHECK: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc, implicit killed $r0
- ; CHECK: bb.4:
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.4(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: tCBZ $r1, %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $d0 = VMOVDRR killed renamable $r2, killed renamable $r3, 14 /* CC::al */, $noreg, implicit-def $q0
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tADDi3 renamable $r1, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = t2BICri killed renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $d1 = VLDRD $sp, 2, 14 /* CC::al */, $noreg, implicit killed $q0, implicit-def $q0 :: (load (s64) from %fixed-stack.0)
+ ; CHECK-NEXT: renamable $r2 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r2
+ ; CHECK-NEXT: $r4 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $q0, $r0, $r1, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP16 renamable $r1, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRHU16_post killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $q1 = MVE_VSUBi16 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = MVE_VADDVu16no_acc killed renamable $q1, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2UXTAH killed renamable $r3, killed renamable $r2, 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 8, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: liveins: $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc, implicit killed $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.4(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $lr
@@ -2225,43 +2307,49 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: illegal_vaddv_s8
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 7, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 7, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 7, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 27, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
- ; CHECK: dead $lr = t2DLS renamable $r12
- ; CHECK: $r4 = tMOVr killed $r12, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $q0, $r0, $r1, $r2, $r4
- ; CHECK: renamable $vpr = MVE_VCTP8 renamable $r2, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRBU8_post killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv17, align 1)
- ; CHECK: renamable $q1 = MVE_VEOR killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = MVE_VADDVu8no_acc killed renamable $q1, 0, $noreg, $noreg
- ; CHECK: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2SXTB killed renamable $r12, 0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 16, 14 /* CC::al */, $noreg
- ; CHECK: early-clobber renamable $r1 = t2STR_POST killed renamable $r3, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.store.addr)
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 7, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r3, 7, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 27, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r12
+ ; CHECK-NEXT: $r4 = tMOVr killed $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $q0, $r0, $r1, $r2, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP8 renamable $r2, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRBU8_post killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv17, align 1)
+ ; CHECK-NEXT: renamable $q1 = MVE_VEOR killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = MVE_VADDVu8no_acc killed renamable $q1, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2SXTB killed renamable $r12, 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: early-clobber renamable $r1 = t2STR_POST killed renamable $r3, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.store.addr)
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r4, $lr
@@ -2347,47 +2435,55 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: illegal_vaddva_s8
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.4(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: tCBZ $r1, %bb.4
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $d0 = VMOVDRR killed renamable $r2, killed renamable $r3, 14 /* CC::al */, $noreg, implicit-def $q0
- ; CHECK: renamable $r2, dead $cpsr = tADDi3 renamable $r1, 7, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = t2BICri killed renamable $r2, 7, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 7, 14 /* CC::al */, $noreg
- ; CHECK: renamable $d1 = VLDRD $sp, 2, 14 /* CC::al */, $noreg, implicit killed $q0, implicit-def $q0 :: (load (s64) from %fixed-stack.0)
- ; CHECK: renamable $r2 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 27, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = t2DLS renamable $r2
- ; CHECK: $r4 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $q0, $r0, $r1, $r3, $r4
- ; CHECK: renamable $vpr = MVE_VCTP8 renamable $r1, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRBU8_post killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv17, align 1)
- ; CHECK: renamable $q1 = MVE_VEOR killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = MVE_VADDVu8no_acc killed renamable $q1, 0, $noreg, $noreg
- ; CHECK: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2SXTAB killed renamable $r3, killed renamable $r2, 0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 16, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: liveins: $r3
- ; CHECK: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc, implicit killed $r0
- ; CHECK: bb.4:
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.4(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: tCBZ $r1, %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $d0 = VMOVDRR killed renamable $r2, killed renamable $r3, 14 /* CC::al */, $noreg, implicit-def $q0
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tADDi3 renamable $r1, 7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = t2BICri killed renamable $r2, 7, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $d1 = VLDRD $sp, 2, 14 /* CC::al */, $noreg, implicit killed $q0, implicit-def $q0 :: (load (s64) from %fixed-stack.0)
+ ; CHECK-NEXT: renamable $r2 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 27, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r2
+ ; CHECK-NEXT: $r4 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $q0, $r0, $r1, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP8 renamable $r1, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRBU8_post killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv17, align 1)
+ ; CHECK-NEXT: renamable $q1 = MVE_VEOR killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = MVE_VADDVu8no_acc killed renamable $q1, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2SXTAB killed renamable $r3, killed renamable $r2, 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: liveins: $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc, implicit killed $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.4(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $lr
@@ -2478,43 +2574,49 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: illegal_vaddv_u8
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 7, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 7, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 7, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 27, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
- ; CHECK: dead $lr = t2DLS renamable $r12
- ; CHECK: $r4 = tMOVr killed $r12, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $q0, $r0, $r1, $r2, $r4
- ; CHECK: renamable $vpr = MVE_VCTP8 renamable $r2, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRBU8_post killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv17, align 1)
- ; CHECK: renamable $q1 = MVE_VEOR killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = MVE_VADDVu8no_acc killed renamable $q1, 0, $noreg, $noreg
- ; CHECK: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2UXTB killed renamable $r12, 0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 16, 14 /* CC::al */, $noreg
- ; CHECK: early-clobber renamable $r1 = t2STR_POST killed renamable $r3, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.store.addr)
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 7, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r3, 7, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 27, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r12
+ ; CHECK-NEXT: $r4 = tMOVr killed $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $q0, $r0, $r1, $r2, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP8 renamable $r2, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRBU8_post killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv17, align 1)
+ ; CHECK-NEXT: renamable $q1 = MVE_VEOR killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = MVE_VADDVu8no_acc killed renamable $q1, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2UXTB killed renamable $r12, 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: early-clobber renamable $r1 = t2STR_POST killed renamable $r3, killed renamable $r1, 4, 14 /* CC::al */, $noreg :: (store (s32) into %ir.store.addr)
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r4, $lr
@@ -2600,47 +2702,55 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: illegal_vaddva_u8
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.4(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
- ; CHECK: tCBZ $r1, %bb.4
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $d0 = VMOVDRR killed renamable $r2, killed renamable $r3, 14 /* CC::al */, $noreg, implicit-def $q0
- ; CHECK: renamable $r2, dead $cpsr = tADDi3 renamable $r1, 7, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = t2BICri killed renamable $r2, 7, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 7, 14 /* CC::al */, $noreg
- ; CHECK: renamable $d1 = VLDRD $sp, 2, 14 /* CC::al */, $noreg, implicit killed $q0, implicit-def $q0 :: (load (s64) from %fixed-stack.0)
- ; CHECK: renamable $r2 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 27, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = t2DLS renamable $r2
- ; CHECK: $r4 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $q0, $r0, $r1, $r3, $r4
- ; CHECK: renamable $vpr = MVE_VCTP8 renamable $r1, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRBU8_post killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv17, align 1)
- ; CHECK: renamable $q1 = MVE_VEOR killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = MVE_VADDVu8no_acc killed renamable $q1, 0, $noreg, $noreg
- ; CHECK: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2UXTAB killed renamable $r3, killed renamable $r2, 0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 16, 14 /* CC::al */, $noreg
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.exit:
- ; CHECK: liveins: $r3
- ; CHECK: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc, implicit killed $r0
- ; CHECK: bb.4:
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.4(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK-NEXT: tCBZ $r1, %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $d0 = VMOVDRR killed renamable $r2, killed renamable $r3, 14 /* CC::al */, $noreg, implicit-def $q0
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tADDi3 renamable $r1, 7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = t2BICri killed renamable $r2, 7, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $d1 = VLDRD $sp, 2, 14 /* CC::al */, $noreg, implicit killed $q0, implicit-def $q0 :: (load (s64) from %fixed-stack.0)
+ ; CHECK-NEXT: renamable $r2 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 27, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r2
+ ; CHECK-NEXT: $r4 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $q0, $r0, $r1, $r3, $r4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP8 renamable $r1, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRBU8_post killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv17, align 1)
+ ; CHECK-NEXT: renamable $q1 = MVE_VEOR killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: $lr = tMOVr $r4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = MVE_VADDVu8no_acc killed renamable $q1, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r4, dead $cpsr = nsw tSUBi8 killed $r4, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2UXTAB killed renamable $r3, killed renamable $r2, 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 16, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.exit:
+ ; CHECK-NEXT: liveins: $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc, implicit killed $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.4(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $lr
@@ -2728,35 +2838,43 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: regalloc_legality_vaddva_u32
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x50000000), %bb.4(0x30000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r2, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.4, 11 /* CC::lt */, killed $cpsr
- ; CHECK: bb.1.while.body.preheader:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r2
- ; CHECK: bb.2.while.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r12
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRHU32_post killed renamable $r1, 8, 0, $noreg, $noreg :: (load (s64) from %ir.tmp3, align 2)
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRHU32_post killed renamable $r0, 8, 0, killed $noreg, $noreg :: (load (s64) from %ir.tmp1, align 2)
- ; CHECK: renamable $q0 = MVE_VORR killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r12 = MVE_VADDVu32acc killed renamable $r12, killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.while.end:
- ; CHECK: liveins: $r12
- ; CHECK: $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
- ; CHECK: bb.4:
- ; CHECK: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x50000000), %bb.4(0x30000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.4, 11 /* CC::lt */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.while.body.preheader:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.while.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRHU32_post killed renamable $r1, 8, 0, $noreg, $noreg :: (load (s64) from %ir.tmp3, align 2)
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRHU32_post killed renamable $r0, 8, 0, killed $noreg, $noreg :: (load (s64) from %ir.tmp1, align 2)
+ ; CHECK-NEXT: renamable $q0 = MVE_VORR killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r12 = MVE_VADDVu32acc killed renamable $r12, killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.while.end:
+ ; CHECK-NEXT: liveins: $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x50000000), %bb.4(0x30000000)
liveins: $r0, $r1, $r2, $r7, $lr
@@ -2843,36 +2961,44 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: regalloc_legality_vaddv_u16
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x50000000), %bb.4(0x30000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r2, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.4, 11 /* CC::lt */, killed $cpsr
- ; CHECK: bb.1.while.body.preheader:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_16 killed renamable $r2
- ; CHECK: bb.2.while.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r3
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRHU16_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.tmp3, align 2)
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRHU16_post killed renamable $r0, 16, 0, killed $noreg, $noreg :: (load (s128) from %ir.tmp1, align 2)
- ; CHECK: renamable $q0 = MVE_VORR killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r12 = MVE_VADDVu16no_acc killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: renamable $r3 = t2UXTAH killed renamable $r3, killed renamable $r12, 0, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.while.end:
- ; CHECK: liveins: $r3
- ; CHECK: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
- ; CHECK: bb.4:
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x50000000), %bb.4(0x30000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.4, 11 /* CC::lt */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.while.body.preheader:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_16 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.while.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRHU16_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.tmp3, align 2)
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRHU16_post killed renamable $r0, 16, 0, killed $noreg, $noreg :: (load (s128) from %ir.tmp1, align 2)
+ ; CHECK-NEXT: renamable $q0 = MVE_VORR killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r12 = MVE_VADDVu16no_acc killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2UXTAH killed renamable $r3, killed renamable $r12, 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.while.end:
+ ; CHECK-NEXT: liveins: $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x50000000), %bb.4(0x30000000)
liveins: $r0, $r1, $r2, $r7, $lr
@@ -2960,47 +3086,55 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: regalloc_illegality_vaddva_s32
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x50000000), %bb.4(0x30000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r3, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r3, 8, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: $r2 = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: t2IT 10, 8, implicit-def $itstate
- ; CHECK: renamable $r2 = tMOVi8 $noreg, 8, 10 /* CC::ge */, killed $cpsr, implicit killed renamable $r2, implicit killed $itstate
- ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.4, 11 /* CC::lt */, killed $cpsr
- ; CHECK: bb.1.while.body.preheader:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r2, dead $cpsr = tSUBrr renamable $r3, killed renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2ADDri killed renamable $r2, 7, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r2, killed renamable $r12, 27, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.while.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = MVE_VCTP16 renamable $r3, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRHU16_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.tmp3, align 2)
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRHU16_post killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.tmp1, align 2)
- ; CHECK: renamable $q2 = MVE_VMULLBs16 renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: renamable $q0 = MVE_VMULLTs16 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $q0 = MVE_VADDi32 killed renamable $q0, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r3, dead $cpsr = nsw tSUBi8 killed renamable $r3, 8, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = MVE_VADDVu32acc killed renamable $r2, killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.while.end:
- ; CHECK: liveins: $r2
- ; CHECK: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
- ; CHECK: bb.4:
- ; CHECK: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x50000000), %bb.4(0x30000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r3, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 8, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: $r2 = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2IT 10, 8, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r2 = tMOVi8 $noreg, 8, 10 /* CC::ge */, killed $cpsr, implicit killed renamable $r2, implicit killed $itstate
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.4, 11 /* CC::lt */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.while.body.preheader:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBrr renamable $r3, killed renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2ADDri killed renamable $r2, 7, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $r2, killed renamable $r12, 27, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.while.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP16 renamable $r3, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRHU16_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.tmp3, align 2)
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRHU16_post killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.tmp1, align 2)
+ ; CHECK-NEXT: renamable $q2 = MVE_VMULLBs16 renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $q0 = MVE_VMULLTs16 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi32 killed renamable $q0, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nsw tSUBi8 killed renamable $r3, 8, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = MVE_VADDVu32acc killed renamable $r2, killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.while.end:
+ ; CHECK-NEXT: liveins: $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x50000000), %bb.4(0x30000000)
liveins: $r0, $r1, $r3, $r7, $lr
@@ -3083,49 +3217,57 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: illegal_vmull_non_zero
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x50000000), %bb.4(0x30000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r3, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r3, 8, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: $r2 = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: t2IT 10, 8, implicit-def $itstate
- ; CHECK: renamable $r2 = tMOVi8 $noreg, 8, 10 /* CC::ge */, killed $cpsr, implicit killed renamable $r2, implicit killed $itstate
- ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.4, 11 /* CC::lt */, killed $cpsr
- ; CHECK: bb.1.while.body.preheader:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r2, dead $cpsr = tSUBrr renamable $r3, killed renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2ADDri killed renamable $r2, 7, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = nuw nsw t2ADDrs killed renamable $r2, killed renamable $r12, 27, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: dead $lr = t2DLS renamable $r2
- ; CHECK: $r12 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.while.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3, $r12
- ; CHECK: renamable $vpr = MVE_VCTP16 renamable $r3, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRHU16_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.tmp3, align 2)
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRHU16_post killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.tmp1, align 2)
- ; CHECK: $lr = tMOVr $r12, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VMULLTs16 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r12 = nsw t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = nsw tSUBi8 killed renamable $r3, 8, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = MVE_VADDVu32acc killed renamable $r2, killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.while.end:
- ; CHECK: liveins: $r2
- ; CHECK: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
- ; CHECK: bb.4:
- ; CHECK: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x50000000), %bb.4(0x30000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r3, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 8, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: $r2 = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: t2IT 10, 8, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r2 = tMOVi8 $noreg, 8, 10 /* CC::ge */, killed $cpsr, implicit killed renamable $r2, implicit killed $itstate
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.4, 11 /* CC::lt */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.while.body.preheader:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBrr renamable $r3, killed renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2ADDri killed renamable $r2, 7, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = nuw nsw t2ADDrs killed renamable $r2, killed renamable $r12, 27, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r2
+ ; CHECK-NEXT: $r12 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.while.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP16 renamable $r3, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRHU16_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.tmp3, align 2)
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRHU16_post killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.tmp1, align 2)
+ ; CHECK-NEXT: $lr = tMOVr $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VMULLTs16 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r12 = nsw t2SUBri killed $r12, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nsw tSUBi8 killed renamable $r3, 8, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = MVE_VADDVu32acc killed renamable $r2, killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.while.end:
+ ; CHECK-NEXT: liveins: $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x50000000), %bb.4(0x30000000)
liveins: $r0, $r1, $r3, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vcmp-vpst-combination-across-blocks.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vcmp-vpst-combination-across-blocks.mir
index cb059cc6796d..fb714f80f76f 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vcmp-vpst-combination-across-blocks.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vcmp-vpst-combination-across-blocks.mir
@@ -68,22 +68,27 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: combine_previous
; CHECK: bb.0:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $r0, $r1
- ; CHECK: renamable $q0 = MVE_VDUP32 renamable $r1, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r1
- ; CHECK: bb.1 (align 4):
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VORR renamable $q1, renamable $q1, 0, $noreg, $noreg, killed renamable $q0
- ; CHECK: MVE_VPTv4f32 8, renamable $q1, renamable $q0, 12, implicit-def $vpr
- ; CHECK: renamable $q0 = MVE_VORR killed renamable $q1, killed renamable $q1, 1, killed renamable $vpr, $noreg, killed renamable $q0
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.1
- ; CHECK: bb.2:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ; CHECK: bb.3 (align 4):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 4
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q0 = MVE_VDUP32 renamable $r1, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1 (align 4):
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VORR renamable $q1, renamable $q1, 0, $noreg, $noreg, killed renamable $q0
+ ; CHECK-NEXT: MVE_VPTv4f32 8, renamable $q1, renamable $q0, 12, implicit-def $vpr
+ ; CHECK-NEXT: renamable $q0 = MVE_VORR killed renamable $q1, killed renamable $q1, 1, killed renamable $vpr, $noreg, killed renamable $q0
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3 (align 4):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 4
bb.0:
successors: %bb.6(0x80000000)
liveins: $r0, $r1, $r2
@@ -156,24 +161,29 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: combine_middle
; CHECK: bb.0:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $q2, $r0, $r1
- ; CHECK: renamable $q0 = MVE_VDUP32 renamable $r1, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r1
- ; CHECK: bb.1 (align 4):
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $lr, $q0, $q2, $r0
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg
- ; CHECK: renamable $q2 = MVE_VORR renamable $q1, renamable $q1, 0, $noreg, $noreg, killed renamable $q2
- ; CHECK: MVE_VPTv4f32 8, renamable $q1, renamable $q0, 12, implicit-def $vpr
- ; CHECK: renamable $q2 = MVE_VORR renamable $q1, renamable $q1, 1, renamable $vpr, $noreg, killed renamable $q2
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: dead renamable $q1 = MVE_VORR killed renamable $q1, renamable $q0, 1, killed renamable $vpr, $noreg, killed renamable $q1
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.1
- ; CHECK: bb.2:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ; CHECK: bb.3 (align 4):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 4
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $q2, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q0 = MVE_VDUP32 renamable $r1, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1 (align 4):
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $q2, $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q2 = MVE_VORR renamable $q1, renamable $q1, 0, $noreg, $noreg, killed renamable $q2
+ ; CHECK-NEXT: MVE_VPTv4f32 8, renamable $q1, renamable $q0, 12, implicit-def $vpr
+ ; CHECK-NEXT: renamable $q2 = MVE_VORR renamable $q1, renamable $q1, 1, renamable $vpr, $noreg, killed renamable $q2
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: dead renamable $q1 = MVE_VORR killed renamable $q1, renamable $q0, 1, killed renamable $vpr, $noreg, killed renamable $q1
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3 (align 4):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 4
bb.0:
successors: %bb.6(0x80000000)
liveins: $r0, $r1, $r2
@@ -247,23 +257,28 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: combine_last
; CHECK: bb.0:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $q2, $r0, $r1
- ; CHECK: renamable $q0 = MVE_VDUP32 renamable $r1, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r1
- ; CHECK: bb.1 (align 4):
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $lr, $q0, $q2, $r0
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg
- ; CHECK: MVE_VPTv4f32 8, renamable $q1, renamable $q0, 12, implicit-def $vpr
- ; CHECK: renamable $q2 = MVE_VORR renamable $q1, renamable $q1, 1, killed renamable $vpr, $noreg, killed renamable $q2
- ; CHECK: MVE_VPTv4f32 8, renamable $q2, renamable $q1, 12, implicit-def $vpr
- ; CHECK: dead renamable $q1 = MVE_VORR killed renamable $q1, renamable $q0, 1, killed renamable $vpr, $noreg, killed renamable $q1
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.1
- ; CHECK: bb.2:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ; CHECK: bb.3 (align 4):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 4
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $q2, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q0 = MVE_VDUP32 renamable $r1, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1 (align 4):
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $q2, $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPTv4f32 8, renamable $q1, renamable $q0, 12, implicit-def $vpr
+ ; CHECK-NEXT: renamable $q2 = MVE_VORR renamable $q1, renamable $q1, 1, killed renamable $vpr, $noreg, killed renamable $q2
+ ; CHECK-NEXT: MVE_VPTv4f32 8, renamable $q2, renamable $q1, 12, implicit-def $vpr
+ ; CHECK-NEXT: dead renamable $q1 = MVE_VORR killed renamable $q1, renamable $q0, 1, killed renamable $vpr, $noreg, killed renamable $q1
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3 (align 4):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 4
bb.0:
successors: %bb.6(0x80000000)
liveins: $r0, $r1, $r2
@@ -337,22 +352,27 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: combine_kill_flags
; CHECK: bb.0:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $q2, $r0, $r1
- ; CHECK: renamable $q0 = MVE_VDUP32 renamable $r1, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r1
- ; CHECK: bb.1 (align 4):
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $lr, $q0, $q2, $r0
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg
- ; CHECK: renamable $q2 = MVE_VORR killed renamable $q2, renamable $q1, 0, $noreg, $noreg, killed renamable $q2
- ; CHECK: MVE_VPTv4f32 8, renamable $q0, killed renamable $q1, 12, implicit-def $vpr
- ; CHECK: renamable $q0 = MVE_VORR killed renamable $q0, killed renamable $q0, 1, killed renamable $vpr, $noreg, killed renamable $q0
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.1
- ; CHECK: bb.2:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ; CHECK: bb.3 (align 4):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 4
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $q2, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q0 = MVE_VDUP32 renamable $r1, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1 (align 4):
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $q2, $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q2 = MVE_VORR killed renamable $q2, renamable $q1, 0, $noreg, $noreg, killed renamable $q2
+ ; CHECK-NEXT: MVE_VPTv4f32 8, renamable $q0, killed renamable $q1, 12, implicit-def $vpr
+ ; CHECK-NEXT: renamable $q0 = MVE_VORR killed renamable $q0, killed renamable $q0, 1, killed renamable $vpr, $noreg, killed renamable $q0
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3 (align 4):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 4
bb.0:
successors: %bb.6(0x80000000)
liveins: $r0, $r1, $r2
@@ -424,24 +444,29 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: no_combination_diff_reg_value
; CHECK: bb.0:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $r0, $r1
- ; CHECK: renamable $q0 = MVE_VDUP32 renamable $r1, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r1
- ; CHECK: bb.1 (align 4):
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg
- ; CHECK: renamable $q1 = MVE_VORR killed renamable $q1, renamable $q0, 0, $noreg, $noreg, killed renamable $q1
- ; CHECK: renamable $vpr = MVE_VCMPf32 renamable $q1, renamable $q0, 12, 0, killed $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VORR renamable $q1, renamable $q1, 0, $noreg, $noreg, killed renamable $q0
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $q0 = MVE_VORR killed renamable $q1, killed renamable $q1, 1, killed renamable $vpr, $noreg, killed renamable $q0
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.1
- ; CHECK: bb.2:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ; CHECK: bb.3 (align 4):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 4
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q0 = MVE_VDUP32 renamable $r1, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1 (align 4):
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VORR killed renamable $q1, renamable $q0, 0, $noreg, $noreg, killed renamable $q1
+ ; CHECK-NEXT: renamable $vpr = MVE_VCMPf32 renamable $q1, renamable $q0, 12, 0, killed $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VORR renamable $q1, renamable $q1, 0, $noreg, $noreg, killed renamable $q0
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $q0 = MVE_VORR killed renamable $q1, killed renamable $q1, 1, killed renamable $vpr, $noreg, killed renamable $q0
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3 (align 4):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 4
bb.0:
successors: %bb.6(0x80000000)
liveins: $r0, $r1, $r2
@@ -515,23 +540,28 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: no_combination_vcmp_already_merged
; CHECK: bb.0:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $q2, $r0, $r1
- ; CHECK: renamable $q0 = MVE_VDUP32 renamable $r1, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r1
- ; CHECK: bb.1 (align 4):
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $lr, $q0, $q2, $r0
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg
- ; CHECK: MVE_VPTv4f32 8, renamable $q1, renamable $q0, 12, implicit-def $vpr
- ; CHECK: renamable $q2 = MVE_VORR renamable $q1, renamable $q1, 1, renamable $vpr, $noreg, killed renamable $q2
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: dead renamable $q1 = MVE_VORR killed renamable $q1, renamable $q0, 1, killed renamable $vpr, $noreg, killed renamable $q1
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.1
- ; CHECK: bb.2:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ; CHECK: bb.3 (align 4):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 4
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $q2, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q0 = MVE_VDUP32 renamable $r1, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1 (align 4):
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $q2, $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPTv4f32 8, renamable $q1, renamable $q0, 12, implicit-def $vpr
+ ; CHECK-NEXT: renamable $q2 = MVE_VORR renamable $q1, renamable $q1, 1, renamable $vpr, $noreg, killed renamable $q2
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: dead renamable $q1 = MVE_VORR killed renamable $q1, renamable $q0, 1, killed renamable $vpr, $noreg, killed renamable $q1
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3 (align 4):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 4
bb.0:
successors: %bb.6(0x80000000)
liveins: $r0, $r1, $r2
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-add-operand-liveout.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-add-operand-liveout.mir
index 25f64484d503..08b71c43b51d 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-add-operand-liveout.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-add-operand-liveout.mir
@@ -109,48 +109,55 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: wrong_vctp_liveout
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 4, implicit-def $itstate
- ; CHECK: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
- ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: dead $lr = t2DLS renamable $r12
- ; CHECK: $r3 = tMOVr killed $r12, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $q1, $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
- ; CHECK: $q0 = MVE_VORR killed $q1, killed $q1, 0, $noreg, $noreg, undef $q0
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
- ; CHECK: renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
- ; CHECK: $lr = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $r3, dead $cpsr = nsw tSUBi8 killed $r3, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = MVE_VADDi32 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.middle.block:
- ; CHECK: liveins: $q0, $q1, $r2
- ; CHECK: renamable $r0, dead $cpsr = tADDi3 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $vpr = MVE_VCTP32 killed renamable $r0, 0, $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VPSEL killed renamable $q1, killed renamable $q0, 0, killed renamable $vpr, $noreg
- ; CHECK: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 4, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r0 = tMOVi8 $noreg, 0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
+ ; CHECK-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: dead $lr = t2DLS renamable $r12
+ ; CHECK-NEXT: $r3 = tMOVr killed $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $q1, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
+ ; CHECK-NEXT: $q0 = MVE_VORR killed $q1, killed $q1, 0, $noreg, $noreg, undef $q0
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
+ ; CHECK-NEXT: renamable $r1, renamable $q2 = MVE_VLDRHS32_post killed renamable $r1, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv1820, align 2)
+ ; CHECK-NEXT: $lr = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nsw tSUBi8 killed $r3, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VADDi32 killed renamable $q1, renamable $q0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.middle.block:
+ ; CHECK-NEXT: liveins: $q0, $q1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tADDi3 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 killed renamable $r0, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VPSEL killed renamable $q1, killed renamable $q0, 0, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r0 = MVE_VADDVu32no_acc killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $lr, $r7
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-in-vpt-2.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-in-vpt-2.mir
index ad3e5fd59de1..d634e6a7ff7b 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-in-vpt-2.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-in-vpt-2.mir
@@ -2,7 +2,6 @@
# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops -verify-machineinstrs %s -o - | FileCheck %s
--- |
- ; Function Attrs: nofree norecurse nounwind
define dso_local void @test(ptr noalias nocapture %arg, ptr noalias nocapture readonly %arg1, i32 %arg2, i16 zeroext %mask) local_unnamed_addr #0 {
bb:
%tmp = icmp eq i32 %arg2, 0
@@ -105,39 +104,45 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test
; CHECK: bb.0.bb:
- ; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
- ; CHECK: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: tCBZ $r2, %bb.3
- ; CHECK: bb.1.bb3:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: $vpr = VMSR_P0 killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: VSTR_P0_off killed renamable $vpr, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
- ; CHECK: $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r2
- ; CHECK: bb.2.bb9:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r3
- ; CHECK: renamable $vpr = VLDR_P0_off $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv24, align 4)
- ; CHECK: MVE_VPTv4i32r 8, renamable $q0, $zr, 1, implicit-def $vpr
- ; CHECK: renamable $r3, renamable $q1 = MVE_VLDRWU32_post killed renamable $r3, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1, align 4)
- ; CHECK: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1, align 4)
- ; CHECK: $r0 = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.bb27:
- ; CHECK: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_register $r7
+ ; CHECK-NEXT: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tCBZ $r2, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.bb3:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $vpr = VMSR_P0 killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: VSTR_P0_off killed renamable $vpr, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
+ ; CHECK-NEXT: $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.bb9:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = VLDR_P0_off $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, killed renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv24, align 4)
+ ; CHECK-NEXT: MVE_VPTv4i32r 8, renamable $q0, $zr, 1, implicit-def $vpr
+ ; CHECK-NEXT: renamable $r3, renamable $q1 = MVE_VLDRWU32_post killed renamable $r3, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1, align 4)
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1, align 4)
+ ; CHECK-NEXT: $r0 = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.bb27:
+ ; CHECK-NEXT: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.bb:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-in-vpt.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-in-vpt.mir
index 7af79712b9f0..7164ff9a9a21 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-in-vpt.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-in-vpt.mir
@@ -1,7 +1,6 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -verify-machineinstrs -o - | FileCheck %s
--- |
- ; Function Attrs: nofree norecurse nounwind
define dso_local void @test_vldr_p0(ptr noalias nocapture %arg, ptr noalias nocapture readonly %arg1, i32 %arg2, i16 zeroext %mask) local_unnamed_addr #0 {
bb:
%tmp = icmp eq i32 %arg2, 0
@@ -136,38 +135,44 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test_vldr_p0
; CHECK: bb.0.bb:
- ; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
- ; CHECK: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: tCBZ $r2, %bb.3
- ; CHECK: bb.1.bb3:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: $vpr = VMSR_P0 killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: VSTR_P0_off killed renamable $vpr, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
- ; CHECK: $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r2
- ; CHECK: bb.2.bb9:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r3
- ; CHECK: renamable $vpr = VLDR_P0_off $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv24, align 4)
- ; CHECK: renamable $r3, renamable $q1 = MVE_VLDRWU32_post killed renamable $r3, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1, align 4)
- ; CHECK: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1, align 4)
- ; CHECK: $r0 = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.bb27:
- ; CHECK: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_register $r7
+ ; CHECK-NEXT: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tCBZ $r2, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.bb3:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $vpr = VMSR_P0 killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: VSTR_P0_off killed renamable $vpr, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
+ ; CHECK-NEXT: $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.bb9:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = VLDR_P0_off $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv24, align 4)
+ ; CHECK-NEXT: renamable $r3, renamable $q1 = MVE_VLDRWU32_post killed renamable $r3, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1, align 4)
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1, align 4)
+ ; CHECK-NEXT: $r0 = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.bb27:
+ ; CHECK-NEXT: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.bb:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $lr
@@ -270,45 +275,51 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test_vstr_p0
; CHECK: bb.0.bb:
- ; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
- ; CHECK: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: tCBZ $r2, %bb.3
- ; CHECK: bb.1.bb3:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r12 = t2ADDri renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $vpr = VMSR_P0 killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: VSTR_P0_off killed renamable $vpr, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
- ; CHECK: $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: bb.2.bb9:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = VLDR_P0_off $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
- ; CHECK: MVE_VPST 2, implicit $vpr
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 1, killed renamable $vpr, $noreg
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg
- ; CHECK: renamable $r3, renamable $q1 = MVE_VLDRWU32_post killed renamable $r3, 16, 1, renamable $vpr, $noreg
- ; CHECK: VSTR_P0_off renamable $vpr, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 1, killed renamable $vpr, $noreg
- ; CHECK: $r0 = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.bb27:
- ; CHECK: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_register $r7
+ ; CHECK-NEXT: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tCBZ $r2, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.bb3:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r12 = t2ADDri renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $vpr = VMSR_P0 killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: VSTR_P0_off killed renamable $vpr, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
+ ; CHECK-NEXT: $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.bb9:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = VLDR_P0_off $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: MVE_VPST 2, implicit $vpr
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r2, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r3, renamable $q1 = MVE_VLDRWU32_post killed renamable $r3, 16, 1, renamable $vpr, $noreg
+ ; CHECK-NEXT: VSTR_P0_off renamable $vpr, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.bb27:
+ ; CHECK-NEXT: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.bb:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $lr
@@ -412,45 +423,51 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test_vmsr_p0
; CHECK: bb.0.bb:
- ; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
- ; CHECK: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: tCBZ $r2, %bb.3
- ; CHECK: bb.1.bb3:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r12 = t2ADDri renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $vpr = VMSR_P0 killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: VSTR_P0_off killed renamable $vpr, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
- ; CHECK: $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: bb.2.bb9:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = VLDR_P0_off $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
- ; CHECK: MVE_VPST 2, implicit $vpr
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 1, killed renamable $vpr, $noreg
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg
- ; CHECK: renamable $r3, renamable $q1 = MVE_VLDRWU32_post killed renamable $r3, 16, 1, killed renamable $vpr, $noreg
- ; CHECK: $vpr = VMSR_P0 $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 1, killed renamable $vpr, $noreg
- ; CHECK: $r0 = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.bb27:
- ; CHECK: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_register $r7
+ ; CHECK-NEXT: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tCBZ $r2, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.bb3:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r12 = t2ADDri renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $vpr = VMSR_P0 killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: VSTR_P0_off killed renamable $vpr, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
+ ; CHECK-NEXT: $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.bb9:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = VLDR_P0_off $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: MVE_VPST 2, implicit $vpr
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r2, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r3, renamable $q1 = MVE_VLDRWU32_post killed renamable $r3, 16, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: $vpr = VMSR_P0 $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.bb27:
+ ; CHECK-NEXT: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.bb:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $lr
@@ -554,45 +571,51 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test_vmrs_p0
; CHECK: bb.0.bb:
- ; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
- ; CHECK: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: tCBZ $r2, %bb.3
- ; CHECK: bb.1.bb3:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: renamable $r12 = t2ADDri renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $vpr = VMSR_P0 killed $r3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: VSTR_P0_off killed renamable $vpr, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
- ; CHECK: $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: bb.2.bb9:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = VLDR_P0_off $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
- ; CHECK: MVE_VPST 2, implicit $vpr
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 1, killed renamable $vpr, $noreg
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg
- ; CHECK: dead renamable $r3, renamable $q1 = MVE_VLDRWU32_post killed renamable $r3, 16, 1, renamable $vpr, $noreg
- ; CHECK: $r3 = VMRS_P0 $vpr, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 1, killed renamable $vpr, $noreg
- ; CHECK: $r0 = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.bb27:
- ; CHECK: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.1(0x50000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_register $r7
+ ; CHECK-NEXT: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tCBZ $r2, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.bb3:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r12 = t2ADDri renamable $r2, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $vpr = VMSR_P0 killed $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: VSTR_P0_off killed renamable $vpr, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
+ ; CHECK-NEXT: $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.bb9:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = VLDR_P0_off $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: MVE_VPST 2, implicit $vpr
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r2, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg
+ ; CHECK-NEXT: dead renamable $r3, renamable $q1 = MVE_VLDRWU32_post killed renamable $r3, 16, 1, renamable $vpr, $noreg
+ ; CHECK-NEXT: $r3 = VMRS_P0 $vpr, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.bb27:
+ ; CHECK-NEXT: $sp = tADDspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.bb:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-subi3.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-subi3.mir
index 5153320d6367..7d42b407517b 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-subi3.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-subi3.mir
@@ -99,29 +99,35 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: vctp_tsubi3
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv13, align 4)
- ; CHECK: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv1416, align 4)
- ; CHECK: renamable $q0 = nsw MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv1719, align 4)
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv13, align 4)
+ ; CHECK-NEXT: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv1416, align 4)
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv1719, align 4)
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-subri.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-subri.mir
index 303d93652a28..2b2999f81a2e 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-subri.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-subri.mir
@@ -98,29 +98,35 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: vctp_tsubi3
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv13, align 4)
- ; CHECK: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv1416, align 4)
- ; CHECK: renamable $q0 = nsw MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv1719, align 4)
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv13, align 4)
+ ; CHECK-NEXT: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv1416, align 4)
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv1719, align 4)
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-subri12.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-subri12.mir
index 2516e5bf290b..9f8c6dab8c46 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-subri12.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-subri12.mir
@@ -98,29 +98,35 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: vctp_tsubi3
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2, $r3
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv13, align 4)
- ; CHECK: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv1416, align 4)
- ; CHECK: renamable $q0 = nsw MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv1719, align 4)
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv13, align 4)
+ ; CHECK-NEXT: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg, $noreg :: (load (s128) from %ir.lsr.iv1416, align 4)
+ ; CHECK-NEXT: renamable $q0 = nsw MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 0, killed $noreg, $noreg :: (store (s128) into %ir.lsr.iv1719, align 4)
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp16-reduce.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp16-reduce.mir
index 4b015f81b8f3..74039faa45e8 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp16-reduce.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp16-reduce.mir
@@ -112,53 +112,61 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: wrong_liveout_shift
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 0, 2, implicit-def $itstate
- ; CHECK: renamable $r0 = t2MOVi16 32767, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
- ; CHECK: renamable $r0 = tSXTH killed renamable $r0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
- ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
- ; CHECK: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 7, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 7, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 8, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r3, renamable $r12, 27, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3 = tLEApcrel %const.0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r12 = t2LSRri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool)
- ; CHECK: renamable $r3 = t2SUBrs renamable $r2, killed renamable $r12, 26, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = MVE_VCTP16 renamable $r2, 0, $noreg, $noreg
- ; CHECK: $q1 = MVE_VORR killed $q0, killed $q0, 0, $noreg, $noreg, undef $q1
- ; CHECK: MVE_VPST 4, implicit $vpr
- ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRBU16_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv19, align 1)
- ; CHECK: renamable $r1, renamable $q2 = MVE_VLDRBU16_post killed renamable $r1, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv2022, align 1)
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 8, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = nuw MVE_VMULi16 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $q0 = MVE_VSUBi16 renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.middle.block:
- ; CHECK: liveins: $q0, $q1, $r3
- ; CHECK: renamable $vpr = MVE_VCTP16 killed renamable $r3, 0, $noreg, $noreg
- ; CHECK: renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr, $noreg
- ; CHECK: renamable $r0 = MVE_VADDVu16no_acc killed renamable $q0, 0, $noreg, $noreg
- ; CHECK: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r7, def $lr
- ; CHECK: renamable $r0 = tSXTH killed renamable $r0, 14 /* CC::al */, $noreg
- ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit killed $r0
- ; CHECK: bb.4 (align 16):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 16
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 0, 2, implicit-def $itstate
+ ; CHECK-NEXT: renamable $r0 = t2MOVi16 32767, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
+ ; CHECK-NEXT: renamable $r0 = tSXTH killed renamable $r0, 0 /* CC::eq */, $cpsr, implicit killed $r0, implicit $itstate
+ ; CHECK-NEXT: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $r0, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_register $r7
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 7, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 7, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r3, 8, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $r3, renamable $r12, 27, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3 = tLEApcrel %const.0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2LSRri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool)
+ ; CHECK-NEXT: renamable $r3 = t2SUBrs renamable $r2, killed renamable $r12, 26, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP16 renamable $r2, 0, $noreg, $noreg
+ ; CHECK-NEXT: $q1 = MVE_VORR killed $q0, killed $q0, 0, $noreg, $noreg, undef $q1
+ ; CHECK-NEXT: MVE_VPST 4, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q0 = MVE_VLDRBU16_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv19, align 1)
+ ; CHECK-NEXT: renamable $r1, renamable $q2 = MVE_VLDRBU16_post killed renamable $r1, 8, 1, killed renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv2022, align 1)
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 8, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = nuw MVE_VMULi16 killed renamable $q2, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $q0 = MVE_VSUBi16 renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.middle.block:
+ ; CHECK-NEXT: liveins: $q0, $q1, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP16 killed renamable $r3, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r0 = MVE_VADDVu16no_acc killed renamable $q0, 0, $noreg, $noreg
+ ; CHECK-NEXT: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r7, def $lr
+ ; CHECK-NEXT: renamable $r0 = tSXTH killed renamable $r0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tBX_RET 14 /* CC::al */, $noreg, implicit killed $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4 (align 16):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 16
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector_spill_in_loop.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector_spill_in_loop.mir
index 09ad938b529b..0854e436346c 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector_spill_in_loop.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector_spill_in_loop.mir
@@ -21,37 +21,43 @@ stack:
body: |
; CHECK-LABEL: name: vector_spill_in_loop
; CHECK: bb.0:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $q0, $r1, $r2, $r3, $r4, $r5, $r7, $r8, $r10, $r11, $r12
- ; CHECK: $r6 = tMOVr $r2, 14 /* CC::al */, $noreg
- ; CHECK: $r0 = tMOVr $r12, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_16 renamable $r3
- ; CHECK: bb.1:
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r10, $r11, $r12
- ; CHECK: renamable $r0, renamable $q6 = MVE_VLDRHU16_post killed renamable $r0, 16, 0, $noreg, $noreg
- ; CHECK: renamable $q3 = MVE_VLDRHU16 renamable $r6, 0, 0, $noreg, $noreg
- ; CHECK: renamable $q5 = MVE_VSHR_immu16 killed renamable $q3, 11, 0, $noreg, $noreg, undef renamable $q5
- ; CHECK: MVE_VSTRWU32 killed renamable $q5, $sp, 80, 0, $noreg, $noreg :: (store (s128) into %stack.0, align 8)
- ; CHECK: dead renamable $q7 = MVE_VLDRWU32 $sp, 80, 0, $noreg, $noreg :: (load (s128) from %stack.0, align 8)
- ; CHECK: dead renamable $vpr = MVE_VCMPi16r killed renamable $q6, renamable $r8, 1, 0, killed $noreg, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.1
- ; CHECK: bb.2:
- ; CHECK: successors: %bb.3(0x04000000), %bb.0(0x7c000000)
- ; CHECK: liveins: $q0, $r1, $r2, $r3, $r4, $r5, $r7, $r8, $r10, $r11, $r12
- ; CHECK: renamable $r0 = tLDRspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r10 = nuw t2ADDri killed renamable $r10, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2ADDrs killed renamable $r12, killed renamable $r0, 10, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r0 = tLDRspi $sp, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = t2ADDrs killed renamable $r2, killed renamable $r0, 10, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r0 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg
- ; CHECK: tCMPhir renamable $r10, killed renamable $r0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.0, 1 /* CC::ne */, killed $cpsr
- ; CHECK: bb.3:
- ; CHECK: $sp = frame-destroy tADDspi $sp, 24, 14 /* CC::al */, $noreg
- ; CHECK: $sp = frame-destroy VLDMDIA_UPD $sp, 14 /* CC::al */, $noreg, def $d8, def $d9, def $d10, def $d11, def $d12, def $d13, def $d14, def $d15
- ; CHECK: $sp = frame-destroy tADDspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: $sp = frame-destroy t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $q0, $r1, $r2, $r3, $r4, $r5, $r7, $r8, $r10, $r11, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r6 = tMOVr $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_16 renamable $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r10, $r11, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0, renamable $q6 = MVE_VLDRHU16_post killed renamable $r0, 16, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q3 = MVE_VLDRHU16 renamable $r6, 0, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q5 = MVE_VSHR_immu16 killed renamable $q3, 11, 0, $noreg, $noreg, undef renamable $q5
+ ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q5, $sp, 80, 0, $noreg, $noreg :: (store (s128) into %stack.0, align 8)
+ ; CHECK-NEXT: dead renamable $q7 = MVE_VLDRWU32 $sp, 80, 0, $noreg, $noreg :: (load (s128) from %stack.0, align 8)
+ ; CHECK-NEXT: dead renamable $vpr = MVE_VCMPi16r killed renamable $q6, renamable $r8, 1, 0, killed $noreg, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.3(0x04000000), %bb.0(0x7c000000)
+ ; CHECK-NEXT: liveins: $q0, $r1, $r2, $r3, $r4, $r5, $r7, $r8, $r10, $r11, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r0 = tLDRspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r10 = nuw t2ADDri killed renamable $r10, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2ADDrs killed renamable $r12, killed renamable $r0, 10, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r0 = tLDRspi $sp, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = t2ADDrs killed renamable $r2, killed renamable $r0, 10, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r0 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tCMPhir renamable $r10, killed renamable $r0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.0, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: $sp = frame-destroy tADDspi $sp, 24, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $sp = frame-destroy VLDMDIA_UPD $sp, 14 /* CC::al */, $noreg, def $d8, def $d9, def $d10, def $d11, def $d12, def $d13, def $d14, def $d15
+ ; CHECK-NEXT: $sp = frame-destroy tADDspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $sp = frame-destroy t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11, def $pc
bb.0:
successors: %bb.1(0x80000000)
liveins: $q0, $r1, $r2, $r3, $r4, $r5, $r7, $r8, $r10, $r11, $r12
@@ -109,45 +115,51 @@ tracksRegLiveness: true
body: |
; CHECK-LABEL: name: vector_spill_load_outside
; CHECK: bb.0:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $q0, $r1, $r2, $r3, $r4, $r5, $r7, $r8, $r10, $r11, $r12
- ; CHECK: $r6 = tMOVr $r2, 14 /* CC::al */, $noreg
- ; CHECK: $r0 = tMOVr $r12, 14 /* CC::al */, $noreg
- ; CHECK: $r9 = tMOVr $r3, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2DLS renamable $r1
- ; CHECK: bb.1:
- ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11, $r12
- ; CHECK: renamable $vpr = MVE_VCTP16 renamable $r9, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0, renamable $q6 = MVE_VLDRHU16_post killed renamable $r0, 16, 1, renamable $vpr, $noreg
- ; CHECK: renamable $q3 = MVE_VLDRHU16 renamable $r6, 0, 1, renamable $vpr, $noreg
- ; CHECK: MVE_VPST 2, implicit $vpr
- ; CHECK: renamable $q5 = MVE_VSHR_immu16 killed renamable $q3, 11, 1, renamable $vpr, $noreg, undef renamable $q5
- ; CHECK: renamable $r9 = nsw t2SUBri killed renamable $r9, 8, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: MVE_VSTRWU32 killed renamable $q5, $sp, 80, 0, $noreg, $noreg :: (store (s128) into %stack.0, align 8)
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: dead renamable $q7 = MVE_VLDRWU32 $sp, 80, 0, $noreg, $noreg :: (load (s128) from %stack.0, align 8)
- ; CHECK: MVE_VPST 1, implicit $vpr
- ; CHECK: dead renamable $vpr = MVE_VCMPi16r killed renamable $q6, renamable $r8, 1, 1, killed renamable $vpr, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.1
- ; CHECK: bb.2:
- ; CHECK: successors: %bb.3(0x04000000), %bb.0(0x7c000000)
- ; CHECK: liveins: $q0, $r1, $r2, $r3, $r4, $r5, $r7, $r8, $r10, $r11, $r12
- ; CHECK: dead renamable $q7 = MVE_VLDRWU32 $sp, 80, 0, $noreg, $noreg :: (load (s128) from %stack.0, align 8)
- ; CHECK: renamable $r0 = tLDRspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r10 = nuw t2ADDri killed renamable $r10, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2ADDrs killed renamable $r12, killed renamable $r0, 10, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r0 = tLDRspi $sp, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r2 = t2ADDrs killed renamable $r2, killed renamable $r0, 10, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r0 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg
- ; CHECK: tCMPhir renamable $r10, killed renamable $r0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.0, 1 /* CC::ne */, killed $cpsr
- ; CHECK: bb.3:
- ; CHECK: $sp = frame-destroy tADDspi $sp, 24, 14 /* CC::al */, $noreg
- ; CHECK: $sp = frame-destroy VLDMDIA_UPD $sp, 14 /* CC::al */, $noreg, def $d8, def $d9, def $d10, def $d11, def $d12, def $d13, def $d14, def $d15
- ; CHECK: $sp = frame-destroy tADDspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: $sp = frame-destroy t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $q0, $r1, $r2, $r3, $r4, $r5, $r7, $r8, $r10, $r11, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r6 = tMOVr $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r0 = tMOVr $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r9 = tMOVr $r3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2DLS renamable $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r8, $r9, $r10, $r11, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP16 renamable $r9, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0, renamable $q6 = MVE_VLDRHU16_post killed renamable $r0, 16, 1, renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $q3 = MVE_VLDRHU16 renamable $r6, 0, 1, renamable $vpr, $noreg
+ ; CHECK-NEXT: MVE_VPST 2, implicit $vpr
+ ; CHECK-NEXT: renamable $q5 = MVE_VSHR_immu16 killed renamable $q3, 11, 1, renamable $vpr, $noreg, undef renamable $q5
+ ; CHECK-NEXT: renamable $r9 = nsw t2SUBri killed renamable $r9, 8, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VSTRWU32 killed renamable $q5, $sp, 80, 0, $noreg, $noreg :: (store (s128) into %stack.0, align 8)
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: dead renamable $q7 = MVE_VLDRWU32 $sp, 80, 0, $noreg, $noreg :: (load (s128) from %stack.0, align 8)
+ ; CHECK-NEXT: MVE_VPST 1, implicit $vpr
+ ; CHECK-NEXT: dead renamable $vpr = MVE_VCMPi16r killed renamable $q6, renamable $r8, 1, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.3(0x04000000), %bb.0(0x7c000000)
+ ; CHECK-NEXT: liveins: $q0, $r1, $r2, $r3, $r4, $r5, $r7, $r8, $r10, $r11, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: dead renamable $q7 = MVE_VLDRWU32 $sp, 80, 0, $noreg, $noreg :: (load (s128) from %stack.0, align 8)
+ ; CHECK-NEXT: renamable $r0 = tLDRspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r10 = nuw t2ADDri killed renamable $r10, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2ADDrs killed renamable $r12, killed renamable $r0, 10, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r0 = tLDRspi $sp, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r2 = t2ADDrs killed renamable $r2, killed renamable $r0, 10, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r0 = tLDRspi $sp, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tCMPhir renamable $r10, killed renamable $r0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.0, 1 /* CC::ne */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: $sp = frame-destroy tADDspi $sp, 24, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $sp = frame-destroy VLDMDIA_UPD $sp, 14 /* CC::al */, $noreg, def $d8, def $d9, def $d10, def $d11, def $d12, def $d13, def $d14, def $d15
+ ; CHECK-NEXT: $sp = frame-destroy tADDspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $sp = frame-destroy t2LDMIA_RET $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11, def $pc
bb.0:
successors: %bb.1(0x80000000)
liveins: $q0, $r1, $r2, $r3, $r4, $r5, $r7, $r8, $r10, $r11, $r12
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vmaxmin_vpred_r.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vmaxmin_vpred_r.mir
index 1f9cde8789a3..c89ecd3d0bba 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vmaxmin_vpred_r.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vmaxmin_vpred_r.mir
@@ -140,40 +140,46 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: arm_elementwise_mul_s8
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x40000000), %bb.3(0x40000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 20
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -20
- ; CHECK: renamable $r12 = t2LDRi12 $sp, 44, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.6, align 8)
- ; CHECK: $lr = MVE_WLSTP_32 killed renamable $r12, %bb.3
- ; CHECK: bb.1.for.body.lr.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: $r7, $r6 = t2LDRDi8 $sp, 36, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.4, align 8), (load (s32) from %fixed-stack.5)
- ; CHECK: $r5, $r4 = t2LDRDi8 $sp, 20, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8), (load (s32) from %fixed-stack.1)
- ; CHECK: renamable $q0 = MVE_VDUP32 killed renamable $r6, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $q1 = MVE_VDUP32 killed renamable $r7, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: bb.2.for.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $q1, $r0, $r1, $r2, $r3, $r4, $r5
- ; CHECK: renamable $r1, renamable $q2 = MVE_VLDRWU32_post killed renamable $r1, 4, 0, $noreg, $noreg :: (load (s128) from %ir.input_2_cast, align 4)
- ; CHECK: renamable $r0, renamable $q3 = MVE_VLDRWU32_post killed renamable $r0, 4, 0, $noreg, $noreg :: (load (s128) from %ir.input_1_cast, align 4)
- ; CHECK: renamable $q2 = MVE_VADD_qr_i32 killed renamable $q2, renamable $r3, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: renamable $q3 = MVE_VADD_qr_i32 killed renamable $q3, renamable $r2, 0, $noreg, $noreg, undef renamable $q3
- ; CHECK: renamable $q2 = MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: renamable $q2 = MVE_VADD_qr_i32 killed renamable $q2, renamable $r4, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: renamable $q2 = MVE_VMAXu32 killed renamable $q2, renamable $q1, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: renamable $q2 = MVE_VMINu32 killed renamable $q2, renamable $q0, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: renamable $r5 = MVE_VSTRWU32_post killed renamable $q2, killed renamable $r5, 4, 0, killed $noreg, $noreg :: (store (s128) into %ir.output_cast, align 4)
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.3(0x40000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 20
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r6, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -20
+ ; CHECK-NEXT: renamable $r12 = t2LDRi12 $sp, 44, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.6, align 8)
+ ; CHECK-NEXT: $lr = MVE_WLSTP_32 killed renamable $r12, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.for.body.lr.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r7, $r6 = t2LDRDi8 $sp, 36, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.4, align 8), (load (s32) from %fixed-stack.5)
+ ; CHECK-NEXT: $r5, $r4 = t2LDRDi8 $sp, 20, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8), (load (s32) from %fixed-stack.1)
+ ; CHECK-NEXT: renamable $q0 = MVE_VDUP32 killed renamable $r6, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $q1 = MVE_VDUP32 killed renamable $r7, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.for.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $q1, $r0, $r1, $r2, $r3, $r4, $r5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1, renamable $q2 = MVE_VLDRWU32_post killed renamable $r1, 4, 0, $noreg, $noreg :: (load (s128) from %ir.input_2_cast, align 4)
+ ; CHECK-NEXT: renamable $r0, renamable $q3 = MVE_VLDRWU32_post killed renamable $r0, 4, 0, $noreg, $noreg :: (load (s128) from %ir.input_1_cast, align 4)
+ ; CHECK-NEXT: renamable $q2 = MVE_VADD_qr_i32 killed renamable $q2, renamable $r3, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $q3 = MVE_VADD_qr_i32 killed renamable $q3, renamable $r2, 0, $noreg, $noreg, undef renamable $q3
+ ; CHECK-NEXT: renamable $q2 = MVE_VMULi32 killed renamable $q3, killed renamable $q2, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $q2 = MVE_VADD_qr_i32 killed renamable $q2, renamable $r4, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $q2 = MVE_VMAXu32 killed renamable $q2, renamable $q1, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $q2 = MVE_VMINu32 killed renamable $q2, renamable $q0, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $r5 = MVE_VSTRWU32_post killed renamable $q2, killed renamable $r5, 4, 0, killed $noreg, $noreg :: (store (s128) into %ir.output_cast, align 4)
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: $r0, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x40000000), %bb.3(0x40000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vmldava_in_vpt.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vmldava_in_vpt.mir
index 4d3593a2c27c..c260c3a89dc4 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vmldava_in_vpt.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vmldava_in_vpt.mir
@@ -136,41 +136,48 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: vmldava_in_vpt
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x40000000), %bb.3(0x40000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 16
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -12
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -16
- ; CHECK: renamable $r4 = tLDRspi $sp, 9, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.5)
- ; CHECK: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $lr = MVE_WLSTP_32 killed renamable $r4, %bb.3
- ; CHECK: bb.1.for.body.lr.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
- ; CHECK: renamable $r5 = tLDRspi $sp, 4, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
- ; CHECK: $r6, $r12 = t2LDRDi8 $sp, 28, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.3), (load (s32) from %fixed-stack.4, align 8)
- ; CHECK: renamable $q0 = MVE_VDUP32 killed renamable $r12, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $q1 = MVE_VDUP32 killed renamable $r6, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: bb.2.for.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $q1, $r0, $r1, $r2, $r3, $r5, $r12
- ; CHECK: renamable $r1, renamable $q2 = MVE_VLDRWU32_post killed renamable $r1, 4, 0, $noreg, $noreg :: (load (s128) from %ir.input_2_cast, align 4)
- ; CHECK: renamable $r0, renamable $q3 = MVE_VLDRWU32_post killed renamable $r0, 4, 0, $noreg, $noreg :: (load (s128) from %ir.input_1_cast, align 4)
- ; CHECK: renamable $q2 = MVE_VADD_qr_i32 killed renamable $q2, renamable $r3, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: renamable $q3 = MVE_VADD_qr_i32 killed renamable $q3, renamable $r2, 0, $noreg, $noreg, undef renamable $q3
- ; CHECK: renamable $q3 = MVE_VMLAS_qr_i32 killed renamable $q3, killed renamable $q2, renamable $r5, 0, $noreg, $noreg
- ; CHECK: renamable $q2 = MVE_VMAXu32 killed renamable $q3, renamable $q1, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: renamable $q3 = MVE_VMINu32 renamable $q2, renamable $q0, 0, $noreg, $noreg, undef renamable $q3
- ; CHECK: renamable $r12 = MVE_VMLADAVas32 killed renamable $r12, killed renamable $q3, killed renamable $q2, 0, killed $noreg, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: liveins: $r12
- ; CHECK: $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $pc, implicit killed $r0
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.3(0x40000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r6, -8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r5, -12
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -16
+ ; CHECK-NEXT: renamable $r4 = tLDRspi $sp, 9, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.5)
+ ; CHECK-NEXT: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $lr = MVE_WLSTP_32 killed renamable $r4, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.for.body.lr.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r5 = tLDRspi $sp, 4, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.0, align 8)
+ ; CHECK-NEXT: $r6, $r12 = t2LDRDi8 $sp, 28, 14 /* CC::al */, $noreg :: (load (s32) from %fixed-stack.3), (load (s32) from %fixed-stack.4, align 8)
+ ; CHECK-NEXT: renamable $q0 = MVE_VDUP32 killed renamable $r12, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $q1 = MVE_VDUP32 killed renamable $r6, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.for.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $q1, $r0, $r1, $r2, $r3, $r5, $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1, renamable $q2 = MVE_VLDRWU32_post killed renamable $r1, 4, 0, $noreg, $noreg :: (load (s128) from %ir.input_2_cast, align 4)
+ ; CHECK-NEXT: renamable $r0, renamable $q3 = MVE_VLDRWU32_post killed renamable $r0, 4, 0, $noreg, $noreg :: (load (s128) from %ir.input_1_cast, align 4)
+ ; CHECK-NEXT: renamable $q2 = MVE_VADD_qr_i32 killed renamable $q2, renamable $r3, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $q3 = MVE_VADD_qr_i32 killed renamable $q3, renamable $r2, 0, $noreg, $noreg, undef renamable $q3
+ ; CHECK-NEXT: renamable $q3 = MVE_VMLAS_qr_i32 killed renamable $q3, killed renamable $q2, renamable $r5, 0, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q2 = MVE_VMAXu32 killed renamable $q3, renamable $q1, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: renamable $q3 = MVE_VMINu32 renamable $q2, renamable $q0, 0, $noreg, $noreg, undef renamable $q3
+ ; CHECK-NEXT: renamable $r12 = MVE_VMLADAVas32 killed renamable $r12, killed renamable $q3, killed renamable $q2, 0, killed $noreg, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: liveins: $r12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $pc, implicit killed $r0
bb.0.entry:
successors: %bb.1(0x40000000), %bb.3(0x40000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $lr
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-block-debug.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-block-debug.mir
index 34821c1d7e5c..26e7b8041a23 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-block-debug.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-block-debug.mir
@@ -1,4 +1,3 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - | FileCheck %s
--- |
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-blocks.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-blocks.mir
index 84fd81098cd9..284282023170 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-blocks.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-blocks.mir
@@ -211,31 +211,37 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: vpt_block
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r1, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r3, dead $cpsr = nsw tRSB renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r1
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r2, $r3
- ; CHECK: renamable $q1 = MVE_VLDRWU32 renamable $r0, 0, 0, killed $noreg, $noreg
- ; CHECK: MVE_VPTv4s32r 4, renamable $q1, renamable $r2, 11, implicit-def $vpr
- ; CHECK: renamable $vpr = MVE_VCMPs32r killed renamable $q1, renamable $r3, 12, 1, killed renamable $vpr, $noreg
- ; CHECK: renamable $r0 = MVE_VSTRWU32_post renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r1, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nsw tRSB renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q1 = MVE_VLDRWU32 renamable $r0, 0, 0, killed $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPTv4s32r 4, renamable $q1, renamable $r2, 11, implicit-def $vpr
+ ; CHECK-NEXT: renamable $vpr = MVE_VCMPs32r killed renamable $q1, renamable $r3, 12, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRWU32_post renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
@@ -280,6 +286,8 @@ body: |
bb.3.for.cond.cleanup:
frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
...
+# Tests that secondary VCTPs are refused when their operand's reaching definition is not the same as the main
+# VCTP's.
---
name: different_vcpt_reaching_def
alignment: 2
@@ -328,44 +336,46 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: different_vcpt_reaching_def
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r1, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r3, dead $cpsr = tADDi3 renamable $r1, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = nsw tRSB renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r1, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r1 = MVE_VSTRWU32_post renamable $q0, killed renamable $r1, 16, 1, renamable $vpr, $noreg
- ; CHECK: renamable $q1 = MVE_VLDRWU32 renamable $r0, 0, 1, killed renamable $vpr, $noreg
- ; CHECK: MVE_VPTv4s32r 2, renamable $q1, renamable $r2, 11, implicit-def $vpr
- ; CHECK: renamable $vpr = MVE_VCMPs32r killed renamable $q1, renamable $r3, 12, 1, killed renamable $vpr, $noreg
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r1, 1, killed renamable $vpr, $noreg
- ; CHECK: renamable $r0 = MVE_VSTRWU32_post renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ;
- ; Tests that secondary VCTPs are refused when their operand's reaching definition is not the same as the main
- ; VCTP's.
- ;
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r1, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi3 renamable $r1, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nsw tRSB renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r1, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r1 = MVE_VSTRWU32_post renamable $q0, killed renamable $r1, 16, 1, renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VLDRWU32 renamable $r0, 0, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: MVE_VPTv4s32r 2, renamable $q1, renamable $r2, 11, implicit-def $vpr
+ ; CHECK-NEXT: renamable $vpr = MVE_VCMPs32r killed renamable $q1, renamable $r3, 12, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r1, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRWU32_post renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
@@ -411,6 +421,7 @@ body: |
bb.3.for.cond.cleanup:
frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
...
+# Tests that secondary VCTPs are refused when their operand is not the same register as the main VCTP's.
---
name: different_vcpt_operand
alignment: 2
@@ -459,42 +470,45 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: different_vcpt_operand
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r1, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r3, dead $cpsr = tADDi3 renamable $r1, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = nsw tRSB renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r3
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r1, 0, $noreg, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $q1 = MVE_VLDRWU32 renamable $r0, 0, 1, killed renamable $vpr, $noreg
- ; CHECK: MVE_VPTv4s32r 2, renamable $q1, renamable $r2, 11, implicit-def $vpr
- ; CHECK: renamable $vpr = MVE_VCMPs32r killed renamable $q1, renamable $r3, 12, 1, killed renamable $vpr, $noreg
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 1, killed renamable $vpr, $noreg
- ; CHECK: renamable $r0 = MVE_VSTRWU32_post renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ;
- ; Tests that secondary VCTPs are refused when their operand is not the same register as the main VCTP's.
- ;
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r1, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi3 renamable $r1, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nsw tRSB renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r1, 0, $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $q1 = MVE_VLDRWU32 renamable $r0, 0, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: MVE_VPTv4s32r 2, renamable $q1, renamable $r2, 11, implicit-def $vpr
+ ; CHECK-NEXT: renamable $vpr = MVE_VCMPs32r killed renamable $q1, renamable $r3, 12, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r2, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRWU32_post renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
@@ -539,6 +553,7 @@ body: |
bb.3.for.cond.cleanup:
frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
...
+# Tests including an else-predicated VCTP.
---
name: else_vcpt
alignment: 2
@@ -587,34 +602,37 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: else_vcpt
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r1, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r3, dead $cpsr = nsw tRSB renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r1
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r2, $r3
- ; CHECK: renamable $q1 = MVE_VLDRWU32 renamable $r0, 0, 0, killed $noreg, $noreg
- ; CHECK: MVE_VPTv4s32r 12, renamable $q1, renamable $r2, 10, implicit-def $vpr
- ; CHECK: renamable $vpr = MVE_VCMPs32r killed renamable $q1, renamable $r3, 13, 1, killed renamable $vpr, $noreg
- ; CHECK: renamable $r0 = MVE_VSTRWU32_post renamable $q0, killed renamable $r0, 16, 2, killed renamable $vpr, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ;
- ; Test including a else-predicated VCTP.
- ;
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r1, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nsw tRSB renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q1 = MVE_VLDRWU32 renamable $r0, 0, 0, killed $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPTv4s32r 12, renamable $q1, renamable $r2, 10, implicit-def $vpr
+ ; CHECK-NEXT: renamable $vpr = MVE_VCMPs32r killed renamable $q1, renamable $r3, 13, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRWU32_post renamable $q0, killed renamable $r0, 16, 2, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
@@ -707,31 +725,37 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: loop_invariant_vpt_operands
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r1, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r3, dead $cpsr = nsw tRSB renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r1
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r2, $r3
- ; CHECK: renamable $q1 = MVE_VLDRWU32 renamable $r0, 0, 0, killed $noreg, $noreg
- ; CHECK: MVE_VPTv4s32r 4, renamable $q0, renamable $r2, 11, implicit-def $vpr
- ; CHECK: renamable $vpr = MVE_VCMPs32r killed renamable $q1, renamable $r3, 12, 1, killed renamable $vpr, $noreg
- ; CHECK: renamable $r0 = MVE_VSTRWU32_post renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r1, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nsw tRSB renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q1 = MVE_VLDRWU32 renamable $r0, 0, 0, killed $noreg, $noreg
+ ; CHECK-NEXT: MVE_VPTv4s32r 4, renamable $q0, renamable $r2, 11, implicit-def $vpr
+ ; CHECK-NEXT: renamable $vpr = MVE_VCMPs32r killed renamable $q1, renamable $r3, 12, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRWU32_post renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
@@ -823,29 +847,35 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: vctp_before_vpt
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r1, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r1, $r2
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r3, dead $cpsr = nsw tRSB renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r1
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r2, $r3
- ; CHECK: MVE_VPTv4s32r 8, renamable $q0, renamable $r2, 8, implicit-def $vpr
- ; CHECK: dead renamable $vpr = MVE_VCMPs32r renamable $q0, renamable $r3, 12, 1, killed renamable $vpr, $noreg
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r1, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nsw tRSB renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r2, $r3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: MVE_VPTv4s32r 8, renamable $q0, renamable $r2, 8, implicit-def $vpr
+ ; CHECK-NEXT: dead renamable $vpr = MVE_VCMPs32r renamable $q0, renamable $r3, 12, 1, killed renamable $vpr, $noreg
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
@@ -885,6 +915,7 @@ body: |
bb.3.for.cond.cleanup:
frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
...
+# This shouldn't be tail-predicated because the VLDR isn't predicated on the VCTP.
---
name: vpt_load_vctp_store
alignment: 2
@@ -933,39 +964,42 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: vpt_load_vctp_store
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r1, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1.vector.ph:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r3, dead $cpsr = tADDi3 renamable $r1, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: dead renamable $r3, dead $cpsr = nsw tRSB renamable $r2, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.vector.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r0, $r1, $r2
- ; CHECK: MVE_VPTv4s32r 2, killed renamable $q0, renamable $r2, 2, implicit-def $vpr
- ; CHECK: renamable $q0 = MVE_VLDRWU32 renamable $r0, 0, 1, $vpr, $noreg
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r1, 1, killed $vpr, $noreg
- ; CHECK: MVE_VSTRWU32 renamable $q0, renamable $r0, 0, 1, killed $vpr, $noreg
- ; CHECK: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.for.cond.cleanup:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ;
- ; This shouldn't be tail-predicated because the VLDR isn't predicated on the VCTP.
- ;
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r1, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.ph:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi3 renamable $r1, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: dead renamable $r3, dead $cpsr = nsw tRSB renamable $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: MVE_VPTv4s32r 2, killed renamable $q0, renamable $r2, 2, implicit-def $vpr
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 renamable $r0, 0, 1, $vpr, $noreg
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r1, 1, killed $vpr, $noreg
+ ; CHECK-NEXT: MVE_VSTRWU32 renamable $q0, renamable $r0, 0, 1, killed $vpr, $noreg
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.for.cond.cleanup:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
@@ -1026,37 +1060,43 @@ stack:
body: |
; CHECK-LABEL: name: emptyblock
; CHECK: bb.0:
- ; CHECK: successors: %bb.1(0x50000000), %bb.3(0x30000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 12
- ; CHECK: tCMPi8 renamable $r0, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: tBcc %bb.3, 11 /* CC::lt */, killed $cpsr
- ; CHECK: bb.1:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: tCMPi8 killed renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: renamable $r2 = t2CSINC $zr, $zr, 0, implicit killed $cpsr
- ; CHECK: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: renamable $r12 = t2ANDri killed renamable $r2, 1, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r2 = t2RSBri killed renamable $r12, 0, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: $vpr = VMSR_P0 killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: VSTR_P0_off killed renamable $vpr, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r0
- ; CHECK: bb.2 (align 4):
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $r1
- ; CHECK: renamable $vpr = VLDR_P0_off $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r1 = MVE_VSTRWU32_post renamable $q0, killed renamable $r1, 16, 1, killed renamable $vpr, $noreg :: (store (s128), align 4)
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3:
- ; CHECK: $sp = frame-destroy tADDspi $sp, 1, 14 /* CC::al */, $noreg
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit undef $r0
+ ; CHECK-NEXT: successors: %bb.1(0x50000000), %bb.3(0x30000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: $sp = frame-setup tSUBspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 12
+ ; CHECK-NEXT: tCMPi8 renamable $r0, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: tBcc %bb.3, 11 /* CC::lt */, killed $cpsr
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: tCMPi8 killed renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: renamable $r2 = t2CSINC $zr, $zr, 0, implicit killed $cpsr
+ ; CHECK-NEXT: renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: renamable $r12 = t2ANDri killed renamable $r2, 1, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r2 = t2RSBri killed renamable $r12, 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: $vpr = VMSR_P0 killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: VSTR_P0_off killed renamable $vpr, $sp, 0, 14 /* CC::al */, $noreg :: (store (s32) into %stack.0)
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2 (align 4):
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vpr = VLDR_P0_off $sp, 0, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r1 = MVE_VSTRWU32_post renamable $q0, killed renamable $r1, 16, 1, killed renamable $vpr, $noreg :: (store (s128), align 4)
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: $sp = frame-destroy tADDspi $sp, 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit undef $r0
bb.0:
successors: %bb.1(0x50000000), %bb.3(0x30000000)
liveins: $r0, $r1, $r2, $r7, $lr
@@ -1129,34 +1169,41 @@ constants:
body: |
; CHECK-LABEL: name: predvcmp
; CHECK: bb.0:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r2, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r12 = t2LEApcrel %const.0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r12, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool, align 8)
- ; CHECK: renamable $q2 = MVE_VMOVimmi32 4, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r2
- ; CHECK: bb.2 (align 4):
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $q1, $q2, $r0, $r1
- ; CHECK: MVE_VPTv4s32r 8, renamable $q0, renamable $r1, 11, implicit-def $vpr
- ; CHECK: renamable $r0 = MVE_VSTRWU32_post renamable $q1, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128), align 4)
- ; CHECK: renamable $q0 = MVE_VADDi32 killed renamable $q0, renamable $q2, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
- ; CHECK: bb.3:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ; CHECK: bb.4 (align 8):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 16
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r12 = t2LEApcrel %const.0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 killed renamable $r12, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool, align 8)
+ ; CHECK-NEXT: renamable $q2 = MVE_VMOVimmi32 4, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2 (align 4):
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $q1, $q2, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: MVE_VPTv4s32r 8, renamable $q0, renamable $r1, 11, implicit-def $vpr
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRWU32_post renamable $q1, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128), align 4)
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi32 killed renamable $q0, renamable $q2, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4 (align 8):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 16
bb.0:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
@@ -1228,41 +1275,48 @@ constants:
body: |
; CHECK-LABEL: name: predvpt
; CHECK: bb.0:
- ; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: tCMPi8 renamable $r2, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
- ; CHECK: t2IT 11, 8, implicit-def $itstate
- ; CHECK: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
- ; CHECK: bb.1:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r12 = t2LEApcrel %const.0, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3, dead $cpsr = nuw tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q1
- ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r12, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool, align 8)
- ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
- ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
- ; CHECK: renamable $q2 = MVE_VMOVimmi32 4, 0, $noreg, $noreg, undef renamable $q2
- ; CHECK: bb.2 (align 4):
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $q0, $q1, $q2, $r0, $r1, $r2
- ; CHECK: MVE_VPTv4s32r 8, renamable $q0, renamable $r1, 11, implicit-def $vpr
- ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 1, killed $vpr, $noreg
- ; CHECK: MVE_VPST 8, implicit $vpr
- ; CHECK: renamable $r0 = MVE_VSTRWU32_post renamable $q1, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128), align 4)
- ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
- ; CHECK: renamable $q0 = MVE_VADDi32 killed renamable $q0, renamable $q2, 0, $noreg, $noreg, undef renamable $q0
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3:
- ; CHECK: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
- ; CHECK: bb.4 (align 8):
- ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 16
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: tCMPi8 renamable $r2, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ ; CHECK-NEXT: t2IT 11, 8, implicit-def $itstate
+ ; CHECK-NEXT: frame-destroy tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r12 = t2LEApcrel %const.0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = nuw tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q1
+ ; CHECK-NEXT: renamable $q0 = MVE_VLDRWU32 killed renamable $r12, 0, 0, $noreg, $noreg :: (load (s128) from constant-pool, align 8)
+ ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: renamable $q2 = MVE_VMOVimmi32 4, 0, $noreg, $noreg, undef renamable $q2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2 (align 4):
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $q0, $q1, $q2, $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: MVE_VPTv4s32r 8, renamable $q0, renamable $r1, 11, implicit-def $vpr
+ ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r2, 1, killed $vpr, $noreg
+ ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
+ ; CHECK-NEXT: renamable $r0 = MVE_VSTRWU32_post renamable $q1, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128), align 4)
+ ; CHECK-NEXT: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDi32 killed renamable $q0, renamable $q2, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4 (align 8):
+ ; CHECK-NEXT: CONSTPOOL_ENTRY 0, %const.0, 16
bb.0:
successors: %bb.1(0x80000000)
liveins: $r0, $r1, $r2, $r7, $lr
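
The hunks above are a mechanical regeneration of these LowOverheadLoops expectations: plain "; CHECK:" lines become "; CHECK-NEXT:" lines, and the {{ $}} patterns match the blank lines between basic blocks, so FileCheck now verifies each function's MIR contiguously rather than allowing gaps. The pass under test, MVE tail-predication, folds the per-iteration VCTP lane-count intrinsic into a DLSTP/LETP hardware loop; as the new vpt_load_vctp_store comment notes, that is only legal when every predicated memory access in the body uses the VCTP mask. A minimal scalar sketch of the semantics (hypothetical C, function name invented, not taken from any one test above):

    /* One MVE "beat" processes four i32 lanes; DLSTP/LETP run
     * ceil(n/4) iterations, and the implicit VCTP mask disables the
     * lanes past n on the final iteration. */
    void store_zero_masked(int *p, int n) {
        for (int i = 0; i < n; i += 4)          /* one vector iteration */
            for (int lane = 0; lane < 4; ++lane)
                if (i + lane < n)               /* VCTP lane predicate */
                    p[i + lane] = 0;
    }
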
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while.mir
index d91cd958347c..bc739dea40ef 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while.mir
@@ -90,27 +90,33 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: copy
; CHECK: bb.0.entry:
- ; CHECK: successors: %bb.1(0x40000000), %bb.3(0x40000000)
- ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
- ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
- ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
- ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
- ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
- ; CHECK: dead $lr = t2WLS $r2, %bb.3
- ; CHECK: bb.1.while.body.preheader:
- ; CHECK: successors: %bb.2(0x80000000)
- ; CHECK: liveins: $r0, $r1, $r2
- ; CHECK: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 2, 14 /* CC::al */, $noreg
- ; CHECK: renamable $r0, dead $cpsr = tSUBi8 killed renamable $r0, 2, 14 /* CC::al */, $noreg
- ; CHECK: $lr = tMOVr killed $r2, 14 /* CC::al */, $noreg
- ; CHECK: bb.2.while.body:
- ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
- ; CHECK: liveins: $lr, $r0, $r1
- ; CHECK: renamable $r2, renamable $r1 = t2LDRH_PRE killed renamable $r1, 2, 14 /* CC::al */, $noreg :: (load (s16) from %ir.scevgep4)
- ; CHECK: early-clobber renamable $r0 = t2STRH_PRE killed renamable $r2, killed renamable $r0, 2, 14 /* CC::al */, $noreg :: (store (s16) into %ir.scevgep7)
- ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
- ; CHECK: bb.3.while.end:
- ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.3(0x40000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: dead $lr = t2WLS $r2, %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.while.body.preheader:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $r0, dead $cpsr = tSUBi8 killed renamable $r0, 2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $lr = tMOVr killed $r2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.while.body:
+ ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $r2, renamable $r1 = t2LDRH_PRE killed renamable $r1, 2, 14 /* CC::al */, $noreg :: (load (s16) from %ir.scevgep4)
+ ; CHECK-NEXT: early-clobber renamable $r0 = t2STRH_PRE killed renamable $r2, killed renamable $r0, 2, 14 /* CC::al */, $noreg :: (store (s16) into %ir.scevgep7)
+ ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.while.end:
+ ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
bb.0.entry:
successors: %bb.1(0x40000000), %bb.3(0x40000000)
liveins: $r0, $r1, $r2, $r7, $lr
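
The "copy" test in while.mir exercises the non-tail-predicated side of the pass: a t2WLS/t2LEUpdate while-loop around pre-incremented half-word loads and stores. Judging from the (s16) memory operands and the %ir.scevgep value names, it corresponds roughly to the loop below (a hedged reconstruction; the test ships no C source):

    /* Rough scalar shape of the while.mir "copy" loop. */
    void copy(short *dst, const short *src, int n) {
        while (n-- > 0)
            *dst++ = *src++;
    }
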
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll
index 83d7275358ce..3300d46bf856 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll
@@ -130,26 +130,26 @@ define arm_aapcs_vfpcc i64 @add_v8i16_v8i64_zext(<8 x i16> %x) {
; CHECK-NEXT: vmov.i64 q1, #0xffff
; CHECK-NEXT: vand q2, q2, q1
; CHECK-NEXT: vmov.u16 r3, q0[2]
-; CHECK-NEXT: vmov r0, r1, d5
-; CHECK-NEXT: vmov r2, s8
-; CHECK-NEXT: add r0, r2
-; CHECK-NEXT: vmov.u16 r2, q0[3]
-; CHECK-NEXT: vmov q2[2], q2[0], r3, r2
+; CHECK-NEXT: vmov r0, s10
+; CHECK-NEXT: vmov r1, r2, d4
+; CHECK-NEXT: add r0, r1
+; CHECK-NEXT: vmov.u16 r1, q0[3]
+; CHECK-NEXT: vmov q2[2], q2[0], r3, r1
; CHECK-NEXT: vmov.u16 r3, q0[4]
; CHECK-NEXT: vand q2, q2, q1
-; CHECK-NEXT: vmov r2, s8
-; CHECK-NEXT: add r0, r2
-; CHECK-NEXT: vmov r2, s10
-; CHECK-NEXT: add r0, r2
-; CHECK-NEXT: vmov.u16 r2, q0[5]
-; CHECK-NEXT: vmov q2[2], q2[0], r3, r2
+; CHECK-NEXT: vmov r1, s8
+; CHECK-NEXT: add r0, r1
+; CHECK-NEXT: vmov r1, s10
+; CHECK-NEXT: add r0, r1
+; CHECK-NEXT: vmov.u16 r1, q0[5]
+; CHECK-NEXT: vmov q2[2], q2[0], r3, r1
; CHECK-NEXT: vand q2, q2, q1
-; CHECK-NEXT: vmov r2, s8
-; CHECK-NEXT: add r0, r2
-; CHECK-NEXT: vmov r2, r3, d5
-; CHECK-NEXT: adds r0, r0, r2
+; CHECK-NEXT: vmov r1, s8
+; CHECK-NEXT: add r0, r1
+; CHECK-NEXT: vmov r1, r3, d5
+; CHECK-NEXT: adds r0, r0, r1
+; CHECK-NEXT: adc.w r1, r2, r3
; CHECK-NEXT: vmov.u16 r2, q0[7]
-; CHECK-NEXT: adcs r1, r3
; CHECK-NEXT: vmov.u16 r3, q0[6]
; CHECK-NEXT: vmov q0[2], q0[0], r3, r2
; CHECK-NEXT: vand q0, q0, q1
@@ -228,8 +228,8 @@ define arm_aapcs_vfpcc i64 @add_v2i16_v2i64_zext(<2 x i16> %x) {
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.i64 q1, #0xffff
; CHECK-NEXT: vand q0, q0, q1
-; CHECK-NEXT: vmov r0, r1, d1
-; CHECK-NEXT: vmov r2, s0
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: vmov r2, r1, d0
; CHECK-NEXT: add r0, r2
; CHECK-NEXT: bx lr
entry:
@@ -397,26 +397,26 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_zext(<16 x i8> %x) {
; CHECK-NEXT: vmov.i64 q1, #0xff
; CHECK-NEXT: vand q2, q2, q1
; CHECK-NEXT: vmov.u8 r3, q0[2]
-; CHECK-NEXT: vmov r0, r1, d5
-; CHECK-NEXT: vmov r2, s8
-; CHECK-NEXT: add r0, r2
-; CHECK-NEXT: vmov.u8 r2, q0[3]
-; CHECK-NEXT: vmov q2[2], q2[0], r3, r2
+; CHECK-NEXT: vmov r0, s10
+; CHECK-NEXT: vmov r1, r2, d4
+; CHECK-NEXT: add r0, r1
+; CHECK-NEXT: vmov.u8 r1, q0[3]
+; CHECK-NEXT: vmov q2[2], q2[0], r3, r1
; CHECK-NEXT: vmov.u8 r3, q0[4]
; CHECK-NEXT: vand q2, q2, q1
-; CHECK-NEXT: vmov r2, s8
-; CHECK-NEXT: add r0, r2
-; CHECK-NEXT: vmov r2, s10
-; CHECK-NEXT: add r0, r2
-; CHECK-NEXT: vmov.u8 r2, q0[5]
-; CHECK-NEXT: vmov q2[2], q2[0], r3, r2
+; CHECK-NEXT: vmov r1, s8
+; CHECK-NEXT: add r0, r1
+; CHECK-NEXT: vmov r1, s10
+; CHECK-NEXT: add r0, r1
+; CHECK-NEXT: vmov.u8 r1, q0[5]
+; CHECK-NEXT: vmov q2[2], q2[0], r3, r1
; CHECK-NEXT: vand q2, q2, q1
-; CHECK-NEXT: vmov r2, s8
-; CHECK-NEXT: add r0, r2
-; CHECK-NEXT: vmov r2, r3, d5
-; CHECK-NEXT: adds r0, r0, r2
+; CHECK-NEXT: vmov r1, s8
+; CHECK-NEXT: add r0, r1
+; CHECK-NEXT: vmov r1, r3, d5
+; CHECK-NEXT: adds r0, r0, r1
+; CHECK-NEXT: adc.w r1, r2, r3
; CHECK-NEXT: vmov.u8 r2, q0[7]
-; CHECK-NEXT: adcs r1, r3
; CHECK-NEXT: vmov.u8 r3, q0[6]
; CHECK-NEXT: vmov q2[2], q2[0], r3, r2
; CHECK-NEXT: vand q2, q2, q1
@@ -540,26 +540,26 @@ define arm_aapcs_vfpcc i64 @add_v8i8_v8i64_zext(<8 x i8> %x) {
; CHECK-NEXT: vmov q2[2], q2[0], r1, r0
; CHECK-NEXT: vmov.u16 r3, q0[2]
; CHECK-NEXT: vand q2, q2, q1
-; CHECK-NEXT: vmov r0, r1, d5
-; CHECK-NEXT: vmov r2, s8
-; CHECK-NEXT: add r0, r2
-; CHECK-NEXT: vmov.u16 r2, q0[3]
-; CHECK-NEXT: vmov q2[2], q2[0], r3, r2
+; CHECK-NEXT: vmov r0, s10
+; CHECK-NEXT: vmov r1, r2, d4
+; CHECK-NEXT: add r0, r1
+; CHECK-NEXT: vmov.u16 r1, q0[3]
+; CHECK-NEXT: vmov q2[2], q2[0], r3, r1
; CHECK-NEXT: vmov.u16 r3, q0[4]
; CHECK-NEXT: vand q2, q2, q1
-; CHECK-NEXT: vmov r2, s8
-; CHECK-NEXT: add r0, r2
-; CHECK-NEXT: vmov r2, s10
-; CHECK-NEXT: add r0, r2
-; CHECK-NEXT: vmov.u16 r2, q0[5]
-; CHECK-NEXT: vmov q2[2], q2[0], r3, r2
+; CHECK-NEXT: vmov r1, s8
+; CHECK-NEXT: add r0, r1
+; CHECK-NEXT: vmov r1, s10
+; CHECK-NEXT: add r0, r1
+; CHECK-NEXT: vmov.u16 r1, q0[5]
+; CHECK-NEXT: vmov q2[2], q2[0], r3, r1
; CHECK-NEXT: vand q2, q2, q1
-; CHECK-NEXT: vmov r2, s8
-; CHECK-NEXT: add r0, r2
-; CHECK-NEXT: vmov r2, r3, d5
-; CHECK-NEXT: adds r0, r0, r2
+; CHECK-NEXT: vmov r1, s8
+; CHECK-NEXT: add r0, r1
+; CHECK-NEXT: vmov r1, r3, d5
+; CHECK-NEXT: adds r0, r0, r1
+; CHECK-NEXT: adc.w r1, r2, r3
; CHECK-NEXT: vmov.u16 r2, q0[7]
-; CHECK-NEXT: adcs r1, r3
; CHECK-NEXT: vmov.u16 r3, q0[6]
; CHECK-NEXT: vmov q0[2], q0[0], r3, r2
; CHECK-NEXT: vand q0, q0, q1
@@ -648,8 +648,8 @@ define arm_aapcs_vfpcc i64 @add_v2i8_v2i64_zext(<2 x i8> %x) {
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.i64 q1, #0xff
; CHECK-NEXT: vand q0, q0, q1
-; CHECK-NEXT: vmov r0, r1, d1
-; CHECK-NEXT: vmov r2, s0
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: vmov r2, r1, d0
; CHECK-NEXT: add r0, r2
; CHECK-NEXT: bx lr
entry:
@@ -834,8 +834,8 @@ define arm_aapcs_vfpcc i64 @add_v8i16_v8i64_acc_zext(<8 x i16> %x, i64 %a) {
; CHECK-NEXT: vmov q2[2], q2[0], r3, r2
; CHECK-NEXT: vmov.i64 q1, #0xffff
; CHECK-NEXT: vand q2, q2, q1
-; CHECK-NEXT: vmov r2, r12, d5
-; CHECK-NEXT: vmov r3, s8
+; CHECK-NEXT: vmov r2, s10
+; CHECK-NEXT: vmov r3, r12, d4
; CHECK-NEXT: add.w lr, r3, r2
; CHECK-NEXT: vmov.u16 r3, q0[3]
; CHECK-NEXT: vmov.u16 r2, q0[2]
@@ -943,8 +943,8 @@ define arm_aapcs_vfpcc i64 @add_v2i16_v2i64_acc_zext(<2 x i16> %x, i64 %a) {
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.i64 q1, #0xffff
; CHECK-NEXT: vand q0, q0, q1
-; CHECK-NEXT: vmov r2, r12, d1
-; CHECK-NEXT: vmov r3, s0
+; CHECK-NEXT: vmov r2, s2
+; CHECK-NEXT: vmov r3, r12, d0
; CHECK-NEXT: add r2, r3
; CHECK-NEXT: adds r0, r0, r2
; CHECK-NEXT: adc.w r1, r1, r12
@@ -1130,8 +1130,8 @@ define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext(<16 x i8> %x, i64 %a) {
; CHECK-NEXT: vmov q2[2], q2[0], r3, r2
; CHECK-NEXT: vmov.i64 q1, #0xff
; CHECK-NEXT: vand q2, q2, q1
-; CHECK-NEXT: vmov r2, r12, d5
-; CHECK-NEXT: vmov r3, s8
+; CHECK-NEXT: vmov r2, s10
+; CHECK-NEXT: vmov r3, r12, d4
; CHECK-NEXT: add.w lr, r3, r2
; CHECK-NEXT: vmov.u8 r3, q0[3]
; CHECK-NEXT: vmov.u8 r2, q0[2]
@@ -1283,8 +1283,8 @@ define arm_aapcs_vfpcc i64 @add_v8i8_v8i64_acc_zext(<8 x i8> %x, i64 %a) {
; CHECK-NEXT: vmov.u16 r3, q0[0]
; CHECK-NEXT: vmov q2[2], q2[0], r3, r2
; CHECK-NEXT: vand q2, q2, q1
-; CHECK-NEXT: vmov r2, r12, d5
-; CHECK-NEXT: vmov r3, s8
+; CHECK-NEXT: vmov r2, s10
+; CHECK-NEXT: vmov r3, r12, d4
; CHECK-NEXT: add.w lr, r3, r2
; CHECK-NEXT: vmov.u16 r3, q0[3]
; CHECK-NEXT: vmov.u16 r2, q0[2]
@@ -1402,8 +1402,8 @@ define arm_aapcs_vfpcc i64 @add_v2i8_v2i64_acc_zext(<2 x i8> %x, i64 %a) {
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.i64 q1, #0xff
; CHECK-NEXT: vand q0, q0, q1
-; CHECK-NEXT: vmov r2, r12, d1
-; CHECK-NEXT: vmov r3, s0
+; CHECK-NEXT: vmov r2, s2
+; CHECK-NEXT: vmov r3, r12, d0
; CHECK-NEXT: add r2, r3
; CHECK-NEXT: adds r0, r0, r2
; CHECK-NEXT: adc.w r1, r1, r12
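
The mve-vecreduce-add changes are pure register re-allocation: each 64-bit partial sum now reads both halves of a d-register with a single vmov (for example "vmov r1, r2, d4"), and the final carry is folded in with a three-operand "adc.w r1, r2, r3" instead of a trailing "adcs r1, r3". The reductions themselves are unchanged; add_v8i16_v8i64_zext, for instance, computes the scalar model below (assuming only what the IR signature states):

    /* Scalar model of add_v8i16_v8i64_zext(<8 x i16> %x) -> i64. */
    #include <stdint.h>
    uint64_t add_v8i16_v8i64_zext(const uint16_t x[8]) {
        uint64_t sum = 0;
        for (int i = 0; i < 8; ++i)
            sum += x[i];    /* each i16 lane zero-extended to 64 bits */
        return sum;
    }
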
diff --git a/llvm/test/CodeGen/WebAssembly/fast-isel-call-indirect64.ll b/llvm/test/CodeGen/WebAssembly/fast-isel-call-indirect64.ll
deleted file mode 100644
index 8224c3bc4e37..000000000000
--- a/llvm/test/CodeGen/WebAssembly/fast-isel-call-indirect64.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -fast-isel --mtriple=wasm64 -asm-verbose=false -wasm-keep-registers | FileCheck %s
-
-target triple = "wasm64"
-
-; Ensure fast isel also lowers function pointers to 32-bit.
-
-; CHECK: local.get $push[[L0:[0-9]+]]=, 0
-; CHECK-NEXT: i32.wrap_i64 $push[[L1:[0-9]+]]=, $pop[[L0]]
-; CHECK-NEXT: call_indirect $pop[[L1]]
-
-define hidden void @f(ptr %g) {
- call void %g()
- ret void
-}
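
The deleted fast-isel test asserted that wasm64 function pointers are wrapped to 32 bits (i32.wrap_i64) before call_indirect; the function-pointer64.ll hunk that follows drops the same wrap from its expectations and shifts the two relocation offsets by one byte to match. The deleted IR body is equivalent to this C function (a direct transliteration):

    /* C equivalent of the deleted wasm64 fast-isel test body. */
    void f(void (*g)(void)) {
        g();    /* indirect call through a function-pointer parameter */
    }
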
diff --git a/llvm/test/CodeGen/WebAssembly/function-pointer64.ll b/llvm/test/CodeGen/WebAssembly/function-pointer64.ll
index c7c90f6b7ac2..7f98d3e648bd 100644
--- a/llvm/test/CodeGen/WebAssembly/function-pointer64.ll
+++ b/llvm/test/CodeGen/WebAssembly/function-pointer64.ll
@@ -34,7 +34,6 @@ entry:
; CHECK: .functype foo (i64) -> ()
; CHECK-NEXT: i32.const 1
; CHECK-NEXT: local.get 0
-; CHECK-NEXT: i32.wrap_i64
; CHECK-NEXT: call_indirect (i32) -> ()
; REF: call_indirect __indirect_function_table, (i32) -> ()
@@ -53,10 +52,10 @@ entry:
; YAML: - Type: CODE
; YAML: - Type: R_WASM_TABLE_INDEX_SLEB64
; YAML-NEXT: Index: 0
-; YAML-NEXT: Offset: 0x16
+; YAML-NEXT: Offset: 0x15
; YAML: - Type: R_WASM_TABLE_INDEX_SLEB64
; YAML-NEXT: Index: 0
-; YAML-NEXT: Offset: 0x29
+; YAML-NEXT: Offset: 0x28
; YAML: - Type: DATA
; YAML: - Type: R_WASM_TABLE_INDEX_I64
diff --git a/llvm/test/CodeGen/WebAssembly/half-precision.ll b/llvm/test/CodeGen/WebAssembly/half-precision.ll
index 89e9c42637c1..d9d3f6be800f 100644
--- a/llvm/test/CodeGen/WebAssembly/half-precision.ll
+++ b/llvm/test/CodeGen/WebAssembly/half-precision.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s --mtriple=wasm32-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+half-precision | FileCheck %s
-; RUN: llc < %s --mtriple=wasm64-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+half-precision | FileCheck %s
+; RUN: llc < %s --mtriple=wasm32-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+half-precision,+simd128 | FileCheck %s
+; RUN: llc < %s --mtriple=wasm64-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+half-precision,+simd128 | FileCheck %s
declare float @llvm.wasm.loadf32.f16(ptr)
declare void @llvm.wasm.storef16.f32(float, ptr)
@@ -19,3 +19,19 @@ define void @stf16_32(float %v, ptr %p) {
tail call void @llvm.wasm.storef16.f32(float %v, ptr %p)
ret void
}
+
+; CHECK-LABEL: splat_v8f16:
+; CHECK: f16x8.splat $push0=, $0
+; CHECK-NEXT: return $pop0
+define <8 x half> @splat_v8f16(float %x) {
+ %v = call <8 x half> @llvm.wasm.splat.f16x8(float %x)
+ ret <8 x half> %v
+}
+
+; CHECK-LABEL: extract_lane_v8f16:
+; CHECK: f16x8.extract_lane $push0=, $0, 1
+; CHECK-NEXT: return $pop0
+define float @extract_lane_v8f16(<8 x half> %v) {
+ %r = call float @llvm.wasm.extract.lane.f16x8(<8 x half> %v, i32 1)
+ ret float %r
+}
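
The half-precision RUN lines now also enable +simd128, which the two new vector tests require. Their lane semantics, modelling each f16 lane as a float and ignoring half-precision rounding (an illustrative sketch, not the lowering itself):

    /* Sketch of the new f16x8 splat / extract_lane tests. */
    float splat_then_extract(float x) {
        float v[8];
        for (int i = 0; i < 8; ++i)
            v[i] = x;       /* f16x8.splat */
        return v[1];        /* f16x8.extract_lane ..., 1 */
    }
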
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-exceptions.ll b/llvm/test/CodeGen/WebAssembly/lower-em-exceptions.ll
index d17a5b419e35..f6b36c56c6d3 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-exceptions.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-exceptions.ll
@@ -44,7 +44,7 @@ lpad: ; preds = %entry
; CHECK-NEXT: %[[CDR:.*]] = extractvalue { ptr, i32 } %[[IVI2]], 1
catch.dispatch: ; preds = %lpad
- %3 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
+ %3 = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi)
%matches = icmp eq i32 %2, %3
br i1 %matches, label %catch1, label %catch
; CHECK: catch.dispatch:
@@ -139,7 +139,7 @@ lpad: ; preds = %entry
br label %catch.dispatch
catch.dispatch: ; preds = %lpad
- %4 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
+ %4 = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi)
%matches = icmp eq i32 %3, %4
br i1 %matches, label %catch1, label %catch
@@ -162,7 +162,7 @@ declare void @foo(i32)
declare ptr @bar(i8, i8)
declare i32 @__gxx_personality_v0(...)
-declare i32 @llvm.eh.typeid.for(ptr)
+declare i32 @llvm.eh.typeid.for.p0(ptr)
declare ptr @__cxa_begin_catch(ptr)
declare void @__cxa_end_catch()
declare void @__cxa_call_unexpected(ptr)
diff --git a/llvm/test/CodeGen/X86/abds-vector-128.ll b/llvm/test/CodeGen/X86/abds-vector-128.ll
index 3143bf619065..bcb42002fb08 100644
--- a/llvm/test/CodeGen/X86/abds-vector-128.ll
+++ b/llvm/test/CodeGen/X86/abds-vector-128.ll
@@ -12,14 +12,12 @@
define <16 x i8> @abd_ext_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: abd_ext_v16i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psubb %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
; SSE2-NEXT: psubb %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm3
-; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubb %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_ext_v16i8:
@@ -47,14 +45,12 @@ define <16 x i8> @abd_ext_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
define <16 x i8> @abd_ext_v16i8_undef(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: abd_ext_v16i8_undef:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psubb %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
; SSE2-NEXT: psubb %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm3
-; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubb %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_ext_v16i8_undef:
@@ -128,14 +124,12 @@ define <8 x i16> @abd_ext_v8i16_undef(<8 x i16> %a, <8 x i16> %b) nounwind {
define <4 x i32> @abd_ext_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: abd_ext_v4i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psubd %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: psubd %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm3
-; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubd %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_ext_v4i32:
@@ -163,14 +157,12 @@ define <4 x i32> @abd_ext_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <4 x i32> @abd_ext_v4i32_undef(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: abd_ext_v4i32_undef:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psubd %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: psubd %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm3
-; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubd %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_ext_v4i32_undef:
@@ -198,61 +190,48 @@ define <4 x i32> @abd_ext_v4i32_undef(<4 x i32> %a, <4 x i32> %b) nounwind {
define <2 x i64> @abd_ext_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: abd_ext_v2i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: movq %xmm0, %rdx
-; SSE2-NEXT: movq %rdx, %rsi
-; SSE2-NEXT: sarq $63, %rsi
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rdi
-; SSE2-NEXT: movq %rdi, %r8
-; SSE2-NEXT: sarq $63, %r8
-; SSE2-NEXT: movq %xmm1, %r9
-; SSE2-NEXT: movq %r9, %r10
-; SSE2-NEXT: sarq $63, %r10
-; SSE2-NEXT: subq %r9, %rdx
-; SSE2-NEXT: sbbq %r10, %rsi
-; SSE2-NEXT: subq %rdi, %rax
-; SSE2-NEXT: sbbq %r8, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: xorq %rcx, %rax
-; SSE2-NEXT: subq %rcx, %rax
-; SSE2-NEXT: sarq $63, %rsi
-; SSE2-NEXT: xorq %rsi, %rdx
-; SSE2-NEXT: subq %rsi, %rdx
-; SSE2-NEXT: movq %rdx, %xmm0
-; SSE2-NEXT: movq %rax, %xmm1
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: psubq %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubq %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_ext_v2i64:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: pcmpgtq %xmm1, %xmm2
-; SSE42-NEXT: movdqa %xmm0, %xmm3
-; SSE42-NEXT: psubq %xmm1, %xmm3
-; SSE42-NEXT: psubq %xmm0, %xmm1
+; SSE42-NEXT: psubq %xmm1, %xmm0
+; SSE42-NEXT: pxor %xmm2, %xmm0
+; SSE42-NEXT: psubq %xmm0, %xmm2
; SSE42-NEXT: movdqa %xmm2, %xmm0
-; SSE42-NEXT: blendvpd %xmm0, %xmm3, %xmm1
-; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX1-LABEL: abd_ext_v2i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: abd_ext_v2i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm3
-; AVX2-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: abd_ext_v2i64:
@@ -272,61 +251,48 @@ define <2 x i64> @abd_ext_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <2 x i64> @abd_ext_v2i64_undef(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: abd_ext_v2i64_undef:
; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: movq %xmm0, %rdx
-; SSE2-NEXT: movq %rdx, %rsi
-; SSE2-NEXT: sarq $63, %rsi
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rdi
-; SSE2-NEXT: movq %rdi, %r8
-; SSE2-NEXT: sarq $63, %r8
-; SSE2-NEXT: movq %xmm1, %r9
-; SSE2-NEXT: movq %r9, %r10
-; SSE2-NEXT: sarq $63, %r10
-; SSE2-NEXT: subq %r9, %rdx
-; SSE2-NEXT: sbbq %r10, %rsi
-; SSE2-NEXT: subq %rdi, %rax
-; SSE2-NEXT: sbbq %r8, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: xorq %rcx, %rax
-; SSE2-NEXT: subq %rcx, %rax
-; SSE2-NEXT: sarq $63, %rsi
-; SSE2-NEXT: xorq %rsi, %rdx
-; SSE2-NEXT: subq %rsi, %rdx
-; SSE2-NEXT: movq %rdx, %xmm0
-; SSE2-NEXT: movq %rax, %xmm1
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: psubq %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubq %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_ext_v2i64_undef:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: pcmpgtq %xmm1, %xmm2
-; SSE42-NEXT: movdqa %xmm0, %xmm3
-; SSE42-NEXT: psubq %xmm1, %xmm3
-; SSE42-NEXT: psubq %xmm0, %xmm1
+; SSE42-NEXT: psubq %xmm1, %xmm0
+; SSE42-NEXT: pxor %xmm2, %xmm0
+; SSE42-NEXT: psubq %xmm0, %xmm2
; SSE42-NEXT: movdqa %xmm2, %xmm0
-; SSE42-NEXT: blendvpd %xmm0, %xmm3, %xmm1
-; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX1-LABEL: abd_ext_v2i64_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: abd_ext_v2i64_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm3
-; AVX2-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: abd_ext_v2i64_undef:
@@ -350,14 +316,12 @@ define <2 x i64> @abd_ext_v2i64_undef(<2 x i64> %a, <2 x i64> %b) nounwind {
define <16 x i8> @abd_minmax_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: abd_minmax_v16i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psubb %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
; SSE2-NEXT: psubb %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm3
-; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubb %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_minmax_v16i8:
@@ -404,14 +368,12 @@ define <8 x i16> @abd_minmax_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <4 x i32> @abd_minmax_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: abd_minmax_v4i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psubd %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: psubd %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm3
-; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubd %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_minmax_v4i32:
@@ -445,47 +407,40 @@ define <2 x i64> @abd_minmax_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm1, %xmm4
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: psubq %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubq %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_minmax_v2i64:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: pcmpgtq %xmm1, %xmm2
-; SSE42-NEXT: movdqa %xmm0, %xmm3
-; SSE42-NEXT: psubq %xmm1, %xmm3
-; SSE42-NEXT: psubq %xmm0, %xmm1
+; SSE42-NEXT: psubq %xmm1, %xmm0
+; SSE42-NEXT: pxor %xmm2, %xmm0
+; SSE42-NEXT: psubq %xmm0, %xmm2
; SSE42-NEXT: movdqa %xmm2, %xmm0
-; SSE42-NEXT: blendvpd %xmm0, %xmm3, %xmm1
-; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX1-LABEL: abd_minmax_v2i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: abd_minmax_v2i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm3
-; AVX2-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: abd_minmax_v2i64:
@@ -507,14 +462,12 @@ define <2 x i64> @abd_minmax_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <16 x i8> @abd_cmp_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: abd_cmp_v16i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psubb %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
; SSE2-NEXT: psubb %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm3
-; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubb %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_cmp_v16i8:
@@ -563,14 +516,12 @@ define <8 x i16> @abd_cmp_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
define <4 x i32> @abd_cmp_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: abd_cmp_v4i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psubd %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: psubd %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm3
-; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubd %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_cmp_v4i32:
@@ -598,9 +549,9 @@ define <2 x i64> @abd_cmp_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: abd_cmp_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: pxor %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
@@ -609,12 +560,9 @@ define <2 x i64> @abd_cmp_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-NEXT: pand %xmm5, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: psubq %xmm1, %xmm3
-; SSE2-NEXT: psubq %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm3, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: psubq %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubq %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
@@ -622,28 +570,26 @@ define <2 x i64> @abd_cmp_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: pcmpgtq %xmm1, %xmm2
-; SSE42-NEXT: movdqa %xmm0, %xmm3
-; SSE42-NEXT: psubq %xmm1, %xmm3
-; SSE42-NEXT: psubq %xmm0, %xmm1
+; SSE42-NEXT: psubq %xmm1, %xmm0
+; SSE42-NEXT: pxor %xmm2, %xmm0
+; SSE42-NEXT: psubq %xmm0, %xmm2
; SSE42-NEXT: movdqa %xmm2, %xmm0
-; SSE42-NEXT: blendvpd %xmm0, %xmm3, %xmm1
-; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX1-LABEL: abd_cmp_v2i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: abd_cmp_v2i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm3
-; AVX2-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: abd_cmp_v2i64:
@@ -790,50 +736,52 @@ define <2 x i64> @abd_subnsw_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <2 x i64> @abd_cmp_v2i64_multiuse_cmp(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: abd_cmp_v2i64_multiuse_cmp:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: psubq %xmm1, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: psubq %xmm0, %xmm3
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
+; SSE2-NEXT: movdqa %xmm2, %xmm6
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm5, %xmm4
+; SSE2-NEXT: psubq %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm4, %xmm0
+; SSE2-NEXT: psubq %xmm0, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
+; SSE2-NEXT: pand %xmm6, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm1
-; SSE2-NEXT: por %xmm3, %xmm1
-; SSE2-NEXT: paddq %xmm1, %xmm0
+; SSE2-NEXT: paddq %xmm4, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_cmp_v2i64_multiuse_cmp:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
-; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
-; SSE42-NEXT: movdqa %xmm2, %xmm3
+; SSE42-NEXT: pcmpgtq %xmm1, %xmm2
+; SSE42-NEXT: movdqa %xmm0, %xmm3
; SSE42-NEXT: psubq %xmm1, %xmm3
-; SSE42-NEXT: movdqa %xmm1, %xmm4
-; SSE42-NEXT: psubq %xmm2, %xmm4
-; SSE42-NEXT: blendvpd %xmm0, %xmm3, %xmm4
-; SSE42-NEXT: pcmpgtq %xmm2, %xmm1
+; SSE42-NEXT: pxor %xmm2, %xmm3
+; SSE42-NEXT: psubq %xmm3, %xmm2
+; SSE42-NEXT: pcmpgtq %xmm0, %xmm1
; SSE42-NEXT: pcmpeqd %xmm0, %xmm0
; SSE42-NEXT: pxor %xmm1, %xmm0
-; SSE42-NEXT: paddq %xmm4, %xmm0
+; SSE42-NEXT: paddq %xmm2, %xmm0
; SSE42-NEXT: retq
;
; AVX1-LABEL: abd_cmp_v2i64_multiuse_cmp:
; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm4
-; AVX1-NEXT: vblendvpd %xmm2, %xmm3, %xmm4, %xmm2
+; AVX1-NEXT: vpxor %xmm2, %xmm3, %xmm3
+; AVX1-NEXT: vpsubq %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
@@ -844,8 +792,8 @@ define <2 x i64> @abd_cmp_v2i64_multiuse_cmp(<2 x i64> %a, <2 x i64> %b) nounwin
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm3
-; AVX2-NEXT: vpsubq %xmm0, %xmm1, %xmm4
-; AVX2-NEXT: vblendvpd %xmm2, %xmm3, %xmm4, %xmm2
+; AVX2-NEXT: vpxor %xmm2, %xmm3, %xmm3
+; AVX2-NEXT: vpsubq %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/abds-vector-256.ll b/llvm/test/CodeGen/X86/abds-vector-256.ll
index 78190d2cb7d8..cc63ad04c08a 100644
--- a/llvm/test/CodeGen/X86/abds-vector-256.ll
+++ b/llvm/test/CodeGen/X86/abds-vector-256.ll
@@ -223,22 +223,22 @@ define <4 x i64> @abd_ext_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm4
-; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm5
-; AVX1-NEXT: vpsubq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vblendvpd %xmm4, %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm4
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vblendvpd %xmm3, %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: abd_ext_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm3
-; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vblendvpd %ymm2, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: abd_ext_v4i64:
@@ -261,22 +261,22 @@ define <4 x i64> @abd_ext_v4i64_undef(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm4
-; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm5
-; AVX1-NEXT: vpsubq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vblendvpd %xmm4, %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm4
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vblendvpd %xmm3, %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: abd_ext_v4i64_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm3
-; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vblendvpd %ymm2, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: abd_ext_v4i64_undef:
@@ -402,22 +402,22 @@ define <4 x i64> @abd_minmax_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm4
-; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm5
-; AVX1-NEXT: vpsubq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vblendvpd %xmm4, %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm4
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vblendvpd %xmm3, %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: abd_minmax_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm3
-; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vblendvpd %ymm2, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: abd_minmax_v4i64:
@@ -544,22 +544,22 @@ define <4 x i64> @abd_cmp_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm4
-; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm5
-; AVX1-NEXT: vpsubq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vblendvpd %xmm4, %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm4
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vblendvpd %xmm3, %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: abd_cmp_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm3
-; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vblendvpd %ymm2, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: abd_cmp_v4i64:
diff --git a/llvm/test/CodeGen/X86/abdu-vector-128.ll b/llvm/test/CodeGen/X86/abdu-vector-128.ll
index 0c33e8973c2d..78b315a3773e 100644
--- a/llvm/test/CodeGen/X86/abdu-vector-128.ll
+++ b/llvm/test/CodeGen/X86/abdu-vector-128.ll
@@ -125,12 +125,10 @@ define <4 x i32> @abd_ext_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-NEXT: pxor %xmm2, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: psubd %xmm0, %xmm3
; SSE2-NEXT: psubd %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm3, %xmm2
-; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubd %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_ext_v4i32:
@@ -163,12 +161,10 @@ define <4 x i32> @abd_ext_v4i32_undef(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-NEXT: pxor %xmm2, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: psubd %xmm0, %xmm3
; SSE2-NEXT: psubd %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm3, %xmm2
-; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubd %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_ext_v4i32_undef:
@@ -196,27 +192,22 @@ define <4 x i32> @abd_ext_v4i32_undef(<4 x i32> %a, <4 x i32> %b) nounwind {
define <2 x i64> @abd_ext_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: abd_ext_v2i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: movq %xmm0, %rcx
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rdx
-; SSE2-NEXT: movq %xmm1, %rsi
-; SSE2-NEXT: xorl %edi, %edi
-; SSE2-NEXT: subq %rsi, %rcx
-; SSE2-NEXT: movl $0, %esi
-; SSE2-NEXT: sbbq %rsi, %rsi
-; SSE2-NEXT: subq %rdx, %rax
-; SSE2-NEXT: sbbq %rdi, %rdi
-; SSE2-NEXT: sarq $63, %rdi
-; SSE2-NEXT: xorq %rdi, %rax
-; SSE2-NEXT: subq %rdi, %rax
-; SSE2-NEXT: sarq $63, %rsi
-; SSE2-NEXT: xorq %rsi, %rcx
-; SSE2-NEXT: subq %rsi, %rcx
-; SSE2-NEXT: movq %rcx, %xmm0
-; SSE2-NEXT: movq %rax, %xmm1
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: psubq %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubq %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_ext_v2i64:
@@ -226,12 +217,10 @@ define <2 x i64> @abd_ext_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE42-NEXT: pxor %xmm2, %xmm3
; SSE42-NEXT: pxor %xmm0, %xmm2
; SSE42-NEXT: pcmpgtq %xmm3, %xmm2
-; SSE42-NEXT: movdqa %xmm0, %xmm3
-; SSE42-NEXT: psubq %xmm1, %xmm3
-; SSE42-NEXT: psubq %xmm0, %xmm1
+; SSE42-NEXT: psubq %xmm1, %xmm0
+; SSE42-NEXT: pxor %xmm2, %xmm0
+; SSE42-NEXT: psubq %xmm0, %xmm2
; SSE42-NEXT: movdqa %xmm2, %xmm0
-; SSE42-NEXT: blendvpd %xmm0, %xmm3, %xmm1
-; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX1-LABEL: abd_ext_v2i64:
@@ -241,9 +230,9 @@ define <2 x i64> @abd_ext_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: abd_ext_v2i64:
@@ -252,9 +241,9 @@ define <2 x i64> @abd_ext_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm2
; AVX2-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm3
-; AVX2-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: abd_ext_v2i64:
@@ -274,27 +263,22 @@ define <2 x i64> @abd_ext_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <2 x i64> @abd_ext_v2i64_undef(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: abd_ext_v2i64_undef:
; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: movq %xmm0, %rcx
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rdx
-; SSE2-NEXT: movq %xmm1, %rsi
-; SSE2-NEXT: xorl %edi, %edi
-; SSE2-NEXT: subq %rsi, %rcx
-; SSE2-NEXT: movl $0, %esi
-; SSE2-NEXT: sbbq %rsi, %rsi
-; SSE2-NEXT: subq %rdx, %rax
-; SSE2-NEXT: sbbq %rdi, %rdi
-; SSE2-NEXT: sarq $63, %rdi
-; SSE2-NEXT: xorq %rdi, %rax
-; SSE2-NEXT: subq %rdi, %rax
-; SSE2-NEXT: sarq $63, %rsi
-; SSE2-NEXT: xorq %rsi, %rcx
-; SSE2-NEXT: subq %rsi, %rcx
-; SSE2-NEXT: movq %rcx, %xmm0
-; SSE2-NEXT: movq %rax, %xmm1
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: psubq %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubq %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_ext_v2i64_undef:
@@ -304,12 +288,10 @@ define <2 x i64> @abd_ext_v2i64_undef(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE42-NEXT: pxor %xmm2, %xmm3
; SSE42-NEXT: pxor %xmm0, %xmm2
; SSE42-NEXT: pcmpgtq %xmm3, %xmm2
-; SSE42-NEXT: movdqa %xmm0, %xmm3
-; SSE42-NEXT: psubq %xmm1, %xmm3
-; SSE42-NEXT: psubq %xmm0, %xmm1
+; SSE42-NEXT: psubq %xmm1, %xmm0
+; SSE42-NEXT: pxor %xmm2, %xmm0
+; SSE42-NEXT: psubq %xmm0, %xmm2
; SSE42-NEXT: movdqa %xmm2, %xmm0
-; SSE42-NEXT: blendvpd %xmm0, %xmm3, %xmm1
-; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX1-LABEL: abd_ext_v2i64_undef:
@@ -319,9 +301,9 @@ define <2 x i64> @abd_ext_v2i64_undef(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: abd_ext_v2i64_undef:
@@ -330,9 +312,9 @@ define <2 x i64> @abd_ext_v2i64_undef(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm2
; AVX2-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm3
-; AVX2-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: abd_ext_v2i64_undef:
@@ -411,12 +393,10 @@ define <4 x i32> @abd_minmax_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-NEXT: pxor %xmm2, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: psubd %xmm0, %xmm3
; SSE2-NEXT: psubd %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm3, %xmm2
-; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubd %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_minmax_v4i32:
@@ -450,19 +430,14 @@ define <2 x i64> @abd_minmax_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm1, %xmm4
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: psubq %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubq %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_minmax_v2i64:
@@ -472,12 +447,10 @@ define <2 x i64> @abd_minmax_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE42-NEXT: pxor %xmm2, %xmm3
; SSE42-NEXT: pxor %xmm0, %xmm2
; SSE42-NEXT: pcmpgtq %xmm3, %xmm2
-; SSE42-NEXT: movdqa %xmm0, %xmm3
-; SSE42-NEXT: psubq %xmm1, %xmm3
-; SSE42-NEXT: psubq %xmm0, %xmm1
+; SSE42-NEXT: psubq %xmm1, %xmm0
+; SSE42-NEXT: pxor %xmm2, %xmm0
+; SSE42-NEXT: psubq %xmm0, %xmm2
; SSE42-NEXT: movdqa %xmm2, %xmm0
-; SSE42-NEXT: blendvpd %xmm0, %xmm3, %xmm1
-; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX1-LABEL: abd_minmax_v2i64:
@@ -487,9 +460,9 @@ define <2 x i64> @abd_minmax_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: abd_minmax_v2i64:
@@ -498,9 +471,9 @@ define <2 x i64> @abd_minmax_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm2
; AVX2-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm3
-; AVX2-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: abd_minmax_v2i64:
@@ -579,12 +552,10 @@ define <4 x i32> @abd_cmp_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-NEXT: pxor %xmm2, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: psubd %xmm0, %xmm3
; SSE2-NEXT: psubd %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm3, %xmm2
-; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubd %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_cmp_v4i32:
@@ -612,9 +583,9 @@ define <2 x i64> @abd_cmp_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: abd_cmp_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: pxor %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
@@ -623,12 +594,9 @@ define <2 x i64> @abd_cmp_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-NEXT: pand %xmm5, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: psubq %xmm1, %xmm3
-; SSE2-NEXT: psubq %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm3, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: psubq %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubq %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
@@ -639,12 +607,10 @@ define <2 x i64> @abd_cmp_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE42-NEXT: pxor %xmm2, %xmm3
; SSE42-NEXT: pxor %xmm0, %xmm2
; SSE42-NEXT: pcmpgtq %xmm3, %xmm2
-; SSE42-NEXT: movdqa %xmm0, %xmm3
-; SSE42-NEXT: psubq %xmm1, %xmm3
-; SSE42-NEXT: psubq %xmm0, %xmm1
+; SSE42-NEXT: psubq %xmm1, %xmm0
+; SSE42-NEXT: pxor %xmm2, %xmm0
+; SSE42-NEXT: psubq %xmm0, %xmm2
; SSE42-NEXT: movdqa %xmm2, %xmm0
-; SSE42-NEXT: blendvpd %xmm0, %xmm3, %xmm1
-; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX1-LABEL: abd_cmp_v2i64:
@@ -654,9 +620,9 @@ define <2 x i64> @abd_cmp_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: abd_cmp_v2i64:
@@ -665,9 +631,9 @@ define <2 x i64> @abd_cmp_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm2
; AVX2-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm3
-; AVX2-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: abd_cmp_v2i64:
@@ -692,63 +658,59 @@ define <2 x i64> @abd_cmp_v2i64_multiuse_cmp(<2 x i64> %a, <2 x i64> %b) nounwin
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psubq %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: psubq %xmm0, %xmm3
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [9223372039002259456,9223372039002259456]
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT: pxor %xmm3, %xmm1
+; SSE2-NEXT: pxor %xmm3, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSE2-NEXT: por %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: pandn %xmm3, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: paddq %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: psubq %xmm2, %xmm0
+; SSE2-NEXT: paddq %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: abd_cmp_v2i64_multiuse_cmp:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: psubq %xmm1, %xmm2
-; SSE42-NEXT: movdqa %xmm1, %xmm3
-; SSE42-NEXT: psubq %xmm0, %xmm3
-; SSE42-NEXT: movdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; SSE42-NEXT: pxor %xmm4, %xmm1
-; SSE42-NEXT: pxor %xmm4, %xmm0
+; SSE42-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; SSE42-NEXT: pxor %xmm3, %xmm1
+; SSE42-NEXT: pxor %xmm3, %xmm0
; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
-; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm3
-; SSE42-NEXT: paddq %xmm3, %xmm0
+; SSE42-NEXT: pxor %xmm0, %xmm2
+; SSE42-NEXT: movdqa %xmm0, %xmm1
+; SSE42-NEXT: psubq %xmm2, %xmm1
+; SSE42-NEXT: paddq %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX1-LABEL: abd_cmp_v2i64_multiuse_cmp:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm3
-; AVX1-NEXT: vmovddup {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: # xmm4 = mem[0,0]
-; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: # xmm3 = mem[0,0]
+; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm1
+; AVX1-NEXT: vpxor %xmm0, %xmm2, %xmm1
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: abd_cmp_v2i64_multiuse_cmp:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vpsubq %xmm0, %xmm1, %xmm3
-; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX2-NEXT: vpxor %xmm4, %xmm1, %xmm1
-; AVX2-NEXT: vpxor %xmm4, %xmm0, %xmm0
+; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX2-NEXT: vpxor %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpxor %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm1
+; AVX2-NEXT: vpxor %xmm0, %xmm2, %xmm1
+; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/abdu-vector-256.ll b/llvm/test/CodeGen/X86/abdu-vector-256.ll
index 884515cfedd0..080fb779fecb 100644
--- a/llvm/test/CodeGen/X86/abdu-vector-256.ll
+++ b/llvm/test/CodeGen/X86/abdu-vector-256.ll
@@ -227,15 +227,15 @@ define <4 x i64> @abd_ext_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpxor %xmm3, %xmm5, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT: vpsubq %xmm2, %xmm5, %xmm6
-; AVX1-NEXT: vpsubq %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm4
; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm4
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vblendvpd %xmm3, %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -245,9 +245,9 @@ define <4 x i64> @abd_ext_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm2
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm3
-; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vblendvpd %ymm2, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: abd_ext_v4i64:
@@ -274,15 +274,15 @@ define <4 x i64> @abd_ext_v4i64_undef(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpxor %xmm3, %xmm5, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT: vpsubq %xmm2, %xmm5, %xmm6
-; AVX1-NEXT: vpsubq %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm4
; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm4
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vblendvpd %xmm3, %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -292,9 +292,9 @@ define <4 x i64> @abd_ext_v4i64_undef(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm2
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm3
-; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vblendvpd %ymm2, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: abd_ext_v4i64_undef:
@@ -424,15 +424,15 @@ define <4 x i64> @abd_minmax_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpxor %xmm3, %xmm5, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT: vpsubq %xmm2, %xmm5, %xmm6
-; AVX1-NEXT: vpsubq %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm4
; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm4
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vblendvpd %xmm3, %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -442,9 +442,9 @@ define <4 x i64> @abd_minmax_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm2
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm3
-; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vblendvpd %ymm2, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: abd_minmax_v4i64:
@@ -575,15 +575,15 @@ define <4 x i64> @abd_cmp_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpxor %xmm3, %xmm5, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT: vpsubq %xmm2, %xmm5, %xmm6
-; AVX1-NEXT: vpsubq %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm4
; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm4
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vblendvpd %xmm3, %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -593,9 +593,9 @@ define <4 x i64> @abd_cmp_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm2
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm3
-; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vblendvpd %ymm2, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: abd_cmp_v4i64:
diff --git a/llvm/test/CodeGen/X86/apx/ccmp-flags-copy-lowering.mir b/llvm/test/CodeGen/X86/apx/ccmp-flags-copy-lowering.mir
index 52d4c4cfb2aa..25204be145c0 100644
--- a/llvm/test/CodeGen/X86/apx/ccmp-flags-copy-lowering.mir
+++ b/llvm/test/CodeGen/X86/apx/ccmp-flags-copy-lowering.mir
@@ -14,13 +14,13 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: MUL32r $edi, implicit-def $eax, implicit-def dead $edx, implicit-def $eflags, implicit $eax
; CHECK-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 1, implicit $eflags
- ; CHECK-NEXT: [[ADD32rr:%[0-9]+]]:gr32 = ADD32rr $edi, $edi, implicit-def $eflags
+ ; CHECK-NEXT: [[ADD32rr:%[0-9]+]]:gr32 = ADD32rr $edi, $edi, implicit-def dead $eflags
; CHECK-NEXT: TEST8rr [[SETCCr]], [[SETCCr]], implicit-def $eflags
; CHECK-NEXT: CCMP32rr [[ADD32rr]], [[ADD32rr]], 0, 5, implicit-def $eflags, implicit killed $eflags
; CHECK-NEXT: RET 0, $al
MUL32r $edi, implicit-def $eax, implicit-def dead $edx, implicit-def $eflags, implicit $eax
%1:gr64 = COPY $eflags
- %2:gr32 = ADD32rr $edi, $edi, implicit-def $eflags
+ %2:gr32 = ADD32rr $edi, $edi, implicit-def dead $eflags
$eflags = COPY %1
CCMP32rr %2, %2, 0, 1, implicit-def $eflags, implicit $eflags
RET 0, $al
@@ -37,13 +37,13 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: MUL32r $edi, implicit-def $eax, implicit-def dead $edx, implicit-def $eflags, implicit $eax
; CHECK-NEXT: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 1, implicit $eflags
- ; CHECK-NEXT: [[ADD32rr:%[0-9]+]]:gr32 = ADD32rr $edi, $edi, implicit-def $eflags
+ ; CHECK-NEXT: [[ADD32rr:%[0-9]+]]:gr32 = ADD32rr $edi, $edi, implicit-def dead $eflags
; CHECK-NEXT: TEST8rr [[SETCCr]], [[SETCCr]], implicit-def $eflags
; CHECK-NEXT: CTEST32rr [[ADD32rr]], [[ADD32rr]], 0, 5, implicit-def $eflags, implicit killed $eflags
; CHECK-NEXT: RET 0, $al
MUL32r $edi, implicit-def $eax, implicit-def dead $edx, implicit-def $eflags, implicit $eax
%1:gr64 = COPY $eflags
- %2:gr32 = ADD32rr $edi, $edi, implicit-def $eflags
+ %2:gr32 = ADD32rr $edi, $edi, implicit-def dead $eflags
$eflags = COPY %1
CTEST32rr %2, %2, 0, 1, implicit-def $eflags, implicit $eflags
RET 0, $al
diff --git a/llvm/test/CodeGen/X86/apx/ccmp.ll b/llvm/test/CodeGen/X86/apx/ccmp.ll
new file mode 100644
index 000000000000..e081024b8698
--- /dev/null
+++ b/llvm/test/CodeGen/X86/apx/ccmp.ll
@@ -0,0 +1,1102 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ccmp -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ccmp,+ndd -verify-machineinstrs | FileCheck %s --check-prefix=NDD
+
+define void @ccmp8rr_zf(i8 noundef %a, i8 noundef %b, i8 noundef %c) {
+; CHECK-LABEL: ccmp8rr_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb %dl, %dil
+; CHECK-NEXT: ccmpneb {dfv=zf} %dl, %sil
+; CHECK-NEXT: jne .LBB0_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB0_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8rr_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb %dl, %dil
+; NDD-NEXT: ccmpneb {dfv=zf} %dl, %sil
+; NDD-NEXT: jne .LBB0_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB0_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp eq i8 %a, %c
+ %cmp1 = icmp eq i8 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp8rr_cf(i8 noundef %a, i8 noundef %b) {
+; CHECK-LABEL: ccmp8rr_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb $2, %dil
+; CHECK-NEXT: ccmpgeb {dfv=cf} $2, %sil
+; CHECK-NEXT: jb .LBB1_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB1_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8rr_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb $2, %dil
+; NDD-NEXT: ccmpgeb {dfv=cf} $2, %sil
+; NDD-NEXT: jb .LBB1_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB1_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp sgt i8 %a, 1
+ %tobool = icmp ugt i8 %b, 1
+ %or.cond = and i1 %cmp, %tobool
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret void
+}
+
+define i8 @ccmp8rr_sf(i8 %a, i8 %b, i8* nocapture %c) {
+; CHECK-LABEL: ccmp8rr_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: ccmpneb {dfv=sf} $2, %sil
+; CHECK-NEXT: jl .LBB2_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rdx)
+; CHECK-NEXT: .LBB2_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8rr_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: ccmpneb {dfv=sf} $2, %sil
+; NDD-NEXT: jl .LBB2_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rdx)
+; NDD-NEXT: .LBB2_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp ne i8 %a, 0
+ %cmp = icmp sgt i8 %b, 1
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
+define i8 @ccmp8rr_none(i8 %a, i8 %b, i8* nocapture %c) {
+; CHECK-LABEL: ccmp8rr_none:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: ccmpeb {dfv=} $2, %sil
+; CHECK-NEXT: jl .LBB3_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rdx)
+; CHECK-NEXT: .LBB3_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8rr_none:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: ccmpeb {dfv=} $2, %sil
+; NDD-NEXT: jl .LBB3_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rdx)
+; NDD-NEXT: .LBB3_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp ne i8 %a, 0
+ %cmp = icmp sgt i8 %b, 1
+ %or.cond = select i1 %tobool, i1 true, i1 %cmp
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
+define void @ccmp16rr_sf(i16 noundef %a, i16 noundef %b, i16 noundef %c) {
+; CHECK-LABEL: ccmp16rr_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpw %dx, %di
+; CHECK-NEXT: ccmplew {dfv=sf} %dx, %si
+; CHECK-NEXT: jge .LBB4_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB4_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp16rr_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpw %dx, %di
+; NDD-NEXT: ccmplew {dfv=sf} %dx, %si
+; NDD-NEXT: jge .LBB4_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB4_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp sgt i16 %a, %c
+ %cmp1 = icmp slt i16 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp32rr_cf(i32 noundef %a, i32 noundef %b, i32 noundef %c) {
+; CHECK-LABEL: ccmp32rr_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl %edx, %edi
+; CHECK-NEXT: ccmpbl {dfv=cf} %edx, %esi
+; CHECK-NEXT: ja .LBB5_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB5_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp32rr_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl %edx, %edi
+; NDD-NEXT: ccmpbl {dfv=cf} %edx, %esi
+; NDD-NEXT: ja .LBB5_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB5_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp uge i32 %a, %c
+ %cmp1 = icmp ule i32 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp64rr_of(i64 %a, i64 %b, i64 %c) {
+; CHECK-LABEL: ccmp64rr_of:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: cmpq %rdx, %rdi
+; CHECK-NEXT: ccmpbq {dfv=of} %rsi, %rdi
+; CHECK-NEXT: jno .LBB6_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB6_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp64rr_of:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: cmpq %rdx, %rdi
+; NDD-NEXT: ccmpbq {dfv=of} %rsi, %rdi
+; NDD-NEXT: jno .LBB6_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB6_1: # %if.end
+; NDD-NEXT: retq
+bb:
+ %cmp = icmp uge i64 %a, %c
+ %smul = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+ %obit = extractvalue {i64, i1} %smul, 1
+ %or.cond = or i1 %cmp, %obit
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:                                          ; preds = %bb
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end:                                           ; preds = %bb, %if.then
+ ret void
+}
+
+define void @ccmp64rr_of_crossbb(i64 %a, i64 %b) {
+; CHECK-LABEL: ccmp64rr_of_crossbb:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: testq %rdi, %rdi
+; CHECK-NEXT: je .LBB7_2
+; CHECK-NEXT: # %bb.1: # %bb1
+; CHECK-NEXT: cmpq %rsi, %rdi
+; CHECK-NEXT: .LBB7_2: # %bb3
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp64rr_of_crossbb:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: testq %rdi, %rdi
+; NDD-NEXT: je .LBB7_2
+; NDD-NEXT: # %bb.1: # %bb1
+; NDD-NEXT: cmpq %rsi, %rdi
+; NDD-NEXT: .LBB7_2: # %bb3
+; NDD-NEXT: retq
+bb:
+ %cond1 = icmp eq i64 %a, 0
+ br i1 %cond1, label %bb3, label %bb1
+
+bb1: ; preds = %bb
+ %smul = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+ %obit = extractvalue {i64, i1} %smul, 1
+ br i1 %obit, label %bb3, label %bb2
+
+bb2: ; preds = %bb1
+ %tmp = ptrtoint ptr null to i64
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1, %bb
+ ret void
+}
+
+define void @ccmp8ri_zf(i8 noundef %a, i8 noundef %b, i8 noundef %c) {
+; CHECK-LABEL: ccmp8ri_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb %dl, %dil
+; CHECK-NEXT: ccmpleb {dfv=zf} $123, %sil
+; CHECK-NEXT: jne .LBB8_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB8_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8ri_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb %dl, %dil
+; NDD-NEXT: ccmpleb {dfv=zf} $123, %sil
+; NDD-NEXT: jne .LBB8_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB8_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp sgt i8 %a, %c
+ %cmp1 = icmp eq i8 %b, 123
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define i8 @ccmp8ri_zf_double(i8 %a, double %b, i8* nocapture %c) {
+; CHECK-LABEL: ccmp8ri_zf_double:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: ccmpeb {dfv=zf} $123, %dil
+; CHECK-NEXT: je .LBB9_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rsi)
+; CHECK-NEXT: .LBB9_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8ri_zf_double:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: xorpd %xmm1, %xmm1
+; NDD-NEXT: ucomisd %xmm1, %xmm0
+; NDD-NEXT: ccmpeb {dfv=zf} $123, %dil
+; NDD-NEXT: je .LBB9_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rsi)
+; NDD-NEXT: .LBB9_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp ne i8 %a, 123
+ %cmp = fcmp ueq double %b, 0.0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
+define i8 @ccmp8ri_zf_double_p(i8 %a, double %b, i8* nocapture %c) {
+; CHECK-LABEL: ccmp8ri_zf_double_p:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb $123, %dil
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: ucomisd %xmm0, %xmm0
+; CHECK-NEXT: setp %cl
+; CHECK-NEXT: andb %al, %cl
+; CHECK-NEXT: cmpb $1, %cl
+; CHECK-NEXT: jne .LBB10_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rsi)
+; CHECK-NEXT: .LBB10_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8ri_zf_double_p:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb $123, %dil
+; NDD-NEXT: setne %al
+; NDD-NEXT: ucomisd %xmm0, %xmm0
+; NDD-NEXT: setp %cl
+; NDD-NEXT: andb %cl, %al
+; NDD-NEXT: cmpb $1, %al
+; NDD-NEXT: jne .LBB10_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rsi)
+; NDD-NEXT: .LBB10_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp ne i8 %a, 123
+ %cmp = fcmp uno double %b, 0.0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
+define i8 @ccmp8ri_zf_double_np(i8 %a, double %b, i8* nocapture %c) {
+; CHECK-LABEL: ccmp8ri_zf_double_np:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb $123, %dil
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: ucomisd %xmm0, %xmm0
+; CHECK-NEXT: setnp %cl
+; CHECK-NEXT: andb %al, %cl
+; CHECK-NEXT: cmpb $1, %cl
+; CHECK-NEXT: jne .LBB11_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rsi)
+; CHECK-NEXT: .LBB11_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8ri_zf_double_np:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb $123, %dil
+; NDD-NEXT: setne %al
+; NDD-NEXT: ucomisd %xmm0, %xmm0
+; NDD-NEXT: setnp %cl
+; NDD-NEXT: andb %cl, %al
+; NDD-NEXT: cmpb $1, %al
+; NDD-NEXT: jne .LBB11_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rsi)
+; NDD-NEXT: .LBB11_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp ne i8 %a, 123
+ %cmp = fcmp ord double %b, 0.0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
+define void @ccmp16ri_zf(i16 noundef %a, i16 noundef %b, i16 noundef %c) {
+; CHECK-LABEL: ccmp16ri_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpw %dx, %di
+; CHECK-NEXT: movswl %si, %eax
+; CHECK-NEXT: ccmpael {dfv=sf} $1234, %eax # imm = 0x4D2
+; CHECK-NEXT: jge .LBB12_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB12_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp16ri_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpw %dx, %di
+; NDD-NEXT: movswl %si, %eax
+; NDD-NEXT: ccmpael {dfv=sf} $1234, %eax # imm = 0x4D2
+; NDD-NEXT: jge .LBB12_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB12_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp ult i16 %a, %c
+ %cmp1 = icmp slt i16 %b, 1234
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp32ri_cf(i32 noundef %a, i32 noundef %b, i32 noundef %c) {
+; CHECK-LABEL: ccmp32ri_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl %edx, %edi
+; CHECK-NEXT: ccmpbl {dfv=cf} $123457, %esi # imm = 0x1E241
+; CHECK-NEXT: jae .LBB13_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB13_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp32ri_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl %edx, %edi
+; NDD-NEXT: ccmpbl {dfv=cf} $123457, %esi # imm = 0x1E241
+; NDD-NEXT: jae .LBB13_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB13_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp uge i32 %a, %c
+ %cmp1 = icmp ule i32 %b, 123456
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp64ri32_zf(i64 noundef %a, i64 noundef %b, i64 noundef %c) {
+; CHECK-LABEL: ccmp64ri32_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpq %rdx, %rdi
+; CHECK-NEXT: ccmpbeq {dfv=sf} $123456, %rsi # imm = 0x1E240
+; CHECK-NEXT: jge .LBB14_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB14_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp64ri32_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpq %rdx, %rdi
+; NDD-NEXT: ccmpbeq {dfv=sf} $123456, %rsi # imm = 0x1E240
+; NDD-NEXT: jge .LBB14_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB14_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp ugt i64 %a, %c
+ %cmp1 = icmp slt i64 %b, 123456
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp8rm_zf(i8 noundef %a, i8 noundef %b, i8 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp8rm_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb %dl, %dil
+; CHECK-NEXT: ccmpneb {dfv=zf} (%rcx), %sil
+; CHECK-NEXT: jne .LBB15_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB15_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8rm_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb %dl, %dil
+; NDD-NEXT: ccmpneb {dfv=zf} (%rcx), %sil
+; NDD-NEXT: jne .LBB15_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB15_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %d = load i8, ptr %ptr
+ %cmp = icmp eq i8 %a, %c
+ %cmp1 = icmp eq i8 %b, %d
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp16rm_sf(i16 noundef %a, i16 noundef %b, i16 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp16rm_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpw %dx, %di
+; CHECK-NEXT: ccmplew {dfv=sf} (%rcx), %si
+; CHECK-NEXT: jge .LBB16_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB16_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp16rm_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpw %dx, %di
+; NDD-NEXT: ccmplew {dfv=sf} (%rcx), %si
+; NDD-NEXT: jge .LBB16_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB16_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %d = load i16, ptr %ptr
+ %cmp = icmp sgt i16 %a, %c
+ %cmp1 = icmp slt i16 %b, %d
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp32rm_cf(i32 noundef %a, i32 noundef %b, i32 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp32rm_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl %edx, %edi
+; CHECK-NEXT: ccmpgl {dfv=cf} (%rcx), %esi
+; CHECK-NEXT: ja .LBB17_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB17_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp32rm_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl %edx, %edi
+; NDD-NEXT: ccmpgl {dfv=cf} (%rcx), %esi
+; NDD-NEXT: ja .LBB17_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB17_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %d = load i32, ptr %ptr
+ %cmp = icmp sle i32 %a, %c
+ %cmp1 = icmp ule i32 %b, %d
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp64rm_sf(i64 noundef %a, i64 noundef %b, i64 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp64rm_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpq %rdx, %rdi
+; CHECK-NEXT: ccmpleq {dfv=sf} (%rcx), %rsi
+; CHECK-NEXT: jge .LBB18_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB18_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp64rm_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpq %rdx, %rdi
+; NDD-NEXT: ccmpleq {dfv=sf} (%rcx), %rsi
+; NDD-NEXT: jge .LBB18_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB18_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %d = load i64, ptr %ptr
+ %cmp = icmp sgt i64 %a, %c
+ %cmp1 = icmp slt i64 %b, %d
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp8mr_zf(i8 noundef %a, i8 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp8mr_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb %sil, %dil
+; CHECK-NEXT: ccmpgeb {dfv=zf} %sil, (%rdx)
+; CHECK-NEXT: jne .LBB19_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB19_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8mr_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb %sil, %dil
+; NDD-NEXT: ccmpgeb {dfv=zf} %sil, (%rdx)
+; NDD-NEXT: jne .LBB19_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB19_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i8, ptr %ptr
+ %cmp = icmp slt i8 %a, %c
+ %cmp1 = icmp eq i8 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp16mr_sf(i16 noundef %a, i16 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp16mr_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpw %si, %di
+; CHECK-NEXT: ccmplew {dfv=sf} %si, (%rdx)
+; CHECK-NEXT: jge .LBB20_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB20_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp16mr_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpw %si, %di
+; NDD-NEXT: ccmplew {dfv=sf} %si, (%rdx)
+; NDD-NEXT: jge .LBB20_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB20_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i16, ptr %ptr
+ %cmp = icmp sgt i16 %a, %c
+ %cmp1 = icmp slt i16 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp32mr_cf(i32 noundef %a, i32 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp32mr_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: ccmpll {dfv=cf} %esi, (%rdx)
+; CHECK-NEXT: ja .LBB21_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB21_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp32mr_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl %esi, %edi
+; NDD-NEXT: ccmpll {dfv=cf} %esi, (%rdx)
+; NDD-NEXT: ja .LBB21_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB21_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i32, ptr %ptr
+ %cmp = icmp sge i32 %a, %c
+ %cmp1 = icmp ule i32 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp64mr_sf(i64 noundef %a, i64 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp64mr_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpq %rsi, %rdi
+; CHECK-NEXT: ccmpleq {dfv=sf} %rsi, (%rdx)
+; CHECK-NEXT: jge .LBB22_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB22_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp64mr_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpq %rsi, %rdi
+; NDD-NEXT: ccmpleq {dfv=sf} %rsi, (%rdx)
+; NDD-NEXT: jge .LBB22_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB22_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i64, ptr %ptr
+ %cmp = icmp sgt i64 %a, %c
+ %cmp1 = icmp slt i64 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp8mi_zf(i8 noundef %a, i8 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp8mi_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb %sil, %dil
+; CHECK-NEXT: ccmpneb {dfv=zf} $123, (%rdx)
+; CHECK-NEXT: jne .LBB23_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB23_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8mi_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb %sil, %dil
+; NDD-NEXT: ccmpneb {dfv=zf} $123, (%rdx)
+; NDD-NEXT: jne .LBB23_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB23_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i8, ptr %ptr
+ %cmp = icmp eq i8 %a, %c
+ %cmp1 = icmp eq i8 %b, 123
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp16mi_zf(i16 noundef %a, i16 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp16mi_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpw %si, %di
+; CHECK-NEXT: ccmplew {dfv=sf} $1234, (%rdx) # imm = 0x4D2
+; CHECK-NEXT: jge .LBB24_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB24_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp16mi_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpw %si, %di
+; NDD-NEXT: ccmplew {dfv=sf} $1234, (%rdx) # imm = 0x4D2
+; NDD-NEXT: jge .LBB24_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB24_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i16, ptr %ptr
+ %cmp = icmp sgt i16 %a, %c
+ %cmp1 = icmp slt i16 %b, 1234
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp32mi_cf(i32 noundef %a, i32 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp32mi_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: ccmpnel {dfv=cf} $123457, (%rdx) # imm = 0x1E241
+; CHECK-NEXT: jae .LBB25_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB25_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp32mi_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl %esi, %edi
+; NDD-NEXT: ccmpnel {dfv=cf} $123457, (%rdx) # imm = 0x1E241
+; NDD-NEXT: jae .LBB25_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB25_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i32, ptr %ptr
+ %cmp = icmp eq i32 %a, %c
+ %cmp1 = icmp ule i32 %b, 123456
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp64mi32_zf(i64 noundef %a, i64 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp64mi32_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpq %rsi, %rdi
+; CHECK-NEXT: ccmpleq {dfv=sf} $123456, (%rdx) # imm = 0x1E240
+; CHECK-NEXT: jge .LBB26_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB26_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp64mi32_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpq %rsi, %rdi
+; NDD-NEXT: ccmpleq {dfv=sf} $123456, (%rdx) # imm = 0x1E240
+; NDD-NEXT: jge .LBB26_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB26_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i64, ptr %ptr
+ %cmp = icmp sgt i64 %a, %c
+ %cmp1 = icmp slt i64 %b, 123456
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
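+; The chained cases below fold an && of three compares into back-to-back
+; CCMPs, each predicated on the previous compare's flags, so the whole chain
+; needs only a single branch. (Informal note; the checks are autogenerated.)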
+define void @ccmp_continuous(i32 noundef %a, i32 noundef %b, i32 noundef %c) {
+; CHECK-LABEL: ccmp_continuous:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: ccmplel {dfv=} $2, %esi
+; CHECK-NEXT: ccmpll {dfv=} $3, %edx
+; CHECK-NEXT: jge .LBB27_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB27_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp_continuous:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testl %edi, %edi
+; NDD-NEXT: ccmplel {dfv=} $2, %esi
+; NDD-NEXT: ccmpll {dfv=} $3, %edx
+; NDD-NEXT: jge .LBB27_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB27_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp slt i32 %a, 1
+ %cmp1 = icmp slt i32 %b, 2
+ %or.cond = and i1 %cmp, %cmp1
+ %cmp3 = icmp slt i32 %c, 3
+ %or.cond4 = and i1 %or.cond, %cmp3
+ br i1 %or.cond4, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret void
+}
+
+define i32 @ccmp_nobranch(i32 noundef %a, i32 noundef %b) {
+; CHECK-LABEL: ccmp_nobranch:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: ccmplel {dfv=} $2, %esi
+; CHECK-NEXT: setge %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp_nobranch:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testl %edi, %edi
+; NDD-NEXT: ccmplel {dfv=} $2, %esi
+; NDD-NEXT: setge %al
+; NDD-NEXT: movzbl %al, %eax
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp sgt i32 %a, 0
+ %cmp1 = icmp sgt i32 %b, 1
+ %or.cond.not = or i1 %cmp, %cmp1
+ %. = zext i1 %or.cond.not to i32
+ ret i32 %.
+}
+
+define i32 @ccmp_continuous_nobranch(i32 noundef %a, i32 noundef %b, i32 noundef %c) {
+; CHECK-LABEL: ccmp_continuous_nobranch:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl $2, %edi
+; CHECK-NEXT: ccmpll {dfv=sf} $2, %esi
+; CHECK-NEXT: ccmpll {dfv=sf} $4, %edx
+; CHECK-NEXT: setge %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp_continuous_nobranch:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl $2, %edi
+; NDD-NEXT: ccmpll {dfv=sf} $2, %esi
+; NDD-NEXT: ccmpll {dfv=sf} $4, %edx
+; NDD-NEXT: setge %al
+; NDD-NEXT: movzbl %al, %eax
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp sgt i32 %a, 1
+ %cmp1 = icmp slt i32 %b, 2
+ %cmp2 = icmp sgt i32 %c, 3
+ %or1 = or i1 %cmp, %cmp1
+ %or2 = and i1 %or1, %cmp2
+ %. = zext i1 %or2 to i32
+ ret i32 %.
+}
+
+declare dso_local void @foo(...)
+declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
diff --git a/llvm/test/CodeGen/X86/apx/ctest.ll b/llvm/test/CodeGen/X86/apx/ctest.ll
new file mode 100644
index 000000000000..22afc39fd40c
--- /dev/null
+++ b/llvm/test/CodeGen/X86/apx/ctest.ll
@@ -0,0 +1,910 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ccmp -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ccmp,+ndd -verify-machineinstrs | FileCheck %s --check-prefix=NDD
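+; CTEST is the APX conditional test: if the source condition encoded in the
+; mnemonic (e.g. the "ne" in ctestneb) holds on the current flags, it performs
+; the TEST and updates OF/SF/ZF/CF; otherwise it sets the flags listed in
+; {dfv=...} and clears the rest of those four. (Informal summary; the
+; autogenerated CHECK lines below are authoritative.)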
+
+define void @ctest8rr_zf(i8 noundef %a, i8 noundef %b) {
+; CHECK-LABEL: ctest8rr_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: ctestneb {dfv=zf} %sil, %sil
+; CHECK-NEXT: jne .LBB0_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB0_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8rr_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: ctestneb {dfv=zf} %sil, %sil
+; NDD-NEXT: jne .LBB0_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB0_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %cmp1 = icmp eq i8 %b, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define i8 @ctest8rr_zf_double(i8 %a, double %b, i8* nocapture %c) {
+; CHECK-LABEL: ctest8rr_zf_double:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: ctesteb {dfv=zf} %dil, %dil
+; CHECK-NEXT: je .LBB1_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rsi)
+; CHECK-NEXT: .LBB1_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8rr_zf_double:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: xorpd %xmm1, %xmm1
+; NDD-NEXT: ucomisd %xmm1, %xmm0
+; NDD-NEXT: ctesteb {dfv=zf} %dil, %dil
+; NDD-NEXT: je .LBB1_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rsi)
+; NDD-NEXT: .LBB1_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp ne i8 %a, 0
+ %cmp = fcmp ueq double %b, 0.0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
+define i8 @ctest8rr_zf_double_p(i8 %a, double %b, i8* nocapture %c) {
+; CHECK-LABEL: ctest8rr_zf_double_p:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: ucomisd %xmm0, %xmm0
+; CHECK-NEXT: setp %cl
+; CHECK-NEXT: andb %al, %cl
+; CHECK-NEXT: cmpb $1, %cl
+; CHECK-NEXT: jne .LBB2_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rsi)
+; CHECK-NEXT: .LBB2_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8rr_zf_double_p:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: setne %al
+; NDD-NEXT: ucomisd %xmm0, %xmm0
+; NDD-NEXT: setp %cl
+; NDD-NEXT: andb %cl, %al
+; NDD-NEXT: cmpb $1, %al
+; NDD-NEXT: jne .LBB2_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rsi)
+; NDD-NEXT: .LBB2_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp ne i8 %a, 0
+ %cmp = fcmp uno double %b, 0.0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
+define i8 @ctest8rr_zf_double_np(i8 %a, double %b, i8* nocapture %c) {
+; CHECK-LABEL: ctest8rr_zf_double_np:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: ucomisd %xmm0, %xmm0
+; CHECK-NEXT: setnp %cl
+; CHECK-NEXT: andb %al, %cl
+; CHECK-NEXT: cmpb $1, %cl
+; CHECK-NEXT: jne .LBB3_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rsi)
+; CHECK-NEXT: .LBB3_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8rr_zf_double_np:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: setne %al
+; NDD-NEXT: ucomisd %xmm0, %xmm0
+; NDD-NEXT: setnp %cl
+; NDD-NEXT: andb %cl, %al
+; NDD-NEXT: cmpb $1, %al
+; NDD-NEXT: jne .LBB3_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rsi)
+; NDD-NEXT: .LBB3_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp ne i8 %a, 0
+ %cmp = fcmp ord double %b, 0.0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
+define void @ctest8rr_sf(i8 noundef %a, i8 noundef %b) {
+; CHECK-LABEL: ctest8rr_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: ctesteb {dfv=sf} %sil, %sil
+; CHECK-NEXT: js .LBB4_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB4_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8rr_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: ctesteb {dfv=sf} %sil, %sil
+; NDD-NEXT: js .LBB4_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB4_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp ule i8 %a, 0
+ %tobool = icmp sge i8 %b, 0
+ %or.cond = and i1 %cmp, %tobool
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret void
+}
+
+define i8 @ctest8rr_sf_2(i8 %a, i8 %b, i8* nocapture %c) {
+; CHECK-LABEL: ctest8rr_sf_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: ctestleb {dfv=sf} %sil, %sil
+; CHECK-NEXT: jns .LBB5_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rdx)
+; CHECK-NEXT: .LBB5_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8rr_sf_2:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: ctestleb {dfv=sf} %sil, %sil
+; NDD-NEXT: jns .LBB5_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rdx)
+; NDD-NEXT: .LBB5_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp sgt i8 %a, 0
+ %cmp = icmp slt i8 %b, 0
+ %or.cond = select i1 %tobool, i1 true, i1 %cmp
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
+define i8 @ctest8rr_none(i8 %a, i8 %b, i8* nocapture %c) {
+; CHECK-LABEL: ctest8rr_none:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: ctestneb {dfv=} %sil, %sil
+; CHECK-NEXT: jne .LBB6_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rdx)
+; CHECK-NEXT: .LBB6_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8rr_none:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: ctestneb {dfv=} %sil, %sil
+; NDD-NEXT: jne .LBB6_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rdx)
+; NDD-NEXT: .LBB6_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp ne i8 %a, 0
+ %cmp = icmp eq i8 %b, 0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
+define void @ctest16rr_sf(i16 noundef %a, i16 noundef %b) {
+; CHECK-LABEL: ctest16rr_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testw %di, %di
+; CHECK-NEXT: ctestlew {dfv=sf} %si, %si
+; CHECK-NEXT: jns .LBB7_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB7_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest16rr_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testw %di, %di
+; NDD-NEXT: ctestlew {dfv=sf} %si, %si
+; NDD-NEXT: jns .LBB7_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB7_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp sgt i16 %a, 0
+ %cmp1 = icmp slt i16 %b, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest32rr_zf(i32 noundef %a, i32 noundef %b) {
+; CHECK-LABEL: ctest32rr_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: ctestsl {dfv=zf} %esi, %esi
+; CHECK-NEXT: jne .LBB8_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB8_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest32rr_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testl %edi, %edi
+; NDD-NEXT: ctestsl {dfv=zf} %esi, %esi
+; NDD-NEXT: jne .LBB8_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB8_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp sge i32 %a, 0
+ %cmp1 = icmp eq i32 %b, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest8ri_zf(i8 noundef %a, i8 noundef %b) {
+; CHECK-LABEL: ctest8ri_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: ctestneb {dfv=zf} $123, %sil
+; CHECK-NEXT: jne .LBB9_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB9_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8ri_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: ctestneb {dfv=zf} $123, %sil
+; NDD-NEXT: jne .LBB9_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB9_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %and = and i8 %b, 123
+ %cmp1 = icmp eq i8 %and, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest16ri_zf(i16 noundef %a, i16 noundef %b) {
+; CHECK-LABEL: ctest16ri_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: andl $1234, %esi # imm = 0x4D2
+; CHECK-NEXT: testw %di, %di
+; CHECK-NEXT: ctestnew {dfv=zf} %si, %si
+; CHECK-NEXT: jne .LBB10_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB10_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest16ri_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: andl $1234, %esi, %eax # imm = 0x4D2
+; NDD-NEXT: testw %di, %di
+; NDD-NEXT: ctestnew {dfv=zf} %ax, %ax
+; NDD-NEXT: jne .LBB10_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB10_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp eq i16 %a, 0
+ %and = and i16 %b, 1234
+ %cmp1 = icmp eq i16 %and, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest32ri_zf(i32 noundef %a, i32 noundef %b) {
+; CHECK-LABEL: ctest32ri_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: ctestnel {dfv=zf} $12345, %esi # imm = 0x3039
+; CHECK-NEXT: jne .LBB11_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB11_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest32ri_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testl %edi, %edi
+; NDD-NEXT: ctestnel {dfv=zf} $12345, %esi # imm = 0x3039
+; NDD-NEXT: jne .LBB11_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB11_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %and = and i32 %b, 12345
+ %cmp1 = icmp eq i32 %and, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest64ri32_zf(i64 noundef %a, i64 noundef %b) {
+; CHECK-LABEL: ctest64ri32_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testq %rdi, %rdi
+; CHECK-NEXT: ctestneq {dfv=zf} $123456, %rsi # imm = 0x1E240
+; CHECK-NEXT: jne .LBB12_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB12_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest64ri32_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testq %rdi, %rdi
+; NDD-NEXT: ctestneq {dfv=zf} $123456, %rsi # imm = 0x1E240
+; NDD-NEXT: jne .LBB12_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB12_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp eq i64 %a, 0
+ %and = and i64 %b, 123456
+ %cmp1 = icmp eq i64 %and, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest8mr_zf(i8 noundef %a, ptr %ptr) {
+; CHECK-LABEL: ctest8mr_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movzbl (%rsi), %eax
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: ctestneb {dfv=zf} %al, %al
+; CHECK-NEXT: jne .LBB13_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB13_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8mr_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: movzbl (%rsi), %eax
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: ctestneb {dfv=zf} %al, %al
+; NDD-NEXT: jne .LBB13_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB13_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i8, ptr %ptr
+ %cmp = icmp eq i8 %a, 0
+ %cmp1 = icmp eq i8 %b, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest16mr_zf(i16 noundef %a, ptr %ptr) {
+; CHECK-LABEL: ctest16mr_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movzwl (%rsi), %eax
+; CHECK-NEXT: testw %di, %di
+; CHECK-NEXT: ctestnew {dfv=zf} %ax, %ax
+; CHECK-NEXT: jne .LBB14_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB14_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest16mr_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: movzwl (%rsi), %eax
+; NDD-NEXT: testw %di, %di
+; NDD-NEXT: ctestnew {dfv=zf} %ax, %ax
+; NDD-NEXT: jne .LBB14_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB14_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i16, ptr %ptr
+ %cmp = icmp eq i16 %a, 0
+ %cmp1 = icmp eq i16 %b, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest32mr_cf(i32 noundef %a, ptr %ptr) {
+; CHECK-LABEL: ctest32mr_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movl (%rsi), %eax
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: ctestnel {dfv=zf} %eax, %eax
+; CHECK-NEXT: jne .LBB15_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB15_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest32mr_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: movl (%rsi), %eax
+; NDD-NEXT: testl %edi, %edi
+; NDD-NEXT: ctestnel {dfv=zf} %eax, %eax
+; NDD-NEXT: jne .LBB15_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB15_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i32, ptr %ptr
+ %cmp = icmp eq i32 %a, 0
+ %cmp1 = icmp eq i32 %b, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest64mr_zf(i64 noundef %a, ptr %ptr) {
+; CHECK-LABEL: ctest64mr_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq (%rsi), %rax
+; CHECK-NEXT: testq %rdi, %rdi
+; CHECK-NEXT: ctestneq {dfv=zf} %rax, %rax
+; CHECK-NEXT: jne .LBB16_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB16_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest64mr_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: movq (%rsi), %rax
+; NDD-NEXT: testq %rdi, %rdi
+; NDD-NEXT: ctestneq {dfv=zf} %rax, %rax
+; NDD-NEXT: jne .LBB16_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB16_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i64, ptr %ptr
+ %cmp = icmp eq i64 %a, 0
+ %cmp1 = icmp eq i64 %b, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest8mi_zf(i8 noundef %a, ptr %ptr) {
+; CHECK-LABEL: ctest8mi_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: ctestneb {dfv=zf} $123, (%rsi)
+; CHECK-NEXT: jne .LBB17_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB17_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8mi_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: ctestneb {dfv=zf} $123, (%rsi)
+; NDD-NEXT: jne .LBB17_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB17_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i8, ptr %ptr
+ %cmp = icmp eq i8 %a, 0
+ %and = and i8 %b, 123
+ %cmp1 = icmp eq i8 %and, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest16mi_zf(i16 noundef %a, ptr %ptr) {
+; CHECK-LABEL: ctest16mi_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movzwl (%rsi), %eax
+; CHECK-NEXT: andl $1234, %eax # imm = 0x4D2
+; CHECK-NEXT: testw %di, %di
+; CHECK-NEXT: ctestnew {dfv=zf} %ax, %ax
+; CHECK-NEXT: jne .LBB18_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB18_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest16mi_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: movzwl (%rsi), %eax
+; NDD-NEXT: andl $1234, %eax # imm = 0x4D2
+; NDD-NEXT: testw %di, %di
+; NDD-NEXT: ctestnew {dfv=zf} %ax, %ax
+; NDD-NEXT: jne .LBB18_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB18_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i16, ptr %ptr
+ %cmp = icmp eq i16 %a, 0
+ %and = and i16 %b, 1234
+ %cmp1 = icmp eq i16 %and, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest32mi_zf(i32 noundef %a, ptr %ptr) {
+; CHECK-LABEL: ctest32mi_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movzwl (%rsi), %eax
+; CHECK-NEXT: andl $12345, %eax # imm = 0x3039
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: ctestnew {dfv=zf} %ax, %ax
+; CHECK-NEXT: jne .LBB19_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB19_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest32mi_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: movzwl (%rsi), %eax
+; NDD-NEXT: andl $12345, %eax # imm = 0x3039
+; NDD-NEXT: testl %edi, %edi
+; NDD-NEXT: ctestnew {dfv=zf} %ax, %ax
+; NDD-NEXT: jne .LBB19_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB19_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i32, ptr %ptr
+ %cmp = icmp eq i32 %a, 0
+ %and = and i32 %b, 12345
+ %cmp1 = icmp eq i32 %and, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest64mi32_zf(i64 noundef %a, ptr %ptr) {
+; CHECK-LABEL: ctest64mi32_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testq %rdi, %rdi
+; CHECK-NEXT: ctestnel {dfv=zf} $123456, (%rsi) # imm = 0x1E240
+; CHECK-NEXT: jne .LBB20_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB20_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest64mi32_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testq %rdi, %rdi
+; NDD-NEXT: ctestnel {dfv=zf} $123456, (%rsi) # imm = 0x1E240
+; NDD-NEXT: jne .LBB20_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB20_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i64, ptr %ptr
+ %cmp = icmp eq i64 %a, 0
+ %and = and i64 %b, 123456
+ %cmp1 = icmp eq i64 %and, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest_continuous(i32 noundef %a, i32 noundef %b, i32 noundef %c) {
+; CHECK-LABEL: ctest_continuous:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: ctestll {dfv=} %esi, %esi
+; CHECK-NEXT: ctestnsl {dfv=sf} %edx, %edx
+; CHECK-NEXT: jns .LBB21_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB21_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest_continuous:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl %esi, %edi
+; NDD-NEXT: ctestll {dfv=} %esi, %esi
+; NDD-NEXT: ctestnsl {dfv=sf} %edx, %edx
+; NDD-NEXT: jns .LBB21_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB21_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp slt i32 %a, %b
+ %cmp1 = icmp slt i32 %b, 0
+ %or.cond = and i1 %cmp, %cmp1
+ %cmp2 = icmp slt i32 %c, 0
+ %or.cond3 = or i1 %or.cond, %cmp2
+ br i1 %or.cond3, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret void
+}
+
+define i32 @ctest_nobranch(i32 noundef %a, i32 noundef %b) {
+; CHECK-LABEL: ctest_nobranch:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: ctestlel {dfv=} %esi, %esi
+; CHECK-NEXT: setg %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest_nobranch:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testl %edi, %edi
+; NDD-NEXT: ctestlel {dfv=} %esi, %esi
+; NDD-NEXT: setg %al
+; NDD-NEXT: movzbl %al, %eax
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp sgt i32 %a, 0
+ %cmp1 = icmp sgt i32 %b, 0
+ %or.cond.not = or i1 %cmp, %cmp1
+ %. = zext i1 %or.cond.not to i32
+ ret i32 %.
+}
+
+define i32 @ctest_continuous_nobranch(i32 noundef %a, i32 noundef %b, i32 noundef %c) {
+; CHECK-LABEL: ctest_continuous_nobranch:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: ctestlel {dfv=sf} %esi, %esi
+; CHECK-NEXT: ctestsl {dfv=zf} %edx, %edx
+; CHECK-NEXT: setg %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest_continuous_nobranch:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testl %edi, %edi
+; NDD-NEXT: ctestlel {dfv=sf} %esi, %esi
+; NDD-NEXT: ctestsl {dfv=zf} %edx, %edx
+; NDD-NEXT: setg %al
+; NDD-NEXT: movzbl %al, %eax
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp sgt i32 %a, 0
+ %cmp1 = icmp slt i32 %b, 0
+ %cmp2 = icmp sgt i32 %c, 0
+ %or1 = or i1 %cmp, %cmp1
+ %or2 = and i1 %or1, %cmp2
+ %. = zext i1 %or2 to i32
+ ret i32 %.
+}
+
+declare dso_local void @foo(...)
diff --git a/llvm/test/CodeGen/X86/avx512-cmp-kor-sequence.ll b/llvm/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
index bb86f307afa8..b4ba23934d54 100644
--- a/llvm/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
+++ b/llvm/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
@@ -48,5 +48,5 @@ entry:
; Function Attrs: nounwind readnone
declare <16 x i1> @llvm.x86.avx512.mask.cmp.ps.512(<16 x float>, <16 x float>, i32, <16 x i1>, i32) #1
-attributes #0 = { nounwind readnone uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="knl" "target-features"="+adx,+aes,+avx,+avx2,+avx512cd,+avx512er,+avx512f,+avx512pf,+bmi,+bmi2,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+pclmul,+popcnt,+prefetchwt1,+rdrnd,+rdseed,+rtm,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind readnone uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="broadwell" "target-features"="+adx,+aes,+avx,+avx2,+avx512cd,+avx512f,+bmi,+bmi2,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+evex512,+lzcnt,+mmx,+movbe,+pclmul,+popcnt,+rdrnd,+rdseed,+rtm,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt,-vzeroupper" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
diff --git a/llvm/test/CodeGen/X86/avx512-gather-scatter-intrin-deprecated.ll b/llvm/test/CodeGen/X86/avx512-gather-scatter-intrin-deprecated.ll
index 8d09497cefb1..77053e2c1bc9 100644
--- a/llvm/test/CodeGen/X86/avx512-gather-scatter-intrin-deprecated.ll
+++ b/llvm/test/CodeGen/X86/avx512-gather-scatter-intrin-deprecated.ll
@@ -268,30 +268,6 @@ define void @gather_qps(<8 x i64> %ind, <8 x float> %src, ptr %base, ptr %stbuf)
ret void
}
-declare void @llvm.x86.avx512.gatherpf.qps.512(i8, <8 x i64>, ptr , i32, i32);
-declare void @llvm.x86.avx512.scatterpf.qps.512(i8, <8 x i64>, ptr , i32, i32);
-define void @prefetch(<8 x i64> %ind, ptr %base) {
-; CHECK-LABEL: prefetch:
-; CHECK: ## %bb.0:
-; CHECK-NEXT: kxnorw %k0, %k0, %k1
-; CHECK-NEXT: vgatherpf0qps (%rdi,%zmm0,4) {%k1}
-; CHECK-NEXT: kxorw %k0, %k0, %k1
-; CHECK-NEXT: vgatherpf1qps (%rdi,%zmm0,4) {%k1}
-; CHECK-NEXT: movb $1, %al
-; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vscatterpf0qps (%rdi,%zmm0,2) {%k1}
-; CHECK-NEXT: movb $120, %al
-; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vscatterpf1qps (%rdi,%zmm0,2) {%k1}
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
- call void @llvm.x86.avx512.gatherpf.qps.512(i8 -1, <8 x i64> %ind, ptr %base, i32 4, i32 3)
- call void @llvm.x86.avx512.gatherpf.qps.512(i8 0, <8 x i64> %ind, ptr %base, i32 4, i32 2)
- call void @llvm.x86.avx512.scatterpf.qps.512(i8 1, <8 x i64> %ind, ptr %base, i32 2, i32 3)
- call void @llvm.x86.avx512.scatterpf.qps.512(i8 120, <8 x i64> %ind, ptr %base, i32 2, i32 2)
- ret void
-}
-
declare <2 x double> @llvm.x86.avx512.gather3div2.df(<2 x double>, ptr, <2 x i64>, i8, i32)
define <2 x double>@test_int_x86_avx512_gather3div2_df(<2 x double> %x0, ptr %x1, <2 x i64> %x2, i8 %x3) {
diff --git a/llvm/test/CodeGen/X86/avx512-gather-scatter-intrin.ll b/llvm/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
index acbf4387255c..df71e3c3afa5 100644
--- a/llvm/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
+++ b/llvm/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
@@ -265,30 +265,6 @@ define dso_local void @gather_qps(<8 x i64> %ind, <8 x float> %src, ptr %base, p
ret void
}
-declare void @llvm.x86.avx512.gatherpf.qps.512(i8, <8 x i64>, ptr , i32, i32);
-declare void @llvm.x86.avx512.scatterpf.qps.512(i8, <8 x i64>, ptr , i32, i32);
-define dso_local void @prefetch(<8 x i64> %ind, ptr %base) {
-; CHECK-LABEL: prefetch:
-; CHECK: # %bb.0:
-; CHECK-NEXT: kxnorw %k0, %k0, %k1
-; CHECK-NEXT: vgatherpf0qps (%rdi,%zmm0,4) {%k1}
-; CHECK-NEXT: kxorw %k0, %k0, %k1
-; CHECK-NEXT: vgatherpf1qps (%rdi,%zmm0,4) {%k1}
-; CHECK-NEXT: movb $1, %al
-; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vscatterpf0qps (%rdi,%zmm0,2) {%k1}
-; CHECK-NEXT: movb $120, %al
-; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vscatterpf1qps (%rdi,%zmm0,2) {%k1}
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
- call void @llvm.x86.avx512.gatherpf.qps.512(i8 -1, <8 x i64> %ind, ptr %base, i32 4, i32 3)
- call void @llvm.x86.avx512.gatherpf.qps.512(i8 0, <8 x i64> %ind, ptr %base, i32 4, i32 2)
- call void @llvm.x86.avx512.scatterpf.qps.512(i8 1, <8 x i64> %ind, ptr %base, i32 2, i32 3)
- call void @llvm.x86.avx512.scatterpf.qps.512(i8 120, <8 x i64> %ind, ptr %base, i32 2, i32 2)
- ret void
-}
-
define <2 x double> @test_int_x86_avx512_mask_gather3div2_df(<2 x double> %x0, ptr %x1, <2 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_gather3div2_df:
; CHECK: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/avx512er-intrinsics.ll b/llvm/test/CodeGen/X86/avx512er-intrinsics.ll
deleted file mode 100644
index fa4025f76b57..000000000000
--- a/llvm/test/CodeGen/X86/avx512er-intrinsics.ll
+++ /dev/null
@@ -1,306 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512er --show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512er --show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64
-
-define <16 x float> @test_rsqrt28_ps(<16 x float> %a0) {
-; CHECK-LABEL: test_rsqrt28_ps:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vrsqrt28ps {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xcc,0xc0]
-; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
- %res = call <16 x float> @llvm.x86.avx512.rsqrt28.ps(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 8)
- ret <16 x float> %res
-}
-
-define <16 x float> @test1_rsqrt28_ps(<16 x float> %a0, <16 x float> %a1) {
-; CHECK-LABEL: test1_rsqrt28_ps:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movw $6, %ax # encoding: [0x66,0xb8,0x06,0x00]
-; CHECK-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; CHECK-NEXT: vrsqrt28ps {sae}, %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf2,0x7d,0x19,0xcc,0xc8]
-; CHECK-NEXT: vmovaps %zmm1, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1]
-; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
- %res = call <16 x float> @llvm.x86.avx512.rsqrt28.ps(<16 x float> %a0, <16 x float> %a1, i16 6, i32 8)
- ret <16 x float> %res
-}
-
-define <16 x float> @test2_rsqrt28_ps(<16 x float> %a0) {
-; CHECK-LABEL: test2_rsqrt28_ps:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movw $6, %ax # encoding: [0x66,0xb8,0x06,0x00]
-; CHECK-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; CHECK-NEXT: vrsqrt28ps %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xc9,0xcc,0xc0]
-; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
- %res = call <16 x float> @llvm.x86.avx512.rsqrt28.ps(<16 x float> %a0, <16 x float> undef, i16 6, i32 4)
- ret <16 x float> %res
-}
-
-define <16 x float> @test3_rsqrt28_ps(<16 x float> %a0) {
-; CHECK-LABEL: test3_rsqrt28_ps:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movw $6, %ax # encoding: [0x66,0xb8,0x06,0x00]
-; CHECK-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; CHECK-NEXT: vrsqrt28ps %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xc9,0xcc,0xc0]
-; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
- %res = call <16 x float> @llvm.x86.avx512.rsqrt28.ps(<16 x float> %a0, <16 x float> zeroinitializer, i16 6, i32 4)
- ret <16 x float> %res
-}
-
-define <16 x float> @test4_rsqrt28_ps(<16 x float> %a0) {
-; CHECK-LABEL: test4_rsqrt28_ps:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movw $6, %ax # encoding: [0x66,0xb8,0x06,0x00]
-; CHECK-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; CHECK-NEXT: vrsqrt28ps {sae}, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x99,0xcc,0xc0]
-; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
- %res = call <16 x float> @llvm.x86.avx512.rsqrt28.ps(<16 x float> %a0, <16 x float> undef, i16 6, i32 8)
- ret <16 x float> %res
-}
-
-declare <16 x float> @llvm.x86.avx512.rsqrt28.ps(<16 x float>, <16 x float>, i16, i32) nounwind readnone
-
-define <16 x float> @test_rcp28_ps_512(<16 x float> %a0) {
-; CHECK-LABEL: test_rcp28_ps_512:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vrcp28ps {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xca,0xc0]
-; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
- %res = call <16 x float> @llvm.x86.avx512.rcp28.ps(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 8)
- ret <16 x float> %res
-}
-declare <16 x float> @llvm.x86.avx512.rcp28.ps(<16 x float>, <16 x float>, i16, i32) nounwind readnone
-
-define <8 x double> @test_rcp28_pd_512(<8 x double> %a0) {
-; CHECK-LABEL: test_rcp28_pd_512:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vrcp28pd {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x18,0xca,0xc0]
-; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
- %res = call <8 x double> @llvm.x86.avx512.rcp28.pd(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 8)
- ret <8 x double> %res
-}
-declare <8 x double> @llvm.x86.avx512.rcp28.pd(<8 x double>, <8 x double>, i8, i32) nounwind readnone
-
-define <16 x float> @test_exp2_ps_512(<16 x float> %a0) {
-; CHECK-LABEL: test_exp2_ps_512:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vexp2ps {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xc8,0xc0]
-; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
- %res = call <16 x float> @llvm.x86.avx512.exp2.ps(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 8)
- ret <16 x float> %res
-}
-declare <16 x float> @llvm.x86.avx512.exp2.ps(<16 x float>, <16 x float>, i16, i32) nounwind readnone
-
-define <8 x double> @test_exp2_pd_512(<8 x double> %a0) {
-; CHECK-LABEL: test_exp2_pd_512:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vexp2pd {sae}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x18,0xc8,0xc0]
-; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
- %res = call <8 x double> @llvm.x86.avx512.exp2.pd(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1, i32 8)
- ret <8 x double> %res
-}
-declare <8 x double> @llvm.x86.avx512.exp2.pd(<8 x double>, <8 x double>, i8, i32) nounwind readnone
-
-define <4 x float> @test_rsqrt28_ss(<4 x float> %a0) {
-; CHECK-LABEL: test_rsqrt28_ss:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vrsqrt28ss {sae}, %xmm0, %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xcd,0xc0]
-; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 -1, i32 8) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
-
-define <4 x float> @test_rcp28_ss(<4 x float> %a0) {
-; CHECK-LABEL: test_rcp28_ss:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vrcp28ss {sae}, %xmm0, %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7d,0x18,0xcb,0xc0]
-; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.rcp28.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 -1, i32 8) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.avx512.rcp28.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
-
-define <4 x float> @test_rcp28_ss_load(<4 x float> %a0, ptr %a1ptr) {
-; X86-LABEL: test_rcp28_ss_load:
-; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT: vrcp28ss (%eax), %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0xcb,0x00]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_rcp28_ss_load:
-; X64: # %bb.0:
-; X64-NEXT: vrcp28ss (%rdi), %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0xcb,0x07]
-; X64-NEXT: retq # encoding: [0xc3]
- %a1 = load <4 x float>, ptr %a1ptr
- %res = call <4 x float> @llvm.x86.avx512.rcp28.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> undef, i8 -1, i32 4) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-
-define <4 x float> @test_rsqrt28_ss_load(<4 x float> %a0, ptr %a1ptr) {
-; X86-LABEL: test_rsqrt28_ss_load:
-; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT: vrsqrt28ss (%eax), %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0xcd,0x00]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_rsqrt28_ss_load:
-; X64: # %bb.0:
-; X64-NEXT: vrsqrt28ss (%rdi), %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0xcd,0x07]
-; X64-NEXT: retq # encoding: [0xc3]
- %a1 = load <4 x float>, ptr %a1ptr
- %res = call <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> undef, i8 -1, i32 4) ; <<4 x float>> [#uses=1]
- ret <4 x float> %res
-}
-
-define <4 x float> @test_rsqrt28_ss_maskz(<4 x float> %a0, i8 %mask) {
-; X86-LABEL: test_rsqrt28_ss_maskz:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vrsqrt28ss {sae}, %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x99,0xcd,0xc0]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_rsqrt28_ss_maskz:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vrsqrt28ss {sae}, %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x99,0xcd,0xc0]
-; X64-NEXT: retq # encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float> %a0, <4 x float> %a0, <4 x float> zeroinitializer, i8 %mask, i32 8) ;
- ret <4 x float> %res
-}
-
-define <4 x float> @test_rsqrt28_ss_mask(<4 x float> %a0, <4 x float> %b0, <4 x float> %c0, i8 %mask) {
-; X86-LABEL: test_rsqrt28_ss_mask:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vrsqrt28ss {sae}, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x19,0xcd,0xd1]
-; X86-NEXT: vmovaps %xmm2, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc2]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_rsqrt28_ss_mask:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vrsqrt28ss {sae}, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x19,0xcd,0xd1]
-; X64-NEXT: vmovaps %xmm2, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc2]
-; X64-NEXT: retq # encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.rsqrt28.ss(<4 x float> %a0, <4 x float> %b0, <4 x float> %c0, i8 %mask, i32 8) ;
- ret <4 x float> %res
-}
-
-define <2 x double> @test_rcp28_sd_mask_load(<2 x double> %a0, ptr %a1ptr, <2 x double> %a2, i8 %mask) {
-; X86-LABEL: test_rcp28_sd_mask_load:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vrcp28sd %xmm0, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xcb,0xc8]
-; X86-NEXT: vmovapd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc1]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_rcp28_sd_mask_load:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
-; X64-NEXT: vrcp28sd %xmm0, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xcb,0xc8]
-; X64-NEXT: vmovapd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc1]
-; X64-NEXT: retq # encoding: [0xc3]
- %a1 = load <2 x double>, ptr %a1ptr
- %res = call <2 x double> @llvm.x86.avx512.rcp28.sd(<2 x double> %a0, <2 x double> %a0, <2 x double> %a2, i8 %mask, i32 4) ;
- ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.avx512.rcp28.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) nounwind readnone
-
-define <2 x double> @test_rsqrt28_sd_maskz_load(<2 x double> %a0, ptr %a1ptr, i8 %mask) {
-; X86-LABEL: test_rsqrt28_sd_maskz_load:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vrsqrt28sd %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0xc0]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_rsqrt28_sd_maskz_load:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
-; X64-NEXT: vrsqrt28sd %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0xc0]
-; X64-NEXT: retq # encoding: [0xc3]
- %a1 = load <2 x double>, ptr %a1ptr
- %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %a0, <2 x double> zeroinitializer, i8 %mask, i32 4) ;
- ret <2 x double> %res
-}
-
-define <2 x double> @test_rsqrt28_sd_maskz(<2 x double> %a0, i8 %mask) {
-; X86-LABEL: test_rsqrt28_sd_maskz:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vrsqrt28sd {sae}, %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x99,0xcd,0xc0]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_rsqrt28_sd_maskz:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vrsqrt28sd {sae}, %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x99,0xcd,0xc0]
-; X64-NEXT: retq # encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %a0, <2 x double> zeroinitializer, i8 %mask, i32 8) ;
- ret <2 x double> %res
-}
-
-define <2 x double> @test_rsqrt28_sd_mask(<2 x double> %a0, <2 x double> %b0, <2 x double> %c0, i8 %mask) {
-; X86-LABEL: test_rsqrt28_sd_mask:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vrsqrt28sd {sae}, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x19,0xcd,0xd1]
-; X86-NEXT: vmovapd %xmm2, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc2]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_rsqrt28_sd_mask:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vrsqrt28sd {sae}, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x19,0xcd,0xd1]
-; X64-NEXT: vmovapd %xmm2, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc2]
-; X64-NEXT: retq # encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %b0, <2 x double> %c0, i8 %mask, i32 8) ;
- ret <2 x double> %res
-}
-
-declare <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) nounwind readnone
-
-define <2 x double> @test_rsqrt28_sd_maskz_mem(<2 x double> %a0, ptr %ptr, i8 %mask) {
-; X86-LABEL: test_rsqrt28_sd_maskz_mem:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT: vrsqrt28sd (%eax), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x00]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_rsqrt28_sd_maskz_mem:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
-; X64-NEXT: vrsqrt28sd (%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x07]
-; X64-NEXT: retq # encoding: [0xc3]
- %mem = load double , ptr %ptr, align 8
- %mem_v = insertelement <2 x double> undef, double %mem, i32 0
- %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %mem_v, <2 x double> zeroinitializer, i8 %mask, i32 4) ;
- ret <2 x double> %res
-}
-
-define <2 x double> @test_rsqrt28_sd_maskz_mem_offset(<2 x double> %a0, ptr %ptr, i8 %mask) {
-; X86-LABEL: test_rsqrt28_sd_maskz_mem_offset:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT: vrsqrt28sd 144(%eax), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x40,0x12]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_rsqrt28_sd_maskz_mem_offset:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
-; X64-NEXT: vrsqrt28sd 144(%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x47,0x12]
-; X64-NEXT: retq # encoding: [0xc3]
- %ptr1 = getelementptr double, ptr %ptr, i32 18
- %mem = load double , ptr %ptr1, align 8
- %mem_v = insertelement <2 x double> undef, double %mem, i32 0
- %res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %mem_v, <2 x double> zeroinitializer, i8 %mask, i32 4) ;
- ret <2 x double> %res
-}
-
diff --git a/llvm/test/CodeGen/X86/coalescer-add-implicit-def-subreg-to-reg-regression.ll b/llvm/test/CodeGen/X86/coalescer-add-implicit-def-subreg-to-reg-regression.ll
new file mode 100644
index 000000000000..0e6cb7a3aff2
--- /dev/null
+++ b/llvm/test/CodeGen/X86/coalescer-add-implicit-def-subreg-to-reg-regression.ll
@@ -0,0 +1,45 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+
+; Not from issue 76416, but a separate testcase reported against the same
+; regressing commit.
+define void @other_regression(i1 %cmp.not.i.i.i) {
+; CHECK-LABEL: other_regression:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: movl 0, %eax
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: sarl %cl, %eax
+; CHECK-NEXT: movl $1, %edx
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: shrl %cl, %edx
+; CHECK-NEXT: imull %eax, %edx
+; CHECK-NEXT: movslq %edx, %rsi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: xorl %edi, %edi
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: callq *%rax
+entry:
+ br label %for.cond10.preheader
+
+trap: ; preds = %for.body13
+ unreachable
+
+for.cond10.preheader: ; preds = %while.cond.i.i.i, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ 1, %while.cond.i.i.i ]
+ %i = trunc i64 %indvars.iv to i32
+ br label %for.body13
+
+for.body13: ; preds = %for.cond10.preheader
+ %i1 = load i32, ptr null, align 4
+ %shr = ashr i32 %i1, %i
+ %shr15 = ashr i32 1, %i
+ %mul16 = mul i32 %shr15, %shr
+ %conv = sext i32 %mul16 to i64
+ call void null(ptr null, i64 %conv, ptr null)
+ br i1 false, label %while.cond.i.i.i, label %trap
+
+while.cond.i.i.i: ; preds = %while.cond.i.i.i, %for.body13
+ br i1 %cmp.not.i.i.i, label %for.cond10.preheader, label %while.cond.i.i.i
+}
diff --git a/llvm/test/CodeGen/X86/combine-srem.ll b/llvm/test/CodeGen/X86/combine-srem.ll
index 49ce2455ae8c..4ed00a9d66bd 100644
--- a/llvm/test/CodeGen/X86/combine-srem.ll
+++ b/llvm/test/CodeGen/X86/combine-srem.ll
@@ -329,7 +329,7 @@ define <4 x i32> @combine_vec_srem_by_pow2b_neg(<4 x i32> %x) {
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrad $3, %xmm2
-; SSE-NEXT: psrad $1, %xmm1
+; SSE-NEXT: psrld $1, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
; SSE-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -351,7 +351,7 @@ define <4 x i32> @combine_vec_srem_by_pow2b_neg(<4 x i32> %x) {
; AVX1-NEXT: vpsrad $2, %xmm1, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm1, %xmm3
-; AVX1-NEXT: vpsrad $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
diff --git a/llvm/test/CodeGen/X86/crc32-target-feature.ll b/llvm/test/CodeGen/X86/crc32-target-feature.ll
index ef4fafcae5dc..9dfe27e65351 100644
--- a/llvm/test/CodeGen/X86/crc32-target-feature.ll
+++ b/llvm/test/CodeGen/X86/crc32-target-feature.ll
@@ -25,5 +25,5 @@ define i32 @test3(i32 %a, i8 %b) nounwind #2 {
declare i32 @llvm.x86.sse42.crc32.32.8(i32, i8) nounwind
attributes #0 = { "target-features"="+crc32" }
-attributes #1 = { "target-features"="+cx8,+fxsr,-3dnow,-3dnowa,-aes,-avx,-avx2,-avx512bf16,-avx512bitalg,-avx512bw,-avx512cd,-avx512dq,-avx512er,-avx512f,-avx512ifma,-avx512pf,-avx512vbmi,-avx512vbmi2,-avx512vl,-avx512vnni,-avx512vp2intersect,-avx512vpopcntdq,-avxvnni,-f16c,-fma,-fma4,-gfni,-kl,-mmx,-pclmul,-sha,-sse,-sse2,-sse3,-sse4.1,-sse4.2,-sse4a,-ssse3,-vaes,-vpclmulqdq,-widekl,-x87,-xop,+crc32" }
-attributes #2 = { "target-features"="+crc32,+cx8,+fxsr,-3dnow,-3dnowa,-aes,-avx,-avx2,-avx512bf16,-avx512bitalg,-avx512bw,-avx512cd,-avx512dq,-avx512er,-avx512f,-avx512ifma,-avx512pf,-avx512vbmi,-avx512vbmi2,-avx512vl,-avx512vnni,-avx512vp2intersect,-avx512vpopcntdq,-avxvnni,-f16c,-fma,-fma4,-gfni,-kl,-mmx,-pclmul,-sha,-sse,-sse2,-sse3,-sse4.1,-sse4.2,-sse4a,-ssse3,-vaes,-vpclmulqdq,-widekl,-x87,-xop" }
+attributes #1 = { "target-features"="+cx8,+fxsr,-3dnow,-3dnowa,-aes,-avx,-avx2,-avx512bf16,-avx512bitalg,-avx512bw,-avx512cd,-avx512dq,-avx512f,-avx512ifma,-avx512pf,-avx512vbmi,-avx512vbmi2,-avx512vl,-avx512vnni,-avx512vp2intersect,-avx512vpopcntdq,-avxvnni,-f16c,-fma,-fma4,-gfni,-kl,-mmx,-pclmul,-sha,-sse,-sse2,-sse3,-sse4.1,-sse4.2,-sse4a,-ssse3,-vaes,-vpclmulqdq,-widekl,-x87,-xop,+crc32" }
+attributes #2 = { "target-features"="+crc32,+cx8,+fxsr,-3dnow,-3dnowa,-aes,-avx,-avx2,-avx512bf16,-avx512bitalg,-avx512bw,-avx512cd,-avx512dq,-avx512f,-avx512ifma,-avx512pf,-avx512vbmi,-avx512vbmi2,-avx512vl,-avx512vnni,-avx512vp2intersect,-avx512vpopcntdq,-avxvnni,-f16c,-fma,-fma4,-gfni,-kl,-mmx,-pclmul,-sha,-sse,-sse2,-sse3,-sse4.1,-sse4.2,-sse4a,-ssse3,-vaes,-vpclmulqdq,-widekl,-x87,-xop" }
diff --git a/llvm/test/CodeGen/X86/exp10-libcall-names.ll b/llvm/test/CodeGen/X86/exp10-libcall-names.ll
new file mode 100644
index 000000000000..ce26a0e738e9
--- /dev/null
+++ b/llvm/test/CodeGen/X86/exp10-libcall-names.ll
@@ -0,0 +1,40 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=x86_64-linux-gnu < %s | FileCheck -check-prefix=LINUX %s
+; RUN: llc -mtriple=x86_64-apple-macos10.9 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=x86_64-apple-ios9.0 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=x86_64-apple-tvos9.0 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=x86_64-apple-watchos9.0 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=x86_64-apple-xros9.0 < %s | FileCheck -check-prefix=APPLE %s
+
+; RUN: not llc -mtriple=x86_64-apple-macos10.8 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not llc -mtriple=x86_64-apple-ios8.0 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not llc -mtriple=x86_64-apple-tvos8.0 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not llc -mtriple=x86_64-apple-xros8.0 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+
+; Check that exp10/exp10f are emitted as __exp10/__exp10f on assorted systems.
+
+; ERR: no libcall available for fexp10
+
+define float @test_exp10_f32(float %x) {
+; LINUX-LABEL: test_exp10_f32:
+; LINUX: # %bb.0:
+; LINUX-NEXT: jmp exp10f@PLT # TAILCALL
+;
+; APPLE-LABEL: test_exp10_f32:
+; APPLE: ## %bb.0:
+; APPLE-NEXT: jmp ___exp10f ## TAILCALL
+ %ret = call float @llvm.exp10.f32(float %x)
+ ret float %ret
+}
+
+define double @test_exp10_f64(double %x) {
+; LINUX-LABEL: test_exp10_f64:
+; LINUX: # %bb.0:
+; LINUX-NEXT: jmp exp10@PLT # TAILCALL
+;
+; APPLE-LABEL: test_exp10_f64:
+; APPLE: ## %bb.0:
+; APPLE-NEXT: jmp ___exp10 ## TAILCALL
+ %ret = call double @llvm.exp10.f64(double %x)
+ ret double %ret
+}
diff --git a/llvm/test/CodeGen/X86/fat-lto-section.ll b/llvm/test/CodeGen/X86/fat-lto-section.ll
index 30c56229a0e2..f3ca8436affb 100644
--- a/llvm/test/CodeGen/X86/fat-lto-section.ll
+++ b/llvm/test/CodeGen/X86/fat-lto-section.ll
@@ -5,6 +5,6 @@
; RUN: | FileCheck %s --check-prefix=EXCLUDE
; EXCLUDE: Name Type {{.*}} ES Flg Lk Inf Al
-; EXCLUDE: .llvm.lto PROGBITS {{.*}} 00 E 0 0 1
+; EXCLUDE: .llvm.lto LLVM_LTO {{.*}} 00 E 0 0 1
@a = global i32 1
diff --git a/llvm/test/CodeGen/X86/freeze-binary.ll b/llvm/test/CodeGen/X86/freeze-binary.ll
index dbc027495297..1209e2633c06 100644
--- a/llvm/test/CodeGen/X86/freeze-binary.ll
+++ b/llvm/test/CodeGen/X86/freeze-binary.ll
@@ -546,9 +546,8 @@ define <8 x i16> @freeze_ashr_vec(<8 x i16> %a0) nounwind {
define <4 x i32> @freeze_ashr_vec_outofrange(<4 x i32> %a0) nounwind {
; X86-LABEL: freeze_ashr_vec_outofrange:
; X86: # %bb.0:
-; X86-NEXT: psrad $1, %xmm0
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
-; X86-NEXT: psrad $2, %xmm0
+; X86-NEXT: psrad $3, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: freeze_ashr_vec_outofrange:
@@ -660,9 +659,8 @@ define <8 x i16> @freeze_lshr_vec(<8 x i16> %a0) nounwind {
define <4 x i32> @freeze_lshr_vec_outofrange(<4 x i32> %a0) nounwind {
; X86-LABEL: freeze_lshr_vec_outofrange:
; X86: # %bb.0:
-; X86-NEXT: psrld $1, %xmm0
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
-; X86-NEXT: psrld $2, %xmm0
+; X86-NEXT: psrld $3, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: freeze_lshr_vec_outofrange:
diff --git a/llvm/test/CodeGen/X86/funnel-shift.ll b/llvm/test/CodeGen/X86/funnel-shift.ll
index c6f0662cadd6..a464d78f9af3 100644
--- a/llvm/test/CodeGen/X86/funnel-shift.ll
+++ b/llvm/test/CodeGen/X86/funnel-shift.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-- -mattr=sse2 | FileCheck %s --check-prefixes=CHECK,X86-SSE2
-; RUN: llc < %s -mtriple=x86_64-- -mattr=avx2 | FileCheck %s --check-prefixes=CHECK,X64-AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=avx2 | FileCheck %s --check-prefixes=CHECK,X64-AVX,X64-AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+avx512vbmi,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefixes=CHECK,X64-AVX,X64-VBMI2
declare i8 @llvm.fshl.i8(i8, i8, i8)
declare i16 @llvm.fshl.i16(i16, i16, i16)
@@ -26,13 +27,13 @@ define i32 @fshl_i32(i32 %x, i32 %y, i32 %z) nounwind {
; X86-SSE2-NEXT: shldl %cl, %edx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i32:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edx, %ecx
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shldl %cl, %esi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i32:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edx, %ecx
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shldl %cl, %esi, %eax
+; X64-AVX-NEXT: retq
%f = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 %z)
ret i32 %f
}
@@ -58,13 +59,13 @@ define i64 @fshl_i64(i64 %x, i64 %y, i64 %z) nounwind {
; X86-SSE2-NEXT: popl %edi
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i64:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movq %rdx, %rcx
-; X64-AVX2-NEXT: movq %rdi, %rax
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $rcx
-; X64-AVX2-NEXT: shldq %cl, %rsi, %rax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i64:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movq %rdx, %rcx
+; X64-AVX-NEXT: movq %rdi, %rax
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $rcx
+; X64-AVX-NEXT: shldq %cl, %rsi, %rax
+; X64-AVX-NEXT: retq
%f = call i64 @llvm.fshl.i64(i64 %x, i64 %y, i64 %z)
ret i64 %f
}
@@ -116,18 +117,18 @@ define i128 @fshl_i128(i128 %x, i128 %y, i128 %z) nounwind {
; X86-SSE2-NEXT: popl %ebp
; X86-SSE2-NEXT: retl $4
;
-; X64-AVX2-LABEL: fshl_i128:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: testb $64, %r8b
-; X64-AVX2-NEXT: cmovneq %rdi, %rsi
-; X64-AVX2-NEXT: cmoveq %rcx, %rdx
-; X64-AVX2-NEXT: cmovneq %rcx, %rdi
-; X64-AVX2-NEXT: movq %rdi, %rax
-; X64-AVX2-NEXT: movl %r8d, %ecx
-; X64-AVX2-NEXT: shldq %cl, %rdx, %rax
-; X64-AVX2-NEXT: shldq %cl, %rdi, %rsi
-; X64-AVX2-NEXT: movq %rsi, %rdx
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i128:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: testb $64, %r8b
+; X64-AVX-NEXT: cmovneq %rdi, %rsi
+; X64-AVX-NEXT: cmoveq %rcx, %rdx
+; X64-AVX-NEXT: cmovneq %rcx, %rdi
+; X64-AVX-NEXT: movq %rdi, %rax
+; X64-AVX-NEXT: movl %r8d, %ecx
+; X64-AVX-NEXT: shldq %cl, %rdx, %rax
+; X64-AVX-NEXT: shldq %cl, %rdi, %rsi
+; X64-AVX-NEXT: movq %rsi, %rdx
+; X64-AVX-NEXT: retq
%f = call i128 @llvm.fshl.i128(i128 %x, i128 %y, i128 %z)
ret i128 %f
}
@@ -173,21 +174,21 @@ define i37 @fshl_i37(i37 %x, i37 %y, i37 %z) nounwind {
; X86-SSE2-NEXT: popl %ebx
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i37:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movq %rdx, %rcx
-; X64-AVX2-NEXT: movabsq $137438953471, %rax # imm = 0x1FFFFFFFFF
-; X64-AVX2-NEXT: andq %rdx, %rax
-; X64-AVX2-NEXT: movabsq $498560650640798693, %rdx # imm = 0x6EB3E45306EB3E5
-; X64-AVX2-NEXT: mulq %rdx
-; X64-AVX2-NEXT: leal (%rdx,%rdx,8), %eax
-; X64-AVX2-NEXT: leal (%rdx,%rax,4), %eax
-; X64-AVX2-NEXT: subl %eax, %ecx
-; X64-AVX2-NEXT: shlq $27, %rsi
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $rcx
-; X64-AVX2-NEXT: shldq %cl, %rsi, %rdi
-; X64-AVX2-NEXT: movq %rdi, %rax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i37:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movq %rdx, %rcx
+; X64-AVX-NEXT: movabsq $137438953471, %rax # imm = 0x1FFFFFFFFF
+; X64-AVX-NEXT: andq %rdx, %rax
+; X64-AVX-NEXT: movabsq $498560650640798693, %rdx # imm = 0x6EB3E45306EB3E5
+; X64-AVX-NEXT: mulq %rdx
+; X64-AVX-NEXT: leal (%rdx,%rdx,8), %eax
+; X64-AVX-NEXT: leal (%rdx,%rax,4), %eax
+; X64-AVX-NEXT: subl %eax, %ecx
+; X64-AVX-NEXT: shlq $27, %rsi
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $rcx
+; X64-AVX-NEXT: shldq %cl, %rsi, %rdi
+; X64-AVX-NEXT: movq %rdi, %rax
+; X64-AVX-NEXT: retq
%f = call i37 @llvm.fshl.i37(i37 %x, i37 %y, i37 %z)
ret i37 %f
}
@@ -214,11 +215,11 @@ define i32 @fshl_i32_const_shift(i32 %x, i32 %y) nounwind {
; X86-SSE2-NEXT: shldl $9, %ecx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i32_const_shift:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: shldl $9, %esi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i32_const_shift:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: shldl $9, %esi, %eax
+; X64-AVX-NEXT: retq
%f = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 9)
ret i32 %f
}
@@ -233,11 +234,11 @@ define i32 @fshl_i32_const_overshift(i32 %x, i32 %y) nounwind {
; X86-SSE2-NEXT: shldl $9, %ecx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i32_const_overshift:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: shldl $9, %esi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i32_const_overshift:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: shldl $9, %esi, %eax
+; X64-AVX-NEXT: retq
%f = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 41)
ret i32 %f
}
@@ -254,11 +255,11 @@ define i64 @fshl_i64_const_overshift(i64 %x, i64 %y) nounwind {
; X86-SSE2-NEXT: shrdl $23, %ecx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i64_const_overshift:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movq %rdi, %rax
-; X64-AVX2-NEXT: shldq $41, %rsi, %rax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i64_const_overshift:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movq %rdi, %rax
+; X64-AVX-NEXT: shldq $41, %rsi, %rax
+; X64-AVX-NEXT: retq
%f = call i64 @llvm.fshl.i64(i64 %x, i64 %y, i64 105)
ret i64 %f
}
@@ -287,13 +288,13 @@ define i32 @fshr_i32(i32 %x, i32 %y, i32 %z) nounwind {
; X86-SSE2-NEXT: shrdl %cl, %edx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_i32:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edx, %ecx
-; X64-AVX2-NEXT: movl %esi, %eax
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shrdl %cl, %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_i32:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edx, %ecx
+; X64-AVX-NEXT: movl %esi, %eax
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shrdl %cl, %edi, %eax
+; X64-AVX-NEXT: retq
%f = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 %z)
ret i32 %f
}
@@ -340,22 +341,22 @@ define i37 @fshr_i37(i37 %x, i37 %y, i37 %z) nounwind {
; X86-SSE2-NEXT: popl %ebx
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_i37:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movq %rdx, %rcx
-; X64-AVX2-NEXT: movabsq $137438953471, %rax # imm = 0x1FFFFFFFFF
-; X64-AVX2-NEXT: andq %rdx, %rax
-; X64-AVX2-NEXT: movabsq $498560650640798693, %rdx # imm = 0x6EB3E45306EB3E5
-; X64-AVX2-NEXT: mulq %rdx
-; X64-AVX2-NEXT: leal (%rdx,%rdx,8), %eax
-; X64-AVX2-NEXT: leal (%rdx,%rax,4), %eax
-; X64-AVX2-NEXT: subl %eax, %ecx
-; X64-AVX2-NEXT: addl $27, %ecx
-; X64-AVX2-NEXT: shlq $27, %rsi
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $rcx
-; X64-AVX2-NEXT: shrdq %cl, %rdi, %rsi
-; X64-AVX2-NEXT: movq %rsi, %rax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_i37:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movq %rdx, %rcx
+; X64-AVX-NEXT: movabsq $137438953471, %rax # imm = 0x1FFFFFFFFF
+; X64-AVX-NEXT: andq %rdx, %rax
+; X64-AVX-NEXT: movabsq $498560650640798693, %rdx # imm = 0x6EB3E45306EB3E5
+; X64-AVX-NEXT: mulq %rdx
+; X64-AVX-NEXT: leal (%rdx,%rdx,8), %eax
+; X64-AVX-NEXT: leal (%rdx,%rax,4), %eax
+; X64-AVX-NEXT: subl %eax, %ecx
+; X64-AVX-NEXT: addl $27, %ecx
+; X64-AVX-NEXT: shlq $27, %rsi
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $rcx
+; X64-AVX-NEXT: shrdq %cl, %rdi, %rsi
+; X64-AVX-NEXT: movq %rsi, %rax
+; X64-AVX-NEXT: retq
%f = call i37 @llvm.fshr.i37(i37 %x, i37 %y, i37 %z)
ret i37 %f
}
@@ -382,11 +383,11 @@ define i32 @fshl_i32_demandedbits(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-NEXT: shldl $9, %ecx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i32_demandedbits:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: shldl $9, %esi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i32_demandedbits:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: shldl $9, %esi, %eax
+; X64-AVX-NEXT: retq
%x = or i32 %a0, 2147483648
%y = or i32 %a1, 1
%res = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 9)
@@ -401,11 +402,11 @@ define i32 @fshr_i32_demandedbits(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-NEXT: shrdl $9, %ecx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_i32_demandedbits:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: shldl $23, %esi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_i32_demandedbits:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: shldl $23, %esi, %eax
+; X64-AVX-NEXT: retq
%x = or i32 %a0, 2147483648
%y = or i32 %a1, 1
%res = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 9)
@@ -422,12 +423,12 @@ define i32 @fshl_i32_undef0(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-NEXT: shldl %cl, %eax, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i32_undef0:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %esi, %ecx
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shldl %cl, %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i32_undef0:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %esi, %ecx
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shldl %cl, %edi, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshl.i32(i32 undef, i32 %a0, i32 %a1)
ret i32 %res
}
@@ -442,13 +443,13 @@ define i32 @fshl_i32_undef0_msk(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-NEXT: shldl %cl, %eax, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i32_undef0_msk:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %esi, %ecx
-; X64-AVX2-NEXT: andl $7, %ecx
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shldl %cl, %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i32_undef0_msk:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %esi, %ecx
+; X64-AVX-NEXT: andl $7, %ecx
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shldl %cl, %edi, %eax
+; X64-AVX-NEXT: retq
%m = and i32 %a1, 7
%res = call i32 @llvm.fshl.i32(i32 undef, i32 %a0, i32 %m)
ret i32 %res
@@ -461,15 +462,43 @@ define i32 @fshl_i32_undef0_cst(i32 %a0) nounwind {
; X86-SSE2-NEXT: shrl $23, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i32_undef0_cst:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: shrl $23, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i32_undef0_cst:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: shrl $23, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshl.i32(i32 undef, i32 %a0, i32 9)
ret i32 %res
}
+define <4 x i32> @fshl_v4i32_undef0_cst(<4 x i32> %a0) nounwind {
+; X86-SSE2-LABEL: fshl_v4i32_undef0_cst:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
+; X86-SSE2-NEXT: psrld $20, %xmm1
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
+; X86-SSE2-NEXT: psrld $21, %xmm2
+; X86-SSE2-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
+; X86-SSE2-NEXT: psrld $22, %xmm1
+; X86-SSE2-NEXT: psrld $23, %xmm0
+; X86-SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
+; X86-SSE2-NEXT: retl
+;
+; X64-AVX2-LABEL: fshl_v4i32_undef0_cst:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: retq
+;
+; X64-VBMI2-LABEL: fshl_v4i32_undef0_cst:
+; X64-VBMI2: # %bb.0:
+; X64-VBMI2-NEXT: vpshldvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-VBMI2-NEXT: retq
+ %res = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> undef, <4 x i32> %a0, <4 x i32> <i32 9, i32 10, i32 11, i32 12>)
+ ret <4 x i32> %res
+}
+
define i32 @fshl_i32_undef1(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-LABEL: fshl_i32_undef1:
; X86-SSE2: # %bb.0:
@@ -478,13 +507,13 @@ define i32 @fshl_i32_undef1(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-NEXT: shldl %cl, %eax, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i32_undef1:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %esi, %ecx
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shldl %cl, %eax, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i32_undef1:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %esi, %ecx
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shldl %cl, %eax, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshl.i32(i32 %a0, i32 undef, i32 %a1)
ret i32 %res
}
@@ -498,14 +527,14 @@ define i32 @fshl_i32_undef1_msk(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-NEXT: shll %cl, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i32_undef1_msk:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %esi, %ecx
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: andb $7, %cl
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shll %cl, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i32_undef1_msk:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %esi, %ecx
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: andb $7, %cl
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shll %cl, %eax
+; X64-AVX-NEXT: retq
%m = and i32 %a1, 7
%res = call i32 @llvm.fshl.i32(i32 %a0, i32 undef, i32 %m)
ret i32 %res
@@ -518,15 +547,34 @@ define i32 @fshl_i32_undef1_cst(i32 %a0) nounwind {
; X86-SSE2-NEXT: shll $9, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i32_undef1_cst:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: shll $9, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i32_undef1_cst:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: shll $9, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshl.i32(i32 %a0, i32 undef, i32 9)
ret i32 %res
}
+define <4 x i32> @fshl_v4i32_undef1_cst(<4 x i32> %a0) nounwind {
+; X86-SSE2-LABEL: fshl_v4i32_undef1_cst:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-SSE2-NEXT: retl
+;
+; X64-AVX-LABEL: fshl_v4i32_undef1_cst:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: retq
+ %res = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %a0, <4 x i32> undef, <4 x i32> <i32 9, i32 10, i32 11, i32 12>)
+ ret <4 x i32> %res
+}
+
define i32 @fshl_i32_undef2(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-LABEL: fshl_i32_undef2:
; X86-SSE2: # %bb.0:
@@ -535,11 +583,11 @@ define i32 @fshl_i32_undef2(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-NEXT: shldl %cl, %ecx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i32_undef2:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: shldl %cl, %esi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i32_undef2:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: shldl %cl, %esi, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshl.i32(i32 %a0, i32 %a1, i32 undef)
ret i32 %res
}
@@ -552,13 +600,13 @@ define i32 @fshr_i32_undef0(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-NEXT: shrdl %cl, %eax, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_i32_undef0:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %esi, %ecx
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shrdl %cl, %eax, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_i32_undef0:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %esi, %ecx
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shrdl %cl, %eax, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshr.i32(i32 undef, i32 %a0, i32 %a1)
ret i32 %res
}
@@ -572,14 +620,14 @@ define i32 @fshr_i32_undef0_msk(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-NEXT: shrl %cl, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_i32_undef0_msk:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %esi, %ecx
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: andb $7, %cl
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shrl %cl, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_i32_undef0_msk:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %esi, %ecx
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: andb $7, %cl
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shrl %cl, %eax
+; X64-AVX-NEXT: retq
%m = and i32 %a1, 7
%res = call i32 @llvm.fshr.i32(i32 undef, i32 %a0, i32 %m)
ret i32 %res
@@ -592,15 +640,38 @@ define i32 @fshr_i32_undef0_cst(i32 %a0) nounwind {
; X86-SSE2-NEXT: shrl $9, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_i32_undef0_cst:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: shrl $9, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_i32_undef0_cst:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: shrl $9, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshr.i32(i32 undef, i32 %a0, i32 9)
ret i32 %res
}
+define <4 x i32> @fshr_v4i32_undef0_cst(<4 x i32> %a0) nounwind {
+; X86-SSE2-LABEL: fshr_v4i32_undef0_cst:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
+; X86-SSE2-NEXT: psrld $12, %xmm1
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
+; X86-SSE2-NEXT: psrld $11, %xmm2
+; X86-SSE2-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
+; X86-SSE2-NEXT: psrld $10, %xmm1
+; X86-SSE2-NEXT: psrld $9, %xmm0
+; X86-SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
+; X86-SSE2-NEXT: retl
+;
+; X64-AVX-LABEL: fshr_v4i32_undef0_cst:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: retq
+ %res = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> undef, <4 x i32> %a0, <4 x i32> <i32 9, i32 10, i32 11, i32 12>)
+ ret <4 x i32> %res
+}
+
define i32 @fshr_i32_undef1(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-LABEL: fshr_i32_undef1:
; X86-SSE2: # %bb.0:
@@ -609,12 +680,12 @@ define i32 @fshr_i32_undef1(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-NEXT: shrdl %cl, %eax, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_i32_undef1:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %esi, %ecx
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shrdl %cl, %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_i32_undef1:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %esi, %ecx
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shrdl %cl, %edi, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshr.i32(i32 %a0, i32 undef, i32 %a1)
ret i32 %res
}
@@ -629,13 +700,13 @@ define i32 @fshr_i32_undef1_msk(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-NEXT: shrdl %cl, %eax, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_i32_undef1_msk:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %esi, %ecx
-; X64-AVX2-NEXT: andl $7, %ecx
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shrdl %cl, %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_i32_undef1_msk:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %esi, %ecx
+; X64-AVX-NEXT: andl $7, %ecx
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shrdl %cl, %edi, %eax
+; X64-AVX-NEXT: retq
%m = and i32 %a1, 7
%res = call i32 @llvm.fshr.i32(i32 %a0, i32 undef, i32 %m)
ret i32 %res
@@ -648,15 +719,39 @@ define i32 @fshr_i32_undef1_cst(i32 %a0) nounwind {
; X86-SSE2-NEXT: shll $23, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_i32_undef1_cst:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: shll $23, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_i32_undef1_cst:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: shll $23, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshr.i32(i32 %a0, i32 undef, i32 9)
ret i32 %res
}
+define <4 x i32> @fshr_v4i32_undef1_cst(<4 x i32> %a0) nounwind {
+; X86-SSE2-LABEL: fshr_v4i32_undef1_cst:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-SSE2-NEXT: retl
+;
+; X64-AVX2-LABEL: fshr_v4i32_undef1_cst:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: retq
+;
+; X64-VBMI2-LABEL: fshr_v4i32_undef1_cst:
+; X64-VBMI2: # %bb.0:
+; X64-VBMI2-NEXT: vpshrdvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-VBMI2-NEXT: retq
+ %res = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %a0, <4 x i32> undef, <4 x i32> <i32 9, i32 10, i32 11, i32 12>)
+ ret <4 x i32> %res
+}
+
define i32 @fshr_i32_undef2(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-LABEL: fshr_i32_undef2:
; X86-SSE2: # %bb.0:
@@ -665,11 +760,11 @@ define i32 @fshr_i32_undef2(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-NEXT: shrdl %cl, %ecx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_i32_undef2:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %esi, %eax
-; X64-AVX2-NEXT: shrdl %cl, %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_i32_undef2:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %esi, %eax
+; X64-AVX-NEXT: shrdl %cl, %edi, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshr.i32(i32 %a0, i32 %a1, i32 undef)
ret i32 %res
}
@@ -685,13 +780,13 @@ define i32 @fshl_i32_zero0(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-NEXT: shldl %cl, %edx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i32_zero0:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %esi, %ecx
-; X64-AVX2-NEXT: xorl %eax, %eax
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shldl %cl, %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i32_zero0:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %esi, %ecx
+; X64-AVX-NEXT: xorl %eax, %eax
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shldl %cl, %edi, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshl.i32(i32 0, i32 %a0, i32 %a1)
ret i32 %res
}
@@ -703,11 +798,11 @@ define i32 @fshl_i32_zero0_cst(i32 %a0) nounwind {
; X86-SSE2-NEXT: shrl $23, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i32_zero0_cst:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: shrl $23, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i32_zero0_cst:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: shrl $23, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshl.i32(i32 0, i32 %a0, i32 9)
ret i32 %res
}
@@ -721,14 +816,14 @@ define i32 @fshl_i32_zero1(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-NEXT: shldl %cl, %edx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i32_zero1:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %esi, %ecx
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: xorl %edx, %edx
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shldl %cl, %edx, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i32_zero1:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %esi, %ecx
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: xorl %edx, %edx
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shldl %cl, %edx, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshl.i32(i32 %a0, i32 0, i32 %a1)
ret i32 %res
}
@@ -740,11 +835,11 @@ define i32 @fshl_i32_zero1_cst(i32 %a0) nounwind {
; X86-SSE2-NEXT: shll $9, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i32_zero1_cst:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: shll $9, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i32_zero1_cst:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: shll $9, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshl.i32(i32 %a0, i32 0, i32 9)
ret i32 %res
}
@@ -758,14 +853,14 @@ define i32 @fshr_i32_zero0(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-NEXT: shrdl %cl, %edx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_i32_zero0:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %esi, %ecx
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: xorl %edx, %edx
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shrdl %cl, %edx, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_i32_zero0:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %esi, %ecx
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: xorl %edx, %edx
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shrdl %cl, %edx, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshr.i32(i32 0, i32 %a0, i32 %a1)
ret i32 %res
}
@@ -777,11 +872,11 @@ define i32 @fshr_i32_zero0_cst(i32 %a0) nounwind {
; X86-SSE2-NEXT: shrl $9, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_i32_zero0_cst:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: shrl $9, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_i32_zero0_cst:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: shrl $9, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshr.i32(i32 0, i32 %a0, i32 9)
ret i32 %res
}
@@ -795,13 +890,13 @@ define i32 @fshr_i32_zero1(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-NEXT: shrdl %cl, %edx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_i32_zero1:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %esi, %ecx
-; X64-AVX2-NEXT: xorl %eax, %eax
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shrdl %cl, %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_i32_zero1:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %esi, %ecx
+; X64-AVX-NEXT: xorl %eax, %eax
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shrdl %cl, %edi, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshr.i32(i32 %a0, i32 0, i32 %a1)
ret i32 %res
}
@@ -813,11 +908,11 @@ define i32 @fshr_i32_zero1_cst(i32 %a0) nounwind {
; X86-SSE2-NEXT: shll $23, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_i32_zero1_cst:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: shll $23, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_i32_zero1_cst:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: shll $23, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshr.i32(i32 %a0, i32 0, i32 9)
ret i32 %res
}
@@ -830,10 +925,10 @@ define i32 @fshl_i32_zero2(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i32_zero2:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i32_zero2:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshl.i32(i32 %a0, i32 %a1, i32 0)
ret i32 %res
}
@@ -844,10 +939,10 @@ define i32 @fshr_i32_zero2(i32 %a0, i32 %a1) nounwind {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_i32_zero2:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %esi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_i32_zero2:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %esi, %eax
+; X64-AVX-NEXT: retq
%res = call i32 @llvm.fshr.i32(i32 %a0, i32 %a1, i32 0)
ret i32 %res
}
@@ -862,11 +957,11 @@ define i32 @fshr_i32_const_shift(i32 %x, i32 %y) nounwind {
; X86-SSE2-NEXT: shrdl $9, %ecx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_i32_const_shift:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: shldl $23, %esi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_i32_const_shift:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: shldl $23, %esi, %eax
+; X64-AVX-NEXT: retq
%f = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 9)
ret i32 %f
}
@@ -881,11 +976,11 @@ define i32 @fshr_i32_const_overshift(i32 %x, i32 %y) nounwind {
; X86-SSE2-NEXT: shrdl $9, %ecx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_i32_const_overshift:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: shldl $23, %esi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_i32_const_overshift:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: shldl $23, %esi, %eax
+; X64-AVX-NEXT: retq
%f = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 41)
ret i32 %f
}
@@ -902,11 +997,11 @@ define i64 @fshr_i64_const_overshift(i64 %x, i64 %y) nounwind {
; X86-SSE2-NEXT: shldl $23, %ecx, %edx
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_i64_const_overshift:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movq %rdi, %rax
-; X64-AVX2-NEXT: shldq $23, %rsi, %rax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_i64_const_overshift:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movq %rdi, %rax
+; X64-AVX-NEXT: shldq $23, %rsi, %rax
+; X64-AVX-NEXT: retq
%f = call i64 @llvm.fshr.i64(i64 %x, i64 %y, i64 105)
ret i64 %f
}
@@ -928,10 +1023,10 @@ define i32 @fshl_i32_shift_by_bitwidth(i32 %x, i32 %y) nounwind {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshl_i32_shift_by_bitwidth:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshl_i32_shift_by_bitwidth:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edi, %eax
+; X64-AVX-NEXT: retq
%f = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 32)
ret i32 %f
}
@@ -942,10 +1037,10 @@ define i32 @fshr_i32_shift_by_bitwidth(i32 %x, i32 %y) nounwind {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_i32_shift_by_bitwidth:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %esi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_i32_shift_by_bitwidth:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %esi, %eax
+; X64-AVX-NEXT: retq
%f = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 32)
ret i32 %f
}
@@ -964,10 +1059,10 @@ define <4 x i32> @fshr_v4i32_shift_by_bitwidth(<4 x i32> %x, <4 x i32> %y) nounw
; X86-SSE2-NEXT: movaps %xmm1, %xmm0
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: fshr_v4i32_shift_by_bitwidth:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: vmovaps %xmm1, %xmm0
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: fshr_v4i32_shift_by_bitwidth:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovaps %xmm1, %xmm0
+; X64-AVX-NEXT: retq
%f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
ret <4 x i32> %f
}
@@ -996,30 +1091,30 @@ define void @PR45265(i32 %0, ptr nocapture readonly %1) nounwind {
; X86-SSE2-NEXT: shldl $24, %edx, %ecx
; X86-SSE2-NEXT: xorl %eax, %ecx
; X86-SSE2-NEXT: orl %ecx, %edi
-; X86-SSE2-NEXT: jne .LBB46_1
+; X86-SSE2-NEXT: jne .LBB50_1
; X86-SSE2-NEXT: # %bb.2:
; X86-SSE2-NEXT: popl %esi
; X86-SSE2-NEXT: popl %edi
; X86-SSE2-NEXT: jmp _Z3foov # TAILCALL
-; X86-SSE2-NEXT: .LBB46_1:
+; X86-SSE2-NEXT: .LBB50_1:
; X86-SSE2-NEXT: popl %esi
; X86-SSE2-NEXT: popl %edi
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: PR45265:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movslq %edi, %rax
-; X64-AVX2-NEXT: leaq (%rax,%rax,2), %rcx
-; X64-AVX2-NEXT: movsbq 10(%rsi,%rcx,4), %rdx
-; X64-AVX2-NEXT: shlq $16, %rdx
-; X64-AVX2-NEXT: movzwl 8(%rsi,%rcx,4), %edi
-; X64-AVX2-NEXT: orq %rdx, %rdi
-; X64-AVX2-NEXT: movq (%rsi,%rcx,4), %rcx
-; X64-AVX2-NEXT: shrdq $40, %rdi, %rcx
-; X64-AVX2-NEXT: cmpq %rax, %rcx
-; X64-AVX2-NEXT: je _Z3foov # TAILCALL
-; X64-AVX2-NEXT: # %bb.1:
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: PR45265:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movslq %edi, %rax
+; X64-AVX-NEXT: leaq (%rax,%rax,2), %rcx
+; X64-AVX-NEXT: movsbq 10(%rsi,%rcx,4), %rdx
+; X64-AVX-NEXT: shlq $16, %rdx
+; X64-AVX-NEXT: movzwl 8(%rsi,%rcx,4), %edi
+; X64-AVX-NEXT: orq %rdx, %rdi
+; X64-AVX-NEXT: movq (%rsi,%rcx,4), %rcx
+; X64-AVX-NEXT: shrdq $40, %rdi, %rcx
+; X64-AVX-NEXT: cmpq %rax, %rcx
+; X64-AVX-NEXT: je _Z3foov # TAILCALL
+; X64-AVX-NEXT: # %bb.1:
+; X64-AVX-NEXT: retq
%3 = sext i32 %0 to i64
%4 = getelementptr inbounds %struct.S, ptr %1, i64 %3
%5 = bitcast ptr %4 to ptr
@@ -1052,15 +1147,15 @@ define i32 @or_shl_fshl(i32 %x, i32 %y, i32 %s) nounwind {
; X86-SSE2-NEXT: popl %esi
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: or_shl_fshl:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edx, %ecx
-; X64-AVX2-NEXT: movl %esi, %eax
-; X64-AVX2-NEXT: shll %cl, %eax
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shldl %cl, %esi, %edi
-; X64-AVX2-NEXT: orl %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: or_shl_fshl:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edx, %ecx
+; X64-AVX-NEXT: movl %esi, %eax
+; X64-AVX-NEXT: shll %cl, %eax
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shldl %cl, %esi, %edi
+; X64-AVX-NEXT: orl %edi, %eax
+; X64-AVX-NEXT: retq
%shy = shl i32 %y, %s
%fun = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 %s)
%or = or i32 %fun, %shy
@@ -1078,15 +1173,15 @@ define i32 @or_shl_rotl(i32 %x, i32 %y, i32 %s) nounwind {
; X86-SSE2-NEXT: orl %edx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: or_shl_rotl:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edx, %ecx
-; X64-AVX2-NEXT: movl %esi, %eax
-; X64-AVX2-NEXT: shll %cl, %edi
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: roll %cl, %eax
-; X64-AVX2-NEXT: orl %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: or_shl_rotl:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edx, %ecx
+; X64-AVX-NEXT: movl %esi, %eax
+; X64-AVX-NEXT: shll %cl, %edi
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: roll %cl, %eax
+; X64-AVX-NEXT: orl %edi, %eax
+; X64-AVX-NEXT: retq
%shx = shl i32 %x, %s
%rot = call i32 @llvm.fshl.i32(i32 %y, i32 %y, i32 %s)
%or = or i32 %rot, %shx
@@ -1107,15 +1202,15 @@ define i32 @or_shl_fshl_commute(i32 %x, i32 %y, i32 %s) nounwind {
; X86-SSE2-NEXT: popl %esi
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: or_shl_fshl_commute:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edx, %ecx
-; X64-AVX2-NEXT: movl %esi, %eax
-; X64-AVX2-NEXT: shll %cl, %eax
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shldl %cl, %esi, %edi
-; X64-AVX2-NEXT: orl %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: or_shl_fshl_commute:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edx, %ecx
+; X64-AVX-NEXT: movl %esi, %eax
+; X64-AVX-NEXT: shll %cl, %eax
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shldl %cl, %esi, %edi
+; X64-AVX-NEXT: orl %edi, %eax
+; X64-AVX-NEXT: retq
%shy = shl i32 %y, %s
%fun = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 %s)
%or = or i32 %shy, %fun
@@ -1133,15 +1228,15 @@ define i32 @or_shl_rotl_commute(i32 %x, i32 %y, i32 %s) nounwind {
; X86-SSE2-NEXT: orl %edx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: or_shl_rotl_commute:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edx, %ecx
-; X64-AVX2-NEXT: movl %esi, %eax
-; X64-AVX2-NEXT: shll %cl, %edi
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: roll %cl, %eax
-; X64-AVX2-NEXT: orl %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: or_shl_rotl_commute:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edx, %ecx
+; X64-AVX-NEXT: movl %esi, %eax
+; X64-AVX-NEXT: shll %cl, %edi
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: roll %cl, %eax
+; X64-AVX-NEXT: orl %edi, %eax
+; X64-AVX-NEXT: retq
%shx = shl i32 %x, %s
%rot = call i32 @llvm.fshl.i32(i32 %y, i32 %y, i32 %s)
%or = or i32 %shx, %rot
@@ -1162,15 +1257,15 @@ define i32 @or_lshr_fshr(i32 %x, i32 %y, i32 %s) nounwind {
; X86-SSE2-NEXT: popl %esi
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: or_lshr_fshr:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edx, %ecx
-; X64-AVX2-NEXT: movl %esi, %eax
-; X64-AVX2-NEXT: shrl %cl, %eax
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shrdl %cl, %esi, %edi
-; X64-AVX2-NEXT: orl %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: or_lshr_fshr:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edx, %ecx
+; X64-AVX-NEXT: movl %esi, %eax
+; X64-AVX-NEXT: shrl %cl, %eax
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shrdl %cl, %esi, %edi
+; X64-AVX-NEXT: orl %edi, %eax
+; X64-AVX-NEXT: retq
%shy = lshr i32 %y, %s
%fun = call i32 @llvm.fshr.i32(i32 %y, i32 %x, i32 %s)
%or = or i32 %fun, %shy
@@ -1188,15 +1283,15 @@ define i32 @or_lshr_rotr(i32 %x, i32 %y, i32 %s) nounwind {
; X86-SSE2-NEXT: orl %edx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: or_lshr_rotr:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edx, %ecx
-; X64-AVX2-NEXT: movl %esi, %eax
-; X64-AVX2-NEXT: shrl %cl, %edi
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: rorl %cl, %eax
-; X64-AVX2-NEXT: orl %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: or_lshr_rotr:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edx, %ecx
+; X64-AVX-NEXT: movl %esi, %eax
+; X64-AVX-NEXT: shrl %cl, %edi
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: rorl %cl, %eax
+; X64-AVX-NEXT: orl %edi, %eax
+; X64-AVX-NEXT: retq
%shx = lshr i32 %x, %s
%rot = call i32 @llvm.fshr.i32(i32 %y, i32 %y, i32 %s)
%or = or i32 %rot, %shx
@@ -1217,15 +1312,15 @@ define i32 @or_lshr_fshr_commute(i32 %x, i32 %y, i32 %s) nounwind {
; X86-SSE2-NEXT: popl %esi
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: or_lshr_fshr_commute:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edx, %ecx
-; X64-AVX2-NEXT: movl %esi, %eax
-; X64-AVX2-NEXT: shrl %cl, %eax
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shrdl %cl, %esi, %edi
-; X64-AVX2-NEXT: orl %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: or_lshr_fshr_commute:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edx, %ecx
+; X64-AVX-NEXT: movl %esi, %eax
+; X64-AVX-NEXT: shrl %cl, %eax
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shrdl %cl, %esi, %edi
+; X64-AVX-NEXT: orl %edi, %eax
+; X64-AVX-NEXT: retq
%shy = lshr i32 %y, %s
%fun = call i32 @llvm.fshr.i32(i32 %y, i32 %x, i32 %s)
%or = or i32 %shy, %fun
@@ -1243,15 +1338,15 @@ define i32 @or_lshr_rotr_commute(i32 %x, i32 %y, i32 %s) nounwind {
; X86-SSE2-NEXT: orl %edx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: or_lshr_rotr_commute:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edx, %ecx
-; X64-AVX2-NEXT: movl %esi, %eax
-; X64-AVX2-NEXT: shrl %cl, %edi
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: rorl %cl, %eax
-; X64-AVX2-NEXT: orl %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: or_lshr_rotr_commute:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edx, %ecx
+; X64-AVX-NEXT: movl %esi, %eax
+; X64-AVX-NEXT: shrl %cl, %edi
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: rorl %cl, %eax
+; X64-AVX-NEXT: orl %edi, %eax
+; X64-AVX-NEXT: retq
%shx = lshr i32 %x, %s
%rot = call i32 @llvm.fshr.i32(i32 %y, i32 %y, i32 %s)
%or = or i32 %shx, %rot
@@ -1267,13 +1362,13 @@ define i32 @or_shl_fshl_simplify(i32 %x, i32 %y, i32 %s) nounwind {
; X86-SSE2-NEXT: shldl %cl, %edx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: or_shl_fshl_simplify:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edx, %ecx
-; X64-AVX2-NEXT: movl %esi, %eax
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shldl %cl, %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: or_shl_fshl_simplify:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edx, %ecx
+; X64-AVX-NEXT: movl %esi, %eax
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shldl %cl, %edi, %eax
+; X64-AVX-NEXT: retq
%shy = shl i32 %y, %s
%fun = call i32 @llvm.fshl.i32(i32 %y, i32 %x, i32 %s)
%or = or i32 %fun, %shy
@@ -1289,13 +1384,13 @@ define i32 @or_lshr_fshr_simplify(i32 %x, i32 %y, i32 %s) nounwind {
; X86-SSE2-NEXT: shrdl %cl, %edx, %eax
; X86-SSE2-NEXT: retl
;
-; X64-AVX2-LABEL: or_lshr_fshr_simplify:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: movl %edx, %ecx
-; X64-AVX2-NEXT: movl %esi, %eax
-; X64-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-AVX2-NEXT: shrdl %cl, %edi, %eax
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: or_lshr_fshr_simplify:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl %edx, %ecx
+; X64-AVX-NEXT: movl %esi, %eax
+; X64-AVX-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-AVX-NEXT: shrdl %cl, %edi, %eax
+; X64-AVX-NEXT: retq
%shy = lshr i32 %y, %s
%fun = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 %s)
%or = or i32 %shy, %fun
diff --git a/llvm/test/CodeGen/X86/insert-prefetch-invalid-instr.ll b/llvm/test/CodeGen/X86/insert-prefetch-invalid-instr.ll
index 2f5a36865d4a..f8e25028cfde 100644
--- a/llvm/test/CodeGen/X86/insert-prefetch-invalid-instr.ll
+++ b/llvm/test/CodeGen/X86/insert-prefetch-invalid-instr.ll
@@ -8,17 +8,13 @@ target triple = "x86_64-unknown-linux-gnu"
define dso_local i32 @main() local_unnamed_addr #0 !dbg !7 {
entry:
tail call void @llvm.prefetch(ptr inttoptr (i64 291 to ptr), i32 0, i32 0, i32 1), !dbg !9
- tail call void @llvm.x86.avx512.gatherpf.dpd.512(i8 97, <8 x i32> undef, ptr null, i32 1, i32 2), !dbg !10
ret i32 291, !dbg !11
}
; Function Attrs: inaccessiblemem_or_argmemonly nounwind
declare void @llvm.prefetch(ptr nocapture readonly, i32, i32, i32) #1
-; Function Attrs: argmemonly nounwind
-declare void @llvm.x86.avx512.gatherpf.dpd.512(i8, <8 x i32>, ptr, i32, i32) #2
-
-attributes #0 = {"target-cpu"="x86-64" "target-features"="+avx512pf,+sse4.2,+ssse3"}
+attributes #0 = {"target-cpu"="x86-64" "target-features"="+sse4.2,+ssse3"}
attributes #1 = { inaccessiblemem_or_argmemonly nounwind }
attributes #2 = { argmemonly nounwind }
@@ -43,4 +39,3 @@ attributes #2 = { argmemonly nounwind }
;CHECK: # %bb.0:
;CHECK: prefetchnta 291
;CHECK-NOT: prefetchnta 42(%rax,%ymm0)
-;CHECK: vgatherpf1dpd (%rax,%ymm0) {%k1}
diff --git a/llvm/test/CodeGen/X86/issue76416.ll b/llvm/test/CodeGen/X86/issue76416.ll
new file mode 100644
index 000000000000..d0f7fe684a84
--- /dev/null
+++ b/llvm/test/CodeGen/X86/issue76416.ll
@@ -0,0 +1,78 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=x86_64-unknown-freebsd15.0 < %s | FileCheck %s
+
+%struct.anon.5.28.78.99.149.119 = type { [4 x i8] }
+
+@vga_load_state_p = external dso_local global ptr, align 8
+@vga_load_state_data = external dso_local global i8, align 1
+
+define dso_local void @vga_load_state() #0 {
+; CHECK-LABEL: vga_load_state:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movl $0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: cmpl $3, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: jg .LBB0_3
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB0_2: # %for.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: #APP
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: incl -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: cmpl $3, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: jle .LBB0_2
+; CHECK-NEXT: .LBB0_3: # %for.end
+; CHECK-NEXT: movl $0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB0_4: # %for.cond1
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: #APP
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: movq vga_load_state_p(%rip), %rax
+; CHECK-NEXT: movslq -{{[0-9]+}}(%rsp), %rcx
+; CHECK-NEXT: movzbl (%rax,%rcx), %eax
+; CHECK-NEXT: movb %al, vga_load_state_data(%rip)
+; CHECK-NEXT: leal 1(%rcx), %eax
+; CHECK-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: jmp .LBB0_4
+entry:
+ %i = alloca i32, align 4
+ store i32 0, ptr %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.body, %entry
+ %i1 = load i32, ptr %i, align 4
+ %cmp = icmp slt i32 %i1, 4
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ call void asm sideeffect "", "{ax},~{dirflag},~{fpsr},~{flags}"(i8 0) #1
+ %i2 = load i32, ptr %i, align 4
+ %inc = add nsw i32 %i2, 1
+ store i32 %inc, ptr %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ store i32 0, ptr %i, align 4
+ br label %for.cond1
+
+for.cond1: ; preds = %for.cond1, %for.end
+ call void asm sideeffect "", "N{dx},~{dirflag},~{fpsr},~{flags}"(i32 poison) #1
+ %i3 = load ptr, ptr @vga_load_state_p, align 8
+ %regs = getelementptr inbounds %struct.anon.5.28.78.99.149.119, ptr %i3, i32 0, i32 0
+ %i4 = load i32, ptr %i, align 4
+ %idxprom = sext i32 %i4 to i64
+ %arrayidx = getelementptr inbounds [4 x i8], ptr %regs, i64 0, i64 %idxprom
+ %i5 = load i8, ptr %arrayidx, align 1
+ store i8 %i5, ptr @vga_load_state_data, align 1
+ %i6 = load i32, ptr %i, align 4
+ %inc5 = add nsw i32 %i6, 1
+ store i32 %inc5, ptr %i, align 4
+ br label %for.cond1, !llvm.loop !0
+}
+
+attributes #0 = { "tune-cpu"="generic" }
+attributes #1 = { nounwind }
+
+!0 = distinct !{!0, !1}
+!1 = !{!"llvm.loop.mustprogress"}
diff --git a/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
index c6e8b7532505..3b5ff12fb4ec 100644
--- a/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
@@ -31,10 +31,8 @@ define <4 x i32> @vec128_i32_signed_reg_reg(<4 x i32> %a1, <4 x i32> %a2) nounwi
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: psubd %xmm1, %xmm4
-; SSE2-NEXT: psubd %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm2, %xmm4
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: pxor %xmm2, %xmm4
+; SSE2-NEXT: psubd %xmm4, %xmm2
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
@@ -179,25 +177,22 @@ define <4 x i32> @vec128_i32_unsigned_reg_reg(<4 x i32> %a1, <4 x i32> %a2) noun
; SSE2-LABEL: vec128_i32_unsigned_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psubd %xmm1, %xmm3
+; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1]
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: psubd %xmm1, %xmm4
-; SSE2-NEXT: psubd %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm2, %xmm4
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1,1,1,1]
+; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: psubd %xmm3, %xmm2
; SSE2-NEXT: psrld $1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm3, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm3, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: paddd %xmm2, %xmm0
; SSE2-NEXT: retq
@@ -349,10 +344,8 @@ define <4 x i32> @vec128_i32_signed_mem_reg(ptr %a1_addr, <4 x i32> %a2) nounwin
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: psubd %xmm0, %xmm4
-; SSE2-NEXT: psubd %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: pxor %xmm2, %xmm4
+; SSE2-NEXT: psubd %xmm4, %xmm2
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
@@ -511,10 +504,8 @@ define <4 x i32> @vec128_i32_signed_reg_mem(<4 x i32> %a1, ptr %a2_addr) nounwin
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: psubd %xmm1, %xmm4
-; SSE2-NEXT: psubd %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm2, %xmm4
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: pxor %xmm2, %xmm4
+; SSE2-NEXT: psubd %xmm4, %xmm2
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
@@ -674,10 +665,8 @@ define <4 x i32> @vec128_i32_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: psubd %xmm0, %xmm4
-; SSE2-NEXT: psubd %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: pxor %xmm2, %xmm4
+; SSE2-NEXT: psubd %xmm4, %xmm2
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
@@ -844,74 +833,66 @@ define <2 x i64> @vec128_i64_signed_reg_reg(<2 x i64> %a1, <2 x i64> %a2) nounwi
; SSE2-LABEL: vec128_i64_signed_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psubq %xmm1, %xmm3
+; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1]
-; SSE2-NEXT: por %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: pandn %xmm1, %xmm5
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: por %xmm4, %xmm1
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: por %xmm5, %xmm3
-; SSE2-NEXT: psubq %xmm1, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: psrlq $1, %xmm1
-; SSE2-NEXT: psrlq $33, %xmm3
-; SSE2-NEXT: pmuludq %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1,1]
+; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: psubq %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm3
+; SSE2-NEXT: psrlq $33, %xmm2
+; SSE2-NEXT: pmuludq %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: psrlq $32, %xmm4
-; SSE2-NEXT: pmuludq %xmm1, %xmm4
-; SSE2-NEXT: paddq %xmm3, %xmm4
+; SSE2-NEXT: pmuludq %xmm3, %xmm4
+; SSE2-NEXT: paddq %xmm2, %xmm4
; SSE2-NEXT: psllq $32, %xmm4
-; SSE2-NEXT: pmuludq %xmm2, %xmm1
-; SSE2-NEXT: paddq %xmm1, %xmm0
+; SSE2-NEXT: pmuludq %xmm1, %xmm3
+; SSE2-NEXT: paddq %xmm3, %xmm0
; SSE2-NEXT: paddq %xmm4, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i64_signed_reg_reg:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = [2147483648,2147483648]
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: pxor %xmm0, %xmm3
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: pcmpgtd %xmm3, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT: pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT: pand %xmm5, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT: por %xmm3, %xmm0
-; SSE41-NEXT: pmovsxbq {{.*#+}} xmm3 = [1,1]
-; SSE41-NEXT: por %xmm0, %xmm3
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = [2147483648,2147483648]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psubq %xmm1, %xmm3
+; SSE41-NEXT: pxor %xmm2, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm4
-; SSE41-NEXT: psubq %xmm1, %xmm4
-; SSE41-NEXT: psubq %xmm2, %xmm1
-; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm1
-; SSE41-NEXT: movapd %xmm1, %xmm0
-; SSE41-NEXT: psrlq $1, %xmm0
-; SSE41-NEXT: psrlq $33, %xmm1
-; SSE41-NEXT: pmuludq %xmm3, %xmm1
-; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm1, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE41-NEXT: pand %xmm5, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm1, %xmm2
+; SSE41-NEXT: pmovsxbq {{.*#+}} xmm1 = [1,1]
+; SSE41-NEXT: por %xmm2, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm3
+; SSE41-NEXT: psubq %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: psrlq $1, %xmm3
+; SSE41-NEXT: psrlq $33, %xmm2
+; SSE41-NEXT: pmuludq %xmm1, %xmm2
+; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: psrlq $32, %xmm4
-; SSE41-NEXT: pmuludq %xmm0, %xmm4
-; SSE41-NEXT: paddq %xmm1, %xmm4
+; SSE41-NEXT: pmuludq %xmm3, %xmm4
+; SSE41-NEXT: paddq %xmm2, %xmm4
; SSE41-NEXT: psllq $32, %xmm4
-; SSE41-NEXT: pmuludq %xmm3, %xmm0
-; SSE41-NEXT: paddq %xmm2, %xmm0
+; SSE41-NEXT: pmuludq %xmm1, %xmm3
+; SSE41-NEXT: paddq %xmm3, %xmm0
; SSE41-NEXT: paddq %xmm4, %xmm0
; SSE41-NEXT: retq
;
@@ -919,9 +900,9 @@ define <2 x i64> @vec128_i64_signed_reg_reg(<2 x i64> %a1, <2 x i64> %a2) nounwi
; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm4
-; AVX-NEXT: vpsubq %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
+; AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; AVX-NEXT: vpsrlq $1, %xmm1, %xmm2
; AVX-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
@@ -938,9 +919,9 @@ define <2 x i64> @vec128_i64_signed_reg_reg(<2 x i64> %a1, <2 x i64> %a2) nounwi
; XOP: # %bb.0:
; XOP-NEXT: vpcomgtq %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm4
-; XOP-NEXT: vpsubq %xmm0, %xmm1, %xmm1
-; XOP-NEXT: vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
+; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; XOP-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; XOP-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm2
; XOP-NEXT: vpsrlq $33, %xmm1, %xmm1
; XOP-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
@@ -1027,74 +1008,66 @@ define <2 x i64> @vec128_i64_unsigned_reg_reg(<2 x i64> %a1, <2 x i64> %a2) noun
; SSE2-LABEL: vec128_i64_unsigned_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psubq %xmm1, %xmm3
+; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1]
-; SSE2-NEXT: por %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: pandn %xmm1, %xmm5
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: por %xmm4, %xmm1
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: por %xmm5, %xmm3
-; SSE2-NEXT: psubq %xmm1, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: psrlq $1, %xmm1
-; SSE2-NEXT: psrlq $33, %xmm3
-; SSE2-NEXT: pmuludq %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1,1]
+; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: psubq %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm3
+; SSE2-NEXT: psrlq $33, %xmm2
+; SSE2-NEXT: pmuludq %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: psrlq $32, %xmm4
-; SSE2-NEXT: pmuludq %xmm1, %xmm4
-; SSE2-NEXT: paddq %xmm3, %xmm4
+; SSE2-NEXT: pmuludq %xmm3, %xmm4
+; SSE2-NEXT: paddq %xmm2, %xmm4
; SSE2-NEXT: psllq $32, %xmm4
-; SSE2-NEXT: pmuludq %xmm2, %xmm1
-; SSE2-NEXT: paddq %xmm1, %xmm0
+; SSE2-NEXT: pmuludq %xmm1, %xmm3
+; SSE2-NEXT: paddq %xmm3, %xmm0
; SSE2-NEXT: paddq %xmm4, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i64_unsigned_reg_reg:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: pxor %xmm0, %xmm3
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: pcmpgtd %xmm3, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT: pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT: pand %xmm5, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE41-NEXT: por %xmm3, %xmm0
-; SSE41-NEXT: pmovsxbq {{.*#+}} xmm3 = [1,1]
-; SSE41-NEXT: por %xmm0, %xmm3
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psubq %xmm1, %xmm3
+; SSE41-NEXT: pxor %xmm2, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm4
-; SSE41-NEXT: psubq %xmm1, %xmm4
-; SSE41-NEXT: psubq %xmm2, %xmm1
-; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm1
-; SSE41-NEXT: movapd %xmm1, %xmm0
-; SSE41-NEXT: psrlq $1, %xmm0
-; SSE41-NEXT: psrlq $33, %xmm1
-; SSE41-NEXT: pmuludq %xmm3, %xmm1
-; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm1, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE41-NEXT: pand %xmm5, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm1, %xmm2
+; SSE41-NEXT: pmovsxbq {{.*#+}} xmm1 = [1,1]
+; SSE41-NEXT: por %xmm2, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm3
+; SSE41-NEXT: psubq %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: psrlq $1, %xmm3
+; SSE41-NEXT: psrlq $33, %xmm2
+; SSE41-NEXT: pmuludq %xmm1, %xmm2
+; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: psrlq $32, %xmm4
-; SSE41-NEXT: pmuludq %xmm0, %xmm4
-; SSE41-NEXT: paddq %xmm1, %xmm4
+; SSE41-NEXT: pmuludq %xmm3, %xmm4
+; SSE41-NEXT: paddq %xmm2, %xmm4
; SSE41-NEXT: psllq $32, %xmm4
-; SSE41-NEXT: pmuludq %xmm3, %xmm0
-; SSE41-NEXT: paddq %xmm2, %xmm0
+; SSE41-NEXT: pmuludq %xmm1, %xmm3
+; SSE41-NEXT: paddq %xmm3, %xmm0
; SSE41-NEXT: paddq %xmm4, %xmm0
; SSE41-NEXT: retq
;
@@ -1106,9 +1079,9 @@ define <2 x i64> @vec128_i64_unsigned_reg_reg(<2 x i64> %a1, <2 x i64> %a2) noun
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm4
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm1
-; AVX1-NEXT: vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm2
; AVX1-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX1-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
@@ -1128,9 +1101,9 @@ define <2 x i64> @vec128_i64_unsigned_reg_reg(<2 x i64> %a1, <2 x i64> %a2) noun
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm2
; AVX2-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm4
-; AVX2-NEXT: vpsubq %xmm0, %xmm1, %xmm1
-; AVX2-NEXT: vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
+; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpsrlq $1, %xmm1, %xmm2
; AVX2-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX2-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
@@ -1147,9 +1120,9 @@ define <2 x i64> @vec128_i64_unsigned_reg_reg(<2 x i64> %a1, <2 x i64> %a2) noun
; XOP: # %bb.0:
; XOP-NEXT: vpcomgtuq %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm4
-; XOP-NEXT: vpsubq %xmm0, %xmm1, %xmm1
-; XOP-NEXT: vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
+; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; XOP-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; XOP-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm2
; XOP-NEXT: vpsrlq $33, %xmm1, %xmm1
; XOP-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
@@ -1239,76 +1212,67 @@ define <2 x i64> @vec128_i64_signed_mem_reg(ptr %a1_addr, <2 x i64> %a2) nounwin
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: psubq %xmm0, %xmm3
+; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1]
-; SSE2-NEXT: por %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: pandn %xmm0, %xmm5
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm3
-; SSE2-NEXT: por %xmm5, %xmm3
-; SSE2-NEXT: psubq %xmm0, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm0, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1]
+; SSE2-NEXT: por %xmm2, %xmm4
+; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: psubq %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: psrlq $1, %xmm0
-; SSE2-NEXT: psrlq $33, %xmm3
-; SSE2-NEXT: pmuludq %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: psrlq $32, %xmm4
-; SSE2-NEXT: pmuludq %xmm0, %xmm4
-; SSE2-NEXT: paddq %xmm3, %xmm4
-; SSE2-NEXT: psllq $32, %xmm4
-; SSE2-NEXT: pmuludq %xmm2, %xmm0
+; SSE2-NEXT: psrlq $33, %xmm2
+; SSE2-NEXT: pmuludq %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm4, %xmm3
+; SSE2-NEXT: psrlq $32, %xmm3
+; SSE2-NEXT: pmuludq %xmm0, %xmm3
+; SSE2-NEXT: paddq %xmm2, %xmm3
+; SSE2-NEXT: psllq $32, %xmm3
+; SSE2-NEXT: pmuludq %xmm4, %xmm0
; SSE2-NEXT: paddq %xmm1, %xmm0
-; SSE2-NEXT: paddq %xmm4, %xmm0
+; SSE2-NEXT: paddq %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i64_signed_mem_reg:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa (%rdi), %xmm2
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = [2147483648,2147483648]
+; SSE41-NEXT: movdqa (%rdi), %xmm1
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = [2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: pxor %xmm0, %xmm3
+; SSE41-NEXT: psubq %xmm0, %xmm3
; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: pcmpgtd %xmm3, %xmm4
+; SSE41-NEXT: pxor %xmm1, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT: pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT: pand %xmm5, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm4[1,1,3,3]
-; SSE41-NEXT: por %xmm3, %xmm6
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE41-NEXT: pand %xmm5, %xmm0
-; SSE41-NEXT: por %xmm4, %xmm0
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: psubq %xmm1, %xmm3
-; SSE41-NEXT: psubq %xmm2, %xmm1
-; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm1
-; SSE41-NEXT: movapd %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm0, %xmm2
+; SSE41-NEXT: pmovsxbq {{.*#+}} xmm4 = [1,1]
+; SSE41-NEXT: por %xmm2, %xmm4
+; SSE41-NEXT: pxor %xmm2, %xmm3
+; SSE41-NEXT: psubq %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: psrlq $1, %xmm0
-; SSE41-NEXT: psrlq $33, %xmm1
-; SSE41-NEXT: pmuludq %xmm6, %xmm1
-; SSE41-NEXT: movdqa %xmm6, %xmm3
+; SSE41-NEXT: psrlq $33, %xmm2
+; SSE41-NEXT: pmuludq %xmm4, %xmm2
+; SSE41-NEXT: movdqa %xmm4, %xmm3
; SSE41-NEXT: psrlq $32, %xmm3
; SSE41-NEXT: pmuludq %xmm0, %xmm3
-; SSE41-NEXT: paddq %xmm1, %xmm3
+; SSE41-NEXT: paddq %xmm2, %xmm3
; SSE41-NEXT: psllq $32, %xmm3
-; SSE41-NEXT: pmuludq %xmm6, %xmm0
-; SSE41-NEXT: paddq %xmm2, %xmm0
+; SSE41-NEXT: pmuludq %xmm4, %xmm0
+; SSE41-NEXT: paddq %xmm1, %xmm0
; SSE41-NEXT: paddq %xmm3, %xmm0
; SSE41-NEXT: retq
;
@@ -1317,9 +1281,9 @@ define <2 x i64> @vec128_i64_signed_mem_reg(ptr %a1_addr, <2 x i64> %a2) nounwin
; AVX-NEXT: vmovdqa (%rdi), %xmm1
; AVX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX-NEXT: vpsubq %xmm0, %xmm1, %xmm4
-; AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vblendvpd %xmm2, %xmm4, %xmm0, %xmm0
+; AVX-NEXT: vpsubq %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX-NEXT: vpsrlq $1, %xmm0, %xmm2
; AVX-NEXT: vpsrlq $33, %xmm0, %xmm0
; AVX-NEXT: vpmuludq %xmm3, %xmm0, %xmm0
@@ -1337,9 +1301,9 @@ define <2 x i64> @vec128_i64_signed_mem_reg(ptr %a1_addr, <2 x i64> %a2) nounwin
; XOP-NEXT: vmovdqa (%rdi), %xmm1
; XOP-NEXT: vpcomgtq %xmm0, %xmm1, %xmm2
; XOP-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; XOP-NEXT: vpsubq %xmm0, %xmm1, %xmm4
-; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm0
-; XOP-NEXT: vblendvpd %xmm2, %xmm4, %xmm0, %xmm0
+; XOP-NEXT: vpsubq %xmm0, %xmm1, %xmm0
+; XOP-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; XOP-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; XOP-NEXT: vpsrlq $1, %xmm0, %xmm2
; XOP-NEXT: vpsrlq $33, %xmm0, %xmm0
; XOP-NEXT: vpmuludq %xmm3, %xmm0, %xmm0
@@ -1442,15 +1406,10 @@ define <2 x i64> @vec128_i64_signed_reg_mem(<2 x i64> %a1, ptr %a2_addr) nounwin
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1]
; SSE2-NEXT: por %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: pandn %xmm1, %xmm5
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: por %xmm4, %xmm1
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: por %xmm5, %xmm3
-; SSE2-NEXT: psubq %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psubq %xmm1, %xmm4
+; SSE2-NEXT: pxor %xmm3, %xmm4
+; SSE2-NEXT: psubq %xmm4, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: psrlq $1, %xmm1
; SSE2-NEXT: psrlq $33, %xmm3
@@ -1467,39 +1426,37 @@ define <2 x i64> @vec128_i64_signed_reg_mem(<2 x i64> %a1, ptr %a2_addr) nounwin
;
; SSE41-LABEL: vec128_i64_signed_reg_mem:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa (%rdi), %xmm2
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = [2147483648,2147483648]
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: pxor %xmm0, %xmm3
-; SSE41-NEXT: pxor %xmm2, %xmm0
+; SSE41-NEXT: movdqa (%rdi), %xmm1
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = [2147483648,2147483648]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: pxor %xmm2, %xmm3
+; SSE41-NEXT: pxor %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm2, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT: pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT: pand %xmm5, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm4[1,1,3,3]
-; SSE41-NEXT: por %xmm3, %xmm6
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
-; SSE41-NEXT: pand %xmm5, %xmm0
-; SSE41-NEXT: por %xmm4, %xmm0
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: psubq %xmm2, %xmm3
-; SSE41-NEXT: psubq %xmm1, %xmm2
-; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm2
-; SSE41-NEXT: movapd %xmm2, %xmm0
-; SSE41-NEXT: psrlq $1, %xmm0
-; SSE41-NEXT: psrlq $33, %xmm2
-; SSE41-NEXT: pmuludq %xmm6, %xmm2
-; SSE41-NEXT: movdqa %xmm6, %xmm3
-; SSE41-NEXT: psrlq $32, %xmm3
-; SSE41-NEXT: pmuludq %xmm0, %xmm3
-; SSE41-NEXT: paddq %xmm2, %xmm3
-; SSE41-NEXT: psllq $32, %xmm3
-; SSE41-NEXT: pmuludq %xmm6, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm3, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE41-NEXT: pand %xmm5, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm2, %xmm3
+; SSE41-NEXT: pmovsxbq {{.*#+}} xmm2 = [1,1]
+; SSE41-NEXT: por %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: psubq %xmm1, %xmm4
+; SSE41-NEXT: pxor %xmm3, %xmm4
+; SSE41-NEXT: psubq %xmm4, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm1
+; SSE41-NEXT: psrlq $1, %xmm1
+; SSE41-NEXT: psrlq $33, %xmm3
+; SSE41-NEXT: pmuludq %xmm2, %xmm3
+; SSE41-NEXT: movdqa %xmm2, %xmm4
+; SSE41-NEXT: psrlq $32, %xmm4
+; SSE41-NEXT: pmuludq %xmm1, %xmm4
+; SSE41-NEXT: paddq %xmm3, %xmm4
+; SSE41-NEXT: psllq $32, %xmm4
+; SSE41-NEXT: pmuludq %xmm2, %xmm1
; SSE41-NEXT: paddq %xmm1, %xmm0
-; SSE41-NEXT: paddq %xmm3, %xmm0
+; SSE41-NEXT: paddq %xmm4, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: vec128_i64_signed_reg_mem:
@@ -1507,9 +1464,9 @@ define <2 x i64> @vec128_i64_signed_reg_mem(<2 x i64> %a1, ptr %a2_addr) nounwin
; AVX-NEXT: vmovdqa (%rdi), %xmm1
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm4
-; AVX-NEXT: vpsubq %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
+; AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; AVX-NEXT: vpsrlq $1, %xmm1, %xmm2
; AVX-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
@@ -1527,9 +1484,9 @@ define <2 x i64> @vec128_i64_signed_reg_mem(<2 x i64> %a1, ptr %a2_addr) nounwin
; XOP-NEXT: vmovdqa (%rdi), %xmm1
; XOP-NEXT: vpcomgtq %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm4
-; XOP-NEXT: vpsubq %xmm0, %xmm1, %xmm1
-; XOP-NEXT: vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
+; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; XOP-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; XOP-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm2
; XOP-NEXT: vpsrlq $33, %xmm1, %xmm1
; XOP-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
@@ -1620,75 +1577,67 @@ define <2 x i64> @vec128_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: psubq %xmm0, %xmm3
+; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1]
-; SSE2-NEXT: por %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: pandn %xmm0, %xmm5
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm3
-; SSE2-NEXT: por %xmm5, %xmm3
-; SSE2-NEXT: psubq %xmm0, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm0, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1]
+; SSE2-NEXT: por %xmm2, %xmm4
+; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: psubq %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: psrlq $1, %xmm0
-; SSE2-NEXT: psrlq $33, %xmm3
-; SSE2-NEXT: pmuludq %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: psrlq $32, %xmm4
-; SSE2-NEXT: pmuludq %xmm0, %xmm4
-; SSE2-NEXT: paddq %xmm3, %xmm4
-; SSE2-NEXT: psllq $32, %xmm4
-; SSE2-NEXT: pmuludq %xmm2, %xmm0
+; SSE2-NEXT: psrlq $33, %xmm2
+; SSE2-NEXT: pmuludq %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm4, %xmm3
+; SSE2-NEXT: psrlq $32, %xmm3
+; SSE2-NEXT: pmuludq %xmm0, %xmm3
+; SSE2-NEXT: paddq %xmm2, %xmm3
+; SSE2-NEXT: psllq $32, %xmm3
+; SSE2-NEXT: pmuludq %xmm4, %xmm0
; SSE2-NEXT: paddq %xmm1, %xmm0
-; SSE2-NEXT: paddq %xmm4, %xmm0
+; SSE2-NEXT: paddq %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i64_signed_mem_mem:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa (%rdi), %xmm1
-; SSE41-NEXT: movdqa (%rsi), %xmm2
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = [2147483648,2147483648]
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: pxor %xmm0, %xmm3
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: pcmpgtd %xmm3, %xmm4
+; SSE41-NEXT: movdqa (%rsi), %xmm0
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = [2147483648,2147483648]
+; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: psubq %xmm0, %xmm3
+; SSE41-NEXT: pxor %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE41-NEXT: pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT: pand %xmm5, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm4[1,1,3,3]
-; SSE41-NEXT: por %xmm3, %xmm6
-; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE41-NEXT: pand %xmm5, %xmm0
-; SSE41-NEXT: por %xmm4, %xmm0
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: psubq %xmm2, %xmm3
-; SSE41-NEXT: psubq %xmm1, %xmm2
-; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm2
-; SSE41-NEXT: movapd %xmm2, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm0, %xmm2
+; SSE41-NEXT: pmovsxbq {{.*#+}} xmm4 = [1,1]
+; SSE41-NEXT: por %xmm2, %xmm4
+; SSE41-NEXT: pxor %xmm2, %xmm3
+; SSE41-NEXT: psubq %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: psrlq $1, %xmm0
; SSE41-NEXT: psrlq $33, %xmm2
-; SSE41-NEXT: pmuludq %xmm6, %xmm2
-; SSE41-NEXT: movdqa %xmm6, %xmm3
+; SSE41-NEXT: pmuludq %xmm4, %xmm2
+; SSE41-NEXT: movdqa %xmm4, %xmm3
; SSE41-NEXT: psrlq $32, %xmm3
; SSE41-NEXT: pmuludq %xmm0, %xmm3
; SSE41-NEXT: paddq %xmm2, %xmm3
; SSE41-NEXT: psllq $32, %xmm3
-; SSE41-NEXT: pmuludq %xmm6, %xmm0
+; SSE41-NEXT: pmuludq %xmm4, %xmm0
; SSE41-NEXT: paddq %xmm1, %xmm0
; SSE41-NEXT: paddq %xmm3, %xmm0
; SSE41-NEXT: retq
@@ -1699,9 +1648,9 @@ define <2 x i64> @vec128_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
; AVX-NEXT: vmovdqa (%rsi), %xmm1
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm4
-; AVX-NEXT: vpsubq %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
+; AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; AVX-NEXT: vpsrlq $1, %xmm1, %xmm2
; AVX-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
@@ -1720,9 +1669,9 @@ define <2 x i64> @vec128_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
; XOP-NEXT: vmovdqa (%rsi), %xmm1
; XOP-NEXT: vpcomgtq %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm4
-; XOP-NEXT: vpsubq %xmm0, %xmm1, %xmm1
-; XOP-NEXT: vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
+; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; XOP-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; XOP-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm2
; XOP-NEXT: vpsrlq $33, %xmm1, %xmm1
; XOP-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
@@ -2389,10 +2338,8 @@ define <16 x i8> @vec128_i8_signed_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwin
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: psubb %xmm1, %xmm4
-; SSE2-NEXT: psubb %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm1, %xmm3
-; SSE2-NEXT: por %xmm4, %xmm3
+; SSE2-NEXT: pxor %xmm3, %xmm4
+; SSE2-NEXT: psubb %xmm4, %xmm3
; SSE2-NEXT: psrlw $1, %xmm3
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm1
@@ -2852,10 +2799,8 @@ define <16 x i8> @vec128_i8_signed_mem_reg(ptr %a1_addr, <16 x i8> %a2) nounwind
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: psubb %xmm1, %xmm4
-; SSE2-NEXT: psubb %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm1, %xmm3
-; SSE2-NEXT: por %xmm4, %xmm3
+; SSE2-NEXT: pxor %xmm3, %xmm4
+; SSE2-NEXT: psubb %xmm4, %xmm3
; SSE2-NEXT: psrlw $1, %xmm3
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm1
@@ -3083,30 +3028,28 @@ define <16 x i8> @vec128_i8_signed_mem_reg(ptr %a1_addr, <16 x i8> %a2) nounwind
define <16 x i8> @vec128_i8_signed_reg_mem(<16 x i8> %a1, ptr %a2_addr) nounwind {
; SSE2-LABEL: vec128_i8_signed_reg_mem:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rdi), %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm3, %xmm2
+; SSE2-NEXT: movdqa (%rdi), %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: pcmpgtb %xmm2, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: psubb %xmm3, %xmm4
-; SSE2-NEXT: psubb %xmm0, %xmm3
-; SSE2-NEXT: pand %xmm2, %xmm4
-; SSE2-NEXT: pandn %xmm3, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm2
-; SSE2-NEXT: psrlw $1, %xmm2
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psubb %xmm2, %xmm4
+; SSE2-NEXT: pxor %xmm3, %xmm4
+; SSE2-NEXT: psubb %xmm4, %xmm3
+; SSE2-NEXT: psrlw $1, %xmm3
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: pmullw %xmm3, %xmm4
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; SSE2-NEXT: pand %xmm3, %xmm4
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pmullw %xmm2, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm2, %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pmullw %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pmullw %xmm3, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: packuswb %xmm4, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm0
; SSE2-NEXT: retq
@@ -3321,30 +3264,28 @@ define <16 x i8> @vec128_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
; SSE2-LABEL: vec128_i8_signed_mem_mem:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: movdqa (%rsi), %xmm3
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm3, %xmm2
+; SSE2-NEXT: movdqa (%rsi), %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pcmpgtb %xmm2, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: psubb %xmm3, %xmm4
-; SSE2-NEXT: psubb %xmm1, %xmm3
-; SSE2-NEXT: pand %xmm2, %xmm4
-; SSE2-NEXT: pandn %xmm3, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm2
-; SSE2-NEXT: psrlw $1, %xmm2
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psubb %xmm2, %xmm4
+; SSE2-NEXT: pxor %xmm3, %xmm4
+; SSE2-NEXT: psubb %xmm4, %xmm3
+; SSE2-NEXT: psrlw $1, %xmm3
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: pmullw %xmm3, %xmm4
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; SSE2-NEXT: pand %xmm3, %xmm4
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pmullw %xmm2, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm2, %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pmullw %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: pmullw %xmm3, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: packuswb %xmm4, %xmm0
; SSE2-NEXT: paddb %xmm1, %xmm0
; SSE2-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
index cc08396ae8c7..92060aec3074 100644
--- a/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
@@ -390,12 +390,12 @@ define <4 x i64> @vec256_i64_signed_reg_reg(<4 x i64> %a1, <4 x i64> %a2) nounwi
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm5
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm6
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm1
-; AVX1-NEXT: vblendvpd %xmm5, %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm6
-; AVX1-NEXT: vpsubq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm1, %xmm5, %xmm1
+; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm6
; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm7
; AVX1-NEXT: vpsrlq $33, %xmm1, %xmm1
@@ -427,9 +427,9 @@ define <4 x i64> @vec256_i64_signed_reg_reg(<4 x i64> %a1, <4 x i64> %a2) nounwi
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm3
-; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm4
-; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm1
-; AVX2-NEXT: vblendvpd %ymm2, %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm2
; AVX2-NEXT: vpsrlq $33, %ymm1, %ymm1
; AVX2-NEXT: vpmuludq %ymm3, %ymm1, %ymm1
@@ -448,12 +448,12 @@ define <4 x i64> @vec256_i64_signed_reg_reg(<4 x i64> %a1, <4 x i64> %a2) nounwi
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOP-NEXT: vpcomgtq %xmm2, %xmm3, %xmm4
; XOP-NEXT: vpcomgtq %xmm1, %xmm0, %xmm5
-; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm6
-; XOP-NEXT: vpsubq %xmm0, %xmm1, %xmm1
-; XOP-NEXT: vblendvpd %xmm5, %xmm6, %xmm1, %xmm1
-; XOP-NEXT: vpsubq %xmm2, %xmm3, %xmm6
-; XOP-NEXT: vpsubq %xmm3, %xmm2, %xmm2
-; XOP-NEXT: vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
+; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; XOP-NEXT: vpxor %xmm5, %xmm1, %xmm1
+; XOP-NEXT: vpsubq %xmm1, %xmm5, %xmm1
+; XOP-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; XOP-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; XOP-NEXT: vpsubq %xmm2, %xmm4, %xmm2
; XOP-NEXT: vpsrlq $1, %xmm2, %xmm6
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm7
; XOP-NEXT: vpsrlq $33, %xmm1, %xmm1
@@ -561,25 +561,25 @@ define <4 x i64> @vec256_i64_unsigned_reg_reg(<4 x i64> %a1, <4 x i64> %a2) noun
; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm6
; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm6
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm1
-; AVX1-NEXT: vblendvpd %xmm4, %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpsubq %xmm3, %xmm2, %xmm6
-; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm3
-; AVX1-NEXT: vblendvpd %xmm5, %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm1, %xmm4, %xmm1
+; AVX1-NEXT: vpsubq %xmm3, %xmm2, %xmm3
+; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpsubq %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm6
; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm7
+; AVX1-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX1-NEXT: vpmovsxbq {{.*#+}} xmm8 = [1,1]
; AVX1-NEXT: vpor %xmm4, %xmm8, %xmm4
-; AVX1-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX1-NEXT: vpmuludq %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpsrlq $32, %xmm4, %xmm9
; AVX1-NEXT: vpmuludq %xmm7, %xmm9, %xmm9
; AVX1-NEXT: vpaddq %xmm1, %xmm9, %xmm1
; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX1-NEXT: vpmuludq %xmm4, %xmm7, %xmm4
-; AVX1-NEXT: vpor %xmm5, %xmm8, %xmm5
; AVX1-NEXT: vpsrlq $33, %xmm3, %xmm3
+; AVX1-NEXT: vpor %xmm5, %xmm8, %xmm5
; AVX1-NEXT: vpmuludq %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpsrlq $32, %xmm5, %xmm7
; AVX1-NEXT: vpmuludq %xmm7, %xmm6, %xmm7
@@ -601,9 +601,9 @@ define <4 x i64> @vec256_i64_unsigned_reg_reg(<4 x i64> %a1, <4 x i64> %a2) noun
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm3
-; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm4
-; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm1
-; AVX2-NEXT: vblendvpd %ymm2, %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm2
; AVX2-NEXT: vpsrlq $33, %ymm1, %ymm1
; AVX2-NEXT: vpmuludq %ymm3, %ymm1, %ymm1
@@ -622,12 +622,12 @@ define <4 x i64> @vec256_i64_unsigned_reg_reg(<4 x i64> %a1, <4 x i64> %a2) noun
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOP-NEXT: vpcomgtuq %xmm2, %xmm3, %xmm4
; XOP-NEXT: vpcomgtuq %xmm1, %xmm0, %xmm5
-; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm6
-; XOP-NEXT: vpsubq %xmm0, %xmm1, %xmm1
-; XOP-NEXT: vblendvpd %xmm5, %xmm6, %xmm1, %xmm1
-; XOP-NEXT: vpsubq %xmm2, %xmm3, %xmm6
-; XOP-NEXT: vpsubq %xmm3, %xmm2, %xmm2
-; XOP-NEXT: vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
+; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; XOP-NEXT: vpxor %xmm5, %xmm1, %xmm1
+; XOP-NEXT: vpsubq %xmm1, %xmm5, %xmm1
+; XOP-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; XOP-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; XOP-NEXT: vpsubq %xmm2, %xmm4, %xmm2
; XOP-NEXT: vpsrlq $1, %xmm2, %xmm6
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm7
; XOP-NEXT: vpsrlq $33, %xmm1, %xmm1
@@ -732,12 +732,12 @@ define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwin
; AVX1-NEXT: vmovdqa 16(%rdi), %xmm3
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm5
-; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm6
-; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vblendvpd %xmm5, %xmm6, %xmm0, %xmm0
-; AVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm6
-; AVX1-NEXT: vpsubq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vblendvpd %xmm4, %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm5, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm6
; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm7
; AVX1-NEXT: vpsrlq $33, %xmm0, %xmm0
@@ -770,9 +770,9 @@ define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwin
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm3
-; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm4
-; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vblendvpd %ymm2, %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm2
; AVX2-NEXT: vpsrlq $33, %ymm0, %ymm0
; AVX2-NEXT: vpmuludq %ymm3, %ymm0, %ymm0
@@ -792,12 +792,12 @@ define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwin
; XOP-NEXT: vmovdqa 16(%rdi), %xmm3
; XOP-NEXT: vpcomgtq %xmm1, %xmm3, %xmm4
; XOP-NEXT: vpcomgtq %xmm0, %xmm2, %xmm5
-; XOP-NEXT: vpsubq %xmm0, %xmm2, %xmm6
-; XOP-NEXT: vpsubq %xmm2, %xmm0, %xmm0
-; XOP-NEXT: vblendvpd %xmm5, %xmm6, %xmm0, %xmm0
-; XOP-NEXT: vpsubq %xmm1, %xmm3, %xmm6
-; XOP-NEXT: vpsubq %xmm3, %xmm1, %xmm1
-; XOP-NEXT: vblendvpd %xmm4, %xmm6, %xmm1, %xmm1
+; XOP-NEXT: vpsubq %xmm0, %xmm2, %xmm0
+; XOP-NEXT: vpxor %xmm5, %xmm0, %xmm0
+; XOP-NEXT: vpsubq %xmm0, %xmm5, %xmm0
+; XOP-NEXT: vpsubq %xmm1, %xmm3, %xmm1
+; XOP-NEXT: vpxor %xmm4, %xmm1, %xmm1
+; XOP-NEXT: vpsubq %xmm1, %xmm4, %xmm1
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm6
; XOP-NEXT: vpsrlq $1, %xmm0, %xmm7
; XOP-NEXT: vpsrlq $33, %xmm0, %xmm0
@@ -902,12 +902,12 @@ define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwin
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm5
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm6
-; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm1
-; AVX1-NEXT: vblendvpd %xmm5, %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm6
-; AVX1-NEXT: vpsubq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm1, %xmm5, %xmm1
+; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm6
; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm7
; AVX1-NEXT: vpsrlq $33, %xmm1, %xmm1
@@ -940,9 +940,9 @@ define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwin
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm3
-; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm4
-; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm1
-; AVX2-NEXT: vblendvpd %ymm2, %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm2
; AVX2-NEXT: vpsrlq $33, %ymm1, %ymm1
; AVX2-NEXT: vpmuludq %ymm3, %ymm1, %ymm1
@@ -962,12 +962,12 @@ define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwin
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOP-NEXT: vpcomgtq %xmm2, %xmm3, %xmm4
; XOP-NEXT: vpcomgtq %xmm1, %xmm0, %xmm5
-; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm6
-; XOP-NEXT: vpsubq %xmm0, %xmm1, %xmm1
-; XOP-NEXT: vblendvpd %xmm5, %xmm6, %xmm1, %xmm1
-; XOP-NEXT: vpsubq %xmm2, %xmm3, %xmm6
-; XOP-NEXT: vpsubq %xmm3, %xmm2, %xmm2
-; XOP-NEXT: vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
+; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; XOP-NEXT: vpxor %xmm5, %xmm1, %xmm1
+; XOP-NEXT: vpsubq %xmm1, %xmm5, %xmm1
+; XOP-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; XOP-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; XOP-NEXT: vpsubq %xmm2, %xmm4, %xmm2
; XOP-NEXT: vpsrlq $1, %xmm2, %xmm6
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm7
; XOP-NEXT: vpsrlq $33, %xmm1, %xmm1
@@ -1073,12 +1073,12 @@ define <4 x i64> @vec256_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
; AVX1-NEXT: vmovdqa 16(%rdi), %xmm3
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm5
-; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm6
-; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vblendvpd %xmm5, %xmm6, %xmm0, %xmm0
-; AVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm6
-; AVX1-NEXT: vpsubq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vblendvpd %xmm4, %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm5, %xmm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm6
; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm7
; AVX1-NEXT: vpsrlq $33, %xmm0, %xmm0
@@ -1112,9 +1112,9 @@ define <4 x i64> @vec256_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm3
-; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm4
-; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm1
-; AVX2-NEXT: vblendvpd %ymm2, %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm2
; AVX2-NEXT: vpsrlq $33, %ymm1, %ymm1
; AVX2-NEXT: vpmuludq %ymm3, %ymm1, %ymm1
@@ -1135,12 +1135,12 @@ define <4 x i64> @vec256_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
; XOP-NEXT: vmovdqa 16(%rdi), %xmm3
; XOP-NEXT: vpcomgtq %xmm1, %xmm3, %xmm4
; XOP-NEXT: vpcomgtq %xmm0, %xmm2, %xmm5
-; XOP-NEXT: vpsubq %xmm0, %xmm2, %xmm6
-; XOP-NEXT: vpsubq %xmm2, %xmm0, %xmm0
-; XOP-NEXT: vblendvpd %xmm5, %xmm6, %xmm0, %xmm0
-; XOP-NEXT: vpsubq %xmm1, %xmm3, %xmm6
-; XOP-NEXT: vpsubq %xmm3, %xmm1, %xmm1
-; XOP-NEXT: vblendvpd %xmm4, %xmm6, %xmm1, %xmm1
+; XOP-NEXT: vpsubq %xmm0, %xmm2, %xmm0
+; XOP-NEXT: vpxor %xmm5, %xmm0, %xmm0
+; XOP-NEXT: vpsubq %xmm0, %xmm5, %xmm0
+; XOP-NEXT: vpsubq %xmm1, %xmm3, %xmm1
+; XOP-NEXT: vpxor %xmm4, %xmm1, %xmm1
+; XOP-NEXT: vpsubq %xmm1, %xmm4, %xmm1
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm6
; XOP-NEXT: vpsrlq $1, %xmm0, %xmm7
; XOP-NEXT: vpsrlq $33, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/misched-critical-path.ll b/llvm/test/CodeGen/X86/misched-critical-path.ll
new file mode 100644
index 000000000000..2a95aaa46d4a
--- /dev/null
+++ b/llvm/test/CodeGen/X86/misched-critical-path.ll
@@ -0,0 +1,35 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin8 -misched-print-dags -o - 2>&1 > /dev/null | FileCheck %s
+; REQUIRES: asserts
+
+@sc = common global i8 0
+@uc = common global i8 0
+@ui = common global i32 0
+
+; Regression Test for PR92368.
+;
+; CHECK: SU(8): CMP8rr %4:gr8, %3:gr8, implicit-def $eflags
+; CHECK: Predecessors:
+; CHECK-NEXT: SU(6): Data Latency=0 Reg=%4
+; CHECK-NEXT: SU(7): Out Latency=0
+; CHECK-NEXT: SU(5): Out Latency=0
+; CHECK-NEXT: SU(3): Data Latency=4 Reg=%3
+define void @misched_bug() nounwind {
+entry:
+ %v0 = load i8, ptr @sc, align 1
+ %v1 = zext i8 %v0 to i32
+ %v2 = load i8, ptr @uc, align 1
+ %v3 = zext i8 %v2 to i32
+ %v4 = trunc i32 %v3 to i8
+ %v5 = trunc i32 %v1 to i8
+ %pair74 = cmpxchg ptr @sc, i8 %v4, i8 %v5 monotonic monotonic
+ %v6 = extractvalue { i8, i1 } %pair74, 0
+ %v7 = icmp eq i8 %v6, %v4
+ %v8 = zext i1 %v7 to i8
+ %v9 = zext i8 %v8 to i32
+ store i32 %v9, ptr @ui, align 4
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
diff --git a/llvm/test/CodeGen/X86/opt-pipeline.ll b/llvm/test/CodeGen/X86/opt-pipeline.ll
index 43589dc993da..3f57a03decd0 100644
--- a/llvm/test/CodeGen/X86/opt-pipeline.ll
+++ b/llvm/test/CodeGen/X86/opt-pipeline.ll
@@ -197,8 +197,6 @@
; CHECK-NEXT: BreakFalseDeps
; CHECK-NEXT: X86 Indirect Branch Tracking
; CHECK-NEXT: X86 vzeroupper inserter
-; CHECK-NEXT: MachineDominator Tree Construction
-; CHECK-NEXT: Machine Natural Loop Construction
; CHECK-NEXT: Lazy Machine Block Frequency Analysis
; CHECK-NEXT: X86 Byte/Word Instruction Fixup
; CHECK-NEXT: Lazy Machine Block Frequency Analysis
diff --git a/llvm/test/CodeGen/X86/pmul.ll b/llvm/test/CodeGen/X86/pmul.ll
index dcded7a877ab..1f82c4a5a2d9 100644
--- a/llvm/test/CodeGen/X86/pmul.ll
+++ b/llvm/test/CodeGen/X86/pmul.ll
@@ -1173,13 +1173,14 @@ define <4 x i32> @mul_v4i64_zero_lower(<4 x i32> %val1, <4 x i64> %val2) {
;
; SSE41-LABEL: mul_v4i64_zero_lower:
; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
; SSE41-NEXT: psrlq $32, %xmm2
-; SSE41-NEXT: pmuludq %xmm3, %xmm2
+; SSE41-NEXT: pmuludq %xmm0, %xmm2
; SSE41-NEXT: psrlq $32, %xmm1
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: pmuludq %xmm1, %xmm0
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; SSE41-NEXT: pmuludq %xmm1, %xmm3
+; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm2[0,2]
+; SSE41-NEXT: movaps %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i64_zero_lower:
diff --git a/llvm/test/CodeGen/X86/pr59305.ll b/llvm/test/CodeGen/X86/pr59305.ll
index 4d59192fdc4d..46c9da5a5193 100644
--- a/llvm/test/CodeGen/X86/pr59305.ll
+++ b/llvm/test/CodeGen/X86/pr59305.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-pc-linux < %s | FileCheck %s --check-prefix=X64
-; RUN: llc -mtriple=i686-pc-linux < %s | FileCheck %s --check-prefix=X86
+; RUN: sed -e "s/SETROUND/ldmxcsr/g" %s | llc -mtriple=x86_64-pc-linux - | FileCheck %s --check-prefix=X64
+; RUN: sed -e "s/SETROUND/fldcw/g" %s | llc -mtriple=i686-pc-linux - | FileCheck %s --check-prefix=X86
define double @foo(double %0) #0 {
; X64-LABEL: foo:
@@ -74,6 +74,71 @@ define double @foo(double %0) #0 {
ret double %8
}
+define double @bar(double %0) #0 {
+; X64-LABEL: bar:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: #APP
+; X64-NEXT: ldmxcsr 0
+; X64-NEXT: #NO_APP
+; X64-NEXT: wait
+; X64-NEXT: movsd {{.*#+}} xmm2 = [1.0E+0,0.0E+0]
+; X64-NEXT: movapd %xmm2, %xmm3
+; X64-NEXT: divsd %xmm0, %xmm3
+; X64-NEXT: #APP
+; X64-NEXT: ldmxcsr 0
+; X64-NEXT: #NO_APP
+; X64-NEXT: wait
+; X64-NEXT: movapd %xmm2, %xmm1
+; X64-NEXT: divsd %xmm0, %xmm1
+; X64-NEXT: #APP
+; X64-NEXT: ldmxcsr 0
+; X64-NEXT: #NO_APP
+; X64-NEXT: wait
+; X64-NEXT: divsd %xmm0, %xmm2
+; X64-NEXT: movapd %xmm3, %xmm0
+; X64-NEXT: callq fma@PLT
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
+;
+; X86-LABEL: bar:
+; X86: # %bb.0:
+; X86-NEXT: subl $28, %esp
+; X86-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-NEXT: #APP
+; X86-NEXT: fldcw 0
+; X86-NEXT: #NO_APP
+; X86-NEXT: fld1
+; X86-NEXT: fld %st(0)
+; X86-NEXT: fdiv %st(2), %st
+; X86-NEXT: #APP
+; X86-NEXT: fldcw 0
+; X86-NEXT: #NO_APP
+; X86-NEXT: fld %st(1)
+; X86-NEXT: fdiv %st(3), %st
+; X86-NEXT: #APP
+; X86-NEXT: fldcw 0
+; X86-NEXT: #NO_APP
+; X86-NEXT: fxch %st(2)
+; X86-NEXT: fdivp %st, %st(3)
+; X86-NEXT: fxch %st(2)
+; X86-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-NEXT: fstpl (%esp)
+; X86-NEXT: wait
+; X86-NEXT: calll fma
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: retl
+ call void asm sideeffect "SETROUND $0", "*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) null)
+ %2 = call double @llvm.experimental.constrained.fdiv.f64(double 1.000000e+00, double %0, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+ call void asm sideeffect "SETROUND $0", "*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) null)
+ %3 = call double @llvm.experimental.constrained.fdiv.f64(double 1.000000e+00, double %0, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+ call void asm sideeffect "SETROUND $0", "*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) null)
+ %4 = call double @llvm.experimental.constrained.fdiv.f64(double 1.000000e+00, double %0, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+ %5 = call double @llvm.experimental.constrained.fma.f64(double %2, double %3, double %4, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
+ ret double %5
+}
+
declare i32 @fesetround(i32) #0
declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata) #0
declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata) #0
diff --git a/llvm/test/CodeGen/X86/pr90703.ll b/llvm/test/CodeGen/X86/pr90703.ll
new file mode 100644
index 000000000000..c02342ffeec1
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr90703.ll
@@ -0,0 +1,21 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi | FileCheck %s
+
+define i64 @pr90730(i64 %x, i64 %y, ptr %p) {
+; CHECK-LABEL: pr90730:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movabsq $33181731808, %rax # imm = 0x7B9C90BE0
+; CHECK-NEXT: andnq %rax, %rdi, %rax
+; CHECK-NEXT: movq $0, (%rdx)
+; CHECK-NEXT: retq
+entry:
+ %ext = and i64 %y, 1
+ %xor1 = xor i64 %ext, 33181731817
+ %and1 = and i64 %xor1, %x
+ store i64 %and1, ptr %p, align 4
+ %v = load i64, ptr %p, align 4
+ %and2 = and i64 %v, 33181731808
+ %xor2 = xor i64 %and2, 33181731808
+ store i64 0, ptr %p, align 4
+ ret i64 %xor2
+}
diff --git a/llvm/test/CodeGen/X86/pr90844.ll b/llvm/test/CodeGen/X86/pr90844.ll
index 6feece7f66d8..b250c3f6f9a2 100644
--- a/llvm/test/CodeGen/X86/pr90844.ll
+++ b/llvm/test/CodeGen/X86/pr90844.ll
@@ -17,3 +17,20 @@ entry:
store <2 x i64> %5, ptr poison, align 16
ret void
}
+
+define void @foo(ptr %0) {
+; CHECK-LABEL: foo:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vpbroadcastw {{.*#+}} ymm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; CHECK-NEXT: vpxor 32(%rdi), %ymm0, %ymm1
+; CHECK-NEXT: vpxor (%rdi), %ymm0, %ymm0
+; CHECK-NEXT: vmovdqa %ymm0, (%rdi)
+; CHECK-NEXT: vmovdqa %ymm1, 32(%rdi)
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %1 = load <32 x half>, ptr %0
+ %2 = fneg <32 x half> %1
+ store <32 x half> %2, ptr %0
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/pr92569.ll b/llvm/test/CodeGen/X86/pr92569.ll
new file mode 100644
index 000000000000..f91063089e3a
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr92569.ll
@@ -0,0 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s
+
+define void @PR92569(i64 %arg, <8 x i8> %arg1) {
+; CHECK-LABEL: PR92569:
+; CHECK: # %bb.0:
+; CHECK-NEXT: testq %rdi, %rdi
+; CHECK-NEXT: je .LBB0_1
+; CHECK-NEXT: # %bb.2: # %cond.false
+; CHECK-NEXT: rep bsfq %rdi, %rax
+; CHECK-NEXT: jmp .LBB0_3
+; CHECK-NEXT: .LBB0_1:
+; CHECK-NEXT: movl $64, %eax
+; CHECK-NEXT: .LBB0_3: # %cond.end
+; CHECK-NEXT: shrb $3, %al
+; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: movzbl -24(%rsp,%rax), %eax
+; CHECK-NEXT: movl %eax, 0
+; CHECK-NEXT: retq
+ %cttz = call i64 @llvm.cttz.i64(i64 %arg, i1 false)
+ %trunc = trunc i64 %cttz to i8
+ %lshr = lshr i8 %trunc, 3
+ %extractelement = extractelement <8 x i8> %arg1, i8 %lshr
+ %freeze = freeze i8 %extractelement
+ %zext = zext i8 %freeze to i32
+ store i32 %zext, ptr addrspace(1) null, align 4
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/pr92720.ll b/llvm/test/CodeGen/X86/pr92720.ll
new file mode 100644
index 000000000000..b2543c08328c
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr92720.ll
@@ -0,0 +1,15 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s
+
+; Make sure we don't crash when shrinking the shift amount before legalization.
+define i64 @pr92720(i64 %x) {
+; CHECK-LABEL: pr92720:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movabsq $8589934592, %rax # imm = 0x200000000
+; CHECK-NEXT: retq
+ %or = or i64 %x, 255
+ %sub = sub i64 0, %or
+ %shl = shl i64 1, %sub
+ %sext = shl i64 %shl, 32
+ ret i64 %sext
+}
diff --git a/llvm/test/CodeGen/X86/pr93000.ll b/llvm/test/CodeGen/X86/pr93000.ll
new file mode 100644
index 000000000000..0bd5da48847e
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr93000.ll
@@ -0,0 +1,44 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc < %s -mtriple=x86_64- -mcpu=x86-64-v4 | FileCheck %s
+
+define void @PR93000(ptr %a0, ptr %a1, ptr %a2, <32 x i16> %a3) {
+; CHECK-LABEL: PR93000:
+; CHECK: # %bb.0: # %Entry
+; CHECK-NEXT: movl (%rdi), %eax
+; CHECK-NEXT: addq $4, %rdi
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB0_1: # %Loop
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: kmovd %eax, %k1
+; CHECK-NEXT: knotd %k1, %k2
+; CHECK-NEXT: vpblendmw (%rsi), %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqu16 (%rdx), %zmm1 {%k2}
+; CHECK-NEXT: vmovdqu64 %zmm1, (%rsi)
+; CHECK-NEXT: movl (%rdi), %eax
+; CHECK-NEXT: addq $4, %rdi
+; CHECK-NEXT: testl %eax, %eax
+; CHECK-NEXT: jne .LBB0_1
+; CHECK-NEXT: # %bb.2: # %Then
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+Entry:
+ %pre = load i32, ptr %a0, align 4
+ br label %Loop
+
+Loop: ; preds = %Loop, %Entry
+ %p = phi i32 [ %limit, %Loop ], [ %pre, %Entry ]
+ %lsr.iv.pn = phi ptr [ %lsr.iv, %Loop ], [ %a0, %Entry ]
+ %lsr.iv = getelementptr i8, ptr %lsr.iv.pn, i64 4
+ %pn = xor i32 %p, -1
+ %m = bitcast i32 %p to <32 x i1>
+ %mn = bitcast i32 %pn to <32 x i1>
+ %mload0 = tail call <32 x i16> @llvm.masked.load.v32i16.p0(ptr %a1, i32 2, <32 x i1> %m, <32 x i16> %a3)
+ %mload1 = tail call <32 x i16> @llvm.masked.load.v32i16.p0(ptr %a2, i32 2, <32 x i1> %mn, <32 x i16> %mload0)
+ store <32 x i16> %mload1, ptr %a1, align 2
+ %limit = load i32, ptr %lsr.iv, align 4
+ %icmp = icmp eq i32 %limit, 0
+ br i1 %icmp, label %Then, label %Loop
+
+Then: ; preds = %Loop
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/prefetch.ll b/llvm/test/CodeGen/X86/prefetch.ll
index 404d49b63f25..c10e0526787d 100644
--- a/llvm/test/CodeGen/X86/prefetch.ll
+++ b/llvm/test/CodeGen/X86/prefetch.ll
@@ -6,9 +6,6 @@
; RUN: llc < %s -mtriple=i686-- -mcpu=slm | FileCheck %s -check-prefix=X86-PRFCHWSSE
; RUN: llc < %s -mtriple=i686-- -mcpu=btver2 | FileCheck %s -check-prefix=X86-PRFCHWSSE
; RUN: llc < %s -mtriple=i686-- -mcpu=btver2 -mattr=-prfchw | FileCheck %s -check-prefix=X86-SSE
-; RUN: llc < %s -mtriple=i686-- -mattr=+sse,+prefetchwt1 | FileCheck %s -check-prefix=X86-PREFETCHWT1
-; RUN: llc < %s -mtriple=i686-- -mattr=-sse,+prefetchwt1 | FileCheck %s -check-prefix=X86-PREFETCHWT1
-; RUN: llc < %s -mtriple=i686-- -mattr=-sse,+3dnow,+prefetchwt1 | FileCheck %s -check-prefix=X86-PREFETCHWT1
; RUN: llc < %s -mtriple=i686-- -mattr=+3dnow | FileCheck %s -check-prefix=X86-3DNOW
; RUN: llc < %s -mtriple=i686-- -mattr=+3dnow,+prfchw | FileCheck %s -check-prefix=X86-3DNOW
@@ -16,7 +13,6 @@
; 3dnow by itself get you just the single prefetch instruction with no hints
; sse provides prefetch0/1/2/nta
; supporting prefetchw, but not 3dnow implicitly provides prefetcht0/1/2/nta regardless of sse setting as we need something to fall back to for the non-write hint.
-; supporting prefetchwt1 implies prefetcht0/1/2/nta and prefetchw regardless of other settings. this allows levels for non-write and gives us an instruction for write+T0
; 3dnow prefetch instruction will only get used if you have no other prefetch instructions enabled
; rdar://10538297
@@ -48,19 +44,6 @@ define void @t(ptr %ptr) nounwind {
; X86-PRFCHWSSE-NEXT: prefetchw (%eax)
; X86-PRFCHWSSE-NEXT: retl
;
-; X86-PREFETCHWT1-LABEL: t:
-; X86-PREFETCHWT1: # %bb.0: # %entry
-; X86-PREFETCHWT1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-PREFETCHWT1-NEXT: prefetcht2 (%eax)
-; X86-PREFETCHWT1-NEXT: prefetcht1 (%eax)
-; X86-PREFETCHWT1-NEXT: prefetcht0 (%eax)
-; X86-PREFETCHWT1-NEXT: prefetchnta (%eax)
-; X86-PREFETCHWT1-NEXT: prefetchwt1 (%eax)
-; X86-PREFETCHWT1-NEXT: prefetchwt1 (%eax)
-; X86-PREFETCHWT1-NEXT: prefetchw (%eax)
-; X86-PREFETCHWT1-NEXT: prefetchwt1 (%eax)
-; X86-PREFETCHWT1-NEXT: retl
-;
; X86-3DNOW-LABEL: t:
; X86-3DNOW: # %bb.0: # %entry
; X86-3DNOW-NEXT: movl {{[0-9]+}}(%esp), %eax
diff --git a/llvm/test/CodeGen/X86/shrink_vmul.ll b/llvm/test/CodeGen/X86/shrink_vmul.ll
index 62051d170994..f3f7f0515e30 100644
--- a/llvm/test/CodeGen/X86/shrink_vmul.ll
+++ b/llvm/test/CodeGen/X86/shrink_vmul.ll
@@ -1863,7 +1863,7 @@ define void @mul_2xi16_varconst3(ptr nocapture readonly %a, i64 %index) {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: movl c, %edx
; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: psrld $16, %xmm0
+; X86-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; X86-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: psllq $32, %xmm0
; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
@@ -1884,7 +1884,7 @@ define void @mul_2xi16_varconst3(ptr nocapture readonly %a, i64 %index) {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: movq c(%rip), %rax
; X64-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X64-SSE-NEXT: psrld $16, %xmm0
+; X64-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; X64-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE-NEXT: psllq $32, %xmm0
; X64-SSE-NEXT: movq %xmm0, (%rax,%rsi,4)
diff --git a/llvm/test/CodeGen/X86/speculative-load-hardening-gather.ll b/llvm/test/CodeGen/X86/speculative-load-hardening-gather.ll
index 6e89445bead6..7b3667420ec6 100644
--- a/llvm/test/CodeGen/X86/speculative-load-hardening-gather.ll
+++ b/llvm/test/CodeGen/X86/speculative-load-hardening-gather.ll
@@ -558,28 +558,6 @@ entry:
ret <8 x i64> %v
}
-declare void @llvm.x86.avx512.gatherpf.qps.512(i8, <8 x i64>, ptr, i32, i32);
-
-define void @test_llvm_x86_avx512_gatherpf_qps_512(<8 x i64> %iv, ptr %b) #1 {
-; CHECK-LABEL: test_llvm_x86_avx512_gatherpf_qps_512:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movq %rsp, %rax
-; CHECK-NEXT: movq $-1, %rcx
-; CHECK-NEXT: sarq $63, %rax
-; CHECK-NEXT: kxnorw %k0, %k0, %k1
-; CHECK-NEXT: orq %rax, %rdi
-; CHECK-NEXT: vpbroadcastq %rax, %zmm1
-; CHECK-NEXT: vporq %zmm0, %zmm1, %zmm0
-; CHECK-NEXT: vgatherpf0qps (%rdi,%zmm0,4) {%k1}
-; CHECK-NEXT: shlq $47, %rax
-; CHECK-NEXT: orq %rax, %rsp
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: retq
-entry:
- call void @llvm.x86.avx512.gatherpf.qps.512(i8 -1, <8 x i64> %iv, ptr %b, i32 4, i32 3)
- ret void
-}
-
declare <4 x float> @llvm.x86.avx512.gather3siv4.sf(<4 x float>, ptr, <4 x i32>, i8, i32)
define <4 x float> @test_llvm_x86_avx512_gather3siv4_sf(ptr %b, <4 x i32> %iv) #2 {
diff --git a/llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16.ll b/llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16.ll
index e4eca6b744af..ed7109c416e7 100644
--- a/llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16.ll
@@ -265,7 +265,6 @@ define i32 @stack_fold_fpclassph_mask(<32 x half> %a0, ptr %p) {
}
define i8 @stack_fold_fpclasssh(<8 x half> %a0) {
- ;CHECK-LABEl: stack_fold_fpclasssh:
; CHECK-LABEL: stack_fold_fpclasssh:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
diff --git a/llvm/test/CodeGen/X86/stack-frame-layout-remarks.ll b/llvm/test/CodeGen/X86/stack-frame-layout-remarks.ll
index d32a37efcb5a..cd5edcf2ae50 100644
--- a/llvm/test/CodeGen/X86/stack-frame-layout-remarks.ll
+++ b/llvm/test/CodeGen/X86/stack-frame-layout-remarks.ll
@@ -35,7 +35,7 @@ entry:
declare void @llvm.dbg.declare(metadata, metadata, metadata) #0
; BOTH: Function: cleanup_array
-; BOTH-Next: Offset: [SP+4], Type: Protector, Align: 16, Size: 4
+; BOTH-NEXT: Offset: [SP+4], Type: Protector, Align: 16, Size: 4
; DEBUG: a @ dot.c:13
; STRIPPED-NOT: a @ dot.c:13
; BOTH: Offset: [SP-4], Type: Spill, Align: 8, Size: 4
diff --git a/llvm/test/CodeGen/X86/unfoldMemoryOperand.mir b/llvm/test/CodeGen/X86/unfoldMemoryOperand.mir
index 4c715b894fae..af57d972f224 100644
--- a/llvm/test/CodeGen/X86/unfoldMemoryOperand.mir
+++ b/llvm/test/CodeGen/X86/unfoldMemoryOperand.mir
@@ -23,7 +23,7 @@
br i1 %6, label %4, label %5, !llvm.loop !9
}
- attributes #0 = { nofree norecurse nosync nounwind uwtable writeonly mustprogress "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+x87,-aes,-avx,-avx2,-avx512bf16,-avx512bitalg,-avx512bw,-avx512cd,-avx512dq,-avx512er,-avx512f,-avx512ifma,-avx512pf,-avx512vbmi,-avx512vbmi2,-avx512vl,-avx512vnni,-avx512vp2intersect,-avx512vpopcntdq,-avxvnni,-f16c,-fma,-fma4,-gfni,-kl,-pclmul,-sha,-sse,-sse2,-sse3,-sse4.1,-sse4.2,-sse4a,-ssse3,-vaes,-vpclmulqdq,-widekl,-xop" "tune-cpu"="generic" }
+ attributes #0 = { nofree norecurse nosync nounwind uwtable writeonly mustprogress "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+x87,-aes,-avx,-avx2,-avx512bf16,-avx512bitalg,-avx512bw,-avx512cd,-avx512dq,-avx512f,-avx512ifma,-avx512pf,-avx512vbmi,-avx512vbmi2,-avx512vl,-avx512vnni,-avx512vp2intersect,-avx512vpopcntdq,-avxvnni,-f16c,-fma,-fma4,-gfni,-kl,-pclmul,-sha,-sse,-sse2,-sse3,-sse4.1,-sse4.2,-sse4a,-ssse3,-vaes,-vpclmulqdq,-widekl,-xop" "tune-cpu"="generic" }
!llvm.module.flags = !{!0, !1}
!llvm.ident = !{!2}
diff --git a/llvm/test/CodeGen/X86/vec-strict-cmp-512-skx.ll b/llvm/test/CodeGen/X86/vec-strict-cmp-512-skx.ll
new file mode 100644
index 000000000000..3028b7496737
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vec-strict-cmp-512-skx.ll
@@ -0,0 +1,40 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64 -mcpu=skx | FileCheck %s --check-prefixes=SKX
+
+;; Test no crash for AVX512 targets without prefer-vector-width=512.
+
+define <16 x i32> @test_v16f32_oeq_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
+; SKX-LABEL: test_v16f32_oeq_q:
+; SKX: # %bb.0:
+; SKX-NEXT: vcmpeqps %ymm7, %ymm5, %k1
+; SKX-NEXT: vcmpeqps %ymm6, %ymm4, %k2
+; SKX-NEXT: vpblendmd %ymm0, %ymm2, %ymm0 {%k2}
+; SKX-NEXT: vpblendmd %ymm1, %ymm3, %ymm1 {%k1}
+; SKX-NEXT: retq
+ %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
+ <16 x float> %f1, <16 x float> %f2, metadata !"oeq",
+ metadata !"fpexcept.strict") #0
+ %res = select <16 x i1> %cond, <16 x i32> %a, <16 x i32> %b
+ ret <16 x i32> %res
+}
+
+define <8 x i32> @test_v8f64_oeq_q(<8 x i32> %a, <8 x i32> %b, <8 x double> %f1, <8 x double> %f2) #0 {
+; SKX-LABEL: test_v8f64_oeq_q:
+; SKX: # %bb.0:
+; SKX-NEXT: vcmpeqpd %ymm4, %ymm2, %k0
+; SKX-NEXT: vcmpeqpd %ymm5, %ymm3, %k1
+; SKX-NEXT: kshiftlb $4, %k1, %k1
+; SKX-NEXT: korb %k1, %k0, %k1
+; SKX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; SKX-NEXT: retq
+ %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
+ <8 x double> %f1, <8 x double> %f2, metadata !"oeq",
+ metadata !"fpexcept.strict") #0
+ %res = select <8 x i1> %cond, <8 x i32> %a, <8 x i32> %b
+ ret <8 x i32> %res
+}
+
+declare <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(<16 x float>, <16 x float>, metadata, metadata)
+declare <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(<8 x double>, <8 x double>, metadata, metadata)
+
+attributes #0 = { nounwind strictfp "min-legal-vector-width"="0" }
diff --git a/llvm/test/CodeGen/X86/xray-custom-log.ll b/llvm/test/CodeGen/X86/xray-custom-log.ll
index c41aa1e98681..9ccda8ecb048 100644
--- a/llvm/test/CodeGen/X86/xray-custom-log.ll
+++ b/llvm/test/CodeGen/X86/xray-custom-log.ll
@@ -75,6 +75,21 @@ define i32 @typedevent() nounwind "function-instrument"="xray-always" !dbg !2 {
; CHECK-LABEL: Lxray_sleds_start1:
; CHECK: .quad {{.*}}xray_typed_event_sled_0
+; Verify that custom event calls are done with proper stack alignment,
+; even in leaf functions.
+@leaf_func.event_id = internal constant i32 1, align 4
+define void @leaf_func() "function-instrument"="xray-always" "frame-pointer"="none" nounwind {
+ ; CHECK-LABEL: leaf_func:
+ ; CHECK-NEXT: .Lfunc_begin2:
+ ; CHECK: pushq %rax
+ ; CHECK: movl $leaf_func.event_id, %eax
+ ; CHECK-NEXT: movl $4, %ecx
+ ; CHECK-NEXT: .p2align 1, 0x90
+ ; CHECK-NEXT: .Lxray_event_sled_1:
+ call void @llvm.xray.customevent(ptr @leaf_func.event_id, i64 4)
+ ret void
+}
+
declare void @llvm.xray.customevent(ptr, i64)
declare void @llvm.xray.typedevent(i64, ptr, i64)
diff --git a/llvm/test/CodeGen/X86/xray-tail-call-sled.ll b/llvm/test/CodeGen/X86/xray-tail-call-sled.ll
index 4d0c359f0dc3..126e5db52a5b 100644
--- a/llvm/test/CodeGen/X86/xray-tail-call-sled.ll
+++ b/llvm/test/CodeGen/X86/xray-tail-call-sled.ll
@@ -66,3 +66,54 @@ define dso_local i32 @caller() nounwind noinline uwtable "function-instrument"="
; CHECK-MACOS: [[IDX:lxray_fn_idx[0-9]+]]:
; CHECK-MACOS-NEXT: .quad lxray_sleds_start1-[[IDX]]
; CHECK-MACOS-NEXT: .quad 2
+
+define dso_local i32 @conditional_tail_call(i32 %cond) nounwind noinline uwtable "function-instrument"="xray-always" {
+; CHECK-LABEL: conditional_tail_call:
+; CHECK: .p2align 1, 0x90
+; CHECK-LABEL: Lxray_sled_4:
+; CHECK: .ascii "\353\t"
+; CHECK-NEXT: nopw 512(%rax,%rax)
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: je {{\.?Ltmp5}}
+; CHECK: .p2align 1, 0x90
+; CHECK-LABEL: Lxray_sled_5:
+; CHECK-NEXT: .ascii "\353\t"
+; CHECK-NEXT: nopw 512(%rax,%rax)
+; CHECK-LABEL: Ltmp6:
+; CHECK-NEXT: jmp {{.*}}callee {{.*}}# TAILCALL
+; CHECK-LABEL: Ltmp5:
+; CHECK: xorl %eax, %eax
+; CHECK-NEXT: .p2align 1, 0x90
+; CHECK-LABEL: Lxray_sled_6:
+; CHECK-NEXT: retq
+; CHECK-NEXT: nopw %cs:512(%rax,%rax)
+ %cmp = icmp ne i32 %cond, 0
+ br i1 %cmp, label %docall, label %ret
+docall:
+ %retval = tail call i32 @callee()
+ ret i32 %retval
+ret:
+ ret i32 0
+}
+
+; CHECK-LINUX-LABEL: .section xray_instr_map,"ao",@progbits,conditional_tail_call{{$}}
+; CHECK-LINUX-LABEL: .Lxray_sleds_start2:
+; CHECK-LINUX: .quad .Lxray_sled_4
+; CHECK-LINUX: .quad .Lxray_sled_5
+; CHECK-LINUX: .quad .Lxray_sled_6
+; CHECK-LINUX-LABEL: .Lxray_sleds_end2:
+; CHECK-LINUX-LABEL: .section xray_fn_idx,"ao",@progbits,conditional_tail_call{{$}}
+; CHECK-LINUX: [[IDX:\.Lxray_fn_idx[0-9]+]]:
+; CHECK-LINUX-NEXT: .quad .Lxray_sleds_start2-[[IDX]]
+; CHECK-LINUX-NEXT: .quad 3
+
+; CHECK-MACOS-LABEL: .section __DATA,xray_instr_map,regular,live_support{{$}}
+; CHECK-MACOS-LABEL: lxray_sleds_start2:
+; CHECK-MACOS: .quad Lxray_sled_4
+; CHECK-MACOS: .quad Lxray_sled_5
+; CHECK-MACOS: .quad Lxray_sled_6
+; CHECK-MACOS-LABEL: Lxray_sleds_end2:
+; CHECK-MACOS-LABEL: .section __DATA,xray_fn_idx,regular,live_support{{$}}
+; CHECK-MACOS: [[IDX:lxray_fn_idx[0-9]+]]:
+; CHECK-MACOS-NEXT: .quad lxray_sleds_start2-[[IDX]]
+; CHECK-MACOS-NEXT: .quad 3
diff --git a/llvm/test/DebugInfo/X86/debug-names-types.ll b/llvm/test/DebugInfo/X86/debug-names-types.ll
index ff0d4d52c1f0..81016e3874ee 100644
--- a/llvm/test/DebugInfo/X86/debug-names-types.ll
+++ b/llvm/test/DebugInfo/X86/debug-names-types.ll
@@ -48,11 +48,6 @@
; CHECK-NEXT: DW_IDX_die_offset: DW_FORM_ref4
; CHECK-NEXT: DW_IDX_parent: DW_FORM_flag_present
; CHECK-NEXT: }
-; CHECK-NEXT: Abbreviation [[ABBREV1:0x[0-9a-f]*]] {
-; CHECK-NEXT: Tag: DW_TAG_structure_type
-; CHECK-NEXT: DW_IDX_die_offset: DW_FORM_ref4
-; CHECK-NEXT: DW_IDX_parent: DW_FORM_flag_present
-; CHECK-NEXT: }
; CHECK-NEXT: Abbreviation [[ABBREV2:0x[0-9a-f]*]] {
; CHECK-NEXT: Tag: DW_TAG_subprogram
; CHECK-NEXT: DW_IDX_die_offset: DW_FORM_ref4
@@ -88,12 +83,6 @@
; CHECK-NEXT: DW_IDX_die_offset: 0x00000023
; CHECK-NEXT: DW_IDX_parent: <parent not indexed>
; CHECK-NEXT: }
-; CHECK-NEXT: Entry @ {{.+}} {
-; CHECK-NEXT: Abbrev: [[ABBREV1]]
-; CHECK-NEXT: Tag: DW_TAG_structure_type
-; CHECK-NEXT: DW_IDX_die_offset: 0x00000042
-; CHECK-NEXT: DW_IDX_parent: <parent not indexed>
-; CHECK-NEXT: }
; CHECK-NEXT: }
; CHECK-NEXT: ]
; CHECK-NEXT: Bucket 2 [
@@ -130,7 +119,7 @@
; CHECK-SPLIT: Foreign TU count: 1
; CHECK-SPLIT-NEXT: Bucket count: 4
; CHECK-SPLIT-NEXT: Name count: 4
-; CHECK-SPLIT-NEXT: Abbreviations table size: 0x2D
+; CHECK-SPLIT-NEXT: Abbreviations table size: 0x25
; CHECK-SPLIT-NEXT: Augmentation: 'LLVM0700'
; CHECK-SPLIT-NEXT: }
; CHECK-SPLIT-NEXT: Compilation Unit offsets [
@@ -151,11 +140,6 @@
; CHECK-SPLIT-NEXT: DW_IDX_die_offset: DW_FORM_ref4
; CHECK-SPLIT-NEXT: DW_IDX_parent: DW_FORM_flag_present
; CHECK-SPLIT-NEXT: }
-; CHECK-SPLIT-NEXT: Abbreviation [[ABBREV:0x[0-9a-f]*]] {
-; CHECK-SPLIT-NEXT: Tag: DW_TAG_structure_type
-; CHECK-SPLIT-NEXT: DW_IDX_die_offset: DW_FORM_ref4
-; CHECK-SPLIT-NEXT: DW_IDX_parent: DW_FORM_flag_present
-; CHECK-SPLIT-NEXT: }
; CHECK-SPLIT-NEXT: Abbreviation [[ABBREV3:0x[0-9a-f]*]] {
; CHECK-SPLIT-NEXT: Tag: DW_TAG_subprogram
; CHECK-SPLIT-NEXT: DW_IDX_die_offset: DW_FORM_ref4
@@ -191,12 +175,6 @@
; CHECK-SPLIT-NEXT: DW_IDX_die_offset: 0x00000021
; CHECK-SPLIT-NEXT: DW_IDX_parent: <parent not indexed>
; CHECK-SPLIT-NEXT: }
-; CHECK-SPLIT-NEXT: Entry @ {{.*}} {
-; CHECK-SPLIT-NEXT: Abbrev: [[ABBREV]]
-; CHECK-SPLIT-NEXT: Tag: DW_TAG_structure_type
-; CHECK-SPLIT-NEXT: DW_IDX_die_offset: 0x00000039
-; CHECK-SPLIT-NEXT: DW_IDX_parent: <parent not indexed>
-; CHECK-SPLIT-NEXT: }
; CHECK-SPLIT-NEXT: }
; CHECK-SPLIT-NEXT: ]
; CHECK-SPLIT-NEXT: Bucket 2 [
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/mem-attr.ll b/llvm/test/Instrumentation/HWAddressSanitizer/mem-attr.ll
new file mode 100644
index 000000000000..c0e370f20213
--- /dev/null
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/mem-attr.ll
@@ -0,0 +1,15 @@
+; Test that HWASan removes writeonly and memory(*) attributes from instrumented functions.
+; RUN: opt -S -passes=hwasan %s | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
+target triple = "aarch64-unknown-linux-android30"
+
+; CHECK: define dso_local void @test_writeonly(ptr nocapture noundef %p) local_unnamed_addr #0
+define dso_local void @test_writeonly(ptr nocapture noundef writeonly %p) local_unnamed_addr #0 {
+entry:
+ store i32 42, ptr %p, align 4
+ ret void
+}
+
+; CHECK: attributes #0 = { sanitize_hwaddress uwtable }
+attributes #0 = { sanitize_hwaddress memory(argmem: write) uwtable }
diff --git a/llvm/test/Linker/darwin-target-variant.ll b/llvm/test/Linker/darwin-target-variant.ll
new file mode 100644
index 000000000000..7d46b2dda4a9
--- /dev/null
+++ b/llvm/test/Linker/darwin-target-variant.ll
@@ -0,0 +1,42 @@
+; RUN: rm -rf %t && split-file %s %t
+; RUN: llvm-link %t/1.ll %t/2.ll -S -o - | FileCheck %s
+; CHECK: {i32 2, !"darwin.target_variant.triple", !"x86_64-apple-ios13.1-macabi"}
+
+; RUN: llvm-link %t/1.ll %t/old.ll -S -o - | FileCheck %s -check-prefix OLD
+; OLD: {i32 4, !"darwin.target_variant.triple", !"x86_64-apple-ios14.0-macabi"}
+
+;--- 1.ll
+target triple = "x86_64-apple-macos10.15";
+!llvm.module.flags = !{!0, !1, !2};
+!0 = !{i32 2, !"SDK Version", [3 x i32] [ i32 10, i32 15, i32 1 ] };
+!1 = !{i32 2, !"darwin.target_variant.triple", !"x86_64-apple-ios13.1-macabi"};
+!2 = !{i32 2, !"darwin.target_variant.SDK Version", [2 x i32] [ i32 13, i32 2 ] };
+
+define void @foo() {
+entry:
+ ret void
+}
+
+;--- 2.ll
+target triple = "x86_64-apple-macos10.15";
+!llvm.module.flags = !{!0, !1, !2};
+!0 = !{i32 2, !"SDK Version", [3 x i32] [ i32 10, i32 15, i32 1 ] };
+!1 = !{i32 2, !"darwin.target_variant.triple", !"x86_64-apple-ios14.0-macabi"};
+!2 = !{i32 2, !"darwin.target_variant.SDK Version", [2 x i32] [ i32 13, i32 2 ] };
+
+define void @bar() {
+entry:
+ ret void
+}
+
+;--- old.ll
+target triple = "x86_64-apple-macos10.15";
+!llvm.module.flags = !{!0, !1, !2};
+!0 = !{i32 2, !"SDK Version", [3 x i32] [ i32 10, i32 15, i32 1 ] };
+!1 = !{i32 4, !"darwin.target_variant.triple", !"x86_64-apple-ios14.0-macabi"};
+!2 = !{i32 2, !"darwin.target_variant.SDK Version", [2 x i32] [ i32 13, i32 2 ] };
+
+define void @old() {
+entry:
+ ret void
+}
diff --git a/llvm/test/MC/AArch64/FP8/system-regs.s b/llvm/test/MC/AArch64/FP8/system-regs.s
index 4a396d4dff82..8959a7727b19 100644
--- a/llvm/test/MC/AArch64/FP8/system-regs.s
+++ b/llvm/test/MC/AArch64/FP8/system-regs.s
@@ -1,11 +1,9 @@
-// RUN: llvm-mc -triple=aarch64 -show-encoding -mattr=+fpmr < %s \
+// RUN: llvm-mc -triple=aarch64 -show-encoding < %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-// RUN: not llvm-mc -triple=aarch64 -show-encoding < %s 2>&1 \
-// RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+fpmr < %s \
-// RUN: | llvm-objdump -d --mattr=+fpmr - | FileCheck %s --check-prefix=CHECK-INST
-// RUN: llvm-mc -triple=aarch64 -filetype=obj -mattr=+fpmr < %s \
-// RUN: | llvm-objdump --mattr=-fpmr -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+// RUN: llvm-mc -triple=aarch64 -filetype=obj < %s \
+// RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj < %s \
+// RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
// --------------------------------------------------------------------------//
// read
@@ -13,14 +11,13 @@
mrs x3, FPMR
// CHECK-INST: mrs x3, FPMR
// CHECK-ENCODING: [0x43,0x44,0x3b,0xd5]
-// CHECK-ERROR: expected readable system register
-// CHECK-UNKNOWN: d53b4443 mrs x3, S3_3_C4_C4_2
+// CHECK-UNKNOWN: d53b4443 mrs x3, FPMR
+
mrs x3, ID_AA64FPFR0_EL1
// CHECK-INST: mrs x3, ID_AA64FPFR0_EL1
// CHECK-ENCODING: [0xe3,0x04,0x38,0xd5]
-// CHECK-ERROR: expected readable system register
-// CHECK-UNKNOWN: d53804e3 mrs x3, S3_0_C0_C4_7
+// CHECK-UNKNOWN: d53804e3 mrs x3, ID_AA64FPFR0_EL1
// --------------------------------------------------------------------------//
// write
@@ -28,5 +25,4 @@ mrs x3, ID_AA64FPFR0_EL1
msr FPMR, x3
// CHECK-INST: msr FPMR, x3
// CHECK-ENCODING: [0x43,0x44,0x1b,0xd5]
-// CHECK-ERROR: expected writable system register or pstate
-// CHECK-UNKNOWN: d51b4443 msr S3_3_C4_C4_2, x3
+// CHECK-UNKNOWN: d51b4443 msr FPMR, x3
\ No newline at end of file
diff --git a/llvm/test/MC/AArch64/SVE/condtion-codes.s b/llvm/test/MC/AArch64/SVE/condition-codes.s
index c1d8e2ad715d..c1d8e2ad715d 100644
--- a/llvm/test/MC/AArch64/SVE/condtion-codes.s
+++ b/llvm/test/MC/AArch64/SVE/condition-codes.s
diff --git a/llvm/test/MC/AArch64/SVE/sqdecd-diagnostics.s b/llvm/test/MC/AArch64/SVE/sqdecd-diagnostics.s
index 658af848c363..96b14b9ec112 100644
--- a/llvm/test/MC/AArch64/SVE/sqdecd-diagnostics.s
+++ b/llvm/test/MC/AArch64/SVE/sqdecd-diagnostics.s
@@ -18,9 +18,9 @@ sqdecd sp
// CHECK-NEXT: sqdecd sp
// CHECK-NOT: [[@LINE-1]]:{{[0-9]+}}:
-uqdecd z0.s
+sqdecd z0.s
// CHECK: [[@LINE-1]]:{{[0-9]+}}: error: invalid element width
-// CHECK-NEXT: uqdecd z0.s
+// CHECK-NEXT: sqdecd z0.s
// CHECK-NOT: [[@LINE-1]]:{{[0-9]+}}:
diff --git a/llvm/test/MC/AArch64/SVE/sqincp-diagnostics.s b/llvm/test/MC/AArch64/SVE/sqincp-diagnostics.s
index 2dfd49584908..862af7c9203b 100644
--- a/llvm/test/MC/AArch64/SVE/sqincp-diagnostics.s
+++ b/llvm/test/MC/AArch64/SVE/sqincp-diagnostics.s
@@ -3,48 +3,48 @@
// ------------------------------------------------------------------------- //
// Invalid result register
-uqdecp sp, p0
+sqincp sp, p0
// CHECK: [[@LINE-1]]:{{[0-9]+}}: error: invalid operand
-// CHECK-NEXT: uqdecp sp, p0
+// CHECK-NEXT: sqincp sp, p0
// CHECK-NOT: [[@LINE-1]]:{{[0-9]+}}:
-uqdecp z0.b, p0
+sqincp z0.b, p0
// CHECK: [[@LINE-1]]:{{[0-9]+}}: error: invalid element width
-// CHECK-NEXT: uqdecp z0.b, p0
+// CHECK-NEXT: sqincp z0.b, p0
// CHECK-NOT: [[@LINE-1]]:{{[0-9]+}}:
-uqdecp x0, p0.b, w0
+sqincp w0, p0.b, w0
// CHECK: [[@LINE-1]]:{{[0-9]+}}: error: invalid operand
-// CHECK-NEXT: uqdecp x0, p0.b, w0
+// CHECK-NEXT: sqincp w0, p0.b, w0
// CHECK-NOT: [[@LINE-1]]:{{[0-9]+}}:
-uqdecp x0, p0.b, x1
+sqincp x0, p0.b, x1
// CHECK: [[@LINE-1]]:{{[0-9]+}}: error: invalid operand
-// CHECK-NEXT: uqdecp x0, p0.b, x1
+// CHECK-NEXT: sqincp x0, p0.b, x1
// CHECK-NOT: [[@LINE-1]]:{{[0-9]+}}:
// ------------------------------------------------------------------------- //
// Invalid predicate operand
-uqdecp x0, p0
+sqincp x0, p0
// CHECK: [[@LINE-1]]:{{[0-9]+}}: error: invalid predicate register
-// CHECK-NEXT: uqdecp x0, p0
+// CHECK-NEXT: sqincp x0, p0
// CHECK-NOT: [[@LINE-1]]:{{[0-9]+}}:
-uqdecp x0, p0/z
+sqincp x0, p0/z
// CHECK: [[@LINE-1]]:{{[0-9]+}}: error: invalid predicate register
-// CHECK-NEXT: uqdecp x0, p0/z
+// CHECK-NEXT: sqincp x0, p0/z
// CHECK-NOT: [[@LINE-1]]:{{[0-9]+}}:
-uqdecp x0, p0/m
+sqincp x0, p0/m
// CHECK: [[@LINE-1]]:{{[0-9]+}}: error: invalid predicate register
-// CHECK-NEXT: uqdecp x0, p0/m
+// CHECK-NEXT: sqincp x0, p0/m
// CHECK-NOT: [[@LINE-1]]:{{[0-9]+}}:
-uqdecp x0, p0.q
+sqincp x0, p0.q
// CHECK: [[@LINE-1]]:{{[0-9]+}}: error: invalid predicate register
-// CHECK-NEXT: uqdecp x0, p0.q
+// CHECK-NEXT: sqincp x0, p0.q
// CHECK-NOT: [[@LINE-1]]:{{[0-9]+}}:
sqincp z0.d, p0.b
diff --git a/llvm/test/MC/AMDGPU/amd_kernel_code_t.s b/llvm/test/MC/AMDGPU/amd_kernel_code_t.s
new file mode 100644
index 000000000000..052ec0bfabb8
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/amd_kernel_code_t.s
@@ -0,0 +1,171 @@
+; RUN: llvm-mc -triple=amdgcn-mesa-mesa3d -mcpu=gfx900 -filetype=asm < %s | FileCheck --check-prefix=ASM %s
+; RUN: llvm-mc -triple=amdgcn-mesa-mesa3d -mcpu=gfx900 -filetype=obj < %s > %t
+; RUN: llvm-objdump -s %t | FileCheck --check-prefix=OBJDUMP %s
+
+; OBJDUMP: Contents of section .known_is_dynamic_callstack:
+; OBJDUMP: 0030 00000000 00000000 00001000 00000000
+
+; OBJDUMP: Contents of section .known_wavefront_sgpr_count:
+; OBJDUMP: 0050 00000000 01000000 00000000 00000000
+
+; OBJDUMP: Contents of section .known_workitem_vgpr_count:
+; OBJDUMP: 0050 00000000 00000100 00000000 00000000
+
+; OBJDUMP: Contents of section .known_workitem_private_segment_byte_size:
+; OBJDUMP: 0030 00000000 00000000 00000000 01000000
+
+; OBJDUMP: Contents of section .known_granulated_workitem_vgpr_count:
+; OBJDUMP: 0030 01000000 00000000 00000000 00000000
+
+; OBJDUMP: Contents of section .known_enable_sgpr_workgroup_id_x:
+; OBJDUMP: 0030 00000000 80000000 00000000 00000000
+
+; OBJDUMP: Contents of section .unknown_is_dynamic_callstack:
+; OBJDUMP: 0030 00000000 00000000 00001000 00000000
+
+; OBJDUMP: Contents of section .unknown_wavefront_sgpr_count:
+; OBJDUMP: 0050 00000000 01000000 00000000 00000000
+
+; OBJDUMP: Contents of section .unknown_workitem_vgpr_count:
+; OBJDUMP: 0050 00000000 00000100 00000000 00000000
+
+; OBJDUMP: Contents of section .unknown_workitem_private_segment_byte_size:
+; OBJDUMP: 0030 00000000 00000000 00000000 01000000
+
+; OBJDUMP: Contents of section .unknown_granulated_workitem_vgpr_count:
+; OBJDUMP: 0030 01000000 00000000 00000000 00000000
+
+; OBJDUMP: Contents of section .unknown_enable_sgpr_workgroup_id_x:
+; OBJDUMP: 0030 00000000 80000000 00000000 00000000
+
+.set known, 1
+
+; ASM-LABEL: known_is_dynamic_callstack:
+; ASM: is_dynamic_callstack = 1
+.section .known_is_dynamic_callstack
+known_is_dynamic_callstack:
+ .amd_kernel_code_t
+ is_dynamic_callstack = known
+ .end_amd_kernel_code_t
+ s_endpgm
+
+; ASM-LABEL: known_wavefront_sgpr_count:
+; ASM: wavefront_sgpr_count = 1
+.section .known_wavefront_sgpr_count
+known_wavefront_sgpr_count:
+ .amd_kernel_code_t
+ wavefront_sgpr_count = known
+ .end_amd_kernel_code_t
+ s_endpgm
+
+; ASM-LABEL: known_workitem_vgpr_count:
+; ASM: workitem_vgpr_count = 1
+.section .known_workitem_vgpr_count
+known_workitem_vgpr_count:
+ .amd_kernel_code_t
+ workitem_vgpr_count = known
+ .end_amd_kernel_code_t
+ s_endpgm
+
+; ASM-LABEL: known_workitem_private_segment_byte_size:
+; ASM: workitem_private_segment_byte_size = 1
+.section .known_workitem_private_segment_byte_size
+known_workitem_private_segment_byte_size:
+ .amd_kernel_code_t
+ workitem_private_segment_byte_size = known
+ .end_amd_kernel_code_t
+ s_endpgm
+
+; ASM-LABEL: known_granulated_workitem_vgpr_count:
+; ASM: granulated_workitem_vgpr_count = 1
+.section .known_granulated_workitem_vgpr_count
+known_granulated_workitem_vgpr_count:
+ .amd_kernel_code_t
+ granulated_workitem_vgpr_count = known
+ .end_amd_kernel_code_t
+ s_endpgm
+
+; ASM-LABEL: known_enable_sgpr_workgroup_id_x:
+; ASM: enable_sgpr_workgroup_id_x = 1
+.section .known_enable_sgpr_workgroup_id_x
+known_enable_sgpr_workgroup_id_x:
+ .amd_kernel_code_t
+ enable_sgpr_workgroup_id_x = known
+ .end_amd_kernel_code_t
+ s_endpgm
+
+; ASM-LABEL: unknown_is_dynamic_callstack:
+; ASM: is_dynamic_callstack = unknown
+.section .unknown_is_dynamic_callstack
+unknown_is_dynamic_callstack:
+ .amd_kernel_code_t
+ is_dynamic_callstack = unknown
+ .end_amd_kernel_code_t
+ s_endpgm
+
+; ASM-LABEL: unknown_wavefront_sgpr_count:
+; ASM: wavefront_sgpr_count = unknown
+.section .unknown_wavefront_sgpr_count
+unknown_wavefront_sgpr_count:
+ .amd_kernel_code_t
+ wavefront_sgpr_count = unknown
+ .end_amd_kernel_code_t
+ s_endpgm
+
+; ASM-LABEL: unknown_workitem_vgpr_count:
+; ASM: workitem_vgpr_count = unknown
+.section .unknown_workitem_vgpr_count
+unknown_workitem_vgpr_count:
+ .amd_kernel_code_t
+ workitem_vgpr_count = unknown
+ .end_amd_kernel_code_t
+ s_endpgm
+
+; ASM-LABEL: unknown_workitem_private_segment_byte_size:
+; ASM: workitem_private_segment_byte_size = unknown
+.section .unknown_workitem_private_segment_byte_size
+unknown_workitem_private_segment_byte_size:
+ .amd_kernel_code_t
+ workitem_private_segment_byte_size = unknown
+ .end_amd_kernel_code_t
+ s_endpgm
+
+; ASM-LABEL: unknown_granulated_workitem_vgpr_count:
+; ASM: granulated_workitem_vgpr_count = ((0&4294967232)|(unknown&63))&63
+; ASM: granulated_wavefront_sgpr_count = (((0&4294967232)|(unknown&63))>>6)&15
+; ASM: priority = (((0&4294967232)|(unknown&63))>>10)&3
+; ASM: float_mode = (((0&4294967232)|(unknown&63))>>12)&255
+; ASM: priv = (((0&4294967232)|(unknown&63))>>20)&1
+; ASM: enable_dx10_clamp = (((0&4294967232)|(unknown&63))>>21)&1
+; ASM: debug_mode = (((0&4294967232)|(unknown&63))>>22)&1
+; ASM: enable_ieee_mode = (((0&4294967232)|(unknown&63))>>23)&1
+; ASM: enable_wgp_mode = (((0&4294967232)|(unknown&63))>>29)&1
+; ASM: enable_mem_ordered = (((0&4294967232)|(unknown&63))>>30)&1
+; ASM: enable_fwd_progress = (((0&4294967232)|(unknown&63))>>31)&1
+.section .unknown_granulated_workitem_vgpr_count
+unknown_granulated_workitem_vgpr_count:
+ .amd_kernel_code_t
+ granulated_workitem_vgpr_count = unknown
+ .end_amd_kernel_code_t
+ s_endpgm
+
+; ASM-LABEL: unknown_enable_sgpr_workgroup_id_x:
+; ASM: enable_sgpr_private_segment_wave_byte_offset = ((0&4294967167)|((unknown&1)<<7))&1
+; ASM: user_sgpr_count = (((0&4294967167)|((unknown&1)<<7))>>1)&31
+; ASM: enable_trap_handler = (((0&4294967167)|((unknown&1)<<7))>>6)&1
+; ASM: enable_sgpr_workgroup_id_x = (((0&4294967167)|((unknown&1)<<7))>>7)&1
+; ASM: enable_sgpr_workgroup_id_y = (((0&4294967167)|((unknown&1)<<7))>>8)&1
+; ASM: enable_sgpr_workgroup_id_z = (((0&4294967167)|((unknown&1)<<7))>>9)&1
+; ASM: enable_sgpr_workgroup_info = (((0&4294967167)|((unknown&1)<<7))>>10)&1
+; ASM: enable_vgpr_workitem_id = (((0&4294967167)|((unknown&1)<<7))>>11)&3
+; ASM: enable_exception_msb = (((0&4294967167)|((unknown&1)<<7))>>13)&3
+; ASM: granulated_lds_size = (((0&4294967167)|((unknown&1)<<7))>>15)&511
+; ASM: enable_exception = (((0&4294967167)|((unknown&1)<<7))>>24)&127
+.section .unknown_enable_sgpr_workgroup_id_x
+unknown_enable_sgpr_workgroup_id_x:
+ .amd_kernel_code_t
+ enable_sgpr_workgroup_id_x = unknown
+ .end_amd_kernel_code_t
+ s_endpgm
+
+.set unknown, 1
diff --git a/llvm/test/MC/AsmParser/assembler-expressions-inlineasm.ll b/llvm/test/MC/AsmParser/assembler-expressions-inlineasm.ll
index 35f110f37e2f..9d9a38f5b5a5 100644
--- a/llvm/test/MC/AsmParser/assembler-expressions-inlineasm.ll
+++ b/llvm/test/MC/AsmParser/assembler-expressions-inlineasm.ll
@@ -1,13 +1,17 @@
-; RUN: not llc -mtriple x86_64-unknown-linux-gnu -o %t.s -filetype=asm %s 2>&1 | FileCheck %s
-; RUN: not llc -mtriple x86_64-unknown-linux-gnu -o %t.o -filetype=obj %s 2>&1 | FileCheck %s
-
-; Assembler-aware expression evaluation should be disabled in inline
-; assembly to prevent differences in behavior between object and
-; assembly output.
+; RUN: not llc -mtriple=x86_64 %s -o /dev/null 2>&1 | FileCheck %s
+; RUN: llc -mtriple=x86_64 -no-integrated-as < %s | FileCheck %s --check-prefix=GAS
+; RUN: llc -mtriple=x86_64 -filetype=obj %s -o - | llvm-objdump -d - | FileCheck %s --check-prefix=DISASM
+; GAS: nop; .if . - foo==1; nop;.endif
; CHECK: <inline asm>:1:17: error: expected absolute expression
+; DISASM: <main>:
+; DISASM-NEXT: nop
+; DISASM-NEXT: nop
+; DISASM-NEXT: xorl %eax, %eax
+; DISASM-NEXT: retq
+
define i32 @main() local_unnamed_addr {
tail call void asm sideeffect "foo: nop; .if . - foo==1; nop;.endif", "~{dirflag},~{fpsr},~{flags}"()
ret i32 0
diff --git a/llvm/test/MC/MachO/darwin-target-variant-reverse.ll b/llvm/test/MC/MachO/darwin-target-variant-reverse.ll
index 6d51cd8fffa8..fd527b204546 100644
--- a/llvm/test/MC/MachO/darwin-target-variant-reverse.ll
+++ b/llvm/test/MC/MachO/darwin-target-variant-reverse.ll
@@ -3,7 +3,7 @@
target triple = "x86_64-apple-ios13.1-macabi";
!llvm.module.flags = !{!0, !1, !2};
!0 = !{i32 2, !"SDK Version", [2 x i32] [ i32 13, i32 1 ] };
-!1 = !{i32 1, !"darwin.target_variant.triple", !"x86_64-apple-macos10.15"};
+!1 = !{i32 2, !"darwin.target_variant.triple", !"x86_64-apple-macos10.15"};
!2 = !{i32 2, !"darwin.target_variant.SDK Version", [2 x i32] [ i32 10, i32 15 ] };
define void @foo() {
diff --git a/llvm/test/MC/MachO/darwin-target-variant.ll b/llvm/test/MC/MachO/darwin-target-variant.ll
index d506ed92c9cc..78bd1e98410f 100644
--- a/llvm/test/MC/MachO/darwin-target-variant.ll
+++ b/llvm/test/MC/MachO/darwin-target-variant.ll
@@ -4,7 +4,7 @@
target triple = "x86_64-apple-macos10.15";
!llvm.module.flags = !{!0, !1, !2};
!0 = !{i32 2, !"SDK Version", [3 x i32] [ i32 10, i32 15, i32 1 ] };
-!1 = !{i32 1, !"darwin.target_variant.triple", !"x86_64-apple-ios13.1-macabi"};
+!1 = !{i32 2, !"darwin.target_variant.triple", !"x86_64-apple-ios13.1-macabi"};
!2 = !{i32 2, !"darwin.target_variant.SDK Version", [2 x i32] [ i32 13, i32 2 ] };
define void @foo() {
diff --git a/llvm/test/MC/RISCV/attribute-arch.s b/llvm/test/MC/RISCV/attribute-arch.s
index a028d4025ec1..0e5eddd83e40 100644
--- a/llvm/test/MC/RISCV/attribute-arch.s
+++ b/llvm/test/MC/RISCV/attribute-arch.s
@@ -397,7 +397,7 @@
# CHECK: attribute 5, "rv32i2p1_xcvbi1p0"
.attribute arch, "rv32i_zicfilp0p4"
-# CHECK: attribute 5, "rv32i2p1_zicfilp0p4"
+# CHECK: attribute 5, "rv32i2p1_zicfilp0p4_zicsr2p0"
.attribute arch, "rv32i_zicfiss0p4"
# CHECK: .attribute 5, "rv32i2p1_zicfiss0p4_zicsr2p0_zimop1p0"
diff --git a/llvm/test/MC/RISCV/rv32zaamo-invalid.s b/llvm/test/MC/RISCV/rv32zaamo-invalid.s
index fb4dab4542d6..984a0d61e2d0 100644
--- a/llvm/test/MC/RISCV/rv32zaamo-invalid.s
+++ b/llvm/test/MC/RISCV/rv32zaamo-invalid.s
@@ -1,5 +1,5 @@
# RUN: not llvm-mc -triple riscv32 -mattr=+a < %s 2>&1 | FileCheck %s
-# RUN: not llvm-mc -triple riscv32 -mattr=+experimental-zaamo < %s 2>&1 | FileCheck %s
+# RUN: not llvm-mc -triple riscv32 -mattr=+zaamo < %s 2>&1 | FileCheck %s
# Final operand must have parentheses
amoswap.w a1, a2, a3 # CHECK: :[[@LINE]]:19: error: expected '(' or optional integer offset
diff --git a/llvm/test/MC/RISCV/rv32zaamo-valid.s b/llvm/test/MC/RISCV/rv32zaamo-valid.s
index f6b5799b46f8..d9ba6ef0240b 100644
--- a/llvm/test/MC/RISCV/rv32zaamo-valid.s
+++ b/llvm/test/MC/RISCV/rv32zaamo-valid.s
@@ -8,15 +8,15 @@
# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+a < %s \
# RUN: | llvm-objdump --mattr=+a -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-zaamo -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv32 -mattr=+zaamo -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zaamo -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+zaamo -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+experimental-zaamo < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zaamo -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+zaamo < %s \
+# RUN: | llvm-objdump --mattr=+zaamo -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zaamo < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zaamo -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+zaamo < %s \
+# RUN: | llvm-objdump --mattr=+zaamo -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
# CHECK-ASM-AND-OBJ: amoswap.w a4, ra, (s0)
diff --git a/llvm/test/MC/RISCV/rv32zalrsc-invalid.s b/llvm/test/MC/RISCV/rv32zalrsc-invalid.s
index 9233c978f033..b1eb982a9763 100644
--- a/llvm/test/MC/RISCV/rv32zalrsc-invalid.s
+++ b/llvm/test/MC/RISCV/rv32zalrsc-invalid.s
@@ -1,5 +1,5 @@
# RUN: not llvm-mc -triple riscv32 -mattr=+a < %s 2>&1 | FileCheck %s
-# RUN: not llvm-mc -triple riscv32 -mattr=+experimental-zalrsc < %s 2>&1 | FileCheck %s
+# RUN: not llvm-mc -triple riscv32 -mattr=+zalrsc < %s 2>&1 | FileCheck %s
# Final operand must have parentheses
lr.w a4, a5 # CHECK: :[[@LINE]]:10: error: expected '(' or optional integer offset
diff --git a/llvm/test/MC/RISCV/rv32zalrsc-valid.s b/llvm/test/MC/RISCV/rv32zalrsc-valid.s
index f59a4df0d667..f84c0fd62f69 100644
--- a/llvm/test/MC/RISCV/rv32zalrsc-valid.s
+++ b/llvm/test/MC/RISCV/rv32zalrsc-valid.s
@@ -8,15 +8,15 @@
# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+a < %s \
# RUN: | llvm-objdump --mattr=+a -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-zalrsc -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv32 -mattr=+zalrsc -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zalrsc -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+zalrsc -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+experimental-zalrsc < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zalrsc -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+zalrsc < %s \
+# RUN: | llvm-objdump --mattr=+zalrsc -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zalrsc < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zalrsc -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+zalrsc < %s \
+# RUN: | llvm-objdump --mattr=+zalrsc -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
# CHECK-ASM-AND-OBJ: lr.w t0, (t1)
diff --git a/llvm/test/MC/RISCV/rv64zaamo-invalid.s b/llvm/test/MC/RISCV/rv64zaamo-invalid.s
index e00c1ec7bed7..cb219a79bc7e 100644
--- a/llvm/test/MC/RISCV/rv64zaamo-invalid.s
+++ b/llvm/test/MC/RISCV/rv64zaamo-invalid.s
@@ -1,5 +1,5 @@
# RUN: not llvm-mc -triple riscv64 -mattr=+a < %s 2>&1 | FileCheck %s
-# RUN: not llvm-mc -triple riscv64 -mattr=+experimental-zaamo < %s 2>&1 | FileCheck %s
+# RUN: not llvm-mc -triple riscv64 -mattr=+zaamo < %s 2>&1 | FileCheck %s
# Final operand must have parentheses
amoswap.d a1, a2, a3 # CHECK: :[[@LINE]]:19: error: expected '(' or optional integer offset
diff --git a/llvm/test/MC/RISCV/rv64zaamo-valid.s b/llvm/test/MC/RISCV/rv64zaamo-valid.s
index 51493b97c875..96d3e619b4c1 100644
--- a/llvm/test/MC/RISCV/rv64zaamo-valid.s
+++ b/llvm/test/MC/RISCV/rv64zaamo-valid.s
@@ -7,13 +7,13 @@
# RUN: not llvm-mc -triple riscv32 -mattr=+a < %s 2>&1 \
# RUN: | FileCheck -check-prefix=CHECK-RV32 %s
#
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zaamo -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+zaamo -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zaamo < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zaamo -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+zaamo < %s \
+# RUN: | llvm-objdump --mattr=+zaamo -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
#
-# RUN: not llvm-mc -triple riscv32 -mattr=+experimental-zaamo < %s 2>&1 \
+# RUN: not llvm-mc -triple riscv32 -mattr=+zaamo < %s 2>&1 \
# RUN: | FileCheck -check-prefix=CHECK-RV32 %s
# CHECK-ASM-AND-OBJ: amoswap.d a4, ra, (s0)
diff --git a/llvm/test/MC/RISCV/rv64zalrsc-invalid.s b/llvm/test/MC/RISCV/rv64zalrsc-invalid.s
index e2ad2fc49139..4a9d55e752f0 100644
--- a/llvm/test/MC/RISCV/rv64zalrsc-invalid.s
+++ b/llvm/test/MC/RISCV/rv64zalrsc-invalid.s
@@ -1,5 +1,5 @@
# RUN: not llvm-mc -triple riscv64 -mattr=+a < %s 2>&1 | FileCheck %s
-# RUN: not llvm-mc -triple riscv64 -mattr=+experimental-zalrsc < %s 2>&1 | FileCheck %s
+# RUN: not llvm-mc -triple riscv64 -mattr=+zalrsc < %s 2>&1 | FileCheck %s
# Final operand must have parentheses
lr.d a4, a5 # CHECK: :[[@LINE]]:10: error: expected '(' or optional integer offset
diff --git a/llvm/test/MC/RISCV/rv64zalrsc-valid.s b/llvm/test/MC/RISCV/rv64zalrsc-valid.s
index 5f4437250d9d..2bbde96b6e07 100644
--- a/llvm/test/MC/RISCV/rv64zalrsc-valid.s
+++ b/llvm/test/MC/RISCV/rv64zalrsc-valid.s
@@ -7,13 +7,13 @@
# RUN: not llvm-mc -triple riscv32 -mattr=+a < %s 2>&1 \
# RUN: | FileCheck -check-prefix=CHECK-RV32 %s
#
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zalrsc -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+zalrsc -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zalrsc < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zalrsc -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+zalrsc < %s \
+# RUN: | llvm-objdump --mattr=+zalrsc -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
#
-# RUN: not llvm-mc -triple riscv32 -mattr=+experimental-zalrsc < %s 2>&1 \
+# RUN: not llvm-mc -triple riscv32 -mattr=+zalrsc < %s 2>&1 \
# RUN: | FileCheck -check-prefix=CHECK-RV32 %s
# CHECK-ASM-AND-OBJ: lr.d t0, (t1)
diff --git a/llvm/test/MC/WebAssembly/simd-encodings.s b/llvm/test/MC/WebAssembly/simd-encodings.s
index 57fa71e74b8d..d397188a9882 100644
--- a/llvm/test/MC/WebAssembly/simd-encodings.s
+++ b/llvm/test/MC/WebAssembly/simd-encodings.s
@@ -845,4 +845,10 @@ main:
# CHECK: f32.store_f16 32 # encoding: [0xfc,0x31,0x01,0x20]
f32.store_f16 32
+ # CHECK: f16x8.splat # encoding: [0xfd,0xa0,0x02]
+ f16x8.splat
+
+ # CHECK: f16x8.extract_lane 1 # encoding: [0xfd,0xa1,0x02,0x01]
+ f16x8.extract_lane 1
+
end_function
diff --git a/llvm/test/MC/X86/apx/ccmp-reloc.s b/llvm/test/MC/X86/apx/ccmp-reloc.s
new file mode 100644
index 000000000000..6b9d11f26da8
--- /dev/null
+++ b/llvm/test/MC/X86/apx/ccmp-reloc.s
@@ -0,0 +1,14 @@
+// RUN: llvm-mc -triple x86_64-linux-gnu -filetype=obj %s | llvm-readobj -r - | FileCheck %s
+
+// CHECK: Relocations [
+// CHECK-NEXT: Section ({{[0-9]+}}) .rela.text {
+ccmpbb {dfv=of} $foo, %bl // CHECK-NEXT: R_X86_64_8
+ccmpbb {dfv=of} $foo, 123(%r8,%rax,4) // CHECK-NEXT: R_X86_64_8
+ccmpbw {dfv=of} $foo, %bx // CHECK-NEXT: R_X86_64_16
+ccmpbw {dfv=of} $foo, 123(%r8,%rax,4) // CHECK-NEXT: R_X86_64_16
+ccmpbl {dfv=of} $foo, %ebx // CHECK-NEXT: R_X86_64_32
+ccmpbl {dfv=of} $foo, 123(%r8,%rax,4) // CHECK-NEXT: R_X86_64_32
+ccmpbq {dfv=of} $foo, %rbx // CHECK-NEXT: R_X86_64_32S
+ccmpbq {dfv=of} $foo, 123(%r8,%rax,4) // CHECK-NEXT: R_X86_64_32S
+// CHECK-NEXT: }
+// CHECK-NEXT: ]
diff --git a/llvm/test/Other/constant-fold-gep.ll b/llvm/test/Other/constant-fold-gep.ll
index 0c1ca129bdb3..9af300ac9907 100644
--- a/llvm/test/Other/constant-fold-gep.ll
+++ b/llvm/test/Other/constant-fold-gep.ll
@@ -106,10 +106,10 @@
; PLAIN: @Y = global ptr getelementptr inbounds ([3 x { i32, i32 }], ptr @ext, i64 2)
; PLAIN: @Z = global ptr getelementptr inbounds (i32, ptr getelementptr inbounds ([3 x { i32, i32 }], ptr @ext, i64 0, i64 1, i32 0), i64 1)
-; OPT: @Y = local_unnamed_addr global ptr getelementptr inbounds ([3 x { i32, i32 }], ptr @ext, i64 2)
-; OPT: @Z = local_unnamed_addr global ptr getelementptr inbounds ([3 x { i32, i32 }], ptr @ext, i64 0, i64 1, i32 1)
-; TO: @Y = local_unnamed_addr global ptr getelementptr inbounds ([3 x { i32, i32 }], ptr @ext, i64 2)
-; TO: @Z = local_unnamed_addr global ptr getelementptr inbounds ([3 x { i32, i32 }], ptr @ext, i64 0, i64 1, i32 1)
+; OPT: @Y = local_unnamed_addr global ptr getelementptr inbounds (i8, ptr @ext, i64 48)
+; OPT: @Z = local_unnamed_addr global ptr getelementptr inbounds (i8, ptr @ext, i64 12)
+; TO: @Y = local_unnamed_addr global ptr getelementptr inbounds (i8, ptr @ext, i64 48)
+; TO: @Z = local_unnamed_addr global ptr getelementptr inbounds (i8, ptr @ext, i64 12)
@ext = external global [3 x { i32, i32 }]
@Y = global ptr getelementptr inbounds ([3 x { i32, i32 }], ptr getelementptr inbounds ([3 x { i32, i32 }], ptr @ext, i64 1), i64 1)
@@ -433,10 +433,10 @@ define ptr @fO() nounwind {
; PLAIN: ret ptr %t
; PLAIN: }
; OPT: define ptr @fZ() local_unnamed_addr #0 {
-; OPT: ret ptr getelementptr inbounds ([3 x { i32, i32 }], ptr @ext, i64 0, i64 1, i32 1)
+; OPT: ret ptr getelementptr inbounds (i8, ptr @ext, i64 12)
; OPT: }
; TO: define ptr @fZ() local_unnamed_addr #0 {
-; TO: ret ptr getelementptr inbounds ([3 x { i32, i32 }], ptr @ext, i64 0, i64 1, i32 1)
+; TO: ret ptr getelementptr inbounds (i8, ptr @ext, i64 12)
; TO: }
; SCEV: Classifying expressions for: @fZ
; SCEV: %t = bitcast ptr getelementptr inbounds (i32, ptr getelementptr inbounds ([3 x { i32, i32 }], ptr @ext, i64 0, i64 1, i32 0), i64 1) to ptr
@@ -464,7 +464,7 @@ define ptr @same_addrspace() nounwind noinline {
; OPT: same_addrspace
%p = getelementptr inbounds i8, ptr @p0, i32 2
ret ptr %p
-; OPT: ret ptr getelementptr inbounds ([4 x i8], ptr @p0, i64 0, i64 2)
+; OPT: ret ptr getelementptr inbounds (i8, ptr @p0, i64 2)
}
@gv1 = internal global i32 1
diff --git a/llvm/test/Other/optimize-inrange-gep.ll b/llvm/test/Other/optimize-inrange-gep.ll
index 2eae34bdb09b..e7465fddd80f 100644
--- a/llvm/test/Other/optimize-inrange-gep.ll
+++ b/llvm/test/Other/optimize-inrange-gep.ll
@@ -20,7 +20,7 @@ define void @foo(ptr %p) {
;
; CHECK-LABEL: define void @foo(
; CHECK-SAME: ptr nocapture writeonly [[P:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: store ptr getelementptr inbounds inrange(-24, 0) ({ [3 x ptr] }, ptr @vtable, i64 1, i32 0, i64 0), ptr [[P]], align 8
+; CHECK-NEXT: store ptr getelementptr inbounds inrange(-24, 0) (i8, ptr @vtable, i64 24), ptr [[P]], align 8
; CHECK-NEXT: ret void
;
store ptr getelementptr inrange(-24, 0) ({ [3 x ptr], [3 x ptr] }, ptr @vtable, i32 0, i32 0, i32 3), ptr %p
diff --git a/llvm/test/TableGen/predicate-patfags.td b/llvm/test/TableGen/predicate-patfags.td
index 2cf29769dc13..39133f324f30 100644
--- a/llvm/test/TableGen/predicate-patfags.td
+++ b/llvm/test/TableGen/predicate-patfags.td
@@ -1,5 +1,7 @@
-// RUN: llvm-tblgen -gen-dag-isel -I %p/../../include -I %p/Common %s 2>&1 | FileCheck -check-prefix=SDAG %s
-// RUN: llvm-tblgen -gen-global-isel -I %p/../../include -I %p/Common %s 2>&1 | FileCheck -check-prefix=GISEL %s
+// RUN: llvm-tblgen -gen-dag-isel -I %p/../../include -I %p/Common %s 2>&1 | FileCheck -check-prefixes=SDAG,SCUSTOM %s
+// RUN: llvm-tblgen -gen-dag-isel -I %p/../../include -I %p/Common %s -DHASONEUSE 2>&1 | FileCheck -check-prefixes=SDAG,SBUILTIN %s
+// RUN: llvm-tblgen -gen-global-isel -I %p/../../include -I %p/Common %s 2>&1 | FileCheck -check-prefixes=GISEL,GCUSTOM %s
+// RUN: llvm-tblgen -gen-global-isel -I %p/../../include -I %p/Common %s -DHASONEUSE 2>&1 | FileCheck -check-prefixes=GISEL,GBUILTIN %s
include "llvm/Target/Target.td"
include "GlobalISelEmitterCommon.td"
@@ -31,11 +33,16 @@ def : GINodeEquiv<G_TGT_MUL24, TGTmul24_impl>;
def TGTmul24_oneuse : PatFrag<
(ops node:$src0, node:$src1),
- (TGTmul24 $src0, $src1),
- [{ return N->hasOneUse(); }]> {
+ (TGTmul24 $src0, $src1)
+#ifndef HASONEUSE
+ , [{ return N->hasOneUse(); }]> {
let GISelPredicateCode = [{
return MRI->hasOneNonDBGUse(MI.getOperand(0).getReg());
}];
+#else
+ > {
+ let HasOneUse = 1;
+#endif
}
// SDAG: OPC_CheckOpcode, TARGET_VAL(ISD::INTRINSIC_W_CHAIN),
@@ -44,19 +51,26 @@ def TGTmul24_oneuse : PatFrag<
// SDAG: OPC_CheckOpcode, TARGET_VAL(TargetISD::MUL24),
// SDAG: OPC_CheckPredicate0, // Predicate_TGTmul24_oneuse
+// SCUSTOM: return N->hasOneUse();
+// SBUILTIN: if (!SDValue(N, 0).hasOneUse()) return false;
+
// GISEL: GIM_CheckOpcode, /*MI*/1, GIMT_Encode2(TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS),
// GISEL: GIM_CheckIntrinsicID, /*MI*/1, /*Op*/1, GIMT_Encode2(Intrinsic::tgt_mul24),
-// GISEL: GIM_CheckCxxInsnPredicate, /*MI*/1, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_TGTmul24_oneuse),
+// GBUILTIN: GIM_CheckHasOneUse, /*MI*/1,
+// GCUSTOM: GIM_CheckCxxInsnPredicate, /*MI*/1, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_TGTmul24_oneuse),
// GISEL: GIM_CheckOpcode, /*MI*/1, GIMT_Encode2(TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS),
// GISEL: GIM_CheckIntrinsicID, /*MI*/1, /*Op*/1, GIMT_Encode2(Intrinsic::tgt_mul24),
-// GISEL: GIM_CheckCxxInsnPredicate, /*MI*/1, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_TGTmul24_oneuse),
+// GBUILTIN: GIM_CheckHasOneUse, /*MI*/1,
+// GCUSTOM: GIM_CheckCxxInsnPredicate, /*MI*/1, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_TGTmul24_oneuse),
// GISEL: GIM_CheckOpcode, /*MI*/1, GIMT_Encode2(MyTarget::G_TGT_MUL24),
-// GISEL: GIM_CheckCxxInsnPredicate, /*MI*/1, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_TGTmul24_oneuse),
+// GBUILTIN: GIM_CheckHasOneUse, /*MI*/1,
+// GCUSTOM: GIM_CheckCxxInsnPredicate, /*MI*/1, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_TGTmul24_oneuse),
// GISEL: GIM_CheckOpcode, /*MI*/1, GIMT_Encode2(MyTarget::G_TGT_MUL24),
-// GISEL: GIM_CheckCxxInsnPredicate, /*MI*/1, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_TGTmul24_oneuse),
+// GBUILTIN: GIM_CheckHasOneUse, /*MI*/1,
+// GCUSTOM: GIM_CheckCxxInsnPredicate, /*MI*/1, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_TGTmul24_oneuse),
def inst_mad24 : I<
(outs GPR32:$dst),
(ins GPR32:$src0, GPR32:$src1, GPR32:$src2),
diff --git a/llvm/test/ThinLTO/X86/funcimport-stats.ll b/llvm/test/ThinLTO/X86/funcimport-stats.ll
index 913b13004c1c..7fcd33855fe1 100644
--- a/llvm/test/ThinLTO/X86/funcimport-stats.ll
+++ b/llvm/test/ThinLTO/X86/funcimport-stats.ll
@@ -9,8 +9,8 @@
; RUN: cat %t4 | grep 'Is importing aliasee' | count 1
; RUN: cat %t4 | FileCheck %s
-; CHECK: - [[NUM_FUNCS:[0-9]+]] functions imported from
-; CHECK-NEXT: - [[NUM_VARS:[0-9]+]] global vars imported from
+; CHECK: - [[NUM_FUNCS:[0-9]+]] function definitions and 0 function declarations imported from
+; CHECK-NEXT: - [[NUM_VARS:[0-9]+]] global vars definition and 0 global vars declaration imported from
; CHECK: [[NUM_FUNCS]] function-import - Number of functions imported in backend
; CHECK-NEXT: [[NUM_FUNCS]] function-import - Number of functions thin link decided to import
diff --git a/llvm/test/ThinLTO/X86/import_callee_declaration.ll b/llvm/test/ThinLTO/X86/import_callee_declaration.ll
new file mode 100644
index 000000000000..246920e5db0d
--- /dev/null
+++ b/llvm/test/ThinLTO/X86/import_callee_declaration.ll
@@ -0,0 +1,221 @@
+; "-debug-only" requires asserts.
+; REQUIRES: asserts
+; RUN: rm -rf %t && split-file %s %t && cd %t
+
+; Generate per-module summaries.
+; RUN: opt -module-summary main.ll -o main.bc
+; RUN: opt -module-summary lib.ll -o lib.bc
+
+; Generate the combined summary and distributed indices.
+
+; - For function import, set 'import-instr-limit' to 7 and fall back to
+;   importing function declarations.
+; - In main.ll, function 'main' calls 'small_func' and 'large_func'. Both callees
+;   are defined in lib.ll. 'small_func' has two indirect callees, one smaller and
+;   one larger; both callees of 'small_func' are also defined in lib.ll.
+; - Given the import limit, in main's combined summary, the import type of 'small_func'
+;   and 'small_indirect_callee' will be 'definition', and the import type of the
+;   large* functions and their aliasees will be 'declaration'.
+;
+; The test will disassemble combined summaries and check that the import type
+; is correct. Right now the postlink optimizer pipeline doesn't do anything
+; (e.g., import the declaration or de-serialize summary attributes yet), so
+; there is nothing to test beyond the summary content.
+;
+; TODO: Extend this test case to test IR once postlink optimizer makes use of
+; the import type for declarations.
+;
+; RUN: llvm-lto2 run \
+; RUN: -debug-only=function-import \
+; RUN: -import-instr-limit=7 \
+; RUN: -import-instr-evolution-factor=1.0 \
+; RUN: -import-declaration \
+; RUN: -thinlto-distributed-indexes \
+; RUN: -r=main.bc,main,px \
+; RUN: -r=main.bc,small_func, \
+; RUN: -r=main.bc,large_func, \
+; RUN: -r=lib.bc,callee,pl \
+; RUN: -r=lib.bc,large_indirect_callee,px \
+; RUN: -r=lib.bc,large_indirect_bar,px \
+; RUN: -r=lib.bc,small_func,px \
+; RUN: -r=lib.bc,large_func,px \
+; RUN: -r=lib.bc,large_indirect_callee_alias,px \
+; RUN: -r=lib.bc,large_indirect_bar_alias,px \
+; RUN: -r=lib.bc,calleeAddrs,px -r=lib.bc,calleeAddrs2,px -o summary main.bc lib.bc 2>&1 | FileCheck %s --check-prefix=DUMP
+;
+; RUN: llvm-lto -thinlto-action=thinlink -import-declaration -import-instr-limit=7 -import-instr-evolution-factor=1.0 -o combined.index.bc main.bc lib.bc
+; RUN: llvm-lto -thinlto-action=distributedindexes -debug-only=function-import -import-declaration -import-instr-limit=7 -import-instr-evolution-factor=1.0 -thinlto-index combined.index.bc main.bc lib.bc 2>&1 | FileCheck %s --check-prefix=DUMP
+
+; DUMP: - 2 function definitions and 4 function declarations imported from lib.bc
+
+; First disassemble the per-module summary and find out the GUIDs for {large_func, large_indirect_callee}.
+;
+; RUN: llvm-dis lib.bc -o - | FileCheck %s --check-prefix=LIB-DIS
+; LIB-DIS: module: (path: "lib.bc", hash: (0, 0, 0, 0, 0))
+; LIB-DIS: gv: (name: "large_func", summaries: {{.*}}) ; guid = 2418497564662708935
+; LIB-DIS: gv: (name: "large_indirect_bar_alias", summaries: {{.*}}, aliasee: [[LARGEINDIRECT_BAR:\^[0-9]+]]{{.*}}guid = 13590951773474913315
+; LIB-DIS: [[LARGEINDIRECT_BAR]] = gv: (name: "large_indirect_bar", summaries: {{.*}}) ; guid = 13770917885399536773
+; LIB-DIS: [[LARGEINDIRECT:\^[0-9]+]] = gv: (name: "large_indirect_callee", summaries: {{.*}}) ; guid = 14343440786664691134
+; LIB-DIS: gv: (name: "large_indirect_callee_alias", summaries: {{.*}}, aliasee: [[LARGEINDIRECT]]{{.*}}guid = 16730173943625350469
+;
+; Secondly, disassemble main's combined summary and verify that the import type
+; of these two GUIDs is 'declaration'.
+;
+; RUN: llvm-dis main.bc.thinlto.bc -o - | FileCheck %s --check-prefix=MAIN-DIS
+;
+; MAIN-DIS: [[LIBMOD:\^[0-9]+]] = module: (path: "lib.bc", hash: (0, 0, 0, 0, 0))
+; MAIN-DIS: gv: (guid: 2418497564662708935, summaries: (function: (module: [[LIBMOD]], flags: ({{.*}} importType: declaration), insts: 8, {{.*}})))
+; When an alias is imported as a copy of the aliasee, but the aliasee is not
+; itself imported, the aliasee should be null.
+; MAIN-DIS: gv: (guid: 13590951773474913315, summaries: (alias: (module: [[LIBMOD]], flags: ({{.*}} importType: declaration), aliasee: null)))
+; MAIN-DIS: [[LARGEINDIRECT:\^[0-9]+]] = gv: (guid: 14343440786664691134, summaries: (function: (module: [[LIBMOD]], flags: ({{.*}} importType: declaration), insts: 8, {{.*}})))
+; MAIN-DIS: gv: (guid: 16730173943625350469, summaries: (alias: (module: [[LIBMOD]], flags: ({{.*}} importType: declaration), aliasee: [[LARGEINDIRECT]])))
+
+; Run in-process ThinLTO and test that
+; 1. `callee` remains internalized even if the symbols of its callers
+;    (large_func, large_indirect_callee, large_indirect_bar) are exported as
+;    declarations and visible to the main module.
+; 2. the debugging logs from the `function-import` pass are as expected.
+
+; RUN: llvm-lto2 run \
+; RUN: -debug-only=function-import \
+; RUN: -save-temps \
+; RUN: -thinlto-threads=1 \
+; RUN: -import-instr-limit=7 \
+; RUN: -import-instr-evolution-factor=1.0 \
+; RUN: -import-declaration \
+; RUN: -r=main.bc,main,px \
+; RUN: -r=main.bc,small_func, \
+; RUN: -r=main.bc,large_func, \
+; RUN: -r=lib.bc,callee,pl \
+; RUN: -r=lib.bc,large_indirect_callee,px \
+; RUN: -r=lib.bc,large_indirect_bar,px \
+; RUN: -r=lib.bc,small_func,px \
+; RUN: -r=lib.bc,large_func,px \
+; RUN: -r=lib.bc,large_indirect_callee_alias,px \
+; RUN: -r=lib.bc,large_indirect_bar_alias,px \
+; RUN: -r=lib.bc,calleeAddrs,px -r=lib.bc,calleeAddrs2,px -o in-process main.bc lib.bc 2>&1 | FileCheck %s --check-prefix=IMPORTDUMP
+
+; TODO: Extend this test case to test IR once postlink optimizer makes use of
+; the import type for declarations.
+; IMPORTDUMP-DAG: Not importing function 11825436545918268459 callee from lib.cc
+; IMPORTDUMP-DAG: Is importing function declaration 14343440786664691134 large_indirect_callee from lib.cc
+; IMPORTDUMP-DAG: Is importing function definition 13568239288960714650 small_indirect_callee from lib.cc
+; IMPORTDUMP-DAG: Is importing function definition 6976996067367342685 small_func from lib.cc
+; IMPORTDUMP-DAG: Is importing function declaration 2418497564662708935 large_func from lib.cc
+; IMPORTDUMP-DAG: Not importing global 7680325410415171624 calleeAddrs from lib.cc
+; IMPORTDUMP-DAG: Is importing alias declaration 16730173943625350469 large_indirect_callee_alias from lib.cc
+; IMPORTDUMP-DAG: Is importing alias declaration 13590951773474913315 large_indirect_bar_alias from lib.cc
+; IMPORTDUMP-DAG: Not importing function 13770917885399536773 large_indirect_bar
+
+; RUN: llvm-dis in-process.1.3.import.bc -o - | FileCheck %s --check-prefix=IMPORT
+
+; RUN: llvm-dis in-process.2.2.internalize.bc -o - | FileCheck %s --check-prefix=INTERNALIZE
+
+; IMPORT-DAG: define available_externally void @small_func
+; IMPORT-DAG: define available_externally hidden void @small_indirect_callee
+; IMPORT-DAG: declare void @large_func
+; IMPORT-NOT: large_indirect_callee
+; IMPORT-NOT: large_indirect_callee_alias
+; IMPORT-NOT: large_indirect_bar
+; IMPORT-NOT: large_indirect_bar_alias
+
+; INTERNALIZE: define internal void @callee()
+
+;--- main.ll
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @main() {
+ call void @small_func()
+ call void @large_func()
+ ret i32 0
+}
+
+declare void @small_func()
+
+; large_func without attributes
+declare void @large_func()
+
+;--- lib.ll
+source_filename = "lib.cc"
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Both large_indirect_callee and large_indirect_callee_alias are referenced
+; and visible to main.ll.
+@calleeAddrs = global [3 x ptr] [ptr @large_indirect_callee, ptr @small_indirect_callee, ptr @large_indirect_callee_alias]
+
+; large_indirect_bar_alias is visible to main.ll but its aliasee isn't.
+@calleeAddrs2 = global [1 x ptr] [ptr @large_indirect_bar_alias]
+
+define void @callee() #1 {
+ ret void
+}
+
+define void @large_indirect_callee()#2 {
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ ret void
+}
+
+define void @large_indirect_bar()#2 {
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ ret void
+}
+
+define internal void @small_indirect_callee() #0 {
+entry:
+ %0 = load ptr, ptr @calleeAddrs2
+ call void %0(), !prof !3
+ ret void
+}
+
+@large_indirect_callee_alias = alias void(), ptr @large_indirect_callee
+
+@large_indirect_bar_alias = alias void(), ptr @large_indirect_bar
+
+define void @small_func() {
+entry:
+ %0 = load ptr, ptr @calleeAddrs
+ call void %0(), !prof !0
+ %1 = load ptr, ptr getelementptr inbounds ([3 x ptr], ptr @calleeAddrs, i64 0, i64 1)
+ call void %1(), !prof !1
+ %2 = load ptr, ptr getelementptr inbounds ([3 x ptr], ptr @calleeAddrs, i64 0, i64 2)
+ call void %2(), !prof !2
+ ret void
+}
+
+define void @large_func() #0 {
+entry:
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ ret void
+}
+
+attributes #0 = { nounwind norecurse }
+
+attributes #1 = { noinline }
+
+attributes #2 = { norecurse }
+
+!0 = !{!"VP", i32 0, i64 1, i64 14343440786664691134, i64 1}
+!1 = !{!"VP", i32 0, i64 1, i64 13568239288960714650, i64 1}
+!2 = !{!"VP", i32 0, i64 1, i64 16730173943625350469, i64 1}
+!3 = !{!"VP", i32 0, i64 1, i64 13590951773474913315, i64 1}
diff --git a/llvm/test/ThinLTO/X86/memprof-tailcall-nonunique.ll b/llvm/test/ThinLTO/X86/memprof-tailcall-nonunique.ll
index d7cfafec89fe..49c22bf590e6 100644
--- a/llvm/test/ThinLTO/X86/memprof-tailcall-nonunique.ll
+++ b/llvm/test/ThinLTO/X86/memprof-tailcall-nonunique.ll
@@ -14,10 +14,11 @@
; RUN: -r=%t.o,_Z4baz1v,plx \
; RUN: -r=%t.o,_Z4baz2v,plx \
; RUN: -r=%t.o,_Z3foob,plx \
+; RUN: -r=%t.o,xyz,plx \
; RUN: -r=%t.o,main,plx \
; RUN: -r=%t.o,_Znam, \
; RUN: -stats -debug -save-temps \
-; RUN: -o %t.out 2>&1 | FileCheck %s --check-prefix=STATS
+; RUN: -o %t.out 2>&1 | FileCheck %s --check-prefix=STATS --check-prefix=DEBUG
; RUN: llvm-dis %t.out.1.4.opt.bc -o - | FileCheck %s --check-prefix=IR
@@ -31,22 +32,20 @@
; RUN: -r=%t.o,_Z4baz1v,plx \
; RUN: -r=%t.o,_Z4baz2v,plx \
; RUN: -r=%t.o,_Z3foob,plx \
+; RUN: -r=%t.o,xyz,plx \
; RUN: -r=%t.o,main,plx \
; RUN: -r=%t.o,_Znam, \
; RUN: -stats -debug \
-; RUN: -o %t2.out 2>&1 | FileCheck %s --check-prefix=STATS
+; RUN: -o %t2.out 2>&1 | FileCheck %s --check-prefix=STATS --check-prefix=DEBUG
;; Run ThinLTO backend
; RUN: opt -passes=memprof-context-disambiguation \
; RUN: -memprof-import-summary=%t.o.thinlto.bc \
; RUN: -stats %t.o -S 2>&1 | FileCheck %s --check-prefix=IR
-; DEBUG: Not found through unique tail call chain: _Z3barv from main that actually called _Z3foob (found multiple possible chains)
-; DEBUG: Not found through unique tail call chain: _Z3barv from main that actually called _Z3foob (found multiple possible chains)
-; DEBUG: Not found through unique tail call chain: _Z3barv from main that actually called _Z3foob (found multiple possible chains)
-; DEBUG: Not found through unique tail call chain: _Z3barv from main that actually called _Z3foob (found multiple possible chains)
+; DEBUG: Not found through unique tail call chain: 17377440600225628772 (_Z3barv) from 15822663052811949562 (main) that actually called 8716735811002003409 (xyz) (found multiple possible chains)
-; STATS: 4 memprof-context-disambiguation - Number of profiled callees found via multiple tail call chains
+; STATS: 1 memprof-context-disambiguation - Number of profiled callees found via multiple tail call chains
;; Check that all calls in the IR are to the original functions, leading to a
;; non-cold operator new call.
@@ -125,17 +124,24 @@ return: ; preds = %if.else, %if.then
}
; Function Attrs: noinline
-; IR-LABEL: @main()
-define dso_local i32 @main() local_unnamed_addr #0 {
+; IR-LABEL: @xyz()
+define dso_local i32 @xyz() local_unnamed_addr #0 {
delete.end13:
; IR: call ptr @_Z3foob(i1 true)
- %call = tail call ptr @_Z3foob(i1 true), !callsite !10
+ %call = tail call ptr @_Z3foob(i1 true)
; IR: call ptr @_Z3foob(i1 true)
- %call1 = tail call ptr @_Z3foob(i1 true), !callsite !11
+ %call1 = tail call ptr @_Z3foob(i1 true)
; IR: call ptr @_Z3foob(i1 false)
- %call2 = tail call ptr @_Z3foob(i1 false), !callsite !12
+ %call2 = tail call ptr @_Z3foob(i1 false)
; IR: call ptr @_Z3foob(i1 false)
- %call3 = tail call ptr @_Z3foob(i1 false), !callsite !13
+ %call3 = tail call ptr @_Z3foob(i1 false)
+ ret i32 0
+}
+
+define dso_local i32 @main() local_unnamed_addr #0 {
+delete.end13:
+ ; IR: call i32 @xyz()
+ %call1 = tail call i32 @xyz(), !callsite !11
ret i32 0
}
@@ -145,17 +151,10 @@ attributes #0 = { noinline }
attributes #1 = { nobuiltin allocsize(0) }
attributes #2 = { builtin allocsize(0) }
-!0 = !{!1, !3, !5, !7}
-!1 = !{!2, !"notcold"}
-!2 = !{i64 3186456655321080972, i64 6307901912192269588}
-!3 = !{!4, !"cold"}
-!4 = !{i64 3186456655321080972, i64 6792096022461663180}
+!0 = !{!5, !7}
!5 = !{!6, !"notcold"}
!6 = !{i64 3186456655321080972, i64 8632435727821051414}
!7 = !{!8, !"cold"}
!8 = !{i64 3186456655321080972, i64 -3421689549917153178}
!9 = !{i64 3186456655321080972}
-!10 = !{i64 8632435727821051414}
!11 = !{i64 -3421689549917153178}
-!12 = !{i64 6307901912192269588}
-!13 = !{i64 6792096022461663180}
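
For orientation, a hedged sketch of why the statistic above drops from 4 to 1: the four profiled tail calls to @_Z3foob now live in the new @xyz, so @main retains a single !callsite-annotated call, and only that one callsite is resolved through a non-unique tail call chain. The names and metadata below are taken from this test:

declare i32 @xyz()

define dso_local i32 @main() local_unnamed_addr {
  ; The lone profiled callsite left in @main; @xyz reaches the allocation
  ; through @_Z3foob, for which more than one possible tail call chain exists.
  %call = tail call i32 @xyz(), !callsite !11
  ret i32 %call
}

!11 = !{i64 -3421689549917153178}
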
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
index ce8524c70af6..0acb8f8d0fcf 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
@@ -176,9 +176,9 @@ define i16 @test_atomicrmw_and_i16_global_agent_align4(ptr addrspace(1) %ptr, i1
ret i16 %res
}
-; Preserve unknown metadata
-define i16 @test_atomicrmw_and_i16_global_agent_preserve_md(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_preserve_md(
+; Drop unknown metadata and noundef
+define i16 @test_atomicrmw_and_i16_global_agent_drop_md(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_drop_md(
; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
@@ -198,9 +198,9 @@ define i16 @test_atomicrmw_and_i16_global_agent_preserve_md(ptr addrspace(1) %pt
ret i16 %res
}
-; Preserve unknown metadata
-define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_md(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_md(
+; Drop unknown metadata
+define i16 @test_atomicrmw_and_i16_global_agent_align4_drop_md(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4_drop_md(
; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4
@@ -211,6 +211,89 @@ define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_md(ptr addrspace
ret i16 %res
}
+; Drop noundef, preserve mmra
+define i16 @test_atomicrmw_and_i16_global_agent_preserve_mmra(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_preserve_mmra(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; CHECK-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !mmra [[META0:![0-9]+]]
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
+;
+ %res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, !noundef !0, !mmra !1
+ ret i16 %res
+}
+
+; Drop noundef, preserve mmra
+define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_mmra(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_mmra(
+; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !mmra [[META0]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
+;
+ %res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !noundef !0, !mmra !1
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_alias_scope(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_alias_scope(
+; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !alias.scope [[META1:![0-9]+]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
+;
+ %res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !alias.scope !2
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_noalias(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_noalias(
+; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !noalias [[META1]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
+;
+ %res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !noalias !2
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa_struct(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa_struct(
+; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !tbaa.struct [[TBAA_STRUCT4:![0-9]+]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
+;
+ %res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !tbaa.struct !5
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa(
+; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !tbaa [[TBAA5:![0-9]+]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
+;
+ %res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !tbaa !6
+ ret i16 %res
+}
+
define i16 @test_atomicrmw_and_i16_global_agent__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i16 %value) {
; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent__amdgpu_no_remote_memory(
; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
@@ -223,7 +306,7 @@ define i16 @test_atomicrmw_and_i16_global_agent__amdgpu_no_remote_memory(ptr add
; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; CHECK-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4
+; CHECK-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory [[META8:![0-9]+]]
; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; CHECK-NEXT: ret i16 [[EXTRACTED]]
@@ -236,7 +319,7 @@ define i16 @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_remote_memory(
; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_remote_memory(
; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory [[META8]]
; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
; CHECK-NEXT: ret i16 [[EXTRACTED]]
;
@@ -256,7 +339,7 @@ define i16 @test_atomicrmw_and_i16_global_agent__amdgpu_no_fine_grained_memory(p
; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; CHECK-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4
+; CHECK-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META8]]
; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; CHECK-NEXT: ret i16 [[EXTRACTED]]
@@ -269,7 +352,7 @@ define i16 @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_fine_grained_m
; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_fine_grained_memory(
; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META8]]
; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
; CHECK-NEXT: ret i16 [[EXTRACTED]]
;
@@ -1180,6 +1263,15 @@ define bfloat @test_atomicrmw_xchg_bf16_global_agent_align4(ptr addrspace(1) %pt
}
!0 = !{}
+!1 = !{!"foo", !"bar"}
+!2 = !{!3}
+!3 = distinct !{!3, !4}
+!4 = distinct !{!4}
+!5 = !{i64 0, i64 4, !1, i64 8, i64 4}
+!6 = !{!7, !7, i64 0}
+!7 = !{!"omnipotent char", !8, i64 0}
+!8 = !{!"Simple C/C++ TBAA"}
+
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; BASE: {{.*}}
; GCN: {{.*}}
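
Taken together, the tests in this file pin down which metadata AtomicExpand keeps when it widens an i16 atomicrmw to i32: !noundef is dropped (only the low 16 bits of the widened result are meaningful), while !mmra, !alias.scope, !noalias, !tbaa, !tbaa.struct, and the amdgpu.* metadata are carried over. A minimal before/after sketch of the align-4 'and' case, distilled from the CHECK lines above:

; before expansion
  %res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !noundef !0, !mmra !1

; after expansion
  %wide = zext i16 %value to i32
  %op = or i32 %wide, -65536        ; all-ones high half-word, so 'and' leaves it intact
  %old = atomicrmw and ptr addrspace(1) %ptr, i32 %op syncscope("agent") seq_cst, align 4, !mmra !1
  %res.new = trunc i32 %old to i16  ; !noundef is not transferred to the widened operation
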
diff --git a/llvm/test/Transforms/Attributor/issue87856.ll b/llvm/test/Transforms/Attributor/issue87856.ll
new file mode 100644
index 000000000000..4da29cc4448d
--- /dev/null
+++ b/llvm/test/Transforms/Attributor/issue87856.ll
@@ -0,0 +1,61 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 4
+; RUN: opt -S -passes=attributor < %s | FileCheck %s
+
+define void @null_ptr_is_valid_call_with_null() #0 {
+; CHECK-LABEL: define void @null_ptr_is_valid_call_with_null(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: call void @store_as0(ptr nofree noundef writeonly align 4294967296 null) #[[ATTR4:[0-9]+]]
+; CHECK-NEXT: ret void
+;
+ call void @store_as0(ptr null)
+ ret void
+}
+
+define void @null_ptr_is_valid_call_with_undef() #0 {
+; CHECK-LABEL: define void @null_ptr_is_valid_call_with_undef(
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: call void @store_as0(ptr undef) #[[ATTR4]]
+; CHECK-NEXT: ret void
+;
+ call void @store_as0(ptr undef)
+ ret void
+}
+
+define void @store_as0(ptr %0) {
+; CHECK-LABEL: define void @store_as0(
+; CHECK-SAME: ptr nocapture nofree noundef nonnull writeonly align 2 dereferenceable(2) [[TMP0:%.*]]) #[[ATTR2:[0-9]+]] {
+; CHECK-NEXT: store i16 0, ptr [[TMP0]], align 2
+; CHECK-NEXT: ret void
+;
+ store i16 0, ptr %0, align 2
+ ret void
+}
+
+define void @call_store_as1() {
+; CHECK-LABEL: define void @call_store_as1(
+; CHECK-SAME: ) #[[ATTR3:[0-9]+]] {
+; CHECK-NEXT: call void @store_as1(ptr addrspace(1) nocapture nofree noundef writeonly align 4294967296 null) #[[ATTR4]]
+; CHECK-NEXT: ret void
+;
+ call void @store_as1(ptr addrspace(1) null)
+ ret void
+}
+
+define void @store_as1(ptr addrspace(1) %arg) {
+; CHECK-LABEL: define void @store_as1(
+; CHECK-SAME: ptr addrspace(1) nocapture nofree noundef writeonly align 2 dereferenceable_or_null(2) [[ARG:%.*]]) #[[ATTR2]] {
+; CHECK-NEXT: store i16 0, ptr addrspace(1) [[ARG]], align 2
+; CHECK-NEXT: ret void
+;
+ store i16 0, ptr addrspace(1) %arg, align 2
+ ret void
+}
+
+attributes #0 = { null_pointer_is_valid }
+;.
+; CHECK: attributes #[[ATTR0]] = { mustprogress nofree norecurse nosync nounwind null_pointer_is_valid willreturn memory(write) }
+; CHECK: attributes #[[ATTR1]] = { mustprogress nofree norecurse nosync nounwind null_pointer_is_valid willreturn memory(none) }
+; CHECK: attributes #[[ATTR2]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write) }
+; CHECK: attributes #[[ATTR3]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) }
+; CHECK: attributes #[[ATTR4]] = { nofree nosync nounwind willreturn memory(write) }
+;.
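
The contrast between the two callees above is the interesting part: @store_as0 lives in address space 0 and does not itself carry null_pointer_is_valid, so the store lets the Attributor infer both nonnull and dereferenceable(2) for its argument, whereas in addrspace(1) null may be a legal address, so @store_as1 only gets the weaker dereferenceable_or_null(2). A condensed view, with the deduced annotations copied from the CHECK lines:

; AS0: the store implies the pointer is nonnull and dereferenceable(2)
define void @store_as0(ptr nocapture nofree noundef nonnull writeonly align 2 dereferenceable(2) %p) {
  store i16 0, ptr %p, align 2
  ret void
}

; AS1: null can be a valid address, so only dereferenceable_or_null(2) is sound
define void @store_as1(ptr addrspace(1) nocapture nofree noundef writeonly align 2 dereferenceable_or_null(2) %arg) {
  store i16 0, ptr addrspace(1) %arg, align 2
  ret void
}
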
diff --git a/llvm/test/Transforms/Attributor/nofpclass.ll b/llvm/test/Transforms/Attributor/nofpclass.ll
index 5945fc5e7b0b..b38f9bae50cc 100644
--- a/llvm/test/Transforms/Attributor/nofpclass.ll
+++ b/llvm/test/Transforms/Attributor/nofpclass.ll
@@ -114,7 +114,7 @@ define <2 x double> @returned_strange_constant_vector_elt() {
; Test a vector element that's undef
define <3 x double> @returned_undef_constant_vector_elt() {
-; CHECK-LABEL: define nofpclass(nan inf sub norm) <3 x double> @returned_undef_constant_vector_elt() {
+; CHECK-LABEL: define <3 x double> @returned_undef_constant_vector_elt() {
; CHECK-NEXT: call void @unknown()
; CHECK-NEXT: ret <3 x double> <double -0.000000e+00, double 0.000000e+00, double undef>
;
diff --git a/llvm/test/Transforms/ConstraintElimination/sext-unsigned-predicates.ll b/llvm/test/Transforms/ConstraintElimination/sext-unsigned-predicates.ll
index ac3e57768ae5..00dc48ef89c9 100644
--- a/llvm/test/Transforms/ConstraintElimination/sext-unsigned-predicates.ll
+++ b/llvm/test/Transforms/ConstraintElimination/sext-unsigned-predicates.ll
@@ -13,7 +13,8 @@ define void @uge_sext(i16 %x, i32 %y) {
; CHECK-NEXT: [[AND:%.*]] = and i1 [[C_1]], [[C_2]]
; CHECK-NEXT: br i1 [[AND]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: [[T_1:%.*]] = icmp uge i32 [[X_EXT]], [[Y]]
+; CHECK-NEXT: call void @use(i1 [[T_1]])
; CHECK-NEXT: [[C_3:%.*]] = icmp uge i16 [[X]], -10
; CHECK-NEXT: call void @use(i1 [[C_3]])
; CHECK-NEXT: [[C_4:%.*]] = icmp uge i32 [[X_EXT]], -9
@@ -65,8 +66,7 @@ define void @uge_sext_known_positive(i16 %x, i32 %y) {
; CHECK-NEXT: br i1 [[AND]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: call void @use(i1 true)
-; CHECK-NEXT: [[T_2:%.*]] = icmp uge i16 [[X]], 10
-; CHECK-NEXT: call void @use(i1 [[T_2]])
+; CHECK-NEXT: call void @use(i1 true)
; CHECK-NEXT: [[C_3:%.*]] = icmp uge i32 [[X_EXT]], 11
; CHECK-NEXT: call void @use(i1 [[C_3]])
; CHECK-NEXT: [[C_4:%.*]] = icmp uge i32 [[X_EXT]], 11
diff --git a/llvm/test/Transforms/ConstraintElimination/transfer-signed-facts-to-unsigned.ll b/llvm/test/Transforms/ConstraintElimination/transfer-signed-facts-to-unsigned.ll
index 2fe92628dfa3..68e48c7d2944 100644
--- a/llvm/test/Transforms/ConstraintElimination/transfer-signed-facts-to-unsigned.ll
+++ b/llvm/test/Transforms/ConstraintElimination/transfer-signed-facts-to-unsigned.ll
@@ -503,11 +503,9 @@ define i32 @sge_2_gep(i32 %idx, ptr %src, i32 %idx.2) {
; CHECK-NEXT: [[CMP:%.*]] = icmp sge i32 [[IDX]], 2
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADD_PTR_2:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IDX_2:%.*]]
-; CHECK-NEXT: [[T_1:%.*]] = icmp ult ptr [[SRC]], [[ADD_PTR]]
; CHECK-NEXT: [[C_1:%.*]] = icmp ult ptr [[SRC]], [[ADD_PTR_2]]
-; CHECK-NEXT: [[X_1:%.*]] = xor i1 [[T_1]], [[C_1]]
-; CHECK-NEXT: [[F_1:%.*]] = icmp uge ptr [[SRC]], [[ADD_PTR]]
-; CHECK-NEXT: [[X_2:%.*]] = xor i1 [[X_1]], [[F_1]]
+; CHECK-NEXT: [[X_1:%.*]] = xor i1 true, [[C_1]]
+; CHECK-NEXT: [[X_2:%.*]] = xor i1 [[X_1]], false
; CHECK-NEXT: br i1 [[X_2]], label [[THEN:%.*]], label [[ELSE:%.*]]
; CHECK: then:
; CHECK-NEXT: ret i32 0
diff --git a/llvm/test/Transforms/Coroutines/coro-await-suspend-handle-in-ramp.ll b/llvm/test/Transforms/Coroutines/coro-await-suspend-handle-in-ramp.ll
new file mode 100644
index 000000000000..ee64ce6e4482
--- /dev/null
+++ b/llvm/test/Transforms/Coroutines/coro-await-suspend-handle-in-ramp.ll
@@ -0,0 +1,59 @@
+; Tests lowering of the handle form of coro.await.suspend in the coroutine ramp
+; RUN: opt < %s -passes='module(coro-early),cgscc(coro-split),simplifycfg' -S | FileCheck %s
+
+%Awaiter = type {}
+
+define void @f() presplitcoroutine {
+entry:
+ %awaiter = alloca %Awaiter
+ %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
+ %size = call i32 @llvm.coro.size.i32()
+ %alloc = call ptr @malloc(i32 %size)
+ %hdl = call ptr @llvm.coro.begin(token %id, ptr %alloc)
+ call void @llvm.coro.await.suspend.handle(ptr %awaiter, ptr %hdl, ptr @await_suspend_wrapper_handle)
+ %suspend.init = call i8 @llvm.coro.suspend(token none, i1 false)
+ switch i8 %suspend.init, label %ret [
+ i8 0, label %step
+ i8 1, label %cleanup
+ ]
+
+; Check that the calling convention for the resuming function is fastcc
+; CHECK: define {{[^@]*}} @f()
+; CHECK: entry:
+; CHECK: %[[NEXT_HDL:.+]] = call ptr @await_suspend_wrapper_handle(
+; CHECK-NEXT: %[[CONT:.+]] = call ptr @llvm.coro.subfn.addr(ptr %[[NEXT_HDL]], i8 0)
+; CHECK-NEXT: call fastcc void %[[CONT]](ptr %[[NEXT_HDL]])
+step:
+ br label %cleanup
+
+cleanup:
+ %mem = call ptr @llvm.coro.free(token %id, ptr %hdl)
+ call void @free(ptr %mem)
+ br label %ret
+
+ret:
+ call i1 @llvm.coro.end(ptr %hdl, i1 0, token none)
+ ret void
+}
+
+; Check that we haven't accidentally gone outside the body of @f
+; CHECK-LABEL: @f.resume(
+; CHECK-LABEL: @f.destroy(
+; CHECK-LABEL: @f.cleanup(
+
+declare ptr @await_suspend_wrapper_handle(ptr, ptr)
+
+declare ptr @llvm.coro.free(token, ptr)
+declare i32 @llvm.coro.size.i32()
+declare i8 @llvm.coro.suspend(token, i1)
+declare void @llvm.coro.resume(ptr)
+declare void @llvm.coro.destroy(ptr)
+
+declare token @llvm.coro.id(i32, ptr, ptr, ptr)
+declare i1 @llvm.coro.alloc(token)
+declare ptr @llvm.coro.begin(token, ptr)
+declare void @llvm.coro.await.suspend.handle(ptr, ptr, ptr)
+declare i1 @llvm.coro.end(ptr, i1, token)
+
+declare noalias ptr @malloc(i32)
+declare void @free(ptr)
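
The lowering the CHECK lines above encode, in isolation: coro-split replaces llvm.coro.await.suspend.handle in the ramp with a call to the wrapper, resolves the resume entry of the returned handle via llvm.coro.subfn.addr, and invokes it with the fastcc convention:

  %next = call ptr @await_suspend_wrapper_handle(ptr %awaiter, ptr %hdl)
  %resume = call ptr @llvm.coro.subfn.addr(ptr %next, i8 0)  ; i8 0 selects the resume function
  call fastcc void %resume(ptr %next)
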
diff --git a/llvm/test/Transforms/Coroutines/coro-debug-frame-variable-O1.ll b/llvm/test/Transforms/Coroutines/coro-debug-frame-variable-inlined.ll
index acd6a08d7c1b..ff070d9b02ac 100644
--- a/llvm/test/Transforms/Coroutines/coro-debug-frame-variable-O1.ll
+++ b/llvm/test/Transforms/Coroutines/coro-debug-frame-variable-inlined.ll
@@ -1,5 +1,5 @@
-; RUN: opt < %s -passes='module(coro-early),cgscc(inline,coro-split<reuse-storage>)' -S | FileCheck %s
-; RUN: opt --try-experimental-debuginfo-iterators < %s -passes='module(coro-early),cgscc(inline,coro-split<reuse-storage>)' -S | FileCheck %s
+; RUN: opt < %s -passes='module(coro-early),cgscc(inline,coro-split)' -S | FileCheck %s
+; RUN: opt --try-experimental-debuginfo-iterators < %s -passes='module(coro-early),cgscc(inline,coro-split)' -S | FileCheck %s
; Simplified version from pr#75104.
; Make sure we do not update the debug location for hoisted dbg.declare intrinsics when optimizing the coro frame.
diff --git a/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll b/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll
new file mode 100644
index 000000000000..330c61360e20
--- /dev/null
+++ b/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll
@@ -0,0 +1,142 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes='cgscc(coro-split),simplifycfg,early-cse' -S | FileCheck %s
+
+declare ptr @malloc(i64)
+
+%i8.array = type { [100 x i8] }
+declare void @consume.i8.array(ptr)
+
+@testbool = external local_unnamed_addr global i8, align 1
+
+; %testval has no explicit lifetime.end marker, so we must assume that it may
+; live across the suspension point.
+define void @HasNoLifetimeEnd() presplitcoroutine {
+; CHECK-LABEL: define void @HasNoLifetimeEnd() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ID:%.*]] = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr @HasNoLifetimeEnd.resumers)
+; CHECK-NEXT: [[ALLOC:%.*]] = call ptr @malloc(i64 16)
+; CHECK-NEXT: [[VFRAME:%.*]] = call noalias nonnull ptr @llvm.coro.begin(token [[ID]], ptr [[ALLOC]])
+; CHECK-NEXT: store ptr @HasNoLifetimeEnd.resume, ptr [[VFRAME]], align 8
+; CHECK-NEXT: [[DESTROY_ADDR:%.*]] = getelementptr inbounds [[HASNOLIFETIMEEND_FRAME:%.*]], ptr [[VFRAME]], i32 0, i32 1
+; CHECK-NEXT: store ptr @HasNoLifetimeEnd.destroy, ptr [[DESTROY_ADDR]], align 8
+; CHECK-NEXT: [[INDEX_ADDR1:%.*]] = getelementptr inbounds [[HASNOLIFETIMEEND_FRAME]], ptr [[VFRAME]], i32 0, i32 2
+; CHECK-NEXT: call void @consume.i8.array(ptr [[INDEX_ADDR1]])
+; CHECK-NEXT: [[INDEX_ADDR2:%.*]] = getelementptr inbounds [[HASNOLIFETIMEEND_FRAME]], ptr [[VFRAME]], i32 0, i32 3
+; CHECK-NEXT: store i1 false, ptr [[INDEX_ADDR2]], align 1
+; CHECK-NEXT: ret void
+;
+entry:
+ %testval = alloca %i8.array
+ %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
+ %alloc = call ptr @malloc(i64 16) #3
+ %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
+
+ call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @consume.i8.array(ptr %testval)
+
+ %save = call token @llvm.coro.save(ptr null)
+ %suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
+ switch i8 %suspend, label %exit [
+ i8 0, label %await.ready
+ i8 1, label %exit
+ ]
+await.ready:
+ br label %exit
+exit:
+ call i1 @llvm.coro.end(ptr null, i1 false, token none)
+ ret void
+}
+
+define void @LifetimeEndAfterCoroEnd() presplitcoroutine {
+; CHECK-LABEL: define void @LifetimeEndAfterCoroEnd() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ID:%.*]] = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr @LifetimeEndAfterCoroEnd.resumers)
+; CHECK-NEXT: [[ALLOC:%.*]] = call ptr @malloc(i64 16)
+; CHECK-NEXT: [[VFRAME:%.*]] = call noalias nonnull ptr @llvm.coro.begin(token [[ID]], ptr [[ALLOC]])
+; CHECK-NEXT: store ptr @LifetimeEndAfterCoroEnd.resume, ptr [[VFRAME]], align 8
+; CHECK-NEXT: [[DESTROY_ADDR:%.*]] = getelementptr inbounds [[LIFETIMEENDAFTERCOROEND_FRAME:%.*]], ptr [[VFRAME]], i32 0, i32 1
+; CHECK-NEXT: store ptr @LifetimeEndAfterCoroEnd.destroy, ptr [[DESTROY_ADDR]], align 8
+; CHECK-NEXT: [[INDEX_ADDR1:%.*]] = getelementptr inbounds [[LIFETIMEENDAFTERCOROEND_FRAME]], ptr [[VFRAME]], i32 0, i32 2
+; CHECK-NEXT: call void @consume.i8.array(ptr [[INDEX_ADDR1]])
+; CHECK-NEXT: [[INDEX_ADDR2:%.*]] = getelementptr inbounds [[LIFETIMEENDAFTERCOROEND_FRAME]], ptr [[VFRAME]], i32 0, i32 3
+; CHECK-NEXT: store i1 false, ptr [[INDEX_ADDR2]], align 1
+; CHECK-NEXT: ret void
+;
+entry:
+ %testval = alloca %i8.array
+ %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
+ %alloc = call ptr @malloc(i64 16) #3
+ %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
+
+ call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @consume.i8.array(ptr %testval)
+
+ %save = call token @llvm.coro.save(ptr null)
+ %suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
+ switch i8 %suspend, label %exit [
+ i8 0, label %await.ready
+ i8 1, label %exit
+ ]
+await.ready:
+ br label %exit
+exit:
+ call i1 @llvm.coro.end(ptr null, i1 false, token none)
+ call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+ ret void
+}
+
+define void @BranchWithoutLifetimeEnd() presplitcoroutine {
+; CHECK-LABEL: define void @BranchWithoutLifetimeEnd() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ID:%.*]] = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr @BranchWithoutLifetimeEnd.resumers)
+; CHECK-NEXT: [[ALLOC:%.*]] = call ptr @malloc(i64 16)
+; CHECK-NEXT: [[VFRAME:%.*]] = call noalias nonnull ptr @llvm.coro.begin(token [[ID]], ptr [[ALLOC]])
+; CHECK-NEXT: store ptr @BranchWithoutLifetimeEnd.resume, ptr [[VFRAME]], align 8
+; CHECK-NEXT: [[DESTROY_ADDR:%.*]] = getelementptr inbounds [[BRANCHWITHOUTLIFETIMEEND_FRAME:%.*]], ptr [[VFRAME]], i32 0, i32 1
+; CHECK-NEXT: store ptr @BranchWithoutLifetimeEnd.destroy, ptr [[DESTROY_ADDR]], align 8
+; CHECK-NEXT: [[TESTVAL:%.*]] = getelementptr inbounds [[BRANCHWITHOUTLIFETIMEEND_FRAME]], ptr [[VFRAME]], i32 0, i32 2
+; CHECK-NEXT: call void @consume.i8.array(ptr [[TESTVAL]])
+; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr @testbool, align 1
+; CHECK-NEXT: [[INDEX_ADDR1:%.*]] = getelementptr inbounds [[BRANCHWITHOUTLIFETIMEEND_FRAME]], ptr [[VFRAME]], i32 0, i32 3
+; CHECK-NEXT: store i1 false, ptr [[INDEX_ADDR1]], align 1
+; CHECK-NEXT: ret void
+;
+entry:
+ %testval = alloca %i8.array
+ %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
+ %alloc = call ptr @malloc(i64 16) #3
+ %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
+
+ call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @consume.i8.array(ptr %testval)
+
+ %0 = load i8, ptr @testbool, align 1
+ %tobool = trunc nuw i8 %0 to i1
+ br i1 %tobool, label %if.then, label %if.end
+
+if.then:
+ call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+ br label %if.end
+
+if.end:
+ %save = call token @llvm.coro.save(ptr null)
+ %suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
+ switch i8 %suspend, label %exit [
+ i8 0, label %await.ready
+ i8 1, label %exit
+ ]
+await.ready:
+ br label %exit
+exit:
+ call i1 @llvm.coro.end(ptr null, i1 false, token none)
+ ret void
+}
+
+
+declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr)
+declare ptr @llvm.coro.begin(token, ptr writeonly) #3
+declare ptr @llvm.coro.frame() #5
+declare i8 @llvm.coro.suspend(token, i1) #3
+declare i1 @llvm.coro.end(ptr, i1, token) #3
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
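
All three functions above exercise the same rule: an alloca whose lifetime cannot be proven to end before the suspend point must be spilled to the coroutine frame. Whether the lifetime.end is missing entirely, placed after coro.end, or present on only one branch, the CHECK lines show %testval materializing as a frame field rather than staying a ramp-local alloca, roughly:

  ; %testval becomes a field of the coroutine frame (field index 2 in these
  ; tests; the frame type name is whatever coro-split emits for the function)
  %testval.addr = getelementptr inbounds %HasNoLifetimeEnd.Frame, ptr %vFrame, i32 0, i32 2
  call void @consume.i8.array(ptr %testval.addr)
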
diff --git a/llvm/test/Transforms/Coroutines/no-suspend.ll b/llvm/test/Transforms/Coroutines/no-suspend.ll
index 53eb98f1273a..fd8c5ac99095 100644
--- a/llvm/test/Transforms/Coroutines/no-suspend.ll
+++ b/llvm/test/Transforms/Coroutines/no-suspend.ll
@@ -325,7 +325,7 @@ body:
%save = call token @llvm.coro.save(ptr %hdl)
%subfn = call ptr @llvm.coro.subfn.addr(ptr %hdl, i8 1)
call fastcc void %subfn(ptr %hdl)
- ; memcpy separates destory from suspend, therefore cannot simplify.
+ ; memcpy separates destroy from suspend, therefore cannot simplify.
call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 1, i1 false)
%0 = call i8 @llvm.coro.suspend(token %save, i1 false)
switch i8 %0, label %suspend [i8 0, label %resume
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/mul.ll b/llvm/test/Transforms/CorrelatedValuePropagation/mul.ll
index b28107ef9d18..086043d4b7c1 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/mul.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/mul.ll
@@ -179,8 +179,7 @@ define i1 @nuw_range1(i8 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[C:%.*]] = add nuw nsw i8 [[B:%.*]], 1
; CHECK-NEXT: [[MUL:%.*]] = mul nuw i8 [[C]], 4
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[MUL]], 0
-; CHECK-NEXT: ret i1 [[CMP]]
+; CHECK-NEXT: ret i1 false
;
entry:
%c = add nuw nsw i8 %b, 1
@@ -194,8 +193,7 @@ define i1 @nuw_range2(i8 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[C:%.*]] = add nuw nsw i8 [[B:%.*]], 3
; CHECK-NEXT: [[MUL:%.*]] = mul nuw i8 [[C]], 4
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[MUL]], 2
-; CHECK-NEXT: ret i1 [[CMP]]
+; CHECK-NEXT: ret i1 false
;
entry:
%c = add nuw nsw i8 %b, 3
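
The range reasoning behind both folds, spelled out for @nuw_range1 (the @nuw_range2 case is analogous, with %c at least 3 and hence %mul at least 12, so 'icmp ult %mul, 2' is likewise false):

  %c = add nuw nsw i8 %b, 1    ; no unsigned wrap, so %c is in [1, 255]
  %mul = mul nuw i8 %c, 4      ; no unsigned wrap, so %mul is in [4, 255]
  %cmp = icmp eq i8 %mul, 0    ; unsatisfiable; CVP folds the result to false
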
diff --git a/llvm/test/Transforms/DeadStoreElimination/simple.ll b/llvm/test/Transforms/DeadStoreElimination/simple.ll
index e5d3dd09fa14..ef2c4ef564b2 100644
--- a/llvm/test/Transforms/DeadStoreElimination/simple.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/simple.ll
@@ -790,3 +790,16 @@ define i32 @test48(ptr %P, ptr noalias %Q, ptr %R) {
%l = load i32, ptr %R
ret i32 %l
}
+
+define void @test49() {
+; CHECK-LABEL: @test49(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr readonly null, i8 0, i64 0, i1 false)
+; CHECK-NEXT: store ptr null, ptr null, align 8
+; CHECK-NEXT: ret void
+;
+bb:
+ call void @llvm.memset.p0.i64(ptr readonly null, i8 0, i64 0, i1 false)
+ store ptr null, ptr null, align 8
+ ret void
+}
diff --git a/llvm/test/Transforms/DivRemPairs/AMDGPU/div-rem-pairs.ll b/llvm/test/Transforms/DivRemPairs/AMDGPU/div-rem-pairs.ll
new file mode 100644
index 000000000000..d01ded9ebbfd
--- /dev/null
+++ b/llvm/test/Transforms/DivRemPairs/AMDGPU/div-rem-pairs.ll
@@ -0,0 +1,129 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=div-rem-pairs -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
+
+define i32 @basic(ptr %p, i32 %x, i32 %y) {
+; CHECK-LABEL: define i32 @basic(
+; CHECK-SAME: ptr [[P:%.*]], i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[X_FROZEN:%.*]] = freeze i32 [[X]]
+; CHECK-NEXT: [[Y_FROZEN:%.*]] = freeze i32 [[Y]]
+; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[X_FROZEN]], [[Y_FROZEN]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[DIV]], [[Y_FROZEN]]
+; CHECK-NEXT: [[REM_DECOMPOSED:%.*]] = sub i32 [[X_FROZEN]], [[TMP1]]
+; CHECK-NEXT: store i32 [[DIV]], ptr [[P]], align 4
+; CHECK-NEXT: ret i32 [[REM_DECOMPOSED]]
+;
+ %div = udiv i32 %x, %y
+ %rem = urem i32 %x, %y
+ store i32 %div, ptr %p, align 4
+ ret i32 %rem
+}
+
+define i32 @no_freezes(ptr %p, i32 noundef %x, i32 noundef %y) {
+; CHECK-LABEL: define i32 @no_freezes(
+; CHECK-SAME: ptr [[P:%.*]], i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[X]], [[Y]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[DIV]], [[Y]]
+; CHECK-NEXT: [[REM_DECOMPOSED:%.*]] = sub i32 [[X]], [[TMP1]]
+; CHECK-NEXT: store i32 [[DIV]], ptr [[P]], align 4
+; CHECK-NEXT: ret i32 [[REM_DECOMPOSED]]
+;
+ %div = udiv i32 %x, %y
+ %rem = urem i32 %x, %y
+ store i32 %div, ptr %p, align 4
+ ret i32 %rem
+}
+
+define i32 @poison_does_not_freeze(ptr %p, i32 noundef %x, i32 noundef %y) {
+; CHECK-LABEL: define i32 @poison_does_not_freeze(
+; CHECK-SAME: ptr [[P:%.*]], i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[X2:%.*]] = shl nuw nsw i32 [[X]], 5
+; CHECK-NEXT: [[Y2:%.*]] = add nuw nsw i32 [[Y]], 1
+; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[X2]], [[Y2]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[DIV]], [[Y2]]
+; CHECK-NEXT: [[REM_DECOMPOSED:%.*]] = sub i32 [[X2]], [[TMP1]]
+; CHECK-NEXT: store i32 [[DIV]], ptr [[P]], align 4
+; CHECK-NEXT: ret i32 [[REM_DECOMPOSED]]
+;
+ %x2 = shl nuw nsw i32 %x, 5
+ %y2 = add nuw nsw i32 %y, 1
+ %div = udiv i32 %x2, %y2
+ %rem = urem i32 %x2, %y2
+ store i32 %div, ptr %p, align 4
+ ret i32 %rem
+}
+
+define i32 @poison_does_not_freeze_signed(ptr %p, i32 noundef %x, i32 noundef %y) {
+; CHECK-LABEL: define i32 @poison_does_not_freeze_signed(
+; CHECK-SAME: ptr [[P:%.*]], i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[X2:%.*]] = shl nuw nsw i32 [[X]], 5
+; CHECK-NEXT: [[Y2:%.*]] = add nuw nsw i32 [[Y]], 1
+; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[X2]], [[Y2]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[DIV]], [[Y2]]
+; CHECK-NEXT: [[REM_DECOMPOSED:%.*]] = sub i32 [[X2]], [[TMP1]]
+; CHECK-NEXT: store i32 [[DIV]], ptr [[P]], align 4
+; CHECK-NEXT: ret i32 [[REM_DECOMPOSED]]
+;
+ %x2 = shl nuw nsw i32 %x, 5
+ %y2 = add nuw nsw i32 %y, 1
+ %div = sdiv i32 %x2, %y2
+ %rem = srem i32 %x2, %y2
+ store i32 %div, ptr %p, align 4
+ ret i32 %rem
+}
+
+define <4 x i8> @poison_does_not_freeze_vector(ptr %p, <4 x i8> noundef %x, <4 x i8> noundef %y) {
+; CHECK-LABEL: define <4 x i8> @poison_does_not_freeze_vector(
+; CHECK-SAME: ptr [[P:%.*]], <4 x i8> noundef [[X:%.*]], <4 x i8> noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[X2:%.*]] = shl nuw nsw <4 x i8> [[X]], <i8 5, i8 5, i8 5, i8 5>
+; CHECK-NEXT: [[Y2:%.*]] = add nuw nsw <4 x i8> [[Y]], <i8 1, i8 1, i8 1, i8 1>
+; CHECK-NEXT: [[DIV:%.*]] = udiv <4 x i8> [[X2]], [[Y2]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul <4 x i8> [[DIV]], [[Y2]]
+; CHECK-NEXT: [[REM_DECOMPOSED:%.*]] = sub <4 x i8> [[X2]], [[TMP1]]
+; CHECK-NEXT: store <4 x i8> [[DIV]], ptr [[P]], align 4
+; CHECK-NEXT: ret <4 x i8> [[REM_DECOMPOSED]]
+;
+ %x2 = shl nuw nsw <4 x i8> %x, <i8 5, i8 5, i8 5, i8 5>
+ %y2 = add nuw nsw <4 x i8> %y, <i8 1, i8 1, i8 1, i8 1>
+ %div = udiv <4 x i8> %x2, %y2
+ %rem = urem <4 x i8> %x2, %y2
+ store <4 x i8> %div, ptr %p, align 4
+ ret <4 x i8> %rem
+}
+
+define i32 @explicit_poison_does_not_freeze(ptr %p, i32 noundef %y) {
+; CHECK-LABEL: define i32 @explicit_poison_does_not_freeze(
+; CHECK-SAME: ptr [[P:%.*]], i32 noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[X:%.*]] = add i32 poison, 1
+; CHECK-NEXT: [[Y2:%.*]] = add nuw nsw i32 [[Y]], 1
+; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[X]], [[Y2]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[DIV]], [[Y2]]
+; CHECK-NEXT: [[REM_DECOMPOSED:%.*]] = sub i32 [[X]], [[TMP1]]
+; CHECK-NEXT: store i32 [[DIV]], ptr [[P]], align 4
+; CHECK-NEXT: ret i32 [[REM_DECOMPOSED]]
+;
+ %x = add i32 poison, 1
+ %y2 = add nuw nsw i32 %y, 1
+ %div = udiv i32 %x, %y2
+ %rem = urem i32 %x, %y2
+ store i32 %div, ptr %p, align 4
+ ret i32 %rem
+}
+
+define i32 @explicit_poison_does_not_freeze_signed(ptr %p, i32 noundef %y) {
+; CHECK-LABEL: define i32 @explicit_poison_does_not_freeze_signed(
+; CHECK-SAME: ptr [[P:%.*]], i32 noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[X:%.*]] = add i32 poison, 1
+; CHECK-NEXT: [[Y2:%.*]] = add nuw nsw i32 [[Y]], 1
+; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[X]], [[Y2]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[DIV]], [[Y2]]
+; CHECK-NEXT: [[REM_DECOMPOSED:%.*]] = sub i32 [[X]], [[TMP1]]
+; CHECK-NEXT: store i32 [[DIV]], ptr [[P]], align 4
+; CHECK-NEXT: ret i32 [[REM_DECOMPOSED]]
+;
+ %x = add i32 poison, 1
+ %y2 = add nuw nsw i32 %y, 1
+ %div = sdiv i32 %x, %y2
+ %rem = srem i32 %x, %y2
+ store i32 %div, ptr %p, align 4
+ ret i32 %rem
+}
diff --git a/llvm/test/Transforms/DivRemPairs/AMDGPU/lit.local.cfg b/llvm/test/Transforms/DivRemPairs/AMDGPU/lit.local.cfg
new file mode 100644
index 000000000000..7c492428aec7
--- /dev/null
+++ b/llvm/test/Transforms/DivRemPairs/AMDGPU/lit.local.cfg
@@ -0,0 +1,2 @@
+if not "AMDGPU" in config.root.targets:
+ config.unsupported = True
diff --git a/llvm/test/Transforms/EntryExitInstrumenter/mcount-aix.ll b/llvm/test/Transforms/EntryExitInstrumenter/mcount-aix.ll
new file mode 100644
index 000000000000..82551f012d0b
--- /dev/null
+++ b/llvm/test/Transforms/EntryExitInstrumenter/mcount-aix.ll
@@ -0,0 +1,12 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes="function(ee-instrument),cgscc(inline),function(ee-instrument<post-inline>)" -S < %s | FileCheck %s
+
+target triple = "powerpc-ibm-aix7.2.0.0"
+
+define void @f1() "instrument-function-entry-inlined"="__mcount" {
+; CHECK-LABEL: define void @f1() {
+; CHECK-NEXT: call void @__mcount(ptr @[[GLOB0:[0-9]+]])
+; CHECK-NEXT: ret void
+;
+ ret void
+}
diff --git a/llvm/test/Transforms/EntryExitInstrumenter/mcount.ll b/llvm/test/Transforms/EntryExitInstrumenter/mcount.ll
index c444b060d613..bd5f4c2b51a8 100644
--- a/llvm/test/Transforms/EntryExitInstrumenter/mcount.ll
+++ b/llvm/test/Transforms/EntryExitInstrumenter/mcount.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -passes="function(ee-instrument),cgscc(inline),function(ee-instrument<post-inline>)" -S < %s | FileCheck %s
; Running the passes twice should not result in more instrumentation.
@@ -7,104 +8,126 @@ target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux"
define void @leaf_function() #0 {
-entry:
+; CHECK-LABEL: define void @leaf_function() {
+; CHECK-NEXT: call void @mcount()
+; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT: call void @__cyg_profile_func_enter(ptr @leaf_function, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP2:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT: call void @__cyg_profile_func_exit(ptr @leaf_function, ptr [[TMP2]])
+; CHECK-NEXT: ret void
+;
ret void
-
-; CHECK-LABEL: define void @leaf_function()
-; CHECK: entry:
-; CHECK-NEXT: call void @mcount()
-; CHECK-NEXT: %0 = call ptr @llvm.returnaddress(i32 0)
-; CHECK-NEXT: call void @__cyg_profile_func_enter(ptr @leaf_function, ptr %0)
-; CHECK-NEXT: %1 = call ptr @llvm.returnaddress(i32 0)
-; CHECK-NEXT: call void @__cyg_profile_func_exit(ptr @leaf_function, ptr %1)
-; CHECK-NEXT: ret void
}
define void @root_function() #0 {
-entry:
+; CHECK-LABEL: define void @root_function() {
+; CHECK-NEXT: call void @mcount()
+; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT: call void @__cyg_profile_func_enter(ptr @root_function, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP2:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT: call void @__cyg_profile_func_enter(ptr @leaf_function, ptr [[TMP2]])
+; CHECK-NEXT: [[TMP3:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT: call void @__cyg_profile_func_exit(ptr @leaf_function, ptr [[TMP3]])
+; CHECK-NEXT: [[TMP4:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT: call void @__cyg_profile_func_exit(ptr @root_function, ptr [[TMP4]])
+; CHECK-NEXT: ret void
+;
call void @leaf_function()
ret void
-
-; CHECK-LABEL: define void @root_function()
-; CHECK: entry:
-; CHECK-NEXT: call void @mcount()
-
-; CHECK-NEXT: %0 = call ptr @llvm.returnaddress(i32 0)
-; CHECK-NEXT: call void @__cyg_profile_func_enter(ptr @root_function, ptr %0)
-
-; Entry and exit calls, inlined from @leaf_function()
-; CHECK-NEXT: %1 = call ptr @llvm.returnaddress(i32 0)
-; CHECK-NEXT: call void @__cyg_profile_func_enter(ptr @leaf_function, ptr %1)
-; CHECK-NEXT: %2 = call ptr @llvm.returnaddress(i32 0)
-; CHECK-NEXT: call void @__cyg_profile_func_exit(ptr @leaf_function, ptr %2)
-; CHECK-NEXT: %3 = call ptr @llvm.returnaddress(i32 0)
-
-; CHECK-NEXT: call void @__cyg_profile_func_exit(ptr @root_function, ptr %3)
-; CHECK-NEXT: ret void
}
-
-
; The mcount function has many different names.
-define void @f1() #1 { entry: ret void }
-; CHECK-LABEL: define void @f1
-; CHECK: call void @.mcount
-
-define void @f2() #2 { entry: ret void }
-; CHECK-LABEL: define void @f2
-; CHECK: call void @llvm.arm.gnu.eabi.mcount
+define void @f1() #1 {
+; CHECK-LABEL: define void @f1() {
+; CHECK-NEXT: call void @.mcount()
+; CHECK-NEXT: ret void
+;
+ ret void
+}
-define void @f3() #3 { entry: ret void }
-; CHECK-LABEL: define void @f3
-; CHECK: call void @"\01_mcount"
+define void @f2() #2 {
+; CHECK-LABEL: define void @f2() {
+; CHECK-NEXT: call void @llvm.arm.gnu.eabi.mcount()
+; CHECK-NEXT: ret void
+;
+ ret void
+}
-define void @f4() #4 { entry: ret void }
-; CHECK-LABEL: define void @f4
-; CHECK: call void @"\01mcount"
+define void @f3() #3 {
+; CHECK-LABEL: define void @f3() {
+; CHECK-NEXT: call void @"\01_mcount"()
+; CHECK-NEXT: ret void
+;
+ ret void
+}
-define void @f5() #5 { entry: ret void }
-; CHECK-LABEL: define void @f5
-; CHECK: call void @__mcount
+define void @f4() #4 {
+; CHECK-LABEL: define void @f4() {
+; CHECK-NEXT: call void @"\01mcount"()
+; CHECK-NEXT: ret void
+;
+ ret void
+}
-define void @f6() #6 { entry: ret void }
-; CHECK-LABEL: define void @f6
-; CHECK: call void @_mcount
+define void @f5() #5 {
+; CHECK-LABEL: define void @f5() {
+; CHECK-NEXT: call void @__mcount()
+; CHECK-NEXT: ret void
+;
+ ret void
+}
-define void @f7() #7 { entry: ret void }
-; CHECK-LABEL: define void @f7
-; CHECK: call void @__cyg_profile_func_enter_bare
+define void @f6() #6 {
+; CHECK-LABEL: define void @f6() {
+; CHECK-NEXT: call void @_mcount()
+; CHECK-NEXT: ret void
+;
+ ret void
+}
+define void @f7() #7 {
+; CHECK-LABEL: define void @f7() {
+; CHECK-NEXT: call void @__cyg_profile_func_enter_bare()
+; CHECK-NEXT: ret void
+;
+ ret void
+}
; Treat musttail calls as terminators; inserting between the musttail call and
; ret is not allowed.
declare ptr @tailcallee()
define ptr @tailcaller() #8 {
+; CHECK-LABEL: define ptr @tailcaller() {
+; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT: call void @__cyg_profile_func_exit(ptr @tailcaller, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP2:%.*]] = musttail call ptr @tailcallee()
+; CHECK-NEXT: ret ptr [[TMP2]]
+;
%1 = musttail call ptr @tailcallee()
ret ptr %1
-; CHECK-LABEL: define ptr @tailcaller
-; CHECK: call void @__cyg_profile_func_exit
-; CHECK: musttail call ptr @tailcallee
-; CHECK: ret
}
define ptr @tailcaller2() #8 {
+; CHECK-LABEL: define ptr @tailcaller2() {
+; CHECK-NEXT: [[TMP1:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT: call void @__cyg_profile_func_exit(ptr @tailcaller2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP2:%.*]] = musttail call ptr @tailcallee()
+; CHECK-NEXT: ret ptr [[TMP2]]
+;
%1 = musttail call ptr @tailcallee()
- %2 = bitcast ptr %1 to ptr
- ret ptr %2
-; CHECK-LABEL: define ptr @tailcaller2
-; CHECK: call void @__cyg_profile_func_exit
-; CHECK: musttail call ptr @tailcallee
-; CHECK: bitcast
-; CHECK: ret
+ ret ptr %1
}
;; naked functions are not instrumented, otherwise the argument registers
;; and the return address register (if present) would be clobbered.
-define void @naked() naked { entry: ret void }
-; CHECK-LABEL: define void @naked(
-; CHECK-LABEL-NEXT: entry:
-; CHECK-LABEL-NEXT: ret void
+define void @naked() naked {
+; CHECK-LABEL: define void @naked(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: ret void
+;
+ ret void
+}
; The attributes are "consumed" when the instrumentation is inserted.
; CHECK: attributes
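
For reference, the transformation these regenerated checks encode: ee-instrument reads the instrument-function-entry/exit attributes, inserts the requested counter call plus paired __cyg_profile_func_enter/exit calls keyed on llvm.returnaddress, and consumes the attributes so a second run adds nothing. On @leaf_function the result, per the CHECK lines above, is:

define void @leaf_function() {
  call void @mcount()
  %ra = call ptr @llvm.returnaddress(i32 0)
  call void @__cyg_profile_func_enter(ptr @leaf_function, ptr %ra)
  %ra2 = call ptr @llvm.returnaddress(i32 0)
  call void @__cyg_profile_func_exit(ptr @leaf_function, ptr %ra2)
  ret void
}
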
diff --git a/llvm/test/Transforms/FunctionAttrs/nocapture.ll b/llvm/test/Transforms/FunctionAttrs/nocapture.ll
index 8d6f6a7c73f8..7df6132ac6a3 100644
--- a/llvm/test/Transforms/FunctionAttrs/nocapture.ll
+++ b/llvm/test/Transforms/FunctionAttrs/nocapture.ll
@@ -163,24 +163,24 @@ define i1 @c6(ptr %q, i8 %bit) personality ptr @__gxx_personality_v0 {
; FNATTRS-LABEL: define noundef i1 @c6
; FNATTRS-SAME: (ptr readonly [[Q:%.*]], i8 [[BIT:%.*]]) #[[ATTR5:[0-9]+]] personality ptr @__gxx_personality_v0 {
; FNATTRS-NEXT: invoke void @throw_if_bit_set(ptr [[Q]], i8 [[BIT]])
-; FNATTRS-NEXT: to label [[RET0:%.*]] unwind label [[RET1:%.*]]
+; FNATTRS-NEXT: to label [[RET0:%.*]] unwind label [[RET1:%.*]]
; FNATTRS: ret0:
; FNATTRS-NEXT: ret i1 false
; FNATTRS: ret1:
; FNATTRS-NEXT: [[EXN:%.*]] = landingpad { ptr, i32 }
-; FNATTRS-NEXT: cleanup
+; FNATTRS-NEXT: cleanup
; FNATTRS-NEXT: ret i1 true
;
; ATTRIBUTOR: Function Attrs: nosync memory(read)
; ATTRIBUTOR-LABEL: define i1 @c6
; ATTRIBUTOR-SAME: (ptr readonly [[Q:%.*]], i8 [[BIT:%.*]]) #[[ATTR4:[0-9]+]] personality ptr @__gxx_personality_v0 {
; ATTRIBUTOR-NEXT: invoke void @throw_if_bit_set(ptr [[Q]], i8 [[BIT]]) #[[ATTR4]]
-; ATTRIBUTOR-NEXT: to label [[RET0:%.*]] unwind label [[RET1:%.*]]
+; ATTRIBUTOR-NEXT: to label [[RET0:%.*]] unwind label [[RET1:%.*]]
; ATTRIBUTOR: ret0:
; ATTRIBUTOR-NEXT: ret i1 false
; ATTRIBUTOR: ret1:
; ATTRIBUTOR-NEXT: [[EXN:%.*]] = landingpad { ptr, i32 }
-; ATTRIBUTOR-NEXT: cleanup
+; ATTRIBUTOR-NEXT: cleanup
; ATTRIBUTOR-NEXT: ret i1 true
;
invoke void @throw_if_bit_set(ptr %q, i8 %bit)
diff --git a/llvm/test/Transforms/FunctionAttrs/nonnull.ll b/llvm/test/Transforms/FunctionAttrs/nonnull.ll
index ec5545b969e5..4432c4f3c541 100644
--- a/llvm/test/Transforms/FunctionAttrs/nonnull.ll
+++ b/llvm/test/Transforms/FunctionAttrs/nonnull.ll
@@ -246,7 +246,7 @@ define ptr @test10(ptr %a, i64 %n) {
; ATTRIBUTOR-LABEL: define ptr @test10(
; ATTRIBUTOR-SAME: ptr nofree readnone [[A:%.*]], i64 [[N:%.*]]) #[[ATTR3:[0-9]+]] {
; ATTRIBUTOR-NEXT: [[CMP:%.*]] = icmp ne i64 [[N]], 0
-; ATTRIBUTOR-NEXT: call void @llvm.assume(i1 [[CMP]]) #[[ATTR14:[0-9]+]]
+; ATTRIBUTOR-NEXT: call void @llvm.assume(i1 [[CMP]]) #[[ATTR13:[0-9]+]]
; ATTRIBUTOR-NEXT: [[B:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[N]]
; ATTRIBUTOR-NEXT: ret ptr [[B]]
;
@@ -338,7 +338,7 @@ define internal void @test13(ptr %a, ptr %b, ptr %c) {
; FNATTRS-NEXT: ret void
;
; ATTRIBUTOR-LABEL: define internal void @test13(
-; ATTRIBUTOR-SAME: ptr nocapture nofree readnone [[A:%.*]], ptr nocapture nofree readnone [[B:%.*]], ptr nocapture nofree readnone [[C:%.*]]) #[[ATTR4:[0-9]+]] {
+; ATTRIBUTOR-SAME: ptr nocapture nofree nonnull readnone [[A:%.*]], ptr nocapture nofree readnone [[B:%.*]], ptr nocapture nofree readnone [[C:%.*]]) #[[ATTR0]] {
; ATTRIBUTOR-NEXT: ret void
;
ret void
@@ -382,7 +382,7 @@ define internal ptr @f1(ptr %arg) {
; FNATTRS-NEXT: ret ptr [[TMP10]]
;
; ATTRIBUTOR-LABEL: define internal ptr @f1(
-; ATTRIBUTOR-SAME: ptr nofree readonly [[ARG:%.*]]) #[[ATTR5:[0-9]+]] {
+; ATTRIBUTOR-SAME: ptr nofree readonly [[ARG:%.*]]) #[[ATTR4:[0-9]+]] {
; ATTRIBUTOR-NEXT: bb:
; ATTRIBUTOR-NEXT: [[TMP:%.*]] = icmp eq ptr [[ARG]], null
; ATTRIBUTOR-NEXT: br i1 [[TMP]], label [[BB9:%.*]], label [[BB1:%.*]]
@@ -392,11 +392,11 @@ define internal ptr @f1(ptr %arg) {
; ATTRIBUTOR-NEXT: br i1 [[TMP3]], label [[BB6:%.*]], label [[BB4:%.*]]
; ATTRIBUTOR: bb4:
; ATTRIBUTOR-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[ARG]], i64 1
-; ATTRIBUTOR-NEXT: [[TMP5B:%.*]] = tail call ptr @f3(ptr readonly [[TMP5]]) #[[ATTR15:[0-9]+]]
+; ATTRIBUTOR-NEXT: [[TMP5B:%.*]] = tail call ptr @f3(ptr nofree nonnull readonly [[TMP5]]) #[[ATTR14:[0-9]+]]
; ATTRIBUTOR-NEXT: [[TMP5C:%.*]] = getelementptr inbounds i32, ptr [[TMP5B]], i64 -1
; ATTRIBUTOR-NEXT: br label [[BB9]]
; ATTRIBUTOR: bb6:
-; ATTRIBUTOR-NEXT: [[TMP7:%.*]] = tail call ptr @f2(ptr readonly [[ARG]]) #[[ATTR15]]
+; ATTRIBUTOR-NEXT: [[TMP7:%.*]] = tail call ptr @f2(ptr nofree nonnull readonly [[ARG]]) #[[ATTR14]]
; ATTRIBUTOR-NEXT: ret ptr [[TMP7]]
; ATTRIBUTOR: bb9:
; ATTRIBUTOR-NEXT: [[TMP10:%.*]] = phi ptr [ [[TMP5C]], [[BB4]] ], [ inttoptr (i64 4 to ptr), [[BB:%.*]] ]
@@ -436,9 +436,9 @@ define internal ptr @f2(ptr %arg) {
; FNATTRS-NEXT: ret ptr [[TMP]]
;
; ATTRIBUTOR-LABEL: define internal ptr @f2(
-; ATTRIBUTOR-SAME: ptr readonly [[ARG:%.*]]) #[[ATTR5]] {
+; ATTRIBUTOR-SAME: ptr nofree nonnull readonly [[ARG:%.*]]) #[[ATTR4]] {
; ATTRIBUTOR-NEXT: bb:
-; ATTRIBUTOR-NEXT: [[TMP:%.*]] = tail call ptr @f1(ptr readonly [[ARG]]) #[[ATTR15]]
+; ATTRIBUTOR-NEXT: [[TMP:%.*]] = tail call ptr @f1(ptr nofree nonnull readonly [[ARG]]) #[[ATTR14]]
; ATTRIBUTOR-NEXT: ret ptr [[TMP]]
;
bb:
@@ -457,9 +457,9 @@ define dso_local noalias ptr @f3(ptr %arg) {
; FNATTRS-NEXT: ret ptr [[TMP]]
;
; ATTRIBUTOR-LABEL: define dso_local noalias ptr @f3(
-; ATTRIBUTOR-SAME: ptr nofree readonly [[ARG:%.*]]) #[[ATTR5]] {
+; ATTRIBUTOR-SAME: ptr nofree readonly [[ARG:%.*]]) #[[ATTR4]] {
; ATTRIBUTOR-NEXT: bb:
-; ATTRIBUTOR-NEXT: [[TMP:%.*]] = call ptr @f1(ptr nofree readonly [[ARG]]) #[[ATTR15]]
+; ATTRIBUTOR-NEXT: [[TMP:%.*]] = call ptr @f1(ptr nofree readonly [[ARG]]) #[[ATTR14]]
; ATTRIBUTOR-NEXT: ret ptr [[TMP]]
;
bb:
@@ -508,14 +508,14 @@ define void @f16(ptr %a, ptr %b, i8 %c) {
; FNATTRS-NEXT: ret void
;
; ATTRIBUTOR-LABEL: define void @f16(
-; ATTRIBUTOR-SAME: ptr nonnull [[A:%.*]], ptr [[B:%.*]], i8 [[C:%.*]]) #[[ATTR7:[0-9]+]] {
+; ATTRIBUTOR-SAME: ptr nonnull [[A:%.*]], ptr [[B:%.*]], i8 [[C:%.*]]) #[[ATTR6:[0-9]+]] {
; ATTRIBUTOR-NEXT: [[CMP:%.*]] = icmp eq i8 [[C]], 0
; ATTRIBUTOR-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
; ATTRIBUTOR: if.then:
-; ATTRIBUTOR-NEXT: tail call void @fun2(ptr nonnull [[A]], ptr nonnull [[B]]) #[[ATTR16:[0-9]+]]
+; ATTRIBUTOR-NEXT: tail call void @fun2(ptr nonnull [[A]], ptr nonnull [[B]]) #[[ATTR15:[0-9]+]]
; ATTRIBUTOR-NEXT: ret void
; ATTRIBUTOR: if.else:
-; ATTRIBUTOR-NEXT: tail call void @fun2(ptr nonnull [[A]], ptr [[B]]) #[[ATTR16]]
+; ATTRIBUTOR-NEXT: tail call void @fun2(ptr nonnull [[A]], ptr [[B]]) #[[ATTR15]]
; ATTRIBUTOR-NEXT: ret void
;
%cmp = icmp eq i8 %c, 0
@@ -550,17 +550,17 @@ define void @f17(ptr %a, i8 %c) {
; FNATTRS-NEXT: ret void
;
; ATTRIBUTOR-LABEL: define void @f17(
-; ATTRIBUTOR-SAME: ptr nonnull [[A:%.*]], i8 [[C:%.*]]) #[[ATTR7]] {
+; ATTRIBUTOR-SAME: ptr nonnull [[A:%.*]], i8 [[C:%.*]]) #[[ATTR6]] {
; ATTRIBUTOR-NEXT: [[CMP:%.*]] = icmp eq i8 [[C]], 0
; ATTRIBUTOR-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
; ATTRIBUTOR: if.then:
-; ATTRIBUTOR-NEXT: tail call void @fun0() #[[ATTR16]]
+; ATTRIBUTOR-NEXT: tail call void @fun0() #[[ATTR15]]
; ATTRIBUTOR-NEXT: br label [[CONT:%.*]]
; ATTRIBUTOR: if.else:
-; ATTRIBUTOR-NEXT: tail call void @fun0() #[[ATTR16]]
+; ATTRIBUTOR-NEXT: tail call void @fun0() #[[ATTR15]]
; ATTRIBUTOR-NEXT: br label [[CONT]]
; ATTRIBUTOR: cont:
-; ATTRIBUTOR-NEXT: tail call void @fun1(ptr nonnull [[A]]) #[[ATTR16]]
+; ATTRIBUTOR-NEXT: tail call void @fun1(ptr nonnull [[A]]) #[[ATTR15]]
; ATTRIBUTOR-NEXT: ret void
;
%cmp = icmp eq i8 %c, 0
@@ -611,26 +611,26 @@ define void @f18(ptr %a, ptr %b, i8 %c) {
; FNATTRS-NEXT: ret void
;
; ATTRIBUTOR-LABEL: define void @f18(
-; ATTRIBUTOR-SAME: ptr nonnull [[A:%.*]], ptr [[B:%.*]], i8 [[C:%.*]]) #[[ATTR7]] {
+; ATTRIBUTOR-SAME: ptr nonnull [[A:%.*]], ptr [[B:%.*]], i8 [[C:%.*]]) #[[ATTR6]] {
; ATTRIBUTOR-NEXT: [[CMP1:%.*]] = icmp eq i8 [[C]], 0
; ATTRIBUTOR-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
; ATTRIBUTOR: if.then:
-; ATTRIBUTOR-NEXT: tail call void @fun0() #[[ATTR16]]
+; ATTRIBUTOR-NEXT: tail call void @fun0() #[[ATTR15]]
; ATTRIBUTOR-NEXT: br label [[CONT:%.*]]
; ATTRIBUTOR: if.else:
-; ATTRIBUTOR-NEXT: tail call void @fun0() #[[ATTR16]]
+; ATTRIBUTOR-NEXT: tail call void @fun0() #[[ATTR15]]
; ATTRIBUTOR-NEXT: br label [[CONT]]
; ATTRIBUTOR: cont:
; ATTRIBUTOR-NEXT: [[CMP2:%.*]] = icmp eq i8 [[C]], 1
; ATTRIBUTOR-NEXT: br i1 [[CMP2]], label [[CONT_THEN:%.*]], label [[CONT_ELSE:%.*]]
; ATTRIBUTOR: cont.then:
-; ATTRIBUTOR-NEXT: tail call void @fun1(ptr nonnull [[B]]) #[[ATTR16]]
+; ATTRIBUTOR-NEXT: tail call void @fun1(ptr nonnull [[B]]) #[[ATTR15]]
; ATTRIBUTOR-NEXT: br label [[CONT2:%.*]]
; ATTRIBUTOR: cont.else:
-; ATTRIBUTOR-NEXT: tail call void @fun0() #[[ATTR16]]
+; ATTRIBUTOR-NEXT: tail call void @fun0() #[[ATTR15]]
; ATTRIBUTOR-NEXT: br label [[CONT2]]
; ATTRIBUTOR: cont2:
-; ATTRIBUTOR-NEXT: tail call void @fun1(ptr nonnull [[A]]) #[[ATTR16]]
+; ATTRIBUTOR-NEXT: tail call void @fun1(ptr nonnull [[A]]) #[[ATTR15]]
; ATTRIBUTOR-NEXT: ret void
;
%cmp1 = icmp eq i8 %c, 0
@@ -674,7 +674,7 @@ define void @f19(ptr %a, ptr %b, i8 %c) {
; FNATTRS-NEXT: ret void
;
; ATTRIBUTOR-LABEL: define void @f19(
-; ATTRIBUTOR-SAME: ptr [[A:%.*]], ptr nonnull [[B:%.*]], i8 [[C:%.*]]) #[[ATTR8:[0-9]+]] {
+; ATTRIBUTOR-SAME: ptr [[A:%.*]], ptr nonnull [[B:%.*]], i8 [[C:%.*]]) #[[ATTR7:[0-9]+]] {
; ATTRIBUTOR-NEXT: br label [[LOOP_HEADER:%.*]]
; ATTRIBUTOR: loop.header:
; ATTRIBUTOR-NEXT: [[CMP2:%.*]] = icmp eq i8 [[C]], 0
@@ -883,7 +883,7 @@ define i8 @parent7(ptr %a) {
;
; ATTRIBUTOR-LABEL: define i8 @parent7(
; ATTRIBUTOR-SAME: ptr nonnull [[A:%.*]]) {
-; ATTRIBUTOR-NEXT: [[RET:%.*]] = call i8 @use1safecall(ptr nonnull [[A]]) #[[ATTR16]]
+; ATTRIBUTOR-NEXT: [[RET:%.*]] = call i8 @use1safecall(ptr nonnull [[A]]) #[[ATTR15]]
; ATTRIBUTOR-NEXT: call void @use1nonnull(ptr nonnull [[A]])
; ATTRIBUTOR-NEXT: ret i8 [[RET]]
;
@@ -915,7 +915,7 @@ define i1 @parent8(ptr %a, ptr %bogus1, ptr %b) personality ptr @esfp{
; FNATTRS-NEXT: unreachable
;
; ATTRIBUTOR-LABEL: define i1 @parent8(
-; ATTRIBUTOR-SAME: ptr nonnull [[A:%.*]], ptr nocapture nofree readnone [[BOGUS1:%.*]], ptr nonnull [[B:%.*]]) #[[ATTR8]] personality ptr @esfp {
+; ATTRIBUTOR-SAME: ptr nonnull [[A:%.*]], ptr nocapture nofree readnone [[BOGUS1:%.*]], ptr nonnull [[B:%.*]]) #[[ATTR7]] personality ptr @esfp {
; ATTRIBUTOR-NEXT: entry:
; ATTRIBUTOR-NEXT: invoke void @use2nonnull(ptr nonnull [[A]], ptr nonnull [[B]])
; ATTRIBUTOR-NEXT: to label [[CONT:%.*]] unwind label [[EXC:%.*]]
@@ -965,7 +965,7 @@ define ptr @gep1_no_null_opt(ptr %p) #0 {
; FNATTRS-NEXT: ret ptr [[Q]]
;
; ATTRIBUTOR-LABEL: define ptr @gep1_no_null_opt(
-; ATTRIBUTOR-SAME: ptr nofree readnone [[P:%.*]]) #[[ATTR9:[0-9]+]] {
+; ATTRIBUTOR-SAME: ptr nofree readnone [[P:%.*]]) #[[ATTR8:[0-9]+]] {
; ATTRIBUTOR-NEXT: [[Q:%.*]] = getelementptr inbounds i32, ptr [[P]], i32 1
; ATTRIBUTOR-NEXT: ret ptr [[Q]]
;
@@ -1006,8 +1006,8 @@ define internal ptr @g2() {
; FNATTRS-SAME: ) #[[ATTR0]] {
; FNATTRS-NEXT: ret ptr inttoptr (i64 4 to ptr)
;
-; ATTRIBUTOR-LABEL: define internal ptr @g2(
-; ATTRIBUTOR-SAME: ) #[[ATTR10:[0-9]+]] {
+; ATTRIBUTOR-LABEL: define internal nonnull ptr @g2(
+; ATTRIBUTOR-SAME: ) #[[ATTR0]] {
; ATTRIBUTOR-NEXT: ret ptr inttoptr (i64 4 to ptr)
;
ret ptr inttoptr (i64 4 to ptr)
@@ -1021,7 +1021,7 @@ define ptr @g1() {
;
; ATTRIBUTOR-LABEL: define ptr @g1(
; ATTRIBUTOR-SAME: ) #[[ATTR0]] {
-; ATTRIBUTOR-NEXT: [[C:%.*]] = call ptr @g2() #[[ATTR10]]
+; ATTRIBUTOR-NEXT: [[C:%.*]] = call ptr @g2() #[[ATTR16:[0-9]+]]
; ATTRIBUTOR-NEXT: ret ptr [[C]]
;
%c = call ptr @g2()
@@ -1036,8 +1036,8 @@ define internal void @called_by_weak(ptr %a) {
; FNATTRS-NEXT: ret void
;
; ATTRIBUTOR-LABEL: define internal void @called_by_weak(
-; ATTRIBUTOR-SAME: ptr nocapture readnone [[A:%.*]]) #[[ATTR11:[0-9]+]] {
-; ATTRIBUTOR-NEXT: call void @use_i32_ptr(ptr [[A]])
+; ATTRIBUTOR-SAME: ptr nocapture nonnull readnone [[A:%.*]]) #[[ATTR10:[0-9]+]] {
+; ATTRIBUTOR-NEXT: call void @use_i32_ptr(ptr nonnull [[A]]) #[[ATTR17:[0-9]+]]
; ATTRIBUTOR-NEXT: ret void
;
call void @use_i32_ptr(ptr %a)
@@ -1068,8 +1068,8 @@ define internal void @control(ptr dereferenceable(4) %a) {
; FNATTRS-NEXT: ret void
;
; ATTRIBUTOR-LABEL: define internal void @control(
-; ATTRIBUTOR-SAME: ptr nocapture readnone dereferenceable(4) [[A:%.*]]) #[[ATTR11]] {
-; ATTRIBUTOR-NEXT: call void @use_i32_ptr(ptr [[A]])
+; ATTRIBUTOR-SAME: ptr nocapture nonnull readnone dereferenceable(4) [[A:%.*]]) #[[ATTR10]] {
+; ATTRIBUTOR-NEXT: call void @use_i32_ptr(ptr [[A]]) #[[ATTR17]]
; ATTRIBUTOR-NEXT: ret void
;
call void @use_i32_ptr(ptr %a)
@@ -1083,7 +1083,7 @@ define internal void @naked(ptr dereferenceable(4) %a) naked {
; FNATTRS-NEXT: ret void
;
; ATTRIBUTOR-LABEL: define internal void @naked(
-; ATTRIBUTOR-SAME: ptr dereferenceable(4) [[A:%.*]]) #[[ATTR12:[0-9]+]] {
+; ATTRIBUTOR-SAME: ptr nonnull dereferenceable(4) [[A:%.*]]) #[[ATTR11:[0-9]+]] {
; ATTRIBUTOR-NEXT: call void @use_i32_ptr(ptr [[A]])
; ATTRIBUTOR-NEXT: ret void
;
@@ -1098,7 +1098,7 @@ define internal void @optnone(ptr dereferenceable(4) %a) optnone noinline {
; FNATTRS-NEXT: ret void
;
; ATTRIBUTOR-LABEL: define internal void @optnone(
-; ATTRIBUTOR-SAME: ptr dereferenceable(4) [[A:%.*]]) #[[ATTR13:[0-9]+]] {
+; ATTRIBUTOR-SAME: ptr nonnull dereferenceable(4) [[A:%.*]]) #[[ATTR12:[0-9]+]] {
; ATTRIBUTOR-NEXT: call void @use_i32_ptr(ptr [[A]])
; ATTRIBUTOR-NEXT: ret void
;
@@ -1135,35 +1135,20 @@ define void @make_live(ptr nonnull dereferenceable(8) %a) {
declare void @h(ptr) willreturn nounwind
declare i32 @g(ptr) willreturn nounwind
define i32 @nonnull_exec_ctx_1(ptr %a, i32 %b) {
-; FNATTRS-LABEL: define i32 @nonnull_exec_ctx_1(
-; FNATTRS-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR7]] {
-; FNATTRS-NEXT: en:
-; FNATTRS-NEXT: [[TMP3:%.*]] = icmp eq i32 [[B]], 0
-; FNATTRS-NEXT: br i1 [[TMP3]], label [[EX:%.*]], label [[HD:%.*]]
-; FNATTRS: ex:
-; FNATTRS-NEXT: [[TMP5:%.*]] = tail call i32 @g(ptr nonnull [[A]])
-; FNATTRS-NEXT: ret i32 [[TMP5]]
-; FNATTRS: hd:
-; FNATTRS-NEXT: [[TMP7:%.*]] = phi i32 [ [[TMP8:%.*]], [[HD]] ], [ 0, [[EN:%.*]] ]
-; FNATTRS-NEXT: tail call void @h(ptr [[A]])
-; FNATTRS-NEXT: [[TMP8]] = add nuw i32 [[TMP7]], 1
-; FNATTRS-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], [[B]]
-; FNATTRS-NEXT: br i1 [[TMP9]], label [[EX]], label [[HD]]
-;
-; ATTRIBUTOR-LABEL: define i32 @nonnull_exec_ctx_1(
-; ATTRIBUTOR-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR8]] {
-; ATTRIBUTOR-NEXT: en:
-; ATTRIBUTOR-NEXT: [[TMP3:%.*]] = icmp eq i32 [[B]], 0
-; ATTRIBUTOR-NEXT: br i1 [[TMP3]], label [[EX:%.*]], label [[HD:%.*]]
-; ATTRIBUTOR: ex:
-; ATTRIBUTOR-NEXT: [[TMP5:%.*]] = tail call i32 @g(ptr nonnull [[A]])
-; ATTRIBUTOR-NEXT: ret i32 [[TMP5]]
-; ATTRIBUTOR: hd:
-; ATTRIBUTOR-NEXT: [[TMP7:%.*]] = phi i32 [ [[TMP8:%.*]], [[HD]] ], [ 0, [[EN:%.*]] ]
-; ATTRIBUTOR-NEXT: tail call void @h(ptr [[A]])
-; ATTRIBUTOR-NEXT: [[TMP8]] = add nuw i32 [[TMP7]], 1
-; ATTRIBUTOR-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], [[B]]
-; ATTRIBUTOR-NEXT: br i1 [[TMP9]], label [[EX]], label [[HD]]
+; COMMON-LABEL: define i32 @nonnull_exec_ctx_1(
+; COMMON-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR7:[0-9]+]] {
+; COMMON-NEXT: en:
+; COMMON-NEXT: [[TMP3:%.*]] = icmp eq i32 [[B]], 0
+; COMMON-NEXT: br i1 [[TMP3]], label [[EX:%.*]], label [[HD:%.*]]
+; COMMON: ex:
+; COMMON-NEXT: [[TMP5:%.*]] = tail call i32 @g(ptr nonnull [[A]])
+; COMMON-NEXT: ret i32 [[TMP5]]
+; COMMON: hd:
+; COMMON-NEXT: [[TMP7:%.*]] = phi i32 [ [[TMP8:%.*]], [[HD]] ], [ 0, [[EN:%.*]] ]
+; COMMON-NEXT: tail call void @h(ptr [[A]])
+; COMMON-NEXT: [[TMP8]] = add nuw i32 [[TMP7]], 1
+; COMMON-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], [[B]]
+; COMMON-NEXT: br i1 [[TMP9]], label [[EX]], label [[HD]]
;
en:
%tmp3 = icmp eq i32 %b, 0
@@ -1182,39 +1167,22 @@ hd:
}
define i32 @nonnull_exec_ctx_1b(ptr %a, i32 %b) {
-; FNATTRS-LABEL: define i32 @nonnull_exec_ctx_1b(
-; FNATTRS-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR7]] {
-; FNATTRS-NEXT: en:
-; FNATTRS-NEXT: [[TMP3:%.*]] = icmp eq i32 [[B]], 0
-; FNATTRS-NEXT: br i1 [[TMP3]], label [[EX:%.*]], label [[HD:%.*]]
-; FNATTRS: ex:
-; FNATTRS-NEXT: [[TMP5:%.*]] = tail call i32 @g(ptr nonnull [[A]])
-; FNATTRS-NEXT: ret i32 [[TMP5]]
-; FNATTRS: hd:
-; FNATTRS-NEXT: [[TMP7:%.*]] = phi i32 [ [[TMP8:%.*]], [[HD2:%.*]] ], [ 0, [[EN:%.*]] ]
-; FNATTRS-NEXT: tail call void @h(ptr [[A]])
-; FNATTRS-NEXT: br label [[HD2]]
-; FNATTRS: hd2:
-; FNATTRS-NEXT: [[TMP8]] = add nuw i32 [[TMP7]], 1
-; FNATTRS-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], [[B]]
-; FNATTRS-NEXT: br i1 [[TMP9]], label [[EX]], label [[HD]]
-;
-; ATTRIBUTOR-LABEL: define i32 @nonnull_exec_ctx_1b(
-; ATTRIBUTOR-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR8]] {
-; ATTRIBUTOR-NEXT: en:
-; ATTRIBUTOR-NEXT: [[TMP3:%.*]] = icmp eq i32 [[B]], 0
-; ATTRIBUTOR-NEXT: br i1 [[TMP3]], label [[EX:%.*]], label [[HD:%.*]]
-; ATTRIBUTOR: ex:
-; ATTRIBUTOR-NEXT: [[TMP5:%.*]] = tail call i32 @g(ptr nonnull [[A]])
-; ATTRIBUTOR-NEXT: ret i32 [[TMP5]]
-; ATTRIBUTOR: hd:
-; ATTRIBUTOR-NEXT: [[TMP7:%.*]] = phi i32 [ [[TMP8:%.*]], [[HD2:%.*]] ], [ 0, [[EN:%.*]] ]
-; ATTRIBUTOR-NEXT: tail call void @h(ptr [[A]])
-; ATTRIBUTOR-NEXT: br label [[HD2]]
-; ATTRIBUTOR: hd2:
-; ATTRIBUTOR-NEXT: [[TMP8]] = add nuw i32 [[TMP7]], 1
-; ATTRIBUTOR-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], [[B]]
-; ATTRIBUTOR-NEXT: br i1 [[TMP9]], label [[EX]], label [[HD]]
+; COMMON-LABEL: define i32 @nonnull_exec_ctx_1b(
+; COMMON-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR7]] {
+; COMMON-NEXT: en:
+; COMMON-NEXT: [[TMP3:%.*]] = icmp eq i32 [[B]], 0
+; COMMON-NEXT: br i1 [[TMP3]], label [[EX:%.*]], label [[HD:%.*]]
+; COMMON: ex:
+; COMMON-NEXT: [[TMP5:%.*]] = tail call i32 @g(ptr nonnull [[A]])
+; COMMON-NEXT: ret i32 [[TMP5]]
+; COMMON: hd:
+; COMMON-NEXT: [[TMP7:%.*]] = phi i32 [ [[TMP8:%.*]], [[HD2:%.*]] ], [ 0, [[EN:%.*]] ]
+; COMMON-NEXT: tail call void @h(ptr [[A]])
+; COMMON-NEXT: br label [[HD2]]
+; COMMON: hd2:
+; COMMON-NEXT: [[TMP8]] = add nuw i32 [[TMP7]], 1
+; COMMON-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], [[B]]
+; COMMON-NEXT: br i1 [[TMP9]], label [[EX]], label [[HD]]
;
en:
%tmp3 = icmp eq i32 %b, 0
@@ -1252,7 +1220,7 @@ define i32 @nonnull_exec_ctx_2(ptr %a, i32 %b) willreturn nounwind {
; FNATTRS-NEXT: br i1 [[TMP9]], label [[EX]], label [[HD]]
;
; ATTRIBUTOR-LABEL: define i32 @nonnull_exec_ctx_2(
-; ATTRIBUTOR-SAME: ptr nonnull [[A:%.*]], i32 [[B:%.*]]) #[[ATTR7]] {
+; ATTRIBUTOR-SAME: ptr nonnull [[A:%.*]], i32 [[B:%.*]]) #[[ATTR6]] {
; ATTRIBUTOR-NEXT: en:
; ATTRIBUTOR-NEXT: [[TMP3:%.*]] = icmp eq i32 [[B]], 0
; ATTRIBUTOR-NEXT: br i1 [[TMP3]], label [[EX:%.*]], label [[HD:%.*]]
@@ -1301,7 +1269,7 @@ define i32 @nonnull_exec_ctx_2b(ptr %a, i32 %b) willreturn nounwind {
; FNATTRS-NEXT: br i1 [[TMP9]], label [[EX]], label [[HD]]
;
; ATTRIBUTOR-LABEL: define i32 @nonnull_exec_ctx_2b(
-; ATTRIBUTOR-SAME: ptr nonnull [[A:%.*]], i32 [[B:%.*]]) #[[ATTR7]] {
+; ATTRIBUTOR-SAME: ptr nonnull [[A:%.*]], i32 [[B:%.*]]) #[[ATTR6]] {
; ATTRIBUTOR-NEXT: en:
; ATTRIBUTOR-NEXT: [[TMP3:%.*]] = icmp eq i32 [[B]], 0
; ATTRIBUTOR-NEXT: br i1 [[TMP3]], label [[EX:%.*]], label [[HD:%.*]]
diff --git a/llvm/test/Transforms/FunctionAttrs/norecurse.ll b/llvm/test/Transforms/FunctionAttrs/norecurse.ll
index 7924428fb498..a902974fed28 100644
--- a/llvm/test/Transforms/FunctionAttrs/norecurse.ll
+++ b/llvm/test/Transforms/FunctionAttrs/norecurse.ll
@@ -4,10 +4,15 @@
define i32 @leaf() {
-; COMMON: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
-; COMMON-LABEL: define {{[^@]+}}@leaf
-; COMMON-SAME: () #[[ATTR0:[0-9]+]] {
-; COMMON-NEXT: ret i32 1
+; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
+; FNATTRS-LABEL: define {{[^@]+}}@leaf
+; FNATTRS-SAME: () #[[ATTR0:[0-9]+]] {
+; FNATTRS-NEXT: ret i32 1
+;
+; ATTRIBUTOR: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
+; ATTRIBUTOR-LABEL: define {{[^@]+}}@leaf
+; ATTRIBUTOR-SAME: () #[[ATTR0:[0-9]+]] {
+; ATTRIBUTOR-NEXT: ret i32 1
;
ret i32 1
}
@@ -108,9 +113,9 @@ define internal i32 @called_by_norecurse() {
; FNATTRS-NEXT: [[A:%.*]] = call i32 @k()
; FNATTRS-NEXT: ret i32 [[A]]
;
-; ATTRIBUTOR: Function Attrs: nosync memory(none)
+; ATTRIBUTOR: Function Attrs: norecurse nosync memory(none)
; ATTRIBUTOR-LABEL: define {{[^@]+}}@called_by_norecurse
-; ATTRIBUTOR-SAME: () #[[ATTR2]] {
+; ATTRIBUTOR-SAME: () #[[ATTR6:[0-9]+]] {
; ATTRIBUTOR-NEXT: [[A:%.*]] = call i32 @k() #[[ATTR7]]
; ATTRIBUTOR-NEXT: ret i32 [[A]]
;
@@ -127,7 +132,7 @@ define void @m() norecurse {
;
; ATTRIBUTOR: Function Attrs: norecurse nosync memory(none)
; ATTRIBUTOR-LABEL: define {{[^@]+}}@m
-; ATTRIBUTOR-SAME: () #[[ATTR6:[0-9]+]] {
+; ATTRIBUTOR-SAME: () #[[ATTR6]] {
; ATTRIBUTOR-NEXT: [[A:%.*]] = call i32 @called_by_norecurse() #[[ATTR2]]
; ATTRIBUTOR-NEXT: ret void
;
@@ -142,9 +147,9 @@ define internal i32 @called_by_norecurse_indirectly() {
; FNATTRS-NEXT: [[A:%.*]] = call i32 @k()
; FNATTRS-NEXT: ret i32 [[A]]
;
-; ATTRIBUTOR: Function Attrs: nosync memory(none)
+; ATTRIBUTOR: Function Attrs: norecurse nosync memory(none)
; ATTRIBUTOR-LABEL: define {{[^@]+}}@called_by_norecurse_indirectly
-; ATTRIBUTOR-SAME: () #[[ATTR2]] {
+; ATTRIBUTOR-SAME: () #[[ATTR6]] {
; ATTRIBUTOR-NEXT: [[A:%.*]] = call i32 @k() #[[ATTR7]]
; ATTRIBUTOR-NEXT: ret i32 [[A]]
;
@@ -159,9 +164,9 @@ define internal void @o() {
; FNATTRS-NEXT: [[A:%.*]] = call i32 @called_by_norecurse_indirectly()
; FNATTRS-NEXT: ret void
;
-; ATTRIBUTOR: Function Attrs: nosync memory(none)
+; ATTRIBUTOR: Function Attrs: norecurse nosync memory(none)
; ATTRIBUTOR-LABEL: define {{[^@]+}}@o
-; ATTRIBUTOR-SAME: () #[[ATTR2]] {
+; ATTRIBUTOR-SAME: () #[[ATTR6]] {
; ATTRIBUTOR-NEXT: [[A:%.*]] = call i32 @called_by_norecurse_indirectly() #[[ATTR2]]
; ATTRIBUTOR-NEXT: ret void
;
@@ -213,7 +218,7 @@ define internal void @q() {
; ATTRIBUTOR: Function Attrs: norecurse nosync memory(none)
; ATTRIBUTOR-LABEL: define {{[^@]+}}@q
; ATTRIBUTOR-SAME: () #[[ATTR6]] {
-; ATTRIBUTOR-NEXT: [[A:%.*]] = call i32 @escapes_as_parameter(ptr nonnull @escapes_as_parameter) #[[ATTR2]]
+; ATTRIBUTOR-NEXT: [[A:%.*]] = call i32 @escapes_as_parameter(ptr nocapture nofree nonnull readnone @escapes_as_parameter) #[[ATTR2]]
; ATTRIBUTOR-NEXT: ret void
;
%a = call i32 @escapes_as_parameter(ptr @escapes_as_parameter)
@@ -255,3 +260,5 @@ define void @r() norecurse {
; ATTRIBUTOR: attributes #[[ATTR7]] = { nosync }
; ATTRIBUTOR: attributes #[[ATTR8]] = { nofree willreturn }
;.
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; COMMON: {{.*}}
diff --git a/llvm/test/Transforms/FunctionAttrs/read-write-scc.ll b/llvm/test/Transforms/FunctionAttrs/read-write-scc.ll
index 3640eb59b884..be61990fd627 100644
--- a/llvm/test/Transforms/FunctionAttrs/read-write-scc.ll
+++ b/llvm/test/Transforms/FunctionAttrs/read-write-scc.ll
@@ -4,7 +4,7 @@
@i = global i32 0
define void @foo() {
-; CHECK: Function Attrs: nofree nosync nounwind
+; CHECK: Function Attrs: nofree nosync nounwind memory(readwrite, argmem: none, inaccessiblemem: none)
; CHECK-LABEL: define {{[^@]+}}@foo
; CHECK-SAME: () #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: store i32 1, ptr @i, align 4
@@ -17,7 +17,7 @@ define void @foo() {
}
define void @bar() {
-; CHECK: Function Attrs: nofree nosync nounwind
+; CHECK: Function Attrs: nofree nosync nounwind memory(readwrite, argmem: none, inaccessiblemem: none)
; CHECK-LABEL: define {{[^@]+}}@bar
; CHECK-SAME: () #[[ATTR0]] {
; CHECK-NEXT: [[I:%.*]] = load i32, ptr @i, align 4
diff --git a/llvm/test/Transforms/FunctionAttrs/willreturn.ll b/llvm/test/Transforms/FunctionAttrs/willreturn.ll
index bf3f4adf7eaa..70926345ce27 100644
--- a/llvm/test/Transforms/FunctionAttrs/willreturn.ll
+++ b/llvm/test/Transforms/FunctionAttrs/willreturn.ll
@@ -102,23 +102,23 @@ define i64 @mustprogress_mayunwind() mustprogress personality ptr @__gxx_persona
; FNATTRS: Function Attrs: mustprogress nofree nosync nounwind willreturn memory(none)
; FNATTRS-LABEL: @mustprogress_mayunwind(
; FNATTRS-NEXT: [[A:%.*]] = invoke i64 @fn_noread()
-; FNATTRS-NEXT: to label [[A:%.*]] unwind label [[B:%.*]]
+; FNATTRS-NEXT: to label [[A:%.*]] unwind label [[B:%.*]]
; FNATTRS: A:
; FNATTRS-NEXT: ret i64 10
; FNATTRS: B:
; FNATTRS-NEXT: [[VAL:%.*]] = landingpad { ptr, i32 }
-; FNATTRS-NEXT: catch ptr null
+; FNATTRS-NEXT: catch ptr null
; FNATTRS-NEXT: ret i64 0
;
; ATTRIBUTOR: Function Attrs: mustprogress nosync nounwind willreturn memory(none)
; ATTRIBUTOR-LABEL: @mustprogress_mayunwind(
-; ATTRIBUTOR-NEXT: [[A:%.*]] = invoke i64 @fn_noread()
-; ATTRIBUTOR-NEXT: to label [[A:%.*]] unwind label [[B:%.*]]
+; ATTRIBUTOR-NEXT: [[A:%.*]] = invoke i64 @fn_noread() #[[ATTR13:[0-9]+]]
+; ATTRIBUTOR-NEXT: to label [[A:%.*]] unwind label [[B:%.*]]
; ATTRIBUTOR: A:
; ATTRIBUTOR-NEXT: ret i64 10
; ATTRIBUTOR: B:
; ATTRIBUTOR-NEXT: [[VAL:%.*]] = landingpad { ptr, i32 }
-; ATTRIBUTOR-NEXT: catch ptr null
+; ATTRIBUTOR-NEXT: catch ptr null
; ATTRIBUTOR-NEXT: ret i64 0
;
%a = invoke i64 @fn_noread()
diff --git a/llvm/test/Transforms/FunctionImport/funcimport.ll b/llvm/test/Transforms/FunctionImport/funcimport.ll
index a0968a67f5ce..635750b33fff 100644
--- a/llvm/test/Transforms/FunctionImport/funcimport.ll
+++ b/llvm/test/Transforms/FunctionImport/funcimport.ll
@@ -166,7 +166,8 @@ declare void @variadic_va_start(...)
; GUID-DAG: GUID {{.*}} is linkoncefunc
; DUMP: Module [[M1:.*]] imports from 1 module
-; DUMP-NEXT: 15 functions imported from [[M2:.*]]
-; DUMP-NEXT: 4 vars imported from [[M2]]
+; DUMP-NEXT: 15 function definitions and 0 function declarations imported from [[M2:.*]]
+; DUMP-NEXT: 4 var definitions and 0 var declarations imported from [[M2]]
+
; DUMP: Imported 15 functions for Module [[M1]]
; DUMP-NEXT: Imported 4 global variables for Module [[M1]]
diff --git a/llvm/test/Transforms/FunctionSpecialization/function-specialization-constant-expression.ll b/llvm/test/Transforms/FunctionSpecialization/function-specialization-constant-expression.ll
index c242816b91d4..16a468511631 100644
--- a/llvm/test/Transforms/FunctionSpecialization/function-specialization-constant-expression.ll
+++ b/llvm/test/Transforms/FunctionSpecialization/function-specialization-constant-expression.ll
@@ -30,13 +30,13 @@ define internal i64 @zoo(i1 %flag) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[FLAG:%.*]], label [[PLUS:%.*]], label [[MINUS:%.*]]
; CHECK: plus:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @func2.specialized.2(ptr getelementptr inbounds ([[STRUCT:%.*]], ptr @Global, i64 0, i32 3))
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @func2.specialized.2(ptr getelementptr inbounds (i8, ptr @Global, i64 8))
; CHECK-NEXT: br label [[MERGE:%.*]]
; CHECK: minus:
-; CHECK-NEXT: [[TMP1:%.*]] = call i64 @func2.specialized.1(ptr getelementptr inbounds ([[STRUCT]], ptr @Global, i64 0, i32 4))
+; CHECK-NEXT: [[TMP1:%.*]] = call i64 @func2.specialized.1(ptr getelementptr inbounds (i8, ptr @Global, i64 16))
; CHECK-NEXT: br label [[MERGE]]
; CHECK: merge:
-; CHECK-NEXT: [[TMP2:%.*]] = phi i64 [ ptrtoint (ptr getelementptr inbounds ([[STRUCT]], ptr @Global, i64 0, i32 3) to i64), [[PLUS]] ], [ ptrtoint (ptr getelementptr inbounds ([[STRUCT]], ptr @Global, i64 0, i32 4) to i64), [[MINUS]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = phi i64 [ ptrtoint (ptr getelementptr inbounds (i8, ptr @Global, i64 8) to i64), [[PLUS]] ], [ ptrtoint (ptr getelementptr inbounds (i8, ptr @Global, i64 16) to i64), [[MINUS]] ]
; CHECK-NEXT: ret i64 [[TMP2]]
;
entry:
diff --git a/llvm/test/Transforms/GVN/PRE/load-pre-licm.ll b/llvm/test/Transforms/GVN/PRE/load-pre-licm.ll
index c52f46b4f63e..6a05d5b17dde 100644
--- a/llvm/test/Transforms/GVN/PRE/load-pre-licm.ll
+++ b/llvm/test/Transforms/GVN/PRE/load-pre-licm.ll
@@ -8,7 +8,7 @@ target triple = "i386-apple-darwin11.0.0"
define void @Bubble() nounwind noinline {
; CHECK-LABEL: @Bubble(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP7_PRE:%.*]] = load i32, ptr getelementptr inbounds ([5001 x i32], ptr @sortlist, i32 0, i32 1), align 4
+; CHECK-NEXT: [[TMP7_PRE:%.*]] = load i32, ptr getelementptr inbounds (i8, ptr @sortlist, i32 4), align 4
; CHECK-NEXT: br label [[WHILE_BODY5:%.*]]
; CHECK: while.body5:
; CHECK-NEXT: [[TMP7:%.*]] = phi i32 [ [[TMP7_PRE]], [[ENTRY:%.*]] ], [ [[TMP71:%.*]], [[IF_END:%.*]] ]
diff --git a/llvm/test/Transforms/GVN/PRE/phi-translate-2.ll b/llvm/test/Transforms/GVN/PRE/phi-translate-2.ll
index 46fde7a0a48c..bd54de4acd4f 100644
--- a/llvm/test/Transforms/GVN/PRE/phi-translate-2.ll
+++ b/llvm/test/Transforms/GVN/PRE/phi-translate-2.ll
@@ -63,8 +63,8 @@ define void @test2(i64 %i) {
; CHECK: if.then:
; CHECK-NEXT: [[CALL:%.*]] = tail call i64 (...) @goo()
; CHECK-NEXT: store i64 [[CALL]], ptr @g2, align 8
-; CHECK-NEXT: [[T2_PRE:%.*]] = load i64, ptr getelementptr inbounds ([100 x i64], ptr @a, i64 0, i64 3), align 8
-; CHECK-NEXT: [[T3_PRE:%.*]] = load i64, ptr getelementptr inbounds ([100 x i64], ptr @b, i64 0, i64 3), align 8
+; CHECK-NEXT: [[T2_PRE:%.*]] = load i64, ptr getelementptr inbounds (i8, ptr @a, i64 24), align 8
+; CHECK-NEXT: [[T3_PRE:%.*]] = load i64, ptr getelementptr inbounds (i8, ptr @b, i64 24), align 8
; CHECK-NEXT: [[DOTPRE:%.*]] = mul nsw i64 [[T3_PRE]], [[T2_PRE]]
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
diff --git a/llvm/test/Transforms/GVNHoist/infinite-loop-indirect.ll b/llvm/test/Transforms/GVNHoist/infinite-loop-indirect.ll
index aef55af81dca..a7e6ff30d8b2 100644
--- a/llvm/test/Transforms/GVNHoist/infinite-loop-indirect.ll
+++ b/llvm/test/Transforms/GVNHoist/infinite-loop-indirect.ll
@@ -292,7 +292,7 @@ define i32 @foo2(ptr nocapture readonly %i) local_unnamed_addr personality ptr @
; CHECK-NEXT: [[BC1:%.*]] = add i32 [[TMP0]], 10
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP2]], 0
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { ptr, i32 } [[TMP2]], 1
-; CHECK-NEXT: [[TMP5:%.*]] = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIi) #[[ATTR1]]
+; CHECK-NEXT: [[TMP5:%.*]] = tail call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi) #[[ATTR1]]
; CHECK-NEXT: [[MATCHES:%.*]] = icmp eq i32 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[BC7:%.*]] = add i32 [[TMP0]], 10
; CHECK-NEXT: [[TMP6:%.*]] = tail call ptr @__cxa_begin_catch(ptr [[TMP3]]) #[[ATTR1]]
@@ -340,7 +340,7 @@ lpad:
%bc1 = add i32 %0, 10
%3 = extractvalue { ptr, i32 } %2, 0
%4 = extractvalue { ptr, i32 } %2, 1
- %5 = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIi) #2
+ %5 = tail call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi) #2
%matches = icmp eq i32 %4, %5
%bc7 = add i32 %0, 10
%6 = tail call ptr @__cxa_begin_catch(ptr %3) #2
@@ -383,7 +383,7 @@ declare void @__cxa_throw(ptr, ptr, ptr) local_unnamed_addr
declare i32 @__gxx_personality_v0(...)
; Function Attrs: nounwind readnone
-declare i32 @llvm.eh.typeid.for(ptr) #1
+declare i32 @llvm.eh.typeid.for.p0(ptr) #1
declare ptr @__cxa_begin_catch(ptr) local_unnamed_addr
diff --git a/llvm/test/Transforms/GVNSink/sink-common-code-dbg.ll b/llvm/test/Transforms/GVNSink/sink-common-code-dbg.ll
new file mode 100644
index 000000000000..6f8ca4da5f80
--- /dev/null
+++ b/llvm/test/Transforms/GVNSink/sink-common-code-dbg.ll
@@ -0,0 +1,112 @@
+; RUN: opt < %s -passes=gvn-sink -S | FileCheck %s
+
+; Test that GVNSink correctly merges the debug locations of sunk instructions
+
+define zeroext i1 @test18(i32 %flag, i32 %blksA, i32 %blksB, i32 %nblks) !dbg !5 {
+; CHECK: if.end:
+; CHECK-NEXT: [[CMP2_SINK:%.*]] = phi i1 [ %cmp2, %if.then2 ], [ %cmp, %if.then ], [ %cmp3, %if.then3 ]
+; CHECK-NEXT: [[FROMBOOL4:%.*]] = zext i1 [[CMP2_SINK]] to i8, !dbg [[DBG17:![0-9]+]]
+;
+entry:
+ switch i32 %flag, label %if.then3 [
+ i32 0, label %if.then
+ i32 1, label %if.then2
+ ], !dbg !8
+
+if.then: ; preds = %entry
+ %cmp = icmp uge i32 %blksA, %nblks, !dbg !9
+ %frombool1 = zext i1 %cmp to i8, !dbg !10
+ br label %if.end, !dbg !11
+
+if.then2: ; preds = %entry
+ %add = add i32 %nblks, %blksB, !dbg !12
+ %cmp2 = icmp ule i32 %add, %blksA, !dbg !13
+ %frombool3 = zext i1 %cmp2 to i8, !dbg !14
+ br label %if.end, !dbg !15
+
+if.then3: ; preds = %entry
+ %add2 = add i32 %nblks, %blksA, !dbg !16
+ %cmp3 = icmp ule i32 %add2, %blksA, !dbg !17
+ %frombool4 = zext i1 %cmp3 to i8, !dbg !18
+ br label %if.end, !dbg !19
+
+if.end: ; preds = %if.then3, %if.then2, %if.then
+ %obeys.0 = phi i8 [ %frombool1, %if.then ], [ %frombool3, %if.then2 ], [ %frombool4, %if.then3 ], !dbg !20
+ %tobool4 = icmp ne i8 %obeys.0, 0, !dbg !21
+ ret i1 %tobool4, !dbg !22
+}
+
+define zeroext i1 @test_pr30244(i1 zeroext %flag, i1 zeroext %flag2, i32 %blksA, i32 %blksB, i32 %nblks) !dbg !23 {
+; CHECK: if.end.gvnsink.split:
+; CHECK-NEXT: [[CMP2_SINK:%.*]] = phi i1 [ %cmp2, %if.then2 ], [ %cmp, %if.then ]
+; CHECK-NEXT: [[FROMBOOL1:%.*]] = zext i1 [[CMP2_SINK]] to i8, !dbg [[DBG29:![0-9]+]]
+;
+entry:
+ %p = alloca i8, align 1, !dbg !24
+ br i1 %flag, label %if.then, label %if.else, !dbg !25
+
+if.then: ; preds = %entry
+ %cmp = icmp uge i32 %blksA, %nblks, !dbg !26
+ %frombool1 = zext i1 %cmp to i8, !dbg !27
+ store i8 %frombool1, ptr %p, align 1, !dbg !28
+ br label %if.end, !dbg !29
+
+if.else: ; preds = %entry
+ br i1 %flag2, label %if.then2, label %if.end, !dbg !30
+
+if.then2: ; preds = %if.else
+ %add = add i32 %nblks, %blksB, !dbg !31
+ %cmp2 = icmp ule i32 %add, %blksA, !dbg !32
+ %frombool3 = zext i1 %cmp2 to i8, !dbg !33
+ store i8 %frombool3, ptr %p, align 1, !dbg !34
+ br label %if.end, !dbg !35
+
+if.end: ; preds = %if.then2, %if.else, %if.then
+ ret i1 true, !dbg !36
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.debugify = !{!2, !3}
+!llvm.module.flags = !{!4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug)
+!1 = !DIFile(filename: "sink-common-code.ll", directory: "/")
+!2 = !{i32 28}
+!3 = !{i32 0}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = distinct !DISubprogram(name: "test18", linkageName: "test18", scope: null, file: !1, line: 1, type: !6, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!6 = !DISubroutineType(types: !7)
+!7 = !{}
+!8 = !DILocation(line: 1, column: 1, scope: !5)
+!9 = !DILocation(line: 2, column: 1, scope: !5)
+!10 = !DILocation(line: 3, column: 1, scope: !5)
+!11 = !DILocation(line: 4, column: 1, scope: !5)
+!12 = !DILocation(line: 5, column: 1, scope: !5)
+!13 = !DILocation(line: 6, column: 1, scope: !5)
+!14 = !DILocation(line: 7, column: 1, scope: !5)
+!15 = !DILocation(line: 8, column: 1, scope: !5)
+!16 = !DILocation(line: 9, column: 1, scope: !5)
+!17 = !DILocation(line: 10, column: 1, scope: !5)
+!18 = !DILocation(line: 11, column: 1, scope: !5)
+!19 = !DILocation(line: 12, column: 1, scope: !5)
+!20 = !DILocation(line: 13, column: 1, scope: !5)
+!21 = !DILocation(line: 14, column: 1, scope: !5)
+!22 = !DILocation(line: 15, column: 1, scope: !5)
+!23 = distinct !DISubprogram(name: "test_pr30244", linkageName: "test_pr30244", scope: null, file: !1, line: 16, type: !6, scopeLine: 16, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!24 = !DILocation(line: 16, column: 1, scope: !23)
+!25 = !DILocation(line: 17, column: 1, scope: !23)
+!26 = !DILocation(line: 18, column: 1, scope: !23)
+!27 = !DILocation(line: 19, column: 1, scope: !23)
+!28 = !DILocation(line: 20, column: 1, scope: !23)
+!29 = !DILocation(line: 21, column: 1, scope: !23)
+!30 = !DILocation(line: 22, column: 1, scope: !23)
+!31 = !DILocation(line: 23, column: 1, scope: !23)
+!32 = !DILocation(line: 24, column: 1, scope: !23)
+!33 = !DILocation(line: 25, column: 1, scope: !23)
+!34 = !DILocation(line: 26, column: 1, scope: !23)
+!35 = !DILocation(line: 27, column: 1, scope: !23)
+!36 = !DILocation(line: 28, column: 1, scope: !23)
+;.
+; CHECK: [[DBG17]] = !DILocation(line: 0
+; CHECK: [[DBG29]] = !DILocation(line: 0
+;.
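The new test above pins down the expected debug-info behavior: when GVNSink folds equivalent instructions from different predecessors into one, neither original source line is accurate for the survivor, so it should carry a merged, line-0 location (the `!DILocation(line: 0` checks). A minimal C++ sketch of that step, using LLVM's `DILocation::getMergedLocation`; the function and variable names are illustrative placeholders, not code from this patch:

// Minimal sketch, not code from this commit: give a sunk instruction the
// merged debug location of the two instructions it replaces.
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Instruction.h"

void applyMergedLoc(llvm::Instruction *Sunk, llvm::Instruction *I0,
                    llvm::Instruction *I1) {
  // getMergedLocation returns a line-0 DILocation in the nearest common
  // scope when the two locations differ, matching the CHECK lines above.
  llvm::DILocation *Merged = llvm::DILocation::getMergedLocation(
      I0->getDebugLoc(), I1->getDebugLoc());
  Sunk->setDebugLoc(Merged);
}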
diff --git a/llvm/test/Transforms/GVNSink/sink-ignore-dbg-intrinsics.ll b/llvm/test/Transforms/GVNSink/sink-ignore-dbg-intrinsics.ll
new file mode 100644
index 000000000000..e54a17a155b8
--- /dev/null
+++ b/llvm/test/Transforms/GVNSink/sink-ignore-dbg-intrinsics.ll
@@ -0,0 +1,92 @@
+; RUN: opt < %s -passes=gvn-sink -S | FileCheck %s
+
+; Test that GVNSink correctly performs the sink optimization in the presence of debug information.
+; Test that GVNSink correctly merges the debug locations of sunk instructions, e.g., propagating
+; the merged debug location of `%add` and `%add1` to the sunk add instruction.
+
+; Function Attrs: noinline nounwind uwtable
+define dso_local i32 @fun(i32 noundef %a, i32 noundef %b) #0 !dbg !10 {
+; CHECK-LABEL: define dso_local i32 @fun(
+; CHECK-SAME: i32 noundef [[A:%.*]], i32 noundef [[B:%.*]])
+; CHECK: if.end:
+; CHECK: [[B_SINK:%.*]] = phi i32 [ [[B]], %if.else ], [ [[A]], %if.then ]
+; CHECK: [[ADD1:%.*]] = add nsw i32 [[B_SINK]], 1, !dbg [[DBG:![0-9]+]]
+; CHECK: [[XOR2:%.*]] = xor i32 [[ADD1]], 1, !dbg [[DBG:![0-9]+]]
+; CHECK: [[DBG]] = !DILocation(line: 0,
+;
+entry:
+ tail call void @llvm.dbg.value(metadata i32 %a, metadata !15, metadata !DIExpression()), !dbg !16
+ tail call void @llvm.dbg.value(metadata i32 %b, metadata !17, metadata !DIExpression()), !dbg !16
+ %cmp = icmp sgt i32 %b, 10, !dbg !18
+ br i1 %cmp, label %if.then, label %if.else, !dbg !20
+
+if.then: ; preds = %entry
+ %add = add nsw i32 %a, 1, !dbg !21
+ tail call void @llvm.dbg.value(metadata i32 %add, metadata !23, metadata !DIExpression()), !dbg !24
+ %xor = xor i32 %add, 1, !dbg !25
+ tail call void @llvm.dbg.value(metadata i32 %xor, metadata !26, metadata !DIExpression()), !dbg !24
+ tail call void @llvm.dbg.value(metadata i32 %xor, metadata !27, metadata !DIExpression()), !dbg !16
+ br label %if.end, !dbg !28
+
+if.else: ; preds = %entry
+ %add1 = add nsw i32 %b, 1, !dbg !29
+ tail call void @llvm.dbg.value(metadata i32 %add1, metadata !31, metadata !DIExpression()), !dbg !32
+ %xor2 = xor i32 %add1, 1, !dbg !33
+ tail call void @llvm.dbg.value(metadata i32 %xor2, metadata !34, metadata !DIExpression()), !dbg !32
+ tail call void @llvm.dbg.value(metadata i32 %xor2, metadata !27, metadata !DIExpression()), !dbg !16
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ %ret.0 = phi i32 [ %xor, %if.then ], [ %xor2, %if.else ], !dbg !35
+ tail call void @llvm.dbg.value(metadata i32 %ret.0, metadata !27, metadata !DIExpression()), !dbg !16
+ ret i32 %ret.0, !dbg !36
+}
+
+; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none)
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+
+; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none)
+declare void @llvm.dbg.value(metadata, metadata, metadata) #1
+
+attributes #0 = { noinline nounwind uwtable "frame-pointer"="all" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" }
+attributes #1 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3, !4, !5, !6, !7, !8}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, producer: "clang version 18.0.0git (https://github.com/llvm/llvm-project.git 5dfcb3e5d1d16bb4f8fce52b3c089119ed977e7f)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None)
+!1 = !DIFile(filename: "main.c", directory: "/")
+!2 = !{i32 7, !"Dwarf Version", i32 5}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !{i32 1, !"wchar_size", i32 4}
+!5 = !{i32 8, !"PIC Level", i32 2}
+!6 = !{i32 7, !"PIE Level", i32 2}
+!7 = !{i32 7, !"uwtable", i32 2}
+!8 = !{i32 7, !"frame-pointer", i32 2}
+!10 = distinct !DISubprogram(name: "fun", scope: !1, file: !1, line: 1, type: !11, scopeLine: 1, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !14)
+!11 = !DISubroutineType(types: !12)
+!12 = !{!13, !13, !13}
+!13 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!14 = !{}
+!15 = !DILocalVariable(name: "a", arg: 1, scope: !10, file: !1, line: 1, type: !13)
+!16 = !DILocation(line: 0, scope: !10)
+!17 = !DILocalVariable(name: "b", arg: 2, scope: !10, file: !1, line: 1, type: !13)
+!18 = !DILocation(line: 3, column: 11, scope: !19)
+!19 = distinct !DILexicalBlock(scope: !10, file: !1, line: 3, column: 9)
+!20 = !DILocation(line: 3, column: 9, scope: !10)
+!21 = !DILocation(line: 4, column: 20, scope: !22)
+!22 = distinct !DILexicalBlock(scope: !19, file: !1, line: 3, column: 17)
+!23 = !DILocalVariable(name: "a1", scope: !22, file: !1, line: 4, type: !13)
+!24 = !DILocation(line: 0, scope: !22)
+!25 = !DILocation(line: 5, column: 21, scope: !22)
+!26 = !DILocalVariable(name: "a2", scope: !22, file: !1, line: 5, type: !13)
+!27 = !DILocalVariable(name: "ret", scope: !10, file: !1, line: 2, type: !13)
+!28 = !DILocation(line: 7, column: 5, scope: !22)
+!29 = !DILocation(line: 8, column: 20, scope: !30)
+!30 = distinct !DILexicalBlock(scope: !19, file: !1, line: 7, column: 12)
+!31 = !DILocalVariable(name: "b1", scope: !30, file: !1, line: 8, type: !13)
+!32 = !DILocation(line: 0, scope: !30)
+!33 = !DILocation(line: 9, column: 21, scope: !30)
+!34 = !DILocalVariable(name: "b2", scope: !30, file: !1, line: 9, type: !13)
+!35 = !DILocation(line: 0, scope: !19)
+!36 = !DILocation(line: 12, column: 5, scope: !10)
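This test guards the sinking decision against -g sensitivity: the interleaved `llvm.dbg.value` calls must not stop `%add`/`%add1` and `%xor`/`%xor2` from being sunk, and the sunk instructions again receive merged line-0 locations. A minimal sketch of the usual skip-debug-intrinsics pattern; `prevRealInstruction` is an illustrative placeholder, while `Instruction::getPrevNonDebugInstruction` is the stock LLVM helper for the same thing:

// Minimal sketch, not this commit's code: walk backwards over debug
// intrinsics so their presence does not change what the pass compares.
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"

llvm::Instruction *prevRealInstruction(llvm::Instruction *I) {
  // Step over llvm.dbg.* intrinsics so -g does not change which
  // instruction the sinking heuristic sees.
  llvm::Instruction *P = I->getPrevNode();
  while (P && llvm::isa<llvm::DbgInfoIntrinsic>(P))
    P = P->getPrevNode();
  return P; // same result as I->getPrevNonDebugInstruction()
}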
diff --git a/llvm/test/Transforms/IndVarSimplify/AArch64/widen-loop-comp.ll b/llvm/test/Transforms/IndVarSimplify/AArch64/widen-loop-comp.ll
index c5f656c870a2..99541b398226 100644
--- a/llvm/test/Transforms/IndVarSimplify/AArch64/widen-loop-comp.ll
+++ b/llvm/test/Transforms/IndVarSimplify/AArch64/widen-loop-comp.ll
@@ -265,16 +265,17 @@ define i32 @test5(ptr %a, i32 %b) {
; CHECK-LABEL: @test5(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[B:%.*]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: for.cond:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY:%.*]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[SUM_0:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[CMP:%.*]] = icmp ule i64 [[INDVARS_IV]], [[TMP0]]
-; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV]], [[TMP1]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ADD]] = add nsw i32 [[SUM_0]], [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ADD]] = add nsw i32 [[SUM_0]], [[TMP2]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: br label [[FOR_COND]]
; CHECK: for.end:
@@ -349,22 +350,23 @@ define i32 @test7(ptr %a, i32 %b) {
; CHECK-LABEL: @test7(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[B:%.*]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1
; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[B]], i32 -1)
-; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[SMAX]], 2
-; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[SMAX]], 2
+; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[TMP2]] to i64
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: for.cond:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY:%.*]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[SUM_0:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[CMP:%.*]] = icmp ule i64 [[INDVARS_IV]], [[TMP0]]
-; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV]], [[TMP1]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ADD]] = add nsw i32 [[SUM_0]], [[TMP2]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ADD]] = add nsw i32 [[SUM_0]], [[TMP3]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND]], label [[FOR_END]]
+; CHECK-NEXT: [[EXITCOND2:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-NEXT: br i1 [[EXITCOND2]], label [[FOR_COND]], label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[SUM_0]], [[FOR_BODY]] ], [ [[SUM_0]], [[FOR_COND]] ]
; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]]
diff --git a/llvm/test/Transforms/IndVarSimplify/D108043.ll b/llvm/test/Transforms/IndVarSimplify/D108043.ll
index ab95f0bb9039..cc553e205ad3 100644
--- a/llvm/test/Transforms/IndVarSimplify/D108043.ll
+++ b/llvm/test/Transforms/IndVarSimplify/D108043.ll
@@ -9,7 +9,7 @@ define internal fastcc void @func_2() unnamed_addr {
; CHECK-NEXT: lbl_2898.preheader:
; CHECK-NEXT: br label [[LBL_2898:%.*]]
; CHECK: lbl_2898.loopexit:
-; CHECK-NEXT: store ptr getelementptr inbounds ([4 x [6 x i32]], ptr @g_2168, i64 0, i64 3, i64 1), ptr @g_1150, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @g_2168, i64 76), ptr @g_1150, align 8
; CHECK-NEXT: br label [[LBL_2898]]
; CHECK: lbl_2898:
; CHECK-NEXT: br label [[FOR_COND884:%.*]]
diff --git a/llvm/test/Transforms/IndVarSimplify/eliminate-exit-no-dl.ll b/llvm/test/Transforms/IndVarSimplify/eliminate-exit-no-dl.ll
index e605512cb23b..a3c4002626a7 100644
--- a/llvm/test/Transforms/IndVarSimplify/eliminate-exit-no-dl.ll
+++ b/llvm/test/Transforms/IndVarSimplify/eliminate-exit-no-dl.ll
@@ -14,7 +14,7 @@ define void @foo() {
; CHECK-NEXT: bb:
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb3:
-; CHECK-NEXT: [[TMP6:%.*]] = load i8, ptr getelementptr inbounds ([0 x i8], ptr @global, i64 0, i64 1), align 1
+; CHECK-NEXT: [[TMP6:%.*]] = load i8, ptr getelementptr inbounds (i8, ptr @global, i64 1), align 1
; CHECK-NEXT: br i1 false, label [[BB7:%.*]], label [[BB11:%.*]]
; CHECK: bb7:
; CHECK-NEXT: [[TMP8:%.*]] = zext i8 [[TMP6]] to i64
diff --git a/llvm/test/Transforms/IndVarSimplify/floating-point-small-iv.ll b/llvm/test/Transforms/IndVarSimplify/floating-point-small-iv.ll
index 599e69c814d9..bebd314f7375 100644
--- a/llvm/test/Transforms/IndVarSimplify/floating-point-small-iv.ll
+++ b/llvm/test/Transforms/IndVarSimplify/floating-point-small-iv.ll
@@ -357,7 +357,7 @@ define void @uitofp_fptoui_range_with_negative() {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: store i32 100, ptr getelementptr inbounds ([16777219 x i32], ptr @array, i64 0, i64 100), align 4
+; CHECK-NEXT: store i32 100, ptr getelementptr inbounds (i8, ptr @array, i64 400), align 4
; CHECK-NEXT: br i1 false, label [[FOR_BODY]], label [[CLEANUP:%.*]]
; CHECK: cleanup:
; CHECK-NEXT: ret void
@@ -418,7 +418,7 @@ define void @uitofp_fptosi_range_with_negative () {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: store i32 100, ptr getelementptr inbounds ([16777219 x i32], ptr @array, i64 0, i64 100), align 4
+; CHECK-NEXT: store i32 100, ptr getelementptr inbounds (i8, ptr @array, i64 400), align 4
; CHECK-NEXT: br i1 false, label [[FOR_BODY]], label [[CLEANUP:%.*]]
; CHECK: cleanup:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IndVarSimplify/lftr-dead-ivs.ll b/llvm/test/Transforms/IndVarSimplify/lftr-dead-ivs.ll
index a628a5357f6d..6c15eb4af4f1 100644
--- a/llvm/test/Transforms/IndVarSimplify/lftr-dead-ivs.ll
+++ b/llvm/test/Transforms/IndVarSimplify/lftr-dead-ivs.ll
@@ -112,7 +112,7 @@ define void @dom_store_preinc() #0 {
; CHECK-NEXT: [[P_0:%.*]] = phi ptr [ @data, [[ENTRY:%.*]] ], [ [[TMP3:%.*]], [[LOOP]] ]
; CHECK-NEXT: store volatile i8 0, ptr [[P_0]], align 1
; CHECK-NEXT: [[TMP3]] = getelementptr inbounds i8, ptr [[P_0]], i64 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne ptr [[P_0]], getelementptr ([240 x i8], ptr @data, i64 1, i64 5)
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne ptr [[P_0]], getelementptr (i8, ptr @data, i64 245)
; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
@@ -141,7 +141,7 @@ define void @dom_store_postinc() #0 {
; CHECK-NEXT: [[P_0:%.*]] = phi ptr [ @data, [[ENTRY:%.*]] ], [ [[TMP3:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[TMP3]] = getelementptr inbounds i8, ptr [[P_0]], i64 1
; CHECK-NEXT: store volatile i8 0, ptr [[TMP3]], align 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne ptr [[TMP3]], getelementptr ([240 x i8], ptr @data, i64 1, i64 6)
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne ptr [[TMP3]], getelementptr (i8, ptr @data, i64 246)
; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
@@ -170,7 +170,7 @@ define i8 @dom_load() #0 {
; CHECK-NEXT: [[P_0:%.*]] = phi ptr [ @data, [[ENTRY:%.*]] ], [ [[TMP3:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[TMP3]] = getelementptr inbounds i8, ptr [[P_0]], i64 1
; CHECK-NEXT: [[V:%.*]] = load i8, ptr [[TMP3]], align 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne ptr [[TMP3]], getelementptr ([240 x i8], ptr @data, i64 1, i64 6)
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne ptr [[TMP3]], getelementptr (i8, ptr @data, i64 246)
; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK: exit:
; CHECK-NEXT: [[V_LCSSA:%.*]] = phi i8 [ [[V]], [[LOOP]] ]
diff --git a/llvm/test/Transforms/IndVarSimplify/lftr.ll b/llvm/test/Transforms/IndVarSimplify/lftr.ll
index 7f4820f093e5..e37a34019ccd 100644
--- a/llvm/test/Transforms/IndVarSimplify/lftr.ll
+++ b/llvm/test/Transforms/IndVarSimplify/lftr.ll
@@ -196,7 +196,7 @@ define void @test_zext(ptr %a) #0 {
; CHECK-NEXT: [[T2:%.*]] = load i8, ptr [[DOT0]], align 1
; CHECK-NEXT: [[T3]] = getelementptr inbounds i8, ptr [[P_0]], i64 1
; CHECK-NEXT: store i8 [[T2]], ptr [[P_0]], align 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne ptr [[P_0]], getelementptr inbounds ([240 x i8], ptr @data, i64 0, i64 239)
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne ptr [[P_0]], getelementptr inbounds (i8, ptr @data, i64 239)
; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/Inline/access-attributes-prop.ll b/llvm/test/Transforms/Inline/access-attributes-prop.ll
index ffd31fbe8ae1..48665e9bbafd 100644
--- a/llvm/test/Transforms/Inline/access-attributes-prop.ll
+++ b/llvm/test/Transforms/Inline/access-attributes-prop.ll
@@ -6,6 +6,7 @@
declare void @bar1(ptr %p)
declare void @bar2(ptr %p, ptr %p2)
declare void @bar3(ptr writable %p)
+declare void @bar4(ptr byval([4 x i32]) %p)
define dso_local void @foo1_rdonly(ptr readonly %p) {
; CHECK-LABEL: define {{[^@]+}}@foo1_rdonly
; CHECK-SAME: (ptr readonly [[P:%.*]]) {
@@ -186,6 +187,15 @@ define dso_local void @foo2_through_obj(ptr %p, ptr %p2) {
ret void
}
+define dso_local void @foo_byval_readonly(ptr readonly %p) {
+; CHECK-LABEL: define {{[^@]+}}@foo_byval_readonly
+; CHECK-SAME: (ptr readonly [[P:%.*]])
+; CHECK-NEXT: call void @bar4(ptr byval([4 x i32]) [[P]])
+; CHECK-NEXT: ret void
+ call void @bar4(ptr byval([4 x i32]) %p)
+ ret void
+}
+
define void @prop_param_func_decl(ptr %p) {
; CHECK-LABEL: define {{[^@]+}}@prop_param_func_decl
; CHECK-SAME: (ptr [[P:%.*]]) {
@@ -539,3 +549,11 @@ define void @prop_no_conflict_writable2(ptr %p) {
ret void
}
+define void @prop_byval_readonly(ptr %p) {
+; CHECK-LABEL: define {{[^@]+}}@prop_byval_readonly
+; CHECK-SAME: (ptr [[P:%.*]]) {
+; CHECK-NEXT: call void @bar4(ptr byval([4 x i32]) [[P]])
+; CHECK-NEXT: ret void
+ call void @foo_byval_readonly(ptr %p)
+ ret void
+}
diff --git a/llvm/test/Transforms/Inline/inline_invoke.ll b/llvm/test/Transforms/Inline/inline_invoke.ll
index 89c56447c07b..5441e2a9e63b 100644
--- a/llvm/test/Transforms/Inline/inline_invoke.ll
+++ b/llvm/test/Transforms/Inline/inline_invoke.ll
@@ -19,7 +19,7 @@ declare void @use(i32) nounwind
declare void @opaque()
-declare i32 @llvm.eh.typeid.for(ptr) nounwind
+declare i32 @llvm.eh.typeid.for.p0(ptr) nounwind
declare i32 @__gxx_personality_v0(...)
@@ -74,7 +74,7 @@ lpad: ; preds = %entry
catch ptr @_ZTIi
%eh.exc = extractvalue { ptr, i32 } %exn, 0
%eh.selector = extractvalue { ptr, i32 } %exn, 1
- %0 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi) nounwind
+ %0 = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi) nounwind
%1 = icmp eq i32 %eh.selector, %0
br i1 %1, label %catch, label %eh.resume
@@ -109,7 +109,7 @@ eh.resume:
; CHECK-NEXT: phi { ptr, i32 } [
; CHECK-NEXT: extractvalue { ptr, i32 }
; CHECK-NEXT: extractvalue { ptr, i32 }
-; CHECK-NEXT: call i32 @llvm.eh.typeid.for(
+; CHECK-NEXT: call i32 @llvm.eh.typeid.for.p0(
;; Test 1 - Correctly handle phis in outer landing pads.
@@ -133,7 +133,7 @@ lpad:
catch ptr @_ZTIi
%eh.exc = extractvalue { ptr, i32 } %exn, 0
%eh.selector = extractvalue { ptr, i32 } %exn, 1
- %0 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi) nounwind
+ %0 = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi) nounwind
%1 = icmp eq i32 %eh.selector, %0
br i1 %1, label %catch, label %eh.resume
@@ -212,7 +212,7 @@ eh.resume:
; CHECK-NEXT: [[EXNJ1:%.*]] = phi { ptr, i32 } [ [[EXNJ2]], %[[LPAD_JOIN2]] ], [ [[LPADVAL1]], %[[RESUME1]] ]
; CHECK-NEXT: extractvalue { ptr, i32 } [[EXNJ1]], 0
; CHECK-NEXT: [[SELJ1:%.*]] = extractvalue { ptr, i32 } [[EXNJ1]], 1
-; CHECK-NEXT: [[T:%.*]] = call i32 @llvm.eh.typeid.for(
+; CHECK-NEXT: [[T:%.*]] = call i32 @llvm.eh.typeid.for.p0(
; CHECK-NEXT: icmp eq i32 [[SELJ1]], [[T]]
; CHECK: call void @use(i32 [[XJ1]])
diff --git a/llvm/test/Transforms/InstCombine/addrspacecast.ll b/llvm/test/Transforms/InstCombine/addrspacecast.ll
index cbb88b9a09c9..35a1066a6b31 100644
--- a/llvm/test/Transforms/InstCombine/addrspacecast.ll
+++ b/llvm/test/Transforms/InstCombine/addrspacecast.ll
@@ -141,7 +141,7 @@ define i32 @memcpy_addrspacecast() nounwind {
; CHECK-NEXT: [[I:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[I_INC:%.*]], [[LOOP_BODY]] ]
; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[SUM_INC:%.*]], [[LOOP_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[I]] to i16
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr i8, ptr addrspace(2) getelementptr inbounds ([60 x i8], ptr addrspace(2) @const_array, i16 0, i16 4), i16 [[TMP0]]
+; CHECK-NEXT: [[PTR:%.*]] = getelementptr i8, ptr addrspace(2) getelementptr inbounds (i8, ptr addrspace(2) @const_array, i16 4), i16 [[TMP0]]
; CHECK-NEXT: [[LOAD:%.*]] = load i8, ptr addrspace(2) [[PTR]], align 1
; CHECK-NEXT: [[EXT:%.*]] = zext i8 [[LOAD]] to i32
; CHECK-NEXT: [[SUM_INC]] = add i32 [[SUM]], [[EXT]]
diff --git a/llvm/test/Transforms/InstCombine/align-addr.ll b/llvm/test/Transforms/InstCombine/align-addr.ll
index facb5df08a82..58647dc9595d 100644
--- a/llvm/test/Transforms/InstCombine/align-addr.ll
+++ b/llvm/test/Transforms/InstCombine/align-addr.ll
@@ -81,7 +81,7 @@ define <16 x i8> @test1_as1(<2 x i64> %x) {
define <16 x i8> @test1_as1_gep(<2 x i64> %x) {
; CHECK-LABEL: @test1_as1_gep(
-; CHECK-NEXT: [[TMP:%.*]] = load <16 x i8>, ptr addrspace(1) getelementptr inbounds ([8 x i32], ptr addrspace(1) @GLOBAL_as1_gep, i32 0, i32 4), align 1
+; CHECK-NEXT: [[TMP:%.*]] = load <16 x i8>, ptr addrspace(1) getelementptr inbounds (i8, ptr addrspace(1) @GLOBAL_as1_gep, i32 16), align 1
; CHECK-NEXT: ret <16 x i8> [[TMP]]
;
%tmp = load <16 x i8>, ptr addrspace(1) getelementptr ([8 x i32], ptr addrspace(1) @GLOBAL_as1_gep, i16 0, i16 4), align 1
diff --git a/llvm/test/Transforms/InstCombine/and-fcmp.ll b/llvm/test/Transforms/InstCombine/and-fcmp.ll
index f1ae2e74ac2e..c163802fcc93 100644
--- a/llvm/test/Transforms/InstCombine/and-fcmp.ll
+++ b/llvm/test/Transforms/InstCombine/and-fcmp.ll
@@ -39,7 +39,9 @@ define i1 @PR1738_logical_noundef(double %x, double noundef %y) {
define <2 x i1> @PR1738_vec_undef(<2 x double> %x, <2 x double> %y) {
; CHECK-LABEL: @PR1738_vec_undef(
-; CHECK-NEXT: [[OR:%.*]] = fcmp ord <2 x double> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp ord <2 x double> [[X:%.*]], <double 0.000000e+00, double undef>
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp ord <2 x double> [[Y:%.*]], <double undef, double 0.000000e+00>
+; CHECK-NEXT: [[OR:%.*]] = and <2 x i1> [[CMP1]], [[CMP2]]
; CHECK-NEXT: ret <2 x i1> [[OR]]
;
%cmp1 = fcmp ord <2 x double> %x, <double 0.0, double undef>
@@ -48,6 +50,17 @@ define <2 x i1> @PR1738_vec_undef(<2 x double> %x, <2 x double> %y) {
ret <2 x i1> %or
}
+define <2 x i1> @PR1738_vec_poison(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: @PR1738_vec_poison(
+; CHECK-NEXT: [[OR:%.*]] = fcmp ord <2 x double> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: ret <2 x i1> [[OR]]
+;
+ %cmp1 = fcmp ord <2 x double> %x, <double 0.0, double poison>
+ %cmp2 = fcmp ord <2 x double> %y, <double poison, double 0.0>
+ %or = and <2 x i1> %cmp1, %cmp2
+ ret <2 x i1> %or
+}
+
define i1 @PR41069(i1 %z, float %c, float %d) {
; CHECK-LABEL: @PR41069(
; CHECK-NEXT: [[TMP1:%.*]] = fcmp ord float [[D:%.*]], [[C:%.*]]
@@ -111,8 +124,10 @@ define i1 @PR41069_commute_logical(i1 %z, float %c, float %d) {
define <2 x i1> @PR41069_vec(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %d) {
; CHECK-LABEL: @PR41069_vec(
; CHECK-NEXT: [[ORD1:%.*]] = fcmp ord <2 x double> [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = fcmp ord <2 x double> [[D:%.*]], [[C:%.*]]
-; CHECK-NEXT: [[R:%.*]] = and <2 x i1> [[TMP1]], [[ORD1]]
+; CHECK-NEXT: [[ORD2:%.*]] = fcmp ord <2 x double> [[C:%.*]], <double 0.000000e+00, double undef>
+; CHECK-NEXT: [[AND:%.*]] = and <2 x i1> [[ORD1]], [[ORD2]]
+; CHECK-NEXT: [[ORD3:%.*]] = fcmp ord <2 x double> [[D:%.*]], zeroinitializer
+; CHECK-NEXT: [[R:%.*]] = and <2 x i1> [[AND]], [[ORD3]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
%ord1 = fcmp ord <2 x double> %a, %b
@@ -126,8 +141,10 @@ define <2 x i1> @PR41069_vec(<2 x double> %a, <2 x double> %b, <2 x double> %c,
define <2 x i1> @PR41069_vec_commute(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %d) {
; CHECK-LABEL: @PR41069_vec_commute(
; CHECK-NEXT: [[ORD1:%.*]] = fcmp ord <2 x double> [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = fcmp ord <2 x double> [[D:%.*]], [[C:%.*]]
-; CHECK-NEXT: [[R:%.*]] = and <2 x i1> [[TMP1]], [[ORD1]]
+; CHECK-NEXT: [[ORD2:%.*]] = fcmp ord <2 x double> [[C:%.*]], <double 0.000000e+00, double undef>
+; CHECK-NEXT: [[AND:%.*]] = and <2 x i1> [[ORD1]], [[ORD2]]
+; CHECK-NEXT: [[ORD3:%.*]] = fcmp ord <2 x double> [[D:%.*]], zeroinitializer
+; CHECK-NEXT: [[R:%.*]] = and <2 x i1> [[ORD3]], [[AND]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
%ord1 = fcmp ord <2 x double> %a, %b
diff --git a/llvm/test/Transforms/InstCombine/binop-select-cast-of-select-cond.ll b/llvm/test/Transforms/InstCombine/binop-select-cast-of-select-cond.ll
index 7dc2fe1cb88e..b0da6d80d05a 100644
--- a/llvm/test/Transforms/InstCombine/binop-select-cast-of-select-cond.ll
+++ b/llvm/test/Transforms/InstCombine/binop-select-cast-of-select-cond.ll
@@ -232,7 +232,7 @@ define i64 @pr64669(i64 %a) {
; CHECK-LABEL: define i64 @pr64669
; CHECK-SAME: (i64 [[A:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[A]], 1
-; CHECK-NEXT: [[ADD:%.*]] = select i1 icmp ne (ptr getelementptr inbounds ([72 x i32], ptr @b, i64 0, i64 25), ptr @c), i64 [[TMP1]], i64 0
+; CHECK-NEXT: [[ADD:%.*]] = select i1 icmp ne (ptr getelementptr inbounds (i8, ptr @b, i64 100), ptr @c), i64 [[TMP1]], i64 0
; CHECK-NEXT: ret i64 [[ADD]]
;
%mul = select i1 icmp ne (ptr getelementptr inbounds ([72 x i32], ptr @b, i64 0, i64 25), ptr @c), i64 %a, i64 0
diff --git a/llvm/test/Transforms/InstCombine/cast_ptr.ll b/llvm/test/Transforms/InstCombine/cast_ptr.ll
index 5c6c012064e0..786ea876ddea 100644
--- a/llvm/test/Transforms/InstCombine/cast_ptr.ll
+++ b/llvm/test/Transforms/InstCombine/cast_ptr.ll
@@ -244,3 +244,154 @@ define <2 x i32> @insertelt_extra_use2(<2 x i32> %x, ptr %p) {
%r = ptrtoint <2 x ptr> %i to <2 x i32>
ret <2 x i32> %r
}
+
+define i32 @ptr_add_in_int(i32 %x, i32 %y) {
+; CHECK-LABEL: @ptr_add_in_int(
+; CHECK-NEXT: [[R:%.*]] = add i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %ptr = inttoptr i32 %x to ptr
+ %p2 = getelementptr inbounds i8, ptr %ptr, i32 %y
+ %r = ptrtoint ptr %p2 to i32
+ ret i32 %r
+}
+
+define i32 @ptr_add_in_int_2(i32 %x, i32 %y) {
+; CHECK-LABEL: @ptr_add_in_int_2(
+; CHECK-NEXT: [[P2_IDX:%.*]] = shl nsw i32 [[Y:%.*]], 2
+; CHECK-NEXT: [[R:%.*]] = add i32 [[P2_IDX]], [[X:%.*]]
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %ptr = inttoptr i32 %x to ptr
+ %p2 = getelementptr inbounds i32, ptr %ptr, i32 %y
+ %r = ptrtoint ptr %p2 to i32
+ ret i32 %r
+}
+
+define i32 @ptr_add_in_int_nneg(i32 %x, i32 %y) {
+; CHECK-LABEL: @ptr_add_in_int_nneg(
+; CHECK-NEXT: [[Z:%.*]] = call i32 @llvm.abs.i32(i32 [[Y:%.*]], i1 true)
+; CHECK-NEXT: [[R:%.*]] = add nuw i32 [[Z]], [[X:%.*]]
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %z = call i32 @llvm.abs.i32(i32 %y, i1 true)
+ %ptr = inttoptr i32 %x to ptr
+ %p2 = getelementptr inbounds i8, ptr %ptr, i32 %z
+ %r = ptrtoint ptr %p2 to i32
+ ret i32 %r
+}
+
+define i64 @ptr_add_in_int_different_type_1(i32 %x, i32 %y) {
+; CHECK-LABEL: @ptr_add_in_int_different_type_1(
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT: ret i64 [[R]]
+;
+ %ptr = inttoptr i32 %x to ptr
+ %p2 = getelementptr i8, ptr %ptr, i32 %y
+ %r = ptrtoint ptr %p2 to i64
+ ret i64 %r
+}
+
+define i16 @ptr_add_in_int_different_type_2(i32 %x, i32 %y) {
+; CHECK-LABEL: @ptr_add_in_int_different_type_2(
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT: ret i16 [[R]]
+;
+ %ptr = inttoptr i32 %x to ptr
+ %p2 = getelementptr i8, ptr %ptr, i32 %y
+ %r = ptrtoint ptr %p2 to i16
+ ret i16 %r
+}
+
+define i32 @ptr_add_in_int_different_type_3(i16 %x, i32 %y) {
+; CHECK-LABEL: @ptr_add_in_int_different_type_3(
+; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[X:%.*]] to i32
+; CHECK-NEXT: [[R:%.*]] = add i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %ptr = inttoptr i16 %x to ptr
+ %p2 = getelementptr i8, ptr %ptr, i32 %y
+ %r = ptrtoint ptr %p2 to i32
+ ret i32 %r
+}
+
+define i32 @ptr_add_in_int_different_type_4(i64 %x, i32 %y) {
+; CHECK-LABEL: @ptr_add_in_int_different_type_4(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[X:%.*]] to i32
+; CHECK-NEXT: [[R:%.*]] = add i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %ptr = inttoptr i64 %x to ptr
+ %p2 = getelementptr i8, ptr %ptr, i32 %y
+ %r = ptrtoint ptr %p2 to i32
+ ret i32 %r
+}
+
+define i32 @ptr_add_in_int_not_inbounds(i32 %x, i32 %y) {
+; CHECK-LABEL: @ptr_add_in_int_not_inbounds(
+; CHECK-NEXT: [[Z:%.*]] = call i32 @llvm.abs.i32(i32 [[Y:%.*]], i1 true)
+; CHECK-NEXT: [[R:%.*]] = add i32 [[Z]], [[X:%.*]]
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %z = call i32 @llvm.abs.i32(i32 %y, i1 true)
+ %ptr = inttoptr i32 %x to ptr
+ %p2 = getelementptr i8, ptr %ptr, i32 %z
+ %r = ptrtoint ptr %p2 to i32
+ ret i32 %r
+}
+
+define i32 @ptr_add_in_int_const(i32 %x) {
+; CHECK-LABEL: @ptr_add_in_int_const(
+; CHECK-NEXT: [[R:%.*]] = add nuw i32 [[X:%.*]], 4096
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %ptr = inttoptr i32 %x to ptr
+ %p2 = getelementptr inbounds i8, ptr %ptr, i32 4096
+ %r = ptrtoint ptr %p2 to i32
+ ret i32 %r
+}
+
+define i32 @ptr_add_in_int_const_negative(i32 %x) {
+; CHECK-LABEL: @ptr_add_in_int_const_negative(
+; CHECK-NEXT: [[R:%.*]] = add i32 [[X:%.*]], -4096
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %ptr = inttoptr i32 %x to ptr
+ %p2 = getelementptr inbounds i8, ptr %ptr, i32 -4096
+ %r = ptrtoint ptr %p2 to i32
+ ret i32 %r
+}
+
+declare void @use_ptr(ptr)
+
+define i32 @ptr_add_in_int_extra_use1(i32 %x) {
+; CHECK-LABEL: @ptr_add_in_int_extra_use1(
+; CHECK-NEXT: [[PTR:%.*]] = inttoptr i32 [[X:%.*]] to ptr
+; CHECK-NEXT: call void @use_ptr(ptr [[PTR]])
+; CHECK-NEXT: [[P2:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i32 4096
+; CHECK-NEXT: [[R:%.*]] = ptrtoint ptr [[P2]] to i32
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %ptr = inttoptr i32 %x to ptr
+ call void @use_ptr(ptr %ptr)
+ %p2 = getelementptr inbounds i8, ptr %ptr, i32 4096
+ %r = ptrtoint ptr %p2 to i32
+ ret i32 %r
+}
+
+define i32 @ptr_add_in_int_extra_use2(i32 %x) {
+; CHECK-LABEL: @ptr_add_in_int_extra_use2(
+; CHECK-NEXT: [[PTR:%.*]] = inttoptr i32 [[X:%.*]] to ptr
+; CHECK-NEXT: [[P2:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i32 4096
+; CHECK-NEXT: call void @use_ptr(ptr nonnull [[P2]])
+; CHECK-NEXT: [[R:%.*]] = ptrtoint ptr [[P2]] to i32
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %ptr = inttoptr i32 %x to ptr
+ %p2 = getelementptr inbounds i8, ptr %ptr, i32 4096
+ call void @use_ptr(ptr %p2)
+ %r = ptrtoint ptr %p2 to i32
+ ret i32 %r
+}
diff --git a/llvm/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll b/llvm/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll
index 30d5cd66066b..857704f58028 100644
--- a/llvm/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll
+++ b/llvm/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll
@@ -223,7 +223,7 @@ define i32 @test_cast_gep_large_indices_as() {
define i32 @test_constant_cast_gep_struct_indices_as() {
; CHECK-LABEL: @test_constant_cast_gep_struct_indices_as(
-; CHECK-NEXT: [[Y:%.*]] = load i32, ptr addrspace(3) getelementptr inbounds ([[STRUCT_FOO:%.*]], ptr addrspace(3) @constant_fold_global_ptr, i16 0, i32 2, i16 2), align 4
+; CHECK-NEXT: [[Y:%.*]] = load i32, ptr addrspace(3) getelementptr inbounds (i8, ptr addrspace(3) @constant_fold_global_ptr, i16 16), align 4
; CHECK-NEXT: ret i32 [[Y]]
;
%x = getelementptr %struct.foo, ptr addrspace(3) @constant_fold_global_ptr, i18 0, i32 2, i12 2
diff --git a/llvm/test/Transforms/InstCombine/constant-fold-gep.ll b/llvm/test/Transforms/InstCombine/constant-fold-gep.ll
index 009c19dfa66c..54b7a6f66ecd 100644
--- a/llvm/test/Transforms/InstCombine/constant-fold-gep.ll
+++ b/llvm/test/Transforms/InstCombine/constant-fold-gep.ll
@@ -12,26 +12,26 @@ target datalayout = "E-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-
define void @frob() {
; CHECK-LABEL: @frob(
; CHECK-NEXT: store i32 1, ptr @Y, align 4
-; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 1), align 4
-; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 0, i64 2), align 4
-; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 1, i64 0), align 4
-; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 1, i64 1), align 4
-; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 0, i32 1, i64 2), align 4
-; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 0, i64 0), align 4
-; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 0, i64 1), align 4
-; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 0, i64 2), align 4
-; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 1, i64 0), align 4
-; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 1, i64 1), align 4
-; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 1, i32 1, i64 2), align 4
-; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 0, i64 0), align 4
-; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 0, i64 1), align 4
-; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 0, i64 2), align 8
-; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 1, i64 0), align 4
-; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 1, i64 1), align 8
-; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 0, i64 2, i32 1, i64 2), align 4
-; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %struct.X], ptr @Y, i64 1, i64 0, i32 0, i64 0), align 8
-; CHECK-NEXT: store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 2, i64 0, i32 0, i64 0), align 8
-; CHECK-NEXT: store i32 1, ptr getelementptr ([3 x %struct.X], ptr @Y, i64 1, i64 0, i32 0, i64 1), align 8
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds (i8, ptr @Y, i64 4), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds (i8, ptr @Y, i64 8), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds (i8, ptr @Y, i64 12), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds (i8, ptr @Y, i64 16), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds (i8, ptr @Y, i64 20), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds (i8, ptr @Y, i64 24), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds (i8, ptr @Y, i64 28), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds (i8, ptr @Y, i64 32), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds (i8, ptr @Y, i64 36), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds (i8, ptr @Y, i64 40), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds (i8, ptr @Y, i64 44), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds (i8, ptr @Y, i64 48), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds (i8, ptr @Y, i64 52), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds (i8, ptr @Y, i64 56), align 8
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds (i8, ptr @Y, i64 60), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds (i8, ptr @Y, i64 64), align 8
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds (i8, ptr @Y, i64 68), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr inbounds (i8, ptr @Y, i64 72), align 8
+; CHECK-NEXT: store i32 1, ptr getelementptr (i8, ptr @Y, i64 144), align 8
+; CHECK-NEXT: store i32 1, ptr getelementptr (i8, ptr @Y, i64 76), align 8
; CHECK-NEXT: ret void
;
store i32 1, ptr @Y, align 4
diff --git a/llvm/test/Transforms/InstCombine/fma.ll b/llvm/test/Transforms/InstCombine/fma.ll
index cf3d7f3c525a..b88250d43428 100644
--- a/llvm/test/Transforms/InstCombine/fma.ll
+++ b/llvm/test/Transforms/InstCombine/fma.ll
@@ -194,8 +194,7 @@ define float @fmuladd_unary_fneg_x_unary_fneg_y(float %x, float %y, float %z) {
define float @fmuladd_fneg_x_fneg_y_fast(float %x, float %y, float %z) {
; CHECK-LABEL: @fmuladd_fneg_x_fneg_y_fast(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[FMULADD:%.*]] = fadd fast float [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[FMULADD:%.*]] = call fast float @llvm.fmuladd.f32(float [[X:%.*]], float [[Y:%.*]], float [[Z:%.*]])
; CHECK-NEXT: ret float [[FMULADD]]
;
%x.fneg = fsub float -0.0, %x
@@ -204,10 +203,27 @@ define float @fmuladd_fneg_x_fneg_y_fast(float %x, float %y, float %z) {
ret float %fmuladd
}
+define float @fmuladd_unfold(float %x, float %y, float %z) {
+; CHECK-LABEL: @fmuladd_unfold(
+; CHECK-NEXT: [[FMULADD:%.*]] = call reassoc contract float @llvm.fmuladd.f32(float [[X:%.*]], float [[Y:%.*]], float [[Z:%.*]])
+; CHECK-NEXT: ret float [[FMULADD]]
+;
+ %fmuladd = call reassoc contract float @llvm.fmuladd.f32(float %x, float %y, float %z)
+ ret float %fmuladd
+}
+
+define <8 x half> @fmuladd_unfold_vec(<8 x half> %x, <8 x half> %y, <8 x half> %z) {
+; CHECK-LABEL: @fmuladd_unfold_vec(
+; CHECK-NEXT: [[FMULADD:%.*]] = call reassoc contract <8 x half> @llvm.fmuladd.v8f16(<8 x half> [[X:%.*]], <8 x half> [[Y:%.*]], <8 x half> [[Z:%.*]])
+; CHECK-NEXT: ret <8 x half> [[FMULADD]]
+;
+ %fmuladd = call reassoc contract <8 x half> @llvm.fmuladd.v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z)
+ ret <8 x half> %fmuladd
+}
+
define float @fmuladd_unary_fneg_x_unary_fneg_y_fast(float %x, float %y, float %z) {
; CHECK-LABEL: @fmuladd_unary_fneg_x_unary_fneg_y_fast(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[FMULADD:%.*]] = fadd fast float [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[FMULADD:%.*]] = call fast float @llvm.fmuladd.f32(float [[X:%.*]], float [[Y:%.*]], float [[Z:%.*]])
; CHECK-NEXT: ret float [[FMULADD]]
;
%x.fneg = fneg float %x
@@ -285,8 +301,7 @@ define float @fmuladd_fabs_x_fabs_x(float %x, float %z) {
define float @fmuladd_fabs_x_fabs_x_fast(float %x, float %z) {
; CHECK-LABEL: @fmuladd_fabs_x_fabs_x_fast(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[X:%.*]], [[X]]
-; CHECK-NEXT: [[FMULADD:%.*]] = fadd fast float [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[FMULADD:%.*]] = call fast float @llvm.fmuladd.f32(float [[X:%.*]], float [[X]], float [[Z:%.*]])
; CHECK-NEXT: ret float [[FMULADD]]
;
%x.fabs = call float @llvm.fabs.f32(float %x)
@@ -312,10 +327,10 @@ define float @fma_k_y_z_fast(float %y, float %z) {
ret float %fma
}
+; Treat fmuladd like an fma intrinsic
define float @fmuladd_k_y_z_fast(float %y, float %z) {
; CHECK-LABEL: @fmuladd_k_y_z_fast(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[Y:%.*]], 4.000000e+00
-; CHECK-NEXT: [[FMULADD:%.*]] = fadd fast float [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[FMULADD:%.*]] = call fast float @llvm.fmuladd.f32(float [[Y:%.*]], float 4.000000e+00, float [[Z:%.*]])
; CHECK-NEXT: ret float [[FMULADD]]
;
%fmuladd = call fast float @llvm.fmuladd.f32(float 4.0, float %y, float %z)
diff --git a/llvm/test/Transforms/InstCombine/fmul.ll b/llvm/test/Transforms/InstCombine/fmul.ll
index 1526956c5b24..ae2df634b020 100644
--- a/llvm/test/Transforms/InstCombine/fmul.ll
+++ b/llvm/test/Transforms/InstCombine/fmul.ll
@@ -1131,7 +1131,7 @@ for.body:
define double @fmul_negated_constant_expression(double %x) {
; CHECK-LABEL: @fmul_negated_constant_expression(
-; CHECK-NEXT: [[FSUB:%.*]] = fneg double bitcast (i64 ptrtoint (ptr getelementptr inbounds ({ [2 x ptr] }, ptr @g, i64 1, i32 0, i64 0) to i64) to double)
+; CHECK-NEXT: [[FSUB:%.*]] = fneg double bitcast (i64 ptrtoint (ptr getelementptr inbounds (i8, ptr @g, i64 16) to i64) to double)
; CHECK-NEXT: [[R:%.*]] = fmul double [[FSUB]], [[X:%.*]]
; CHECK-NEXT: ret double [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/force-opaque-ptr.ll b/llvm/test/Transforms/InstCombine/force-opaque-ptr.ll
index ccc34e9134de..3b799e2fb2d0 100644
--- a/llvm/test/Transforms/InstCombine/force-opaque-ptr.ll
+++ b/llvm/test/Transforms/InstCombine/force-opaque-ptr.ll
@@ -5,14 +5,14 @@
define ptr @gep_constexpr_gv_1() {
; CHECK-LABEL: @gep_constexpr_gv_1(
-; CHECK-NEXT: ret ptr getelementptr inbounds ([16 x i16], ptr @g, i64 0, i64 10)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @g, i64 20)
;
ret ptr getelementptr([16 x i16], ptr @g, i64 0, i64 10)
}
define ptr @gep_constexpr_gv_2() {
; CHECK-LABEL: @gep_constexpr_gv_2(
-; CHECK-NEXT: ret ptr getelementptr inbounds ([16 x i16], ptr @g, i64 0, i64 12)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @g, i64 24)
;
ret ptr getelementptr(i32, ptr getelementptr([16 x i16], ptr @g, i64 0, i64 10), i64 1)
}
diff --git a/llvm/test/Transforms/InstCombine/fortify-folding.ll b/llvm/test/Transforms/InstCombine/fortify-folding.ll
index a6b5dc90c364..988726c99edb 100644
--- a/llvm/test/Transforms/InstCombine/fortify-folding.ll
+++ b/llvm/test/Transforms/InstCombine/fortify-folding.ll
@@ -39,7 +39,7 @@ define ptr @test_memccpy_tail() {
define ptr @test_mempcpy() {
; CHECK-LABEL: @test_mempcpy(
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 1 dereferenceable(15) @a, ptr noundef nonnull align 1 dereferenceable(15) @b, i64 15, i1 false)
-; CHECK-NEXT: ret ptr getelementptr inbounds ([60 x i8], ptr @a, i64 0, i64 15)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a, i64 15)
;
%ret = call ptr @__mempcpy_chk(ptr @a, ptr @b, i64 15, i64 -1)
ret ptr %ret
@@ -57,7 +57,7 @@ define ptr @test_not_mempcpy() {
define ptr @test_mempcpy_tail() {
; CHECK-LABEL: @test_mempcpy_tail(
; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 1 dereferenceable(15) @a, ptr noundef nonnull align 1 dereferenceable(15) @b, i64 15, i1 false)
-; CHECK-NEXT: ret ptr getelementptr inbounds ([60 x i8], ptr @a, i64 0, i64 15)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a, i64 15)
;
%ret = tail call ptr @__mempcpy_chk(ptr @a, ptr @b, i64 15, i64 -1)
ret ptr %ret
diff --git a/llvm/test/Transforms/InstCombine/freeze.ll b/llvm/test/Transforms/InstCombine/freeze.ll
index 391d626a795c..5fedb1f85750 100644
--- a/llvm/test/Transforms/InstCombine/freeze.ll
+++ b/llvm/test/Transforms/InstCombine/freeze.ll
@@ -1160,6 +1160,28 @@ define i32 @propagate_drop_flags_trunc(i64 %arg) {
ret i32 %v1.fr
}
+define ptr @propagate_drop_flags_gep_nusw(ptr %p) {
+; CHECK-LABEL: @propagate_drop_flags_gep_nusw(
+; CHECK-NEXT: [[P_FR:%.*]] = freeze ptr [[P:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[P_FR]], i64 1
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nusw i8, ptr %p, i64 1
+ %gep.fr = freeze ptr %gep
+ ret ptr %gep.fr
+}
+
+define ptr @propagate_drop_flags_gep_nuw(ptr %p) {
+; CHECK-LABEL: @propagate_drop_flags_gep_nuw(
+; CHECK-NEXT: [[P_FR:%.*]] = freeze ptr [[P:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[P_FR]], i64 1
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nuw i8, ptr %p, i64 1
+ %gep.fr = freeze ptr %gep
+ ret ptr %gep.fr
+}
+
declare i32 @llvm.umax.i32(i32 %a, i32 %b)
define i32 @freeze_call_with_range_attr(i32 %a) {
diff --git a/llvm/test/Transforms/InstCombine/gep-custom-dl.ll b/llvm/test/Transforms/InstCombine/gep-custom-dl.ll
index d9449e05612c..e8eaf4e24f7e 100644
--- a/llvm/test/Transforms/InstCombine/gep-custom-dl.ll
+++ b/llvm/test/Transforms/InstCombine/gep-custom-dl.ll
@@ -34,7 +34,7 @@ define ptr @test2(ptr %I) {
define void @test3(i8 %B) {
; This should be turned into a constexpr instead of being an instruction
; CHECK-LABEL: @test3(
-; CHECK-NEXT: store i8 [[B:%.*]], ptr getelementptr inbounds ([10 x i8], ptr @Global, i32 0, i32 4), align 1
+; CHECK-NEXT: store i8 [[B:%.*]], ptr getelementptr inbounds (i8, ptr @Global, i32 4), align 1
; CHECK-NEXT: ret void
;
%A = getelementptr [10 x i8], ptr @Global, i32 0, i32 4
@@ -62,7 +62,7 @@ define void @test_evaluate_gep_nested_as_ptrs(ptr addrspace(2) %B) {
define void @test_evaluate_gep_as_ptrs_array(ptr addrspace(2) %B) {
; CHECK-LABEL: @test_evaluate_gep_as_ptrs_array(
-; CHECK-NEXT: store ptr addrspace(2) [[B:%.*]], ptr addrspace(1) getelementptr inbounds ([4 x ptr addrspace(2)], ptr addrspace(1) @arst, i32 0, i32 2), align 8
+; CHECK-NEXT: store ptr addrspace(2) [[B:%.*]], ptr addrspace(1) getelementptr inbounds (i8, ptr addrspace(1) @arst, i32 16), align 8
; CHECK-NEXT: ret void
;
@@ -168,7 +168,7 @@ define i32 @test10() {
define i16 @constant_fold_custom_dl() {
; CHECK-LABEL: @constant_fold_custom_dl(
; CHECK-NEXT: entry:
-; CHECK-NEXT: ret i16 ptrtoint (ptr addrspace(1) getelementptr (i8, ptr addrspace(1) getelementptr inbounds ([1000 x i8], ptr addrspace(1) @X_as1, i32 1, i32 0), i16 sub (i16 0, i16 ptrtoint (ptr addrspace(1) @X_as1 to i16))) to i16)
+; CHECK-NEXT: ret i16 ptrtoint (ptr addrspace(1) getelementptr (i8, ptr addrspace(1) getelementptr inbounds (i8, ptr addrspace(1) @X_as1, i32 1000), i16 sub (i16 0, i16 ptrtoint (ptr addrspace(1) @X_as1 to i16))) to i16)
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/getelementptr.ll b/llvm/test/Transforms/InstCombine/getelementptr.ll
index 04b0c196ab51..e82c168ced01 100644
--- a/llvm/test/Transforms/InstCombine/getelementptr.ll
+++ b/llvm/test/Transforms/InstCombine/getelementptr.ll
@@ -63,7 +63,7 @@ define ptr @test4(ptr %I) {
define void @test5(i8 %B) {
; This should be turned into a constexpr instead of being an instruction
; CHECK-LABEL: @test5(
-; CHECK-NEXT: store i8 [[B:%.*]], ptr getelementptr inbounds ([10 x i8], ptr @Global, i64 0, i64 4), align 1
+; CHECK-NEXT: store i8 [[B:%.*]], ptr getelementptr inbounds (i8, ptr @Global, i64 4), align 1
; CHECK-NEXT: ret void
;
%A = getelementptr [10 x i8], ptr @Global, i64 0, i64 4
@@ -74,7 +74,7 @@ define void @test5(i8 %B) {
define void @test5_as1(i8 %B) {
; This should be turned into a constexpr instead of being an instruction
; CHECK-LABEL: @test5_as1(
-; CHECK-NEXT: store i8 [[B:%.*]], ptr addrspace(1) getelementptr inbounds ([10 x i8], ptr addrspace(1) @Global_as1, i16 0, i16 4), align 1
+; CHECK-NEXT: store i8 [[B:%.*]], ptr addrspace(1) getelementptr inbounds (i8, ptr addrspace(1) @Global_as1, i16 4), align 1
; CHECK-NEXT: ret void
;
%A = getelementptr [10 x i8], ptr addrspace(1) @Global_as1, i16 0, i16 4
@@ -102,7 +102,7 @@ define void @test_evaluate_gep_nested_as_ptrs(ptr addrspace(2) %B) {
define void @test_evaluate_gep_as_ptrs_array(ptr addrspace(2) %B) {
; CHECK-LABEL: @test_evaluate_gep_as_ptrs_array(
-; CHECK-NEXT: store ptr addrspace(2) [[B:%.*]], ptr addrspace(1) getelementptr inbounds ([4 x ptr addrspace(2)], ptr addrspace(1) @arst, i16 0, i16 2), align 4
+; CHECK-NEXT: store ptr addrspace(2) [[B:%.*]], ptr addrspace(1) getelementptr inbounds (i8, ptr addrspace(1) @arst, i16 8), align 4
; CHECK-NEXT: ret void
;
@@ -114,7 +114,7 @@ define void @test_evaluate_gep_as_ptrs_array(ptr addrspace(2) %B) {
; This should be turned into a constexpr instead of being an instruction
define void @test_overaligned_vec(i8 %B) {
; CHECK-LABEL: @test_overaligned_vec(
-; CHECK-NEXT: store i8 [[B:%.*]], ptr getelementptr inbounds ([10 x i8], ptr @Global, i64 0, i64 2), align 1
+; CHECK-NEXT: store i8 [[B:%.*]], ptr getelementptr inbounds (i8, ptr @Global, i64 2), align 1
; CHECK-NEXT: ret void
;
%A = getelementptr <2 x half>, ptr @Global, i64 0, i64 1
@@ -267,8 +267,8 @@ define <2 x i1> @test13_fixed_scalable(i64 %X, ptr %P, <2 x i64> %y) nounwind {
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 4
; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <2 x i64> poison, i64 [[TMP3]], i64 0
-; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <2 x i64> [[DOTSPLATINSERT1]], <2 x i64> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[B_IDX:%.*]] = mul nsw <2 x i64> [[DOTSPLAT2]], [[Y:%.*]]
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i64> [[DOTSPLATINSERT1]], <2 x i64> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[B_IDX:%.*]] = mul nsw <2 x i64> [[DOTSPLAT]], [[Y:%.*]]
; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i64> [[A_IDX]], [[B_IDX]]
; CHECK-NEXT: ret <2 x i1> [[C]]
;
@@ -537,7 +537,7 @@ define i32 @test21() {
define i1 @test22() {
; CHECK-LABEL: @test22(
-; CHECK-NEXT: ret i1 icmp ult (ptr getelementptr inbounds (i32, ptr @A, i64 1), ptr getelementptr (i32, ptr @B, i64 2))
+; CHECK-NEXT: ret i1 icmp ult (ptr getelementptr inbounds (i8, ptr @A, i64 4), ptr getelementptr (i8, ptr @B, i64 8))
;
%C = icmp ult ptr getelementptr (i32, ptr @A, i64 1),
getelementptr (i32, ptr @B, i64 2)
@@ -828,7 +828,7 @@ entry:
define i32 @test35() nounwind {
; CHECK-LABEL: @test35(
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(1) @"\01LC8", ptr nonnull getelementptr inbounds ([[T0:%.*]], ptr @s, i64 0, i32 1, i64 0)) #[[ATTR0]]
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(1) @"\01LC8", ptr nonnull getelementptr inbounds (i8, ptr @s, i64 8)) #[[ATTR0]]
; CHECK-NEXT: ret i32 0
;
call i32 (ptr, ...) @printf(ptr @"\01LC8",
@@ -839,7 +839,7 @@ define i32 @test35() nounwind {
; Don't treat signed offsets as unsigned.
define ptr @test36() nounwind {
; CHECK-LABEL: @test36(
-; CHECK-NEXT: ret ptr getelementptr ([11 x i8], ptr @array, i64 -1, i64 10)
+; CHECK-NEXT: ret ptr getelementptr (i8, ptr @array, i64 -1)
;
ret ptr getelementptr ([11 x i8], ptr @array, i32 0, i64 -1)
}
@@ -1377,14 +1377,14 @@ define ptr @gep_of_gep_multiuse_var_and_var(ptr %p, i64 %idx, i64 %idx2) {
define ptr @const_gep_global_di_i8_smaller() {
; CHECK-LABEL: @const_gep_global_di_i8_smaller(
-; CHECK-NEXT: ret ptr getelementptr (i8, ptr @g_i32_di, i64 3)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @g_i32_di, i64 3)
;
ret ptr getelementptr (i8, ptr @g_i32_di, i64 3)
}
define ptr @const_gep_global_di_i8_exact() {
; CHECK-LABEL: @const_gep_global_di_i8_exact(
-; CHECK-NEXT: ret ptr getelementptr inbounds (i32, ptr @g_i32_di, i64 1)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @g_i32_di, i64 4)
;
ret ptr getelementptr (i8, ptr @g_i32_di, i64 4)
}
@@ -1398,21 +1398,21 @@ define ptr @const_gep_global_di_i8_larger() {
define ptr @const_gep_global_di_i64_larger() {
; CHECK-LABEL: @const_gep_global_di_i64_larger(
-; CHECK-NEXT: ret ptr getelementptr (i32, ptr @g_i32_di, i64 2)
+; CHECK-NEXT: ret ptr getelementptr (i8, ptr @g_i32_di, i64 8)
;
ret ptr getelementptr (i64, ptr @g_i32_di, i64 1)
}
define ptr @const_gep_global_e_smaller() {
; CHECK-LABEL: @const_gep_global_e_smaller(
-; CHECK-NEXT: ret ptr getelementptr (i8, ptr @g_i32_e, i64 3)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @g_i32_e, i64 3)
;
ret ptr getelementptr (i8, ptr @g_i32_e, i64 3)
}
define ptr @const_gep_global_e_exact() {
; CHECK-LABEL: @const_gep_global_e_exact(
-; CHECK-NEXT: ret ptr getelementptr inbounds (i32, ptr @g_i32_e, i64 1)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @g_i32_e, i64 4)
;
ret ptr getelementptr (i8, ptr @g_i32_e, i64 4)
}
@@ -1433,7 +1433,7 @@ define ptr @const_gep_global_ew_smaller() {
define ptr @const_gep_global_ew_exact() {
; CHECK-LABEL: @const_gep_global_ew_exact(
-; CHECK-NEXT: ret ptr getelementptr (i32, ptr @g_i32_ew, i64 1)
+; CHECK-NEXT: ret ptr getelementptr (i8, ptr @g_i32_ew, i64 4)
;
ret ptr getelementptr (i8, ptr @g_i32_ew, i64 4)
}
@@ -1447,7 +1447,7 @@ define ptr @const_gep_global_ew_larger() {
define ptr @const_gep_0xi8_global() {
; CHECK-LABEL: @const_gep_0xi8_global(
-; CHECK-NEXT: ret ptr getelementptr ([0 x i8], ptr @g_0xi8_e, i64 0, i64 10)
+; CHECK-NEXT: ret ptr getelementptr (i8, ptr @g_0xi8_e, i64 10)
;
ret ptr getelementptr ([0 x i8], ptr @g_0xi8_e, i64 0, i64 10)
}
diff --git a/llvm/test/Transforms/InstCombine/hoist-xor-by-constant-from-xor-by-value.ll b/llvm/test/Transforms/InstCombine/hoist-xor-by-constant-from-xor-by-value.ll
index db2c8e2f22f6..d75dbcf9c9b9 100644
--- a/llvm/test/Transforms/InstCombine/hoist-xor-by-constant-from-xor-by-value.ll
+++ b/llvm/test/Transforms/InstCombine/hoist-xor-by-constant-from-xor-by-value.ll
@@ -94,7 +94,7 @@ entry:
define i16 @constantexpr2() {
; CHECK-LABEL: @constantexpr2(
-; CHECK-NEXT: [[I1:%.*]] = zext i1 icmp ne (ptr getelementptr inbounds ([6 x [1 x i64]], ptr @global_constant3, i64 0, i64 5, i64 0), ptr @global_constant4) to i16
+; CHECK-NEXT: [[I1:%.*]] = zext i1 icmp ne (ptr getelementptr inbounds (i8, ptr @global_constant3, i64 40), ptr @global_constant4) to i16
; CHECK-NEXT: [[I2:%.*]] = load ptr, ptr @global_constant5, align 1
; CHECK-NEXT: [[I3:%.*]] = load i16, ptr [[I2]], align 1
; CHECK-NEXT: [[I4:%.*]] = xor i16 [[I3]], [[I1]]
diff --git a/llvm/test/Transforms/InstCombine/known-bits.ll b/llvm/test/Transforms/InstCombine/known-bits.ll
index 82cd24027e4e..41b16f3333c1 100644
--- a/llvm/test/Transforms/InstCombine/known-bits.ll
+++ b/llvm/test/Transforms/InstCombine/known-bits.ll
@@ -1698,6 +1698,44 @@ define i32 @test_none(float nofpclass(all) %x) {
ret i32 %and
}
+; We cannot make assumptions about the sign of the result of sqrt
+; when the input is a negative value (except for -0).
+define i1 @pr92217() {
+; CHECK-LABEL: @pr92217(
+; CHECK-NEXT: [[X:%.*]] = call float @llvm.sqrt.f32(float 0xC6DEBE9E60000000)
+; CHECK-NEXT: [[Y:%.*]] = bitcast float [[X]] to i32
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[Y]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %x = call float @llvm.sqrt.f32(float 0xC6DEBE9E60000000)
+ %y = bitcast float %x to i32
+ %cmp = icmp slt i32 %y, 0
+ ret i1 %cmp
+}
+
+define i1 @sqrt_negative_input(float nofpclass(nan zero pnorm psub pinf) %a) {
+; CHECK-LABEL: @sqrt_negative_input(
+; CHECK-NEXT: [[X:%.*]] = call float @llvm.sqrt.f32(float [[A:%.*]])
+; CHECK-NEXT: [[Y:%.*]] = bitcast float [[X]] to i32
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[Y]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %x = call float @llvm.sqrt.f32(float %a)
+ %y = bitcast float %x to i32
+ %cmp = icmp slt i32 %y, 0
+ ret i1 %cmp
+}
+
+define i1 @sqrt_negative_input_nnan(float nofpclass(nan zero pnorm psub pinf) %a) {
+; CHECK-LABEL: @sqrt_negative_input_nnan(
+; CHECK-NEXT: ret i1 false
+;
+ %x = call nnan float @llvm.sqrt.f32(float %a)
+ %y = bitcast float %x to i32
+ %cmp = icmp slt i32 %y, 0
+ ret i1 %cmp
+}
+
define i8 @test_icmp_add(i8 %n, i8 %n2, i8 %other) {
; CHECK-LABEL: @test_icmp_add(
; CHECK-NEXT: entry:
diff --git a/llvm/test/Transforms/InstCombine/load-cmp.ll b/llvm/test/Transforms/InstCombine/load-cmp.ll
index e941284a798e..b956de29e0b8 100644
--- a/llvm/test/Transforms/InstCombine/load-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/load-cmp.ll
@@ -334,3 +334,20 @@ define i1 @test10_struct_arr_noinbounds_i64(i64 %x) {
%r = icmp eq i32 %q, 9
ret i1 %r
}
+
+@table = internal constant [2 x ptr] [ptr @g, ptr getelementptr (i8, ptr @g, i64 4)], align 16
+@g = external global [2 x i32]
+
+define i1 @pr93017(i64 %idx) {
+; CHECK-LABEL: @pr93017(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[IDX:%.*]] to i32
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr @table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT: [[V:%.*]] = load ptr, ptr [[GEP]], align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne ptr [[V]], null
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %gep = getelementptr inbounds [2 x ptr], ptr @table, i64 0, i64 %idx
+ %v = load ptr, ptr %gep
+ %cmp = icmp ne ptr %v, null
+ ret i1 %cmp
+}
diff --git a/llvm/test/Transforms/InstCombine/loadstore-alignment.ll b/llvm/test/Transforms/InstCombine/loadstore-alignment.ll
index 1027468d6715..098f2eee52df 100644
--- a/llvm/test/Transforms/InstCombine/loadstore-alignment.ll
+++ b/llvm/test/Transforms/InstCombine/loadstore-alignment.ll
@@ -9,7 +9,7 @@ target datalayout = "E-p:64:64:64-p1:64:64:64-p2:32:32:32-a0:0:8-f32:32:32-f64:6
define <2 x i64> @static_hem() {
; CHECK-LABEL: @static_hem(
-; CHECK-NEXT: [[L:%.*]] = load <2 x i64>, ptr getelementptr (<2 x i64>, ptr @x, i64 7), align 1
+; CHECK-NEXT: [[L:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @x, i64 112), align 1
; CHECK-NEXT: ret <2 x i64> [[L]]
;
%t = getelementptr <2 x i64>, ptr @x, i32 7
@@ -66,7 +66,7 @@ define <2 x i64> @bar() {
define void @static_hem_store(<2 x i64> %y) {
; CHECK-LABEL: @static_hem_store(
-; CHECK-NEXT: store <2 x i64> [[Y:%.*]], ptr getelementptr (<2 x i64>, ptr @x, i64 7), align 1
+; CHECK-NEXT: store <2 x i64> [[Y:%.*]], ptr getelementptr (i8, ptr @x, i64 112), align 1
; CHECK-NEXT: ret void
;
%t = getelementptr <2 x i64>, ptr @x, i32 7
diff --git a/llvm/test/Transforms/InstCombine/memchr-2.ll b/llvm/test/Transforms/InstCombine/memchr-2.ll
index 22aae6edcf92..2e85fe4ad1de 100644
--- a/llvm/test/Transforms/InstCombine/memchr-2.ll
+++ b/llvm/test/Transforms/InstCombine/memchr-2.ll
@@ -51,7 +51,7 @@ define ptr @fold_memchr_a12345_4_3() {
define ptr @fold_memchr_a12345_3_3() {
; CHECK-LABEL: @fold_memchr_a12345_3_3(
-; CHECK-NEXT: ret ptr getelementptr inbounds ([5 x i8], ptr @a12345, i64 0, i64 2)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a12345, i64 2)
;
%res = call ptr @memchr(ptr @a12345, i32 3, i64 3)
@@ -63,7 +63,7 @@ define ptr @fold_memchr_a12345_3_3() {
define ptr @fold_memchr_a12345_3_9() {
; CHECK-LABEL: @fold_memchr_a12345_3_9(
-; CHECK-NEXT: ret ptr getelementptr inbounds ([5 x i8], ptr @a12345, i64 0, i64 2)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a12345, i64 2)
;
%res = call ptr @memchr(ptr @a12345, i32 3, i64 9)
@@ -76,7 +76,7 @@ define ptr @fold_memchr_a12345_3_9() {
define ptr @fold_memchr_a123f45_500_9() {
; CHECK-LABEL: @fold_memchr_a123f45_500_9(
-; CHECK-NEXT: ret ptr getelementptr inbounds ([5 x i8], ptr @a123f45, i64 0, i64 3)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a123f45, i64 3)
;
%res = call ptr @memchr(ptr @a123f45, i32 500, i64 9)
@@ -89,7 +89,7 @@ define ptr @fold_memchr_a123f45_500_9() {
define ptr @fold_a12345_3_n(i64 %n) {
; CHECK-LABEL: @fold_a12345_3_n(
; CHECK-NEXT: [[MEMCHR_CMP:%.*]] = icmp ult i64 [[N:%.*]], 3
-; CHECK-NEXT: [[RES:%.*]] = select i1 [[MEMCHR_CMP]], ptr null, ptr getelementptr inbounds ([5 x i8], ptr @a12345, i64 0, i64 2)
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[MEMCHR_CMP]], ptr null, ptr getelementptr inbounds (i8, ptr @a12345, i64 2)
; CHECK-NEXT: ret ptr [[RES]]
;
@@ -104,7 +104,7 @@ define ptr @fold_a12345_3_n(i64 %n) {
define ptr @fold_a12345_259_n(i64 %n) {
; CHECK-LABEL: @fold_a12345_259_n(
; CHECK-NEXT: [[MEMCHR_CMP:%.*]] = icmp ult i64 [[N:%.*]], 3
-; CHECK-NEXT: [[RES:%.*]] = select i1 [[MEMCHR_CMP]], ptr null, ptr getelementptr inbounds ([5 x i8], ptr @a12345, i64 0, i64 2)
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[MEMCHR_CMP]], ptr null, ptr getelementptr inbounds (i8, ptr @a12345, i64 2)
; CHECK-NEXT: ret ptr [[RES]]
;
diff --git a/llvm/test/Transforms/InstCombine/memchr-4.ll b/llvm/test/Transforms/InstCombine/memchr-4.ll
index 93884c73af62..9aec0f1dfe57 100644
--- a/llvm/test/Transforms/InstCombine/memchr-4.ll
+++ b/llvm/test/Transforms/InstCombine/memchr-4.ll
@@ -44,7 +44,7 @@ define ptr @call_memchr_ax_2_uimax_p2() {
define ptr @fold_memchr_a12345_3_uimax_p2() {
; CHECK-LABEL: @fold_memchr_a12345_3_uimax_p2(
-; CHECK-NEXT: ret ptr getelementptr inbounds ([5 x i8], ptr @a12345, i64 0, i64 2)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a12345, i64 2)
;
%res = call ptr @memchr(ptr @a12345, i32 3, i64 4294967297)
diff --git a/llvm/test/Transforms/InstCombine/memchr-6.ll b/llvm/test/Transforms/InstCombine/memchr-6.ll
index 6243c464c6d3..28364a92f54d 100644
--- a/llvm/test/Transforms/InstCombine/memchr-6.ll
+++ b/llvm/test/Transforms/InstCombine/memchr-6.ll
@@ -69,7 +69,7 @@ define ptr @fold_memchr_a111122_c_n(i32 %C, i64 %N) {
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 2
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i64 [[N:%.*]], 4
; CHECK-NEXT: [[TMP4:%.*]] = and i1 [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[MEMCHR_SEL1:%.*]] = select i1 [[TMP4]], ptr getelementptr inbounds ([6 x i8], ptr @a111122, i64 0, i64 4), ptr null
+; CHECK-NEXT: [[MEMCHR_SEL1:%.*]] = select i1 [[TMP4]], ptr getelementptr inbounds (i8, ptr @a111122, i64 4), ptr null
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i8 [[TMP1]], 1
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i64 [[N]], 0
; CHECK-NEXT: [[TMP7:%.*]] = and i1 [[TMP6]], [[TMP5]]
@@ -103,7 +103,7 @@ define ptr @call_memchr_a1110111_c_4(i32 %C) {
; CHECK-LABEL: @call_memchr_a1110111_c_4(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 0
-; CHECK-NEXT: [[MEMCHR_SEL1:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds ([7 x i8], ptr @a1110111, i64 0, i64 3), ptr null
+; CHECK-NEXT: [[MEMCHR_SEL1:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds (i8, ptr @a1110111, i64 3), ptr null
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP1]], 1
; CHECK-NEXT: [[MEMCHR_SEL2:%.*]] = select i1 [[TMP3]], ptr @a1110111, ptr [[MEMCHR_SEL1]]
; CHECK-NEXT: ret ptr [[MEMCHR_SEL2]]
diff --git a/llvm/test/Transforms/InstCombine/memchr-7.ll b/llvm/test/Transforms/InstCombine/memchr-7.ll
index 50072b5ca148..0b364cce656d 100644
--- a/llvm/test/Transforms/InstCombine/memchr-7.ll
+++ b/llvm/test/Transforms/InstCombine/memchr-7.ll
@@ -76,7 +76,7 @@ define ptr @memchr_no_zero_cmp2(i32 %c) {
; CHECK-LABEL: @memchr_no_zero_cmp2(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 10
-; CHECK-NEXT: [[MEMCHR_SEL1:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds ([2 x i8], ptr @.str.1, i64 0, i64 1), ptr null
+; CHECK-NEXT: [[MEMCHR_SEL1:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds (i8, ptr @.str.1, i64 1), ptr null
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP1]], 13
; CHECK-NEXT: [[MEMCHR_SEL2:%.*]] = select i1 [[TMP3]], ptr @.str.1, ptr [[MEMCHR_SEL1]]
; CHECK-NEXT: ret ptr [[MEMCHR_SEL2]]
diff --git a/llvm/test/Transforms/InstCombine/memchr-8.ll b/llvm/test/Transforms/InstCombine/memchr-8.ll
index 0e878b77e40d..b2ac2e6eda9a 100644
--- a/llvm/test/Transforms/InstCombine/memchr-8.ll
+++ b/llvm/test/Transforms/InstCombine/memchr-8.ll
@@ -15,7 +15,7 @@ declare ptr @memrchr(ptr, i32, i64)
define ptr @call_a_pi32max_p1() {
; CHECK-LABEL: @call_a_pi32max_p1(
-; CHECK-NEXT: [[CHR:%.*]] = tail call ptr @memrchr(ptr noundef nonnull dereferenceable(2147483647) getelementptr inbounds (<{ i8, [4294967295 x i8] }>, ptr @a, i64 0, i32 1, i64 2147483647), i32 0, i64 2147483647)
+; CHECK-NEXT: [[CHR:%.*]] = tail call ptr @memrchr(ptr noundef nonnull dereferenceable(2147483647) getelementptr inbounds (i8, ptr @a, i64 2147483648), i32 0, i64 2147483647)
; CHECK-NEXT: ret ptr [[CHR]]
;
%ptr = getelementptr <{ i8, [4294967295 x i8] }>, ptr @a, i32 0, i32 1, i32 2147483647
@@ -28,7 +28,7 @@ define ptr @call_a_pi32max_p1() {
define ptr @call_a_pi32max() {
; CHECK-LABEL: @call_a_pi32max(
-; CHECK-NEXT: [[CHR:%.*]] = tail call ptr @memrchr(ptr noundef nonnull dereferenceable(2147483647) getelementptr inbounds (<{ i8, [4294967295 x i8] }>, ptr @a, i64 0, i32 1, i64 2147483648), i32 0, i64 2147483647)
+; CHECK-NEXT: [[CHR:%.*]] = tail call ptr @memrchr(ptr noundef nonnull dereferenceable(2147483647) getelementptr inbounds (i8, ptr @a, i64 2147483649), i32 0, i64 2147483647)
; CHECK-NEXT: ret ptr [[CHR]]
;
%ptr = getelementptr <{ i8, [4294967295 x i8] }>, ptr @a, i32 0, i32 1, i64 2147483648
@@ -42,7 +42,7 @@ define ptr @call_a_pi32max() {
define ptr @call_a_pui32max() {
; CHECK-LABEL: @call_a_pui32max(
-; CHECK-NEXT: [[CHR:%.*]] = tail call ptr @memrchr(ptr noundef nonnull dereferenceable(4294967295) getelementptr inbounds (<{ i8, [4294967295 x i8] }>, ptr @a, i64 0, i32 1, i64 0), i32 0, i64 4294967295)
+; CHECK-NEXT: [[CHR:%.*]] = tail call ptr @memrchr(ptr noundef nonnull dereferenceable(4294967295) getelementptr inbounds (i8, ptr @a, i64 1), i32 0, i64 4294967295)
; CHECK-NEXT: ret ptr [[CHR]]
;
%ptr = getelementptr <{ i8, [4294967295 x i8] }>, ptr @a, i32 0, i32 1, i32 0
diff --git a/llvm/test/Transforms/InstCombine/memchr-9.ll b/llvm/test/Transforms/InstCombine/memchr-9.ll
index fe80c282eed5..7a5e6c3f863c 100644
--- a/llvm/test/Transforms/InstCombine/memchr-9.ll
+++ b/llvm/test/Transforms/InstCombine/memchr-9.ll
@@ -24,19 +24,19 @@ define void @fold_memchr_A_pIb_cst_cst(ptr %pchr) {
; CHECK-NEXT: [[PST_0_4_4:%.*]] = getelementptr i8, ptr [[PCHR]], i64 16
; CHECK-NEXT: store ptr null, ptr [[PST_0_4_4]], align 8
; CHECK-NEXT: [[PST_1_0_1:%.*]] = getelementptr i8, ptr [[PCHR]], i64 24
-; CHECK-NEXT: store ptr getelementptr (i8, ptr @a, i64 1), ptr [[PST_1_0_1]], align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @a, i64 1), ptr [[PST_1_0_1]], align 8
; CHECK-NEXT: [[PST_1_0_3:%.*]] = getelementptr i8, ptr [[PCHR]], i64 32
-; CHECK-NEXT: store ptr getelementptr (i8, ptr @a, i64 1), ptr [[PST_1_0_3]], align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @a, i64 1), ptr [[PST_1_0_3]], align 8
; CHECK-NEXT: [[PST_1_1_1:%.*]] = getelementptr i8, ptr [[PCHR]], i64 40
; CHECK-NEXT: store ptr null, ptr [[PST_1_1_1]], align 8
; CHECK-NEXT: [[PST_1_1_2:%.*]] = getelementptr i8, ptr [[PCHR]], i64 48
-; CHECK-NEXT: store ptr getelementptr inbounds ([1 x %struct.A], ptr @a, i64 0, i64 0, i32 0, i64 1), ptr [[PST_1_1_2]], align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @a, i64 2), ptr [[PST_1_1_2]], align 8
; CHECK-NEXT: [[PST_1_3_3:%.*]] = getelementptr i8, ptr [[PCHR]], i64 56
; CHECK-NEXT: store ptr null, ptr [[PST_1_3_3]], align 8
; CHECK-NEXT: [[PST_1_3_4:%.*]] = getelementptr i8, ptr [[PCHR]], i64 64
; CHECK-NEXT: store ptr null, ptr [[PST_1_3_4]], align 8
; CHECK-NEXT: [[PST_1_3_6:%.*]] = getelementptr i8, ptr [[PCHR]], i64 80
-; CHECK-NEXT: store ptr getelementptr inbounds ([1 x %struct.A], ptr @a, i64 0, i64 0, i32 1, i64 1), ptr [[PST_1_3_6]], align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @a, i64 6), ptr [[PST_1_3_6]], align 8
; CHECK-NEXT: ret void
;
@@ -110,25 +110,25 @@ define void @fold_memchr_A_pIb_cst_N(i64 %N, ptr %pchr) {
; CHECK-NEXT: store ptr [[CHR_0_0_N]], ptr [[PCHR:%.*]], align 8
; CHECK-NEXT: [[PST_0_1_N:%.*]] = getelementptr i8, ptr [[PCHR]], i64 8
; CHECK-NEXT: [[MEMCHR_CMP1:%.*]] = icmp ult i64 [[N]], 3
-; CHECK-NEXT: [[CHR_0_1_N:%.*]] = select i1 [[MEMCHR_CMP1]], ptr null, ptr getelementptr inbounds ([1 x %struct.A], ptr @a, i64 0, i64 0, i32 0, i64 1)
+; CHECK-NEXT: [[CHR_0_1_N:%.*]] = select i1 [[MEMCHR_CMP1]], ptr null, ptr getelementptr inbounds (i8, ptr @a, i64 2)
; CHECK-NEXT: store ptr [[CHR_0_1_N]], ptr [[PST_0_1_N]], align 8
; CHECK-NEXT: [[PST_0_4_N:%.*]] = getelementptr i8, ptr [[PCHR]], i64 16
; CHECK-NEXT: store ptr null, ptr [[PST_0_4_N]], align 8
; CHECK-NEXT: [[PST_1_0_N:%.*]] = getelementptr i8, ptr [[PCHR]], i64 24
; CHECK-NEXT: [[MEMCHR_CMP2:%.*]] = icmp eq i64 [[N]], 0
-; CHECK-NEXT: [[CHR_1_0_N:%.*]] = select i1 [[MEMCHR_CMP2]], ptr null, ptr getelementptr (i8, ptr @a, i64 1)
+; CHECK-NEXT: [[CHR_1_0_N:%.*]] = select i1 [[MEMCHR_CMP2]], ptr null, ptr getelementptr inbounds (i8, ptr @a, i64 1)
; CHECK-NEXT: store ptr [[CHR_1_0_N]], ptr [[PST_1_0_N]], align 8
; CHECK-NEXT: [[PST_1_1_N:%.*]] = getelementptr i8, ptr [[PCHR]], i64 32
; CHECK-NEXT: [[MEMCHR_CMP3:%.*]] = icmp ult i64 [[N]], 2
-; CHECK-NEXT: [[CHR_1_1_N:%.*]] = select i1 [[MEMCHR_CMP3]], ptr null, ptr getelementptr inbounds ([1 x %struct.A], ptr @a, i64 0, i64 0, i32 0, i64 1)
+; CHECK-NEXT: [[CHR_1_1_N:%.*]] = select i1 [[MEMCHR_CMP3]], ptr null, ptr getelementptr inbounds (i8, ptr @a, i64 2)
; CHECK-NEXT: store ptr [[CHR_1_1_N]], ptr [[PST_1_1_N]], align 8
; CHECK-NEXT: [[PST_1_2_N:%.*]] = getelementptr i8, ptr [[PCHR]], i64 40
; CHECK-NEXT: [[MEMCHR_CMP4:%.*]] = icmp ult i64 [[N]], 4
-; CHECK-NEXT: [[CHR_1_2_N:%.*]] = select i1 [[MEMCHR_CMP4]], ptr null, ptr getelementptr inbounds ([1 x %struct.A], ptr @a, i64 0, i64 0, i32 1, i64 0)
+; CHECK-NEXT: [[CHR_1_2_N:%.*]] = select i1 [[MEMCHR_CMP4]], ptr null, ptr getelementptr inbounds (i8, ptr @a, i64 4)
; CHECK-NEXT: store ptr [[CHR_1_2_N]], ptr [[PST_1_2_N]], align 8
; CHECK-NEXT: [[PST_1_3_N:%.*]] = getelementptr i8, ptr [[PCHR]], i64 48
; CHECK-NEXT: [[MEMCHR_CMP5:%.*]] = icmp ult i64 [[N]], 6
-; CHECK-NEXT: [[CHR_1_3_N:%.*]] = select i1 [[MEMCHR_CMP5]], ptr null, ptr getelementptr inbounds ([1 x %struct.A], ptr @a, i64 0, i64 0, i32 1, i64 1)
+; CHECK-NEXT: [[CHR_1_3_N:%.*]] = select i1 [[MEMCHR_CMP5]], ptr null, ptr getelementptr inbounds (i8, ptr @a, i64 6)
; CHECK-NEXT: store ptr [[CHR_1_3_N]], ptr [[PST_1_3_N]], align 8
; CHECK-NEXT: [[PST_1_4_N:%.*]] = getelementptr i8, ptr [[PCHR]], i64 56
; CHECK-NEXT: store ptr null, ptr [[PST_1_4_N]], align 8
@@ -136,15 +136,15 @@ define void @fold_memchr_A_pIb_cst_N(i64 %N, ptr %pchr) {
; CHECK-NEXT: store ptr null, ptr [[PST_2_0_N]], align 8
; CHECK-NEXT: [[PST_2_1_N:%.*]] = getelementptr i8, ptr [[PCHR]], i64 72
; CHECK-NEXT: [[MEMCHR_CMP6:%.*]] = icmp eq i64 [[N]], 0
-; CHECK-NEXT: [[CHR_2_1_N:%.*]] = select i1 [[MEMCHR_CMP6]], ptr null, ptr getelementptr inbounds ([1 x %struct.A], ptr @a, i64 0, i64 0, i32 0, i64 1)
+; CHECK-NEXT: [[CHR_2_1_N:%.*]] = select i1 [[MEMCHR_CMP6]], ptr null, ptr getelementptr inbounds (i8, ptr @a, i64 2)
; CHECK-NEXT: store ptr [[CHR_2_1_N]], ptr [[PST_2_1_N]], align 8
; CHECK-NEXT: [[PST_2_2_N:%.*]] = getelementptr i8, ptr [[PCHR]], i64 80
; CHECK-NEXT: [[MEMCHR_CMP7:%.*]] = icmp ult i64 [[N]], 3
-; CHECK-NEXT: [[CHR_2_2_N:%.*]] = select i1 [[MEMCHR_CMP7]], ptr null, ptr getelementptr inbounds ([1 x %struct.A], ptr @a, i64 0, i64 0, i32 1, i64 0)
+; CHECK-NEXT: [[CHR_2_2_N:%.*]] = select i1 [[MEMCHR_CMP7]], ptr null, ptr getelementptr inbounds (i8, ptr @a, i64 4)
; CHECK-NEXT: store ptr [[CHR_2_2_N]], ptr [[PST_2_2_N]], align 8
; CHECK-NEXT: [[PST_2_3_N:%.*]] = getelementptr i8, ptr [[PCHR]], i64 88
; CHECK-NEXT: [[MEMCHR_CMP8:%.*]] = icmp ult i64 [[N]], 5
-; CHECK-NEXT: [[CHR_2_3_N:%.*]] = select i1 [[MEMCHR_CMP8]], ptr null, ptr getelementptr inbounds ([1 x %struct.A], ptr @a, i64 0, i64 0, i32 1, i64 1)
+; CHECK-NEXT: [[CHR_2_3_N:%.*]] = select i1 [[MEMCHR_CMP8]], ptr null, ptr getelementptr inbounds (i8, ptr @a, i64 6)
; CHECK-NEXT: store ptr [[CHR_2_3_N]], ptr [[PST_2_3_N]], align 8
; CHECK-NEXT: [[PST_2_4_N:%.*]] = getelementptr i8, ptr [[PCHR]], i64 96
; CHECK-NEXT: store ptr null, ptr [[PST_2_4_N]], align 8
@@ -230,13 +230,13 @@ define void @fold_memchr_A_pIb_cst_N(i64 %N, ptr %pchr) {
define void @call_memchr_A_pIb_xs_cst(ptr %pchr) {
; CHECK-LABEL: @call_memchr_A_pIb_xs_cst(
-; CHECK-NEXT: [[CHR_1_0_0_2:%.*]] = call ptr @memchr(ptr noundef nonnull dereferenceable(1) getelementptr inbounds ([1 x %struct.A], ptr @a, i64 1, i64 0), i32 0, i64 2)
+; CHECK-NEXT: [[CHR_1_0_0_2:%.*]] = call ptr @memchr(ptr noundef nonnull dereferenceable(1) getelementptr inbounds (i8, ptr @a, i64 8), i32 0, i64 2)
; CHECK-NEXT: store ptr [[CHR_1_0_0_2]], ptr [[PCHR:%.*]], align 8
; CHECK-NEXT: [[PST_1_0_1_2:%.*]] = getelementptr i8, ptr [[PCHR]], i64 8
-; CHECK-NEXT: [[CHR_1_0_1_2:%.*]] = call ptr @memchr(ptr noundef nonnull dereferenceable(1) getelementptr inbounds ([1 x %struct.A], ptr @a, i64 1, i64 0), i32 0, i64 2)
+; CHECK-NEXT: [[CHR_1_0_1_2:%.*]] = call ptr @memchr(ptr noundef nonnull dereferenceable(1) getelementptr inbounds (i8, ptr @a, i64 8), i32 0, i64 2)
; CHECK-NEXT: store ptr [[CHR_1_0_1_2]], ptr [[PST_1_0_1_2]], align 8
; CHECK-NEXT: [[PST_0_0_8_2:%.*]] = getelementptr i8, ptr [[PCHR]], i64 16
-; CHECK-NEXT: [[CHR_0_0_8_2:%.*]] = call ptr @memchr(ptr noundef nonnull dereferenceable(1) getelementptr inbounds ([1 x %struct.A], ptr @a, i64 1, i64 0, i32 0, i64 0), i32 0, i64 2)
+; CHECK-NEXT: [[CHR_0_0_8_2:%.*]] = call ptr @memchr(ptr noundef nonnull dereferenceable(1) getelementptr inbounds (i8, ptr @a, i64 8), i32 0, i64 2)
; CHECK-NEXT: store ptr [[CHR_0_0_8_2]], ptr [[PST_0_0_8_2]], align 8
; CHECK-NEXT: ret void
;
@@ -276,7 +276,7 @@ define void @call_memchr_A_pIb_xs_cst(ptr %pchr) {
define ptr @fold_memchr_gep_gep_gep() {
; CHECK-LABEL: @fold_memchr_gep_gep_gep(
-; CHECK-NEXT: ret ptr getelementptr (i16, ptr getelementptr (i32, ptr getelementptr inbounds ([2 x i64], ptr @ai64, i64 0, i64 1), i64 1), i64 1)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @ai64, i64 14)
;
%p8_1 = getelementptr [2 x i64], ptr @ai64, i64 0, i64 1
@@ -297,10 +297,10 @@ define ptr @fold_memchr_gep_gep_gep() {
define ptr @fold_memchr_union_member() {
; BE-CHECK-LABEL: @fold_memchr_union_member(
-; BE-CHECK-NEXT: ret ptr getelementptr (i8, ptr @u, i64 5)
+; BE-CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @u, i64 5)
;
; LE-CHECK-LABEL: @fold_memchr_union_member(
-; LE-CHECK-NEXT: ret ptr getelementptr inbounds ([[UNION_U:%.*]], ptr @u, i64 0, i32 0, i64 1)
+; LE-CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @u, i64 4)
;
%pi8u_p1 = getelementptr i8, ptr @u, i64 1
%pc = call ptr @memchr(ptr %pi8u_p1, i32 34, i64 8)
diff --git a/llvm/test/Transforms/InstCombine/memchr.ll b/llvm/test/Transforms/InstCombine/memchr.ll
index 2074fd7ba4f7..08435a5e0388 100644
--- a/llvm/test/Transforms/InstCombine/memchr.ll
+++ b/llvm/test/Transforms/InstCombine/memchr.ll
@@ -17,7 +17,7 @@ declare ptr @memchr(ptr, i32, i32)
define void @test1() {
; CHECK-LABEL: @test1(
-; CHECK-NEXT: store ptr getelementptr inbounds ([14 x i8], ptr @hello, i32 0, i32 6), ptr @chp, align 4
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @hello, i32 6), ptr @chp, align 4
; CHECK-NEXT: ret void
;
%dst = call ptr @memchr(ptr @hello, i32 119, i32 14)
@@ -37,7 +37,7 @@ define void @test2() {
define void @test3() {
; CHECK-LABEL: @test3(
-; CHECK-NEXT: store ptr getelementptr inbounds ([14 x i8], ptr @hello, i32 0, i32 13), ptr @chp, align 4
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @hello, i32 13), ptr @chp, align 4
; CHECK-NEXT: ret void
;
%dst = call ptr @memchr(ptr @hello, i32 0, i32 14)
@@ -58,7 +58,7 @@ define void @test4(i32 %chr) {
define void @test5() {
; CHECK-LABEL: @test5(
-; CHECK-NEXT: store ptr getelementptr inbounds ([14 x i8], ptr @hello, i32 0, i32 13), ptr @chp, align 4
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @hello, i32 13), ptr @chp, align 4
; CHECK-NEXT: ret void
;
%dst = call ptr @memchr(ptr @hello, i32 65280, i32 14)
@@ -68,7 +68,7 @@ define void @test5() {
define void @test6() {
; CHECK-LABEL: @test6(
-; CHECK-NEXT: store ptr getelementptr inbounds ([14 x i8], ptr @hello, i32 0, i32 6), ptr @chp, align 4
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @hello, i32 6), ptr @chp, align 4
; CHECK-NEXT: ret void
;
; Overflow, but we still find the right thing.
@@ -90,7 +90,7 @@ define void @test7() {
define void @test8() {
; CHECK-LABEL: @test8(
-; CHECK-NEXT: store ptr getelementptr inbounds ([14 x i8], ptr @hellonull, i32 0, i32 6), ptr @chp, align 4
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @hellonull, i32 6), ptr @chp, align 4
; CHECK-NEXT: ret void
;
%dst = call ptr @memchr(ptr @hellonull, i32 119, i32 14)
@@ -100,7 +100,7 @@ define void @test8() {
define void @test9() {
; CHECK-LABEL: @test9(
-; CHECK-NEXT: store ptr getelementptr inbounds ([14 x i8], ptr @hellonull, i32 0, i32 6), ptr @chp, align 4
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @hellonull, i32 6), ptr @chp, align 4
; CHECK-NEXT: ret void
;
%str = getelementptr [14 x i8], ptr @hellonull, i32 0, i32 2
diff --git a/llvm/test/Transforms/InstCombine/memcmp-8.ll b/llvm/test/Transforms/InstCombine/memcmp-8.ll
index a3759914ad4f..2bc1efad5c77 100644
--- a/llvm/test/Transforms/InstCombine/memcmp-8.ll
+++ b/llvm/test/Transforms/InstCombine/memcmp-8.ll
@@ -42,7 +42,7 @@ define i32 @fold_memcmp_a5pi_a5p5_n(i32 %i, i64 %n) {
; CHECK-LABEL: @fold_memcmp_a5pi_a5p5_n(
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[I:%.*]] to i64
; CHECK-NEXT: [[PA5_PI:%.*]] = getelementptr [5 x i8], ptr @a5, i64 0, i64 [[TMP1]]
-; CHECK-NEXT: [[CMP:%.*]] = call i32 @memcmp(ptr [[PA5_PI]], ptr nonnull getelementptr inbounds ([5 x i8], ptr @a5, i64 1, i64 0), i64 [[N:%.*]])
+; CHECK-NEXT: [[CMP:%.*]] = call i32 @memcmp(ptr [[PA5_PI]], ptr nonnull getelementptr inbounds (i8, ptr @a5, i64 5), i64 [[N:%.*]])
; CHECK-NEXT: ret i32 [[CMP]]
;
%pa5_pi = getelementptr [5 x i8], ptr @a5, i32 0, i32 %i
diff --git a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
index e9ff34735f1c..34e6c601f494 100644
--- a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
+++ b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
@@ -220,7 +220,7 @@ define void @test7() {
define void @test8() {
; CHECK-LABEL: @test8(
; CHECK-NEXT: [[AL:%.*]] = alloca [[U:%.*]], align 16
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 16 dereferenceable(20) [[AL]], ptr noundef nonnull align 4 dereferenceable(20) getelementptr inbounds ([2 x %U], ptr @H, i64 0, i64 1), i64 20, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 16 dereferenceable(20) [[AL]], ptr noundef nonnull align 4 dereferenceable(20) getelementptr inbounds (i8, ptr @H, i64 20), i64 20, i1 false)
; CHECK-NEXT: call void @bar(ptr nonnull [[AL]]) #[[ATTR3]]
; CHECK-NEXT: ret void
;
@@ -234,7 +234,7 @@ define void @test8() {
define void @test8_addrspacecast() {
; CHECK-LABEL: @test8_addrspacecast(
; CHECK-NEXT: [[AL:%.*]] = alloca [[U:%.*]], align 16
-; CHECK-NEXT: call void @llvm.memcpy.p0.p1.i64(ptr noundef nonnull align 16 dereferenceable(20) [[AL]], ptr addrspace(1) noundef align 4 dereferenceable(20) addrspacecast (ptr getelementptr inbounds ([2 x %U], ptr @H, i64 0, i64 1) to ptr addrspace(1)), i64 20, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p1.i64(ptr noundef nonnull align 16 dereferenceable(20) [[AL]], ptr addrspace(1) noundef align 4 dereferenceable(20) addrspacecast (ptr getelementptr inbounds (i8, ptr @H, i64 20) to ptr addrspace(1)), i64 20, i1 false)
; CHECK-NEXT: call void @bar(ptr nonnull [[AL]]) #[[ATTR3]]
; CHECK-NEXT: ret void
;
@@ -246,7 +246,7 @@ define void @test8_addrspacecast() {
define void @test9() {
; CHECK-LABEL: @test9(
-; CHECK-NEXT: call void @bar(ptr nonnull getelementptr inbounds ([2 x %U], ptr @H, i64 0, i64 1)) #[[ATTR3]]
+; CHECK-NEXT: call void @bar(ptr nonnull getelementptr inbounds (i8, ptr @H, i64 20)) #[[ATTR3]]
; CHECK-NEXT: ret void
;
%A = alloca %U, align 4
@@ -257,7 +257,7 @@ define void @test9() {
define void @test9_addrspacecast() {
; CHECK-LABEL: @test9_addrspacecast(
-; CHECK-NEXT: call void @bar(ptr nonnull getelementptr inbounds ([2 x %U], ptr @H, i64 0, i64 1)) #[[ATTR3]]
+; CHECK-NEXT: call void @bar(ptr nonnull getelementptr inbounds (i8, ptr @H, i64 20)) #[[ATTR3]]
; CHECK-NEXT: ret void
;
%A = alloca %U, align 4
diff --git a/llvm/test/Transforms/InstCombine/memrchr-3.ll b/llvm/test/Transforms/InstCombine/memrchr-3.ll
index ca122e5b7dea..d3619432c0d8 100644
--- a/llvm/test/Transforms/InstCombine/memrchr-3.ll
+++ b/llvm/test/Transforms/InstCombine/memrchr-3.ll
@@ -98,7 +98,7 @@ define ptr @fold_memrchr_ax_c_1(i32 %C) {
define ptr @fold_memrchr_a12345_5_5() {
; CHECK-LABEL: @fold_memrchr_a12345_5_5(
-; CHECK-NEXT: ret ptr getelementptr inbounds ([5 x i8], ptr @a12345, i64 0, i64 4)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a12345, i64 4)
;
%ret = call ptr @memrchr(ptr @a12345, i32 5, i64 5)
@@ -122,7 +122,7 @@ define ptr @fold_memrchr_a12345_5_4() {
define ptr @fold_memrchr_a12345_4_5() {
; CHECK-LABEL: @fold_memrchr_a12345_4_5(
-; CHECK-NEXT: ret ptr getelementptr inbounds ([5 x i8], ptr @a12345, i64 0, i64 3)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a12345, i64 3)
;
%ret = call ptr @memrchr(ptr @a12345, i32 4, i64 5)
@@ -147,7 +147,7 @@ define ptr @fold_memrchr_a12345p1_1_4() {
define ptr @fold_memrchr_a12345p1_2_4() {
; CHECK-LABEL: @fold_memrchr_a12345p1_2_4(
-; CHECK-NEXT: ret ptr getelementptr inbounds ([5 x i8], ptr @a12345, i64 0, i64 1)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a12345, i64 1)
;
%ptr = getelementptr [5 x i8], ptr @a12345, i32 0, i32 1
@@ -160,7 +160,7 @@ define ptr @fold_memrchr_a12345p1_2_4() {
define ptr @fold_memrchr_a12345_2_5() {
; CHECK-LABEL: @fold_memrchr_a12345_2_5(
-; CHECK-NEXT: ret ptr getelementptr inbounds ([5 x i8], ptr @a12345, i64 0, i64 1)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a12345, i64 1)
;
%ret = call ptr @memrchr(ptr @a12345, i32 2, i64 5)
@@ -185,7 +185,7 @@ define ptr @fold_memrchr_a12345_0_n(i64 %N) {
define ptr @fold_memrchr_a12345_3_n(i64 %n) {
; CHECK-LABEL: @fold_memrchr_a12345_3_n(
; CHECK-NEXT: [[MEMRCHR_CMP:%.*]] = icmp ult i64 [[N:%.*]], 3
-; CHECK-NEXT: [[MEMRCHR_SEL:%.*]] = select i1 [[MEMRCHR_CMP]], ptr null, ptr getelementptr inbounds ([5 x i8], ptr @a12345, i64 0, i64 2)
+; CHECK-NEXT: [[MEMRCHR_SEL:%.*]] = select i1 [[MEMRCHR_CMP]], ptr null, ptr getelementptr inbounds (i8, ptr @a12345, i64 2)
; CHECK-NEXT: ret ptr [[MEMRCHR_SEL]]
;
@@ -199,7 +199,7 @@ define ptr @fold_memrchr_a12345_3_n(i64 %n) {
define ptr @fold_memrchr_a12345_5_n(i64 %n) {
; CHECK-LABEL: @fold_memrchr_a12345_5_n(
; CHECK-NEXT: [[MEMRCHR_CMP:%.*]] = icmp ult i64 [[N:%.*]], 5
-; CHECK-NEXT: [[MEMRCHR_SEL:%.*]] = select i1 [[MEMRCHR_CMP]], ptr null, ptr getelementptr inbounds ([5 x i8], ptr @a12345, i64 0, i64 4)
+; CHECK-NEXT: [[MEMRCHR_SEL:%.*]] = select i1 [[MEMRCHR_CMP]], ptr null, ptr getelementptr inbounds (i8, ptr @a12345, i64 4)
; CHECK-NEXT: ret ptr [[MEMRCHR_SEL]]
;
@@ -212,7 +212,7 @@ define ptr @fold_memrchr_a12345_5_n(i64 %n) {
define ptr @fold_memrchr_a123123_3_5() {
; CHECK-LABEL: @fold_memrchr_a123123_3_5(
-; CHECK-NEXT: ret ptr getelementptr inbounds ([6 x i8], ptr @a123123, i64 0, i64 2)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a123123, i64 2)
;
%ret = call ptr @memrchr(ptr @a123123, i32 3, i64 5)
@@ -224,7 +224,7 @@ define ptr @fold_memrchr_a123123_3_5() {
define ptr @fold_memrchr_a123123_3_6() {
; CHECK-LABEL: @fold_memrchr_a123123_3_6(
-; CHECK-NEXT: ret ptr getelementptr inbounds ([6 x i8], ptr @a123123, i64 0, i64 5)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a123123, i64 5)
;
%ret = call ptr @memrchr(ptr @a123123, i32 3, i64 6)
@@ -235,7 +235,7 @@ define ptr @fold_memrchr_a123123_3_6() {
define ptr @fold_memrchr_a123123_2_6() {
; CHECK-LABEL: @fold_memrchr_a123123_2_6(
-; CHECK-NEXT: ret ptr getelementptr inbounds ([6 x i8], ptr @a123123, i64 0, i64 4)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a123123, i64 4)
;
%ret = call ptr @memrchr(ptr @a123123, i32 2, i64 6)
@@ -246,7 +246,7 @@ define ptr @fold_memrchr_a123123_2_6() {
define ptr @fold_memrchr_a123123_1_6() {
; CHECK-LABEL: @fold_memrchr_a123123_1_6(
-; CHECK-NEXT: ret ptr getelementptr inbounds ([6 x i8], ptr @a123123, i64 0, i64 3)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a123123, i64 3)
;
%ret = call ptr @memrchr(ptr @a123123, i32 1, i64 6)
diff --git a/llvm/test/Transforms/InstCombine/memrchr-4.ll b/llvm/test/Transforms/InstCombine/memrchr-4.ll
index 1e57a3b93595..708b4417a7df 100644
--- a/llvm/test/Transforms/InstCombine/memrchr-4.ll
+++ b/llvm/test/Transforms/InstCombine/memrchr-4.ll
@@ -16,7 +16,7 @@ define ptr @fold_memrchr_a11111_c_5(i32 %C) {
; CHECK-LABEL: @fold_memrchr_a11111_c_5(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 1
-; CHECK-NEXT: [[MEMRCHR_SEL:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds ([5 x i8], ptr @a11111, i64 0, i64 4), ptr null
+; CHECK-NEXT: [[MEMRCHR_SEL:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds (i8, ptr @a11111, i64 4), ptr null
; CHECK-NEXT: ret ptr [[MEMRCHR_SEL]]
;
@@ -51,7 +51,7 @@ define ptr @fold_memrchr_a1110111_c_3(i32 %C) {
; CHECK-LABEL: @fold_memrchr_a1110111_c_3(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 1
-; CHECK-NEXT: [[MEMRCHR_SEL:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds ([7 x i8], ptr @a1110111, i64 0, i64 2), ptr null
+; CHECK-NEXT: [[MEMRCHR_SEL:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds (i8, ptr @a1110111, i64 2), ptr null
; CHECK-NEXT: ret ptr [[MEMRCHR_SEL]]
;
diff --git a/llvm/test/Transforms/InstCombine/merging-multiple-stores-into-successor.ll b/llvm/test/Transforms/InstCombine/merging-multiple-stores-into-successor.ll
index 9c5bf3cb5a41..fbf58d47a32d 100644
--- a/llvm/test/Transforms/InstCombine/merging-multiple-stores-into-successor.ll
+++ b/llvm/test/Transforms/InstCombine/merging-multiple-stores-into-successor.ll
@@ -34,9 +34,9 @@ define void @_Z4testv() {
; CHECK-NEXT: store i16 [[I4]], ptr @arr_4, align 2
; CHECK-NEXT: [[I8:%.*]] = sext i16 [[I4]] to i32
; CHECK-NEXT: store i32 [[I8]], ptr @arr_3, align 4
-; CHECK-NEXT: store i32 [[STOREMERGE]], ptr getelementptr inbounds ([0 x i32], ptr @arr_2, i64 0, i64 1), align 4
-; CHECK-NEXT: store i16 [[I4]], ptr getelementptr inbounds ([0 x i16], ptr @arr_4, i64 0, i64 1), align 2
-; CHECK-NEXT: store i32 [[I8]], ptr getelementptr inbounds ([8 x i32], ptr @arr_3, i64 0, i64 1), align 4
+; CHECK-NEXT: store i32 [[STOREMERGE]], ptr getelementptr inbounds (i8, ptr @arr_2, i64 4), align 4
+; CHECK-NEXT: store i16 [[I4]], ptr getelementptr inbounds (i8, ptr @arr_4, i64 2), align 2
+; CHECK-NEXT: store i32 [[I8]], ptr getelementptr inbounds (i8, ptr @arr_3, i64 4), align 4
; CHECK-NEXT: ret void
;
bb:
diff --git a/llvm/test/Transforms/InstCombine/objsize.ll b/llvm/test/Transforms/InstCombine/objsize.ll
index 33c14f44fc5f..9a3391d91bab 100644
--- a/llvm/test/Transforms/InstCombine/objsize.ll
+++ b/llvm/test/Transforms/InstCombine/objsize.ll
@@ -64,7 +64,7 @@ define i1 @baz() nounwind {
define void @test1(ptr %q, i32 %x) nounwind noinline {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.objectsize.i32.p0(ptr getelementptr inbounds ([0 x i8], ptr @window, i32 0, i32 10), i1 false, i1 false, i1 false)
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.objectsize.i32.p0(ptr getelementptr inbounds (i8, ptr @window, i32 10), i1 false, i1 false, i1 false)
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[TMP0]], -1
; CHECK-NEXT: br i1 [[TMP1]], label %"47", label %"46"
; CHECK: "46":
@@ -112,7 +112,7 @@ define void @test3(i1 %c1, ptr %ptr1, ptr %ptr2, ptr %ptr3) nounwind {
; CHECK: bb11:
; CHECK-NEXT: unreachable
; CHECK: bb12:
-; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__inline_memcpy_chk(ptr nonnull getelementptr inbounds ([480 x float], ptr @array, i32 0, i32 1), ptr [[PTR3:%.*]], i32 512) #[[ATTR3:[0-9]+]]
+; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__inline_memcpy_chk(ptr nonnull getelementptr inbounds (i8, ptr @array, i32 4), ptr [[PTR3:%.*]], i32 512) #[[ATTR3:[0-9]+]]
; CHECK-NEXT: unreachable
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/or-fcmp.ll b/llvm/test/Transforms/InstCombine/or-fcmp.ll
index ffd927672b41..285b2d958abd 100644
--- a/llvm/test/Transforms/InstCombine/or-fcmp.ll
+++ b/llvm/test/Transforms/InstCombine/or-fcmp.ll
@@ -28,7 +28,9 @@ define i1 @PR1738_logical(double %x, double %y) {
define <2 x i1> @PR1738_vec_undef(<2 x double> %x, <2 x double> %y) {
; CHECK-LABEL: @PR1738_vec_undef(
-; CHECK-NEXT: [[OR:%.*]] = fcmp uno <2 x double> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp uno <2 x double> [[X:%.*]], <double 0.000000e+00, double undef>
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp uno <2 x double> [[Y:%.*]], <double undef, double 0.000000e+00>
+; CHECK-NEXT: [[OR:%.*]] = or <2 x i1> [[CMP1]], [[CMP2]]
; CHECK-NEXT: ret <2 x i1> [[OR]]
;
%cmp1 = fcmp uno <2 x double> %x, <double 0.0, double undef>
@@ -37,6 +39,17 @@ define <2 x i1> @PR1738_vec_undef(<2 x double> %x, <2 x double> %y) {
ret <2 x i1> %or
}
+define <2 x i1> @PR1738_vec_poison(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: @PR1738_vec_poison(
+; CHECK-NEXT: [[OR:%.*]] = fcmp uno <2 x double> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: ret <2 x i1> [[OR]]
+;
+ %cmp1 = fcmp uno <2 x double> %x, <double 0.0, double poison>
+ %cmp2 = fcmp uno <2 x double> %y, <double poison, double 0.0>
+ %or = or <2 x i1> %cmp1, %cmp2
+ ret <2 x i1> %or
+}
+
define i1 @PR41069(double %a, double %b, double %c, double %d) {
; CHECK-LABEL: @PR41069(
; CHECK-NEXT: [[UNO1:%.*]] = fcmp uno double [[A:%.*]], [[B:%.*]]
@@ -105,26 +118,56 @@ define i1 @PR41069_commute_logical(double %a, double %b, double %c, double %d) {
define <2 x i1> @PR41069_vec(<2 x i1> %z, <2 x float> %c, <2 x float> %d) {
; CHECK-LABEL: @PR41069_vec(
+; CHECK-NEXT: [[UNO1:%.*]] = fcmp uno <2 x float> [[C:%.*]], zeroinitializer
+; CHECK-NEXT: [[OR:%.*]] = or <2 x i1> [[UNO1]], [[Z:%.*]]
+; CHECK-NEXT: [[UNO2:%.*]] = fcmp uno <2 x float> [[D:%.*]], <float 0.000000e+00, float undef>
+; CHECK-NEXT: [[R:%.*]] = or <2 x i1> [[OR]], [[UNO2]]
+; CHECK-NEXT: ret <2 x i1> [[R]]
+;
+ %uno1 = fcmp uno <2 x float> %c, zeroinitializer
+ %or = or <2 x i1> %uno1, %z
+ %uno2 = fcmp uno <2 x float> %d, <float 0.0, float undef>
+ %r = or <2 x i1> %or, %uno2
+ ret <2 x i1> %r
+}
+
+define <2 x i1> @PR41069_vec_poison(<2 x i1> %z, <2 x float> %c, <2 x float> %d) {
+; CHECK-LABEL: @PR41069_vec_poison(
; CHECK-NEXT: [[TMP1:%.*]] = fcmp uno <2 x float> [[D:%.*]], [[C:%.*]]
; CHECK-NEXT: [[R:%.*]] = or <2 x i1> [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
%uno1 = fcmp uno <2 x float> %c, zeroinitializer
%or = or <2 x i1> %uno1, %z
- %uno2 = fcmp uno <2 x float> %d, <float 0.0, float undef>
+ %uno2 = fcmp uno <2 x float> %d, <float 0.0, float poison>
%r = or <2 x i1> %or, %uno2
ret <2 x i1> %r
}
define <2 x i1> @PR41069_vec_commute(<2 x i1> %z, <2 x float> %c, <2 x float> %d) {
; CHECK-LABEL: @PR41069_vec_commute(
+; CHECK-NEXT: [[UNO1:%.*]] = fcmp uno <2 x float> [[C:%.*]], zeroinitializer
+; CHECK-NEXT: [[OR:%.*]] = or <2 x i1> [[UNO1]], [[Z:%.*]]
+; CHECK-NEXT: [[UNO2:%.*]] = fcmp uno <2 x float> [[D:%.*]], <float 0.000000e+00, float undef>
+; CHECK-NEXT: [[R:%.*]] = or <2 x i1> [[UNO2]], [[OR]]
+; CHECK-NEXT: ret <2 x i1> [[R]]
+;
+ %uno1 = fcmp uno <2 x float> %c, zeroinitializer
+ %or = or <2 x i1> %uno1, %z
+ %uno2 = fcmp uno <2 x float> %d, <float 0.0, float undef>
+ %r = or <2 x i1> %uno2, %or
+ ret <2 x i1> %r
+}
+
+define <2 x i1> @PR41069_vec_commute_poison(<2 x i1> %z, <2 x float> %c, <2 x float> %d) {
+; CHECK-LABEL: @PR41069_vec_commute_poison(
; CHECK-NEXT: [[TMP1:%.*]] = fcmp uno <2 x float> [[D:%.*]], [[C:%.*]]
; CHECK-NEXT: [[R:%.*]] = or <2 x i1> [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
%uno1 = fcmp uno <2 x float> %c, zeroinitializer
%or = or <2 x i1> %uno1, %z
- %uno2 = fcmp uno <2 x float> %d, <float 0.0, float undef>
+ %uno2 = fcmp uno <2 x float> %d, <float 0.0, float poison>
%r = or <2 x i1> %uno2, %or
ret <2 x i1> %r
}
diff --git a/llvm/test/Transforms/InstCombine/pow-to-ldexp.ll b/llvm/test/Transforms/InstCombine/pow-to-ldexp.ll
index 27249dd5d72a..b61f8809bd25 100644
--- a/llvm/test/Transforms/InstCombine/pow-to-ldexp.ll
+++ b/llvm/test/Transforms/InstCombine/pow-to-ldexp.ll
@@ -144,16 +144,10 @@ define half @pow_sitofp_f16_const_base_2(i32 %x) {
}
define <2 x float> @pow_sitofp_v2f32_const_base_2(<2 x i32> %x) {
-; LDEXP-EXP2-LABEL: define <2 x float> @pow_sitofp_v2f32_const_base_2(
-; LDEXP-EXP2-SAME: <2 x i32> [[X:%.*]]) {
-; LDEXP-EXP2-NEXT: [[EXP2:%.*]] = tail call <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float> <float 1.000000e+00, float 1.000000e+00>, <2 x i32> [[X]])
-; LDEXP-EXP2-NEXT: ret <2 x float> [[EXP2]]
-;
-; LDEXP-NOEXP2-LABEL: define <2 x float> @pow_sitofp_v2f32_const_base_2(
-; LDEXP-NOEXP2-SAME: <2 x i32> [[X:%.*]]) {
-; LDEXP-NOEXP2-NEXT: [[ITOFP:%.*]] = sitofp <2 x i32> [[X]] to <2 x float>
-; LDEXP-NOEXP2-NEXT: [[POW:%.*]] = tail call <2 x float> @llvm.pow.v2f32(<2 x float> <float 2.000000e+00, float 2.000000e+00>, <2 x float> [[ITOFP]])
-; LDEXP-NOEXP2-NEXT: ret <2 x float> [[POW]]
+; LDEXP-LABEL: define <2 x float> @pow_sitofp_v2f32_const_base_2(
+; LDEXP-SAME: <2 x i32> [[X:%.*]]) {
+; LDEXP-NEXT: [[EXP2:%.*]] = tail call <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float> <float 1.000000e+00, float 1.000000e+00>, <2 x i32> [[X]])
+; LDEXP-NEXT: ret <2 x float> [[EXP2]]
;
; NOLDEXP-LABEL: define <2 x float> @pow_sitofp_v2f32_const_base_2(
; NOLDEXP-SAME: <2 x i32> [[X:%.*]]) {
@@ -205,15 +199,10 @@ define <2 x float> @pow_sitofp_v2f32_const_base_mixed_2(<2 x i32> %x) {
}
define <2 x float> @pow_sitofp_v2f32_const_base_2__flags(<2 x i32> %x) {
-; LDEXP-EXP2-LABEL: define <2 x float> @pow_sitofp_v2f32_const_base_2__flags(
-; LDEXP-EXP2-SAME: <2 x i32> [[X:%.*]]) {
-; LDEXP-EXP2-NEXT: [[EXP2:%.*]] = tail call nsz afn <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float> <float 1.000000e+00, float 1.000000e+00>, <2 x i32> [[X]])
-; LDEXP-EXP2-NEXT: ret <2 x float> [[EXP2]]
-;
-; LDEXP-NOEXP2-LABEL: define <2 x float> @pow_sitofp_v2f32_const_base_2__flags(
-; LDEXP-NOEXP2-SAME: <2 x i32> [[X:%.*]]) {
-; LDEXP-NOEXP2-NEXT: [[POW:%.*]] = tail call nsz afn <2 x float> @llvm.powi.v2f32.v2i32(<2 x float> <float 2.000000e+00, float 2.000000e+00>, <2 x i32> [[X]])
-; LDEXP-NOEXP2-NEXT: ret <2 x float> [[POW]]
+; LDEXP-LABEL: define <2 x float> @pow_sitofp_v2f32_const_base_2__flags(
+; LDEXP-SAME: <2 x i32> [[X:%.*]]) {
+; LDEXP-NEXT: [[EXP2:%.*]] = tail call nsz afn <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float> <float 1.000000e+00, float 1.000000e+00>, <2 x i32> [[X]])
+; LDEXP-NEXT: ret <2 x float> [[EXP2]]
;
; NOLDEXP-LABEL: define <2 x float> @pow_sitofp_v2f32_const_base_2__flags(
; NOLDEXP-SAME: <2 x i32> [[X:%.*]]) {
@@ -227,16 +216,10 @@ define <2 x float> @pow_sitofp_v2f32_const_base_2__flags(<2 x i32> %x) {
}
define <vscale x 4 x float> @pow_sitofp_nxv4f32_const_base_2(<vscale x 4 x i32> %x) {
-; LDEXP-EXP2-LABEL: define <vscale x 4 x float> @pow_sitofp_nxv4f32_const_base_2(
-; LDEXP-EXP2-SAME: <vscale x 4 x i32> [[X:%.*]]) {
-; LDEXP-EXP2-NEXT: [[EXP2:%.*]] = tail call <vscale x 4 x float> @llvm.ldexp.nxv4f32.nxv4i32(<vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 1.000000e+00, i64 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> [[X]])
-; LDEXP-EXP2-NEXT: ret <vscale x 4 x float> [[EXP2]]
-;
-; LDEXP-NOEXP2-LABEL: define <vscale x 4 x float> @pow_sitofp_nxv4f32_const_base_2(
-; LDEXP-NOEXP2-SAME: <vscale x 4 x i32> [[X:%.*]]) {
-; LDEXP-NOEXP2-NEXT: [[ITOFP:%.*]] = sitofp <vscale x 4 x i32> [[X]] to <vscale x 4 x float>
-; LDEXP-NOEXP2-NEXT: [[POW:%.*]] = tail call <vscale x 4 x float> @llvm.pow.nxv4f32(<vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 2.000000e+00, i64 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x float> [[ITOFP]])
-; LDEXP-NOEXP2-NEXT: ret <vscale x 4 x float> [[POW]]
+; LDEXP-LABEL: define <vscale x 4 x float> @pow_sitofp_nxv4f32_const_base_2(
+; LDEXP-SAME: <vscale x 4 x i32> [[X:%.*]]) {
+; LDEXP-NEXT: [[EXP2:%.*]] = tail call <vscale x 4 x float> @llvm.ldexp.nxv4f32.nxv4i32(<vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 1.000000e+00, i64 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> [[X]])
+; LDEXP-NEXT: ret <vscale x 4 x float> [[EXP2]]
;
; NOLDEXP-LABEL: define <vscale x 4 x float> @pow_sitofp_nxv4f32_const_base_2(
; NOLDEXP-SAME: <vscale x 4 x i32> [[X:%.*]]) {
@@ -250,16 +233,10 @@ define <vscale x 4 x float> @pow_sitofp_nxv4f32_const_base_2(<vscale x 4 x i32>
}
define <2 x half> @pow_sitofp_v2f16_const_base_2(<2 x i32> %x) {
-; LDEXP-EXP2-LABEL: define <2 x half> @pow_sitofp_v2f16_const_base_2(
-; LDEXP-EXP2-SAME: <2 x i32> [[X:%.*]]) {
-; LDEXP-EXP2-NEXT: [[EXP2:%.*]] = tail call <2 x half> @llvm.ldexp.v2f16.v2i32(<2 x half> <half 0xH3C00, half 0xH3C00>, <2 x i32> [[X]])
-; LDEXP-EXP2-NEXT: ret <2 x half> [[EXP2]]
-;
-; LDEXP-NOEXP2-LABEL: define <2 x half> @pow_sitofp_v2f16_const_base_2(
-; LDEXP-NOEXP2-SAME: <2 x i32> [[X:%.*]]) {
-; LDEXP-NOEXP2-NEXT: [[ITOFP:%.*]] = sitofp <2 x i32> [[X]] to <2 x half>
-; LDEXP-NOEXP2-NEXT: [[POW:%.*]] = tail call <2 x half> @llvm.pow.v2f16(<2 x half> <half 0xH4000, half 0xH4000>, <2 x half> [[ITOFP]])
-; LDEXP-NOEXP2-NEXT: ret <2 x half> [[POW]]
+; LDEXP-LABEL: define <2 x half> @pow_sitofp_v2f16_const_base_2(
+; LDEXP-SAME: <2 x i32> [[X:%.*]]) {
+; LDEXP-NEXT: [[EXP2:%.*]] = tail call <2 x half> @llvm.ldexp.v2f16.v2i32(<2 x half> <half 0xH3C00, half 0xH3C00>, <2 x i32> [[X]])
+; LDEXP-NEXT: ret <2 x half> [[EXP2]]
;
; NOLDEXP-LABEL: define <2 x half> @pow_sitofp_v2f16_const_base_2(
; NOLDEXP-SAME: <2 x i32> [[X:%.*]]) {
@@ -273,16 +250,10 @@ define <2 x half> @pow_sitofp_v2f16_const_base_2(<2 x i32> %x) {
}
define <2 x double> @pow_sitofp_v2f64_const_base_2(<2 x i32> %x) {
-; LDEXP-EXP2-LABEL: define <2 x double> @pow_sitofp_v2f64_const_base_2(
-; LDEXP-EXP2-SAME: <2 x i32> [[X:%.*]]) {
-; LDEXP-EXP2-NEXT: [[EXP2:%.*]] = tail call <2 x double> @llvm.ldexp.v2f64.v2i32(<2 x double> <double 1.000000e+00, double 1.000000e+00>, <2 x i32> [[X]])
-; LDEXP-EXP2-NEXT: ret <2 x double> [[EXP2]]
-;
-; LDEXP-NOEXP2-LABEL: define <2 x double> @pow_sitofp_v2f64_const_base_2(
-; LDEXP-NOEXP2-SAME: <2 x i32> [[X:%.*]]) {
-; LDEXP-NOEXP2-NEXT: [[ITOFP:%.*]] = sitofp <2 x i32> [[X]] to <2 x double>
-; LDEXP-NOEXP2-NEXT: [[POW:%.*]] = tail call <2 x double> @llvm.pow.v2f64(<2 x double> <double 2.000000e+00, double 2.000000e+00>, <2 x double> [[ITOFP]])
-; LDEXP-NOEXP2-NEXT: ret <2 x double> [[POW]]
+; LDEXP-LABEL: define <2 x double> @pow_sitofp_v2f64_const_base_2(
+; LDEXP-SAME: <2 x i32> [[X:%.*]]) {
+; LDEXP-NEXT: [[EXP2:%.*]] = tail call <2 x double> @llvm.ldexp.v2f64.v2i32(<2 x double> <double 1.000000e+00, double 1.000000e+00>, <2 x i32> [[X]])
+; LDEXP-NEXT: ret <2 x double> [[EXP2]]
;
; NOLDEXP-LABEL: define <2 x double> @pow_sitofp_v2f64_const_base_2(
; NOLDEXP-SAME: <2 x i32> [[X:%.*]]) {
diff --git a/llvm/test/Transforms/InstCombine/pr25342.ll b/llvm/test/Transforms/InstCombine/pr25342.ll
index 2f85f99c4ce0..271d69b141dd 100644
--- a/llvm/test/Transforms/InstCombine/pr25342.ll
+++ b/llvm/test/Transforms/InstCombine/pr25342.ll
@@ -17,9 +17,9 @@ define void @_Z3fooi(i32 signext %n) {
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr @dd, align 4
-; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr getelementptr inbounds (%"struct.std::complex", ptr @dd, i64 0, i32 0, i32 1), align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr getelementptr inbounds (i8, ptr @dd, i64 4), align 4
; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr @dd2, align 4
-; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr getelementptr inbounds (%"struct.std::complex", ptr @dd2, i64 0, i32 0, i32 1), align 4
+; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr getelementptr inbounds (i8, ptr @dd2, i64 4), align 4
; CHECK-NEXT: [[MUL_I:%.*]] = fmul float [[TMP2]], [[TMP4]]
; CHECK-NEXT: [[MUL4_I:%.*]] = fmul float [[TMP3]], [[TMP5]]
; CHECK-NEXT: [[SUB_I:%.*]] = fsub float [[MUL_I]], [[MUL4_I]]
@@ -32,7 +32,7 @@ define void @_Z3fooi(i32 signext %n) {
; CHECK-NEXT: br label [[FOR_COND]]
; CHECK: for.end:
; CHECK-NEXT: store float [[TMP0]], ptr @dd, align 4
-; CHECK-NEXT: store float [[TMP1]], ptr getelementptr inbounds (%"struct.std::complex", ptr @dd, i64 0, i32 0, i32 1), align 4
+; CHECK-NEXT: store float [[TMP1]], ptr getelementptr inbounds (i8, ptr @dd, i64 4), align 4
; CHECK-NEXT: ret void
;
entry:
@@ -84,9 +84,9 @@ define void @multi_phi(i32 signext %n) {
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr @dd, align 4
-; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr getelementptr inbounds (%"struct.std::complex", ptr @dd, i64 0, i32 0, i32 1), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr getelementptr inbounds (i8, ptr @dd, i64 4), align 4
; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr @dd2, align 4
-; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr getelementptr inbounds (%"struct.std::complex", ptr @dd2, i64 0, i32 0, i32 1), align 4
+; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr getelementptr inbounds (i8, ptr @dd2, i64 4), align 4
; CHECK-NEXT: [[MUL_I:%.*]] = fmul float [[TMP1]], [[TMP3]]
; CHECK-NEXT: [[MUL4_I:%.*]] = fmul float [[TMP2]], [[TMP4]]
; CHECK-NEXT: [[SUB_I:%.*]] = fsub float [[MUL_I]], [[MUL4_I]]
diff --git a/llvm/test/Transforms/InstCombine/pr33453.ll b/llvm/test/Transforms/InstCombine/pr33453.ll
index 45f87b753006..23a232dd0b9a 100644
--- a/llvm/test/Transforms/InstCombine/pr33453.ll
+++ b/llvm/test/Transforms/InstCombine/pr33453.ll
@@ -6,7 +6,7 @@
define float @patatino() {
; CHECK-LABEL: @patatino(
-; CHECK-NEXT: [[FMUL:%.*]] = uitofp i1 mul (i1 icmp eq (ptr getelementptr inbounds (i16, ptr @g2, i64 1), ptr @g1), i1 icmp eq (ptr getelementptr inbounds (i16, ptr @g2, i64 1), ptr @g1)) to float
+; CHECK-NEXT: [[FMUL:%.*]] = uitofp i1 mul (i1 icmp eq (ptr getelementptr inbounds (i8, ptr @g2, i64 2), ptr @g1), i1 icmp eq (ptr getelementptr inbounds (i8, ptr @g2, i64 2), ptr @g1)) to float
; CHECK-NEXT: ret float [[FMUL]]
;
%uitofp1 = uitofp i1 icmp eq (ptr getelementptr inbounds (i16, ptr @g2, i64 1), ptr @g1) to float
diff --git a/llvm/test/Transforms/InstCombine/pr38984-inseltpoison.ll b/llvm/test/Transforms/InstCombine/pr38984-inseltpoison.ll
index 6613514c7754..92f55b211b63 100644
--- a/llvm/test/Transforms/InstCombine/pr38984-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/pr38984-inseltpoison.ll
@@ -26,7 +26,7 @@ define <4 x i1> @PR38984_2() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr @offsets, align 2
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 [[TMP0]], i64 3
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr getelementptr inbounds ([21 x i16], ptr @a, i16 1, i16 0), <4 x i16> [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr getelementptr inbounds (i8, ptr @a, i16 42), <4 x i16> [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr null, <4 x i16> [[TMP1]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <4 x ptr> [[TMP2]], [[TMP3]]
; CHECK-NEXT: ret <4 x i1> [[TMP4]]
diff --git a/llvm/test/Transforms/InstCombine/pr38984.ll b/llvm/test/Transforms/InstCombine/pr38984.ll
index c148765fce59..a7eddcfbe084 100644
--- a/llvm/test/Transforms/InstCombine/pr38984.ll
+++ b/llvm/test/Transforms/InstCombine/pr38984.ll
@@ -26,7 +26,7 @@ define <4 x i1> @PR38984_2() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr @offsets, align 2
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i16> <i16 undef, i16 undef, i16 undef, i16 poison>, i16 [[TMP0]], i64 3
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr getelementptr inbounds ([21 x i16], ptr @a, i16 1, i16 0), <4 x i16> [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr getelementptr inbounds (i8, ptr @a, i16 42), <4 x i16> [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr null, <4 x i16> [[TMP1]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <4 x ptr> [[TMP2]], [[TMP3]]
; CHECK-NEXT: ret <4 x i1> [[TMP4]]
diff --git a/llvm/test/Transforms/InstCombine/pr83947.ll b/llvm/test/Transforms/InstCombine/pr83947.ll
index c1d601ff6371..63a242abc925 100644
--- a/llvm/test/Transforms/InstCombine/pr83947.ll
+++ b/llvm/test/Transforms/InstCombine/pr83947.ll
@@ -6,7 +6,7 @@
define void @masked_scatter1() {
; CHECK-LABEL: define void @masked_scatter1() {
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x ptr> shufflevector (<vscale x 4 x ptr> insertelement (<vscale x 4 x ptr> poison, ptr @c, i64 0), <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer), i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 icmp eq (ptr getelementptr inbounds (i32, ptr @b, i64 1), ptr @c), i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
+; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x ptr> shufflevector (<vscale x 4 x ptr> insertelement (<vscale x 4 x ptr> poison, ptr @c, i64 0), <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer), i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 icmp eq (ptr getelementptr inbounds (i8, ptr @b, i64 4), ptr @c), i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
; CHECK-NEXT: ret void
;
call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x ptr> splat (ptr @c), i32 4, <vscale x 4 x i1> splat (i1 icmp eq (ptr getelementptr (i32, ptr @b, i64 1), ptr @c)))
@@ -59,7 +59,7 @@ define void @masked_scatter6() {
define void @masked_scatter7() {
; CHECK-LABEL: define void @masked_scatter7() {
-; CHECK-NEXT: call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> zeroinitializer, <2 x ptr> <ptr @c, ptr @c>, i32 4, <2 x i1> <i1 icmp eq (ptr getelementptr inbounds (i32, ptr @b, i64 1), ptr @c), i1 icmp eq (ptr getelementptr inbounds (i32, ptr @b, i64 1), ptr @c)>)
+; CHECK-NEXT: call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> zeroinitializer, <2 x ptr> <ptr @c, ptr @c>, i32 4, <2 x i1> <i1 icmp eq (ptr getelementptr inbounds (i8, ptr @b, i64 4), ptr @c), i1 icmp eq (ptr getelementptr inbounds (i8, ptr @b, i64 4), ptr @c)>)
; CHECK-NEXT: ret void
;
call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> zeroinitializer, <2 x ptr> splat (ptr @c), i32 4, <2 x i1> splat (i1 icmp eq (ptr getelementptr (i32, ptr @b, i64 1), ptr @c)))
diff --git a/llvm/test/Transforms/InstCombine/ptr-replace-alloca.ll b/llvm/test/Transforms/InstCombine/ptr-replace-alloca.ll
index c783b101251d..7c65a93a0043 100644
--- a/llvm/test/Transforms/InstCombine/ptr-replace-alloca.ll
+++ b/llvm/test/Transforms/InstCombine/ptr-replace-alloca.ll
@@ -15,7 +15,7 @@ define i8 @remove_alloca_use_arg(i1 %cond) {
; CHECK: else:
; CHECK-NEXT: br label [[SINK]]
; CHECK: sink:
-; CHECK-NEXT: [[PTR1:%.*]] = phi ptr [ getelementptr inbounds ([32 x i8], ptr @g1, i64 0, i64 2), [[IF]] ], [ getelementptr inbounds ([32 x i8], ptr @g1, i64 0, i64 1), [[ELSE]] ]
+; CHECK-NEXT: [[PTR1:%.*]] = phi ptr [ getelementptr inbounds (i8, ptr @g1, i64 2), [[IF]] ], [ getelementptr inbounds (i8, ptr @g1, i64 1), [[ELSE]] ]
; CHECK-NEXT: [[LOAD:%.*]] = load i8, ptr [[PTR1]], align 1
; CHECK-NEXT: ret i8 [[LOAD]]
;
@@ -114,7 +114,7 @@ define i8 @loop_phi_remove_alloca(i1 %cond) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[BB_0:%.*]]
; CHECK: bb.0:
-; CHECK-NEXT: [[PTR1:%.*]] = phi ptr [ getelementptr inbounds ([32 x i8], ptr @g1, i64 0, i64 1), [[ENTRY:%.*]] ], [ getelementptr inbounds ([32 x i8], ptr @g1, i64 0, i64 2), [[BB_1:%.*]] ]
+; CHECK-NEXT: [[PTR1:%.*]] = phi ptr [ getelementptr inbounds (i8, ptr @g1, i64 1), [[ENTRY:%.*]] ], [ getelementptr inbounds (i8, ptr @g1, i64 2), [[BB_1:%.*]] ]
; CHECK-NEXT: br i1 [[COND:%.*]], label [[BB_1]], label [[EXIT:%.*]]
; CHECK: bb.1:
; CHECK-NEXT: br label [[BB_0]]
@@ -171,7 +171,7 @@ define i8 @loop_phi_late_memtransfer_remove_alloca(i1 %cond) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[BB_0:%.*]]
; CHECK: bb.0:
-; CHECK-NEXT: [[PTR1:%.*]] = phi ptr [ getelementptr inbounds ([32 x i8], ptr @g1, i64 0, i64 1), [[ENTRY:%.*]] ], [ getelementptr inbounds ([32 x i8], ptr @g1, i64 0, i64 2), [[BB_1:%.*]] ]
+; CHECK-NEXT: [[PTR1:%.*]] = phi ptr [ getelementptr inbounds (i8, ptr @g1, i64 1), [[ENTRY:%.*]] ], [ getelementptr inbounds (i8, ptr @g1, i64 2), [[BB_1:%.*]] ]
; CHECK-NEXT: br i1 [[COND:%.*]], label [[BB_1]], label [[EXIT:%.*]]
; CHECK: bb.1:
; CHECK-NEXT: br label [[BB_0]]
@@ -288,7 +288,7 @@ define i32 @addrspace_diff_remove_alloca(i1 %cond) {
; CHECK: if:
; CHECK-NEXT: br label [[JOIN]]
; CHECK: join:
-; CHECK-NEXT: [[PHI1:%.*]] = phi ptr addrspace(1) [ @g2, [[IF]] ], [ getelementptr inbounds ([32 x i8], ptr addrspace(1) @g2, i64 0, i64 2), [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[PHI1:%.*]] = phi ptr addrspace(1) [ @g2, [[IF]] ], [ getelementptr inbounds (i8, ptr addrspace(1) @g2, i64 2), [[ENTRY:%.*]] ]
; CHECK-NEXT: [[V:%.*]] = load i32, ptr addrspace(1) [[PHI1]], align 4
; CHECK-NEXT: ret i32 [[V]]
;
diff --git a/llvm/test/Transforms/InstCombine/rem.ll b/llvm/test/Transforms/InstCombine/rem.ll
index ae390e72a4b7..a8fa72c37d32 100644
--- a/llvm/test/Transforms/InstCombine/rem.ll
+++ b/llvm/test/Transforms/InstCombine/rem.ll
@@ -522,7 +522,7 @@ define i32 @pr27968_0(i1 %c0, ptr %p) {
; CHECK-NEXT: [[V:%.*]] = load volatile i32, ptr [[P:%.*]], align 4
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
-; CHECK-NEXT: br i1 icmp eq (ptr getelementptr inbounds ([5 x i16], ptr @a, i64 0, i64 4), ptr @b), label [[REM_IS_SAFE:%.*]], label [[REM_IS_UNSAFE:%.*]]
+; CHECK-NEXT: br i1 icmp eq (ptr getelementptr inbounds (i8, ptr @a, i64 8), ptr @b), label [[REM_IS_SAFE:%.*]], label [[REM_IS_UNSAFE:%.*]]
; CHECK: rem.is.safe:
; CHECK-NEXT: ret i32 0
; CHECK: rem.is.unsafe:
@@ -591,7 +591,7 @@ define i32 @pr27968_2(i1 %c0, ptr %p) {
; CHECK-NEXT: [[V:%.*]] = load volatile i32, ptr [[P:%.*]], align 4
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
-; CHECK-NEXT: br i1 icmp eq (ptr getelementptr inbounds ([5 x i16], ptr @a, i64 0, i64 4), ptr @b), label [[REM_IS_SAFE:%.*]], label [[REM_IS_UNSAFE:%.*]]
+; CHECK-NEXT: br i1 icmp eq (ptr getelementptr inbounds (i8, ptr @a, i64 8), ptr @b), label [[REM_IS_SAFE:%.*]], label [[REM_IS_UNSAFE:%.*]]
; CHECK: rem.is.safe:
; CHECK-NEXT: ret i32 0
; CHECK: rem.is.unsafe:
diff --git a/llvm/test/Transforms/InstCombine/select-and-or.ll b/llvm/test/Transforms/InstCombine/select-and-or.ll
index 0f7acd4d56c0..0965e1c8348e 100644
--- a/llvm/test/Transforms/InstCombine/select-and-or.ll
+++ b/llvm/test/Transforms/InstCombine/select-and-or.ll
@@ -431,7 +431,7 @@ define i1 @not_false_not_use3(i1 %x, i1 %y) {
define i1 @demorgan_select_infloop1(i1 %L) {
; CHECK-LABEL: @demorgan_select_infloop1(
; CHECK-NEXT: [[NOT_L:%.*]] = xor i1 [[L:%.*]], true
-; CHECK-NEXT: [[C15:%.*]] = select i1 [[NOT_L]], i1 xor (i1 icmp eq (ptr getelementptr inbounds (i16, ptr @g2, i64 1), ptr @g1), i1 icmp eq (ptr getelementptr inbounds (i16, ptr @g2, i64 1), ptr @g1)), i1 false
+; CHECK-NEXT: [[C15:%.*]] = select i1 [[NOT_L]], i1 xor (i1 icmp eq (ptr getelementptr inbounds (i8, ptr @g2, i64 2), ptr @g1), i1 icmp eq (ptr getelementptr inbounds (i8, ptr @g2, i64 2), ptr @g1)), i1 false
; CHECK-NEXT: ret i1 [[C15]]
;
%not.L = xor i1 %L, true
@@ -443,7 +443,7 @@ define i1 @demorgan_select_infloop1(i1 %L) {
define i1 @demorgan_select_infloop2(i1 %L) {
; CHECK-LABEL: @demorgan_select_infloop2(
; CHECK-NEXT: [[NOT_L:%.*]] = xor i1 [[L:%.*]], true
-; CHECK-NEXT: [[C15:%.*]] = select i1 [[NOT_L]], i1 true, i1 xor (i1 icmp eq (ptr getelementptr inbounds (i16, ptr @g2, i64 1), ptr @g1), i1 icmp eq (ptr getelementptr inbounds (i16, ptr @g2, i64 1), ptr @g1))
+; CHECK-NEXT: [[C15:%.*]] = select i1 [[NOT_L]], i1 true, i1 xor (i1 icmp eq (ptr getelementptr inbounds (i8, ptr @g2, i64 2), ptr @g1), i1 icmp eq (ptr getelementptr inbounds (i8, ptr @g2, i64 2), ptr @g1))
; CHECK-NEXT: ret i1 [[C15]]
;
%not.L = xor i1 %L, true
diff --git a/llvm/test/Transforms/InstCombine/shl-bo.ll b/llvm/test/Transforms/InstCombine/shl-bo.ll
index d33d27c912d6..ab6e8c28cf9f 100644
--- a/llvm/test/Transforms/InstCombine/shl-bo.ll
+++ b/llvm/test/Transforms/InstCombine/shl-bo.ll
@@ -294,6 +294,39 @@ define i8 @lshr_and_or(i8 %a, i8 %y) {
ret i8 %l
}
+define i8 @lshr_and_or_disjoint(i8 %a, i8 %y) {
+; CHECK-LABEL: @lshr_and_or_disjoint(
+; CHECK-NEXT: [[X:%.*]] = srem i8 [[A:%.*]], 42
+; CHECK-NEXT: [[B1:%.*]] = shl i8 [[X]], 2
+; CHECK-NEXT: [[Y_MASK:%.*]] = and i8 [[Y:%.*]], 52
+; CHECK-NEXT: [[L:%.*]] = or disjoint i8 [[Y_MASK]], [[B1]]
+; CHECK-NEXT: ret i8 [[L]]
+;
+ %x = srem i8 %a, 42 ; thwart complexity-based canonicalization
+ %r = lshr i8 %y, 2
+ %m = and i8 %r, 13
+ %b = or disjoint i8 %x, %m
+ %l = shl i8 %b, 2
+ ret i8 %l
+}
+
+define i8 @ashr_and_or_disjoint(i8 %a, i8 %y) {
+; CHECK-LABEL: @ashr_and_or_disjoint(
+; CHECK-NEXT: [[X:%.*]] = srem i8 [[A:%.*]], 42
+; CHECK-NEXT: [[B1:%.*]] = shl i8 [[X]], 2
+; CHECK-NEXT: [[Y_MASK:%.*]] = and i8 [[Y:%.*]], 52
+; CHECK-NEXT: [[L:%.*]] = or disjoint i8 [[Y_MASK]], [[B1]]
+; CHECK-NEXT: ret i8 [[L]]
+;
+ %x = srem i8 %a, 42 ; thwart complexity-based canonicalization
+ %r = ashr i8 %y, 2
+ %m = and i8 %r, 13
+ %b = or disjoint i8 %x, %m
+ %l = shl i8 %b, 2
+ ret i8 %l
+}
+
+
define <2 x i8> @lshr_and_or_commute_splat(<2 x i8> %a, <2 x i8> %y) {
; CHECK-LABEL: @lshr_and_or_commute_splat(
; CHECK-NEXT: [[X:%.*]] = srem <2 x i8> [[A:%.*]], <i8 42, i8 42>
@@ -614,8 +647,8 @@ define <8 x i16> @test_FoldShiftByConstant_CreateSHL2(<8 x i16> %in) {
define <16 x i8> @test_FoldShiftByConstant_CreateAnd(<16 x i8> %in0) {
; CHECK-LABEL: @test_FoldShiftByConstant_CreateAnd(
-; CHECK-NEXT: [[TMP1:%.*]] = mul <16 x i8> [[IN0:%.*]], <i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33>
-; CHECK-NEXT: [[VSHL_N:%.*]] = and <16 x i8> [[TMP1]], <i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32>
+; CHECK-NEXT: [[VSRA_N2:%.*]] = mul <16 x i8> [[IN0:%.*]], <i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33>
+; CHECK-NEXT: [[VSHL_N:%.*]] = and <16 x i8> [[VSRA_N2]], <i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32>
; CHECK-NEXT: ret <16 x i8> [[VSHL_N]]
;
%vsra_n = ashr <16 x i8> %in0, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
diff --git a/llvm/test/Transforms/InstCombine/simplify-libcalls-i16.ll b/llvm/test/Transforms/InstCombine/simplify-libcalls-i16.ll
index 2ac9e2996c4f..9a08b6b5cf9f 100644
--- a/llvm/test/Transforms/InstCombine/simplify-libcalls-i16.ll
+++ b/llvm/test/Transforms/InstCombine/simplify-libcalls-i16.ll
@@ -29,11 +29,11 @@ define void @foo(ptr %P, ptr %X) {
define ptr @test1() {
; CHECK32-LABEL: @test1(
-; CHECK32-NEXT: [[TMP3:%.*]] = tail call ptr @strchr(ptr nonnull getelementptr inbounds ([5 x i8], ptr @str, i32 0, i32 2), i16 103)
+; CHECK32-NEXT: [[TMP3:%.*]] = tail call ptr @strchr(ptr nonnull getelementptr inbounds (i8, ptr @str, i32 2), i16 103)
; CHECK32-NEXT: ret ptr [[TMP3]]
;
; CHECK16-LABEL: @test1(
-; CHECK16-NEXT: ret ptr getelementptr inbounds ([5 x i8], ptr @str, i32 0, i32 3)
+; CHECK16-NEXT: ret ptr getelementptr inbounds (i8, ptr @str, i32 3)
;
%tmp3 = tail call ptr @strchr( ptr getelementptr ([5 x i8], ptr @str, i32 0, i16 2), i16 103 ) ; <ptr> [#uses=1]
ret ptr %tmp3
@@ -45,11 +45,11 @@ declare ptr @strchr(ptr, i16)
define ptr @test2() {
; CHECK32-LABEL: @test2(
-; CHECK32-NEXT: [[TMP3:%.*]] = tail call ptr @strchr(ptr nonnull getelementptr inbounds ([8 x i8], ptr @str1, i32 0, i32 2), i16 0)
+; CHECK32-NEXT: [[TMP3:%.*]] = tail call ptr @strchr(ptr nonnull getelementptr inbounds (i8, ptr @str1, i32 2), i16 0)
; CHECK32-NEXT: ret ptr [[TMP3]]
;
; CHECK16-LABEL: @test2(
-; CHECK16-NEXT: ret ptr getelementptr inbounds ([8 x i8], ptr @str1, i32 0, i32 7)
+; CHECK16-NEXT: ret ptr getelementptr inbounds (i8, ptr @str1, i32 7)
;
%tmp3 = tail call ptr @strchr( ptr getelementptr ([8 x i8], ptr @str1, i32 0, i32 2), i16 0 ) ; <ptr> [#uses=1]
ret ptr %tmp3
@@ -58,7 +58,7 @@ define ptr @test2() {
define ptr @test3() {
; CHECK32-LABEL: @test3(
; CHECK32-NEXT: entry:
-; CHECK32-NEXT: [[TMP3:%.*]] = tail call ptr @strchr(ptr nonnull getelementptr inbounds ([5 x i8], ptr @str2, i32 0, i32 1), i16 80)
+; CHECK32-NEXT: [[TMP3:%.*]] = tail call ptr @strchr(ptr nonnull getelementptr inbounds (i8, ptr @str2, i32 1), i16 80)
; CHECK32-NEXT: ret ptr [[TMP3]]
;
; CHECK16-LABEL: @test3(
diff --git a/llvm/test/Transforms/InstCombine/simplify-libcalls.ll b/llvm/test/Transforms/InstCombine/simplify-libcalls.ll
index 5ebb497ee765..bb2728a103ec 100644
--- a/llvm/test/Transforms/InstCombine/simplify-libcalls.ll
+++ b/llvm/test/Transforms/InstCombine/simplify-libcalls.ll
@@ -29,10 +29,10 @@ define void @foo(ptr %P, ptr %X) {
define ptr @test1() {
; CHECK32-LABEL: @test1(
-; CHECK32-NEXT: ret ptr getelementptr inbounds ([5 x i8], ptr @str, i32 0, i32 3)
+; CHECK32-NEXT: ret ptr getelementptr inbounds (i8, ptr @str, i32 3)
;
; CHECK16-LABEL: @test1(
-; CHECK16-NEXT: [[TMP3:%.*]] = tail call ptr @strchr(ptr nonnull getelementptr inbounds ([5 x i8], ptr @str, i32 0, i32 2), i32 103)
+; CHECK16-NEXT: [[TMP3:%.*]] = tail call ptr @strchr(ptr nonnull getelementptr inbounds (i8, ptr @str, i32 2), i32 103)
; CHECK16-NEXT: ret ptr [[TMP3]]
;
%tmp3 = tail call ptr @strchr( ptr getelementptr ([5 x i8], ptr @str, i32 0, i32 2), i32 103 ) ; <ptr> [#uses=1]
@@ -45,10 +45,10 @@ declare ptr @strchr(ptr, i32)
define ptr @test2() {
; CHECK32-LABEL: @test2(
-; CHECK32-NEXT: ret ptr getelementptr inbounds ([8 x i8], ptr @str1, i32 0, i32 7)
+; CHECK32-NEXT: ret ptr getelementptr inbounds (i8, ptr @str1, i32 7)
;
; CHECK16-LABEL: @test2(
-; CHECK16-NEXT: [[TMP3:%.*]] = tail call ptr @strchr(ptr nonnull getelementptr inbounds ([8 x i8], ptr @str1, i32 0, i32 2), i32 0)
+; CHECK16-NEXT: [[TMP3:%.*]] = tail call ptr @strchr(ptr nonnull getelementptr inbounds (i8, ptr @str1, i32 2), i32 0)
; CHECK16-NEXT: ret ptr [[TMP3]]
;
%tmp3 = tail call ptr @strchr( ptr getelementptr ([8 x i8], ptr @str1, i32 0, i32 2), i32 0 ) ; <ptr> [#uses=1]
@@ -62,7 +62,7 @@ define ptr @test3() {
;
; CHECK16-LABEL: @test3(
; CHECK16-NEXT: entry:
-; CHECK16-NEXT: [[TMP3:%.*]] = tail call ptr @strchr(ptr nonnull getelementptr inbounds ([5 x i8], ptr @str2, i32 0, i32 1), i32 80)
+; CHECK16-NEXT: [[TMP3:%.*]] = tail call ptr @strchr(ptr nonnull getelementptr inbounds (i8, ptr @str2, i32 1), i32 80)
; CHECK16-NEXT: ret ptr [[TMP3]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/snprintf-2.ll b/llvm/test/Transforms/InstCombine/snprintf-2.ll
index 46694e0764a0..0465457aacec 100644
--- a/llvm/test/Transforms/InstCombine/snprintf-2.ll
+++ b/llvm/test/Transforms/InstCombine/snprintf-2.ll
@@ -21,54 +21,54 @@ declare i32 @snprintf(ptr, i64, ptr, ...)
define void @fold_snprintf_fmt() {
; BE-LABEL: @fold_snprintf_fmt(
-; BE-NEXT: [[PDIMAX:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 2147483647), align 8
+; BE-NEXT: [[PDIMAX:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 17179869176), align 8
; BE-NEXT: store i32 825373440, ptr [[PDIMAX]], align 1
; BE-NEXT: store i32 3, ptr @asiz, align 4
-; BE-NEXT: [[PD5:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 5), align 8
+; BE-NEXT: [[PD5:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 40), align 8
; BE-NEXT: store i32 825373440, ptr [[PD5]], align 1
-; BE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 5), align 4
-; BE-NEXT: [[PD4:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 4), align 8
+; BE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 20), align 4
+; BE-NEXT: [[PD4:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 32), align 8
; BE-NEXT: store i32 825373440, ptr [[PD4]], align 1
-; BE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 4), align 4
-; BE-NEXT: [[PD3:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 3), align 8
+; BE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 16), align 4
+; BE-NEXT: [[PD3:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 24), align 8
; BE-NEXT: store i16 12594, ptr [[PD3]], align 1
; BE-NEXT: [[ENDPTR:%.*]] = getelementptr inbounds i8, ptr [[PD3]], i64 2
; BE-NEXT: store i8 0, ptr [[ENDPTR]], align 1
-; BE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 3), align 4
-; BE-NEXT: [[PD2:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 2), align 8
+; BE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 12), align 4
+; BE-NEXT: [[PD2:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 16), align 8
; BE-NEXT: store i8 49, ptr [[PD2]], align 1
; BE-NEXT: [[ENDPTR1:%.*]] = getelementptr inbounds i8, ptr [[PD2]], i64 1
; BE-NEXT: store i8 0, ptr [[ENDPTR1]], align 1
-; BE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 2), align 4
-; BE-NEXT: [[PD1:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 1), align 8
+; BE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 8), align 4
+; BE-NEXT: [[PD1:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 8), align 8
; BE-NEXT: store i8 0, ptr [[PD1]], align 1
-; BE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 1), align 4
+; BE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 4), align 4
; BE-NEXT: store i32 3, ptr @asiz, align 4
; BE-NEXT: ret void
;
; LE-LABEL: @fold_snprintf_fmt(
-; LE-NEXT: [[PDIMAX:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 2147483647), align 8
+; LE-NEXT: [[PDIMAX:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 17179869176), align 8
; LE-NEXT: store i32 3355185, ptr [[PDIMAX]], align 1
; LE-NEXT: store i32 3, ptr @asiz, align 4
-; LE-NEXT: [[PD5:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 5), align 8
+; LE-NEXT: [[PD5:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 40), align 8
; LE-NEXT: store i32 3355185, ptr [[PD5]], align 1
-; LE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 5), align 4
-; LE-NEXT: [[PD4:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 4), align 8
+; LE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 20), align 4
+; LE-NEXT: [[PD4:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 32), align 8
; LE-NEXT: store i32 3355185, ptr [[PD4]], align 1
-; LE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 4), align 4
-; LE-NEXT: [[PD3:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 3), align 8
+; LE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 16), align 4
+; LE-NEXT: [[PD3:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 24), align 8
; LE-NEXT: store i16 12849, ptr [[PD3]], align 1
; LE-NEXT: [[ENDPTR:%.*]] = getelementptr inbounds i8, ptr [[PD3]], i64 2
; LE-NEXT: store i8 0, ptr [[ENDPTR]], align 1
-; LE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 3), align 4
-; LE-NEXT: [[PD2:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 2), align 8
+; LE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 12), align 4
+; LE-NEXT: [[PD2:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 16), align 8
; LE-NEXT: store i8 49, ptr [[PD2]], align 1
; LE-NEXT: [[ENDPTR1:%.*]] = getelementptr inbounds i8, ptr [[PD2]], i64 1
; LE-NEXT: store i8 0, ptr [[ENDPTR1]], align 1
-; LE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 2), align 4
-; LE-NEXT: [[PD1:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 1), align 8
+; LE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 8), align 4
+; LE-NEXT: [[PD1:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 8), align 8
; LE-NEXT: store i8 0, ptr [[PD1]], align 1
-; LE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 1), align 4
+; LE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 4), align 4
; LE-NEXT: store i32 3, ptr @asiz, align 4
; LE-NEXT: ret void
;
@@ -111,9 +111,9 @@ define void @fold_snprintf_fmt() {
define void @call_snprintf_fmt_ximax() {
; ANY-LABEL: @call_snprintf_fmt_ximax(
-; ANY-NEXT: [[PDM1:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 1), align 8
+; ANY-NEXT: [[PDM1:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 8), align 8
; ANY-NEXT: [[NM1:%.*]] = call i32 (ptr, i64, ptr, ...) @snprintf(ptr noundef nonnull dereferenceable(1) [[PDM1]], i64 -1, ptr nonnull @s)
-; ANY-NEXT: store i32 [[NM1]], ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 1), align 4
+; ANY-NEXT: store i32 [[NM1]], ptr getelementptr (i8, ptr @asiz, i64 4), align 4
; ANY-NEXT: [[PDIMAXP1:%.*]] = load ptr, ptr @adst, align 8
; ANY-NEXT: [[NIMAXP1:%.*]] = call i32 (ptr, i64, ptr, ...) @snprintf(ptr noundef nonnull dereferenceable(1) [[PDIMAXP1]], i64 2147483648, ptr nonnull @s)
; ANY-NEXT: store i32 [[NIMAXP1]], ptr @asiz, align 4
diff --git a/llvm/test/Transforms/InstCombine/snprintf-3.ll b/llvm/test/Transforms/InstCombine/snprintf-3.ll
index 0332aa71ad64..7c93580b4ea5 100644
--- a/llvm/test/Transforms/InstCombine/snprintf-3.ll
+++ b/llvm/test/Transforms/InstCombine/snprintf-3.ll
@@ -22,54 +22,54 @@ declare i32 @snprintf(ptr, i64, ptr, ...)
define void @fold_snprintf_pcnt_s() {
; BE-LABEL: @fold_snprintf_pcnt_s(
-; BE-NEXT: [[PDIMAX:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 2147483647), align 8
+; BE-NEXT: [[PDIMAX:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 17179869176), align 8
; BE-NEXT: store i32 825373440, ptr [[PDIMAX]], align 1
; BE-NEXT: store i32 3, ptr @asiz, align 4
-; BE-NEXT: [[PD5:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 5), align 8
+; BE-NEXT: [[PD5:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 40), align 8
; BE-NEXT: store i32 825373440, ptr [[PD5]], align 1
-; BE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 5), align 4
-; BE-NEXT: [[PD4:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 4), align 8
+; BE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 20), align 4
+; BE-NEXT: [[PD4:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 32), align 8
; BE-NEXT: store i32 825373440, ptr [[PD4]], align 1
-; BE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 4), align 4
-; BE-NEXT: [[PD3:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 3), align 8
+; BE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 16), align 4
+; BE-NEXT: [[PD3:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 24), align 8
; BE-NEXT: store i16 12594, ptr [[PD3]], align 1
; BE-NEXT: [[ENDPTR:%.*]] = getelementptr inbounds i8, ptr [[PD3]], i64 2
; BE-NEXT: store i8 0, ptr [[ENDPTR]], align 1
-; BE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 3), align 4
-; BE-NEXT: [[PD2:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 2), align 8
+; BE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 12), align 4
+; BE-NEXT: [[PD2:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 16), align 8
; BE-NEXT: store i8 49, ptr [[PD2]], align 1
; BE-NEXT: [[ENDPTR1:%.*]] = getelementptr inbounds i8, ptr [[PD2]], i64 1
; BE-NEXT: store i8 0, ptr [[ENDPTR1]], align 1
-; BE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 2), align 4
-; BE-NEXT: [[PD1:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 1), align 8
+; BE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 8), align 4
+; BE-NEXT: [[PD1:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 8), align 8
; BE-NEXT: store i8 0, ptr [[PD1]], align 1
-; BE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 1), align 4
+; BE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 4), align 4
; BE-NEXT: store i32 3, ptr @asiz, align 4
; BE-NEXT: ret void
;
; LE-LABEL: @fold_snprintf_pcnt_s(
-; LE-NEXT: [[PDIMAX:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 2147483647), align 8
+; LE-NEXT: [[PDIMAX:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 17179869176), align 8
; LE-NEXT: store i32 3355185, ptr [[PDIMAX]], align 1
; LE-NEXT: store i32 3, ptr @asiz, align 4
-; LE-NEXT: [[PD5:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 5), align 8
+; LE-NEXT: [[PD5:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 40), align 8
; LE-NEXT: store i32 3355185, ptr [[PD5]], align 1
-; LE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 5), align 4
-; LE-NEXT: [[PD4:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 4), align 8
+; LE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 20), align 4
+; LE-NEXT: [[PD4:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 32), align 8
; LE-NEXT: store i32 3355185, ptr [[PD4]], align 1
-; LE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 4), align 4
-; LE-NEXT: [[PD3:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 3), align 8
+; LE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 16), align 4
+; LE-NEXT: [[PD3:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 24), align 8
; LE-NEXT: store i16 12849, ptr [[PD3]], align 1
; LE-NEXT: [[ENDPTR:%.*]] = getelementptr inbounds i8, ptr [[PD3]], i64 2
; LE-NEXT: store i8 0, ptr [[ENDPTR]], align 1
-; LE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 3), align 4
-; LE-NEXT: [[PD2:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 2), align 8
+; LE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 12), align 4
+; LE-NEXT: [[PD2:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 16), align 8
; LE-NEXT: store i8 49, ptr [[PD2]], align 1
; LE-NEXT: [[ENDPTR1:%.*]] = getelementptr inbounds i8, ptr [[PD2]], i64 1
; LE-NEXT: store i8 0, ptr [[ENDPTR1]], align 1
-; LE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 2), align 4
-; LE-NEXT: [[PD1:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 1), align 8
+; LE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 8), align 4
+; LE-NEXT: [[PD1:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 8), align 8
; LE-NEXT: store i8 0, ptr [[PD1]], align 1
-; LE-NEXT: store i32 3, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 1), align 4
+; LE-NEXT: store i32 3, ptr getelementptr (i8, ptr @asiz, i64 4), align 4
; LE-NEXT: store i32 3, ptr @asiz, align 4
; LE-NEXT: ret void
;
@@ -112,9 +112,9 @@ define void @fold_snprintf_pcnt_s() {
define void @call_snprintf_pcnt_s_ximax() {
; ANY-LABEL: @call_snprintf_pcnt_s_ximax(
-; ANY-NEXT: [[PDM1:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 1), align 8
+; ANY-NEXT: [[PDM1:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 8), align 8
; ANY-NEXT: [[NM1:%.*]] = call i32 (ptr, i64, ptr, ...) @snprintf(ptr noundef nonnull dereferenceable(1) [[PDM1]], i64 -1, ptr nonnull @pcnt_s, ptr nonnull @s)
-; ANY-NEXT: store i32 [[NM1]], ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 1), align 4
+; ANY-NEXT: store i32 [[NM1]], ptr getelementptr (i8, ptr @asiz, i64 4), align 4
; ANY-NEXT: [[PDIMAXP1:%.*]] = load ptr, ptr @adst, align 8
; ANY-NEXT: [[NIMAXP1:%.*]] = call i32 (ptr, i64, ptr, ...) @snprintf(ptr noundef nonnull dereferenceable(1) [[PDIMAXP1]], i64 2147483648, ptr nonnull @pcnt_s, ptr nonnull @s)
; ANY-NEXT: store i32 [[NIMAXP1]], ptr @asiz, align 4
diff --git a/llvm/test/Transforms/InstCombine/snprintf-4.ll b/llvm/test/Transforms/InstCombine/snprintf-4.ll
index 4536a6d8817e..7006838ae9b5 100644
--- a/llvm/test/Transforms/InstCombine/snprintf-4.ll
+++ b/llvm/test/Transforms/InstCombine/snprintf-4.ll
@@ -24,29 +24,29 @@ define void @fold_snprintf_pcnt_c(i32 %c) {
; CHECK-NEXT: [[NUL:%.*]] = getelementptr inbounds i8, ptr [[PDIMAX]], i64 1
; CHECK-NEXT: store i8 0, ptr [[NUL]], align 1
; CHECK-NEXT: store i32 1, ptr @asiz, align 4
-; CHECK-NEXT: [[PD2:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 1), align 8
+; CHECK-NEXT: [[PD2:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 8), align 8
; CHECK-NEXT: store i8 2, ptr [[PD2]], align 1
; CHECK-NEXT: [[NUL1:%.*]] = getelementptr inbounds i8, ptr [[PD2]], i64 1
; CHECK-NEXT: store i8 0, ptr [[NUL1]], align 1
-; CHECK-NEXT: store i32 1, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 1), align 4
-; CHECK-NEXT: [[PD2_0:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 2), align 8
+; CHECK-NEXT: store i32 1, ptr getelementptr (i8, ptr @asiz, i64 4), align 4
+; CHECK-NEXT: [[PD2_0:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 16), align 8
; CHECK-NEXT: store i8 0, ptr [[PD2_0]], align 1
; CHECK-NEXT: [[NUL2:%.*]] = getelementptr inbounds i8, ptr [[PD2_0]], i64 1
; CHECK-NEXT: store i8 0, ptr [[NUL2]], align 1
-; CHECK-NEXT: store i32 1, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 2), align 4
-; CHECK-NEXT: [[PD1:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 3), align 8
+; CHECK-NEXT: store i32 1, ptr getelementptr (i8, ptr @asiz, i64 8), align 4
+; CHECK-NEXT: [[PD1:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 24), align 8
; CHECK-NEXT: store i8 0, ptr [[PD1]], align 1
-; CHECK-NEXT: store i32 1, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 3), align 4
-; CHECK-NEXT: store i32 1, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 4), align 4
-; CHECK-NEXT: [[PD2_C:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 4), align 8
+; CHECK-NEXT: store i32 1, ptr getelementptr (i8, ptr @asiz, i64 12), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr (i8, ptr @asiz, i64 16), align 4
+; CHECK-NEXT: [[PD2_C:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 32), align 8
; CHECK-NEXT: [[CHAR:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: store i8 [[CHAR]], ptr [[PD2_C]], align 1
; CHECK-NEXT: [[NUL3:%.*]] = getelementptr inbounds i8, ptr [[PD2_C]], i64 1
; CHECK-NEXT: store i8 0, ptr [[NUL3]], align 1
-; CHECK-NEXT: store i32 1, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 4), align 4
-; CHECK-NEXT: [[PD1_C:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 5), align 8
+; CHECK-NEXT: store i32 1, ptr getelementptr (i8, ptr @asiz, i64 16), align 4
+; CHECK-NEXT: [[PD1_C:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 40), align 8
; CHECK-NEXT: store i8 0, ptr [[PD1_C]], align 1
-; CHECK-NEXT: store i32 1, ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 5), align 4
+; CHECK-NEXT: store i32 1, ptr getelementptr (i8, ptr @asiz, i64 20), align 4
; CHECK-NEXT: ret void
;
@@ -100,12 +100,12 @@ define void @call_snprintf_pcnt_c_ximax(i32 %c) {
; CHECK-NEXT: [[PDM1:%.*]] = load ptr, ptr @adst, align 8
; CHECK-NEXT: [[NM1:%.*]] = call i32 (ptr, i64, ptr, ...) @snprintf(ptr noundef nonnull dereferenceable(1) [[PDM1]], i64 -1, ptr nonnull @pcnt_c, i8 0)
; CHECK-NEXT: store i32 [[NM1]], ptr @asiz, align 4
-; CHECK-NEXT: [[PDIMAXP1:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 1), align 8
+; CHECK-NEXT: [[PDIMAXP1:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 8), align 8
; CHECK-NEXT: [[NIMAXP1:%.*]] = call i32 (ptr, i64, ptr, ...) @snprintf(ptr noundef nonnull dereferenceable(1) [[PDIMAXP1]], i64 2147483648, ptr nonnull @pcnt_c, i8 1)
-; CHECK-NEXT: store i32 [[NIMAXP1]], ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 1), align 4
-; CHECK-NEXT: [[PDM1SL32:%.*]] = load ptr, ptr getelementptr ([0 x ptr], ptr @adst, i64 0, i64 2), align 8
+; CHECK-NEXT: store i32 [[NIMAXP1]], ptr getelementptr (i8, ptr @asiz, i64 4), align 4
+; CHECK-NEXT: [[PDM1SL32:%.*]] = load ptr, ptr getelementptr (i8, ptr @adst, i64 16), align 8
; CHECK-NEXT: [[NM1SL32:%.*]] = call i32 (ptr, i64, ptr, ...) @snprintf(ptr noundef nonnull dereferenceable(1) [[PDM1SL32]], i64 -4294967296, ptr nonnull @pcnt_c, i8 1)
-; CHECK-NEXT: store i32 [[NM1SL32]], ptr getelementptr ([0 x i32], ptr @asiz, i64 0, i64 2), align 4
+; CHECK-NEXT: store i32 [[NM1SL32]], ptr getelementptr (i8, ptr @asiz, i64 8), align 4
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/InstCombine/stpcpy-1.ll b/llvm/test/Transforms/InstCombine/stpcpy-1.ll
index 86691a08a798..2ddacb209744 100644
--- a/llvm/test/Transforms/InstCombine/stpcpy-1.ll
+++ b/llvm/test/Transforms/InstCombine/stpcpy-1.ll
@@ -16,7 +16,7 @@ declare ptr @stpcpy(ptr, ptr)
define ptr @test_simplify1() {
; CHECK-LABEL: @test_simplify1(
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(6) @a, ptr noundef nonnull align 1 dereferenceable(6) @hello, i32 6, i1 false)
-; CHECK-NEXT: ret ptr getelementptr inbounds ([32 x i8], ptr @a, i32 0, i32 5)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a, i32 5)
;
%ret = call ptr @stpcpy(ptr @a, ptr @hello)
ret ptr %ret
@@ -62,7 +62,7 @@ define ptr @test_no_simplify2(ptr %dst, ptr %src) {
define ptr @test_no_incompatible_attr() {
; CHECK-LABEL: @test_no_incompatible_attr(
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(6) @a, ptr noundef nonnull align 1 dereferenceable(6) @hello, i32 6, i1 false)
-; CHECK-NEXT: ret ptr getelementptr inbounds ([32 x i8], ptr @a, i32 0, i32 5)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a, i32 5)
;
%ret = call dereferenceable(1) ptr @stpcpy(ptr @a, ptr @hello)
ret ptr %ret
diff --git a/llvm/test/Transforms/InstCombine/stpcpy_chk-1.ll b/llvm/test/Transforms/InstCombine/stpcpy_chk-1.ll
index 5ebd9fae7620..2d775f35c8bd 100644
--- a/llvm/test/Transforms/InstCombine/stpcpy_chk-1.ll
+++ b/llvm/test/Transforms/InstCombine/stpcpy_chk-1.ll
@@ -15,7 +15,7 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
define ptr @test_simplify1() {
; CHECK-LABEL: @test_simplify1(
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(12) @a, ptr noundef nonnull align 1 dereferenceable(12) @.str, i32 12, i1 false)
-; CHECK-NEXT: ret ptr getelementptr inbounds ([60 x i8], ptr @a, i32 0, i32 11)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a, i32 11)
;
%ret = call ptr @__stpcpy_chk(ptr @a, ptr @.str, i32 60)
@@ -25,7 +25,7 @@ define ptr @test_simplify1() {
define ptr @test_simplify2() {
; CHECK-LABEL: @test_simplify2(
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(12) @a, ptr noundef nonnull align 1 dereferenceable(12) @.str, i32 12, i1 false)
-; CHECK-NEXT: ret ptr getelementptr inbounds ([60 x i8], ptr @a, i32 0, i32 11)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a, i32 11)
;
%ret = call ptr @__stpcpy_chk(ptr @a, ptr @.str, i32 12)
@@ -35,7 +35,7 @@ define ptr @test_simplify2() {
define ptr @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(12) @a, ptr noundef nonnull align 1 dereferenceable(12) @.str, i32 12, i1 false)
-; CHECK-NEXT: ret ptr getelementptr inbounds ([60 x i8], ptr @a, i32 0, i32 11)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a, i32 11)
;
%ret = call ptr @__stpcpy_chk(ptr @a, ptr @.str, i32 -1)
@@ -45,7 +45,7 @@ define ptr @test_simplify3() {
define ptr @test_simplify1_tail() {
; CHECK-LABEL: @test_simplify1_tail(
; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(12) @a, ptr noundef nonnull align 1 dereferenceable(12) @.str, i32 12, i1 false)
-; CHECK-NEXT: ret ptr getelementptr inbounds ([60 x i8], ptr @a, i32 0, i32 11)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a, i32 11)
;
%ret = tail call ptr @__stpcpy_chk(ptr @a, ptr @.str, i32 60)
@@ -80,7 +80,7 @@ define ptr @test_simplify5() {
; CHECK-LABEL: @test_simplify5(
; CHECK-NEXT: [[LEN:%.*]] = call i32 @llvm.objectsize.i32.p0(ptr @a, i1 false, i1 false, i1 false)
; CHECK-NEXT: [[TMP1:%.*]] = call ptr @__memcpy_chk(ptr nonnull @a, ptr nonnull @.str, i32 12, i32 [[LEN]])
-; CHECK-NEXT: ret ptr getelementptr inbounds ([60 x i8], ptr @a, i32 0, i32 11)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a, i32 11)
;
%len = call i32 @llvm.objectsize.i32.p0(ptr @a, i1 false, i1 false, i1 false)
diff --git a/llvm/test/Transforms/InstCombine/stpncpy-1.ll b/llvm/test/Transforms/InstCombine/stpncpy-1.ll
index 15eee6c10193..0a4caa2c05f9 100644
--- a/llvm/test/Transforms/InstCombine/stpncpy-1.ll
+++ b/llvm/test/Transforms/InstCombine/stpncpy-1.ll
@@ -28,18 +28,18 @@ declare void @sink(ptr, ptr)
; to D + strnlen(D, N) or, equivalently, D + (*D != '\0'), when N < 2.
;.
-; ANY: @[[A4:[a-zA-Z0-9_$"\\.-]+]] = constant [4 x i8] c"1234"
-; ANY: @[[S4:[a-zA-Z0-9_$"\\.-]+]] = constant [5 x i8] c"1234\00"
-; ANY: @[[STR:[a-zA-Z0-9_$"\\.-]+]] = private constant [4 x i8] c"4\00\00\00"
-; ANY: @[[STR_1:[a-zA-Z0-9_$"\\.-]+]] = private constant [10 x i8] c"4\00\00\00\00\00\00\00\00\00"
-; ANY: @[[STR_2:[a-zA-Z0-9_$"\\.-]+]] = private constant [10 x i8] c"1234\00\00\00\00\00\00"
-; ANY: @[[STR_3:[a-zA-Z0-9_$"\\.-]+]] = private unnamed_addr constant [4 x i8] c"4\00\00\00", align 1
-; ANY: @[[STR_4:[a-zA-Z0-9_$"\\.-]+]] = private unnamed_addr constant [10 x i8] c"4\00\00\00\00\00\00\00\00\00", align 1
-; ANY: @[[STR_5:[a-zA-Z0-9_$"\\.-]+]] = private unnamed_addr constant [10 x i8] c"1234\00\00\00\00\00\00", align 1
-; ANY: @[[STR_6:[a-zA-Z0-9_$"\\.-]+]] = private unnamed_addr constant [4 x i8] c"4\00\00\00", align 1
-; ANY: @[[STR_7:[a-zA-Z0-9_$"\\.-]+]] = private unnamed_addr constant [10 x i8] c"4\00\00\00\00\00\00\00\00\00", align 1
-; ANY: @[[STR_8:[a-zA-Z0-9_$"\\.-]+]] = private unnamed_addr constant [10 x i8] c"1234\00\00\00\00\00\00", align 1
-; ANY: @[[STR_9:[a-zA-Z0-9_$"\\.-]+]] = private unnamed_addr constant [10 x i8] c"1234\00\00\00\00\00\00", align 1
+; ANY: @a4 = constant [4 x i8] c"1234"
+; ANY: @s4 = constant [5 x i8] c"1234\00"
+; ANY: @str = private constant [4 x i8] c"4\00\00\00"
+; ANY: @str.1 = private constant [10 x i8] c"4\00\00\00\00\00\00\00\00\00"
+; ANY: @str.2 = private constant [10 x i8] c"1234\00\00\00\00\00\00"
+; ANY: @str.3 = private unnamed_addr constant [4 x i8] c"4\00\00\00", align 1
+; ANY: @str.4 = private unnamed_addr constant [10 x i8] c"4\00\00\00\00\00\00\00\00\00", align 1
+; ANY: @str.5 = private unnamed_addr constant [10 x i8] c"1234\00\00\00\00\00\00", align 1
+; ANY: @str.6 = private unnamed_addr constant [4 x i8] c"4\00\00\00", align 1
+; ANY: @str.7 = private unnamed_addr constant [10 x i8] c"4\00\00\00\00\00\00\00\00\00", align 1
+; ANY: @str.8 = private unnamed_addr constant [10 x i8] c"1234\00\00\00\00\00\00", align 1
+; ANY: @str.9 = private unnamed_addr constant [10 x i8] c"1234\00\00\00\00\00\00", align 1
;.
define void @fold_stpncpy_overlap(ptr %dst, i64 %n) {
; ANY-LABEL: @fold_stpncpy_overlap(
@@ -273,11 +273,11 @@ define void @fold_stpncpy_s4(ptr %dst, i64 %n) {
define void @call_stpncpy_xx_n(ptr %dst, i64 %n) {
; ANY-LABEL: @call_stpncpy_xx_n(
-; ANY-NEXT: [[EA1_N:%.*]] = call ptr @stpncpy(ptr [[DST:%.*]], ptr nonnull dereferenceable(2) getelementptr inbounds ([4 x i8], ptr @a4, i64 0, i64 3), i64 [[N:%.*]])
+; ANY-NEXT: [[EA1_N:%.*]] = call ptr @stpncpy(ptr [[DST:%.*]], ptr nonnull dereferenceable(2) getelementptr inbounds (i8, ptr @a4, i64 3), i64 [[N:%.*]])
; ANY-NEXT: call void @sink(ptr [[DST]], ptr [[EA1_N]])
; ANY-NEXT: [[EA4_N:%.*]] = call ptr @stpncpy(ptr [[DST]], ptr nonnull dereferenceable(5) @a4, i64 [[N]])
; ANY-NEXT: call void @sink(ptr [[DST]], ptr [[EA4_N]])
-; ANY-NEXT: [[ES1_N:%.*]] = call ptr @stpncpy(ptr [[DST]], ptr nonnull dereferenceable(2) getelementptr inbounds ([5 x i8], ptr @s4, i64 0, i64 3), i64 [[N]])
+; ANY-NEXT: [[ES1_N:%.*]] = call ptr @stpncpy(ptr [[DST]], ptr nonnull dereferenceable(2) getelementptr inbounds (i8, ptr @s4, i64 3), i64 [[N]])
; ANY-NEXT: call void @sink(ptr [[DST]], ptr [[ES1_N]])
; ANY-NEXT: [[ES4_N:%.*]] = call ptr @stpncpy(ptr [[DST]], ptr nonnull dereferenceable(5) @s4, i64 [[N]])
; ANY-NEXT: call void @sink(ptr [[DST]], ptr [[ES4_N]])
@@ -448,6 +448,9 @@ define void @call_stpncpy_s(ptr %dst, ptr %src, i64 %n) {
ret void
}
;.
-; ANY: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: write) }
-; ANY: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
+; BE: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: write) }
+; BE: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
+;.
+; LE: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: write) }
+; LE: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
;.
diff --git a/llvm/test/Transforms/InstCombine/str-int-2.ll b/llvm/test/Transforms/InstCombine/str-int-2.ll
index a34714365e21..ae67422d1207 100644
--- a/llvm/test/Transforms/InstCombine/str-int-2.ll
+++ b/llvm/test/Transforms/InstCombine/str-int-2.ll
@@ -44,7 +44,7 @@ define i64 @strtol_hex() #0 {
define i64 @strtol_endptr_not_null(ptr nonnull %pend) {
; CHECK-LABEL: @strtol_endptr_not_null(
-; CHECK-NEXT: store ptr getelementptr inbounds ([3 x i8], ptr @.str, i64 0, i64 2), ptr [[PEND:%.*]], align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @.str, i64 2), ptr [[PEND:%.*]], align 8
; CHECK-NEXT: ret i64 12
;
%call = call i64 @strtol(ptr @.str, ptr %pend, i32 10)
diff --git a/llvm/test/Transforms/InstCombine/str-int-3.ll b/llvm/test/Transforms/InstCombine/str-int-3.ll
index f319a16d211f..100f1a95b135 100644
--- a/llvm/test/Transforms/InstCombine/str-int-3.ll
+++ b/llvm/test/Transforms/InstCombine/str-int-3.ll
@@ -66,9 +66,9 @@ define void @fold_atoi_member(ptr %pi) {
define void @fold_atoi_offset_out_of_bounds(ptr %pi) {
; CHECK-LABEL: @fold_atoi_offset_out_of_bounds(
-; CHECK-NEXT: [[IA_0_0_32:%.*]] = call i32 @atoi(ptr nocapture nonnull getelementptr inbounds ([2 x %struct.A], ptr @a, i64 1, i64 0, i32 0, i64 0))
+; CHECK-NEXT: [[IA_0_0_32:%.*]] = call i32 @atoi(ptr nocapture nonnull getelementptr inbounds (i8, ptr @a, i64 32))
; CHECK-NEXT: store i32 [[IA_0_0_32]], ptr [[PI:%.*]], align 4
-; CHECK-NEXT: [[IA_0_0_33:%.*]] = call i32 @atoi(ptr nocapture getelementptr ([2 x %struct.A], ptr @a, i64 1, i64 0, i32 0, i64 1))
+; CHECK-NEXT: [[IA_0_0_33:%.*]] = call i32 @atoi(ptr nocapture getelementptr (i8, ptr @a, i64 33))
; CHECK-NEXT: store i32 [[IA_0_0_33]], ptr [[PI]], align 4
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/InstCombine/str-int-4.ll b/llvm/test/Transforms/InstCombine/str-int-4.ll
index 6efc5fb4ed1f..9173e122f8dd 100644
--- a/llvm/test/Transforms/InstCombine/str-int-4.ll
+++ b/llvm/test/Transforms/InstCombine/str-int-4.ll
@@ -42,39 +42,39 @@ declare i64 @strtoll(ptr, ptr, i32)
define void @fold_strtol(ptr %ps) {
; CHECK-LABEL: @fold_strtol(
-; CHECK-NEXT: store ptr getelementptr inbounds ([11 x i8], ptr @ws_im123, i64 0, i64 10), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @ws_im123, i64 10), ptr @endptr, align 8
; CHECK-NEXT: store i32 -123, ptr [[PS:%.*]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([11 x i8], ptr @ws_ip234, i64 0, i64 10), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @ws_ip234, i64 10), ptr @endptr, align 8
; CHECK-NEXT: [[PS1:%.*]] = getelementptr i8, ptr [[PS]], i64 4
; CHECK-NEXT: store i32 234, ptr [[PS1]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([3 x i8], ptr @i0, i64 0, i64 2), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @i0, i64 2), ptr @endptr, align 8
; CHECK-NEXT: [[PS2:%.*]] = getelementptr i8, ptr [[PS]], i64 8
; CHECK-NEXT: store i32 0, ptr [[PS2]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([3 x i8], ptr @i9, i64 0, i64 2), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @i9, i64 2), ptr @endptr, align 8
; CHECK-NEXT: [[PS3:%.*]] = getelementptr i8, ptr [[PS]], i64 12
; CHECK-NEXT: store i32 9, ptr [[PS3]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([3 x i8], ptr @ia, i64 0, i64 2), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @ia, i64 2), ptr @endptr, align 8
; CHECK-NEXT: [[PS4:%.*]] = getelementptr i8, ptr [[PS]], i64 16
; CHECK-NEXT: store i32 10, ptr [[PS4]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([7 x i8], ptr @i19azAZ, i64 0, i64 6), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @i19azAZ, i64 6), ptr @endptr, align 8
; CHECK-NEXT: [[PS5:%.*]] = getelementptr i8, ptr [[PS]], i64 20
; CHECK-NEXT: store i32 76095035, ptr [[PS5]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([13 x i8], ptr @i32min, i64 0, i64 12), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @i32min, i64 12), ptr @endptr, align 8
; CHECK-NEXT: [[PS6:%.*]] = getelementptr i8, ptr [[PS]], i64 24
; CHECK-NEXT: store i32 -2147483648, ptr [[PS6]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([15 x i8], ptr @mo32min, i64 0, i64 14), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @mo32min, i64 14), ptr @endptr, align 8
; CHECK-NEXT: [[PS7:%.*]] = getelementptr i8, ptr [[PS]], i64 28
; CHECK-NEXT: store i32 -2147483648, ptr [[PS7]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([13 x i8], ptr @mx32min, i64 0, i64 12), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @mx32min, i64 12), ptr @endptr, align 8
; CHECK-NEXT: [[PS8:%.*]] = getelementptr i8, ptr [[PS]], i64 32
; CHECK-NEXT: store i32 -2147483648, ptr [[PS8]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([13 x i8], ptr @mx32min, i64 0, i64 12), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @mx32min, i64 12), ptr @endptr, align 8
; CHECK-NEXT: [[PS9:%.*]] = getelementptr i8, ptr [[PS]], i64 36
; CHECK-NEXT: store i32 -2147483648, ptr [[PS9]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([12 x i8], ptr @i32max, i64 0, i64 11), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @i32max, i64 11), ptr @endptr, align 8
; CHECK-NEXT: [[PS10:%.*]] = getelementptr i8, ptr [[PS]], i64 40
; CHECK-NEXT: store i32 2147483647, ptr [[PS10]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([12 x i8], ptr @x32max, i64 0, i64 11), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @x32max, i64 11), ptr @endptr, align 8
; CHECK-NEXT: [[PS11:%.*]] = getelementptr i8, ptr [[PS]], i64 44
; CHECK-NEXT: store i32 2147483647, ptr [[PS11]], align 4
; CHECK-NEXT: ret void
@@ -181,7 +181,7 @@ define void @call_strtol(ptr %ps) {
; CHECK-NEXT: [[NWS:%.*]] = call i32 @strtol(ptr nonnull @ws, ptr nonnull @endptr, i32 10)
; CHECK-NEXT: [[PS11:%.*]] = getelementptr i8, ptr [[PS]], i64 44
; CHECK-NEXT: store i32 [[NWS]], ptr [[PS11]], align 4
-; CHECK-NEXT: [[NWSP6:%.*]] = call i32 @strtol(ptr nonnull getelementptr inbounds ([7 x i8], ptr @ws, i64 0, i64 6), ptr nonnull @endptr, i32 10)
+; CHECK-NEXT: [[NWSP6:%.*]] = call i32 @strtol(ptr nonnull getelementptr inbounds (i8, ptr @ws, i64 6), ptr nonnull @endptr, i32 10)
; CHECK-NEXT: [[PS12:%.*]] = getelementptr i8, ptr [[PS]], i64 48
; CHECK-NEXT: store i32 [[NWSP6]], ptr [[PS12]], align 4
; CHECK-NEXT: [[I0B1:%.*]] = call i32 @strtol(ptr nonnull @i0, ptr nonnull @endptr, i32 1)
@@ -287,15 +287,15 @@ define void @call_strtol(ptr %ps) {
define void @fold_strtoll(ptr %ps) {
; CHECK-LABEL: @fold_strtoll(
-; CHECK-NEXT: store ptr getelementptr inbounds ([11 x i8], ptr @ws_im123, i64 0, i64 10), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @ws_im123, i64 10), ptr @endptr, align 8
; CHECK-NEXT: store i64 -123, ptr [[PS:%.*]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([11 x i8], ptr @ws_ip234, i64 0, i64 10), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @ws_ip234, i64 10), ptr @endptr, align 8
; CHECK-NEXT: [[PS1:%.*]] = getelementptr i8, ptr [[PS]], i64 8
; CHECK-NEXT: store i64 234, ptr [[PS1]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([22 x i8], ptr @i64min, i64 0, i64 21), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @i64min, i64 21), ptr @endptr, align 8
; CHECK-NEXT: [[PS2:%.*]] = getelementptr i8, ptr [[PS]], i64 16
; CHECK-NEXT: store i64 -9223372036854775808, ptr [[PS2]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([21 x i8], ptr @i64max, i64 0, i64 20), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @i64max, i64 20), ptr @endptr, align 8
; CHECK-NEXT: [[PS3:%.*]] = getelementptr i8, ptr [[PS]], i64 24
; CHECK-NEXT: store i64 9223372036854775807, ptr [[PS3]], align 4
; CHECK-NEXT: ret void
@@ -335,7 +335,7 @@ define void @call_strtoll(ptr %ps) {
; CHECK-NEXT: [[NWS:%.*]] = call i64 @strtoll(ptr nonnull @ws, ptr nonnull @endptr, i32 10)
; CHECK-NEXT: [[PS2:%.*]] = getelementptr i8, ptr [[PS]], i64 16
; CHECK-NEXT: store i64 [[NWS]], ptr [[PS2]], align 4
-; CHECK-NEXT: [[NWSP6:%.*]] = call i64 @strtoll(ptr nonnull getelementptr inbounds ([7 x i8], ptr @ws, i64 0, i64 6), ptr nonnull @endptr, i32 10)
+; CHECK-NEXT: [[NWSP6:%.*]] = call i64 @strtoll(ptr nonnull getelementptr inbounds (i8, ptr @ws, i64 6), ptr nonnull @endptr, i32 10)
; CHECK-NEXT: [[PS3:%.*]] = getelementptr i8, ptr [[PS]], i64 24
; CHECK-NEXT: store i64 [[NWSP6]], ptr [[PS3]], align 4
; CHECK-NEXT: ret void
@@ -375,10 +375,10 @@ define void @call_strtol_trailing_space(ptr %ps) {
; CHECK-NEXT: [[N1:%.*]] = call i32 @strtol(ptr nonnull @i_1_2_3_, ptr nonnull @endptr, i32 10)
; CHECK-NEXT: [[PS1:%.*]] = getelementptr i8, ptr [[PS:%.*]], i64 4
; CHECK-NEXT: store i32 [[N1]], ptr [[PS1]], align 4
-; CHECK-NEXT: [[N2:%.*]] = call i32 @strtol(ptr nonnull getelementptr inbounds ([9 x i8], ptr @i_1_2_3_, i64 0, i64 2), ptr nonnull @endptr, i32 10)
+; CHECK-NEXT: [[N2:%.*]] = call i32 @strtol(ptr nonnull getelementptr inbounds (i8, ptr @i_1_2_3_, i64 2), ptr nonnull @endptr, i32 10)
; CHECK-NEXT: [[PS2:%.*]] = getelementptr i8, ptr [[PS]], i64 8
; CHECK-NEXT: store i32 [[N2]], ptr [[PS2]], align 4
-; CHECK-NEXT: [[N3:%.*]] = call i32 @strtol(ptr nonnull getelementptr inbounds ([9 x i8], ptr @i_1_2_3_, i64 0, i64 4), ptr nonnull @endptr, i32 10)
+; CHECK-NEXT: [[N3:%.*]] = call i32 @strtol(ptr nonnull getelementptr inbounds (i8, ptr @i_1_2_3_, i64 4), ptr nonnull @endptr, i32 10)
; CHECK-NEXT: [[PS3:%.*]] = getelementptr i8, ptr [[PS]], i64 12
; CHECK-NEXT: store i32 [[N3]], ptr [[PS3]], align 4
; CHECK-NEXT: ret void
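
The hunks above, and most hunks below, are one and the same canonicalization: constant-expression GEPs are respelled as i8 GEPs with the byte offset computed up front. A minimal hand-written sketch of the equivalence, on a hypothetical 7-byte global @str7 (this module is illustrative, not part of the commit); both functions return the same address:

@str7 = constant [7 x i8] c"abcdef\00"

define ptr @typed_form() {
  ; Old spelling: element 6 of a [7 x i8] array.
  ret ptr getelementptr inbounds ([7 x i8], ptr @str7, i64 0, i64 6)
}

define ptr @byte_form() {
  ; New spelling: 6 * sizeof(i8) = 6 bytes from the base.
  ret ptr getelementptr inbounds (i8, ptr @str7, i64 6)
}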
diff --git a/llvm/test/Transforms/InstCombine/str-int-5.ll b/llvm/test/Transforms/InstCombine/str-int-5.ll
index ff4f2bffd977..4ccf7ea6407c 100644
--- a/llvm/test/Transforms/InstCombine/str-int-5.ll
+++ b/llvm/test/Transforms/InstCombine/str-int-5.ll
@@ -46,39 +46,39 @@ declare i64 @strtoull(ptr, ptr, i32)
define void @fold_strtoul(ptr %ps) {
; CHECK-LABEL: @fold_strtoul(
-; CHECK-NEXT: store ptr getelementptr inbounds ([11 x i8], ptr @ws_im123, i64 0, i64 10), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @ws_im123, i64 10), ptr @endptr, align 8
; CHECK-NEXT: store i32 -123, ptr [[PS:%.*]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([11 x i8], ptr @ws_ip234, i64 0, i64 10), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @ws_ip234, i64 10), ptr @endptr, align 8
; CHECK-NEXT: [[PS1:%.*]] = getelementptr i8, ptr [[PS]], i64 4
; CHECK-NEXT: store i32 234, ptr [[PS1]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([13 x i8], ptr @i32min_m1, i64 0, i64 12), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @i32min_m1, i64 12), ptr @endptr, align 8
; CHECK-NEXT: [[PS2:%.*]] = getelementptr i8, ptr [[PS]], i64 8
; CHECK-NEXT: store i32 2147483647, ptr [[PS2]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([13 x i8], ptr @i32min, i64 0, i64 12), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @i32min, i64 12), ptr @endptr, align 8
; CHECK-NEXT: [[PS3:%.*]] = getelementptr i8, ptr [[PS]], i64 12
; CHECK-NEXT: store i32 -2147483648, ptr [[PS3]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([15 x i8], ptr @o32min, i64 0, i64 14), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @o32min, i64 14), ptr @endptr, align 8
; CHECK-NEXT: [[PS4:%.*]] = getelementptr i8, ptr [[PS]], i64 16
; CHECK-NEXT: store i32 -2147483648, ptr [[PS4]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([15 x i8], ptr @mo32min, i64 0, i64 14), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @mo32min, i64 14), ptr @endptr, align 8
; CHECK-NEXT: [[PS5:%.*]] = getelementptr i8, ptr [[PS]], i64 20
; CHECK-NEXT: store i32 -2147483648, ptr [[PS5]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([13 x i8], ptr @x32min, i64 0, i64 12), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @x32min, i64 12), ptr @endptr, align 8
; CHECK-NEXT: [[PS6:%.*]] = getelementptr i8, ptr [[PS]], i64 24
; CHECK-NEXT: store i32 -2147483648, ptr [[PS6]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([13 x i8], ptr @mx32min, i64 0, i64 12), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @mx32min, i64 12), ptr @endptr, align 8
; CHECK-NEXT: [[PS7:%.*]] = getelementptr i8, ptr [[PS]], i64 28
; CHECK-NEXT: store i32 -2147483648, ptr [[PS7]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([12 x i8], ptr @i32max, i64 0, i64 11), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @i32max, i64 11), ptr @endptr, align 8
; CHECK-NEXT: [[PS8:%.*]] = getelementptr i8, ptr [[PS]], i64 32
; CHECK-NEXT: store i32 2147483647, ptr [[PS8]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([6 x i8], ptr @mX01, i64 0, i64 5), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @mX01, i64 5), ptr @endptr, align 8
; CHECK-NEXT: [[PS9:%.*]] = getelementptr i8, ptr [[PS]], i64 36
; CHECK-NEXT: store i32 -1, ptr [[PS9]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([12 x i8], ptr @i32max_p1, i64 0, i64 11), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @i32max_p1, i64 11), ptr @endptr, align 8
; CHECK-NEXT: [[PS10:%.*]] = getelementptr i8, ptr [[PS]], i64 40
; CHECK-NEXT: store i32 -2147483648, ptr [[PS10]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([12 x i8], ptr @ui32max, i64 0, i64 11), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @ui32max, i64 11), ptr @endptr, align 8
; CHECK-NEXT: [[PS11:%.*]] = getelementptr i8, ptr [[PS]], i64 44
; CHECK-NEXT: store i32 -1, ptr [[PS11]], align 4
; CHECK-NEXT: ret void
@@ -159,7 +159,7 @@ define void @call_strtoul(ptr %ps) {
; CHECK-NEXT: [[NWS:%.*]] = call i32 @strtoul(ptr nonnull @ws, ptr nonnull @endptr, i32 10)
; CHECK-NEXT: [[PS2:%.*]] = getelementptr i8, ptr [[PS]], i64 8
; CHECK-NEXT: store i32 [[NWS]], ptr [[PS2]], align 4
-; CHECK-NEXT: [[NWSP6:%.*]] = call i32 @strtoul(ptr nonnull getelementptr inbounds ([7 x i8], ptr @ws, i64 0, i64 6), ptr nonnull @endptr, i32 10)
+; CHECK-NEXT: [[NWSP6:%.*]] = call i32 @strtoul(ptr nonnull getelementptr inbounds (i8, ptr @ws, i64 6), ptr nonnull @endptr, i32 10)
; CHECK-NEXT: [[PS3:%.*]] = getelementptr i8, ptr [[PS]], i64 12
; CHECK-NEXT: store i32 [[NWSP6]], ptr [[PS3]], align 4
; CHECK-NEXT: ret void
@@ -195,36 +195,36 @@ define void @call_strtoul(ptr %ps) {
define void @fold_strtoull(ptr %ps) {
; CHECK-LABEL: @fold_strtoull(
-; CHECK-NEXT: store ptr getelementptr inbounds ([11 x i8], ptr @ws_im123, i64 0, i64 10), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @ws_im123, i64 10), ptr @endptr, align 8
; CHECK-NEXT: store i64 -123, ptr [[PS:%.*]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([11 x i8], ptr @ws_ip234, i64 0, i64 10), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @ws_ip234, i64 10), ptr @endptr, align 8
; CHECK-NEXT: [[PS1:%.*]] = getelementptr i8, ptr [[PS]], i64 8
; CHECK-NEXT: store i64 234, ptr [[PS1]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([22 x i8], ptr @i64min_m1, i64 0, i64 21), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @i64min_m1, i64 21), ptr @endptr, align 8
; CHECK-NEXT: [[PS2:%.*]] = getelementptr i8, ptr [[PS]], i64 16
; CHECK-NEXT: store i64 9223372036854775807, ptr [[PS2]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([13 x i8], ptr @i32min, i64 0, i64 12), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @i32min, i64 12), ptr @endptr, align 8
; CHECK-NEXT: [[PS3:%.*]] = getelementptr i8, ptr [[PS]], i64 24
; CHECK-NEXT: store i64 -2147483648, ptr [[PS3]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([15 x i8], ptr @o32min, i64 0, i64 14), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @o32min, i64 14), ptr @endptr, align 8
; CHECK-NEXT: [[PS4:%.*]] = getelementptr i8, ptr [[PS]], i64 32
; CHECK-NEXT: store i64 2147483648, ptr [[PS4]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([13 x i8], ptr @x32min, i64 0, i64 12), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @x32min, i64 12), ptr @endptr, align 8
; CHECK-NEXT: [[PS5:%.*]] = getelementptr i8, ptr [[PS]], i64 40
; CHECK-NEXT: store i64 2147483648, ptr [[PS5]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([22 x i8], ptr @i64min, i64 0, i64 21), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @i64min, i64 21), ptr @endptr, align 8
; CHECK-NEXT: [[PS6:%.*]] = getelementptr i8, ptr [[PS]], i64 48
; CHECK-NEXT: store i64 -9223372036854775808, ptr [[PS6]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([21 x i8], ptr @i64max, i64 0, i64 20), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @i64max, i64 20), ptr @endptr, align 8
; CHECK-NEXT: [[PS7:%.*]] = getelementptr i8, ptr [[PS]], i64 56
; CHECK-NEXT: store i64 9223372036854775807, ptr [[PS7]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([21 x i8], ptr @i64max_p1, i64 0, i64 20), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @i64max_p1, i64 20), ptr @endptr, align 8
; CHECK-NEXT: [[PS8:%.*]] = getelementptr i8, ptr [[PS]], i64 64
; CHECK-NEXT: store i64 -9223372036854775808, ptr [[PS8]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([22 x i8], ptr @ui64max, i64 0, i64 21), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @ui64max, i64 21), ptr @endptr, align 8
; CHECK-NEXT: [[PS9:%.*]] = getelementptr i8, ptr [[PS]], i64 72
; CHECK-NEXT: store i64 -1, ptr [[PS9]], align 4
-; CHECK-NEXT: store ptr getelementptr inbounds ([20 x i8], ptr @x64max, i64 0, i64 19), ptr @endptr, align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @x64max, i64 19), ptr @endptr, align 8
; CHECK-NEXT: [[PS10:%.*]] = getelementptr i8, ptr [[PS]], i64 80
; CHECK-NEXT: store i64 -1, ptr [[PS10]], align 4
; CHECK-NEXT: ret void
@@ -298,7 +298,7 @@ define void @call_strtoull(ptr %ps) {
; CHECK-NEXT: [[NWS:%.*]] = call i64 @strtoull(ptr nonnull @ws, ptr nonnull @endptr, i32 10)
; CHECK-NEXT: [[PS2:%.*]] = getelementptr i8, ptr [[PS]], i64 16
; CHECK-NEXT: store i64 [[NWS]], ptr [[PS2]], align 4
-; CHECK-NEXT: [[NWSP6:%.*]] = call i64 @strtoull(ptr nonnull getelementptr inbounds ([7 x i8], ptr @ws, i64 0, i64 6), ptr nonnull @endptr, i32 10)
+; CHECK-NEXT: [[NWSP6:%.*]] = call i64 @strtoull(ptr nonnull getelementptr inbounds (i8, ptr @ws, i64 6), ptr nonnull @endptr, i32 10)
; CHECK-NEXT: [[PS3:%.*]] = getelementptr i8, ptr [[PS]], i64 24
; CHECK-NEXT: store i64 [[NWSP6]], ptr [[PS3]], align 4
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/InstCombine/str-int.ll b/llvm/test/Transforms/InstCombine/str-int.ll
index 718bfe413333..ee8d04d2f0e2 100644
--- a/llvm/test/Transforms/InstCombine/str-int.ll
+++ b/llvm/test/Transforms/InstCombine/str-int.ll
@@ -46,7 +46,7 @@ define i32 @strtol_hex() #0 {
define i32 @strtol_endptr_not_null(ptr %pend) {
; CHECK-LABEL: @strtol_endptr_not_null(
; CHECK-NEXT: [[ENDP1:%.*]] = getelementptr inbounds i8, ptr [[PEND:%.*]], i64 8
-; CHECK-NEXT: store ptr getelementptr inbounds ([3 x i8], ptr @.str, i64 0, i64 2), ptr [[ENDP1]], align 8
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @.str, i64 2), ptr [[ENDP1]], align 8
; CHECK-NEXT: ret i32 12
;
%endp1 = getelementptr inbounds ptr, ptr %pend, i32 1
diff --git a/llvm/test/Transforms/InstCombine/strcall-bad-sig.ll b/llvm/test/Transforms/InstCombine/strcall-bad-sig.ll
index 5e59db5ef88a..7d3633a5d227 100644
--- a/llvm/test/Transforms/InstCombine/strcall-bad-sig.ll
+++ b/llvm/test/Transforms/InstCombine/strcall-bad-sig.ll
@@ -42,7 +42,7 @@ declare ptr @strncasecmp(ptr, ptr)
define ptr @call_bad_strncasecmp() {
; CHECK-LABEL: @call_bad_strncasecmp(
-; CHECK-NEXT: [[CMP:%.*]] = call ptr @strncasecmp(ptr nonnull @a, ptr nonnull getelementptr inbounds ([2 x i8], ptr @a, i64 0, i64 1))
+; CHECK-NEXT: [[CMP:%.*]] = call ptr @strncasecmp(ptr nonnull @a, ptr nonnull getelementptr inbounds (i8, ptr @a, i64 1))
; CHECK-NEXT: ret ptr [[CMP]]
;
%p1 = getelementptr [2 x i8], ptr @a, i32 0, i32 1
@@ -55,7 +55,7 @@ declare i1 @strcoll(ptr, ptr, ptr)
define i1 @call_bad_strcoll() {
; CHECK-LABEL: @call_bad_strcoll(
-; CHECK-NEXT: [[I:%.*]] = call i1 @strcoll(ptr nonnull @a, ptr nonnull getelementptr inbounds ([2 x i8], ptr @a, i64 0, i64 1), ptr nonnull @a)
+; CHECK-NEXT: [[I:%.*]] = call i1 @strcoll(ptr nonnull @a, ptr nonnull getelementptr inbounds (i8, ptr @a, i64 1), ptr nonnull @a)
; CHECK-NEXT: ret i1 [[I]]
;
%p1 = getelementptr [2 x i8], ptr @a, i32 0, i32 1
@@ -80,7 +80,7 @@ declare i1 @strtok(ptr, ptr, i1)
define i1 @call_bad_strtok() {
; CHECK-LABEL: @call_bad_strtok(
-; CHECK-NEXT: [[RET:%.*]] = call i1 @strtok(ptr nonnull @a, ptr nonnull getelementptr inbounds ([2 x i8], ptr @a, i64 0, i64 1), i1 false)
+; CHECK-NEXT: [[RET:%.*]] = call i1 @strtok(ptr nonnull @a, ptr nonnull getelementptr inbounds (i8, ptr @a, i64 1), i1 false)
; CHECK-NEXT: ret i1 [[RET]]
;
%p1 = getelementptr [2 x i8], ptr @a, i32 0, i32 1
@@ -94,7 +94,7 @@ declare i1 @strtok_r(ptr, ptr)
define i1 @call_bad_strtok_r() {
; CHECK-LABEL: @call_bad_strtok_r(
-; CHECK-NEXT: [[RET:%.*]] = call i1 @strtok_r(ptr nonnull @a, ptr nonnull getelementptr inbounds ([2 x i8], ptr @a, i64 0, i64 1))
+; CHECK-NEXT: [[RET:%.*]] = call i1 @strtok_r(ptr nonnull @a, ptr nonnull getelementptr inbounds (i8, ptr @a, i64 1))
; CHECK-NEXT: ret i1 [[RET]]
;
%p1 = getelementptr [2 x i8], ptr @a, i32 0, i32 1
@@ -146,7 +146,7 @@ declare ptr @strxfrm(ptr, ptr)
define ptr @call_bad_strxfrm() {
; CHECK-LABEL: @call_bad_strxfrm(
-; CHECK-NEXT: [[RET:%.*]] = call ptr @strxfrm(ptr nonnull @a, ptr nonnull getelementptr inbounds ([2 x i8], ptr @a, i64 0, i64 1))
+; CHECK-NEXT: [[RET:%.*]] = call ptr @strxfrm(ptr nonnull @a, ptr nonnull getelementptr inbounds (i8, ptr @a, i64 1))
; CHECK-NEXT: ret ptr [[RET]]
;
%p1 = getelementptr [2 x i8], ptr @a, i32 0, i32 1
diff --git a/llvm/test/Transforms/InstCombine/strcall-no-nul.ll b/llvm/test/Transforms/InstCombine/strcall-no-nul.ll
index 30221ad5b096..96905a273319 100644
--- a/llvm/test/Transforms/InstCombine/strcall-no-nul.ll
+++ b/llvm/test/Transforms/InstCombine/strcall-no-nul.ll
@@ -50,7 +50,7 @@ declare i32 @snprintf(ptr, i64, ptr, ...)
define ptr @fold_strchr_past_end() {
; CHECK-LABEL: @fold_strchr_past_end(
-; CHECK-NEXT: ret ptr getelementptr inbounds ([5 x i8], ptr @a5, i64 1, i64 0)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @a5, i64 5)
;
%p = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
%q = call ptr @strchr(ptr %p, i32 0)
@@ -268,7 +268,7 @@ define void @fold_strcspn_past_end(ptr %poff) {
define i32 @fold_atoi_past_end() {
; CHECK-LABEL: @fold_atoi_past_end(
-; CHECK-NEXT: [[I:%.*]] = call i32 @atoi(ptr nocapture nonnull getelementptr inbounds ([5 x i8], ptr @a5, i64 1, i64 0))
+; CHECK-NEXT: [[I:%.*]] = call i32 @atoi(ptr nocapture nonnull getelementptr inbounds (i8, ptr @a5, i64 5))
; CHECK-NEXT: ret i32 [[I]]
;
%p5 = getelementptr [5 x i8], ptr @a5, i32 0, i32 5
@@ -282,21 +282,21 @@ define i32 @fold_atoi_past_end() {
define void @fold_atol_strtol_past_end(ptr %ps) {
; CHECK-LABEL: @fold_atol_strtol_past_end(
-; CHECK-NEXT: [[I0:%.*]] = call i64 @atol(ptr nocapture nonnull getelementptr inbounds ([5 x i8], ptr @a5, i64 1, i64 0))
+; CHECK-NEXT: [[I0:%.*]] = call i64 @atol(ptr nocapture nonnull getelementptr inbounds (i8, ptr @a5, i64 5))
; CHECK-NEXT: store i64 [[I0]], ptr [[PS:%.*]], align 4
-; CHECK-NEXT: [[I1:%.*]] = call i64 @atoll(ptr nocapture nonnull getelementptr inbounds ([5 x i8], ptr @a5, i64 1, i64 0))
+; CHECK-NEXT: [[I1:%.*]] = call i64 @atoll(ptr nocapture nonnull getelementptr inbounds (i8, ptr @a5, i64 5))
; CHECK-NEXT: [[P1:%.*]] = getelementptr i8, ptr [[PS]], i64 8
; CHECK-NEXT: store i64 [[I1]], ptr [[P1]], align 4
-; CHECK-NEXT: [[I2:%.*]] = call i64 @strtol(ptr nocapture nonnull getelementptr inbounds ([5 x i8], ptr @a5, i64 1, i64 0), ptr null, i32 0)
+; CHECK-NEXT: [[I2:%.*]] = call i64 @strtol(ptr nocapture nonnull getelementptr inbounds (i8, ptr @a5, i64 5), ptr null, i32 0)
; CHECK-NEXT: [[P2:%.*]] = getelementptr i8, ptr [[PS]], i64 16
; CHECK-NEXT: store i64 [[I2]], ptr [[P2]], align 4
-; CHECK-NEXT: [[I3:%.*]] = call i64 @strtoul(ptr nocapture nonnull getelementptr inbounds ([5 x i8], ptr @a5, i64 1, i64 0), ptr null, i32 8)
+; CHECK-NEXT: [[I3:%.*]] = call i64 @strtoul(ptr nocapture nonnull getelementptr inbounds (i8, ptr @a5, i64 5), ptr null, i32 8)
; CHECK-NEXT: [[P3:%.*]] = getelementptr i8, ptr [[PS]], i64 24
; CHECK-NEXT: store i64 [[I3]], ptr [[P3]], align 4
-; CHECK-NEXT: [[I4:%.*]] = call i64 @strtoll(ptr nocapture nonnull getelementptr inbounds ([5 x i8], ptr @a5, i64 1, i64 0), ptr null, i32 10)
+; CHECK-NEXT: [[I4:%.*]] = call i64 @strtoll(ptr nocapture nonnull getelementptr inbounds (i8, ptr @a5, i64 5), ptr null, i32 10)
; CHECK-NEXT: [[P4:%.*]] = getelementptr i8, ptr [[PS]], i64 32
; CHECK-NEXT: store i64 [[I4]], ptr [[P4]], align 4
-; CHECK-NEXT: [[I5:%.*]] = call i64 @strtoul(ptr nocapture nonnull getelementptr inbounds ([5 x i8], ptr @a5, i64 1, i64 0), ptr null, i32 16)
+; CHECK-NEXT: [[I5:%.*]] = call i64 @strtoul(ptr nocapture nonnull getelementptr inbounds (i8, ptr @a5, i64 5), ptr null, i32 16)
; CHECK-NEXT: [[P5:%.*]] = getelementptr i8, ptr [[PS]], i64 40
; CHECK-NEXT: store i64 [[I5]], ptr [[P5]], align 4
; CHECK-NEXT: ret void
@@ -358,9 +358,9 @@ define void @fold_sprintf_past_end(ptr %pcnt, ptr %dst) {
define void @fold_snprintf_past_end(ptr %pcnt, ptr %dst, i64 %n) {
; CHECK-LABEL: @fold_snprintf_past_end(
-; CHECK-NEXT: [[N5_:%.*]] = call i32 (ptr, i64, ptr, ...) @snprintf(ptr [[DST:%.*]], i64 [[N:%.*]], ptr nonnull getelementptr inbounds ([5 x i8], ptr @a5, i64 1, i64 0))
+; CHECK-NEXT: [[N5_:%.*]] = call i32 (ptr, i64, ptr, ...) @snprintf(ptr [[DST:%.*]], i64 [[N:%.*]], ptr nonnull getelementptr inbounds (i8, ptr @a5, i64 5))
; CHECK-NEXT: store i32 [[N5_]], ptr [[PCNT:%.*]], align 4
-; CHECK-NEXT: [[N05:%.*]] = call i32 (ptr, i64, ptr, ...) @snprintf(ptr [[DST]], i64 [[N]], ptr nonnull @a5, ptr nonnull getelementptr inbounds ([5 x i8], ptr @a5, i64 1, i64 0))
+; CHECK-NEXT: [[N05:%.*]] = call i32 (ptr, i64, ptr, ...) @snprintf(ptr [[DST]], i64 [[N]], ptr nonnull @a5, ptr nonnull getelementptr inbounds (i8, ptr @a5, i64 5))
; CHECK-NEXT: [[PN05:%.*]] = getelementptr i8, ptr [[PCNT]], i64 4
; CHECK-NEXT: store i32 [[N05]], ptr [[PN05]], align 4
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/InstCombine/strchr-1.ll b/llvm/test/Transforms/InstCombine/strchr-1.ll
index 191e0a18fced..0cedc3ad5181 100644
--- a/llvm/test/Transforms/InstCombine/strchr-1.ll
+++ b/llvm/test/Transforms/InstCombine/strchr-1.ll
@@ -13,7 +13,7 @@ declare ptr @strchr(ptr, i32)
define void @test_simplify1() {
; CHECK-LABEL: @test_simplify1(
-; CHECK-NEXT: store ptr getelementptr inbounds ([14 x i8], ptr @hello, i32 0, i32 6), ptr @chp, align 4
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @hello, i32 6), ptr @chp, align 4
; CHECK-NEXT: ret void
;
@@ -35,7 +35,7 @@ define void @test_simplify2() {
define void @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
-; CHECK-NEXT: store ptr getelementptr inbounds ([14 x i8], ptr @hello, i32 0, i32 13), ptr @chp, align 4
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @hello, i32 13), ptr @chp, align 4
; CHECK-NEXT: ret void
;
@@ -58,7 +58,7 @@ define void @test_simplify4(i32 %chr) {
define void @test_simplify5() {
; CHECK-LABEL: @test_simplify5(
-; CHECK-NEXT: store ptr getelementptr inbounds ([14 x i8], ptr @hello, i32 0, i32 13), ptr @chp, align 4
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @hello, i32 13), ptr @chp, align 4
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/InstCombine/strchr-3.ll b/llvm/test/Transforms/InstCombine/strchr-3.ll
index 55fb44563920..7cbbdf8c69bc 100644
--- a/llvm/test/Transforms/InstCombine/strchr-3.ll
+++ b/llvm/test/Transforms/InstCombine/strchr-3.ll
@@ -20,7 +20,7 @@ define ptr @fold_strchr_s1_C(i32 %C) {
; CHECK-LABEL: @fold_strchr_s1_C(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 0
-; CHECK-NEXT: [[MEMCHR_SEL1:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds ([2 x i8], ptr @s1, i64 0, i64 1), ptr null
+; CHECK-NEXT: [[MEMCHR_SEL1:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds (i8, ptr @s1, i64 1), ptr null
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP1]], 1
; CHECK-NEXT: [[MEMCHR_SEL2:%.*]] = select i1 [[TMP3]], ptr @s1, ptr [[MEMCHR_SEL1]]
; CHECK-NEXT: ret ptr [[MEMCHR_SEL2]]
@@ -36,7 +36,7 @@ define ptr @fold_strchr_s11_C(i32 %C) {
; CHECK-LABEL: @fold_strchr_s11_C(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 0
-; CHECK-NEXT: [[MEMCHR_SEL1:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds ([3 x i8], ptr @s11, i64 0, i64 2), ptr null
+; CHECK-NEXT: [[MEMCHR_SEL1:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds (i8, ptr @s11, i64 2), ptr null
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP1]], 1
; CHECK-NEXT: [[MEMCHR_SEL2:%.*]] = select i1 [[TMP3]], ptr @s11, ptr [[MEMCHR_SEL1]]
; CHECK-NEXT: ret ptr [[MEMCHR_SEL2]]
@@ -52,7 +52,7 @@ define ptr @fold_strchr_s111_C(i32 %C) {
; CHECK-LABEL: @fold_strchr_s111_C(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 0
-; CHECK-NEXT: [[MEMCHR_SEL1:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds ([4 x i8], ptr @s111, i64 0, i64 3), ptr null
+; CHECK-NEXT: [[MEMCHR_SEL1:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds (i8, ptr @s111, i64 3), ptr null
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP1]], 1
; CHECK-NEXT: [[MEMCHR_SEL2:%.*]] = select i1 [[TMP3]], ptr @s111, ptr [[MEMCHR_SEL1]]
; CHECK-NEXT: ret ptr [[MEMCHR_SEL2]]
@@ -96,9 +96,9 @@ define ptr @fold_strchr_s21111p1_C(i32 %C) {
; CHECK-LABEL: @fold_strchr_s21111p1_C(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 0
-; CHECK-NEXT: [[MEMCHR_SEL1:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds ([6 x i8], ptr @s21111, i64 0, i64 5), ptr null
+; CHECK-NEXT: [[MEMCHR_SEL1:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds (i8, ptr @s21111, i64 5), ptr null
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP1]], 1
-; CHECK-NEXT: [[MEMCHR_SEL2:%.*]] = select i1 [[TMP3]], ptr getelementptr inbounds ([6 x i8], ptr @s21111, i64 0, i64 1), ptr [[MEMCHR_SEL1]]
+; CHECK-NEXT: [[MEMCHR_SEL2:%.*]] = select i1 [[TMP3]], ptr getelementptr inbounds (i8, ptr @s21111, i64 1), ptr [[MEMCHR_SEL1]]
; CHECK-NEXT: ret ptr [[MEMCHR_SEL2]]
;
%ptr = getelementptr inbounds [6 x i8], ptr @s21111, i64 0, i64 1
@@ -113,7 +113,7 @@ define ptr @fold_strchr_s11102_C(i32 %C) {
; CHECK-LABEL: @fold_strchr_s11102_C(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 0
-; CHECK-NEXT: [[MEMCHR_SEL1:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds ([6 x i8], ptr @s11102, i64 0, i64 3), ptr null
+; CHECK-NEXT: [[MEMCHR_SEL1:%.*]] = select i1 [[TMP2]], ptr getelementptr inbounds (i8, ptr @s11102, i64 3), ptr null
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP1]], 1
; CHECK-NEXT: [[MEMCHR_SEL2:%.*]] = select i1 [[TMP3]], ptr @s11102, ptr [[MEMCHR_SEL1]]
; CHECK-NEXT: ret ptr [[MEMCHR_SEL2]]
diff --git a/llvm/test/Transforms/InstCombine/strcmp-4.ll b/llvm/test/Transforms/InstCombine/strcmp-4.ll
index bdd521ddb909..e96c28b780b2 100644
--- a/llvm/test/Transforms/InstCombine/strcmp-4.ll
+++ b/llvm/test/Transforms/InstCombine/strcmp-4.ll
@@ -11,8 +11,8 @@ declare i32 @strcmp(ptr, ptr)
define i32 @fold_strcmp_s3_x_s4_s3(i1 %C) {
; CHECK-LABEL: @fold_strcmp_s3_x_s4_s3(
-; CHECK-NEXT: [[PTR:%.*]] = select i1 [[C:%.*]], ptr getelementptr inbounds ([10 x i8], ptr @s9, i64 0, i64 6), ptr getelementptr inbounds ([10 x i8], ptr @s9, i64 0, i64 5)
-; CHECK-NEXT: [[CMP:%.*]] = call i32 @strcmp(ptr noundef nonnull dereferenceable(1) [[PTR]], ptr noundef nonnull dereferenceable(4) getelementptr inbounds ([10 x i8], ptr @s9, i64 0, i64 6))
+; CHECK-NEXT: [[PTR:%.*]] = select i1 [[C:%.*]], ptr getelementptr inbounds (i8, ptr @s9, i64 6), ptr getelementptr inbounds (i8, ptr @s9, i64 5)
+; CHECK-NEXT: [[CMP:%.*]] = call i32 @strcmp(ptr noundef nonnull dereferenceable(1) [[PTR]], ptr noundef nonnull dereferenceable(4) getelementptr inbounds (i8, ptr @s9, i64 6))
; CHECK-NEXT: ret i32 [[CMP]]
;
diff --git a/llvm/test/Transforms/InstCombine/strlcpy-1.ll b/llvm/test/Transforms/InstCombine/strlcpy-1.ll
index bfa4fc11d310..7ca6c1599f19 100644
--- a/llvm/test/Transforms/InstCombine/strlcpy-1.ll
+++ b/llvm/test/Transforms/InstCombine/strlcpy-1.ll
@@ -235,9 +235,9 @@ define void @call_strlcpy_s0_n(ptr %dst, ptr %s, i64 %n) {
; ANY-NEXT: [[NZ:%.*]] = or i64 [[N]], 1
; ANY-NEXT: [[NS_NZ:%.*]] = call i64 @strlcpy(ptr noundef nonnull dereferenceable(1) [[DST]], ptr noundef nonnull dereferenceable(1) [[S]], i64 [[NZ]])
; ANY-NEXT: call void @sink(ptr [[DST]], i64 [[NS_NZ]])
-; ANY-NEXT: [[NS0_N:%.*]] = call i64 @strlcpy(ptr [[DST]], ptr noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x i8], ptr @s4, i64 0, i64 4), i64 [[N]])
+; ANY-NEXT: [[NS0_N:%.*]] = call i64 @strlcpy(ptr [[DST]], ptr noundef nonnull dereferenceable(1) getelementptr inbounds (i8, ptr @s4, i64 4), i64 [[N]])
; ANY-NEXT: call void @sink(ptr [[DST]], i64 [[NS0_N]])
-; ANY-NEXT: [[NS1_N:%.*]] = call i64 @strlcpy(ptr [[DST]], ptr noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x i8], ptr @s4, i64 0, i64 3), i64 [[N]])
+; ANY-NEXT: [[NS1_N:%.*]] = call i64 @strlcpy(ptr [[DST]], ptr noundef nonnull dereferenceable(1) getelementptr inbounds (i8, ptr @s4, i64 3), i64 [[N]])
; ANY-NEXT: call void @sink(ptr [[DST]], i64 [[NS1_N]])
; ANY-NEXT: [[NS4_N:%.*]] = call i64 @strlcpy(ptr [[DST]], ptr noundef nonnull dereferenceable(1) @s4, i64 [[N]])
; ANY-NEXT: call void @sink(ptr [[DST]], i64 [[NS4_N]])
diff --git a/llvm/test/Transforms/InstCombine/strlen-1.ll b/llvm/test/Transforms/InstCombine/strlen-1.ll
index bd4c4a2ce47e..8def4dd9747f 100644
--- a/llvm/test/Transforms/InstCombine/strlen-1.ll
+++ b/llvm/test/Transforms/InstCombine/strlen-1.ll
@@ -235,7 +235,7 @@ define i1 @strlen0_after_write_to_first_byte_global() {
define i1 @strlen0_after_write_to_second_byte_global() {
; CHECK-LABEL: @strlen0_after_write_to_second_byte_global(
-; CHECK-NEXT: store i8 49, ptr getelementptr inbounds ([32 x i8], ptr @a, i32 0, i32 1), align 16
+; CHECK-NEXT: store i8 49, ptr getelementptr inbounds (i8, ptr @a, i32 1), align 16
; CHECK-NEXT: [[CHAR0:%.*]] = load i8, ptr @a, align 1
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[CHAR0]], 0
; CHECK-NEXT: ret i1 [[CMP]]
diff --git a/llvm/test/Transforms/InstCombine/strlen-6.ll b/llvm/test/Transforms/InstCombine/strlen-6.ll
index f1fe715d3893..25e653362db8 100644
--- a/llvm/test/Transforms/InstCombine/strlen-6.ll
+++ b/llvm/test/Transforms/InstCombine/strlen-6.ll
@@ -103,7 +103,7 @@ define i64 @fold_strlen_a_S3_p2_s4_to_1() {
define void @fold_strlen_a_s3_S4_to_4() {
; CHECK-LABEL: @fold_strlen_a_s3_S4_to_4(
; CHECK-NEXT: store i64 4, ptr @ax, align 4
-; CHECK-NEXT: store i64 4, ptr getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 1), align 4
+; CHECK-NEXT: store i64 4, ptr getelementptr inbounds (i8, ptr @ax, i64 8), align 4
; CHECK-NEXT: ret void
;
%p1 = getelementptr %struct.A_a4_a5, ptr @a_s3_s4, i32 0, i32 0, i32 4
@@ -125,7 +125,7 @@ define void @fold_strlen_a_s3_S4_to_4() {
define void @fold_strlen_a_s3_S4_p1_to_3() {
; CHECK-LABEL: @fold_strlen_a_s3_S4_p1_to_3(
; CHECK-NEXT: store i64 3, ptr @ax, align 4
-; CHECK-NEXT: store i64 3, ptr getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 1), align 4
+; CHECK-NEXT: store i64 3, ptr getelementptr inbounds (i8, ptr @ax, i64 8), align 4
; CHECK-NEXT: ret void
;
%p1 = getelementptr %struct.A_a4_a5, ptr @a_s3_s4, i32 0, i32 0, i32 5
@@ -147,7 +147,7 @@ define void @fold_strlen_a_s3_S4_p1_to_3() {
define void @fold_strlen_a_s3_i32_S4_to_4() {
; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_to_4(
; CHECK-NEXT: store i64 4, ptr @ax, align 4
-; CHECK-NEXT: store i64 4, ptr getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 1), align 4
+; CHECK-NEXT: store i64 4, ptr getelementptr inbounds (i8, ptr @ax, i64 8), align 4
; CHECK-NEXT: ret void
;
%p1 = getelementptr %struct.A_a4_i32_a5, ptr @a_s3_i32_s4, i32 0, i32 0, i32 8
@@ -169,7 +169,7 @@ define void @fold_strlen_a_s3_i32_S4_to_4() {
define void @fold_strlen_a_s3_i32_S4_p1_to_3() {
; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_p1_to_3(
; CHECK-NEXT: store i64 3, ptr @ax, align 4
-; CHECK-NEXT: store i64 3, ptr getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 1), align 4
+; CHECK-NEXT: store i64 3, ptr getelementptr inbounds (i8, ptr @ax, i64 8), align 4
; CHECK-NEXT: ret void
;
%p1 = getelementptr %struct.A_a4_i32_a5, ptr @a_s3_i32_s4, i32 0, i32 0, i32 9
@@ -191,7 +191,7 @@ define void @fold_strlen_a_s3_i32_S4_p1_to_3() {
define void @fold_strlen_a_s3_i32_S4_p2_to_2() {
; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_p2_to_2(
; CHECK-NEXT: store i64 2, ptr @ax, align 4
-; CHECK-NEXT: store i64 2, ptr getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 1), align 4
+; CHECK-NEXT: store i64 2, ptr getelementptr inbounds (i8, ptr @ax, i64 8), align 4
; CHECK-NEXT: ret void
;
%p1 = getelementptr %struct.A_a4_i32_a5, ptr @a_s3_i32_s4, i32 0, i32 0, i32 10
@@ -213,7 +213,7 @@ define void @fold_strlen_a_s3_i32_S4_p2_to_2() {
define void @fold_strlen_a_s3_i32_S4_p3_to_1() {
; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_p3_to_1(
; CHECK-NEXT: store i64 1, ptr @ax, align 4
-; CHECK-NEXT: store i64 1, ptr getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 1), align 4
+; CHECK-NEXT: store i64 1, ptr getelementptr inbounds (i8, ptr @ax, i64 8), align 4
; CHECK-NEXT: ret void
;
%p1 = getelementptr %struct.A_a4_i32_a5, ptr @a_s3_i32_s4, i32 0, i32 0, i32 11
@@ -235,7 +235,7 @@ define void @fold_strlen_a_s3_i32_S4_p3_to_1() {
define void @fold_strlen_a_s3_i32_S4_p4_to_0() {
; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_p4_to_0(
; CHECK-NEXT: store i64 0, ptr @ax, align 4
-; CHECK-NEXT: store i64 0, ptr getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 1), align 4
+; CHECK-NEXT: store i64 0, ptr getelementptr inbounds (i8, ptr @ax, i64 8), align 4
; CHECK-NEXT: ret void
;
%p1 = getelementptr %struct.A_a4_i32_a5, ptr @a_s3_i32_s4, i32 0, i32 0, i32 12
@@ -257,8 +257,8 @@ define void @fold_strlen_a_s3_i32_S4_p4_to_0() {
define void @fold_strlen_ax_s() {
; CHECK-LABEL: @fold_strlen_ax_s(
; CHECK-NEXT: store i64 3, ptr @ax, align 4
-; CHECK-NEXT: store i64 5, ptr getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 1), align 4
-; CHECK-NEXT: store i64 7, ptr getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 2), align 4
+; CHECK-NEXT: store i64 5, ptr getelementptr inbounds (i8, ptr @ax, i64 8), align 4
+; CHECK-NEXT: store i64 7, ptr getelementptr inbounds (i8, ptr @ax, i64 16), align 4
; CHECK-NEXT: ret void
;
%pax_s3 = getelementptr { i8, [4 x i8] }, ptr @ax_s3, i64 0, i32 1, i64 0
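
The @ax stores above show the same respelling where the scaling actually matters: @ax is a [0 x i64] array, so byte offset = index * 8 (hand-checked against the CHECK lines above):

; getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 1) -> gep (i8, ptr @ax, i64 8)
; getelementptr inbounds ([0 x i64], ptr @ax, i64 0, i64 2) -> gep (i8, ptr @ax, i64 16)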
diff --git a/llvm/test/Transforms/InstCombine/strpbrk-1.ll b/llvm/test/Transforms/InstCombine/strpbrk-1.ll
index 411bd8d627ec..b51071df25d2 100644
--- a/llvm/test/Transforms/InstCombine/strpbrk-1.ll
+++ b/llvm/test/Transforms/InstCombine/strpbrk-1.ll
@@ -37,7 +37,7 @@ define ptr @test_simplify2(ptr %pat) {
define ptr @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
-; CHECK-NEXT: ret ptr getelementptr inbounds ([12 x i8], ptr @hello, i32 0, i32 6)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @hello, i32 6)
;
%ret = call ptr @strpbrk(ptr @hello, ptr @w)
diff --git a/llvm/test/Transforms/InstCombine/strrchr-1.ll b/llvm/test/Transforms/InstCombine/strrchr-1.ll
index 661e040f8042..0c876b9d2a98 100644
--- a/llvm/test/Transforms/InstCombine/strrchr-1.ll
+++ b/llvm/test/Transforms/InstCombine/strrchr-1.ll
@@ -12,7 +12,7 @@ declare ptr @strrchr(ptr, i32)
define void @test_simplify1() {
; CHECK-LABEL: @test_simplify1(
-; CHECK-NEXT: store ptr getelementptr inbounds ([14 x i8], ptr @hello, i32 0, i32 6), ptr @chp, align 4
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @hello, i32 6), ptr @chp, align 4
; CHECK-NEXT: ret void
;
@@ -34,7 +34,7 @@ define void @test_simplify2() {
define void @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
-; CHECK-NEXT: store ptr getelementptr inbounds ([14 x i8], ptr @hello, i32 0, i32 13), ptr @chp, align 4
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @hello, i32 13), ptr @chp, align 4
; CHECK-NEXT: ret void
;
@@ -45,7 +45,7 @@ define void @test_simplify3() {
define void @test_simplify4() {
; CHECK-LABEL: @test_simplify4(
-; CHECK-NEXT: store ptr getelementptr inbounds ([14 x i8], ptr @hello, i32 0, i32 13), ptr @chp, align 4
+; CHECK-NEXT: store ptr getelementptr inbounds (i8, ptr @hello, i32 13), ptr @chp, align 4
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/InstCombine/strrchr-3.ll b/llvm/test/Transforms/InstCombine/strrchr-3.ll
index 1dadb0487871..f25504a8db2b 100644
--- a/llvm/test/Transforms/InstCombine/strrchr-3.ll
+++ b/llvm/test/Transforms/InstCombine/strrchr-3.ll
@@ -13,7 +13,7 @@ define ptr @fold_strrchr_sp10_x(i32 %c) {
; CHECK-LABEL: @fold_strrchr_sp10_x(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[C:%.*]] to i8
; CHECK-NEXT: [[MEMRCHR_CHAR0CMP:%.*]] = icmp eq i8 [[TMP1]], 0
-; CHECK-NEXT: [[MEMRCHR_SEL:%.*]] = select i1 [[MEMRCHR_CHAR0CMP]], ptr getelementptr inbounds ([11 x i8], ptr @s10, i64 0, i64 10), ptr null
+; CHECK-NEXT: [[MEMRCHR_SEL:%.*]] = select i1 [[MEMRCHR_CHAR0CMP]], ptr getelementptr inbounds (i8, ptr @s10, i64 10), ptr null
; CHECK-NEXT: ret ptr [[MEMRCHR_SEL]]
;
%psp10 = getelementptr [11 x i8], ptr @s10, i32 0, i32 10
@@ -26,7 +26,7 @@ define ptr @fold_strrchr_sp10_x(i32 %c) {
define ptr @call_strrchr_sp9_x(i32 %c) {
; CHECK-LABEL: @call_strrchr_sp9_x(
-; CHECK-NEXT: [[MEMRCHR:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(2) getelementptr inbounds ([11 x i8], ptr @s10, i64 0, i64 9), i32 [[C:%.*]], i64 2)
+; CHECK-NEXT: [[MEMRCHR:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(2) getelementptr inbounds (i8, ptr @s10, i64 9), i32 [[C:%.*]], i64 2)
; CHECK-NEXT: ret ptr [[MEMRCHR]]
;
%psp9 = getelementptr [11 x i8], ptr @s10, i32 0, i32 9
@@ -40,7 +40,7 @@ define ptr @call_strrchr_sp9_x(i32 %c) {
define ptr @call_strrchr_sp2_x(i32 %c) {
; CHECK-LABEL: @call_strrchr_sp2_x(
-; CHECK-NEXT: [[MEMRCHR:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(9) getelementptr inbounds ([11 x i8], ptr @s10, i64 0, i64 2), i32 [[C:%.*]], i64 9)
+; CHECK-NEXT: [[MEMRCHR:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(9) getelementptr inbounds (i8, ptr @s10, i64 2), i32 [[C:%.*]], i64 9)
; CHECK-NEXT: ret ptr [[MEMRCHR]]
;
%psp2 = getelementptr [11 x i8], ptr @s10, i32 0, i32 2
@@ -53,7 +53,7 @@ define ptr @call_strrchr_sp2_x(i32 %c) {
define ptr @call_strrchr_sp1_x(i32 %c) {
; CHECK-LABEL: @call_strrchr_sp1_x(
-; CHECK-NEXT: [[MEMRCHR:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(10) getelementptr inbounds ([11 x i8], ptr @s10, i64 0, i64 1), i32 [[C:%.*]], i64 10)
+; CHECK-NEXT: [[MEMRCHR:%.*]] = call ptr @memrchr(ptr noundef nonnull dereferenceable(10) getelementptr inbounds (i8, ptr @s10, i64 1), i32 [[C:%.*]], i64 10)
; CHECK-NEXT: ret ptr [[MEMRCHR]]
;
%psp1 = getelementptr [11 x i8], ptr @s10, i32 0, i32 1
diff --git a/llvm/test/Transforms/InstCombine/strstr-1.ll b/llvm/test/Transforms/InstCombine/strstr-1.ll
index 50edbfffb9f8..b5f4a2ce288d 100644
--- a/llvm/test/Transforms/InstCombine/strstr-1.ll
+++ b/llvm/test/Transforms/InstCombine/strstr-1.ll
@@ -37,7 +37,7 @@ define ptr @test_simplify2(ptr %str) {
define ptr @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
-; CHECK-NEXT: ret ptr getelementptr inbounds ([6 x i8], ptr @.str2, i64 0, i64 1)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @.str2, i64 1)
;
%ret = call ptr @strstr(ptr @.str2, ptr @.str3)
ret ptr %ret
diff --git a/llvm/test/Transforms/InstCombine/vec_demanded_elts-inseltpoison.ll b/llvm/test/Transforms/InstCombine/vec_demanded_elts-inseltpoison.ll
index 738ef1bc1ad2..74465cde86ad 100644
--- a/llvm/test/Transforms/InstCombine/vec_demanded_elts-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/vec_demanded_elts-inseltpoison.ll
@@ -566,7 +566,7 @@ define ptr @gep_cvbase_w_s_idx(<2 x ptr> %base, i64 %raw_addr) {
define ptr @gep_cvbase_w_cv_idx(<2 x ptr> %base, i64 %raw_addr) {
; CHECK-LABEL: @gep_cvbase_w_cv_idx(
-; CHECK-NEXT: ret ptr getelementptr inbounds (i32, ptr @GLOBAL, i64 1)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @GLOBAL, i64 4)
;
%gep = getelementptr i32, <2 x ptr> <ptr @GLOBAL, ptr @GLOBAL>, <2 x i64> <i64 0, i64 1>
%ee = extractelement <2 x ptr> %gep, i32 1
diff --git a/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll b/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll
index fd55a236e0d7..d8a3b87f78ee 100644
--- a/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll
+++ b/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll
@@ -569,7 +569,7 @@ define ptr @gep_cvbase_w_s_idx(<2 x ptr> %base, i64 %raw_addr) {
define ptr @gep_cvbase_w_cv_idx(<2 x ptr> %base, i64 %raw_addr) {
; CHECK-LABEL: @gep_cvbase_w_cv_idx(
-; CHECK-NEXT: ret ptr getelementptr inbounds (i32, ptr @GLOBAL, i64 1)
+; CHECK-NEXT: ret ptr getelementptr inbounds (i8, ptr @GLOBAL, i64 4)
;
%gep = getelementptr i32, <2 x ptr> <ptr @GLOBAL, ptr @GLOBAL>, <2 x i64> <i64 0, i64 1>
%ee = extractelement <2 x ptr> %gep, i32 1
diff --git a/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll b/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll
index ef085d3e7b50..a9cdc8bd2024 100644
--- a/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll
@@ -92,6 +92,23 @@ define <4 x float> @test8(<4 x float> %x, <4 x float> %y) {
%t2 = extractelement <4 x float> %x, i32 3
%t1 = extractelement <4 x float> %y, i32 0
%t128 = insertelement <4 x float> poison, float %t4, i32 0
+ %t130 = insertelement <4 x float> %t128, float poison, i32 1
+ %t132 = insertelement <4 x float> %t130, float %t2, i32 2
+ %t134 = insertelement <4 x float> %t132, float %t1, i32 3
+ ret <4 x float> %t134
+}
+
+; This shouldn't turn into a single shuffle.
+define <4 x float> @test8_undef(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: @test8_undef(
+; CHECK-NEXT: [[T132:%.*]] = shufflevector <4 x float> [[X:%.*]], <4 x float> <float poison, float undef, float poison, float poison>, <4 x i32> <i32 1, i32 5, i32 3, i32 poison>
+; CHECK-NEXT: [[T134:%.*]] = shufflevector <4 x float> [[T132]], <4 x float> [[Y:%.*]], <4 x i32> <i32 0, i32 1, i32 2, i32 4>
+; CHECK-NEXT: ret <4 x float> [[T134]]
+;
+ %t4 = extractelement <4 x float> %x, i32 1
+ %t2 = extractelement <4 x float> %x, i32 3
+ %t1 = extractelement <4 x float> %y, i32 0
+ %t128 = insertelement <4 x float> poison, float %t4, i32 0
%t130 = insertelement <4 x float> %t128, float undef, i32 1
%t132 = insertelement <4 x float> %t130, float %t2, i32 2
%t134 = insertelement <4 x float> %t132, float %t1, i32 3
diff --git a/llvm/test/Transforms/InstCombine/vec_shuffle.ll b/llvm/test/Transforms/InstCombine/vec_shuffle.ll
index 919e30f672e4..8c91efb473fa 100644
--- a/llvm/test/Transforms/InstCombine/vec_shuffle.ll
+++ b/llvm/test/Transforms/InstCombine/vec_shuffle.ll
@@ -88,10 +88,11 @@ define <4 x float> @test7(<4 x float> %x) {
ret <4 x float> %r
}
-; This should turn into a single shuffle.
+; This should not turn into a single shuffle.
define <4 x float> @test8(<4 x float> %x, <4 x float> %y) {
; CHECK-LABEL: @test8(
-; CHECK-NEXT: [[T134:%.*]] = shufflevector <4 x float> [[X:%.*]], <4 x float> [[Y:%.*]], <4 x i32> <i32 1, i32 poison, i32 3, i32 4>
+; CHECK-NEXT: [[T132:%.*]] = shufflevector <4 x float> [[X:%.*]], <4 x float> <float poison, float undef, float poison, float poison>, <4 x i32> <i32 1, i32 5, i32 3, i32 poison>
+; CHECK-NEXT: [[T134:%.*]] = shufflevector <4 x float> [[T132]], <4 x float> [[Y:%.*]], <4 x i32> <i32 0, i32 1, i32 2, i32 4>
; CHECK-NEXT: ret <4 x float> [[T134]]
;
%t4 = extractelement <4 x float> %x, i32 1
@@ -1280,6 +1281,17 @@ define <2 x float> @fneg(<2 x float> %x) {
ret <2 x float> %r
}
+define <2 x float> @fneg_not_single_source(<2 x float> %x) {
+; CHECK-LABEL: @fneg_not_single_source(
+; CHECK-NEXT: [[NEG:%.*]] = fneg <2 x float> [[X:%.*]]
+; CHECK-NEXT: [[SPLAT1:%.*]] = insertelement <2 x float> [[NEG]], float undef, i64 1
+; CHECK-NEXT: ret <2 x float> [[SPLAT1]]
+;
+ %neg = fneg <2 x float> %x
+ %splat = shufflevector <2 x float> %neg, <2 x float> undef, <2 x i32> <i32 0, i32 2>
+ ret <2 x float> %splat
+}
+
define <2 x float> @fmul_splat_constant(<2 x float> %x) {
; CHECK-LABEL: @fmul_splat_constant(
; CHECK-NEXT: [[TMP1:%.*]] = fmul <2 x float> [[X:%.*]], <float 4.200000e+01, float poison>
@@ -2330,3 +2342,38 @@ define <4 x i16> @blend_elements_from_load(ptr align 8 %_0) {
%rv = shufflevector <3 x i16> <i16 0, i16 undef, i16 undef>, <3 x i16> %load, <4 x i32> <i32 0, i32 1, i32 3, i32 5>
ret <4 x i16> %rv
}
+
+define i16 @pr92887(<2 x i16> %v) {
+; CHECK-LABEL: @pr92887(
+; CHECK-NEXT: ret i16 undef
+;
+ %v0 = extractelement <2 x i16> %v, i64 0
+ %v0lo = and i16 %v0, 1
+ %v1 = extractelement <2 x i16> %v, i64 1
+ %v1lo = and i16 %v1, 1
+ %ins1 = insertelement <4 x i16> poison, i16 %v0lo, i64 0
+ %ins2 = insertelement <4 x i16> %ins1, i16 %v1lo, i64 1
+ %shuf = shufflevector <4 x i16> %ins2, <4 x i16> <i16 poison, i16 poison, i16 undef, i16 undef>, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+ %extract = extractelement <4 x i16> %shuf, i32 2
+ ret i16 %extract
+}
+
+define <2 x i32> @not_splat_shuffle1(i32 %x) {
+; CHECK-LABEL: @not_splat_shuffle1(
+; CHECK-NEXT: [[SHUF:%.*]] = insertelement <2 x i32> <i32 poison, i32 undef>, i32 [[X:%.*]], i64 0
+; CHECK-NEXT: ret <2 x i32> [[SHUF]]
+;
+ %vec = insertelement <2 x i32> undef, i32 %x, i32 1
+ %shuf = shufflevector <2 x i32> %vec, <2 x i32> poison, <2 x i32> <i32 1, i32 0>
+ ret <2 x i32> %shuf
+}
+
+define <2 x i32> @not_splat_shuffle2(i32 %x) {
+; CHECK-LABEL: @not_splat_shuffle2(
+; CHECK-NEXT: [[SHUF:%.*]] = insertelement <2 x i32> <i32 poison, i32 undef>, i32 [[X:%.*]], i64 0
+; CHECK-NEXT: ret <2 x i32> [[SHUF]]
+;
+ %vec = insertelement <2 x i32> poison, i32 %x, i32 1
+ %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <2 x i32> <i32 1, i32 3>
+ ret <2 x i32> %shuf
+}
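
The test8 comment flip earlier in this file reads as an undef-versus-poison soundness fix (my reading; the commit message is not shown): lane 1 of the result is undef in the input IR, but a single two-source shuffle could only express that lane as poison in its mask, and undef may not be rewritten to poison:

;   undef lane:  each use may independently pick any value.
;   poison lane: propagates through users and can become UB.
;   undef -> poison makes the value strictly worse, so the one-shuffle
;   form is no longer a legal replacement and two shuffles are kept.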
diff --git a/llvm/test/Transforms/InstCombine/wcslen-1.ll b/llvm/test/Transforms/InstCombine/wcslen-1.ll
index 138b3ff585c5..8833754a5367 100644
--- a/llvm/test/Transforms/InstCombine/wcslen-1.ll
+++ b/llvm/test/Transforms/InstCombine/wcslen-1.ll
@@ -231,7 +231,7 @@ define i64 @fold_wcslen_1() {
; with an offset that isn't a multiple of the element size).
define i64 @no_fold_wcslen_1() {
; CHECK-LABEL: @no_fold_wcslen_1(
-; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @wcslen(ptr getelementptr ([15 x i8], ptr @ws, i64 0, i64 3))
+; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @wcslen(ptr nonnull getelementptr inbounds (i8, ptr @ws, i64 3))
; CHECK-NEXT: ret i64 [[LEN]]
;
%p = getelementptr [15 x i8], ptr @ws, i64 0, i64 3
@@ -246,7 +246,7 @@ define i64 @no_fold_wcslen_1() {
; with an offset that isn't a multiple of the element size).
define i64 @no_fold_wcslen_2() {
; CHECK-LABEL: @no_fold_wcslen_2(
-; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @wcslen(ptr nonnull getelementptr inbounds ([10 x i8], ptr @s8, i64 0, i64 3))
+; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @wcslen(ptr nonnull getelementptr inbounds (i8, ptr @s8, i64 3))
; CHECK-NEXT: ret i64 [[LEN]]
;
%p = getelementptr [10 x i8], ptr @s8, i64 0, i64 3
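
Both no-fold cases above come down to the same modular arithmetic: assuming the 4-byte wide characters this file is configured for, a byte offset of 3 never lands on an element boundary, so the wcslen call cannot be constant-folded:

;   3 mod sizeof(wchar_t) = 3 mod 4 = 3 != 0, so @ws+3 and @s8+3 do not
;   point at the start of any wide character.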
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/gep-alias.ll b/llvm/test/Transforms/InstSimplify/ConstProp/gep-alias.ll
index f77a49e90be7..097ccfe78e97 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/gep-alias.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/gep-alias.ll
@@ -14,7 +14,7 @@ target triple = "x86_64-unknown-linux-gnu"
define ptr @f() {
; CHECK-LABEL: define ptr @f() {
-; CHECK-NEXT: ret ptr getelementptr ([3 x ptr], ptr @b, i64 0, i64 1)
+; CHECK-NEXT: ret ptr getelementptr (i8, ptr @b, i64 8)
;
ret ptr getelementptr ([3 x ptr], ptr @b, i64 0, i64 1)
}
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/gep-constanfolding-error.ll b/llvm/test/Transforms/InstSimplify/ConstProp/gep-constanfolding-error.ll
index bcba5ce3aa7e..e5287a45da4b 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/gep-constanfolding-error.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/gep-constanfolding-error.ll
@@ -44,8 +44,7 @@ entry:
%9 = add i32 %f.promoted, %smax
%10 = add i32 %9, 2
call void @llvm.memset.p0.i32(ptr %scevgep, i8 %conv6, i32 %10, i1 false)
-; CHECK: call void @llvm.memset.p0.i32(ptr getelementptr inbounds ([6 x [6 x [7 x i8]]], ptr @j, i32 0, i{{32|64}} 5, i{{32|64}} 4, i32 1), i8 %conv6, i32 1, i1 false)
-; CHECK-NOT: call void @llvm.memset.p0.i32(ptr getelementptr ([6 x [6 x [7 x i8]]], ptr @j, i64 1, i64 4, i64 4, i32 1)
+; CHECK: call void @llvm.memset.p0.i32(ptr getelementptr inbounds (i8, ptr @j, i32 239), i8 %conv6, i32 1, i1 false)
ret i32 0
}
; Function Attrs: argmemonly nounwind
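
The folded offset in the surviving CHECK line can be verified by hand from the type of @j, [6 x [6 x [7 x i8]]] (arithmetic mine, not from the commit):

;   sizeof([7 x i8])       = 7 bytes
;   sizeof([6 x [7 x i8]]) = 42 bytes
;   indices (5, 4, 1)      -> 5*42 + 4*7 + 1*1 = 239 bytes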
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/gep.ll b/llvm/test/Transforms/InstSimplify/ConstProp/gep.ll
index d91349a570b7..b3fe7f36ff97 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/gep.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/gep.ll
@@ -11,21 +11,21 @@ target triple = "x86_64-unknown-linux-gnu"
define ptr @f0() {
; CHECK-LABEL: @f0(
-; CHECK-NEXT: ret ptr getelementptr inbounds inrange(-16, 8) ([3 x ptr], ptr @vt, i64 0, i64 2)
+; CHECK-NEXT: ret ptr getelementptr inbounds inrange(-16, 8) (i8, ptr @vt, i64 16)
;
ret ptr getelementptr (ptr, ptr getelementptr inbounds inrange(-8, 16) ([3 x ptr], ptr @vt, i64 0, i64 1), i64 1)
}
define ptr @f1() {
; CHECK-LABEL: @f1(
-; CHECK-NEXT: ret ptr getelementptr inbounds inrange(-8, 0) ([3 x ptr], ptr @vt, i64 0, i64 2)
+; CHECK-NEXT: ret ptr getelementptr inbounds inrange(-8, 0) (i8, ptr @vt, i64 16)
;
ret ptr getelementptr (ptr, ptr getelementptr inbounds inrange(0, 8) ([3 x ptr], ptr @vt, i64 0, i64 1), i64 1)
}
define ptr @f2() {
; CHECK-LABEL: @f2(
-; CHECK-NEXT: ret ptr getelementptr inrange(-24, -16) ([3 x ptr], ptr @vt, i64 1, i64 1)
+; CHECK-NEXT: ret ptr getelementptr inrange(-24, -16) (i8, ptr @vt, i64 32)
;
ret ptr getelementptr (ptr, ptr getelementptr inbounds inrange(0, 8) ([3 x ptr], ptr @vt, i64 0, i64 1), i64 3)
}
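
The inrange bounds above are relative to the GEP's result pointer, so they shift as the outer pointer-sized step is folded in (offsets hand-checked against the CHECK lines; @vt is the test's [3 x ptr] global):

;   f0: inner gep = @vt+8, inrange(-8, 16) -> absolute [@vt+0, @vt+24)
;       result @vt+16                      -> relative inrange(-16, 8)
;   f1: inner gep = @vt+8, inrange(0, 8)   -> absolute [@vt+8, @vt+16)
;       result @vt+16                      -> relative inrange(-8, 0)
;   f2: same absolute range, result @vt+32 -> relative inrange(-24, -16)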
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/icmp-global.ll b/llvm/test/Transforms/InstSimplify/ConstProp/icmp-global.ll
index b4afb7bd4a2b..1d7ed23d3e82 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/icmp-global.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/icmp-global.ll
@@ -121,7 +121,7 @@ define i1 @global_gep_ugt_null() {
define i1 @global_gep_sgt_null() {
; CHECK-LABEL: @global_gep_sgt_null(
-; CHECK-NEXT: ret i1 icmp sgt (ptr getelementptr inbounds ([2 x i32], ptr @g, i64 1), ptr null)
+; CHECK-NEXT: ret i1 icmp sgt (ptr getelementptr inbounds (i8, ptr @g, i64 8), ptr null)
;
%gep = getelementptr inbounds [2 x i32], ptr @g, i64 1
%cmp = icmp sgt ptr %gep, null
@@ -222,7 +222,7 @@ define i1 @global_gep_ugt_global() {
define i1 @global_gep_sgt_global() {
; CHECK-LABEL: @global_gep_sgt_global(
-; CHECK-NEXT: ret i1 icmp sgt (ptr getelementptr inbounds ([2 x i32], ptr @g, i64 1), ptr @g)
+; CHECK-NEXT: ret i1 icmp sgt (ptr getelementptr inbounds (i8, ptr @g, i64 8), ptr @g)
;
%gep = getelementptr inbounds [2 x i32], ptr @g, i64 1
%cmp = icmp sgt ptr %gep, @g
@@ -232,7 +232,7 @@ define i1 @global_gep_sgt_global() {
; This should not fold to true, as the offset is negative.
define i1 @global_gep_ugt_global_neg_offset() {
; CHECK-LABEL: @global_gep_ugt_global_neg_offset(
-; CHECK-NEXT: ret i1 icmp ugt (ptr getelementptr ([2 x i32], ptr @g, i64 -1), ptr @g)
+; CHECK-NEXT: ret i1 icmp ugt (ptr getelementptr (i8, ptr @g, i64 -8), ptr @g)
;
%gep = getelementptr [2 x i32], ptr @g, i64 -1
%cmp = icmp ugt ptr %gep, @g
@@ -241,7 +241,7 @@ define i1 @global_gep_ugt_global_neg_offset() {
define i1 @global_gep_sgt_global_neg_offset() {
; CHECK-LABEL: @global_gep_sgt_global_neg_offset(
-; CHECK-NEXT: ret i1 icmp sgt (ptr getelementptr ([2 x i32], ptr @g, i64 -1), ptr @g)
+; CHECK-NEXT: ret i1 icmp sgt (ptr getelementptr (i8, ptr @g, i64 -8), ptr @g)
;
%gep = getelementptr [2 x i32], ptr @g, i64 -1
%cmp = icmp sgt ptr %gep, @g
@@ -260,7 +260,7 @@ define i1 @global_gep_ugt_global_gep() {
; Should not fold due to signed comparison.
define i1 @global_gep_sgt_global_gep() {
; CHECK-LABEL: @global_gep_sgt_global_gep(
-; CHECK-NEXT: ret i1 icmp sgt (ptr getelementptr inbounds ([2 x i32], ptr @g, i64 0, i64 1), ptr @g)
+; CHECK-NEXT: ret i1 icmp sgt (ptr getelementptr inbounds (i8, ptr @g, i64 4), ptr @g)
;
%gep2 = getelementptr inbounds [2 x i32], ptr @g, i64 0, i64 1
%cmp = icmp sgt ptr %gep2, @g
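
The compares kept as constant expressions above appear deliberate: inbounds only constrains the unsigned offset within the object, so a signed sgt against the base cannot be decided, and without inbounds a negative offset may wrap below the object. Only the GEP spelling changes ([2 x i32] index 1 becoming 8 bytes, element index 1 becoming 4 bytes):

;   gep inbounds (i8, @g, 8)  ugt @g : foldable, offset is in [0, size]
;   gep inbounds (i8, @g, 8)  sgt @g : kept, signed placement unknown
;   gep          (i8, @g, -8) vs @g  : kept, the address may wrap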
diff --git a/llvm/test/Transforms/InstSimplify/compare.ll b/llvm/test/Transforms/InstSimplify/compare.ll
index 724912d90bd8..0f72cd813f2f 100644
--- a/llvm/test/Transforms/InstSimplify/compare.ll
+++ b/llvm/test/Transforms/InstSimplify/compare.ll
@@ -3078,7 +3078,7 @@ define i1 @globals_inequal() {
; TODO: Never equal
define i1 @globals_offset_inequal() {
; CHECK-LABEL: @globals_offset_inequal(
-; CHECK-NEXT: ret i1 icmp ne (ptr getelementptr (i8, ptr @A, i32 1), ptr getelementptr (i8, ptr @B, i32 1))
+; CHECK-NEXT: ret i1 icmp ne (ptr getelementptr inbounds (i8, ptr @A, i32 1), ptr getelementptr inbounds (i8, ptr @B, i32 1))
;
%a.off = getelementptr i8, ptr @A, i32 1
%b.off = getelementptr i8, ptr @B, i32 1
diff --git a/llvm/test/Transforms/InstSimplify/known-non-zero.ll b/llvm/test/Transforms/InstSimplify/known-non-zero.ll
index fd2862eb04a2..965c333d306d 100644
--- a/llvm/test/Transforms/InstSimplify/known-non-zero.ll
+++ b/llvm/test/Transforms/InstSimplify/known-non-zero.ll
@@ -400,3 +400,186 @@ define i1 @nonzero_reduce_or_fail(<2 x i8> %xx) {
%r = icmp eq i8 %v, 0
ret i1 %r
}
+
+define i1 @src_x_add_x_eq_0(i8 %x) {
+; CHECK-LABEL: @src_x_add_x_eq_0(
+; CHECK-NEXT: ret i1 false
+;
+ %x_eq_0 = icmp eq i8 %x, 0
+ %y = zext i1 %x_eq_0 to i8
+ %v = add i8 %x, %y
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_add_x_eq_1_fail(i8 %x) {
+; CHECK-LABEL: @src_x_add_x_eq_1_fail(
+; CHECK-NEXT: [[X_EQ_1:%.*]] = icmp eq i8 [[X:%.*]], 1
+; CHECK-NEXT: [[Y:%.*]] = zext i1 [[X_EQ_1]] to i8
+; CHECK-NEXT: [[V:%.*]] = add i8 [[X]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[V]], 0
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %x_eq_1 = icmp eq i8 %x, 1
+ %y = zext i1 %x_eq_1 to i8
+ %v = add i8 %x, %y
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_or_x_eq_0(i8 %x) {
+; CHECK-LABEL: @src_x_or_x_eq_0(
+; CHECK-NEXT: ret i1 false
+;
+ %x_eq_0 = icmp eq i8 %x, 0
+ %y = sext i1 %x_eq_0 to i8
+ %v = or i8 %x, %y
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_or_x_sle_0_fail(i8 %x) {
+; CHECK-LABEL: @src_x_or_x_sle_0_fail(
+; CHECK-NEXT: [[X_EQ_0:%.*]] = icmp sle i8 [[X:%.*]], 0
+; CHECK-NEXT: [[Y:%.*]] = sext i1 [[X_EQ_0]] to i8
+; CHECK-NEXT: [[V:%.*]] = or i8 [[X]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[V]], 0
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %x_eq_0 = icmp sle i8 %x, 0
+ %y = sext i1 %x_eq_0 to i8
+ %v = or i8 %x, %y
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_xor_x_eq_0(i8 %x) {
+; CHECK-LABEL: @src_x_xor_x_eq_0(
+; CHECK-NEXT: ret i1 false
+;
+ %x_eq_0 = icmp eq i8 %x, 0
+ %y = zext i1 %x_eq_0 to i8
+ %v = xor i8 %x, %y
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_xor_x_ne_0_fail(i8 %x) {
+; CHECK-LABEL: @src_x_xor_x_ne_0_fail(
+; CHECK-NEXT: [[X_NE_0:%.*]] = icmp ne i8 [[X:%.*]], 0
+; CHECK-NEXT: [[Y:%.*]] = zext i1 [[X_NE_0]] to i8
+; CHECK-NEXT: [[V:%.*]] = xor i8 [[X]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[V]], 0
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %x_ne_0 = icmp ne i8 %x, 0
+ %y = zext i1 %x_ne_0 to i8
+ %v = xor i8 %x, %y
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_sub0_x_eq_0(i8 %x) {
+; CHECK-LABEL: @src_x_sub0_x_eq_0(
+; CHECK-NEXT: ret i1 false
+;
+ %x_eq_0 = icmp eq i8 %x, 0
+ %y = sext i1 %x_eq_0 to i8
+ %v = sub i8 %x, %y
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_sub0_z_eq_0_fail(i8 %x, i8 %z) {
+; CHECK-LABEL: @src_x_sub0_z_eq_0_fail(
+; CHECK-NEXT: [[Z_EQ_0:%.*]] = icmp eq i8 [[Z:%.*]], 0
+; CHECK-NEXT: [[Y:%.*]] = sext i1 [[Z_EQ_0]] to i8
+; CHECK-NEXT: [[V:%.*]] = sub i8 [[X:%.*]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[V]], 0
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %z_eq_0 = icmp eq i8 %z, 0
+ %y = sext i1 %z_eq_0 to i8
+ %v = sub i8 %x, %y
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_sub1_x_eq_0(i8 %x) {
+; CHECK-LABEL: @src_x_sub1_x_eq_0(
+; CHECK-NEXT: ret i1 false
+;
+ %x_eq_0 = icmp eq i8 %x, 0
+ %y = zext i1 %x_eq_0 to i8
+ %v = sub i8 %y, %x
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_sub1_x_eq_0_or_fail(i8 %x, i1 %c1) {
+; CHECK-LABEL: @src_x_sub1_x_eq_0_or_fail(
+; CHECK-NEXT: [[X_EQ_0:%.*]] = icmp eq i8 [[X:%.*]], 0
+; CHECK-NEXT: [[X_EQ_0_OR:%.*]] = or i1 [[X_EQ_0]], [[C1:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = zext i1 [[X_EQ_0_OR]] to i8
+; CHECK-NEXT: [[V:%.*]] = sub i8 [[Y]], [[X]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[V]], 0
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %x_eq_0 = icmp eq i8 %x, 0
+ %x_eq_0_or = or i1 %x_eq_0, %c1
+ %y = zext i1 %x_eq_0_or to i8
+ %v = sub i8 %y, %x
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_umax_x_eq_0(i8 %x) {
+; CHECK-LABEL: @src_x_umax_x_eq_0(
+; CHECK-NEXT: ret i1 false
+;
+ %x_eq_0 = icmp eq i8 %x, 0
+ %y = sext i1 %x_eq_0 to i8
+ %v = call i8 @llvm.umax.i8(i8 %y, i8 %x)
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_umax_x_ugt_10_fail(i8 %x) {
+; CHECK-LABEL: @src_x_umax_x_ugt_10_fail(
+; CHECK-NEXT: [[X_UGT_10:%.*]] = icmp ugt i8 [[X:%.*]], 10
+; CHECK-NEXT: [[Y:%.*]] = sext i1 [[X_UGT_10]] to i8
+; CHECK-NEXT: [[V:%.*]] = call i8 @llvm.umax.i8(i8 [[Y]], i8 [[X]])
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[V]], 0
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %x_ugt_10 = icmp ugt i8 %x, 10
+ %y = sext i1 %x_ugt_10 to i8
+ %v = call i8 @llvm.umax.i8(i8 %y, i8 %x)
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_uadd.sat_x_eq_0(i8 %x) {
+; CHECK-LABEL: @src_x_uadd.sat_x_eq_0(
+; CHECK-NEXT: ret i1 false
+;
+ %x_eq_0 = icmp eq i8 %x, 0
+ %y = zext i1 %x_eq_0 to i8
+ %v = call i8 @llvm.uadd.sat.i8(i8 %y, i8 %x)
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_uadd.sat_c1_fail(i8 %x, i1 %c1) {
+; CHECK-LABEL: @src_x_uadd.sat_c1_fail(
+; CHECK-NEXT: [[Y:%.*]] = zext i1 [[C1:%.*]] to i8
+; CHECK-NEXT: [[V:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[Y]], i8 [[X:%.*]])
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[V]], 0
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %y = zext i1 %c1 to i8
+ %v = call i8 @llvm.uadd.sat.i8(i8 %y, i8 %x)
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
diff --git a/llvm/test/Transforms/InstSimplify/past-the-end.ll b/llvm/test/Transforms/InstSimplify/past-the-end.ll
index 96339c1cdcf2..1e146d18327a 100644
--- a/llvm/test/Transforms/InstSimplify/past-the-end.ll
+++ b/llvm/test/Transforms/InstSimplify/past-the-end.ll
@@ -21,7 +21,7 @@ define zeroext i1 @no_offsets() {
define zeroext i1 @both_past_the_end() {
; CHECK-LABEL: @both_past_the_end(
-; CHECK-NEXT: ret i1 icmp eq (ptr getelementptr inbounds (i32, ptr @opte_a, i32 1), ptr getelementptr inbounds (i32, ptr @opte_b, i32 1))
+; CHECK-NEXT: ret i1 icmp eq (ptr getelementptr inbounds (i8, ptr @opte_a, i32 4), ptr getelementptr inbounds (i8, ptr @opte_b, i32 4))
;
%x = getelementptr i32, ptr @opte_a, i32 1
%y = getelementptr i32, ptr @opte_b, i32 1
@@ -35,7 +35,7 @@ define zeroext i1 @both_past_the_end() {
define zeroext i1 @just_one_past_the_end() {
; CHECK-LABEL: @just_one_past_the_end(
-; CHECK-NEXT: ret i1 icmp eq (ptr getelementptr inbounds (i32, ptr @opte_a, i32 1), ptr @opte_b)
+; CHECK-NEXT: ret i1 icmp eq (ptr getelementptr inbounds (i8, ptr @opte_a, i32 4), ptr @opte_b)
;
%x = getelementptr i32, ptr @opte_a, i32 1
%t = icmp eq ptr %x, @opte_b
diff --git a/llvm/test/Transforms/InstSimplify/shufflevector.ll b/llvm/test/Transforms/InstSimplify/shufflevector.ll
index 460e90aa31d9..201950516160 100644
--- a/llvm/test/Transforms/InstSimplify/shufflevector.ll
+++ b/llvm/test/Transforms/InstSimplify/shufflevector.ll
@@ -249,13 +249,13 @@ define <8 x i64> @PR30630(<8 x i64> %x) {
; ret <2 x float> zeroinitializer
define <2 x float> @PR32872(<2 x float> %x) {
; CHECK-LABEL: @PR32872(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <2 x float> [[X:%.*]], <2 x float> zeroinitializer, <4 x i32> <i32 2, i32 2, i32 0, i32 1>
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x float> zeroinitializer, <4 x float> [[TMP1]], <2 x i32> <i32 4, i32 5>
-; CHECK-NEXT: ret <2 x float> [[TMP4]]
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <2 x float> [[X:%.*]], <2 x float> zeroinitializer, <4 x i32> <i32 2, i32 2, i32 0, i32 1>
+; CHECK-NEXT: [[SHUF2:%.*]] = shufflevector <4 x float> zeroinitializer, <4 x float> [[SHUF]], <2 x i32> <i32 4, i32 5>
+; CHECK-NEXT: ret <2 x float> [[SHUF2]]
;
- %tmp1 = shufflevector <2 x float> %x, <2 x float> zeroinitializer, <4 x i32> <i32 2, i32 2, i32 0, i32 1>
- %tmp4 = shufflevector <4 x float> zeroinitializer, <4 x float> %tmp1, <2 x i32> <i32 4, i32 5>
- ret <2 x float> %tmp4
+ %shuf = shufflevector <2 x float> %x, <2 x float> zeroinitializer, <4 x i32> <i32 2, i32 2, i32 0, i32 1>
+ %shuf2 = shufflevector <4 x float> zeroinitializer, <4 x float> %shuf, <2 x i32> <i32 4, i32 5>
+ ret <2 x float> %shuf2
}
define <5 x i8> @splat_inserted_constant(<4 x i8> %x) {
@@ -284,3 +284,56 @@ define <2 x i8> @splat_inserted_constant_not_canonical(<3 x i8> %x, <3 x i8> %y)
%splat2 = shufflevector <3 x i8> %y, <3 x i8> %ins2, <2 x i32> <i32 undef, i32 5>
ret <2 x i8> %splat2
}
+
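+; Two applications of the same reverse mask compose to the identity, so the
+; shuffle pair folds away.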
+define <4 x i32> @fold_identity(<4 x i32> %x) {
+; CHECK-LABEL: @fold_identity(
+; CHECK-NEXT: ret <4 x i32> [[X:%.*]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %revshuf = shufflevector <4 x i32> %shuf, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i32> %revshuf
+}
+
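+; The reverse/reverse pair still cancels when it is applied to another
+; instruction; only the shl remains (the variable-shift variant below checks
+; the same fold).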
+define <4 x i32> @fold_identity2(<4 x i32> %x) {
+; CHECK-LABEL: @fold_identity2(
+; CHECK-NEXT: [[SHL:%.*]] = shl <4 x i32> [[X:%.*]], <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT: ret <4 x i32> [[SHL]]
+;
+ %shl = shl <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
+ %shuf = shufflevector <4 x i32> %shl, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %revshuf = shufflevector <4 x i32> %shuf, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i32> %revshuf
+}
+
+define <4 x i32> @fold_identity3(<4 x i32> %x) {
+; CHECK-LABEL: @fold_identity3(
+; CHECK-NEXT: [[SHL:%.*]] = shl <4 x i32> [[X:%.*]], [[X]]
+; CHECK-NEXT: ret <4 x i32> [[SHL]]
+;
+ %shl = shl <4 x i32> %x, %x
+ %shuf = shufflevector <4 x i32> %shl, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %revshuf = shufflevector <4 x i32> %shuf, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i32> %revshuf
+}
+
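+; The two masks compose to <3, 2, 1, 0>, not the identity, so no fold.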
+define <4 x i32> @not_fold_identity(<4 x i32> %x) {
+; CHECK-LABEL: @not_fold_identity(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
+; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i32> [[SHUF]], <4 x i32> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+; CHECK-NEXT: ret <4 x i32> [[REVSHUF]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
+ %revshuf = shufflevector <4 x i32> %shuf, <4 x i32> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ ret <4 x i32> %revshuf
+}
+
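+; The two masks compose to <0, 1, 3, 2>, not the identity, so no fold.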
+define <4 x i32> @not_fold_identity2(<4 x i32> %x) {
+; CHECK-LABEL: @not_fold_identity2(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 2, i32 3, i32 1, i32 0>
+; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i32> [[SHUF]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <4 x i32> [[REVSHUF]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 2, i32 3, i32 1, i32 0>
+ %revshuf = shufflevector <4 x i32> %shuf, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i32> %revshuf
+}
diff --git a/llvm/test/Transforms/InterleavedAccess/AArch64/fixed-deinterleave-intrinsics.ll b/llvm/test/Transforms/InterleavedAccess/AArch64/fixed-deinterleave-intrinsics.ll
index 54348d1e2a48..24d624c221f4 100644
--- a/llvm/test/Transforms/InterleavedAccess/AArch64/fixed-deinterleave-intrinsics.ll
+++ b/llvm/test/Transforms/InterleavedAccess/AArch64/fixed-deinterleave-intrinsics.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: opt < %s -interleaved-access -S | FileCheck %s --check-prefix=NEON
-; RUN: opt < %s -interleaved-access -mtriple=aarch64-linux-gnu -mattr=+sve -force-streaming-compatible-sve -S | FileCheck %s --check-prefix=SVE-FIXED
+; RUN: opt < %s -interleaved-access -mtriple=aarch64-linux-gnu -mattr=+sve -force-streaming-compatible -S | FileCheck %s --check-prefix=SVE-FIXED
; RUN: opt < %s -passes=interleaved-access -S | FileCheck %s --check-prefix=NEON
-; RUN: opt < %s -passes=interleaved-access -mtriple=aarch64-linux-gnu -mattr=+sve -force-streaming-compatible-sve -S | FileCheck %s --check-prefix=SVE-FIXED
+; RUN: opt < %s -passes=interleaved-access -mtriple=aarch64-linux-gnu -mattr=+sve -force-streaming-compatible -S | FileCheck %s --check-prefix=SVE-FIXED
target triple = "aarch64-linux-gnu"
diff --git a/llvm/test/Transforms/LICM/scalar-promote-unwind.ll b/llvm/test/Transforms/LICM/scalar-promote-unwind.ll
index be11722d2d56..f7829c4d6e4d 100644
--- a/llvm/test/Transforms/LICM/scalar-promote-unwind.ll
+++ b/llvm/test/Transforms/LICM/scalar-promote-unwind.ll
@@ -304,7 +304,7 @@ define void @loop_within_tryblock() personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP0]], 1
; CHECK-NEXT: br label [[CATCH_DISPATCH:%.*]]
; CHECK: catch.dispatch:
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi)
; CHECK-NEXT: [[MATCHES:%.*]] = icmp eq i32 [[TMP2]], [[TMP3]]
; CHECK-NEXT: br i1 [[MATCHES]], label [[CATCH:%.*]], label [[EH_RESUME:%.*]]
; CHECK: catch:
@@ -355,7 +355,7 @@ lpad:
br label %catch.dispatch
catch.dispatch:
- %4 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi) #3
+ %4 = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi) #3
%matches = icmp eq i32 %3, %4
br i1 %matches, label %catch, label %eh.resume
@@ -564,6 +564,6 @@ declare ptr @__cxa_begin_catch(ptr)
declare void @__cxa_end_catch()
-declare i32 @llvm.eh.typeid.for(ptr)
+declare i32 @llvm.eh.typeid.for.p0(ptr)
declare void @f() uwtable
diff --git a/llvm/test/Transforms/LoopInterchange/pr43176-move-to-new-latch.ll b/llvm/test/Transforms/LoopInterchange/pr43176-move-to-new-latch.ll
index 965d95110da4..cc787fa55600 100644
--- a/llvm/test/Transforms/LoopInterchange/pr43176-move-to-new-latch.ll
+++ b/llvm/test/Transforms/LoopInterchange/pr43176-move-to-new-latch.ll
@@ -1,42 +1,25 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=loop-interchange -cache-line-size=64 -verify-loop-lcssa -verify-dom-info -S %s | FileCheck %s
+; RUN: opt < %s -passes=loop-interchange -cache-line-size=64 -pass-remarks-missed='loop-interchange' -pass-remarks-output=%t -S
+; RUN: FileCheck --input-file=%t %s
@b = external dso_local global [5 x i32], align 16
+;; Not profitable to interchange, because the access is invariant with respect to the inner j loop.
+;;
+;; for(int i=0;i<4;i++) {
+;; for(int j=1;j<4;j++) {
+;; b[i] = ....
+;; }
+;; }
+
+; CHECK: --- !Missed
+; CHECK-NEXT: Pass: loop-interchange
+; CHECK-NEXT: Name: InterchangeNotProfitable
+; CHECK-NEXT: Function: test1
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - String: Interchanging loops is not considered to improve cache locality nor vectorization.
+
define void @test1() {
-; CHECK-LABEL: @test1(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: br label [[FOR_BODY2_PREHEADER:%.*]]
-; CHECK: for.body.preheader:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[INC41:%.*]] = phi i32 [ [[INC4:%.*]], [[FOR_INC3:%.*]] ], [ undef, [[FOR_BODY_PREHEADER:%.*]] ]
-; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[INC41]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [5 x i32], ptr @b, i64 0, i64 [[IDXPROM]]
-; CHECK-NEXT: br label [[FOR_INC:%.*]]
-; CHECK: for.body2.preheader:
-; CHECK-NEXT: br label [[FOR_BODY2:%.*]]
-; CHECK: for.body2:
-; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32 [ [[TMP1:%.*]], [[FOR_INC_SPLIT:%.*]] ], [ 1, [[FOR_BODY2_PREHEADER]] ]
-; CHECK-NEXT: br label [[FOR_BODY_PREHEADER]]
-; CHECK: for.inc:
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: store i32 undef, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[LSR_IV]], 4
-; CHECK-NEXT: [[LSR_IV_NEXT:%.*]] = add nuw nsw i32 [[LSR_IV]], 1
-; CHECK-NEXT: br label [[FOR_COND1_FOR_END_CRIT_EDGE:%.*]]
-; CHECK: for.inc.split:
-; CHECK-NEXT: [[TMP1]] = add nuw nsw i32 [[LSR_IV]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i32 [[LSR_IV]], 4
-; CHECK-NEXT: br i1 [[TMP2]], label [[FOR_BODY2]], label [[FOR_COND_FOR_END5_CRIT_EDGE:%.*]]
-; CHECK: for.cond1.for.end_crit_edge:
-; CHECK-NEXT: br label [[FOR_INC3]]
-; CHECK: for.inc3:
-; CHECK-NEXT: [[INC4]] = add nsw i32 [[INC41]], 1
-; CHECK-NEXT: br i1 false, label [[FOR_BODY]], label [[FOR_INC_SPLIT]]
-; CHECK: for.cond.for.end5_crit_edge:
-; CHECK-NEXT: ret void
-;
entry:
br label %for.body
@@ -68,41 +51,15 @@ for.cond.for.end5_crit_edge: ; preds = %for.inc3
ret void
}
+
+; CHECK: --- !Missed
+; CHECK-NEXT: Pass: loop-interchange
+; CHECK-NEXT: Name: InterchangeNotProfitable
+; CHECK-NEXT: Function: test2
+; CHECK-NEXT: Args:
+; CHECK-NEXT: - String: Interchanging loops is not considered to improve cache locality nor vectorization.
+
define void @test2() {
-; CHECK-LABEL: @test2(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: br label [[FOR_BODY2_PREHEADER:%.*]]
-; CHECK: for.body.preheader:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[INC41:%.*]] = phi i32 [ [[INC4:%.*]], [[FOR_INC3:%.*]] ], [ undef, [[FOR_BODY_PREHEADER:%.*]] ]
-; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[INC41]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [5 x i32], ptr @b, i64 0, i64 [[IDXPROM]]
-; CHECK-NEXT: br label [[FOR_INC:%.*]]
-; CHECK: for.body2.preheader:
-; CHECK-NEXT: br label [[FOR_BODY2:%.*]]
-; CHECK: for.body2:
-; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32 [ [[TMP1:%.*]], [[FOR_INC_SPLIT:%.*]] ], [ 1, [[FOR_BODY2_PREHEADER]] ]
-; CHECK-NEXT: br label [[FOR_BODY_PREHEADER]]
-; CHECK: for.inc:
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[LSR_IV]], 4
-; CHECK-NEXT: [[CMP_ZEXT:%.*]] = zext i1 [[CMP]] to i32
-; CHECK-NEXT: store i32 [[CMP_ZEXT]], ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[LSR_IV_NEXT:%.*]] = add nuw nsw i32 [[LSR_IV]], 1
-; CHECK-NEXT: br label [[FOR_COND1_FOR_END_CRIT_EDGE:%.*]]
-; CHECK: for.inc.split:
-; CHECK-NEXT: [[TMP1]] = add nuw nsw i32 [[LSR_IV]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i32 [[LSR_IV]], 4
-; CHECK-NEXT: br i1 [[TMP2]], label [[FOR_BODY2]], label [[FOR_COND_FOR_END5_CRIT_EDGE:%.*]]
-; CHECK: for.cond1.for.end_crit_edge:
-; CHECK-NEXT: br label [[FOR_INC3]]
-; CHECK: for.inc3:
-; CHECK-NEXT: [[INC4]] = add nsw i32 [[INC41]], 1
-; CHECK-NEXT: br i1 false, label [[FOR_BODY]], label [[FOR_INC_SPLIT]]
-; CHECK: for.cond.for.end5_crit_edge:
-; CHECK-NEXT: ret void
-;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll b/llvm/test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll
index 552cd8803732..616e3ae1b036 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll
@@ -16,7 +16,7 @@ define void @vb() nounwind {
; CHECK-NEXT: for.cond.preheader:
; CHECK-NEXT: br label [[FOR_BODY7:%.*]]
; CHECK: for.body7:
-; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[SCEVGEP:%.*]], [[FOR_BODY7]] ], [ getelementptr inbounds ([121 x i32], ptr @b, i32 0, i32 1), [[FOR_COND_PREHEADER:%.*]] ]
+; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[SCEVGEP:%.*]], [[FOR_BODY7]] ], [ getelementptr inbounds (i8, ptr @b, i32 4), [[FOR_COND_PREHEADER:%.*]] ]
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32 [ [[LSR_IV_NEXT:%.*]], [[FOR_BODY7]] ], [ 8, [[FOR_COND_PREHEADER]] ]
; CHECK-NEXT: [[INDVARS_IV77:%.*]] = phi i32 [ [[INDVARS_IV_NEXT78:%.*]], [[FOR_BODY7]] ], [ 1, [[FOR_COND_PREHEADER]] ]
; CHECK-NEXT: [[INDVARS_IV_NEXT78]] = add i32 [[INDVARS_IV77]], 1
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll
index 7fef404eaf14..c4aa6c7725d4 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll
@@ -10,23 +10,23 @@ define i32 @test(ptr %base) nounwind uwtable ssp {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[WHILE_BODY_LR_PH_I:%.*]]
; CHECK: while.body.lr.ph.i:
-; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 16
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 16
; CHECK-NEXT: br label [[WHILE_BODY_I:%.*]]
; CHECK: while.body.i:
; CHECK-NEXT: [[INDVARS_IV7_I:%.*]] = phi i64 [ 16, [[WHILE_BODY_LR_PH_I]] ], [ [[INDVARS_IV_NEXT8_I:%.*]], [[COND_TRUE29_I:%.*]] ]
; CHECK-NEXT: [[I_05_I:%.*]] = phi i64 [ 0, [[WHILE_BODY_LR_PH_I]] ], [ [[INDVARS_IV7_I]], [[COND_TRUE29_I]] ]
; CHECK-NEXT: [[LSR4:%.*]] = trunc i64 [[I_05_I]] to i32
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[LSR4]] to i64
-; CHECK-NEXT: [[UGLYGEP1:%.*]] = getelementptr i8, ptr [[UGLYGEP]], i64 [[TMP0]]
+; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[TMP0]]
; CHECK-NEXT: [[SEXT_I:%.*]] = shl i64 [[I_05_I]], 32
; CHECK-NEXT: [[IDX_EXT_I:%.*]] = ashr exact i64 [[SEXT_I]], 32
; CHECK-NEXT: [[ADD_PTR_SUM_I:%.*]] = add i64 [[IDX_EXT_I]], 16
; CHECK-NEXT: br label [[FOR_BODY_I:%.*]]
; CHECK: for.body.i:
-; CHECK-NEXT: [[LSR_IV2:%.*]] = phi ptr [ [[UGLYGEP3:%.*]], [[FOR_BODY_I]] ], [ [[UGLYGEP1]], [[WHILE_BODY_I]] ]
+; CHECK-NEXT: [[LSR_IV2:%.*]] = phi ptr [ [[SCEVGEP3:%.*]], [[FOR_BODY_I]] ], [ [[SCEVGEP1]], [[WHILE_BODY_I]] ]
; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[LSR_IV2]], align 1
; CHECK-NEXT: [[CMP:%.*]] = call i1 @check() #[[ATTR3:[0-9]+]]
-; CHECK-NEXT: [[UGLYGEP3]] = getelementptr i8, ptr [[LSR_IV2]], i64 1
+; CHECK-NEXT: [[SCEVGEP3]] = getelementptr i8, ptr [[LSR_IV2]], i64 1
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_END_I:%.*]], label [[FOR_BODY_I]]
; CHECK: for.end.i:
; CHECK-NEXT: [[ADD_PTR_I144:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[ADD_PTR_SUM_I]]
@@ -96,18 +96,18 @@ define void @test2(i32 %n) nounwind uwtable {
; CHECK-NEXT: br label [[FOR_COND468:%.*]]
; CHECK: for.cond468:
; CHECK-NEXT: [[LSR_IV1:%.*]] = phi i32 [ 1, [[FOR_COND468_PREHEADER]] ], [ [[LSR_IV_NEXT:%.*]], [[IF_THEN477:%.*]] ]
-; CHECK-NEXT: [[LSR_IV:%.*]] = phi ptr [ getelementptr inbounds ([5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771], ptr @tags, i64 0, i64 0, i32 2), [[FOR_COND468_PREHEADER]] ], [ [[UGLYGEP:%.*]], [[IF_THEN477]] ]
+; CHECK-NEXT: [[LSR_IV:%.*]] = phi ptr [ getelementptr inbounds (i8, ptr @tags, i64 8), [[FOR_COND468_PREHEADER]] ], [ [[SCEVGEP:%.*]], [[IF_THEN477]] ]
; CHECK-NEXT: [[K_0:%.*]] = load i32, ptr [[LSR_IV]], align 4
; CHECK-NEXT: [[CMP469:%.*]] = icmp slt i32 [[LSR_IV1]], [[N:%.*]]
; CHECK-NEXT: br i1 [[CMP469]], label [[FOR_BODY471:%.*]], label [[FOR_INC498_PREHEADER:%.*]]
; CHECK: for.body471:
-; CHECK-NEXT: [[UGLYGEP2:%.*]] = getelementptr i8, ptr [[LSR_IV]], i64 8
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[UGLYGEP2]], align 4
+; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[LSR_IV]], i64 8
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[SCEVGEP2]], align 4
; CHECK-NEXT: br i1 false, label [[IF_THEN477]], label [[FOR_INC498_PREHEADER]]
; CHECK: for.inc498.preheader:
; CHECK-NEXT: br label [[FOR_INC498:%.*]]
; CHECK: if.then477:
-; CHECK-NEXT: [[UGLYGEP]] = getelementptr i8, ptr [[LSR_IV]], i64 12
+; CHECK-NEXT: [[SCEVGEP]] = getelementptr i8, ptr [[LSR_IV]], i64 12
; CHECK-NEXT: [[LSR_IV_NEXT]] = add nuw nsw i32 [[LSR_IV1]], 1
; CHECK-NEXT: br label [[FOR_COND468]]
; CHECK: for.inc498:
@@ -162,8 +162,8 @@ define fastcc void @test3(ptr nocapture %u) nounwind uwtable ssp {
; CHECK-NEXT: [[TMP:%.*]] = trunc i64 [[TMP0]] to i32
; CHECK-NEXT: [[MUL_I_US_I:%.*]] = mul nsw i32 0, [[TMP]]
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[INDVARS_IV_I_SV_PHI]], 3
-; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[U:%.*]], i64 [[TMP1]]
-; CHECK-NEXT: [[TMP2:%.*]] = load double, ptr [[UGLYGEP]], align 8
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[U:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = load double, ptr [[SCEVGEP]], align 8
; CHECK-NEXT: br i1 undef, label [[FOR_INC8_US_I:%.*]], label [[MESHBB]]
; CHECK: for.body3.lr.ph.us.i.loopexit:
; CHECK-NEXT: [[LSR_IV_NEXT:%.*]] = add i64 [[LSR_IV]], 1
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/pr40514.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/pr40514.ll
index 03b1aece9e87..a6bff63dfc71 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/pr40514.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/pr40514.ll
@@ -54,4 +54,4 @@ bb10: ; preds = %bb10, %bb
}
-attributes #0 = { "target-cpu"="broadwell" "target-features"="+sse2,+cx16,+sahf,-tbm,-avx512ifma,-sha,-gfni,-fma4,-vpclmulqdq,+prfchw,+bmi2,-cldemote,+fsgsbase,-ptwrite,-xsavec,+popcnt,+aes,-avx512bitalg,-movdiri,-xsaves,-avx512er,-avx512vnni,-avx512vpopcntdq,-pconfig,-clwb,-avx512f,-clzero,-pku,+mmx,-lwp,-rdpid,-xop,+rdseed,-waitpkg,-movdir64b,-sse4a,-avx512bw,-clflushopt,+xsave,-avx512vbmi2,+64bit,-avx512vl,+invpcid,-avx512cd,+avx,-vaes,+rtm,+fma,+bmi,+rdrnd,-mwaitx,+sse4.1,+sse4.2,+avx2,-wbnoinvd,+sse,+lzcnt,+pclmul,-prefetchwt1,+f16c,+ssse3,-sgx,-shstk,+cmov,-avx512vbmi,+movbe,+xsaveopt,-avx512dq,+adx,-avx512pf,+sse3" }
+attributes #0 = { "target-cpu"="broadwell" "target-features"="+sse2,+cx16,+sahf,-tbm,-avx512ifma,-sha,-gfni,-fma4,-vpclmulqdq,+prfchw,+bmi2,-cldemote,+fsgsbase,-ptwrite,-xsavec,+popcnt,+aes,-avx512bitalg,-movdiri,-xsaves,-avx512vnni,-avx512vpopcntdq,-pconfig,-clwb,-avx512f,-clzero,-pku,+mmx,-lwp,-rdpid,-xop,+rdseed,-waitpkg,-movdir64b,-sse4a,-avx512bw,-clflushopt,+xsave,-avx512vbmi2,+64bit,-avx512vl,+invpcid,-avx512cd,+avx,-vaes,+rtm,+fma,+bmi,+rdrnd,-mwaitx,+sse4.1,+sse4.2,+avx2,-wbnoinvd,+sse,+lzcnt,+pclmul,+f16c,+ssse3,-sgx,-shstk,+cmov,-avx512vbmi,+movbe,+xsaveopt,-avx512dq,+adx,-avx512pf,+sse3" }
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
index 14b5ee244080..a74b0b441771 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
@@ -333,103 +333,9 @@ define void @invar_cond_chain_1(ptr %I, ptr noalias %src, i1 %c) {
; DEFAULT-LABEL: define void @invar_cond_chain_1(
; DEFAULT-SAME: ptr [[I:%.*]], ptr noalias [[SRC:%.*]], i1 [[C:%.*]]) {
; DEFAULT-NEXT: entry:
-; DEFAULT-NEXT: [[SRC2:%.*]] = ptrtoint ptr [[SRC]] to i64
-; DEFAULT-NEXT: [[I1:%.*]] = ptrtoint ptr [[I]] to i64
-; DEFAULT-NEXT: [[TMP29:%.*]] = sub i64 [[I1]], [[SRC2]]
-; DEFAULT-NEXT: [[TMP0:%.*]] = lshr i64 [[TMP29]], 2
-; DEFAULT-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1
-; DEFAULT-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 8
-; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; DEFAULT: vector.ph:
-; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], 8
-; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]]
-; DEFAULT-NEXT: [[TMP2:%.*]] = mul i64 [[N_VEC]], 4
-; DEFAULT-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP2]]
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[C]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer
-; DEFAULT-NEXT: br label [[VECTOR_BODY:%.*]]
-; DEFAULT: vector.body:
-; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE17:%.*]] ]
-; DEFAULT-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4
-; DEFAULT-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 0
-; DEFAULT-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 16
-; DEFAULT-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP3]]
-; DEFAULT-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP4]]
-; DEFAULT-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i32 0
-; DEFAULT-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i32 4
-; DEFAULT-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4
-; DEFAULT-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i32>, ptr [[TMP6]], align 4
-; DEFAULT-NEXT: [[TMP7:%.*]] = select <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i1> [[BROADCAST_SPLAT]], <4 x i1> zeroinitializer
-; DEFAULT-NEXT: [[TMP8:%.*]] = select <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i1> [[BROADCAST_SPLAT]], <4 x i1> zeroinitializer
-; DEFAULT-NEXT: [[TMP9:%.*]] = or <4 x i1> [[TMP7]], zeroinitializer
-; DEFAULT-NEXT: [[TMP10:%.*]] = or <4 x i1> [[TMP8]], zeroinitializer
-; DEFAULT-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[TMP9]], i32 0
-; DEFAULT-NEXT: br i1 [[TMP11]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
-; DEFAULT: pred.store.if:
-; DEFAULT-NEXT: [[TMP12:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 0
-; DEFAULT-NEXT: store i32 [[TMP12]], ptr [[I]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE]]
-; DEFAULT: pred.store.continue:
-; DEFAULT-NEXT: [[TMP13:%.*]] = extractelement <4 x i1> [[TMP9]], i32 1
-; DEFAULT-NEXT: br i1 [[TMP13]], label [[PRED_STORE_IF4:%.*]], label [[PRED_STORE_CONTINUE5:%.*]]
-; DEFAULT: pred.store.if5:
-; DEFAULT-NEXT: [[TMP14:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 1
-; DEFAULT-NEXT: store i32 [[TMP14]], ptr [[I]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE5]]
-; DEFAULT: pred.store.continue6:
-; DEFAULT-NEXT: [[TMP15:%.*]] = extractelement <4 x i1> [[TMP9]], i32 2
-; DEFAULT-NEXT: br i1 [[TMP15]], label [[PRED_STORE_IF6:%.*]], label [[PRED_STORE_CONTINUE7:%.*]]
-; DEFAULT: pred.store.if7:
-; DEFAULT-NEXT: [[TMP16:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 2
-; DEFAULT-NEXT: store i32 [[TMP16]], ptr [[I]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE7]]
-; DEFAULT: pred.store.continue8:
-; DEFAULT-NEXT: [[TMP17:%.*]] = extractelement <4 x i1> [[TMP9]], i32 3
-; DEFAULT-NEXT: br i1 [[TMP17]], label [[PRED_STORE_IF8:%.*]], label [[PRED_STORE_CONTINUE9:%.*]]
-; DEFAULT: pred.store.if9:
-; DEFAULT-NEXT: [[TMP18:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 3
-; DEFAULT-NEXT: store i32 [[TMP18]], ptr [[I]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE9]]
-; DEFAULT: pred.store.continue10:
-; DEFAULT-NEXT: [[TMP19:%.*]] = extractelement <4 x i1> [[TMP10]], i32 0
-; DEFAULT-NEXT: br i1 [[TMP19]], label [[PRED_STORE_IF10:%.*]], label [[PRED_STORE_CONTINUE11:%.*]]
-; DEFAULT: pred.store.if11:
-; DEFAULT-NEXT: [[TMP20:%.*]] = extractelement <4 x i32> [[WIDE_LOAD3]], i32 0
-; DEFAULT-NEXT: store i32 [[TMP20]], ptr [[I]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE11]]
-; DEFAULT: pred.store.continue12:
-; DEFAULT-NEXT: [[TMP21:%.*]] = extractelement <4 x i1> [[TMP10]], i32 1
-; DEFAULT-NEXT: br i1 [[TMP21]], label [[PRED_STORE_IF12:%.*]], label [[PRED_STORE_CONTINUE13:%.*]]
-; DEFAULT: pred.store.if13:
-; DEFAULT-NEXT: [[TMP22:%.*]] = extractelement <4 x i32> [[WIDE_LOAD3]], i32 1
-; DEFAULT-NEXT: store i32 [[TMP22]], ptr [[I]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE13]]
-; DEFAULT: pred.store.continue14:
-; DEFAULT-NEXT: [[TMP23:%.*]] = extractelement <4 x i1> [[TMP10]], i32 2
-; DEFAULT-NEXT: br i1 [[TMP23]], label [[PRED_STORE_IF14:%.*]], label [[PRED_STORE_CONTINUE15:%.*]]
-; DEFAULT: pred.store.if15:
-; DEFAULT-NEXT: [[TMP24:%.*]] = extractelement <4 x i32> [[WIDE_LOAD3]], i32 2
-; DEFAULT-NEXT: store i32 [[TMP24]], ptr [[I]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE15]]
-; DEFAULT: pred.store.continue16:
-; DEFAULT-NEXT: [[TMP25:%.*]] = extractelement <4 x i1> [[TMP10]], i32 3
-; DEFAULT-NEXT: br i1 [[TMP25]], label [[PRED_STORE_IF16:%.*]], label [[PRED_STORE_CONTINUE17]]
-; DEFAULT: pred.store.if17:
-; DEFAULT-NEXT: [[TMP26:%.*]] = extractelement <4 x i32> [[WIDE_LOAD3]], i32 3
-; DEFAULT-NEXT: store i32 [[TMP26]], ptr [[I]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE17]]
-; DEFAULT: pred.store.continue18:
-; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
-; DEFAULT-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; DEFAULT-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
-; DEFAULT: middle.block:
-; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]]
-; DEFAULT-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP312_LOOPEXIT:%.*]], label [[SCALAR_PH]]
-; DEFAULT: scalar.ph:
-; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[SRC]], [[ENTRY:%.*]] ]
; DEFAULT-NEXT: br label [[FOR_BODY313:%.*]]
; DEFAULT: loop.header:
-; DEFAULT-NEXT: [[__BEGIN3_011973:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INCDEC_PTR329:%.*]], [[IF_END327:%.*]] ]
+; DEFAULT-NEXT: [[__BEGIN3_011973:%.*]] = phi ptr [ [[SRC]], [[ENTRY:%.*]] ], [ [[INCDEC_PTR329:%.*]], [[IF_END327:%.*]] ]
; DEFAULT-NEXT: [[TMP28:%.*]] = load i32, ptr [[__BEGIN3_011973]], align 4
; DEFAULT-NEXT: br i1 true, label [[IF_ELSE321:%.*]], label [[IF_THEN316:%.*]]
; DEFAULT: if:
@@ -444,7 +350,7 @@ define void @invar_cond_chain_1(ptr %I, ptr noalias %src, i1 %c) {
; DEFAULT: loop.latch:
; DEFAULT-NEXT: [[INCDEC_PTR329]] = getelementptr inbounds i8, ptr [[__BEGIN3_011973]], i64 4
; DEFAULT-NEXT: [[CMP311_NOT:%.*]] = icmp eq ptr [[__BEGIN3_011973]], [[I]]
-; DEFAULT-NEXT: br i1 [[CMP311_NOT]], label [[FOR_COND_CLEANUP312_LOOPEXIT]], label [[FOR_BODY313]], !llvm.loop [[LOOP7:![0-9]+]]
+; DEFAULT-NEXT: br i1 [[CMP311_NOT]], label [[EXIT:%.*]], label [[FOR_BODY313]]
; DEFAULT: exit:
; DEFAULT-NEXT: ret void
;
@@ -506,86 +412,9 @@ define void @invar_cond_chain_2(ptr %I, ptr noalias %src, ptr noalias %dst, i32
; DEFAULT-LABEL: define void @invar_cond_chain_2(
; DEFAULT-SAME: ptr [[I:%.*]], ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], i32 [[A:%.*]]) {
; DEFAULT-NEXT: entry:
-; DEFAULT-NEXT: [[SRC2:%.*]] = ptrtoint ptr [[SRC]] to i64
-; DEFAULT-NEXT: [[I1:%.*]] = ptrtoint ptr [[I]] to i64
-; DEFAULT-NEXT: [[TMP0:%.*]] = sub i64 [[I1]], [[SRC2]]
-; DEFAULT-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 2
-; DEFAULT-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; DEFAULT-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 8
-; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; DEFAULT: vector.ph:
-; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 8
-; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
-; DEFAULT-NEXT: [[TMP3:%.*]] = mul i64 [[N_VEC]], 4
-; DEFAULT-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP3]]
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[A]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; DEFAULT-NEXT: br label [[VECTOR_BODY:%.*]]
-; DEFAULT: vector.body:
-; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE15:%.*]] ]
-; DEFAULT-NEXT: [[TMP4:%.*]] = icmp sgt <4 x i32> [[BROADCAST_SPLAT]], zeroinitializer
-; DEFAULT-NEXT: [[TMP5:%.*]] = icmp sgt <4 x i32> [[BROADCAST_SPLAT]], zeroinitializer
-; DEFAULT-NEXT: [[TMP6:%.*]] = xor <4 x i1> [[TMP4]], <i1 true, i1 true, i1 true, i1 true>
-; DEFAULT-NEXT: [[TMP7:%.*]] = xor <4 x i1> [[TMP5]], <i1 true, i1 true, i1 true, i1 true>
-; DEFAULT-NEXT: [[TMP8:%.*]] = extractelement <4 x i1> [[TMP6]], i32 0
-; DEFAULT-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
-; DEFAULT: pred.store.if:
-; DEFAULT-NEXT: store i32 0, ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE]]
-; DEFAULT: pred.store.continue:
-; DEFAULT-NEXT: [[TMP9:%.*]] = extractelement <4 x i1> [[TMP6]], i32 1
-; DEFAULT-NEXT: br i1 [[TMP9]], label [[PRED_STORE_IF2:%.*]], label [[PRED_STORE_CONTINUE3:%.*]]
-; DEFAULT: pred.store.if3:
-; DEFAULT-NEXT: store i32 0, ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE3]]
-; DEFAULT: pred.store.continue4:
-; DEFAULT-NEXT: [[TMP10:%.*]] = extractelement <4 x i1> [[TMP6]], i32 2
-; DEFAULT-NEXT: br i1 [[TMP10]], label [[PRED_STORE_IF4:%.*]], label [[PRED_STORE_CONTINUE5:%.*]]
-; DEFAULT: pred.store.if5:
-; DEFAULT-NEXT: store i32 0, ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE5]]
-; DEFAULT: pred.store.continue6:
-; DEFAULT-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[TMP6]], i32 3
-; DEFAULT-NEXT: br i1 [[TMP11]], label [[PRED_STORE_IF6:%.*]], label [[PRED_STORE_CONTINUE7:%.*]]
-; DEFAULT: pred.store.if7:
-; DEFAULT-NEXT: store i32 0, ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE7]]
-; DEFAULT: pred.store.continue8:
-; DEFAULT-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP7]], i32 0
-; DEFAULT-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF8:%.*]], label [[PRED_STORE_CONTINUE9:%.*]]
-; DEFAULT: pred.store.if9:
-; DEFAULT-NEXT: store i32 0, ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE9]]
-; DEFAULT: pred.store.continue10:
-; DEFAULT-NEXT: [[TMP13:%.*]] = extractelement <4 x i1> [[TMP7]], i32 1
-; DEFAULT-NEXT: br i1 [[TMP13]], label [[PRED_STORE_IF10:%.*]], label [[PRED_STORE_CONTINUE11:%.*]]
-; DEFAULT: pred.store.if11:
-; DEFAULT-NEXT: store i32 0, ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE11]]
-; DEFAULT: pred.store.continue12:
-; DEFAULT-NEXT: [[TMP14:%.*]] = extractelement <4 x i1> [[TMP7]], i32 2
-; DEFAULT-NEXT: br i1 [[TMP14]], label [[PRED_STORE_IF12:%.*]], label [[PRED_STORE_CONTINUE13:%.*]]
-; DEFAULT: pred.store.if13:
-; DEFAULT-NEXT: store i32 0, ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE13]]
-; DEFAULT: pred.store.continue14:
-; DEFAULT-NEXT: [[TMP15:%.*]] = extractelement <4 x i1> [[TMP7]], i32 3
-; DEFAULT-NEXT: br i1 [[TMP15]], label [[PRED_STORE_IF14:%.*]], label [[PRED_STORE_CONTINUE15]]
-; DEFAULT: pred.store.if15:
-; DEFAULT-NEXT: store i32 0, ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE15]]
-; DEFAULT: pred.store.continue16:
-; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
-; DEFAULT-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; DEFAULT-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
-; DEFAULT: middle.block:
-; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]]
-; DEFAULT-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP312_LOOPEXIT:%.*]], label [[SCALAR_PH]]
-; DEFAULT: scalar.ph:
-; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[SRC]], [[ENTRY:%.*]] ]
; DEFAULT-NEXT: br label [[FOR_BODY313:%.*]]
; DEFAULT: loop.header:
-; DEFAULT-NEXT: [[__BEGIN3_01197:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INCDEC_PTR329:%.*]], [[IF_END327:%.*]] ]
+; DEFAULT-NEXT: [[__BEGIN3_01197:%.*]] = phi ptr [ [[SRC]], [[ENTRY:%.*]] ], [ [[INCDEC_PTR329:%.*]], [[IF_END327:%.*]] ]
; DEFAULT-NEXT: [[CMP315_NOT:%.*]] = icmp sgt i32 [[A]], 0
; DEFAULT-NEXT: br i1 [[CMP315_NOT]], label [[IF_END327]], label [[IF_THEN316:%.*]]
; DEFAULT: if:
@@ -596,74 +425,16 @@ define void @invar_cond_chain_2(ptr %I, ptr noalias %src, ptr noalias %dst, i32
; DEFAULT: loop.latch:
; DEFAULT-NEXT: [[INCDEC_PTR329]] = getelementptr inbounds i8, ptr [[__BEGIN3_01197]], i64 4
; DEFAULT-NEXT: [[CMP311_NOT:%.*]] = icmp eq ptr [[__BEGIN3_01197]], [[I]]
-; DEFAULT-NEXT: br i1 [[CMP311_NOT]], label [[FOR_COND_CLEANUP312_LOOPEXIT]], label [[FOR_BODY313]], !llvm.loop [[LOOP9:![0-9]+]]
+; DEFAULT-NEXT: br i1 [[CMP311_NOT]], label [[EXIT:%.*]], label [[FOR_BODY313]]
; DEFAULT: exit:
; DEFAULT-NEXT: ret void
;
; PRED-LABEL: define void @invar_cond_chain_2(
; PRED-SAME: ptr [[I:%.*]], ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], i32 [[A:%.*]]) {
; PRED-NEXT: entry:
-; PRED-NEXT: [[SRC2:%.*]] = ptrtoint ptr [[SRC]] to i64
-; PRED-NEXT: [[I1:%.*]] = ptrtoint ptr [[I]] to i64
-; PRED-NEXT: [[TMP0:%.*]] = sub i64 [[I1]], [[SRC2]]
-; PRED-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 2
-; PRED-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; PRED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; PRED: vector.ph:
-; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP2]], 3
-; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4
-; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; PRED-NEXT: [[TMP3:%.*]] = mul i64 [[N_VEC]], 4
-; PRED-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP3]]
-; PRED-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[TMP2]], 1
-; PRED-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
-; PRED-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT2]], <4 x i64> poison, <4 x i32> zeroinitializer
-; PRED-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement <4 x i32> poison, i32 [[A]], i64 0
-; PRED-NEXT: [[BROADCAST_SPLAT5:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT4]], <4 x i32> poison, <4 x i32> zeroinitializer
-; PRED-NEXT: br label [[VECTOR_BODY:%.*]]
-; PRED: vector.body:
-; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE11:%.*]] ]
-; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[INDEX]], i64 0
-; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
-; PRED-NEXT: [[VEC_IV:%.*]] = add <4 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3>
-; PRED-NEXT: [[TMP4:%.*]] = icmp ule <4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT3]]
-; PRED-NEXT: [[TMP5:%.*]] = icmp sgt <4 x i32> [[BROADCAST_SPLAT5]], zeroinitializer
-; PRED-NEXT: [[TMP6:%.*]] = xor <4 x i1> [[TMP5]], <i1 true, i1 true, i1 true, i1 true>
-; PRED-NEXT: [[TMP7:%.*]] = select <4 x i1> [[TMP4]], <4 x i1> [[TMP6]], <4 x i1> zeroinitializer
-; PRED-NEXT: [[TMP8:%.*]] = extractelement <4 x i1> [[TMP7]], i32 0
-; PRED-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
-; PRED: pred.store.if:
-; PRED-NEXT: store i32 0, ptr [[DST]], align 4
-; PRED-NEXT: br label [[PRED_STORE_CONTINUE]]
-; PRED: pred.store.continue:
-; PRED-NEXT: [[TMP9:%.*]] = extractelement <4 x i1> [[TMP7]], i32 1
-; PRED-NEXT: br i1 [[TMP9]], label [[PRED_STORE_IF6:%.*]], label [[PRED_STORE_CONTINUE7:%.*]]
-; PRED: pred.store.if7:
-; PRED-NEXT: store i32 0, ptr [[DST]], align 4
-; PRED-NEXT: br label [[PRED_STORE_CONTINUE7]]
-; PRED: pred.store.continue8:
-; PRED-NEXT: [[TMP10:%.*]] = extractelement <4 x i1> [[TMP7]], i32 2
-; PRED-NEXT: br i1 [[TMP10]], label [[PRED_STORE_IF8:%.*]], label [[PRED_STORE_CONTINUE9:%.*]]
-; PRED: pred.store.if9:
-; PRED-NEXT: store i32 0, ptr [[DST]], align 4
-; PRED-NEXT: br label [[PRED_STORE_CONTINUE9]]
-; PRED: pred.store.continue10:
-; PRED-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[TMP7]], i32 3
-; PRED-NEXT: br i1 [[TMP11]], label [[PRED_STORE_IF10:%.*]], label [[PRED_STORE_CONTINUE11]]
-; PRED: pred.store.if11:
-; PRED-NEXT: store i32 0, ptr [[DST]], align 4
-; PRED-NEXT: br label [[PRED_STORE_CONTINUE11]]
-; PRED: pred.store.continue12:
-; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
-; PRED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; PRED-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
-; PRED: middle.block:
-; PRED-NEXT: br i1 true, label [[FOR_COND_CLEANUP312_LOOPEXIT:%.*]], label [[SCALAR_PH]]
-; PRED: scalar.ph:
-; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[SRC]], [[ENTRY:%.*]] ]
; PRED-NEXT: br label [[FOR_BODY313:%.*]]
; PRED: loop.header:
-; PRED-NEXT: [[__BEGIN3_01197:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INCDEC_PTR329:%.*]], [[IF_END327:%.*]] ]
+; PRED-NEXT: [[__BEGIN3_01197:%.*]] = phi ptr [ [[SRC]], [[ENTRY:%.*]] ], [ [[INCDEC_PTR329:%.*]], [[IF_END327:%.*]] ]
; PRED-NEXT: [[CMP315_NOT:%.*]] = icmp sgt i32 [[A]], 0
; PRED-NEXT: br i1 [[CMP315_NOT]], label [[IF_END327]], label [[IF_THEN316:%.*]]
; PRED: if:
@@ -674,7 +445,7 @@ define void @invar_cond_chain_2(ptr %I, ptr noalias %src, ptr noalias %dst, i32
; PRED: loop.latch:
; PRED-NEXT: [[INCDEC_PTR329]] = getelementptr inbounds i8, ptr [[__BEGIN3_01197]], i64 4
; PRED-NEXT: [[CMP311_NOT:%.*]] = icmp eq ptr [[__BEGIN3_01197]], [[I]]
-; PRED-NEXT: br i1 [[CMP311_NOT]], label [[FOR_COND_CLEANUP312_LOOPEXIT]], label [[FOR_BODY313]], !llvm.loop [[LOOP5:![0-9]+]]
+; PRED-NEXT: br i1 [[CMP311_NOT]], label [[EXIT:%.*]], label [[FOR_BODY313]]
; PRED: exit:
; PRED-NEXT: ret void
;
@@ -723,7 +494,7 @@ define void @latch_branch_cost(ptr %dst) {
; DEFAULT-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP5]], align 1
; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; DEFAULT-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 96
-; DEFAULT-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; DEFAULT-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; DEFAULT: middle.block:
; DEFAULT-NEXT: br i1 false, label [[FOR_END:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; DEFAULT: vec.epilog.iter.check:
@@ -739,7 +510,7 @@ define void @latch_branch_cost(ptr %dst) {
; DEFAULT-NEXT: store <4 x i8> zeroinitializer, ptr [[TMP9]], align 1
; DEFAULT-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 4
; DEFAULT-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 100
-; DEFAULT-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; DEFAULT-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; DEFAULT: vec.epilog.middle.block:
; DEFAULT-NEXT: br i1 true, label [[FOR_END]], label [[SCALAR_PH]]
; DEFAULT: vec.epilog.scalar.ph:
@@ -751,7 +522,7 @@ define void @latch_branch_cost(ptr %dst) {
; DEFAULT-NEXT: store i8 0, ptr [[ARRAYIDX]], align 1
; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 100
-; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; DEFAULT: exit:
; DEFAULT-NEXT: ret void
;
@@ -832,7 +603,7 @@ define void @latch_branch_cost(ptr %dst) {
; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 8
; PRED-NEXT: [[VEC_IND_NEXT]] = add <8 x i64> [[VEC_IND]], <i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8>
; PRED-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], 104
-; PRED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; PRED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; PRED: middle.block:
; PRED-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; PRED: scalar.ph:
@@ -844,7 +615,7 @@ define void @latch_branch_cost(ptr %dst) {
; PRED-NEXT: store i8 0, ptr [[GEP]], align 1
; PRED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[IV]], 1
; PRED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 100
-; PRED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; PRED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; PRED: exit:
; PRED-NEXT: ret void
;
@@ -863,6 +634,247 @@ exit:
ret void
}
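+
+; The loads of %A, %B and %C are loop-invariant, so the condition guarding the
+; stores is invariant as well; with tail predication it has to be combined
+; with the header mask.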
+define i32 @header_mask_and_invariant_compare(ptr %A, ptr %B, ptr %C, ptr %D, ptr %E, i64 %N) "target-features"="+sve" {
+; DEFAULT-LABEL: define i32 @header_mask_and_invariant_compare(
+; DEFAULT-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], ptr [[D:%.*]], ptr [[E:%.*]], i64 [[N:%.*]]) #[[ATTR1:[0-9]+]] {
+; DEFAULT-NEXT: entry:
+; DEFAULT-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1
+; DEFAULT-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; DEFAULT-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 4
+; DEFAULT-NEXT: [[TMP3:%.*]] = call i64 @llvm.umax.i64(i64 64, i64 [[TMP2]])
+; DEFAULT-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP3]]
+; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
+; DEFAULT: vector.memcheck:
+; DEFAULT-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[E]], i64 4
+; DEFAULT-NEXT: [[TMP4:%.*]] = shl i64 [[N]], 2
+; DEFAULT-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 4
+; DEFAULT-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[D]], i64 [[TMP5]]
+; DEFAULT-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[A]], i64 4
+; DEFAULT-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[B]], i64 4
+; DEFAULT-NEXT: [[SCEVGEP4:%.*]] = getelementptr i8, ptr [[C]], i64 4
+; DEFAULT-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[E]], [[SCEVGEP1]]
+; DEFAULT-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[D]], [[SCEVGEP]]
+; DEFAULT-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; DEFAULT-NEXT: [[BOUND05:%.*]] = icmp ult ptr [[E]], [[SCEVGEP2]]
+; DEFAULT-NEXT: [[BOUND16:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
+; DEFAULT-NEXT: [[FOUND_CONFLICT7:%.*]] = and i1 [[BOUND05]], [[BOUND16]]
+; DEFAULT-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT7]]
+; DEFAULT-NEXT: [[BOUND08:%.*]] = icmp ult ptr [[E]], [[SCEVGEP3]]
+; DEFAULT-NEXT: [[BOUND19:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
+; DEFAULT-NEXT: [[FOUND_CONFLICT10:%.*]] = and i1 [[BOUND08]], [[BOUND19]]
+; DEFAULT-NEXT: [[CONFLICT_RDX11:%.*]] = or i1 [[CONFLICT_RDX]], [[FOUND_CONFLICT10]]
+; DEFAULT-NEXT: [[BOUND012:%.*]] = icmp ult ptr [[E]], [[SCEVGEP4]]
+; DEFAULT-NEXT: [[BOUND113:%.*]] = icmp ult ptr [[C]], [[SCEVGEP]]
+; DEFAULT-NEXT: [[FOUND_CONFLICT14:%.*]] = and i1 [[BOUND012]], [[BOUND113]]
+; DEFAULT-NEXT: [[CONFLICT_RDX15:%.*]] = or i1 [[CONFLICT_RDX11]], [[FOUND_CONFLICT14]]
+; DEFAULT-NEXT: [[BOUND016:%.*]] = icmp ult ptr [[D]], [[SCEVGEP2]]
+; DEFAULT-NEXT: [[BOUND117:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
+; DEFAULT-NEXT: [[FOUND_CONFLICT18:%.*]] = and i1 [[BOUND016]], [[BOUND117]]
+; DEFAULT-NEXT: [[CONFLICT_RDX19:%.*]] = or i1 [[CONFLICT_RDX15]], [[FOUND_CONFLICT18]]
+; DEFAULT-NEXT: [[BOUND020:%.*]] = icmp ult ptr [[D]], [[SCEVGEP3]]
+; DEFAULT-NEXT: [[BOUND121:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]]
+; DEFAULT-NEXT: [[FOUND_CONFLICT22:%.*]] = and i1 [[BOUND020]], [[BOUND121]]
+; DEFAULT-NEXT: [[CONFLICT_RDX23:%.*]] = or i1 [[CONFLICT_RDX19]], [[FOUND_CONFLICT22]]
+; DEFAULT-NEXT: [[BOUND024:%.*]] = icmp ult ptr [[D]], [[SCEVGEP4]]
+; DEFAULT-NEXT: [[BOUND125:%.*]] = icmp ult ptr [[C]], [[SCEVGEP1]]
+; DEFAULT-NEXT: [[FOUND_CONFLICT26:%.*]] = and i1 [[BOUND024]], [[BOUND125]]
+; DEFAULT-NEXT: [[CONFLICT_RDX27:%.*]] = or i1 [[CONFLICT_RDX23]], [[FOUND_CONFLICT26]]
+; DEFAULT-NEXT: br i1 [[CONFLICT_RDX27]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
+; DEFAULT: vector.ph:
+; DEFAULT-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
+; DEFAULT-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 4
+; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP7]]
+; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; DEFAULT-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; DEFAULT-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 4
+; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT32:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[E]], i64 0
+; DEFAULT-NEXT: [[BROADCAST_SPLAT33:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT32]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
+; DEFAULT-NEXT: br label [[VECTOR_BODY:%.*]]
+; DEFAULT: vector.body:
+; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; DEFAULT-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0
+; DEFAULT-NEXT: [[TMP11:%.*]] = load i32, ptr [[A]], align 4, !alias.scope [[META9:![0-9]+]]
+; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT28:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP11]], i64 0
+; DEFAULT-NEXT: [[BROADCAST_SPLAT29:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT28]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; DEFAULT-NEXT: [[TMP12:%.*]] = load i32, ptr [[B]], align 4, !alias.scope [[META12:![0-9]+]]
+; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP12]], i64 0
+; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; DEFAULT-NEXT: [[TMP13:%.*]] = or <vscale x 4 x i32> [[BROADCAST_SPLAT]], [[BROADCAST_SPLAT29]]
+; DEFAULT-NEXT: [[TMP14:%.*]] = load i32, ptr [[C]], align 4, !alias.scope [[META14:![0-9]+]]
+; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT30:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP14]], i64 0
+; DEFAULT-NEXT: [[BROADCAST_SPLAT31:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT30]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; DEFAULT-NEXT: [[TMP15:%.*]] = icmp ugt <vscale x 4 x i32> [[BROADCAST_SPLAT31]], [[TMP13]]
+; DEFAULT-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[D]], i64 [[TMP10]]
+; DEFAULT-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP13]], <vscale x 4 x ptr> [[BROADCAST_SPLAT33]], i32 4, <vscale x 4 x i1> [[TMP15]]), !alias.scope [[META16:![0-9]+]], !noalias [[META18:![0-9]+]]
+; DEFAULT-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[TMP16]], i32 0
+; DEFAULT-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> zeroinitializer, ptr [[TMP17]], i32 4, <vscale x 4 x i1> [[TMP15]]), !alias.scope [[META20:![0-9]+]], !noalias [[META21:![0-9]+]]
+; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
+; DEFAULT-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; DEFAULT-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; DEFAULT: middle.block:
+; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
+; DEFAULT-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; DEFAULT: scalar.ph:
+; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
+; DEFAULT-NEXT: br label [[LOOP_HEADER:%.*]]
+; DEFAULT: loop.header:
+; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
+; DEFAULT-NEXT: [[L_A:%.*]] = load i32, ptr [[A]], align 4
+; DEFAULT-NEXT: [[L_B:%.*]] = load i32, ptr [[B]], align 4
+; DEFAULT-NEXT: [[OR:%.*]] = or i32 [[L_B]], [[L_A]]
+; DEFAULT-NEXT: [[L_C:%.*]] = load i32, ptr [[C]], align 4
+; DEFAULT-NEXT: [[C_0:%.*]] = icmp ugt i32 [[L_C]], [[OR]]
+; DEFAULT-NEXT: br i1 [[C_0]], label [[IF_THEN:%.*]], label [[LOOP_LATCH]]
+; DEFAULT: if.then:
+; DEFAULT-NEXT: [[GEP_D:%.*]] = getelementptr i32, ptr [[D]], i64 [[IV]]
+; DEFAULT-NEXT: store i32 [[OR]], ptr [[E]], align 4
+; DEFAULT-NEXT: store i32 0, ptr [[GEP_D]], align 4
+; DEFAULT-NEXT: br label [[LOOP_LATCH]]
+; DEFAULT: loop.latch:
+; DEFAULT-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; DEFAULT-NEXT: [[C_1:%.*]] = icmp eq i64 [[IV]], [[N]]
+; DEFAULT-NEXT: br i1 [[C_1]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP23:![0-9]+]]
+; DEFAULT: exit:
+; DEFAULT-NEXT: ret i32 0
+;
+; PRED-LABEL: define i32 @header_mask_and_invariant_compare(
+; PRED-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], ptr [[D:%.*]], ptr [[E:%.*]], i64 [[N:%.*]]) #[[ATTR1:[0-9]+]] {
+; PRED-NEXT: entry:
+; PRED-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1
+; PRED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
+; PRED: vector.memcheck:
+; PRED-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[E]], i64 4
+; PRED-NEXT: [[TMP1:%.*]] = shl i64 [[N]], 2
+; PRED-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 4
+; PRED-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[D]], i64 [[TMP2]]
+; PRED-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[A]], i64 4
+; PRED-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[B]], i64 4
+; PRED-NEXT: [[SCEVGEP4:%.*]] = getelementptr i8, ptr [[C]], i64 4
+; PRED-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[E]], [[SCEVGEP1]]
+; PRED-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[D]], [[SCEVGEP]]
+; PRED-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; PRED-NEXT: [[BOUND05:%.*]] = icmp ult ptr [[E]], [[SCEVGEP2]]
+; PRED-NEXT: [[BOUND16:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
+; PRED-NEXT: [[FOUND_CONFLICT7:%.*]] = and i1 [[BOUND05]], [[BOUND16]]
+; PRED-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT7]]
+; PRED-NEXT: [[BOUND08:%.*]] = icmp ult ptr [[E]], [[SCEVGEP3]]
+; PRED-NEXT: [[BOUND19:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
+; PRED-NEXT: [[FOUND_CONFLICT10:%.*]] = and i1 [[BOUND08]], [[BOUND19]]
+; PRED-NEXT: [[CONFLICT_RDX11:%.*]] = or i1 [[CONFLICT_RDX]], [[FOUND_CONFLICT10]]
+; PRED-NEXT: [[BOUND012:%.*]] = icmp ult ptr [[E]], [[SCEVGEP4]]
+; PRED-NEXT: [[BOUND113:%.*]] = icmp ult ptr [[C]], [[SCEVGEP]]
+; PRED-NEXT: [[FOUND_CONFLICT14:%.*]] = and i1 [[BOUND012]], [[BOUND113]]
+; PRED-NEXT: [[CONFLICT_RDX15:%.*]] = or i1 [[CONFLICT_RDX11]], [[FOUND_CONFLICT14]]
+; PRED-NEXT: [[BOUND016:%.*]] = icmp ult ptr [[D]], [[SCEVGEP2]]
+; PRED-NEXT: [[BOUND117:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
+; PRED-NEXT: [[FOUND_CONFLICT18:%.*]] = and i1 [[BOUND016]], [[BOUND117]]
+; PRED-NEXT: [[CONFLICT_RDX19:%.*]] = or i1 [[CONFLICT_RDX15]], [[FOUND_CONFLICT18]]
+; PRED-NEXT: [[BOUND020:%.*]] = icmp ult ptr [[D]], [[SCEVGEP3]]
+; PRED-NEXT: [[BOUND121:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]]
+; PRED-NEXT: [[FOUND_CONFLICT22:%.*]] = and i1 [[BOUND020]], [[BOUND121]]
+; PRED-NEXT: [[CONFLICT_RDX23:%.*]] = or i1 [[CONFLICT_RDX19]], [[FOUND_CONFLICT22]]
+; PRED-NEXT: [[BOUND024:%.*]] = icmp ult ptr [[D]], [[SCEVGEP4]]
+; PRED-NEXT: [[BOUND125:%.*]] = icmp ult ptr [[C]], [[SCEVGEP1]]
+; PRED-NEXT: [[FOUND_CONFLICT26:%.*]] = and i1 [[BOUND024]], [[BOUND125]]
+; PRED-NEXT: [[CONFLICT_RDX27:%.*]] = or i1 [[CONFLICT_RDX23]], [[FOUND_CONFLICT26]]
+; PRED-NEXT: br i1 [[CONFLICT_RDX27]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
+; PRED: vector.ph:
+; PRED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; PRED-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4
+; PRED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; PRED-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4
+; PRED-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
+; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP7]]
+; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]]
+; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; PRED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; PRED-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 4
+; PRED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; PRED-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 4
+; PRED-NEXT: [[TMP12:%.*]] = sub i64 [[TMP0]], [[TMP11]]
+; PRED-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[TMP0]], [[TMP11]]
+; PRED-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i64 [[TMP12]], i64 0
+; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[TMP0]])
+; PRED-NEXT: [[BROADCAST_SPLATINSERT32:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[E]], i64 0
+; PRED-NEXT: [[BROADCAST_SPLAT33:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT32]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
+; PRED-NEXT: br label [[VECTOR_BODY:%.*]]
+; PRED: vector.body:
+; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PRED-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 0
+; PRED-NEXT: [[TMP16:%.*]] = load i32, ptr [[A]], align 4, !alias.scope [[META6:![0-9]+]]
+; PRED-NEXT: [[BROADCAST_SPLATINSERT28:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP16]], i64 0
+; PRED-NEXT: [[BROADCAST_SPLAT29:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT28]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; PRED-NEXT: [[TMP17:%.*]] = load i32, ptr [[B]], align 4, !alias.scope [[META9:![0-9]+]]
+; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP17]], i64 0
+; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; PRED-NEXT: [[TMP18:%.*]] = or <vscale x 4 x i32> [[BROADCAST_SPLAT]], [[BROADCAST_SPLAT29]]
+; PRED-NEXT: [[TMP19:%.*]] = load i32, ptr [[C]], align 4, !alias.scope [[META11:![0-9]+]]
+; PRED-NEXT: [[BROADCAST_SPLATINSERT30:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP19]], i64 0
+; PRED-NEXT: [[BROADCAST_SPLAT31:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT30]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; PRED-NEXT: [[TMP20:%.*]] = icmp ugt <vscale x 4 x i32> [[BROADCAST_SPLAT31]], [[TMP18]]
+; PRED-NEXT: [[TMP21:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i1> [[TMP20]], <vscale x 4 x i1> zeroinitializer
+; PRED-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[D]], i64 [[TMP15]]
+; PRED-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP18]], <vscale x 4 x ptr> [[BROADCAST_SPLAT33]], i32 4, <vscale x 4 x i1> [[TMP21]]), !alias.scope [[META13:![0-9]+]], !noalias [[META15:![0-9]+]]
+; PRED-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr [[TMP22]], i32 0
+; PRED-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> zeroinitializer, ptr [[TMP23]], i32 4, <vscale x 4 x i1> [[TMP21]]), !alias.scope [[META17:![0-9]+]], !noalias [[META18:![0-9]+]]
+; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP9]]
+; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP14]])
+; PRED-NEXT: [[TMP24:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
+; PRED-NEXT: [[TMP25:%.*]] = extractelement <vscale x 4 x i1> [[TMP24]], i32 0
+; PRED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; PRED: middle.block:
+; PRED-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
+; PRED: scalar.ph:
+; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
+; PRED-NEXT: br label [[LOOP_HEADER:%.*]]
+; PRED: loop.header:
+; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
+; PRED-NEXT: [[L_A:%.*]] = load i32, ptr [[A]], align 4
+; PRED-NEXT: [[L_B:%.*]] = load i32, ptr [[B]], align 4
+; PRED-NEXT: [[OR:%.*]] = or i32 [[L_B]], [[L_A]]
+; PRED-NEXT: [[L_C:%.*]] = load i32, ptr [[C]], align 4
+; PRED-NEXT: [[C_0:%.*]] = icmp ugt i32 [[L_C]], [[OR]]
+; PRED-NEXT: br i1 [[C_0]], label [[IF_THEN:%.*]], label [[LOOP_LATCH]]
+; PRED: if.then:
+; PRED-NEXT: [[GEP_D:%.*]] = getelementptr i32, ptr [[D]], i64 [[IV]]
+; PRED-NEXT: store i32 [[OR]], ptr [[E]], align 4
+; PRED-NEXT: store i32 0, ptr [[GEP_D]], align 4
+; PRED-NEXT: br label [[LOOP_LATCH]]
+; PRED: loop.latch:
+; PRED-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; PRED-NEXT: [[C_1:%.*]] = icmp eq i64 [[IV]], [[N]]
+; PRED-NEXT: br i1 [[C_1]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP20:![0-9]+]]
+; PRED: exit:
+; PRED-NEXT: ret i32 0
+;
+entry:
+ br label %loop.header
+
+loop.header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
+ %l.A = load i32, ptr %A, align 4
+ %l.B = load i32, ptr %B, align 4
+ %or = or i32 %l.B, %l.A
+ %l.C = load i32, ptr %C, align 4
+ %c.0 = icmp ugt i32 %l.C, %or
+ br i1 %c.0, label %if.then, label %loop.latch
+
+if.then:
+ %gep.D = getelementptr i32, ptr %D, i64 %iv
+ store i32 %or, ptr %E, align 4
+ store i32 0, ptr %gep.D, align 4
+ br label %loop.latch
+
+loop.latch:
+ %iv.next = add i64 %iv, 1
+ %c.1 = icmp eq i64 %iv, %N
+ br i1 %c.1, label %exit, label %loop.header
+
+exit:
+ ret i32 0
+}
+
;.
; DEFAULT: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; DEFAULT: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
@@ -871,12 +883,23 @@ exit:
; DEFAULT: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
; DEFAULT: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
; DEFAULT: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
-; DEFAULT: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]}
-; DEFAULT: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
-; DEFAULT: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]}
-; DEFAULT: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
-; DEFAULT: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]]}
-; DEFAULT: [[LOOP12]] = distinct !{[[LOOP12]], [[META2]], [[META1]]}
+; DEFAULT: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]}
+; DEFAULT: [[LOOP8]] = distinct !{[[LOOP8]], [[META2]], [[META1]]}
+; DEFAULT: [[META9]] = !{[[META10:![0-9]+]]}
+; DEFAULT: [[META10]] = distinct !{[[META10]], [[META11:![0-9]+]]}
+; DEFAULT: [[META11]] = distinct !{[[META11]], !"LVerDomain"}
+; DEFAULT: [[META12]] = !{[[META13:![0-9]+]]}
+; DEFAULT: [[META13]] = distinct !{[[META13]], [[META11]]}
+; DEFAULT: [[META14]] = !{[[META15:![0-9]+]]}
+; DEFAULT: [[META15]] = distinct !{[[META15]], [[META11]]}
+; DEFAULT: [[META16]] = !{[[META17:![0-9]+]]}
+; DEFAULT: [[META17]] = distinct !{[[META17]], [[META11]]}
+; DEFAULT: [[META18]] = !{[[META19:![0-9]+]], [[META10]], [[META13]], [[META15]]}
+; DEFAULT: [[META19]] = distinct !{[[META19]], [[META11]]}
+; DEFAULT: [[META20]] = !{[[META19]]}
+; DEFAULT: [[META21]] = !{[[META10]], [[META13]], [[META15]]}
+; DEFAULT: [[LOOP22]] = distinct !{[[LOOP22]], [[META1]], [[META2]]}
+; DEFAULT: [[LOOP23]] = distinct !{[[LOOP23]], [[META1]]}
;.
; PRED: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; PRED: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
@@ -884,6 +907,19 @@ exit:
; PRED: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
; PRED: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
; PRED: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
-; PRED: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
-; PRED: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]}
+; PRED: [[META6]] = !{[[META7:![0-9]+]]}
+; PRED: [[META7]] = distinct !{[[META7]], [[META8:![0-9]+]]}
+; PRED: [[META8]] = distinct !{[[META8]], !"LVerDomain"}
+; PRED: [[META9]] = !{[[META10:![0-9]+]]}
+; PRED: [[META10]] = distinct !{[[META10]], [[META8]]}
+; PRED: [[META11]] = !{[[META12:![0-9]+]]}
+; PRED: [[META12]] = distinct !{[[META12]], [[META8]]}
+; PRED: [[META13]] = !{[[META14:![0-9]+]]}
+; PRED: [[META14]] = distinct !{[[META14]], [[META8]]}
+; PRED: [[META15]] = !{[[META16:![0-9]+]], [[META7]], [[META10]], [[META12]]}
+; PRED: [[META16]] = distinct !{[[META16]], [[META8]]}
+; PRED: [[META17]] = !{[[META16]]}
+; PRED: [[META18]] = !{[[META7]], [[META10]], [[META12]]}
+; PRED: [[LOOP19]] = distinct !{[[LOOP19]], [[META1]], [[META2]]}
+; PRED: [[LOOP20]] = distinct !{[[LOOP20]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
index b91579106261..200c2adcf0e6 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
@@ -223,10 +223,9 @@ define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 {
; TFCOMMON-NEXT: [[TMP10:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[TMP9]])
; TFCOMMON-NEXT: [[TMP11:%.*]] = xor <vscale x 2 x i1> [[TMP8]], shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer)
; TFCOMMON-NEXT: [[TMP12:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> zeroinitializer
-; TFCOMMON-NEXT: [[TMP13:%.*]] = or <vscale x 2 x i1> [[TMP9]], [[TMP12]]
; TFCOMMON-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP12]], <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i64> [[TMP10]]
; TFCOMMON-NEXT: [[TMP14:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
-; TFCOMMON-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP14]], i32 8, <vscale x 2 x i1> [[TMP13]])
+; TFCOMMON-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP14]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; TFCOMMON-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025)
; TFCOMMON-NEXT: [[TMP15:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer)
@@ -272,16 +271,14 @@ define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: [[TMP20:%.*]] = xor <vscale x 2 x i1> [[TMP14]], shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer)
; TFA_INTERLEAVE-NEXT: [[TMP21:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i1> [[TMP19]], <vscale x 2 x i1> zeroinitializer
; TFA_INTERLEAVE-NEXT: [[TMP22:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]], <vscale x 2 x i1> [[TMP20]], <vscale x 2 x i1> zeroinitializer
-; TFA_INTERLEAVE-NEXT: [[TMP23:%.*]] = or <vscale x 2 x i1> [[TMP15]], [[TMP21]]
-; TFA_INTERLEAVE-NEXT: [[TMP24:%.*]] = or <vscale x 2 x i1> [[TMP16]], [[TMP22]]
; TFA_INTERLEAVE-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP21]], <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i64> [[TMP17]]
; TFA_INTERLEAVE-NEXT: [[PREDPHI4:%.*]] = select <vscale x 2 x i1> [[TMP22]], <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i64> [[TMP18]]
; TFA_INTERLEAVE-NEXT: [[TMP25:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
; TFA_INTERLEAVE-NEXT: [[TMP26:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP27:%.*]] = mul i64 [[TMP26]], 2
; TFA_INTERLEAVE-NEXT: [[TMP28:%.*]] = getelementptr inbounds i64, ptr [[TMP25]], i64 [[TMP27]]
-; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP25]], i32 8, <vscale x 2 x i1> [[TMP23]])
-; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI4]], ptr [[TMP28]], i32 8, <vscale x 2 x i1> [[TMP24]])
+; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP25]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
+; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI4]], ptr [[TMP28]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]])
; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], [[TMP6]]
; TFA_INTERLEAVE-NEXT: [[TMP29:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP30:%.*]] = mul i64 [[TMP29]], 2
@@ -405,10 +402,9 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 {
; TFCOMMON-NEXT: [[TMP11:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> [[TMP10]])
; TFCOMMON-NEXT: [[TMP12:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i1> [[TMP8]], <vscale x 2 x i1> zeroinitializer
; TFCOMMON-NEXT: [[TMP13:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[TMP12]])
-; TFCOMMON-NEXT: [[TMP14:%.*]] = or <vscale x 2 x i1> [[TMP10]], [[TMP12]]
; TFCOMMON-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[TMP13]]
; TFCOMMON-NEXT: [[TMP15:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
-; TFCOMMON-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP15]], i32 8, <vscale x 2 x i1> [[TMP14]])
+; TFCOMMON-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP15]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; TFCOMMON-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025)
; TFCOMMON-NEXT: [[TMP16:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer)
@@ -456,16 +452,14 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: [[TMP22:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]], <vscale x 2 x i1> [[TMP14]], <vscale x 2 x i1> zeroinitializer
; TFA_INTERLEAVE-NEXT: [[TMP23:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[TMP21]])
; TFA_INTERLEAVE-NEXT: [[TMP24:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> [[WIDE_MASKED_LOAD3]], <vscale x 2 x i1> [[TMP22]])
-; TFA_INTERLEAVE-NEXT: [[TMP25:%.*]] = or <vscale x 2 x i1> [[TMP17]], [[TMP21]]
-; TFA_INTERLEAVE-NEXT: [[TMP26:%.*]] = or <vscale x 2 x i1> [[TMP18]], [[TMP22]]
; TFA_INTERLEAVE-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP17]], <vscale x 2 x i64> [[TMP19]], <vscale x 2 x i64> [[TMP23]]
; TFA_INTERLEAVE-NEXT: [[PREDPHI4:%.*]] = select <vscale x 2 x i1> [[TMP18]], <vscale x 2 x i64> [[TMP20]], <vscale x 2 x i64> [[TMP24]]
; TFA_INTERLEAVE-NEXT: [[TMP27:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
; TFA_INTERLEAVE-NEXT: [[TMP28:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP29:%.*]] = mul i64 [[TMP28]], 2
; TFA_INTERLEAVE-NEXT: [[TMP30:%.*]] = getelementptr inbounds i64, ptr [[TMP27]], i64 [[TMP29]]
-; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP27]], i32 8, <vscale x 2 x i1> [[TMP25]])
-; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI4]], ptr [[TMP30]], i32 8, <vscale x 2 x i1> [[TMP26]])
+; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP27]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
+; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI4]], ptr [[TMP30]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]])
; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], [[TMP6]]
; TFA_INTERLEAVE-NEXT: [[TMP31:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP32:%.*]] = mul i64 [[TMP31]], 2
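The deletions in the masked-call.ll hunks above all rest on one boolean identity: the old store mask was the or of two complementary sub-masks, each formed by a select against the active lane mask, and that or always folds back to the lane mask itself. A minimal sketch of the fold, with hypothetical value names not taken from the tests:

;   %not.cond = xor <vscale x 2 x i1> %cond, (splat of i1 true)
;   %m.then   = select <vscale x 2 x i1> %lane.mask, <vscale x 2 x i1> %cond,     <vscale x 2 x i1> zeroinitializer   ; lane.mask AND cond
;   %m.else   = select <vscale x 2 x i1> %lane.mask, <vscale x 2 x i1> %not.cond, <vscale x 2 x i1> zeroinitializer   ; lane.mask AND (NOT cond)
;   %union    = or <vscale x 2 x i1> %m.then, %m.else                                                                 ; == %lane.mask

Because %union is provably equal to %lane.mask, the masked stores can take [[ACTIVE_LANE_MASK]] (and [[ACTIVE_LANE_MASK2]] in the interleaved runs) directly, and the or instructions disappear.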
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
index ddc004657ed5..bcf8096f1b73 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
@@ -1241,9 +1241,8 @@ define float @fadd_conditional(ptr noalias nocapture readonly %a, ptr noalias no
; CHECK-ORDERED-TF-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP16]], i32 4, <vscale x 4 x i1> [[TMP15]], <vscale x 4 x float> poison)
; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = xor <vscale x 4 x i1> [[TMP13]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i1> [[TMP17]], <vscale x 4 x i1> zeroinitializer
-; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = or <vscale x 4 x i1> [[TMP15]], [[TMP18]]
; CHECK-ORDERED-TF-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP18]], <vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 3.000000e+00, i64 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x float> [[WIDE_MASKED_LOAD1]]
-; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = select <vscale x 4 x i1> [[TMP19]], <vscale x 4 x float> [[PREDPHI]], <vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float -0.000000e+00, i64 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> [[PREDPHI]], <vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float -0.000000e+00, i64 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-ORDERED-TF-NEXT: [[TMP21]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[TMP20]])
; CHECK-ORDERED-TF-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP23]]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]])
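The scalable-strict-fadd.ll hunk is the same fold applied to an ordered reduction rather than a store: the select feeding @llvm.vector.reduce.fadd keeps -0.0 (the fadd identity) in inactive lanes, so once the or collapses to the lane mask, the guard becomes a single select on [[ACTIVE_LANE_MASK]] and the old [[TMP19]] is dead.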
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/streaming-compatible-sve-no-maximize-bandwidth.ll b/llvm/test/Transforms/LoopVectorize/AArch64/streaming-compatible-sve-no-maximize-bandwidth.ll
index b89d09f25896..6b10d4591f41 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/streaming-compatible-sve-no-maximize-bandwidth.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/streaming-compatible-sve-no-maximize-bandwidth.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=loop-vectorize -force-streaming-compatible-sve -enable-fixedwidth-autovec-in-streaming-mode -mattr=+sve -force-target-instruction-cost=1 -scalable-vectorization=off -force-vector-interleave=1 -S 2>&1 | FileCheck %s --check-prefix=SC_SVE
+; RUN: opt < %s -passes=loop-vectorize -force-streaming-compatible -enable-fixedwidth-autovec-in-streaming-mode -mattr=+sve -force-target-instruction-cost=1 -scalable-vectorization=off -force-vector-interleave=1 -S 2>&1 | FileCheck %s --check-prefix=SC_SVE
; RUN: opt < %s -passes=loop-vectorize -mattr=+sve -force-target-instruction-cost=1 -scalable-vectorization=off -force-vector-interleave=1 -S 2>&1 | FileCheck %s --check-prefix=NO_SC_SVE
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
index 2b2742ca7ccb..63ad98b2d8ab 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
@@ -480,11 +480,10 @@ define void @cond_uniform_load(ptr noalias %dst, ptr noalias readonly %src, ptr
; CHECK-NEXT: [[TMP15:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> zeroinitializer
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[BROADCAST_SPLAT]], i32 4, <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> zeroinitializer
-; CHECK-NEXT: [[TMP18:%.*]] = or <vscale x 4 x i1> [[TMP15]], [[TMP16]]
; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP16]], <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> [[WIDE_MASKED_GATHER]]
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[TMP10]]
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP17]], i32 0
-; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[PREDPHI]], ptr [[TMP19]], i32 4, <vscale x 4 x i1> [[TMP18]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[PREDPHI]], ptr [[TMP19]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP21]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP22:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-known-trip-count.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-known-trip-count.ll
index c3e30f1f81f4..e796e40a7591 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-known-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-known-trip-count.ll
@@ -592,7 +592,41 @@ define dso_local i32 @predicated_test(i32 noundef %0, ptr %glob) #0 {
ret i32 0
}
+; This has a maximum trip count of 4. The codegen is currently much better with <8 x half> vectorization.
+; CHECK-LABEL: arm_q15_to_f16_remainder
+; CHECK: LV: Selecting VF: 8
+define void @arm_q15_to_f16_remainder(ptr nocapture noundef readonly %pSrc, ptr nocapture noundef writeonly noalias %pDst, i32 noundef %blockSize) #0 {
+entry:
+ %rem = and i32 %blockSize, 3
+ %cmp.not5 = icmp eq i32 %rem, 0
+ br i1 %cmp.not5, label %while.end, label %while.body.preheader
+
+while.body.preheader: ; preds = %entry
+ br label %while.body
+
+while.body: ; preds = %while.body.preheader, %while.body
+ %blkCnt.08 = phi i32 [ %dec, %while.body ], [ %rem, %while.body.preheader ]
+ %pIn.07 = phi ptr [ %incdec.ptr, %while.body ], [ %pSrc, %while.body.preheader ]
+ %pDst.addr.06 = phi ptr [ %incdec.ptr2, %while.body ], [ %pDst, %while.body.preheader ]
+ %incdec.ptr = getelementptr inbounds i8, ptr %pIn.07, i32 2
+ %0 = load i16, ptr %pIn.07, align 2
+ %conv1 = sitofp i16 %0 to half
+ %1 = fmul fast half %conv1, 0xH0200
+ %incdec.ptr2 = getelementptr inbounds i8, ptr %pDst.addr.06, i32 2
+ store half %1, ptr %pDst.addr.06, align 2
+ %dec = add nsw i32 %blkCnt.08, -1
+ %cmp.not = icmp eq i32 %dec, 0
+ br i1 %cmp.not, label %while.end.loopexit, label %while.body
+
+while.end.loopexit: ; preds = %while.body
+ br label %while.end
+
+while.end: ; preds = %while.end.loopexit, %entry
+ ret void
+}
+
declare void @llvm.lifetime.start.p0(i64, ptr)
declare void @llvm.lifetime.end.p0(i64, ptr)
-attributes #0 = { "target-features"="+mve" }
+attributes #0 = { "target-features"="+mve.fp" }
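The attribute change from "+mve" to "+mve.fp" accompanies the new test, presumably because arm_q15_to_f16_remainder computes on the half type, which integer-only MVE does not provide (the hunk itself states no rationale). As a standalone sketch of the scalar conversion the remainder loop performs, assuming (as the constant suggests) that 0xH0200 is the half encoding of 2^-15 = 1.0/32768, the usual Q15 scale factor:

define half @q15_to_f16_sketch(i16 %x) {
  %conv = sitofp i16 %x to half           ; widen the Q15 sample to half
  %scaled = fmul fast half %conv, 0xH0200 ; multiply by 2^-15 to rescale into [-1, 1)
  ret half %scaled
}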
diff --git a/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll b/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll
index 7172f0907e77..28c1eef84e25 100644
--- a/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll
+++ b/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll
@@ -22,14 +22,20 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP0]]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8
-; CHECK-NEXT: [[TMP3:%.*]] = add <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <4 x i64> [[TMP3]], ptr [[TMP2]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP4]], align 8
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP5]], align 8
+; CHECK-NEXT: [[TMP6:%.*]] = add <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP7:%.*]] = add <4 x i64> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <4 x i64> [[TMP6]], ptr [[TMP4]], align 8
+; CHECK-NEXT: store <4 x i64> [[TMP7]], ptr [[TMP5]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
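The new LoongArch defaults.ll checks encode an interleave count of 2 at VF 4: two lane bases at offsets 0 and 4, two wide loads, adds, and stores, and an induction step of 8 = VF x IC (an inference from the check lines; the hunk gives no rationale). Schematically:

;   per iteration: lanes [index, index+4) and [index+4, index+8)
;   %index.next = add nuw i64 %index, 8   ; 4 (VF) * 2 (IC)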
diff --git a/llvm/test/Transforms/LoopVectorize/LoongArch/loongarch-interleaved.ll b/llvm/test/Transforms/LoopVectorize/LoongArch/loongarch-interleaved.ll
new file mode 100644
index 000000000000..be9b170491b9
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/LoongArch/loongarch-interleaved.ll
@@ -0,0 +1,39 @@
+; REQUIRES: asserts
+; RUN: opt --passes=loop-vectorize,dce,instcombine --mtriple loongarch64 \
+; RUN: -S < %s 2>&1 | FileCheck %s
+
+; CHECK-LABEL: foo
+; CHECK: %{{.*}} = add {{.*}}, 2
+
+; Function Attrs: nofree norecurse nosync nounwind writeonly
+define dso_local void @foo(i32 signext %n, ptr nocapture %A) local_unnamed_addr #0 {
+entry:
+ %cmp5 = icmp sgt i32 %n, 0
+ br i1 %cmp5, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader: ; preds = %entry
+ %wide.trip.count = zext i32 %n to i64
+ br label %for.body
+
+for.cond.cleanup.loopexit: ; preds = %for.body
+ br label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
+ ret void
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
+ %0 = trunc i64 %indvars.iv to i32
+ store i32 %0, ptr %arrayidx, align 4, !tbaa !4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond.not, label %for.cond.cleanup.loopexit, label %for.body, !llvm.loop !8
+}
+
+!4 = !{!5, !5, i64 0}
+!5 = !{!"int", !6, i64 0}
+!6 = !{!"omnipotent char", !7, i64 0}
+!7 = !{!"Simple C/C++ TBAA"}
+!8 = distinct !{!8, !9}
+!9 = !{!"llvm.loop.mustprogress"}
diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization-profitability.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization-profitability.ll
index b88254e7b678..786197bfdb90 100644
--- a/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization-profitability.ll
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization-profitability.ll
@@ -10,7 +10,7 @@ target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-unknown-linux-gnu"
; Do not vectorize epilogues for loops with minsize attribute
-; CHECK-LABLE: @f1
+; CHECK-LABEL: @f1
; CHECK-NOT: vector.main.loop.iter.check
; CHECK-NOT: vec.epilog.iter.check
; CHECK-NOT: vec.epilog.ph
@@ -48,7 +48,7 @@ for.end: ; preds = %for.end.loopexit, %
}
; Do not vectorize epilogues for loops with optsize attribute
-; CHECK-LABLE: @f2
+; CHECK-LABEL: @f2
; CHECK-NOT: vector.main.loop.iter.check
; CHECK-NOT: vec.epilog.iter.check
; CHECK-NOT: vec.epilog.ph
@@ -86,7 +86,7 @@ for.end: ; preds = %for.end.loopexit, %
}
; Do not vectorize the epilogue for loops with VF less than the default -epilogue-vectorization-minimum-VF of 16.
-; CHECK-MIN-D-LABLE: @f3
+; CHECK-MIN-D-LABEL: @f3
; CHECK-MIN-D-NOT: vector.main.loop.iter.check
; CHECK-MIN-D-NOT: vec.epilog.iter.check
; CHECK-MIN-D-NOT: vec.epilog.ph
@@ -96,7 +96,7 @@ for.end: ; preds = %for.end.loopexit, %
; Specify a smaller minimum VF (via `-epilogue-vectorization-minimum-VF=4`) and
; make sure the epilogue gets vectorized in that case.
-; CHECK-MIN-D-LABLE: @f3
+; CHECK-MIN-4-LABEL: @f3
; CHECK-MIN-4: vector.main.loop.iter.check
; CHECK-MIN-4: vec.epilog.iter.check
; CHECK-MIN-4: vec.epilog.ph
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
new file mode 100644
index 000000000000..e40f51fd7bd7
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
@@ -0,0 +1,70 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -passes=loop-vectorize -force-tail-folding-style=data-with-evl \
+; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s
+
+; Make sure we do not vectorize a loop with a widened int induction.
+define void @test_wide_integer_induction(ptr noalias %a, i64 %N) {
+; CHECK-LABEL: define void @test_wide_integer_induction(
+; CHECK-SAME: ptr noalias [[A:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: store i64 [[IV]], ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i64, ptr %a, i64 %iv
+ store i64 %iv, ptr %arrayidx, align 8
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:
+ ret void
+}
+
+; Make sure we do not vectorize a loop with a widened ptr induction.
+define void @test_wide_ptr_induction(ptr noalias %a, ptr noalias %b, i64 %N) {
+; CHECK-LABEL: define void @test_wide_ptr_induction(
+; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[VECTOR_BODY]] ], [ [[B]], [[VECTOR_PH]] ]
+; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[ADDR]], i64 8
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[EVL_BASED_IV]]
+; CHECK-NEXT: store ptr [[ADDR]], ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw nsw i64 [[EVL_BASED_IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %addr = phi ptr [ %incdec.ptr, %for.body ], [ %b, %entry ]
+ %incdec.ptr = getelementptr inbounds i8, ptr %addr, i64 8
+ %arrayidx = getelementptr inbounds i64, ptr %a, i64 %iv
+ store ptr %addr, ptr %arrayidx, align 8
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:
+ ret void
+}
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
new file mode 100644
index 000000000000..a91f92348ab2
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
@@ -0,0 +1,118 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s
+
+define void @test(ptr %p, i64 %a, i8 %b) {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ptr [[P:%.*]], i64 [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[A]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i8> poison, i8 [[B]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i8> [[BROADCAST_SPLATINSERT1]], <4 x i8> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: br label [[VECTOR_BODY1:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE8:%.*]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE8]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0
+; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[TMP0]], i32 3)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp slt <4 x i32> [[VEC_IND]], <i32 2, i32 2, i32 2, i32 2>
+; CHECK-NEXT: [[TMP2:%.*]] = shl <4 x i64> [[BROADCAST_SPLAT]], <i64 48, i64 48, i64 48, i64 48>
+; CHECK-NEXT: [[TMP3:%.*]] = ashr <4 x i64> [[TMP2]], <i64 52, i64 52, i64 52, i64 52>
+; CHECK-NEXT: [[TMP4:%.*]] = trunc <4 x i64> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP5:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i1> [[TMP1]], <4 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = zext <4 x i8> [[BROADCAST_SPLAT2]] to <4 x i32>
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP5]], <4 x i32> [[TMP6]], <4 x i32> [[TMP4]]
+; CHECK-NEXT: [[TMP7:%.*]] = shl <4 x i32> [[PREDPHI]], <i32 8, i32 8, i32 8, i32 8>
+; CHECK-NEXT: [[TMP8:%.*]] = trunc <4 x i32> [[TMP7]] to <4 x i8>
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 0
+; CHECK-NEXT: br i1 [[TMP9]], label [[PRED_STORE_IF:%.*]], label [[VECTOR_BODY:%.*]]
+; CHECK: pred.store.if:
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i8> [[TMP8]], i32 0
+; CHECK-NEXT: store i8 [[TMP10]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[VECTOR_BODY]]
+; CHECK: pred.store.continue:
+; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 1
+; CHECK-NEXT: br i1 [[TMP11]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4:%.*]]
+; CHECK: pred.store.if3:
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i8> [[TMP8]], i32 1
+; CHECK-NEXT: store i8 [[TMP12]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE4]]
+; CHECK: pred.store.continue4:
+; CHECK-NEXT: [[TMP13:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 2
+; CHECK-NEXT: br i1 [[TMP13]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6:%.*]]
+; CHECK: pred.store.if5:
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i8> [[TMP8]], i32 2
+; CHECK-NEXT: store i8 [[TMP14]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE6]]
+; CHECK: pred.store.continue6:
+; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 3
+; CHECK-NEXT: br i1 [[TMP15]], label [[PRED_STORE_IF7:%.*]], label [[PRED_STORE_CONTINUE8]]
+; CHECK: pred.store.if7:
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i8> [[TMP8]], i32 3
+; CHECK-NEXT: store i8 [[TMP16]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE8]]
+; CHECK: pred.store.continue8:
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
+; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
+; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY1]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 4, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[FOR_COND:%.*]]
+; CHECK: for.cond:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY:%.*]] ]
+; CHECK-NEXT: [[ADD]] = add i32 [[IV]], 1
+; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[IV]], 2
+; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[A]], 48
+; CHECK-NEXT: [[ASHR:%.*]] = ashr i64 [[SHL]], 52
+; CHECK-NEXT: [[TRUNC_I32:%.*]] = trunc i64 [[ASHR]] to i32
+; CHECK-NEXT: br i1 [[CMP_SLT]], label [[COND_FALSE:%.*]], label [[FOR_BODY]]
+; CHECK: cond.false:
+; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[B]] to i32
+; CHECK-NEXT: br label [[FOR_BODY]]
+; CHECK: for.body:
+; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[TRUNC_I32]], [[FOR_COND]] ], [ [[ZEXT]], [[COND_FALSE]] ]
+; CHECK-NEXT: [[SHL_I32:%.*]] = shl i32 [[COND]], 8
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[SHL_I32]] to i8
+; CHECK-NEXT: store i8 [[TRUNC]], ptr [[P]], align 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV]], 2
+; CHECK-NEXT: br i1 [[CMP]], label [[FOR_COND]], label [[EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.cond
+
+for.cond: ; preds = %for.body, %entry
+ %iv = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ %add = add i32 %iv, 1
+ %cmp.slt = icmp slt i32 %iv, 2
+ %shl = shl i64 %a, 48
+ %ashr = ashr i64 %shl, 52
+ %trunc.i32 = trunc i64 %ashr to i32
+ br i1 %cmp.slt, label %cond.false, label %for.body
+
+cond.false: ; preds = %for.cond
+ %zext = zext i8 %b to i32
+ br label %for.body
+
+for.body: ; preds = %cond.false, %for.cond
+ %cond = phi i32 [ %trunc.i32, %for.cond ], [ %zext, %cond.false ]
+ %shl.i32 = shl i32 %cond, 8
+ %trunc = trunc i32 %shl.i32 to i8
+ store i8 %trunc, ptr %p, align 1
+ %cmp = icmp slt i32 %iv, 2
+ br i1 %cmp, label %for.cond, label %exit
+
+exit: ; preds = %for.body
+ ret void
+}
+;.
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
index 1ce4cb928e80..ee70f4aa3585 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
@@ -462,13 +462,10 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; TF-SCALABLE-NEXT: [[TMP12:%.*]] = icmp ugt <vscale x 2 x i64> [[VEC_IND]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 10, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
; TF-SCALABLE-NEXT: [[TMP13:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i1> [[TMP12]], <vscale x 2 x i1> zeroinitializer
; TF-SCALABLE-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> [[BROADCAST_SPLAT]], i32 8, <vscale x 2 x i1> [[TMP13]], <vscale x 2 x i64> poison)
-; TF-SCALABLE-NEXT: [[TMP14:%.*]] = xor <vscale x 2 x i1> [[TMP12]], shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer)
-; TF-SCALABLE-NEXT: [[TMP15:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i1> [[TMP14]], <vscale x 2 x i1> zeroinitializer
-; TF-SCALABLE-NEXT: [[TMP17:%.*]] = or <vscale x 2 x i1> [[TMP13]], [[TMP15]]
; TF-SCALABLE-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP13]], <vscale x 2 x i64> [[WIDE_MASKED_GATHER]], <vscale x 2 x i64> zeroinitializer
; TF-SCALABLE-NEXT: [[TMP16:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[TMP11]]
; TF-SCALABLE-NEXT: [[TMP18:%.*]] = getelementptr inbounds i64, ptr [[TMP16]], i32 0
-; TF-SCALABLE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP18]], i32 8, <vscale x 2 x i1> [[TMP17]])
+; TF-SCALABLE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP18]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP20]]
; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
; TF-SCALABLE-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -510,13 +507,10 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; TF-FIXEDLEN-NEXT: [[TMP1:%.*]] = icmp ugt <4 x i64> [[VEC_IND]], <i64 10, i64 10, i64 10, i64 10>
; TF-FIXEDLEN-NEXT: [[TMP2:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i1> [[TMP1]], <4 x i1> zeroinitializer
; TF-FIXEDLEN-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x i64> @llvm.masked.gather.v4i64.v4p0(<4 x ptr> [[BROADCAST_SPLAT]], i32 8, <4 x i1> [[TMP2]], <4 x i64> poison)
-; TF-FIXEDLEN-NEXT: [[TMP3:%.*]] = xor <4 x i1> [[TMP1]], <i1 true, i1 true, i1 true, i1 true>
-; TF-FIXEDLEN-NEXT: [[TMP4:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i1> [[TMP3]], <4 x i1> zeroinitializer
-; TF-FIXEDLEN-NEXT: [[TMP6:%.*]] = or <4 x i1> [[TMP2]], [[TMP4]]
; TF-FIXEDLEN-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP2]], <4 x i64> [[WIDE_MASKED_GATHER]], <4 x i64> zeroinitializer
; TF-FIXEDLEN-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[TMP0]]
; TF-FIXEDLEN-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[TMP5]], i32 0
-; TF-FIXEDLEN-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[PREDPHI]], ptr [[TMP7]], i32 8, <4 x i1> [[TMP6]])
+; TF-FIXEDLEN-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[PREDPHI]], ptr [[TMP7]], i32 8, <4 x i1> [[ACTIVE_LANE_MASK]])
; TF-FIXEDLEN-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; TF-FIXEDLEN-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], <i64 4, i64 4, i64 4, i64 4>
; TF-FIXEDLEN-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1028
@@ -1296,12 +1290,9 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; TF-SCALABLE-NEXT: [[TMP12:%.*]] = icmp ugt <vscale x 2 x i64> [[VEC_IND]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 10, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
; TF-SCALABLE-NEXT: [[TMP13:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i1> [[TMP12]], <vscale x 2 x i1> zeroinitializer
; TF-SCALABLE-NEXT: call void @llvm.masked.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x ptr> [[BROADCAST_SPLAT2]], i32 8, <vscale x 2 x i1> [[TMP13]])
-; TF-SCALABLE-NEXT: [[TMP15:%.*]] = xor <vscale x 2 x i1> [[TMP12]], shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer)
-; TF-SCALABLE-NEXT: [[TMP16:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i1> [[TMP15]], <vscale x 2 x i1> zeroinitializer
-; TF-SCALABLE-NEXT: [[TMP17:%.*]] = or <vscale x 2 x i1> [[TMP13]], [[TMP16]]
; TF-SCALABLE-NEXT: [[TMP14:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[TMP11]]
; TF-SCALABLE-NEXT: [[TMP18:%.*]] = getelementptr inbounds i64, ptr [[TMP14]], i32 0
-; TF-SCALABLE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP18]], i32 8, <vscale x 2 x i1> [[TMP17]])
+; TF-SCALABLE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP18]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP20]]
; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
; TF-SCALABLE-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -1344,12 +1335,9 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; TF-FIXEDLEN-NEXT: [[TMP1:%.*]] = icmp ugt <4 x i64> [[VEC_IND]], <i64 10, i64 10, i64 10, i64 10>
; TF-FIXEDLEN-NEXT: [[TMP2:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i1> [[TMP1]], <4 x i1> zeroinitializer
; TF-FIXEDLEN-NEXT: call void @llvm.masked.scatter.v4i64.v4p0(<4 x i64> [[BROADCAST_SPLAT]], <4 x ptr> [[BROADCAST_SPLAT2]], i32 8, <4 x i1> [[TMP2]])
-; TF-FIXEDLEN-NEXT: [[TMP4:%.*]] = xor <4 x i1> [[TMP1]], <i1 true, i1 true, i1 true, i1 true>
-; TF-FIXEDLEN-NEXT: [[TMP5:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i1> [[TMP4]], <4 x i1> zeroinitializer
-; TF-FIXEDLEN-NEXT: [[TMP6:%.*]] = or <4 x i1> [[TMP2]], [[TMP5]]
; TF-FIXEDLEN-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[TMP0]]
; TF-FIXEDLEN-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[TMP3]], i32 0
-; TF-FIXEDLEN-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP7]], i32 8, <4 x i1> [[TMP6]])
+; TF-FIXEDLEN-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP7]], i32 8, <4 x i1> [[ACTIVE_LANE_MASK]])
; TF-FIXEDLEN-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; TF-FIXEDLEN-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], <i64 4, i64 4, i64 4, i64 4>
; TF-FIXEDLEN-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1028
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-gather-scatter.ll
index ae01bdd37110..a52da79ee396 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-gather-scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-gather-scatter.ll
@@ -12,66 +12,18 @@
define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %index, i64 %n) {
; IF-EVL-LABEL: @gather_scatter(
; IF-EVL-NEXT: entry:
-; IF-EVL-NEXT: [[TMP0:%.*]] = sub i64 -1, [[N:%.*]]
-; IF-EVL-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 2
-; IF-EVL-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
-; IF-EVL-NEXT: br i1 [[TMP3]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; IF-EVL: vector.ph:
-; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2
-; IF-EVL-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 2
-; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1
-; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP8]]
-; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
-; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 2
-; IF-EVL-NEXT: [[TMP11:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
-; IF-EVL-NEXT: [[TMP12:%.*]] = add <vscale x 2 x i64> [[TMP11]], zeroinitializer
-; IF-EVL-NEXT: [[TMP13:%.*]] = mul <vscale x 2 x i64> [[TMP12]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
-; IF-EVL-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP13]]
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 2
-; IF-EVL-NEXT: [[TMP16:%.*]] = mul i64 1, [[TMP15]]
-; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP16]], i64 0
-; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
-; IF-EVL: vector.body:
-; IF-EVL-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP17:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP17]], i32 2, i1 true)
-; IF-EVL-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[INDEX:%.*]], <vscale x 2 x i64> [[VEC_IND]]
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.vp.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP20]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), i32 [[TMP18]])
-; IF-EVL-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], <vscale x 2 x i64> [[WIDE_MASKED_GATHER]]
-; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 2 x float> @llvm.vp.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> align 4 [[TMP21]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), i32 [[TMP18]])
-; IF-EVL-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[OUT:%.*]], <vscale x 2 x i64> [[WIDE_MASKED_GATHER]]
-; IF-EVL-NEXT: call void @llvm.vp.scatter.nxv2f32.nxv2p0(<vscale x 2 x float> [[WIDE_MASKED_GATHER2]], <vscale x 2 x ptr> align 4 [[TMP22]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), i32 [[TMP18]])
-; IF-EVL-NEXT: [[TMP23:%.*]] = zext i32 [[TMP18]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX1]], [[TMP10]]
-; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; IF-EVL-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IF-EVL-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; IF-EVL: middle.block:
-; IF-EVL-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
-; IF-EVL: scalar.ph:
-; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[INDEX]], i64 [[INDVARS_IV]]
-; IF-EVL-NEXT: [[TMP25:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8
-; IF-EVL-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[TMP25]]
-; IF-EVL-NEXT: [[TMP26:%.*]] = load float, ptr [[ARRAYIDX5]], align 4
-; IF-EVL-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[OUT]], i64 [[TMP25]]
-; IF-EVL-NEXT: store float [[TMP26]], ptr [[ARRAYIDX7]], align 4
+; IF-EVL-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[INDEX:%.*]], i64 [[INDVARS_IV]]
+; IF-EVL-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8
+; IF-EVL-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], i64 [[TMP0]]
+; IF-EVL-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX5]], align 4
+; IF-EVL-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[OUT:%.*]], i64 [[TMP0]]
+; IF-EVL-NEXT: store float [[TMP1]], ptr [[ARRAYIDX7]], align 4
; IF-EVL-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
+; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; IF-EVL: for.end:
; IF-EVL-NEXT: ret void
;
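Unlike the pure mask folds above, this vectorize-force-tail-with-evl-gather-scatter.ll hunk changes the expected shape outright: the vector preheader, EVL-based vector body, middle block, and scalar-ph resume are all dropped, and the test now expects only the original scalar loop. That is, under data-with-evl tail folding this gather/scatter loop is no longer vectorized at all; the diff gives no rationale, but presumably the widened induction feeding the gather is not yet supported in EVL mode.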
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll
new file mode 100644
index 000000000000..07a1cca1bc21
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll
@@ -0,0 +1,189 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -passes=loop-vectorize -mtriple=s390x -mcpu=z14 -S %s | FileCheck %s
+
+define void @test(ptr %p, i40 %a) {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ptr [[P:%.*]], i40 [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <16 x i40> poison, i40 [[A]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <16 x i40> [[BROADCAST_SPLATINSERT1]], <16 x i40> poison, <16 x i32> zeroinitializer
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE32:%.*]] ]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[INDEX]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i32> [[BROADCAST_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
+; CHECK-NEXT: [[VEC_IV:%.*]] = add <16 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP0:%.*]] = icmp ule <16 x i32> [[VEC_IV]], <i32 9, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9>
+; CHECK-NEXT: [[TMP1:%.*]] = shl <16 x i40> [[BROADCAST_SPLAT2]], <i40 24, i40 24, i40 24, i40 24, i40 24, i40 24, i40 24, i40 24, i40 24, i40 24, i40 24, i40 24, i40 24, i40 24, i40 24, i40 24>
+; CHECK-NEXT: [[TMP2:%.*]] = ashr <16 x i40> [[TMP1]], <i40 28, i40 28, i40 28, i40 28, i40 28, i40 28, i40 28, i40 28, i40 28, i40 28, i40 28, i40 28, i40 28, i40 28, i40 28, i40 28>
+; CHECK-NEXT: [[TMP3:%.*]] = trunc <16 x i40> [[TMP2]] to <16 x i32>
+; CHECK-NEXT: [[TMP4:%.*]] = trunc <16 x i32> [[TMP3]] to <16 x i1>
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <16 x i1> [[TMP4]], zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = icmp ult <16 x i1> zeroinitializer, [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = or <16 x i1> [[TMP6]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt <16 x i1> [[TMP7]], zeroinitializer
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <16 x i1> [[TMP0]], i32 0
+; CHECK-NEXT: br i1 [[TMP9]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
+; CHECK: pred.store.if:
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <16 x i1> [[TMP8]], i32 0
+; CHECK-NEXT: store i1 [[TMP10]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
+; CHECK: pred.store.continue:
+; CHECK-NEXT: [[TMP11:%.*]] = extractelement <16 x i1> [[TMP0]], i32 1
+; CHECK-NEXT: br i1 [[TMP11]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4:%.*]]
+; CHECK: pred.store.if3:
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <16 x i1> [[TMP8]], i32 1
+; CHECK-NEXT: store i1 [[TMP12]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE4]]
+; CHECK: pred.store.continue4:
+; CHECK-NEXT: [[TMP13:%.*]] = extractelement <16 x i1> [[TMP0]], i32 2
+; CHECK-NEXT: br i1 [[TMP13]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6:%.*]]
+; CHECK: pred.store.if5:
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <16 x i1> [[TMP8]], i32 2
+; CHECK-NEXT: store i1 [[TMP14]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE6]]
+; CHECK: pred.store.continue6:
+; CHECK-NEXT: [[TMP15:%.*]] = extractelement <16 x i1> [[TMP0]], i32 3
+; CHECK-NEXT: br i1 [[TMP15]], label [[PRED_STORE_IF7:%.*]], label [[PRED_STORE_CONTINUE8:%.*]]
+; CHECK: pred.store.if7:
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <16 x i1> [[TMP8]], i32 3
+; CHECK-NEXT: store i1 [[TMP16]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE8]]
+; CHECK: pred.store.continue8:
+; CHECK-NEXT: [[TMP17:%.*]] = extractelement <16 x i1> [[TMP0]], i32 4
+; CHECK-NEXT: br i1 [[TMP17]], label [[PRED_STORE_IF9:%.*]], label [[PRED_STORE_CONTINUE10:%.*]]
+; CHECK: pred.store.if9:
+; CHECK-NEXT: [[TMP18:%.*]] = extractelement <16 x i1> [[TMP8]], i32 4
+; CHECK-NEXT: store i1 [[TMP18]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE10]]
+; CHECK: pred.store.continue10:
+; CHECK-NEXT: [[TMP19:%.*]] = extractelement <16 x i1> [[TMP0]], i32 5
+; CHECK-NEXT: br i1 [[TMP19]], label [[PRED_STORE_IF11:%.*]], label [[PRED_STORE_CONTINUE12:%.*]]
+; CHECK: pred.store.if11:
+; CHECK-NEXT: [[TMP20:%.*]] = extractelement <16 x i1> [[TMP8]], i32 5
+; CHECK-NEXT: store i1 [[TMP20]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE12]]
+; CHECK: pred.store.continue12:
+; CHECK-NEXT: [[TMP21:%.*]] = extractelement <16 x i1> [[TMP0]], i32 6
+; CHECK-NEXT: br i1 [[TMP21]], label [[PRED_STORE_IF13:%.*]], label [[PRED_STORE_CONTINUE14:%.*]]
+; CHECK: pred.store.if13:
+; CHECK-NEXT: [[TMP22:%.*]] = extractelement <16 x i1> [[TMP8]], i32 6
+; CHECK-NEXT: store i1 [[TMP22]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE14]]
+; CHECK: pred.store.continue14:
+; CHECK-NEXT: [[TMP23:%.*]] = extractelement <16 x i1> [[TMP0]], i32 7
+; CHECK-NEXT: br i1 [[TMP23]], label [[PRED_STORE_IF15:%.*]], label [[PRED_STORE_CONTINUE16:%.*]]
+; CHECK: pred.store.if15:
+; CHECK-NEXT: [[TMP24:%.*]] = extractelement <16 x i1> [[TMP8]], i32 7
+; CHECK-NEXT: store i1 [[TMP24]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE16]]
+; CHECK: pred.store.continue16:
+; CHECK-NEXT: [[TMP25:%.*]] = extractelement <16 x i1> [[TMP0]], i32 8
+; CHECK-NEXT: br i1 [[TMP25]], label [[PRED_STORE_IF17:%.*]], label [[PRED_STORE_CONTINUE18:%.*]]
+; CHECK: pred.store.if17:
+; CHECK-NEXT: [[TMP26:%.*]] = extractelement <16 x i1> [[TMP8]], i32 8
+; CHECK-NEXT: store i1 [[TMP26]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE18]]
+; CHECK: pred.store.continue18:
+; CHECK-NEXT: [[TMP27:%.*]] = extractelement <16 x i1> [[TMP0]], i32 9
+; CHECK-NEXT: br i1 [[TMP27]], label [[PRED_STORE_IF19:%.*]], label [[PRED_STORE_CONTINUE20:%.*]]
+; CHECK: pred.store.if19:
+; CHECK-NEXT: [[TMP28:%.*]] = extractelement <16 x i1> [[TMP8]], i32 9
+; CHECK-NEXT: store i1 [[TMP28]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE20]]
+; CHECK: pred.store.continue20:
+; CHECK-NEXT: [[TMP29:%.*]] = extractelement <16 x i1> [[TMP0]], i32 10
+; CHECK-NEXT: br i1 [[TMP29]], label [[PRED_STORE_IF21:%.*]], label [[PRED_STORE_CONTINUE22:%.*]]
+; CHECK: pred.store.if21:
+; CHECK-NEXT: [[TMP30:%.*]] = extractelement <16 x i1> [[TMP8]], i32 10
+; CHECK-NEXT: store i1 [[TMP30]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE22]]
+; CHECK: pred.store.continue22:
+; CHECK-NEXT: [[TMP31:%.*]] = extractelement <16 x i1> [[TMP0]], i32 11
+; CHECK-NEXT: br i1 [[TMP31]], label [[PRED_STORE_IF23:%.*]], label [[PRED_STORE_CONTINUE24:%.*]]
+; CHECK: pred.store.if23:
+; CHECK-NEXT: [[TMP32:%.*]] = extractelement <16 x i1> [[TMP8]], i32 11
+; CHECK-NEXT: store i1 [[TMP32]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE24]]
+; CHECK: pred.store.continue24:
+; CHECK-NEXT: [[TMP33:%.*]] = extractelement <16 x i1> [[TMP0]], i32 12
+; CHECK-NEXT: br i1 [[TMP33]], label [[PRED_STORE_IF25:%.*]], label [[PRED_STORE_CONTINUE26:%.*]]
+; CHECK: pred.store.if25:
+; CHECK-NEXT: [[TMP34:%.*]] = extractelement <16 x i1> [[TMP8]], i32 12
+; CHECK-NEXT: store i1 [[TMP34]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE26]]
+; CHECK: pred.store.continue26:
+; CHECK-NEXT: [[TMP35:%.*]] = extractelement <16 x i1> [[TMP0]], i32 13
+; CHECK-NEXT: br i1 [[TMP35]], label [[PRED_STORE_IF27:%.*]], label [[PRED_STORE_CONTINUE28:%.*]]
+; CHECK: pred.store.if27:
+; CHECK-NEXT: [[TMP36:%.*]] = extractelement <16 x i1> [[TMP8]], i32 13
+; CHECK-NEXT: store i1 [[TMP36]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE28]]
+; CHECK: pred.store.continue28:
+; CHECK-NEXT: [[TMP37:%.*]] = extractelement <16 x i1> [[TMP0]], i32 14
+; CHECK-NEXT: br i1 [[TMP37]], label [[PRED_STORE_IF29:%.*]], label [[PRED_STORE_CONTINUE30:%.*]]
+; CHECK: pred.store.if29:
+; CHECK-NEXT: [[TMP38:%.*]] = extractelement <16 x i1> [[TMP8]], i32 14
+; CHECK-NEXT: store i1 [[TMP38]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE30]]
+; CHECK: pred.store.continue30:
+; CHECK-NEXT: [[TMP39:%.*]] = extractelement <16 x i1> [[TMP0]], i32 15
+; CHECK-NEXT: br i1 [[TMP39]], label [[PRED_STORE_IF31:%.*]], label [[PRED_STORE_CONTINUE32]]
+; CHECK: pred.store.if31:
+; CHECK-NEXT: [[TMP40:%.*]] = extractelement <16 x i1> [[TMP8]], i32 15
+; CHECK-NEXT: store i1 [[TMP40]], ptr [[P]], align 1
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE32]]
+; CHECK: pred.store.continue32:
+; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 16
+; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 16, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[SHL:%.*]] = shl i40 [[A]], 24
+; CHECK-NEXT: [[ASHR:%.*]] = ashr i40 [[SHL]], 28
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc i40 [[ASHR]] to i32
+; CHECK-NEXT: [[ICMP_EQ:%.*]] = icmp eq i32 [[TRUNC]], 0
+; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[ICMP_EQ]] to i32
+; CHECK-NEXT: [[ICMP_ULT:%.*]] = icmp ult i32 0, [[ZEXT]]
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[ICMP_ULT]], true
+; CHECK-NEXT: [[ICMP_SGT:%.*]] = icmp sgt i1 [[OR]], false
+; CHECK-NEXT: store i1 [[ICMP_SGT]], ptr [[P]], align 1
+; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
+; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[IV_NEXT]], 10
+; CHECK-NEXT: br i1 [[COND]], label [[FOR_BODY]], label [[EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
+ %shl = shl i40 %a, 24
+ %ashr = ashr i40 %shl, 28
+ %trunc = trunc i40 %ashr to i32
+ %icmp.eq = icmp eq i32 %trunc, 0
+ %zext = zext i1 %icmp.eq to i32
+ %icmp.ult = icmp ult i32 0, %zext
+ %or = or i1 %icmp.ult, true
+ %icmp.sgt = icmp sgt i1 %or, false
+ store i1 %icmp.sgt, ptr %p, align 1
+ %iv.next = add i32 %iv, 1
+ %cond = icmp ult i32 %iv.next, 10
+ br i1 %cond, label %for.body, label %exit
+
+exit: ; preds = %for.body
+ ret void
+}
+;.
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+;.
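Each pred.store.if / pred.store.continue pair in the expectations above encodes one scalarized, predicated lane: test that lane's bit of the loop mask, and store that lane's value only if the bit is set. A minimal self-contained sketch of the per-lane shape, with made-up function and value names (lane 0 shown; the test repeats this for all 16 lanes):

define void @store_one_lane(<16 x i1> %mask, <16 x i1> %vals, ptr %p) {
entry:
  ; lane 0's predicate decides whether lane 0's value is stored
  %m0 = extractelement <16 x i1> %mask, i32 0
  br i1 %m0, label %pred.store.if, label %pred.store.continue

pred.store.if:
  %v0 = extractelement <16 x i1> %vals, i32 0
  store i1 %v0, ptr %p, align 1
  br label %pred.store.continue

pred.store.continue:
  ret void
}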
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll b/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll
index 0b16d80a4adb..3d7153e66fc6 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll
@@ -88,7 +88,7 @@ loopexit:
ret void
}
-attributes #0 = { uwtable "target-cpu"="skylake" "target-features"="+sse2,+cx16,+sahf,-tbm,-avx512ifma,-sha,-gfni,-fma4,-vpclmulqdq,+prfchw,+bmi2,-cldemote,+fsgsbase,+xsavec,+popcnt,+aes,-avx512bitalg,+xsaves,-avx512er,-avx512vnni,-avx512vpopcntdq,-clwb,-avx512f,-clzero,-pku,+mmx,-lwp,-rdpid,-xop,+rdseed,-waitpkg,-sse4a,-avx512bw,+clflushopt,+xsave,-avx512vbmi2,-avx512vl,-avx512cd,+avx,-vaes,+rtm,+fma,+bmi,+rdrnd,-mwaitx,+sse4.1,+sse4.2,+avx2,-wbnoinvd,+sse,+lzcnt,+pclmul,-prefetchwt1,+f16c,+ssse3,+sgx,-shstk,+cmov,-avx512vbmi,+movbe,+xsaveopt,-avx512dq,+adx,-avx512pf,+sse3" }
+attributes #0 = { uwtable "target-cpu"="skylake" "target-features"="+sse2,+cx16,+sahf,-tbm,-avx512ifma,-sha,-gfni,-fma4,-vpclmulqdq,+prfchw,+bmi2,-cldemote,+fsgsbase,+xsavec,+popcnt,+aes,-avx512bitalg,+xsaves,-avx512vnni,-avx512vpopcntdq,-clwb,-avx512f,-clzero,-pku,+mmx,-lwp,-rdpid,-xop,+rdseed,-waitpkg,-sse4a,-avx512bw,+clflushopt,+xsave,-avx512vbmi2,-avx512vl,-avx512cd,+avx,-vaes,+rtm,+fma,+bmi,+rdrnd,-mwaitx,+sse4.1,+sse4.2,+avx2,-wbnoinvd,+sse,+lzcnt,+pclmul,+f16c,+ssse3,+sgx,-shstk,+cmov,-avx512vbmi,+movbe,+xsaveopt,-avx512dq,+adx,-avx512pf,+sse3" }
!0 = !{i32 0, i32 2147483646}
!1 = !{}
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr42674.ll b/llvm/test/Transforms/LoopVectorize/X86/pr42674.ll
index 97bb4a2b4db5..1c64359dea24 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr42674.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr42674.ll
@@ -9,7 +9,7 @@
define zeroext i8 @sum() {
; CHECK-LABEL: @sum(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <64 x i8>, ptr getelementptr inbounds ([128 x i8], ptr @bytes, i64 0, i64 64), align 1
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <64 x i8>, ptr getelementptr inbounds (i8, ptr @bytes, i64 64), align 1
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <64 x i8>, ptr @bytes, align 1
; CHECK-NEXT: [[BIN_RDX:%.*]] = add <64 x i8> [[WIDE_LOAD2]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP0:%.*]] = call i8 @llvm.vector.reduce.add.v64i8(<64 x i8> [[BIN_RDX]])
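This update, like the loadforward and LoopVersioning updates further down, is the same mechanical change: constant-folded GEPs are now printed with an i8 source type and an explicit byte offset. Both spellings denote the same address; a standalone illustration with a made-up global (the icmp folds to true):

@g = global [128 x i8] zeroinitializer

define i1 @same_byte_offset() {
  ; 64 i8 elements past @g is the address the old form spelled as
  ; getelementptr inbounds ([128 x i8], ptr @g, i64 0, i64 64)
  %eq = icmp eq ptr getelementptr inbounds (i8, ptr @g, i64 64), getelementptr inbounds ([128 x i8], ptr @g, i64 0, i64 64)
  ret i1 %eq
}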
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr54634.ll b/llvm/test/Transforms/LoopVectorize/X86/pr54634.ll
index 5c9fe54b5521..743ca20f92b4 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr54634.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr54634.ll
@@ -118,7 +118,7 @@ L44: ; preds = %L26
ret ptr addrspace(10) null
}
-attributes #0 = { "target-cpu"="skylake-avx512" "target-features"="+xsaves,+xsavec,+prfchw,+lzcnt,+sahf,+pku,+avx512vl,+avx512bw,+avx512cd,+clwb,+clflushopt,+adx,+avx512dq,+avx512f,+bmi2,+avx2,+bmi,+fsgsbase,+f16c,+avx,+xsave,+aes,+popcnt,+movbe,+sse4.2,+sse4.1,+cx16,+fma,+ssse3,+pclmul,+sse3,-rdrnd,-rtm,-rdseed,-avx512ifma,-avx512pf,-avx512er,-sha,-prefetchwt1,-avx512vbmi,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-amx-tile,-amx-int8,-sse4a,-xop,-lwp,-fma4,-tbm,-mwaitx,-xsaveopt,-clzero,-wbnoinvd,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8" }
+attributes #0 = { "target-cpu"="skylake-avx512" "target-features"="+xsaves,+xsavec,+prfchw,+lzcnt,+sahf,+pku,+avx512vl,+avx512bw,+avx512cd,+clwb,+clflushopt,+adx,+avx512dq,+avx512f,+bmi2,+avx2,+bmi,+fsgsbase,+f16c,+avx,+xsave,+aes,+popcnt,+movbe,+sse4.2,+sse4.1,+cx16,+fma,+ssse3,+pclmul,+sse3,-rdrnd,-rtm,-rdseed,-avx512ifma,-avx512pf,-sha,-avx512vbmi,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-amx-tile,-amx-int8,-sse4a,-xop,-lwp,-fma4,-tbm,-mwaitx,-xsaveopt,-clzero,-wbnoinvd,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8" }
attributes #1 = { inaccessiblemem_or_argmemonly }
attributes #2 = { allocsize(1) }
diff --git a/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll b/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll
index bf2b9e2aef85..ce460f4fe354 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll
@@ -111,4 +111,4 @@ for.body: ; preds = %for.body.preheader,
br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit99
}
-attributes #0 = { norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="knl" "target-features"="+adx,+aes,+avx,+avx2,+avx512cd,+avx512er,+avx512f,+avx512pf,+bmi,+bmi2,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+pclmul,+popcnt,+prefetchwt1,+rdrnd,+rdseed,+rtm,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="broadwell" "target-features"="+adx,+aes,+avx,+avx2,+avx512cd,+avx512f,+bmi,+bmi2,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+evex512,+lzcnt,+mmx,+movbe,+pclmul,+popcnt,+rdrnd,+rdseed,+rtm,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt,-vzeroupper" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/LoopVectorize/outer_loop_test1.ll b/llvm/test/Transforms/LoopVectorize/outer_loop_test1.ll
index 3b5811d2c9f0..40d0a51f9d5e 100644
--- a/llvm/test/Transforms/LoopVectorize/outer_loop_test1.ll
+++ b/llvm/test/Transforms/LoopVectorize/outer_loop_test1.ll
@@ -13,7 +13,7 @@
; }
; }
;
-; RUN: opt -S -passes=loop-vectorize -enable-vplan-native-path -verify-loop-info < %s | FileCheck %s
+; RUN: opt -S -passes=loop-vectorize -enable-vplan-native-path -verify-loop-info -verify-dom-info < %s | FileCheck %s
; CHECK-LABEL: vector.ph:
; CHECK: %[[SplatVal:.*]] = insertelement <4 x i32> poison, i32 %n, i64 0
; CHECK: %[[Splat:.*]] = shufflevector <4 x i32> %[[SplatVal]], <4 x i32> poison, <4 x i32> zeroinitializer
diff --git a/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll b/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll
index b3b6d3ee5509..aebe47c12879 100644
--- a/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll
@@ -39,14 +39,14 @@ define void @f() {
; CHECK: vector.memcheck:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[TMP1]], i64 1
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr @f.e, [[SCEVGEP]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[TMP1]], getelementptr inbounds (i32, ptr @f.e, i64 1)
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[TMP1]], getelementptr inbounds (i8, ptr @f.e, i64 4)
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: store i32 0, ptr @f.e, align 1, !alias.scope !0, !noalias !3
+; CHECK-NEXT: store i32 0, ptr @f.e, align 1, !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
; CHECK-NEXT: store i8 10, ptr [[TMP0]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 500
diff --git a/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll b/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll
index 6ae6645378b3..e58c99dc4bc5 100644
--- a/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll
+++ b/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll
@@ -149,3 +149,104 @@ for.cond: ; preds = %for.body, %entry
for.end: ; preds = %for.cond
ret void
}
+
+@h = global i64 0
+
+define void @implied_wrap_predicate(ptr %A, ptr %B, ptr %C) {
+; CHECK-LABEL: define void @implied_wrap_predicate
+; CHECK-SAME: (ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[A3:%.*]] = ptrtoint ptr [[A]] to i64
+; CHECK-NEXT: [[C2:%.*]] = ptrtoint ptr [[C]] to i64
+; CHECK-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[A3]], 16
+; CHECK-NEXT: [[UMAX4:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP0]], i64 add (i64 ptrtoint (ptr @h to i64), i64 1))
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[UMAX4]], -9
+; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], [[A3]]
+; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
+; CHECK-NEXT: [[TMP4:%.*]] = add nuw nsw i64 [[TMP3]], 1
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP4]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+; CHECK: vector.scevcheck:
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[A1]], 16
+; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP5]], i64 add (i64 ptrtoint (ptr @h to i64), i64 1))
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[UMAX]], -9
+; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], [[A1]]
+; CHECK-NEXT: [[TMP8:%.*]] = lshr i64 [[TMP7]], 3
+; CHECK-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP8]] to i16
+; CHECK-NEXT: [[TMP10:%.*]] = add i16 1, [[TMP9]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ult i16 [[TMP10]], 1
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ugt i64 [[TMP8]], 65535
+; CHECK-NEXT: [[TMP13:%.*]] = or i1 [[TMP11]], [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = trunc i64 [[TMP8]] to i16
+; CHECK-NEXT: [[TMP15:%.*]] = add i16 2, [[TMP14]]
+; CHECK-NEXT: [[TMP16:%.*]] = icmp ult i16 [[TMP15]], 2
+; CHECK-NEXT: [[TMP17:%.*]] = icmp ugt i64 [[TMP8]], 65535
+; CHECK-NEXT: [[TMP18:%.*]] = or i1 [[TMP16]], [[TMP17]]
+; CHECK-NEXT: [[TMP19:%.*]] = or i1 [[TMP13]], [[TMP18]]
+; CHECK-NEXT: br i1 [[TMP19]], label [[SCALAR_PH]], label [[VECTOR_MEMCHECK:%.*]]
+; CHECK: vector.memcheck:
+; CHECK-NEXT: [[TMP20:%.*]] = sub i64 [[C2]], [[A3]]
+; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP20]], 32
+; CHECK-NEXT: br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], 4
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]]
+; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i16
+; CHECK-NEXT: [[IND_END:%.*]] = add i16 1, [[DOTCAST]]
+; CHECK-NEXT: [[IND_END5:%.*]] = add i64 1, [[N_VEC]]
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
+; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr i64, ptr [[A]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr i64, ptr [[TMP22]], i32 0
+; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr [[TMP23]], align 4
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i64, ptr [[C]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i64, ptr [[TMP24]], i32 0
+; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr [[TMP25]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i16 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 1, [[ENTRY:%.*]] ], [ 1, [[VECTOR_SCEVCHECK]] ], [ 1, [[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL6:%.*]] = phi i64 [ [[IND_END5]], [[MIDDLE_BLOCK]] ], [ 1, [[ENTRY]] ], [ 1, [[VECTOR_SCEVCHECK]] ], [ 1, [[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV_EXT:%.*]] = phi i64 [ [[BC_RESUME_VAL6]], [[SCALAR_PH]] ], [ [[IV_EXT_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV_EXT]]
+; CHECK-NEXT: store i64 0, ptr [[GEP_A]], align 4
+; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr i64, ptr [[C]], i64 [[IV_EXT]]
+; CHECK-NEXT: store i64 0, ptr [[GEP_C]], align 4
+; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1
+; CHECK-NEXT: [[IV_EXT_NEXT]] = zext i16 [[IV_NEXT]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV_EXT_NEXT]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt ptr [[GEP]], @h
+; CHECK-NEXT: br i1 [[CMP]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i16 [ 1, %entry ], [ %iv.next, %loop ]
+ %iv.ext = phi i64 [ 1, %entry ], [ %iv.ext.next, %loop ]
+ %gep.A = getelementptr i64, ptr %A, i64 %iv.ext
+ store i64 0, ptr %gep.A
+ %gep.C = getelementptr i64, ptr %C, i64 %iv.ext
+ store i64 0, ptr %gep.C
+ %iv.next = add i16 %iv, 1
+ %iv.ext.next = zext i16 %iv.next to i64
+ %gep = getelementptr i64, ptr %A, i64 %iv.ext.next
+ %cmp = icmp ugt ptr %gep, @h
+ br i1 %cmp, label %exit, label %loop
+
+exit:
+ ret void
+}
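The vector.scevcheck block above guards the narrow i16 induction variable against wrapping while the trip count is computed in i64: for a start value s and backedge-taken count BTC, it recomputes s + trunc(BTC) and bails to the scalar loop if the i16 add wraps (the sum is ult s) or if BTC never fit in 16 bits in the first place. A standalone sketch of one such check for s = 1, with illustrative names:

define i1 @iv_wrap_check(i64 %btc) {
  %narrow = trunc i64 %btc to i16
  %end = add i16 1, %narrow            ; start (1) plus the narrowed BTC
  %wrapped = icmp ult i16 %end, 1      ; the i16 add wrapped past the start
  %too.wide = icmp ugt i64 %btc, 65535 ; BTC does not fit in i16 at all
  %fail = or i1 %wrapped, %too.wide    ; either condition forces the scalar loop
  ret i1 %fail
}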
diff --git a/llvm/test/Transforms/LoopVectorize/uniform-blend.ll b/llvm/test/Transforms/LoopVectorize/uniform-blend.ll
index 19cbcac6090c..f33ec1419b11 100644
--- a/llvm/test/Transforms/LoopVectorize/uniform-blend.ll
+++ b/llvm/test/Transforms/LoopVectorize/uniform-blend.ll
@@ -86,11 +86,8 @@ define void @blend_chain_iv(i1 %c) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ]
; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[MASK1]], <4 x i1> [[MASK1]], <4 x i1> zeroinitializer
-; CHECK-NEXT: [[TMP4:%.*]] = xor <4 x i1> [[MASK1]], <i1 true, i1 true, i1 true, i1 true>
-; CHECK-NEXT: [[TMP5:%.*]] = select <4 x i1> [[MASK1]], <4 x i1> [[TMP4]], <4 x i1> zeroinitializer
-; CHECK-NEXT: [[TMP8:%.*]] = or <4 x i1> [[TMP6]], [[TMP5]]
; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP6]], <4 x i64> [[VEC_IND]], <4 x i64> undef
-; CHECK-NEXT: [[PREDPHI1:%.*]] = select <4 x i1> [[TMP8]], <4 x i64> [[PREDPHI]], <4 x i64> undef
+; CHECK-NEXT: [[PREDPHI1:%.*]] = select <4 x i1> [[MASK1]], <4 x i64> [[PREDPHI]], <4 x i64> undef
; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i64> [[PREDPHI1]], i32 0
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [32 x i16], ptr @dst, i16 0, i64 [[TMP9]]
; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x i64> [[PREDPHI1]], i32 1
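The dropped lines computed or(select(m, m, false), select(m, not m, false)), i.e. (m and m) or (m and not m), which is simply m, so the final blend can key directly off MASK1; the same identity underlies the mask simplifications in the next file and in the vplan-sink-scalars-and-merge.ll changes below. Spelled out as a standalone snippet (not part of the patch):

define <4 x i1> @mask_folds_to_m(<4 x i1> %m) {
  %and1 = select <4 x i1> %m, <4 x i1> %m, <4 x i1> zeroinitializer     ; m && m
  %not.m = xor <4 x i1> %m, <i1 true, i1 true, i1 true, i1 true>        ; !m
  %and2 = select <4 x i1> %m, <4 x i1> %not.m, <4 x i1> zeroinitializer ; m && !m
  %or = or <4 x i1> %and1, %and2                                        ; == m
  ret <4 x i1> %or
}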
diff --git a/llvm/test/Transforms/LoopVectorize/unused-blend-mask-for-first-operand.ll b/llvm/test/Transforms/LoopVectorize/unused-blend-mask-for-first-operand.ll
index 0f7bd3d71feb..d79b4a7cefc2 100644
--- a/llvm/test/Transforms/LoopVectorize/unused-blend-mask-for-first-operand.ll
+++ b/llvm/test/Transforms/LoopVectorize/unused-blend-mask-for-first-operand.ll
@@ -172,8 +172,6 @@ define void @test_not_first_lane_only_wide_compare_incoming_order_swapped(ptr %A
; CHECK: vector.ph:
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i16> poison, i16 [[X]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i16> [[BROADCAST_SPLATINSERT]], <4 x i16> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i16> poison, i16 [[Y]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i16> [[BROADCAST_SPLATINSERT1]], <4 x i16> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -184,14 +182,8 @@ define void @test_not_first_lane_only_wide_compare_incoming_order_swapped(ptr %A
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP2]], align 2
; CHECK-NEXT: [[TMP3:%.*]] = icmp ult <4 x i16> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP4:%.*]] = xor <4 x i1> [[TMP3]], <i1 true, i1 true, i1 true, i1 true>
-; CHECK-NEXT: [[TMP5:%.*]] = icmp ult <4 x i16> [[WIDE_LOAD]], [[BROADCAST_SPLAT2]]
-; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[TMP4]], <4 x i1> [[TMP5]], <4 x i1> zeroinitializer
-; CHECK-NEXT: [[TMP7:%.*]] = xor <4 x i1> [[TMP5]], <i1 true, i1 true, i1 true, i1 true>
-; CHECK-NEXT: [[TMP8:%.*]] = select <4 x i1> [[TMP4]], <4 x i1> [[TMP7]], <4 x i1> zeroinitializer
-; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i1> [[TMP6]], i32 0
-; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i1> [[TMP8]], i32 0
-; CHECK-NEXT: [[TMP11:%.*]] = or i1 [[TMP9]], [[TMP10]]
-; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[TMP11]], ptr [[B]], ptr poison
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i1> [[TMP4]], i32 0
+; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[TMP9]], ptr [[B]], ptr poison
; CHECK-NEXT: [[TMP12:%.*]] = load i16, ptr [[PREDPHI]], align 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <4 x i16> poison, i16 [[TMP12]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <4 x i16> [[BROADCAST_SPLATINSERT3]], <4 x i16> poison, <4 x i32> zeroinitializer
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing.ll
index c95f94bddf5e..c84191665d94 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-printing.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-printing.ll
@@ -882,6 +882,58 @@ exit:
ret void
}
+define i16 @print_first_order_recurrence_and_result(ptr %ptr) {
+; CHECK-LABEL: 'print_first_order_recurrence_and_result'
+; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' {
+; CHECK-NEXT: Live-in vp<[[VFxUF:%.+]]> = VF * UF
+; CHECK-NEXT: Live-in vp<[[VTC:%.+]]> = vector-trip-count
+; CHECK-NEXT: Live-in ir<1000> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: Successor(s): vector loop
+; CHECK-EMPTY:
+; CHECK-NEXT: <x1> vector loop: {
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
+; CHECK-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%for.1> = phi ir<22>, ir<%for.1.next>
+; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
+; CHECK-NEXT: CLONE ir<%gep.ptr> = getelementptr inbounds ir<%ptr>, vp<[[STEPS]]>
+; CHECK-NEXT: vp<[[VEC_PTR:%.+]]> = vector-pointer ir<%gep.ptr>
+; CHECK-NEXT: WIDEN ir<%for.1.next> = load vp<[[VEC_PTR]]>
+; CHECK-NEXT: EMIT vp<[[FOR1_SPLICE:%.+]]> = first-order splice ir<%for.1>, ir<%for.1.next>
+; CHECK-NEXT: WIDEN ir<%add> = add vp<[[FOR1_SPLICE]]>, ir<1>
+; CHECK-NEXT: vp<[[VEC_PTR2:%.+]]> = vector-pointer ir<%gep.ptr>
+; CHECK-NEXT: WIDEN store vp<[[VEC_PTR2]]>, ir<%add>
+; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = add nuw vp<[[CAN_IV]]>, vp<[[VFxUF]]>
+; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]>, vp<[[VTC]]>
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK-NEXT: Successor(s): middle.block
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: No successors
+; CHECK-EMPTY:
+; CHECK-NEXT: Live-out i16 %for.1.lcssa = vp<[[FOR1_SPLICE]]>
+; CHECK-NEXT: }
+;
+entry:
+ br label %loop
+
+loop:
+ %for.1 = phi i16 [ 22, %entry ], [ %for.1.next, %loop ]
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %iv.next = add nuw nsw i64 %iv, 1
+ %gep.ptr = getelementptr inbounds i16, ptr %ptr, i64 %iv
+ %for.1.next = load i16, ptr %gep.ptr, align 2
+ %add = add i16 %for.1, 1
+ store i16 %add, ptr %gep.ptr
+ %exitcond.not = icmp eq i64 %iv.next, 1000
+ br i1 %exitcond.not, label %exit, label %loop
+
+exit:
+ ret i16 %for.1
+}
+
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4}
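The first-order splice printed in the new test above concatenates the recurrence vector from the previous vector iteration with the current one and takes a window shifted by one lane, which for VF=4 is a shufflevector with mask <3,4,5,6>. A standalone sketch with illustrative names:

define <4 x i16> @first_order_splice(<4 x i16> %prev, <4 x i16> %cur) {
  ; lanes 3,4,5,6 of prev:cur == [ prev[3], cur[0], cur[1], cur[2] ]
  %splice = shufflevector <4 x i16> %prev, <4 x i16> %cur, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
  ret <4 x i16> %splice
}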
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll b/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll
index 1e60e57a5409..ae5879bb2bae 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll
@@ -361,15 +361,12 @@ define void @pred_cfg1(i32 %k, i32 %j) {
; CHECK-NEXT: Successor(s): then.0.0
; CHECK-EMPTY:
; CHECK-NEXT: then.0.0:
-; CHECK-NEXT: EMIT vp<[[NOT:%.+]]> = not ir<%c.1>
-; CHECK-NEXT: EMIT vp<[[MASK3:%.+]]> = logical-and vp<[[MASK1]]>, vp<[[NOT]]>
-; CHECK-NEXT: EMIT vp<[[OR:%.+]]> = or vp<[[MASK2]]>, vp<[[MASK3]]>
; CHECK-NEXT: BLEND ir<%p> = ir<0> vp<[[PRED]]>/vp<[[MASK2]]>
; CHECK-NEXT: Successor(s): pred.store
; CHECK-EMPTY:
; CHECK-NEXT: <xVFxUF> pred.store: {
; CHECK-NEXT: pred.store.entry:
-; CHECK-NEXT: BRANCH-ON-MASK vp<[[OR]]>
+; CHECK-NEXT: BRANCH-ON-MASK vp<[[MASK1]]>
; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
@@ -462,16 +459,13 @@ define void @pred_cfg2(i32 %k, i32 %j) {
; CHECK-NEXT: Successor(s): then.0.0
; CHECK-EMPTY:
; CHECK-NEXT: then.0.0:
-; CHECK-NEXT: EMIT vp<[[NOT:%.+]]> = not ir<%c.0>
-; CHECK-NEXT: EMIT vp<[[MASK3:%.+]]> = logical-and vp<[[MASK1]]>, vp<[[NOT]]>
-; CHECK-NEXT: EMIT vp<[[OR:%.+]]> = or vp<[[MASK2]]>, vp<[[MASK3]]>
; CHECK-NEXT: BLEND ir<%p> = ir<0> vp<[[PRED]]>/vp<[[MASK2]]>
-; CHECK-NEXT: EMIT vp<[[MASK4:%.+]]> = logical-and vp<[[OR]]>, ir<%c.1>
+; CHECK-NEXT: EMIT vp<[[MASK3:%.+]]> = logical-and vp<[[MASK1]]>, ir<%c.1>
; CHECK-NEXT: Successor(s): pred.store
; CHECK-EMPTY:
; CHECK-NEXT: <xVFxUF> pred.store: {
; CHECK-NEXT: pred.store.entry:
-; CHECK-NEXT: BRANCH-ON-MASK vp<[[MASK4]]>
+; CHECK-NEXT: BRANCH-ON-MASK vp<[[MASK3]]>
; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
@@ -570,16 +564,13 @@ define void @pred_cfg3(i32 %k, i32 %j) {
; CHECK-NEXT: Successor(s): then.0.0
; CHECK-EMPTY:
; CHECK-NEXT: then.0.0:
-; CHECK-NEXT: EMIT vp<[[NOT:%.+]]> = not ir<%c.0>
-; CHECK-NEXT: EMIT vp<[[MASK3:%.+]]> = logical-and vp<[[MASK1]]>, vp<[[NOT]]>
-; CHECK-NEXT: EMIT vp<[[MASK4:%.+]]> = or vp<[[MASK2]]>, vp<[[MASK3]]>
; CHECK-NEXT: BLEND ir<%p> = ir<0> vp<[[PRED]]>/vp<[[MASK2]]>
-; CHECK-NEXT: EMIT vp<[[MASK5:%.+]]> = logical-and vp<[[MASK4]]>, ir<%c.0>
+; CHECK-NEXT: EMIT vp<[[MASK3:%.+]]> = logical-and vp<[[MASK1]]>, ir<%c.0>
; CHECK-NEXT: Successor(s): pred.store
; CHECK-EMPTY:
; CHECK-NEXT: <xVFxUF> pred.store: {
; CHECK-NEXT: pred.store.entry:
-; CHECK-NEXT: BRANCH-ON-MASK vp<[[MASK5]]>
+; CHECK-NEXT: BRANCH-ON-MASK vp<[[MASK3]]>
; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
diff --git a/llvm/test/Transforms/LoopVersioning/add-phi-update-users.ll b/llvm/test/Transforms/LoopVersioning/add-phi-update-users.ll
index d9050700001a..e326064175d1 100644
--- a/llvm/test/Transforms/LoopVersioning/add-phi-update-users.ll
+++ b/llvm/test/Transforms/LoopVersioning/add-phi-update-users.ll
@@ -27,7 +27,7 @@ define void @f1() {
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[T0]], i64 2
; CHECK-NEXT: br label [[FOR_BODY_LVER_CHECK:%.*]]
; CHECK: for.body.lver.check:
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[T0]], getelementptr inbounds (i16, ptr @b, i64 1)
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[T0]], getelementptr inbounds (i8, ptr @b, i64 2)
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr @b, [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
@@ -44,8 +44,8 @@ define void @f1() {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[T1:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[T2:%.*]] = load i16, ptr @b, align 1, !tbaa [[TBAA2]], !alias.scope !6
-; CHECK-NEXT: store i16 [[T2]], ptr [[T0]], align 1, !tbaa [[TBAA2]], !alias.scope !9, !noalias !6
+; CHECK-NEXT: [[T2:%.*]] = load i16, ptr @b, align 1, !tbaa [[TBAA2]], !alias.scope [[META6:![0-9]+]]
+; CHECK-NEXT: store i16 [[T2]], ptr [[T0]], align 1, !tbaa [[TBAA2]], !alias.scope [[META9:![0-9]+]], !noalias [[META6]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[T1]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INC]], 3
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT1:%.*]]
diff --git a/llvm/test/Transforms/LoopVersioning/bound-check-partially-known.ll b/llvm/test/Transforms/LoopVersioning/bound-check-partially-known.ll
index 70c12a2d62ec..2fb58f5980ec 100644
--- a/llvm/test/Transforms/LoopVersioning/bound-check-partially-known.ll
+++ b/llvm/test/Transforms/LoopVersioning/bound-check-partially-known.ll
@@ -18,14 +18,14 @@ define void @bound_check_partially_known_1(i32 %N) {
; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr @global, i64 [[TMP2]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr @global, [[SCEVGEP1]]
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP2]]
-; CHECK-NEXT: [[BOUND13:%.*]] = icmp ult ptr getelementptr inbounds ([[STRUCT_FOO:%.*]], ptr @global, i64 0, i32 1, i64 0), [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND13:%.*]] = icmp ult ptr getelementptr inbounds (i8, ptr @global, i64 256000), [[SCEVGEP1]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND13]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[LOOP_PH_LVER_ORIG:%.*]], label [[LOOP_PH:%.*]]
; CHECK: loop.ph.lver.orig:
; CHECK-NEXT: br label [[LOOP_LVER_ORIG:%.*]]
; CHECK: loop.lver.orig:
; CHECK-NEXT: [[IV_LVER_ORIG:%.*]] = phi i64 [ 0, [[LOOP_PH_LVER_ORIG]] ], [ [[IV_NEXT_LVER_ORIG:%.*]], [[LOOP_LVER_ORIG]] ]
-; CHECK-NEXT: [[GEP_0_IV_LVER_ORIG:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @global, i64 0, i32 0, i64 [[IV_LVER_ORIG]]
+; CHECK-NEXT: [[GEP_0_IV_LVER_ORIG:%.*]] = getelementptr inbounds [[STRUCT_FOO:%.*]], ptr @global, i64 0, i32 0, i64 [[IV_LVER_ORIG]]
; CHECK-NEXT: [[L_0_LVER_ORIG:%.*]] = load double, ptr [[GEP_0_IV_LVER_ORIG]], align 8
; CHECK-NEXT: [[GEP_1_IV_LVER_ORIG:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @global, i64 0, i32 1, i64 [[IV_LVER_ORIG]]
; CHECK-NEXT: [[L_1_LVER_ORIG:%.*]] = load double, ptr [[GEP_1_IV_LVER_ORIG]], align 8
@@ -41,13 +41,13 @@ define void @bound_check_partially_known_1(i32 %N) {
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[LOOP_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[GEP_0_IV:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @global, i64 0, i32 0, i64 [[IV]]
-; CHECK-NEXT: [[L_0:%.*]] = load double, ptr [[GEP_0_IV]], align 8, !alias.scope !0
+; CHECK-NEXT: [[L_0:%.*]] = load double, ptr [[GEP_0_IV]], align 8, !alias.scope [[META0:![0-9]+]]
; CHECK-NEXT: [[GEP_1_IV:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @global, i64 0, i32 1, i64 [[IV]]
-; CHECK-NEXT: [[L_1:%.*]] = load double, ptr [[GEP_1_IV]], align 8, !alias.scope !3
+; CHECK-NEXT: [[L_1:%.*]] = load double, ptr [[GEP_1_IV]], align 8, !alias.scope [[META3:![0-9]+]]
; CHECK-NEXT: [[ADD:%.*]] = fadd double [[L_0]], [[L_1]]
; CHECK-NEXT: [[IV_N:%.*]] = add nuw nsw i64 [[IV]], [[N_EXT]]
; CHECK-NEXT: [[GEP_0_IV_N:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @global, i64 0, i32 0, i64 [[IV_N]]
-; CHECK-NEXT: store double [[ADD]], ptr [[GEP_0_IV_N]], align 8, !alias.scope !5, !noalias !7
+; CHECK-NEXT: store double [[ADD]], ptr [[GEP_0_IV_N]], align 8, !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_EXT]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT_LOOPEXIT4:%.*]], label [[LOOP]]
diff --git a/llvm/test/Transforms/LowerSwitch/93152.ll b/llvm/test/Transforms/LowerSwitch/93152.ll
new file mode 100644
index 000000000000..f796b8c7c782
--- /dev/null
+++ b/llvm/test/Transforms/LowerSwitch/93152.ll
@@ -0,0 +1,97 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=lower-switch -S | FileCheck %s
+define void @i3_range_4(i3 %0) {
+; CHECK-LABEL: define void @i3_range_4(
+; CHECK-SAME: i3 [[TMP0:%.*]]) {
+; CHECK-NEXT: [[BB_0:.*:]]
+; CHECK-NEXT: br label %[[LEAFBLOCK:.*]]
+; CHECK: [[LEAFBLOCK]]:
+; CHECK-NEXT: [[DOTOFF:%.*]] = add i3 [[TMP0]], 2
+; CHECK-NEXT: [[SWITCHLEAF:%.*]] = icmp ule i3 [[DOTOFF]], -4
+; CHECK-NEXT: br i1 [[SWITCHLEAF]], label %[[BB_1:.*]], label %[[BB_2:.*]]
+; CHECK: [[BB_1]]:
+; CHECK-NEXT: [[TMP:%.*]] = phi i3 [ 0, %[[LEAFBLOCK]] ]
+; CHECK-NEXT: br label %[[BB_2]]
+; CHECK: [[BB_2]]:
+; CHECK-NEXT: ret void
+;
+bb.0:
+ switch i3 %0, label %bb.2 [
+ i3 -1, label %bb.1
+ i3 -2, label %bb.1
+ i3 2, label %bb.1
+ i3 1, label %bb.1
+ i3 0, label %bb.1
+ ]
+
+bb.1: ; preds = %bb.0, %bb.0, %bb.0, %bb.0, %bb.0
+ %tmp = phi i3 [ 0, %bb.0 ], [ 0, %bb.0 ], [ 0, %bb.0 ], [ 0, %bb.0 ], [ 0, %bb.0 ]
+ br label %bb.2
+
+bb.2: ; preds = %bb.1, %bb.0
+ ret void
+}
+
+define void @i3_range_6(i3 %0) {
+; CHECK-LABEL: define void @i3_range_6(
+; CHECK-SAME: i3 [[TMP0:%.*]]) {
+; CHECK-NEXT: [[BB_0:.*:]]
+; CHECK-NEXT: br label %[[LEAFBLOCK:.*]]
+; CHECK: [[LEAFBLOCK]]:
+; CHECK-NEXT: [[SWITCHLEAF:%.*]] = icmp sge i3 [[TMP0]], -3
+; CHECK-NEXT: br i1 [[SWITCHLEAF]], label %[[BB_1:.*]], label %[[BB_2:.*]]
+; CHECK: [[BB_1]]:
+; CHECK-NEXT: [[TMP:%.*]] = phi i3 [ 0, %[[LEAFBLOCK]] ]
+; CHECK-NEXT: br label %[[BB_2]]
+; CHECK: [[BB_2]]:
+; CHECK-NEXT: ret void
+;
+bb.0:
+ switch i3 %0, label %bb.2 [
+ i3 -1, label %bb.1
+ i3 -2, label %bb.1
+ i3 -3, label %bb.1
+ i3 3, label %bb.1
+ i3 2, label %bb.1
+ i3 1, label %bb.1
+ i3 0, label %bb.1
+ ]
+
+bb.1: ; preds = %bb.0, %bb.0, %bb.0, %bb.0, %bb.0, %bb.0, %bb.0
+ %tmp = phi i3 [ 0, %bb.0 ], [ 0, %bb.0 ], [ 0, %bb.0 ], [ 0, %bb.0 ], [ 0, %bb.0 ], [ 0, %bb.0 ], [ 0, %bb.0 ]
+ br label %bb.2
+
+bb.2: ; preds = %bb.1, %bb.0
+ ret void
+}
+
+define void @i3_range_7(i3 %0) {
+; CHECK-LABEL: define void @i3_range_7(
+; CHECK-SAME: i3 [[TMP0:%.*]]) {
+; CHECK-NEXT: [[BB_0:.*:]]
+; CHECK-NEXT: br label %[[BB_1:.*]]
+; CHECK: [[BB_1]]:
+; CHECK-NEXT: br label %[[BB_2:.*]]
+; CHECK: [[BB_2]]:
+; CHECK-NEXT: ret void
+;
+bb.0:
+ switch i3 %0, label %bb.2 [
+ i3 -1, label %bb.1
+ i3 -2, label %bb.1
+ i3 -3, label %bb.1
+ i3 -4, label %bb.1
+ i3 3, label %bb.1
+ i3 2, label %bb.1
+ i3 1, label %bb.1
+ i3 0, label %bb.1
+ ]
+
+bb.1: ; preds = %bb.0, %bb.0, %bb.0, %bb.0, %bb.0, %bb.0, %bb.0, %bb.0
+ %tmp = phi i3 [ 0, %bb.0 ], [ 0, %bb.0 ], [ 0, %bb.0 ], [ 0, %bb.0 ], [ 0, %bb.0 ], [ 0, %bb.0 ], [ 0, %bb.0 ], [ 0, %bb.0 ]
+ br label %bb.2
+
+bb.2: ; preds = %bb.1, %bb.0
+ ret void
+}
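The three functions above pin down how -passes=lower-switch collapses a contiguous case range over i3: an offset add plus a single unsigned compare in the general case (i3_range_4), one signed compare when the range is anchored at an end of the type (i3_range_6), and no compare at all once every value is covered (i3_range_7). The general shape on a wider type, as a standalone input the same pass would reduce to (x - 3) ule 3 (function name illustrative):

define i1 @contiguous_cases(i8 %x) {
bb.0:
  switch i8 %x, label %no [
    i8 3, label %yes
    i8 4, label %yes
    i8 5, label %yes
    i8 6, label %yes
  ]

yes:
  ret i1 true

no:
  ret i1 false
}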
diff --git a/llvm/test/Transforms/LowerTypeTests/cfi-unwind-direct-call.ll b/llvm/test/Transforms/LowerTypeTests/cfi-unwind-direct-call.ll
index 3e1f8b97e98b..4d5055cc5a76 100644
--- a/llvm/test/Transforms/LowerTypeTests/cfi-unwind-direct-call.ll
+++ b/llvm/test/Transforms/LowerTypeTests/cfi-unwind-direct-call.ll
@@ -65,7 +65,7 @@ lpad: ; preds = %cfi.cont
%1 = landingpad { ptr, i32 }
catch ptr @_ZTIi
%2 = extractvalue { ptr, i32 } %1, 1
- %3 = tail call i32 @llvm.eh.typeid.for(ptr nonnull @_ZTIi) #5
+ %3 = tail call i32 @llvm.eh.typeid.for.p0(ptr nonnull @_ZTIi) #5
%matches = icmp eq i32 %2, %3
br i1 %matches, label %catch, label %eh.resume
@@ -90,7 +90,7 @@ declare void @__cfi_slowpath(i64, ptr) local_unnamed_addr
declare i32 @__gxx_personality_v0(...)
; Function Attrs: nofree nosync nounwind memory(none)
-declare i32 @llvm.eh.typeid.for(ptr) #2
+declare i32 @llvm.eh.typeid.for.p0(ptr) #2
declare ptr @__cxa_begin_catch(ptr) local_unnamed_addr
@@ -181,7 +181,7 @@ attributes #8 = { noreturn nounwind }
; CHECK-NEXT: [[TMP0:%.*]] = landingpad { ptr, i32 }
; CHECK-NEXT: catch ptr @_ZTIi
; CHECK-NEXT: [[TMP1:%.*]] = extractvalue { ptr, i32 } [[TMP0]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = tail call i32 @llvm.eh.typeid.for(ptr nonnull @_ZTIi) #[[ATTR6]]
+; CHECK-NEXT: [[TMP2:%.*]] = tail call i32 @llvm.eh.typeid.for.p0(ptr nonnull @_ZTIi) #[[ATTR6]]
; CHECK-NEXT: [[MATCHES:%.*]] = icmp eq i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT: br i1 [[MATCHES]], label [[CATCH:%.*]], label [[EH_RESUME:%.*]]
; CHECK: catch:
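The rename from llvm.eh.typeid.for to llvm.eh.typeid.for.p0 here (and in the NewGVN test below) is ordinary intrinsic overload mangling: the intrinsic is now overloaded on its pointer argument, and .p0 names a pointer in address space 0. Minimal usage, with an illustrative wrapper name:

declare i32 @llvm.eh.typeid.for.p0(ptr)

define i32 @type_id_of(ptr %rtti) {
  ; returns the small integer the landing pad's selector value is compared against
  %id = tail call i32 @llvm.eh.typeid.for.p0(ptr %rtti)
  ret i32 %id
}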
diff --git a/llvm/test/Transforms/MemProfContextDisambiguation/tailcall-nonunique.ll b/llvm/test/Transforms/MemProfContextDisambiguation/tailcall-nonunique.ll
index b49c9a139cfc..985c381ad42f 100644
--- a/llvm/test/Transforms/MemProfContextDisambiguation/tailcall-nonunique.ll
+++ b/llvm/test/Transforms/MemProfContextDisambiguation/tailcall-nonunique.ll
@@ -9,10 +9,7 @@
; RUN: -stats -debug %s -S 2>&1 | FileCheck %s --check-prefix=STATS \
; RUN: --check-prefix=IR --check-prefix=DEBUG
-; DEBUG: Not found through unique tail call chain: _Z3barv from main that actually called _Z3foob (found multiple possible chains)
-; DEBUG: Not found through unique tail call chain: _Z3barv from main that actually called _Z3foob (found multiple possible chains)
-; DEBUG: Not found through unique tail call chain: _Z3barv from main that actually called _Z3foob (found multiple possible chains)
-; DEBUG: Not found through unique tail call chain: _Z3barv from main that actually called _Z3foob (found multiple possible chains)
+; DEBUG: Not found through unique tail call chain: _Z3barv from main that actually called xyz (found multiple possible chains)
;; Check that all calls in the IR are to the original functions, leading to a
;; non-cold operator new call.
@@ -91,39 +88,37 @@ return: ; preds = %if.else, %if.then
}
; Function Attrs: noinline
-; IR-LABEL: @main()
-define dso_local i32 @main() local_unnamed_addr #0 {
-delete.end13:
+; IR-LABEL: @xyz()
+define dso_local i32 @xyz() local_unnamed_addr #0 {
; IR: call ptr @_Z3foob(i1 true)
- %call = tail call ptr @_Z3foob(i1 true), !callsite !10
+ %call = tail call ptr @_Z3foob(i1 true)
; IR: call ptr @_Z3foob(i1 true)
- %call1 = tail call ptr @_Z3foob(i1 true), !callsite !11
+ %call1 = tail call ptr @_Z3foob(i1 true)
; IR: call ptr @_Z3foob(i1 false)
- %call2 = tail call ptr @_Z3foob(i1 false), !callsite !12
+ %call2 = tail call ptr @_Z3foob(i1 false)
; IR: call ptr @_Z3foob(i1 false)
- %call3 = tail call ptr @_Z3foob(i1 false), !callsite !13
+ %call3 = tail call ptr @_Z3foob(i1 false)
+ ret i32 0
+}
+
+define dso_local i32 @main() local_unnamed_addr #0 {
+ ; IR: call i32 @xyz()
+ %call1 = tail call i32 @xyz(), !callsite !11
ret i32 0
}
; IR: attributes #[[NOTCOLD]] = { builtin allocsize(0) "memprof"="notcold" }
-; STATS: 4 memprof-context-disambiguation - Number of profiled callees found via multiple tail call chains
+; STATS: 1 memprof-context-disambiguation - Number of profiled callees found via multiple tail call chains
attributes #0 = { noinline }
attributes #1 = { nobuiltin allocsize(0) }
attributes #2 = { builtin allocsize(0) }
-!0 = !{!1, !3, !5, !7}
-!1 = !{!2, !"notcold"}
-!2 = !{i64 3186456655321080972, i64 6307901912192269588}
-!3 = !{!4, !"cold"}
-!4 = !{i64 3186456655321080972, i64 6792096022461663180}
+!0 = !{!5, !7}
!5 = !{!6, !"notcold"}
!6 = !{i64 3186456655321080972, i64 8632435727821051414}
!7 = !{!8, !"cold"}
!8 = !{i64 3186456655321080972, i64 -3421689549917153178}
!9 = !{i64 3186456655321080972}
-!10 = !{i64 8632435727821051414}
!11 = !{i64 -3421689549917153178}
-!12 = !{i64 6307901912192269588}
-!13 = !{i64 6792096022461663180}
diff --git a/llvm/test/Transforms/NaryReassociate/preserving-debugloc-add-mul.ll b/llvm/test/Transforms/NaryReassociate/preserving-debugloc-add-mul.ll
new file mode 100644
index 000000000000..cc66d0cd3710
--- /dev/null
+++ b/llvm/test/Transforms/NaryReassociate/preserving-debugloc-add-mul.ll
@@ -0,0 +1,69 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=nary-reassociate -S | FileCheck %s
+
+; Test that NaryReassociate's tryReassociatedBinaryOp() propagates the
+; debug location from the original binary operators it replaces (`%3` in
+; both `@add_reassociate` and `@mul_reassociate`) to the new `add` and
+; `mul` instructions.
+
+define void @add_reassociate(i32 %a, i32 %b, i32 %c) !dbg !5 {
+; CHECK-LABEL: define void @add_reassociate(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) !dbg [[DBG5:![0-9]+]] {
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A]], [[C]]
+; CHECK-NEXT: call void @foo(i32 [[TMP1]])
+; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], [[B]], !dbg [[DBG8:![0-9]+]]
+; CHECK-NEXT: call void @foo(i32 [[TMP2]])
+; CHECK-NEXT: ret void
+;
+ %1 = add i32 %a, %c
+ call void @foo(i32 %1)
+ %2 = add i32 %b, %c
+ %3 = add i32 %a, %2, !dbg !11
+ call void @foo(i32 %3)
+ ret void
+}
+
+define void @mul_reassociate(i32 %a, i32 %b, i32 %c) !dbg !14 {
+; CHECK-LABEL: define void @mul_reassociate(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) !dbg [[DBG9:![0-9]+]] {
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[A]], [[C]]
+; CHECK-NEXT: call void @foo(i32 [[TMP1]])
+; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[TMP1]], [[B]], !dbg [[DBG10:![0-9]+]]
+; CHECK-NEXT: call void @foo(i32 [[TMP2]])
+; CHECK-NEXT: ret void
+;
+ %1 = mul i32 %a, %c
+ call void @foo(i32 %1)
+ %2 = mul i32 %a, %b
+ %3 = mul i32 %2, %c, !dbg !18
+ call void @foo(i32 %3)
+ ret void
+}
+
+declare void @foo(i32)
+
+!llvm.dbg.cu = !{!0}
+!llvm.debugify = !{!2, !3}
+!llvm.module.flags = !{!4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug)
+!1 = !DIFile(filename: "test.ll", directory: "/")
+!2 = !{i32 12}
+!3 = !{i32 0}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = distinct !DISubprogram(name: "add_reassociate", linkageName: "add_reassociate", scope: null, file: !1, line: 1, type: !6, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!6 = !DISubroutineType(types: !7)
+!7 = !{}
+!11 = !DILocation(line: 4, column: 1, scope: !5)
+!14 = distinct !DISubprogram(name: "mul_reassociate", linkageName: "mul_reassociate", scope: null, file: !1, line: 7, type: !6, scopeLine: 7, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!18 = !DILocation(line: 10, column: 1, scope: !14)
+
+;.
+; CHECK: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C, file: [[META1:![0-9]+]], producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug)
+; CHECK: [[META1]] = !DIFile(filename: "test.ll", directory: {{.*}})
+; CHECK: [[DBG5]] = distinct !DISubprogram(name: "add_reassociate", linkageName: "add_reassociate", scope: null, file: [[META1]], line: 1, type: [[META6:![0-9]+]], scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]])
+; CHECK: [[META6]] = !DISubroutineType(types: [[META7:![0-9]+]])
+; CHECK: [[META7]] = !{}
+; CHECK: [[DBG8]] = !DILocation(line: 4, column: 1, scope: [[DBG5]])
+; CHECK: [[DBG9]] = distinct !DISubprogram(name: "mul_reassociate", linkageName: "mul_reassociate", scope: null, file: [[META1]], line: 7, type: [[META6]], scopeLine: 7, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]])
+; CHECK: [[DBG10]] = !DILocation(line: 10, column: 1, scope: [[DBG9]])
+;.
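Independent of the !dbg bookkeeping the CHECK lines verify, the rewrite itself works like this: given a dominating %1 = a + c, tryReassociatedBinaryOp() turns a + (b + c) into %1 + b, so the outer add is the only new instruction and therefore the one that must inherit the location. A hand-written before/after sketch (not pass output):

define i32 @before(i32 %a, i32 %b, i32 %c) {
  %ac = add i32 %a, %c ; already available
  %bc = add i32 %b, %c
  %r = add i32 %a, %bc ; a + (b + c)
  ret i32 %r
}

define i32 @after(i32 %a, i32 %b, i32 %c) {
  %ac = add i32 %a, %c
  %r = add i32 %ac, %b ; (a + c) + b, reusing %ac
  ret i32 %r
}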
diff --git a/llvm/test/Transforms/NewGVN/2011-09-07-TypeIdFor.ll b/llvm/test/Transforms/NewGVN/2011-09-07-TypeIdFor.ll
index 675e7da26a10..afd7610b7162 100644
--- a/llvm/test/Transforms/NewGVN/2011-09-07-TypeIdFor.ll
+++ b/llvm/test/Transforms/NewGVN/2011-09-07-TypeIdFor.ll
@@ -10,7 +10,7 @@ declare void @_Z4barv()
declare void @_Z7cleanupv()
-declare i32 @llvm.eh.typeid.for(ptr) nounwind readonly
+declare i32 @llvm.eh.typeid.for.p0(ptr) nounwind readonly
declare ptr @__cxa_begin_catch(ptr) nounwind
@@ -32,11 +32,11 @@ define void @_Z3foov() uwtable personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: catch ptr @_ZTIb
; CHECK-NEXT: [[EXC_PTR2_I:%.*]] = extractvalue { ptr, i32 } [[TMP0]], 0
; CHECK-NEXT: [[FILTER3_I:%.*]] = extractvalue { ptr, i32 } [[TMP0]], 1
-; CHECK-NEXT: [[TYPEID_I:%.*]] = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
+; CHECK-NEXT: [[TYPEID_I:%.*]] = tail call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi)
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[FILTER3_I]], [[TYPEID_I]]
; CHECK-NEXT: br i1 [[TMP1]], label [[PPAD:%.*]], label [[NEXT:%.*]]
; CHECK: next:
-; CHECK-NEXT: [[TYPEID1_I:%.*]] = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIb)
+; CHECK-NEXT: [[TYPEID1_I:%.*]] = tail call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIb)
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[FILTER3_I]], [[TYPEID1_I]]
; CHECK-NEXT: br i1 [[TMP2]], label [[PPAD2:%.*]], label [[NEXT2:%.*]]
; CHECK: ppad:
@@ -77,12 +77,12 @@ lpad: ; preds = %entry
catch ptr @_ZTIb
%exc_ptr2.i = extractvalue { ptr, i32 } %0, 0
%filter3.i = extractvalue { ptr, i32 } %0, 1
- %typeid.i = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
+ %typeid.i = tail call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi)
%1 = icmp eq i32 %filter3.i, %typeid.i
br i1 %1, label %ppad, label %next
next: ; preds = %lpad
- %typeid1.i = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIb)
+ %typeid1.i = tail call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIb)
%2 = icmp eq i32 %filter3.i, %typeid1.i
br i1 %2, label %ppad2, label %next2
@@ -98,12 +98,12 @@ ppad2: ; preds = %next
next2: ; preds = %next
call void @_Z7cleanupv()
- %typeid = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
+ %typeid = tail call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi)
%4 = icmp eq i32 %filter3.i, %typeid
br i1 %4, label %ppad3, label %next3
next3: ; preds = %next2
- %typeid1 = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIb)
+ %typeid1 = tail call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIb)
%5 = icmp eq i32 %filter3.i, %typeid1
br i1 %5, label %ppad4, label %unwind
diff --git a/llvm/test/Transforms/NewGVN/loadforward.ll b/llvm/test/Transforms/NewGVN/loadforward.ll
index 85ceafd433f4..a44a6e92b8ad 100644
--- a/llvm/test/Transforms/NewGVN/loadforward.ll
+++ b/llvm/test/Transforms/NewGVN/loadforward.ll
@@ -9,7 +9,7 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
;; Test that we forward the first store to the second load
define i16 @bazinga() {
; CHECK-LABEL: @bazinga(
-; CHECK-NEXT: [[_TMP10:%.*]] = load i16, ptr getelementptr inbounds ([[REC11:%.*]], ptr @str, i64 0, i32 1), align 2
+; CHECK-NEXT: [[_TMP10:%.*]] = load i16, ptr getelementptr inbounds (i8, ptr @str, i64 2), align 2
; CHECK-NEXT: store i16 [[_TMP10]], ptr @str, align 2
; CHECK-NEXT: [[_TMP15:%.*]] = icmp eq i16 [[_TMP10]], 3
; CHECK-NEXT: [[_TMP16:%.*]] = select i1 [[_TMP15]], i16 1, i16 0
diff --git a/llvm/test/Transforms/OpenMP/custom_state_machines.ll b/llvm/test/Transforms/OpenMP/custom_state_machines.ll
index 34a68a3020e5..e6ddf16f0676 100644
--- a/llvm/test/Transforms/OpenMP/custom_state_machines.ll
+++ b/llvm/test/Transforms/OpenMP/custom_state_machines.ll
@@ -8,7 +8,7 @@
;; void p1(void);
;; int unknown(void);
;; void unknown_pure(void) __attribute__((pure));
-;; void unknown_no_openmp(void) __attribute__((assume("omp_no_openmp")));
+;; [[omp::assume("omp_no_openmp")]] void unknown_no_openmp(void);
;;
;; int G;
;; void no_parallel_region_in_here(void) {
diff --git a/llvm/test/Transforms/OpenMP/custom_state_machines_pre_lto.ll b/llvm/test/Transforms/OpenMP/custom_state_machines_pre_lto.ll
index 85d495f45039..d20821d45036 100644
--- a/llvm/test/Transforms/OpenMP/custom_state_machines_pre_lto.ll
+++ b/llvm/test/Transforms/OpenMP/custom_state_machines_pre_lto.ll
@@ -10,7 +10,7 @@
;; void p1(void);
;; int unknown(void);
;; void unknown_pure(void) __attribute__((pure));
-;; void unknown_no_openmp(void) __attribute__((assume("omp_no_openmp")));
+;; [[omp::assume("omp_no_openmp")]] void unknown_no_openmp(void);
;;
;; int G;
;; void no_parallel_region_in_here(void) {
diff --git a/llvm/test/Transforms/OpenMP/custom_state_machines_remarks.ll b/llvm/test/Transforms/OpenMP/custom_state_machines_remarks.ll
index f8c4e6b113c9..f7bfd3065069 100644
--- a/llvm/test/Transforms/OpenMP/custom_state_machines_remarks.ll
+++ b/llvm/test/Transforms/OpenMP/custom_state_machines_remarks.ll
@@ -1,10 +1,10 @@
; RUN: opt -passes=openmp-opt -pass-remarks=openmp-opt -pass-remarks-missed=openmp-opt -pass-remarks-analysis=openmp-opt -disable-output < %s 2>&1 | FileCheck %s
target triple = "nvptx64"
-; CHECK: remark: llvm/test/Transforms/OpenMP/custom_state_machines_remarks.c:11:1: Generic-mode kernel is executed with a customized state machine that requires a fallback.
-; CHECK: remark: llvm/test/Transforms/OpenMP/custom_state_machines_remarks.c:13:5: Call may contain unknown parallel regions. Use `__attribute__((assume("omp_no_parallelism")))` to override.
-; CHECK: remark: llvm/test/Transforms/OpenMP/custom_state_machines_remarks.c:15:5: Call may contain unknown parallel regions. Use `__attribute__((assume("omp_no_parallelism")))` to override.
-; CHECK: remark: llvm/test/Transforms/OpenMP/custom_state_machines_remarks.c:20:1: Rewriting generic-mode kernel with a customized state machine.
+; CHECK{LITERAL}: remark: llvm/test/Transforms/OpenMP/custom_state_machines_remarks.c:11:1: Generic-mode kernel is executed with a customized state machine that requires a fallback.
+; CHECK{LITERAL}: remark: llvm/test/Transforms/OpenMP/custom_state_machines_remarks.c:13:5: Call may contain unknown parallel regions. Use `[[omp::assume("omp_no_parallelism")]]` to override.
+; CHECK{LITERAL}: remark: llvm/test/Transforms/OpenMP/custom_state_machines_remarks.c:15:5: Call may contain unknown parallel regions. Use `[[omp::assume("omp_no_parallelism")]]` to override.
+; CHECK{LITERAL}: remark: llvm/test/Transforms/OpenMP/custom_state_machines_remarks.c:20:1: Rewriting generic-mode kernel with a customized state machine.
;; void unknown(void);
@@ -24,7 +24,7 @@ target triple = "nvptx64"
;; }
;; }
;;
-;; void no_openmp(void) __attribute__((assume("omp_no_openmp")));
+;; [[omp::assume("omp_no_openmp")]] void no_openmp(void);
;; void test_no_fallback(void) {
;; #pragma omp target teams
;; {
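A note on the CHECK{LITERAL} directives introduced above (and in spmdization_remarks.ll below): the updated remark text contains [[...]], which a plain CHECK line would try to parse as a FileCheck substitution block, so the match has to be forced to be literal.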
diff --git a/llvm/test/Transforms/OpenMP/spmdization.ll b/llvm/test/Transforms/OpenMP/spmdization.ll
index 159280ae62a0..393968913855 100644
--- a/llvm/test/Transforms/OpenMP/spmdization.ll
+++ b/llvm/test/Transforms/OpenMP/spmdization.ll
@@ -7,7 +7,7 @@
; RUN: opt --mtriple=nvptx64-- -S -passes=openmp-opt-postlink < %s | FileCheck %s --check-prefix=NVPTX-DISABLED2
;; void unknown(void);
-;; void spmd_amenable(void) __attribute__((assume("ompx_spmd_amenable")));
+;; [[omp::assume("ompx_spmd_amenable")]] void spmd_amenable(void);
;;
;; void sequential_loop() {
;; #pragma omp target teams
@@ -22,7 +22,7 @@
;; }
;; }
;;
-;; void use(__attribute__((noescape)) int *) __attribute__((assume("ompx_spmd_amenable")));
+;; [[omp::assume("ompx_spmd_amenable")]] void use(__attribute__((noescape)) int *);
;;
;; void sequential_loop_to_stack_var() {
;; #pragma omp target teams
diff --git a/llvm/test/Transforms/OpenMP/spmdization_guarding.ll b/llvm/test/Transforms/OpenMP/spmdization_guarding.ll
index b2e14dce94d5..bd128b7f74d7 100644
--- a/llvm/test/Transforms/OpenMP/spmdization_guarding.ll
+++ b/llvm/test/Transforms/OpenMP/spmdization_guarding.ll
@@ -2,8 +2,8 @@
; RUN: opt -S -passes=openmp-opt < %s | FileCheck %s
; RUN: opt -S -passes=openmp-opt -openmp-opt-disable-spmdization < %s | FileCheck %s --check-prefix=CHECK-DISABLED
;
-; void pure(void) __attribute__((pure, assume("ompx_spmd_amenable")));
-; int no_openmp(int *) __attribute__((assume("omp_no_openmp","ompx_spmd_amenable")));
+; [[omp::assume("ompx_spmd_amenable")]] void pure(void) __attribute__((pure));
+; [[omp::assume("omp_no_openmp","ompx_spmd_amenable")]] int no_openmp(int *);
;
; void sequential_loop(int *x, int N) {
; #pragma omp target teams
diff --git a/llvm/test/Transforms/OpenMP/spmdization_remarks.ll b/llvm/test/Transforms/OpenMP/spmdization_remarks.ll
index 28df2f524913..f5a4cea9a841 100644
--- a/llvm/test/Transforms/OpenMP/spmdization_remarks.ll
+++ b/llvm/test/Transforms/OpenMP/spmdization_remarks.ll
@@ -1,12 +1,12 @@
; RUN: opt -passes=openmp-opt -pass-remarks=openmp-opt -pass-remarks-missed=openmp-opt -pass-remarks-analysis=openmp-opt -disable-output < %s 2>&1 | FileCheck %s
target triple = "nvptx64"
-; CHECK: remark: llvm/test/Transforms/OpenMP/spmdization_remarks.c:13:5: Value has potential side effects preventing SPMD-mode execution. Add `__attribute__((assume("ompx_spmd_amenable")))` to the called function to override.
-; CHECK: remark: llvm/test/Transforms/OpenMP/spmdization_remarks.c:15:5: Value has potential side effects preventing SPMD-mode execution. Add `__attribute__((assume("ompx_spmd_amenable")))` to the called function to override.
-; CHECK: remark: llvm/test/Transforms/OpenMP/spmdization_remarks.c:11:1: Generic-mode kernel is executed with a customized state machine that requires a fallback.
-; CHECK: remark: llvm/test/Transforms/OpenMP/spmdization_remarks.c:13:5: Call may contain unknown parallel regions. Use `__attribute__((assume("omp_no_parallelism")))` to override.
-; CHECK: remark: llvm/test/Transforms/OpenMP/spmdization_remarks.c:15:5: Call may contain unknown parallel regions. Use `__attribute__((assume("omp_no_parallelism")))` to override.
-; CHECK: remark: llvm/test/Transforms/OpenMP/spmdization_remarks.c:20:1: Transformed generic-mode kernel to SPMD-mode.
+; CHECK{LITERAL}: remark: llvm/test/Transforms/OpenMP/spmdization_remarks.c:13:5: Value has potential side effects preventing SPMD-mode execution. Add `[[omp::assume("ompx_spmd_amenable")]]` to the called function to override.
+; CHECK{LITERAL}: remark: llvm/test/Transforms/OpenMP/spmdization_remarks.c:15:5: Value has potential side effects preventing SPMD-mode execution. Add `[[omp::assume("ompx_spmd_amenable")]]` to the called function to override.
+; CHECK{LITERAL}: remark: llvm/test/Transforms/OpenMP/spmdization_remarks.c:11:1: Generic-mode kernel is executed with a customized state machine that requires a fallback.
+; CHECK{LITERAL}: remark: llvm/test/Transforms/OpenMP/spmdization_remarks.c:13:5: Call may contain unknown parallel regions. Use `[[omp::assume("omp_no_parallelism")]]` to override.
+; CHECK{LITERAL}: remark: llvm/test/Transforms/OpenMP/spmdization_remarks.c:15:5: Call may contain unknown parallel regions. Use `[[omp::assume("omp_no_parallelism")]]` to override.
+; CHECK{LITERAL}: remark: llvm/test/Transforms/OpenMP/spmdization_remarks.c:20:1: Transformed generic-mode kernel to SPMD-mode.
;; void unknown(void);
@@ -26,7 +26,7 @@ target triple = "nvptx64"
;; }
;; }
;;
-;; void no_openmp(void) __attribute__((assume("omp_no_openmp")));
+;; void no_openmp(void) [[omp::assume("omp_no_openmp")]];
;; void test_no_fallback(void) {
;; #pragma omp target teams
;; {
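Whichever source-level spelling the comments use, the assumption reaches openmp-opt the same way: as a "llvm.assume" string attribute on the function. A minimal sketch of the IR shape the pass inspects, with an illustrative declaration name:

; what [[omp::assume("omp_no_openmp")]] lowers to at the IR level
declare void @no_openmp() #0

attributes #0 = { "llvm.assume"="omp_no_openmp" }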
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
index d048b0bab417..5cbf50e06fbe 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
@@ -85,93 +85,316 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP210_NOT:%.*]] = icmp eq i32 [[I:%.*]], 0
; CHECK-NEXT: [[CONV6:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: br i1 [[CMP210_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_COND1_PREHEADER_US:%.*]]
-; CHECK: for.cond1.preheader.us:
-; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[I]], 225
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP0]])
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds <225 x double>, ptr [[B:%.*]], i64 0, i64 [[CONV6]]
+; CHECK-NEXT: br i1 [[CMP210_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_COND1_PREHEADER_US_PREHEADER:%.*]]
+; CHECK: for.cond1.preheader.us.preheader:
+; CHECK-NEXT: [[TMP0:%.*]] = shl nuw nsw i64 [[CONV6]], 3
+; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 360
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[SCEVGEP20:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i32 [[I]], 225
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP2]])
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[CONV6]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[I]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[FOR_BODY4_US_PREHEADER:%.*]], label [[VECTOR_MEMCHECK:%.*]]
+; CHECK: vector.memcheck:
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP20]], [[B]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[FOR_BODY4_US_PREHEADER]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[CONV6]], 252
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP4:%.*]] = or disjoint i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP5:%.*]] = or disjoint i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP6:%.*]] = or disjoint i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x i64> poison, i64 [[INDEX]], i64 0
+; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i64> [[TMP7]], i64 [[TMP4]], i64 1
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x i64> poison, i64 [[TMP5]], i64 0
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x i64> [[TMP9]], i64 [[TMP6]], i64 1
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ult <2 x i64> [[TMP8]], <i64 225, i64 225>
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ult <2 x i64> [[TMP10]], <i64 225, i64 225>
+; CHECK-NEXT: [[TMP13:%.*]] = extractelement <2 x i1> [[TMP11]], i64 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP11]], i64 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP14]])
+; CHECK-NEXT: [[TMP15:%.*]] = extractelement <2 x i1> [[TMP12]], i64 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP15]])
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP12]], i64 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP16]])
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[TMP17]], i64 16
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP17]], align 8, !alias.scope [[META0:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD21:%.*]] = load <2 x double>, ptr [[TMP18]], align 8, !alias.scope [[META0]]
+; CHECK-NEXT: [[TMP19:%.*]] = load double, ptr [[TMP3]], align 8, !alias.scope [[META3:![0-9]+]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT22:%.*]] = insertelement <2 x double> poison, double [[TMP19]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT23:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT22]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP20:%.*]] = fmul <2 x double> [[WIDE_LOAD]], [[BROADCAST_SPLAT23]]
+; CHECK-NEXT: [[TMP21:%.*]] = fmul <2 x double> [[WIDE_LOAD21]], [[BROADCAST_SPLAT23]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[TMP22]], i64 16
+; CHECK-NEXT: [[WIDE_LOAD24:%.*]] = load <2 x double>, ptr [[TMP22]], align 8, !alias.scope [[META5:![0-9]+]], !noalias [[META0]]
+; CHECK-NEXT: [[WIDE_LOAD25:%.*]] = load <2 x double>, ptr [[TMP23]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[TMP24:%.*]] = fsub <2 x double> [[WIDE_LOAD24]], [[TMP20]]
+; CHECK-NEXT: [[TMP25:%.*]] = fsub <2 x double> [[WIDE_LOAD25]], [[TMP21]]
+; CHECK-NEXT: store <2 x double> [[TMP24]], ptr [[TMP22]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[TMP23]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[CONV6]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US:%.*]], label [[FOR_BODY4_US_PREHEADER]]
+; CHECK: for.body4.us.preheader:
+; CHECK-NEXT: [[INDVARS_IV_PH:%.*]] = phi i64 [ 0, [[VECTOR_MEMCHECK]] ], [ 0, [[FOR_COND1_PREHEADER_US_PREHEADER]] ], [ [[N_VEC]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_BODY4_US:%.*]]
; CHECK: for.body4.us:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_COND1_PREHEADER_US]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY4_US]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i64 [[INDVARS_IV]], 225
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP2]])
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds <225 x double>, ptr [[A:%.*]], i64 0, i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[MATRIXEXT_US:%.*]] = load double, ptr [[TMP3]], align 8
-; CHECK-NEXT: [[MATRIXEXT8_US:%.*]] = load double, ptr [[TMP1]], align 8
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY4_US]] ], [ [[INDVARS_IV_PH]], [[FOR_BODY4_US_PREHEADER]] ]
+; CHECK-NEXT: [[TMP27:%.*]] = icmp ult i64 [[INDVARS_IV]], 225
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP27]])
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[MATRIXEXT_US:%.*]] = load double, ptr [[TMP28]], align 8
+; CHECK-NEXT: [[MATRIXEXT8_US:%.*]] = load double, ptr [[TMP3]], align 8
; CHECK-NEXT: [[MUL_US:%.*]] = fmul double [[MATRIXEXT_US]], [[MATRIXEXT8_US]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[MATRIXEXT11_US:%.*]] = load double, ptr [[TMP4]], align 8
+; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[MATRIXEXT11_US:%.*]] = load double, ptr [[TMP29]], align 8
; CHECK-NEXT: [[SUB_US:%.*]] = fsub double [[MATRIXEXT11_US]], [[MUL_US]]
-; CHECK-NEXT: store double [[SUB_US]], ptr [[TMP4]], align 8
+; CHECK-NEXT: store double [[SUB_US]], ptr [[TMP29]], align 8
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[CONV6]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US:%.*]], label [[FOR_BODY4_US]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US]], label [[FOR_BODY4_US]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: for.cond1.for.cond.cleanup3_crit_edge.us:
-; CHECK-NEXT: [[TMP5:%.*]] = add nuw nsw i64 [[CONV6]], 15
-; CHECK-NEXT: [[TMP6:%.*]] = icmp ult i32 [[I]], 210
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP6]])
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP5]]
+; CHECK-NEXT: [[TMP30:%.*]] = add nuw nsw i64 [[CONV6]], 15
+; CHECK-NEXT: [[TMP31:%.*]] = icmp ult i32 [[I]], 210
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP31]])
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP30]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK_1:%.*]] = icmp ult i32 [[I]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK_1]], label [[FOR_BODY4_US_PREHEADER_1:%.*]], label [[VECTOR_MEMCHECK_1:%.*]]
+; CHECK: vector.memcheck.1:
+; CHECK-NEXT: [[BOUND0_1:%.*]] = icmp ugt ptr [[SCEVGEP20]], [[B]]
+; CHECK-NEXT: [[BOUND1_1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[FOUND_CONFLICT_1:%.*]] = and i1 [[BOUND0_1]], [[BOUND1_1]]
+; CHECK-NEXT: br i1 [[FOUND_CONFLICT_1]], label [[FOR_BODY4_US_PREHEADER_1]], label [[VECTOR_PH_1:%.*]]
+; CHECK: vector.ph.1:
+; CHECK-NEXT: [[N_VEC_1:%.*]] = and i64 [[CONV6]], 252
+; CHECK-NEXT: br label [[VECTOR_BODY_1:%.*]]
+; CHECK: vector.body.1:
+; CHECK-NEXT: [[INDEX_1:%.*]] = phi i64 [ 0, [[VECTOR_PH_1]] ], [ [[INDEX_NEXT_1:%.*]], [[VECTOR_BODY_1]] ]
+; CHECK-NEXT: [[TMP33:%.*]] = add nuw nsw i64 [[INDEX_1]], 15
+; CHECK-NEXT: [[TMP34:%.*]] = add i64 [[INDEX_1]], 16
+; CHECK-NEXT: [[TMP35:%.*]] = insertelement <2 x i64> poison, i64 [[TMP33]], i64 0
+; CHECK-NEXT: [[TMP36:%.*]] = insertelement <2 x i64> [[TMP35]], i64 [[TMP34]], i64 1
+; CHECK-NEXT: [[TMP37:%.*]] = add i64 [[INDEX_1]], 17
+; CHECK-NEXT: [[TMP38:%.*]] = add i64 [[INDEX_1]], 18
+; CHECK-NEXT: [[TMP39:%.*]] = insertelement <2 x i64> poison, i64 [[TMP37]], i64 0
+; CHECK-NEXT: [[TMP40:%.*]] = insertelement <2 x i64> [[TMP39]], i64 [[TMP38]], i64 1
+; CHECK-NEXT: [[TMP41:%.*]] = icmp ult <2 x i64> [[TMP36]], <i64 225, i64 225>
+; CHECK-NEXT: [[TMP42:%.*]] = icmp ult <2 x i64> [[TMP40]], <i64 225, i64 225>
+; CHECK-NEXT: [[TMP43:%.*]] = extractelement <2 x i1> [[TMP41]], i64 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP43]])
+; CHECK-NEXT: [[TMP44:%.*]] = extractelement <2 x i1> [[TMP41]], i64 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP44]])
+; CHECK-NEXT: [[TMP45:%.*]] = extractelement <2 x i1> [[TMP42]], i64 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP45]])
+; CHECK-NEXT: [[TMP46:%.*]] = extractelement <2 x i1> [[TMP42]], i64 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP46]])
+; CHECK-NEXT: [[TMP47:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP33]]
+; CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[TMP47]], i64 16
+; CHECK-NEXT: [[WIDE_LOAD_1:%.*]] = load <2 x double>, ptr [[TMP47]], align 8, !alias.scope [[META0]]
+; CHECK-NEXT: [[WIDE_LOAD21_1:%.*]] = load <2 x double>, ptr [[TMP48]], align 8, !alias.scope [[META0]]
+; CHECK-NEXT: [[TMP49:%.*]] = load double, ptr [[TMP32]], align 8, !alias.scope [[META3]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT22_1:%.*]] = insertelement <2 x double> poison, double [[TMP49]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT23_1:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT22_1]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP50:%.*]] = fmul <2 x double> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT23_1]]
+; CHECK-NEXT: [[TMP51:%.*]] = fmul <2 x double> [[WIDE_LOAD21_1]], [[BROADCAST_SPLAT23_1]]
+; CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP33]]
+; CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[TMP52]], i64 16
+; CHECK-NEXT: [[WIDE_LOAD24_1:%.*]] = load <2 x double>, ptr [[TMP52]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[WIDE_LOAD25_1:%.*]] = load <2 x double>, ptr [[TMP53]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[TMP54:%.*]] = fsub <2 x double> [[WIDE_LOAD24_1]], [[TMP50]]
+; CHECK-NEXT: [[TMP55:%.*]] = fsub <2 x double> [[WIDE_LOAD25_1]], [[TMP51]]
+; CHECK-NEXT: store <2 x double> [[TMP54]], ptr [[TMP52]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: store <2 x double> [[TMP55]], ptr [[TMP53]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[INDEX_NEXT_1]] = add nuw i64 [[INDEX_1]], 4
+; CHECK-NEXT: [[TMP56:%.*]] = icmp eq i64 [[INDEX_NEXT_1]], [[N_VEC_1]]
+; CHECK-NEXT: br i1 [[TMP56]], label [[MIDDLE_BLOCK_1:%.*]], label [[VECTOR_BODY_1]], !llvm.loop [[LOOP7]]
+; CHECK: middle.block.1:
+; CHECK-NEXT: [[CMP_N_1:%.*]] = icmp eq i64 [[N_VEC_1]], [[CONV6]]
+; CHECK-NEXT: br i1 [[CMP_N_1]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_1:%.*]], label [[FOR_BODY4_US_PREHEADER_1]]
+; CHECK: for.body4.us.preheader.1:
+; CHECK-NEXT: [[INDVARS_IV_PH_1:%.*]] = phi i64 [ 0, [[VECTOR_MEMCHECK_1]] ], [ 0, [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US]] ], [ [[N_VEC_1]], [[MIDDLE_BLOCK_1]] ]
; CHECK-NEXT: br label [[FOR_BODY4_US_1:%.*]]
; CHECK: for.body4.us.1:
-; CHECK-NEXT: [[INDVARS_IV_1:%.*]] = phi i64 [ 0, [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US]] ], [ [[INDVARS_IV_NEXT_1:%.*]], [[FOR_BODY4_US_1]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = add nuw nsw i64 [[INDVARS_IV_1]], 15
-; CHECK-NEXT: [[TMP9:%.*]] = icmp ult i64 [[INDVARS_IV_1]], 210
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP9]])
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP8]]
-; CHECK-NEXT: [[MATRIXEXT_US_1:%.*]] = load double, ptr [[TMP10]], align 8
-; CHECK-NEXT: [[MATRIXEXT8_US_1:%.*]] = load double, ptr [[TMP7]], align 8
+; CHECK-NEXT: [[INDVARS_IV_1:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_1:%.*]], [[FOR_BODY4_US_1]] ], [ [[INDVARS_IV_PH_1]], [[FOR_BODY4_US_PREHEADER_1]] ]
+; CHECK-NEXT: [[TMP57:%.*]] = add nuw nsw i64 [[INDVARS_IV_1]], 15
+; CHECK-NEXT: [[TMP58:%.*]] = icmp ult i64 [[INDVARS_IV_1]], 210
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP58]])
+; CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP57]]
+; CHECK-NEXT: [[MATRIXEXT_US_1:%.*]] = load double, ptr [[TMP59]], align 8
+; CHECK-NEXT: [[MATRIXEXT8_US_1:%.*]] = load double, ptr [[TMP32]], align 8
; CHECK-NEXT: [[MUL_US_1:%.*]] = fmul double [[MATRIXEXT_US_1]], [[MATRIXEXT8_US_1]]
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP8]]
-; CHECK-NEXT: [[MATRIXEXT11_US_1:%.*]] = load double, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP57]]
+; CHECK-NEXT: [[MATRIXEXT11_US_1:%.*]] = load double, ptr [[TMP60]], align 8
; CHECK-NEXT: [[SUB_US_1:%.*]] = fsub double [[MATRIXEXT11_US_1]], [[MUL_US_1]]
-; CHECK-NEXT: store double [[SUB_US_1]], ptr [[TMP11]], align 8
+; CHECK-NEXT: store double [[SUB_US_1]], ptr [[TMP60]], align 8
; CHECK-NEXT: [[INDVARS_IV_NEXT_1]] = add nuw nsw i64 [[INDVARS_IV_1]], 1
; CHECK-NEXT: [[EXITCOND_NOT_1:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_1]], [[CONV6]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT_1]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_1:%.*]], label [[FOR_BODY4_US_1]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT_1]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_1]], label [[FOR_BODY4_US_1]], !llvm.loop [[LOOP10]]
; CHECK: for.cond1.for.cond.cleanup3_crit_edge.us.1:
-; CHECK-NEXT: [[TMP12:%.*]] = add nuw nsw i64 [[CONV6]], 30
-; CHECK-NEXT: [[TMP13:%.*]] = icmp ult i32 [[I]], 195
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP13]])
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP12]]
+; CHECK-NEXT: [[TMP61:%.*]] = add nuw nsw i64 [[CONV6]], 30
+; CHECK-NEXT: [[TMP62:%.*]] = icmp ult i32 [[I]], 195
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP62]])
+; CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP61]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK_2:%.*]] = icmp ult i32 [[I]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK_2]], label [[FOR_BODY4_US_PREHEADER_2:%.*]], label [[VECTOR_MEMCHECK_2:%.*]]
+; CHECK: vector.memcheck.2:
+; CHECK-NEXT: [[BOUND0_2:%.*]] = icmp ugt ptr [[SCEVGEP20]], [[B]]
+; CHECK-NEXT: [[BOUND1_2:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[FOUND_CONFLICT_2:%.*]] = and i1 [[BOUND0_2]], [[BOUND1_2]]
+; CHECK-NEXT: br i1 [[FOUND_CONFLICT_2]], label [[FOR_BODY4_US_PREHEADER_2]], label [[VECTOR_PH_2:%.*]]
+; CHECK: vector.ph.2:
+; CHECK-NEXT: [[N_VEC_2:%.*]] = and i64 [[CONV6]], 252
+; CHECK-NEXT: br label [[VECTOR_BODY_2:%.*]]
+; CHECK: vector.body.2:
+; CHECK-NEXT: [[INDEX_2:%.*]] = phi i64 [ 0, [[VECTOR_PH_2]] ], [ [[INDEX_NEXT_2:%.*]], [[VECTOR_BODY_2]] ]
+; CHECK-NEXT: [[TMP64:%.*]] = add nuw nsw i64 [[INDEX_2]], 30
+; CHECK-NEXT: [[TMP65:%.*]] = add i64 [[INDEX_2]], 31
+; CHECK-NEXT: [[TMP66:%.*]] = insertelement <2 x i64> poison, i64 [[TMP64]], i64 0
+; CHECK-NEXT: [[TMP67:%.*]] = insertelement <2 x i64> [[TMP66]], i64 [[TMP65]], i64 1
+; CHECK-NEXT: [[TMP68:%.*]] = add i64 [[INDEX_2]], 32
+; CHECK-NEXT: [[TMP69:%.*]] = add i64 [[INDEX_2]], 33
+; CHECK-NEXT: [[TMP70:%.*]] = insertelement <2 x i64> poison, i64 [[TMP68]], i64 0
+; CHECK-NEXT: [[TMP71:%.*]] = insertelement <2 x i64> [[TMP70]], i64 [[TMP69]], i64 1
+; CHECK-NEXT: [[TMP72:%.*]] = icmp ult <2 x i64> [[TMP67]], <i64 225, i64 225>
+; CHECK-NEXT: [[TMP73:%.*]] = icmp ult <2 x i64> [[TMP71]], <i64 225, i64 225>
+; CHECK-NEXT: [[TMP74:%.*]] = extractelement <2 x i1> [[TMP72]], i64 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP74]])
+; CHECK-NEXT: [[TMP75:%.*]] = extractelement <2 x i1> [[TMP72]], i64 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP75]])
+; CHECK-NEXT: [[TMP76:%.*]] = extractelement <2 x i1> [[TMP73]], i64 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP76]])
+; CHECK-NEXT: [[TMP77:%.*]] = extractelement <2 x i1> [[TMP73]], i64 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP77]])
+; CHECK-NEXT: [[TMP78:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP64]]
+; CHECK-NEXT: [[TMP79:%.*]] = getelementptr inbounds i8, ptr [[TMP78]], i64 16
+; CHECK-NEXT: [[WIDE_LOAD_2:%.*]] = load <2 x double>, ptr [[TMP78]], align 8, !alias.scope [[META0]]
+; CHECK-NEXT: [[WIDE_LOAD21_2:%.*]] = load <2 x double>, ptr [[TMP79]], align 8, !alias.scope [[META0]]
+; CHECK-NEXT: [[TMP80:%.*]] = load double, ptr [[TMP63]], align 8, !alias.scope [[META3]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT22_2:%.*]] = insertelement <2 x double> poison, double [[TMP80]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT23_2:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT22_2]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP81:%.*]] = fmul <2 x double> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT23_2]]
+; CHECK-NEXT: [[TMP82:%.*]] = fmul <2 x double> [[WIDE_LOAD21_2]], [[BROADCAST_SPLAT23_2]]
+; CHECK-NEXT: [[TMP83:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP64]]
+; CHECK-NEXT: [[TMP84:%.*]] = getelementptr inbounds i8, ptr [[TMP83]], i64 16
+; CHECK-NEXT: [[WIDE_LOAD24_2:%.*]] = load <2 x double>, ptr [[TMP83]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[WIDE_LOAD25_2:%.*]] = load <2 x double>, ptr [[TMP84]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[TMP85:%.*]] = fsub <2 x double> [[WIDE_LOAD24_2]], [[TMP81]]
+; CHECK-NEXT: [[TMP86:%.*]] = fsub <2 x double> [[WIDE_LOAD25_2]], [[TMP82]]
+; CHECK-NEXT: store <2 x double> [[TMP85]], ptr [[TMP83]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: store <2 x double> [[TMP86]], ptr [[TMP84]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[INDEX_NEXT_2]] = add nuw i64 [[INDEX_2]], 4
+; CHECK-NEXT: [[TMP87:%.*]] = icmp eq i64 [[INDEX_NEXT_2]], [[N_VEC_2]]
+; CHECK-NEXT: br i1 [[TMP87]], label [[MIDDLE_BLOCK_2:%.*]], label [[VECTOR_BODY_2]], !llvm.loop [[LOOP7]]
+; CHECK: middle.block.2:
+; CHECK-NEXT: [[CMP_N_2:%.*]] = icmp eq i64 [[N_VEC_2]], [[CONV6]]
+; CHECK-NEXT: br i1 [[CMP_N_2]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_2:%.*]], label [[FOR_BODY4_US_PREHEADER_2]]
+; CHECK: for.body4.us.preheader.2:
+; CHECK-NEXT: [[INDVARS_IV_PH_2:%.*]] = phi i64 [ 0, [[VECTOR_MEMCHECK_2]] ], [ 0, [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_1]] ], [ [[N_VEC_2]], [[MIDDLE_BLOCK_2]] ]
; CHECK-NEXT: br label [[FOR_BODY4_US_2:%.*]]
; CHECK: for.body4.us.2:
-; CHECK-NEXT: [[INDVARS_IV_2:%.*]] = phi i64 [ 0, [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_1]] ], [ [[INDVARS_IV_NEXT_2:%.*]], [[FOR_BODY4_US_2]] ]
-; CHECK-NEXT: [[TMP15:%.*]] = add nuw nsw i64 [[INDVARS_IV_2]], 30
-; CHECK-NEXT: [[TMP16:%.*]] = icmp ult i64 [[INDVARS_IV_2]], 195
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP16]])
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP15]]
-; CHECK-NEXT: [[MATRIXEXT_US_2:%.*]] = load double, ptr [[TMP17]], align 8
-; CHECK-NEXT: [[MATRIXEXT8_US_2:%.*]] = load double, ptr [[TMP14]], align 8
+; CHECK-NEXT: [[INDVARS_IV_2:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_2:%.*]], [[FOR_BODY4_US_2]] ], [ [[INDVARS_IV_PH_2]], [[FOR_BODY4_US_PREHEADER_2]] ]
+; CHECK-NEXT: [[TMP88:%.*]] = add nuw nsw i64 [[INDVARS_IV_2]], 30
+; CHECK-NEXT: [[TMP89:%.*]] = icmp ult i64 [[INDVARS_IV_2]], 195
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP89]])
+; CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP88]]
+; CHECK-NEXT: [[MATRIXEXT_US_2:%.*]] = load double, ptr [[TMP90]], align 8
+; CHECK-NEXT: [[MATRIXEXT8_US_2:%.*]] = load double, ptr [[TMP63]], align 8
; CHECK-NEXT: [[MUL_US_2:%.*]] = fmul double [[MATRIXEXT_US_2]], [[MATRIXEXT8_US_2]]
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP15]]
-; CHECK-NEXT: [[MATRIXEXT11_US_2:%.*]] = load double, ptr [[TMP18]], align 8
+; CHECK-NEXT: [[TMP91:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP88]]
+; CHECK-NEXT: [[MATRIXEXT11_US_2:%.*]] = load double, ptr [[TMP91]], align 8
; CHECK-NEXT: [[SUB_US_2:%.*]] = fsub double [[MATRIXEXT11_US_2]], [[MUL_US_2]]
-; CHECK-NEXT: store double [[SUB_US_2]], ptr [[TMP18]], align 8
+; CHECK-NEXT: store double [[SUB_US_2]], ptr [[TMP91]], align 8
; CHECK-NEXT: [[INDVARS_IV_NEXT_2]] = add nuw nsw i64 [[INDVARS_IV_2]], 1
; CHECK-NEXT: [[EXITCOND_NOT_2:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_2]], [[CONV6]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT_2]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_2:%.*]], label [[FOR_BODY4_US_2]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT_2]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_2]], label [[FOR_BODY4_US_2]], !llvm.loop [[LOOP10]]
; CHECK: for.cond1.for.cond.cleanup3_crit_edge.us.2:
-; CHECK-NEXT: [[TMP19:%.*]] = add nuw nsw i64 [[CONV6]], 45
-; CHECK-NEXT: [[TMP20:%.*]] = icmp ult i32 [[I]], 180
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP20]])
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP19]]
+; CHECK-NEXT: [[TMP92:%.*]] = add nuw nsw i64 [[CONV6]], 45
+; CHECK-NEXT: [[TMP93:%.*]] = icmp ult i32 [[I]], 180
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP93]])
+; CHECK-NEXT: [[TMP94:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP92]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK_3:%.*]] = icmp ult i32 [[I]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK_3]], label [[FOR_BODY4_US_PREHEADER_3:%.*]], label [[VECTOR_MEMCHECK_3:%.*]]
+; CHECK: vector.memcheck.3:
+; CHECK-NEXT: [[BOUND0_3:%.*]] = icmp ugt ptr [[SCEVGEP20]], [[B]]
+; CHECK-NEXT: [[BOUND1_3:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[FOUND_CONFLICT_3:%.*]] = and i1 [[BOUND0_3]], [[BOUND1_3]]
+; CHECK-NEXT: br i1 [[FOUND_CONFLICT_3]], label [[FOR_BODY4_US_PREHEADER_3]], label [[VECTOR_PH_3:%.*]]
+; CHECK: vector.ph.3:
+; CHECK-NEXT: [[N_VEC_3:%.*]] = and i64 [[CONV6]], 252
+; CHECK-NEXT: br label [[VECTOR_BODY_3:%.*]]
+; CHECK: vector.body.3:
+; CHECK-NEXT: [[INDEX_3:%.*]] = phi i64 [ 0, [[VECTOR_PH_3]] ], [ [[INDEX_NEXT_3:%.*]], [[VECTOR_BODY_3]] ]
+; CHECK-NEXT: [[TMP95:%.*]] = add nuw nsw i64 [[INDEX_3]], 45
+; CHECK-NEXT: [[TMP96:%.*]] = add i64 [[INDEX_3]], 46
+; CHECK-NEXT: [[TMP97:%.*]] = insertelement <2 x i64> poison, i64 [[TMP95]], i64 0
+; CHECK-NEXT: [[TMP98:%.*]] = insertelement <2 x i64> [[TMP97]], i64 [[TMP96]], i64 1
+; CHECK-NEXT: [[TMP99:%.*]] = add i64 [[INDEX_3]], 47
+; CHECK-NEXT: [[TMP100:%.*]] = add i64 [[INDEX_3]], 48
+; CHECK-NEXT: [[TMP101:%.*]] = insertelement <2 x i64> poison, i64 [[TMP99]], i64 0
+; CHECK-NEXT: [[TMP102:%.*]] = insertelement <2 x i64> [[TMP101]], i64 [[TMP100]], i64 1
+; CHECK-NEXT: [[TMP103:%.*]] = icmp ult <2 x i64> [[TMP98]], <i64 225, i64 225>
+; CHECK-NEXT: [[TMP104:%.*]] = icmp ult <2 x i64> [[TMP102]], <i64 225, i64 225>
+; CHECK-NEXT: [[TMP105:%.*]] = extractelement <2 x i1> [[TMP103]], i64 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP105]])
+; CHECK-NEXT: [[TMP106:%.*]] = extractelement <2 x i1> [[TMP103]], i64 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP106]])
+; CHECK-NEXT: [[TMP107:%.*]] = extractelement <2 x i1> [[TMP104]], i64 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP107]])
+; CHECK-NEXT: [[TMP108:%.*]] = extractelement <2 x i1> [[TMP104]], i64 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP108]])
+; CHECK-NEXT: [[TMP109:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP95]]
+; CHECK-NEXT: [[TMP110:%.*]] = getelementptr inbounds i8, ptr [[TMP109]], i64 16
+; CHECK-NEXT: [[WIDE_LOAD_3:%.*]] = load <2 x double>, ptr [[TMP109]], align 8, !alias.scope [[META0]]
+; CHECK-NEXT: [[WIDE_LOAD21_3:%.*]] = load <2 x double>, ptr [[TMP110]], align 8, !alias.scope [[META0]]
+; CHECK-NEXT: [[TMP111:%.*]] = load double, ptr [[TMP94]], align 8, !alias.scope [[META3]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT22_3:%.*]] = insertelement <2 x double> poison, double [[TMP111]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT23_3:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT22_3]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP112:%.*]] = fmul <2 x double> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT23_3]]
+; CHECK-NEXT: [[TMP113:%.*]] = fmul <2 x double> [[WIDE_LOAD21_3]], [[BROADCAST_SPLAT23_3]]
+; CHECK-NEXT: [[TMP114:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP95]]
+; CHECK-NEXT: [[TMP115:%.*]] = getelementptr inbounds i8, ptr [[TMP114]], i64 16
+; CHECK-NEXT: [[WIDE_LOAD24_3:%.*]] = load <2 x double>, ptr [[TMP114]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[WIDE_LOAD25_3:%.*]] = load <2 x double>, ptr [[TMP115]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[TMP116:%.*]] = fsub <2 x double> [[WIDE_LOAD24_3]], [[TMP112]]
+; CHECK-NEXT: [[TMP117:%.*]] = fsub <2 x double> [[WIDE_LOAD25_3]], [[TMP113]]
+; CHECK-NEXT: store <2 x double> [[TMP116]], ptr [[TMP114]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: store <2 x double> [[TMP117]], ptr [[TMP115]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[INDEX_NEXT_3]] = add nuw i64 [[INDEX_3]], 4
+; CHECK-NEXT: [[TMP118:%.*]] = icmp eq i64 [[INDEX_NEXT_3]], [[N_VEC_3]]
+; CHECK-NEXT: br i1 [[TMP118]], label [[MIDDLE_BLOCK_3:%.*]], label [[VECTOR_BODY_3]], !llvm.loop [[LOOP7]]
+; CHECK: middle.block.3:
+; CHECK-NEXT: [[CMP_N_3:%.*]] = icmp eq i64 [[N_VEC_3]], [[CONV6]]
+; CHECK-NEXT: br i1 [[CMP_N_3]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY4_US_PREHEADER_3]]
+; CHECK: for.body4.us.preheader.3:
+; CHECK-NEXT: [[INDVARS_IV_PH_3:%.*]] = phi i64 [ 0, [[VECTOR_MEMCHECK_3]] ], [ 0, [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_2]] ], [ [[N_VEC_3]], [[MIDDLE_BLOCK_3]] ]
; CHECK-NEXT: br label [[FOR_BODY4_US_3:%.*]]
; CHECK: for.body4.us.3:
-; CHECK-NEXT: [[INDVARS_IV_3:%.*]] = phi i64 [ 0, [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_2]] ], [ [[INDVARS_IV_NEXT_3:%.*]], [[FOR_BODY4_US_3]] ]
-; CHECK-NEXT: [[TMP22:%.*]] = add nuw nsw i64 [[INDVARS_IV_3]], 45
-; CHECK-NEXT: [[TMP23:%.*]] = icmp ult i64 [[INDVARS_IV_3]], 180
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP23]])
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP22]]
-; CHECK-NEXT: [[MATRIXEXT_US_3:%.*]] = load double, ptr [[TMP24]], align 8
-; CHECK-NEXT: [[MATRIXEXT8_US_3:%.*]] = load double, ptr [[TMP21]], align 8
+; CHECK-NEXT: [[INDVARS_IV_3:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_3:%.*]], [[FOR_BODY4_US_3]] ], [ [[INDVARS_IV_PH_3]], [[FOR_BODY4_US_PREHEADER_3]] ]
+; CHECK-NEXT: [[TMP119:%.*]] = add nuw nsw i64 [[INDVARS_IV_3]], 45
+; CHECK-NEXT: [[TMP120:%.*]] = icmp ult i64 [[INDVARS_IV_3]], 180
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP120]])
+; CHECK-NEXT: [[TMP121:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP119]]
+; CHECK-NEXT: [[MATRIXEXT_US_3:%.*]] = load double, ptr [[TMP121]], align 8
+; CHECK-NEXT: [[MATRIXEXT8_US_3:%.*]] = load double, ptr [[TMP94]], align 8
; CHECK-NEXT: [[MUL_US_3:%.*]] = fmul double [[MATRIXEXT_US_3]], [[MATRIXEXT8_US_3]]
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP22]]
-; CHECK-NEXT: [[MATRIXEXT11_US_3:%.*]] = load double, ptr [[TMP25]], align 8
+; CHECK-NEXT: [[TMP122:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP119]]
+; CHECK-NEXT: [[MATRIXEXT11_US_3:%.*]] = load double, ptr [[TMP122]], align 8
; CHECK-NEXT: [[SUB_US_3:%.*]] = fsub double [[MATRIXEXT11_US_3]], [[MUL_US_3]]
-; CHECK-NEXT: store double [[SUB_US_3]], ptr [[TMP25]], align 8
+; CHECK-NEXT: store double [[SUB_US_3]], ptr [[TMP122]], align 8
; CHECK-NEXT: [[INDVARS_IV_NEXT_3]] = add nuw nsw i64 [[INDVARS_IV_3]], 1
; CHECK-NEXT: [[EXITCOND_NOT_3:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_3]], [[CONV6]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT_3]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY4_US_3]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT_3]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY4_US_3]], !llvm.loop [[LOOP10]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
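Note on the matrix-extract-insert.ll changes above: with the new phase ordering the inner loop is vectorized (VF 2, interleaved by 2, so 4 elements per iteration) behind a runtime alias check between the A and B matrices, and the scalar `icmp ult`/`llvm.assume` bounds facts are replayed per lane on the extracted mask elements. A hedged reading of the new check arithmetic, verified by the following illustrative snippet (not part of the test):

#include <cassert>
#include <cstdint>

int main() {
  // conv6 is the zero-extended trip count; the assume in the preheader
  // guarantees it is < 225.
  for (uint64_t conv6 = 1; conv6 < 225; ++conv6) {
    // [[N_VEC]] = and i64 [[CONV6]], 252: since conv6 < 225 fits in 8 bits,
    // masking with 252 (~3 in the low byte) rounds down to a multiple of 4.
    uint64_t n_vec = conv6 & 252;
    assert(n_vec == conv6 / 4 * 4);
    // [[TMP1]] = (conv6 << 3) + 360: one past the last byte touched by the
    // four unrolled rows (double offsets 0, 15, 30, 45; 8 bytes each), i.e.
    // 8 * ((45 + conv6 - 1) + 1) bytes from the base pointer.
    assert((conv6 << 3) + 360 == 8 * (45 + conv6 - 1) + 8);
  }
  return 0;
}

The memcheck then reports a conflict only when [A, A+TMP1) and [B, B+TMP1) overlap, i.e. when SCEVGEP20 (= A + TMP1) > B and SCEVGEP (= B + TMP1) > A, which is exactly the BOUND0/BOUND1 pair in the checks.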
diff --git a/llvm/test/Transforms/PhaseOrdering/SystemZ/sub-xor.ll b/llvm/test/Transforms/PhaseOrdering/SystemZ/sub-xor.ll
index 5fe267d62f93..43fd8bd59b8d 100644
--- a/llvm/test/Transforms/PhaseOrdering/SystemZ/sub-xor.ll
+++ b/llvm/test/Transforms/PhaseOrdering/SystemZ/sub-xor.ll
@@ -20,35 +20,35 @@ define dso_local zeroext i32 @foo(ptr noundef %a) #0 {
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT_7:%.*]], [[FOR_BODY4]] ]
; CHECK-NEXT: [[SUM_11:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD_7:%.*]], [[FOR_BODY4]] ]
; CHECK-NEXT: [[IDX_NEG:%.*]] = sub nsw i64 0, [[INDVARS_IV]]
-; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[IDX_NEG]]
+; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[IDX_NEG]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ADD_PTR]], align 4, !tbaa [[TBAA3:![0-9]+]]
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP0]], [[SUM_11]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_NEG:%.*]] = xor i64 [[INDVARS_IV]], -1
-; CHECK-NEXT: [[ADD_PTR_110:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_NEG]]
+; CHECK-NEXT: [[ADD_PTR_110:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_NEG]]
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ADD_PTR_110]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[ADD_111:%.*]] = add i32 [[TMP1]], [[ADD]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_112_NEG:%.*]] = sub nuw nsw i64 -2, [[INDVARS_IV]]
-; CHECK-NEXT: [[ADD_PTR_217:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_112_NEG]]
+; CHECK-NEXT: [[ADD_PTR_217:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_112_NEG]]
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ADD_PTR_217]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[ADD_218:%.*]] = add i32 [[TMP2]], [[ADD_111]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_219_NEG:%.*]] = sub nuw nsw i64 -3, [[INDVARS_IV]]
-; CHECK-NEXT: [[ADD_PTR_3:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_219_NEG]]
+; CHECK-NEXT: [[ADD_PTR_3:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_219_NEG]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ADD_PTR_3]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[ADD_3:%.*]] = add i32 [[TMP3]], [[ADD_218]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_3_NEG:%.*]] = sub nuw nsw i64 -4, [[INDVARS_IV]]
-; CHECK-NEXT: [[ADD_PTR_4:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_3_NEG]]
+; CHECK-NEXT: [[ADD_PTR_4:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_3_NEG]]
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ADD_PTR_4]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[ADD_4:%.*]] = add i32 [[TMP4]], [[ADD_3]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_4_NEG:%.*]] = sub nuw nsw i64 -5, [[INDVARS_IV]]
-; CHECK-NEXT: [[ADD_PTR_5:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_4_NEG]]
+; CHECK-NEXT: [[ADD_PTR_5:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_4_NEG]]
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ADD_PTR_5]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[ADD_5:%.*]] = add i32 [[TMP5]], [[ADD_4]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_5_NEG:%.*]] = sub nuw nsw i64 -6, [[INDVARS_IV]]
-; CHECK-NEXT: [[ADD_PTR_6:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_5_NEG]]
+; CHECK-NEXT: [[ADD_PTR_6:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_5_NEG]]
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ADD_PTR_6]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[ADD_6:%.*]] = add i32 [[TMP6]], [[ADD_5]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_6_NEG:%.*]] = sub nuw nsw i64 -7, [[INDVARS_IV]]
-; CHECK-NEXT: [[ADD_PTR_7:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_6_NEG]]
+; CHECK-NEXT: [[ADD_PTR_7:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_6_NEG]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ADD_PTR_7]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[ADD_7]] = add i32 [[TMP7]], [[ADD_6]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_7]] = add nuw nsw i64 [[INDVARS_IV]], 8
@@ -58,34 +58,34 @@ define dso_local zeroext i32 @foo(ptr noundef %a) #0 {
; CHECK-NEXT: [[INDVARS_IV_1:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_1_7:%.*]], [[FOR_BODY4_1]] ], [ 0, [[FOR_BODY4]] ]
; CHECK-NEXT: [[SUM_11_1:%.*]] = phi i32 [ [[ADD_1_7:%.*]], [[FOR_BODY4_1]] ], [ [[ADD_7]], [[FOR_BODY4]] ]
; CHECK-NEXT: [[IDX_NEG_1:%.*]] = sub nsw i64 0, [[INDVARS_IV_1]]
-; CHECK-NEXT: [[ADD_PTR_1:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[IDX_NEG_1]]
+; CHECK-NEXT: [[ADD_PTR_1:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[IDX_NEG_1]]
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ADD_PTR_1]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_1_NEG:%.*]] = xor i64 [[INDVARS_IV_1]], -1
-; CHECK-NEXT: [[ADD_PTR_1_1:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_1_NEG]]
+; CHECK-NEXT: [[ADD_PTR_1_1:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_NEG]]
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[ADD_PTR_1_1]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP8]], [[TMP9]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_1_1_NEG:%.*]] = sub nuw nsw i64 -2, [[INDVARS_IV_1]]
-; CHECK-NEXT: [[ADD_PTR_1_2:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_1_1_NEG]]
+; CHECK-NEXT: [[ADD_PTR_1_2:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_1_NEG]]
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ADD_PTR_1_2]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[TMP12:%.*]] = add i32 [[TMP10]], [[TMP11]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_1_2_NEG:%.*]] = sub nuw nsw i64 -3, [[INDVARS_IV_1]]
-; CHECK-NEXT: [[ADD_PTR_1_3:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_1_2_NEG]]
+; CHECK-NEXT: [[ADD_PTR_1_3:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_2_NEG]]
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[ADD_PTR_1_3]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[TMP14:%.*]] = add i32 [[TMP12]], [[TMP13]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_1_3_NEG:%.*]] = sub nuw nsw i64 -4, [[INDVARS_IV_1]]
-; CHECK-NEXT: [[ADD_PTR_1_4:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_1_3_NEG]]
+; CHECK-NEXT: [[ADD_PTR_1_4:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_3_NEG]]
; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[ADD_PTR_1_4]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[TMP16:%.*]] = add i32 [[TMP14]], [[TMP15]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_1_4_NEG:%.*]] = sub nuw nsw i64 -5, [[INDVARS_IV_1]]
-; CHECK-NEXT: [[ADD_PTR_1_5:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_1_4_NEG]]
+; CHECK-NEXT: [[ADD_PTR_1_5:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_4_NEG]]
; CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[ADD_PTR_1_5]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[TMP18:%.*]] = add i32 [[TMP16]], [[TMP17]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_1_5_NEG:%.*]] = sub nuw nsw i64 -6, [[INDVARS_IV_1]]
-; CHECK-NEXT: [[ADD_PTR_1_6:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_1_5_NEG]]
+; CHECK-NEXT: [[ADD_PTR_1_6:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_5_NEG]]
; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[ADD_PTR_1_6]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[TMP20:%.*]] = add i32 [[TMP18]], [[TMP19]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_1_6_NEG:%.*]] = sub nuw nsw i64 -7, [[INDVARS_IV_1]]
-; CHECK-NEXT: [[ADD_PTR_1_7:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_1_6_NEG]]
+; CHECK-NEXT: [[ADD_PTR_1_7:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_1_6_NEG]]
; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[ADD_PTR_1_7]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[TMP22:%.*]] = add i32 [[TMP20]], [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = shl i32 [[TMP22]], 1
@@ -97,42 +97,42 @@ define dso_local zeroext i32 @foo(ptr noundef %a) #0 {
; CHECK-NEXT: [[INDVARS_IV_2:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_2_7:%.*]], [[FOR_BODY4_2]] ], [ 0, [[FOR_BODY4_1]] ]
; CHECK-NEXT: [[SUM_11_2:%.*]] = phi i32 [ [[ADD_2_7:%.*]], [[FOR_BODY4_2]] ], [ [[ADD_1_7]], [[FOR_BODY4_1]] ]
; CHECK-NEXT: [[IDX_NEG_2:%.*]] = sub nsw i64 0, [[INDVARS_IV_2]]
-; CHECK-NEXT: [[ADD_PTR_2:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[IDX_NEG_2]]
+; CHECK-NEXT: [[ADD_PTR_2:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[IDX_NEG_2]]
; CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[ADD_PTR_2]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[MUL_2:%.*]] = mul i32 [[TMP24]], 3
; CHECK-NEXT: [[ADD_2:%.*]] = add i32 [[MUL_2]], [[SUM_11_2]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_2_NEG:%.*]] = xor i64 [[INDVARS_IV_2]], -1
-; CHECK-NEXT: [[ADD_PTR_2_1:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_2_NEG]]
+; CHECK-NEXT: [[ADD_PTR_2_1:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_NEG]]
; CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[ADD_PTR_2_1]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[MUL_2_1:%.*]] = mul i32 [[TMP25]], 3
; CHECK-NEXT: [[ADD_2_1:%.*]] = add i32 [[MUL_2_1]], [[ADD_2]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_2_1_NEG:%.*]] = sub nuw nsw i64 -2, [[INDVARS_IV_2]]
-; CHECK-NEXT: [[ADD_PTR_2_2:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_2_1_NEG]]
+; CHECK-NEXT: [[ADD_PTR_2_2:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_1_NEG]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[ADD_PTR_2_2]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[MUL_2_2:%.*]] = mul i32 [[TMP26]], 3
; CHECK-NEXT: [[ADD_2_2:%.*]] = add i32 [[MUL_2_2]], [[ADD_2_1]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_2_2_NEG:%.*]] = sub nuw nsw i64 -3, [[INDVARS_IV_2]]
-; CHECK-NEXT: [[ADD_PTR_2_3:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_2_2_NEG]]
+; CHECK-NEXT: [[ADD_PTR_2_3:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_2_NEG]]
; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[ADD_PTR_2_3]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[MUL_2_3:%.*]] = mul i32 [[TMP27]], 3
; CHECK-NEXT: [[ADD_2_3:%.*]] = add i32 [[MUL_2_3]], [[ADD_2_2]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_2_3_NEG:%.*]] = sub nuw nsw i64 -4, [[INDVARS_IV_2]]
-; CHECK-NEXT: [[ADD_PTR_2_4:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_2_3_NEG]]
+; CHECK-NEXT: [[ADD_PTR_2_4:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_3_NEG]]
; CHECK-NEXT: [[TMP28:%.*]] = load i32, ptr [[ADD_PTR_2_4]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[MUL_2_4:%.*]] = mul i32 [[TMP28]], 3
; CHECK-NEXT: [[ADD_2_4:%.*]] = add i32 [[MUL_2_4]], [[ADD_2_3]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_2_4_NEG:%.*]] = sub nuw nsw i64 -5, [[INDVARS_IV_2]]
-; CHECK-NEXT: [[ADD_PTR_2_5:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_2_4_NEG]]
+; CHECK-NEXT: [[ADD_PTR_2_5:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_4_NEG]]
; CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[ADD_PTR_2_5]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[MUL_2_5:%.*]] = mul i32 [[TMP29]], 3
; CHECK-NEXT: [[ADD_2_5:%.*]] = add i32 [[MUL_2_5]], [[ADD_2_4]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_2_5_NEG:%.*]] = sub nuw nsw i64 -6, [[INDVARS_IV_2]]
-; CHECK-NEXT: [[ADD_PTR_2_6:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_2_5_NEG]]
+; CHECK-NEXT: [[ADD_PTR_2_6:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_5_NEG]]
; CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[ADD_PTR_2_6]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[MUL_2_6:%.*]] = mul i32 [[TMP30]], 3
; CHECK-NEXT: [[ADD_2_6:%.*]] = add i32 [[MUL_2_6]], [[ADD_2_5]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_2_6_NEG:%.*]] = sub nuw nsw i64 -7, [[INDVARS_IV_2]]
-; CHECK-NEXT: [[ADD_PTR_2_7:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds ([100 x i32], ptr @ARR, i64 0, i64 99), i64 [[INDVARS_IV_NEXT_2_6_NEG]]
+; CHECK-NEXT: [[ADD_PTR_2_7:%.*]] = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @ARR, i64 396), i64 [[INDVARS_IV_NEXT_2_6_NEG]]
; CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[ADD_PTR_2_7]], align 4, !tbaa [[TBAA3]]
; CHECK-NEXT: [[MUL_2_7:%.*]] = mul i32 [[TMP31]], 3
; CHECK-NEXT: [[ADD_2_7]] = add i32 [[MUL_2_7]], [[ADD_2_6]]
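Note on the sub-xor.ll changes above and the excessive-unrolling.ll changes below: these are purely notational. Constant GEPs are now printed as byte offsets from the base pointer (i8 GEPs) rather than as typed array indices, e.g. `([100 x i32], ptr @ARR, i64 0, i64 99)` becomes `(i8, ptr @ARR, i64 396)`. The addresses are unchanged; an illustrative compile-time check of the offset arithmetic (not part of the tests):

#include <cstdint>

// @ARR is [100 x i32]: element 99 sits 99 * 4 = 396 bytes in.
static_assert(99 * sizeof(int32_t) == 396, "ARR[99] as a byte offset");
// @a/@b/@c are [58 x double]: elements 2 and 42 sit 16 and 336 bytes in.
static_assert(2 * sizeof(double) == 16, "b[2] as a byte offset");
static_assert(42 * sizeof(double) == 336, "b[42] as a byte offset");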
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/excessive-unrolling.ll b/llvm/test/Transforms/PhaseOrdering/X86/excessive-unrolling.ll
index 741e3ad4f7b9..ed25734c8448 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/excessive-unrolling.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/excessive-unrolling.ll
@@ -13,129 +13,129 @@ define void @test_known_trip_count() {
; CHECK-LABEL: @test_known_trip_count(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr @b, align 16
-; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 2), align 16
+; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 16), align 16
; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <2 x double>, ptr @c, align 16
-; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 2), align 16
+; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 16), align 16
; CHECK-NEXT: [[TMP0:%.*]] = fadd <2 x double> [[WIDE_LOAD]], [[WIDE_LOAD4]]
; CHECK-NEXT: [[TMP1:%.*]] = fadd <2 x double> [[WIDE_LOAD3]], [[WIDE_LOAD5]]
; CHECK-NEXT: store <2 x double> [[TMP0]], ptr @a, align 16
-; CHECK-NEXT: store <2 x double> [[TMP1]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 2), align 16
-; CHECK-NEXT: [[WIDE_LOAD_1:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 4), align 16
-; CHECK-NEXT: [[WIDE_LOAD3_1:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 6), align 16
-; CHECK-NEXT: [[WIDE_LOAD4_1:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 4), align 16
-; CHECK-NEXT: [[WIDE_LOAD5_1:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 6), align 16
+; CHECK-NEXT: store <2 x double> [[TMP1]], ptr getelementptr inbounds (i8, ptr @a, i64 16), align 16
+; CHECK-NEXT: [[WIDE_LOAD_1:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 32), align 16
+; CHECK-NEXT: [[WIDE_LOAD3_1:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 48), align 16
+; CHECK-NEXT: [[WIDE_LOAD4_1:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 32), align 16
+; CHECK-NEXT: [[WIDE_LOAD5_1:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 48), align 16
; CHECK-NEXT: [[TMP2:%.*]] = fadd <2 x double> [[WIDE_LOAD_1]], [[WIDE_LOAD4_1]]
; CHECK-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[WIDE_LOAD3_1]], [[WIDE_LOAD5_1]]
-; CHECK-NEXT: store <2 x double> [[TMP2]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 4), align 16
-; CHECK-NEXT: store <2 x double> [[TMP3]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 6), align 16
-; CHECK-NEXT: [[WIDE_LOAD_2:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 8), align 16
-; CHECK-NEXT: [[WIDE_LOAD3_2:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 10), align 16
-; CHECK-NEXT: [[WIDE_LOAD4_2:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 8), align 16
-; CHECK-NEXT: [[WIDE_LOAD5_2:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 10), align 16
+; CHECK-NEXT: store <2 x double> [[TMP2]], ptr getelementptr inbounds (i8, ptr @a, i64 32), align 16
+; CHECK-NEXT: store <2 x double> [[TMP3]], ptr getelementptr inbounds (i8, ptr @a, i64 48), align 16
+; CHECK-NEXT: [[WIDE_LOAD_2:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 64), align 16
+; CHECK-NEXT: [[WIDE_LOAD3_2:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 80), align 16
+; CHECK-NEXT: [[WIDE_LOAD4_2:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 64), align 16
+; CHECK-NEXT: [[WIDE_LOAD5_2:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 80), align 16
; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[WIDE_LOAD_2]], [[WIDE_LOAD4_2]]
; CHECK-NEXT: [[TMP5:%.*]] = fadd <2 x double> [[WIDE_LOAD3_2]], [[WIDE_LOAD5_2]]
-; CHECK-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 8), align 16
-; CHECK-NEXT: store <2 x double> [[TMP5]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 10), align 16
-; CHECK-NEXT: [[WIDE_LOAD_3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 12), align 16
-; CHECK-NEXT: [[WIDE_LOAD3_3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 14), align 16
-; CHECK-NEXT: [[WIDE_LOAD4_3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 12), align 16
-; CHECK-NEXT: [[WIDE_LOAD5_3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 14), align 16
+; CHECK-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds (i8, ptr @a, i64 64), align 16
+; CHECK-NEXT: store <2 x double> [[TMP5]], ptr getelementptr inbounds (i8, ptr @a, i64 80), align 16
+; CHECK-NEXT: [[WIDE_LOAD_3:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 96), align 16
+; CHECK-NEXT: [[WIDE_LOAD3_3:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 112), align 16
+; CHECK-NEXT: [[WIDE_LOAD4_3:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 96), align 16
+; CHECK-NEXT: [[WIDE_LOAD5_3:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 112), align 16
; CHECK-NEXT: [[TMP6:%.*]] = fadd <2 x double> [[WIDE_LOAD_3]], [[WIDE_LOAD4_3]]
; CHECK-NEXT: [[TMP7:%.*]] = fadd <2 x double> [[WIDE_LOAD3_3]], [[WIDE_LOAD5_3]]
-; CHECK-NEXT: store <2 x double> [[TMP6]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 12), align 16
-; CHECK-NEXT: store <2 x double> [[TMP7]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 14), align 16
-; CHECK-NEXT: [[WIDE_LOAD_4:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 16), align 16
-; CHECK-NEXT: [[WIDE_LOAD3_4:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 18), align 16
-; CHECK-NEXT: [[WIDE_LOAD4_4:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 16), align 16
-; CHECK-NEXT: [[WIDE_LOAD5_4:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 18), align 16
+; CHECK-NEXT: store <2 x double> [[TMP6]], ptr getelementptr inbounds (i8, ptr @a, i64 96), align 16
+; CHECK-NEXT: store <2 x double> [[TMP7]], ptr getelementptr inbounds (i8, ptr @a, i64 112), align 16
+; CHECK-NEXT: [[WIDE_LOAD_4:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 128), align 16
+; CHECK-NEXT: [[WIDE_LOAD3_4:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 144), align 16
+; CHECK-NEXT: [[WIDE_LOAD4_4:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 128), align 16
+; CHECK-NEXT: [[WIDE_LOAD5_4:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 144), align 16
; CHECK-NEXT: [[TMP8:%.*]] = fadd <2 x double> [[WIDE_LOAD_4]], [[WIDE_LOAD4_4]]
; CHECK-NEXT: [[TMP9:%.*]] = fadd <2 x double> [[WIDE_LOAD3_4]], [[WIDE_LOAD5_4]]
-; CHECK-NEXT: store <2 x double> [[TMP8]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 16), align 16
-; CHECK-NEXT: store <2 x double> [[TMP9]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 18), align 16
-; CHECK-NEXT: [[WIDE_LOAD_5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 20), align 16
-; CHECK-NEXT: [[WIDE_LOAD3_5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 22), align 16
-; CHECK-NEXT: [[WIDE_LOAD4_5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 20), align 16
-; CHECK-NEXT: [[WIDE_LOAD5_5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 22), align 16
+; CHECK-NEXT: store <2 x double> [[TMP8]], ptr getelementptr inbounds (i8, ptr @a, i64 128), align 16
+; CHECK-NEXT: store <2 x double> [[TMP9]], ptr getelementptr inbounds (i8, ptr @a, i64 144), align 16
+; CHECK-NEXT: [[WIDE_LOAD_5:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 160), align 16
+; CHECK-NEXT: [[WIDE_LOAD3_5:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 176), align 16
+; CHECK-NEXT: [[WIDE_LOAD4_5:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 160), align 16
+; CHECK-NEXT: [[WIDE_LOAD5_5:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 176), align 16
; CHECK-NEXT: [[TMP10:%.*]] = fadd <2 x double> [[WIDE_LOAD_5]], [[WIDE_LOAD4_5]]
; CHECK-NEXT: [[TMP11:%.*]] = fadd <2 x double> [[WIDE_LOAD3_5]], [[WIDE_LOAD5_5]]
-; CHECK-NEXT: store <2 x double> [[TMP10]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 20), align 16
-; CHECK-NEXT: store <2 x double> [[TMP11]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 22), align 16
-; CHECK-NEXT: [[WIDE_LOAD_6:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 24), align 16
-; CHECK-NEXT: [[WIDE_LOAD3_6:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 26), align 16
-; CHECK-NEXT: [[WIDE_LOAD4_6:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 24), align 16
-; CHECK-NEXT: [[WIDE_LOAD5_6:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 26), align 16
+; CHECK-NEXT: store <2 x double> [[TMP10]], ptr getelementptr inbounds (i8, ptr @a, i64 160), align 16
+; CHECK-NEXT: store <2 x double> [[TMP11]], ptr getelementptr inbounds (i8, ptr @a, i64 176), align 16
+; CHECK-NEXT: [[WIDE_LOAD_6:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 192), align 16
+; CHECK-NEXT: [[WIDE_LOAD3_6:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 208), align 16
+; CHECK-NEXT: [[WIDE_LOAD4_6:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 192), align 16
+; CHECK-NEXT: [[WIDE_LOAD5_6:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 208), align 16
; CHECK-NEXT: [[TMP12:%.*]] = fadd <2 x double> [[WIDE_LOAD_6]], [[WIDE_LOAD4_6]]
; CHECK-NEXT: [[TMP13:%.*]] = fadd <2 x double> [[WIDE_LOAD3_6]], [[WIDE_LOAD5_6]]
-; CHECK-NEXT: store <2 x double> [[TMP12]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 24), align 16
-; CHECK-NEXT: store <2 x double> [[TMP13]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 26), align 16
-; CHECK-NEXT: [[WIDE_LOAD_7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 28), align 16
-; CHECK-NEXT: [[WIDE_LOAD3_7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 30), align 16
-; CHECK-NEXT: [[WIDE_LOAD4_7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 28), align 16
-; CHECK-NEXT: [[WIDE_LOAD5_7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 30), align 16
+; CHECK-NEXT: store <2 x double> [[TMP12]], ptr getelementptr inbounds (i8, ptr @a, i64 192), align 16
+; CHECK-NEXT: store <2 x double> [[TMP13]], ptr getelementptr inbounds (i8, ptr @a, i64 208), align 16
+; CHECK-NEXT: [[WIDE_LOAD_7:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 224), align 16
+; CHECK-NEXT: [[WIDE_LOAD3_7:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 240), align 16
+; CHECK-NEXT: [[WIDE_LOAD4_7:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 224), align 16
+; CHECK-NEXT: [[WIDE_LOAD5_7:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 240), align 16
; CHECK-NEXT: [[TMP14:%.*]] = fadd <2 x double> [[WIDE_LOAD_7]], [[WIDE_LOAD4_7]]
; CHECK-NEXT: [[TMP15:%.*]] = fadd <2 x double> [[WIDE_LOAD3_7]], [[WIDE_LOAD5_7]]
-; CHECK-NEXT: store <2 x double> [[TMP14]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 28), align 16
-; CHECK-NEXT: store <2 x double> [[TMP15]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 30), align 16
-; CHECK-NEXT: [[WIDE_LOAD_8:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 32), align 16
-; CHECK-NEXT: [[WIDE_LOAD3_8:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 34), align 16
-; CHECK-NEXT: [[WIDE_LOAD4_8:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 32), align 16
-; CHECK-NEXT: [[WIDE_LOAD5_8:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 34), align 16
+; CHECK-NEXT: store <2 x double> [[TMP14]], ptr getelementptr inbounds (i8, ptr @a, i64 224), align 16
+; CHECK-NEXT: store <2 x double> [[TMP15]], ptr getelementptr inbounds (i8, ptr @a, i64 240), align 16
+; CHECK-NEXT: [[WIDE_LOAD_8:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 256), align 16
+; CHECK-NEXT: [[WIDE_LOAD3_8:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 272), align 16
+; CHECK-NEXT: [[WIDE_LOAD4_8:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 256), align 16
+; CHECK-NEXT: [[WIDE_LOAD5_8:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 272), align 16
; CHECK-NEXT: [[TMP16:%.*]] = fadd <2 x double> [[WIDE_LOAD_8]], [[WIDE_LOAD4_8]]
; CHECK-NEXT: [[TMP17:%.*]] = fadd <2 x double> [[WIDE_LOAD3_8]], [[WIDE_LOAD5_8]]
-; CHECK-NEXT: store <2 x double> [[TMP16]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 32), align 16
-; CHECK-NEXT: store <2 x double> [[TMP17]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 34), align 16
-; CHECK-NEXT: [[WIDE_LOAD_9:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 36), align 16
-; CHECK-NEXT: [[WIDE_LOAD3_9:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 38), align 16
-; CHECK-NEXT: [[WIDE_LOAD4_9:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 36), align 16
-; CHECK-NEXT: [[WIDE_LOAD5_9:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 38), align 16
+; CHECK-NEXT: store <2 x double> [[TMP16]], ptr getelementptr inbounds (i8, ptr @a, i64 256), align 16
+; CHECK-NEXT: store <2 x double> [[TMP17]], ptr getelementptr inbounds (i8, ptr @a, i64 272), align 16
+; CHECK-NEXT: [[WIDE_LOAD_9:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 288), align 16
+; CHECK-NEXT: [[WIDE_LOAD3_9:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 304), align 16
+; CHECK-NEXT: [[WIDE_LOAD4_9:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 288), align 16
+; CHECK-NEXT: [[WIDE_LOAD5_9:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 304), align 16
; CHECK-NEXT: [[TMP18:%.*]] = fadd <2 x double> [[WIDE_LOAD_9]], [[WIDE_LOAD4_9]]
; CHECK-NEXT: [[TMP19:%.*]] = fadd <2 x double> [[WIDE_LOAD3_9]], [[WIDE_LOAD5_9]]
-; CHECK-NEXT: store <2 x double> [[TMP18]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 36), align 16
-; CHECK-NEXT: store <2 x double> [[TMP19]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 38), align 16
-; CHECK-NEXT: [[WIDE_LOAD_10:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 40), align 16
-; CHECK-NEXT: [[WIDE_LOAD3_10:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 42), align 16
-; CHECK-NEXT: [[WIDE_LOAD4_10:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 40), align 16
-; CHECK-NEXT: [[WIDE_LOAD5_10:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 42), align 16
+; CHECK-NEXT: store <2 x double> [[TMP18]], ptr getelementptr inbounds (i8, ptr @a, i64 288), align 16
+; CHECK-NEXT: store <2 x double> [[TMP19]], ptr getelementptr inbounds (i8, ptr @a, i64 304), align 16
+; CHECK-NEXT: [[WIDE_LOAD_10:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 320), align 16
+; CHECK-NEXT: [[WIDE_LOAD3_10:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 336), align 16
+; CHECK-NEXT: [[WIDE_LOAD4_10:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 320), align 16
+; CHECK-NEXT: [[WIDE_LOAD5_10:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 336), align 16
; CHECK-NEXT: [[TMP20:%.*]] = fadd <2 x double> [[WIDE_LOAD_10]], [[WIDE_LOAD4_10]]
; CHECK-NEXT: [[TMP21:%.*]] = fadd <2 x double> [[WIDE_LOAD3_10]], [[WIDE_LOAD5_10]]
-; CHECK-NEXT: store <2 x double> [[TMP20]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 40), align 16
-; CHECK-NEXT: store <2 x double> [[TMP21]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 42), align 16
-; CHECK-NEXT: [[WIDE_LOAD_11:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 44), align 16
-; CHECK-NEXT: [[WIDE_LOAD3_11:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 46), align 16
-; CHECK-NEXT: [[WIDE_LOAD4_11:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 44), align 16
-; CHECK-NEXT: [[WIDE_LOAD5_11:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 46), align 16
+; CHECK-NEXT: store <2 x double> [[TMP20]], ptr getelementptr inbounds (i8, ptr @a, i64 320), align 16
+; CHECK-NEXT: store <2 x double> [[TMP21]], ptr getelementptr inbounds (i8, ptr @a, i64 336), align 16
+; CHECK-NEXT: [[WIDE_LOAD_11:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 352), align 16
+; CHECK-NEXT: [[WIDE_LOAD3_11:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 368), align 16
+; CHECK-NEXT: [[WIDE_LOAD4_11:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 352), align 16
+; CHECK-NEXT: [[WIDE_LOAD5_11:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 368), align 16
; CHECK-NEXT: [[TMP22:%.*]] = fadd <2 x double> [[WIDE_LOAD_11]], [[WIDE_LOAD4_11]]
; CHECK-NEXT: [[TMP23:%.*]] = fadd <2 x double> [[WIDE_LOAD3_11]], [[WIDE_LOAD5_11]]
-; CHECK-NEXT: store <2 x double> [[TMP22]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 44), align 16
-; CHECK-NEXT: store <2 x double> [[TMP23]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 46), align 16
-; CHECK-NEXT: [[WIDE_LOAD_12:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 48), align 16
-; CHECK-NEXT: [[WIDE_LOAD3_12:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 50), align 16
-; CHECK-NEXT: [[WIDE_LOAD4_12:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 48), align 16
-; CHECK-NEXT: [[WIDE_LOAD5_12:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 50), align 16
+; CHECK-NEXT: store <2 x double> [[TMP22]], ptr getelementptr inbounds (i8, ptr @a, i64 352), align 16
+; CHECK-NEXT: store <2 x double> [[TMP23]], ptr getelementptr inbounds (i8, ptr @a, i64 368), align 16
+; CHECK-NEXT: [[WIDE_LOAD_12:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 384), align 16
+; CHECK-NEXT: [[WIDE_LOAD3_12:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 400), align 16
+; CHECK-NEXT: [[WIDE_LOAD4_12:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 384), align 16
+; CHECK-NEXT: [[WIDE_LOAD5_12:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 400), align 16
; CHECK-NEXT: [[TMP24:%.*]] = fadd <2 x double> [[WIDE_LOAD_12]], [[WIDE_LOAD4_12]]
; CHECK-NEXT: [[TMP25:%.*]] = fadd <2 x double> [[WIDE_LOAD3_12]], [[WIDE_LOAD5_12]]
-; CHECK-NEXT: store <2 x double> [[TMP24]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 48), align 16
-; CHECK-NEXT: store <2 x double> [[TMP25]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 50), align 16
-; CHECK-NEXT: [[WIDE_LOAD_13:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 52), align 16
-; CHECK-NEXT: [[WIDE_LOAD3_13:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 54), align 16
-; CHECK-NEXT: [[WIDE_LOAD4_13:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 52), align 16
-; CHECK-NEXT: [[WIDE_LOAD5_13:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 54), align 16
+; CHECK-NEXT: store <2 x double> [[TMP24]], ptr getelementptr inbounds (i8, ptr @a, i64 384), align 16
+; CHECK-NEXT: store <2 x double> [[TMP25]], ptr getelementptr inbounds (i8, ptr @a, i64 400), align 16
+; CHECK-NEXT: [[WIDE_LOAD_13:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 416), align 16
+; CHECK-NEXT: [[WIDE_LOAD3_13:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 432), align 16
+; CHECK-NEXT: [[WIDE_LOAD4_13:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 416), align 16
+; CHECK-NEXT: [[WIDE_LOAD5_13:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 432), align 16
; CHECK-NEXT: [[TMP26:%.*]] = fadd <2 x double> [[WIDE_LOAD_13]], [[WIDE_LOAD4_13]]
; CHECK-NEXT: [[TMP27:%.*]] = fadd <2 x double> [[WIDE_LOAD3_13]], [[WIDE_LOAD5_13]]
-; CHECK-NEXT: store <2 x double> [[TMP26]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 52), align 16
-; CHECK-NEXT: store <2 x double> [[TMP27]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 54), align 16
-; CHECK-NEXT: [[WIDE_LOAD_14:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 56), align 16
-; CHECK-NEXT: [[WIDE_LOAD3_14:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 1, i64 0), align 16
-; CHECK-NEXT: [[WIDE_LOAD4_14:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 56), align 16
-; CHECK-NEXT: [[WIDE_LOAD5_14:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 1, i64 0), align 16
+; CHECK-NEXT: store <2 x double> [[TMP26]], ptr getelementptr inbounds (i8, ptr @a, i64 416), align 16
+; CHECK-NEXT: store <2 x double> [[TMP27]], ptr getelementptr inbounds (i8, ptr @a, i64 432), align 16
+; CHECK-NEXT: [[WIDE_LOAD_14:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 448), align 16
+; CHECK-NEXT: [[WIDE_LOAD3_14:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 464), align 16
+; CHECK-NEXT: [[WIDE_LOAD4_14:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 448), align 16
+; CHECK-NEXT: [[WIDE_LOAD5_14:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 464), align 16
; CHECK-NEXT: [[TMP28:%.*]] = fadd <2 x double> [[WIDE_LOAD_14]], [[WIDE_LOAD4_14]]
; CHECK-NEXT: [[TMP29:%.*]] = fadd <2 x double> [[WIDE_LOAD3_14]], [[WIDE_LOAD5_14]]
-; CHECK-NEXT: store <2 x double> [[TMP28]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 56), align 16
-; CHECK-NEXT: store <2 x double> [[TMP29]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 1, i64 0), align 16
-; CHECK-NEXT: [[TMP30:%.*]] = load double, ptr getelementptr inbounds ([58 x double], ptr @b, i64 1, i64 2), align 16
-; CHECK-NEXT: [[TMP31:%.*]] = load double, ptr getelementptr inbounds ([58 x double], ptr @c, i64 1, i64 2), align 16
+; CHECK-NEXT: store <2 x double> [[TMP28]], ptr getelementptr inbounds (i8, ptr @a, i64 448), align 16
+; CHECK-NEXT: store <2 x double> [[TMP29]], ptr getelementptr inbounds (i8, ptr @a, i64 464), align 16
+; CHECK-NEXT: [[TMP30:%.*]] = load double, ptr getelementptr inbounds (i8, ptr @b, i64 480), align 16
+; CHECK-NEXT: [[TMP31:%.*]] = load double, ptr getelementptr inbounds (i8, ptr @c, i64 480), align 16
; CHECK-NEXT: [[ADD:%.*]] = fadd double [[TMP30]], [[TMP31]]
-; CHECK-NEXT: store double [[ADD]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 1, i64 2), align 16
+; CHECK-NEXT: store double [[ADD]], ptr getelementptr inbounds (i8, ptr @a, i64 480), align 16
; CHECK-NEXT: ret void
;
entry:
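
The rewritten checks above track the canonicalization of constant GEP expressions into i8-based byte offsets. A quick sanity check of the arithmetic, assuming the usual 8-byte double:

  ; [58 x double]: element i sits at byte offset 8 * i
  ;   (i64 0, i64 24) -> getelementptr inbounds (i8, ptr @a, i64 192)   ; 24 * 8
  ;   (i64 1, i64 0)  -> getelementptr inbounds (i8, ptr @a, i64 464)   ; 58 * 8
  ;   (i64 1, i64 2)  -> getelementptr inbounds (i8, ptr @a, i64 480)   ; 60 * 8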
diff --git a/llvm/test/Transforms/SCCP/2009-09-24-byval-ptr.ll b/llvm/test/Transforms/SCCP/2009-09-24-byval-ptr.ll
index 34ef4349c786..ac2e945b125b 100644
--- a/llvm/test/Transforms/SCCP/2009-09-24-byval-ptr.ll
+++ b/llvm/test/Transforms/SCCP/2009-09-24-byval-ptr.ll
@@ -31,7 +31,7 @@ return: ; preds = %entry
define internal i32 @vfu2(ptr byval(%struct.MYstr) align 4 %u) nounwind readonly {
; CHECK-LABEL: @vfu2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr inbounds ([[STRUCT_MYSTR:%.*]], ptr @mystr, i64 0, i32 1), align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr inbounds (i8, ptr @mystr, i64 4), align 4
; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @mystr, align 1
; CHECK-NEXT: [[TMP2:%.*]] = zext i8 [[TMP1]] to i32
; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP2]], [[TMP0]]
diff --git a/llvm/test/Transforms/SCCP/apint-bigint2.ll b/llvm/test/Transforms/SCCP/apint-bigint2.ll
index 6092c092bea5..695d6a4cf056 100644
--- a/llvm/test/Transforms/SCCP/apint-bigint2.ll
+++ b/llvm/test/Transforms/SCCP/apint-bigint2.ll
@@ -23,7 +23,7 @@ define i101 @large_aggregate() {
; CHECK-LABEL: @large_aggregate(
; CHECK-NEXT: [[D:%.*]] = and i101 undef, 1
; CHECK-NEXT: [[DD:%.*]] = or i101 [[D]], 1
-; CHECK-NEXT: [[G:%.*]] = getelementptr i101, ptr getelementptr inbounds ([6 x i101], ptr @Y, i64 0, i64 5), i101 [[DD]]
+; CHECK-NEXT: [[G:%.*]] = getelementptr i101, ptr getelementptr inbounds (i8, ptr @Y, i64 80), i101 [[DD]]
; CHECK-NEXT: [[L3:%.*]] = load i101, ptr [[G]], align 4
; CHECK-NEXT: ret i101 [[L3]]
;
@@ -40,7 +40,7 @@ define i101 @large_aggregate_2() {
; CHECK-LABEL: @large_aggregate_2(
; CHECK-NEXT: [[D:%.*]] = and i101 undef, 1
; CHECK-NEXT: [[DD:%.*]] = or i101 [[D]], 1
-; CHECK-NEXT: [[G:%.*]] = getelementptr i101, ptr getelementptr inbounds ([6 x i101], ptr @Y, i64 0, i64 5), i101 [[DD]]
+; CHECK-NEXT: [[G:%.*]] = getelementptr i101, ptr getelementptr inbounds (i8, ptr @Y, i64 80), i101 [[DD]]
; CHECK-NEXT: [[L3:%.*]] = load i101, ptr [[G]], align 4
; CHECK-NEXT: ret i101 [[L3]]
;
@@ -54,7 +54,7 @@ define i101 @large_aggregate_2() {
define void @index_too_large() {
; CHECK-LABEL: @index_too_large(
-; CHECK-NEXT: store ptr getelementptr ([6 x i101], ptr @Y, i64 187649984473770, i64 2), ptr undef, align 8
+; CHECK-NEXT: store ptr getelementptr (i8, ptr @Y, i64 18014398509481952), ptr undef, align 8
; CHECK-NEXT: ret void
;
%ptr1 = getelementptr [6 x i101], ptr @Y, i32 0, i32 -1
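
The index_too_large constant folds the same way; the byte offset checks out, assuming i101 is allocated in 16 bytes:

  ; sizeof([6 x i101]) = 6 * 16 = 96 bytes
  ; 187649984473770 * 96 + 2 * 16 = 18014398509481920 + 32 = 18014398509481952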
diff --git a/llvm/test/Transforms/SCCP/ip-add-range-to-call.ll b/llvm/test/Transforms/SCCP/ip-add-range-to-call.ll
index c24c554102dd..91efbcc4ee38 100644
--- a/llvm/test/Transforms/SCCP/ip-add-range-to-call.ll
+++ b/llvm/test/Transforms/SCCP/ip-add-range-to-call.ll
@@ -159,7 +159,7 @@ exit:
}
define i32 @caller5() {
-; CHECK-LABEL: define range(i32 200, 401) i32 @caller5() {
+; CHECK-LABEL: define i32 @caller5() {
; CHECK-NEXT: [[C1:%.*]] = call i32 @callee5(i32 10, i32 100)
; CHECK-NEXT: [[C2:%.*]] = call i32 @callee5(i32 20, i32 200)
; CHECK-NEXT: [[A:%.*]] = add i32 [[C1]], [[C2]]
diff --git a/llvm/test/Transforms/SCCP/range-mul-nuw-nsw-flags.ll b/llvm/test/Transforms/SCCP/range-mul-nuw-nsw-flags.ll
new file mode 100644
index 000000000000..8525264a0087
--- /dev/null
+++ b/llvm/test/Transforms/SCCP/range-mul-nuw-nsw-flags.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes=ipsccp -S %s | FileCheck %s
+
+define i1 @range_from_mul_nuw_nsw(i32 %a) {
+; CHECK-LABEL: @range_from_mul_nuw_nsw(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; CHECK: then:
+; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[A]], 10000
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[MUL]], -5000
+; CHECK-NEXT: ret i1 false
+; CHECK: else:
+; CHECK-NEXT: ret i1 false
+;
+entry:
+ %cmp = icmp ne i32 %a, 0
+ br i1 %cmp, label %then, label %else
+then:
+ %mul = mul nuw nsw i32 %a, 10000 ; Refined range via mul_nuw: [10000, 0)
+ %add = add nsw i32 %mul, -5000 ; Range: [5000, UINT_MAX - 5000 + 1)
+ %cond = icmp ult i32 %add, 4999
+ ret i1 %cond
+else:
+ ret i1 0
+}
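
A rough walk-through of the range propagation this new test exercises, following the comments in the test body:

  ; %cmp is true in %then, so %a has the non-zero wrapped range [1, 0)
  ; mul nuw nsw by 10000 keeps the product at or above 10000 -> [10000, 0)
  ; add nsw -5000 shifts the bounds                          -> [5000, UINT_MAX - 5000 + 1)
  ; 4999 < 5000, so icmp ult %add, 4999 folds to false, as the CHECK lines expect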
diff --git a/llvm/test/Transforms/SCCP/range-with-undef.ll b/llvm/test/Transforms/SCCP/range-with-undef.ll
new file mode 100644
index 000000000000..9b8d41517114
--- /dev/null
+++ b/llvm/test/Transforms/SCCP/range-with-undef.ll
@@ -0,0 +1,118 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -passes=ipsccp < %s | FileCheck %s
+
+; Make sure that constant ranges including undef are propagated correctly.
+
+define i8 @test_binop(i1 %cond, i8 %a) {
+; CHECK-LABEL: define i8 @test_binop(
+; CHECK-SAME: i1 [[COND:%.*]], i8 [[A:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br i1 [[COND]], label %[[IF:.*]], label %[[JOIN:.*]]
+; CHECK: [[IF]]:
+; CHECK-NEXT: [[A_EXT:%.*]] = zext i8 [[A]] to i16
+; CHECK-NEXT: br label %[[JOIN]]
+; CHECK: [[JOIN]]:
+; CHECK-NEXT: [[PHI:%.*]] = phi i16 [ undef, %[[ENTRY]] ], [ [[A_EXT]], %[[IF]] ]
+; CHECK-NEXT: [[AND:%.*]] = and i16 [[PHI]], -1
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc i16 [[AND]] to i8
+; CHECK-NEXT: ret i8 [[TRUNC]]
+;
+entry:
+ br i1 %cond, label %if, label %join
+
+if:
+ %a.ext = zext i8 %a to i16
+ br label %join
+
+join:
+ %phi = phi i16 [ undef, %entry ], [ %a.ext, %if ]
+ %and = and i16 %phi, u0x0000ffff
+ %trunc = trunc i16 %and to i8
+ ret i8 %trunc
+}
+
+define i8 @test_cast(i1 %cond, i8 %a) {
+; CHECK-LABEL: define i8 @test_cast(
+; CHECK-SAME: i1 [[COND:%.*]], i8 [[A:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br i1 [[COND]], label %[[IF:.*]], label %[[JOIN:.*]]
+; CHECK: [[IF]]:
+; CHECK-NEXT: [[A_EXT:%.*]] = zext i8 [[A]] to i16
+; CHECK-NEXT: br label %[[JOIN]]
+; CHECK: [[JOIN]]:
+; CHECK-NEXT: [[PHI:%.*]] = phi i16 [ undef, %[[ENTRY]] ], [ [[A_EXT]], %[[IF]] ]
+; CHECK-NEXT: [[ZEXT:%.*]] = zext i16 [[PHI]] to i32
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[ZEXT]] to i8
+; CHECK-NEXT: ret i8 [[TRUNC]]
+;
+entry:
+ br i1 %cond, label %if, label %join
+
+if:
+ %a.ext = zext i8 %a to i16
+ br label %join
+
+join:
+ %phi = phi i16 [ undef, %entry ], [ %a.ext, %if ]
+ %zext = zext i16 %phi to i32
+ %trunc = trunc i32 %zext to i8
+ ret i8 %trunc
+}
+
+define i8 @test_intrin(i1 %cond, i8 %a) {
+; CHECK-LABEL: define i8 @test_intrin(
+; CHECK-SAME: i1 [[COND:%.*]], i8 [[A:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br i1 [[COND]], label %[[IF:.*]], label %[[JOIN:.*]]
+; CHECK: [[IF]]:
+; CHECK-NEXT: [[A_EXT:%.*]] = zext i8 [[A]] to i16
+; CHECK-NEXT: br label %[[JOIN]]
+; CHECK: [[JOIN]]:
+; CHECK-NEXT: [[PHI:%.*]] = phi i16 [ undef, %[[ENTRY]] ], [ [[A_EXT]], %[[IF]] ]
+; CHECK-NEXT: [[UMAX:%.*]] = call i16 @llvm.umax.i16(i16 [[PHI]], i16 42)
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc i16 [[UMAX]] to i8
+; CHECK-NEXT: ret i8 [[TRUNC]]
+;
+entry:
+ br i1 %cond, label %if, label %join
+
+if:
+ %a.ext = zext i8 %a to i16
+ br label %join
+
+join:
+ %phi = phi i16 [ undef, %entry ], [ %a.ext, %if ]
+ %umax = call i16 @llvm.umax(i16 %phi, i16 42)
+ %trunc = trunc i16 %umax to i8
+ ret i8 %trunc
+}
+
+define i9 @test_with_overflow(i1 %cond, i8 %a) {
+; CHECK-LABEL: define i9 @test_with_overflow(
+; CHECK-SAME: i1 [[COND:%.*]], i8 [[A:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br i1 [[COND]], label %[[IF:.*]], label %[[JOIN:.*]]
+; CHECK: [[IF]]:
+; CHECK-NEXT: [[A_EXT:%.*]] = zext i8 [[A]] to i16
+; CHECK-NEXT: br label %[[JOIN]]
+; CHECK: [[JOIN]]:
+; CHECK-NEXT: [[PHI:%.*]] = phi i16 [ undef, %[[ENTRY]] ], [ [[A_EXT]], %[[IF]] ]
+; CHECK-NEXT: [[WO:%.*]] = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 [[PHI]], i16 1)
+; CHECK-NEXT: [[ADD:%.*]] = extractvalue { i16, i1 } [[WO]], 0
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc i16 [[ADD]] to i9
+; CHECK-NEXT: ret i9 [[TRUNC]]
+;
+entry:
+ br i1 %cond, label %if, label %join
+
+if:
+ %a.ext = zext i8 %a to i16
+ br label %join
+
+join:
+ %phi = phi i16 [ undef, %entry ], [ %a.ext, %if ]
+ %wo = call {i16, i1} @llvm.uadd.with.overflow(i16 %phi, i16 1)
+ %add = extractvalue {i16, i1} %wo, 0
+ %trunc = trunc i16 %add to i9
+ ret i9 %trunc
+}
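
In each of these functions the interesting lattice state is a constant range paired with a possible undef: %phi merges undef from %entry with a zext'd i8, i.e. the range [0, 256). As I read the checks, the point is conservatism: the binop, cast, intrinsic, and with.overflow users all keep the "may be undef" bit, so none of them is folded away on the strength of the range alone.

  ; e.g. in @test_binop: %phi : [0, 256), possibly undef
  ;      the and/trunc chain survives rather than folding on the range alone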
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/gather-cost.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/gather-cost.ll
index 2ea472169250..45030a0965e0 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/gather-cost.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/gather-cost.ll
@@ -61,16 +61,16 @@ define void @gather_load(ptr noalias %ptr) {
; CHECK-NEXT: [[ARRAYIDX183:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 4
; CHECK-NEXT: [[ARRAYIDX184:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 6
; CHECK-NEXT: [[ARRAYIDX185:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 8
-; CHECK-NEXT: [[L0:%.*]] = load i8, ptr getelementptr inbounds ([6 x [258 x i8]], ptr @data, i64 0, i64 1, i64 0), align 1
+; CHECK-NEXT: [[L0:%.*]] = load i8, ptr getelementptr inbounds (i8, ptr @data, i64 258), align 1
; CHECK-NEXT: [[CONV150:%.*]] = zext i8 [[L0]] to i16
; CHECK-NEXT: [[ADD152:%.*]] = add nuw nsw i16 [[CONV150]], 10
-; CHECK-NEXT: [[L1:%.*]] = load i8, ptr getelementptr inbounds ([6 x [258 x i8]], ptr @data, i64 0, i64 2, i64 1), align 1
+; CHECK-NEXT: [[L1:%.*]] = load i8, ptr getelementptr inbounds (i8, ptr @data, i64 517), align 1
; CHECK-NEXT: [[CONV156:%.*]] = zext i8 [[L1]] to i16
; CHECK-NEXT: [[ADD158:%.*]] = add nuw nsw i16 [[CONV156]], 20
-; CHECK-NEXT: [[L2:%.*]] = load i8, ptr getelementptr inbounds ([6 x [258 x i8]], ptr @data, i64 0, i64 3, i64 2), align 1
+; CHECK-NEXT: [[L2:%.*]] = load i8, ptr getelementptr inbounds (i8, ptr @data, i64 776), align 1
; CHECK-NEXT: [[CONV162:%.*]] = zext i8 [[L2]] to i16
; CHECK-NEXT: [[ADD164:%.*]] = add nuw nsw i16 [[CONV162]], 30
-; CHECK-NEXT: [[L3:%.*]] = load i8, ptr getelementptr inbounds ([6 x [258 x i8]], ptr @data, i64 0, i64 4, i64 3), align 1
+; CHECK-NEXT: [[L3:%.*]] = load i8, ptr getelementptr inbounds (i8, ptr @data, i64 1035), align 1
; CHECK-NEXT: [[CONV168:%.*]] = zext i8 [[L3]] to i16
; CHECK-NEXT: [[ADD170:%.*]] = add nuw nsw i16 [[CONV168]], 40
; CHECK-NEXT: store i16 [[ADD152]], ptr [[ARRAYIDX182]], align 2
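
Same canonicalization in the gather-cost checks; the flattened offsets match row-major [6 x [258 x i8]] indexing:

  ; (i64 0, d1, d2) -> byte offset d1 * 258 + d2
  ;   (0, 1, 0) -> 258     (0, 2, 1) -> 517
  ;   (0, 3, 2) -> 776     (0, 4, 3) -> 1035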
diff --git a/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat-inseltpoison.ll b/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat-inseltpoison.ll
index 290560151b79..3749bdf1bba3 100644
--- a/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat-inseltpoison.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat-inseltpoison.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=slp-vectorizer,instcombine %s | FileCheck -check-prefixes=GCN,GFX7 %s
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -passes=slp-vectorizer,instcombine %s | FileCheck -check-prefixes=GCN,GFX8 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=slp-vectorizer,instcombine %s | FileCheck -check-prefixes=GCN,GFX8 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=slp-vectorizer,instcombine %s | FileCheck -check-prefixes=GCN,GFX9 %s
define <2 x i16> @uadd_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX7-LABEL: @uadd_sat_v2i16(
@@ -21,6 +21,11 @@ define <2 x i16> @uadd_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX8-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
; GFX8-NEXT: ret <2 x i16> [[TMP0]]
;
+; GFX9-LABEL: @uadd_sat_v2i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
+; GFX9-NEXT: ret <2 x i16> [[TMP0]]
+;
bb:
%arg0.0 = extractelement <2 x i16> %arg0, i64 0
%arg0.1 = extractelement <2 x i16> %arg0, i64 1
@@ -51,6 +56,11 @@ define <2 x i16> @usub_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX8-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
; GFX8-NEXT: ret <2 x i16> [[TMP0]]
;
+; GFX9-LABEL: @usub_sat_v2i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
+; GFX9-NEXT: ret <2 x i16> [[TMP0]]
+;
bb:
%arg0.0 = extractelement <2 x i16> %arg0, i64 0
%arg0.1 = extractelement <2 x i16> %arg0, i64 1
@@ -81,6 +91,11 @@ define <2 x i16> @sadd_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX8-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
; GFX8-NEXT: ret <2 x i16> [[TMP0]]
;
+; GFX9-LABEL: @sadd_sat_v2i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
+; GFX9-NEXT: ret <2 x i16> [[TMP0]]
+;
bb:
%arg0.0 = extractelement <2 x i16> %arg0, i64 0
%arg0.1 = extractelement <2 x i16> %arg0, i64 1
@@ -111,6 +126,11 @@ define <2 x i16> @ssub_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX8-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
; GFX8-NEXT: ret <2 x i16> [[TMP0]]
;
+; GFX9-LABEL: @ssub_sat_v2i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
+; GFX9-NEXT: ret <2 x i16> [[TMP0]]
+;
bb:
%arg0.0 = extractelement <2 x i16> %arg0, i64 0
%arg0.1 = extractelement <2 x i16> %arg0, i64 1
@@ -252,6 +272,18 @@ define <3 x i16> @uadd_sat_v3i16(<3 x i16> %arg0, <3 x i16> %arg1) {
; GFX8-NEXT: [[INS_2:%.*]] = insertelement <3 x i16> [[TMP3]], i16 [[ADD_2]], i64 2
; GFX8-NEXT: ret <3 x i16> [[INS_2]]
;
+; GFX9-LABEL: @uadd_sat_v3i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[ARG0_2:%.*]] = extractelement <3 x i16> [[ARG0:%.*]], i64 2
+; GFX9-NEXT: [[ARG1_2:%.*]] = extractelement <3 x i16> [[ARG1:%.*]], i64 2
+; GFX9-NEXT: [[TMP0:%.*]] = shufflevector <3 x i16> [[ARG0]], <3 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX9-NEXT: [[TMP1:%.*]] = shufflevector <3 x i16> [[ARG1]], <3 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX9-NEXT: [[TMP2:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
+; GFX9-NEXT: [[ADD_2:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG0_2]], i16 [[ARG1_2]])
+; GFX9-NEXT: [[TMP3:%.*]] = shufflevector <2 x i16> [[TMP2]], <2 x i16> poison, <3 x i32> <i32 0, i32 1, i32 poison>
+; GFX9-NEXT: [[INS_2:%.*]] = insertelement <3 x i16> [[TMP3]], i16 [[ADD_2]], i64 2
+; GFX9-NEXT: ret <3 x i16> [[INS_2]]
+;
bb:
%arg0.0 = extractelement <3 x i16> %arg0, i64 0
%arg0.1 = extractelement <3 x i16> %arg0, i64 1
@@ -291,19 +323,25 @@ define <4 x i16> @uadd_sat_v4i16(<4 x i16> %arg0, <4 x i16> %arg1) {
;
; GFX8-LABEL: @uadd_sat_v4i16(
; GFX8-NEXT: bb:
-; GFX8-NEXT: [[ARG0_2:%.*]] = extractelement <4 x i16> [[ARG0:%.*]], i64 2
-; GFX8-NEXT: [[ARG0_3:%.*]] = extractelement <4 x i16> [[ARG0]], i64 3
-; GFX8-NEXT: [[ARG1_2:%.*]] = extractelement <4 x i16> [[ARG1:%.*]], i64 2
-; GFX8-NEXT: [[ARG1_3:%.*]] = extractelement <4 x i16> [[ARG1]], i64 3
-; GFX8-NEXT: [[TMP0:%.*]] = shufflevector <4 x i16> [[ARG0]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
-; GFX8-NEXT: [[TMP1:%.*]] = shufflevector <4 x i16> [[ARG1]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX8-NEXT: [[TMP0:%.*]] = shufflevector <4 x i16> [[ARG0:%.*]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX8-NEXT: [[TMP1:%.*]] = shufflevector <4 x i16> [[ARG1:%.*]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
; GFX8-NEXT: [[TMP2:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
-; GFX8-NEXT: [[ADD_2:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG0_2]], i16 [[ARG1_2]])
-; GFX8-NEXT: [[ADD_3:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG0_3]], i16 [[ARG1_3]])
-; GFX8-NEXT: [[TMP3:%.*]] = shufflevector <2 x i16> [[TMP2]], <2 x i16> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
-; GFX8-NEXT: [[INS_2:%.*]] = insertelement <4 x i16> [[TMP3]], i16 [[ADD_2]], i64 2
-; GFX8-NEXT: [[INS_3:%.*]] = insertelement <4 x i16> [[INS_2]], i16 [[ADD_3]], i64 3
-; GFX8-NEXT: ret <4 x i16> [[INS_3]]
+; GFX8-NEXT: [[TMP3:%.*]] = shufflevector <4 x i16> [[ARG0]], <4 x i16> poison, <2 x i32> <i32 2, i32 3>
+; GFX8-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[ARG1]], <4 x i16> poison, <2 x i32> <i32 2, i32 3>
+; GFX8-NEXT: [[TMP5:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP3]], <2 x i16> [[TMP4]])
+; GFX8-NEXT: [[INS_31:%.*]] = shufflevector <2 x i16> [[TMP2]], <2 x i16> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; GFX8-NEXT: ret <4 x i16> [[INS_31]]
+;
+; GFX9-LABEL: @uadd_sat_v4i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = shufflevector <4 x i16> [[ARG0:%.*]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX9-NEXT: [[TMP1:%.*]] = shufflevector <4 x i16> [[ARG1:%.*]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX9-NEXT: [[TMP2:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
+; GFX9-NEXT: [[TMP3:%.*]] = shufflevector <4 x i16> [[ARG0]], <4 x i16> poison, <2 x i32> <i32 2, i32 3>
+; GFX9-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[ARG1]], <4 x i16> poison, <2 x i32> <i32 2, i32 3>
+; GFX9-NEXT: [[TMP5:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP3]], <2 x i16> [[TMP4]])
+; GFX9-NEXT: [[INS_31:%.*]] = shufflevector <2 x i16> [[TMP2]], <2 x i16> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; GFX9-NEXT: ret <4 x i16> [[INS_31]]
;
bb:
%arg0.0 = extractelement <4 x i16> %arg0, i64 0
diff --git a/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat.ll b/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat.ll
index 2038400a0586..0bb641371825 100644
--- a/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=slp-vectorizer,instcombine %s | FileCheck -check-prefixes=GCN,GFX7 %s
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -passes=slp-vectorizer,instcombine %s | FileCheck -check-prefixes=GCN,GFX8 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=slp-vectorizer,instcombine %s | FileCheck -check-prefixes=GCN,GFX8 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=slp-vectorizer,instcombine %s | FileCheck -check-prefixes=GCN,GFX9 %s
define <2 x i16> @uadd_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX7-LABEL: @uadd_sat_v2i16(
@@ -21,6 +21,11 @@ define <2 x i16> @uadd_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX8-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
; GFX8-NEXT: ret <2 x i16> [[TMP0]]
;
+; GFX9-LABEL: @uadd_sat_v2i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
+; GFX9-NEXT: ret <2 x i16> [[TMP0]]
+;
bb:
%arg0.0 = extractelement <2 x i16> %arg0, i64 0
%arg0.1 = extractelement <2 x i16> %arg0, i64 1
@@ -51,6 +56,11 @@ define <2 x i16> @usub_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX8-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
; GFX8-NEXT: ret <2 x i16> [[TMP0]]
;
+; GFX9-LABEL: @usub_sat_v2i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
+; GFX9-NEXT: ret <2 x i16> [[TMP0]]
+;
bb:
%arg0.0 = extractelement <2 x i16> %arg0, i64 0
%arg0.1 = extractelement <2 x i16> %arg0, i64 1
@@ -81,6 +91,11 @@ define <2 x i16> @sadd_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX8-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
; GFX8-NEXT: ret <2 x i16> [[TMP0]]
;
+; GFX9-LABEL: @sadd_sat_v2i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
+; GFX9-NEXT: ret <2 x i16> [[TMP0]]
+;
bb:
%arg0.0 = extractelement <2 x i16> %arg0, i64 0
%arg0.1 = extractelement <2 x i16> %arg0, i64 1
@@ -111,6 +126,11 @@ define <2 x i16> @ssub_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX8-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
; GFX8-NEXT: ret <2 x i16> [[TMP0]]
;
+; GFX9-LABEL: @ssub_sat_v2i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
+; GFX9-NEXT: ret <2 x i16> [[TMP0]]
+;
bb:
%arg0.0 = extractelement <2 x i16> %arg0, i64 0
%arg0.1 = extractelement <2 x i16> %arg0, i64 1
@@ -252,6 +272,18 @@ define <3 x i16> @uadd_sat_v3i16(<3 x i16> %arg0, <3 x i16> %arg1) {
; GFX8-NEXT: [[INS_2:%.*]] = insertelement <3 x i16> [[TMP3]], i16 [[ADD_2]], i64 2
; GFX8-NEXT: ret <3 x i16> [[INS_2]]
;
+; GFX9-LABEL: @uadd_sat_v3i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[ARG0_2:%.*]] = extractelement <3 x i16> [[ARG0:%.*]], i64 2
+; GFX9-NEXT: [[ARG1_2:%.*]] = extractelement <3 x i16> [[ARG1:%.*]], i64 2
+; GFX9-NEXT: [[TMP0:%.*]] = shufflevector <3 x i16> [[ARG0]], <3 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX9-NEXT: [[TMP1:%.*]] = shufflevector <3 x i16> [[ARG1]], <3 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX9-NEXT: [[TMP2:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
+; GFX9-NEXT: [[ADD_2:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG0_2]], i16 [[ARG1_2]])
+; GFX9-NEXT: [[TMP3:%.*]] = shufflevector <2 x i16> [[TMP2]], <2 x i16> poison, <3 x i32> <i32 0, i32 1, i32 poison>
+; GFX9-NEXT: [[INS_2:%.*]] = insertelement <3 x i16> [[TMP3]], i16 [[ADD_2]], i64 2
+; GFX9-NEXT: ret <3 x i16> [[INS_2]]
+;
bb:
%arg0.0 = extractelement <3 x i16> %arg0, i64 0
%arg0.1 = extractelement <3 x i16> %arg0, i64 1
@@ -291,19 +323,25 @@ define <4 x i16> @uadd_sat_v4i16(<4 x i16> %arg0, <4 x i16> %arg1) {
;
; GFX8-LABEL: @uadd_sat_v4i16(
; GFX8-NEXT: bb:
-; GFX8-NEXT: [[ARG0_2:%.*]] = extractelement <4 x i16> [[ARG0:%.*]], i64 2
-; GFX8-NEXT: [[ARG0_3:%.*]] = extractelement <4 x i16> [[ARG0]], i64 3
-; GFX8-NEXT: [[ARG1_2:%.*]] = extractelement <4 x i16> [[ARG1:%.*]], i64 2
-; GFX8-NEXT: [[ARG1_3:%.*]] = extractelement <4 x i16> [[ARG1]], i64 3
-; GFX8-NEXT: [[TMP0:%.*]] = shufflevector <4 x i16> [[ARG0]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
-; GFX8-NEXT: [[TMP1:%.*]] = shufflevector <4 x i16> [[ARG1]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX8-NEXT: [[TMP0:%.*]] = shufflevector <4 x i16> [[ARG0:%.*]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX8-NEXT: [[TMP1:%.*]] = shufflevector <4 x i16> [[ARG1:%.*]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
; GFX8-NEXT: [[TMP2:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
-; GFX8-NEXT: [[ADD_2:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG0_2]], i16 [[ARG1_2]])
-; GFX8-NEXT: [[ADD_3:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG0_3]], i16 [[ARG1_3]])
-; GFX8-NEXT: [[TMP3:%.*]] = shufflevector <2 x i16> [[TMP2]], <2 x i16> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
-; GFX8-NEXT: [[INS_2:%.*]] = insertelement <4 x i16> [[TMP3]], i16 [[ADD_2]], i64 2
-; GFX8-NEXT: [[INS_3:%.*]] = insertelement <4 x i16> [[INS_2]], i16 [[ADD_3]], i64 3
-; GFX8-NEXT: ret <4 x i16> [[INS_3]]
+; GFX8-NEXT: [[TMP3:%.*]] = shufflevector <4 x i16> [[ARG0]], <4 x i16> poison, <2 x i32> <i32 2, i32 3>
+; GFX8-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[ARG1]], <4 x i16> poison, <2 x i32> <i32 2, i32 3>
+; GFX8-NEXT: [[TMP5:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP3]], <2 x i16> [[TMP4]])
+; GFX8-NEXT: [[INS_31:%.*]] = shufflevector <2 x i16> [[TMP2]], <2 x i16> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; GFX8-NEXT: ret <4 x i16> [[INS_31]]
+;
+; GFX9-LABEL: @uadd_sat_v4i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = shufflevector <4 x i16> [[ARG0:%.*]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX9-NEXT: [[TMP1:%.*]] = shufflevector <4 x i16> [[ARG1:%.*]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX9-NEXT: [[TMP2:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
+; GFX9-NEXT: [[TMP3:%.*]] = shufflevector <4 x i16> [[ARG0]], <4 x i16> poison, <2 x i32> <i32 2, i32 3>
+; GFX9-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[ARG1]], <4 x i16> poison, <2 x i32> <i32 2, i32 3>
+; GFX9-NEXT: [[TMP5:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP3]], <2 x i16> [[TMP4]])
+; GFX9-NEXT: [[INS_31:%.*]] = shufflevector <2 x i16> [[TMP2]], <2 x i16> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; GFX9-NEXT: ret <4 x i16> [[INS_31]]
;
bb:
%arg0.0 = extractelement <4 x i16> %arg0, i64 0
diff --git a/llvm/test/Transforms/SLPVectorizer/AMDGPU/crash_extract_subvector_cost.ll b/llvm/test/Transforms/SLPVectorizer/AMDGPU/crash_extract_subvector_cost.ll
index 0a020c855cc2..e2d25bae95e9 100644
--- a/llvm/test/Transforms/SLPVectorizer/AMDGPU/crash_extract_subvector_cost.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AMDGPU/crash_extract_subvector_cost.ll
@@ -4,15 +4,10 @@
define <2 x i16> @uadd_sat_v9i16_combine_vi16(<9 x i16> %arg0, <9 x i16> %arg1) {
; CHECK-LABEL: @uadd_sat_v9i16_combine_vi16(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[ARG0_1:%.*]] = extractelement <9 x i16> undef, i64 7
-; CHECK-NEXT: [[ARG0_2:%.*]] = extractelement <9 x i16> [[ARG0:%.*]], i64 8
-; CHECK-NEXT: [[ARG1_1:%.*]] = extractelement <9 x i16> [[ARG1:%.*]], i64 7
-; CHECK-NEXT: [[ARG1_2:%.*]] = extractelement <9 x i16> [[ARG1]], i64 8
-; CHECK-NEXT: [[ADD_1:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG0_1]], i16 [[ARG1_1]])
-; CHECK-NEXT: [[ADD_2:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG0_2]], i16 [[ARG1_2]])
-; CHECK-NEXT: [[INS_1:%.*]] = insertelement <2 x i16> undef, i16 [[ADD_1]], i64 0
-; CHECK-NEXT: [[INS_2:%.*]] = insertelement <2 x i16> [[INS_1]], i16 [[ADD_2]], i64 1
-; CHECK-NEXT: ret <2 x i16> [[INS_2]]
+; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <9 x i16> undef, <9 x i16> [[ARG0:%.*]], <2 x i32> <i32 0, i32 17>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <9 x i16> [[ARG1:%.*]], <9 x i16> poison, <2 x i32> <i32 7, i32 8>
+; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
+; CHECK-NEXT: ret <2 x i16> [[TMP2]]
;
bb:
%arg0.1 = extractelement <9 x i16> undef, i64 7
diff --git a/llvm/test/Transforms/SLPVectorizer/AMDGPU/phi-result-use-order.ll b/llvm/test/Transforms/SLPVectorizer/AMDGPU/phi-result-use-order.ll
index 46980b33e401..3b63c1e35610 100644
--- a/llvm/test/Transforms/SLPVectorizer/AMDGPU/phi-result-use-order.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AMDGPU/phi-result-use-order.ll
@@ -4,23 +4,20 @@
define <4 x half> @phis(i1 %cmp1, <4 x half> %in1, <4 x half> %in2) {
; CHECK-LABEL: @phis(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A2:%.*]] = extractelement <4 x half> [[IN1:%.*]], i64 2
-; CHECK-NEXT: [[A3:%.*]] = extractelement <4 x half> [[IN1]], i64 3
-; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <4 x half> [[IN1]], <4 x half> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <4 x half> [[IN1:%.*]], <4 x half> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x half> [[IN1]], <4 x half> poison, <2 x i32> <i32 2, i32 3>
; CHECK-NEXT: br i1 [[CMP1:%.*]], label [[BB1:%.*]], label [[BB0:%.*]]
; CHECK: bb0:
-; CHECK-NEXT: [[B2:%.*]] = extractelement <4 x half> [[IN2:%.*]], i64 2
-; CHECK-NEXT: [[B3:%.*]] = extractelement <4 x half> [[IN2]], i64 3
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x half> [[IN2]], <4 x half> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x half> [[IN2:%.*]], <4 x half> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x half> [[IN2]], <4 x half> poison, <2 x i32> <i32 2, i32 3>
; CHECK-NEXT: br label [[BB1]]
; CHECK: bb1:
-; CHECK-NEXT: [[C2:%.*]] = phi half [ [[A2]], [[ENTRY:%.*]] ], [ [[B2]], [[BB0]] ]
-; CHECK-NEXT: [[C3:%.*]] = phi half [ [[A3]], [[ENTRY]] ], [ [[B3]], [[BB0]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = phi <2 x half> [ [[TMP0]], [[ENTRY]] ], [ [[TMP1]], [[BB0]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x half> [[TMP2]], <2 x half> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
-; CHECK-NEXT: [[O2:%.*]] = insertelement <4 x half> [[TMP3]], half [[C2]], i64 2
-; CHECK-NEXT: [[O3:%.*]] = insertelement <4 x half> [[O2]], half [[C3]], i64 3
-; CHECK-NEXT: ret <4 x half> [[O3]]
+; CHECK-NEXT: [[TMP4:%.*]] = phi <2 x half> [ [[TMP0]], [[ENTRY:%.*]] ], [ [[TMP2]], [[BB0]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = phi <2 x half> [ [[TMP1]], [[ENTRY]] ], [ [[TMP3]], [[BB0]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x half> [[TMP4]], <2 x half> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x half> [[TMP5]], <2 x half> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <2 x half> [[TMP4]], <2 x half> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: ret <4 x half> [[TMP8]]
;
entry:
%a0 = extractelement <4 x half> %in1, i64 0
@@ -52,23 +49,20 @@ bb1:
define <4 x half> @phis_reverse(i1 %cmp1, <4 x half> %in1, <4 x half> %in2) {
; CHECK-LABEL: @phis_reverse(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A2:%.*]] = extractelement <4 x half> [[IN1:%.*]], i64 2
-; CHECK-NEXT: [[A3:%.*]] = extractelement <4 x half> [[IN1]], i64 3
-; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <4 x half> [[IN1]], <4 x half> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <4 x half> [[IN1:%.*]], <4 x half> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x half> [[IN1]], <4 x half> poison, <2 x i32> <i32 0, i32 1>
; CHECK-NEXT: br i1 [[CMP1:%.*]], label [[BB1:%.*]], label [[BB0:%.*]]
; CHECK: bb0:
-; CHECK-NEXT: [[B2:%.*]] = extractelement <4 x half> [[IN2:%.*]], i64 2
-; CHECK-NEXT: [[B3:%.*]] = extractelement <4 x half> [[IN2]], i64 3
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x half> [[IN2]], <4 x half> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x half> [[IN2:%.*]], <4 x half> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x half> [[IN2]], <4 x half> poison, <2 x i32> <i32 0, i32 1>
; CHECK-NEXT: br label [[BB1]]
; CHECK: bb1:
-; CHECK-NEXT: [[C3:%.*]] = phi half [ [[A3]], [[ENTRY:%.*]] ], [ [[B3]], [[BB0]] ]
-; CHECK-NEXT: [[C2:%.*]] = phi half [ [[A2]], [[ENTRY]] ], [ [[B2]], [[BB0]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = phi <2 x half> [ [[TMP0]], [[ENTRY]] ], [ [[TMP1]], [[BB0]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x half> [[TMP2]], <2 x half> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
-; CHECK-NEXT: [[O2:%.*]] = insertelement <4 x half> [[TMP3]], half [[C2]], i64 2
-; CHECK-NEXT: [[O3:%.*]] = insertelement <4 x half> [[O2]], half [[C3]], i64 3
-; CHECK-NEXT: ret <4 x half> [[O3]]
+; CHECK-NEXT: [[TMP4:%.*]] = phi <2 x half> [ [[TMP0]], [[ENTRY:%.*]] ], [ [[TMP2]], [[BB0]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = phi <2 x half> [ [[TMP1]], [[ENTRY]] ], [ [[TMP3]], [[BB0]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x half> [[TMP5]], <2 x half> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x half> [[TMP4]], <2 x half> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x half> [[TMP6]], <4 x half> [[TMP7]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT: ret <4 x half> [[TMP8]]
;
entry:
%a0 = extractelement <4 x half> %in1, i64 0
diff --git a/llvm/test/Transforms/SLPVectorizer/AMDGPU/reduction.ll b/llvm/test/Transforms/SLPVectorizer/AMDGPU/reduction.ll
index b34b9a352536..aceee8840bb4 100644
--- a/llvm/test/Transforms/SLPVectorizer/AMDGPU/reduction.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AMDGPU/reduction.ll
@@ -3,21 +3,10 @@
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -passes=slp-vectorizer,dce < %s | FileCheck -check-prefixes=GCN,VI %s
define half @reduction_half4(<4 x half> %a) {
-; GFX9-LABEL: @reduction_half4(
-; GFX9-NEXT: entry:
-; GFX9-NEXT: [[TMP0:%.*]] = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH8000, <4 x half> [[A:%.*]])
-; GFX9-NEXT: ret half [[TMP0]]
-;
-; VI-LABEL: @reduction_half4(
-; VI-NEXT: entry:
-; VI-NEXT: [[ELT0:%.*]] = extractelement <4 x half> [[A:%.*]], i64 0
-; VI-NEXT: [[ELT1:%.*]] = extractelement <4 x half> [[A]], i64 1
-; VI-NEXT: [[ELT2:%.*]] = extractelement <4 x half> [[A]], i64 2
-; VI-NEXT: [[ELT3:%.*]] = extractelement <4 x half> [[A]], i64 3
-; VI-NEXT: [[ADD1:%.*]] = fadd fast half [[ELT1]], [[ELT0]]
-; VI-NEXT: [[ADD2:%.*]] = fadd fast half [[ELT2]], [[ADD1]]
-; VI-NEXT: [[ADD3:%.*]] = fadd fast half [[ELT3]], [[ADD2]]
-; VI-NEXT: ret half [[ADD3]]
+; GCN-LABEL: @reduction_half4(
+; GCN-NEXT: entry:
+; GCN-NEXT: [[TMP0:%.*]] = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH8000, <4 x half> [[A:%.*]])
+; GCN-NEXT: ret half [[TMP0]]
;
entry:
%elt0 = extractelement <4 x half> %a, i64 0
@@ -33,29 +22,10 @@ entry:
}
define half @reduction_half8(<8 x half> %vec8) {
-; GFX9-LABEL: @reduction_half8(
-; GFX9-NEXT: entry:
-; GFX9-NEXT: [[TMP0:%.*]] = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH8000, <8 x half> [[VEC8:%.*]])
-; GFX9-NEXT: ret half [[TMP0]]
-;
-; VI-LABEL: @reduction_half8(
-; VI-NEXT: entry:
-; VI-NEXT: [[ELT0:%.*]] = extractelement <8 x half> [[VEC8:%.*]], i64 0
-; VI-NEXT: [[ELT1:%.*]] = extractelement <8 x half> [[VEC8]], i64 1
-; VI-NEXT: [[ELT2:%.*]] = extractelement <8 x half> [[VEC8]], i64 2
-; VI-NEXT: [[ELT3:%.*]] = extractelement <8 x half> [[VEC8]], i64 3
-; VI-NEXT: [[ELT4:%.*]] = extractelement <8 x half> [[VEC8]], i64 4
-; VI-NEXT: [[ELT5:%.*]] = extractelement <8 x half> [[VEC8]], i64 5
-; VI-NEXT: [[ELT6:%.*]] = extractelement <8 x half> [[VEC8]], i64 6
-; VI-NEXT: [[ELT7:%.*]] = extractelement <8 x half> [[VEC8]], i64 7
-; VI-NEXT: [[ADD1:%.*]] = fadd fast half [[ELT1]], [[ELT0]]
-; VI-NEXT: [[ADD2:%.*]] = fadd fast half [[ELT2]], [[ADD1]]
-; VI-NEXT: [[ADD3:%.*]] = fadd fast half [[ELT3]], [[ADD2]]
-; VI-NEXT: [[ADD4:%.*]] = fadd fast half [[ELT4]], [[ADD3]]
-; VI-NEXT: [[ADD5:%.*]] = fadd fast half [[ELT5]], [[ADD4]]
-; VI-NEXT: [[ADD6:%.*]] = fadd fast half [[ELT6]], [[ADD5]]
-; VI-NEXT: [[ADD7:%.*]] = fadd fast half [[ELT7]], [[ADD6]]
-; VI-NEXT: ret half [[ADD7]]
+; GCN-LABEL: @reduction_half8(
+; GCN-NEXT: entry:
+; GCN-NEXT: [[TMP0:%.*]] = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH8000, <8 x half> [[VEC8:%.*]])
+; GCN-NEXT: ret half [[TMP0]]
;
entry:
%elt0 = extractelement <8 x half> %vec8, i64 0
@@ -86,38 +56,12 @@ define half @reduction_half16(<16 x half> %vec16) {
;
; VI-LABEL: @reduction_half16(
; VI-NEXT: entry:
-; VI-NEXT: [[ELT0:%.*]] = extractelement <16 x half> [[VEC16:%.*]], i64 0
-; VI-NEXT: [[ELT1:%.*]] = extractelement <16 x half> [[VEC16]], i64 1
-; VI-NEXT: [[ELT2:%.*]] = extractelement <16 x half> [[VEC16]], i64 2
-; VI-NEXT: [[ELT3:%.*]] = extractelement <16 x half> [[VEC16]], i64 3
-; VI-NEXT: [[ELT4:%.*]] = extractelement <16 x half> [[VEC16]], i64 4
-; VI-NEXT: [[ELT5:%.*]] = extractelement <16 x half> [[VEC16]], i64 5
-; VI-NEXT: [[ELT6:%.*]] = extractelement <16 x half> [[VEC16]], i64 6
-; VI-NEXT: [[ELT7:%.*]] = extractelement <16 x half> [[VEC16]], i64 7
-; VI-NEXT: [[ELT8:%.*]] = extractelement <16 x half> [[VEC16]], i64 8
-; VI-NEXT: [[ELT9:%.*]] = extractelement <16 x half> [[VEC16]], i64 9
-; VI-NEXT: [[ELT10:%.*]] = extractelement <16 x half> [[VEC16]], i64 10
-; VI-NEXT: [[ELT11:%.*]] = extractelement <16 x half> [[VEC16]], i64 11
-; VI-NEXT: [[ELT12:%.*]] = extractelement <16 x half> [[VEC16]], i64 12
-; VI-NEXT: [[ELT13:%.*]] = extractelement <16 x half> [[VEC16]], i64 13
-; VI-NEXT: [[ELT14:%.*]] = extractelement <16 x half> [[VEC16]], i64 14
-; VI-NEXT: [[ELT15:%.*]] = extractelement <16 x half> [[VEC16]], i64 15
-; VI-NEXT: [[ADD1:%.*]] = fadd fast half [[ELT1]], [[ELT0]]
-; VI-NEXT: [[ADD2:%.*]] = fadd fast half [[ELT2]], [[ADD1]]
-; VI-NEXT: [[ADD3:%.*]] = fadd fast half [[ELT3]], [[ADD2]]
-; VI-NEXT: [[ADD4:%.*]] = fadd fast half [[ELT4]], [[ADD3]]
-; VI-NEXT: [[ADD5:%.*]] = fadd fast half [[ELT5]], [[ADD4]]
-; VI-NEXT: [[ADD6:%.*]] = fadd fast half [[ELT6]], [[ADD5]]
-; VI-NEXT: [[ADD7:%.*]] = fadd fast half [[ELT7]], [[ADD6]]
-; VI-NEXT: [[ADD8:%.*]] = fadd fast half [[ELT8]], [[ADD7]]
-; VI-NEXT: [[ADD9:%.*]] = fadd fast half [[ELT9]], [[ADD8]]
-; VI-NEXT: [[ADD10:%.*]] = fadd fast half [[ELT10]], [[ADD9]]
-; VI-NEXT: [[ADD11:%.*]] = fadd fast half [[ELT11]], [[ADD10]]
-; VI-NEXT: [[ADD12:%.*]] = fadd fast half [[ELT12]], [[ADD11]]
-; VI-NEXT: [[ADD13:%.*]] = fadd fast half [[ELT13]], [[ADD12]]
-; VI-NEXT: [[ADD14:%.*]] = fadd fast half [[ELT14]], [[ADD13]]
-; VI-NEXT: [[ADD15:%.*]] = fadd fast half [[ELT15]], [[ADD14]]
-; VI-NEXT: ret half [[ADD15]]
+; VI-NEXT: [[TMP0:%.*]] = shufflevector <16 x half> [[VEC16:%.*]], <16 x half> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; VI-NEXT: [[TMP1:%.*]] = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH8000, <8 x half> [[TMP0]])
+; VI-NEXT: [[TMP2:%.*]] = shufflevector <16 x half> [[VEC16]], <16 x half> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; VI-NEXT: [[TMP3:%.*]] = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH8000, <8 x half> [[TMP2]])
+; VI-NEXT: [[OP_RDX:%.*]] = fadd fast half [[TMP1]], [[TMP3]]
+; VI-NEXT: ret half [[OP_RDX]]
;
entry:
%elt0 = extractelement <16 x half> %vec16, i64 0
@@ -183,21 +127,10 @@ entry:
}
define i16 @reduction_v4i16(<4 x i16> %a) {
-; GFX9-LABEL: @reduction_v4i16(
-; GFX9-NEXT: entry:
-; GFX9-NEXT: [[TMP0:%.*]] = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> [[A:%.*]])
-; GFX9-NEXT: ret i16 [[TMP0]]
-;
-; VI-LABEL: @reduction_v4i16(
-; VI-NEXT: entry:
-; VI-NEXT: [[ELT0:%.*]] = extractelement <4 x i16> [[A:%.*]], i64 0
-; VI-NEXT: [[ELT1:%.*]] = extractelement <4 x i16> [[A]], i64 1
-; VI-NEXT: [[ELT2:%.*]] = extractelement <4 x i16> [[A]], i64 2
-; VI-NEXT: [[ELT3:%.*]] = extractelement <4 x i16> [[A]], i64 3
-; VI-NEXT: [[ADD1:%.*]] = add i16 [[ELT1]], [[ELT0]]
-; VI-NEXT: [[ADD2:%.*]] = add i16 [[ELT2]], [[ADD1]]
-; VI-NEXT: [[ADD3:%.*]] = add i16 [[ELT3]], [[ADD2]]
-; VI-NEXT: ret i16 [[ADD3]]
+; GCN-LABEL: @reduction_v4i16(
+; GCN-NEXT: entry:
+; GCN-NEXT: [[TMP0:%.*]] = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> [[A:%.*]])
+; GCN-NEXT: ret i16 [[TMP0]]
;
entry:
%elt0 = extractelement <4 x i16> %a, i64 0
@@ -213,29 +146,10 @@ entry:
}
define i16 @reduction_v8i16(<8 x i16> %vec8) {
-; GFX9-LABEL: @reduction_v8i16(
-; GFX9-NEXT: entry:
-; GFX9-NEXT: [[TMP0:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[VEC8:%.*]])
-; GFX9-NEXT: ret i16 [[TMP0]]
-;
-; VI-LABEL: @reduction_v8i16(
-; VI-NEXT: entry:
-; VI-NEXT: [[ELT0:%.*]] = extractelement <8 x i16> [[VEC8:%.*]], i64 0
-; VI-NEXT: [[ELT1:%.*]] = extractelement <8 x i16> [[VEC8]], i64 1
-; VI-NEXT: [[ELT2:%.*]] = extractelement <8 x i16> [[VEC8]], i64 2
-; VI-NEXT: [[ELT3:%.*]] = extractelement <8 x i16> [[VEC8]], i64 3
-; VI-NEXT: [[ELT4:%.*]] = extractelement <8 x i16> [[VEC8]], i64 4
-; VI-NEXT: [[ELT5:%.*]] = extractelement <8 x i16> [[VEC8]], i64 5
-; VI-NEXT: [[ELT6:%.*]] = extractelement <8 x i16> [[VEC8]], i64 6
-; VI-NEXT: [[ELT7:%.*]] = extractelement <8 x i16> [[VEC8]], i64 7
-; VI-NEXT: [[ADD1:%.*]] = add i16 [[ELT1]], [[ELT0]]
-; VI-NEXT: [[ADD2:%.*]] = add i16 [[ELT2]], [[ADD1]]
-; VI-NEXT: [[ADD3:%.*]] = add i16 [[ELT3]], [[ADD2]]
-; VI-NEXT: [[ADD4:%.*]] = add i16 [[ELT4]], [[ADD3]]
-; VI-NEXT: [[ADD5:%.*]] = add i16 [[ELT5]], [[ADD4]]
-; VI-NEXT: [[ADD6:%.*]] = add i16 [[ELT6]], [[ADD5]]
-; VI-NEXT: [[ADD7:%.*]] = add i16 [[ELT7]], [[ADD6]]
-; VI-NEXT: ret i16 [[ADD7]]
+; GCN-LABEL: @reduction_v8i16(
+; GCN-NEXT: entry:
+; GCN-NEXT: [[TMP0:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[VEC8:%.*]])
+; GCN-NEXT: ret i16 [[TMP0]]
;
entry:
%elt0 = extractelement <8 x i16> %vec8, i64 0
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/math-function.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/math-function.ll
index 059e4c38b519..9608608a1809 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/math-function.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/math-function.ll
@@ -155,13 +155,11 @@ define <4 x float> @exp_4x(ptr %a) {
; CHECK-NEXT: [[VECEXT_1:%.*]] = extractelement <4 x float> [[TMP0]], i32 1
; CHECK-NEXT: [[TMP2:%.*]] = tail call fast float @expf(float [[VECEXT_1]])
; CHECK-NEXT: [[VECINS_1:%.*]] = insertelement <4 x float> [[VECINS]], float [[TMP2]], i32 1
-; CHECK-NEXT: [[VECEXT_2:%.*]] = extractelement <4 x float> [[TMP0]], i32 2
-; CHECK-NEXT: [[TMP3:%.*]] = tail call fast float @expf(float [[VECEXT_2]])
-; CHECK-NEXT: [[VECINS_2:%.*]] = insertelement <4 x float> [[VECINS_1]], float [[TMP3]], i32 2
-; CHECK-NEXT: [[VECEXT_3:%.*]] = extractelement <4 x float> [[TMP0]], i32 3
-; CHECK-NEXT: [[TMP4:%.*]] = tail call fast float @expf(float [[VECEXT_3]])
-; CHECK-NEXT: [[VECINS_3:%.*]] = insertelement <4 x float> [[VECINS_2]], float [[TMP4]], i32 3
-; CHECK-NEXT: ret <4 x float> [[VECINS_3]]
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: [[TMP4:%.*]] = call fast <2 x float> @llvm.exp.v2f32(<2 x float> [[TMP3]])
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[VECINS_31:%.*]] = shufflevector <4 x float> [[VECINS_1]], <4 x float> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT: ret <4 x float> [[VECINS_31]]
;
; DEFAULT-LABEL: define <4 x float> @exp_4x
; DEFAULT-SAME: (ptr [[A:%.*]]) #[[ATTR1]] {
@@ -173,13 +171,11 @@ define <4 x float> @exp_4x(ptr %a) {
; DEFAULT-NEXT: [[VECEXT_1:%.*]] = extractelement <4 x float> [[TMP0]], i32 1
; DEFAULT-NEXT: [[TMP2:%.*]] = tail call fast float @expf(float [[VECEXT_1]])
; DEFAULT-NEXT: [[VECINS_1:%.*]] = insertelement <4 x float> [[VECINS]], float [[TMP2]], i32 1
-; DEFAULT-NEXT: [[VECEXT_2:%.*]] = extractelement <4 x float> [[TMP0]], i32 2
-; DEFAULT-NEXT: [[TMP3:%.*]] = tail call fast float @expf(float [[VECEXT_2]])
-; DEFAULT-NEXT: [[VECINS_2:%.*]] = insertelement <4 x float> [[VECINS_1]], float [[TMP3]], i32 2
-; DEFAULT-NEXT: [[VECEXT_3:%.*]] = extractelement <4 x float> [[TMP0]], i32 3
-; DEFAULT-NEXT: [[TMP4:%.*]] = tail call fast float @expf(float [[VECEXT_3]])
-; DEFAULT-NEXT: [[VECINS_3:%.*]] = insertelement <4 x float> [[VECINS_2]], float [[TMP4]], i32 3
-; DEFAULT-NEXT: ret <4 x float> [[VECINS_3]]
+; DEFAULT-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> poison, <2 x i32> <i32 2, i32 3>
+; DEFAULT-NEXT: [[TMP4:%.*]] = call fast <2 x float> @llvm.exp.v2f32(<2 x float> [[TMP3]])
+; DEFAULT-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; DEFAULT-NEXT: [[VECINS_31:%.*]] = shufflevector <4 x float> [[VECINS_1]], <4 x float> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; DEFAULT-NEXT: ret <4 x float> [[VECINS_31]]
;
entry:
%0 = load <4 x float>, ptr %a, align 16
@@ -212,13 +208,11 @@ define <4 x float> @int_exp_4x(ptr %a) {
; CHECK-NEXT: [[VECEXT_1:%.*]] = extractelement <4 x float> [[TMP0]], i32 1
; CHECK-NEXT: [[TMP2:%.*]] = tail call fast float @llvm.exp.f32(float [[VECEXT_1]])
; CHECK-NEXT: [[VECINS_1:%.*]] = insertelement <4 x float> [[VECINS]], float [[TMP2]], i32 1
-; CHECK-NEXT: [[VECEXT_2:%.*]] = extractelement <4 x float> [[TMP0]], i32 2
-; CHECK-NEXT: [[TMP3:%.*]] = tail call fast float @llvm.exp.f32(float [[VECEXT_2]])
-; CHECK-NEXT: [[VECINS_2:%.*]] = insertelement <4 x float> [[VECINS_1]], float [[TMP3]], i32 2
-; CHECK-NEXT: [[VECEXT_3:%.*]] = extractelement <4 x float> [[TMP0]], i32 3
-; CHECK-NEXT: [[TMP4:%.*]] = tail call fast float @llvm.exp.f32(float [[VECEXT_3]])
-; CHECK-NEXT: [[VECINS_3:%.*]] = insertelement <4 x float> [[VECINS_2]], float [[TMP4]], i32 3
-; CHECK-NEXT: ret <4 x float> [[VECINS_3]]
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: [[TMP4:%.*]] = call fast <2 x float> @llvm.exp.v2f32(<2 x float> [[TMP3]])
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[VECINS_31:%.*]] = shufflevector <4 x float> [[VECINS_1]], <4 x float> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT: ret <4 x float> [[VECINS_31]]
;
; DEFAULT-LABEL: define <4 x float> @int_exp_4x
; DEFAULT-SAME: (ptr [[A:%.*]]) #[[ATTR1]] {
@@ -230,13 +224,11 @@ define <4 x float> @int_exp_4x(ptr %a) {
; DEFAULT-NEXT: [[VECEXT_1:%.*]] = extractelement <4 x float> [[TMP0]], i32 1
; DEFAULT-NEXT: [[TMP2:%.*]] = tail call fast float @llvm.exp.f32(float [[VECEXT_1]])
; DEFAULT-NEXT: [[VECINS_1:%.*]] = insertelement <4 x float> [[VECINS]], float [[TMP2]], i32 1
-; DEFAULT-NEXT: [[VECEXT_2:%.*]] = extractelement <4 x float> [[TMP0]], i32 2
-; DEFAULT-NEXT: [[TMP3:%.*]] = tail call fast float @llvm.exp.f32(float [[VECEXT_2]])
-; DEFAULT-NEXT: [[VECINS_2:%.*]] = insertelement <4 x float> [[VECINS_1]], float [[TMP3]], i32 2
-; DEFAULT-NEXT: [[VECEXT_3:%.*]] = extractelement <4 x float> [[TMP0]], i32 3
-; DEFAULT-NEXT: [[TMP4:%.*]] = tail call fast float @llvm.exp.f32(float [[VECEXT_3]])
-; DEFAULT-NEXT: [[VECINS_3:%.*]] = insertelement <4 x float> [[VECINS_2]], float [[TMP4]], i32 3
-; DEFAULT-NEXT: ret <4 x float> [[VECINS_3]]
+; DEFAULT-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> poison, <2 x i32> <i32 2, i32 3>
+; DEFAULT-NEXT: [[TMP4:%.*]] = call fast <2 x float> @llvm.exp.v2f32(<2 x float> [[TMP3]])
+; DEFAULT-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; DEFAULT-NEXT: [[VECINS_31:%.*]] = shufflevector <4 x float> [[VECINS_1]], <4 x float> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; DEFAULT-NEXT: ret <4 x float> [[VECINS_31]]
;
entry:
%0 = load <4 x float>, ptr %a, align 16
@@ -269,13 +261,11 @@ define <4 x float> @log_4x(ptr %a) {
; CHECK-NEXT: [[VECEXT_1:%.*]] = extractelement <4 x float> [[TMP0]], i32 1
; CHECK-NEXT: [[TMP2:%.*]] = tail call fast float @logf(float [[VECEXT_1]])
; CHECK-NEXT: [[VECINS_1:%.*]] = insertelement <4 x float> [[VECINS]], float [[TMP2]], i32 1
-; CHECK-NEXT: [[VECEXT_2:%.*]] = extractelement <4 x float> [[TMP0]], i32 2
-; CHECK-NEXT: [[TMP3:%.*]] = tail call fast float @logf(float [[VECEXT_2]])
-; CHECK-NEXT: [[VECINS_2:%.*]] = insertelement <4 x float> [[VECINS_1]], float [[TMP3]], i32 2
-; CHECK-NEXT: [[VECEXT_3:%.*]] = extractelement <4 x float> [[TMP0]], i32 3
-; CHECK-NEXT: [[TMP4:%.*]] = tail call fast float @logf(float [[VECEXT_3]])
-; CHECK-NEXT: [[VECINS_3:%.*]] = insertelement <4 x float> [[VECINS_2]], float [[TMP4]], i32 3
-; CHECK-NEXT: ret <4 x float> [[VECINS_3]]
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: [[TMP4:%.*]] = call fast <2 x float> @llvm.log.v2f32(<2 x float> [[TMP3]])
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[VECINS_31:%.*]] = shufflevector <4 x float> [[VECINS_1]], <4 x float> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT: ret <4 x float> [[VECINS_31]]
;
; DEFAULT-LABEL: define <4 x float> @log_4x
; DEFAULT-SAME: (ptr [[A:%.*]]) #[[ATTR1]] {
@@ -287,13 +277,11 @@ define <4 x float> @log_4x(ptr %a) {
; DEFAULT-NEXT: [[VECEXT_1:%.*]] = extractelement <4 x float> [[TMP0]], i32 1
; DEFAULT-NEXT: [[TMP2:%.*]] = tail call fast float @logf(float [[VECEXT_1]])
; DEFAULT-NEXT: [[VECINS_1:%.*]] = insertelement <4 x float> [[VECINS]], float [[TMP2]], i32 1
-; DEFAULT-NEXT: [[VECEXT_2:%.*]] = extractelement <4 x float> [[TMP0]], i32 2
-; DEFAULT-NEXT: [[TMP3:%.*]] = tail call fast float @logf(float [[VECEXT_2]])
-; DEFAULT-NEXT: [[VECINS_2:%.*]] = insertelement <4 x float> [[VECINS_1]], float [[TMP3]], i32 2
-; DEFAULT-NEXT: [[VECEXT_3:%.*]] = extractelement <4 x float> [[TMP0]], i32 3
-; DEFAULT-NEXT: [[TMP4:%.*]] = tail call fast float @logf(float [[VECEXT_3]])
-; DEFAULT-NEXT: [[VECINS_3:%.*]] = insertelement <4 x float> [[VECINS_2]], float [[TMP4]], i32 3
-; DEFAULT-NEXT: ret <4 x float> [[VECINS_3]]
+; DEFAULT-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> poison, <2 x i32> <i32 2, i32 3>
+; DEFAULT-NEXT: [[TMP4:%.*]] = call fast <2 x float> @llvm.log.v2f32(<2 x float> [[TMP3]])
+; DEFAULT-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; DEFAULT-NEXT: [[VECINS_31:%.*]] = shufflevector <4 x float> [[VECINS_1]], <4 x float> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; DEFAULT-NEXT: ret <4 x float> [[VECINS_31]]
;
entry:
%0 = load <4 x float>, ptr %a, align 16
@@ -326,13 +314,11 @@ define <4 x float> @int_log_4x(ptr %a) {
; CHECK-NEXT: [[VECEXT_1:%.*]] = extractelement <4 x float> [[TMP0]], i32 1
; CHECK-NEXT: [[TMP2:%.*]] = tail call fast float @llvm.log.f32(float [[VECEXT_1]])
; CHECK-NEXT: [[VECINS_1:%.*]] = insertelement <4 x float> [[VECINS]], float [[TMP2]], i32 1
-; CHECK-NEXT: [[VECEXT_2:%.*]] = extractelement <4 x float> [[TMP0]], i32 2
-; CHECK-NEXT: [[TMP3:%.*]] = tail call fast float @llvm.log.f32(float [[VECEXT_2]])
-; CHECK-NEXT: [[VECINS_2:%.*]] = insertelement <4 x float> [[VECINS_1]], float [[TMP3]], i32 2
-; CHECK-NEXT: [[VECEXT_3:%.*]] = extractelement <4 x float> [[TMP0]], i32 3
-; CHECK-NEXT: [[TMP4:%.*]] = tail call fast float @llvm.log.f32(float [[VECEXT_3]])
-; CHECK-NEXT: [[VECINS_3:%.*]] = insertelement <4 x float> [[VECINS_2]], float [[TMP4]], i32 3
-; CHECK-NEXT: ret <4 x float> [[VECINS_3]]
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: [[TMP4:%.*]] = call fast <2 x float> @llvm.log.v2f32(<2 x float> [[TMP3]])
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[VECINS_31:%.*]] = shufflevector <4 x float> [[VECINS_1]], <4 x float> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT: ret <4 x float> [[VECINS_31]]
;
; DEFAULT-LABEL: define <4 x float> @int_log_4x
; DEFAULT-SAME: (ptr [[A:%.*]]) #[[ATTR1]] {
@@ -344,13 +330,11 @@ define <4 x float> @int_log_4x(ptr %a) {
; DEFAULT-NEXT: [[VECEXT_1:%.*]] = extractelement <4 x float> [[TMP0]], i32 1
; DEFAULT-NEXT: [[TMP2:%.*]] = tail call fast float @llvm.log.f32(float [[VECEXT_1]])
; DEFAULT-NEXT: [[VECINS_1:%.*]] = insertelement <4 x float> [[VECINS]], float [[TMP2]], i32 1
-; DEFAULT-NEXT: [[VECEXT_2:%.*]] = extractelement <4 x float> [[TMP0]], i32 2
-; DEFAULT-NEXT: [[TMP3:%.*]] = tail call fast float @llvm.log.f32(float [[VECEXT_2]])
-; DEFAULT-NEXT: [[VECINS_2:%.*]] = insertelement <4 x float> [[VECINS_1]], float [[TMP3]], i32 2
-; DEFAULT-NEXT: [[VECEXT_3:%.*]] = extractelement <4 x float> [[TMP0]], i32 3
-; DEFAULT-NEXT: [[TMP4:%.*]] = tail call fast float @llvm.log.f32(float [[VECEXT_3]])
-; DEFAULT-NEXT: [[VECINS_3:%.*]] = insertelement <4 x float> [[VECINS_2]], float [[TMP4]], i32 3
-; DEFAULT-NEXT: ret <4 x float> [[VECINS_3]]
+; DEFAULT-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> poison, <2 x i32> <i32 2, i32 3>
+; DEFAULT-NEXT: [[TMP4:%.*]] = call fast <2 x float> @llvm.log.v2f32(<2 x float> [[TMP3]])
+; DEFAULT-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; DEFAULT-NEXT: [[VECINS_31:%.*]] = shufflevector <4 x float> [[VECINS_1]], <4 x float> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; DEFAULT-NEXT: ret <4 x float> [[VECINS_31]]
;
entry:
%0 = load <4 x float>, ptr %a, align 16
@@ -383,13 +367,11 @@ define <4 x float> @sin_4x(ptr %a) {
; CHECK-NEXT: [[VECEXT_1:%.*]] = extractelement <4 x float> [[TMP0]], i32 1
; CHECK-NEXT: [[TMP2:%.*]] = tail call fast float @sinf(float [[VECEXT_1]])
; CHECK-NEXT: [[VECINS_1:%.*]] = insertelement <4 x float> [[VECINS]], float [[TMP2]], i32 1
-; CHECK-NEXT: [[VECEXT_2:%.*]] = extractelement <4 x float> [[TMP0]], i32 2
-; CHECK-NEXT: [[TMP3:%.*]] = tail call fast float @sinf(float [[VECEXT_2]])
-; CHECK-NEXT: [[VECINS_2:%.*]] = insertelement <4 x float> [[VECINS_1]], float [[TMP3]], i32 2
-; CHECK-NEXT: [[VECEXT_3:%.*]] = extractelement <4 x float> [[TMP0]], i32 3
-; CHECK-NEXT: [[TMP4:%.*]] = tail call fast float @sinf(float [[VECEXT_3]])
-; CHECK-NEXT: [[VECINS_3:%.*]] = insertelement <4 x float> [[VECINS_2]], float [[TMP4]], i32 3
-; CHECK-NEXT: ret <4 x float> [[VECINS_3]]
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: [[TMP4:%.*]] = call fast <2 x float> @llvm.sin.v2f32(<2 x float> [[TMP3]])
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[VECINS_31:%.*]] = shufflevector <4 x float> [[VECINS_1]], <4 x float> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT: ret <4 x float> [[VECINS_31]]
;
; DEFAULT-LABEL: define <4 x float> @sin_4x
; DEFAULT-SAME: (ptr [[A:%.*]]) #[[ATTR1]] {
@@ -401,13 +383,11 @@ define <4 x float> @sin_4x(ptr %a) {
; DEFAULT-NEXT: [[VECEXT_1:%.*]] = extractelement <4 x float> [[TMP0]], i32 1
; DEFAULT-NEXT: [[TMP2:%.*]] = tail call fast float @sinf(float [[VECEXT_1]])
; DEFAULT-NEXT: [[VECINS_1:%.*]] = insertelement <4 x float> [[VECINS]], float [[TMP2]], i32 1
-; DEFAULT-NEXT: [[VECEXT_2:%.*]] = extractelement <4 x float> [[TMP0]], i32 2
-; DEFAULT-NEXT: [[TMP3:%.*]] = tail call fast float @sinf(float [[VECEXT_2]])
-; DEFAULT-NEXT: [[VECINS_2:%.*]] = insertelement <4 x float> [[VECINS_1]], float [[TMP3]], i32 2
-; DEFAULT-NEXT: [[VECEXT_3:%.*]] = extractelement <4 x float> [[TMP0]], i32 3
-; DEFAULT-NEXT: [[TMP4:%.*]] = tail call fast float @sinf(float [[VECEXT_3]])
-; DEFAULT-NEXT: [[VECINS_3:%.*]] = insertelement <4 x float> [[VECINS_2]], float [[TMP4]], i32 3
-; DEFAULT-NEXT: ret <4 x float> [[VECINS_3]]
+; DEFAULT-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> poison, <2 x i32> <i32 2, i32 3>
+; DEFAULT-NEXT: [[TMP4:%.*]] = call fast <2 x float> @llvm.sin.v2f32(<2 x float> [[TMP3]])
+; DEFAULT-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; DEFAULT-NEXT: [[VECINS_31:%.*]] = shufflevector <4 x float> [[VECINS_1]], <4 x float> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; DEFAULT-NEXT: ret <4 x float> [[VECINS_31]]
;
entry:
%0 = load <4 x float>, ptr %a, align 16
@@ -440,13 +420,11 @@ define <4 x float> @int_sin_4x(ptr %a) {
; CHECK-NEXT: [[VECEXT_1:%.*]] = extractelement <4 x float> [[TMP0]], i32 1
; CHECK-NEXT: [[TMP2:%.*]] = tail call fast float @llvm.sin.f32(float [[VECEXT_1]])
; CHECK-NEXT: [[VECINS_1:%.*]] = insertelement <4 x float> [[VECINS]], float [[TMP2]], i32 1
-; CHECK-NEXT: [[VECEXT_2:%.*]] = extractelement <4 x float> [[TMP0]], i32 2
-; CHECK-NEXT: [[TMP3:%.*]] = tail call fast float @llvm.sin.f32(float [[VECEXT_2]])
-; CHECK-NEXT: [[VECINS_2:%.*]] = insertelement <4 x float> [[VECINS_1]], float [[TMP3]], i32 2
-; CHECK-NEXT: [[VECEXT_3:%.*]] = extractelement <4 x float> [[TMP0]], i32 3
-; CHECK-NEXT: [[TMP4:%.*]] = tail call fast float @llvm.sin.f32(float [[VECEXT_3]])
-; CHECK-NEXT: [[VECINS_3:%.*]] = insertelement <4 x float> [[VECINS_2]], float [[TMP4]], i32 3
-; CHECK-NEXT: ret <4 x float> [[VECINS_3]]
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: [[TMP4:%.*]] = call fast <2 x float> @llvm.sin.v2f32(<2 x float> [[TMP3]])
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[VECINS_31:%.*]] = shufflevector <4 x float> [[VECINS_1]], <4 x float> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT: ret <4 x float> [[VECINS_31]]
;
; DEFAULT-LABEL: define <4 x float> @int_sin_4x
; DEFAULT-SAME: (ptr [[A:%.*]]) #[[ATTR1]] {
@@ -458,13 +436,11 @@ define <4 x float> @int_sin_4x(ptr %a) {
; DEFAULT-NEXT: [[VECEXT_1:%.*]] = extractelement <4 x float> [[TMP0]], i32 1
; DEFAULT-NEXT: [[TMP2:%.*]] = tail call fast float @llvm.sin.f32(float [[VECEXT_1]])
; DEFAULT-NEXT: [[VECINS_1:%.*]] = insertelement <4 x float> [[VECINS]], float [[TMP2]], i32 1
-; DEFAULT-NEXT: [[VECEXT_2:%.*]] = extractelement <4 x float> [[TMP0]], i32 2
-; DEFAULT-NEXT: [[TMP3:%.*]] = tail call fast float @llvm.sin.f32(float [[VECEXT_2]])
-; DEFAULT-NEXT: [[VECINS_2:%.*]] = insertelement <4 x float> [[VECINS_1]], float [[TMP3]], i32 2
-; DEFAULT-NEXT: [[VECEXT_3:%.*]] = extractelement <4 x float> [[TMP0]], i32 3
-; DEFAULT-NEXT: [[TMP4:%.*]] = tail call fast float @llvm.sin.f32(float [[VECEXT_3]])
-; DEFAULT-NEXT: [[VECINS_3:%.*]] = insertelement <4 x float> [[VECINS_2]], float [[TMP4]], i32 3
-; DEFAULT-NEXT: ret <4 x float> [[VECINS_3]]
+; DEFAULT-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> poison, <2 x i32> <i32 2, i32 3>
+; DEFAULT-NEXT: [[TMP4:%.*]] = call fast <2 x float> @llvm.sin.v2f32(<2 x float> [[TMP3]])
+; DEFAULT-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; DEFAULT-NEXT: [[VECINS_31:%.*]] = shufflevector <4 x float> [[VECINS_1]], <4 x float> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; DEFAULT-NEXT: ret <4 x float> [[VECINS_31]]
;
entry:
%0 = load <4 x float>, ptr %a, align 16
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/scatter-vectorize-reversed.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/scatter-vectorize-reversed.ll
new file mode 100644
index 000000000000..2daa3b58e5c3
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/scatter-vectorize-reversed.ll
@@ -0,0 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=riscv64-unknown-linux-gnu -mattr=+v -slp-threshold=-11 < %s | FileCheck %s
+
+define <4 x i32> @test(<2 x i64> %v, ptr %p) {
+; CHECK-LABEL: define <4 x i32> @test(
+; CHECK-SAME: <2 x i64> [[V:%.*]], ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x ptr> poison, ptr [[P]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <2 x ptr> [[TMP0]], <2 x ptr> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i16, <2 x ptr> [[TMP1]], <2 x i64> [[V]]
+; CHECK-NEXT: [[TMP3:%.*]] = call <2 x i16> @llvm.masked.gather.v2i16.v2p0(<2 x ptr> [[TMP2]], i32 2, <2 x i1> <i1 true, i1 true>, <2 x i16> poison)
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i16> [[TMP3]], <2 x i16> poison, <2 x i32> <i32 1, i32 0>
+; CHECK-NEXT: [[TMP7:%.*]] = zext <2 x i16> [[TMP4]] to <2 x i32>
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x i32> [[TMP7]], <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i32> zeroinitializer, <4 x i32> [[TMP6]], <4 x i32> <i32 4, i32 5, i32 2, i32 3>
+; CHECK-NEXT: ret <4 x i32> [[TMP5]]
+;
+entry:
+ %0 = extractelement <2 x i64> %v, i32 1
+ %arrayidx127.2 = getelementptr i16, ptr %p, i64 %0
+ %1 = load i16, ptr %arrayidx127.2, align 2
+ %conv128.2 = zext i16 %1 to i32
+ %2 = extractelement <2 x i64> %v, i32 0
+ %arrayidx127.3 = getelementptr i16, ptr %p, i64 %2
+ %3 = load i16, ptr %arrayidx127.3, align 2
+ %conv128.3 = zext i16 %3 to i32
+ %4 = insertelement <4 x i32> zeroinitializer, i32 %conv128.2, i32 0
+ %5 = insertelement <4 x i32> %4, i32 %conv128.3, i32 1
+ ret <4 x i32> %5
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/alternate-calls-inseltpoison.ll b/llvm/test/Transforms/SLPVectorizer/X86/alternate-calls-inseltpoison.ll
index 6c21cc1cfc5b..45ce1eec2cbf 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/alternate-calls-inseltpoison.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/alternate-calls-inseltpoison.ll
@@ -51,25 +51,23 @@ define <8 x float> @ceil_floor(<8 x float> %a) {
;
; AVX-LABEL: @ceil_floor(
; AVX-NEXT: [[A0:%.*]] = extractelement <8 x float> [[A:%.*]], i64 0
-; AVX-NEXT: [[A1:%.*]] = extractelement <8 x float> [[A]], i64 1
-; AVX-NEXT: [[A2:%.*]] = extractelement <8 x float> [[A]], i64 2
; AVX-NEXT: [[A3:%.*]] = extractelement <8 x float> [[A]], i64 3
; AVX-NEXT: [[AB0:%.*]] = call float @llvm.ceil.f32(float [[A0]])
-; AVX-NEXT: [[AB1:%.*]] = call float @llvm.floor.f32(float [[A1]])
-; AVX-NEXT: [[AB2:%.*]] = call float @llvm.floor.f32(float [[A2]])
+; AVX-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> [[A]], <8 x float> poison, <2 x i32> <i32 1, i32 2>
+; AVX-NEXT: [[TMP2:%.*]] = call <2 x float> @llvm.floor.v2f32(<2 x float> [[TMP1]])
; AVX-NEXT: [[AB3:%.*]] = call float @llvm.ceil.f32(float [[A3]])
-; AVX-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> [[A]], <8 x float> poison, <2 x i32> <i32 4, i32 5>
-; AVX-NEXT: [[TMP2:%.*]] = call <2 x float> @llvm.ceil.v2f32(<2 x float> [[TMP1]])
-; AVX-NEXT: [[TMP3:%.*]] = shufflevector <8 x float> [[A]], <8 x float> poison, <2 x i32> <i32 6, i32 7>
-; AVX-NEXT: [[TMP4:%.*]] = call <2 x float> @llvm.floor.v2f32(<2 x float> [[TMP3]])
+; AVX-NEXT: [[TMP3:%.*]] = shufflevector <8 x float> [[A]], <8 x float> poison, <2 x i32> <i32 4, i32 5>
+; AVX-NEXT: [[TMP4:%.*]] = call <2 x float> @llvm.ceil.v2f32(<2 x float> [[TMP3]])
+; AVX-NEXT: [[TMP5:%.*]] = shufflevector <8 x float> [[A]], <8 x float> poison, <2 x i32> <i32 6, i32 7>
+; AVX-NEXT: [[TMP6:%.*]] = call <2 x float> @llvm.floor.v2f32(<2 x float> [[TMP5]])
; AVX-NEXT: [[R0:%.*]] = insertelement <8 x float> poison, float [[AB0]], i64 0
-; AVX-NEXT: [[R1:%.*]] = insertelement <8 x float> [[R0]], float [[AB1]], i64 1
-; AVX-NEXT: [[R2:%.*]] = insertelement <8 x float> [[R1]], float [[AB2]], i64 2
-; AVX-NEXT: [[R3:%.*]] = insertelement <8 x float> [[R2]], float [[AB3]], i64 3
-; AVX-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
-; AVX-NEXT: [[R52:%.*]] = shufflevector <8 x float> [[R3]], <8 x float> [[TMP5]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 poison, i32 poison>
-; AVX-NEXT: [[TMP6:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
-; AVX-NEXT: [[R71:%.*]] = shufflevector <8 x float> [[R52]], <8 x float> [[TMP6]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
+; AVX-NEXT: [[TMP7:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; AVX-NEXT: [[R23:%.*]] = shufflevector <8 x float> [[R0]], <8 x float> [[TMP7]], <8 x i32> <i32 0, i32 8, i32 9, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; AVX-NEXT: [[R3:%.*]] = insertelement <8 x float> [[R23]], float [[AB3]], i64 3
+; AVX-NEXT: [[TMP8:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; AVX-NEXT: [[R52:%.*]] = shufflevector <8 x float> [[R3]], <8 x float> [[TMP8]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 poison, i32 poison>
+; AVX-NEXT: [[TMP9:%.*]] = shufflevector <2 x float> [[TMP6]], <2 x float> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; AVX-NEXT: [[R71:%.*]] = shufflevector <8 x float> [[R52]], <8 x float> [[TMP9]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
; AVX-NEXT: ret <8 x float> [[R71]]
;
; AVX2-LABEL: @ceil_floor(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/alternate-calls.ll b/llvm/test/Transforms/SLPVectorizer/X86/alternate-calls.ll
index bc5bcee36116..b8b284b9595a 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/alternate-calls.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/alternate-calls.ll
@@ -51,25 +51,23 @@ define <8 x float> @ceil_floor(<8 x float> %a) {
;
; AVX-LABEL: @ceil_floor(
; AVX-NEXT: [[A0:%.*]] = extractelement <8 x float> [[A:%.*]], i64 0
-; AVX-NEXT: [[A1:%.*]] = extractelement <8 x float> [[A]], i64 1
-; AVX-NEXT: [[A2:%.*]] = extractelement <8 x float> [[A]], i64 2
; AVX-NEXT: [[A3:%.*]] = extractelement <8 x float> [[A]], i64 3
; AVX-NEXT: [[AB0:%.*]] = call float @llvm.ceil.f32(float [[A0]])
-; AVX-NEXT: [[AB1:%.*]] = call float @llvm.floor.f32(float [[A1]])
-; AVX-NEXT: [[AB2:%.*]] = call float @llvm.floor.f32(float [[A2]])
+; AVX-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> [[A]], <8 x float> poison, <2 x i32> <i32 1, i32 2>
+; AVX-NEXT: [[TMP2:%.*]] = call <2 x float> @llvm.floor.v2f32(<2 x float> [[TMP1]])
; AVX-NEXT: [[AB3:%.*]] = call float @llvm.ceil.f32(float [[A3]])
-; AVX-NEXT: [[TMP1:%.*]] = shufflevector <8 x float> [[A]], <8 x float> poison, <2 x i32> <i32 4, i32 5>
-; AVX-NEXT: [[TMP2:%.*]] = call <2 x float> @llvm.ceil.v2f32(<2 x float> [[TMP1]])
-; AVX-NEXT: [[TMP3:%.*]] = shufflevector <8 x float> [[A]], <8 x float> poison, <2 x i32> <i32 6, i32 7>
-; AVX-NEXT: [[TMP4:%.*]] = call <2 x float> @llvm.floor.v2f32(<2 x float> [[TMP3]])
+; AVX-NEXT: [[TMP3:%.*]] = shufflevector <8 x float> [[A]], <8 x float> poison, <2 x i32> <i32 4, i32 5>
+; AVX-NEXT: [[TMP4:%.*]] = call <2 x float> @llvm.ceil.v2f32(<2 x float> [[TMP3]])
+; AVX-NEXT: [[TMP5:%.*]] = shufflevector <8 x float> [[A]], <8 x float> poison, <2 x i32> <i32 6, i32 7>
+; AVX-NEXT: [[TMP6:%.*]] = call <2 x float> @llvm.floor.v2f32(<2 x float> [[TMP5]])
; AVX-NEXT: [[R0:%.*]] = insertelement <8 x float> poison, float [[AB0]], i64 0
-; AVX-NEXT: [[R1:%.*]] = insertelement <8 x float> [[R0]], float [[AB1]], i64 1
-; AVX-NEXT: [[R2:%.*]] = insertelement <8 x float> [[R1]], float [[AB2]], i64 2
-; AVX-NEXT: [[R3:%.*]] = insertelement <8 x float> [[R2]], float [[AB3]], i64 3
-; AVX-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
-; AVX-NEXT: [[R52:%.*]] = shufflevector <8 x float> [[R3]], <8 x float> [[TMP5]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 poison, i32 poison>
-; AVX-NEXT: [[TMP6:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
-; AVX-NEXT: [[R71:%.*]] = shufflevector <8 x float> [[R52]], <8 x float> [[TMP6]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
+; AVX-NEXT: [[TMP7:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; AVX-NEXT: [[R23:%.*]] = shufflevector <8 x float> [[R0]], <8 x float> [[TMP7]], <8 x i32> <i32 0, i32 8, i32 9, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; AVX-NEXT: [[R3:%.*]] = insertelement <8 x float> [[R23]], float [[AB3]], i64 3
+; AVX-NEXT: [[TMP8:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; AVX-NEXT: [[R52:%.*]] = shufflevector <8 x float> [[R3]], <8 x float> [[TMP8]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 poison, i32 poison>
+; AVX-NEXT: [[TMP9:%.*]] = shufflevector <2 x float> [[TMP6]], <2 x float> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; AVX-NEXT: [[R71:%.*]] = shufflevector <8 x float> [[R52]], <8 x float> [[TMP9]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
; AVX-NEXT: ret <8 x float> [[R71]]
;
; AVX2-LABEL: @ceil_floor(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/blending-shuffle.ll b/llvm/test/Transforms/SLPVectorizer/X86/blending-shuffle.ll
index 4f35b77c50be..8701551f46ab 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/blending-shuffle.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/blending-shuffle.ll
@@ -39,9 +39,10 @@ define <4 x i8> @h(<4 x i8> %x, <4 x i8> %y) {
define <4 x i8> @h_undef(<4 x i8> %x, <4 x i8> %y) {
; CHECK-LABEL: @h_undef(
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i8> [[X:%.*]], <4 x i8> [[Y:%.*]], <4 x i32> <i32 poison, i32 3, i32 5, i32 6>
-; CHECK-NEXT: [[TMP2:%.*]] = mul <4 x i8> [[TMP1]], [[TMP1]]
-; CHECK-NEXT: ret <4 x i8> [[TMP2]]
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i8> [[X:%.*]], <4 x i8> <i8 undef, i8 poison, i8 poison, i8 poison>, <4 x i32> <i32 4, i32 3, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> [[Y:%.*]], <4 x i32> <i32 0, i32 1, i32 5, i32 6>
+; CHECK-NEXT: [[TMP3:%.*]] = mul <4 x i8> [[TMP2]], [[TMP2]]
+; CHECK-NEXT: ret <4 x i8> [[TMP3]]
;
%x0 = extractelement <4 x i8> undef, i32 0
%x3 = extractelement <4 x i8> %x, i32 3
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/hadd-inseltpoison.ll b/llvm/test/Transforms/SLPVectorizer/X86/hadd-inseltpoison.ll
index 4a9f717918a0..b85ec5bce819 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/hadd-inseltpoison.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/hadd-inseltpoison.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=slm -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,SLM
-; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
-; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
-; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
-; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,AVX
;
; 128-bit vectors
@@ -213,62 +213,16 @@ define <4 x double> @test_v4f64(<4 x double> %a, <4 x double> %b) {
; PR50392
define <4 x double> @test_v4f64_partial_swizzle(<4 x double> %a, <4 x double> %b) {
-; SSE-LABEL: @test_v4f64_partial_swizzle(
-; SSE-NEXT: [[B2:%.*]] = extractelement <4 x double> [[B:%.*]], i64 2
-; SSE-NEXT: [[B3:%.*]] = extractelement <4 x double> [[B]], i64 3
-; SSE-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A:%.*]], <4 x double> [[B]], <2 x i32> <i32 0, i32 4>
-; SSE-NEXT: [[TMP2:%.*]] = shufflevector <4 x double> [[A]], <4 x double> [[B]], <2 x i32> <i32 1, i32 5>
-; SSE-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[TMP1]], [[TMP2]]
-; SSE-NEXT: [[R3:%.*]] = fadd double [[B2]], [[B3]]
-; SSE-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> poison, <4 x i32> <i32 0, i32 poison, i32 1, i32 poison>
-; SSE-NEXT: [[R03:%.*]] = insertelement <4 x double> [[TMP4]], double [[R3]], i64 3
-; SSE-NEXT: ret <4 x double> [[R03]]
-;
-; SLM-LABEL: @test_v4f64_partial_swizzle(
-; SLM-NEXT: [[B2:%.*]] = extractelement <4 x double> [[B:%.*]], i64 2
-; SLM-NEXT: [[B3:%.*]] = extractelement <4 x double> [[B]], i64 3
-; SLM-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A:%.*]], <4 x double> [[B]], <2 x i32> <i32 0, i32 4>
-; SLM-NEXT: [[TMP2:%.*]] = shufflevector <4 x double> [[A]], <4 x double> [[B]], <2 x i32> <i32 1, i32 5>
-; SLM-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[TMP1]], [[TMP2]]
-; SLM-NEXT: [[R3:%.*]] = fadd double [[B2]], [[B3]]
-; SLM-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> poison, <4 x i32> <i32 0, i32 poison, i32 1, i32 poison>
-; SLM-NEXT: [[R03:%.*]] = insertelement <4 x double> [[TMP4]], double [[R3]], i64 3
-; SLM-NEXT: ret <4 x double> [[R03]]
-;
-; AVX1-LABEL: @test_v4f64_partial_swizzle(
-; AVX1-NEXT: [[A0:%.*]] = extractelement <4 x double> [[A:%.*]], i64 0
-; AVX1-NEXT: [[A1:%.*]] = extractelement <4 x double> [[A]], i64 1
-; AVX1-NEXT: [[R0:%.*]] = fadd double [[A0]], [[A1]]
-; AVX1-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[B:%.*]], <4 x double> poison, <2 x i32> <i32 1, i32 2>
-; AVX1-NEXT: [[TMP2:%.*]] = shufflevector <4 x double> [[B]], <4 x double> poison, <2 x i32> <i32 0, i32 3>
-; AVX1-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[TMP1]], [[TMP2]]
-; AVX1-NEXT: [[R00:%.*]] = insertelement <4 x double> poison, double [[R0]], i64 0
-; AVX1-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
-; AVX1-NEXT: [[R031:%.*]] = shufflevector <4 x double> [[R00]], <4 x double> [[TMP4]], <4 x i32> <i32 0, i32 poison, i32 4, i32 5>
-; AVX1-NEXT: ret <4 x double> [[R031]]
-;
-; AVX2-LABEL: @test_v4f64_partial_swizzle(
-; AVX2-NEXT: [[A0:%.*]] = extractelement <4 x double> [[A:%.*]], i64 0
-; AVX2-NEXT: [[A1:%.*]] = extractelement <4 x double> [[A]], i64 1
-; AVX2-NEXT: [[R0:%.*]] = fadd double [[A0]], [[A1]]
-; AVX2-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[B:%.*]], <4 x double> poison, <2 x i32> <i32 1, i32 2>
-; AVX2-NEXT: [[TMP2:%.*]] = shufflevector <4 x double> [[B]], <4 x double> poison, <2 x i32> <i32 0, i32 3>
-; AVX2-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[TMP1]], [[TMP2]]
-; AVX2-NEXT: [[R00:%.*]] = insertelement <4 x double> poison, double [[R0]], i64 0
-; AVX2-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
-; AVX2-NEXT: [[R031:%.*]] = shufflevector <4 x double> [[R00]], <4 x double> [[TMP4]], <4 x i32> <i32 0, i32 poison, i32 4, i32 5>
-; AVX2-NEXT: ret <4 x double> [[R031]]
-;
-; AVX512-LABEL: @test_v4f64_partial_swizzle(
-; AVX512-NEXT: [[B2:%.*]] = extractelement <4 x double> [[B:%.*]], i64 2
-; AVX512-NEXT: [[B3:%.*]] = extractelement <4 x double> [[B]], i64 3
-; AVX512-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A:%.*]], <4 x double> [[B]], <2 x i32> <i32 0, i32 4>
-; AVX512-NEXT: [[TMP2:%.*]] = shufflevector <4 x double> [[A]], <4 x double> [[B]], <2 x i32> <i32 1, i32 5>
-; AVX512-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[TMP1]], [[TMP2]]
-; AVX512-NEXT: [[R3:%.*]] = fadd double [[B2]], [[B3]]
-; AVX512-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> poison, <4 x i32> <i32 0, i32 poison, i32 1, i32 poison>
-; AVX512-NEXT: [[R03:%.*]] = insertelement <4 x double> [[TMP4]], double [[R3]], i64 3
-; AVX512-NEXT: ret <4 x double> [[R03]]
+; CHECK-LABEL: @test_v4f64_partial_swizzle(
+; CHECK-NEXT: [[B2:%.*]] = extractelement <4 x double> [[B:%.*]], i64 2
+; CHECK-NEXT: [[B3:%.*]] = extractelement <4 x double> [[B]], i64 3
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A:%.*]], <4 x double> [[B]], <2 x i32> <i32 0, i32 4>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x double> [[A]], <4 x double> [[B]], <2 x i32> <i32 1, i32 5>
+; CHECK-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[TMP1]], [[TMP2]]
+; CHECK-NEXT: [[R3:%.*]] = fadd double [[B2]], [[B3]]
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> poison, <4 x i32> <i32 0, i32 poison, i32 1, i32 poison>
+; CHECK-NEXT: [[R03:%.*]] = insertelement <4 x double> [[TMP4]], double [[R3]], i64 3
+; CHECK-NEXT: ret <4 x double> [[R03]]
;
%a0 = extractelement <4 x double> %a, i64 0
%a1 = extractelement <4 x double> %a, i64 1
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/hadd.ll b/llvm/test/Transforms/SLPVectorizer/X86/hadd.ll
index 31e3e6aa0a83..e30f84e4f17b 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/hadd.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/hadd.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=slm -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,SLM
-; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
-; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
-; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
-; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=knl -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -passes=slp-vectorizer,instcombine -S | FileCheck %s --check-prefixes=CHECK,AVX
;
; 128-bit vectors
@@ -213,62 +213,16 @@ define <4 x double> @test_v4f64(<4 x double> %a, <4 x double> %b) {
; PR50392
define <4 x double> @test_v4f64_partial_swizzle(<4 x double> %a, <4 x double> %b) {
-; SSE-LABEL: @test_v4f64_partial_swizzle(
-; SSE-NEXT: [[B2:%.*]] = extractelement <4 x double> [[B:%.*]], i64 2
-; SSE-NEXT: [[B3:%.*]] = extractelement <4 x double> [[B]], i64 3
-; SSE-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A:%.*]], <4 x double> [[B]], <2 x i32> <i32 0, i32 4>
-; SSE-NEXT: [[TMP2:%.*]] = shufflevector <4 x double> [[A]], <4 x double> [[B]], <2 x i32> <i32 1, i32 5>
-; SSE-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[TMP1]], [[TMP2]]
-; SSE-NEXT: [[R3:%.*]] = fadd double [[B2]], [[B3]]
-; SSE-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> <double undef, double poison>, <4 x i32> <i32 0, i32 2, i32 1, i32 poison>
-; SSE-NEXT: [[R03:%.*]] = insertelement <4 x double> [[TMP4]], double [[R3]], i64 3
-; SSE-NEXT: ret <4 x double> [[R03]]
-;
-; SLM-LABEL: @test_v4f64_partial_swizzle(
-; SLM-NEXT: [[B2:%.*]] = extractelement <4 x double> [[B:%.*]], i64 2
-; SLM-NEXT: [[B3:%.*]] = extractelement <4 x double> [[B]], i64 3
-; SLM-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A:%.*]], <4 x double> [[B]], <2 x i32> <i32 0, i32 4>
-; SLM-NEXT: [[TMP2:%.*]] = shufflevector <4 x double> [[A]], <4 x double> [[B]], <2 x i32> <i32 1, i32 5>
-; SLM-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[TMP1]], [[TMP2]]
-; SLM-NEXT: [[R3:%.*]] = fadd double [[B2]], [[B3]]
-; SLM-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> <double undef, double poison>, <4 x i32> <i32 0, i32 2, i32 1, i32 poison>
-; SLM-NEXT: [[R03:%.*]] = insertelement <4 x double> [[TMP4]], double [[R3]], i64 3
-; SLM-NEXT: ret <4 x double> [[R03]]
-;
-; AVX1-LABEL: @test_v4f64_partial_swizzle(
-; AVX1-NEXT: [[A0:%.*]] = extractelement <4 x double> [[A:%.*]], i64 0
-; AVX1-NEXT: [[A1:%.*]] = extractelement <4 x double> [[A]], i64 1
-; AVX1-NEXT: [[R0:%.*]] = fadd double [[A0]], [[A1]]
-; AVX1-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[B:%.*]], <4 x double> poison, <2 x i32> <i32 1, i32 2>
-; AVX1-NEXT: [[TMP2:%.*]] = shufflevector <4 x double> [[B]], <4 x double> poison, <2 x i32> <i32 0, i32 3>
-; AVX1-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[TMP1]], [[TMP2]]
-; AVX1-NEXT: [[R00:%.*]] = insertelement <4 x double> <double poison, double undef, double poison, double poison>, double [[R0]], i64 0
-; AVX1-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
-; AVX1-NEXT: [[R031:%.*]] = shufflevector <4 x double> [[R00]], <4 x double> [[TMP4]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
-; AVX1-NEXT: ret <4 x double> [[R031]]
-;
-; AVX2-LABEL: @test_v4f64_partial_swizzle(
-; AVX2-NEXT: [[A0:%.*]] = extractelement <4 x double> [[A:%.*]], i64 0
-; AVX2-NEXT: [[A1:%.*]] = extractelement <4 x double> [[A]], i64 1
-; AVX2-NEXT: [[R0:%.*]] = fadd double [[A0]], [[A1]]
-; AVX2-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[B:%.*]], <4 x double> poison, <2 x i32> <i32 1, i32 2>
-; AVX2-NEXT: [[TMP2:%.*]] = shufflevector <4 x double> [[B]], <4 x double> poison, <2 x i32> <i32 0, i32 3>
-; AVX2-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[TMP1]], [[TMP2]]
-; AVX2-NEXT: [[R00:%.*]] = insertelement <4 x double> <double poison, double undef, double poison, double poison>, double [[R0]], i64 0
-; AVX2-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
-; AVX2-NEXT: [[R031:%.*]] = shufflevector <4 x double> [[R00]], <4 x double> [[TMP4]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
-; AVX2-NEXT: ret <4 x double> [[R031]]
-;
-; AVX512-LABEL: @test_v4f64_partial_swizzle(
-; AVX512-NEXT: [[B2:%.*]] = extractelement <4 x double> [[B:%.*]], i64 2
-; AVX512-NEXT: [[B3:%.*]] = extractelement <4 x double> [[B]], i64 3
-; AVX512-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A:%.*]], <4 x double> [[B]], <2 x i32> <i32 0, i32 4>
-; AVX512-NEXT: [[TMP2:%.*]] = shufflevector <4 x double> [[A]], <4 x double> [[B]], <2 x i32> <i32 1, i32 5>
-; AVX512-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[TMP1]], [[TMP2]]
-; AVX512-NEXT: [[R3:%.*]] = fadd double [[B2]], [[B3]]
-; AVX512-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> <double undef, double poison>, <4 x i32> <i32 0, i32 2, i32 1, i32 poison>
-; AVX512-NEXT: [[R03:%.*]] = insertelement <4 x double> [[TMP4]], double [[R3]], i64 3
-; AVX512-NEXT: ret <4 x double> [[R03]]
+; CHECK-LABEL: @test_v4f64_partial_swizzle(
+; CHECK-NEXT: [[B2:%.*]] = extractelement <4 x double> [[B:%.*]], i64 2
+; CHECK-NEXT: [[B3:%.*]] = extractelement <4 x double> [[B]], i64 3
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[A:%.*]], <4 x double> [[B]], <2 x i32> <i32 0, i32 4>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x double> [[A]], <4 x double> [[B]], <2 x i32> <i32 1, i32 5>
+; CHECK-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[TMP1]], [[TMP2]]
+; CHECK-NEXT: [[R3:%.*]] = fadd double [[B2]], [[B3]]
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> <double undef, double poison>, <4 x i32> <i32 0, i32 2, i32 1, i32 poison>
+; CHECK-NEXT: [[R03:%.*]] = insertelement <4 x double> [[TMP4]], double [[R3]], i64 3
+; CHECK-NEXT: ret <4 x double> [[R03]]
;
%a0 = extractelement <4 x double> %a, i64 0
%a1 = extractelement <4 x double> %a, i64 1
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/pr47623.ll b/llvm/test/Transforms/SLPVectorizer/X86/pr47623.ll
index c46a5aa758fb..892a2b6cee3b 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/pr47623.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/pr47623.ll
@@ -13,32 +13,32 @@ define void @foo() {
; SSE-LABEL: @foo(
; SSE-NEXT: [[TMP1:%.*]] = load i32, ptr @b, align 16
; SSE-NEXT: store i32 [[TMP1]], ptr @a, align 16
-; SSE-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr inbounds ([8 x i32], ptr @b, i64 0, i64 2), align 8
-; SSE-NEXT: store i32 [[TMP2]], ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 1), align 4
-; SSE-NEXT: store i32 [[TMP1]], ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 2), align 8
-; SSE-NEXT: store i32 [[TMP2]], ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 3), align 4
-; SSE-NEXT: store i32 [[TMP1]], ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 4), align 16
-; SSE-NEXT: store i32 [[TMP2]], ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 5), align 4
-; SSE-NEXT: store i32 [[TMP1]], ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 6), align 8
-; SSE-NEXT: store i32 [[TMP2]], ptr getelementptr inbounds ([8 x i32], ptr @a, i64 0, i64 7), align 4
+; SSE-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr inbounds (i8, ptr @b, i64 8), align 8
+; SSE-NEXT: store i32 [[TMP2]], ptr getelementptr inbounds (i8, ptr @a, i64 4), align 4
+; SSE-NEXT: store i32 [[TMP1]], ptr getelementptr inbounds (i8, ptr @a, i64 8), align 8
+; SSE-NEXT: store i32 [[TMP2]], ptr getelementptr inbounds (i8, ptr @a, i64 12), align 4
+; SSE-NEXT: store i32 [[TMP1]], ptr getelementptr inbounds (i8, ptr @a, i64 16), align 16
+; SSE-NEXT: store i32 [[TMP2]], ptr getelementptr inbounds (i8, ptr @a, i64 20), align 4
+; SSE-NEXT: store i32 [[TMP1]], ptr getelementptr inbounds (i8, ptr @a, i64 24), align 8
+; SSE-NEXT: store i32 [[TMP2]], ptr getelementptr inbounds (i8, ptr @a, i64 28), align 4
; SSE-NEXT: ret void
;
; AVX-LABEL: @foo(
; AVX-NEXT: [[TMP1:%.*]] = load i32, ptr @b, align 16
-; AVX-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr inbounds ([8 x i32], ptr @b, i64 0, i64 2), align 8
+; AVX-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr inbounds (i8, ptr @b, i64 8), align 8
; AVX-NEXT: [[TMP3:%.*]] = insertelement <8 x i32> poison, i32 [[TMP1]], i64 0
; AVX-NEXT: [[TMP4:%.*]] = insertelement <8 x i32> [[TMP3]], i32 [[TMP2]], i64 1
-; AVX-NEXT: [[SHUFFLE:%.*]] = shufflevector <8 x i32> [[TMP4]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
-; AVX-NEXT: store <8 x i32> [[SHUFFLE]], ptr @a, align 16
+; AVX-NEXT: [[TMP5:%.*]] = shufflevector <8 x i32> [[TMP4]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+; AVX-NEXT: store <8 x i32> [[TMP5]], ptr @a, align 16
; AVX-NEXT: ret void
;
; AVX512-LABEL: @foo(
; AVX512-NEXT: [[TMP1:%.*]] = load i32, ptr @b, align 16
-; AVX512-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr inbounds ([8 x i32], ptr @b, i64 0, i64 2), align 8
+; AVX512-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr inbounds (i8, ptr @b, i64 8), align 8
; AVX512-NEXT: [[TMP3:%.*]] = insertelement <8 x i32> poison, i32 [[TMP1]], i64 0
; AVX512-NEXT: [[TMP4:%.*]] = insertelement <8 x i32> [[TMP3]], i32 [[TMP2]], i64 1
-; AVX512-NEXT: [[SHUFFLE:%.*]] = shufflevector <8 x i32> [[TMP4]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
-; AVX512-NEXT: store <8 x i32> [[SHUFFLE]], ptr @a, align 16
+; AVX512-NEXT: [[TMP5:%.*]] = shufflevector <8 x i32> [[TMP4]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+; AVX512-NEXT: store <8 x i32> [[TMP5]], ptr @a, align 16
; AVX512-NEXT: ret void
;
%1 = load i32, ptr @b, align 16
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/scalarazied-result.ll b/llvm/test/Transforms/SLPVectorizer/X86/scalarazied-result.ll
index 53f17083bd4b..1d6e191c6f97 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/scalarazied-result.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/scalarazied-result.ll
@@ -4,10 +4,6 @@
define void @test() {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = extractelement <8 x half> zeroinitializer, i64 1
-; CHECK-NEXT: [[TOBOOL:%.*]] = fcmp une half [[TMP0]], 0xH0000
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <8 x half> zeroinitializer, i64 1
-; CHECK-NEXT: [[TOBOOL3:%.*]] = fcmp une half [[TMP1]], 0xH0000
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/vector_gep.ll b/llvm/test/Transforms/SLPVectorizer/X86/vector_gep.ll
index b8c551c7b771..9e8cdc62c729 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/vector_gep.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/vector_gep.ll
@@ -26,5 +26,5 @@ entry:
unreachable
}
-attributes #0 = { noreturn readonly uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="knl" "target-features"="+adx,+aes,+avx,+avx2,+avx512cd,+avx512er,+avx512f,+avx512pf,+bmi,+bmi2,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+pclmul,+popcnt,+prefetchwt1,+rdrnd,+rdseed,+rtm,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { noreturn readonly uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="broadwell" "target-features"="+adx,+aes,+avx,+avx2,+avx512cd,+avx512f,+bmi,+bmi2,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+pclmul,+popcnt,+rdrnd,+rdseed,+rtm,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/streaming-compatible-expand-masked-gather-scatter.ll b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/streaming-compatible-expand-masked-gather-scatter.ll
index ee67ab341117..b827fc63c0ef 100644
--- a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/streaming-compatible-expand-masked-gather-scatter.ll
+++ b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AArch64/streaming-compatible-expand-masked-gather-scatter.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; REQUIRES: aarch64-registered-target
-; RUN: opt -S %s -passes=scalarize-masked-mem-intrin -mtriple=aarch64-linux-gnu -mattr=+sve -force-streaming-compatible-sve | FileCheck %s
+; RUN: opt -S %s -passes=scalarize-masked-mem-intrin -mtriple=aarch64-linux-gnu -mattr=+sve -force-streaming-compatible | FileCheck %s
define <2 x i32> @scalarize_v2i32(<2 x ptr> %p, <2 x i1> %mask, <2 x i32> %passthru) {
; CHECK-LABEL: @scalarize_v2i32(
diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/reorder-gep.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/reorder-gep.ll
index a7ca5b93c361..dd12c98af696 100644
--- a/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/reorder-gep.ll
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/reorder-gep.ll
@@ -288,8 +288,8 @@ entry:
define void @multiple_index_maybe_neg(ptr %in.ptr, i64 %in.idx1) {
; CHECK-LABEL: define void @multiple_index_maybe_neg(
; CHECK-SAME: ptr [[IN_PTR:%.*]], i64 [[IN_IDX1:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[CONST1:%.*]] = getelementptr inbounds [2 x <2 x i8>], ptr [[IN_PTR]], i64 0, i64 1
-; CHECK-NEXT: [[IDX1:%.*]] = getelementptr inbounds [2 x <2 x i8>], ptr [[CONST1]], i64 0, i64 [[IN_IDX1]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [2 x <2 x i8>], ptr [[IN_PTR]], i64 0, i64 [[IN_IDX1]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [2 x <2 x i8>], ptr [[TMP1]], i64 0, i64 1
; CHECK-NEXT: ret void
;
%const1 = getelementptr inbounds [2 x <2 x i8>], ptr %in.ptr, i64 0, i64 1
@@ -301,8 +301,8 @@ define void @multiple_index_nonneg(ptr %in.ptr, i64 %in.idx1) {
; CHECK-LABEL: define void @multiple_index_nonneg(
; CHECK-SAME: ptr [[IN_PTR:%.*]], i64 [[IN_IDX1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[IN_IDX1_NNEG:%.*]] = and i64 [[IN_IDX1]], 9223372036854775807
-; CHECK-NEXT: [[CONST1:%.*]] = getelementptr inbounds [2 x <2 x i8>], ptr [[IN_PTR]], i64 0, i64 1
-; CHECK-NEXT: [[IDX1:%.*]] = getelementptr inbounds [2 x <2 x i8>], ptr [[CONST1]], i64 0, i64 [[IN_IDX1_NNEG]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [2 x <2 x i8>], ptr [[IN_PTR]], i64 0, i64 [[IN_IDX1_NNEG]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x <2 x i8>], ptr [[TMP1]], i64 0, i64 1
; CHECK-NEXT: ret void
;
%in.idx1.nneg = and i64 %in.idx1, 9223372036854775807
diff --git a/llvm/test/Transforms/SimplifyCFG/HoistCode.ll b/llvm/test/Transforms/SimplifyCFG/HoistCode.ll
index 887d18201681..e6a255a4b8f0 100644
--- a/llvm/test/Transforms/SimplifyCFG/HoistCode.ll
+++ b/llvm/test/Transforms/SimplifyCFG/HoistCode.ll
@@ -215,3 +215,63 @@ F:
%z2 = trunc nsw nuw i32 %x to i16
ret i16 %z2
}
+
+define ptr @hoist_gep_flags_both_nuw(i1 %C, ptr %p) {
+; CHECK-LABEL: @hoist_gep_flags_both_nuw(
+; CHECK-NEXT: common.ret:
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nuw i8, ptr [[P:%.*]], i64 1
+; CHECK-NEXT: ret ptr [[GEP1]]
+;
+ br i1 %C, label %T, label %F
+T:
+ %gep1 = getelementptr nuw i8, ptr %p, i64 1
+ ret ptr %gep1
+F:
+ %gep2 = getelementptr nuw i8, ptr %p, i64 1
+ ret ptr %gep2
+}
+
+define ptr @hoist_gep_flags_both_nusw(i1 %C, ptr %p) {
+; CHECK-LABEL: @hoist_gep_flags_both_nusw(
+; CHECK-NEXT: common.ret:
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nusw i8, ptr [[P:%.*]], i64 1
+; CHECK-NEXT: ret ptr [[GEP1]]
+;
+ br i1 %C, label %T, label %F
+T:
+ %gep1 = getelementptr nusw i8, ptr %p, i64 1
+ ret ptr %gep1
+F:
+ %gep2 = getelementptr nusw i8, ptr %p, i64 1
+ ret ptr %gep2
+}
+
+define ptr @hoist_gep_flags_intersect1(i1 %C, ptr %p) {
+; CHECK-LABEL: @hoist_gep_flags_intersect1(
+; CHECK-NEXT: common.ret:
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr nusw i8, ptr [[P:%.*]], i64 1
+; CHECK-NEXT: ret ptr [[GEP1]]
+;
+ br i1 %C, label %T, label %F
+T:
+ %gep1 = getelementptr inbounds nuw i8, ptr %p, i64 1
+ ret ptr %gep1
+F:
+ %gep2 = getelementptr nusw i8, ptr %p, i64 1
+ ret ptr %gep2
+}
+
+define ptr @hoist_gep_flags_intersect2(i1 %C, ptr %p) {
+; CHECK-LABEL: @hoist_gep_flags_intersect2(
+; CHECK-NEXT: common.ret:
+; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 1
+; CHECK-NEXT: ret ptr [[GEP1]]
+;
+ br i1 %C, label %T, label %F
+T:
+ %gep1 = getelementptr inbounds i8, ptr %p, i64 1
+ ret ptr %gep1
+F:
+ %gep2 = getelementptr nuw i8, ptr %p, i64 1
+ ret ptr %gep2
+}
diff --git a/llvm/test/Transforms/SimplifyCFG/switch-dead-default-lookup-table.ll b/llvm/test/Transforms/SimplifyCFG/switch-dead-default-lookup-table.ll
new file mode 100644
index 000000000000..bead0dc4c567
--- /dev/null
+++ b/llvm/test/Transforms/SimplifyCFG/switch-dead-default-lookup-table.ll
@@ -0,0 +1,61 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt %s -S -passes='simplifycfg<switch-to-lookup>' -simplifycfg-require-and-preserve-domtree=1 -switch-range-to-icmp | FileCheck %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+
+define i64 @test_1(i64 %0) {
+; CHECK-LABEL: define i64 @test_1(
+; CHECK-SAME: i64 [[TMP0:%.*]]) {
+; CHECK-NEXT: switch.lookup:
+; CHECK-NEXT: [[TMP1:%.*]] = urem i64 [[TMP0]], 4
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [4 x i64], ptr @switch.table.test_1, i32 0, i64 [[TMP1]]
+; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i64, ptr [[SWITCH_GEP]], align 8
+; CHECK-NEXT: ret i64 [[SWITCH_LOAD]]
+;
+ %2 = urem i64 %0, 4
+ switch i64 %2, label %5 [
+ i64 1, label %3
+ i64 2, label %3
+ i64 3, label %4
+ ]
+
+3:
+ br label %5
+
+4:
+ br label %5
+
+5:
+ %.0 = phi i64 [ 2, %4 ], [ 1, %3 ], [ 0, %1 ]
+ ret i64 %.0
+}
+
+
+define i64 @test_2(i64 %0) {
+; CHECK-LABEL: define i64 @test_2(
+; CHECK-SAME: i64 [[TMP0:%.*]]) {
+; CHECK-NEXT: switch.lookup:
+; CHECK-NEXT: [[TMP1:%.*]] = urem i64 [[TMP0]], 4
+; CHECK-NEXT: ret i64 [[TMP1]]
+;
+ %2 = urem i64 %0, 4
+ switch i64 %2, label %6 [
+ i64 1, label %3
+ i64 2, label %4
+ i64 3, label %5
+ ]
+
+3:
+ br label %6
+
+4:
+ br label %6
+
+5:
+ br label %6
+
+6:
+ %.0 = phi i64 [ 0, %1 ], [ 1, %3 ], [ 2, %4 ], [ 3, %5 ]
+ ret i64 %.0
+}
+
diff --git a/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll b/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll
index 7c0d5e4f2b65..4a457cc177e8 100644
--- a/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll
+++ b/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll
@@ -79,15 +79,15 @@ default:
ret void
}
-; This one is a negative test - we know the value of the default,
-; but that's about it
+; We can replace the default branch with case 3, since it is the only missing case.
define void @test3(i2 %a) {
; CHECK-LABEL: define void @test3(
; CHECK-SAME: i2 [[A:%.*]]) {
-; CHECK-NEXT: switch i2 [[A]], label [[DEFAULT:%.*]] [
+; CHECK-NEXT: switch i2 [[A]], label [[DOTUNREACHABLEDEFAULT:%.*]] [
; CHECK-NEXT: i2 0, label [[CASE0:%.*]]
; CHECK-NEXT: i2 1, label [[CASE1:%.*]]
; CHECK-NEXT: i2 -2, label [[CASE2:%.*]]
+; CHECK-NEXT: i2 -1, label [[DEFAULT:%.*]]
; CHECK-NEXT: ]
; CHECK: common.ret:
; CHECK-NEXT: ret void
@@ -100,6 +100,8 @@ define void @test3(i2 %a) {
; CHECK: case2:
; CHECK-NEXT: call void @foo(i32 2)
; CHECK-NEXT: br label [[COMMON_RET]]
+; CHECK: .unreachabledefault:
+; CHECK-NEXT: unreachable
; CHECK: default:
; CHECK-NEXT: call void @foo(i32 3)
; CHECK-NEXT: br label [[COMMON_RET]]
@@ -122,6 +124,50 @@ default:
ret void
}
+define void @test3_prof(i2 %a) {
+; CHECK-LABEL: define void @test3_prof(
+; CHECK-SAME: i2 [[A:%.*]]) {
+; CHECK-NEXT: switch i2 [[A]], label [[DOTUNREACHABLEDEFAULT:%.*]] [
+; CHECK-NEXT: i2 0, label [[CASE0:%.*]]
+; CHECK-NEXT: i2 1, label [[CASE1:%.*]]
+; CHECK-NEXT: i2 -2, label [[CASE2:%.*]]
+; CHECK-NEXT: i2 -1, label [[DEFAULT:%.*]]
+; CHECK-NEXT: ], !prof [[PROF0:![0-9]+]]
+; CHECK: common.ret:
+; CHECK-NEXT: ret void
+; CHECK: case0:
+; CHECK-NEXT: call void @foo(i32 0)
+; CHECK-NEXT: br label [[COMMON_RET:%.*]]
+; CHECK: case1:
+; CHECK-NEXT: call void @foo(i32 1)
+; CHECK-NEXT: br label [[COMMON_RET]]
+; CHECK: case2:
+; CHECK-NEXT: call void @foo(i32 2)
+; CHECK-NEXT: br label [[COMMON_RET]]
+; CHECK: .unreachabledefault:
+; CHECK-NEXT: unreachable
+; CHECK: default:
+; CHECK-NEXT: call void @foo(i32 3)
+; CHECK-NEXT: br label [[COMMON_RET]]
+;
+ switch i2 %a, label %default [i2 0, label %case0
+ i2 1, label %case1
+ i2 2, label %case2], !prof !0
+
+case0:
+ call void @foo(i32 0)
+ ret void
+case1:
+ call void @foo(i32 1)
+ ret void
+case2:
+ call void @foo(i32 2)
+ ret void
+default:
+ call void @foo(i32 3)
+ ret void
+}
+
; Negative test - check for possible overflow when computing
; the number of possible cases.
define void @test4(i128 %a) {
@@ -267,3 +313,40 @@ default:
declare void @llvm.assume(i1)
+define zeroext i1 @test8(i128 %a) {
+; We should not transform conditions wider than 64 bits.
+; CHECK-LABEL: define zeroext i1 @test8(
+; CHECK-SAME: i128 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = and i128 [[A]], 3894222643901120721397872246915072
+; CHECK-NEXT: switch i128 [[TMP0]], label [[LOR_RHS:%.*]] [
+; CHECK-NEXT: i128 1298074214633706907132624082305024, label [[LOR_END:%.*]]
+; CHECK-NEXT: i128 2596148429267413814265248164610048, label [[LOR_END]]
+; CHECK-NEXT: i128 3894222643901120721397872246915072, label [[LOR_END]]
+; CHECK-NEXT: ]
+; CHECK: lor.rhs:
+; CHECK-NEXT: br label [[LOR_END]]
+; CHECK: lor.end:
+; CHECK-NEXT: [[TMP1:%.*]] = phi i1 [ true, [[ENTRY:%.*]] ], [ false, [[LOR_RHS]] ], [ true, [[ENTRY]] ], [ true, [[ENTRY]] ]
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+entry:
+ %0 = and i128 %a, 3894222643901120721397872246915072
+ switch i128 %0, label %lor.rhs [
+ i128 1298074214633706907132624082305024, label %lor.end
+ i128 2596148429267413814265248164610048, label %lor.end
+ i128 3894222643901120721397872246915072, label %lor.end
+ ]
+
+lor.rhs: ; preds = %entry
+ br label %lor.end
+
+lor.end: ; preds = %entry, %entry, %entry, %lor.rhs
+ %1 = phi i1 [ true, %entry ], [ false, %lor.rhs ], [ true, %entry ], [ true, %entry ]
+ ret i1 %1
+}
+
+!0 = !{!"branch_weights", i32 8, i32 4, i32 2, i32 1}
+;.
+; CHECK: [[PROF0]] = !{!"branch_weights", i32 0, i32 4, i32 2, i32 1, i32 8}
+;.
diff --git a/llvm/test/Transforms/Util/add-TLI-mappings.ll b/llvm/test/Transforms/Util/add-TLI-mappings.ll
index 0e005ae75ef5..4e4b81e89a32 100644
--- a/llvm/test/Transforms/Util/add-TLI-mappings.ll
+++ b/llvm/test/Transforms/Util/add-TLI-mappings.ll
@@ -274,19 +274,19 @@ attributes #0 = { nounwind readnone }
; ARMPL-SAME: _ZGVsMxvl4_modff(armpl_svmodf_f32_x)" }
; ARMPL: attributes #[[SIN]] = { "vector-function-abi-variant"=
; ARMPL-SAME: "_ZGV_LLVM_N2v_sin(armpl_vsinq_f64),
-; ARMPL-SAME _ZGVsMxv_sin(armpl_svsin_f64_x)" }
+; ARMPL-SAME: _ZGVsMxv_sin(armpl_svsin_f64_x)" }
; ARMPL: attributes #[[SINCOS]] = { "vector-function-abi-variant"=
; ARMPL-SAME: "_ZGV_LLVM_N2vl8l8_sincos(armpl_vsincosq_f64),
-; ARMPL-SAME: _ZGVsMxvl8l8_sincos(armpl_svsincos_f64_x)" }
+; ARMPL-SAME: _ZGVsMxvl8l8_sincos(armpl_svsincos_f64_x)" }
; ARMPL: attributes #[[SINCOSF]] = { "vector-function-abi-variant"=
; ARMPL-SAME: "_ZGV_LLVM_N4vl4l4_sincosf(armpl_vsincosq_f32),
; ARMPL-SAME: _ZGVsMxvl4l4_sincosf(armpl_svsincos_f32_x)" }
; ARMPL: attributes #[[SINCOSPI]] = { "vector-function-abi-variant"=
; ARMPL-SAME: "_ZGV_LLVM_N2vl8l8_sincospi(armpl_vsincospiq_f64),
-; ARMPL-SAME: _ZGVsMxvl8l8_sincospi(armpl_svsincospi_f64_x)" }
+; ARMPL-SAME: _ZGVsMxvl8l8_sincospi(armpl_svsincospi_f64_x)" }
; ARMPL: attributes #[[SINCOSPIF]] = { "vector-function-abi-variant"=
; ARMPL-SAME: "_ZGV_LLVM_N4vl4l4_sincospif(armpl_vsincospiq_f32),
; ARMPL-SAME: _ZGVsMxvl4l4_sincospif(armpl_svsincospi_f32_x)" }
; ARMPL: attributes #[[LOG10]] = { "vector-function-abi-variant"=
; ARMPL-SAME: "_ZGV_LLVM_N4v_llvm.log10.f32(armpl_vlog10q_f32),
-; ARMPL-SAME _ZGVsMxv_llvm.log10.f32(armpl_svlog10_f32_x)" }
+; ARMPL-SAME: _ZGVsMxv_llvm.log10.f32(armpl_svlog10_f32_x)" }
diff --git a/llvm/test/Transforms/VectorCombine/AArch64/shuffletoidentity.ll b/llvm/test/Transforms/VectorCombine/AArch64/shuffletoidentity.ll
index b58f92d70936..7dadeb5d72de 100644
--- a/llvm/test/Transforms/VectorCombine/AArch64/shuffletoidentity.ll
+++ b/llvm/test/Transforms/VectorCombine/AArch64/shuffletoidentity.ll
@@ -13,8 +13,21 @@ define <8 x i8> @trivial(<8 x i8> %a) {
ret <8 x i8> %r
}
-define <8 x i8> @add(<8 x i8> %a, <8 x i8> %b) {
-; CHECK-LABEL: @add(
+define <4 x i32> @add_same_operands(<4 x i32> %x) {
+; CHECK-LABEL: @add_same_operands(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[ADD:%.*]] = add <4 x i32> [[SHUF]], [[SHUF]]
+; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i32> [[ADD]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <4 x i32> [[REVSHUF]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %add = add <4 x i32> %shuf, %shuf
+ %revshuf = shufflevector <4 x i32> %add, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i32> %revshuf
+}
+
+define <8 x i8> @add_different_operands(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: @add_different_operands(
; CHECK-NEXT: [[R:%.*]] = add <8 x i8> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <8 x i8> [[R]]
;
@@ -317,6 +330,23 @@ define <8 x i8> @constantdiff2(<8 x i8> %a) {
ret <8 x i8> %r
}
+define <8 x half> @constantsplatf(<8 x half> %a) {
+; CHECK-LABEL: @constantsplatf(
+; CHECK-NEXT: [[AB:%.*]] = shufflevector <8 x half> [[A:%.*]], <8 x half> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[AT:%.*]] = shufflevector <8 x half> [[A]], <8 x half> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+; CHECK-NEXT: [[ABT:%.*]] = fadd <4 x half> [[AT]], <half 0xH4900, half 0xH4900, half 0xH4900, half 0xH4900>
+; CHECK-NEXT: [[ABB:%.*]] = fadd <4 x half> [[AB]], <half 0xH4900, half 0xH4900, half 0xH4900, half 0xH4900>
+; CHECK-NEXT: [[R:%.*]] = shufflevector <4 x half> [[ABT]], <4 x half> [[ABB]], <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <8 x half> [[R]]
+;
+ %ab = shufflevector <8 x half> %a, <8 x half> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %at = shufflevector <8 x half> %a, <8 x half> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+ %abt = fadd <4 x half> %at, <half 10.0, half 10.0, half 10.0, half 10.0>
+ %abb = fadd <4 x half> %ab, <half 10.0, half 10.0, half 10.0, half 10.0>
+ %r = shufflevector <4 x half> %abt, <4 x half> %abb, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ ret <8 x half> %r
+}
+
define <8 x i8> @inner_shuffle(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) {
; CHECK-LABEL: @inner_shuffle(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i8> [[C:%.*]], <8 x i8> poison, <8 x i32> zeroinitializer
@@ -339,8 +369,23 @@ define <8 x i8> @inner_shuffle(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) {
ret <8 x i8> %r
}
-define <8 x i8> @extrause_add(<8 x i8> %a, <8 x i8> %b) {
-; CHECK-LABEL: @extrause_add(
+define <4 x i32> @extrause_add_same_operands(<4 x i32> %x) {
+; CHECK-LABEL: @extrause_add_same_operands(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[ADD:%.*]] = add <4 x i32> [[SHUF]], [[SHUF]]
+; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i32> [[ADD]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[ADD2:%.*]] = add <4 x i32> [[SHUF]], [[REVSHUF]]
+; CHECK-NEXT: ret <4 x i32> [[ADD2]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %add = add <4 x i32> %shuf, %shuf
+ %revshuf = shufflevector <4 x i32> %add, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %add2 = add <4 x i32> %shuf, %revshuf
+ ret <4 x i32> %add2
+}
+
+define <8 x i8> @extrause_add_different_operands(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: @extrause_add_different_operands(
; CHECK-NEXT: [[AB:%.*]] = shufflevector <8 x i8> [[A:%.*]], <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: [[AT:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
; CHECK-NEXT: [[BB:%.*]] = shufflevector <8 x i8> [[B:%.*]], <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -413,6 +458,72 @@ define <8 x i8> @icmpsel(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
ret <8 x i8> %r
}
+define <8 x i8> @icmpsel_differentcond(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
+; CHECK-LABEL: @icmpsel_differentcond(
+; CHECK-NEXT: [[AB:%.*]] = shufflevector <8 x i8> [[A:%.*]], <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[AT:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+; CHECK-NEXT: [[BB:%.*]] = shufflevector <8 x i8> [[B:%.*]], <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[BT:%.*]] = shufflevector <8 x i8> [[B]], <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+; CHECK-NEXT: [[CB:%.*]] = shufflevector <8 x i8> [[C:%.*]], <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[CT:%.*]] = shufflevector <8 x i8> [[C]], <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+; CHECK-NEXT: [[DB:%.*]] = shufflevector <8 x i8> [[D:%.*]], <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[DT:%.*]] = shufflevector <8 x i8> [[D]], <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+; CHECK-NEXT: [[ABT1:%.*]] = icmp slt <4 x i8> [[AT]], [[BT]]
+; CHECK-NEXT: [[ABB1:%.*]] = icmp ult <4 x i8> [[AB]], [[BB]]
+; CHECK-NEXT: [[ABT:%.*]] = select <4 x i1> [[ABT1]], <4 x i8> [[CT]], <4 x i8> [[DT]]
+; CHECK-NEXT: [[ABB:%.*]] = select <4 x i1> [[ABB1]], <4 x i8> [[CB]], <4 x i8> [[DB]]
+; CHECK-NEXT: [[R:%.*]] = shufflevector <4 x i8> [[ABT]], <4 x i8> [[ABB]], <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <8 x i8> [[R]]
+;
+ %ab = shufflevector <8 x i8> %a, <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %at = shufflevector <8 x i8> %a, <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+ %bb = shufflevector <8 x i8> %b, <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %bt = shufflevector <8 x i8> %b, <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+ %cb = shufflevector <8 x i8> %c, <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %ct = shufflevector <8 x i8> %c, <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+ %db = shufflevector <8 x i8> %d, <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %dt = shufflevector <8 x i8> %d, <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+ %abt1 = icmp slt <4 x i8> %at, %bt
+ %abb1 = icmp ult <4 x i8> %ab, %bb
+ %abt = select <4 x i1> %abt1, <4 x i8> %ct, <4 x i8> %dt
+ %abb = select <4 x i1> %abb1, <4 x i8> %cb, <4 x i8> %db
+ %r = shufflevector <4 x i8> %abt, <4 x i8> %abb, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ ret <8 x i8> %r
+}
+
+define <8 x i8> @fcmpsel(<8 x half> %a, <8 x half> %b, <8 x i8> %c, <8 x i8> %d) {
+; CHECK-LABEL: @fcmpsel(
+; CHECK-NEXT: [[AB:%.*]] = shufflevector <8 x half> [[A:%.*]], <8 x half> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[AT:%.*]] = shufflevector <8 x half> [[A]], <8 x half> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+; CHECK-NEXT: [[BB:%.*]] = shufflevector <8 x half> [[B:%.*]], <8 x half> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[BT:%.*]] = shufflevector <8 x half> [[B]], <8 x half> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+; CHECK-NEXT: [[CB:%.*]] = shufflevector <8 x i8> [[C:%.*]], <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[CT:%.*]] = shufflevector <8 x i8> [[C]], <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+; CHECK-NEXT: [[DB:%.*]] = shufflevector <8 x i8> [[D:%.*]], <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[DT:%.*]] = shufflevector <8 x i8> [[D]], <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+; CHECK-NEXT: [[ABT1:%.*]] = fcmp olt <4 x half> [[AT]], [[BT]]
+; CHECK-NEXT: [[ABB1:%.*]] = fcmp olt <4 x half> [[AB]], [[BB]]
+; CHECK-NEXT: [[ABT:%.*]] = select <4 x i1> [[ABT1]], <4 x i8> [[CT]], <4 x i8> [[DT]]
+; CHECK-NEXT: [[ABB:%.*]] = select <4 x i1> [[ABB1]], <4 x i8> [[CB]], <4 x i8> [[DB]]
+; CHECK-NEXT: [[R:%.*]] = shufflevector <4 x i8> [[ABT]], <4 x i8> [[ABB]], <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <8 x i8> [[R]]
+;
+ %ab = shufflevector <8 x half> %a, <8 x half> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %at = shufflevector <8 x half> %a, <8 x half> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+ %bb = shufflevector <8 x half> %b, <8 x half> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %bt = shufflevector <8 x half> %b, <8 x half> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+ %cb = shufflevector <8 x i8> %c, <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %ct = shufflevector <8 x i8> %c, <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+ %db = shufflevector <8 x i8> %d, <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %dt = shufflevector <8 x i8> %d, <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+ %abt1 = fcmp olt <4 x half> %at, %bt
+ %abb1 = fcmp olt <4 x half> %ab, %bb
+ %abt = select <4 x i1> %abt1, <4 x i8> %ct, <4 x i8> %dt
+ %abb = select <4 x i1> %abb1, <4 x i8> %cb, <4 x i8> %db
+ %r = shufflevector <4 x i8> %abt, <4 x i8> %abb, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ ret <8 x i8> %r
+}
+
define <8 x half> @fma(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
; CHECK-LABEL: @fma(
; CHECK-NEXT: [[R:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x half> [[C:%.*]])
@@ -430,6 +541,63 @@ define <8 x half> @fma(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
ret <8 x half> %r
}
+define <4 x i64> @single_zext(<4 x i32> %x) {
+; CHECK-LABEL: @single_zext(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i32> [[SHUF]] to <4 x i64>
+; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i64> [[ZEXT]], <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <4 x i64> [[REVSHUF]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %zext = zext <4 x i32> %shuf to <4 x i64>
+ %revshuf = shufflevector <4 x i64> %zext, <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i64> %revshuf
+}
+
+define <4 x i64> @not_zext(<4 x i32> %x) {
+; CHECK-LABEL: @not_zext(
+; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i32> [[X:%.*]] to <4 x i64>
+; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i64> [[ZEXT]], <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <4 x i64> [[REVSHUF]]
+;
+ %zext = zext <4 x i32> %x to <4 x i64>
+ %revshuf = shufflevector <4 x i64> %zext, <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i64> %revshuf
+}
+
+define <4 x i64> @not_zext2(<4 x i32> %x) {
+; CHECK-LABEL: @not_zext2(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i32> [[SHUF]] to <4 x i64>
+; CHECK-NEXT: ret <4 x i64> [[ZEXT]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %zext = zext <4 x i32> %shuf to <4 x i64>
+ ret <4 x i64> %zext
+}
+
+define i32 @not_bitcast(<4 x i8> %x) {
+; CHECK-LABEL: @not_bitcast(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i8> [[X:%.*]], <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[BITCAST:%.*]] = bitcast <4 x i8> [[SHUF]] to i32
+; CHECK-NEXT: ret i32 [[BITCAST]]
+;
+ %shuf = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %bitcast = bitcast <4 x i8> %shuf to i32
+ ret i32 %bitcast
+}
+
+define <8 x i16> @not_bitcast2(<4 x i32> %x, <8 x i16> %y) {
+; CHECK-LABEL: @not_bitcast2(
+; CHECK-NEXT: [[CAST:%.*]] = bitcast <4 x i32> [[X:%.*]] to <8 x i16>
+; CHECK-NEXT: [[OUT:%.*]] = shufflevector <8 x i16> [[Y:%.*]], <8 x i16> [[CAST]], <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
+; CHECK-NEXT: ret <8 x i16> [[OUT]]
+;
+ %cast = bitcast <4 x i32> %x to <8 x i16>
+ %out = shufflevector <8 x i16> %y, <8 x i16> %cast, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
+ ret <8 x i16> %out
+}
+
define void @exttrunc(<8 x i32> %a, <8 x i32> %b, ptr %p) {
; CHECK-LABEL: @exttrunc(
; CHECK-NEXT: [[AB:%.*]] = shufflevector <8 x i32> [[A:%.*]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -465,6 +633,170 @@ define void @exttrunc(<8 x i32> %a, <8 x i32> %b, ptr %p) {
ret void
}
+define void @zext(<8 x i16> %a, <8 x i16> %b, ptr %p) {
+; CHECK-LABEL: @zext(
+; CHECK-NEXT: [[AB:%.*]] = shufflevector <8 x i16> [[A:%.*]], <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK-NEXT: [[AT:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; CHECK-NEXT: [[BB:%.*]] = shufflevector <8 x i16> [[B:%.*]], <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK-NEXT: [[BT:%.*]] = shufflevector <8 x i16> [[B]], <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; CHECK-NEXT: [[AB1:%.*]] = zext <4 x i16> [[AB]] to <4 x i32>
+; CHECK-NEXT: [[AT1:%.*]] = zext <4 x i16> [[AT]] to <4 x i32>
+; CHECK-NEXT: [[BB1:%.*]] = zext <4 x i16> [[BB]] to <4 x i32>
+; CHECK-NEXT: [[BT1:%.*]] = zext <4 x i16> [[BT]] to <4 x i32>
+; CHECK-NEXT: [[ABB:%.*]] = add <4 x i32> [[AB1]], [[BB1]]
+; CHECK-NEXT: [[ABT:%.*]] = add <4 x i32> [[AT1]], [[BT1]]
+; CHECK-NEXT: [[R:%.*]] = shufflevector <4 x i32> [[ABB]], <4 x i32> [[ABT]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT: store <8 x i32> [[R]], ptr [[P:%.*]], align 32
+; CHECK-NEXT: ret void
+;
+ %ab = shufflevector <8 x i16> %a, <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %at = shufflevector <8 x i16> %a, <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %bb = shufflevector <8 x i16> %b, <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %bt = shufflevector <8 x i16> %b, <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %ab1 = zext <4 x i16> %ab to <4 x i32>
+ %at1 = zext <4 x i16> %at to <4 x i32>
+ %bb1 = zext <4 x i16> %bb to <4 x i32>
+ %bt1 = zext <4 x i16> %bt to <4 x i32>
+ %abb = add <4 x i32> %ab1, %bb1
+ %abt = add <4 x i32> %at1, %bt1
+ %r = shufflevector <4 x i32> %abb, <4 x i32> %abt, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+ store <8 x i32> %r, ptr %p
+ ret void
+}
+
+define void @sext(<8 x i16> %a, <8 x i16> %b, ptr %p) {
+; CHECK-LABEL: @sext(
+; CHECK-NEXT: [[AB:%.*]] = shufflevector <8 x i16> [[A:%.*]], <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK-NEXT: [[AT:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; CHECK-NEXT: [[BB:%.*]] = shufflevector <8 x i16> [[B:%.*]], <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK-NEXT: [[BT:%.*]] = shufflevector <8 x i16> [[B]], <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; CHECK-NEXT: [[AB1:%.*]] = sext <4 x i16> [[AB]] to <4 x i32>
+; CHECK-NEXT: [[AT1:%.*]] = sext <4 x i16> [[AT]] to <4 x i32>
+; CHECK-NEXT: [[BB1:%.*]] = sext <4 x i16> [[BB]] to <4 x i32>
+; CHECK-NEXT: [[BT1:%.*]] = sext <4 x i16> [[BT]] to <4 x i32>
+; CHECK-NEXT: [[ABB:%.*]] = add <4 x i32> [[AB1]], [[BB1]]
+; CHECK-NEXT: [[ABT:%.*]] = add <4 x i32> [[AT1]], [[BT1]]
+; CHECK-NEXT: [[R:%.*]] = shufflevector <4 x i32> [[ABB]], <4 x i32> [[ABT]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT: store <8 x i32> [[R]], ptr [[P:%.*]], align 32
+; CHECK-NEXT: ret void
+;
+ %ab = shufflevector <8 x i16> %a, <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %at = shufflevector <8 x i16> %a, <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %bb = shufflevector <8 x i16> %b, <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %bt = shufflevector <8 x i16> %b, <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %ab1 = sext <4 x i16> %ab to <4 x i32>
+ %at1 = sext <4 x i16> %at to <4 x i32>
+ %bb1 = sext <4 x i16> %bb to <4 x i32>
+ %bt1 = sext <4 x i16> %bt to <4 x i32>
+ %abb = add <4 x i32> %ab1, %bb1
+ %abt = add <4 x i32> %at1, %bt1
+ %r = shufflevector <4 x i32> %abb, <4 x i32> %abt, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+ store <8 x i32> %r, ptr %p
+ ret void
+}
+
+define void @szext(<8 x i32> %a, <8 x i32> %b, ptr %p) {
+; CHECK-LABEL: @szext(
+; CHECK-NEXT: [[AB:%.*]] = shufflevector <8 x i32> [[A:%.*]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK-NEXT: [[AT:%.*]] = shufflevector <8 x i32> [[A]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; CHECK-NEXT: [[AB1:%.*]] = sext <4 x i32> [[AB]] to <4 x i64>
+; CHECK-NEXT: [[AT1:%.*]] = zext <4 x i32> [[AT]] to <4 x i64>
+; CHECK-NEXT: [[R:%.*]] = shufflevector <4 x i64> [[AB1]], <4 x i64> [[AT1]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT: store <8 x i64> [[R]], ptr [[P:%.*]], align 64
+; CHECK-NEXT: ret void
+;
+ %ab = shufflevector <8 x i32> %a, <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %at = shufflevector <8 x i32> %a, <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %ab1 = sext <4 x i32> %ab to <4 x i64>
+ %at1 = zext <4 x i32> %at to <4 x i64>
+ %r = shufflevector <4 x i64> %ab1, <4 x i64> %at1, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+ store <8 x i64> %r, ptr %p
+ ret void
+}
+
+define void @zext_types(<8 x i16> %a, <8 x i32> %b, ptr %p) {
+; CHECK-LABEL: @zext_types(
+; CHECK-NEXT: [[AB:%.*]] = shufflevector <8 x i16> [[A:%.*]], <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK-NEXT: [[AT:%.*]] = shufflevector <8 x i32> [[B:%.*]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; CHECK-NEXT: [[AB1:%.*]] = zext <4 x i16> [[AB]] to <4 x i64>
+; CHECK-NEXT: [[AT1:%.*]] = zext <4 x i32> [[AT]] to <4 x i64>
+; CHECK-NEXT: [[R:%.*]] = shufflevector <4 x i64> [[AB1]], <4 x i64> [[AT1]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT: store <8 x i64> [[R]], ptr [[P:%.*]], align 64
+; CHECK-NEXT: ret void
+;
+ %ab = shufflevector <8 x i16> %a, <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %at = shufflevector <8 x i32> %b, <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %ab1 = zext <4 x i16> %ab to <4 x i64>
+ %at1 = zext <4 x i32> %at to <4 x i64>
+ %r = shufflevector <4 x i64> %ab1, <4 x i64> %at1, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+ store <8 x i64> %r, ptr %p
+ ret void
+}
+
+define void @trunc(<8 x i64> %a, <8 x i64> %b, ptr %p) {
+; CHECK-LABEL: @trunc(
+; CHECK-NEXT: [[AB:%.*]] = shufflevector <8 x i64> [[A:%.*]], <8 x i64> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK-NEXT: [[AT:%.*]] = shufflevector <8 x i64> [[A]], <8 x i64> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; CHECK-NEXT: [[ABB1:%.*]] = trunc <4 x i64> [[AB]] to <4 x i32>
+; CHECK-NEXT: [[ABT1:%.*]] = trunc <4 x i64> [[AT]] to <4 x i32>
+; CHECK-NEXT: [[R:%.*]] = shufflevector <4 x i32> [[ABB1]], <4 x i32> [[ABT1]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT: store <8 x i32> [[R]], ptr [[P:%.*]], align 32
+; CHECK-NEXT: ret void
+;
+ %ab = shufflevector <8 x i64> %a, <8 x i64> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %at = shufflevector <8 x i64> %a, <8 x i64> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %abb1 = trunc <4 x i64> %ab to <4 x i32>
+ %abt1 = trunc <4 x i64> %at to <4 x i32>
+ %r = shufflevector <4 x i32> %abb1, <4 x i32> %abt1, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+ store <8 x i32> %r, ptr %p
+ ret void
+}
+
+define <4 x i64> @zext_chain(<4 x i16> %x) {
+; CHECK-LABEL: @zext_chain(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i16> [[X:%.*]], <4 x i16> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i16> [[SHUF]] to <4 x i32>
+; CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i32> [[ZEXT]] to <4 x i64>
+; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i64> [[SEXT]], <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <4 x i64> [[REVSHUF]]
+;
+ %shuf = shufflevector <4 x i16> %x, <4 x i16> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %zext = zext <4 x i16> %shuf to <4 x i32>
+ %sext = sext <4 x i32> %zext to <4 x i64>
+ %revshuf = shufflevector <4 x i64> %sext, <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i64> %revshuf
+}
+
+define <4 x i32> @add_chain(<4 x i32> %x) {
+; CHECK-LABEL: @add_chain(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[ADD:%.*]] = add <4 x i32> [[SHUF]], [[SHUF]]
+; CHECK-NEXT: [[ADD2:%.*]] = add <4 x i32> [[ADD]], [[ADD]]
+; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i32> [[ADD2]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <4 x i32> [[REVSHUF]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %add = add <4 x i32> %shuf, %shuf
+ %add2 = add <4 x i32> %add, %add
+ %revshuf = shufflevector <4 x i32> %add2, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i32> %revshuf
+}
+
+define <4 x i64> @zext_add_chain(<4 x i32> %x) {
+; CHECK-LABEL: @zext_add_chain(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i32> [[SHUF]] to <4 x i64>
+; CHECK-NEXT: [[ADD:%.*]] = add <4 x i64> [[ZEXT]], [[ZEXT]]
+; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i64> [[ADD]], <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <4 x i64> [[REVSHUF]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %zext = zext <4 x i32> %shuf to <4 x i64>
+ %add = add <4 x i64> %zext, %zext
+ %revshuf = shufflevector <4 x i64> %add, <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i64> %revshuf
+}
+
define <8 x i8> @intrinsics_minmax(<8 x i8> %a, <8 x i8> %b) {
; CHECK-LABEL: @intrinsics_minmax(
; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i8> @llvm.smin.v8i8(<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]])
@@ -624,4 +956,54 @@ entry:
ret void
}
+define <4 x i8> @singleop(<4 x i8> %a, <4 x i8> %b) {
+; CHECK-LABEL: @singleop(
+; CHECK-NEXT: [[A1:%.*]] = shufflevector <4 x i8> [[A:%.*]], <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[B1:%.*]] = shufflevector <4 x i8> [[B:%.*]], <4 x i8> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[A2:%.*]] = zext <4 x i8> [[A1]] to <4 x i16>
+; CHECK-NEXT: [[B2:%.*]] = zext <4 x i8> [[B1]] to <4 x i16>
+; CHECK-NEXT: [[AB:%.*]] = add <4 x i16> [[A2]], [[B2]]
+; CHECK-NEXT: [[T:%.*]] = trunc <4 x i16> [[AB]] to <4 x i8>
+; CHECK-NEXT: [[R:%.*]] = shufflevector <4 x i8> [[T]], <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <4 x i8> [[R]]
+;
+ %a1 = shufflevector <4 x i8> %a, <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %b1 = shufflevector <4 x i8> %b, <4 x i8> poison, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %a2 = zext <4 x i8> %a1 to <4 x i16>
+ %b2 = zext <4 x i8> %b1 to <4 x i16>
+ %ab = add <4 x i16> %a2, %b2
+ %t = trunc <4 x i16> %ab to <4 x i8>
+ %r = shufflevector <4 x i8> %t, <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i8> %r
+}
+
+define <4 x i64> @cast_mismatched_types(<4 x i32> %x) {
+; CHECK-LABEL: @cast_mismatched_types(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i32> [[SHUF]] to <2 x i64>
+; CHECK-NEXT: [[EXTSHUF:%.*]] = shufflevector <2 x i64> [[ZEXT]], <2 x i64> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT: ret <4 x i64> [[EXTSHUF]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <2 x i32> <i32 0, i32 2>
+ %zext = zext <2 x i32> %shuf to <2 x i64>
+ %extshuf = shufflevector <2 x i64> %zext, <2 x i64> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+ ret <4 x i64> %extshuf
+}
+
+define <4 x float> @fadd_mismatched_types(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: @fadd_mismatched_types(
+; CHECK-NEXT: [[SHUF_X:%.*]] = shufflevector <4 x float> [[X:%.*]], <4 x float> poison, <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[SHUF_Y:%.*]] = shufflevector <4 x float> [[Y:%.*]], <4 x float> poison, <2 x i32> <i32 1, i32 3>
+; CHECK-NEXT: [[FADD:%.*]] = fadd fast <2 x float> [[SHUF_X]], [[SHUF_Y]]
+; CHECK-NEXT: [[EXTSHUF:%.*]] = shufflevector <2 x float> [[FADD]], <2 x float> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT: ret <4 x float> [[EXTSHUF]]
+;
+ %shuf.x = shufflevector <4 x float> %x, <4 x float> poison, <2 x i32> <i32 0, i32 2>
+ %shuf.y = shufflevector <4 x float> %y, <4 x float> poison, <2 x i32> <i32 1, i32 3>
+ %fadd = fadd fast <2 x float> %shuf.x, %shuf.y
+ %extshuf = shufflevector <2 x float> %fadd, <2 x float> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+ ret <4 x float> %extshuf
+}
+
+
declare void @use(<4 x i8>)
diff --git a/llvm/test/Transforms/VectorCombine/X86/select-shuffle.ll b/llvm/test/Transforms/VectorCombine/X86/select-shuffle.ll
index 60a6c4b1d9b9..685d661ea6bc 100644
--- a/llvm/test/Transforms/VectorCombine/X86/select-shuffle.ll
+++ b/llvm/test/Transforms/VectorCombine/X86/select-shuffle.ll
@@ -12,11 +12,12 @@ define <4 x double> @PR60649() {
; CHECK: unreachable:
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[TMP0:%.*]] = phi <4 x double> [ zeroinitializer, [[ENTRY:%.*]] ], [ zeroinitializer, [[UNREACHABLE:%.*]] ]
+; CHECK-NEXT: [[T0:%.*]] = phi <4 x double> [ zeroinitializer, [[ENTRY:%.*]] ], [ zeroinitializer, [[UNREACHABLE:%.*]] ]
; CHECK-NEXT: [[T1:%.*]] = phi <4 x double> [ zeroinitializer, [[ENTRY]] ], [ zeroinitializer, [[UNREACHABLE]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[TMP0]], <4 x double> [[TMP0]], <4 x i32> <i32 2, i32 3, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP2:%.*]] = fdiv <4 x double> [[TMP0]], <double 0.000000e+00, double 0.000000e+00, double undef, double undef>
-; CHECK-NEXT: [[TMP3:%.*]] = fmul <4 x double> [[TMP1]], <double 0.000000e+00, double 0.000000e+00, double undef, double undef>
+; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <4 x double> [[T0]], <4 x double> [[T0]], <4 x i32> <i32 2, i32 3, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x double> [[T0]], <4 x double> [[T0]], <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP2:%.*]] = fdiv <4 x double> [[TMP1]], <double 0.000000e+00, double 0.000000e+00, double undef, double undef>
+; CHECK-NEXT: [[TMP3:%.*]] = fmul <4 x double> [[TMP0]], <double 0.000000e+00, double 0.000000e+00, double undef, double undef>
; CHECK-NEXT: [[T5:%.*]] = shufflevector <4 x double> [[TMP2]], <4 x double> [[TMP3]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
; CHECK-NEXT: ret <4 x double> [[T5]]
;
diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_function_alt.ll b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_function_alt.ll
new file mode 100644
index 000000000000..36f105592462
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_function_alt.ll
@@ -0,0 +1,6 @@
+; RUN: cat %S/amdgpu_function_alt.s | FileCheck --check-prefixes=CHECK %s
+
+define float @sample(float %x) {
+ %y = fmul float %x, %x
+ ret float %y
+}
diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_function_alt.ll.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_function_alt.ll.expected
new file mode 100644
index 000000000000..ef5378a720e5
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_function_alt.ll.expected
@@ -0,0 +1,11 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --tool cat --default-march amdgcn --version 4
+; RUN: cat %S/amdgpu_function_alt.s | FileCheck --check-prefixes=CHECK %s
+
+define float @sample(float %x) {
+; CHECK-LABEL: sample:
+; CHECK: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mul_f32_e32 v0, v0, v0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %y = fmul float %x, %x
+ ret float %y
+}
diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_function_alt.s b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_function_alt.s
new file mode 100644
index 000000000000..d61e7cdd954c
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_function_alt.s
@@ -0,0 +1,32 @@
+ .text
+ .amdgcn_target "amdgcn--amdpal--gfx1030"
+ .globl sample
+ .p2align 2
+ .type sample,@function
+sample:
+ s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+ v_mul_f32_e32 v0, v0, v0
+ s_setpc_b64 s[30:31]
+.Lfunc_end0:
+ .size sample, .Lfunc_end0-sample
+
+ .p2alignl 6, 3214868480
+ .fill 48, 4, 3214868480
+ .section ".note.GNU-stack","",@progbits
+ .amd_amdgpu_isa "amdgcn--amdpal--gfx1030"
+ .amdgpu_pal_metadata
+---
+amdpal.pipelines:
+ - .api: !str ''
+ .shader_functions:
+ sample:
+ .backend_stack_size: 0
+ .lds_size: 0
+ .sgpr_count: 0x20
+ .stack_frame_size_in_bytes: 0
+ .vgpr_count: 0x1
+amdpal.version:
+ - 0x3
+ - 0
+...
+ .end_amdgpu_pal_metadata
diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/amdgpu_function_alt.test b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/amdgpu_function_alt.test
new file mode 100644
index 000000000000..5142b01945eb
--- /dev/null
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/amdgpu_function_alt.test
@@ -0,0 +1,31 @@
+## Checks two things:
+# - matching functions in AMDGPU assembly output without certain comments in the assembly
+# - using the --tool and --default-march arguments
+
+# Using `cat` as the tool requires some unusual setup (compared to the other
+# UpdateTestChecks tool tests), because update_llc_test_checks only reliably
+# substitutes %S (the source directory) when evaluating RUN lines in the .ll
+# file. The .ll file used during the test is a temporary copy, so we also
+# provide a temporary copy of the cat'd .s file rather than rely on
+# implementation details of how llvm-lit chooses %t; see the sketch below.
+
+# RUN: rm -rf %t.dir && mkdir %t.dir
+# RUN: cp -f %S/Inputs/amdgpu_function_alt.s %t.dir/
+# RUN: cp -f %S/Inputs/amdgpu_function_alt.ll %t.dir/
+
+# RUN: %update_llc_test_checks --llc-binary "" --tool cat --default-march=amdgcn --version 4 %t.dir/amdgpu_function_alt.ll
+# RUN: diff -u %S/Inputs/amdgpu_function_alt.ll.expected %t.dir/amdgpu_function_alt.ll
+
+# Check that re-running remembers the arguments and doesn't change anything
+
+# RUN: %update_llc_test_checks --llc-binary "" %t.dir/amdgpu_function_alt.ll
+# RUN: diff -u %S/Inputs/amdgpu_function_alt.ll.expected %t.dir/amdgpu_function_alt.ll
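+
+## For reference, a hand-run sketch equivalent to the RUN lines above (the
+## %update_llc_test_checks substitution resolves to the
+## utils/update_llc_test_checks.py script in an LLVM checkout; the paths here
+## are illustrative):
+#
+#   cp llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_function_alt.ll /tmp/
+#   cp llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_function_alt.s /tmp/
+#   python3 llvm/utils/update_llc_test_checks.py --llc-binary "" --tool cat --default-march=amdgcn --version 4 /tmp/amdgpu_function_alt.ll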
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/phi-labels.ll.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/phi-labels.ll.expected
index 1d21ebe547f6..5e70a6c89d32 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/phi-labels.ll.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/phi-labels.ll.expected
@@ -1,15 +1,15 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt < %s -S | FileCheck %s
define i32 @phi_after_label(i1 %cc) {
; CHECK-LABEL: define i32 @phi_after_label(
; CHECK-SAME: i1 [[CC:%.*]]) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: br i1 [[CC]], label [[THEN:%.*]], label [[END:%.*]]
-; CHECK: then:
-; CHECK-NEXT: br label [[END]]
-; CHECK: end:
-; CHECK-NEXT: [[R:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ 1, [[THEN]] ]
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br i1 [[CC]], label %[[THEN:.*]], label %[[END:.*]]
+; CHECK: [[THEN]]:
+; CHECK-NEXT: br label %[[END]]
+; CHECK: [[END]]:
+; CHECK-NEXT: [[R:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ 1, %[[THEN]] ]
; CHECK-NEXT: ret i32 [[R]]
;
entry:
@@ -26,14 +26,14 @@ end:
define void @phi_before_label(i32 %bound) {
; CHECK-LABEL: define void @phi_before_label(
; CHECK-SAME: i32 [[BOUND:%.*]]) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[CTR:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[CTR_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[CTR:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[CTR_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[CTR_NEXT]] = add i32 [[CTR]], 1
; CHECK-NEXT: [[CC:%.*]] = icmp ult i32 [[CTR_NEXT]], [[BOUND]]
-; CHECK-NEXT: br i1 [[CC]], label [[LOOP]], label [[END:%.*]]
-; CHECK: end:
+; CHECK-NEXT: br i1 [[CC]], label %[[LOOP]], label %[[END:.*]]
+; CHECK: [[END]]:
; CHECK-NEXT: ret void
;
entry:
@@ -52,11 +52,11 @@ end:
define i32 @phi_after_label_unnamed(i1 %cc) {
; CHECK-LABEL: define i32 @phi_after_label_unnamed(
; CHECK-SAME: i1 [[CC:%.*]]) {
-; CHECK-NEXT: br i1 [[CC]], label [[TMP1:%.*]], label [[TMP2:%.*]]
-; CHECK: 1:
-; CHECK-NEXT: br label [[TMP2]]
-; CHECK: 2:
-; CHECK-NEXT: [[R:%.*]] = phi i32 [ 0, [[TMP0:%.*]] ], [ 1, [[TMP1]] ]
+; CHECK-NEXT: br i1 [[CC]], label %[[BB1:.*]], label %[[BB2:.*]]
+; CHECK: [[BB1]]:
+; CHECK-NEXT: br label %[[BB2]]
+; CHECK: [[BB2]]:
+; CHECK-NEXT: [[R:%.*]] = phi i32 [ 0, [[TMP0:%.*]] ], [ 1, %[[BB1]] ]
; CHECK-NEXT: ret i32 [[R]]
;
0:
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/phi-labels.test b/llvm/test/tools/UpdateTestChecks/update_test_checks/phi-labels.test
index 411c84de1dcb..2b0d0cb7f54b 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/phi-labels.test
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/phi-labels.test
@@ -1,4 +1,4 @@
-# RUN: cp -f %S/Inputs/phi-labels.ll %t.ll && %update_test_checks --version 4 %t.ll
+# RUN: cp -f %S/Inputs/phi-labels.ll %t.ll && %update_test_checks --version 5 %t.ll
# RUN: diff -u %t.ll %S/Inputs/phi-labels.ll.expected
## Check that running the script again does not change the result:
# RUN: %update_test_checks %t.ll
diff --git a/llvm/test/tools/llvm-driver/symlink-call.test b/llvm/test/tools/llvm-driver/symlink-call.test
index eeedf9edc73f..ca6098216b13 100644
--- a/llvm/test/tools/llvm-driver/symlink-call.test
+++ b/llvm/test/tools/llvm-driver/symlink-call.test
@@ -14,6 +14,8 @@
# RUN: %t/cxxfilt-15 --help | FileCheck %s
# RUN: ln -s %llvm %t/cxxfilt-15.exe
# RUN: %t/cxxfilt-15.exe --help | FileCheck %s
+# RUN: ln -s %llvm %t/c++filt
+# RUN: %t/c++filt --help | FileCheck %s
# RUN: ln -s %llvm %t/llvm-15
# RUN: %t/llvm-15 cxxfilt --help | FileCheck %s
diff --git a/llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-clear-upper-regs.s b/llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-clear-upper-regs.s
new file mode 100644
index 000000000000..ab81f9fb04af
--- /dev/null
+++ b/llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-clear-upper-regs.s
@@ -0,0 +1,791 @@
+# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
+# RUN: llvm-mca -mtriple=aarch64 -mcpu=neoverse-v1 --timeline --timeline-max-iterations=4 < %s | FileCheck %s
+
+# LLVM-MCA-BEGIN GPR32-bit
+ldr w0, [sp]
+add x0, x0, x0
+# LLVM-MCA-END
+
+# LLVM-MCA-BEGIN FPR8-bit
+ldr b0, [sp]
+fadd d0, d0, d0
+# LLVM-MCA-END
+
+# LLVM-MCA-BEGIN FPR16-bit
+ldr h0, [sp]
+fadd d0, d0, d0
+# LLVM-MCA-END
+
+# LLVM-MCA-BEGIN FPR32-bit
+ldr s0, [sp]
+fadd d0, d0, d0
+# LLVM-MCA-END
+
+# LLVM-MCA-BEGIN SIMD64-bit-b
+ld1 {v0.8b}, [sp]
+add v0.16b, v0.16b, v0.16b
+# LLVM-MCA-END
+
+# LLVM-MCA-BEGIN SIMD64-bit-h
+ld1 {v0.4h}, [sp]
+add v0.8h, v0.8h, v0.8h
+# LLVM-MCA-END
+
+# LLVM-MCA-BEGIN SIMD64-bit-s
+ld1 {v0.2s}, [sp]
+add v0.4s, v0.4s, v0.4s
+# LLVM-MCA-END
+
+# LLVM-MCA-BEGIN SIMD64-bit-d
+ld1 {v0.1d}, [sp]
+add v0.2d, v0.2d, v0.2d
+# LLVM-MCA-END
+
+# LLVM-MCA-BEGIN ins
+ins v0.b[0], v1.b[1]
+add v0.16b, v0.16b, v0.16b
+# LLVM-MCA-END
+
+# LLVM-MCA-BEGIN lanewise-load
+ld1 {v0.b}[0], [sp]
+add v0.16b, v0.16b, v0.16b
+# LLVM-MCA-END
+
+# CHECK: [0] Code Region - GPR32-bit
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 41
+# CHECK-NEXT: Total uOps: 200
+
+# CHECK: Dispatch Width: 15
+# CHECK-NEXT: uOps Per Cycle: 4.88
+# CHECK-NEXT: IPC: 4.88
+# CHECK-NEXT: Block RThroughput: 0.3
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 4 0.33 * ldr w0, [sp]
+# CHECK-NEXT: 1 1 0.25 add x0, x0, x0
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V1UnitB
+# CHECK-NEXT: [0.1] - V1UnitB
+# CHECK-NEXT: [1.0] - V1UnitD
+# CHECK-NEXT: [1.1] - V1UnitD
+# CHECK-NEXT: [2] - V1UnitL2
+# CHECK-NEXT: [3.0] - V1UnitL01
+# CHECK-NEXT: [3.1] - V1UnitL01
+# CHECK-NEXT: [4] - V1UnitM0
+# CHECK-NEXT: [5] - V1UnitM1
+# CHECK-NEXT: [6.0] - V1UnitS
+# CHECK-NEXT: [6.1] - V1UnitS
+# CHECK-NEXT: [7] - V1UnitV0
+# CHECK-NEXT: [8] - V1UnitV1
+# CHECK-NEXT: [9] - V1UnitV2
+# CHECK-NEXT: [10] - V1UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10]
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 0.22 0.22 0.28 0.28 - - - -
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10] Instructions:
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - - - ldr w0, [sp]
+# CHECK-NEXT: - - - - - - - 0.22 0.22 0.28 0.28 - - - - add x0, x0, x0
+
+# CHECK: Timeline view:
+# CHECK-NEXT: Index 012345678
+
+# CHECK: [0,0] DeeeeER . ldr w0, [sp]
+# CHECK-NEXT: [0,1] D====eER. add x0, x0, x0
+# CHECK-NEXT: [1,0] DeeeeE-R. ldr w0, [sp]
+# CHECK-NEXT: [1,1] D====eER. add x0, x0, x0
+# CHECK-NEXT: [2,0] DeeeeE-R. ldr w0, [sp]
+# CHECK-NEXT: [2,1] D====eER. add x0, x0, x0
+# CHECK-NEXT: [3,0] D=eeeeER. ldr w0, [sp]
+# CHECK-NEXT: [3,1] D=====eER add x0, x0, x0
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 1.3 1.3 0.5 ldr w0, [sp]
+# CHECK-NEXT: 1. 4 5.3 0.0 0.0 add x0, x0, x0
+# CHECK-NEXT: 4 3.3 0.6 0.3 <total>
+
+# CHECK: [1] Code Region - FPR8-bit
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 44
+# CHECK-NEXT: Total uOps: 200
+
+# CHECK: Dispatch Width: 15
+# CHECK-NEXT: uOps Per Cycle: 4.55
+# CHECK-NEXT: IPC: 4.55
+# CHECK-NEXT: Block RThroughput: 0.3
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 6 0.33 * ldr b0, [sp]
+# CHECK-NEXT: 1 2 0.25 fadd d0, d0, d0
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V1UnitB
+# CHECK-NEXT: [0.1] - V1UnitB
+# CHECK-NEXT: [1.0] - V1UnitD
+# CHECK-NEXT: [1.1] - V1UnitD
+# CHECK-NEXT: [2] - V1UnitL2
+# CHECK-NEXT: [3.0] - V1UnitL01
+# CHECK-NEXT: [3.1] - V1UnitL01
+# CHECK-NEXT: [4] - V1UnitM0
+# CHECK-NEXT: [5] - V1UnitM1
+# CHECK-NEXT: [6.0] - V1UnitS
+# CHECK-NEXT: [6.1] - V1UnitS
+# CHECK-NEXT: [7] - V1UnitV0
+# CHECK-NEXT: [8] - V1UnitV1
+# CHECK-NEXT: [9] - V1UnitV2
+# CHECK-NEXT: [10] - V1UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10]
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - 0.25 0.25 0.25 0.25
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10] Instructions:
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - - - ldr b0, [sp]
+# CHECK-NEXT: - - - - - - - - - - - 0.25 0.25 0.25 0.25 fadd d0, d0, d0
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 01
+# CHECK-NEXT: Index 0123456789
+
+# CHECK: [0,0] DeeeeeeER .. ldr b0, [sp]
+# CHECK-NEXT: [0,1] D======eeER. fadd d0, d0, d0
+# CHECK-NEXT: [1,0] DeeeeeeE--R. ldr b0, [sp]
+# CHECK-NEXT: [1,1] D======eeER. fadd d0, d0, d0
+# CHECK-NEXT: [2,0] DeeeeeeE--R. ldr b0, [sp]
+# CHECK-NEXT: [2,1] D======eeER. fadd d0, d0, d0
+# CHECK-NEXT: [3,0] D=eeeeeeE-R. ldr b0, [sp]
+# CHECK-NEXT: [3,1] D=======eeER fadd d0, d0, d0
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 1.3 1.3 1.3 ldr b0, [sp]
+# CHECK-NEXT: 1. 4 7.3 0.0 0.0 fadd d0, d0, d0
+# CHECK-NEXT: 4 4.3 0.6 0.6 <total>
+
+# CHECK: [2] Code Region - FPR16-bit
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 44
+# CHECK-NEXT: Total uOps: 200
+
+# CHECK: Dispatch Width: 15
+# CHECK-NEXT: uOps Per Cycle: 4.55
+# CHECK-NEXT: IPC: 4.55
+# CHECK-NEXT: Block RThroughput: 0.3
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 6 0.33 * ldr h0, [sp]
+# CHECK-NEXT: 1 2 0.25 fadd d0, d0, d0
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V1UnitB
+# CHECK-NEXT: [0.1] - V1UnitB
+# CHECK-NEXT: [1.0] - V1UnitD
+# CHECK-NEXT: [1.1] - V1UnitD
+# CHECK-NEXT: [2] - V1UnitL2
+# CHECK-NEXT: [3.0] - V1UnitL01
+# CHECK-NEXT: [3.1] - V1UnitL01
+# CHECK-NEXT: [4] - V1UnitM0
+# CHECK-NEXT: [5] - V1UnitM1
+# CHECK-NEXT: [6.0] - V1UnitS
+# CHECK-NEXT: [6.1] - V1UnitS
+# CHECK-NEXT: [7] - V1UnitV0
+# CHECK-NEXT: [8] - V1UnitV1
+# CHECK-NEXT: [9] - V1UnitV2
+# CHECK-NEXT: [10] - V1UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10]
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - 0.25 0.25 0.25 0.25
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10] Instructions:
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - - - ldr h0, [sp]
+# CHECK-NEXT: - - - - - - - - - - - 0.25 0.25 0.25 0.25 fadd d0, d0, d0
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 01
+# CHECK-NEXT: Index 0123456789
+
+# CHECK: [0,0] DeeeeeeER .. ldr h0, [sp]
+# CHECK-NEXT: [0,1] D======eeER. fadd d0, d0, d0
+# CHECK-NEXT: [1,0] DeeeeeeE--R. ldr h0, [sp]
+# CHECK-NEXT: [1,1] D======eeER. fadd d0, d0, d0
+# CHECK-NEXT: [2,0] DeeeeeeE--R. ldr h0, [sp]
+# CHECK-NEXT: [2,1] D======eeER. fadd d0, d0, d0
+# CHECK-NEXT: [3,0] D=eeeeeeE-R. ldr h0, [sp]
+# CHECK-NEXT: [3,1] D=======eeER fadd d0, d0, d0
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 1.3 1.3 1.3 ldr h0, [sp]
+# CHECK-NEXT: 1. 4 7.3 0.0 0.0 fadd d0, d0, d0
+# CHECK-NEXT: 4 4.3 0.6 0.6 <total>
+
+# CHECK: [3] Code Region - FPR32-bit
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 44
+# CHECK-NEXT: Total uOps: 200
+
+# CHECK: Dispatch Width: 15
+# CHECK-NEXT: uOps Per Cycle: 4.55
+# CHECK-NEXT: IPC: 4.55
+# CHECK-NEXT: Block RThroughput: 0.3
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 6 0.33 * ldr s0, [sp]
+# CHECK-NEXT: 1 2 0.25 fadd d0, d0, d0
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V1UnitB
+# CHECK-NEXT: [0.1] - V1UnitB
+# CHECK-NEXT: [1.0] - V1UnitD
+# CHECK-NEXT: [1.1] - V1UnitD
+# CHECK-NEXT: [2] - V1UnitL2
+# CHECK-NEXT: [3.0] - V1UnitL01
+# CHECK-NEXT: [3.1] - V1UnitL01
+# CHECK-NEXT: [4] - V1UnitM0
+# CHECK-NEXT: [5] - V1UnitM1
+# CHECK-NEXT: [6.0] - V1UnitS
+# CHECK-NEXT: [6.1] - V1UnitS
+# CHECK-NEXT: [7] - V1UnitV0
+# CHECK-NEXT: [8] - V1UnitV1
+# CHECK-NEXT: [9] - V1UnitV2
+# CHECK-NEXT: [10] - V1UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10]
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - 0.25 0.25 0.25 0.25
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10] Instructions:
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - - - ldr s0, [sp]
+# CHECK-NEXT: - - - - - - - - - - - 0.25 0.25 0.25 0.25 fadd d0, d0, d0
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 01
+# CHECK-NEXT: Index 0123456789
+
+# CHECK: [0,0] DeeeeeeER .. ldr s0, [sp]
+# CHECK-NEXT: [0,1] D======eeER. fadd d0, d0, d0
+# CHECK-NEXT: [1,0] DeeeeeeE--R. ldr s0, [sp]
+# CHECK-NEXT: [1,1] D======eeER. fadd d0, d0, d0
+# CHECK-NEXT: [2,0] DeeeeeeE--R. ldr s0, [sp]
+# CHECK-NEXT: [2,1] D======eeER. fadd d0, d0, d0
+# CHECK-NEXT: [3,0] D=eeeeeeE-R. ldr s0, [sp]
+# CHECK-NEXT: [3,1] D=======eeER fadd d0, d0, d0
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 1.3 1.3 1.3 ldr s0, [sp]
+# CHECK-NEXT: 1. 4 7.3 0.0 0.0 fadd d0, d0, d0
+# CHECK-NEXT: 4 4.3 0.6 0.6 <total>
+
+# CHECK: [4] Code Region - SIMD64-bit-b
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 44
+# CHECK-NEXT: Total uOps: 200
+
+# CHECK: Dispatch Width: 15
+# CHECK-NEXT: uOps Per Cycle: 4.55
+# CHECK-NEXT: IPC: 4.55
+# CHECK-NEXT: Block RThroughput: 0.3
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 6 0.33 * ld1 { v0.8b }, [sp]
+# CHECK-NEXT: 1 2 0.25 add v0.16b, v0.16b, v0.16b
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V1UnitB
+# CHECK-NEXT: [0.1] - V1UnitB
+# CHECK-NEXT: [1.0] - V1UnitD
+# CHECK-NEXT: [1.1] - V1UnitD
+# CHECK-NEXT: [2] - V1UnitL2
+# CHECK-NEXT: [3.0] - V1UnitL01
+# CHECK-NEXT: [3.1] - V1UnitL01
+# CHECK-NEXT: [4] - V1UnitM0
+# CHECK-NEXT: [5] - V1UnitM1
+# CHECK-NEXT: [6.0] - V1UnitS
+# CHECK-NEXT: [6.1] - V1UnitS
+# CHECK-NEXT: [7] - V1UnitV0
+# CHECK-NEXT: [8] - V1UnitV1
+# CHECK-NEXT: [9] - V1UnitV2
+# CHECK-NEXT: [10] - V1UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10]
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - 0.25 0.25 0.25 0.25
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10] Instructions:
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - - - ld1 { v0.8b }, [sp]
+# CHECK-NEXT: - - - - - - - - - - - 0.25 0.25 0.25 0.25 add v0.16b, v0.16b, v0.16b
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 01
+# CHECK-NEXT: Index 0123456789
+
+# CHECK: [0,0] DeeeeeeER .. ld1 { v0.8b }, [sp]
+# CHECK-NEXT: [0,1] D======eeER. add v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: [1,0] DeeeeeeE--R. ld1 { v0.8b }, [sp]
+# CHECK-NEXT: [1,1] D======eeER. add v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: [2,0] DeeeeeeE--R. ld1 { v0.8b }, [sp]
+# CHECK-NEXT: [2,1] D======eeER. add v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: [3,0] D=eeeeeeE-R. ld1 { v0.8b }, [sp]
+# CHECK-NEXT: [3,1] D=======eeER add v0.16b, v0.16b, v0.16b
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 1.3 1.3 1.3 ld1 { v0.8b }, [sp]
+# CHECK-NEXT: 1. 4 7.3 0.0 0.0 add v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 4 4.3 0.6 0.6 <total>
+
+# CHECK: [5] Code Region - SIMD64-bit-h
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 44
+# CHECK-NEXT: Total uOps: 200
+
+# CHECK: Dispatch Width: 15
+# CHECK-NEXT: uOps Per Cycle: 4.55
+# CHECK-NEXT: IPC: 4.55
+# CHECK-NEXT: Block RThroughput: 0.3
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 6 0.33 * ld1 { v0.4h }, [sp]
+# CHECK-NEXT: 1 2 0.25 add v0.8h, v0.8h, v0.8h
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V1UnitB
+# CHECK-NEXT: [0.1] - V1UnitB
+# CHECK-NEXT: [1.0] - V1UnitD
+# CHECK-NEXT: [1.1] - V1UnitD
+# CHECK-NEXT: [2] - V1UnitL2
+# CHECK-NEXT: [3.0] - V1UnitL01
+# CHECK-NEXT: [3.1] - V1UnitL01
+# CHECK-NEXT: [4] - V1UnitM0
+# CHECK-NEXT: [5] - V1UnitM1
+# CHECK-NEXT: [6.0] - V1UnitS
+# CHECK-NEXT: [6.1] - V1UnitS
+# CHECK-NEXT: [7] - V1UnitV0
+# CHECK-NEXT: [8] - V1UnitV1
+# CHECK-NEXT: [9] - V1UnitV2
+# CHECK-NEXT: [10] - V1UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10]
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - 0.25 0.25 0.25 0.25
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10] Instructions:
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - - - ld1 { v0.4h }, [sp]
+# CHECK-NEXT: - - - - - - - - - - - 0.25 0.25 0.25 0.25 add v0.8h, v0.8h, v0.8h
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 01
+# CHECK-NEXT: Index 0123456789
+
+# CHECK: [0,0] DeeeeeeER .. ld1 { v0.4h }, [sp]
+# CHECK-NEXT: [0,1] D======eeER. add v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: [1,0] DeeeeeeE--R. ld1 { v0.4h }, [sp]
+# CHECK-NEXT: [1,1] D======eeER. add v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: [2,0] DeeeeeeE--R. ld1 { v0.4h }, [sp]
+# CHECK-NEXT: [2,1] D======eeER. add v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: [3,0] D=eeeeeeE-R. ld1 { v0.4h }, [sp]
+# CHECK-NEXT: [3,1] D=======eeER add v0.8h, v0.8h, v0.8h
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 1.3 1.3 1.3 ld1 { v0.4h }, [sp]
+# CHECK-NEXT: 1. 4 7.3 0.0 0.0 add v0.8h, v0.8h, v0.8h
+# CHECK-NEXT: 4 4.3 0.6 0.6 <total>
+
+# CHECK: [6] Code Region - SIMD64-bit-s
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 44
+# CHECK-NEXT: Total uOps: 200
+
+# CHECK: Dispatch Width: 15
+# CHECK-NEXT: uOps Per Cycle: 4.55
+# CHECK-NEXT: IPC: 4.55
+# CHECK-NEXT: Block RThroughput: 0.3
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 6 0.33 * ld1 { v0.2s }, [sp]
+# CHECK-NEXT: 1 2 0.25 add v0.4s, v0.4s, v0.4s
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V1UnitB
+# CHECK-NEXT: [0.1] - V1UnitB
+# CHECK-NEXT: [1.0] - V1UnitD
+# CHECK-NEXT: [1.1] - V1UnitD
+# CHECK-NEXT: [2] - V1UnitL2
+# CHECK-NEXT: [3.0] - V1UnitL01
+# CHECK-NEXT: [3.1] - V1UnitL01
+# CHECK-NEXT: [4] - V1UnitM0
+# CHECK-NEXT: [5] - V1UnitM1
+# CHECK-NEXT: [6.0] - V1UnitS
+# CHECK-NEXT: [6.1] - V1UnitS
+# CHECK-NEXT: [7] - V1UnitV0
+# CHECK-NEXT: [8] - V1UnitV1
+# CHECK-NEXT: [9] - V1UnitV2
+# CHECK-NEXT: [10] - V1UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10]
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - 0.25 0.25 0.25 0.25
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10] Instructions:
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - - - ld1 { v0.2s }, [sp]
+# CHECK-NEXT: - - - - - - - - - - - 0.25 0.25 0.25 0.25 add v0.4s, v0.4s, v0.4s
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 01
+# CHECK-NEXT: Index 0123456789
+
+# CHECK: [0,0] DeeeeeeER .. ld1 { v0.2s }, [sp]
+# CHECK-NEXT: [0,1] D======eeER. add v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: [1,0] DeeeeeeE--R. ld1 { v0.2s }, [sp]
+# CHECK-NEXT: [1,1] D======eeER. add v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: [2,0] DeeeeeeE--R. ld1 { v0.2s }, [sp]
+# CHECK-NEXT: [2,1] D======eeER. add v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: [3,0] D=eeeeeeE-R. ld1 { v0.2s }, [sp]
+# CHECK-NEXT: [3,1] D=======eeER add v0.4s, v0.4s, v0.4s
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 1.3 1.3 1.3 ld1 { v0.2s }, [sp]
+# CHECK-NEXT: 1. 4 7.3 0.0 0.0 add v0.4s, v0.4s, v0.4s
+# CHECK-NEXT: 4 4.3 0.6 0.6 <total>
+
+# CHECK: [7] Code Region - SIMD64-bit-d
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 44
+# CHECK-NEXT: Total uOps: 200
+
+# CHECK: Dispatch Width: 15
+# CHECK-NEXT: uOps Per Cycle: 4.55
+# CHECK-NEXT: IPC: 4.55
+# CHECK-NEXT: Block RThroughput: 0.3
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 6 0.33 * ld1 { v0.1d }, [sp]
+# CHECK-NEXT: 1 2 0.25 add v0.2d, v0.2d, v0.2d
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V1UnitB
+# CHECK-NEXT: [0.1] - V1UnitB
+# CHECK-NEXT: [1.0] - V1UnitD
+# CHECK-NEXT: [1.1] - V1UnitD
+# CHECK-NEXT: [2] - V1UnitL2
+# CHECK-NEXT: [3.0] - V1UnitL01
+# CHECK-NEXT: [3.1] - V1UnitL01
+# CHECK-NEXT: [4] - V1UnitM0
+# CHECK-NEXT: [5] - V1UnitM1
+# CHECK-NEXT: [6.0] - V1UnitS
+# CHECK-NEXT: [6.1] - V1UnitS
+# CHECK-NEXT: [7] - V1UnitV0
+# CHECK-NEXT: [8] - V1UnitV1
+# CHECK-NEXT: [9] - V1UnitV2
+# CHECK-NEXT: [10] - V1UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10]
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - 0.25 0.25 0.25 0.25
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10] Instructions:
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - - - ld1 { v0.1d }, [sp]
+# CHECK-NEXT: - - - - - - - - - - - 0.25 0.25 0.25 0.25 add v0.2d, v0.2d, v0.2d
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 01
+# CHECK-NEXT: Index 0123456789
+
+# CHECK: [0,0] DeeeeeeER .. ld1 { v0.1d }, [sp]
+# CHECK-NEXT: [0,1] D======eeER. add v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: [1,0] DeeeeeeE--R. ld1 { v0.1d }, [sp]
+# CHECK-NEXT: [1,1] D======eeER. add v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: [2,0] DeeeeeeE--R. ld1 { v0.1d }, [sp]
+# CHECK-NEXT: [2,1] D======eeER. add v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: [3,0] D=eeeeeeE-R. ld1 { v0.1d }, [sp]
+# CHECK-NEXT: [3,1] D=======eeER add v0.2d, v0.2d, v0.2d
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 1.3 1.3 1.3 ld1 { v0.1d }, [sp]
+# CHECK-NEXT: 1. 4 7.3 0.0 0.0 add v0.2d, v0.2d, v0.2d
+# CHECK-NEXT: 4 4.3 0.6 0.6 <total>
+
+# CHECK: [8] Code Region - ins
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 403
+# CHECK-NEXT: Total uOps: 200
+
+# CHECK: Dispatch Width: 15
+# CHECK-NEXT: uOps Per Cycle: 0.50
+# CHECK-NEXT: IPC: 0.50
+# CHECK-NEXT: Block RThroughput: 0.5
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 2 0.25 mov v0.b[0], v1.b[1]
+# CHECK-NEXT: 1 2 0.25 add v0.16b, v0.16b, v0.16b
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V1UnitB
+# CHECK-NEXT: [0.1] - V1UnitB
+# CHECK-NEXT: [1.0] - V1UnitD
+# CHECK-NEXT: [1.1] - V1UnitD
+# CHECK-NEXT: [2] - V1UnitL2
+# CHECK-NEXT: [3.0] - V1UnitL01
+# CHECK-NEXT: [3.1] - V1UnitL01
+# CHECK-NEXT: [4] - V1UnitM0
+# CHECK-NEXT: [5] - V1UnitM1
+# CHECK-NEXT: [6.0] - V1UnitS
+# CHECK-NEXT: [6.1] - V1UnitS
+# CHECK-NEXT: [7] - V1UnitV0
+# CHECK-NEXT: [8] - V1UnitV1
+# CHECK-NEXT: [9] - V1UnitV2
+# CHECK-NEXT: [10] - V1UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10]
+# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 0.50 0.50
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10] Instructions:
+# CHECK-NEXT: - - - - - - - - - - - - 0.50 - 0.50 mov v0.b[0], v1.b[1]
+# CHECK-NEXT: - - - - - - - - - - - 0.50 - 0.50 - add v0.16b, v0.16b, v0.16b
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 012345678
+# CHECK-NEXT: Index 0123456789
+
+# CHECK: [0,0] DeeER. . . . mov v0.b[0], v1.b[1]
+# CHECK-NEXT: [0,1] D==eeER . . . add v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: [1,0] D====eeER . . . mov v0.b[0], v1.b[1]
+# CHECK-NEXT: [1,1] D======eeER . . add v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: [2,0] D========eeER . . mov v0.b[0], v1.b[1]
+# CHECK-NEXT: [2,1] D==========eeER. . add v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: [3,0] D============eeER . mov v0.b[0], v1.b[1]
+# CHECK-NEXT: [3,1] D==============eeER add v0.16b, v0.16b, v0.16b
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 7.0 0.3 0.0 mov v0.b[0], v1.b[1]
+# CHECK-NEXT: 1. 4 9.0 0.0 0.0 add v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 4 8.0 0.1 0.0 <total>
+
+# CHECK: [9] Code Region - lanewise-load
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 1003
+# CHECK-NEXT: Total uOps: 300
+
+# CHECK: Dispatch Width: 15
+# CHECK-NEXT: uOps Per Cycle: 0.30
+# CHECK-NEXT: IPC: 0.20
+# CHECK-NEXT: Block RThroughput: 0.5
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 2 8 0.33 * ld1 { v0.b }[0], [sp]
+# CHECK-NEXT: 1 2 0.25 add v0.16b, v0.16b, v0.16b
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V1UnitB
+# CHECK-NEXT: [0.1] - V1UnitB
+# CHECK-NEXT: [1.0] - V1UnitD
+# CHECK-NEXT: [1.1] - V1UnitD
+# CHECK-NEXT: [2] - V1UnitL2
+# CHECK-NEXT: [3.0] - V1UnitL01
+# CHECK-NEXT: [3.1] - V1UnitL01
+# CHECK-NEXT: [4] - V1UnitM0
+# CHECK-NEXT: [5] - V1UnitM1
+# CHECK-NEXT: [6.0] - V1UnitS
+# CHECK-NEXT: [6.1] - V1UnitS
+# CHECK-NEXT: [7] - V1UnitV0
+# CHECK-NEXT: [8] - V1UnitV1
+# CHECK-NEXT: [9] - V1UnitV2
+# CHECK-NEXT: [10] - V1UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10]
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - 0.50 0.50 0.50 0.50
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10] Instructions:
+# CHECK-NEXT: - - - - - 0.50 0.50 - - - - - 0.50 - 0.50 ld1 { v0.b }[0], [sp]
+# CHECK-NEXT: - - - - - - - - - - - 0.50 - 0.50 - add v0.16b, v0.16b, v0.16b
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 0123456789 0123456789
+# CHECK-NEXT: Index 0123456789 0123456789 012
+
+# CHECK: [0,0] DeeeeeeeeER . . . . . . . ld1 { v0.b }[0], [sp]
+# CHECK-NEXT: [0,1] D========eeER . . . . . . . add v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: [1,0] D==========eeeeeeeeER . . . . . ld1 { v0.b }[0], [sp]
+# CHECK-NEXT: [1,1] D==================eeER . . . . . add v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: [2,0] D====================eeeeeeeeER . . . ld1 { v0.b }[0], [sp]
+# CHECK-NEXT: [2,1] D============================eeER . . . add v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: [3,0] D==============================eeeeeeeeER . ld1 { v0.b }[0], [sp]
+# CHECK-NEXT: [3,1] D======================================eeER add v0.16b, v0.16b, v0.16b
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 16.0 0.3 0.0 ld1 { v0.b }[0], [sp]
+# CHECK-NEXT: 1. 4 24.0 0.0 0.0 add v0.16b, v0.16b, v0.16b
+# CHECK-NEXT: 4 20.0 0.1 0.0 <total>
diff --git a/llvm/test/tools/llvm-mca/AArch64/Neoverse/V2-clear-upper-regs.s b/llvm/test/tools/llvm-mca/AArch64/Neoverse/V2-clear-upper-regs.s
new file mode 100644
index 000000000000..fd2083dc1277
--- /dev/null
+++ b/llvm/test/tools/llvm-mca/AArch64/Neoverse/V2-clear-upper-regs.s
@@ -0,0 +1,812 @@
+# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
+# RUN: llvm-mca -mtriple=aarch64 -mcpu=neoverse-v2 --timeline --timeline-max-iterations=4 < %s | FileCheck %s
+
+# LLVM-MCA-BEGIN FPR8-bit
+ldr b0, [sp]
+add z0.d, z0.d, z0.d
+# LLVM-MCA-END
+
+# LLVM-MCA-BEGIN FPR16-bit
+ldr h0, [sp]
+add z0.d, z0.d, z0.d
+# LLVM-MCA-END
+
+# LLVM-MCA-BEGIN FPR32-bit
+ldr s0, [sp]
+add z0.d, z0.d, z0.d
+# LLVM-MCA-END
+
+# LLVM-MCA-BEGIN FPR64-bit
+ldr d0, [sp]
+add z0.d, z0.d, z0.d
+# LLVM-MCA-END
+
+# LLVM-MCA-BEGIN FPR128-bit
+ldr q0, [sp]
+add z0.d, z0.d, z0.d
+# LLVM-MCA-END
+
+# LLVM-MCA-BEGIN SIMD64-bit-b
+ld1 {v0.8b}, [sp]
+add z0.d, z0.d, z0.d
+# LLVM-MCA-END
+
+# LLVM-MCA-BEGIN SIMD64-bit-h
+ld1 {v0.4h}, [sp]
+add z0.d, z0.d, z0.d
+# LLVM-MCA-END
+
+# LLVM-MCA-BEGIN SIMD64-bit-s
+ld1 {v0.2s}, [sp]
+add z0.d, z0.d, z0.d
+# LLVM-MCA-END
+
+# LLVM-MCA-BEGIN SIMD64-bit-d
+ld1 {v0.1d}, [sp]
+add z0.d, z0.d, z0.d
+# LLVM-MCA-END
+
+# LLVM-MCA-BEGIN insr
+insr z0.s, w0
+add z0.s, z0.s, z0.s
+# LLVM-MCA-END
+
+# CHECK: [0] Code Region - FPR8-bit
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 44
+# CHECK-NEXT: Total uOps: 200
+
+# CHECK: Dispatch Width: 16
+# CHECK-NEXT: uOps Per Cycle: 4.55
+# CHECK-NEXT: IPC: 4.55
+# CHECK-NEXT: Block RThroughput: 0.3
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 6 0.33 * ldr b0, [sp]
+# CHECK-NEXT: 1 2 0.25 add z0.d, z0.d, z0.d
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V2UnitB
+# CHECK-NEXT: [0.1] - V2UnitB
+# CHECK-NEXT: [1.0] - V2UnitD
+# CHECK-NEXT: [1.1] - V2UnitD
+# CHECK-NEXT: [2] - V2UnitL2
+# CHECK-NEXT: [3.0] - V2UnitL01
+# CHECK-NEXT: [3.1] - V2UnitL01
+# CHECK-NEXT: [4] - V2UnitM0
+# CHECK-NEXT: [5] - V2UnitM1
+# CHECK-NEXT: [6] - V2UnitS0
+# CHECK-NEXT: [7] - V2UnitS1
+# CHECK-NEXT: [8] - V2UnitS2
+# CHECK-NEXT: [9] - V2UnitS3
+# CHECK-NEXT: [10] - V2UnitV0
+# CHECK-NEXT: [11] - V2UnitV1
+# CHECK-NEXT: [12] - V2UnitV2
+# CHECK-NEXT: [13] - V2UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13]
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - 0.25 0.25 0.25 0.25
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13] Instructions:
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - - - - - ldr b0, [sp]
+# CHECK-NEXT: - - - - - - - - - - - - - 0.25 0.25 0.25 0.25 add z0.d, z0.d, z0.d
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 01
+# CHECK-NEXT: Index 0123456789
+
+# CHECK: [0,0] DeeeeeeER .. ldr b0, [sp]
+# CHECK-NEXT: [0,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [1,0] DeeeeeeE--R. ldr b0, [sp]
+# CHECK-NEXT: [1,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [2,0] DeeeeeeE--R. ldr b0, [sp]
+# CHECK-NEXT: [2,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [3,0] D=eeeeeeE-R. ldr b0, [sp]
+# CHECK-NEXT: [3,1] D=======eeER add z0.d, z0.d, z0.d
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 1.3 1.3 1.3 ldr b0, [sp]
+# CHECK-NEXT: 1. 4 7.3 0.0 0.0 add z0.d, z0.d, z0.d
+# CHECK-NEXT: 4 4.3 0.6 0.6 <total>
+
+# CHECK: [1] Code Region - FPR16-bit
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 44
+# CHECK-NEXT: Total uOps: 200
+
+# CHECK: Dispatch Width: 16
+# CHECK-NEXT: uOps Per Cycle: 4.55
+# CHECK-NEXT: IPC: 4.55
+# CHECK-NEXT: Block RThroughput: 0.3
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 6 0.33 * ldr h0, [sp]
+# CHECK-NEXT: 1 2 0.25 add z0.d, z0.d, z0.d
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V2UnitB
+# CHECK-NEXT: [0.1] - V2UnitB
+# CHECK-NEXT: [1.0] - V2UnitD
+# CHECK-NEXT: [1.1] - V2UnitD
+# CHECK-NEXT: [2] - V2UnitL2
+# CHECK-NEXT: [3.0] - V2UnitL01
+# CHECK-NEXT: [3.1] - V2UnitL01
+# CHECK-NEXT: [4] - V2UnitM0
+# CHECK-NEXT: [5] - V2UnitM1
+# CHECK-NEXT: [6] - V2UnitS0
+# CHECK-NEXT: [7] - V2UnitS1
+# CHECK-NEXT: [8] - V2UnitS2
+# CHECK-NEXT: [9] - V2UnitS3
+# CHECK-NEXT: [10] - V2UnitV0
+# CHECK-NEXT: [11] - V2UnitV1
+# CHECK-NEXT: [12] - V2UnitV2
+# CHECK-NEXT: [13] - V2UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13]
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - 0.25 0.25 0.25 0.25
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13] Instructions:
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - - - - - ldr h0, [sp]
+# CHECK-NEXT: - - - - - - - - - - - - - 0.25 0.25 0.25 0.25 add z0.d, z0.d, z0.d
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 01
+# CHECK-NEXT: Index 0123456789
+
+# CHECK: [0,0] DeeeeeeER .. ldr h0, [sp]
+# CHECK-NEXT: [0,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [1,0] DeeeeeeE--R. ldr h0, [sp]
+# CHECK-NEXT: [1,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [2,0] DeeeeeeE--R. ldr h0, [sp]
+# CHECK-NEXT: [2,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [3,0] D=eeeeeeE-R. ldr h0, [sp]
+# CHECK-NEXT: [3,1] D=======eeER add z0.d, z0.d, z0.d
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 1.3 1.3 1.3 ldr h0, [sp]
+# CHECK-NEXT: 1. 4 7.3 0.0 0.0 add z0.d, z0.d, z0.d
+# CHECK-NEXT: 4 4.3 0.6 0.6 <total>
+
+# CHECK: [2] Code Region - FPR32-bit
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 44
+# CHECK-NEXT: Total uOps: 200
+
+# CHECK: Dispatch Width: 16
+# CHECK-NEXT: uOps Per Cycle: 4.55
+# CHECK-NEXT: IPC: 4.55
+# CHECK-NEXT: Block RThroughput: 0.3
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 6 0.33 * ldr s0, [sp]
+# CHECK-NEXT: 1 2 0.25 add z0.d, z0.d, z0.d
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V2UnitB
+# CHECK-NEXT: [0.1] - V2UnitB
+# CHECK-NEXT: [1.0] - V2UnitD
+# CHECK-NEXT: [1.1] - V2UnitD
+# CHECK-NEXT: [2] - V2UnitL2
+# CHECK-NEXT: [3.0] - V2UnitL01
+# CHECK-NEXT: [3.1] - V2UnitL01
+# CHECK-NEXT: [4] - V2UnitM0
+# CHECK-NEXT: [5] - V2UnitM1
+# CHECK-NEXT: [6] - V2UnitS0
+# CHECK-NEXT: [7] - V2UnitS1
+# CHECK-NEXT: [8] - V2UnitS2
+# CHECK-NEXT: [9] - V2UnitS3
+# CHECK-NEXT: [10] - V2UnitV0
+# CHECK-NEXT: [11] - V2UnitV1
+# CHECK-NEXT: [12] - V2UnitV2
+# CHECK-NEXT: [13] - V2UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13]
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - 0.25 0.25 0.25 0.25
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13] Instructions:
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - - - - - ldr s0, [sp]
+# CHECK-NEXT: - - - - - - - - - - - - - 0.25 0.25 0.25 0.25 add z0.d, z0.d, z0.d
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 01
+# CHECK-NEXT: Index 0123456789
+
+# CHECK: [0,0] DeeeeeeER .. ldr s0, [sp]
+# CHECK-NEXT: [0,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [1,0] DeeeeeeE--R. ldr s0, [sp]
+# CHECK-NEXT: [1,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [2,0] DeeeeeeE--R. ldr s0, [sp]
+# CHECK-NEXT: [2,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [3,0] D=eeeeeeE-R. ldr s0, [sp]
+# CHECK-NEXT: [3,1] D=======eeER add z0.d, z0.d, z0.d
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 1.3 1.3 1.3 ldr s0, [sp]
+# CHECK-NEXT: 1. 4 7.3 0.0 0.0 add z0.d, z0.d, z0.d
+# CHECK-NEXT: 4 4.3 0.6 0.6 <total>
+
+# CHECK: [3] Code Region - FPR64-bit
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 44
+# CHECK-NEXT: Total uOps: 200
+
+# CHECK: Dispatch Width: 16
+# CHECK-NEXT: uOps Per Cycle: 4.55
+# CHECK-NEXT: IPC: 4.55
+# CHECK-NEXT: Block RThroughput: 0.3
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 6 0.33 * ldr d0, [sp]
+# CHECK-NEXT: 1 2 0.25 add z0.d, z0.d, z0.d
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V2UnitB
+# CHECK-NEXT: [0.1] - V2UnitB
+# CHECK-NEXT: [1.0] - V2UnitD
+# CHECK-NEXT: [1.1] - V2UnitD
+# CHECK-NEXT: [2] - V2UnitL2
+# CHECK-NEXT: [3.0] - V2UnitL01
+# CHECK-NEXT: [3.1] - V2UnitL01
+# CHECK-NEXT: [4] - V2UnitM0
+# CHECK-NEXT: [5] - V2UnitM1
+# CHECK-NEXT: [6] - V2UnitS0
+# CHECK-NEXT: [7] - V2UnitS1
+# CHECK-NEXT: [8] - V2UnitS2
+# CHECK-NEXT: [9] - V2UnitS3
+# CHECK-NEXT: [10] - V2UnitV0
+# CHECK-NEXT: [11] - V2UnitV1
+# CHECK-NEXT: [12] - V2UnitV2
+# CHECK-NEXT: [13] - V2UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13]
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - 0.25 0.25 0.25 0.25
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13] Instructions:
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - - - - - ldr d0, [sp]
+# CHECK-NEXT: - - - - - - - - - - - - - 0.25 0.25 0.25 0.25 add z0.d, z0.d, z0.d
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 01
+# CHECK-NEXT: Index 0123456789
+
+# CHECK: [0,0] DeeeeeeER .. ldr d0, [sp]
+# CHECK-NEXT: [0,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [1,0] DeeeeeeE--R. ldr d0, [sp]
+# CHECK-NEXT: [1,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [2,0] DeeeeeeE--R. ldr d0, [sp]
+# CHECK-NEXT: [2,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [3,0] D=eeeeeeE-R. ldr d0, [sp]
+# CHECK-NEXT: [3,1] D=======eeER add z0.d, z0.d, z0.d
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 1.3 1.3 1.3 ldr d0, [sp]
+# CHECK-NEXT: 1. 4 7.3 0.0 0.0 add z0.d, z0.d, z0.d
+# CHECK-NEXT: 4 4.3 0.6 0.6 <total>
+
+# CHECK: [4] Code Region - FPR128-bit
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 44
+# CHECK-NEXT: Total uOps: 200
+
+# CHECK: Dispatch Width: 16
+# CHECK-NEXT: uOps Per Cycle: 4.55
+# CHECK-NEXT: IPC: 4.55
+# CHECK-NEXT: Block RThroughput: 0.3
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 6 0.33 * ldr q0, [sp]
+# CHECK-NEXT: 1 2 0.25 add z0.d, z0.d, z0.d
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V2UnitB
+# CHECK-NEXT: [0.1] - V2UnitB
+# CHECK-NEXT: [1.0] - V2UnitD
+# CHECK-NEXT: [1.1] - V2UnitD
+# CHECK-NEXT: [2] - V2UnitL2
+# CHECK-NEXT: [3.0] - V2UnitL01
+# CHECK-NEXT: [3.1] - V2UnitL01
+# CHECK-NEXT: [4] - V2UnitM0
+# CHECK-NEXT: [5] - V2UnitM1
+# CHECK-NEXT: [6] - V2UnitS0
+# CHECK-NEXT: [7] - V2UnitS1
+# CHECK-NEXT: [8] - V2UnitS2
+# CHECK-NEXT: [9] - V2UnitS3
+# CHECK-NEXT: [10] - V2UnitV0
+# CHECK-NEXT: [11] - V2UnitV1
+# CHECK-NEXT: [12] - V2UnitV2
+# CHECK-NEXT: [13] - V2UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13]
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - 0.25 0.25 0.25 0.25
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13] Instructions:
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - - - - - ldr q0, [sp]
+# CHECK-NEXT: - - - - - - - - - - - - - 0.25 0.25 0.25 0.25 add z0.d, z0.d, z0.d
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 01
+# CHECK-NEXT: Index 0123456789
+
+# CHECK: [0,0] DeeeeeeER .. ldr q0, [sp]
+# CHECK-NEXT: [0,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [1,0] DeeeeeeE--R. ldr q0, [sp]
+# CHECK-NEXT: [1,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [2,0] DeeeeeeE--R. ldr q0, [sp]
+# CHECK-NEXT: [2,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [3,0] D=eeeeeeE-R. ldr q0, [sp]
+# CHECK-NEXT: [3,1] D=======eeER add z0.d, z0.d, z0.d
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 1.3 1.3 1.3 ldr q0, [sp]
+# CHECK-NEXT: 1. 4 7.3 0.0 0.0 add z0.d, z0.d, z0.d
+# CHECK-NEXT: 4 4.3 0.6 0.6 <total>
+
+# CHECK: [5] Code Region - SIMD64-bit-b
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 44
+# CHECK-NEXT: Total uOps: 200
+
+# CHECK: Dispatch Width: 16
+# CHECK-NEXT: uOps Per Cycle: 4.55
+# CHECK-NEXT: IPC: 4.55
+# CHECK-NEXT: Block RThroughput: 0.3
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 6 0.33 * ld1 { v0.8b }, [sp]
+# CHECK-NEXT: 1 2 0.25 add z0.d, z0.d, z0.d
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V2UnitB
+# CHECK-NEXT: [0.1] - V2UnitB
+# CHECK-NEXT: [1.0] - V2UnitD
+# CHECK-NEXT: [1.1] - V2UnitD
+# CHECK-NEXT: [2] - V2UnitL2
+# CHECK-NEXT: [3.0] - V2UnitL01
+# CHECK-NEXT: [3.1] - V2UnitL01
+# CHECK-NEXT: [4] - V2UnitM0
+# CHECK-NEXT: [5] - V2UnitM1
+# CHECK-NEXT: [6] - V2UnitS0
+# CHECK-NEXT: [7] - V2UnitS1
+# CHECK-NEXT: [8] - V2UnitS2
+# CHECK-NEXT: [9] - V2UnitS3
+# CHECK-NEXT: [10] - V2UnitV0
+# CHECK-NEXT: [11] - V2UnitV1
+# CHECK-NEXT: [12] - V2UnitV2
+# CHECK-NEXT: [13] - V2UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13]
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - 0.25 0.25 0.25 0.25
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13] Instructions:
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - - - - - ld1 { v0.8b }, [sp]
+# CHECK-NEXT: - - - - - - - - - - - - - 0.25 0.25 0.25 0.25 add z0.d, z0.d, z0.d
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 01
+# CHECK-NEXT: Index 0123456789
+
+# CHECK: [0,0] DeeeeeeER .. ld1 { v0.8b }, [sp]
+# CHECK-NEXT: [0,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [1,0] DeeeeeeE--R. ld1 { v0.8b }, [sp]
+# CHECK-NEXT: [1,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [2,0] DeeeeeeE--R. ld1 { v0.8b }, [sp]
+# CHECK-NEXT: [2,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [3,0] D=eeeeeeE-R. ld1 { v0.8b }, [sp]
+# CHECK-NEXT: [3,1] D=======eeER add z0.d, z0.d, z0.d
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 1.3 1.3 1.3 ld1 { v0.8b }, [sp]
+# CHECK-NEXT: 1. 4 7.3 0.0 0.0 add z0.d, z0.d, z0.d
+# CHECK-NEXT: 4 4.3 0.6 0.6 <total>
+
+# CHECK: [6] Code Region - SIMD64-bit-h
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 44
+# CHECK-NEXT: Total uOps: 200
+
+# CHECK: Dispatch Width: 16
+# CHECK-NEXT: uOps Per Cycle: 4.55
+# CHECK-NEXT: IPC: 4.55
+# CHECK-NEXT: Block RThroughput: 0.3
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 6 0.33 * ld1 { v0.4h }, [sp]
+# CHECK-NEXT: 1 2 0.25 add z0.d, z0.d, z0.d
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V2UnitB
+# CHECK-NEXT: [0.1] - V2UnitB
+# CHECK-NEXT: [1.0] - V2UnitD
+# CHECK-NEXT: [1.1] - V2UnitD
+# CHECK-NEXT: [2] - V2UnitL2
+# CHECK-NEXT: [3.0] - V2UnitL01
+# CHECK-NEXT: [3.1] - V2UnitL01
+# CHECK-NEXT: [4] - V2UnitM0
+# CHECK-NEXT: [5] - V2UnitM1
+# CHECK-NEXT: [6] - V2UnitS0
+# CHECK-NEXT: [7] - V2UnitS1
+# CHECK-NEXT: [8] - V2UnitS2
+# CHECK-NEXT: [9] - V2UnitS3
+# CHECK-NEXT: [10] - V2UnitV0
+# CHECK-NEXT: [11] - V2UnitV1
+# CHECK-NEXT: [12] - V2UnitV2
+# CHECK-NEXT: [13] - V2UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13]
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - 0.25 0.25 0.25 0.25
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13] Instructions:
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - - - - - ld1 { v0.4h }, [sp]
+# CHECK-NEXT: - - - - - - - - - - - - - 0.25 0.25 0.25 0.25 add z0.d, z0.d, z0.d
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 01
+# CHECK-NEXT: Index 0123456789
+
+# CHECK: [0,0] DeeeeeeER .. ld1 { v0.4h }, [sp]
+# CHECK-NEXT: [0,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [1,0] DeeeeeeE--R. ld1 { v0.4h }, [sp]
+# CHECK-NEXT: [1,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [2,0] DeeeeeeE--R. ld1 { v0.4h }, [sp]
+# CHECK-NEXT: [2,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [3,0] D=eeeeeeE-R. ld1 { v0.4h }, [sp]
+# CHECK-NEXT: [3,1] D=======eeER add z0.d, z0.d, z0.d
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 1.3 1.3 1.3 ld1 { v0.4h }, [sp]
+# CHECK-NEXT: 1. 4 7.3 0.0 0.0 add z0.d, z0.d, z0.d
+# CHECK-NEXT: 4 4.3 0.6 0.6 <total>
+
+# CHECK: [7] Code Region - SIMD64-bit-s
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 44
+# CHECK-NEXT: Total uOps: 200
+
+# CHECK: Dispatch Width: 16
+# CHECK-NEXT: uOps Per Cycle: 4.55
+# CHECK-NEXT: IPC: 4.55
+# CHECK-NEXT: Block RThroughput: 0.3
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 6 0.33 * ld1 { v0.2s }, [sp]
+# CHECK-NEXT: 1 2 0.25 add z0.d, z0.d, z0.d
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V2UnitB
+# CHECK-NEXT: [0.1] - V2UnitB
+# CHECK-NEXT: [1.0] - V2UnitD
+# CHECK-NEXT: [1.1] - V2UnitD
+# CHECK-NEXT: [2] - V2UnitL2
+# CHECK-NEXT: [3.0] - V2UnitL01
+# CHECK-NEXT: [3.1] - V2UnitL01
+# CHECK-NEXT: [4] - V2UnitM0
+# CHECK-NEXT: [5] - V2UnitM1
+# CHECK-NEXT: [6] - V2UnitS0
+# CHECK-NEXT: [7] - V2UnitS1
+# CHECK-NEXT: [8] - V2UnitS2
+# CHECK-NEXT: [9] - V2UnitS3
+# CHECK-NEXT: [10] - V2UnitV0
+# CHECK-NEXT: [11] - V2UnitV1
+# CHECK-NEXT: [12] - V2UnitV2
+# CHECK-NEXT: [13] - V2UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13]
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - 0.25 0.25 0.25 0.25
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13] Instructions:
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - - - - - ld1 { v0.2s }, [sp]
+# CHECK-NEXT: - - - - - - - - - - - - - 0.25 0.25 0.25 0.25 add z0.d, z0.d, z0.d
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 01
+# CHECK-NEXT: Index 0123456789
+
+# CHECK: [0,0] DeeeeeeER .. ld1 { v0.2s }, [sp]
+# CHECK-NEXT: [0,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [1,0] DeeeeeeE--R. ld1 { v0.2s }, [sp]
+# CHECK-NEXT: [1,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [2,0] DeeeeeeE--R. ld1 { v0.2s }, [sp]
+# CHECK-NEXT: [2,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [3,0] D=eeeeeeE-R. ld1 { v0.2s }, [sp]
+# CHECK-NEXT: [3,1] D=======eeER add z0.d, z0.d, z0.d
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 1.3 1.3 1.3 ld1 { v0.2s }, [sp]
+# CHECK-NEXT: 1. 4 7.3 0.0 0.0 add z0.d, z0.d, z0.d
+# CHECK-NEXT: 4 4.3 0.6 0.6 <total>
+
+# CHECK: [8] Code Region - SIMD64-bit-d
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 44
+# CHECK-NEXT: Total uOps: 200
+
+# CHECK: Dispatch Width: 16
+# CHECK-NEXT: uOps Per Cycle: 4.55
+# CHECK-NEXT: IPC: 4.55
+# CHECK-NEXT: Block RThroughput: 0.3
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 6 0.33 * ld1 { v0.1d }, [sp]
+# CHECK-NEXT: 1 2 0.25 add z0.d, z0.d, z0.d
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V2UnitB
+# CHECK-NEXT: [0.1] - V2UnitB
+# CHECK-NEXT: [1.0] - V2UnitD
+# CHECK-NEXT: [1.1] - V2UnitD
+# CHECK-NEXT: [2] - V2UnitL2
+# CHECK-NEXT: [3.0] - V2UnitL01
+# CHECK-NEXT: [3.1] - V2UnitL01
+# CHECK-NEXT: [4] - V2UnitM0
+# CHECK-NEXT: [5] - V2UnitM1
+# CHECK-NEXT: [6] - V2UnitS0
+# CHECK-NEXT: [7] - V2UnitS1
+# CHECK-NEXT: [8] - V2UnitS2
+# CHECK-NEXT: [9] - V2UnitS3
+# CHECK-NEXT: [10] - V2UnitV0
+# CHECK-NEXT: [11] - V2UnitV1
+# CHECK-NEXT: [12] - V2UnitV2
+# CHECK-NEXT: [13] - V2UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13]
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - 0.25 0.25 0.25 0.25
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13] Instructions:
+# CHECK-NEXT: - - - - 0.33 0.33 0.34 - - - - - - - - - - ld1 { v0.1d }, [sp]
+# CHECK-NEXT: - - - - - - - - - - - - - 0.25 0.25 0.25 0.25 add z0.d, z0.d, z0.d
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 01
+# CHECK-NEXT: Index 0123456789
+
+# CHECK: [0,0] DeeeeeeER .. ld1 { v0.1d }, [sp]
+# CHECK-NEXT: [0,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [1,0] DeeeeeeE--R. ld1 { v0.1d }, [sp]
+# CHECK-NEXT: [1,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [2,0] DeeeeeeE--R. ld1 { v0.1d }, [sp]
+# CHECK-NEXT: [2,1] D======eeER. add z0.d, z0.d, z0.d
+# CHECK-NEXT: [3,0] D=eeeeeeE-R. ld1 { v0.1d }, [sp]
+# CHECK-NEXT: [3,1] D=======eeER add z0.d, z0.d, z0.d
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 1.3 1.3 1.3 ld1 { v0.1d }, [sp]
+# CHECK-NEXT: 1. 4 7.3 0.0 0.0 add z0.d, z0.d, z0.d
+# CHECK-NEXT: 4 4.3 0.6 0.6 <total>
+
+# CHECK: [9] Code Region - insr
+
+# CHECK: Iterations: 100
+# CHECK-NEXT: Instructions: 200
+# CHECK-NEXT: Total Cycles: 803
+# CHECK-NEXT: Total uOps: 300
+
+# CHECK: Dispatch Width: 16
+# CHECK-NEXT: uOps Per Cycle: 0.37
+# CHECK-NEXT: IPC: 0.25
+# CHECK-NEXT: Block RThroughput: 1.0
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 2 6 1.00 insr z0.s, w0
+# CHECK-NEXT: 1 2 0.25 add z0.s, z0.s, z0.s
+
+# CHECK: Resources:
+# CHECK-NEXT: [0.0] - V2UnitB
+# CHECK-NEXT: [0.1] - V2UnitB
+# CHECK-NEXT: [1.0] - V2UnitD
+# CHECK-NEXT: [1.1] - V2UnitD
+# CHECK-NEXT: [2] - V2UnitL2
+# CHECK-NEXT: [3.0] - V2UnitL01
+# CHECK-NEXT: [3.1] - V2UnitL01
+# CHECK-NEXT: [4] - V2UnitM0
+# CHECK-NEXT: [5] - V2UnitM1
+# CHECK-NEXT: [6] - V2UnitS0
+# CHECK-NEXT: [7] - V2UnitS1
+# CHECK-NEXT: [8] - V2UnitS2
+# CHECK-NEXT: [9] - V2UnitS3
+# CHECK-NEXT: [10] - V2UnitV0
+# CHECK-NEXT: [11] - V2UnitV1
+# CHECK-NEXT: [12] - V2UnitV2
+# CHECK-NEXT: [13] - V2UnitV3
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13]
+# CHECK-NEXT: - - - - - - - 1.00 - - - - - 0.33 1.00 0.33 0.34
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13] Instructions:
+# CHECK-NEXT: - - - - - - - 1.00 - - - - - - 1.00 - - insr z0.s, w0
+# CHECK-NEXT: - - - - - - - - - - - - - 0.33 - 0.33 0.34 add z0.s, z0.s, z0.s
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 0123456789 01234
+# CHECK-NEXT: Index 0123456789 0123456789
+
+# CHECK: [0,0] DeeeeeeER . . . . . . insr z0.s, w0
+# CHECK-NEXT: [0,1] D======eeER . . . . . add z0.s, z0.s, z0.s
+# CHECK-NEXT: [1,0] D========eeeeeeER . . . . insr z0.s, w0
+# CHECK-NEXT: [1,1] D==============eeER . . . . add z0.s, z0.s, z0.s
+# CHECK-NEXT: [2,0] D================eeeeeeER. . . insr z0.s, w0
+# CHECK-NEXT: [2,1] D======================eeER . . add z0.s, z0.s, z0.s
+# CHECK-NEXT: [3,0] D========================eeeeeeER . insr z0.s, w0
+# CHECK-NEXT: [3,1] D==============================eeER add z0.s, z0.s, z0.s
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 4 13.0 0.3 0.0 insr z0.s, w0
+# CHECK-NEXT: 1. 4 19.0 0.0 0.0 add z0.s, z0.s, z0.s
+# CHECK-NEXT: 4 16.0 0.1 0.0 <total>
diff --git a/llvm/test/tools/llvm-mca/X86/call-latency.s b/llvm/test/tools/llvm-mca/X86/call-latency.s
new file mode 100644
index 000000000000..9559d11f1b0a
--- /dev/null
+++ b/llvm/test/tools/llvm-mca/X86/call-latency.s
@@ -0,0 +1,58 @@
+# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
+# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=btver2 -iterations=1 %s | FileCheck --check-prefixes=ALL,DEFAULT %s
+# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=btver2 -call-latency=50 -iterations=1 %s | FileCheck --check-prefixes=ALL,CUSTOM %s
+
+callq printf
+
+# ALL: Iterations: 1
+# ALL-NEXT: Instructions: 1
+
+# CUSTOM-NEXT: Total Cycles: 53
+# DEFAULT-NEXT: Total Cycles: 103
+
+# ALL-NEXT: Total uOps: 1
+
+# ALL: Dispatch Width: 2
+
+# CUSTOM-NEXT: uOps Per Cycle: 0.02
+# CUSTOM-NEXT: IPC: 0.02
+
+# DEFAULT-NEXT: uOps Per Cycle: 0.01
+# DEFAULT-NEXT: IPC: 0.01
+
+# ALL-NEXT: Block RThroughput: 0.5
+
+# ALL: Instruction Info:
+# ALL-NEXT: [1]: #uOps
+# ALL-NEXT: [2]: Latency
+# ALL-NEXT: [3]: RThroughput
+# ALL-NEXT: [4]: MayLoad
+# ALL-NEXT: [5]: MayStore
+# ALL-NEXT: [6]: HasSideEffects (U)
+
+# ALL: [1] [2] [3] [4] [5] [6] Instructions:
+# ALL-NEXT: 1 1 0.50 callq printf
+
+# ALL: Resources:
+# ALL-NEXT: [0] - JALU0
+# ALL-NEXT: [1] - JALU1
+# ALL-NEXT: [2] - JDiv
+# ALL-NEXT: [3] - JFPA
+# ALL-NEXT: [4] - JFPM
+# ALL-NEXT: [5] - JFPU0
+# ALL-NEXT: [6] - JFPU1
+# ALL-NEXT: [7] - JLAGU
+# ALL-NEXT: [8] - JMul
+# ALL-NEXT: [9] - JSAGU
+# ALL-NEXT: [10] - JSTC
+# ALL-NEXT: [11] - JVALU0
+# ALL-NEXT: [12] - JVALU1
+# ALL-NEXT: [13] - JVIMUL
+
+# ALL: Resource pressure per iteration:
+# ALL-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13]
+# ALL-NEXT: - 1.00 - - - - - - - - - - - -
+
+# ALL: Resource pressure by instruction:
+# ALL-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12] [13] Instructions:
+# ALL-NEXT: - 1.00 - - - - - - - - - - - - callq printf
diff --git a/llvm/test/tools/llvm-objcopy/tool-options.test b/llvm/test/tools/llvm-objcopy/tool-options.test
new file mode 100644
index 000000000000..8d2bb4476009
--- /dev/null
+++ b/llvm/test/tools/llvm-objcopy/tool-options.test
@@ -0,0 +1,6 @@
+## An error must be reported if a required argument value is missing.
+# RUN: not llvm-objcopy --only-section 2>&1 | FileCheck --check-prefix=CHECK-NO-VALUE-ONLY-SECTION %s
+# CHECK-NO-VALUE-ONLY-SECTION: error: argument to '--only-section' is missing (expected 1 value(s))
+
+# RUN: not llvm-objcopy -O 2>&1 | FileCheck --check-prefix=CHECK-NO-VALUE-O %s
+# CHECK-NO-VALUE-O: error: argument to '-O' is missing (expected 1 value(s))
diff --git a/llvm/test/tools/llvm-profdata/show-order-error.proftext b/llvm/test/tools/llvm-profdata/show-order-error.proftext
new file mode 100644
index 000000000000..633f1a9949b6
--- /dev/null
+++ b/llvm/test/tools/llvm-profdata/show-order-error.proftext
@@ -0,0 +1,27 @@
+# RUN: not llvm-profdata order %s --num-test-traces=10 2>&1 | FileCheck %s
+
+# CHECK: --num-test-traces must be smaller than the total number of traces
+
+# Header
+:ir
+:temporal_prof_traces
+# Num Traces
+1
+# Trace Stream Size:
+1
+# Weight
+1
+a, b
+
+a
+# Func Hash:
+0x1234
+# Num Counters:
+1
+# Counter Values:
+101
+
+b
+0x5678
+1
+202
diff --git a/llvm/test/tools/llvm-profdata/show-order.proftext b/llvm/test/tools/llvm-profdata/show-order.proftext
index 8ef26847ad77..28eb1b9b42af 100644
--- a/llvm/test/tools/llvm-profdata/show-order.proftext
+++ b/llvm/test/tools/llvm-profdata/show-order.proftext
@@ -1,4 +1,6 @@
-# RUN: llvm-profdata order %s | FileCheck %s
+# RUN: llvm-profdata order %s --num-test-traces=1 | FileCheck %s
+
+# CHECK: # Total area under the page fault curve: 4.000000e+00
# CHECK: a
# CHECK: b
@@ -9,9 +11,9 @@
:ir
:temporal_prof_traces
# Num Traces
-3
+4
# Trace Stream Size:
-3
+4
# Weight
1
a, main.c:b, c
@@ -21,6 +23,9 @@ a, x, main.c:b, c
# Weight
1
a, main.c:b, c
+# Weight
+1
+a, main.c:b, c, x
a
# Func Hash:
diff --git a/llvm/test/tools/llvm-profgen/profile-density.test b/llvm/test/tools/llvm-profgen/profile-density.test
index 0eb83838d16e..086697e8da0a 100644
--- a/llvm/test/tools/llvm-profgen/profile-density.test
+++ b/llvm/test/tools/llvm-profgen/profile-density.test
@@ -1,13 +1,17 @@
-; RUN: llvm-profgen --format=text --unsymbolized-profile=%S/Inputs/profile-density.raw.prof --binary=%S/Inputs/inline-noprobe2.perfbin --output=%t1 --use-offset=0 --show-density -hot-function-density-threshold=10 --trim-cold-profile=0 &> %t2
+; RUN: llvm-profgen --format=text --unsymbolized-profile=%S/Inputs/profile-density.raw.prof --binary=%S/Inputs/inline-noprobe2.perfbin --output=%t1 --use-offset=0 --show-density -profile-density-threshold=10 --trim-cold-profile=0 &> %t2
; RUN: FileCheck %s --input-file %t2 --check-prefix=CHECK-DENSITY
-
-; RUN: llvm-profgen --format=text --unsymbolized-profile=%S/Inputs/profile-density-cs.raw.prof --binary=%S/Inputs/inline-noprobe2.perfbin --output=%t3 --show-density -hot-function-density-threshold=1 &> %t4
+; RUN: llvm-profgen --format=text --unsymbolized-profile=%S/Inputs/profile-density-cs.raw.prof --binary=%S/Inputs/inline-noprobe2.perfbin --output=%t3 --show-density -profile-density-threshold=1 -profile-density-threshold=10000 &> %t4
; RUN: FileCheck %s --input-file %t4 --check-prefix=CHECK-DENSITY-CS
+; RUN: llvm-profgen --format=text --unsymbolized-profile=%S/Inputs/profile-density-cs.raw.prof --binary=%S/Inputs/inline-noprobe2.perfbin --output=%t5 --show-density -profile-density-threshold=1 -profile-density-cutoff-hot=800000 &> %t6
+; RUN: FileCheck %s --input-file %t6 --check-prefix=CHECK-DENSITY-CS-80
+
-;CHECK-DENSITY: Sample PGO is estimated to optimize better with 3.1x more samples. Please consider increasing sampling rate or profiling for longer duration to get more samples.
-;CHECK-DENSITY: Minimum profile density for hot functions with top 99.00% total samples: 3.2
-;CHECK-DENSITY-CS: Minimum profile density for hot functions with top 99.00% total samples: 128.3
+;CHECK-DENSITY: Sample PGO is estimated to optimize better with 2.9x more samples. Please consider increasing sampling rate or profiling for longer duration to get more samples.
+;CHECK-DENSITY: Functions with density >= 3.5 account for 99.00% total sample counts.
+;CHECK-DENSITY-CS: Sample PGO is estimated to optimize better with 12.5x more samples. Please consider increasing sampling rate or profiling for longer duration to get more samples.
+;CHECK-DENSITY-CS: Functions with density >= 800.1 account for 99.00% total sample counts.
+;CHECK-DENSITY-CS-80: Functions with density >= 1886.2 account for 80.00% total sample counts.
; original code:
; clang -O3 -g -fno-optimize-sibling-calls -fdebug-info-for-profiling qsort.c -o a.out
diff --git a/llvm/test/tools/llvm-readobj/ELF/note-core-ntfile.test b/llvm/test/tools/llvm-readobj/ELF/note-core-ntfile.test
index 752cb723cd22..f4957b42a877 100644
--- a/llvm/test/tools/llvm-readobj/ELF/note-core-ntfile.test
+++ b/llvm/test/tools/llvm-readobj/ELF/note-core-ntfile.test
@@ -3,6 +3,7 @@
# RUN: yaml2obj %s -o %t.o
# RUN: llvm-readelf --notes %t.o | FileCheck %s --check-prefix=GNU
# RUN: llvm-readobj --notes %t.o | FileCheck %s --check-prefix=LLVM
+# RUN: llvm-readobj --elf-output-style=JSON --pretty-print --notes %t.o | FileCheck %s --check-prefix=JSON
## llvm-mc doesn't support generating ET_CORE files; the 'Content' field was
## generated with the following steps:
@@ -72,24 +73,62 @@ ProgramHeaders:
# LLVM-NEXT: Data size: 0x80
# LLVM-NEXT: Type: NT_FILE (mapped files)
# LLVM-NEXT: Page Size: 4096
-# LLVM-NEXT: Mapping [
+# LLVM-NEXT: Mappings [
+# LLVM-NEXT: {
# LLVM-NEXT: Start: 0x1000
# LLVM-NEXT: End: 0x2000
# LLVM-NEXT: Offset: 0x3000
# LLVM-NEXT: Filename: /path/to/a.out
-# LLVM-NEXT: ]
-# LLVM-NEXT: Mapping [
+# LLVM-NEXT: }
+# LLVM-NEXT: {
# LLVM-NEXT: Start: 0x4000
# LLVM-NEXT: End: 0x5000
# LLVM-NEXT: Offset: 0x6000
# LLVM-NEXT: Filename: /path/to/libc.so
-# LLVM-NEXT: ]
-# LLVM-NEXT: Mapping [
+# LLVM-NEXT: }
+# LLVM-NEXT: {
# LLVM-NEXT: Start: 0x7000
# LLVM-NEXT: End: 0x8000
# LLVM-NEXT: Offset: 0x9000
# LLVM-NEXT: Filename: [stack]
-# LLVM-NEXT: ]
-# LLVM-NEXT: }
+# LLVM-NEXT: }
+# LLVM-NEXT: ]
# LLVM-NEXT: }
+# LLVM-NEXT: }
# LLVM-NEXT: ]
+
+# JSON: "Notes": [
+# JSON-NEXT: {
+# JSON-NEXT: "NoteSection": {
+# JSON-NEXT: "Name": "<?>",
+# JSON-NEXT: "Offset": 120,
+# JSON-NEXT: "Size": 148,
+# JSON-NEXT: "Note": {
+# JSON-NEXT: "Owner": "CORE",
+# JSON-NEXT: "Data size": 128,
+# JSON-NEXT: "Type": "NT_FILE (mapped files)",
+# JSON-NEXT: "Page Size": 4096,
+# JSON-NEXT: "Mappings": [
+# JSON-NEXT: {
+# JSON-NEXT: "Start": 4096,
+# JSON-NEXT: "End": 8192,
+# JSON-NEXT: "Offset": 12288,
+# JSON-NEXT: "Filename": "/path/to/a.out"
+# JSON-NEXT: },
+# JSON-NEXT: {
+# JSON-NEXT: "Start": 16384,
+# JSON-NEXT: "End": 20480,
+# JSON-NEXT: "Offset": 24576,
+# JSON-NEXT: "Filename": "/path/to/libc.so"
+# JSON-NEXT: },
+# JSON-NEXT: {
+# JSON-NEXT: "Start": 28672,
+# JSON-NEXT: "End": 32768,
+# JSON-NEXT: "Offset": 36864,
+# JSON-NEXT: "Filename": "[stack]"
+# JSON-NEXT: }
+# JSON-NEXT: ]
+# JSON-NEXT: }
+# JSON-NEXT: }
+# JSON-NEXT: }
+# JSON-NEXT: ]
diff --git a/llvm/test/tools/llvm-reduce/reduce-flags.ll b/llvm/test/tools/llvm-reduce/reduce-flags.ll
index 5d6d1260ac50..293504e32f91 100644
--- a/llvm/test/tools/llvm-reduce/reduce-flags.ll
+++ b/llvm/test/tools/llvm-reduce/reduce-flags.ll
@@ -57,18 +57,26 @@ define i32 @ashr_exact_keep(i32 %a, i32 %b) {
ret i32 %op
}
-; CHECK-LABEL: @getelementptr_inbounds_drop(
+; CHECK-LABEL: @getelementptr_inbounds_nuw_drop_both(
; INTERESTING: getelementptr
; RESULT: getelementptr i32, ptr %a, i64 %b
-define ptr @getelementptr_inbounds_drop(ptr %a, i64 %b) {
- %op = getelementptr inbounds i32, ptr %a, i64 %b
+define ptr @getelementptr_inbounds_nuw_drop_both(ptr %a, i64 %b) {
+ %op = getelementptr inbounds nuw i32, ptr %a, i64 %b
ret ptr %op
}
-; CHECK-LABEL: @getelementptr_inbounds_keep(
+; CHECK-LABEL: @getelementptr_inbounds_keep_only_inbounds(
; INTERESTING: inbounds
; RESULT: getelementptr inbounds i32, ptr %a, i64 %b
-define ptr @getelementptr_inbounds_keep(ptr %a, i64 %b) {
+define ptr @getelementptr_inbounds_keep_only_inbounds(ptr %a, i64 %b) {
+ %op = getelementptr inbounds nuw i32, ptr %a, i64 %b
+ ret ptr %op
+}
+
+; CHECK-LABEL: @getelementptr_inbounds_relax_to_nusw(
+; INTERESTING: getelementptr {{inbounds|nusw}}
+; RESULT: getelementptr nusw i32, ptr %a, i64 %b
+define ptr @getelementptr_inbounds_relax_to_nusw(ptr %a, i64 %b) {
%op = getelementptr inbounds i32, ptr %a, i64 %b
ret ptr %op
}
diff --git a/llvm/test/tools/llvm-split/AMDGPU/address-taken-externalize-with-call.ll b/llvm/test/tools/llvm-split/AMDGPU/address-taken-externalize-with-call.ll
new file mode 100644
index 000000000000..8b76237efa32
--- /dev/null
+++ b/llvm/test/tools/llvm-split/AMDGPU/address-taken-externalize-with-call.ll
@@ -0,0 +1,46 @@
+; RUN: llvm-split -o %t %s -j 3 -mtriple amdgcn-amd-amdhsa -amdgpu-module-splitting-large-kernel-threshold=0
+; RUN: llvm-dis -o - %t0 | FileCheck --check-prefix=CHECK0 %s
+; RUN: llvm-dis -o - %t1 | FileCheck --check-prefix=CHECK1 %s
+; RUN: llvm-dis -o - %t2 | FileCheck --check-prefix=CHECK2 %s
+
+; 3 kernels:
+; - A does a direct call to HelperA
+; - B stores @HelperA's address
+; - C does a direct call to HelperA
+;
+; The helper function will get externalized, which will force A and C into P0, as
+; external functions cannot be duplicated.
+
+; CHECK0: define hidden void @HelperA()
+; CHECK0: define amdgpu_kernel void @A()
+; CHECK0: declare amdgpu_kernel void @B(ptr)
+; CHECK0: define amdgpu_kernel void @C()
+
+; CHECK1: declare hidden void @HelperA()
+; CHECK1: declare amdgpu_kernel void @A()
+; CHECK1: declare amdgpu_kernel void @B(ptr)
+; CHECK1: declare amdgpu_kernel void @C()
+
+; CHECK2: declare hidden void @HelperA()
+; CHECK2: declare amdgpu_kernel void @A()
+; CHECK2: define amdgpu_kernel void @B(ptr %dst)
+; CHECK2: declare amdgpu_kernel void @C()
+
+define internal void @HelperA() {
+ ret void
+}
+
+define amdgpu_kernel void @A() {
+ call void @HelperA()
+ ret void
+}
+
+define amdgpu_kernel void @B(ptr %dst) {
+ store ptr @HelperA, ptr %dst
+ ret void
+}
+
+define amdgpu_kernel void @C() {
+ call void @HelperA()
+ ret void
+}
diff --git a/llvm/test/tools/llvm-split/AMDGPU/address-taken-externalize.ll b/llvm/test/tools/llvm-split/AMDGPU/address-taken-externalize.ll
new file mode 100644
index 000000000000..46d7d9783aea
--- /dev/null
+++ b/llvm/test/tools/llvm-split/AMDGPU/address-taken-externalize.ll
@@ -0,0 +1,37 @@
+; RUN: llvm-split -o %t %s -j 2 -mtriple amdgcn-amd-amdhsa -amdgpu-module-splitting-large-kernel-threshold=0
+; RUN: llvm-dis -o - %t0 | FileCheck --check-prefix=CHECK0 %s
+; RUN: llvm-dis -o - %t1 | FileCheck --check-prefix=CHECK1 %s
+
+; 2 kernels:
+; - A is isolated
+; - B stores @HelperA's and @HelperB's addresses
+;
+; The helper functions should get externalized (i.e., become hidden with external linkage).
+
+; CHECK0: define hidden void @HelperA()
+; CHECK0: define hidden void @HelperB()
+; CHECK0: define amdgpu_kernel void @A()
+; CHECK0: declare amdgpu_kernel void @B(i1, ptr)
+
+; CHECK1: declare hidden void @HelperA()
+; CHECK1: declare hidden void @HelperB()
+; CHECK1: declare amdgpu_kernel void @A()
+; CHECK1: define amdgpu_kernel void @B(i1 %cond, ptr %dst)
+
+define internal void @HelperA() {
+ ret void
+}
+
+define internal void @HelperB() {
+ ret void
+}
+
+define amdgpu_kernel void @A() {
+ ret void
+}
+
+define amdgpu_kernel void @B(i1 %cond, ptr %dst) {
+ %addr = select i1 %cond, ptr @HelperA, ptr @HelperB
+ store ptr %addr, ptr %dst
+ ret void
+}
diff --git a/llvm/test/tools/llvm-split/AMDGPU/debug-name-hiding.ll b/llvm/test/tools/llvm-split/AMDGPU/debug-name-hiding.ll
new file mode 100644
index 000000000000..6a07ed51ba1b
--- /dev/null
+++ b/llvm/test/tools/llvm-split/AMDGPU/debug-name-hiding.ll
@@ -0,0 +1,20 @@
+; RUN: llvm-split -o %t %s -j 3 -mtriple amdgcn-amd-amdhsa -debug -amdgpu-module-splitting-log-private 2>&1 | FileCheck %s --implicit-check-not=MyCustomKernel
+; REQUIRES: asserts
+
+; SHA256 of the kernel names.
+
+; CHECK: a097723d21cf9f35d90e6fb7881995ac8c398b3366a6c97efc657404f9fe301c
+; CHECK: 626bc23242de8fcfda7f0e66318d29455c081df6b5380e64d14703c95fcbcd59
+; CHECK: c38d90a7ca71dc5d694bb9e093dadcdedfc4cb4adf7ed7e46d42fe95a0b4ef55
+
+define amdgpu_kernel void @MyCustomKernel0() {
+ ret void
+}
+
+define amdgpu_kernel void @MyCustomKernel1() {
+ ret void
+}
+
+define amdgpu_kernel void @MyCustomKernel2() {
+ ret void
+}
diff --git a/llvm/test/tools/llvm-split/AMDGPU/kernels-alias-dependencies.ll b/llvm/test/tools/llvm-split/AMDGPU/kernels-alias-dependencies.ll
new file mode 100644
index 000000000000..c2746d139892
--- /dev/null
+++ b/llvm/test/tools/llvm-split/AMDGPU/kernels-alias-dependencies.ll
@@ -0,0 +1,45 @@
+; RUN: llvm-split -o %t %s -j 2 -mtriple amdgcn-amd-amdhsa
+; RUN: llvm-dis -o - %t0 | FileCheck --check-prefix=CHECK0 %s
+; RUN: llvm-dis -o - %t1 | FileCheck --check-prefix=CHECK1 %s
+
+; 3 kernels:
+; - A calls nothing
+; - B calls @PerryThePlatypus
+; - C calls @Perry, an alias of @PerryThePlatypus
+;
+; We should see through the alias and put B/C in the same
+; partition.
+;
+; Additionally, @PerryThePlatypus gets externalized, since
+; the alias counts as taking its address.
+
+; CHECK0-NOT: define
+; CHECK0: @Perry = internal alias ptr (), ptr @PerryThePlatypus
+; CHECK0: define hidden void @PerryThePlatypus()
+; CHECK0: define amdgpu_kernel void @B
+; CHECK0: define amdgpu_kernel void @C
+; CHECK0-NOT: define
+
+; CHECK1-NOT: define
+; CHECK1: define amdgpu_kernel void @A
+; CHECK1-NOT: define
+
+@Perry = internal alias ptr(), ptr @PerryThePlatypus
+
+define internal void @PerryThePlatypus() {
+ ret void
+}
+
+define amdgpu_kernel void @A() {
+ ret void
+}
+
+define amdgpu_kernel void @B() {
+ call void @PerryThePlatypus()
+ ret void
+}
+
+define amdgpu_kernel void @C() {
+ call void @Perry()
+ ret void
+}
diff --git a/llvm/test/tools/llvm-split/AMDGPU/kernels-cost-ranking.ll b/llvm/test/tools/llvm-split/AMDGPU/kernels-cost-ranking.ll
new file mode 100644
index 000000000000..4635264aefb3
--- /dev/null
+++ b/llvm/test/tools/llvm-split/AMDGPU/kernels-cost-ranking.ll
@@ -0,0 +1,54 @@
+; RUN: llvm-split -o %t %s -j 3 -mtriple amdgcn-amd-amdhsa
+; RUN: llvm-dis -o - %t0 | FileCheck --check-prefix=CHECK0 %s
+; RUN: llvm-dis -o - %t1 | FileCheck --check-prefix=CHECK1 %s
+; RUN: llvm-dis -o - %t2 | FileCheck --check-prefix=CHECK2 %s
+
+; 3 kernels, each with their own dependencies, should go into 3
+; distinct partitions. The most expensive kernel should be
+; seen first and go into the last partition.
+
+; CHECK0-NOT: define
+; CHECK0: define amdgpu_kernel void @C
+; CHECK0: define internal void @HelperC
+; CHECK0-NOT: define
+
+; CHECK1-NOT: define
+; CHECK1: define amdgpu_kernel void @A
+; CHECK1: define internal void @HelperA
+; CHECK1-NOT: define
+
+; CHECK2-NOT: define
+; CHECK2: define amdgpu_kernel void @B
+; CHECK2: define internal void @HelperB
+; CHECK2-NOT: define
+
+
+define amdgpu_kernel void @A() {
+ call void @HelperA()
+ ret void
+}
+
+define internal void @HelperA() {
+ ret void
+}
+
+define amdgpu_kernel void @B(ptr %x) {
+ store i64 42, ptr %x
+ store i64 43, ptr %x
+ store i64 44, ptr %x
+ call void @HelperB()
+ ret void
+}
+
+define internal void @HelperB() {
+ ret void
+}
+
+define amdgpu_kernel void @C() {
+ call void @HelperC()
+ ret void
+}
+
+define internal void @HelperC() {
+ ret void
+}
diff --git a/llvm/test/tools/llvm-split/AMDGPU/kernels-dependencies.ll b/llvm/test/tools/llvm-split/AMDGPU/kernels-dependencies.ll
new file mode 100644
index 000000000000..bea527f15bba
--- /dev/null
+++ b/llvm/test/tools/llvm-split/AMDGPU/kernels-dependencies.ll
@@ -0,0 +1,50 @@
+; RUN: llvm-split -o %t %s -j 3 -mtriple amdgcn-amd-amdhsa
+; RUN: llvm-dis -o - %t0 | FileCheck --check-prefix=CHECK0 %s
+; RUN: llvm-dis -o - %t1 | FileCheck --check-prefix=CHECK1 %s
+; RUN: llvm-dis -o - %t2 | FileCheck --check-prefix=CHECK2 %s
+
+; 3 kernels, each with their own dependencies, should go into 3
+; distinct partitions.
+
+; CHECK0-NOT: define
+; CHECK0: define amdgpu_kernel void @C
+; CHECK0: define internal void @HelperC
+; CHECK0-NOT: define
+
+; CHECK1-NOT: define
+; CHECK1: define amdgpu_kernel void @B
+; CHECK1: define internal void @HelperB
+; CHECK1-NOT: define
+
+; CHECK2-NOT: define
+; CHECK2: define amdgpu_kernel void @A
+; CHECK2: define internal void @HelperA
+; CHECK2-NOT: define
+
+
+define amdgpu_kernel void @A() {
+ call void @HelperA()
+ ret void
+}
+
+define internal void @HelperA() {
+ ret void
+}
+
+define amdgpu_kernel void @B() {
+ call void @HelperB()
+ ret void
+}
+
+define internal void @HelperB() {
+ ret void
+}
+
+define amdgpu_kernel void @C() {
+ call void @HelperC()
+ ret void
+}
+
+define internal void @HelperC() {
+ ret void
+}
diff --git a/llvm/test/tools/llvm-split/AMDGPU/kernels-dependency-duplication.ll b/llvm/test/tools/llvm-split/AMDGPU/kernels-dependency-duplication.ll
new file mode 100644
index 000000000000..64839f8d8456
--- /dev/null
+++ b/llvm/test/tools/llvm-split/AMDGPU/kernels-dependency-duplication.ll
@@ -0,0 +1,41 @@
+; RUN: llvm-split -o %t %s -j 3 -mtriple amdgcn-amd-amdhsa
+; RUN: llvm-dis -o - %t0 | FileCheck --check-prefix=CHECK0 %s
+; RUN: llvm-dis -o - %t1 | FileCheck --check-prefix=CHECK1 %s
+; RUN: llvm-dis -o - %t2 | FileCheck --check-prefix=CHECK2 %s
+
+; 3 kernels share a common helper; that helper should be
+; cloned into all partitions.
+
+; CHECK0-NOT: define
+; CHECK0: define internal void @Helper
+; CHECK0: define amdgpu_kernel void @C
+; CHECK0-NOT: define
+
+; CHECK1-NOT: define
+; CHECK1: define internal void @Helper
+; CHECK1: define amdgpu_kernel void @B
+; CHECK1-NOT: define
+
+; CHECK2-NOT: define
+; CHECK2: define internal void @Helper
+; CHECK2: define amdgpu_kernel void @A
+; CHECK2-NOT: define
+
+define internal void @Helper() {
+ ret void
+}
+
+define amdgpu_kernel void @A() {
+ call void @Helper()
+ ret void
+}
+
+define amdgpu_kernel void @B() {
+ call void @Helper()
+ ret void
+}
+
+define amdgpu_kernel void @C() {
+ call void @Helper()
+ ret void
+}
diff --git a/llvm/test/tools/llvm-split/AMDGPU/kernels-dependency-external.ll b/llvm/test/tools/llvm-split/AMDGPU/kernels-dependency-external.ll
new file mode 100644
index 000000000000..435e97a58134
--- /dev/null
+++ b/llvm/test/tools/llvm-split/AMDGPU/kernels-dependency-external.ll
@@ -0,0 +1,64 @@
+; RUN: llvm-split -o %t %s -j 4 -mtriple amdgcn-amd-amdhsa
+; RUN: llvm-dis -o - %t0 | FileCheck --check-prefix=CHECK0 %s
+; RUN: llvm-dis -o - %t1 | FileCheck --check-prefix=CHECK1 %s
+; RUN: llvm-dis -o - %t2 | FileCheck --check-prefix=CHECK2 %s
+; RUN: llvm-dis -o - %t3 | FileCheck --check-prefix=CHECK3 %s
+
+; Both overridable helpers should go in P0.
+
+; CHECK0-NOT: define
+; CHECK0: define available_externally void @OverridableHelper0()
+; CHECK0: define internal void @OverridableHelper1()
+; CHECK0: define amdgpu_kernel void @A
+; CHECK0: define amdgpu_kernel void @B
+; CHECK0-NOT: define
+
+; CHECK1-NOT: define
+
+; CHECK2-NOT: define
+; CHECK2: define internal void @PrivateHelper1()
+; CHECK2: define amdgpu_kernel void @D
+; CHECK2-NOT: define
+
+; CHECK3-NOT: define
+; CHECK3: define internal void @PrivateHelper0()
+; CHECK3: define amdgpu_kernel void @C
+; CHECK3-NOT: define
+
+define available_externally void @OverridableHelper0() {
+ ret void
+}
+
+define internal void @OverridableHelper1() #0 {
+ ret void
+}
+
+define internal void @PrivateHelper0() {
+ ret void
+}
+
+define internal void @PrivateHelper1() {
+ ret void
+}
+
+define amdgpu_kernel void @A() {
+ call void @OverridableHelper0()
+ ret void
+}
+
+define amdgpu_kernel void @B() {
+ call void @OverridableHelper1()
+ ret void
+}
+
+define amdgpu_kernel void @C() {
+ call void @PrivateHelper0()
+ ret void
+}
+
+define amdgpu_kernel void @D() {
+ call void @PrivateHelper1()
+ ret void
+}
+
+attributes #0 = { nobuiltin }
diff --git a/llvm/test/tools/llvm-split/AMDGPU/kernels-dependency-indirect.ll b/llvm/test/tools/llvm-split/AMDGPU/kernels-dependency-indirect.ll
new file mode 100644
index 000000000000..9701ac35ce54
--- /dev/null
+++ b/llvm/test/tools/llvm-split/AMDGPU/kernels-dependency-indirect.ll
@@ -0,0 +1,76 @@
+; RUN: llvm-split -o %t %s -j 3 -mtriple amdgcn-amd-amdhsa
+; RUN: llvm-dis -o - %t0 | FileCheck --check-prefix=CHECK0 %s
+; RUN: llvm-dis -o - %t1 | FileCheck --check-prefix=CHECK1 %s
+; RUN: llvm-dis -o - %t2 | FileCheck --check-prefix=CHECK2 %s
+
+; We have 4 kernels:
+; - Each kernel has an internal helper
+; - @A's and @B's helpers do an indirect call.
+;
+; We default to putting A/B in P0, alongside a copy
+; of all helpers whose addresses are taken.
+; The other kernels can still go into separate partitions.
+
+; CHECK0-NOT: define
+; CHECK0: define hidden void @HelperA
+; CHECK0: define hidden void @HelperB
+; CHECK0: define hidden void @CallCandidate
+; CHECK0-NOT: define {{.*}} @HelperC
+; CHECK0-NOT: define {{.*}} @HelperD
+; CHECK0: define amdgpu_kernel void @A
+; CHECK0: define amdgpu_kernel void @B
+; CHECK0-NOT: define
+
+; CHECK1-NOT: define
+; CHECK1: define internal void @HelperD
+; CHECK1: define amdgpu_kernel void @D
+; CHECK1-NOT: define
+
+; CHECK2-NOT: define
+; CHECK2: define internal void @HelperC
+; CHECK2: define amdgpu_kernel void @C
+; CHECK2-NOT: define
+
+@addrthief = global [3 x ptr] [ptr @HelperA, ptr @HelperB, ptr @CallCandidate]
+
+define internal void @HelperA(ptr %call) {
+ call void %call()
+ ret void
+}
+
+define internal void @HelperB(ptr %call) {
+ call void %call()
+ ret void
+}
+
+define internal void @CallCandidate() {
+ ret void
+}
+
+define internal void @HelperC() {
+ ret void
+}
+
+define internal void @HelperD() {
+ ret void
+}
+
+define amdgpu_kernel void @A(ptr %call) {
+ call void @HelperA(ptr %call)
+ ret void
+}
+
+define amdgpu_kernel void @B(ptr %call) {
+ call void @HelperB(ptr %call)
+ ret void
+}
+
+define amdgpu_kernel void @C() {
+ call void @HelperC()
+ ret void
+}
+
+define amdgpu_kernel void @D() {
+ call void @HelperD()
+ ret void
+}
diff --git a/llvm/test/tools/llvm-split/AMDGPU/kernels-dependency-overridable.ll b/llvm/test/tools/llvm-split/AMDGPU/kernels-dependency-overridable.ll
new file mode 100644
index 000000000000..dc2c5c3c07be
--- /dev/null
+++ b/llvm/test/tools/llvm-split/AMDGPU/kernels-dependency-overridable.ll
@@ -0,0 +1,40 @@
+; RUN: llvm-split -o %t %s -j 3 -mtriple amdgcn-amd-amdhsa
+; RUN: llvm-dis -o - %t0 | FileCheck --check-prefix=CHECK0 %s
+; RUN: llvm-dis -o - %t1 | FileCheck --check-prefix=CHECK1 %s
+; RUN: llvm-dis -o - %t2 | FileCheck --check-prefix=CHECK2 %s
+
+; CHECK0-NOT: define
+; CHECK0: define void @ExternalHelper
+; CHECK0: define amdgpu_kernel void @A
+; CHECK0: define amdgpu_kernel void @B
+; CHECK0-NOT: define
+
+; CHECK1-NOT: define
+; CHECK1: define amdgpu_kernel void @D
+; CHECK1-NOT: define
+
+; CHECK2-NOT: define
+; CHECK2: define amdgpu_kernel void @C
+; CHECK2-NOT: define
+
+define void @ExternalHelper() {
+ ret void
+}
+
+define amdgpu_kernel void @A() {
+ call void @ExternalHelper()
+ ret void
+}
+
+define amdgpu_kernel void @B() {
+ call void @ExternalHelper()
+ ret void
+}
+
+define amdgpu_kernel void @C() {
+ ret void
+}
+
+define amdgpu_kernel void @D() {
+ ret void
+}
diff --git a/llvm/test/tools/llvm-split/AMDGPU/kernels-global-variables-noexternal.ll b/llvm/test/tools/llvm-split/AMDGPU/kernels-global-variables-noexternal.ll
new file mode 100644
index 000000000000..0fc76934afc5
--- /dev/null
+++ b/llvm/test/tools/llvm-split/AMDGPU/kernels-global-variables-noexternal.ll
@@ -0,0 +1,42 @@
+; RUN: llvm-split -o %t %s -j 3 -mtriple amdgcn-amd-amdhsa -amdgpu-module-splitting-no-externalize-globals
+; RUN: llvm-dis -o - %t0 | FileCheck --check-prefix=CHECK0 %s
+; RUN: llvm-dis -o - %t1 | FileCheck --check-prefix=CHECK1 %s
+; RUN: llvm-dis -o - %t2 | FileCheck --check-prefix=CHECK2 %s
+
+; 3 kernels use private/internal global variables.
+; The GVs should be copied into each partition as needed.
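+; This is the opposite of the default mode (see kernels-global-variables.ll),
+; where the GVs are externalized and shared across partitions instead of
+; being duplicated.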
+
+; CHECK0-NOT: define
+; CHECK0: @bar = internal constant ptr
+; CHECK0: define amdgpu_kernel void @C
+; CHECK0-NOT: define
+
+; CHECK1-NOT: define
+; CHECK1: @foo = private constant ptr
+; CHECK1: define amdgpu_kernel void @A
+; CHECK1-NOT: define
+
+; CHECK2-NOT: define
+; CHECK2: @foo = private constant ptr
+; CHECK2: @bar = internal constant ptr
+; CHECK2: define amdgpu_kernel void @B
+; CHECK2-NOT: define
+
+@foo = private constant ptr poison
+@bar = internal constant ptr poison
+
+define amdgpu_kernel void @A() {
+ store i32 42, ptr @foo
+ ret void
+}
+
+define amdgpu_kernel void @B() {
+ store i32 42, ptr @foo
+ store i32 42, ptr @bar
+ ret void
+}
+
+define amdgpu_kernel void @C() {
+ store i32 42, ptr @bar
+ ret void
+}
diff --git a/llvm/test/tools/llvm-split/AMDGPU/kernels-global-variables.ll b/llvm/test/tools/llvm-split/AMDGPU/kernels-global-variables.ll
new file mode 100644
index 000000000000..7564662e7c7c
--- /dev/null
+++ b/llvm/test/tools/llvm-split/AMDGPU/kernels-global-variables.ll
@@ -0,0 +1,44 @@
+; RUN: llvm-split -o %t %s -j 3 -mtriple amdgcn-amd-amdhsa
+; RUN: llvm-dis -o - %t0 | FileCheck --check-prefix=CHECK0 %s
+; RUN: llvm-dis -o - %t1 | FileCheck --check-prefix=CHECK1 %s
+; RUN: llvm-dis -o - %t2 | FileCheck --check-prefix=CHECK2 %s
+
+; 3 kernels use private/internal global variables.
+; By default the GVs are externalized: one partition keeps the (now hidden)
+; definitions and the other partitions reference them as external hidden
+; declarations.
+
+; CHECK0-NOT: define
+; CHECK0: @foo = hidden constant ptr poison
+; CHECK0: @bar = hidden constant ptr poison
+; CHECK0: define amdgpu_kernel void @C
+; CHECK0-NOT: define
+
+; CHECK1-NOT: define
+; CHECK1: @foo = external hidden constant ptr{{$}}
+; CHECK1: @bar = external hidden constant ptr{{$}}
+; CHECK1: define amdgpu_kernel void @A
+; CHECK1-NOT: define
+
+; CHECK2-NOT: define
+; CHECK2: @foo = external hidden constant ptr{{$}}
+; CHECK2: @bar = external hidden constant ptr{{$}}
+; CHECK2: define amdgpu_kernel void @B
+; CHECK2-NOT: define
+
+@foo = private constant ptr poison
+@bar = internal constant ptr poison
+
+define amdgpu_kernel void @A() {
+ store i32 42, ptr @foo
+ ret void
+}
+
+define amdgpu_kernel void @B() {
+ store i32 42, ptr @foo
+ store i32 42, ptr @bar
+ ret void
+}
+
+define amdgpu_kernel void @C() {
+ store i32 42, ptr @bar
+ ret void
+}
diff --git a/llvm/test/tools/llvm-split/AMDGPU/kernels-load-balancing.ll b/llvm/test/tools/llvm-split/AMDGPU/kernels-load-balancing.ll
new file mode 100644
index 000000000000..5dfb95c5fc66
--- /dev/null
+++ b/llvm/test/tools/llvm-split/AMDGPU/kernels-load-balancing.ll
@@ -0,0 +1,75 @@
+; RUN: llvm-split -o %t %s -j 3 -mtriple amdgcn-amd-amdhsa
+; RUN: llvm-dis -o - %t0 | FileCheck --check-prefix=CHECK0 %s
+; RUN: llvm-dis -o - %t1 | FileCheck --check-prefix=CHECK1 %s
+; RUN: llvm-dis -o - %t2 | FileCheck --check-prefix=CHECK2 %s
+
+; Test load balancing logic with 6 kernels.
+;
+; Kernels go from most expensive (A == 6) to least expensive (F == 1)
+;
+; Load balancing should work like this (current partition cost is in parens)
+;
+; Initial -> [P0(0), P1(0), P2(0)]
+;
+; A(6) goes in 2 -> [P2(6), P0(0), P1(0)]
+; B(5) goes in 1 -> [P2(6), P1(5), P0(0)]
+; C(4) goes in 0 -> [P2(6), P1(5), P0(4)]
+;
+; D(3) goes in 0 -> [P0(7), P2(6), P1(5)]
+; E(2) goes in 1 -> [P0(7), P1(7), P2(6)]
+; F(1) goes in 2 -> [P0(7), P1(7), P2(7)]
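+;
+; As a sanity check on the arithmetic: the total cost is 6+5+4+3+2+1 = 21, so
+; an ideal 3-way split is 21/3 = 7 per partition, and the greedy largest-first
+; assignment above reaches exactly [P0(7), P1(7), P2(7)].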
+
+; CHECK0-NOT: define
+; CHECK0: define amdgpu_kernel void @C
+; CHECK0: define amdgpu_kernel void @D
+; CHECK0-NOT: define
+
+; CHECK1-NOT: define
+; CHECK1: define amdgpu_kernel void @B
+; CHECK1: define amdgpu_kernel void @E
+; CHECK1-NOT: define
+
+; CHECK2-NOT: define
+; CHECK2: define amdgpu_kernel void @A
+; CHECK2: define amdgpu_kernel void @F
+; CHECK2-NOT: define
+
+
+define amdgpu_kernel void @A(ptr %x) {
+ store i64 42, ptr %x
+ store i64 43, ptr %x
+ store i64 44, ptr %x
+ store i64 45, ptr %x
+ store i64 46, ptr %x
+ ret void
+}
+
+define amdgpu_kernel void @B(ptr %x) {
+ store i64 42, ptr %x
+ store i64 43, ptr %x
+ store i64 44, ptr %x
+ store i64 45, ptr %x
+ ret void
+}
+
+define amdgpu_kernel void @C(ptr %x) {
+ store i64 42, ptr %x
+ store i64 43, ptr %x
+ store i64 44, ptr %x
+ ret void
+}
+
+define amdgpu_kernel void @D(ptr %x) {
+ store i64 42, ptr %x
+ store i64 43, ptr %x
+ ret void
+}
+
+define amdgpu_kernel void @E(ptr %x) {
+ store i64 42, ptr %x
+ ret void
+}
+
+define amdgpu_kernel void @F() {
+ ret void
+}
diff --git a/llvm/test/tools/llvm-split/AMDGPU/kernels-no-dependencies.ll b/llvm/test/tools/llvm-split/AMDGPU/kernels-no-dependencies.ll
new file mode 100644
index 000000000000..8959acfcae54
--- /dev/null
+++ b/llvm/test/tools/llvm-split/AMDGPU/kernels-no-dependencies.ll
@@ -0,0 +1,39 @@
+; RUN: llvm-split -o %t %s -j 4 -mtriple amdgcn-amd-amdhsa
+; RUN: llvm-dis -o - %t0 | FileCheck --check-prefix=CHECK0 %s
+; RUN: llvm-dis -o - %t1 | FileCheck --check-prefix=CHECK1 %s
+; RUN: llvm-dis -o - %t2 | FileCheck --check-prefix=CHECK2 %s
+; RUN: llvm-dis -o - %t3 | FileCheck --check-prefix=CHECK3 %s
+
+; Check that 4 independent kernels get put into 4 different partitions.
+
+; CHECK0-NOT: define
+; CHECK0: define amdgpu_kernel void @D
+; CHECK0-NOT: define
+
+; CHECK1-NOT: define
+; CHECK1: define amdgpu_kernel void @C
+; CHECK1-NOT: define
+
+; CHECK2-NOT: define
+; CHECK2: define amdgpu_kernel void @B
+; CHECK2-NOT: define
+
+; CHECK3-NOT: define
+; CHECK3: define amdgpu_kernel void @A
+; CHECK3-NOT: define
+
+define amdgpu_kernel void @A() {
+ ret void
+}
+
+define amdgpu_kernel void @B() {
+ ret void
+}
+
+define amdgpu_kernel void @C() {
+ ret void
+}
+
+define amdgpu_kernel void @D() {
+ ret void
+}
diff --git a/llvm/test/tools/llvm-split/AMDGPU/large-kernels-merging.ll b/llvm/test/tools/llvm-split/AMDGPU/large-kernels-merging.ll
new file mode 100644
index 000000000000..4fdbac7d1789
--- /dev/null
+++ b/llvm/test/tools/llvm-split/AMDGPU/large-kernels-merging.ll
@@ -0,0 +1,98 @@
+; RUN: llvm-split -o %t %s -j 3 -mtriple amdgcn-amd-amdhsa -amdgpu-module-splitting-large-kernel-threshold=1.2 -amdgpu-module-splitting-large-kernel-merge-overlap=0.5
+; RUN: llvm-dis -o - %t0 | FileCheck --check-prefix=CHECK0 %s
+; RUN: llvm-dis -o - %t1 | FileCheck --check-prefix=CHECK1 %s
+; RUN: llvm-dis -o - %t2 | FileCheck --check-prefix=CHECK2 %s
+
+; RUN: llvm-split -o %t.nolarge %s -j 3 -mtriple amdgcn-amd-amdhsa -amdgpu-module-splitting-large-kernel-threshold=0
+; RUN: llvm-dis -o - %t.nolarge0 | FileCheck --check-prefix=NOLARGEKERNELS-CHECK0 %s
+; RUN: llvm-dis -o - %t.nolarge1 | FileCheck --check-prefix=NOLARGEKERNELS-CHECK1 %s
+; RUN: llvm-dis -o - %t.nolarge2 | FileCheck --check-prefix=NOLARGEKERNELS-CHECK2 %s
+
+; 2 kernels (A/B) are large and share all their dependencies.
+; They should go in the same partition, the remaining kernel should
+; go somewhere else, and one partition should be empty.
+;
+; Also check without large-kernel processing to verify they are indeed
+; handled differently.
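+;
+; With the large-kernel options enabled, @A and @B land together in one
+; partition (CHECK2), sharing a single copy of the large helpers; with
+; -amdgpu-module-splitting-large-kernel-threshold=0 they are split apart and
+; the large helpers are duplicated into both partitions
+; (NOLARGEKERNELS-CHECK1/2).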
+
+; CHECK0-NOT: define
+
+; CHECK1-NOT: define
+; CHECK1: define internal void @HelperC()
+; CHECK1: define amdgpu_kernel void @C
+; CHECK1-NOT: define
+
+; CHECK2-NOT: define
+; CHECK2: define internal void @large2()
+; CHECK2: define internal void @large1()
+; CHECK2: define internal void @large0()
+; CHECK2: define internal void @HelperA()
+; CHECK2: define internal void @HelperB()
+; CHECK2: define amdgpu_kernel void @A
+; CHECK2: define amdgpu_kernel void @B
+; CHECK2-NOT: define
+
+; NOLARGEKERNELS-CHECK0-NOT: define
+; NOLARGEKERNELS-CHECK0: define internal void @HelperC()
+; NOLARGEKERNELS-CHECK0: define amdgpu_kernel void @C
+; NOLARGEKERNELS-CHECK0-NOT: define
+
+; NOLARGEKERNELS-CHECK1: define internal void @large2()
+; NOLARGEKERNELS-CHECK1: define internal void @large1()
+; NOLARGEKERNELS-CHECK1: define internal void @large0()
+; NOLARGEKERNELS-CHECK1: define internal void @HelperB()
+; NOLARGEKERNELS-CHECK1: define amdgpu_kernel void @B
+
+; NOLARGEKERNELS-CHECK2: define internal void @large2()
+; NOLARGEKERNELS-CHECK2: define internal void @large1()
+; NOLARGEKERNELS-CHECK2: define internal void @large0()
+; NOLARGEKERNELS-CHECK2: define internal void @HelperA()
+; NOLARGEKERNELS-CHECK2: define amdgpu_kernel void @A
+
+define internal void @large2() {
+ store volatile i32 42, ptr null
+ call void @large2()
+ ret void
+}
+
+define internal void @large1() {
+ call void @large1()
+ call void @large2()
+ ret void
+}
+
+define internal void @large0() {
+ call void @large0()
+ call void @large1()
+ call void @large2()
+ ret void
+}
+
+define internal void @HelperA() {
+ call void @large0()
+ ret void
+}
+
+define internal void @HelperB() {
+ call void @large0()
+ ret void
+}
+
+define amdgpu_kernel void @A() {
+ call void @HelperA()
+ ret void
+}
+
+define amdgpu_kernel void @B() {
+ call void @HelperB()
+ ret void
+}
+
+define internal void @HelperC() {
+ ret void
+}
+
+define amdgpu_kernel void @C() {
+ call void @HelperC()
+ ret void
+}
diff --git a/llvm/test/tools/llvm-split/AMDGPU/lit.local.cfg b/llvm/test/tools/llvm-split/AMDGPU/lit.local.cfg
new file mode 100644
index 000000000000..6154a6c1c906
--- /dev/null
+++ b/llvm/test/tools/llvm-split/AMDGPU/lit.local.cfg
@@ -0,0 +1,2 @@
+if "AMDGPU" not in config.root.targets:
+ config.unsupported = True
diff --git a/llvm/tools/dsymutil/MachODebugMapParser.cpp b/llvm/tools/dsymutil/MachODebugMapParser.cpp
index 6a9f25681cdd..e28c976d6ace 100644
--- a/llvm/tools/dsymutil/MachODebugMapParser.cpp
+++ b/llvm/tools/dsymutil/MachODebugMapParser.cpp
@@ -301,7 +301,7 @@ void MachODebugMapParser::switchToNewLibDebugMapObject(
if (CurrentDebugMapObject &&
CurrentDebugMapObject->getType() == MachO::N_LIB &&
- CurrentDebugMapObject->getObjectFilename().compare(Path.str()) == 0) {
+ CurrentDebugMapObject->getObjectFilename() == Path) {
return;
}
diff --git a/llvm/tools/llvm-cxxfilt/CMakeLists.txt b/llvm/tools/llvm-cxxfilt/CMakeLists.txt
index cbc4c2db6154..a644baffdd90 100644
--- a/llvm/tools/llvm-cxxfilt/CMakeLists.txt
+++ b/llvm/tools/llvm-cxxfilt/CMakeLists.txt
@@ -17,6 +17,10 @@ add_llvm_tool(llvm-cxxfilt
GENERATE_DRIVER
)
+if(LLVM_TOOL_LLVM_DRIVER_BUILD)
+ set_property(GLOBAL APPEND PROPERTY LLVM_DRIVER_HIDDEN_TOOL_ALIASES_llvm-cxxfilt c++filt)
+endif()
+
if(LLVM_INSTALL_BINUTILS_SYMLINKS)
add_llvm_tool_symlink(c++filt llvm-cxxfilt)
endif()
diff --git a/llvm/tools/llvm-link/llvm-link.cpp b/llvm/tools/llvm-link/llvm-link.cpp
index 7794f2d81ed0..1b90fce76fbd 100644
--- a/llvm/tools/llvm-link/llvm-link.cpp
+++ b/llvm/tools/llvm-link/llvm-link.cpp
@@ -377,9 +377,13 @@ static bool importFunctions(const char *argv0, Module &DestModule) {
if (Verbose)
errs() << "Importing " << FunctionName << " from " << FileName << "\n";
+  // `-import` specifies the `<filename,function-name>` pairs to import as
+  // definitions, so set the import type to Definition directly.
+  // FIXME: A follow-up patch should add test coverage for importing
+  // declarations via the `llvm-link` CLI (e.g., by introducing a new command
+  // line option).
auto &Entry =
ImportList[FileNameStringCache.insert(FileName).first->getKey()];
- Entry.insert(F->getGUID());
+ Entry[F->getGUID()] = GlobalValueSummary::Definition;
}
auto CachedModuleLoader = [&](StringRef Identifier) {
return ModuleLoaderCache.takeModule(std::string(Identifier));
diff --git a/llvm/tools/llvm-lto/llvm-lto.cpp b/llvm/tools/llvm-lto/llvm-lto.cpp
index f310097eec63..8218bd5a74ea 100644
--- a/llvm/tools/llvm-lto/llvm-lto.cpp
+++ b/llvm/tools/llvm-lto/llvm-lto.cpp
@@ -692,8 +692,9 @@ private:
// Build a map of module to the GUIDs and summary objects that should
// be written to its index.
std::map<std::string, GVSummaryMapTy> ModuleToSummariesForIndex;
+ GVSummaryPtrSet DecSummaries;
ThinGenerator.gatherImportedSummariesForModule(
- *TheModule, *Index, ModuleToSummariesForIndex, *Input);
+ *TheModule, *Index, ModuleToSummariesForIndex, DecSummaries, *Input);
std::string OutputName = OutputFilename;
if (OutputName.empty()) {
@@ -703,7 +704,7 @@ private:
std::error_code EC;
raw_fd_ostream OS(OutputName, EC, sys::fs::OpenFlags::OF_None);
error(EC, "error opening the file '" + OutputName + "'");
- writeIndexToFile(*Index, OS, &ModuleToSummariesForIndex);
+ writeIndexToFile(*Index, OS, &ModuleToSummariesForIndex, &DecSummaries);
}
}
diff --git a/llvm/tools/llvm-mc/llvm-mc.cpp b/llvm/tools/llvm-mc/llvm-mc.cpp
index 807071a7b9a1..506e4f22ef8f 100644
--- a/llvm/tools/llvm-mc/llvm-mc.cpp
+++ b/llvm/tools/llvm-mc/llvm-mc.cpp
@@ -569,9 +569,6 @@ int main(int argc, char **argv) {
Str->initSections(true, *STI);
}
- // Use Assembler information for parsing.
- Str->setUseAssemblerInfoForParsing(true);
-
int Res = 1;
bool disassemble = false;
switch (Action) {
diff --git a/llvm/tools/llvm-mca/llvm-mca.cpp b/llvm/tools/llvm-mca/llvm-mca.cpp
index 03d7d7944b9c..cc5d4f5fa05d 100644
--- a/llvm/tools/llvm-mca/llvm-mca.cpp
+++ b/llvm/tools/llvm-mca/llvm-mca.cpp
@@ -135,6 +135,11 @@ static cl::opt<unsigned>
"(instructions per cycle)"),
cl::cat(ToolOptions), cl::init(0));
+static cl::opt<unsigned>
+ CallLatency("call-latency", cl::Hidden,
+ cl::desc("Number of cycles to assume for a call instruction"),
+ cl::cat(ToolOptions), cl::init(100U));
+
enum class SkipType { NONE, LACK_SCHED, PARSE_FAILURE, ANY_FAILURE };
static cl::opt<enum SkipType> SkipUnsupportedInstructions(
@@ -568,7 +573,7 @@ int main(int argc, char **argv) {
}
// Create an instruction builder.
- mca::InstrBuilder IB(*STI, *MCII, *MRI, MCIA.get(), *IM);
+ mca::InstrBuilder IB(*STI, *MCII, *MRI, MCIA.get(), *IM, CallLatency);
// Create a context to control ownership of the pipeline hardware.
mca::Context MCA(*MRI, *STI);
diff --git a/llvm/tools/llvm-ml/llvm-ml.cpp b/llvm/tools/llvm-ml/llvm-ml.cpp
index 1cac576f54e7..f1f39af059aa 100644
--- a/llvm/tools/llvm-ml/llvm-ml.cpp
+++ b/llvm/tools/llvm-ml/llvm-ml.cpp
@@ -428,9 +428,6 @@ int llvm_ml_main(int Argc, char **Argv, const llvm::ToolContext &) {
Str->emitAssignment(Feat00Sym, MCConstantExpr::create(Feat00Flags, Ctx));
}
- // Use Assembler information for parsing.
- Str->setUseAssemblerInfoForParsing(true);
-
int Res = 1;
if (InputArgs.hasArg(OPT_as_lex)) {
// -as-lex; Lex only, and output a stream of tokens
diff --git a/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp b/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp
index a1897334cff2..4ab3b7265f2f 100644
--- a/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp
+++ b/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp
@@ -571,6 +571,12 @@ objcopy::parseObjcopyOptions(ArrayRef<const char *> RawArgsArr,
llvm::opt::InputArgList InputArgs =
T.ParseArgs(ArgsArr, MissingArgumentIndex, MissingArgumentCount);
+ if (MissingArgumentCount)
+ return createStringError(
+ errc::invalid_argument,
+ "argument to '%s' is missing (expected %d value(s))",
+ InputArgs.getArgString(MissingArgumentIndex), MissingArgumentCount);
+
if (InputArgs.size() == 0 && DashDash == RawArgsArr.end()) {
printHelp(T, errs(), ToolType::Objcopy);
exit(1);
diff --git a/llvm/tools/llvm-profdata/llvm-profdata.cpp b/llvm/tools/llvm-profdata/llvm-profdata.cpp
index 4126b55576dd..28c3afa10164 100644
--- a/llvm/tools/llvm-profdata/llvm-profdata.cpp
+++ b/llvm/tools/llvm-profdata/llvm-profdata.cpp
@@ -75,7 +75,6 @@ cl::SubCommand MergeSubcommand(
namespace {
enum ProfileKinds { instr, sample, memory };
enum FailureMode { warnOnly, failIfAnyAreInvalid, failIfAllAreInvalid };
-} // namespace
enum ProfileFormat {
PF_None = 0,
@@ -87,6 +86,7 @@ enum ProfileFormat {
};
enum class ShowFormat { Text, Json, Yaml };
+} // namespace
// Common options.
cl::opt<std::string> OutputFilename("output", cl::value_desc("output"),
@@ -340,7 +340,7 @@ cl::opt<unsigned long long> OverlapValueCutoff(
         "profile with max count value greater than the parameter value"),
cl::sub(OverlapSubcommand));
-// Options unique to show subcommand.
+// Options specific to show subcommand.
cl::opt<bool> ShowCounts("counts", cl::init(false),
cl::desc("Show counter values for shown functions"),
cl::sub(ShowSubcommand));
@@ -439,12 +439,19 @@ cl::opt<bool> ShowProfileVersion("profile-version", cl::init(false),
cl::desc("Show profile version. "),
cl::sub(ShowSubcommand));
+// Options specific to order subcommand.
+cl::opt<unsigned>
+ NumTestTraces("num-test-traces", cl::init(0),
+ cl::desc("Keep aside the last <num-test-traces> traces in "
+                  cl::desc("Set aside the last <num-test-traces> traces in "
+ "instead use them to evaluate that order"),
+ cl::sub(OrderSubcommand));
+
// We use this string to indicate that there are
// multiple static functions that map to the same name.
const std::string DuplicateNameStr = "----";
-static void warn(Twine Message, std::string Whence = "",
- std::string Hint = "") {
+static void warn(Twine Message, StringRef Whence = "", StringRef Hint = "") {
WithColor::warning();
if (!Whence.empty())
errs() << Whence << ": ";
@@ -456,13 +463,13 @@ static void warn(Twine Message, std::string Whence = "",
static void warn(Error E, StringRef Whence = "") {
if (E.isA<InstrProfError>()) {
handleAllErrors(std::move(E), [&](const InstrProfError &IPE) {
- warn(IPE.message(), std::string(Whence), std::string(""));
+ warn(IPE.message(), Whence);
});
}
}
-static void exitWithError(Twine Message, std::string Whence = "",
- std::string Hint = "") {
+static void exitWithError(Twine Message, StringRef Whence = "",
+ StringRef Hint = "") {
WithColor::error();
if (!Whence.empty())
errs() << Whence << ": ";
@@ -481,16 +488,16 @@ static void exitWithError(Error E, StringRef Whence = "") {
// Hint in case user missed specifying the profile type.
Hint = "Perhaps you forgot to use the --sample or --memory option?";
}
- exitWithError(IPE.message(), std::string(Whence), std::string(Hint));
+ exitWithError(IPE.message(), Whence, Hint);
});
return;
}
- exitWithError(toString(std::move(E)), std::string(Whence));
+ exitWithError(toString(std::move(E)), Whence);
}
static void exitWithErrorCode(std::error_code EC, StringRef Whence = "") {
- exitWithError(EC.message(), std::string(Whence));
+ exitWithError(EC.message(), Whence);
}
static void warnOrExitGivenError(FailureMode FailMode, std::error_code EC,
@@ -498,7 +505,7 @@ static void warnOrExitGivenError(FailureMode FailMode, std::error_code EC,
if (FailMode == failIfAnyAreInvalid)
exitWithErrorCode(EC, Whence);
else
- warn(EC.message(), std::string(Whence));
+ warn(EC.message(), Whence);
}
static void handleMergeWriterError(Error E, StringRef WhenceFile = "",
@@ -1585,7 +1592,7 @@ static void mergeSampleProfile(const WeightedFileVector &Inputs,
// If OutputSizeLimit is 0 (default), it is the same as write().
if (std::error_code EC =
Writer->writeWithSizeLimit(ProfileMap, OutputSizeLimit))
- exitWithErrorCode(std::move(EC));
+ exitWithErrorCode(EC);
}
static WeightedFile parseWeightedFile(const StringRef &WeightedFilename) {
@@ -3278,13 +3285,42 @@ static int order_main() {
// Read all entries
(void)I;
}
- auto &Traces = Reader->getTemporalProfTraces();
- auto Nodes = TemporalProfTraceTy::createBPFunctionNodes(Traces);
+ ArrayRef Traces = Reader->getTemporalProfTraces();
+ if (NumTestTraces && NumTestTraces >= Traces.size())
+ exitWithError(
+ "--" + NumTestTraces.ArgStr +
+ " must be smaller than the total number of traces: expected: < " +
+ Twine(Traces.size()) + ", actual: " + Twine(NumTestTraces));
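+  // Hold out the last NumTestTraces traces: they are excluded from the
+  // ordering computation and only used below to evaluate the computed order.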
+ ArrayRef TestTraces = Traces.take_back(NumTestTraces);
+ Traces = Traces.drop_back(NumTestTraces);
+
+ std::vector<BPFunctionNode> Nodes;
+ TemporalProfTraceTy::createBPFunctionNodes(Traces, Nodes);
BalancedPartitioningConfig Config;
BalancedPartitioning BP(Config);
BP.run(Nodes);
OS << "# Ordered " << Nodes.size() << " functions\n";
+ if (!TestTraces.empty()) {
+ // Since we don't know the symbol sizes, we assume 32 functions per page.
+ DenseMap<BPFunctionNode::IDT, unsigned> IdToPageNumber;
+ for (auto &Node : Nodes)
+ IdToPageNumber[Node.Id] = IdToPageNumber.size() / 32;
+
+ SmallSet<unsigned, 0> TouchedPages;
+ unsigned Area = 0;
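+    // For each function in a trace that has a page assignment, add the number
+    // of distinct pages touched so far; summed over all test traces this is
+    // the discrete area under the page-fault curve (lower is better).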
+ for (auto &Trace : TestTraces) {
+ for (auto Id : Trace.FunctionNameRefs) {
+ auto It = IdToPageNumber.find(Id);
+ if (It == IdToPageNumber.end())
+ continue;
+ TouchedPages.insert(It->getSecond());
+ Area += TouchedPages.size();
+ }
+ TouchedPages.clear();
+ }
+ OS << "# Total area under the page fault curve: " << (float)Area << "\n";
+ }
OS << "# Warning: Mach-O may prefix symbols with \"_\" depending on the "
"linkage and this output does not take that into account. Some "
"post-processing may be required before passing to the linker via "
diff --git a/llvm/tools/llvm-profgen/PerfReader.cpp b/llvm/tools/llvm-profgen/PerfReader.cpp
index e9442027aed3..e63c6d61b3bf 100644
--- a/llvm/tools/llvm-profgen/PerfReader.cpp
+++ b/llvm/tools/llvm-profgen/PerfReader.cpp
@@ -552,7 +552,7 @@ bool PerfScriptReader::extractLBRStack(TraceStream &TraceIt,
// ... 0x4005c8/0x4005dc/P/-/-/0
// It's in FIFO order and separated by whitespace.
SmallVector<StringRef, 32> Records;
- TraceIt.getCurrentLine().split(Records, " ", -1, false);
+ TraceIt.getCurrentLine().rtrim().split(Records, " ", -1, false);
auto WarnInvalidLBR = [](TraceStream &TraceIt) {
WithColor::warning() << "Invalid address in LBR record at line "
<< TraceIt.getLineNumber() << ": "
diff --git a/llvm/tools/llvm-profgen/ProfileGenerator.cpp b/llvm/tools/llvm-profgen/ProfileGenerator.cpp
index 5aa44108f966..2118e954fe54 100644
--- a/llvm/tools/llvm-profgen/ProfileGenerator.cpp
+++ b/llvm/tools/llvm-profgen/ProfileGenerator.cpp
@@ -75,14 +75,18 @@ static cl::opt<int, true> CSProfMaxContextDepth(
"depth limit."),
cl::location(llvm::sampleprof::CSProfileGenerator::MaxContextDepth));
-static cl::opt<double> HotFunctionDensityThreshold(
- "hot-function-density-threshold", llvm::cl::init(1000),
- llvm::cl::desc(
- "specify density threshold for hot functions (default: 1000)"),
+static cl::opt<double> ProfileDensityThreshold(
+ "profile-density-threshold", llvm::cl::init(50),
+ llvm::cl::desc("If the profile density is below the given threshold, it "
+ "will be suggested to increase the sampling rate."),
llvm::cl::Optional);
static cl::opt<bool> ShowDensity("show-density", llvm::cl::init(false),
llvm::cl::desc("show profile density details"),
llvm::cl::Optional);
+static cl::opt<int> ProfileDensityCutOffHot(
+ "profile-density-cutoff-hot", llvm::cl::init(990000),
+ llvm::cl::desc("Total samples cutoff for functions used to calculate "
+ "profile density."));
static cl::opt<bool> UpdateTotalSamples(
"update-total-samples", llvm::cl::init(false),
@@ -179,21 +183,22 @@ void ProfileGeneratorBase::write() {
void ProfileGeneratorBase::showDensitySuggestion(double Density) {
if (Density == 0.0)
- WithColor::warning() << "The --profile-summary-cutoff-hot option may be "
+ WithColor::warning() << "The output profile is empty or the "
+ "--profile-density-cutoff-hot option is "
"set too low. Please check your command.\n";
- else if (Density < HotFunctionDensityThreshold)
+ else if (Density < ProfileDensityThreshold)
WithColor::warning()
<< "Sample PGO is estimated to optimize better with "
- << format("%.1f", HotFunctionDensityThreshold / Density)
+ << format("%.1f", ProfileDensityThreshold / Density)
<< "x more samples. Please consider increasing sampling rate or "
"profiling for longer duration to get more samples.\n";
if (ShowDensity)
- outs() << "Minimum profile density for hot functions with top "
+ outs() << "Functions with density >= " << format("%.1f", Density)
+ << " account for "
<< format("%.2f",
- static_cast<double>(ProfileSummaryCutoffHot.getValue()) /
- 10000)
- << "% total samples: " << format("%.1f", Density) << "\n";
+ static_cast<double>(ProfileDensityCutOffHot) / 10000)
+           << "% of total sample counts.\n";
}
bool ProfileGeneratorBase::filterAmbiguousProfile(FunctionSamples &FS) {
@@ -238,32 +243,6 @@ void ProfileGeneratorBase::filterAmbiguousProfile(SampleProfileMap &Profiles) {
}
}
-double ProfileGeneratorBase::calculateDensity(const SampleProfileMap &Profiles,
- uint64_t HotCntThreshold) {
- double Density = DBL_MAX;
- std::vector<const FunctionSamples *> HotFuncs;
- for (auto &I : Profiles) {
- auto &FuncSamples = I.second;
- if (FuncSamples.getTotalSamples() < HotCntThreshold)
- continue;
- HotFuncs.emplace_back(&FuncSamples);
- }
-
- for (auto *FuncSamples : HotFuncs) {
- auto *Func = Binary->getBinaryFunction(FuncSamples->getFunction());
- if (!Func)
- continue;
- uint64_t FuncSize = Func->getFuncSize();
- if (FuncSize == 0)
- continue;
- Density =
- std::min(Density, static_cast<double>(FuncSamples->getTotalSamples()) /
- FuncSize);
- }
-
- return Density == DBL_MAX ? 0.0 : Density;
-}
-
void ProfileGeneratorBase::findDisjointRanges(RangeSample &DisjointRanges,
const RangeSample &Ranges) {
@@ -768,9 +747,95 @@ void ProfileGenerator::populateBoundarySamplesForAllFunctions(
}
}
+void ProfileGeneratorBase::calculateBodySamplesAndSize(
+ const FunctionSamples &FSamples, uint64_t &TotalBodySamples,
+ uint64_t &FuncBodySize) {
+  // Note that ideally the size should be the number of function instructions.
+  // However, for a probe-based profile we don't have an accurate instruction
+  // count for each probe; instead, a probe's sample count is the count for
+  // the whole block, which is equivalent to
+  // total_instruction_samples/num_of_instructions in one block. Hence, we use
+  // the number of probes as a proxy for the function's size.
+ FuncBodySize += FSamples.getBodySamples().size();
+
+  // The accumulated body samples re-calculated here can differ from the
+  // TotalSamples (getTotalSamples) field of FunctionSamples for a line-number
+  // based profile. The reason is that TotalSamples sums the samples of every
+  // machine instruction on a source line, whereas the corresponding
+  // BodySamples entry keeps only the maximum of them, so TotalSamples is
+  // usually much bigger than the accumulated body samples, since one source
+  // line can emit many machine instructions. We observed a regression when we
+  // switched to the accumulated body samples (by using -update-total-samples).
+  // Hence, it's safer to re-calculate here to avoid such a discrepancy. There
+  // is no problem for a probe-based profile, as its TotalSamples is exactly
+  // the accumulated body samples.
+ for (const auto &I : FSamples.getBodySamples())
+ TotalBodySamples += I.second.getSamples();
+
+ for (const auto &CallsiteSamples : FSamples.getCallsiteSamples())
+ for (const auto &Callee : CallsiteSamples.second) {
+ // For binary-level density, the inlinees' samples and size should be
+ // included in the calculation.
+ calculateBodySamplesAndSize(Callee.second, TotalBodySamples,
+ FuncBodySize);
+ }
+}
+
+// Calculate profile density:
+// Calculate the density of each function, sort the functions by density in
+// descending order, and keep accumulating their total samples until the sum
+// exceeds the percentage threshold (cutoff) of total profile samples. The
+// profile density is then the last (minimum) function density among the
+// processed functions, which means that if the profile density is good, all
+// the functions hot to perf have good density. The percentage threshold
+// (--profile-density-cutoff-hot) is configurable depending on how much
+// regression the system wants to tolerate.
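+//
+// For example, if three functions have (density, samples) of (100, 600),
+// (50, 300) and (10, 100) with a 99% cutoff, the two densest functions only
+// cover 900/1000 = 90% of the samples, so the third is also consumed and the
+// reported profile density is 10.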
+double
+ProfileGeneratorBase::calculateDensity(const SampleProfileMap &Profiles) {
+ double ProfileDensity = 0.0;
+
+ uint64_t TotalProfileSamples = 0;
+ // A list of the function profile density and its total samples.
+ std::vector<std::pair<double, uint64_t>> FuncDensityList;
+ for (const auto &I : Profiles) {
+ uint64_t TotalBodySamples = 0;
+ uint64_t FuncBodySize = 0;
+ calculateBodySamplesAndSize(I.second, TotalBodySamples, FuncBodySize);
+
+ if (FuncBodySize == 0)
+ continue;
+
+ double FuncDensity = static_cast<double>(TotalBodySamples) / FuncBodySize;
+ TotalProfileSamples += TotalBodySamples;
+ FuncDensityList.emplace_back(FuncDensity, TotalBodySamples);
+ }
+
+ // Sorted by the density in descending order.
+ llvm::stable_sort(FuncDensityList, [&](const std::pair<double, uint64_t> &A,
+ const std::pair<double, uint64_t> &B) {
+ if (A.first != B.first)
+ return A.first > B.first;
+ return A.second < B.second;
+ });
+
+ uint64_t AccumulatedSamples = 0;
+ uint32_t I = 0;
+  assert(ProfileDensityCutOffHot <= 1000000 &&
+         "The cutoff value must not exceed 1000000 (100%)");
+ while (AccumulatedSamples < TotalProfileSamples *
+ static_cast<float>(ProfileDensityCutOffHot) /
+ 1000000 &&
+ I < FuncDensityList.size()) {
+ AccumulatedSamples += FuncDensityList[I].second;
+ ProfileDensity = FuncDensityList[I].first;
+ I++;
+ }
+
+ return ProfileDensity;
+}
+
void ProfileGeneratorBase::calculateAndShowDensity(
const SampleProfileMap &Profiles) {
- double Density = calculateDensity(Profiles, HotCountThreshold);
+ double Density = calculateDensity(Profiles);
showDensitySuggestion(Density);
}
@@ -1057,17 +1122,13 @@ void CSProfileGenerator::postProcessProfiles() {
CSProfMaxColdContextDepth, EnableCSPreInliner);
}
- // Merge function samples of CS profile to calculate profile density.
- sampleprof::SampleProfileMap ContextLessProfiles;
- ProfileConverter::flattenProfile(ProfileMap, ContextLessProfiles, true);
-
- calculateAndShowDensity(ContextLessProfiles);
if (GenCSNestedProfile) {
ProfileConverter CSConverter(ProfileMap);
CSConverter.convertCSProfiles();
FunctionSamples::ProfileIsCS = false;
}
filterAmbiguousProfile(ProfileMap);
+ ProfileGeneratorBase::calculateAndShowDensity(ProfileMap);
}
void ProfileGeneratorBase::computeSummaryAndThreshold(
diff --git a/llvm/tools/llvm-profgen/ProfileGenerator.h b/llvm/tools/llvm-profgen/ProfileGenerator.h
index d258fb78bfb1..5e36128530cd 100644
--- a/llvm/tools/llvm-profgen/ProfileGenerator.h
+++ b/llvm/tools/llvm-profgen/ProfileGenerator.h
@@ -116,10 +116,13 @@ protected:
void computeSummaryAndThreshold(SampleProfileMap &ProfileMap);
- void calculateAndShowDensity(const SampleProfileMap &Profiles);
+ void calculateBodySamplesAndSize(const FunctionSamples &FSamples,
+ uint64_t &TotalBodySamples,
+ uint64_t &FuncBodySize);
+
+ double calculateDensity(const SampleProfileMap &Profiles);
- double calculateDensity(const SampleProfileMap &Profiles,
- uint64_t HotCntThreshold);
+ void calculateAndShowDensity(const SampleProfileMap &Profiles);
void showDensitySuggestion(double Density);
diff --git a/llvm/tools/llvm-readobj/ELFDumper.cpp b/llvm/tools/llvm-readobj/ELFDumper.cpp
index bacffd6c36f8..e463e141590d 100644
--- a/llvm/tools/llvm-readobj/ELFDumper.cpp
+++ b/llvm/tools/llvm-readobj/ELFDumper.cpp
@@ -3910,7 +3910,10 @@ template <class ELFT> void GNUELFDumper<ELFT>::printRelocations() {
this->Obj.getSectionContents(Sec);
if (!ContentsOrErr)
return ContentsOrErr.takeError();
- return this->Obj.crelHeader(*ContentsOrErr) / 8;
+ auto NumOrErr = this->Obj.getCrelHeader(*ContentsOrErr);
+ if (!NumOrErr)
+ return NumOrErr.takeError();
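+    // The CREL header packs flags into its low 3 bits, so dividing by 8
+    // recovers the relocation count.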
+ return *NumOrErr / 8;
}
if (PrintAsRelr(Sec)) {
@@ -3930,7 +3933,8 @@ template <class ELFT> void GNUELFDumper<ELFT>::printRelocations() {
HasRelocSections = true;
std::string EntriesNum = "<?>";
- if (Expected<size_t> NumOrErr = GetEntriesNum(Sec))
+ Expected<size_t> NumOrErr = GetEntriesNum(Sec);
+ if (NumOrErr)
EntriesNum = std::to_string(*NumOrErr);
else
this->reportUniqueWarning("unable to get the number of relocations in " +
@@ -3947,10 +3951,10 @@ template <class ELFT> void GNUELFDumper<ELFT>::printRelocations() {
printRelr(Sec);
} else {
uint64_t CrelHdr = 0;
- if (auto ContentsOrErr = this->Obj.getSectionContents(Sec))
- CrelHdr = this->Obj.crelHeader(*ContentsOrErr);
- else
- consumeError(ContentsOrErr.takeError());
+ if (Sec.sh_type == ELF::SHT_CREL && NumOrErr) {
+ CrelHdr = cantFail(this->Obj.getCrelHeader(
+ cantFail(this->Obj.getSectionContents(Sec))));
+ }
printRelocHeaderFields<ELFT>(OS, Sec.sh_type, this->Obj.getHeader(),
CrelHdr);
this->printRelocationsHelper(Sec);
@@ -7909,8 +7913,9 @@ static bool printLLVMOMPOFFLOADNoteLLVMStyle(uint32_t NoteType,
static void printCoreNoteLLVMStyle(const CoreNote &Note, ScopedPrinter &W) {
W.printNumber("Page Size", Note.PageSize);
+ ListScope D(W, "Mappings");
for (const CoreFileMapping &Mapping : Note.Mappings) {
- ListScope D(W, "Mapping");
+ DictScope D(W);
W.printHex("Start", Mapping.Start);
W.printHex("End", Mapping.End);
W.printHex("Offset", Mapping.Offset);
diff --git a/llvm/tools/llvm-reduce/deltas/ReduceInstructionFlags.cpp b/llvm/tools/llvm-reduce/deltas/ReduceInstructionFlags.cpp
index ad619a6c02a4..ba345d3659b2 100644
--- a/llvm/tools/llvm-reduce/deltas/ReduceInstructionFlags.cpp
+++ b/llvm/tools/llvm-reduce/deltas/ReduceInstructionFlags.cpp
@@ -42,8 +42,14 @@ static void reduceFlagsInModule(Oracle &O, ReducerWorkItem &WorkItem) {
if (PDI->isDisjoint() && !O.shouldKeep())
PDI->setIsDisjoint(false);
} else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
- if (GEP->isInBounds() && !O.shouldKeep())
- GEP->setIsInBounds(false);
+ GEPNoWrapFlags NW = GEP->getNoWrapFlags();
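+      // Drop each no-wrap flag independently; a flag survives only when the
+      // oracle asks to keep that part of the module.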
+ if (NW.isInBounds() && !O.shouldKeep())
+ NW = NW.withoutInBounds();
+ if (NW.hasNoUnsignedSignedWrap() && !O.shouldKeep())
+ NW = NW.withoutNoUnsignedSignedWrap();
+ if (NW.hasNoUnsignedWrap() && !O.shouldKeep())
+ NW = NW.withoutNoUnsignedWrap();
+ GEP->setNoWrapFlags(NW);
} else if (auto *FPOp = dyn_cast<FPMathOperator>(&I)) {
FastMathFlags Flags = FPOp->getFastMathFlags();
diff --git a/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp b/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
index 4cb76f434742..06ac98b0c5e1 100644
--- a/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
+++ b/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
@@ -649,9 +649,9 @@ void applySpecificSectionMappings(RuntimeDyld &Dyld,
const FileToSectionIDMap &FileToSecIDMap) {
for (StringRef Mapping : SpecificSectionMappings) {
- size_t EqualsIdx = Mapping.find_first_of("=");
+ size_t EqualsIdx = Mapping.find_first_of('=');
std::string SectionIDStr = std::string(Mapping.substr(0, EqualsIdx));
- size_t ComaIdx = Mapping.find_first_of(",");
+ size_t ComaIdx = Mapping.find_first_of(',');
if (ComaIdx == StringRef::npos)
report_fatal_error("Invalid section specification '" + Mapping +
diff --git a/llvm/tools/opt-viewer/CMakeLists.txt b/llvm/tools/opt-viewer/CMakeLists.txt
index c0070f8cbfac..4bcf6932ee77 100644
--- a/llvm/tools/opt-viewer/CMakeLists.txt
+++ b/llvm/tools/opt-viewer/CMakeLists.txt
@@ -13,6 +13,7 @@ foreach (file ${files})
endforeach (file)
add_custom_target(opt-viewer DEPENDS ${files})
+set_target_properties(opt-viewer PROPERTIES FOLDER "LLVM/Tools")
if(NOT LLVM_ENABLE_IDE)
add_llvm_install_targets("install-opt-viewer"
DEPENDS opt-viewer
diff --git a/llvm/unittests/Analysis/InlineAdvisorPlugin/CMakeLists.txt b/llvm/unittests/Analysis/InlineAdvisorPlugin/CMakeLists.txt
index a0df549e8ea9..22cd0cb1d9a0 100644
--- a/llvm/unittests/Analysis/InlineAdvisorPlugin/CMakeLists.txt
+++ b/llvm/unittests/Analysis/InlineAdvisorPlugin/CMakeLists.txt
@@ -12,11 +12,10 @@ if (NOT WIN32 AND NOT CYGWIN)
BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/../
LIBRARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/../
)
- set_target_properties(InlineAdvisorPlugin PROPERTIES FOLDER "Tests")
+ set_target_properties(InlineAdvisorPlugin PROPERTIES FOLDER "LLVM/Tests")
# The plugin depends on some of the output files of intrinsics_gen, so make sure
# it is built before the plugin.
add_dependencies(InlineAdvisorPlugin intrinsics_gen)
add_dependencies(AnalysisTests InlineAdvisorPlugin)
- set_property(TARGET InlineAdvisorPlugin PROPERTY FOLDER "Tests/UnitTests/AnalysisTests")
endif()
diff --git a/llvm/unittests/Analysis/InlineOrderPlugin/CMakeLists.txt b/llvm/unittests/Analysis/InlineOrderPlugin/CMakeLists.txt
index e5e5ac4a6fa9..cc470a934426 100644
--- a/llvm/unittests/Analysis/InlineOrderPlugin/CMakeLists.txt
+++ b/llvm/unittests/Analysis/InlineOrderPlugin/CMakeLists.txt
@@ -18,5 +18,4 @@ if (NOT WIN32 AND NOT CYGWIN)
# it is built before the plugin.
add_dependencies(InlineOrderPlugin intrinsics_gen)
add_dependencies(AnalysisTests InlineOrderPlugin)
- set_property(TARGET InlineOrderPlugin PROPERTY FOLDER "Tests/UnitTests/AnalysisTests")
endif()
diff --git a/llvm/unittests/Analysis/ValueTrackingTest.cpp b/llvm/unittests/Analysis/ValueTrackingTest.cpp
index 8738af91b652..a30db468c772 100644
--- a/llvm/unittests/Analysis/ValueTrackingTest.cpp
+++ b/llvm/unittests/Analysis/ValueTrackingTest.cpp
@@ -2005,7 +2005,7 @@ TEST_F(ComputeKnownFPClassTest, SqrtNszSignBit) {
computeKnownFPClass(A4, M->getDataLayout(), fcAllFlags, 0, nullptr,
nullptr, nullptr, nullptr, /*UseInstrInfo=*/true);
EXPECT_EQ(fcPositive | fcQNan, UseInstrInfoNSZNoNan.KnownFPClasses);
- EXPECT_EQ(false, UseInstrInfoNSZNoNan.SignBit);
+ EXPECT_EQ(std::nullopt, UseInstrInfoNSZNoNan.SignBit);
KnownFPClass NoUseInstrInfoNSZNoNan =
computeKnownFPClass(A4, M->getDataLayout(), fcAllFlags, 0, nullptr,
diff --git a/llvm/unittests/CMakeLists.txt b/llvm/unittests/CMakeLists.txt
index 46f30ff398e1..b2f10b218588 100644
--- a/llvm/unittests/CMakeLists.txt
+++ b/llvm/unittests/CMakeLists.txt
@@ -1,5 +1,5 @@
add_custom_target(UnitTests)
-set_target_properties(UnitTests PROPERTIES FOLDER "Tests/UnitTests")
+set_target_properties(UnitTests PROPERTIES FOLDER "LLVM/Tests")
function(add_llvm_unittest test_dirname)
add_unittest(UnitTests ${test_dirname} ${ARGN})
diff --git a/llvm/unittests/DebugInfo/BTF/CMakeLists.txt b/llvm/unittests/DebugInfo/BTF/CMakeLists.txt
index b425e46b9f0c..6f7f684c58be 100644
--- a/llvm/unittests/DebugInfo/BTF/CMakeLists.txt
+++ b/llvm/unittests/DebugInfo/BTF/CMakeLists.txt
@@ -9,5 +9,3 @@ add_llvm_unittest(DebugInfoBTFTests
)
target_link_libraries(DebugInfoBTFTests PRIVATE LLVMTestingSupport)
-
-set_property(TARGET DebugInfoBTFTests PROPERTY FOLDER "Tests/UnitTests/DebugInfoTests")
diff --git a/llvm/unittests/DebugInfo/CodeView/CMakeLists.txt b/llvm/unittests/DebugInfo/CodeView/CMakeLists.txt
index 9a5eca14a879..5044b9bde290 100644
--- a/llvm/unittests/DebugInfo/CodeView/CMakeLists.txt
+++ b/llvm/unittests/DebugInfo/CodeView/CMakeLists.txt
@@ -10,5 +10,3 @@ add_llvm_unittest(DebugInfoCodeViewTests
)
target_link_libraries(DebugInfoCodeViewTests PRIVATE LLVMTestingSupport)
-
-set_property(TARGET DebugInfoCodeViewTests PROPERTY FOLDER "Tests/UnitTests/DebugInfoTests")
diff --git a/llvm/unittests/DebugInfo/DWARF/CMakeLists.txt b/llvm/unittests/DebugInfo/DWARF/CMakeLists.txt
index 1ba378638759..0c5b3f28ca3d 100644
--- a/llvm/unittests/DebugInfo/DWARF/CMakeLists.txt
+++ b/llvm/unittests/DebugInfo/DWARF/CMakeLists.txt
@@ -31,5 +31,3 @@ add_llvm_unittest(DebugInfoDWARFTests
)
target_link_libraries(DebugInfoDWARFTests PRIVATE LLVMTestingSupport)
-
-set_property(TARGET DebugInfoDWARFTests PROPERTY FOLDER "Tests/UnitTests/DebugInfoTests")
diff --git a/llvm/unittests/DebugInfo/GSYM/CMakeLists.txt b/llvm/unittests/DebugInfo/GSYM/CMakeLists.txt
index 7cd5af561c6d..029767471c86 100644
--- a/llvm/unittests/DebugInfo/GSYM/CMakeLists.txt
+++ b/llvm/unittests/DebugInfo/GSYM/CMakeLists.txt
@@ -11,5 +11,3 @@ add_llvm_unittest(DebugInfoGSYMTests
)
target_link_libraries(DebugInfoGSYMTests PRIVATE LLVMTestingSupport)
-
-set_property(TARGET DebugInfoGSYMTests PROPERTY FOLDER "Tests/UnitTests/DebugInfoTests")
diff --git a/llvm/unittests/DebugInfo/MSF/CMakeLists.txt b/llvm/unittests/DebugInfo/MSF/CMakeLists.txt
index 5e06ef683867..0e48ab8f2f6b 100644
--- a/llvm/unittests/DebugInfo/MSF/CMakeLists.txt
+++ b/llvm/unittests/DebugInfo/MSF/CMakeLists.txt
@@ -9,5 +9,3 @@ add_llvm_unittest(DebugInfoMSFTests
)
target_link_libraries(DebugInfoMSFTests PRIVATE LLVMTestingSupport)
-
-set_property(TARGET DebugInfoMSFTests PROPERTY FOLDER "Tests/UnitTests/DebugInfoTests")
diff --git a/llvm/unittests/DebugInfo/PDB/CMakeLists.txt b/llvm/unittests/DebugInfo/PDB/CMakeLists.txt
index 2907b556e3cd..c8c2659277a6 100644
--- a/llvm/unittests/DebugInfo/PDB/CMakeLists.txt
+++ b/llvm/unittests/DebugInfo/PDB/CMakeLists.txt
@@ -13,5 +13,3 @@ add_llvm_unittest_with_input_files(DebugInfoPDBTests
)
target_link_libraries(DebugInfoPDBTests PRIVATE LLVMTestingSupport)
-
-set_property(TARGET DebugInfoPDBTests PROPERTY FOLDER "Tests/UnitTests/DebugInfoTests")
diff --git a/llvm/unittests/ExecutionEngine/CMakeLists.txt b/llvm/unittests/ExecutionEngine/CMakeLists.txt
index ab6b9e7070f9..1bf210556b66 100644
--- a/llvm/unittests/ExecutionEngine/CMakeLists.txt
+++ b/llvm/unittests/ExecutionEngine/CMakeLists.txt
@@ -21,5 +21,3 @@ list(FIND LLVM_TARGETS_WITH_JIT "${LLVM_NATIVE_ARCH}" jit_idx)
if (NOT build_idx LESS 0 AND NOT jit_idx LESS 0)
add_subdirectory(MCJIT)
endif()
-
-set_property(TARGET ExecutionEngineTests PROPERTY FOLDER "Tests/UnitTests/ExecutionTests")
diff --git a/llvm/unittests/ExecutionEngine/JITLink/CMakeLists.txt b/llvm/unittests/ExecutionEngine/JITLink/CMakeLists.txt
index d4886754f0a9..82d277309307 100644
--- a/llvm/unittests/ExecutionEngine/JITLink/CMakeLists.txt
+++ b/llvm/unittests/ExecutionEngine/JITLink/CMakeLists.txt
@@ -18,5 +18,3 @@ add_llvm_unittest(JITLinkTests
)
target_link_libraries(JITLinkTests PRIVATE LLVMTestingSupport)
-
-set_property(TARGET JITLinkTests PROPERTY FOLDER "Tests/UnitTests/ExecutionTests")
diff --git a/llvm/unittests/ExecutionEngine/MCJIT/CMakeLists.txt b/llvm/unittests/ExecutionEngine/MCJIT/CMakeLists.txt
index 110f097e088a..3784ca290cb0 100644
--- a/llvm/unittests/ExecutionEngine/MCJIT/CMakeLists.txt
+++ b/llvm/unittests/ExecutionEngine/MCJIT/CMakeLists.txt
@@ -32,5 +32,3 @@ add_llvm_unittest(MCJITTests
if(MINGW OR CYGWIN)
set_property(TARGET MCJITTests PROPERTY LINK_FLAGS -Wl,--export-all-symbols)
endif()
-
-set_property(TARGET MCJITTests PROPERTY FOLDER "Tests/UnitTests/ExecutionTests")
diff --git a/llvm/unittests/ExecutionEngine/Orc/CMakeLists.txt b/llvm/unittests/ExecutionEngine/Orc/CMakeLists.txt
index 8a6a26bba63c..af431658c9b7 100644
--- a/llvm/unittests/ExecutionEngine/Orc/CMakeLists.txt
+++ b/llvm/unittests/ExecutionEngine/Orc/CMakeLists.txt
@@ -48,6 +48,4 @@ target_link_libraries(OrcJITTests PRIVATE
LLVMTestingSupport
${ORC_JIT_TEST_LIBS})
-set_property(TARGET OrcJITTests PROPERTY FOLDER "Tests/UnitTests/ExecutionTests")
-
export_executable_symbols(OrcJITTests)
diff --git a/llvm/unittests/IR/ConstantRangeTest.cpp b/llvm/unittests/IR/ConstantRangeTest.cpp
index 8ec120d70e99..ac2075cb4af4 100644
--- a/llvm/unittests/IR/ConstantRangeTest.cpp
+++ b/llvm/unittests/IR/ConstantRangeTest.cpp
@@ -209,6 +209,10 @@ static bool CheckAll(const ConstantRange &, const ConstantRange &) {
return true;
}
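+
+// Returning false makes TestBinaryOpExhaustive skip the optimality check for
+// every range pair, so only correctness (soundness) is verified.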
+static bool CheckCorrectnessOnly(const ConstantRange &, const ConstantRange &) {
+ return false;
+}
+
static bool CheckSingleElementsOnly(const ConstantRange &CR1,
const ConstantRange &CR2) {
return CR1.isSingleElement() && CR2.isSingleElement();
@@ -1019,18 +1023,102 @@ TEST_F(ConstantRangeTest, Multiply) {
});
}
+TEST_F(ConstantRangeTest, MultiplyWithNoWrap) {
+ using OBO = OverflowingBinaryOperator;
+
+ EXPECT_EQ(Empty.multiplyWithNoWrap(Some, OBO::NoUnsignedWrap), Empty);
+ EXPECT_EQ(Some.multiplyWithNoWrap(Empty, OBO::NoUnsignedWrap), Empty);
+ EXPECT_EQ(Full.multiplyWithNoWrap(Full, OBO::NoUnsignedWrap), Full);
+ EXPECT_EQ(Full.multiplyWithNoWrap(Some, OBO::NoUnsignedWrap), Full);
+ EXPECT_EQ(Some.multiplyWithNoWrap(Full, OBO::NoUnsignedWrap), Full);
+ EXPECT_EQ(ConstantRange(APInt(4, 0), APInt(4, 2))
+ .multiplyWithNoWrap(ConstantRange(APInt(4, 2), APInt(4, 0)),
+ OBO::NoUnsignedWrap),
+ ConstantRange::getFull(4));
+ EXPECT_EQ(ConstantRange(APInt(4, 1), APInt(4, 5))
+ .multiplyWithNoWrap(ConstantRange(APInt(4, 1), APInt(4, 5)),
+ OBO::NoUnsignedWrap),
+ ConstantRange(APInt(4, 1), APInt(4, 0)));
+ EXPECT_EQ(ConstantRange(APInt(8, 254), APInt(8, 0))
+ .multiplyWithNoWrap(ConstantRange(APInt(8, 252), APInt(8, 4)),
+ OBO::NoUnsignedWrap),
+ ConstantRange(APInt(8, 250), APInt(8, 9)));
+ EXPECT_EQ(ConstantRange(APInt(8, 254), APInt(8, 255))
+ .multiplyWithNoWrap(ConstantRange(APInt(8, 2), APInt(8, 4)),
+ OBO::NoUnsignedWrap),
+ ConstantRange::getEmpty(8));
+
+ EXPECT_EQ(Empty.multiplyWithNoWrap(Some, OBO::NoSignedWrap), Empty);
+ EXPECT_EQ(Some.multiplyWithNoWrap(Empty, OBO::NoSignedWrap), Empty);
+ EXPECT_EQ(Full.multiplyWithNoWrap(Full, OBO::NoSignedWrap), Full);
+ EXPECT_EQ(Full.multiplyWithNoWrap(Some, OBO::NoSignedWrap), Full);
+ EXPECT_EQ(Some.multiplyWithNoWrap(Full, OBO::NoSignedWrap), Full);
+ EXPECT_EQ(
+ ConstantRange(APInt(4, 0), APInt(4, 4))
+ .multiplyWithNoWrap(ConstantRange(APInt(4, -5, true), APInt(4, 4)),
+ OBO::NoSignedWrap),
+ ConstantRange::getFull(4));
+ EXPECT_EQ(ConstantRange(APInt(4, 0), APInt(4, 3))
+ .multiplyWithNoWrap(ConstantRange(APInt(4, 0), APInt(4, 5)),
+ OBO::NoSignedWrap),
+ ConstantRange(APInt(4, 0), APInt(4, -8, true)));
+ EXPECT_EQ(ConstantRange(APInt(8, 3), APInt(8, -11, true))
+ .multiplyWithNoWrap(ConstantRange(APInt(8, -1, true)),
+ OBO::NoSignedWrap),
+ ConstantRange(APInt(8, 12), APInt(8, -2, true)));
+ EXPECT_EQ(ConstantRange(APInt(8, 254), APInt(8, 255))
+ .multiplyWithNoWrap(ConstantRange(APInt(8, 100), APInt(8, 121)),
+ OBO::NoSignedWrap),
+ ConstantRange::getEmpty(8));
+
+ TestBinaryOpExhaustive(
+ [](const ConstantRange &CR1, const ConstantRange &CR2) {
+ return CR1.multiplyWithNoWrap(CR2, OBO::NoUnsignedWrap);
+ },
+ [](const APInt &N1, const APInt &N2) -> std::optional<APInt> {
+ bool IsOverflow;
+ APInt Res = N1.umul_ov(N2, IsOverflow);
+ if (IsOverflow)
+ return std::nullopt;
+ return Res;
+ },
+ PreferSmallest, CheckCorrectnessOnly);
+ TestBinaryOpExhaustive(
+ [](const ConstantRange &CR1, const ConstantRange &CR2) {
+ return CR1.multiplyWithNoWrap(CR2, OBO::NoSignedWrap);
+ },
+ [](const APInt &N1, const APInt &N2) -> std::optional<APInt> {
+ bool IsOverflow;
+ APInt Res = N1.smul_ov(N2, IsOverflow);
+ if (IsOverflow)
+ return std::nullopt;
+ return Res;
+ },
+ PreferSmallest, CheckCorrectnessOnly);
+ TestBinaryOpExhaustive(
+ [](const ConstantRange &CR1, const ConstantRange &CR2) {
+ return CR1.multiplyWithNoWrap(CR2,
+ OBO::NoUnsignedWrap | OBO::NoSignedWrap);
+ },
+ [](const APInt &N1, const APInt &N2) -> std::optional<APInt> {
+ bool IsOverflow1, IsOverflow2;
+ APInt Res1 = N1.umul_ov(N2, IsOverflow1);
+ APInt Res2 = N1.smul_ov(N2, IsOverflow2);
+ if (IsOverflow1 || IsOverflow2)
+ return std::nullopt;
+ assert(Res1 == Res2 && "Multiplication results differ?");
+ return Res1;
+ },
+ PreferSmallest, CheckCorrectnessOnly);
+}
+
TEST_F(ConstantRangeTest, smul_fast) {
TestBinaryOpExhaustive(
[](const ConstantRange &CR1, const ConstantRange &CR2) {
return CR1.smul_fast(CR2);
},
- [](const APInt &N1, const APInt &N2) {
- return N1 * N2;
- },
- PreferSmallest,
- [](const ConstantRange &, const ConstantRange &) {
- return false; // Check correctness only.
- });
+ [](const APInt &N1, const APInt &N2) { return N1 * N2; }, PreferSmallest,
+ CheckCorrectnessOnly);
}
TEST_F(ConstantRangeTest, UMax) {
diff --git a/llvm/unittests/IR/MDBuilderTest.cpp b/llvm/unittests/IR/MDBuilderTest.cpp
index 2b5ab81b6066..4656c70ce9ca 100644
--- a/llvm/unittests/IR/MDBuilderTest.cpp
+++ b/llvm/unittests/IR/MDBuilderTest.cpp
@@ -127,4 +127,43 @@ TEST_F(MDBuilderTest, createPCSections) {
EXPECT_EQ(mdconst::extract<ConstantInt>(Aux->getOperand(1))->getValue(),
C2->getValue());
}
+TEST_F(MDBuilderTest, createCallbackAndMerge) {
+ MDBuilder MDHelper(Context);
+ auto *CB1 = MDHelper.createCallbackEncoding(0, {1, -1}, false);
+ auto *CB2 = MDHelper.createCallbackEncoding(2, {-1}, false);
+ ASSERT_EQ(CB1->getNumOperands(), 4U);
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB1->getOperand(0)));
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB1->getOperand(1)));
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB1->getOperand(2)));
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB1->getOperand(3)));
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB1->getOperand(0))->getValue(), 0);
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB1->getOperand(1))->getValue(), 1);
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB1->getOperand(2))->getValue(), -1);
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB1->getOperand(3))->getValue(),
+ false);
+ ASSERT_EQ(CB2->getNumOperands(), 3U);
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB2->getOperand(0)));
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB2->getOperand(1)));
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB2->getOperand(2)));
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB2->getOperand(0))->getValue(), 2);
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB2->getOperand(1))->getValue(), -1);
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB2->getOperand(2))->getValue(),
+ false);
+ auto *CBList = MDNode::get(Context, {CB1, CB2});
+ auto *CB3 = MDHelper.createCallbackEncoding(4, {5}, false);
+ auto *NewCBList = MDHelper.mergeCallbackEncodings(CBList, CB3);
+ ASSERT_EQ(NewCBList->getNumOperands(), 3U);
+ EXPECT_TRUE(NewCBList->getOperand(0) == CB1);
+ EXPECT_TRUE(NewCBList->getOperand(1) == CB2);
+ EXPECT_TRUE(NewCBList->getOperand(2) == CB3);
+
+ ASSERT_EQ(CB3->getNumOperands(), 3U);
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB3->getOperand(0)));
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB3->getOperand(1)));
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB3->getOperand(2)));
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB3->getOperand(0))->getValue(), 4);
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB3->getOperand(1))->getValue(), 5);
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB3->getOperand(2))->getValue(),
+ false);
+}
} // namespace
diff --git a/llvm/unittests/ProfileData/BPFunctionNodeTest.cpp b/llvm/unittests/ProfileData/BPFunctionNodeTest.cpp
index 6af6f1bcdc40..24586b5aa31a 100644
--- a/llvm/unittests/ProfileData/BPFunctionNodeTest.cpp
+++ b/llvm/unittests/ProfileData/BPFunctionNodeTest.cpp
@@ -8,7 +8,6 @@
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/BalancedPartitioning.h"
-#include "llvm/Testing/Support/SupportHelpers.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
@@ -31,22 +30,32 @@ TEST(BPFunctionNodeTest, Basic) {
UnorderedElementsAreArray(UNs)));
};
- auto Nodes = TemporalProfTraceTy::createBPFunctionNodes({
- TemporalProfTraceTy({0, 1, 2, 3}),
- });
+ std::vector<BPFunctionNode> Nodes;
+ TemporalProfTraceTy::createBPFunctionNodes(
+ {TemporalProfTraceTy({0, 1, 2, 3})}, Nodes, /*RemoveOutlierUNs=*/false);
+ // Utility nodes that are too infrequent or too prevalent are filtered out.
EXPECT_THAT(Nodes,
UnorderedElementsAre(NodeIs(0, {0, 1, 2}), NodeIs(1, {1, 2}),
- NodeIs(2, {1, 2}), NodeIs(3, {2})));
+ NodeIs(2, {2}), NodeIs(3, {2})));
- Nodes = TemporalProfTraceTy::createBPFunctionNodes({
- TemporalProfTraceTy({0, 1, 2, 3, 4}),
- TemporalProfTraceTy({4, 2}),
- });
+ Nodes.clear();
+ TemporalProfTraceTy::createBPFunctionNodes(
+ {TemporalProfTraceTy({0, 1, 2, 3, 4}), TemporalProfTraceTy({4, 2})},
+ Nodes, /*RemoveOutlierUNs=*/false);
EXPECT_THAT(Nodes,
- UnorderedElementsAre(NodeIs(0, {0, 1, 2}), NodeIs(1, {1, 2}),
- NodeIs(2, {1, 2, 4, 5}), NodeIs(3, {2}),
- NodeIs(4, {2, 3, 4, 5})));
+ UnorderedElementsAre(NodeIs(0, {0, 1, 2, 3}),
+ NodeIs(1, {1, 2, 3}), NodeIs(2, {2, 3, 5}),
+ NodeIs(3, {2, 3}), NodeIs(4, {3, 4, 5})));
+
+ Nodes.clear();
+ TemporalProfTraceTy::createBPFunctionNodes(
+ {TemporalProfTraceTy({0, 1, 2, 3, 4}), TemporalProfTraceTy({4, 2})},
+ Nodes, /*RemoveOutlierUNs=*/true);
+
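+  // With RemoveOutlierUNs=true, utility nodes that are too infrequent or too
+  // prevalent are dropped, so only the discriminative ones remain (node 3
+  // keeps none).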
+ EXPECT_THAT(Nodes, UnorderedElementsAre(NodeIs(0, {1}), NodeIs(1, {1}),
+ NodeIs(2, {5}), NodeIs(3, {}),
+ NodeIs(4, {5})));
}
} // end namespace llvm
diff --git a/llvm/unittests/Support/CommandLineInit/CMakeLists.txt b/llvm/unittests/Support/CommandLineInit/CMakeLists.txt
index 1889c8fba9a7..adcebb5cdbeb 100644
--- a/llvm/unittests/Support/CommandLineInit/CMakeLists.txt
+++ b/llvm/unittests/Support/CommandLineInit/CMakeLists.txt
@@ -33,7 +33,3 @@ add_dependencies(${test_suite} ${test_name})
set(outdir ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR})
set_output_directory(${test_name} BINARY_DIR ${outdir} LIBRARY_DIR ${outdir})
-get_target_property(test_suite_folder ${test_suite} FOLDER)
-if (test_suite_folder)
- set_property(TARGET ${test_name} PROPERTY FOLDER "${test_suite_folder}")
-endif ()
diff --git a/llvm/unittests/Support/DynamicLibrary/CMakeLists.txt b/llvm/unittests/Support/DynamicLibrary/CMakeLists.txt
index 9e5d3b3dfb15..4574acd62906 100644
--- a/llvm/unittests/Support/DynamicLibrary/CMakeLists.txt
+++ b/llvm/unittests/Support/DynamicLibrary/CMakeLists.txt
@@ -6,7 +6,7 @@ set(LLVM_LINK_COMPONENTS Support)
add_library(DynamicLibraryLib STATIC
ExportedFuncs.cpp
)
-set_target_properties(DynamicLibraryLib PROPERTIES FOLDER "Tests")
+set_target_properties(DynamicLibraryLib PROPERTIES FOLDER "LLVM/Tests/Support")
# extract_symbols.py relies on all its library arguments being in the same
# directory, so we must set the output directory in the same way as if
@@ -25,7 +25,7 @@ function(dynlib_add_module NAME)
add_library(${NAME} MODULE
PipSqueak.cpp
)
- set_target_properties(${NAME} PROPERTIES FOLDER "Tests")
+ set_target_properties(${NAME} PROPERTIES FOLDER "LLVM/Tests/Support")
set_output_directory(${NAME}
BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}
diff --git a/llvm/unittests/Support/KnownBitsTest.cpp b/llvm/unittests/Support/KnownBitsTest.cpp
index d74070702716..824cf7501fd4 100644
--- a/llvm/unittests/Support/KnownBitsTest.cpp
+++ b/llvm/unittests/Support/KnownBitsTest.cpp
@@ -501,6 +501,18 @@ TEST(KnownBitsTest, BinaryExhaustive) {
"mulhu", KnownBits::mulhu,
[](const APInt &N1, const APInt &N2) { return APIntOps::mulhu(N1, N2); },
/*CheckOptimality=*/false);
+
+ testBinaryOpExhaustive("avgFloorS", KnownBits::avgFloorS, APIntOps::avgFloorS,
+ false);
+
+ testBinaryOpExhaustive("avgFloorU", KnownBits::avgFloorU, APIntOps::avgFloorU,
+ false);
+
+ testBinaryOpExhaustive("avgCeilU", KnownBits::avgCeilU, APIntOps::avgCeilU,
+ false);
+
+ testBinaryOpExhaustive("avgCeilS", KnownBits::avgCeilS, APIntOps::avgCeilS,
+ false);
}
TEST(KnownBitsTest, UnaryExhaustive) {
diff --git a/llvm/unittests/Support/LEB128Test.cpp b/llvm/unittests/Support/LEB128Test.cpp
index 60f5ddd568ca..5aa7139c45a7 100644
--- a/llvm/unittests/Support/LEB128Test.cpp
+++ b/llvm/unittests/Support/LEB128Test.cpp
@@ -147,7 +147,8 @@ TEST(LEB128Test, DecodeULEB128) {
TEST(LEB128Test, DecodeInvalidULEB128) {
#define EXPECT_INVALID_ULEB128(VALUE, ERROR_OFFSET) \
do { \
- const uint8_t *Value = reinterpret_cast<const uint8_t *>(VALUE); \
+ const char *DefaultValue = VALUE; \
+ const uint8_t *Value = reinterpret_cast<const uint8_t *>(DefaultValue); \
const char *Error = nullptr; \
unsigned ErrorOffset = 0; \
uint64_t Actual = \
@@ -155,12 +156,13 @@ TEST(LEB128Test, DecodeInvalidULEB128) {
EXPECT_NE(Error, nullptr); \
EXPECT_EQ(0ul, Actual); \
EXPECT_EQ(ERROR_OFFSET, ErrorOffset); \
- Value = reinterpret_cast<const uint8_t *>(VALUE); \
+ Value = reinterpret_cast<const uint8_t *>(DefaultValue); \
Error = nullptr; \
Actual = decodeULEB128AndInc(Value, Value + strlen(VALUE), &Error); \
EXPECT_NE(Error, nullptr); \
EXPECT_EQ(0ul, Actual); \
- EXPECT_EQ(ERROR_OFFSET, Value - reinterpret_cast<const uint8_t *>(VALUE)); \
+ EXPECT_EQ(ERROR_OFFSET, \
+ Value - reinterpret_cast<const uint8_t *>(DefaultValue)); \
} while (0)
// Buffer overflow.
@@ -222,7 +224,8 @@ TEST(LEB128Test, DecodeSLEB128) {
TEST(LEB128Test, DecodeInvalidSLEB128) {
#define EXPECT_INVALID_SLEB128(VALUE, ERROR_OFFSET) \
do { \
- const uint8_t *Value = reinterpret_cast<const uint8_t *>(VALUE); \
+ const char *DefaultValue = VALUE; \
+ const uint8_t *Value = reinterpret_cast<const uint8_t *>(DefaultValue); \
const char *Error = nullptr; \
unsigned ErrorOffset = 0; \
uint64_t Actual = \
@@ -230,12 +233,13 @@ TEST(LEB128Test, DecodeInvalidSLEB128) {
EXPECT_NE(Error, nullptr); \
EXPECT_EQ(0ul, Actual); \
EXPECT_EQ(ERROR_OFFSET, ErrorOffset); \
- Value = reinterpret_cast<const uint8_t *>(VALUE); \
+ Value = reinterpret_cast<const uint8_t *>(DefaultValue); \
Error = nullptr; \
Actual = decodeSLEB128AndInc(Value, Value + strlen(VALUE), &Error); \
EXPECT_NE(Error, nullptr); \
EXPECT_EQ(0ul, Actual); \
- EXPECT_EQ(ERROR_OFFSET, Value - reinterpret_cast<const uint8_t *>(VALUE)); \
+ EXPECT_EQ(ERROR_OFFSET, \
+ Value - reinterpret_cast<const uint8_t *>(DefaultValue)); \
} while (0)
// Buffer overflow.
@@ -257,7 +261,9 @@ TEST(LEB128Test, DecodeInvalidSLEB128) {
TEST(LEB128Test, DecodeAndInc) {
#define EXPECT_LEB128(FUN, VALUE, SIZE) \
do { \
- const uint8_t *V = reinterpret_cast<const uint8_t *>(VALUE), *P = V; \
+ const char *DefaultValue = VALUE; \
+ const uint8_t *V = reinterpret_cast<const uint8_t *>(DefaultValue), \
+ *P = V; \
auto Expected = FUN(P), Actual = FUN##AndInc(P, P + strlen(VALUE)); \
EXPECT_EQ(Actual, Expected); \
EXPECT_EQ(P - V, SIZE); \
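
In the three macros above, the argument VALUE (a string literal) is now bound once to DefaultValue, so every reinterpret_cast refers to the same evaluated expression instead of re-expanding the macro argument at each use. For context, a minimal sketch of the ULEB128 decoding these tests exercise, with the simplest possible failure handling (the real decodeULEB128AndInc also reports an error string and offset):

    #include <cstdint>

    // Decode an unsigned LEB128 value from [P, End): seven payload bits per
    // byte, high bit set on every byte except the last. Sets Malformed and
    // returns 0 if the buffer ends before a terminating byte is seen.
    static uint64_t decodeULEB128Sketch(const uint8_t *P, const uint8_t *End,
                                        bool &Malformed) {
      uint64_t Value = 0;
      unsigned Shift = 0;
      Malformed = true;
      while (P != End) {
        uint8_t Byte = *P++;
        Value |= uint64_t(Byte & 0x7f) << Shift; // low 7 bits are payload
        if ((Byte & 0x80) == 0) {                // high bit clear: last byte
          Malformed = false;
          return Value;
        }
        Shift += 7;
      }
      return 0; // ran off the end of the buffer
    }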
diff --git a/llvm/unittests/Support/raw_socket_stream_test.cpp b/llvm/unittests/Support/raw_socket_stream_test.cpp
index a8536228666d..c4e8cfbbe7e6 100644
--- a/llvm/unittests/Support/raw_socket_stream_test.cpp
+++ b/llvm/unittests/Support/raw_socket_stream_test.cpp
@@ -7,7 +7,6 @@
#include "llvm/Testing/Support/Error.h"
#include "gtest/gtest.h"
#include <future>
-#include <iostream>
#include <stdlib.h>
#include <thread>
@@ -86,13 +85,8 @@ TEST(raw_socket_streamTest, TIMEOUT_PROVIDED) {
std::chrono::milliseconds Timeout = std::chrono::milliseconds(100);
Expected<std::unique_ptr<raw_socket_stream>> MaybeServer =
ServerListener.accept(Timeout);
-
- ASSERT_THAT_EXPECTED(MaybeServer, Failed());
- llvm::Error Err = MaybeServer.takeError();
- llvm::handleAllErrors(std::move(Err), [&](const llvm::StringError &SE) {
- std::error_code EC = SE.convertToErrorCode();
- ASSERT_EQ(EC, std::errc::timed_out);
- });
+ ASSERT_EQ(llvm::errorToErrorCode(MaybeServer.takeError()),
+ std::errc::timed_out);
}
TEST(raw_socket_streamTest, FILE_DESCRIPTOR_CLOSED) {
@@ -122,12 +116,7 @@ TEST(raw_socket_streamTest, FILE_DESCRIPTOR_CLOSED) {
// Wait for the CloseThread to finish
CloseThread.join();
-
- ASSERT_THAT_EXPECTED(MaybeServer, Failed());
- llvm::Error Err = MaybeServer.takeError();
- llvm::handleAllErrors(std::move(Err), [&](const llvm::StringError &SE) {
- std::error_code EC = SE.convertToErrorCode();
- ASSERT_EQ(EC, std::errc::operation_canceled);
- });
+ ASSERT_EQ(llvm::errorToErrorCode(MaybeServer.takeError()),
+ std::errc::operation_canceled);
}
} // namespace
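
Both tests now collapse the Failed()/handleAllErrors boilerplate into a single assertion: llvm::errorToErrorCode consumes the Error and yields a std::error_code that compares directly against std::errc enumerators. A small usage sketch (the Expected parameter stands in for any fallible LLVM API):

    #include "llvm/Support/Error.h"
    #include <cassert>
    #include <system_error>

    void expectTimeout(llvm::Expected<int> MaybeValue) {
      if (!MaybeValue) {
        // takeError() may be called only once; errorToErrorCode consumes it.
        std::error_code EC = llvm::errorToErrorCode(MaybeValue.takeError());
        assert(EC == std::errc::timed_out && "unexpected failure kind");
      }
    }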
diff --git a/llvm/unittests/Target/AArch64/CMakeLists.txt b/llvm/unittests/Target/AArch64/CMakeLists.txt
index 64ab991ac479..f53668373efe 100644
--- a/llvm/unittests/Target/AArch64/CMakeLists.txt
+++ b/llvm/unittests/Target/AArch64/CMakeLists.txt
@@ -31,5 +31,3 @@ add_llvm_target_unittest(AArch64Tests
AArch64SVESchedPseudoTest.cpp
Immediates.cpp
)
-
-set_property(TARGET AArch64Tests PROPERTY FOLDER "Tests/UnitTests/TargetTests")
diff --git a/llvm/unittests/Target/AMDGPU/CMakeLists.txt b/llvm/unittests/Target/AMDGPU/CMakeLists.txt
index 2d7a47943df6..ae5052925712 100644
--- a/llvm/unittests/Target/AMDGPU/CMakeLists.txt
+++ b/llvm/unittests/Target/AMDGPU/CMakeLists.txt
@@ -21,5 +21,3 @@ add_llvm_target_unittest(AMDGPUTests
DwarfRegMappings.cpp
ExecMayBeModifiedBeforeAnyUse.cpp
)
-
-set_property(TARGET AMDGPUTests PROPERTY FOLDER "Tests/UnitTests/TargetTests")
diff --git a/llvm/unittests/Target/ARM/CMakeLists.txt b/llvm/unittests/Target/ARM/CMakeLists.txt
index 3da6cd95caed..5da249708abf 100644
--- a/llvm/unittests/Target/ARM/CMakeLists.txt
+++ b/llvm/unittests/Target/ARM/CMakeLists.txt
@@ -23,5 +23,3 @@ add_llvm_target_unittest(ARMTests
MachineInstrTest.cpp
InstSizes.cpp
)
-
-set_property(TARGET ARMTests PROPERTY FOLDER "Tests/UnitTests/TargetTests")
diff --git a/llvm/unittests/Target/CMakeLists.txt b/llvm/unittests/Target/CMakeLists.txt
index b10236a2948f..589314fad889 100644
--- a/llvm/unittests/Target/CMakeLists.txt
+++ b/llvm/unittests/Target/CMakeLists.txt
@@ -9,6 +9,3 @@ set(LLVM_LINK_COMPONENTS Core Target AllTargetsCodeGens AllTargetsDescs AllTarge
add_llvm_unittest(TargetMachineCTests
TargetMachineOptionsTest.cpp
)
-
-set_property(TARGET TargetMachineCTests
- PROPERTY FOLDER "Tests/UnitTests/TargetTests")
diff --git a/llvm/unittests/Target/LoongArch/CMakeLists.txt b/llvm/unittests/Target/LoongArch/CMakeLists.txt
index e6f8ec073721..6e7e49b4cb4e 100644
--- a/llvm/unittests/Target/LoongArch/CMakeLists.txt
+++ b/llvm/unittests/Target/LoongArch/CMakeLists.txt
@@ -22,5 +22,3 @@ add_llvm_target_unittest(LoongArchTests
InstSizes.cpp
MCInstrAnalysisTest.cpp
)
-
-set_property(TARGET LoongArchTests PROPERTY FOLDER "Tests/UnitTests/TargetTests")
diff --git a/llvm/unittests/Target/PowerPC/CMakeLists.txt b/llvm/unittests/Target/PowerPC/CMakeLists.txt
index 893e7587b372..a45052950704 100644
--- a/llvm/unittests/Target/PowerPC/CMakeLists.txt
+++ b/llvm/unittests/Target/PowerPC/CMakeLists.txt
@@ -16,5 +16,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_unittest(PowerPCTests
AIXRelocModelTest.cpp
)
-
-set_property(TARGET PowerPCTests PROPERTY FOLDER "Tests/UnitTests/TargetTests")
diff --git a/llvm/unittests/Target/RISCV/CMakeLists.txt b/llvm/unittests/Target/RISCV/CMakeLists.txt
index 0a64bf273e31..10d6412f9b35 100644
--- a/llvm/unittests/Target/RISCV/CMakeLists.txt
+++ b/llvm/unittests/Target/RISCV/CMakeLists.txt
@@ -19,5 +19,3 @@ add_llvm_target_unittest(RISCVTests
MCInstrAnalysisTest.cpp
RISCVInstrInfoTest.cpp
)
-
-set_property(TARGET RISCVTests PROPERTY FOLDER "Tests/UnitTests/TargetTests")
diff --git a/llvm/unittests/Target/WebAssembly/CMakeLists.txt b/llvm/unittests/Target/WebAssembly/CMakeLists.txt
index 6175e6bfe6e1..b1e01169e7a0 100644
--- a/llvm/unittests/Target/WebAssembly/CMakeLists.txt
+++ b/llvm/unittests/Target/WebAssembly/CMakeLists.txt
@@ -18,5 +18,3 @@ set(LLVM_LINK_COMPONENTS
add_llvm_target_unittest(WebAssemblyTests
WebAssemblyExceptionInfoTest.cpp
)
-
-set_property(TARGET WebAssemblyTests PROPERTY FOLDER "Tests/UnitTests/TargetTests")
diff --git a/llvm/unittests/Target/X86/CMakeLists.txt b/llvm/unittests/Target/X86/CMakeLists.txt
index 2b371966d1bc..b011681aa3b9 100644
--- a/llvm/unittests/Target/X86/CMakeLists.txt
+++ b/llvm/unittests/Target/X86/CMakeLists.txt
@@ -25,5 +25,3 @@ add_llvm_unittest(X86Tests
MachineSizeOptsTest.cpp
TernlogTest.cpp
)
-
-set_property(TARGET X86Tests PROPERTY FOLDER "Tests/UnitTests/TargetTests")
diff --git a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
index 22fe31809319..df4066980093 100644
--- a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
+++ b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
@@ -934,7 +934,9 @@ R"(All available -march extensions for RISC-V
zmmul 1.0
za128rs 1.0
za64rs 1.0
+ zaamo 1.0
zacas 1.0
+ zalrsc 1.0
zama16b 1.0
zawrs 1.0
zfa 1.0
@@ -1060,10 +1062,8 @@ R"(All available -march extensions for RISC-V
Experimental extensions
zicfilp 0.4 This is a long dummy description
zicfiss 0.4
- zaamo 0.2
zabha 1.0
zalasr 0.1
- zalrsc 0.2
zfbfmin 1.0
ztso 0.1
zvfbfmin 1.0
diff --git a/llvm/unittests/TargetParser/TargetParserTest.cpp b/llvm/unittests/TargetParser/TargetParserTest.cpp
index 0455e061f0bf..797d7dfbca20 100644
--- a/llvm/unittests/TargetParser/TargetParserTest.cpp
+++ b/llvm/unittests/TargetParser/TargetParserTest.cpp
@@ -1996,7 +1996,6 @@ TEST(TargetParserTest, AArch64ExtensionFeatures) {
AArch64::AEK_D128, AArch64::AEK_LSE128,
AArch64::AEK_SPECRES2, AArch64::AEK_RASV2,
AArch64::AEK_ITE, AArch64::AEK_GCS,
- AArch64::AEK_FPMR, AArch64::AEK_FP8,
AArch64::AEK_FAMINMAX, AArch64::AEK_FP8FMA,
AArch64::AEK_SSVE_FP8FMA, AArch64::AEK_FP8DOT2,
AArch64::AEK_SSVE_FP8DOT2, AArch64::AEK_FP8DOT4,
@@ -2005,7 +2004,8 @@ TEST(TargetParserTest, AArch64ExtensionFeatures) {
AArch64::AEK_SMEF8F32, AArch64::AEK_SMEFA64,
AArch64::AEK_CPA, AArch64::AEK_PAUTHLR,
AArch64::AEK_TLBIW, AArch64::AEK_JSCVT,
- AArch64::AEK_FCMA,
+ AArch64::AEK_FCMA, AArch64::AEK_FP8,
};
std::vector<StringRef> Features;
@@ -2078,7 +2078,6 @@ TEST(TargetParserTest, AArch64ExtensionFeatures) {
EXPECT_TRUE(llvm::is_contained(Features, "+specres2"));
EXPECT_TRUE(llvm::is_contained(Features, "+ite"));
EXPECT_TRUE(llvm::is_contained(Features, "+gcs"));
- EXPECT_TRUE(llvm::is_contained(Features, "+fpmr"));
EXPECT_TRUE(llvm::is_contained(Features, "+fp8"));
EXPECT_TRUE(llvm::is_contained(Features, "+faminmax"));
EXPECT_TRUE(llvm::is_contained(Features, "+fp8fma"));
@@ -2224,7 +2223,6 @@ TEST(TargetParserTest, AArch64ArchExtFeature) {
{"predres2", "nopredres2", "+specres2", "-specres2"},
{"rasv2", "norasv2", "+rasv2", "-rasv2"},
{"gcs", "nogcs", "+gcs", "-gcs"},
- {"fpmr", "nofpmr", "+fpmr", "-fpmr"},
{"fp8", "nofp8", "+fp8", "-fp8"},
{"faminmax", "nofaminmax", "+faminmax", "-faminmax"},
{"fp8fma", "nofp8fma", "+fp8fma", "-fp8fma"},
diff --git a/llvm/unittests/Transforms/Coroutines/CMakeLists.txt b/llvm/unittests/Transforms/Coroutines/CMakeLists.txt
index 0913e82a3443..8a457753ce3e 100644
--- a/llvm/unittests/Transforms/Coroutines/CMakeLists.txt
+++ b/llvm/unittests/Transforms/Coroutines/CMakeLists.txt
@@ -14,5 +14,3 @@ add_llvm_unittest(CoroTests
)
target_link_libraries(CoroTests PRIVATE LLVMTestingSupport)
-
-set_property(TARGET CoroTests PROPERTY FOLDER "Tests/UnitTests/TransformTests")
diff --git a/llvm/unittests/Transforms/IPO/CMakeLists.txt b/llvm/unittests/Transforms/IPO/CMakeLists.txt
index 4e4372179b46..ac632450d573 100644
--- a/llvm/unittests/Transforms/IPO/CMakeLists.txt
+++ b/llvm/unittests/Transforms/IPO/CMakeLists.txt
@@ -14,5 +14,3 @@ add_llvm_unittest(IPOTests
AttributorTest.cpp
FunctionSpecializationTest.cpp
)
-
-set_property(TARGET IPOTests PROPERTY FOLDER "Tests/UnitTests/TransformsTests")
diff --git a/llvm/unittests/Transforms/Scalar/CMakeLists.txt b/llvm/unittests/Transforms/Scalar/CMakeLists.txt
index 5e37ecf4fe4a..eaa2c4eb27a1 100644
--- a/llvm/unittests/Transforms/Scalar/CMakeLists.txt
+++ b/llvm/unittests/Transforms/Scalar/CMakeLists.txt
@@ -19,5 +19,3 @@ target_link_libraries(ScalarTests PRIVATE LLVMTestingSupport)
if (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 6.0 AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9.0)
set_source_files_properties(LoopPassManagerTest.cpp PROPERTIES COMPILE_FLAGS -Wno-unused-function)
endif()
-
-set_property(TARGET ScalarTests PROPERTY FOLDER "Tests/UnitTests/TransformsTests")
diff --git a/llvm/unittests/Transforms/Utils/CMakeLists.txt b/llvm/unittests/Transforms/Utils/CMakeLists.txt
index 2974811d6c41..35055baa05ee 100644
--- a/llvm/unittests/Transforms/Utils/CMakeLists.txt
+++ b/llvm/unittests/Transforms/Utils/CMakeLists.txt
@@ -32,5 +32,3 @@ add_llvm_unittest(UtilsTests
ValueMapperTest.cpp
ProfDataUtilTest.cpp
)
-
-set_property(TARGET UtilsTests PROPERTY FOLDER "Tests/UnitTests/TransformsTests")
diff --git a/llvm/unittests/Transforms/Utils/CallPromotionUtilsTest.cpp b/llvm/unittests/Transforms/Utils/CallPromotionUtilsTest.cpp
index 0e9641c5846f..2d457eb3b678 100644
--- a/llvm/unittests/Transforms/Utils/CallPromotionUtilsTest.cpp
+++ b/llvm/unittests/Transforms/Utils/CallPromotionUtilsTest.cpp
@@ -8,9 +8,12 @@
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/NoFolder.h"
#include "llvm/Support/SourceMgr.h"
#include "gtest/gtest.h"
@@ -24,6 +27,21 @@ static std::unique_ptr<Module> parseIR(LLVMContext &C, const char *IR) {
return Mod;
}
+// Returns a constant representing the vtable's address point specified by the
+// offset.
+static Constant *getVTableAddressPointOffset(GlobalVariable *VTable,
+ uint32_t AddressPointOffset) {
+ Module &M = *VTable->getParent();
+ LLVMContext &Context = M.getContext();
+ assert(AddressPointOffset <
+ M.getDataLayout().getTypeAllocSize(VTable->getValueType()) &&
+ "Out-of-bound access");
+
+ return ConstantExpr::getInBoundsGetElementPtr(
+ Type::getInt8Ty(Context), VTable,
+ llvm::ConstantInt::get(Type::getInt32Ty(Context), AddressPointOffset));
+}
+
TEST(CallPromotionUtilsTest, TryPromoteCall) {
LLVMContext C;
std::unique_ptr<Module> M = parseIR(C,
@@ -368,3 +386,73 @@ declare %struct2 @_ZN4Impl3RunEv(%class.Impl* %this)
bool IsPromoted = tryPromoteCall(*CI);
EXPECT_FALSE(IsPromoted);
}
+
+TEST(CallPromotionUtilsTest, promoteCallWithVTableCmp) {
+ LLVMContext C;
+ std::unique_ptr<Module> M = parseIR(C,
+ R"IR(
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@_ZTV5Base1 = constant { [4 x ptr] } { [4 x ptr] [ptr null, ptr null, ptr @_ZN5Base15func0Ev, ptr @_ZN5Base15func1Ev] }, !type !0
+@_ZTV8Derived1 = constant { [4 x ptr], [3 x ptr] } { [4 x ptr] [ptr inttoptr (i64 -8 to ptr), ptr null, ptr @_ZN5Base15func0Ev, ptr @_ZN5Base15func1Ev], [3 x ptr] [ptr null, ptr null, ptr @_ZN5Base25func2Ev] }, !type !0, !type !1, !type !2
+@_ZTV8Derived2 = constant { [3 x ptr], [3 x ptr], [4 x ptr] } { [3 x ptr] [ptr null, ptr null, ptr @_ZN5Base35func3Ev], [3 x ptr] [ptr inttoptr (i64 -8 to ptr), ptr null, ptr @_ZN5Base25func2Ev], [4 x ptr] [ptr inttoptr (i64 -16 to ptr), ptr null, ptr @_ZN5Base15func0Ev, ptr @_ZN5Base15func1Ev] }, !type !3, !type !4, !type !5, !type !6
+
+define i32 @testfunc(ptr %d) {
+entry:
+ %vtable = load ptr, ptr %d, !prof !7
+ %vfn = getelementptr inbounds ptr, ptr %vtable, i64 1
+ %0 = load ptr, ptr %vfn
+ %call = tail call i32 %0(ptr %d), !prof !8
+ ret i32 %call
+}
+
+define i32 @_ZN5Base15func1Ev(ptr %this) {
+entry:
+ ret i32 2
+}
+
+declare i32 @_ZN5Base25func2Ev(ptr)
+declare i32 @_ZN5Base15func0Ev(ptr)
+declare void @_ZN5Base35func3Ev(ptr)
+
+!0 = !{i64 16, !"_ZTS5Base1"}
+!1 = !{i64 48, !"_ZTS5Base2"}
+!2 = !{i64 16, !"_ZTS8Derived1"}
+!3 = !{i64 64, !"_ZTS5Base1"}
+!4 = !{i64 40, !"_ZTS5Base2"}
+!5 = !{i64 16, !"_ZTS5Base3"}
+!6 = !{i64 16, !"_ZTS8Derived2"}
+!7 = !{!"VP", i32 2, i64 1600, i64 -9064381665493407289, i64 800, i64 5035968517245772950, i64 500, i64 3215870116411581797, i64 300}
+!8 = !{!"VP", i32 0, i64 1600, i64 6804820478065511155, i64 1600})IR");
+
+ Function *F = M->getFunction("testfunc");
+ CallInst *CI = dyn_cast<CallInst>(&*std::next(F->front().rbegin()));
+ ASSERT_TRUE(CI && CI->isIndirectCall());
+
+ // Create the constant and the branch weights
+ SmallVector<Constant *, 3> VTableAddressPoints;
+
+ for (auto &[VTableName, AddressPointOffset] : {std::pair{"_ZTV5Base1", 16},
+ {"_ZTV8Derived1", 16},
+ {"_ZTV8Derived2", 64}})
+ VTableAddressPoints.push_back(getVTableAddressPointOffset(
+ M->getGlobalVariable(VTableName), AddressPointOffset));
+
+ MDBuilder MDB(C);
+ MDNode *BranchWeights = MDB.createBranchWeights(1600, 0);
+
+ size_t OrigEntryBBSize = F->front().size();
+
+ LoadInst *VPtr = dyn_cast<LoadInst>(&*F->front().begin());
+
+ Function *Callee = M->getFunction("_ZN5Base15func1Ev");
+ // Tests that promoted direct call is returned.
+ CallBase &DirectCB = promoteCallWithVTableCmp(
+ *CI, VPtr, Callee, VTableAddressPoints, BranchWeights);
+ EXPECT_EQ(DirectCB.getCalledOperand(), Callee);
+
+ // Promotion inserts 3 icmp instructions and 2 or instructions, and removes
+ // 1 call instruction from the entry block.
+ EXPECT_EQ(F->front().size(), OrigEntryBBSize + 4);
+}
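
For orientation, a sketch of the control flow promoteCallWithVTableCmp materializes in testfunc; the value names are illustrative, not the pass's actual output:

    // entry, after promotion (sketch):
    //   %cmp0 = icmp eq ptr %vtable, address-point(@_ZTV5Base1   + 16)
    //   %cmp1 = icmp eq ptr %vtable, address-point(@_ZTV8Derived1 + 16)
    //   %cmp2 = icmp eq ptr %vtable, address-point(@_ZTV8Derived2 + 64)
    //   %or0  = or i1 %cmp0, %cmp1
    //   %hit  = or i1 %or0, %cmp2
    //   br i1 %hit, label %direct, label %indirect   ; weights 1600, 0
    // %direct calls @_ZN5Base15func1Ev directly; %indirect retains the
    // original indirect call. Net change to the entry block: +3 icmp, +2 or,
    // -1 call, i.e. the OrigEntryBBSize + 4 the final EXPECT_EQ asserts.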
diff --git a/llvm/unittests/Transforms/Vectorize/CMakeLists.txt b/llvm/unittests/Transforms/Vectorize/CMakeLists.txt
index 4f8dd577d0aa..1354558a94f0 100644
--- a/llvm/unittests/Transforms/Vectorize/CMakeLists.txt
+++ b/llvm/unittests/Transforms/Vectorize/CMakeLists.txt
@@ -13,5 +13,3 @@ add_llvm_unittest(VectorizeTests
VPlanSlpTest.cpp
VPlanVerifierTest.cpp
)
-
-set_property(TARGET VectorizeTests PROPERTY FOLDER "Tests/UnitTests/TransformsTests")
diff --git a/llvm/unittests/tools/llvm-cfi-verify/CMakeLists.txt b/llvm/unittests/tools/llvm-cfi-verify/CMakeLists.txt
index 9a4e8663c806..7fc5dac85bb7 100644
--- a/llvm/unittests/tools/llvm-cfi-verify/CMakeLists.txt
+++ b/llvm/unittests/tools/llvm-cfi-verify/CMakeLists.txt
@@ -16,5 +16,3 @@ add_llvm_unittest(CFIVerifyTests
GraphBuilder.cpp
)
target_link_libraries(CFIVerifyTests PRIVATE LLVMCFIVerify)
-
-set_property(TARGET CFIVerifyTests PROPERTY FOLDER "Tests/UnitTests/ToolTests")
diff --git a/llvm/unittests/tools/llvm-exegesis/CMakeLists.txt b/llvm/unittests/tools/llvm-exegesis/CMakeLists.txt
index 8b2f6334dda8..3ee3a0dc6b5d 100644
--- a/llvm/unittests/tools/llvm-exegesis/CMakeLists.txt
+++ b/llvm/unittests/tools/llvm-exegesis/CMakeLists.txt
@@ -62,5 +62,3 @@ add_llvm_target_unittest(LLVMExegesisTests
${exegesis_sources}
)
target_link_libraries(LLVMExegesisTests PRIVATE ${exegesis_link_libraries})
-
-set_property(TARGET LLVMExegesisTests PROPERTY FOLDER "Tests/UnitTests/ToolTests")
diff --git a/llvm/unittests/tools/llvm-mca/CMakeLists.txt b/llvm/unittests/tools/llvm-mca/CMakeLists.txt
index b209d7931190..9c69509b4b54 100644
--- a/llvm/unittests/tools/llvm-mca/CMakeLists.txt
+++ b/llvm/unittests/tools/llvm-mca/CMakeLists.txt
@@ -48,5 +48,3 @@ include_directories(${mca_includes})
add_llvm_target_unittest(LLVMMCATests
${mca_sources}
)
-
-set_property(TARGET LLVMMCATests PROPERTY FOLDER "Tests/UnitTests/ToolTests")
diff --git a/llvm/unittests/tools/llvm-mca/MCATestBase.cpp b/llvm/unittests/tools/llvm-mca/MCATestBase.cpp
index 4f444fae3d4c..4a39f5e663f2 100644
--- a/llvm/unittests/tools/llvm-mca/MCATestBase.cpp
+++ b/llvm/unittests/tools/llvm-mca/MCATestBase.cpp
@@ -66,7 +66,7 @@ Error MCATestBase::runBaselineMCA(json::Object &Result, ArrayRef<MCInst> Insts,
// Default InstrumentManager
auto IM = std::make_unique<mca::InstrumentManager>(*STI, *MCII);
- mca::InstrBuilder IB(*STI, *MCII, *MRI, MCIA.get(), *IM);
+ mca::InstrBuilder IB(*STI, *MCII, *MRI, MCIA.get(), *IM, /*CallLatency=*/100);
const SmallVector<mca::Instrument *> Instruments;
SmallVector<std::unique_ptr<mca::Instruction>> LoweredInsts;
diff --git a/llvm/unittests/tools/llvm-mca/X86/TestIncrementalMCA.cpp b/llvm/unittests/tools/llvm-mca/X86/TestIncrementalMCA.cpp
index 00a44dc1bab1..ac35dce522ae 100644
--- a/llvm/unittests/tools/llvm-mca/X86/TestIncrementalMCA.cpp
+++ b/llvm/unittests/tools/llvm-mca/X86/TestIncrementalMCA.cpp
@@ -33,7 +33,7 @@ TEST_F(X86TestBase, TestResumablePipeline) {
P->addEventListener(SV.get());
auto IM = std::make_unique<mca::InstrumentManager>(*STI, *MCII);
- mca::InstrBuilder IB(*STI, *MCII, *MRI, MCIA.get(), *IM);
+ mca::InstrBuilder IB(*STI, *MCII, *MRI, MCIA.get(), *IM, /*CallLatency=*/100);
const SmallVector<mca::Instrument *> Instruments;
// Tile size = 7
@@ -124,7 +124,7 @@ TEST_F(X86TestBase, TestInstructionRecycling) {
// Default InstrumentManager
auto IM = std::make_unique<mca::InstrumentManager>(*STI, *MCII);
- mca::InstrBuilder IB(*STI, *MCII, *MRI, MCIA.get(), *IM);
+ mca::InstrBuilder IB(*STI, *MCII, *MRI, MCIA.get(), *IM, /*CallLatency=*/100);
IB.setInstRecycleCallback(GetRecycledInst);
const SmallVector<mca::Instrument *> Instruments;
diff --git a/llvm/unittests/tools/llvm-profdata/CMakeLists.txt b/llvm/unittests/tools/llvm-profdata/CMakeLists.txt
index ad91ce36bcb5..71de2326ee82 100644
--- a/llvm/unittests/tools/llvm-profdata/CMakeLists.txt
+++ b/llvm/unittests/tools/llvm-profdata/CMakeLists.txt
@@ -9,5 +9,3 @@ add_llvm_unittest(LLVMProfdataTests
)
target_link_libraries(LLVMProfdataTests PRIVATE LLVMTestingSupport)
-
-set_property(TARGET LLVMProfdataTests PROPERTY FOLDER "Tests/UnitTests/ToolTests")
diff --git a/llvm/unittests/tools/llvm-profgen/CMakeLists.txt b/llvm/unittests/tools/llvm-profgen/CMakeLists.txt
index a733843ef519..5a658cf70846 100644
--- a/llvm/unittests/tools/llvm-profgen/CMakeLists.txt
+++ b/llvm/unittests/tools/llvm-profgen/CMakeLists.txt
@@ -9,5 +9,3 @@ add_llvm_unittest(LLVMProfgenTests
target_link_libraries(LLVMProfgenTests PRIVATE LLVMTestingSupport)
add_dependencies(LLVMProfgenTests intrinsics_gen)
-
-set_property(TARGET LLVMProfgenTests PROPERTY FOLDER "Tests/UnitTests/ToolTests")
diff --git a/llvm/utils/LLVMVisualizers/CMakeLists.txt b/llvm/utils/LLVMVisualizers/CMakeLists.txt
index 9fdc8906e55a..d2c7ac4ffe72 100644
--- a/llvm/utils/LLVMVisualizers/CMakeLists.txt
+++ b/llvm/utils/LLVMVisualizers/CMakeLists.txt
@@ -3,5 +3,5 @@
if (LLVM_ADD_NATIVE_VISUALIZERS_TO_SOLUTION)
set(LLVM_VISUALIZERS llvm.natvis)
add_custom_target(LLVMVisualizers SOURCES ${LLVM_VISUALIZERS})
- set_target_properties(LLVMVisualizers PROPERTIES FOLDER "Utils")
+ set_target_properties(LLVMVisualizers PROPERTIES FOLDER "LLVM/Misc")
endif()
diff --git a/llvm/utils/TableGen/ARMTargetDefEmitter.cpp b/llvm/utils/TableGen/ARMTargetDefEmitter.cpp
index 491011643bbf..b79458529623 100644
--- a/llvm/utils/TableGen/ARMTargetDefEmitter.cpp
+++ b/llvm/utils/TableGen/ARMTargetDefEmitter.cpp
@@ -170,7 +170,7 @@ static void EmitARMTargetDef(RecordKeeper &RK, raw_ostream &OS) {
<< "/// The set of all architectures\n"
<< "static constexpr std::array<const ArchInfo *, " << CppSpellings.size()
<< "> ArchInfos = {\n";
- for (auto CppSpelling : CppSpellings)
+ for (StringRef CppSpelling : CppSpellings)
OS << " &" << CppSpelling << ",\n";
OS << "};\n";
diff --git a/llvm/utils/TableGen/Basic/CMakeLists.txt b/llvm/utils/TableGen/Basic/CMakeLists.txt
index 5a899e3b7c80..09d79a01cae0 100644
--- a/llvm/utils/TableGen/Basic/CMakeLists.txt
+++ b/llvm/utils/TableGen/Basic/CMakeLists.txt
@@ -12,7 +12,6 @@ add_llvm_library(LLVMTableGenBasic OBJECT EXCLUDE_FROM_ALL
CodeGenIntrinsics.cpp
SDNodeProperties.cpp
)
-set_target_properties(LLVMTableGenBasic PROPERTIES FOLDER "Tablegenning")
# Users may include its headers as "Basic/*.h"
target_include_directories(LLVMTableGenBasic
diff --git a/llvm/utils/TableGen/CMakeLists.txt b/llvm/utils/TableGen/CMakeLists.txt
index 202f33fdf8b4..6a0124dce429 100644
--- a/llvm/utils/TableGen/CMakeLists.txt
+++ b/llvm/utils/TableGen/CMakeLists.txt
@@ -23,7 +23,6 @@ add_tablegen(llvm-min-tblgen LLVM_HEADERS
PARTIAL_SOURCES_INTENDED
)
-set_target_properties(llvm-min-tblgen PROPERTIES FOLDER "Tablegenning")
set(LLVM_LINK_COMPONENTS
CodeGenTypes
@@ -83,4 +82,3 @@ add_tablegen(llvm-tblgen LLVM
DEPENDS
intrinsics_gen # via llvm-min-tablegen
)
-set_target_properties(llvm-tblgen PROPERTIES FOLDER "Tablegenning")
diff --git a/llvm/utils/TableGen/Common/CMakeLists.txt b/llvm/utils/TableGen/Common/CMakeLists.txt
index 30f188ae48a2..13883aa8fa39 100644
--- a/llvm/utils/TableGen/Common/CMakeLists.txt
+++ b/llvm/utils/TableGen/Common/CMakeLists.txt
@@ -40,8 +40,8 @@ add_llvm_library(LLVMTableGenCommon STATIC OBJECT EXCLUDE_FROM_ALL
DEPENDS
vt_gen
+ intrinsics_gen
)
-set_target_properties(LLVMTableGenCommon PROPERTIES FOLDER "Tablegenning")
# Users may include its headers as "Common/*.h"
target_include_directories(LLVMTableGenCommon
diff --git a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp
index 88d353e89a46..709aa00ae8b3 100644
--- a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp
@@ -903,7 +903,7 @@ TreePredicateFn::TreePredicateFn(TreePattern *N) : PatFragRec(N) {
}
bool TreePredicateFn::hasPredCode() const {
- return isLoad() || isStore() || isAtomic() || hasNoUse() ||
+ return isLoad() || isStore() || isAtomic() || hasNoUse() || hasOneUse() ||
!PatFragRec->getRecord()->getValueAsString("PredicateCode").empty();
}
@@ -1140,6 +1140,8 @@ std::string TreePredicateFn::getPredCode() const {
if (hasNoUse())
Code += "if (!SDValue(N, 0).use_empty()) return false;\n";
+ if (hasOneUse())
+ Code += "if (!SDValue(N, 0).hasOneUse()) return false;\n";
std::string PredicateCode =
std::string(PatFragRec->getRecord()->getValueAsString("PredicateCode"));
@@ -1187,6 +1189,9 @@ bool TreePredicateFn::usesOperands() const {
bool TreePredicateFn::hasNoUse() const {
return isPredefinedPredicateEqualTo("HasNoUse", true);
}
+bool TreePredicateFn::hasOneUse() const {
+ return isPredefinedPredicateEqualTo("HasOneUse", true);
+}
bool TreePredicateFn::isLoad() const {
return isPredefinedPredicateEqualTo("IsLoad", true);
}
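
With HasOneUse set on a PatFrag, getPredCode() now prepends a one-use guard to the generated predicate, mirroring the HasNoUse handling directly above it. The emitted fragment (verbatim from the Code += string):

    if (!SDValue(N, 0).hasOneUse())
      return false;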
diff --git a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h
index 7f94db0b7d5d..1f4d45d81fd3 100644
--- a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h
+++ b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h
@@ -533,6 +533,8 @@ public:
// Check if the HasNoUse predicate is set.
bool hasNoUse() const;
+ // Check if the HasOneUse predicate is set.
+ bool hasOneUse() const;
// Is the desired predefined predicate for a load?
bool isLoad() const;
diff --git a/llvm/utils/TableGen/Common/CodeGenTarget.cpp b/llvm/utils/TableGen/Common/CodeGenTarget.cpp
index e1cf33e7f62f..bc3ccd888cb4 100644
--- a/llvm/utils/TableGen/Common/CodeGenTarget.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenTarget.cpp
@@ -63,212 +63,9 @@ StringRef llvm::getName(MVT::SimpleValueType T) {
StringRef llvm::getEnumName(MVT::SimpleValueType T) {
// clang-format off
switch (T) {
- case MVT::Other: return "MVT::Other";
- case MVT::i1: return "MVT::i1";
- case MVT::i2: return "MVT::i2";
- case MVT::i4: return "MVT::i4";
- case MVT::i8: return "MVT::i8";
- case MVT::i16: return "MVT::i16";
- case MVT::i32: return "MVT::i32";
- case MVT::i64: return "MVT::i64";
- case MVT::i128: return "MVT::i128";
- case MVT::Any: return "MVT::Any";
- case MVT::iAny: return "MVT::iAny";
- case MVT::fAny: return "MVT::fAny";
- case MVT::vAny: return "MVT::vAny";
- case MVT::f16: return "MVT::f16";
- case MVT::bf16: return "MVT::bf16";
- case MVT::f32: return "MVT::f32";
- case MVT::f64: return "MVT::f64";
- case MVT::f80: return "MVT::f80";
- case MVT::f128: return "MVT::f128";
- case MVT::ppcf128: return "MVT::ppcf128";
- case MVT::x86mmx: return "MVT::x86mmx";
- case MVT::x86amx: return "MVT::x86amx";
- case MVT::aarch64svcount: return "MVT::aarch64svcount";
- case MVT::i64x8: return "MVT::i64x8";
- case MVT::Glue: return "MVT::Glue";
- case MVT::isVoid: return "MVT::isVoid";
- case MVT::v1i1: return "MVT::v1i1";
- case MVT::v2i1: return "MVT::v2i1";
- case MVT::v3i1: return "MVT::v3i1";
- case MVT::v4i1: return "MVT::v4i1";
- case MVT::v8i1: return "MVT::v8i1";
- case MVT::v16i1: return "MVT::v16i1";
- case MVT::v32i1: return "MVT::v32i1";
- case MVT::v64i1: return "MVT::v64i1";
- case MVT::v128i1: return "MVT::v128i1";
- case MVT::v256i1: return "MVT::v256i1";
- case MVT::v512i1: return "MVT::v512i1";
- case MVT::v1024i1: return "MVT::v1024i1";
- case MVT::v2048i1: return "MVT::v2048i1";
- case MVT::v128i2: return "MVT::v128i2";
- case MVT::v256i2: return "MVT::v256i2";
- case MVT::v64i4: return "MVT::v64i4";
- case MVT::v128i4: return "MVT::v128i4";
- case MVT::v1i8: return "MVT::v1i8";
- case MVT::v2i8: return "MVT::v2i8";
- case MVT::v3i8: return "MVT::v3i8";
- case MVT::v4i8: return "MVT::v4i8";
- case MVT::v8i8: return "MVT::v8i8";
- case MVT::v16i8: return "MVT::v16i8";
- case MVT::v32i8: return "MVT::v32i8";
- case MVT::v64i8: return "MVT::v64i8";
- case MVT::v128i8: return "MVT::v128i8";
- case MVT::v256i8: return "MVT::v256i8";
- case MVT::v512i8: return "MVT::v512i8";
- case MVT::v1024i8: return "MVT::v1024i8";
- case MVT::v1i16: return "MVT::v1i16";
- case MVT::v2i16: return "MVT::v2i16";
- case MVT::v3i16: return "MVT::v3i16";
- case MVT::v4i16: return "MVT::v4i16";
- case MVT::v8i16: return "MVT::v8i16";
- case MVT::v16i16: return "MVT::v16i16";
- case MVT::v32i16: return "MVT::v32i16";
- case MVT::v64i16: return "MVT::v64i16";
- case MVT::v128i16: return "MVT::v128i16";
- case MVT::v256i16: return "MVT::v256i16";
- case MVT::v512i16: return "MVT::v512i16";
- case MVT::v1i32: return "MVT::v1i32";
- case MVT::v2i32: return "MVT::v2i32";
- case MVT::v3i32: return "MVT::v3i32";
- case MVT::v4i32: return "MVT::v4i32";
- case MVT::v5i32: return "MVT::v5i32";
- case MVT::v6i32: return "MVT::v6i32";
- case MVT::v7i32: return "MVT::v7i32";
- case MVT::v8i32: return "MVT::v8i32";
- case MVT::v9i32: return "MVT::v9i32";
- case MVT::v10i32: return "MVT::v10i32";
- case MVT::v11i32: return "MVT::v11i32";
- case MVT::v12i32: return "MVT::v12i32";
- case MVT::v16i32: return "MVT::v16i32";
- case MVT::v32i32: return "MVT::v32i32";
- case MVT::v64i32: return "MVT::v64i32";
- case MVT::v128i32: return "MVT::v128i32";
- case MVT::v256i32: return "MVT::v256i32";
- case MVT::v512i32: return "MVT::v512i32";
- case MVT::v1024i32: return "MVT::v1024i32";
- case MVT::v2048i32: return "MVT::v2048i32";
- case MVT::v1i64: return "MVT::v1i64";
- case MVT::v2i64: return "MVT::v2i64";
- case MVT::v3i64: return "MVT::v3i64";
- case MVT::v4i64: return "MVT::v4i64";
- case MVT::v8i64: return "MVT::v8i64";
- case MVT::v16i64: return "MVT::v16i64";
- case MVT::v32i64: return "MVT::v32i64";
- case MVT::v64i64: return "MVT::v64i64";
- case MVT::v128i64: return "MVT::v128i64";
- case MVT::v256i64: return "MVT::v256i64";
- case MVT::v1i128: return "MVT::v1i128";
- case MVT::v1f16: return "MVT::v1f16";
- case MVT::v2f16: return "MVT::v2f16";
- case MVT::v3f16: return "MVT::v3f16";
- case MVT::v4f16: return "MVT::v4f16";
- case MVT::v8f16: return "MVT::v8f16";
- case MVT::v16f16: return "MVT::v16f16";
- case MVT::v32f16: return "MVT::v32f16";
- case MVT::v64f16: return "MVT::v64f16";
- case MVT::v128f16: return "MVT::v128f16";
- case MVT::v256f16: return "MVT::v256f16";
- case MVT::v512f16: return "MVT::v512f16";
- case MVT::v2bf16: return "MVT::v2bf16";
- case MVT::v3bf16: return "MVT::v3bf16";
- case MVT::v4bf16: return "MVT::v4bf16";
- case MVT::v8bf16: return "MVT::v8bf16";
- case MVT::v16bf16: return "MVT::v16bf16";
- case MVT::v32bf16: return "MVT::v32bf16";
- case MVT::v64bf16: return "MVT::v64bf16";
- case MVT::v128bf16: return "MVT::v128bf16";
- case MVT::v1f32: return "MVT::v1f32";
- case MVT::v2f32: return "MVT::v2f32";
- case MVT::v3f32: return "MVT::v3f32";
- case MVT::v4f32: return "MVT::v4f32";
- case MVT::v5f32: return "MVT::v5f32";
- case MVT::v6f32: return "MVT::v6f32";
- case MVT::v7f32: return "MVT::v7f32";
- case MVT::v8f32: return "MVT::v8f32";
- case MVT::v9f32: return "MVT::v9f32";
- case MVT::v10f32: return "MVT::v10f32";
- case MVT::v11f32: return "MVT::v11f32";
- case MVT::v12f32: return "MVT::v12f32";
- case MVT::v16f32: return "MVT::v16f32";
- case MVT::v32f32: return "MVT::v32f32";
- case MVT::v64f32: return "MVT::v64f32";
- case MVT::v128f32: return "MVT::v128f32";
- case MVT::v256f32: return "MVT::v256f32";
- case MVT::v512f32: return "MVT::v512f32";
- case MVT::v1024f32: return "MVT::v1024f32";
- case MVT::v2048f32: return "MVT::v2048f32";
- case MVT::v1f64: return "MVT::v1f64";
- case MVT::v2f64: return "MVT::v2f64";
- case MVT::v3f64: return "MVT::v3f64";
- case MVT::v4f64: return "MVT::v4f64";
- case MVT::v8f64: return "MVT::v8f64";
- case MVT::v16f64: return "MVT::v16f64";
- case MVT::v32f64: return "MVT::v32f64";
- case MVT::v64f64: return "MVT::v64f64";
- case MVT::v128f64: return "MVT::v128f64";
- case MVT::v256f64: return "MVT::v256f64";
- case MVT::nxv1i1: return "MVT::nxv1i1";
- case MVT::nxv2i1: return "MVT::nxv2i1";
- case MVT::nxv4i1: return "MVT::nxv4i1";
- case MVT::nxv8i1: return "MVT::nxv8i1";
- case MVT::nxv16i1: return "MVT::nxv16i1";
- case MVT::nxv32i1: return "MVT::nxv32i1";
- case MVT::nxv64i1: return "MVT::nxv64i1";
- case MVT::nxv1i8: return "MVT::nxv1i8";
- case MVT::nxv2i8: return "MVT::nxv2i8";
- case MVT::nxv4i8: return "MVT::nxv4i8";
- case MVT::nxv8i8: return "MVT::nxv8i8";
- case MVT::nxv16i8: return "MVT::nxv16i8";
- case MVT::nxv32i8: return "MVT::nxv32i8";
- case MVT::nxv64i8: return "MVT::nxv64i8";
- case MVT::nxv1i16: return "MVT::nxv1i16";
- case MVT::nxv2i16: return "MVT::nxv2i16";
- case MVT::nxv4i16: return "MVT::nxv4i16";
- case MVT::nxv8i16: return "MVT::nxv8i16";
- case MVT::nxv16i16: return "MVT::nxv16i16";
- case MVT::nxv32i16: return "MVT::nxv32i16";
- case MVT::nxv1i32: return "MVT::nxv1i32";
- case MVT::nxv2i32: return "MVT::nxv2i32";
- case MVT::nxv4i32: return "MVT::nxv4i32";
- case MVT::nxv8i32: return "MVT::nxv8i32";
- case MVT::nxv16i32: return "MVT::nxv16i32";
- case MVT::nxv32i32: return "MVT::nxv32i32";
- case MVT::nxv1i64: return "MVT::nxv1i64";
- case MVT::nxv2i64: return "MVT::nxv2i64";
- case MVT::nxv4i64: return "MVT::nxv4i64";
- case MVT::nxv8i64: return "MVT::nxv8i64";
- case MVT::nxv16i64: return "MVT::nxv16i64";
- case MVT::nxv32i64: return "MVT::nxv32i64";
- case MVT::nxv1f16: return "MVT::nxv1f16";
- case MVT::nxv2f16: return "MVT::nxv2f16";
- case MVT::nxv4f16: return "MVT::nxv4f16";
- case MVT::nxv8f16: return "MVT::nxv8f16";
- case MVT::nxv16f16: return "MVT::nxv16f16";
- case MVT::nxv32f16: return "MVT::nxv32f16";
- case MVT::nxv1bf16: return "MVT::nxv1bf16";
- case MVT::nxv2bf16: return "MVT::nxv2bf16";
- case MVT::nxv4bf16: return "MVT::nxv4bf16";
- case MVT::nxv8bf16: return "MVT::nxv8bf16";
- case MVT::nxv16bf16: return "MVT::nxv16bf16";
- case MVT::nxv32bf16: return "MVT::nxv32bf16";
- case MVT::nxv1f32: return "MVT::nxv1f32";
- case MVT::nxv2f32: return "MVT::nxv2f32";
- case MVT::nxv4f32: return "MVT::nxv4f32";
- case MVT::nxv8f32: return "MVT::nxv8f32";
- case MVT::nxv16f32: return "MVT::nxv16f32";
- case MVT::nxv1f64: return "MVT::nxv1f64";
- case MVT::nxv2f64: return "MVT::nxv2f64";
- case MVT::nxv4f64: return "MVT::nxv4f64";
- case MVT::nxv8f64: return "MVT::nxv8f64";
- case MVT::token: return "MVT::token";
- case MVT::Metadata: return "MVT::Metadata";
- case MVT::iPTR: return "MVT::iPTR";
- case MVT::iPTRAny: return "MVT::iPTRAny";
- case MVT::Untyped: return "MVT::Untyped";
- case MVT::funcref: return "MVT::funcref";
- case MVT::externref: return "MVT::externref";
+#define GET_VT_ATTR(Ty, N, Sz, Any, Int, FP, Vec, Sc) \
+ case MVT::Ty: return "MVT::" # Ty;
+#include "llvm/CodeGen/GenVT.inc"
default: llvm_unreachable("ILLEGAL VALUE TYPE!");
}
// clang-format on
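
The ~200-line hand-maintained switch is replaced by the GET_VT_ATTR X-macro expanded from the generated GenVT.inc, so the mapping can no longer drift out of sync with ValueTypes.td. A self-contained illustration of the pattern, with hypothetical names:

    #include <cstdio>

    // One list macro drives both the enum and the name table.
    #define DEMO_VTS(X) X(i32) X(i64) X(f32)

    enum DemoVT {
    #define X(Ty) Ty,
      DEMO_VTS(X)
    #undef X
    };

    const char *demoName(DemoVT T) {
      switch (T) {
    #define X(Ty) case Ty: return "MVT::" #Ty;
      DEMO_VTS(X)
    #undef X
      }
      return "unknown";
    }

    int main() { std::printf("%s\n", demoName(i64)); } // prints MVT::i64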
diff --git a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h
index 5fe3f9a32c01..edddc051c162 100644
--- a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h
+++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h
@@ -806,6 +806,7 @@ public:
IPM_MemoryAlignment,
IPM_VectorSplatImm,
IPM_NoUse,
+ IPM_OneUse,
IPM_GenericPredicate,
IPM_MIFlags,
OPM_SameOperand,
@@ -1691,6 +1692,28 @@ public:
}
};
+/// Generates code to check that the first result has only one use.
+class OneUsePredicateMatcher : public InstructionPredicateMatcher {
+public:
+ OneUsePredicateMatcher(unsigned InsnVarID)
+ : InstructionPredicateMatcher(IPM_OneUse, InsnVarID) {}
+
+ static bool classof(const PredicateMatcher *P) {
+ return P->getKind() == IPM_OneUse;
+ }
+
+ bool isIdentical(const PredicateMatcher &B) const override {
+ return InstructionPredicateMatcher::isIdentical(B);
+ }
+
+ void emitPredicateOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const override {
+ Table << MatchTable::Opcode("GIM_CheckHasOneUse")
+ << MatchTable::Comment("MI") << MatchTable::ULEB128Value(InsnVarID)
+ << MatchTable::LineBreak;
+ }
+};
+
/// Generates code to check that a set of predicates and operands match for a
/// particular instruction.
///
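
On the GlobalISel side, each HasOneUse predicate lowers to one extra row in the generated match table. A sketch of that row, assuming instruction variable 0 (the rendering follows emitPredicateOpcodes above; the exact byte layout is the match table's concern):

    // GIM_CheckHasOneUse, /*MI*/0,
    // At selection time, the executor rejects the candidate unless the first
    // result of instruction 0 has exactly one use.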
diff --git a/llvm/utils/TableGen/GlobalISelEmitter.cpp b/llvm/utils/TableGen/GlobalISelEmitter.cpp
index 9b356148cc17..ec41cd9fec07 100644
--- a/llvm/utils/TableGen/GlobalISelEmitter.cpp
+++ b/llvm/utils/TableGen/GlobalISelEmitter.cpp
@@ -207,7 +207,7 @@ static Error isTrivialOperatorNode(const TreePatternNode &N) {
if (Predicate.isImmediatePattern())
continue;
- if (Predicate.hasNoUse())
+ if (Predicate.hasNoUse() || Predicate.hasOneUse())
continue;
if (Predicate.isNonExtLoad() || Predicate.isAnyExtLoad() ||
@@ -782,6 +782,10 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
InsnMatcher.addPredicate<NoUsePredicateMatcher>();
HasAddedBuiltinMatcher = true;
}
+ if (Predicate.hasOneUse()) {
+ InsnMatcher.addPredicate<OneUsePredicateMatcher>();
+ HasAddedBuiltinMatcher = true;
+ }
if (Predicate.hasGISelPredicateCode()) {
if (Predicate.usesOperands()) {
diff --git a/llvm/utils/UpdateTestChecks/asm.py b/llvm/utils/UpdateTestChecks/asm.py
index f0c456a1648d..cce00e4a2802 100644
--- a/llvm/utils/UpdateTestChecks/asm.py
+++ b/llvm/utils/UpdateTestChecks/asm.py
@@ -51,7 +51,8 @@ ASM_FUNCTION_AARCH64_RE = re.compile(
)
ASM_FUNCTION_AMDGPU_RE = re.compile(
- r'^_?(?P<func>[^:]+):[ \t]*;+[ \t]*@"?(?P=func)"?\n[^:]*?'
+ r"\.type\s+_?(?P<func>[^,\n]+),@function\n"
+ r'^_?(?P=func):(?:[ \t]*;+[ \t]*@"?(?P=func)"?)?\n'
r"(?P<body>.*?)\n" # (body of the function)
# This list is incomplete
r"^\s*(\.Lfunc_end[0-9]+:\n|\.section)",
@@ -605,6 +606,7 @@ def add_checks(
prefix_list,
func_dict,
func_name,
+ ginfo: common.GeneralizerInfo,
global_vars_seen_dict,
is_filtered,
):
@@ -617,9 +619,7 @@ def add_checks(
func_dict,
func_name,
check_label_format,
- True,
- False,
- 1,
+ ginfo,
global_vars_seen_dict,
is_filtered=is_filtered,
)
diff --git a/llvm/utils/UpdateTestChecks/common.py b/llvm/utils/UpdateTestChecks/common.py
index 5595e6f41755..85c129488d95 100644
--- a/llvm/utils/UpdateTestChecks/common.py
+++ b/llvm/utils/UpdateTestChecks/common.py
@@ -30,8 +30,9 @@ Version changelog:
in case arguments are split to a separate SAME line.
4: --check-globals now has a third option ('smart'). The others are now called
'none' and 'all'. 'smart' is the default.
+5: Basic block labels are matched by FileCheck expressions
"""
-DEFAULT_VERSION = 4
+DEFAULT_VERSION = 5
SUPPORTED_ANALYSES = {
@@ -698,6 +699,7 @@ class function_body(object):
args_and_sig,
attrs,
func_name_separator,
+ ginfo,
):
self.scrub = string
self.extrascrub = extra
@@ -705,24 +707,27 @@ class function_body(object):
self.args_and_sig = args_and_sig
self.attrs = attrs
self.func_name_separator = func_name_separator
+ self._ginfo = ginfo
def is_same_except_arg_names(
- self, extrascrub, funcdef_attrs_and_ret, args_and_sig, attrs, is_backend
+ self, extrascrub, funcdef_attrs_and_ret, args_and_sig, attrs
):
arg_names = set()
def drop_arg_names(match):
- arg_names.add(match.group(variable_group_in_ir_value_match))
- if match.group(attribute_group_in_ir_value_match):
- attr = match.group(attribute_group_in_ir_value_match)
+ nameless_value = self._ginfo.get_nameless_value_from_match(match)
+ if nameless_value.check_key == "%":
+ arg_names.add(self._ginfo.get_name_from_match(match))
+ substitute = ""
else:
- attr = ""
- return match.group(1) + attr + match.group(match.lastindex)
+ substitute = match.group(2)
+ return match.group(1) + substitute + match.group(match.lastindex)
def repl_arg_names(match):
+ nameless_value = self._ginfo.get_nameless_value_from_match(match)
if (
- match.group(variable_group_in_ir_value_match) is not None
- and match.group(variable_group_in_ir_value_match) in arg_names
+ nameless_value.check_key == "%"
+ and self._ginfo.get_name_from_match(match) in arg_names
):
return match.group(1) + match.group(match.lastindex)
return match.group(1) + match.group(2) + match.group(match.lastindex)
@@ -731,17 +736,19 @@ class function_body(object):
return False
if self.attrs != attrs:
return False
- ans0 = IR_VALUE_RE.sub(drop_arg_names, self.args_and_sig)
- ans1 = IR_VALUE_RE.sub(drop_arg_names, args_and_sig)
+
+ regexp = self._ginfo.get_regexp()
+ ans0 = regexp.sub(drop_arg_names, self.args_and_sig)
+ ans1 = regexp.sub(drop_arg_names, args_and_sig)
if ans0 != ans1:
return False
- if is_backend:
+ if self._ginfo.is_asm():
# Check without replacements, the replacements are not applied to the
# body for backend checks.
return self.extrascrub == extrascrub
- es0 = IR_VALUE_RE.sub(repl_arg_names, self.extrascrub)
- es1 = IR_VALUE_RE.sub(repl_arg_names, extrascrub)
+ es0 = regexp.sub(repl_arg_names, self.extrascrub)
+ es1 = regexp.sub(repl_arg_names, extrascrub)
es0 = SCRUB_IR_COMMENT_RE.sub(r"", es0)
es1 = SCRUB_IR_COMMENT_RE.sub(r"", es1)
return es0 == es1
@@ -751,7 +758,7 @@ class function_body(object):
class FunctionTestBuilder:
- def __init__(self, run_list, flags, scrubber_args, path):
+ def __init__(self, run_list, flags, scrubber_args, path, ginfo):
self._verbose = flags.verbose
self._record_args = flags.function_signature
self._check_attributes = flags.check_attributes
@@ -770,6 +777,7 @@ class FunctionTestBuilder:
)
self._scrubber_args = scrubber_args
self._path = path
+ self._ginfo = ginfo
# Strip double-quotes if input was read by UTC_ARGS
self._replace_value_regex = list(
map(lambda x: x.strip('"'), flags.replace_value_regex)
@@ -804,10 +812,10 @@ class FunctionTestBuilder:
def is_filtered(self):
return bool(self._filters)
- def process_run_line(
- self, function_re, scrubber, raw_tool_output, prefixes, is_backend
- ):
- build_global_values_dictionary(self._global_var_dict, raw_tool_output, prefixes)
+ def process_run_line(self, function_re, scrubber, raw_tool_output, prefixes):
+ build_global_values_dictionary(
+ self._global_var_dict, raw_tool_output, prefixes, self._ginfo
+ )
for m in function_re.finditer(raw_tool_output):
if not m:
continue
@@ -817,7 +825,7 @@ class FunctionTestBuilder:
# beginning of assembly function definition. In most assemblies, that is just a
        # colon: `foo:`. But, for example, in nvptx it is a parenthesis: `foo(`. For
        # non-asm output, just assume that separator is an empty string.
- if is_backend:
+ if self._ginfo.is_asm():
# Use ':' as default separator.
func_name_separator = (
m.group("func_name_separator")
@@ -900,7 +908,6 @@ class FunctionTestBuilder:
funcdef_attrs_and_ret,
args_and_sig,
attrs,
- is_backend,
):
self._func_dict[prefix][func].scrub = scrubbed_extra
self._func_dict[prefix][func].args_and_sig = args_and_sig
@@ -919,6 +926,7 @@ class FunctionTestBuilder:
args_and_sig,
attrs,
func_name_separator,
+ self._ginfo,
)
self._func_order[prefix].append(func)
else:
@@ -959,6 +967,12 @@ SCRUB_IR_COMMENT_RE = re.compile(r"\s*;.*")
class NamelessValue:
+ """
+ A NamelessValue object represents a type of value in the IR whose "name" we
+ generalize in the generated check lines; where the "name" could be an actual
+ name (as in e.g. `@some_global` or `%x`) or just a number (as in e.g. `%12`
+ or `!4`).
+ """
def __init__(
self,
check_prefix,
@@ -971,12 +985,14 @@ class NamelessValue:
is_number=False,
replace_number_with_counter=False,
match_literally=False,
- interlaced_with_previous=False
+ interlaced_with_previous=False,
+ ir_suffix=r"",
):
self.check_prefix = check_prefix
self.check_key = check_key
self.ir_prefix = ir_prefix
self.ir_regexp = ir_regexp
+ self.ir_suffix = ir_suffix
self.global_ir_rhs_regexp = global_ir_rhs_regexp
self.is_before_functions = is_before_functions
self.is_number = is_number
@@ -987,15 +1003,10 @@ class NamelessValue:
self.interlaced_with_previous = interlaced_with_previous
self.variable_mapping = {}
- # Return true if this kind of IR value is "local", basically if it matches '%{{.*}}'.
+ # Return true if this kind of IR value is defined "locally" to functions,
+    # which we assume is the case precisely for LLVM IR local values.
def is_local_def_ir_value(self):
- return self.ir_prefix == "%"
-
- # Return the IR prefix and check prefix we use for this kind or IR value,
- # e.g., (%, TMP) for locals. If the IR prefix is a regex, return the prefix
- # used in the IR output
- def get_ir_prefix_from_ir_value_match(self, match):
- return re.search(self.ir_prefix, match[0])[0], self.check_prefix
+ return self.check_key == "%"
    # Return the IR regexp we use for this kind of IR value, e.g., [\w.-]+? for locals
def get_ir_regex(self):
@@ -1030,205 +1041,216 @@ class NamelessValue:
var = var.replace("-", "_")
return var.upper()
- # Create a FileCheck variable from regex.
- def get_value_definition(self, var, match):
- # for backwards compatibility we check locals with '.*'
- varname = self.get_value_name(var, self.check_prefix)
- prefix = self.get_ir_prefix_from_ir_value_match(match)[0]
- if self.is_number:
- regex = "" # always capture a number in the default format
- capture_start = "[[#"
- else:
- regex = self.get_ir_regex()
- capture_start = "[["
- if self.is_local_def_ir_value():
- return capture_start + varname + ":" + prefix + regex + "]]"
- return prefix + capture_start + varname + ":" + regex + "]]"
-
- # Use a FileCheck variable.
- def get_value_use(self, var, match, var_prefix=None):
- if var_prefix is None:
- var_prefix = self.check_prefix
- capture_start = "[[#" if self.is_number else "[["
- if self.is_local_def_ir_value():
- return capture_start + self.get_value_name(var, var_prefix) + "]]"
- prefix = self.get_ir_prefix_from_ir_value_match(match)[0]
- return prefix + capture_start + self.get_value_name(var, var_prefix) + "]]"
-
-
-# Description of the different "unnamed" values we match in the IR, e.g.,
-# (local) ssa values, (debug) metadata, etc.
-ir_nameless_values = [
- # check_prefix check_key ir_prefix ir_regexp global_ir_rhs_regexp
- NamelessValue(r"TMP", "%", r"%", r"[\w$.-]+?", None),
- NamelessValue(r"ATTR", "#", r"#", r"[0-9]+", None),
- NamelessValue(r"ATTR", "#", r"attributes #", r"[0-9]+", r"{[^}]*}"),
- NamelessValue(r"GLOB", "@", r"@", r"[0-9]+", None),
- NamelessValue(r"GLOB", "@", r"@", r"[0-9]+", r".+", is_before_functions=True),
- NamelessValue(
- r"GLOBNAMED",
- "@",
- r"@",
- r"[a-zA-Z0-9_$\"\\.-]*[a-zA-Z_$\"\\.-][a-zA-Z0-9_$\"\\.-]*",
- r".+",
- is_before_functions=True,
- match_literally=True,
- interlaced_with_previous=True,
- ),
- NamelessValue(r"DBG", "!", r"!dbg ", r"![0-9]+", None),
- NamelessValue(r"DIASSIGNID", "!", r"!DIAssignID ", r"![0-9]+", None),
- NamelessValue(r"PROF", "!", r"!prof ", r"![0-9]+", None),
- NamelessValue(r"TBAA", "!", r"!tbaa ", r"![0-9]+", None),
- NamelessValue(r"TBAA_STRUCT", "!", r"!tbaa.struct ", r"![0-9]+", None),
- NamelessValue(r"RNG", "!", r"!range ", r"![0-9]+", None),
- NamelessValue(r"LOOP", "!", r"!llvm.loop ", r"![0-9]+", None),
- NamelessValue(r"META", "!", r"", r"![0-9]+", r"(?:distinct |)!.*"),
- NamelessValue(r"ACC_GRP", "!", r"!llvm.access.group ", r"![0-9]+", None),
- NamelessValue(r"META", "!", r"![a-z.]+ ", r"![0-9]+", None),
- NamelessValue(r"META", "!", r"[, (]", r"![0-9]+", None),
-]
+ def get_affixes_from_match(self, match):
+ prefix = re.match(self.ir_prefix, match.group(2)).group(0)
+ suffix = re.search(self.ir_suffix + "$", match.group(2)).group(0)
+ return prefix, suffix
-global_nameless_values = [
- nameless_value
- for nameless_value in ir_nameless_values
- if nameless_value.global_ir_rhs_regexp is not None
-]
-# global variable names should be matched literally
-global_nameless_values_w_unstable_ids = [
- nameless_value
- for nameless_value in global_nameless_values
- if not nameless_value.match_literally
-]
-asm_nameless_values = [
- NamelessValue(
- r"MCINST",
- "Inst#",
- "<MCInst #",
- r"\d+",
- r".+",
- is_number=True,
- replace_number_with_counter=True,
- ),
- NamelessValue(
- r"MCREG",
- "Reg:",
- "<MCOperand Reg:",
- r"\d+",
- r".+",
- is_number=True,
- replace_number_with_counter=True,
- ),
-]
+class GeneralizerInfo:
+ """
+ A GeneralizerInfo object holds information about how check lines should be generalized
+ (e.g., variable names replaced by FileCheck meta variables) as well as per-test-file
+ state (e.g. information about IR global variables).
+ """
-analyze_nameless_values = [
- NamelessValue(
- r"GRP",
- "#",
- r"",
- r"0x[0-9a-f]+",
- None,
- replace_number_with_counter=True,
- ),
-]
+ MODE_IR = 0
+ MODE_ASM = 1
+ MODE_ANALYZE = 2
+ def __init__(
+ self,
+ version,
+ mode,
+ nameless_values: List[NamelessValue],
+ regexp_prefix,
+ regexp_suffix,
+ ):
+ self._version = version
+ self._mode = mode
+ self._nameless_values = nameless_values
+
+ self._regexp_prefix = regexp_prefix
+ self._regexp_suffix = regexp_suffix
+
+ self._regexp, _ = self._build_regexp(False, False)
+ (
+ self._unstable_globals_regexp,
+ self._unstable_globals_values,
+ ) = self._build_regexp(True, True)
+
+ def _build_regexp(self, globals_only, unstable_only):
+ matches = []
+ values = []
+ for nameless_value in self._nameless_values:
+ is_global = nameless_value.global_ir_rhs_regexp is not None
+ if globals_only and not is_global:
+ continue
+ if unstable_only and nameless_value.match_literally:
+ continue
-def createOrRegexp(old, new):
- if not old:
- return new
- if not new:
- return old
- return old + "|" + new
-
-
-def createPrefixMatch(prefix_str, prefix_re):
- return "(?:" + prefix_str + "(" + prefix_re + "))"
-
-
-# Build the regexp that matches an "IR value". This can be a local variable,
-# argument, global, or metadata, anything that is "named". It is important that
-# the PREFIX and SUFFIX below only contain a single group, if that changes
-# other locations will need adjustment as well.
-IR_VALUE_REGEXP_PREFIX = r"(\s*)"
-IR_VALUE_REGEXP_STRING = r""
-for nameless_value in ir_nameless_values:
- match = createPrefixMatch(nameless_value.ir_prefix, nameless_value.ir_regexp)
- if nameless_value.global_ir_rhs_regexp is not None:
- match = "^" + match
- IR_VALUE_REGEXP_STRING = createOrRegexp(IR_VALUE_REGEXP_STRING, match)
-IR_VALUE_REGEXP_SUFFIX = r"([,\s\(\)\}]|\Z)"
-IR_VALUE_RE = re.compile(
- IR_VALUE_REGEXP_PREFIX
- + r"("
- + IR_VALUE_REGEXP_STRING
- + r")"
- + IR_VALUE_REGEXP_SUFFIX
-)
+ match = f"(?:{nameless_value.ir_prefix}({nameless_value.ir_regexp}){nameless_value.ir_suffix})"
+ if self.is_ir() and not globals_only and is_global:
+ match = "^" + match
+ matches.append(match)
+ values.append(nameless_value)
-GLOBAL_VALUE_REGEXP_STRING = r""
-for nameless_value in global_nameless_values_w_unstable_ids:
- match = createPrefixMatch(nameless_value.ir_prefix, nameless_value.ir_regexp)
- GLOBAL_VALUE_REGEXP_STRING = createOrRegexp(GLOBAL_VALUE_REGEXP_STRING, match)
-GLOBAL_VALUE_RE = re.compile(
- IR_VALUE_REGEXP_PREFIX
- + r"("
- + GLOBAL_VALUE_REGEXP_STRING
- + r")"
- + IR_VALUE_REGEXP_SUFFIX
-)
+ regexp_string = r"|".join(matches)
-# Build the regexp that matches an "ASM value" (currently only for --asm-show-inst comments).
-ASM_VALUE_REGEXP_STRING = ""
-for nameless_value in asm_nameless_values:
- match = createPrefixMatch(nameless_value.ir_prefix, nameless_value.ir_regexp)
- ASM_VALUE_REGEXP_STRING = createOrRegexp(ASM_VALUE_REGEXP_STRING, match)
-ASM_VALUE_REGEXP_SUFFIX = r"([>\s]|\Z)"
-ASM_VALUE_RE = re.compile(
- r"((?:#|//)\s*)" + "(" + ASM_VALUE_REGEXP_STRING + ")" + ASM_VALUE_REGEXP_SUFFIX
-)
+ return (
+ re.compile(
+ self._regexp_prefix + r"(" + regexp_string + r")" + self._regexp_suffix
+ ),
+ values,
+ )
-ANALYZE_VALUE_REGEXP_PREFIX = r"(\s*)"
-ANALYZE_VALUE_REGEXP_STRING = r""
-for nameless_value in analyze_nameless_values:
- match = createPrefixMatch(nameless_value.ir_prefix, nameless_value.ir_regexp)
- ANALYZE_VALUE_REGEXP_STRING = createOrRegexp(ANALYZE_VALUE_REGEXP_STRING, match)
-ANALYZE_VALUE_REGEXP_SUFFIX = r"(\)?:)"
-ANALYZE_VALUE_RE = re.compile(
- ANALYZE_VALUE_REGEXP_PREFIX
- + r"("
- + ANALYZE_VALUE_REGEXP_STRING
- + r")"
- + ANALYZE_VALUE_REGEXP_SUFFIX
-)
+ def get_version(self):
+ return self._version
-# The entire match is group 0, the prefix has one group (=1), the entire
-# IR_VALUE_REGEXP_STRING is one group (=2), and then the nameless values start.
-first_nameless_group_in_ir_value_match = 3
+ def is_ir(self):
+ return self._mode == GeneralizerInfo.MODE_IR
-# constants for the group id of special matches
-variable_group_in_ir_value_match = 3
-attribute_group_in_ir_value_match = 4
+ def is_asm(self):
+ return self._mode == GeneralizerInfo.MODE_ASM
+ def is_analyze(self):
+ return self._mode == GeneralizerInfo.MODE_ANALYZE
-# Check a match for IR_VALUE_RE and inspect it to determine if it was a local
-# value, %..., global @..., debug number !dbg !..., etc. See the PREFIXES above.
-def get_idx_from_ir_value_match(match):
- for i in range(first_nameless_group_in_ir_value_match, match.lastindex):
- if match.group(i) is not None:
- return i - first_nameless_group_in_ir_value_match
- error("Unable to identify the kind of IR value from the match!")
- return 0
+ def get_nameless_values(self):
+ return self._nameless_values
+ def get_regexp(self):
+ return self._regexp
-# See get_idx_from_ir_value_match
-def get_name_from_ir_value_match(match):
- return match.group(
- get_idx_from_ir_value_match(match) + first_nameless_group_in_ir_value_match
- )
+ def get_unstable_globals_regexp(self):
+ return self._unstable_globals_regexp
+    # The entire match is group 0, the prefix has one group (=1), the whole
+    # value alternation is one group (=2), and then the nameless values start.
+ FIRST_NAMELESS_GROUP_IN_MATCH = 3
-def get_nameless_value_from_match(match, nameless_values) -> NamelessValue:
- return nameless_values[get_idx_from_ir_value_match(match)]
+ def get_match_info(self, match):
+ """
+ Returns (name, nameless_value) for the given match object
+ """
+ if match.re == self._regexp:
+ values = self._nameless_values
+ else:
+            assert match.re == self._unstable_globals_regexp
+ values = self._unstable_globals_values
+ for i in range(len(values)):
+ g = match.group(i + GeneralizerInfo.FIRST_NAMELESS_GROUP_IN_MATCH)
+ if g is not None:
+ return g, values[i]
+ error("Unable to identify the kind of IR value from the match!")
+ return None, None
+
+    # See get_match_info
+ def get_name_from_match(self, match):
+ return self.get_match_info(match)[0]
+
+ def get_nameless_value_from_match(self, match) -> NamelessValue:
+ return self.get_match_info(match)[1]
+
+
+def make_ir_generalizer(version):
+ values = []
+
+ if version >= 5:
+ values += [
+ NamelessValue(r"BB", "%", r"label %", r"[\w$.-]+?", None),
+ NamelessValue(r"BB", "%", r"^", r"[\w$.-]+?", None, ir_suffix=r":"),
+ ]
+
+ values += [
+ # check_prefix check_key ir_prefix ir_regexp global_ir_rhs_regexp
+ NamelessValue(r"TMP", "%", r"%", r"[\w$.-]+?", None),
+ NamelessValue(r"ATTR", "#", r"#", r"[0-9]+", None),
+ NamelessValue(r"ATTR", "#", r"attributes #", r"[0-9]+", r"{[^}]*}"),
+ NamelessValue(r"GLOB", "@", r"@", r"[0-9]+", None),
+ NamelessValue(r"GLOB", "@", r"@", r"[0-9]+", r".+", is_before_functions=True),
+ NamelessValue(
+ r"GLOBNAMED",
+ "@",
+ r"@",
+ r"[a-zA-Z0-9_$\"\\.-]*[a-zA-Z_$\"\\.-][a-zA-Z0-9_$\"\\.-]*",
+ r".+",
+ is_before_functions=True,
+ match_literally=True,
+ interlaced_with_previous=True,
+ ),
+ NamelessValue(r"DBG", "!", r"!dbg ", r"![0-9]+", None),
+ NamelessValue(r"DIASSIGNID", "!", r"!DIAssignID ", r"![0-9]+", None),
+ NamelessValue(r"PROF", "!", r"!prof ", r"![0-9]+", None),
+ NamelessValue(r"TBAA", "!", r"!tbaa ", r"![0-9]+", None),
+ NamelessValue(r"TBAA_STRUCT", "!", r"!tbaa.struct ", r"![0-9]+", None),
+ NamelessValue(r"RNG", "!", r"!range ", r"![0-9]+", None),
+ NamelessValue(r"LOOP", "!", r"!llvm.loop ", r"![0-9]+", None),
+ NamelessValue(r"META", "!", r"", r"![0-9]+", r"(?:distinct |)!.*"),
+ NamelessValue(r"ACC_GRP", "!", r"!llvm.access.group ", r"![0-9]+", None),
+ NamelessValue(r"META", "!", r"![a-z.]+ ", r"![0-9]+", None),
+ NamelessValue(r"META", "!", r"[, (]", r"![0-9]+", None),
+ ]
+
+ prefix = r"(\s*)"
+ suffix = r"([,\s\(\)\}]|\Z)"
+
+ return GeneralizerInfo(version, GeneralizerInfo.MODE_IR, values, prefix, suffix)
+
+
+def make_asm_generalizer(version):
+ values = [
+ NamelessValue(
+ r"MCINST",
+ "Inst#",
+ "<MCInst #",
+ r"\d+",
+ r".+",
+ is_number=True,
+ replace_number_with_counter=True,
+ ),
+ NamelessValue(
+ r"MCREG",
+ "Reg:",
+ "<MCOperand Reg:",
+ r"\d+",
+ r".+",
+ is_number=True,
+ replace_number_with_counter=True,
+ ),
+ ]
+
+ prefix = r"((?:#|//)\s*)"
+ suffix = r"([>\s]|\Z)"
+
+ return GeneralizerInfo(version, GeneralizerInfo.MODE_ASM, values, prefix, suffix)
+
+
+def make_analyze_generalizer(version):
+ values = [
+ NamelessValue(
+ r"GRP",
+ "#",
+ r"",
+ r"0x[0-9a-f]+",
+ None,
+ replace_number_with_counter=True,
+ ),
+ ]
+
+ prefix = r"(\s*)"
+ suffix = r"(\)?:)"
+
+ return GeneralizerInfo(
+ version, GeneralizerInfo.MODE_ANALYZE, values, prefix, suffix
+ )
# Return true if var clashes with the scripted FileCheck check_prefix.
@@ -1385,16 +1407,68 @@ METAVAR_RE = re.compile(r"\[\[([A-Z0-9_]+)(?::[^]]+)?\]\]")
NUMERIC_SUFFIX_RE = re.compile(r"[0-9]*$")
+class TestVar:
+ def __init__(self, nameless_value: NamelessValue, prefix: str, suffix: str):
+ self._nameless_value = nameless_value
+
+ self._prefix = prefix
+ self._suffix = suffix
+
+ def seen(self, nameless_value: NamelessValue, prefix: str, suffix: str):
+ if prefix != self._prefix:
+ self._prefix = ""
+ if suffix != self._suffix:
+ self._suffix = ""
+
+ def get_variable_name(self, text):
+ return self._nameless_value.get_value_name(
+ text, self._nameless_value.check_prefix
+ )
+
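+    # A first sighting of a value emits a capture such as '[[TMP0:%...]]' via
+    # get_def; every later sighting emits a plain use '[[TMP0]]' via get_use.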
+ def get_def(self, name, prefix, suffix):
+ if self._nameless_value.is_number:
+ return f"{prefix}[[#{name}:]]{suffix}"
+ if self._prefix:
+ assert self._prefix == prefix
+ prefix = ""
+ if self._suffix:
+ assert self._suffix == suffix
+ suffix = ""
+ return f"{prefix}[[{name}:{self._prefix}{self._nameless_value.get_ir_regex()}{self._suffix}]]{suffix}"
+
+ def get_use(self, name, prefix, suffix):
+ if self._nameless_value.is_number:
+ return f"{prefix}[[#{name}]]{suffix}"
+ if self._prefix:
+ assert self._prefix == prefix
+ prefix = ""
+ if self._suffix:
+ assert self._suffix == suffix
+ suffix = ""
+ return f"{prefix}[[{name}]]{suffix}"
+
+
class CheckValueInfo:
def __init__(
self,
- nameless_value: NamelessValue,
- var: str,
+ key,
+ text,
+ name: str,
prefix: str,
+ suffix: str,
):
- self.nameless_value = nameless_value
- self.var = var
+ # Key for the value, e.g. '%'
+ self.key = key
+
+ # Text to be matched by the FileCheck variable (without any prefix or suffix)
+ self.text = text
+
+ # Name of the FileCheck variable
+ self.name = name
+
+ # Prefix and suffix that were captured by the NamelessValue regular expression
self.prefix = prefix
+ self.suffix = suffix
# Represent a check line in a way that allows us to compare check lines while
@@ -1433,7 +1507,7 @@ def remap_metavar_names(
new_mapping = {}
for line in new_line_infos:
for value in line.values:
- new_mapping[value.var] = value.var
+ new_mapping[value.name] = value.name
# Recursively commit to the identity mapping or find a better one
def recurse(old_begin, old_end, new_begin, new_end):
@@ -1445,7 +1519,7 @@ def remap_metavar_names(
def diffify_line(line, mapper):
values = []
for value in line.values:
- mapped = mapper(value.var)
+ mapped = mapper(value.name)
values.append(mapped if mapped in committed_names else "?")
return line.line.strip() + " @@@ " + " @ ".join(values)
@@ -1470,29 +1544,29 @@ def remap_metavar_names(
local_commits = {}
for lhs_value, rhs_value in zip(lhs_line.values, rhs_line.values):
- if new_mapping[rhs_value.var] in committed_names:
+ if new_mapping[rhs_value.name] in committed_names:
# The new value has already been committed. If it was mapped
# to the same name as the original value, we can consider
# committing other values from this line. Otherwise, we
# should ignore this line.
- if new_mapping[rhs_value.var] == lhs_value.var:
+ if new_mapping[rhs_value.name] == lhs_value.name:
continue
else:
break
- if rhs_value.var in local_commits:
+ if rhs_value.name in local_commits:
# Same, but for a possible commit happening on the same line
- if local_commits[rhs_value.var] == lhs_value.var:
+ if local_commits[rhs_value.name] == lhs_value.name:
continue
else:
break
- if lhs_value.var in committed_names:
+ if lhs_value.name in committed_names:
# We can't map this value because the name we would map it to has already been
# committed for something else. Give up on this line.
break
- local_commits[rhs_value.var] = lhs_value.var
+ local_commits[rhs_value.name] = lhs_value.name
else:
# No reason not to add any commitments for this line
for rhs_var, lhs_var in local_commits.items():
@@ -1545,58 +1619,26 @@ def remap_metavar_names(
return new_mapping
-def generalize_check_lines_common(
+def generalize_check_lines(
lines,
- is_analyze,
+ ginfo: GeneralizerInfo,
vars_seen,
global_vars_seen,
- nameless_values,
- nameless_value_regex,
- is_asm,
- preserve_names,
+ preserve_names=False,
original_check_lines=None,
+ *,
+ unstable_globals_only=False,
):
- # This gets called for each match that occurs in
- # a line. We transform variables we haven't seen
- # into defs, and variables we have seen into uses.
- def transform_line_vars(match, transform_locals=True):
- var = get_name_from_ir_value_match(match)
- nameless_value = get_nameless_value_from_match(match, nameless_values)
- if may_clash_with_default_check_prefix_name(nameless_value.check_prefix, var):
- warn(
- "Change IR value name '%s' or use --prefix-filecheck-ir-name to prevent possible conflict"
- " with scripted FileCheck name." % (var,)
- )
- key = (var, nameless_value.check_key)
- is_local_def = nameless_value.is_local_def_ir_value()
- if is_local_def and not transform_locals:
- return None
- if is_local_def and key in vars_seen:
- rv = nameless_value.get_value_use(var, match)
- elif not is_local_def and key in global_vars_seen:
- # We could have seen a different prefix for the global variables first,
- # ensure we use that one instead of the prefix for the current match.
- rv = nameless_value.get_value_use(var, match, global_vars_seen[key])
- else:
- if is_local_def:
- vars_seen.add(key)
- else:
- global_vars_seen[key] = nameless_value.check_prefix
- rv = nameless_value.get_value_definition(var, match)
- # re.sub replaces the entire regex match
- # with whatever you return, so we have
- # to make sure to hand it back everything
- # including the commas and spaces.
- return match.group(1) + rv + match.group(match.lastindex)
-
- def transform_non_local_line_vars(match):
- return transform_line_vars(match, False)
+ if unstable_globals_only:
+ regexp = ginfo.get_unstable_globals_regexp()
+ else:
+ regexp = ginfo.get_regexp()
multiple_braces_re = re.compile(r"({{+)|(}}+)")
def escape_braces(match_obj):
return '{{' + re.escape(match_obj.group(0)) + '}}'
- if not is_asm and not is_analyze:
+ if ginfo.is_ir():
for i, line in enumerate(lines):
# An IR variable named '%.' matches the FileCheck regex string.
line = line.replace("%.", "%dot")
@@ -1617,123 +1659,141 @@ def generalize_check_lines_common(
lines[i] = scrubbed_line
if not preserve_names:
- if is_asm:
- for i, _ in enumerate(lines):
- # It can happen that two matches are back-to-back and for some reason sub
- # will not replace both of them. For now we work around this by
- # substituting until there is no more match.
- changed = True
- while changed:
- (lines[i], changed) = nameless_value_regex.subn(
- transform_line_vars, lines[i], count=1
- )
- else:
- # LLVM IR case. Start by handling global meta variables (global IR variables,
- # metadata, attributes)
- for i, _ in enumerate(lines):
- start = 0
- while True:
- m = nameless_value_regex.search(lines[i][start:])
- if m is None:
- break
- start += m.start()
- sub = transform_non_local_line_vars(m)
- if sub is not None:
- lines[i] = (
- lines[i][:start] + sub + lines[i][start + len(m.group(0)) :]
- )
- start += 1
-
- # Collect information about new check lines and original check lines (if any)
- new_line_infos = []
- for line in lines:
- filtered_line = ""
- values = []
- while True:
- m = nameless_value_regex.search(line)
- if m is None:
- filtered_line += line
- break
+ committed_names = set(
+ test_var.get_variable_name(name)
+ for (name, _), test_var in vars_seen.items()
+ )
+ defs = set()
- var = get_name_from_ir_value_match(m)
- nameless_value = get_nameless_value_from_match(m, nameless_values)
- var = nameless_value.get_value_name(
- var, nameless_value.check_prefix
- )
+ # Collect information about new check lines, and generalize global reference
+ new_line_infos = []
+ for line in lines:
+ filtered_line = ""
+ values = []
+ while True:
+ m = regexp.search(line)
+ if m is None:
+ filtered_line += line
+ break
- # Replace with a [[@@]] tag, but be sure to keep the spaces and commas.
- filtered_line += (
- line[: m.start()]
- + m.group(1)
- + VARIABLE_TAG
- + m.group(m.lastindex)
+ name = ginfo.get_name_from_match(m)
+ nameless_value = ginfo.get_nameless_value_from_match(m)
+ prefix, suffix = nameless_value.get_affixes_from_match(m)
+ if may_clash_with_default_check_prefix_name(
+ nameless_value.check_prefix, name
+ ):
+ warn(
+ "Change IR value name '%s' or use --prefix-filecheck-ir-name to prevent possible conflict"
+ " with scripted FileCheck name." % (name,)
)
- line = line[m.end() :]
- values.append(
- CheckValueInfo(
- nameless_value=nameless_value,
- var=var,
- prefix=nameless_value.get_ir_prefix_from_ir_value_match(m)[
- 0
- ],
- )
+
+ # Record the variable as seen and (for locals) accumulate
+ # prefixes/suffixes
+ is_local_def = nameless_value.is_local_def_ir_value()
+ if is_local_def:
+ vars_dict = vars_seen
+ else:
+ vars_dict = global_vars_seen
+
+ key = (name, nameless_value.check_key)
+
+ if is_local_def:
+ test_prefix = prefix
+ test_suffix = suffix
+ else:
+ test_prefix = ""
+ test_suffix = ""
+
+ if key in vars_dict:
+ vars_dict[key].seen(nameless_value, test_prefix, test_suffix)
+ else:
+ vars_dict[key] = TestVar(nameless_value, test_prefix, test_suffix)
+ defs.add(key)
+
+ var = vars_dict[key].get_variable_name(name)
+
+ # Replace with a [[@@]] tag, but be sure to keep the spaces and commas.
+ filtered_line += (
+ line[: m.start()] + m.group(1) + VARIABLE_TAG + m.group(m.lastindex)
+ )
+ line = line[m.end() :]
+
+ values.append(
+ CheckValueInfo(
+ key=nameless_value.check_key,
+ text=name,
+ name=var,
+ prefix=prefix,
+ suffix=suffix,
)
- new_line_infos.append(CheckLineInfo(filtered_line, values))
-
- orig_line_infos = []
- for line in original_check_lines or []:
- filtered_line = ""
- values = []
- while True:
- m = METAVAR_RE.search(line)
- if m is None:
- filtered_line += line
- break
+ )
- # Replace with a [[@@]] tag, but be sure to keep the spaces and commas.
- filtered_line += line[: m.start()] + VARIABLE_TAG
- line = line[m.end() :]
- values.append(
- CheckValueInfo(
- nameless_value=None,
- var=m.group(1),
- prefix=None,
- )
+ new_line_infos.append(CheckLineInfo(filtered_line, values))
+
+ committed_names.update(
+ test_var.get_variable_name(name)
+ for (name, _), test_var in global_vars_seen.items()
+ )
+
+ # Collect information about original check lines, if any.
+ orig_line_infos = []
+ for line in original_check_lines or []:
+ filtered_line = ""
+ values = []
+ while True:
+ m = METAVAR_RE.search(line)
+ if m is None:
+ filtered_line += line
+ break
+
+ # Replace with a [[@@]] tag, but be sure to keep the spaces and commas.
+ filtered_line += line[: m.start()] + VARIABLE_TAG
+ line = line[m.end() :]
+ values.append(
+ CheckValueInfo(
+ key=None,
+ text=None,
+ name=m.group(1),
+ prefix="",
+ suffix="",
)
- orig_line_infos.append(CheckLineInfo(filtered_line, values))
+ )
+ orig_line_infos.append(CheckLineInfo(filtered_line, values))
- # Compute the variable name mapping
- committed_names = set(vars_seen)
+ # Compute the variable name mapping
+ mapping = remap_metavar_names(orig_line_infos, new_line_infos, committed_names)
- mapping = remap_metavar_names(
- orig_line_infos, new_line_infos, committed_names
- )
+ # Apply the variable name mapping
+ for i, line_info in enumerate(new_line_infos):
+ line_template = line_info.line
+ line = ""
- for i, line_info in enumerate(new_line_infos):
- line_template = line_info.line
- line = ""
+ for value in line_info.values:
+ idx = line_template.find(VARIABLE_TAG)
+ line += line_template[:idx]
+ line_template = line_template[idx + len(VARIABLE_TAG) :]
- for value in line_info.values:
- idx = line_template.find(VARIABLE_TAG)
- line += line_template[:idx]
- line_template = line_template[idx + len(VARIABLE_TAG) :]
+ key = (value.text, value.key)
+ if value.key == "%":
+ vars_dict = vars_seen
+ else:
+ vars_dict = global_vars_seen
- key = (mapping[value.var], nameless_value.check_key)
- is_local_def = nameless_value.is_local_def_ir_value()
- if is_local_def:
- if mapping[value.var] in vars_seen:
- line += f"[[{mapping[value.var]}]]"
- else:
- line += f"[[{mapping[value.var]}:{value.prefix}{value.nameless_value.get_ir_regex()}]]"
- vars_seen.add(mapping[value.var])
- else:
- raise RuntimeError("not implemented")
+ if key in defs:
+ line += vars_dict[key].get_def(
+ mapping[value.name], value.prefix, value.suffix
+ )
+ defs.remove(key)
+ else:
+ line += vars_dict[key].get_use(
+ mapping[value.name], value.prefix, value.suffix
+ )
- line += line_template
+ line += line_template
- lines[i] = line
+ lines[i] = line
- if is_analyze:
+ if ginfo.is_analyze():
for i, _ in enumerate(lines):
# Escape multiple {{ or }} as {{}} denotes a FileCheck regex.
scrubbed_line = multiple_braces_re.sub(escape_braces, lines[i])
@@ -1742,63 +1802,6 @@ def generalize_check_lines_common(
return lines
-# Replace IR value defs and uses with FileCheck variables.
-def generalize_check_lines(
- lines, is_analyze, vars_seen, global_vars_seen, preserve_names, original_check_lines
-):
- return generalize_check_lines_common(
- lines,
- is_analyze,
- vars_seen,
- global_vars_seen,
- ir_nameless_values,
- IR_VALUE_RE,
- False,
- preserve_names,
- original_check_lines=original_check_lines,
- )
-
-
-def generalize_global_check_line(line, preserve_names, global_vars_seen):
- [new_line] = generalize_check_lines_common(
- [line],
- False,
- set(),
- global_vars_seen,
- global_nameless_values_w_unstable_ids,
- GLOBAL_VALUE_RE,
- False,
- preserve_names,
- )
- return new_line
-
-
-def generalize_asm_check_lines(lines, vars_seen, global_vars_seen):
- return generalize_check_lines_common(
- lines,
- False,
- vars_seen,
- global_vars_seen,
- asm_nameless_values,
- ASM_VALUE_RE,
- True,
- False,
- )
-
-
-def generalize_analyze_check_lines(lines, vars_seen, global_vars_seen):
- return generalize_check_lines_common(
- lines,
- True,
- vars_seen,
- global_vars_seen,
- analyze_nameless_values,
- ANALYZE_VALUE_RE,
- False,
- False,
- )
-
-
def add_checks(
output_lines,
comment_marker,
@@ -1806,9 +1809,7 @@ def add_checks(
func_dict,
func_name,
check_label_format,
- is_backend,
- is_analyze,
- version,
+ ginfo,
global_vars_seen_dict,
is_filtered,
preserve_names=False,
@@ -1853,7 +1854,7 @@ def add_checks(
# Add some space between different check prefixes, but not after the last
# check line (before the test code).
- if is_backend:
+ if ginfo.is_asm():
if len(printed_prefixes) != 0:
output_lines.append(comment_marker)
@@ -1862,11 +1863,11 @@ def add_checks(
global_vars_seen_before = [key for key in global_vars_seen.keys()]
- vars_seen = set()
+ vars_seen = {}
printed_prefixes.append(checkprefix)
attrs = str(func_dict[checkprefix][func_name].attrs)
attrs = "" if attrs == "None" else attrs
- if version > 1:
+ if ginfo.get_version() > 1:
funcdef_attrs_and_ret = func_dict[checkprefix][
func_name
].funcdef_attrs_and_ret
@@ -1881,7 +1882,7 @@ def add_checks(
if args_and_sig:
args_and_sig = generalize_check_lines(
[args_and_sig],
- is_analyze,
+ ginfo,
vars_seen,
global_vars_seen,
preserve_names,
@@ -1892,7 +1893,7 @@ def add_checks(
# Captures in label lines are not supported, thus split into a -LABEL
# and a separate -SAME line that contains the arguments with captures.
args_and_sig_prefix = ""
- if version >= 3 and args_and_sig.startswith("("):
+ if ginfo.get_version() >= 3 and args_and_sig.startswith("("):
# Ensure the "(" separating function name and arguments is in the
# label line. This is required in case of function names that are
# prefixes of each other. Otherwise, the label line for "foo" might
@@ -1933,7 +1934,7 @@ def add_checks(
continue
# For ASM output, just emit the check lines.
- if is_backend:
+ if ginfo.is_asm():
body_start = 1
if is_filtered:
# For filtered output we don't add "-NEXT" so don't add extra spaces
@@ -1943,8 +1944,8 @@ def add_checks(
output_lines.append(
"%s %s: %s" % (comment_marker, checkprefix, func_body[0])
)
- func_lines = generalize_asm_check_lines(
- func_body[body_start:], vars_seen, global_vars_seen
+ func_lines = generalize_check_lines(
+ func_body[body_start:], ginfo, vars_seen, global_vars_seen
)
for func_line in func_lines:
if func_line.strip() == "":
@@ -1963,9 +1964,9 @@ def add_checks(
global_vars_seen_dict[checkprefix][key] = global_vars_seen[key]
break
# For analyze output, generalize the output, and emit CHECK-EMPTY lines as well.
- elif is_analyze:
- func_body = generalize_analyze_check_lines(
- func_body, vars_seen, global_vars_seen
+ elif ginfo.is_analyze():
+ func_body = generalize_check_lines(
+ func_body, ginfo, vars_seen, global_vars_seen
)
for func_line in func_body:
if func_line.strip() == "":
@@ -1994,7 +1995,7 @@ def add_checks(
else:
func_body = generalize_check_lines(
func_body,
- False,
+ ginfo,
vars_seen,
global_vars_seen,
preserve_names,
@@ -2057,13 +2058,14 @@ def add_ir_checks(
func_name,
preserve_names,
function_sig,
- version,
+ ginfo: GeneralizerInfo,
global_vars_seen_dict,
is_filtered,
original_check_lines={},
):
+ assert ginfo.is_ir()
# Label format is based on IR string.
- if function_sig and version > 1:
+ if function_sig and ginfo.get_version() > 1:
function_def_regex = "define %s"
elif function_sig:
function_def_regex = "define {{[^@]+}}%s"
@@ -2079,9 +2081,7 @@ def add_ir_checks(
func_dict,
func_name,
check_label_format,
- False,
- False,
- version,
+ ginfo,
global_vars_seen_dict,
is_filtered,
preserve_names,
@@ -2090,8 +2090,15 @@ def add_ir_checks(
def add_analyze_checks(
- output_lines, comment_marker, prefix_list, func_dict, func_name, is_filtered
+ output_lines,
+ comment_marker,
+ prefix_list,
+ func_dict,
+ func_name,
+ ginfo: GeneralizerInfo,
+ is_filtered,
):
+ assert ginfo.is_analyze()
check_label_format = "{} %s-LABEL: '%s%s%s%s'".format(comment_marker)
global_vars_seen_dict = {}
return add_checks(
@@ -2101,16 +2108,14 @@ def add_analyze_checks(
func_dict,
func_name,
check_label_format,
- False,
- True,
- 1,
+ ginfo,
global_vars_seen_dict,
is_filtered,
)
-def build_global_values_dictionary(glob_val_dict, raw_tool_output, prefixes):
- for nameless_value in itertools.chain(global_nameless_values, asm_nameless_values):
+def build_global_values_dictionary(glob_val_dict, raw_tool_output, prefixes, ginfo):
+ for nameless_value in ginfo.get_nameless_values():
if nameless_value.global_ir_rhs_regexp is None:
continue
@@ -2225,6 +2230,7 @@ def add_global_checks(
comment_marker,
prefix_list,
output_lines,
+ ginfo: GeneralizerInfo,
global_vars_seen_dict,
preserve_names,
is_before_functions,
@@ -2232,7 +2238,9 @@ def add_global_checks(
):
printed_prefixes = set()
output_lines_loc = {} # Allows GLOB and GLOBNAMED to be sorted correctly
- for nameless_value in global_nameless_values:
+ for nameless_value in ginfo.get_nameless_values():
+ if nameless_value.global_ir_rhs_regexp is None:
+ continue
if nameless_value.is_before_functions != is_before_functions:
continue
for p in prefix_list:
@@ -2274,8 +2282,13 @@ def add_global_checks(
break
if not matched:
continue
- new_line = generalize_global_check_line(
- line, preserve_names, global_vars_seen
+ [new_line] = generalize_check_lines(
+ [line],
+ ginfo,
+ {},
+ global_vars_seen,
+ preserve_names,
+ unstable_globals_only=True,
)
new_line = filter_unstable_metadata(new_line)
check_line = "%s %s: %s" % (comment_marker, checkprefix, new_line)
diff --git a/llvm/utils/UpdateTestChecks/isel.py b/llvm/utils/UpdateTestChecks/isel.py
index bdb68e5815a3..855bc50b09f4 100644
--- a/llvm/utils/UpdateTestChecks/isel.py
+++ b/llvm/utils/UpdateTestChecks/isel.py
@@ -60,6 +60,7 @@ def add_checks(
prefix_list,
func_dict,
func_name,
+ ginfo: common.GeneralizerInfo,
global_vars_seen_dict,
is_filtered,
):
@@ -72,9 +73,7 @@ def add_checks(
func_dict,
func_name,
check_label_format,
- True,
- False,
- 1,
+ ginfo,
global_vars_seen_dict,
is_filtered=is_filtered,
)
diff --git a/llvm/utils/gn/README.rst b/llvm/utils/gn/README.rst
index 9ca545061099..52d03be533e5 100644
--- a/llvm/utils/gn/README.rst
+++ b/llvm/utils/gn/README.rst
@@ -131,7 +131,7 @@ configure is used for three classes of feature checks:
For the last two points, it would be nice if LLVM didn't have a single
``config.h`` header, but one header per toggle. That way, when e.g.
-``llvm_enable_terminfo`` is toggled, only the 3 files caring about that setting
+``llvm_enable_zlib`` is toggled, only the 3 files caring about that setting
would need to be rebuilt, instead of everything including ``config.h``.
GN doesn't believe in users setting arbitrary cflags from an environment
diff --git a/llvm/utils/gn/build/libs/terminfo/BUILD.gn b/llvm/utils/gn/build/libs/terminfo/BUILD.gn
deleted file mode 100644
index 10003d61c4df..000000000000
--- a/llvm/utils/gn/build/libs/terminfo/BUILD.gn
+++ /dev/null
@@ -1,12 +0,0 @@
-import("//llvm/utils/gn/build/libs/terminfo/enable.gni")
-
-config("terminfo_config") {
- visibility = [ ":terminfo" ]
- libs = [ "ncurses" ]
-}
-
-group("terminfo") {
- if (llvm_enable_terminfo) {
- public_configs = [ ":terminfo_config" ]
- }
-}
diff --git a/llvm/utils/gn/build/libs/terminfo/enable.gni b/llvm/utils/gn/build/libs/terminfo/enable.gni
deleted file mode 100644
index 79ea2b601857..000000000000
--- a/llvm/utils/gn/build/libs/terminfo/enable.gni
+++ /dev/null
@@ -1,4 +0,0 @@
-declare_args() {
- # Whether to link against terminfo.
- llvm_enable_terminfo = false
-}
diff --git a/llvm/utils/gn/secondary/clang/lib/Analysis/FlowSensitive/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/Analysis/FlowSensitive/BUILD.gn
index 22433459a787..393596186c0c 100644
--- a/llvm/utils/gn/secondary/clang/lib/Analysis/FlowSensitive/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/lib/Analysis/FlowSensitive/BUILD.gn
@@ -26,6 +26,7 @@ static_library("FlowSensitive") {
"ASTOps.cpp",
"AdornedCFG.cpp",
"Arena.cpp",
+ "CNFFormula.cpp",
"DataflowAnalysisContext.cpp",
"DataflowEnvironment.cpp",
"DebugSupport.cpp",
diff --git a/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn
index 0a7cc3854056..c312c86fa164 100644
--- a/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn
@@ -140,12 +140,10 @@ copy("Headers") {
"avx512bwintrin.h",
"avx512cdintrin.h",
"avx512dqintrin.h",
- "avx512erintrin.h",
"avx512fintrin.h",
"avx512fp16intrin.h",
"avx512ifmaintrin.h",
"avx512ifmavlintrin.h",
- "avx512pfintrin.h",
"avx512vbmi2intrin.h",
"avx512vbmiintrin.h",
"avx512vbmivlintrin.h",
diff --git a/llvm/utils/gn/secondary/clang/lib/Sema/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/Sema/BUILD.gn
index f6c9526278dd..9075ada55c0f 100644
--- a/llvm/utils/gn/secondary/clang/lib/Sema/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/lib/Sema/BUILD.gn
@@ -84,7 +84,7 @@ static_library("Sema") {
"SemaOpenMP.cpp",
"SemaOverload.cpp",
"SemaPseudoObject.cpp",
- "SemaRISCVVectorLookup.cpp",
+ "SemaRISCV.cpp",
"SemaSYCL.cpp",
"SemaStmt.cpp",
"SemaStmtAsm.cpp",
@@ -95,6 +95,7 @@ static_library("Sema") {
"SemaTemplateInstantiateDecl.cpp",
"SemaTemplateVariadic.cpp",
"SemaType.cpp",
+ "SemaX86.cpp",
"TypeLocBuilder.cpp",
]
}
diff --git a/llvm/utils/gn/secondary/clang/lib/StaticAnalyzer/Checkers/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/StaticAnalyzer/Checkers/BUILD.gn
index da48149c4d90..3ae50b214eb1 100644
--- a/llvm/utils/gn/secondary/clang/lib/StaticAnalyzer/Checkers/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/lib/StaticAnalyzer/Checkers/BUILD.gn
@@ -104,6 +104,7 @@ static_library("Checkers") {
"PointerSortingChecker.cpp",
"PointerSubChecker.cpp",
"PthreadLockChecker.cpp",
+ "PutenvStackArrayChecker.cpp",
"RetainCountChecker/RetainCountChecker.cpp",
"RetainCountChecker/RetainCountDiagnostics.cpp",
"ReturnPointerRangeChecker.cpp",
@@ -111,6 +112,7 @@ static_library("Checkers") {
"ReturnValueChecker.cpp",
"RunLoopAutoreleaseLeakChecker.cpp",
"STLAlgorithmModeling.cpp",
+ "SetgidSetuidOrderChecker.cpp",
"SimpleStreamChecker.cpp",
"SmartPtrChecker.cpp",
"SmartPtrModeling.cpp",
@@ -147,6 +149,5 @@ static_library("Checkers") {
"WebKit/UncountedLambdaCapturesChecker.cpp",
"WebKit/UncountedLocalVarsChecker.cpp",
"cert/InvalidPtrChecker.cpp",
- "cert/PutenvWithAutoChecker.cpp",
]
}
diff --git a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
index 210b26e8f166..b642b2c82e6d 100644
--- a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
+++ b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
@@ -142,23 +142,8 @@ if (current_toolchain == default_toolchain) {
"__algorithm/partition_point.h",
"__algorithm/pop_heap.h",
"__algorithm/prev_permutation.h",
- "__algorithm/pstl_any_all_none_of.h",
- "__algorithm/pstl_copy.h",
- "__algorithm/pstl_count.h",
- "__algorithm/pstl_equal.h",
- "__algorithm/pstl_fill.h",
- "__algorithm/pstl_find.h",
- "__algorithm/pstl_for_each.h",
+ "__algorithm/pstl.h",
"__algorithm/pstl_frontend_dispatch.h",
- "__algorithm/pstl_generate.h",
- "__algorithm/pstl_is_partitioned.h",
- "__algorithm/pstl_merge.h",
- "__algorithm/pstl_move.h",
- "__algorithm/pstl_replace.h",
- "__algorithm/pstl_rotate_copy.h",
- "__algorithm/pstl_sort.h",
- "__algorithm/pstl_stable_sort.h",
- "__algorithm/pstl_transform.h",
"__algorithm/push_heap.h",
"__algorithm/ranges_adjacent_find.h",
"__algorithm/ranges_all_of.h",
@@ -294,6 +279,7 @@ if (current_toolchain == default_toolchain) {
"__atomic/atomic_flag.h",
"__atomic/atomic_init.h",
"__atomic/atomic_lock_free.h",
+ "__atomic/atomic_ref.h",
"__atomic/atomic_sync.h",
"__atomic/check_memory_order.h",
"__atomic/contention_t.h",
@@ -302,6 +288,7 @@ if (current_toolchain == default_toolchain) {
"__atomic/is_always_lock_free.h",
"__atomic/kill_dependency.h",
"__atomic/memory_order.h",
+ "__atomic/to_gcc_order.h",
"__availability",
"__bit/bit_cast.h",
"__bit/bit_ceil.h",
@@ -645,8 +632,7 @@ if (current_toolchain == default_toolchain) {
"__numeric/iota.h",
"__numeric/midpoint.h",
"__numeric/partial_sum.h",
- "__numeric/pstl_reduce.h",
- "__numeric/pstl_transform_reduce.h",
+ "__numeric/pstl.h",
"__numeric/reduce.h",
"__numeric/saturation_arithmetic.h",
"__numeric/transform_exclusive_scan.h",
diff --git a/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn b/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn
index 80a91507fcc6..e93130eacdc7 100644
--- a/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/include/llvm/Config/BUILD.gn
@@ -10,7 +10,6 @@ import("//llvm/utils/gn/build/buildflags.gni")
import("//llvm/utils/gn/build/libs/curl/enable.gni")
import("//llvm/utils/gn/build/libs/edit/enable.gni")
import("//llvm/utils/gn/build/libs/pthread/enable.gni")
-import("//llvm/utils/gn/build/libs/terminfo/enable.gni")
import("//llvm/utils/gn/build/libs/xar/enable.gni")
import("//llvm/utils/gn/build/libs/xml/enable.gni")
import("//llvm/utils/gn/build/libs/zlib/enable.gni")
@@ -294,12 +293,6 @@ write_cmake_config("config") {
values += [ "HAVE_LIBEDIT=" ]
}
- if (llvm_enable_terminfo) {
- values += [ "LLVM_ENABLE_TERMINFO=1" ]
- } else {
- values += [ "LLVM_ENABLE_TERMINFO=" ]
- }
-
if (llvm_enable_libxml2) {
values += [ "LLVM_ENABLE_LIBXML2=1" ]
} else {
diff --git a/llvm/utils/gn/secondary/llvm/lib/Support/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Support/BUILD.gn
index 941d448b3367..7728455499bf 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Support/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Support/BUILD.gn
@@ -6,7 +6,6 @@ static_library("Support") {
"//llvm/include/llvm/Support:write_vcsrevision",
"//llvm/lib/Demangle",
"//llvm/utils/gn/build/libs/pthread",
- "//llvm/utils/gn/build/libs/terminfo",
"//llvm/utils/gn/build/libs/zlib",
]
diff --git a/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/BUILD.gn
index dad4f028236d..ab97507311a4 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/BUILD.gn
@@ -138,6 +138,7 @@ static_library("LLVMAMDGPUCodeGen") {
"AMDGPUAtomicOptimizer.cpp",
"AMDGPUAttributor.cpp",
"AMDGPUCallLowering.cpp",
+ "AMDGPUCodeGenPassBuilder.cpp",
"AMDGPUCodeGenPrepare.cpp",
"AMDGPUCombinerHelper.cpp",
"AMDGPUCtorDtorLowering.cpp",
@@ -185,6 +186,7 @@ static_library("LLVMAMDGPUCodeGen") {
"AMDGPURewriteOutArguments.cpp",
"AMDGPURewriteUndefForPHI.cpp",
"AMDGPUSetWavePriority.cpp",
+ "AMDGPUSplitModule.cpp",
"AMDGPUSubtarget.cpp",
"AMDGPUTargetMachine.cpp",
"AMDGPUTargetObjectFile.cpp",
@@ -206,6 +208,7 @@ static_library("LLVMAMDGPUCodeGen") {
"GCNVOPDUtils.cpp",
"R600AsmPrinter.cpp",
"R600ClauseMergePass.cpp",
+ "R600CodeGenPassBuilder.cpp",
"R600ControlFlowFinalizer.cpp",
"R600EmitClauseMarkers.cpp",
"R600ExpandSpecialInstrs.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/tools/llvm-config/BUILD.gn b/llvm/utils/gn/secondary/llvm/tools/llvm-config/BUILD.gn
index bf50cd0fce46..711e4e3b4315 100644
--- a/llvm/utils/gn/secondary/llvm/tools/llvm-config/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/tools/llvm-config/BUILD.gn
@@ -1,7 +1,6 @@
import("//llvm/lib/Target/targets_string.gni")
import("//llvm/utils/gn/build/buildflags.gni")
import("//llvm/utils/gn/build/libs/pthread/enable.gni")
-import("//llvm/utils/gn/build/libs/terminfo/enable.gni")
import("//llvm/utils/gn/build/libs/xml/enable.gni")
import("//llvm/utils/gn/build/libs/zlib/enable.gni")
import("//llvm/utils/gn/build/write_cmake_config.gni")
@@ -36,7 +35,7 @@ write_cmake_config("BuildVariables.inc") {
lib = ""
}
- # Windows doesn't use any of libxml2, terminfo, zlib by default.
+  # Windows doesn't use either libxml2 or zlib by default.
# Make GN not warn about these variables being unused.
not_needed([
"l",
@@ -63,9 +62,6 @@ write_cmake_config("BuildVariables.inc") {
if (llvm_enable_libxml2) {
system_libs += " ${l}xml2${lib}"
}
- if (llvm_enable_terminfo) {
- system_libs += " ${l}ncurses${lib}"
- }
if (llvm_enable_zlib) {
system_libs += " ${l}z${lib}"
}
diff --git a/llvm/utils/lit/CMakeLists.txt b/llvm/utils/lit/CMakeLists.txt
index 3dc2dc0d8efc..d22a778e2e53 100644
--- a/llvm/utils/lit/CMakeLists.txt
+++ b/llvm/utils/lit/CMakeLists.txt
@@ -26,5 +26,5 @@ add_lit_testsuite(check-lit "Running lit's tests"
)
# For IDEs
-set_target_properties(check-lit PROPERTIES FOLDER "Tests")
-set_target_properties(prepare-check-lit PROPERTIES FOLDER "Tests")
+set_target_properties(check-lit PROPERTIES FOLDER "LLVM/Tests")
+set_target_properties(prepare-check-lit PROPERTIES FOLDER "LLVM/Tests")
diff --git a/llvm/utils/llvm-locstats/CMakeLists.txt b/llvm/utils/llvm-locstats/CMakeLists.txt
index 1dbb9da92e23..c0e0d43a7bd7 100644
--- a/llvm/utils/llvm-locstats/CMakeLists.txt
+++ b/llvm/utils/llvm-locstats/CMakeLists.txt
@@ -12,5 +12,5 @@ if (LLVM_INCLUDE_UTILS AND LLVM_INCLUDE_TOOLS)
if (NOT LLVM_BUILD_TOOLS)
set_target_properties(llvm-locstats PROPERTIES EXCLUDE_FROM_ALL ON)
endif()
- set_target_properties(llvm-locstats PROPERTIES FOLDER "Tools")
+ set_target_properties(llvm-locstats PROPERTIES FOLDER "LLVM/Tools")
endif()
diff --git a/llvm/utils/mlgo-utils/CMakeLists.txt b/llvm/utils/mlgo-utils/CMakeLists.txt
index 3129331d58c7..2f3920644b73 100644
--- a/llvm/utils/mlgo-utils/CMakeLists.txt
+++ b/llvm/utils/mlgo-utils/CMakeLists.txt
@@ -7,5 +7,3 @@ add_lit_testsuite(check-mlgo-utils "Running mlgo-utils tests"
${CMAKE_CURRENT_BINARY_DIR}
DEPENDS "FileCheck" "not" "count" "split-file" "yaml2obj" "llvm-objcopy"
)
-
-set_target_properties(check-mlgo-utils PROPERTIES FOLDER "Tests")
diff --git a/llvm/utils/revert_checker.py b/llvm/utils/revert_checker.py
index 34395a6fe505..da80bdff8685 100755
--- a/llvm/utils/revert_checker.py
+++ b/llvm/utils/revert_checker.py
@@ -283,17 +283,12 @@ def _main() -> None:
seen_reverts.add(revert)
all_reverts.append(revert)
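+    # With --review_url, print full commit URLs instead of bare SHAs.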
+ sha_prefix = (
+ "https://github.com/llvm/llvm-project/commit/" if opts.review_url else ""
+ )
for revert in all_reverts:
- sha_fmt = (
- f"https://reviews.llvm.org/rG{revert.sha}"
- if opts.review_url
- else revert.sha
- )
- reverted_sha_fmt = (
- f"https://reviews.llvm.org/rG{revert.reverted_sha}"
- if opts.review_url
- else revert.reverted_sha
- )
+ sha_fmt = f"{sha_prefix}{revert.sha}"
+ reverted_sha_fmt = f"{sha_prefix}{revert.reverted_sha}"
print(f"{sha_fmt} claims to revert {reverted_sha_fmt}")
diff --git a/llvm/utils/update_analyze_test_checks.py b/llvm/utils/update_analyze_test_checks.py
index 03053e5447d1..47506626a0a5 100755
--- a/llvm/utils/update_analyze_test_checks.py
+++ b/llvm/utils/update_analyze_test_checks.py
@@ -96,6 +96,7 @@ def main():
# now, we just ignore all but the last.
prefix_list.append((check_prefixes, tool_cmd_args))
+ ginfo = common.make_analyze_generalizer(version=1)
builder = common.FunctionTestBuilder(
run_list=prefix_list,
flags=type(
@@ -111,6 +112,7 @@ def main():
),
scrubber_args=[],
path=ti.path,
+ ginfo=ginfo,
)
for prefixes, opt_args in prefix_list:
@@ -131,7 +133,6 @@ def main():
common.scrub_body,
raw_tool_output,
prefixes,
- False,
)
elif re.search(r"LV: Checking a loop in ", raw_tool_outputs) is not None:
# Split analysis outputs by "Printing analysis " declarations.
@@ -143,7 +144,6 @@ def main():
common.scrub_body,
raw_tool_output,
prefixes,
- False,
)
else:
common.warn("Don't know how to deal with this output")
@@ -179,6 +179,7 @@ def main():
prefix_list,
func_dict,
func_name,
+ ginfo,
is_filtered=builder.is_filtered(),
)
)
diff --git a/llvm/utils/update_cc_test_checks.py b/llvm/utils/update_cc_test_checks.py
index 28c6bb0409f3..3ffb07ddf6ad 100755
--- a/llvm/utils/update_cc_test_checks.py
+++ b/llvm/utils/update_cc_test_checks.py
@@ -270,7 +270,7 @@ def get_function_body(builder, args, filename, clang_args, extra_commands, prefi
raw_tool_output = common.invoke_tool(extra_args[0], extra_args[1:], f.name)
if "-emit-llvm" in clang_args:
builder.process_run_line(
- common.OPT_FUNCTION_RE, common.scrub_body, raw_tool_output, prefixes, False
+ common.OPT_FUNCTION_RE, common.scrub_body, raw_tool_output, prefixes
)
builder.processed_prefixes(prefixes)
else:
@@ -360,8 +360,13 @@ def main():
# Store only filechecked runlines.
filecheck_run_list = [i for i in run_list if i[0]]
+ ginfo = common.make_ir_generalizer(version=ti.args.version)
builder = common.FunctionTestBuilder(
- run_list=filecheck_run_list, flags=ti.args, scrubber_args=[], path=ti.path
+ run_list=filecheck_run_list,
+ flags=ti.args,
+ scrubber_args=[],
+ path=ti.path,
+ ginfo=ginfo,
)
for prefixes, args, extra_commands, triple_in_cmd in run_list:
@@ -415,29 +420,18 @@ def main():
# Now generate all the checks.
def check_generator(my_output_lines, prefixes, func):
- if "-emit-llvm" in clang_args:
- return common.add_ir_checks(
- my_output_lines,
- "//",
- prefixes,
- func_dict,
- func,
- False,
- ti.args.function_signature,
- ti.args.version,
- global_vars_seen_dict,
- is_filtered=builder.is_filtered(),
- )
- else:
- return asm.add_checks(
- my_output_lines,
- "//",
- prefixes,
- func_dict,
- func,
- global_vars_seen_dict,
- is_filtered=builder.is_filtered(),
- )
+ return common.add_ir_checks(
+ my_output_lines,
+ "//",
+ prefixes,
+ func_dict,
+ func,
+ False,
+ ti.args.function_signature,
+ ginfo,
+ global_vars_seen_dict,
+ is_filtered=builder.is_filtered(),
+ )
if ti.args.check_globals != 'none':
generated_prefixes.extend(
@@ -446,6 +440,7 @@ def main():
"//",
run_list,
output_lines,
+ ginfo,
global_vars_seen_dict,
False,
True,
@@ -506,6 +501,7 @@ def main():
"//",
run_list,
output_lines,
+ ginfo,
global_vars_seen_dict,
False,
True,
@@ -525,7 +521,7 @@ def main():
mangled,
False,
args.function_signature,
- args.version,
+ ginfo,
global_vars_seen_dict,
is_filtered=builder.is_filtered(),
)
@@ -543,6 +539,7 @@ def main():
"//",
run_list,
output_lines,
+ ginfo,
global_vars_seen_dict,
False,
False,
diff --git a/llvm/utils/update_llc_test_checks.py b/llvm/utils/update_llc_test_checks.py
index 1ed0132781e2..3e9380d95e3f 100755
--- a/llvm/utils/update_llc_test_checks.py
+++ b/llvm/utils/update_llc_test_checks.py
@@ -16,7 +16,9 @@ from UpdateTestChecks import common
# llc is the only llc-like in the LLVM tree but downstream forks can add
# additional ones here if they have them.
-LLC_LIKE_TOOLS = ("llc",)
+LLC_LIKE_TOOLS = [
+ "llc",
+]
def main():
@@ -54,6 +56,16 @@ def main():
default=False,
help="Reduce scrubbing shuffles with memory operands",
)
+ parser.add_argument(
+ "--tool",
+ default=None,
+ help="Treat the given tool name as an llc-like tool for which check lines should be generated",
+ )
+ parser.add_argument(
+ "--default-march",
+ default=None,
+ help="Set a default -march for when neither triple nor arch are found in a RUN line",
+ )
parser.add_argument("tests", nargs="+")
initial_args = common.parse_commandline_args(parser)
@@ -89,7 +101,7 @@ def main():
if m:
triple_in_cmd = m.groups()[0]
- march_in_cmd = None
+ march_in_cmd = ti.args.default_march
m = common.MARCH_ARG_RE.search(llc_cmd)
if m:
march_in_cmd = m.groups()[0]
@@ -101,7 +113,11 @@ def main():
from UpdateTestChecks import asm as output_type
common.verify_filecheck_prefixes(filecheck_cmd)
- if llc_tool not in LLC_LIKE_TOOLS:
+
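+    # Copy the list so a per-test --tool value does not leak across tests.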
+ llc_like_tools = LLC_LIKE_TOOLS[:]
+ if ti.args.tool:
+ llc_like_tools.append(ti.args.tool)
+ if llc_tool not in llc_like_tools:
common.warn("Skipping non-llc RUN line: " + l)
continue
@@ -133,6 +149,7 @@ def main():
else:
check_indent = ""
+ ginfo = common.make_asm_generalizer(version=1)
builder = common.FunctionTestBuilder(
run_list=run_list,
flags=type(
@@ -148,6 +165,7 @@ def main():
),
scrubber_args=[ti.args],
path=ti.path,
+ ginfo=ginfo,
)
for (
@@ -173,9 +191,7 @@ def main():
triple = common.get_triple_from_march(march_in_cmd)
scrubber, function_re = output_type.get_run_handler(triple)
- builder.process_run_line(
- function_re, scrubber, raw_tool_output, prefixes, True
- )
+ builder.process_run_line(function_re, scrubber, raw_tool_output, prefixes)
builder.processed_prefixes(prefixes)
func_dict = builder.finish_and_get_func_dict()
@@ -218,6 +234,7 @@ def main():
prefixes,
func_dict,
func,
+ ginfo,
global_vars_seen_dict,
is_filtered=builder.is_filtered(),
),
@@ -243,6 +260,7 @@ def main():
run_list,
func_dict,
func_name,
+ ginfo,
global_vars_seen_dict,
is_filtered=builder.is_filtered(),
)
diff --git a/llvm/utils/update_test_checks.py b/llvm/utils/update_test_checks.py
index 04808ce6bb1c..16f3e618770b 100755
--- a/llvm/utils/update_test_checks.py
+++ b/llvm/utils/update_test_checks.py
@@ -147,9 +147,14 @@ def main():
# now, we just ignore all but the last.
prefix_list.append((check_prefixes, tool_cmd_args, preprocess_cmd))
+ ginfo = common.make_ir_generalizer(ti.args.version)
global_vars_seen_dict = {}
builder = common.FunctionTestBuilder(
- run_list=prefix_list, flags=ti.args, scrubber_args=[], path=ti.path
+ run_list=prefix_list,
+ flags=ti.args,
+ scrubber_args=[],
+ path=ti.path,
+ ginfo=ginfo,
)
tool_binary = ti.args.tool_binary
@@ -172,7 +177,6 @@ def main():
common.scrub_body,
raw_tool_output,
prefixes,
- False,
)
builder.processed_prefixes(prefixes)
@@ -217,6 +221,7 @@ def main():
";",
prefix_list,
output_lines,
+ ginfo,
global_vars_seen_dict,
args.preserve_names,
True,
@@ -239,7 +244,7 @@ def main():
func,
False,
args.function_signature,
- args.version,
+ ginfo,
global_vars_seen_dict,
is_filtered=builder.is_filtered(),
original_check_lines=original_check_lines.get(func, {}),
@@ -271,7 +276,7 @@ def main():
func_name,
args.preserve_names,
args.function_signature,
- args.version,
+ ginfo,
global_vars_seen_dict,
is_filtered=builder.is_filtered(),
original_check_lines=original_check_lines.get(
@@ -290,6 +295,7 @@ def main():
";",
prefix_list,
output_lines,
+ ginfo,
global_vars_seen_dict,
args.preserve_names,
True,
@@ -337,6 +343,7 @@ def main():
";",
prefix_list,
output_lines,
+ ginfo,
global_vars_seen_dict,
args.preserve_names,
False,
diff --git a/mlir/CMakeLists.txt b/mlir/CMakeLists.txt
index 4c0ef8387b8d..c6d44908a111 100644
--- a/mlir/CMakeLists.txt
+++ b/mlir/CMakeLists.txt
@@ -1,5 +1,6 @@
# MLIR project.
cmake_minimum_required(VERSION 3.20.0)
+set(LLVM_SUBPROJECT_TITLE "MLIR")
if(NOT DEFINED LLVM_COMMON_CMAKE_UTILS)
set(LLVM_COMMON_CMAKE_UTILS ${CMAKE_CURRENT_SOURCE_DIR}/../cmake)
@@ -96,12 +97,13 @@ endif()
# tablegen'd targets.
# mlir-generic-headers are dialect-independent.
add_custom_target(mlir-generic-headers)
-set_target_properties(mlir-generic-headers PROPERTIES FOLDER "Misc")
+set_target_properties(mlir-generic-headers PROPERTIES FOLDER "MLIR/Resources")
# mlir-headers may be dialect-dependent.
add_custom_target(mlir-headers)
-set_target_properties(mlir-headers PROPERTIES FOLDER "Misc")
+set_target_properties(mlir-headers PROPERTIES FOLDER "MLIR/Resources")
add_dependencies(mlir-headers mlir-generic-headers)
add_custom_target(mlir-doc)
+set_target_properties(mlir-doc PROPERTIES FOLDER "MLIR/Docs")
# Only enable execution engine if the native target is available.
if(${LLVM_NATIVE_ARCH} IN_LIST LLVM_TARGETS_TO_BUILD)
@@ -110,14 +112,6 @@ else()
set(MLIR_ENABLE_EXECUTION_ENGINE 0)
endif()
-# Build the CUDA conversions and run according tests if the NVPTX backend
-# is available
-if ("NVPTX" IN_LIST LLVM_TARGETS_TO_BUILD)
- set(MLIR_ENABLE_CUDA_CONVERSIONS 1)
-else()
- set(MLIR_ENABLE_CUDA_CONVERSIONS 0)
-endif()
-
# Build the ROCm conversions and run according tests if the AMDGPU backend
# is available.
if ("AMDGPU" IN_LIST LLVM_TARGETS_TO_BUILD)
@@ -201,6 +195,7 @@ add_subdirectory(lib/CAPI)
if (MLIR_INCLUDE_TESTS)
add_definitions(-DMLIR_INCLUDE_TESTS)
add_custom_target(MLIRUnitTests)
+ set_target_properties(MLIRUnitTests PROPERTIES FOLDER "MLIR/Tests")
if (EXISTS ${LLVM_THIRD_PARTY_DIR}/unittest/googletest/include/gtest/gtest.h)
add_subdirectory(unittests)
else()
@@ -261,7 +256,7 @@ endif()
# Custom target to install all mlir libraries
add_custom_target(mlir-libraries)
-set_target_properties(mlir-libraries PROPERTIES FOLDER "Misc")
+set_target_properties(mlir-libraries PROPERTIES FOLDER "MLIR/Metatargets")
if (NOT LLVM_ENABLE_IDE)
add_llvm_install_targets(install-mlir-libraries
diff --git a/mlir/cmake/modules/AddMLIR.cmake b/mlir/cmake/modules/AddMLIR.cmake
index afb74fb2d000..a68527720959 100644
--- a/mlir/cmake/modules/AddMLIR.cmake
+++ b/mlir/cmake/modules/AddMLIR.cmake
@@ -210,6 +210,7 @@ function(add_mlir_doc doc_filename output_file output_directory command)
${GEN_DOC_FILE}
DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${output_file}.md)
add_custom_target(${output_file}DocGen DEPENDS ${GEN_DOC_FILE})
+ set_target_properties(${output_file}DocGen PROPERTIES FOLDER "MLIR/Tablegenning/Docs")
add_dependencies(mlir-doc ${output_file}DocGen)
endfunction()
@@ -290,7 +291,7 @@ function(add_mlir_example_library name)
list(APPEND ARG_DEPENDS mlir-generic-headers)
llvm_add_library(${name} ${LIBTYPE} ${ARG_UNPARSED_ARGUMENTS} ${srcs} DEPENDS ${ARG_DEPENDS} LINK_COMPONENTS ${ARG_LINK_COMPONENTS} LINK_LIBS ${ARG_LINK_LIBS})
- set_target_properties(${name} PROPERTIES FOLDER "Examples")
+ set_target_properties(${name} PROPERTIES FOLDER "MLIR/Examples")
if (LLVM_BUILD_EXAMPLES AND NOT ${ARG_DISABLE_INSTALL})
add_mlir_library_install(${name})
else()
@@ -367,7 +368,7 @@ function(add_mlir_library name)
# Add empty "phony" target
add_custom_target(${name})
endif()
- set_target_properties(${name} PROPERTIES FOLDER "MLIR libraries")
+ set_target_properties(${name} PROPERTIES FOLDER "MLIR/Libraries")
# Setup aggregate.
if(ARG_ENABLE_AGGREGATION)
diff --git a/mlir/docs/CMakeLists.txt b/mlir/docs/CMakeLists.txt
index 36cd3f998388..43192569847c 100644
--- a/mlir/docs/CMakeLists.txt
+++ b/mlir/docs/CMakeLists.txt
@@ -78,6 +78,7 @@ if (LLVM_ENABLE_DOXYGEN)
COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/doxygen.cfg
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating mlir doxygen documentation." VERBATIM)
+ set_target_properties(doxygen-mlir PROPERTIES FOLDER "MLIR/Docs")
if (LLVM_BUILD_DOCS)
add_dependencies(doxygen doxygen-mlir)
diff --git a/mlir/docs/PassManagement.md b/mlir/docs/PassManagement.md
index e9ecb99462b8..7b19a7bf6bf4 100644
--- a/mlir/docs/PassManagement.md
+++ b/mlir/docs/PassManagement.md
@@ -1359,6 +1359,45 @@ func.func @simple_constant() -> (i32, i32) {
}
```
+* `mlir-print-ir-tree-dir=(directory path)`
+ * Without setting this option, the IR printed by the instrumentation will
+ be printed to `stderr`. If you provide a directory using this option,
+ the output corresponding to each pass will be printed to a file in the
+ directory tree rooted at `(directory path)`. The path created for each
+ pass reflects the nesting structure of the IR and the pass pipeline.
+  * The example below illustrates the file tree created by running a pass
+    pipeline on IR that has two `func.func` ops located within two nested
+    `builtin.module` ops.
+  * The subdirectories are given names that reflect the parent op names and
+    the symbol names for those ops (if present).
+  * The printer keeps a counter for each op that is targeted by a pass, as
+    well as for its isolated-from-above parents. Each filename is given a
+    numeric prefix using the counter value for the op that the pass is
+    targeting, with the counter values for each parent prepended. This naming
+    scheme makes it easy to distinguish which passes may have run concurrently
+    from those that have a clear ordering. In the example below, for both
+    `1_1_pass4.mlir` files, the first `1` refers to the counter for the parent
+    op, and the second to the counter for the respective function.
+
+```
+$ pipeline="builtin.module(pass1,pass2,func.func(pass3,pass4),pass5)"
+$ mlir-opt foo.mlir -pass-pipeline="$pipeline" -mlir-print-ir-tree-dir=/tmp/pipeline_output
+$ tree /tmp/pipeline_output
+
+/tmp/pipeline_output
+├── builtin_module_the_symbol_name
+│ ├── 0_pass1.mlir
+│ ├── 1_pass2.mlir
+│ ├── 2_pass5.mlir
+│ ├── func_func_my_func_name
+│ │ ├── 1_0_pass3.mlir
+│ │ ├── 1_1_pass4.mlir
+│ ├── func_func_my_other_func_name
+│ │ ├── 1_0_pass3.mlir
+│ │ ├── 1_1_pass4.mlir
+```
+
## Crash and Failure Reproduction
The [pass manager](#pass-manager) in MLIR contains a builtin mechanism to
diff --git a/mlir/examples/toy/CMakeLists.txt b/mlir/examples/toy/CMakeLists.txt
index 56002b1ad2e2..403236a55202 100644
--- a/mlir/examples/toy/CMakeLists.txt
+++ b/mlir/examples/toy/CMakeLists.txt
@@ -1,5 +1,5 @@
add_custom_target(Toy)
-set_target_properties(Toy PROPERTIES FOLDER Examples)
+set_target_properties(Toy PROPERTIES FOLDER "MLIR/Examples")
macro(add_toy_chapter name)
add_dependencies(Toy ${name})
diff --git a/mlir/examples/transform/CMakeLists.txt b/mlir/examples/transform/CMakeLists.txt
index b688aa7461d6..2b4208e4c0f7 100644
--- a/mlir/examples/transform/CMakeLists.txt
+++ b/mlir/examples/transform/CMakeLists.txt
@@ -1,4 +1,5 @@
add_custom_target(TransformExample)
+set_target_properties(TransformExample PROPERTIES FOLDER "MLIR/Examples")
add_subdirectory(Ch2)
add_subdirectory(Ch3)
diff --git a/mlir/include/mlir-c/Debug.h b/mlir/include/mlir-c/Debug.h
index 2502f2fa23bf..7dad73500858 100644
--- a/mlir/include/mlir-c/Debug.h
+++ b/mlir/include/mlir-c/Debug.h
@@ -21,6 +21,19 @@ MLIR_CAPI_EXPORTED void mlirEnableGlobalDebug(bool enable);
/// Returns `true` if the global debugging flag is set, false otherwise.
MLIR_CAPI_EXPORTED bool mlirIsGlobalDebugEnabled();
+/// Sets the current debug type, similarly to `-debug-only=type` in the
+/// command-line tools. Note that global debug should be enabled for any output
+/// to be produced.
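+/// For example, mlirSetGlobalDebugType("dialect-conversion") has the same
+/// effect as passing `-debug-only=dialect-conversion` to a command-line tool.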
+MLIR_CAPI_EXPORTED void mlirSetGlobalDebugType(const char *type);
+
+/// Sets multiple current debug types, similarly to `-debug-only=type1,type2`
+/// in the command-line tools. Note that global debug should be enabled for
+/// any output to be produced.
+MLIR_CAPI_EXPORTED void mlirSetGlobalDebugTypes(const char **types, intptr_t n);
+
+/// Checks if `type` is set as the current debug type.
+MLIR_CAPI_EXPORTED bool mlirIsCurrentDebugType(const char *type);
+
#ifdef __cplusplus
}
#endif
diff --git a/mlir/include/mlir/Analysis/SliceAnalysis.h b/mlir/include/mlir/Analysis/SliceAnalysis.h
index d5cdf72c3889..99279fdfe427 100644
--- a/mlir/include/mlir/Analysis/SliceAnalysis.h
+++ b/mlir/include/mlir/Analysis/SliceAnalysis.h
@@ -223,11 +223,6 @@ SetVector<Operation *>
getSlice(Operation *op, const BackwardSliceOptions &backwardSliceOptions = {},
const ForwardSliceOptions &forwardSliceOptions = {});
-/// Multi-root DAG topological sort.
-/// Performs a topological sort of the Operation in the `toSort` SetVector.
-/// Returns a topologically sorted SetVector.
-SetVector<Operation *> topologicalSort(const SetVector<Operation *> &toSort);
-
/// Utility to match a generic reduction given a list of iteration-carried
/// arguments, `iterCarriedArgs` and the position of the potential reduction
/// argument within the list, `redPos`. If a reduction is matched, returns the
diff --git a/mlir/include/mlir/Transforms/TopologicalSortUtils.h b/mlir/include/mlir/Analysis/TopologicalSortUtils.h
index 74e44b1dc485..ee98cd8cb380 100644
--- a/mlir/include/mlir/Transforms/TopologicalSortUtils.h
+++ b/mlir/include/mlir/Analysis/TopologicalSortUtils.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef MLIR_TRANSFORMS_TOPOLOGICALSORTUTILS_H
-#define MLIR_TRANSFORMS_TOPOLOGICALSORTUTILS_H
+#ifndef MLIR_ANALYSIS_TOPOLOGICALSORTUTILS_H
+#define MLIR_ANALYSIS_TOPOLOGICALSORTUTILS_H
#include "mlir/IR/Block.h"
@@ -104,6 +104,14 @@ bool computeTopologicalSorting(
MutableArrayRef<Operation *> ops,
function_ref<bool(Value, Operation *)> isOperandReady = nullptr);
+/// Gets a list of blocks that is sorted according to dominance. This sort is
+/// stable.
+SetVector<Block *> getBlocksSortedByDominance(Region &region);
+
+/// Sorts all operations in `toSort` topologically while also considering region
+/// semantics. Does not support multi-sets.
+SetVector<Operation *> topologicalSort(const SetVector<Operation *> &toSort);
+
} // end namespace mlir
-#endif // MLIR_TRANSFORMS_TOPOLOGICALSORTUTILS_H
+#endif // MLIR_ANALYSIS_TOPOLOGICALSORTUTILS_H
diff --git a/mlir/include/mlir/Config/mlir-config.h.cmake b/mlir/include/mlir/Config/mlir-config.h.cmake
index 9339ce07bdfd..abd6f41b42ff 100644
--- a/mlir/include/mlir/Config/mlir-config.h.cmake
+++ b/mlir/include/mlir/Config/mlir-config.h.cmake
@@ -39,10 +39,6 @@
/* If set, enables PDL usage. */
#cmakedefine01 MLIR_ENABLE_PDL_IN_PATTERNMATCH
-/* If set, enables CUDA-related features in CUDA-related transforms, pipelines,
- and targets. */
-#cmakedefine01 MLIR_ENABLE_CUDA_CONVERSIONS
-
/* If set, enables features that depend on the NVIDIA's PTX compiler. */
#cmakedefine01 MLIR_ENABLE_NVPTXCOMPILER
diff --git a/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td b/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td
index 4e4c6fd60177..ead52332e8ee 100644
--- a/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td
+++ b/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td
@@ -1540,6 +1540,18 @@ def Arith_CmpFOp : Arith_CompareOp<"cmpf",
// SelectOp
//===----------------------------------------------------------------------===//
+class AnyBooleanTypeMatch<list<string> names> :
+ AnyMatchOperatorTrait<names, "$_self.getType().isSignlessInteger(1)",
+ "scalar type">;
+
+class ScalarConditionOrMatchingShape<list<string> names> :
+ PredOpTrait<
+ !head(names) # " is scalar or has matching shape",
+ Or<[AnyBooleanTypeMatch<[!head(names)]>.predicate,
+ AllShapesMatch<names>.predicate]>> {
+ list<string> values = names;
+}
+
def SelectOp : Arith_Op<"select", [Pure,
AllTypesMatch<["true_value", "false_value", "result"]>,
ScalarConditionOrMatchingShape<["condition", "result"]>,
@@ -1548,16 +1560,16 @@ def SelectOp : Arith_Op<"select", [Pure,
let summary = "select operation";
let description = [{
The `arith.select` operation chooses one value based on a binary condition
- supplied as its first operand.
-
- If the value of the first operand (the condition) is `1`, then the second
- operand is returned, and the third operand is ignored, even if it was poison.
-
- If the value of the first operand (the condition) is `0`, then the third
- operand is returned, and the second operand is ignored, even if it was poison.
-
- If the value of the first operand (the condition) is poison, then the
- operation returns poison.
+ supplied as its first operand.
+
+ If the value of the first operand (the condition) is `1`, then the second
+ operand is returned, and the third operand is ignored, even if it was poison.
+
+ If the value of the first operand (the condition) is `0`, then the third
+ operand is returned, and the second operand is ignored, even if it was poison.
+
+ If the value of the first operand (the condition) is poison, then the
+ operation returns poison.
The operation applies to vectors and tensors elementwise given the _shape_
of all operands is identical. The choice is made for each element
diff --git a/mlir/include/mlir/Dialect/CommonFolders.h b/mlir/include/mlir/Dialect/CommonFolders.h
index 7dabc781cd59..6f497a259262 100644
--- a/mlir/include/mlir/Dialect/CommonFolders.h
+++ b/mlir/include/mlir/Dialect/CommonFolders.h
@@ -298,7 +298,10 @@ Attribute constFoldCastOp(ArrayRef<Attribute> operands, Type resType,
calculate(op.getSplatValue<ElementValueT>(), castStatus);
if (!castStatus)
return {};
- return DenseElementsAttr::get(cast<ShapedType>(resType), elementResult);
+ auto shapedResType = cast<ShapedType>(resType);
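+      // DenseElementsAttr requires a statically shaped type; bail out on
+      // dynamic shapes instead of asserting.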
+ if (!shapedResType.hasStaticShape())
+ return {};
+ return DenseElementsAttr::get(shapedResType, elementResult);
}
if (auto op = dyn_cast<ElementsAttr>(operands[0])) {
// Operand is ElementsAttr-derived; perform an element-wise fold by
diff --git a/mlir/include/mlir/Dialect/IRDL/IR/IRDLOps.td b/mlir/include/mlir/Dialect/IRDL/IR/IRDLOps.td
index aa6a8e93c028..d2765dec420a 100644
--- a/mlir/include/mlir/Dialect/IRDL/IR/IRDLOps.td
+++ b/mlir/include/mlir/Dialect/IRDL/IR/IRDLOps.td
@@ -503,7 +503,8 @@ def IRDL_BaseOp : IRDL_ConstraintOp<"base",
}
def IRDL_ParametricOp : IRDL_ConstraintOp<"parametric",
- [ParentOneOf<["TypeOp", "AttributeOp", "OperationOp"]>, Pure]> {
+ [ParentOneOf<["TypeOp", "AttributeOp", "OperationOp"]>,
+ DeclareOpInterfaceMethods<SymbolUserOpInterface>, Pure]> {
let summary = "Constraints an attribute/type base and its parameters";
let description = [{
`irdl.parametric` defines a constraint that accepts only a single type
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
index bd347d0cf630..57af89f5dbf8 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
@@ -635,7 +635,7 @@ def LLVM_VaEndOp : LLVM_ZeroResultIntrOp<"vaend", [0]>,
// Exception handling intrinsics.
//
-def LLVM_EhTypeidForOp : LLVM_OneResultIntrOp<"eh.typeid.for"> {
+def LLVM_EhTypeidForOp : LLVM_OneResultIntrOp<"eh.typeid.for", [], [0]> {
let arguments = (ins LLVM_AnyPointer:$type_info);
let assemblyFormat = "$type_info attr-dict `:` functional-type(operands, results)";
}
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index 7ffbc2d7922f..4daeeab09386 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -429,7 +429,7 @@ def NVVM_BarrierArriveOp : NVVM_PTXBuilder_Op<"barrier.arrive">
let extraClassDefinition = [{
std::string $cppClass::getPtx() {
std::string ptx = "bar.arrive ";
- if (getBarrierId()) { ptx += "%0, %1"; }
+ if (getBarrierId()) { ptx += "%0, %1;"; }
else { ptx += "0, %0;"; }
return ptx;
}
diff --git a/mlir/include/mlir/Dialect/LLVMIR/Transforms/Passes.h b/mlir/include/mlir/Dialect/LLVMIR/Transforms/Passes.h
index 13e10b29c074..a7bf8796c027 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/LLVMIR/Transforms/Passes.h
@@ -13,7 +13,6 @@
#include "mlir/Dialect/LLVMIR/Transforms/LegalizeForExport.h"
#include "mlir/Dialect/LLVMIR/Transforms/OptimizeForNVVM.h"
#include "mlir/Dialect/LLVMIR/Transforms/RequestCWrappers.h"
-#include "mlir/Dialect/LLVMIR/Transforms/TypeConsistency.h"
#include "mlir/Pass/Pass.h"
namespace mlir {
diff --git a/mlir/include/mlir/Dialect/LLVMIR/Transforms/Passes.td b/mlir/include/mlir/Dialect/LLVMIR/Transforms/Passes.td
index 0242cfd9abb7..11d1b9411071 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/Transforms/Passes.td
@@ -43,24 +43,6 @@ def LLVMRequestCWrappers
let constructor = "::mlir::LLVM::createRequestCWrappersPass()";
}
-def LLVMTypeConsistency
- : Pass<"llvm-type-consistency", "::mlir::LLVM::LLVMFuncOp"> {
- let summary = "Rewrites to improve type consistency";
- let description = [{
- Set of rewrites to improve the coherency of types within an LLVM dialect
- program. This will adjust operations operating on pointers so they interpret
- their associated pointee type as consistently as possible.
- }];
- let constructor = "::mlir::LLVM::createTypeConsistencyPass()";
-
- let options = [
- Option<"maxVectorSplitSize", "max-vector-split-size", "unsigned",
- /*default=*/"512",
- "Maximum size in bits of a vector value in a load or store operation"
- " operating on multiple elements that should still be split">,
- ];
-}
-
def NVVMOptimizeForTarget : Pass<"llvm-optimize-for-nvvm-target"> {
let summary = "Optimize NVVM IR";
let constructor = "::mlir::NVVM::createOptimizeForTargetPass()";
diff --git a/mlir/include/mlir/Dialect/LLVMIR/Transforms/TypeConsistency.h b/mlir/include/mlir/Dialect/LLVMIR/Transforms/TypeConsistency.h
deleted file mode 100644
index a4bb380b99b8..000000000000
--- a/mlir/include/mlir/Dialect/LLVMIR/Transforms/TypeConsistency.h
+++ /dev/null
@@ -1,73 +0,0 @@
-//===- TypeConsistency.h - Rewrites to improve type consistency -----------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// Set of rewrites to improve the coherency of types within an LLVM dialect
-// program. This will adjust operations around a given pointer so they interpret
-// its pointee type as consistently as possible.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MLIR_DIALECT_LLVMIR_TRANSFORMS_TYPECONSISTENCY_H
-#define MLIR_DIALECT_LLVMIR_TRANSFORMS_TYPECONSISTENCY_H
-
-#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
-#include "mlir/IR/PatternMatch.h"
-#include "mlir/Pass/Pass.h"
-
-namespace mlir {
-namespace LLVM {
-
-#define GEN_PASS_DECL_LLVMTYPECONSISTENCY
-#include "mlir/Dialect/LLVMIR/Transforms/Passes.h.inc"
-
-/// Creates a pass that adjusts operations operating on pointers so they
-/// interpret pointee types as consistently as possible.
-std::unique_ptr<Pass> createTypeConsistencyPass();
-
-/// Canonicalizes GEPs of which the base type and the pointer's type hint do not
-/// match. This is done by replacing the original GEP into a GEP with the type
-/// hint as a base type when an element of the hinted type aligns with the
-/// original GEP.
-class CanonicalizeAlignedGep : public OpRewritePattern<GEPOp> {
-public:
- using OpRewritePattern::OpRewritePattern;
-
- LogicalResult matchAndRewrite(GEPOp gep,
- PatternRewriter &rewriter) const override;
-};
-
-/// Splits stores which write into multiple adjacent elements of an aggregate
-/// through a pointer. Currently, integers and vector are split and stores
-/// are generated for every element being stored to in a type-consistent manner.
-/// This is done on a best-effort basis.
-class SplitStores : public OpRewritePattern<StoreOp> {
- unsigned maxVectorSplitSize;
-
-public:
- SplitStores(MLIRContext *context, unsigned maxVectorSplitSize)
- : OpRewritePattern(context), maxVectorSplitSize(maxVectorSplitSize) {}
-
- LogicalResult matchAndRewrite(StoreOp store,
- PatternRewriter &rewrite) const override;
-};
-
-/// Splits GEPs with more than two indices into multiple GEPs with exactly
-/// two indices. The created GEPs are then guaranteed to index into only
-/// one aggregate at a time.
-class SplitGEP : public OpRewritePattern<GEPOp> {
-public:
- using OpRewritePattern::OpRewritePattern;
-
- LogicalResult matchAndRewrite(GEPOp gepOp,
- PatternRewriter &rewriter) const override;
-};
-
-} // namespace LLVM
-} // namespace mlir
-
-#endif // MLIR_DIALECT_LLVMIR_TRANSFORMS_TYPECONSISTENCY_H
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/Linalg/IR/CMakeLists.txt
index f5d48b2ebcef..289c0e4bbdaf 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/Linalg/IR/CMakeLists.txt
@@ -23,6 +23,7 @@ function(add_linalg_ods_yaml_gen yaml_ast_file output_file)
${MLIR_LINALG_ODS_YAML_GEN_EXE}
${MLIR_LINALG_ODS_YAML_GEN_TARGET}
${GEN_ODS_FILE} ${GEN_CPP_FILE})
+ set_target_properties(MLIR${output_file}YamlIncGen PROPERTIES FOLDER "MLIR/Tablegenning")
list(APPEND LLVM_TARGET_DEPENDS ${GEN_ODS_FILE})
set(LLVM_TARGET_DEPENDS ${LLVM_TARGET_DEPENDS} PARENT_SCOPE)
endfunction()
@@ -40,6 +41,7 @@ add_custom_target(LinalgOdsGen
DEPENDS
MLIRLinalgNamedStructuredOpsYamlIncGen
)
+set_target_properties(LinalgOdsGen PROPERTIES FOLDER "MLIR/Tablegenning")
add_dependencies(mlir-headers LinalgOdsGen)
add_mlir_dialect(LinalgOps linalg)
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h
index f92843a1dcb9..08afdf373f01 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h
@@ -28,6 +28,7 @@ namespace mlir {
namespace linalg {
class IteratorTypeAttr;
class LinalgOp;
+class GenericOp;
namespace detail {
/// Implementation of the method that check if given operands
@@ -115,6 +116,21 @@ bool isaConvolutionOpInterface(LinalgOp linalgOp);
/// Checks whether `linalgOp` is semantically equivalent to a `linalg.copyOp`.
bool isaCopyOpInterface(LinalgOp linalgOp);
+/// Checks whether a given `genericOp` is semantically equivalent to a single
+/// linalg elementwise unary op, e.g. linalg.exp.
+/// A linalg.generic body could be a series of unary elementwise ops, e.g.
+/// `exp(neg(x))`, such as formed by linalg op fusion. Here we restrict it to
+/// detecting cases where the body is a single computation op.
+bool isaElemwiseSingleUnaryOpInterface(GenericOp genericOp);
+
+/// Checks whether `genericOp` is semantically equivalent to a single linalg
+/// elementwise binary op e.g. linalg.sub.
+bool isaElemwiseSingleBinaryOpInterface(GenericOp genericOp);
+
+/// Checks whether `genericOp` is semantically equivalent to a `linalg.fill`.
+/// Returns the scalar fill value if so.
+std::optional<Value> isaFillOpInterface(GenericOp genericOp);
+
namespace detail {
/// Returns true if the block contains a contraction of the following form:
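The three helpers declared above classify a `linalg.generic` by the shape of its body. A hedged usage sketch based on those declarations (the wrapper function and call context are assumptions):

```cpp
#include <optional>
#include "mlir/Dialect/Linalg/IR/LinalgInterfaces.h"

void classify(mlir::linalg::GenericOp genericOp) {
  using namespace mlir::linalg;
  if (isaElemwiseSingleUnaryOpInterface(genericOp)) {
    // Body is a single unary elementwise computation, e.g. linalg.exp.
  } else if (isaElemwiseSingleBinaryOpInterface(genericOp)) {
    // Body is a single binary elementwise computation, e.g. linalg.sub.
  } else if (std::optional<mlir::Value> fill = isaFillOpInterface(genericOp)) {
    mlir::Value scalar = *fill; // the scalar value the fill broadcasts
    (void)scalar;
  }
}
```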
diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
index 5585ba27fdad..93e2c2db729d 100644
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
@@ -1681,7 +1681,7 @@ def TileReductionUsingForOp : Op<Transform_Dialect, "structured.tile_reduction_u
// TODO: support mixed static-dynamic (see TileUsingForallOp).
let arguments = (ins TransformHandleTypeInterface:$target,
DefaultValuedAttr<DenseI64ArrayAttr, "{}">:$tile_sizes);
- let results = (outs TransformHandleTypeInterface:$fill_op,
+ let results = (outs Variadic<TransformHandleTypeInterface>:$fill_op,
TransformHandleTypeInterface:$split_linalg_op,
TransformHandleTypeInterface:$combining_linalg_op,
TransformHandleTypeInterface:$for_op);
@@ -1787,7 +1787,7 @@ def TileReductionUsingForallOp :
DefaultValuedAttr<DenseI64ArrayAttr, "{}">:$num_threads,
DefaultValuedAttr<DenseI64ArrayAttr, "{}">:$tile_sizes,
OptionalAttr<DeviceMappingArrayAttr>:$mapping);
- let results = (outs TransformHandleTypeInterface:$fill_op,
+ let results = (outs Variadic<TransformHandleTypeInterface>:$fill_op,
TransformHandleTypeInterface:$split_linalg_op,
TransformHandleTypeInterface:$combining_linalg_op,
TransformHandleTypeInterface:$forall_op);
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index f77c19ed0fcc..308ce92e3552 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -876,8 +876,8 @@ struct ForallReductionTilingResult {
Operation *parallelTiledOp;
/// The final reduction operation merging all the partial reductions.
Operation *mergeOp;
- /// The op initializing the tensor used for partial reductions.
- Operation *initialOp;
+ /// Initial values used for partial reductions.
+ SmallVector<Value> initialValues;
/// The `scf.forall` operation that iterates over the tiles.
scf::ForallOp loops;
};
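Callers of the forall reduction tiling previously read a single `initialOp`; they now receive one initial value per reduction. A sketch of migrated consumer code under that reading (function name assumed):

```cpp
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"

// Iterate the per-reduction initial values instead of one init op.
void inspectInits(const mlir::linalg::ForallReductionTilingResult &result) {
  for (mlir::Value init : result.initialValues) {
    (void)init; // e.g. trace each value back to its defining op if needed
  }
}
```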
diff --git a/mlir/include/mlir/Dialect/Math/Transforms/Passes.h b/mlir/include/mlir/Dialect/Math/Transforms/Passes.h
index ba6977251564..2dd7f6431f03 100644
--- a/mlir/include/mlir/Dialect/Math/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/Math/Transforms/Passes.h
@@ -14,10 +14,6 @@
namespace mlir {
namespace math {
#define GEN_PASS_DECL
-#include "mlir/Dialect/Math/Transforms/Passes.h.inc"
-#define GEN_PASS_DECL_MATHUPLIFTTOFMA
-#define GEN_PASS_DECL_MATHLEGALIZETOF32
-#include "mlir/Dialect/Math/Transforms/Passes.h.inc"
#define GEN_PASS_REGISTRATION
#include "mlir/Dialect/Math/Transforms/Passes.h.inc"
} // namespace math
diff --git a/mlir/include/mlir/Dialect/Mesh/IR/MeshBase.td b/mlir/include/mlir/Dialect/Mesh/IR/MeshBase.td
index 9d9b5892e1a5..3a85bf2d552f 100644
--- a/mlir/include/mlir/Dialect/Mesh/IR/MeshBase.td
+++ b/mlir/include/mlir/Dialect/Mesh/IR/MeshBase.td
@@ -151,7 +151,9 @@ def MeshSharding : AttrDef<Mesh_Dialect, "MeshSharding"> {
let extraClassDeclaration = [{
bool operator==(::mlir::Attribute rhs) const;
+ bool operator!=(::mlir::Attribute rhs) const;
bool operator==(::mlir::mesh::MeshShardingAttr rhs) const;
+ bool operator!=(::mlir::mesh::MeshShardingAttr rhs) const;
}];
let genVerifyDecl = 1;
diff --git a/mlir/include/mlir/Dialect/Mesh/IR/MeshOps.h b/mlir/include/mlir/Dialect/Mesh/IR/MeshOps.h
index 4569b77441c3..7a24c201a39a 100644
--- a/mlir/include/mlir/Dialect/Mesh/IR/MeshOps.h
+++ b/mlir/include/mlir/Dialect/Mesh/IR/MeshOps.h
@@ -51,15 +51,26 @@ void removeTrailingEmptySubArray(SmallVector<SmallVector<T>> &array) {
// Is the same tensor replicated on all processes.
inline bool isFullReplication(MeshShardingAttr attr) {
- return attr.getPartialAxes().empty() && attr.getSplitAxes().empty();
+ return attr.getPartialAxes().empty() &&
+ llvm::all_of(attr.getSplitAxes(), [](MeshAxesAttr axes) {
+ return axes.asArrayRef().empty();
+ });
}
-inline mesh::MeshOp getMesh(Operation *op, FlatSymbolRefAttr meshSymbol,
- SymbolTableCollection &symbolTableCollection) {
+inline mesh::MeshOp
+getMeshOrNull(Operation *op, FlatSymbolRefAttr meshSymbol,
+ SymbolTableCollection &symbolTableCollection) {
return symbolTableCollection.lookupNearestSymbolFrom<mesh::MeshOp>(
op, meshSymbol);
}
+inline mesh::MeshOp getMesh(Operation *op, FlatSymbolRefAttr meshSymbol,
+ SymbolTableCollection &symbolTableCollection) {
+ mesh::MeshOp meshOp = getMeshOrNull(op, meshSymbol, symbolTableCollection);
+ assert(meshOp);
+ return meshOp;
+}
+
// Get the corresponding mesh op using the standard attribute nomenclature.
template <typename Op>
mesh::MeshOp getMesh(Op op, SymbolTableCollection &symbolTableCollection) {
@@ -128,6 +139,17 @@ ShapedType shardShapedType(ShapedType shape, MeshOp mesh,
// `sharding` in that case must be null.
Type shardType(Type type, MeshOp mesh, MeshShardingAttr sharding);
+// Insert shard op if there is not one that already has the same sharding.
+// May insert resharding if required.
+void maybeInsertTargetShardingAnnotation(MeshShardingAttr sharding,
+ OpOperand &operand,
+ OpBuilder &builder);
+void maybeInsertTargetShardingAnnotation(MeshShardingAttr sharding,
+ OpResult result, OpBuilder &builder);
+void maybeInsertSourceShardingAnnotation(MeshShardingAttr sharding,
+ OpOperand &operand,
+ OpBuilder &builder);
+
} // namespace mesh
} // namespace mlir
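The lookup API is now split: `getMeshOrNull` returns a null op when the symbol lookup fails, while `getMesh` asserts success. A hedged sketch of a caller that prefers a diagnostic over an assertion (helper name and error text illustrative):

```cpp
#include "mlir/Dialect/Mesh/IR/MeshOps.h"

mlir::mesh::MeshOp lookupMeshOrDiagnose(mlir::Operation *op,
                                        mlir::FlatSymbolRefAttr meshSymbol,
                                        mlir::SymbolTableCollection &tables) {
  // Tolerates a missing mesh symbol, unlike getMesh(), which asserts.
  mlir::mesh::MeshOp mesh = mlir::mesh::getMeshOrNull(op, meshSymbol, tables);
  if (!mesh)
    op->emitError("undefined mesh symbol: ") << meshSymbol;
  return mesh;
}
```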
diff --git a/mlir/include/mlir/Dialect/Mesh/Interfaces/ShardingInterface.h b/mlir/include/mlir/Dialect/Mesh/Interfaces/ShardingInterface.h
index c47a7ddd3f9c..216d7e10296d 100644
--- a/mlir/include/mlir/Dialect/Mesh/Interfaces/ShardingInterface.h
+++ b/mlir/include/mlir/Dialect/Mesh/Interfaces/ShardingInterface.h
@@ -37,6 +37,11 @@ struct ShardingOption {
ShardingOption() = default;
ShardingOption(ShardingArray shardingArray, FlatSymbolRefAttr mesh)
: shardingArray(std::move(shardingArray)), mesh(mesh) {}
+ static ShardingOption makeEmpty() {
+ auto res = ShardingOption();
+ res.empty = true;
+ return res;
+ }
};
// This method retrieves the 'MeshShardingAttr' attribute from a given operation
@@ -56,6 +61,10 @@ defaultGetShardingOption(Operation *op,
ArrayRef<MeshShardingAttr> operandShardings,
ArrayRef<MeshShardingAttr> resultShardings);
+FailureOr<SmallVector<MeshShardingAttr>>
+defaultGetShardingAnnotations(Operation *op,
+ const ShardingOption &shardingOption);
+
LogicalResult
defaultAddShardingAnnotations(Operation *op, OpBuilder &b,
const ShardingOption &shardingOption);
diff --git a/mlir/include/mlir/Dialect/Mesh/Interfaces/ShardingInterface.td b/mlir/include/mlir/Dialect/Mesh/Interfaces/ShardingInterface.td
index 1f75135f4288..47a74f619f56 100644
--- a/mlir/include/mlir/Dialect/Mesh/Interfaces/ShardingInterface.td
+++ b/mlir/include/mlir/Dialect/Mesh/Interfaces/ShardingInterface.td
@@ -75,8 +75,11 @@ def ShardingInterface : OpInterface<"ShardingInterface"> {
InterfaceMethod<
/*desc=*/[{
Given that certain operands or results of the operation may have
- sharding annotations, this method leverages this information to deduce
- how the operation should be sharded.
+ sharding annotations, this method leverages this information to
+ deduce how the operation should be sharded.
+ The passed sharding may be incomplete; this gives the op the freedom to
+ select the most appropriate shardings for all the operands and results,
+ as well as for the op itself.
}],
/*retTy=*/"FailureOr<ShardingOption>",
/*methodName=*/"getShardingOption",
@@ -92,6 +95,24 @@ def ShardingInterface : OpInterface<"ShardingInterface"> {
>,
InterfaceMethod<
/*desc=*/[{
+ Based on a given ShardingOption, get the sharding annotations for the
+ operands and results. These are the shardings that the operands and
+ results need to have in order to shard the op according to
+ shardingOption.
+ }],
+ /*retTy=*/"FailureOr<SmallVector<MeshShardingAttr>>",
+ /*methodName=*/"getShardingAnnotations",
+ /*args=*/(ins
+ "const ShardingOption &":$shardingOption
+ ),
+ /*methodBody=*/"",
+ /*defaultImplementation=*/[{
+ return detail::defaultGetShardingAnnotations(
+ $_op.getOperation(), shardingOption);
+ }]
+ >,
+ InterfaceMethod<
+ /*desc=*/[{
Based on a given ShardingOption, this method adds `mesh.shard`
operations for the operands and results that previously lacked
sharding annotations.
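The new `getShardingAnnotations` hook computes the required operand/result shardings without mutating the IR; its default implementation defers to `detail::defaultGetShardingAnnotations`. A hedged calling sketch (the interface value and option come from surrounding pass logic, and the operands-then-results ordering is an assumption based on `defaultAddShardingAnnotations`):

```cpp
#include "mlir/Dialect/Mesh/Interfaces/ShardingInterface.h"

void queryShardings(mlir::mesh::ShardingInterface shardingIface,
                    const mlir::mesh::ShardingOption &option) {
  mlir::FailureOr<llvm::SmallVector<mlir::mesh::MeshShardingAttr>> shardings =
      shardingIface.getShardingAnnotations(option);
  if (mlir::failed(shardings))
    return;
  // Assumed: one MeshShardingAttr per operand, then one per result.
  for (mlir::mesh::MeshShardingAttr attr : *shardings)
    (void)attr;
}
```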
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPAttrDefs.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPAttrDefs.td
new file mode 100644
index 000000000000..704d0b2220e8
--- /dev/null
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPAttrDefs.td
@@ -0,0 +1,79 @@
+//=== OpenMPAttrDefs.td - OpenMP Attributes definition -----*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef OPENMP_ATTR_DEFS
+#define OPENMP_ATTR_DEFS
+
+include "mlir/Dialect/OpenMP/OpenMPDialect.td"
+include "mlir/Dialect/OpenMP/OpenMPEnums.td"
+include "mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td"
+include "mlir/Dialect/OpenMP/OpenMPTypeInterfaces.td"
+include "mlir/IR/AttrTypeBase.td"
+include "mlir/IR/CommonAttrConstraints.td"
+
+class OpenMP_Attr<string name, string attrMnemonic, list<Trait> traits = [],
+ string baseCppClass = "::mlir::Attribute">
+ : AttrDef<OpenMP_Dialect, name, traits, baseCppClass> {
+ let mnemonic = attrMnemonic;
+}
+
+//===----------------------------------------------------------------------===//
+// DeclareTargetAttr
+//===----------------------------------------------------------------------===//
+
+def DeclareTargetAttr : OpenMP_Attr<"DeclareTarget", "declaretarget"> {
+ let parameters = (ins
+ OptionalParameter<"DeclareTargetDeviceTypeAttr">:$device_type,
+ OptionalParameter<"DeclareTargetCaptureClauseAttr">:$capture_clause
+ );
+
+ let assemblyFormat = "`<` struct(params) `>`";
+}
+
+//===----------------------------------------------------------------------===//
+// FlagsAttr
+//===----------------------------------------------------------------------===//
+
+// Runtime library flags attribute that holds information for lowering to LLVM.
+def FlagsAttr : OpenMP_Attr<"Flags", "flags"> {
+ let parameters = (ins
+ DefaultValuedParameter<"uint32_t", "0">:$debug_kind,
+ DefaultValuedParameter<"bool", "false">:$assume_teams_oversubscription,
+ DefaultValuedParameter<"bool", "false">:$assume_threads_oversubscription,
+ DefaultValuedParameter<"bool", "false">:$assume_no_thread_state,
+ DefaultValuedParameter<"bool", "false">:$assume_no_nested_parallelism,
+ DefaultValuedParameter<"bool", "false">:$no_gpu_lib,
+ DefaultValuedParameter<"uint32_t", "50">:$openmp_device_version
+ );
+
+ let assemblyFormat = "`<` struct(params) `>`";
+}
+
+//===----------------------------------------------------------------------===//
+// TaskDependArrayAttr
+//===----------------------------------------------------------------------===//
+
+def TaskDependArrayAttr
+ : TypedArrayAttrBase<ClauseTaskDependAttr,
+ ClauseTaskDependAttr.summary # " array"> {
+ let constBuilderCall = ?;
+}
+
+//===----------------------------------------------------------------------===//
+// VersionAttr
+//===----------------------------------------------------------------------===//
+
+def VersionAttr : OpenMP_Attr<"Version", "version"> {
+ let parameters = (ins
+ "uint32_t":$version
+ );
+
+ let assemblyFormat = "`<` struct(params) `>`";
+}
+
+#endif // OPENMP_ATTR_DEFS
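The attribute definitions above move verbatim out of OpenMPOps.td, so the generated C++ API should be unchanged by the file split. A hedged construction sketch (context pointer and parameter values assumed):

```cpp
#include "mlir/Dialect/OpenMP/OpenMPDialect.h"

void buildAttrs(mlir::MLIRContext *ctx) {
  // VersionAttr carries a single uint32_t parameter.
  auto version = mlir::omp::VersionAttr::get(ctx, /*version=*/50);
  (void)version;
  // FlagsAttr parameters all have defaults; only debug_kind is non-default.
  auto flags = mlir::omp::FlagsAttr::get(
      ctx, /*debug_kind=*/1, /*assume_teams_oversubscription=*/false,
      /*assume_threads_oversubscription=*/false,
      /*assume_no_thread_state=*/false,
      /*assume_no_nested_parallelism=*/false, /*no_gpu_lib=*/false,
      /*openmp_device_version=*/50);
  (void)flags;
}
```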
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPDialect.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPDialect.td
new file mode 100644
index 000000000000..459cc7843580
--- /dev/null
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPDialect.td
@@ -0,0 +1,22 @@
+//===- OpenMPDialect.td - OpenMP dialect definition --------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef OPENMP_DIALECT
+#define OPENMP_DIALECT
+
+include "mlir/IR/DialectBase.td"
+
+def OpenMP_Dialect : Dialect {
+ let name = "omp";
+ let cppNamespace = "::mlir::omp";
+ let dependentDialects = ["::mlir::LLVM::LLVMDialect, ::mlir::func::FuncDialect"];
+ let useDefaultAttributePrinterParser = 1;
+ let useDefaultTypePrinterParser = 1;
+}
+
+#endif // OPENMP_DIALECT
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPEnums.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPEnums.td
new file mode 100644
index 000000000000..bf3d33819e9a
--- /dev/null
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPEnums.td
@@ -0,0 +1,211 @@
+//===-- OpenMPEnums.td - OpenMP dialect enum file ----------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef OPENMP_ENUMS
+#define OPENMP_ENUMS
+
+include "mlir/Dialect/OpenMP/OpenMPDialect.td"
+include "mlir/IR/EnumAttr.td"
+
+include "mlir/Dialect/OpenMP/OmpCommon.td"
+
+//===----------------------------------------------------------------------===//
+// Base classes for OpenMP enum attributes.
+//===----------------------------------------------------------------------===//
+
+class OpenMP_I32EnumAttr<string name, string summary,
+ list<I32EnumAttrCase> cases>
+ : I32EnumAttr<name, summary, cases> {
+ let genSpecializedAttr = 0;
+ let cppNamespace = "::mlir::omp";
+}
+
+class OpenMP_BitEnumAttr<string name, string summary,
+ list<BitEnumAttrCaseBase> cases>
+ : I32BitEnumAttr<name, summary, cases> {
+ let genSpecializedAttr = 0;
+ let cppNamespace = "::mlir::omp";
+}
+
+class OpenMP_EnumAttr<EnumAttrInfo enumInfo, string name>
+ : EnumAttr<OpenMP_Dialect, enumInfo, name>;
+
+
+//===----------------------------------------------------------------------===//
+// capture_clause enum.
+//===----------------------------------------------------------------------===//
+
+def CaptureClauseTo : I32EnumAttrCase<"to", 0>;
+def CaptureClauseLink : I32EnumAttrCase<"link", 1>;
+def CaptureClauseEnter : I32EnumAttrCase<"enter", 2>;
+
+def DeclareTargetCaptureClause : OpenMP_I32EnumAttr<
+ "DeclareTargetCaptureClause",
+ "capture clause", [
+ CaptureClauseTo,
+ CaptureClauseLink,
+ CaptureClauseEnter
+ ]>;
+
+def DeclareTargetCaptureClauseAttr : OpenMP_EnumAttr<DeclareTargetCaptureClause,
+ "capture_clause"> {
+ let assemblyFormat = "`(` $value `)`";
+}
+
+//===----------------------------------------------------------------------===//
+// clause_depend enum.
+//===----------------------------------------------------------------------===//
+
+def ClauseDependSource : I32EnumAttrCase<"dependsource", 0>;
+def ClauseDependSink : I32EnumAttrCase<"dependsink", 1>;
+
+def ClauseDepend : OpenMP_I32EnumAttr<
+ "ClauseDepend",
+ "depend clause", [
+ ClauseDependSource,
+ ClauseDependSink
+ ]>;
+
+def ClauseDependAttr : OpenMP_EnumAttr<ClauseDepend, "clause_depend"> {
+ let assemblyFormat = "`(` $value `)`";
+}
+
+//===----------------------------------------------------------------------===//
+// clause_requires enum.
+//===----------------------------------------------------------------------===//
+
+// atomic_default_mem_order clause values not defined here because they can be
+// represented by the OMPC_MemoryOrder enumeration instead.
+def ClauseRequiresNone : I32BitEnumAttrCaseNone<"none">;
+def ClauseRequiresReverseOffload : I32BitEnumAttrCaseBit<"reverse_offload", 0>;
+def ClauseRequiresUnifiedAddress : I32BitEnumAttrCaseBit<"unified_address", 1>;
+def ClauseRequiresUnifiedSharedMemory
+ : I32BitEnumAttrCaseBit<"unified_shared_memory", 2>;
+def ClauseRequiresDynamicAllocators
+ : I32BitEnumAttrCaseBit<"dynamic_allocators", 3>;
+
+def ClauseRequires : OpenMP_BitEnumAttr<
+ "ClauseRequires",
+ "requires clauses", [
+ ClauseRequiresNone,
+ ClauseRequiresReverseOffload,
+ ClauseRequiresUnifiedAddress,
+ ClauseRequiresUnifiedSharedMemory,
+ ClauseRequiresDynamicAllocators
+ ]>;
+
+def ClauseRequiresAttr : OpenMP_EnumAttr<ClauseRequires, "clause_requires">;
+
+//===----------------------------------------------------------------------===//
+// clause_task_depend enum.
+//===----------------------------------------------------------------------===//
+
+def ClauseTaskDependIn : I32EnumAttrCase<"taskdependin", 0>;
+def ClauseTaskDependOut : I32EnumAttrCase<"taskdependout", 1>;
+def ClauseTaskDependInOut : I32EnumAttrCase<"taskdependinout", 2>;
+
+def ClauseTaskDepend : OpenMP_I32EnumAttr<
+ "ClauseTaskDepend",
+ "depend clause in a target or task construct", [
+ ClauseTaskDependIn,
+ ClauseTaskDependOut,
+ ClauseTaskDependInOut
+ ]>;
+
+def ClauseTaskDependAttr : OpenMP_EnumAttr<ClauseTaskDepend,
+ "clause_task_depend"> {
+ let assemblyFormat = "`(` $value `)`";
+}
+
+//===----------------------------------------------------------------------===//
+// data_sharing_type enum.
+//===----------------------------------------------------------------------===//
+
+def DataSharingTypePrivate : I32EnumAttrCase<"Private", 0, "private">;
+def DataSharingTypeFirstPrivate
+ : I32EnumAttrCase<"FirstPrivate", 1, "firstprivate">;
+
+def DataSharingClauseType : OpenMP_I32EnumAttr<
+ "DataSharingClauseType",
+ "Type of a data-sharing clause", [
+ DataSharingTypePrivate,
+ DataSharingTypeFirstPrivate
+ ]>;
+
+def DataSharingClauseTypeAttr : OpenMP_EnumAttr<DataSharingClauseType,
+ "data_sharing_type"> {
+ let assemblyFormat = "`{` `type` `=` $value `}`";
+}
+
+//===----------------------------------------------------------------------===//
+// device_type enum.
+//===----------------------------------------------------------------------===//
+
+def DeviceTypeAny : I32EnumAttrCase<"any", 0>;
+def DeviceTypeHost : I32EnumAttrCase<"host", 1>;
+def DeviceTypeNoHost : I32EnumAttrCase<"nohost", 2>;
+
+def DeclareTargetDeviceType : OpenMP_I32EnumAttr<
+ "DeclareTargetDeviceType",
+ "device_type clause", [
+ DeviceTypeAny,
+ DeviceTypeHost,
+ DeviceTypeNoHost
+ ]>;
+
+def DeclareTargetDeviceTypeAttr : OpenMP_EnumAttr<DeclareTargetDeviceType,
+ "device_type"> {
+ let assemblyFormat = "`(` $value `)`";
+}
+
+//===----------------------------------------------------------------------===//
+// sched_mod enum.
+//===----------------------------------------------------------------------===//
+
+def OpenMP_ScheduleModNone : I32EnumAttrCase<"none", 0>;
+def OpenMP_ScheduleModMonotonic : I32EnumAttrCase<"monotonic", 1>;
+def OpenMP_ScheduleModNonmonotonic : I32EnumAttrCase<"nonmonotonic", 2>;
+// FIXME: remove this value for the modifier because this is handled using a
+// separate attribute
+def OpenMP_ScheduleModSimd : I32EnumAttrCase<"simd", 3>;
+
+def ScheduleModifier : OpenMP_I32EnumAttr<
+ "ScheduleModifier",
+ "OpenMP Schedule Modifier", [
+ OpenMP_ScheduleModNone,
+ OpenMP_ScheduleModMonotonic,
+ OpenMP_ScheduleModNonmonotonic,
+ OpenMP_ScheduleModSimd
+ ]>;
+
+def ScheduleModifierAttr : OpenMP_EnumAttr<ScheduleModifier, "sched_mod">;
+
+//===----------------------------------------------------------------------===//
+// variable_capture_kind enum.
+//===----------------------------------------------------------------------===//
+
+def CaptureThis : I32EnumAttrCase<"This", 0>;
+def CaptureByRef : I32EnumAttrCase<"ByRef", 1>;
+def CaptureByCopy : I32EnumAttrCase<"ByCopy", 2>;
+def CaptureVLAType : I32EnumAttrCase<"VLAType", 3>;
+
+def VariableCaptureKind : OpenMP_I32EnumAttr<
+ "VariableCaptureKind",
+ "variable capture kind", [
+ CaptureThis,
+ CaptureByRef,
+ CaptureByCopy,
+ CaptureVLAType
+ ]>;
+
+def VariableCaptureKindAttr : OpenMP_EnumAttr<VariableCaptureKind,
+ "variable_capture_kind"> {
+ let assemblyFormat = "`(` $value `)`";
+}
+
+#endif // OPENMP_ENUMS
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOpBase.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOpBase.td
new file mode 100644
index 000000000000..b98d87aa74a6
--- /dev/null
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOpBase.td
@@ -0,0 +1,48 @@
+//===- OpenMPOpBase.td - OpenMP dialect shared definitions -*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains shared definitions for the OpenMP dialect.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef OPENMP_OP_BASE
+#define OPENMP_OP_BASE
+
+include "mlir/Dialect/OpenMP/OpenMPAttrDefs.td"
+include "mlir/Dialect/OpenMP/OpenMPDialect.td"
+include "mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td"
+include "mlir/Dialect/OpenMP/OpenMPTypeInterfaces.td"
+include "mlir/IR/OpBase.td"
+
+//===----------------------------------------------------------------------===//
+// OpenMP dialect type constraints.
+//===----------------------------------------------------------------------===//
+
+class OpenMP_Type<string name, string typeMnemonic> :
+ TypeDef<OpenMP_Dialect, name> {
+ let mnemonic = typeMnemonic;
+}
+
+// Type constraint accepting standard integers and indices.
+def IntLikeType : AnyTypeOf<[AnyInteger, Index]>;
+
+def OpenMP_PointerLikeType : TypeAlias<OpenMP_PointerLikeTypeInterface,
+ "OpenMP-compatible variable type">;
+
+def OpenMP_MapBoundsType : OpenMP_Type<"MapBounds", "map_bounds_ty"> {
+ let summary = "Type for representing omp map clause bounds information";
+}
+
+//===----------------------------------------------------------------------===//
+// Base classes for OpenMP dialect operations.
+//===----------------------------------------------------------------------===//
+
+class OpenMP_Op<string mnemonic, list<Trait> traits = []> :
+ Op<OpenMP_Dialect, mnemonic, traits>;
+
+#endif // OPENMP_OP_BASE
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
index 29c287cad06e..dc9ac2b9de22 100644
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
@@ -14,145 +14,20 @@
#ifndef OPENMP_OPS
#define OPENMP_OPS
+include "mlir/Dialect/LLVMIR/LLVMOpBase.td"
+include "mlir/Dialect/OpenACCMPCommon/Interfaces/AtomicInterfaces.td"
+include "mlir/Dialect/OpenMP/OpenMPAttrDefs.td"
+include "mlir/Dialect/OpenMP/OpenMPOpBase.td"
+include "mlir/Interfaces/ControlFlowInterfaces.td"
+include "mlir/Interfaces/SideEffectInterfaces.td"
include "mlir/IR/EnumAttr.td"
include "mlir/IR/OpBase.td"
-include "mlir/Interfaces/SideEffectInterfaces.td"
-include "mlir/Interfaces/ControlFlowInterfaces.td"
include "mlir/IR/SymbolInterfaces.td"
-include "mlir/Dialect/LLVMIR/LLVMOpBase.td"
-include "mlir/Dialect/OpenACCMPCommon/Interfaces/AtomicInterfaces.td"
-include "mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td"
-include "mlir/Dialect/OpenMP/OpenMPTypeInterfaces.td"
-
-def OpenMP_Dialect : Dialect {
- let name = "omp";
- let cppNamespace = "::mlir::omp";
- let dependentDialects = ["::mlir::LLVM::LLVMDialect, ::mlir::func::FuncDialect"];
- let useDefaultAttributePrinterParser = 1;
- let useDefaultTypePrinterParser = 1;
-}
-
-// OmpCommon requires definition of OpenACC_Dialect.
-include "mlir/Dialect/OpenMP/OmpCommon.td"
-
-//===----------------------------------------------------------------------===//
-// OpenMP Attributes
-//===----------------------------------------------------------------------===//
-
-class OpenMP_Attr<string name, string attrMnemonic,
- list<Trait> traits = [],
- string baseCppClass = "::mlir::Attribute">
- : AttrDef<OpenMP_Dialect, name, traits, baseCppClass> {
- let mnemonic = attrMnemonic;
-}
-
-def VersionAttr : OpenMP_Attr<"Version", "version"> {
- let parameters = (ins
- "uint32_t":$version
- );
-
- let assemblyFormat = "`<` struct(params) `>`";
-}
-
-//===----------------------------------------------------------------------===//
-// Runtime library flag's attribute that holds information for lowering to LLVM
-//===----------------------------------------------------------------------===//
-
-def FlagsAttr : OpenMP_Attr<"Flags", "flags"> {
- let parameters = (ins
- DefaultValuedParameter<"uint32_t", "0">:$debug_kind,
- DefaultValuedParameter<"bool", "false">:$assume_teams_oversubscription,
- DefaultValuedParameter<"bool", "false">:$assume_threads_oversubscription,
- DefaultValuedParameter<"bool", "false">:$assume_no_thread_state,
- DefaultValuedParameter<"bool", "false">:$assume_no_nested_parallelism,
- DefaultValuedParameter<"bool", "false">:$no_gpu_lib,
- DefaultValuedParameter<"uint32_t", "50">:$openmp_device_version
- );
-
- let assemblyFormat = "`<` struct(params) `>`";
-}
-
-
-class OpenMP_Op<string mnemonic, list<Trait> traits = []> :
- Op<OpenMP_Dialect, mnemonic, traits>;
-
-// Type which can be constraint accepting standard integers and indices.
-def IntLikeType : AnyTypeOf<[AnyInteger, Index]>;
-
-def OpenMP_PointerLikeType : TypeAlias<OpenMP_PointerLikeTypeInterface,
- "OpenMP-compatible variable type">;
-
-class OpenMP_Type<string name, string typeMnemonic> : TypeDef<OpenMP_Dialect, name> {
- let mnemonic = typeMnemonic;
-}
-
-//===----------------------------------------------------------------------===//
-// 2.12.7 Declare Target Directive
-//===----------------------------------------------------------------------===//
-
-def DeviceTypeAny : I32EnumAttrCase<"any", 0>;
-def DeviceTypeHost : I32EnumAttrCase<"host", 1>;
-def DeviceTypeNoHost : I32EnumAttrCase<"nohost", 2>;
-
-def DeclareTargetDeviceType : I32EnumAttr<
- "DeclareTargetDeviceType",
- "device_type clause",
- [DeviceTypeAny, DeviceTypeHost, DeviceTypeNoHost]> {
- let genSpecializedAttr = 0;
- let cppNamespace = "::mlir::omp";
-}
-
-def DeclareTargetDeviceTypeAttr : EnumAttr<OpenMP_Dialect, DeclareTargetDeviceType,
- "device_type"> {
- let assemblyFormat = "`(` $value `)`";
-}
-
-def CaptureClauseTo : I32EnumAttrCase<"to", 0>;
-def CaptureClauseLink : I32EnumAttrCase<"link", 1>;
-def CaptureClauseEnter : I32EnumAttrCase<"enter", 2>;
-
-def DeclareTargetCaptureClause : I32EnumAttr<
- "DeclareTargetCaptureClause",
- "capture clause",
- [CaptureClauseTo, CaptureClauseLink, CaptureClauseEnter]> {
- let genSpecializedAttr = 0;
- let cppNamespace = "::mlir::omp";
-}
-
-def DeclareTargetCaptureClauseAttr : EnumAttr<OpenMP_Dialect, DeclareTargetCaptureClause,
- "capture_clause"> {
- let assemblyFormat = "`(` $value `)`";
-}
-
-def DeclareTargetAttr : OpenMP_Attr<"DeclareTarget", "declaretarget"> {
- let parameters = (ins
- OptionalParameter<"DeclareTargetDeviceTypeAttr">:$device_type,
- OptionalParameter<"DeclareTargetCaptureClauseAttr">:$capture_clause
- );
-
- let assemblyFormat = "`<` struct(params) `>`";
-}
//===----------------------------------------------------------------------===//
// 2.19.4 Data-Sharing Attribute Clauses
//===----------------------------------------------------------------------===//
-def DataSharingTypePrivate : I32EnumAttrCase<"Private", 0, "private">;
-def DataSharingTypeFirstPrivate : I32EnumAttrCase<"FirstPrivate", 1, "firstprivate">;
-
-def DataSharingClauseType : I32EnumAttr<
- "DataSharingClauseType",
- "Type of a data-sharing clause",
- [DataSharingTypePrivate, DataSharingTypeFirstPrivate]> {
- let genSpecializedAttr = 0;
- let cppNamespace = "::mlir::omp";
-}
-
-def DataSharingClauseTypeAttr : EnumAttr<
- OpenMP_Dialect, DataSharingClauseType, "data_sharing_type"> {
- let assemblyFormat = "`{` `type` `=` $value `}`";
-}
-
def PrivateClauseOp : OpenMP_Op<"private", [IsolatedFromAbove]> {
let summary = "Provides declaration of [first]private logic.";
let description = [{
@@ -277,13 +152,9 @@ def ParallelOp : OpenMP_Op<"parallel", [
variable should be passed into the reduction region by value or by reference
in `reduction_vars_byref`. Each reduction is identified by the accumulator
it uses and accumulators must not be repeated in the same reduction. The
- `omp.reduction` operation accepts the accumulator and a partial value which
- is considered to be produced by the thread for the given reduction. If
- multiple values are produced for the same accumulator, i.e. there are
- multiple `omp.reduction`s, the last value is taken. The reduction
- declaration specifies how to combine the values from each thread into the
- final value, which is available in the accumulator after all the threads
- complete.
+ reduction declaration specifies how to combine the values from each thread
+ into the final value, which is available in the accumulator after all the
+ threads complete.
The optional $proc_bind_val attribute controls the thread affinity for the execution
of the parallel region.
@@ -403,23 +274,6 @@ def TeamsOp : OpenMP_Op<"teams", [
let hasVerifier = 1;
}
-def OMP_ScheduleModNone : I32EnumAttrCase<"none", 0>;
-def OMP_ScheduleModMonotonic : I32EnumAttrCase<"monotonic", 1>;
-def OMP_ScheduleModNonmonotonic : I32EnumAttrCase<"nonmonotonic", 2>;
-// FIXME: remove this value for the modifier because this is handled using a
-// separate attribute
-def OMP_ScheduleModSIMD : I32EnumAttrCase<"simd", 3>;
-
-def ScheduleModifier
- : I32EnumAttr<"ScheduleModifier", "OpenMP Schedule Modifier",
- [OMP_ScheduleModNone, OMP_ScheduleModMonotonic,
- OMP_ScheduleModNonmonotonic, OMP_ScheduleModSIMD]> {
- let genSpecializedAttr = 0;
- let cppNamespace = "::mlir::omp";
-}
-def ScheduleModifierAttr : EnumAttr<OpenMP_Dialect, ScheduleModifier,
- "sched_mod">;
-
//===----------------------------------------------------------------------===//
// 2.8.1 Sections Construct
//===----------------------------------------------------------------------===//
@@ -449,13 +303,9 @@ def SectionsOp : OpenMP_Op<"sections", [AttrSizedOperandSegments,
accumulator variables in `reduction_vars` and symbols referring to reduction
declarations in the `reductions` attribute. Each reduction is identified
by the accumulator it uses and accumulators must not be repeated in the same
- reduction. The `omp.reduction` operation accepts the accumulator and a
- partial value which is considered to be produced by the section for the
- given reduction. If multiple values are produced for the same accumulator,
- i.e. there are multiple `omp.reduction`s, the last value is taken. The
- reduction declaration specifies how to combine the values from each section
- into the final value, which is available in the accumulator after all the
- sections complete.
+ reduction. The reduction declaration specifies how to combine the values
+ from each section into the final value, which is available in the
+ accumulator after all the sections complete.
The $allocators_vars and $allocate_vars parameters are a variadic list of values
that specify the memory allocator to be used to obtain storage for private values.
@@ -904,26 +754,6 @@ def DistributeOp : OpenMP_Op<"distribute", [AttrSizedOperandSegments,
// 2.10.1 task Construct
//===----------------------------------------------------------------------===//
-def ClauseTaskDependIn : I32EnumAttrCase<"taskdependin", 0>;
-def ClauseTaskDependOut : I32EnumAttrCase<"taskdependout", 1>;
-def ClauseTaskDependInOut : I32EnumAttrCase<"taskdependinout", 2>;
-
-def ClauseTaskDepend : I32EnumAttr<
- "ClauseTaskDepend",
- "depend clause in a target or task construct",
- [ClauseTaskDependIn, ClauseTaskDependOut, ClauseTaskDependInOut]> {
- let genSpecializedAttr = 0;
- let cppNamespace = "::mlir::omp";
-}
-def ClauseTaskDependAttr :
- EnumAttr<OpenMP_Dialect, ClauseTaskDepend, "clause_task_depend"> {
- let assemblyFormat = "`(` $value `)`";
-}
-def TaskDependArrayAttr :
- TypedArrayAttrBase<ClauseTaskDependAttr, "clause_task_depend array attr"> {
- let constBuilderCall = ?;
- }
-
def TaskOp : OpenMP_Op<"task", [AttrSizedOperandSegments,
OutlineableOpenMPOpInterface, AutomaticAllocationScope,
ReductionClauseInterface]> {
@@ -1074,11 +904,7 @@ def TaskloopOp : OpenMP_Op<"taskloop", [AttrSizedOperandSegments,
variables in `reduction_vars` or `in_reduction_vars` and symbols referring
to reduction declarations in the `reductions` or `in_reductions` attribute.
Each reduction is identified by the accumulator it uses and accumulators
- must not be repeated in the same reduction. The `omp.reduction` operation
- accepts the accumulator and a partial value which is considered to be
- produced by the current loop iteration for the given reduction. If multiple
- values are produced for the same accumulator, i.e. there are multiple
- `omp.reduction`s, the last value is taken. The reduction declaration
+ must not be repeated in the same reduction. The reduction declaration
specifies how to combine the values from each iteration into the final
value, which is available in the accumulator after the loop completes.
@@ -1283,28 +1109,6 @@ def FlushOp : OpenMP_Op<"flush"> {
// Map related constructs
//===----------------------------------------------------------------------===//
-def CaptureThis : I32EnumAttrCase<"This", 0>;
-def CaptureByRef : I32EnumAttrCase<"ByRef", 1>;
-def CaptureByCopy : I32EnumAttrCase<"ByCopy", 2>;
-def CaptureVLAType : I32EnumAttrCase<"VLAType", 3>;
-
-def VariableCaptureKind : I32EnumAttr<
- "VariableCaptureKind",
- "variable capture kind",
- [CaptureThis, CaptureByRef, CaptureByCopy, CaptureVLAType]> {
- let genSpecializedAttr = 0;
- let cppNamespace = "::mlir::omp";
-}
-
-def VariableCaptureKindAttr : EnumAttr<OpenMP_Dialect, VariableCaptureKind,
- "variable_capture_kind"> {
- let assemblyFormat = "`(` $value `)`";
-}
-
-def MapBoundsType : OpenMP_Type<"MapBounds", "map_bounds_ty"> {
- let summary = "Type for representing omp map clause bounds information";
-}
-
def MapBoundsOp : OpenMP_Op<"map.bounds",
[AttrSizedOperandSegments, NoMemoryEffect]> {
let summary = "Represents normalized bounds information for map clauses.";
@@ -1386,7 +1190,7 @@ def MapBoundsOp : OpenMP_Op<"map.bounds",
Optional<IntLikeType>:$stride,
DefaultValuedAttr<BoolAttr, "false">:$stride_in_bytes,
Optional<IntLikeType>:$start_idx);
- let results = (outs MapBoundsType:$result);
+ let results = (outs OpenMP_MapBoundsType:$result);
let assemblyFormat = [{
oilist(
@@ -1419,7 +1223,7 @@ def MapInfoOp : OpenMP_Op<"map.info", [AttrSizedOperandSegments]> {
Optional<OpenMP_PointerLikeType>:$var_ptr_ptr,
Variadic<OpenMP_PointerLikeType>:$members,
OptionalAttr<AnyIntElementsAttr>:$members_index,
- Variadic<MapBoundsType>:$bounds, /* rank-0 to rank-{n-1} */
+ Variadic<OpenMP_MapBoundsType>:$bounds, /* rank-0 to rank-{n-1} */
OptionalAttr<UI64Attr>:$map_type,
OptionalAttr<VariableCaptureKindAttr>:$map_capture_type,
OptionalAttr<StrAttr>:$name,
@@ -1894,20 +1698,6 @@ def BarrierOp : OpenMP_Op<"barrier"> {
// [5.1] 2.19.9 ordered Construct
//===----------------------------------------------------------------------===//
-def ClauseDependSource : I32EnumAttrCase<"dependsource", 0>;
-def ClauseDependSink : I32EnumAttrCase<"dependsink", 1>;
-
-def ClauseDepend : I32EnumAttr<
- "ClauseDepend",
- "depend clause",
- [ClauseDependSource, ClauseDependSink]> {
- let genSpecializedAttr = 0;
- let cppNamespace = "::mlir::omp";
-}
-def ClauseDependAttr : EnumAttr<OpenMP_Dialect, ClauseDepend, "clause_depend"> {
- let assemblyFormat = "`(` $value `)`";
-}
-
def OrderedOp : OpenMP_Op<"ordered"> {
let summary = "ordered construct without region";
let description = [{
@@ -2357,55 +2147,4 @@ def DeclareReductionOp : OpenMP_Op<"declare_reduction", [Symbol,
let hasRegionVerifier = 1;
}
-//===----------------------------------------------------------------------===//
-// 2.19.5.4 reduction clause
-//===----------------------------------------------------------------------===//
-
-def ReductionOp : OpenMP_Op<"reduction"> {
- let summary = "reduction construct";
- let description = [{
- Indicates the value that is produced by the current reduction-participating
- entity for a reduction requested in some ancestor. The reduction is
- identified by the accumulator, but the value of the accumulator may not be
- updated immediately.
- }];
-
- let arguments= (ins AnyType:$operand, OpenMP_PointerLikeType:$accumulator);
- let assemblyFormat = [{
- $operand `,` $accumulator attr-dict `:` type($operand) `,` type($accumulator)
- }];
- let hasVerifier = 1;
-}
-
-//===----------------------------------------------------------------------===//
-// 8.2 requires directive
-//===----------------------------------------------------------------------===//
-
-// atomic_default_mem_order clause values not defined here because they can be
-// represented by the OMPC_MemoryOrder enumeration instead.
-def ClauseRequiresNone : I32BitEnumAttrCaseNone<"none">;
-def ClauseRequiresReverseOffload : I32BitEnumAttrCaseBit<"reverse_offload", 0>;
-def ClauseRequiresUnifiedAddress : I32BitEnumAttrCaseBit<"unified_address", 1>;
-def ClauseRequiresUnifiedSharedMemory
- : I32BitEnumAttrCaseBit<"unified_shared_memory", 2>;
-def ClauseRequiresDynamicAllocators
- : I32BitEnumAttrCaseBit<"dynamic_allocators", 3>;
-
-def ClauseRequires : I32BitEnumAttr<
- "ClauseRequires",
- "requires clauses",
- [
- ClauseRequiresNone,
- ClauseRequiresReverseOffload,
- ClauseRequiresUnifiedAddress,
- ClauseRequiresUnifiedSharedMemory,
- ClauseRequiresDynamicAllocators
- ]> {
- let genSpecializedAttr = 0;
- let cppNamespace = "::mlir::omp";
-}
-def ClauseRequiresAttr :
- EnumAttr<OpenMP_Dialect, ClauseRequires, "clause_requires"> {
-}
-
#endif // OPENMP_OPS
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td
index d9569d9d294d..31a306072d0e 100644
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef OpenMP_OPS_INTERFACES
-#define OpenMP_OPS_INTERFACES
+#ifndef OPENMP_OPS_INTERFACES
+#define OPENMP_OPS_INTERFACES
include "mlir/IR/OpBase.td"
@@ -349,4 +349,4 @@ def OffloadModuleInterface : OpInterface<"OffloadModuleInterface"> {
];
}
-#endif // OpenMP_OPS_INTERFACES
+#endif // OPENMP_OPS_INTERFACES
diff --git a/mlir/include/mlir/Dialect/Polynomial/IR/Polynomial.td b/mlir/include/mlir/Dialect/Polynomial/IR/Polynomial.td
index 294f58ae084b..f99cbccd243e 100644
--- a/mlir/include/mlir/Dialect/Polynomial/IR/Polynomial.td
+++ b/mlir/include/mlir/Dialect/Polynomial/IR/Polynomial.td
@@ -1,4 +1,4 @@
-//===- PolynomialOps.td - Polynomial dialect ---------------*- tablegen -*-===//
+//===- Polynomial.td - Polynomial dialect ------------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -15,22 +15,7 @@ include "mlir/Interfaces/InferTypeOpInterface.td"
include "mlir/Interfaces/SideEffectInterfaces.td"
include "mlir/Dialect/Polynomial/IR/PolynomialDialect.td"
include "mlir/Dialect/Polynomial/IR/PolynomialAttributes.td"
-
-class Polynomial_Type<string name, string typeMnemonic>
- : TypeDef<Polynomial_Dialect, name> {
- let mnemonic = typeMnemonic;
-}
-
-def Polynomial_PolynomialType : Polynomial_Type<"Polynomial", "polynomial"> {
- let summary = "An element of a polynomial ring.";
- let description = [{
- A type for polynomials in a polynomial quotient ring.
- }];
- let parameters = (ins Polynomial_RingAttr:$ring);
- let assemblyFormat = "`<` struct(params) `>`";
-}
-
-def PolynomialLike: TypeOrContainer<Polynomial_PolynomialType, "polynomial-like">;
+include "mlir/Dialect/Polynomial/IR/PolynomialTypes.td"
class Polynomial_Op<string mnemonic, list<Trait> traits = []> :
Op<Polynomial_Dialect, mnemonic, traits # [Pure]> {
@@ -67,8 +52,8 @@ def Polynomial_AddOp : Polynomial_BinaryOp<"add", [Commutative]> {
// add two polynomials modulo x^1024 - 1
#poly = #polynomial.int_polynomial<x**1024 - 1>
#ring = #polynomial.ring<coefficientType=i32, coefficientModulus=65536:i32, polynomialModulus=#poly>
- %0 = polynomial.constant #polynomial.int_polynomial<1 + x**2> : !polynomial.polynomial<#ring>
- %1 = polynomial.constant #polynomial.int_polynomial<x**5 - x + 1> : !polynomial.polynomial<#ring>
+ %0 = polynomial.constant int<1 + x**2> : !polynomial.polynomial<#ring>
+ %1 = polynomial.constant int<x**5 - x + 1> : !polynomial.polynomial<#ring>
%2 = polynomial.add %0, %1 : !polynomial.polynomial<#ring>
```
}];
@@ -91,8 +76,8 @@ def Polynomial_SubOp : Polynomial_BinaryOp<"sub"> {
// subtract two polynomials modulo x^1024 - 1
#poly = #polynomial.int_polynomial<x**1024 - 1>
#ring = #polynomial.ring<coefficientType=i32, coefficientModulus=65536:i32, polynomialModulus=#poly>
- %0 = polynomial.constant #polynomial.int_polynomial<1 + x**2> : !polynomial.polynomial<#ring>
- %1 = polynomial.constant #polynomial.int_polynomial<x**5 - x + 1> : !polynomial.polynomial<#ring>
+ %0 = polynomial.constant int<1 + x**2> : !polynomial.polynomial<#ring>
+ %1 = polynomial.constant int<x**5 - x + 1> : !polynomial.polynomial<#ring>
%2 = polynomial.sub %0, %1 : !polynomial.polynomial<#ring>
```
}];
@@ -116,8 +101,8 @@ def Polynomial_MulOp : Polynomial_BinaryOp<"mul", [Commutative]> {
// multiply two polynomials modulo x^1024 - 1
#poly = #polynomial.int_polynomial<x**1024 - 1>
#ring = #polynomial.ring<coefficientType=i32, coefficientModulus=65536:i32, polynomialModulus=#poly>
- %0 = polynomial.constant #polynomial.int_polynomial<1 + x**2> : !polynomial.polynomial<#ring>
- %1 = polynomial.constant #polynomial.int_polynomial<x**5 - x + 1> : !polynomial.polynomial<#ring>
+ %0 = polynomial.constant int<1 + x**2> : !polynomial.polynomial<#ring>
+ %1 = polynomial.constant int<x**5 - x + 1> : !polynomial.polynomial<#ring>
%2 = polynomial.mul %0, %1 : !polynomial.polynomial<#ring>
```
}];
@@ -141,7 +126,7 @@ def Polynomial_MulScalarOp : Polynomial_Op<"mul_scalar", [
// multiply two polynomials modulo x^1024 - 1
#poly = #polynomial.int_polynomial<x**1024 - 1>
#ring = #polynomial.ring<coefficientType=i32, coefficientModulus=65536:i32, polynomialModulus=#poly>
- %0 = polynomial.constant #polynomial.int_polynomial<1 + x**2> : !polynomial.polynomial<#ring>
+ %0 = polynomial.constant int<1 + x**2> : !polynomial.polynomial<#ring>
%1 = arith.constant 3 : i32
%2 = polynomial.mul_scalar %0, %1 : !polynomial.polynomial<#ring>, i32
```
@@ -172,7 +157,7 @@ def Polynomial_LeadingTermOp: Polynomial_Op<"leading_term"> {
```mlir
#poly = #polynomial.int_polynomial<x**1024 - 1>
#ring = #polynomial.ring<coefficientType=i32, coefficientModulus=65536:i32, polynomialModulus=#poly>
- %0 = polynomial.constant #polynomial.int_polynomial<1 + x**2> : !polynomial.polynomial<#ring>
+ %0 = polynomial.constant int<1 + x**2> : !polynomial.polynomial<#ring>
%1, %2 = polynomial.leading_term %0 : !polynomial.polynomial<#ring> -> (index, i32)
```
}];
@@ -287,29 +272,29 @@ def Polynomial_ToTensorOp : Polynomial_Op<"to_tensor", [Pure]> {
let hasVerifier = 1;
}
-def Polynomial_AnyPolynomialAttr : AnyAttrOf<[
- Polynomial_FloatPolynomialAttr,
- Polynomial_IntPolynomialAttr
+def Polynomial_AnyTypedPolynomialAttr : AnyAttrOf<[
+ Polynomial_TypedFloatPolynomialAttr,
+ Polynomial_TypedIntPolynomialAttr
]>;
// Not deriving from Polynomial_Op due to need for custom assembly format
-def Polynomial_ConstantOp : Op<Polynomial_Dialect, "constant", [Pure]> {
+def Polynomial_ConstantOp : Op<Polynomial_Dialect, "constant",
+ [Pure, InferTypeOpAdaptor]> {
let summary = "Define a constant polynomial via an attribute.";
let description = [{
Example:
```mlir
- #poly = #polynomial.int_polynomial<x**1024 - 1>
- #ring = #polynomial.ring<coefficientType=i32, coefficientModulus=65536:i32, polynomialModulus=#poly>
- %0 = polynomial.constant #polynomial.int_polynomial<1 + x**2> : !polynomial.polynomial<#ring>
+ !int_poly_ty = !polynomial.polynomial<ring=<coefficientType=i32>>
+ %0 = polynomial.constant int<1 + x**2> : !int_poly_ty
- #float_ring = #polynomial.ring<coefficientType=f32>
- %0 = polynomial.constant #polynomial.float_polynomial<0.5 + 1.3e06 x**2> : !polynomial.polynomial<#float_ring>
+ !float_poly_ty = !polynomial.polynomial<ring=<coefficientType=f32>>
+ %1 = polynomial.constant float<0.5 + 1.3e06 x**2> : !float_poly_ty
```
}];
- let arguments = (ins Polynomial_AnyPolynomialAttr:$value);
+ let arguments = (ins Polynomial_AnyTypedPolynomialAttr:$value);
let results = (outs Polynomial_PolynomialType:$output);
- let assemblyFormat = "attr-dict `:` type($output)";
+ let hasCustomAssemblyFormat = 1;
}
def Polynomial_NTTOp : Polynomial_Op<"ntt", [Pure]> {
diff --git a/mlir/include/mlir/Dialect/Polynomial/IR/PolynomialAttributes.td b/mlir/include/mlir/Dialect/Polynomial/IR/PolynomialAttributes.td
index e5dbfa7fa21e..655020adf808 100644
--- a/mlir/include/mlir/Dialect/Polynomial/IR/PolynomialAttributes.td
+++ b/mlir/include/mlir/Dialect/Polynomial/IR/PolynomialAttributes.td
@@ -18,7 +18,7 @@ class Polynomial_Attr<string name, string attrMnemonic, list<Trait> traits = []>
}
def Polynomial_IntPolynomialAttr : Polynomial_Attr<"IntPolynomial", "int_polynomial"> {
- let summary = "An attribute containing a single-variable polynomial with integer coefficients.";
+ let summary = "an attribute containing a single-variable polynomial with integer coefficients";
let description = [{
A polynomial attribute represents a single-variable polynomial with integer
coefficients, which is used to define the modulus of a `RingAttr`, as well
@@ -41,7 +41,7 @@ def Polynomial_IntPolynomialAttr : Polynomial_Attr<"IntPolynomial", "int_polynom
}
def Polynomial_FloatPolynomialAttr : Polynomial_Attr<"FloatPolynomial", "float_polynomial"> {
- let summary = "An attribute containing a single-variable polynomial with double precision floating point coefficients.";
+ let summary = "an attribute containing a single-variable polynomial with double precision floating point coefficients";
let description = [{
A polynomial attribute represents a single-variable polynomial with double
precision floating point coefficients.
@@ -62,8 +62,72 @@ def Polynomial_FloatPolynomialAttr : Polynomial_Attr<"FloatPolynomial", "float_p
let hasCustomAssemblyFormat = 1;
}
+def Polynomial_TypedIntPolynomialAttr : Polynomial_Attr<
+ "TypedIntPolynomial", "typed_int_polynomial", [TypedAttrInterface]> {
+ let summary = "a typed int_polynomial";
+ let description = [{
+ Example:
+
+ ```mlir
+ !poly_ty = !polynomial.polynomial<ring=<coefficientType=i32>>
+ #poly = int<1 x**7 + 4> : !poly_ty
+ #poly_verbose = #polynomial.typed_int_polynomial<1 x**7 + 4> : !poly_ty
+ ```
+ }];
+ let parameters = (ins "::mlir::Type":$type, "::mlir::polynomial::IntPolynomialAttr":$value);
+ let assemblyFormat = "$value `:` $type";
+ let builders = [
+ AttrBuilderWithInferredContext<(ins "Type":$type,
+ "const IntPolynomial &":$value), [{
+ return $_get(
+ type.getContext(),
+ type,
+ IntPolynomialAttr::get(type.getContext(), value));
+ }]>,
+ AttrBuilderWithInferredContext<(ins "Type":$type,
+ "const Attribute &":$value), [{
+ return $_get(type.getContext(), type, ::llvm::cast<IntPolynomialAttr>(value));
+ }]>
+ ];
+ let extraClassDeclaration = [{
+ using ValueType = ::mlir::Attribute;
+ }];
+}
+
+def Polynomial_TypedFloatPolynomialAttr : Polynomial_Attr<
+ "TypedFloatPolynomial", "typed_float_polynomial", [TypedAttrInterface]> {
+ let summary = "a typed float_polynomial";
+ let description = [{
+ Example:
+
+ ```mlir
+ !poly_ty = !polynomial.polynomial<ring=<coefficientType=f32>>
+ #poly = float<1.4 x**7 + 4.5> : !poly_ty
+ #poly_verbose = #polynomial.typed_float_polynomial<1.4 x**7 + 4.5> : !poly_ty
+ ```
+ }];
+ let parameters = (ins "::mlir::Type":$type, "::mlir::polynomial::FloatPolynomialAttr":$value);
+ let assemblyFormat = "$value `:` $type";
+ let builders = [
+ AttrBuilderWithInferredContext<(ins "Type":$type,
+ "const FloatPolynomial &":$value), [{
+ return $_get(
+ type.getContext(),
+ type,
+ FloatPolynomialAttr::get(type.getContext(), value));
+ }]>,
+ AttrBuilderWithInferredContext<(ins "Type":$type,
+ "const Attribute &":$value), [{
+ return $_get(type.getContext(), type, ::llvm::cast<FloatPolynomialAttr>(value));
+ }]>
+ ];
+ let extraClassDeclaration = [{
+ using ValueType = ::mlir::Attribute;
+ }];
+}
+
def Polynomial_RingAttr : Polynomial_Attr<"Ring", "ring"> {
- let summary = "An attribute specifying a polynomial ring.";
+ let summary = "an attribute specifying a polynomial ring";
let description = [{
A ring describes the domain in which polynomial arithmetic occurs. The ring
attribute in `polynomial` represents the more specific case of polynomials
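The `AttrBuilderWithInferredContext` builders added for the typed polynomial attributes infer the MLIR context from the type parameter and wrap the raw polynomial in an `IntPolynomialAttr` internally. A hedged C++ construction sketch (the type and polynomial values are assumed to exist):

```cpp
#include "mlir/Dialect/Polynomial/IR/PolynomialAttributes.h"

mlir::Attribute buildTypedPoly(mlir::polynomial::PolynomialType polyType,
                               const mlir::polynomial::IntPolynomial &poly) {
  // Context is inferred from polyType; the IntPolynomialAttr wrapper is
  // created inside the builder.
  return mlir::polynomial::TypedIntPolynomialAttr::get(polyType, poly);
}
```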
diff --git a/mlir/include/mlir/Dialect/Polynomial/IR/PolynomialTypes.td b/mlir/include/mlir/Dialect/Polynomial/IR/PolynomialTypes.td
new file mode 100644
index 000000000000..89e406183e0b
--- /dev/null
+++ b/mlir/include/mlir/Dialect/Polynomial/IR/PolynomialTypes.td
@@ -0,0 +1,32 @@
+//===- PolynomialTypes.td - Polynomial types ---------------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef POLYNOMIAL_TYPES
+#define POLYNOMIAL_TYPES
+
+include "mlir/Dialect/Polynomial/IR/PolynomialAttributes.td"
+include "mlir/Dialect/Polynomial/IR/PolynomialDialect.td"
+
+class Polynomial_Type<string name, string typeMnemonic>
+ : TypeDef<Polynomial_Dialect, name> {
+ let mnemonic = typeMnemonic;
+}
+
+def Polynomial_PolynomialType : Polynomial_Type<"Polynomial", "polynomial"> {
+ let summary = "An element of a polynomial ring.";
+ let description = [{
+ A type for polynomials in a polynomial quotient ring.
+ }];
+ let parameters = (ins Polynomial_RingAttr:$ring);
+ let assemblyFormat = "`<` struct(params) `>`";
+}
+
+def PolynomialLike: TypeOrContainer<Polynomial_PolynomialType, "polynomial-like">;
+
+
+#endif // POLYNOMIAL_TYPES
diff --git a/mlir/include/mlir/Dialect/SCF/Transforms/TileUsingInterface.h b/mlir/include/mlir/Dialect/SCF/Transforms/TileUsingInterface.h
index 965ef9e203be..6d567171e185 100644
--- a/mlir/include/mlir/Dialect/SCF/Transforms/TileUsingInterface.h
+++ b/mlir/include/mlir/Dialect/SCF/Transforms/TileUsingInterface.h
@@ -250,8 +250,8 @@ struct SCFReductionTilingResult {
Operation *parallelTiledOp;
/// The final reduction operation merging all the partial reductions.
Operation *mergeOp;
- /// Initial op
- Operation *initialOp;
+ /// Initial values used for reduction.
+ SmallVector<Value> initialValues;
/// The loop operations that iterate over the tiles.
SmallVector<LoopLikeOpInterface> loops;
};
diff --git a/mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h
index e8a09c474104..dd6b0e868256 100644
--- a/mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h
@@ -59,8 +59,8 @@ void populateDropRedundantInsertSliceRankExpansionPatterns(
/// `tensor.collapse_shape` into other ops.
void populateReassociativeReshapeFoldingPatterns(RewritePatternSet &patterns);
-/// Populates `patterns` with patterns that fold tensor.empty with
-/// tensor.[extract_slice|expand_shape|collapse_shape].
+/// Populates `patterns` with patterns that fold tensor.empty with its
+/// consumers.
///
/// If `singleUseOnly` is set to "true", only tensor.empty ops with a single
/// use are folded.
diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index 332b5ad08ced..56d866ac5b40 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -480,24 +480,25 @@ def Vector_ShuffleOp :
let hasCanonicalizer = 1;
}
-def Vector_InterleaveOp :
- Vector_Op<"interleave", [Pure,
- AllTypesMatch<["lhs", "rhs"]>,
- TypesMatchWith<
+def ResultIsDoubleSourceVectorType : TypesMatchWith<
"type of 'result' is double the width of the inputs",
"lhs", "result",
[{
[&]() -> ::mlir::VectorType {
- auto vectorType = ::llvm::cast<mlir::VectorType>($_self);
+ auto vectorType = ::llvm::cast<::mlir::VectorType>($_self);
::mlir::VectorType::Builder builder(vectorType);
if (vectorType.getRank() == 0) {
- static constexpr int64_t v2xty_shape[] = { 2 };
- return builder.setShape(v2xty_shape);
+ static constexpr int64_t v2xTyShape[] = {2};
+ return builder.setShape(v2xTyShape);
}
auto lastDim = vectorType.getRank() - 1;
return builder.setDim(lastDim, vectorType.getDimSize(lastDim) * 2);
}()
- }]>]> {
+ }]>;
+
+def Vector_InterleaveOp :
+ Vector_Op<"interleave", [Pure, AllTypesMatch<["lhs", "rhs"]>,
+ ResultIsDoubleSourceVectorType]> {
let summary = "constructs a vector by interleaving two input vectors";
let description = [{
The interleave operation constructs a new vector by interleaving the
@@ -513,16 +514,16 @@ def Vector_InterleaveOp :
Example:
```mlir
- %0 = vector.interleave %a, %b
- : vector<[4]xi32> ; yields vector<[8]xi32>
- %1 = vector.interleave %c, %d
- : vector<8xi8> ; yields vector<16xi8>
- %2 = vector.interleave %e, %f
- : vector<f16> ; yields vector<2xf16>
- %3 = vector.interleave %g, %h
- : vector<2x4x[2]xf64> ; yields vector<2x4x[4]xf64>
- %4 = vector.interleave %i, %j
- : vector<6x3xf32> ; yields vector<6x6xf32>
+ %a = arith.constant dense<[0, 1]> : vector<2xi32>
+ %b = arith.constant dense<[2, 3]> : vector<2xi32>
+ // The value of `%0` is `[0, 2, 1, 3]`.
+ %0 = vector.interleave %a, %b : vector<2xi32> -> vector<4xi32>
+
+ // Examples showing allowed input and result types.
+ %1 = vector.interleave %c, %d : vector<f16> -> vector<2xf16>
+ %2 = vector.interleave %e, %f : vector<6x3xf32> -> vector<6x6xf32>
+ %3 = vector.interleave %g, %h : vector<[4]xi32> -> vector<[8]xi32>
+ %4 = vector.interleave %i, %j : vector<2x4x[2]xf64> -> vector<2x4x[4]xf64>
```
}];
@@ -530,7 +531,7 @@ def Vector_InterleaveOp :
let results = (outs AnyVector:$result);
let assemblyFormat = [{
- $lhs `,` $rhs attr-dict `:` type($lhs)
+ $lhs `,` $rhs attr-dict `:` type($lhs) `->` type($result)
}];
let extraClassDeclaration = [{
@@ -543,6 +544,86 @@ def Vector_InterleaveOp :
}];
}
+class ResultIsHalfSourceVectorType<string result> : TypesMatchWith<
+ "the trailing dimension of the results is half the width of the source's trailing dimension",
+ "source", result,
+ [{
+ [&]() -> ::mlir::VectorType {
+ auto vectorType = ::llvm::cast<::mlir::VectorType>($_self);
+ ::mlir::VectorType::Builder builder(vectorType);
+ auto lastDim = vectorType.getRank() - 1;
+ auto newDimSize = vectorType.getDimSize(lastDim) / 2;
+ if (newDimSize <= 0)
+ return vectorType; // (invalid input type)
+ return builder.setDim(lastDim, newDimSize);
+ }()
+ }]
+>;
+
+def SourceVectorEvenElementCount : PredOpTrait<
+ "the trailing dimension of the source vector has an even number of elements",
+ CPred<[{
+ [&](){
+ auto srcVec = getSourceVectorType();
+ return srcVec.getDimSize(srcVec.getRank() - 1) % 2 == 0;
+ }()
+ }]>
+>;
+
+def Vector_DeinterleaveOp :
+ Vector_Op<"deinterleave", [Pure,
+ SourceVectorEvenElementCount,
+ ResultIsHalfSourceVectorType<"res1">,
+ AllTypesMatch<["res1", "res2"]>
+ ]> {
+ let summary = "constructs two vectors by deinterleaving an input vector";
+ let description = [{
+ The deinterleave operation constructs two vectors from a single input
+ vector. The first result vector contains the elements from even indexes
+ of the input, and the second contains elements from odd indexes. This is
+ the inverse of a `vector.interleave` operation.
+
+ Each output's trailing dimension is half of the size of the input
+ vector's trailing dimension. This operation requires the input vector
+ to have a rank > 0 and an even number of elements in its trailing
+ dimension.
+
+ The operation supports scalable vectors.
+
+ Example:
+ ```mlir
+ %0, %1 = vector.deinterleave %a
+ : vector<8xi8> -> vector<4xi8>
+ %2, %3 = vector.deinterleave %b
+ : vector<2x8xi8> -> vector<2x4xi8>
+ %4, %5 = vector.deinterleave %c
+ : vector<2x8x4xi8> -> vector<2x8x2xi8>
+ %6, %7 = vector.deinterleave %d
+ : vector<[8]xf32> -> vector<[4]xf32>
+ %8, %9 = vector.deinterleave %e
+ : vector<2x[6]xf64> -> vector<2x[3]xf64>
+ %10, %11 = vector.deinterleave %f
+ : vector<2x4x[6]xf64> -> vector<2x4x[3]xf64>
+ ```
+ }];
+
+ let arguments = (ins AnyVector:$source);
+ let results = (outs AnyVector:$res1, AnyVector:$res2);
+
+ let assemblyFormat = [{
+ $source attr-dict `:` type($source) `->` type($res1)
+ }];
+
+ let extraClassDeclaration = [{
+ VectorType getSourceVectorType() {
+ return ::llvm::cast<VectorType>(getSource().getType());
+ }
+ VectorType getResultVectorType() {
+ return ::llvm::cast<VectorType>(getRes1().getType());
+ }
+ }];
+ }
+
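For illustration, a minimal C++ sketch of creating the new op inside a rewrite (not part of the patch; the builder signature is assumed from the TableGen-generated op class):

```cpp
// Split a vector<8xi8> value `src` into its even- and odd-indexed halves.
// `rewriter`, `loc`, and `src` are assumed to come from an enclosing rewrite.
auto halfTy = VectorType::get({4}, rewriter.getI8Type());
auto deinterleave = rewriter.create<vector::DeinterleaveOp>(
    loc, TypeRange{halfTy, halfTy}, src);
Value evens = deinterleave.getRes1(); // elements 0, 2, 4, 6
Value odds = deinterleave.getRes2();  // elements 1, 3, 5, 7
```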
def Vector_ExtractElementOp :
Vector_Op<"extractelement", [Pure,
TypesMatchWith<"result type matches element type of vector operand",
diff --git a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
index 030be328e97f..9c83acc76e77 100644
--- a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
+++ b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
@@ -157,7 +157,14 @@ private:
if (failed(newOp))
return failure();
- rewriter.replaceOp(rootOp, *newOp);
+ // Rewriting succeeded but there are no values to replace.
+ if (rootOp->getNumResults() == 0) {
+ rewriter.eraseOp(rootOp);
+ } else {
+ assert(*newOp != Value() &&
+ "Cannot replace an op's use with an empty value.");
+ rewriter.replaceOp(rootOp, *newOp);
+ }
return success();
}
diff --git a/mlir/include/mlir/IR/OpBase.td b/mlir/include/mlir/IR/OpBase.td
index 7866ac24c1cc..4481e56615b8 100644
--- a/mlir/include/mlir/IR/OpBase.td
+++ b/mlir/include/mlir/IR/OpBase.td
@@ -670,16 +670,4 @@ class TCopVTEtAreSameAt<list<int> indices> : CPred<
"[this](unsigned i) { return getElementTypeOrSelf(this->getOperand(i)); "
"}))">;
-class AnyScalarTypeMatch<list<string> names> :
- AnyMatchOperatorTrait<names, "$_self.getType().isSignlessInteger(1)",
- "scalar type">;
-
-class ScalarConditionOrMatchingShape<list<string> names> :
- PredOpTrait<
- !head(names) # " is scalar or has matching shape",
- Or<[AnyScalarTypeMatch<[!head(names)]>.predicate,
- AllShapesMatch<names>.predicate]>> {
- list<string> values = names;
-}
-
#endif // OP_BASE
diff --git a/mlir/include/mlir/InitAllPasses.h b/mlir/include/mlir/InitAllPasses.h
index 90406f555b0f..fedd7737f9ea 100644
--- a/mlir/include/mlir/InitAllPasses.h
+++ b/mlir/include/mlir/InitAllPasses.h
@@ -14,7 +14,6 @@
#ifndef MLIR_INITALLPASSES_H_
#define MLIR_INITALLPASSES_H_
-#include "mlir/Config/mlir-config.h"
#include "mlir/Conversion/Passes.h"
#include "mlir/Dialect/AMDGPU/Transforms/Passes.h"
#include "mlir/Dialect/Affine/Passes.h"
@@ -99,7 +98,7 @@ inline void registerAllPasses() {
bufferization::registerBufferizationPipelines();
sparse_tensor::registerSparseTensorPipelines();
tosa::registerTosaToLinalgPipelines();
-#if MLIR_ENABLE_CUDA_CONVERSIONS
+#if LLVM_HAS_NVPTX_TARGET
gpu::registerGPUToNVVMPipeline();
#endif
}
diff --git a/mlir/include/mlir/Interfaces/TilingInterface.td b/mlir/include/mlir/Interfaces/TilingInterface.td
index 66382f29c242..14d775d986d2 100644
--- a/mlir/include/mlir/Interfaces/TilingInterface.td
+++ b/mlir/include/mlir/Interfaces/TilingInterface.td
@@ -170,11 +170,11 @@ def PartialReductionOpInterface : OpInterface<"PartialReductionOpInterface"> {
operation reduction. The tensor shape is equal to operation result
shape with new dimension for each non zero tile size.
}],
- /*retType=*/"FailureOr<Operation*>",
+ /*retType=*/"FailureOr<SmallVector<Value>>",
/*methodName=*/"generateInitialTensorForPartialReduction",
/*args=*/(ins
"OpBuilder &":$b,
- "Location ":$loc,
+ "Location":$loc,
"ArrayRef<OpFoldResult>":$sizes,
"ArrayRef<int>":$reductionDim),
/*methodBody=*/"",
diff --git a/mlir/include/mlir/Interfaces/Utils/InferIntRangeCommon.h b/mlir/include/mlir/Interfaces/Utils/InferIntRangeCommon.h
index 97c97c23ba82..851bb534bc7e 100644
--- a/mlir/include/mlir/Interfaces/Utils/InferIntRangeCommon.h
+++ b/mlir/include/mlir/Interfaces/Utils/InferIntRangeCommon.h
@@ -16,6 +16,7 @@
#include "mlir/Interfaces/InferIntRangeInterface.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitmaskEnum.h"
#include <optional>
namespace mlir {
@@ -31,6 +32,18 @@ static constexpr unsigned indexMaxWidth = 64;
enum class CmpMode : uint32_t { Both, Signed, Unsigned };
+enum class OverflowFlags : uint32_t {
+ None = 0,
+ Nsw = 1,
+ Nuw = 2,
+ LLVM_MARK_AS_BITMASK_ENUM(Nuw)
+};
+
+/// Function that performs inference on an array of `ConstantIntRanges` while
+/// taking special overflow behavior into account.
+using InferRangeWithOvfFlagsFn =
+ function_ref<ConstantIntRanges(ArrayRef<ConstantIntRanges>, OverflowFlags)>;
+
/// Compute `inferFn` on `ranges`, whose size should be the index storage
/// bitwidth. Then, compute the function on `argRanges` again after truncating
/// the ranges to 32 bits. Finally, if the truncation of the 64-bit result is
@@ -60,11 +73,14 @@ ConstantIntRanges extSIRange(const ConstantIntRanges &range,
ConstantIntRanges truncRange(const ConstantIntRanges &range,
unsigned destWidth);
-ConstantIntRanges inferAdd(ArrayRef<ConstantIntRanges> argRanges);
+ConstantIntRanges inferAdd(ArrayRef<ConstantIntRanges> argRanges,
+ OverflowFlags ovfFlags = OverflowFlags::None);
-ConstantIntRanges inferSub(ArrayRef<ConstantIntRanges> argRanges);
+ConstantIntRanges inferSub(ArrayRef<ConstantIntRanges> argRanges,
+ OverflowFlags ovfFlags = OverflowFlags::None);
-ConstantIntRanges inferMul(ArrayRef<ConstantIntRanges> argRanges);
+ConstantIntRanges inferMul(ArrayRef<ConstantIntRanges> argRanges,
+ OverflowFlags ovfFlags = OverflowFlags::None);
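A minimal sketch (not from the patch; `lhs` and `rhs` are assumed `ConstantIntRanges` for the two operands) showing how a caller can combine the flags, which the `LLVM_MARK_AS_BITMASK_ENUM` marker above makes composable:

```cpp
using namespace mlir::intrange;
// Bitmask flags can be OR-ed together and passed to the flag-aware entry points.
OverflowFlags flags = OverflowFlags::Nsw | OverflowFlags::Nuw;
ConstantIntRanges tight = inferAdd({lhs, rhs}, flags); // nsw+nuw add
ConstantIntRanges wrap = inferAdd({lhs, rhs});         // defaults to None
```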
ConstantIntRanges inferDivS(ArrayRef<ConstantIntRanges> argRanges);
@@ -94,7 +110,8 @@ ConstantIntRanges inferOr(ArrayRef<ConstantIntRanges> argRanges);
ConstantIntRanges inferXor(ArrayRef<ConstantIntRanges> argRanges);
-ConstantIntRanges inferShl(ArrayRef<ConstantIntRanges> argRanges);
+ConstantIntRanges inferShl(ArrayRef<ConstantIntRanges> argRanges,
+ OverflowFlags ovfFlags = OverflowFlags::None);
ConstantIntRanges inferShrS(ArrayRef<ConstantIntRanges> argRanges);
diff --git a/mlir/include/mlir/Pass/PassManager.h b/mlir/include/mlir/Pass/PassManager.h
index 1b2e6a3bc82b..b3e427588173 100644
--- a/mlir/include/mlir/Pass/PassManager.h
+++ b/mlir/include/mlir/Pass/PassManager.h
@@ -18,8 +18,8 @@
#include "llvm/Support/raw_ostream.h"
#include <functional>
-#include <vector>
#include <optional>
+#include <vector>
namespace mlir {
class AnalysisManager;
@@ -387,6 +387,43 @@ public:
bool printAfterOnlyOnFailure = false, raw_ostream &out = llvm::errs(),
OpPrintingFlags opPrintingFlags = OpPrintingFlags());
+ /// Similar to `enableIRPrinting` above, except that instead of printing
+ /// the IR to a single output stream, the instrumentation will print the
+ /// output of each pass to a separate file. The files will be organized into a
+ /// directory tree rooted at `printTreeDir`. The directories mirror the
+ /// nesting structure of the IR. For example, if the IR is congruent with the
+ /// pass-pipeline "builtin.module(passA,passB,func.func(passC,passD),passE)",
+ /// and `printTreeDir=/tmp/pipeline_output`, then the file tree created will
+ /// look like:
+ ///
+ /// ```
+ /// /tmp/pipeline_output
+ /// ├── builtin_module_the_symbol_name
+ /// │ ├── 0_passA.mlir
+ /// │ ├── 1_passB.mlir
+ /// │ ├── 2_passE.mlir
+ /// │ ├── func_func_my_func_name
+ /// │ │ ├── 1_0_passC.mlir
+ /// │ │ ├── 1_1_passD.mlir
+ /// │ ├── func_func_my_other_func_name
+ /// │ │ ├── 1_0_passC.mlir
+ /// │ │ ├── 1_1_passD.mlir
+ /// ```
+ ///
+ /// The subdirectories are given names that reflect the parent operation name
+ /// and symbol name (if present). The output MLIR files are prefixed using an
+ /// atomic counter to indicate the order the passes were printed in and to
+ /// prevent any potential name collisions.
+ void enableIRPrintingToFileTree(
+ std::function<bool(Pass *, Operation *)> shouldPrintBeforePass =
+ [](Pass *, Operation *) { return true; },
+ std::function<bool(Pass *, Operation *)> shouldPrintAfterPass =
+ [](Pass *, Operation *) { return true; },
+ bool printModuleScope = true, bool printAfterOnlyOnChange = true,
+ bool printAfterOnlyOnFailure = false,
+ llvm::StringRef printTreeDir = ".pass_manager_output",
+ OpPrintingFlags opPrintingFlags = OpPrintingFlags());
+
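As a usage sketch (an assumed call site mirroring the defaults above; not part of the patch):

```cpp
// Dump the IR after every pass into a tree rooted at /tmp/pipeline_output.
mlir::PassManager pm(&context);
pm.enableIRPrintingToFileTree(
    /*shouldPrintBeforePass=*/[](mlir::Pass *, mlir::Operation *) { return false; },
    /*shouldPrintAfterPass=*/[](mlir::Pass *, mlir::Operation *) { return true; },
    /*printModuleScope=*/true,
    /*printAfterOnlyOnChange=*/true,
    /*printAfterOnlyOnFailure=*/false,
    /*printTreeDir=*/"/tmp/pipeline_output");
```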
//===--------------------------------------------------------------------===//
// Pass Timing
diff --git a/mlir/include/mlir/Transforms/RegionUtils.h b/mlir/include/mlir/Transforms/RegionUtils.h
index f65d0d44eef4..06eebff201d1 100644
--- a/mlir/include/mlir/Transforms/RegionUtils.h
+++ b/mlir/include/mlir/Transforms/RegionUtils.h
@@ -87,10 +87,6 @@ LogicalResult eraseUnreachableBlocks(RewriterBase &rewriter,
LogicalResult runRegionDCE(RewriterBase &rewriter,
MutableArrayRef<Region> regions);
-/// Get a list of blocks that is sorted according to dominance. This sort is
-/// stable.
-SetVector<Block *> getBlocksSortedByDominance(Region &region);
-
} // namespace mlir
#endif // MLIR_TRANSFORMS_REGIONUTILS_H_
diff --git a/mlir/lib/Analysis/CMakeLists.txt b/mlir/lib/Analysis/CMakeLists.txt
index 005814ddbec7..38d8415d81c7 100644
--- a/mlir/lib/Analysis/CMakeLists.txt
+++ b/mlir/lib/Analysis/CMakeLists.txt
@@ -6,6 +6,7 @@ set(LLVM_OPTIONAL_SOURCES
Liveness.cpp
CFGLoopInfo.cpp
SliceAnalysis.cpp
+ TopologicalSortUtils.cpp
AliasAnalysis/LocalAliasAnalysis.cpp
@@ -28,6 +29,7 @@ add_mlir_library(MLIRAnalysis
Liveness.cpp
CFGLoopInfo.cpp
SliceAnalysis.cpp
+ TopologicalSortUtils.cpp
AliasAnalysis/LocalAliasAnalysis.cpp
diff --git a/mlir/lib/Analysis/Liveness.cpp b/mlir/lib/Analysis/Liveness.cpp
index a8e0daeabf40..e3245d68b369 100644
--- a/mlir/lib/Analysis/Liveness.cpp
+++ b/mlir/lib/Analysis/Liveness.cpp
@@ -72,6 +72,10 @@ struct BlockInfoBuilder {
defValues.insert(result);
for (Value operand : op->getOperands())
useValues.insert(operand);
+ for (Region &region : op->getRegions())
+ for (Block &child : region.getBlocks())
+ for (BlockArgument arg : child.getArguments())
+ defValues.insert(arg);
});
llvm::set_subtract(useValues, defValues);
}
diff --git a/mlir/lib/Analysis/SliceAnalysis.cpp b/mlir/lib/Analysis/SliceAnalysis.cpp
index 26fe8e3dc081..2b1cf411ceee 100644
--- a/mlir/lib/Analysis/SliceAnalysis.cpp
+++ b/mlir/lib/Analysis/SliceAnalysis.cpp
@@ -11,7 +11,8 @@
//===----------------------------------------------------------------------===//
#include "mlir/Analysis/SliceAnalysis.h"
-#include "mlir/IR/BuiltinOps.h"
+#include "mlir/Analysis/TopologicalSortUtils.h"
+#include "mlir/IR/Block.h"
#include "mlir/IR/Operation.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Support/LLVM.h"
@@ -164,62 +165,6 @@ mlir::getSlice(Operation *op, const BackwardSliceOptions &backwardSliceOptions,
return topologicalSort(slice);
}
-namespace {
-/// DFS post-order implementation that maintains a global count to work across
-/// multiple invocations, to help implement topological sort on multi-root DAGs.
-/// We traverse all operations but only record the ones that appear in
-/// `toSort` for the final result.
-struct DFSState {
- DFSState(const SetVector<Operation *> &set) : toSort(set), seen() {}
- const SetVector<Operation *> &toSort;
- SmallVector<Operation *, 16> topologicalCounts;
- DenseSet<Operation *> seen;
-};
-} // namespace
-
-static void dfsPostorder(Operation *root, DFSState *state) {
- SmallVector<Operation *> queue(1, root);
- std::vector<Operation *> ops;
- while (!queue.empty()) {
- Operation *current = queue.pop_back_val();
- ops.push_back(current);
- for (Operation *op : current->getUsers())
- queue.push_back(op);
- for (Region &region : current->getRegions()) {
- for (Operation &op : region.getOps())
- queue.push_back(&op);
- }
- }
-
- for (Operation *op : llvm::reverse(ops)) {
- if (state->seen.insert(op).second && state->toSort.count(op) > 0)
- state->topologicalCounts.push_back(op);
- }
-}
-
-SetVector<Operation *>
-mlir::topologicalSort(const SetVector<Operation *> &toSort) {
- if (toSort.empty()) {
- return toSort;
- }
-
- // Run from each root with global count and `seen` set.
- DFSState state(toSort);
- for (auto *s : toSort) {
- assert(toSort.count(s) == 1 && "NYI: multi-sets not supported");
- dfsPostorder(s, &state);
- }
-
- // Reorder and return.
- SetVector<Operation *> res;
- for (auto it = state.topologicalCounts.rbegin(),
- eit = state.topologicalCounts.rend();
- it != eit; ++it) {
- res.insert(*it);
- }
- return res;
-}
-
/// Returns true if `value` (transitively) depends on iteration-carried values
/// of the given `ancestorOp`.
static bool dependsOnCarriedVals(Value value,
diff --git a/mlir/lib/Transforms/Utils/TopologicalSortUtils.cpp b/mlir/lib/Analysis/TopologicalSortUtils.cpp
index f3a9d217f2c9..c406960fdecc 100644
--- a/mlir/lib/Transforms/Utils/TopologicalSortUtils.cpp
+++ b/mlir/lib/Analysis/TopologicalSortUtils.cpp
@@ -1,4 +1,4 @@
-//===- TopologicalSortUtils.h - Topological sort utilities ------*- C++ -*-===//
+//===- TopologicalSortUtils.cpp - Topological sort utilities --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,8 +6,13 @@
//
//===----------------------------------------------------------------------===//
-#include "mlir/Transforms/TopologicalSortUtils.h"
+#include "mlir/Analysis/TopologicalSortUtils.h"
+#include "mlir/IR/Block.h"
#include "mlir/IR/OpDefinition.h"
+#include "mlir/IR/RegionGraphTraits.h"
+
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SetVector.h"
using namespace mlir;
@@ -146,3 +151,135 @@ bool mlir::computeTopologicalSorting(
return allOpsScheduled;
}
+
+SetVector<Block *> mlir::getBlocksSortedByDominance(Region &region) {
+ // For each block that has not been visited yet (i.e. that has no
+ // predecessors), add it to the list as well as its successors.
+ SetVector<Block *> blocks;
+ for (Block &b : region) {
+ if (blocks.count(&b) == 0) {
+ llvm::ReversePostOrderTraversal<Block *> traversal(&b);
+ blocks.insert(traversal.begin(), traversal.end());
+ }
+ }
+ assert(blocks.size() == region.getBlocks().size() &&
+ "some blocks are not sorted");
+
+ return blocks;
+}
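A minimal usage sketch for the sort above (assumed caller; `region` and `process` are hypothetical stand-ins):

```cpp
// Iterate blocks so that a block is visited only after its dominators.
for (Block *block : mlir::getBlocksSortedByDominance(region))
  process(block); // `process` is a placeholder for caller-specific work
```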
+
+namespace {
+class TopoSortHelper {
+public:
+ explicit TopoSortHelper(const SetVector<Operation *> &toSort)
+ : toSort(toSort) {}
+
+ /// Executes the topological sort of the operations this instance was
+ /// constructed with. This function will destroy the internal state of the
+ /// instance.
+ SetVector<Operation *> sort() {
+ if (toSort.size() <= 1) {
+ // Note: Creates a copy on purpose.
+ return toSort;
+ }
+
+ // First, find the root region to start the traversal through the IR. This
+ // additionally enriches the internal caches with all relevant ancestor
+ // regions and blocks.
+ Region *rootRegion = findCommonAncestorRegion();
+ assert(rootRegion && "expected all ops to have a common ancestor");
+
+ // Sort all elements in `toSort` by traversing the IR in the appropriate
+ // order.
+ SetVector<Operation *> result = topoSortRegion(*rootRegion);
+ assert(result.size() == toSort.size() &&
+ "expected all operations to be present in the result");
+ return result;
+ }
+
+private:
+ /// Computes the closest common ancestor region of all operations in `toSort`.
+ Region *findCommonAncestorRegion() {
+ // Map to count the number of times a region was encountered.
+ DenseMap<Region *, size_t> regionCounts;
+ size_t expectedCount = toSort.size();
+
+ // Walk the region tree for each operation towards the root and add to the
+ // region count.
+ Region *res = nullptr;
+ for (Operation *op : toSort) {
+ Region *current = op->getParentRegion();
+ // Store the block as an ancestor block.
+ ancestorBlocks.insert(op->getBlock());
+ while (current) {
+ // Insert or update the count and compare it.
+ if (++regionCounts[current] == expectedCount) {
+ res = current;
+ break;
+ }
+ ancestorBlocks.insert(current->getParentOp()->getBlock());
+ current = current->getParentRegion();
+ }
+ }
+ auto firstRange = llvm::make_first_range(regionCounts);
+ ancestorRegions.insert(firstRange.begin(), firstRange.end());
+ return res;
+ }
+
+ /// Performs a dominance-respecting IR walk to collect the topological order
+ /// of the operations to sort.
+ SetVector<Operation *> topoSortRegion(Region &rootRegion) {
+ using StackT = PointerUnion<Region *, Block *, Operation *>;
+
+ SetVector<Operation *> result;
+ // Stack that stores the different IR constructs to traverse.
+ SmallVector<StackT> stack;
+ stack.push_back(&rootRegion);
+
+ // Traverse the IR in a dominance respecting pre-order walk.
+ while (!stack.empty()) {
+ StackT current = stack.pop_back_val();
+ if (auto *region = dyn_cast<Region *>(current)) {
+ // A region's blocks need to be traversed in dominance order.
+ SetVector<Block *> sortedBlocks = getBlocksSortedByDominance(*region);
+ for (Block *block : llvm::reverse(sortedBlocks)) {
+ // Only add blocks to the stack that are ancestors of the operations
+ // to sort.
+ if (ancestorBlocks.contains(block))
+ stack.push_back(block);
+ }
+ continue;
+ }
+
+ if (auto *block = dyn_cast<Block *>(current)) {
+ // Add all of the block's operations to the stack.
+ for (Operation &op : llvm::reverse(*block))
+ stack.push_back(&op);
+ continue;
+ }
+
+ auto *op = cast<Operation *>(current);
+ if (toSort.contains(op))
+ result.insert(op);
+
+ // Add all the subregions that are ancestors of the operations to sort.
+ for (Region &subRegion : op->getRegions())
+ if (ancestorRegions.contains(&subRegion))
+ stack.push_back(&subRegion);
+ }
+ return result;
+ }
+
+ /// Operations to sort.
+ const SetVector<Operation *> &toSort;
+ /// Set containing all the ancestor regions of the operations to sort.
+ DenseSet<Region *> ancestorRegions;
+ /// Set containing all the ancestor blocks of the operations to sort.
+ DenseSet<Block *> ancestorBlocks;
+};
+} // namespace
+
+SetVector<Operation *>
+mlir::topologicalSort(const SetVector<Operation *> &toSort) {
+ return TopoSortHelper(toSort).sort();
+}
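A typical call site might look like the following sketch (assumed usage, not part of this change; `rootOp`, `builder`, and `mapping` come from the surrounding transform):

```cpp
// Order a backward slice so that producers precede consumers before cloning.
SetVector<Operation *> slice;
getBackwardSlice(rootOp, &slice); // `rootOp` is some Operation * of interest
SetVector<Operation *> ordered = mlir::topologicalSort(slice);
for (Operation *op : ordered)
  builder.clone(*op, mapping); // defs are now visited before their uses
```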
diff --git a/mlir/lib/Bindings/Python/IRAttributes.cpp b/mlir/lib/Bindings/Python/IRAttributes.cpp
index dda2003ba037..b5f31aa5dec5 100644
--- a/mlir/lib/Bindings/Python/IRAttributes.cpp
+++ b/mlir/lib/Bindings/Python/IRAttributes.cpp
@@ -15,6 +15,7 @@
#include "PybindUtils.h"
#include "llvm/ADT/ScopeExit.h"
+#include "llvm/Support/raw_ostream.h"
#include "mlir-c/BuiltinAttributes.h"
#include "mlir-c/BuiltinTypes.h"
@@ -72,6 +73,27 @@ Raises:
type or if the buffer does not meet expectations.
)";
+static const char kDenseElementsAttrGetFromListDocstring[] =
+ R"(Gets a DenseElementsAttr from a Python list of attributes.
+
+Note that it can be expensive to construct attributes individually.
+For a large number of elements, consider using a Python buffer or array instead.
+
+Args:
+ attrs: A list of attributes.
+ type: The desired shape and type of the resulting DenseElementsAttr.
+ If not provided, the element type is determined based on the type
+ of the 0th attribute and the shape is `[len(attrs)]`.
+ context: Explicit context, if not from context manager.
+
+Returns:
+ DenseElementsAttr on success.
+
+Raises:
+ ValueError: If the type of the attributes does not match the type
+    specified by `type`.
+)";
+
static const char kDenseResourceElementsAttrGetFromBufferDocstring[] =
R"(Gets a DenseResourceElementsAttr from a Python buffer or array.
@@ -648,6 +670,57 @@ public:
using PyConcreteAttribute::PyConcreteAttribute;
static PyDenseElementsAttribute
+ getFromList(py::list attributes, std::optional<PyType> explicitType,
+ DefaultingPyMlirContext contextWrapper) {
+
+ const size_t numAttributes = py::len(attributes);
+ if (numAttributes == 0)
+ throw py::value_error("Attributes list must be non-empty.");
+
+ MlirType shapedType;
+ if (explicitType) {
+ if ((!mlirTypeIsAShaped(*explicitType) ||
+ !mlirShapedTypeHasStaticShape(*explicitType))) {
+
+ std::string message;
+ llvm::raw_string_ostream os(message);
+ os << "Expected a static ShapedType for the shaped_type parameter: "
+ << py::repr(py::cast(*explicitType));
+ throw py::value_error(os.str());
+ }
+ shapedType = *explicitType;
+ } else {
+ SmallVector<int64_t> shape{static_cast<int64_t>(numAttributes)};
+ shapedType = mlirRankedTensorTypeGet(
+ shape.size(), shape.data(),
+ mlirAttributeGetType(pyTryCast<PyAttribute>(attributes[0])),
+ mlirAttributeGetNull());
+ }
+
+ SmallVector<MlirAttribute> mlirAttributes;
+ mlirAttributes.reserve(numAttributes);
+ for (const py::handle &attribute : attributes) {
+ MlirAttribute mlirAttribute = pyTryCast<PyAttribute>(attribute);
+ MlirType attrType = mlirAttributeGetType(mlirAttribute);
+ mlirAttributes.push_back(mlirAttribute);
+
+ if (!mlirTypeEqual(mlirShapedTypeGetElementType(shapedType), attrType)) {
+ std::string message;
+ llvm::raw_string_ostream os(message);
+ os << "All attributes must be of the same type and match "
+ << "the type parameter: expected=" << py::repr(py::cast(shapedType))
+ << ", but got=" << py::repr(py::cast(attrType));
+ throw py::value_error(os.str());
+ }
+ }
+
+ MlirAttribute elements = mlirDenseElementsAttrGet(
+ shapedType, mlirAttributes.size(), mlirAttributes.data());
+
+ return PyDenseElementsAttribute(contextWrapper->getRef(), elements);
+ }
+
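For comparison, a rough sketch of the same construction through the MLIR C API calls used above (the attribute handles `intAttr0` and `intAttr1` are assumed to exist and share an element type):

```cpp
// Build a tensor<2xi32>-shaped DenseElementsAttr from two IntegerAttrs.
MlirAttribute elems[2] = {intAttr0, intAttr1};
int64_t shape[1] = {2};
MlirType tensorTy = mlirRankedTensorTypeGet(
    /*rank=*/1, shape, mlirAttributeGetType(intAttr0), mlirAttributeGetNull());
MlirAttribute dense =
    mlirDenseElementsAttrGet(tensorTy, /*numElements=*/2, elems);
```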
+ static PyDenseElementsAttribute
getFromBuffer(py::buffer array, bool signless,
std::optional<PyType> explicitType,
std::optional<std::vector<int64_t>> explicitShape,
@@ -883,6 +956,10 @@ public:
py::arg("type") = py::none(), py::arg("shape") = py::none(),
py::arg("context") = py::none(),
kDenseElementsAttrGetDocstring)
+ .def_static("get", PyDenseElementsAttribute::getFromList,
+ py::arg("attrs"), py::arg("type") = py::none(),
+ py::arg("context") = py::none(),
+ kDenseElementsAttrGetFromListDocstring)
.def_static("get_splat", PyDenseElementsAttribute::getSplat,
py::arg("shaped_type"), py::arg("element_attr"),
"Gets a DenseElementsAttr where all values are the same")
diff --git a/mlir/lib/Bindings/Python/IRCore.cpp b/mlir/lib/Bindings/Python/IRCore.cpp
index 01678a9719f9..2b2792ea6c77 100644
--- a/mlir/lib/Bindings/Python/IRCore.cpp
+++ b/mlir/lib/Bindings/Python/IRCore.cpp
@@ -240,7 +240,20 @@ struct PyGlobalDebugFlag {
// Debug flags.
py::class_<PyGlobalDebugFlag>(m, "_GlobalDebug", py::module_local())
.def_property_static("flag", &PyGlobalDebugFlag::get,
- &PyGlobalDebugFlag::set, "LLVM-wide debug flag");
+ &PyGlobalDebugFlag::set, "LLVM-wide debug flag")
+ .def_static(
+ "set_types",
+ [](const std::string &type) {
+ mlirSetGlobalDebugType(type.c_str());
+ },
+ "types"_a, "Sets specific debug types to be produced by LLVM")
+ .def_static("set_types", [](const std::vector<std::string> &types) {
+ std::vector<const char *> pointers;
+ pointers.reserve(types.size());
+ for (const std::string &str : types)
+ pointers.push_back(str.c_str());
+ mlirSetGlobalDebugTypes(pointers.data(), pointers.size());
+ });
}
};
diff --git a/mlir/lib/CAPI/Debug/Debug.cpp b/mlir/lib/CAPI/Debug/Debug.cpp
index 288ecd601274..320ece4998e0 100644
--- a/mlir/lib/CAPI/Debug/Debug.cpp
+++ b/mlir/lib/CAPI/Debug/Debug.cpp
@@ -16,3 +16,21 @@
void mlirEnableGlobalDebug(bool enable) { llvm::DebugFlag = enable; }
bool mlirIsGlobalDebugEnabled() { return llvm::DebugFlag; }
+
+void mlirSetGlobalDebugType(const char *type) {
+ // Depending on the NDEBUG flag, this name can be either a function or a macro
+ // that expands to something that isn't a function call, so we cannot
+ // explicitly qualify it with `llvm::` or name it in a `using` declaration.
+ using namespace llvm;
+ setCurrentDebugType(type);
+}
+
+void mlirSetGlobalDebugTypes(const char **types, intptr_t n) {
+ using namespace llvm;
+ setCurrentDebugTypes(types, n);
+}
+
+bool mlirIsCurrentDebugType(const char *type) {
+ using namespace llvm;
+ return isCurrentDebugType(type);
+}
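Illustrative use of the three entry points added above ("greedy-rewriter" is just an example `DEBUG_TYPE`):

```cpp
mlirEnableGlobalDebug(true);               // sets llvm::DebugFlag
mlirSetGlobalDebugType("greedy-rewriter"); // restrict output to one type
if (mlirIsCurrentDebugType("greedy-rewriter")) {
  // LLVM_DEBUG output tagged with this DEBUG_TYPE is now emitted.
}
```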
diff --git a/mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp b/mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp
index 1447b182ccfd..0be3d76f556d 100644
--- a/mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp
+++ b/mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp
@@ -15,6 +15,7 @@
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/EmitC/IR/EmitC.h"
+#include "mlir/Tools/PDLL/AST/Types.h"
#include "mlir/Transforms/DialectConversion.h"
using namespace mlir;
@@ -112,6 +113,93 @@ public:
}
};
+template <typename ArithOp, bool castToUnsigned>
+class CastConversion : public OpConversionPattern<ArithOp> {
+public:
+ using OpConversionPattern<ArithOp>::OpConversionPattern;
+
+ LogicalResult
+ matchAndRewrite(ArithOp op, typename ArithOp::Adaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+
+ Type opReturnType = this->getTypeConverter()->convertType(op.getType());
+ if (!isa_and_nonnull<IntegerType>(opReturnType))
+ return rewriter.notifyMatchFailure(op, "expected integer result type");
+
+ if (adaptor.getOperands().size() != 1) {
+ return rewriter.notifyMatchFailure(
+ op, "CastConversion only supports unary ops");
+ }
+
+ Type operandType = adaptor.getIn().getType();
+ if (!isa_and_nonnull<IntegerType>(operandType))
+ return rewriter.notifyMatchFailure(op, "expected integer operand type");
+
+ // Signed (sign-extending) casts from i1 are not supported.
+ if (operandType.isInteger(1) && !castToUnsigned)
+ return rewriter.notifyMatchFailure(op,
+ "operation not supported on i1 type");
+
+ // to-i1 conversions: arith semantics want truncation, whereas (bool)(v) is
+ // equivalent to (v != 0). Implementing as (bool)(v & 0x01) gives
+ // truncation.
+ if (opReturnType.isInteger(1)) {
+ auto constOne = rewriter.create<emitc::ConstantOp>(
+ op.getLoc(), operandType, rewriter.getIntegerAttr(operandType, 1));
+ auto oneAndOperand = rewriter.create<emitc::BitwiseAndOp>(
+ op.getLoc(), operandType, adaptor.getIn(), constOne);
+ rewriter.replaceOpWithNewOp<emitc::CastOp>(op, opReturnType,
+ oneAndOperand);
+ return success();
+ }
+
+ bool isTruncation = operandType.getIntOrFloatBitWidth() >
+ opReturnType.getIntOrFloatBitWidth();
+ bool doUnsigned = castToUnsigned || isTruncation;
+
+ Type castType = opReturnType;
+ // If the signedness of the converted return type does not match the
+ // signedness required for the cast, cast through an integer type of the
+ // same width with the required signedness.
+ if (castType.isUnsignedInteger() != doUnsigned) {
+ castType = rewriter.getIntegerType(opReturnType.getIntOrFloatBitWidth(),
+ /*isSigned=*/!doUnsigned);
+ }
+
+ Value actualOp = adaptor.getIn();
+ // Adapt the signedness of the operand if necessary
+ if (operandType.isUnsignedInteger() != doUnsigned) {
+ Type correctSignednessType =
+ rewriter.getIntegerType(operandType.getIntOrFloatBitWidth(),
+ /*isSigned=*/!doUnsigned);
+ actualOp = rewriter.template create<emitc::CastOp>(
+ op.getLoc(), correctSignednessType, actualOp);
+ }
+
+ auto result = rewriter.template create<emitc::CastOp>(op.getLoc(), castType,
+ actualOp);
+
+ // Cast to the expected output type
+ if (castType != opReturnType) {
+ result = rewriter.template create<emitc::CastOp>(op.getLoc(),
+ opReturnType, result);
+ }
+
+ rewriter.replaceOp(op, result);
+ return success();
+ }
+};
+
+template <typename ArithOp>
+class UnsignedCastConversion : public CastConversion<ArithOp, true> {
+ using CastConversion<ArithOp, true>::CastConversion;
+};
+
+template <typename ArithOp>
+class SignedCastConversion : public CastConversion<ArithOp, false> {
+ using CastConversion<ArithOp, false>::CastConversion;
+};
+
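The `(bool)(v & 0x01)` trick used in the to-i1 path above is easy to check with a standalone snippet (plain C++, illustrative only):

```cpp
#include <cstdio>

int main() {
  int v = 2;              // low bit is 0
  bool asBool = v;        // (bool)v means "v != 0"       -> true
  bool truncated = v & 1; // arith.trunci-to-i1 semantics -> false
  std::printf("%d %d\n", asBool, truncated); // prints: 1 0
  return 0;
}
```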
template <typename ArithOp, typename EmitCOp>
class ArithOpConversion final : public OpConversionPattern<ArithOp> {
public:
@@ -313,6 +401,10 @@ void mlir::populateArithToEmitCPatterns(TypeConverter &typeConverter,
IntegerOpConversion<arith::SubIOp, emitc::SubOp>,
CmpIOpConversion,
SelectOpConversion,
+ // Truncation is guaranteed for unsigned types.
+ UnsignedCastConversion<arith::TruncIOp>,
+ SignedCastConversion<arith::ExtSIOp>,
+ UnsignedCastConversion<arith::ExtUIOp>,
ItoFCastOpConversion<arith::SIToFPOp>,
ItoFCastOpConversion<arith::UIToFPOp>,
FtoICastOpConversion<arith::FPToSIOp>,
diff --git a/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp b/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp
index 53b44aa3241b..94b7c8d4f2fd 100644
--- a/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp
+++ b/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp
@@ -449,61 +449,47 @@ mlir::convertFuncOpToLLVMFuncOp(FunctionOpInterface funcOp,
"region types conversion failed");
}
+ if (!shouldUseBarePtrCallConv(funcOp, &converter)) {
+ if (funcOp->getAttrOfType<UnitAttr>(
+ LLVM::LLVMDialect::getEmitCWrapperAttrName())) {
+ if (newFuncOp.isVarArg())
+ return funcOp.emitError("C interface for variadic functions is not "
+ "supported yet.");
+
+ if (newFuncOp.isExternal())
+ wrapExternalFunction(rewriter, funcOp->getLoc(), converter, funcOp,
+ newFuncOp);
+ else
+ wrapForExternalCallers(rewriter, funcOp->getLoc(), converter, funcOp,
+ newFuncOp);
+ }
+ } else {
+ modifyFuncOpToUseBarePtrCallingConv(
+ rewriter, funcOp->getLoc(), converter, newFuncOp,
+ llvm::cast<FunctionType>(funcOp.getFunctionType()).getInputs());
+ }
+
return newFuncOp;
}
namespace {
-struct FuncOpConversionBase : public ConvertOpToLLVMPattern<func::FuncOp> {
-protected:
- using ConvertOpToLLVMPattern<func::FuncOp>::ConvertOpToLLVMPattern;
-
- // Convert input FuncOp to LLVMFuncOp by using the LLVMTypeConverter provided
- // to this legalization pattern.
- FailureOr<LLVM::LLVMFuncOp>
- convertFuncOpToLLVMFuncOp(func::FuncOp funcOp,
- ConversionPatternRewriter &rewriter) const {
- return mlir::convertFuncOpToLLVMFuncOp(
- cast<FunctionOpInterface>(funcOp.getOperation()), rewriter,
- *getTypeConverter());
- }
-};
-
/// FuncOp legalization pattern that converts MemRef arguments to pointers to
/// MemRef descriptors (LLVM struct data types) containing all the MemRef type
/// information.
-struct FuncOpConversion : public FuncOpConversionBase {
+struct FuncOpConversion : public ConvertOpToLLVMPattern<func::FuncOp> {
FuncOpConversion(const LLVMTypeConverter &converter)
- : FuncOpConversionBase(converter) {}
+ : ConvertOpToLLVMPattern(converter) {}
LogicalResult
matchAndRewrite(func::FuncOp funcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
- FailureOr<LLVM::LLVMFuncOp> newFuncOp =
- convertFuncOpToLLVMFuncOp(funcOp, rewriter);
+ FailureOr<LLVM::LLVMFuncOp> newFuncOp = mlir::convertFuncOpToLLVMFuncOp(
+ cast<FunctionOpInterface>(funcOp.getOperation()), rewriter,
+ *getTypeConverter());
if (failed(newFuncOp))
return rewriter.notifyMatchFailure(funcOp, "Could not convert funcop");
- if (!shouldUseBarePtrCallConv(funcOp, this->getTypeConverter())) {
- if (funcOp->getAttrOfType<UnitAttr>(
- LLVM::LLVMDialect::getEmitCWrapperAttrName())) {
- if (newFuncOp->isVarArg())
- return funcOp->emitError("C interface for variadic functions is not "
- "supported yet.");
-
- if (newFuncOp->isExternal())
- wrapExternalFunction(rewriter, funcOp->getLoc(), *getTypeConverter(),
- funcOp, *newFuncOp);
- else
- wrapForExternalCallers(rewriter, funcOp->getLoc(),
- *getTypeConverter(), funcOp, *newFuncOp);
- }
- } else {
- modifyFuncOpToUseBarePtrCallingConv(rewriter, funcOp->getLoc(),
- *getTypeConverter(), *newFuncOp,
- funcOp.getFunctionType().getInputs());
- }
-
rewriter.eraseOp(funcOp);
return success();
}
diff --git a/mlir/lib/Conversion/OpenMPToLLVM/OpenMPToLLVM.cpp b/mlir/lib/Conversion/OpenMPToLLVM/OpenMPToLLVM.cpp
index a206c7b228d2..f6a6d1d7228a 100644
--- a/mlir/lib/Conversion/OpenMPToLLVM/OpenMPToLLVM.cpp
+++ b/mlir/lib/Conversion/OpenMPToLLVM/OpenMPToLLVM.cpp
@@ -185,21 +185,6 @@ struct MapInfoOpConversion : public ConvertOpToLLVMPattern<omp::MapInfoOp> {
}
};
-struct ReductionOpConversion : public ConvertOpToLLVMPattern<omp::ReductionOp> {
- using ConvertOpToLLVMPattern<omp::ReductionOp>::ConvertOpToLLVMPattern;
- LogicalResult
- matchAndRewrite(omp::ReductionOp curOp, OpAdaptor adaptor,
- ConversionPatternRewriter &rewriter) const override {
- if (isa<MemRefType>(curOp.getAccumulator().getType())) {
- // TODO: Support memref type in variable operands
- return rewriter.notifyMatchFailure(curOp, "memref is not supported yet");
- }
- rewriter.replaceOpWithNewOp<omp::ReductionOp>(
- curOp, TypeRange(), adaptor.getOperands(), curOp->getAttrs());
- return success();
- }
-};
-
template <typename OpType>
struct MultiRegionOpConversion : public ConvertOpToLLVMPattern<OpType> {
using ConvertOpToLLVMPattern<OpType>::ConvertOpToLLVMPattern;
@@ -246,9 +231,6 @@ void mlir::configureOpenMPToLLVMConversionLegality(
return typeConverter.isLegal(op->getOperandTypes()) &&
typeConverter.isLegal(op->getResultTypes());
});
- target.addDynamicallyLegalOp<mlir::omp::ReductionOp>([&](Operation *op) {
- return typeConverter.isLegal(op->getOperandTypes());
- });
target.addDynamicallyLegalOp<
mlir::omp::AtomicUpdateOp, mlir::omp::CriticalOp, mlir::omp::TargetOp,
mlir::omp::TargetDataOp, mlir::omp::LoopNestOp,
@@ -275,11 +257,11 @@ void mlir::populateOpenMPToLLVMConversionPatterns(LLVMTypeConverter &converter,
[&](omp::MapBoundsType type) -> Type { return type; });
patterns.add<
- AtomicReadOpConversion, MapInfoOpConversion, ReductionOpConversion,
+ AtomicReadOpConversion, MapInfoOpConversion,
MultiRegionOpConversion<omp::DeclareReductionOp>,
MultiRegionOpConversion<omp::PrivateClauseOp>,
RegionOpConversion<omp::CriticalOp>, RegionOpConversion<omp::LoopNestOp>,
- RegionOpConversion<omp::MasterOp>, ReductionOpConversion,
+ RegionOpConversion<omp::MasterOp>,
RegionOpConversion<omp::OrderedRegionOp>,
RegionOpConversion<omp::ParallelOp>, RegionOpConversion<omp::WsloopOp>,
RegionOpConversion<omp::SectionsOp>, RegionOpConversion<omp::SectionOp>,
diff --git a/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp b/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp
index d8e473a562e5..87923477766d 100644
--- a/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp
+++ b/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp
@@ -356,6 +356,20 @@ struct TransposeOpToArmSMELowering
return failure();
auto loc = transposeOp.getLoc();
+ Value input = transposeOp.getVector();
+
+ if (auto xferOp = input.getDefiningOp<vector::TransferReadOp>();
+ xferOp && xferOp->hasOneUse()) {
+ // Fold transpose into transfer_read to enable in-flight transpose when
+ // converting to arm_sme.tile_load.
+ rewriter.modifyOpInPlace(xferOp, [&]() {
+ xferOp->setAttr(xferOp.getPermutationMapAttrName(),
+ AffineMapAttr::get(AffineMap::getPermutationMap(
+ permutation, transposeOp.getContext())));
+ });
+ rewriter.replaceOp(transposeOp, xferOp);
+ return success();
+ }
// Allocate buffer to store input tile to.
Value vscale =
@@ -372,8 +386,6 @@ struct TransposeOpToArmSMELowering
auto buffer = rewriter.create<memref::AllocaOp>(
loc, bufferType, ValueRange{numTileSlices, numTileSlices});
- Value input = transposeOp.getVector();
-
// Store input tile.
auto tileStoreOp = rewriter.create<arm_sme::TileStoreOp>(
loc, input, buffer, ValueRange{c0, c0});
diff --git a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
index 332f0a2eecfc..4496c2bc5fe8 100644
--- a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
+++ b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
@@ -15,6 +15,7 @@
#include <type_traits>
#include "mlir/Analysis/SliceAnalysis.h"
+#include "mlir/Analysis/TopologicalSortUtils.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
diff --git a/mlir/lib/Conversion/VectorToSPIRV/CMakeLists.txt b/mlir/lib/Conversion/VectorToSPIRV/CMakeLists.txt
index 113983146f5b..bb9f793d7fe0 100644
--- a/mlir/lib/Conversion/VectorToSPIRV/CMakeLists.txt
+++ b/mlir/lib/Conversion/VectorToSPIRV/CMakeLists.txt
@@ -14,6 +14,5 @@ add_mlir_conversion_library(MLIRVectorToSPIRV
MLIRSPIRVDialect
MLIRSPIRVConversion
MLIRVectorDialect
- MLIRVectorTransforms
MLIRTransforms
)
diff --git a/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp b/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp
index c2dd37f48146..a9ed25fbfbe0 100644
--- a/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp
+++ b/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp
@@ -578,6 +578,47 @@ struct VectorShuffleOpConvert final
}
};
+struct VectorInterleaveOpConvert final
+ : public OpConversionPattern<vector::InterleaveOp> {
+ using OpConversionPattern::OpConversionPattern;
+
+ LogicalResult
+ matchAndRewrite(vector::InterleaveOp interleaveOp, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+ // Check the result vector type.
+ VectorType oldResultType = interleaveOp.getResultVectorType();
+ Type newResultType = getTypeConverter()->convertType(oldResultType);
+ if (!newResultType)
+ return rewriter.notifyMatchFailure(interleaveOp,
+ "unsupported result vector type");
+
+ // Interleave the indices.
+ VectorType sourceType = interleaveOp.getSourceVectorType();
+ int n = sourceType.getNumElements();
+
+ // Input vectors of size 1 are converted to scalars by the type converter.
+ // We cannot use `spirv::VectorShuffleOp` directly in this case, and need to
+ // use `spirv::CompositeConstructOp`.
+ if (n == 1) {
+ Value newOperands[] = {adaptor.getLhs(), adaptor.getRhs()};
+ rewriter.replaceOpWithNewOp<spirv::CompositeConstructOp>(
+ interleaveOp, newResultType, newOperands);
+ return success();
+ }
+
+ auto seq = llvm::seq<int64_t>(2 * n);
+ auto indices = llvm::map_to_vector(
+ seq, [n](int i) { return (i % 2 ? n : 0) + i / 2; });
+
+ // Emit a SPIR-V shuffle.
+ rewriter.replaceOpWithNewOp<spirv::VectorShuffleOp>(
+ interleaveOp, newResultType, adaptor.getLhs(), adaptor.getRhs(),
+ rewriter.getI32ArrayAttr(indices));
+
+ return success();
+ }
+};
+
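The index formula `(i % 2 ? n : 0) + i / 2` can be sanity-checked in isolation (illustrative snippet, not part of the patch); it reproduces the `[0, 2, 1, 3]` interleaving from the `vector.interleave` example earlier:

```cpp
#include <cstdio>

int main() {
  int n = 2; // two-element inputs, as in the vector<2xi32> example
  for (int i = 0; i < 2 * n; ++i)
    std::printf("%d ", (i % 2 ? n : 0) + i / 2); // prints: 0 2 1 3
  return 0;
}
```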
struct VectorLoadOpConverter final
: public OpConversionPattern<vector::LoadOp> {
using OpConversionPattern::OpConversionPattern;
@@ -822,16 +863,14 @@ void mlir::populateVectorToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
VectorReductionFloatMinMax<CL_FLOAT_MAX_MIN_OPS>,
VectorReductionFloatMinMax<GL_FLOAT_MAX_MIN_OPS>, VectorShapeCast,
VectorInsertStridedSliceOpConvert, VectorShuffleOpConvert,
- VectorSplatPattern, VectorLoadOpConverter, VectorStoreOpConverter>(
- typeConverter, patterns.getContext(), PatternBenefit(1));
+ VectorInterleaveOpConvert, VectorSplatPattern, VectorLoadOpConverter,
+ VectorStoreOpConverter>(typeConverter, patterns.getContext(),
+ PatternBenefit(1));
// Make sure that the more specialized dot product pattern has higher benefit
// than the generic one that extracts all elements.
patterns.add<VectorReductionToFPDotProd>(typeConverter, patterns.getContext(),
PatternBenefit(2));
-
- // Need this until vector.interleave is handled.
- vector::populateVectorInterleaveToShufflePatterns(patterns);
}
void mlir::populateVectorReductionToSPIRVDotProductPatterns(
diff --git a/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp b/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
index 84ae4b52dcf4..7f3e43d0b4cd 100644
--- a/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
@@ -12,6 +12,7 @@
#include "mlir/Dialect/Affine/LoopFusionUtils.h"
#include "mlir/Analysis/SliceAnalysis.h"
+#include "mlir/Analysis/TopologicalSortUtils.h"
#include "mlir/Dialect/Affine/Analysis/AffineAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/Utils.h"
diff --git a/mlir/lib/Dialect/Arith/IR/InferIntRangeInterfaceImpls.cpp b/mlir/lib/Dialect/Arith/IR/InferIntRangeInterfaceImpls.cpp
index 71eb36bb07a6..fbe2ecab8adc 100644
--- a/mlir/lib/Dialect/Arith/IR/InferIntRangeInterfaceImpls.cpp
+++ b/mlir/lib/Dialect/Arith/IR/InferIntRangeInterfaceImpls.cpp
@@ -19,6 +19,16 @@ using namespace mlir;
using namespace mlir::arith;
using namespace mlir::intrange;
+static intrange::OverflowFlags
+convertArithOverflowFlags(arith::IntegerOverflowFlags flags) {
+ intrange::OverflowFlags retFlags = intrange::OverflowFlags::None;
+ if (bitEnumContainsAny(flags, arith::IntegerOverflowFlags::nsw))
+ retFlags |= intrange::OverflowFlags::Nsw;
+ if (bitEnumContainsAny(flags, arith::IntegerOverflowFlags::nuw))
+ retFlags |= intrange::OverflowFlags::Nuw;
+ return retFlags;
+}
+
//===----------------------------------------------------------------------===//
// ConstantOp
//===----------------------------------------------------------------------===//
@@ -38,7 +48,8 @@ void arith::ConstantOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
void arith::AddIOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
SetIntRangeFn setResultRange) {
- setResultRange(getResult(), inferAdd(argRanges));
+ setResultRange(getResult(), inferAdd(argRanges, convertArithOverflowFlags(
+ getOverflowFlags())));
}
//===----------------------------------------------------------------------===//
@@ -47,7 +58,8 @@ void arith::AddIOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
void arith::SubIOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
SetIntRangeFn setResultRange) {
- setResultRange(getResult(), inferSub(argRanges));
+ setResultRange(getResult(), inferSub(argRanges, convertArithOverflowFlags(
+ getOverflowFlags())));
}
//===----------------------------------------------------------------------===//
@@ -56,7 +68,8 @@ void arith::SubIOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
void arith::MulIOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
SetIntRangeFn setResultRange) {
- setResultRange(getResult(), inferMul(argRanges));
+ setResultRange(getResult(), inferMul(argRanges, convertArithOverflowFlags(
+ getOverflowFlags())));
}
//===----------------------------------------------------------------------===//
@@ -302,7 +315,8 @@ void arith::SelectOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
void arith::ShLIOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
SetIntRangeFn setResultRange) {
- setResultRange(getResult(), inferShl(argRanges));
+ setResultRange(getResult(), inferShl(argRanges, convertArithOverflowFlags(
+ getOverflowFlags())));
}
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/ArmSME/Transforms/TileAllocation.cpp b/mlir/lib/Dialect/ArmSME/Transforms/TileAllocation.cpp
index acbbbe9932e1..733e758b4390 100644
--- a/mlir/lib/Dialect/ArmSME/Transforms/TileAllocation.cpp
+++ b/mlir/lib/Dialect/ArmSME/Transforms/TileAllocation.cpp
@@ -46,6 +46,7 @@
//===----------------------------------------------------------------------===//
#include "mlir/Analysis/Liveness.h"
+#include "mlir/Analysis/TopologicalSortUtils.h"
#include "mlir/Dialect/ArmSME/IR/ArmSME.h"
#include "mlir/Dialect/ArmSME/Transforms/Passes.h"
#include "mlir/Dialect/ArmSME/Transforms/Transforms.h"
diff --git a/mlir/lib/Dialect/EmitC/IR/EmitC.cpp b/mlir/lib/Dialect/EmitC/IR/EmitC.cpp
index ef7b7a19489d..20f47574b25a 100644
--- a/mlir/lib/Dialect/EmitC/IR/EmitC.cpp
+++ b/mlir/lib/Dialect/EmitC/IR/EmitC.cpp
@@ -233,7 +233,7 @@ bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
}
//===----------------------------------------------------------------------===//
-// CallOp
+// CallOpaqueOp
//===----------------------------------------------------------------------===//
LogicalResult emitc::CallOpaqueOp::verify() {
diff --git a/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp b/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp
index db1974ddb377..f4573030a457 100644
--- a/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp
+++ b/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#include "mlir/Config/mlir-config.h"
#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
#include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h"
#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVMPass.h"
@@ -39,7 +38,7 @@
using namespace mlir;
-#if MLIR_ENABLE_CUDA_CONVERSIONS
+#if LLVM_HAS_NVPTX_TARGET
namespace {
//===----------------------------------------------------------------------===//
@@ -128,4 +127,4 @@ void mlir::gpu::registerGPUToNVVMPipeline() {
buildLowerToNVVMPassPipeline);
}
-#endif // MLIR_ENABLE_CUDA_CONVERSIONS
+#endif // LLVM_HAS_NVPTX_TARGET
diff --git a/mlir/lib/Dialect/GPU/Transforms/ModuleToBinary.cpp b/mlir/lib/Dialect/GPU/Transforms/ModuleToBinary.cpp
index 836e939a8295..1e7596e8cc4a 100644
--- a/mlir/lib/Dialect/GPU/Transforms/ModuleToBinary.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/ModuleToBinary.cpp
@@ -13,7 +13,6 @@
#include "mlir/Dialect/GPU/Transforms/Passes.h"
-#include "mlir/Config/mlir-config.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
@@ -49,7 +48,7 @@ void GpuModuleToBinaryPass::getDependentDialects(
// Register all GPU related translations.
registry.insert<gpu::GPUDialect>();
registry.insert<LLVM::LLVMDialect>();
-#if MLIR_ENABLE_CUDA_CONVERSIONS
+#if LLVM_HAS_NVPTX_TARGET
registry.insert<NVVM::NVVMDialect>();
#endif
#if MLIR_ENABLE_ROCM_CONVERSIONS
diff --git a/mlir/lib/Dialect/IRDL/IR/IRDL.cpp b/mlir/lib/Dialect/IRDL/IR/IRDL.cpp
index 4eae2b03024c..e4728f55b49d 100644
--- a/mlir/lib/Dialect/IRDL/IR/IRDL.cpp
+++ b/mlir/lib/Dialect/IRDL/IR/IRDL.cpp
@@ -132,22 +132,37 @@ LogicalResult BaseOp::verify() {
return success();
}
+static LogicalResult
+checkSymbolIsTypeOrAttribute(SymbolTableCollection &symbolTable,
+ Operation *source, SymbolRefAttr symbol) {
+ Operation *targetOp = symbolTable.lookupNearestSymbolFrom(source, symbol);
+ if (!targetOp)
+ return source->emitOpError() << "symbol '" << symbol << "' not found";
+
+ if (!isa<TypeOp, AttributeOp>(targetOp))
+ return source->emitOpError() << "symbol '" << symbol
+ << "' does not refer to a type or attribute "
+ "definition (refers to '"
+ << targetOp->getName() << "')";
+
+ return success();
+}
+
LogicalResult BaseOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
std::optional<SymbolRefAttr> baseRef = getBaseRef();
if (!baseRef)
return success();
- TypeOp typeOp = symbolTable.lookupNearestSymbolFrom<TypeOp>(*this, *baseRef);
- if (typeOp)
- return success();
+ return checkSymbolIsTypeOrAttribute(symbolTable, *this, *baseRef);
+}
- AttributeOp attrOp =
- symbolTable.lookupNearestSymbolFrom<AttributeOp>(*this, *baseRef);
- if (attrOp)
+LogicalResult
+ParametricOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
+ std::optional<SymbolRefAttr> baseRef = getBaseType();
+ if (!baseRef)
return success();
- return emitOpError() << "'" << *baseRef
- << "' does not refer to a type or attribute definition";
+ return checkSymbolIsTypeOrAttribute(symbolTable, *this, *baseRef);
}
/// Parse a value with its variadicity first. By default, the variadicity is
diff --git a/mlir/lib/Dialect/Index/IR/InferIntRangeInterfaceImpls.cpp b/mlir/lib/Dialect/Index/IR/InferIntRangeInterfaceImpls.cpp
index b6b8a136791c..64adb6b85052 100644
--- a/mlir/lib/Dialect/Index/IR/InferIntRangeInterfaceImpls.cpp
+++ b/mlir/lib/Dialect/Index/IR/InferIntRangeInterfaceImpls.cpp
@@ -44,19 +44,32 @@ void BoolConstantOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
// we take the 64-bit result).
//===----------------------------------------------------------------------===//
+// Some arithmetic inference functions allow specifying special overflow / wrap
+// behavior. We do not require this for the IndexOps and use this helper to call
+// the inference function without any `OverflowFlags`.
+static std::function<ConstantIntRanges(ArrayRef<ConstantIntRanges>)>
+inferWithoutOverflowFlags(InferRangeWithOvfFlagsFn inferWithOvfFn) {
+ return [inferWithOvfFn](ArrayRef<ConstantIntRanges> argRanges) {
+ return inferWithOvfFn(argRanges, OverflowFlags::None);
+ };
+}
+
void AddOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
SetIntRangeFn setResultRange) {
- setResultRange(getResult(), inferIndexOp(inferAdd, argRanges, CmpMode::Both));
+ setResultRange(getResult(), inferIndexOp(inferWithoutOverflowFlags(inferAdd),
+ argRanges, CmpMode::Both));
}
void SubOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
SetIntRangeFn setResultRange) {
- setResultRange(getResult(), inferIndexOp(inferSub, argRanges, CmpMode::Both));
+ setResultRange(getResult(), inferIndexOp(inferWithoutOverflowFlags(inferSub),
+ argRanges, CmpMode::Both));
}
void MulOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
SetIntRangeFn setResultRange) {
- setResultRange(getResult(), inferIndexOp(inferMul, argRanges, CmpMode::Both));
+ setResultRange(getResult(), inferIndexOp(inferWithoutOverflowFlags(inferMul),
+ argRanges, CmpMode::Both));
}
void DivUOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
@@ -127,7 +140,8 @@ void MinUOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
void ShlOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
SetIntRangeFn setResultRange) {
- setResultRange(getResult(), inferIndexOp(inferShl, argRanges, CmpMode::Both));
+ setResultRange(getResult(), inferIndexOp(inferWithoutOverflowFlags(inferShl),
+ argRanges, CmpMode::Both));
}
void ShrSOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
diff --git a/mlir/lib/Dialect/LLVMIR/Transforms/CMakeLists.txt b/mlir/lib/Dialect/LLVMIR/Transforms/CMakeLists.txt
index c80494a44011..728885fcbeaf 100644
--- a/mlir/lib/Dialect/LLVMIR/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/LLVMIR/Transforms/CMakeLists.txt
@@ -6,7 +6,6 @@ add_mlir_dialect_library(MLIRLLVMIRTransforms
LegalizeForExport.cpp
OptimizeForNVVM.cpp
RequestCWrappers.cpp
- TypeConsistency.cpp
DEPENDS
MLIRLLVMPassIncGen
diff --git a/mlir/lib/Dialect/LLVMIR/Transforms/TypeConsistency.cpp b/mlir/lib/Dialect/LLVMIR/Transforms/TypeConsistency.cpp
deleted file mode 100644
index 0a372ad0c52f..000000000000
--- a/mlir/lib/Dialect/LLVMIR/Transforms/TypeConsistency.cpp
+++ /dev/null
@@ -1,575 +0,0 @@
-//===- TypeConsistency.cpp - Rewrites to improve type consistency ---------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "mlir/Dialect/LLVMIR/Transforms/TypeConsistency.h"
-#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
-#include "llvm/ADT/TypeSwitch.h"
-
-namespace mlir {
-namespace LLVM {
-#define GEN_PASS_DEF_LLVMTYPECONSISTENCY
-#include "mlir/Dialect/LLVMIR/Transforms/Passes.h.inc"
-} // namespace LLVM
-} // namespace mlir
-
-using namespace mlir;
-using namespace LLVM;
-
-//===----------------------------------------------------------------------===//
-// Utils
-//===----------------------------------------------------------------------===//
-
-/// Checks that a pointer value has a pointee type hint consistent with the
-/// expected type. Returns the type it actually hints to if it differs, or
-/// nullptr if the type is consistent or impossible to analyze.
-static Type isElementTypeInconsistent(Value addr, Type expectedType) {
- auto defOp = dyn_cast_or_null<GetResultPtrElementType>(addr.getDefiningOp());
- if (!defOp)
- return nullptr;
-
- Type elemType = defOp.getResultPtrElementType();
- if (!elemType)
- return nullptr;
-
- if (elemType == expectedType)
- return nullptr;
-
- return elemType;
-}
-
-//===----------------------------------------------------------------------===//
-// CanonicalizeAlignedGep
-//===----------------------------------------------------------------------===//
-
-/// Returns the amount of bytes the provided GEP elements will offset the
-/// pointer by. Returns nullopt if the offset could not be computed.
-static std::optional<uint64_t> gepToByteOffset(DataLayout &layout, GEPOp gep) {
-
- SmallVector<uint32_t> indices;
- // Ensures all indices are static and fetches them.
- for (auto index : gep.getIndices()) {
- IntegerAttr indexInt = llvm::dyn_cast_if_present<IntegerAttr>(index);
- if (!indexInt)
- return std::nullopt;
- int32_t gepIndex = indexInt.getInt();
- if (gepIndex < 0)
- return std::nullopt;
- indices.push_back(static_cast<uint32_t>(gepIndex));
- }
-
- uint64_t offset = indices[0] * layout.getTypeSize(gep.getElemType());
-
- Type currentType = gep.getElemType();
- for (uint32_t index : llvm::drop_begin(indices)) {
- bool shouldCancel =
- TypeSwitch<Type, bool>(currentType)
- .Case([&](LLVMArrayType arrayType) {
- if (arrayType.getNumElements() <= index)
- return true;
- offset += index * layout.getTypeSize(arrayType.getElementType());
- currentType = arrayType.getElementType();
- return false;
- })
- .Case([&](LLVMStructType structType) {
- ArrayRef<Type> body = structType.getBody();
- if (body.size() <= index)
- return true;
- for (uint32_t i = 0; i < index; i++) {
- if (!structType.isPacked())
- offset = llvm::alignTo(offset,
- layout.getTypeABIAlignment(body[i]));
- offset += layout.getTypeSize(body[i]);
- }
- currentType = body[index];
- return false;
- })
- .Default([](Type) { return true; });
-
- if (shouldCancel)
- return std::nullopt;
- }
-
- return offset;
-}
-
-/// Fills in `equivalentIndicesOut` with GEP indices that would be equivalent to
-/// offsetting a pointer by `offset` bytes, assuming the GEP has `base` as base
-/// type.
-static LogicalResult
-findIndicesForOffset(DataLayout &layout, Type base, uint64_t offset,
- SmallVectorImpl<GEPArg> &equivalentIndicesOut) {
-
- uint64_t baseSize = layout.getTypeSize(base);
- uint64_t rootIndex = offset / baseSize;
- if (rootIndex > std::numeric_limits<uint32_t>::max())
- return failure();
- equivalentIndicesOut.push_back(rootIndex);
-
- uint64_t distanceToStart = rootIndex * baseSize;
-
-#ifndef NDEBUG
- auto isWithinCurrentType = [&](Type currentType) {
- return offset < distanceToStart + layout.getTypeSize(currentType);
- };
-#endif
-
- Type currentType = base;
- while (distanceToStart < offset) {
- // While an index that does not perfectly align with offset has not been
- // reached...
-
- assert(isWithinCurrentType(currentType));
-
- bool shouldCancel =
- TypeSwitch<Type, bool>(currentType)
- .Case([&](LLVMArrayType arrayType) {
- // Find which element of the array contains the offset.
- uint64_t elemSize =
- layout.getTypeSize(arrayType.getElementType());
- uint64_t index = (offset - distanceToStart) / elemSize;
- equivalentIndicesOut.push_back(index);
- distanceToStart += index * elemSize;
-
- // Then, try to find where in the element the offset is. If the
- // offset is exactly the beginning of the element, the loop is
- // complete.
- currentType = arrayType.getElementType();
-
- // Only continue if the element in question can be indexed using
- // an i32.
- return index > std::numeric_limits<uint32_t>::max();
- })
- .Case([&](LLVMStructType structType) {
- ArrayRef<Type> body = structType.getBody();
- uint32_t index = 0;
-
- // Walk over the elements of the struct to find in which of them
- // the offset is.
- for (Type elem : body) {
- uint64_t elemSize = layout.getTypeSize(elem);
- if (!structType.isPacked()) {
- distanceToStart = llvm::alignTo(
- distanceToStart, layout.getTypeABIAlignment(elem));
- // If the offset is in padding, cancel the rewrite.
- if (offset < distanceToStart)
- return true;
- }
-
- if (offset < distanceToStart + elemSize) {
- // The offset is within this element, stop iterating the
- // struct and look within the current element.
- equivalentIndicesOut.push_back(index);
- currentType = elem;
- return false;
- }
-
- // The offset is not within this element, continue walking over
- // the struct.
- distanceToStart += elemSize;
- index++;
- }
-
- // The offset was supposed to be within this struct but is not.
- // This can happen if the offset points into final padding.
- // Anyway, nothing can be done.
- return true;
- })
- .Default([](Type) {
- // If the offset is within a type that cannot be split, no indices
- // will yield this offset. This can happen if the offset is not
- // perfectly aligned with a leaf type.
- // TODO: support vectors.
- return true;
- });
-
- if (shouldCancel)
- return failure();
- }
-
- return success();
-}
-
-/// Returns the consistent type for the GEP if the GEP is not type-consistent.
-/// Returns failure if the GEP is already consistent.
-static FailureOr<Type> getRequiredConsistentGEPType(GEPOp gep) {
- // GEP of typed pointers are not supported.
- if (!gep.getElemType())
- return failure();
-
- std::optional<Type> maybeBaseType = gep.getElemType();
- if (!maybeBaseType)
- return failure();
- Type baseType = *maybeBaseType;
-
- Type typeHint = isElementTypeInconsistent(gep.getBase(), baseType);
- if (!typeHint)
- return failure();
- return typeHint;
-}
-
-LogicalResult
-CanonicalizeAlignedGep::matchAndRewrite(GEPOp gep,
- PatternRewriter &rewriter) const {
- FailureOr<Type> typeHint = getRequiredConsistentGEPType(gep);
- if (failed(typeHint)) {
- // GEP is already canonical, nothing to do here.
- return failure();
- }
-
- DataLayout layout = DataLayout::closest(gep);
- std::optional<uint64_t> desiredOffset = gepToByteOffset(layout, gep);
- if (!desiredOffset)
- return failure();
-
- SmallVector<GEPArg> newIndices;
- if (failed(
- findIndicesForOffset(layout, *typeHint, *desiredOffset, newIndices)))
- return failure();
-
- rewriter.replaceOpWithNewOp<GEPOp>(
- gep, LLVM::LLVMPointerType::get(getContext()), *typeHint, gep.getBase(),
- newIndices, gep.getInbounds());
-
- return success();
-}
-
-namespace {
-/// Class abstracting over both array and struct types, turning each into ranges
-/// of their sub-types.
-class DestructurableTypeRange
- : public llvm::indexed_accessor_range<DestructurableTypeRange,
- DestructurableTypeInterface, Type,
- Type *, Type> {
-
- using Base = llvm::indexed_accessor_range<
- DestructurableTypeRange, DestructurableTypeInterface, Type, Type *, Type>;
-
-public:
- using Base::Base;
-
- /// Constructs a DestructurableTypeRange from either a LLVMStructType or
- /// LLVMArrayType.
- explicit DestructurableTypeRange(DestructurableTypeInterface base)
- : Base(base, 0, [&]() -> ptrdiff_t {
- return TypeSwitch<DestructurableTypeInterface, ptrdiff_t>(base)
- .Case([](LLVMStructType structType) {
- return structType.getBody().size();
- })
- .Case([](LLVMArrayType arrayType) {
- return arrayType.getNumElements();
- })
- .Default([](auto) -> ptrdiff_t {
- llvm_unreachable(
- "Only LLVMStructType or LLVMArrayType supported");
- });
- }()) {}
-
- /// Returns true if this is a range over a packed struct.
- bool isPacked() const {
- if (auto structType = dyn_cast<LLVMStructType>(getBase()))
- return structType.isPacked();
- return false;
- }
-
-private:
- static Type dereference(DestructurableTypeInterface base, ptrdiff_t index) {
- // i32 chosen because the implementations of ArrayType and StructType
- // specifically expect it to be 32 bit. They will fail otherwise.
- Type result = base.getTypeAtIndex(
- IntegerAttr::get(IntegerType::get(base.getContext(), 32), index));
- assert(result && "Should always succeed");
- return result;
- }
-
- friend Base;
-};
-} // namespace
-
-/// Returns the list of elements of `destructurableType` that are written to by
-/// a store operation writing `storeSize` bytes at `storeOffset`.
-/// `storeOffset` is required to cleanly point to an immediate element within
-/// the type. If the write operation were to write to any padding, write beyond
-/// the aggregate or partially write to a non-aggregate, failure is returned.
-static FailureOr<DestructurableTypeRange>
-getWrittenToFields(const DataLayout &dataLayout,
- DestructurableTypeInterface destructurableType,
- unsigned storeSize, unsigned storeOffset) {
- DestructurableTypeRange destructurableTypeRange(destructurableType);
-
- unsigned currentOffset = 0;
- for (; !destructurableTypeRange.empty();
- destructurableTypeRange = destructurableTypeRange.drop_front()) {
- Type type = destructurableTypeRange.front();
- if (!destructurableTypeRange.isPacked()) {
- unsigned alignment = dataLayout.getTypeABIAlignment(type);
- currentOffset = llvm::alignTo(currentOffset, alignment);
- }
-
- // currentOffset is guaranteed to be equal to offset since offset is either
- // 0 or stems from a type-consistent GEP indexing into just a single
- // aggregate.
- if (currentOffset == storeOffset)
- break;
-
- assert(currentOffset < storeOffset &&
- "storeOffset should cleanly point into an immediate field");
-
- currentOffset += dataLayout.getTypeSize(type);
- }
-
- size_t exclusiveEnd = 0;
- for (; exclusiveEnd < destructurableTypeRange.size() && storeSize > 0;
- exclusiveEnd++) {
- if (!destructurableTypeRange.isPacked()) {
- unsigned alignment =
- dataLayout.getTypeABIAlignment(destructurableTypeRange[exclusiveEnd]);
- // No padding allowed inbetween fields at this point in time.
- if (!llvm::isAligned(llvm::Align(alignment), currentOffset))
- return failure();
- }
-
- unsigned fieldSize =
- dataLayout.getTypeSize(destructurableTypeRange[exclusiveEnd]);
- if (fieldSize > storeSize) {
- // Partial writes into an aggregate are okay since subsequent pattern
- // applications can further split these up into writes into the
- // sub-elements.
- auto subAggregate = dyn_cast<DestructurableTypeInterface>(
- destructurableTypeRange[exclusiveEnd]);
- if (!subAggregate)
- return failure();
-
- // Avoid splitting redundantly by making sure the store into the
- // aggregate can actually be split.
- if (failed(getWrittenToFields(dataLayout, subAggregate, storeSize,
- /*storeOffset=*/0)))
- return failure();
-
- return destructurableTypeRange.take_front(exclusiveEnd + 1);
- }
- currentOffset += fieldSize;
- storeSize -= fieldSize;
- }
-
- // If the storeSize is not 0 at this point we are writing past the aggregate
- // as a whole. Abort.
- if (storeSize > 0)
- return failure();
- return destructurableTypeRange.take_front(exclusiveEnd);
-}
-
-/// Splits a store of the vector `value` into `address` at `storeOffset` into
-/// multiple stores of each element with the goal of each generated store
-/// becoming type-consistent through subsequent pattern applications.
-static void splitVectorStore(const DataLayout &dataLayout, Location loc,
- RewriterBase &rewriter, Value address,
- TypedValue<VectorType> value,
- unsigned storeOffset) {
- VectorType vectorType = value.getType();
- unsigned elementSize = dataLayout.getTypeSize(vectorType.getElementType());
-
- // Extract every element in the vector and store it in the given address.
- for (size_t index : llvm::seq<size_t>(0, vectorType.getNumElements())) {
- auto pos =
- rewriter.create<ConstantOp>(loc, rewriter.getI32IntegerAttr(index));
- auto extractOp = rewriter.create<ExtractElementOp>(loc, value, pos);
-
- // For convenience, we do indexing by calculating the final byte offset.
- // Other patterns will turn this into a type-consistent GEP.
- auto gepOp = rewriter.create<GEPOp>(
- loc, address.getType(), rewriter.getI8Type(), address,
- ArrayRef<GEPArg>{
- static_cast<int32_t>(storeOffset + index * elementSize)});
-
- rewriter.create<StoreOp>(loc, extractOp, gepOp);
- }
-}
-
-/// Splits a store of the integer `value` into `address` at `storeOffset` into
-/// multiple stores to each 'writtenToFields', making each store operation
-/// type-consistent.
-static void splitIntegerStore(const DataLayout &dataLayout, Location loc,
- RewriterBase &rewriter, Value address,
- Value value, unsigned storeSize,
- unsigned storeOffset,
- DestructurableTypeRange writtenToFields) {
- unsigned currentOffset = storeOffset;
- for (Type type : writtenToFields) {
- unsigned fieldSize = dataLayout.getTypeSize(type);
-
- // Extract the data out of the integer by first shifting right and then
- // truncating it.
- auto pos = rewriter.create<ConstantOp>(
- loc, rewriter.getIntegerAttr(value.getType(),
- (currentOffset - storeOffset) * 8));
-
- auto shrOp = rewriter.create<LShrOp>(loc, value, pos);
-
- // If we are doing a partial write into a direct field the remaining
- // `storeSize` will be less than the size of the field. We have to truncate
- // to the `storeSize` to avoid creating a store that wasn't in the original
- // code.
- IntegerType fieldIntType =
- rewriter.getIntegerType(std::min(fieldSize, storeSize) * 8);
- Value valueToStore = rewriter.create<TruncOp>(loc, fieldIntType, shrOp);
-
- // We create an `i8` indexed GEP here as that is the easiest (offset is
- // already known). Other patterns turn this into a type-consistent GEP.
- auto gepOp = rewriter.create<GEPOp>(
- loc, address.getType(), rewriter.getI8Type(), address,
- ArrayRef<GEPArg>{static_cast<int32_t>(currentOffset)});
- rewriter.create<StoreOp>(loc, valueToStore, gepOp);
-
- // No need to care about padding here since we already checked previously
- // that no padding exists in this range.
- currentOffset += fieldSize;
- storeSize -= fieldSize;
- }
-}
-
-LogicalResult SplitStores::matchAndRewrite(StoreOp store,
- PatternRewriter &rewriter) const {
- Type sourceType = store.getValue().getType();
- if (!isa<IntegerType, VectorType>(sourceType)) {
- // We currently only support integer and vector sources.
- return failure();
- }
-
- Type typeHint = isElementTypeInconsistent(store.getAddr(), sourceType);
- if (!typeHint) {
- // Nothing to do, since it is already consistent.
- return failure();
- }
-
- auto dataLayout = DataLayout::closest(store);
-
- unsigned storeSize = dataLayout.getTypeSize(sourceType);
- unsigned offset = 0;
- Value address = store.getAddr();
- if (auto gepOp = address.getDefiningOp<GEPOp>()) {
- // Currently only handle canonical GEPs with exactly two indices,
- // indexing a single aggregate deep.
- // If the GEP is not canonical we have to fail, otherwise we would not
- // create type-consistent IR.
- if (gepOp.getIndices().size() != 2 ||
- succeeded(getRequiredConsistentGEPType(gepOp)))
- return failure();
-
- // If the size of the element indexed by the GEP is smaller than the store
- // size, it is pointing into the middle of an aggregate with the store
- // storing into multiple adjacent elements. Destructure into the base
- // address of the aggregate with a store offset.
- if (storeSize > dataLayout.getTypeSize(gepOp.getResultPtrElementType())) {
- std::optional<uint64_t> byteOffset = gepToByteOffset(dataLayout, gepOp);
- if (!byteOffset)
- return failure();
-
- offset = *byteOffset;
- typeHint = gepOp.getElemType();
- address = gepOp.getBase();
- }
- }
-
- auto destructurableType = dyn_cast<DestructurableTypeInterface>(typeHint);
- if (!destructurableType)
- return failure();
-
- FailureOr<DestructurableTypeRange> writtenToElements =
- getWrittenToFields(dataLayout, destructurableType, storeSize, offset);
- if (failed(writtenToElements))
- return failure();
-
- if (writtenToElements->size() <= 1) {
- // Other patterns should take care of this case, we are only interested in
- // splitting element stores.
- return failure();
- }
-
- if (isa<IntegerType>(sourceType)) {
- splitIntegerStore(dataLayout, store.getLoc(), rewriter, address,
- store.getValue(), storeSize, offset, *writtenToElements);
- rewriter.eraseOp(store);
- return success();
- }
-
- // Add a reasonable bound to not split very large vectors that would end up
- // generating lots of code.
- if (dataLayout.getTypeSizeInBits(sourceType) > maxVectorSplitSize)
- return failure();
-
- // Vector types are simply split into its elements and new stores generated
- // with those. Subsequent pattern applications will split these stores further
- // if required.
- splitVectorStore(dataLayout, store.getLoc(), rewriter, address,
- cast<TypedValue<VectorType>>(store.getValue()), offset);
- rewriter.eraseOp(store);
- return success();
-}
-
-LogicalResult SplitGEP::matchAndRewrite(GEPOp gepOp,
- PatternRewriter &rewriter) const {
- FailureOr<Type> typeHint = getRequiredConsistentGEPType(gepOp);
- if (succeeded(typeHint) || gepOp.getIndices().size() <= 2) {
- // GEP is not canonical or a single aggregate deep, nothing to do here.
- return failure();
- }
-
- auto indexToGEPArg =
- [](GEPIndicesAdaptor<ValueRange>::value_type index) -> GEPArg {
- if (auto integerAttr = dyn_cast<IntegerAttr>(index))
- return integerAttr.getValue().getSExtValue();
- return cast<Value>(index);
- };
-
- GEPIndicesAdaptor<ValueRange> indices = gepOp.getIndices();
-
- auto splitIter = std::next(indices.begin(), 2);
-
- // Split of the first GEP using the first two indices.
- auto subGepOp = rewriter.create<GEPOp>(
- gepOp.getLoc(), gepOp.getType(), gepOp.getElemType(), gepOp.getBase(),
- llvm::map_to_vector(llvm::make_range(indices.begin(), splitIter),
- indexToGEPArg),
- gepOp.getInbounds());
-
- // The second GEP indexes on the result pointer element type of the previous
- // with all the remaining indices and a zero upfront. If this GEP has more
- // than two indices remaining it'll be further split in subsequent pattern
- // applications.
- SmallVector<GEPArg> newIndices = {0};
- llvm::transform(llvm::make_range(splitIter, indices.end()),
- std::back_inserter(newIndices), indexToGEPArg);
- rewriter.replaceOpWithNewOp<GEPOp>(gepOp, gepOp.getType(),
- subGepOp.getResultPtrElementType(),
- subGepOp, newIndices, gepOp.getInbounds());
- return success();
-}
-
-//===----------------------------------------------------------------------===//
-// Type consistency pass
-//===----------------------------------------------------------------------===//
-
-namespace {
-struct LLVMTypeConsistencyPass
- : public LLVM::impl::LLVMTypeConsistencyBase<LLVMTypeConsistencyPass> {
- void runOnOperation() override {
- RewritePatternSet rewritePatterns(&getContext());
- rewritePatterns.add<CanonicalizeAlignedGep>(&getContext());
- rewritePatterns.add<SplitStores>(&getContext(), maxVectorSplitSize);
- rewritePatterns.add<SplitGEP>(&getContext());
- FrozenRewritePatternSet frozen(std::move(rewritePatterns));
-
- if (failed(applyPatternsAndFoldGreedily(getOperation(), frozen)))
- signalPassFailure();
- }
-};
-} // namespace
-
-std::unique_ptr<Pass> LLVM::createTypeConsistencyPass() {
- return std::make_unique<LLVMTypeConsistencyPass>();
-}
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
index 3627ff6617ed..f35ab3b856b4 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
@@ -71,6 +71,99 @@ bool linalg::isaCopyOpInterface(LinalgOp linalgOp) {
}
//===----------------------------------------------------------------------===//
+// FillOpInterface implementation
+//===----------------------------------------------------------------------===//
+std::optional<Value> linalg::isaFillOpInterface(GenericOp genericOp) {
+ // Structural.
+ if (genericOp.getNumParallelLoops() != genericOp.getNumLoops() ||
+ genericOp.getNumDpsInputs() != 1 || genericOp.getNumDpsInits() != 1)
+ return std::nullopt;
+
+ // Input should be referenced and init should not.
+ if (!genericOp.payloadUsesValueFromOperand(genericOp.getDpsInputOperand(0)) ||
+ genericOp.payloadUsesValueFromOperand(genericOp.getDpsInitOperand(0)))
+ return std::nullopt;
+
+ OpOperand *value = genericOp.getDpsInputOperand(0);
+ if (!genericOp.isScalar(value))
+ return std::nullopt;
+
+ Block *body = genericOp.getBody();
+ if (body->getOperations().size() != 1)
+ return std::nullopt;
+
+ auto yieldOp = dyn_cast<linalg::YieldOp>(body->back());
+ if (!yieldOp || yieldOp.getNumOperands() != 1 ||
+ yieldOp->getOperand(0) != body->getArgument(0))
+ return std::nullopt;
+ return value->get();
+}
+
+//===----------------------------------------------------------------------===//
+// Elementwise Single Unary/Binary-OpInterface implementation
+//===----------------------------------------------------------------------===//
+static bool
+isaElemwiseSingleUnaryOrBinaryOpInterface(linalg::GenericOp genericOp,
+ unsigned arity) {
+ // Check that all loops are parallel and the op has pure tensor semantics.
+ if (genericOp.getNumParallelLoops() != genericOp.getNumLoops() ||
+ genericOp.getNumLoops() < 1 || !genericOp.hasPureTensorSemantics())
+ return false;
+
+ // Check there are `arity` inputs, one output, and that all indexing maps are identities.
+ if (genericOp.getNumDpsInputs() != arity || genericOp.getNumDpsInits() != 1 ||
+ !llvm::all_of(genericOp.getIndexingMapsArray(),
+ [](AffineMap map) { return map.isIdentity(); }))
+ return false;
+
+ // Init should not be referenced for elementwise operations.
+ if (genericOp.payloadUsesValueFromOperand(genericOp.getDpsInitOperand(0)))
+ return false;
+
+ // A linalg.generic could be a series of elementwise ops, e.g. exp(neg(x)),
+ // such as results from producer-consumer fusion. Here we restrict matching
+ // to bodies with exactly two ops: the single elementwise op followed by a
+ // yield.
+ Block *body = genericOp.getBody();
+ if (body->getOperations().size() != 2)
+ return false;
+
+ Operation *op = &body->front();
+ if (op->getNumOperands() != arity || op->getNumResults() != 1)
+ return false;
+
+ auto yieldOp = dyn_cast<linalg::YieldOp>(body->back());
+ if (!yieldOp || yieldOp.getNumOperands() != 1 ||
+ yieldOp->getOperand(0).getDefiningOp() != op)
+ return false;
+ return true;
+}
+
+bool linalg::isaElemwiseSingleUnaryOpInterface(linalg::GenericOp genericOp) {
+ // All basic elemwise checks.
+ if (!isaElemwiseSingleUnaryOrBinaryOpInterface(genericOp, 1))
+ return false;
+
+ // Check the input is actually used.
+ if (!genericOp.payloadUsesValueFromOperand(genericOp.getDpsInputOperand(0)))
+ return false;
+ return true;
+}
+
+bool linalg::isaElemwiseSingleBinaryOpInterface(linalg::GenericOp genericOp) {
+ if (!isaElemwiseSingleUnaryOrBinaryOpInterface(genericOp, 2))
+ return false;
+
+ // Check both inputs are used (elementwise).
+ OpOperand *inputOpOperand0 = genericOp.getDpsInputOperand(0);
+ OpOperand *inputOpOperand1 = genericOp.getDpsInputOperand(1);
+ if (!genericOp.payloadUsesValueFromOperand(inputOpOperand0) ||
+ !genericOp.payloadUsesValueFromOperand(inputOpOperand1))
+ return false;
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
// ContractionOpInterface implementation
//===----------------------------------------------------------------------===//
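
Since isaFillOpInterface returns the fill value on a match, a caller can test and rewrite in one step. A sketch of that use, assuming a rewriter and a candidate genericOp in scope (it mirrors the specialization change later in this patch):

    if (std::optional<Value> fillValue = linalg::isaFillOpInterface(genericOp)) {
      // Raise the matched generic back to the named fill form.
      rewriter.replaceOpWithNewOp<linalg::FillOp>(
          genericOp, *fillValue, genericOp.getDpsInits()[0]);
    }
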
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index 13582a140a96..9b3121774ab3 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -2523,7 +2523,8 @@ DiagnosedSilenceableFailure transform::TileReductionUsingForOp::applyToOne(
if (failed(result))
return emitDefaultSilenceableFailure(target);
- results.push_back(result->initialOp);
+ for (Value initValue : result->initialValues)
+ results.push_back(initValue.getDefiningOp());
results.push_back(result->parallelTiledOp);
results.push_back(result->mergeOp);
results.push_back(result->loops.front());
@@ -2574,7 +2575,8 @@ DiagnosedSilenceableFailure transform::TileReductionUsingForallOp::applyToOne(
diag.attachNote(target.getLoc()) << "target operation";
return diag;
}
- results.push_back(result->initialOp);
+ for (Value initValue : result->initialValues)
+ results.push_back(initValue.getDefiningOp());
results.push_back(result->parallelTiledOp);
results.push_back(result->mergeOp);
results.push_back(result->loops);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/BlockPackMatmul.cpp b/mlir/lib/Dialect/Linalg/Transforms/BlockPackMatmul.cpp
index c07d1387ec75..91d4efa3372b 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/BlockPackMatmul.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/BlockPackMatmul.cpp
@@ -244,8 +244,7 @@ struct BlockPackMatmul<linalg::GenericOp>
LogicalResult matchAndRewrite(linalg::GenericOp linalgOp,
PatternRewriter &rewriter) const override {
// Match suitable generics.
- if (failed(linalg::detail::verifyContractionInterface(
- linalgOp.getOperation()))) {
+ if (!linalg::isaContractionOpInterface(linalgOp)) {
return rewriter.notifyMatchFailure(linalgOp, "not a contraction");
}
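
isaContractionOpInterface is the public boolean predicate; it succeeds in essentially the cases where the detail-level verifier the old code called does, roughly (a sketch, not the verbatim implementation):

    bool isContraction = succeeded(
        linalg::detail::verifyContractionInterface(linalgOp.getOperation()));
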
diff --git a/mlir/lib/Dialect/Linalg/Transforms/MeshShardingInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/Transforms/MeshShardingInterfaceImpl.cpp
index 146e88076566..24001c543f35 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/MeshShardingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/MeshShardingInterfaceImpl.cpp
@@ -36,6 +36,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TypeSwitch.h"
#include <iterator>
+#include <numeric>
#include <optional>
#include <utility>
@@ -155,12 +156,12 @@ static Value createDestinationPassingStyleInitOperand(
tensor::getMixedSizes(builder, builder.getLoc(), spmdizedOperand);
PartialReductionOpInterface partialReductionIface =
llvm::cast<PartialReductionOpInterface>(op.getOperation());
- FailureOr<Operation *> reductionNeutralTensorOp =
+ assert(op->getNumResults() == 1 && "Multiple results not supported.");
+ FailureOr<SmallVector<Value>> reductionNeutralTensor =
partialReductionIface.generateInitialTensorForPartialReduction(
builder, builder.getLoc(), shape, {});
- assert(succeeded(reductionNeutralTensorOp));
- builder.create<scf::YieldOp>(
- reductionNeutralTensorOp.value()->getResult(0));
+ assert(succeeded(reductionNeutralTensor));
+ builder.create<scf::YieldOp>(reductionNeutralTensor.value());
}
return ifOp.getResult(0);
}
@@ -173,8 +174,7 @@ static SmallVector<Value> createDestinationPassingStyleInitOperands(
ImplicitLocOpBuilder &builder) {
// TODO: add support for multiple destination passing style initial value
// operands.
- // PartialReductionOpInterface::generateInitialTensorForPartialReduction
- // needs to also support multiple DPS initial operands.
+ assert(op.getNumDpsInits() == 1 && "Multiple initial values not supported.");
SmallVector<Value> newOperands = llvm::to_vector(spmdizedOperands);
auto operandIdx = op.getDpsInitOperand(0)->getOperandNumber();
Value spmdizedInitOperand =
@@ -279,6 +279,20 @@ struct StructuredOpShardingInterface
return res;
}
+ SmallVector<ReductionKind>
+ getReductionLoopIteratorKinds(Operation *op) const {
+ LinalgOp linalgOp = llvm::cast<LinalgOp>(op);
+ SmallVector<utils::IteratorType> iteratorTypes =
+ linalgOp.getIteratorTypesArray();
+ unsigned reductionItersCount = std::accumulate(
+ iteratorTypes.begin(), iteratorTypes.end(), 0,
+ [](unsigned count, utils::IteratorType iter) {
+ return count + (iter == utils::IteratorType::reduction);
+ });
+ mesh::ReductionKind reductionKind = getReductionKindOfLinalgOp(linalgOp);
+ return SmallVector<ReductionKind>(reductionItersCount, reductionKind);
+ }
+
LogicalResult spmdize(Operation *op, ArrayRef<Value> spmdizedOperands,
ArrayRef<MeshShardingAttr> operandShardings,
ArrayRef<MeshShardingAttr> resultShardings,
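
getReductionLoopIteratorKinds returns one entry per reduction iterator, all carrying the op's single reduction kind. The count could equivalently be computed with llvm::count_if; a sketch under that assumption:

    unsigned reductionItersCount = llvm::count_if(
        linalgOp.getIteratorTypesArray(), [](utils::IteratorType iter) {
          return iter == utils::IteratorType::reduction;
        });
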
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Specialize.cpp b/mlir/lib/Dialect/Linalg/Transforms/Specialize.cpp
index 4c437b5db2c7..2bc4d7fbfadc 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Specialize.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Specialize.cpp
@@ -14,13 +14,50 @@
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/IR/LinalgInterfaces.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
+#include "mlir/Dialect/Math/IR/Math.h"
#include "llvm/Support/Debug.h"
#define DEBUG_TYPE "linalg-specialization"
+#define REPLACE_BINARY_OP(NEWOP, OPERANDS_SWAP) \
+ (rewriter.replaceOpWithNewOp<NEWOP>( \
+ genericOp, \
+ ValueRange{genericOp.getDpsInputs()[(OPERANDS_SWAP) ? 1 : 0], \
+ genericOp.getDpsInputs()[(OPERANDS_SWAP) ? 0 : 1]}, \
+ ValueRange{genericOp.getDpsInits()[0]}))
+
+#define REPLACE_UNARY_OP(NEWOP) \
+ (rewriter.replaceOpWithNewOp<NEWOP>(genericOp, \
+ ValueRange{genericOp.getDpsInputs()[0]}, \
+ ValueRange{genericOp.getDpsInits()[0]}))
+
using namespace mlir;
using namespace mlir::linalg;
+// Given an elementwise single binary linalg generic op, checks whether the
+// binary op accesses its operands in swapped order, e.g. this differentiates
+// between a linalg.generic body that contains:
+// ^bb0(%a: f32, %b: f32, %c : f32):
+// %0 = arith.subf %a, %b : f32
+// linalg.yield %0: f32
+// and one that contains:
+// ^bb0(%a: f32, %b: f32, %c : f32):
+// %0 = arith.subf %b, %a : f32
+// linalg.yield %0: f32
+// The former is linalg.sub(a,b), the latter linalg.sub(b,a).
+static bool areBinOpsSwapped(GenericOp genericOp) {
+ Block *body = genericOp.getBody();
+ Operation *op = &body->front();
+ bool swapped = false;
+ if (op->getOpOperand(0).get() != body->getArgument(0)) {
+ swapped = true;
+ assert(op->getOpOperand(0).get() == body->getArgument(1) &&
+ op->getOpOperand(1).get() == body->getArgument(0) &&
+ "binary op uses just one block arg");
+ }
+ return swapped;
+}
+
FailureOr<LinalgOp> mlir::linalg::specializeGenericOp(RewriterBase &rewriter,
GenericOp genericOp) {
if (isaCopyOpInterface(genericOp)) {
@@ -28,5 +65,40 @@ FailureOr<LinalgOp> mlir::linalg::specializeGenericOp(RewriterBase &rewriter,
genericOp, genericOp.getDpsInputs()[0], genericOp.getDpsInits()[0]);
return namedOp;
}
+
+ if (isaFillOpInterface(genericOp)) {
+ LinalgOp namedOp = rewriter.replaceOpWithNewOp<FillOp>(
+ genericOp, genericOp.getDpsInputs()[0], genericOp.getDpsInits()[0]);
+ return namedOp;
+ }
+
+ if (isaElemwiseSingleUnaryOpInterface(genericOp)) {
+ Operation *op = &genericOp.getBody()->front();
+ if (isa<math::ExpOp>(op)) {
+ LinalgOp namedOp = REPLACE_UNARY_OP(ExpOp);
+ return namedOp;
+ }
+ }
+
+ if (isaElemwiseSingleBinaryOpInterface(genericOp)) {
+ bool swap = areBinOpsSwapped(genericOp);
+ Operation *op = &genericOp.getBody()->front();
+ if (isa<arith::AddFOp>(op)) {
+ LinalgOp namedOp = REPLACE_BINARY_OP(AddOp, swap);
+ return namedOp;
+ }
+ if (isa<arith::SubFOp>(op)) {
+ LinalgOp namedOp = REPLACE_BINARY_OP(SubOp, swap);
+ return namedOp;
+ }
+ if (isa<arith::MulFOp>(op)) {
+ LinalgOp namedOp = REPLACE_BINARY_OP(MulOp, swap);
+ return namedOp;
+ }
+ if (isa<arith::DivFOp>(op)) {
+ LinalgOp namedOp = REPLACE_BINARY_OP(DivOp, swap);
+ return namedOp;
+ }
+ }
return failure();
}
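
A caller would typically collect candidate generics first and only then run the specializer, since a successful match erases the op mid-walk. A hypothetical driver (funcOp and the surrounding pass boilerplate are assumptions for illustration):

    SmallVector<linalg::GenericOp> candidates;
    funcOp.walk([&](linalg::GenericOp op) { candidates.push_back(op); });
    IRRewriter rewriter(funcOp.getContext());
    for (linalg::GenericOp genericOp : candidates) {
      rewriter.setInsertionPoint(genericOp);
      // Returns failure and leaves the op untouched when no named form matches.
      (void)linalg::specializeGenericOp(rewriter, genericOp);
    }
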
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index df4089d61bfd..fd314ef9f813 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -692,12 +692,13 @@ FailureOr<linalg::ForallReductionTilingResult> linalg::tileReductionUsingForall(
op, "reduction dimension must be mapped to threads");
// 1. Create the initial tensor values.
- FailureOr<Operation *> identityTensor =
+ FailureOr<SmallVector<Value>> maybeInitTensors =
op.generateInitialTensorForPartialReduction(b, loc, numThreads,
reductionDim);
- if (failed(identityTensor))
- return b.notifyMatchFailure(op,
- "cannot create a tensor of identity value.");
+ if (failed(maybeInitTensors))
+ return b.notifyMatchFailure(
+ op, "Failed to create inital tensors for partial reduction");
+ SmallVector<Value> &initTensors = maybeInitTensors.value();
// Gather destination tensors.
SmallVector<Value> dest;
@@ -715,8 +716,8 @@ FailureOr<linalg::ForallReductionTilingResult> linalg::tileReductionUsingForall(
// 2. Create the ForallOp with an empty region.
scf::ForallOp forallOp = b.create<scf::ForallOp>(
- loc, getAsOpFoldResult(materializedNonZeroNumThreads),
- (*identityTensor)->getResults(), mapping);
+ loc, getAsOpFoldResult(materializedNonZeroNumThreads), initTensors,
+ mapping);
// 3. Calculate the tile offsets and sizes for the subsequent loop that will
// be nested under `forallOp`.
@@ -726,7 +727,7 @@ FailureOr<linalg::ForallReductionTilingResult> linalg::tileReductionUsingForall(
/*nominalTileSizes=*/std::nullopt, tiledOffsets,
tiledSizes);
- // 4. Clone the tileable op and update its destination operands to use the
+ // 4b. Clone the tileable op and update its destination operands to use the
// output bbArgs of the ForallOp.
SmallVector<Value> tilingResults;
ArrayRef<BlockArgument> destBbArgs = forallOp.getRegionIterArgs();
@@ -838,7 +839,7 @@ FailureOr<linalg::ForallReductionTilingResult> linalg::tileReductionUsingForall(
// 8. Return.
ForallReductionTilingResult results;
- results.initialOp = *identityTensor;
+ results.initialValues = initTensors;
results.loops = forallOp;
results.parallelTiledOp = tiledOp;
results.mergeOp = mergeOp;
diff --git a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
index bd870d4f982e..f512be46cc13 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
@@ -250,7 +250,7 @@ template <typename LinalgOpTy>
struct LinalgOpPartialReductionInterface
: public PartialReductionOpInterface::ExternalModel<
LinalgOpPartialReductionInterface<LinalgOpTy>, LinalgOpTy> {
- FailureOr<Operation *> generateInitialTensorForPartialReduction(
+ FailureOr<SmallVector<Value>> generateInitialTensorForPartialReduction(
Operation *op, OpBuilder &b, Location loc, ArrayRef<OpFoldResult> sizes,
ArrayRef<int> reductionDims) const {
auto linalgOp = cast<LinalgOp>(op);
@@ -258,50 +258,58 @@ struct LinalgOpPartialReductionInterface
if (linalgOp.hasPureBufferSemantics())
return op->emitOpError("expected operation to have tensor semantics");
- // Insert the new parallel dimension based on the index of the reduction
- // loops. This could be controlled by user for more flexibility.
- SmallVector<Operation *, 4> combinerOps;
- if (!matchReduction(linalgOp.getRegionOutputArgs(), 0, combinerOps) ||
- combinerOps.size() != 1)
- return op->emitOpError("Failed to anaysis the reduction operation.");
-
- Operation *reductionOp = combinerOps[0];
- std::optional<TypedAttr> identity = arith::getNeutralElement(reductionOp);
- if (!identity.has_value())
- return op->emitOpError(
- "Failed to get an identity value for the reduction operation.");
-
- ArrayRef<int64_t> oldShape =
- linalgOp.getShape(linalgOp.getDpsInitOperand(0));
-
- // Calculate the new shape, we insert the new dimensions based on the index
- // of the reduction dimensions.
- SmallVector<int64_t> newOutputShape;
- SmallVector<Value> dynamicDims;
- int64_t currReductionDims = 0;
- DenseSet<int> reductionDimsSet(reductionDims.begin(), reductionDims.end());
- for (int64_t idx :
- llvm::seq<int64_t>(0, oldShape.size() + reductionDims.size())) {
- if (reductionDimsSet.contains(idx)) {
- dispatchIndexOpFoldResults(sizes[idx], dynamicDims, newOutputShape);
- currReductionDims++;
- continue;
+ SmallVector<Value> inits;
+ for (int initIdx = 0, e = linalgOp.getNumDpsInits(); initIdx < e;
+ ++initIdx) {
+ // Insert the new parallel dimension based on the index of the reduction
+ // loops. This could be controlled by the user for more flexibility.
+ SmallVector<Operation *, 4> combinerOps;
+ if (!matchReduction(linalgOp.getRegionOutputArgs(), initIdx,
+ combinerOps) ||
+ combinerOps.size() != 1)
+ return op->emitOpError("Failed to anaysis the reduction operation.");
+
+ Operation *reductionOp = combinerOps[0];
+ std::optional<TypedAttr> identity = arith::getNeutralElement(reductionOp);
+ if (!identity.has_value())
+ return op->emitOpError(
+ "Failed to get an identity value for the reduction operation.");
+
+ ArrayRef<int64_t> oldShape =
+ linalgOp.getShape(linalgOp.getDpsInitOperand(initIdx));
+
+ // Calculate the new shape; the new dimensions are inserted based on
+ // the index of the reduction dimensions.
+ SmallVector<int64_t> newOutputShape;
+ SmallVector<Value> dynamicDims;
+ int64_t currReductionDims = 0;
+ DenseSet<int> reductionDimsSet(reductionDims.begin(),
+ reductionDims.end());
+ for (int64_t idx :
+ llvm::seq<int64_t>(0, oldShape.size() + reductionDims.size())) {
+ if (reductionDimsSet.contains(idx)) {
+ dispatchIndexOpFoldResults(sizes[idx], dynamicDims, newOutputShape);
+ currReductionDims++;
+ continue;
+ }
+ int64_t oldIdx = idx - currReductionDims;
+ int64_t dim = oldShape[oldIdx];
+ newOutputShape.push_back(dim);
+ if (ShapedType::isDynamic(dim))
+ dynamicDims.push_back(b.create<tensor::DimOp>(
+ loc, linalgOp.getDpsInitOperand(initIdx)->get(), oldIdx));
}
- int64_t oldIdx = idx - currReductionDims;
- int64_t dim = oldShape[oldIdx];
- newOutputShape.push_back(dim);
- if (ShapedType::isDynamic(dim))
- dynamicDims.push_back(b.create<tensor::DimOp>(
- loc, linalgOp.getDpsInitOperand(0)->get(), oldIdx));
+ Value emptyTensor = b.create<tensor::EmptyOp>(
+ loc, newOutputShape,
+ linalgOp.getRegionOutputArgs()[initIdx].getType(), dynamicDims);
+ Value constantOp = b.create<arith::ConstantOp>(loc, *identity);
+ auto identityTensor =
+ b.create<linalg::FillOp>(loc, constantOp, emptyTensor);
+ inits.push_back(identityTensor.getResult(0));
}
- Value emptyTensor = b.create<tensor::EmptyOp>(
- loc, newOutputShape, linalgOp.getRegionOutputArgs()[0].getType(),
- dynamicDims);
- Value constantOp = b.create<arith::ConstantOp>(loc, *identity);
- auto identityTensor =
- b.create<linalg::FillOp>(loc, constantOp, emptyTensor);
- return identityTensor.getOperation();
+
+ return inits;
}
Operation *tileToPartialReduction(Operation *op, OpBuilder &b, Location loc,
@@ -312,44 +320,64 @@ struct LinalgOpPartialReductionInterface
OpBuilder::InsertionGuard guard(b);
auto linalgOp = cast<LinalgOp>(op);
- AffineMap oldOutputMap =
- linalgOp.getMatchingIndexingMap(linalgOp.getDpsInitOperand(0));
- SmallVector<AffineExpr> outputExpr(oldOutputMap.getNumResults() +
- reductionDims.size());
-
- for (int idx : reductionDims)
- outputExpr[idx] = b.getAffineDimExpr(idx);
- int currExpr = 0;
- for (int idx : llvm::seq<int>(0, outputExpr.size())) {
- if (outputExpr[idx])
- continue;
- outputExpr[idx] = oldOutputMap.getResult(currExpr++);
+ // Step 1. Extend the init maps with the reduction dimensions, since we
+ // are converting those dimensions to parallel ones.
+ SmallVector<AffineMap> newInitMaps;
+ newInitMaps.reserve(linalgOp.getNumDpsInits());
+ for (int idx : llvm::seq<int>(0, linalgOp.getNumDpsInits())) {
+ // TODO: linalg::Generic doesn't have getDpsInitOperands. Can replace
+ // this with a for range loop when we have it.
+ AffineMap newMap =
+ linalgOp.getMatchingIndexingMap(linalgOp.getDpsInitOperand(idx));
+ for (int redPos : reductionDims) {
+ newMap = newMap.insertResult(b.getAffineDimExpr(redPos),
+ newMap.getNumResults());
+ }
+ newInitMaps.push_back(newMap);
}
- // Step 1: Extract a slice of the input operands.
- SmallVector<Value> valuesToTile = linalgOp.getDpsInputs();
- SmallVector<Value, 4> tiledOperands = makeTiledShapes(
- b, loc, linalgOp, valuesToTile, offsets, sizes, {}, true);
+ // Step 2a: Extract a slice of the input operands.
+ SmallVector<Value, 4> tiledInputs = makeTiledShapes(
+ b, loc, linalgOp, linalgOp.getDpsInputs(), offsets, sizes, {}, true);
+
+ // Step 2b: Extract a slice of the init operands.
+ SmallVector<Value, 1> tiledInits;
+ for (auto [valueMap, valueToTile] : llvm::zip_equal(newInitMaps, init)) {
+ int64_t initRank = valueMap.getNumResults();
+ SmallVector<OpFoldResult> initOffset(initRank, b.getIndexAttr(0));
+ SmallVector<OpFoldResult> initStride(initRank, b.getIndexAttr(1));
+ SmallVector<OpFoldResult> initSizes;
+ for (AffineExpr dimExpr : valueMap.getResults()) {
+ auto dim = cast<AffineDimExpr>(dimExpr);
+ initSizes.push_back(sizes[dim.getPosition()]);
+ }
+ // TODO: Use SubsetExtractOpInterface here once available.
+ auto extractSlice = b.create<tensor::ExtractSliceOp>(
+ loc, valueToTile, initOffset, initSizes, initStride);
+ tiledInits.push_back(extractSlice);
+ }
- // Step 2: Extract the accumulator operands
- SmallVector<OpFoldResult> strides(offsets.size(), b.getIndexAttr(1));
- SmallVector<OpFoldResult> outOffsets(offsets.size(), b.getIndexAttr(0));
- // TODO: use SubsetExtractOpInterface once it is available.
- Value out = b.create<tensor::ExtractSliceOp>(loc, init[0], outOffsets,
- sizes, strides);
+ // Update the indexing maps.
+ SmallVector<AffineMap> newMaps = linalgOp.getIndexingMapsArray();
+ // Change the init maps.
+ for (int idx : llvm::seq<int>(0, linalgOp.getNumDpsInits())) {
+ // TODO: linalg::Generic doesn't have getDpsInitOperands. Can replace
+ // this with a for range loop when we have it.
+ OpOperand *initOperand = linalgOp.getDpsInitOperand(idx);
+ int64_t mapIdx = linalgOp.getIndexingMapIndex(initOperand);
+ newMaps[mapIdx] = newInitMaps[idx];
+ }
- // Step3. Create a generic op where the reduction dimensions are replaced
- // by a parallel dimension of the size of reduction.
+ // Step 3. Change the reduction dim iterator types.
SmallVector<utils::IteratorType> newIteratorTypes =
linalgOp.getIteratorTypesArray();
for (int dim : reductionDims)
newIteratorTypes[dim] = utils::IteratorType::parallel;
- SmallVector<AffineMap> newMaps = linalgOp.getIndexingMapsArray();
- newMaps.back() = AffineMap::get(newMaps.back().getNumDims(), 0, outputExpr,
- linalgOp.getContext());
+
+ // Step 4. Create the new generic op.
auto genericOp =
- b.create<GenericOp>(loc, TypeRange({out.getType()}), tiledOperands,
- ValueRange({out}), newMaps, newIteratorTypes);
+ b.create<GenericOp>(loc, ValueRange(tiledInits).getTypes(), tiledInputs,
+ tiledInits, newMaps, newIteratorTypes);
IRMapping mapping;
op->getRegion(0).cloneInto(&genericOp.getRegion(),
genericOp.getRegion().begin(), mapping);
@@ -361,40 +389,53 @@ struct LinalgOpPartialReductionInterface
ArrayRef<int> reductionDims) const {
auto linalgOp = cast<LinalgOp>(op);
- DenseSet<int> reductionDimsSet(reductionDims.begin(), reductionDims.end());
-
- // Then create a new reduction that only reduce the newly added dimensions
- // from the previous op.
- int64_t intermRank = cast<ShapedType>(partialReduce[0].getType()).getRank();
- AffineMap inputMap = b.getMultiDimIdentityMap(intermRank);
- SmallVector<utils::IteratorType> reductionIteratorTypes;
- SmallVector<AffineExpr> exprs;
-
- for (int64_t i : llvm::seq<int64_t>(0, intermRank)) {
- if (reductionDimsSet.contains(i)) {
- reductionIteratorTypes.push_back(utils::IteratorType::reduction);
- } else {
- exprs.push_back(b.getAffineDimExpr(i));
- reductionIteratorTypes.push_back(utils::IteratorType::parallel);
+ // Step 1. Recover the dims that actually need to be merged from the
+ // original operation. We can classify the original iterators as follows:
+ //
+ // parallel --> parallel
+ // reduction + not in reductionDims --> parallel (already reduced)
+ // reduction + in reductionDims --> reduction (will reduce now)
+ SmallVector<utils::IteratorType> iterators(linalgOp.getNumLoops(),
+ utils::IteratorType::parallel);
+ for (int redIdx : reductionDims)
+ iterators[redIdx] = utils::IteratorType::reduction;
+
+ // Step 2. For each partial result, create a map to index it. This map
+ // is simply the indexing map for the original result with reductionDims
+ // appended (as produced in tileToPartialReduction).
+ int64_t numInits = linalgOp.getNumDpsInits();
+ SmallVector<AffineMap> indexingMaps(numInits * 2);
+ for (int idx : llvm::seq<int>(0, numInits)) {
+ AffineMap &inputMap = indexingMaps[idx];
+ AffineMap &outputMap = indexingMaps[numInits + idx];
+
+ outputMap =
+ linalgOp.getMatchingIndexingMap(linalgOp.getDpsInitOperand(idx));
+ inputMap = outputMap;
+ for (int redPos : reductionDims) {
+ inputMap = inputMap.insertResult(b.getAffineDimExpr(redPos),
+ inputMap.getNumResults());
}
}
- AffineMap outputMap =
- AffineMap::get(intermRank, 0, exprs, op->getContext());
- SmallVector<AffineMap> reductionMaps = {inputMap, outputMap};
-
- SmallVector<Operation *, 4> combinerOps;
- matchReduction(linalgOp.getRegionOutputArgs(), 0, combinerOps);
- Operation *reductionOp = combinerOps[0];
-
auto reduction = b.create<GenericOp>(
- loc, op->getResultTypes(), ValueRange({partialReduce[0]}),
- linalgOp.getDpsInits(), reductionMaps, reductionIteratorTypes,
- [reductionOp](OpBuilder &b, Location loc, ValueRange inputs) {
- Operation *clonedReductionOp = b.clone(*reductionOp);
- clonedReductionOp->setOperand(0, inputs[0]);
- clonedReductionOp->setOperand(1, inputs[1]);
- b.create<linalg::YieldOp>(loc, clonedReductionOp->getResult(0));
+ loc, op->getResultTypes(), partialReduce, linalgOp.getDpsInits(),
+ indexingMaps, iterators,
+ [&linalgOp](OpBuilder &b, Location loc, ValueRange inputs) {
+ int64_t numInits = linalgOp.getNumDpsInits();
+ SmallVector<Value> yieldedValues;
+ for (int idx : llvm::seq<int>(0, numInits)) {
+ // Get the combiner op.
+ SmallVector<Operation *, 4> combinerOps;
+ matchReduction(linalgOp.getRegionOutputArgs(), idx, combinerOps);
+ Operation *clonedReductionOp = b.clone(*combinerOps[0]);
+ // Combine the input at idx and output at numInits + idx.
+ clonedReductionOp->setOperand(0, inputs[idx]);
+ clonedReductionOp->setOperand(1, inputs[numInits + idx]);
+ // Yield.
+ yieldedValues.push_back(clonedReductionOp->getResult(0));
+ }
+ b.create<linalg::YieldOp>(loc, yieldedValues);
});
return reduction.getOperation();
}
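
With the interface now returning one init value per DPS init, consumers iterate instead of unwrapping a single op. A sketch of the calling side, assuming a PartialReductionOpInterface handle and tile sizes in scope:

    FailureOr<SmallVector<Value>> maybeInits =
        partialReductionIface.generateInitialTensorForPartialReduction(
            b, loc, tileSizes, reductionDims);
    if (failed(maybeInits))
      return failure();
    // One neutral-element tensor per DPS init, in operand order.
    for (Value init : *maybeInits)
      initTensors.push_back(init);
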
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index 45f39c80041c..d70e6d0b79cd 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -833,11 +833,31 @@ struct FoldSelfCopy : public OpRewritePattern<CopyOp> {
return success();
}
};
+
+struct FoldEmptyCopy final : public OpRewritePattern<CopyOp> {
+ using OpRewritePattern<CopyOp>::OpRewritePattern;
+
+ static bool isEmptyMemRef(BaseMemRefType type) {
+ return type.hasRank() &&
+ llvm::any_of(type.getShape(), [](int64_t x) { return x == 0; });
+ }
+
+ LogicalResult matchAndRewrite(CopyOp copyOp,
+ PatternRewriter &rewriter) const override {
+ if (isEmptyMemRef(copyOp.getSource().getType()) ||
+ isEmptyMemRef(copyOp.getTarget().getType())) {
+ rewriter.eraseOp(copyOp);
+ return success();
+ }
+
+ return failure();
+ }
+};
} // namespace
void CopyOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
- results.add<FoldCopyOfCast, FoldSelfCopy>(context);
+ results.add<FoldCopyOfCast, FoldEmptyCopy, FoldSelfCopy>(context);
}
LogicalResult CopyOp::fold(FoldAdaptor adaptor,
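
FoldEmptyCopy erases copies whose source or target has a statically zero dimension, since no elements are transferred. Illustrative types, assuming a builder b in scope (dynamically sized and unranked memrefs are deliberately not matched):

    auto empty = MemRefType::get({0, 4}, b.getF32Type());  // matched: 0 elements
    auto dyn = MemRefType::get({ShapedType::kDynamic, 4},
                               b.getF32Type());            // not matched: ? may be > 0
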
diff --git a/mlir/lib/Dialect/Mesh/IR/MeshOps.cpp b/mlir/lib/Dialect/Mesh/IR/MeshOps.cpp
index d4329b401df1..ec1acbbb9349 100644
--- a/mlir/lib/Dialect/Mesh/IR/MeshOps.cpp
+++ b/mlir/lib/Dialect/Mesh/IR/MeshOps.cpp
@@ -20,6 +20,7 @@
#include "mlir/IR/Location.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
+#include "mlir/IR/Value.h"
#include "mlir/Interfaces/ViewLikeInterface.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
@@ -28,6 +29,7 @@
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TypeSwitch.h"
+#include "llvm/Support/Casting.h"
#include <algorithm>
#include <functional>
#include <iterator>
@@ -99,7 +101,7 @@ Operation *MeshDialect::materializeConstant(OpBuilder &builder, Attribute value,
static FailureOr<MeshOp> getMeshAndVerify(Operation *op,
FlatSymbolRefAttr meshSymbol,
SymbolTableCollection &symbolTable) {
- mesh::MeshOp mesh = getMesh(op, meshSymbol, symbolTable);
+ mesh::MeshOp mesh = getMeshOrNull(op, meshSymbol, symbolTable);
if (!mesh) {
return op->emitError() << "Undefined required mesh symbol \""
<< meshSymbol.getValue() << "\".";
@@ -178,6 +180,88 @@ Type mesh::shardType(Type type, MeshOp mesh, MeshShardingAttr sharding) {
return type;
}
+void mlir::mesh::maybeInsertTargetShardingAnnotation(MeshShardingAttr sharding,
+ OpOperand &operand,
+ OpBuilder &builder) {
+ OpBuilder::InsertionGuard insertionGuard(builder);
+ Value operandValue = operand.get();
+ Operation *operandOp = operand.getOwner();
+ builder.setInsertionPointAfterValue(operandValue);
+ ShardOp shardOp = dyn_cast<ShardOp>(operandOp);
+ if (shardOp && shardOp.getShard() == sharding &&
+ !shardOp.getAnnotateForUsers()) {
+ // Nothing to do; the correct sharding is already set.
+ return;
+ }
+
+ auto newShardOp =
+ builder.create<ShardOp>(operandValue.getLoc(), operandValue, sharding,
+ /*annotate_for_users*/ false);
+ IRRewriter rewriter(builder);
+ rewriter.replaceUsesWithIf(
+ operandValue, newShardOp, [operandOp, operandValue](OpOperand &use) {
+ return use.getOwner() == operandOp && use.get() == operandValue;
+ });
+
+ if (!shardOp || shardOp.getAnnotateForUsers()) {
+ return;
+ }
+
+ auto newShardOp2 = builder.create<ShardOp>(
+ operandValue.getLoc(), newShardOp, sharding, /*annotate_for_users*/ true);
+ rewriter.replaceAllUsesExcept(newShardOp, newShardOp2, newShardOp2);
+}
+
+void mlir::mesh::maybeInsertTargetShardingAnnotation(MeshShardingAttr sharding,
+ OpResult result,
+ OpBuilder &builder) {
+ for (auto &use : llvm::make_early_inc_range(result.getUses())) {
+ maybeInsertTargetShardingAnnotation(sharding, use, builder);
+ }
+}
+
+void mlir::mesh::maybeInsertSourceShardingAnnotation(MeshShardingAttr sharding,
+ OpOperand &operand,
+ OpBuilder &builder) {
+ OpBuilder::InsertionGuard insertionGuard(builder);
+ Value operandValue = operand.get();
+ Operation *operandOp = operand.getOwner();
+ Operation *operandSrcOp = operandValue.getDefiningOp();
+ bool isBlockArg = !operandSrcOp;
+ ShardOp shardOp = dyn_cast_or_null<ShardOp>(operandSrcOp);
+
+ if (shardOp && shardOp.getShard() == sharding &&
+ shardOp.getAnnotateForUsers()) {
+ // Nothing to do; the correct sharding is already set.
+ return;
+ }
+
+ builder.setInsertionPoint(operandOp);
+ auto newShardOp =
+ builder.create<ShardOp>(operandValue.getLoc(), operandValue, sharding,
+ /*annotate_for_users*/ true);
+ IRRewriter rewriter(builder);
+ rewriter.replaceUsesWithIf(
+ operandValue, newShardOp, [operandOp, operandValue](OpOperand &use) {
+ return use.getOwner() == operandOp && use.get() == operandValue;
+ });
+
+ if (isBlockArg || !shardOp || !shardOp.getAnnotateForUsers()) {
+ // No need for resharding.
+ return;
+ }
+
+ builder.setInsertionPoint(newShardOp);
+ auto newPrecedingShardOp =
+ builder.create<ShardOp>(operandValue.getLoc(), operandValue, sharding,
+ /*annotate_for_users*/ false);
+ rewriter.replaceUsesWithIf(newShardOp.getOperand(), newPrecedingShardOp,
+ [&newShardOp](OpOperand &use) {
+ return use.getOwner() ==
+ newShardOp.getOperation();
+ });
+}
+
//===----------------------------------------------------------------------===//
// mesh.mesh op
//===----------------------------------------------------------------------===//
@@ -286,6 +370,10 @@ bool MeshShardingAttr::operator==(Attribute rhs) const {
return rhsAsMeshShardingAttr && *this == rhsAsMeshShardingAttr;
}
+bool MeshShardingAttr::operator!=(Attribute rhs) const {
+ return !(*this == rhs);
+}
+
bool MeshShardingAttr::operator==(MeshShardingAttr rhs) const {
if (getMesh() != rhs.getMesh() || getPartialAxes() != rhs.getPartialAxes()) {
return false;
@@ -311,6 +399,10 @@ bool MeshShardingAttr::operator==(MeshShardingAttr rhs) const {
std::mem_fn(&MeshAxesAttr::empty));
}
+bool MeshShardingAttr::operator!=(MeshShardingAttr rhs) const {
+ return !(*this == rhs);
+}
+
//===----------------------------------------------------------------------===//
// mesh.shard op
//===----------------------------------------------------------------------===//
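
maybeInsertTargetShardingAnnotation comes in an OpOperand and an OpResult overload, the latter applying the former to every use of the result. A sketch of annotating all results of an op, with resultShardings assumed caller-provided:

    for (OpResult result : op->getResults())
      // Inserts mesh.shard ops only where an equivalent annotation is missing.
      mesh::maybeInsertTargetShardingAnnotation(
          resultShardings[result.getResultNumber()], result, builder);
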
diff --git a/mlir/lib/Dialect/Mesh/Interfaces/ShardingInterface.cpp b/mlir/lib/Dialect/Mesh/Interfaces/ShardingInterface.cpp
index dbb9e667d470..54fc91cb2642 100644
--- a/mlir/lib/Dialect/Mesh/Interfaces/ShardingInterface.cpp
+++ b/mlir/lib/Dialect/Mesh/Interfaces/ShardingInterface.cpp
@@ -13,6 +13,7 @@
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/Support/LLVM.h"
+#include "mlir/Support/LogicalResult.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
@@ -388,22 +389,11 @@ FailureOr<ShardingOption> mesh::detail::defaultGetShardingOption(
return shardingOption;
}
-//===----------------------------------------------------------------------===//
-// detail::defaultAddShardingAnnotations
-//===----------------------------------------------------------------------===//
-
-// To add a `mesh.shard` op for the given result, based on the details provided
-// in `shardingOption`, `map`, and `loopTypes`.
-static LogicalResult addShardOp(OpBuilder &b, OpResult result,
- const ShardingOption &shardingOption,
- AffineMap map,
- ArrayRef<utils::IteratorType> loopTypes,
- ArrayRef<ReductionKind> reductionLoopKinds) {
- FailureOr<std::pair<bool, MeshShardingAttr>> maybeSharding =
- getMeshShardingAttr(result);
- if (succeeded(maybeSharding) && !maybeSharding->first)
- return success();
-
+// Get the sharding attribute for the given result and sharding option.
+MeshShardingAttr
+getShardingAttribute(OpResult result, const ShardingOption &shardingOption,
+ AffineMap map, ArrayRef<utils::IteratorType> loopTypes,
+ ArrayRef<ReductionKind> reductionLoopKinds) {
auto resultType = cast<RankedTensorType>(result.getType());
SmallVector<SmallVector<MeshAxis>> splitAxes(resultType.getRank());
SmallVector<MeshAxis> partialAxes;
@@ -438,26 +428,15 @@ static LogicalResult addShardOp(OpBuilder &b, OpResult result,
}
removeTrailingEmptySubArray(splitAxes);
- MeshShardingAttr shardAttr = MeshShardingAttr::get(
- b.getContext(), shardingOption.mesh, splitAxes, partialAxes, partialType);
- OpBuilder::InsertionGuard guard(b);
- b.setInsertionPointAfterValue(result);
- auto shardOp = b.create<ShardOp>(result.getLoc(), resultType, result,
- shardAttr, /*annotate_for_users*/ false);
- result.replaceAllUsesExcept(shardOp, shardOp);
- return success();
+ return MeshShardingAttr::get(result.getContext(), shardingOption.mesh,
+ splitAxes, partialAxes, partialType);
}
-// To add a `mesh.shard` op for the given operand, based on the details provided
-// in `shardingOption`, `map`, and `loopTypes`.
-static LogicalResult addShardOp(OpBuilder &b, OpOperand &opOperand,
- const ShardingOption &shardingOption,
- AffineMap map) {
- auto maybeShardingAttr = getMeshShardingAttr(opOperand);
- if (succeeded(maybeShardingAttr) && maybeShardingAttr->first)
- return success();
- Value operand = opOperand.get();
- auto operandType = cast<RankedTensorType>(operand.getType());
+static FailureOr<MeshShardingAttr>
+getShardingAttribute(OpOperand &opOperand, const ShardingOption &shardingOption,
+ AffineMap map) {
+ Value operandValue = opOperand.get();
+ auto operandType = cast<RankedTensorType>(operandValue.getType());
SmallVector<SmallVector<MeshAxis>> splitAxes(operandType.getRank());
unsigned numDims = map.getNumDims();
for (auto it : llvm::enumerate(map.getResults())) {
@@ -483,19 +462,79 @@ static LogicalResult addShardOp(OpBuilder &b, OpOperand &opOperand,
}
removeTrailingEmptySubArray(splitAxes);
- MeshShardingAttr shardAttr =
- MeshShardingAttr::get(b.getContext(), shardingOption.mesh, splitAxes);
+ return MeshShardingAttr::get(opOperand.get().getContext(),
+ shardingOption.mesh, splitAxes);
+}
+
+FailureOr<SmallVector<MeshShardingAttr>>
+mesh::detail::defaultGetShardingAnnotations(
+ Operation *op, const ShardingOption &shardingOption) {
+ SmallVector<MeshShardingAttr> res;
+
+ ShardingInterface shardingOp = llvm::cast<ShardingInterface>(op);
+ SmallVector<utils::IteratorType> loopTypes =
+ shardingOp.getLoopIteratorTypes();
+ SmallVector<ReductionKind> reductionKinds =
+ shardingOp.getReductionLoopIteratorKinds();
+ SmallVector<AffineMap> maps = shardingOp.getIndexingMaps();
+ unsigned numOperands = op->getNumOperands();
+
+ for (OpOperand &opOperand : op->getOpOperands()) {
+ FailureOr<MeshShardingAttr> shardingAttr = getShardingAttribute(
+ opOperand, shardingOption, maps[opOperand.getOperandNumber()]);
+ if (failed(shardingAttr))
+ return failure();
+ res.push_back(*shardingAttr);
+ }
+
+ for (OpResult result : op->getResults()) {
+ res.push_back(getShardingAttribute(
+ result, shardingOption, maps[numOperands + result.getResultNumber()],
+ loopTypes, reductionKinds));
+ }
+
+ return res;
+}
+
+//===----------------------------------------------------------------------===//
+// detail::defaultAddShardingAnnotations
+//===----------------------------------------------------------------------===//
+
+// To add a `mesh.shard` op for the given result, based on the details provided
+// in `shardingOption`, `map`, and `loopTypes`.
+static LogicalResult addShardOp(OpBuilder &b, OpResult result,
+ const ShardingOption &shardingOption,
+ AffineMap map,
+ ArrayRef<utils::IteratorType> loopTypes,
+ ArrayRef<ReductionKind> reductionLoopKinds) {
+ MeshShardingAttr shardAttr = getShardingAttribute(
+ result, shardingOption, map, loopTypes, reductionLoopKinds);
+ maybeInsertTargetShardingAnnotation(shardAttr, result, b);
+
+ return success();
+}
+
+// To add a `mesh.shard` op for the given operand, based on the details
+// provided in `shardingOption` and `map`.
+static LogicalResult addShardOp(OpBuilder &b, OpOperand &opOperand,
+ const ShardingOption &shardingOption,
+ AffineMap map) {
+
+ FailureOr<MeshShardingAttr> shardAttr =
+ getShardingAttribute(opOperand, shardingOption, map);
+ if (failed(shardAttr)) {
+ return failure();
+ }
OpBuilder::InsertionGuard guard(b);
- b.setInsertionPoint(opOperand.getOwner());
- auto shardOp = b.create<ShardOp>(operand.getLoc(), operandType, operand,
- shardAttr, true);
- opOperand.set(shardOp);
+ maybeInsertSourceShardingAnnotation(*shardAttr, opOperand, b);
return success();
}
LogicalResult mesh::detail::defaultAddShardingAnnotations(
Operation *op, OpBuilder &b, const ShardingOption &shardingOption) {
+ assert(!shardingOption.empty && shardingOption.mesh);
+
ShardingInterface shardingOp = llvm::cast<ShardingInterface>(op);
SmallVector<utils::IteratorType> loopTypes =
shardingOp.getLoopIteratorTypes();
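Taken together, the refactor above separates computing shardings from mutating the IR: `getShardingAnnotations` (backed by `defaultGetShardingAnnotations`) reports the exact shardings the op needs without touching it, and `addShardingAnnotations` materializes them as `mesh.shard` ops. A minimal sketch of a caller, assuming `op` implements `ShardingInterface` and `shardingOption` came from a successful `getShardingOption` call:

// Sketch: inspect the would-be shardings first (no IR mutation), then commit.
ShardingInterface iface = llvm::cast<ShardingInterface>(op);
FailureOr<SmallVector<MeshShardingAttr>> annotations =
    iface.getShardingAnnotations(shardingOption);
if (failed(annotations))
  return failure();
// ... rank `annotations` against existing mesh.shard annotations, then:
OpBuilder b(op);
if (failed(iface.addShardingAnnotations(b, shardingOption)))
  return failure();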
diff --git a/mlir/lib/Dialect/Mesh/Transforms/ShardingPropagation.cpp b/mlir/lib/Dialect/Mesh/Transforms/ShardingPropagation.cpp
index 29320f1e339f..870ac4a16808 100644
--- a/mlir/lib/Dialect/Mesh/Transforms/ShardingPropagation.cpp
+++ b/mlir/lib/Dialect/Mesh/Transforms/ShardingPropagation.cpp
@@ -12,9 +12,16 @@
#include "mlir/Dialect/Mesh/IR/MeshDialect.h"
#include "mlir/Dialect/Mesh/IR/MeshOps.h"
#include "mlir/Dialect/Mesh/Interfaces/ShardingInterface.h"
+#include "mlir/IR/Verifier.h"
#include "mlir/Interfaces/FunctionInterfaces.h"
#include "mlir/Pass/Pass.h"
+#include "mlir/Support/LogicalResult.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
#include <vector>
namespace mlir {
@@ -30,6 +37,70 @@ namespace mesh {
using namespace mlir;
using namespace mlir::mesh;
+enum class ReshardingRequirementKind {
+ NO_RESHARDING = 0,
+ NO_RESHARDING_FOR_EXPLICIT_ANNOTATIONS,
+ RESHARDING_FOR_EXPLICIT_ANNOTATIONS
+};
+
+#ifdef LLVM_DEBUG
+
+template <typename T>
+static llvm::raw_ostream &operator<<(llvm::raw_ostream &stream,
+ const SmallVector<T> &vec);
+template <typename... Ts>
+static llvm::raw_ostream &operator<<(llvm::raw_ostream &stream,
+ const std::tuple<Ts...> &t);
+static llvm::raw_ostream &operator<<(llvm::raw_ostream &stream,
+                                     ReshardingRequirementKind v);
+
+template <typename Stream, typename Range>
+static Stream &printRange(Stream &stream, Range &&range) {
+ stream << "[";
+ llvm::for_each(range, [&stream](auto &v) {
+ stream << v;
+ stream << ", ";
+ });
+ return stream << "]";
+}
+
+template <typename T>
+static llvm::raw_ostream &operator<<(llvm::raw_ostream &stream,
+ const SmallVector<T> &vec) {
+ return printRange(stream, vec);
+}
+
+[[maybe_unused]] static llvm::raw_ostream &operator<<(llvm::raw_ostream &stream,
+ const ShardingOption &v) {
+  return stream << "{empty = " << v.empty << ", mesh = " << v.mesh
+ << ", shardingArray = " << v.shardingArray << "}";
+}
+
+template <typename Stream, typename... Ts, size_t... Is>
+static Stream &printTuple(Stream &stream, std::tuple<Ts...> tuple,
+ std::index_sequence<Is...>) {
+ static_assert(sizeof...(Is) == sizeof...(Ts),
+ "Indices must have same number of elements as tuple types!");
+ static_assert(sizeof...(Ts) > 0, "Cannot insert empty tuple into stream.");
+
+ stream << "{";
+ ((stream << std::get<Is>(tuple) << ", "), ...);
+ return stream << "}";
+}
+
+template <typename... Ts>
+static llvm::raw_ostream &operator<<(llvm::raw_ostream &stream,
+ const std::tuple<Ts...> &t) {
+ return printTuple(stream, t, std::index_sequence_for<Ts...>{});
+}
+
+[[maybe_unused]] static llvm::raw_ostream &
+operator<<(llvm::raw_ostream &stream, ReshardingRequirementKind v) {
+ return stream << static_cast<int>(v);
+}
+
+#endif // LLVM_DEBUG
+
//===----------------------------------------------------------------------===//
// Utilities
//===----------------------------------------------------------------------===//
@@ -77,6 +148,138 @@ getOrderedPossibleShardingAttrs(ArrayRef<MeshShardingAttr> mustShardings,
return allShardingAttrs;
}
+// The order of preference is from highest to lowest:
+// 1. No resharding is required (all existing annotations are compatible).
+// 2. No resharding for operands/results that have an annotation specifically
+//    targeting this operation. This means
+//    * operands that are the result of `mesh.shard` ops marked with
+//      `annotate_for_users`.
+//    * results that are annotated with `mesh.shard` ops without
+//      `annotate_for_users`.
+// 3. All other cases. Resharding is required for operands/results with
+//    annotations explicitly targeting this operation.
+static ReshardingRequirementKind getReshardingRequirementKind(
+ Operation *op,
+ const SmallVector<MeshShardingAttr> &operandAndResultShardings) {
+  ReshardingRequirementKind res = ReshardingRequirementKind::NO_RESHARDING;
+
+ size_t operandsCount = op->getOperands().size();
+ auto operandShardings =
+ llvm::make_range(operandAndResultShardings.begin(),
+ operandAndResultShardings.begin() + operandsCount);
+ auto resultShardings =
+ llvm::make_range(operandAndResultShardings.begin() + operandsCount,
+ operandAndResultShardings.end());
+
+ for (auto [operand, sharding] :
+ llvm::zip_equal(op->getOperands(), operandShardings)) {
+ ShardOp shardOp = llvm::dyn_cast_or_null<ShardOp>(operand.getDefiningOp());
+ if (!shardOp) {
+ continue;
+ }
+ bool needsResharding = shardOp.getShardAttr() != sharding;
+ bool isExplicitAnnotationForThisOp = shardOp.getAnnotateForUsers();
+ if (needsResharding) {
+ if (isExplicitAnnotationForThisOp) {
+ // This is the worst case. No need to continue.
+        return ReshardingRequirementKind::RESHARDING_FOR_EXPLICIT_ANNOTATIONS;
+ }
+      res = ReshardingRequirementKind::NO_RESHARDING_FOR_EXPLICIT_ANNOTATIONS;
+ }
+ }
+
+ for (auto [result, sharding] :
+ llvm::zip_equal(op->getResults(), resultShardings)) {
+ for (auto user : result.getUsers()) {
+ ShardOp shardOp = llvm::dyn_cast<ShardOp>(user);
+ if (!shardOp) {
+ continue;
+ }
+ bool needsResharding = shardOp.getShardAttr() != sharding;
+ bool isExplicitAnnotationForThisOp = !shardOp.getAnnotateForUsers();
+ if (needsResharding) {
+ if (isExplicitAnnotationForThisOp) {
+ // This is the worst case. No need to continue.
+          return ReshardingRequirementKind::RESHARDING_FOR_EXPLICIT_ANNOTATIONS;
+ }
+        res = ReshardingRequirementKind::NO_RESHARDING_FOR_EXPLICIT_ANNOTATIONS;
+ }
+ }
+ }
+
+ return res;
+}
+
+// From all the operand and result sharding combinations,
+// return the one that is most desirable.
+// The order of preference is:
+// 1. No resharding with respect to existing sharding annotations.
+// 2. Resharding only for values that already have annotations that do not
+//    target this op.
+// 3. Resharding of existing explicit sharding annotations for this op.
+static FailureOr<ShardingOption> selectShardingOption(
+ ShardingInterface shardingOp,
+ ArrayRef<SmallVector<MeshShardingAttr>> possibleOperandShardingAttrs,
+ ArrayRef<SmallVector<MeshShardingAttr>> possibleResultShardingAttrs) {
+  SmallVector<std::tuple<ShardingOption, ReshardingRequirementKind>>
+ shardingOptionsAndReshardingRequirements;
+
+ for (ArrayRef<MeshShardingAttr> resultShardings :
+ possibleResultShardingAttrs) {
+ for (ArrayRef<MeshShardingAttr> operandShardings :
+ possibleOperandShardingAttrs) {
+ FailureOr<ShardingOption> shardingOption =
+ shardingOp.getShardingOption(operandShardings, resultShardings);
+ if (failed(shardingOption) || shardingOption->empty) {
+ continue;
+ }
+      // These shardings may not be the same as those in operandShardings and
+      // resultShardings, which may be missing some annotations.
+      // Whatever getShardingAnnotations returns is exactly what the op needs.
+ FailureOr<SmallVector<MeshShardingAttr>> operandAndResultShardings =
+ shardingOp.getShardingAnnotations(*shardingOption);
+ if (failed(operandAndResultShardings)) {
+ return failure();
+ }
+
+ LLVM_DEBUG(DBGS() << "operandAndResultShardings = "
+ << *operandAndResultShardings << "\n";);
+
+      ReshardingRequirementKind reshardingRequirement =
+          getReshardingRequirementKind(shardingOp, *operandAndResultShardings);
+      if (reshardingRequirement == ReshardingRequirementKind::NO_RESHARDING) {
+ // This is the best case. No need to go on.
+ return *shardingOption;
+ }
+
+ shardingOptionsAndReshardingRequirements.emplace_back(
+          std::move(*shardingOption), reshardingRequirement);
+ }
+ }
+
+ if (shardingOptionsAndReshardingRequirements.empty()) {
+ return ShardingOption::makeEmpty();
+ }
+
+ std::partial_sort(
+ shardingOptionsAndReshardingRequirements.begin(),
+ shardingOptionsAndReshardingRequirements.begin() + 1,
+ shardingOptionsAndReshardingRequirements.end(),
+      [](const std::tuple<ShardingOption, ReshardingRequirementKind> &a,
+         const std::tuple<ShardingOption, ReshardingRequirementKind> &b) {
+        return std::get<ReshardingRequirementKind>(a) <
+               std::get<ReshardingRequirementKind>(b);
+ });
+
+ LLVM_DEBUG(DBGS() << "shardingOptionsAndReshardingRequirements = "
+ << shardingOptionsAndReshardingRequirements << "\n";);
+
+ return std::get<ShardingOption>(
+ shardingOptionsAndReshardingRequirements.front());
+}
+
// For each operation that implements the ShardingInterface, infer the sharding
// option of the operation from its operands and/or results using the
// `getShardingOption` method. If the inferred sharding option is not empty, add
@@ -135,32 +338,21 @@ static LogicalResult visitOp(Operation *op, OpBuilder &builder) {
SmallVector<SmallVector<MeshShardingAttr>> possibleResultShardingAttrs =
getOrderedPossibleShardingAttrs(resultMustShardings,
allowConflictsResultShardings);
- FailureOr<ShardingOption> finalShardingOption = failure();
- for (ArrayRef<MeshShardingAttr> resultShardings :
- possibleResultShardingAttrs) {
- if (succeeded(finalShardingOption))
- break;
- for (ArrayRef<MeshShardingAttr> operandShardings :
- possibleOperandShardingAttrs) {
- FailureOr<ShardingOption> shardingOption =
- shardingOp.getShardingOption(operandShardings, resultShardings);
- if (succeeded(shardingOption)) {
- finalShardingOption = shardingOption;
- break;
- }
- }
- }
+ FailureOr<ShardingOption> shardingOption = selectShardingOption(
+ shardingOp, possibleOperandShardingAttrs, possibleResultShardingAttrs);
- if (failed(finalShardingOption)) {
+ if (failed(shardingOption)) {
op->emitOpError() << "fail to get sharding option.";
return failure();
}
+
+ LLVM_DEBUG(DBGS() << "Selected sharding option: " << *shardingOption << "\n");
+
// sharding info is empty, return immediately
- if (finalShardingOption->empty)
+ if (shardingOption->empty)
return success();
- if (failed(
- shardingOp.addShardingAnnotations(builder, *finalShardingOption))) {
+ if (failed(shardingOp.addShardingAnnotations(builder, *shardingOption))) {
op->emitOpError() << "fail to set sharding annotations.";
return failure();
}
@@ -199,6 +391,7 @@ struct ShardingPropagation
LLVM_DEBUG(DBGS() << "After reversed order propagation:\n"
<< funcOp << "\n");
+ LLVM_DEBUG(assert(succeeded(mlir::verify(funcOp))));
// 2. propagate in original order
for (Operation &op : llvm::make_early_inc_range(block))
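A note on the selection step above: partially sorting only the first element amounts to picking the minimum by requirement kind. The ordering can be illustrated standalone (values are illustrative; this mirrors, but is not part of, the patch):

#include <algorithm>
#include <cassert>
#include <vector>

// Lower enumerator values are more desirable, as in selectShardingOption.
enum class ReshardingRequirementKind {
  NO_RESHARDING = 0,
  NO_RESHARDING_FOR_EXPLICIT_ANNOTATIONS,
  RESHARDING_FOR_EXPLICIT_ANNOTATIONS
};

int main() {
  std::vector<ReshardingRequirementKind> candidates = {
      ReshardingRequirementKind::RESHARDING_FOR_EXPLICIT_ANNOTATIONS,
      ReshardingRequirementKind::NO_RESHARDING_FOR_EXPLICIT_ANNOTATIONS};
  // Selecting the front of a length-one partial sort is a min-element scan.
  auto best = std::min_element(candidates.begin(), candidates.end());
  assert(*best ==
         ReshardingRequirementKind::NO_RESHARDING_FOR_EXPLICIT_ANNOTATIONS);
  (void)best;
  return 0;
}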
diff --git a/mlir/lib/Dialect/Mesh/Transforms/Spmdization.cpp b/mlir/lib/Dialect/Mesh/Transforms/Spmdization.cpp
index 6b1326d76bc4..f3e4b15aec11 100644
--- a/mlir/lib/Dialect/Mesh/Transforms/Spmdization.cpp
+++ b/mlir/lib/Dialect/Mesh/Transforms/Spmdization.cpp
@@ -493,8 +493,6 @@ TypedValue<ShapedType> reshard(ImplicitLocOpBuilder &builder, MeshOp mesh,
TypedValue<ShapedType> reshard(OpBuilder &builder, MeshOp mesh, ShardOp source,
ShardOp target,
TypedValue<ShapedType> sourceShardValue) {
- assert(!source.getAnnotateForUsers());
- assert(target.getAnnotateForUsers());
assert(source.getResult() == target.getOperand());
ImplicitLocOpBuilder implicitLocOpBuilder(target->getLoc(), builder);
return reshard(
@@ -628,7 +626,6 @@ spmdizeOperation(ShardOp shardOp, IRMapping &spmdizationMap,
targetSpmdValue = spmdizationMap.lookup(shardOp.getOperand());
} else {
// Insert resharding.
- assert(!srcShardOp.getAnnotateForUsers() && shardOp.getAnnotateForUsers());
TypedValue<ShapedType> srcSpmdValue = cast<TypedValue<ShapedType>>(
spmdizationMap.lookup(srcShardOp.getOperand()));
targetSpmdValue = reshard(builder, srcShardOp, shardOp, srcSpmdValue,
diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
index 24a6d5b5d684..110873011fe3 100644
--- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
+++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
@@ -1789,7 +1789,7 @@ LogicalResult DistributeOp::verify() {
}
//===----------------------------------------------------------------------===//
-// ReductionOp
+// DeclareReductionOp
//===----------------------------------------------------------------------===//
static ParseResult parseAtomicReductionRegion(OpAsmParser &parser,
@@ -1881,21 +1881,6 @@ LogicalResult DeclareReductionOp::verifyRegions() {
return success();
}
-LogicalResult ReductionOp::verify() {
- auto *op = (*this)->getParentWithTrait<ReductionClauseInterface::Trait>();
- if (!op)
- return emitOpError() << "must be used within an operation supporting "
- "reduction clause interface";
- while (op) {
- for (const auto &var :
- cast<ReductionClauseInterface>(op).getAllReductionVars())
- if (var == getAccumulator())
- return success();
- op = op->getParentWithTrait<ReductionClauseInterface::Trait>();
- }
- return emitOpError() << "the accumulator is not used by the parent";
-}
-
//===----------------------------------------------------------------------===//
// TaskOp
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/Polynomial/IR/PolynomialAttributes.cpp b/mlir/lib/Dialect/Polynomial/IR/PolynomialAttributes.cpp
index 890ce5226c30..cc7d3172b1a1 100644
--- a/mlir/lib/Dialect/Polynomial/IR/PolynomialAttributes.cpp
+++ b/mlir/lib/Dialect/Polynomial/IR/PolynomialAttributes.cpp
@@ -101,7 +101,7 @@ parseMonomial(AsmParser &parser, Monomial &monomial, llvm::StringRef &variable,
return success();
}
-template <typename PolynoimalAttrTy, typename Monomial>
+template <typename Monomial>
LogicalResult
parsePolynomialAttr(AsmParser &parser, llvm::SmallVector<Monomial> &monomials,
llvm::StringSet<> &variables,
@@ -155,7 +155,7 @@ Attribute IntPolynomialAttr::parse(AsmParser &parser, Type type) {
llvm::SmallVector<IntMonomial> monomials;
llvm::StringSet<> variables;
- if (failed(parsePolynomialAttr<IntPolynomialAttr, IntMonomial>(
+ if (failed(parsePolynomialAttr<IntMonomial>(
parser, monomials, variables,
[&](IntMonomial &monomial) -> OptionalParseResult {
APInt parsedCoeff(apintBitWidth, 1);
@@ -175,7 +175,6 @@ Attribute IntPolynomialAttr::parse(AsmParser &parser, Type type) {
}
return IntPolynomialAttr::get(parser.getContext(), result.value());
}
-
Attribute FloatPolynomialAttr::parse(AsmParser &parser, Type type) {
if (failed(parser.parseLess()))
return {};
@@ -191,8 +190,8 @@ Attribute FloatPolynomialAttr::parse(AsmParser &parser, Type type) {
return OptionalParseResult(result);
};
- if (failed(parsePolynomialAttr<FloatPolynomialAttr, FloatMonomial>(
- parser, monomials, variables, parseAndStoreCoefficient))) {
+ if (failed(parsePolynomialAttr<FloatMonomial>(parser, monomials, variables,
+ parseAndStoreCoefficient))) {
return {};
}
diff --git a/mlir/lib/Dialect/Polynomial/IR/PolynomialCanonicalization.td b/mlir/lib/Dialect/Polynomial/IR/PolynomialCanonicalization.td
index 9d09799c1763..e37bcf76a20f 100644
--- a/mlir/lib/Dialect/Polynomial/IR/PolynomialCanonicalization.td
+++ b/mlir/lib/Dialect/Polynomial/IR/PolynomialCanonicalization.td
@@ -9,11 +9,14 @@
#ifndef POLYNOMIAL_CANONICALIZATION
#define POLYNOMIAL_CANONICALIZATION
-include "mlir/Dialect/Polynomial/IR/Polynomial.td"
include "mlir/Dialect/Arith/IR/ArithOps.td"
+include "mlir/Dialect/Polynomial/IR/Polynomial.td"
+include "mlir/IR/EnumAttr.td"
include "mlir/IR/OpBase.td"
include "mlir/IR/PatternBase.td"
+defvar DefOverflow = ConstantEnumCase<Arith_IntegerOverflowAttr, "none">;
+
// Get a -1 integer attribute of the same type as the polynomial SSA value's
// ring coefficient type.
def getMinusOne
@@ -39,4 +42,40 @@ def NTTAfterINTT : Pat<
[]
>;
+// NTTs are expensive, while addition is equally cheap in either the
+// coefficient or the NTT domain, so reducing the number of NTTs is optimal.
+// ntt(a) + ntt(b) -> ntt(a + b)
+def NTTOfAdd : Pat<
+ (Arith_AddIOp
+ (Polynomial_NTTOp $p1),
+ (Polynomial_NTTOp $p2),
+ $overflow),
+ (Polynomial_NTTOp (Polynomial_AddOp $p1, $p2)),
+ []
+>;
+// intt(a) + intt(b) -> intt(a + b)
+def INTTOfAdd : Pat<
+ (Polynomial_AddOp
+ (Polynomial_INTTOp $t1),
+ (Polynomial_INTTOp $t2)),
+ (Polynomial_INTTOp (Arith_AddIOp $t1, $t2, DefOverflow)),
+ []
+>;
+// repeated for sub
+def NTTOfSub : Pat<
+ (Arith_SubIOp
+ (Polynomial_NTTOp $p1),
+ (Polynomial_NTTOp $p2),
+ $overflow),
+ (Polynomial_NTTOp (Polynomial_SubOp $p1, $p2)),
+ []
+>;
+def INTTOfSub : Pat<
+ (Polynomial_SubOp
+ (Polynomial_INTTOp $t1),
+ (Polynomial_INTTOp $t2)),
+ (Polynomial_INTTOp (Arith_SubIOp $t1, $t2, DefOverflow)),
+ []
+>;
+
#endif // POLYNOMIAL_CANONICALIZATION
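For readers less familiar with DRR: each `Pat<>` above is shorthand for a rewrite pattern. A conceptual C++ sketch of `INTTOfAdd` follows; accessor and builder names (`getLhs`, `getRhs`, `getInput`) are illustrative and need not match the TableGen-generated code:

// Sketch only: polynomial.add(intt(a), intt(b)) -> intt(arith.addi(a, b)).
struct INTTOfAddSketch : OpRewritePattern<polynomial::AddOp> {
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(polynomial::AddOp op,
                                PatternRewriter &rewriter) const override {
    auto lhs = op.getLhs().getDefiningOp<polynomial::INTTOp>();
    auto rhs = op.getRhs().getDefiningOp<polynomial::INTTOp>();
    if (!lhs || !rhs)
      return failure();
    // Add in the NTT (tensor) domain, then take a single INTT.
    Value sum = rewriter.create<arith::AddIOp>(op.getLoc(), lhs.getInput(),
                                               rhs.getInput());
    rewriter.replaceOpWithNewOp<polynomial::INTTOp>(op, op.getType(), sum);
    return success();
  }
};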
diff --git a/mlir/lib/Dialect/Polynomial/IR/PolynomialOps.cpp b/mlir/lib/Dialect/Polynomial/IR/PolynomialOps.cpp
index 1a2439fe810b..3d302797ce51 100644
--- a/mlir/lib/Dialect/Polynomial/IR/PolynomialOps.cpp
+++ b/mlir/lib/Dialect/Polynomial/IR/PolynomialOps.cpp
@@ -186,6 +186,88 @@ LogicalResult INTTOp::verify() {
return verifyNTTOp(this->getOperation(), ring, tensorType);
}
+ParseResult ConstantOp::parse(OpAsmParser &parser, OperationState &result) {
+ // Using the built-in parser.parseAttribute requires the full
+ // #polynomial.typed_int_polynomial syntax, which is excessive.
+  // Instead, we parse the keyword `int` to signal an integer polynomial.
+ Type type;
+ if (succeeded(parser.parseOptionalKeyword("float"))) {
+ Attribute floatPolyAttr = FloatPolynomialAttr::parse(parser, nullptr);
+ if (floatPolyAttr) {
+ if (parser.parseColon() || parser.parseType(type))
+ return failure();
+ result.addAttribute("value",
+ TypedFloatPolynomialAttr::get(type, floatPolyAttr));
+ result.addTypes(type);
+ return success();
+ }
+ }
+
+ if (succeeded(parser.parseOptionalKeyword("int"))) {
+ Attribute intPolyAttr = IntPolynomialAttr::parse(parser, nullptr);
+ if (intPolyAttr) {
+ if (parser.parseColon() || parser.parseType(type))
+ return failure();
+
+ result.addAttribute("value",
+ TypedIntPolynomialAttr::get(type, intPolyAttr));
+ result.addTypes(type);
+ return success();
+ }
+ }
+
+ // In the worst case, still accept the verbose versions.
+ TypedIntPolynomialAttr typedIntPolyAttr;
+ OptionalParseResult res =
+ parser.parseOptionalAttribute<TypedIntPolynomialAttr>(
+ typedIntPolyAttr, "value", result.attributes);
+ if (res.has_value() && succeeded(res.value())) {
+ result.addTypes(typedIntPolyAttr.getType());
+ return success();
+ }
+
+ TypedFloatPolynomialAttr typedFloatPolyAttr;
+ res = parser.parseAttribute<TypedFloatPolynomialAttr>(
+ typedFloatPolyAttr, "value", result.attributes);
+ if (res.has_value() && succeeded(res.value())) {
+ result.addTypes(typedFloatPolyAttr.getType());
+ return success();
+ }
+
+ return failure();
+}
+
+void ConstantOp::print(OpAsmPrinter &p) {
+ p << " ";
+ if (auto intPoly = dyn_cast<TypedIntPolynomialAttr>(getValue())) {
+ p << "int";
+ intPoly.getValue().print(p);
+ } else if (auto floatPoly = dyn_cast<TypedFloatPolynomialAttr>(getValue())) {
+ p << "float";
+ floatPoly.getValue().print(p);
+ } else {
+ assert(false && "unexpected attribute type");
+ }
+ p << " : ";
+ p.printType(getOutput().getType());
+}
+
+LogicalResult ConstantOp::inferReturnTypes(
+ MLIRContext *context, std::optional<mlir::Location> location,
+ ConstantOp::Adaptor adaptor,
+ llvm::SmallVectorImpl<mlir::Type> &inferredReturnTypes) {
+ Attribute operand = adaptor.getValue();
+ if (auto intPoly = dyn_cast<TypedIntPolynomialAttr>(operand)) {
+ inferredReturnTypes.push_back(intPoly.getType());
+ } else if (auto floatPoly = dyn_cast<TypedFloatPolynomialAttr>(operand)) {
+ inferredReturnTypes.push_back(floatPoly.getType());
+ } else {
+ assert(false && "unexpected attribute type");
+ return failure();
+ }
+ return success();
+}
+
//===----------------------------------------------------------------------===//
// TableGen'd canonicalization patterns
//===----------------------------------------------------------------------===//
@@ -201,10 +283,10 @@ void SubOp::getCanonicalizationPatterns(RewritePatternSet &results,
void NTTOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
- results.add<NTTAfterINTT>(context);
+ results.add<NTTAfterINTT, NTTOfAdd, NTTOfSub>(context);
}
void INTTOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
- results.add<INTTAfterNTT>(context);
+ results.add<INTTAfterNTT, INTTOfAdd, INTTOfSub>(context);
}
diff --git a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
index 1a84a59ddb69..a72dafe72517 100644
--- a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
@@ -182,6 +182,9 @@ static LogicalResult generateLoopNestUsingForOp(
if (loops.empty())
return success();
+ assert(tiledResults.size() == destinationTensors.size() &&
+ "Number of results of body should be equal to number of iter args");
+
// 6. Yield all the results of the tiled operation.
SmallVector<Value> yieldedValues;
for (auto [tiledValue, destinationTensor, resultOffset, resultSize] :
@@ -694,9 +697,6 @@ mlir::scf::tileReductionUsingScf(RewriterBase &b,
tileSizesVector.append(iterationDomain.size() - tileSizesVector.size(),
zero);
}
- if (op->getNumResults() != 1)
- return b.notifyMatchFailure(
- op, "don't support ops with multiple results for now");
SmallVector<utils::IteratorType> iterators =
tilingInterfaceOp.getLoopIteratorTypes();
@@ -708,12 +708,13 @@ mlir::scf::tileReductionUsingScf(RewriterBase &b,
}
// 2. create the inital tensor value.
- FailureOr<Operation *> identityTensor =
+ FailureOr<SmallVector<Value>> maybeInitTensors =
op.generateInitialTensorForPartialReduction(b, loc, tileSizesVector,
reductionDims);
- if (failed(identityTensor))
- return b.notifyMatchFailure(op,
- "cannot create a tensor of identity value.");
+ if (failed(maybeInitTensors)) {
+    return b.notifyMatchFailure(op, "failed to create initial tensors.");
+ }
+ SmallVector<Value> &initTensors = maybeInitTensors.value();
// 3. Define the callback to use for generating the inner most tile loop body.
Operation *parallelOp = nullptr;
@@ -753,29 +754,26 @@ mlir::scf::tileReductionUsingScf(RewriterBase &b,
tiledResult.append(parallelOp->result_begin(), parallelOp->result_end());
// 4d. Compute the offsets and sizes needed to insert the result of the
// tiled value back into destination before yielding the destination.
- SmallVector<OpFoldResult> outOffsets(offsets.size(), b.getIndexAttr(0));
- resultOffsets.emplace_back(std::move(outOffsets));
-
- SmallVector<OpFoldResult> outSizes;
- for (size_t i = 0; i < offsets.size(); i++) {
- outSizes.push_back(
- tensor::getMixedSize(b, loc, parallelOp->getResult(0), i));
+ for (int resultIdx : llvm::seq<int>(0, parallelOp->getNumResults())) {
+ SmallVector<OpFoldResult> outOffsets(offsets.size(), b.getIndexAttr(0));
+ resultOffsets.emplace_back(std::move(outOffsets));
+
+ SmallVector<OpFoldResult> outSizes;
+ for (size_t i = 0; i < offsets.size(); i++) {
+ outSizes.push_back(
+ tensor::getMixedSize(b, loc, parallelOp->getResult(resultIdx), i));
+ }
+ resultSizes.emplace_back(std::move(outSizes));
}
- resultSizes.emplace_back(std::move(outSizes));
return success();
};
// 5. Generate the tiled implementation using the destination tensors.
- SmallVector<Value> destinationTensors =
- llvm::map_to_vector(identityTensor.value()->getResults(),
- [](OpResult res) -> Value { return res; });
-
SmallVector<LoopLikeOpInterface> loops;
scf::SCFTilingOptions options;
options.setLoopType(scf::SCFTilingOptions::LoopType::ForOp);
if (failed(generateLoopNest(b, loc, options, iterationDomain, tileSizesVector,
- destinationTensors, innerYieldTiledValuesFn,
- loops)))
+ initTensors, innerYieldTiledValuesFn, loops)))
return b.notifyMatchFailure(op, "failed to tile for parallel reduction");
SmallVector<Value> replacements = llvm::map_to_vector(
@@ -787,7 +785,7 @@ mlir::scf::tileReductionUsingScf(RewriterBase &b,
b.replaceOp(op, mergeOp->getResults());
SCFReductionTilingResult results;
- results.initialOp = *identityTensor;
+ results.initialValues = initTensors;
results.loops = loops;
results.parallelTiledOp = parallelOp;
results.mergeOp = mergeOp;
diff --git a/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp b/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
index 39f5cf1a7508..bb6c65a6f6ca 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
@@ -37,7 +37,7 @@ FailureOr<uint64_t> LvlTypeParser::parseLvlType(AsmParser &parser) const {
uint64_t properties = 0;
SmallVector<unsigned> structured;
- if (base.compare("structured") == 0) {
+ if (base == "structured") {
ParseResult res = parser.parseCommaSeparatedList(
mlir::OpAsmParser::Delimiter::OptionalSquare,
[&]() -> ParseResult { return parseStructured(parser, &structured); },
@@ -60,18 +60,18 @@ FailureOr<uint64_t> LvlTypeParser::parseLvlType(AsmParser &parser) const {
FAILURE_IF_FAILED(res)
// Set the base bit for properties.
- if (base.compare("dense") == 0) {
+ if (base == "dense") {
properties |= static_cast<uint64_t>(LevelFormat::Dense);
- } else if (base.compare("batch") == 0) {
+ } else if (base == "batch") {
properties |= static_cast<uint64_t>(LevelFormat::Batch);
- } else if (base.compare("compressed") == 0) {
+ } else if (base == "compressed") {
properties |= static_cast<uint64_t>(LevelFormat::Compressed);
- } else if (base.compare("structured") == 0) {
+ } else if (base == "structured") {
properties |= static_cast<uint64_t>(LevelFormat::NOutOfM);
properties |= nToBits(structured[0]) | mToBits(structured[1]);
- } else if (base.compare("loose_compressed") == 0) {
+ } else if (base == "loose_compressed") {
properties |= static_cast<uint64_t>(LevelFormat::LooseCompressed);
- } else if (base.compare("singleton") == 0) {
+ } else if (base == "singleton") {
properties |= static_cast<uint64_t>(LevelFormat::Singleton);
} else {
parser.emitError(loc, "unknown level format: ") << base;
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 8a6df82abb31..8545c7b9af8f 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -2609,6 +2609,9 @@ OpFoldResult InsertSliceOp::fold(FoldAdaptor) {
return getResult();
if (auto result = foldInsertAfterExtractSlice(*this))
return result;
+ if (llvm::any_of(getMixedSizes(),
+ [](OpFoldResult ofr) { return isConstantIntValue(ofr, 0); }))
+ return getDest();
return OpFoldResult();
}
diff --git a/mlir/lib/Dialect/Tensor/Transforms/EmptyOpPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/EmptyOpPatterns.cpp
index 7a707e749e69..43ad0acaf742 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/EmptyOpPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/EmptyOpPatterns.cpp
@@ -93,6 +93,49 @@ private:
bool foldSingleUseOnly = false;
};
+/// tensor.empty does not define any tensor contents, so an unpadded pack
+/// can be folded away.
+struct FoldEmptyTensorWithPackOp : public OpRewritePattern<PackOp> {
+ using OpRewritePattern<PackOp>::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(PackOp packOp,
+ PatternRewriter &rewriter) const override {
+ // Check for tensor.empty source.
+ auto emptyOp = packOp.getSource().getDefiningOp<EmptyOp>();
+ if (!emptyOp)
+ return failure();
+
+ // Check for padding.
+ // Packing with padding cannot be simply removed.
+ if (packOp.getPaddingValue())
+ return rewriter.notifyMatchFailure(packOp, "expects no padding value");
+
+ // Replace the pack directly with its destination.
+ rewriter.replaceOp(packOp, packOp.getDest());
+
+ return success();
+ }
+};
+
+/// tensor.empty does not define any tensor contents, so an unpack
+/// can be folded away.
+struct FoldEmptyTensorWithUnPackOp : public OpRewritePattern<UnPackOp> {
+ using OpRewritePattern<UnPackOp>::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(UnPackOp unPackOp,
+ PatternRewriter &rewriter) const override {
+ // Check for tensor.empty source.
+ auto emptyOp = unPackOp.getSource().getDefiningOp<EmptyOp>();
+ if (!emptyOp)
+ return failure();
+
+ // Replace the unpack directly with its destination.
+ rewriter.replaceOp(unPackOp, unPackOp.getDest());
+
+ return success();
+ }
+};
+
} // namespace
void mlir::tensor::populateFoldTensorEmptyPatterns(RewritePatternSet &patterns,
@@ -101,4 +144,6 @@ void mlir::tensor::populateFoldTensorEmptyPatterns(RewritePatternSet &patterns,
FoldEmptyTensorWithReshapeOp<tensor::ExpandShapeOp>,
FoldEmptyTensorWithReshapeOp<tensor::CollapseShapeOp>>(
patterns.getContext(), /*benefit=*/1, foldSingleUseOnly);
+ patterns.add<FoldEmptyTensorWithPackOp, FoldEmptyTensorWithUnPackOp>(
+ patterns.getContext(), /*benefit=*/1);
}
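These folds are typically driven greedily; a minimal sketch inside a pass, assuming `op` is the root operation being processed:

// Sketch: apply the tensor.empty folding patterns (including the two added
// above) until fixpoint.
RewritePatternSet patterns(op->getContext());
tensor::populateFoldTensorEmptyPatterns(patterns, /*foldSingleUseOnly=*/false);
if (failed(applyPatternsAndFoldGreedily(op, std::move(patterns))))
  signalPassFailure();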
diff --git a/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp
index ebcb34e9ef02..5d6e3ec9756a 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp
@@ -91,7 +91,8 @@ struct SimplifyPackToExpandShape : public OpRewritePattern<PackOp> {
RankedTensorType sourceType = packOp.getSourceType();
if (failed(isPackOnInnerMostDim(rewriter, packOp)) &&
failed(isPackOn1D(rewriter, packOp, sourceType.getShape(),
- packOp.getStaticTiles()))) {
+ packOp.getStaticTiles())) &&
+ !packOp.isLikePad()) {
return failure();
}
@@ -152,7 +153,8 @@ struct SimplifyUnPackToCollapseShape : public OpRewritePattern<UnPackOp> {
RankedTensorType destType = unpackOp.getDestType();
if (failed(isUnpackOnInnerMostDim(rewriter, unpackOp)) &&
failed(isPackOn1D(rewriter, unpackOp, destType.getShape(),
- unpackOp.getStaticTiles()))) {
+ unpackOp.getStaticTiles())) &&
+ !unpackOp.isLikeUnPad()) {
return failure();
}
diff --git a/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
index d40e5f33d2a7..6cf0f845f59d 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
@@ -79,12 +79,42 @@ struct FoldInsertOfRankReducingInsert : public OpRewritePattern<OpTy> {
return success();
}
};
+
+/// Fold expand_shape which only adds static dimensions of size `1`
+/// into insert_slice.
+template <typename OpTy>
+struct FoldPaddingExpandIntoInsert : public OpRewritePattern<OpTy> {
+ using OpRewritePattern<OpTy>::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(OpTy insertSliceOp,
+ PatternRewriter &rewriter) const override {
+ auto expandShapeOp = insertSliceOp.getSource()
+ .template getDefiningOp<tensor::ExpandShapeOp>();
+ if (!expandShapeOp)
+ return failure();
+
+ // Only fold away simple expansion where all added dimensions have static
+ // size `1`.
+ SliceVerificationResult res = isRankReducedType(
+ expandShapeOp.getResultType(), expandShapeOp.getSrcType());
+ if (res != SliceVerificationResult::Success)
+ return rewriter.notifyMatchFailure(insertSliceOp,
+ "expected rank increasing expansion");
+
+ rewriter.modifyOpInPlace(insertSliceOp, [&]() {
+ insertSliceOp.getSourceMutable().assign(expandShapeOp.getSrc());
+ });
+ return success();
+ }
+};
} // namespace
void mlir::tensor::populateReassociativeReshapeFoldingPatterns(
RewritePatternSet &patterns) {
patterns.add<FoldExpandOfRankReducingExtract,
FoldInsertOfRankReducingInsert<tensor::InsertSliceOp>,
- FoldInsertOfRankReducingInsert<tensor::ParallelInsertSliceOp>>(
+ FoldInsertOfRankReducingInsert<tensor::ParallelInsertSliceOp>,
+ FoldPaddingExpandIntoInsert<tensor::InsertSliceOp>,
+ FoldPaddingExpandIntoInsert<tensor::ParallelInsertSliceOp>>(
patterns.getContext());
}
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorInterleave.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorInterleave.cpp
index 5326760c9b4e..77c97b2f1497 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorInterleave.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorInterleave.cpp
@@ -30,7 +30,7 @@ namespace {
/// Example:
///
/// ```mlir
-/// vector.interleave %a, %b : vector<1x2x3x4xi64>
+/// vector.interleave %a, %b : vector<1x2x3x4xi64> -> vector<1x2x3x8xi64>
/// ```
/// Would be unrolled to:
/// ```mlir
@@ -39,14 +39,15 @@ namespace {
/// : vector<4xi64> from vector<1x2x3x4xi64> |
/// %1 = vector.extract %b[0, 0, 0] |
/// : vector<4xi64> from vector<1x2x3x4xi64> | - Repeated 6x for
-/// %2 = vector.interleave %0, %1 : vector<4xi64> | all leading positions
+/// %2 = vector.interleave %0, %1 : | all leading positions
+///        vector<4xi64> -> vector<8xi64>     |
/// %3 = vector.insert %2, %result [0, 0, 0] |
/// : vector<8xi64> into vector<1x2x3x8xi64> ┘
/// ```
///
/// Note: If any leading dimension before the `targetRank` is scalable the
/// unrolling will stop before the scalable dimension.
-class UnrollInterleaveOp : public OpRewritePattern<vector::InterleaveOp> {
+class UnrollInterleaveOp final : public OpRewritePattern<vector::InterleaveOp> {
public:
UnrollInterleaveOp(int64_t targetRank, MLIRContext *context,
PatternBenefit benefit = 1)
@@ -84,7 +85,7 @@ private:
/// Example:
///
/// ```mlir
-/// vector.interleave %a, %b : vector<7xi16>
+/// vector.interleave %a, %b : vector<7xi16> -> vector<14xi16>
/// ```
///
/// Is rewritten into:
@@ -93,10 +94,8 @@ private:
/// vector.shuffle %arg0, %arg1 [0, 7, 1, 8, 2, 9, 3, 10, 4, 11, 5, 12, 6, 13]
/// : vector<7xi16>, vector<7xi16>
/// ```
-class InterleaveToShuffle : public OpRewritePattern<vector::InterleaveOp> {
-public:
- InterleaveToShuffle(MLIRContext *context, PatternBenefit benefit = 1)
- : OpRewritePattern(context, benefit) {};
+struct InterleaveToShuffle final : OpRewritePattern<vector::InterleaveOp> {
+ using OpRewritePattern::OpRewritePattern;
LogicalResult matchAndRewrite(vector::InterleaveOp op,
PatternRewriter &rewriter) const override {
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
index b30b43d70bf0..c59012266ceb 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
@@ -90,14 +90,19 @@ namespace {
/// Note that an alternative is to transform it to linalg.transpose +
/// vector.transfer_read to do the transpose in memory instead.
struct TransferReadPermutationLowering
- : public OpRewritePattern<vector::TransferReadOp> {
- using OpRewritePattern::OpRewritePattern;
+ : public MaskableOpRewritePattern<vector::TransferReadOp> {
+ using MaskableOpRewritePattern::MaskableOpRewritePattern;
- LogicalResult matchAndRewrite(vector::TransferReadOp op,
- PatternRewriter &rewriter) const override {
+ FailureOr<mlir::Value>
+ matchAndRewriteMaskableOp(vector::TransferReadOp op,
+ MaskingOpInterface maskOp,
+ PatternRewriter &rewriter) const override {
// TODO: support 0-d corner case.
if (op.getTransferRank() == 0)
return rewriter.notifyMatchFailure(op, "0-d corner case not supported");
+ // TODO: Support transfer_read inside MaskOp case.
+ if (maskOp)
+ return rewriter.notifyMatchFailure(op, "Masked case not supported");
SmallVector<unsigned> permutation;
AffineMap map = op.getPermutationMap();
@@ -142,9 +147,9 @@ struct TransferReadPermutationLowering
// Transpose result of transfer_read.
SmallVector<int64_t> transposePerm(permutation.begin(), permutation.end());
- rewriter.replaceOpWithNewOp<vector::TransposeOp>(op, newRead,
- transposePerm);
- return success();
+ return rewriter
+ .create<vector::TransposeOp>(op.getLoc(), newRead, transposePerm)
+ .getResult();
}
};
@@ -165,14 +170,19 @@ struct TransferReadPermutationLowering
/// %v = vector.transfer_write %tmp ...
/// permutation_map: (d0, d1, d2, d3) -> (d2, d3)
struct TransferWritePermutationLowering
- : public OpRewritePattern<vector::TransferWriteOp> {
- using OpRewritePattern::OpRewritePattern;
+ : public MaskableOpRewritePattern<vector::TransferWriteOp> {
+ using MaskableOpRewritePattern::MaskableOpRewritePattern;
- LogicalResult matchAndRewrite(vector::TransferWriteOp op,
- PatternRewriter &rewriter) const override {
+ FailureOr<mlir::Value>
+ matchAndRewriteMaskableOp(vector::TransferWriteOp op,
+ MaskingOpInterface maskOp,
+ PatternRewriter &rewriter) const override {
// TODO: support 0-d corner case.
if (op.getTransferRank() == 0)
return rewriter.notifyMatchFailure(op, "0-d corner case not supported");
+ // TODO: Support transfer_write inside MaskOp case.
+ if (maskOp)
+ return rewriter.notifyMatchFailure(op, "Masked case not supported");
SmallVector<unsigned> permutation;
AffineMap map = op.getPermutationMap();
@@ -207,11 +217,14 @@ struct TransferWritePermutationLowering
op.getLoc(), op.getVector(), indices);
auto newMap = AffineMap::getMinorIdentityMap(
map.getNumDims(), map.getNumResults(), rewriter.getContext());
- rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
- op, newVec, op.getSource(), op.getIndices(), AffineMapAttr::get(newMap),
- op.getMask(), newInBoundsAttr);
-
- return success();
+ auto newWrite = rewriter.create<vector::TransferWriteOp>(
+ op.getLoc(), newVec, op.getSource(), op.getIndices(),
+ AffineMapAttr::get(newMap), op.getMask(), newInBoundsAttr);
+ if (newWrite.hasPureTensorSemantics())
+ return newWrite.getResult();
+ // In the memref case there's no return value. Use empty value to signal
+ // success.
+ return Value();
}
};
@@ -231,14 +244,19 @@ struct TransferWritePermutationLowering
/// vector<1x8x16xf32>
/// ```
struct TransferWriteNonPermutationLowering
- : public OpRewritePattern<vector::TransferWriteOp> {
- using OpRewritePattern::OpRewritePattern;
+ : public MaskableOpRewritePattern<vector::TransferWriteOp> {
+ using MaskableOpRewritePattern::MaskableOpRewritePattern;
- LogicalResult matchAndRewrite(vector::TransferWriteOp op,
- PatternRewriter &rewriter) const override {
+ FailureOr<mlir::Value>
+ matchAndRewriteMaskableOp(vector::TransferWriteOp op,
+ MaskingOpInterface maskOp,
+ PatternRewriter &rewriter) const override {
// TODO: support 0-d corner case.
if (op.getTransferRank() == 0)
return rewriter.notifyMatchFailure(op, "0-d corner case not supported");
+ // TODO: Support transfer_write inside MaskOp case.
+ if (maskOp)
+ return rewriter.notifyMatchFailure(op, "Masked case not supported");
SmallVector<unsigned> permutation;
AffineMap map = op.getPermutationMap();
@@ -285,10 +303,14 @@ struct TransferWriteNonPermutationLowering
newInBoundsValues.push_back(op.isDimInBounds(i));
}
ArrayAttr newInBoundsAttr = rewriter.getBoolArrayAttr(newInBoundsValues);
- rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
- op, newVec, op.getSource(), op.getIndices(), AffineMapAttr::get(newMap),
- newMask, newInBoundsAttr);
- return success();
+ auto newWrite = rewriter.create<vector::TransferWriteOp>(
+ op.getLoc(), newVec, op.getSource(), op.getIndices(),
+ AffineMapAttr::get(newMap), newMask, newInBoundsAttr);
+ if (newWrite.hasPureTensorSemantics())
+ return newWrite.getResult();
+ // In the memref case there's no return value. Use empty value to signal
+ // success.
+ return Value();
}
};
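All three patterns above follow the same `MaskableOpRewritePattern` protocol: return the replacement `Value` on success, or a null `Value` when the op produces no result (the memref case). A skeleton of the convention, as a sketch rather than library documentation:

// Sketch of the protocol used by the transfer lowerings above.
struct MyTransferLowering final
    : MaskableOpRewritePattern<vector::TransferReadOp> {
  using MaskableOpRewritePattern::MaskableOpRewritePattern;

  FailureOr<mlir::Value>
  matchAndRewriteMaskableOp(vector::TransferReadOp op,
                            MaskingOpInterface maskOp,
                            PatternRewriter &rewriter) const override {
    // Bail out on masked ops until they are supported, as the patterns do.
    if (maskOp)
      return rewriter.notifyMatchFailure(op, "masked case not supported");
    // ... rewrite, then return the replacement value (or Value() if none).
    return Value();
  }
};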
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
index 6025c4ad7c14..59b6cb3ae667 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
@@ -1090,7 +1090,7 @@ struct RewriteExtOfBitCast : OpRewritePattern<ExtOpType> {
/// %1 = arith.shli %0, 4 : vector<4xi8>
/// %2 = arith.shrsi %1, 4 : vector<4xi8>
/// %3 = arith.shrsi %0, 4 : vector<4xi8>
-/// %4 = vector.interleave %2, %3 : vector<4xi8>
+/// %4 = vector.interleave %2, %3 : vector<4xi8> -> vector<8xi8>
/// %5 = arith.extsi %4 : vector<8xi8> to vector<8xi32>
///
/// arith.sitofp %in : vector<8xi4> to vector<8xf32>
@@ -1099,7 +1099,7 @@ struct RewriteExtOfBitCast : OpRewritePattern<ExtOpType> {
/// %1 = arith.shli %0, 4 : vector<4xi8>
/// %2 = arith.shrsi %1, 4 : vector<4xi8>
/// %3 = arith.shrsi %0, 4 : vector<4xi8>
-/// %4 = vector.interleave %2, %3 : vector<4xi8>
+/// %4 = vector.interleave %2, %3 : vector<4xi8> -> vector<8xi8>
/// %5 = arith.sitofp %4 : vector<8xi8> to vector<8xf32>
///
/// Example (unsigned):
@@ -1108,7 +1108,7 @@ struct RewriteExtOfBitCast : OpRewritePattern<ExtOpType> {
/// %0 = vector.bitcast %in : vector<8xi4> to vector<4xi8>
/// %1 = arith.andi %0, 15 : vector<4xi8>
/// %2 = arith.shrui %0, 4 : vector<4xi8>
-/// %3 = vector.interleave %1, %2 : vector<4xi8>
+/// %3 = vector.interleave %1, %2 : vector<4xi8> -> vector<8xi8>
/// %4 = arith.extui %3 : vector<8xi8> to vector<8xi32>
///
template <typename ConversionOpType, bool isSigned>
diff --git a/mlir/lib/IR/Operation.cpp b/mlir/lib/IR/Operation.cpp
index 0feb078db297..b51357198b1c 100644
--- a/mlir/lib/IR/Operation.cpp
+++ b/mlir/lib/IR/Operation.cpp
@@ -801,6 +801,8 @@ ParseResult OpState::genericParseProperties(OpAsmParser &parser,
/// 'elidedProps'
void OpState::genericPrintProperties(OpAsmPrinter &p, Attribute properties,
ArrayRef<StringRef> elidedProps) {
+ if (!properties)
+ return;
auto dictAttr = dyn_cast_or_null<::mlir::DictionaryAttr>(properties);
if (dictAttr && !elidedProps.empty()) {
ArrayRef<NamedAttribute> attrs = dictAttr.getValue();
diff --git a/mlir/lib/Interfaces/Utils/InferIntRangeCommon.cpp b/mlir/lib/Interfaces/Utils/InferIntRangeCommon.cpp
index 6af229cae10a..fe1a67d62873 100644
--- a/mlir/lib/Interfaces/Utils/InferIntRangeCommon.cpp
+++ b/mlir/lib/Interfaces/Utils/InferIntRangeCommon.cpp
@@ -178,18 +178,24 @@ ConstantIntRanges mlir::intrange::truncRange(const ConstantIntRanges &range,
//===----------------------------------------------------------------------===//
ConstantIntRanges
-mlir::intrange::inferAdd(ArrayRef<ConstantIntRanges> argRanges) {
+mlir::intrange::inferAdd(ArrayRef<ConstantIntRanges> argRanges,
+ OverflowFlags ovfFlags) {
const ConstantIntRanges &lhs = argRanges[0], &rhs = argRanges[1];
- ConstArithFn uadd = [](const APInt &a,
- const APInt &b) -> std::optional<APInt> {
+
+ std::function uadd = [=](const APInt &a,
+ const APInt &b) -> std::optional<APInt> {
bool overflowed = false;
- APInt result = a.uadd_ov(b, overflowed);
+ APInt result = any(ovfFlags & OverflowFlags::Nuw)
+ ? a.uadd_sat(b)
+ : a.uadd_ov(b, overflowed);
return overflowed ? std::optional<APInt>() : result;
};
- ConstArithFn sadd = [](const APInt &a,
- const APInt &b) -> std::optional<APInt> {
+ std::function sadd = [=](const APInt &a,
+ const APInt &b) -> std::optional<APInt> {
bool overflowed = false;
- APInt result = a.sadd_ov(b, overflowed);
+ APInt result = any(ovfFlags & OverflowFlags::Nsw)
+ ? a.sadd_sat(b)
+ : a.sadd_ov(b, overflowed);
return overflowed ? std::optional<APInt>() : result;
};
@@ -205,19 +211,24 @@ mlir::intrange::inferAdd(ArrayRef<ConstantIntRanges> argRanges) {
//===----------------------------------------------------------------------===//
ConstantIntRanges
-mlir::intrange::inferSub(ArrayRef<ConstantIntRanges> argRanges) {
+mlir::intrange::inferSub(ArrayRef<ConstantIntRanges> argRanges,
+ OverflowFlags ovfFlags) {
const ConstantIntRanges &lhs = argRanges[0], &rhs = argRanges[1];
- ConstArithFn usub = [](const APInt &a,
- const APInt &b) -> std::optional<APInt> {
+ std::function usub = [=](const APInt &a,
+ const APInt &b) -> std::optional<APInt> {
bool overflowed = false;
- APInt result = a.usub_ov(b, overflowed);
+ APInt result = any(ovfFlags & OverflowFlags::Nuw)
+ ? a.usub_sat(b)
+ : a.usub_ov(b, overflowed);
return overflowed ? std::optional<APInt>() : result;
};
- ConstArithFn ssub = [](const APInt &a,
- const APInt &b) -> std::optional<APInt> {
+ std::function ssub = [=](const APInt &a,
+ const APInt &b) -> std::optional<APInt> {
bool overflowed = false;
- APInt result = a.ssub_ov(b, overflowed);
+ APInt result = any(ovfFlags & OverflowFlags::Nsw)
+ ? a.ssub_sat(b)
+ : a.ssub_ov(b, overflowed);
return overflowed ? std::optional<APInt>() : result;
};
ConstantIntRanges urange = computeBoundsBy(
@@ -232,19 +243,24 @@ mlir::intrange::inferSub(ArrayRef<ConstantIntRanges> argRanges) {
//===----------------------------------------------------------------------===//
ConstantIntRanges
-mlir::intrange::inferMul(ArrayRef<ConstantIntRanges> argRanges) {
+mlir::intrange::inferMul(ArrayRef<ConstantIntRanges> argRanges,
+ OverflowFlags ovfFlags) {
const ConstantIntRanges &lhs = argRanges[0], &rhs = argRanges[1];
- ConstArithFn umul = [](const APInt &a,
- const APInt &b) -> std::optional<APInt> {
+ std::function umul = [=](const APInt &a,
+ const APInt &b) -> std::optional<APInt> {
bool overflowed = false;
- APInt result = a.umul_ov(b, overflowed);
+ APInt result = any(ovfFlags & OverflowFlags::Nuw)
+ ? a.umul_sat(b)
+ : a.umul_ov(b, overflowed);
return overflowed ? std::optional<APInt>() : result;
};
- ConstArithFn smul = [](const APInt &a,
- const APInt &b) -> std::optional<APInt> {
+ std::function smul = [=](const APInt &a,
+ const APInt &b) -> std::optional<APInt> {
bool overflowed = false;
- APInt result = a.smul_ov(b, overflowed);
+ APInt result = any(ovfFlags & OverflowFlags::Nsw)
+ ? a.smul_sat(b)
+ : a.smul_ov(b, overflowed);
return overflowed ? std::optional<APInt>() : result;
};
@@ -542,32 +558,35 @@ mlir::intrange::inferXor(ArrayRef<ConstantIntRanges> argRanges) {
//===----------------------------------------------------------------------===//
ConstantIntRanges
-mlir::intrange::inferShl(ArrayRef<ConstantIntRanges> argRanges) {
+mlir::intrange::inferShl(ArrayRef<ConstantIntRanges> argRanges,
+ OverflowFlags ovfFlags) {
const ConstantIntRanges &lhs = argRanges[0], &rhs = argRanges[1];
- const APInt &lhsSMin = lhs.smin(), &lhsSMax = lhs.smax(),
- &lhsUMax = lhs.umax(), &rhsUMin = rhs.umin(),
- &rhsUMax = rhs.umax();
+ const APInt &rhsUMin = rhs.umin(), &rhsUMax = rhs.umax();
- ConstArithFn shl = [](const APInt &l,
- const APInt &r) -> std::optional<APInt> {
- return r.uge(r.getBitWidth()) ? std::optional<APInt>() : l.shl(r);
+ // The signed/unsigned overflow behavior of shl by `rhs` matches a mul with
+ // 2^rhs.
+ std::function ushl = [=](const APInt &l,
+ const APInt &r) -> std::optional<APInt> {
+ bool overflowed = false;
+ APInt result = any(ovfFlags & OverflowFlags::Nuw)
+ ? l.ushl_sat(r)
+ : l.ushl_ov(r, overflowed);
+ return overflowed ? std::optional<APInt>() : result;
+ };
+ std::function sshl = [=](const APInt &l,
+ const APInt &r) -> std::optional<APInt> {
+ bool overflowed = false;
+ APInt result = any(ovfFlags & OverflowFlags::Nsw)
+ ? l.sshl_sat(r)
+ : l.sshl_ov(r, overflowed);
+ return overflowed ? std::optional<APInt>() : result;
};
-
- // The minMax inference does not work when there is danger of overflow. In the
- // signed case, this leads to the obvious problem that the sign bit might
- // change. In the unsigned case, it also leads to problems because the largest
- // LHS shifted by the largest RHS does not necessarily result in the largest
- // result anymore.
- assert(rhsUMax.isNonNegative() && "Unexpected negative shift count");
- if (rhsUMax.uge(lhsSMin.getNumSignBits()) ||
- rhsUMax.uge(lhsSMax.getNumSignBits()))
- return ConstantIntRanges::maxRange(lhsUMax.getBitWidth());
ConstantIntRanges urange =
- minMaxBy(shl, {lhs.umin(), lhsUMax}, {rhsUMin, rhsUMax},
+ minMaxBy(ushl, {lhs.umin(), lhs.umax()}, {rhsUMin, rhsUMax},
/*isSigned=*/false);
ConstantIntRanges srange =
- minMaxBy(shl, {lhsSMin, lhsSMax}, {rhsUMin, rhsUMax},
+ minMaxBy(sshl, {lhs.smin(), lhs.smax()}, {rhsUMin, rhsUMax},
/*isSigned=*/true);
return urange.intersection(srange);
}
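A worked instance of the nuw rule used above, with i8 values chosen for concreteness: for 200 + 100, the wrapping add overflows, so no bound can be derived from it, while under nuw the saturating add clamps to 255, which remains a sound bound because an overflowing nuw add is poison:

#include "llvm/ADT/APInt.h"
using llvm::APInt;

// 200 + 100 in i8: the wrapping add overflows; the saturating add clamps.
void demo() {
  APInt a(/*numBits=*/8, 200), b(/*numBits=*/8, 100);
  bool overflowed = false;
  APInt wrapped = a.uadd_ov(b, overflowed); // overflowed == true: no bound
  APInt clamped = a.uadd_sat(b);            // 255, a valid bound under nuw
  (void)wrapped;
  (void)clamped;
}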
diff --git a/mlir/lib/Pass/IRPrinting.cpp b/mlir/lib/Pass/IRPrinting.cpp
index 72b94eeb0123..a12bdd935a48 100644
--- a/mlir/lib/Pass/IRPrinting.cpp
+++ b/mlir/lib/Pass/IRPrinting.cpp
@@ -9,8 +9,12 @@
#include "PassDetail.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Pass/PassManager.h"
-#include "llvm/Support/Format.h"
+#include "mlir/Support/FileUtilities.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/ToolOutputFile.h"
using namespace mlir;
using namespace mlir::detail;
@@ -200,6 +204,149 @@ struct BasicIRPrinterConfig : public PassManager::IRPrinterConfig {
};
} // namespace
+/// Return pairs of (sanitized op name, symbol name) for `op` and all parent
+/// operations. Op names are sanitized by replacing periods with underscores.
+/// The pairs are returned in order of outer-most to inner-most (ancestors of
+/// `op` first, `op` last). This information is used to construct the directory
+/// tree for the `FileTreeIRPrinterConfig` below.
+/// Also returns the file name to use for the printed IR, prefixed with the
+/// per-ancestor counters. The counter for `op` will be incremented by this
+/// call.
+static std::pair<SmallVector<std::pair<std::string, StringRef>>, std::string>
+getOpAndSymbolNames(Operation *op, StringRef passName,
+ llvm::DenseMap<Operation *, unsigned> &counters) {
+ SmallVector<std::pair<std::string, StringRef>> pathElements;
+ SmallVector<unsigned> countPrefix;
+
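+  // Seed unseen ops with -1 (which wraps, as the counters are unsigned) so
+  // that the pre-increment below starts them at 0.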
+ if (!counters.contains(op))
+ counters[op] = -1;
+
+ Operation *iter = op;
+ ++counters[op];
+ while (iter) {
+ countPrefix.push_back(counters[iter]);
+ StringAttr symbolName =
+ iter->getAttrOfType<StringAttr>(SymbolTable::getSymbolAttrName());
+ std::string opName =
+ llvm::join(llvm::split(iter->getName().getStringRef().str(), '.'), "_");
+ pathElements.emplace_back(opName, symbolName ? symbolName.strref()
+ : "no-symbol-name");
+ iter = iter->getParentOp();
+ }
+ // Return in the order of top level (module) down to `op`.
+ std::reverse(countPrefix.begin(), countPrefix.end());
+ std::reverse(pathElements.begin(), pathElements.end());
+
+ std::string passFileName = llvm::formatv(
+ "{0:$[_]}_{1}.mlir",
+ llvm::make_range(countPrefix.begin(), countPrefix.end()), passName);
+
+ return {pathElements, passFileName};
+}
+
+static LogicalResult createDirectoryOrPrintErr(llvm::StringRef dirPath) {
+ if (std::error_code ec =
+ llvm::sys::fs::create_directory(dirPath, /*IgnoreExisting=*/true)) {
+ llvm::errs() << "Error while creating directory " << dirPath << ": "
+ << ec.message() << "\n";
+ return failure();
+ }
+ return success();
+}
+
+/// Creates directories (if required) and opens an output file for the
+/// FileTreeIRPrinterConfig.
+static std::unique_ptr<llvm::ToolOutputFile>
+createTreePrinterOutputPath(Operation *op, llvm::StringRef passArgument,
+ llvm::StringRef rootDir,
+ llvm::DenseMap<Operation *, unsigned> &counters) {
+ // Create the path. We will create a tree rooted at the given 'rootDir'
+ // directory. The root directory will contain folders with the names of
+ // modules. Sub-directories within those folders mirror the nesting
+ // structure of the pass manager, using symbol names for directory names.
+ auto [opAndSymbolNames, fileName] =
+ getOpAndSymbolNames(op, passArgument, counters);
+
+ // Create all the directories, starting at the root. Abort early if we fail to
+ // create any directory.
+ llvm::SmallString<128> path(rootDir);
+ if (failed(createDirectoryOrPrintErr(path)))
+ return nullptr;
+
+ for (auto [opName, symbolName] : opAndSymbolNames) {
+ llvm::sys::path::append(path, opName + "_" + symbolName);
+ if (failed(createDirectoryOrPrintErr(path)))
+ return nullptr;
+ }
+
+ // Open output file.
+ llvm::sys::path::append(path, fileName);
+ std::string error;
+ std::unique_ptr<llvm::ToolOutputFile> file = openOutputFile(path, &error);
+ if (!file) {
+ llvm::errs() << "Error opening output file " << path << ": " << error
+ << "\n";
+ return nullptr;
+ }
+ return file;
+}
+
+namespace {
+/// A configuration that prints the IR before/after each pass to a set of files
+/// in the specified directory. The files are organized into subdirectories that
+/// mirror the nesting structure of the IR.
+struct FileTreeIRPrinterConfig : public PassManager::IRPrinterConfig {
+ FileTreeIRPrinterConfig(
+ std::function<bool(Pass *, Operation *)> shouldPrintBeforePass,
+ std::function<bool(Pass *, Operation *)> shouldPrintAfterPass,
+ bool printModuleScope, bool printAfterOnlyOnChange,
+ bool printAfterOnlyOnFailure, OpPrintingFlags opPrintingFlags,
+ llvm::StringRef treeDir)
+ : IRPrinterConfig(printModuleScope, printAfterOnlyOnChange,
+ printAfterOnlyOnFailure, opPrintingFlags),
+ shouldPrintBeforePass(std::move(shouldPrintBeforePass)),
+ shouldPrintAfterPass(std::move(shouldPrintAfterPass)),
+ treeDir(treeDir) {
+ assert((this->shouldPrintBeforePass || this->shouldPrintAfterPass) &&
+ "expected at least one valid filter function");
+ }
+
+ void printBeforeIfEnabled(Pass *pass, Operation *operation,
+ PrintCallbackFn printCallback) final {
+ if (!shouldPrintBeforePass || !shouldPrintBeforePass(pass, operation))
+ return;
+ std::unique_ptr<llvm::ToolOutputFile> file = createTreePrinterOutputPath(
+ operation, pass->getArgument(), treeDir, counters);
+ if (!file)
+ return;
+ printCallback(file->os());
+ file->keep();
+ }
+
+ void printAfterIfEnabled(Pass *pass, Operation *operation,
+ PrintCallbackFn printCallback) final {
+ if (!shouldPrintAfterPass || !shouldPrintAfterPass(pass, operation))
+ return;
+ std::unique_ptr<llvm::ToolOutputFile> file = createTreePrinterOutputPath(
+ operation, pass->getArgument(), treeDir, counters);
+ if (!file)
+ return;
+ printCallback(file->os());
+ file->keep();
+ }
+
+ /// Filter functions for before and after pass execution.
+ std::function<bool(Pass *, Operation *)> shouldPrintBeforePass;
+ std::function<bool(Pass *, Operation *)> shouldPrintAfterPass;
+
+ /// Directory that should be used as the root of the file tree.
+ std::string treeDir;
+
+ /// Counters used for labeling the prefix. Every op which could be targeted by
+ /// a pass gets its own counter.
+ llvm::DenseMap<Operation *, unsigned> counters;
+};
+
+} // namespace
+
/// Add an instrumentation to print the IR before and after pass execution,
/// using the provided configuration.
void PassManager::enableIRPrinting(std::unique_ptr<IRPrinterConfig> config) {
@@ -223,3 +370,16 @@ void PassManager::enableIRPrinting(
printModuleScope, printAfterOnlyOnChange, printAfterOnlyOnFailure,
opPrintingFlags, out));
}
+
+/// Add an instrumentation to print the IR before and after pass execution
+/// into a file tree rooted at the given directory.
+void PassManager::enableIRPrintingToFileTree(
+ std::function<bool(Pass *, Operation *)> shouldPrintBeforePass,
+ std::function<bool(Pass *, Operation *)> shouldPrintAfterPass,
+ bool printModuleScope, bool printAfterOnlyOnChange,
+ bool printAfterOnlyOnFailure, StringRef printTreeDir,
+ OpPrintingFlags opPrintingFlags) {
+ enableIRPrinting(std::make_unique<FileTreeIRPrinterConfig>(
+ std::move(shouldPrintBeforePass), std::move(shouldPrintAfterPass),
+ printModuleScope, printAfterOnlyOnChange, printAfterOnlyOnFailure,
+ opPrintingFlags, printTreeDir));
+}
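Putting the pieces together, a sketch of enabling the printer programmatically, assuming an `MLIRContext *ctx`; the same behavior is reachable from the command line via the `-mlir-print-ir-tree-dir` flag added below. The resulting tree nests `<op-name>_<symbol-name>` directories, with files like `0_0_canonicalize.mlir` (counter prefix, then pass name):

// Sketch: dump IR before and after every pass into ./ir_dumps.
PassManager pm(ctx);
pm.enableIRPrintingToFileTree(
    /*shouldPrintBeforePass=*/[](Pass *, Operation *) { return true; },
    /*shouldPrintAfterPass=*/[](Pass *, Operation *) { return true; },
    /*printModuleScope=*/false, /*printAfterOnlyOnChange=*/true,
    /*printAfterOnlyOnFailure=*/false, /*printTreeDir=*/"ir_dumps",
    OpPrintingFlags());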
diff --git a/mlir/lib/Pass/PassManagerOptions.cpp b/mlir/lib/Pass/PassManagerOptions.cpp
index ffc53b7e3ed0..706a21a23ee3 100644
--- a/mlir/lib/Pass/PassManagerOptions.cpp
+++ b/mlir/lib/Pass/PassManagerOptions.cpp
@@ -58,6 +58,10 @@ struct PassManagerOptions {
llvm::cl::desc("When printing IR for print-ir-[before|after]{-all} "
"always print the top-level operation"),
llvm::cl::init(false)};
+ llvm::cl::opt<std::string> printTreeDir{
+ "mlir-print-ir-tree-dir",
+      llvm::cl::desc("When printing the IR before/after a pass, print it to "
+                     "a file tree rooted at this directory")};
/// Add an IR printing instrumentation if enabled by any 'print-ir' flags.
void addPrinterInstrumentation(PassManager &pm);
@@ -120,6 +124,13 @@ void PassManagerOptions::addPrinterInstrumentation(PassManager &pm) {
return;
// Otherwise, add the IR printing instrumentation.
+ if (!printTreeDir.empty()) {
+ pm.enableIRPrintingToFileTree(shouldPrintBeforePass, shouldPrintAfterPass,
+ printModuleScope, printAfterChange,
+ printAfterFailure, printTreeDir);
+ return;
+ }
+
pm.enableIRPrinting(shouldPrintBeforePass, shouldPrintAfterPass,
printModuleScope, printAfterChange, printAfterFailure,
llvm::errs());
diff --git a/mlir/lib/TableGen/CMakeLists.txt b/mlir/lib/TableGen/CMakeLists.txt
index 61e14feb6dc1..c4104e644147 100644
--- a/mlir/lib/TableGen/CMakeLists.txt
+++ b/mlir/lib/TableGen/CMakeLists.txt
@@ -40,6 +40,7 @@ llvm_add_library(MLIRTableGen STATIC
${MLIR_MAIN_INCLUDE_DIR}/mlir/TableGen
${MLIR_MAIN_INCLUDE_DIR}/mlir/Support
)
+set_target_properties(MLIRTableGen PROPERTIES FOLDER "MLIR/Tablegenning")
mlir_check_all_link_libraries(MLIRTableGen)
diff --git a/mlir/lib/Target/LLVM/CMakeLists.txt b/mlir/lib/Target/LLVM/CMakeLists.txt
index e0657c895e8a..5a3fa160850b 100644
--- a/mlir/lib/Target/LLVM/CMakeLists.txt
+++ b/mlir/lib/Target/LLVM/CMakeLists.txt
@@ -47,7 +47,7 @@ add_mlir_dialect_library(MLIRNVVMTarget
MLIRNVVMToLLVMIRTranslation
)
-if(MLIR_ENABLE_CUDA_CONVERSIONS)
+if ("NVPTX" IN_LIST LLVM_TARGETS_TO_BUILD)
# Find the CUDA toolkit.
find_package(CUDAToolkit)
diff --git a/mlir/lib/Target/LLVM/NVVM/Target.cpp b/mlir/lib/Target/LLVM/NVVM/Target.cpp
index e438ce84af1b..e75547ff9b85 100644
--- a/mlir/lib/Target/LLVM/NVVM/Target.cpp
+++ b/mlir/lib/Target/LLVM/NVVM/Target.cpp
@@ -13,7 +13,6 @@
#include "mlir/Target/LLVM/NVVM/Target.h"
-#include "mlir/Config/mlir-config.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/Target/LLVM/NVVM/Utils.h"
@@ -158,40 +157,43 @@ SerializeGPUModuleBase::loadBitcodeFiles(llvm::Module &module) {
return std::move(bcFiles);
}
-#if MLIR_ENABLE_CUDA_CONVERSIONS
+#if LLVM_HAS_NVPTX_TARGET
namespace {
class NVPTXSerializer : public SerializeGPUModuleBase {
public:
NVPTXSerializer(Operation &module, NVVMTargetAttr target,
const gpu::TargetOptions &targetOptions);
+ /// Returns the GPU module op being serialized.
gpu::GPUModuleOp getOperation();
- // Compile PTX to cubin using `ptxas`.
+ /// Compiles PTX to cubin using `ptxas`.
std::optional<SmallVector<char, 0>>
compileToBinary(const std::string &ptxCode);
- // Compile PTX to cubin using the `nvptxcompiler` library.
+ /// Compiles PTX to cubin using the `nvptxcompiler` library.
std::optional<SmallVector<char, 0>>
compileToBinaryNVPTX(const std::string &ptxCode);
+ /// Serializes the LLVM module to an object format, depending on the
+ /// compilation target selected in target options.
std::optional<SmallVector<char, 0>>
moduleToObject(llvm::Module &llvmModule) override;
private:
using TmpFile = std::pair<llvm::SmallString<128>, llvm::FileRemover>;
- // Create a temp file.
+ /// Creates a temp file.
std::optional<TmpFile> createTemp(StringRef name, StringRef suffix);
- // Find the `tool` path, where `tool` is the name of the binary to search,
- // i.e. `ptxas` or `fatbinary`. The search order is:
- // 1. The toolkit path in `targetOptions`.
- // 2. In the system PATH.
- // 3. The path from `getCUDAToolkitPath()`.
+ /// Finds the `tool` path, where `tool` is the name of the binary to search,
+ /// i.e. `ptxas` or `fatbinary`. The search order is:
+ /// 1. The toolkit path in `targetOptions`.
+ /// 2. In the system PATH.
+ /// 3. The path from `getCUDAToolkitPath()`.
std::optional<std::string> findTool(StringRef tool);
- // Target options.
+ /// Target options.
gpu::TargetOptions targetOptions;
};
} // namespace
@@ -515,7 +517,7 @@ NVPTXSerializer::compileToBinaryNVPTX(const std::string &ptxCode) {
std::optional<SmallVector<char, 0>>
NVPTXSerializer::moduleToObject(llvm::Module &llvmModule) {
- // Return LLVM IR if the compilation target is offload.
+ // Return LLVM IR if the compilation target is `offload`.
#define DEBUG_TYPE "serialize-to-llvm"
LLVM_DEBUG({
llvm::dbgs() << "LLVM IR for module: " << getOperation().getNameAttr()
@@ -549,7 +551,7 @@ NVPTXSerializer::moduleToObject(llvm::Module &llvmModule) {
});
#undef DEBUG_TYPE
- // Return PTX if the compilation target is assembly.
+ // Return PTX if the compilation target is `assembly`.
if (targetOptions.getCompilationTarget() ==
gpu::CompilationTarget::Assembly) {
// Make sure to include the null terminator.
@@ -564,7 +566,7 @@ NVPTXSerializer::moduleToObject(llvm::Module &llvmModule) {
return compileToBinary(*serializedISA);
#endif // MLIR_ENABLE_NVPTXCOMPILER
}
-#endif // MLIR_ENABLE_CUDA_CONVERSIONS
+#endif // LLVM_HAS_NVPTX_TARGET
std::optional<SmallVector<char, 0>>
NVVMTargetAttrImpl::serializeToObject(Attribute attribute, Operation *module,
@@ -576,7 +578,7 @@ NVVMTargetAttrImpl::serializeToObject(Attribute attribute, Operation *module,
module->emitError("Module must be a GPU module.");
return std::nullopt;
}
-#if MLIR_ENABLE_CUDA_CONVERSIONS
+#if LLVM_HAS_NVPTX_TARGET
NVPTXSerializer serializer(*module, cast<NVVMTargetAttr>(attribute), options);
serializer.init();
return serializer.run();
@@ -584,7 +586,7 @@ NVVMTargetAttrImpl::serializeToObject(Attribute attribute, Operation *module,
module->emitError(
"The `NVPTX` target was not built. Please enable it when building LLVM.");
return std::nullopt;
-#endif // MLIR_ENABLE_CUDA_CONVERSIONS
+#endif // LLVM_HAS_NVPTX_TARGET
}
Attribute
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp
index eeda245ce969..d9cf85e4aeca 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "mlir/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.h"
+#include "mlir/Analysis/TopologicalSortUtils.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/OpenACC/OpenACC.h"
#include "mlir/IR/BuiltinOps.h"
@@ -19,7 +20,6 @@
#include "mlir/Support/LLVM.h"
#include "mlir/Target/LLVMIR/Dialect/OpenMPCommon.h"
#include "mlir/Target/LLVMIR/ModuleTranslation.h"
-#include "mlir/Transforms/RegionUtils.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index 34b6903f8da0..6ec4c120c11e 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
#include "mlir/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.h"
+#include "mlir/Analysis/TopologicalSortUtils.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
#include "mlir/Dialect/OpenMP/OpenMPInterfaces.h"
@@ -333,54 +334,6 @@ convertOmpCritical(Operation &opInst, llvm::IRBuilderBase &builder,
return success();
}
-/// Returns a reduction declaration that corresponds to the given reduction
-/// operation in the given container. Currently only supports reductions inside
-/// WsloopOp and ParallelOp but can be easily extended as long as the given
-/// construct implements getNumReductionVars.
-template <typename T>
-static std::optional<omp::DeclareReductionOp>
-findReductionDeclInContainer(T container, omp::ReductionOp reduction) {
- for (unsigned i = 0, e = container.getNumReductionVars(); i < e; ++i) {
- if (container.getReductionVars()[i] != reduction.getAccumulator())
- continue;
-
- SymbolRefAttr reductionSymbol =
- cast<SymbolRefAttr>((*container.getReductions())[i]);
- auto declareOp =
- SymbolTable::lookupNearestSymbolFrom<omp::DeclareReductionOp>(
- container, reductionSymbol);
- return declareOp;
- }
- return std::nullopt;
-}
-
-/// Searches for a reduction in a provided region and the regions
-/// it is nested in
-static omp::DeclareReductionOp findReductionDecl(Operation &containerOp,
- omp::ReductionOp reduction) {
- std::optional<omp::DeclareReductionOp> declareOp = std::nullopt;
- Operation *container = &containerOp;
-
- while (!declareOp.has_value() && container) {
- // Check if current container is supported for reductions searches
- if (auto par = dyn_cast<omp::ParallelOp>(*container)) {
- declareOp = findReductionDeclInContainer(par, reduction);
- } else if (auto loop = dyn_cast<omp::WsloopOp>(*container)) {
- declareOp = findReductionDeclInContainer(loop, reduction);
- } else {
- break;
- }
-
- // See if we can search parent for reductions as well
- container = containerOp.getParentOp();
- }
-
- assert(declareOp.has_value() &&
- "reduction operation must be associated with a declaration");
-
- return *declareOp;
-}
-
/// Populates `reductions` with reduction declarations used in the given loop.
template <typename T>
static void
@@ -1785,62 +1738,6 @@ convertOmpAtomicCapture(omp::AtomicCaptureOp atomicCaptureOp,
return updateGenStatus;
}
-/// Converts an OpenMP reduction operation using OpenMPIRBuilder. Expects the
-/// mapping between reduction variables and their private equivalents to have
-/// been stored on the ModuleTranslation stack. Currently only supports
-/// reduction within WsloopOp and ParallelOp, but can be easily extended.
-static LogicalResult
-convertOmpReductionOp(omp::ReductionOp reductionOp,
- llvm::IRBuilderBase &builder,
- LLVM::ModuleTranslation &moduleTranslation) {
- // Find the declaration that corresponds to the reduction op.
- omp::DeclareReductionOp declaration;
- Operation *reductionParent = reductionOp->getParentOp();
- if (dyn_cast<omp::ParallelOp>(reductionParent) ||
- dyn_cast<omp::WsloopOp>(reductionParent)) {
- declaration = findReductionDecl(*reductionParent, reductionOp);
- } else {
- llvm_unreachable("Unhandled reduction container");
- }
- assert(declaration && "could not find reduction declaration");
-
- // Retrieve the mapping between reduction variables and their private
- // equivalents.
- const DenseMap<Value, llvm::Value *> *reductionVariableMap = nullptr;
- moduleTranslation.stackWalk<OpenMPVarMappingStackFrame>(
- [&](const OpenMPVarMappingStackFrame &frame) {
- if (frame.mapping.contains(reductionOp.getAccumulator())) {
- reductionVariableMap = &frame.mapping;
- return WalkResult::interrupt();
- }
- return WalkResult::advance();
- });
- assert(reductionVariableMap && "couldn't find private reduction variables");
- // Translate the reduction operation by emitting the body of the corresponding
- // reduction declaration.
- Region &reductionRegion = declaration.getReductionRegion();
- llvm::Value *privateReductionVar =
- reductionVariableMap->lookup(reductionOp.getAccumulator());
- llvm::Value *reductionVal = builder.CreateLoad(
- moduleTranslation.convertType(reductionOp.getOperand().getType()),
- privateReductionVar);
-
- moduleTranslation.mapValue(reductionRegion.front().getArgument(0),
- reductionVal);
- moduleTranslation.mapValue(
- reductionRegion.front().getArgument(1),
- moduleTranslation.lookupValue(reductionOp.getOperand()));
-
- SmallVector<llvm::Value *> phis;
- if (failed(inlineConvertOmpRegions(reductionRegion, "omp.reduction.body",
- builder, moduleTranslation, &phis)))
- return failure();
- assert(phis.size() == 1 && "expected one value to be yielded from "
- "the reduction body declaration region");
- builder.CreateStore(phis[0], privateReductionVar);
- return success();
-}
-
/// Converts an OpenMP Threadprivate operation into LLVM IR using
/// OpenMPIRBuilder.
static LogicalResult
@@ -3349,9 +3246,6 @@ convertHostOrTargetOperation(Operation *op, llvm::IRBuilderBase &builder,
.Case([&](omp::ParallelOp op) {
return convertOmpParallel(op, builder, moduleTranslation);
})
- .Case([&](omp::ReductionOp reductionOp) {
- return convertOmpReductionOp(reductionOp, builder, moduleTranslation);
- })
.Case([&](omp::MasterOp) {
return convertOmpMaster(*op, builder, moduleTranslation);
})
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index cf3257c8b9b8..1ec0736ec08b 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -16,6 +16,7 @@
#include "AttrKindDetail.h"
#include "DebugTranslation.h"
#include "LoopAnnotationTranslation.h"
+#include "mlir/Analysis/TopologicalSortUtils.h"
#include "mlir/Dialect/DLTI/DLTI.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMInterfaces.h"
@@ -33,7 +34,6 @@
#include "mlir/Support/LogicalResult.h"
#include "mlir/Target/LLVMIR/LLVMTranslationInterface.h"
#include "mlir/Target/LLVMIR/TypeToLLVM.h"
-#include "mlir/Transforms/RegionUtils.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
diff --git a/mlir/lib/Tools/mlir-lsp-server/MLIRServer.cpp b/mlir/lib/Tools/mlir-lsp-server/MLIRServer.cpp
index ed75b4a90536..4e19274c3da4 100644
--- a/mlir/lib/Tools/mlir-lsp-server/MLIRServer.cpp
+++ b/mlir/lib/Tools/mlir-lsp-server/MLIRServer.cpp
@@ -917,7 +917,7 @@ void MLIRDocument::getCodeActionForDiagnostic(
edit.range = lsp::Range(lsp::Position(pos.line, 0));
// Use the indent of the current line for the expected-* diagnostic.
- size_t indent = line.find_first_not_of(" ");
+ size_t indent = line.find_first_not_of(' ');
if (indent == StringRef::npos)
indent = line.size();
diff --git a/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp b/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp
index 44c5e9826f3b..a1b2893a973b 100644
--- a/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp
+++ b/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp
@@ -266,6 +266,8 @@ LogicalResult loadIRDLDialects(StringRef irdlFile, MLIRContext &ctx) {
// Parse the input file.
OwningOpRef<ModuleOp> module(parseSourceFile<ModuleOp>(sourceMgr, &ctx));
+ if (!module)
+ return failure();
// Load IRDL dialects.
return irdl::loadDialects(module.get());
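The new guard matters because parseSourceFile returns a null OwningOpRef when the input fails to parse; a short annotated restatement of the pattern (the rationale comments are editorial):

// The parser emits its own diagnostics and returns a null module on
// failure, so dereferencing without this check would crash when an IRDL
// file is loaded alongside unparsable input.
OwningOpRef<ModuleOp> module(parseSourceFile<ModuleOp>(sourceMgr, &ctx));
if (!module)
  return failure();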
diff --git a/mlir/lib/Transforms/Mem2Reg.cpp b/mlir/lib/Transforms/Mem2Reg.cpp
index e2e240ad865c..a452cc3fae8a 100644
--- a/mlir/lib/Transforms/Mem2Reg.cpp
+++ b/mlir/lib/Transforms/Mem2Reg.cpp
@@ -9,6 +9,7 @@
#include "mlir/Transforms/Mem2Reg.h"
#include "mlir/Analysis/DataLayoutAnalysis.h"
#include "mlir/Analysis/SliceAnalysis.h"
+#include "mlir/Analysis/TopologicalSortUtils.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Dominance.h"
#include "mlir/IR/PatternMatch.h"
@@ -16,7 +17,6 @@
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/MemorySlotInterfaces.h"
#include "mlir/Transforms/Passes.h"
-#include "mlir/Transforms/RegionUtils.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/GenericIteratedDominanceFrontier.h"
diff --git a/mlir/lib/Transforms/SROA.cpp b/mlir/lib/Transforms/SROA.cpp
index 67cbade07bc9..39f7256fb789 100644
--- a/mlir/lib/Transforms/SROA.cpp
+++ b/mlir/lib/Transforms/SROA.cpp
@@ -9,6 +9,7 @@
#include "mlir/Transforms/SROA.h"
#include "mlir/Analysis/DataLayoutAnalysis.h"
#include "mlir/Analysis/SliceAnalysis.h"
+#include "mlir/Analysis/TopologicalSortUtils.h"
#include "mlir/Interfaces/MemorySlotInterfaces.h"
#include "mlir/Transforms/Passes.h"
diff --git a/mlir/lib/Transforms/TopologicalSort.cpp b/mlir/lib/Transforms/TopologicalSort.cpp
index 1219968fb369..528f6ef67602 100644
--- a/mlir/lib/Transforms/TopologicalSort.cpp
+++ b/mlir/lib/Transforms/TopologicalSort.cpp
@@ -8,8 +8,8 @@
#include "mlir/Transforms/Passes.h"
+#include "mlir/Analysis/TopologicalSortUtils.h"
#include "mlir/IR/RegionKindInterface.h"
-#include "mlir/Transforms/TopologicalSortUtils.h"
namespace mlir {
#define GEN_PASS_DEF_TOPOLOGICALSORT
diff --git a/mlir/lib/Transforms/Utils/CMakeLists.txt b/mlir/lib/Transforms/Utils/CMakeLists.txt
index d6aac0e2da4f..b5788c679edc 100644
--- a/mlir/lib/Transforms/Utils/CMakeLists.txt
+++ b/mlir/lib/Transforms/Utils/CMakeLists.txt
@@ -10,7 +10,6 @@ add_mlir_library(MLIRTransformUtils
LoopInvariantCodeMotionUtils.cpp
OneToNTypeConversion.cpp
RegionUtils.cpp
- TopologicalSortUtils.cpp
ADDITIONAL_HEADER_DIRS
${MLIR_MAIN_INCLUDE_DIR}/mlir/Transforms
diff --git a/mlir/lib/Transforms/Utils/RegionUtils.cpp b/mlir/lib/Transforms/Utils/RegionUtils.cpp
index 192f59b35329..b5e641d39fc0 100644
--- a/mlir/lib/Transforms/Utils/RegionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/RegionUtils.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "mlir/Transforms/RegionUtils.h"
+#include "mlir/Analysis/TopologicalSortUtils.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/Operation.h"
@@ -15,11 +16,9 @@
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
-#include "mlir/Transforms/TopologicalSortUtils.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
-#include "llvm/ADT/SmallSet.h"
#include <deque>
@@ -836,19 +835,3 @@ LogicalResult mlir::simplifyRegions(RewriterBase &rewriter,
return success(eliminatedBlocks || eliminatedOpsOrArgs ||
mergedIdenticalBlocks);
}
-
-SetVector<Block *> mlir::getBlocksSortedByDominance(Region &region) {
- // For each block that has not been visited yet (i.e. that has no
- // predecessors), add it to the list as well as its successors.
- SetVector<Block *> blocks;
- for (Block &b : region) {
- if (blocks.count(&b) == 0) {
- llvm::ReversePostOrderTraversal<Block *> traversal(&b);
- blocks.insert(traversal.begin(), traversal.end());
- }
- }
- assert(blocks.size() == region.getBlocks().size() &&
- "some blocks are not sorted");
-
- return blocks;
-}
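The removed helper is not gone: it moves with the rest of the topological-sort utilities from Transforms to Analysis, matching the include switches throughout this patch. A sketch of a caller after the move, assuming only the header path changed:

#include "mlir/Analysis/TopologicalSortUtils.h" // was mlir/Transforms/TopologicalSortUtils.h
#include "mlir/IR/Region.h"
#include "llvm/ADT/SetVector.h"

using namespace mlir;

// Visit blocks so that dominators are processed before the blocks they
// dominate, as the removed implementation above guaranteed.
static void visitBlocksInDominanceOrder(Region &region) {
  SetVector<Block *> blocks = getBlocksSortedByDominance(region);
  for (Block *block : blocks) {
    // ... process `block` ...
  }
}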
diff --git a/mlir/lib/Transforms/ViewOpGraph.cpp b/mlir/lib/Transforms/ViewOpGraph.cpp
index c2eb2b893cea..b3c0a06c96fe 100644
--- a/mlir/lib/Transforms/ViewOpGraph.cpp
+++ b/mlir/lib/Transforms/ViewOpGraph.cpp
@@ -8,12 +8,12 @@
#include "mlir/Transforms/ViewOpGraph.h"
+#include "mlir/Analysis/TopologicalSortUtils.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Operation.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/IndentedOstream.h"
-#include "mlir/Transforms/TopologicalSortUtils.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/GraphWriter.h"
#include <map>
diff --git a/mlir/python/mlir/dialects/linalg/__init__.py b/mlir/python/mlir/dialects/linalg/__init__.py
index 6e4cb1bd6267..8fb1227ee80f 100644
--- a/mlir/python/mlir/dialects/linalg/__init__.py
+++ b/mlir/python/mlir/dialects/linalg/__init__.py
@@ -55,7 +55,6 @@ from .._linalg_enum_gen import *
# TODO: guard against surprises and fail to create Runtime Custom Ops with
# the same name as existing Core Named Ops.
from .opdsl.ops.core_named_ops import *
-from .opdsl.lang.emitter import isa
from ...ir import *
from .._ods_common import get_op_result_or_value as _get_op_result_or_value
@@ -71,7 +70,7 @@ def transpose(
if len(outs) > 1:
raise ValueError(f"{outs=} must have length 1.")
init = _get_op_result_or_value(outs[0])
- result_types = [init.type] if isa(RankedTensorType, init.type) else []
+ result_types = [init.type] if isinstance(init.type, RankedTensorType) else []
op = TransposeOp(
result=result_types,
@@ -93,7 +92,7 @@ def broadcast(
if len(outs) > 1:
raise ValueError(f"{outs=} must have length 1.")
init = _get_op_result_or_value(outs[0])
- result_types = [init.type] if isa(RankedTensorType, init.type) else []
+ result_types = [init.type] if isinstance(init.type, RankedTensorType) else []
op = BroadcastOp(
result=result_types,
diff --git a/mlir/python/mlir/dialects/linalg/opdsl/lang/emitter.py b/mlir/python/mlir/dialects/linalg/opdsl/lang/emitter.py
index 845b533db52a..254458a97882 100644
--- a/mlir/python/mlir/dialects/linalg/opdsl/lang/emitter.py
+++ b/mlir/python/mlir/dialects/linalg/opdsl/lang/emitter.py
@@ -31,14 +31,6 @@ __all__ = [
ValueList = Union[Sequence[Value], OpResultList]
-def isa(cls: Type, ty: Type):
- try:
- cls(ty)
- return True
- except ValueError:
- return False
-
-
def prepare_common_structured_op(
op_config: LinalgStructuredOpConfig,
*ins: Value,
@@ -127,7 +119,7 @@ def prepare_common_structured_op(
op_config, in_arg_defs, ins, out_arg_defs, outs
)
- result_types = [t for t in out_types if isa(RankedTensorType, t)]
+ result_types = [t for t in out_types if isinstance(t, RankedTensorType)]
# Initialize the type dictionary with the predefined types.
type_mapping = dict() # type: Dict[str, Type]
diff --git a/mlir/test/Analysis/DataFlow/test-next-access.mlir b/mlir/test/Analysis/DataFlow/test-next-access.mlir
index 8825c699dd13..700a23aa8bc4 100644
--- a/mlir/test/Analysis/DataFlow/test-next-access.mlir
+++ b/mlir/test/Analysis/DataFlow/test-next-access.mlir
@@ -63,7 +63,7 @@ func.func @branch(%arg0: memref<f32>, %arg1: f32, %arg2: i1) -> f32 {
return %phi : f32
}
-// CHECK-LABEL @dead_branch
+// CHECK-LABEL: @dead_branch
func.func @dead_branch(%arg0: memref<f32>, %arg1: f32) -> f32 {
// CHECK: name = "store"
// CHECK-SAME: next_access = ["unknown", ["load 2"]]
@@ -191,7 +191,7 @@ func.func @loop_cf(%arg0: memref<?xf32>, %arg1: f32, %arg2: index, %arg3: index,
return %8 : f32
}
-// CHECK-LABEL @conditional_cf
+// CHECK-LABEL: @conditional_cf
func.func @conditional_cf(%arg0: i1, %arg1: memref<f32>) {
// CHECK: name = "pre"
// CHECK-SAME: next_access = {{\[}}["then", "post"]]
diff --git a/mlir/test/Analysis/test-liveness.mlir b/mlir/test/Analysis/test-liveness.mlir
index 8ae3d09a6cd1..61a1e5fffa88 100644
--- a/mlir/test/Analysis/test-liveness.mlir
+++ b/mlir/test/Analysis/test-liveness.mlir
@@ -493,3 +493,27 @@ func.func @nested_region3(
}
return %1 : i32
}
+
+// -----
+
+// CHECK-LABEL: Testing : nested_region4
+
+func.func @nested_region4(%arg0: index, %arg1: index, %arg2: index) {
+ // CHECK: Block: 0
+ // CHECK-NEXT: LiveIn:{{ *$}}
+ // CHECK-NEXT: LiveOut:{{ *$}}
+
+ // CHECK: {{^// +}}[[VAL3:[a-z0-9_]+]]{{ *:}}
+ // CHECK: {{^// +}}[[VAL4:[a-z0-9_]+]]{{ *:}}
+ %c0_i32 = arith.constant 0 : i32
+ %c1_i32 = arith.constant 1 : i32
+
+ %0 = scf.for %arg3 = %arg0 to %arg1 step %arg2 iter_args(%arg4 = %c0_i32) -> (i32) {
+ // CHECK: Block: 1
+ // CHECK-NEXT: LiveIn: [[VAL4]]{{ *$}}
+ // CHECK-NEXT: LiveOut:{{ *$}}
+ %1 = arith.addi %arg4, %c1_i32 : i32
+ scf.yield %1 : i32
+ }
+ return
+}
diff --git a/mlir/test/Analysis/test-topoligical-sort.mlir b/mlir/test/Analysis/test-topoligical-sort.mlir
index 860858640205..150aff854fc8 100644
--- a/mlir/test/Analysis/test-topoligical-sort.mlir
+++ b/mlir/test/Analysis/test-topoligical-sort.mlir
@@ -1,21 +1,38 @@
-// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(test-print-topological-sort))" 2>&1 | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(test-print-topological-sort))" --split-input-file | FileCheck %s
-// CHECK-LABEL: Testing : region
-// CHECK: arith.addi {{.*}} : index
-// CHECK-NEXT: scf.for
-// CHECK: } {__test_sort_original_idx__ = 2 : i64}
-// CHECK-NEXT: arith.addi {{.*}} : i32
-// CHECK-NEXT: arith.subi {{.*}} : i32
-func.func @region(
- %arg0 : index, %arg1 : index, %arg2 : index, %arg3 : index,
- %arg4 : i32, %arg5 : i32, %arg6 : i32,
- %buffer : memref<i32>) {
- %0 = arith.addi %arg4, %arg5 {__test_sort_original_idx__ = 0} : i32
- %idx = arith.addi %arg0, %arg1 {__test_sort_original_idx__ = 3} : index
- scf.for %arg7 = %idx to %arg2 step %arg3 {
- %2 = arith.addi %0, %arg5 : i32
- %3 = arith.subi %2, %arg6 {__test_sort_original_idx__ = 1} : i32
- memref.store %3, %buffer[] : memref<i32>
- } {__test_sort_original_idx__ = 2}
+// CHECK-LABEL: single_element
+func.func @single_element() {
+ // CHECK: test_sort_index = 0
+ return {test_to_sort}
+}
+
+// -----
+
+// CHECK-LABEL: @simple_region
+func.func @simple_region(%cond: i1) {
+ // CHECK: test_sort_index = 0
+ %0 = arith.constant {test_to_sort} 42 : i32
+ scf.if %cond {
+ %1 = arith.addi %0, %0 : i32
+ // CHECK: test_sort_index = 2
+ %2 = arith.subi %0, %1 {test_to_sort} : i32
+ // CHECK: test_sort_index = 1
+ } {test_to_sort}
+ return
+}
+
+// -----
+
+// CHECK-LABEL: @multi_region
+func.func @multi_region(%cond: i1) {
+ scf.if %cond {
+ // CHECK: test_sort_index = 0
+ %0 = arith.constant {test_to_sort} 42 : i32
+ }
+
+ scf.if %cond {
+ // CHECK: test_sort_index = 1
+ %0 = arith.constant {test_to_sort} 24 : i32
+ }
return
}
diff --git a/mlir/test/Transforms/test-toposort.mlir b/mlir/test/Analysis/test-toposort.mlir
index c47b885dbec7..c47b885dbec7 100644
--- a/mlir/test/Transforms/test-toposort.mlir
+++ b/mlir/test/Analysis/test-toposort.mlir
diff --git a/mlir/test/CAPI/CMakeLists.txt b/mlir/test/CAPI/CMakeLists.txt
index 57b342a5e26b..76bd4e60f77b 100644
--- a/mlir/test/CAPI/CMakeLists.txt
+++ b/mlir/test/CAPI/CMakeLists.txt
@@ -9,6 +9,8 @@ function(_add_capi_test_executable name)
add_llvm_executable(${name}
PARTIAL_SOURCES_INTENDED
${ARG_UNPARSED_ARGUMENTS})
+ set_target_properties(${name} PROPERTIES FOLDER "MLIR/Tests")
+
llvm_update_compile_flags(${name})
if(MLIR_BUILD_MLIR_C_DYLIB)
target_link_libraries(${name} PRIVATE
diff --git a/mlir/test/CMakeLists.txt b/mlir/test/CMakeLists.txt
index 8806a1dd9223..45009a78aa49 100644
--- a/mlir/test/CMakeLists.txt
+++ b/mlir/test/CMakeLists.txt
@@ -67,8 +67,8 @@ endif()
llvm_canonicalize_cmake_booleans(
LLVM_BUILD_EXAMPLES
+ LLVM_HAS_NVPTX_TARGET
MLIR_ENABLE_BINDINGS_PYTHON
- MLIR_ENABLE_CUDA_CONVERSIONS
MLIR_ENABLE_CUDA_RUNNER
MLIR_ENABLE_ROCM_CONVERSIONS
MLIR_ENABLE_ROCM_RUNNER
@@ -217,12 +217,12 @@ endif()
add_custom_target(check-mlir-build-only
DEPENDS ${MLIR_TEST_DEPENDS}
)
+set_target_properties(check-mlir-build-only PROPERTIES FOLDER "MLIR/Tests")
add_lit_testsuite(check-mlir "Running the MLIR regression tests"
${CMAKE_CURRENT_BINARY_DIR}
DEPENDS ${MLIR_TEST_DEPENDS}
)
-set_target_properties(check-mlir PROPERTIES FOLDER "Tests")
add_lit_testsuites(MLIR ${CMAKE_CURRENT_SOURCE_DIR}
DEPENDS ${MLIR_TEST_DEPENDS}
diff --git a/mlir/test/Conversion/ArithToEmitC/arith-to-emitc-unsupported.mlir b/mlir/test/Conversion/ArithToEmitC/arith-to-emitc-unsupported.mlir
index 66dfa8fa3e15..97e4593f97b9 100644
--- a/mlir/test/Conversion/ArithToEmitC/arith-to-emitc-unsupported.mlir
+++ b/mlir/test/Conversion/ArithToEmitC/arith-to-emitc-unsupported.mlir
@@ -63,3 +63,10 @@ func.func @arith_cast_fptoui_i1(%arg0: f32) -> i1 {
return %t: i1
}
+// -----
+
+func.func @arith_extsi_i1_to_i32(%arg0: i1) {
+ // expected-error @+1 {{failed to legalize operation 'arith.extsi'}}
+ %idx = arith.extsi %arg0 : i1 to i32
+ return
+}
diff --git a/mlir/test/Conversion/ArithToEmitC/arith-to-emitc.mlir b/mlir/test/Conversion/ArithToEmitC/arith-to-emitc.mlir
index 79fecd61494d..b453b69a214e 100644
--- a/mlir/test/Conversion/ArithToEmitC/arith-to-emitc.mlir
+++ b/mlir/test/Conversion/ArithToEmitC/arith-to-emitc.mlir
@@ -177,3 +177,66 @@ func.func @arith_int_to_float_cast_ops(%arg0: i8, %arg1: i64) {
return
}
+
+// -----
+
+func.func @arith_trunci(%arg0: i32) -> i8 {
+ // CHECK-LABEL: arith_trunci
+ // CHECK-SAME: (%[[Arg0:[^ ]*]]: i32)
+ // CHECK: %[[CastUI:.*]] = emitc.cast %[[Arg0]] : i32 to ui32
+ // CHECK: %[[Trunc:.*]] = emitc.cast %[[CastUI]] : ui32 to ui8
+ // CHECK: emitc.cast %[[Trunc]] : ui8 to i8
+ %truncd = arith.trunci %arg0 : i32 to i8
+
+ return %truncd : i8
+}
+
+// -----
+
+func.func @arith_trunci_to_i1(%arg0: i32) -> i1 {
+ // CHECK-LABEL: arith_trunci_to_i1
+ // CHECK-SAME: (%[[Arg0:[^ ]*]]: i32)
+ // CHECK: %[[Const:.*]] = "emitc.constant"
+ // CHECK-SAME: value = 1
+ // CHECK: %[[And:.*]] = emitc.bitwise_and %[[Arg0]], %[[Const]] : (i32, i32) -> i32
+ // CHECK: emitc.cast %[[And]] : i32 to i1
+ %truncd = arith.trunci %arg0 : i32 to i1
+
+ return %truncd : i1
+}
+
+// -----
+
+func.func @arith_extsi(%arg0: i32) {
+ // CHECK-LABEL: arith_extsi
+ // CHECK-SAME: ([[Arg0:[^ ]*]]: i32)
+ // CHECK: emitc.cast [[Arg0]] : i32 to i64
+ %extd = arith.extsi %arg0 : i32 to i64
+
+ return
+}
+
+// -----
+
+func.func @arith_extui(%arg0: i32) {
+ // CHECK-LABEL: arith_extui
+ // CHECK-SAME: (%[[Arg0:[^ ]*]]: i32)
+ // CHECK: %[[Conv0:.*]] = emitc.cast %[[Arg0]] : i32 to ui32
+ // CHECK: %[[Conv1:.*]] = emitc.cast %[[Conv0]] : ui32 to ui64
+ // CHECK: emitc.cast %[[Conv1]] : ui64 to i64
+ %extd = arith.extui %arg0 : i32 to i64
+
+ return
+}
+
+// -----
+
+func.func @arith_extui_i1_to_i32(%arg0: i1) {
+ // CHECK-LABEL: arith_extui_i1_to_i32
+ // CHECK-SAME: (%[[Arg0:[^ ]*]]: i1)
+ // CHECK: %[[Conv0:.*]] = emitc.cast %[[Arg0]] : i1 to ui1
+ // CHECK: %[[Conv1:.*]] = emitc.cast %[[Conv0]] : ui1 to ui32
+ // CHECK: emitc.cast %[[Conv1]] : ui32 to i32
+ %idx = arith.extui %arg0 : i1 to i32
+ return
+}
diff --git a/mlir/test/Conversion/BufferizationToMemRef/bufferization-to-memref.mlir b/mlir/test/Conversion/BufferizationToMemRef/bufferization-to-memref.mlir
index 1eb387ce0e5b..f58a2afa1a89 100644
--- a/mlir/test/Conversion/BufferizationToMemRef/bufferization-to-memref.mlir
+++ b/mlir/test/Conversion/BufferizationToMemRef/bufferization-to-memref.mlir
@@ -79,7 +79,7 @@ func.func @conversion_dealloc_simple(%arg0: memref<2xf32>, %arg1: i1) {
return
}
-// CHECk: scf.if [[ARG1]] {
-// CHECk-NEXT: memref.dealloc [[ARG0]] : memref<2xf32>
-// CHECk-NEXT: }
-// CHECk-NEXT: return
+// CHECK: scf.if [[ARG1]] {
+// CHECK-NEXT: memref.dealloc [[ARG0]] : memref<2xf32>
+// CHECK-NEXT: }
+// CHECK-NEXT: return
diff --git a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
index dbf8ead49f78..1b046d32f163 100644
--- a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
@@ -778,11 +778,11 @@ func.func @create_tensor_map(%devicePtr2d : memref<64x128xf32>, %devicePtr1d : m
%crd0 = arith.constant 64 : index
%crd1 = arith.constant 128 : index
%devicePtr2d_unranked = memref.cast %devicePtr2d : memref<64x128xf32> to memref<*xf32>
- // CHECK : llvm.call @mgpuTensorMapEncodeTiledMemref
+ // CHECK: llvm.call @mgpuTensorMapEncodeTiledMemref
%tensorMap2d = nvgpu.tma.create.descriptor %devicePtr2d_unranked box[%crd0, %crd1] : memref<*xf32> -> !tensorMap2d
%devicePtr1d_unranked = memref.cast %devicePtr1d : memref<128xf32> to memref<*xf32>
- // CHECK : llvm.call @mgpuTensorMapEncodeTiledMemref
+ // CHECK: llvm.call @mgpuTensorMapEncodeTiledMemref
%tensorMap1d = nvgpu.tma.create.descriptor %devicePtr1d_unranked box[%crd1] : memref<*xf32> -> !tensorMap1d
func.return
}
diff --git a/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir b/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
index 802760f8c899..21947c242461 100644
--- a/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
+++ b/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
@@ -17,7 +17,7 @@ llvm.func @init_mbarrier(%barrier_gen : !llvm.ptr, %barrier : !llvm.ptr<3>, %cou
llvm.func @init_mbarrier_arrive_expect_tx(%barrier : !llvm.ptr<3>, %txcount : i32, %pred : i1) {
//CHECK: llvm.inline_asm has_side_effects asm_dialect = att "mbarrier.arrive.expect_tx.shared.b64 _, [$0], $1;", "r,r"
nvvm.mbarrier.arrive.expect_tx.shared %barrier, %txcount : !llvm.ptr<3>, i32
- //CHECK : llvm.inline_asm has_side_effects asm_dialect = att "@$2 mbarrier.arrive.expect_tx.shared.b64 _, [$0], $1;", "r,r,b "
+ //CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$2 mbarrier.arrive.expect_tx.shared.b64 _, [$0], $1;", "r,r,b"
nvvm.mbarrier.arrive.expect_tx.shared %barrier, %txcount, predicate = %pred : !llvm.ptr<3>, i32, i1
llvm.return
}
@@ -129,7 +129,7 @@ func.func @tma_load_5d_all(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %bar
func.func @tma_load_1d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %crd0: i32, %p : i1) {
// CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.1d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2} ], [$3];", "r,l,r,r"
nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor, %barrier, box[%crd0] : !llvm.ptr<3>, !llvm.ptr
- // CHECK : llvm.inline_asm has_side_effects asm_dialect = att "@$4 cp.async.bulk.tensor.1d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2} ], [$3];", "l,r,r,r,b"
+ // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$4 cp.async.bulk.tensor.1d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2} ], [$3];", "r,l,r,r,b"
nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor, %barrier, box[%crd0] predicate=%p : !llvm.ptr<3>, !llvm.ptr
return
}
@@ -138,7 +138,7 @@ func.func @tma_load_1d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier
func.func @tma_load_2d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %p : i1) {
// CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.2d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3} ], [$4];", "r,l,r,r,r"
nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor, %barrier, box[%crd0,%crd1] : !llvm.ptr<3>, !llvm.ptr
- // CHECK : llvm.inline_asm has_side_effects asm_dialect = att "@$5 cp.async.bulk.tensor.2d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3} ], [$4];", "l,r,r,r,r,b"
+ // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$5 cp.async.bulk.tensor.2d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3} ], [$4];", "r,l,r,r,r,b"
nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor, %barrier, box[%crd0,%crd1] predicate=%p : !llvm.ptr<3>, !llvm.ptr
return
}
@@ -147,7 +147,7 @@ func.func @tma_load_2d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier
func.func @tma_load_3d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %p : i1) {
// CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.3d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3,$4} ], [$5];", "r,l,r,r,r,r"
nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor, %barrier, box[%crd0,%crd1,%crd2] : !llvm.ptr<3>, !llvm.ptr
- // CHECK : llvm.inline_asm has_side_effects asm_dialect = att "@$6 cp.async.bulk.tensor.3d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3,$4}], [$5];", "l,r,r,r,r,r,b"
+ // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$6 cp.async.bulk.tensor.3d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3,$4} ], [$5];", "r,l,r,r,r,r,b"
nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor, %barrier, box[%crd0,%crd1,%crd2] predicate=%p : !llvm.ptr<3>, !llvm.ptr
return
}
@@ -156,7 +156,7 @@ func.func @tma_load_3d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier
func.func @tma_load_4d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %p : i1) {
// CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.4d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3,$4,$5} ], [$6];", "r,l,r,r,r,r,r"
nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor, %barrier, box[%crd0,%crd1,%crd2,%crd3] : !llvm.ptr<3>, !llvm.ptr
- // CHECK : llvm.inline_asm has_side_effects asm_dialect = att "@$7 cp.async.bulk.tensor.4d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3,$4,$5}], [$6];", "l,r,r,r,r,r,r,b"
+ // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$7 cp.async.bulk.tensor.4d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3,$4,$5} ], [$6];", "r,l,r,r,r,r,r,b"
nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor, %barrier, box[%crd0,%crd1,%crd2,%crd3] predicate=%p : !llvm.ptr<3>, !llvm.ptr
return
}
@@ -165,7 +165,7 @@ func.func @tma_load_4d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier
func.func @tma_load_5d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %crd4: i32, %p : i1) {
// CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.5d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3,$4,$5,$6} ], [$7];", "r,l,r,r,r,r,r,r"
nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor, %barrier, box[%crd0,%crd1,%crd2,%crd3,%crd4] : !llvm.ptr<3>, !llvm.ptr
- // CHECK : llvm.inline_asm has_side_effects asm_dialect = att "@$8 cp.async.bulk.tensor.5d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3,$4,$5,$6}], [$7];", "l,r,r,r,r,r,r,r,b"
+ // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$8 cp.async.bulk.tensor.5d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3,$4,$5,$6} ], [$7];", "r,l,r,r,r,r,r,r,b"
nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor, %barrier, box[%crd0,%crd1,%crd2,%crd3,%crd4] predicate=%p : !llvm.ptr<3>, !llvm.ptr
return
}
@@ -688,7 +688,7 @@ func.func @fence_proxy() {
llvm.func @llvm_nvvm_barrier_arrive(%barID : i32, %numberOfThreads : i32) {
// CHECK: llvm.inline_asm has_side_effects asm_dialect = att "bar.arrive 0, $0;", "r" %[[numberOfThreads]] : (i32) -> ()
nvvm.barrier.arrive number_of_threads = %numberOfThreads
- // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "bar.arrive $0, $1", "r,r" %[[barId]], %[[numberOfThreads]] : (i32, i32) -> ()
+ // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "bar.arrive $0, $1;", "r,r" %[[barId]], %[[numberOfThreads]] : (i32, i32) -> ()
nvvm.barrier.arrive id = %barID number_of_threads = %numberOfThreads
llvm.return
}
diff --git a/mlir/test/Conversion/PDLToPDLInterp/pdl-to-pdl-interp-matcher.mlir b/mlir/test/Conversion/PDLToPDLInterp/pdl-to-pdl-interp-matcher.mlir
index 92afb765b5ab..ed6407a63239 100644
--- a/mlir/test/Conversion/PDLToPDLInterp/pdl-to-pdl-interp-matcher.mlir
+++ b/mlir/test/Conversion/PDLToPDLInterp/pdl-to-pdl-interp-matcher.mlir
@@ -588,7 +588,7 @@ module @variadic_results_all {
// CHECK-DAG: %[[OPS:.*]] = pdl_interp.get_users of %[[VAL0]] : !pdl.value
// CHECK-DAG: pdl_interp.foreach %[[OP:.*]] : !pdl.operation in %[[OPS]]
// CHECK-DAG: %[[OPERANDS:.*]] = pdl_interp.get_operands of %[[OP]]
- // CHECK-DAG pdl_interp.are_equal %[[VALS]], %[[OPERANDS]] -> ^{{.*}}, ^[[CONTINUE:.*]]
+ // CHECK-DAG: pdl_interp.are_equal %[[OPERANDS]], %[[VALS]] : !pdl.range<value> -> ^{{.*}}, ^[[CONTINUE:.*]]
// CHECK-DAG: pdl_interp.is_not_null %[[OP]]
// CHECK-DAG: pdl_interp.check_result_count of %[[OP]] is 0
pdl.pattern @variadic_results_all : benefit(1) {
@@ -701,7 +701,7 @@ module @common_connector {
// CHECK-DAG: pdl_interp.are_equal %[[ROOTA_OP]], %[[VAL0]] : !pdl.value
// CHECK-DAG: %[[ROOTB_OP:.*]] = pdl_interp.get_operand 0 of %[[ROOTB]]
// CHECK-DAG: pdl_interp.are_equal %[[ROOTB_OP]], %[[VAL0]] : !pdl.value
- // CHECK-DAG } -> ^[[CONTA:.*]]
+ // CHECK-DAG: } -> ^[[CONTA:.*]]
pdl.pattern @common_connector : benefit(1) {
%type = type
%op = operation -> (%type, %type : !pdl.type, !pdl.type)
@@ -742,7 +742,7 @@ module @common_connector_range {
// CHECK-DAG: pdl_interp.are_equal %[[ROOTA_OPS]], %[[VALS0]] : !pdl.range<value>
// CHECK-DAG: %[[ROOTB_OPS:.*]] = pdl_interp.get_operands of %[[ROOTB]]
// CHECK-DAG: pdl_interp.are_equal %[[ROOTB_OPS]], %[[VALS0]] : !pdl.range<value>
- // CHECK-DAG } -> ^[[CONTA:.*]]
+ // CHECK-DAG: } -> ^[[CONTA:.*]]
pdl.pattern @common_connector_range : benefit(1) {
%types = types
%op = operation -> (%types, %types : !pdl.range<type>, !pdl.range<type>)
diff --git a/mlir/test/Conversion/SPIRVToLLVM/spirv-storage-class-mapping.mlir b/mlir/test/Conversion/SPIRVToLLVM/spirv-storage-class-mapping.mlir
index b9c56a3fcffd..980406d775d1 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/spirv-storage-class-mapping.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/spirv-storage-class-mapping.mlir
@@ -91,5 +91,5 @@ spirv.func @pointerCodeSectionINTEL(!spirv.ptr<i1, CodeSectionINTEL>) "None"
spirv.func @pointerDeviceOnlyINTEL(!spirv.ptr<i1, DeviceOnlyINTEL>) "None"
// CHECK-OPENCL: llvm.func @pointerHostOnlyINTEL(!llvm.ptr<6>)
-// CHECK-UNKOWN: llvm.func @pointerHostOnlyINTEL(!llvm.ptr)
+// CHECK-UNKNOWN: llvm.func @pointerHostOnlyINTEL(!llvm.ptr)
spirv.func @pointerHostOnlyINTEL(!spirv.ptr<i1, HostOnlyINTEL>) "None"
diff --git a/mlir/test/Conversion/VectorToArmSME/vector-to-arm-sme.mlir b/mlir/test/Conversion/VectorToArmSME/vector-to-arm-sme.mlir
index ce0b46e0f061..f22b6de52f36 100644
--- a/mlir/test/Conversion/VectorToArmSME/vector-to-arm-sme.mlir
+++ b/mlir/test/Conversion/VectorToArmSME/vector-to-arm-sme.mlir
@@ -150,6 +150,39 @@ func.func @transfer_read_2d_transpose_with_mask_f32(%src : memref<?x?xf32>, %mas
// -----
+// CHECK-LABEL: @fold_transpose_into_load
+// CHECK-NOT: arm_sme.tile_store
+// CHECK: arm_sme.tile_load {{.*}} layout<vertical> : memref<?x?xf32>, vector<[4]x[4]xf32>
+// CHECK-NOT: arm_sme.tile_store
+func.func @fold_transpose_into_load(%src : memref<?x?xf32>) {
+ %c0 = arith.constant 0 : index
+ %pad = arith.constant 0.0 : f32
+ %0 = vector.transfer_read %src[%c0, %c0], %pad {in_bounds = [true, true]} : memref<?x?xf32>, vector<[4]x[4]xf32>
+ %1 = vector.transpose %0, [1, 0] : vector<[4]x[4]xf32> to vector<[4]x[4]xf32>
+ "prevent.dce"(%1) : (vector<[4]x[4]xf32>) -> ()
+  return
+}
+
+// -----
+
+/// Transposes with more than a single use cannot be folded into a load and
+/// will instead be transposed via memory.
+
+// CHECK-LABEL: @fold_transpose_into_load_multi_use
+// CHECK: arm_sme.tile_load {{.*}} : memref<?x?xf32>, vector<[4]x[4]xf32>
+// CHECK: arm_sme.tile_store {{.*}} : memref<?x?xf32>, vector<[4]x[4]xf32>
+// CHECK: %[[TILE_TRANSPOSED_VIA_MEM:.*]] = arm_sme.tile_load {{.*}} layout<vertical> : memref<?x?xf32>, vector<[4]x[4]xf32>
+// CHECK: "prevent.dce"(%[[TILE_TRANSPOSED_VIA_MEM]]) : (vector<[4]x[4]xf32>) -> ()
+func.func @fold_transpose_into_load_multi_use(%src : memref<?x?xf32>) {
+ %c0 = arith.constant 0 : index
+ %pad = arith.constant 0.0 : f32
+ %0 = vector.transfer_read %src[%c0, %c0], %pad {in_bounds = [true, true]} : memref<?x?xf32>, vector<[4]x[4]xf32>
+ "test.some_use"(%0) : (vector<[4]x[4]xf32>) -> ()
+ %1 = vector.transpose %0, [1, 0] : vector<[4]x[4]xf32> to vector<[4]x[4]xf32>
+ "prevent.dce"(%1) : (vector<[4]x[4]xf32>) -> ()
+  return
+}
+
+// -----
+
//===----------------------------------------------------------------------===//
// vector.transfer_write
//===----------------------------------------------------------------------===//
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
index 439f1e920e39..a7a0ca3d43b0 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
@@ -2495,7 +2495,7 @@ func.func @vector_interleave_0d(%a: vector<i8>, %b: vector<i8>) -> vector<2xi8>
// CHECK: %[[RHS_RANK1:.*]] = builtin.unrealized_conversion_cast %[[RHS]] : vector<i8> to vector<1xi8>
// CHECK: %[[ZIP:.*]] = llvm.shufflevector %[[LHS_RANK1]], %[[RHS_RANK1]] [0, 1] : vector<1xi8>
// CHECK: return %[[ZIP]]
- %0 = vector.interleave %a, %b : vector<i8>
+ %0 = vector.interleave %a, %b : vector<i8> -> vector<2xi8>
return %0 : vector<2xi8>
}
@@ -2503,11 +2503,10 @@ func.func @vector_interleave_0d(%a: vector<i8>, %b: vector<i8>) -> vector<2xi8>
// CHECK-LABEL: @vector_interleave_1d
// CHECK-SAME: %[[LHS:.*]]: vector<8xf32>, %[[RHS:.*]]: vector<8xf32>)
-func.func @vector_interleave_1d(%a: vector<8xf32>, %b: vector<8xf32>) -> vector<16xf32>
-{
+func.func @vector_interleave_1d(%a: vector<8xf32>, %b: vector<8xf32>) -> vector<16xf32> {
// CHECK: %[[ZIP:.*]] = llvm.shufflevector %[[LHS]], %[[RHS]] [0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15] : vector<8xf32>
// CHECK: return %[[ZIP]]
- %0 = vector.interleave %a, %b : vector<8xf32>
+ %0 = vector.interleave %a, %b : vector<8xf32> -> vector<16xf32>
return %0 : vector<16xf32>
}
@@ -2515,11 +2514,10 @@ func.func @vector_interleave_1d(%a: vector<8xf32>, %b: vector<8xf32>) -> vector<
// CHECK-LABEL: @vector_interleave_1d_scalable
// CHECK-SAME: %[[LHS:.*]]: vector<[4]xi32>, %[[RHS:.*]]: vector<[4]xi32>)
-func.func @vector_interleave_1d_scalable(%a: vector<[4]xi32>, %b: vector<[4]xi32>) -> vector<[8]xi32>
-{
+func.func @vector_interleave_1d_scalable(%a: vector<[4]xi32>, %b: vector<[4]xi32>) -> vector<[8]xi32> {
// CHECK: %[[ZIP:.*]] = "llvm.intr.vector.interleave2"(%[[LHS]], %[[RHS]]) : (vector<[4]xi32>, vector<[4]xi32>) -> vector<[8]xi32>
// CHECK: return %[[ZIP]]
- %0 = vector.interleave %a, %b : vector<[4]xi32>
+ %0 = vector.interleave %a, %b : vector<[4]xi32> -> vector<[8]xi32>
return %0 : vector<[8]xi32>
}
@@ -2527,11 +2525,10 @@ func.func @vector_interleave_1d_scalable(%a: vector<[4]xi32>, %b: vector<[4]xi32
// CHECK-LABEL: @vector_interleave_2d
// CHECK-SAME: %[[LHS:.*]]: vector<2x3xi8>, %[[RHS:.*]]: vector<2x3xi8>)
-func.func @vector_interleave_2d(%a: vector<2x3xi8>, %b: vector<2x3xi8>) -> vector<2x6xi8>
-{
+func.func @vector_interleave_2d(%a: vector<2x3xi8>, %b: vector<2x3xi8>) -> vector<2x6xi8> {
// CHECK: llvm.shufflevector
// CHECK-NOT: vector.interleave {{.*}} : vector<2x3xi8>
- %0 = vector.interleave %a, %b : vector<2x3xi8>
+ %0 = vector.interleave %a, %b : vector<2x3xi8> -> vector<2x6xi8>
return %0 : vector<2x6xi8>
}
@@ -2539,10 +2536,9 @@ func.func @vector_interleave_2d(%a: vector<2x3xi8>, %b: vector<2x3xi8>) -> vecto
// CHECK-LABEL: @vector_interleave_2d_scalable
// CHECK-SAME: %[[LHS:.*]]: vector<2x[8]xi16>, %[[RHS:.*]]: vector<2x[8]xi16>)
-func.func @vector_interleave_2d_scalable(%a: vector<2x[8]xi16>, %b: vector<2x[8]xi16>) -> vector<2x[16]xi16>
-{
+func.func @vector_interleave_2d_scalable(%a: vector<2x[8]xi16>, %b: vector<2x[8]xi16>) -> vector<2x[16]xi16> {
// CHECK: llvm.intr.vector.interleave2
// CHECK-NOT: vector.interleave {{.*}} : vector<2x[8]xi16>
- %0 = vector.interleave %a, %b : vector<2x[8]xi16>
+ %0 = vector.interleave %a, %b : vector<2x[8]xi16> -> vector<2x[16]xi16>
return %0 : vector<2x[16]xi16>
}
diff --git a/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir b/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir
index cddc4ee38535..2592d0fc0411 100644
--- a/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir
+++ b/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir
@@ -483,6 +483,30 @@ func.func @shuffle(%v0 : vector<1xi32>, %v1: vector<1xi32>) -> vector<2xi32> {
// -----
+// CHECK-LABEL: func @interleave
+// CHECK-SAME: (%[[ARG0:.+]]: vector<2xf32>, %[[ARG1:.+]]: vector<2xf32>)
+// CHECK: %[[SHUFFLE:.*]] = spirv.VectorShuffle [0 : i32, 2 : i32, 1 : i32, 3 : i32] %[[ARG0]], %[[ARG1]] : vector<2xf32>, vector<2xf32> -> vector<4xf32>
+// CHECK: return %[[SHUFFLE]]
+func.func @interleave(%a: vector<2xf32>, %b: vector<2xf32>) -> vector<4xf32> {
+ %0 = vector.interleave %a, %b : vector<2xf32> -> vector<4xf32>
+ return %0 : vector<4xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @interleave_size1
+// CHECK-SAME: (%[[ARG0:.+]]: vector<1xf32>, %[[ARG1:.+]]: vector<1xf32>)
+// CHECK: %[[V0:.*]] = builtin.unrealized_conversion_cast %[[ARG0]] : vector<1xf32> to f32
+// CHECK: %[[V1:.*]] = builtin.unrealized_conversion_cast %[[ARG1]] : vector<1xf32> to f32
+// CHECK: %[[RES:.*]] = spirv.CompositeConstruct %[[V0]], %[[V1]] : (f32, f32) -> vector<2xf32>
+// CHECK: return %[[RES]]
+func.func @interleave_size1(%a: vector<1xf32>, %b: vector<1xf32>) -> vector<2xf32> {
+ %0 = vector.interleave %a, %b : vector<1xf32> -> vector<2xf32>
+ return %0 : vector<2xf32>
+}
+
+// -----
+
// CHECK-LABEL: func @reduction_add
// CHECK-SAME: (%[[V:.+]]: vector<4xi32>)
// CHECK: %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<4xi32>
diff --git a/mlir/test/Dialect/Affine/slicing-utils.mlir b/mlir/test/Dialect/Affine/slicing-utils.mlir
index 74379978fdf8..0848a924b9d9 100644
--- a/mlir/test/Dialect/Affine/slicing-utils.mlir
+++ b/mlir/test/Dialect/Affine/slicing-utils.mlir
@@ -28,15 +28,15 @@ func.func @slicing_test() {
// BWD: matched: %[[v1:.*]] {{.*}} backward static slice:
//
// FWDBWD: matched: %[[v1:.*]] {{.*}} static slice:
- // FWDBWD-DAG: %[[v4:.*]] = "slicing-test-op"() : () -> i4
- // FWDBWD-DAG: %[[v3:.*]] = "slicing-test-op"() : () -> i3
- // FWDBWD-NEXT: %[[v6:.*]] = "slicing-test-op"(%[[v3]], %[[v4]]) : (i3, i4) -> i6
- // FWDBWD-DAG: %[[v2:.*]] = "slicing-test-op"() : () -> i2
- // FWDBWD-DAG: %[[v1:.*]] = "slicing-test-op"() : () -> i1
- // FWDBWD-NEXT: %[[v5:.*]] = "slicing-test-op"(%[[v1]], %[[v2]]) : (i1, i2) -> i5
- // FWDBWD-DAG: %[[v8:.*]] = "slicing-test-op"(%[[v5]], %[[v6]]) : (i5, i6) -> i8
- // FWDBWD-DAG: %[[v7:.*]] = "slicing-test-op"(%[[v1]], %[[v5]]) : (i1, i5) -> i7
- // FWDBWD-NEXT: %[[v9:.*]] = "slicing-test-op"(%[[v7]], %[[v8]]) : (i7, i8) -> i9
+ // FWDBWD: %[[v1:.*]] = "slicing-test-op"() : () -> i1
+ // FWDBWD: %[[v2:.*]] = "slicing-test-op"() : () -> i2
+ // FWDBWD: %[[v3:.*]] = "slicing-test-op"() : () -> i3
+ // FWDBWD: %[[v4:.*]] = "slicing-test-op"() : () -> i4
+ // FWDBWD: %[[v5:.*]] = "slicing-test-op"(%[[v1]], %[[v2]]) : (i1, i2) -> i5
+ // FWDBWD: %[[v6:.*]] = "slicing-test-op"(%[[v3]], %[[v4]]) : (i3, i4) -> i6
+ // FWDBWD: %[[v7:.*]] = "slicing-test-op"(%[[v1]], %[[v5]]) : (i1, i5) -> i7
+ // FWDBWD: %[[v8:.*]] = "slicing-test-op"(%[[v5]], %[[v6]]) : (i5, i6) -> i8
+ // FWDBWD: %[[v9:.*]] = "slicing-test-op"(%[[v7]], %[[v8]]) : (i7, i8) -> i9
%1 = "slicing-test-op" () : () -> i1
@@ -49,15 +49,15 @@ func.func @slicing_test() {
// BWD: matched: %[[v2:.*]] {{.*}} backward static slice:
//
// FWDBWD-NEXT: matched: %[[v2:.*]] {{.*}} static slice:
- // FWDBWD-DAG: %[[v4:.*]] = "slicing-test-op"() : () -> i4
- // FWDBWD-DAG: %[[v3:.*]] = "slicing-test-op"() : () -> i3
- // FWDBWD-NEXT: %[[v6:.*]] = "slicing-test-op"(%[[v3]], %[[v4]]) : (i3, i4) -> i6
- // FWDBWD-DAG: %[[v2:.*]] = "slicing-test-op"() : () -> i2
- // FWDBWD-DAG: %[[v1:.*]] = "slicing-test-op"() : () -> i1
- // FWDBWD-NEXT: %[[v5:.*]] = "slicing-test-op"(%[[v1]], %[[v2]]) : (i1, i2) -> i5
- // FWDBWD-DAG: %[[v8:.*]] = "slicing-test-op"(%[[v5]], %[[v6]]) : (i5, i6) -> i8
- // FWDBWD-DAG: %[[v7:.*]] = "slicing-test-op"(%[[v1]], %[[v5]]) : (i1, i5) -> i7
- // FWDBWD-NEXT: %[[v9:.*]] = "slicing-test-op"(%[[v7]], %[[v8]]) : (i7, i8) -> i9
+ // FWDBWD: %[[v1:.*]] = "slicing-test-op"() : () -> i1
+ // FWDBWD: %[[v2:.*]] = "slicing-test-op"() : () -> i2
+ // FWDBWD: %[[v3:.*]] = "slicing-test-op"() : () -> i3
+ // FWDBWD: %[[v4:.*]] = "slicing-test-op"() : () -> i4
+ // FWDBWD: %[[v5:.*]] = "slicing-test-op"(%[[v1]], %[[v2]]) : (i1, i2) -> i5
+ // FWDBWD: %[[v6:.*]] = "slicing-test-op"(%[[v3]], %[[v4]]) : (i3, i4) -> i6
+ // FWDBWD: %[[v7:.*]] = "slicing-test-op"(%[[v1]], %[[v5]]) : (i1, i5) -> i7
+ // FWDBWD: %[[v8:.*]] = "slicing-test-op"(%[[v5]], %[[v6]]) : (i5, i6) -> i8
+ // FWDBWD: %[[v9:.*]] = "slicing-test-op"(%[[v7]], %[[v8]]) : (i7, i8) -> i9
%2 = "slicing-test-op" () : () -> i2
@@ -69,15 +69,15 @@ func.func @slicing_test() {
// BWD: matched: %[[v3:.*]] {{.*}} backward static slice:
//
// FWDBWD-NEXT: matched: %[[v3:.*]] {{.*}} static slice:
- // FWDBWD-DAG: %[[v2:.*]] = "slicing-test-op"() : () -> i2
- // FWDBWD-DAG: %[[v1:.*]] = "slicing-test-op"() : () -> i1
- // FWDBWD-NEXT: %[[v5:.*]] = "slicing-test-op"(%[[v1]], %[[v2]]) : (i1, i2) -> i5
- // FWDBWD-NEXT: %[[v7:.*]] = "slicing-test-op"(%[[v1]], %[[v5]]) : (i1, i5) -> i7
- // FWDBWD-DAG: %[[v4:.*]] = "slicing-test-op"() : () -> i4
- // FWDBWD-DAG: %[[v3:.*]] = "slicing-test-op"() : () -> i3
- // FWDBWD-NEXT: %[[v6:.*]] = "slicing-test-op"(%[[v3]], %[[v4]]) : (i3, i4) -> i6
- // FWDBWD-NEXT: %[[v8:.*]] = "slicing-test-op"(%[[v5]], %[[v6]]) : (i5, i6) -> i8
- // FWDBWD-NEXT: %[[v9:.*]] = "slicing-test-op"(%[[v7]], %[[v8]]) : (i7, i8) -> i9
+ // FWDBWD: %[[v1:.*]] = "slicing-test-op"() : () -> i1
+ // FWDBWD: %[[v2:.*]] = "slicing-test-op"() : () -> i2
+ // FWDBWD: %[[v3:.*]] = "slicing-test-op"() : () -> i3
+ // FWDBWD: %[[v4:.*]] = "slicing-test-op"() : () -> i4
+ // FWDBWD: %[[v5:.*]] = "slicing-test-op"(%[[v1]], %[[v2]]) : (i1, i2) -> i5
+ // FWDBWD: %[[v6:.*]] = "slicing-test-op"(%[[v3]], %[[v4]]) : (i3, i4) -> i6
+ // FWDBWD: %[[v7:.*]] = "slicing-test-op"(%[[v1]], %[[v5]]) : (i1, i5) -> i7
+ // FWDBWD: %[[v8:.*]] = "slicing-test-op"(%[[v5]], %[[v6]]) : (i5, i6) -> i8
+ // FWDBWD: %[[v9:.*]] = "slicing-test-op"(%[[v7]], %[[v8]]) : (i7, i8) -> i9
%3 = "slicing-test-op" () : () -> i3
@@ -89,15 +89,15 @@ func.func @slicing_test() {
// BWD: matched: %[[v4:.*]] {{.*}} backward static slice:
//
// FWDBWD-NEXT: matched: %[[v4:.*]] {{.*}} static slice:
- // FWDBWD-DAG: %[[v2:.*]] = "slicing-test-op"() : () -> i2
- // FWDBWD-DAG: %[[v1:.*]] = "slicing-test-op"() : () -> i1
- // FWDBWD-NEXT: %[[v5:.*]] = "slicing-test-op"(%[[v1]], %[[v2]]) : (i1, i2) -> i5
- // FWDBWD-NEXT: %[[v7:.*]] = "slicing-test-op"(%[[v1]], %[[v5]]) : (i1, i5) -> i7
- // FWDBWD-DAG: %[[v4:.*]] = "slicing-test-op"() : () -> i4
- // FWDBWD-DAG: %[[v3:.*]] = "slicing-test-op"() : () -> i3
- // FWDBWD-NEXT: %[[v6:.*]] = "slicing-test-op"(%[[v3]], %[[v4]]) : (i3, i4) -> i6
- // FWDBWD-NEXT: %[[v8:.*]] = "slicing-test-op"(%[[v5]], %[[v6]]) : (i5, i6) -> i8
- // FWDBWD-NEXT: %[[v9:.*]] = "slicing-test-op"(%[[v7]], %[[v8]]) : (i7, i8) -> i9
+ // FWDBWD: %[[v1:.*]] = "slicing-test-op"() : () -> i1
+ // FWDBWD: %[[v2:.*]] = "slicing-test-op"() : () -> i2
+ // FWDBWD: %[[v3:.*]] = "slicing-test-op"() : () -> i3
+ // FWDBWD: %[[v4:.*]] = "slicing-test-op"() : () -> i4
+ // FWDBWD: %[[v5:.*]] = "slicing-test-op"(%[[v1]], %[[v2]]) : (i1, i2) -> i5
+ // FWDBWD: %[[v6:.*]] = "slicing-test-op"(%[[v3]], %[[v4]]) : (i3, i4) -> i6
+ // FWDBWD: %[[v7:.*]] = "slicing-test-op"(%[[v1]], %[[v5]]) : (i1, i5) -> i7
+ // FWDBWD: %[[v8:.*]] = "slicing-test-op"(%[[v5]], %[[v6]]) : (i5, i6) -> i8
+ // FWDBWD: %[[v9:.*]] = "slicing-test-op"(%[[v7]], %[[v8]]) : (i7, i8) -> i9
%4 = "slicing-test-op" () : () -> i4
@@ -111,15 +111,15 @@ func.func @slicing_test() {
// BWD-DAG: %[[v2:.*]] = "slicing-test-op"() : () -> i2
//
// FWDBWD-NEXT: matched: %[[v5:.*]] {{.*}} static slice:
- // FWDBWD-DAG: %[[v4:.*]] = "slicing-test-op"() : () -> i4
- // FWDBWD-DAG: %[[v3:.*]] = "slicing-test-op"() : () -> i3
- // FWDBWD-NEXT: %[[v6:.*]] = "slicing-test-op"(%[[v3]], %[[v4]]) : (i3, i4) -> i6
- // FWDBWD-DAG: %[[v2:.*]] = "slicing-test-op"() : () -> i2
- // FWDBWD-DAG: %[[v1:.*]] = "slicing-test-op"() : () -> i1
- // FWDBWD-NEXT: %[[v5:.*]] = "slicing-test-op"(%[[v1]], %[[v2]]) : (i1, i2) -> i5
- // FWDBWD-DAG: %[[v8:.*]] = "slicing-test-op"(%[[v5]], %[[v6]]) : (i5, i6) -> i8
- // FWDBWD-DAG: %[[v7:.*]] = "slicing-test-op"(%[[v1]], %[[v5]]) : (i1, i5) -> i7
- // FWDBWD-NEXT: %[[v9:.*]] = "slicing-test-op"(%[[v7]], %[[v8]]) : (i7, i8) -> i9
+ // FWDBWD: %[[v1:.*]] = "slicing-test-op"() : () -> i1
+ // FWDBWD: %[[v2:.*]] = "slicing-test-op"() : () -> i2
+ // FWDBWD: %[[v3:.*]] = "slicing-test-op"() : () -> i3
+ // FWDBWD: %[[v4:.*]] = "slicing-test-op"() : () -> i4
+ // FWDBWD: %[[v5:.*]] = "slicing-test-op"(%[[v1]], %[[v2]]) : (i1, i2) -> i5
+ // FWDBWD: %[[v6:.*]] = "slicing-test-op"(%[[v3]], %[[v4]]) : (i3, i4) -> i6
+ // FWDBWD: %[[v7:.*]] = "slicing-test-op"(%[[v1]], %[[v5]]) : (i1, i5) -> i7
+ // FWDBWD: %[[v8:.*]] = "slicing-test-op"(%[[v5]], %[[v6]]) : (i5, i6) -> i8
+ // FWDBWD: %[[v9:.*]] = "slicing-test-op"(%[[v7]], %[[v8]]) : (i7, i8) -> i9
%5 = "slicing-test-op" (%1, %2) : (i1, i2) -> i5
@@ -132,15 +132,15 @@ func.func @slicing_test() {
// BWD-DAG: %[[v4:.*]] = "slicing-test-op"() : () -> i4
//
// FWDBWD-NEXT: matched: %[[v6:.*]] {{.*}} static slice:
- // FWDBWD-DAG: %[[v2:.*]] = "slicing-test-op"() : () -> i2
- // FWDBWD-DAG: %[[v1:.*]] = "slicing-test-op"() : () -> i1
- // FWDBWD-NEXT: %[[v5:.*]] = "slicing-test-op"(%[[v1]], %[[v2]]) : (i1, i2) -> i5
- // FWDBWD-NEXT: %[[v7:.*]] = "slicing-test-op"(%[[v1]], %[[v5]]) : (i1, i5) -> i7
- // FWDBWD-DAG: %[[v4:.*]] = "slicing-test-op"() : () -> i4
- // FWDBWD-DAG: %[[v3:.*]] = "slicing-test-op"() : () -> i3
- // FWDBWD-NEXT: %[[v6:.*]] = "slicing-test-op"(%[[v3]], %[[v4]]) : (i3, i4) -> i6
- // FWDBWD-NEXT: %[[v8:.*]] = "slicing-test-op"(%[[v5]], %[[v6]]) : (i5, i6) -> i8
- // FWDBWD-NEXT: %[[v9:.*]] = "slicing-test-op"(%[[v7]], %[[v8]]) : (i7, i8) -> i9
+ // FWDBWD: %[[v1:.*]] = "slicing-test-op"() : () -> i1
+ // FWDBWD: %[[v2:.*]] = "slicing-test-op"() : () -> i2
+ // FWDBWD: %[[v3:.*]] = "slicing-test-op"() : () -> i3
+ // FWDBWD: %[[v4:.*]] = "slicing-test-op"() : () -> i4
+ // FWDBWD: %[[v5:.*]] = "slicing-test-op"(%[[v1]], %[[v2]]) : (i1, i2) -> i5
+ // FWDBWD: %[[v6:.*]] = "slicing-test-op"(%[[v3]], %[[v4]]) : (i3, i4) -> i6
+ // FWDBWD: %[[v7:.*]] = "slicing-test-op"(%[[v1]], %[[v5]]) : (i1, i5) -> i7
+ // FWDBWD: %[[v8:.*]] = "slicing-test-op"(%[[v5]], %[[v6]]) : (i5, i6) -> i8
+ // FWDBWD: %[[v9:.*]] = "slicing-test-op"(%[[v7]], %[[v8]]) : (i7, i8) -> i9
%6 = "slicing-test-op" (%3, %4) : (i3, i4) -> i6
@@ -153,15 +153,15 @@ func.func @slicing_test() {
// BWD-NEXT: %[[v5:.*]] = "slicing-test-op"(%[[v1]], %[[v2]]) : (i1, i2) -> i5
//
// FWDBWD-NEXT: matched: %[[v7:.*]] {{.*}} static slice:
- // FWDBWD-DAG: %[[v4:.*]] = "slicing-test-op"() : () -> i4
- // FWDBWD-DAG: %[[v3:.*]] = "slicing-test-op"() : () -> i3
- // FWDBWD-NEXT: %[[v6:.*]] = "slicing-test-op"(%[[v3]], %[[v4]]) : (i3, i4) -> i6
- // FWDBWD-DAG: %[[v2:.*]] = "slicing-test-op"() : () -> i2
- // FWDBWD-DAG: %[[v1:.*]] = "slicing-test-op"() : () -> i1
+ // FWDBWD: %[[v1:.*]] = "slicing-test-op"() : () -> i1
+ // FWDBWD: %[[v2:.*]] = "slicing-test-op"() : () -> i2
+ // FWDBWD: %[[v3:.*]] = "slicing-test-op"() : () -> i3
+ // FWDBWD: %[[v4:.*]] = "slicing-test-op"() : () -> i4
// FWDBWD: %[[v5:.*]] = "slicing-test-op"(%[[v1]], %[[v2]]) : (i1, i2) -> i5
- // FWDBWD-DAG: %[[v8:.*]] = "slicing-test-op"(%[[v5]], %[[v6]]) : (i5, i6) -> i8
- // FWDBWD-DAG: %[[v7:.*]] = "slicing-test-op"(%[[v1]], %[[v5]]) : (i1, i5) -> i7
- // FWDBWD-NEXT: %[[v9:.*]] = "slicing-test-op"(%[[v7]], %[[v8]]) : (i7, i8) -> i9
+ // FWDBWD: %[[v6:.*]] = "slicing-test-op"(%[[v3]], %[[v4]]) : (i3, i4) -> i6
+ // FWDBWD: %[[v7:.*]] = "slicing-test-op"(%[[v1]], %[[v5]]) : (i1, i5) -> i7
+ // FWDBWD: %[[v8:.*]] = "slicing-test-op"(%[[v5]], %[[v6]]) : (i5, i6) -> i8
+ // FWDBWD: %[[v9:.*]] = "slicing-test-op"(%[[v7]], %[[v8]]) : (i7, i8) -> i9
%7 = "slicing-test-op" (%1, %5) : (i1, i5) -> i7
@@ -177,15 +177,15 @@ func.func @slicing_test() {
// BWD-NEXT: %[[v6:.*]] = "slicing-test-op"(%[[v3]], %[[v4]]) : (i3, i4) -> i6
//
// FWDBWD-NEXT: matched: %[[v8:.*]] {{.*}} static slice:
- // FWDBWD-DAG: %[[v4:.*]] = "slicing-test-op"() : () -> i4
- // FWDBWD-DAG: %[[v3:.*]] = "slicing-test-op"() : () -> i3
- // FWDBWD-NEXT: %[[v6:.*]] = "slicing-test-op"(%[[v3]], %[[v4]]) : (i3, i4) -> i6
- // FWDBWD-DAG: %[[v2:.*]] = "slicing-test-op"() : () -> i2
- // FWDBWD-DAG: %[[v1:.*]] = "slicing-test-op"() : () -> i1
- // FWDBWD-NEXT: %[[v5:.*]] = "slicing-test-op"(%[[v1]], %[[v2]]) : (i1, i2) -> i5
- // FWDBWD-DAG: %[[v8:.*]] = "slicing-test-op"(%[[v5]], %[[v6]]) : (i5, i6) -> i8
- // FWDBWD-DAG: %[[v7:.*]] = "slicing-test-op"(%[[v1]], %[[v5]]) : (i1, i5) -> i7
- // FWDBWD-NEXT: %[[v9:.*]] = "slicing-test-op"(%[[v7]], %[[v8]]) : (i7, i8) -> i9
+ // FWDBWD: %[[v1:.*]] = "slicing-test-op"() : () -> i1
+ // FWDBWD: %[[v2:.*]] = "slicing-test-op"() : () -> i2
+ // FWDBWD: %[[v3:.*]] = "slicing-test-op"() : () -> i3
+ // FWDBWD: %[[v4:.*]] = "slicing-test-op"() : () -> i4
+ // FWDBWD: %[[v5:.*]] = "slicing-test-op"(%[[v1]], %[[v2]]) : (i1, i2) -> i5
+ // FWDBWD: %[[v6:.*]] = "slicing-test-op"(%[[v3]], %[[v4]]) : (i3, i4) -> i6
+ // FWDBWD: %[[v7:.*]] = "slicing-test-op"(%[[v1]], %[[v5]]) : (i1, i5) -> i7
+ // FWDBWD: %[[v8:.*]] = "slicing-test-op"(%[[v5]], %[[v6]]) : (i5, i6) -> i8
+ // FWDBWD: %[[v9:.*]] = "slicing-test-op"(%[[v7]], %[[v8]]) : (i7, i8) -> i9
%8 = "slicing-test-op" (%5, %6) : (i5, i6) -> i8
@@ -202,15 +202,15 @@ func.func @slicing_test() {
// BWD-NEXT: %[[v8:.*]] = "slicing-test-op"(%[[v5]], %[[v6]]) : (i5, i6) -> i8
//
// FWDBWD-NEXT: matched: %[[v9:.*]] {{.*}} static slice:
- // FWDBWD-DAG: %[[v4:.*]] = "slicing-test-op"() : () -> i4
- // FWDBWD-DAG: %[[v3:.*]] = "slicing-test-op"() : () -> i3
- // FWDBWD-NEXT: %[[v6:.*]] = "slicing-test-op"(%[[v3]], %[[v4]]) : (i3, i4) -> i6
- // FWDBWD-DAG: %[[v2:.*]] = "slicing-test-op"() : () -> i2
- // FWDBWD-DAG: %[[v1:.*]] = "slicing-test-op"() : () -> i1
- // FWDBWD-NEXT: %[[v5:.*]] = "slicing-test-op"(%[[v1]], %[[v2]]) : (i1, i2) -> i5
- // FWDBWD-DAG: %[[v8:.*]] = "slicing-test-op"(%[[v5]], %[[v6]]) : (i5, i6) -> i8
- // FWDBWD-DAG: %[[v7:.*]] = "slicing-test-op"(%[[v1]], %[[v5]]) : (i1, i5) -> i7
- // FWDBWD-NEXT: %[[v9:.*]] = "slicing-test-op"(%[[v7]], %[[v8]]) : (i7, i8) -> i9
+ // FWDBWD: %[[v1:.*]] = "slicing-test-op"() : () -> i1
+ // FWDBWD: %[[v2:.*]] = "slicing-test-op"() : () -> i2
+ // FWDBWD: %[[v3:.*]] = "slicing-test-op"() : () -> i3
+ // FWDBWD: %[[v4:.*]] = "slicing-test-op"() : () -> i4
+ // FWDBWD: %[[v5:.*]] = "slicing-test-op"(%[[v1]], %[[v2]]) : (i1, i2) -> i5
+ // FWDBWD: %[[v6:.*]] = "slicing-test-op"(%[[v3]], %[[v4]]) : (i3, i4) -> i6
+ // FWDBWD: %[[v7:.*]] = "slicing-test-op"(%[[v1]], %[[v5]]) : (i1, i5) -> i7
+ // FWDBWD: %[[v8:.*]] = "slicing-test-op"(%[[v5]], %[[v6]]) : (i5, i6) -> i8
+ // FWDBWD: %[[v9:.*]] = "slicing-test-op"(%[[v7]], %[[v8]]) : (i7, i8) -> i9
%9 = "slicing-test-op" (%7, %8) : (i7, i8) -> i9
diff --git a/mlir/test/Dialect/Arith/canonicalize.mlir b/mlir/test/Dialect/Arith/canonicalize.mlir
index e4f95bb0545a..1a387c20c4b2 100644
--- a/mlir/test/Dialect/Arith/canonicalize.mlir
+++ b/mlir/test/Dialect/Arith/canonicalize.mlir
@@ -2950,6 +2950,14 @@ func.func @unsignedExtendConstantResource() -> tensor<i16> {
return %ext : tensor<i16>
}
+// Just checks that this doesn't crash.
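+// Folding `arith.extsi` of a splat constant would materialize a splat of the
+// result type, which is impossible for the dynamically shaped tensor<?xi64>,
+// so the fold must bail out instead of crashing.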
+// CHECK-LABEL: @signedExtendSplatAsDynamicShape
+func.func @signedExtendSplatAsDynamicShape() -> tensor<?xi64> {
+ %splat = arith.constant dense<5> : tensor<2xi16>
+ %extsplat = arith.extsi %splat : tensor<2xi16> to tensor<?xi64>
+ return %extsplat : tensor<?xi64>
+}
+
// CHECK-LABEL: @extsi_i0
// CHECK: %[[ZERO:.*]] = arith.constant 0 : i16
// CHECK: return %[[ZERO]] : i16
diff --git a/mlir/test/Dialect/Arith/int-range-interface.mlir b/mlir/test/Dialect/Arith/int-range-interface.mlir
index 16524b363472..5b538197a0c1 100644
--- a/mlir/test/Dialect/Arith/int-range-interface.mlir
+++ b/mlir/test/Dialect/Arith/int-range-interface.mlir
@@ -758,7 +758,7 @@ func.func private @callee(%arg0: memref<?xindex, 4>) {
}
// CHECK-LABEL: func @test_i8_bounds
-// CHECK: test.reflect_bounds {smax = 127 : i8, smin = -128 : i8, umax = -1 : i8, umin = 0 : i8}
+// CHECK: test.reflect_bounds {smax = 127 : si8, smin = -128 : si8, umax = 255 : ui8, umin = 0 : ui8}
func.func @test_i8_bounds() -> i8 {
%cst1 = arith.constant 1 : i8
%0 = test.with_bounds { umin = 0 : i8, umax = 255 : i8, smin = -128 : i8, smax = 127 : i8 } : i8
@@ -766,3 +766,136 @@ func.func @test_i8_bounds() -> i8 {
%2 = test.reflect_bounds %1 : i8
return %2: i8
}
+
+// CHECK-LABEL: func @test_add_1
+// CHECK: test.reflect_bounds {smax = 127 : si8, smin = -128 : si8, umax = 255 : ui8, umin = 0 : ui8}
+func.func @test_add_1() -> i8 {
+ %cst1 = arith.constant 1 : i8
+ %0 = test.with_bounds { umin = 0 : i8, umax = 255 : i8, smin = -128 : i8, smax = 127 : i8 } : i8
+ %1 = arith.addi %0, %cst1 : i8
+ %2 = test.reflect_bounds %1 : i8
+ return %2: i8
+}
+
+// Tests below check inference with overflow flags.
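+//
+// The expected bounds follow from interval arithmetic on the operand ranges:
+// without a flag, an operation that may wrap collapses the affected range to
+// the full signed or unsigned i8 range, while nsw/nuw exclude the wrap and
+// keep the bounds tight. For example, adding 1 to [0, 127] gives the unsigned
+// range [1, 128], but the signed maximum 127 + 1 would wrap, so the signed
+// range widens to [-128, 127] unless nsw is present.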
+
+// CHECK-LABEL: func @test_add_i8_wrap1
+// CHECK: test.reflect_bounds {smax = 127 : si8, smin = -128 : si8, umax = 128 : ui8, umin = 1 : ui8}
+func.func @test_add_i8_wrap1() -> i8 {
+ %cst1 = arith.constant 1 : i8
+ %0 = test.with_bounds { umin = 0 : i8, umax = 127 : i8, smin = 0 : i8, smax = 127 : i8 } : i8
+ // smax overflow
+ %1 = arith.addi %0, %cst1 : i8
+ %2 = test.reflect_bounds %1 : i8
+ return %2: i8
+}
+
+// CHECK-LABEL: func @test_add_i8_wrap2
+// CHECK: test.reflect_bounds {smax = 127 : si8, smin = -128 : si8, umax = 128 : ui8, umin = 1 : ui8}
+func.func @test_add_i8_wrap2() -> i8 {
+ %cst1 = arith.constant 1 : i8
+ %0 = test.with_bounds { umin = 0 : i8, umax = 127 : i8, smin = 0 : i8, smax = 127 : i8 } : i8
+ // smax overflow
+ %1 = arith.addi %0, %cst1 overflow<nuw> : i8
+ %2 = test.reflect_bounds %1 : i8
+ return %2: i8
+}
+
+// CHECK-LABEL: func @test_add_i8_nowrap
+// CHECK: test.reflect_bounds {smax = 127 : si8, smin = 1 : si8, umax = 127 : ui8, umin = 1 : ui8}
+func.func @test_add_i8_nowrap() -> i8 {
+ %cst1 = arith.constant 1 : i8
+ %0 = test.with_bounds { umin = 0 : i8, umax = 127 : i8, smin = 0 : i8, smax = 127 : i8 } : i8
+ // nsw flag stops smax from overflowing
+ %1 = arith.addi %0, %cst1 overflow<nsw> : i8
+ %2 = test.reflect_bounds %1 : i8
+ return %2: i8
+}
+
+// CHECK-LABEL: func @test_sub_i8_wrap1
+// CHECK: test.reflect_bounds {smax = 5 : si8, smin = -10 : si8, umax = 255 : ui8, umin = 0 : ui8} %1 : i8
+func.func @test_sub_i8_wrap1() -> i8 {
+ %cst10 = arith.constant 10 : i8
+ %0 = test.with_bounds { umin = 0 : i8, umax = 15 : i8, smin = 0 : i8, smax = 15 : i8 } : i8
+ // umin underflows
+ %1 = arith.subi %0, %cst10 : i8
+ %2 = test.reflect_bounds %1 : i8
+ return %2: i8
+}
+
+// CHECK-LABEL: func @test_sub_i8_wrap2
+// CHECK: test.reflect_bounds {smax = 5 : si8, smin = -10 : si8, umax = 255 : ui8, umin = 0 : ui8} %1 : i8
+func.func @test_sub_i8_wrap2() -> i8 {
+ %cst10 = arith.constant 10 : i8
+ %0 = test.with_bounds { umin = 0 : i8, umax = 15 : i8, smin = 0 : i8, smax = 15 : i8 } : i8
+ // umin underflows
+ %1 = arith.subi %0, %cst10 overflow<nsw> : i8
+ %2 = test.reflect_bounds %1 : i8
+ return %2: i8
+}
+
+// CHECK-LABEL: func @test_sub_i8_nowrap
+// CHECK: test.reflect_bounds {smax = 5 : si8, smin = 0 : si8, umax = 5 : ui8, umin = 0 : ui8}
+func.func @test_sub_i8_nowrap() -> i8 {
+ %cst10 = arith.constant 10 : i8
+ %0 = test.with_bounds { umin = 0 : i8, umax = 15 : i8, smin = 0 : i8, smax = 15 : i8 } : i8
+ // nuw flag stops umin from underflowing
+ %1 = arith.subi %0, %cst10 overflow<nuw> : i8
+ %2 = test.reflect_bounds %1 : i8
+ return %2: i8
+}
+
+// CHECK-LABEL: func @test_mul_i8_wrap
+// CHECK: test.reflect_bounds {smax = 127 : si8, smin = -128 : si8, umax = 200 : ui8, umin = 100 : ui8}
+func.func @test_mul_i8_wrap() -> i8 {
+ %cst10 = arith.constant 10 : i8
+ %0 = test.with_bounds { umin = 10 : i8, umax = 20 : i8, smin = 10 : i8, smax = 20 : i8 } : i8
+ // smax overflows
+ %1 = arith.muli %0, %cst10 : i8
+ %2 = test.reflect_bounds %1 : i8
+ return %2: i8
+}
+
+// CHECK-LABEL: func @test_mul_i8_nowrap
+// CHECK: test.reflect_bounds {smax = 127 : si8, smin = 100 : si8, umax = 127 : ui8, umin = 100 : ui8}
+func.func @test_mul_i8_nowrap() -> i8 {
+ %cst10 = arith.constant 10 : i8
+ %0 = test.with_bounds { umin = 10 : i8, umax = 20 : i8, smin = 10 : i8, smax = 20 : i8 } : i8
+ // nsw stops overflow
+ %1 = arith.muli %0, %cst10 overflow<nsw> : i8
+ %2 = test.reflect_bounds %1 : i8
+ return %2: i8
+}
+
+// CHECK-LABEL: func @test_shl_i8_wrap1
+// CHECK: test.reflect_bounds {smax = 127 : si8, smin = -128 : si8, umax = 160 : ui8, umin = 80 : ui8}
+func.func @test_shl_i8_wrap1() -> i8 {
+ %cst3 = arith.constant 3 : i8
+ %0 = test.with_bounds { umin = 10 : i8, umax = 20 : i8, smin = 10 : i8, smax = 20 : i8 } : i8
+ // smax overflows
+ %1 = arith.shli %0, %cst3 : i8
+ %2 = test.reflect_bounds %1 : i8
+ return %2: i8
+}
+
+// CHECK-LABEL: func @test_shl_i8_wrap2
+// CHECK: test.reflect_bounds {smax = 127 : si8, smin = -128 : si8, umax = 160 : ui8, umin = 80 : ui8}
+func.func @test_shl_i8_wrap2() -> i8 {
+ %cst3 = arith.constant 3 : i8
+ %0 = test.with_bounds { umin = 10 : i8, umax = 20 : i8, smin = 10 : i8, smax = 20 : i8 } : i8
+ // smax overflows
+ %1 = arith.shli %0, %cst3 overflow<nuw> : i8
+ %2 = test.reflect_bounds %1 : i8
+ return %2: i8
+}
+
+// CHECK-LABEL: func @test_shl_i8_nowrap
+// CHECK: test.reflect_bounds {smax = 127 : si8, smin = 80 : si8, umax = 127 : ui8, umin = 80 : ui8}
+func.func @test_shl_i8_nowrap() -> i8 {
+ %cst3 = arith.constant 3 : i8
+ %0 = test.with_bounds { umin = 10 : i8, umax = 20 : i8, smin = 10 : i8, smax = 20 : i8 } : i8
+ // nsw stops smax overflow
+ %1 = arith.shli %0, %cst3 overflow<nsw> : i8
+ %2 = test.reflect_bounds %1 : i8
+ return %2: i8
+}
diff --git a/mlir/test/Dialect/Arith/int-range-opts.mlir b/mlir/test/Dialect/Arith/int-range-opts.mlir
index 6179003ab4e7..dd62a481a124 100644
--- a/mlir/test/Dialect/Arith/int-range-opts.mlir
+++ b/mlir/test/Dialect/Arith/int-range-opts.mlir
@@ -75,7 +75,7 @@ func.func @test() -> i1 {
// -----
// CHECK-LABEL: func @test
-// CHECK: test.reflect_bounds {smax = 24 : i8, smin = 0 : i8, umax = 24 : i8, umin = 0 : i8}
+// CHECK: test.reflect_bounds {smax = 24 : si8, smin = 0 : si8, umax = 24 : ui8, umin = 0 : ui8}
func.func @test() -> i8 {
%cst1 = arith.constant 1 : i8
%i8val = test.with_bounds { umin = 0 : i8, umax = 12 : i8, smin = 0 : i8, smax = 12 : i8 } : i8
@@ -87,7 +87,7 @@ func.func @test() -> i8 {
// -----
// CHECK-LABEL: func @test
-// CHECK: test.reflect_bounds {smax = 127 : i8, smin = -128 : i8, umax = -1 : i8, umin = 0 : i8}
+// CHECK: test.reflect_bounds {smax = 127 : si8, smin = -128 : si8, umax = 254 : ui8, umin = 0 : ui8}
func.func @test() -> i8 {
%cst1 = arith.constant 1 : i8
%i8val = test.with_bounds { umin = 0 : i8, umax = 127 : i8, smin = 0 : i8, smax = 127 : i8 } : i8
diff --git a/mlir/test/Dialect/Arith/unsigned-when-equivalent.mlir b/mlir/test/Dialect/Arith/unsigned-when-equivalent.mlir
index ce77d3d2f425..49bd74cfe912 100644
--- a/mlir/test/Dialect/Arith/unsigned-when-equivalent.mlir
+++ b/mlir/test/Dialect/Arith/unsigned-when-equivalent.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt -arith-unsigned-when-equivalent %s | FileCheck %s
-// CHECK-LABEL func @not_with_maybe_overflow
+// CHECK-LABEL: func @not_with_maybe_overflow
// CHECK: arith.divsi
// CHECK: arith.ceildivsi
// CHECK: arith.floordivsi
@@ -32,7 +32,7 @@ func.func @not_with_maybe_overflow(%arg0 : i32) {
func.return
}
-// CHECK-LABEL func @yes_with_no_overflow
+// CHECK-LABEL: func @yes_with_no_overflow
// CHECK: arith.divui
// CHECK: arith.ceildivui
// CHECK: arith.divui
diff --git a/mlir/test/Dialect/ArmSME/tile-allocation-liveness.mlir b/mlir/test/Dialect/ArmSME/tile-allocation-liveness.mlir
index 88fc8a8923d3..fe4c005c7c42 100644
--- a/mlir/test/Dialect/ArmSME/tile-allocation-liveness.mlir
+++ b/mlir/test/Dialect/ArmSME/tile-allocation-liveness.mlir
@@ -366,15 +366,15 @@ func.func @avoidable_spill(%a: vector<[4]xf32>, %b: vector<[4]xf32>, %c: vector<
// CHECK-LIVE-RANGE-LABEL: @cond_branch_with_backedge
// CHECK-LIVE-RANGE: ^bb1:
-// CHECK-LIVE-RANGE--NEXT: ||| | arith.cmpi
-// CHECK-LIVE-RANGE--NEXT: EEE E cf.cond_br
+// CHECK-LIVE-RANGE-NEXT: ||| | arith.cmpi
+// CHECK-LIVE-RANGE-NEXT: EEE E cf.cond_br
//
-// CHECK-LIVE-RANGE--NEXT: ^[[BB3_COPIES:[[:alnum:]]+]]:
-// CHECK-LIVE-RANGE--NEXT: ||| ES arm_sme.copy_tile
-// CHECK-LIVE-RANGE--NEXT: E|| |S arm_sme.copy_tile
-// CHECK-LIVE-RANGE--NEXT: E| ||S arm_sme.copy_tile
-// CHECK-LIVE-RANGE--NEXT: E |||S arm_sme.copy_tile
-// CHECK-LIVE-RANGE--NEXT: EEEE cf.br
+// CHECK-LIVE-RANGE-NEXT: ^[[BB3_COPIES:[[:alnum:]]+]]:
+// CHECK-LIVE-RANGE-NEXT: ||| ES arm_sme.copy_tile
+// CHECK-LIVE-RANGE-NEXT: E|| |S arm_sme.copy_tile
+// CHECK-LIVE-RANGE-NEXT: E| ||S arm_sme.copy_tile
+// CHECK-LIVE-RANGE-NEXT: E |||S arm_sme.copy_tile
+// CHECK-LIVE-RANGE-NEXT: EEEE cf.br
//
// It is important to note that the first three live ranges in ^bb1 do not end
// at the `cf.cond_br`, as they are live-out via the backedge bb1 -> bb2 -> bb1.
@@ -389,15 +389,15 @@ func.func @avoidable_spill(%a: vector<[4]xf32>, %b: vector<[4]xf32>, %c: vector<
//
// CHECK-LIVE-RANGE: ========== Coalesced Live Ranges:
// CHECK-LIVE-RANGE: ^bb1:
-// CHECK-LIVE-RANGE--NEXT: |||| arith.cmpi
-// CHECK-LIVE-RANGE--NEXT: EEEE cf.cond_br
+// CHECK-LIVE-RANGE-NEXT: |||| arith.cmpi
+// CHECK-LIVE-RANGE-NEXT: EEEE cf.cond_br
//
-// CHECK-LIVE-RANGE--NEXT: ^[[BB3_COPIES]]:
-// CHECK-LIVE-RANGE--NEXT: |||| arm_sme.copy_tile
-// CHECK-LIVE-RANGE--NEXT: |||| arm_sme.copy_tile
-// CHECK-LIVE-RANGE--NEXT: |||| arm_sme.copy_tile
-// CHECK-LIVE-RANGE--NEXT: |||| arm_sme.copy_tile
-// CHECK-LIVE-RANGE--NEXT: EEEE cf.br
+// CHECK-LIVE-RANGE-NEXT: ^[[BB3_COPIES]]:
+// CHECK-LIVE-RANGE-NEXT: |||| arm_sme.copy_tile
+// CHECK-LIVE-RANGE-NEXT: |||| arm_sme.copy_tile
+// CHECK-LIVE-RANGE-NEXT: |||| arm_sme.copy_tile
+// CHECK-LIVE-RANGE-NEXT: |||| arm_sme.copy_tile
+// CHECK-LIVE-RANGE-NEXT: EEEE cf.br
// CHECK-LABEL: @cond_branch_with_backedge
// CHECK-NOT: tile_id = 16
diff --git a/mlir/test/Dialect/Bufferization/Transforms/lower-deallocations-func.mlir b/mlir/test/Dialect/Bufferization/Transforms/lower-deallocations-func.mlir
index 03cf10aa0c05..3de3a6a693cf 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/lower-deallocations-func.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/lower-deallocations-func.mlir
@@ -9,10 +9,10 @@ func.func @conversion_dealloc_simple(%arg0: memref<2xf32>, %arg1: i1) {
return
}
-// CHECk: scf.if [[ARG1]] {
-// CHECk-NEXT: memref.dealloc [[ARG0]] : memref<2xf32>
-// CHECk-NEXT: }
-// CHECk-NEXT: return
+// CHECK: scf.if [[ARG1]] {
+// CHECK-NEXT: memref.dealloc [[ARG0]] : memref<2xf32>
+// CHECK-NEXT: }
+// CHECK-NEXT: return
// -----
diff --git a/mlir/test/Dialect/Bufferization/Transforms/lower-deallocations.mlir b/mlir/test/Dialect/Bufferization/Transforms/lower-deallocations.mlir
index 2c69fcab08a8..5fedd45555fc 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/lower-deallocations.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/lower-deallocations.mlir
@@ -29,10 +29,10 @@ func.func @conversion_dealloc_simple(%arg0: memref<2xf32>, %arg1: i1) {
return
}
-// CHECk: scf.if [[ARG1]] {
-// CHECk-NEXT: memref.dealloc [[ARG0]] : memref<2xf32>
-// CHECk-NEXT: }
-// CHECk-NEXT: return
+// CHECK: scf.if [[ARG1]] {
+// CHECK-NEXT: memref.dealloc [[ARG0]] : memref<2xf32>
+// CHECK-NEXT: }
+// CHECK-NEXT: return
// -----
diff --git a/mlir/test/Dialect/GPU/barrier-elimination.mlir b/mlir/test/Dialect/GPU/barrier-elimination.mlir
index 844dc7dd6ac0..1f5b84937deb 100644
--- a/mlir/test/Dialect/GPU/barrier-elimination.mlir
+++ b/mlir/test/Dialect/GPU/barrier-elimination.mlir
@@ -61,7 +61,7 @@ func.func @write_in_a_loop(%arg0: memref<?xf32>, %arg1: f32) attributes {__paral
return
}
-// CHECK-LABEL @read_read_write_loop
+// CHECK-LABEL: @read_read_write_loop
func.func @read_read_write_loop(%arg0: memref<?xf32>, %arg1: f32) attributes {__parallel_region_boundary_for_test} {
%c0 = arith.constant 0 : index
%c42 = arith.constant 42 : index
diff --git a/mlir/test/Dialect/GPU/ops.mlir b/mlir/test/Dialect/GPU/ops.mlir
index 511b01887747..ba7897f4e80c 100644
--- a/mlir/test/Dialect/GPU/ops.mlir
+++ b/mlir/test/Dialect/GPU/ops.mlir
@@ -227,7 +227,7 @@ module attributes {gpu.container_module} {
gpu.return
}
- // CHECK-LABEL gpu.func @printf_test
+ // CHECK-LABEL: gpu.func @printf_test
// CHECK: (%[[ARG0:.*]]: i32)
// CHECK: gpu.printf "Value: %d" %[[ARG0]] : i32
gpu.func @printf_test(%arg0 : i32) {
diff --git a/mlir/test/Dialect/GPU/outlining.mlir b/mlir/test/Dialect/GPU/outlining.mlir
index 5e4724c9d309..47ebe326b5d1 100644
--- a/mlir/test/Dialect/GPU/outlining.mlir
+++ b/mlir/test/Dialect/GPU/outlining.mlir
@@ -123,7 +123,7 @@ llvm.func @launch_from_llvm_func() {
llvm.return
}
-// CHECK-DL-LABLE: gpu.module @launch_from_llvm_func_kernel attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<index, 32 : i32>>}
+// CHECK-DL-LABEL: gpu.module @launch_from_llvm_func_kernel attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<index, 32 : i32>>}
// -----
diff --git a/mlir/test/Dialect/GPU/test-nvvm-pipeline.mlir b/mlir/test/Dialect/GPU/test-nvvm-pipeline.mlir
index 07e719798b85..732f40c4333d 100644
--- a/mlir/test/Dialect/GPU/test-nvvm-pipeline.mlir
+++ b/mlir/test/Dialect/GPU/test-nvvm-pipeline.mlir
@@ -27,4 +27,4 @@ func.func @test_math(%arg0 : f32) {
gpu.terminator
}
return
-}
\ No newline at end of file
+}
diff --git a/mlir/test/Dialect/IRDL/invalid.irdl.mlir b/mlir/test/Dialect/IRDL/invalid.irdl.mlir
index d62bb498a7ad..f207d31cf158 100644
--- a/mlir/test/Dialect/IRDL/invalid.irdl.mlir
+++ b/mlir/test/Dialect/IRDL/invalid.irdl.mlir
@@ -6,7 +6,7 @@ func.func private @foo()
irdl.dialect @testd {
irdl.type @type {
- // expected-error@+1 {{'@foo' does not refer to a type or attribute definition}}
+ // expected-error@+1 {{symbol '@foo' not found}}
%0 = irdl.base @foo
irdl.parameters(%0)
}
@@ -41,3 +41,18 @@ irdl.dialect @testd {
irdl.parameters(%0)
}
}
+
+// -----
+
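+// `irdl.parametric` may only reference a type or attribute definition;
+// pointing it at an operation definition must be diagnosed, as checked below.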
+irdl.dialect @invalid_parametric {
+ irdl.operation @foo {
+ // expected-error@+1 {{symbol '@not_a_type_or_attr' does not refer to a type or attribute definition}}
+ %param = irdl.parametric @not_a_type_or_attr<>
+ irdl.results(%param)
+ }
+
+ irdl.operation @not_a_type_or_attr {
+ %param = irdl.is i1
+ irdl.results(%param)
+ }
+}
diff --git a/mlir/test/Dialect/LLVMIR/nvvm.mlir b/mlir/test/Dialect/LLVMIR/nvvm.mlir
index de2904d15b64..a7bdceba01c1 100644
--- a/mlir/test/Dialect/LLVMIR/nvvm.mlir
+++ b/mlir/test/Dialect/LLVMIR/nvvm.mlir
@@ -464,24 +464,24 @@ llvm.func private @mbarrier_test_wait_shared(%barrier: !llvm.ptr<3>, %token : i6
llvm.return
}
-// CHECK-LABEL : @wgmma_fence_aligned
+// CHECK-LABEL: @wgmma_fence_aligned
func.func @wgmma_fence_aligned() {
- // CHECK : nvvm.wgmma.fence.aligned
+ // CHECK: nvvm.wgmma.fence.aligned
nvvm.wgmma.fence.aligned
return
}
-// CHECK-LABEL : @wgmma_commit_group_sync_aligned
+// CHECK-LABEL: @wgmma_commit_group_sync_aligned
func.func @wgmma_commit_group_sync_aligned() {
- // CHECK : nvvm.wgmma.commit.group.sync.aligned
+ // CHECK: nvvm.wgmma.commit.group.sync.aligned
nvvm.wgmma.commit.group.sync.aligned
return
}
-// CHECK-LABEL : @wgmma_commit_group_sync_aligned
+// CHECK-LABEL: @wgmma_wait_group_sync_aligned
func.func @wgmma_wait_group_sync_aligned() {
- // CHECK : nvvm.wgmma.wait.group.sync.aligned
+ // CHECK: nvvm.wgmma.wait.group.sync.aligned
nvvm.wgmma.wait.group.sync.aligned 0
return
}
@@ -495,7 +495,7 @@ gpu.module @module_1 [#nvvm.target<chip = "sm_90", features = "+ptx70", link = [
gpu.module @module_2 [#nvvm.target<chip = "sm_90">, #nvvm.target<chip = "sm_80">, #nvvm.target<chip = "sm_70">] {
}
-// CHECK-LABEL : nvvm.grid_constant
+// CHECK-LABEL: nvvm.grid_constant
llvm.func @kernel_func(%arg0: !llvm.ptr {llvm.byval = i32, nvvm.grid_constant}) attributes {nvvm.kernel} {
llvm.return
}
diff --git a/mlir/test/Dialect/LLVMIR/type-consistency.mlir b/mlir/test/Dialect/LLVMIR/type-consistency.mlir
deleted file mode 100644
index c9c1355d16df..000000000000
--- a/mlir/test/Dialect/LLVMIR/type-consistency.mlir
+++ /dev/null
@@ -1,533 +0,0 @@
-// RUN: mlir-opt %s --pass-pipeline="builtin.module(llvm.func(llvm-type-consistency))" --split-input-file | FileCheck %s
-
-// CHECK-LABEL: llvm.func @same_address
-llvm.func @same_address(%arg: i32) {
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32, i32)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
- // CHECK: = llvm.getelementptr %[[ALLOCA]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32)>
- %7 = llvm.getelementptr %1[8] : (!llvm.ptr) -> !llvm.ptr, i8
- llvm.store %arg, %7 : i32, !llvm.ptr
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @same_address_keep_inbounds
-llvm.func @same_address_keep_inbounds(%arg: i32) {
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32, i32)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
- // CHECK: = llvm.getelementptr inbounds %[[ALLOCA]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32)>
- %7 = llvm.getelementptr inbounds %1[8] : (!llvm.ptr) -> !llvm.ptr, i8
- llvm.store %arg, %7 : i32, !llvm.ptr
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @index_in_final_padding
-llvm.func @index_in_final_padding(%arg: i32) {
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i8)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i8)> : (i32) -> !llvm.ptr
- // CHECK: = llvm.getelementptr %[[ALLOCA]][7] : (!llvm.ptr) -> !llvm.ptr, i8
- %7 = llvm.getelementptr %1[7] : (!llvm.ptr) -> !llvm.ptr, i8
- llvm.store %arg, %7 : i32, !llvm.ptr
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @index_out_of_bounds
-llvm.func @index_out_of_bounds(%arg: i32) {
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32)> : (i32) -> !llvm.ptr
- // CHECK: = llvm.getelementptr %[[ALLOCA]][9] : (!llvm.ptr) -> !llvm.ptr, i8
- %7 = llvm.getelementptr %1[9] : (!llvm.ptr) -> !llvm.ptr, i8
- llvm.store %arg, %7 : i32, !llvm.ptr
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @index_in_padding
-llvm.func @index_in_padding(%arg: i16) {
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i16, i32)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i16, i32)> : (i32) -> !llvm.ptr
- // CHECK: = llvm.getelementptr %[[ALLOCA]][2] : (!llvm.ptr) -> !llvm.ptr, i8
- %7 = llvm.getelementptr %1[2] : (!llvm.ptr) -> !llvm.ptr, i8
- llvm.store %arg, %7 : i16, !llvm.ptr
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @index_not_in_padding_because_packed
-llvm.func @index_not_in_padding_because_packed(%arg: i16) {
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", packed (i16, i32)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", packed (i16, i32)> : (i32) -> !llvm.ptr
- // CHECK: = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", packed (i16, i32)>
- %7 = llvm.getelementptr %1[2] : (!llvm.ptr) -> !llvm.ptr, i8
- llvm.store %arg, %7 : i16, !llvm.ptr
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @no_crash_on_negative_gep_index
-llvm.func @no_crash_on_negative_gep_index() {
- %0 = llvm.mlir.constant(1.000000e+00 : f16) : f16
- %1 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32, i32)>
- %2 = llvm.alloca %1 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
- // CHECK: llvm.getelementptr %[[ALLOCA]][-1] : (!llvm.ptr) -> !llvm.ptr, f32
- %3 = llvm.getelementptr %2[-1] : (!llvm.ptr) -> !llvm.ptr, f32
- llvm.store %0, %3 : f16, !llvm.ptr
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @coalesced_store_ints
-// CHECK-SAME: %[[ARG:.*]]: i64
-llvm.func @coalesced_store_ints(%arg: i64) {
- // CHECK-DAG: %[[CST0:.*]] = llvm.mlir.constant(0 : i64) : i64
- // CHECK-DAG: %[[CST32:.*]] = llvm.mlir.constant(32 : i64) : i64
-
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32)> : (i32) -> !llvm.ptr
-
- // CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
- // CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
- // CHECK: llvm.store %[[TRUNC]], %[[ALLOCA]]
- // CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST32]] : i64
- // CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32)>
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
- llvm.store %arg, %1 : i64, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @coalesced_store_ints_offset
-// CHECK-SAME: %[[ARG:.*]]: i64
-llvm.func @coalesced_store_ints_offset(%arg: i64) {
- // CHECK-DAG: %[[CST0:.*]] = llvm.mlir.constant(0 : i64) : i64
- // CHECK-DAG: %[[CST32:.*]] = llvm.mlir.constant(32 : i64) : i64
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i64, i32, i32)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i64, i32, i32)> : (i32) -> !llvm.ptr
- %3 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64, i32, i32)>
-
- // CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
- // CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64, i32, i32)>
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
- // CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST32]] : i64
- // CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64, i32, i32)>
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
- llvm.store %arg, %3 : i64, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @coalesced_store_floats
-// CHECK-SAME: %[[ARG:.*]]: i64
-llvm.func @coalesced_store_floats(%arg: i64) {
- // CHECK-DAG: %[[CST0:.*]] = llvm.mlir.constant(0 : i64) : i64
- // CHECK-DAG: %[[CST32:.*]] = llvm.mlir.constant(32 : i64) : i64
- %0 = llvm.mlir.constant(1 : i32) : i32
-
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (f32, f32)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (f32, f32)> : (i32) -> !llvm.ptr
-
- // CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
- // CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
- // CHECK: llvm.store %[[TRUNC]], %[[ALLOCA]]
- // CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST32]] : i64
- // CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (f32, f32)>
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
- llvm.store %arg, %1 : i64, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.return
-}
-
-// -----
-
-// Padding test purposefully not modified.
-
-// CHECK-LABEL: llvm.func @coalesced_store_padding_inbetween
-// CHECK-SAME: %[[ARG:.*]]: i64
-llvm.func @coalesced_store_padding_inbetween(%arg: i64) {
- %0 = llvm.mlir.constant(1 : i32) : i32
-
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i16, i32)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i16, i32)> : (i32) -> !llvm.ptr
- // CHECK: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.store %arg, %1 : i64, !llvm.ptr
- llvm.return
-}
-
-// -----
-
-// Padding test purposefully not modified.
-
-// CHECK-LABEL: llvm.func @coalesced_store_padding_end
-// CHECK-SAME: %[[ARG:.*]]: i64
-llvm.func @coalesced_store_padding_end(%arg: i64) {
- %0 = llvm.mlir.constant(1 : i32) : i32
-
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i16)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i16)> : (i32) -> !llvm.ptr
- // CHECK: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.store %arg, %1 : i64, !llvm.ptr
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @coalesced_store_past_end
-// CHECK-SAME: %[[ARG:.*]]: i64
-llvm.func @coalesced_store_past_end(%arg: i64) {
- %0 = llvm.mlir.constant(1 : i32) : i32
-
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32)> : (i32) -> !llvm.ptr
- // CHECK: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.store %arg, %1 : i64, !llvm.ptr
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @coalesced_store_packed_struct
-// CHECK-SAME: %[[ARG:.*]]: i64
-llvm.func @coalesced_store_packed_struct(%arg: i64) {
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK-DAG: %[[CST0:.*]] = llvm.mlir.constant(0 : i64) : i64
- // CHECK-DAG: %[[CST16:.*]] = llvm.mlir.constant(16 : i64) : i64
- // CHECK-DAG: %[[CST48:.*]] = llvm.mlir.constant(48 : i64) : i64
-
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", packed (i16, i32, i16)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", packed (i16, i32, i16)> : (i32) -> !llvm.ptr
- // CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
- // CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i16
- // CHECK: llvm.store %[[TRUNC]], %[[ALLOCA]]
- // CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST16]]
- // CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", packed (i16, i32, i16)>
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
- // CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST48]]
- // CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i16
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", packed (i16, i32, i16)>
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
- llvm.store %arg, %1 : i64, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @vector_write_split
-// CHECK-SAME: %[[ARG:.*]]: vector<4xi32>
-llvm.func @vector_write_split(%arg: vector<4xi32>) {
- // CHECK-DAG: %[[CST0:.*]] = llvm.mlir.constant(0 : i32) : i32
- // CHECK-DAG: %[[CST1:.*]] = llvm.mlir.constant(1 : i32) : i32
- // CHECK-DAG: %[[CST2:.*]] = llvm.mlir.constant(2 : i32) : i32
- // CHECK-DAG: %[[CST3:.*]] = llvm.mlir.constant(3 : i32) : i32
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32, i32, i32)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32, i32)> : (i32) -> !llvm.ptr
-
- // CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST0]] : i32] : vector<4xi32>
- // CHECK: llvm.store %[[EXTRACT]], %[[ALLOCA]] : i32, !llvm.ptr
-
- // CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST1]] : i32] : vector<4xi32>
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32, i32)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
-
- // CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST2]] : i32] : vector<4xi32>
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32, i32)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
-
- // CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST3]] : i32] : vector<4xi32>
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 3] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32, i32)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
-
- llvm.store %arg, %1 : vector<4xi32>, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @vector_write_split_offset
-// CHECK-SAME: %[[ARG:.*]]: vector<4xi32>
-llvm.func @vector_write_split_offset(%arg: vector<4xi32>) {
- // CHECK-DAG: %[[CST0:.*]] = llvm.mlir.constant(0 : i32) : i32
- // CHECK-DAG: %[[CST1:.*]] = llvm.mlir.constant(1 : i32) : i32
- // CHECK-DAG: %[[CST2:.*]] = llvm.mlir.constant(2 : i32) : i32
- // CHECK-DAG: %[[CST3:.*]] = llvm.mlir.constant(3 : i32) : i32
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i64, i32, i32, i32, i32)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i64, i32, i32, i32, i32)> : (i32) -> !llvm.ptr
- %2 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64, i32, i32, i32, i32)>
-
- // CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST0]] : i32] : vector<4xi32>
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64, i32, i32, i32, i32)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
-
- // CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST1]] : i32] : vector<4xi32>
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64, i32, i32, i32, i32)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
-
- // CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST2]] : i32] : vector<4xi32>
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 3] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64, i32, i32, i32, i32)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
-
- // CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST3]] : i32] : vector<4xi32>
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 4] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64, i32, i32, i32, i32)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
-
- llvm.store %arg, %2 : vector<4xi32>, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.return
-}
-
-// -----
-
-// Small test that a split vector store will be further optimized (rather than e.g.
-// split integer loads to structs as shown here)
-
-// CHECK-LABEL: llvm.func @vector_write_split_struct
-// CHECK-SAME: %[[ARG:.*]]: vector<2xi64>
-llvm.func @vector_write_split_struct(%arg: vector<2xi64>) {
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32, i32, i32)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32, i32)> : (i32) -> !llvm.ptr
-
- // CHECK-COUNT-4: llvm.store %{{.*}}, %{{.*}} : i32, !llvm.ptr
-
- llvm.store %arg, %1 : vector<2xi64>, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @gep_split
-// CHECK-SAME: %[[ARG:.*]]: i64
-llvm.func @gep_split(%arg: i64) {
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.array<2 x struct<"foo", (i64)>>
- %1 = llvm.alloca %0 x !llvm.array<2 x struct<"foo", (i64)>> : (i32) -> !llvm.ptr
- %3 = llvm.getelementptr %1[0, 1, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<2 x struct<"foo", (i64)>>
- // CHECK: %[[TOP_GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<2 x struct<"foo", (i64)>>
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64)>
- // CHECK: llvm.store %[[ARG]], %[[GEP]]
- llvm.store %arg, %3 : i64, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @coalesced_store_ints_subaggregate
-// CHECK-SAME: %[[ARG:.*]]: i64
-llvm.func @coalesced_store_ints_subaggregate(%arg: i64) {
- // CHECK-DAG: %[[CST0:.*]] = llvm.mlir.constant(0 : i64) : i64
- // CHECK-DAG: %[[CST32:.*]] = llvm.mlir.constant(32 : i64) : i64
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i64, struct<(i32, i32)>)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i64, struct<(i32, i32)>)> : (i32) -> !llvm.ptr
- %3 = llvm.getelementptr %1[0, 1, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64, struct<(i32, i32)>)>
-
- // CHECK: %[[TOP_GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64, struct<(i32, i32)>)>
- // CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
- // CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
- // CHECK: llvm.store %[[TRUNC]], %[[TOP_GEP]]
- // CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST32]] : i64
- // CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i32, i32)>
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
- llvm.store %arg, %3 : i64, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @gep_result_ptr_type_dynamic
-// CHECK-SAME: %[[ARG:.*]]: i64
-llvm.func @gep_result_ptr_type_dynamic(%arg: i64) {
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.array<2 x struct<"foo", (i64)>>
- %1 = llvm.alloca %0 x !llvm.array<2 x struct<"foo", (i64)>> : (i32) -> !llvm.ptr
- %3 = llvm.getelementptr %1[0, %arg, 0] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.array<2 x struct<"foo", (i64)>>
- // CHECK: %[[TOP_GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, %[[ARG]]] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.array<2 x struct<"foo", (i64)>>
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64)>
- // CHECK: llvm.store %[[ARG]], %[[GEP]]
- llvm.store %arg, %3 : i64, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @overlapping_int_aggregate_store
-// CHECK-SAME: %[[ARG:.*]]: i64
-llvm.func @overlapping_int_aggregate_store(%arg: i64) {
- // CHECK-DAG: %[[CST0:.*]] = llvm.mlir.constant(0 : i64) : i64
- // CHECK-DAG: %[[CST16:.*]] = llvm.mlir.constant(16 : i64) : i64
-
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)> : (i32) -> !llvm.ptr
-
- // CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
- // CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i16
- // CHECK: llvm.store %[[TRUNC]], %[[ALLOCA]]
-
- // CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST16]] : i64
- // CHECK: [[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i48
- // CHECK: %[[TOP_GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)>
-
- // Normal integer splitting of [[TRUNC]] follows:
-
- // CHECK: llvm.store %{{.*}}, %[[TOP_GEP]]
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16)>
- // CHECK: llvm.store %{{.*}}, %[[GEP]]
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16)>
- // CHECK: llvm.store %{{.*}}, %[[GEP]]
-
- llvm.store %arg, %1 : i64, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @overlapping_vector_aggregate_store
-// CHECK-SAME: %[[ARG:.*]]: vector<4xi16>
-llvm.func @overlapping_vector_aggregate_store(%arg: vector<4 x i16>) {
- // CHECK-DAG: %[[CST0:.*]] = llvm.mlir.constant(0 : i32) : i32
- // CHECK-DAG: %[[CST1:.*]] = llvm.mlir.constant(1 : i32) : i32
- // CHECK-DAG: %[[CST2:.*]] = llvm.mlir.constant(2 : i32) : i32
- // CHECK-DAG: %[[CST3:.*]] = llvm.mlir.constant(3 : i32) : i32
-
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)> : (i32) -> !llvm.ptr
-
- // CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST0]] : i32]
- // CHECK: llvm.store %[[EXTRACT]], %[[ALLOCA]]
-
- // CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST1]] : i32]
- // CHECK: %[[GEP0:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP0]]
-
- // CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST2]] : i32]
- // CHECK: %[[GEP0:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)>
- // CHECK: %[[GEP1:.*]] = llvm.getelementptr %[[GEP0]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP1]]
-
- // CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST3]] : i32]
- // CHECK: %[[GEP0:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)>
- // CHECK: %[[GEP1:.*]] = llvm.getelementptr %[[GEP0]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP1]]
-
- llvm.store %arg, %1 : vector<4 x i16>, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @partially_overlapping_aggregate_store
-// CHECK-SAME: %[[ARG:.*]]: i64
-llvm.func @partially_overlapping_aggregate_store(%arg: i64) {
- // CHECK-DAG: %[[CST0:.*]] = llvm.mlir.constant(0 : i64) : i64
- // CHECK-DAG: %[[CST16:.*]] = llvm.mlir.constant(16 : i64) : i64
-
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i16, struct<(i16, i16, i16, i16)>)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i16, struct<(i16, i16, i16, i16)>)> : (i32) -> !llvm.ptr
-
- // CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
- // CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i16
- // CHECK: llvm.store %[[TRUNC]], %[[ALLOCA]]
-
- // CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST16]] : i64
- // CHECK: [[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i48
- // CHECK: %[[TOP_GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i16, struct<(i16, i16, i16, i16)>)>
-
- // Normal integer splitting of [[TRUNC]] follows:
-
- // CHECK: llvm.store %{{.*}}, %[[TOP_GEP]]
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16, i16)>
- // CHECK: llvm.store %{{.*}}, %[[GEP]]
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16, i16)>
- // CHECK: llvm.store %{{.*}}, %[[GEP]]
-
- // It is important that there are no more stores at this point.
- // Specifically a store into the fourth field of %[[TOP_GEP]] would
- // incorrectly change the semantics of the code.
- // CHECK-NOT: llvm.store %{{.*}}, %{{.*}}
-
- llvm.store %arg, %1 : i64, !llvm.ptr
-
- llvm.return
-}
-
-// -----
-
-// Here a split is undesirable since the store does a partial store into the field.
-
-// CHECK-LABEL: llvm.func @undesirable_overlapping_aggregate_store
-// CHECK-SAME: %[[ARG:.*]]: i64
-llvm.func @undesirable_overlapping_aggregate_store(%arg: i64) {
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32, struct<(i64, i16, i16, i16)>)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, struct<(i64, i16, i16, i16)>)> : (i32) -> !llvm.ptr
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, struct<(i64, i16, i16, i16)>)>
- %2 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, struct<(i64, i16, i16, i16)>)>
- // CHECK: llvm.store %[[ARG]], %[[GEP]]
- llvm.store %arg, %2 : i64, !llvm.ptr
-
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @coalesced_store_ints_array
-// CHECK-SAME: %[[ARG:.*]]: i64
-llvm.func @coalesced_store_ints_array(%arg: i64) {
- // CHECK-DAG: %[[CST0:.*]] = llvm.mlir.constant(0 : i64) : i64
- // CHECK-DAG: %[[CST32:.*]] = llvm.mlir.constant(32 : i64) : i64
-
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.array<2 x i32>
- %1 = llvm.alloca %0 x !llvm.array<2 x i32> : (i32) -> !llvm.ptr
-
- // CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
- // CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
- // CHECK: llvm.store %[[TRUNC]], %[[ALLOCA]]
- // CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST32]] : i64
- // CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<2 x i32>
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
- llvm.store %arg, %1 : i64, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.return
-}
diff --git a/mlir/test/Dialect/Linalg/block-pack-matmul.mlir b/mlir/test/Dialect/Linalg/block-pack-matmul.mlir
index cc9af913ca15..8a8260817769 100644
--- a/mlir/test/Dialect/Linalg/block-pack-matmul.mlir
+++ b/mlir/test/Dialect/Linalg/block-pack-matmul.mlir
@@ -476,3 +476,32 @@ func.func @block_generic_matmul_transpose_b(
// CHECK-SAME: inner_dims_pos = [0, 1] inner_tiles = [32, 16]
// CHECK-SAME: into %[[C]] : tensor<2x4x32x16xf32> -> tensor<64x64xf32>
// CHECK: return %[[RES_UNPACKED]] : tensor<64x64xf32>
+
+// -----
+
+#map = affine_map<(d0, d1) -> (d0, d1)>
+
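+// Block packing only applies to contraction-like ops; this elementwise
+// maximum (a ReLU-style generic) must pass through unchanged, with no
+// tensor.pack or tensor.unpack introduced.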
+func.func @non_contraction_generic(
+ %A: tensor<64x128xf32>) -> tensor<64x128xf32> {
+ %c0 = arith.constant 0.000000e+00 : f32
+ %0 = linalg.generic {indexing_maps = [#map], iterator_types = ["parallel", "parallel"]}
+ outs(%A : tensor<64x128xf32>) {
+ ^bb0(%out: f32):
+ %1 = arith.maximumf %out, %c0 : f32
+ linalg.yield %1 : f32
+ } -> tensor<64x128xf32>
+ return %0 : tensor<64x128xf32>
+}
+
+// CHECK-DAG: #[[$MAP:.+]] = affine_map<(d0, d1) -> (d0, d1)>
+
+// CHECK-LABEL: func @non_contraction_generic(
+// CHECK-SAME: %[[A:[0-9a-z]+]]: tensor<64x128xf32>
+// CHECK-DAG: %[[C0:.+]] = arith.constant 0.000000e+00 : f32
+// CHECK-NOT: tensor.pack
+// CHECK: %[[GENERIC:.+]] = linalg.generic
+// CHECK-SAME: indexing_maps = [#[[$MAP]]]
+// CHECK-SAME: iterator_types = ["parallel", "parallel"]
+// CHECK-SAME: outs(%[[A]] : tensor<64x128xf32>)
+// CHECK-NOT: tensor.unpack
+// CHECK: return %[[GENERIC]] : tensor<64x128xf32>
diff --git a/mlir/test/Dialect/Linalg/data-layout-propagation.mlir b/mlir/test/Dialect/Linalg/data-layout-propagation.mlir
index bee08503298f..9140904620ac 100644
--- a/mlir/test/Dialect/Linalg/data-layout-propagation.mlir
+++ b/mlir/test/Dialect/Linalg/data-layout-propagation.mlir
@@ -795,7 +795,7 @@ func.func @reduction_pack_transpose_inner_dims(%arg0: tensor<128x256x32xi32>,
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]
// CHECK: %[[ARG1_EMPTY:.+]] = tensor.empty() : tensor<4x16x16x32xi32>
// CHECK: %[[PACK_ARG1:.+]] = tensor.pack %[[ARG1]]
-// CHECK-SME: inner_dims_pos = [1, 0] inner_tiles = [16, 32]
+// CHECK-SAME: inner_dims_pos = [1, 0] inner_tiles = [16, 32]
// CHECK-SAME: into %[[ARG1_EMPTY]]
// CHECK: %[[ARG0_EMPTY:.+]] = tensor.empty() : tensor<4x16x32x16x32xi32>
// CHECK: %[[PACK_ARG0:.+]] = tensor.pack %[[ARG0]]
diff --git a/mlir/test/Dialect/Linalg/mesh-sharding-propagation.mlir b/mlir/test/Dialect/Linalg/mesh-sharding-propagation.mlir
new file mode 100644
index 000000000000..59fd548dc2ef
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/mesh-sharding-propagation.mlir
@@ -0,0 +1,34 @@
+// RUN: mlir-opt \
+// RUN: --verify-each \
+// RUN: --pass-pipeline="builtin.module(func.func(sharding-propagation))" \
+// RUN: %s | FileCheck %s
+
+mesh.mesh @mesh_2(shape = 2)
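+// Sharding propagation is expected to shard dim 0 of the LHS and of the
+// result across mesh axis 0, while the RHS is annotated as fully replicated
+// (an empty axis list).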
+
+// CHECK-LABEL: func @matmul_shard_prallel_axis
+func.func @matmul_shard_prallel_axis(
+ // CHECK-SAME: %[[IN1:[A-Za-z0-9_]+]]: tensor<2x3xf32>,
+ %arg0 : tensor<2x3xf32>,
+ // CHECK-SAME: %[[IN2:[A-Za-z0-9_]+]]: tensor<3x2xf32>,
+ %arg1 : tensor<3x2xf32>,
+ // CHECK-SAME: %[[DPS_OUT:[A-Za-z0-9_]+]]: tensor<2x2xf32>
+ %out_dps: tensor<2x2xf32>
+) -> tensor<2x2xf32> {
+ // CHECK: %[[IN1_ANNOTATED_0:.*]] = mesh.shard %[[IN1]] to <@mesh_2, {{\[}}[0]]> : tensor<2x3xf32>
+ // CHECK: %[[IN1_ANNOTATED_1:.*]] = mesh.shard %[[IN1_ANNOTATED_0]] to <@mesh_2, {{\[}}[0]]> annotate_for_users : tensor<2x3xf32>
+ // CHECK: %[[IN2_ANNOTATED:.*]] = mesh.shard %[[IN2]] to <@mesh_2, []> annotate_for_users : tensor<3x2xf32>
+ // CHECK: %[[DPS_OUT_ANNOTATED:.*]] = mesh.shard %[[DPS_OUT]] to <@mesh_2, {{\[}}[0]]> annotate_for_users : tensor<2x2xf32>
+ %arg0_sharded = mesh.shard %arg0 to <@mesh_2, [[0]]> : tensor<2x3xf32>
+
+ // CHECK: %[[RES:.*]] = linalg.matmul ins(%[[IN1_ANNOTATED_1]], %[[IN2_ANNOTATED]] : tensor<2x3xf32>, tensor<3x2xf32>)
+ // CHECK-SAME: outs(%[[DPS_OUT_ANNOTATED]] : tensor<2x2xf32>) -> tensor<2x2xf32>
+ %res = linalg.matmul ins(%arg0_sharded, %arg1 : tensor<2x3xf32>, tensor<3x2xf32>)
+ outs(%out_dps : tensor<2x2xf32>) -> tensor<2x2xf32>
+
+ // CHECK: %[[RES_ANNOTATED_0:.*]] = mesh.shard %[[RES]] to <@mesh_2, {{\[}}[0]]> : tensor<2x2xf32>
+ // CHECK: %[[RES_ANNOTATED_1:.*]] = mesh.shard %[[RES_ANNOTATED_0]] to <@mesh_2, {{\[}}[]]> annotate_for_users : tensor<2x2xf32>
+ %res_sharded = mesh.shard %res to <@mesh_2, [[]]> annotate_for_users : tensor<2x2xf32>
+
+ // CHECK: return %[[RES_ANNOTATED_1]] : tensor<2x2xf32>
+ return %res_sharded : tensor<2x2xf32>
+}
diff --git a/mlir/test/Dialect/Linalg/transform-op-specialize.mlir b/mlir/test/Dialect/Linalg/transform-op-specialize.mlir
index 8a22c115f311..35679db7412f 100644
--- a/mlir/test/Dialect/Linalg/transform-op-specialize.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-specialize.mlir
@@ -141,3 +141,28 @@ module attributes {transform.with_named_sequence} {
transform.yield
}
}
+
+// -----
+
+#map = affine_map<(d0, d1) -> ()>
+#map1 = affine_map<(d0, d1) -> (d0, d1)>
+func.func @linalg_generic_fill(%arg0: tensor<7x7xf32>) -> tensor<7x7xf32> {
+ %cst = arith.constant 0.000000e+00 : f32
+ %0 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel"]} ins(%cst : f32) outs(%arg0 : tensor<7x7xf32>) {
+ ^bb0(%in: f32, %out: f32):
+ linalg.yield %in : f32
+ } -> tensor<7x7xf32>
+ return %0 : tensor<7x7xf32>
+}
+// CHECK-LABEL: linalg_generic_fill
+// CHECK-SAME: %[[ARG0:.+]]: tensor<7x7xf32>) -> tensor<7x7xf32>
+// CHECK: %[[CST:.+]] = arith.constant 0.000000e+00 : f32
+// CHECK: %{{.*}} = linalg.fill ins(%[[CST]] : f32) outs(%[[ARG0]] : tensor<7x7xf32>) -> tensor<7x7xf32>
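+//
+// The generic broadcasts one scalar into every element of the output, which
+// is exactly linalg.fill's semantics, so specialization raises it to a fill.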
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.structured.specialize %0 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
diff --git a/mlir/test/Dialect/Linalg/transform-op-specialize_elemwise_binary.mlir b/mlir/test/Dialect/Linalg/transform-op-specialize_elemwise_binary.mlir
new file mode 100644
index 000000000000..d45025de931c
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/transform-op-specialize_elemwise_binary.mlir
@@ -0,0 +1,76 @@
+// RUN: mlir-opt --transform-interpreter --split-input-file --verify-diagnostics %s | FileCheck %s
+
+#map = affine_map<(d0, d1) -> (d0, d1)>
+func.func @specialize_add(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
+ %0 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = ["parallel", "parallel"]} ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%arg2 : tensor<?x?xf32>) {
+ ^bb0(%in: f32, %in_0: f32, %out: f32):
+ %1 = arith.addf %in, %in_0 : f32
+ linalg.yield %1 : f32
+ } -> tensor<?x?xf32>
+ return %0 : tensor<?x?xf32>
+}
+// CHECK-LABEL: specialize_add
+// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?xf32>, %[[ARG1:.+]]: tensor<?x?xf32>, %[[ARG2:.+]]: tensor<?x?xf32>) -> tensor<?x?xf32>
+// CHECK-NOT: linalg.generic
+// CHECK: linalg.add ins(%[[ARG0]], %[[ARG1]] : tensor<?x?xf32>, tensor<?x?xf32>) outs(%[[ARG2]] : tensor<?x?xf32>) -> tensor<?x?xf32>
+
+func.func @specialize_sub(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
+ %0 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = ["parallel", "parallel"]} ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%arg2 : tensor<?x?xf32>) {
+ ^bb0(%in: f32, %in_0: f32, %out: f32):
+ %1 = arith.subf %in, %in_0 : f32
+ linalg.yield %1 : f32
+ } -> tensor<?x?xf32>
+ return %0 : tensor<?x?xf32>
+}
+// CHECK-LABEL: specialize_sub
+// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?xf32>, %[[ARG1:.+]]: tensor<?x?xf32>, %[[ARG2:.+]]: tensor<?x?xf32>) -> tensor<?x?xf32>
+// CHECK-NOT: linalg.generic
+// CHECK: linalg.sub ins(%[[ARG0]], %[[ARG1]] : tensor<?x?xf32>, tensor<?x?xf32>) outs(%[[ARG2]] : tensor<?x?xf32>) -> tensor<?x?xf32>
+
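+// Subtraction is not commutative: the region computes %in_0 - %in, so the
+// specialized linalg.sub must swap the ins operands, as checked below.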
+func.func @specialize_sub_swapped_operands(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
+ %0 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = ["parallel", "parallel"]} ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%arg2 : tensor<?x?xf32>) {
+ ^bb0(%in: f32, %in_0: f32, %out: f32):
+ %1 = arith.subf %in_0, %in : f32
+ linalg.yield %1 : f32
+ } -> tensor<?x?xf32>
+ return %0 : tensor<?x?xf32>
+}
+// CHECK-LABEL: specialize_sub
+// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?xf32>, %[[ARG1:.+]]: tensor<?x?xf32>, %[[ARG2:.+]]: tensor<?x?xf32>) -> tensor<?x?xf32>
+// CHECK-NOT: linalg.generic
+// CHECK: linalg.sub ins(%[[ARG1]], %[[ARG0]] : tensor<?x?xf32>, tensor<?x?xf32>) outs(%[[ARG2]] : tensor<?x?xf32>) -> tensor<?x?xf32>
+
+func.func @specialize_mul(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
+ %0 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = ["parallel", "parallel"]} ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%arg2 : tensor<?x?xf32>) {
+ ^bb0(%in: f32, %in_0: f32, %out: f32):
+ %1 = arith.mulf %in, %in_0 : f32
+ linalg.yield %1 : f32
+ } -> tensor<?x?xf32>
+ return %0 : tensor<?x?xf32>
+}
+// CHECK-LABEL: specialize_mul
+// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?xf32>, %[[ARG1:.+]]: tensor<?x?xf32>, %[[ARG2:.+]]: tensor<?x?xf32>) -> tensor<?x?xf32>
+// CHECK-NOT: linalg.generic
+// CHECK: linalg.mul ins(%[[ARG0]], %[[ARG1]] : tensor<?x?xf32>, tensor<?x?xf32>) outs(%[[ARG2]] : tensor<?x?xf32>) -> tensor<?x?xf32>
+
+func.func @specialize_div(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
+ %0 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = ["parallel", "parallel"]} ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%arg2 : tensor<?x?xf32>) {
+ ^bb0(%in: f32, %in_0: f32, %out: f32):
+ %1 = arith.divf %in, %in_0 : f32
+ linalg.yield %1 : f32
+ } -> tensor<?x?xf32>
+ return %0 : tensor<?x?xf32>
+}
+// CHECK-LABEL: specialize_div
+// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?xf32>, %[[ARG1:.+]]: tensor<?x?xf32>, %[[ARG2:.+]]: tensor<?x?xf32>) -> tensor<?x?xf32>
+// CHECK-NOT: linalg.generic
+// CHECK: linalg.div ins(%[[ARG0]], %[[ARG1]] : tensor<?x?xf32>, tensor<?x?xf32>) outs(%[[ARG2]] : tensor<?x?xf32>) -> tensor<?x?xf32>
+
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match interface{LinalgOp} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.structured.specialize %0 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
diff --git a/mlir/test/Dialect/Linalg/transform-op-specialize_elemwise_unary.mlir b/mlir/test/Dialect/Linalg/transform-op-specialize_elemwise_unary.mlir
new file mode 100644
index 000000000000..89a8baa453e9
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/transform-op-specialize_elemwise_unary.mlir
@@ -0,0 +1,25 @@
+// RUN: mlir-opt --transform-interpreter --split-input-file --verify-diagnostics %s | FileCheck %s
+
+#umap = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+func.func @specialize_exp(%arg0: tensor<?x?x?xf32>, %arg1: tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
+ %0 = linalg.generic
+ {indexing_maps = [#umap, #umap], iterator_types = ["parallel", "parallel","parallel"]}
+ ins(%arg0 : tensor<?x?x?xf32>) outs(%arg1 : tensor<?x?x?xf32>) {
+ ^bb0(%in: f32, %out: f32):
+ %1 = math.exp %in : f32
+ linalg.yield %1 : f32
+ } -> tensor<?x?x?xf32>
+ return %0 : tensor<?x?x?xf32>
+}
+// CHECK-LABEL: specialize_exp
+// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?x?xf32>, %[[ARG1:.+]]: tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
+// CHECK-NOT: linalg.generic
+// CHECK: linalg.exp ins(%[[ARG0]] : tensor<?x?x?xf32>) outs(%[[ARG1]] : tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match interface{LinalgOp} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.structured.specialize %0 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
diff --git a/mlir/test/Dialect/Linalg/transform-tile-reduction.mlir b/mlir/test/Dialect/Linalg/transform-tile-reduction.mlir
index 0e1512717a22..f3cf7c4dffa0 100644
--- a/mlir/test/Dialect/Linalg/transform-tile-reduction.mlir
+++ b/mlir/test/Dialect/Linalg/transform-tile-reduction.mlir
@@ -80,13 +80,14 @@ module attributes {transform.with_named_sequence} {
// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0)[s0] -> (-d0 + s0, 5)>
// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-DAG: #[[MAP2:.*]] = affine_map<(d0, d1) -> (d1)>
+// CHECK-DAG: #[[MAP2:.*]] = affine_map<(d0, d1) -> (d1, d0)>
+// CHECK-DAG: #[[MAP3:.*]] = affine_map<(d0, d1) -> (d1)>
// CHECK: func @reduction_tile_transpose
// CHECK: tensor.empty(%{{.*}}) : tensor<5x?xf32>
// CHECK: linalg.fill {{.*}} : tensor<5x?xf32>) -> tensor<5x?xf32>
// CHECK: scf.for
// CHECK: %[[EXT:.*]] = tensor.extract_slice %[[ARG3:.*]][0, 0] [%[[D0:.*]], %[[D1:.*]]] [1, 1] : tensor<5x?xf32> to tensor<?x?xf32>
-// CHECK: %[[R:.*]] = linalg.generic {indexing_maps = [#[[MAP1]], #[[MAP1]]], iterator_types = ["parallel", "parallel"]} ins(%[[L:.*]] : tensor<?x?xf32>) outs(%[[EXT]] : tensor<?x?xf32>)
+// CHECK: %[[R:.*]] = linalg.generic {indexing_maps = [#[[MAP1]], #[[MAP2]]], iterator_types = ["parallel", "parallel"]} ins(%[[L:.*]] : tensor<?x?xf32>) outs(%[[EXT]] : tensor<?x?xf32>)
// CHECK: %[[INS:.*]] = tensor.insert_slice %[[R]] into %[[ARG3]][0, 0] [%[[D0]], %[[D1]]] [1, 1] : tensor<?x?xf32> into tensor<5x?xf32>
// CHECK: scf.yield {{.*}} : tensor<5x?xf32>
// CHECK: }
@@ -403,3 +404,48 @@ module {
// CHECK: scf.yield %[[L1]] : tensor<4096x2x64xf32>
// CHECK: %[[OUT2:.*]] = linalg.generic {indexing_maps = [{{.*}}, {{.*}}], iterator_types = ["parallel", "reduction", "reduction"]} ins(%{{.*}} : tensor<4096x2x64xf32>) outs(%{{.*}} : tensor<4096xf32>)
// CHECK: return %[[OUT2]] : tensor<4096xf32>
+
+// -----
+
+func.func @reduction_tile_multiple_results(%arg0: tensor<?x?xf32>, %out: tensor<?xf32>, %out2: tensor<?xf32>) -> (tensor<?xf32>, tensor<?xf32>) {
+ %red:2 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
+ affine_map<(d0, d1) -> (d0)>,
+ affine_map<(d0, d1) -> (d0)>],
+ iterator_types = ["parallel", "reduction"]}
+ ins(%arg0 : tensor<?x?xf32>)
+ outs(%out, %out2 : tensor<?xf32>, tensor<?xf32>) {
+ ^bb0(%arg7: f32, %arg9: f32, %arg9_1: f32):
+ %1 = arith.mulf %arg7, %arg7 : f32
+ %2 = arith.addf %1, %arg9 : f32
+ %3 = arith.maximumf %1, %arg9_1 : f32
+ linalg.yield %2, %3 : f32, f32
+ } -> (tensor<?xf32>, tensor<?xf32>)
+ return %red#0, %red#1 : tensor<?xf32>, tensor<?xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1, %12, %2, %3, %loop = transform.structured.tile_reduction_using_for %0
+ by tile_sizes = [0, 5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+ transform.yield
+ }
+}
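+// With two reduction results, tile_reduction_using_for is expected to return
+// five handles: one fill per result, the partial-reduction op, the combining
+// op, and the scf.for loop.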
+
+// CHECK: func @reduction_tile_multiple_results
+// CHECK-DAG: %[[SUM_ID:.+]] = arith.constant 0.000000e+00 : f32
+// CHECK-DAG: %[[MAX_ID:.+]] = arith.constant 0xFF800000 : f32
+// CHECK-DAG: %[[SUM_INIT:.+]] = linalg.fill ins(%[[SUM_ID]] : f32) outs(%{{.*}} : tensor<?x5xf32>) -> tensor<?x5xf32>
+// CHECK-DAG: %[[MAX_INIT:.+]] = linalg.fill ins(%[[MAX_ID]] : f32) outs(%{{.*}} : tensor<?x5xf32>) -> tensor<?x5xf32>
+// CHECK: %[[OUT:.+]]:2 = scf.for
+// CHECK-SAME: iter_args(%[[SUM:.+]] = %[[SUM_INIT]], %[[MAX:.+]] = %[[MAX_INIT]])
+// CHECK: %[[UPDATED:.*]]:2 = linalg.generic
+// CHECK: arith.mulf
+// CHECK: arith.addf
+// CHECK: arith.maximumf
+// CHECK: %[[INSERT1:.+]] = tensor.insert_slice %[[UPDATED]]#0 into %[[SUM]]
+// CHECK: %[[INSERT2:.+]] = tensor.insert_slice %[[UPDATED]]#1 into %[[MAX]]
+// CHECK: scf.yield %[[INSERT1]], %[[INSERT2]]
+// CHECK: linalg.generic
+// CHECK: arith.addf
+// CHECK: arith.maximumf
diff --git a/mlir/test/Dialect/Math/expand-math.mlir b/mlir/test/Dialect/Math/expand-math.mlir
index 016a7bbdeb56..c10a78ca4ae4 100644
--- a/mlir/test/Dialect/Math/expand-math.mlir
+++ b/mlir/test/Dialect/Math/expand-math.mlir
@@ -221,7 +221,7 @@ func.func @roundf_func(%a: f32) -> f32 {
// CHECK-LABEL: func @powf_func
// CHECK-SAME: ([[ARG0:%.+]]: f64, [[ARG1:%.+]]: f64)
func.func @powf_func(%a: f64, %b: f64) ->f64 {
- // CHECK-DAG = [[CST0:%.+]] = arith.constant 0.000000e+00
+ // CHECK-DAG: [[CST0:%.+]] = arith.constant 0.000000e+00
// CHECK-DAG: [[TWO:%.+]] = arith.constant 2.000000e+00
// CHECK-DAG: [[NEGONE:%.+]] = arith.constant -1.000000e+00
// CHECK-DAG: [[SQR:%.+]] = arith.mulf [[ARG0]], [[ARG0]]
diff --git a/mlir/test/Dialect/MemRef/canonicalize.mlir b/mlir/test/Dialect/MemRef/canonicalize.mlir
index f442a61dc31e..c4ff6480a4ce 100644
--- a/mlir/test/Dialect/MemRef/canonicalize.mlir
+++ b/mlir/test/Dialect/MemRef/canonicalize.mlir
@@ -692,6 +692,16 @@ func.func @self_copy(%m1: memref<?xf32>) {
// -----
+// CHECK-LABEL: func @empty_copy
+// CHECK-NEXT: return
+func.func @empty_copy(%m1: memref<0x10xf32>, %m2: memref<?x10xf32>) {
+ memref.copy %m1, %m2 : memref<0x10xf32> to memref<?x10xf32>
+ memref.copy %m2, %m1 : memref<?x10xf32> to memref<0x10xf32>
+ return
+}
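+// Both copies involve a memref with a static zero-size dimension, so they move
+// no data and the whole body folds to a bare return.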
+
+// -----
+
func.func @scopeMerge() {
memref.alloca_scope {
%cnt = "test.count"() : () -> index
diff --git a/mlir/test/Dialect/Mesh/sharding-propagation.mlir b/mlir/test/Dialect/Mesh/sharding-propagation.mlir
index 270787ab5188..11a80594adb7 100644
--- a/mlir/test/Dialect/Mesh/sharding-propagation.mlir
+++ b/mlir/test/Dialect/Mesh/sharding-propagation.mlir
@@ -1,5 +1,6 @@
-// RUN: mlir-opt --pass-pipeline="builtin.module(func.func(sharding-propagation))" %s | FileCheck %s
+// RUN: mlir-opt --pass-pipeline="builtin.module(func.func(sharding-propagation,cse))" %s | FileCheck %s
+mesh.mesh @mesh_2(shape = 2)
mesh.mesh @mesh_1d(shape = ?)
mesh.mesh @mesh_2d(shape = 2x4)
mesh.mesh @mesh_3d(shape = ?x?x?)
@@ -73,12 +74,11 @@ func.func @arrow_structure(%arg0: tensor<8x16xf32>) -> (tensor<8x16xf32>, tensor
// CHECK-NEXT: %[[V5:.*]] = tosa.abs %[[V4]]
// CHECK-NEXT: %[[V6:.*]] = mesh.shard %[[V5]] to <@mesh_2d, {{\[\[}}0], [1]]> : tensor<8x16xf32>
%1 = tosa.abs %0 : (tensor<8x16xf32>) -> tensor<8x16xf32>
- // CHECK-NEXT: %[[V7:.*]] = mesh.shard %[[V3]] to <@mesh_2d, {{\[\[}}0], [1]]> annotate_for_users : tensor<8x16xf32>
- // CHECK-NEXT: %[[V8:.*]] = tosa.negate %[[V7]]
- // CHECK-NEXT: %[[V9:.*]] = mesh.shard %[[V8]] to <@mesh_2d, {{\[\[}}0], [1]]> : tensor<8x16xf32>
+ // CHECK-NEXT: %[[V7:.*]] = tosa.negate %[[V4]]
+ // CHECK-NEXT: %[[V8:.*]] = mesh.shard %[[V7]] to <@mesh_2d, {{\[\[}}0], [1]]> : tensor<8x16xf32>
%2 = tosa.negate %0 : (tensor<8x16xf32>) -> tensor<8x16xf32>
%3 = mesh.shard %2 to <@mesh_2d, [[0], [1]]> : tensor<8x16xf32>
- // CHECK-NEXT: return %[[V6]], %[[V9]]
+ // CHECK-NEXT: return %[[V6]], %[[V8]]
return %1, %3 : tensor<8x16xf32>, tensor<8x16xf32>
}
@@ -135,6 +135,34 @@ func.func @matmul_on_use_shard_m_and_duplicted_k(%arg0: tensor<2x16x8xf32>, %arg
return %2 : tensor<2x16x32xf32>
}
+// CHECK-LABEL: func.func @resolve_conflicting_annotations
+func.func @resolve_conflicting_annotations(
+ // CHECK-SAME: %[[IN1:.*]]: tensor<2x3xf32>,
+ %arg0: tensor<2x3xf32>,
+ // CHECK-SAME: %[[IN2:.*]]: tensor<3x2xf32>,
+ %arg1: tensor<3x2xf32>,
+ // CHECK-SAME: %[[OUT_DPS:.*]]: tensor<2x2xf32>
+ %out_dps: tensor<2x2xf32>
+// CHECK-SAME: ) -> tensor<2x2xf32> {
+) -> tensor<2x2xf32> {
+ // CHECK: %[[IN1_SHARDED1:.*]] = mesh.shard %[[IN1]] to <@mesh_2, {{\[\[}}0]]> : tensor<2x3xf32>
+ // CHECK: %[[IN1_SHARDED2:.*]] = mesh.shard %[[IN1_SHARDED1]] to <@mesh_2, {{\[}}]> annotate_for_users : tensor<2x3xf32>
+ // CHECK: %[[IN2_SHARDED:.*]] = mesh.shard %[[IN2]] to <@mesh_2, []> annotate_for_users : tensor<3x2xf32>
+ // CHECK: %[[OUT_DPS_SHARDED:.*]] = mesh.shard %[[OUT_DPS]] to <@mesh_2, {{\[}}]> annotate_for_users : tensor<2x2xf32>
+ %arg0_sharded = mesh.shard %arg0 to <@mesh_2, [[0]]> : tensor<2x3xf32>
+
+ // CHECK: %[[MATMUL:.*]] = linalg.matmul ins(%[[IN1_SHARDED2]], %[[IN2_SHARDED]] : tensor<2x3xf32>, tensor<3x2xf32>)
+ // CHECK-SAME: outs(%[[OUT_DPS_SHARDED]] : tensor<2x2xf32>) -> tensor<2x2xf32>
+ %res = linalg.matmul ins(%arg0_sharded, %arg1 : tensor<2x3xf32>, tensor<3x2xf32>)
+ outs(%out_dps : tensor<2x2xf32>) -> tensor<2x2xf32>
+
+ // CHECK: %[[MATMUL_SHARDED1:.*]] = mesh.shard %[[MATMUL]] to <@mesh_2, {{\[\[}}]]> : tensor<2x2xf32>
+ %res_sharded = mesh.shard %res to <@mesh_2, [[]]> : tensor<2x2xf32>
+
+ // CHECK: return %[[MATMUL_SHARDED1]] : tensor<2x2xf32>
+ return %res_sharded : tensor<2x2xf32>
+}
+
// https://arxiv.org/abs/2211.05102 Figure 2(a)
// CHECK-LABEL: func.func @mlp_1d_weight_stationary
// CHECK-SAME: %[[ARG0:.*]]: tensor<2x4x8xf32>, %[[ARG1:.*]]: tensor<2x8x32xf32>, %[[ARG2:.*]]: tensor<2x32x8xf32>
diff --git a/mlir/test/Dialect/Mesh/spmdization.mlir b/mlir/test/Dialect/Mesh/spmdization.mlir
index 2df247aba351..d7a1e2fd9d27 100644
--- a/mlir/test/Dialect/Mesh/spmdization.mlir
+++ b/mlir/test/Dialect/Mesh/spmdization.mlir
@@ -16,6 +16,21 @@ func.func @full_replication(
return %1 : tensor<2xi8>
}
+// CHECK-LABEL: func @sharding_triplet
+func.func @sharding_triplet(
+ // CHECK-SAME: %[[ARG:.*]]: tensor<1xf32>
+ %arg0: tensor<2xf32>
+// CHECK-SAME: ) -> tensor<2xf32> {
+) -> tensor<2xf32> {
+ // CHECK: %[[ALL_GATHER:.*]] = mesh.all_gather %[[ARG]] on @mesh_1d mesh_axes = [0] gather_axis = 0 : tensor<1xf32> -> tensor<2xf32>
+ %sharding_annotated = mesh.shard %arg0 to <@mesh_1d, [[0]]> : tensor<2xf32>
+ %sharding_annotated_0 = mesh.shard %sharding_annotated to <@mesh_1d, [[0]]> annotate_for_users : tensor<2xf32>
+ %sharding_annotated_1 = mesh.shard %sharding_annotated_0 to <@mesh_1d, [[]]> : tensor<2xf32>
+ // CHECK: return %[[ALL_GATHER]] : tensor<2xf32>
+ return %sharding_annotated_1 : tensor<2xf32>
+}
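+// The shard / annotate_for_users / unshard triplet above is expected to lower
+// to a single all_gather that reassembles the full tensor<2xf32> from the
+// per-process tensor<1xf32> shard.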
+
// CHECK-LABEL: func @move_split_axis
func.func @move_split_axis(
// CHECK-SAME: %[[ARG:.*]]: tensor<1x2xi8>
diff --git a/mlir/test/Dialect/OpenMP/invalid.mlir b/mlir/test/Dialect/OpenMP/invalid.mlir
index db016fe8e7ba..115d164b6cc7 100644
--- a/mlir/test/Dialect/OpenMP/invalid.mlir
+++ b/mlir/test/Dialect/OpenMP/invalid.mlir
@@ -648,7 +648,6 @@ func.func @foo(%lb : index, %ub : index, %step : index) {
omp.wsloop reduction(@foo %0 -> %prv : !llvm.ptr) {
omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
%2 = arith.constant 2.0 : f32
- omp.reduction %2, %1 : f32, !llvm.ptr
omp.yield
}
omp.terminator
@@ -678,7 +677,6 @@ func.func @foo(%lb : index, %ub : index, %step : index) {
omp.wsloop reduction(@add_f32 %0 -> %prv : !llvm.ptr, @add_f32 %0 -> %prv1 : !llvm.ptr) {
omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
%2 = arith.constant 2.0 : f32
- omp.reduction %2, %0 : f32, !llvm.ptr
omp.yield
}
omp.terminator
@@ -713,7 +711,6 @@ func.func @foo(%lb : index, %ub : index, %step : index, %mem : memref<1xf32>) {
omp.wsloop reduction(@add_f32 %mem -> %prv : memref<1xf32>) {
omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
%2 = arith.constant 2.0 : f32
- omp.reduction %2, %mem : f32, memref<1xf32>
omp.yield
}
omp.terminator
diff --git a/mlir/test/Dialect/OpenMP/ops.mlir b/mlir/test/Dialect/OpenMP/ops.mlir
index 0d5fd9383a92..caf25a3cb59f 100644
--- a/mlir/test/Dialect/OpenMP/ops.mlir
+++ b/mlir/test/Dialect/OpenMP/ops.mlir
@@ -1003,8 +1003,6 @@ func.func @omp_teams(%lb : i32, %ub : i32, %if_cond : i1, %num_threads : i32,
// CHECK: omp.teams reduction(@add_f32 -> %{{.+}} : !llvm.ptr) {
omp.teams reduction(@add_f32 -> %0 : !llvm.ptr) {
%1 = arith.constant 2.0 : f32
- // CHECK: omp.reduction %{{.+}}, %{{.+}}
- omp.reduction %1, %0 : f32, !llvm.ptr
// CHECK: omp.terminator
omp.terminator
}
@@ -1028,15 +1026,11 @@ func.func @sections_reduction() {
// CHECK: omp.section
omp.section {
%1 = arith.constant 2.0 : f32
- // CHECK: omp.reduction %{{.+}}, %{{.+}}
- omp.reduction %1, %0 : f32, !llvm.ptr
omp.terminator
}
// CHECK: omp.section
omp.section {
%1 = arith.constant 3.0 : f32
- // CHECK: omp.reduction %{{.+}}, %{{.+}}
- omp.reduction %1, %0 : f32, !llvm.ptr
omp.terminator
}
omp.terminator
@@ -1130,14 +1124,10 @@ func.func @sections_reduction2() {
omp.sections reduction(@add2_f32 -> %0 : memref<1xf32>) {
omp.section {
%1 = arith.constant 2.0 : f32
- // CHECK: omp.reduction
- omp.reduction %1, %0 : f32, memref<1xf32>
omp.terminator
}
omp.section {
%1 = arith.constant 2.0 : f32
- // CHECK: omp.reduction
- omp.reduction %1, %0 : f32, memref<1xf32>
omp.terminator
}
omp.terminator
diff --git a/mlir/test/Dialect/Polynomial/canonicalization.mlir b/mlir/test/Dialect/Polynomial/canonicalization.mlir
index dbfbf2d93f11..489d9ec2720d 100644
--- a/mlir/test/Dialect/Polynomial/canonicalization.mlir
+++ b/mlir/test/Dialect/Polynomial/canonicalization.mlir
@@ -43,3 +43,60 @@ func.func @test_canonicalize_sub(%poly0 : !sub_ty, %poly1 : !sub_ty) -> !sub_ty
// CHECK: [[ADD:%.+]] = polynomial.add %[[p0]], %[[p1neg]]
return %0 : !sub_ty
}
+
+// CHECK-LABEL: test_canonicalize_fold_add_through_ntt
+// CHECK: polynomial.add
+// CHECK-NOT: polynomial.ntt
+// CHECK-NOT: polynomial.intt
+func.func @test_canonicalize_fold_add_through_ntt(
+ %poly0 : !ntt_poly_ty,
+ %poly1 : !ntt_poly_ty) -> !ntt_poly_ty {
+ %0 = polynomial.ntt %poly0 : !ntt_poly_ty -> !tensor_ty
+ %1 = polynomial.ntt %poly1 : !ntt_poly_ty -> !tensor_ty
+ %a_plus_b = arith.addi %0, %1 : !tensor_ty
+ %out = polynomial.intt %a_plus_b : !tensor_ty -> !ntt_poly_ty
+ return %out : !ntt_poly_ty
+}
+
+// CHECK-LABEL: test_canonicalize_fold_add_through_intt
+// CHECK: arith.addi
+// CHECK-NOT: polynomial.intt
+// CHECK-NOT: polynomial.ntt
+func.func @test_canonicalize_fold_add_through_intt(
+ %tensor0 : !tensor_ty,
+ %tensor1 : !tensor_ty) -> !tensor_ty {
+ %0 = polynomial.intt %tensor0 : !tensor_ty -> !ntt_poly_ty
+ %1 = polynomial.intt %tensor1 : !tensor_ty -> !ntt_poly_ty
+ %a_plus_b = polynomial.add %0, %1 : !ntt_poly_ty
+ %out = polynomial.ntt %a_plus_b : !ntt_poly_ty -> !tensor_ty
+ return %out : !tensor_ty
+}
+
+// CHECK-LABEL: test_canonicalize_fold_sub_through_ntt
+// CHECK: polynomial.mul_scalar
+// CHECK: polynomial.add
+// CHECK-NOT: polynomial.ntt
+// CHECK-NOT: polynomial.intt
+func.func @test_canonicalize_fold_sub_through_ntt(
+ %poly0 : !ntt_poly_ty,
+ %poly1 : !ntt_poly_ty) -> !ntt_poly_ty {
+ %0 = polynomial.ntt %poly0 : !ntt_poly_ty -> !tensor_ty
+ %1 = polynomial.ntt %poly1 : !ntt_poly_ty -> !tensor_ty
+ %a_minus_b = arith.subi %0, %1 : !tensor_ty
+ %out = polynomial.intt %a_minus_b : !tensor_ty -> !ntt_poly_ty
+ return %out : !ntt_poly_ty
+}
+
+// CHECK-LABEL: test_canonicalize_fold_sub_through_intt
+// CHECK: arith.subi
+// CHECK-NOT: polynomial.intt
+// CHECK-NOT: polynomial.ntt
+func.func @test_canonicalize_fold_sub_through_intt(
+ %tensor0 : !tensor_ty,
+ %tensor1 : !tensor_ty) -> !tensor_ty {
+ %0 = polynomial.intt %tensor0 : !tensor_ty -> !ntt_poly_ty
+ %1 = polynomial.intt %tensor1 : !tensor_ty -> !ntt_poly_ty
+ %a_minus_b = polynomial.sub %0, %1 : !ntt_poly_ty
+ %out = polynomial.ntt %a_minus_b : !ntt_poly_ty -> !tensor_ty
+ return %out : !tensor_ty
+}
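+// These folds rely on the linearity of the transforms, e.g.
+// ntt(a) + ntt(b) == ntt(a + b) (and likewise for intt and for sub), which
+// lets add/sub hop across the transform so adjacent ntt/intt pairs cancel.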
diff --git a/mlir/test/Dialect/Polynomial/ops.mlir b/mlir/test/Dialect/Polynomial/ops.mlir
index ff709960c50e..4716e37ff885 100644
--- a/mlir/test/Dialect/Polynomial/ops.mlir
+++ b/mlir/test/Dialect/Polynomial/ops.mlir
@@ -74,15 +74,19 @@ module {
func.func @test_monic_monomial_mul() {
%five = arith.constant 5 : index
- %0 = polynomial.constant {value=#one_plus_x_squared} : !polynomial.polynomial<ring=#ring1>
+ %0 = polynomial.constant int<1 + x**2> : !polynomial.polynomial<ring=#ring1>
%1 = polynomial.monic_monomial_mul %0, %five : (!polynomial.polynomial<ring=#ring1>, index) -> !polynomial.polynomial<ring=#ring1>
return
}
func.func @test_constant() {
- %0 = polynomial.constant {value=#one_plus_x_squared} : !polynomial.polynomial<ring=#ring1>
- %1 = polynomial.constant {value=#polynomial.int_polynomial<1 + x**2>} : !polynomial.polynomial<ring=#ring1>
- %2 = polynomial.constant {value=#polynomial.float_polynomial<1.5 + 0.5 x**2>} : !polynomial.polynomial<ring=#ring2>
+ %0 = polynomial.constant int<1 + x**2> : !polynomial.polynomial<ring=#ring1>
+ %1 = polynomial.constant int<1 + x**2> : !polynomial.polynomial<ring=#ring1>
+ %2 = polynomial.constant float<1.5 + 0.5 x**2> : !polynomial.polynomial<ring=#ring2>
+
+ // Test verbose fallbacks
+ %verb0 = polynomial.constant #polynomial.typed_int_polynomial<1 + x**2> : !polynomial.polynomial<ring=#ring1>
+ %verb2 = polynomial.constant #polynomial.typed_float_polynomial<1.5 + 0.5 x**2> : !polynomial.polynomial<ring=#ring2>
return
}
diff --git a/mlir/test/Dialect/SCF/transform-ops.mlir b/mlir/test/Dialect/SCF/transform-ops.mlir
index f4b0db7fb1f9..a4daa86583c3 100644
--- a/mlir/test/Dialect/SCF/transform-ops.mlir
+++ b/mlir/test/Dialect/SCF/transform-ops.mlir
@@ -6,11 +6,11 @@
// CHECK: scf.for
// CHECK: arith.addi
//
-// CHECK: func @foo[[SUFFIX:.+]](%{{.+}}, %{{.+}}, %{{.+}})
+// CHECK: func @foo[[$SUFFIX:.+]](%{{.+}}, %{{.+}}, %{{.+}})
// CHECK: scf.for
// CHECK: arith.addi
//
-// CHECK-LABEL @loop_outline_op
+// CHECK-LABEL: @loop_outline_op
func.func @loop_outline_op(%arg0: index, %arg1: index, %arg2: index) {
// CHECK: scf.for
// CHECK-NOT: scf.for
@@ -23,7 +23,7 @@ func.func @loop_outline_op(%arg0: index, %arg1: index, %arg2: index) {
}
// CHECK: scf.execute_region
// CHECK-NOT: scf.for
- // CHECK: func.call @foo[[SUFFIX]]
+ // CHECK: func.call @foo[[$SUFFIX]]
scf.for %j = %arg0 to %arg1 step %arg2 {
arith.addi %j, %j : index
}
diff --git a/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir b/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir
index 7dc0bd99f54b..5c24f0e6a7d3 100644
--- a/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir
@@ -180,7 +180,7 @@ func.func @logicalUnary(%arg0 : i32)
func.func @select_op_bool(%arg0: i1) -> () {
%0 = spirv.Constant true
%1 = spirv.Constant false
- // CHECK : spirv.Select {{%.*}}, {{%.*}}, {{%.*}} : i1, i1
+ // CHECK: spirv.Select {{%.*}}, {{%.*}}, {{%.*}} : i1, i1
%2 = spirv.Select %arg0, %0, %1 : i1, i1
return
}
@@ -188,7 +188,7 @@ func.func @select_op_bool(%arg0: i1) -> () {
func.func @select_op_int(%arg0: i1) -> () {
%0 = spirv.Constant 2 : i32
%1 = spirv.Constant 3 : i32
- // CHECK : spirv.Select {{%.*}}, {{%.*}}, {{%.*}} : i1, i32
+ // CHECK: spirv.Select {{%.*}}, {{%.*}}, {{%.*}} : i1, i32
%2 = spirv.Select %arg0, %0, %1 : i1, i32
return
}
@@ -196,7 +196,7 @@ func.func @select_op_int(%arg0: i1) -> () {
func.func @select_op_float(%arg0: i1) -> () {
%0 = spirv.Constant 2.0 : f32
%1 = spirv.Constant 3.0 : f32
- // CHECK : spirv.Select {{%.*}}, {{%.*}}, {{%.*}} : i1, f32
+ // CHECK: spirv.Select {{%.*}}, {{%.*}}, {{%.*}} : i1, f32
%2 = spirv.Select %arg0, %0, %1 : i1, f32
return
}
@@ -204,7 +204,7 @@ func.func @select_op_float(%arg0: i1) -> () {
func.func @select_op_ptr(%arg0: i1) -> () {
%0 = spirv.Variable : !spirv.ptr<f32, Function>
%1 = spirv.Variable : !spirv.ptr<f32, Function>
- // CHECK : spirv.Select {{%.*}}, {{%.*}}, {{%.*}} : i1, !spirv.ptr<f32, Function>
+ // CHECK: spirv.Select {{%.*}}, {{%.*}}, {{%.*}} : i1, !spirv.ptr<f32, Function>
%2 = spirv.Select %arg0, %0, %1 : i1, !spirv.ptr<f32, Function>
return
}
@@ -212,7 +212,7 @@ func.func @select_op_ptr(%arg0: i1) -> () {
func.func @select_op_vec(%arg0: i1) -> () {
%0 = spirv.Constant dense<[2.0, 3.0, 4.0]> : vector<3xf32>
%1 = spirv.Constant dense<[5.0, 6.0, 7.0]> : vector<3xf32>
- // CHECK : spirv.Select {{%.*}}, {{%.*}}, {{%.*}} : i1, vector<3xf32>
+ // CHECK: spirv.Select {{%.*}}, {{%.*}}, {{%.*}} : i1, vector<3xf32>
%2 = spirv.Select %arg0, %0, %1 : i1, vector<3xf32>
return
}
@@ -220,7 +220,7 @@ func.func @select_op_vec(%arg0: i1) -> () {
func.func @select_op_vec_condn_vec(%arg0: vector<3xi1>) -> () {
%0 = spirv.Constant dense<[2.0, 3.0, 4.0]> : vector<3xf32>
%1 = spirv.Constant dense<[5.0, 6.0, 7.0]> : vector<3xf32>
- // CHECK : spirv.Select {{%.*}}, {{%.*}}, {{%.*}} : vector<3xi1>, vector<3xf32>
+ // CHECK: spirv.Select {{%.*}}, {{%.*}}, {{%.*}} : vector<3xi1>, vector<3xf32>
%2 = spirv.Select %arg0, %0, %1 : vector<3xi1>, vector<3xf32>
return
}
diff --git a/mlir/test/Dialect/SPIRV/IR/structure-ops.mlir b/mlir/test/Dialect/SPIRV/IR/structure-ops.mlir
index db0f52dcc40e..1eed5892a085 100644
--- a/mlir/test/Dialect/SPIRV/IR/structure-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/structure-ops.mlir
@@ -330,7 +330,7 @@ spirv.module Logical GLSL450 {
// TODO: Fix test case after initialization with normal constant is addressed
// spirv.module Logical GLSL450 {
// %0 = spirv.Constant 4.0 : f32
-// // CHECK1: spirv.Variable init(%0) : !spirv.ptr<f32, Private>
+// COM: CHECK: spirv.Variable init(%0) : !spirv.ptr<f32, Private>
// spirv.GlobalVariable @var1 init(%0) : !spirv.ptr<f32, Private>
// }
@@ -372,7 +372,7 @@ spirv.module Logical GLSL450 {
// TODO: Fix test case after initialization with constant is addressed
// spirv.module Logical GLSL450 {
// %0 = spirv.Constant 4.0 : f32
-// // CHECK1: spirv.GlobalVariable @var1 initializer(%0) {binding = 5 : i32} : !spirv.ptr<f32, Private>
+// COM: CHECK: spirv.GlobalVariable @var1 initializer(%0) {binding = 5 : i32} : !spirv.ptr<f32, Private>
// spirv.GlobalVariable @var1 initializer(%0) {binding = 5 : i32} : !spirv.ptr<f32, Private>
// }
diff --git a/mlir/test/Dialect/Tensor/canonicalize.mlir b/mlir/test/Dialect/Tensor/canonicalize.mlir
index b5a82eb3e903..f7fbd3834288 100644
--- a/mlir/test/Dialect/Tensor/canonicalize.mlir
+++ b/mlir/test/Dialect/Tensor/canonicalize.mlir
@@ -542,6 +542,18 @@ func.func @trivial_insert_slice(%arg0 : tensor<4x6x16x32xi8>, %arg1 : tensor<4x6
// -----
+// CHECK-LABEL: func @empty_insert_slice
+// CHECK-SAME: %[[ARG0:.[a-z0-9A-Z_]+]]: tensor<0x2xi8>
+// CHECK-SAME: %[[ARG1:.[a-z0-9A-Z_]+]]: tensor<3x3xi8>
+// CHECK-NOT: tensor.insert_slice
+// CHECK: return %[[ARG1]] : tensor<3x3xi8>
+func.func @empty_insert_slice(%arg0 : tensor<0x2xi8>, %arg1 : tensor<3x3xi8>) -> tensor<3x3xi8> {
+ %0 = tensor.insert_slice %arg0 into %arg1[0, 0] [0, 2] [1, 1] : tensor<0x2xi8> into tensor<3x3xi8>
+ return %0 : tensor<3x3xi8>
+}
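+// The inserted slice spans zero elements (sizes [0, 2]), so the insert_slice
+// is a no-op and folds to the destination %arg1.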
+
+// -----
+
// CHECK-LABEL: func @rank_reducing_tensor_of_cast
// CHECK-SAME: %[[ARG0:.[a-z0-9A-Z_]+]]: tensor<4x6x16x32xi8>
// CHECK: %[[S:.+]] = tensor.extract_slice %arg0[0, 1, 0, 0] [1, 1, 16, 32] [1, 1, 1, 1] : tensor<4x6x16x32xi8> to tensor<16x32xi8>
@@ -2511,4 +2523,3 @@ func.func @dim_out_of_bounds() -> vector<7xi32> {
%16 = affine.vector_load %alloc_21[%c1, %c1, %dim] : memref<?x26x2xi32>, vector<7xi32>
return %16 : vector<7xi32>
}
-
diff --git a/mlir/test/Dialect/Tensor/fold-empty-op.mlir b/mlir/test/Dialect/Tensor/fold-empty-op.mlir
index e200a4f89261..e94f6ec7ec56 100644
--- a/mlir/test/Dialect/Tensor/fold-empty-op.mlir
+++ b/mlir/test/Dialect/Tensor/fold-empty-op.mlir
@@ -64,6 +64,79 @@ func.func @rank_reducing_empty_tensor_extract(%sz : index, %idx : index) -> tens
return %r: tensor<2xf32>
}
+func.func @pack_empty(%arg0: tensor<8x8x32x32xf32>) -> tensor<8x8x32x32xf32> {
+ %empty_unpacked = tensor.empty() : tensor<256x256xf32>
+ %packed = tensor.pack %empty_unpacked
+ inner_dims_pos = [0, 1] inner_tiles = [32, 32]
+ into %arg0 : tensor<256x256xf32> -> tensor<8x8x32x32xf32>
+ return %packed : tensor<8x8x32x32xf32>
+}
+
+// CHECK-LABEL: func.func @pack_empty(
+// CHECK-SAME: %[[T:.+]]: tensor<8x8x32x32xf32>
+// CHECK-NOT: tensor.pack
+// CHECK: return %[[T]] : tensor<8x8x32x32xf32>
+
+func.func @pack_empty_dynamic(%arg0: tensor<?x?x?x?xf32>, %dim0: index, %dim1: index) -> tensor<?x?x?x?xf32> {
+ %empty_unpacked = tensor.empty(%dim0, %dim1) : tensor<?x?xf32>
+ %packed = tensor.pack %empty_unpacked
+ inner_dims_pos = [0, 1] inner_tiles = [32, 32]
+ into %arg0 : tensor<?x?xf32> -> tensor<?x?x?x?xf32>
+ return %packed : tensor<?x?x?x?xf32>
+}
+
+// CHECK-LABEL: func.func @pack_empty_dynamic(
+// CHECK-SAME: %[[T:.+]]: tensor<?x?x?x?xf32>,
+// CHECK-SAME: %[[DIM0:[a-zA-Z0-9_]+]]: index,
+// CHECK-SAME: %[[DIM1:[a-zA-Z0-9_]+]]: index
+// CHECK-NOT: tensor.pack
+// CHECK: return %[[T]] : tensor<?x?x?x?xf32>
+
+func.func @unpack_empty(%arg0: tensor<256x256xf32>) -> tensor<256x256xf32> {
+ %empty_packed = tensor.empty() : tensor<8x8x32x32xf32>
+ %unpacked = tensor.unpack %empty_packed
+ inner_dims_pos = [0, 1] inner_tiles = [32, 32]
+ into %arg0 : tensor<8x8x32x32xf32> -> tensor<256x256xf32>
+ return %unpacked : tensor<256x256xf32>
+}
+
+// CHECK-LABEL: func.func @unpack_empty(
+// CHECK-SAME: %[[T:.+]]: tensor<256x256xf32>
+// CHECK-NOT: tensor.unpack
+// CHECK: return %[[T]] : tensor<256x256xf32>
+
+func.func @unpack_empty_dynamic(%arg0: tensor<?x?xf32>, %dim0: index, %dim1: index, %dim2: index, %dim3: index) -> tensor<?x?xf32> {
+ %empty_packed = tensor.empty(%dim0, %dim1, %dim2, %dim3) : tensor<?x?x?x?xf32>
+ %unpacked = tensor.unpack %empty_packed
+ inner_dims_pos = [0, 1] inner_tiles = [32, 32]
+ into %arg0 : tensor<?x?x?x?xf32> -> tensor<?x?xf32>
+ return %unpacked : tensor<?x?xf32>
+}
+
+// CHECK-LABEL: func.func @unpack_empty_dynamic(
+// CHECK-SAME: %[[T:.+]]: tensor<?x?xf32>,
+// CHECK-SAME: %[[DIM0:[a-zA-Z0-9_]+]]: index,
+// CHECK-SAME: %[[DIM1:[a-zA-Z0-9_]+]]: index,
+// CHECK-SAME: %[[DIM2:[a-zA-Z0-9_]+]]: index,
+// CHECK-SAME: %[[DIM3:[a-zA-Z0-9_]+]]: index
+// CHECK-NOT: tensor.unpack
+// CHECK: return %[[T]] : tensor<?x?xf32>
+
+func.func @pack_padded_empty(%arg0: tensor<8x8x32x32xf32>) -> tensor<8x8x32x32xf32> {
+ %pad = arith.constant 1.0 : f32
+ %empty_unpacked = tensor.empty() : tensor<256x256xf32>
+ %packed = tensor.pack %empty_unpacked
+ padding_value(%pad : f32)
+ inner_dims_pos = [0, 1] inner_tiles = [32, 32]
+ into %arg0 : tensor<256x256xf32> -> tensor<8x8x32x32xf32>
+ return %packed : tensor<8x8x32x32xf32>
+}
+
+// CHECK-LABEL: func.func @pack_padded_empty(
+// CHECK-SAME: %[[T:.+]]: tensor<8x8x32x32xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack
+// CHECK: return %[[PACK]] : tensor<8x8x32x32xf32>
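+// Unlike the cases above, a pack with a padding_value materializes data, so
+// folding away the empty source would be incorrect and tensor.pack remains.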
+
// -----
module attributes {transform.with_named_sequence} {
diff --git a/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir b/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
index 9f486f9146ad..9a3143f5e550 100644
--- a/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
+++ b/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
@@ -544,7 +544,7 @@ func.func @linalg_transpose_tensor_unpack_fold(%arg0: tensor<1x1x4x16xi32>) -> t
// CHECK-SAME: outer_dims_perm = [1, 0]
// CHECK-SAME: inner_dims_pos = [1, 0]
// CHECK-SAME: inner_tiles = [4, 16]
-// CHEKC-SAME: into %[[OUT]] : tensor<1x1x4x16xi32> -> tensor<16x4xi32>
+// CHECK-SAME: into %[[OUT]] : tensor<1x1x4x16xi32> -> tensor<16x4xi32>
// CHECK: return %[[UNPACK]] : tensor<16x4xi32>
// CHECK: }
diff --git a/mlir/test/Dialect/Tensor/fold-reassociative-reshapes.mlir b/mlir/test/Dialect/Tensor/fold-reassociative-reshapes.mlir
index d3ac6ce792f3..644d9a918f6c 100644
--- a/mlir/test/Dialect/Tensor/fold-reassociative-reshapes.mlir
+++ b/mlir/test/Dialect/Tensor/fold-reassociative-reshapes.mlir
@@ -54,3 +54,105 @@ func.func @rank_reducing_parallel_insert_of_collapse_shape(
}
return %1 : tensor<?x?x?x?xf32>
}
+
+// -----
+
+// CHECK-LABEL: func @insert_of_padding_expand_shape(
+// CHECK-SAME: %[[t:.*]]: tensor<?x?xf32>
+// CHECK-SAME: %[[d:.*]]: tensor<?x?x?x?xf32>
+// CHECK-SAME: %[[x:[a-zA-Z0-9_]+]]: index
+// CHECK-SAME: %[[y:[a-zA-Z0-9_]+]]: index
+// CHECK: %[[insert:.*]] = tensor.insert_slice %[[t]] into %[[d]][%[[x]], %[[y]], 0, 0] [1, %{{.*}}, 1, %{{.*}}] [1, 1, 1, 1] : tensor<?x?xf32> into tensor<?x?x?x?xf32>
+// CHECK: return %[[insert]]
+func.func @insert_of_padding_expand_shape(
+ %t: tensor<?x?xf32>, %d: tensor<?x?x?x?xf32>, %x: index, %y: index)
+ -> tensor<?x?x?x?xf32> {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %sz0 = tensor.dim %t, %c0 : tensor<?x?xf32>
+ %sz1 = tensor.dim %t, %c1 : tensor<?x?xf32>
+ %0 = tensor.expand_shape %t [[0, 1], [2, 3]] output_shape [1, %sz0, 1, %sz1]
+ : tensor<?x?xf32> into tensor<1x?x1x?xf32>
+ %1 = tensor.insert_slice %0 into %d[%x, %y, 0, 0][1, %sz0, 1, %sz1][1, 1, 1, 1]
+ : tensor<1x?x1x?xf32> into tensor<?x?x?x?xf32>
+ return %1 : tensor<?x?x?x?xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @insert_of_non_padding_expand_shape(
+// CHECK-SAME: %[[t:.*]]: tensor<?x?xf32>
+// CHECK-SAME: %[[d:.*]]: tensor<?x?x?x?xf32>
+// CHECK-SAME: %[[x:[a-zA-Z0-9_]+]]: index
+// CHECK-SAME: %[[y:[a-zA-Z0-9_]+]]: index
+// CHECK-SAME: %[[sz:[a-zA-Z0-9_]+]]: index
+// CHECK: %[[expand:.*]] = tensor.expand_shape %[[t]] {{\[}}[0, 1], [2]] output_shape [%[[sz]], %{{.*}}, %{{.*}}] : tensor<?x?xf32> into tensor<?x?x?xf32>
+// CHECK: %[[insert:.*]] = tensor.insert_slice %[[expand]] into %[[d]][%[[x]], %[[y]], 0, 0] [%[[sz]], 1, %{{.*}}, %{{.*}}] [1, 1, 1, 1] : tensor<?x?x?xf32> into tensor<?x?x?x?xf32>
+// CHECK: return %[[insert]]
+func.func @insert_of_non_padding_expand_shape(
+ %t: tensor<?x?xf32>, %d: tensor<?x?x?x?xf32>, %x: index, %y: index, %sz: index)
+ -> tensor<?x?x?x?xf32> {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %sz0 = tensor.dim %t, %c0 : tensor<?x?xf32>
+ %sz1 = tensor.dim %t, %c1 : tensor<?x?xf32>
+ %0 = tensor.expand_shape %t [[0, 1], [2]] output_shape [%sz, %sz0, %sz1]
+ : tensor<?x?xf32> into tensor<?x?x?xf32>
+ %1 = tensor.insert_slice %0 into %d[%x, %y, 0, 0][%sz, 1, %sz0, %sz1][1, 1, 1, 1]
+ : tensor<?x?x?xf32> into tensor<?x?x?x?xf32>
+ return %1 : tensor<?x?x?x?xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @parallel_insert_of_padding_expand_shape(
+// CHECK-SAME: %[[t:.*]]: tensor<?x?xf32>
+// CHECK-SAME: %[[d:.*]]: tensor<?x?x?x?xf32>
+// CHECK-SAME: %[[x:[a-zA-Z0-9_]+]]: index
+// CHECK-SAME: %[[y:[a-zA-Z0-9_]+]]: index
+// CHECK: tensor.parallel_insert_slice %[[t]] into %{{.*}}[%{{.*}}, %{{.*}}, 0, 0] [1, %{{.*}}, 1, %{{.*}}] [1, 1, 1, 1] : tensor<?x?xf32> into tensor<?x?x?x?xf32>
+func.func @parallel_insert_of_padding_expand_shape(
+ %t: tensor<?x?xf32>, %d: tensor<?x?x?x?xf32>, %x: index, %y: index)
+ -> tensor<?x?x?x?xf32> {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %sz0 = tensor.dim %t, %c0 : tensor<?x?xf32>
+ %sz1 = tensor.dim %t, %c1 : tensor<?x?xf32>
+ %0 = tensor.expand_shape %t [[0, 1], [2, 3]] output_shape [1, %sz0, 1, %sz1]
+ : tensor<?x?xf32> into tensor<1x?x1x?xf32>
+ %1 = scf.forall (%i, %j) in (%x, %y) shared_outs(%o = %d) -> (tensor<?x?x?x?xf32>) {
+ scf.forall.in_parallel {
+ tensor.parallel_insert_slice %0 into %o[%i, %j, 0, 0][1, %sz0, 1, %sz1][1, 1, 1, 1]
+ : tensor<1x?x1x?xf32> into tensor<?x?x?x?xf32>
+ }
+ }
+ return %1 : tensor<?x?x?x?xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @parallel_insert_of_non_padding_expand_shape(
+// CHECK-SAME: %[[t:.*]]: tensor<?x?xf32>
+// CHECK-SAME: %[[d:.*]]: tensor<?x?x?x?xf32>
+// CHECK-SAME: %[[x:[a-zA-Z0-9_]+]]: index
+// CHECK-SAME: %[[y:[a-zA-Z0-9_]+]]: index
+// CHECK-SAME: %[[sz:[a-zA-Z0-9_]+]]: index
+// CHECK: %[[expand:.*]] = tensor.expand_shape %[[t]] {{\[}}[0, 1], [2]] output_shape [%[[sz]], %{{.*}}, %{{.*}}] : tensor<?x?xf32> into tensor<?x?x?xf32>
+// CHECK: tensor.parallel_insert_slice %[[expand]] into %{{.*}}[%{{.*}}, %{{.*}}, 0, 0] [%[[sz]], 1, %{{.*}}, %{{.*}}] [1, 1, 1, 1] : tensor<?x?x?xf32> into tensor<?x?x?x?xf32>
+func.func @parallel_insert_of_non_padding_expand_shape(
+ %t: tensor<?x?xf32>, %d: tensor<?x?x?x?xf32>, %x: index, %y: index, %sz: index)
+ -> tensor<?x?x?x?xf32> {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %sz0 = tensor.dim %t, %c0 : tensor<?x?xf32>
+ %sz1 = tensor.dim %t, %c1 : tensor<?x?xf32>
+ %0 = tensor.expand_shape %t [[0, 1], [2]] output_shape [%sz, %sz0, %sz1]
+ : tensor<?x?xf32> into tensor<?x?x?xf32>
+ %1 = scf.forall (%i, %j) in (%x, %y) shared_outs(%o = %d) -> (tensor<?x?x?x?xf32>) {
+ scf.forall.in_parallel {
+ tensor.parallel_insert_slice %0 into %o[%i, %j, 0, 0][%sz, 1, %sz0, %sz1][1, 1, 1, 1]
+ : tensor<?x?x?xf32> into tensor<?x?x?x?xf32>
+ }
+ }
+ return %1 : tensor<?x?x?x?xf32>
+}
diff --git a/mlir/test/Dialect/Tensor/simplify-pack-unpack.mlir b/mlir/test/Dialect/Tensor/simplify-pack-unpack.mlir
index 5a2eade0eccc..f9e51ae52a74 100644
--- a/mlir/test/Dialect/Tensor/simplify-pack-unpack.mlir
+++ b/mlir/test/Dialect/Tensor/simplify-pack-unpack.mlir
@@ -266,3 +266,131 @@ func.func @unpack_16x1x1x2_to_32x1(%arg0 : tensor<16x1x1x2xf32>) -> tensor<32x1x
: tensor<16x1x1x2xf32> -> tensor<32x1xf32>
return %unpack : tensor<32x1xf32>
}
+
+// -----
+
+// CHECK-LABEL: func.func @pad_like_pack(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<32x64xf32>)
+// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1, 2], [3]] output_shape [1, 1, 32, 64] : tensor<32x64xf32> into tensor<1x1x32x64xf32>
+// CHECK: return %[[EXPANDED]] : tensor<1x1x32x64xf32>
+func.func @pad_like_pack(%arg0: tensor<32x64xf32>) -> tensor<1x1x32x64xf32> {
+ %empty = tensor.empty() : tensor<1x1x32x64xf32>
+ %0 = tensor.pack %arg0 inner_dims_pos = [0, 1] inner_tiles = [32, 64] into %empty : tensor<32x64xf32> -> tensor<1x1x32x64xf32>
+ return %0 : tensor<1x1x32x64xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @pad_like_pack_with_outer_dims_perm(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<32x64xf32>)
+// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1, 2], [3]] output_shape [1, 1, 32, 64] : tensor<32x64xf32> into tensor<1x1x32x64xf32>
+// CHECK: return %[[EXPANDED]] : tensor<1x1x32x64xf32>
+func.func @pad_like_pack_with_outer_dims_perm(%arg0: tensor<32x64xf32>) -> tensor<1x1x32x64xf32> {
+ %empty = tensor.empty() : tensor<1x1x32x64xf32>
+ %0 = tensor.pack %arg0 outer_dims_perm = [1, 0] inner_dims_pos = [0, 1] inner_tiles = [32, 64] into %empty : tensor<32x64xf32> -> tensor<1x1x32x64xf32>
+ return %0 : tensor<1x1x32x64xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @inner_pad_like_pack(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<32x64xf32>)
+// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0], [1, 2]] output_shape [32, 1, 64] : tensor<32x64xf32> into tensor<32x1x64xf32>
+// CHECK: return %[[EXPANDED]] : tensor<32x1x64xf32>
+func.func @inner_pad_like_pack(%arg0: tensor<32x64xf32>) -> tensor<32x1x64xf32> {
+ %empty = tensor.empty() : tensor<32x1x64xf32>
+ %0 = tensor.pack %arg0 inner_dims_pos = [1] inner_tiles = [64] into %empty : tensor<32x64xf32> -> tensor<32x1x64xf32>
+ return %0 : tensor<32x1x64xf32>
+}
+
+// -----
+
+// Do not simplify pack with inner dimension shuffling.
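+// (An expand_shape cannot reorder elements, so any permutation of the inner
+// tiles blocks the rewrite; the same reasoning applies to the transpose and
+// unpack cases below.)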
+// CHECK-LABEL: func.func @pad_and_inner_dim_shuffle_pack(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<32x64xf32>)
+// CHECK: %[[EMPTY:.+]] = tensor.empty() : tensor<1x1x64x32xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] inner_dims_pos = [1, 0] inner_tiles = [64, 32] into %[[EMPTY]] : tensor<32x64xf32> -> tensor<1x1x64x32xf32>
+// CHECK: return %[[PACK]] : tensor<1x1x64x32xf32>
+func.func @pad_and_inner_dim_shuffle_pack(%arg0: tensor<32x64xf32>) -> tensor<1x1x64x32xf32> {
+ %empty = tensor.empty() : tensor<1x1x64x32xf32>
+ %0 = tensor.pack %arg0 inner_dims_pos = [1, 0] inner_tiles = [64, 32] into %empty : tensor<32x64xf32> -> tensor<1x1x64x32xf32>
+ return %0 : tensor<1x1x64x32xf32>
+}
+
+// -----
+
+// Do not simplify pack with inner dimension transpose.
+// CHECK-LABEL: func.func @pad_like_pack_with_transpose(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<32x64x16xf32>)
+// CHECK: %[[EMPTY:.+]] = tensor.empty() : tensor<32x1x16x64xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] inner_dims_pos = [1] inner_tiles = [64] into %[[EMPTY]] : tensor<32x64x16xf32> -> tensor<32x1x16x64xf32>
+// CHECK: return %[[PACK]] : tensor<32x1x16x64xf32>
+func.func @pad_like_pack_with_transpose(%arg0: tensor<32x64x16xf32>) -> tensor<32x1x16x64xf32> {
+ %empty = tensor.empty() : tensor<32x1x16x64xf32>
+ %0 = tensor.pack %arg0 inner_dims_pos = [1] inner_tiles = [64] into %empty : tensor<32x64x16xf32> -> tensor<32x1x16x64xf32>
+ return %0 : tensor<32x1x16x64xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @unpad_like_unpack(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<1x1x32x64xf32>)
+// CHECK: %[[COLLAPSED:.+]] = tensor.collapse_shape %[[ARG0]] {{\[}}[0, 1, 2], [3]] : tensor<1x1x32x64xf32> into tensor<32x64xf32>
+// CHECK: return %[[COLLAPSED]] : tensor<32x64xf32>
+func.func @unpad_like_unpack(%arg0: tensor<1x1x32x64xf32>) -> tensor<32x64xf32> {
+ %empty = tensor.empty() : tensor<32x64xf32>
+ %0 = tensor.unpack %arg0 inner_dims_pos = [0, 1] inner_tiles = [32, 64] into %empty : tensor<1x1x32x64xf32> -> tensor<32x64xf32>
+ return %0 : tensor<32x64xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @unpad_like_unpack_with_outer_dims_perm(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<1x1x32x64xf32>)
+// CHECK: %[[COLLAPSED:.+]] = tensor.collapse_shape %[[ARG0]] {{\[}}[0, 1, 2], [3]] : tensor<1x1x32x64xf32> into tensor<32x64xf32>
+// CHECK: return %[[COLLAPSED]] : tensor<32x64xf32>
+func.func @unpad_like_unpack_with_outer_dims_perm(%arg0: tensor<1x1x32x64xf32>) -> tensor<32x64xf32> {
+ %empty = tensor.empty() : tensor<32x64xf32>
+ %0 = tensor.unpack %arg0 outer_dims_perm = [1, 0] inner_dims_pos = [0, 1] inner_tiles = [32, 64] into %empty : tensor<1x1x32x64xf32> -> tensor<32x64xf32>
+ return %0 : tensor<32x64xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @inner_unpad_like_unpack(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<32x1x64xf32>)
+// CHECK: %[[COLLAPSED:.+]] = tensor.collapse_shape %[[ARG0]] {{\[}}[0], [1, 2]] : tensor<32x1x64xf32> into tensor<32x64xf32>
+// CHECK: return %[[COLLAPSED]] : tensor<32x64xf32>
+func.func @inner_unpad_like_unpack(%arg0: tensor<32x1x64xf32>) -> tensor<32x64xf32> {
+ %empty = tensor.empty() : tensor<32x64xf32>
+ %0 = tensor.unpack %arg0 inner_dims_pos = [1] inner_tiles = [64] into %empty : tensor<32x1x64xf32> -> tensor<32x64xf32>
+ return %0 : tensor<32x64xf32>
+}
+
+// -----
+
+// Do not simplify unpack with inner dimension shuffling.
+// CHECK-LABEL: func.func @unpad_and_inner_dim_shuffle_unpack(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<1x1x32x64xf32>)
+// CHECK: %[[EMPTY:.+]] = tensor.empty() : tensor<64x32xf32>
+// CHECK: %[[UNPACK:.+]] = tensor.unpack %[[ARG0]] inner_dims_pos = [1, 0] inner_tiles = [32, 64] into %[[EMPTY]] : tensor<1x1x32x64xf32> -> tensor<64x32xf32>
+// CHECK: return %[[UNPACK]] : tensor<64x32xf32>
+func.func @unpad_and_inner_dim_shuffle_unpack(%arg0: tensor<1x1x32x64xf32>) -> tensor<64x32xf32> {
+ %empty = tensor.empty() : tensor<64x32xf32>
+ %0 = tensor.unpack %arg0 inner_dims_pos = [1, 0] inner_tiles = [32, 64] into %empty : tensor<1x1x32x64xf32> -> tensor<64x32xf32>
+ return %0 : tensor<64x32xf32>
+}
+
+// -----
+
+// Do not simplify unpack with inner dimension transpose.
+// CHECK-LABEL: func.func @unpad_like_unpack_with_transpose(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<32x1x16x64xf32>)
+// CHECK: %[[EMPTY:.+]] = tensor.empty() : tensor<32x64x16xf32>
+// CHECK: %[[UNPACK:.+]] = tensor.unpack %[[ARG0]] inner_dims_pos = [1] inner_tiles = [64] into %[[EMPTY]] : tensor<32x1x16x64xf32> -> tensor<32x64x16xf32>
+// CHECK: return %[[UNPACK]] : tensor<32x64x16xf32>
+func.func @unpad_like_unpack_with_transpose(%arg0: tensor<32x1x16x64xf32>) -> tensor<32x64x16xf32> {
+ %empty = tensor.empty() : tensor<32x64x16xf32>
+ %0 = tensor.unpack %arg0 inner_dims_pos = [1] inner_tiles = [64] into %empty : tensor<32x1x16x64xf32> -> tensor<32x64x16xf32>
+ return %0 : tensor<32x64x16xf32>
+}
diff --git a/mlir/test/Dialect/Vector/canonicalize.mlir b/mlir/test/Dialect/Vector/canonicalize.mlir
index 61a5f2a96e1c..22af91e0eb32 100644
--- a/mlir/test/Dialect/Vector/canonicalize.mlir
+++ b/mlir/test/Dialect/Vector/canonicalize.mlir
@@ -2576,9 +2576,8 @@ func.func @load_store_forwarding_rank_mismatch(%v0: vector<4x1x1xf32>, %arg0: te
// CHECK-LABEL: func.func @rank_0_shuffle_to_interleave(
// CHECK-SAME: %[[LHS:.*]]: vector<f64>, %[[RHS:.*]]: vector<f64>)
-func.func @rank_0_shuffle_to_interleave(%arg0: vector<f64>, %arg1: vector<f64>) -> vector<2xf64>
-{
- // CHECK: %[[ZIP:.*]] = vector.interleave %[[LHS]], %[[RHS]] : vector<f64>
+func.func @rank_0_shuffle_to_interleave(%arg0: vector<f64>, %arg1: vector<f64>) -> vector<2xf64> {
+ // CHECK: %[[ZIP:.*]] = vector.interleave %[[LHS]], %[[RHS]] : vector<f64> -> vector<2xf64>
// CHECK: return %[[ZIP]]
%0 = vector.shuffle %arg0, %arg1 [0, 1] : vector<f64>, vector<f64>
return %0 : vector<2xf64>
@@ -2589,7 +2588,7 @@ func.func @rank_0_shuffle_to_interleave(%arg0: vector<f64>, %arg1: vector<f64>)
// CHECK-LABEL: func.func @rank_1_shuffle_to_interleave(
// CHECK-SAME: %[[LHS:.*]]: vector<6xi32>, %[[RHS:.*]]: vector<6xi32>)
func.func @rank_1_shuffle_to_interleave(%arg0: vector<6xi32>, %arg1: vector<6xi32>) -> vector<12xi32> {
- // CHECK: %[[ZIP:.*]] = vector.interleave %[[LHS]], %[[RHS]] : vector<6xi32>
+ // CHECK: %[[ZIP:.*]] = vector.interleave %[[LHS]], %[[RHS]] : vector<6xi32> -> vector<12xi32>
// CHECK: return %[[ZIP]]
%0 = vector.shuffle %arg0, %arg1 [0, 6, 1, 7, 2, 8, 3, 9, 4, 10, 5, 11] : vector<6xi32>, vector<6xi32>
return %0 : vector<12xi32>
diff --git a/mlir/test/Dialect/Vector/invalid.mlir b/mlir/test/Dialect/Vector/invalid.mlir
index c9f7e9c6e2fb..1516f51fe145 100644
--- a/mlir/test/Dialect/Vector/invalid.mlir
+++ b/mlir/test/Dialect/Vector/invalid.mlir
@@ -1798,3 +1798,59 @@ func.func @invalid_outerproduct1(%src : memref<?xf32>) {
// expected-error @+1 {{'vector.outerproduct' op expected 1-d vector for operand #1}}
%op = vector.outerproduct %0, %1 : vector<[4]x[4]xf32>, vector<[4]xf32>
}
+
+// -----
+
+func.func @deinterleave_zero_dim_fail(%vec : vector<f32>) {
+ // expected-error @+1 {{'vector.deinterleave' op operand #0 must be vector of any type values, but got 'vector<f32>'}}
+ %0, %1 = vector.deinterleave %vec : vector<f32> -> vector<f32>
+ return
+}
+
+// -----
+
+func.func @deinterleave_one_dim_fail(%vec : vector<1xf32>) {
+ // expected-error @+1 {{'vector.deinterleave' op failed to verify that the trailing dimension of the source vector has an even number of elements}}
+ %0, %1 = vector.deinterleave %vec : vector<1xf32> -> vector<1xf32>
+ return
+}
+
+// -----
+
+func.func @deinterleave_oversized_output_fail(%vec : vector<4xf32>) {
+ // expected-error @+1 {{'vector.deinterleave' op failed to verify that the trailing dimension of the results is half the width of source trailing dimension}}
+ %0, %1 = "vector.deinterleave" (%vec) : (vector<4xf32>) -> (vector<8xf32>, vector<8xf32>)
+ return
+}
+
+// -----
+
+func.func @deinterleave_output_dim_size_mismatch(%vec : vector<4xf32>) {
+ // expected-error @+1 {{'vector.deinterleave' op failed to verify that the trailing dimension of the results is half the width of source trailing dimension}}
+ %0, %1 = "vector.deinterleave" (%vec) : (vector<4xf32>) -> (vector<4xf32>, vector<2xf32>)
+ return
+}
+
+// -----
+
+func.func @deinterleave_n_dim_rank_fail(%vec : vector<2x3x4xf32>) {
+ // expected-error @+1 {{'vector.deinterleave' op failed to verify that the trailing dimension of the results is half the width of source trailing dimension}}
+ %0, %1 = "vector.deinterleave" (%vec) : (vector<2x3x4xf32>) -> (vector<2x3x4xf32>, vector<2x3x2xf32>)
+ return
+}
+
+// -----
+
+func.func @deinterleave_scalable_dim_size_fail(%vec : vector<2x[4]xf32>) {
+ // expected-error @+1 {{'vector.deinterleave' op failed to verify that all of {res1, res2} have same type}}
+ %0, %1 = "vector.deinterleave" (%vec) : (vector<2x[4]xf32>) -> (vector<2x[2]xf32>, vector<2x[1]xf32>)
+ return
+}
+
+// -----
+
+func.func @deinterleave_scalable_rank_fail(%vec : vector<2x[4]xf32>) {
+ // expected-error @+1 {{'vector.deinterleave' op failed to verify that all of {res1, res2} have same type}}
+ %0, %1 = "vector.deinterleave" (%vec) : (vector<2x[4]xf32>) -> (vector<2x[2]xf32>, vector<[2]xf32>)
+ return
+}
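+// The generic "vector.deinterleave"(...) form is used above so result types
+// the custom assembly cannot express (it prints a single shared result type)
+// still reach the verifier and produce the expected diagnostics.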
diff --git a/mlir/test/Dialect/Vector/ops.mlir b/mlir/test/Dialect/Vector/ops.mlir
index 79a80be4f8b2..c868c881d079 100644
--- a/mlir/test/Dialect/Vector/ops.mlir
+++ b/mlir/test/Dialect/Vector/ops.mlir
@@ -1084,35 +1084,77 @@ func.func @fastmath(%x: vector<42xf32>) -> f32 {
// CHECK-LABEL: @interleave_0d
func.func @interleave_0d(%a: vector<f32>, %b: vector<f32>) -> vector<2xf32> {
- // CHECK: vector.interleave %{{.*}}, %{{.*}} : vector<f32>
- %0 = vector.interleave %a, %b : vector<f32>
+ // CHECK: vector.interleave %{{.*}}, %{{.*}} : vector<f32> -> vector<2xf32>
+ %0 = vector.interleave %a, %b : vector<f32> -> vector<2xf32>
return %0 : vector<2xf32>
}
// CHECK-LABEL: @interleave_1d
func.func @interleave_1d(%a: vector<4xf32>, %b: vector<4xf32>) -> vector<8xf32> {
// CHECK: vector.interleave %{{.*}}, %{{.*}} : vector<4xf32>
- %0 = vector.interleave %a, %b : vector<4xf32>
+ %0 = vector.interleave %a, %b : vector<4xf32> -> vector<8xf32>
return %0 : vector<8xf32>
}
// CHECK-LABEL: @interleave_1d_scalable
func.func @interleave_1d_scalable(%a: vector<[8]xi16>, %b: vector<[8]xi16>) -> vector<[16]xi16> {
// CHECK: vector.interleave %{{.*}}, %{{.*}} : vector<[8]xi16>
- %0 = vector.interleave %a, %b : vector<[8]xi16>
+ %0 = vector.interleave %a, %b : vector<[8]xi16> -> vector<[16]xi16>
return %0 : vector<[16]xi16>
}
// CHECK-LABEL: @interleave_2d
func.func @interleave_2d(%a: vector<2x8xf32>, %b: vector<2x8xf32>) -> vector<2x16xf32> {
// CHECK: vector.interleave %{{.*}}, %{{.*}} : vector<2x8xf32>
- %0 = vector.interleave %a, %b : vector<2x8xf32>
+ %0 = vector.interleave %a, %b : vector<2x8xf32> -> vector<2x16xf32>
return %0 : vector<2x16xf32>
}
// CHECK-LABEL: @interleave_2d_scalable
func.func @interleave_2d_scalable(%a: vector<2x[2]xf64>, %b: vector<2x[2]xf64>) -> vector<2x[4]xf64> {
// CHECK: vector.interleave %{{.*}}, %{{.*}} : vector<2x[2]xf64>
- %0 = vector.interleave %a, %b : vector<2x[2]xf64>
+ %0 = vector.interleave %a, %b : vector<2x[2]xf64> -> vector<2x[4]xf64>
return %0 : vector<2x[4]xf64>
}
+
+// CHECK-LABEL: @deinterleave_1d
+func.func @deinterleave_1d(%arg: vector<4xf32>) -> (vector<2xf32>, vector<2xf32>) {
+ // CHECK: vector.deinterleave %{{.*}} : vector<4xf32> -> vector<2xf32>
+ %0, %1 = vector.deinterleave %arg : vector<4xf32> -> vector<2xf32>
+ return %0, %1 : vector<2xf32>, vector<2xf32>
+}
+
+// CHECK-LABEL: @deinterleave_1d_scalable
+func.func @deinterleave_1d_scalable(%arg: vector<[4]xf32>) -> (vector<[2]xf32>, vector<[2]xf32>) {
+ // CHECK: vector.deinterleave %{{.*}} : vector<[4]xf32> -> vector<[2]xf32>
+ %0, %1 = vector.deinterleave %arg : vector<[4]xf32> -> vector<[2]xf32>
+ return %0, %1 : vector<[2]xf32>, vector<[2]xf32>
+}
+
+// CHECK-LABEL: @deinterleave_2d
+func.func @deinterleave_2d(%arg: vector<3x4xf32>) -> (vector<3x2xf32>, vector<3x2xf32>) {
+ // CHECK: vector.deinterleave %{{.*}} : vector<3x4xf32> -> vector<3x2xf32>
+ %0, %1 = vector.deinterleave %arg : vector<3x4xf32> -> vector<3x2xf32>
+ return %0, %1 : vector<3x2xf32>, vector<3x2xf32>
+}
+
+// CHECK-LABEL: @deinterleave_2d_scalable
+func.func @deinterleave_2d_scalable(%arg: vector<3x[4]xf32>) -> (vector<3x[2]xf32>, vector<3x[2]xf32>) {
+ // CHECK: vector.deinterleave %{{.*}} : vector<3x[4]xf32> -> vector<3x[2]xf32>
+ %0, %1 = vector.deinterleave %arg : vector<3x[4]xf32> -> vector<3x[2]xf32>
+ return %0, %1 : vector<3x[2]xf32>, vector<3x[2]xf32>
+}
+
+// CHECK-LABEL: @deinterleave_nd
+func.func @deinterleave_nd(%arg: vector<2x3x4x6xf32>) -> (vector<2x3x4x3xf32>, vector<2x3x4x3xf32>) {
+ // CHECK: vector.deinterleave %{{.*}} : vector<2x3x4x6xf32> -> vector<2x3x4x3xf32>
+ %0, %1 = vector.deinterleave %arg : vector<2x3x4x6xf32> -> vector<2x3x4x3xf32>
+ return %0, %1 : vector<2x3x4x3xf32>, vector<2x3x4x3xf32>
+}
+
+// CHECK-LABEL: @deinterleave_nd_scalable
+func.func @deinterleave_nd_scalable(%arg: vector<2x3x4x[6]xf32>) -> (vector<2x3x4x[3]xf32>, vector<2x3x4x[3]xf32>) {
+ // CHECK: vector.deinterleave %{{.*}} : vector<2x3x4x[6]xf32> -> vector<2x3x4x[3]xf32>
+ %0, %1 = vector.deinterleave %arg : vector<2x3x4x[6]xf32> -> vector<2x3x4x[3]xf32>
+ return %0, %1 : vector<2x3x4x[3]xf32>, vector<2x3x4x[3]xf32>
+}
diff --git a/mlir/test/Dialect/Vector/vector-interleave-lowering-transforms.mlir b/mlir/test/Dialect/Vector/vector-interleave-lowering-transforms.mlir
index 3dd4857860eb..598f7d70b4f1 100644
--- a/mlir/test/Dialect/Vector/vector-interleave-lowering-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-interleave-lowering-transforms.mlir
@@ -2,8 +2,7 @@
// CHECK-LABEL: @vector_interleave_2d
// CHECK-SAME: %[[LHS:.*]]: vector<2x3xi8>, %[[RHS:.*]]: vector<2x3xi8>)
-func.func @vector_interleave_2d(%a: vector<2x3xi8>, %b: vector<2x3xi8>) -> vector<2x6xi8>
-{
+func.func @vector_interleave_2d(%a: vector<2x3xi8>, %b: vector<2x3xi8>) -> vector<2x6xi8> {
// CHECK-DAG: %[[CST:.*]] = arith.constant dense<0>
// CHECK-DAG: %[[LHS_0:.*]] = vector.extract %[[LHS]][0]
// CHECK-DAG: %[[RHS_0:.*]] = vector.extract %[[RHS]][0]
@@ -14,14 +13,13 @@ func.func @vector_interleave_2d(%a: vector<2x3xi8>, %b: vector<2x3xi8>) -> vecto
// CHECK-DAG: %[[RES_0:.*]] = vector.insert %[[ZIP_0]], %[[CST]] [0]
// CHECK-DAG: %[[RES_1:.*]] = vector.insert %[[ZIP_1]], %[[RES_0]] [1]
// CHECK-NEXT: return %[[RES_1]] : vector<2x6xi8>
- %0 = vector.interleave %a, %b : vector<2x3xi8>
+ %0 = vector.interleave %a, %b : vector<2x3xi8> -> vector<2x6xi8>
return %0 : vector<2x6xi8>
}
// CHECK-LABEL: @vector_interleave_2d_scalable
// CHECK-SAME: %[[LHS:.*]]: vector<2x[8]xi16>, %[[RHS:.*]]: vector<2x[8]xi16>)
-func.func @vector_interleave_2d_scalable(%a: vector<2x[8]xi16>, %b: vector<2x[8]xi16>) -> vector<2x[16]xi16>
-{
+func.func @vector_interleave_2d_scalable(%a: vector<2x[8]xi16>, %b: vector<2x[8]xi16>) -> vector<2x[16]xi16> {
// CHECK-DAG: %[[CST:.*]] = arith.constant dense<0>
// CHECK-DAG: %[[LHS_0:.*]] = vector.extract %[[LHS]][0]
// CHECK-DAG: %[[RHS_0:.*]] = vector.extract %[[RHS]][0]
@@ -32,7 +30,7 @@ func.func @vector_interleave_2d_scalable(%a: vector<2x[8]xi16>, %b: vector<2x[8]
// CHECK-DAG: %[[RES_0:.*]] = vector.insert %[[ZIP_0]], %[[CST]] [0]
// CHECK-DAG: %[[RES_1:.*]] = vector.insert %[[ZIP_1]], %[[RES_0]] [1]
// CHECK-NEXT: return %[[RES_1]] : vector<2x[16]xi16>
- %0 = vector.interleave %a, %b : vector<2x[8]xi16>
+ %0 = vector.interleave %a, %b : vector<2x[8]xi16> -> vector<2x[16]xi16>
return %0 : vector<2x[16]xi16>
}
@@ -44,17 +42,17 @@ func.func @vector_interleave_4d(%a: vector<1x2x3x4xi64>, %b: vector<1x2x3x4xi64>
// CHECK: %[[RHS_0:.*]] = vector.extract %[[RHS]][0, 0, 0] : vector<4xi64> from vector<1x2x3x4xi64>
// CHECK: %[[ZIP_0:.*]] = vector.interleave %[[LHS_0]], %[[RHS_0]] : vector<4xi64>
// CHECK: %[[RES_0:.*]] = vector.insert %[[ZIP_0]], %{{.*}} [0, 0, 0] : vector<8xi64> into vector<1x2x3x8xi64>
- // CHECK-COUNT-5: vector.interleave %{{.*}}, %{{.*}} : vector<4xi64>
- %0 = vector.interleave %a, %b : vector<1x2x3x4xi64>
+ // CHECK-COUNT-5: vector.interleave %{{.*}}, %{{.*}} : vector<4xi64> -> vector<8xi64>
+ %0 = vector.interleave %a, %b : vector<1x2x3x4xi64> -> vector<1x2x3x8xi64>
return %0 : vector<1x2x3x8xi64>
}
// CHECK-LABEL: @vector_interleave_nd_with_scalable_dim
-func.func @vector_interleave_nd_with_scalable_dim(%a: vector<1x3x[2]x2x3x4xf16>, %b: vector<1x3x[2]x2x3x4xf16>) -> vector<1x3x[2]x2x3x8xf16>
-{
+func.func @vector_interleave_nd_with_scalable_dim(
+ %a: vector<1x3x[2]x2x3x4xf16>, %b: vector<1x3x[2]x2x3x4xf16>) -> vector<1x3x[2]x2x3x8xf16> {
// The scalable dim blocks unrolling so only the first two dims are unrolled.
// CHECK-COUNT-3: vector.interleave %{{.*}}, %{{.*}} : vector<[2]x2x3x4xf16>
- %0 = vector.interleave %a, %b : vector<1x3x[2]x2x3x4xf16>
+ %0 = vector.interleave %a, %b : vector<1x3x[2]x2x3x4xf16> -> vector<1x3x[2]x2x3x8xf16>
return %0 : vector<1x3x[2]x2x3x8xf16>
}
diff --git a/mlir/test/Dialect/Vector/vector-interleave-to-shuffle.mlir b/mlir/test/Dialect/Vector/vector-interleave-to-shuffle.mlir
index ed3b3396bf3e..d59cd4e6765b 100644
--- a/mlir/test/Dialect/Vector/vector-interleave-to-shuffle.mlir
+++ b/mlir/test/Dialect/Vector/vector-interleave-to-shuffle.mlir
@@ -1,9 +1,8 @@
// RUN: mlir-opt %s --transform-interpreter | FileCheck %s
// CHECK-LABEL: @vector_interleave_to_shuffle
-func.func @vector_interleave_to_shuffle(%a: vector<7xi16>, %b: vector<7xi16>) -> vector<14xi16>
-{
- %0 = vector.interleave %a, %b : vector<7xi16>
+func.func @vector_interleave_to_shuffle(%a: vector<7xi16>, %b: vector<7xi16>) -> vector<14xi16> {
+ %0 = vector.interleave %a, %b : vector<7xi16> -> vector<14xi16>
return %0 : vector<14xi16>
}
// CHECK: vector.shuffle %arg0, %arg1 [0, 7, 1, 8, 2, 9, 3, 10, 4, 11, 5, 12, 6, 13] : vector<7xi16>, vector<7xi16>
diff --git a/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir b/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
index e48af3cd7aac..349dc1ab31d4 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
@@ -46,6 +46,51 @@ func.func @permutation_with_mask_xfer_write_scalable(%arg0: vector<4x[8]xi16>, %
return
}
+// transfer_write inside a vector.mask region is not supported.
+// CHECK-LABEL: func @masked_permutation_xfer_write_fixed_width
+// CHECK-SAME: %[[ARG_0:.*]]: tensor<?x?xf32>,
+// CHECK-SAME: %[[ARG_1:.*]]: vector<16xf32>,
+// CHECK-SAME: %[[IDX:.*]]: index,
+// CHECK-SAME: %[[MASK:.*]]: vector<16xi1>
+// CHECK-NOT: vector.transpose
+// CHECK: %[[RES:.*]] = vector.mask %[[MASK]] { vector.transfer_write %[[ARG_1]], %[[ARG_0]]{{.*}} vector<16xf32>, tensor<?x?xf32> } : vector<16xi1> -> tensor<?x?xf32>
+func.func @masked_permutation_xfer_write_fixed_width(%t: tensor<?x?xf32>, %val: vector<16xf32>, %idx: index, %mask: vector<16xi1>) -> tensor<?x?xf32> {
+ %r = vector.mask %mask { vector.transfer_write %val, %t[%idx, %idx] {permutation_map = affine_map<(d0, d1) -> (d0)>} : vector<16xf32>, tensor<?x?xf32> } : vector<16xi1> -> tensor<?x?xf32>
+ return %r : tensor<?x?xf32>
+}
+
+// CHECK-LABEL: func.func @masked_permutation_xfer_write_scalable(
+// CHECK-SAME: %[[ARG_0:.*]]: vector<4x[8]xi16>,
+// CHECK-SAME: %[[ARG_1:.*]]: tensor<?x?x?x?xf32>,
+// CHECK-SAME: %[[MASK:.*]]: vector<4x[8]xi1>)
+// CHECK-SAME: -> tensor<?x?x?x?xf32> {
+// CHECK-NOT: vector.transpose
+// CHECK: %[[R:.*]] = vector.mask %[[MASK]] { vector.transfer_write %[[ARG_0]], %[[ARG_1]]{{.*}} : vector<4x[8]xi16>, tensor<?x?x?x?xf32> } : vector<4x[8]xi1> -> tensor<?x?x?x?xf32>
+func.func @masked_permutation_xfer_write_scalable(%arg0: vector<4x[8]xi16>, %t: tensor<?x?x?x?xf32>, %mask: vector<4x[8]xi1>) -> tensor<?x?x?x?xf32> {
+ %c0 = arith.constant 0 : index
+ %r = vector.mask %mask { vector.transfer_write %arg0, %t[%c0, %c0, %c0, %c0] {in_bounds = [true, true], permutation_map = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+} : vector<4x[8]xi16>, tensor<?x?x?x?xf32> } : vector<4x[8]xi1> -> tensor<?x?x?x?xf32>
+
+ return %r : tensor<?x?x?x?xf32>
+}
+
+// transfer_write inside a vector.mask region is not supported.
+// CHECK-LABEL: func @masked_non_permutation_xfer_write_fixed_width
+// CHECK-SAME: %[[ARG0:.*]]: tensor<?x?x?x?xf32>
+// CHECK-SAME: %[[ARG1:.*]]: vector<14x8x16xf32>
+// CHECK-SAME: %[[IDX:.*]]: index) -> tensor<?x?x?x?xf32>
+// CHECK-NOT: vector.broadcast
+// CHECK: %[[masked1:.*]] = vector.mask %0 { vector.transfer_write %[[ARG1]], %[[ARG0]]{{.*}} : vector<14x8x16xf32>, tensor<?x?x?x?xf32> } : vector<14x8x16xi1> -> tensor<?x?x?x?xf32>
+func.func @masked_non_permutation_xfer_write_fixed_width(
+ %arg0 : tensor<?x?x?x?xf32>,
+ %v1 : vector<14x8x16xf32>, %dim : index) -> tensor<?x?x?x?xf32> {
+ %c0 = arith.constant 0 : index
+ %mask = vector.create_mask %dim, %dim, %dim : vector<14x8x16xi1>
+ %0 = vector.mask %mask { vector.transfer_write %v1, %arg0[%c0, %c0, %c0, %c0] {in_bounds = [false, false, true], permutation_map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>} : vector<14x8x16xf32>, tensor<?x?x?x?xf32> } : vector<14x8x16xi1> -> tensor<?x?x?x?xf32>
+
+ return %0 : tensor<?x?x?x?xf32>
+}
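+// In each masked case above the lowering is expected to bail out: no
+// vector.transpose or vector.broadcast is created inside a vector.mask region.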
+
///----------------------------------------------------------------------------------------
/// vector.transfer_read
///----------------------------------------------------------------------------------------
@@ -101,6 +146,37 @@ func.func @permutation_with_mask_xfer_read_scalable(%mem: memref<?x?xf32>, %dim_
return %1 : vector<8x[4]x2xf32>
}
+// transfer_read inside a vector.mask region is not supported.
+// CHECK-LABEL: func @masked_permutation_xfer_read_fixed_width
+// CHECK-SAME: %[[ARG_0:.*]]: tensor<?x1xf32>,
+// CHECK-SAME: %[[ARG_1:.*]]: vector<4x1xi1>
+// CHECK-NOT: vector.transpose
+// CHECK: vector.mask %[[ARG_1]] { vector.transfer_read %[[ARG_0]]{{.*}}: tensor<?x1xf32>, vector<1x4x4xf32> } : vector<4x1xi1> -> vector<1x4x4xf32>
+func.func @masked_permutation_xfer_read_fixed_width(%arg0: tensor<?x1xf32>, %mask : vector<4x1xi1>) {
+ %cst = arith.constant 0.000000e+00 : f32
+ %c0 = arith.constant 0 : index
+ %3 = vector.mask %mask { vector.transfer_read %arg0[%c0, %c0], %cst {permutation_map = affine_map<(d0, d1) -> (d1, 0, d0)>} : tensor<?x1xf32>, vector<1x4x4xf32> } : vector<4x1xi1> -> vector<1x4x4xf32>
+ call @test.some_use(%3) : (vector<1x4x4xf32>) -> ()
+ return
+}
+func.func private @test.some_use(vector<1x4x4xf32>)
+
+// CHECK-LABEL: func.func @masked_permutation_xfer_read_scalable(
+// CHECK-SAME: %[[ARG_0:.*]]: tensor<?x?xf32>,
+// CHECK-SAME: %[[MASK:.*]]: vector<2x[4]xi1>) -> vector<8x[4]x2xf32> {
+// CHECK-NOT: vector.transpose
+// CHECK: %[[T_READ:.*]] = vector.mask %[[MASK]] { vector.transfer_read %[[ARG_0]]{{.*}} : tensor<?x?xf32>, vector<8x[4]x2xf32> } : vector<2x[4]xi1> -> vector<8x[4]x2xf32>
+func.func @masked_permutation_xfer_read_scalable(%t: tensor<?x?xf32>, %mask : vector<2x[4]xi1>) -> vector<8x[4]x2xf32> {
+
+ %c0 = arith.constant 0 : index
+ %cst_0 = arith.constant 0.000000e+00 : f32
+
+ %1 = vector.mask %mask { vector.transfer_read %t[%c0, %c0], %cst_0
+ {in_bounds = [true, true, true], permutation_map = affine_map<(d0, d1) -> (0, d1, d0)>}
+    : tensor<?x?xf32>, vector<8x[4]x2xf32> } : vector<2x[4]xi1> -> vector<8x[4]x2xf32>
+ return %1 : vector<8x[4]x2xf32>
+}
+
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
%f = transform.structured.match ops{["func.func"]} in %module_op
diff --git a/mlir/test/IR/parser.mlir b/mlir/test/IR/parser.mlir
index 020942e7f4c1..bcc146ea0101 100644
--- a/mlir/test/IR/parser.mlir
+++ b/mlir/test/IR/parser.mlir
@@ -597,7 +597,7 @@ func.func @funcattrwithblock() -> ()
return
}
-// CHECK-label func @funcsimplemap
+// CHECK-LABEL: func @funcsimplemap
#map_simple0 = affine_map<()[] -> (10)>
#map_simple1 = affine_map<()[s0] -> (s0)>
#map_non_simple0 = affine_map<(d0)[] -> (d0)>
diff --git a/mlir/test/IR/properties.mlir b/mlir/test/IR/properties.mlir
index 1d22cb1940f2..01ea856b0316 100644
--- a/mlir/test/IR/properties.mlir
+++ b/mlir/test/IR/properties.mlir
@@ -38,3 +38,14 @@ test.using_property_ref_in_custom 1 + 4 = 5
// GENERIC: "test.with_default_valued_properties"()
// GENERIC-SAME: <{a = 0 : i32}>
test.with_default_valued_properties <{a = 0 : i32}>
+
+// CHECK: test.with_optional_properties
+// CHECK-SAME: <{b = 0 : i32}>
+// GENERIC: "test.with_optional_properties"()
+// GENERIC-SAME: <{b = 0 : i32}>
+test.with_optional_properties <{b = 0 : i32}>
+
+// CHECK: test.with_optional_properties {{$}}
+// GENERIC: "test.with_optional_properties"()
+// GENERIC-SAME: : () -> ()
+test.with_optional_properties
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-scalable-interleave.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-scalable-interleave.mlir
index 07989bd71f50..e9f1bbeafacd 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-scalable-interleave.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-scalable-interleave.mlir
@@ -17,7 +17,7 @@ func.func @entry() {
// CHECK: ( 1, 1, 1, 1
// CHECK: ( 2, 2, 2, 2
- %v3 = vector.interleave %v1, %v2 : vector<[4]xf32>
+ %v3 = vector.interleave %v1, %v2 : vector<[4]xf32> -> vector<[8]xf32>
vector.print %v3 : vector<[8]xf32>
// CHECK: ( 1, 2, 1, 2, 1, 2, 1, 2
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-interleave.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-interleave.mlir
index 0bc78af6aba0..d6962cbe2776 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/test-interleave.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/test-interleave.mlir
@@ -16,7 +16,7 @@ func.func @entry() {
// CHECK: ( ( 1, 1, 1, 1 ), ( 1, 1, 1, 1 ) )
// CHECK: ( ( 2, 2, 2, 2 ), ( 2, 2, 2, 2 ) )
- %v3 = vector.interleave %v1, %v2 : vector<2x4xf32>
+ %v3 = vector.interleave %v1, %v2 : vector<2x4xf32> -> vector<2x8xf32>
vector.print %v3 : vector<2x8xf32>
// CHECK: ( ( 1, 2, 1, 2, 1, 2, 1, 2 ), ( 1, 2, 1, 2, 1, 2, 1, 2 ) )
diff --git a/mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x8_8x128_noswizzle.mlir b/mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x8_8x128_noswizzle.mlir
index 2e59b7234e53..391fda82e1e1 100644
--- a/mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x8_8x128_noswizzle.mlir
+++ b/mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x8_8x128_noswizzle.mlir
@@ -6,15 +6,6 @@
// RUN: --entry-point-result=void \
// RUN: | FileCheck %s
-// Basic PTX check to make sure we are generating the right instructions.
-
-// CHECK-PTX: mbarrier.init.shared.b64
-// CHECK-PTX: mbarrier.arrive.expect_tx.shared.b64
-// CHECK-PTX: cp.async.bulk.tensor.2d.shared::cluster.global.mbarrier::complete_tx::bytes
-// CHECK-PTX: cp.async.bulk.tensor.2d.shared::cluster.global.mbarrier::complete_tx::bytes
-// CHECK-PTX: mbarrier.arrive.expect_tx.shared.b64
-// CHECK-PTX: mbarrier.try_wait.parity.shared.b64
-
// RUN: mlir-opt %s --convert-nvgpu-to-nvvm \
// RUN: -gpu-kernel-outlining \
// RUN: -convert-nvvm-to-llvm \
diff --git a/mlir/test/Pass/ir-printing-file-tree.mlir b/mlir/test/Pass/ir-printing-file-tree.mlir
new file mode 100644
index 000000000000..b00d77db2c60
--- /dev/null
+++ b/mlir/test/Pass/ir-printing-file-tree.mlir
@@ -0,0 +1,41 @@
+// Test filtering by "before"
+// RUN: rm -rf %t || true
+// RUN: mlir-opt %s -mlir-print-ir-tree-dir=%t \
+// RUN: -pass-pipeline='builtin.module(builtin.module(func.func(cse,canonicalize)))' \
+// RUN: -mlir-print-ir-before=cse
+// RUN: test -f %t/builtin_module_outer/builtin_module_inner/func_func_symB/0_0_0_cse.mlir
+// RUN: test ! -f %t/builtin_module_outer/builtin_module_inner/func_func_symB/0_0_1_canonicalize.mlir
+// RUN: test -f %t/builtin_module_outer/builtin_module_inner/func_func_symC/0_0_0_cse.mlir
+// RUN: test ! -f %t/builtin_module_outer/builtin_module_inner/func_func_symC/0_0_1_canonicalize.mlir
+
+// Test printing after all and the counter mechanism.
+// RUN: rm -rf %t || true
+// RUN: mlir-opt %s -mlir-print-ir-tree-dir=%t \
+// RUN: -pass-pipeline='builtin.module(canonicalize,canonicalize,func.func(cse),builtin.module(canonicalize,func.func(cse,canonicalize),cse),cse)' \
+// RUN: -mlir-print-ir-after-all
+// RUN: test -f %t/builtin_module_outer/0_canonicalize.mlir
+// RUN: test -f %t/builtin_module_outer/1_canonicalize.mlir
+// RUN: test -f %t/builtin_module_outer/func_func_symA/1_0_cse.mlir
+// RUN: test -f %t/builtin_module_outer/builtin_module_inner/1_0_canonicalize.mlir
+// RUN: test -f %t/builtin_module_outer/builtin_module_inner/func_func_symB/1_0_0_cse.mlir
+// RUN: test -f %t/builtin_module_outer/builtin_module_inner/func_func_symB/1_0_1_canonicalize.mlir
+// RUN: test -f %t/builtin_module_outer/builtin_module_inner/func_func_symC/1_0_0_cse.mlir
+// RUN: test -f %t/builtin_module_outer/builtin_module_inner/func_func_symC/1_0_1_canonicalize.mlir
+// RUN: test -f %t/builtin_module_outer/builtin_module_inner/1_1_cse.mlir
+// RUN: test -f %t/builtin_module_outer/2_cse.mlir
+
+builtin.module @outer {
+
+ func.func @symA() {
+ return
+ }
+
+ builtin.module @inner {
+ func.func @symB() {
+ return
+ }
+ func.func @symC() {
+ return
+ }
+ }
+}
diff --git a/mlir/test/Target/LLVMIR/Import/global-variables.ll b/mlir/test/Target/LLVMIR/Import/global-variables.ll
index 9d9734045988..902f77bd7e6c 100644
--- a/mlir/test/Target/LLVMIR/Import/global-variables.ll
+++ b/mlir/test/Target/LLVMIR/Import/global-variables.ll
@@ -36,7 +36,7 @@
; CHECK-DAG: %[[ADDR:[0-9]+]] = llvm.mlir.addressof @global_int : !llvm.ptr
; CHECK-DAG: %[[IDX:[0-9]+]] = llvm.mlir.constant(2 : i32) : i32
; CHECK-DAG: %[[GEP:[0-9]+]] = llvm.getelementptr %[[ADDR]][%[[IDX]]] : (!llvm.ptr, i32) -> !llvm.ptr
-; CHECK-DAG llvm.return %[[GEP]] : !llvm.ptr
+; CHECK-DAG: llvm.return %[[GEP]] : !llvm.ptr
@global_gep_const_expr = internal constant ptr getelementptr (i32, ptr @global_int, i32 2)
; // -----
diff --git a/mlir/test/Target/LLVMIR/Import/intrinsic.ll b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
index e43024ff868e..9a5528002ef5 100644
--- a/mlir/test/Target/LLVMIR/Import/intrinsic.ll
+++ b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
@@ -732,7 +732,7 @@ define void @coro_promise(ptr %0, i32 %1, i1 %2) {
; CHECK-LABEL: llvm.func @eh_typeid_for
define void @eh_typeid_for(ptr %0) {
; CHECK: llvm.intr.eh.typeid.for %{{.*}} : (!llvm.ptr) -> i32
- %2 = call i32 @llvm.eh.typeid.for(ptr %0)
+ %2 = call i32 @llvm.eh.typeid.for.p0(ptr %0)
ret void
}
@@ -1082,7 +1082,7 @@ declare i1 @llvm.coro.end(ptr, i1, token)
declare ptr @llvm.coro.free(token, ptr nocapture readonly)
declare void @llvm.coro.resume(ptr)
declare ptr @llvm.coro.promise(ptr nocapture, i32, i1)
-declare i32 @llvm.eh.typeid.for(ptr)
+declare i32 @llvm.eh.typeid.for.p0(ptr)
declare ptr @llvm.stacksave.p0()
declare ptr addrspace(1) @llvm.stacksave.p1()
declare void @llvm.stackrestore.p0(ptr)
diff --git a/mlir/test/Target/LLVMIR/Import/metadata-loop.ll b/mlir/test/Target/LLVMIR/Import/metadata-loop.ll
index 3516101a2367..20431a7412bd 100644
--- a/mlir/test/Target/LLVMIR/Import/metadata-loop.ll
+++ b/mlir/test/Target/LLVMIR/Import/metadata-loop.ll
@@ -324,7 +324,7 @@ end:
; // -----
; Verify the unused access group is not imported.
-; CHECK-COUNT1: #llvm.access_group
+; CHECK-COUNT-1: #llvm.access_group
; CHECK-LABEL: @unused_parallel_access
define void @unused_parallel_access(ptr %arg) {
diff --git a/mlir/test/Target/LLVMIR/llvmir-debug.mlir b/mlir/test/Target/LLVMIR/llvmir-debug.mlir
index 1cb94bca169d..2792f13e4ef8 100644
--- a/mlir/test/Target/LLVMIR/llvmir-debug.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-debug.mlir
@@ -234,7 +234,7 @@ llvm.func @func_with_inlined_dbg_value(%arg0: i32) -> (i32) {
// CHECK-DAG: ![[LEXICAL_BLOCK_FILE:.*]] = distinct !DILexicalBlockFile(scope: ![[INNER_FUNC]], file: ![[FILE]], discriminator: 0)
// CHECK-DAG: ![[VAR_LOC0]] = !DILocalVariable(name: "a", scope: ![[OUTER_FUNC]], file: ![[FILE]]
// CHECK-DAG: ![[VAR_LOC1]] = !DILocalVariable(name: "b", scope: ![[LEXICAL_BLOCK_FILE]], file: ![[FILE]]
-// CHECK-DAG ![[LABEL]] = !DILabel(scope: ![[LEXICAL_BLOCK_FILE]], name: "label", file: ![[FILE]], line: 42)
+// CHECK-DAG: ![[LABEL]] = !DILabel(scope: ![[LEXICAL_BLOCK_FILE]], name: "label", file: ![[FILE]], line: 42)
// -----
diff --git a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
index 238c3e4263cb..1e533aeacfb4 100644
--- a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
@@ -724,7 +724,7 @@ llvm.func @coro_promise(%arg0: !llvm.ptr, %arg1 : i32, %arg2 : i1) {
// CHECK-LABEL: @eh_typeid_for
llvm.func @eh_typeid_for(%arg0 : !llvm.ptr) {
- // CHECK: call i32 @llvm.eh.typeid.for
+ // CHECK: call i32 @llvm.eh.typeid.for.p0
%0 = llvm.intr.eh.typeid.for %arg0 : (!llvm.ptr) -> i32
llvm.return
}
diff --git a/mlir/test/Target/LLVMIR/omptarget-array-sectioning-host.mlir b/mlir/test/Target/LLVMIR/omptarget-array-sectioning-host.mlir
index 08ccbf04014a..0016a1f05a2b 100644
--- a/mlir/test/Target/LLVMIR/omptarget-array-sectioning-host.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-array-sectioning-host.mlir
@@ -42,7 +42,7 @@ module attributes {omp.is_target_device = false} {
// CHECK: @.offload_sizes = private unnamed_addr constant [2 x i64] [i64 36, i64 108]
// CHECK: @.offload_maptypes = private unnamed_addr constant [2 x i64] [i64 35, i64 35]
-// CHECKL: @.offload_mapnames = private constant [2 x ptr] [ptr @0, ptr @1]
+// CHECK: @.offload_mapnames = private constant [2 x ptr] [ptr @0, ptr @1]
// CHECK: define void @_3d_target_array_section()
diff --git a/mlir/test/Transforms/test-convert-func-op.mlir b/mlir/test/Transforms/test-convert-func-op.mlir
new file mode 100644
index 000000000000..6e96703cda57
--- /dev/null
+++ b/mlir/test/Transforms/test-convert-func-op.mlir
@@ -0,0 +1,12 @@
+// RUN: mlir-opt %s -test-convert-func-op | FileCheck %s
+
+// CHECK-LABEL: llvm.func @add
+func.func @add(%arg0: i32, %arg1: i32) -> i32 attributes { llvm.emit_c_interface } {
+ %res = arith.addi %arg0, %arg1 : i32
+ return %res : i32
+}
+// CHECK-LABEL: llvm.func @_mlir_ciface_add
+// CHECK-SAME: [[ARG0:%[a-zA-Z0-9_]+]]: i32
+// CHECK-SAME: [[ARG1:%[a-zA-Z0-9_]+]]: i32
+// CHECK-NEXT: [[RES:%.*]] = llvm.call @add([[ARG0]], [[ARG1]])
+// CHECK-NEXT: llvm.return [[RES]]
diff --git a/mlir/test/lib/Analysis/CMakeLists.txt b/mlir/test/lib/Analysis/CMakeLists.txt
index d168888c1e71..7c6b31ae8b73 100644
--- a/mlir/test/lib/Analysis/CMakeLists.txt
+++ b/mlir/test/lib/Analysis/CMakeLists.txt
@@ -10,6 +10,7 @@ add_mlir_library(MLIRTestAnalysis
TestMemRefDependenceCheck.cpp
TestMemRefStrideCalculation.cpp
TestSlice.cpp
+ TestTopologicalSort.cpp
DataFlow/TestDeadCodeAnalysis.cpp
DataFlow/TestDenseBackwardDataFlowAnalysis.cpp
diff --git a/mlir/test/lib/Analysis/TestSlice.cpp b/mlir/test/lib/Analysis/TestSlice.cpp
index b445febde597..7e8320dbf3ec 100644
--- a/mlir/test/lib/Analysis/TestSlice.cpp
+++ b/mlir/test/lib/Analysis/TestSlice.cpp
@@ -1,4 +1,4 @@
-//===------------- TestSlice.cpp - Test slice related analisis ------------===//
+//===- TestSlice.cpp - Test slice related analysis ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,13 +6,15 @@
//
//===----------------------------------------------------------------------===//
-#include "mlir/Analysis/SliceAnalysis.h"
+#include "mlir/Analysis/TopologicalSortUtils.h"
+#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Pass/Pass.h"
using namespace mlir;
-static const StringLiteral kOrderMarker = "__test_sort_original_idx__";
+static const StringLiteral kToSortMark = "test_to_sort";
+static const StringLiteral kOrderIndex = "test_sort_index";
namespace {
@@ -23,23 +25,20 @@ struct TestTopologicalSortPass
StringRef getArgument() const final { return "test-print-topological-sort"; }
StringRef getDescription() const final {
- return "Print operations in topological order";
+ return "Sorts operations topologically and attaches attributes with their "
+ "corresponding index in the ordering to them";
}
void runOnOperation() override {
- std::map<int, Operation *> ops;
- getOperation().walk([&ops](Operation *op) {
- if (auto originalOrderAttr = op->getAttrOfType<IntegerAttr>(kOrderMarker))
- ops[originalOrderAttr.getInt()] = op;
+ SetVector<Operation *> toSort;
+ getOperation().walk([&](Operation *op) {
+ if (op->hasAttrOfType<UnitAttr>(kToSortMark))
+ toSort.insert(op);
});
- SetVector<Operation *> sortedOp;
- for (auto op : ops)
- sortedOp.insert(op.second);
- sortedOp = topologicalSort(sortedOp);
- llvm::errs() << "Testing : " << getOperation().getName() << "\n";
- for (Operation *op : sortedOp) {
- op->print(llvm::errs());
- llvm::errs() << "\n";
- }
+
+ auto i32Type = IntegerType::get(&getContext(), 32);
+ SetVector<Operation *> sortedOps = topologicalSort(toSort);
+ for (auto [index, op] : llvm::enumerate(sortedOps))
+ op->setAttr(kOrderIndex, IntegerAttr::get(i32Type, index));
}
};
diff --git a/mlir/test/lib/Transforms/TestTopologicalSort.cpp b/mlir/test/lib/Analysis/TestTopologicalSort.cpp
index 3b110c712620..c7e0206b2a4d 100644
--- a/mlir/test/lib/Transforms/TestTopologicalSort.cpp
+++ b/mlir/test/lib/Analysis/TestTopologicalSort.cpp
@@ -6,10 +6,10 @@
//
//===----------------------------------------------------------------------===//
+#include "mlir/Analysis/TopologicalSortUtils.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/Pass.h"
-#include "mlir/Transforms/TopologicalSortUtils.h"
using namespace mlir;
diff --git a/mlir/test/lib/Conversion/FuncToLLVM/CMakeLists.txt b/mlir/test/lib/Conversion/FuncToLLVM/CMakeLists.txt
index 45ba62d839d3..d3dbc94a99bc 100644
--- a/mlir/test/lib/Conversion/FuncToLLVM/CMakeLists.txt
+++ b/mlir/test/lib/Conversion/FuncToLLVM/CMakeLists.txt
@@ -1,6 +1,7 @@
# Exclude tests from libMLIR.so
add_mlir_library(MLIRTestFuncToLLVM
TestConvertCallOp.cpp
+ TestConvertFuncOp.cpp
EXCLUDE_FROM_LIBMLIR
diff --git a/mlir/test/lib/Conversion/FuncToLLVM/TestConvertFuncOp.cpp b/mlir/test/lib/Conversion/FuncToLLVM/TestConvertFuncOp.cpp
new file mode 100644
index 000000000000..e25e890e2290
--- /dev/null
+++ b/mlir/test/lib/Conversion/FuncToLLVM/TestConvertFuncOp.cpp
@@ -0,0 +1,93 @@
+//===- TestConvertFuncOp.cpp - Test LLVM Conversion of Func FuncOp --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "TestDialect.h"
+
+#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h"
+#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
+#include "mlir/Conversion/LLVMCommon/Pattern.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
+#include "mlir/IR/PatternMatch.h"
+#include "mlir/Pass/Pass.h"
+
+using namespace mlir;
+
+namespace {
+
+/// Test helper Conversion Pattern to directly call `convertFuncOpToLLVMFuncOp`
+/// to verify this utility function includes all functionalities of conversion
+struct FuncOpConversion : public ConvertOpToLLVMPattern<func::FuncOp> {
+ FuncOpConversion(const LLVMTypeConverter &converter)
+ : ConvertOpToLLVMPattern(converter) {}
+
+ LogicalResult
+ matchAndRewrite(func::FuncOp funcOp, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+ FailureOr<LLVM::LLVMFuncOp> newFuncOp = mlir::convertFuncOpToLLVMFuncOp(
+ cast<FunctionOpInterface>(funcOp.getOperation()), rewriter,
+ *getTypeConverter());
+ if (failed(newFuncOp))
+      return rewriter.notifyMatchFailure(funcOp, "could not convert func op");
+
+ rewriter.eraseOp(funcOp);
+ return success();
+ }
+};
+
+struct ReturnOpConversion : public ConvertOpToLLVMPattern<func::ReturnOp> {
+ ReturnOpConversion(const LLVMTypeConverter &converter)
+ : ConvertOpToLLVMPattern(converter) {}
+
+ LogicalResult
+ matchAndRewrite(func::ReturnOp returnOp, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+ rewriter.replaceOpWithNewOp<LLVM::ReturnOp>(returnOp,
+ returnOp->getOperands());
+ return success();
+ }
+};
+
+struct TestConvertFuncOp
+ : public PassWrapper<TestConvertFuncOp, OperationPass<ModuleOp>> {
+ MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestConvertFuncOp)
+
+ void getDependentDialects(DialectRegistry &registry) const final {
+ registry.insert<LLVM::LLVMDialect>();
+ }
+
+ StringRef getArgument() const final { return "test-convert-func-op"; }
+
+ StringRef getDescription() const final {
+ return "Tests conversion of `func.func` to `llvm.func` for different "
+ "attributes";
+ }
+
+ void runOnOperation() override {
+ MLIRContext *ctx = &getContext();
+
+ LowerToLLVMOptions options(ctx);
+ // Populate type conversions.
+ LLVMTypeConverter typeConverter(ctx, options);
+
+ RewritePatternSet patterns(ctx);
+ patterns.add<FuncOpConversion>(typeConverter);
+ patterns.add<ReturnOpConversion>(typeConverter);
+
+ LLVMConversionTarget target(getContext());
+ if (failed(applyPartialConversion(getOperation(), target,
+ std::move(patterns))))
+ signalPassFailure();
+ }
+};
+
+} // namespace
+
+namespace mlir::test {
+void registerConvertFuncOpPass() { PassRegistration<TestConvertFuncOp>(); }
+} // namespace mlir::test
diff --git a/mlir/test/lib/Dialect/Test/TestOpDefs.cpp b/mlir/test/lib/Dialect/Test/TestOpDefs.cpp
index bfee0391f670..b058a8e1abbc 100644
--- a/mlir/test/lib/Dialect/Test/TestOpDefs.cpp
+++ b/mlir/test/lib/Dialect/Test/TestOpDefs.cpp
@@ -706,11 +706,20 @@ void TestReflectBoundsOp::inferResultRanges(
const ConstantIntRanges &range = argRanges[0];
MLIRContext *ctx = getContext();
Builder b(ctx);
- auto intTy = getType();
- setUminAttr(b.getIntegerAttr(intTy, range.umin()));
- setUmaxAttr(b.getIntegerAttr(intTy, range.umax()));
- setSminAttr(b.getIntegerAttr(intTy, range.smin()));
- setSmaxAttr(b.getIntegerAttr(intTy, range.smax()));
+ Type sIntTy, uIntTy;
+  // For plain `IntegerType`s, we can derive the appropriate signed and
+  // unsigned types for the attributes.
+ if (auto intTy = llvm::dyn_cast<IntegerType>(getType())) {
+ unsigned bitwidth = intTy.getWidth();
+ sIntTy = b.getIntegerType(bitwidth, /*isSigned=*/true);
+ uIntTy = b.getIntegerType(bitwidth, /*isSigned=*/false);
+  } else {
+    sIntTy = uIntTy = getType();
+  }
+
+ setUminAttr(b.getIntegerAttr(uIntTy, range.umin()));
+ setUmaxAttr(b.getIntegerAttr(uIntTy, range.umax()));
+ setSminAttr(b.getIntegerAttr(sIntTy, range.smin()));
+ setSmaxAttr(b.getIntegerAttr(sIntTy, range.smax()));
setResultRanges(getResult(), range);
}
diff --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td
index c5d0341b7de7..18324482153a 100644
--- a/mlir/test/lib/Dialect/Test/TestOps.td
+++ b/mlir/test/lib/Dialect/Test/TestOps.td
@@ -1697,6 +1697,12 @@ def : Pat<
ConstantStrAttr<StrAttr, "MatchVariadic">)>;
def : Pat<
+ (MixedVOperandOp5 $input1a, $input1b, $input2, $attr1,
+ ConstantStrAttr<StrAttr, "MatchInverseVariadic">),
+ (MixedVOperandOp3 $input2, (variadic $input1b), (variadic $input1a),
+ ConstantAttr<I32Attr, "1">:$attr1)>;
+
+def : Pat<
(MixedVOperandOp4 (variadic (MixedVOperandInOutI32Op $input1a),
(MixedVOperandInOutI32Op $input1b)),
$input2, ConstantAttr<I32Attr, "1">:$attr1),
@@ -3107,6 +3113,11 @@ def TestOpWithDefaultValuedProperties : TEST_Op<"with_default_valued_properties"
let arguments = (ins DefaultValuedAttr<I32Attr, "0">:$a);
}
+def TestOpWithOptionalProperties : TEST_Op<"with_optional_properties"> {
+ let assemblyFormat = "prop-dict attr-dict";
+ let arguments = (ins OptionalAttr<I32Attr>:$a, OptionalAttr<I32Attr>:$b);
+}
+
//===----------------------------------------------------------------------===//
// Test Dataflow
//===----------------------------------------------------------------------===//
diff --git a/mlir/test/lib/Transforms/CMakeLists.txt b/mlir/test/lib/Transforms/CMakeLists.txt
index a849b7ebd29e..975a41ac3d5f 100644
--- a/mlir/test/lib/Transforms/CMakeLists.txt
+++ b/mlir/test/lib/Transforms/CMakeLists.txt
@@ -26,7 +26,6 @@ add_mlir_library(MLIRTestTransforms
TestInlining.cpp
TestIntRangeInference.cpp
TestMakeIsolatedFromAbove.cpp
- TestTopologicalSort.cpp
${MLIRTestTransformsPDLSrc}
EXCLUDE_FROM_LIBMLIR
diff --git a/mlir/test/lit.cfg.py b/mlir/test/lit.cfg.py
index ea6d9ae71b77..9ed3a2efcb8f 100644
--- a/mlir/test/lit.cfg.py
+++ b/mlir/test/lit.cfg.py
@@ -245,7 +245,7 @@ def have_host_jit_feature_support(feature_name):
if have_host_jit_feature_support("jit"):
config.available_features.add("host-supports-jit")
-if config.run_cuda_tests:
+if config.run_nvptx_tests:
config.available_features.add("host-supports-nvptx")
if config.run_rocm_tests:
diff --git a/mlir/test/lit.site.cfg.py.in b/mlir/test/lit.site.cfg.py.in
index c0fa1b8980e5..4f5186df7d20 100644
--- a/mlir/test/lit.site.cfg.py.in
+++ b/mlir/test/lit.site.cfg.py.in
@@ -25,7 +25,7 @@ config.mlir_cmake_dir = "@MLIR_CMAKE_DIR@"
config.mlir_lib_dir = "@MLIR_LIB_DIR@"
config.build_examples = @LLVM_BUILD_EXAMPLES@
-config.run_cuda_tests = @MLIR_ENABLE_CUDA_CONVERSIONS@
+config.run_nvptx_tests = @LLVM_HAS_NVPTX_TARGET@
config.enable_cuda_runner = @MLIR_ENABLE_CUDA_RUNNER@
config.run_rocm_tests = @MLIR_ENABLE_ROCM_CONVERSIONS@
config.enable_rocm_runner = @MLIR_ENABLE_ROCM_RUNNER@
diff --git a/mlir/test/mlir-tblgen/op-decl-and-defs.td b/mlir/test/mlir-tblgen/op-decl-and-defs.td
index 499e3ceecaf0..836ddca5eb84 100644
--- a/mlir/test/mlir-tblgen/op-decl-and-defs.td
+++ b/mlir/test/mlir-tblgen/op-decl-and-defs.td
@@ -58,7 +58,8 @@ def NS_AOp : NS_Op<"a_op", [IsolatedFromAbove, IsolatedFromAbove]> {
// CHECK: namespace detail {
// CHECK: class AOpGenericAdaptorBase {
// CHECK: public:
-// CHECK: AOpGenericAdaptorBase(AOp{{[[:space:]]}}
+// CHECK: AOpGenericAdaptorBase(::mlir::DictionaryAttr attrs = {}, const ::mlir::EmptyProperties &properties = {}, ::mlir::RegionRange regions = {}) : odsAttrs(attrs), odsRegions(regions)
+// CHECK: AOpGenericAdaptorBase(::mlir::Operation *op) : odsAttrs(op->getRawDictionaryAttrs()), odsOpName(op->getName()), odsRegions(op->getRegions()) {}
// CHECK: ::mlir::IntegerAttr getAttr1Attr();
// CHECK: uint32_t getAttr1();
// CHECK: ::mlir::FloatAttr getSomeAttr2Attr();
@@ -128,15 +129,8 @@ def NS_AOp : NS_Op<"a_op", [IsolatedFromAbove, IsolatedFromAbove]> {
// DEFS-LABEL: NS::AOp definitions
-// DEFS: AOpGenericAdaptorBase::AOpGenericAdaptorBase(::mlir::DictionaryAttr attrs, const ::mlir::EmptyProperties &properties, ::mlir::RegionRange regions) : odsAttrs(attrs), odsRegions(regions)
-
// Check that `getAttrDictionary()` is used when not using properties.
-// DEFS: AOpGenericAdaptorBase::AOpGenericAdaptorBase(AOp op)
-// DEFS-SAME: op->getAttrDictionary()
-// DEFS-SAME: p.getProperties()
-// DEFS-SAME: op->getRegions()
-
// DECLS: ::mlir::RegionRange AOpGenericAdaptorBase::getSomeRegions()
// DECLS-NEXT: return odsRegions.drop_front(1);
// DECLS: ::mlir::RegionRange AOpGenericAdaptorBase::getRegions()
@@ -344,12 +338,11 @@ def NS_NOp : NS_Op<"op_with_properties", []> {
let arguments = (ins Property<"unsigned">:$value);
}
-// Check that `getDiscardableAttrDictionary()` is used with properties.
-
-// DEFS: NOpGenericAdaptorBase::NOpGenericAdaptorBase(NOp op) : NOpGenericAdaptorBase(
-// DEFS-SAME: op->getDiscardableAttrDictionary()
-// DEFS-SAME: op.getProperties()
-// DEFS-SAME: op->getRegions()
+// DEFS: NOpGenericAdaptorBase::NOpGenericAdaptorBase(NOp op) :
+// DEFS-SAME: odsAttrs(op->getRawDictionaryAttrs())
+// DEFS-SAME: odsOpName(op->getName())
+// DEFS-SAME: properties(op.getProperties())
+// DEFS-SAME: odsRegions(op->getRegions())
// Test that type defs have the proper namespaces when used as a constraint.
// ---
diff --git a/mlir/test/mlir-tblgen/op-operand.td b/mlir/test/mlir-tblgen/op-operand.td
index a74970824479..a2fa1f7046a9 100644
--- a/mlir/test/mlir-tblgen/op-operand.td
+++ b/mlir/test/mlir-tblgen/op-operand.td
@@ -15,9 +15,6 @@ def OpA : NS_Op<"one_normal_operand_op", []> {
// CHECK-LABEL: OpA definitions
-// CHECK: OpAGenericAdaptorBase::OpAGenericAdaptorBase
-// CHECK-SAME: odsAttrs(attrs)
-
// CHECK: void OpA::build
// CHECK: ::mlir::Value input
// CHECK: odsState.addOperands(input);
diff --git a/mlir/test/mlir-tblgen/pattern.mlir b/mlir/test/mlir-tblgen/pattern.mlir
index 7f9c450f15b2..5ff8710b9377 100644
--- a/mlir/test/mlir-tblgen/pattern.mlir
+++ b/mlir/test/mlir-tblgen/pattern.mlir
@@ -527,6 +527,14 @@ func.func @testMatchVariadic(%arg0: i32, %arg1: i32, %arg2: i32, %arg3: i32) ->
return
}
+// CHECK-LABEL: @testReplaceVariadic
+func.func @testReplaceVariadic(%arg0: i32, %arg1: i32, %arg2: i32, %arg3: i32) -> () {
+ // CHECK: "test.mixed_variadic_in3"(%arg2, %arg1, %arg0) <{count = 1 : i32}>
+ "test.mixed_variadic_in5"(%arg0, %arg1, %arg2) <{attr1 = 0 : i32, pattern_name = "MatchInverseVariadic"}> : (i32, i32, i32) -> ()
+
+ return
+}
+
// CHECK-LABEL: @testMatchVariadicSubDag
func.func @testMatchVariadicSubDag(%arg0: i32, %arg1: i32, %arg2: i32) -> () {
// CHECK: %[[IN0:.*]] = "test.mixed_variadic_in_out_i32"(%arg0) : (i32) -> i32
diff --git a/mlir/test/mlir-vulkan-runner/iaddcarry_extended.mlir b/mlir/test/mlir-vulkan-runner/addui_extended.mlir
index 9b1f1964b3f9..9b1f1964b3f9 100644
--- a/mlir/test/mlir-vulkan-runner/iaddcarry_extended.mlir
+++ b/mlir/test/mlir-vulkan-runner/addui_extended.mlir
diff --git a/mlir/test/python/dialects/transform_structured_ext.py b/mlir/test/python/dialects/transform_structured_ext.py
index 935534edba7a..f97017b7a2c7 100644
--- a/mlir/test/python/dialects/transform_structured_ext.py
+++ b/mlir/test/python/dialects/transform_structured_ext.py
@@ -443,7 +443,7 @@ def testTileExplicitLoopTypeAll(target):
structured.TileUsingForOp(types, target, sizes=[2, 3, 4])
# CHECK-LABEL: TEST: testTileExplicitLoopTypeAll
# CHECK: = transform.structured.tile
- # CHECK-SAME : (!transform.any_op) -> (!transform.any_op, !transform.op<"scf.for">,
+ # CHECK-SAME: (!transform.any_op) -> (!transform.any_op, !transform.op<"scf.for">,
# CHECK-SAME: !transform.op<"scf.parallel">, !transform.op<"scf.forall">
diff --git a/mlir/test/python/ir/array_attributes.py b/mlir/test/python/ir/array_attributes.py
index 9251588a4c48..2bc403aace83 100644
--- a/mlir/test/python/ir/array_attributes.py
+++ b/mlir/test/python/ir/array_attributes.py
@@ -51,6 +51,87 @@ def testGetDenseElementsUnSupportedTypeOkIfExplicitTypeProvided():
################################################################################
+# Tests of the list of attributes .get() factory method
+################################################################################
+
+
+# CHECK-LABEL: TEST: testGetDenseElementsFromList
+@run
+def testGetDenseElementsFromList():
+ with Context(), Location.unknown():
+ attrs = [FloatAttr.get(F64Type.get(), 1.0), FloatAttr.get(F64Type.get(), 2.0)]
+ attr = DenseElementsAttr.get(attrs)
+
+ # CHECK: dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf64>
+ print(attr)
+
+
+# CHECK-LABEL: TEST: testGetDenseElementsFromListWithExplicitType
+@run
+def testGetDenseElementsFromListWithExplicitType():
+ with Context(), Location.unknown():
+ attrs = [FloatAttr.get(F64Type.get(), 1.0), FloatAttr.get(F64Type.get(), 2.0)]
+ shaped_type = ShapedType(Type.parse("tensor<2xf64>"))
+ attr = DenseElementsAttr.get(attrs, shaped_type)
+
+ # CHECK: dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf64>
+ print(attr)
+
+
+# CHECK-LABEL: TEST: testGetDenseElementsFromListEmptyList
+@run
+def testGetDenseElementsFromListEmptyList():
+ with Context(), Location.unknown():
+ attrs = []
+
+ try:
+ attr = DenseElementsAttr.get(attrs)
+ except ValueError as e:
+ # CHECK: Attributes list must be non-empty
+ print(e)
+
+
+# CHECK-LABEL: TEST: testGetDenseElementsFromListNonAttributeType
+@run
+def testGetDenseElementsFromListNonAttributeType():
+ with Context(), Location.unknown():
+ attrs = [1.0]
+
+ try:
+ attr = DenseElementsAttr.get(attrs)
+ except RuntimeError as e:
+ # CHECK: Invalid attribute when attempting to create an ArrayAttribute
+ print(e)
+
+
+# CHECK-LABEL: TEST: testGetDenseElementsFromListMismatchedType
+@run
+def testGetDenseElementsFromListMismatchedType():
+ with Context(), Location.unknown():
+ attrs = [FloatAttr.get(F64Type.get(), 1.0), FloatAttr.get(F64Type.get(), 2.0)]
+ shaped_type = ShapedType(Type.parse("tensor<2xf32>"))
+
+ try:
+ attr = DenseElementsAttr.get(attrs, shaped_type)
+ except ValueError as e:
+ # CHECK: All attributes must be of the same type and match the type parameter
+ print(e)
+
+
+# CHECK-LABEL: TEST: testGetDenseElementsFromListMixedTypes
+@run
+def testGetDenseElementsFromListMixedTypes():
+ with Context(), Location.unknown():
+ attrs = [FloatAttr.get(F64Type.get(), 1.0), FloatAttr.get(F32Type.get(), 2.0)]
+
+ try:
+ attr = DenseElementsAttr.get(attrs)
+ except ValueError as e:
+ # CHECK: All attributes must be of the same type and match the type parameter
+ print(e)
+
+
+################################################################################
# Splats.
################################################################################
@@ -205,6 +286,7 @@ def testGetDenseElementsBoolSplat():
### float and double arrays.
+
# CHECK-LABEL: TEST: testGetDenseElementsF16
@run
def testGetDenseElementsF16():
diff --git a/mlir/tools/mlir-linalg-ods-gen/CMakeLists.txt b/mlir/tools/mlir-linalg-ods-gen/CMakeLists.txt
index 787a0bb35d7b..ef5ce8882ec7 100644
--- a/mlir/tools/mlir-linalg-ods-gen/CMakeLists.txt
+++ b/mlir/tools/mlir-linalg-ods-gen/CMakeLists.txt
@@ -18,6 +18,7 @@ setup_host_tool(mlir-linalg-ods-yaml-gen MLIR_LINALG_ODS_YAML_GEN MLIR_LINALG_OD
if(NOT ${MLIR_LINALG_ODS_YAML_GEN_EXE} STREQUAL "mlir-linalg-ods-yaml-gen")
add_custom_target(mlir-linalg-ods-yaml-gen-host DEPENDS ${MLIR_LINALG_ODS_YAML_GEN_EXE})
+ set_target_properties(mlir-linalg-ods-yaml-gen-host PROPERTIES FOLDER "MLIR/Tablegenning")
if(NOT LLVM_BUILD_UTILS)
set_target_properties(mlir-linalg-ods-yaml-gen PROPERTIES EXCLUDE_FROM_ALL ON)
diff --git a/mlir/tools/mlir-opt/mlir-opt.cpp b/mlir/tools/mlir-opt/mlir-opt.cpp
index 1dfc5d178b61..0e8b161d5134 100644
--- a/mlir/tools/mlir-opt/mlir-opt.cpp
+++ b/mlir/tools/mlir-opt/mlir-opt.cpp
@@ -71,6 +71,7 @@ namespace test {
void registerTestCompositePass();
void registerCommutativityUtils();
void registerConvertCallOpPass();
+void registerConvertFuncOpPass();
void registerInliner();
void registerMemRefBoundCheck();
void registerPatternsTestPass();
@@ -199,6 +200,7 @@ void registerTestPasses() {
mlir::test::registerTestCompositePass();
mlir::test::registerCommutativityUtils();
mlir::test::registerConvertCallOpPass();
+ mlir::test::registerConvertFuncOpPass();
mlir::test::registerInliner();
mlir::test::registerMemRefBoundCheck();
mlir::test::registerPatternsTestPass();
diff --git a/mlir/tools/mlir-pdll/CMakeLists.txt b/mlir/tools/mlir-pdll/CMakeLists.txt
index 67b65d7ad572..35b8870a1b1a 100644
--- a/mlir/tools/mlir-pdll/CMakeLists.txt
+++ b/mlir/tools/mlir-pdll/CMakeLists.txt
@@ -21,7 +21,6 @@ add_tablegen(mlir-pdll MLIR_PDLL
${LIBS}
)
-set_target_properties(mlir-pdll PROPERTIES FOLDER "Tablegenning")
target_link_libraries(mlir-pdll PRIVATE ${LIBS})
mlir_check_all_link_libraries(mlir-pdll)
diff --git a/mlir/tools/mlir-src-sharder/CMakeLists.txt b/mlir/tools/mlir-src-sharder/CMakeLists.txt
index 4ef870b61124..6f98bd15fc18 100644
--- a/mlir/tools/mlir-src-sharder/CMakeLists.txt
+++ b/mlir/tools/mlir-src-sharder/CMakeLists.txt
@@ -8,7 +8,7 @@ add_tablegen(mlir-src-sharder MLIR_SRC_SHARDER
${LIBS}
)
-set_target_properties(mlir-src-sharder PROPERTIES FOLDER "Tablegenning")
+set_target_properties(mlir-src-sharder PROPERTIES FOLDER "MLIR/Tablegenning")
target_link_libraries(mlir-src-sharder PRIVATE ${LIBS})
mlir_check_all_link_libraries(mlir-src-sharder)
diff --git a/mlir/tools/mlir-tblgen/CMakeLists.txt b/mlir/tools/mlir-tblgen/CMakeLists.txt
index f2c5e4b3f87a..20a200bc3540 100644
--- a/mlir/tools/mlir-tblgen/CMakeLists.txt
+++ b/mlir/tools/mlir-tblgen/CMakeLists.txt
@@ -33,7 +33,6 @@ add_tablegen(mlir-tblgen MLIR
SPIRVUtilsGen.cpp
)
-set_target_properties(mlir-tblgen PROPERTIES FOLDER "Tablegenning")
target_link_libraries(mlir-tblgen
PRIVATE
MLIRTblgenLib)
diff --git a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
index e013ccac5dd0..adda7ce6fc6c 100644
--- a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
@@ -4101,7 +4101,8 @@ OpOperandAdaptorEmitter::OpOperandAdaptorEmitter(
"{}");
}
paramList.emplace_back("::mlir::RegionRange", "regions", "{}");
- auto *baseConstructor = genericAdaptorBase.addConstructor(paramList);
+ auto *baseConstructor =
+ genericAdaptorBase.addConstructor<Method::Inline>(paramList);
baseConstructor->addMemberInitializer("odsAttrs", "attrs");
if (useProperties)
baseConstructor->addMemberInitializer("properties", "properties");
@@ -4163,14 +4164,24 @@ OpOperandAdaptorEmitter::OpOperandAdaptorEmitter(
// and the value range from the parameter.
{
// Base class is in the cpp file and can simply access the members of the op
- // class to initialize the template independent fields.
- auto *constructor = genericAdaptorBase.addConstructor(
- MethodParameter(op.getCppClassName(), "op"));
- constructor->addMemberInitializer(
- genericAdaptorBase.getClassName(),
- llvm::Twine(!useProperties ? "op->getAttrDictionary()"
- : "op->getDiscardableAttrDictionary()") +
- ", op.getProperties(), op->getRegions()");
+ // class to initialize the template independent fields. If the op doesn't
+ // have properties, we can emit a generic constructor inline. Otherwise,
+ // emit it out-of-line because we need the op to be defined.
+ Constructor *constructor;
+ if (useProperties) {
+ constructor = genericAdaptorBase.addConstructor(
+ MethodParameter(op.getCppClassName(), "op"));
+ } else {
+ constructor = genericAdaptorBase.addConstructor<Method::Inline>(
+ MethodParameter("::mlir::Operation *", "op"));
+ }
+ constructor->addMemberInitializer("odsAttrs",
+ "op->getRawDictionaryAttrs()");
+ // Retrieve the operation name from the op directly.
+ constructor->addMemberInitializer("odsOpName", "op->getName()");
+ if (useProperties)
+ constructor->addMemberInitializer("properties", "op.getProperties()");
+ constructor->addMemberInitializer("odsRegions", "op->getRegions()");
// Generic adaptor is templated and therefore defined inline in the header.
// We cannot use the Op class here as it is an incomplete type (we have a
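Concretely, for an op without properties the emitted adaptor base now looks roughly as follows. This is a sketch reconstructed from the CHECK and DEFS lines in op-decl-and-defs.td above, not verbatim tblgen output, and the member list is trimmed to the template-independent fields:

    class AOpGenericAdaptorBase {
    public:
      AOpGenericAdaptorBase(::mlir::DictionaryAttr attrs = {},
                            const ::mlir::EmptyProperties &properties = {},
                            ::mlir::RegionRange regions = {})
          : odsAttrs(attrs), odsRegions(regions) {}
      // Newly inline, and taking a plain Operation*: it only uses generic
      // Operation API, so the concrete op class need not be complete here.
      AOpGenericAdaptorBase(::mlir::Operation *op)
          : odsAttrs(op->getRawDictionaryAttrs()), odsOpName(op->getName()),
            odsRegions(op->getRegions()) {}

    protected:
      ::mlir::DictionaryAttr odsAttrs;
      ::std::optional<::mlir::OperationName> odsOpName;
      ::mlir::RegionRange odsRegions;
    };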
diff --git a/mlir/tools/mlir-tblgen/RewriterGen.cpp b/mlir/tools/mlir-tblgen/RewriterGen.cpp
index e63a065a0708..d8e16d98fd75 100644
--- a/mlir/tools/mlir-tblgen/RewriterGen.cpp
+++ b/mlir/tools/mlir-tblgen/RewriterGen.cpp
@@ -159,6 +159,10 @@ private:
// Returns the symbol of the old value serving as the replacement.
StringRef handleReplaceWithValue(DagNode tree);
+ // Emits the C++ statement to replace the matched DAG with an array of
+ // matched values.
+ std::string handleVariadic(DagNode tree, int depth);
+
// Trailing directives are used at the end of DAG node argument lists to
// specify additional behaviour for op matchers and creators, etc.
struct TrailingDirectives {
@@ -1241,6 +1245,9 @@ std::string PatternEmitter::handleResultPattern(DagNode resultTree,
if (resultTree.isReplaceWithValue())
return handleReplaceWithValue(resultTree).str();
+ if (resultTree.isVariadic())
+ return handleVariadic(resultTree, depth);
+
// Normal op creation.
auto symbol = handleOpCreation(resultTree, resultIndex, depth);
if (resultTree.getSymbol().empty()) {
@@ -1251,6 +1258,26 @@ std::string PatternEmitter::handleResultPattern(DagNode resultTree,
return symbol;
}
+std::string PatternEmitter::handleVariadic(DagNode tree, int depth) {
+ assert(tree.isVariadic());
+
+ auto name = std::string(formatv("tblgen_variadic_values_{0}", nextValueId++));
+ symbolInfoMap.bindValue(name);
+ os << "::llvm::SmallVector<::mlir::Value, 4> " << name << ";\n";
+ for (int i = 0, e = tree.getNumArgs(); i != e; ++i) {
+ if (auto child = tree.getArgAsNestedDag(i)) {
+ os << name << ".push_back(" << handleResultPattern(child, i, depth + 1)
+ << ");\n";
+ } else {
+ os << name << ".push_back("
+ << handleOpArgument(tree.getArgAsLeaf(i), tree.getArgName(i))
+ << ");\n";
+ }
+ }
+
+ return name;
+}
+
StringRef PatternEmitter::handleReplaceWithValue(DagNode tree) {
assert(tree.isReplaceWithValue());
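For reference, for the MatchInverseVariadic pattern added to TestOps.td above, handleVariadic() prints statements of roughly the following shape into the generated matchAndRewrite(). The symbol names come from the nextValueId counter and the bound operand names; the exact spellings here are illustrative:

    // One SmallVector per `(variadic ...)` node in the result pattern.
    ::llvm::SmallVector<::mlir::Value, 4> tblgen_variadic_values_0;
    tblgen_variadic_values_0.push_back(input1b);
    ::llvm::SmallVector<::mlir::Value, 4> tblgen_variadic_values_1;
    tblgen_variadic_values_1.push_back(input1a);
    // The vectors are then passed as the variadic operand groups when the
    // replacement MixedVOperandOp3 is created.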
diff --git a/mlir/unittests/CMakeLists.txt b/mlir/unittests/CMakeLists.txt
index 6d8aa290e82f..c5f0d7e384d0 100644
--- a/mlir/unittests/CMakeLists.txt
+++ b/mlir/unittests/CMakeLists.txt
@@ -1,5 +1,3 @@
-set_target_properties(MLIRUnitTests PROPERTIES FOLDER "MLIR Tests")
-
# To silence warning caused by Wundef.
add_definitions(-DGTEST_NO_LLVM_SUPPORT=0)
diff --git a/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp b/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp
index cea49356538f..a8fe20d52fb2 100644
--- a/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp
+++ b/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp
@@ -30,7 +30,7 @@
using namespace mlir;
// Skip the test if the NVPTX target was not built.
-#if MLIR_ENABLE_CUDA_CONVERSIONS
+#if LLVM_HAS_NVPTX_TARGET
#define SKIP_WITHOUT_NVPTX(x) x
#else
#define SKIP_WITHOUT_NVPTX(x) DISABLED_##x
diff --git a/offload/CMakeLists.txt b/offload/CMakeLists.txt
index c3dcebfb7301..03b620186562 100644
--- a/offload/CMakeLists.txt
+++ b/offload/CMakeLists.txt
@@ -2,6 +2,7 @@
# to build offload with CMake.
cmake_minimum_required(VERSION 3.20.0)
+set(LLVM_SUBPROJECT_TITLE "liboffload")
if ("${CMAKE_SOURCE_DIR}" STREQUAL "${CMAKE_CURRENT_SOURCE_DIR}")
set(OPENMP_STANDALONE_BUILD TRUE)
@@ -143,9 +144,31 @@ set(LIBOMPTARGET_PLUGINS_TO_BUILD "all" CACHE STRING
if(LIBOMPTARGET_PLUGINS_TO_BUILD STREQUAL "all")
set(LIBOMPTARGET_PLUGINS_TO_BUILD ${LIBOMPTARGET_ALL_PLUGIN_TARGETS})
endif()
+
+if(NOT CMAKE_SYSTEM_NAME MATCHES "Linux" AND
+ "host" IN_LIST LIBOMPTARGET_PLUGINS_TO_BUILD)
+ message(STATUS "Not building host plugin: only Linux systems are supported")
+ list(REMOVE_ITEM LIBOMPTARGET_PLUGINS_TO_BUILD "host")
+endif()
+if(NOT (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86_64)|(ppc64le)|(aarch64)$"
+ AND CMAKE_SYSTEM_NAME MATCHES "Linux"))
+ if("amdgpu" IN_LIST LIBOMPTARGET_PLUGINS_TO_BUILD)
+ message(STATUS "Not building AMDGPU plugin: only support AMDGPU in "
+ "Linux x86_64, ppc64le, or aarch64 hosts")
+ list(REMOVE_ITEM LIBOMPTARGET_PLUGINS_TO_BUILD "amdgpu")
+ endif()
+ if("nvptx" IN_LIST LIBOMPTARGET_PLUGINS_TO_BUILD)
+ message(STATUS "Not building CUDA plugin: only support AMDGPU in "
+ "Linux x86_64, ppc64le, or aarch64 hosts")
+ list(REMOVE_ITEM LIBOMPTARGET_PLUGINS_TO_BUILD "cuda")
+ endif()
+endif()
message(STATUS "Building the offload library with support for "
"the \"${LIBOMPTARGET_PLUGINS_TO_BUILD}\" plugins")
+set(LIBOMPTARGET_DLOPEN_PLUGINS "${LIBOMPTARGET_PLUGINS_TO_BUILD}" CACHE STRING
+ "Semicolon-separated list of plugins to use 'dlopen' for runtime linking")
+
set(LIBOMPTARGET_ENUM_PLUGIN_TARGETS "")
foreach(plugin IN LISTS LIBOMPTARGET_PLUGINS_TO_BUILD)
set(LIBOMPTARGET_ENUM_PLUGIN_TARGETS
diff --git a/offload/DeviceRTL/include/Utils.h b/offload/DeviceRTL/include/Utils.h
index 4ab0aea46eea..d43b7f5c95de 100644
--- a/offload/DeviceRTL/include/Utils.h
+++ b/offload/DeviceRTL/include/Utils.h
@@ -25,6 +25,8 @@ int32_t shuffle(uint64_t Mask, int32_t Var, int32_t SrcLane);
int32_t shuffleDown(uint64_t Mask, int32_t Var, uint32_t Delta, int32_t Width);
+uint64_t ballotSync(uint64_t Mask, int32_t Pred);
+
/// Return \p LowBits and \p HighBits packed into a single 64 bit value.
uint64_t pack(uint32_t LowBits, uint32_t HighBits);
diff --git a/offload/DeviceRTL/src/Mapping.cpp b/offload/DeviceRTL/src/Mapping.cpp
index b2028a8fb4f5..4f39d2a299ee 100644
--- a/offload/DeviceRTL/src/Mapping.cpp
+++ b/offload/DeviceRTL/src/Mapping.cpp
@@ -364,4 +364,8 @@ _TGT_KERNEL_LANGUAGE(block_id, getBlockIdInKernel)
_TGT_KERNEL_LANGUAGE(block_dim, getNumberOfThreadsInBlock)
_TGT_KERNEL_LANGUAGE(grid_dim, getNumberOfBlocksInKernel)
+extern "C" uint64_t ompx_ballot_sync(uint64_t mask, int pred) {
+ return utils::ballotSync(mask, pred);
+}
+
#pragma omp end declare target
diff --git a/offload/DeviceRTL/src/Utils.cpp b/offload/DeviceRTL/src/Utils.cpp
index d07ac0fb499c..606e3bec0d33 100644
--- a/offload/DeviceRTL/src/Utils.cpp
+++ b/offload/DeviceRTL/src/Utils.cpp
@@ -37,6 +37,8 @@ int32_t shuffle(uint64_t Mask, int32_t Var, int32_t SrcLane);
int32_t shuffleDown(uint64_t Mask, int32_t Var, uint32_t LaneDelta,
int32_t Width);
+uint64_t ballotSync(uint64_t Mask, int32_t Pred);
+
/// AMDGCN Implementation
///
///{
@@ -57,6 +59,10 @@ int32_t shuffleDown(uint64_t Mask, int32_t Var, uint32_t LaneDelta,
return __builtin_amdgcn_ds_bpermute(Index << 2, Var);
}
+uint64_t ballotSync(uint64_t Mask, int32_t Pred) {
+ return Mask & __builtin_amdgcn_ballot_w64(Pred);
+}
+
bool isSharedMemPtr(const void *Ptr) {
return __builtin_amdgcn_is_shared(
(const __attribute__((address_space(0))) void *)Ptr);
@@ -80,6 +86,10 @@ int32_t shuffleDown(uint64_t Mask, int32_t Var, uint32_t Delta, int32_t Width) {
return __nvvm_shfl_sync_down_i32(Mask, Var, Delta, T);
}
+uint64_t ballotSync(uint64_t Mask, int32_t Pred) {
+ return __nvvm_vote_ballot_sync(static_cast<uint32_t>(Mask), Pred);
+}
+
bool isSharedMemPtr(const void *Ptr) { return __nvvm_isspacep_shared(Ptr); }
#pragma omp end declare variant
@@ -103,6 +113,10 @@ int32_t utils::shuffleDown(uint64_t Mask, int32_t Var, uint32_t Delta,
return impl::shuffleDown(Mask, Var, Delta, Width);
}
+uint64_t utils::ballotSync(uint64_t Mask, int32_t Pred) {
+ return impl::ballotSync(Mask, Pred);
+}
+
bool utils::isSharedMemPtr(void *Ptr) { return impl::isSharedMemPtr(Ptr); }
extern "C" {
diff --git a/offload/cmake/Modules/LibomptargetGetDependencies.cmake b/offload/cmake/Modules/LibomptargetGetDependencies.cmake
index e37b86b2a81f..c296f7ea3863 100644
--- a/offload/cmake/Modules/LibomptargetGetDependencies.cmake
+++ b/offload/cmake/Modules/LibomptargetGetDependencies.cmake
@@ -3,7 +3,6 @@
#
# libffi : required to launch target kernels given function and argument
# pointers.
-# CUDA : required to control offloading to NVIDIA GPUs.
include (FindPackageHandleStandardArgs)
@@ -44,13 +43,6 @@ find_package(FFI QUIET)
set(LIBOMPTARGET_DEP_LIBFFI_FOUND ${FFI_FOUND})
################################################################################
-# Looking for CUDA...
-################################################################################
-
-find_package(CUDAToolkit QUIET)
-set(LIBOMPTARGET_DEP_CUDA_FOUND ${CUDAToolkit_FOUND})
-
-################################################################################
# Looking for NVIDIA GPUs...
################################################################################
set(LIBOMPTARGET_DEP_CUDA_ARCH "sm_35")
diff --git a/offload/plugins-nextgen/amdgpu/CMakeLists.txt b/offload/plugins-nextgen/amdgpu/CMakeLists.txt
index 2f4057c0ae7e..47cd2feefc72 100644
--- a/offload/plugins-nextgen/amdgpu/CMakeLists.txt
+++ b/offload/plugins-nextgen/amdgpu/CMakeLists.txt
@@ -1,11 +1,6 @@
# As of rocm-3.7, hsa is installed with cmake packages and kmt is found via hsa
find_package(hsa-runtime64 QUIET 1.2.0 HINTS ${CMAKE_INSTALL_PREFIX} PATHS /opt/rocm)
-if(NOT (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86_64)|(ppc64le)|(aarch64)$" AND CMAKE_SYSTEM_NAME MATCHES "Linux"))
- message(STATUS "Not building AMDGPU NextGen plugin: only support AMDGPU in Linux x86_64, ppc64le, or aarch64 hosts")
- return()
-endif()
-
# Create the library and add the default arguments.
add_target_library(omptarget.rtl.amdgpu AMDGPU)
@@ -13,8 +8,7 @@ target_sources(omptarget.rtl.amdgpu PRIVATE src/rtl.cpp)
target_include_directories(omptarget.rtl.amdgpu PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}/utils)
-option(LIBOMPTARGET_FORCE_DLOPEN_LIBHSA "Build with dlopened libhsa" OFF)
-if(hsa-runtime64_FOUND AND NOT LIBOMPTARGET_FORCE_DLOPEN_LIBHSA)
+if(hsa-runtime64_FOUND AND NOT "amdgpu" IN_LIST LIBOMPTARGET_DLOPEN_PLUGINS)
message(STATUS "Building AMDGPU plugin linked against libhsa")
target_link_libraries(omptarget.rtl.amdgpu PRIVATE hsa-runtime64::hsa-runtime64)
else()
diff --git a/offload/plugins-nextgen/common/include/JIT.h b/offload/plugins-nextgen/common/include/JIT.h
index b22197b89208..4414926a6178 100644
--- a/offload/plugins-nextgen/common/include/JIT.h
+++ b/offload/plugins-nextgen/common/include/JIT.h
@@ -55,10 +55,6 @@ struct JITEngine {
process(const __tgt_device_image &Image,
target::plugin::GenericDeviceTy &Device);
- /// Return true if \p Image is a bitcode image that can be JITed for the given
- /// architecture.
- Expected<bool> checkBitcodeImage(StringRef Buffer) const;
-
private:
/// Compile the bitcode image \p Image and generate the binary image that can
/// be loaded to the target device of the triple \p Triple architecture \p
diff --git a/offload/plugins-nextgen/common/include/PluginInterface.h b/offload/plugins-nextgen/common/include/PluginInterface.h
index 83f6e8d76fec..eda6a4fd541e 100644
--- a/offload/plugins-nextgen/common/include/PluginInterface.h
+++ b/offload/plugins-nextgen/common/include/PluginInterface.h
@@ -1052,6 +1052,10 @@ struct GenericPluginTy {
/// given target. Returns true if the \p Image is compatible with the plugin.
Expected<bool> checkELFImage(StringRef Image) const;
+ /// Return true if the \p Image can be compiled to run on the platform's
+ /// target architecture.
+ Expected<bool> checkBitcodeImage(StringRef Image) const;
+
/// Indicate if an image is compatible with the plugin devices. Notice that
/// this function may be called before actually initializing the devices. So
/// we could not move this function into GenericDeviceTy.
@@ -1066,8 +1070,11 @@ protected:
public:
// TODO: This plugin interface needs to be cleaned up.
+ /// Returns true if the plugin has been initialized.
+ int32_t is_initialized() const;
+
/// Returns non-zero if the provided \p Image can be executed by the runtime.
- int32_t is_valid_binary(__tgt_device_image *Image);
+ int32_t is_valid_binary(__tgt_device_image *Image, bool Initialized = true);
/// Initialize the device inside of the plugin.
int32_t init_device(int32_t DeviceId);
@@ -1187,6 +1194,9 @@ public:
void **KernelPtr);
private:
+ /// Indicates if the platform runtime has been fully initialized.
+ bool Initialized = false;
+
/// Number of devices available for the plugin.
int32_t NumDevices = 0;
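The intended calling convention for the split interface is condensed below. This is a sketch of the PluginManager::registerLib() flow added later in this patch, with error handling elided:

    for (auto &Plugin : Plugins) {
      if (Plugin->is_initialized())
        continue;
      for (int32_t I = 0; I < Desc->NumDeviceImages; ++I) {
        // Pre-init query: cheap ELF machine / bitcode triple check only.
        if (!Plugin->is_valid_binary(&Desc->DeviceImages[I],
                                     /*Initialized=*/false))
          continue;
        // Initialize lazily, only for plugins that own at least one image.
        if (auto Err = Plugin->init())
          DP("Failed to init plugin: %s\n", toString(std::move(Err)).c_str());
      }
    }
    // Later, per image: the full check, including isELFCompatible().
    // R.is_valid_binary(Img, /*Initialized=*/true);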
diff --git a/offload/plugins-nextgen/common/src/JIT.cpp b/offload/plugins-nextgen/common/src/JIT.cpp
index 9d58e6060646..9dbba1459839 100644
--- a/offload/plugins-nextgen/common/src/JIT.cpp
+++ b/offload/plugins-nextgen/common/src/JIT.cpp
@@ -323,19 +323,3 @@ JITEngine::process(const __tgt_device_image &Image,
return &Image;
}
-
-Expected<bool> JITEngine::checkBitcodeImage(StringRef Buffer) const {
- TimeTraceScope TimeScope("Check bitcode image");
-
- assert(identify_magic(Buffer) == file_magic::bitcode &&
- "Input is not bitcode");
-
- LLVMContext Context;
- auto ModuleOrErr = getLazyBitcodeModule(MemoryBufferRef(Buffer, ""), Context,
- /*ShouldLazyLoadMetadata=*/true);
- if (!ModuleOrErr)
- return ModuleOrErr.takeError();
- Module &M = **ModuleOrErr;
-
- return Triple(M.getTargetTriple()).getArch() == TT.getArch();
-}
diff --git a/offload/plugins-nextgen/common/src/PluginInterface.cpp b/offload/plugins-nextgen/common/src/PluginInterface.cpp
index 550ebc9c28b2..913721a15d71 100644
--- a/offload/plugins-nextgen/common/src/PluginInterface.cpp
+++ b/offload/plugins-nextgen/common/src/PluginInterface.cpp
@@ -24,6 +24,7 @@
#include "omp-tools.h"
#endif
+#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/JSON.h"
@@ -1495,6 +1496,7 @@ Error GenericPluginTy::init() {
if (!NumDevicesOrErr)
return NumDevicesOrErr.takeError();
+ Initialized = true;
NumDevices = *NumDevicesOrErr;
if (NumDevices == 0)
return Plugin::success();
@@ -1578,14 +1580,27 @@ Expected<bool> GenericPluginTy::checkELFImage(StringRef Image) const {
if (!MachineOrErr)
return MachineOrErr.takeError();
- if (!*MachineOrErr)
+ return MachineOrErr;
+}
+
+Expected<bool> GenericPluginTy::checkBitcodeImage(StringRef Image) const {
+ if (identify_magic(Image) != file_magic::bitcode)
return false;
- // Perform plugin-dependent checks for the specific architecture if needed.
- return isELFCompatible(Image);
+ LLVMContext Context;
+ auto ModuleOrErr = getLazyBitcodeModule(MemoryBufferRef(Image, ""), Context,
+ /*ShouldLazyLoadMetadata=*/true);
+ if (!ModuleOrErr)
+ return ModuleOrErr.takeError();
+ Module &M = **ModuleOrErr;
+
+ return Triple(M.getTargetTriple()).getArch() == getTripleArch();
}
-int32_t GenericPluginTy::is_valid_binary(__tgt_device_image *Image) {
+int32_t GenericPluginTy::is_initialized() const { return Initialized; }
+
+int32_t GenericPluginTy::is_valid_binary(__tgt_device_image *Image,
+ bool Initialized) {
StringRef Buffer(reinterpret_cast<const char *>(Image->ImageStart),
target::getPtrDiff(Image->ImageEnd, Image->ImageStart));
@@ -1603,10 +1618,17 @@ int32_t GenericPluginTy::is_valid_binary(__tgt_device_image *Image) {
auto MatchOrErr = checkELFImage(Buffer);
if (Error Err = MatchOrErr.takeError())
return HandleError(std::move(Err));
- return *MatchOrErr;
+ if (!Initialized || !*MatchOrErr)
+ return *MatchOrErr;
+
+ // Perform plugin-dependent checks for the specific architecture if needed.
+ auto CompatibleOrErr = isELFCompatible(Buffer);
+ if (Error Err = CompatibleOrErr.takeError())
+ return HandleError(std::move(Err));
+ return *CompatibleOrErr;
}
case file_magic::bitcode: {
- auto MatchOrErr = getJIT().checkBitcodeImage(Buffer);
+ auto MatchOrErr = checkBitcodeImage(Buffer);
if (Error Err = MatchOrErr.takeError())
return HandleError(std::move(Err));
return *MatchOrErr;
diff --git a/offload/plugins-nextgen/cuda/CMakeLists.txt b/offload/plugins-nextgen/cuda/CMakeLists.txt
index 10ff612848ad..5fdfb8f9cf62 100644
--- a/offload/plugins-nextgen/cuda/CMakeLists.txt
+++ b/offload/plugins-nextgen/cuda/CMakeLists.txt
@@ -1,17 +1,10 @@
-if (NOT (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86_64)|(ppc64le)|(aarch64)$" AND CMAKE_SYSTEM_NAME MATCHES "Linux"))
- message(STATUS "Not building CUDA NextGen offloading plugin: only support CUDA in Linux x86_64, ppc64le, or aarch64 hosts.")
- return()
-endif()
-
-message(STATUS "Building CUDA NextGen offloading plugin.")
-
# Create the library and add the default arguments.
add_target_library(omptarget.rtl.cuda CUDA)
target_sources(omptarget.rtl.cuda PRIVATE src/rtl.cpp)
-option(LIBOMPTARGET_FORCE_DLOPEN_LIBCUDA "Build with dlopened libcuda" OFF)
-if(LIBOMPTARGET_DEP_CUDA_FOUND AND NOT LIBOMPTARGET_FORCE_DLOPEN_LIBCUDA)
+find_package(CUDAToolkit QUIET)
+if(CUDAToolkit_FOUND AND NOT "cuda" IN_LIST LIBOMPTARGET_DLOPEN_PLUGINS)
message(STATUS "Building CUDA plugin linked against libcuda")
target_link_libraries(omptarget.rtl.cuda PRIVATE CUDA::cuda_driver)
else()
diff --git a/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.h b/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.h
index 32031c28f879..d65e5cf61e09 100644
--- a/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.h
+++ b/offload/plugins-nextgen/cuda/dynamic_cuda/cuda.h
@@ -16,6 +16,15 @@
#include <cstddef>
#include <cstdint>
+#define cuDeviceTotalMem cuDeviceTotalMem_v2
+#define cuModuleGetGlobal cuModuleGetGlobal_v2
+#define cuMemGetInfo cuMemGetInfo_v2
+#define cuMemAlloc cuMemAlloc_v2
+#define cuMemFree cuMemFree_v2
+#define cuMemAllocHost cuMemAllocHost_v2
+#define cuDevicePrimaryCtxRelease cuDevicePrimaryCtxRelease_v2
+#define cuDevicePrimaryCtxSetFlags cuDevicePrimaryCtxSetFlags_v2
+
typedef int CUdevice;
typedef uintptr_t CUdeviceptr;
typedef struct CUmod_st *CUmodule;
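Background on the aliases above (a sketch, not the plugin's actual loader code): libcuda exports the versioned entry points, so a dlopen-based consumer must resolve names like "cuMemAlloc_v2" to get the current ABI, which is what the #define block arranges for the unversioned wrapper declarations:

    #include <dlfcn.h>
    #include <cstddef>
    #include <cstdint>

    typedef int CUresult;          // Stand-in typedefs, mirroring this header.
    typedef uintptr_t CUdeviceptr;

    static CUresult (*cuMemAllocFn)(CUdeviceptr *, std::size_t);

    bool loadCuMemAlloc() {
      void *Handle = dlopen("libcuda.so.1", RTLD_LAZY);
      if (!Handle)
        return false;
      // Resolving the unversioned "cuMemAlloc" could bind to the legacy ABI;
      // the _v2 symbol is the one current CUDA headers alias to.
      cuMemAllocFn = reinterpret_cast<CUresult (*)(CUdeviceptr *, std::size_t)>(
          dlsym(Handle, "cuMemAlloc_v2"));
      return cuMemAllocFn != nullptr;
    }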
diff --git a/offload/plugins-nextgen/exports b/offload/plugins-nextgen/exports
deleted file mode 100644
index cc7beda183af..000000000000
--- a/offload/plugins-nextgen/exports
+++ /dev/null
@@ -1,6 +0,0 @@
-VERS1.0 {
- global:
- __tgt_rtl*;
- local:
- *;
-};
diff --git a/offload/plugins-nextgen/host/CMakeLists.txt b/offload/plugins-nextgen/host/CMakeLists.txt
index 9c6aa274921b..817d128f9241 100644
--- a/offload/plugins-nextgen/host/CMakeLists.txt
+++ b/offload/plugins-nextgen/host/CMakeLists.txt
@@ -1,7 +1,3 @@
-if(NOT CMAKE_SYSTEM_NAME MATCHES "Linux")
- return()
-endif()
-
set(supported_targets x86_64 aarch64 ppc64 ppc64le s390x)
if(NOT ${CMAKE_SYSTEM_PROCESSOR} IN_LIST supported_targets)
message(STATUS "Not building ${machine} NextGen offloading plugin")
diff --git a/offload/src/PluginManager.cpp b/offload/src/PluginManager.cpp
index 191afa345641..f72007849e36 100644
--- a/offload/src/PluginManager.cpp
+++ b/offload/src/PluginManager.cpp
@@ -34,15 +34,8 @@ void PluginManager::init() {
// Attempt to create an instance of each supported plugin.
#define PLUGIN_TARGET(Name) \
do { \
- auto Plugin = std::unique_ptr<GenericPluginTy>(createPlugin_##Name()); \
- if (auto Err = Plugin->init()) { \
- [[maybe_unused]] std::string InfoMsg = toString(std::move(Err)); \
- DP("Failed to init plugin: %s\n", InfoMsg.c_str()); \
- } else { \
- DP("Registered plugin %s with %d visible device(s)\n", \
- Plugin->getName(), Plugin->number_of_devices()); \
- Plugins.emplace_back(std::move(Plugin)); \
- } \
+ Plugins.emplace_back( \
+ std::unique_ptr<GenericPluginTy>(createPlugin_##Name())); \
} while (false);
#include "Shared/Targets.def"
@@ -160,6 +153,27 @@ void PluginManager::registerLib(__tgt_bin_desc *Desc) {
if (Entry.flags == OMP_REGISTER_REQUIRES)
PM->addRequirements(Entry.data);
+ // Initialize all the plugins that have associated images.
+ for (auto &Plugin : Plugins) {
+ if (Plugin->is_initialized())
+ continue;
+
+    // Initialize the plugin if it can handle any of the provided images.
+ for (int32_t i = 0; i < Desc->NumDeviceImages; ++i) {
+ if (!Plugin->is_valid_binary(&Desc->DeviceImages[i],
+ /*Initialized=*/false))
+ continue;
+
+ if (auto Err = Plugin->init()) {
+ [[maybe_unused]] std::string InfoMsg = toString(std::move(Err));
+ DP("Failed to init plugin: %s\n", InfoMsg.c_str());
+ } else {
+ DP("Registered plugin %s with %d visible device(s)\n",
+ Plugin->getName(), Plugin->number_of_devices());
+ }
+ }
+ }
+
// Extract the exectuable image and extra information if availible.
for (int32_t i = 0; i < Desc->NumDeviceImages; ++i)
PM->addDeviceImage(*Desc, Desc->DeviceImages[i]);
@@ -177,7 +191,7 @@ void PluginManager::registerLib(__tgt_bin_desc *Desc) {
if (!R.number_of_devices())
continue;
- if (!R.is_valid_binary(Img)) {
+ if (!R.is_valid_binary(Img, /*Initialized=*/true)) {
DP("Image " DPxMOD " is NOT compatible with RTL %s!\n",
DPxPTR(Img->ImageStart), R.getName());
continue;
diff --git a/offload/test/offloading/dynamic_module.c b/offload/test/offloading/dynamic_module.c
index f1e9862002a1..9dcf3a1ae649 100644
--- a/offload/test/offloading/dynamic_module.c
+++ b/offload/test/offloading/dynamic_module.c
@@ -2,6 +2,8 @@
// RUN: %libomptarget-compile-generic %t.so && %libomptarget-run-generic 2>&1 | %fcheck-generic
// RUN: %libomptarget-compileopt-generic -DSHARED -fPIC -shared -o %t.so && \
// RUN: %libomptarget-compileopt-generic %t.so && %libomptarget-run-generic 2>&1 | %fcheck-generic
+//
+// UNSUPPORTED: x86_64-pc-linux-gnu
#ifdef SHARED
void foo() {}
diff --git a/offload/test/offloading/fortran/dump_map_tables.f90 b/offload/test/offloading/fortran/dump_map_tables.f90
new file mode 100644
index 000000000000..cb66ef348e3c
--- /dev/null
+++ b/offload/test/offloading/fortran/dump_map_tables.f90
@@ -0,0 +1,38 @@
+! Offloading test with a runtime call to ompx_dump_mapping_tables.
+! A Fortran array is mapped to the device and the mapping tables are dumped;
+! the host then correctly receives the updates made on the device.
+! REQUIRES: flang
+! UNSUPPORTED: nvptx64-nvidia-cuda-LTO
+! UNSUPPORTED: aarch64-unknown-linux-gnu
+! UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+! UNSUPPORTED: x86_64-pc-linux-gnu
+! UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+! RUN: %libomptarget-compile-fortran-run-and-check-generic
+
+program map_dump_example
+ INTERFACE
+ SUBROUTINE ompx_dump_mapping_tables() BIND(C)
+ END SUBROUTINE ompx_dump_mapping_tables
+ END INTERFACE
+
+ integer i,j,k,N
+ integer async_q(4)
+ real :: A(5000000)
+ N=5000000
+ do i=1, N
+ A(i)=0
+ enddo
+! clang-format off
+! CHECK: omptarget device 0 info: OpenMP Host-Device pointer mappings after block
+! CHECK-NEXT: omptarget device 0 info: Host Ptr Target Ptr Size (B) DynRefCount HoldRefCount Declaration
+! CHECK-NEXT: omptarget device 0 info: {{(0x[0-9a-f]{16})}} {{(0x[0-9a-f]{16})}} 20000000 1 0 {{.*}} at a(:n):21:11
+! clang-format on
+!$omp target enter data map(to:A(:N))
+ call ompx_dump_mapping_tables()
+!$omp target parallel do
+ do i=1, N
+ A(i)=A(i)*2
+ enddo
+!$omp target exit data map(from:A)
+end program
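For reference, the 20000000 in the CHECK line above is 5,000,000 REAL(4) elements times 4 bytes. The same dump can be requested from C or C++ code; a sketch under the assumption that ompx_dump_mapping_tables is linkable exactly as the test's BIND(C) interface suggests (the extern declaration below is written out by hand):

    // Hand-written declaration of the libomptarget extension used by the
    // Fortran test above; it may not appear in a public header (assumption).
    extern "C" void ompx_dump_mapping_tables();

    static float A[5000000]; // 5,000,000 * 4 B = 20,000,000 B, as in the CHECK line.

    int main() {
    #pragma omp target enter data map(to: A[0:5000000])
      ompx_dump_mapping_tables(); // Prints host/target pointers, size, refcounts.
    #pragma omp target exit data map(release: A[0:5000000])
      return 0;
    }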
diff --git a/offload/test/offloading/ompx_bare_ballot_sync.c b/offload/test/offloading/ompx_bare_ballot_sync.c
new file mode 100644
index 000000000000..d8e17691bf9c
--- /dev/null
+++ b/offload/test/offloading/ompx_bare_ballot_sync.c
@@ -0,0 +1,45 @@
+// RUN: %libomptarget-compilexx-run-and-check-generic
+//
+// UNSUPPORTED: x86_64-pc-linux-gnu
+// UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+// UNSUPPORTED: aarch64-unknown-linux-gnu
+// UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+// UNSUPPORTED: s390x-ibm-linux-gnu
+// UNSUPPORTED: s390x-ibm-linux-gnu-LTO
+
+#if defined __AMDGCN_WAVEFRONT_SIZE && __AMDGCN_WAVEFRONT_SIZE == 64
+#define MASK 0xaaaaaaaaaaaaaaaa
+#else
+#define MASK 0xaaaaaaaa
+#endif
+
+#include <assert.h>
+#include <ompx.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(int argc, char *argv[]) {
+ const int num_blocks = 1;
+ const int block_size = 256;
+ const int N = num_blocks * block_size;
+ uint64_t *data = (uint64_t *)malloc(N * sizeof(uint64_t));
+
+ for (int i = 0; i < N; ++i)
+ data[i] = i & 0x1;
+
+#pragma omp target teams ompx_bare num_teams(num_blocks) thread_limit(block_size) map(tofrom: data[0:N])
+ {
+ int tid = ompx_thread_id_x();
+ uint64_t mask = ompx_ballot_sync(~0U, data[tid]);
+ data[tid] += mask;
+ }
+
+ for (int i = 0; i < N; ++i)
+ assert(data[i] == ((i & 0x1) + MASK));
+
+ // CHECK: PASS
+ printf("PASS\n");
+
+ return 0;
+}
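The arithmetic behind MASK in the test above: ompx_ballot_sync returns a bitmask with bit i set iff lane i's predicate is nonzero, and data[tid] = tid & 0x1 makes exactly the odd lanes vote, giving the 0xAA pattern repeated across the 32-lane warp (or 64-lane wavefront). A host-side sanity check of that arithmetic, a sketch only rather than the device intrinsic:

    #include <cassert>
    #include <cstdint>

    // Emulate ballot over one 32-lane group: bit `lane` is set iff that
    // lane's predicate is nonzero.
    static uint32_t ballot32(const int pred[32]) {
      uint32_t mask = 0;
      for (int lane = 0; lane < 32; ++lane)
        if (pred[lane])
          mask |= uint32_t{1} << lane;
      return mask;
    }

    int main() {
      int pred[32];
      for (int i = 0; i < 32; ++i)
        pred[i] = i & 0x1; // Odd lanes vote 1, as in the test.
      assert(ballot32(pred) == 0xaaaaaaaau); // Matches MASK for 32-lane groups.
      return 0;
    }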
diff --git a/openmp/CMakeLists.txt b/openmp/CMakeLists.txt
index 9097ca562300..c228a392e4c7 100644
--- a/openmp/CMakeLists.txt
+++ b/openmp/CMakeLists.txt
@@ -1,4 +1,5 @@
cmake_minimum_required(VERSION 3.20.0)
+set(LLVM_SUBPROJECT_TITLE "OpenMP")
set(LLVM_COMMON_CMAKE_UTILS ${CMAKE_CURRENT_SOURCE_DIR}/../cmake)
diff --git a/openmp/cmake/OpenMPTesting.cmake b/openmp/cmake/OpenMPTesting.cmake
index ab2348ae59b5..c67ad8b1cbd9 100644
--- a/openmp/cmake/OpenMPTesting.cmake
+++ b/openmp/cmake/OpenMPTesting.cmake
@@ -58,7 +58,7 @@ if (${OPENMP_STANDALONE_BUILD})
set(DEFAULT_LIT_ARGS "${DEFAULT_LIT_ARGS} --no-progress-bar")
endif()
if (${CMAKE_SYSTEM_NAME} MATCHES "AIX")
- set(DEFAULT_LIT_ARGS "${DEFAULT_LIT_ARGS} --time-tests --timeout=1800")
+ set(DEFAULT_LIT_ARGS "${DEFAULT_LIT_ARGS} --time-tests --timeout=3000")
endif()
set(OPENMP_LIT_ARGS "${DEFAULT_LIT_ARGS}" CACHE STRING "Options for lit.")
separate_arguments(OPENMP_LIT_ARGS)
diff --git a/openmp/docs/CMakeLists.txt b/openmp/docs/CMakeLists.txt
index 1e4be31a6f60..4cb9fb486ff3 100644
--- a/openmp/docs/CMakeLists.txt
+++ b/openmp/docs/CMakeLists.txt
@@ -78,6 +78,7 @@ if (LLVM_ENABLE_DOXYGEN)
COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/doxygen.cfg
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating openmp doxygen documentation." VERBATIM)
+ set_target_properties(doxygen-openmp PROPERTIES FOLDER "OpenMP/Docs")
if (LLVM_BUILD_DOCS)
add_dependencies(doxygen doxygen-openmp)
diff --git a/openmp/docs/SupportAndFAQ.rst b/openmp/docs/SupportAndFAQ.rst
index 9e6974dfbb13..a158422befd0 100644
--- a/openmp/docs/SupportAndFAQ.rst
+++ b/openmp/docs/SupportAndFAQ.rst
@@ -454,6 +454,15 @@ Q: What command line options can I use for OpenMP?
We recommend taking a look at the OpenMP
:doc:`command line argument reference <CommandLineArgumentReference>` page.
+Q: Can I build the offloading runtimes without CUDA or HSA?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+By default, the offloading runtime loads the associated vendor runtimes
+during initialization rather than linking against them directly. This allows
+the program to be built and run on machines without the vendor libraries. To
+link against these libraries directly instead, set the
+``LIBOMPTARGET_DLOPEN_PLUGINS=""`` option to suppress dynamic loading for each
+plugin. The default value is every plugin enabled with
+``LIBOMPTARGET_PLUGINS_TO_BUILD``.
+
Q: Why is my build taking a long time?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When installing OpenMP and other LLVM components, the build time on multicore
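Mechanically, the "load rather than link" default described in the new FAQ entry amounts to resolving the vendor runtime via dlopen/dlsym when a plugin initializes. A schematic of the CUDA case; the library name and the single resolved symbol are illustrative, not the exact plugin code:

    #include <dlfcn.h>

    // Resolve the vendor runtime at startup so the same binary also runs on
    // machines that do not have it installed. Illustrative sketch only.
    using cuInit_t = int (*)(unsigned);

    bool tryInitCuda() {
      void *Handle = dlopen("libcuda.so.1", RTLD_LAZY);
      if (!Handle)
        return false; // No CUDA here: the plugin reports zero devices.
      auto CuInit = reinterpret_cast<cuInit_t>(dlsym(Handle, "cuInit"));
      return CuInit && CuInit(0) == 0; // CUDA_SUCCESS == 0.
    }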
diff --git a/openmp/docs/remarks/OMP121.rst b/openmp/docs/remarks/OMP121.rst
index 88561b8a1fe1..f3ceeac7f3ab 100644
--- a/openmp/docs/remarks/OMP121.rst
+++ b/openmp/docs/remarks/OMP121.rst
@@ -1,6 +1,6 @@
.. _omp121:
-Value has potential side effects preventing SPMD-mode execution. Add `__attribute__((assume(\"ompx_spmd_amenable\")))` to the called function to override. [OMP121]
+Value has potential side effects preventing SPMD-mode execution. Add `[[omp::assume(\"ompx_spmd_amenable\")]]` to the called function to override. [OMP121]
===================================================================================================================================================================
This analysis remark indicates that a potential side-effect that cannot be
@@ -42,7 +42,7 @@ or operations that cannot be executed in SPMD-mode.
$ clang++ -fopenmp -fopenmp-targets=nvptx64 -O2 -Rpass-analysis=openmp-opt omp121.cpp
omp121.cpp:8:13: remark: Value has potential side effects preventing SPMD-mode
- execution. Add `__attribute__((assume("ompx_spmd_amenable")))` to the called function
+ execution. Add `[[omp::assume("ompx_spmd_amenable")]]` to the called function
to override. [OMP121]
int x = work();
^
@@ -53,7 +53,7 @@ contain any code that prevents SPMD-mode execution.
.. code-block:: c++
- __attribute__((assume("ompx_spmd_amenable"))) extern int work();
+ [[omp::assume("ompx_spmd_amenable")]] extern int work();
void use(int x);
diff --git a/openmp/docs/remarks/OMP133.rst b/openmp/docs/remarks/OMP133.rst
index f025352de105..5a734479d495 100644
--- a/openmp/docs/remarks/OMP133.rst
+++ b/openmp/docs/remarks/OMP133.rst
@@ -1,4 +1,4 @@
-Call may contain unknown parallel regions. Use `__attribute__((assume("omp_no_parallelism")))` to override. [OMP133]
+Call may contain unknown parallel regions. Use `[[omp::assume("omp_no_parallelism")]]` to override. [OMP133]
====================================================================================================================
.. _omp133:
@@ -33,7 +33,7 @@ regions. This is typically coupled with the :ref:`OMP132 <omp132>` remark.
$ clang++ -fopenmp -fopenmp-targets=nvptx64 -O2 -Rpass-analysis=openmp-opt omp133.cpp
omp133.cpp:6:5: remark: Call may contain unknown parallel regions. Use
- `__attribute__((assume("omp_no_parallelism")))` to override. [OMP133]
+ `[[omp::assume("omp_no_parallelism")]]` to override. [OMP133]
setup();
^
@@ -43,7 +43,7 @@ specialized state machine.
.. code-block:: c++
- __attribute__((assume("omp_no_parallelism"))) extern void setup();
+ [[omp::assume("omp_no_parallelism")]] extern void setup();
void foo() {
diff --git a/openmp/docs/remarks/OptimizationRemarks.rst b/openmp/docs/remarks/OptimizationRemarks.rst
index a29dce60e073..2c683a4376c4 100644
--- a/openmp/docs/remarks/OptimizationRemarks.rst
+++ b/openmp/docs/remarks/OptimizationRemarks.rst
@@ -81,7 +81,7 @@ OpenMP Remarks
* - :ref:`OMP121 <omp121>`
- Analysis
- Value has potential side effects preventing SPMD-mode execution. Add
- `__attribute__((assume(\"ompx_spmd_amenable\")))` to the called function
+ `[[omp::assume(\"ompx_spmd_amenable\")]]` to the called function
to override.
* - :ref:`OMP130 <omp130>`
- Optimization
@@ -96,7 +96,7 @@ OpenMP Remarks
* - :ref:`OMP133 <omp133>`
- Analysis
- Call may contain unknown parallel regions. Use
- `__attribute__((assume("omp_no_parallelism")))` to override.
+ `[[omp::assume("omp_no_parallelism")]]` to override.
* - :ref:`OMP140 <omp140>`
- Analysis
- Could not internalize function. Some optimizations may not be possible.
diff --git a/openmp/runtime/cmake/LibompMicroTests.cmake b/openmp/runtime/cmake/LibompMicroTests.cmake
index e8cc218af0c2..6fcde3725993 100644
--- a/openmp/runtime/cmake/LibompMicroTests.cmake
+++ b/openmp/runtime/cmake/LibompMicroTests.cmake
@@ -126,6 +126,7 @@ macro(libomp_test_touch_recipe test_touch_dir)
endmacro()
libomp_append(libomp_test_touch_env "KMP_VERSION=1")
add_custom_target(libomp-test-touch DEPENDS ${libomp_test_touch_targets})
+set_target_properties(libomp-test-touch PROPERTIES FOLDER "OpenMP/Tests")
if(WIN32)
libomp_test_touch_recipe(test-touch-mt)
libomp_test_touch_recipe(test-touch-md)
@@ -135,6 +136,7 @@ endif()
# test-relo
add_custom_target(libomp-test-relo DEPENDS test-relo/.success)
+set_target_properties(libomp-test-relo PROPERTIES FOLDER "OpenMP/Tests")
add_custom_command(
OUTPUT test-relo/.success test-relo/readelf.log
COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/test-relo
@@ -146,6 +148,7 @@ add_custom_command(
# test-execstack
add_custom_target(libomp-test-execstack DEPENDS test-execstack/.success)
+set_target_properties(libomp-test-execstack PROPERTIES FOLDER "OpenMP/Tests")
add_custom_command(
OUTPUT test-execstack/.success
COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/test-execstack
@@ -157,6 +160,7 @@ add_custom_command(
# test-instr
add_custom_target(libomp-test-instr DEPENDS test-instr/.success)
+set_target_properties(libomp-test-instr PROPERTIES FOLDER "OpenMP/Tests")
add_custom_command(
OUTPUT test-instr/.success
COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/test-instr
@@ -168,6 +172,7 @@ add_custom_command(
# test-deps
add_custom_target(libomp-test-deps DEPENDS test-deps/.success)
+set_target_properties(libomp-test-deps PROPERTIES FOLDER "OpenMP/Tests")
set(libomp_expected_library_deps)
if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
set(libomp_expected_library_deps libc.so.7 libthr.so.3 libm.so.5)
diff --git a/openmp/runtime/src/CMakeLists.txt b/openmp/runtime/src/CMakeLists.txt
index 94eeea63b804..612d784be8a5 100644
--- a/openmp/runtime/src/CMakeLists.txt
+++ b/openmp/runtime/src/CMakeLists.txt
@@ -170,6 +170,7 @@ libomp_get_libflags(LIBOMP_CONFIGURED_LIBFLAGS)
# Build libomp library. Add LLVMSupport dependency if building in-tree with libomptarget profiling enabled.
if(OPENMP_STANDALONE_BUILD OR (NOT OPENMP_ENABLE_LIBOMP_PROFILING))
add_library(omp ${LIBOMP_LIBRARY_KIND} ${LIBOMP_SOURCE_FILES})
+ set_property(TARGET omp PROPERTY FOLDER "OpenMP/Libraries")
# Linking command will include libraries in LIBOMP_CONFIGURED_LIBFLAGS
target_link_libraries(omp ${LIBOMP_CONFIGURED_LIBFLAGS} ${LIBOMP_DL_LIBS})
else()
@@ -251,6 +252,7 @@ set(LIBOMPTARGET_OPENMP_HOST_RTL_FOLDER "${LIBOMP_LIBRARY_DIR}" CACHE STRING
# Create *.inc before compiling any sources
# objects depend on : .inc files
add_custom_target(libomp-needed-headers DEPENDS kmp_i18n_id.inc kmp_i18n_default.inc)
+set_target_properties(libomp-needed-headers PROPERTIES FOLDER "OpenMP/Sourcegenning")
add_dependencies(omp libomp-needed-headers)
# Windows specific build rules
@@ -292,6 +294,7 @@ if(WIN32)
set(LIBOMP_IMP_LIB_TARGET omp)
set(LIBOMP_GENERATED_DEF_FILE ${LIBOMP_LIB_NAME}.def)
add_custom_target(libomp-needed-def-file DEPENDS ${LIBOMP_GENERATED_DEF_FILE})
+ set_target_properties(libomp-needed-def-file PROPERTIES FOLDER "OpenMP/Sourcegenning")
add_dependencies(omp libomp-needed-def-file)
# Create the main def file with ordinals to use for building the runtime dll to maintain backwards compatible exports order
@@ -310,6 +313,7 @@ if(WIN32)
# Create the auxiliary def file without ordinals to use for building the import library to import by name
set(LIBOMPIMP_GENERATED_DEF_FILE ${LIBOMP_LIB_NAME}.imp.def)
add_custom_target(libompimp-needed-def-file DEPENDS ${LIBOMPIMP_GENERATED_DEF_FILE})
+ set_target_properties(libompimp-needed-def-file PROPERTIES FOLDER "OpenMP/Resources")
add_custom_command(
OUTPUT ${LIBOMPIMP_GENERATED_DEF_FILE}
COMMAND ${PERL_EXECUTABLE} ${LIBOMP_TOOLS_DIR}/generate-def.pl ${LIBOMP_GDFLAGS} -D NAME=${LIBOMP_LIB_FILE} -D NOORDINALS
@@ -319,6 +323,7 @@ if(WIN32)
# while this merely generates an import library off a def file, CMAKE still requires it to have a "source" so feed it a dummy one,
# making it a .txt which CMAKE will filter out from the librarian (a .cpp will make lib.exe punt trying to resolve the .def symbols)
add_library(${LIBOMP_IMP_LIB_TARGET} STATIC kmp_dummy.txt)
+ set_target_properties(${LIBOMP_IMP_LIB_TARGET} PROPERTIES FOLDER "OpenMP/Libraries")
set_target_properties(${LIBOMP_IMP_LIB_TARGET} PROPERTIES
PREFIX "" SUFFIX "" OUTPUT_NAME "${LIBOMP_IMP_LIB_FILE}" LINKER_LANGUAGE ${LIBOMP_LINKER_LANGUAGE}
STATIC_LIBRARY_OPTIONS "${CMAKE_LINK_DEF_FILE_FLAG}${CMAKE_CURRENT_BINARY_DIR}/${LIBOMPIMP_GENERATED_DEF_FILE}")
@@ -354,6 +359,7 @@ elseif(${LIBOMP_FORTRAN_MODULES})
set(ADDITIONAL_Fortran_FLAGS "-fno-range-check")
endif()
add_custom_target(libomp-mod ALL DEPENDS omp_lib.mod omp_lib_kinds.mod)
+ set_target_properties(libomp-mod PROPERTIES FOLDER "OpenMP/Misc")
libomp_get_fflags(LIBOMP_CONFIGURED_FFLAGS)
if(CMAKE_Fortran_COMPILER_SUPPORTS_F90)
set(LIBOMP_FORTRAN_SOURCE_FILE omp_lib.F90)
@@ -379,6 +385,7 @@ endif()
# Micro test rules for after library has been built (cmake/LibompMicroTests.cmake)
include(LibompMicroTests)
add_custom_target(libomp-micro-tests)
+set_target_properties(libomp-micro-tests PROPERTIES FOLDER "OpenMP/Tests")
if(NOT ${MIC} AND NOT CMAKE_CROSSCOMPILING)
add_dependencies(libomp-micro-tests libomp-test-touch)
endif()
diff --git a/openmp/runtime/src/include/ompx.h.var b/openmp/runtime/src/include/ompx.h.var
index 579d31aa98c5..19851880c3ac 100644
--- a/openmp/runtime/src/include/ompx.h.var
+++ b/openmp/runtime/src/include/ompx.h.var
@@ -9,6 +9,8 @@
#ifndef __OMPX_H
#define __OMPX_H
+typedef unsigned long uint64_t;
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -81,6 +83,10 @@ _TGT_KERNEL_LANGUAGE_HOST_IMPL_SYNC_C(void, sync_block_divergent, int Ordering,
#undef _TGT_KERNEL_LANGUAGE_HOST_IMPL_SYNC_C
///}
+static inline uint64_t ompx_ballot_sync(uint64_t mask, int pred) {
+ __builtin_trap();
+}
+
#pragma omp end declare variant
/// ompx_{sync_block}_{,divergent}
@@ -109,6 +115,8 @@ _TGT_KERNEL_LANGUAGE_DECL_GRID_C(grid_dim)
#undef _TGT_KERNEL_LANGUAGE_DECL_GRID_C
///}
+uint64_t ompx_ballot_sync(uint64_t mask, int pred);
+
#ifdef __cplusplus
}
#endif
@@ -160,6 +168,10 @@ _TGT_KERNEL_LANGUAGE_HOST_IMPL_SYNC_CXX(void, sync_block_divergent,
#undef _TGT_KERNEL_LANGUAGE_HOST_IMPL_SYNC_CXX
///}
+static inline uint64_t ballot_sync(uint64_t mask, int pred) {
+ return ompx_ballot_sync(mask, pred);
+}
+
} // namespace ompx
#endif
diff --git a/openmp/runtime/test/lit.cfg b/openmp/runtime/test/lit.cfg
index e8f7f3470580..14c746898213 100644
--- a/openmp/runtime/test/lit.cfg
+++ b/openmp/runtime/test/lit.cfg
@@ -171,10 +171,14 @@ config.substitutions.append(("%libomp-c99-compile-and-run", \
"%libomp-c99-compile && %libomp-run"))
config.substitutions.append(("%libomp-cxx-compile-and-run", \
"%libomp-cxx-compile && %libomp-run"))
+config.substitutions.append(("%libomp-cxx20-compile-and-run", \
+ "%libomp-cxx20-compile && %libomp-run"))
config.substitutions.append(("%libomp-cxx-compile-c", \
"%clangXX %openmp_flags %flags -std=c++17 -x c++ %s -o %t" + libs))
config.substitutions.append(("%libomp-cxx-compile", \
"%clangXX %openmp_flags %flags -std=c++17 %s -o %t" + libs))
+config.substitutions.append(("%libomp-cxx20-compile", \
+ "%clangXX %openmp_flags %flags -std=c++20 %s -o %t" + libs))
config.substitutions.append(("%libomp-compile", \
"%clang %openmp_flags %flags %s -o %t" + libs))
config.substitutions.append(("%libomp-irbuilder-compile", \
diff --git a/openmp/runtime/test/transform/tile/foreach.cpp b/openmp/runtime/test/transform/tile/foreach.cpp
new file mode 100644
index 000000000000..4fb359576097
--- /dev/null
+++ b/openmp/runtime/test/transform/tile/foreach.cpp
@@ -0,0 +1,228 @@
+// RUN: %libomp-cxx20-compile-and-run | FileCheck %s --match-full-lines
+
+#ifndef HEADER
+#define HEADER
+
+#include <cstdlib>
+#include <cstdarg>
+#include <cstdio>
+#include <vector>
+
+struct Reporter {
+ const char *name;
+
+ Reporter(const char *name) : name(name) { print("ctor"); }
+
+ Reporter() : name("<anon>") { print("ctor"); }
+
+ Reporter(const Reporter &that) : name(that.name) { print("copy ctor"); }
+
+ Reporter(Reporter &&that) : name(that.name) { print("move ctor"); }
+
+ ~Reporter() { print("dtor"); }
+
+ const Reporter &operator=(const Reporter &that) {
+ print("copy assign");
+ this->name = that.name;
+ return *this;
+ }
+
+ const Reporter &operator=(Reporter &&that) {
+ print("move assign");
+ this->name = that.name;
+ return *this;
+ }
+
+ struct Iterator {
+ const Reporter *owner;
+ int pos;
+
+ Iterator(const Reporter *owner, int pos) : owner(owner), pos(pos) {}
+
+ Iterator(const Iterator &that) : owner(that.owner), pos(that.pos) {
+ owner->print("iterator copy ctor");
+ }
+
+ Iterator(Iterator &&that) : owner(that.owner), pos(that.pos) {
+ owner->print("iterator move ctor");
+ }
+
+ ~Iterator() { owner->print("iterator dtor"); }
+
+ const Iterator &operator=(const Iterator &that) {
+ owner->print("iterator copy assign");
+ this->owner = that.owner;
+ this->pos = that.pos;
+ return *this;
+ }
+
+ const Iterator &operator=(Iterator &&that) {
+ owner->print("iterator move assign");
+ this->owner = that.owner;
+ this->pos = that.pos;
+ return *this;
+ }
+
+ bool operator==(const Iterator &that) const {
+ owner->print("iterator %d == %d", 2 - this->pos, 2 - that.pos);
+ return this->pos == that.pos;
+ }
+
+ Iterator &operator++() {
+ owner->print("iterator prefix ++");
+ pos -= 1;
+ return *this;
+ }
+
+ Iterator operator++(int) {
+ owner->print("iterator postfix ++");
+ auto result = *this;
+ pos -= 1;
+ return result;
+ }
+
+ int operator*() const {
+ int result = 2 - pos;
+ owner->print("iterator deref: %i", result);
+ return result;
+ }
+
+ size_t operator-(const Iterator &that) const {
+ int result = (2 - this->pos) - (2 - that.pos);
+ owner->print("iterator distance: %d", result);
+ return result;
+ }
+
+ Iterator operator+(int steps) const {
+ owner->print("iterator advance: %i += %i", 2 - this->pos, steps);
+ return Iterator(owner, pos - steps);
+ }
+
+ void print(const char *msg) const { owner->print(msg); }
+ };
+
+ Iterator begin() const {
+ print("begin()");
+ return Iterator(this, 2);
+ }
+
+ Iterator end() const {
+ print("end()");
+ return Iterator(this, -1);
+ }
+
+ void print(const char *msg, ...) const {
+ va_list args;
+ va_start(args, msg);
+ printf("[%s] ", name);
+ vprintf(msg, args);
+ printf("\n");
+ va_end(args);
+ }
+};
+
+int main() {
+ printf("do\n");
+#pragma omp tile sizes(2, 2)
+ for (Reporter c{"C"}; auto &&v : Reporter("A"))
+ for (Reporter d{"D"}; auto &&w : Reporter("B"))
+ printf("v=%d w=%d\n", v, w);
+ printf("done\n");
+ return EXIT_SUCCESS;
+}
+
+#endif /* HEADER */
+
+// CHECK: do
+// CHECK-NEXT: [C] ctor
+// CHECK-NEXT: [A] ctor
+// CHECK-NEXT: [A] end()
+// CHECK-NEXT: [A] begin()
+// CHECK-NEXT: [A] begin()
+// CHECK-NEXT: [A] iterator distance: 3
+// CHECK-NEXT: [D] ctor
+// CHECK-NEXT: [B] ctor
+// CHECK-NEXT: [B] end()
+// CHECK-NEXT: [B] begin()
+// CHECK-NEXT: [B] begin()
+// CHECK-NEXT: [B] iterator distance: 3
+// CHECK-NEXT: [A] iterator advance: 0 += 0
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 0
+// CHECK-NEXT: [B] iterator advance: 0 += 0
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 0
+// CHECK-NEXT: v=0 w=0
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator advance: 0 += 1
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 1
+// CHECK-NEXT: v=0 w=1
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 1
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 1
+// CHECK-NEXT: [B] iterator advance: 0 += 0
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 0
+// CHECK-NEXT: v=1 w=0
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator advance: 0 += 1
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 1
+// CHECK-NEXT: v=1 w=1
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 0
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 0
+// CHECK-NEXT: [B] iterator advance: 0 += 2
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 2
+// CHECK-NEXT: v=0 w=2
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 1
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 1
+// CHECK-NEXT: [B] iterator advance: 0 += 2
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 2
+// CHECK-NEXT: v=1 w=2
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 2
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 2
+// CHECK-NEXT: [B] iterator advance: 0 += 0
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 0
+// CHECK-NEXT: v=2 w=0
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator advance: 0 += 1
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 1
+// CHECK-NEXT: v=2 w=1
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 2
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 2
+// CHECK-NEXT: [B] iterator advance: 0 += 2
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 2
+// CHECK-NEXT: v=2 w=2
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] dtor
+// CHECK-NEXT: [D] dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] dtor
+// CHECK-NEXT: [C] dtor
+// CHECK-NEXT: done
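Two details worth noting in the test above: the loops use C++20 range-for init-statements (for (Reporter c{"C"}; auto &&v : ...)), which is why the new %libomp-cxx20-compile-and-run substitution is needed, and the v/w order in the CHECK lines is exactly what sizes(2, 2) tiling of a 3x3 iteration space yields. For plain integers the transformation is equivalent to this hand-tiled nest (a sketch of the semantics, not the compiler's actual output):

    #include <algorithm>
    #include <cstdio>

    int main() {
      // Hand-written equivalent of "#pragma omp tile sizes(2, 2)" on a 3x3
      // space: outer loops step tile by tile, inner loops walk the (possibly
      // partial) tile. Reproduces the v=.. w=.. order checked above.
      for (int v0 = 0; v0 < 3; v0 += 2)
        for (int w0 = 0; w0 < 3; w0 += 2)
          for (int v = v0; v < std::min(v0 + 2, 3); ++v)
            for (int w = w0; w < std::min(w0 + 2, 3); ++w)
              printf("v=%d w=%d\n", v, w);
      return 0;
    }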
diff --git a/openmp/runtime/test/transform/tile/iterfor.cpp b/openmp/runtime/test/transform/tile/iterfor.cpp
new file mode 100644
index 000000000000..12613544f6e5
--- /dev/null
+++ b/openmp/runtime/test/transform/tile/iterfor.cpp
@@ -0,0 +1,233 @@
+// RUN: %libomp-cxx20-compile-and-run | FileCheck %s --match-full-lines
+
+#ifndef HEADER
+#define HEADER
+
+#include <cstdlib>
+#include <cstdarg>
+#include <cstdio>
+#include <vector>
+
+struct Reporter {
+ const char *name;
+
+ Reporter(const char *name) : name(name) { print("ctor"); }
+
+ Reporter() : name("<anon>") { print("ctor"); }
+
+ Reporter(const Reporter &that) : name(that.name) { print("copy ctor"); }
+
+ Reporter(Reporter &&that) : name(that.name) { print("move ctor"); }
+
+ ~Reporter() { print("dtor"); }
+
+ const Reporter &operator=(const Reporter &that) {
+ print("copy assign");
+ this->name = that.name;
+ return *this;
+ }
+
+ const Reporter &operator=(Reporter &&that) {
+ print("move assign");
+ this->name = that.name;
+ return *this;
+ }
+
+ struct Iterator {
+ const Reporter *owner;
+ int pos;
+
+ Iterator(const Reporter *owner, int pos) : owner(owner), pos(pos) {}
+
+ Iterator(const Iterator &that) : owner(that.owner), pos(that.pos) {
+ owner->print("iterator copy ctor");
+ }
+
+ Iterator(Iterator &&that) : owner(that.owner), pos(that.pos) {
+ owner->print("iterator move ctor");
+ }
+
+ ~Iterator() { owner->print("iterator dtor"); }
+
+ const Iterator &operator=(const Iterator &that) {
+ owner->print("iterator copy assign");
+ this->owner = that.owner;
+ this->pos = that.pos;
+ return *this;
+ }
+
+ const Iterator &operator=(Iterator &&that) {
+ owner->print("iterator move assign");
+ this->owner = that.owner;
+ this->pos = that.pos;
+ return *this;
+ }
+
+ bool operator==(const Iterator &that) const {
+ owner->print("iterator %d == %d", 2 - this->pos, 2 - that.pos);
+ return this->pos == that.pos;
+ }
+
+ bool operator!=(const Iterator &that) const {
+ owner->print("iterator %d != %d", 2 - this->pos, 2 - that.pos);
+      return this->pos != that.pos;
+ }
+
+ Iterator &operator++() {
+ owner->print("iterator prefix ++");
+ pos -= 1;
+ return *this;
+ }
+
+ Iterator operator++(int) {
+ owner->print("iterator postfix ++");
+ auto result = *this;
+ pos -= 1;
+ return result;
+ }
+
+ int operator*() const {
+ int result = 2 - pos;
+ owner->print("iterator deref: %i", result);
+ return result;
+ }
+
+ size_t operator-(const Iterator &that) const {
+ int result = (2 - this->pos) - (2 - that.pos);
+ owner->print("iterator distance: %d", result);
+ return result;
+ }
+
+ Iterator operator+(int steps) const {
+ owner->print("iterator advance: %i += %i", 2 - this->pos, steps);
+ return Iterator(owner, pos - steps);
+ }
+ };
+
+ Iterator begin() const {
+ print("begin()");
+ return Iterator(this, 2);
+ }
+
+ Iterator end() const {
+ print("end()");
+ return Iterator(this, -1);
+ }
+
+ void print(const char *msg, ...) const {
+ va_list args;
+ va_start(args, msg);
+ printf("[%s] ", name);
+ vprintf(msg, args);
+ printf("\n");
+ va_end(args);
+ }
+};
+
+int main() {
+ printf("do\n");
+ {
+ Reporter A("A"), B("B");
+#pragma omp tile sizes(2, 2)
+ for (auto it = A.begin(); it != A.end(); ++it)
+ for (auto jt = B.begin(); jt != B.end(); ++jt)
+ printf("i=%d j=%d\n", *it, *jt);
+ }
+ printf("done\n");
+ return EXIT_SUCCESS;
+}
+
+#endif /* HEADER */
+
+// CHECK: do
+// CHECK-NEXT: [A] ctor
+// CHECK-NEXT: [B] ctor
+// CHECK-NEXT: [A] begin()
+// CHECK-NEXT: [A] begin()
+// CHECK-NEXT: [A] end()
+// CHECK-NEXT: [A] iterator distance: 3
+// CHECK-NEXT: [B] begin()
+// CHECK-NEXT: [B] begin()
+// CHECK-NEXT: [B] end()
+// CHECK-NEXT: [B] iterator distance: 3
+// CHECK-NEXT: [A] iterator advance: 0 += 0
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [B] iterator advance: 0 += 0
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 0
+// CHECK-NEXT: [B] iterator deref: 0
+// CHECK-NEXT: i=0 j=0
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator advance: 0 += 1
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 0
+// CHECK-NEXT: [B] iterator deref: 1
+// CHECK-NEXT: i=0 j=1
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 1
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [B] iterator advance: 0 += 0
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 1
+// CHECK-NEXT: [B] iterator deref: 0
+// CHECK-NEXT: i=1 j=0
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator advance: 0 += 1
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 1
+// CHECK-NEXT: [B] iterator deref: 1
+// CHECK-NEXT: i=1 j=1
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 0
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [B] iterator advance: 0 += 2
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 0
+// CHECK-NEXT: [B] iterator deref: 2
+// CHECK-NEXT: i=0 j=2
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 1
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [B] iterator advance: 0 += 2
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 1
+// CHECK-NEXT: [B] iterator deref: 2
+// CHECK-NEXT: i=1 j=2
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 2
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [B] iterator advance: 0 += 0
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 2
+// CHECK-NEXT: [B] iterator deref: 0
+// CHECK-NEXT: i=2 j=0
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator advance: 0 += 1
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 2
+// CHECK-NEXT: [B] iterator deref: 1
+// CHECK-NEXT: i=2 j=1
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 2
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [B] iterator advance: 0 += 2
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 2
+// CHECK-NEXT: [B] iterator deref: 2
+// CHECK-NEXT: i=2 j=2
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [B] dtor
+// CHECK-NEXT: [A] dtor
+// CHECK-NEXT: done
diff --git a/openmp/runtime/test/transform/tile/parallel-wsloop-collapse-foreach.cpp b/openmp/runtime/test/transform/tile/parallel-wsloop-collapse-foreach.cpp
new file mode 100644
index 000000000000..b1f4d98a52dd
--- /dev/null
+++ b/openmp/runtime/test/transform/tile/parallel-wsloop-collapse-foreach.cpp
@@ -0,0 +1,366 @@
+// RUN: %libomp-cxx20-compile-and-run | FileCheck %s --match-full-lines
+
+#ifndef HEADER
+#define HEADER
+
+#include <cstdlib>
+#include <cstdarg>
+#include <cstdio>
+#include <vector>
+
+struct Reporter {
+ const char *name;
+
+ Reporter(const char *name) : name(name) { print("ctor"); }
+
+ Reporter() : name("<anon>") { print("ctor"); }
+
+ Reporter(const Reporter &that) : name(that.name) { print("copy ctor"); }
+
+ Reporter(Reporter &&that) : name(that.name) { print("move ctor"); }
+
+ ~Reporter() { print("dtor"); }
+
+ const Reporter &operator=(const Reporter &that) {
+ print("copy assign");
+ this->name = that.name;
+ return *this;
+ }
+
+ const Reporter &operator=(Reporter &&that) {
+ print("move assign");
+ this->name = that.name;
+ return *this;
+ }
+
+ struct Iterator {
+ const Reporter *owner;
+ int pos;
+
+ Iterator(const Reporter *owner, int pos) : owner(owner), pos(pos) {}
+
+ Iterator(const Iterator &that) : owner(that.owner), pos(that.pos) {
+ owner->print("iterator copy ctor");
+ }
+
+ Iterator(Iterator &&that) : owner(that.owner), pos(that.pos) {
+ owner->print("iterator move ctor");
+ }
+
+ ~Iterator() { owner->print("iterator dtor"); }
+
+ const Iterator &operator=(const Iterator &that) {
+ owner->print("iterator copy assign");
+ this->owner = that.owner;
+ this->pos = that.pos;
+ return *this;
+ }
+
+ const Iterator &operator=(Iterator &&that) {
+ owner->print("iterator move assign");
+ this->owner = that.owner;
+ this->pos = that.pos;
+ return *this;
+ }
+
+ bool operator==(const Iterator &that) const {
+ owner->print("iterator %d == %d", 2 - this->pos, 2 - that.pos);
+ return this->pos == that.pos;
+ }
+
+ Iterator &operator++() {
+ owner->print("iterator prefix ++");
+ pos -= 1;
+ return *this;
+ }
+
+ Iterator operator++(int) {
+ owner->print("iterator postfix ++");
+ auto result = *this;
+ pos -= 1;
+ return result;
+ }
+
+ int operator*() const {
+ int result = 2 - pos;
+ owner->print("iterator deref: %i", result);
+ return result;
+ }
+
+ size_t operator-(const Iterator &that) const {
+ int result = (2 - this->pos) - (2 - that.pos);
+ owner->print("iterator distance: %d", result);
+ return result;
+ }
+
+ Iterator operator+(int steps) const {
+ owner->print("iterator advance: %i += %i", 2 - this->pos, steps);
+ return Iterator(owner, pos - steps);
+ }
+ };
+
+ Iterator begin() const {
+ print("begin()");
+ return Iterator(this, 2);
+ }
+
+ Iterator end() const {
+ print("end()");
+ return Iterator(this, -1);
+ }
+
+ void print(const char *msg, ...) const {
+ va_list args;
+ va_start(args, msg);
+ printf("[%s] ", name);
+ vprintf(msg, args);
+ printf("\n");
+ va_end(args);
+ }
+};
+
+int main() {
+ printf("do\n");
+#pragma omp parallel for collapse(3) num_threads(1)
+ for (int i = 0; i < 3; ++i)
+#pragma omp tile sizes(2, 2)
+ for (Reporter c{"C"}; auto &&v : Reporter("A"))
+ for (Reporter d{"D"}; auto &&w : Reporter("B"))
+ printf("i=%d v=%d w=%d\n", i, v, w);
+ printf("done\n");
+ return EXIT_SUCCESS;
+}
+
+#endif /* HEADER */
+
+// CHECK: do
+// CHECK-NEXT: [C] ctor
+// CHECK-NEXT: [A] ctor
+// CHECK-NEXT: [A] end()
+// CHECK-NEXT: [A] begin()
+// CHECK-NEXT: [A] begin()
+// CHECK-NEXT: [A] iterator distance: 3
+// CHECK-NEXT: [D] ctor
+// CHECK-NEXT: [B] ctor
+// CHECK-NEXT: [B] end()
+// CHECK-NEXT: [B] begin()
+// CHECK-NEXT: [B] begin()
+// CHECK-NEXT: [B] iterator distance: 3
+// CHECK-NEXT: [A] iterator advance: 0 += 0
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 0
+// CHECK-NEXT: [B] iterator advance: 0 += 0
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 0
+// CHECK-NEXT: i=0 v=0 w=0
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator advance: 0 += 1
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 1
+// CHECK-NEXT: i=0 v=0 w=1
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 1
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 1
+// CHECK-NEXT: [B] iterator advance: 0 += 0
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 0
+// CHECK-NEXT: i=0 v=1 w=0
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator advance: 0 += 1
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 1
+// CHECK-NEXT: i=0 v=1 w=1
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 0
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 0
+// CHECK-NEXT: [B] iterator advance: 0 += 2
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 2
+// CHECK-NEXT: i=0 v=0 w=2
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 1
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 1
+// CHECK-NEXT: [B] iterator advance: 0 += 2
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 2
+// CHECK-NEXT: i=0 v=1 w=2
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 2
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 2
+// CHECK-NEXT: [B] iterator advance: 0 += 0
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 0
+// CHECK-NEXT: i=0 v=2 w=0
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator advance: 0 += 1
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 1
+// CHECK-NEXT: i=0 v=2 w=1
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 2
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 2
+// CHECK-NEXT: [B] iterator advance: 0 += 2
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 2
+// CHECK-NEXT: i=0 v=2 w=2
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 0
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 0
+// CHECK-NEXT: [B] iterator advance: 0 += 0
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 0
+// CHECK-NEXT: i=1 v=0 w=0
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator advance: 0 += 1
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 1
+// CHECK-NEXT: i=1 v=0 w=1
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 1
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 1
+// CHECK-NEXT: [B] iterator advance: 0 += 0
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 0
+// CHECK-NEXT: i=1 v=1 w=0
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator advance: 0 += 1
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 1
+// CHECK-NEXT: i=1 v=1 w=1
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 0
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 0
+// CHECK-NEXT: [B] iterator advance: 0 += 2
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 2
+// CHECK-NEXT: i=1 v=0 w=2
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 1
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 1
+// CHECK-NEXT: [B] iterator advance: 0 += 2
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 2
+// CHECK-NEXT: i=1 v=1 w=2
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 2
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 2
+// CHECK-NEXT: [B] iterator advance: 0 += 0
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 0
+// CHECK-NEXT: i=1 v=2 w=0
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator advance: 0 += 1
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 1
+// CHECK-NEXT: i=1 v=2 w=1
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 2
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 2
+// CHECK-NEXT: [B] iterator advance: 0 += 2
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 2
+// CHECK-NEXT: i=1 v=2 w=2
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 0
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 0
+// CHECK-NEXT: [B] iterator advance: 0 += 0
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 0
+// CHECK-NEXT: i=2 v=0 w=0
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator advance: 0 += 1
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 1
+// CHECK-NEXT: i=2 v=0 w=1
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 1
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 1
+// CHECK-NEXT: [B] iterator advance: 0 += 0
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 0
+// CHECK-NEXT: i=2 v=1 w=0
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator advance: 0 += 1
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 1
+// CHECK-NEXT: i=2 v=1 w=1
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 0
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 0
+// CHECK-NEXT: [B] iterator advance: 0 += 2
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 2
+// CHECK-NEXT: i=2 v=0 w=2
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 1
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 1
+// CHECK-NEXT: [B] iterator advance: 0 += 2
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 2
+// CHECK-NEXT: i=2 v=1 w=2
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 2
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 2
+// CHECK-NEXT: [B] iterator advance: 0 += 0
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 0
+// CHECK-NEXT: i=2 v=2 w=0
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator advance: 0 += 1
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 1
+// CHECK-NEXT: i=2 v=2 w=1
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator advance: 0 += 2
+// CHECK-NEXT: [A] iterator move assign
+// CHECK-NEXT: [A] iterator deref: 2
+// CHECK-NEXT: [B] iterator advance: 0 += 2
+// CHECK-NEXT: [B] iterator move assign
+// CHECK-NEXT: [B] iterator deref: 2
+// CHECK-NEXT: i=2 v=2 w=2
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] iterator dtor
+// CHECK-NEXT: [B] dtor
+// CHECK-NEXT: [D] dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] iterator dtor
+// CHECK-NEXT: [A] dtor
+// CHECK-NEXT: [C] dtor
+// CHECK-NEXT: done
diff --git a/openmp/runtime/test/transform/unroll/factor_foreach.cpp b/openmp/runtime/test/transform/unroll/factor_foreach.cpp
new file mode 100644
index 000000000000..29fef7c18736
--- /dev/null
+++ b/openmp/runtime/test/transform/unroll/factor_foreach.cpp
@@ -0,0 +1,162 @@
+// RUN: %libomp-cxx20-compile-and-run | FileCheck %s --match-full-lines
+
+#ifndef HEADER
+#define HEADER
+
+#include <cstdlib>
+#include <cstdarg>
+#include <cstdio>
+#include <vector>
+
+struct Reporter {
+ const char *name;
+
+ Reporter(const char *name) : name(name) { print("ctor"); }
+
+ Reporter() : name("<anon>") { print("ctor"); }
+
+ Reporter(const Reporter &that) : name(that.name) { print("copy ctor"); }
+
+ Reporter(Reporter &&that) : name(that.name) { print("move ctor"); }
+
+ ~Reporter() { print("dtor"); }
+
+ const Reporter &operator=(const Reporter &that) {
+ print("copy assign");
+ this->name = that.name;
+ return *this;
+ }
+
+ const Reporter &operator=(Reporter &&that) {
+ print("move assign");
+ this->name = that.name;
+ return *this;
+ }
+
+ struct Iterator {
+ const Reporter *owner;
+ int pos;
+
+ Iterator(const Reporter *owner, int pos) : owner(owner), pos(pos) {}
+
+ Iterator(const Iterator &that) : owner(that.owner), pos(that.pos) {
+ owner->print("iterator copy ctor");
+ }
+
+ Iterator(Iterator &&that) : owner(that.owner), pos(that.pos) {
+ owner->print("iterator move ctor");
+ }
+
+ ~Iterator() { owner->print("iterator dtor"); }
+
+ const Iterator &operator=(const Iterator &that) {
+ owner->print("iterator copy assign");
+ this->owner = that.owner;
+ this->pos = that.pos;
+ return *this;
+ }
+
+ const Iterator &operator=(Iterator &&that) {
+ owner->print("iterator move assign");
+ this->owner = that.owner;
+ this->pos = that.pos;
+ return *this;
+ }
+
+ bool operator==(const Iterator &that) const {
+ owner->print("iterator %d == %d", 2 - this->pos, 2 - that.pos);
+ return this->pos == that.pos;
+ }
+
+ bool operator!=(const Iterator &that) const {
+ owner->print("iterator %d != %d", 2 - this->pos, 2 - that.pos);
+ return this->pos != that.pos;
+ }
+
+ Iterator &operator++() {
+ owner->print("iterator prefix ++");
+ pos -= 1;
+ return *this;
+ }
+
+ Iterator operator++(int) {
+ owner->print("iterator postfix ++");
+ auto result = *this;
+ pos -= 1;
+ return result;
+ }
+
+ int operator*() const {
+ int result = 2 - pos;
+ owner->print("iterator deref: %i", result);
+ return result;
+ }
+
+ size_t operator-(const Iterator &that) const {
+ int result = (2 - this->pos) - (2 - that.pos);
+ owner->print("iterator distance: %d", result);
+ return result;
+ }
+
+ Iterator operator+(int steps) const {
+ owner->print("iterator advance: %i += %i", 2 - this->pos, steps);
+ return Iterator(owner, pos - steps);
+ }
+
+ void print(const char *msg) const { owner->print(msg); }
+ };
+
+ Iterator begin() const {
+ print("begin()");
+ return Iterator(this, 2);
+ }
+
+ Iterator end() const {
+ print("end()");
+ return Iterator(this, -1);
+ }
+
+ void print(const char *msg, ...) const {
+ va_list args;
+ va_start(args, msg);
+ printf("[%s] ", name);
+ vprintf(msg, args);
+ printf("\n");
+ va_end(args);
+ }
+};
+
+int main() {
+ printf("do\n");
+#pragma omp unroll partial(2)
+ for (Reporter c{"init-stmt"}; auto &&v : Reporter("range"))
+ printf("v=%d\n", v);
+ printf("done\n");
+ return EXIT_SUCCESS;
+}
+
+#endif /* HEADER */
+
+// CHECK: do
+// CHECK-NEXT: [init-stmt] ctor
+// CHECK-NEXT: [range] ctor
+// CHECK-NEXT: [range] begin()
+// CHECK-NEXT: [range] end()
+// CHECK-NEXT: [range] iterator 0 != 3
+// CHECK-NEXT: [range] iterator deref: 0
+// CHECK-NEXT: v=0
+// CHECK-NEXT: [range] iterator prefix ++
+// CHECK-NEXT: [range] iterator 1 != 3
+// CHECK-NEXT: [range] iterator deref: 1
+// CHECK-NEXT: v=1
+// CHECK-NEXT: [range] iterator prefix ++
+// CHECK-NEXT: [range] iterator 2 != 3
+// CHECK-NEXT: [range] iterator deref: 2
+// CHECK-NEXT: v=2
+// CHECK-NEXT: [range] iterator prefix ++
+// CHECK-NEXT: [range] iterator 3 != 3
+// CHECK-NEXT: [range] iterator dtor
+// CHECK-NEXT: [range] iterator dtor
+// CHECK-NEXT: [range] dtor
+// CHECK-NEXT: [init-stmt] dtor
+// CHECK-NEXT: done
diff --git a/openmp/runtime/test/transform/unroll/factor_intfor.c b/openmp/runtime/test/transform/unroll/factor_intfor.c
new file mode 100644
index 000000000000..42ebeb48e41c
--- /dev/null
+++ b/openmp/runtime/test/transform/unroll/factor_intfor.c
@@ -0,0 +1,25 @@
+// RUN: %libomp-compile-and-run | FileCheck %s --match-full-lines
+
+#ifndef HEADER
+#define HEADER
+
+#include <stdlib.h>
+#include <stdio.h>
+
+int main() {
+ printf("do\n");
+#pragma omp unroll partial(2)
+ for (int i = 7; i < 19; i += 3)
+ printf("i=%d\n", i);
+ printf("done\n");
+ return EXIT_SUCCESS;
+}
+
+#endif /* HEADER */
+
+// CHECK: do
+// CHECK-NEXT: i=7
+// CHECK-NEXT: i=10
+// CHECK-NEXT: i=13
+// CHECK-NEXT: i=16
+// CHECK-NEXT: done
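Here partial(2) requests an unroll factor of two while keeping the loop: the trip count is four (i = 7, 10, 13, 16), so each remaining iteration executes two copies of the body, with a guard that keeps an odd trip count correct. A hand-unrolled equivalent (a semantic sketch, not the code the compiler actually emits):

    #include <cstdio>

    int main() {
      // Manual unroll-by-2 of: for (int i = 7; i < 19; i += 3) body(i);
      for (int i = 7; i < 19; i += 2 * 3) {
        printf("i=%d\n", i);
        if (i + 3 < 19) // Guard the second copy for a partial final chunk.
          printf("i=%d\n", i + 3);
      }
      return 0;
    }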
diff --git a/openmp/runtime/test/transform/unroll/factor_iterfor.cpp b/openmp/runtime/test/transform/unroll/factor_iterfor.cpp
new file mode 100644
index 000000000000..0298477110b2
--- /dev/null
+++ b/openmp/runtime/test/transform/unroll/factor_iterfor.cpp
@@ -0,0 +1,169 @@
+// RUN: %libomp-cxx20-compile-and-run | FileCheck %s --match-full-lines
+
+#ifndef HEADER
+#define HEADER
+
+#include <cstdlib>
+#include <cstdarg>
+#include <cstdio>
+#include <vector>
+
+struct Reporter {
+ const char *name;
+
+ Reporter(const char *name) : name(name) { print("ctor"); }
+
+ Reporter() : name("<anon>") { print("ctor"); }
+
+ Reporter(const Reporter &that) : name(that.name) { print("copy ctor"); }
+
+ Reporter(Reporter &&that) : name(that.name) { print("move ctor"); }
+
+ ~Reporter() { print("dtor"); }
+
+ const Reporter &operator=(const Reporter &that) {
+ print("copy assign");
+ this->name = that.name;
+ return *this;
+ }
+
+ const Reporter &operator=(Reporter &&that) {
+ print("move assign");
+ this->name = that.name;
+ return *this;
+ }
+
+ struct Iterator {
+ const Reporter *owner;
+ int pos;
+
+ Iterator(const Reporter *owner, int pos) : owner(owner), pos(pos) {}
+
+ Iterator(const Iterator &that) : owner(that.owner), pos(that.pos) {
+ print("iterator copy ctor");
+ }
+
+ Iterator(Iterator &&that) : owner(that.owner), pos(that.pos) {
+ print("iterator move ctor");
+ }
+
+ ~Iterator() { print("iterator dtor"); }
+
+ const Iterator &operator=(const Iterator &that) {
+ print("iterator copy assign");
+ this->owner = that.owner;
+ this->pos = that.pos;
+ return *this;
+ }
+
+ const Iterator &operator=(Iterator &&that) {
+ print("iterator move assign");
+ this->owner = that.owner;
+ this->pos = that.pos;
+ return *this;
+ }
+
+ bool operator==(const Iterator &that) const {
+ owner->print("iterator %d == %d", 2 - this->pos, 2 - that.pos);
+ return this->pos == that.pos;
+ }
+
+ bool operator!=(const Iterator &that) const {
+ owner->print("iterator %d != %d", 2 - this->pos, 2 - that.pos);
+ return this->pos != that.pos;
+ }
+
+ Iterator &operator++() {
+ print("iterator prefix ++");
+ pos -= 1;
+ return *this;
+ }
+
+ Iterator operator++(int) {
+ print("iterator postfix ++");
+ auto result = *this;
+ pos -= 1;
+ return result;
+ }
+
+ int operator*() const {
+ int result = 2 - pos;
+ owner->print("iterator deref: %i", result);
+ return result;
+ }
+
+ size_t operator-(const Iterator &that) const {
+ int result = (2 - this->pos) - (2 - that.pos);
+ owner->print("iterator distance: %d", result);
+ return result;
+ }
+
+ Iterator operator+(int steps) const {
+ owner->print("iterator advance: %i += %i", 2 - this->pos, steps);
+ return Iterator(owner, pos - steps);
+ }
+
+ void print(const char *msg) const { owner->print(msg); }
+ };
+
+ Iterator begin() const {
+ print("begin()");
+ return Iterator(this, 2);
+ }
+
+ Iterator end() const {
+ print("end()");
+ return Iterator(this, -1);
+ }
+
+ void print(const char *msg, ...) const {
+ va_list args;
+ va_start(args, msg);
+ printf("[%s] ", name);
+ vprintf(msg, args);
+ printf("\n");
+ va_end(args);
+ }
+};
+
+int main() {
+ printf("do\n");
+ {
+ Reporter range("range");
+#pragma omp unroll partial(2)
+ for (auto it = range.begin(); it != range.end(); ++it)
+ printf("v=%d\n", *it);
+ }
+ printf("done\n");
+ return EXIT_SUCCESS;
+}
+
+#endif /* HEADER */
+
+// CHECK: do
+// CHECK-NEXT: [range] ctor
+// CHECK-NEXT: [range] begin()
+// CHECK-NEXT: [range] end()
+// CHECK-NEXT: [range] iterator 0 != 3
+// CHECK-NEXT: [range] iterator deref: 0
+// CHECK-NEXT: v=0
+// CHECK-NEXT: [range] iterator prefix ++
+// CHECK-NEXT: [range] iterator dtor
+// CHECK-NEXT: [range] end()
+// CHECK-NEXT: [range] iterator 1 != 3
+// CHECK-NEXT: [range] iterator deref: 1
+// CHECK-NEXT: v=1
+// CHECK-NEXT: [range] iterator prefix ++
+// CHECK-NEXT: [range] iterator dtor
+// CHECK-NEXT: [range] end()
+// CHECK-NEXT: [range] iterator 2 != 3
+// CHECK-NEXT: [range] iterator deref: 2
+// CHECK-NEXT: v=2
+// CHECK-NEXT: [range] iterator prefix ++
+// CHECK-NEXT: [range] iterator dtor
+// CHECK-NEXT: [range] end()
+// CHECK-NEXT: [range] iterator 3 != 3
+// CHECK-NEXT: [range] iterator dtor
+// CHECK-NEXT: [range] iterator dtor
+// CHECK-NEXT: [range] dtor
+// CHECK-NEXT: done
diff --git a/openmp/runtime/test/transform/unroll/factor_parallel-wsloop-collapse-foreach.cpp b/openmp/runtime/test/transform/unroll/factor_parallel-wsloop-collapse-foreach.cpp
new file mode 100644
index 000000000000..71567faf7964
--- /dev/null
+++ b/openmp/runtime/test/transform/unroll/factor_parallel-wsloop-collapse-foreach.cpp
@@ -0,0 +1,199 @@
+// RUN: %libomp-cxx20-compile-and-run | FileCheck %s --match-full-lines
+
+#ifndef HEADER
+#define HEADER
+
+#include <cstdlib>
+#include <cstdarg>
+#include <cstdio>
+#include <vector>
+
+struct Reporter {
+ const char *name;
+
+ Reporter(const char *name) : name(name) { print("ctor"); }
+
+ Reporter() : name("<anon>") { print("ctor"); }
+
+ Reporter(const Reporter &that) : name(that.name) { print("copy ctor"); }
+
+ Reporter(Reporter &&that) : name(that.name) { print("move ctor"); }
+
+ ~Reporter() { print("dtor"); }
+
+ const Reporter &operator=(const Reporter &that) {
+ print("copy assign");
+ this->name = that.name;
+ return *this;
+ }
+
+ const Reporter &operator=(Reporter &&that) {
+ print("move assign");
+ this->name = that.name;
+ return *this;
+ }
+
+ struct Iterator {
+ const Reporter *owner;
+ int pos;
+
+ Iterator(const Reporter *owner, int pos) : owner(owner), pos(pos) {}
+
+ Iterator(const Iterator &that) : owner(that.owner), pos(that.pos) {
+ owner->print("iterator copy ctor");
+ }
+
+ Iterator(Iterator &&that) : owner(that.owner), pos(that.pos) {
+ owner->print("iterator move ctor");
+ }
+
+ ~Iterator() { owner->print("iterator dtor"); }
+
+ const Iterator &operator=(const Iterator &that) {
+ owner->print("iterator copy assign");
+ this->owner = that.owner;
+ this->pos = that.pos;
+ return *this;
+ }
+
+ const Iterator &operator=(Iterator &&that) {
+ owner->print("iterator move assign");
+ this->owner = that.owner;
+ this->pos = that.pos;
+ return *this;
+ }
+
+ bool operator==(const Iterator &that) const {
+ owner->print("iterator %d == %d", 2 - this->pos, 2 - that.pos);
+ return this->pos == that.pos;
+ }
+
+ bool operator!=(const Iterator &that) const {
+ owner->print("iterator %d != %d", 2 - this->pos, 2 - that.pos);
+ return this->pos != that.pos;
+ }
+
+ Iterator &operator++() {
+ owner->print("iterator prefix ++");
+ pos -= 1;
+ return *this;
+ }
+
+ Iterator operator++(int) {
+ owner->print("iterator postfix ++");
+ auto result = *this;
+ pos -= 1;
+ return result;
+ }
+
+ int operator*() const {
+ int result = 2 - pos;
+ owner->print("iterator deref: %i", result);
+ return result;
+ }
+
+ size_t operator-(const Iterator &that) const {
+ int result = (2 - this->pos) - (2 - that.pos);
+ owner->print("iterator distance: %d", result);
+ return result;
+ }
+
+ Iterator operator+(int steps) const {
+ owner->print("iterator advance: %i += %i", 2 - this->pos, steps);
+ return Iterator(owner, pos - steps);
+ }
+
+ void print(const char *msg) const { owner->print(msg); }
+ };
+
+ Iterator begin() const {
+ print("begin()");
+ return Iterator(this, 2);
+ }
+
+ Iterator end() const {
+ print("end()");
+ return Iterator(this, -1);
+ }
+
+ void print(const char *msg, ...) const {
+ va_list args;
+ va_start(args, msg);
+ printf("[%s] ", name);
+ vprintf(msg, args);
+ printf("\n");
+ va_end(args);
+ }
+};
+
+int main() {
+ printf("do\n");
+#pragma omp parallel for collapse(2) num_threads(1)
+ for (int i = 0; i < 3; ++i)
+#pragma omp unroll partial(2)
+ for (Reporter c{"init-stmt"}; auto &&v : Reporter("range"))
+ printf("i=%d v=%d\n", i, v);
+ printf("done\n");
+ return EXIT_SUCCESS;
+}
+
+#endif /* HEADER */
+
+// CHECK: do
+// CHECK-NEXT: [init-stmt] ctor
+// CHECK-NEXT: [range] ctor
+// CHECK-NEXT: [range] end()
+// CHECK-NEXT: [range] begin()
+// CHECK-NEXT: [range] begin()
+// CHECK-NEXT: [range] iterator distance: 3
+// CHECK-NEXT: [range] iterator advance: 0 += 0
+// CHECK-NEXT: [range] iterator move assign
+// CHECK-NEXT: [range] iterator deref: 0
+// CHECK-NEXT: i=0 v=0
+// CHECK-NEXT: [range] iterator dtor
+// CHECK-NEXT: [range] iterator advance: 0 += 1
+// CHECK-NEXT: [range] iterator move assign
+// CHECK-NEXT: [range] iterator deref: 1
+// CHECK-NEXT: i=0 v=1
+// CHECK-NEXT: [range] iterator dtor
+// CHECK-NEXT: [range] iterator advance: 0 += 2
+// CHECK-NEXT: [range] iterator move assign
+// CHECK-NEXT: [range] iterator deref: 2
+// CHECK-NEXT: i=0 v=2
+// CHECK-NEXT: [range] iterator dtor
+// CHECK-NEXT: [range] iterator advance: 0 += 0
+// CHECK-NEXT: [range] iterator move assign
+// CHECK-NEXT: [range] iterator deref: 0
+// CHECK-NEXT: i=1 v=0
+// CHECK-NEXT: [range] iterator dtor
+// CHECK-NEXT: [range] iterator advance: 0 += 1
+// CHECK-NEXT: [range] iterator move assign
+// CHECK-NEXT: [range] iterator deref: 1
+// CHECK-NEXT: i=1 v=1
+// CHECK-NEXT: [range] iterator dtor
+// CHECK-NEXT: [range] iterator advance: 0 += 2
+// CHECK-NEXT: [range] iterator move assign
+// CHECK-NEXT: [range] iterator deref: 2
+// CHECK-NEXT: i=1 v=2
+// CHECK-NEXT: [range] iterator dtor
+// CHECK-NEXT: [range] iterator advance: 0 += 0
+// CHECK-NEXT: [range] iterator move assign
+// CHECK-NEXT: [range] iterator deref: 0
+// CHECK-NEXT: i=2 v=0
+// CHECK-NEXT: [range] iterator dtor
+// CHECK-NEXT: [range] iterator advance: 0 += 1
+// CHECK-NEXT: [range] iterator move assign
+// CHECK-NEXT: [range] iterator deref: 1
+// CHECK-NEXT: i=2 v=1
+// CHECK-NEXT: [range] iterator dtor
+// CHECK-NEXT: [range] iterator advance: 0 += 2
+// CHECK-NEXT: [range] iterator move assign
+// CHECK-NEXT: [range] iterator deref: 2
+// CHECK-NEXT: i=2 v=2
+// CHECK-NEXT: [range] iterator dtor
+// CHECK-NEXT: [range] iterator dtor
+// CHECK-NEXT: [range] iterator dtor
+// CHECK-NEXT: [range] iterator dtor
+// CHECK-NEXT: [range] dtor
+// CHECK-NEXT: [init-stmt] dtor
+// CHECK-NEXT: done
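
The CHECK trace above records how the collapsed, partially unrolled range-for is lowered: end() - begin() is computed once to obtain a trip count (the "iterator distance: 3" line), and element k is then reached through a fresh begin() + k copy ("iterator advance: 0 += k") rather than by repeated increments of a single iterator. Below is a minimal C sketch of that access pattern, assuming the trace generalizes; body() is illustrative and not part of the test:

#include <stdio.h>

/* Sketch of the access pattern recorded by the trace above: one distance
 * computation for the trip count, then one begin()+k copy per body.
 * body() stands in for the loop body and is not part of the test. */
static void body(int i, int v) { printf("i=%d v=%d\n", i, v); }

int main(void) {
  int trip = 3;                    /* end() - begin() */
  for (int i = 0; i < 3; ++i)      /* outer, collapsed dimension */
    for (int k = 0; k < trip; ++k) /* advance: begin() + k, then deref */
      body(i, k);                  /* deref of begin()+k yields k here */
  return 0;
}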
diff --git a/openmp/runtime/test/transform/unroll/factor_parallel-wsloop-collapse-intfor.cpp b/openmp/runtime/test/transform/unroll/factor_parallel-wsloop-collapse-intfor.cpp
new file mode 100644
index 000000000000..0a31f8db0701
--- /dev/null
+++ b/openmp/runtime/test/transform/unroll/factor_parallel-wsloop-collapse-intfor.cpp
@@ -0,0 +1,32 @@
+// RUN: %libomp-cxx-compile-and-run | FileCheck %s --match-full-lines
+
+#ifndef HEADER
+#define HEADER
+
+#include <cstdlib>
+#include <cstdio>
+
+int main() {
+ printf("do\n");
+#pragma omp parallel for collapse(2) num_threads(1)
+ for (int i = 0; i < 3; ++i)
+#pragma omp unroll partial(2)
+ for (int j = 0; j < 3; ++j)
+ printf("i=%d j=%d\n", i, j);
+ printf("done\n");
+ return EXIT_SUCCESS;
+}
+
+#endif /* HEADER */
+
+// CHECK: do
+// CHECK-NEXT: i=0 j=0
+// CHECK-NEXT: i=0 j=1
+// CHECK-NEXT: i=0 j=2
+// CHECK-NEXT: i=1 j=0
+// CHECK-NEXT: i=1 j=1
+// CHECK-NEXT: i=1 j=2
+// CHECK-NEXT: i=2 j=0
+// CHECK-NEXT: i=2 j=1
+// CHECK-NEXT: i=2 j=2
+// CHECK-NEXT: done
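
Combining collapse(2) with unroll partial(2) means the worksharing construct sees the outer loop paired with the partially unrolled inner loop. A hedged C sketch of the nest this is roughly equivalent to (the compiler's actual output may differ); it reproduces the i/j order checked above:

#include <stdio.h>

/* Roughly equivalent nest after partial unrolling by 2: the unrolled
 * inner loop (trip count 2 = ceil(3/2)) is what collapse(2) combines
 * with the outer loop; the innermost loop is the remainder guard. */
int main(void) {
  for (int i = 0; i < 3; ++i)                    /* collapsed dim 1 */
    for (int jj = 0; jj < 3; jj += 2)            /* collapsed dim 2 */
      for (int j = jj; j < jj + 2 && j < 3; ++j) /* unrolled bodies */
        printf("i=%d j=%d\n", i, j);
  return 0;
}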
diff --git a/openmp/runtime/test/transform/unroll/full_intfor.c b/openmp/runtime/test/transform/unroll/full_intfor.c
new file mode 100644
index 000000000000..081451109176
--- /dev/null
+++ b/openmp/runtime/test/transform/unroll/full_intfor.c
@@ -0,0 +1,25 @@
+// RUN: %libomp-compile-and-run | FileCheck %s --match-full-lines
+
+#ifndef HEADER
+#define HEADER
+
+#include <stdlib.h>
+#include <stdio.h>
+
+int main() {
+ printf("do\n");
+#pragma omp unroll full
+ for (int i = 7; i < 19; i += 3)
+ printf("i=%d\n", i);
+ printf("done\n");
+ return EXIT_SUCCESS;
+}
+
+#endif /* HEADER */
+
+// CHECK: do
+// CHECK-NEXT: i=7
+// CHECK-NEXT: i=10
+// CHECK-NEXT: i=13
+// CHECK-NEXT: i=16
+// CHECK-NEXT: done
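
The full clause requires a trip count that is known at compile time; for (int i = 7; i < 19; i += 3) runs exactly four times (i = 7, 10, 13, 16), so the loop can be replaced by its bodies outright. A hedged sketch of that expansion:

#include <stdio.h>

/* What full unrolling amounts to for this loop: the four iterations
 * are emitted straight-line, with no back edge left. */
int main(void) {
  printf("i=%d\n", 7);
  printf("i=%d\n", 10);
  printf("i=%d\n", 13);
  printf("i=%d\n", 16);
  return 0;
}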
diff --git a/openmp/runtime/test/transform/unroll/heuristic_intfor.c b/openmp/runtime/test/transform/unroll/heuristic_intfor.c
new file mode 100644
index 000000000000..b07bec7d82f0
--- /dev/null
+++ b/openmp/runtime/test/transform/unroll/heuristic_intfor.c
@@ -0,0 +1,25 @@
+// RUN: %libomp-compile-and-run | FileCheck %s --match-full-lines
+
+#ifndef HEADER
+#define HEADER
+
+#include <stdlib.h>
+#include <stdio.h>
+
+int main() {
+ printf("do\n");
+#pragma omp unroll
+ for (int i = 7; i < 19; i += 3)
+ printf("i=%d\n", i);
+ printf("done\n");
+ return EXIT_SUCCESS;
+}
+
+#endif /* HEADER */
+
+// CHECK: do
+// CHECK-NEXT: i=7
+// CHECK-NEXT: i=10
+// CHECK-NEXT: i=13
+// CHECK-NEXT: i=16
+// CHECK-NEXT: done
diff --git a/openmp/runtime/test/transform/unroll/partial_intfor.c b/openmp/runtime/test/transform/unroll/partial_intfor.c
new file mode 100644
index 000000000000..2ede94e70e12
--- /dev/null
+++ b/openmp/runtime/test/transform/unroll/partial_intfor.c
@@ -0,0 +1,25 @@
+// RUN: %libomp-compile-and-run | FileCheck %s --match-full-lines
+
+#ifndef HEADER
+#define HEADER
+
+#include <stdlib.h>
+#include <stdio.h>
+
+int main() {
+ printf("do\n");
+#pragma omp unroll partial
+ for (int i = 7; i < 19; i += 3)
+ printf("i=%d\n", i);
+ printf("done\n");
+ return EXIT_SUCCESS;
+}
+
+#endif /* HEADER */
+
+// CHECK: do
+// CHECK-NEXT: i=7
+// CHECK-NEXT: i=10
+// CHECK-NEXT: i=13
+// CHECK-NEXT: i=16
+// CHECK-NEXT: done
diff --git a/polly/CMakeLists.txt b/polly/CMakeLists.txt
index 5d0f2cd7f00e..b4cfc77d0f21 100644
--- a/polly/CMakeLists.txt
+++ b/polly/CMakeLists.txt
@@ -4,6 +4,7 @@ if (NOT DEFINED LLVM_MAIN_SRC_DIR)
cmake_minimum_required(VERSION 3.20.0)
set(POLLY_STANDALONE_BUILD TRUE)
endif()
+set(LLVM_SUBPROJECT_TITLE "Polly")
# Must go below project(..)
include(GNUInstallDirs)
@@ -157,8 +158,7 @@ foreach (file IN LISTS files)
endforeach ()
add_custom_target(polly-check-format DEPENDS ${check_format_depends})
-set_target_properties(polly-check-format PROPERTIES FOLDER "Polly")
+set_target_properties(polly-check-format PROPERTIES FOLDER "Polly/Metatargets")
add_custom_target(polly-update-format DEPENDS ${update_format_depends})
-set_target_properties(polly-update-format PROPERTIES FOLDER "Polly")
-
+set_target_properties(polly-update-format PROPERTIES FOLDER "Polly/Metatargets")
diff --git a/polly/cmake/polly_macros.cmake b/polly/cmake/polly_macros.cmake
index df541eeccc4c..a791be4fa5bc 100644
--- a/polly/cmake/polly_macros.cmake
+++ b/polly/cmake/polly_macros.cmake
@@ -21,7 +21,7 @@ macro(add_polly_library name)
set(libkind)
endif()
add_library( ${name} ${libkind} ${srcs} )
- set_target_properties(${name} PROPERTIES FOLDER "Polly")
+ set_target_properties(${name} PROPERTIES FOLDER "Polly/Libraries")
if( LLVM_COMMON_DEPENDS )
add_dependencies( ${name} ${LLVM_COMMON_DEPENDS} )
@@ -64,7 +64,7 @@ macro(add_polly_loadable_module name)
endif()
set(MODULE TRUE)
add_polly_library(${name} ${srcs})
- set_target_properties(${name} PROPERTIES FOLDER "Polly")
+ set_target_properties(${name} PROPERTIES FOLDER "Polly/Loadable Modules")
if (GLOBAL_NOT_MODULE)
unset (MODULE)
endif()
diff --git a/polly/docs/CMakeLists.txt b/polly/docs/CMakeLists.txt
index a1ef5ce5277f..2bd16e53c542 100644
--- a/polly/docs/CMakeLists.txt
+++ b/polly/docs/CMakeLists.txt
@@ -77,6 +77,7 @@ if (LLVM_ENABLE_DOXYGEN)
COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/doxygen.cfg
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating polly doxygen documentation." VERBATIM)
+ set_target_properties(doxygen-polly PROPERTIES FOLDER "Polly/Docs")
if (LLVM_BUILD_DOCS)
add_dependencies(doxygen doxygen-polly)
diff --git a/polly/lib/CMakeLists.txt b/polly/lib/CMakeLists.txt
index 4557878e515e..d91f4ecd37e6 100644
--- a/polly/lib/CMakeLists.txt
+++ b/polly/lib/CMakeLists.txt
@@ -92,8 +92,6 @@ add_llvm_pass_plugin(Polly
LINK_COMPONENTS
${POLLY_COMPONENTS}
)
-set_target_properties(obj.Polly PROPERTIES FOLDER "Polly")
-set_target_properties(Polly PROPERTIES FOLDER "Polly")
if (MSVC_IDE OR XCODE)
# Configure source groups for Polly source files. By default, in the IDE there
@@ -120,7 +118,7 @@ if (WIN32 OR CYGWIN OR NOT LLVM_ENABLE_PIC)
# Add dummy target, either because loadable modules are not supported
# as on Windows or because PIC code has been disabled
add_custom_target(LLVMPolly)
- set_target_properties(LLVMPolly PROPERTIES FOLDER "Polly")
+ set_target_properties(LLVMPolly PROPERTIES FOLDER "Polly/Loadable Modules")
else ()
add_polly_loadable_module(LLVMPolly
Plugin/Polly.cpp
diff --git a/polly/lib/External/CMakeLists.txt b/polly/lib/External/CMakeLists.txt
index 1869410c8baa..5dd69b7199dc 100644
--- a/polly/lib/External/CMakeLists.txt
+++ b/polly/lib/External/CMakeLists.txt
@@ -302,7 +302,7 @@ if (POLLY_BUNDLED_ISL)
add_executable(polly-isl-test
isl/isl_test.c
)
- set_target_properties(polly-isl-test PROPERTIES FOLDER "Polly")
+ set_target_properties(polly-isl-test PROPERTIES FOLDER "Polly/Tests")
target_link_libraries(polly-isl-test PRIVATE
PollyISL
diff --git a/polly/test/CMakeLists.txt b/polly/test/CMakeLists.txt
index 81cee34a780d..4548f01d925a 100644
--- a/polly/test/CMakeLists.txt
+++ b/polly/test/CMakeLists.txt
@@ -1,7 +1,7 @@
set(LLVM_SHLIBEXT "${CMAKE_SHARED_MODULE_SUFFIX}")
add_custom_target(check-polly)
-set_target_properties(check-polly PROPERTIES FOLDER "Polly")
+set_target_properties(check-polly PROPERTIES FOLDER "Polly/Meta")
if(NOT LLVM_MAIN_SRC_DIR)
find_program(LLVM_OPT NAMES opt HINTS ${LLVM_TOOLS_BINARY_DIR})
@@ -64,7 +64,6 @@ add_lit_testsuite(check-polly-tests "Running polly regression tests"
polly_unit_site_config=${CMAKE_CURRENT_BINARY_DIR}/Unit/lit.site.cfg
DEPENDS ${POLLY_TEST_DEPS}
)
-set_target_properties(check-polly-tests PROPERTIES FOLDER "Polly")
add_dependencies(check-polly check-polly-tests)
configure_lit_site_cfg(
@@ -80,7 +79,6 @@ if (POLLY_GTEST_AVAIL)
EXCLUDE_FROM_CHECK_ALL
DEPENDS PollyUnitTests
)
- set_target_properties(check-polly-unittests PROPERTIES FOLDER "Polly")
endif ()
configure_file(
@@ -94,7 +92,6 @@ if (POLLY_BUNDLED_ISL)
EXCLUDE_FROM_CHECK_ALL
DEPENDS polly-isl-test
)
- set_target_properties(check-polly-isl PROPERTIES FOLDER "Polly")
endif (POLLY_BUNDLED_ISL)
# Run polly-check-format as part of polly-check only if we are compiling with
@@ -114,5 +111,5 @@ configure_file(
# Add a legacy target spelling: polly-test
add_custom_target(polly-test)
-set_target_properties(polly-test PROPERTIES FOLDER "Polly")
+set_target_properties(polly-test PROPERTIES FOLDER "Polly/Metatargets")
add_dependencies(polly-test check-polly)
diff --git a/polly/test/CodeGen/20100617.ll b/polly/test/CodeGen/20100617.ll
index 71a889f067b8..7229a6e3d524 100644
--- a/polly/test/CodeGen/20100617.ll
+++ b/polly/test/CodeGen/20100617.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define void @init_array() nounwind {
diff --git a/polly/test/CodeGen/20100622.ll b/polly/test/CodeGen/20100622.ll
index 872d6a0d75cf..bed737741abb 100644
--- a/polly/test/CodeGen/20100622.ll
+++ b/polly/test/CodeGen/20100622.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-codegen -disable-output < %s
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | not FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s | not FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
diff --git a/polly/test/CodeGen/20100707.ll b/polly/test/CodeGen/20100707.ll
index 338198084fc7..ee0422e07c4e 100644
--- a/polly/test/CodeGen/20100707.ll
+++ b/polly/test/CodeGen/20100707.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define void @clause_SetSplitField(i32 %Length) nounwind inlinehint {
diff --git a/polly/test/CodeGen/20100707_2.ll b/polly/test/CodeGen/20100707_2.ll
index df784c6d7957..a4cd76af9dd3 100644
--- a/polly/test/CodeGen/20100707_2.ll
+++ b/polly/test/CodeGen/20100707_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
@win193 = external global [4 x [36 x double]], align 32 ; <ptr> [#uses=3]
diff --git a/polly/test/CodeGen/20100708.ll b/polly/test/CodeGen/20100708.ll
index 50b8e385df53..9080451aeae5 100644
--- a/polly/test/CodeGen/20100708.ll
+++ b/polly/test/CodeGen/20100708.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-detect < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define fastcc void @execute() nounwind {
diff --git a/polly/test/CodeGen/20100708_2.ll b/polly/test/CodeGen/20100708_2.ll
index 2f4807d9e4d7..51dc9d311f07 100644
--- a/polly/test/CodeGen/20100708_2.ll
+++ b/polly/test/CodeGen/20100708_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define void @init_array() nounwind {
diff --git a/polly/test/CodeGen/20100713.ll b/polly/test/CodeGen/20100713.ll
index edd352a4c4cc..a836795c9907 100644
--- a/polly/test/CodeGen/20100713.ll
+++ b/polly/test/CodeGen/20100713.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define void @fft_float(i32 %NumSamples) nounwind {
diff --git a/polly/test/CodeGen/20100713_2.ll b/polly/test/CodeGen/20100713_2.ll
index 92f8959d91d6..28b984bd5900 100644
--- a/polly/test/CodeGen/20100713_2.ll
+++ b/polly/test/CodeGen/20100713_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define hidden void @luaD_callhook() nounwind {
diff --git a/polly/test/CodeGen/20100717.ll b/polly/test/CodeGen/20100717.ll
index a400eeaa3370..51c453cfe438 100644
--- a/polly/test/CodeGen/20100717.ll
+++ b/polly/test/CodeGen/20100717.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -disable-output < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -disable-output < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define void @matrixTranspose(ptr %A) nounwind {
diff --git a/polly/test/CodeGen/20100718-DomInfo-2.ll b/polly/test/CodeGen/20100718-DomInfo-2.ll
index 512b4c5c99af..fdac75f1b999 100644
--- a/polly/test/CodeGen/20100718-DomInfo-2.ll
+++ b/polly/test/CodeGen/20100718-DomInfo-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -verify-dom-info -disable-output < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -verify-dom-info -disable-output < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define void @getNonAffNeighbour() nounwind {
diff --git a/polly/test/CodeGen/20100718-DomInfo.ll b/polly/test/CodeGen/20100718-DomInfo.ll
index e12334359c33..da68eb0dd8fa 100644
--- a/polly/test/CodeGen/20100718-DomInfo.ll
+++ b/polly/test/CodeGen/20100718-DomInfo.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -verify-dom-info -disable-output < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -verify-dom-info -disable-output < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define void @intrapred_luma_16x16(i32 %predmode) nounwind {
diff --git a/polly/test/CodeGen/20100720-MultipleConditions.ll b/polly/test/CodeGen/20100720-MultipleConditions.ll
index 9f2268713853..3dece4efdcd0 100644
--- a/polly/test/CodeGen/20100720-MultipleConditions.ll
+++ b/polly/test/CodeGen/20100720-MultipleConditions.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-ast -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s
;int bar1();
;int bar2();
diff --git a/polly/test/CodeGen/20100809-IndependentBlock.ll b/polly/test/CodeGen/20100809-IndependentBlock.ll
index 8d596689d8ae..f45b6544464d 100644
--- a/polly/test/CodeGen/20100809-IndependentBlock.ll
+++ b/polly/test/CodeGen/20100809-IndependentBlock.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -disable-output < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -disable-output < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define void @cfft2(ptr %x) nounwind {
entry:
diff --git a/polly/test/CodeGen/20100811-ScalarDependencyBetweenBrAndCnd.ll b/polly/test/CodeGen/20100811-ScalarDependencyBetweenBrAndCnd.ll
index 261a205560b5..82da9d248642 100644
--- a/polly/test/CodeGen/20100811-ScalarDependencyBetweenBrAndCnd.ll
+++ b/polly/test/CodeGen/20100811-ScalarDependencyBetweenBrAndCnd.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -disable-output < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -disable-output < %s
target datalayout =
"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/polly/test/CodeGen/20101030-Overflow.ll b/polly/test/CodeGen/20101030-Overflow.ll
index caaa4851f93e..fecdb9d4fed1 100644
--- a/polly/test/CodeGen/20101030-Overflow.ll
+++ b/polly/test/CodeGen/20101030-Overflow.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define void @compdecomp() nounwind {
diff --git a/polly/test/CodeGen/20101103-Overflow3.ll b/polly/test/CodeGen/20101103-Overflow3.ll
index b2faf14fba0b..f1503e25fcc4 100644
--- a/polly/test/CodeGen/20101103-Overflow3.ll
+++ b/polly/test/CodeGen/20101103-Overflow3.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define void @Reflection_coefficients(ptr %r) nounwind {
bb20:
diff --git a/polly/test/CodeGen/20101103-signmissmatch.ll b/polly/test/CodeGen/20101103-signmissmatch.ll
index e157d292dc8a..3d0c929446f4 100644
--- a/polly/test/CodeGen/20101103-signmissmatch.ll
+++ b/polly/test/CodeGen/20101103-signmissmatch.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define void @CleanNet() nounwind {
diff --git a/polly/test/CodeGen/20110226-Ignore-Dead-Code.ll b/polly/test/CodeGen/20110226-Ignore-Dead-Code.ll
index c792d8c3d0bf..0e62e678f0ae 100644
--- a/polly/test/CodeGen/20110226-Ignore-Dead-Code.ll
+++ b/polly/test/CodeGen/20110226-Ignore-Dead-Code.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define void @main() nounwind {
diff --git a/polly/test/CodeGen/20110226-PHI-Node-removed.ll b/polly/test/CodeGen/20110226-PHI-Node-removed.ll
index 3458d75c47a0..32b018f24e54 100644
--- a/polly/test/CodeGen/20110226-PHI-Node-removed.ll
+++ b/polly/test/CodeGen/20110226-PHI-Node-removed.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/polly/test/CodeGen/20120316-InvalidCast.ll b/polly/test/CodeGen/20120316-InvalidCast.ll
index 8355cc51c468..b87a3dc60dea 100644
--- a/polly/test/CodeGen/20120316-InvalidCast.ll
+++ b/polly/test/CodeGen/20120316-InvalidCast.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s | FileCheck %s
; CHECK: polly.start
diff --git a/polly/test/CodeGen/20120403-RHS-type-mismatch.ll b/polly/test/CodeGen/20120403-RHS-type-mismatch.ll
index 1d629e388452..dac78bf04a25 100644
--- a/polly/test/CodeGen/20120403-RHS-type-mismatch.ll
+++ b/polly/test/CodeGen/20120403-RHS-type-mismatch.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
; We just check that this compilation does not crash.
diff --git a/polly/test/CodeGen/20130221.ll b/polly/test/CodeGen/20130221.ll
index 45414671081a..5728a768a3b3 100644
--- a/polly/test/CodeGen/20130221.ll
+++ b/polly/test/CodeGen/20130221.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
define void @list_sequence(ptr %A) {
diff --git a/polly/test/CodeGen/20150328-SCEVExpanderIntroducesNewIV.ll b/polly/test/CodeGen/20150328-SCEVExpanderIntroducesNewIV.ll
index d54be5c3f35f..cafd68e50825 100644
--- a/polly/test/CodeGen/20150328-SCEVExpanderIntroducesNewIV.ll
+++ b/polly/test/CodeGen/20150328-SCEVExpanderIntroducesNewIV.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/CodeGen/Intrinsics/llvm-expect.ll b/polly/test/CodeGen/Intrinsics/llvm-expect.ll
index 84057e276521..47fd4f07e467 100644
--- a/polly/test/CodeGen/Intrinsics/llvm-expect.ll
+++ b/polly/test/CodeGen/Intrinsics/llvm-expect.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s | FileCheck %s
;
; Check that we generate code without crashing.
;
diff --git a/polly/test/CodeGen/LoopParallelMD/do_not_mutate_debug_info.ll b/polly/test/CodeGen/LoopParallelMD/do_not_mutate_debug_info.ll
index b04319550938..28531244421d 100644
--- a/polly/test/CodeGen/LoopParallelMD/do_not_mutate_debug_info.ll
+++ b/polly/test/CodeGen/LoopParallelMD/do_not_mutate_debug_info.ll
@@ -1,6 +1,6 @@
; This test checks that we do not accidentally mutate the debug info when
; inserting loop parallel metadata.
-; RUN: opt %loadPolly < %s -S -polly -polly-codegen -polly-ast-detect-parallel | FileCheck %s
+; RUN: opt %loadNPMPolly < %s -S -polly -passes=polly-codegen -polly-ast-detect-parallel | FileCheck %s
; CHECK-NOT: !7 = !{!7}
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/CodeGen/LoopParallelMD/loop_nest_param_parallel.ll b/polly/test/CodeGen/LoopParallelMD/loop_nest_param_parallel.ll
index 7b131c5ebcbd..9bb086fa79ae 100644
--- a/polly/test/CodeGen/LoopParallelMD/loop_nest_param_parallel.ll
+++ b/polly/test/CodeGen/LoopParallelMD/loop_nest_param_parallel.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-ast-detect-parallel -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-ast-detect-parallel -S < %s | FileCheck %s
;
; Check that we mark multiple parallel loops correctly including the memory instructions.
;
diff --git a/polly/test/CodeGen/LoopParallelMD/single_loop_param_parallel.ll b/polly/test/CodeGen/LoopParallelMD/single_loop_param_parallel.ll
index ec927acb1ec7..96b50cef179a 100644
--- a/polly/test/CodeGen/LoopParallelMD/single_loop_param_parallel.ll
+++ b/polly/test/CodeGen/LoopParallelMD/single_loop_param_parallel.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s -check-prefix=SEQUENTIAL
-; RUN: opt %loadPolly -polly-codegen -polly-ast-detect-parallel -S < %s | FileCheck %s -check-prefix=PARALLEL
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s -check-prefix=SEQUENTIAL
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-ast-detect-parallel -S < %s | FileCheck %s -check-prefix=PARALLEL
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
; This is a trivially parallel loop. We just use it to ensure that we actually
diff --git a/polly/test/CodeGen/MemAccess/bad_alignment.ll b/polly/test/CodeGen/MemAccess/bad_alignment.ll
index 32f3cfe963b7..82fff27dd0eb 100644
--- a/polly/test/CodeGen/MemAccess/bad_alignment.ll
+++ b/polly/test/CodeGen/MemAccess/bad_alignment.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -disable-output 2>&1 < %s | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly -passes=polly-import-jscop -disable-output 2>&1 < %s | FileCheck %s
;
; Check that we do not allow to access elements not accessed before because the
; alignment information would become invalid.
diff --git a/polly/test/CodeGen/MemAccess/codegen_address_space.ll b/polly/test/CodeGen/MemAccess/codegen_address_space.ll
index 7c9b12d64f9c..3ce363e8f09f 100644
--- a/polly/test/CodeGen/MemAccess/codegen_address_space.ll
+++ b/polly/test/CodeGen/MemAccess/codegen_address_space.ll
@@ -1,4 +1,4 @@
-;RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-codegen < %s -S | FileCheck %s
+;RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' -polly-import-jscop-postfix=transformed < %s -S | FileCheck %s
;int A[100];
;
diff --git a/polly/test/CodeGen/MemAccess/codegen_constant_offset.ll b/polly/test/CodeGen/MemAccess/codegen_constant_offset.ll
index e008a789fe7d..0563ca87eef5 100644
--- a/polly/test/CodeGen/MemAccess/codegen_constant_offset.ll
+++ b/polly/test/CodeGen/MemAccess/codegen_constant_offset.ll
@@ -1,4 +1,4 @@
-;RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-codegen < %s -S | FileCheck %s
+;RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' -polly-import-jscop-postfix=transformed < %s -S | FileCheck %s
;int A[100];
;
diff --git a/polly/test/CodeGen/MemAccess/codegen_simple.ll b/polly/test/CodeGen/MemAccess/codegen_simple.ll
index 5ba6f3269fb9..ee0187fe97d2 100644
--- a/polly/test/CodeGen/MemAccess/codegen_simple.ll
+++ b/polly/test/CodeGen/MemAccess/codegen_simple.ll
@@ -1,4 +1,4 @@
-;RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-codegen < %s -S | FileCheck %s
+;RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' -polly-import-jscop-postfix=transformed < %s -S | FileCheck %s
;int A[100];
;
diff --git a/polly/test/CodeGen/MemAccess/codegen_simple_float.ll b/polly/test/CodeGen/MemAccess/codegen_simple_float.ll
index cf8913fc5197..6970565bf023 100644
--- a/polly/test/CodeGen/MemAccess/codegen_simple_float.ll
+++ b/polly/test/CodeGen/MemAccess/codegen_simple_float.ll
@@ -1,4 +1,4 @@
-;RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-codegen < %s -S | FileCheck %s
+;RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' -polly-import-jscop-postfix=transformed < %s -S | FileCheck %s
;
;float A[100];
;
diff --git a/polly/test/CodeGen/MemAccess/codegen_simple_md.ll b/polly/test/CodeGen/MemAccess/codegen_simple_md.ll
index e4afcc8d2243..f0896e2bf609 100644
--- a/polly/test/CodeGen/MemAccess/codegen_simple_md.ll
+++ b/polly/test/CodeGen/MemAccess/codegen_simple_md.ll
@@ -1,5 +1,5 @@
-;RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed+withconst -polly-codegen < %s -S | FileCheck -check-prefix=WITHCONST %s
-;RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed+withoutconst -polly-codegen < %s -S | FileCheck -check-prefix=WITHOUTCONST %s
+;RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' -polly-import-jscop-postfix=transformed+withconst < %s -S | FileCheck -check-prefix=WITHCONST %s
+;RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' -polly-import-jscop-postfix=transformed+withoutconst < %s -S | FileCheck -check-prefix=WITHOUTCONST %s
;int A[1040];
;
diff --git a/polly/test/CodeGen/MemAccess/codegen_simple_md_float.ll b/polly/test/CodeGen/MemAccess/codegen_simple_md_float.ll
index c9913f3ed873..99fc36996f08 100644
--- a/polly/test/CodeGen/MemAccess/codegen_simple_md_float.ll
+++ b/polly/test/CodeGen/MemAccess/codegen_simple_md_float.ll
@@ -1,5 +1,5 @@
-;RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed+withconst -polly-codegen < %s -S | FileCheck -check-prefix=WITHCONST %s
-;RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed+withoutconst -polly-codegen < %s -S | FileCheck -check-prefix=WITHOUTCONST %s
+;RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' -polly-import-jscop-postfix=transformed+withconst < %s -S | FileCheck -check-prefix=WITHCONST %s
+;RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' -polly-import-jscop-postfix=transformed+withoutconst < %s -S | FileCheck -check-prefix=WITHOUTCONST %s
;
;float A[1040];
;
diff --git a/polly/test/CodeGen/MemAccess/different_types.ll b/polly/test/CodeGen/MemAccess/different_types.ll
index 624de62911ff..53718194c25a 100644
--- a/polly/test/CodeGen/MemAccess/different_types.ll
+++ b/polly/test/CodeGen/MemAccess/different_types.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -polly-import-jscop \
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' \
; RUN: \
-; RUN: -polly-codegen -S < %s | FileCheck %s
+; RUN: -S < %s | FileCheck %s
;
; void foo(float A[], float B[]) {
; for (long i = 0; i < 100; i++)
diff --git a/polly/test/CodeGen/MemAccess/generate-all.ll b/polly/test/CodeGen/MemAccess/generate-all.ll
index 6f92ba13587e..d1f695d436da 100644
--- a/polly/test/CodeGen/MemAccess/generate-all.ll
+++ b/polly/test/CodeGen/MemAccess/generate-all.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -polly-codegen -polly-codegen-generate-expressions=false \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-codegen-generate-expressions=false \
; RUN: -S < %s | FileCheck %s -check-prefix=SCEV
-; RUN: opt %loadPolly -polly-codegen -polly-codegen-generate-expressions=true \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-codegen-generate-expressions=true \
; RUN: -S < %s | FileCheck %s -check-prefix=ASTEXPR
;
; void foo(float A[]) {
diff --git a/polly/test/CodeGen/MemAccess/invariant_base_ptr.ll b/polly/test/CodeGen/MemAccess/invariant_base_ptr.ll
index a6d1de0aac63..5c926ac63841 100644
--- a/polly/test/CodeGen/MemAccess/invariant_base_ptr.ll
+++ b/polly/test/CodeGen/MemAccess/invariant_base_ptr.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-import-jscop \
-; RUN: -polly-codegen -polly-invariant-load-hoisting -S \
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' \
+; RUN: -polly-invariant-load-hoisting -S \
; RUN: 2>&1 < %s | FileCheck %s
; Setting new access functions where the base pointer of the array that is newly
diff --git a/polly/test/CodeGen/MemAccess/multiple_types.ll b/polly/test/CodeGen/MemAccess/multiple_types.ll
index 1793bd30fc5b..7848977ce031 100644
--- a/polly/test/CodeGen/MemAccess/multiple_types.ll
+++ b/polly/test/CodeGen/MemAccess/multiple_types.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-import-jscop \
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=polly-import-jscop,polly-codegen' \
; RUN: -polly-allow-differing-element-types \
-; RUN: -polly-codegen -S < %s | FileCheck %s
+; RUN: -S < %s | FileCheck %s
;
; // Check that accessing one array with different types works.
; void multiple_types(char *Short, char *Float, char *Double) {
diff --git a/polly/test/CodeGen/MemAccess/simple.ll b/polly/test/CodeGen/MemAccess/simple.ll
index 39e8a2c91b79..5077e1a1b5a2 100644
--- a/polly/test/CodeGen/MemAccess/simple.ll
+++ b/polly/test/CodeGen/MemAccess/simple.ll
@@ -1,4 +1,4 @@
-;RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -stats < %s 2>&1 | FileCheck %s
+;RUN: opt %loadNPMPolly -passes=polly-import-jscop -polly-import-jscop-postfix=transformed -stats < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
;int A[100];
diff --git a/polly/test/CodeGen/MemAccess/update_access_functions.ll b/polly/test/CodeGen/MemAccess/update_access_functions.ll
index 05d208708a36..51fa97adb3c3 100644
--- a/polly/test/CodeGen/MemAccess/update_access_functions.ll
+++ b/polly/test/CodeGen/MemAccess/update_access_functions.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-import-jscop \
-; RUN: -polly-import-jscop-postfix=transformed -polly-codegen \
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' \
+; RUN: -polly-import-jscop-postfix=transformed \
; RUN: < %s -S | FileCheck %s
; CHECK-LABEL: polly.stmt.loop1:
diff --git a/polly/test/CodeGen/OpenMP/alias-metadata.ll b/polly/test/CodeGen/OpenMP/alias-metadata.ll
index 07d79631b2cb..b80b18f43326 100644
--- a/polly/test/CodeGen/OpenMP/alias-metadata.ll
+++ b/polly/test/CodeGen/OpenMP/alias-metadata.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-parallel -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-parallel -S < %s | FileCheck %s
;
; void foo(float *A, float *B) {
; for (long i = 0; i < 1000; i++)
diff --git a/polly/test/CodeGen/OpenMP/floord-as-argument-to-subfunction.ll b/polly/test/CodeGen/OpenMP/floord-as-argument-to-subfunction.ll
index eb9dfcd9e920..9eb7f5f2a5e9 100644
--- a/polly/test/CodeGen/OpenMP/floord-as-argument-to-subfunction.ll
+++ b/polly/test/CodeGen/OpenMP/floord-as-argument-to-subfunction.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-opt-isl -polly-opt-max-coefficient=-1 -polly-parallel -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-opt-isl,polly-codegen' -polly-opt-max-coefficient=-1 -polly-parallel -S < %s | FileCheck %s
;
; Check that we do not crash but generate parallel code
;
diff --git a/polly/test/CodeGen/OpenMP/inlineasm.ll b/polly/test/CodeGen/OpenMP/inlineasm.ll
index 69b1b0aa53f3..82a73780886e 100644
--- a/polly/test/CodeGen/OpenMP/inlineasm.ll
+++ b/polly/test/CodeGen/OpenMP/inlineasm.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-opt-isl -polly-parallel -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-opt-isl,polly-codegen' -polly-parallel -S < %s | FileCheck %s
; llvm.org/PR51960
; CHECK-LABEL: define internal void @foo_polly_subfn
diff --git a/polly/test/CodeGen/OpenMP/invariant_base_pointer_preloaded.ll b/polly/test/CodeGen/OpenMP/invariant_base_pointer_preloaded.ll
index 30beef5b0709..b4c61d197b42 100644
--- a/polly/test/CodeGen/OpenMP/invariant_base_pointer_preloaded.ll
+++ b/polly/test/CodeGen/OpenMP/invariant_base_pointer_preloaded.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -polly-parallel \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -polly-parallel \
; RUN: -polly-parallel-force -S < %s | FileCheck %s
;
; Test to verify that we hand down the preloaded A[0] to the OpenMP subfunction.
diff --git a/polly/test/CodeGen/OpenMP/invariant_base_pointer_preloaded_different_bb.ll b/polly/test/CodeGen/OpenMP/invariant_base_pointer_preloaded_different_bb.ll
index fe5d2ab8c96d..8cf6148a7b44 100644
--- a/polly/test/CodeGen/OpenMP/invariant_base_pointer_preloaded_different_bb.ll
+++ b/polly/test/CodeGen/OpenMP/invariant_base_pointer_preloaded_different_bb.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -polly-parallel \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -polly-parallel \
; RUN: -polly-parallel-force -S < %s | FileCheck %s
;
; Test to verify that we hand down the preloaded A[0] to the OpenMP subfunction.
diff --git a/polly/test/CodeGen/OpenMP/invariant_base_pointer_preloaded_pass_only_needed.ll b/polly/test/CodeGen/OpenMP/invariant_base_pointer_preloaded_pass_only_needed.ll
index 49b9321c40b8..823e5cab55ab 100644
--- a/polly/test/CodeGen/OpenMP/invariant_base_pointer_preloaded_pass_only_needed.ll
+++ b/polly/test/CodeGen/OpenMP/invariant_base_pointer_preloaded_pass_only_needed.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -polly-parallel \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -polly-parallel \
; RUN: -polly-parallel-force -S < %s | FileCheck %s
;
; Test to verify that we hand down the preloaded A[0] to the OpenMP subfunction but
diff --git a/polly/test/CodeGen/OpenMP/invariant_base_pointers_preloaded.ll b/polly/test/CodeGen/OpenMP/invariant_base_pointers_preloaded.ll
index 06c4cdab45f1..5557839e715e 100644
--- a/polly/test/CodeGen/OpenMP/invariant_base_pointers_preloaded.ll
+++ b/polly/test/CodeGen/OpenMP/invariant_base_pointers_preloaded.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -polly-parallel \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -polly-parallel \
; RUN: -polly-parallel-force -S < %s | FileCheck %s
;
; Test to verify that we hand down the preloaded A[0] to the OpenMP subfunction.
diff --git a/polly/test/CodeGen/OpenMP/loop-body-references-outer-iv.ll b/polly/test/CodeGen/OpenMP/loop-body-references-outer-iv.ll
index db58c3ab7593..a987fac31b74 100644
--- a/polly/test/CodeGen/OpenMP/loop-body-references-outer-iv.ll
+++ b/polly/test/CodeGen/OpenMP/loop-body-references-outer-iv.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-print-ast -disable-output < %s | FileCheck %s -check-prefix=AST
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-codegen -S -verify-dom-info < %s | FileCheck %s -check-prefix=IR
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s -check-prefix=AST
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force -passes=polly-codegen -S -verify-dom-info < %s | FileCheck %s -check-prefix=IR
; This code has failed the scev based code generation as the scev in the scop
; contains an AddRecExpr of an outer loop. When generating code, we did not
diff --git a/polly/test/CodeGen/OpenMP/loop-body-references-outer-values-2.ll b/polly/test/CodeGen/OpenMP/loop-body-references-outer-values-2.ll
index c2ddc1e26496..b81e120f8c22 100644
--- a/polly/test/CodeGen/OpenMP/loop-body-references-outer-values-2.ll
+++ b/polly/test/CodeGen/OpenMP/loop-body-references-outer-values-2.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-print-ast -disable-output < %s | FileCheck %s -check-prefix=AST
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-codegen -S -verify-dom-info < %s | FileCheck %s -check-prefix=IR
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s -check-prefix=AST
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force -passes=polly-codegen -S -verify-dom-info < %s | FileCheck %s -check-prefix=IR
; AST: #pragma simd
; AST: #pragma omp parallel for
diff --git a/polly/test/CodeGen/OpenMP/loop-body-references-outer-values-3.ll b/polly/test/CodeGen/OpenMP/loop-body-references-outer-values-3.ll
index 0f025bb94112..c4ad665c7b6c 100644
--- a/polly/test/CodeGen/OpenMP/loop-body-references-outer-values-3.ll
+++ b/polly/test/CodeGen/OpenMP/loop-body-references-outer-values-3.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -basic-aa -polly-parallel -polly-parallel-force -polly-invariant-load-hoisting=true -polly-print-ast -disable-output < %s | FileCheck %s -check-prefix=AST
-; RUN: opt %loadPolly -basic-aa -polly-parallel -polly-parallel-force -polly-invariant-load-hoisting=true -polly-codegen -S -verify-dom-info < %s | FileCheck %s -check-prefix=IR
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-parallel -polly-parallel-force -polly-invariant-load-hoisting=true '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s -check-prefix=AST
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-parallel -polly-parallel-force -polly-invariant-load-hoisting=true -passes=polly-codegen -S -verify-dom-info < %s | FileCheck %s -check-prefix=IR
; The interesting part of this test case is the instruction:
; %tmp = bitcast i8* %call to i64**
diff --git a/polly/test/CodeGen/OpenMP/loop-body-references-outer-values.ll b/polly/test/CodeGen/OpenMP/loop-body-references-outer-values.ll
index f9612d77533d..07aae42335b6 100644
--- a/polly/test/CodeGen/OpenMP/loop-body-references-outer-values.ll
+++ b/polly/test/CodeGen/OpenMP/loop-body-references-outer-values.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-print-ast -disable-output < %s | FileCheck %s -check-prefix=AST
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-codegen -S < %s | FileCheck %s -check-prefix=IR
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s -check-prefix=AST
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force -passes=polly-codegen -S < %s | FileCheck %s -check-prefix=IR
; Make sure we correctly forward the reference to 'A' to the OpenMP subfunction.
;
diff --git a/polly/test/CodeGen/OpenMP/loop-bounds-reference-outer-ids.ll b/polly/test/CodeGen/OpenMP/loop-bounds-reference-outer-ids.ll
index da9da18c89b2..27e1bdd2dfbd 100644
--- a/polly/test/CodeGen/OpenMP/loop-bounds-reference-outer-ids.ll
+++ b/polly/test/CodeGen/OpenMP/loop-bounds-reference-outer-ids.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-parallel -polly-print-ast -disable-output < %s | FileCheck %s -check-prefix=AST
-; RUN: opt %loadPolly -polly-parallel -polly-codegen -S < %s | FileCheck %s -check-prefix=IR
+; RUN: opt %loadNPMPolly -polly-parallel '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s -check-prefix=AST
+; RUN: opt %loadNPMPolly -polly-parallel -passes=polly-codegen -S < %s | FileCheck %s -check-prefix=IR
;
; float A[100];
;
diff --git a/polly/test/CodeGen/OpenMP/mapped-phi-access.ll b/polly/test/CodeGen/OpenMP/mapped-phi-access.ll
index 1b8433693abf..ac78b4e6c0c5 100644
--- a/polly/test/CodeGen/OpenMP/mapped-phi-access.ll
+++ b/polly/test/CodeGen/OpenMP/mapped-phi-access.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-parallel -polly-delicm -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-parallel '-passes=polly-delicm,polly-codegen' -S < %s | FileCheck %s
;
; Verify that -polly-parallel can handle mapped scalar MemoryAccesses.
;
diff --git a/polly/test/CodeGen/OpenMP/matmul-parallel.ll b/polly/test/CodeGen/OpenMP/matmul-parallel.ll
index 5ee9a7c7a824..43326b29f7ef 100644
--- a/polly/test/CodeGen/OpenMP/matmul-parallel.ll
+++ b/polly/test/CodeGen/OpenMP/matmul-parallel.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-parallel -polly-opt-isl -polly-ast -disable-output -debug-only=polly-ast < %s 2>&1 | FileCheck --check-prefix=AST %s
-; RUN: opt %loadPolly -polly-parallel -polly-opt-isl -polly-codegen -S < %s | FileCheck --check-prefix=CODEGEN %s
+; RUN: opt %loadNPMPolly -polly-parallel '-passes=polly-opt-isl,print<polly-ast>' -disable-output -debug-only=polly-ast < %s 2>&1 | FileCheck --check-prefix=AST %s
+; RUN: opt %loadNPMPolly -polly-parallel '-passes=polly-opt-isl,polly-codegen' -S < %s | FileCheck --check-prefix=CODEGEN %s
; REQUIRES: asserts
; Parallelization of detected matrix-multiplication.
diff --git a/polly/test/CodeGen/OpenMP/recomputed-srem.ll b/polly/test/CodeGen/OpenMP/recomputed-srem.ll
index cfae8e943cf1..67db35ae2ca2 100644
--- a/polly/test/CodeGen/OpenMP/recomputed-srem.ll
+++ b/polly/test/CodeGen/OpenMP/recomputed-srem.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-codegen -polly-parallel \
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -passes=polly-codegen -polly-parallel \
; RUN: -polly-parallel-force -S < %s | FileCheck %s
;
; Test to verify that we pass %rem96 to the parallel subfunction.
diff --git a/polly/test/CodeGen/OpenMP/reference-argument-from-non-affine-region.ll b/polly/test/CodeGen/OpenMP/reference-argument-from-non-affine-region.ll
index f243c3a04949..96dc4250cd05 100644
--- a/polly/test/CodeGen/OpenMP/reference-argument-from-non-affine-region.ll
+++ b/polly/test/CodeGen/OpenMP/reference-argument-from-non-affine-region.ll
@@ -1,15 +1,15 @@
-; RUN: opt %loadPolly -polly-parallel \
-; RUN: -polly-parallel-force -polly-codegen \
+; RUN: opt %loadNPMPolly -polly-parallel \
+; RUN: -polly-parallel-force -passes=polly-codegen \
; RUN: -S -verify-dom-info < %s \
; RUN: | FileCheck %s -check-prefix=IR
-; RUN: opt %loadPolly -polly-parallel \
-; RUN: -polly-parallel-force -polly-codegen -polly-scheduling=runtime \
+; RUN: opt %loadNPMPolly -polly-parallel \
+; RUN: -polly-parallel-force -passes=polly-codegen -polly-scheduling=runtime \
; RUN: -S -verify-dom-info < %s \
; RUN: | FileCheck %s -check-prefix=IR
-; RUN: opt %loadPolly -polly-parallel \
-; RUN: -polly-parallel-force -polly-codegen -polly-omp-backend=LLVM \
+; RUN: opt %loadNPMPolly -polly-parallel \
+; RUN: -polly-parallel-force -passes=polly-codegen -polly-omp-backend=LLVM \
; RUN: -S -verify-dom-info < %s \
; RUN: | FileCheck %s -check-prefix=LIBOMP-IR
diff --git a/polly/test/CodeGen/OpenMP/reference-other-bb.ll b/polly/test/CodeGen/OpenMP/reference-other-bb.ll
index b7abdc23d258..dbfbd9a90508 100644
--- a/polly/test/CodeGen/OpenMP/reference-other-bb.ll
+++ b/polly/test/CodeGen/OpenMP/reference-other-bb.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-codegen -S -verify-dom-info < %s | FileCheck %s -check-prefix=IR
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force -passes=polly-codegen -S -verify-dom-info < %s | FileCheck %s -check-prefix=IR
; IR: @foo_polly_subfn
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/CodeGen/OpenMP/reference-preceeding-loop.ll b/polly/test/CodeGen/OpenMP/reference-preceeding-loop.ll
index b88589f39a6f..ee43b8aa34a4 100644
--- a/polly/test/CodeGen/OpenMP/reference-preceeding-loop.ll
+++ b/polly/test/CodeGen/OpenMP/reference-preceeding-loop.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-print-ast -disable-output < %s | FileCheck %s -check-prefix=AST
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-codegen -S -verify-dom-info < %s | FileCheck %s -check-prefix=IR
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s -check-prefix=AST
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force -passes=polly-codegen -S -verify-dom-info < %s | FileCheck %s -check-prefix=IR
; - Test the case where scalar evolution references a loop that is outside
diff --git a/polly/test/CodeGen/OpenMP/reference_latest.ll b/polly/test/CodeGen/OpenMP/reference_latest.ll
index 54875c2630f0..7a8cd77bb157 100644
--- a/polly/test/CodeGen/OpenMP/reference_latest.ll
+++ b/polly/test/CodeGen/OpenMP/reference_latest.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-delicm -polly-simplify -polly-parallel -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-delicm,polly-simplify,polly-codegen' -polly-parallel -S < %s | FileCheck %s
;
; Test that parallel codegen handles scalars mapped to other arrays.
; After mapping "store double %add10" references the array "MemRef2".
diff --git a/polly/test/CodeGen/OpenMP/scev-rewriting.ll b/polly/test/CodeGen/OpenMP/scev-rewriting.ll
index 1b229fc19d25..9b79f2909448 100644
--- a/polly/test/CodeGen/OpenMP/scev-rewriting.ll
+++ b/polly/test/CodeGen/OpenMP/scev-rewriting.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly < %s -polly-vectorizer=stripmine -polly-parallel -polly-parallel-force -polly-process-unprofitable -polly-codegen -S | FileCheck %s
+; RUN: opt %loadNPMPolly < %s -polly-vectorizer=stripmine -polly-parallel -polly-parallel-force -polly-process-unprofitable -passes=polly-codegen -S | FileCheck %s
; CHECK: define internal void @DoStringSort_polly_subfn
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-unknown-linux-gnueabi"
diff --git a/polly/test/CodeGen/OpenMP/single_loop.ll b/polly/test/CodeGen/OpenMP/single_loop.ll
index f79653a08d21..e5aee840ade7 100644
--- a/polly/test/CodeGen/OpenMP/single_loop.ll
+++ b/polly/test/CodeGen/OpenMP/single_loop.ll
@@ -1,14 +1,14 @@
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-print-ast -disable-output < %s | FileCheck %s -check-prefix=AST
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-codegen -S -verify-dom-info < %s | FileCheck %s -check-prefix=IR
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s -check-prefix=AST
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force -passes=polly-codegen -S -verify-dom-info < %s | FileCheck %s -check-prefix=IR
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-import-jscop -polly-print-ast -disable-output < %s | FileCheck %s -check-prefix=AST-STRIDE4
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-import-jscop -polly-codegen -S < %s | FileCheck %s -check-prefix=IR-STRIDE4
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force '-passes=polly-import-jscop,print<polly-ast>' -disable-output < %s | FileCheck %s -check-prefix=AST-STRIDE4
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force '-passes=polly-import-jscop,polly-codegen' -S < %s | FileCheck %s -check-prefix=IR-STRIDE4
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-codegen -polly-omp-backend=LLVM -polly-scheduling=static -polly-scheduling-chunksize=43 -S -verify-dom-info < %s | FileCheck %s -check-prefix=LIBOMP-IR-STATIC-CHUNKED
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-codegen -polly-omp-backend=LLVM -polly-scheduling=static -S -verify-dom-info < %s | FileCheck %s -check-prefix=LIBOMP-IR-STATIC
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-codegen -polly-omp-backend=LLVM -polly-scheduling=dynamic -S -verify-dom-info < %s | FileCheck %s -check-prefix=LIBOMP-IR-DYNAMIC
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-codegen -polly-omp-backend=LLVM -polly-scheduling=dynamic -polly-scheduling-chunksize=4 -S -verify-dom-info < %s | FileCheck %s -check-prefix=LIBOMP-IR-DYNAMIC-FOUR
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-import-jscop -polly-codegen -polly-omp-backend=LLVM -S < %s | FileCheck %s -check-prefix=LIBOMP-IR-STRIDE4
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force -passes=polly-codegen -polly-omp-backend=LLVM -polly-scheduling=static -polly-scheduling-chunksize=43 -S -verify-dom-info < %s | FileCheck %s -check-prefix=LIBOMP-IR-STATIC-CHUNKED
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force -passes=polly-codegen -polly-omp-backend=LLVM -polly-scheduling=static -S -verify-dom-info < %s | FileCheck %s -check-prefix=LIBOMP-IR-STATIC
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force -passes=polly-codegen -polly-omp-backend=LLVM -polly-scheduling=dynamic -S -verify-dom-info < %s | FileCheck %s -check-prefix=LIBOMP-IR-DYNAMIC
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force -passes=polly-codegen -polly-omp-backend=LLVM -polly-scheduling=dynamic -polly-scheduling-chunksize=4 -S -verify-dom-info < %s | FileCheck %s -check-prefix=LIBOMP-IR-DYNAMIC-FOUR
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force '-passes=polly-import-jscop,polly-codegen' -polly-omp-backend=LLVM -S < %s | FileCheck %s -check-prefix=LIBOMP-IR-STRIDE4
; This extensive test case tests the creation of the full set of OpenMP calls
; as well as the subfunction creation using a trivial loop as example.
diff --git a/polly/test/CodeGen/OpenMP/single_loop_with_loop_invariant_baseptr.ll b/polly/test/CodeGen/OpenMP/single_loop_with_loop_invariant_baseptr.ll
index 50da5dd2b7c0..c519bfdee7a5 100644
--- a/polly/test/CodeGen/OpenMP/single_loop_with_loop_invariant_baseptr.ll
+++ b/polly/test/CodeGen/OpenMP/single_loop_with_loop_invariant_baseptr.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -tbaa -polly-parallel -polly-parallel-force -polly-parallel-force -polly-invariant-load-hoisting=true -polly-print-ast -disable-output < %s | FileCheck %s -check-prefix=AST
-; RUN: opt %loadPolly -tbaa -polly-parallel -polly-parallel-force -polly-parallel-force -polly-invariant-load-hoisting=true -polly-codegen -S -verify-dom-info < %s | FileCheck %s -check-prefix=IR
+; RUN: opt %loadNPMPolly -aa-pipeline=tbaa -polly-parallel -polly-parallel-force -polly-parallel-force -polly-invariant-load-hoisting=true '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s -check-prefix=AST
+; RUN: opt %loadNPMPolly -aa-pipeline=tbaa -polly-parallel -polly-parallel-force -polly-parallel-force -polly-invariant-load-hoisting=true -passes=polly-codegen -S -verify-dom-info < %s | FileCheck %s -check-prefix=IR
; #define N 1024
; float A[N];
diff --git a/polly/test/CodeGen/OpenMP/single_loop_with_param.ll b/polly/test/CodeGen/OpenMP/single_loop_with_param.ll
index d01b7a2fdcad..f6dfd62d6bcc 100644
--- a/polly/test/CodeGen/OpenMP/single_loop_with_param.ll
+++ b/polly/test/CodeGen/OpenMP/single_loop_with_param.ll
@@ -1,15 +1,15 @@
-; RUN: opt %loadPolly -polly-parallel \
-; RUN: -polly-parallel-force -polly-codegen \
+; RUN: opt %loadNPMPolly -polly-parallel \
+; RUN: -polly-parallel-force -passes=polly-codegen \
; RUN: -S -verify-dom-info < %s \
; RUN: | FileCheck %s -check-prefix=IR
-; RUN: opt %loadPolly -polly-parallel \
-; RUN: -polly-parallel-force -polly-codegen -polly-omp-backend=LLVM \
+; RUN: opt %loadNPMPolly -polly-parallel \
+; RUN: -polly-parallel-force -passes=polly-codegen -polly-omp-backend=LLVM \
; RUN: -S -verify-dom-info < %s \
; RUN: | FileCheck %s -check-prefix=LIBOMP-IR
-; RUN: opt %loadPolly -polly-parallel \
-; RUN: -polly-parallel-force -polly-codegen -polly-omp-backend=LLVM \
+; RUN: opt %loadNPMPolly -polly-parallel \
+; RUN: -polly-parallel-force -passes=polly-codegen -polly-omp-backend=LLVM \
; RUN: -polly-scheduling=static \
; RUN: -S -verify-dom-info < %s \
; RUN: | FileCheck %s -check-prefix=LIBOMP-STATIC-IR
diff --git a/polly/test/CodeGen/OpenMP/two-parallel-loops-reference-outer-indvar.ll b/polly/test/CodeGen/OpenMP/two-parallel-loops-reference-outer-indvar.ll
index 05c6ed177e9c..934e04461f13 100644
--- a/polly/test/CodeGen/OpenMP/two-parallel-loops-reference-outer-indvar.ll
+++ b/polly/test/CodeGen/OpenMP/two-parallel-loops-reference-outer-indvar.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-print-ast -disable-output < %s | FileCheck %s -check-prefix=AST
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-codegen -S -verify-dom-info < %s | FileCheck %s -check-prefix=IR
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s -check-prefix=AST
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force -passes=polly-codegen -S -verify-dom-info < %s | FileCheck %s -check-prefix=IR
; This test case verifies that we create correct code even if two OpenMP loops
; share common outer variables.
diff --git a/polly/test/CodeGen/PHIInExit.ll b/polly/test/CodeGen/PHIInExit.ll
index eadd6054386b..3e0c9d67d5ca 100644
--- a/polly/test/CodeGen/PHIInExit.ll
+++ b/polly/test/CodeGen/PHIInExit.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
%struct..0__pthread_mutex_s = type { i32, i32, i32, i32, i32, i32, %struct.__pthread_list_t }
diff --git a/polly/test/CodeGen/RuntimeDebugBuilder/combine_different_values.ll b/polly/test/CodeGen/RuntimeDebugBuilder/combine_different_values.ll
index 84827dd26049..76b2fa9a35b2 100644
--- a/polly/test/CodeGen/RuntimeDebugBuilder/combine_different_values.ll
+++ b/polly/test/CodeGen/RuntimeDebugBuilder/combine_different_values.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S \
; RUN: -polly-codegen-add-debug-printing \
; RUN: -polly-ignore-aliasing < %s | FileCheck %s
diff --git a/polly/test/CodeGen/RuntimeDebugBuilder/stmt_tracing.ll b/polly/test/CodeGen/RuntimeDebugBuilder/stmt_tracing.ll
index 822eccc306ef..4ffb7fd6e462 100644
--- a/polly/test/CodeGen/RuntimeDebugBuilder/stmt_tracing.ll
+++ b/polly/test/CodeGen/RuntimeDebugBuilder/stmt_tracing.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen-trace-stmts -polly-codegen-trace-scalars -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-codegen-trace-stmts -polly-codegen-trace-scalars -passes=polly-codegen -S < %s | FileCheck %s
;
define void @func(i32 %n, ptr %A) {
diff --git a/polly/test/CodeGen/alias-check-multi-dim.ll b/polly/test/CodeGen/alias-check-multi-dim.ll
index d923a4cc14fd..0440bda74b39 100644
--- a/polly/test/CodeGen/alias-check-multi-dim.ll
+++ b/polly/test/CodeGen/alias-check-multi-dim.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen \
+; RUN: opt %loadNPMPolly -passes=polly-codegen \
; RUN: -S < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/CodeGen/alias_metadata_too_many_arrays.ll b/polly/test/CodeGen/alias_metadata_too_many_arrays.ll
index 7c5ca012a378..68c17a807e8e 100644
--- a/polly/test/CodeGen/alias_metadata_too_many_arrays.ll
+++ b/polly/test/CodeGen/alias_metadata_too_many_arrays.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-ignore-aliasing -S < %s \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-ignore-aliasing -S < %s \
; RUN: | FileCheck %s
;
; void manyarrays(float A1[], float A2[], float A3[], float A4[], float A5[],
diff --git a/polly/test/CodeGen/aliasing_different_base_and_access_type.ll b/polly/test/CodeGen/aliasing_different_base_and_access_type.ll
index a087414b8403..8e1fc3b32835 100644
--- a/polly/test/CodeGen/aliasing_different_base_and_access_type.ll
+++ b/polly/test/CodeGen/aliasing_different_base_and_access_type.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s | FileCheck %s
;
; We have to cast %B to "short *" before we create RTCs.
;
diff --git a/polly/test/CodeGen/aliasing_different_pointer_types.ll b/polly/test/CodeGen/aliasing_different_pointer_types.ll
index 91f5eab6b2a6..e601c22b978d 100644
--- a/polly/test/CodeGen/aliasing_different_pointer_types.ll
+++ b/polly/test/CodeGen/aliasing_different_pointer_types.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; Check that we cast the different pointer types correctly before we compare
; them in the RTCs. We use i8* as the max pointer type.
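As a hedged illustration of what such a runtime check (RTC) computes, assuming the usual disjoint-byte-range formulation, a C analogue could be:

```c
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical C analogue of an RTC: the generated IR casts both pointers
   to a common width and proves the accessed byte ranges are disjoint. */
static bool ranges_disjoint(const void *a_begin, const void *a_end,
                            const void *b_begin, const void *b_end) {
  uintptr_t ab = (uintptr_t)a_begin, ae = (uintptr_t)a_end;
  uintptr_t bb = (uintptr_t)b_begin, be = (uintptr_t)b_end;
  return ae <= bb || be <= ab; /* one range ends before the other begins */
}
```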
diff --git a/polly/test/CodeGen/aliasing_multidimensional_access.ll b/polly/test/CodeGen/aliasing_multidimensional_access.ll
index 48768399e850..e1dae03280a0 100644
--- a/polly/test/CodeGen/aliasing_multidimensional_access.ll
+++ b/polly/test/CodeGen/aliasing_multidimensional_access.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s | FileCheck %s
;
; Check that we calculate the maximal access into array A correctly and track the overflow state.
;
diff --git a/polly/test/CodeGen/aliasing_parametric_simple_1.ll b/polly/test/CodeGen/aliasing_parametric_simple_1.ll
index 5422da4426e9..a79ba2532535 100644
--- a/polly/test/CodeGen/aliasing_parametric_simple_1.ll
+++ b/polly/test/CodeGen/aliasing_parametric_simple_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; void jd(int *A, int *B, int c) {
; for (int i = 0; i < 1024; i++)
diff --git a/polly/test/CodeGen/aliasing_parametric_simple_2.ll b/polly/test/CodeGen/aliasing_parametric_simple_2.ll
index de945d403f92..efe4af1c9e7c 100644
--- a/polly/test/CodeGen/aliasing_parametric_simple_2.ll
+++ b/polly/test/CodeGen/aliasing_parametric_simple_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; void jd(int *A, int *B, int c) {
; for (int i = 0; i < 1024; i++)
diff --git a/polly/test/CodeGen/aliasing_struct_element.ll b/polly/test/CodeGen/aliasing_struct_element.ll
index 2219ca9d28bb..3079e58d7dab 100644
--- a/polly/test/CodeGen/aliasing_struct_element.ll
+++ b/polly/test/CodeGen/aliasing_struct_element.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s | FileCheck %s
;
; We should only access (or compute the address of) "the first element" of %S
; as it is a single struct, not a struct array. The maximal access to S, thus
diff --git a/polly/test/CodeGen/alignment.ll b/polly/test/CodeGen/alignment.ll
index a94b1f7e2883..e0f6a959476f 100644
--- a/polly/test/CodeGen/alignment.ll
+++ b/polly/test/CodeGen/alignment.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; Check that the special alignment information is kept
;
diff --git a/polly/test/CodeGen/annotated_alias_scopes.ll b/polly/test/CodeGen/annotated_alias_scopes.ll
index f8d14cd34b62..b1777a1b5f5d 100644
--- a/polly/test/CodeGen/annotated_alias_scopes.ll
+++ b/polly/test/CodeGen/annotated_alias_scopes.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s --check-prefix=SCOPES
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s --check-prefix=SCOPES
;
; Check that we create alias scopes that indicate the accesses to A, B and C cannot alias in any way.
;
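A hedged source-level analogue of these alias scopes, using C's restrict qualifier (illustrative only, not the test's actual input):

```c
/* Hypothetical source-level analogue: 'restrict' asserts that A, B and C
   do not alias, the fact Polly records as !alias.scope / !noalias
   metadata once its runtime checks have proven disjointness. */
void add_arrays(float *restrict A, const float *restrict B,
                const float *restrict C, long n) {
  for (long i = 0; i < n; ++i)
    A[i] = B[i] + C[i]; /* each access carries its own scope */
}
```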
diff --git a/polly/test/CodeGen/blas_sscal_simplified.ll b/polly/test/CodeGen/blas_sscal_simplified.ll
index a370fcff46f8..99f2eae9dd8e 100644
--- a/polly/test/CodeGen/blas_sscal_simplified.ll
+++ b/polly/test/CodeGen/blas_sscal_simplified.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
;
; Regression test for a bug in the runtime check generation.
diff --git a/polly/test/CodeGen/conflict-between-loop-invariant-code-hosting-and-escape-map-computation.ll b/polly/test/CodeGen/conflict-between-loop-invariant-code-hosting-and-escape-map-computation.ll
index e0f8c435879a..5dba93373b70 100644
--- a/polly/test/CodeGen/conflict-between-loop-invariant-code-hosting-and-escape-map-computation.ll
+++ b/polly/test/CodeGen/conflict-between-loop-invariant-code-hosting-and-escape-map-computation.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-process-unprofitable -polly-codegen -disable-output < %s
+; RUN: opt %loadNPMPolly -polly-process-unprofitable -passes=polly-codegen -disable-output < %s
;
; CHECK: store i32 %tmp14_p_scalar_, ptr %tmp14.s2a
; CHECK: %tmp14.final_reload = load i32, ptr %tmp14.s2a
diff --git a/polly/test/CodeGen/constant_condition.ll b/polly/test/CodeGen/constant_condition.ll
index dad1f6cffd17..905aa52df508 100644
--- a/polly/test/CodeGen/constant_condition.ll
+++ b/polly/test/CodeGen/constant_condition.ll
@@ -1,4 +1,4 @@
-;RUN: opt %loadPolly -polly-prepare -polly-print-ast -disable-output < %s | FileCheck %s
+;RUN: opt %loadNPMPolly '-passes=polly-prepare,scop(print<polly-ast>)' -disable-output < %s 2>&1 | FileCheck %s
;#include <string.h>
;int A[1];
diff --git a/polly/test/CodeGen/create-conditional-scop.ll b/polly/test/CodeGen/create-conditional-scop.ll
index f51a2dcc9b3c..b8c9a81b71a9 100644
--- a/polly/test/CodeGen/create-conditional-scop.ll
+++ b/polly/test/CodeGen/create-conditional-scop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-codegen -verify-loop-info < %s -S | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -passes=polly-codegen -verify-loop-info < %s -S | FileCheck %s
target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-a0:0-n32"
diff --git a/polly/test/CodeGen/dead_invariant_load_instruction_referenced_by_parameter_1.ll b/polly/test/CodeGen/dead_invariant_load_instruction_referenced_by_parameter_1.ll
index 991e3c83eef1..6ffe6bf67d54 100644
--- a/polly/test/CodeGen/dead_invariant_load_instruction_referenced_by_parameter_1.ll
+++ b/polly/test/CodeGen/dead_invariant_load_instruction_referenced_by_parameter_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
;
; Check we do not crash even though the dead %tmp8 is referenced by a parameter
; and we do not pre-load it (as it is dead).
diff --git a/polly/test/CodeGen/dead_invariant_load_instruction_referenced_by_parameter_2.ll b/polly/test/CodeGen/dead_invariant_load_instruction_referenced_by_parameter_2.ll
index 153f6912cea5..68c247a60831 100644
--- a/polly/test/CodeGen/dead_invariant_load_instruction_referenced_by_parameter_2.ll
+++ b/polly/test/CodeGen/dead_invariant_load_instruction_referenced_by_parameter_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
;
; Check we do not crash even though there is a dead load that is referenced by
; a parameter and we do not pre-load it (as it is dead).
diff --git a/polly/test/CodeGen/debug-intrinsics.ll b/polly/test/CodeGen/debug-intrinsics.ll
index 2feeb7c838b0..25c63da4891c 100644
--- a/polly/test/CodeGen/debug-intrinsics.ll
+++ b/polly/test/CodeGen/debug-intrinsics.ll
@@ -1,9 +1,9 @@
-; RUN: opt %loadPolly \
-; RUN: -polly-analyze-read-only-scalars=false -polly-codegen -S < %s | \
+; RUN: opt %loadNPMPolly \
+; RUN: -polly-analyze-read-only-scalars=false -passes=polly-codegen -S < %s | \
; RUN: FileCheck %s
-; RUN: opt %loadPolly \
-; RUN: -polly-analyze-read-only-scalars=true -polly-codegen -S < %s | \
+; RUN: opt %loadNPMPolly \
+; RUN: -polly-analyze-read-only-scalars=true -passes=polly-codegen -S < %s | \
; RUN: FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/CodeGen/dominance_problem_after_early_codegen_bailout.ll b/polly/test/CodeGen/dominance_problem_after_early_codegen_bailout.ll
index c9e006a01204..edc03333a358 100644
--- a/polly/test/CodeGen/dominance_problem_after_early_codegen_bailout.ll
+++ b/polly/test/CodeGen/dominance_problem_after_early_codegen_bailout.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -disable-output < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -disable-output < %s
;
; This caused dominance problems at some point as we do bail out during
; code generation. Just verify it runs through.
diff --git a/polly/test/CodeGen/empty_domain_in_context.ll b/polly/test/CodeGen/empty_domain_in_context.ll
index c67ace9502e1..a2fe805f402e 100644
--- a/polly/test/CodeGen/empty_domain_in_context.ll
+++ b/polly/test/CodeGen/empty_domain_in_context.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-optree -polly-opt-isl -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-optree,polly-opt-isl,polly-codegen' -S < %s | FileCheck %s
;
; llvm.org/PR35362
; isl codegen does not allow generating isl_ast_expr from pw_aff which have an
diff --git a/polly/test/CodeGen/entry_with_trivial_phi.ll b/polly/test/CodeGen/entry_with_trivial_phi.ll
index b057690ab29b..f2c9da04d649 100644
--- a/polly/test/CodeGen/entry_with_trivial_phi.ll
+++ b/polly/test/CodeGen/entry_with_trivial_phi.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s
;
; The entry of this scop's simple region (entry.split => for.end) has a trivial
; PHI node. LCSSA may create such PHI nodes. This is a breakdown of this case in
diff --git a/polly/test/CodeGen/entry_with_trivial_phi_other_bb.ll b/polly/test/CodeGen/entry_with_trivial_phi_other_bb.ll
index 5673cc746b5f..2f1ec1a7872a 100644
--- a/polly/test/CodeGen/entry_with_trivial_phi_other_bb.ll
+++ b/polly/test/CodeGen/entry_with_trivial_phi_other_bb.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; The entry of this scop's simple region (entry.split => for.end) has a trivial
; PHI node that is used in a different block of the scop region. LCSSA may create such
diff --git a/polly/test/CodeGen/error-stmt-in-non-affine-region.ll b/polly/test/CodeGen/error-stmt-in-non-affine-region.ll
index 9832afe7a5fd..63b6becd1957 100644
--- a/polly/test/CodeGen/error-stmt-in-non-affine-region.ll
+++ b/polly/test/CodeGen/error-stmt-in-non-affine-region.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s | FileCheck %s
; XFAIL: *
;
; CHECK-LABEL: polly.stmt.if.then:
diff --git a/polly/test/CodeGen/error_block_contains_invalid_memory_access.ll b/polly/test/CodeGen/error_block_contains_invalid_memory_access.ll
index 048847f3e322..008e16caf9c2 100644
--- a/polly/test/CodeGen/error_block_contains_invalid_memory_access.ll
+++ b/polly/test/CodeGen/error_block_contains_invalid_memory_access.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/CodeGen/exprModDiv.ll b/polly/test/CodeGen/exprModDiv.ll
index 936b018bc1ad..c9b419abe324 100644
--- a/polly/test/CodeGen/exprModDiv.ll
+++ b/polly/test/CodeGen/exprModDiv.ll
@@ -1,7 +1,7 @@
-; RUN: opt %loadPolly -polly-import-jscop \
-; RUN: -polly-codegen -S < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-import-jscop \
-; RUN: -polly-codegen -polly-import-jscop-postfix=pow2 \
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' \
+; RUN: -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' \
+; RUN: -polly-import-jscop-postfix=pow2 \
; RUN: -S < %s | FileCheck %s -check-prefix=POW2
;
; void exprModDiv(float *A, float *B, float *C, long N, long p) {
diff --git a/polly/test/CodeGen/hoisted_load_escapes_through_phi.ll b/polly/test/CodeGen/hoisted_load_escapes_through_phi.ll
index d7588b3b8e00..1ca2413fd5e1 100644
--- a/polly/test/CodeGen/hoisted_load_escapes_through_phi.ll
+++ b/polly/test/CodeGen/hoisted_load_escapes_through_phi.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -S -polly-codegen \
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen \
; RUN: -polly-invariant-load-hoisting=false < %s | FileCheck %s
-; RUN: opt %loadPolly -S -polly-codegen \
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen \
; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s
;
; Check that we generate valid code even if the load of cont_STACKPOINTER is
diff --git a/polly/test/CodeGen/hoisting_1.ll b/polly/test/CodeGen/hoisting_1.ll
index 86b56637bc2c..1f065bec8032 100644
--- a/polly/test/CodeGen/hoisting_1.ll
+++ b/polly/test/CodeGen/hoisting_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -tbaa -polly-codegen -polly-allow-differing-element-types -disable-output %s
+; RUN: opt %loadNPMPolly -aa-pipeline=tbaa -passes=polly-codegen -polly-allow-differing-element-types -disable-output %s
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/CodeGen/hoisting_2.ll b/polly/test/CodeGen/hoisting_2.ll
index 1f1be11c2d98..e76ee066af08 100644
--- a/polly/test/CodeGen/hoisting_2.ll
+++ b/polly/test/CodeGen/hoisting_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -tbaa -polly-codegen -polly-allow-differing-element-types -disable-output %s
+; RUN: opt %loadNPMPolly -aa-pipeline=tbaa -passes=polly-codegen -polly-allow-differing-element-types -disable-output %s
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/CodeGen/inner_scev_sdiv_1.ll b/polly/test/CodeGen/inner_scev_sdiv_1.ll
index 1a463fc178d1..d210105c46ba 100644
--- a/polly/test/CodeGen/inner_scev_sdiv_1.ll
+++ b/polly/test/CodeGen/inner_scev_sdiv_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen < %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s
;
; Excerpt from the test-suite's oggenc reduced using bugpoint.
;
diff --git a/polly/test/CodeGen/inner_scev_sdiv_2.ll b/polly/test/CodeGen/inner_scev_sdiv_2.ll
index 76138034603e..74b914d1d87a 100644
--- a/polly/test/CodeGen/inner_scev_sdiv_2.ll
+++ b/polly/test/CodeGen/inner_scev_sdiv_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s | FileCheck %s
;
; The SCEV expression in this test case refers to a sequence of sdiv
; instructions, which are part of different bbs in the SCoP. When code
diff --git a/polly/test/CodeGen/inner_scev_sdiv_3.ll b/polly/test/CodeGen/inner_scev_sdiv_3.ll
index 874ead14ded2..33440457bd46 100644
--- a/polly/test/CodeGen/inner_scev_sdiv_3.ll
+++ b/polly/test/CodeGen/inner_scev_sdiv_3.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s | FileCheck %s
;
; This test case has an inner SCEV sdiv that will escape the SCoP. Just check we
; do not crash and generate valid code.
diff --git a/polly/test/CodeGen/inner_scev_sdiv_in_lb.ll b/polly/test/CodeGen/inner_scev_sdiv_in_lb.ll
index 6514e18687e4..31c14e85f253 100644
--- a/polly/test/CodeGen/inner_scev_sdiv_in_lb.ll
+++ b/polly/test/CodeGen/inner_scev_sdiv_in_lb.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s --check-prefix=CODEGEN
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s --check-prefix=CODEGEN
;
; CHECK: [N] -> { Stmt_bb11[i0, i1] : i0 < N and i1 >= 0 and 3i1 <= -3 + i0 };
; CODEGEN: polly
diff --git a/polly/test/CodeGen/inner_scev_sdiv_in_lb_invariant.ll b/polly/test/CodeGen/inner_scev_sdiv_in_lb_invariant.ll
index 032942923379..b42371b0891e 100644
--- a/polly/test/CodeGen/inner_scev_sdiv_in_lb_invariant.ll
+++ b/polly/test/CodeGen/inner_scev_sdiv_in_lb_invariant.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen \
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen \
; RUN: < %s | FileCheck %s
;
; Check that this will not crash our code generation.
diff --git a/polly/test/CodeGen/inner_scev_sdiv_in_rtc.ll b/polly/test/CodeGen/inner_scev_sdiv_in_rtc.ll
index f7292ca3073a..45af63402c98 100644
--- a/polly/test/CodeGen/inner_scev_sdiv_in_rtc.ll
+++ b/polly/test/CodeGen/inner_scev_sdiv_in_rtc.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen \
+; RUN: opt %loadNPMPolly -passes=polly-codegen \
; RUN: -S < %s | FileCheck %s
;
; This will just check that we generate valid code here.
diff --git a/polly/test/CodeGen/intrinsics_lifetime.ll b/polly/test/CodeGen/intrinsics_lifetime.ll
index 6141b3abdd8a..6dca218b6386 100644
--- a/polly/test/CodeGen/intrinsics_lifetime.ll
+++ b/polly/test/CodeGen/intrinsics_lifetime.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -passes=polly-codegen -S < %s | FileCheck %s
;
; Verify that we remove the lifetime markers from everywhere.
;
diff --git a/polly/test/CodeGen/intrinsics_misc.ll b/polly/test/CodeGen/intrinsics_misc.ll
index c0a52fe97329..84164893ebf7 100644
--- a/polly/test/CodeGen/intrinsics_misc.ll
+++ b/polly/test/CodeGen/intrinsics_misc.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -passes=polly-codegen -S < %s | FileCheck %s
;
; Verify that we remove the misc intrinsics from the optimized SCoP.
;
diff --git a/polly/test/CodeGen/inv-load-lnt-crash-wrong-order-2.ll b/polly/test/CodeGen/inv-load-lnt-crash-wrong-order-2.ll
index 6727247a7f04..e7cbf748bea7 100644
--- a/polly/test/CodeGen/inv-load-lnt-crash-wrong-order-2.ll
+++ b/polly/test/CodeGen/inv-load-lnt-crash-wrong-order-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S \
; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s
;
; This crashed our codegen at some point, verify it runs through
diff --git a/polly/test/CodeGen/inv-load-lnt-crash-wrong-order-3.ll b/polly/test/CodeGen/inv-load-lnt-crash-wrong-order-3.ll
index a573049c8f67..24e9240c234d 100644
--- a/polly/test/CodeGen/inv-load-lnt-crash-wrong-order-3.ll
+++ b/polly/test/CodeGen/inv-load-lnt-crash-wrong-order-3.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S \
; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s
;
; This crashed our codegen at some point, verify it runs through
diff --git a/polly/test/CodeGen/inv-load-lnt-crash-wrong-order.ll b/polly/test/CodeGen/inv-load-lnt-crash-wrong-order.ll
index e05ca9951434..d1d861e316ee 100644
--- a/polly/test/CodeGen/inv-load-lnt-crash-wrong-order.ll
+++ b/polly/test/CodeGen/inv-load-lnt-crash-wrong-order.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S \
; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s
;
; This crashed our codegen at some point, verify it runs through
diff --git a/polly/test/CodeGen/invariant-load-dimension.ll b/polly/test/CodeGen/invariant-load-dimension.ll
index 7793c3b3bee3..21e53055c56b 100644
--- a/polly/test/CodeGen/invariant-load-dimension.ll
+++ b/polly/test/CodeGen/invariant-load-dimension.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-process-unprofitable -polly-invariant-load-hoisting -polly-print-scops -disable-output < %s | FileCheck %s -check-prefix=SCOPS
-; RUN: opt %loadPolly -S < %s -polly-codegen -polly-process-unprofitable -polly-invariant-load-hoisting | FileCheck %s -check-prefix=CODEGEN
+; RUN: opt %loadNPMPolly -polly-process-unprofitable -polly-invariant-load-hoisting '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -check-prefix=SCOPS
+; RUN: opt %loadNPMPolly -S < %s -passes=polly-codegen -polly-process-unprofitable -polly-invariant-load-hoisting | FileCheck %s -check-prefix=CODEGEN
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n8:16:32-S64"
diff --git a/polly/test/CodeGen/invariant-load-preload-base-pointer-origin-first.ll b/polly/test/CodeGen/invariant-load-preload-base-pointer-origin-first.ll
index 474100995fd8..1fd9cb81771c 100644
--- a/polly/test/CodeGen/invariant-load-preload-base-pointer-origin-first.ll
+++ b/polly/test/CodeGen/invariant-load-preload-base-pointer-origin-first.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen -polly-invariant-load-hoisting=true < %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen -polly-invariant-load-hoisting=true < %s
;
; Check that we generate valid code as we did not preload the base pointer
; origin of %tmp4 at some point.
diff --git a/polly/test/CodeGen/invariant_cannot_handle_void.ll b/polly/test/CodeGen/invariant_cannot_handle_void.ll
index de5d13d6a69a..0859a4e4997e 100644
--- a/polly/test/CodeGen/invariant_cannot_handle_void.ll
+++ b/polly/test/CodeGen/invariant_cannot_handle_void.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-invariant-load-hoisting=true -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=SCOP
-; RUN: opt %loadPolly -S -polly-codegen -polly-invariant-load-hoisting=true %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-invariant-load-hoisting=true '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=SCOP
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen -polly-invariant-load-hoisting=true %s | FileCheck %s
;
; The offset of the %tmp1 load w.r.t. %buff (62 bytes) is not divisible
; by the type size (i32 = 4 bytes); thus, we will have to represent %buff
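A small C illustration of the arithmetic in the comment above (the function name is hypothetical):

```c
#include <stdint.h>
#include <string.h>

/* Hypothetical illustration: a 4-byte value at byte offset 62 cannot be
   indexed as an i32 array element (62 % 4 != 0), so the buffer has to be
   modeled byte-wise, mirroring the comment above. */
int32_t load_at_offset_62(const char *buff) {
  int32_t v;
  memcpy(&v, buff + 62, sizeof v); /* byte-wise addressing */
  return v;
}
```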
diff --git a/polly/test/CodeGen/invariant_load.ll b/polly/test/CodeGen/invariant_load.ll
index be3f7a32f35b..2d5e6042ea6a 100644
--- a/polly/test/CodeGen/invariant_load.ll
+++ b/polly/test/CodeGen/invariant_load.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s
;
; CHECK-LABEL: polly.preload.begin:
; CHECK-NEXT: %polly.access.B = getelementptr i32, ptr %B, i64 0
diff --git a/polly/test/CodeGen/invariant_load_address_space.ll b/polly/test/CodeGen/invariant_load_address_space.ll
index 7c611ad3dd87..3d1958e5b8a4 100644
--- a/polly/test/CodeGen/invariant_load_address_space.ll
+++ b/polly/test/CodeGen/invariant_load_address_space.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s
;
; CHECK-LABEL: polly.preload.begin:
; CHECK-NEXT: %polly.access.B = getelementptr i32, ptr addrspace(1) %B, i64 0
diff --git a/polly/test/CodeGen/invariant_load_alias_metadata.ll b/polly/test/CodeGen/invariant_load_alias_metadata.ll
index 5a82d82d43f8..252463384a5c 100644
--- a/polly/test/CodeGen/invariant_load_alias_metadata.ll
+++ b/polly/test/CodeGen/invariant_load_alias_metadata.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true \
; RUN: -S < %s | FileCheck %s
;
; This test case checks whether Polly generates alias metadata in case of
diff --git a/polly/test/CodeGen/invariant_load_base_pointer.ll b/polly/test/CodeGen/invariant_load_base_pointer.ll
index eb07f8317b79..d4ac433475f0 100644
--- a/polly/test/CodeGen/invariant_load_base_pointer.ll
+++ b/polly/test/CodeGen/invariant_load_base_pointer.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -polly-ignore-aliasing -polly-process-unprofitable -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -polly-ignore-aliasing -polly-process-unprofitable -S < %s | FileCheck %s
;
; CHECK-LABEL: polly.preload.begin:
; CHECK-NEXT: %polly.access.BPLoc = getelementptr ptr, ptr %BPLoc, i64 0
diff --git a/polly/test/CodeGen/invariant_load_base_pointer_conditional.ll b/polly/test/CodeGen/invariant_load_base_pointer_conditional.ll
index 538077bb09e8..06a9a93363ed 100644
--- a/polly/test/CodeGen/invariant_load_base_pointer_conditional.ll
+++ b/polly/test/CodeGen/invariant_load_base_pointer_conditional.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -polly-ignore-aliasing -polly-process-unprofitable -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -polly-ignore-aliasing -polly-process-unprofitable -S < %s | FileCheck %s
;
; CHECK-LABEL: polly.preload.begin:
; CHECK-NEXT: %0 = sext i32 %N to i64
diff --git a/polly/test/CodeGen/invariant_load_base_pointer_conditional_2.ll b/polly/test/CodeGen/invariant_load_base_pointer_conditional_2.ll
index 7c2fb3ef97ed..66ab9a31b103 100644
--- a/polly/test/CodeGen/invariant_load_base_pointer_conditional_2.ll
+++ b/polly/test/CodeGen/invariant_load_base_pointer_conditional_2.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -polly-invariant-load-hoisting=true -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -S -polly-codegen -polly-invariant-load-hoisting=true < %s | FileCheck %s --check-prefix=IR
-; RUN: opt %loadPolly -S -polly-codegen -polly-invariant-load-hoisting=true --polly-overflow-tracking=always < %s | FileCheck %s --check-prefix=IRA
+; RUN: opt %loadNPMPolly -polly-invariant-load-hoisting=true '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen -polly-invariant-load-hoisting=true < %s | FileCheck %s --check-prefix=IR
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen -polly-invariant-load-hoisting=true --polly-overflow-tracking=always < %s | FileCheck %s --check-prefix=IRA
;
; As (p + q) can overflow, we have to check that we load from
; I[p + q] only if it does not.
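A minimal C sketch of the guarded preload being tested, assuming a 32-bit int (all names are illustrative):

```c
#include <limits.h>
#include <stdint.h>

/* Hypothetical illustration: preload I[p + q] only when p + q provably
   does not overflow; otherwise fall back to a safe default. This is the
   guard the RUN lines above exercise. */
int guarded_preload(const int *I, int p, int q, int fallback) {
  int64_t sum = (int64_t)p + (int64_t)q; /* widen to detect overflow */
  if (sum >= INT_MIN && sum <= INT_MAX)
    return I[sum]; /* executed only on the no-overflow path */
  return fallback;
}
```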
diff --git a/polly/test/CodeGen/invariant_load_canonicalize_array_baseptrs.ll b/polly/test/CodeGen/invariant_load_canonicalize_array_baseptrs.ll
index dc5a4c890381..fa904e9b96d3 100644
--- a/polly/test/CodeGen/invariant_load_canonicalize_array_baseptrs.ll
+++ b/polly/test/CodeGen/invariant_load_canonicalize_array_baseptrs.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s \
; RUN: -polly-invariant-load-hoisting \
; RUN: | FileCheck %s
diff --git a/polly/test/CodeGen/invariant_load_condition.ll b/polly/test/CodeGen/invariant_load_condition.ll
index edf0814d8983..36e588329d66 100644
--- a/polly/test/CodeGen/invariant_load_condition.ll
+++ b/polly/test/CodeGen/invariant_load_condition.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-process-unprofitable -polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-process-unprofitable -passes=polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s
;
; CHECK-LABEL: polly.preload.begin:
; CHECK-NEXT: %polly.access.C = getelementptr i32, ptr %C, i64 0
diff --git a/polly/test/CodeGen/invariant_load_different_sized_types.ll b/polly/test/CodeGen/invariant_load_different_sized_types.ll
index 5b91a1901061..2995bce4c660 100644
--- a/polly/test/CodeGen/invariant_load_different_sized_types.ll
+++ b/polly/test/CodeGen/invariant_load_different_sized_types.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -S \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -S \
; RUN: -polly-allow-differing-element-types < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/CodeGen/invariant_load_escaping.ll b/polly/test/CodeGen/invariant_load_escaping.ll
index efccdf468a18..416148b72303 100644
--- a/polly/test/CodeGen/invariant_load_escaping.ll
+++ b/polly/test/CodeGen/invariant_load_escaping.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s
;
; int f(int *A, int *B) {
; // Possible aliasing between A and B but if not then *B would be
diff --git a/polly/test/CodeGen/invariant_load_escaping_second_scop.ll b/polly/test/CodeGen/invariant_load_escaping_second_scop.ll
index c0ea888acdde..906bfc1805d3 100644
--- a/polly/test/CodeGen/invariant_load_escaping_second_scop.ll
+++ b/polly/test/CodeGen/invariant_load_escaping_second_scop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -polly-process-unprofitable -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -polly-process-unprofitable -S < %s | FileCheck %s
;
; void fence(void);
;
diff --git a/polly/test/CodeGen/invariant_load_in_non_affine_subregion.ll b/polly/test/CodeGen/invariant_load_in_non_affine_subregion.ll
index 241252b5d549..472c6c67a45e 100644
--- a/polly/test/CodeGen/invariant_load_in_non_affine_subregion.ll
+++ b/polly/test/CodeGen/invariant_load_in_non_affine_subregion.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s
;
; This crashed at some point as the invariant load is in a non-affine
; subregion. Just check it does not crash anymore.
diff --git a/polly/test/CodeGen/invariant_load_loop_ub.ll b/polly/test/CodeGen/invariant_load_loop_ub.ll
index ab9aa0dc69a7..1db27ad8e58b 100644
--- a/polly/test/CodeGen/invariant_load_loop_ub.ll
+++ b/polly/test/CodeGen/invariant_load_loop_ub.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -polly-process-unprofitable -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -polly-process-unprofitable -S < %s | FileCheck %s
;
; CHECK: polly.start
;
diff --git a/polly/test/CodeGen/invariant_load_not_executed_but_in_parameters.ll b/polly/test/CodeGen/invariant_load_not_executed_but_in_parameters.ll
index 08ff0871b610..01b01761d908 100644
--- a/polly/test/CodeGen/invariant_load_not_executed_but_in_parameters.ll
+++ b/polly/test/CodeGen/invariant_load_not_executed_but_in_parameters.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -disable-output < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -disable-output < %s
;
; Check that this does not crash as the invariant load is not executed (thus
; not preloaded) but still referenced by one of the parameters.
diff --git a/polly/test/CodeGen/invariant_load_outermost.ll b/polly/test/CodeGen/invariant_load_outermost.ll
index f42135c09014..7e0550fb3be9 100644
--- a/polly/test/CodeGen/invariant_load_outermost.ll
+++ b/polly/test/CodeGen/invariant_load_outermost.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s
; CHECK: polly.start
diff --git a/polly/test/CodeGen/invariant_load_parameters_cyclic_dependence.ll b/polly/test/CodeGen/invariant_load_parameters_cyclic_dependence.ll
index d365c99eff66..abf957b556da 100644
--- a/polly/test/CodeGen/invariant_load_parameters_cyclic_dependence.ll
+++ b/polly/test/CodeGen/invariant_load_parameters_cyclic_dependence.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s --check-prefix=SCOP
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s --check-prefix=SCOP
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s
;
; SCOP: Assumed Context:
; SCOP-NEXT: [p_0, tmp4] -> { : }
diff --git a/polly/test/CodeGen/invariant_load_ptr_ptr_noalias.ll b/polly/test/CodeGen/invariant_load_ptr_ptr_noalias.ll
index b4d4c55f0d9b..b565f1bd5096 100644
--- a/polly/test/CodeGen/invariant_load_ptr_ptr_noalias.ll
+++ b/polly/test/CodeGen/invariant_load_ptr_ptr_noalias.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-process-unprofitable -polly-codegen -polly-invariant-load-hoisting=true -polly-ignore-aliasing -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-process-unprofitable -passes=polly-codegen -polly-invariant-load-hoisting=true -polly-ignore-aliasing -S < %s | FileCheck %s
;
; CHECK-LABEL: polly.preload.begin:
; CHECK: %polly.access.A = getelementptr ptr, ptr %A, i64 42
diff --git a/polly/test/CodeGen/invariant_load_scalar_dep.ll b/polly/test/CodeGen/invariant_load_scalar_dep.ll
index 05a40a4c47cc..ba2999e27984 100644
--- a/polly/test/CodeGen/invariant_load_scalar_dep.ll
+++ b/polly/test/CodeGen/invariant_load_scalar_dep.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -polly-ignore-aliasing -polly-process-unprofitable -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -polly-ignore-aliasing -polly-process-unprofitable -S < %s | FileCheck %s
;
; CHECK-LABEL: polly.preload.begin:
; CHECK: %polly.access.B = getelementptr i32, ptr %B, i64 0
diff --git a/polly/test/CodeGen/invariant_load_scalar_escape_alloca_sharing.ll b/polly/test/CodeGen/invariant_load_scalar_escape_alloca_sharing.ll
index 44c035855b76..26c964c9c6a7 100644
--- a/polly/test/CodeGen/invariant_load_scalar_escape_alloca_sharing.ll
+++ b/polly/test/CodeGen/invariant_load_scalar_escape_alloca_sharing.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s
;
; Verify the preloaded %tmp0 is stored and communicated in the same alloca.
; In this case, we do not reload %ncol.load from the scalar stack slot, but
diff --git a/polly/test/CodeGen/invariant_loads_from_struct_with_different_types_1.ll b/polly/test/CodeGen/invariant_loads_from_struct_with_different_types_1.ll
index 0b6929a5fd3f..6bf11d5697bd 100644
--- a/polly/test/CodeGen/invariant_loads_from_struct_with_different_types_1.ll
+++ b/polly/test/CodeGen/invariant_loads_from_struct_with_different_types_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true < %s
;
; Check we do not crash even though we pre-load values with different types
; from the same base pointer.
diff --git a/polly/test/CodeGen/invariant_loads_from_struct_with_different_types_2.ll b/polly/test/CodeGen/invariant_loads_from_struct_with_different_types_2.ll
index 2eb913fed447..07ce94152245 100644
--- a/polly/test/CodeGen/invariant_loads_from_struct_with_different_types_2.ll
+++ b/polly/test/CodeGen/invariant_loads_from_struct_with_different_types_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true < %s
;
; Check we do not crash even though we pre-load values with different types
; from the same base pointer.
diff --git a/polly/test/CodeGen/invariant_loads_ignore_parameter_bounds.ll b/polly/test/CodeGen/invariant_loads_ignore_parameter_bounds.ll
index a0c1f891bdf6..19b30afd33ba 100644
--- a/polly/test/CodeGen/invariant_loads_ignore_parameter_bounds.ll
+++ b/polly/test/CodeGen/invariant_loads_ignore_parameter_bounds.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting \
; RUN: -polly-ignore-parameter-bounds -S < %s | FileCheck %s
; CHECK: polly.preload.begin:
diff --git a/polly/test/CodeGen/invariant_verify_function_failed.ll b/polly/test/CodeGen/invariant_verify_function_failed.ll
index 6020caeee85d..c9affac076e9 100644
--- a/polly/test/CodeGen/invariant_verify_function_failed.ll
+++ b/polly/test/CodeGen/invariant_verify_function_failed.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -polly-codegen -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,scop(polly-codegen)' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
;
; This crashed at some point as the pointer returned by the call
; to @__errno_location is invariant and defined in the SCoP but not
diff --git a/polly/test/CodeGen/invariant_verify_function_failed_2.ll b/polly/test/CodeGen/invariant_verify_function_failed_2.ll
index 81a4bd1dc153..7ef5608d7d19 100644
--- a/polly/test/CodeGen/invariant_verify_function_failed_2.ll
+++ b/polly/test/CodeGen/invariant_verify_function_failed_2.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -S -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s -check-prefix=SCOPS
-; RUN: opt %loadPolly -S -polly-codegen -polly-invariant-load-hoisting=true %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s -check-prefix=SCOPS
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen -polly-invariant-load-hoisting=true %s | FileCheck %s
;
; Check we generate valid code.
diff --git a/polly/test/CodeGen/issue56692.ll b/polly/test/CodeGen/issue56692.ll
index e935e43bfa44..34c4e398e2ac 100644
--- a/polly/test/CodeGen/issue56692.ll
+++ b/polly/test/CodeGen/issue56692.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-parallel -polly-parallel-force -polly-omp-backend=LLVM -polly-codegen-verify -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-parallel -polly-parallel-force -polly-omp-backend=LLVM -polly-codegen-verify -passes=polly-codegen -S < %s | FileCheck %s
; https://github.com/llvm/llvm-project/issues/56692
;
; CHECK: call void (ptr, i32, ptr, ...) @__kmpc_fork_call({{.*}}), !dbg ![[OPTLOC:[0-9]+]]
diff --git a/polly/test/CodeGen/large-numbers-in-boundary-context.ll b/polly/test/CodeGen/large-numbers-in-boundary-context.ll
index a0328dfec651..b228baf9bdf2 100644
--- a/polly/test/CodeGen/large-numbers-in-boundary-context.ll
+++ b/polly/test/CodeGen/large-numbers-in-boundary-context.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s | FileCheck %s
; XFAIL: *
;
; The boundary context contains a constant that does not fit in 64 bits. Hence,
diff --git a/polly/test/CodeGen/load_subset_with_context.ll b/polly/test/CodeGen/load_subset_with_context.ll
index ef0e051d5635..ccd4198b9fe8 100644
--- a/polly/test/CodeGen/load_subset_with_context.ll
+++ b/polly/test/CodeGen/load_subset_with_context.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' -polly-import-jscop-postfix=transformed -S < %s | FileCheck %s
;
; A load must provide a value for every statement instance.
; Statement instances not in the SCoP's context are irrelevant.
diff --git a/polly/test/CodeGen/loop-invariant-load-type-mismatch.ll b/polly/test/CodeGen/loop-invariant-load-type-mismatch.ll
index 90c61c591623..d9065858ff25 100644
--- a/polly/test/CodeGen/loop-invariant-load-type-mismatch.ll
+++ b/polly/test/CodeGen/loop-invariant-load-type-mismatch.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/polly/test/CodeGen/loop_with_condition.ll b/polly/test/CodeGen/loop_with_condition.ll
index 618a542c179a..49e312404cca 100644
--- a/polly/test/CodeGen/loop_with_condition.ll
+++ b/polly/test/CodeGen/loop_with_condition.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
;#include <string.h>
;#define N 1024
diff --git a/polly/test/CodeGen/loop_with_condition_2.ll b/polly/test/CodeGen/loop_with_condition_2.ll
index b1a116785069..8ae38eeeb498 100644
--- a/polly/test/CodeGen/loop_with_condition_2.ll
+++ b/polly/test/CodeGen/loop_with_condition_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-ast -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
; Verify that we actually detect this loop as the innermost loop even though
; there is a conditional inside.
diff --git a/polly/test/CodeGen/loop_with_condition_ineq.ll b/polly/test/CodeGen/loop_with_condition_ineq.ll
index c35208c72dfe..64019a609021 100644
--- a/polly/test/CodeGen/loop_with_condition_ineq.ll
+++ b/polly/test/CodeGen/loop_with_condition_ineq.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
;#include <string.h>
;#define N 1024
diff --git a/polly/test/CodeGen/loop_with_condition_nested.ll b/polly/test/CodeGen/loop_with_condition_nested.ll
index 24a49b47d9e6..5dcb51dcb91c 100644
--- a/polly/test/CodeGen/loop_with_condition_nested.ll
+++ b/polly/test/CodeGen/loop_with_condition_nested.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-ast -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -basic-aa -polly-codegen < %s | opt -passes='print<loops>' -disable-output 2>&1 | FileCheck %s -check-prefix=LOOPS
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -passes=polly-codegen < %s | opt -passes='print<loops>' -disable-output 2>&1 | FileCheck %s -check-prefix=LOOPS
;#include <string.h>
diff --git a/polly/test/CodeGen/loop_with_conditional_entry_edge_split_hard_case.ll b/polly/test/CodeGen/loop_with_conditional_entry_edge_split_hard_case.ll
index 4444cf1dc4dd..26fe4eb82ae4 100644
--- a/polly/test/CodeGen/loop_with_conditional_entry_edge_split_hard_case.ll
+++ b/polly/test/CodeGen/loop_with_conditional_entry_edge_split_hard_case.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; Test case to trigger the hard way of creating a unique entering
; edge for the SCoP. It is triggered because the entering edge
diff --git a/polly/test/CodeGen/memcpy_annotations.ll b/polly/test/CodeGen/memcpy_annotations.ll
index a0a09b75c82e..501aa8fbea4d 100644
--- a/polly/test/CodeGen/memcpy_annotations.ll
+++ b/polly/test/CodeGen/memcpy_annotations.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; Verify that @llvm.memcpy does not get a !alias.scope annotation.
; @llvm.memcpy takes two pointers; it is ambiguous to which the
diff --git a/polly/test/CodeGen/multidim-non-matching-typesize-2.ll b/polly/test/CodeGen/multidim-non-matching-typesize-2.ll
index 63afad6e2f41..f63eb18118e7 100644
--- a/polly/test/CodeGen/multidim-non-matching-typesize-2.ll
+++ b/polly/test/CodeGen/multidim-non-matching-typesize-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -disable-basic-aa -polly-codegen \
+; RUN: opt %loadNPMPolly -disable-basic-aa -passes=polly-codegen \
; RUN: -S < %s | FileCheck %s
; CHECK: polly
target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"
diff --git a/polly/test/CodeGen/multidim-non-matching-typesize.ll b/polly/test/CodeGen/multidim-non-matching-typesize.ll
index d117cefe3376..63e43c83ada5 100644
--- a/polly/test/CodeGen/multidim-non-matching-typesize.ll
+++ b/polly/test/CodeGen/multidim-non-matching-typesize.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -disable-basic-aa -polly-codegen \
+; RUN: opt %loadNPMPolly -disable-basic-aa -passes=polly-codegen \
; RUN: -S < %s | FileCheck %s
target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"
diff --git a/polly/test/CodeGen/multidim_2d_parametric_array_static_loop_bounds.ll b/polly/test/CodeGen/multidim_2d_parametric_array_static_loop_bounds.ll
index 464ddb3740f7..86b17573caad 100644
--- a/polly/test/CodeGen/multidim_2d_parametric_array_static_loop_bounds.ll
+++ b/polly/test/CodeGen/multidim_2d_parametric_array_static_loop_bounds.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; Derived from the following code:
diff --git a/polly/test/CodeGen/multidim_alias_check.ll b/polly/test/CodeGen/multidim_alias_check.ll
index 585577da0e6d..93e34e2fd0fc 100644
--- a/polly/test/CodeGen/multidim_alias_check.ll
+++ b/polly/test/CodeGen/multidim_alias_check.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; CHECK: %polly.access.sext.A = sext i32 %n to i64
diff --git a/polly/test/CodeGen/multiple-codegens.ll b/polly/test/CodeGen/multiple-codegens.ll
index f950fa4a3e1d..2fa974e66df5 100644
--- a/polly/test/CodeGen/multiple-codegens.ll
+++ b/polly/test/CodeGen/multiple-codegens.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-scops -polly-opt-isl -polly-codegen -polly-scops -polly-codegen -S < %s | FileCheck %s
; RUN: opt %loadNPMPolly "-passes=scop(polly-opt-isl,polly-codegen,polly-codegen)" -S < %s | FileCheck %s
; RUN: opt %loadNPMPolly "-passes=scop(polly-opt-isl,polly-codegen),scop(polly-codegen)" -S < %s | FileCheck %s
;
diff --git a/polly/test/CodeGen/multiple-scops-in-a-row.ll b/polly/test/CodeGen/multiple-scops-in-a-row.ll
index a24a2e71ad4e..b81ba04e3646 100644
--- a/polly/test/CodeGen/multiple-scops-in-a-row.ll
+++ b/polly/test/CodeGen/multiple-scops-in-a-row.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s | FileCheck %s
; This test case has two scops in a row. When generating code for the first scop,
; the second scop is invalidated. This test case verifies that we do not crash
diff --git a/polly/test/CodeGen/multiple-types-invariant-load-2.ll b/polly/test/CodeGen/multiple-types-invariant-load-2.ll
index 0fd1df75e2ec..f6aca37c932b 100644
--- a/polly/test/CodeGen/multiple-types-invariant-load-2.ll
+++ b/polly/test/CodeGen/multiple-types-invariant-load-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S \
; RUN: -polly-allow-differing-element-types < %s | FileCheck %s
; CHECK: polly
diff --git a/polly/test/CodeGen/multiple-types-invariant-load.ll b/polly/test/CodeGen/multiple-types-invariant-load.ll
index b1434679e3d1..930041eaddaa 100644
--- a/polly/test/CodeGen/multiple-types-invariant-load.ll
+++ b/polly/test/CodeGen/multiple-types-invariant-load.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-allow-differing-element-types -polly-codegen -S \
+; RUN: opt %loadNPMPolly -polly-allow-differing-element-types -passes=polly-codegen -S \
; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s
; CHECK: %polly.access.global.load = getelementptr i32, ptr %global.load, i64 0
diff --git a/polly/test/CodeGen/multiple_sai_fro_same_base_address.ll b/polly/test/CodeGen/multiple_sai_fro_same_base_address.ll
index 0163f248229e..1e06a7e186bb 100644
--- a/polly/test/CodeGen/multiple_sai_fro_same_base_address.ll
+++ b/polly/test/CodeGen/multiple_sai_fro_same_base_address.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-position=before-vectorizer -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=SCOP
-; RUN: opt %loadPolly -polly-position=before-vectorizer -polly-codegen -S < %s | FileCheck %s --check-prefix=IR
+; RUN: opt %loadNPMPolly -polly-position=before-vectorizer '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=SCOP
+; RUN: opt %loadNPMPolly -polly-position=before-vectorizer -passes=polly-codegen -S < %s | FileCheck %s --check-prefix=IR
; The IR has two ScopArrayInfo for the value %next.0. This used to produce two
; phi nodes in polly.merge_new_and_old, one illegally using the result of the
diff --git a/polly/test/CodeGen/no-overflow-tracking.ll b/polly/test/CodeGen/no-overflow-tracking.ll
index f11e8927ddee..d5ad9a7aef23 100644
--- a/polly/test/CodeGen/no-overflow-tracking.ll
+++ b/polly/test/CodeGen/no-overflow-tracking.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-invariant-load-hoisting=true -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-invariant-load-hoisting=true -polly-overflow-tracking=never -polly-codegen -S < %s | FileCheck %s --check-prefix=IR
+; RUN: opt %loadNPMPolly -polly-invariant-load-hoisting=true '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-invariant-load-hoisting=true -polly-overflow-tracking=never -passes=polly-codegen -S < %s | FileCheck %s --check-prefix=IR
;
; As (p + q) can overflow, we have to check that we load from
; I[p + q] only if it does not.
diff --git a/polly/test/CodeGen/no_guard_bb.ll b/polly/test/CodeGen/no_guard_bb.ll
index 47c87ff7c868..a022083f43a9 100644
--- a/polly/test/CodeGen/no_guard_bb.ll
+++ b/polly/test/CodeGen/no_guard_bb.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S -verify-dom-info < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S -verify-dom-info < %s | FileCheck %s
;
; CHECK-NOT: br i1 true, label %polly.{{.*}}, label %polly.{{.*}}
;
diff --git a/polly/test/CodeGen/non-affine-dominance-generated-entering.ll b/polly/test/CodeGen/non-affine-dominance-generated-entering.ll
index ebf36acc8d96..6015516a3bc4 100644
--- a/polly/test/CodeGen/non-affine-dominance-generated-entering.ll
+++ b/polly/test/CodeGen/non-affine-dominance-generated-entering.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; llvm.org/PR25439
; Scalar reloads in the generated entering block were not recognized as
diff --git a/polly/test/CodeGen/non-affine-exit-node-dominance.ll b/polly/test/CodeGen/non-affine-exit-node-dominance.ll
index af19d2420e3e..0d0f634ed7c1 100644
--- a/polly/test/CodeGen/non-affine-exit-node-dominance.ll
+++ b/polly/test/CodeGen/non-affine-exit-node-dominance.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; llvm.org/PR25439
; The dominance of the generated non-affine subregion block was based on the
diff --git a/polly/test/CodeGen/non-affine-phi-node-expansion-2.ll b/polly/test/CodeGen/non-affine-phi-node-expansion-2.ll
index 2aca316d4c88..b7394b248404 100644
--- a/polly/test/CodeGen/non-affine-phi-node-expansion-2.ll
+++ b/polly/test/CodeGen/non-affine-phi-node-expansion-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen \
+; RUN: opt %loadNPMPolly -passes=polly-codegen \
; RUN: -S < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/CodeGen/non-affine-phi-node-expansion-3.ll b/polly/test/CodeGen/non-affine-phi-node-expansion-3.ll
index 18a4b6e4ed4a..b9386333a79b 100644
--- a/polly/test/CodeGen/non-affine-phi-node-expansion-3.ll
+++ b/polly/test/CodeGen/non-affine-phi-node-expansion-3.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen \
+; RUN: opt %loadNPMPolly -passes=polly-codegen \
; RUN: -S < %s | FileCheck %s
define void @foo(ptr %A, i1 %cond0, i1 %cond1) {
diff --git a/polly/test/CodeGen/non-affine-phi-node-expansion-4.ll b/polly/test/CodeGen/non-affine-phi-node-expansion-4.ll
index 8a07ee7c7424..6460c427270f 100644
--- a/polly/test/CodeGen/non-affine-phi-node-expansion-4.ll
+++ b/polly/test/CodeGen/non-affine-phi-node-expansion-4.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen \
+; RUN: opt %loadNPMPolly -passes=polly-codegen \
; RUN: -S < %s | FileCheck %s
define void @foo(ptr %A, i1 %cond0, i1 %cond1) {
diff --git a/polly/test/CodeGen/non-affine-phi-node-expansion.ll b/polly/test/CodeGen/non-affine-phi-node-expansion.ll
index 091fc3e323dc..8fd8cc14124b 100644
--- a/polly/test/CodeGen/non-affine-phi-node-expansion.ll
+++ b/polly/test/CodeGen/non-affine-phi-node-expansion.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen \
+; RUN: opt %loadNPMPolly -passes=polly-codegen \
; RUN: -S < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/CodeGen/non-affine-region-exit-phi-incoming-synthesize-2.ll b/polly/test/CodeGen/non-affine-region-exit-phi-incoming-synthesize-2.ll
index 6a1d1f12ba9c..007a4c586aa3 100644
--- a/polly/test/CodeGen/non-affine-region-exit-phi-incoming-synthesize-2.ll
+++ b/polly/test/CodeGen/non-affine-region-exit-phi-incoming-synthesize-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; This caused code generation to emit invalid code, as the same operand
; of the PHI node in the non-affine region was synthesized at the wrong place.
diff --git a/polly/test/CodeGen/non-affine-region-exit-phi-incoming-synthesize.ll b/polly/test/CodeGen/non-affine-region-exit-phi-incoming-synthesize.ll
index 036bf34cb7f7..20edbf2bd6c0 100644
--- a/polly/test/CodeGen/non-affine-region-exit-phi-incoming-synthesize.ll
+++ b/polly/test/CodeGen/non-affine-region-exit-phi-incoming-synthesize.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; This caused code generation to emit invalid code, as the same BBMap was
; used for the whole non-affine region. When %add is synthesized for the
diff --git a/polly/test/CodeGen/non-affine-region-implicit-store.ll b/polly/test/CodeGen/non-affine-region-implicit-store.ll
index e89197e24852..0ff39d3fe882 100644
--- a/polly/test/CodeGen/non-affine-region-implicit-store.ll
+++ b/polly/test/CodeGen/non-affine-region-implicit-store.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; llvm.org/PR25438
; After loop versioning, a dominance check of a non-affine subregion's exit node
diff --git a/polly/test/CodeGen/non-affine-region-phi-references-in-scop-value.ll b/polly/test/CodeGen/non-affine-region-phi-references-in-scop-value.ll
index f6e4eb57319d..7df3d8976ea8 100644
--- a/polly/test/CodeGen/non-affine-region-phi-references-in-scop-value.ll
+++ b/polly/test/CodeGen/non-affine-region-phi-references-in-scop-value.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-allow-nonaffine-loops \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-allow-nonaffine-loops \
; RUN: -S < %s | FileCheck %s
; This test verifies that values defined in another scop statement and used by
diff --git a/polly/test/CodeGen/non-affine-subregion-dominance-reuse.ll b/polly/test/CodeGen/non-affine-subregion-dominance-reuse.ll
index 6c749a404336..179062dd62d0 100644
--- a/polly/test/CodeGen/non-affine-subregion-dominance-reuse.ll
+++ b/polly/test/CodeGen/non-affine-subregion-dominance-reuse.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S -verify-dom-info \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S -verify-dom-info \
; RUN: < %s | FileCheck %s
;
; Check that we do not reuse the B[i-1] GEP created in block S again in
diff --git a/polly/test/CodeGen/non-affine-switch.ll b/polly/test/CodeGen/non-affine-switch.ll
index 9c08b98700ae..427e7e2461f1 100644
--- a/polly/test/CodeGen/non-affine-switch.ll
+++ b/polly/test/CodeGen/non-affine-switch.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly \
-; RUN: -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly \
+; RUN: -S -passes=polly-codegen < %s | FileCheck %s
;
; void f(int *A, int N) {
; for (int i = 0; i < N; i++)
diff --git a/polly/test/CodeGen/non-affine-synthesized-in-branch.ll b/polly/test/CodeGen/non-affine-synthesized-in-branch.ll
index cc0e60abcd09..292c0f2b5394 100644
--- a/polly/test/CodeGen/non-affine-synthesized-in-branch.ll
+++ b/polly/test/CodeGen/non-affine-synthesized-in-branch.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-process-unprofitable -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-process-unprofitable -passes=polly-codegen -S < %s | FileCheck %s
;
; llvm.org/PR25412
; %synthgep caused %gep to be synthesized in subregion_if which was reused for
diff --git a/polly/test/CodeGen/non-affine-update.ll b/polly/test/CodeGen/non-affine-update.ll
index d2b7fae75b23..03f091a40501 100644
--- a/polly/test/CodeGen/non-affine-update.ll
+++ b/polly/test/CodeGen/non-affine-update.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-import-jscop \
-; RUN: -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' \
+; RUN: -S < %s | FileCheck %s
;
; void non-affine-update(double A[], double C[], double B[]) {
; for (int i = 0; i < 10; i++) {
diff --git a/polly/test/CodeGen/non-hoisted-load-needed-as-base-ptr.ll b/polly/test/CodeGen/non-hoisted-load-needed-as-base-ptr.ll
index 5f6642b0630d..153cdb7ed9f6 100644
--- a/polly/test/CodeGen/non-hoisted-load-needed-as-base-ptr.ll
+++ b/polly/test/CodeGen/non-hoisted-load-needed-as-base-ptr.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -tbaa -polly-codegen -disable-output %s
+; RUN: opt %loadNPMPolly -aa-pipeline=tbaa -passes=polly-codegen -disable-output %s
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
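This hunk also converts the alias-analysis selection: the legacy -tbaa pass flag becomes -aa-pipeline=tbaa, because the new pass manager assembles its AA stack from a pipeline string rather than from individual pass flags (the same pattern appears later in this patch with -basic-aa becoming -aa-pipeline=basic-aa). A minimal sketch, with plugin and file names assumed:

  # NPM: alias analyses are named in -aa-pipeline, not enabled per-flag
  opt -load-pass-plugin LLVMPolly.so -aa-pipeline=tbaa \
      -passes=polly-codegen -disable-output input.ll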
diff --git a/polly/test/CodeGen/non_affine_float_compare.ll b/polly/test/CodeGen/non_affine_float_compare.ll
index be310b5bf5ca..304a9016665c 100644
--- a/polly/test/CodeGen/non_affine_float_compare.ll
+++ b/polly/test/CodeGen/non_affine_float_compare.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen \
+; RUN: opt %loadNPMPolly -passes=polly-codegen \
; RUN: -polly-allow-nonaffine-branches -S -verify-dom-info \
; RUN: < %s | FileCheck %s
;
diff --git a/polly/test/CodeGen/only_non_affine_error_region.ll b/polly/test/CodeGen/only_non_affine_error_region.ll
index b2ad1c1fe3fd..445cef0d6f69 100644
--- a/polly/test/CodeGen/only_non_affine_error_region.ll
+++ b/polly/test/CodeGen/only_non_affine_error_region.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; CHECK-NOT: polly.start
;
diff --git a/polly/test/CodeGen/openmp_limit_threads.ll b/polly/test/CodeGen/openmp_limit_threads.ll
index e8eb819f13d9..4c33be340725 100644
--- a/polly/test/CodeGen/openmp_limit_threads.ll
+++ b/polly/test/CodeGen/openmp_limit_threads.ll
@@ -1,10 +1,10 @@
-; RUN: opt %loadPolly -polly-codegen -polly-parallel -S < %s | FileCheck %s --check-prefix=AUTO
-; RUN: opt %loadPolly -polly-codegen -polly-parallel -polly-num-threads=1 -S < %s | FileCheck %s --check-prefix=ONE
-; RUN: opt %loadPolly -polly-codegen -polly-parallel -polly-num-threads=4 -S < %s | FileCheck %s --check-prefix=FOUR
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-parallel -S < %s | FileCheck %s --check-prefix=AUTO
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-parallel -polly-num-threads=1 -S < %s | FileCheck %s --check-prefix=ONE
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-parallel -polly-num-threads=4 -S < %s | FileCheck %s --check-prefix=FOUR

-; RUN: opt %loadPolly -polly-codegen -polly-parallel -polly-omp-backend=LLVM -S < %s | FileCheck %s --check-prefix=LIBOMP-AUTO
-; RUN: opt %loadPolly -polly-codegen -polly-parallel -polly-omp-backend=LLVM -polly-num-threads=1 -S < %s | FileCheck %s --check-prefix=LIBOMP-ONE
-; RUN: opt %loadPolly -polly-codegen -polly-parallel -polly-omp-backend=LLVM -polly-num-threads=4 -S < %s | FileCheck %s --check-prefix=LIBOMP-FOUR
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-parallel -polly-omp-backend=LLVM -S < %s | FileCheck %s --check-prefix=LIBOMP-AUTO
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-parallel -polly-omp-backend=LLVM -polly-num-threads=1 -S < %s | FileCheck %s --check-prefix=LIBOMP-ONE
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-parallel -polly-omp-backend=LLVM -polly-num-threads=4 -S < %s | FileCheck %s --check-prefix=LIBOMP-FOUR
; Ensure that the provided thread numbers are forwarded to the OpenMP calls.
;
diff --git a/polly/test/CodeGen/out-of-scop-phi-node-use.ll b/polly/test/CodeGen/out-of-scop-phi-node-use.ll
index 54e909ecf378..a4f942309ed2 100644
--- a/polly/test/CodeGen/out-of-scop-phi-node-use.ll
+++ b/polly/test/CodeGen/out-of-scop-phi-node-use.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/polly/test/CodeGen/param_div_div_div_2.ll b/polly/test/CodeGen/param_div_div_div_2.ll
index 764ca241f166..8eba6444abb1 100644
--- a/polly/test/CodeGen/param_div_div_div_2.ll
+++ b/polly/test/CodeGen/param_div_div_div_2.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s --check-prefix=IR
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s --check-prefix=IR
;
; Check that we guard the divisions because we moved them and thereby increased
; their domain.
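Note the 2>&1 added alongside print<polly-function-scops>: this and the print<polly-detect> conversions below fold stderr into stdout before the pipe, so these NPM analysis printers evidently write to stderr, whereas the print<polly-ast> conversions in this patch keep piping stdout only. A sketch of the resulting shape (plugin and file names assumed):

  # redirect stderr so FileCheck sees the analysis printout
  opt -load-pass-plugin LLVMPolly.so '-passes=print<polly-function-scops>' \
      -disable-output input.ll 2>&1 | FileCheck checks.ll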
diff --git a/polly/test/CodeGen/partial_write_array.ll b/polly/test/CodeGen/partial_write_array.ll
index 6dc5550d82af..8bb1bc2c3d8c 100644
--- a/polly/test/CodeGen/partial_write_array.ll
+++ b/polly/test/CodeGen/partial_write_array.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' -polly-import-jscop-postfix=transformed -S < %s | FileCheck %s
;
; Partial write of an array access.
;
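Where a legacy RUN line chained several Polly passes as flags (-polly-import-jscop -polly-codegen), the NPM form lists them comma-separated inside one quoted -passes= argument; plain cl::opt options such as -polly-import-jscop-postfix are not passes and stay as ordinary flags outside the pipeline string. A sketch (names assumed):

  # passes compose inside -passes=...; tuning options remain separate flags
  opt -load-pass-plugin LLVMPolly.so '-passes=polly-import-jscop,polly-codegen' \
      -polly-import-jscop-postfix=transformed -S input.ll | FileCheck checks.ll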
diff --git a/polly/test/CodeGen/partial_write_emptyset.ll b/polly/test/CodeGen/partial_write_emptyset.ll
index a25195f11ed7..67828808e2fa 100644
--- a/polly/test/CodeGen/partial_write_emptyset.ll
+++ b/polly/test/CodeGen/partial_write_emptyset.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' -polly-import-jscop-postfix=transformed -S < %s | FileCheck %s
;
; Partial write, where "partial" is the empty set.
; The store is never executed in this case and we do generate it in the
diff --git a/polly/test/CodeGen/partial_write_full_write_that_appears_partial.ll b/polly/test/CodeGen/partial_write_full_write_that_appears_partial.ll
index 18a809b30557..b26bd81b5663 100644
--- a/polly/test/CodeGen/partial_write_full_write_that_appears_partial.ll
+++ b/polly/test/CodeGen/partial_write_full_write_that_appears_partial.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
; CHECK: polly.stmt.if.then81: ; preds = %polly.stmt.if.end75
; CHECK-NEXT: store float undef, ptr %fX64, align 4, !alias.scope !0, !noalias !3
diff --git a/polly/test/CodeGen/partial_write_impossible_restriction.ll b/polly/test/CodeGen/partial_write_impossible_restriction.ll
index 178227fef8e5..edee3b913ce7 100644
--- a/polly/test/CodeGen/partial_write_impossible_restriction.ll
+++ b/polly/test/CodeGen/partial_write_impossible_restriction.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=polly-import-jscop,polly-codegen' -polly-import-jscop-postfix=transformed -S < %s | FileCheck %s
;
; The isl scheduler isolates %cond.false into two instances.
; A partial write access in one of the instances was never executed,
diff --git a/polly/test/CodeGen/partial_write_in_region.ll b/polly/test/CodeGen/partial_write_in_region.ll
index d8f57b35d585..7c138c82091e 100644
--- a/polly/test/CodeGen/partial_write_in_region.ll
+++ b/polly/test/CodeGen/partial_write_in_region.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-import-jscop \
-; RUN: -polly-import-jscop-postfix=transformed -polly-codegen \
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' \
+; RUN: -polly-import-jscop-postfix=transformed \
; RUN: -verify-dom-info \
; RUN: -S < %s | FileCheck %s
;
diff --git a/polly/test/CodeGen/partial_write_in_region_with_loop.ll b/polly/test/CodeGen/partial_write_in_region_with_loop.ll
index 48a9dbef21d1..ba15a7871f43 100644
--- a/polly/test/CodeGen/partial_write_in_region_with_loop.ll
+++ b/polly/test/CodeGen/partial_write_in_region_with_loop.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-import-jscop \
-; RUN: -polly-import-jscop-postfix=transformed -polly-codegen \
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' \
+; RUN: -polly-import-jscop-postfix=transformed \
; RUN: -verify-dom-info -polly-allow-nonaffine-loops \
; RUN: -S < %s | FileCheck %s
diff --git a/polly/test/CodeGen/partial_write_mapped_scalar.ll b/polly/test/CodeGen/partial_write_mapped_scalar.ll
index 9137ef2123c8..b8c413885cdb 100644
--- a/polly/test/CodeGen/partial_write_mapped_scalar.ll
+++ b/polly/test/CodeGen/partial_write_mapped_scalar.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=polly-import-jscop,polly-codegen' -polly-import-jscop-postfix=transformed -S < %s | FileCheck %s
;
; Partial write of a (mapped) scalar.
;
diff --git a/polly/test/CodeGen/partial_write_mapped_scalar_subregion.ll b/polly/test/CodeGen/partial_write_mapped_scalar_subregion.ll
index e054b65eadf3..8c1953a05ad3 100644
--- a/polly/test/CodeGen/partial_write_mapped_scalar_subregion.ll
+++ b/polly/test/CodeGen/partial_write_mapped_scalar_subregion.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' -polly-import-jscop-postfix=transformed -S < %s | FileCheck %s
;
; Partial write of a (mapped) scalar in a non-affine subregion.
;
diff --git a/polly/test/CodeGen/perf_monitoring.ll b/polly/test/CodeGen/perf_monitoring.ll
index 2abbf24f5e78..4b91e5055c0b 100644
--- a/polly/test/CodeGen/perf_monitoring.ll
+++ b/polly/test/CodeGen/perf_monitoring.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-codegen-perf-monitoring \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-codegen-perf-monitoring \
; RUN: -S < %s | FileCheck %s
; void f(long A[], long N) {
diff --git a/polly/test/CodeGen/perf_monitoring_cycles_per_scop.ll b/polly/test/CodeGen/perf_monitoring_cycles_per_scop.ll
index 11d63fc47658..d5c33d64f341 100644
--- a/polly/test/CodeGen/perf_monitoring_cycles_per_scop.ll
+++ b/polly/test/CodeGen/perf_monitoring_cycles_per_scop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-codegen-perf-monitoring \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-codegen-perf-monitoring \
; RUN: -S < %s | FileCheck %s
; void f(long A[], long N) {
diff --git a/polly/test/CodeGen/perf_monitoring_trip_counts_per_scop.ll b/polly/test/CodeGen/perf_monitoring_trip_counts_per_scop.ll
index 9b7f324df8e4..ab99c4d2de06 100644
--- a/polly/test/CodeGen/perf_monitoring_trip_counts_per_scop.ll
+++ b/polly/test/CodeGen/perf_monitoring_trip_counts_per_scop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-codegen-perf-monitoring \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-codegen-perf-monitoring \
; RUN: -S < %s | FileCheck %s
; void f(long A[], long N) {
diff --git a/polly/test/CodeGen/phi-defined-before-scop.ll b/polly/test/CodeGen/phi-defined-before-scop.ll
index a3b1ba264f04..f08322281d3c 100644
--- a/polly/test/CodeGen/phi-defined-before-scop.ll
+++ b/polly/test/CodeGen/phi-defined-before-scop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
; CHECK-LABEL: polly.merge_new_and_old:
; CHECK-NEXT: %tmp7.ph.merge = phi ptr [ %tmp7.ph.final_reload, %polly.exiting ], [ %tmp7.ph, %bb6.region_exiting ]
diff --git a/polly/test/CodeGen/phi_after_error_block_outside_of_scop.ll b/polly/test/CodeGen/phi_after_error_block_outside_of_scop.ll
index c34ebfc3ca02..e096aa2f4f8c 100644
--- a/polly/test/CodeGen/phi_after_error_block_outside_of_scop.ll
+++ b/polly/test/CodeGen/phi_after_error_block_outside_of_scop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
; Make sure code generation does not break in case an 'error block' is detected
; outside of the scope. In this situation, we should not affect code generation.
diff --git a/polly/test/CodeGen/phi_condition_modeling_1.ll b/polly/test/CodeGen/phi_condition_modeling_1.ll
index b14d32921cf7..9d73d8a79255 100644
--- a/polly/test/CodeGen/phi_condition_modeling_1.ll
+++ b/polly/test/CodeGen/phi_condition_modeling_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s | FileCheck %s
;
; void f(int *A, int c, int N) {
; int tmp;
diff --git a/polly/test/CodeGen/phi_condition_modeling_2.ll b/polly/test/CodeGen/phi_condition_modeling_2.ll
index dab2977bf065..2d1364842d73 100644
--- a/polly/test/CodeGen/phi_condition_modeling_2.ll
+++ b/polly/test/CodeGen/phi_condition_modeling_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s | FileCheck %s
;
; void f(int *A, int c, int N) {
; int tmp;
diff --git a/polly/test/CodeGen/phi_conditional_simple_1.ll b/polly/test/CodeGen/phi_conditional_simple_1.ll
index f1b93b540f70..25bcf2a118ef 100644
--- a/polly/test/CodeGen/phi_conditional_simple_1.ll
+++ b/polly/test/CodeGen/phi_conditional_simple_1.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=AST
-; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s --check-prefix=AST
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s | FileCheck %s
;
; void jd(int *A, int c) {
; for (int i = 0; i < 1024; i++) {
diff --git a/polly/test/CodeGen/phi_in_exit_early_lnt_failure_1.ll b/polly/test/CodeGen/phi_in_exit_early_lnt_failure_1.ll
index 13688480e315..43d29b9ec864 100644
--- a/polly/test/CodeGen/phi_in_exit_early_lnt_failure_1.ll
+++ b/polly/test/CodeGen/phi_in_exit_early_lnt_failure_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; This caused an lnt crash at some point, just verify it will run through.
;
diff --git a/polly/test/CodeGen/phi_in_exit_early_lnt_failure_2.ll b/polly/test/CodeGen/phi_in_exit_early_lnt_failure_2.ll
index 01dd450590d9..9f28024fcfa0 100644
--- a/polly/test/CodeGen/phi_in_exit_early_lnt_failure_2.ll
+++ b/polly/test/CodeGen/phi_in_exit_early_lnt_failure_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; This caused an lnt crash at some point, just verify it will run through and
; produce the PHI node in the exit we are looking for.
diff --git a/polly/test/CodeGen/phi_in_exit_early_lnt_failure_3.ll b/polly/test/CodeGen/phi_in_exit_early_lnt_failure_3.ll
index 66b95b0e0317..73e99ac0f32c 100644
--- a/polly/test/CodeGen/phi_in_exit_early_lnt_failure_3.ll
+++ b/polly/test/CodeGen/phi_in_exit_early_lnt_failure_3.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; This caused an lnt crash at some point, just verify it will run through and
; produce the PHI node in the exit we are looking for.
diff --git a/polly/test/CodeGen/phi_in_exit_early_lnt_failure_5.ll b/polly/test/CodeGen/phi_in_exit_early_lnt_failure_5.ll
index 9a046367e768..6c9bd56a9872 100644
--- a/polly/test/CodeGen/phi_in_exit_early_lnt_failure_5.ll
+++ b/polly/test/CodeGen/phi_in_exit_early_lnt_failure_5.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; This caused an lnt crash at some point, just verify it will run through and
; produce the PHI node in the exit we are looking for.
diff --git a/polly/test/CodeGen/phi_loop_carried_float.ll b/polly/test/CodeGen/phi_loop_carried_float.ll
index ca1870fb3a09..d671db08b06c 100644
--- a/polly/test/CodeGen/phi_loop_carried_float.ll
+++ b/polly/test/CodeGen/phi_loop_carried_float.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s | FileCheck %s
;
; float f(float *A, int N) {
; float tmp = 0;
diff --git a/polly/test/CodeGen/phi_loop_carried_float_escape.ll b/polly/test/CodeGen/phi_loop_carried_float_escape.ll
index 3b2ed01863b1..3e244c5e1332 100644
--- a/polly/test/CodeGen/phi_loop_carried_float_escape.ll
+++ b/polly/test/CodeGen/phi_loop_carried_float_escape.ll
@@ -1,8 +1,8 @@
-; RUN: opt %loadPolly -S \
-; RUN: -polly-analyze-read-only-scalars=false -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S \
+; RUN: -polly-analyze-read-only-scalars=false -passes=polly-codegen < %s | FileCheck %s

-; RUN: opt %loadPolly -S \
-; RUN: -polly-analyze-read-only-scalars=true -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S \
+; RUN: -polly-analyze-read-only-scalars=true -passes=polly-codegen < %s | FileCheck %s
;
; float f(float *A, int N) {
; float tmp = 0;
diff --git a/polly/test/CodeGen/phi_scalar_simple_1.ll b/polly/test/CodeGen/phi_scalar_simple_1.ll
index d62975b6a7b3..80a1c41b83ac 100644
--- a/polly/test/CodeGen/phi_scalar_simple_1.ll
+++ b/polly/test/CodeGen/phi_scalar_simple_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s | FileCheck %s
;
; int jd(int *restrict A, int x, int N) {
; for (int i = 1; i < N; i++)
diff --git a/polly/test/CodeGen/phi_scalar_simple_2.ll b/polly/test/CodeGen/phi_scalar_simple_2.ll
index e58945d39960..614c8acfb9f8 100644
--- a/polly/test/CodeGen/phi_scalar_simple_2.ll
+++ b/polly/test/CodeGen/phi_scalar_simple_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s | FileCheck %s
;
; int jd(int *restrict A, int x, int N, int c) {
; for (int i = 0; i < N; i++)
diff --git a/polly/test/CodeGen/phi_with_multi_exiting_edges_2.ll b/polly/test/CodeGen/phi_with_multi_exiting_edges_2.ll
index 17e4b7d6b4de..7e21666f1db0 100644
--- a/polly/test/CodeGen/phi_with_multi_exiting_edges_2.ll
+++ b/polly/test/CodeGen/phi_with_multi_exiting_edges_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; CHECK: polly.merge_new_and_old:
; CHECK: %result.ph.merge = phi float [ %result.ph.final_reload, %polly.exiting ], [ %result.ph, %next.region_exiting ]
diff --git a/polly/test/CodeGen/phi_with_one_exit_edge.ll b/polly/test/CodeGen/phi_with_one_exit_edge.ll
index 81fd73b51c79..36a8684dbc37 100644
--- a/polly/test/CodeGen/phi_with_one_exit_edge.ll
+++ b/polly/test/CodeGen/phi_with_one_exit_edge.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
;
; CHECK: polly.merge_new_and_old:
diff --git a/polly/test/CodeGen/pointer-type-expressions-2.ll b/polly/test/CodeGen/pointer-type-expressions-2.ll
index b261cfe53321..918e4c6c9c0b 100644
--- a/polly/test/CodeGen/pointer-type-expressions-2.ll
+++ b/polly/test/CodeGen/pointer-type-expressions-2.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s -check-prefix=CODEGEN
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s -check-prefix=CODEGEN
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
define void @foo(ptr %start, ptr %end) {
diff --git a/polly/test/CodeGen/pointer-type-expressions.ll b/polly/test/CodeGen/pointer-type-expressions.ll
index 6bb3fa242362..e7feebc163d4 100644
--- a/polly/test/CodeGen/pointer-type-expressions.ll
+++ b/polly/test/CodeGen/pointer-type-expressions.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s -check-prefix=CODEGEN
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s -check-prefix=CODEGEN
; void f(int a[], int N, float *P) {
; int i;
diff --git a/polly/test/CodeGen/pointer-type-pointer-type-comparison.ll b/polly/test/CodeGen/pointer-type-pointer-type-comparison.ll
index eaef64017aa7..9ee050a1e507 100644
--- a/polly/test/CodeGen/pointer-type-pointer-type-comparison.ll
+++ b/polly/test/CodeGen/pointer-type-pointer-type-comparison.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s -check-prefix=CODEGEN
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s -check-prefix=CODEGEN
;
; void f(int a[], int N, float *P, float *Q) {
diff --git a/polly/test/CodeGen/pointer_rem.ll b/polly/test/CodeGen/pointer_rem.ll
index 5c92ee52da2c..b8202318a3ec 100644
--- a/polly/test/CodeGen/pointer_rem.ll
+++ b/polly/test/CodeGen/pointer_rem.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-process-unprofitable -polly-scops -polly-print-ast -disable-output -S < %s | FileCheck %s --check-prefix=AST
-; RUN: opt %loadPolly -polly-process-unprofitable -polly-scops -polly-codegen -S < %s | FileCheck %s --check-prefix=CODEGEN
+; RUN: opt %loadNPMPolly -polly-process-unprofitable '-passes=print<polly-function-scops>,scop(print<polly-ast>)' -disable-output -S < %s | FileCheck %s --check-prefix=AST
+; RUN: opt %loadNPMPolly -polly-process-unprofitable '-passes=print<polly-function-scops>,scop(polly-codegen)' -S < %s | FileCheck %s --check-prefix=CODEGEN
target datalayout = "e-m:e-i64:64-i128:128-n8:16:32:64-S128"
target triple = "aarch64--linux-gnu"
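The pointer_rem.ll conversion shows the nesting syntax used when a pass must run at SCoP rather than function scope: scop(...) wraps the inner passes, so '-passes=print<polly-function-scops>,scop(polly-codegen)' prints the function's SCoPs and then runs code generation inside the SCoP pass manager. A sketch (names assumed):

  # scop(...) nests SCoP-level passes inside the function pipeline
  opt -load-pass-plugin LLVMPolly.so \
      '-passes=print<polly-function-scops>,scop(polly-codegen)' -S input.ll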
diff --git a/polly/test/CodeGen/pr25241.ll b/polly/test/CodeGen/pr25241.ll
index 9fa67e083a6c..4a4add8ba2a6 100644
--- a/polly/test/CodeGen/pr25241.ll
+++ b/polly/test/CodeGen/pr25241.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
; PR25241 (https://llvm.org/bugs/show_bug.cgi?id=25241)
; Ensure that synthesized values of a PHI node argument are generated in the
diff --git a/polly/test/CodeGen/ptrtoint_as_parameter.ll b/polly/test/CodeGen/ptrtoint_as_parameter.ll
index 4f6c8079729d..a551d810c080 100644
--- a/polly/test/CodeGen/ptrtoint_as_parameter.ll
+++ b/polly/test/CodeGen/ptrtoint_as_parameter.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; CHECK: if.then260:
; CHECK-NEXT: %p.4 = getelementptr inbounds i8, ptr null, i64 1
diff --git a/polly/test/CodeGen/read-only-scalars.ll b/polly/test/CodeGen/read-only-scalars.ll
index a5e1d2719d7d..365cbbce495f 100644
--- a/polly/test/CodeGen/read-only-scalars.ll
+++ b/polly/test/CodeGen/read-only-scalars.ll
@@ -1,7 +1,7 @@
-; RUN: opt %loadPolly -polly-analyze-read-only-scalars=false -polly-codegen \
+; RUN: opt %loadNPMPolly -polly-analyze-read-only-scalars=false -passes=polly-codegen \
; RUN: \
; RUN: -S < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-analyze-read-only-scalars=true -polly-codegen \
+; RUN: opt %loadNPMPolly -polly-analyze-read-only-scalars=true -passes=polly-codegen \
; RUN: \
; RUN: -S < %s | FileCheck %s -check-prefix=SCALAR
diff --git a/polly/test/CodeGen/reduction.ll b/polly/test/CodeGen/reduction.ll
index 6e5a230ad231..8c5f70770a1c 100644
--- a/polly/test/CodeGen/reduction.ll
+++ b/polly/test/CodeGen/reduction.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s 2>&1 | not FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s 2>&1 | not FileCheck %s
;#include <string.h>
;#include <stdio.h>
diff --git a/polly/test/CodeGen/reduction_2.ll b/polly/test/CodeGen/reduction_2.ll
index 7a50cea31400..4aa306775e78 100644
--- a/polly/test/CodeGen/reduction_2.ll
+++ b/polly/test/CodeGen/reduction_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-invariant-load-hoisting=true -polly-print-ast -disable-output < %s | FileCheck %s --allow-empty
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-invariant-load-hoisting=true '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s --allow-empty
;#include <string.h>
;#include <stdio.h>
diff --git a/polly/test/CodeGen/reduction_simple_binary.ll b/polly/test/CodeGen/reduction_simple_binary.ll
index c7c5501bb7ed..0fe1085dbbac 100644
--- a/polly/test/CodeGen/reduction_simple_binary.ll
+++ b/polly/test/CodeGen/reduction_simple_binary.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
;
; CHECK: pragma simd reduction
;
diff --git a/polly/test/CodeGen/region-with-instructions.ll b/polly/test/CodeGen/region-with-instructions.ll
index 28cabefbf68b..e5f7d0f9ef5d 100644
--- a/polly/test/CodeGen/region-with-instructions.ll
+++ b/polly/test/CodeGen/region-with-instructions.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
; CHECK-LABEL: polly.stmt.bb48:
; CHECK-NEXT: %[[offset:.*]] = shl i64 %polly.indvar, 3
diff --git a/polly/test/CodeGen/region_exiting-domtree.ll b/polly/test/CodeGen/region_exiting-domtree.ll
index 05983da0a3e3..06e0d9df3d95 100644
--- a/polly/test/CodeGen/region_exiting-domtree.ll
+++ b/polly/test/CodeGen/region_exiting-domtree.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -verify-dom-info -disable-output < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -verify-dom-info -disable-output < %s
; Verify that the DominatorTree is preserved correctly for the inserted
; %polly.stmt.exit.exit block, which serves as new exit block for the generated
diff --git a/polly/test/CodeGen/region_multiexit_partialwrite.ll b/polly/test/CodeGen/region_multiexit_partialwrite.ll
index b98d7f58732a..39e04dbf93ac 100644
--- a/polly/test/CodeGen/region_multiexit_partialwrite.ll
+++ b/polly/test/CodeGen/region_multiexit_partialwrite.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-codegen' -polly-import-jscop-postfix=transformed -S < %s | FileCheck %s
;
; This text case has a partial write of PHI in a region-statement. It
; requires that the new PHINode from the region's exiting block is
diff --git a/polly/test/CodeGen/run-time-condition-with-scev-parameters.ll b/polly/test/CodeGen/run-time-condition-with-scev-parameters.ll
index 0f62a8c743df..4afaab5bbad0 100644
--- a/polly/test/CodeGen/run-time-condition-with-scev-parameters.ll
+++ b/polly/test/CodeGen/run-time-condition-with-scev-parameters.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=AST
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s --check-prefix=AST
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
; TODO: FIXME: Simplify the context.
; AST: if (n >= 1 && 0 == n <= -1)
diff --git a/polly/test/CodeGen/run-time-condition.ll b/polly/test/CodeGen/run-time-condition.ll
index 0faefad8aef4..914b76f5e0be 100644
--- a/polly/test/CodeGen/run-time-condition.ll
+++ b/polly/test/CodeGen/run-time-condition.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -passes=polly-codegen -S < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/polly/test/CodeGen/scalar-references-used-in-scop-compute.ll b/polly/test/CodeGen/scalar-references-used-in-scop-compute.ll
index 3f88942c2300..0b49da0d0e09 100644
--- a/polly/test/CodeGen/scalar-references-used-in-scop-compute.ll
+++ b/polly/test/CodeGen/scalar-references-used-in-scop-compute.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
; Test the code generation in the presence of a scalar out-of-scop value being
; used from within the SCoP.
diff --git a/polly/test/CodeGen/scalar-store-from-same-bb.ll b/polly/test/CodeGen/scalar-store-from-same-bb.ll
index ac8fab4b7a0d..3f232da37e4c 100644
--- a/polly/test/CodeGen/scalar-store-from-same-bb.ll
+++ b/polly/test/CodeGen/scalar-store-from-same-bb.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly \
-; RUN: -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly \
+; RUN: -passes=polly-codegen -S < %s | FileCheck %s
; This test ensures that the expression N + 1 that is stored in the phi-node
; alloca, is directly computed and not incorrectly transfered through memory.
diff --git a/polly/test/CodeGen/scalar_codegen_crash.ll b/polly/test/CodeGen/scalar_codegen_crash.ll
index c41a00f59e81..375f097283b0 100644
--- a/polly/test/CodeGen/scalar_codegen_crash.ll
+++ b/polly/test/CodeGen/scalar_codegen_crash.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly \
-; RUN: -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly \
+; RUN: -passes=polly-codegen -S < %s | FileCheck %s
; This test cases used to crash the scalar code generation. Check that we
; can generate code for it.
diff --git a/polly/test/CodeGen/scev-backedgetaken.ll b/polly/test/CodeGen/scev-backedgetaken.ll
index 15e12ee8b451..f5e68ec930d1 100644
--- a/polly/test/CodeGen/scev-backedgetaken.ll
+++ b/polly/test/CodeGen/scev-backedgetaken.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; llvm.org/PR48422
; Use of ScalarEvolution in Codegen not possible because DominatorTree is not updated.
diff --git a/polly/test/CodeGen/scev-division-invariant-load.ll b/polly/test/CodeGen/scev-division-invariant-load.ll
index 3156bdc9f5ce..70f090eae07b 100644
--- a/polly/test/CodeGen/scev-division-invariant-load.ll
+++ b/polly/test/CodeGen/scev-division-invariant-load.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen < %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s
;
; Check that we generate valid code as we did not use the preloaded
; value of %tmp1 for the access function of the preloaded %tmp4.
diff --git a/polly/test/CodeGen/scev.ll b/polly/test/CodeGen/scev.ll
index 07d726d97caf..e2b5afda1bff 100644
--- a/polly/test/CodeGen/scev.ll
+++ b/polly/test/CodeGen/scev.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-detect < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define fastcc void @f () inlinehint align 2 {
diff --git a/polly/test/CodeGen/scev_expansion_in_nonaffine.ll b/polly/test/CodeGen/scev_expansion_in_nonaffine.ll
index f61f21d4adb8..6c6c2572da10 100644
--- a/polly/test/CodeGen/scev_expansion_in_nonaffine.ll
+++ b/polly/test/CodeGen/scev_expansion_in_nonaffine.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S \
; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s
; bugpoint-reduced testcase of MiBench/consumer-lame/quantize-pvt.c from the
diff --git a/polly/test/CodeGen/scev_looking_through_bitcasts.ll b/polly/test/CodeGen/scev_looking_through_bitcasts.ll
index c87d932479b7..142e83f820fe 100644
--- a/polly/test/CodeGen/scev_looking_through_bitcasts.ll
+++ b/polly/test/CodeGen/scev_looking_through_bitcasts.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; Scalar write of bitcasted value. Instead of writing %b of type
; %structty, the SCEV expression looks through the bitcast such that
diff --git a/polly/test/CodeGen/scop_expander_insert_point.ll b/polly/test/CodeGen/scop_expander_insert_point.ll
index 8492873b22ed..92f2772155ee 100644
--- a/polly/test/CodeGen/scop_expander_insert_point.ll
+++ b/polly/test/CodeGen/scop_expander_insert_point.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S \
; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s
;
; CHECK: entry:
diff --git a/polly/test/CodeGen/scop_expander_segfault.ll b/polly/test/CodeGen/scop_expander_segfault.ll
index 293c1e527959..d94a1fdfb2c1 100644
--- a/polly/test/CodeGen/scop_expander_segfault.ll
+++ b/polly/test/CodeGen/scop_expander_segfault.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S %s | FileCheck %s
;
; This test was extracted from gcc in SPEC2006 and it crashed our code
; generation, or to be more precise, the ScopExpander due to a endless
diff --git a/polly/test/CodeGen/scop_never_executed_runtime_check_location.ll b/polly/test/CodeGen/scop_never_executed_runtime_check_location.ll
index 91a58159b5f9..9f968e5657c9 100644
--- a/polly/test/CodeGen/scop_never_executed_runtime_check_location.ll
+++ b/polly/test/CodeGen/scop_never_executed_runtime_check_location.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
; Verify that we generate the runtime check code after the conditional branch
; in the SCoP region entering block (here %entry).
diff --git a/polly/test/CodeGen/select-base-pointer.ll b/polly/test/CodeGen/select-base-pointer.ll
index 29bc40074e1f..85be37755c47 100644
--- a/polly/test/CodeGen/select-base-pointer.ll
+++ b/polly/test/CodeGen/select-base-pointer.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -tbaa -polly-codegen -disable-output %s
+; RUN: opt %loadNPMPolly -aa-pipeline=tbaa -passes=polly-codegen -disable-output %s
;
; Check that we do not crash here.
;
diff --git a/polly/test/CodeGen/sequential_loops.ll b/polly/test/CodeGen/sequential_loops.ll
index 97d280de3cd2..33a3ee9fbbd4 100644
--- a/polly/test/CodeGen/sequential_loops.ll
+++ b/polly/test/CodeGen/sequential_loops.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
;#include <string.h>
;#define N 1024
diff --git a/polly/test/CodeGen/simple_loop_non_single_exit.ll b/polly/test/CodeGen/simple_loop_non_single_exit.ll
index dc1b09b765a1..a7e36bc4c733 100644
--- a/polly/test/CodeGen/simple_loop_non_single_exit.ll
+++ b/polly/test/CodeGen/simple_loop_non_single_exit.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s -check-prefix=CHECK-CODE
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s -check-prefix=CHECK-CODE
; void f(long A[], long N) {
; long i;
diff --git a/polly/test/CodeGen/simple_loop_non_single_exit_2.ll b/polly/test/CodeGen/simple_loop_non_single_exit_2.ll
index 178601cac9b8..22e9da09ef85 100644
--- a/polly/test/CodeGen/simple_loop_non_single_exit_2.ll
+++ b/polly/test/CodeGen/simple_loop_non_single_exit_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s -check-prefix=CHECK-CODE
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s -check-prefix=CHECK-CODE
; void f(long A[], long N) {
; long i;
diff --git a/polly/test/CodeGen/simple_non_single_entry.ll b/polly/test/CodeGen/simple_non_single_entry.ll
index 3b4bf59bdc65..c33a77ae0793 100644
--- a/polly/test/CodeGen/simple_non_single_entry.ll
+++ b/polly/test/CodeGen/simple_non_single_entry.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s -check-prefix=CHECK-CODE
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s -check-prefix=CHECK-CODE
; void f(long A[], long N) {
; long i;
diff --git a/polly/test/CodeGen/simple_nonaffine_loop.ll b/polly/test/CodeGen/simple_nonaffine_loop.ll
index d4e9c6082e6c..bc62047a80a3 100644
--- a/polly/test/CodeGen/simple_nonaffine_loop.ll
+++ b/polly/test/CodeGen/simple_nonaffine_loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -polly-allow-nonaffine -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -polly-allow-nonaffine -disable-output < %s | FileCheck %s
;#include <stdio.h>
;#include <stdlib.h>
diff --git a/polly/test/CodeGen/single_do_loop_int_max_iterations.ll b/polly/test/CodeGen/single_do_loop_int_max_iterations.ll
index 9648fbe1cf12..a65e3a25f035 100644
--- a/polly/test/CodeGen/single_do_loop_int_max_iterations.ll
+++ b/polly/test/CodeGen/single_do_loop_int_max_iterations.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
;#define N 20
;#include "limits.h"
diff --git a/polly/test/CodeGen/single_do_loop_int_param_iterations.ll b/polly/test/CodeGen/single_do_loop_int_param_iterations.ll
index f28d828a5da0..acccb48f18a3 100644
--- a/polly/test/CodeGen/single_do_loop_int_param_iterations.ll
+++ b/polly/test/CodeGen/single_do_loop_int_param_iterations.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
; XFAIL: *
;define N 20
diff --git a/polly/test/CodeGen/single_do_loop_ll_max_iterations.ll b/polly/test/CodeGen/single_do_loop_ll_max_iterations.ll
index 68aaab96083a..7a67f6ba96ce 100644
--- a/polly/test/CodeGen/single_do_loop_ll_max_iterations.ll
+++ b/polly/test/CodeGen/single_do_loop_ll_max_iterations.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s
;#define N 20
;#include "limits.h"
diff --git a/polly/test/CodeGen/single_do_loop_one_iteration.ll b/polly/test/CodeGen/single_do_loop_one_iteration.ll
index 9d97cb854734..2d939167b71e 100644
--- a/polly/test/CodeGen/single_do_loop_one_iteration.ll
+++ b/polly/test/CodeGen/single_do_loop_one_iteration.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
; XFAIL: *
;#define N 20
diff --git a/polly/test/CodeGen/single_do_loop_scev_replace.ll b/polly/test/CodeGen/single_do_loop_scev_replace.ll
index 7963d9d29fe8..83c9e9d0324c 100644
--- a/polly/test/CodeGen/single_do_loop_scev_replace.ll
+++ b/polly/test/CodeGen/single_do_loop_scev_replace.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
;#define N 20
;#include "limits.h"
diff --git a/polly/test/CodeGen/single_loop.ll b/polly/test/CodeGen/single_loop.ll
index 68cc498b43e0..2db34663e93c 100644
--- a/polly/test/CodeGen/single_loop.ll
+++ b/polly/test/CodeGen/single_loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
;#include <string.h>
;#define N 1024
diff --git a/polly/test/CodeGen/single_loop_int_max_iterations.ll b/polly/test/CodeGen/single_loop_int_max_iterations.ll
index bfb5e4ab2698..f83e8823c63d 100644
--- a/polly/test/CodeGen/single_loop_int_max_iterations.ll
+++ b/polly/test/CodeGen/single_loop_int_max_iterations.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
;#define N 20
;#include "limits.h"
diff --git a/polly/test/CodeGen/single_loop_ll_max_iterations.ll b/polly/test/CodeGen/single_loop_ll_max_iterations.ll
index bdfd7fce4204..1427189d74a7 100644
--- a/polly/test/CodeGen/single_loop_ll_max_iterations.ll
+++ b/polly/test/CodeGen/single_loop_ll_max_iterations.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
;#include "limits.h"
;#define N 20
diff --git a/polly/test/CodeGen/single_loop_one_iteration.ll b/polly/test/CodeGen/single_loop_one_iteration.ll
index 7d4dd590fab9..1a70d4a879d8 100644
--- a/polly/test/CodeGen/single_loop_one_iteration.ll
+++ b/polly/test/CodeGen/single_loop_one_iteration.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
;#define N 20
;
diff --git a/polly/test/CodeGen/single_loop_param.ll b/polly/test/CodeGen/single_loop_param.ll
index 5d72da354fdc..44ce1236e9f8 100644
--- a/polly/test/CodeGen/single_loop_param.ll
+++ b/polly/test/CodeGen/single_loop_param.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
@A = common global [1024 x i32] zeroinitializer, align 16 ; <ptr> [#uses=3]
diff --git a/polly/test/CodeGen/single_loop_param_less_equal.ll b/polly/test/CodeGen/single_loop_param_less_equal.ll
index e63ee299a37c..fda9bfab11b8 100644
--- a/polly/test/CodeGen/single_loop_param_less_equal.ll
+++ b/polly/test/CodeGen/single_loop_param_less_equal.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s -check-prefix=CODEGEN
-; RUN: opt %loadPolly -polly-codegen < %s | opt -passes='print<loops>' -disable-output 2>&1 | FileCheck %s -check-prefix=LOOPS
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s -check-prefix=CODEGEN
+; RUN: opt %loadNPMPolly -passes=polly-codegen < %s | opt -passes='print<loops>' -disable-output 2>&1 | FileCheck %s -check-prefix=LOOPS
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
@A = common global [1024 x i32] zeroinitializer
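The LOOPS line above is worth noting: it pipes Polly's transformed module straight into a second opt that prints loop info, checking that the analysis is still intact after codegen; that second invocation already used -passes='print<loops>' before this patch, so only the first opt needed converting. A sketch (names assumed):

  # transform, then print the preserved loop info from a second opt
  opt -load-pass-plugin LLVMPolly.so -passes=polly-codegen input.ll \
    | opt -passes='print<loops>' -disable-output 2>&1 | FileCheck checks.ll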
diff --git a/polly/test/CodeGen/single_loop_param_less_than.ll b/polly/test/CodeGen/single_loop_param_less_than.ll
index 95130f926450..b888c860eacd 100644
--- a/polly/test/CodeGen/single_loop_param_less_than.ll
+++ b/polly/test/CodeGen/single_loop_param_less_than.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s -check-prefix=CODEGEN
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s -check-prefix=CODEGEN
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
@A = common global [1024 x i32] zeroinitializer
diff --git a/polly/test/CodeGen/single_loop_zero_iterations.ll b/polly/test/CodeGen/single_loop_zero_iterations.ll
index 4f189687d330..b1ce491b5c8a 100644
--- a/polly/test/CodeGen/single_loop_zero_iterations.ll
+++ b/polly/test/CodeGen/single_loop_zero_iterations.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s -check-prefix=SCALAR --allow-empty
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s -check-prefix=SCALAR --allow-empty
;#define N 20
;
diff --git a/polly/test/CodeGen/split_edge_of_exit.ll b/polly/test/CodeGen/split_edge_of_exit.ll
index 56ce215a62b2..f4b17e687ada 100644
--- a/polly/test/CodeGen/split_edge_of_exit.ll
+++ b/polly/test/CodeGen/split_edge_of_exit.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -verify-region-info -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -verify-region-info -disable-output < %s
;
; This is a scop directly precedented by a region, i.e. the scop's entry is the
; region's exit block. This test is to ensure that the RegionInfo is correctly
diff --git a/polly/test/CodeGen/split_edges.ll b/polly/test/CodeGen/split_edges.ll
index e01d901e298c..b921202285bb 100644
--- a/polly/test/CodeGen/split_edges.ll
+++ b/polly/test/CodeGen/split_edges.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -verify-region-info -verify-dom-info -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -verify-region-info -verify-dom-info -S < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
@A = common global [1536 x float] zeroinitializer
diff --git a/polly/test/CodeGen/split_edges_2.ll b/polly/test/CodeGen/split_edges_2.ll
index 4135d6feeb3e..8f4d48f5dcb0 100644
--- a/polly/test/CodeGen/split_edges_2.ll
+++ b/polly/test/CodeGen/split_edges_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -verify-region-info -verify-dom-info -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -verify-region-info -verify-dom-info -S < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/polly/test/CodeGen/srem-in-other-bb.ll b/polly/test/CodeGen/srem-in-other-bb.ll
index 8bde1a3bbc1d..a13a1b6ab98f 100644
--- a/polly/test/CodeGen/srem-in-other-bb.ll
+++ b/polly/test/CodeGen/srem-in-other-bb.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S \
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S \
; RUN: < %s | FileCheck %s
;
; void pos(float *A, long n) {
diff --git a/polly/test/CodeGen/stack-overflow-in-load-hoisting.ll b/polly/test/CodeGen/stack-overflow-in-load-hoisting.ll
index 02dfe96e3e91..cb9d9a2ec492 100644
--- a/polly/test/CodeGen/stack-overflow-in-load-hoisting.ll
+++ b/polly/test/CodeGen/stack-overflow-in-load-hoisting.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -verify-dom-info -polly-codegen -S < %s \
+; RUN: opt %loadNPMPolly -verify-dom-info -passes=polly-codegen -S < %s \
; RUN: -polly-invariant-load-hoisting=true | FileCheck %s
;
; This caused an infinite recursion during invariant load hoisting at some
diff --git a/polly/test/CodeGen/stmt_split_no_dependence.ll b/polly/test/CodeGen/stmt_split_no_dependence.ll
index a395aa14b4c8..381cd30a2ae6 100644
--- a/polly/test/CodeGen/stmt_split_no_dependence.ll
+++ b/polly/test/CodeGen/stmt_split_no_dependence.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; CHECK: store i32 %9, ptr %scevgep, align 4, !alias.scope !1, !noalias !4
; CHECK: store i32 %11, ptr %scevgep4, align 4, !alias.scope !4, !noalias !1
diff --git a/polly/test/CodeGen/switch-in-non-affine-region.ll b/polly/test/CodeGen/switch-in-non-affine-region.ll
index 930755ef5648..1a9e7081bebd 100644
--- a/polly/test/CodeGen/switch-in-non-affine-region.ll
+++ b/polly/test/CodeGen/switch-in-non-affine-region.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly \
-; RUN: -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly \
+; RUN: -S -passes=polly-codegen < %s | FileCheck %s
;
; void f(int *A, int N) {
; for (int i = 0; i < N; i++)
diff --git a/polly/test/CodeGen/synthesizable_phi_write_after_loop.ll b/polly/test/CodeGen/synthesizable_phi_write_after_loop.ll
index 6a8d3b94d1cc..b2a062363eef 100644
--- a/polly/test/CodeGen/synthesizable_phi_write_after_loop.ll
+++ b/polly/test/CodeGen/synthesizable_phi_write_after_loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; Check for the correct written value of a scalar phi write whose value is
; defined within the loop, but its effective value is its last definition when
diff --git a/polly/test/CodeGen/test-invalid-operands-for-select-2.ll b/polly/test/CodeGen/test-invalid-operands-for-select-2.ll
index 5fa4773398fd..5668063c27c8 100644
--- a/polly/test/CodeGen/test-invalid-operands-for-select-2.ll
+++ b/polly/test/CodeGen/test-invalid-operands-for-select-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen -verify-loop-info < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen -verify-loop-info < %s | FileCheck %s
;
; Check that we do not crash as described here: http://llvm.org/bugs/show_bug.cgi?id=21167
;
diff --git a/polly/test/CodeGen/test-invalid-operands-for-select.ll b/polly/test/CodeGen/test-invalid-operands-for-select.ll
index 40695af3e847..9f5013cf1bb1 100644
--- a/polly/test/CodeGen/test-invalid-operands-for-select.ll
+++ b/polly/test/CodeGen/test-invalid-operands-for-select.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s | FileCheck %s
;
; Check that we do not crash as described here: http://llvm.org/PR21167
;
diff --git a/polly/test/CodeGen/test.ll b/polly/test/CodeGen/test.ll
index ac99688ed9e8..aad998ba2728 100644
--- a/polly/test/CodeGen/test.ll
+++ b/polly/test/CodeGen/test.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
; XFAIL: *
;int bar1();
diff --git a/polly/test/CodeGen/two-loops-right-after-each-other-2.ll b/polly/test/CodeGen/two-loops-right-after-each-other-2.ll
index a7cae0a921ca..1c68389eaeba 100644
--- a/polly/test/CodeGen/two-loops-right-after-each-other-2.ll
+++ b/polly/test/CodeGen/two-loops-right-after-each-other-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
; CHECK: polly.merge_new_and_old:
; CHECK-NEXT: merge = phi
diff --git a/polly/test/CodeGen/two-scops-in-row-invalidate-scevs.ll b/polly/test/CodeGen/two-scops-in-row-invalidate-scevs.ll
index 4470f970fc1e..4396c38310dc 100644
--- a/polly/test/CodeGen/two-scops-in-row-invalidate-scevs.ll
+++ b/polly/test/CodeGen/two-scops-in-row-invalidate-scevs.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; CHECK-LABEL: for.cond:
; CHECK: %num.0 = phi i32 [ %add, %for.body15 ], [ 0, %for.cond.pre_entry_bb ]
diff --git a/polly/test/CodeGen/two-scops-in-row.ll b/polly/test/CodeGen/two-scops-in-row.ll
index 3e922cba1916..dd3f310ef150 100644
--- a/polly/test/CodeGen/two-scops-in-row.ll
+++ b/polly/test/CodeGen/two-scops-in-row.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -polly-print-ast -polly-ignore-aliasing -disable-output < %s | FileCheck %s -check-prefix=SCALAR
-; RUN: opt %loadPolly -polly-codegen -polly-ignore-aliasing -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -polly-ignore-aliasing -disable-output < %s | FileCheck %s -check-prefix=SCALAR
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-ignore-aliasing -disable-output < %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; SCALAR: if (
diff --git a/polly/test/CodeGen/udiv_expansion_position.ll b/polly/test/CodeGen/udiv_expansion_position.ll
index bb37fed4a41e..354e3cd18010 100644
--- a/polly/test/CodeGen/udiv_expansion_position.ll
+++ b/polly/test/CodeGen/udiv_expansion_position.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s | FileCheck %s
;
; Verify we do not crash when we synthezise code for the udiv in the SCoP.
;
diff --git a/polly/test/CodeGen/uninitialized_scalar_memory.ll b/polly/test/CodeGen/uninitialized_scalar_memory.ll
index 935ccc3d6289..e08af07e604e 100644
--- a/polly/test/CodeGen/uninitialized_scalar_memory.ll
+++ b/polly/test/CodeGen/uninitialized_scalar_memory.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s | FileCheck %s
;
; Verify we initialize the scalar locations reserved for the incoming phi
; values.
diff --git a/polly/test/CodeGen/unpredictable-loop-unsynthesizable.ll b/polly/test/CodeGen/unpredictable-loop-unsynthesizable.ll
index 9164bb4532e6..46706804a81b 100644
--- a/polly/test/CodeGen/unpredictable-loop-unsynthesizable.ll
+++ b/polly/test/CodeGen/unpredictable-loop-unsynthesizable.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops \
-; RUN: -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-codegen \
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' \
+; RUN: -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -passes=polly-codegen \
; RUN: -polly-invariant-load-hoisting=true -disable-output < %s
; The loop for.body is a scop with invariant load hoisting, but does not
diff --git a/polly/test/CodeGen/variant_load_empty_domain.ll b/polly/test/CodeGen/variant_load_empty_domain.ll
index f5ad0b195818..6f2d3dc582db 100644
--- a/polly/test/CodeGen/variant_load_empty_domain.ll
+++ b/polly/test/CodeGen/variant_load_empty_domain.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -disable-output < %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -disable-output < %s
;
;
; void f(int *A) {
diff --git a/polly/test/CodeGen/whole-scop-non-affine-subregion.ll b/polly/test/CodeGen/whole-scop-non-affine-subregion.ll
index 931e644f6b8f..b342b1cb5aa2 100644
--- a/polly/test/CodeGen/whole-scop-non-affine-subregion.ll
+++ b/polly/test/CodeGen/whole-scop-non-affine-subregion.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly \
-; RUN: -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly \
+; RUN: -passes=polly-codegen -S < %s | FileCheck %s
; CHECK: polly.start
; int /* pure */ g()
diff --git a/polly/test/DeLICM/confused_order.ll b/polly/test/DeLICM/confused_order.ll
index 2015ebcf58f1..0c19eb6aa605 100644
--- a/polly/test/DeLICM/confused_order.ll
+++ b/polly/test/DeLICM/confused_order.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-delicm -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-delicm -disable-output -pass-remarks-missed=polly-delicm < %s 2>&1 | FileCheck %s -check-prefix=REMARKS
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-delicm>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-delicm' -polly-import-jscop-postfix=transformed -disable-output -pass-remarks-missed=polly-delicm < %s 2>&1 | FileCheck %s -check-prefix=REMARKS
;
; ForwardOptree changes the SCoP and may already map some accesses.
; DeLICM must be prepared to encounter implicit reads
diff --git a/polly/test/DeLICM/contradicting_assumed_context_and_domain.ll b/polly/test/DeLICM/contradicting_assumed_context_and_domain.ll
index 4e039b22b415..66d9ae889e65 100644
--- a/polly/test/DeLICM/contradicting_assumed_context_and_domain.ll
+++ b/polly/test/DeLICM/contradicting_assumed_context_and_domain.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-delicm -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s
;
; The domain of bb14 contradicts the SCoP's assumptions. This leads to
; 'anything goes' inside the statement since it is never executed,
diff --git a/polly/test/DeLICM/load-in-cond-inf-loop.ll b/polly/test/DeLICM/load-in-cond-inf-loop.ll
index f0aecfd87a15..f6e23110aa6f 100644
--- a/polly/test/DeLICM/load-in-cond-inf-loop.ll
+++ b/polly/test/DeLICM/load-in-cond-inf-loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-delicm -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s
; When %b is 0, %for.body13 is an infinite loop. In this case the loaded
; value %1 is not used anywhere.
diff --git a/polly/test/DeLICM/map_memset_zero.ll b/polly/test/DeLICM/map_memset_zero.ll
index 1a08eee63fe9..9a8e5989fdad 100644
--- a/polly/test/DeLICM/map_memset_zero.ll
+++ b/polly/test/DeLICM/map_memset_zero.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-delicm -disable-output < %s | FileCheck -match-full-lines %s
-; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb "-passes=scop(print<polly-delicm>)" -disable-output < %s | FileCheck -match-full-lines %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-delicm>' -disable-output < %s | FileCheck -match-full-lines %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-delicm>' -disable-output < %s | FileCheck -match-full-lines %s
;
; Check that PHI mapping works even in the presence of a memset whose
; zero value is used.
diff --git a/polly/test/DeLICM/nomap_alreadymapped.ll b/polly/test/DeLICM/nomap_alreadymapped.ll
index 7adf4ba88385..da5f4ec24a47 100644
--- a/polly/test/DeLICM/nomap_alreadymapped.ll
+++ b/polly/test/DeLICM/nomap_alreadymapped.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-delicm -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s
;
; void func(double *A) {
; for (int j = 0; j < 2; j += 1) { /* outer */
diff --git a/polly/test/DeLICM/nomap_escaping.ll b/polly/test/DeLICM/nomap_escaping.ll
index 034c0a96ccf2..60955368fe59 100644
--- a/polly/test/DeLICM/nomap_escaping.ll
+++ b/polly/test/DeLICM/nomap_escaping.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-delicm -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s
;
; void func(double *A) {
; for (int j = 0; j < 2; j += 1) { /* outer */
diff --git a/polly/test/DeLICM/nomap_occupied.ll b/polly/test/DeLICM/nomap_occupied.ll
index db33532b1e65..9ba8ce264123 100644
--- a/polly/test/DeLICM/nomap_occupied.ll
+++ b/polly/test/DeLICM/nomap_occupied.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-delicm -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s
;
; void func(double *A) {
; for (int j = 0; j < 2; j += 1) { /* outer */
diff --git a/polly/test/DeLICM/nomap_readonly.ll b/polly/test/DeLICM/nomap_readonly.ll
index 1f3b5746fe9b..7a185d336bad 100644
--- a/polly/test/DeLICM/nomap_readonly.ll
+++ b/polly/test/DeLICM/nomap_readonly.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-delicm -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s
;
; void func(double *A) {
; fsomeval = 21.0 + 21.0;
diff --git a/polly/test/DeLICM/nomap_spuriouswrite.ll b/polly/test/DeLICM/nomap_spuriouswrite.ll
index ef470f715bbe..0ed7f6ee8e23 100644
--- a/polly/test/DeLICM/nomap_spuriouswrite.ll
+++ b/polly/test/DeLICM/nomap_spuriouswrite.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-delicm -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s
;
; void func(double *A) {
; for (int j = 0; j < 2; j += 1) { /* outer */
diff --git a/polly/test/DeLICM/nomap_storagesize.ll b/polly/test/DeLICM/nomap_storagesize.ll
index fab8d54c2bdf..bf851ac342d2 100644
--- a/polly/test/DeLICM/nomap_storagesize.ll
+++ b/polly/test/DeLICM/nomap_storagesize.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-delicm -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s
;
; void func(float *A) {
; for (int j = 0; j < 2; j += 1) { /* outer */
diff --git a/polly/test/DeLICM/nomap_writewrite.ll b/polly/test/DeLICM/nomap_writewrite.ll
index 06192d9ae19e..9fcd52aad743 100644
--- a/polly/test/DeLICM/nomap_writewrite.ll
+++ b/polly/test/DeLICM/nomap_writewrite.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-delicm -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s
;
; void func(double *A) {
; for (int j = 0; j < 2; j += 1) { /* outer */
diff --git a/polly/test/DeLICM/outofquota-reverseDomain.ll b/polly/test/DeLICM/outofquota-reverseDomain.ll
index d40ee03cf3bc..1f7527c84120 100644
--- a/polly/test/DeLICM/outofquota-reverseDomain.ll
+++ b/polly/test/DeLICM/outofquota-reverseDomain.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-delicm-max-ops=1000000 -polly-print-delicm -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-delicm-max-ops=1000000 '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s
;
; This causes an assertion to fail on out-of-quota after 1000000 operations.
; (The error was specific to -polly-delicm-max-ops=1000000 and changes
diff --git a/polly/test/DeLICM/pass_existence.ll b/polly/test/DeLICM/pass_existence.ll
index 7ed2da9c1da1..64302d998326 100644
--- a/polly/test/DeLICM/pass_existence.ll
+++ b/polly/test/DeLICM/pass_existence.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -polly-delicm -disable-output < %s
-; RUN: opt %loadPolly -polly-print-delicm -disable-output < %s | FileCheck %s
-; RUN: opt %loadNPMPolly "-passes=scop(print<polly-delicm>)" -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-delicm -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=scop(print<polly-delicm>)' -disable-output < %s | FileCheck %s
;
; Simple test for the existence of the DeLICM pass.
;
diff --git a/polly/test/DeLICM/pr41656.ll b/polly/test/DeLICM/pr41656.ll
index 965ad9f62ac3..d7cfde35a6e8 100644
--- a/polly/test/DeLICM/pr41656.ll
+++ b/polly/test/DeLICM/pr41656.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-print-delicm -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>,scop(print<polly-delicm>)' -disable-output < %s 2>&1 | FileCheck %s
;
; llvm.org/PR41656
;
diff --git a/polly/test/DeLICM/pr48783.ll b/polly/test/DeLICM/pr48783.ll
index 3cbd54b93baf..e3c3eb6a19cc 100644
--- a/polly/test/DeLICM/pr48783.ll
+++ b/polly/test/DeLICM/pr48783.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-print-delicm -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>,scop(print<polly-delicm>)' -disable-output < %s 2>&1 | FileCheck %s
;
; llvm.org/PR48783
;
diff --git a/polly/test/DeLICM/reduction.ll b/polly/test/DeLICM/reduction.ll
index 78c1a4ce5288..29b7a3617300 100644
--- a/polly/test/DeLICM/reduction.ll
+++ b/polly/test/DeLICM/reduction.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-delicm-partial-writes=true -polly-print-delicm -disable-output < %s | FileCheck -match-full-lines %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-delicm-partial-writes=true '-passes=print<polly-delicm>' -disable-output < %s | FileCheck -match-full-lines %s
;
; void func(double *A) {
; for (int j = 0; j < 2; j += 1) { /* outer */
diff --git a/polly/test/DeLICM/reduction_looprotate_gvnpre_cond1.ll b/polly/test/DeLICM/reduction_looprotate_gvnpre_cond1.ll
index b5bc0d589c65..d9c5268e631d 100644
--- a/polly/test/DeLICM/reduction_looprotate_gvnpre_cond1.ll
+++ b/polly/test/DeLICM/reduction_looprotate_gvnpre_cond1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-delicm -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Load (but not store) of A[j] hoisted, reduction only over some iterations.
;
diff --git a/polly/test/DeLICM/reduction_looprotate_gvnpre_cond2.ll b/polly/test/DeLICM/reduction_looprotate_gvnpre_cond2.ll
index e995be1143a6..6a4223f5af65 100644
--- a/polly/test/DeLICM/reduction_looprotate_gvnpre_cond2.ll
+++ b/polly/test/DeLICM/reduction_looprotate_gvnpre_cond2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-delicm -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Load (but not store) of A[j] hoisted, reduction not written in all iterations.
; FIXME: %join is not mapped because the MemoryKind::Value mapping does not
diff --git a/polly/test/DeLICM/reduction_looprotate_gvnpre_nopreheader.ll b/polly/test/DeLICM/reduction_looprotate_gvnpre_nopreheader.ll
index ca3a1211ca49..bf4b8018d552 100644
--- a/polly/test/DeLICM/reduction_looprotate_gvnpre_nopreheader.ll
+++ b/polly/test/DeLICM/reduction_looprotate_gvnpre_nopreheader.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-delicm -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Hoisted reduction load (but not the store) without preheader.
;
diff --git a/polly/test/DeLICM/reduction_looprotate_licm_nopreheader.ll b/polly/test/DeLICM/reduction_looprotate_licm_nopreheader.ll
index 41538239fbd8..027df44e8619 100644
--- a/polly/test/DeLICM/reduction_looprotate_licm_nopreheader.ll
+++ b/polly/test/DeLICM/reduction_looprotate_licm_nopreheader.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-delicm -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s
;
; Register-promoted reduction but without preheader.
;
diff --git a/polly/test/DeLICM/reduction_looprotate_loopguard_gvnpre.ll b/polly/test/DeLICM/reduction_looprotate_loopguard_gvnpre.ll
index 35c723e864d2..4ea3fa53a339 100644
--- a/polly/test/DeLICM/reduction_looprotate_loopguard_gvnpre.ll
+++ b/polly/test/DeLICM/reduction_looprotate_loopguard_gvnpre.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-delicm -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Reduction over parametric number of elements and a loopguard if the
; reduction loop is not executed at all. Load hoisted before loop.
diff --git a/polly/test/DeLICM/reduction_looprotate_loopguard_licm1.ll b/polly/test/DeLICM/reduction_looprotate_loopguard_licm1.ll
index 2b5f4d8151a8..2e7abe444ad6 100644
--- a/polly/test/DeLICM/reduction_looprotate_loopguard_licm1.ll
+++ b/polly/test/DeLICM/reduction_looprotate_loopguard_licm1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-delicm -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Reduction over parametric number of elements and a loopguard if the
; reduction loop is not executed at all.
diff --git a/polly/test/DeLICM/reduction_looprotate_loopguard_licm2.ll b/polly/test/DeLICM/reduction_looprotate_loopguard_licm2.ll
index 2e92813d5551..60afdeb5fc97 100644
--- a/polly/test/DeLICM/reduction_looprotate_loopguard_licm2.ll
+++ b/polly/test/DeLICM/reduction_looprotate_loopguard_licm2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-delicm -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Reduction over parametric number of elements and a loopguard if the
; reduction loop is not executed at all, such that A[j] is also not written to.
diff --git a/polly/test/DeLICM/reduction_looprotate_loopguard_licm3.ll b/polly/test/DeLICM/reduction_looprotate_loopguard_licm3.ll
index 784c8ef2d321..e63b457de92d 100644
--- a/polly/test/DeLICM/reduction_looprotate_loopguard_licm3.ll
+++ b/polly/test/DeLICM/reduction_looprotate_loopguard_licm3.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-delicm -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Reduction over parametric number of elements and a loopguard if the
; reduction loop is not executed at all, such that A[j] is also not accessed.
diff --git a/polly/test/DeLICM/reduction_unrelatedunusual.ll b/polly/test/DeLICM/reduction_unrelatedunusual.ll
index 04c437770700..97826f603e5d 100644
--- a/polly/test/DeLICM/reduction_unrelatedunusual.ll
+++ b/polly/test/DeLICM/reduction_unrelatedunusual.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-delicm-partial-writes=true -polly-print-delicm -disable-output < %s | FileCheck -match-full-lines %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-delicm-partial-writes=true '-passes=print<polly-delicm>' -disable-output < %s | FileCheck -match-full-lines %s
;
; Map %add and %phi to A[j].
; The non-analyzable store to C[0] is unrelated and can be ignored.
diff --git a/polly/test/DeLICM/reject_loadafterstore.ll b/polly/test/DeLICM/reject_loadafterstore.ll
index 8af6e5e4818c..4460620852a8 100644
--- a/polly/test/DeLICM/reject_loadafterstore.ll
+++ b/polly/test/DeLICM/reject_loadafterstore.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-delicm -disable-output -pass-remarks-missed=polly-delicm < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-delicm>' -disable-output -pass-remarks-missed=polly-delicm < %s 2>&1 | FileCheck %s
;
; void func(double *A) {
; for (int j = 0; j < 2; j += 1) { /* outer */
diff --git a/polly/test/DeLICM/reject_outofquota.ll b/polly/test/DeLICM/reject_outofquota.ll
index 551431f0823c..820679a5349d 100644
--- a/polly/test/DeLICM/reject_outofquota.ll
+++ b/polly/test/DeLICM/reject_outofquota.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-delicm -pass-remarks-analysis=polly-delicm -polly-delicm-max-ops=1 -disable-output < %s 2>&1 | FileCheck %s
-; RUN: opt %loadPolly -polly-delicm -polly-print-dependences -polly-delicm-max-ops=1 -polly-dependences-computeout=0 -disable-output < %s | FileCheck %s -check-prefix=DEP
+; RUN: opt %loadNPMPolly '-passes=print<polly-delicm>' -pass-remarks-analysis=polly-delicm -polly-delicm-max-ops=1 -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-delicm,print<polly-dependences>' -polly-delicm-max-ops=1 -polly-dependences-computeout=0 -disable-output < %s | FileCheck %s -check-prefix=DEP
;
; void func(double *A) {
; for (int j = 0; j < 2; j += 1) { /* outer */
diff --git a/polly/test/DeLICM/reject_storeafterstore.ll b/polly/test/DeLICM/reject_storeafterstore.ll
index 1ec5ef67344c..ddd13dad2ed3 100644
--- a/polly/test/DeLICM/reject_storeafterstore.ll
+++ b/polly/test/DeLICM/reject_storeafterstore.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-delicm -pass-remarks-missed=polly-delicm -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-delicm>' -pass-remarks-missed=polly-delicm -disable-output < %s 2>&1 | FileCheck %s
;
; void func(double *A) {
; for (int j = 0; j < 2; j += 1) { /* outer */
diff --git a/polly/test/DeLICM/reject_storeinsubregion.ll b/polly/test/DeLICM/reject_storeinsubregion.ll
index 1d38e8066568..c987156b51cd 100644
--- a/polly/test/DeLICM/reject_storeinsubregion.ll
+++ b/polly/test/DeLICM/reject_storeinsubregion.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-delicm -pass-remarks-missed=polly-delicm -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-delicm>' -pass-remarks-missed=polly-delicm -disable-output < %s 2>&1 | FileCheck %s
;
; void func(double *A) {
; for (int j = 0; j < 2; j += 1) { /* outer */
diff --git a/polly/test/DeLICM/reject_unusualstore.ll b/polly/test/DeLICM/reject_unusualstore.ll
index a18a0c3ce9c4..342888c6654f 100644
--- a/polly/test/DeLICM/reject_unusualstore.ll
+++ b/polly/test/DeLICM/reject_unusualstore.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-delicm -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-delicm -disable-output -stats < %s 2>&1 | FileCheck %s --check-prefix=STATS
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-delicm>' -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -passes=polly-delicm -disable-output -stats < %s 2>&1 | FileCheck %s --check-prefix=STATS
; REQUIRES: asserts
;
; void func(double *A) {
diff --git a/polly/test/DeLICM/skip_maywrite.ll b/polly/test/DeLICM/skip_maywrite.ll
index 1e5f6b169fe4..0d30791cd94e 100644
--- a/polly/test/DeLICM/skip_maywrite.ll
+++ b/polly/test/DeLICM/skip_maywrite.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-delicm -pass-remarks-missed=polly-delicm -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-delicm>' -pass-remarks-missed=polly-delicm -disable-output < %s 2>&1 | FileCheck %s
;
; void func(double *A) {
; for (int j = 0; j < 2; j += 1) { /* outer */
diff --git a/polly/test/DeLICM/skip_multiaccess.ll b/polly/test/DeLICM/skip_multiaccess.ll
index 6a8c8e5325e1..a7c79f752463 100644
--- a/polly/test/DeLICM/skip_multiaccess.ll
+++ b/polly/test/DeLICM/skip_multiaccess.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-delicm -pass-remarks-missed=polly-delicm -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-delicm -pass-remarks-missed=polly-delicm -disable-output < %s 2>&1 | FileCheck %s
;
; llvm.org/PR34485
; llvm.org/PR34989
diff --git a/polly/test/DeLICM/skip_notinloop.ll b/polly/test/DeLICM/skip_notinloop.ll
index 0730a3a9a4f5..8e265e19aefe 100644
--- a/polly/test/DeLICM/skip_notinloop.ll
+++ b/polly/test/DeLICM/skip_notinloop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-delicm -pass-remarks-missed=polly-delicm -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-delicm>' -pass-remarks-missed=polly-delicm -disable-output < %s 2>&1 | FileCheck %s
;
; void func(double *A) {
; double phi = 0.0;
diff --git a/polly/test/DeLICM/skip_scalaraccess.ll b/polly/test/DeLICM/skip_scalaraccess.ll
index fa95d382409a..2cf13afe11cd 100644
--- a/polly/test/DeLICM/skip_scalaraccess.ll
+++ b/polly/test/DeLICM/skip_scalaraccess.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-delicm -pass-remarks-missed=polly-delicm -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-delicm>' -pass-remarks-missed=polly-delicm -disable-output < %s 2>&1 | FileCheck %s
;
; void func(double *A) {
; for (int j = 0; j < 2; j += 1) { /* outer */
diff --git a/polly/test/DeadCodeElimination/chained_iterations.ll b/polly/test/DeadCodeElimination/chained_iterations.ll
index b79fdd659aae..f3bf07bb40d8 100644
--- a/polly/test/DeadCodeElimination/chained_iterations.ll
+++ b/polly/test/DeadCodeElimination/chained_iterations.ll
@@ -1,5 +1,5 @@
-; RUN: opt -S %loadPolly -basic-aa -polly-dependences-analysis-type=value-based -polly-print-ast -disable-output < %s | FileCheck %s
-; RUN: opt -S %loadPolly -basic-aa -polly-dependences-analysis-type=value-based -polly-dce -polly-print-ast -disable-output < %s | FileCheck %s -check-prefix=CHECK-DCE
+; RUN: opt -S %loadNPMPolly -aa-pipeline=basic-aa -polly-dependences-analysis-type=value-based '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
+; RUN: opt -S %loadNPMPolly -aa-pipeline=basic-aa -polly-dependences-analysis-type=value-based '-passes=polly-dce,print<polly-ast>' -disable-output < %s | FileCheck %s -check-prefix=CHECK-DCE
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
;
; for(i = 0; i < 200; i++ )
diff --git a/polly/test/DeadCodeElimination/chained_iterations_2.ll b/polly/test/DeadCodeElimination/chained_iterations_2.ll
index 1d1af92db5da..52f034f0e56c 100644
--- a/polly/test/DeadCodeElimination/chained_iterations_2.ll
+++ b/polly/test/DeadCodeElimination/chained_iterations_2.ll
@@ -1,5 +1,5 @@
-; RUN: opt -S %loadPolly -basic-aa -polly-dependences-analysis-type=value-based -polly-print-ast -disable-output < %s | FileCheck %s
-; RUN: opt -S %loadPolly -basic-aa -polly-dependences-analysis-type=value-based -polly-dce -polly-print-ast -disable-output < %s | FileCheck %s -check-prefix=CHECK-DCE
+; RUN: opt -S %loadNPMPolly -aa-pipeline=basic-aa -polly-dependences-analysis-type=value-based '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
+; RUN: opt -S %loadNPMPolly -aa-pipeline=basic-aa -polly-dependences-analysis-type=value-based '-passes=polly-dce,print<polly-ast>' -disable-output < %s | FileCheck %s -check-prefix=CHECK-DCE
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
;
; for(i = 0; i < 200; i++ )
diff --git a/polly/test/DeadCodeElimination/computeout.ll b/polly/test/DeadCodeElimination/computeout.ll
index 51850d7da349..e54df42ed1db 100644
--- a/polly/test/DeadCodeElimination/computeout.ll
+++ b/polly/test/DeadCodeElimination/computeout.ll
@@ -1,6 +1,5 @@
-; RUN: opt -S %loadPolly -basic-aa -polly-dce -polly-print-ast -disable-output < %s | FileCheck %s
; RUN: opt -S %loadNPMPolly "-passes=scop(polly-dce,print<polly-ast>)" < %s | FileCheck %s
-; RUN: opt -S %loadPolly -basic-aa -polly-dce -polly-print-ast -polly-dependences-computeout=1 -disable-output < %s | FileCheck %s -check-prefix=TIMEOUT
+; RUN: opt -S %loadNPMPolly -aa-pipeline=basic-aa "-passes=scop(polly-dce,print<polly-ast>)" -polly-dependences-computeout=1 -disable-output < %s | FileCheck %s -check-prefix=TIMEOUT
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
; for(i = 0; i < 100; i++ )
diff --git a/polly/test/DeadCodeElimination/dead_iteration_elimination.ll b/polly/test/DeadCodeElimination/dead_iteration_elimination.ll
index f496f7828e3d..c102f60abb65 100644
--- a/polly/test/DeadCodeElimination/dead_iteration_elimination.ll
+++ b/polly/test/DeadCodeElimination/dead_iteration_elimination.ll
@@ -1,4 +1,3 @@
-; RUN: opt -S %loadPolly -basic-aa -polly-dependences-analysis-type=value-based -polly-dce -polly-dce-precise-steps=2 -polly-print-ast -disable-output < %s | FileCheck %s
; RUN: opt -S %loadNPMPolly "-passes=scop(polly-dce,print<polly-ast>)" -polly-dependences-analysis-type=value-based -polly-dce-precise-steps=2 < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
;
diff --git a/polly/test/DeadCodeElimination/non-affine-affine-mix.ll b/polly/test/DeadCodeElimination/non-affine-affine-mix.ll
index e6a5dd204ca1..36f55476fed2 100644
--- a/polly/test/DeadCodeElimination/non-affine-affine-mix.ll
+++ b/polly/test/DeadCodeElimination/non-affine-affine-mix.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine -polly-dce -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine '-passes=polly-dce,print<polly-ast>' -disable-output < %s | FileCheck %s
;
; void f(int *A) {
; for (int i = 0; i < 1024; i++)
diff --git a/polly/test/DeadCodeElimination/non-affine.ll b/polly/test/DeadCodeElimination/non-affine.ll
index 38a7fcbcf9c9..ef528b4124c6 100644
--- a/polly/test/DeadCodeElimination/non-affine.ll
+++ b/polly/test/DeadCodeElimination/non-affine.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine -polly-dce -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine '-passes=polly-dce,print<polly-ast>' -disable-output < %s | FileCheck %s
;
; CHECK: for (int c0 = 0; c0 <= 1023; c0 += 1)
;
diff --git a/polly/test/DeadCodeElimination/null_schedule.ll b/polly/test/DeadCodeElimination/null_schedule.ll
index 633a84b5d92b..01d34e95629b 100644
--- a/polly/test/DeadCodeElimination/null_schedule.ll
+++ b/polly/test/DeadCodeElimination/null_schedule.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S %loadPolly -basic-aa -polly-dependences-analysis-type=value-based -polly-dce -polly-print-ast -disable-output < %s | FileCheck %s -check-prefix=CHECK-DCE
+; RUN: opt -S %loadNPMPolly -aa-pipeline=basic-aa -polly-dependences-analysis-type=value-based '-passes=polly-dce,print<polly-ast>' -disable-output < %s | FileCheck %s -check-prefix=CHECK-DCE
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
; A[0] = 1;
;
diff --git a/polly/test/DependenceInfo/computeout.ll b/polly/test/DependenceInfo/computeout.ll
index 048de29864d3..c2a3456b3dc8 100644
--- a/polly/test/DependenceInfo/computeout.ll
+++ b/polly/test/DependenceInfo/computeout.ll
@@ -1,7 +1,5 @@
-; RUN: opt -S %loadPolly -polly-print-dependences -disable-output < %s | FileCheck %s -check-prefix=VALUE
-; RUN: opt -S %loadPolly -polly-print-function-dependences -disable-output < %s | FileCheck %s -check-prefix=FUNC-VALUE
-; RUN: opt -S %loadPolly -polly-print-dependences -polly-dependences-computeout=1 -disable-output < %s | FileCheck %s -check-prefix=TIMEOUT
-; RUN: opt -S %loadPolly -polly-print-function-dependences -polly-dependences-computeout=1 -disable-output < %s | FileCheck %s -check-prefix=TIMEOUT
+; RUN: opt -S %loadNPMPolly '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s -check-prefix=VALUE
+; RUN: opt -S %loadNPMPolly '-passes=print<polly-dependences>' -polly-dependences-computeout=1 -disable-output < %s | FileCheck %s -check-prefix=TIMEOUT
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
; for(i = 0; i < 100; i++ )
diff --git a/polly/test/DependenceInfo/different_schedule_dimensions.ll b/polly/test/DependenceInfo/different_schedule_dimensions.ll
index 3f966168d3b7..f89791f42f9d 100644
--- a/polly/test/DependenceInfo/different_schedule_dimensions.ll
+++ b/polly/test/DependenceInfo/different_schedule_dimensions.ll
@@ -1,7 +1,5 @@
-; RUN: opt -S %loadPolly -polly-print-dependences \
+; RUN: opt -S %loadNPMPolly '-passes=print<polly-dependences>' \
; RUN: -disable-output < %s | FileCheck %s
-; RUN: opt -S %loadPolly -polly-print-function-dependences \
-; RUN: -disable-output < %s | FileCheck %s -check-prefix=FUNC
; CHECK: RAW dependences:
; CHECK: { Stmt_bb9[0] -> Stmt_bb10[0] }
diff --git a/polly/test/DependenceInfo/do_pluto_matmult.ll b/polly/test/DependenceInfo/do_pluto_matmult.ll
index d71608e80e70..b88cf9bf5475 100644
--- a/polly/test/DependenceInfo/do_pluto_matmult.ll
+++ b/polly/test/DependenceInfo/do_pluto_matmult.ll
@@ -1,7 +1,5 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-dependences -polly-dependences-analysis-type=value-based -disable-output < %s | FileCheck %s -check-prefix=VALUE
-; RUN: opt %loadPolly -basic-aa -polly-print-dependences -polly-dependences-analysis-type=memory-based -disable-output < %s | FileCheck %s -check-prefix=MEMORY
-; RUN: opt %loadPolly -basic-aa -polly-print-function-dependences -polly-dependences-analysis-type=value-based -disable-output < %s | FileCheck %s -check-prefix=FUNC-VALUE
-; RUN: opt %loadPolly -basic-aa -polly-print-function-dependences -polly-dependences-analysis-type=memory-based -disable-output < %s | FileCheck %s -check-prefix=FUNC-MEMORY
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-dependences>' -polly-dependences-analysis-type=value-based -disable-output < %s | FileCheck %s -check-prefix=VALUE
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-dependences>' -polly-dependences-analysis-type=memory-based -disable-output < %s | FileCheck %s -check-prefix=MEMORY
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/polly/test/DependenceInfo/fine_grain_dep_0.ll b/polly/test/DependenceInfo/fine_grain_dep_0.ll
index 9c79e360690a..f93814c1c4be 100644
--- a/polly/test/DependenceInfo/fine_grain_dep_0.ll
+++ b/polly/test/DependenceInfo/fine_grain_dep_0.ll
@@ -1,7 +1,6 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-dependences -polly-dependences-analysis-type=value-based -polly-dependences-analysis-level=reference-wise -disable-output < %s | FileCheck %s --check-prefix=REF
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-dependences -polly-dependences-analysis-type=value-based -polly-dependences-analysis-level=access-wise -disable-output < %s | FileCheck %s --check-prefix=ACC
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-function-dependences -polly-dependences-analysis-type=value-based -polly-dependences-analysis-level=access-wise -disable-output < %s | FileCheck %s --check-prefix=ACC
-;
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-dependences>' -polly-dependences-analysis-type=value-based -polly-dependences-analysis-level=reference-wise -disable-output < %s | FileCheck %s --check-prefix=REF
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-dependences>' -polly-dependences-analysis-type=value-based -polly-dependences-analysis-level=access-wise -disable-output < %s | FileCheck %s --check-prefix=ACC
+
; REF: RAW dependences:
; REF-NEXT: [N] -> { [Stmt_for_body[i0] -> MemRef_b[]] -> [Stmt_for_body[6 + i0] -> MemRef_b[]] : 0 <= i0 <= -13 + N; Stmt_for_body[i0] -> Stmt_for_body[6 + i0] : 0 <= i0 <= -13 + N; Stmt_for_body[i0] -> Stmt_for_body[4 + i0] : 0 <= i0 <= -11 + N; [Stmt_for_body[i0] -> MemRef_a[]] -> [Stmt_for_body[4 + i0] -> MemRef_a[]] : 0 <= i0 <= -11 + N }
; REF-NEXT: WAR dependences:
diff --git a/polly/test/DependenceInfo/generate_may_write_dependence_info.ll b/polly/test/DependenceInfo/generate_may_write_dependence_info.ll
index 0b7f2d48da9f..677323495476 100644
--- a/polly/test/DependenceInfo/generate_may_write_dependence_info.ll
+++ b/polly/test/DependenceInfo/generate_may_write_dependence_info.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S %loadPolly -polly-print-dependences -disable-output < %s | FileCheck %s -check-prefix=VALUE
+; RUN: opt -S %loadNPMPolly '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s -check-prefix=VALUE
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; for (int i = 0; i < N; i++) {
diff --git a/polly/test/DependenceInfo/infeasible_context.ll b/polly/test/DependenceInfo/infeasible_context.ll
index d701b821e15c..cde3102dc3dc 100644
--- a/polly/test/DependenceInfo/infeasible_context.ll
+++ b/polly/test/DependenceInfo/infeasible_context.ll
@@ -1,10 +1,9 @@
-; RUN: opt %loadPolly -polly-print-function-scops -disable-output < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 \
; RUN: | FileCheck %s -check-prefix=FUNC-SCOP
-; RUN: opt %loadPolly -polly-print-function-dependences -disable-output < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,scop(print<polly-dependences>)' -disable-output < %s 2>&1 \
; RUN: | FileCheck %s -check-prefix=FUNC-DEPS
;
; FUNC-SCOP-NOT: Statement
-; FUNC-DEPS-LABEL: Printing analysis 'Polly - Calculate dependences for all the SCoPs of a function' for function 'readgeo'
; FUNC-DEPS-NOT: RAW dependences
;
; Due to an infeasible run-time check, scop object is empty and we do not compute dependences.
diff --git a/polly/test/DependenceInfo/may_writes_do_not_block_must_writes_for_war.ll b/polly/test/DependenceInfo/may_writes_do_not_block_must_writes_for_war.ll
index 09c516274708..392a34769cdd 100644
--- a/polly/test/DependenceInfo/may_writes_do_not_block_must_writes_for_war.ll
+++ b/polly/test/DependenceInfo/may_writes_do_not_block_must_writes_for_war.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-dependences -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s
;
; Verify that the presence of a may-write (S1) between a read (S0) and a
; must-write (S2) does not block the generation of RAW dependences. This makes
diff --git a/polly/test/DependenceInfo/nonaffine-condition-buildMemoryAccess.ll b/polly/test/DependenceInfo/nonaffine-condition-buildMemoryAccess.ll
index 25c7e3d6e442..ae5fd3beed39 100644
--- a/polly/test/DependenceInfo/nonaffine-condition-buildMemoryAccess.ll
+++ b/polly/test/DependenceInfo/nonaffine-condition-buildMemoryAccess.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -polly-allow-nonaffine-loops -polly-allow-nonaffine -debug-only=polly-dependence < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-allow-nonaffine-loops -polly-allow-nonaffine -debug-only=polly-dependence < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
; CHECK: MayWriteAccess := [Reduction Type: NONE] [Scalar: 0]
diff --git a/polly/test/DependenceInfo/reduction_complex_location.ll b/polly/test/DependenceInfo/reduction_complex_location.ll
index 7ca839996326..7722ee974c3f 100644
--- a/polly/test/DependenceInfo/reduction_complex_location.ll
+++ b/polly/test/DependenceInfo/reduction_complex_location.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -polly-print-dependences -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-dependences -polly-dependences-analysis-level=reference-wise -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-dependences -polly-dependences-analysis-level=access-wise -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -polly-dependences-analysis-level=reference-wise -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -polly-dependences-analysis-level=access-wise -disable-output < %s | FileCheck %s
;
; CHECK: RAW dependences:
; CHECK-NEXT: { }
diff --git a/polly/test/DependenceInfo/reduction_dependences_equal_non_reduction_dependences.ll b/polly/test/DependenceInfo/reduction_dependences_equal_non_reduction_dependences.ll
index 3632bd202da2..840d1f32dca3 100644
--- a/polly/test/DependenceInfo/reduction_dependences_equal_non_reduction_dependences.ll
+++ b/polly/test/DependenceInfo/reduction_dependences_equal_non_reduction_dependences.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-stmt-granularity=bb -polly-print-dependences -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-stmt-granularity=bb '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s
;
; This loopnest contains a reduction which imposes the same dependences as the
; accesses to the array A. We need to ensure we keep the dependences of A.
diff --git a/polly/test/DependenceInfo/reduction_dependences_not_null.ll b/polly/test/DependenceInfo/reduction_dependences_not_null.ll
index 69fd74478ecc..56d84a9aec6d 100644
--- a/polly/test/DependenceInfo/reduction_dependences_not_null.ll
+++ b/polly/test/DependenceInfo/reduction_dependences_not_null.ll
@@ -1,7 +1,7 @@
; Test that the reduction dependences are always initialised, even in a case
; where we have no reduction. If this object is NULL, then isl operations on
; it will fail.
-; RUN: opt -S %loadPolly -polly-print-dependences -disable-output < %s | FileCheck %s -check-prefix=VALUE
+; RUN: opt -S %loadNPMPolly '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s -check-prefix=VALUE
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
; for(i = 0; i < 100; i++ )
diff --git a/polly/test/DependenceInfo/reduction_mixed_reduction_and_non_reduction_dependences.ll b/polly/test/DependenceInfo/reduction_mixed_reduction_and_non_reduction_dependences.ll
index 71903d9e7111..76c7fc64ae89 100644
--- a/polly/test/DependenceInfo/reduction_mixed_reduction_and_non_reduction_dependences.ll
+++ b/polly/test/DependenceInfo/reduction_mixed_reduction_and_non_reduction_dependences.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-dependences -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s
;
; CHECK: RAW dependences:
; CHECK-NEXT: { Stmt_for_body3[i0, i1] -> Stmt_for_body3[i0 + i1, o1] : i0 >= 0 and 0 <= i1 <= 1023 - i0 and i1 <= 1 and 0 < o1 <= 511 }
diff --git a/polly/test/DependenceInfo/reduction_multiple_loops_array_sum.ll b/polly/test/DependenceInfo/reduction_multiple_loops_array_sum.ll
index 234de5c367a0..02b814a0d7c0 100644
--- a/polly/test/DependenceInfo/reduction_multiple_loops_array_sum.ll
+++ b/polly/test/DependenceInfo/reduction_multiple_loops_array_sum.ll
@@ -1,6 +1,6 @@
-; RUN: opt -basic-aa %loadPolly -polly-print-dependences -disable-output < %s | FileCheck %s
-; RUN: opt -basic-aa %loadPolly -polly-print-dependences -polly-dependences-analysis-level=reference-wise -disable-output < %s | FileCheck %s
-; RUN: opt -basic-aa %loadPolly -polly-print-dependences -polly-dependences-analysis-level=access-wise -disable-output < %s | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa %loadNPMPolly '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa %loadNPMPolly '-passes=print<polly-dependences>' -polly-dependences-analysis-level=reference-wise -disable-output < %s | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa %loadNPMPolly '-passes=print<polly-dependences>' -polly-dependences-analysis-level=access-wise -disable-output < %s | FileCheck %s
;
; Verify that only the inner reduction-like accesses cause reduction dependences
;
diff --git a/polly/test/DependenceInfo/reduction_multiple_loops_array_sum_2.ll b/polly/test/DependenceInfo/reduction_multiple_loops_array_sum_2.ll
index acd674dc0117..91bd35deebd0 100644
--- a/polly/test/DependenceInfo/reduction_multiple_loops_array_sum_2.ll
+++ b/polly/test/DependenceInfo/reduction_multiple_loops_array_sum_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-dependences -basic-aa -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -aa-pipeline=basic-aa -disable-output < %s | FileCheck %s
;
; CHECK: RAW dependences:
; CHECK-NEXT: { }
diff --git a/polly/test/DependenceInfo/reduction_multiple_loops_array_sum_3.ll b/polly/test/DependenceInfo/reduction_multiple_loops_array_sum_3.ll
index bdfcfc99c8cb..040d51378239 100644
--- a/polly/test/DependenceInfo/reduction_multiple_loops_array_sum_3.ll
+++ b/polly/test/DependenceInfo/reduction_multiple_loops_array_sum_3.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-dependences -basic-aa -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -aa-pipeline=basic-aa -disable-output < %s | FileCheck %s
;
; CHECK: Reduction dependences:
; CHECK-NEXT: { Stmt_for_inc[i0, i1] -> Stmt_for_inc[i0, 1 + i1] : 0 <= i0 <= 99 and 0 <= i1 <= 98 }
diff --git a/polly/test/DependenceInfo/reduction_multiple_reductions.ll b/polly/test/DependenceInfo/reduction_multiple_reductions.ll
index cf705080e03d..527a8cfc3556 100644
--- a/polly/test/DependenceInfo/reduction_multiple_reductions.ll
+++ b/polly/test/DependenceInfo/reduction_multiple_reductions.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-dependences -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s
;
; Verify we do not have dependences between the if and the else clause
;
diff --git a/polly/test/DependenceInfo/reduction_multiple_reductions_2.ll b/polly/test/DependenceInfo/reduction_multiple_reductions_2.ll
index 8d8557a129ab..fb5fd96a2e42 100644
--- a/polly/test/DependenceInfo/reduction_multiple_reductions_2.ll
+++ b/polly/test/DependenceInfo/reduction_multiple_reductions_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-dependences -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s
;
;
; These are the important RAW dependences, as they need to originate/end in only one iteration:
diff --git a/polly/test/DependenceInfo/reduction_only_reduction_like_access.ll b/polly/test/DependenceInfo/reduction_only_reduction_like_access.ll
index 7b4a68a2a897..3ec3920268b4 100644
--- a/polly/test/DependenceInfo/reduction_only_reduction_like_access.ll
+++ b/polly/test/DependenceInfo/reduction_only_reduction_like_access.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-dependences -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s
;
; FIXME: Change the comment once we allow different pointers
; The statement is "almost" reduction-like but should not yield any reduction dependences
diff --git a/polly/test/DependenceInfo/reduction_partially_escaping_intermediate_in_other_stmt.ll b/polly/test/DependenceInfo/reduction_partially_escaping_intermediate_in_other_stmt.ll
index 0d09e5a861a0..23bd8ef25bd7 100644
--- a/polly/test/DependenceInfo/reduction_partially_escaping_intermediate_in_other_stmt.ll
+++ b/polly/test/DependenceInfo/reduction_partially_escaping_intermediate_in_other_stmt.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-dependences -basic-aa -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -aa-pipeline=basic-aa -disable-output < %s | FileCheck %s
;
; CHECK: Reduction dependences:
; CHECK-NEXT: [N] -> { Stmt_for_body3[i0, i1] -> Stmt_for_body3[i0, 1 + i1] : 0 <= i0 <= 1023 and i1 >= 0 and 1024 - N + i0 <= i1 <= 1022 }
diff --git a/polly/test/DependenceInfo/reduction_privatization_deps.ll b/polly/test/DependenceInfo/reduction_privatization_deps.ll
index ce90e21a898d..0e0f71737ffd 100644
--- a/polly/test/DependenceInfo/reduction_privatization_deps.ll
+++ b/polly/test/DependenceInfo/reduction_privatization_deps.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-dependences -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s
;
; CHECK: RAW dependences:
; CHECK-NEXT: { Stmt_S1[i0, i1] -> Stmt_S2[-1 + i0 + i1] : 0 <= i0 <= 1023 and i1 >= 0 and -i0 < i1 <= 1024 - i0 and i1 <= 1023; Stmt_S0[i0] -> Stmt_S1[o0, i0 - o0] : i0 <= 1023 and 0 <= o0 <= i0 }
diff --git a/polly/test/DependenceInfo/reduction_privatization_deps_2.ll b/polly/test/DependenceInfo/reduction_privatization_deps_2.ll
index 4904004d4781..cafa319e2cc7 100644
--- a/polly/test/DependenceInfo/reduction_privatization_deps_2.ll
+++ b/polly/test/DependenceInfo/reduction_privatization_deps_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-dependences -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s
;
; We have privatization dependences from a textually later statement to a
; textually earlier one, but the dependences still go forward in time.
diff --git a/polly/test/DependenceInfo/reduction_privatization_deps_3.ll b/polly/test/DependenceInfo/reduction_privatization_deps_3.ll
index a3935ebd6cc4..d86da92fbcab 100644
--- a/polly/test/DependenceInfo/reduction_privatization_deps_3.ll
+++ b/polly/test/DependenceInfo/reduction_privatization_deps_3.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-dependences -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s
;
; CHECK: RAW dependences:
; CHECK-NEXT: { Stmt_S1[i0] -> Stmt_S3[2 + i0] : 0 <= i0 <= 96; Stmt_S2[i0, i1] -> Stmt_S3[o0] : i1 <= 1 - i0 and -i1 < o0 <= 1 and o0 <= 1 + i0 - i1; Stmt_S3[i0] -> Stmt_S2[o0, 1 - i0] : 0 <= i0 <= 1 and i0 < o0 <= 98 }
diff --git a/polly/test/DependenceInfo/reduction_privatization_deps_4.ll b/polly/test/DependenceInfo/reduction_privatization_deps_4.ll
index 10d726af5145..d84c04fc309b 100644
--- a/polly/test/DependenceInfo/reduction_privatization_deps_4.ll
+++ b/polly/test/DependenceInfo/reduction_privatization_deps_4.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-dependences -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s
;
; CHECK: RAW dependences:
; CHECK-NEXT: { Stmt_S1[i0] -> Stmt_S2[i0, i0] : 0 <= i0 <= 98; Stmt_S2[i0, i0] -> Stmt_S3[i0] : 0 <= i0 <= 98; Stmt_S3[i0] -> Stmt_S2[o0, i0] : i0 >= 0 and i0 < o0 <= 98; Stmt_S2[i0, i1] -> Stmt_S1[i1] : i0 >= 0 and i0 < i1 <= 98 }
diff --git a/polly/test/DependenceInfo/reduction_privatization_deps_5.ll b/polly/test/DependenceInfo/reduction_privatization_deps_5.ll
index e8d51181725e..592c7238c3c5 100644
--- a/polly/test/DependenceInfo/reduction_privatization_deps_5.ll
+++ b/polly/test/DependenceInfo/reduction_privatization_deps_5.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-dependences -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s
;
; CHECK: RAW dependences:
; CHECK-NEXT: { Stmt_S1[i0, 0] -> Stmt_S2[i0, 0] : 0 <= i0 <= 98; Stmt_S2[i0, 0] -> Stmt_S1[1 + i0, 0] : 0 <= i0 <= 97 }
diff --git a/polly/test/DependenceInfo/reduction_sequence.ll b/polly/test/DependenceInfo/reduction_sequence.ll
index 4a4688953938..7ce9d37d395b 100644
--- a/polly/test/DependenceInfo/reduction_sequence.ll
+++ b/polly/test/DependenceInfo/reduction_sequence.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-dependences -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s
; void manyreductions(long *A) {
; for (long i = 0; i < 1024; i++)
diff --git a/polly/test/DependenceInfo/reduction_simple_iv.ll b/polly/test/DependenceInfo/reduction_simple_iv.ll
index e3307afae08b..d13d14ecaad9 100644
--- a/polly/test/DependenceInfo/reduction_simple_iv.ll
+++ b/polly/test/DependenceInfo/reduction_simple_iv.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-dependences -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s
;
; CHECK: RAW dependences:
; CHECK-NEXT: { }
diff --git a/polly/test/DependenceInfo/reduction_simple_iv_debug_wrapped_dependences.ll b/polly/test/DependenceInfo/reduction_simple_iv_debug_wrapped_dependences.ll
index c7651c39a563..4c97fbb1aacb 100644
--- a/polly/test/DependenceInfo/reduction_simple_iv_debug_wrapped_dependences.ll
+++ b/polly/test/DependenceInfo/reduction_simple_iv_debug_wrapped_dependences.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-dependences -debug-only=polly-dependence -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -debug-only=polly-dependence -disable-output < %s 2>&1 | FileCheck %s
;
; REQUIRES: asserts
;
diff --git a/polly/test/DependenceInfo/reduction_simple_privatization_deps_2.ll b/polly/test/DependenceInfo/reduction_simple_privatization_deps_2.ll
index b61fd8453a8c..804005cf72a7 100644
--- a/polly/test/DependenceInfo/reduction_simple_privatization_deps_2.ll
+++ b/polly/test/DependenceInfo/reduction_simple_privatization_deps_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-dependences -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s
;
; CHECK: RAW dependences:
; CHECK-NEXT: { Stmt_S1[i0, i1] -> Stmt_S2[i0] : 0 <= i0 <= 99 and 0 <= i1 <= 99; Stmt_S0[i0] -> Stmt_S1[i0, o1] : 0 <= i0 <= 99 and 0 <= o1 <= 99; Stmt_S2[i0] -> Stmt_S0[1 + i0] : 0 <= i0 <= 98 }
diff --git a/polly/test/DependenceInfo/reduction_simple_privatization_deps_w_parameter.ll b/polly/test/DependenceInfo/reduction_simple_privatization_deps_w_parameter.ll
index a3a87c70d905..9596827b4cbb 100644
--- a/polly/test/DependenceInfo/reduction_simple_privatization_deps_w_parameter.ll
+++ b/polly/test/DependenceInfo/reduction_simple_privatization_deps_w_parameter.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-dependences -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s
;
; CHECK: RAW dependences:
; CHECK-NEXT: [N] -> { Stmt_S1[i0] -> Stmt_S2[] : N >= 11 and 0 <= i0 <= 1023; Stmt_S0[] -> Stmt_S1[o0] : N >= 11 and 0 <= o0 <= 1023 }
diff --git a/polly/test/DependenceInfo/reduction_two_reductions_different_rloops.ll b/polly/test/DependenceInfo/reduction_two_reductions_different_rloops.ll
index c90462962ce0..d67683d11a4b 100644
--- a/polly/test/DependenceInfo/reduction_two_reductions_different_rloops.ll
+++ b/polly/test/DependenceInfo/reduction_two_reductions_different_rloops.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-stmt-granularity=bb -polly-print-dependences -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-stmt-granularity=bb '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s
;
; CHECK: RAW dependences:
; CHECK-NEXT: { }
diff --git a/polly/test/DependenceInfo/sequential_loops.ll b/polly/test/DependenceInfo/sequential_loops.ll
index 8dfa13cb9db8..6ae720030332 100644
--- a/polly/test/DependenceInfo/sequential_loops.ll
+++ b/polly/test/DependenceInfo/sequential_loops.ll
@@ -1,34 +1,43 @@
-; RUN: opt -S %loadPolly -basic-aa -polly-print-dependences -polly-dependences-analysis-type=value-based -disable-output < %s | FileCheck %s -check-prefix=VALUE
-; RUN: opt -S %loadPolly -basic-aa -polly-print-dependences -polly-dependences-analysis-type=memory-based -disable-output < %s | FileCheck %s -check-prefix=MEMORY
-; RUN: opt -S %loadPolly -basic-aa -polly-print-dependences -polly-dependences-analysis-type=value-based -polly-dependences-analysis-level=access-wise -disable-output < %s | FileCheck %s -check-prefix=VALUE_ACCESS
+; RUN: opt -S %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-dependences>' -polly-dependences-analysis-type=value-based -disable-output < %s | FileCheck %s -check-prefix=VALUE
+; RUN: opt -S %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-dependences>' -polly-dependences-analysis-type=memory-based -disable-output < %s | FileCheck %s -check-prefix=MEMORY
+; RUN: opt -S %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-dependences>' -polly-dependences-analysis-type=value-based -polly-dependences-analysis-level=access-wise -disable-output < %s | FileCheck %s -check-prefix=VALUE_ACCESS
-; VALUE-LABEL: Printing analysis 'Polly - Calculate dependences' for region: 'S1 => exit.3' in function 'sequential_writes':
-; VALUE-NEXT: RAW dependences:
+; VALUE: RAW dependences:
; VALUE-NEXT: { }
; VALUE-NEXT: WAR dependences:
; VALUE-NEXT: { }
; VALUE-NEXT: WAW dependences:
; VALUE-NEXT: { Stmt_S1[i0] -> Stmt_S3[i0] : 10 <= i0 <= 99; Stmt_S1[i0] -> Stmt_S2[i0] : 0 <= i0 <= 9; Stmt_S2[i0] -> Stmt_S3[i0] : 0 <= i0 <= 9 }
;
-;VALUE_ACCESS-LABEL: Printing analysis 'Polly - Calculate dependences' for region: 'S1 => exit.3' in function 'sequential_writes':
-;VALUE_ACCESS-NEXT: RAW dependences:
-;VALUE_ACCESS-NEXT: { }
-;VALUE_ACCESS-NEXT: WAR dependences:
-;VALUE_ACCESS-NEXT: { }
-;VALUE_ACCESS-NEXT: WAW dependences:
-;VALUE_ACCESS-NEXT: { Stmt_S1[i0] -> Stmt_S3[i0] : 10 <= i0 <= 99; Stmt_S1[i0] -> Stmt_S2[i0] : 0 <= i0 <= 9; [Stmt_S2[i0] -> Stmt_S2_Write0[]] -> [Stmt_S3[i0] -> Stmt_S3_Write0[]] : 0 <= i0 <= 9; Stmt_S2[i0] -> Stmt_S3[i0] : 0 <= i0 <= 9; [Stmt_S1[i0] -> Stmt_S1_Write0[]] -> [Stmt_S2[i0] -> Stmt_S2_Write0[]] : 0 <= i0 <= 9; [Stmt_S1[i0] -> Stmt_S1_Write0[]] -> [Stmt_S3[i0] -> Stmt_S3_Write0[]] : 10 <= i0 <= 99 }
-
-;
-; VALUE-LABEL: Printing analysis 'Polly - Calculate dependences' for region: 'S1 => exit.3' in function 'read_after_writes':
-; VALUE-NEXT: RAW dependences:
+; VALUE: RAW dependences:
; VALUE-NEXT: { Stmt_S2[i0] -> Stmt_S3[i0] : 0 <= i0 <= 9; Stmt_S1[i0] -> Stmt_S3[i0] : 10 <= i0 <= 99 }
; VALUE-NEXT: WAR dependences:
; VALUE-NEXT: { }
; VALUE-NEXT: WAW dependences:
; VALUE-NEXT: { Stmt_S1[i0] -> Stmt_S2[i0] : 0 <= i0 <= 9 }
;
-;VALUE_ACCESS-LABEL: Printing analysis 'Polly - Calculate dependences' for region: 'S1 => exit.3' in function 'read_after_writes':
-;VALUE_ACCESS-NEXT: RAW dependences:
+; VALUE: RAW dependences:
+; VALUE-NEXT: { }
+; VALUE-NEXT: WAR dependences:
+; VALUE-NEXT: { Stmt_S1[i0] -> Stmt_S2[i0] : 0 <= i0 <= 9; Stmt_S1[i0] -> Stmt_S3[i0] : 10 <= i0 <= 99 }
+; VALUE-NEXT: WAW dependences:
+; VALUE-NEXT: { Stmt_S2[i0] -> Stmt_S3[i0] : 0 <= i0 <= 9 }
+;
+; VALUE: RAW dependences:
+; VALUE-NEXT: [p] -> { Stmt_S1[i0] -> Stmt_S2[-p + i0] : i0 >= p and 0 <= i0 <= 99 and i0 <= 9 + p }
+; VALUE-NEXT: WAR dependences:
+; VALUE-NEXT: [p] -> { }
+; VALUE-NEXT: WAW dependences:
+; VALUE-NEXT: [p] -> { }
+;
+;VALUE_ACCESS: RAW dependences:
+;VALUE_ACCESS-NEXT: { }
+;VALUE_ACCESS-NEXT: WAR dependences:
+;VALUE_ACCESS-NEXT: { }
+;VALUE_ACCESS-NEXT: WAW dependences:
+;VALUE_ACCESS-NEXT: { Stmt_S1[i0] -> Stmt_S3[i0] : 10 <= i0 <= 99; Stmt_S1[i0] -> Stmt_S2[i0] : 0 <= i0 <= 9; [Stmt_S2[i0] -> Stmt_S2_Write0[]] -> [Stmt_S3[i0] -> Stmt_S3_Write0[]] : 0 <= i0 <= 9; Stmt_S2[i0] -> Stmt_S3[i0] : 0 <= i0 <= 9; [Stmt_S1[i0] -> Stmt_S1_Write0[]] -> [Stmt_S2[i0] -> Stmt_S2_Write0[]] : 0 <= i0 <= 9; [Stmt_S1[i0] -> Stmt_S1_Write0[]] -> [Stmt_S3[i0] -> Stmt_S3_Write0[]] : 10 <= i0 <= 99 }
+;
+;VALUE_ACCESS: RAW dependences:
;VALUE_ACCESS-NEXT: { Stmt_S1[i0] -> Stmt_S3[i0] : 10 <= i0 <= 99; Stmt_S2[i0] -> Stmt_S3[i0] : 0 <= i0 <= 9; [Stmt_S2[i0] -> Stmt_S2_Write0[]] -> [Stmt_S3[i0] -> Stmt_S3_Read0[]] : 0 <= i0 <= 9; [Stmt_S1[i0] -> Stmt_S1_Write0[]] -> [Stmt_S3[i0] -> Stmt_S3_Read0[]] : 10 <= i0 <= 99 }
;VALUE_ACCESS-NEXT: WAR dependences:
@@ -36,64 +45,42 @@
;VALUE_ACCESS-NEXT: WAW dependences:
;VALUE_ACCESS-NEXT: { [Stmt_S1[i0] -> Stmt_S1_Write0[]] -> [Stmt_S2[i0] -> Stmt_S2_Write0[]] : 0 <= i0 <= 9; Stmt_S1[i0] -> Stmt_S2[i0] : 0 <= i0 <= 9 }
;
-; VALUE-LABEL: Printing analysis 'Polly - Calculate dependences' for region: 'S1 => exit.3' in function 'write_after_read':
-; VALUE-NEXT: RAW dependences:
-; VALUE-NEXT: { }
-; VALUE-NEXT: WAR dependences:
-; VALUE-NEXT: { Stmt_S1[i0] -> Stmt_S2[i0] : 0 <= i0 <= 9; Stmt_S1[i0] -> Stmt_S3[i0] : 10 <= i0 <= 99 }
-; VALUE-NEXT: WAW dependences:
-; VALUE-NEXT: { Stmt_S2[i0] -> Stmt_S3[i0] : 0 <= i0 <= 9 }
-;
-;VALUE_ACCESS-LABEL: Printing analysis 'Polly - Calculate dependences' for region: 'S1 => exit.3' in function 'write_after_read':
-;VALUE_ACCESS-NEXT: RAW dependences:
+;VALUE_ACCESS: RAW dependences:
;VALUE_ACCESS-NEXT: { }
;VALUE_ACCESS-NEXT: WAR dependences:
;VALUE_ACCESS-NEXT: { Stmt_S1[i0] -> Stmt_S2[i0] : 0 <= i0 <= 9; Stmt_S1[i0] -> Stmt_S3[i0] : 10 <= i0 <= 99; [Stmt_S1[i0] -> Stmt_S1_Read0[]] -> [Stmt_S2[i0] -> Stmt_S2_Write0[]] : 0 <= i0 <= 9; [Stmt_S1[i0] -> Stmt_S1_Read0[]] -> [Stmt_S3[i0] -> Stmt_S3_Write0[]] : 10 <= i0 <= 99 }
;VALUE_ACCESS-NEXT: WAW dependences:
;VALUE_ACCESS-NEXT: { Stmt_S2[i0] -> Stmt_S3[i0] : 0 <= i0 <= 9; [Stmt_S2[i0] -> Stmt_S2_Write0[]] -> [Stmt_S3[i0] -> Stmt_S3_Write0[]] : 0 <= i0 <= 9 }
;
-; VALUE-LABEL: Printing analysis 'Polly - Calculate dependences' for region: 'S1 => exit.2' in function 'parametric_offset':
-; VALUE-NEXT: RAW dependences:
-; VALUE-NEXT: [p] -> { Stmt_S1[i0] -> Stmt_S2[-p + i0] : i0 >= p and 0 <= i0 <= 99 and i0 <= 9 + p }
-; VALUE-NEXT: WAR dependences:
-; VALUE-NEXT: [p] -> { }
-; VALUE-NEXT: WAW dependences:
-; VALUE-NEXT: [p] -> { }
-;
-;VALUE_ACCESS-LABEL: Printing analysis 'Polly - Calculate dependences' for region: 'S1 => exit.2' in function 'parametric_offset':
-;VALUE_ACCESS-NEXT: RAW dependences:
+;VALUE_ACCESS: RAW dependences:
;VALUE_ACCESS-NEXT: [p] -> { Stmt_S1[i0] -> Stmt_S2[-p + i0] : i0 >= p and 0 <= i0 <= 99 and i0 <= 9 + p; [Stmt_S1[i0] -> Stmt_S1_Write0[]] -> [Stmt_S2[-p + i0] -> Stmt_S2_Read0[]] : i0 >= p and 0 <= i0 <= 99 and i0 <= 9 + p }
;VALUE_ACCESS-NEXT: WAR dependences:
;VALUE_ACCESS-NEXT: [p] -> { }
;VALUE_ACCESS-NEXT: WAW dependences:
;VALUE_ACCESS-NEXT: [p] -> { }
-; MEMORY-LABEL: Printing analysis 'Polly - Calculate dependences' for region: 'S1 => exit.3' in function 'sequential_writes':
-; MEMORY-NEXT: RAW dependences:
+; MEMORY: RAW dependences:
; MEMORY-NEXT: { }
; MEMORY-NEXT: WAR dependences:
; MEMORY-NEXT: { }
; MEMORY-NEXT: WAW dependences:
; MEMORY-NEXT: { Stmt_S1[i0] -> Stmt_S3[i0] : 0 <= i0 <= 99; Stmt_S1[i0] -> Stmt_S2[i0] : 0 <= i0 <= 9; Stmt_S2[i0] -> Stmt_S3[i0] : 0 <= i0 <= 9 }
;
-; MEMORY-LABEL: Printing analysis 'Polly - Calculate dependences' for region: 'S1 => exit.3' in function 'read_after_writes':
-; MEMORY-NEXT: RAW dependences:
+; MEMORY: RAW dependences:
; MEMORY-NEXT: { Stmt_S2[i0] -> Stmt_S3[i0] : 0 <= i0 <= 9; Stmt_S1[i0] -> Stmt_S3[i0] : 0 <= i0 <= 99 }
; MEMORY-NEXT: WAR dependences:
; MEMORY-NEXT: { }
; MEMORY-NEXT: WAW dependences:
; MEMORY-NEXT: { Stmt_S1[i0] -> Stmt_S2[i0] : 0 <= i0 <= 9 }
;
-; MEMORY-LABEL: Printing analysis 'Polly - Calculate dependences' for region: 'S1 => exit.3' in function 'write_after_read':
-; MEMORY-NEXT: RAW dependences:
+; MEMORY: RAW dependences:
; MEMORY-NEXT: { }
; MEMORY-NEXT: WAR dependences:
; MEMORY-NEXT: { Stmt_S1[i0] -> Stmt_S2[i0] : 0 <= i0 <= 9; Stmt_S1[i0] -> Stmt_S3[i0] : 0 <= i0 <= 99 }
; MEMORY-NEXT: WAW dependences:
; MEMORY-NEXT: { Stmt_S2[i0] -> Stmt_S3[i0] : 0 <= i0 <= 9 }
;
-; MEMORY-LABEL: Printing analysis 'Polly - Calculate dependences' for region: 'S1 => exit.2' in function 'parametric_offset':
-; MEMORY-NEXT: RAW dependences:
+; MEMORY: RAW dependences:
; MEMORY-NEXT: [p] -> { Stmt_S1[i0] -> Stmt_S2[-p + i0] : i0 >= p and 0 <= i0 <= 99 and i0 <= 9 + p }
; MEMORY-NEXT: WAR dependences:
; MEMORY-NEXT: [p] -> { }
diff --git a/polly/test/ForwardOpTree/atax.ll b/polly/test/ForwardOpTree/atax.ll
index 0690c1b000fa..496e8315b068 100644
--- a/polly/test/ForwardOpTree/atax.ll
+++ b/polly/test/ForwardOpTree/atax.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-optree-normalize-phi=true -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-optree-normalize-phi=true '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/ForwardOpTree/changed-kind.ll b/polly/test/ForwardOpTree/changed-kind.ll
index a1d59825b3b2..b9081f373404 100644
--- a/polly/test/ForwardOpTree/changed-kind.ll
+++ b/polly/test/ForwardOpTree/changed-kind.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
; In the code below, %0 is known to be equal to the content of @c (constant 0).
; Thus, in order to save a scalar dependency, forward-optree replaces
diff --git a/polly/test/ForwardOpTree/forward_from_region.ll b/polly/test/ForwardOpTree/forward_from_region.ll
index 53d22800081e..767a580dccf9 100644
--- a/polly/test/ForwardOpTree/forward_from_region.ll
+++ b/polly/test/ForwardOpTree/forward_from_region.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Move instructions from region statements.
;
diff --git a/polly/test/ForwardOpTree/forward_hoisted.ll b/polly/test/ForwardOpTree/forward_hoisted.ll
index 32fca00141dd..5d0b0a884b76 100644
--- a/polly/test/ForwardOpTree/forward_hoisted.ll
+++ b/polly/test/ForwardOpTree/forward_hoisted.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-invariant-load-hoisting=true -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-invariant-load-hoisting=true '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Move %val to %bodyB, so %bodyA can be removed (by -polly-simplify).
; This involves making the load-hoisted %val1 available in %bodyB.
diff --git a/polly/test/ForwardOpTree/forward_instruction.ll b/polly/test/ForwardOpTree/forward_instruction.ll
index 1dcd64357324..50a9b07b8a05 100644
--- a/polly/test/ForwardOpTree/forward_instruction.ll
+++ b/polly/test/ForwardOpTree/forward_instruction.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Move %val to %bodyB, so %bodyA can be removed (by -polly-simplify)
;
diff --git a/polly/test/ForwardOpTree/forward_into_region.ll b/polly/test/ForwardOpTree/forward_into_region.ll
index dd18cfe5e61a..ef71b11dc571 100644
--- a/polly/test/ForwardOpTree/forward_into_region.ll
+++ b/polly/test/ForwardOpTree/forward_into_region.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Move instructions to region statements.
;
diff --git a/polly/test/ForwardOpTree/forward_into_region_redundant_use.ll b/polly/test/ForwardOpTree/forward_into_region_redundant_use.ll
index e5458c027880..1c585446ae63 100644
--- a/polly/test/ForwardOpTree/forward_into_region_redundant_use.ll
+++ b/polly/test/ForwardOpTree/forward_into_region_redundant_use.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-invariant-load-hoisting=true -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-invariant-load-hoisting=true '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
define void @foo(ptr %A, i32 %p, ptr %B) {
diff --git a/polly/test/ForwardOpTree/forward_load.ll b/polly/test/ForwardOpTree/forward_load.ll
index 86e3cb0203fa..0bba41833fb1 100644
--- a/polly/test/ForwardOpTree/forward_load.ll
+++ b/polly/test/ForwardOpTree/forward_load.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-optree>)" -disable-output < %s | FileCheck %s -match-full-lines
;
; Rematerialize a load.
diff --git a/polly/test/ForwardOpTree/forward_load_differentarray.ll b/polly/test/ForwardOpTree/forward_load_differentarray.ll
index 786277bdeb87..364bf3ef3713 100644
--- a/polly/test/ForwardOpTree/forward_load_differentarray.ll
+++ b/polly/test/ForwardOpTree/forward_load_differentarray.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; To forward %val, B[j] cannot be reused in bodyC because it is overwritten
; in between. Verify that the alternative C[j] is used instead.
diff --git a/polly/test/ForwardOpTree/forward_load_double_write.ll b/polly/test/ForwardOpTree/forward_load_double_write.ll
index 1618722381fc..4c30c7f8da56 100644
--- a/polly/test/ForwardOpTree/forward_load_double_write.ll
+++ b/polly/test/ForwardOpTree/forward_load_double_write.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Rematerialize a load even in case two writes of identical values are in
; one scop statement.
diff --git a/polly/test/ForwardOpTree/forward_load_fromloop.ll b/polly/test/ForwardOpTree/forward_load_fromloop.ll
index 8f08a1356c38..1494e872a894 100644
--- a/polly/test/ForwardOpTree/forward_load_fromloop.ll
+++ b/polly/test/ForwardOpTree/forward_load_fromloop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Forward the LoadInst %val into %bodyB. %val is executed multiple times;
; we must get the last loaded value.
diff --git a/polly/test/ForwardOpTree/forward_load_indirect.ll b/polly/test/ForwardOpTree/forward_load_indirect.ll
index f83af61e6741..51ce94d26727 100644
--- a/polly/test/ForwardOpTree/forward_load_indirect.ll
+++ b/polly/test/ForwardOpTree/forward_load_indirect.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Forward an operand tree consisting of a speculatable instruction (%add)
; and a load (%val).
diff --git a/polly/test/ForwardOpTree/forward_load_memset_after.ll b/polly/test/ForwardOpTree/forward_load_memset_after.ll
index 13797a44c862..bd2cad411ecc 100644
--- a/polly/test/ForwardOpTree/forward_load_memset_after.ll
+++ b/polly/test/ForwardOpTree/forward_load_memset_after.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Rematerialize a load in the presence of a non-store WRITE access.
;
diff --git a/polly/test/ForwardOpTree/forward_load_memset_before.ll b/polly/test/ForwardOpTree/forward_load_memset_before.ll
index 60b1e076b980..3e89dea37775 100644
--- a/polly/test/ForwardOpTree/forward_load_memset_before.ll
+++ b/polly/test/ForwardOpTree/forward_load_memset_before.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Rematerialize a load in the presence of a non-store WRITE access.
;
diff --git a/polly/test/ForwardOpTree/forward_load_tripleuse.ll b/polly/test/ForwardOpTree/forward_load_tripleuse.ll
index 1d0df2a22e87..7526a8313945 100644
--- a/polly/test/ForwardOpTree/forward_load_tripleuse.ll
+++ b/polly/test/ForwardOpTree/forward_load_tripleuse.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-optree -polly-codegen -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-optree>,polly-codegen' -disable-output < %s | FileCheck %s -match-full-lines
;
; %val1 is used three times: twice by the operand tree of %val2 and once
; more by the store in %bodyB.
diff --git a/polly/test/ForwardOpTree/forward_load_unrelatedunusual.ll b/polly/test/ForwardOpTree/forward_load_unrelatedunusual.ll
index b7bae5628986..daf289d8b0da 100644
--- a/polly/test/ForwardOpTree/forward_load_unrelatedunusual.ll
+++ b/polly/test/ForwardOpTree/forward_load_unrelatedunusual.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Rematerialize a load.
; The non-analyzable store to C[0] is unrelated and can be ignored.
diff --git a/polly/test/ForwardOpTree/forward_phi_load.ll b/polly/test/ForwardOpTree/forward_phi_load.ll
index 0b0bb209a3ef..1457aa96e2de 100644
--- a/polly/test/ForwardOpTree/forward_phi_load.ll
+++ b/polly/test/ForwardOpTree/forward_phi_load.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-optree-normalize-phi=true -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-optree-normalize-phi=true '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Rematerialize a load.
;
diff --git a/polly/test/ForwardOpTree/forward_readonly.ll b/polly/test/ForwardOpTree/forward_readonly.ll
index a29c5bff5d70..646121c4efef 100644
--- a/polly/test/ForwardOpTree/forward_readonly.ll
+++ b/polly/test/ForwardOpTree/forward_readonly.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-analyze-read-only-scalars=true -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines -check-prefixes=STATS,MODEL
-; RUN: opt %loadPolly -polly-analyze-read-only-scalars=false -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines -check-prefixes=STATS,NOMODEL
+; RUN: opt %loadNPMPolly -polly-analyze-read-only-scalars=true '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines -check-prefixes=STATS,MODEL
+; RUN: opt %loadNPMPolly -polly-analyze-read-only-scalars=false '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines -check-prefixes=STATS,NOMODEL
;
; Move %val to %bodyB, so %bodyA can be removed (by -polly-simplify)
;
diff --git a/polly/test/ForwardOpTree/forward_reusue.ll b/polly/test/ForwardOpTree/forward_reusue.ll
index ead8c7379803..d8ad31782ecb 100644
--- a/polly/test/ForwardOpTree/forward_reusue.ll
+++ b/polly/test/ForwardOpTree/forward_reusue.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Move operand tree without duplicating values used multiple times.
;
diff --git a/polly/test/ForwardOpTree/forward_store.ll b/polly/test/ForwardOpTree/forward_store.ll
index a6369eb303c1..17cb8b395eb3 100644
--- a/polly/test/ForwardOpTree/forward_store.ll
+++ b/polly/test/ForwardOpTree/forward_store.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Rematerialize a load.
;
diff --git a/polly/test/ForwardOpTree/forward_synthesizable_definloop.ll b/polly/test/ForwardOpTree/forward_synthesizable_definloop.ll
index f0da9320c43f..57b68180bb12 100644
--- a/polly/test/ForwardOpTree/forward_synthesizable_definloop.ll
+++ b/polly/test/ForwardOpTree/forward_synthesizable_definloop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Copy %val to bodyB, assuming the exit value of %i.
;
diff --git a/polly/test/ForwardOpTree/forward_synthesizable_indvar.ll b/polly/test/ForwardOpTree/forward_synthesizable_indvar.ll
index a38ab543e255..b4828e4c2c42 100644
--- a/polly/test/ForwardOpTree/forward_synthesizable_indvar.ll
+++ b/polly/test/ForwardOpTree/forward_synthesizable_indvar.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Test support for (synthesizable) induction variables.
;
diff --git a/polly/test/ForwardOpTree/forward_synthesizable_useinloop.ll b/polly/test/ForwardOpTree/forward_synthesizable_useinloop.ll
index bb1760ae0ffb..3228bb60d2ca 100644
--- a/polly/test/ForwardOpTree/forward_synthesizable_useinloop.ll
+++ b/polly/test/ForwardOpTree/forward_synthesizable_useinloop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Synthesizable values defined outside of a loop can be used
; inside the loop.
diff --git a/polly/test/ForwardOpTree/forward_transitive.ll b/polly/test/ForwardOpTree/forward_transitive.ll
index 243889437149..aacf1358648f 100644
--- a/polly/test/ForwardOpTree/forward_transitive.ll
+++ b/polly/test/ForwardOpTree/forward_transitive.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Move %v and %val to %bodyB, so %bodyA can be removed (by -polly-simplify)
;
diff --git a/polly/test/ForwardOpTree/jacobi-1d.ll b/polly/test/ForwardOpTree/jacobi-1d.ll
index 05ccd998c1a2..c9c71a15a426 100644
--- a/polly/test/ForwardOpTree/jacobi-1d.ll
+++ b/polly/test/ForwardOpTree/jacobi-1d.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-optree-normalize-phi=true -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-optree-normalize-phi=true '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/ForwardOpTree/noforward_from_region.ll b/polly/test/ForwardOpTree/noforward_from_region.ll
index 30150912f32e..bd5864c25f54 100644
--- a/polly/test/ForwardOpTree/noforward_from_region.ll
+++ b/polly/test/ForwardOpTree/noforward_from_region.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Ensure we do not move instructions from region statements in case the
; instruction to move loads from an array which is also written to from
diff --git a/polly/test/ForwardOpTree/noforward_load_conditional.ll b/polly/test/ForwardOpTree/noforward_load_conditional.ll
index eaa0fc52186b..5474e740de80 100644
--- a/polly/test/ForwardOpTree/noforward_load_conditional.ll
+++ b/polly/test/ForwardOpTree/noforward_load_conditional.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; B[j] is overwritten by at least one statement between the
; definition of %val and its use. Hence, it cannot be forwarded.
diff --git a/polly/test/ForwardOpTree/noforward_load_writebetween.ll b/polly/test/ForwardOpTree/noforward_load_writebetween.ll
index e2272c1c1f13..697c940be4fd 100644
--- a/polly/test/ForwardOpTree/noforward_load_writebetween.ll
+++ b/polly/test/ForwardOpTree/noforward_load_writebetween.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Cannot rematerialize %val from B[0] at bodyC because B[0] has been
; overwritten in bodyB.
diff --git a/polly/test/ForwardOpTree/noforward_outofquota.ll b/polly/test/ForwardOpTree/noforward_outofquota.ll
index 2ec965d71184..306bb8d7558d 100644
--- a/polly/test/ForwardOpTree/noforward_outofquota.ll
+++ b/polly/test/ForwardOpTree/noforward_outofquota.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-optree-max-ops=1 -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
-; RUN: opt %loadPolly -polly-optree-max-ops=1 -polly-optree -disable-output -stats < %s 2>&1 | FileCheck %s -match-full-lines -check-prefix=STATS
+; RUN: opt %loadNPMPolly -polly-optree-max-ops=1 '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-optree-max-ops=1 -passes=polly-optree -disable-output -stats < %s 2>&1 | FileCheck %s -match-full-lines -check-prefix=STATS
; REQUIRES: asserts
;
; for (int j = 0; j < n; j += 1) {
diff --git a/polly/test/ForwardOpTree/noforward_partial.ll b/polly/test/ForwardOpTree/noforward_partial.ll
index 127ac9ff5f14..edb5d34801cc 100644
--- a/polly/test/ForwardOpTree/noforward_partial.ll
+++ b/polly/test/ForwardOpTree/noforward_partial.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Not the entire operand tree can be forwarded;
; some scalar dependencies would remain.
diff --git a/polly/test/ForwardOpTree/noforward_phi.ll b/polly/test/ForwardOpTree/noforward_phi.ll
index 58d41a410d3b..755abad4336e 100644
--- a/polly/test/ForwardOpTree/noforward_phi.ll
+++ b/polly/test/ForwardOpTree/noforward_phi.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Do not move PHI nodes.
;
diff --git a/polly/test/ForwardOpTree/noforward_selfrefphi.ll b/polly/test/ForwardOpTree/noforward_selfrefphi.ll
index b2d4dc51c978..be7e82f72633 100644
--- a/polly/test/ForwardOpTree/noforward_selfrefphi.ll
+++ b/polly/test/ForwardOpTree/noforward_selfrefphi.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-optree-normalize-phi=true -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-optree-normalize-phi=true '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Contains a self-referencing PHINode that would require a
; transitive closure to handle.
diff --git a/polly/test/ForwardOpTree/noforward_sideffects.ll b/polly/test/ForwardOpTree/noforward_sideffects.ll
index a5633769f670..c01b72a1c142 100644
--- a/polly/test/ForwardOpTree/noforward_sideffects.ll
+++ b/polly/test/ForwardOpTree/noforward_sideffects.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Do not forward instructions with side-effects (here: function call).
;
diff --git a/polly/test/ForwardOpTree/noforward_synthesizable_unknownit.ll b/polly/test/ForwardOpTree/noforward_synthesizable_unknownit.ll
index f589fde6e415..776d848072a2 100644
--- a/polly/test/ForwardOpTree/noforward_synthesizable_unknownit.ll
+++ b/polly/test/ForwardOpTree/noforward_synthesizable_unknownit.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Do not try to forward %i.trunc, it is not synthesizable in %body.
;
diff --git a/polly/test/ForwardOpTree/out-of-quota1.ll b/polly/test/ForwardOpTree/out-of-quota1.ll
index 7afdb8e60244..ee3e32698dd0 100644
--- a/polly/test/ForwardOpTree/out-of-quota1.ll
+++ b/polly/test/ForwardOpTree/out-of-quota1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-optree -disable-output %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-optree>' -disable-output %s | FileCheck %s
; This used to loop infinitely because of UINT_MAX returned by ISL on out-of-quota.
diff --git a/polly/test/IstAstInfo/alias_checks_with_empty_context.ll b/polly/test/IstAstInfo/alias_checks_with_empty_context.ll
index 9b95cd5b4bbd..81c29536010b 100644
--- a/polly/test/IstAstInfo/alias_checks_with_empty_context.ll
+++ b/polly/test/IstAstInfo/alias_checks_with_empty_context.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s \
; RUN: | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/IstAstInfo/alias_simple_1.ll b/polly/test/IstAstInfo/alias_simple_1.ll
index 83d470c2d19b..904f55dc32ce 100644
--- a/polly/test/IstAstInfo/alias_simple_1.ll
+++ b/polly/test/IstAstInfo/alias_simple_1.ll
@@ -1,8 +1,8 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-basic-aa -disable-output < %s | FileCheck %s --check-prefix=NOAA
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=BASI
-; RUN: opt %loadPolly -polly-print-ast -disable-basic-aa -tbaa -disable-output < %s | FileCheck %s --check-prefix=TBAA
-; RUN: opt %loadPolly -polly-print-ast -disable-basic-aa -scev-aa -disable-output < %s | FileCheck %s --check-prefix=SCEV
-; RUN: opt %loadPolly -polly-print-ast -disable-basic-aa -globals-aa -disable-output < %s | FileCheck %s --check-prefix=GLOB
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -aa-pipeline= -disable-output < %s | FileCheck %s --check-prefix=NOAA
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -aa-pipeline=basic-aa -disable-output < %s | FileCheck %s --check-prefix=BASI
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -aa-pipeline=tbaa -disable-output < %s | FileCheck %s --check-prefix=TBAA
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -aa-pipeline=scev-aa -disable-output < %s | FileCheck %s --check-prefix=SCEV
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -aa-pipeline=globals-aa -disable-output < %s | FileCheck %s --check-prefix=GLOB
;
; int A[1024];
;
diff --git a/polly/test/IstAstInfo/alias_simple_2.ll b/polly/test/IstAstInfo/alias_simple_2.ll
index bbf528f93b47..5fae579995b2 100644
--- a/polly/test/IstAstInfo/alias_simple_2.ll
+++ b/polly/test/IstAstInfo/alias_simple_2.ll
@@ -1,9 +1,9 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-basic-aa -disable-output < %s | FileCheck %s --check-prefix=NOAA
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=BASI
-; RUN: opt %loadPolly -polly-print-ast -disable-basic-aa -tbaa -disable-output < %s | FileCheck %s --check-prefix=TBAA
-; RUN: opt %loadPolly -polly-print-ast -disable-basic-aa -scev-aa -disable-output < %s | FileCheck %s --check-prefix=SCEV
-; RUN: opt %loadPolly -polly-print-ast -disable-basic-aa -globals-aa -disable-output < %s | FileCheck %s --check-prefix=GLOB
-; RUN: opt %loadPolly -polly-print-ast -disable-basic-aa -globals-aa -polly-allow-nonaffine -disable-output < %s | FileCheck %s --check-prefix=NONAFFINE
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -aa-pipeline= -disable-output < %s | FileCheck %s --check-prefix=NOAA
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -aa-pipeline=basic-aa -disable-output < %s | FileCheck %s --check-prefix=BASI
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -aa-pipeline=tbaa -disable-output < %s | FileCheck %s --check-prefix=TBAA
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -aa-pipeline=scev-aa -disable-output < %s | FileCheck %s --check-prefix=SCEV
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -aa-pipeline=globals-aa -disable-output < %s | FileCheck %s --check-prefix=GLOB
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -aa-pipeline=globals-aa -polly-allow-nonaffine -disable-output < %s | FileCheck %s --check-prefix=NONAFFINE
;
; int A[1024], B[1024];
;
diff --git a/polly/test/IstAstInfo/alias_simple_3.ll b/polly/test/IstAstInfo/alias_simple_3.ll
index 9067521323ab..8599c2993474 100644
--- a/polly/test/IstAstInfo/alias_simple_3.ll
+++ b/polly/test/IstAstInfo/alias_simple_3.ll
@@ -1,8 +1,8 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-basic-aa -disable-output < %s | FileCheck %s --check-prefix=NOAA
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=BASI
-; RUN: opt %loadPolly -polly-print-ast -disable-basic-aa -tbaa -disable-output < %s | FileCheck %s --check-prefix=TBAA
-; RUN: opt %loadPolly -polly-print-ast -disable-basic-aa -scev-aa -disable-output < %s | FileCheck %s --check-prefix=SCEV
-; RUN: opt %loadPolly -polly-print-ast -disable-basic-aa -globals-aa -disable-output < %s | FileCheck %s --check-prefix=GLOB
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -aa-pipeline= -disable-output < %s | FileCheck %s --check-prefix=NOAA
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -aa-pipeline=basic-aa -disable-output < %s | FileCheck %s --check-prefix=BASI
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -aa-pipeline=tbaa -disable-output < %s | FileCheck %s --check-prefix=TBAA
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -aa-pipeline=scev-aa -disable-output < %s | FileCheck %s --check-prefix=SCEV
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -aa-pipeline=globals-aa -disable-output < %s | FileCheck %s --check-prefix=GLOB
;
; int A[1024];
; float B[1024];
diff --git a/polly/test/IstAstInfo/aliasing_arrays_with_identical_base.ll b/polly/test/IstAstInfo/aliasing_arrays_with_identical_base.ll
index 0cabd20168ba..dc21dc1f96a4 100644
--- a/polly/test/IstAstInfo/aliasing_arrays_with_identical_base.ll
+++ b/polly/test/IstAstInfo/aliasing_arrays_with_identical_base.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s \
; RUN: -polly-invariant-load-hoisting \
; RUN: | FileCheck %s
diff --git a/polly/test/IstAstInfo/aliasing_multiple_alias_groups.ll b/polly/test/IstAstInfo/aliasing_multiple_alias_groups.ll
index b824c211fd31..8d4adfa405f0 100644
--- a/polly/test/IstAstInfo/aliasing_multiple_alias_groups.ll
+++ b/polly/test/IstAstInfo/aliasing_multiple_alias_groups.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=NOAA
-; RUN: opt %loadPolly -polly-print-ast -tbaa -disable-output < %s | FileCheck %s --check-prefix=TBAA
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -aa-pipeline= -disable-output < %s | FileCheck %s --check-prefix=NOAA
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -aa-pipeline=tbaa -disable-output < %s | FileCheck %s --check-prefix=TBAA
;
; void jd(int *Int0, int *Int1, float *Float0, float *Float1) {
; for (int i = 0; i < 1024; i++) {
diff --git a/polly/test/IstAstInfo/aliasing_parametric_simple_1.ll b/polly/test/IstAstInfo/aliasing_parametric_simple_1.ll
index e0c3255dd766..be37b27b6e37 100644
--- a/polly/test/IstAstInfo/aliasing_parametric_simple_1.ll
+++ b/polly/test/IstAstInfo/aliasing_parametric_simple_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output %s | FileCheck %s
;
; void jd(int *A, int *B, int c) {
; for (int i = 0; i < 1024; i++)
diff --git a/polly/test/IstAstInfo/aliasing_parametric_simple_2.ll b/polly/test/IstAstInfo/aliasing_parametric_simple_2.ll
index 74bad6c75784..15550583340d 100644
--- a/polly/test/IstAstInfo/aliasing_parametric_simple_2.ll
+++ b/polly/test/IstAstInfo/aliasing_parametric_simple_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
;
; void jd(int *A, int *B, int c) {
; for (int i = 0; i < 1024; i++)
diff --git a/polly/test/IstAstInfo/dependence_distance_minimal.ll b/polly/test/IstAstInfo/dependence_distance_minimal.ll
index c6b1d156e55d..d69cc3f9fc3f 100644
--- a/polly/test/IstAstInfo/dependence_distance_minimal.ll
+++ b/polly/test/IstAstInfo/dependence_distance_minimal.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
;
; The minimal dependence distance of the innermost loop should be 1 instead of 250.
; CHECK: #pragma minimal dependence distance: 1
diff --git a/polly/test/IstAstInfo/domain_bounded_only_with_context.ll b/polly/test/IstAstInfo/domain_bounded_only_with_context.ll
index 32cebd7a3a8b..e2cf0bd9c0df 100644
--- a/polly/test/IstAstInfo/domain_bounded_only_with_context.ll
+++ b/polly/test/IstAstInfo/domain_bounded_only_with_context.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
; CHECK: {
; CHECK-NEXT: if (p <= -1 || p >= 1)
diff --git a/polly/test/IstAstInfo/non_affine_access.ll b/polly/test/IstAstInfo/non_affine_access.ll
index d8757b2e21cf..98e8d2db959f 100644
--- a/polly/test/IstAstInfo/non_affine_access.ll
+++ b/polly/test/IstAstInfo/non_affine_access.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -polly-ast-print-accesses -polly-allow-nonaffine -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -polly-ast-print-accesses -polly-allow-nonaffine -disable-output < %s | FileCheck %s
;
; void non_affine_access(float A[]) {
; for (long i = 0; i < 1024; i++)
diff --git a/polly/test/IstAstInfo/reduction_clauses_onedimensional_access.ll b/polly/test/IstAstInfo/reduction_clauses_onedimensional_access.ll
index 8d52e345a76d..c20a7d6db13c 100644
--- a/polly/test/IstAstInfo/reduction_clauses_onedimensional_access.ll
+++ b/polly/test/IstAstInfo/reduction_clauses_onedimensional_access.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
;
; CHECK: #pragma known-parallel reduction (^ : MemRef_sum)
; void f(int N, int M, int *sum) {
diff --git a/polly/test/IstAstInfo/reduction_dependences_equal_non_reduction_dependences.ll b/polly/test/IstAstInfo/reduction_dependences_equal_non_reduction_dependences.ll
index 9c6eea6aaa1e..e6092f0b068f 100644
--- a/polly/test/IstAstInfo/reduction_dependences_equal_non_reduction_dependences.ll
+++ b/polly/test/IstAstInfo/reduction_dependences_equal_non_reduction_dependences.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-ast -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
;
; This loopnest contains a reduction which imposes the same dependences as the
; accesses to the array A. We need to ensure we do __not__ parallelize anything
diff --git a/polly/test/IstAstInfo/reduction_different_reduction_clauses.ll b/polly/test/IstAstInfo/reduction_different_reduction_clauses.ll
index 5104f716d810..14de70f9357c 100644
--- a/polly/test/IstAstInfo/reduction_different_reduction_clauses.ll
+++ b/polly/test/IstAstInfo/reduction_different_reduction_clauses.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-ast -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
;
; CHECK: #pragma simd reduction (+ : MemRef_sum{{[1,2]}}, MemRef_sum{{[1,2]}}) reduction (* : MemRef_prod) reduction (| : MemRef_or) reduction (& : MemRef_and)
; CHECK: #pragma known-parallel reduction (+ : MemRef_sum{{[1,2]}}, MemRef_sum{{[1,2]}}) reduction (* : MemRef_prod) reduction (| : MemRef_or) reduction (& : MemRef_and)
diff --git a/polly/test/IstAstInfo/reduction_modulo_and_loop_reversal_schedule.ll b/polly/test/IstAstInfo/reduction_modulo_and_loop_reversal_schedule.ll
index 8a42cf8bd165..15fca884c2b6 100644
--- a/polly/test/IstAstInfo/reduction_modulo_and_loop_reversal_schedule.ll
+++ b/polly/test/IstAstInfo/reduction_modulo_and_loop_reversal_schedule.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-print-ast -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
;
; CHECK: #pragma known-parallel reduction (+ : MemRef_A)
; CHECK-NEXT: for (int c0 = 0; c0 <= 2; c0 += 1) {
diff --git a/polly/test/IstAstInfo/reduction_modulo_and_loop_reversal_schedule_2.ll b/polly/test/IstAstInfo/reduction_modulo_and_loop_reversal_schedule_2.ll
index 8f5efd165546..44e9aa4d1e56 100644
--- a/polly/test/IstAstInfo/reduction_modulo_and_loop_reversal_schedule_2.ll
+++ b/polly/test/IstAstInfo/reduction_modulo_and_loop_reversal_schedule_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-print-ast -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
;
; CHECK: #pragma known-parallel reduction
; CHECK: for (int c0 = 0; c0 <= 2; c0 += 1) {
diff --git a/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions.ll b/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions.ll
index a711a36a367f..266753555cab 100644
--- a/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions.ll
+++ b/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-print-ast -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
;
; CHECK: #pragma known-parallel
; CHECK: for (int c0 = 0; c0 <= 1; c0 += 1)
diff --git a/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_2.ll b/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_2.ll
index 485d6965b6d3..46b2559c6e0b 100644
--- a/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_2.ll
+++ b/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-print-ast -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
;
; Verify that the outer dimension doesn't carry reduction dependences
;
diff --git a/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_3.ll b/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_3.ll
index 375fabbf6a8b..6f40ee90fef5 100644
--- a/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_3.ll
+++ b/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_3.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-print-ast -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
;
; Verify that the outer dimension doesn't carry reduction dependences
;
diff --git a/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_4.ll b/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_4.ll
index 584c076dcff4..f82b9569a88b 100644
--- a/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_4.ll
+++ b/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_4.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-print-ast -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
;
; Verify that the outer dimension doesn't carry reduction dependences
;
diff --git a/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_5.ll b/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_5.ll
index eaa3444a04d7..b889db4819cd 100644
--- a/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_5.ll
+++ b/polly/test/IstAstInfo/reduction_modulo_schedule_multiple_dimensions_5.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-print-ast -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
;
; Verify that only the outer dimension needs privatization
;
diff --git a/polly/test/IstAstInfo/reduction_multiple_dimensions.ll b/polly/test/IstAstInfo/reduction_multiple_dimensions.ll
index 9618ec872c38..2a8fd7a4f670 100644
--- a/polly/test/IstAstInfo/reduction_multiple_dimensions.ll
+++ b/polly/test/IstAstInfo/reduction_multiple_dimensions.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
;
; CHECK-NOT:#pragma known-parallel reduction
; CHECK: #pragma known-parallel
diff --git a/polly/test/IstAstInfo/reduction_multiple_dimensions_2.ll b/polly/test/IstAstInfo/reduction_multiple_dimensions_2.ll
index af317570eb37..25f2fa597e34 100644
--- a/polly/test/IstAstInfo/reduction_multiple_dimensions_2.ll
+++ b/polly/test/IstAstInfo/reduction_multiple_dimensions_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
;
; CHECK-NOT:#pragma known-parallel reduction
; CHECK: #pragma known-parallel
diff --git a/polly/test/IstAstInfo/reduction_multiple_dimensions_3.ll b/polly/test/IstAstInfo/reduction_multiple_dimensions_3.ll
index 1f7191433bf8..0d6be9a9da9b 100644
--- a/polly/test/IstAstInfo/reduction_multiple_dimensions_3.ll
+++ b/polly/test/IstAstInfo/reduction_multiple_dimensions_3.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
;
; CHECK-NOT:#pragma known-parallel reduction
; CHECK: #pragma known-parallel
diff --git a/polly/test/IstAstInfo/reduction_multiple_dimensions_4.ll b/polly/test/IstAstInfo/reduction_multiple_dimensions_4.ll
index 40bae5e9ac6c..8b537513cc8d 100644
--- a/polly/test/IstAstInfo/reduction_multiple_dimensions_4.ll
+++ b/polly/test/IstAstInfo/reduction_multiple_dimensions_4.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s | FileCheck %s
;
; CHECK-NOT:#pragma known-parallel reduction
; CHECK: #pragma known-parallel
diff --git a/polly/test/IstAstInfo/run-time-condition.ll b/polly/test/IstAstInfo/run-time-condition.ll
index ccc9c7cfd321..44d3534f651c 100644
--- a/polly/test/IstAstInfo/run-time-condition.ll
+++ b/polly/test/IstAstInfo/run-time-condition.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
; for (i = 0; i < 1024; i++)
; A[i] = B[i];
diff --git a/polly/test/IstAstInfo/runtime_context_with_error_blocks.ll b/polly/test/IstAstInfo/runtime_context_with_error_blocks.ll
index 2853e0acf9b8..8c3f230cb413 100644
--- a/polly/test/IstAstInfo/runtime_context_with_error_blocks.ll
+++ b/polly/test/IstAstInfo/runtime_context_with_error_blocks.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
;
; Verify we do not simplify the runtime check to "true" due to the domain
; constraints as the test contains an error block that influenced the domains
diff --git a/polly/test/IstAstInfo/simple-run-time-condition.ll b/polly/test/IstAstInfo/simple-run-time-condition.ll
index 5fb99f0676b7..488cd180b899 100644
--- a/polly/test/IstAstInfo/simple-run-time-condition.ll
+++ b/polly/test/IstAstInfo/simple-run-time-condition.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -polly-precise-inbounds -polly-precise-fold-accesses -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -polly-precise-inbounds -polly-precise-fold-accesses -disable-output < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/polly/test/IstAstInfo/single_loop_strip_mine.ll b/polly/test/IstAstInfo/single_loop_strip_mine.ll
index 1c627f817b0b..afe6179188c0 100644
--- a/polly/test/IstAstInfo/single_loop_strip_mine.ll
+++ b/polly/test/IstAstInfo/single_loop_strip_mine.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-ast -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -basic-aa -polly-import-jscop -polly-ast-print-accesses -polly-ast-detect-parallel -polly-print-ast -disable-output < %s | FileCheck %s -check-prefix=CHECK-VECTOR
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-ast-print-accesses -polly-ast-detect-parallel '-passes=polly-import-jscop,print<polly-ast>' -disable-output < %s | FileCheck %s -check-prefix=CHECK-VECTOR
; for (i = 0; i < 1024; i++)
; A[i] = B[i];
diff --git a/polly/test/IstAstInfo/single_loop_uint_max_iterations.ll b/polly/test/IstAstInfo/single_loop_uint_max_iterations.ll
index f1cd5dae11ce..f614f90fc3fc 100644
--- a/polly/test/IstAstInfo/single_loop_uint_max_iterations.ll
+++ b/polly/test/IstAstInfo/single_loop_uint_max_iterations.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
; XFAIL: *
;#include "limits.h"
diff --git a/polly/test/IstAstInfo/single_loop_ull_max_iterations.ll b/polly/test/IstAstInfo/single_loop_ull_max_iterations.ll
index d421e221240a..e91ea1327869 100644
--- a/polly/test/IstAstInfo/single_loop_ull_max_iterations.ll
+++ b/polly/test/IstAstInfo/single_loop_ull_max_iterations.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s | FileCheck %s
; XFAIL: *
;#include "limits.h"
diff --git a/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Bad-relation.ll b/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Bad-relation.ll
index d4a1a6222518..49a962592bb9 100644
--- a/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Bad-relation.ll
+++ b/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Bad-relation.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-ast -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
;
; CHECK: expecting other token
;
diff --git a/polly/test/JSONExporter/ImportAccesses/ImportAccesses-No-accesses-key.ll b/polly/test/JSONExporter/ImportAccesses/ImportAccesses-No-accesses-key.ll
index 43f9d3eda049..749b962b260f 100644
--- a/polly/test/JSONExporter/ImportAccesses/ImportAccesses-No-accesses-key.ll
+++ b/polly/test/JSONExporter/ImportAccesses/ImportAccesses-No-accesses-key.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-ast -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
;
; CHECK: Statement from JScop file has no key name 'accesses' for index 1.
;
diff --git a/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Not-enough-MemAcc.ll b/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Not-enough-MemAcc.ll
index 24ad03741216..1d97e3ebca62 100644
--- a/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Not-enough-MemAcc.ll
+++ b/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Not-enough-MemAcc.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-ast -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
;
; CHECK: The number of memory accesses in the JSop file and the number of memory accesses differ for index 0.
;
diff --git a/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Not-enough-statements.ll b/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Not-enough-statements.ll
index 1060926e7fac..f4b739398f9f 100644
--- a/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Not-enough-statements.ll
+++ b/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Not-enough-statements.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-ast -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
;
; CHECK: The number of indices and the number of statements differ.
;
diff --git a/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Relation-mispelled.ll b/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Relation-mispelled.ll
index 07975976c38b..1f5cda3518a2 100644
--- a/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Relation-mispelled.ll
+++ b/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Relation-mispelled.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-ast -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
;
; CHECK: Memory access number 0 has no key name 'relation' for statement number 1.
;
diff --git a/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Statements-mispelled.ll b/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Statements-mispelled.ll
index 9f7259633811..0c750849b51e 100644
--- a/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Statements-mispelled.ll
+++ b/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Statements-mispelled.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-ast -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
;
; CHECK: JScop file has no key name 'statements'.
;
diff --git a/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Undeclared-ScopArrayInfo.ll b/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Undeclared-ScopArrayInfo.ll
index df7eb42da85f..d8c9c3f4ab2e 100644
--- a/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Undeclared-ScopArrayInfo.ll
+++ b/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Undeclared-ScopArrayInfo.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-ast -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
;
; CHECK: JScop file contains access function with undeclared ScopArrayInfo
;
diff --git a/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Wrong-number-dimensions.ll b/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Wrong-number-dimensions.ll
index 61c1173db2e7..f8d7cb8c1453 100644
--- a/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Wrong-number-dimensions.ll
+++ b/polly/test/JSONExporter/ImportAccesses/ImportAccesses-Wrong-number-dimensions.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-ast -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
;
; CHECK: JScop file changes the number of parameter dimensions.
;
diff --git a/polly/test/JSONExporter/ImportArrays/ImportArrays-Mispelled-type.ll b/polly/test/JSONExporter/ImportArrays/ImportArrays-Mispelled-type.ll
index a14ae5c4d1bc..6e13a5e413d7 100644
--- a/polly/test/JSONExporter/ImportArrays/ImportArrays-Mispelled-type.ll
+++ b/polly/test/JSONExporter/ImportArrays/ImportArrays-Mispelled-type.ll
@@ -1,4 +1,4 @@
- ; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -disable-output < %s 2>&1 | FileCheck %s
+ ; RUN: not --crash opt %loadNPMPolly -passes=polly-import-jscop -polly-import-jscop-postfix=transformed -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Array has not a valid type.
;
diff --git a/polly/test/JSONExporter/ImportArrays/ImportArrays-Negative-size.ll b/polly/test/JSONExporter/ImportArrays/ImportArrays-Negative-size.ll
index 2a03197f1c1b..7f6578776e0b 100644
--- a/polly/test/JSONExporter/ImportArrays/ImportArrays-Negative-size.ll
+++ b/polly/test/JSONExporter/ImportArrays/ImportArrays-Negative-size.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-stmt-granularity=bb -polly-import-jscop -polly-import-jscop-postfix=transformed -disable-output < %s 2>&1 | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly -polly-stmt-granularity=bb -passes=polly-import-jscop -polly-import-jscop-postfix=transformed -disable-output < %s 2>&1 | FileCheck %s
;
; #define Ni 1056
; #define Nj 1056
diff --git a/polly/test/JSONExporter/ImportArrays/ImportArrays-No-name.ll b/polly/test/JSONExporter/ImportArrays/ImportArrays-No-name.ll
index 45bb3495de08..e698bdc488c2 100644
--- a/polly/test/JSONExporter/ImportArrays/ImportArrays-No-name.ll
+++ b/polly/test/JSONExporter/ImportArrays/ImportArrays-No-name.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -disable-output < %s 2>&1 | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly -passes=polly-import-jscop -polly-import-jscop-postfix=transformed -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Array has no key 'name'.
;
diff --git a/polly/test/JSONExporter/ImportArrays/ImportArrays-No-sizes-key.ll b/polly/test/JSONExporter/ImportArrays/ImportArrays-No-sizes-key.ll
index 5bbb974346ba..f130b6556e3e 100644
--- a/polly/test/JSONExporter/ImportArrays/ImportArrays-No-sizes-key.ll
+++ b/polly/test/JSONExporter/ImportArrays/ImportArrays-No-sizes-key.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -disable-output < %s 2>&1 | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly -passes=polly-import-jscop -polly-import-jscop-postfix=transformed -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Array has no key 'sizes'.
;
diff --git a/polly/test/JSONExporter/ImportArrays/ImportArrays-No-type-key.ll b/polly/test/JSONExporter/ImportArrays/ImportArrays-No-type-key.ll
index af013992fca0..68d2e50c6730 100644
--- a/polly/test/JSONExporter/ImportArrays/ImportArrays-No-type-key.ll
+++ b/polly/test/JSONExporter/ImportArrays/ImportArrays-No-type-key.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -disable-output < %s 2>&1 | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly -passes=polly-import-jscop -polly-import-jscop-postfix=transformed -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Array has no key 'type'.
;
diff --git a/polly/test/JSONExporter/ImportContext/ImportContext-Context-mispelled.ll b/polly/test/JSONExporter/ImportContext/ImportContext-Context-mispelled.ll
index 2490e44ec347..94c77dc2a013 100644
--- a/polly/test/JSONExporter/ImportContext/ImportContext-Context-mispelled.ll
+++ b/polly/test/JSONExporter/ImportContext/ImportContext-Context-mispelled.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-ast -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
;
; CHECK: JScop file has no key named 'context'.
;
diff --git a/polly/test/JSONExporter/ImportContext/ImportContext-Not-parameter-set.ll b/polly/test/JSONExporter/ImportContext/ImportContext-Not-parameter-set.ll
index 66ce6a6ed922..c20d5c02d662 100644
--- a/polly/test/JSONExporter/ImportContext/ImportContext-Not-parameter-set.ll
+++ b/polly/test/JSONExporter/ImportContext/ImportContext-Not-parameter-set.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-ast -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
;
; CHECK: The isl_set is not a parameter set.
;
diff --git a/polly/test/JSONExporter/ImportContext/ImportContext-Unvalid-Context.ll b/polly/test/JSONExporter/ImportContext/ImportContext-Unvalid-Context.ll
index 7bcc54dde52e..92f4d61212e9 100644
--- a/polly/test/JSONExporter/ImportContext/ImportContext-Unvalid-Context.ll
+++ b/polly/test/JSONExporter/ImportContext/ImportContext-Unvalid-Context.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-ast -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
;
; CHECK: unexpected isl_token
;
diff --git a/polly/test/JSONExporter/ImportContext/ImportContext-Wrong-dimension.ll b/polly/test/JSONExporter/ImportContext/ImportContext-Wrong-dimension.ll
index 65cdcbdcdef6..89668d8d573b 100644
--- a/polly/test/JSONExporter/ImportContext/ImportContext-Wrong-dimension.ll
+++ b/polly/test/JSONExporter/ImportContext/ImportContext-Wrong-dimension.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-ast -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
;
; CHECK: Imported context has the wrong number of parameters : Found 2 Expected 1
;
diff --git a/polly/test/JSONExporter/ImportSchedule/ImportSchedule-No-schedule-key.ll b/polly/test/JSONExporter/ImportSchedule/ImportSchedule-No-schedule-key.ll
index b52db0876cc5..efe15c14ce90 100644
--- a/polly/test/JSONExporter/ImportSchedule/ImportSchedule-No-schedule-key.ll
+++ b/polly/test/JSONExporter/ImportSchedule/ImportSchedule-No-schedule-key.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-ast -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
;
; CHECK: Statement 0 has no 'schedule' key.
;
diff --git a/polly/test/JSONExporter/ImportSchedule/ImportSchedule-Schedule-not-valid.ll b/polly/test/JSONExporter/ImportSchedule/ImportSchedule-Schedule-not-valid.ll
index 5ce3ad267bb0..db516f6d7d33 100644
--- a/polly/test/JSONExporter/ImportSchedule/ImportSchedule-Schedule-not-valid.ll
+++ b/polly/test/JSONExporter/ImportSchedule/ImportSchedule-Schedule-not-valid.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-ast -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
;
; CHECK: expecting other token
;
diff --git a/polly/test/JSONExporter/ImportSchedule/ImportSchedule-Statements-mispelled.ll b/polly/test/JSONExporter/ImportSchedule/ImportSchedule-Statements-mispelled.ll
index 4329653899b2..b93c984d7d9d 100644
--- a/polly/test/JSONExporter/ImportSchedule/ImportSchedule-Statements-mispelled.ll
+++ b/polly/test/JSONExporter/ImportSchedule/ImportSchedule-Statements-mispelled.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-ast -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
;
; CHECK: JScop file has no key name 'statements'.
;
diff --git a/polly/test/JSONExporter/ImportSchedule/ImportSchedule-Wrong-number-statements.ll b/polly/test/JSONExporter/ImportSchedule/ImportSchedule-Wrong-number-statements.ll
index f66fc6c1e5d7..3fa14c64cd63 100644
--- a/polly/test/JSONExporter/ImportSchedule/ImportSchedule-Wrong-number-statements.ll
+++ b/polly/test/JSONExporter/ImportSchedule/ImportSchedule-Wrong-number-statements.ll
@@ -1,4 +1,4 @@
-; RUN: not --crash opt %loadPolly -polly-import-jscop -polly-ast -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
+; RUN: not --crash opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-ast>' -polly-ast-detect-parallel -disable-output < %s 2>&1 >/dev/null | FileCheck %s
;
; CHECK: The number of indices and the number of statements differ.
;
diff --git a/polly/test/MaximalStaticExpansion/load_after_store_same_statement.ll b/polly/test/MaximalStaticExpansion/load_after_store_same_statement.ll
index 791210f7710d..1d81ff7ef2dc 100644
--- a/polly/test/MaximalStaticExpansion/load_after_store_same_statement.ll
+++ b/polly/test/MaximalStaticExpansion/load_after_store_same_statement.ll
@@ -1,6 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-mse -polly-print-scops -disable-output < %s | FileCheck %s
; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb "-passes=scop(print<polly-mse>)" -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-mse -polly-print-scops -pass-remarks-analysis="polly-mse" -disable-output < %s 2>&1| FileCheck %s --check-prefix=MSE
; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb "-passes=scop(print<polly-mse>)" -pass-remarks-analysis="polly-mse" -disable-output < %s 2>&1 | FileCheck %s --check-prefix=MSE
;
; Verify that the expansion of an array with a load after a store in the same statement is not done.
diff --git a/polly/test/MaximalStaticExpansion/read_from_original.ll b/polly/test/MaximalStaticExpansion/read_from_original.ll
index 59f9379516c7..57017381c661 100644
--- a/polly/test/MaximalStaticExpansion/read_from_original.ll
+++ b/polly/test/MaximalStaticExpansion/read_from_original.ll
@@ -1,6 +1,4 @@
-; RUN: opt %loadPolly -polly-mse -polly-print-scops -disable-output < %s | FileCheck %s
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-mse>)" -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-mse -polly-print-scops -pass-remarks-analysis="polly-mse" -disable-output < %s 2>&1| FileCheck %s --check-prefix=MSE
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-mse>)" -pass-remarks-analysis="polly-mse" -disable-output < %s 2>&1 | FileCheck %s --check-prefix=MSE
;
; Verify that Polly detects problems and does not expand the array
diff --git a/polly/test/MaximalStaticExpansion/too_many_writes.ll b/polly/test/MaximalStaticExpansion/too_many_writes.ll
index 50a66cd11d0a..7e33de17a174 100644
--- a/polly/test/MaximalStaticExpansion/too_many_writes.ll
+++ b/polly/test/MaximalStaticExpansion/too_many_writes.ll
@@ -1,6 +1,4 @@
-; RUN: opt %loadPolly -polly-mse -polly-print-scops -disable-output < %s | FileCheck %s
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-mse>)" -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-mse -polly-print-scops -pass-remarks-analysis="polly-mse" -disable-output < %s 2>&1 | FileCheck %s --check-prefix=MSE
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-mse>)" -pass-remarks-analysis="polly-mse" -disable-output < %s 2>&1 | FileCheck %s --check-prefix=MSE
;
; Verify that Polly detects problems and does not expand the array
diff --git a/polly/test/MaximalStaticExpansion/working_deps_between_inners.ll b/polly/test/MaximalStaticExpansion/working_deps_between_inners.ll
index 8e2707cfee64..355fc02600d5 100644
--- a/polly/test/MaximalStaticExpansion/working_deps_between_inners.ll
+++ b/polly/test/MaximalStaticExpansion/working_deps_between_inners.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-mse -polly-print-scops -disable-output < %s | FileCheck %s
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-mse>)" -disable-output < %s | FileCheck %s
;
; Verify that the accesses are correctly expanded for MemoryKind::Array
diff --git a/polly/test/MaximalStaticExpansion/working_deps_between_inners_phi.ll b/polly/test/MaximalStaticExpansion/working_deps_between_inners_phi.ll
index 2bf49b89db05..930539547cc9 100644
--- a/polly/test/MaximalStaticExpansion/working_deps_between_inners_phi.ll
+++ b/polly/test/MaximalStaticExpansion/working_deps_between_inners_phi.ll
@@ -1,6 +1,4 @@
-; RUN: opt %loadPolly -polly-mse -polly-print-scops -disable-output < %s | FileCheck %s
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-mse>)" -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-mse -polly-print-scops -pass-remarks-analysis="polly-mse" -disable-output < %s 2>&1 | FileCheck %s --check-prefix=MSE
; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb "-passes=scop(print<polly-mse>)" -pass-remarks-analysis="polly-mse" -disable-output < %s 2>&1 | FileCheck %s --check-prefix=MSE
;
; Verify that the accesses are correctly expanded for MemoryKind::Array and MemoryKind::PHI.
diff --git a/polly/test/MaximalStaticExpansion/working_expansion.ll b/polly/test/MaximalStaticExpansion/working_expansion.ll
index bb5b2360143f..a055e50225e9 100644
--- a/polly/test/MaximalStaticExpansion/working_expansion.ll
+++ b/polly/test/MaximalStaticExpansion/working_expansion.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-mse -polly-print-scops -disable-output < %s | FileCheck %s
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-mse>)" -disable-output < %s | FileCheck %s
;
; Verify that the accesses are correctly expanded for MemoryKind::Array
diff --git a/polly/test/MaximalStaticExpansion/working_expansion_multiple_dependences_per_statement.ll b/polly/test/MaximalStaticExpansion/working_expansion_multiple_dependences_per_statement.ll
index 89ff7890fc7e..77338c9aac20 100644
--- a/polly/test/MaximalStaticExpansion/working_expansion_multiple_dependences_per_statement.ll
+++ b/polly/test/MaximalStaticExpansion/working_expansion_multiple_dependences_per_statement.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-mse -polly-print-scops -disable-output < %s | FileCheck %s
; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb "-passes=scop(print<polly-mse>)" -disable-output < %s | FileCheck %s
;
; Verify that the accesses are correctly expanded
diff --git a/polly/test/MaximalStaticExpansion/working_expansion_multiple_instruction_per_statement.ll b/polly/test/MaximalStaticExpansion/working_expansion_multiple_instruction_per_statement.ll
index 7ffd39f0f534..9cfa5536072b 100644
--- a/polly/test/MaximalStaticExpansion/working_expansion_multiple_instruction_per_statement.ll
+++ b/polly/test/MaximalStaticExpansion/working_expansion_multiple_instruction_per_statement.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-mse -polly-print-scops -disable-output < %s | FileCheck %s
; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb "-passes=scop(print<polly-mse>)" -disable-output < %s | FileCheck %s
;
; Verify that the accesses are correctly expanded
diff --git a/polly/test/MaximalStaticExpansion/working_phi_expansion.ll b/polly/test/MaximalStaticExpansion/working_phi_expansion.ll
index 43919c61b045..63e4d4804627 100644
--- a/polly/test/MaximalStaticExpansion/working_phi_expansion.ll
+++ b/polly/test/MaximalStaticExpansion/working_phi_expansion.ll
@@ -1,6 +1,4 @@
-; RUN: opt %loadPolly -polly-mse -polly-print-scops -disable-output < %s | FileCheck %s
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-mse>)" -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-mse -polly-print-scops -pass-remarks-analysis="polly-mse" -disable-output < %s 2>&1 | FileCheck %s --check-prefix=MSE
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-mse>)" -pass-remarks-analysis="polly-mse" -disable-output < %s 2>&1 | FileCheck %s --check-prefix=MSE
;
; Verify that the accesses are correctly expanded for MemoryKind::PHI
diff --git a/polly/test/MaximalStaticExpansion/working_phi_two_scalars.ll b/polly/test/MaximalStaticExpansion/working_phi_two_scalars.ll
index a581a389e742..87bd57abab8d 100644
--- a/polly/test/MaximalStaticExpansion/working_phi_two_scalars.ll
+++ b/polly/test/MaximalStaticExpansion/working_phi_two_scalars.ll
@@ -1,6 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-mse -polly-print-scops -disable-output < %s | FileCheck %s
; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb "-passes=scop(print<polly-mse>)" -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-mse -polly-print-scops -pass-remarks-analysis="polly-mse" -disable-output < %s 2>&1 | FileCheck %s --check-prefix=MSE
; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb "-passes=scop(print<polly-mse>)" -pass-remarks-analysis="polly-mse" -disable-output < %s 2>&1 | FileCheck %s --check-prefix=MSE
;
; Verify that the accesses are correctly expanded for MemoryKind::PHI
diff --git a/polly/test/MaximalStaticExpansion/working_value_expansion.ll b/polly/test/MaximalStaticExpansion/working_value_expansion.ll
index d54eff9e03ec..cc28a78c3867 100644
--- a/polly/test/MaximalStaticExpansion/working_value_expansion.ll
+++ b/polly/test/MaximalStaticExpansion/working_value_expansion.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-mse -polly-print-scops -disable-output < %s | FileCheck %s
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-mse>)" -disable-output < %s | FileCheck %s
;
; Verify that the accesses are correctly expanded for MemoryKind::Value
diff --git a/polly/test/PruneUnprofitable/prune_only_scalardeps.ll b/polly/test/PruneUnprofitable/prune_only_scalardeps.ll
index 31db5560c051..9cc2aecf002d 100644
--- a/polly/test/PruneUnprofitable/prune_only_scalardeps.ll
+++ b/polly/test/PruneUnprofitable/prune_only_scalardeps.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-process-unprofitable=false -polly-unprofitable-scalar-accs=false -polly-prune-unprofitable -disable-output -stats < %s 2>&1 | FileCheck -match-full-lines %s
; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-process-unprofitable=false -polly-unprofitable-scalar-accs=false "-passes=scop(polly-prune-unprofitable)" -disable-output -stats < %s 2>&1 | FileCheck -match-full-lines %s
; REQUIRES: asserts
;
diff --git a/polly/test/ScheduleOptimizer/2012-03-16-Empty-Domain.ll b/polly/test/ScheduleOptimizer/2012-03-16-Empty-Domain.ll
index 5acc35343ac3..38facb1688c4 100644
--- a/polly/test/ScheduleOptimizer/2012-03-16-Empty-Domain.ll
+++ b/polly/test/ScheduleOptimizer/2012-03-16-Empty-Domain.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-opt-isl -S < %s
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -S < %s
target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-a0:0-n32"
define void @sdbout_label() nounwind {
diff --git a/polly/test/ScheduleOptimizer/2013-04-11-Empty-Domain-two.ll b/polly/test/ScheduleOptimizer/2013-04-11-Empty-Domain-two.ll
index 3f4237b330b2..835986049899 100644
--- a/polly/test/ScheduleOptimizer/2013-04-11-Empty-Domain-two.ll
+++ b/polly/test/ScheduleOptimizer/2013-04-11-Empty-Domain-two.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-opt-isl -S < %s
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -S < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; Check that we handle statements with an empty iteration domain correctly.
diff --git a/polly/test/ScheduleOptimizer/GreedyFuse/fuse-double.ll b/polly/test/ScheduleOptimizer/GreedyFuse/fuse-double.ll
index a61af2d092f3..5e4ce8225a23 100644
--- a/polly/test/ScheduleOptimizer/GreedyFuse/fuse-double.ll
+++ b/polly/test/ScheduleOptimizer/GreedyFuse/fuse-double.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-reschedule=0 -polly-loopfusion-greedy=1 -polly-postopts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-reschedule=1 -polly-loopfusion-greedy=1 -polly-postopts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-reschedule=0 -polly-loopfusion-greedy=1 -polly-postopts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-reschedule=1 -polly-loopfusion-greedy=1 -polly-postopts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s
define void @func(i32 %n, ptr noalias nonnull %A, ptr noalias nonnull %B) {
entry:
diff --git a/polly/test/ScheduleOptimizer/GreedyFuse/fuse-except-first.ll b/polly/test/ScheduleOptimizer/GreedyFuse/fuse-except-first.ll
index 185d5c5b8c25..de4c387a1d87 100644
--- a/polly/test/ScheduleOptimizer/GreedyFuse/fuse-except-first.ll
+++ b/polly/test/ScheduleOptimizer/GreedyFuse/fuse-except-first.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-reschedule=0 -polly-loopfusion-greedy=1 -polly-postopts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s --check-prefixes=CHECK,RAW
-; RUN: opt %loadPolly -polly-reschedule=1 -polly-loopfusion-greedy=1 -polly-postopts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s --check-prefixes=CHECK,OPT
+; RUN: opt %loadNPMPolly -polly-reschedule=0 -polly-loopfusion-greedy=1 -polly-postopts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s --check-prefixes=CHECK,RAW
+; RUN: opt %loadNPMPolly -polly-reschedule=1 -polly-loopfusion-greedy=1 -polly-postopts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s --check-prefixes=CHECK,OPT
define void @func(i32 %n, ptr noalias nonnull %A, ptr noalias nonnull %B, i32 %k) {
entry:
diff --git a/polly/test/ScheduleOptimizer/GreedyFuse/fuse-except-third.ll b/polly/test/ScheduleOptimizer/GreedyFuse/fuse-except-third.ll
index f1eca0ede061..91bd549c3c7e 100644
--- a/polly/test/ScheduleOptimizer/GreedyFuse/fuse-except-third.ll
+++ b/polly/test/ScheduleOptimizer/GreedyFuse/fuse-except-third.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-reschedule=0 -polly-loopfusion-greedy=1 -polly-postopts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s --check-prefixes=CHECK,RAW
-; RUN: opt %loadPolly -polly-reschedule=1 -polly-loopfusion-greedy=1 -polly-postopts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: opt %loadNPMPolly -polly-reschedule=0 -polly-loopfusion-greedy=1 -polly-postopts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s --check-prefixes=CHECK,RAW
+; RUN: opt %loadNPMPolly -polly-reschedule=1 -polly-loopfusion-greedy=1 -polly-postopts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s --check-prefixes=CHECK
define void @func(i32 %n, ptr noalias nonnull %A, ptr noalias nonnull %B, i32 %k) {
entry:
diff --git a/polly/test/ScheduleOptimizer/GreedyFuse/fuse-inner-carried.ll b/polly/test/ScheduleOptimizer/GreedyFuse/fuse-inner-carried.ll
index 35903ced7741..8b69d9e12c0f 100644
--- a/polly/test/ScheduleOptimizer/GreedyFuse/fuse-inner-carried.ll
+++ b/polly/test/ScheduleOptimizer/GreedyFuse/fuse-inner-carried.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-reschedule=0 -polly-loopfusion-greedy=1 -polly-postopts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s --check-prefixes=CHECK,RAW
-; RUN: opt %loadPolly -polly-reschedule=1 -polly-loopfusion-greedy=1 -polly-postopts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s --check-prefixes=CHECK,OPT
+; RUN: opt %loadNPMPolly -polly-reschedule=0 -polly-loopfusion-greedy=1 -polly-postopts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s --check-prefixes=CHECK,RAW
+; RUN: opt %loadNPMPolly -polly-reschedule=1 -polly-loopfusion-greedy=1 -polly-postopts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s --check-prefixes=CHECK,OPT
define void @func(i32 %n, ptr noalias nonnull %A) {
entry:
diff --git a/polly/test/ScheduleOptimizer/GreedyFuse/fuse-inner-third.ll b/polly/test/ScheduleOptimizer/GreedyFuse/fuse-inner-third.ll
index 1fb8c001069f..49d112474034 100644
--- a/polly/test/ScheduleOptimizer/GreedyFuse/fuse-inner-third.ll
+++ b/polly/test/ScheduleOptimizer/GreedyFuse/fuse-inner-third.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-reschedule=0 -polly-loopfusion-greedy=1 -polly-postopts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s --check-prefixes=CHECK,RAW
-; RUN: opt %loadPolly -polly-reschedule=1 -polly-loopfusion-greedy=1 -polly-postopts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: opt %loadNPMPolly -polly-reschedule=0 -polly-loopfusion-greedy=1 -polly-postopts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s --check-prefixes=CHECK,RAW
+; RUN: opt %loadNPMPolly -polly-reschedule=1 -polly-loopfusion-greedy=1 -polly-postopts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s --check-prefixes=CHECK
define void @func(i32 %n, ptr noalias nonnull %A, ptr noalias nonnull %B, i32 %k) {
entry:
diff --git a/polly/test/ScheduleOptimizer/GreedyFuse/fuse-inner.ll b/polly/test/ScheduleOptimizer/GreedyFuse/fuse-inner.ll
index 2db6833fa897..a449a2fda9ba 100644
--- a/polly/test/ScheduleOptimizer/GreedyFuse/fuse-inner.ll
+++ b/polly/test/ScheduleOptimizer/GreedyFuse/fuse-inner.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-reschedule=0 -polly-loopfusion-greedy=1 -polly-postopts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-reschedule=1 -polly-loopfusion-greedy=1 -polly-postopts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-reschedule=0 -polly-loopfusion-greedy=1 -polly-postopts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-reschedule=1 -polly-loopfusion-greedy=1 -polly-postopts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s
define void @func(i32 %n, ptr noalias nonnull %A) {
entry:
diff --git a/polly/test/ScheduleOptimizer/GreedyFuse/fuse-simple.ll b/polly/test/ScheduleOptimizer/GreedyFuse/fuse-simple.ll
index 49d008ba2cfa..798e9b9a7c14 100644
--- a/polly/test/ScheduleOptimizer/GreedyFuse/fuse-simple.ll
+++ b/polly/test/ScheduleOptimizer/GreedyFuse/fuse-simple.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-reschedule=0 -polly-loopfusion-greedy=1 -polly-postopts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-reschedule=1 -polly-loopfusion-greedy=1 -polly-postopts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-reschedule=0 -polly-loopfusion-greedy=1 -polly-postopts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-reschedule=1 -polly-loopfusion-greedy=1 -polly-postopts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s
define void @func(i32 %n, ptr noalias nonnull %A) {
entry:
diff --git a/polly/test/ScheduleOptimizer/GreedyFuse/nofuse-simple.ll b/polly/test/ScheduleOptimizer/GreedyFuse/nofuse-simple.ll
index 175b85997ec0..4d0ccc988a5c 100644
--- a/polly/test/ScheduleOptimizer/GreedyFuse/nofuse-simple.ll
+++ b/polly/test/ScheduleOptimizer/GreedyFuse/nofuse-simple.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-reschedule=0 -polly-loopfusion-greedy=1 -polly-postopts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-reschedule=1 -polly-loopfusion-greedy=1 -polly-postopts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-reschedule=0 -polly-loopfusion-greedy=1 -polly-postopts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-reschedule=1 -polly-loopfusion-greedy=1 -polly-postopts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s
; This could theoretically be fused by adjusting the offset of the second loop by %k (instead of relying on schedule dimensions).
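An illustrative C sketch of the offset-adjusted fusion alluded to in the comment above; the array, bound, and offset names are hypothetical (not taken from the test) and the rewrite assumes k >= 0:

void before(float *A, int n, int k) {
  for (int i = 0; i < n; i++)      /* loop 1 touches A[0 .. n-1]   */
    A[i] += 1.0f;
  for (int i = 0; i < n; i++)      /* loop 2 touches A[k .. n+k-1] */
    A[i + k] *= 2.0f;              /* the offset blocks direct fusion */
}

void after(float *A, int n, int k) {
  /* Rebasing loop 2 by k aligns the subscripts; per element, the +1
     still happens before the *2, so one combined loop is legal. */
  for (int j = 0; j < n + k; j++) {
    if (j < n)  A[j] += 1.0f;
    if (j >= k) A[j] *= 2.0f;
  }
}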
diff --git a/polly/test/ScheduleOptimizer/GreedyFuse/nofuse-with-middle.ll b/polly/test/ScheduleOptimizer/GreedyFuse/nofuse-with-middle.ll
index 48ba20347d55..bf470b91a702 100644
--- a/polly/test/ScheduleOptimizer/GreedyFuse/nofuse-with-middle.ll
+++ b/polly/test/ScheduleOptimizer/GreedyFuse/nofuse-with-middle.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-reschedule=0 -polly-loopfusion-greedy=1 -polly-postopts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-reschedule=1 -polly-loopfusion-greedy=1 -polly-postopts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-reschedule=0 -polly-loopfusion-greedy=1 -polly-postopts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-reschedule=1 -polly-loopfusion-greedy=1 -polly-postopts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s
define void @func(i32 %n, ptr noalias nonnull %A, ptr noalias nonnull %B, i32 %k) {
entry:
diff --git a/polly/test/ScheduleOptimizer/ManualOptimization/disable_nonforced.ll b/polly/test/ScheduleOptimizer/ManualOptimization/disable_nonforced.ll
index 537721f8718a..b0f75dd50ef8 100644
--- a/polly/test/ScheduleOptimizer/ManualOptimization/disable_nonforced.ll
+++ b/polly/test/ScheduleOptimizer/ManualOptimization/disable_nonforced.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-opt-isl -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Check that the disable_nonforced metadata is honored; optimization
; heuristics/rescheduling must not be applied.
diff --git a/polly/test/ScheduleOptimizer/ManualOptimization/distribute_heuristic.ll b/polly/test/ScheduleOptimizer/ManualOptimization/distribute_heuristic.ll
index aaf4d27f4c5e..900360d7533f 100644
--- a/polly/test/ScheduleOptimizer/ManualOptimization/distribute_heuristic.ll
+++ b/polly/test/ScheduleOptimizer/ManualOptimization/distribute_heuristic.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-reschedule=0 -polly-pragma-based-opts=1 -polly-print-opt-isl -disable-output < %s | FileCheck %s --match-full-lines --check-prefix=ON
-; RUN: opt %loadPolly -polly-reschedule=0 -polly-pragma-based-opts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s --match-full-lines --check-prefix=OFF
+; RUN: opt %loadNPMPolly -polly-reschedule=0 -polly-pragma-based-opts=1 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s --match-full-lines --check-prefix=ON
+; RUN: opt %loadNPMPolly -polly-reschedule=0 -polly-pragma-based-opts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s --match-full-lines --check-prefix=OFF
;
define void @func(i32 %n, ptr noalias nonnull %A, ptr noalias nonnull %B) {
entry:
diff --git a/polly/test/ScheduleOptimizer/ManualOptimization/distribute_illegal_looploc.ll b/polly/test/ScheduleOptimizer/ManualOptimization/distribute_illegal_looploc.ll
index b1e94227c9a5..d45b62433dbb 100644
--- a/polly/test/ScheduleOptimizer/ManualOptimization/distribute_illegal_looploc.ll
+++ b/polly/test/ScheduleOptimizer/ManualOptimization/distribute_illegal_looploc.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-opt-isl -polly-reschedule=0 -polly-pragma-based-opts=1 -disable-output < %s 2>&1 | FileCheck %s --match-full-lines
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -polly-reschedule=0 -polly-pragma-based-opts=1 -disable-output < %s 2>&1 | FileCheck %s --match-full-lines
;
; CHECK: warning: distribute_illegal.c:2:3: not applying loop fission/distribution: cannot ensure semantic equivalence due to possible dependency violations
;
diff --git a/polly/test/ScheduleOptimizer/ManualOptimization/distribute_illegal_pragmaloc.ll b/polly/test/ScheduleOptimizer/ManualOptimization/distribute_illegal_pragmaloc.ll
index fc0df85b1346..d835e66693fb 100644
--- a/polly/test/ScheduleOptimizer/ManualOptimization/distribute_illegal_pragmaloc.ll
+++ b/polly/test/ScheduleOptimizer/ManualOptimization/distribute_illegal_pragmaloc.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-opt-isl -polly-reschedule=0 -polly-pragma-based-opts=1 -disable-output < %s 2>&1 | FileCheck %s --match-full-lines
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -polly-reschedule=0 -polly-pragma-based-opts=1 -disable-output < %s 2>&1 | FileCheck %s --match-full-lines
;
; CHECK: warning: distribute_illegal.c:1:42: not applying loop fission/distribution: cannot ensure semantic equivalence due to possible dependency violations
;
diff --git a/polly/test/ScheduleOptimizer/ManualOptimization/unroll_disable.ll b/polly/test/ScheduleOptimizer/ManualOptimization/unroll_disable.ll
index 9537f3a9b0a8..a5781a7f6036 100644
--- a/polly/test/ScheduleOptimizer/ManualOptimization/unroll_disable.ll
+++ b/polly/test/ScheduleOptimizer/ManualOptimization/unroll_disable.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-pragma-based-opts=1 -polly-print-opt-isl -disable-output < %s | FileCheck %s --match-full-lines
+; RUN: opt %loadNPMPolly -polly-pragma-based-opts=1 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s --match-full-lines
;
; Override unroll metadata with llvm.loop.unroll.disable.
;
diff --git a/polly/test/ScheduleOptimizer/ManualOptimization/unroll_double.ll b/polly/test/ScheduleOptimizer/ManualOptimization/unroll_double.ll
index b0310970f8d6..cccf136a1c4a 100644
--- a/polly/test/ScheduleOptimizer/ManualOptimization/unroll_double.ll
+++ b/polly/test/ScheduleOptimizer/ManualOptimization/unroll_double.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-opt-isl -disable-output < %s | FileCheck %s --match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s --match-full-lines
;
; Apply two loop transformations. First partial, then full unrolling.
;
diff --git a/polly/test/ScheduleOptimizer/ManualOptimization/unroll_full.ll b/polly/test/ScheduleOptimizer/ManualOptimization/unroll_full.ll
index b9a4c845477c..4d499078a436 100644
--- a/polly/test/ScheduleOptimizer/ManualOptimization/unroll_full.ll
+++ b/polly/test/ScheduleOptimizer/ManualOptimization/unroll_full.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-opt-isl -disable-output < %s | FileCheck %s --match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s --match-full-lines
;
; Full unroll of a loop with 5 iterations.
;
diff --git a/polly/test/ScheduleOptimizer/ManualOptimization/unroll_heuristic.ll b/polly/test/ScheduleOptimizer/ManualOptimization/unroll_heuristic.ll
index 0387aecd683b..d67472ab8693 100644
--- a/polly/test/ScheduleOptimizer/ManualOptimization/unroll_heuristic.ll
+++ b/polly/test/ScheduleOptimizer/ManualOptimization/unroll_heuristic.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-pragma-based-opts=1 -polly-print-opt-isl -disable-output < %s | FileCheck %s --match-full-lines
-; RUN: opt %loadPolly -polly-pragma-based-opts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s --match-full-lines
+; RUN: opt %loadNPMPolly -polly-pragma-based-opts=1 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s --match-full-lines
+; RUN: opt %loadNPMPolly -polly-pragma-based-opts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s --match-full-lines
;
; Unrolling with heuristic factor.
; Currently not supported and expected to be handled by LLVM's unroll pass.
diff --git a/polly/test/ScheduleOptimizer/ManualOptimization/unroll_partial.ll b/polly/test/ScheduleOptimizer/ManualOptimization/unroll_partial.ll
index 81e40f0a98bb..90101b4fde39 100644
--- a/polly/test/ScheduleOptimizer/ManualOptimization/unroll_partial.ll
+++ b/polly/test/ScheduleOptimizer/ManualOptimization/unroll_partial.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-pragma-based-opts=1 -polly-print-opt-isl -disable-output < %s | FileCheck %s --match-full-lines
-; RUN: opt %loadPolly -polly-pragma-based-opts=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s --check-prefix=OFF --match-full-lines
+; RUN: opt %loadNPMPolly -polly-pragma-based-opts=1 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s --match-full-lines
+; RUN: opt %loadNPMPolly -polly-pragma-based-opts=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s --check-prefix=OFF --match-full-lines
;
; Partial unroll by a factor of 4.
;
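As a side note for readers of these unroll tests, a hand-written C sketch of what a partial unroll by a factor of 4 does (illustrative only; the pragma is the generic clang spelling of the request, not something this test uses):

void scale(float *A, int n) {
  #pragma clang loop unroll_count(4) /* source-level request */
  for (int i = 0; i < n; i++)
    A[i] *= 2.0f;
}

/* the effect of the transformation, written out by hand */
void scale_unrolled(float *A, int n) {
  int i = 0;
  for (; i + 3 < n; i += 4) {        /* main body: 4 copies per trip */
    A[i]     *= 2.0f;
    A[i + 1] *= 2.0f;
    A[i + 2] *= 2.0f;
    A[i + 3] *= 2.0f;
  }
  for (; i < n; i++)                 /* epilogue: up to 3 leftover iterations */
    A[i] *= 2.0f;
}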
diff --git a/polly/test/ScheduleOptimizer/ManualOptimization/unroll_partial_followup.ll b/polly/test/ScheduleOptimizer/ManualOptimization/unroll_partial_followup.ll
index 8665f68b99c1..4cfa3fb91151 100644
--- a/polly/test/ScheduleOptimizer/ManualOptimization/unroll_partial_followup.ll
+++ b/polly/test/ScheduleOptimizer/ManualOptimization/unroll_partial_followup.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -polly-print-opt-isl -disable-output < %s | FileCheck %s --check-prefix=OPT --match-full-lines
-; RUN: opt %loadPolly -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=AST --match-full-lines
-; RUN: opt %loadPolly -polly-opt-isl -polly-codegen -simplifycfg -S < %s | FileCheck %s --check-prefix=CODEGEN
+; RUN: opt %loadNPMPolly '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s --check-prefix=OPT --match-full-lines
+; RUN: opt %loadNPMPolly '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s --check-prefix=AST --match-full-lines
+; RUN: opt %loadNPMPolly '-passes=scop(polly-opt-isl,polly-codegen),simplifycfg' -S < %s | FileCheck %s --check-prefix=CODEGEN
;
; Partial unroll by a factor of 4.
;
@@ -49,7 +49,7 @@ return:
; OPT-NEXT: - filter: "[n] -> { Stmt_body[i0] : (1 + i0) mod 4 = 0 }"
-; AST-LABEL: Printing analysis 'Polly - Generate an AST of the SCoP (isl)' for region: 'for => return' in function 'func':
+; AST-LABEL: :: isl ast :: func :: %for---%return
; AST: // Loop with Metadata
; AST-NEXT: for (int c0 = 0; c0 < n; c0 += 4) {
diff --git a/polly/test/ScheduleOptimizer/SIMDInParallelFor.ll b/polly/test/ScheduleOptimizer/SIMDInParallelFor.ll
index 8585634e10ff..3f6f50e34775 100644
--- a/polly/test/ScheduleOptimizer/SIMDInParallelFor.ll
+++ b/polly/test/ScheduleOptimizer/SIMDInParallelFor.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-parallel -polly-vectorizer=stripmine -polly-codegen-verify -polly-opt-isl -polly-print-ast -polly-codegen -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-parallel -polly-vectorizer=stripmine -polly-codegen-verify '-passes=polly-opt-isl,print<polly-ast>,polly-codegen' -disable-output < %s | FileCheck %s
;
; Check that there are no nested #pragma omp parallel for inside a
; #pragma omp parallel for loop.
diff --git a/polly/test/ScheduleOptimizer/computeout.ll b/polly/test/ScheduleOptimizer/computeout.ll
index 35e3416f91d1..a3286b481ffb 100644
--- a/polly/test/ScheduleOptimizer/computeout.ll
+++ b/polly/test/ScheduleOptimizer/computeout.ll
@@ -1,6 +1,4 @@
-; RUN: opt -S %loadPolly -basic-aa -polly-opt-isl -polly-isl-arg=--no-schedule-serialize-sccs -polly-print-ast -disable-output < %s | FileCheck %s
; RUN: opt -S %loadNPMPolly "-passes=scop(polly-opt-isl,print<polly-ast>)" -polly-isl-arg=--no-schedule-serialize-sccs -disable-output < %s | FileCheck %s
-; RUN: opt -S %loadPolly -basic-aa -polly-opt-isl -polly-isl-arg=--schedule-serialize-sccs -polly-dependences-computeout=1 -polly-print-ast -disable-output < %s | FileCheck %s -check-prefix=TIMEOUT
; RUN: opt -S %loadNPMPolly "-passes=scop(polly-opt-isl,print<polly-ast>)" -polly-isl-arg=--no-schedule-serialize-sccs -polly-dependences-computeout=1 -disable-output < %s | FileCheck %s -check-prefix=TIMEOUT
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/polly/test/ScheduleOptimizer/ensure-correct-tile-sizes.ll b/polly/test/ScheduleOptimizer/ensure-correct-tile-sizes.ll
index 43caca5372ad..928ee858ae6d 100644
--- a/polly/test/ScheduleOptimizer/ensure-correct-tile-sizes.ll
+++ b/polly/test/ScheduleOptimizer/ensure-correct-tile-sizes.ll
@@ -1,9 +1,9 @@
-; RUN: opt %loadPolly -polly-process-unprofitable -polly-remarks-minimal \
-; RUN: -polly-opt-isl -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -polly-process-unprofitable -polly-remarks-minimal \
+; RUN: '-passes=polly-opt-isl,print<polly-ast>' -polly-pattern-matching-based-opts=true \
; RUN: -polly-target-throughput-vector-fma=1 \
; RUN: -polly-target-latency-vector-fma=1 \
; RUN: -polly-target-vector-register-bitwidth=4096 \
-; RUN: -polly-target-1st-cache-level-associativity=3 -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: -polly-target-1st-cache-level-associativity=3 -disable-output < %s | FileCheck %s
;
; /* Test that Polly does not crash due to configurations that can lead to
; incorrect tile size computations.
diff --git a/polly/test/ScheduleOptimizer/focaltech_test_detail_threshold-7bc17e.ll b/polly/test/ScheduleOptimizer/focaltech_test_detail_threshold-7bc17e.ll
index daa1afdd0aa8..b533cb870bdc 100644
--- a/polly/test/ScheduleOptimizer/focaltech_test_detail_threshold-7bc17e.ll
+++ b/polly/test/ScheduleOptimizer/focaltech_test_detail_threshold-7bc17e.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-vectorizer=stripmine -polly-invariant-load-hoisting -polly-optimized-scops -polly-print-opt-isl -disable-output < %s | FileCheck %s
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-opt-isl>)" -polly-vectorizer=stripmine -polly-invariant-load-hoisting -disable-output < %s | FileCheck %s
;
; llvm.org/PR46578
diff --git a/polly/test/ScheduleOptimizer/full_partial_tile_separation.ll b/polly/test/ScheduleOptimizer/full_partial_tile_separation.ll
index 06e86d7da1c6..3dd579ed736f 100644
--- a/polly/test/ScheduleOptimizer/full_partial_tile_separation.ll
+++ b/polly/test/ScheduleOptimizer/full_partial_tile_separation.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S %loadPolly -polly-pattern-matching-based-opts=false -polly-vectorizer=stripmine -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt -S %loadNPMPolly -polly-pattern-matching-based-opts=false -polly-vectorizer=stripmine '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s
; CHECK: // 1st level tiling - Tiles
; CHECK-NEXT: #pragma known-parallel
; CHECK-NEXT: for (int c0 = 0; c0 <= floord(ni - 1, 32); c0 += 1)
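The CHECK lines above correspond roughly to this C shape: full 32-iteration tiles are separated from the partial remainder tile so the full ones can run without bounds checks (a 1-D sketch with hypothetical names, not the test's kernel):

void tiles(float *A, int n) {
  int full_end = n - n % 32;
  for (int ii = 0; ii < full_end; ii += 32) /* full tiles: exactly 32 iterations */
    for (int i = ii; i < ii + 32; i++)
      A[i] += 1.0f;
  for (int i = full_end; i < n; i++)        /* partial tile: the remainder */
    A[i] += 1.0f;
}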
diff --git a/polly/test/ScheduleOptimizer/line-tiling-2.ll b/polly/test/ScheduleOptimizer/line-tiling-2.ll
index eb374cb07cf3..3a2c566d19d3 100644
--- a/polly/test/ScheduleOptimizer/line-tiling-2.ll
+++ b/polly/test/ScheduleOptimizer/line-tiling-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-tile-sizes=1,64 -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-tile-sizes=1,64 '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s
; CHECK: for (int c0 = 0; c0 <= 1023; c0 += 1)
; CHECK: for (int c1 = 0; c1 <= 7; c1 += 1)
diff --git a/polly/test/ScheduleOptimizer/line-tiling.ll b/polly/test/ScheduleOptimizer/line-tiling.ll
index 2f14ac1d02a5..0dbdeff4742b 100644
--- a/polly/test/ScheduleOptimizer/line-tiling.ll
+++ b/polly/test/ScheduleOptimizer/line-tiling.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-tile-sizes=64,1 -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-tile-sizes=64,1 '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s
; CHECK: for (int c0 = 0; c0 <= 15; c0 += 1)
; CHECK: for (int c1 = 0; c1 <= 511; c1 += 1)
diff --git a/polly/test/ScheduleOptimizer/mat_mul_pattern_data_layout.ll b/polly/test/ScheduleOptimizer/mat_mul_pattern_data_layout.ll
index faf51e097a70..8f270b94617f 100644
--- a/polly/test/ScheduleOptimizer/mat_mul_pattern_data_layout.ll
+++ b/polly/test/ScheduleOptimizer/mat_mul_pattern_data_layout.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -polly-pattern-matching-based-opts=true \
; RUN: -polly-target-throughput-vector-fma=1 \
; RUN: -polly-target-latency-vector-fma=8 \
; RUN: -polly-target-1st-cache-level-associativity=8 \
diff --git a/polly/test/ScheduleOptimizer/mat_mul_pattern_data_layout_2.ll b/polly/test/ScheduleOptimizer/mat_mul_pattern_data_layout_2.ll
index 30b693a2e241..de1c815f9235 100644
--- a/polly/test/ScheduleOptimizer/mat_mul_pattern_data_layout_2.ll
+++ b/polly/test/ScheduleOptimizer/mat_mul_pattern_data_layout_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -polly-pattern-matching-based-opts=true \
; RUN: -polly-target-throughput-vector-fma=1 \
; RUN: -polly-target-latency-vector-fma=8 \
; RUN: -polly-target-1st-cache-level-associativity=8 \
@@ -6,7 +6,7 @@
; RUN: -polly-target-1st-cache-level-size=32768 \
; RUN: -polly-target-2nd-cache-level-size=262144 \
; RUN: -polly-target-vector-register-bitwidth=256 \
-; RUN: -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s
;
; /* C := alpha*A*B + beta*C */
; /* _PB_NK % Kc != 0 */
@@ -18,7 +18,7 @@
; C[i][j] += alpha * A[i][k] * B[k][j];
; }
;
-; CHECK-LABEL: Printing analysis 'Polly - Generate an AST from the SCoP (isl)' for region: 'bb8 => bb32' in function 'kernel_gemm':
+; CHECK-LABEL: :: isl ast :: kernel_gemm :: %bb8---%bb32
; CHECK: {
; CHECK-NEXT: // 1st level tiling - Tiles
; CHECK-NEXT: for (int c0 = 0; c0 <= 32; c0 += 1)
diff --git a/polly/test/ScheduleOptimizer/one-dimensional-band.ll b/polly/test/ScheduleOptimizer/one-dimensional-band.ll
index 4592907a44ad..a097d4a43cfd 100644
--- a/polly/test/ScheduleOptimizer/one-dimensional-band.ll
+++ b/polly/test/ScheduleOptimizer/one-dimensional-band.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s
;
; void jacobi1d(long T, long N, float *A, float *B) {
; long t, i, j;
diff --git a/polly/test/ScheduleOptimizer/outer_coincidence.ll b/polly/test/ScheduleOptimizer/outer_coincidence.ll
index 2ab33edda86b..7c1af80c9ffa 100644
--- a/polly/test/ScheduleOptimizer/outer_coincidence.ll
+++ b/polly/test/ScheduleOptimizer/outer_coincidence.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-tiling=0 -polly-parallel -polly-opt-outer-coincidence=no -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-tiling=0 -polly-parallel -polly-opt-outer-coincidence=yes -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=OUTER
+; RUN: opt %loadNPMPolly -polly-tiling=0 -polly-parallel -polly-opt-outer-coincidence=no '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-tiling=0 -polly-parallel -polly-opt-outer-coincidence=yes '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s --check-prefix=OUTER
; By skewing, the diagonal can be made parallel. ISL does this when
; the 'outer_coincidence' option is enabled.
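A minimal C sketch of that skewing idea on a hypothetical 2-D recurrence (not the test's kernel): each anti-diagonal c = i + j depends only on the previous diagonal, so after skewing the inner loop is parallel.

void wavefront(int N, int M, float A[N][M]) {
  /* original form: A[i][j] = A[i-1][j] + A[i][j-1]; neither the i nor
     the j loop is parallel on its own. The skewed form iterates the
     diagonals c = i + j instead. */
  for (int c = 2; c <= N + M - 2; c++) {
    int lo = (c - (M - 1) > 1) ? c - (M - 1) : 1;
    int hi = (c - 1 < N - 1) ? c - 1 : N - 1;
    #pragma omp parallel for          /* points on one diagonal are independent */
    for (int i = lo; i <= hi; i++) {
      int j = c - i;
      A[i][j] = A[i - 1][j] + A[i][j - 1];
    }
  }
}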
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts-after-delicm.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts-after-delicm.ll
index 66011168fcc1..8228a5c08f59 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts-after-delicm.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts-after-delicm.ll
@@ -1,7 +1,7 @@
-; RUN: opt %loadPolly \
+; RUN: opt %loadNPMPolly \
; RUN: -polly-pattern-matching-based-opts=true \
-; RUN: -polly-optree -polly-delicm -polly-simplify \
-; RUN: -polly-opt-isl -polly-tc-opt=true -debug -disable-output < %s 2>&1 \
+; RUN: '-passes=polly-optree,polly-delicm,polly-simplify,polly-opt-isl' \
+; RUN: -polly-tc-opt=true -debug -disable-output < %s 2>&1 \
; RUN: | FileCheck %s
; REQUIRES: asserts
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts-after-delicm_2.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts-after-delicm_2.ll
index 95da89f90755..4bda7584f596 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts-after-delicm_2.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts-after-delicm_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-delicm -polly-simplify -polly-opt-isl \
+; RUN: opt %loadNPMPolly '-passes=polly-delicm,polly-simplify,polly-opt-isl' \
; RUN: -polly-pattern-matching-based-opts=true \
; RUN: -polly-tc-opt=true -debug -disable-output < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts.ll
index 7604257f98e0..09118e252233 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts.ll
@@ -1,8 +1,8 @@
-; RUN: opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=false \
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -polly-pattern-matching-based-opts=false \
; RUN: -debug -polly-tc-opt -disable-output < %s 2>&1 | FileCheck %s
-; RUN: opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=true -debug -polly-tc-opt -disable-output < %s 2>&1 | FileCheck %s --check-prefix=PATTERN-MATCHING-OPTS
-; RUN: opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=true -polly-ast-detect-parallel -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=PARALLEL-AST
-; RUN: opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=true -stats -disable-output < %s 2>&1 | FileCheck %s --check-prefix=STATS -match-full-lines
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -polly-pattern-matching-based-opts=true -debug -polly-tc-opt -disable-output < %s 2>&1 | FileCheck %s --check-prefix=PATTERN-MATCHING-OPTS
+; RUN: opt %loadNPMPolly '-passes=polly-opt-isl,print<polly-ast>' -polly-pattern-matching-based-opts=true -polly-ast-detect-parallel -disable-output < %s | FileCheck %s --check-prefix=PARALLEL-AST
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -polly-pattern-matching-based-opts=true -stats -disable-output < %s 2>&1 | FileCheck %s --check-prefix=STATS -match-full-lines
; REQUIRES: asserts
;
; /* C := alpha*A*B + beta*C */
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_11.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_11.ll
index ccdb39b60d75..b771d1f87537 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_11.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_11.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop \
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-opt-isl' \
; RUN: -polly-import-jscop-postfix=transformed \
; RUN: -polly-pattern-matching-based-opts=true \
; RUN: -polly-target-throughput-vector-fma=1 \
@@ -8,7 +8,7 @@
; RUN: -polly-target-1st-cache-level-size=32768 \
; RUN: -polly-target-vector-register-bitwidth=256 \
; RUN: -polly-target-2nd-cache-level-size=262144 \
-; RUN: -polly-opt-isl -debug \
+; RUN: -debug \
; RUN: -polly-tc-opt=true -disable-output < %s 2>&1 \
; RUN: | FileCheck %s
; REQUIRES: asserts
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_12.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_12.ll
index dd39fec5e21f..238f6dd798e6 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_12.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_12.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -polly-pattern-matching-based-opts=true \
; RUN: -polly-target-throughput-vector-fma=1 \
; RUN: -polly-target-latency-vector-fma=8 \
; RUN: -polly-target-1st-cache-level-associativity=8 \
@@ -6,7 +6,7 @@
; RUN: -polly-target-1st-cache-level-size=32768 \
; RUN: -polly-target-vector-register-bitwidth=256 \
; RUN: -polly-target-2nd-cache-level-size=262144 \
-; RUN: -polly-opt-isl -disable-output < %s
+; RUN: -passes=polly-opt-isl -disable-output < %s
;
; Test whether isolation works as expected.
;
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_13.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_13.ll
index e086dd36c4d9..0e4540eb7ba3 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_13.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_13.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -polly-pattern-matching-based-opts=true \
; RUN: -polly-target-throughput-vector-fma=2 \
; RUN: -polly-target-latency-vector-fma=8 \
; RUN: -polly-target-1st-cache-level-associativity=8 \
@@ -6,7 +6,7 @@
; RUN: -polly-target-1st-cache-level-size=32768 \
; RUN: -polly-target-vector-register-bitwidth=128 \
; RUN: -polly-target-2nd-cache-level-size=262144 \
-; RUN: -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s
;
; Test whether isolation works as expected.
;
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_14.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_14.ll
index a4c71c2dace5..9678ad83ff04 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_14.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_14.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-opt-isl \
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-opt-isl,polly-codegen' \
; RUN: -polly-target-throughput-vector-fma=1 \
; RUN: -polly-target-latency-vector-fma=8 \
; RUN: -polly-target-1st-cache-level-associativity=8 \
@@ -6,7 +6,7 @@
; RUN: -polly-target-1st-cache-level-size=32768 \
; RUN: -polly-target-vector-register-bitwidth=256 \
; RUN: -polly-target-2nd-cache-level-size=262144 \
-; RUN: -polly-import-jscop-postfix=transformed -polly-codegen -S < %s \
+; RUN: -polly-import-jscop-postfix=transformed -S < %s \
; RUN: | FileCheck %s
;
; Check that we disable the Loop Vectorizer.
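For context (an aside, not part of the patch): once Polly has optimized a loop nest itself, it tags the result so LLVM's Loop Vectorizer leaves it alone. A hedged C sketch of the equivalent source-level request, which lowers to the same kind of llvm.loop metadata:

void already_handled(float *restrict A, int n) {
  /* the pragma lowers to llvm.loop metadata that tells LLVM's Loop
     Vectorizer to skip this loop */
  #pragma clang loop vectorize(disable)
  for (int i = 0; i < n; i++)
    A[i] *= 2.0f;
}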
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_15.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_15.ll
index a8da21955b63..e74884d59c31 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_15.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_15.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -polly-pattern-matching-based-opts=true \
; RUN: -debug-only=polly-opt-isl -disable-output \
; RUN: -polly-tc-opt=true < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_16.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_16.ll
index c1ad3017a0d4..9c99a090b69e 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_16.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_16.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -polly-pattern-matching-based-opts=true \
; RUN: -polly-tc-opt=true -debug -disable-output < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
;
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_17.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_17.ll
index 002816a4ae80..8e14035ce862 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_17.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_17.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -polly-pattern-matching-based-opts=true \
; RUN: -polly-tc-opt=true -debug -disable-output < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
;
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_18.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_18.ll
index d5679c7ae2f7..4f562c306f96 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_18.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_18.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -polly-pattern-matching-based-opts=true \
; RUN: -polly-tc-opt=true -debug -disable-output < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
;
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_19.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_19.ll
index 4e1620abd252..32ded897d4ff 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_19.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_19.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -polly-pattern-matching-based-opts=true \
; RUN: -polly-tc-opt=true -debug -disable-output < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
;
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_2.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_2.ll
index 01e336ebc60f..f0c0177da84b 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_2.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -polly-pattern-matching-based-opts=true \
; RUN: -polly-tc-opt=true -debug -disable-output < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
;
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_20.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_20.ll
index 0be08d8d493c..155177bdfade 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_20.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_20.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -polly-pattern-matching-based-opts=true \
; RUN: -polly-tc-opt=true -debug -disable-output < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
;
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_21.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_21.ll
index 9b2df49698a1..3d21ac3859a7 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_21.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_21.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -polly-pattern-matching-based-opts=true \
; RUN: -polly-tc-opt=true -debug -disable-output < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
;
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_22.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_22.ll
index 3d3641df5098..00a4bf885aef 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_22.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_22.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -polly-pattern-matching-based-opts=true \
; RUN: -polly-tc-opt=true -debug -disable-output < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
;
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_24.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_24.ll
index 895961488014..bfe5c5249a3a 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_24.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_24.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-reschedule=0 -polly-opt-isl \
+; RUN: opt %loadNPMPolly -polly-reschedule=0 -passes=polly-opt-isl \
; RUN: -polly-pattern-matching-based-opts=true -polly-tc-opt=true \
; RUN: -debug -disable-output < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_25.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_25.ll
index 8a3957909d9d..a2e1ced3e632 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_25.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_25.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -polly-pattern-matching-based-opts=true \
; RUN: -polly-tc-opt=true -debug -disable-output < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
;
@@ -53,4 +53,4 @@ for.body8: ; preds = %for.body8, %for.con
br i1 %exitcond.not, label %for.cond.cleanup7, label %for.body8
}
-declare double @llvm.fmuladd.f64(double, double, double) \ No newline at end of file
+declare double @llvm.fmuladd.f64(double, double, double)
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_3.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_3.ll
index fab3ac5e58dc..9844d377e609 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_3.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_3.ll
@@ -1,11 +1,11 @@
-; RUN: opt %loadPolly -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -polly-pattern-matching-based-opts=true \
; RUN: -polly-target-throughput-vector-fma=1 \
; RUN: -polly-target-latency-vector-fma=8 \
; RUN: -polly-target-1st-cache-level-size=0 \
; RUN: -polly-target-vector-register-bitwidth=256 \
-; RUN: -polly-opt-isl -polly-print-ast -disable-output < %s 2>&1 | FileCheck %s
+; RUN: '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s 2>&1 | FileCheck %s
-; RUN: opt %loadPolly -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -polly-pattern-matching-based-opts=true \
; RUN: -polly-target-throughput-vector-fma=1 \
; RUN: -polly-target-latency-vector-fma=8 \
; RUN: -polly-target-1st-cache-level-associativity=8 \
@@ -13,7 +13,7 @@
; RUN: -polly-target-1st-cache-level-size=32768 \
; RUN: -polly-target-vector-register-bitwidth=256 \
; RUN: -polly-target-2nd-cache-level-size=262144 \
-; RUN: -polly-opt-isl -polly-print-ast -disable-output < %s 2>&1 | FileCheck %s --check-prefix=EXTRACTION-OF-MACRO-KERNEL
+; RUN: '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=EXTRACTION-OF-MACRO-KERNEL
;
; /* C := alpha*A*B + beta*C */
; for (i = 0; i < _PB_NI; i++)
@@ -24,7 +24,7 @@
; C[i][j] += alpha * A[i][k] * B[k][j];
; }
;
-; CHECK-LABEL: Printing analysis 'Polly - Generate an AST from the SCoP (isl)' for region: 'bb8 => bb32' in function 'kernel_gemm':
+; CHECK-LABEL: :: isl ast :: kernel_gemm :: %bb8---%bb32
; CHECK: {
; CHECK-NEXT: // 1st level tiling - Tiles
; CHECK-NEXT: for (int c0 = 0; c0 <= 32; c0 += 1)
@@ -76,7 +76,7 @@
; CHECK-NEXT: }
; CHECK-NEXT: }
;
-; EXTRACTION-OF-MACRO-KERNEL-LABEL: Printing analysis 'Polly - Generate an AST from the SCoP (isl)' for region: 'bb8 => bb32' in function 'kernel_gemm':
+; EXTRACTION-OF-MACRO-KERNEL-LABEL: :: isl ast :: kernel_gemm :: %bb8---%bb32
; EXTRACTION-OF-MACRO-KERNEL: {
; EXTRACTION-OF-MACRO-KERNEL-NEXT: // 1st level tiling - Tiles
; EXTRACTION-OF-MACRO-KERNEL-NEXT: for (int c0 = 0; c0 <= 32; c0 += 1)
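Where a legacy RUN line chained a transform with a printer flag (-polly-opt-isl -polly-print-ast), the new-PM form names both in one quoted pipeline, as in the hunk above. The shape, under the same assumptions:

; RUN: opt %loadNPMPolly '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s 2>&1 | FileCheck %s
; (sketch only, not part of the patch)

The single quotes keep the shell from treating the < and > in print<polly-ast> as redirections. The CHECK-LABEL lines change with the printer: the legacy "Printing analysis '...' for region: ..." banner becomes the compact ":: isl ast :: <function> :: <entry>---<exit>" header.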
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_4.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_4.ll
index dc0edc6c5a3b..250641d57bac 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_4.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_4.ll
@@ -1,12 +1,12 @@
-; RUN: opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -polly-pattern-matching-based-opts=true \
; RUN: -debug -polly-tc-opt=true -disable-output < %s 2>&1 | FileCheck %s
-; RUN: opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly '-passes=polly-opt-isl,print<polly-ast>' -polly-pattern-matching-based-opts=true \
; RUN: -polly-target-throughput-vector-fma=1 \
; RUN: -polly-target-latency-vector-fma=8 \
; RUN: -polly-target-1st-cache-level-size=32768 \
; RUN: -polly-target-vector-register-bitwidth=256 \
-; RUN: -polly-target-2nd-cache-level-size=262144 -polly-print-ast \
-; RUN: -polly-tc-opt=true -disable-output -polly-opt-isl < %s | \
+; RUN: -polly-target-2nd-cache-level-size=262144 \
+; RUN: -polly-tc-opt=true -disable-output < %s | \
; RUN: FileCheck %s --check-prefix=PATTERN-MATCHING-OPTS
; REQUIRES: asserts
;
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_5.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_5.ll
index 6581566bf13f..ad2c195ba1e8 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_5.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_5.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -polly-pattern-matching-based-opts=true \
; RUN: -polly-target-throughput-vector-fma=1 \
; RUN: -polly-target-latency-vector-fma=8 \
; RUN: -polly-target-1st-cache-level-associativity=8 \
@@ -6,12 +6,12 @@
; RUN: -polly-target-1st-cache-level-size=32768 \
; RUN: -polly-target-vector-register-bitwidth=256 \
; RUN: -polly-target-2nd-cache-level-size=262144 \
-; RUN: -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s
;
-; opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=true \
+; opt %loadNPMPolly -passes=polly-opt-isl -polly-pattern-matching-based-opts=true \
; -polly-target-throughput-vector-fma=1 \
; -polly-target-latency-vector-fma=8 \
-; -polly-codegen -polly-target-1st-cache-level-associativity=8 \
+; -passes=polly-codegen -polly-target-1st-cache-level-associativity=8 \
; -polly-target-2nd-cache-level-associativity=8 \
; -polly-target-1st-cache-level-size=32768 \
; -polly-target-vector-register-bitwidth=256 \
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_6.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_6.ll
index bcf1fc9fe813..1d3cdbdbfdd8 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_6.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_6.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -polly-pattern-matching-based-opts=true \
; RUN: -polly-target-throughput-vector-fma=1 \
; RUN: -polly-target-latency-vector-fma=8 \
; RUN: -polly-target-1st-cache-level-associativity=8 \
@@ -6,12 +6,12 @@
; RUN: -polly-target-1st-cache-level-size=32768 \
; RUN: -polly-target-vector-register-bitwidth=256 \
; RUN: -polly-target-2nd-cache-level-size=262144 \
-; RUN: -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s
;
-; opt %loadPolly -polly-opt-isl -polly-pattern-matching-based-opts=true \
+; opt %loadNPMPolly -passes=polly-opt-isl -polly-pattern-matching-based-opts=true \
; -polly-target-throughput-vector-fma=1 \
; -polly-target-latency-vector-fma=8 \
-; -polly-codegen -polly-target-1st-cache-level-associativity=8 \
+; -passes=polly-codegen -polly-target-1st-cache-level-associativity=8 \
; -polly-target-2nd-cache-level-associativity=8 \
; -polly-target-1st-cache-level-size=32768 \
; -polly-target-vector-register-bitwidth=256 \
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_7.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_7.ll
index 77a3e02a0063..59eaa4a0928e 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_7.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_7.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -polly-pattern-matching-based-opts=true \
; RUN: -polly-target-throughput-vector-fma=1 \
; RUN: -polly-target-latency-vector-fma=8 \
; RUN: -polly-target-1st-cache-level-associativity=8 \
@@ -6,7 +6,7 @@
; RUN: -polly-target-1st-cache-level-size=32768 \
; RUN: -polly-target-vector-register-bitwidth=256 \
; RUN: -polly-target-2nd-cache-level-size=262144 \
-; RUN: -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s
;
; /* C := A * B + C */
; /* Elements of the matrices A, B, C have the float type. */
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_8.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_8.ll
index d02bc359e79d..2544d502a2dc 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_8.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_8.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -polly-pattern-matching-based-opts=true \
; RUN: -polly-target-throughput-vector-fma=1 \
; RUN: -polly-target-latency-vector-fma=8 \
; RUN: -polly-target-1st-cache-level-associativity=8 \
@@ -6,7 +6,7 @@
; RUN: -polly-target-1st-cache-level-size=32768 \
; RUN: -polly-target-vector-register-bitwidth=256 \
; RUN: -polly-target-2nd-cache-level-size=262144 \
-; RUN: -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s
;
; /* C := A * B + C */
; /* Elements of the matrices B, C have the double type. */
diff --git a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_9.ll b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_9.ll
index 144abfd7622f..85c143562f5a 100644
--- a/polly/test/ScheduleOptimizer/pattern-matching-based-opts_9.ll
+++ b/polly/test/ScheduleOptimizer/pattern-matching-based-opts_9.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-pattern-matching-based-opts=true \
+; RUN: opt %loadNPMPolly -polly-pattern-matching-based-opts=true \
; RUN: -polly-target-throughput-vector-fma=1 \
; RUN: -polly-target-latency-vector-fma=8 \
; RUN: -polly-target-1st-cache-level-associativity=8 \
@@ -6,9 +6,9 @@
; RUN: -polly-target-1st-cache-level-size=32768 \
; RUN: -polly-target-vector-register-bitwidth=256 \
; RUN: -polly-target-2nd-cache-level-size=262144 \
-; RUN: -polly-opt-isl -disable-output < %s
+; RUN: -passes=polly-opt-isl -disable-output < %s
;
-; RUN: opt %loadPolly -polly-print-dependences -disable-output < %s | FileCheck %s --check-prefix=DEPENDENCES
+; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s --check-prefix=DEPENDENCES
;
; /* C := A * B + C */
; /* Elements of the matrices A, B, C have the char type. */
diff --git a/polly/test/ScheduleOptimizer/pattern_matching_based_opts_splitmap.ll b/polly/test/ScheduleOptimizer/pattern_matching_based_opts_splitmap.ll
index 5b9783d20bfc..64285891a16c 100644
--- a/polly/test/ScheduleOptimizer/pattern_matching_based_opts_splitmap.ll
+++ b/polly/test/ScheduleOptimizer/pattern_matching_based_opts_splitmap.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-opt-isl -debug-only=polly-opt-isl -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=scop(polly-import-jscop,polly-opt-isl)' -polly-import-jscop-postfix=transformed -debug-only=polly-opt-isl -disable-output < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
;
; void pattern_matching_based_opts_splitmap(double C[static const restrict 2][2], double A[static const restrict 2][784], double B[static const restrict 784][2]) {
diff --git a/polly/test/ScheduleOptimizer/prevectorization-without-tiling.ll b/polly/test/ScheduleOptimizer/prevectorization-without-tiling.ll
index fea2155b1e4e..a18ba1daef84 100644
--- a/polly/test/ScheduleOptimizer/prevectorization-without-tiling.ll
+++ b/polly/test/ScheduleOptimizer/prevectorization-without-tiling.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S %loadPolly -basic-aa -polly-tiling=false -polly-pattern-matching-based-opts=false -polly-vectorizer=stripmine -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt -S %loadNPMPolly -aa-pipeline=basic-aa -polly-tiling=false -polly-pattern-matching-based-opts=false -polly-vectorizer=stripmine '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@C = common global [1536 x [1536 x float]] zeroinitializer, align 16
diff --git a/polly/test/ScheduleOptimizer/prevectorization.ll b/polly/test/ScheduleOptimizer/prevectorization.ll
index 385ebf14712a..4db61ad032ea 100644
--- a/polly/test/ScheduleOptimizer/prevectorization.ll
+++ b/polly/test/ScheduleOptimizer/prevectorization.ll
@@ -1,5 +1,5 @@
-; RUN: opt -S %loadPolly -basic-aa -polly-pattern-matching-based-opts=false -polly-vectorizer=stripmine -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s
-; RUN: opt -S %loadPolly -basic-aa -polly-pattern-matching-based-opts=false -polly-vectorizer=stripmine -polly-prevect-width=16 -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s -check-prefix=VEC16
+; RUN: opt -S %loadNPMPolly -aa-pipeline=basic-aa -polly-pattern-matching-based-opts=false -polly-vectorizer=stripmine '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s
+; RUN: opt -S %loadNPMPolly -aa-pipeline=basic-aa -polly-pattern-matching-based-opts=false -polly-vectorizer=stripmine -polly-prevect-width=16 '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s -check-prefix=VEC16
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/ScheduleOptimizer/rectangular-tiling.ll b/polly/test/ScheduleOptimizer/rectangular-tiling.ll
index b527255ab5f7..e1d768b351d7 100644
--- a/polly/test/ScheduleOptimizer/rectangular-tiling.ll
+++ b/polly/test/ScheduleOptimizer/rectangular-tiling.ll
@@ -1,7 +1,7 @@
-; RUN: opt %loadPolly -polly-tile-sizes=256,16 -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-tile-sizes=256,16 -polly-tiling=false -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=NOTILING
-; RUN: opt %loadPolly -polly-tile-sizes=256,16 -polly-2nd-level-tiling -polly-2nd-level-tile-sizes=16,8 -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=TWOLEVEL
-; RUN: opt %loadPolly -polly-tile-sizes=256,16 -polly-2nd-level-tiling -polly-2nd-level-tile-sizes=16,8 -polly-register-tiling -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=TWO-PLUS-REGISTER
+; RUN: opt %loadNPMPolly -polly-tile-sizes=256,16 '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-tile-sizes=256,16 -polly-tiling=false '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s --check-prefix=NOTILING
+; RUN: opt %loadNPMPolly -polly-tile-sizes=256,16 -polly-2nd-level-tiling -polly-2nd-level-tile-sizes=16,8 '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s --check-prefix=TWOLEVEL
+; RUN: opt %loadNPMPolly -polly-tile-sizes=256,16 -polly-2nd-level-tiling -polly-2nd-level-tile-sizes=16,8 -polly-register-tiling '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s --check-prefix=TWO-PLUS-REGISTER
; CHECK: // 1st level tiling - Tiles
; CHECK: for (int c0 = 0; c0 <= 3; c0 += 1)
diff --git a/polly/test/ScheduleOptimizer/schedule_computeout.ll b/polly/test/ScheduleOptimizer/schedule_computeout.ll
index acc8601a31a8..1e1359e3ecc6 100644
--- a/polly/test/ScheduleOptimizer/schedule_computeout.ll
+++ b/polly/test/ScheduleOptimizer/schedule_computeout.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -S -polly-optree -polly-delicm -polly-opt-isl -polly-schedule-computeout=10000 -debug-only="polly-opt-isl" < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -S '-passes=scop(polly-optree,polly-delicm,polly-opt-isl)' -polly-schedule-computeout=10000 -debug-only="polly-opt-isl" < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
; Bail out if the schedule computation exceeds the max scheduling quota.
diff --git a/polly/test/ScheduleOptimizer/statistics.ll b/polly/test/ScheduleOptimizer/statistics.ll
index 472febea173f..84eb59341d27 100644
--- a/polly/test/ScheduleOptimizer/statistics.ll
+++ b/polly/test/ScheduleOptimizer/statistics.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-opt-isl -stats -disable-output < %s 2>&1 | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -passes=polly-opt-isl -stats -disable-output < %s 2>&1 | FileCheck %s -match-full-lines
; REQUIRES: asserts
diff --git a/polly/test/ScheduleOptimizer/tile_after_fusion.ll b/polly/test/ScheduleOptimizer/tile_after_fusion.ll
index 8e5849234af6..50a46d66176e 100644
--- a/polly/test/ScheduleOptimizer/tile_after_fusion.ll
+++ b/polly/test/ScheduleOptimizer/tile_after_fusion.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-isl-arg=--no-schedule-serialize-sccs -polly-opt-isl -polly-print-ast -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-isl-arg=--no-schedule-serialize-sccs '-passes=polly-opt-isl,print<polly-ast>' -disable-output < %s | FileCheck %s
;
;
; void tf(int C[256][256][256], int A0[256][256][256], int A1[256][256][256]) {
@@ -17,7 +17,7 @@
; checks whether they are tiled after being fused when polly-opt-fusion equals
; "max".
;
-; CHECK-LABEL: Printing analysis 'Polly - Generate an AST from the SCoP (isl)' for region: 'for.cond => for.end56' in function 'tf':
+; CHECK-LABEL: :: isl ast :: tf :: %for.cond---%for.end56
; CHECK: 1st level tiling - Tiles
; CHECK-NEXT: for (int c0 = 0; c0 <= 7; c0 += 1)
; CHECK-NEXT: for (int c1 = 0; c1 <= 7; c1 += 1)
diff --git a/polly/test/ScheduleOptimizer/vivid-vbi-gen-vivid_vbi_gen_sliced-before-llvmreduced.ll b/polly/test/ScheduleOptimizer/vivid-vbi-gen-vivid_vbi_gen_sliced-before-llvmreduced.ll
index d08595db8fce..e59a31665d77 100644
--- a/polly/test/ScheduleOptimizer/vivid-vbi-gen-vivid_vbi_gen_sliced-before-llvmreduced.ll
+++ b/polly/test/ScheduleOptimizer/vivid-vbi-gen-vivid_vbi_gen_sliced-before-llvmreduced.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-vectorizer=stripmine -polly-isl-arg=--no-schedule-serialize-sccs -polly-tiling=0 -polly-print-opt-isl -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-vectorizer=stripmine -polly-isl-arg=--no-schedule-serialize-sccs -polly-tiling=0 '-passes=print<polly-opt-isl>' -disable-output < %s | FileCheck %s
; isl_schedule_node_band_sink may sink into multiple children.
; https://llvm.org/PR52637
diff --git a/polly/test/ScopDetect/aliasing_parametric_simple_1.ll b/polly/test/ScopDetect/aliasing_parametric_simple_1.ll
index 2eddbd4cb262..cee1c06cf7aa 100644
--- a/polly/test/ScopDetect/aliasing_parametric_simple_1.ll
+++ b/polly/test/ScopDetect/aliasing_parametric_simple_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Valid Region for Scop:
;
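For the standalone analysis printers the conversion has the same shape, but note the systematically added 2>&1: the legacy -polly-print-detect output was piped straight into FileCheck, while the output of the new print<polly-detect> pass evidently goes to stderr and has to be merged into the pipe. Illustrative form, same assumptions as above:

; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; (sketch only, not part of the patch)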
diff --git a/polly/test/ScopDetect/aliasing_parametric_simple_2.ll b/polly/test/ScopDetect/aliasing_parametric_simple_2.ll
index c111f686c462..5506b3c626cf 100644
--- a/polly/test/ScopDetect/aliasing_parametric_simple_2.ll
+++ b/polly/test/ScopDetect/aliasing_parametric_simple_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Valid Region for Scop:
;
diff --git a/polly/test/ScopDetect/aliasing_simple_1.ll b/polly/test/ScopDetect/aliasing_simple_1.ll
index 524ca19ae398..5f43ec1856a7 100644
--- a/polly/test/ScopDetect/aliasing_simple_1.ll
+++ b/polly/test/ScopDetect/aliasing_simple_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Valid Region for Scop:
;
diff --git a/polly/test/ScopDetect/aliasing_simple_2.ll b/polly/test/ScopDetect/aliasing_simple_2.ll
index 457df996c7b8..e853dfcc6448 100644
--- a/polly/test/ScopDetect/aliasing_simple_2.ll
+++ b/polly/test/ScopDetect/aliasing_simple_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Valid Region for Scop:
;
diff --git a/polly/test/ScopDetect/base_pointer_load_setNewAccessRelation.ll b/polly/test/ScopDetect/base_pointer_load_setNewAccessRelation.ll
index 0411aed6ae04..eeb9e11f812c 100644
--- a/polly/test/ScopDetect/base_pointer_load_setNewAccessRelation.ll
+++ b/polly/test/ScopDetect/base_pointer_load_setNewAccessRelation.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-ignore-aliasing -polly-invariant-load-hoisting=true -polly-scops -polly-print-import-jscop -polly-codegen -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-ignore-aliasing -polly-invariant-load-hoisting=true '-passes=print<polly-function-scops>,scop(polly-import-jscop,polly-codegen)' -disable-output < %s 2>&1 | FileCheck %s
;
; This violated an assertion in setNewAccessRelation that assumed base pointers
; to be load-hoisted. Without this assertion, codegen would generate invalid
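Scop passes such as polly-import-jscop, polly-codegen, and polly-opt-isl run on detected SCoPs, so when they are combined with function-level passes in one pipeline they are wrapped in a scop(...) group, as in the hunk above; named on their own (e.g. -passes=polly-opt-isl) they apparently work at the top level as well. A sketch of the nested form, same assumptions:

; RUN: opt %loadNPMPolly '-passes=scop(polly-import-jscop,polly-opt-isl)' -disable-output < %s 2>&1 | FileCheck %s
; (sketch only, not part of the patch)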
diff --git a/polly/test/ScopDetect/base_pointer_setNewAccessRelation.ll b/polly/test/ScopDetect/base_pointer_setNewAccessRelation.ll
index ff9be6ea16e8..16976e631327 100644
--- a/polly/test/ScopDetect/base_pointer_setNewAccessRelation.ll
+++ b/polly/test/ScopDetect/base_pointer_setNewAccessRelation.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -disable-basic-aa -polly-detect -polly-print-import-jscop -polly-codegen -disable-output < %s | FileCheck %s --allow-empty
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,scop(polly-import-jscop,polly-codegen)' -disable-output < %s 2>&1 | FileCheck %s --allow-empty
;
; Polly codegen used to generate invalid code (referring to %ptr from the
; original region) when regeneration of the access function is necessary.
diff --git a/polly/test/ScopDetect/callbr.ll b/polly/test/ScopDetect/callbr.ll
index d65ab934bf2e..418297469367 100644
--- a/polly/test/ScopDetect/callbr.ll
+++ b/polly/test/ScopDetect/callbr.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-detect -polly-detect-track-failures -disable-output -pass-remarks-missed=polly-detect < %s 2>&1 | FileCheck %s --check-prefix=REMARK
-; RUN: opt %loadPolly -polly-detect -polly-detect-track-failures -disable-output -stats < %s 2>&1 | FileCheck %s --check-prefix=STAT
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -polly-detect-track-failures -disable-output -pass-remarks-missed=polly-detect < %s 2>&1 | FileCheck %s --check-prefix=REMARK
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -polly-detect-track-failures -disable-output -stats < %s 2>&1 | FileCheck %s --check-prefix=STAT
; REQUIRES: asserts
; REMARK: Branch from indirect terminator.
diff --git a/polly/test/ScopDetect/collective_invariant_loads.ll b/polly/test/ScopDetect/collective_invariant_loads.ll
index f1d2eea520c6..f451bccec706 100644
--- a/polly/test/ScopDetect/collective_invariant_loads.ll
+++ b/polly/test/ScopDetect/collective_invariant_loads.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting -disable-output< %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting -disable-output < %s 2>&1 | FileCheck %s
;CHECK: Function: test_init_chpl
;CHECK-NEXT: Region: %bb1---%bb16
diff --git a/polly/test/ScopDetect/cross_loop_non_single_exit.ll b/polly/test/ScopDetect/cross_loop_non_single_exit.ll
index ae23930b92a6..fe3922174c07 100644
--- a/polly/test/ScopDetect/cross_loop_non_single_exit.ll
+++ b/polly/test/ScopDetect/cross_loop_non_single_exit.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; void f(long A[], long N) {
; long i;
diff --git a/polly/test/ScopDetect/cross_loop_non_single_exit_2.ll b/polly/test/ScopDetect/cross_loop_non_single_exit_2.ll
index 5c25da66d7ef..4cac173932a6 100644
--- a/polly/test/ScopDetect/cross_loop_non_single_exit_2.ll
+++ b/polly/test/ScopDetect/cross_loop_non_single_exit_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; void f(long A[], long N) {
; long i;
diff --git a/polly/test/ScopDetect/dependency_to_phi_node_outside_of_region.ll b/polly/test/ScopDetect/dependency_to_phi_node_outside_of_region.ll
index 12983d2321cc..7d7476471bb6 100644
--- a/polly/test/ScopDetect/dependency_to_phi_node_outside_of_region.ll
+++ b/polly/test/ScopDetect/dependency_to_phi_node_outside_of_region.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-detect -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
define void @f(ptr %A, i64 %N, i64 %M) nounwind {
diff --git a/polly/test/ScopDetect/dot-scops-npm.ll b/polly/test/ScopDetect/dot-scops-npm.ll
index 7c8be032fd4f..d14bf8a23a16 100644
--- a/polly/test/ScopDetect/dot-scops-npm.ll
+++ b/polly/test/ScopDetect/dot-scops-npm.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadNPMPolly "-passes=polly-scop-printer" -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=polly-scop-printer' -disable-output < %s
; RUN: FileCheck %s -input-file=scops.func_npm.dot
;
; Check that the ScopPrinter does not crash.
diff --git a/polly/test/ScopDetect/dot-scops.ll b/polly/test/ScopDetect/dot-scops.ll
index c31562e4c62d..63163b23617c 100644
--- a/polly/test/ScopDetect/dot-scops.ll
+++ b/polly/test/ScopDetect/dot-scops.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-scops -dot-scops -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>,polly-scop-printer' -disable-output < %s
;
; Check that the ScopPrinter does not crash.
; ScopPrinter needs the ScopDetection pass, which should depend on
diff --git a/polly/test/ScopDetect/error-block-always-executed.ll b/polly/test/ScopDetect/error-block-always-executed.ll
index 894be2119941..d799d575a530 100644
--- a/polly/test/ScopDetect/error-block-always-executed.ll
+++ b/polly/test/ScopDetect/error-block-always-executed.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK-NOT: Valid Region for Scop:
diff --git a/polly/test/ScopDetect/error-block-referenced-from-scop.ll b/polly/test/ScopDetect/error-block-referenced-from-scop.ll
index 085351482139..ba271f34ea7b 100644
--- a/polly/test/ScopDetect/error-block-referenced-from-scop.ll
+++ b/polly/test/ScopDetect/error-block-referenced-from-scop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK-NOT: Valid Region for Scop:
diff --git a/polly/test/ScopDetect/error-block-unreachable.ll b/polly/test/ScopDetect/error-block-unreachable.ll
index 48f6fe8e0547..6ba7698a972b 100644
--- a/polly/test/ScopDetect/error-block-unreachable.ll
+++ b/polly/test/ScopDetect/error-block-unreachable.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-detect -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s
; Verify that the scop detection does not crash on inputs with unreachable
; blocks. Earlier we crashed when detecting error blocks.
diff --git a/polly/test/ScopDetect/expand-region-correctly-2.ll b/polly/test/ScopDetect/expand-region-correctly-2.ll
index fadb503cff35..df35d05674f9 100644
--- a/polly/test/ScopDetect/expand-region-correctly-2.ll
+++ b/polly/test/ScopDetect/expand-region-correctly-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Valid Region for Scop: if.end.1631 => for.cond.1647.outer
;
diff --git a/polly/test/ScopDetect/expand-region-correctly.ll b/polly/test/ScopDetect/expand-region-correctly.ll
index 72082a32fa79..a8c90c08fde0 100644
--- a/polly/test/ScopDetect/expand-region-correctly.ll
+++ b/polly/test/ScopDetect/expand-region-correctly.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; CHECK: Valid Region for Scop: if.end.1631 => for.cond.1647.outer
diff --git a/polly/test/ScopDetect/ignore_func_flag_regex.ll b/polly/test/ScopDetect/ignore_func_flag_regex.ll
index 224126ec010e..a75e705995a7 100644
--- a/polly/test/ScopDetect/ignore_func_flag_regex.ll
+++ b/polly/test/ScopDetect/ignore_func_flag_regex.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-ignore-func=f.*,g.* -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-ignore-func=f.*,g.* '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Check that the flag `-polly-ignore-func` works with regexes.
;
diff --git a/polly/test/ScopDetect/index_from_unpredictable_loop.ll b/polly/test/ScopDetect/index_from_unpredictable_loop.ll
index 27ed64da17e6..f6d6cfab0eed 100644
--- a/polly/test/ScopDetect/index_from_unpredictable_loop.ll
+++ b/polly/test/ScopDetect/index_from_unpredictable_loop.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=AFFINE
-; RUN: opt %loadPolly -polly-allow-nonaffine -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=NONAFFINE
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s | FileCheck %s --check-prefix=AFFINE
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine '-passes=print<polly-function-scops>' -disable-output < %s | FileCheck %s --check-prefix=NONAFFINE
; The SCoP contains a loop with multiple exit blocks (BBs after leaving
; the loop). The current implementation of deriving their domain derives
diff --git a/polly/test/ScopDetect/index_from_unpredictable_loop2.ll b/polly/test/ScopDetect/index_from_unpredictable_loop2.ll
index 9b5a3a4389d4..16d47619b0ff 100644
--- a/polly/test/ScopDetect/index_from_unpredictable_loop2.ll
+++ b/polly/test/ScopDetect/index_from_unpredictable_loop2.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=AFFINE
-; RUN: opt %loadPolly -polly-allow-nonaffine -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=NONAFFINE
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s | FileCheck %s --check-prefix=AFFINE
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine '-passes=print<polly-function-scops>' -disable-output < %s | FileCheck %s --check-prefix=NONAFFINE
; The SCoP contains a loop with multiple exit blocks (BBs after leaving
; the loop). The current implementation of deriving their domain derives
diff --git a/polly/test/ScopDetect/indvars.ll b/polly/test/ScopDetect/indvars.ll
index 2ba4d1f5aabf..3fbc4d65bbe2 100644
--- a/polly/test/ScopDetect/indvars.ll
+++ b/polly/test/ScopDetect/indvars.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -polly-codegen -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,scop(polly-codegen)' -disable-output < %s 2>&1 | FileCheck %s
;
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/polly/test/ScopDetect/intrinsics_1.ll b/polly/test/ScopDetect/intrinsics_1.ll
index 65d3968e247c..0f9c70084a3d 100644
--- a/polly/test/ScopDetect/intrinsics_1.ll
+++ b/polly/test/ScopDetect/intrinsics_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Valid Region for Scop: for.cond => for.end
;
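One more recurring substitution in the detection tests: the legacy -basic-aa flag, which scheduled BasicAA as a standalone pass, becomes a selection through the new-PM alias-analysis pipeline:

; Legacy PM: opt %loadPolly -basic-aa ...
; New PM: opt %loadNPMPolly -aa-pipeline=basic-aa ...
; (sketch only, not part of the patch)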
diff --git a/polly/test/ScopDetect/intrinsics_2.ll b/polly/test/ScopDetect/intrinsics_2.ll
index f0575511b2ef..1db9807cadb8 100644
--- a/polly/test/ScopDetect/intrinsics_2.ll
+++ b/polly/test/ScopDetect/intrinsics_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; Verify that we allow the lifetime markers for the tmp array.
;
diff --git a/polly/test/ScopDetect/intrinsics_3.ll b/polly/test/ScopDetect/intrinsics_3.ll
index bce90d136a41..a230d0aa831c 100644
--- a/polly/test/ScopDetect/intrinsics_3.ll
+++ b/polly/test/ScopDetect/intrinsics_3.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; Verify that we allow the misc intrinsics.
;
diff --git a/polly/test/ScopDetect/invalid-latch-conditions.ll b/polly/test/ScopDetect/invalid-latch-conditions.ll
index eb8097470ecf..db4898c9c7bd 100644
--- a/polly/test/ScopDetect/invalid-latch-conditions.ll
+++ b/polly/test/ScopDetect/invalid-latch-conditions.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -polly-process-unprofitable=false -polly-print-detect -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-allow-nonaffine-loops -polly-print-detect -disable-output < %s | FileCheck %s --check-prefix=NALOOPS
-; RUN: opt %loadPolly -polly-allow-nonaffine-loops -polly-process-unprofitable=false -polly-print-detect -disable-output < %s | FileCheck %s --check-prefix=PROFIT
+; RUN: opt %loadNPMPolly -polly-process-unprofitable=false '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-loops '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=NALOOPS
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-loops -polly-process-unprofitable=false '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=PROFIT
; The latch conditions of the outer loop are not affine, thus the loop cannot be
; handled by the domain generation and needs to be overapproximated.
diff --git a/polly/test/ScopDetect/invalidate_scalar_evolution.ll b/polly/test/ScopDetect/invalidate_scalar_evolution.ll
index 01d34c49e289..ddef510ad4d9 100644
--- a/polly/test/ScopDetect/invalidate_scalar_evolution.ll
+++ b/polly/test/ScopDetect/invalidate_scalar_evolution.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s -check-prefix=PHI
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s -check-prefix=PHI
; void f(long A[], long N) {
; long i;
diff --git a/polly/test/ScopDetect/invariant-load-before-scop.ll b/polly/test/ScopDetect/invariant-load-before-scop.ll
index f72085ff88a1..10479643959c 100644
--- a/polly/test/ScopDetect/invariant-load-before-scop.ll
+++ b/polly/test/ScopDetect/invariant-load-before-scop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s -match-full-lines
;
; The LoadInst %.b761 is defined outside the SCoP, hence is always constant
; within it. It is not a "required invariant load".
diff --git a/polly/test/ScopDetect/keep_going_expansion.ll b/polly/test/ScopDetect/keep_going_expansion.ll
index 9bcfb3924f6a..074aae9ae95c 100644
--- a/polly/test/ScopDetect/keep_going_expansion.ll
+++ b/polly/test/ScopDetect/keep_going_expansion.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-detect-track-failures -polly-detect-keep-going -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-detect-track-failures -polly-detect-keep-going '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/ScopDetect/mod_ref_read_pointer.ll b/polly/test/ScopDetect/mod_ref_read_pointer.ll
index 95a4649f4705..64535d85f2ab 100644
--- a/polly/test/ScopDetect/mod_ref_read_pointer.ll
+++ b/polly/test/ScopDetect/mod_ref_read_pointer.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -basic-aa -polly-allow-modref-calls -polly-print-detect -disable-output < %s | FileCheck %s -check-prefix=MODREF
-; RUN: opt %loadPolly -basic-aa -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-modref-calls '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s -check-prefix=MODREF
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK-NOT: Valid Region for Scop: for.body => for.end
; MODREF: Valid Region for Scop: for.body => for.end
diff --git a/polly/test/ScopDetect/more-than-one-loop.ll b/polly/test/ScopDetect/more-than-one-loop.ll
index bfd226c1bcfc..30090652326d 100644
--- a/polly/test/ScopDetect/more-than-one-loop.ll
+++ b/polly/test/ScopDetect/more-than-one-loop.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-process-unprofitable=false -polly-print-detect -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-process-unprofitable=true -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-process-unprofitable=false '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-process-unprofitable=true '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; CHECK: Valid Region for Scop:
diff --git a/polly/test/ScopDetect/multidim-with-undef-size.ll b/polly/test/ScopDetect/multidim-with-undef-size.ll
index 9973c6c72169..2a5f8b15534f 100644
--- a/polly/test/ScopDetect/multidim-with-undef-size.ll
+++ b/polly/test/ScopDetect/multidim-with-undef-size.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; CHECK: Valid Region for Scop: bb14 => bb17
diff --git a/polly/test/ScopDetect/multidim.ll b/polly/test/ScopDetect/multidim.ll
index f43698819f32..91202373263f 100644
--- a/polly/test/ScopDetect/multidim.ll
+++ b/polly/test/ScopDetect/multidim.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; CHECK: Valid Region for Scop: bb19 => bb20
diff --git a/polly/test/ScopDetect/multidim_indirect_access.ll b/polly/test/ScopDetect/multidim_indirect_access.ll
index 3e06251f5fd1..a9cd446d2767 100644
--- a/polly/test/ScopDetect/multidim_indirect_access.ll
+++ b/polly/test/ScopDetect/multidim_indirect_access.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; Check that we will recognize this SCoP.
;
diff --git a/polly/test/ScopDetect/multidim_two_accesses_different_delinearization.ll b/polly/test/ScopDetect/multidim_two_accesses_different_delinearization.ll
index ed554a24a6d6..9c91fbfbe0b6 100644
--- a/polly/test/ScopDetect/multidim_two_accesses_different_delinearization.ll
+++ b/polly/test/ScopDetect/multidim_two_accesses_different_delinearization.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; Derived from the following code:
diff --git a/polly/test/ScopDetect/nested_loop_single_exit.ll b/polly/test/ScopDetect/nested_loop_single_exit.ll
index 377e8088eedb..a0742112b6e1 100644
--- a/polly/test/ScopDetect/nested_loop_single_exit.ll
+++ b/polly/test/ScopDetect/nested_loop_single_exit.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -disable-output < %s
; void f(long A[], long N) {
; long i, j;
diff --git a/polly/test/ScopDetect/non-affine-conditional.ll b/polly/test/ScopDetect/non-affine-conditional.ll
index fc2d0c02d2da..e74619cd8775 100644
--- a/polly/test/ScopDetect/non-affine-conditional.ll
+++ b/polly/test/ScopDetect/non-affine-conditional.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine-branches -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-branches '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(int *A) {
; for (int i = 0; i < 1024; i++)
diff --git a/polly/test/ScopDetect/non-affine-float-compare.ll b/polly/test/ScopDetect/non-affine-float-compare.ll
index 984f14aaff8f..9326cd429038 100644
--- a/polly/test/ScopDetect/non-affine-float-compare.ll
+++ b/polly/test/ScopDetect/non-affine-float-compare.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine-branches -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-branches '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(float *A) {
; for (int i = 0; i < 1024; i++)
diff --git a/polly/test/ScopDetect/non-affine-loop-condition-dependent-access.ll b/polly/test/ScopDetect/non-affine-loop-condition-dependent-access.ll
index 068367fa1e3c..1ab6b35ae93f 100644
--- a/polly/test/ScopDetect/non-affine-loop-condition-dependent-access.ll
+++ b/polly/test/ScopDetect/non-affine-loop-condition-dependent-access.ll
@@ -1,7 +1,7 @@
-; RUN: opt %loadPolly -basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=false -polly-print-detect -disable-output < %s | FileCheck %s --check-prefix=REJECTNONAFFINELOOPS
-; RUN: opt %loadPolly -basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-print-detect -disable-output < %s | FileCheck %s --check-prefix=ALLOWNONAFFINELOOPS
-; RUN: opt %loadPolly -basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-allow-nonaffine -polly-print-detect -disable-output < %s | FileCheck %s --check-prefix=ALLOWNONAFFINELOOPSANDACCESSES
-; RUN: opt %loadPolly -basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-allow-nonaffine -polly-process-unprofitable=false -polly-print-detect -disable-output < %s | FileCheck %s --check-prefix=PROFIT
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=false '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=REJECTNONAFFINELOOPS
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=ALLOWNONAFFINELOOPS
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-allow-nonaffine '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=ALLOWNONAFFINELOOPSANDACCESSES
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-allow-nonaffine -polly-process-unprofitable=false '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=PROFIT
;
; Here we have a non-affine loop but also a non-affine access which should
; be rejected as long as -polly-allow-nonaffine isn't given.
diff --git a/polly/test/ScopDetect/non-affine-loop-condition-dependent-access_2.ll b/polly/test/ScopDetect/non-affine-loop-condition-dependent-access_2.ll
index cd2140518b46..921f6ab53549 100644
--- a/polly/test/ScopDetect/non-affine-loop-condition-dependent-access_2.ll
+++ b/polly/test/ScopDetect/non-affine-loop-condition-dependent-access_2.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=false -polly-print-detect -disable-output < %s | FileCheck %s --check-prefix=REJECTNONAFFINELOOPS
-; RUN: opt %loadPolly -basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-print-detect -disable-output < %s | FileCheck %s --check-prefix=ALLOWNONAFFINELOOPS
-; RUN: opt %loadPolly -basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-allow-nonaffine -polly-print-detect -disable-output < %s | FileCheck %s --check-prefix=ALLOWNONAFFINELOOPSANDACCESSES
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=false '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=REJECTNONAFFINELOOPS
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=ALLOWNONAFFINELOOPS
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-allow-nonaffine '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=ALLOWNONAFFINELOOPSANDACCESSES
;
; Here we have a non-affine loop (in the context of the loop nest)
; and also a non-affine access (A[k]). While we can always detect the
diff --git a/polly/test/ScopDetect/non-affine-loop-condition-dependent-access_3.ll b/polly/test/ScopDetect/non-affine-loop-condition-dependent-access_3.ll
index fb936216e45c..78774d92e0a4 100644
--- a/polly/test/ScopDetect/non-affine-loop-condition-dependent-access_3.ll
+++ b/polly/test/ScopDetect/non-affine-loop-condition-dependent-access_3.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=false -polly-print-detect -disable-output < %s | FileCheck %s --check-prefix=REJECTNONAFFINELOOPS
-; RUN: opt %loadPolly -basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-print-detect -disable-output < %s | FileCheck %s --check-prefix=ALLOWNONAFFINELOOPS
-; RUN: opt %loadPolly -basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-allow-nonaffine -polly-print-detect -disable-output < %s | FileCheck %s --check-prefix=ALLOWNONAFFINELOOPSANDACCESSES
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=false '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=REJECTNONAFFINELOOPS
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=ALLOWNONAFFINELOOPS
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-allow-nonaffine '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=ALLOWNONAFFINELOOPSANDACCESSES
;
; Here we have a non-affine loop (in the context of the loop nest)
; and also a non-affine access (A[k]). While we can always detect the
diff --git a/polly/test/ScopDetect/non-affine-loop.ll b/polly/test/ScopDetect/non-affine-loop.ll
index d5f7ea128a79..5136b3b8779b 100644
--- a/polly/test/ScopDetect/non-affine-loop.ll
+++ b/polly/test/ScopDetect/non-affine-loop.ll
@@ -1,8 +1,8 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=false -polly-print-detect -disable-output < %s | FileCheck %s --check-prefix=REJECTNONAFFINELOOPS
-; RUN: opt %loadPolly -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-print-detect -disable-output < %s | FileCheck %s --check-prefix=ALLOWNONAFFINELOOPS
-; RUN: opt %loadPolly -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=false -polly-allow-nonaffine -polly-print-detect -disable-output < %s | FileCheck %s --check-prefix=ALLOWNONAFFINEREGIONSANDACCESSES
-; RUN: opt %loadPolly -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-allow-nonaffine -polly-print-detect -disable-output < %s | FileCheck %s --check-prefix=ALLOWNONAFFINELOOPSANDACCESSES
-; RUN: opt %loadPolly -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-allow-nonaffine -polly-process-unprofitable=false -polly-print-detect -disable-output < %s | FileCheck %s --check-prefix=PROFIT
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=false '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=REJECTNONAFFINELOOPS
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=ALLOWNONAFFINELOOPS
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=false -polly-allow-nonaffine '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=ALLOWNONAFFINEREGIONSANDACCESSES
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-allow-nonaffine '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=ALLOWNONAFFINELOOPSANDACCESSES
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-allow-nonaffine -polly-process-unprofitable=false '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=PROFIT
;
; This function/region does contain a loop; however, it is non-affine, hence the access
; A[i] is also. Furthermore, it is the only loop, thus when we overapproximate
diff --git a/polly/test/ScopDetect/non-beneficial-loops-small-trip-count.ll b/polly/test/ScopDetect/non-beneficial-loops-small-trip-count.ll
index 43af1684dccb..fd52c5df7b27 100644
--- a/polly/test/ScopDetect/non-beneficial-loops-small-trip-count.ll
+++ b/polly/test/ScopDetect/non-beneficial-loops-small-trip-count.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-process-unprofitable=false -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-process-unprofitable=false '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK-NOT: Valid
;
diff --git a/polly/test/ScopDetect/non-constant-add-rec-start-expr.ll b/polly/test/ScopDetect/non-constant-add-rec-start-expr.ll
index 4cddcc916a76..d0c1f7a61333 100644
--- a/polly/test/ScopDetect/non-constant-add-rec-start-expr.ll
+++ b/polly/test/ScopDetect/non-constant-add-rec-start-expr.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; CHECK: Valid Region for Scop: bb11 => bb25
diff --git a/polly/test/ScopDetect/non-simple-memory-accesses.ll b/polly/test/ScopDetect/non-simple-memory-accesses.ll
index a82228982885..bdc48984f996 100644
--- a/polly/test/ScopDetect/non-simple-memory-accesses.ll
+++ b/polly/test/ScopDetect/non-simple-memory-accesses.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; Verify that we do not model atomic memory accesses. We did not reason about
; how to handle them correctly and the Alias Set Tracker models some of them
diff --git a/polly/test/ScopDetect/non_affine_loop_condition.ll b/polly/test/ScopDetect/non_affine_loop_condition.ll
index f268442cd8ee..63bd7b3a2f1f 100644
--- a/polly/test/ScopDetect/non_affine_loop_condition.ll
+++ b/polly/test/ScopDetect/non_affine_loop_condition.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine-loops -polly-print-detect -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-allow-nonaffine-loops -polly-process-unprofitable=false -polly-print-detect -disable-output < %s | FileCheck %s --check-prefix=PROFIT
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-loops '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-loops -polly-process-unprofitable=false '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=PROFIT
;
; void f(int *A) {
; for (int i = 0; i < 1024; i++) {
diff --git a/polly/test/ScopDetect/only-one-affine-loop.ll b/polly/test/ScopDetect/only-one-affine-loop.ll
index d6d50bb611d9..1d36f4df35bc 100644
--- a/polly/test/ScopDetect/only-one-affine-loop.ll
+++ b/polly/test/ScopDetect/only-one-affine-loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-process-unprofitable=false -polly-allow-nonaffine-loops -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-process-unprofitable=false -polly-allow-nonaffine-loops '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; Even if we allow non-affine loops we can only model the outermost loop, all
; other loops are boxed in non-affine regions. However, the inner loops can be
diff --git a/polly/test/ScopDetect/only_func_flag.ll b/polly/test/ScopDetect/only_func_flag.ll
index d465cd0f50f7..4742375fec5c 100644
--- a/polly/test/ScopDetect/only_func_flag.ll
+++ b/polly/test/ScopDetect/only_func_flag.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-only-func=f,g -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-only-func=f,g '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Check that the flag `-polly-only-func` limits analysis to `f` and `g`.
;
diff --git a/polly/test/ScopDetect/only_func_flag_regex.ll b/polly/test/ScopDetect/only_func_flag_regex.ll
index e6675798eeb9..2ad22c9f7a7f 100644
--- a/polly/test/ScopDetect/only_func_flag_regex.ll
+++ b/polly/test/ScopDetect/only_func_flag_regex.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-only-func=f.*,g.* -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-only-func=f.*,g.* '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Check that the flag `-polly-only-func` works with regexes.
;
diff --git a/polly/test/ScopDetect/parametric-multiply-in-scev-2.ll b/polly/test/ScopDetect/parametric-multiply-in-scev-2.ll
index fc957a7f912c..271825a58c39 100644
--- a/polly/test/ScopDetect/parametric-multiply-in-scev-2.ll
+++ b/polly/test/ScopDetect/parametric-multiply-in-scev-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; CHECK-NOT: Valid Region
diff --git a/polly/test/ScopDetect/parametric-multiply-in-scev.ll b/polly/test/ScopDetect/parametric-multiply-in-scev.ll
index 9c6e5ccc8f52..2ab8997c6333 100644
--- a/polly/test/ScopDetect/parametric-multiply-in-scev.ll
+++ b/polly/test/ScopDetect/parametric-multiply-in-scev.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; foo(float *A, long n, long k) {
; if (true)
diff --git a/polly/test/ScopDetect/phi_with_multi_exiting_edges.ll b/polly/test/ScopDetect/phi_with_multi_exiting_edges.ll
index 054de168d76b..248bb43aacd9 100644
--- a/polly/test/ScopDetect/phi_with_multi_exiting_edges.ll
+++ b/polly/test/ScopDetect/phi_with_multi_exiting_edges.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; Region with an exit node that has a PHI node with multiple incoming edges from
; inside the region. Motivation for supporting such cases in Polly.
diff --git a/polly/test/ScopDetect/profitability-large-basic-blocks.ll b/polly/test/ScopDetect/profitability-large-basic-blocks.ll
index e1650febf11c..d74185b45c75 100644
--- a/polly/test/ScopDetect/profitability-large-basic-blocks.ll
+++ b/polly/test/ScopDetect/profitability-large-basic-blocks.ll
@@ -1,12 +1,12 @@
-; RUN: opt %loadPolly -polly-process-unprofitable=false \
+; RUN: opt %loadNPMPolly -polly-process-unprofitable=false \
; RUN: -polly-detect-profitability-min-per-loop-insts=40 \
-; RUN: -polly-print-detect -disable-output < %s | FileCheck %s -check-prefix=PROFITABLE
+; RUN: '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s -check-prefix=PROFITABLE
-; RUN: opt %loadPolly -polly-process-unprofitable=true \
-; RUN: -polly-print-detect -disable-output < %s | FileCheck %s -check-prefix=PROFITABLE
+; RUN: opt %loadNPMPolly -polly-process-unprofitable=true \
+; RUN: '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s -check-prefix=PROFITABLE
-; RUN: opt %loadPolly -polly-process-unprofitable=false \
-; RUN: -polly-print-detect -disable-output < %s | FileCheck %s -check-prefix=UNPROFITABLE
+; RUN: opt %loadNPMPolly -polly-process-unprofitable=false \
+; RUN: '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s -check-prefix=UNPROFITABLE
; UNPROFITABLE-NOT: Valid Region for Scop:
; PROFITABLE: Valid Region for Scop:
diff --git a/polly/test/ScopDetect/profitability-two-nested-loops.ll b/polly/test/ScopDetect/profitability-two-nested-loops.ll
index 525f91cbc2f4..0291d3be452a 100644
--- a/polly/test/ScopDetect/profitability-two-nested-loops.ll
+++ b/polly/test/ScopDetect/profitability-two-nested-loops.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; CHECK: Valid Region for Scop: next => bb3
;
diff --git a/polly/test/ScopDetect/remove_all_children.ll b/polly/test/ScopDetect/remove_all_children.ll
index 6d5097b80607..d95e9bde0b38 100644
--- a/polly/test/ScopDetect/remove_all_children.ll
+++ b/polly/test/ScopDetect/remove_all_children.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/polly/test/ScopDetect/report-scop-location.ll b/polly/test/ScopDetect/report-scop-location.ll
index 750699cbe763..a99a2ef2b484 100644
--- a/polly/test/ScopDetect/report-scop-location.ll
+++ b/polly/test/ScopDetect/report-scop-location.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-detect -polly-report -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -polly-report -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-i64:64-f80:128-s:64-n8:16:32:64-S128"
; Function Attrs: nounwind uwtable
diff --git a/polly/test/ScopDetect/restrict-undef-size-scopdetect.ll b/polly/test/ScopDetect/restrict-undef-size-scopdetect.ll
index e94f1e7728c5..f49190b33ccf 100644
--- a/polly/test/ScopDetect/restrict-undef-size-scopdetect.ll
+++ b/polly/test/ScopDetect/restrict-undef-size-scopdetect.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; CHECK-NOT: Valid Region for Scop:
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/ScopDetect/run_time_alias_check.ll b/polly/test/ScopDetect/run_time_alias_check.ll
index 672f3dfa6365..74cbedb34e5c 100644
--- a/polly/test/ScopDetect/run_time_alias_check.ll
+++ b/polly/test/ScopDetect/run_time_alias_check.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
diff --git a/polly/test/ScopDetect/scev_remove_max.ll b/polly/test/ScopDetect/scev_remove_max.ll
index 5353e06bdf2f..caf55bf87a66 100644
--- a/polly/test/ScopDetect/scev_remove_max.ll
+++ b/polly/test/ScopDetect/scev_remove_max.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-detect < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' < %s
; This test case helps to determine whether SCEVRemoveMax::remove produces
; an infinite loop and a segmentation fault, if it processes, for example,
diff --git a/polly/test/ScopDetect/sequential_loops.ll b/polly/test/ScopDetect/sequential_loops.ll
index e6ac38aa1604..4a84f356f3e8 100644
--- a/polly/test/ScopDetect/sequential_loops.ll
+++ b/polly/test/ScopDetect/sequential_loops.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
@@ -13,7 +13,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
; }
define void @f1(ptr %A, i64 %N) nounwind {
-; CHECK-LABEL: 'Polly - Detect static control parts (SCoPs)' for function 'f1'
+; CHECK-LABEL: Detected Scops in Function f1
entry:
fence seq_cst
br label %for.i.1
@@ -60,7 +60,7 @@ return:
; }
define void @f2(ptr %A, i64 %N) nounwind {
-; CHECK-LABEL: 'Polly - Detect static control parts (SCoPs)' for function 'f2'
+; CHECK-LABEL: Detected Scops in Function f2
entry:
fence seq_cst
br label %for.i.1
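Where a test anchors on the printer banner, the expected text changes with the pass manager as well: the legacy printer announced 'Polly - Detect static control parts (SCoPs)' for function 'NAME', while the NPM printer emits "Detected Scops in Function NAME". The updated directives from this file illustrate the pattern:

; CHECK-LABEL: Detected Scops in Function f1
; CHECK-LABEL: Detected Scops in Function f2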
diff --git a/polly/test/ScopDetect/simple_loop.ll b/polly/test/ScopDetect/simple_loop.ll
index c8ed89a97d00..33823b21fb8f 100644
--- a/polly/test/ScopDetect/simple_loop.ll
+++ b/polly/test/ScopDetect/simple_loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; void f(long A[], long N) {
; long i;
diff --git a/polly/test/ScopDetect/simple_loop_non_single_entry.ll b/polly/test/ScopDetect/simple_loop_non_single_entry.ll
index 22adec5d2039..1bba2c21c747 100644
--- a/polly/test/ScopDetect/simple_loop_non_single_entry.ll
+++ b/polly/test/ScopDetect/simple_loop_non_single_entry.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; void f(long A[], long N) {
; long i;
diff --git a/polly/test/ScopDetect/simple_loop_non_single_exit.ll b/polly/test/ScopDetect/simple_loop_non_single_exit.ll
index 71ac830cae7d..93ec84e911c5 100644
--- a/polly/test/ScopDetect/simple_loop_non_single_exit.ll
+++ b/polly/test/ScopDetect/simple_loop_non_single_exit.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; void f(long A[], long N) {
; long i;
diff --git a/polly/test/ScopDetect/simple_loop_non_single_exit_2.ll b/polly/test/ScopDetect/simple_loop_non_single_exit_2.ll
index d9915dc130d5..33b0d8d7d6fc 100644
--- a/polly/test/ScopDetect/simple_loop_non_single_exit_2.ll
+++ b/polly/test/ScopDetect/simple_loop_non_single_exit_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; void f(long A[], long N) {
; long i;
diff --git a/polly/test/ScopDetect/simple_loop_two_phi_nodes.ll b/polly/test/ScopDetect/simple_loop_two_phi_nodes.ll
index 867bd50513f0..9b47b7c946ca 100644
--- a/polly/test/ScopDetect/simple_loop_two_phi_nodes.ll
+++ b/polly/test/ScopDetect/simple_loop_two_phi_nodes.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; void f(long A[], long N) {
; long i;
diff --git a/polly/test/ScopDetect/simple_loop_with_param.ll b/polly/test/ScopDetect/simple_loop_with_param.ll
index 1ae5c6608739..4a0a3adab661 100644
--- a/polly/test/ScopDetect/simple_loop_with_param.ll
+++ b/polly/test/ScopDetect/simple_loop_with_param.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-detect -disable-output < %s | FileCheck %s -check-prefix=PHI
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s -check-prefix=PHI
; void f(long A[], long N, long *init_ptr) {
; long i, j;
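Alias-analysis selection moves to NPM syntax in the same sweep: the legacy -basic-aa pass flag becomes an explicit -aa-pipeline=basic-aa. A sketch of the substitution as it appears in this file:

; RUN: opt %loadPolly -basic-aa -polly-print-detect -disable-output < %s | FileCheck %s -check-prefix=PHI
; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s -check-prefix=PHI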
diff --git a/polly/test/ScopDetect/simple_loop_with_param_2.ll b/polly/test/ScopDetect/simple_loop_with_param_2.ll
index 1a4750621c19..670936b6fee8 100644
--- a/polly/test/ScopDetect/simple_loop_with_param_2.ll
+++ b/polly/test/ScopDetect/simple_loop_with_param_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; void f(long A[], int N, int *init_ptr) {
; long i, j;
diff --git a/polly/test/ScopDetect/simple_non_single_entry.ll b/polly/test/ScopDetect/simple_non_single_entry.ll
index a1995a427903..6ace3b636019 100644
--- a/polly/test/ScopDetect/simple_non_single_entry.ll
+++ b/polly/test/ScopDetect/simple_non_single_entry.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; void f(long A[], long N) {
; long i;
diff --git a/polly/test/ScopDetect/skip_function_attribute.ll b/polly/test/ScopDetect/skip_function_attribute.ll
index e85dbd4c2b83..2150a3e8c35d 100644
--- a/polly/test/ScopDetect/skip_function_attribute.ll
+++ b/polly/test/ScopDetect/skip_function_attribute.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; Verify that Polly skips this function
;
diff --git a/polly/test/ScopDetect/srem_with_parametric_divisor.ll b/polly/test/ScopDetect/srem_with_parametric_divisor.ll
index 4b5c3b04c2ce..66c3b045f62a 100644
--- a/polly/test/ScopDetect/srem_with_parametric_divisor.ll
+++ b/polly/test/ScopDetect/srem_with_parametric_divisor.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK-NOT: Valid Region for Scop:
;
diff --git a/polly/test/ScopDetect/statistics.ll b/polly/test/ScopDetect/statistics.ll
index 64df3d081605..a1dcebec63ff 100644
--- a/polly/test/ScopDetect/statistics.ll
+++ b/polly/test/ScopDetect/statistics.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-detect -stats -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -stats -disable-output < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
diff --git a/polly/test/ScopDetect/switch-in-loop-patch.ll b/polly/test/ScopDetect/switch-in-loop-patch.ll
index ab4729fc09a4..2f9b670384db 100644
--- a/polly/test/ScopDetect/switch-in-loop-patch.ll
+++ b/polly/test/ScopDetect/switch-in-loop-patch.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; CHECK-NOT: Valid
diff --git a/polly/test/ScopDetectionDiagnostics/ReportAlias-01.ll b/polly/test/ScopDetectionDiagnostics/ReportAlias-01.ll
index 97ba7f9634e9..4ae86a940e0c 100644
--- a/polly/test/ScopDetectionDiagnostics/ReportAlias-01.ll
+++ b/polly/test/ScopDetectionDiagnostics/ReportAlias-01.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-use-runtime-alias-checks=false -pass-remarks-missed="polly-detect" -polly-detect-track-failures -polly-print-detect -disable-output < %s 2>&1| FileCheck %s
+; RUN: opt %loadNPMPolly -polly-use-runtime-alias-checks=false -pass-remarks-missed="polly-detect" -polly-detect-track-failures '-passes=print<polly-detect>' -disable-output < %s 2>&1| FileCheck %s
;void f(int A[], int B[]) {
; for (int i=0; i<42; i++)
diff --git a/polly/test/ScopDetectionDiagnostics/ReportEntry.ll b/polly/test/ScopDetectionDiagnostics/ReportEntry.ll
index fc21e192f32c..adb14b5b017d 100644
--- a/polly/test/ScopDetectionDiagnostics/ReportEntry.ll
+++ b/polly/test/ScopDetectionDiagnostics/ReportEntry.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-detect -pass-remarks-missed="polly-detect" -disable-output < %s 2>&1| FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -pass-remarks-missed="polly-detect" -disable-output < %s 2>&1| FileCheck %s
; CHECK: remark: <unknown>:0:0: Scop contains function entry (not yet supported).
diff --git a/polly/test/ScopDetectionDiagnostics/ReportFuncCall-01.ll b/polly/test/ScopDetectionDiagnostics/ReportFuncCall-01.ll
index abace4ba520d..428a7cf855f6 100644
--- a/polly/test/ScopDetectionDiagnostics/ReportFuncCall-01.ll
+++ b/polly/test/ScopDetectionDiagnostics/ReportFuncCall-01.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -pass-remarks-missed="polly-detect" -polly-detect-track-failures -polly-print-detect -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -pass-remarks-missed="polly-detect" -polly-detect-track-failures '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; #define N 1024
; double invalidCall(double A[N]);
diff --git a/polly/test/ScopDetectionDiagnostics/ReportIrreducibleRegion.ll b/polly/test/ScopDetectionDiagnostics/ReportIrreducibleRegion.ll
index 8368a68b42f0..d22c3b6d27c3 100644
--- a/polly/test/ScopDetectionDiagnostics/ReportIrreducibleRegion.ll
+++ b/polly/test/ScopDetectionDiagnostics/ReportIrreducibleRegion.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -pass-remarks-missed="polly-detect" -disable-output < %s 2>&1| FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -pass-remarks-missed="polly-detect" -disable-output < %s 2>&1| FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
;void foo(int a, int b) {
diff --git a/polly/test/ScopDetectionDiagnostics/ReportIrreducibleRegionWithoutDebugLoc.ll b/polly/test/ScopDetectionDiagnostics/ReportIrreducibleRegionWithoutDebugLoc.ll
index 82c6c33e287c..2bc515e0ae5e 100644
--- a/polly/test/ScopDetectionDiagnostics/ReportIrreducibleRegionWithoutDebugLoc.ll
+++ b/polly/test/ScopDetectionDiagnostics/ReportIrreducibleRegionWithoutDebugLoc.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-detect -pass-remarks-missed="polly-detect" -disable-output < %s 2>&1| FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -pass-remarks-missed="polly-detect" -disable-output < %s 2>&1| FileCheck %s
; CHECK: remark: <unknown>:0:0: Irreducible region encountered in control flow.
diff --git a/polly/test/ScopDetectionDiagnostics/ReportLoopBound-01.ll b/polly/test/ScopDetectionDiagnostics/ReportLoopBound-01.ll
index 35986b5e0b35..cb913000a993 100644
--- a/polly/test/ScopDetectionDiagnostics/ReportLoopBound-01.ll
+++ b/polly/test/ScopDetectionDiagnostics/ReportLoopBound-01.ll
@@ -1,15 +1,15 @@
-; RUN: opt %loadPolly \
+; RUN: opt %loadNPMPolly \
; RUN: -pass-remarks-missed="polly-detect" -polly-detect-track-failures \
-; RUN: -polly-allow-nonaffine-loops=false -polly-print-detect -disable-output \
+; RUN: -polly-allow-nonaffine-loops=false '-passes=print<polly-detect>' -disable-output \
; RUN: < %s 2>&1| FileCheck %s --check-prefix=REJECTNONAFFINELOOPS
-; RUN: opt %loadPolly \
+; RUN: opt %loadNPMPolly \
; RUN: -pass-remarks-missed="polly-detect" -polly-detect-track-failures \
-; RUN: -polly-allow-nonaffine-loops=true -polly-print-detect -disable-output \
+; RUN: -polly-allow-nonaffine-loops=true '-passes=print<polly-detect>' -disable-output \
; RUN: < %s 2>&1| FileCheck %s --check-prefix=ALLOWNONAFFINELOOPS
-; RUN: opt %loadPolly -pass-remarks-missed="polly-detect" \
+; RUN: opt %loadNPMPolly -pass-remarks-missed="polly-detect" \
; RUN: -polly-process-unprofitable=false \
; RUN: -polly-detect-track-failures -polly-allow-nonaffine-loops=true \
-; RUN: -polly-allow-nonaffine -polly-print-detect -disable-output < %s 2>&1 \
+; RUN: -polly-allow-nonaffine '-passes=print<polly-detect>' -disable-output < %s 2>&1 \
; RUN: | FileCheck %s --check-prefix=ALLOWNONAFFINEALL
; void f(int A[], int n) {
diff --git a/polly/test/ScopDetectionDiagnostics/ReportLoopHasNoExit.ll b/polly/test/ScopDetectionDiagnostics/ReportLoopHasNoExit.ll
index 5dbeaded45c9..92028093f70b 100644
--- a/polly/test/ScopDetectionDiagnostics/ReportLoopHasNoExit.ll
+++ b/polly/test/ScopDetectionDiagnostics/ReportLoopHasNoExit.ll
@@ -4,8 +4,8 @@
; the PostDominatorTree. Infinite loops are postdominated only by the virtual
; root, which causes them not to appear in regions in ScopDetection anymore.
-; RUN: opt %loadPolly -pass-remarks-missed="polly-detect" -polly-allow-nonaffine-loops -polly-print-detect -disable-output < %s 2>&1 | FileCheck %s
-; RUN: opt %loadPolly -pass-remarks-missed="polly-detect" -polly-allow-nonaffine-loops=false -polly-print-detect -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -pass-remarks-missed="polly-detect" -polly-allow-nonaffine-loops '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -pass-remarks-missed="polly-detect" -polly-allow-nonaffine-loops=false '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
; void func (int param0, int N, int *A)
; {
diff --git a/polly/test/ScopDetectionDiagnostics/ReportMultipleNonAffineAccesses.ll b/polly/test/ScopDetectionDiagnostics/ReportMultipleNonAffineAccesses.ll
index 634b63e6d44d..dd95bd6ede71 100644
--- a/polly/test/ScopDetectionDiagnostics/ReportMultipleNonAffineAccesses.ll
+++ b/polly/test/ScopDetectionDiagnostics/ReportMultipleNonAffineAccesses.ll
@@ -1,9 +1,9 @@
-; RUN: opt %loadPolly -basic-aa -pass-remarks-missed="polly-detect" -polly-detect-track-failures -polly-print-detect -disable-output < %s 2>&1| FileCheck %s
-; RUN: opt %loadPolly -basic-aa -pass-remarks-missed="polly-detect" -polly-detect-track-failures -polly-print-detect -polly-delinearize=false -polly-detect-keep-going -disable-output < %s 2>&1| FileCheck %s -check-prefix=ALL
-; RUN: opt %loadPolly -basic-aa -pass-remarks-missed="polly-detect" -polly-detect-track-failures -polly-print-detect -disable-output < %s 2>&1| FileCheck %s -check-prefix=DELIN
-; RUN: opt %loadPolly -basic-aa -pass-remarks-missed="polly-detect" -polly-detect-track-failures -polly-print-detect -polly-detect-keep-going -disable-output < %s 2>&1| FileCheck %s -check-prefix=DELIN-ALL
-; RUN: opt %loadPolly -basic-aa -pass-remarks-missed="polly-detect" -polly-detect-track-failures -polly-print-detect -polly-allow-nonaffine -disable-output < %s 2>&1| FileCheck %s -check-prefix=NONAFFINE
-; RUN: opt %loadPolly -basic-aa -pass-remarks-missed="polly-detect" -polly-detect-track-failures -polly-print-detect -polly-allow-nonaffine -disable-output < %s 2>&1| FileCheck %s -check-prefix=NONAFFINE
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -pass-remarks-missed="polly-detect" -polly-detect-track-failures '-passes=print<polly-detect>' -disable-output < %s 2>&1| FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -pass-remarks-missed="polly-detect" -polly-detect-track-failures '-passes=print<polly-detect>' -polly-delinearize=false -polly-detect-keep-going -disable-output < %s 2>&1| FileCheck %s -check-prefix=ALL
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -pass-remarks-missed="polly-detect" -polly-detect-track-failures '-passes=print<polly-detect>' -disable-output < %s 2>&1| FileCheck %s -check-prefix=DELIN
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -pass-remarks-missed="polly-detect" -polly-detect-track-failures '-passes=print<polly-detect>' -polly-detect-keep-going -disable-output < %s 2>&1| FileCheck %s -check-prefix=DELIN-ALL
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -pass-remarks-missed="polly-detect" -polly-detect-track-failures '-passes=print<polly-detect>' -polly-allow-nonaffine -disable-output < %s 2>&1| FileCheck %s -check-prefix=NONAFFINE
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -pass-remarks-missed="polly-detect" -polly-detect-track-failures '-passes=print<polly-detect>' -polly-allow-nonaffine -disable-output < %s 2>&1| FileCheck %s -check-prefix=NONAFFINE
; 1 void manyaccesses(float A[restrict], long n, float B[restrict][n])
; 2 {
diff --git a/polly/test/ScopDetectionDiagnostics/ReportNonAffineAccess-01.ll b/polly/test/ScopDetectionDiagnostics/ReportNonAffineAccess-01.ll
index 23d8c9c061c9..832045f089d6 100644
--- a/polly/test/ScopDetectionDiagnostics/ReportNonAffineAccess-01.ll
+++ b/polly/test/ScopDetectionDiagnostics/ReportNonAffineAccess-01.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -pass-remarks-missed="polly-detect" -polly-detect-track-failures -polly-print-detect -disable-output < %s 2>&1| FileCheck %s
+; RUN: opt %loadNPMPolly -pass-remarks-missed="polly-detect" -polly-detect-track-failures '-passes=print<polly-detect>' -disable-output < %s 2>&1| FileCheck %s
; void f(int A[]) {
; for(int i=0; i<42; ++i)
diff --git a/polly/test/ScopDetectionDiagnostics/ReportUnprofitable.ll b/polly/test/ScopDetectionDiagnostics/ReportUnprofitable.ll
index d35b7a28ba89..b951487d6197 100644
--- a/polly/test/ScopDetectionDiagnostics/ReportUnprofitable.ll
+++ b/polly/test/ScopDetectionDiagnostics/ReportUnprofitable.ll
@@ -1,9 +1,9 @@
-; RUN: opt %loadPolly -pass-remarks-missed="polly-detect" \
-; RUN: -polly-detect-track-failures -polly-print-detect -disable-output \
+; RUN: opt %loadNPMPolly -pass-remarks-missed="polly-detect" \
+; RUN: -polly-detect-track-failures '-passes=print<polly-detect>' -disable-output \
; RUN: -polly-process-unprofitable=false < %s 2>&1| FileCheck %s
-; RUN: opt %loadPolly -pass-remarks-missed="polly-detect" \
-; RUN: -polly-detect-track-failures -polly-print-detect -disable-output \
+; RUN: opt %loadNPMPolly -pass-remarks-missed="polly-detect" \
+; RUN: -polly-detect-track-failures '-passes=print<polly-detect>' -disable-output \
; RUN: -polly-process-unprofitable=false < %s 2>&1 -pass-remarks-output=%t.yaml
; RUN: cat %t.yaml | FileCheck -check-prefix=YAML %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/ScopDetectionDiagnostics/ReportUnreachableInExit.ll b/polly/test/ScopDetectionDiagnostics/ReportUnreachableInExit.ll
index 6c868db78ce7..d110cfefc27d 100644
--- a/polly/test/ScopDetectionDiagnostics/ReportUnreachableInExit.ll
+++ b/polly/test/ScopDetectionDiagnostics/ReportUnreachableInExit.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s \
; RUN: -pass-remarks-missed="polly-detect" 2>&1 | FileCheck %s
; void f(long A[], long N) {
diff --git a/polly/test/ScopDetectionDiagnostics/ReportVariantBasePtr-01.ll b/polly/test/ScopDetectionDiagnostics/ReportVariantBasePtr-01.ll
index a82f56b7a5fa..c2efd6165a26 100644
--- a/polly/test/ScopDetectionDiagnostics/ReportVariantBasePtr-01.ll
+++ b/polly/test/ScopDetectionDiagnostics/ReportVariantBasePtr-01.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -pass-remarks-missed="polly-detect" -polly-detect-track-failures -polly-print-detect -disable-output < %s 2>&1| FileCheck %s
+; RUN: opt %loadNPMPolly -pass-remarks-missed="polly-detect" -polly-detect-track-failures '-passes=print<polly-detect>' -disable-output < %s 2>&1| FileCheck %s
; struct b {
; double **b;
diff --git a/polly/test/ScopDetectionDiagnostics/loop_has_multiple_exits.ll b/polly/test/ScopDetectionDiagnostics/loop_has_multiple_exits.ll
index a0f2704b1372..3cdeed13ec28 100644
--- a/polly/test/ScopDetectionDiagnostics/loop_has_multiple_exits.ll
+++ b/polly/test/ScopDetectionDiagnostics/loop_has_multiple_exits.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -pass-remarks-missed="polly-detect" -polly-detect-track-failures -polly-detect -disable-output 2>&1 < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -pass-remarks-missed="polly-detect" -polly-detect-track-failures '-passes=print<polly-detect>' -disable-output 2>&1 < %s | FileCheck %s -match-full-lines
;
; Derived from test-suite/MultiSource/Benchmarks/BitBench/uuencode/uuencode.c
;
diff --git a/polly/test/ScopDetectionDiagnostics/loop_partially_in_scop-2.ll b/polly/test/ScopDetectionDiagnostics/loop_partially_in_scop-2.ll
index 667ed7d18ab5..4a9a200d67df 100644
--- a/polly/test/ScopDetectionDiagnostics/loop_partially_in_scop-2.ll
+++ b/polly/test/ScopDetectionDiagnostics/loop_partially_in_scop-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -pass-remarks-missed="polly-detect" -disable-output < %s 2>&1| FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -pass-remarks-missed="polly-detect" -disable-output < %s 2>&1| FileCheck %s
; CHECK: remark: <unknown>:0:0: Loop cannot be handled because not all latches are part of loop region.
diff --git a/polly/test/ScopDetectionDiagnostics/loop_partially_in_scop.ll b/polly/test/ScopDetectionDiagnostics/loop_partially_in_scop.ll
index 9dce56a3a3c4..61ff033d9f93 100644
--- a/polly/test/ScopDetectionDiagnostics/loop_partially_in_scop.ll
+++ b/polly/test/ScopDetectionDiagnostics/loop_partially_in_scop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -pass-remarks-missed="polly-detect" -disable-output < %s 2>&1| FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -pass-remarks-missed="polly-detect" -disable-output < %s 2>&1| FileCheck %s
; CHECK: remark: <unknown>:0:0: Loop cannot be handled because not all latches are part of loop region.
; CHECK: remark: <unknown>:0:0: Loop cannot be handled because not all latches are part of loop region.
diff --git a/polly/test/ScopInfo/20110312-Fail-without-basicaa.ll b/polly/test/ScopInfo/20110312-Fail-without-basicaa.ll
index 94dd5824777c..c5efec3f50c5 100644
--- a/polly/test/ScopInfo/20110312-Fail-without-basicaa.ll
+++ b/polly/test/ScopInfo/20110312-Fail-without-basicaa.ll
@@ -1,5 +1,5 @@
; This should be run without alias analysis enabled.
-;RUN: opt %loadPolly -polly-scops -disable-output < %s
+;RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
define i32 @main() nounwind {
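The printer names do not map one-to-one onto the legacy flags: detection output comes from print<polly-detect>, while the ScopInfo output that -polly-scops and -polly-print-scops used to produce comes from print<polly-function-scops>, presumably reflecting that ScopInfo is a function-level analysis under the NPM. A sketch of the scops variant of the conversion, as it recurs below:

; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s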
diff --git a/polly/test/ScopInfo/20111108-Parameter-not-detected.ll b/polly/test/ScopInfo/20111108-Parameter-not-detected.ll
index f80177cb90e7..81c7efb96365 100644
--- a/polly/test/ScopInfo/20111108-Parameter-not-detected.ll
+++ b/polly/test/ScopInfo/20111108-Parameter-not-detected.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
declare void @foo()
diff --git a/polly/test/ScopInfo/2012-03-16-Crash-because-of-unsigned-in-scev.ll b/polly/test/ScopInfo/2012-03-16-Crash-because-of-unsigned-in-scev.ll
index b55d635947e5..5abf8ff29ef8 100644
--- a/polly/test/ScopInfo/2012-03-16-Crash-because-of-unsigned-in-scev.ll
+++ b/polly/test/ScopInfo/2012-03-16-Crash-because-of-unsigned-in-scev.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-a0:0-n32"
diff --git a/polly/test/ScopInfo/2015-10-04-Crash-in-domain-generation.ll b/polly/test/ScopInfo/2015-10-04-Crash-in-domain-generation.ll
index d4d931fd2e0c..d16ba453f981 100644
--- a/polly/test/ScopInfo/2015-10-04-Crash-in-domain-generation.ll
+++ b/polly/test/ScopInfo/2015-10-04-Crash-in-domain-generation.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine-loops -polly-scops -disable-output < %s
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-loops '-passes=print<polly-function-scops>' -disable-output < %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/ScopInfo/Alias-0.ll b/polly/test/ScopInfo/Alias-0.ll
index 0fc4ad91b7db..ebbe744627ef 100644
--- a/polly/test/ScopInfo/Alias-0.ll
+++ b/polly/test/ScopInfo/Alias-0.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=RTA
-; RUN: opt %loadPolly -polly-print-scops -polly-use-runtime-alias-checks=false -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=NORTA
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=RTA
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-use-runtime-alias-checks=false -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=NORTA
; REQUIRES: asserts
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/polly/test/ScopInfo/Alias-1.ll b/polly/test/ScopInfo/Alias-1.ll
index eab8c062f4ba..b1711c25857d 100644
--- a/polly/test/ScopInfo/Alias-1.ll
+++ b/polly/test/ScopInfo/Alias-1.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=RTA
-; RUN: opt %loadPolly -polly-print-scops -polly-use-runtime-alias-checks=false -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=NORTA
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=RTA
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-use-runtime-alias-checks=false -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=NORTA
; REQUIRES: asserts
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/polly/test/ScopInfo/Alias-2.ll b/polly/test/ScopInfo/Alias-2.ll
index 64f1e0bc919d..b94f130c94eb 100644
--- a/polly/test/ScopInfo/Alias-2.ll
+++ b/polly/test/ScopInfo/Alias-2.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=RTA
-; RUN: opt %loadPolly -polly-print-scops -polly-use-runtime-alias-checks=false -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=NORTA
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=RTA
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-use-runtime-alias-checks=false -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=NORTA
; REQUIRES: asserts
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/polly/test/ScopInfo/Alias-3.ll b/polly/test/ScopInfo/Alias-3.ll
index 5e9b94e692bc..af7816546b4a 100644
--- a/polly/test/ScopInfo/Alias-3.ll
+++ b/polly/test/ScopInfo/Alias-3.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=RTA
-; RUN: opt %loadPolly -polly-print-scops -polly-use-runtime-alias-checks=false -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=NORTA
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=RTA
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-use-runtime-alias-checks=false -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=NORTA
; REQUIRES: asserts
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/polly/test/ScopInfo/Alias-4.ll b/polly/test/ScopInfo/Alias-4.ll
index 4d5a91abb96f..fe651c87b241 100644
--- a/polly/test/ScopInfo/Alias-4.ll
+++ b/polly/test/ScopInfo/Alias-4.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -disable-basic-aa -polly-print-scops -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=RTA
-; RUN: opt %loadPolly -disable-basic-aa -polly-print-scops -polly-use-runtime-alias-checks=false -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=NORTA
+; RUN: opt %loadNPMPolly -aa-pipeline= '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=RTA
+; RUN: opt %loadNPMPolly -aa-pipeline= '-passes=print<polly-detect>,print<polly-function-scops>' -polly-use-runtime-alias-checks=false -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=NORTA
; REQUIRES: asserts
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
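Two further NPM idioms appear in this file: disabling BasicAA is spelled as an empty -aa-pipeline= (replacing the legacy -disable-basic-aa flag), and tests whose checks also depend on detection output chain both printers in a single pipeline. Verbatim from the converted RUN line above:

; RUN: opt %loadNPMPolly -aa-pipeline= '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s -stats 2>&1 | FileCheck %s --check-prefix=RTA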
diff --git a/polly/test/ScopInfo/BoundChecks/single-loop.ll b/polly/test/ScopInfo/BoundChecks/single-loop.ll
index bc96c907afc9..10a0a58f381d 100644
--- a/polly/test/ScopInfo/BoundChecks/single-loop.ll
+++ b/polly/test/ScopInfo/BoundChecks/single-loop.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=AST
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=AST
;
; This only works after the post-dominator tree has been fixed.
;
diff --git a/polly/test/ScopInfo/BoundChecks/two-loops.ll b/polly/test/ScopInfo/BoundChecks/two-loops.ll
index 14e07f42a3ae..c85ac5b4ba8f 100644
--- a/polly/test/ScopInfo/BoundChecks/two-loops.ll
+++ b/polly/test/ScopInfo/BoundChecks/two-loops.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output< %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=AST
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output< %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=AST
;
; This only works after the post-dominator tree has been fixed.
; XFAIL: *
diff --git a/polly/test/ScopInfo/NonAffine/div_backedge.ll b/polly/test/ScopInfo/NonAffine/div_backedge.ll
index a6aca032ef62..3b0c673ece38 100644
--- a/polly/test/ScopInfo/NonAffine/div_backedge.ll
+++ b/polly/test/ScopInfo/NonAffine/div_backedge.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void foo(float *A) {
; for (long i = 1;; i++) {
diff --git a/polly/test/ScopInfo/NonAffine/div_domain.ll b/polly/test/ScopInfo/NonAffine/div_domain.ll
index f61c4eb459ed..34a5cecdfe3d 100644
--- a/polly/test/ScopInfo/NonAffine/div_domain.ll
+++ b/polly/test/ScopInfo/NonAffine/div_domain.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void foo(float *A) {
; for (long i = 0; i < 16; i++) {
diff --git a/polly/test/ScopInfo/NonAffine/invariant_loads_dependent_in_non_affine_region.ll b/polly/test/ScopInfo/NonAffine/invariant_loads_dependent_in_non_affine_region.ll
index f5d63dfb9d2c..7d02fae7f98f 100644
--- a/polly/test/ScopInfo/NonAffine/invariant_loads_dependent_in_non_affine_region.ll
+++ b/polly/test/ScopInfo/NonAffine/invariant_loads_dependent_in_non_affine_region.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-invariant-load-hoisting=true -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-invariant-load-hoisting=true '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(int *A, int *B, int *C) {
; for (int i = 0; i < 1000; i++)
diff --git a/polly/test/ScopInfo/NonAffine/modulo_backedge.ll b/polly/test/ScopInfo/NonAffine/modulo_backedge.ll
index dec63ca6813d..d5c808d9021f 100644
--- a/polly/test/ScopInfo/NonAffine/modulo_backedge.ll
+++ b/polly/test/ScopInfo/NonAffine/modulo_backedge.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Domain :=
; CHECK: { Stmt_for_body[i0] : 0 <= i0 <= 6 };
diff --git a/polly/test/ScopInfo/NonAffine/modulo_domain.ll b/polly/test/ScopInfo/NonAffine/modulo_domain.ll
index f5ebec2b0346..13fe53f11633 100644
--- a/polly/test/ScopInfo/NonAffine/modulo_domain.ll
+++ b/polly/test/ScopInfo/NonAffine/modulo_domain.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; TODO: The new domain generation cannot handle modulo domain constraints,
; hence modulo handling has been disabled completely. Once this is
diff --git a/polly/test/ScopInfo/NonAffine/non-affine-loop-condition-dependent-access_1.ll b/polly/test/ScopInfo/NonAffine/non-affine-loop-condition-dependent-access_1.ll
index 837d9b21b16e..2b8427d74ec8 100644
--- a/polly/test/ScopInfo/NonAffine/non-affine-loop-condition-dependent-access_1.ll
+++ b/polly/test/ScopInfo/NonAffine/non-affine-loop-condition-dependent-access_1.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -basic-aa -polly-allow-nonaffine -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-print-scops -disable-output < %s | FileCheck %s -check-prefix=SCALAR
-; RUN: opt %loadPolly -basic-aa -polly-allow-nonaffine -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-process-unprofitable=false -polly-print-scops -disable-output < %s | FileCheck %s -check-prefix=PROFIT
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-nonaffine -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -check-prefix=SCALAR
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-nonaffine -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-process-unprofitable=false '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -check-prefix=PROFIT
;
; SCALAR: Function: f
; SCALAR-NEXT: Region: %bb1---%bb13
diff --git a/polly/test/ScopInfo/NonAffine/non-affine-loop-condition-dependent-access_2.ll b/polly/test/ScopInfo/NonAffine/non-affine-loop-condition-dependent-access_2.ll
index e39569abc52d..30f756e81e47 100644
--- a/polly/test/ScopInfo/NonAffine/non-affine-loop-condition-dependent-access_2.ll
+++ b/polly/test/ScopInfo/NonAffine/non-affine-loop-condition-dependent-access_2.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=false -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=INNERMOST
-; RUN: opt %loadPolly -basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=INNERMOST
-; RUN: opt %loadPolly -basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-allow-nonaffine -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=ALL
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=false '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=INNERMOST
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=INNERMOST
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-allow-nonaffine '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=ALL
;
; Here we have a non-affine loop (in the context of the loop nest)
; and also a non-affine access (A[k]). While we can always model the
diff --git a/polly/test/ScopInfo/NonAffine/non-affine-loop-condition-dependent-access_3.ll b/polly/test/ScopInfo/NonAffine/non-affine-loop-condition-dependent-access_3.ll
index 75dd7ac26bb3..6dacd719862e 100644
--- a/polly/test/ScopInfo/NonAffine/non-affine-loop-condition-dependent-access_3.ll
+++ b/polly/test/ScopInfo/NonAffine/non-affine-loop-condition-dependent-access_3.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=false -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=INNERMOST
-; RUN: opt %loadPolly -basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=INNERMOST
-; RUN: opt %loadPolly -basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-allow-nonaffine -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=ALL
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=false '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=INNERMOST
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=INNERMOST
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true -polly-allow-nonaffine '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=ALL
;
; Here we have a non-affine loop (in the context of the loop nest)
; and also a non-affine access (A[k]). While we can always model the
diff --git a/polly/test/ScopInfo/NonAffine/non_affine_access_with_range_2.ll b/polly/test/ScopInfo/NonAffine/non_affine_access_with_range_2.ll
index 34b04933af86..8a13f791ed6d 100644
--- a/polly/test/ScopInfo/NonAffine/non_affine_access_with_range_2.ll
+++ b/polly/test/ScopInfo/NonAffine/non_affine_access_with_range_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(int *A) {
; for (int i = 0; i < 128; i++)
diff --git a/polly/test/ScopInfo/NonAffine/non_affine_but_sdiv.ll b/polly/test/ScopInfo/NonAffine/non_affine_but_sdiv.ll
index 9955c88b2cfd..1e70d2c9db87 100644
--- a/polly/test/ScopInfo/NonAffine/non_affine_but_sdiv.ll
+++ b/polly/test/ScopInfo/NonAffine/non_affine_but_sdiv.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Statements {
; CHECK-NEXT: Stmt_for_body
diff --git a/polly/test/ScopInfo/NonAffine/non_affine_but_srem.ll b/polly/test/ScopInfo/NonAffine/non_affine_but_srem.ll
index b194ee762e9f..dcfaa9280dcb 100644
--- a/polly/test/ScopInfo/NonAffine/non_affine_but_srem.ll
+++ b/polly/test/ScopInfo/NonAffine/non_affine_but_srem.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void pos(float *A, long n) {
; for (long i = 0; i < 100; i++)
diff --git a/polly/test/ScopInfo/NonAffine/non_affine_conditional_nested.ll b/polly/test/ScopInfo/NonAffine/non_affine_conditional_nested.ll
index 1f55530b137d..24bfe6050216 100644
--- a/polly/test/ScopInfo/NonAffine/non_affine_conditional_nested.ll
+++ b/polly/test/ScopInfo/NonAffine/non_affine_conditional_nested.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine-branches -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-branches '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(int *A) {
; for (int i = 0; i < 1024; i++)
diff --git a/polly/test/ScopInfo/NonAffine/non_affine_conditional_surrounding_affine_loop.ll b/polly/test/ScopInfo/NonAffine/non_affine_conditional_surrounding_affine_loop.ll
index 3511362304b4..931ad36d15f3 100644
--- a/polly/test/ScopInfo/NonAffine/non_affine_conditional_surrounding_affine_loop.ll
+++ b/polly/test/ScopInfo/NonAffine/non_affine_conditional_surrounding_affine_loop.ll
@@ -1,11 +1,11 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine-branches \
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-branches \
; RUN: -polly-invariant-load-hoisting=true \
; RUN: -polly-allow-nonaffine-loops=true \
-; RUN: -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=INNERMOST
-; RUN: opt %loadPolly -polly-allow-nonaffine \
+; RUN: '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=INNERMOST
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine \
; RUN: -polly-invariant-load-hoisting=true \
; RUN: -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true \
-; RUN: -polly-print-scops -disable-output < %s | FileCheck %s \
+; RUN: '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s \
; RUN: --check-prefix=ALL
;
; Negative test for INNERMOST.
diff --git a/polly/test/ScopInfo/NonAffine/non_affine_conditional_surrounding_non_affine_loop.ll b/polly/test/ScopInfo/NonAffine/non_affine_conditional_surrounding_non_affine_loop.ll
index c2e1e46f6f18..37b51cebd74d 100644
--- a/polly/test/ScopInfo/NonAffine/non_affine_conditional_surrounding_non_affine_loop.ll
+++ b/polly/test/ScopInfo/NonAffine/non_affine_conditional_surrounding_non_affine_loop.ll
@@ -1,16 +1,16 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine-branches \
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-branches \
; RUN: -polly-invariant-load-hoisting=true \
; RUN: -polly-allow-nonaffine-loops=true \
-; RUN: -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=INNERMOST
-; RUN: opt %loadPolly -polly-allow-nonaffine \
+; RUN: '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=INNERMOST
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine \
; RUN: -polly-invariant-load-hoisting=true \
; RUN: -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true \
-; RUN: -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=ALL
-; RUN: opt %loadPolly -polly-allow-nonaffine \
+; RUN: '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=ALL
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine \
; RUN: -polly-invariant-load-hoisting=true \
; RUN: -polly-process-unprofitable=false \
; RUN: -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops=true \
-; RUN: -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=PROFIT
+; RUN: '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=PROFIT
;
; Negative test for INNERMOST.
; At the moment we will optimistically assume A[i] in the conditional before the inner
diff --git a/polly/test/ScopInfo/NonAffine/non_affine_float_compare.ll b/polly/test/ScopInfo/NonAffine/non_affine_float_compare.ll
index c62447b6c15c..7bfd7f86efcd 100644
--- a/polly/test/ScopInfo/NonAffine/non_affine_float_compare.ll
+++ b/polly/test/ScopInfo/NonAffine/non_affine_float_compare.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine-branches -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-branches '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(float *A) {
; for (int i = 0; i < 1024; i++)
diff --git a/polly/test/ScopInfo/NonAffine/non_affine_loop_condition.ll b/polly/test/ScopInfo/NonAffine/non_affine_loop_condition.ll
index 873b44b9c8cf..fc779d544e62 100644
--- a/polly/test/ScopInfo/NonAffine/non_affine_loop_condition.ll
+++ b/polly/test/ScopInfo/NonAffine/non_affine_loop_condition.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops -polly-process-unprofitable=false -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=PROFIT
-; RUN: opt %loadPolly -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops -polly-detect-reductions=false -polly-print-scops -disable-output < %s | FileCheck %s -check-prefix=NO-REDUCTION
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops -polly-process-unprofitable=false '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=PROFIT
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops -polly-detect-reductions=false '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -check-prefix=NO-REDUCTION
;
; void f(int *A, int *C) {
; for (int i = 0; i < 1024; i++) {
diff --git a/polly/test/ScopInfo/NonAffine/non_affine_loop_used_later.ll b/polly/test/ScopInfo/NonAffine/non_affine_loop_used_later.ll
index 127bf80b9451..79b61eca258f 100644
--- a/polly/test/ScopInfo/NonAffine/non_affine_loop_used_later.ll
+++ b/polly/test/ScopInfo/NonAffine/non_affine_loop_used_later.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-allow-nonaffine -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops -polly-unprofitable-scalar-accs=true -polly-process-unprofitable=false -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=PROFIT
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine -polly-allow-nonaffine-branches -polly-allow-nonaffine-loops -polly-unprofitable-scalar-accs=true -polly-process-unprofitable=false '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=PROFIT
;
; Verify that we over-approximate the read access of A[j] in the last statement, as j is
; computed in a non-affine loop we do not model.
diff --git a/polly/test/ScopInfo/NonAffine/non_affine_parametric_loop.ll b/polly/test/ScopInfo/NonAffine/non_affine_parametric_loop.ll
index de011e29aeea..d33befe2c66e 100644
--- a/polly/test/ScopInfo/NonAffine/non_affine_parametric_loop.ll
+++ b/polly/test/ScopInfo/NonAffine/non_affine_parametric_loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-allow-nonaffine -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-nonaffine '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; void foo(long n, double A[], int INDEX[]) {
diff --git a/polly/test/ScopInfo/NonAffine/non_affine_region_guaranteed_non-entry.ll b/polly/test/ScopInfo/NonAffine/non_affine_region_guaranteed_non-entry.ll
index 7303b4ea47fd..77c2df48d651 100644
--- a/polly/test/ScopInfo/NonAffine/non_affine_region_guaranteed_non-entry.ll
+++ b/polly/test/ScopInfo/NonAffine/non_affine_region_guaranteed_non-entry.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine-loops -polly-detect -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-loops -polly-detect '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; The SCoP contains a loop with multiple exit blocks (BBs after leaving
; the loop). The current implementation of deriving their domain derives
diff --git a/polly/test/ScopInfo/NonAffine/whole-scop-non-affine-subregion-in-loop.ll b/polly/test/ScopInfo/NonAffine/whole-scop-non-affine-subregion-in-loop.ll
index 4f54d03d43fb..9ed340d1d304 100644
--- a/polly/test/ScopInfo/NonAffine/whole-scop-non-affine-subregion-in-loop.ll
+++ b/polly/test/ScopInfo/NonAffine/whole-scop-non-affine-subregion-in-loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s
;
; Regression test that triggered a memory leak at some point (24947).
;
diff --git a/polly/test/ScopInfo/aliasing_conditional_alias_groups_1.ll b/polly/test/ScopInfo/aliasing_conditional_alias_groups_1.ll
index dc59fbfc66a8..cbd024ba7a39 100644
--- a/polly/test/ScopInfo/aliasing_conditional_alias_groups_1.ll
+++ b/polly/test/ScopInfo/aliasing_conditional_alias_groups_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Check that there is no alias group because we either access A or B, never both.
;
diff --git a/polly/test/ScopInfo/aliasing_conditional_alias_groups_2.ll b/polly/test/ScopInfo/aliasing_conditional_alias_groups_2.ll
index a19d60dd9147..3858d8a7bb1d 100644
--- a/polly/test/ScopInfo/aliasing_conditional_alias_groups_2.ll
+++ b/polly/test/ScopInfo/aliasing_conditional_alias_groups_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Check that we create two alias groups since the minimal/maximal accesses
; depend on %b.
diff --git a/polly/test/ScopInfo/aliasing_dead_access.ll b/polly/test/ScopInfo/aliasing_dead_access.ll
index 2a725cf3c855..7baa3dce1f9d 100644
--- a/polly/test/ScopInfo/aliasing_dead_access.ll
+++ b/polly/test/ScopInfo/aliasing_dead_access.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Check that we do not create a SCoP if there is no statement executed.
;
diff --git a/polly/test/ScopInfo/aliasing_many_arrays_to_compare.ll b/polly/test/ScopInfo/aliasing_many_arrays_to_compare.ll
index 937d4ada3ec9..7265aab22a49 100644
--- a/polly/test/ScopInfo/aliasing_many_arrays_to_compare.ll
+++ b/polly/test/ScopInfo/aliasing_many_arrays_to_compare.ll
@@ -1,7 +1,7 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output \
-; RUN: < %s | FileCheck %s --check-prefix=FOUND
-; RUN: opt %loadPolly -polly-print-scops -disable-output \
-; RUN: -polly-rtc-max-arrays-per-group=3 < %s | FileCheck %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output \
+; RUN: < %s 2>&1 | FileCheck %s --check-prefix=FOUND
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output \
+; RUN: -polly-rtc-max-arrays-per-group=3 < %s 2>&1 | FileCheck %s \
; RUN: --check-prefix=IGNORED
;
; FOUND: Function: foo
diff --git a/polly/test/ScopInfo/aliasing_many_read_only_acesses.ll b/polly/test/ScopInfo/aliasing_many_read_only_acesses.ll
index c22cfe55e118..d66a10bc511b 100644
--- a/polly/test/ScopInfo/aliasing_many_read_only_acesses.ll
+++ b/polly/test/ScopInfo/aliasing_many_read_only_acesses.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Assumed Context:
; CHECK-NEXT: { : }
diff --git a/polly/test/ScopInfo/aliasing_multiple_alias_groups.ll b/polly/test/ScopInfo/aliasing_multiple_alias_groups.ll
index 16cb3dc0f5ac..9943802ec859 100644
--- a/polly/test/ScopInfo/aliasing_multiple_alias_groups.ll
+++ b/polly/test/ScopInfo/aliasing_multiple_alias_groups.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=NOAA
-; RUN: opt %loadPolly -polly-print-scops -disable-output -tbaa < %s | FileCheck %s --check-prefix=TBAA
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output -aa-pipeline= < %s 2>&1 | FileCheck %s --check-prefix=NOAA
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output -aa-pipeline=tbaa < %s 2>&1 | FileCheck %s --check-prefix=TBAA
;
; void jd(int *Int0, int *Int1, float *Float0, float *Float1) {
; for (int i = 0; i < 1024; i++) {
diff --git a/polly/test/ScopInfo/aliasing_with_non_affine_access.ll b/polly/test/ScopInfo/aliasing_with_non_affine_access.ll
index 056b644cd5ed..900d5d40d96f 100644
--- a/polly/test/ScopInfo/aliasing_with_non_affine_access.ll
+++ b/polly/test/ScopInfo/aliasing_with_non_affine_access.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-ast -polly-process-unprofitable -polly-allow-nonaffine -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -polly-process-unprofitable -polly-allow-nonaffine -disable-output < %s 2>&1 | FileCheck %s
;
; @test1
; Make sure we generate the correct aliasing check for a fixed-size memset operation.
diff --git a/polly/test/ScopInfo/allow-all-parameters-dereferencable.ll b/polly/test/ScopInfo/allow-all-parameters-dereferencable.ll
index d170a50e26fc..cb06e352da65 100644
--- a/polly/test/ScopInfo/allow-all-parameters-dereferencable.ll
+++ b/polly/test/ScopInfo/allow-all-parameters-dereferencable.ll
@@ -1,14 +1,14 @@
-; RUN: opt %loadPolly -disable-output -polly-invariant-load-hoisting \
+; RUN: opt %loadNPMPolly -disable-output -polly-invariant-load-hoisting \
; RUN: -polly-allow-dereference-of-all-function-parameters \
-; RUN: -polly-print-scops < %s | FileCheck %s --check-prefix=SCOP
+; RUN: '-passes=print<polly-function-scops>' < %s 2>&1 | FileCheck %s --check-prefix=SCOP
-; RUN: opt %loadPolly -S -polly-invariant-load-hoisting \
-; RUN: -polly-codegen < %s | FileCheck %s --check-prefix=CODE-RTC
+; RUN: opt %loadNPMPolly -S -polly-invariant-load-hoisting \
+; RUN: -passes=polly-codegen < %s 2>&1 | FileCheck %s --check-prefix=CODE-RTC
-; RUN: opt %loadPolly -S -polly-invariant-load-hoisting \
+; RUN: opt %loadNPMPolly -S -polly-invariant-load-hoisting \
; RUN: -polly-allow-dereference-of-all-function-parameters \
-; RUN: -polly-codegen < %s | FileCheck %s --check-prefix=CODE
+; RUN: -passes=polly-codegen < %s 2>&1 | FileCheck %s --check-prefix=CODE
; SCOP: Function: hoge
; SCOP-NEXT: Region: %bb15---%bb37
diff --git a/polly/test/ScopInfo/assume_gep_bounds.ll b/polly/test/ScopInfo/assume_gep_bounds.ll
index d0ce47148071..bd14e3868d52 100644
--- a/polly/test/ScopInfo/assume_gep_bounds.ll
+++ b/polly/test/ScopInfo/assume_gep_bounds.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-function-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; void foo(float A[][20][30], long n, long m, long p) {
; for (long i = 0; i < n; i++)
diff --git a/polly/test/ScopInfo/assume_gep_bounds_2.ll b/polly/test/ScopInfo/assume_gep_bounds_2.ll
index e327195da94c..7a8c1870abe2 100644
--- a/polly/test/ScopInfo/assume_gep_bounds_2.ll
+++ b/polly/test/ScopInfo/assume_gep_bounds_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-scops -disable-output < %s \
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 \
; RUN: -polly-precise-inbounds | FileCheck %s
;
; void foo(float A[restrict][20], float B[restrict][20], long n, long m,
diff --git a/polly/test/ScopInfo/assume_gep_bounds_many.ll b/polly/test/ScopInfo/assume_gep_bounds_many.ll
index 261491564fc2..01fc12cd7f10 100644
--- a/polly/test/ScopInfo/assume_gep_bounds_many.ll
+++ b/polly/test/ScopInfo/assume_gep_bounds_many.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -disable-output -polly-print-scops -polly-ignore-aliasing \
-; RUN: < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -disable-output '-passes=print<polly-function-scops>' -polly-ignore-aliasing \
+; RUN: < %s 2>&1 | FileCheck %s
; CHECK: Assumed Context:
; CHECK-NEXT: [n1_a, n1_b, n1_c, n1_d, n2_a, n2_b, n2_c, n2_d, n3_a, n3_b, n3_c, n3_d, n4_a, n4_b, n4_c, n4_d, n5_a, n5_b, n5_c, n5_d, n6_a, n6_b, n6_c, n6_d, n7_a, n7_b, n7_c, n7_d, n8_a, n8_b, n8_c, n8_d, n9_a, n9_b, n9_c, n9_d, p1_b, p1_c, p1_d, p2_b, p2_c, p2_d, p3_b, p3_c, p3_d, p4_b, p4_c, p4_d, p5_b, p5_c, p5_d, p6_b, p6_c, p6_d, p7_b, p7_c, p7_d, p8_b, p8_c, p8_d, p9_b, p9_c, p9_d] -> { : p1_b >= n1_b and p1_c >= n1_c and p1_d >= n1_d and p2_b >= n2_b and p2_c >= n2_c and p2_d >= n2_d and p3_b >= n3_b and p3_c >= n3_c and p3_d >= n3_d and p4_b >= n4_b and p4_c >= n4_c and p4_d >= n4_d and p5_b >= n5_b and p5_c >= n5_c and p5_d >= n5_d and p6_b >= n6_b and p6_c >= n6_c and p6_d >= n6_d and p7_b >= n7_b and p7_c >= n7_c and p7_d >= n7_d and p8_b >= n8_b and p8_c >= n8_c and p8_d >= n8_d and p9_b >= n9_b and p9_c >= n9_c and p9_d >= n9_d }
diff --git a/polly/test/ScopInfo/avoid_new_parameters_from_geps.ll b/polly/test/ScopInfo/avoid_new_parameters_from_geps.ll
index 0e17eb1d3668..3fb7a1329c74 100644
--- a/polly/test/ScopInfo/avoid_new_parameters_from_geps.ll
+++ b/polly/test/ScopInfo/avoid_new_parameters_from_geps.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Check that we do not introduce a parameter here that is not actually needed.
;
diff --git a/polly/test/ScopInfo/bool-addrec.ll b/polly/test/ScopInfo/bool-addrec.ll
index 1924a4b5266b..81fcade08f65 100644
--- a/polly/test/ScopInfo/bool-addrec.ll
+++ b/polly/test/ScopInfo/bool-addrec.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -disable-output -polly-print-ast -polly-process-unprofitable < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -disable-output '-passes=print<polly-ast>' -polly-process-unprofitable < %s 2>&1 | FileCheck %s
; CHECK: for (int c0 = 0; c0 <= 19999; c0 += 1) {
; CHECK-NEXT: if (c0 % 2 == 0)
diff --git a/polly/test/ScopInfo/bounded_loop_assumptions.ll b/polly/test/ScopInfo/bounded_loop_assumptions.ll
index d472c7586c53..5628092de776 100644
--- a/polly/test/ScopInfo/bounded_loop_assumptions.ll
+++ b/polly/test/ScopInfo/bounded_loop_assumptions.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; The assumed context is tricky here as the equality test for the inner loop
; allows an "unbounded" loop trip count. We assume that does not happen, thus
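A hypothetical loop with such an equality exit (my sketch): with a step of 2 and the exit test j != m, an odd m would let the loop run forever, which is exactly the case the assumed context excludes.

    void f(long *A, long m) {
      for (long j = 0; j != m; j += 2) /* unbounded if m is odd */
        A[j] += 1;
    }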
diff --git a/polly/test/ScopInfo/branch-references-loop-scev-with-unknown-iterations-2.ll b/polly/test/ScopInfo/branch-references-loop-scev-with-unknown-iterations-2.ll
index 5c5f264aab60..83743e4e4ecc 100644
--- a/polly/test/ScopInfo/branch-references-loop-scev-with-unknown-iterations-2.ll
+++ b/polly/test/ScopInfo/branch-references-loop-scev-with-unknown-iterations-2.ll
@@ -1,7 +1,7 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | \
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | \
; RUN: FileCheck %s -check-prefix=DETECT
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | \
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | \
; RUN: FileCheck %s -check-prefix=SCOP
; DETECT: Valid Region for Scop: loop => barrier
diff --git a/polly/test/ScopInfo/branch-references-loop-scev-with-unknown-iterations-3.ll b/polly/test/ScopInfo/branch-references-loop-scev-with-unknown-iterations-3.ll
index d69d3a16c0d7..9685ba37a49a 100644
--- a/polly/test/ScopInfo/branch-references-loop-scev-with-unknown-iterations-3.ll
+++ b/polly/test/ScopInfo/branch-references-loop-scev-with-unknown-iterations-3.ll
@@ -1,7 +1,7 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | \
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | \
; RUN: FileCheck %s -check-prefix=NONAFFINE
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output \
-; RUN: -polly-allow-nonaffine-branches=false < %s | \
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output \
+; RUN: -polly-allow-nonaffine-branches=false < %s 2>&1 | \
; RUN: FileCheck %s -check-prefix=NO-NONEAFFINE
; NONAFFINE: Statements {
diff --git a/polly/test/ScopInfo/branch-references-loop-scev-with-unknown-iterations.ll b/polly/test/ScopInfo/branch-references-loop-scev-with-unknown-iterations.ll
index 57918fa5c92d..f41e6500fb30 100644
--- a/polly/test/ScopInfo/branch-references-loop-scev-with-unknown-iterations.ll
+++ b/polly/test/ScopInfo/branch-references-loop-scev-with-unknown-iterations.ll
@@ -1,7 +1,7 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | \
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | \
; RUN: FileCheck %s -check-prefix=NONAFFINE
-; RUN: opt %loadPolly -polly-print-scops -disable-output \
-; RUN: -polly-allow-nonaffine-branches=false < %s | \
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output \
+; RUN: -polly-allow-nonaffine-branches=false < %s 2>&1 | \
; RUN: FileCheck %s -check-prefix=NO-NONEAFFINE
; NONAFFINE-NOT: Statements
diff --git a/polly/test/ScopInfo/bug_2010_10_22.ll b/polly/test/ScopInfo/bug_2010_10_22.ll
index 7ba996b6d0f1..71e7051922b5 100644
--- a/polly/test/ScopInfo/bug_2010_10_22.ll
+++ b/polly/test/ScopInfo/bug_2010_10_22.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-scops -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/polly/test/ScopInfo/bug_2011_1_5.ll b/polly/test/ScopInfo/bug_2011_1_5.ll
index 95c25f9d9cdb..f4a24e06f46a 100644
--- a/polly/test/ScopInfo/bug_2011_1_5.ll
+++ b/polly/test/ScopInfo/bug_2011_1_5.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-scops -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s
; Bug description: Alias Analysis thinks IntToPtrInst aliases with alloca instructions created by IndependentBlocks Pass.
; This will trigger the assertion when we are verifying the SCoP after IndependentBlocks.
diff --git a/polly/test/ScopInfo/bug_scev_not_fully_eval.ll b/polly/test/ScopInfo/bug_scev_not_fully_eval.ll
index 89d5f318829e..ed6bbafdac1f 100644
--- a/polly/test/ScopInfo/bug_scev_not_fully_eval.ll
+++ b/polly/test/ScopInfo/bug_scev_not_fully_eval.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | not FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | not FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
@edge.8265 = external global [72 x i32], align 32 ; <ptr> [#uses=1]
diff --git a/polly/test/ScopInfo/cfg_consequences.ll b/polly/test/ScopInfo/cfg_consequences.ll
index 84f94b135735..9161d3db4167 100644
--- a/polly/test/ScopInfo/cfg_consequences.ll
+++ b/polly/test/ScopInfo/cfg_consequences.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void consequences(int *A, int bool_cond, int lhs, int rhs) {
;
diff --git a/polly/test/ScopInfo/complex-branch-structure.ll b/polly/test/ScopInfo/complex-branch-structure.ll
index 24ebdcf213f8..de79c2226e68 100644
--- a/polly/test/ScopInfo/complex-branch-structure.ll
+++ b/polly/test/ScopInfo/complex-branch-structure.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -pass-remarks-analysis="polly-scops" -polly-print-scops \
+; RUN: opt %loadNPMPolly -pass-remarks-analysis="polly-scops" '-passes=print<polly-function-scops>' \
; RUN: -disable-output < %s 2>&1 | FileCheck %s
; We build a scop of the following form to check that the domain construction
diff --git a/polly/test/ScopInfo/complex-condition.ll b/polly/test/ScopInfo/complex-condition.ll
index 31d34b033725..c3b8d2bb0ef8 100644
--- a/polly/test/ScopInfo/complex-condition.ll
+++ b/polly/test/ScopInfo/complex-condition.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -pass-remarks-analysis="polly-scops" -polly-print-scops \
+; RUN: opt %loadNPMPolly -pass-remarks-analysis="polly-scops" '-passes=print<polly-function-scops>' \
; RUN: -polly-invariant-load-hoisting=true \
; RUN: -disable-output < %s 2>&1 | FileCheck %s
;
diff --git a/polly/test/ScopInfo/complex-expression.ll b/polly/test/ScopInfo/complex-expression.ll
index 1822c9de852a..6a6dde62d1ae 100644
--- a/polly/test/ScopInfo/complex-expression.ll
+++ b/polly/test/ScopInfo/complex-expression.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -pass-remarks-analysis="polly-scops" -polly-print-scops \
+; RUN: opt %loadNPMPolly -pass-remarks-analysis="polly-scops" '-passes=print<polly-function-scops>' \
; RUN: -polly-invariant-load-hoisting=true \
; RUN: -disable-output < %s 2>&1 | FileCheck %s
;
diff --git a/polly/test/ScopInfo/complex-loop-nesting.ll b/polly/test/ScopInfo/complex-loop-nesting.ll
index 97a9bfd939d5..36cb078f19ff 100644
--- a/polly/test/ScopInfo/complex-loop-nesting.ll
+++ b/polly/test/ScopInfo/complex-loop-nesting.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; The SCoP contains a loop with multiple exit blocks (BBs after leaving
; the loop). The current implementation of deriving their domain derives
diff --git a/polly/test/ScopInfo/complex-successor-structure-2.ll b/polly/test/ScopInfo/complex-successor-structure-2.ll
index 6bb7bb14a8cc..f4a78bf75385 100644
--- a/polly/test/ScopInfo/complex-successor-structure-2.ll
+++ b/polly/test/ScopInfo/complex-successor-structure-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -pass-remarks-analysis="polly-scops" -polly-print-scops \
+; RUN: opt %loadNPMPolly -pass-remarks-analysis="polly-scops" '-passes=print<polly-function-scops>' \
; RUN: -polly-invariant-load-hoisting=true \
; RUN: -disable-output < %s 2>&1 | FileCheck %s
diff --git a/polly/test/ScopInfo/complex-successor-structure-3.ll b/polly/test/ScopInfo/complex-successor-structure-3.ll
index 14c3fc1babeb..6da1fe3a8b9f 100644
--- a/polly/test/ScopInfo/complex-successor-structure-3.ll
+++ b/polly/test/ScopInfo/complex-successor-structure-3.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -disable-output -polly-print-scops \
-; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -disable-output '-passes=print<polly-function-scops>' \
+; RUN: -polly-invariant-load-hoisting=true < %s 2>&1 | FileCheck %s
;
; Check that propagation of domains from A(X) to A(X+1) will keep the
; domains small and concise.
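The blow-up this guards against looks roughly like a chain of conditionals (assumed illustration): each stage doubles the number of control-flow paths, so naively building the domain of A(X+1) from A(X) grows it exponentially unless it is simplified along the way.

    void f(float *A, int c0, int c1, int c2) {
      float v = 0.0f;
      if (c0) v += 1.0f; else v += 2.0f; /* stage A(0) */
      if (c1) v += 3.0f; else v += 4.0f; /* stage A(1): domain from A(0) */
      if (c2) v += 5.0f; else v += 6.0f; /* stage A(2): domain from A(1) */
      A[0] = v;
    }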
diff --git a/polly/test/ScopInfo/complex-successor-structure.ll b/polly/test/ScopInfo/complex-successor-structure.ll
index 364344045a6a..6c87ba3e9850 100644
--- a/polly/test/ScopInfo/complex-successor-structure.ll
+++ b/polly/test/ScopInfo/complex-successor-structure.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -pass-remarks-analysis="polly-scops" -polly-print-scops \
+; RUN: opt %loadNPMPolly -pass-remarks-analysis="polly-scops" '-passes=print<polly-function-scops>' \
; RUN: -polly-invariant-load-hoisting=true \
; RUN: -disable-output < %s 2>&1 | FileCheck %s
diff --git a/polly/test/ScopInfo/complex_domain_binary_condition.ll b/polly/test/ScopInfo/complex_domain_binary_condition.ll
index cec26855debb..6091e3be4560 100644
--- a/polly/test/ScopInfo/complex_domain_binary_condition.ll
+++ b/polly/test/ScopInfo/complex_domain_binary_condition.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -pass-remarks-analysis="polly-scops" -polly-scops \
+; RUN: opt %loadNPMPolly -pass-remarks-analysis="polly-scops" '-passes=print<polly-function-scops>' \
; RUN: -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Low complexity assumption: { : false }
diff --git a/polly/test/ScopInfo/complex_execution_context.ll b/polly/test/ScopInfo/complex_execution_context.ll
index 164254308fa9..9880a1dd67d1 100644
--- a/polly/test/ScopInfo/complex_execution_context.ll
+++ b/polly/test/ScopInfo/complex_execution_context.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -pass-remarks-analysis="polly-scops" -polly-print-scops \
+; RUN: opt %loadNPMPolly -pass-remarks-analysis="polly-scops" '-passes=print<polly-function-scops>' \
; RUN: -polly-invariant-load-hoisting=true \
; RUN: -disable-output < %s 2>&1 | FileCheck %s
;
diff --git a/polly/test/ScopInfo/cond_constant_in_loop.ll b/polly/test/ScopInfo/cond_constant_in_loop.ll
index ef7d857e1084..552fddc6ff08 100644
--- a/polly/test/ScopInfo/cond_constant_in_loop.ll
+++ b/polly/test/ScopInfo/cond_constant_in_loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;void f(long a[], long N, long M) {
; long i, j, k;
diff --git a/polly/test/ScopInfo/cond_in_loop.ll b/polly/test/ScopInfo/cond_in_loop.ll
index 2d435f6a6a93..c06dcd955bac 100644
--- a/polly/test/ScopInfo/cond_in_loop.ll
+++ b/polly/test/ScopInfo/cond_in_loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;void f(long a[], long N, long M) {
; long i, j, k;
diff --git a/polly/test/ScopInfo/condition-after-error-block-2.ll b/polly/test/ScopInfo/condition-after-error-block-2.ll
index 695d864e483c..8c4b2170ad69 100644
--- a/polly/test/ScopInfo/condition-after-error-block-2.ll
+++ b/polly/test/ScopInfo/condition-after-error-block-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; Verify that we do not allow PHI nodes such as %phi if they reference an error
; block and are used by anything other than a terminator instruction.
diff --git a/polly/test/ScopInfo/condition-after-error-block-before-scop.ll b/polly/test/ScopInfo/condition-after-error-block-before-scop.ll
index 184be3642f0c..d5069da916fa 100644
--- a/polly/test/ScopInfo/condition-after-error-block-before-scop.ll
+++ b/polly/test/ScopInfo/condition-after-error-block-before-scop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s 2>&1 | FileCheck %s
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/polly/test/ScopInfo/condtion-after-error-block.ll b/polly/test/ScopInfo/condtion-after-error-block.ll
index 92e743e2d879..d9de4fc40a20 100644
--- a/polly/test/ScopInfo/condtion-after-error-block.ll
+++ b/polly/test/ScopInfo/condtion-after-error-block.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; Verify that we allow scops containing uniform branch conditions, where all
; but one incoming block comes from an error condition.
diff --git a/polly/test/ScopInfo/const_srem_sdiv.ll b/polly/test/ScopInfo/const_srem_sdiv.ll
index 3acca980da70..b4c2f119fe05 100644
--- a/polly/test/ScopInfo/const_srem_sdiv.ll
+++ b/polly/test/ScopInfo/const_srem_sdiv.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output \
-; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output \
+; RUN: -polly-invariant-load-hoisting=true < %s 2>&1 | FileCheck %s
;
; See http://research.microsoft.com/pubs/151917/divmodnote-letter.pdf
;
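As a reminder of why that note matters here (my summary, not part of the patch): C and LLVM's sdiv/srem truncate toward zero, while the polyhedral side prefers floored division, and the two conventions disagree for negative operands.

    /* truncating division, as in C and LLVM sdiv/srem: */
    int q = -7 / 2;  /* -3 */
    int r = -7 % 2;  /* -1, so -7 == 2 * (-3) + (-1) */
    /* floored division differs for negative dividends: */
    int qf = -4;     /* floor(-7.0 / 2.0) */
    int rf = 1;      /* -7 - 2 * (-4) */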
diff --git a/polly/test/ScopInfo/constant-non-integer-branch-condition.ll b/polly/test/ScopInfo/constant-non-integer-branch-condition.ll
index fc95a4cc7891..42c3b83d47f1 100644
--- a/polly/test/ScopInfo/constant-non-integer-branch-condition.ll
+++ b/polly/test/ScopInfo/constant-non-integer-branch-condition.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; At some point this caused a problem in the domain generation as we
; assumed any constant branch condition to be valid. However, only constant
diff --git a/polly/test/ScopInfo/constant_factor_in_parameter.ll b/polly/test/ScopInfo/constant_factor_in_parameter.ll
index 1f0173c0edf9..b58d413e074e 100644
--- a/polly/test/ScopInfo/constant_factor_in_parameter.ll
+++ b/polly/test/ScopInfo/constant_factor_in_parameter.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -disable-output -polly-print-scops < %s | FileCheck %s
-; RUN: opt %loadPolly -disable-output -polly-print-function-scops < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -disable-output '-passes=print<polly-function-scops>' < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -disable-output '-passes=print<polly-function-scops>' < %s 2>&1 | FileCheck %s
;
; Check that the constant part of the N * M * 4 expression is not part of the
; parameter but explicit in the access function. This can avoid existentially
diff --git a/polly/test/ScopInfo/constant_functions_outside_scop_as_unknown.ll b/polly/test/ScopInfo/constant_functions_outside_scop_as_unknown.ll
index 38b2b8958e2f..62e6cd4641de 100644
--- a/polly/test/ScopInfo/constant_functions_outside_scop_as_unknown.ll
+++ b/polly/test/ScopInfo/constant_functions_outside_scop_as_unknown.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-process-unprofitable -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-process-unprofitable '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
diff --git a/polly/test/ScopInfo/constant_start_integer.ll b/polly/test/ScopInfo/constant_start_integer.ll
index aa6640c98f73..8991f8250f0b 100644
--- a/polly/test/ScopInfo/constant_start_integer.ll
+++ b/polly/test/ScopInfo/constant_start_integer.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; void foo(float *input) {
diff --git a/polly/test/ScopInfo/debug_call.ll b/polly/test/ScopInfo/debug_call.ll
index 93b5bc520a00..a6761ecebe6a 100644
--- a/polly/test/ScopInfo/debug_call.ll
+++ b/polly/test/ScopInfo/debug_call.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-debug-func=dbg_printf -polly-print-scops -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-debug-func=dbg_printf '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -match-full-lines
;
; Check that the call to dbg_printf is accepted as a debug-function.
;
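A plausible source for such a test (my sketch; only dbg_printf comes from the test itself): -polly-debug-func names a function whose calls are tolerated inside the region instead of disqualifying it.

    void dbg_printf(const char *fmt, ...); /* prototype assumed */

    void f(int *A, int n) {
      for (int i = 0; i < n; i++) {
        dbg_printf("i = %d\n", i); /* accepted as a debug function */
        A[i] = 2 * i;
      }
    }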
diff --git a/polly/test/ScopInfo/delinearize-together-all-data-refs.ll b/polly/test/ScopInfo/delinearize-together-all-data-refs.ll
index 108392b27f07..676c8a27e574 100644
--- a/polly/test/ScopInfo/delinearize-together-all-data-refs.ll
+++ b/polly/test/ScopInfo/delinearize-together-all-data-refs.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; void foo(long n, long m, long o, double A[n][m][o]) {
; for (long i = 0; i < n-3; i++)
diff --git a/polly/test/ScopInfo/div_by_zero.ll b/polly/test/ScopInfo/div_by_zero.ll
index 2205b85a9ebc..aecd16833b84 100644
--- a/polly/test/ScopInfo/div_by_zero.ll
+++ b/polly/test/ScopInfo/div_by_zero.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(int *A, int N) {
; for (int i = 0; i < N; i++)
diff --git a/polly/test/ScopInfo/do-not-model-error-block-accesses.ll b/polly/test/ScopInfo/do-not-model-error-block-accesses.ll
index 997e0d4b37cf..baa423f40780 100644
--- a/polly/test/ScopInfo/do-not-model-error-block-accesses.ll
+++ b/polly/test/ScopInfo/do-not-model-error-block-accesses.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s
; Check that we do not crash on this input. Earlier this indeed crashed as
; we tried to model the access functions in an error block.
diff --git a/polly/test/ScopInfo/eager-binary-and-or-conditions.ll b/polly/test/ScopInfo/eager-binary-and-or-conditions.ll
index e9ad63c51b85..a988b3f8c2b0 100644
--- a/polly/test/ScopInfo/eager-binary-and-or-conditions.ll
+++ b/polly/test/ScopInfo/eager-binary-and-or-conditions.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output< %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -disable-output < %s
;
; void or(float *A, long n, long m) {
; for (long i = 0; i < 100; i++) {
diff --git a/polly/test/ScopInfo/early_exit_for_complex_domains.ll b/polly/test/ScopInfo/early_exit_for_complex_domains.ll
index a72ea031c236..eed19b3214a7 100644
--- a/polly/test/ScopInfo/early_exit_for_complex_domains.ll
+++ b/polly/test/ScopInfo/early_exit_for_complex_domains.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-scops -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s
;
; Check we do not crash.
;
diff --git a/polly/test/ScopInfo/error-blocks-1.ll b/polly/test/ScopInfo/error-blocks-1.ll
index 03353edf297a..047b095a9594 100644
--- a/polly/test/ScopInfo/error-blocks-1.ll
+++ b/polly/test/ScopInfo/error-blocks-1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Context:
; CHECK-NEXT: [N] -> { : -2147483648 <= N <= 2147483647 }
diff --git a/polly/test/ScopInfo/error-blocks-2.ll b/polly/test/ScopInfo/error-blocks-2.ll
index 29095dacacfb..6fa12947540c 100644
--- a/polly/test/ScopInfo/error-blocks-2.ll
+++ b/polly/test/ScopInfo/error-blocks-2.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output \
-; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output \
+; RUN: -polly-invariant-load-hoisting=true < %s 2>&1 | FileCheck %s
;
; CHECK: Invariant Accesses: {
; CHECK-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 0]
diff --git a/polly/test/ScopInfo/escaping_empty_scop.ll b/polly/test/ScopInfo/escaping_empty_scop.ll
index 8837e19eefe4..2efaef3fb99b 100644
--- a/polly/test/ScopInfo/escaping_empty_scop.ll
+++ b/polly/test/ScopInfo/escaping_empty_scop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void g();
; int f(int *A) {
diff --git a/polly/test/ScopInfo/exit-phi-1.ll b/polly/test/ScopInfo/exit-phi-1.ll
index 8e6c5fb9e211..cbd6c280e8ca 100644
--- a/polly/test/ScopInfo/exit-phi-1.ll
+++ b/polly/test/ScopInfo/exit-phi-1.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-codegen -S < %s | FileCheck %s --check-prefix=CODEGEN
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -passes=polly-codegen -S < %s 2>&1 | FileCheck %s --check-prefix=CODEGEN
;
; Check for correct code generation of exit PHIs, even if the same PHI value
; is used again inside the SCoP.
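An illustrative shape (assumed, not the test's verbatim input): the value that reaches the exit PHI is also consumed inside the region, and codegen must keep both uses consistent.

    long f(long *A, long n) {
      long sum = 0;
      for (long i = 0; i < n; i++) {
        sum += A[i]; /* 'sum' is used again inside the SCoP ... */
        A[i] = sum;
      }
      return sum;    /* ... and feeds the exit PHI */
    }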
diff --git a/polly/test/ScopInfo/exit-phi-2.ll b/polly/test/ScopInfo/exit-phi-2.ll
index d218d5fa039b..695c617b14c1 100644
--- a/polly/test/ScopInfo/exit-phi-2.ll
+++ b/polly/test/ScopInfo/exit-phi-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Check that there is no MK_ExitPHI READ access.
;
diff --git a/polly/test/ScopInfo/exit_phi_accesses-2.ll b/polly/test/ScopInfo/exit_phi_accesses-2.ll
index e376f0df9d54..b3b7cb1c6599 100644
--- a/polly/test/ScopInfo/exit_phi_accesses-2.ll
+++ b/polly/test/ScopInfo/exit_phi_accesses-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; CHECK-LABEL: Function: foo
;
diff --git a/polly/test/ScopInfo/exit_phi_accesses.ll b/polly/test/ScopInfo/exit_phi_accesses.ll
index f4fbe31f6b24..77b038ec8e4a 100644
--- a/polly/test/ScopInfo/exit_phi_accesses.ll
+++ b/polly/test/ScopInfo/exit_phi_accesses.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; Check that PHI nodes only create PHI accesses and nothing else (e.g. unnecessary
; SCALAR accesses). In this case, the PHI is in the exit node, hence there is no
diff --git a/polly/test/ScopInfo/expensive-boundary-context.ll b/polly/test/ScopInfo/expensive-boundary-context.ll
index 7001b96acd21..1a8858d8fce2 100644
--- a/polly/test/ScopInfo/expensive-boundary-context.ll
+++ b/polly/test/ScopInfo/expensive-boundary-context.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output \
-; RUN: < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output \
+; RUN: < %s 2>&1 | FileCheck %s
; CHECK-NOT: Assumed Context:
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/ScopInfo/extract_constant_factor_introduces_new_parameter.ll b/polly/test/ScopInfo/extract_constant_factor_introduces_new_parameter.ll
index 89ca344fdf54..5e833e7ae0f4 100644
--- a/polly/test/ScopInfo/extract_constant_factor_introduces_new_parameter.ll
+++ b/polly/test/ScopInfo/extract_constant_factor_introduces_new_parameter.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-scops -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s
; CHECK: Valid Region for Scop: bb10 => bb16
diff --git a/polly/test/ScopInfo/full-function.ll b/polly/test/ScopInfo/full-function.ll
index 670472576fe7..596c3d0af66a 100644
--- a/polly/test/ScopInfo/full-function.ll
+++ b/polly/test/ScopInfo/full-function.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output -polly-detect-full-functions < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output -polly-detect-full-functions < %s 2>&1 \
; RUN: | FileCheck %s -check-prefix=FULL
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 \
; RUN: | FileCheck %s -check-prefix=WITHOUT-FULL
; FULL: Region: %bb---FunctionExit
diff --git a/polly/test/ScopInfo/granularity_same_name.ll b/polly/test/ScopInfo/granularity_same_name.ll
index 1ebf5c6f71a2..17f75fbf8a97 100644
--- a/polly/test/ScopInfo/granularity_same_name.ll
+++ b/polly/test/ScopInfo/granularity_same_name.ll
@@ -1,7 +1,7 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-use-llvm-names=0 -polly-print-scops -disable-output < %s | FileCheck %s -match-full-lines -check-prefix=IDX
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-use-llvm-names=1 -polly-print-scops -disable-output < %s | FileCheck %s -match-full-lines -check-prefix=BB
-; RUN: opt %loadPolly -polly-stmt-granularity=scalar-indep -polly-use-llvm-names=0 -polly-print-scops -disable-output < %s | FileCheck %s -match-full-lines -check-prefix=IDX
-; RUN: opt %loadPolly -polly-stmt-granularity=scalar-indep -polly-use-llvm-names=1 -polly-print-scops -disable-output < %s | FileCheck %s -match-full-lines -check-prefix=BB
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-use-llvm-names=0 '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -match-full-lines -check-prefix=IDX
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-use-llvm-names=1 '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -match-full-lines -check-prefix=BB
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=scalar-indep -polly-use-llvm-names=0 '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -match-full-lines -check-prefix=IDX
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=scalar-indep -polly-use-llvm-names=1 '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -match-full-lines -check-prefix=BB
;
; Check that the statement has the same name, regardless of how the
; basic block is split into multiple statements.
diff --git a/polly/test/ScopInfo/granularity_scalar-indep.ll b/polly/test/ScopInfo/granularity_scalar-indep.ll
index fe509b468272..5c4484f9d457 100644
--- a/polly/test/ScopInfo/granularity_scalar-indep.ll
+++ b/polly/test/ScopInfo/granularity_scalar-indep.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=scalar-indep -polly-print-instructions -polly-print-scops -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=scalar-indep -polly-print-instructions '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -match-full-lines
;
; Split a block into two independent statements that share no scalar.
; This case has the instructions of the two statements interleaved, such that
diff --git a/polly/test/ScopInfo/granularity_scalar-indep_cross-referencing-phi1.ll b/polly/test/ScopInfo/granularity_scalar-indep_cross-referencing-phi1.ll
index 56bc11aed28d..7ae0d961b38f 100644
--- a/polly/test/ScopInfo/granularity_scalar-indep_cross-referencing-phi1.ll
+++ b/polly/test/ScopInfo/granularity_scalar-indep_cross-referencing-phi1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=scalar-indep -polly-print-instructions -polly-print-scops -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=scalar-indep -polly-print-instructions '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -match-full-lines
;
; Two PHIs, cross-referencing each other. The PHI READs must be carried out
; before the PHI WRITEs to ensure that the value when entering the block is
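A C loop that lowers to exactly this pattern after mem2reg (illustrative assumption): the two header PHIs each take the other's previous value, so both must be read before either is written.

    void f(int *A, int n) {
      int a = 0, b = 1;
      for (int i = 0; i < n; i++) {
        int t = a; /* read both current values first ... */
        a = b;     /* ... then write: a and b swap roles */
        b = t;
        A[i] = a;
      }
    }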
diff --git a/polly/test/ScopInfo/granularity_scalar-indep_cross-referencing-phi2.ll b/polly/test/ScopInfo/granularity_scalar-indep_cross-referencing-phi2.ll
index f46cf4e6a0a2..7839e51c163a 100644
--- a/polly/test/ScopInfo/granularity_scalar-indep_cross-referencing-phi2.ll
+++ b/polly/test/ScopInfo/granularity_scalar-indep_cross-referencing-phi2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=scalar-indep -polly-print-instructions -polly-print-scops -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=scalar-indep -polly-print-instructions '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -match-full-lines
;
; Two PHIs, cross-referencing each other. The PHI READs must be carried out
; before the PHI WRITEs to ensure that the value when entering the block is
diff --git a/polly/test/ScopInfo/granularity_scalar-indep_epilogue.ll b/polly/test/ScopInfo/granularity_scalar-indep_epilogue.ll
index e202e38f0844..8643e85e0559 100644
--- a/polly/test/ScopInfo/granularity_scalar-indep_epilogue.ll
+++ b/polly/test/ScopInfo/granularity_scalar-indep_epilogue.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=scalar-indep -polly-print-instructions -polly-print-scops -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=scalar-indep -polly-print-instructions '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -match-full-lines
;
; Split a block into two independent statements that share no scalar.
; This case has an independent statement just for PHI writes.
diff --git a/polly/test/ScopInfo/granularity_scalar-indep_epilogue_last.ll b/polly/test/ScopInfo/granularity_scalar-indep_epilogue_last.ll
index 40af34bfb067..bc71cbe45cd9 100644
--- a/polly/test/ScopInfo/granularity_scalar-indep_epilogue_last.ll
+++ b/polly/test/ScopInfo/granularity_scalar-indep_epilogue_last.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=scalar-indep -polly-print-instructions -polly-print-scops -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=scalar-indep -polly-print-instructions '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -match-full-lines
;
; Check that the PHI Write of a value that is defined in the same basic
; block is in the statement where it is defined.
diff --git a/polly/test/ScopInfo/granularity_scalar-indep_noepilogue.ll b/polly/test/ScopInfo/granularity_scalar-indep_noepilogue.ll
index 9a0d207c0c2a..f3864bac519b 100644
--- a/polly/test/ScopInfo/granularity_scalar-indep_noepilogue.ll
+++ b/polly/test/ScopInfo/granularity_scalar-indep_noepilogue.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=scalar-indep -polly-print-instructions -polly-print-scops -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=scalar-indep -polly-print-instructions '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -match-full-lines
;
; This case has no explicit epilogue for PHI writes because it would
; have a scalar dependency on the previous statement.
diff --git a/polly/test/ScopInfo/granularity_scalar-indep_ordered-2.ll b/polly/test/ScopInfo/granularity_scalar-indep_ordered-2.ll
index d093806bc9cc..43101a8a0abf 100644
--- a/polly/test/ScopInfo/granularity_scalar-indep_ordered-2.ll
+++ b/polly/test/ScopInfo/granularity_scalar-indep_ordered-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=scalar-indep -polly-print-instructions -polly-print-scops -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=scalar-indep -polly-print-instructions '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -match-full-lines
;
; This case should be split into two statements because {X[0], Y[0]}
; and {A[0], B[0]} do not intersect.
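The access sets named above suggest a shape like this (my reconstruction, not the verbatim kernel): the {A, X} and {B, Y} halves touch disjoint memory and share no scalar, so they can become separate statements.

    void f(double *A, double *B, double *X, double *Y) {
      double a = A[0]; /* statement 1: reads A[0] ...  */
      double b = B[0]; /* statement 2: reads B[0] ...  */
      X[0] = a * 2.0;  /* statement 1: ... writes X[0] */
      Y[0] = b * 2.0;  /* statement 2: ... writes Y[0] */
    }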
diff --git a/polly/test/ScopInfo/granularity_scalar-indep_ordered.ll b/polly/test/ScopInfo/granularity_scalar-indep_ordered.ll
index b1d2936882aa..4974f7e9b28c 100644
--- a/polly/test/ScopInfo/granularity_scalar-indep_ordered.ll
+++ b/polly/test/ScopInfo/granularity_scalar-indep_ordered.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=scalar-indep -polly-print-instructions -polly-print-scops -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=scalar-indep -polly-print-instructions '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -match-full-lines
;
; This case cannot be split into two statements because the order of
; loads and stores would be violated.
diff --git a/polly/test/ScopInfo/i1_params.ll b/polly/test/ScopInfo/i1_params.ll
index 1cb1329b08f9..be3e28737201 100644
--- a/polly/test/ScopInfo/i1_params.ll
+++ b/polly/test/ScopInfo/i1_params.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Check that both a sign-extended and a zero-extended i1 parameter
; are represented correctly.
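Concretely (a sketch of the semantics, not the test input): a sign-extended i1 ranges over {-1, 0}, a zero-extended i1 over {0, 1}, and the modeled parameter bounds must reflect that.

    void f(int *A, _Bool p) {
      int s = p ? -1 : 0; /* sext i1 to i32: 0 or -1 */
      int z = p ?  1 : 0; /* zext i1 to i32: 0 or  1 */
      A[16 + s] = z;
    }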
diff --git a/polly/test/ScopInfo/infeasible-rtc.ll b/polly/test/ScopInfo/infeasible-rtc.ll
index ef96627e640e..7a0bfe0fa4d8 100644
--- a/polly/test/ScopInfo/infeasible-rtc.ll
+++ b/polly/test/ScopInfo/infeasible-rtc.ll
@@ -1,7 +1,7 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 \
; RUN: | FileCheck %s -check-prefix=DETECT
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 \
; RUN: | FileCheck %s -check-prefix=SCOPS
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
diff --git a/polly/test/ScopInfo/infeasible_invalid_context.ll b/polly/test/ScopInfo/infeasible_invalid_context.ll
index 2c299f06c12e..006901ab05b7 100644
--- a/polly/test/ScopInfo/infeasible_invalid_context.ll
+++ b/polly/test/ScopInfo/infeasible_invalid_context.ll
@@ -1,7 +1,7 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 \
; RUN: | FileCheck %s -check-prefix=DETECT
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 \
; RUN: | FileCheck %s -check-prefix=SCOPS
; DETECT: Valid Region for Scop: if.end116 => for.inc216
diff --git a/polly/test/ScopInfo/int2ptr_ptr2int.ll b/polly/test/ScopInfo/int2ptr_ptr2int.ll
index 9fadc5a8eb28..f6668ecdd089 100644
--- a/polly/test/ScopInfo/int2ptr_ptr2int.ll
+++ b/polly/test/ScopInfo/int2ptr_ptr2int.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s --check-prefix=IR
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen < %s 2>&1 | FileCheck %s --check-prefix=IR
;
; void f(long *A, long *ptr, long val) {
; for (long i = 0; i < 100; i++) {
diff --git a/polly/test/ScopInfo/int2ptr_ptr2int_2.ll b/polly/test/ScopInfo/int2ptr_ptr2int_2.ll
index 97878f7091b1..361bf5a95761 100644
--- a/polly/test/ScopInfo/int2ptr_ptr2int_2.ll
+++ b/polly/test/ScopInfo/int2ptr_ptr2int_2.ll
@@ -1,7 +1,7 @@
-; RUN: opt %loadPolly -polly-print-scops \
-; RUN: -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -S -polly-codegen \
-; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s --check-prefix=IR
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' \
+; RUN: -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -S -passes=polly-codegen \
+; RUN: -polly-invariant-load-hoisting=true < %s 2>&1 | FileCheck %s --check-prefix=IR
;
; void f(long *A, long *B, long *ptr, long val) {
; for (long i = 0; i < 100; i++) {
diff --git a/polly/test/ScopInfo/integers.ll b/polly/test/ScopInfo/integers.ll
index b608bf84cffa..4f6d1117e2bc 100644
--- a/polly/test/ScopInfo/integers.ll
+++ b/polly/test/ScopInfo/integers.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; Check that we correctly convert integers to isl values.
diff --git a/polly/test/ScopInfo/inter-error-bb-dependence.ll b/polly/test/ScopInfo/inter-error-bb-dependence.ll
index 4e23de7e6a99..761fcbbe3435 100644
--- a/polly/test/ScopInfo/inter-error-bb-dependence.ll
+++ b/polly/test/ScopInfo/inter-error-bb-dependence.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -pass-remarks-analysis="polly-scops" -polly-print-scops -disable-output < %s 2>&1 > /dev/null | FileCheck %s
+; RUN: opt %loadNPMPolly -pass-remarks-analysis="polly-scops" '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 > /dev/null | FileCheck %s
;
; Error statements (%bb33) do not require their uses to be verified.
; In this case, it uses %tmp32 from %bb31, which is not available because
diff --git a/polly/test/ScopInfo/inter_bb_scalar_dep.ll b/polly/test/ScopInfo/inter_bb_scalar_dep.ll
index 456f7a773f04..7313618b082b 100644
--- a/polly/test/ScopInfo/inter_bb_scalar_dep.ll
+++ b/polly/test/ScopInfo/inter_bb_scalar_dep.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-scops \
-; RUN: -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-function-scops>' \
+; RUN: -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
; void f(long A[], int N, int *init_ptr) {
; long i, j;
diff --git a/polly/test/ScopInfo/intra-non-affine-stmt-phi-node.ll b/polly/test/ScopInfo/intra-non-affine-stmt-phi-node.ll
index 859972b27402..d2ed3c17fe9d 100644
--- a/polly/test/ScopInfo/intra-non-affine-stmt-phi-node.ll
+++ b/polly/test/ScopInfo/intra-non-affine-stmt-phi-node.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output \
-; RUN: < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output \
+; RUN: < %s 2>&1 | FileCheck %s
; CHECK: Statements {
; CHECK-NEXT: Stmt_loop__TO__backedge
diff --git a/polly/test/ScopInfo/intra_and_inter_bb_scalar_dep.ll b/polly/test/ScopInfo/intra_and_inter_bb_scalar_dep.ll
index 37f4e0513ed3..b3286cd2a724 100644
--- a/polly/test/ScopInfo/intra_and_inter_bb_scalar_dep.ll
+++ b/polly/test/ScopInfo/intra_and_inter_bb_scalar_dep.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-scops -disable-output \
-; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-function-scops>' -disable-output \
+; RUN: -polly-invariant-load-hoisting=true < %s 2>&1 | FileCheck %s
; void f(long A[], int N, int *init_ptr) {
; long i, j;
diff --git a/polly/test/ScopInfo/intra_bb_scalar_dep.ll b/polly/test/ScopInfo/intra_bb_scalar_dep.ll
index 0252273d3107..86855e7499a5 100644
--- a/polly/test/ScopInfo/intra_bb_scalar_dep.ll
+++ b/polly/test/ScopInfo/intra_bb_scalar_dep.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-scops -disable-output \
-; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-function-scops>' -disable-output \
+; RUN: -polly-invariant-load-hoisting=true < %s 2>&1 | FileCheck %s
; void f(long A[], int N, int *init_ptr) {
; long i, j;
diff --git a/polly/test/ScopInfo/intrinsics.ll b/polly/test/ScopInfo/intrinsics.ll
index 853429341381..c5bbacbe6d8c 100644
--- a/polly/test/ScopInfo/intrinsics.ll
+++ b/polly/test/ScopInfo/intrinsics.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-print-instructions -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-print-instructions -disable-output < %s 2>&1 | FileCheck %s
;
; Verify that we remove the ignored intrinsics from the instruction list.
;
diff --git a/polly/test/ScopInfo/invalid_add_rec_after_invariant_load_remapping.ll b/polly/test/ScopInfo/invalid_add_rec_after_invariant_load_remapping.ll
index 8d0de03e9866..723942668d8c 100644
--- a/polly/test/ScopInfo/invalid_add_rec_after_invariant_load_remapping.ll
+++ b/polly/test/ScopInfo/invalid_add_rec_after_invariant_load_remapping.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s
;
; This crashed at some point: we place %1 and %4 in the same equivalence class
; for invariant loads, and when we remap SCEVs to use %4 instead of %1, AddRec SCEVs
diff --git a/polly/test/ScopInfo/invalidate_iterator_during_MA_removal.ll b/polly/test/ScopInfo/invalidate_iterator_during_MA_removal.ll
index dcb0ad301ba3..c493c22af32d 100644
--- a/polly/test/ScopInfo/invalidate_iterator_during_MA_removal.ll
+++ b/polly/test/ScopInfo/invalidate_iterator_during_MA_removal.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s
;
; Check that no invalidated iterator is accessed while elements from
; the list of MemoryAccesses are removed.
diff --git a/polly/test/ScopInfo/invariant-load-instlist.ll b/polly/test/ScopInfo/invariant-load-instlist.ll
index 7f4cf050f064..ecb80e4054c3 100644
--- a/polly/test/ScopInfo/invariant-load-instlist.ll
+++ b/polly/test/ScopInfo/invariant-load-instlist.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s
; The load is a required invariant load and at the same time used in a store.
; Polly used to add two MemoryAccesses for it, which caused an assertion to fail.
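A shape that triggers the described double-counting (assumed illustration): the same load both must be hoisted, since it defines the loop bound, and is an ordinary read feeding a store.

    void f(long *A, long *P) {
      for (long i = 0; i < *P; i++) /* *P: required invariant load */
        A[i] = *P;                  /* *P: also read for the store */
    }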
diff --git a/polly/test/ScopInfo/invariant-loads-leave-read-only-statements.ll b/polly/test/ScopInfo/invariant-loads-leave-read-only-statements.ll
index b97fe22e076e..89eac6ce69a1 100644
--- a/polly/test/ScopInfo/invariant-loads-leave-read-only-statements.ll
+++ b/polly/test/ScopInfo/invariant-loads-leave-read-only-statements.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -disable-output < %s
; CHECK: Statements {
; CHECK-NEXT: Stmt_L_4
diff --git a/polly/test/ScopInfo/invariant_load.ll b/polly/test/ScopInfo/invariant_load.ll
index fcea77e19b85..9dc064276c40 100644
--- a/polly/test/ScopInfo/invariant_load.ll
+++ b/polly/test/ScopInfo/invariant_load.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Invariant Accesses:
; CHECK-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 0]
diff --git a/polly/test/ScopInfo/invariant_load_access_classes_different_base_type.ll b/polly/test/ScopInfo/invariant_load_access_classes_different_base_type.ll
index 100a8db2a9d1..40aa3098683b 100644
--- a/polly/test/ScopInfo/invariant_load_access_classes_different_base_type.ll
+++ b/polly/test/ScopInfo/invariant_load_access_classes_different_base_type.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s --check-prefix=CODEGEN
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -S < %s 2>&1 | FileCheck %s --check-prefix=CODEGEN
;
; struct {
; int a;
diff --git a/polly/test/ScopInfo/invariant_load_access_classes_different_base_type_escaping.ll b/polly/test/ScopInfo/invariant_load_access_classes_different_base_type_escaping.ll
index e31deb6fd472..287676024079 100644
--- a/polly/test/ScopInfo/invariant_load_access_classes_different_base_type_escaping.ll
+++ b/polly/test/ScopInfo/invariant_load_access_classes_different_base_type_escaping.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s --check-prefix=CODEGEN
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -S < %s 2>&1 | FileCheck %s --check-prefix=CODEGEN
;
; struct {
; int a;
diff --git a/polly/test/ScopInfo/invariant_load_access_classes_different_base_type_same_pointer.ll b/polly/test/ScopInfo/invariant_load_access_classes_different_base_type_same_pointer.ll
index bbf6d69a5fbb..cb745b4920b8 100644
--- a/polly/test/ScopInfo/invariant_load_access_classes_different_base_type_same_pointer.ll
+++ b/polly/test/ScopInfo/invariant_load_access_classes_different_base_type_same_pointer.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s --check-prefix=CODEGEN
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -S < %s 2>&1 | FileCheck %s --check-prefix=CODEGEN
;
; int U;
; void f(int *A) {
diff --git a/polly/test/ScopInfo/invariant_load_access_classes_different_base_type_same_pointer_escaping.ll b/polly/test/ScopInfo/invariant_load_access_classes_different_base_type_same_pointer_escaping.ll
index 011c2fe3d549..fa5429d4803a 100644
--- a/polly/test/ScopInfo/invariant_load_access_classes_different_base_type_same_pointer_escaping.ll
+++ b/polly/test/ScopInfo/invariant_load_access_classes_different_base_type_same_pointer_escaping.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s --check-prefix=CODEGEN
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -S < %s 2>&1 | FileCheck %s --check-prefix=CODEGEN
;
; int U;
; int f(int *A) {
diff --git a/polly/test/ScopInfo/invariant_load_addrec_sum.ll b/polly/test/ScopInfo/invariant_load_addrec_sum.ll
index 09b158d342ed..2e639f7d5e33 100644
--- a/polly/test/ScopInfo/invariant_load_addrec_sum.ll
+++ b/polly/test/ScopInfo/invariant_load_addrec_sum.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -polly-ignore-aliasing -polly-process-unprofitable -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -polly-ignore-aliasing -polly-process-unprofitable -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Region: %entry.split---%if.end
; CHECK: Invariant Accesses: {
diff --git a/polly/test/ScopInfo/invariant_load_base_pointer.ll b/polly/test/ScopInfo/invariant_load_base_pointer.ll
index ddf11d892adb..f2539af97a0b 100644
--- a/polly/test/ScopInfo/invariant_load_base_pointer.ll
+++ b/polly/test/ScopInfo/invariant_load_base_pointer.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -polly-ignore-aliasing -polly-process-unprofitable -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -polly-ignore-aliasing -polly-process-unprofitable -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Invariant Accesses:
; CHECK-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 0]
diff --git a/polly/test/ScopInfo/invariant_load_base_pointer_conditional.ll b/polly/test/ScopInfo/invariant_load_base_pointer_conditional.ll
index 07f2c3768b0a..f854b1f48ea9 100644
--- a/polly/test/ScopInfo/invariant_load_base_pointer_conditional.ll
+++ b/polly/test/ScopInfo/invariant_load_base_pointer_conditional.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -polly-ignore-aliasing -polly-process-unprofitable -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -polly-ignore-aliasing -polly-process-unprofitable -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Invariant Accesses:
; CHECK-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 0]
diff --git a/polly/test/ScopInfo/invariant_load_base_pointer_in_conditional.ll b/polly/test/ScopInfo/invariant_load_base_pointer_in_conditional.ll
index d66d718d492a..5a9c5c6cabbe 100644
--- a/polly/test/ScopInfo/invariant_load_base_pointer_in_conditional.ll
+++ b/polly/test/ScopInfo/invariant_load_base_pointer_in_conditional.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -polly-ignore-aliasing -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -polly-ignore-aliasing -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Invariant Accesses:
; CHECK-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 0]
diff --git a/polly/test/ScopInfo/invariant_load_branch_condition.ll b/polly/test/ScopInfo/invariant_load_branch_condition.ll
index 4f49d2969d86..d12750c30ba9 100644
--- a/polly/test/ScopInfo/invariant_load_branch_condition.ll
+++ b/polly/test/ScopInfo/invariant_load_branch_condition.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output \
-; RUN: -polly-invariant-load-hoisting < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output \
+; RUN: -polly-invariant-load-hoisting < %s 2>&1 | FileCheck %s
; CHECK: Invariant Accesses: {
; CHECK-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 0]
diff --git a/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs.ll b/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs.ll
index c6a7faf2e355..34d50a18663c 100644
--- a/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs.ll
+++ b/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 \
; RUN: -polly-invariant-load-hoisting \
; RUN: | FileCheck %s
diff --git a/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_2.ll b/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_2.ll
index 921dd4fbde5c..51f3cf6c095a 100644
--- a/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_2.ll
+++ b/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 \
; RUN: -polly-invariant-load-hoisting \
; RUN: | FileCheck %s
diff --git a/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_3.ll b/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_3.ll
index c15d11ca865d..3a742bbccdf1 100644
--- a/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_3.ll
+++ b/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_3.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 \
; RUN: -polly-invariant-load-hoisting \
; RUN: | FileCheck %s
diff --git a/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_4.ll b/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_4.ll
index 0495a330792c..6bd8b3146e87 100644
--- a/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_4.ll
+++ b/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_4.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 \
; RUN: -polly-invariant-load-hoisting \
; RUN: | FileCheck %s
diff --git a/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_4b.ll b/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_4b.ll
index 9144fcf186c3..cb7e5646fc2b 100644
--- a/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_4b.ll
+++ b/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_4b.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 \
; RUN: -polly-invariant-load-hoisting \
; RUN: | FileCheck %s
diff --git a/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_4c.ll b/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_4c.ll
index aefacff6b46f..6f7fbacc089c 100644
--- a/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_4c.ll
+++ b/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_4c.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 \
; RUN: -polly-invariant-load-hoisting \
; RUN: | FileCheck %s
diff --git a/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_5.ll b/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_5.ll
index ecc0c0a23014..445832822bdf 100644
--- a/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_5.ll
+++ b/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_5.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 \
; RUN: -polly-invariant-load-hoisting \
; RUN: | FileCheck %s
diff --git a/polly/test/ScopInfo/invariant_load_complex_condition.ll b/polly/test/ScopInfo/invariant_load_complex_condition.ll
index e721c222db5f..11e7088d68db 100644
--- a/polly/test/ScopInfo/invariant_load_complex_condition.ll
+++ b/polly/test/ScopInfo/invariant_load_complex_condition.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -S -polly-print-scops -disable-output \
-; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -S '-passes=print<polly-function-scops>' -disable-output \
+; RUN: -polly-invariant-load-hoisting=true < %s 2>&1 | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/ScopInfo/invariant_load_condition.ll b/polly/test/ScopInfo/invariant_load_condition.ll
index 84546984709e..c7d7b3c9ba61 100644
--- a/polly/test/ScopInfo/invariant_load_condition.ll
+++ b/polly/test/ScopInfo/invariant_load_condition.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-process-unprofitable -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-process-unprofitable '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Invariant Accesses:
; CHECK-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 0]
diff --git a/polly/test/ScopInfo/invariant_load_dereferenceable.ll b/polly/test/ScopInfo/invariant_load_dereferenceable.ll
index adba32d8d463..526bdc6ddb3b 100644
--- a/polly/test/ScopInfo/invariant_load_dereferenceable.ll
+++ b/polly/test/ScopInfo/invariant_load_dereferenceable.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -polly-print-detect -polly-print-scops \
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' '-passes=print<polly-function-scops>' \
; RUN: -polly-invariant-load-hoisting=true \
-; RUN: -disable-output < %s | FileCheck %s
+; RUN: -disable-output < %s 2>&1 | FileCheck %s
; CHECK-NOT: Function: foo_undereferanceable
diff --git a/polly/test/ScopInfo/invariant_load_distinct_parameter_valuations.ll b/polly/test/ScopInfo/invariant_load_distinct_parameter_valuations.ll
index 60b4a1daa824..eb148063320e 100644
--- a/polly/test/ScopInfo/invariant_load_distinct_parameter_valuations.ll
+++ b/polly/test/ScopInfo/invariant_load_distinct_parameter_valuations.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
;
; Check that we do not consolidate the invariant loads to smp[order - 1] and
; smp[order - 2] in the blocks %0 and %16. While they have the same pointer
diff --git a/polly/test/ScopInfo/invariant_load_in_non_affine.ll b/polly/test/ScopInfo/invariant_load_in_non_affine.ll
index d00bc2d642e0..5261113f5a0c 100644
--- a/polly/test/ScopInfo/invariant_load_in_non_affine.ll
+++ b/polly/test/ScopInfo/invariant_load_in_non_affine.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output \
-; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output \
+; RUN: -polly-invariant-load-hoisting=true < %s 2>&1 | FileCheck %s
;
; CHECK-NOT: Valid Region for Scop
;
diff --git a/polly/test/ScopInfo/invariant_load_loop_ub.ll b/polly/test/ScopInfo/invariant_load_loop_ub.ll
index 856b6e4dd508..ee889e6c4d5a 100644
--- a/polly/test/ScopInfo/invariant_load_loop_ub.ll
+++ b/polly/test/ScopInfo/invariant_load_loop_ub.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -polly-process-unprofitable -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-function-scops -polly-invariant-load-hoisting=true -polly-process-unprofitable -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -polly-process-unprofitable -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -polly-process-unprofitable -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Invariant Accesses:
; CHECK-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 0]
diff --git a/polly/test/ScopInfo/invariant_load_ptr_ptr_noalias.ll b/polly/test/ScopInfo/invariant_load_ptr_ptr_noalias.ll
index 69463d420aca..6af7caecc0b3 100644
--- a/polly/test/ScopInfo/invariant_load_ptr_ptr_noalias.ll
+++ b/polly/test/ScopInfo/invariant_load_ptr_ptr_noalias.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -tbaa -polly-print-scops -polly-invariant-load-hoisting=true -polly-ignore-aliasing \
-; RUN: -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=tbaa '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -polly-ignore-aliasing \
+; RUN: -disable-output < %s 2>&1 | FileCheck %s
;
; Note: The order of the invariant accesses is important because A is the
; base pointer of tmp3 and we will generate code in the same order as
diff --git a/polly/test/ScopInfo/invariant_load_scalar_dep.ll b/polly/test/ScopInfo/invariant_load_scalar_dep.ll
index 79a10426862a..319f24bdcb92 100644
--- a/polly/test/ScopInfo/invariant_load_scalar_dep.ll
+++ b/polly/test/ScopInfo/invariant_load_scalar_dep.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-process-unprofitable -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-process-unprofitable '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Invariant Accesses:
; CHECK-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 0]
diff --git a/polly/test/ScopInfo/invariant_load_stmt_domain.ll b/polly/test/ScopInfo/invariant_load_stmt_domain.ll
index 6cd71c85ea2f..715948062c05 100644
--- a/polly/test/ScopInfo/invariant_load_stmt_domain.ll
+++ b/polly/test/ScopInfo/invariant_load_stmt_domain.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
; This test case verifies that the statement domain of the invariant access
; is the universe. In earlier versions of Polly, we accidentally computed an
diff --git a/polly/test/ScopInfo/invariant_load_zext_parameter-2.ll b/polly/test/ScopInfo/invariant_load_zext_parameter-2.ll
index e77515280241..a6108320d560 100644
--- a/polly/test/ScopInfo/invariant_load_zext_parameter-2.ll
+++ b/polly/test/ScopInfo/invariant_load_zext_parameter-2.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -scalar-evolution-max-value-compare-depth=3 -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -scalar-evolution-max-value-compare-depth=3 -polly-codegen -polly-invariant-load-hoisting=true -disable-output < %s
+; RUN: opt %loadNPMPolly -scalar-evolution-max-value-compare-depth=3 '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -scalar-evolution-max-value-compare-depth=3 -passes=polly-codegen -polly-invariant-load-hoisting=true -disable-output < %s
;
; Stress test for the code generation of invariant accesses.
;
diff --git a/polly/test/ScopInfo/invariant_load_zext_parameter.ll b/polly/test/ScopInfo/invariant_load_zext_parameter.ll
index 1bde70282d44..e3c183aab5e2 100644
--- a/polly/test/ScopInfo/invariant_load_zext_parameter.ll
+++ b/polly/test/ScopInfo/invariant_load_zext_parameter.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s --check-prefix=CODEGEN
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -S < %s 2>&1 | FileCheck %s --check-prefix=CODEGEN
;
; void f(int *I0, int *I1, int *V) {
; for (int i = 0; i < 1000; i++) {
diff --git a/polly/test/ScopInfo/invariant_load_zextended_in_own_execution_context.ll b/polly/test/ScopInfo/invariant_load_zextended_in_own_execution_context.ll
index 775369e55c92..b5168e912ed7 100644
--- a/polly/test/ScopInfo/invariant_load_zextended_in_own_execution_context.ll
+++ b/polly/test/ScopInfo/invariant_load_zextended_in_own_execution_context.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -disable-output < %s
;
; CHECK: Execution Context: [p_0_loaded_from_currpc] -> { : }
;
diff --git a/polly/test/ScopInfo/invariant_loads_complicated_dependences.ll b/polly/test/ScopInfo/invariant_loads_complicated_dependences.ll
index 1d54ccc69023..85360821078d 100644
--- a/polly/test/ScopInfo/invariant_loads_complicated_dependences.ll
+++ b/polly/test/ScopInfo/invariant_loads_complicated_dependences.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Invariant Accesses: {
; CHECK-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 0]
diff --git a/polly/test/ScopInfo/invariant_loads_cyclic_dependences.ll b/polly/test/ScopInfo/invariant_loads_cyclic_dependences.ll
index e97de0c936bc..134eac22bff5 100644
--- a/polly/test/ScopInfo/invariant_loads_cyclic_dependences.ll
+++ b/polly/test/ScopInfo/invariant_loads_cyclic_dependences.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
;
; Negative test. If we assume UB[*V] to be invariant we get a cyclic
; dependence in the invariant loads that needs to be resolved by
diff --git a/polly/test/ScopInfo/invariant_loop_bounds.ll b/polly/test/ScopInfo/invariant_loop_bounds.ll
index 4e1fd88fac30..f22199cfe494 100644
--- a/polly/test/ScopInfo/invariant_loop_bounds.ll
+++ b/polly/test/ScopInfo/invariant_loop_bounds.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Invariant Accesses: {
; CHECK-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 0]
diff --git a/polly/test/ScopInfo/invariant_same_loop_bound_multiple_times-1.ll b/polly/test/ScopInfo/invariant_same_loop_bound_multiple_times-1.ll
index 3d5737bbe168..a473ef30376c 100644
--- a/polly/test/ScopInfo/invariant_same_loop_bound_multiple_times-1.ll
+++ b/polly/test/ScopInfo/invariant_same_loop_bound_multiple_times-1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
;
; Verify that we only have one parameter and one invariant load for all
; three loads that occur in the region but actually access the same
diff --git a/polly/test/ScopInfo/invariant_same_loop_bound_multiple_times-2.ll b/polly/test/ScopInfo/invariant_same_loop_bound_multiple_times-2.ll
index e2de503eb83f..66a0bc631b1d 100644
--- a/polly/test/ScopInfo/invariant_same_loop_bound_multiple_times-2.ll
+++ b/polly/test/ScopInfo/invariant_same_loop_bound_multiple_times-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
;
; Verify that we only have one parameter and one invariant load for all
; three loads that occur in the region but actually access the same
diff --git a/polly/test/ScopInfo/isl_aff_out_of_bounds.ll b/polly/test/ScopInfo/isl_aff_out_of_bounds.ll
index ca1b235be358..2df96faf7624 100644
--- a/polly/test/ScopInfo/isl_aff_out_of_bounds.ll
+++ b/polly/test/ScopInfo/isl_aff_out_of_bounds.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-detect < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' < %s 2>&1
; Used to fail with:
; ../../isl/isl_aff.c:591: position out of bounds
diff --git a/polly/test/ScopInfo/isl_trip_count_01.ll b/polly/test/ScopInfo/isl_trip_count_01.ll
index fc6b79c5a68a..480b6e9574a6 100644
--- a/polly/test/ScopInfo/isl_trip_count_01.ll
+++ b/polly/test/ScopInfo/isl_trip_count_01.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: [M, N] -> { Stmt_while_body[i0] : i0 > 0 and 4i0 <= -M + N; Stmt_while_body[0] };
;
diff --git a/polly/test/ScopInfo/isl_trip_count_02.ll b/polly/test/ScopInfo/isl_trip_count_02.ll
index 9376cb415cec..b78fb838edd0 100644
--- a/polly/test/ScopInfo/isl_trip_count_02.ll
+++ b/polly/test/ScopInfo/isl_trip_count_02.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; TODO: We do not allow unbounded loops at the moment.
;
diff --git a/polly/test/ScopInfo/isl_trip_count_03.ll b/polly/test/ScopInfo/isl_trip_count_03.ll
index f5b0048a0e0e..96df05f89bcf 100644
--- a/polly/test/ScopInfo/isl_trip_count_03.ll
+++ b/polly/test/ScopInfo/isl_trip_count_03.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Test comes from a bug report (15771), or rather a feature request. It was not allowed
; in Polly in the old domain generation as ScalarEvolution cannot figure out the
diff --git a/polly/test/ScopInfo/isl_trip_count_multiple_exiting_blocks.ll b/polly/test/ScopInfo/isl_trip_count_multiple_exiting_blocks.ll
index 91bc19e2de44..fd310ececaa3 100644
--- a/polly/test/ScopInfo/isl_trip_count_multiple_exiting_blocks.ll
+++ b/polly/test/ScopInfo/isl_trip_count_multiple_exiting_blocks.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; The SCoP contains a loop with multiple exit blocks (BBs after leaving
; the loop). The current implementation of deriving their domain derives
diff --git a/polly/test/ScopInfo/licm_reduction_nested.ll b/polly/test/ScopInfo/licm_reduction_nested.ll
index a3ba478cd9ff..c1676033fa90 100644
--- a/polly/test/ScopInfo/licm_reduction_nested.ll
+++ b/polly/test/ScopInfo/licm_reduction_nested.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -basic-aa -loop-rotate -indvars -polly-prepare -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -basic-aa -loop-rotate -indvars -licm -polly-prepare -polly-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -loop-rotate -indvars -passes=polly-prepare '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -loop-rotate -indvars -licm -passes=polly-prepare '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; XFAIL: *
;
diff --git a/polly/test/ScopInfo/long-compile-time-alias-analysis.ll b/polly/test/ScopInfo/long-compile-time-alias-analysis.ll
index 1cbecf086968..f102518da526 100644
--- a/polly/test/ScopInfo/long-compile-time-alias-analysis.ll
+++ b/polly/test/ScopInfo/long-compile-time-alias-analysis.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s
; Verify that the compilation of this test case does not take infinite time.
; At some point Polly tried to model this test case and got stuck in
diff --git a/polly/test/ScopInfo/long-sequence-of-error-blocks-2.ll b/polly/test/ScopInfo/long-sequence-of-error-blocks-2.ll
index c88ea1327389..6027975b563b 100644
--- a/polly/test/ScopInfo/long-sequence-of-error-blocks-2.ll
+++ b/polly/test/ScopInfo/long-sequence-of-error-blocks-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/polly/test/ScopInfo/long-sequence-of-error-blocks.ll b/polly/test/ScopInfo/long-sequence-of-error-blocks.ll
index 5b6ea9cc212d..4ef5ef09c44b 100644
--- a/polly/test/ScopInfo/long-sequence-of-error-blocks.ll
+++ b/polly/test/ScopInfo/long-sequence-of-error-blocks.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output \
-; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output \
+; RUN: -polly-invariant-load-hoisting=true < %s 2>&1 | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
diff --git a/polly/test/ScopInfo/loop-multiexit-succ-cond.ll b/polly/test/ScopInfo/loop-multiexit-succ-cond.ll
index 350db05c6dc0..431c907857fe 100644
--- a/polly/test/ScopInfo/loop-multiexit-succ-cond.ll
+++ b/polly/test/ScopInfo/loop-multiexit-succ-cond.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s --check-prefix=IR
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s 2>&1 | FileCheck %s --check-prefix=IR
;
; The SCoP contains a loop with multiple exit blocks (BBs after leaving
; the loop). The current implementation of deriving their domain derives
diff --git a/polly/test/ScopInfo/loop_affine_bound_0.ll b/polly/test/ScopInfo/loop_affine_bound_0.ll
index 33f49df7780f..918d4099740c 100644
--- a/polly/test/ScopInfo/loop_affine_bound_0.ll
+++ b/polly/test/ScopInfo/loop_affine_bound_0.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-function-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; void f(long a[][128], long N, long M) {
; long i, j;
diff --git a/polly/test/ScopInfo/loop_affine_bound_1.ll b/polly/test/ScopInfo/loop_affine_bound_1.ll
index 38e47b74465b..8f7a87f1c5ac 100644
--- a/polly/test/ScopInfo/loop_affine_bound_1.ll
+++ b/polly/test/ScopInfo/loop_affine_bound_1.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output< %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-function-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output< %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;void f(long a[][128], long N, long M) {
; long i, j;
diff --git a/polly/test/ScopInfo/loop_affine_bound_2.ll b/polly/test/ScopInfo/loop_affine_bound_2.ll
index e34662f4e6ab..2d9f997a0767 100644
--- a/polly/test/ScopInfo/loop_affine_bound_2.ll
+++ b/polly/test/ScopInfo/loop_affine_bound_2.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-function-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; void f(long a[][128], long N, long M) {
; long i, j;
diff --git a/polly/test/ScopInfo/loop_carry.ll b/polly/test/ScopInfo/loop_carry.ll
index f7c1dca0919c..20ebbfbc8b49 100644
--- a/polly/test/ScopInfo/loop_carry.ll
+++ b/polly/test/ScopInfo/loop_carry.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/polly/test/ScopInfo/many-scalar-dependences.ll b/polly/test/ScopInfo/many-scalar-dependences.ll
index aaa02f581a1c..5b003325ef0f 100644
--- a/polly/test/ScopInfo/many-scalar-dependences.ll
+++ b/polly/test/ScopInfo/many-scalar-dependences.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(float a[100][100]) {
; float x;
diff --git a/polly/test/ScopInfo/max-loop-depth.ll b/polly/test/ScopInfo/max-loop-depth.ll
index 3c7db4458604..71e9c02aa8dc 100644
--- a/polly/test/ScopInfo/max-loop-depth.ll
+++ b/polly/test/ScopInfo/max-loop-depth.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void bar();
; void foo(int *A, int *B, long int N, long int M) {
diff --git a/polly/test/ScopInfo/memcpy-raw-source.ll b/polly/test/ScopInfo/memcpy-raw-source.ll
index 137ab8229220..d9024cd27346 100644
--- a/polly/test/ScopInfo/memcpy-raw-source.ll
+++ b/polly/test/ScopInfo/memcpy-raw-source.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -scoped-noalias-aa -tbaa -polly-print-scops -disable-output < %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa,scoped-noalias-aa,tbaa '-passes=print<polly-function-scops>' -disable-output < %s
;
; Ensure that ScopInfo's alias analysis for llvm.memcpy,
; like the AliasSetTracker, preserves bitcasts.
diff --git a/polly/test/ScopInfo/memcpy.ll b/polly/test/ScopInfo/memcpy.ll
index 705dea769e42..95c455f097b2 100644
--- a/polly/test/ScopInfo/memcpy.ll
+++ b/polly/test/ScopInfo/memcpy.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -basic-aa -polly-allow-differing-element-types -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -S -basic-aa -polly-allow-differing-element-types -polly-codegen < %s | FileCheck --check-prefix=IR %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-differing-element-types '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -S -aa-pipeline=basic-aa -polly-allow-differing-element-types -passes=polly-codegen < %s 2>&1 | FileCheck --check-prefix=IR %s
;
; CHECK: Arrays {
; CHECK-NEXT: i8 MemRef_A[*]; // Element size 1
diff --git a/polly/test/ScopInfo/memmove.ll b/polly/test/ScopInfo/memmove.ll
index 15123422f419..8ff471a11cd1 100644
--- a/polly/test/ScopInfo/memmove.ll
+++ b/polly/test/ScopInfo/memmove.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -basic-aa -polly-allow-differing-element-types -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -S -basic-aa -polly-allow-differing-element-types -polly-codegen < %s | FileCheck --check-prefix=IR %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-differing-element-types '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -S -aa-pipeline=basic-aa -polly-allow-differing-element-types -passes=polly-codegen < %s 2>&1 | FileCheck --check-prefix=IR %s
;
; CHECK: Arrays {
; CHECK-NEXT: i8 MemRef_A[*]; // Element size 1
diff --git a/polly/test/ScopInfo/memset.ll b/polly/test/ScopInfo/memset.ll
index ef86b4c275e5..89b048772821 100644
--- a/polly/test/ScopInfo/memset.ll
+++ b/polly/test/ScopInfo/memset.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-allow-differing-element-types -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -S -polly-allow-differing-element-types -polly-codegen < %s | FileCheck --check-prefix=IR %s
+; RUN: opt %loadNPMPolly -polly-allow-differing-element-types '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -S -polly-allow-differing-element-types -passes=polly-codegen < %s 2>&1 | FileCheck --check-prefix=IR %s
;
; CHECK: Arrays {
; CHECK-NEXT: i8 MemRef_A[*]; // Element size 1
diff --git a/polly/test/ScopInfo/memset_null.ll b/polly/test/ScopInfo/memset_null.ll
index 1608ff6ebef4..9755cf1129e6 100644
--- a/polly/test/ScopInfo/memset_null.ll
+++ b/polly/test/ScopInfo/memset_null.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-allow-modref-calls -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-allow-modref-calls -S -polly-codegen < %s
+; RUN: opt %loadNPMPolly -polly-allow-modref-calls '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-allow-modref-calls -S -passes=polly-codegen < %s
;
; Verify we can handle a memset to "null" and that we do not model it.
; TODO: FIXME: We could use the undefined memset to optimize the code further,
diff --git a/polly/test/ScopInfo/mismatching-array-dimensions.ll b/polly/test/ScopInfo/mismatching-array-dimensions.ll
index a1c6d4e82127..ed1e28cbee6e 100644
--- a/polly/test/ScopInfo/mismatching-array-dimensions.ll
+++ b/polly/test/ScopInfo/mismatching-array-dimensions.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; CHECK-NOT: AssumedContext
diff --git a/polly/test/ScopInfo/mod_ref_access_pointee_arguments.ll b/polly/test/ScopInfo/mod_ref_access_pointee_arguments.ll
index 72889324e37e..6bc5f8d8eb73 100644
--- a/polly/test/ScopInfo/mod_ref_access_pointee_arguments.ll
+++ b/polly/test/ScopInfo/mod_ref_access_pointee_arguments.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -basic-aa -polly-stmt-granularity=bb -polly-print-scops -polly-allow-modref-calls \
-; RUN: -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -basic-aa -polly-stmt-granularity=bb -polly-codegen -polly-allow-modref-calls \
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -polly-allow-modref-calls \
+; RUN: -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-stmt-granularity=bb -passes=polly-codegen -polly-allow-modref-calls \
; RUN: -disable-output < %s
;
; Verify that we model the may-write access of the prefetch intrinsic
diff --git a/polly/test/ScopInfo/mod_ref_read_pointee_arguments.ll b/polly/test/ScopInfo/mod_ref_read_pointee_arguments.ll
index 2f6c6792fd9d..21322bc648f8 100644
--- a/polly/test/ScopInfo/mod_ref_read_pointee_arguments.ll
+++ b/polly/test/ScopInfo/mod_ref_read_pointee_arguments.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -basic-aa -polly-stmt-granularity=bb -polly-print-scops -polly-allow-modref-calls \
-; RUN: -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -basic-aa -polly-codegen -disable-output \
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -polly-allow-modref-calls \
+; RUN: -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -passes=polly-codegen -disable-output \
; RUN: -polly-allow-modref-calls < %s
;
; Verify that we model the read access of the gcread intrinsic
diff --git a/polly/test/ScopInfo/mod_ref_read_pointer.ll b/polly/test/ScopInfo/mod_ref_read_pointer.ll
index 657e37c68a7b..25e56a08a961 100644
--- a/polly/test/ScopInfo/mod_ref_read_pointer.ll
+++ b/polly/test/ScopInfo/mod_ref_read_pointer.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -basic-aa -polly-allow-modref-calls -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -basic-aa -polly-allow-modref-calls -polly-codegen -disable-output < %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-modref-calls '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-allow-modref-calls -passes=polly-codegen -disable-output < %s
;
; Check that we assume the call to func has a read on the whole A array.
;
diff --git a/polly/test/ScopInfo/mod_ref_read_pointers.ll b/polly/test/ScopInfo/mod_ref_read_pointers.ll
index 7ed3423a2aeb..5cc96cf3a06e 100644
--- a/polly/test/ScopInfo/mod_ref_read_pointers.ll
+++ b/polly/test/ScopInfo/mod_ref_read_pointers.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-scops -polly-allow-modref-calls \
-; RUN: -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -basic-aa -polly-codegen -disable-output \
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-function-scops>' -polly-allow-modref-calls \
+; RUN: -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -passes=polly-codegen -disable-output \
; RUN: -polly-allow-modref-calls < %s
;
; Check that the call to func will "read" not only the A array but also the
diff --git a/polly/test/ScopInfo/modulo_zext_1.ll b/polly/test/ScopInfo/modulo_zext_1.ll
index d611ec4807b5..0a8957da4931 100644
--- a/polly/test/ScopInfo/modulo_zext_1.ll
+++ b/polly/test/ScopInfo/modulo_zext_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Assumed Context:
; CHECK-NEXT: [N] -> { : }
diff --git a/polly/test/ScopInfo/modulo_zext_2.ll b/polly/test/ScopInfo/modulo_zext_2.ll
index 8d2321849174..7af2411e7e8c 100644
--- a/polly/test/ScopInfo/modulo_zext_2.ll
+++ b/polly/test/ScopInfo/modulo_zext_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Assumed Context:
; CHECK-NEXT: [N] -> { : }
diff --git a/polly/test/ScopInfo/modulo_zext_3.ll b/polly/test/ScopInfo/modulo_zext_3.ll
index acb26dc1c77f..1dac723aa2c2 100644
--- a/polly/test/ScopInfo/modulo_zext_3.ll
+++ b/polly/test/ScopInfo/modulo_zext_3.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Assumed Context:
; CHECK-NEXT: [N] -> { : }
diff --git a/polly/test/ScopInfo/multi-scop.ll b/polly/test/ScopInfo/multi-scop.ll
index e26c8c7bae10..c6dc1f201efa 100644
--- a/polly/test/ScopInfo/multi-scop.ll
+++ b/polly/test/ScopInfo/multi-scop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-detect -polly-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; This test case contains two scops.
diff --git a/polly/test/ScopInfo/multidim_2d-diagonal-matrix.ll b/polly/test/ScopInfo/multidim_2d-diagonal-matrix.ll
index 278c06a2fdba..bd46532d87f1 100644
--- a/polly/test/ScopInfo/multidim_2d-diagonal-matrix.ll
+++ b/polly/test/ScopInfo/multidim_2d-diagonal-matrix.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-function-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; Derived from the following code:
diff --git a/polly/test/ScopInfo/multidim_2d_outer_parametric_offset.ll b/polly/test/ScopInfo/multidim_2d_outer_parametric_offset.ll
index 06a76466c25e..cdd46304c932 100644
--- a/polly/test/ScopInfo/multidim_2d_outer_parametric_offset.ll
+++ b/polly/test/ScopInfo/multidim_2d_outer_parametric_offset.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; Derived from the following code:
diff --git a/polly/test/ScopInfo/multidim_2d_parametric_array_static_loop_bounds.ll b/polly/test/ScopInfo/multidim_2d_parametric_array_static_loop_bounds.ll
index bfbe5682d44a..0b735b910618 100644
--- a/polly/test/ScopInfo/multidim_2d_parametric_array_static_loop_bounds.ll
+++ b/polly/test/ScopInfo/multidim_2d_parametric_array_static_loop_bounds.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; Derived from the following code:
diff --git a/polly/test/ScopInfo/multidim_2d_with_modref_call.ll b/polly/test/ScopInfo/multidim_2d_with_modref_call.ll
index ba934adb675a..befca87972c1 100644
--- a/polly/test/ScopInfo/multidim_2d_with_modref_call.ll
+++ b/polly/test/ScopInfo/multidim_2d_with_modref_call.ll
@@ -1,9 +1,9 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -polly-allow-modref-calls \
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -polly-allow-modref-calls \
; RUN: -polly-invariant-load-hoisting=true \
-; RUN: -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -polly-allow-nonaffine \
+; RUN: -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -polly-allow-nonaffine \
; RUN: -polly-invariant-load-hoisting=true \
-; RUN: -polly-allow-modref-calls -disable-output < %s | FileCheck %s --check-prefix=NONAFFINE
+; RUN: -polly-allow-modref-calls -disable-output < %s 2>&1 | FileCheck %s --check-prefix=NONAFFINE
; TODO: We should delinearize the accesses despite the use in a call to a
; readonly function. For now we verify we do not delinearize them though.
diff --git a/polly/test/ScopInfo/multidim_2d_with_modref_call_2.ll b/polly/test/ScopInfo/multidim_2d_with_modref_call_2.ll
index 3da123fd1f60..cceb5353d74c 100644
--- a/polly/test/ScopInfo/multidim_2d_with_modref_call_2.ll
+++ b/polly/test/ScopInfo/multidim_2d_with_modref_call_2.ll
@@ -1,9 +1,9 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -polly-allow-modref-calls \
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -polly-allow-modref-calls \
; RUN: -polly-invariant-load-hoisting=true \
-; RUN: -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -polly-allow-nonaffine \
+; RUN: -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -polly-allow-nonaffine \
; RUN: -polly-invariant-load-hoisting=true \
-; RUN: -polly-allow-modref-calls -disable-output < %s | FileCheck %s --check-prefix=NONAFFINE
+; RUN: -polly-allow-modref-calls -disable-output < %s 2>&1 | FileCheck %s --check-prefix=NONAFFINE
; TODO: We should delinearize the accesses despite the use in a call to a
; readonly function. For now we verify we do not delinearize them though.
diff --git a/polly/test/ScopInfo/multidim_3d_parametric_array_static_loop_bounds.ll b/polly/test/ScopInfo/multidim_3d_parametric_array_static_loop_bounds.ll
index 988475575fec..c957dd10ed65 100644
--- a/polly/test/ScopInfo/multidim_3d_parametric_array_static_loop_bounds.ll
+++ b/polly/test/ScopInfo/multidim_3d_parametric_array_static_loop_bounds.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; void foo(long n, long m, long o, double A[n][m][o]) {
diff --git a/polly/test/ScopInfo/multidim_fixedsize_different_dimensionality.ll b/polly/test/ScopInfo/multidim_fixedsize_different_dimensionality.ll
index ddc35a46a633..4a1ee3b1af51 100644
--- a/polly/test/ScopInfo/multidim_fixedsize_different_dimensionality.ll
+++ b/polly/test/ScopInfo/multidim_fixedsize_different_dimensionality.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; #define N 400
;
diff --git a/polly/test/ScopInfo/multidim_fixedsize_multi_offset.ll b/polly/test/ScopInfo/multidim_fixedsize_multi_offset.ll
index 9c749f0c48c8..9a6d8fbe1275 100644
--- a/polly/test/ScopInfo/multidim_fixedsize_multi_offset.ll
+++ b/polly/test/ScopInfo/multidim_fixedsize_multi_offset.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Context:
; CHECK-NEXT: { : }
diff --git a/polly/test/ScopInfo/multidim_fold_constant_dim.ll b/polly/test/ScopInfo/multidim_fold_constant_dim.ll
index e95d400a860c..9f4769402286 100644
--- a/polly/test/ScopInfo/multidim_fold_constant_dim.ll
+++ b/polly/test/ScopInfo/multidim_fold_constant_dim.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; struct com {
; double Real;
diff --git a/polly/test/ScopInfo/multidim_fold_constant_dim_zero.ll b/polly/test/ScopInfo/multidim_fold_constant_dim_zero.ll
index 57275e4024ab..5778126ad8f1 100644
--- a/polly/test/ScopInfo/multidim_fold_constant_dim_zero.ll
+++ b/polly/test/ScopInfo/multidim_fold_constant_dim_zero.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -debug -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -debug -disable-output < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
diff --git a/polly/test/ScopInfo/multidim_fortran_2d.ll b/polly/test/ScopInfo/multidim_fortran_2d.ll
index 29279a4e886b..e5b005f17dcc 100644
--- a/polly/test/ScopInfo/multidim_fortran_2d.ll
+++ b/polly/test/ScopInfo/multidim_fortran_2d.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops \
-; RUN: -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' \
+; RUN: -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
; subroutine init_array(ni, nj, pi, pj, a)
; implicit none
diff --git a/polly/test/ScopInfo/multidim_fortran_2d_params.ll b/polly/test/ScopInfo/multidim_fortran_2d_params.ll
index 93145b399ca5..a7f7ebc13036 100644
--- a/polly/test/ScopInfo/multidim_fortran_2d_params.ll
+++ b/polly/test/ScopInfo/multidim_fortran_2d_params.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output \
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output \
; RUN: -polly-precise-fold-accesses \
-; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s
+; RUN: -polly-invariant-load-hoisting=true < %s 2>&1 | FileCheck %s
; subroutine init_array(ni, nj, pi, pj, a)
; implicit none
diff --git a/polly/test/ScopInfo/multidim_fortran_2d_with_modref_call.ll b/polly/test/ScopInfo/multidim_fortran_2d_with_modref_call.ll
index dff6a8be85cf..5f3080a12fdb 100644
--- a/polly/test/ScopInfo/multidim_fortran_2d_with_modref_call.ll
+++ b/polly/test/ScopInfo/multidim_fortran_2d_with_modref_call.ll
@@ -1,9 +1,9 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -polly-allow-modref-calls \
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -polly-allow-modref-calls \
; RUN: -polly-invariant-load-hoisting=true \
-; RUN: -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -polly-allow-nonaffine \
+; RUN: -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -polly-allow-nonaffine \
; RUN: -polly-invariant-load-hoisting=true \
-; RUN: -polly-allow-modref-calls -disable-output < %s | FileCheck %s --check-prefix=NONAFFINE
+; RUN: -polly-allow-modref-calls -disable-output < %s 2>&1 | FileCheck %s --check-prefix=NONAFFINE
; TODO: We should delinearize the accesses despite the use in a call to a
; readonly function. For now we verify we do not delinearize them though.
diff --git a/polly/test/ScopInfo/multidim_fortran_srem.ll b/polly/test/ScopInfo/multidim_fortran_srem.ll
index 8c24c5b8ee71..31cc633fa65c 100644
--- a/polly/test/ScopInfo/multidim_fortran_srem.ll
+++ b/polly/test/ScopInfo/multidim_fortran_srem.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-S128-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f16:16:16-f32:32:32-f64:64:64-f128:128:128-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
; CHECK: Statements {
diff --git a/polly/test/ScopInfo/multidim_gep_pointercast.ll b/polly/test/ScopInfo/multidim_gep_pointercast.ll
index 20d59fa91eaf..fd8048b11f14 100644
--- a/polly/test/ScopInfo/multidim_gep_pointercast.ll
+++ b/polly/test/ScopInfo/multidim_gep_pointercast.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; The load access to A has a pointer-bitcast to another element size before the
; GetElementPtr. Verify that we do not use the GEP delinearization because it
diff --git a/polly/test/ScopInfo/multidim_gep_pointercast2.ll b/polly/test/ScopInfo/multidim_gep_pointercast2.ll
index deed9c7c3f57..b31a0d0262db 100644
--- a/polly/test/ScopInfo/multidim_gep_pointercast2.ll
+++ b/polly/test/ScopInfo/multidim_gep_pointercast2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Verify that we do not use the GetElementPtr information to delinearize A
; because of the cast in-between. Use the single-dimensional modeling instead.
diff --git a/polly/test/ScopInfo/multidim_ivs_and_integer_offsets_3d.ll b/polly/test/ScopInfo/multidim_ivs_and_integer_offsets_3d.ll
index 9f7e6bc4a2a2..92b42a9e7a87 100644
--- a/polly/test/ScopInfo/multidim_ivs_and_integer_offsets_3d.ll
+++ b/polly/test/ScopInfo/multidim_ivs_and_integer_offsets_3d.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; void foo(long n, long m, long o, double A[n][m][o]) {
diff --git a/polly/test/ScopInfo/multidim_ivs_and_parameteric_offsets_3d.ll b/polly/test/ScopInfo/multidim_ivs_and_parameteric_offsets_3d.ll
index 131bb7b3ebed..261cba1e68aa 100644
--- a/polly/test/ScopInfo/multidim_ivs_and_parameteric_offsets_3d.ll
+++ b/polly/test/ScopInfo/multidim_ivs_and_parameteric_offsets_3d.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-precise-fold-accesses -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-precise-fold-accesses '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; void foo(long n, long m, long o, double A[n][m][o], long p, long q, long r) {
diff --git a/polly/test/ScopInfo/multidim_many_references.ll b/polly/test/ScopInfo/multidim_many_references.ll
index b0483b267260..3801fda4923c 100644
--- a/polly/test/ScopInfo/multidim_many_references.ll
+++ b/polly/test/ScopInfo/multidim_many_references.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-ignore-aliasing -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-function-scops -polly-ignore-aliasing -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-ignore-aliasing -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-ignore-aliasing -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/ScopInfo/multidim_nested_start_integer.ll b/polly/test/ScopInfo/multidim_nested_start_integer.ll
index 741a0ef45c27..6ee9798a050d 100644
--- a/polly/test/ScopInfo/multidim_nested_start_integer.ll
+++ b/polly/test/ScopInfo/multidim_nested_start_integer.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-function-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; void foo(long n, long m, long o, double A[n][m][o]) {
diff --git a/polly/test/ScopInfo/multidim_nested_start_share_parameter.ll b/polly/test/ScopInfo/multidim_nested_start_share_parameter.ll
index 692746bad3d7..e238bddf4783 100644
--- a/polly/test/ScopInfo/multidim_nested_start_share_parameter.ll
+++ b/polly/test/ScopInfo/multidim_nested_start_share_parameter.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; void foo(long n, long m, long o, double A[n][m][o]) {
diff --git a/polly/test/ScopInfo/multidim_only_ivs_2d.ll b/polly/test/ScopInfo/multidim_only_ivs_2d.ll
index 71245642e751..33b321716edc 100644
--- a/polly/test/ScopInfo/multidim_only_ivs_2d.ll
+++ b/polly/test/ScopInfo/multidim_only_ivs_2d.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; Derived from the following code:
diff --git a/polly/test/ScopInfo/multidim_only_ivs_3d.ll b/polly/test/ScopInfo/multidim_only_ivs_3d.ll
index a019d58b241d..39ea4243d942 100644
--- a/polly/test/ScopInfo/multidim_only_ivs_3d.ll
+++ b/polly/test/ScopInfo/multidim_only_ivs_3d.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; void foo(long n, long m, long o, double A[n][m][o]) {
diff --git a/polly/test/ScopInfo/multidim_only_ivs_3d_cast.ll b/polly/test/ScopInfo/multidim_only_ivs_3d_cast.ll
index 41577ef1a0be..7f7f7f91067e 100644
--- a/polly/test/ScopInfo/multidim_only_ivs_3d_cast.ll
+++ b/polly/test/ScopInfo/multidim_only_ivs_3d_cast.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; void foo(int n, int m, int o, double A[n][m][o]) {
;
diff --git a/polly/test/ScopInfo/multidim_only_ivs_3d_reverse.ll b/polly/test/ScopInfo/multidim_only_ivs_3d_reverse.ll
index 25907f2ee79c..1675110ffd6f 100644
--- a/polly/test/ScopInfo/multidim_only_ivs_3d_reverse.ll
+++ b/polly/test/ScopInfo/multidim_only_ivs_3d_reverse.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; This test case checks for array access functions where the order in which the
diff --git a/polly/test/ScopInfo/multidim_param_in_subscript-2.ll b/polly/test/ScopInfo/multidim_param_in_subscript-2.ll
index 0790664f7129..da9827fd5f2c 100644
--- a/polly/test/ScopInfo/multidim_param_in_subscript-2.ll
+++ b/polly/test/ScopInfo/multidim_param_in_subscript-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-precise-fold-accesses -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-precise-fold-accesses '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void foo(long n, long m, float A[][n][m]) {
; for (long i = 0; i < 100; i++)
diff --git a/polly/test/ScopInfo/multidim_param_in_subscript.ll b/polly/test/ScopInfo/multidim_param_in_subscript.ll
index b8ec80b321fe..c86b5f0ae238 100644
--- a/polly/test/ScopInfo/multidim_param_in_subscript.ll
+++ b/polly/test/ScopInfo/multidim_param_in_subscript.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
;
; void foo(long n, float A[][n]) {
diff --git a/polly/test/ScopInfo/multidim_parameter_addrec_product.ll b/polly/test/ScopInfo/multidim_parameter_addrec_product.ll
index 7db3e9dc3b5f..da563a05560c 100644
--- a/polly/test/ScopInfo/multidim_parameter_addrec_product.ll
+++ b/polly/test/ScopInfo/multidim_parameter_addrec_product.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-invariant-load-hoisting=true -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-invariant-load-hoisting=true '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void foo(float *A, long *p) {
; for (long i = 0; i < 100; i++)
diff --git a/polly/test/ScopInfo/multidim_single_and_multidim_array.ll b/polly/test/ScopInfo/multidim_single_and_multidim_array.ll
index 1e302dec4861..7059e5396987 100644
--- a/polly/test/ScopInfo/multidim_single_and_multidim_array.ll
+++ b/polly/test/ScopInfo/multidim_single_and_multidim_array.ll
@@ -1,11 +1,11 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-delinearize=false -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-scops -polly-delinearize=false -polly-allow-nonaffine -disable-output < %s | FileCheck %s --check-prefix=NONAFFINE
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=DELIN
-; RUN: opt %loadPolly -polly-print-scops -polly-allow-nonaffine -disable-output < %s | FileCheck %s --check-prefix=DELIN
-; RUN: opt %loadPolly -polly-print-function-scops -polly-delinearize=false -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-function-scops -polly-delinearize=false -polly-allow-nonaffine -disable-output < %s | FileCheck %s --check-prefix=NONAFFINE
-; RUN: opt %loadPolly -polly-print-function-scops -disable-output < %s | FileCheck %s --check-prefix=DELIN
-; RUN: opt %loadPolly -polly-print-function-scops -polly-allow-nonaffine -disable-output < %s | FileCheck %s --check-prefix=DELIN
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-delinearize=false -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-delinearize=false -polly-allow-nonaffine -disable-output < %s 2>&1 | FileCheck %s --check-prefix=NONAFFINE
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=DELIN
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-allow-nonaffine -disable-output < %s 2>&1 | FileCheck %s --check-prefix=DELIN
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-delinearize=false -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-delinearize=false -polly-allow-nonaffine -disable-output < %s 2>&1 | FileCheck %s --check-prefix=NONAFFINE
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=DELIN
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-allow-nonaffine -disable-output < %s 2>&1 | FileCheck %s --check-prefix=DELIN
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/ScopInfo/multidim_srem.ll b/polly/test/ScopInfo/multidim_srem.ll
index f89843f0a5bc..c965e2c86e2b 100644
--- a/polly/test/ScopInfo/multidim_srem.ll
+++ b/polly/test/ScopInfo/multidim_srem.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void foo(long n, float A[][n][n]) {
; for (long i = 0; i < 200; i++)
diff --git a/polly/test/ScopInfo/multidim_with_bitcast.ll b/polly/test/ScopInfo/multidim_with_bitcast.ll
index b77ff689b953..0ab9c2d93ff4 100644
--- a/polly/test/ScopInfo/multidim_with_bitcast.ll
+++ b/polly/test/ScopInfo/multidim_with_bitcast.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/ScopInfo/multiple-binary-or-conditions.ll b/polly/test/ScopInfo/multiple-binary-or-conditions.ll
index b905a11f577c..65416e6fffda 100644
--- a/polly/test/ScopInfo/multiple-binary-or-conditions.ll
+++ b/polly/test/ScopInfo/multiple-binary-or-conditions.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -disable-output < %s
;
; void or(float *A, long n, long m) {
; for (long i = 0; i < 100; i++) {
diff --git a/polly/test/ScopInfo/multiple-types-access-offset-not-dividable-by-element-size.ll b/polly/test/ScopInfo/multiple-types-access-offset-not-dividable-by-element-size.ll
index 2d03ad941c05..910e624adb50 100644
--- a/polly/test/ScopInfo/multiple-types-access-offset-not-dividable-by-element-size.ll
+++ b/polly/test/ScopInfo/multiple-types-access-offset-not-dividable-by-element-size.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -pass-remarks-analysis="polly-scops" \
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -pass-remarks-analysis="polly-scops" \
; RUN: -polly-allow-differing-element-types \
; RUN: -disable-output < %s 2>&1 | FileCheck %s
;
diff --git a/polly/test/ScopInfo/multiple-types-non-affine-2.ll b/polly/test/ScopInfo/multiple-types-non-affine-2.ll
index 5b0aa5de1e71..cb0630da1b2e 100644
--- a/polly/test/ScopInfo/multiple-types-non-affine-2.ll
+++ b/polly/test/ScopInfo/multiple-types-non-affine-2.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-allow-differing-element-types -polly-print-scops -polly-allow-nonaffine -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-allow-differing-element-types -polly-codegen -polly-allow-nonaffine -disable-output
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-allow-differing-element-types '-passes=print<polly-function-scops>' -polly-allow-nonaffine -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-allow-differing-element-types -passes=polly-codegen -polly-allow-nonaffine -disable-output
;
; // Check that accessing one array with different types works,
; // even though some accesses are non-affine.
diff --git a/polly/test/ScopInfo/multiple-types-non-affine.ll b/polly/test/ScopInfo/multiple-types-non-affine.ll
index 8e4be4c86d5a..7349c5ae48ba 100644
--- a/polly/test/ScopInfo/multiple-types-non-affine.ll
+++ b/polly/test/ScopInfo/multiple-types-non-affine.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-allow-differing-element-types -polly-print-scops -polly-allow-nonaffine -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-allow-differing-element-types -polly-codegen -polly-allow-nonaffine -disable-output
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-allow-differing-element-types '-passes=print<polly-function-scops>' -polly-allow-nonaffine -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-allow-differing-element-types -passes=polly-codegen -polly-allow-nonaffine -disable-output
;
; // Check that accessing one array with different types works,
; // even though some accesses are non-affine.
diff --git a/polly/test/ScopInfo/multiple-types-non-power-of-two-2.ll b/polly/test/ScopInfo/multiple-types-non-power-of-two-2.ll
index 01f5923457b4..df280c88f866 100644
--- a/polly/test/ScopInfo/multiple-types-non-power-of-two-2.ll
+++ b/polly/test/ScopInfo/multiple-types-non-power-of-two-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-allow-differing-element-types -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-allow-differing-element-types -disable-output < %s 2>&1 | FileCheck %s
;
; void multiple_types(i8 *A) {
; for (long i = 0; i < 100; i++) {
diff --git a/polly/test/ScopInfo/multiple-types-non-power-of-two.ll b/polly/test/ScopInfo/multiple-types-non-power-of-two.ll
index 142a5ac395b3..b9494187d0ff 100644
--- a/polly/test/ScopInfo/multiple-types-non-power-of-two.ll
+++ b/polly/test/ScopInfo/multiple-types-non-power-of-two.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-allow-differing-element-types -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-allow-differing-element-types -disable-output < %s 2>&1 | FileCheck %s
;
; void multiple_types(i8 *A) {
; for (long i = 0; i < 100; i++) {
diff --git a/polly/test/ScopInfo/multiple-types-two-dimensional-2.ll b/polly/test/ScopInfo/multiple-types-two-dimensional-2.ll
index 1e2e53e85c25..e971ccc0ba44 100644
--- a/polly/test/ScopInfo/multiple-types-two-dimensional-2.ll
+++ b/polly/test/ScopInfo/multiple-types-two-dimensional-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -pass-remarks-analysis="polly-scops" \
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -pass-remarks-analysis="polly-scops" \
; RUN: -polly-allow-differing-element-types \
; RUN: -disable-output < %s 2>&1 | FileCheck %s
;
diff --git a/polly/test/ScopInfo/multiple-types-two-dimensional.ll b/polly/test/ScopInfo/multiple-types-two-dimensional.ll
index 21dc96e6f95d..34179508cae8 100644
--- a/polly/test/ScopInfo/multiple-types-two-dimensional.ll
+++ b/polly/test/ScopInfo/multiple-types-two-dimensional.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -pass-remarks-analysis="polly-scops" \
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -pass-remarks-analysis="polly-scops" \
; RUN: -polly-allow-differing-element-types \
; RUN: -disable-output < %s 2>&1 | FileCheck %s
;
diff --git a/polly/test/ScopInfo/multiple-types.ll b/polly/test/ScopInfo/multiple-types.ll
index 16db191c522f..84d7d3349e29 100644
--- a/polly/test/ScopInfo/multiple-types.ll
+++ b/polly/test/ScopInfo/multiple-types.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops \
-; RUN: -polly-allow-differing-element-types -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' \
+; RUN: -polly-allow-differing-element-types -disable-output < %s 2>&1 | FileCheck %s
;
; // Check that accessing one array with different types works.
; void multiple_types(char *Short, char *Float, char *Double) {
diff --git a/polly/test/ScopInfo/multiple_exiting_blocks.ll b/polly/test/ScopInfo/multiple_exiting_blocks.ll
index f8e5d4106a16..b0c425ee62cc 100644
--- a/polly/test/ScopInfo/multiple_exiting_blocks.ll
+++ b/polly/test/ScopInfo/multiple_exiting_blocks.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; The SCoP contains a loop with multiple exit blocks (BBs after leaving
; the loop). The current implementation of deriving their domain derives
diff --git a/polly/test/ScopInfo/multiple_exiting_blocks_two_loop.ll b/polly/test/ScopInfo/multiple_exiting_blocks_two_loop.ll
index c695f3c913db..ff0ec47be1c5 100644
--- a/polly/test/ScopInfo/multiple_exiting_blocks_two_loop.ll
+++ b/polly/test/ScopInfo/multiple_exiting_blocks_two_loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; The SCoP contains a loop with multiple exit blocks (BBs after leaving
; the loop). The current implementation of deriving their domain derives
diff --git a/polly/test/ScopInfo/multiple_latch_blocks.ll b/polly/test/ScopInfo/multiple_latch_blocks.ll
index d3949e7e2c3c..e5085daa2ca1 100644
--- a/polly/test/ScopInfo/multiple_latch_blocks.ll
+++ b/polly/test/ScopInfo/multiple_latch_blocks.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Domain :=
; CHECK: [N, P] -> { Stmt_if_end[i0] : 0 <= i0 < N and (i0 > P or i0 < P) };
diff --git a/polly/test/ScopInfo/nested-loops.ll b/polly/test/ScopInfo/nested-loops.ll
index ed814f826829..91002979f4fa 100644
--- a/polly/test/ScopInfo/nested-loops.ll
+++ b/polly/test/ScopInfo/nested-loops.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
diff --git a/polly/test/ScopInfo/no-scalar-deps-in-non-affine-subregion.ll b/polly/test/ScopInfo/no-scalar-deps-in-non-affine-subregion.ll
index 7c55e242641c..df010846bed2 100644
--- a/polly/test/ScopInfo/no-scalar-deps-in-non-affine-subregion.ll
+++ b/polly/test/ScopInfo/no-scalar-deps-in-non-affine-subregion.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Check that we do not generate any scalar dependences regarding x. It is
; defined and used in the non-affine subregion only, thus we do not need
diff --git a/polly/test/ScopInfo/non-affine-region-phi.ll b/polly/test/ScopInfo/non-affine-region-phi.ll
index f99782b9a0ff..3fb655e60f1c 100644
--- a/polly/test/ScopInfo/non-affine-region-phi.ll
+++ b/polly/test/ScopInfo/non-affine-region-phi.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine -S < %s | FileCheck %s --check-prefix=CODE
-; RUN: opt %loadPolly -polly-allow-nonaffine -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine -S < %s 2>&1 | FileCheck %s --check-prefix=CODE
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Verify there is a phi in the non-affine region but it is not represented in
; the SCoP, as all operands as well as the uses are inside the region too.
diff --git a/polly/test/ScopInfo/non-affine-region-with-loop-2.ll b/polly/test/ScopInfo/non-affine-region-with-loop-2.ll
index b673fda5ec3c..4c3ca4d21447 100644
--- a/polly/test/ScopInfo/non-affine-region-with-loop-2.ll
+++ b/polly/test/ScopInfo/non-affine-region-with-loop-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-allow-nonaffine-loops -polly-print-scops -polly-codegen -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-allow-nonaffine-loops '-passes=print<polly-detect>,print<polly-function-scops>,scop(polly-codegen)' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Stmt_loop3
; CHECK: Domain :=
diff --git a/polly/test/ScopInfo/non-affine-region-with-loop.ll b/polly/test/ScopInfo/non-affine-region-with-loop.ll
index 32dde8b4a682..f4c028ac2340 100644
--- a/polly/test/ScopInfo/non-affine-region-with-loop.ll
+++ b/polly/test/ScopInfo/non-affine-region-with-loop.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine-loops -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-allow-nonaffine-loops -polly-codegen -disable-output
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-loops '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-loops -passes=polly-codegen -disable-output
;
; CHECK: Domain :=
; CHECK-NEXT: { Stmt_loop2__TO__loop[] };
diff --git a/polly/test/ScopInfo/non-precise-inv-load-1.ll b/polly/test/ScopInfo/non-precise-inv-load-1.ll
index 5394206dd547..d55344b355f1 100644
--- a/polly/test/ScopInfo/non-precise-inv-load-1.ll
+++ b/polly/test/ScopInfo/non-precise-inv-load-1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
;
; Verify we do hoist the invariant access to I with an execution context
; as the address computation might wrap in the original but not in our
diff --git a/polly/test/ScopInfo/non-precise-inv-load-2.ll b/polly/test/ScopInfo/non-precise-inv-load-2.ll
index 5c0c56513a08..79ef3b88cb4f 100644
--- a/polly/test/ScopInfo/non-precise-inv-load-2.ll
+++ b/polly/test/ScopInfo/non-precise-inv-load-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
;
;
; CHECK: Invariant Accesses: {
diff --git a/polly/test/ScopInfo/non-precise-inv-load-3.ll b/polly/test/ScopInfo/non-precise-inv-load-3.ll
index 09d09319656b..aa9284766116 100644
--- a/polly/test/ScopInfo/non-precise-inv-load-3.ll
+++ b/polly/test/ScopInfo/non-precise-inv-load-3.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Invariant Accesses: {
; CHECK-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 0]
diff --git a/polly/test/ScopInfo/non-precise-inv-load-4.ll b/polly/test/ScopInfo/non-precise-inv-load-4.ll
index da5f656576d1..2a2241cb5a99 100644
--- a/polly/test/ScopInfo/non-precise-inv-load-4.ll
+++ b/polly/test/ScopInfo/non-precise-inv-load-4.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
;
; Verify we hoist I[0] without execution context even though it
; is executed in a statement with an invalid domain.
diff --git a/polly/test/ScopInfo/non-precise-inv-load-5.ll b/polly/test/ScopInfo/non-precise-inv-load-5.ll
index bff5f59a3302..a414c7c0fed1 100644
--- a/polly/test/ScopInfo/non-precise-inv-load-5.ll
+++ b/polly/test/ScopInfo/non-precise-inv-load-5.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
;
; Verify we do not hoist I[c] without execution context because it
; is executed in a statement with an invalid domain and it depends
diff --git a/polly/test/ScopInfo/non-precise-inv-load-6.ll b/polly/test/ScopInfo/non-precise-inv-load-6.ll
index 03540a8ead96..1300617f00ee 100644
--- a/polly/test/ScopInfo/non-precise-inv-load-6.ll
+++ b/polly/test/ScopInfo/non-precise-inv-load-6.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
;
; Check that we model the execution context correctly.
;
diff --git a/polly/test/ScopInfo/non-pure-function-call.ll b/polly/test/ScopInfo/non-pure-function-call.ll
index 4ffb8d28865d..81d43db5c352 100644
--- a/polly/test/ScopInfo/non-pure-function-call.ll
+++ b/polly/test/ScopInfo/non-pure-function-call.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Assumed Context:
; CHECK-NEXT: [N] -> { : }
diff --git a/polly/test/ScopInfo/non-pure-function-calls-causes-dead-blocks.ll b/polly/test/ScopInfo/non-pure-function-calls-causes-dead-blocks.ll
index 27998b50b74f..6cbb41041be8 100644
--- a/polly/test/ScopInfo/non-pure-function-calls-causes-dead-blocks.ll
+++ b/polly/test/ScopInfo/non-pure-function-calls-causes-dead-blocks.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Error blocks are skipped during SCoP detection. We skip them during
; SCoP formation too, as they might contain instructions we cannot handle.
diff --git a/polly/test/ScopInfo/non-pure-function-calls.ll b/polly/test/ScopInfo/non-pure-function-calls.ll
index 3ecf75853773..f97644052272 100644
--- a/polly/test/ScopInfo/non-pure-function-calls.ll
+++ b/polly/test/ScopInfo/non-pure-function-calls.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Allow the user to define function names that are treated as
; error functions and assumed not to be executed.
diff --git a/polly/test/ScopInfo/non_affine_access.ll b/polly/test/ScopInfo/non_affine_access.ll
index a83c9484ad52..0338edf05329 100644
--- a/polly/test/ScopInfo/non_affine_access.ll
+++ b/polly/test/ScopInfo/non_affine_access.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-scops -polly-allow-nonaffine -disable-output < %s | FileCheck %s -check-prefix=NONAFFINE
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -polly-allow-nonaffine -disable-output < %s 2>&1 | FileCheck %s -check-prefix=NONAFFINE
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; void foo(long *A) {
diff --git a/polly/test/ScopInfo/non_affine_region_1.ll b/polly/test/ScopInfo/non_affine_region_1.ll
index 7c4312599cf0..8980a711b325 100644
--- a/polly/test/ScopInfo/non_affine_region_1.ll
+++ b/polly/test/ScopInfo/non_affine_region_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Verify only the incoming scalar x is modeled as a read in the non-affine
; region.
diff --git a/polly/test/ScopInfo/non_affine_region_2.ll b/polly/test/ScopInfo/non_affine_region_2.ll
index 0bc467c92bcb..b2e072f7a3bf 100644
--- a/polly/test/ScopInfo/non_affine_region_2.ll
+++ b/polly/test/ScopInfo/non_affine_region_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Verify the scalar x defined in a non-affine subregion is written as it
; escapes the region. In this test the two conditionals inside the region
diff --git a/polly/test/ScopInfo/non_affine_region_3.ll b/polly/test/ScopInfo/non_affine_region_3.ll
index 6d5f94df6110..d850cb5c95aa 100644
--- a/polly/test/ScopInfo/non_affine_region_3.ll
+++ b/polly/test/ScopInfo/non_affine_region_3.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-function-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Verify the scalar x defined in a non-affine subregion is written as it
; escapes the region. In this test the two conditionals inside the region
diff --git a/polly/test/ScopInfo/non_affine_region_4.ll b/polly/test/ScopInfo/non_affine_region_4.ll
index f37e0ecb89d1..c5309734a668 100644
--- a/polly/test/ScopInfo/non_affine_region_4.ll
+++ b/polly/test/ScopInfo/non_affine_region_4.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Verify that both scalars (x and y) are properly written in the non-affine
; region and read afterwards.
diff --git a/polly/test/ScopInfo/nonaffine-buildMemoryAccess.ll b/polly/test/ScopInfo/nonaffine-buildMemoryAccess.ll
index 445dd164898b..b1ce00f0df94 100644
--- a/polly/test/ScopInfo/nonaffine-buildMemoryAccess.ll
+++ b/polly/test/ScopInfo/nonaffine-buildMemoryAccess.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine-loops -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine-loops '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Domain :=
; CHECK-NEXT: { Stmt_while_cond_i__TO__while_end_i[] };
diff --git a/polly/test/ScopInfo/not-a-reduction.ll b/polly/test/ScopInfo/not-a-reduction.ll
index 87909290fd71..3a961b2dc171 100644
--- a/polly/test/ScopInfo/not-a-reduction.ll
+++ b/polly/test/ScopInfo/not-a-reduction.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s 2>&1 | not FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | not FileCheck %s
;#define TYPE float
;#define NUM 4
diff --git a/polly/test/ScopInfo/opaque-struct.ll b/polly/test/ScopInfo/opaque-struct.ll
index 19fdd9bf9179..f4f79525069e 100644
--- a/polly/test/ScopInfo/opaque-struct.ll
+++ b/polly/test/ScopInfo/opaque-struct.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-scops -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s
;
; Check that we do not crash with unsized (opaque) types.
;
diff --git a/polly/test/ScopInfo/out-of-scop-use-in-region-entry-phi-node-nonaffine-subregion.ll b/polly/test/ScopInfo/out-of-scop-use-in-region-entry-phi-node-nonaffine-subregion.ll
index 394173bdc986..eed27b1c4d9d 100644
--- a/polly/test/ScopInfo/out-of-scop-use-in-region-entry-phi-node-nonaffine-subregion.ll
+++ b/polly/test/ScopInfo/out-of-scop-use-in-region-entry-phi-node-nonaffine-subregion.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-codegen -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S < %s 2>&1 | FileCheck %s
;
; Check whether %newval is identified as an escaping value, even though it is used
; in a phi that is in the region. Non-affine subregion case.
diff --git a/polly/test/ScopInfo/out-of-scop-use-in-region-entry-phi-node.ll b/polly/test/ScopInfo/out-of-scop-use-in-region-entry-phi-node.ll
index e17164e89372..44da399e704d 100644
--- a/polly/test/ScopInfo/out-of-scop-use-in-region-entry-phi-node.ll
+++ b/polly/test/ScopInfo/out-of-scop-use-in-region-entry-phi-node.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; CHECK: MustWriteAccess := [Reduction Type: NONE] [Scalar: 1]
; CHECK-NEXT: [p_0] -> { Stmt_bb3[] -> MemRef_tmp5[] };
diff --git a/polly/test/ScopInfo/parameter-constant-division.ll b/polly/test/ScopInfo/parameter-constant-division.ll
index cd6b9e3526aa..e5dd359158b8 100644
--- a/polly/test/ScopInfo/parameter-constant-division.ll
+++ b/polly/test/ScopInfo/parameter-constant-division.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops \
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' \
; RUN: -polly-invariant-load-hoisting=true \
-; RUN: -disable-output < %s | FileCheck %s
+; RUN: -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Invariant Accesses: {
; CHECK-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 0]
diff --git a/polly/test/ScopInfo/parameter_in_dead_statement.ll b/polly/test/ScopInfo/parameter_in_dead_statement.ll
index 4b4a87f098d7..b295f17f628a 100644
--- a/polly/test/ScopInfo/parameter_in_dead_statement.ll
+++ b/polly/test/ScopInfo/parameter_in_dead_statement.ll
@@ -1,7 +1,7 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output \
-; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -S \
-; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s --check-prefix=IR
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output \
+; RUN: -polly-invariant-load-hoisting=true < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -S \
+; RUN: -polly-invariant-load-hoisting=true < %s 2>&1 | FileCheck %s --check-prefix=IR
;
; Verify we do not create assumptions based on the parameter p_1, which is the
; load %0 and, due to error-assumptions, not "part of the SCoP".
diff --git a/polly/test/ScopInfo/parameter_product.ll b/polly/test/ScopInfo/parameter_product.ll
index 1ba7280f97c9..2fe16f9d95f6 100644
--- a/polly/test/ScopInfo/parameter_product.ll
+++ b/polly/test/ScopInfo/parameter_product.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; int n, m;
; void foo(char* __restrict a)
diff --git a/polly/test/ScopInfo/parameter_with_constant_factor_in_add.ll b/polly/test/ScopInfo/parameter_with_constant_factor_in_add.ll
index 72d580801573..6544aaec76f7 100644
--- a/polly/test/ScopInfo/parameter_with_constant_factor_in_add.ll
+++ b/polly/test/ScopInfo/parameter_with_constant_factor_in_add.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Check that the access function of the store is simple and concise
;
diff --git a/polly/test/ScopInfo/partially_invariant_load_1.ll b/polly/test/ScopInfo/partially_invariant_load_1.ll
index 274a7873c782..f3923f6127cd 100644
--- a/polly/test/ScopInfo/partially_invariant_load_1.ll
+++ b/polly/test/ScopInfo/partially_invariant_load_1.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-codegen -polly-invariant-load-hoisting=true -S < %s | FileCheck %s --check-prefix=IR
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -passes=polly-codegen -polly-invariant-load-hoisting=true -S < %s 2>&1 | FileCheck %s --check-prefix=IR
;
; CHECK: Invariant Accesses: {
; CHECK-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 0]
diff --git a/polly/test/ScopInfo/partially_invariant_load_2.ll b/polly/test/ScopInfo/partially_invariant_load_2.ll
index ee1092883f72..d0d74ad99e09 100644
--- a/polly/test/ScopInfo/partially_invariant_load_2.ll
+++ b/polly/test/ScopInfo/partially_invariant_load_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-invariant-load-hoisting=true -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
;
; Check that we do not try to preload *I and assume p != 42.
;
diff --git a/polly/test/ScopInfo/phi-in-non-affine-region.ll b/polly/test/ScopInfo/phi-in-non-affine-region.ll
index 6ef24e3f1456..fbbc158b566b 100644
--- a/polly/test/ScopInfo/phi-in-non-affine-region.ll
+++ b/polly/test/ScopInfo/phi-in-non-affine-region.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; Verify that 'tmp' is stored in bb1 and read by bb3, as it is needed as the
; incoming value for the tmp11 PHI node.
diff --git a/polly/test/ScopInfo/phi_after_error_block.ll b/polly/test/ScopInfo/phi_after_error_block.ll
index 039fb86bec5b..a1eadff3e971 100644
--- a/polly/test/ScopInfo/phi_after_error_block.ll
+++ b/polly/test/ScopInfo/phi_after_error_block.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
declare void @bar()
diff --git a/polly/test/ScopInfo/phi_condition_modeling_1.ll b/polly/test/ScopInfo/phi_condition_modeling_1.ll
index a879c2005ad8..a889ec96a4b1 100644
--- a/polly/test/ScopInfo/phi_condition_modeling_1.ll
+++ b/polly/test/ScopInfo/phi_condition_modeling_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(int *A, int c, int N) {
; int tmp;
diff --git a/polly/test/ScopInfo/phi_condition_modeling_2.ll b/polly/test/ScopInfo/phi_condition_modeling_2.ll
index cedc140f8438..b56b77e1f453 100644
--- a/polly/test/ScopInfo/phi_condition_modeling_2.ll
+++ b/polly/test/ScopInfo/phi_condition_modeling_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(int *A, int c, int N) {
; int tmp;
diff --git a/polly/test/ScopInfo/phi_conditional_simple_1.ll b/polly/test/ScopInfo/phi_conditional_simple_1.ll
index 90213a953767..14fdc38201bc 100644
--- a/polly/test/ScopInfo/phi_conditional_simple_1.ll
+++ b/polly/test/ScopInfo/phi_conditional_simple_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void jd(int *A, int c) {
; for (int i = 0; i < 1024; i++) {
diff --git a/polly/test/ScopInfo/phi_loop_carried_float.ll b/polly/test/ScopInfo/phi_loop_carried_float.ll
index d8d2608329bc..76e5507f24b0 100644
--- a/polly/test/ScopInfo/phi_loop_carried_float.ll
+++ b/polly/test/ScopInfo/phi_loop_carried_float.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; float f(float *A, int N) {
; float tmp = 0;
diff --git a/polly/test/ScopInfo/phi_not_grouped_at_top.ll b/polly/test/ScopInfo/phi_not_grouped_at_top.ll
index be082165b635..c97d9a27b24b 100644
--- a/polly/test/ScopInfo/phi_not_grouped_at_top.ll
+++ b/polly/test/ScopInfo/phi_not_grouped_at_top.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-prepare -disable-output < %s
+; RUN: opt %loadNPMPolly -passes=polly-prepare -disable-output < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
declare i32 @funa() align 2
diff --git a/polly/test/ScopInfo/phi_scalar_simple_1.ll b/polly/test/ScopInfo/phi_scalar_simple_1.ll
index d042613c023f..ffd1a37f8a79 100644
--- a/polly/test/ScopInfo/phi_scalar_simple_1.ll
+++ b/polly/test/ScopInfo/phi_scalar_simple_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; The assumed context should be empty since the <nsw> flags on the IV
; increments already guarantee that there is no wrap in the loop trip
diff --git a/polly/test/ScopInfo/phi_scalar_simple_2.ll b/polly/test/ScopInfo/phi_scalar_simple_2.ll
index fb4292e05ca6..0d6d9029c61c 100644
--- a/polly/test/ScopInfo/phi_scalar_simple_2.ll
+++ b/polly/test/ScopInfo/phi_scalar_simple_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; int jd(int *restrict A, int x, int N, int c) {
; for (int i = 0; i < N; i++)
diff --git a/polly/test/ScopInfo/phi_with_invoke_edge.ll b/polly/test/ScopInfo/phi_with_invoke_edge.ll
index dbcf04c0561a..9c98ec0c603c 100644
--- a/polly/test/ScopInfo/phi_with_invoke_edge.ll
+++ b/polly/test/ScopInfo/phi_with_invoke_edge.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-detect -disable-output < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
declare i32 @generic_personality_v0(i32, i64, ptr, ptr)
diff --git a/polly/test/ScopInfo/pointer-comparison-no-nsw.ll b/polly/test/ScopInfo/pointer-comparison-no-nsw.ll
index 094c5ccab54d..18ba18c69f1f 100644
--- a/polly/test/ScopInfo/pointer-comparison-no-nsw.ll
+++ b/polly/test/ScopInfo/pointer-comparison-no-nsw.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(int *A, int *B) {
; while (A != B) {
diff --git a/polly/test/ScopInfo/pointer-comparison.ll b/polly/test/ScopInfo/pointer-comparison.ll
index 15ce0491209a..846640ac630f 100644
--- a/polly/test/ScopInfo/pointer-comparison.ll
+++ b/polly/test/ScopInfo/pointer-comparison.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; TODO: FIXME: Investigate why we need an InvalidContext here.
;
diff --git a/polly/test/ScopInfo/pointer-type-expressions.ll b/polly/test/ScopInfo/pointer-type-expressions.ll
index ebbb644340f6..89dce6536a10 100644
--- a/polly/test/ScopInfo/pointer-type-expressions.ll
+++ b/polly/test/ScopInfo/pointer-type-expressions.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; void f(int a[], int N, float *P) {
; int i;
diff --git a/polly/test/ScopInfo/pointer-used-as-base-pointer-and-scalar-read.ll b/polly/test/ScopInfo/pointer-used-as-base-pointer-and-scalar-read.ll
index 3ac86a3443af..7b6d0d542581 100644
--- a/polly/test/ScopInfo/pointer-used-as-base-pointer-and-scalar-read.ll
+++ b/polly/test/ScopInfo/pointer-used-as-base-pointer-and-scalar-read.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; In this test case we pass a pointer %A into a PHI node and also use this
; pointer as base pointer of an array store. As a result, we get both scalar
diff --git a/polly/test/ScopInfo/polly-timeout-parameter-bounds.ll b/polly/test/ScopInfo/polly-timeout-parameter-bounds.ll
index 8152010c2c99..13087a517501 100644
--- a/polly/test/ScopInfo/polly-timeout-parameter-bounds.ll
+++ b/polly/test/ScopInfo/polly-timeout-parameter-bounds.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; CHECK: Statements {
; CHECK-NEXT: Stmt_bb9
diff --git a/polly/test/ScopInfo/preserve-equiv-class-order-in-basic_block.ll b/polly/test/ScopInfo/preserve-equiv-class-order-in-basic_block.ll
index 4a68acd3d509..33fa0126aa30 100644
--- a/polly/test/ScopInfo/preserve-equiv-class-order-in-basic_block.ll
+++ b/polly/test/ScopInfo/preserve-equiv-class-order-in-basic_block.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=scalar-indep -polly-print-instructions -polly-print-scops -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=scalar-indep -polly-print-instructions '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -match-full-lines
target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/polly/test/ScopInfo/process_added_dimensions.ll b/polly/test/ScopInfo/process_added_dimensions.ll
index 6cb270a071f4..2d06f4b99597 100644
--- a/polly/test/ScopInfo/process_added_dimensions.ll
+++ b/polly/test/ScopInfo/process_added_dimensions.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; CHECK: Context:
; CHECK-NEXT: { : }
diff --git a/polly/test/ScopInfo/pwaff-complexity-bailout.ll b/polly/test/ScopInfo/pwaff-complexity-bailout.ll
index 19dd156d27db..931e08fb8f2f 100644
--- a/polly/test/ScopInfo/pwaff-complexity-bailout.ll
+++ b/polly/test/ScopInfo/pwaff-complexity-bailout.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-scops -pass-remarks-analysis=.* -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -pass-remarks-analysis=.* -disable-output < %s 2>&1 | FileCheck %s
; Make sure we hit the complexity bailout, and don't crash.
; CHECK: Low complexity assumption: { : false }
diff --git a/polly/test/ScopInfo/ranged_parameter.ll b/polly/test/ScopInfo/ranged_parameter.ll
index 4b04960ee845..03562b1fd124 100644
--- a/polly/test/ScopInfo/ranged_parameter.ll
+++ b/polly/test/ScopInfo/ranged_parameter.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Check that the constraints on the parameter derived from the
; range metadata (see bottom of the file) are present:
diff --git a/polly/test/ScopInfo/ranged_parameter_2.ll b/polly/test/ScopInfo/ranged_parameter_2.ll
index cd7d2bfb84d0..18cbbf3b87cd 100644
--- a/polly/test/ScopInfo/ranged_parameter_2.ll
+++ b/polly/test/ScopInfo/ranged_parameter_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output -polly-allow-nonaffine -polly-invariant-load-hoisting=true < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output -polly-allow-nonaffine -polly-invariant-load-hoisting=true < %s \
; RUN: -debug 2>&1 | FileCheck %s
; REQUIRES: asserts
diff --git a/polly/test/ScopInfo/ranged_parameter_wrap.ll b/polly/test/ScopInfo/ranged_parameter_wrap.ll
index 173746352cf0..d236eeeefc11 100644
--- a/polly/test/ScopInfo/ranged_parameter_wrap.ll
+++ b/polly/test/ScopInfo/ranged_parameter_wrap.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Check that the constraints on the parameter derived from the
; __wrapping__ range metadata (see bottom of the file) are present:
diff --git a/polly/test/ScopInfo/ranged_parameter_wrap_2.ll b/polly/test/ScopInfo/ranged_parameter_wrap_2.ll
index 33f57f37a1e8..fc0a737a5edb 100644
--- a/polly/test/ScopInfo/ranged_parameter_wrap_2.ll
+++ b/polly/test/ScopInfo/ranged_parameter_wrap_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Check that the context is built fast and does not explode due to us
; combining a large number of non-convex ranges. Instead, after a certain
diff --git a/polly/test/ScopInfo/read-only-scalar-used-in-phi-2.ll b/polly/test/ScopInfo/read-only-scalar-used-in-phi-2.ll
index 23c7aa261ac0..7e6f2406a0ac 100644
--- a/polly/test/ScopInfo/read-only-scalar-used-in-phi-2.ll
+++ b/polly/test/ScopInfo/read-only-scalar-used-in-phi-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; float foo(float sum, float A[]) {
;
diff --git a/polly/test/ScopInfo/read-only-scalar-used-in-phi.ll b/polly/test/ScopInfo/read-only-scalar-used-in-phi.ll
index 20f44c94251c..18e6c1fac9e1 100644
--- a/polly/test/ScopInfo/read-only-scalar-used-in-phi.ll
+++ b/polly/test/ScopInfo/read-only-scalar-used-in-phi.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; float foo(float sum, float A[]) {
;
diff --git a/polly/test/ScopInfo/read-only-scalars.ll b/polly/test/ScopInfo/read-only-scalars.ll
index 71c2d21e357a..f04163e48028 100644
--- a/polly/test/ScopInfo/read-only-scalars.ll
+++ b/polly/test/ScopInfo/read-only-scalars.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-analyze-read-only-scalars=false -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-analyze-read-only-scalars=true -polly-print-scops -disable-output < %s | FileCheck %s -check-prefix=SCALARS
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-analyze-read-only-scalars=false '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-analyze-read-only-scalars=true '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -check-prefix=SCALARS
; CHECK-NOT: Memref_scalar
diff --git a/polly/test/ScopInfo/read-only-statements.ll b/polly/test/ScopInfo/read-only-statements.ll
index a93063ea3ad6..7bac53a2b6b5 100644
--- a/polly/test/ScopInfo/read-only-statements.ll
+++ b/polly/test/ScopInfo/read-only-statements.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Check that we remove read-only statements.
;
diff --git a/polly/test/ScopInfo/reduction_alternating_base.ll b/polly/test/ScopInfo/reduction_alternating_base.ll
index 854e28023a3e..e38ff6046ac0 100644
--- a/polly/test/ScopInfo/reduction_alternating_base.ll
+++ b/polly/test/ScopInfo/reduction_alternating_base.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
;
; void f(int *A) {
diff --git a/polly/test/ScopInfo/reduction_chain_partially_outside_the_scop.ll b/polly/test/ScopInfo/reduction_chain_partially_outside_the_scop.ll
index fb0274972082..17f9dc57f282 100644
--- a/polly/test/ScopInfo/reduction_chain_partially_outside_the_scop.ll
+++ b/polly/test/ScopInfo/reduction_chain_partially_outside_the_scop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Reduction Type: NONE
;
diff --git a/polly/test/ScopInfo/reduction_different_index.ll b/polly/test/ScopInfo/reduction_different_index.ll
index 575e5a16d7b2..d2786d5fd677 100644
--- a/polly/test/ScopInfo/reduction_different_index.ll
+++ b/polly/test/ScopInfo/reduction_different_index.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; Verify that the following case is not detected as a reduction.
;
; void f(int *A, int *sum) {
diff --git a/polly/test/ScopInfo/reduction_different_index1.ll b/polly/test/ScopInfo/reduction_different_index1.ll
index 39bd3c4b9abe..710ae3e74f21 100644
--- a/polly/test/ScopInfo/reduction_different_index1.ll
+++ b/polly/test/ScopInfo/reduction_different_index1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; Verify that the following case is not detected as a reduction.
;
; void f(int *A, int *sum, int i1, int i2) {
diff --git a/polly/test/ScopInfo/reduction_disabled_multiplicative.ll b/polly/test/ScopInfo/reduction_disabled_multiplicative.ll
index 7120740fbf34..61228e075dab 100644
--- a/polly/test/ScopInfo/reduction_disabled_multiplicative.ll
+++ b/polly/test/ScopInfo/reduction_disabled_multiplicative.ll
@@ -1,4 +1,4 @@
-; RUN: opt -basic-aa %loadPolly -polly-stmt-granularity=bb -polly-print-scops -polly-disable-multiplicative-reductions -disable-output < %s | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -polly-disable-multiplicative-reductions -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: ReadAccess := [Reduction Type: +
; CHECK: { Stmt_for_body[i0] -> MemRef_sum[0] };
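Tests that previously loaded BasicAA as a legacy pass switch to the new-PM alias-analysis pipeline flag instead; a minimal sketch of that mapping, with the remainder of the RUN line unchanged (the "..." here only stands in for the flags shown in the surrounding hunks):

; Legacy pass manager:
; RUN: opt -basic-aa %loadPolly ... | FileCheck %s
; New pass manager equivalent:
; RUN: opt -aa-pipeline=basic-aa %loadNPMPolly ... 2>&1 | FileCheck %s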
diff --git a/polly/test/ScopInfo/reduction_escaping_intermediate.ll b/polly/test/ScopInfo/reduction_escaping_intermediate.ll
index dde09108ecc4..c66a8be0852f 100644
--- a/polly/test/ScopInfo/reduction_escaping_intermediate.ll
+++ b/polly/test/ScopInfo/reduction_escaping_intermediate.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(int N, int * restrict sums, int * restrict escape) {
; int i, j;
diff --git a/polly/test/ScopInfo/reduction_escaping_intermediate_2.ll b/polly/test/ScopInfo/reduction_escaping_intermediate_2.ll
index 702fc56025d9..c574d315b2fe 100644
--- a/polly/test/ScopInfo/reduction_escaping_intermediate_2.ll
+++ b/polly/test/ScopInfo/reduction_escaping_intermediate_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(int N, int * restrict sums, int * restrict escape) {
; int i, j;
diff --git a/polly/test/ScopInfo/reduction_invalid_different_operators.ll b/polly/test/ScopInfo/reduction_invalid_different_operators.ll
index f47919dcad99..9846f1029c08 100644
--- a/polly/test/ScopInfo/reduction_invalid_different_operators.ll
+++ b/polly/test/ScopInfo/reduction_invalid_different_operators.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; int f() {
; int i, sum = 0, sth = 0;
diff --git a/polly/test/ScopInfo/reduction_invalid_overlapping_accesses.ll b/polly/test/ScopInfo/reduction_invalid_overlapping_accesses.ll
index be1d7b5bbbd9..4d70e5330455 100644
--- a/polly/test/ScopInfo/reduction_invalid_overlapping_accesses.ll
+++ b/polly/test/ScopInfo/reduction_invalid_overlapping_accesses.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(int *sums) {
; int i, j;
diff --git a/polly/test/ScopInfo/reduction_multiple_loops_array_sum.ll b/polly/test/ScopInfo/reduction_multiple_loops_array_sum.ll
index 8d20fa13ffe5..800eb2043dc6 100644
--- a/polly/test/ScopInfo/reduction_multiple_loops_array_sum.ll
+++ b/polly/test/ScopInfo/reduction_multiple_loops_array_sum.ll
@@ -1,4 +1,4 @@
-; RUN: opt -basic-aa %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Stmt_for_body
; CHECK: Reduction Type: *
diff --git a/polly/test/ScopInfo/reduction_multiple_loops_array_sum_1.ll b/polly/test/ScopInfo/reduction_multiple_loops_array_sum_1.ll
index 782332b56aad..49ebdcb04498 100644
--- a/polly/test/ScopInfo/reduction_multiple_loops_array_sum_1.ll
+++ b/polly/test/ScopInfo/reduction_multiple_loops_array_sum_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt -basic-aa %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Stmt_for_body
; CHECK: Reduction Type: NONE
diff --git a/polly/test/ScopInfo/reduction_multiple_simple_binary.ll b/polly/test/ScopInfo/reduction_multiple_simple_binary.ll
index 0f1a3ad90dac..77b71f4df301 100644
--- a/polly/test/ScopInfo/reduction_multiple_simple_binary.ll
+++ b/polly/test/ScopInfo/reduction_multiple_simple_binary.ll
@@ -1,4 +1,4 @@
-; RUN: opt -basic-aa %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: ReadAccess := [Reduction Type: NONE
; CHECK: { Stmt_for_body[i0] -> MemRef_A[1 + i0] };
diff --git a/polly/test/ScopInfo/reduction_non_overlapping_chains.ll b/polly/test/ScopInfo/reduction_non_overlapping_chains.ll
index 4e3f841cd8e1..61aaa051e49d 100644
--- a/polly/test/ScopInfo/reduction_non_overlapping_chains.ll
+++ b/polly/test/ScopInfo/reduction_non_overlapping_chains.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Reduction Type: +
; CHECK: Reduction Type: +
diff --git a/polly/test/ScopInfo/reduction_only_reduction_like_access.ll b/polly/test/ScopInfo/reduction_only_reduction_like_access.ll
index 0c61d63a2d45..fb6d236764b7 100644
--- a/polly/test/ScopInfo/reduction_only_reduction_like_access.ll
+++ b/polly/test/ScopInfo/reduction_only_reduction_like_access.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Reduction Type: +
;
diff --git a/polly/test/ScopInfo/reduction_simple_fp.ll b/polly/test/ScopInfo/reduction_simple_fp.ll
index ba0a034a17e3..aa4cd00f39f5 100644
--- a/polly/test/ScopInfo/reduction_simple_fp.ll
+++ b/polly/test/ScopInfo/reduction_simple_fp.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Function: f_no_fast_math
; CHECK: Reduction Type: NONE
diff --git a/polly/test/ScopInfo/reduction_simple_w_constant.ll b/polly/test/ScopInfo/reduction_simple_w_constant.ll
index dc1f8550602d..e385b66f9db2 100644
--- a/polly/test/ScopInfo/reduction_simple_w_constant.ll
+++ b/polly/test/ScopInfo/reduction_simple_w_constant.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Reduction Type: +
;
diff --git a/polly/test/ScopInfo/reduction_simple_w_iv.ll b/polly/test/ScopInfo/reduction_simple_w_iv.ll
index b6c3229d08d5..e22eccbb2831 100644
--- a/polly/test/ScopInfo/reduction_simple_w_iv.ll
+++ b/polly/test/ScopInfo/reduction_simple_w_iv.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Reduction Type: +
;
diff --git a/polly/test/ScopInfo/reduction_two_identical_reads.ll b/polly/test/ScopInfo/reduction_two_identical_reads.ll
index 19d45a5f4ea9..8f00954f7efc 100644
--- a/polly/test/ScopInfo/reduction_two_identical_reads.ll
+++ b/polly/test/ScopInfo/reduction_two_identical_reads.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-function-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Reduction Type: NONE
;
diff --git a/polly/test/ScopInfo/redundant_parameter_constraint.ll b/polly/test/ScopInfo/redundant_parameter_constraint.ll
index c9d912191eed..ad71f1f59e18 100644
--- a/polly/test/ScopInfo/redundant_parameter_constraint.ll
+++ b/polly/test/ScopInfo/redundant_parameter_constraint.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; The constraint that r2 has to be bigger than r1 is implicitly contained in
; the domain, hence we do not want to see it explicitly.
diff --git a/polly/test/ScopInfo/region-with-instructions.ll b/polly/test/ScopInfo/region-with-instructions.ll
index 39d4a72a7814..d4720511b7aa 100644
--- a/polly/test/ScopInfo/region-with-instructions.ll
+++ b/polly/test/ScopInfo/region-with-instructions.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -polly-print-instructions -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -polly-print-instructions -disable-output < %s 2>&1 | FileCheck %s
; CHECK: Statements {
; CHECK: Stmt_bb46
diff --git a/polly/test/ScopInfo/remarks.ll b/polly/test/ScopInfo/remarks.ll
index dcdeb58c7694..2c173a31c46e 100644
--- a/polly/test/ScopInfo/remarks.ll
+++ b/polly/test/ScopInfo/remarks.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -pass-remarks-analysis="polly-scops" -polly-scops \
+; RUN: opt %loadNPMPolly -pass-remarks-analysis="polly-scops" '-passes=print<polly-function-scops>' \
; RUN: -polly-invariant-load-hoisting=true -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: remark: test/ScopInfo/remarks.c:4:7: SCoP begins here.
diff --git a/polly/test/ScopInfo/required-invariant-loop-bounds.ll b/polly/test/ScopInfo/required-invariant-loop-bounds.ll
index 248acbea6e68..abf0b0e23855 100644
--- a/polly/test/ScopInfo/required-invariant-loop-bounds.ll
+++ b/polly/test/ScopInfo/required-invariant-loop-bounds.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output \
-; RUN: -polly-invariant-load-hoisting=true < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output \
+; RUN: -polly-invariant-load-hoisting=true < %s 2>&1 | FileCheck %s
;
; CHECK: Invariant Accesses: {
; CHECK-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 0]
diff --git a/polly/test/ScopInfo/restriction_in_dead_block.ll b/polly/test/ScopInfo/restriction_in_dead_block.ll
index 81d9b96be419..487c585cb9d9 100644
--- a/polly/test/ScopInfo/restriction_in_dead_block.ll
+++ b/polly/test/ScopInfo/restriction_in_dead_block.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Verify we do not generate an empty invalid context only because the wrap
; in the second conditional will always happen if the block is executed.
diff --git a/polly/test/ScopInfo/run-time-check-many-array-disjuncts.ll b/polly/test/ScopInfo/run-time-check-many-array-disjuncts.ll
index d36da2b2becf..702b7dc5e004 100644
--- a/polly/test/ScopInfo/run-time-check-many-array-disjuncts.ll
+++ b/polly/test/ScopInfo/run-time-check-many-array-disjuncts.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 \
; RUN: | FileCheck %s -check-prefix=DETECT
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; DETECT: Valid Region for Scop: bb124 => bb176
;
diff --git a/polly/test/ScopInfo/run-time-check-many-parameters.ll b/polly/test/ScopInfo/run-time-check-many-parameters.ll
index 30f8d5fff34c..559c38d2682e 100644
--- a/polly/test/ScopInfo/run-time-check-many-parameters.ll
+++ b/polly/test/ScopInfo/run-time-check-many-parameters.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; A valid Scop would print the list of its statements; we check that we do not
; see that list.
diff --git a/polly/test/ScopInfo/run-time-check-many-piecewise-aliasing.ll b/polly/test/ScopInfo/run-time-check-many-piecewise-aliasing.ll
index 487c803bba98..3cf4c40bdb60 100644
--- a/polly/test/ScopInfo/run-time-check-many-piecewise-aliasing.ll
+++ b/polly/test/ScopInfo/run-time-check-many-piecewise-aliasing.ll
@@ -1,6 +1,6 @@
-; RUN: opt %loadPolly -polly-print-detect -disable-output < %s \
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>' -disable-output < %s 2>&1 \
; RUN: | FileCheck %s -check-prefix=DETECT
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; DETECT: Valid Region for Scop: for => return
;
diff --git a/polly/test/ScopInfo/run-time-check-read-only-arrays.ll b/polly/test/ScopInfo/run-time-check-read-only-arrays.ll
index d590aaf00ddb..51ab81476d54 100644
--- a/polly/test/ScopInfo/run-time-check-read-only-arrays.ll
+++ b/polly/test/ScopInfo/run-time-check-read-only-arrays.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void foo(float *A, float *B, float *C, long N) {
; for (long i = 0; i < N; i++)
diff --git a/polly/test/ScopInfo/same-base-address-scalar-and-array.ll b/polly/test/ScopInfo/same-base-address-scalar-and-array.ll
index a5f353e7ad2a..dd809ba156c7 100644
--- a/polly/test/ScopInfo/same-base-address-scalar-and-array.ll
+++ b/polly/test/ScopInfo/same-base-address-scalar-and-array.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Verify we introduce two ScopArrayInfo objects (or virtual arrays) for the %out variable
; as it is used as a memory base pointer (%0) but also as a scalar (%out.addr.0.lcssa).
diff --git a/polly/test/ScopInfo/scalar.ll b/polly/test/ScopInfo/scalar.ll
index c38eaa853b9b..812d2fddc3c8 100644
--- a/polly/test/ScopInfo/scalar.ll
+++ b/polly/test/ScopInfo/scalar.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
diff --git a/polly/test/ScopInfo/scalar_dependence_cond_br.ll b/polly/test/ScopInfo/scalar_dependence_cond_br.ll
index 3303bfb7c6c5..59549f3dbbad 100644
--- a/polly/test/ScopInfo/scalar_dependence_cond_br.ll
+++ b/polly/test/ScopInfo/scalar_dependence_cond_br.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output< %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output< %s 2>&1 | FileCheck %s
;
; void f(int *A, int c, int d) {
; for (int i = 0; i < 1024; i++)
diff --git a/polly/test/ScopInfo/scalar_to_array.ll b/polly/test/ScopInfo/scalar_to_array.ll
index 5c275108602a..d64f1696c30b 100644
--- a/polly/test/ScopInfo/scalar_to_array.ll
+++ b/polly/test/ScopInfo/scalar_to_array.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -basic-aa -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -basic-aa -polly-print-function-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; ModuleID = 'scalar_to_array.ll'
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/polly/test/ScopInfo/scev-div-with-evaluatable-divisor.ll b/polly/test/ScopInfo/scev-div-with-evaluatable-divisor.ll
index fc7a1bfc3d5e..d14569cf0c5d 100644
--- a/polly/test/ScopInfo/scev-div-with-evaluatable-divisor.ll
+++ b/polly/test/ScopInfo/scev-div-with-evaluatable-divisor.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; Derived from test-suite/SingleSource/UnitTests/Vector/SSE/sse.stepfft.c
diff --git a/polly/test/ScopInfo/scev-invalidated.ll b/polly/test/ScopInfo/scev-invalidated.ll
index 97fc5ec3d4ca..6b9efd4b37c7 100644
--- a/polly/test/ScopInfo/scev-invalidated.ll
+++ b/polly/test/ScopInfo/scev-invalidated.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Region: %if.then6---%return
;
diff --git a/polly/test/ScopInfo/schedule-const-post-dominator-walk-2.ll b/polly/test/ScopInfo/schedule-const-post-dominator-walk-2.ll
index 2fdf7d66c3ad..6e2ed1240b07 100644
--- a/polly/test/ScopInfo/schedule-const-post-dominator-walk-2.ll
+++ b/polly/test/ScopInfo/schedule-const-post-dominator-walk-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; The SCoP contains a loop with multiple exit blocks (BBs after leaving
; the loop). The current implementation of deriving their domain derives
diff --git a/polly/test/ScopInfo/schedule-const-post-dominator-walk.ll b/polly/test/ScopInfo/schedule-const-post-dominator-walk.ll
index 92685858610c..d0e8a2accaa2 100644
--- a/polly/test/ScopInfo/schedule-const-post-dominator-walk.ll
+++ b/polly/test/ScopInfo/schedule-const-post-dominator-walk.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; The SCoP contains a loop with multiple exit blocks (BBs after leaving
; the loop). The current implementation of deriving their domain derives
diff --git a/polly/test/ScopInfo/schedule-constuction-endless-loop1.ll b/polly/test/ScopInfo/schedule-constuction-endless-loop1.ll
index 413d1d8ec556..9ffc30f7360e 100644
--- a/polly/test/ScopInfo/schedule-constuction-endless-loop1.ll
+++ b/polly/test/ScopInfo/schedule-constuction-endless-loop1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Check that we do not build a SCoP and do not crash.
;
diff --git a/polly/test/ScopInfo/schedule-constuction-endless-loop2.ll b/polly/test/ScopInfo/schedule-constuction-endless-loop2.ll
index be254477286f..65f2f99b48c1 100644
--- a/polly/test/ScopInfo/schedule-constuction-endless-loop2.ll
+++ b/polly/test/ScopInfo/schedule-constuction-endless-loop2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Check that we do not build a SCoP and do not crash.
;
diff --git a/polly/test/ScopInfo/schedule-incorrectly-contructed-in-case-of-infinite-loop.ll b/polly/test/ScopInfo/schedule-incorrectly-contructed-in-case-of-infinite-loop.ll
index ff339e03fb5a..7c36f8d7f72e 100644
--- a/polly/test/ScopInfo/schedule-incorrectly-contructed-in-case-of-infinite-loop.ll
+++ b/polly/test/ScopInfo/schedule-incorrectly-contructed-in-case-of-infinite-loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-process-unprofitable -polly-scops -disable-output < %s
+; RUN: opt %loadNPMPolly -polly-process-unprofitable '-passes=print<polly-function-scops>' -disable-output < %s
;
; This test contains an infinite loop (bb13) and crashed the domain generation
; at some point. Just verify it does not anymore.
diff --git a/polly/test/ScopInfo/scop-affine-parameter-ordering.ll b/polly/test/ScopInfo/scop-affine-parameter-ordering.ll
index 24c028a6764a..c8a234e9cbce 100644
--- a/polly/test/ScopInfo/scop-affine-parameter-ordering.ll
+++ b/polly/test/ScopInfo/scop-affine-parameter-ordering.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-m:e-i64:64-i128:128-n8:16:32:64-S128"
target triple = "aarch64--linux-android"
diff --git a/polly/test/ScopInfo/sign_wrapped_set.ll b/polly/test/ScopInfo/sign_wrapped_set.ll
index 23c9c8a3b84d..93b63df1c584 100644
--- a/polly/test/ScopInfo/sign_wrapped_set.ll
+++ b/polly/test/ScopInfo/sign_wrapped_set.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine -polly-process-unprofitable -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine -polly-process-unprofitable '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Domain :=
; CHECK-NEXT: [srcHeight] -> { Stmt_for_cond6_preheader_us[i0] : 0 <= i0 <= -3 + srcHeight };
diff --git a/polly/test/ScopInfo/simple_loop_1.ll b/polly/test/ScopInfo/simple_loop_1.ll
index 2c3481facc02..e736f3382d90 100644
--- a/polly/test/ScopInfo/simple_loop_1.ll
+++ b/polly/test/ScopInfo/simple_loop_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; void f(int a[], int N) {
; int i;
diff --git a/polly/test/ScopInfo/simple_loop_2.ll b/polly/test/ScopInfo/simple_loop_2.ll
index 2f580094a147..ae83dd633b96 100644
--- a/polly/test/ScopInfo/simple_loop_2.ll
+++ b/polly/test/ScopInfo/simple_loop_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; void f(int a[], int N) {
; int i;
diff --git a/polly/test/ScopInfo/simple_loop_unsigned.ll b/polly/test/ScopInfo/simple_loop_unsigned.ll
index 12903d9c1580..c4a96e4381c9 100644
--- a/polly/test/ScopInfo/simple_loop_unsigned.ll
+++ b/polly/test/ScopInfo/simple_loop_unsigned.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; void f(int a[], unsigned N) {
; unsigned i;
diff --git a/polly/test/ScopInfo/simple_loop_unsigned_2.ll b/polly/test/ScopInfo/simple_loop_unsigned_2.ll
index 1379180a6dd9..37e907dc006f 100644
--- a/polly/test/ScopInfo/simple_loop_unsigned_2.ll
+++ b/polly/test/ScopInfo/simple_loop_unsigned_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; CHECK: Assumed Context:
; CHECK-NEXT: [N] -> { : }
diff --git a/polly/test/ScopInfo/simple_loop_unsigned_3.ll b/polly/test/ScopInfo/simple_loop_unsigned_3.ll
index 7783c4681e1f..7f2cf5caa1ce 100644
--- a/polly/test/ScopInfo/simple_loop_unsigned_3.ll
+++ b/polly/test/ScopInfo/simple_loop_unsigned_3.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; CHECK: Assumed Context:
; CHECK-NEXT: [N] -> { : }
diff --git a/polly/test/ScopInfo/simple_nonaffine_loop_not.ll b/polly/test/ScopInfo/simple_nonaffine_loop_not.ll
index 42eff85d8c9b..4df0d343b0fc 100644
--- a/polly/test/ScopInfo/simple_nonaffine_loop_not.ll
+++ b/polly/test/ScopInfo/simple_nonaffine_loop_not.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | not FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | not FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@.str = private unnamed_addr constant [17 x i8] c"Random Value: %d\00", align 1
diff --git a/polly/test/ScopInfo/smax.ll b/polly/test/ScopInfo/smax.ll
index b938e4e412da..8968e1319247 100644
--- a/polly/test/ScopInfo/smax.ll
+++ b/polly/test/ScopInfo/smax.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:32-n32-S64"
define void @foo(ptr noalias %data, ptr noalias %ptr, i32 %x_pos, i32 %w) {
diff --git a/polly/test/ScopInfo/statistics.ll b/polly/test/ScopInfo/statistics.ll
index 3797b7d71df9..0a294f2016eb 100644
--- a/polly/test/ScopInfo/statistics.ll
+++ b/polly/test/ScopInfo/statistics.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-scops -stats -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -stats -disable-output < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
; CHECK-DAG: 4 polly-scops - Maximal number of loops in scops
diff --git a/polly/test/ScopInfo/stmt_split_exit_of_region_stmt.ll b/polly/test/ScopInfo/stmt_split_exit_of_region_stmt.ll
index d86d2418cf9b..a46acb090b7f 100644
--- a/polly/test/ScopInfo/stmt_split_exit_of_region_stmt.ll
+++ b/polly/test/ScopInfo/stmt_split_exit_of_region_stmt.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-instructions -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-print-instructions '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Statements {
; CHECK-NEXT: Stmt_Region__TO__Stmt
diff --git a/polly/test/ScopInfo/stmt_split_no_after_split.ll b/polly/test/ScopInfo/stmt_split_no_after_split.ll
index f8339bd8ae94..3a5ebf0725b1 100644
--- a/polly/test/ScopInfo/stmt_split_no_after_split.ll
+++ b/polly/test/ScopInfo/stmt_split_no_after_split.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-instructions -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-print-instructions '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Statements {
; CHECK-NEXT: Stmt_Stmt
diff --git a/polly/test/ScopInfo/stmt_split_no_dependence.ll b/polly/test/ScopInfo/stmt_split_no_dependence.ll
index 7ad48f499792..9edd0f0a13e5 100644
--- a/polly/test/ScopInfo/stmt_split_no_dependence.ll
+++ b/polly/test/ScopInfo/stmt_split_no_dependence.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-instructions -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-print-instructions '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; void func(int *A, int *B){
; for (int i = 0; i < 1024; i+=1) {
diff --git a/polly/test/ScopInfo/stmt_split_on_store.ll b/polly/test/ScopInfo/stmt_split_on_store.ll
index 6af3dc8633dd..d645becb1958 100644
--- a/polly/test/ScopInfo/stmt_split_on_store.ll
+++ b/polly/test/ScopInfo/stmt_split_on_store.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=store -polly-print-instructions -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=store -polly-print-instructions '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; void func(int *A, int *B){
; for (int i = 0; i < 1024; i+=1) {
diff --git a/polly/test/ScopInfo/stmt_split_on_synthesizable.ll b/polly/test/ScopInfo/stmt_split_on_synthesizable.ll
index 92855cfd0124..1a1ccff4f02d 100644
--- a/polly/test/ScopInfo/stmt_split_on_synthesizable.ll
+++ b/polly/test/ScopInfo/stmt_split_on_synthesizable.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-instructions -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-print-instructions '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Statements {
; CHECK-NEXT: Stmt_Stmt
diff --git a/polly/test/ScopInfo/stmt_split_phi_in_beginning_bb.ll b/polly/test/ScopInfo/stmt_split_phi_in_beginning_bb.ll
index ee6afa4638d2..594b36279d6b 100644
--- a/polly/test/ScopInfo/stmt_split_phi_in_beginning_bb.ll
+++ b/polly/test/ScopInfo/stmt_split_phi_in_beginning_bb.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-instructions -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-print-instructions '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Statements {
; CHECK-NEXT: Stmt_Stmt
diff --git a/polly/test/ScopInfo/stmt_split_phi_in_stmt.ll b/polly/test/ScopInfo/stmt_split_phi_in_stmt.ll
index 0a5f41d637e7..6c9f1c2cb5fd 100644
--- a/polly/test/ScopInfo/stmt_split_phi_in_stmt.ll
+++ b/polly/test/ScopInfo/stmt_split_phi_in_stmt.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-instructions -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-print-instructions '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Statements {
; CHECK-NEXT: Stmt_Stmt
diff --git a/polly/test/ScopInfo/stmt_split_scalar_dependence.ll b/polly/test/ScopInfo/stmt_split_scalar_dependence.ll
index 5b02d1b5d08a..07abe46ac039 100644
--- a/polly/test/ScopInfo/stmt_split_scalar_dependence.ll
+++ b/polly/test/ScopInfo/stmt_split_scalar_dependence.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-instructions -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-print-instructions '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Statements {
; CHECK-NEXT: Stmt_Stmt
diff --git a/polly/test/ScopInfo/stmt_split_within_loop.ll b/polly/test/ScopInfo/stmt_split_within_loop.ll
index 3ed9bbbeaccb..9a42ae3a3727 100644
--- a/polly/test/ScopInfo/stmt_split_within_loop.ll
+++ b/polly/test/ScopInfo/stmt_split_within_loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-instructions -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-print-instructions '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Statements {
; CHECK-NEXT: Stmt_Stmt
diff --git a/polly/test/ScopInfo/stmt_with_read_but_without_sideffect.ll b/polly/test/ScopInfo/stmt_with_read_but_without_sideffect.ll
index 73fc543a66e8..ba4801d9a000 100644
--- a/polly/test/ScopInfo/stmt_with_read_but_without_sideffect.ll
+++ b/polly/test/ScopInfo/stmt_with_read_but_without_sideffect.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-delicm -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-delicm>' -disable-output < %s 2>&1 | FileCheck %s
;
; The statement Stmt_for_if_else_1 should be removed because it has no
; side effects. But it has a use of MemRef_tmp21 that must also be
diff --git a/polly/test/ScopInfo/switch-1.ll b/polly/test/ScopInfo/switch-1.ll
index 0ea40a7ed251..0c3610185e6e 100644
--- a/polly/test/ScopInfo/switch-1.ll
+++ b/polly/test/ScopInfo/switch-1.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=AST
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=AST
;
; void f(int *A, int N) {
; for (int i = 0; i < N; i++)
diff --git a/polly/test/ScopInfo/switch-2.ll b/polly/test/ScopInfo/switch-2.ll
index 7956058c9de6..f0056da37955 100644
--- a/polly/test/ScopInfo/switch-2.ll
+++ b/polly/test/ScopInfo/switch-2.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=AST
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=AST
;
; void f(int *A, int N) {
; for (int i = 0; i < N; i++)
diff --git a/polly/test/ScopInfo/switch-3.ll b/polly/test/ScopInfo/switch-3.ll
index aa7ada4edbb8..a1810bf6ef53 100644
--- a/polly/test/ScopInfo/switch-3.ll
+++ b/polly/test/ScopInfo/switch-3.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=AST
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=AST
;
; void f(int *A, int N) {
; for (int i = 0; i < N; i++)
diff --git a/polly/test/ScopInfo/switch-4.ll b/polly/test/ScopInfo/switch-4.ll
index 6aeb7197e382..00665fd75cbc 100644
--- a/polly/test/ScopInfo/switch-4.ll
+++ b/polly/test/ScopInfo/switch-4.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=AST
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=AST
;
; void f(int *A, int N) {
; for (int i = 0; i < N; i++)
diff --git a/polly/test/ScopInfo/switch-5.ll b/polly/test/ScopInfo/switch-5.ll
index 24cc92a0933d..2de369564940 100644
--- a/polly/test/ScopInfo/switch-5.ll
+++ b/polly/test/ScopInfo/switch-5.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=AST
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=AST
;
; The SCoP contains a loop with multiple exit blocks (BBs after leaving
; the loop). The current implementation of deriving their domain derives
diff --git a/polly/test/ScopInfo/switch-6.ll b/polly/test/ScopInfo/switch-6.ll
index efb3df504d23..b859840ee111 100644
--- a/polly/test/ScopInfo/switch-6.ll
+++ b/polly/test/ScopInfo/switch-6.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=AST
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=AST
;
; void f(int *A, int N) {
; for (int i = 0; i < N; i++) {
diff --git a/polly/test/ScopInfo/switch-7.ll b/polly/test/ScopInfo/switch-7.ll
index 2f0d034e84fe..f73d97f70b28 100644
--- a/polly/test/ScopInfo/switch-7.ll
+++ b/polly/test/ScopInfo/switch-7.ll
@@ -1,6 +1,5 @@
-
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-ast -disable-output < %s | FileCheck %s --check-prefix=AST
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-ast>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=AST
;
; void f(int *A, int c, int N) {
; switch (c) {
diff --git a/polly/test/ScopInfo/tempscop-printing.ll b/polly/test/ScopInfo/tempscop-printing.ll
index 80c675d4c3d3..4f02176569b7 100644
--- a/polly/test/ScopInfo/tempscop-printing.ll
+++ b/polly/test/ScopInfo/tempscop-printing.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -basic-aa -polly-invariant-load-hoisting=true -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -aa-pipeline=basic-aa -polly-invariant-load-hoisting=true '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; void f(long A[], int N, int *init_ptr) {
; long i, j;
diff --git a/polly/test/ScopInfo/test-wrapping-in-condition.ll b/polly/test/ScopInfo/test-wrapping-in-condition.ll
index 3ff978f7265e..746350422d6b 100644
--- a/polly/test/ScopInfo/test-wrapping-in-condition.ll
+++ b/polly/test/ScopInfo/test-wrapping-in-condition.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-print-function-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Invalid Context:
; CHECK: [N] -> { : N >= 129 }
diff --git a/polly/test/ScopInfo/truncate-1.ll b/polly/test/ScopInfo/truncate-1.ll
index 5c5fac150b4b..44222c88dfa7 100644
--- a/polly/test/ScopInfo/truncate-1.ll
+++ b/polly/test/ScopInfo/truncate-1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(char *A, short N) {
; for (char i = 0; i < (char)N; i++)
diff --git a/polly/test/ScopInfo/truncate-2.ll b/polly/test/ScopInfo/truncate-2.ll
index e6c5f2cb32d0..c78a5337fdeb 100644
--- a/polly/test/ScopInfo/truncate-2.ll
+++ b/polly/test/ScopInfo/truncate-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(char *A, short N) {
; for (short i = 0; i < N; i++)
diff --git a/polly/test/ScopInfo/truncate-3.ll b/polly/test/ScopInfo/truncate-3.ll
index dd0fe489e990..5a80a873cd47 100644
--- a/polly/test/ScopInfo/truncate-3.ll
+++ b/polly/test/ScopInfo/truncate-3.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-scops -pass-remarks-analysis="polly-scops" \
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -pass-remarks-analysis="polly-scops" \
; RUN: -disable-output < %s 2>&1 | FileCheck %s
; CHECK: Signed-unsigned restriction: [p] -> { : p <= -129 or p >= 128 }
diff --git a/polly/test/ScopInfo/two-loops-one-infinite.ll b/polly/test/ScopInfo/two-loops-one-infinite.ll
index 71f72383b048..e2723a8a9a2e 100644
--- a/polly/test/ScopInfo/two-loops-one-infinite.ll
+++ b/polly/test/ScopInfo/two-loops-one-infinite.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Verify we do not create a SCoP in the presence of infinite loops.
;
diff --git a/polly/test/ScopInfo/two-loops-right-after-each-other.ll b/polly/test/ScopInfo/two-loops-right-after-each-other.ll
index dd457c31afdd..51f3c2d6eb87 100644
--- a/polly/test/ScopInfo/two-loops-right-after-each-other.ll
+++ b/polly/test/ScopInfo/two-loops-right-after-each-other.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; CHECK: Statements {
; CHECK-NEXT: Stmt_loop_1
diff --git a/polly/test/ScopInfo/undef_in_cond.ll b/polly/test/ScopInfo/undef_in_cond.ll
index 5282a853c17a..ef117612f6cb 100644
--- a/polly/test/ScopInfo/undef_in_cond.ll
+++ b/polly/test/ScopInfo/undef_in_cond.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define fastcc void @fix_operands() nounwind {
diff --git a/polly/test/ScopInfo/unnamed_nonaffine.ll b/polly/test/ScopInfo/unnamed_nonaffine.ll
index bf32cc7806f4..5b9f98059177 100644
--- a/polly/test/ScopInfo/unnamed_nonaffine.ll
+++ b/polly/test/ScopInfo/unnamed_nonaffine.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-allow-nonaffine -polly-use-llvm-names=true -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-allow-nonaffine -polly-use-llvm-names=false -polly-print-scops -disable-output < %s | FileCheck %s -check-prefix=UNNAMED
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine -polly-use-llvm-names=true '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-allow-nonaffine -polly-use-llvm-names=false '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -check-prefix=UNNAMED
;
; void f(int *A, int b) {
; int x;
diff --git a/polly/test/ScopInfo/unnamed_stmts.ll b/polly/test/ScopInfo/unnamed_stmts.ll
index 686c0f87d9cf..5a189454471f 100644
--- a/polly/test/ScopInfo/unnamed_stmts.ll
+++ b/polly/test/ScopInfo/unnamed_stmts.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; This test case verifies that we generate numbered statement names in case
; no LLVM-IR names are used in the test case. We also verify that we
diff --git a/polly/test/ScopInfo/unpredictable_nonscop_loop.ll b/polly/test/ScopInfo/unpredictable_nonscop_loop.ll
index 0656b77e3409..daa1f8c78387 100644
--- a/polly/test/ScopInfo/unpredictable_nonscop_loop.ll
+++ b/polly/test/ScopInfo/unpredictable_nonscop_loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s -match-full-lines
; Derived from test-suite/MultiSource/Applications/sgefa/blas.c
;
; The exit value of %i.0320 in land.rhs is not computable.
diff --git a/polly/test/ScopInfo/unprofitable_scalar-accs.ll b/polly/test/ScopInfo/unprofitable_scalar-accs.ll
index 9703587091a7..ca8daa4de01a 100644
--- a/polly/test/ScopInfo/unprofitable_scalar-accs.ll
+++ b/polly/test/ScopInfo/unprofitable_scalar-accs.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-process-unprofitable=false -polly-unprofitable-scalar-accs=false -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-process-unprofitable=false -polly-unprofitable-scalar-accs=true -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=HEURISTIC
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-process-unprofitable=false -polly-unprofitable-scalar-accs=false '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb -polly-process-unprofitable=false -polly-unprofitable-scalar-accs=true '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=HEURISTIC
; Check the effect of -polly-unprofitable-scalar-accs
diff --git a/polly/test/ScopInfo/unsigned-condition.ll b/polly/test/ScopInfo/unsigned-condition.ll
index 35673d1b6a36..0529ded1f6cf 100644
--- a/polly/test/ScopInfo/unsigned-condition.ll
+++ b/polly/test/ScopInfo/unsigned-condition.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; void f(int a[], int N, unsigned P) {
; int i;
diff --git a/polly/test/ScopInfo/unsigned-division-1.ll b/polly/test/ScopInfo/unsigned-division-1.ll
index 8c65062bd941..1c06b55300b6 100644
--- a/polly/test/ScopInfo/unsigned-division-1.ll
+++ b/polly/test/ScopInfo/unsigned-division-1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(int *A, unsigned N) {
; for (unsigned i = 0; i < N / 2; i++)
diff --git a/polly/test/ScopInfo/unsigned-division-2.ll b/polly/test/ScopInfo/unsigned-division-2.ll
index bf4ebce9099a..153639c42b38 100644
--- a/polly/test/ScopInfo/unsigned-division-2.ll
+++ b/polly/test/ScopInfo/unsigned-division-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(int *A, unsigned N) {
; for (unsigned i = 0; i < N / 2 + 3; i++)
diff --git a/polly/test/ScopInfo/unsigned-division-3.ll b/polly/test/ScopInfo/unsigned-division-3.ll
index 47ba1f2ef09d..34561fc4645c 100644
--- a/polly/test/ScopInfo/unsigned-division-3.ll
+++ b/polly/test/ScopInfo/unsigned-division-3.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(int *A, unsigned char N) {
; for (unsigned i = 0; i <= N / -128; i++)
diff --git a/polly/test/ScopInfo/unsigned-division-4.ll b/polly/test/ScopInfo/unsigned-division-4.ll
index edcd8a18a854..be539b47123b 100644
--- a/polly/test/ScopInfo/unsigned-division-4.ll
+++ b/polly/test/ScopInfo/unsigned-division-4.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(int *A, unsigned char N) {
; for (unsigned i = 0; i < (N / -128) + 3; i++)
diff --git a/polly/test/ScopInfo/unsigned-division-5.ll b/polly/test/ScopInfo/unsigned-division-5.ll
index f9a3d39288a9..61716ecec0d9 100644
--- a/polly/test/ScopInfo/unsigned-division-5.ll
+++ b/polly/test/ScopInfo/unsigned-division-5.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-invariant-load-hoisting=true -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-invariant-load-hoisting=true '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(int *A, unsigned N) {
; for (unsigned i = 0; i < N; i++)
diff --git a/polly/test/ScopInfo/unsigned_wrap_uge.ll b/polly/test/ScopInfo/unsigned_wrap_uge.ll
index 89c50ee3764b..d25a9576e863 100644
--- a/polly/test/ScopInfo/unsigned_wrap_uge.ll
+++ b/polly/test/ScopInfo/unsigned_wrap_uge.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Unsigned wrap-around check.
;
diff --git a/polly/test/ScopInfo/unsigned_wrap_ugt.ll b/polly/test/ScopInfo/unsigned_wrap_ugt.ll
index 3249123c9918..0310fdde6d26 100644
--- a/polly/test/ScopInfo/unsigned_wrap_ugt.ll
+++ b/polly/test/ScopInfo/unsigned_wrap_ugt.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Unsigned wrap-around check.
;
diff --git a/polly/test/ScopInfo/unsigned_wrap_ule.ll b/polly/test/ScopInfo/unsigned_wrap_ule.ll
index 3c6ea18b439c..47bfc6065b1a 100644
--- a/polly/test/ScopInfo/unsigned_wrap_ule.ll
+++ b/polly/test/ScopInfo/unsigned_wrap_ule.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Unsigned wrap-around check.
;
diff --git a/polly/test/ScopInfo/unsigned_wrap_ult.ll b/polly/test/ScopInfo/unsigned_wrap_ult.ll
index 5d859f85d52b..1b73c0d6dd7e 100644
--- a/polly/test/ScopInfo/unsigned_wrap_ult.ll
+++ b/polly/test/ScopInfo/unsigned_wrap_ult.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; Unsigned wrap-around check.
;
diff --git a/polly/test/ScopInfo/user_context.ll b/polly/test/ScopInfo/user_context.ll
index 46232cd59c03..74088120e401 100644
--- a/polly/test/ScopInfo/user_context.ll
+++ b/polly/test/ScopInfo/user_context.ll
@@ -1,7 +1,7 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-context='[N] -> {: N = 1024}' -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=CTX
-; RUN: opt %loadPolly -polly-context='[N,M] -> {: 1 = 0}' -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-context='[] -> {: 1 = 0}' -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-context='[N] -> {: N = 1024}' '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=CTX
+; RUN: opt %loadNPMPolly -polly-context='[N,M] -> {: 1 = 0}' '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-context='[] -> {: 1 = 0}' '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
; void f(int a[], int N) {
; int i;
diff --git a/polly/test/ScopInfo/user_provided_assumptions-in-bb-signed-conditional.ll b/polly/test/ScopInfo/user_provided_assumptions-in-bb-signed-conditional.ll
index 4bd02c96a3d2..bd13ba8bb696 100644
--- a/polly/test/ScopInfo/user_provided_assumptions-in-bb-signed-conditional.ll
+++ b/polly/test/ScopInfo/user_provided_assumptions-in-bb-signed-conditional.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -pass-remarks-analysis="polly-scops" -polly-scops -disable-output < %s 2>&1 | FileCheck %s --check-prefix=REMARK
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -pass-remarks-analysis="polly-scops" '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=REMARK
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; REMARK: remark: <unknown>:0:0: Use user assumption: [n, b] -> { : n <= 100 or (b = 0 and n >= 101) }
;
diff --git a/polly/test/ScopInfo/user_provided_assumptions-in-bb-signed.ll b/polly/test/ScopInfo/user_provided_assumptions-in-bb-signed.ll
index 262bd1349a69..45f59170942e 100644
--- a/polly/test/ScopInfo/user_provided_assumptions-in-bb-signed.ll
+++ b/polly/test/ScopInfo/user_provided_assumptions-in-bb-signed.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Context:
; CHECK-NEXT: [n] -> { : -9223372036854775808 <= n <= 100 }
diff --git a/polly/test/ScopInfo/user_provided_assumptions-in-bb-unsigned.ll b/polly/test/ScopInfo/user_provided_assumptions-in-bb-unsigned.ll
index 4a10fcff929a..fb71c75aa75e 100644
--- a/polly/test/ScopInfo/user_provided_assumptions-in-bb-unsigned.ll
+++ b/polly/test/ScopInfo/user_provided_assumptions-in-bb-unsigned.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -pass-remarks-analysis="polly-scops" -polly-scops -disable-output < %s 2>&1 | FileCheck %s --check-prefix=REMARK
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -pass-remarks-analysis="polly-scops" '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=REMARK
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; REMARK: remark: <unknown>:0:0: SCoP begins here.
; REMARK-NEXT: remark: <unknown>:0:0: Use user assumption: [n] -> { : n <= 100 }
diff --git a/polly/test/ScopInfo/user_provided_assumptions.ll b/polly/test/ScopInfo/user_provided_assumptions.ll
index 6640e4a65e36..49b23b1e784d 100644
--- a/polly/test/ScopInfo/user_provided_assumptions.ll
+++ b/polly/test/ScopInfo/user_provided_assumptions.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -pass-remarks-analysis="polly-scops" -polly-scops -disable-output < %s 2>&1 | FileCheck %s
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=SCOP
+; RUN: opt %loadNPMPolly -pass-remarks-analysis="polly-scops" '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=SCOP
;
; CHECK: remark: <unknown>:0:0: SCoP begins here.
; CHECK-NEXT: remark: <unknown>:0:0: Use user assumption: [M, N] -> { : N <= 2147483647 - M }
diff --git a/polly/test/ScopInfo/user_provided_assumptions_2.ll b/polly/test/ScopInfo/user_provided_assumptions_2.ll
index 994cd6f15103..f8643b68cc63 100644
--- a/polly/test/ScopInfo/user_provided_assumptions_2.ll
+++ b/polly/test/ScopInfo/user_provided_assumptions_2.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -pass-remarks-analysis="polly-scops" -polly-scops -disable-output < %s 2>&1 | FileCheck %s
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=SCOP
+; RUN: opt %loadNPMPolly -pass-remarks-analysis="polly-scops" '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=SCOP
;
; CHECK: remark: <unknown>:0:0: SCoP begins here.
; CHECK-NEXT: remark: <unknown>:0:0: Use user assumption: { : }
diff --git a/polly/test/ScopInfo/user_provided_assumptions_3.ll b/polly/test/ScopInfo/user_provided_assumptions_3.ll
index 2fcde8bd1826..70f8f359e16c 100644
--- a/polly/test/ScopInfo/user_provided_assumptions_3.ll
+++ b/polly/test/ScopInfo/user_provided_assumptions_3.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -pass-remarks-analysis="polly-scops" -polly-scops -disable-output < %s 2>&1 | FileCheck %s
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s --check-prefix=SCOP
+; RUN: opt %loadNPMPolly -pass-remarks-analysis="polly-scops" '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s --check-prefix=SCOP
;
; CHECK: remark: <unknown>:0:0: SCoP begins here.
; CHECK-NEXT: remark: <unknown>:0:0: Use user assumption: [N] -> { : N >= 2 }
diff --git a/polly/test/ScopInfo/user_provided_non_dominating_assumptions.ll b/polly/test/ScopInfo/user_provided_non_dominating_assumptions.ll
index 1eb3c15810e4..3e7883db48fc 100644
--- a/polly/test/ScopInfo/user_provided_non_dominating_assumptions.ll
+++ b/polly/test/ScopInfo/user_provided_non_dominating_assumptions.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -pass-remarks-analysis="polly-scops" -polly-scops \
+; RUN: opt %loadNPMPolly -pass-remarks-analysis="polly-scops" '-passes=print<polly-function-scops>' \
; RUN: -polly-precise-inbounds -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: remark: <unknown>:0:0: SCoP begins here.
@@ -18,7 +18,7 @@
;
-; RUN: opt %loadPolly -pass-remarks-analysis="polly-scops" -polly-scops \
+; RUN: opt %loadNPMPolly -pass-remarks-analysis="polly-scops" '-passes=print<polly-function-scops>' \
; RUN: -polly-precise-inbounds -disable-output < %s 2>&1 -pass-remarks-output=%t.yaml
; RUN: cat %t.yaml | FileCheck -check-prefix=YAML %s
; YAML: --- !Analysis
diff --git a/polly/test/ScopInfo/variant_base_pointer.ll b/polly/test/ScopInfo/variant_base_pointer.ll
index 321657c87e79..32cb114fab05 100644
--- a/polly/test/ScopInfo/variant_base_pointer.ll
+++ b/polly/test/ScopInfo/variant_base_pointer.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-ignore-aliasing -polly-invariant-load-hoisting=true -polly-print-scops -disable-output < %s | FileCheck %s
-; RUN: opt %loadPolly -polly-ignore-aliasing -polly-invariant-load-hoisting=true -polly-codegen -disable-output < %s
+; RUN: opt %loadNPMPolly -polly-ignore-aliasing -polly-invariant-load-hoisting=true '-passes=print<polly-detect>,print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-ignore-aliasing -polly-invariant-load-hoisting=true -passes=polly-codegen -disable-output < %s
;
; %tmp is added to the list of required hoists by -polly-scops and just
; assumed to be hoisted. Only -polly-scops recognizes it to be unhoistable
diff --git a/polly/test/ScopInfo/variant_load_empty_domain.ll b/polly/test/ScopInfo/variant_load_empty_domain.ll
index 0e685c3c7e73..6a28bd0405fd 100644
--- a/polly/test/ScopInfo/variant_load_empty_domain.ll
+++ b/polly/test/ScopInfo/variant_load_empty_domain.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Invariant Accesses: {
; CHECK-NEXT: }
diff --git a/polly/test/ScopInfo/wraping_signed_expr_0.ll b/polly/test/ScopInfo/wraping_signed_expr_0.ll
index 7ad0f64028b6..f5f06bfd7d33 100644
--- a/polly/test/ScopInfo/wraping_signed_expr_0.ll
+++ b/polly/test/ScopInfo/wraping_signed_expr_0.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(int *A, char N, char p) {
; for (char i = 0; i < N; i++) {
diff --git a/polly/test/ScopInfo/wraping_signed_expr_1.ll b/polly/test/ScopInfo/wraping_signed_expr_1.ll
index 0a62b9cf542c..e04257acc201 100644
--- a/polly/test/ScopInfo/wraping_signed_expr_1.ll
+++ b/polly/test/ScopInfo/wraping_signed_expr_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(long *A, long N, long p) {
; for (long i = 0; i < N; i++)
diff --git a/polly/test/ScopInfo/wraping_signed_expr_2.ll b/polly/test/ScopInfo/wraping_signed_expr_2.ll
index f3b4665f7f37..2511c0d64608 100644
--- a/polly/test/ScopInfo/wraping_signed_expr_2.ll
+++ b/polly/test/ScopInfo/wraping_signed_expr_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(int *A, int N, int p) {
; for (int i = 0; i < N; i++)
diff --git a/polly/test/ScopInfo/wraping_signed_expr_3.ll b/polly/test/ScopInfo/wraping_signed_expr_3.ll
index 7a5cbba9436b..2106bdf4c068 100644
--- a/polly/test/ScopInfo/wraping_signed_expr_3.ll
+++ b/polly/test/ScopInfo/wraping_signed_expr_3.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(int *A, int N, int p) {
; for (int i = 0; i < N; i++)
diff --git a/polly/test/ScopInfo/wraping_signed_expr_4.ll b/polly/test/ScopInfo/wraping_signed_expr_4.ll
index ec65f70a092f..3ea17f6e266b 100644
--- a/polly/test/ScopInfo/wraping_signed_expr_4.ll
+++ b/polly/test/ScopInfo/wraping_signed_expr_4.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(char *A, char N, char p) {
; for (char i = 0; i < N; i++)
diff --git a/polly/test/ScopInfo/wraping_signed_expr_5.ll b/polly/test/ScopInfo/wraping_signed_expr_5.ll
index 5f3b09ba33c1..90706a3d3bc4 100644
--- a/polly/test/ScopInfo/wraping_signed_expr_5.ll
+++ b/polly/test/ScopInfo/wraping_signed_expr_5.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; We should not generate a runtime check for ((int)r1 + (int)r2) as it is known
; not to overflow. However, (p + q) can overflow, thus checks are needed.
diff --git a/polly/test/ScopInfo/wraping_signed_expr_6.ll b/polly/test/ScopInfo/wraping_signed_expr_6.ll
index 23258bb513bf..9cf67fc10180 100644
--- a/polly/test/ScopInfo/wraping_signed_expr_6.ll
+++ b/polly/test/ScopInfo/wraping_signed_expr_6.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Invalid Context:
; CHECK: [N] -> { : N >= 129 }
diff --git a/polly/test/ScopInfo/wraping_signed_expr_7.ll b/polly/test/ScopInfo/wraping_signed_expr_7.ll
index 0663d4e0bc10..d18d2b2df3e1 100644
--- a/polly/test/ScopInfo/wraping_signed_expr_7.ll
+++ b/polly/test/ScopInfo/wraping_signed_expr_7.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Invalid Context:
; CHECK: [N] -> { : N >= 129 }
diff --git a/polly/test/ScopInfo/wraping_signed_expr_slow_1.ll b/polly/test/ScopInfo/wraping_signed_expr_slow_1.ll
index ec36d2c5fcde..84626861bd39 100644
--- a/polly/test/ScopInfo/wraping_signed_expr_slow_1.ll
+++ b/polly/test/ScopInfo/wraping_signed_expr_slow_1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; This checks that the no-wraps checks will be computed fast as some example
; already showed huge slowdowns even though the inbounds and nsw flags were
diff --git a/polly/test/ScopInfo/wraping_signed_expr_slow_2.ll b/polly/test/ScopInfo/wraping_signed_expr_slow_2.ll
index 6db33ab166d5..b4dd567bafa6 100644
--- a/polly/test/ScopInfo/wraping_signed_expr_slow_2.ll
+++ b/polly/test/ScopInfo/wraping_signed_expr_slow_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; This checks that the no-wraps checks will be computed fast as some example
; already showed huge slowdowns even though the inbounds and nsw flags were
diff --git a/polly/test/ScopInfo/zero_ext_of_truncate.ll b/polly/test/ScopInfo/zero_ext_of_truncate.ll
index fc55df5e053c..bd3749b6aa74 100644
--- a/polly/test/ScopInfo/zero_ext_of_truncate.ll
+++ b/polly/test/ScopInfo/zero_ext_of_truncate.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-invariant-load-hoisting=true -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-invariant-load-hoisting=true '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(unsigned *restrict I, unsigned *restrict A, unsigned N, unsigned M) {
; for (unsigned i = 0; i < N; i++) {
diff --git a/polly/test/ScopInfo/zero_ext_of_truncate_2.ll b/polly/test/ScopInfo/zero_ext_of_truncate_2.ll
index 13e9c03ecd2d..b30604527676 100644
--- a/polly/test/ScopInfo/zero_ext_of_truncate_2.ll
+++ b/polly/test/ScopInfo/zero_ext_of_truncate_2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-invariant-load-hoisting=true -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-invariant-load-hoisting=true '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; void f(unsigned long *restrict I, unsigned *restrict A, unsigned N) {
; for (unsigned i = 0; i < N; i++) {
diff --git a/polly/test/ScopInfo/zero_ext_space_mismatch.ll b/polly/test/ScopInfo/zero_ext_space_mismatch.ll
index 835a8664b75e..3c02ae295b5b 100644
--- a/polly/test/ScopInfo/zero_ext_space_mismatch.ll
+++ b/polly/test/ScopInfo/zero_ext_space_mismatch.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output < %s 2>&1 | FileCheck %s
;
; CHECK: Assumed Context:
; CHECK-NEXT: [dim] -> { : dim > 0 }
diff --git a/polly/test/ScopInliner/invariant-load-func.ll b/polly/test/ScopInliner/invariant-load-func.ll
index 38e4a15aab94..ffd2ec9cdb60 100644
--- a/polly/test/ScopInliner/invariant-load-func.ll
+++ b/polly/test/ScopInliner/invariant-load-func.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-detect-full-functions -polly-scop-inliner \
-; RUN: -polly-invariant-load-hoisting -polly-print-scops -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-detect-full-functions -polly-scop-inliner \
+; RUN: -polly-invariant-load-hoisting '-passes=print<polly-function-scops>' -disable-output < %s | FileCheck %s
; Check that we correctly inline a function that requires invariant load
; hoisting.
diff --git a/polly/test/Simplify/coalesce_3partials.ll b/polly/test/Simplify/coalesce_3partials.ll
index 0c1556ff263a..4112787e51bf 100644
--- a/polly/test/Simplify/coalesce_3partials.ll
+++ b/polly/test/Simplify/coalesce_3partials.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-simplify -disable-output < %s | FileCheck -match-full-lines %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck -match-full-lines %s
;
; Combine 3 partial accesses into one.
;
diff --git a/polly/test/Simplify/coalesce_disjointelements.ll b/polly/test/Simplify/coalesce_disjointelements.ll
index 2f4cf4e3f920..b140f287e27f 100644
--- a/polly/test/Simplify/coalesce_disjointelements.ll
+++ b/polly/test/Simplify/coalesce_disjointelements.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-simplify -disable-output < %s | FileCheck -match-full-lines %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck -match-full-lines %s
;
; Combine four partial stores into two.
; The stores write to the same array, but never the same element.
diff --git a/polly/test/Simplify/coalesce_overlapping.ll b/polly/test/Simplify/coalesce_overlapping.ll
index 78ed21e9855b..ee716fc12f09 100644
--- a/polly/test/Simplify/coalesce_overlapping.ll
+++ b/polly/test/Simplify/coalesce_overlapping.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-simplify -disable-output < %s | FileCheck -match-full-lines %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck -match-full-lines %s
;
; Combine two partial stores (with overlapping domains) into one.
;
diff --git a/polly/test/Simplify/coalesce_partial.ll b/polly/test/Simplify/coalesce_partial.ll
index c42aaa113035..aea691f43e93 100644
--- a/polly/test/Simplify/coalesce_partial.ll
+++ b/polly/test/Simplify/coalesce_partial.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-simplify -disable-output < %s | FileCheck -match-full-lines %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck -match-full-lines %s
;
; Combine two partial stores (with disjoint domains) into one.
;
diff --git a/polly/test/Simplify/dead_access_load.ll b/polly/test/Simplify/dead_access_load.ll
index 1804613c0a79..66f94795ea6e 100644
--- a/polly/test/Simplify/dead_access_load.ll
+++ b/polly/test/Simplify/dead_access_load.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb "-passes=scop(print<polly-simplify>)" -disable-output -aa-pipeline=basic-aa < %s | FileCheck %s -match-full-lines
;
; Remove a dead load-instruction
diff --git a/polly/test/Simplify/dead_access_phi.ll b/polly/test/Simplify/dead_access_phi.ll
index d263b89aff58..fb40e4cc45b3 100644
--- a/polly/test/Simplify/dead_access_phi.ll
+++ b/polly/test/Simplify/dead_access_phi.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb "-passes=scop(print<polly-simplify>)" -disable-output -aa-pipeline=basic-aa < %s | FileCheck %s -match-full-lines
;
; Remove a dead PHI write/read pair
diff --git a/polly/test/Simplify/dead_access_value.ll b/polly/test/Simplify/dead_access_value.ll
index 6e3c211577f6..a8ff7f28542b 100644
--- a/polly/test/Simplify/dead_access_value.ll
+++ b/polly/test/Simplify/dead_access_value.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb "-passes=scop(print<polly-simplify>)" -disable-output -aa-pipeline=basic-aa < %s | FileCheck %s -match-full-lines
;
; Remove a dead value write/read pair
diff --git a/polly/test/Simplify/dead_instruction.ll b/polly/test/Simplify/dead_instruction.ll
index 4e693b0ccb44..81e55e1c7bb3 100644
--- a/polly/test/Simplify/dead_instruction.ll
+++ b/polly/test/Simplify/dead_instruction.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb "-passes=scop(print<polly-simplify>)" -disable-output -aa-pipeline=basic-aa < %s | FileCheck %s -match-full-lines
;
; Remove a dead instruction
diff --git a/polly/test/Simplify/emptyaccessdomain.ll b/polly/test/Simplify/emptyaccessdomain.ll
index 54ac14ab398c..9b06cec965a9 100644
--- a/polly/test/Simplify/emptyaccessdomain.ll
+++ b/polly/test/Simplify/emptyaccessdomain.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck %s -match-full-lines
;
; for (int j = 0; j < n; j += 1) {
; A[0] = 42.0;
diff --git a/polly/test/Simplify/exit_phi_accesses-2.ll b/polly/test/Simplify/exit_phi_accesses-2.ll
index 01748aa59bd3..379c7e0ace0a 100644
--- a/polly/test/Simplify/exit_phi_accesses-2.ll
+++ b/polly/test/Simplify/exit_phi_accesses-2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-scops -polly-print-simplify -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>,scop(print<polly-simplify>)' -disable-output < %s | FileCheck %s
;
; The use of %sum.next by %phi counts as an escaping use.
; Don't remove the scalar write of %sum.next.
diff --git a/polly/test/Simplify/func-b320a7.ll b/polly/test/Simplify/func-b320a7.ll
index c8a823a468d7..5aa2caba95cf 100644
--- a/polly/test/Simplify/func-b320a7.ll
+++ b/polly/test/Simplify/func-b320a7.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-print-simplify -polly-optree -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=print<polly-simplify>,polly-optree' -disable-output < %s | FileCheck %s -match-full-lines
; llvm.org/PR47098
; Use-after-free by reference to a Stmt remaining in InstStmtMap after it has been removed by Scop::simplifyScop.
diff --git a/polly/test/Simplify/gemm.ll b/polly/test/Simplify/gemm.ll
index 23f8de5573cd..5120de2db767 100644
--- a/polly/test/Simplify/gemm.ll
+++ b/polly/test/Simplify/gemm.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-simplify -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck %s
;
; void gemm(float A[][1024], float B[][1024], float C[][1024]) {
; for (long i = 0; i < 1024; i++)
diff --git a/polly/test/Simplify/nocoalesce_differentvalues.ll b/polly/test/Simplify/nocoalesce_differentvalues.ll
index 68991d2eecf5..33d04b2f96de 100644
--- a/polly/test/Simplify/nocoalesce_differentvalues.ll
+++ b/polly/test/Simplify/nocoalesce_differentvalues.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-simplify -disable-output < %s | FileCheck -match-full-lines %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck -match-full-lines %s
;
; Do not combine stores that write different values.
;
diff --git a/polly/test/Simplify/nocoalesce_elementmismatch.ll b/polly/test/Simplify/nocoalesce_elementmismatch.ll
index 2bab360e6858..608b055e691d 100644
--- a/polly/test/Simplify/nocoalesce_elementmismatch.ll
+++ b/polly/test/Simplify/nocoalesce_elementmismatch.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-simplify -disable-output < %s | FileCheck -match-full-lines %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck -match-full-lines %s
;
; Do not combine stores that do not write to different elements in the
; same instance.
diff --git a/polly/test/Simplify/nocoalesce_readbetween.ll b/polly/test/Simplify/nocoalesce_readbetween.ll
index ada79dc18b87..e112b036cd77 100644
--- a/polly/test/Simplify/nocoalesce_readbetween.ll
+++ b/polly/test/Simplify/nocoalesce_readbetween.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-simplify -disable-output < %s | FileCheck -match-full-lines %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck -match-full-lines %s
;
; Do not combine stores if there is a read between them.
; Note: The read in between is unused, so it will be removed by markAndSweep.
diff --git a/polly/test/Simplify/nocoalesce_writebetween.ll b/polly/test/Simplify/nocoalesce_writebetween.ll
index 48e785ec2c26..fd5eee52eaf5 100644
--- a/polly/test/Simplify/nocoalesce_writebetween.ll
+++ b/polly/test/Simplify/nocoalesce_writebetween.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-simplify -disable-output < %s | FileCheck -match-full-lines %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck -match-full-lines %s
;
; Do not combine stores if there is a write between them.
;
diff --git a/polly/test/Simplify/notdead_region_exitphi.ll b/polly/test/Simplify/notdead_region_exitphi.ll
index bd29fd578b97..42fafb446cea 100644
--- a/polly/test/Simplify/notdead_region_exitphi.ll
+++ b/polly/test/Simplify/notdead_region_exitphi.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-simplify>)" -disable-output -aa-pipeline=basic-aa < %s | FileCheck %s -match-full-lines
;
; Do not remove dependencies of a phi node in a region's exit block.
diff --git a/polly/test/Simplify/notdead_region_innerphi.ll b/polly/test/Simplify/notdead_region_innerphi.ll
index a176a28af233..966448c9884b 100644
--- a/polly/test/Simplify/notdead_region_innerphi.ll
+++ b/polly/test/Simplify/notdead_region_innerphi.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-simplify>)" -disable-output -aa-pipeline=basic-aa < %s | FileCheck %s -match-full-lines
;
; Do not remove dependencies of a phi node within a region statement (%phi).
diff --git a/polly/test/Simplify/notredundant_region_loop.ll b/polly/test/Simplify/notredundant_region_loop.ll
index 0ea9be7e9d2d..88f6c4152173 100644
--- a/polly/test/Simplify/notredundant_region_loop.ll
+++ b/polly/test/Simplify/notredundant_region_loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-allow-nonaffine-loops -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -polly-allow-nonaffine-loops -disable-output < %s | FileCheck %s -match-full-lines
;
; Do not remove the store in region_entry. It can be executed multiple times
; due to being part of a non-affine loop.
diff --git a/polly/test/Simplify/notredundant_region_middle.ll b/polly/test/Simplify/notredundant_region_middle.ll
index 84598746e0bb..43c05436809b 100644
--- a/polly/test/Simplify/notredundant_region_middle.ll
+++ b/polly/test/Simplify/notredundant_region_middle.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-simplify>)" -disable-output -aa-pipeline=basic-aa < %s | FileCheck %s -match-full-lines
;
; Do not remove redundant stores in the middle of region statements.
diff --git a/polly/test/Simplify/notredundant_synthesizable_unknownit.ll b/polly/test/Simplify/notredundant_synthesizable_unknownit.ll
index 2affdbb2f1de..8a9aec8be9e0 100644
--- a/polly/test/Simplify/notredundant_synthesizable_unknownit.ll
+++ b/polly/test/Simplify/notredundant_synthesizable_unknownit.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb "-passes=scop(print<polly-simplify>)" -disable-output -aa-pipeline=basic-aa < %s | FileCheck %s -match-full-lines
;
; Do not remove the scalar value write of %i.trunc in inner.for.
diff --git a/polly/test/Simplify/out-of-scop-use-in-region-entry-phi-node.ll b/polly/test/Simplify/out-of-scop-use-in-region-entry-phi-node.ll
index 511f35a9388e..7218f328f9ca 100644
--- a/polly/test/Simplify/out-of-scop-use-in-region-entry-phi-node.ll
+++ b/polly/test/Simplify/out-of-scop-use-in-region-entry-phi-node.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-scops -polly-print-simplify -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb '-passes=print<polly-function-scops>,scop(print<polly-simplify>)' -disable-output < %s 2>&1 | FileCheck %s
;
; %tmp5 must keep the Value WRITE MemoryAccess, because as an incoming value of
; %tmp4, it is an "external use".
diff --git a/polly/test/Simplify/overwritten.ll b/polly/test/Simplify/overwritten.ll
index a32d6a8daeb0..eccdd8044d07 100644
--- a/polly/test/Simplify/overwritten.ll
+++ b/polly/test/Simplify/overwritten.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-simplify -disable-output < %s | FileCheck -match-full-lines %s
; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb "-passes=scop(print<polly-simplify>)" -disable-output -aa-pipeline=basic-aa < %s | FileCheck -match-full-lines %s
;
; Remove a store that is overwritten by another store in the same statement.
diff --git a/polly/test/Simplify/overwritten_3phi.ll b/polly/test/Simplify/overwritten_3phi.ll
index 24758b9b7cf9..4cee4f13d26d 100644
--- a/polly/test/Simplify/overwritten_3phi.ll
+++ b/polly/test/Simplify/overwritten_3phi.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-simplify -disable-output < %s | FileCheck -match-full-lines %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck -match-full-lines %s
;
; Remove identical writes
; (two stores in the same statement that write the same value to the same
diff --git a/polly/test/Simplify/overwritten_3store.ll b/polly/test/Simplify/overwritten_3store.ll
index 63eb5b54f931..c9f06c85dba5 100644
--- a/polly/test/Simplify/overwritten_3store.ll
+++ b/polly/test/Simplify/overwritten_3store.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-stmt-granularity=bb -polly-print-simplify -disable-output < %s | FileCheck -match-full-lines %s
; RUN: opt %loadNPMPolly -polly-stmt-granularity=bb "-passes=scop(print<polly-simplify>)" -disable-output -aa-pipeline=basic-aa < %s | FileCheck -match-full-lines %s
;
; Remove a store that is overwritten by another store in the same statement.
diff --git a/polly/test/Simplify/overwritten_implicit_and_explicit.ll b/polly/test/Simplify/overwritten_implicit_and_explicit.ll
index 56c63b48f761..b1b7635e2626 100644
--- a/polly/test/Simplify/overwritten_implicit_and_explicit.ll
+++ b/polly/test/Simplify/overwritten_implicit_and_explicit.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-simplify -disable-output < %s | FileCheck -match-full-lines %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck -match-full-lines %s
;
; Remove a store that is overwritten by another store in the same statement.
; Check that this works even if one of the writes is a scalar MemoryKind.
diff --git a/polly/test/Simplify/overwritten_loadbetween.ll b/polly/test/Simplify/overwritten_loadbetween.ll
index b31f45d5db62..cdca2f11531e 100644
--- a/polly/test/Simplify/overwritten_loadbetween.ll
+++ b/polly/test/Simplify/overwritten_loadbetween.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-print-simplify -disable-output < %s | FileCheck -match-full-lines %s
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-simplify>)" -disable-output -aa-pipeline=basic-aa < %s | FileCheck -match-full-lines %s
;
; Do not remove overwrites when the value is read before.
diff --git a/polly/test/Simplify/overwritten_scalar.ll b/polly/test/Simplify/overwritten_scalar.ll
index d55ea7712c36..700adb6aed2e 100644
--- a/polly/test/Simplify/overwritten_scalar.ll
+++ b/polly/test/Simplify/overwritten_scalar.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-simplify -disable-output < %s | FileCheck -match-full-lines %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck -match-full-lines %s
;
; Remove identical writes
; (two stores in the same statement that write the same value to the same
diff --git a/polly/test/Simplify/pass_existence.ll b/polly/test/Simplify/pass_existence.ll
index fc5287ed2ee2..4d1d800b2a80 100644
--- a/polly/test/Simplify/pass_existence.ll
+++ b/polly/test/Simplify/pass_existence.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-print-simplify -disable-output < %s | FileCheck %s
; RUN: opt %loadNPMPolly -disable-output "-passes=scop(print<polly-simplify>)" -aa-pipeline=basic-aa < %s | FileCheck %s
;
; Simple test for the existence of the Simplify pass.
diff --git a/polly/test/Simplify/phi_in_regionstmt.ll b/polly/test/Simplify/phi_in_regionstmt.ll
index 32bb75427589..76efd484f547 100644
--- a/polly/test/Simplify/phi_in_regionstmt.ll
+++ b/polly/test/Simplify/phi_in_regionstmt.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-simplify>)" -disable-output -aa-pipeline=basic-aa < %s | FileCheck %s -match-full-lines
;
; The PHINode %cond91.sink.sink.us.sink.6 is in the middle of a region
diff --git a/polly/test/Simplify/pr33323.ll b/polly/test/Simplify/pr33323.ll
index 751f0bff5961..22921d5fba50 100644
--- a/polly/test/Simplify/pr33323.ll
+++ b/polly/test/Simplify/pr33323.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-simplify -disable-output < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck %s
;
; llvm.org/PR33323
;
diff --git a/polly/test/Simplify/redundant.ll b/polly/test/Simplify/redundant.ll
index e85352bc889f..540e537460e5 100644
--- a/polly/test/Simplify/redundant.ll
+++ b/polly/test/Simplify/redundant.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-simplify>)" -disable-output -aa-pipeline=basic-aa < %s | FileCheck %s -match-full-lines
;
; Remove redundant store (a store that writes the same value already
diff --git a/polly/test/Simplify/redundant_differentindex.ll b/polly/test/Simplify/redundant_differentindex.ll
index 23531c24344f..5ce25836dedb 100644
--- a/polly/test/Simplify/redundant_differentindex.ll
+++ b/polly/test/Simplify/redundant_differentindex.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-simplify>)" -disable-output -aa-pipeline=basic-aa < %s | FileCheck %s -match-full-lines
;
; A store that has a different index than the load it is storing is
diff --git a/polly/test/Simplify/redundant_region.ll b/polly/test/Simplify/redundant_region.ll
index dbcb420ac2f3..927aac6c4af0 100644
--- a/polly/test/Simplify/redundant_region.ll
+++ b/polly/test/Simplify/redundant_region.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck %s -match-full-lines
;
; Remove redundant store (a store that writes the same value already
; at the destination) in a region.
diff --git a/polly/test/Simplify/redundant_region_scalar.ll b/polly/test/Simplify/redundant_region_scalar.ll
index 95a581ad6f57..72d570d46bdc 100644
--- a/polly/test/Simplify/redundant_region_scalar.ll
+++ b/polly/test/Simplify/redundant_region_scalar.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck %s -match-full-lines
;
; Remove redundant store (a store that writes the same value already
; at the destination) in a region.
diff --git a/polly/test/Simplify/redundant_scalarwrite.ll b/polly/test/Simplify/redundant_scalarwrite.ll
index e2f7bbedc023..84cb971be11f 100644
--- a/polly/test/Simplify/redundant_scalarwrite.ll
+++ b/polly/test/Simplify/redundant_scalarwrite.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck %s -match-full-lines
;
; Remove redundant scalar stores.
;
diff --git a/polly/test/Simplify/redundant_storebetween.ll b/polly/test/Simplify/redundant_storebetween.ll
index f624b6e5b995..6540d7751e46 100644
--- a/polly/test/Simplify/redundant_storebetween.ll
+++ b/polly/test/Simplify/redundant_storebetween.ll
@@ -1,4 +1,3 @@
-; RUN: opt %loadPolly -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
; RUN: opt %loadNPMPolly "-passes=scop(print<polly-simplify>)" -disable-output -aa-pipeline=basic-aa < %s | FileCheck %s -match-full-lines
;
; Don't remove store where there is another store to the same target
diff --git a/polly/test/Simplify/scalability1.ll b/polly/test/Simplify/scalability1.ll
index 0ef99ce1ad8e..c6e36f9dcdef 100644
--- a/polly/test/Simplify/scalability1.ll
+++ b/polly/test/Simplify/scalability1.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-ignore-inbounds -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-ignore-inbounds '-passes=print<polly-simplify>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Test scalability.
;
diff --git a/polly/test/Simplify/scalability2.ll b/polly/test/Simplify/scalability2.ll
index bac0810b0afa..adcf9eef348a 100644
--- a/polly/test/Simplify/scalability2.ll
+++ b/polly/test/Simplify/scalability2.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-ignore-inbounds -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly -polly-ignore-inbounds '-passes=print<polly-simplify>' -disable-output < %s | FileCheck %s -match-full-lines
;
; Test scalability.
;
diff --git a/polly/test/Simplify/sweep_mapped_phi.ll b/polly/test/Simplify/sweep_mapped_phi.ll
index add1681cdf36..495d77a22f61 100644
--- a/polly/test/Simplify/sweep_mapped_phi.ll
+++ b/polly/test/Simplify/sweep_mapped_phi.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck %s -match-full-lines
;
; Map %phi to A[j], so the scalar write in Stmt_for_bodyA can be removed.
;
diff --git a/polly/test/Simplify/sweep_mapped_value.ll b/polly/test/Simplify/sweep_mapped_value.ll
index 2e2f9c37febe..c83941a8f0ba 100644
--- a/polly/test/Simplify/sweep_mapped_value.ll
+++ b/polly/test/Simplify/sweep_mapped_value.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-import-jscop -polly-import-jscop-postfix=transformed -polly-print-simplify -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,print<polly-simplify>' -polly-import-jscop-postfix=transformed -disable-output < %s | FileCheck %s -match-full-lines
;
; Map %val to A[j], so the scalar write on Stmt_for_bodyB can be removed.
;
diff --git a/polly/test/Simplify/ununsed_read_in_region_entry.ll b/polly/test/Simplify/ununsed_read_in_region_entry.ll
index 9b2d4521e2d6..f2436c263a96 100644
--- a/polly/test/Simplify/ununsed_read_in_region_entry.ll
+++ b/polly/test/Simplify/ununsed_read_in_region_entry.ll
@@ -1,5 +1,5 @@
-; RUN: opt %loadPolly -polly-print-simplify -disable-output< %s | FileCheck %s -match-full-lines
-; RUN: opt %loadPolly -polly-simplify -polly-codegen -S < %s | FileCheck %s -check-prefix=CODEGEN
+; RUN: opt %loadNPMPolly '-passes=print<polly-simplify>' -disable-output < %s | FileCheck %s -match-full-lines
+; RUN: opt %loadNPMPolly '-passes=polly-simplify,polly-codegen' -S < %s | FileCheck %s -check-prefix=CODEGEN
;
; for (int i = 0; i < n; i+=1) {
; (void)A[0];
diff --git a/polly/test/Support/Plugins.ll b/polly/test/Support/Plugins.ll
index cee878f1c6ac..872a32fad4fe 100644
--- a/polly/test/Support/Plugins.ll
+++ b/polly/test/Support/Plugins.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadNPMPolly -passes='polly-prepare,scop(print<polly-ast>)' -S < %s \
+; RUN: opt %loadNPMPolly '-passes=polly-prepare,scop(print<polly-ast>)' -S < %s \
; RUN: | FileCheck %s
; This testcase tests plugin registration. Check-lines below serve to verify
diff --git a/polly/test/Support/isl-args.ll b/polly/test/Support/isl-args.ll
index efa94194bc3f..206cb73bfc5a 100644
--- a/polly/test/Support/isl-args.ll
+++ b/polly/test/Support/isl-args.ll
@@ -1,7 +1,7 @@
-; RUN: opt %loadPolly -polly-scops -disable-output -polly-isl-arg=-V < %s | FileCheck %s -match-full-lines --check-prefix=VERSION
-; RUN: opt %loadPolly -polly-scops -disable-output -polly-isl-arg=-h < %s | FileCheck %s -match-full-lines --check-prefix=HELP
-; RUN: not opt %loadPolly -polly-scops -disable-output -polly-isl-arg=-asdf < %s 2>&1| FileCheck %s -match-full-lines --check-prefix=UNKNOWN
-; RUN: opt %loadPolly -polly-scops -disable-output -polly-isl-arg=--schedule-algorithm=feautrier < %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output -polly-isl-arg=-V < %s | FileCheck %s -match-full-lines --check-prefix=VERSION
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output -polly-isl-arg=-h < %s | FileCheck %s -match-full-lines --check-prefix=HELP
+; RUN: not opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output -polly-isl-arg=-asdf < %s 2>&1 | FileCheck %s -match-full-lines --check-prefix=UNKNOWN
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -disable-output -polly-isl-arg=--schedule-algorithm=feautrier < %s
; VERSION: isl-{{.*}}-IMath-32
; HELP: Usage: -polly-isl-arg [OPTION...]
diff --git a/polly/test/lit.site.cfg.in b/polly/test/lit.site.cfg.in
index b44061260834..d8a0b6ae3a3b 100644
--- a/polly/test/lit.site.cfg.in
+++ b/polly/test/lit.site.cfg.in
@@ -48,7 +48,6 @@ else:
config.substitutions.append(('%loadPolly', commonOpts ))
config.substitutions.append(('%loadNPMPolly', commonOpts ))
-
import lit.llvm
lit.llvm.initialize(lit_config, config)
diff --git a/polly/test/polly.ll b/polly/test/polly.ll
index f78cceacfb12..2e455b39a9cd 100644
--- a/polly/test/polly.ll
+++ b/polly/test/polly.ll
@@ -1,4 +1,4 @@
-; RUN: opt %loadPolly -polly-scops -S < %s | FileCheck %s
+; RUN: opt %loadNPMPolly '-passes=print<polly-function-scops>' -S < %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define void @foo() nounwind {
start:
diff --git a/polly/unittests/CMakeLists.txt b/polly/unittests/CMakeLists.txt
index 94b75449c302..093a2146f63c 100644
--- a/polly/unittests/CMakeLists.txt
+++ b/polly/unittests/CMakeLists.txt
@@ -1,5 +1,5 @@
add_custom_target(PollyUnitTests)
-set_target_properties(PollyUnitTests PROPERTIES FOLDER "Polly")
+set_target_properties(PollyUnitTests PROPERTIES FOLDER "Polly/Tests")
# add_polly_unittest(test_dirname file1.cpp file2.cpp)
#
@@ -13,9 +13,8 @@ function(add_polly_unittest test_name)
target_link_libraries(${test_name} PRIVATE gtest_main gtest)
add_dependencies(PollyUnitTests ${test_name})
-
- set_property(TARGET ${test_name} PROPERTY FOLDER "Polly")
endif()
+ set_property(TARGET ${test_name} PROPERTY FOLDER "Polly/Tests/Unit")
if(LLVM_LINK_LLVM_DYLIB AND LLVM_POLLY_LINK_INTO_TOOLS)
# In this case Polly is already present in libLLVM,
diff --git a/pstl/CMakeLists.txt b/pstl/CMakeLists.txt
index 255e22af9a26..592e11d35647 100644
--- a/pstl/CMakeLists.txt
+++ b/pstl/CMakeLists.txt
@@ -6,6 +6,7 @@
#
#===----------------------------------------------------------------------===##
cmake_minimum_required(VERSION 3.20.0)
+set(LLVM_SUBPROJECT_TITLE "Parallel STL")
set(PARALLELSTL_VERSION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/include/pstl/internal/pstl_config.h")
file(STRINGS "${PARALLELSTL_VERSION_FILE}" PARALLELSTL_VERSION_SOURCE REGEX "#define _PSTL_VERSION .*$")
diff --git a/runtimes/CMakeLists.txt b/runtimes/CMakeLists.txt
index fcc59c8fa1c3..24f485116959 100644
--- a/runtimes/CMakeLists.txt
+++ b/runtimes/CMakeLists.txt
@@ -9,6 +9,8 @@ include(${LLVM_COMMON_CMAKE_UTILS}/Modules/CMakePolicy.cmake
include(${LLVM_COMMON_CMAKE_UTILS}/Modules/LLVMVersion.cmake)
project(Runtimes C CXX ASM)
+set(LLVM_SUBPROJECT_TITLE "Runtimes")
+set_property(GLOBAL PROPERTY USE_FOLDERS ON)
list(INSERT CMAKE_MODULE_PATH 0
"${CMAKE_CURRENT_SOURCE_DIR}/cmake"
diff --git a/utils/bazel/.bazelrc b/utils/bazel/.bazelrc
index 5a6d1889076a..09111bcdc834 100644
--- a/utils/bazel/.bazelrc
+++ b/utils/bazel/.bazelrc
@@ -51,9 +51,6 @@ build --experimental_cc_shared_library
build:zlib_external --repo_env=BAZEL_LLVM_ZLIB_STRATEGY=external
build:zlib_system --repo_env=BAZEL_LLVM_ZLIB_STRATEGY=system
-build:terminfo_external --repo_env=BAZEL_LLVM_TERMINFO_STRATEGY=external
-build:terminfo_system --repo_env=BAZEL_LLVM_TERMINFO_STRATEGY=system
-
###############################################################################
# Options for "generic_clang" builds: these options should generally apply to
# builds using a Clang-based compiler, and default to the `clang` executable on
diff --git a/utils/bazel/llvm-project-overlay/bolt/BUILD.bazel b/utils/bazel/llvm-project-overlay/bolt/BUILD.bazel
index 1c12c8167ba4..7413b018ef32 100644
--- a/utils/bazel/llvm-project-overlay/bolt/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/bolt/BUILD.bazel
@@ -167,7 +167,10 @@ cc_library(
]),
hdrs = glob([
"include/bolt/Passes/*.h",
- ]),
+ ]) + [
+ # To avoid circular dependency on "Profile".
+ "include/bolt/Profile/BoltAddressTranslation.h",
+ ],
includes = ["include"],
deps = [
":Core",
diff --git a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
index c469da74fc56..d1a2c6f11d98 100644
--- a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
@@ -721,6 +721,7 @@ cc_binary(
":TargetParser",
":config",
":llvm-tblgen-headers",
+ ":vt_gen",
],
)
@@ -1881,6 +1882,7 @@ cc_library(
":Instrumentation",
":MC",
":MCParser",
+ ":ObjCARC",
":Object",
":ProfileData",
":Remarks",
diff --git a/utils/bazel/llvm-project-overlay/llvm/driver.bzl b/utils/bazel/llvm-project-overlay/llvm/driver.bzl
index 10796d919834..a57a14ebd5f8 100644
--- a/utils/bazel/llvm-project-overlay/llvm/driver.bzl
+++ b/utils/bazel/llvm-project-overlay/llvm/driver.bzl
@@ -39,6 +39,7 @@ _EXTRA_ALIASES = {
"clang": ["clang++", "clang-cl", "clang-cpp"],
"lld": ["ld", "lld-link", "ld.lld", "ld64.lld", "wasm-ld"],
"llvm-ar": ["ranlib", "lib", "dlltool"],
+ "llvm-cxxfilt": ["c++filt"],
"llvm-objcopy": ["bitcode-strip", "install-name-tool", "strip"],
"llvm-objdump": ["otool"],
"llvm-rc": ["windres"],
diff --git a/utils/bazel/llvm-project-overlay/llvm/include/llvm/Config/config.h b/utils/bazel/llvm-project-overlay/llvm/include/llvm/Config/config.h
index e9385f45c5e5..a4fb47d677ab 100644
--- a/utils/bazel/llvm-project-overlay/llvm/include/llvm/Config/config.h
+++ b/utils/bazel/llvm-project-overlay/llvm/include/llvm/Config/config.h
@@ -222,9 +222,6 @@
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
-/* Define if the setupterm() function is supported this platform. */
-/* LLVM_ENABLE_TERMINFO defined in Bazel */
-
/* Define to 1 if you have the <termios.h> header file. */
#define HAVE_TERMIOS_H 1
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
index 71fca298e9b9..f31f75ca5c74 100644
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -51,10 +51,7 @@ expand_template(
"#cmakedefine01 MLIR_ENABLE_NVPTXCOMPILER": "#define MLIR_ENABLE_NVPTXCOMPILER 0",
"#cmakedefine01 MLIR_ENABLE_PDL_IN_PATTERNMATCH": "#define MLIR_ENABLE_PDL_IN_PATTERNMATCH 1",
"#cmakedefine01 MLIR_ENABLE_ROCM_CONVERSIONS": "#define MLIR_ENABLE_ROCM_CONVERSIONS 0",
- } | if_cuda_available(
- {"#cmakedefine01 MLIR_ENABLE_CUDA_CONVERSIONS": "#define MLIR_ENABLE_CUDA_CONVERSIONS 1"},
- {"#cmakedefine01 MLIR_ENABLE_CUDA_CONVERSIONS": "#define MLIR_ENABLE_CUDA_CONVERSIONS 0"},
- ),
+ },
template = "include/mlir/Config/mlir-config.h.cmake",
)
@@ -4979,7 +4976,6 @@ cc_library(
":VectorToLLVM",
":VectorToSCF",
":VectorTransformOpsIncGen",
- ":VectorTransforms",
":X86VectorTransforms",
],
)
@@ -5616,7 +5612,6 @@ cc_library(
":Transforms",
":VectorToLLVM",
":VectorToSCF",
- ":config",
],
)
@@ -6282,7 +6277,6 @@ cc_library(
":NVVMToLLVMIRTranslation",
":TargetLLVM",
":ToLLVMIRTranslation",
- ":config",
"//llvm:NVPTXCodeGen",
"//llvm:Support",
"//llvm:config",
@@ -7597,7 +7591,6 @@ cc_library(
"include/mlir/Transforms/LoopInvariantCodeMotionUtils.h",
"include/mlir/Transforms/OneToNTypeConversion.h",
"include/mlir/Transforms/RegionUtils.h",
- "include/mlir/Transforms/TopologicalSortUtils.h",
],
includes = ["include"],
deps = [
@@ -8367,6 +8360,7 @@ cc_library(
":ArithDialect",
":ConversionPassIncGen",
":EmitCDialect",
+ ":PDLLAST",
":Pass",
":TransformUtils",
],
@@ -8723,6 +8717,7 @@ cc_library(
],
includes = ["include"],
deps = [
+ ":Analysis",
":DLTIDialect",
":IR",
":LLVMConversionIncGen",
@@ -8957,6 +8952,7 @@ cc_library(
hdrs = glob(["include/mlir/Target/LLVMIR/Dialect/OpenACC/*.h"]),
includes = ["include"],
deps = [
+ ":Analysis",
":IR",
":LLVMDialect",
":OpenACCDialect",
@@ -8976,6 +8972,7 @@ cc_library(
hdrs = glob(["include/mlir/Target/LLVMIR/Dialect/OpenMP/*.h"]),
includes = ["include"],
deps = [
+ ":Analysis",
":IR",
":LLVMDialect",
":OpenMPCommon",
@@ -9360,7 +9357,6 @@ cc_library(
":X86VectorTransforms",
":XeGPUDialect",
":XeGPUTransforms",
- ":config",
],
)
@@ -10145,6 +10141,10 @@ td_library(
srcs = [
"include/mlir/Dialect/OpenACCMPCommon/Interfaces/AtomicInterfaces.td",
"include/mlir/Dialect/OpenMP/OmpCommon.td",
+ "include/mlir/Dialect/OpenMP/OpenMPAttrDefs.td",
+ "include/mlir/Dialect/OpenMP/OpenMPDialect.td",
+ "include/mlir/Dialect/OpenMP/OpenMPEnums.td",
+ "include/mlir/Dialect/OpenMP/OpenMPOpBase.td",
"include/mlir/Dialect/OpenMP/OpenMPOps.td",
"include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td",
"include/mlir/Dialect/OpenMP/OpenMPTypeInterfaces.td",
@@ -11157,6 +11157,7 @@ cc_library(
":LinalgStructuredOpsIncGen",
":LinalgUtils",
":MaskableOpInterface",
+ ":MathDialect",
":MemRefDialect",
":MemRefTransforms",
":MeshDialect",
diff --git a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
index 258cc88ebbf3..fdf89d00cbb1 100644
--- a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
@@ -36,7 +36,7 @@ expand_template(
"\"@MLIR_BINARY_DIR@\"": "os.environ[\"TEST_UNDECLARED_OUTPUTS_DIR\"]",
# All disabled, but required to be substituted because they are not in quotes.
"@LLVM_BUILD_EXAMPLES@": "0",
- "@MLIR_ENABLE_CUDA_CONVERSIONS@": "0",
+ "@LLVM_HAS_NVPTX_TARGET@": "0",
"@MLIR_ENABLE_CUDA_RUNNER@": "0",
"@MLIR_ENABLE_ROCM_CONVERSIONS@": "0",
"@MLIR_ENABLE_ROCM_RUNNER@": "0",
@@ -608,6 +608,7 @@ cc_library(
":TestDialect",
"//mlir:FuncDialect",
"//mlir:FuncToLLVM",
+ "//mlir:IR",
"//mlir:LLVMCommonConversion",
"//mlir:LLVMDialect",
"//mlir:Pass",
@@ -951,10 +952,10 @@ cc_library(
"//mlir:ArmSMEToSCF",
"//mlir:ArmSMETransforms",
"//mlir:ArmSVETransforms",
- "//mlir:FuncDialect",
+ "//mlir:FuncDialect",
"//mlir:IR",
"//mlir:Pass",
- "//mlir:SCFToControlFlow",
+ "//mlir:SCFToControlFlow",
"//mlir:Transforms",
"//mlir:VectorToArmSME",
"//mlir:VectorToSCF",
diff --git a/utils/bazel/llvm_configs/config.h.cmake b/utils/bazel/llvm_configs/config.h.cmake
index 977c182e9d2b..ff30741c8f36 100644
--- a/utils/bazel/llvm_configs/config.h.cmake
+++ b/utils/bazel/llvm_configs/config.h.cmake
@@ -209,9 +209,6 @@
/* Define to 1 if you have the <sys/types.h> header file. */
#cmakedefine HAVE_SYS_TYPES_H ${HAVE_SYS_TYPES_H}
-/* Define if the setupterm() function is supported this platform. */
-#cmakedefine LLVM_ENABLE_TERMINFO ${LLVM_ENABLE_TERMINFO}
-
/* Define to 1 if you have the <termios.h> header file. */
#cmakedefine HAVE_TERMIOS_H ${HAVE_TERMIOS_H}